4 # Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011 Google Inc.
6 # This program is free software; you can redistribute it and/or modify
7 # it under the terms of the GNU General Public License as published by
8 # the Free Software Foundation; either version 2 of the License, or
9 # (at your option) any later version.
11 # This program is distributed in the hope that it will be useful, but
12 # WITHOUT ANY WARRANTY; without even the implied warranty of
13 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 # General Public License for more details.
16 # You should have received a copy of the GNU General Public License
17 # along with this program; if not, write to the Free Software
18 # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
22 """Module dealing with command line parsing"""
33 from cStringIO import StringIO
35 from ganeti import utils
36 from ganeti import errors
37 from ganeti import constants
38 from ganeti import opcodes
39 from ganeti import luxi
40 from ganeti import ssconf
41 from ganeti import rpc
42 from ganeti import ssh
43 from ganeti import compat
44 from ganeti import netutils
45 from ganeti import qlang
47 from optparse import (OptionParser, TitledHelpFormatter,
48 Option, OptionValueError)
52 # Command line options
65 "CLUSTER_DOMAIN_SECRET_OPT",
83 "FILESTORE_DRIVER_OPT",
89 "GLOBAL_SHARED_FILEDIR_OPT",
94 "DEFAULT_IALLOCATOR_OPT",
95 "IDENTIFY_DEFAULTS_OPT",
98 "IGNORE_FAILURES_OPT",
100 "IGNORE_REMOVE_FAILURES_OPT",
101 "IGNORE_SECONDARIES_OPT",
105 "MAINTAIN_NODE_HEALTH_OPT",
107 "MASTER_NETMASK_OPT",
109 "MIGRATION_MODE_OPT",
111 "NEW_CLUSTER_CERT_OPT",
112 "NEW_CLUSTER_DOMAIN_SECRET_OPT",
113 "NEW_CONFD_HMAC_KEY_OPT",
116 "NEW_SPICE_CERT_OPT",
118 "NODE_FORCE_JOIN_OPT",
120 "NODE_PLACEMENT_OPT",
124 "NODRBD_STORAGE_OPT",
130 "NOMODIFY_ETCHOSTS_OPT",
131 "NOMODIFY_SSH_SETUP_OPT",
137 "NOSSH_KEYCHECK_OPT",
151 "PREALLOC_WIPE_DISKS_OPT",
152 "PRIMARY_IP_VERSION_OPT",
158 "REMOVE_INSTANCE_OPT",
163 "SECONDARY_ONLY_OPT",
167 "SHUTDOWN_TIMEOUT_OPT",
169 "SPECS_CPU_COUNT_OPT",
170 "SPECS_DISK_COUNT_OPT",
171 "SPECS_DISK_SIZE_OPT",
172 "SPECS_MEM_SIZE_OPT",
173 "SPECS_NIC_COUNT_OPT",
174 "SPECS_DISK_TEMPLATES",
180 "STARTUP_PAUSED_OPT",
189 "USE_EXTERNAL_MIP_SCRIPT",
196 "IGNORE_IPOLICY_OPT",
197 "INSTANCE_POLICY_OPTS",
198 # Generic functions for CLI programs
201 "GenericInstanceCreate",
207 "JobSubmittedException",
209 "RunWhileClusterStopped",
213 # Formatting functions
214 "ToStderr", "ToStdout",
217 "FormatParameterDict",
226 # command line options support infrastructure
227 "ARGS_MANY_INSTANCES",
246 "OPT_COMPL_INST_ADD_NODES",
247 "OPT_COMPL_MANY_NODES",
248 "OPT_COMPL_ONE_IALLOCATOR",
249 "OPT_COMPL_ONE_INSTANCE",
250 "OPT_COMPL_ONE_NODE",
251 "OPT_COMPL_ONE_NODEGROUP",
257 "COMMON_CREATE_OPTS",
263 #: Priorities (sorted)
265 ("low", constants.OP_PRIO_LOW),
266 ("normal", constants.OP_PRIO_NORMAL),
267 ("high", constants.OP_PRIO_HIGH),
270 #: Priority dictionary for easier lookup
271 # TODO: Replace this and _PRIORITY_NAMES with a single sorted dictionary once
272 # we migrate to Python 2.6
273 _PRIONAME_TO_VALUE = dict(_PRIORITY_NAMES)
275 # Query result status for clients
278 QR_INCOMPLETE) = range(3)
280 #: Maximum batch size for ChooseJob
285 def __init__(self, min=0, max=None): # pylint: disable=W0622
290 return ("<%s min=%s max=%s>" %
291 (self.__class__.__name__, self.min, self.max))
class ArgSuggest(_Argument):
  """Suggesting argument.

  Value can be any of the ones passed to the constructor.

  """
  # pylint: disable=W0622
  def __init__(self, min=0, max=None, choices=None):
    _Argument.__init__(self, min=min, max=max)
    # choices is the list of suggested values (used e.g. by completion)
    self.choices = choices

  def __repr__(self):
    return ("<%s min=%s max=%s choices=%r>" %
            (self.__class__.__name__, self.min, self.max, self.choices))
class ArgChoice(ArgSuggest):
  """Choice argument.

  Value can be any of the ones passed to the constructor. Like L{ArgSuggest},
  but value must be one of the choices.

  """
class ArgUnknown(_Argument):
  """Unknown argument to program (e.g. determined at runtime).

  """
class ArgInstance(_Argument):
  """Instances argument.

  """
331 class ArgNode(_Argument):
class ArgGroup(_Argument):
  """Node group argument.

  """
343 class ArgJobId(_Argument):
class ArgFile(_Argument):
  """File path argument.

  """
355 class ArgCommand(_Argument):
361 class ArgHost(_Argument):
367 class ArgOs(_Argument):
# Shorthand argument specifications reused by the per-command argument
# tables; the "MANY" variants accept any number of names (including zero),
# the "ONE" variants require exactly one.
374 ARGS_MANY_INSTANCES = [ArgInstance()]
375 ARGS_MANY_NODES = [ArgNode()]
376 ARGS_MANY_GROUPS = [ArgGroup()]
377 ARGS_ONE_INSTANCE = [ArgInstance(min=1, max=1)]
378 ARGS_ONE_NODE = [ArgNode(min=1, max=1)]
380 ARGS_ONE_GROUP = [ArgGroup(min=1, max=1)]
381 ARGS_ONE_OS = [ArgOs(min=1, max=1)]
def _ExtractTagsObject(opts, args):
  """Extract the tag type object.

  Note that this function will modify its args parameter.

  @param opts: parsed command line options; must carry a C{tag_type}
      attribute denoting which kind of object we operate on
  @param args: positional arguments; for non-cluster tag kinds the first
      element (the object name) is popped off
  @return: a (kind, name) tuple suitable for the tag opcodes

  """
  if not hasattr(opts, "tag_type"):
    raise errors.ProgrammerError("tag_type not passed to _ExtractTagsObject")
  kind = opts.tag_type
  if kind == constants.TAG_CLUSTER:
    # the cluster is a singleton, so its "name" is just the kind itself
    retval = kind, kind
  elif kind in (constants.TAG_NODEGROUP,
                constants.TAG_NODE,
                constants.TAG_INSTANCE):
    if not args:
      raise errors.OpPrereqError("no arguments passed to the command")
    name = args.pop(0)
    retval = kind, name
  else:
    raise errors.ProgrammerError("Unhandled tag type '%s'" % kind)
  return retval
def _ExtendTags(opts, args):
  """Extend the args if a source file has been given.

  This function will extend the tags with the contents of the file
  passed in the 'tags_source' attribute of the opts parameter. A file
  named '-' will be replaced by stdin.

  @param opts: parsed command line options; C{tags_source} holds the
      file name (or None for no file)
  @param args: list of tags, extended in place

  """
  import sys  # local import: the (partial) module header may not provide it

  fname = opts.tags_source
  if fname is None:
    return
  if fname == "-":
    new_fh = sys.stdin
  else:
    new_fh = open(fname, "r")
  new_data = []
  try:
    # we don't use the nice 'new_data = [line.strip() for line in fh]'
    # because of python bug 1633941
    while True:
      line = new_fh.readline()
      if not line:
        break
      new_data.append(line.strip())
  finally:
    new_fh.close()
  args.extend(new_data)
def ListTags(opts, args):
  """List the tags on a given object.

  This is a generic implementation that knows how to deal with all
  three cases of tag objects (cluster, node, instance). The opts
  argument is expected to contain a tag_type field denoting what
  object type we work on.

  """
  kind, name = _ExtractTagsObject(opts, args)
  cl = GetClient()
  result = cl.QueryTags(kind, name)
  # sort for stable, user-friendly output
  result = list(result)
  result.sort()
  for tag in result:
    ToStdout(tag)
def AddTags(opts, args):
  """Add tags on a given object.

  This is a generic implementation that knows how to deal with all
  three cases of tag objects (cluster, node, instance). The opts
  argument is expected to contain a tag_type field denoting what
  object type we work on.

  """
  kind, name = _ExtractTagsObject(opts, args)
  _ExtendTags(opts, args)
  # after extraction/extension, the remaining args are the tags themselves
  if not args:
    raise errors.OpPrereqError("No tags to be added")
  op = opcodes.OpTagsSet(kind=kind, name=name, tags=args)
  SubmitOpCode(op, opts=opts)
def RemoveTags(opts, args):
  """Remove tags from a given object.

  This is a generic implementation that knows how to deal with all
  three cases of tag objects (cluster, node, instance). The opts
  argument is expected to contain a tag_type field denoting what
  object type we work on.

  """
  kind, name = _ExtractTagsObject(opts, args)
  _ExtendTags(opts, args)
  # after extraction/extension, the remaining args are the tags themselves
  if not args:
    raise errors.OpPrereqError("No tags to be removed")
  op = opcodes.OpTagsDel(kind=kind, name=name, tags=args)
  SubmitOpCode(op, opts=opts)
488 def check_unit(option, opt, value): # pylint: disable=W0613
489 """OptParsers custom converter for units.
493 return utils.ParseUnit(value)
494 except errors.UnitParseError, err:
495 raise OptionValueError("option %s: %s" % (opt, err))
def _SplitKeyVal(opt, data):
  """Convert a KeyVal string into a dict.

  This function will convert a key=val[,...] string into a dict. Empty
  values will be converted specially: keys which have the prefix 'no_'
  will have the value=False and the prefix stripped, the others will
  have value=True.

  @type opt: string
  @param opt: a string holding the option name for which we process the
      data, used in building error messages
  @type data: string
  @param data: a string of the format key=val,key=val,...
  @rtype: dict
  @return: {key=val, key=val}
  @raises errors.ParameterError: if there are duplicate keys

  """
  kv_dict = {}
  if data:
    for elem in utils.UnescapeAndSplit(data, sep=","):
      if "=" in elem:
        key, val = elem.split("=", 1)
      else:
        # valueless elements: interpret the prefix to derive the value
        if elem.startswith(NO_PREFIX):
          key, val = elem[len(NO_PREFIX):], False
        elif elem.startswith(UN_PREFIX):
          key, val = elem[len(UN_PREFIX):], None
        else:
          key, val = elem, True
      if key in kv_dict:
        raise errors.ParameterError("Duplicate key '%s' in option %s" %
                                    (key, opt))
      kv_dict[key] = val
  return kv_dict
def check_ident_key_val(option, opt, value):  # pylint: disable=W0613
  """Custom parser for ident:key=val,key=val options.

  This will store the parsed values as a tuple (ident, {key: val}). As such,
  multiple uses of this option via action=append is possible.

  """
  if ":" not in value:
    ident, rest = value, ""
  else:
    ident, rest = value.split(":", 1)

  if ident.startswith(NO_PREFIX):
    # removal request ("no_<ident>") may not carry options
    if rest:
      msg = "Cannot pass options when removing parameter groups: %s" % value
      raise errors.ParameterError(msg)
    retval = (ident[len(NO_PREFIX):], False)
  elif ident.startswith(UN_PREFIX):
    # reset-to-default request may not carry options either
    if rest:
      msg = "Cannot pass options when removing parameter groups: %s" % value
      raise errors.ParameterError(msg)
    retval = (ident[len(UN_PREFIX):], None)
  else:
    kv_dict = _SplitKeyVal(opt, rest)
    retval = (ident, kv_dict)
  return retval
def check_key_val(option, opt, value):  # pylint: disable=W0613
  """Custom parser class for key=val,key=val options.

  This will store the parsed values as a dict {key: val}.

  """
  return _SplitKeyVal(opt, value)
def check_bool(option, opt, value): # pylint: disable=W0613
  """Custom parser for yes/no options.

  This will store the parsed value as either True or False.

  """
  value = value.lower()
  if value == constants.VALUE_FALSE or value == "no":
    return False
  elif value == constants.VALUE_TRUE or value == "yes":
    return True
  else:
    raise errors.ParameterError("Invalid boolean value '%s'" % value)
def check_list(option, opt, value): # pylint: disable=W0613
  """Custom parser for comma-separated lists.

  """
  # we have to make this explicit check since "".split(",") is [""],
  # not an empty list :(
  if not value:
    return []
  else:
    return utils.UnescapeAndSplit(value)
# completion_suggestion is normally a list. Using numeric values not evaluating
# to False for dynamic completion.
(OPT_COMPL_MANY_NODES,
 OPT_COMPL_ONE_NODE,
 OPT_COMPL_ONE_INSTANCE,
 OPT_COMPL_ONE_OS,
 OPT_COMPL_ONE_IALLOCATOR,
 OPT_COMPL_INST_ADD_NODES,
 OPT_COMPL_ONE_NODEGROUP) = range(100, 107)
#: Complete set of dynamic-completion markers, used to validate
#: completion_suggest values in option definitions.
OPT_COMPL_ALL = frozenset([
  OPT_COMPL_MANY_NODES,
  OPT_COMPL_ONE_NODE,
  OPT_COMPL_ONE_INSTANCE,
  OPT_COMPL_ONE_OS,
  OPT_COMPL_ONE_IALLOCATOR,
  OPT_COMPL_INST_ADD_NODES,
  OPT_COMPL_ONE_NODEGROUP,
  ])
class CliOption(Option):
  """Custom option class for optparse.

  Registers the Ganeti-specific option value types and adds the
  C{completion_suggest} attribute used by shell completion.

  """
  ATTRS = Option.ATTRS + [
    "completion_suggest",
    ]
  # custom option types, matching the TYPE_CHECKER entries below
  TYPES = Option.TYPES + (
    "identkeyval",
    "keyval",
    "unit",
    "bool",
    "list",
    )
  TYPE_CHECKER = Option.TYPE_CHECKER.copy()
  TYPE_CHECKER["identkeyval"] = check_ident_key_val
  TYPE_CHECKER["keyval"] = check_key_val
  TYPE_CHECKER["unit"] = check_unit
  TYPE_CHECKER["bool"] = check_bool
  TYPE_CHECKER["list"] = check_list
642 # optparse.py sets make_option, so we do it for our own option class, too
# (all the *_OPT definitions below go through this factory, so they all
# support the custom types and completion_suggest of CliOption)
643 cli_option = CliOption
648 DEBUG_OPT = cli_option("-d", "--debug", default=0, action="count",
649 help="Increase debugging level")
651 NOHDR_OPT = cli_option("--no-headers", default=False,
652 action="store_true", dest="no_headers",
653 help="Don't display column headers")
655 SEP_OPT = cli_option("--separator", default=None,
656 action="store", dest="separator",
657 help=("Separator between output fields"
658 " (defaults to one space)"))
660 USEUNITS_OPT = cli_option("--units", default=None,
661 dest="units", choices=("h", "m", "g", "t"),
662 help="Specify units for output (one of h/m/g/t)")
664 FIELDS_OPT = cli_option("-o", "--output", dest="output", action="store",
665 type="string", metavar="FIELDS",
666 help="Comma separated list of output fields")
668 FORCE_OPT = cli_option("-f", "--force", dest="force", action="store_true",
669 default=False, help="Force the operation")
671 CONFIRM_OPT = cli_option("--yes", dest="confirm", action="store_true",
672 default=False, help="Do not require confirmation")
674 IGNORE_OFFLINE_OPT = cli_option("--ignore-offline", dest="ignore_offline",
675 action="store_true", default=False,
676 help=("Ignore offline nodes and do as much"
679 TAG_ADD_OPT = cli_option("--tags", dest="tags",
680 default=None, help="Comma-separated list of instance"
683 TAG_SRC_OPT = cli_option("--from", dest="tags_source",
684 default=None, help="File with tag names")
686 SUBMIT_OPT = cli_option("--submit", dest="submit_only",
687 default=False, action="store_true",
688 help=("Submit the job and return the job ID, but"
689 " don't wait for the job to finish"))
691 SYNC_OPT = cli_option("--sync", dest="do_locking",
692 default=False, action="store_true",
693 help=("Grab locks while doing the queries"
694 " in order to ensure more consistent results"))
696 DRY_RUN_OPT = cli_option("--dry-run", default=False,
698 help=("Do not execute the operation, just run the"
699 " check steps and verify it it could be"
702 VERBOSE_OPT = cli_option("-v", "--verbose", default=False,
704 help="Increase the verbosity of the operation")
706 DEBUG_SIMERR_OPT = cli_option("--debug-simulate-errors", default=False,
707 action="store_true", dest="simulate_errors",
708 help="Debugging option that makes the operation"
709 " treat most runtime checks as failed")
711 NWSYNC_OPT = cli_option("--no-wait-for-sync", dest="wait_for_sync",
712 default=True, action="store_false",
713 help="Don't wait for sync (DANGEROUS!)")
715 ONLINE_INST_OPT = cli_option("--online", dest="online_inst",
716 action="store_true", default=False,
717 help="Enable offline instance")
719 OFFLINE_INST_OPT = cli_option("--offline", dest="offline_inst",
720 action="store_true", default=False,
721 help="Disable down instance")
723 DISK_TEMPLATE_OPT = cli_option("-t", "--disk-template", dest="disk_template",
724 help=("Custom disk setup (%s)" %
725 utils.CommaJoin(constants.DISK_TEMPLATES)),
726 default=None, metavar="TEMPL",
727 choices=list(constants.DISK_TEMPLATES))
729 NONICS_OPT = cli_option("--no-nics", default=False, action="store_true",
730 help="Do not create any network cards for"
733 FILESTORE_DIR_OPT = cli_option("--file-storage-dir", dest="file_storage_dir",
734 help="Relative path under default cluster-wide"
735 " file storage dir to store file-based disks",
736 default=None, metavar="<DIR>")
738 FILESTORE_DRIVER_OPT = cli_option("--file-driver", dest="file_driver",
739 help="Driver to use for image files",
740 default="loop", metavar="<DRIVER>",
741 choices=list(constants.FILE_DRIVER))
743 IALLOCATOR_OPT = cli_option("-I", "--iallocator", metavar="<NAME>",
744 help="Select nodes for the instance automatically"
745 " using the <NAME> iallocator plugin",
746 default=None, type="string",
747 completion_suggest=OPT_COMPL_ONE_IALLOCATOR)
749 DEFAULT_IALLOCATOR_OPT = cli_option("-I", "--default-iallocator",
751 help="Set the default instance allocator plugin",
752 default=None, type="string",
753 completion_suggest=OPT_COMPL_ONE_IALLOCATOR)
755 OS_OPT = cli_option("-o", "--os-type", dest="os", help="What OS to run",
757 completion_suggest=OPT_COMPL_ONE_OS)
759 OSPARAMS_OPT = cli_option("-O", "--os-parameters", dest="osparams",
760 type="keyval", default={},
761 help="OS parameters")
763 FORCE_VARIANT_OPT = cli_option("--force-variant", dest="force_variant",
764 action="store_true", default=False,
765 help="Force an unknown variant")
767 NO_INSTALL_OPT = cli_option("--no-install", dest="no_install",
768 action="store_true", default=False,
769 help="Do not install the OS (will"
772 BACKEND_OPT = cli_option("-B", "--backend-parameters", dest="beparams",
773 type="keyval", default={},
774 help="Backend parameters")
776 HVOPTS_OPT = cli_option("-H", "--hypervisor-parameters", type="keyval",
777 default={}, dest="hvparams",
778 help="Hypervisor parameters")
780 DISK_PARAMS_OPT = cli_option("-D", "--disk-parameters", dest="diskparams",
781 help="Disk template parameters, in the format"
782 " template:option=value,option=value,...",
783 type="identkeyval", action="append", default=[])
785 SPECS_MEM_SIZE_OPT = cli_option("--specs-mem-size", dest="ispecs_mem_size",
786 type="keyval", default={},
787 help="Memory count specs: min, max, std"
790 SPECS_CPU_COUNT_OPT = cli_option("--specs-cpu-count", dest="ispecs_cpu_count",
791 type="keyval", default={},
792 help="CPU count specs: min, max, std")
794 SPECS_DISK_COUNT_OPT = cli_option("--specs-disk-count",
795 dest="ispecs_disk_count",
796 type="keyval", default={},
797 help="Disk count specs: min, max, std")
799 SPECS_DISK_SIZE_OPT = cli_option("--specs-disk-size", dest="ispecs_disk_size",
800 type="keyval", default={},
801 help="Disk size specs: min, max, std (in MB)")
803 SPECS_NIC_COUNT_OPT = cli_option("--specs-nic-count", dest="ispecs_nic_count",
804 type="keyval", default={},
805 help="NIC count specs: min, max, std")
807 SPECS_DISK_TEMPLATES = cli_option("--specs-disk-templates",
808 dest="ispecs_disk_templates",
809 type="list", default=None,
810 help="Comma-separated list of"
811 " enabled disk templates")
813 HYPERVISOR_OPT = cli_option("-H", "--hypervisor-parameters", dest="hypervisor",
814 help="Hypervisor and hypervisor options, in the"
815 " format hypervisor:option=value,option=value,...",
816 default=None, type="identkeyval")
818 HVLIST_OPT = cli_option("-H", "--hypervisor-parameters", dest="hvparams",
819 help="Hypervisor and hypervisor options, in the"
820 " format hypervisor:option=value,option=value,...",
821 default=[], action="append", type="identkeyval")
823 NOIPCHECK_OPT = cli_option("--no-ip-check", dest="ip_check", default=True,
824 action="store_false",
825 help="Don't check that the instance's IP"
828 NONAMECHECK_OPT = cli_option("--no-name-check", dest="name_check",
829 default=True, action="store_false",
830 help="Don't check that the instance's name"
833 NET_OPT = cli_option("--net",
834 help="NIC parameters", default=[],
835 dest="nics", action="append", type="identkeyval")
837 DISK_OPT = cli_option("--disk", help="Disk parameters", default=[],
838 dest="disks", action="append", type="identkeyval")
840 DISKIDX_OPT = cli_option("--disks", dest="disks", default=None,
841 help="Comma-separated list of disks"
842 " indices to act on (e.g. 0,2) (optional,"
843 " defaults to all disks)")
845 OS_SIZE_OPT = cli_option("-s", "--os-size", dest="sd_size",
846 help="Enforces a single-disk configuration using the"
847 " given disk size, in MiB unless a suffix is used",
848 default=None, type="unit", metavar="<size>")
850 IGNORE_CONSIST_OPT = cli_option("--ignore-consistency",
851 dest="ignore_consistency",
852 action="store_true", default=False,
853 help="Ignore the consistency of the disks on"
856 ALLOW_FAILOVER_OPT = cli_option("--allow-failover",
857 dest="allow_failover",
858 action="store_true", default=False,
859 help="If migration is not possible fallback to"
862 NONLIVE_OPT = cli_option("--non-live", dest="live",
863 default=True, action="store_false",
864 help="Do a non-live migration (this usually means"
865 " freeze the instance, save the state, transfer and"
866 " only then resume running on the secondary node)")
868 MIGRATION_MODE_OPT = cli_option("--migration-mode", dest="migration_mode",
870 choices=list(constants.HT_MIGRATION_MODES),
871 help="Override default migration mode (choose"
872 " either live or non-live")
874 NODE_PLACEMENT_OPT = cli_option("-n", "--node", dest="node",
875 help="Target node and optional secondary node",
876 metavar="<pnode>[:<snode>]",
877 completion_suggest=OPT_COMPL_INST_ADD_NODES)
879 NODE_LIST_OPT = cli_option("-n", "--node", dest="nodes", default=[],
880 action="append", metavar="<node>",
881 help="Use only this node (can be used multiple"
882 " times, if not given defaults to all nodes)",
883 completion_suggest=OPT_COMPL_ONE_NODE)
885 NODEGROUP_OPT_NAME = "--node-group"
886 NODEGROUP_OPT = cli_option("-g", NODEGROUP_OPT_NAME,
888 help="Node group (name or uuid)",
889 metavar="<nodegroup>",
890 default=None, type="string",
891 completion_suggest=OPT_COMPL_ONE_NODEGROUP)
893 SINGLE_NODE_OPT = cli_option("-n", "--node", dest="node", help="Target node",
895 completion_suggest=OPT_COMPL_ONE_NODE)
897 NOSTART_OPT = cli_option("--no-start", dest="start", default=True,
898 action="store_false",
899 help="Don't start the instance after creation")
901 SHOWCMD_OPT = cli_option("--show-cmd", dest="show_command",
902 action="store_true", default=False,
903 help="Show command instead of executing it")
905 CLEANUP_OPT = cli_option("--cleanup", dest="cleanup",
906 default=False, action="store_true",
907 help="Instead of performing the migration, try to"
908 " recover from a failed cleanup. This is safe"
909 " to run even if the instance is healthy, but it"
910 " will create extra replication traffic and "
911 " disrupt briefly the replication (like during the"
914 STATIC_OPT = cli_option("-s", "--static", dest="static",
915 action="store_true", default=False,
916 help="Only show configuration data, not runtime data")
918 ALL_OPT = cli_option("--all", dest="show_all",
919 default=False, action="store_true",
920 help="Show info on all instances on the cluster."
921 " This can take a long time to run, use wisely")
923 SELECT_OS_OPT = cli_option("--select-os", dest="select_os",
924 action="store_true", default=False,
925 help="Interactive OS reinstall, lists available"
926 " OS templates for selection")
928 IGNORE_FAILURES_OPT = cli_option("--ignore-failures", dest="ignore_failures",
929 action="store_true", default=False,
930 help="Remove the instance from the cluster"
931 " configuration even if there are failures"
932 " during the removal process")
934 IGNORE_REMOVE_FAILURES_OPT = cli_option("--ignore-remove-failures",
935 dest="ignore_remove_failures",
936 action="store_true", default=False,
937 help="Remove the instance from the"
938 " cluster configuration even if there"
939 " are failures during the removal"
942 REMOVE_INSTANCE_OPT = cli_option("--remove-instance", dest="remove_instance",
943 action="store_true", default=False,
944 help="Remove the instance from the cluster")
946 DST_NODE_OPT = cli_option("-n", "--target-node", dest="dst_node",
947 help="Specifies the new node for the instance",
948 metavar="NODE", default=None,
949 completion_suggest=OPT_COMPL_ONE_NODE)
951 NEW_SECONDARY_OPT = cli_option("-n", "--new-secondary", dest="dst_node",
952 help="Specifies the new secondary node",
953 metavar="NODE", default=None,
954 completion_suggest=OPT_COMPL_ONE_NODE)
956 ON_PRIMARY_OPT = cli_option("-p", "--on-primary", dest="on_primary",
957 default=False, action="store_true",
958 help="Replace the disk(s) on the primary"
959 " node (applies only to internally mirrored"
960 " disk templates, e.g. %s)" %
961 utils.CommaJoin(constants.DTS_INT_MIRROR))
963 ON_SECONDARY_OPT = cli_option("-s", "--on-secondary", dest="on_secondary",
964 default=False, action="store_true",
965 help="Replace the disk(s) on the secondary"
966 " node (applies only to internally mirrored"
967 " disk templates, e.g. %s)" %
968 utils.CommaJoin(constants.DTS_INT_MIRROR))
970 AUTO_PROMOTE_OPT = cli_option("--auto-promote", dest="auto_promote",
971 default=False, action="store_true",
972 help="Lock all nodes and auto-promote as needed"
975 AUTO_REPLACE_OPT = cli_option("-a", "--auto", dest="auto",
976 default=False, action="store_true",
977 help="Automatically replace faulty disks"
978 " (applies only to internally mirrored"
979 " disk templates, e.g. %s)" %
980 utils.CommaJoin(constants.DTS_INT_MIRROR))
982 IGNORE_SIZE_OPT = cli_option("--ignore-size", dest="ignore_size",
983 default=False, action="store_true",
984 help="Ignore current recorded size"
985 " (useful for forcing activation when"
986 " the recorded size is wrong)")
988 SRC_NODE_OPT = cli_option("--src-node", dest="src_node", help="Source node",
990 completion_suggest=OPT_COMPL_ONE_NODE)
992 SRC_DIR_OPT = cli_option("--src-dir", dest="src_dir", help="Source directory",
995 SECONDARY_IP_OPT = cli_option("-s", "--secondary-ip", dest="secondary_ip",
996 help="Specify the secondary ip for the node",
997 metavar="ADDRESS", default=None)
999 READD_OPT = cli_option("--readd", dest="readd",
1000 default=False, action="store_true",
1001 help="Readd old node after replacing it")
1003 NOSSH_KEYCHECK_OPT = cli_option("--no-ssh-key-check", dest="ssh_key_check",
1004 default=True, action="store_false",
1005 help="Disable SSH key fingerprint checking")
1007 NODE_FORCE_JOIN_OPT = cli_option("--force-join", dest="force_join",
1008 default=False, action="store_true",
1009 help="Force the joining of a node")
1011 MC_OPT = cli_option("-C", "--master-candidate", dest="master_candidate",
1012 type="bool", default=None, metavar=_YORNO,
1013 help="Set the master_candidate flag on the node")
1015 OFFLINE_OPT = cli_option("-O", "--offline", dest="offline", metavar=_YORNO,
1016 type="bool", default=None,
1017 help=("Set the offline flag on the node"
1018 " (cluster does not communicate with offline"
1021 DRAINED_OPT = cli_option("-D", "--drained", dest="drained", metavar=_YORNO,
1022 type="bool", default=None,
1023 help=("Set the drained flag on the node"
1024 " (excluded from allocation operations)"))
1026 CAPAB_MASTER_OPT = cli_option("--master-capable", dest="master_capable",
1027 type="bool", default=None, metavar=_YORNO,
1028 help="Set the master_capable flag on the node")
1030 CAPAB_VM_OPT = cli_option("--vm-capable", dest="vm_capable",
1031 type="bool", default=None, metavar=_YORNO,
1032 help="Set the vm_capable flag on the node")
1034 ALLOCATABLE_OPT = cli_option("--allocatable", dest="allocatable",
1035 type="bool", default=None, metavar=_YORNO,
1036 help="Set the allocatable flag on a volume")
1038 NOLVM_STORAGE_OPT = cli_option("--no-lvm-storage", dest="lvm_storage",
1039 help="Disable support for lvm based instances"
1041 action="store_false", default=True)
1043 ENABLED_HV_OPT = cli_option("--enabled-hypervisors",
1044 dest="enabled_hypervisors",
1045 help="Comma-separated list of hypervisors",
1046 type="string", default=None)
1048 NIC_PARAMS_OPT = cli_option("-N", "--nic-parameters", dest="nicparams",
1049 type="keyval", default={},
1050 help="NIC parameters")
1052 CP_SIZE_OPT = cli_option("-C", "--candidate-pool-size", default=None,
1053 dest="candidate_pool_size", type="int",
1054 help="Set the candidate pool size")
1056 VG_NAME_OPT = cli_option("--vg-name", dest="vg_name",
1057 help=("Enables LVM and specifies the volume group"
1058 " name (cluster-wide) for disk allocation"
1059 " [%s]" % constants.DEFAULT_VG),
1060 metavar="VG", default=None)
1062 YES_DOIT_OPT = cli_option("--yes-do-it", "--ya-rly", dest="yes_do_it",
1063 help="Destroy cluster", action="store_true")
1065 NOVOTING_OPT = cli_option("--no-voting", dest="no_voting",
1066 help="Skip node agreement check (dangerous)",
1067 action="store_true", default=False)
1069 MAC_PREFIX_OPT = cli_option("-m", "--mac-prefix", dest="mac_prefix",
1070 help="Specify the mac prefix for the instance IP"
1071 " addresses, in the format XX:XX:XX",
1075 MASTER_NETDEV_OPT = cli_option("--master-netdev", dest="master_netdev",
1076 help="Specify the node interface (cluster-wide)"
1077 " on which the master IP address will be added"
1078 " (cluster init default: %s)" %
1079 constants.DEFAULT_BRIDGE,
1083 MASTER_NETMASK_OPT = cli_option("--master-netmask", dest="master_netmask",
1084 help="Specify the netmask of the master IP",
1088 USE_EXTERNAL_MIP_SCRIPT = cli_option("--use-external-mip-script",
1089 dest="use_external_mip_script",
1090 help="Specify whether to run a user-provided"
1091 " script for the master IP address turnup and"
1092 " turndown operations",
1093 type="bool", metavar=_YORNO, default=None)
1095 GLOBAL_FILEDIR_OPT = cli_option("--file-storage-dir", dest="file_storage_dir",
1096 help="Specify the default directory (cluster-"
1097 "wide) for storing the file-based disks [%s]" %
1098 constants.DEFAULT_FILE_STORAGE_DIR,
1100 default=constants.DEFAULT_FILE_STORAGE_DIR)
1102 GLOBAL_SHARED_FILEDIR_OPT = cli_option("--shared-file-storage-dir",
1103 dest="shared_file_storage_dir",
1104 help="Specify the default directory (cluster-"
1105 "wide) for storing the shared file-based"
1107 constants.DEFAULT_SHARED_FILE_STORAGE_DIR,
1108 metavar="SHAREDDIR",
1109 default=constants.DEFAULT_SHARED_FILE_STORAGE_DIR)
1111 NOMODIFY_ETCHOSTS_OPT = cli_option("--no-etc-hosts", dest="modify_etc_hosts",
1112 help="Don't modify /etc/hosts",
1113 action="store_false", default=True)
1115 NOMODIFY_SSH_SETUP_OPT = cli_option("--no-ssh-init", dest="modify_ssh_setup",
1116 help="Don't initialize SSH keys",
1117 action="store_false", default=True)
1119 ERROR_CODES_OPT = cli_option("--error-codes", dest="error_codes",
1120 help="Enable parseable error messages",
1121 action="store_true", default=False)
1123 NONPLUS1_OPT = cli_option("--no-nplus1-mem", dest="skip_nplusone_mem",
1124 help="Skip N+1 memory redundancy tests",
1125 action="store_true", default=False)
1127 REBOOT_TYPE_OPT = cli_option("-t", "--type", dest="reboot_type",
1128 help="Type of reboot: soft/hard/full",
1129 default=constants.INSTANCE_REBOOT_HARD,
1131 choices=list(constants.REBOOT_TYPES))
1133 IGNORE_SECONDARIES_OPT = cli_option("--ignore-secondaries",
1134 dest="ignore_secondaries",
1135 default=False, action="store_true",
1136 help="Ignore errors from secondaries")
1138 NOSHUTDOWN_OPT = cli_option("--noshutdown", dest="shutdown",
1139 action="store_false", default=True,
1140 help="Don't shutdown the instance (unsafe)")
1142 TIMEOUT_OPT = cli_option("--timeout", dest="timeout", type="int",
1143 default=constants.DEFAULT_SHUTDOWN_TIMEOUT,
1144 help="Maximum time to wait")
1146 SHUTDOWN_TIMEOUT_OPT = cli_option("--shutdown-timeout",
1147 dest="shutdown_timeout", type="int",
1148 default=constants.DEFAULT_SHUTDOWN_TIMEOUT,
1149 help="Maximum time to wait for instance shutdown")
1151 INTERVAL_OPT = cli_option("--interval", dest="interval", type="int",
1153 help=("Number of seconds between repetions of the"
1156 EARLY_RELEASE_OPT = cli_option("--early-release",
1157 dest="early_release", default=False,
1158 action="store_true",
1159 help="Release the locks on the secondary"
1162 NEW_CLUSTER_CERT_OPT = cli_option("--new-cluster-certificate",
1163 dest="new_cluster_cert",
1164 default=False, action="store_true",
1165 help="Generate a new cluster certificate")
1167 RAPI_CERT_OPT = cli_option("--rapi-certificate", dest="rapi_cert",
1169 help="File containing new RAPI certificate")
1171 NEW_RAPI_CERT_OPT = cli_option("--new-rapi-certificate", dest="new_rapi_cert",
1172 default=None, action="store_true",
1173 help=("Generate a new self-signed RAPI"
1176 SPICE_CERT_OPT = cli_option("--spice-certificate", dest="spice_cert",
1178 help="File containing new SPICE certificate")
1180 SPICE_CACERT_OPT = cli_option("--spice-ca-certificate", dest="spice_cacert",
1182 help="File containing the certificate of the CA"
1183 " which signed the SPICE certificate")
1185 NEW_SPICE_CERT_OPT = cli_option("--new-spice-certificate",
1186 dest="new_spice_cert", default=None,
1187 action="store_true",
1188 help=("Generate a new self-signed SPICE"
1191 NEW_CONFD_HMAC_KEY_OPT = cli_option("--new-confd-hmac-key",
1192 dest="new_confd_hmac_key",
1193 default=False, action="store_true",
1194 help=("Create a new HMAC key for %s" %
1197 CLUSTER_DOMAIN_SECRET_OPT = cli_option("--cluster-domain-secret",
1198 dest="cluster_domain_secret",
1200 help=("Load new new cluster domain"
1201 " secret from file"))
# Generate a new cluster domain secret instead of loading one.
NEW_CLUSTER_DOMAIN_SECRET_OPT = \
  cli_option("--new-cluster-domain-secret",
             dest="new_cluster_domain_secret",
             action="store_true", default=False,
             help=("Create a new cluster domain"
                   " secret"))

# Talk to nodes over the replication (secondary) network.
USE_REPL_NET_OPT = cli_option("--use-replication-network",
                              dest="use_replication_network",
                              default=False, action="store_true",
                              help=("Whether to use the replication network"
                                    " for talking to the nodes"))

# Cluster-wide automatic node-health maintenance toggle.
MAINTAIN_NODE_HEALTH_OPT = \
  cli_option("--maintain-node-health", dest="maintain_node_health",
             type="bool", metavar=_YORNO, default=None,
             help="Configure the cluster to automatically maintain node"
                  " health, by shutting down unknown instances, shutting down"
                  " unknown DRBD devices, etc.")

# On import, mark parameters equal to cluster defaults as defaults.
IDENTIFY_DEFAULTS_OPT = \
  cli_option("--identify-defaults", dest="identify_defaults",
             action="store_true", default=False,
             help="Identify which saved instance parameters are equal to"
                  " the current cluster defaults and set them as such, instead"
                  " of marking them as overridden")
# User-id pool management and miscellaneous cluster-level options.

UIDPOOL_OPT = cli_option("--uid-pool", dest="uid_pool",
                         action="store", default=None,
                         help=("A list of user-ids or user-id"
                               " ranges separated by commas"))

ADD_UIDS_OPT = cli_option("--add-uids", dest="add_uids",
                          action="store", default=None,
                          help=("A list of user-ids or user-id"
                                " ranges separated by commas, to be"
                                " added to the user-id pool"))

REMOVE_UIDS_OPT = cli_option("--remove-uids", dest="remove_uids",
                             action="store", default=None,
                             help=("A list of user-ids or user-id"
                                   " ranges separated by commas, to be"
                                   " removed from the user-id pool"))

RESERVED_LVS_OPT = cli_option("--reserved-lvs", dest="reserved_lvs",
                              action="store", default=None,
                              help=("A comma-separated list of reserved"
                                    " logical volumes names, that will be"
                                    " ignored by cluster verify"))

ROMAN_OPT = cli_option("--roman", dest="roman_integers",
                       action="store_true", default=False,
                       help="Use roman numbers for positive integers")

DRBD_HELPER_OPT = cli_option("--drbd-usermode-helper", dest="drbd_helper",
                             default=None, action="store",
                             help="Specifies usermode helper for DRBD")

NODRBD_STORAGE_OPT = cli_option("--no-drbd-storage", dest="drbd_storage",
                                default=True, action="store_false",
                                help="Disable support for DRBD")
# Cluster IP version, opcode priority and OS-flag options.

PRIMARY_IP_VERSION_OPT = \
  cli_option("--primary-ip-version", dest="primary_ip_version",
             action="store", default=constants.IP4_VERSION,
             metavar="%d|%d" % (constants.IP4_VERSION,
                                constants.IP6_VERSION),
             help="Cluster-wide IP version for primary IP")

PRIORITY_OPT = cli_option("--priority", dest="priority", default=None,
                          metavar="|".join(name for name, _ in _PRIORITY_NAMES),
                          choices=_PRIONAME_TO_VALUE.keys(),
                          help="Priority for opcode processing")

HID_OS_OPT = cli_option("--hidden", dest="hidden",
                        default=None, type="bool", metavar=_YORNO,
                        help="Sets the hidden flag on the OS")

BLK_OS_OPT = cli_option("--blacklisted", dest="blacklisted",
                        default=None, type="bool", metavar=_YORNO,
                        help="Sets the blacklisted flag on the OS")

PREALLOC_WIPE_DISKS_OPT = cli_option("--prealloc-wipe-disks",
                                     dest="prealloc_wipe_disks",
                                     default=None, type="bool",
                                     metavar=_YORNO,
                                     help=("Wipe disks prior to instance"
                                           " creation"))
# Node-group and out-of-band management options.

NODE_PARAMS_OPT = cli_option("--node-parameters", dest="ndparams",
                             default=None, type="keyval",
                             help="Node parameters")

ALLOC_POLICY_OPT = cli_option("--alloc-policy", dest="alloc_policy",
                              default=None, action="store", metavar="POLICY",
                              help="Allocation policy for the node group")

NODE_POWERED_OPT = cli_option("--node-powered", dest="node_powered",
                              default=None, type="bool", metavar=_YORNO,
                              help="Specify if the SoR for node is powered")

OOB_TIMEOUT_OPT = cli_option("--oob-timeout", dest="oob_timeout",
                             type="int", default=constants.OOB_TIMEOUT,
                             help="Maximum time to wait for out-of-band helper")

POWER_DELAY_OPT = cli_option("--power-delay", dest="power_delay",
                             type="float", default=constants.OOB_POWER_DELAY,
                             help="Time in seconds to wait between power-ons")
# Filtering, change-recording and evacuation-scope options.

FORCE_FILTER_OPT = cli_option("-F", "--filter", dest="force_filter",
                              default=False, action="store_true",
                              help=("Whether command argument should be treated"
                                    " as filter"))

NO_REMEMBER_OPT = cli_option("--no-remember", dest="no_remember",
                             default=False, action="store_true",
                             help=("Perform but do not record the change"
                                   " in the configuration"))

PRIMARY_ONLY_OPT = cli_option("-p", "--primary-only",
                              action="store_true", default=False,
                              help="Evacuate primary instances only")

SECONDARY_ONLY_OPT = cli_option("-s", "--secondary-only",
                                action="store_true", default=False,
                                help=("Evacuate secondary instances only"
                                      " (applies only to internally mirrored"
                                      " disk templates, e.g. %s)" %
                                      utils.CommaJoin(constants.DTS_INT_MIRROR)))
# Instance startup, group-move, verify and state-information options.

STARTUP_PAUSED_OPT = cli_option("--paused", dest="startup_paused",
                                default=False, action="store_true",
                                help="Pause instance at startup")

TO_GROUP_OPT = cli_option("--to", dest="to", metavar="<group>",
                          default=None, action="append",
                          completion_suggest=OPT_COMPL_ONE_NODEGROUP,
                          help="Destination node group (name or uuid)")

IGNORE_ERRORS_OPT = cli_option("-I", "--ignore-errors", dest="ignore_errors",
                               default=[], action="append",
                               choices=list(constants.CV_ALL_ECODES_STRINGS),
                               help="Error code to be ignored")

DISK_STATE_OPT = cli_option("--disk-state", dest="disk_state",
                            default=[], action="append",
                            type="identkeyval",
                            help=("Specify disk state information in the format"
                                  " storage_type/identifier:option=value,..."))

HV_STATE_OPT = cli_option("--hypervisor-state", dest="hv_state",
                          default=[], action="append",
                          type="identkeyval",
                          help=("Specify hypervisor state information in the"
                                " format hypervisor:option=value,..."))

IGNORE_IPOLICY_OPT = cli_option("--ignore-ipolicy", dest="ignore_ipolicy",
                                default=False, action="store_true",
                                help="Ignore instance policy violations")
#: Options provided by all commands
COMMON_OPTS = [DEBUG_OPT]

# common options for creating instances. add and import then add their own
# NOTE(review): the two lists below are truncated in this copy of the file:
# COMMON_CREATE_OPTS is missing most of its entries and both lists are
# missing their closing brackets -- restore from a pristine checkout.
COMMON_CREATE_OPTS = [
  FILESTORE_DRIVER_OPT,

# common instance policy options
INSTANCE_POLICY_OPTS = [
  SPECS_CPU_COUNT_OPT,
  SPECS_DISK_COUNT_OPT,
  SPECS_DISK_SIZE_OPT,
  SPECS_NIC_COUNT_OPT,
  SPECS_DISK_TEMPLATES,
def _ParseArgs(argv, commands, aliases, env_override):
  """Parser for the command line arguments.

  This function parses the arguments and returns the function which
  must be executed together with its (modified) arguments.

  @param argv: the command line
  @param commands: dictionary with special contents, see the design
      doc for cmdline handling
  @param aliases: dictionary with command aliases {'alias': 'target', ...}
  @param env_override: list of env variables allowed for default args
  @return: (function, options, args) on success; (None, None, None) when
      the command is unknown or the arguments fail validation

  """
  # Every env-overridable command must actually exist
  assert not (env_override - set(commands))

  # NOTE(review): upstream wraps the two assignments below in an
  # "if not argv:" / "else:" pair; that structure is missing from this
  # copy of the file.
  binary = "<command>"
  binary = argv[0].split("/")[-1]

  if len(argv) > 1 and argv[1] == "--version":
    ToStdout("%s (ganeti %s) %s", binary, constants.VCS_VERSION,
             constants.RELEASE_VERSION)
    # Quit right away. That way we don't have to care about this special
    # argument. optparse.py does it the same.
    # NOTE(review): the sys.exit(0) expected here is missing from this copy.

  if len(argv) < 2 or not (argv[1] in commands or
                           argv[1] in aliases):
    # let's do a nice thing
    sortedcmds = commands.keys()
    ToStdout("Usage: %s {command} [options...] [argument...]", binary)
    ToStdout("%s <command> --help to see details, or man %s", binary, binary)
    # compute the max line length for cmd + usage
    mlen = max([len(" %s" % cmd) for cmd in commands])
    mlen = min(60, mlen)  # should not get here...
    # and format a nice command list
    ToStdout("Commands:")
    for cmd in sortedcmds:
      cmdstr = " %s" % (cmd,)
      help_text = commands[cmd][4]
      # wrap the one-line help so the command table stays within 79 cols
      help_lines = textwrap.wrap(help_text, 79 - 3 - mlen)
      ToStdout("%-*s - %s", mlen, cmdstr, help_lines.pop(0))
      for line in help_lines:
        ToStdout("%-*s %s", mlen, "", line)
    # nothing runnable was selected
    return None, None, None

  # get command, unalias it, and look it up in commands
  # NOTE(review): the "cmd = argv.pop(1)" and the alias-membership checks
  # that lead into the raise below are missing from this copy of the file.
  raise errors.ProgrammerError("Alias '%s' overrides an existing"
  if aliases[cmd] not in commands:
    raise errors.ProgrammerError("Alias '%s' maps to non-existing"
                                 " command '%s'" % (cmd, aliases[cmd]))

  if cmd in env_override:
    # e.g. "gnt-instance list" honours GNT_INSTANCE_LIST
    args_env_name = ("%s_%s" % (binary.replace("-", "_"), cmd)).upper()
    env_args = os.environ.get(args_env_name)
    # NOTE(review): upstream only splices env_args in when it is non-empty
    argv = utils.InsertAtPos(argv, 1, shlex.split(env_args))

  func, args_def, parser_opts, usage, description = commands[cmd]
  parser = OptionParser(option_list=parser_opts + COMMON_OPTS,
                        description=description,
                        formatter=TitledHelpFormatter(),
                        usage="%%prog %s %s" % (cmd, usage))
  parser.disable_interspersed_args()
  options, args = parser.parse_args(args=argv[1:])

  if not _CheckArguments(cmd, args_def, args):
    return None, None, None

  return func, options, args
def _CheckArguments(cmd, args_def, args):
  """Verifies the arguments using the argument definition.

  Algorithm:

    1. Abort with error if values specified by user but none expected.

    1. For each argument in definition

      1. Keep running count of minimum number of values (min_count)
      1. Keep running count of maximum number of values (max_count)
      1. If it has an unlimited number of values

        1. Abort with error if it's not the last argument in the definition

    1. If last argument has limited number of values

      1. Abort with error if number of values doesn't match or is too large

    1. Abort with error if user didn't pass enough values (min_count)

  """
  # NOTE(review): several lines are missing from this copy of the file:
  # the "return False"/"return True" statements after each error message,
  # the min_count/max_count initialisation, the "min_count = arg.min" /
  # "max_count = arg.max" branch bodies, the "if idx == last_idx:" guard
  # and two ToStderr() continuation lines. The surviving lines are kept
  # verbatim.
  if args and not args_def:
    ToStderr("Error: Command %s expects no arguments", cmd)

  last_idx = len(args_def) - 1

  for idx, arg in enumerate(args_def):
    if min_count is None:
    elif arg.min is not None:
      min_count += arg.min

    if max_count is None:
    elif arg.max is not None:
      max_count += arg.max

      check_max = (arg.max is not None)
    elif arg.max is None:
      raise errors.ProgrammerError("Only the last argument can have max=None")

  # Command with exact number of arguments
  if (min_count is not None and max_count is not None and
      min_count == max_count and len(args) != min_count):
    ToStderr("Error: Command %s expects %d argument(s)", cmd, min_count)

  # Command with limited number of arguments
  if max_count is not None and len(args) > max_count:
    ToStderr("Error: Command %s expects only %d argument(s)",

  # Command with some required arguments
  if min_count is not None and len(args) < min_count:
    ToStderr("Error: Command %s expects at least %d argument(s)",
def SplitNodeOption(value):
  """Splits the value of a --node option.

  When the value contains a colon it is split once, yielding the
  primary and secondary node; otherwise the secondary is None.

  """
  if not value or ":" not in value:
    return (value, None)
  return value.split(":", 1)
def CalculateOSNames(os_name, os_variants):
  """Calculates all the names an OS can be called, according to its variants.

  @type os_name: string
  @param os_name: base name of the os
  @type os_variants: list or None
  @param os_variants: list of supported variants
  @rtype: list
  @return: list of valid names

  """
  if not os_variants:
    # no variants declared: the bare OS name is the only valid name
    return [os_name]
  return ["%s+%s" % (os_name, variant) for variant in os_variants]
def ParseFields(selected, default):
  """Parses the values of "--field"-like options.

  @type selected: string or None
  @param selected: User-selected options
  @type default: list
  @param default: Default fields
  @rtype: list
  @return: the list of fields to use

  """
  if selected is None:
    fields = default
  elif selected.startswith("+"):
    # "+x,y" means: the defaults plus the listed extra fields
    fields = default + selected[1:].split(",")
  else:
    fields = selected.split(",")
  return fields
1607 UsesRPC = rpc.RunWithRPC
def AskUser(text, choices=None):
  """Ask the user a question.

  @param text: the question to ask
  @param choices: list with elements tuples (input_char, return_value,
      description); if not given, it will default to: [('y', True,
      'Perform the operation'), ('n', False, 'Do not do the operation')];
      note that the '?' char is reserved for help

  @return: one of the return values from the choices list; if input is
      not possible (i.e. not running with a tty, we return the last
      entry of the list

  """
  # NOTE(review): this copy of the file is missing the "if choices is
  # None:" guard before the default assignment, the try/except around
  # opening /dev/tty (the non-tty fallback) and the interactive input
  # loop; the surviving lines are kept verbatim.
  choices = [("y", True, "Perform the operation"),
             ("n", False, "Do not perform the operation")]
  if not choices or not isinstance(choices, list):
    raise errors.ProgrammerError("Invalid choices argument to AskUser")
  for entry in choices:
    if not isinstance(entry, tuple) or len(entry) < 3 or entry[0] == "?":
      raise errors.ProgrammerError("Invalid choices element to AskUser")

  # fallback answer when no interactive input is possible
  answer = choices[-1][1]
  for line in text.splitlines():
    # re-wrap the question text to 70 columns, keeping explicit newlines
    new_text.append(textwrap.fill(line, 70, replace_whitespace=False))
  text = "\n".join(new_text)
  # Python 2 built-in file(); opens the controlling terminal directly
  f = file("/dev/tty", "a+")
  chars = [entry[0] for entry in choices]
  chars[-1] = "[%s]" % chars[-1]
  maps = dict([(entry[0], entry[1]) for entry in choices])
  f.write("/".join(chars))
  line = f.readline(2).strip().lower()
  for entry in choices:
    f.write(" %s - %s\n" % (entry[0], entry[2]))
class JobSubmittedException(Exception):
  """Job was submitted, client should exit.

  This exception has one argument, the ID of the job that was
  submitted. The handler should print this ID.

  This is not an error, just a structured way to exit from clients.

  """
def SendJob(ops, cl=None):
  """Function to submit an opcode without waiting for the results.

  @type ops: list
  @param ops: list of opcodes
  @type cl: luxi.Client
  @param cl: the luxi client to use for communicating with the master;
      if None, a new client will be created

  """
  # NOTE(review): the "if cl is None: cl = GetClient()" guard and the
  # final "return job_id" are missing from this copy of the file.
  job_id = cl.SubmitJob(ops)
def GenericPollJob(job_id, cbs, report_cbs):
  """Generic job-polling function.

  @type job_id: number
  @param job_id: Job ID
  @type cbs: Instance of L{JobPollCbBase}
  @param cbs: Data callbacks
  @type report_cbs: Instance of L{JobPollReportCbBase}
  @param report_cbs: Reporting callbacks

  """
  prev_job_info = None
  prev_logmsg_serial = None

  # NOTE(review): several lines of this function are missing from this
  # copy of the file (the "while True:" polling-loop header, the guards
  # around the two JobLost raises, some "elif"/"else" branches and a few
  # call continuation lines); the surviving lines are kept verbatim.
  result = cbs.WaitForJobChangeOnce(job_id, ["status"], prev_job_info,

  # job not found, go away!
  raise errors.JobLost("Job with id %s lost" % job_id)

  if result == constants.JOB_NOTCHANGED:
    report_cbs.ReportNotChanged(job_id, status)

  # Split result, a tuple of (field values, log entries)
  (job_info, log_entries) = result
  (status, ) = job_info

  for log_entry in log_entries:
    (serial, timestamp, log_type, message) = log_entry
    report_cbs.ReportLogMessage(job_id, serial, timestamp,
    # remember the highest serial so only newer log entries are fetched
    prev_logmsg_serial = max(prev_logmsg_serial, serial)

  # TODO: Handle canceled and archived jobs
  elif status in (constants.JOB_STATUS_SUCCESS,
                  constants.JOB_STATUS_ERROR,
                  constants.JOB_STATUS_CANCELING,
                  constants.JOB_STATUS_CANCELED):
    prev_job_info = job_info

  jobs = cbs.QueryJobs([job_id], ["status", "opstatus", "opresult"])
  raise errors.JobLost("Job with id %s lost" % job_id)

  status, opstatus, result = jobs[0]

  if status == constants.JOB_STATUS_SUCCESS:
  if status in (constants.JOB_STATUS_CANCELING, constants.JOB_STATUS_CANCELED):
    raise errors.OpExecError("Job was canceled")

  for idx, (status, msg) in enumerate(zip(opstatus, result)):
    if status == constants.OP_STATUS_SUCCESS:
    elif status == constants.OP_STATUS_ERROR:
      # re-raise an encapsulated exception if the message carries one
      errors.MaybeRaise(msg)

      raise errors.OpExecError("partial failure (opcode %d): %s" %

      raise errors.OpExecError(str(msg))

  # default failure mode
  raise errors.OpExecError(result)
class JobPollCbBase:
  """Base class for L{GenericPollJob} callbacks.

  """
  # NOTE(review): the "def __init__(self):" line that should precede the
  # docstring below is missing from this copy of the file.
    """Initializes this class.

    """
  def WaitForJobChangeOnce(self, job_id, fields,
                           prev_job_info, prev_log_serial):
    """Waits for changes on a job.

    """
    raise NotImplementedError()

  def QueryJobs(self, job_ids, fields):
    """Returns the selected fields for the selected job IDs.

    @type job_ids: list of numbers
    @param job_ids: Job IDs
    @type fields: list of strings
    @param fields: Fields

    """
    raise NotImplementedError()
class JobPollReportCbBase:
  """Base class for L{GenericPollJob} reporting callbacks.

  """
  # NOTE(review): the "def __init__(self):" line that should precede the
  # docstring below is missing from this copy of the file.
    """Initializes this class.

    """
  def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
    """Handles a log message.

    """
    raise NotImplementedError()

  def ReportNotChanged(self, job_id, status):
    """Called if a job hasn't changed in a while.

    @type job_id: number
    @param job_id: Job ID
    @type status: string or None
    @param status: Job status if available

    """
    raise NotImplementedError()
class _LuxiJobPollCb(JobPollCbBase):
  # Data callbacks backed by a luxi client.
  def __init__(self, cl):
    """Initializes this class.

    """
    JobPollCbBase.__init__(self)
    # NOTE(review): the "self.cl = cl" assignment is missing from this
    # copy of the file; the methods below read self.cl.

  def WaitForJobChangeOnce(self, job_id, fields,
                           prev_job_info, prev_log_serial):
    """Waits for changes on a job.

    """
    return self.cl.WaitForJobChangeOnce(job_id, fields,
                                        prev_job_info, prev_log_serial)

  def QueryJobs(self, job_ids, fields):
    """Returns the selected fields for the selected job IDs.

    """
    return self.cl.QueryJobs(job_ids, fields)
class FeedbackFnJobPollReportCb(JobPollReportCbBase):
  # Reporting callbacks that forward log messages to a feedback function.
  def __init__(self, feedback_fn):
    """Initializes this class.

    """
    JobPollReportCbBase.__init__(self)

    self.feedback_fn = feedback_fn

    assert callable(feedback_fn)

  def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
    """Handles a log message.

    """
    self.feedback_fn((timestamp, log_type, log_msg))

  def ReportNotChanged(self, job_id, status):
    """Called if a job hasn't changed in a while.

    """
    # NOTE(review): the method body is missing from this copy of the
    # file; presumably the event is deliberately ignored -- confirm
    # against a pristine checkout.
class StdioJobPollReportCb(JobPollReportCbBase):
  # Reporting callbacks that print to stdout/stderr.
  # NOTE(review): the "def __init__(self):" line that should precede the
  # docstring below is missing from this copy of the file.
    """Initializes this class.

    """
    JobPollReportCbBase.__init__(self)

    # ensure the queued/waiting notices are printed at most once each
    self.notified_queued = False
    self.notified_waitlock = False

  def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
    """Handles a log message.

    """
    ToStdout("%s %s", time.ctime(utils.MergeTime(timestamp)),
             FormatLogMessage(log_type, log_msg))

  def ReportNotChanged(self, job_id, status):
    """Called if a job hasn't changed in a while.

    """
    if status == constants.JOB_STATUS_QUEUED and not self.notified_queued:
      ToStderr("Job %s is waiting in queue", job_id)
      self.notified_queued = True

    elif status == constants.JOB_STATUS_WAITING and not self.notified_waitlock:
      ToStderr("Job %s is trying to acquire all necessary locks", job_id)
      self.notified_waitlock = True
def FormatLogMessage(log_type, log_msg):
  """Formats a job message according to its type.

  Non-message payloads are stringified first; every result is passed
  through L{utils.SafeEncode}.

  """
  if log_type == constants.ELOG_MESSAGE:
    text = log_msg
  else:
    text = str(log_msg)
  return utils.SafeEncode(text)
def PollJob(job_id, cl=None, feedback_fn=None, reporter=None):
  """Function to poll for the result of a job.

  @type job_id: job identified
  @param job_id: the job to poll for results
  @type cl: luxi.Client
  @param cl: the luxi client to use for communicating with the master;
      if None, a new client will be created

  """
  # NOTE(review): the "cl is None -> GetClient()" guard, the
  # "if feedback_fn:" / "else:" pair around the two assignments below and
  # the "elif feedback_fn:" guarding the raise are missing from this copy
  # of the file; the surviving lines are kept verbatim.
  if reporter is None:
      reporter = FeedbackFnJobPollReportCb(feedback_fn)
      reporter = StdioJobPollReportCb()
    raise errors.ProgrammerError("Can't specify reporter and feedback function")

  return GenericPollJob(job_id, _LuxiJobPollCb(cl), reporter)
def SubmitOpCode(op, cl=None, feedback_fn=None, opts=None, reporter=None):
  """Legacy function to submit an opcode.

  This is just a simple wrapper over the construction of the processor
  instance. It should be extended to better handle feedback and
  interaction functions.

  """
  # NOTE(review): the "cl is None -> GetClient()" guard and the
  # "reporter=reporter" continuation line of the PollJob() call are
  # missing from this copy of the file.
  SetGenericOpcodeOpts([op], opts)

  job_id = SendJob([op], cl=cl)

  op_results = PollJob(job_id, cl=cl, feedback_fn=feedback_fn,

  return op_results[0]
def SubmitOrSend(op, opts, cl=None, feedback_fn=None):
  """Wrapper around SubmitOpCode or SendJob.

  This function will decide, based on the 'opts' parameter, whether to
  submit and wait for the result of the opcode (and return it), or
  whether to just send the job and print its identifier. It is used in
  order to simplify the implementation of the '--submit' option.

  It will also process the opcodes if we're sending them via SendJob
  (otherwise SubmitOpCode does it).

  """
  if opts and opts.submit_only:
    # NOTE(review): the "job = [op]" assignment that should precede the
    # two calls below is missing from this copy of the file.
    SetGenericOpcodeOpts(job, opts)
    job_id = SendJob(job, cl=cl)
    # structured exit: the caller prints the job ID and terminates
    raise JobSubmittedException(job_id)

  return SubmitOpCode(op, cl=cl, feedback_fn=feedback_fn, opts=opts)
def SetGenericOpcodeOpts(opcode_list, options):
  """Processor for generic options.

  This function updates the given opcodes based on generic command
  line options (like debug, dry-run, etc.).

  @param opcode_list: list of opcodes
  @param options: command line options or None
  @return: None (in-place modification)

  """
  if not options:
    return
  for opcode in opcode_list:
    opcode.debug_level = options.debug
    if hasattr(options, "dry_run"):
      opcode.dry_run = options.dry_run
    priority = getattr(options, "priority", None)
    if priority is not None:
      opcode.priority = _PRIONAME_TO_VALUE[priority]
# TODO: Cache object?
# NOTE(review): the enclosing "def GetClient():" header, its "try:" lines
# and the trailing "raise" / "return client" are missing from this copy of
# the file; the statements below form that function's body -- confirm
# against a pristine checkout before editing.
  client = luxi.Client()
except luxi.NoMasterError:
  ss = ssconf.SimpleStore()

  # Try to read ssconf file
  except errors.ConfigurationError:
    raise errors.OpPrereqError("Cluster not initialized or this machine is"
                               " not part of a cluster")

  master, myself = ssconf.GetMasterAndMyself(ss=ss)
  if master != myself:
    # only the master node can serve luxi requests
    raise errors.OpPrereqError("This is not the master node, please connect"
                               " to node '%s' and rerun the command" %
def FormatError(err):
  """Return a formatted error message for a given error.

  This function takes an exception instance and returns a tuple
  consisting of two values: first, the recommended exit code, and
  second, a string describing the error message (not
  newline-terminated).

  """
  # NOTE(review): the "retcode"/"obuf"/"msg" initialisation, a few
  # "else:" lines, two string continuation lines and the retcode
  # adjustment for JobSubmittedException are missing from this copy of
  # the file; the surviving lines are kept verbatim.
  if isinstance(err, errors.ConfigurationError):
    txt = "Corrupt configuration file: %s" % msg
    obuf.write(txt + "\n")
    obuf.write("Aborting.")
  elif isinstance(err, errors.HooksAbort):
    obuf.write("Failure: hooks execution failed:\n")
    for node, script, out in err.args[0]:
        obuf.write(" node: %s, script: %s, output: %s\n" %
                   (node, script, out))
        obuf.write(" node: %s, script: %s (no output)\n" %
  elif isinstance(err, errors.HooksFailure):
    obuf.write("Failure: hooks general failure: %s" % msg)
  elif isinstance(err, errors.ResolverError):
    this_host = netutils.Hostname.GetSysName()
    if err.args[0] == this_host:
      msg = "Failure: can't resolve my own hostname ('%s')"
      msg = "Failure: can't resolve hostname '%s'"
    obuf.write(msg % err.args[0])
  elif isinstance(err, errors.OpPrereqError):
    if len(err.args) == 2:
      obuf.write("Failure: prerequisites not met for this"
                 " operation:\nerror type: %s, error details:\n%s" %
                 (err.args[1], err.args[0]))
      obuf.write("Failure: prerequisites not met for this"
                 " operation:\n%s" % msg)
  elif isinstance(err, errors.OpExecError):
    obuf.write("Failure: command execution error:\n%s" % msg)
  elif isinstance(err, errors.TagError):
    obuf.write("Failure: invalid tag(s) given:\n%s" % msg)
  elif isinstance(err, errors.JobQueueDrainError):
    obuf.write("Failure: the job queue is marked for drain and doesn't"
               " accept new requests\n")
  elif isinstance(err, errors.JobQueueFull):
    obuf.write("Failure: the job queue is full and doesn't accept new"
               " job submissions until old jobs are archived\n")
  elif isinstance(err, errors.TypeEnforcementError):
    obuf.write("Parameter Error: %s" % msg)
  elif isinstance(err, errors.ParameterError):
    obuf.write("Failure: unknown/wrong parameter name '%s'" % msg)
  elif isinstance(err, luxi.NoMasterError):
    obuf.write("Cannot communicate with the master daemon.\nIs it running"
               " and listening for connections?")
  elif isinstance(err, luxi.TimeoutError):
    obuf.write("Timeout while talking to the master daemon. Jobs might have"
               " been submitted and will continue to run even if the call"
               " timed out. Useful commands in this situation are \"gnt-job"
               " list\", \"gnt-job cancel\" and \"gnt-job watch\". Error:\n")
  elif isinstance(err, luxi.PermissionError):
    obuf.write("It seems you don't have permissions to connect to the"
               " master daemon.\nPlease retry as a different user.")
  elif isinstance(err, luxi.ProtocolError):
    obuf.write("Unhandled protocol error while talking to the master daemon:\n"
  elif isinstance(err, errors.JobLost):
    obuf.write("Error checking job status: %s" % msg)
  elif isinstance(err, errors.QueryFilterParseError):
    obuf.write("Error while parsing query filter: %s\n" % err.args[0])
    obuf.write("\n".join(err.GetDetails()))
  elif isinstance(err, errors.GenericError):
    obuf.write("Unhandled Ganeti error: %s" % msg)
  elif isinstance(err, JobSubmittedException):
    obuf.write("JobID: %s\n" % err.args[0])
    obuf.write("Unhandled exception: %s" % msg)
  return retcode, obuf.getvalue().rstrip("\n")
def GenericMain(commands, override=None, aliases=None,
                env_override=frozenset()):
  """Generic main function for all the gnt-* commands.

  @param commands: a dictionary with a special structure, see the design doc
      for command line handling.
  @param override: if not None, we expect a dictionary with keys that will
      override command line options; this can be used to pass
      options from the scripts to generic functions
  @param aliases: dictionary with command aliases {'alias': 'target', ...}
  @param env_override: list of environment names which are allowed to submit
      default args for commands

  """
  # save the program name and the entire command line for later logging
  # NOTE(review): this copy of the file is missing the "if sys.argv:" /
  # "else:" structure around the binary/cmdline bookkeeping, the "try:"
  # lines of both try/except sections, the early returns after parse
  # errors and the "if old_cmdline:" guard before the logging calls; the
  # surviving lines are kept verbatim.
  binary = os.path.basename(sys.argv[0]) or sys.argv[0]

  if len(sys.argv) >= 2:
    binary += " " + sys.argv[1]
  old_cmdline = " ".join(sys.argv[2:])

  binary = "<unknown program>"

  func, options, args = _ParseArgs(sys.argv, commands, aliases, env_override)
  except errors.ParameterError, err:
    result, err_msg = FormatError(err)

  if func is None:  # parse error

  if override is not None:
    for key, val in override.iteritems():
      setattr(options, key, val)

  utils.SetupLogging(constants.LOG_COMMANDS, binary, debug=options.debug,
                     stderr_logging=True)

  logging.info("run with arguments '%s'", old_cmdline)
  logging.info("run with no arguments")

  result = func(options, args)
  except (errors.GenericError, luxi.ProtocolError,
          JobSubmittedException), err:
    result, err_msg = FormatError(err)
    logging.exception("Error during command processing")
  except KeyboardInterrupt:
    result = constants.EXIT_FAILURE
    ToStderr("Aborted. Note that if the operation created any jobs, they"
             " might have been submitted and"
             " will continue to run in the background.")
  except IOError, err:
    if err.errno == errno.EPIPE:
      # our terminal went away, we'll exit
      sys.exit(constants.EXIT_FAILURE)
2190 def ParseNicOption(optvalue):
2191 """Parses the value of the --net option(s).
2195 nic_max = max(int(nidx[0]) + 1 for nidx in optvalue)
2196 except (TypeError, ValueError), err:
2197 raise errors.OpPrereqError("Invalid NIC index passed: %s" % str(err))
2199 nics = [{}] * nic_max
2200 for nidx, ndict in optvalue:
2203 if not isinstance(ndict, dict):
2204 raise errors.OpPrereqError("Invalid nic/%d value: expected dict,"
2205 " got %s" % (nidx, ndict))
2207 utils.ForceDictType(ndict, constants.INIC_PARAMS_TYPES)
def GenericInstanceCreate(mode, opts, args):
  """Add an instance to the cluster via either creation or import.

  @param mode: constants.INSTANCE_CREATE or constants.INSTANCE_IMPORT
  @param opts: the command line options selected by the user
  @param args: should contain only one element, the new instance name
  @return: the desired exit code

  """
  # NOTE(review): many lines of this long function are missing from this
  # copy of the file (the instance-name extraction, several "try:" /
  # "else:" lines, the nic defaults for import mode and a number of
  # keyword arguments of the OpInstanceCreate call); the surviving lines
  # are kept verbatim.
  (pnode, snode) = SplitNodeOption(opts.node)

  hypervisor, hvparams = opts.hypervisor

  nics = ParseNicOption(opts.nics)

  elif mode == constants.INSTANCE_CREATE:
    # default of one nic, all auto

  if opts.disk_template == constants.DT_DISKLESS:
    if opts.disks or opts.sd_size is not None:
      raise errors.OpPrereqError("Diskless instance but disk"
                                 " information passed")

  if (not opts.disks and not opts.sd_size
      and mode == constants.INSTANCE_CREATE):
    raise errors.OpPrereqError("No disk information specified")
  if opts.disks and opts.sd_size is not None:
    raise errors.OpPrereqError("Please use either the '--disk' or"
  if opts.sd_size is not None:
    # legacy single-disk syntax: translate to the --disk representation
    opts.disks = [(0, {constants.IDISK_SIZE: opts.sd_size})]

    disk_max = max(int(didx[0]) + 1 for didx in opts.disks)
  except ValueError, err:
    raise errors.OpPrereqError("Invalid disk index passed: %s" % str(err))
  # NOTE(review): "[{}] * disk_max" aliases one shared dict across all
  # unspecified disk slots; consider "[{} for _ in range(disk_max)]".
  disks = [{}] * disk_max

  for didx, ddict in opts.disks:

    if not isinstance(ddict, dict):
      msg = "Invalid disk/%d value: expected dict, got %s" % (didx, ddict)
      raise errors.OpPrereqError(msg)
    elif constants.IDISK_SIZE in ddict:
      if constants.IDISK_ADOPT in ddict:
        raise errors.OpPrereqError("Only one of 'size' and 'adopt' allowed"
                                   " (disk %d)" % didx)

        ddict[constants.IDISK_SIZE] = \
          utils.ParseUnit(ddict[constants.IDISK_SIZE])
      except ValueError, err:
        raise errors.OpPrereqError("Invalid disk size for disk %d: %s" %
    elif constants.IDISK_ADOPT in ddict:
      if mode == constants.INSTANCE_IMPORT:
        raise errors.OpPrereqError("Disk adoption not allowed for instance"
      # adopted volumes carry no explicit size
      ddict[constants.IDISK_SIZE] = 0

      raise errors.OpPrereqError("Missing size or adoption source for"

  if opts.tags is not None:
    tags = opts.tags.split(",")

  utils.ForceDictType(opts.beparams, constants.BES_PARAMETER_COMPAT)
  utils.ForceDictType(hvparams, constants.HVS_PARAMETER_TYPES)

  if mode == constants.INSTANCE_CREATE:
    force_variant = opts.force_variant
    no_install = opts.no_install
    identify_defaults = False
  elif mode == constants.INSTANCE_IMPORT:
    force_variant = False
    src_node = opts.src_node
    src_path = opts.src_dir
    identify_defaults = opts.identify_defaults
    raise errors.ProgrammerError("Invalid creation mode %s" % mode)

  op = opcodes.OpInstanceCreate(instance_name=instance,
                                disk_template=opts.disk_template,
                                pnode=pnode, snode=snode,
                                ip_check=opts.ip_check,
                                name_check=opts.name_check,
                                wait_for_sync=opts.wait_for_sync,
                                file_storage_dir=opts.file_storage_dir,
                                file_driver=opts.file_driver,
                                iallocator=opts.iallocator,
                                hypervisor=hypervisor,
                                beparams=opts.beparams,
                                osparams=opts.osparams,
                                force_variant=force_variant,
                                no_install=no_install,
                                identify_defaults=identify_defaults,
                                ignore_ipolicy=opts.ignore_ipolicy)

  SubmitOrSend(op, opts)
class _RunWhileClusterStoppedHelper:
  """Helper class for L{RunWhileClusterStopped} to simplify state management

  """
  def __init__(self, feedback_fn, cluster_name, master_node, online_nodes):
    """Initializes this class.

    @type feedback_fn: callable
    @param feedback_fn: Feedback function
    @type cluster_name: string
    @param cluster_name: Cluster name
    @type master_node: string
    @param master_node: Master node name
    @type online_nodes: list
    @param online_nodes: List of names of online nodes

    """
    self.feedback_fn = feedback_fn
    self.cluster_name = cluster_name
    self.master_node = master_node
    self.online_nodes = online_nodes

    self.ssh = ssh.SshRunner(self.cluster_name)

    self.nonmaster_nodes = [name for name in online_nodes
                            if name != master_node]

    assert self.master_node not in self.nonmaster_nodes

  def _RunCmd(self, node_name, cmd):
    """Runs a command on the local or a remote machine.

    @type node_name: string
    @param node_name: Machine name
    @type cmd: list
    @param cmd: Command

    """
    if node_name is None or node_name == self.master_node:
      # No need to use SSH
      result = utils.RunCmd(cmd)
      # NOTE(review): the "else:" introducing the SSH branch and the
      # "if result.failed:" guard before the error-message assembly are
      # missing from this copy of the file.
      result = self.ssh.Run(node_name, "root", utils.ShellQuoteArgs(cmd))

      errmsg = ["Failed to run command %s" % result.cmd]
      errmsg.append("on node %s" % node_name)
      errmsg.append(": exitcode %s and error %s" %
                    (result.exit_code, result.output))
      raise errors.OpExecError(" ".join(errmsg))

  def Call(self, fn, *args):
    """Call function while all daemons are stopped.

    @type fn: callable
    @param fn: Function to be called

    """
    # Pause watcher by acquiring an exclusive lock on watcher state file
    self.feedback_fn("Blocking watcher")
    watcher_block = utils.FileLock.Open(constants.WATCHER_LOCK_FILE)

    # TODO: Currently, this just blocks. There's no timeout.
    # TODO: Should it be a shared lock?
    watcher_block.Exclusive(blocking=True)

    # NOTE(review): the try/finally structure that guarantees daemons are
    # restarted and the lock released is missing from this copy of the
    # file; the surviving lines are kept verbatim.
    # Stop master daemons, so that no new jobs can come in and all running
    self.feedback_fn("Stopping master daemons")
    self._RunCmd(None, [constants.DAEMON_UTIL, "stop-master"])

    # Stop daemons on all nodes
    for node_name in self.online_nodes:
      self.feedback_fn("Stopping daemons on %s" % node_name)
      self._RunCmd(node_name, [constants.DAEMON_UTIL, "stop-all"])

    # All daemons are shut down now
    return fn(self, *args)
    except Exception, err:
      _, errmsg = FormatError(err)
      logging.exception("Caught exception")
      self.feedback_fn(errmsg)

    # Start cluster again, master node last
    for node_name in self.nonmaster_nodes + [self.master_node]:
      self.feedback_fn("Starting daemons on %s" % node_name)
      self._RunCmd(node_name, [constants.DAEMON_UTIL, "start-all"])

    watcher_block.Close()
def RunWhileClusterStopped(feedback_fn, fn, *args):
  """Calls a function while all cluster daemons are stopped.

  @type feedback_fn: callable
  @param feedback_fn: Feedback function
  @type fn: callable
  @param fn: Function to be called when daemons are stopped

  """
  feedback_fn("Gathering cluster information")

  # This ensures we're running on the master daemon
  # NOTE(review): the creation of the luxi client ("cl") appears elided here
  (cluster_name, master_node) = \
    cl.QueryConfigValues(["cluster_name", "master_node"])

  online_nodes = GetOnlineNodes([], cl=cl)

  # Don't keep a reference to the client. The master daemon will go away.
  assert master_node in online_nodes

  return _RunWhileClusterStoppedHelper(feedback_fn, cluster_name, master_node,
                                       online_nodes).Call(fn, *args)
def GenerateTable(headers, fields, separator, data,
                  numfields=None, unitfields=None,
                  # NOTE(review): the "units=None):" tail of this signature
                  # appears elided in this view
  """Prints a table with headers and different fields.

  @type headers: dict
  @param headers: dictionary mapping field names to headers for
      the table
  @type fields: list
  @param fields: the field names corresponding to each row in
      the data field
  @type separator: string or None
  @param separator: the separator to be used; if this is None,
      the default 'smart' algorithm is used which computes optimal
      field width, otherwise just the separator is used between
      each field
  @type data: list
  @param data: a list of lists, each sublist being one row to be output
  @type numfields: list
  @param numfields: a list with the fields that hold numeric
      values and thus should be right-aligned
  @type unitfields: list
  @param unitfields: a list with the fields that hold numeric
      values that should be formatted with the units field
  @type units: string or None
  @param units: the units we should use for formatting, or None for
      automatic choice (human-readable for non-separator usage, otherwise
      megabytes); this is a one-letter string

  """
  # NOTE(review): default handling for "units" appears elided here
  if numfields is None:
    # NOTE(review): "numfields = []" default appears elided
  if unitfields is None:
    # NOTE(review): "unitfields = []" default appears elided

  numfields = utils.FieldSet(*numfields) # pylint: disable=W0142
  unitfields = utils.FieldSet(*unitfields) # pylint: disable=W0142

  # NOTE(review): "format_fields = []" initialization appears elided
  for field in fields:
    if headers and field not in headers:
      # TODO: handle better unknown fields (either revert to old
      # style of raising exception, or deal more intelligently with
      # variable fields)
      headers[field] = field
    if separator is not None:
      format_fields.append("%s")
    elif numfields.Matches(field):
      format_fields.append("%*s")
      # NOTE(review): the final "else:" branch (left-aligned text fields)
      # appears elided above this line
      format_fields.append("%-*s")

  if separator is None:
    mlens = [0 for name in fields]
    format_str = " ".join(format_fields)
    # NOTE(review): the "else:" branch appears elided — separator joined,
    # with literal "%" escaped for the later "%" formatting
    format_str = separator.replace("%", "%%").join(format_fields)

  # NOTE(review): the "for row in data:" loop header and a None-row guard
  # appear elided here
    for idx, val in enumerate(row):
      if unitfields.Matches(fields[idx]):
        # NOTE(review): "try: val = int(val)" appears elided
        except (TypeError, ValueError):
          # NOTE(review): fall-through for non-numeric values elided
          val = row[idx] = utils.FormatUnit(val, units)
      val = row[idx] = str(val)
      if separator is None:
        mlens[idx] = max(mlens[idx], len(val))

  # NOTE(review): initialization of "result"/"args" and header handling
  # appear elided here
  for idx, name in enumerate(fields):
    # NOTE(review): "hdr = headers[name]" appears elided
    if separator is None:
      mlens[idx] = max(mlens[idx], len(hdr))
      args.append(mlens[idx])
  result.append(format_str % tuple(args))

  if separator is None:
    assert len(mlens) == len(fields)
    if fields and not numfields.Matches(fields[-1]):
      # NOTE(review): resetting the last column's width appears elided

  # NOTE(review): the per-row output loop header appears elided; a None row
  # is rendered as dashes
    line = ["-" for _ in fields]
    for idx in range(len(fields)):
      if separator is None:
        args.append(mlens[idx])
      args.append(line[idx])
    result.append(format_str % tuple(args))
    # NOTE(review): the final "return result" appears elided
def _FormatBool(value):
  """Formats a boolean value as a string.

  """
  # NOTE(review): the body is elided in this view; presumably returns a short
  # textual representation of the boolean — confirm against the full file
#: Default formatting for query results; maps field kind to a tuple of
#: (formatting callback, align right)
_DEFAULT_FORMAT_QUERY = {
  constants.QFT_TEXT: (str, False),
  constants.QFT_BOOL: (_FormatBool, False),
  constants.QFT_NUMBER: (str, True),
  constants.QFT_TIMESTAMP: (utils.FormatTime, False),
  constants.QFT_OTHER: (str, False),
  constants.QFT_UNKNOWN: (str, False),
  # NOTE(review): the closing brace of this dictionary appears elided in
  # this view; QFT_UNIT is deliberately absent (handled dynamically in
  # _GetColumnFormatter)
def _GetColumnFormatter(fdef, override, unit):
  """Returns formatting function for a field.

  @type fdef: L{objects.QueryFieldDefinition}
  @type override: dict
  @param override: Dictionary for overriding field formatting functions,
    indexed by field name, contents like L{_DEFAULT_FORMAT_QUERY}
  @type unit: string
  @param unit: Unit used for formatting fields of type L{constants.QFT_UNIT}
  @rtype: tuple; (callable, bool)
  @return: Returns the function to format a value (takes one parameter) and a
    boolean for aligning the value on the right-hand side

  """
  fmt = override.get(fdef.name, None)
  # NOTE(review): an early "return fmt" for a non-None override appears
  # elided here

  assert constants.QFT_UNIT not in _DEFAULT_FORMAT_QUERY

  if fdef.kind == constants.QFT_UNIT:
    # Can't keep this information in the static dictionary
    return (lambda value: utils.FormatUnit(value, unit), True)

  fmt = _DEFAULT_FORMAT_QUERY.get(fdef.kind, None)
  # NOTE(review): a non-None check returning "fmt" appears elided here; the
  # raise below is only reached for unhandled field kinds

  raise NotImplementedError("Can't format column type '%s'" % fdef.kind)
class _QueryColumnFormatter:
  """Callable class for formatting fields of a query.

  """
  def __init__(self, fn, status_fn, verbose):
    """Initializes this class.

    @type fn: callable
    @param fn: Formatting function
    @type status_fn: callable
    @param status_fn: Function to report fields' status
    @type verbose: boolean
    @param verbose: whether to use verbose field descriptions or not

    """
    # NOTE(review): the "self._fn = fn" assignment appears elided in this
    # view (self._fn is read in __call__ below)
    self._status_fn = status_fn
    self._verbose = verbose

  def __call__(self, data):
    """Returns a field's string representation.

    """
    (status, value) = data

    # Report the status to the caller's statistics callback first
    self._status_fn(status)

    if status == constants.RS_NORMAL:
      return self._fn(value)

    assert value is None, \
           "Found value %r for abnormal status %s" % (value, status)

    return FormatResultError(status, self._verbose)
def FormatResultError(status, verbose):
  """Formats result status other than L{constants.RS_NORMAL}.

  @param status: The result status
  @type verbose: boolean
  @param verbose: Whether to return the verbose text
  @return: Text of result status

  """
  assert status != constants.RS_NORMAL, \
         "FormatResultError called with status equal to constants.RS_NORMAL"
  # NOTE(review): a "try:" around the lookup below and the selection/return
  # of verbose_text or normal_text appear elided in this view; the raise is
  # presumably the KeyError fallback
  (verbose_text, normal_text) = constants.RSS_DESCRIPTION[status]
  raise NotImplementedError("Unknown status %s" % status)
def FormatQueryResult(result, unit=None, format_override=None, separator=None,
                      header=False, verbose=False):
  """Formats data in L{objects.QueryResponse}.

  @type result: L{objects.QueryResponse}
  @param result: result of query operation
  @type unit: string
  @param unit: Unit used for formatting fields of type L{constants.QFT_UNIT},
    see L{utils.text.FormatUnit}
  @type format_override: dict
  @param format_override: Dictionary for overriding field formatting functions,
    indexed by field name, contents like L{_DEFAULT_FORMAT_QUERY}
  @type separator: string or None
  @param separator: String used to separate fields
  @type header: bool
  @param header: Whether to output header row
  @type verbose: boolean
  @param verbose: whether to use verbose field descriptions or not

  """
  if format_override is None:
    format_override = {}

  # Per-status counters, used below to determine the overall result status
  stats = dict.fromkeys(constants.RS_ALL, 0)

  def _RecordStatus(status):
    # NOTE(review): body elided in this view; presumably increments
    # stats[status]

  # NOTE(review): "columns = []" initialization appears elided
  for fdef in result.fields:
    assert fdef.title and fdef.name
    (fn, align_right) = _GetColumnFormatter(fdef, format_override, unit)
    columns.append(TableColumn(fdef.title,
                               _QueryColumnFormatter(fn, _RecordStatus,
                               # NOTE(review): remaining arguments of this
                               # call appear elided

  table = FormatTable(result.data, columns, header, separator)

  # Collect statistics
  assert len(stats) == len(constants.RS_ALL)
  assert compat.all(count >= 0 for count in stats.values())

  # Determine overall status. If there was no data, unknown fields must be
  # detected via the field definitions.
  if (stats[constants.RS_UNKNOWN] or
      (not result.data and _GetUnknownFields(result.fields))):
    # NOTE(review): "status = QR_UNKNOWN" appears elided
  elif compat.any(count > 0 for key, count in stats.items()
                  if key != constants.RS_NORMAL):
    status = QR_INCOMPLETE
    # NOTE(review): the "else: status = QR_NORMAL" branch appears elided

  return (status, table)
def _GetUnknownFields(fdefs):
  """Returns list of unknown fields included in C{fdefs}.

  @type fdefs: list of L{objects.QueryFieldDefinition}
  @return: the subset of C{fdefs} whose kind is L{constants.QFT_UNKNOWN}

  """
  unknown_kind = constants.QFT_UNKNOWN
  return [field_def for field_def in fdefs if field_def.kind == unknown_kind]
def _WarnUnknownFields(fdefs):
  """Prints a warning to stderr if a query included unknown fields.

  @type fdefs: list of L{objects.QueryFieldDefinition}

  """
  unknown = _GetUnknownFields(fdefs)
  # NOTE(review): an "if unknown:" guard appears elided above this line
  ToStderr("Warning: Queried for unknown fields %s",
           utils.CommaJoin(fdef.name for fdef in unknown))
  # NOTE(review): a "return bool(unknown)"-style result appears elided;
  # callers (e.g. GenericList) use this function's return value
def GenericList(resource, fields, names, unit, separator, header, cl=None,
                format_override=None, verbose=False, force_filter=False):
  """Generic implementation for listing all items of a resource.

  @param resource: One of L{constants.QR_VIA_LUXI}
  @type fields: list of strings
  @param fields: List of fields to query for
  @type names: list of strings
  @param names: Names of items to query for
  @type unit: string or None
  @param unit: Unit used for formatting fields of type L{constants.QFT_UNIT} or
    None for automatic choice (human-readable for non-separator usage,
    otherwise megabytes); this is a one-letter string
  @type separator: string or None
  @param separator: String used to separate fields
  @type header: bool
  @param header: Whether to show header row
  @type force_filter: bool
  @param force_filter: Whether to always treat names as filter
  @type format_override: dict
  @param format_override: Dictionary for overriding field formatting functions,
    indexed by field name, contents like L{_DEFAULT_FORMAT_QUERY}
  @type verbose: boolean
  @param verbose: whether to use verbose field descriptions or not

  """
  # NOTE(review): normalization of "names" and default creation of the luxi
  # client ("cl") appear elided in this view
  qfilter = qlang.MakeFilter(names, force_filter)

  response = cl.Query(resource, fields, qfilter)

  found_unknown = _WarnUnknownFields(response.fields)

  (status, data) = FormatQueryResult(response, unit=unit, separator=separator,
                                     format_override=format_override,
                                     # NOTE(review): remaining keyword
                                     # arguments and the loop printing each
                                     # line of "data" appear elided here

  assert ((found_unknown and status == QR_UNKNOWN) or
          (not found_unknown and status != QR_UNKNOWN))

  if status == QR_UNKNOWN:
    return constants.EXIT_UNKNOWN_FIELD

  # TODO: Should the list command fail if not all data could be collected?
  return constants.EXIT_SUCCESS
def GenericListFields(resource, fields, separator, header, cl=None):
  """Generic implementation for listing fields for a resource.

  @param resource: One of L{constants.QR_VIA_LUXI}
  @type fields: list of strings
  @param fields: List of fields to query for
  @type separator: string or None
  @param separator: String used to separate fields
  @type header: bool
  @param header: Whether to show header row

  """
  # NOTE(review): default creation of the luxi client ("cl") appears elided
  response = cl.QueryFields(resource, fields)

  found_unknown = _WarnUnknownFields(response.fields)

  # NOTE(review): the "columns = [" opener appears elided above these lines
    TableColumn("Name", str, False),
    TableColumn("Title", str, False),
    TableColumn("Description", str, False),
  # NOTE(review): the closing "]" appears elided

  rows = [[fdef.name, fdef.title, fdef.doc] for fdef in response.fields]

  for line in FormatTable(rows, columns, header, separator):
    # NOTE(review): "ToStdout(line)" appears elided here

  # NOTE(review): an "if found_unknown:" guard appears elided above this
  # return, making the two returns below the two possible exit codes
  return constants.EXIT_UNKNOWN_FIELD

  return constants.EXIT_SUCCESS
2876 """Describes a column for L{FormatTable}.
  def __init__(self, title, fn, align_right):
    """Initializes this class.

    @type title: string
    @param title: Column title
    @type fn: callable
    @param fn: Formatting function
    @type align_right: bool
    @param align_right: Whether to align values on the right-hand side

    """
    # NOTE(review): the assignments of self.title and self.format appear
    # elided in this view (FormatTable reads col.title and col.format)
    self.align_right = align_right
def _GetColFormatString(width, align_right):
  """Returns the format string for a field.

  @type width: int
  @param width: Field width
  @type align_right: bool
  @param align_right: Whether to align values on the right-hand side

  """
  # NOTE(review): the selection of "sign" (empty for right-aligned, "-" for
  # left-aligned) appears elided here
  return "%%%s%ss" % (sign, width)
def FormatTable(rows, columns, header, separator):
  """Formats data as a table.

  @type rows: list of lists
  @param rows: Row data, one list per row
  @type columns: list of L{TableColumn}
  @param columns: Column descriptions
  @type header: bool
  @param header: Whether to show header row
  @type separator: string or None
  @param separator: String used to separate columns

  """
  # NOTE(review): an "if header:" guard appears elided above these two lines
  data = [[col.title for col in columns]]
  colwidth = [len(col.title) for col in columns]
  # NOTE(review): the "else:" branch ("data = []") is partially elided
  colwidth = [0 for _ in columns]

  # Format row data
  # NOTE(review): the "for row in rows:" loop header appears elided
    assert len(row) == len(columns)

    formatted = [col.format(value) for value, col in zip(row, columns)]

    if separator is None:
      # Update column widths
      for idx, (oldwidth, value) in enumerate(zip(colwidth, formatted)):
        # Modifying a list's items while iterating is fine
        colwidth[idx] = max(oldwidth, len(value))

    data.append(formatted)

  if separator is not None:
    # Return early if a separator is used
    return [separator.join(row) for row in data]

  if columns and not columns[-1].align_right:
    # Avoid unnecessary spaces at end of line
    # NOTE(review): zeroing of the last column width appears elided here

  # Build format string
  fmt = " ".join([_GetColFormatString(width, col.align_right)
                  for col, width in zip(columns, colwidth)])

  return [fmt % tuple(row) for row in data]
def FormatTimestamp(ts):
  """Formats a given timestamp.

  @type ts: timestamp
  @param ts: a timeval-type timestamp, a tuple of seconds and microseconds

  @rtype: string
  @return: a string with the formatted timestamp

  """
  if not isinstance(ts, (tuple, list)) or len(ts) != 2:
    # Malformed input: return a placeholder instead of raising
    return "?"

  # Bug fix: the previous code used "sec"/"usec" without ever unpacking the
  # tuple, so any call raised NameError
  (sec, usec) = ts
  # Second-resolution local time plus explicit microsecond suffix
  return time.strftime("%F %T", time.localtime(sec)) + ".%06d" % usec
def ParseTimespec(value):
  """Parse a time specification.

  The following suffixes are recognized:

    - s: seconds
    - m: minutes
    - h: hours
    - d: days
    - w: weeks

  Without any suffix, the value will be taken to be in seconds.

  @rtype: int
  @return: the parsed value, in seconds
  @raise errors.OpPrereqError: if the specification is empty or invalid

  """
  value = str(value)
  if not value:
    raise errors.OpPrereqError("Empty time specification passed")
  # Bug fix: "suffix_map" was referenced but never defined and no value was
  # returned; mapping of suffix letter to its multiplier in seconds
  suffix_map = {
    "s": 1,
    "m": 60,
    "h": 3600,
    "d": 86400,
    "w": 604800,
    }
  if value[-1] not in suffix_map:
    # No recognized suffix: plain number of seconds
    try:
      value = int(value)
    except (TypeError, ValueError):
      raise errors.OpPrereqError("Invalid time specification '%s'" % value)
  else:
    multiplier = suffix_map[value[-1]]
    value = value[:-1]
    if not value: # no data left after stripping the suffix
      raise errors.OpPrereqError("Invalid time specification (only"
                                 " suffix passed)")
    try:
      value = int(value) * multiplier
    except (TypeError, ValueError):
      raise errors.OpPrereqError("Invalid time specification '%s'" % value)
  return value
def GetOnlineNodes(nodes, cl=None, nowarn=False, secondary_ips=False,
                   filter_master=False, nodegroup=None):
  """Returns the names of online nodes.

  This function will also log a warning on stderr with the names of
  the offline nodes that were skipped.

  @param nodes: if not empty, use only this subset of nodes (minus the
    offline ones)
  @param cl: if not None, luxi client to use
  @type nowarn: boolean
  @param nowarn: by default, this function will output a note with the
    offline nodes that are skipped; if this parameter is True the
    note is not displayed
  @type secondary_ips: boolean
  @param secondary_ips: if True, return the secondary IPs instead of the
    names, useful for doing network traffic over the replication interface
    (if any)
  @type filter_master: boolean
  @param filter_master: if True, do not return the master node in the list
    (useful in coordination with secondary_ips where we cannot check our
    node name against the list)
  @type nodegroup: string
  @param nodegroup: If set, only return nodes in this node group

  """
  # NOTE(review): client creation, "qfilter = []" initialization and the
  # "if nodes:" guard appear elided in this view
  qfilter.append(qlang.MakeSimpleFilter("name", nodes))

  if nodegroup is not None:
    # Match either the group name or its UUID
    qfilter.append([qlang.OP_OR, [qlang.OP_EQUAL, "group", nodegroup],
                    [qlang.OP_EQUAL, "group.uuid", nodegroup]])

  # NOTE(review): an "if filter_master:" guard appears elided above this line
  qfilter.append([qlang.OP_NOT, [qlang.OP_TRUE, "master"]])

  if len(qfilter) > 1:
    final_filter = [qlang.OP_AND] + qfilter
  # NOTE(review): the "else:" branch appears elided here
    assert len(qfilter) == 1
    final_filter = qfilter[0]

  result = cl.Query(constants.QR_NODE, ["name", "offline", "sip"], final_filter)

  def _IsOffline(row):
    # Each row entry is a (status, value) pair; second field is "offline"
    (_, (_, offline), _) = row
    # NOTE(review): "return offline" appears elided

  # NOTE(review): a "def _GetName(row):" header appears elided here
  # (_GetName is used below for the warning message)
    ((_, name), _, _) = row

  # NOTE(review): a second helper extracting the secondary IP appears to be
  # elided here — confirm against the full file
    (_, _, (_, sip)) = row

  (offline, online) = compat.partition(result.data, _IsOffline)

  if offline and not nowarn:
    ToStderr("Note: skipping offline node(s): %s" %
             utils.CommaJoin(map(_GetName, offline)))

  # NOTE(review): selection of "fn" (name vs secondary-IP extractor,
  # depending on secondary_ips) appears elided here
  return map(fn, online)
def _ToStream(stream, txt, *args):
  """Write a message to a stream, bypassing the logging system

  @type stream: file object
  @param stream: the file to which we should write
  @type txt: str
  @param txt: the message

  """
  stream.write(txt % args)
  # NOTE(review): the enclosing "try:", the no-args write path, the trailing
  # newline write and the flush appear elided in this view
  except IOError, err:
    if err.errno == errno.EPIPE:
      # our terminal went away, we'll exit
      sys.exit(constants.EXIT_FAILURE)
    # NOTE(review): a re-raise for other I/O errors appears elided here
def ToStdout(txt, *args):
  """Write a message to stdout only, bypassing the logging system

  This is just a wrapper over _ToStream.

  @type txt: str
  @param txt: the message

  """
  target = sys.stdout
  _ToStream(target, txt, *args)
def ToStderr(txt, *args):
  """Write a message to stderr only, bypassing the logging system

  This is just a wrapper over _ToStream.

  @type txt: str
  @param txt: the message

  """
  target = sys.stderr
  _ToStream(target, txt, *args)
class JobExecutor(object):
  """Class which manages the submission and execution of multiple jobs.

  Note that instances of this class should not be reused between
  different runs.

  """
  def __init__(self, cl=None, verbose=True, opts=None, feedback_fn=None):
    # NOTE(review): initialization of the job queue, the submitted-jobs list
    # and the luxi client appear elided in this view
    self.verbose = verbose
    self.feedback_fn = feedback_fn
    # Monotonic counter used to keep results in submission order
    self._counter = itertools.count()

  # NOTE(review): a "@staticmethod" decorator appears elided here ("self" is
  # not among the parameters)
  def _IfName(name, fmt):
    """Helper function for formatting name.

    """
    # NOTE(review): body elided; presumably returns "fmt % name" when a name
    # is given and an empty string otherwise — confirm against the full file

  def QueueJob(self, name, *ops):
    """Record a job for later submit.

    @type name: string
    @param name: a description of the job, will be used in WaitJobSet

    """
    SetGenericOpcodeOpts(ops, self.opts)
    self.queue.append((self._counter.next(), name, ops))

  def AddJobId(self, name, status, job_id):
    """Adds a job ID to the internal queue.

    """
    self.jobs.append((self._counter.next(), status, job_id, name))

  def SubmitPending(self, each=False):
    """Submit all pending jobs.

    @type each: bool
    @param each: if True, submit jobs one by one instead of in one batch

    """
    # NOTE(review): "if each:" and "results = []" appear elided here
      for (_, _, ops) in self.queue:
        # SubmitJob will remove the success status, but raise an exception if
        # the submission fails, so we'll notice that anyway.
        results.append([True, self.cl.SubmitJob(ops)[0]])
    # NOTE(review): the "else:" (batch submission) branch header is elided
      results = self.cl.SubmitManyJobs([ops for (_, _, ops) in self.queue])
    for ((status, data), (idx, name, _)) in zip(results, self.queue):
      self.jobs.append((idx, status, data, name))

  def _ChooseJob(self):
    """Choose a non-waiting/queued job to poll next.

    """
    assert self.jobs, "_ChooseJob called with empty job list"

    result = self.cl.QueryJobs([i[2] for i in self.jobs[:_CHOOSE_BATCH]],
                               # NOTE(review): the queried fields argument
                               # appears elided here

    for job_data, status in zip(self.jobs, result):
      if (isinstance(status, list) and status and
          status[0] in (constants.JOB_STATUS_QUEUED,
                        constants.JOB_STATUS_WAITING,
                        constants.JOB_STATUS_CANCELING)):
        # job is still present and waiting
        # NOTE(review): a "continue" appears elided here
      # good candidate found (either running job or lost job)
      self.jobs.remove(job_data)
      # NOTE(review): "return job_data" appears elided here

    # no candidate found; fall back to the oldest job
    return self.jobs.pop(0)

  def GetResults(self):
    """Wait for and return the results of all jobs.

    @rtype: list
    @return: list of tuples (success, job results), in the same order
        as the submitted jobs; if a job has failed, instead of the result
        there will be the error message

    """
    # NOTE(review): an "if self.queue:" guard and "results = []" appear
    # elided around this call
    self.SubmitPending()

    ok_jobs = [row[2] for row in self.jobs if row[1]]
    # NOTE(review): an "if ok_jobs:" guard appears elided above this line
    ToStdout("Submitted jobs %s", utils.CommaJoin(ok_jobs))

    # first, remove any non-submitted jobs
    self.jobs, failures = compat.partition(self.jobs, lambda x: x[1])
    for idx, _, jid, name in failures:
      ToStderr("Failed to submit job%s: %s", self._IfName(name, " for %s"), jid)
      results.append((idx, False, jid))

    # NOTE(review): a "while self.jobs:" loop header and an enclosing "try:"
    # appear elided below
      (idx, _, jid, name) = self._ChooseJob()
      ToStdout("Waiting for job %s%s ...", jid, self._IfName(name, " for %s"))
      job_result = PollJob(jid, cl=self.cl, feedback_fn=self.feedback_fn)
      except errors.JobLost, err:
        _, job_result = FormatError(err)
        ToStderr("Job %s%s has been archived, cannot check its result",
                 jid, self._IfName(name, " for %s"))
      except (errors.GenericError, luxi.ProtocolError), err:
        _, job_result = FormatError(err)
        # the error message will always be shown, verbose or not
        ToStderr("Job %s%s has failed: %s",
                 jid, self._IfName(name, " for %s"), job_result)

      results.append((idx, success, job_result))

    # sort based on the index, then drop it
    # NOTE(review): "results.sort()" appears elided above this line
    results = [i[1:] for i in results]
    # NOTE(review): the final "return results" appears elided

  def WaitOrShow(self, wait):
    """Wait for job results or only print the job IDs.

    @type wait: boolean
    @param wait: whether to wait or not

    """
    # NOTE(review): an "if wait:" guard appears elided above this line
    return self.GetResults()
    # NOTE(review): "else:" branch below — submit and only print job IDs
    self.SubmitPending()
    for _, status, result, name in self.jobs:
      # NOTE(review): "if status:" / "else:" appear elided around the next
      # two lines (success goes to stdout, failure to stderr)
      ToStdout("%s: %s", result, name)
      ToStderr("Failure for %s: %s", name, result)
    return [row[1:3] for row in self.jobs]
def FormatParameterDict(buf, param_dict, actual, level=1):
  """Formats a parameter dictionary.

  @type buf: L{StringIO}
  @param buf: the buffer into which to write
  @type param_dict: dict
  @param param_dict: the own parameters
  @type actual: dict
  @param actual: the current parameter set (including defaults)
  @type level: int
  @param level: Level of indent

  """
  indent_txt = " " * level
  for name in sorted(actual):
    # Show the explicitly-set value when present, the default otherwise
    try:
      shown = param_dict[name]
    except KeyError:
      shown = "default (%s)" % actual[name]
    buf.write("%s- %s: %s\n" % (indent_txt, name, shown))
3311 def ConfirmOperation(names, list_type, text, extra=""):
3312 """Ask the user to confirm an operation on a list of list_type.
3314 This function is used to request confirmation for doing an operation
3315 on a given list of list_type.
3318 @param names: the list of names that we display when
3319 we ask for confirmation
3320 @type list_type: str
3321 @param list_type: Human readable name for elements in the list (e.g. nodes)
3323 @param text: the operation that the user should confirm
3325 @return: True or False depending on user's confirmation.
3329 msg = ("The %s will operate on %d %s.\n%s"
3330 "Do you want to continue?" % (text, count, list_type, extra))
3331 affected = (("\nAffected %s:\n" % list_type) +
3332 "\n".join([" %s" % name for name in names]))
3334 choices = [("y", True, "Yes, execute the %s" % text),
3335 ("n", False, "No, abort the %s" % text)]
3338 choices.insert(1, ("v", "v", "View the list of affected %s" % list_type))
3341 question = msg + affected
3343 choice = AskUser(question, choices)
3346 choice = AskUser(msg + affected, choices)