4 # Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011 Google Inc.
6 # This program is free software; you can redistribute it and/or modify
7 # it under the terms of the GNU General Public License as published by
8 # the Free Software Foundation; either version 2 of the License, or
9 # (at your option) any later version.
11 # This program is distributed in the hope that it will be useful, but
12 # WITHOUT ANY WARRANTY; without even the implied warranty of
13 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 # General Public License for more details.
16 # You should have received a copy of the GNU General Public License
17 # along with this program; if not, write to the Free Software
18 # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
22 """Module dealing with command line parsing"""
33 from cStringIO import StringIO
35 from ganeti import utils
36 from ganeti import errors
37 from ganeti import constants
38 from ganeti import opcodes
39 from ganeti import luxi
40 from ganeti import ssconf
41 from ganeti import rpc
42 from ganeti import ssh
43 from ganeti import compat
44 from ganeti import netutils
45 from ganeti import qlang
47 from optparse import (OptionParser, TitledHelpFormatter,
48 Option, OptionValueError)
52 # Command line options
65 "CLUSTER_DOMAIN_SECRET_OPT",
83 "FILESTORE_DRIVER_OPT",
89 "GLOBAL_SHARED_FILEDIR_OPT",
94 "DEFAULT_IALLOCATOR_OPT",
95 "IDENTIFY_DEFAULTS_OPT",
98 "IGNORE_FAILURES_OPT",
100 "IGNORE_REMOVE_FAILURES_OPT",
101 "IGNORE_SECONDARIES_OPT",
105 "MAINTAIN_NODE_HEALTH_OPT",
107 "MASTER_NETMASK_OPT",
109 "MIGRATION_MODE_OPT",
111 "NEW_CLUSTER_CERT_OPT",
112 "NEW_CLUSTER_DOMAIN_SECRET_OPT",
113 "NEW_CONFD_HMAC_KEY_OPT",
116 "NEW_SPICE_CERT_OPT",
118 "NODE_FORCE_JOIN_OPT",
120 "NODE_PLACEMENT_OPT",
124 "NODRBD_STORAGE_OPT",
130 "NOMODIFY_ETCHOSTS_OPT",
131 "NOMODIFY_SSH_SETUP_OPT",
137 "NOSSH_KEYCHECK_OPT",
151 "PREALLOC_WIPE_DISKS_OPT",
152 "PRIMARY_IP_VERSION_OPT",
158 "REMOVE_INSTANCE_OPT",
163 "SECONDARY_ONLY_OPT",
167 "SHUTDOWN_TIMEOUT_OPT",
169 "SPECS_CPU_COUNT_OPT",
170 "SPECS_DISK_COUNT_OPT",
171 "SPECS_DISK_SIZE_OPT",
172 "SPECS_MEM_SIZE_OPT",
173 "SPECS_NIC_COUNT_OPT",
179 "STARTUP_PAUSED_OPT",
188 "USE_EXTERNAL_MIP_SCRIPT",
195 # Generic functions for CLI programs
198 "GenericInstanceCreate",
204 "JobSubmittedException",
206 "RunWhileClusterStopped",
210 # Formatting functions
211 "ToStderr", "ToStdout",
214 "FormatParameterDict",
223 # command line options support infrastructure
224 "ARGS_MANY_INSTANCES",
243 "OPT_COMPL_INST_ADD_NODES",
244 "OPT_COMPL_MANY_NODES",
245 "OPT_COMPL_ONE_IALLOCATOR",
246 "OPT_COMPL_ONE_INSTANCE",
247 "OPT_COMPL_ONE_NODE",
248 "OPT_COMPL_ONE_NODEGROUP",
254 "COMMON_CREATE_OPTS",
260 #: Priorities (sorted)
262 ("low", constants.OP_PRIO_LOW),
263 ("normal", constants.OP_PRIO_NORMAL),
264 ("high", constants.OP_PRIO_HIGH),
267 #: Priority dictionary for easier lookup
268 # TODO: Replace this and _PRIORITY_NAMES with a single sorted dictionary once
269 # we migrate to Python 2.6
# Maps a priority name (e.g. "low") to its numeric constants.OP_PRIO_* value,
# built from the (name, value) pairs in _PRIORITY_NAMES defined above.
270 _PRIONAME_TO_VALUE = dict(_PRIORITY_NAMES)
272 # Query result status for clients
275 QR_INCOMPLETE) = range(3)
277 #: Maximum batch size for ChooseJob
# NOTE(review): fragment of the _Argument base class (its "class" line is
# elided in this view).  __init__ records the minimum/maximum number of
# values the argument accepts; "min"/"max" shadow the builtins on purpose,
# hence the pylint disable.
282 def __init__(self, min=0, max=None): # pylint: disable=W0622
# Debug representation showing the class name and the min/max bounds
# (the enclosing __repr__ "def" line is elided in this view).
287 return ("<%s min=%s max=%s>" %
288 (self.__class__.__name__, self.min, self.max))
291 class ArgSuggest(_Argument):
292 """Suggesting argument.
294 Value can be any of the ones passed to the constructor.
# "min"/"max" deliberately shadow the builtins, matching _Argument.__init__.
297 # pylint: disable=W0622
298 def __init__(self, min=0, max=None, choices=None):
299 _Argument.__init__(self, min=min, max=max)
# Candidate values to suggest (e.g. for shell completion); the code here
# does not enforce them -- see ArgChoice for the enforcing variant.
300 self.choices = choices
# Debug representation including the suggested choices (the enclosing
# __repr__ "def" line is elided in this view).
303 return ("<%s min=%s max=%s choices=%r>" %
304 (self.__class__.__name__, self.min, self.max, self.choices))
# Like ArgSuggest, but the value must be one of the given choices.
307 class ArgChoice(ArgSuggest):
310 Value can be any of the ones passed to the constructor. Like L{ArgSuggest},
311 but value must be one of the choices.
# Argument whose meaning is only determined at runtime.
316 class ArgUnknown(_Argument):
317 """Unknown argument to program (e.g. determined at runtime).
# Argument naming instance(s).
322 class ArgInstance(_Argument):
323 """Instances argument.
# Argument naming node(s).
328 class ArgNode(_Argument):
# Argument naming node group(s).
334 class ArgGroup(_Argument):
335 """Node group argument.
# Argument holding job ID(s).
340 class ArgJobId(_Argument):
# Argument holding a file path.
346 class ArgFile(_Argument):
347 """File path argument.
# Argument holding a command (docstring elided in this view).
352 class ArgCommand(_Argument):
# Argument holding a host name (docstring elided in this view).
358 class ArgHost(_Argument):
# Argument holding an OS name (docstring elided in this view).
364 class ArgOs(_Argument):
# Predefined argument-specification lists shared by the CLI command tables:
# "MANY" variants accept any number of values, "ONE" variants exactly one.
371 ARGS_MANY_INSTANCES = [ArgInstance()]
372 ARGS_MANY_NODES = [ArgNode()]
373 ARGS_MANY_GROUPS = [ArgGroup()]
374 ARGS_ONE_INSTANCE = [ArgInstance(min=1, max=1)]
375 ARGS_ONE_NODE = [ArgNode(min=1, max=1)]
377 ARGS_ONE_GROUP = [ArgGroup(min=1, max=1)]
378 ARGS_ONE_OS = [ArgOs(min=1, max=1)]
381 def _ExtractTagsObject(opts, args):
382 """Extract the tag type object.
384 Note that this function will modify its args parameter.
# Programming-error guard: every tag command must set opts.tag_type.
387 if not hasattr(opts, "tag_type"):
388 raise errors.ProgrammerError("tag_type not passed to _ExtractTagsObject")
# NOTE(review): the assignment of 'kind' (presumably from opts.tag_type) is
# elided in this view; the branches below dispatch on it.
390 if kind == constants.TAG_CLUSTER:
# Node group / node / instance tags additionally take the object's name as
# the first positional argument.
392 elif kind in (constants.TAG_NODEGROUP,
394 constants.TAG_INSTANCE):
# User error: a kind that needs an object name got no arguments.
396 raise errors.OpPrereqError("no arguments passed to the command")
# Any other kind is a programming error, not a user error.
400 raise errors.ProgrammerError("Unhandled tag type '%s'" % kind)
404 def _ExtendTags(opts, args):
405 """Extend the args if a source file has been given.
407 This function will extend the tags with the contents of the file
408 passed in the 'tags_source' attribute of the opts parameter. A file
409 named '-' will be replaced by stdin.
# Filename given via --from; the None (no-op) check is elided in this view.
412 fname = opts.tags_source
# NOTE(review): the '-' (stdin) special case is elided in this view.
418 new_fh = open(fname, "r")
421 # we don't use the nice 'new_data = [line.strip() for line in fh]'
422 # because of python bug 1633941
# Read line by line, stripping surrounding whitespace from each tag name.
424 line = new_fh.readline()
427 new_data.append(line.strip())
# Append the tags read from the file to the positional arguments in place.
430 args.extend(new_data)
433 def ListTags(opts, args):
434 """List the tags on a given object.
436 This is a generic implementation that knows how to deal with all
437 three cases of tag objects (cluster, node, instance). The opts
438 argument is expected to contain a tag_type field denoting what
439 object type we work on.
442 kind, name = _ExtractTagsObject(opts, args)
# NOTE(review): creation of the client object 'cl' is elided in this view.
444 result = cl.QueryTags(kind, name)
# Materialize the query result so it can be sorted/printed (elided below).
445 result = list(result)
451 def AddTags(opts, args):
452 """Add tags on a given object.
454 This is a generic implementation that knows how to deal with all
455 three cases of tag objects (cluster, node, instance). The opts
456 argument is expected to contain a tag_type field denoting what
457 object type we work on.
460 kind, name = _ExtractTagsObject(opts, args)
# Merge in tags read from the --from file (modifies args in place).
461 _ExtendTags(opts, args)
# Raised when neither positional args nor the tags file supplied any tag
# (the enclosing emptiness check is elided in this view).
463 raise errors.OpPrereqError("No tags to be added")
464 op = opcodes.OpTagsSet(kind=kind, name=name, tags=args)
465 SubmitOpCode(op, opts=opts)
468 def RemoveTags(opts, args):
469 """Remove tags from a given object.
471 This is a generic implementation that knows how to deal with all
472 three cases of tag objects (cluster, node, instance). The opts
473 argument is expected to contain a tag_type field denoting what
474 object type we work on.
477 kind, name = _ExtractTagsObject(opts, args)
# Merge in tags read from the --from file (modifies args in place).
478 _ExtendTags(opts, args)
# Raised when no tags were supplied at all (the enclosing emptiness check
# is elided in this view).
480 raise errors.OpPrereqError("No tags to be removed")
481 op = opcodes.OpTagsDel(kind=kind, name=name, tags=args)
482 SubmitOpCode(op, opts=opts)
485 def check_unit(option, opt, value): # pylint: disable=W0613
486 """OptParsers custom converter for units.
490 return utils.ParseUnit(value)
491 except errors.UnitParseError, err:
492 raise OptionValueError("option %s: %s" % (opt, err))
def _SplitKeyVal(opt, data):
  """Convert a KeyVal string into a dict.

  This function will convert a key=val[,...] string into a dict. Empty
  values will be converted specially: keys which have the prefix 'no_'
  will have the value=False and the prefix stripped, keys carrying the
  UN_PREFIX get value=None (prefix stripped), and the others will have
  value=True.

  @type opt: string
  @param opt: a string holding the option name for which we process the
      data, used in building error messages
  @type data: string
  @param data: a string of the format key=val,key=val,...

  @return: {key=val, key=val}
  @raises errors.ParameterError: if there are duplicate keys

  """
  kv_dict = {}
  if not data:
    return kv_dict
  for elem in utils.UnescapeAndSplit(data, sep=","):
    if "=" in elem:
      # explicit key=value pair
      key, val = elem.split("=", 1)
    elif elem.startswith(NO_PREFIX):
      # "no_foo" means foo=False
      key, val = elem[len(NO_PREFIX):], False
    elif elem.startswith(UN_PREFIX):
      # UN_PREFIX-ed key requests a reset to the default value
      key, val = elem[len(UN_PREFIX):], None
    else:
      # a bare key means foo=True
      key, val = elem, True
    if key in kv_dict:
      raise errors.ParameterError("Duplicate key '%s' in option %s" %
                                  (key, opt))
    kv_dict[key] = val
  return kv_dict
def check_ident_key_val(option, opt, value): # pylint: disable=W0613
  """Custom parser for ident:key=val,key=val options.

  This will store the parsed values as a tuple (ident, {key: val}). As such,
  multiple uses of this option via action=append is possible.

  """
  # Split off the identifier; whatever follows the first ":" is the
  # key=value list (possibly empty).
  if ":" in value:
    ident, rest = value.split(":", 1)
  else:
    ident, rest = value, ""

  # A prefixed identifier means "remove this parameter group" (NO_PREFIX,
  # mapped to False) or "reset it to defaults" (UN_PREFIX, mapped to None);
  # passing key=value options alongside either is an error.
  for prefix, marker in ((NO_PREFIX, False), (UN_PREFIX, None)):
    if ident.startswith(prefix):
      if rest:
        msg = "Cannot pass options when removing parameter groups: %s" % value
        raise errors.ParameterError(msg)
      return (ident[len(prefix):], marker)

  return (ident, _SplitKeyVal(opt, rest))
def check_key_val(option, opt, value): # pylint: disable=W0613
  """Custom parser class for key=val,key=val options.

  This will store the parsed values as a dict {key: val}, delegating the
  actual parsing to L{_SplitKeyVal}.

  """
  parsed = _SplitKeyVal(opt, value)
  return parsed
def check_bool(option, opt, value): # pylint: disable=W0613
  """Custom parser for yes/no options.

  This will store the parsed value as either True or False; comparison is
  done case-insensitively against the constants and the literal yes/no.

  """
  normalized = value.lower()
  if normalized in (constants.VALUE_FALSE, "no"):
    return False
  if normalized in (constants.VALUE_TRUE, "yes"):
    return True
  raise errors.ParameterError("Invalid boolean value '%s'" % normalized)
584 # completion_suggestion is normally a list. Using numeric values not evaluating
585 # to False for dynamic completion.
# Opaque markers (>= 100, hence truthy) telling the completion machinery
# what kind of value an option expects; some tuple members are elided in
# this view (range(100, 107) yields seven values).
586 (OPT_COMPL_MANY_NODES,
588 OPT_COMPL_ONE_INSTANCE,
590 OPT_COMPL_ONE_IALLOCATOR,
591 OPT_COMPL_INST_ADD_NODES,
592 OPT_COMPL_ONE_NODEGROUP) = range(100, 107)
# Frozenset of all known completion markers (some members elided in this
# view), usable to validate an option's completion_suggest attribute.
594 OPT_COMPL_ALL = frozenset([
595 OPT_COMPL_MANY_NODES,
597 OPT_COMPL_ONE_INSTANCE,
599 OPT_COMPL_ONE_IALLOCATOR,
600 OPT_COMPL_INST_ADD_NODES,
601 OPT_COMPL_ONE_NODEGROUP,
605 class CliOption(Option):
606 """Custom option class for optparse.
# Extra recognized attribute carrying a completion hint (one of the
# OPT_COMPL_* markers or a list of suggestions).
609 ATTRS = Option.ATTRS + [
610 "completion_suggest",
# Custom value types in addition to optparse's builtins (the tuple of type
# names is elided in this view).
612 TYPES = Option.TYPES + (
# Map each custom type name to the converter function defined above;
# optparse calls these to turn the raw string into the final value.
618 TYPE_CHECKER = Option.TYPE_CHECKER.copy()
619 TYPE_CHECKER["identkeyval"] = check_ident_key_val
620 TYPE_CHECKER["keyval"] = check_key_val
621 TYPE_CHECKER["unit"] = check_unit
622 TYPE_CHECKER["bool"] = check_bool
625 # optparse.py sets make_option, so we do it for our own option class, too
# Factory alias used by all the *_OPT declarations below.
626 cli_option = CliOption
# Repeatable debug flag; each occurrence bumps the debug level by one.
DEBUG_OPT = cli_option("-d", "--debug", dest="debug",
                       action="count", default=0,
                       help="Increase debugging level")
# Suppress the column-header row in tabular output.
NOHDR_OPT = cli_option("--no-headers", dest="no_headers",
                       action="store_true", default=False,
                       help="Don't display column headers")
# Custom field separator for tabular output (default: a single space).
SEP_OPT = cli_option("--separator", dest="separator",
                     action="store", default=None,
                     help=("Separator between output fields"
                           " (defaults to one space)"))
643 USEUNITS_OPT = cli_option("--units", default=None,
644 dest="units", choices=("h", "m", "g", "t"),
645 help="Specify units for output (one of h/m/g/t)")
647 FIELDS_OPT = cli_option("-o", "--output", dest="output", action="store",
648 type="string", metavar="FIELDS",
649 help="Comma separated list of output fields")
# Generic "force the operation" switch shared by several commands.
FORCE_OPT = cli_option("-f", "--force", dest="force",
                       action="store_true", default=False,
                       help="Force the operation")
# Skip the interactive confirmation prompt.
CONFIRM_OPT = cli_option("--yes", dest="confirm",
                         action="store_true", default=False,
                         help="Do not require confirmation")
657 IGNORE_OFFLINE_OPT = cli_option("--ignore-offline", dest="ignore_offline",
658 action="store_true", default=False,
659 help=("Ignore offline nodes and do as much"
662 TAG_ADD_OPT = cli_option("--tags", dest="tags",
663 default=None, help="Comma-separated list of instance"
666 TAG_SRC_OPT = cli_option("--from", dest="tags_source",
667 default=None, help="File with tag names")
669 SUBMIT_OPT = cli_option("--submit", dest="submit_only",
670 default=False, action="store_true",
671 help=("Submit the job and return the job ID, but"
672 " don't wait for the job to finish"))
674 SYNC_OPT = cli_option("--sync", dest="do_locking",
675 default=False, action="store_true",
676 help=("Grab locks while doing the queries"
677 " in order to ensure more consistent results"))
# Run only an operation's pre-execution checks without executing it.
# Fix: help text had a doubled word ("verify it it could be").
DRY_RUN_OPT = cli_option("--dry-run", default=False,
                         action="store_true", dest="dry_run",
                         help=("Do not execute the operation, just run the"
                               " check steps and verify if it could be"
                               " executed"))
685 VERBOSE_OPT = cli_option("-v", "--verbose", default=False,
687 help="Increase the verbosity of the operation")
689 DEBUG_SIMERR_OPT = cli_option("--debug-simulate-errors", default=False,
690 action="store_true", dest="simulate_errors",
691 help="Debugging option that makes the operation"
692 " treat most runtime checks as failed")
694 NWSYNC_OPT = cli_option("--no-wait-for-sync", dest="wait_for_sync",
695 default=True, action="store_false",
696 help="Don't wait for sync (DANGEROUS!)")
698 ONLINE_INST_OPT = cli_option("--online", dest="online_inst",
699 action="store_true", default=False,
700 help="Enable offline instance")
702 OFFLINE_INST_OPT = cli_option("--offline", dest="offline_inst",
703 action="store_true", default=False,
704 help="Disable down instance")
706 DISK_TEMPLATE_OPT = cli_option("-t", "--disk-template", dest="disk_template",
707 help=("Custom disk setup (%s)" %
708 utils.CommaJoin(constants.DISK_TEMPLATES)),
709 default=None, metavar="TEMPL",
710 choices=list(constants.DISK_TEMPLATES))
712 NONICS_OPT = cli_option("--no-nics", default=False, action="store_true",
713 help="Do not create any network cards for"
716 FILESTORE_DIR_OPT = cli_option("--file-storage-dir", dest="file_storage_dir",
717 help="Relative path under default cluster-wide"
718 " file storage dir to store file-based disks",
719 default=None, metavar="<DIR>")
721 FILESTORE_DRIVER_OPT = cli_option("--file-driver", dest="file_driver",
722 help="Driver to use for image files",
723 default="loop", metavar="<DRIVER>",
724 choices=list(constants.FILE_DRIVER))
726 IALLOCATOR_OPT = cli_option("-I", "--iallocator", metavar="<NAME>",
727 help="Select nodes for the instance automatically"
728 " using the <NAME> iallocator plugin",
729 default=None, type="string",
730 completion_suggest=OPT_COMPL_ONE_IALLOCATOR)
732 DEFAULT_IALLOCATOR_OPT = cli_option("-I", "--default-iallocator",
734 help="Set the default instance allocator plugin",
735 default=None, type="string",
736 completion_suggest=OPT_COMPL_ONE_IALLOCATOR)
738 OS_OPT = cli_option("-o", "--os-type", dest="os", help="What OS to run",
740 completion_suggest=OPT_COMPL_ONE_OS)
742 OSPARAMS_OPT = cli_option("-O", "--os-parameters", dest="osparams",
743 type="keyval", default={},
744 help="OS parameters")
746 FORCE_VARIANT_OPT = cli_option("--force-variant", dest="force_variant",
747 action="store_true", default=False,
748 help="Force an unknown variant")
750 NO_INSTALL_OPT = cli_option("--no-install", dest="no_install",
751 action="store_true", default=False,
752 help="Do not install the OS (will"
755 BACKEND_OPT = cli_option("-B", "--backend-parameters", dest="beparams",
756 type="keyval", default={},
757 help="Backend parameters")
759 HVOPTS_OPT = cli_option("-H", "--hypervisor-parameters", type="keyval",
760 default={}, dest="hvparams",
761 help="Hypervisor parameters")
763 DISK_PARAMS_OPT = cli_option("-D", "--disk-parameters", dest="diskparams",
764 help="Disk template parameters, in the format"
765 " template:option=value,option=value,...",
766 type="identkeyval", action="append", default=[])
768 SPECS_MEM_SIZE_OPT = cli_option("--specs-mem-size", dest="ispecs_mem_size",
769 type="keyval", default={},
770 help="Memory count specs: min, max, std"
773 SPECS_CPU_COUNT_OPT = cli_option("--specs-cpu-count", dest="ispecs_cpu_count",
774 type="keyval", default={},
775 help="CPU count specs: min, max, std")
777 SPECS_DISK_COUNT_OPT = cli_option("--specs-disk-count",
778 dest="ispecs_disk_count",
779 type="keyval", default={},
780 help="Disk count specs: min, max, std")
782 SPECS_DISK_SIZE_OPT = cli_option("--specs-disk-size", dest="ispecs_disk_size",
783 type="keyval", default={},
784 help="Disk size specs: min, max, std (in MB)")
786 SPECS_NIC_COUNT_OPT = cli_option("--specs-nic-count", dest="ispecs_nic_count",
787 type="keyval", default={},
788 help="NIC count specs: min, max, std")
790 HYPERVISOR_OPT = cli_option("-H", "--hypervisor-parameters", dest="hypervisor",
791 help="Hypervisor and hypervisor options, in the"
792 " format hypervisor:option=value,option=value,...",
793 default=None, type="identkeyval")
795 HVLIST_OPT = cli_option("-H", "--hypervisor-parameters", dest="hvparams",
796 help="Hypervisor and hypervisor options, in the"
797 " format hypervisor:option=value,option=value,...",
798 default=[], action="append", type="identkeyval")
800 NOIPCHECK_OPT = cli_option("--no-ip-check", dest="ip_check", default=True,
801 action="store_false",
802 help="Don't check that the instance's IP"
805 NONAMECHECK_OPT = cli_option("--no-name-check", dest="name_check",
806 default=True, action="store_false",
807 help="Don't check that the instance's name"
810 NET_OPT = cli_option("--net",
811 help="NIC parameters", default=[],
812 dest="nics", action="append", type="identkeyval")
814 DISK_OPT = cli_option("--disk", help="Disk parameters", default=[],
815 dest="disks", action="append", type="identkeyval")
817 DISKIDX_OPT = cli_option("--disks", dest="disks", default=None,
818 help="Comma-separated list of disks"
819 " indices to act on (e.g. 0,2) (optional,"
820 " defaults to all disks)")
822 OS_SIZE_OPT = cli_option("-s", "--os-size", dest="sd_size",
823 help="Enforces a single-disk configuration using the"
824 " given disk size, in MiB unless a suffix is used",
825 default=None, type="unit", metavar="<size>")
827 IGNORE_CONSIST_OPT = cli_option("--ignore-consistency",
828 dest="ignore_consistency",
829 action="store_true", default=False,
830 help="Ignore the consistency of the disks on"
833 ALLOW_FAILOVER_OPT = cli_option("--allow-failover",
834 dest="allow_failover",
835 action="store_true", default=False,
836 help="If migration is not possible fallback to"
839 NONLIVE_OPT = cli_option("--non-live", dest="live",
840 default=True, action="store_false",
841 help="Do a non-live migration (this usually means"
842 " freeze the instance, save the state, transfer and"
843 " only then resume running on the secondary node)")
# Override the cluster/hypervisor default migration mode for one operation.
# Fix: help text was missing the closing parenthesis after "non-live".
MIGRATION_MODE_OPT = cli_option("--migration-mode", dest="migration_mode",
                                default=None,
                                choices=list(constants.HT_MIGRATION_MODES),
                                help="Override default migration mode (choose"
                                " either live or non-live)")
851 NODE_PLACEMENT_OPT = cli_option("-n", "--node", dest="node",
852 help="Target node and optional secondary node",
853 metavar="<pnode>[:<snode>]",
854 completion_suggest=OPT_COMPL_INST_ADD_NODES)
856 NODE_LIST_OPT = cli_option("-n", "--node", dest="nodes", default=[],
857 action="append", metavar="<node>",
858 help="Use only this node (can be used multiple"
859 " times, if not given defaults to all nodes)",
860 completion_suggest=OPT_COMPL_ONE_NODE)
862 NODEGROUP_OPT_NAME = "--node-group"
863 NODEGROUP_OPT = cli_option("-g", NODEGROUP_OPT_NAME,
865 help="Node group (name or uuid)",
866 metavar="<nodegroup>",
867 default=None, type="string",
868 completion_suggest=OPT_COMPL_ONE_NODEGROUP)
870 SINGLE_NODE_OPT = cli_option("-n", "--node", dest="node", help="Target node",
872 completion_suggest=OPT_COMPL_ONE_NODE)
874 NOSTART_OPT = cli_option("--no-start", dest="start", default=True,
875 action="store_false",
876 help="Don't start the instance after creation")
878 SHOWCMD_OPT = cli_option("--show-cmd", dest="show_command",
879 action="store_true", default=False,
880 help="Show command instead of executing it")
# Recover from a previously failed migration instead of migrating.
# Fix: a trailing space before the string-literal join produced a double
# space in the help output ("...traffic and  disrupt...").
CLEANUP_OPT = cli_option("--cleanup", dest="cleanup",
                         default=False, action="store_true",
                         help="Instead of performing the migration, try to"
                         " recover from a failed cleanup. This is safe"
                         " to run even if the instance is healthy, but it"
                         " will create extra replication traffic and"
                         " disrupt briefly the replication (like during the"
                         " migration)")
891 STATIC_OPT = cli_option("-s", "--static", dest="static",
892 action="store_true", default=False,
893 help="Only show configuration data, not runtime data")
895 ALL_OPT = cli_option("--all", dest="show_all",
896 default=False, action="store_true",
897 help="Show info on all instances on the cluster."
898 " This can take a long time to run, use wisely")
900 SELECT_OS_OPT = cli_option("--select-os", dest="select_os",
901 action="store_true", default=False,
902 help="Interactive OS reinstall, lists available"
903 " OS templates for selection")
905 IGNORE_FAILURES_OPT = cli_option("--ignore-failures", dest="ignore_failures",
906 action="store_true", default=False,
907 help="Remove the instance from the cluster"
908 " configuration even if there are failures"
909 " during the removal process")
911 IGNORE_REMOVE_FAILURES_OPT = cli_option("--ignore-remove-failures",
912 dest="ignore_remove_failures",
913 action="store_true", default=False,
914 help="Remove the instance from the"
915 " cluster configuration even if there"
916 " are failures during the removal"
919 REMOVE_INSTANCE_OPT = cli_option("--remove-instance", dest="remove_instance",
920 action="store_true", default=False,
921 help="Remove the instance from the cluster")
923 DST_NODE_OPT = cli_option("-n", "--target-node", dest="dst_node",
924 help="Specifies the new node for the instance",
925 metavar="NODE", default=None,
926 completion_suggest=OPT_COMPL_ONE_NODE)
928 NEW_SECONDARY_OPT = cli_option("-n", "--new-secondary", dest="dst_node",
929 help="Specifies the new secondary node",
930 metavar="NODE", default=None,
931 completion_suggest=OPT_COMPL_ONE_NODE)
933 ON_PRIMARY_OPT = cli_option("-p", "--on-primary", dest="on_primary",
934 default=False, action="store_true",
935 help="Replace the disk(s) on the primary"
936 " node (applies only to internally mirrored"
937 " disk templates, e.g. %s)" %
938 utils.CommaJoin(constants.DTS_INT_MIRROR))
940 ON_SECONDARY_OPT = cli_option("-s", "--on-secondary", dest="on_secondary",
941 default=False, action="store_true",
942 help="Replace the disk(s) on the secondary"
943 " node (applies only to internally mirrored"
944 " disk templates, e.g. %s)" %
945 utils.CommaJoin(constants.DTS_INT_MIRROR))
947 AUTO_PROMOTE_OPT = cli_option("--auto-promote", dest="auto_promote",
948 default=False, action="store_true",
949 help="Lock all nodes and auto-promote as needed"
952 AUTO_REPLACE_OPT = cli_option("-a", "--auto", dest="auto",
953 default=False, action="store_true",
954 help="Automatically replace faulty disks"
955 " (applies only to internally mirrored"
956 " disk templates, e.g. %s)" %
957 utils.CommaJoin(constants.DTS_INT_MIRROR))
959 IGNORE_SIZE_OPT = cli_option("--ignore-size", dest="ignore_size",
960 default=False, action="store_true",
961 help="Ignore current recorded size"
962 " (useful for forcing activation when"
963 " the recorded size is wrong)")
965 SRC_NODE_OPT = cli_option("--src-node", dest="src_node", help="Source node",
967 completion_suggest=OPT_COMPL_ONE_NODE)
969 SRC_DIR_OPT = cli_option("--src-dir", dest="src_dir", help="Source directory",
972 SECONDARY_IP_OPT = cli_option("-s", "--secondary-ip", dest="secondary_ip",
973 help="Specify the secondary ip for the node",
974 metavar="ADDRESS", default=None)
976 READD_OPT = cli_option("--readd", dest="readd",
977 default=False, action="store_true",
978 help="Readd old node after replacing it")
980 NOSSH_KEYCHECK_OPT = cli_option("--no-ssh-key-check", dest="ssh_key_check",
981 default=True, action="store_false",
982 help="Disable SSH key fingerprint checking")
984 NODE_FORCE_JOIN_OPT = cli_option("--force-join", dest="force_join",
985 default=False, action="store_true",
986 help="Force the joining of a node")
988 MC_OPT = cli_option("-C", "--master-candidate", dest="master_candidate",
989 type="bool", default=None, metavar=_YORNO,
990 help="Set the master_candidate flag on the node")
992 OFFLINE_OPT = cli_option("-O", "--offline", dest="offline", metavar=_YORNO,
993 type="bool", default=None,
994 help=("Set the offline flag on the node"
995 " (cluster does not communicate with offline"
998 DRAINED_OPT = cli_option("-D", "--drained", dest="drained", metavar=_YORNO,
999 type="bool", default=None,
1000 help=("Set the drained flag on the node"
1001 " (excluded from allocation operations)"))
1003 CAPAB_MASTER_OPT = cli_option("--master-capable", dest="master_capable",
1004 type="bool", default=None, metavar=_YORNO,
1005 help="Set the master_capable flag on the node")
1007 CAPAB_VM_OPT = cli_option("--vm-capable", dest="vm_capable",
1008 type="bool", default=None, metavar=_YORNO,
1009 help="Set the vm_capable flag on the node")
1011 ALLOCATABLE_OPT = cli_option("--allocatable", dest="allocatable",
1012 type="bool", default=None, metavar=_YORNO,
1013 help="Set the allocatable flag on a volume")
1015 NOLVM_STORAGE_OPT = cli_option("--no-lvm-storage", dest="lvm_storage",
1016 help="Disable support for lvm based instances"
1018 action="store_false", default=True)
1020 ENABLED_HV_OPT = cli_option("--enabled-hypervisors",
1021 dest="enabled_hypervisors",
1022 help="Comma-separated list of hypervisors",
1023 type="string", default=None)
1025 NIC_PARAMS_OPT = cli_option("-N", "--nic-parameters", dest="nicparams",
1026 type="keyval", default={},
1027 help="NIC parameters")
1029 CP_SIZE_OPT = cli_option("-C", "--candidate-pool-size", default=None,
1030 dest="candidate_pool_size", type="int",
1031 help="Set the candidate pool size")
1033 VG_NAME_OPT = cli_option("--vg-name", dest="vg_name",
1034 help=("Enables LVM and specifies the volume group"
1035 " name (cluster-wide) for disk allocation"
1036 " [%s]" % constants.DEFAULT_VG),
1037 metavar="VG", default=None)
1039 YES_DOIT_OPT = cli_option("--yes-do-it", "--ya-rly", dest="yes_do_it",
1040 help="Destroy cluster", action="store_true")
1042 NOVOTING_OPT = cli_option("--no-voting", dest="no_voting",
1043 help="Skip node agreement check (dangerous)",
1044 action="store_true", default=False)
1046 MAC_PREFIX_OPT = cli_option("-m", "--mac-prefix", dest="mac_prefix",
1047 help="Specify the mac prefix for the instance IP"
1048 " addresses, in the format XX:XX:XX",
1052 MASTER_NETDEV_OPT = cli_option("--master-netdev", dest="master_netdev",
1053 help="Specify the node interface (cluster-wide)"
1054 " on which the master IP address will be added"
1055 " (cluster init default: %s)" %
1056 constants.DEFAULT_BRIDGE,
1060 MASTER_NETMASK_OPT = cli_option("--master-netmask", dest="master_netmask",
1061 help="Specify the netmask of the master IP",
1065 USE_EXTERNAL_MIP_SCRIPT = cli_option("--use-external-mip-script",
1066 dest="use_external_mip_script",
1067 help="Specify whether to run a user-provided"
1068 " script for the master IP address turnup and"
1069 " turndown operations",
1070 type="bool", metavar=_YORNO, default=None)
1072 GLOBAL_FILEDIR_OPT = cli_option("--file-storage-dir", dest="file_storage_dir",
1073 help="Specify the default directory (cluster-"
1074 "wide) for storing the file-based disks [%s]" %
1075 constants.DEFAULT_FILE_STORAGE_DIR,
1077 default=constants.DEFAULT_FILE_STORAGE_DIR)
1079 GLOBAL_SHARED_FILEDIR_OPT = cli_option("--shared-file-storage-dir",
1080 dest="shared_file_storage_dir",
1081 help="Specify the default directory (cluster-"
1082 "wide) for storing the shared file-based"
1084 constants.DEFAULT_SHARED_FILE_STORAGE_DIR,
1085 metavar="SHAREDDIR",
1086 default=constants.DEFAULT_SHARED_FILE_STORAGE_DIR)
1088 NOMODIFY_ETCHOSTS_OPT = cli_option("--no-etc-hosts", dest="modify_etc_hosts",
1089 help="Don't modify /etc/hosts",
1090 action="store_false", default=True)
1092 NOMODIFY_SSH_SETUP_OPT = cli_option("--no-ssh-init", dest="modify_ssh_setup",
1093 help="Don't initialize SSH keys",
1094 action="store_false", default=True)
1096 ERROR_CODES_OPT = cli_option("--error-codes", dest="error_codes",
1097 help="Enable parseable error messages",
1098 action="store_true", default=False)
1100 NONPLUS1_OPT = cli_option("--no-nplus1-mem", dest="skip_nplusone_mem",
1101 help="Skip N+1 memory redundancy tests",
1102 action="store_true", default=False)
1104 REBOOT_TYPE_OPT = cli_option("-t", "--type", dest="reboot_type",
1105 help="Type of reboot: soft/hard/full",
1106 default=constants.INSTANCE_REBOOT_HARD,
1108 choices=list(constants.REBOOT_TYPES))
1110 IGNORE_SECONDARIES_OPT = cli_option("--ignore-secondaries",
1111 dest="ignore_secondaries",
1112 default=False, action="store_true",
1113 help="Ignore errors from secondaries")
1115 NOSHUTDOWN_OPT = cli_option("--noshutdown", dest="shutdown",
1116 action="store_false", default=True,
1117 help="Don't shutdown the instance (unsafe)")
1119 TIMEOUT_OPT = cli_option("--timeout", dest="timeout", type="int",
1120 default=constants.DEFAULT_SHUTDOWN_TIMEOUT,
1121 help="Maximum time to wait")
1123 SHUTDOWN_TIMEOUT_OPT = cli_option("--shutdown-timeout",
1124 dest="shutdown_timeout", type="int",
1125 default=constants.DEFAULT_SHUTDOWN_TIMEOUT,
1126 help="Maximum time to wait for instance shutdown")
1128 INTERVAL_OPT = cli_option("--interval", dest="interval", type="int",
1130 help=("Number of seconds between repetions of the"
1133 EARLY_RELEASE_OPT = cli_option("--early-release",
1134 dest="early_release", default=False,
1135 action="store_true",
1136 help="Release the locks on the secondary"
1139 NEW_CLUSTER_CERT_OPT = cli_option("--new-cluster-certificate",
1140 dest="new_cluster_cert",
1141 default=False, action="store_true",
1142 help="Generate a new cluster certificate")
1144 RAPI_CERT_OPT = cli_option("--rapi-certificate", dest="rapi_cert",
1146 help="File containing new RAPI certificate")
1148 NEW_RAPI_CERT_OPT = cli_option("--new-rapi-certificate", dest="new_rapi_cert",
1149 default=None, action="store_true",
1150 help=("Generate a new self-signed RAPI"
1153 SPICE_CERT_OPT = cli_option("--spice-certificate", dest="spice_cert",
1155 help="File containing new SPICE certificate")
1157 SPICE_CACERT_OPT = cli_option("--spice-ca-certificate", dest="spice_cacert",
1159 help="File containing the certificate of the CA"
1160 " which signed the SPICE certificate")
1162 NEW_SPICE_CERT_OPT = cli_option("--new-spice-certificate",
1163 dest="new_spice_cert", default=None,
1164 action="store_true",
1165 help=("Generate a new self-signed SPICE"
1168 NEW_CONFD_HMAC_KEY_OPT = cli_option("--new-confd-hmac-key",
1169 dest="new_confd_hmac_key",
1170 default=False, action="store_true",
1171 help=("Create a new HMAC key for %s" %
# Load a replacement cluster domain secret from the given file.
# Fix: help text had a doubled word ("Load new new cluster domain").
CLUSTER_DOMAIN_SECRET_OPT = cli_option("--cluster-domain-secret",
                                       dest="cluster_domain_secret",
                                       default=None,
                                       help=("Load new cluster domain"
                                             " secret from file"))
# --new-cluster-domain-secret: flag requesting creation of a brand-new
# cluster domain secret.
NEW_CLUSTER_DOMAIN_SECRET_OPT = cli_option("--new-cluster-domain-secret",
                                           dest="new_cluster_domain_secret",
                                           default=False, action="store_true",
                                           help=("Create a new cluster domain"

USE_REPL_NET_OPT = cli_option("--use-replication-network",
                              dest="use_replication_network",
                              help="Whether to use the replication network"
                              " for talking to the nodes",
                              action="store_true", default=False)

MAINTAIN_NODE_HEALTH_OPT = \
    cli_option("--maintain-node-health", dest="maintain_node_health",
               metavar=_YORNO, default=None, type="bool",
               help="Configure the cluster to automatically maintain node"
               " health, by shutting down unknown instances, shutting down"
               " unknown DRBD devices, etc.")

IDENTIFY_DEFAULTS_OPT = \
    cli_option("--identify-defaults", dest="identify_defaults",
               default=False, action="store_true",
               help="Identify which saved instance parameters are equal to"
               " the current cluster defaults and set them as such, instead"
               " of marking them as overridden")

# Cluster-wide user-id pool management options.
UIDPOOL_OPT = cli_option("--uid-pool", default=None,
                         action="store", dest="uid_pool",
                         help=("A list of user-ids or user-id"
                               " ranges separated by commas"))

ADD_UIDS_OPT = cli_option("--add-uids", default=None,
                          action="store", dest="add_uids",
                          help=("A list of user-ids or user-id"
                                " ranges separated by commas, to be"
                                " added to the user-id pool"))

REMOVE_UIDS_OPT = cli_option("--remove-uids", default=None,
                             action="store", dest="remove_uids",
                             help=("A list of user-ids or user-id"
                                   " ranges separated by commas, to be"
                                   " removed from the user-id pool"))

RESERVED_LVS_OPT = cli_option("--reserved-lvs", default=None,
                              action="store", dest="reserved_lvs",
                              help=("A comma-separated list of reserved"
                                    " logical volumes names, that will be"
                                    " ignored by cluster verify"))

ROMAN_OPT = cli_option("--roman",
                       dest="roman_integers", default=False,
                       action="store_true",
                       help="Use roman numbers for positive integers")

# DRBD-related cluster options.
DRBD_HELPER_OPT = cli_option("--drbd-usermode-helper", dest="drbd_helper",
                             action="store", default=None,
                             help="Specifies usermode helper for DRBD")

NODRBD_STORAGE_OPT = cli_option("--no-drbd-storage", dest="drbd_storage",
                                action="store_false", default=True,
                                help="Disable support for DRBD")

PRIMARY_IP_VERSION_OPT = \
    cli_option("--primary-ip-version", default=constants.IP4_VERSION,
               action="store", dest="primary_ip_version",
               metavar="%d|%d" % (constants.IP4_VERSION,
                                  constants.IP6_VERSION),
               help="Cluster-wide IP version for primary IP")

PRIORITY_OPT = cli_option("--priority", default=None, dest="priority",
                          metavar="|".join(name for name, _ in _PRIORITY_NAMES),
                          choices=_PRIONAME_TO_VALUE.keys(),
                          help="Priority for opcode processing")

# OS definition flags (hidden/blacklisted status of an OS).
HID_OS_OPT = cli_option("--hidden", dest="hidden",
                        type="bool", default=None, metavar=_YORNO,
                        help="Sets the hidden flag on the OS")

BLK_OS_OPT = cli_option("--blacklisted", dest="blacklisted",
                        type="bool", default=None, metavar=_YORNO,
                        help="Sets the blacklisted flag on the OS")

PREALLOC_WIPE_DISKS_OPT = cli_option("--prealloc-wipe-disks", default=None,
                                     type="bool", metavar=_YORNO,
                                     dest="prealloc_wipe_disks",
                                     help=("Wipe disks prior to instance"

NODE_PARAMS_OPT = cli_option("--node-parameters", dest="ndparams",
                             type="keyval", default=None,
                             help="Node parameters")

ALLOC_POLICY_OPT = cli_option("--alloc-policy", dest="alloc_policy",
                              action="store", metavar="POLICY", default=None,
                              help="Allocation policy for the node group")

NODE_POWERED_OPT = cli_option("--node-powered", default=None,
                              type="bool", metavar=_YORNO,
                              dest="node_powered",
                              help="Specify if the SoR for node is powered")

# Out-of-band (OOB) helper tuning.
OOB_TIMEOUT_OPT = cli_option("--oob-timeout", dest="oob_timeout", type="int",
                             default=constants.OOB_TIMEOUT,
                             help="Maximum time to wait for out-of-band helper")

POWER_DELAY_OPT = cli_option("--power-delay", dest="power_delay", type="float",
                             default=constants.OOB_POWER_DELAY,
                             help="Time in seconds to wait between power-ons")

FORCE_FILTER_OPT = cli_option("-F", "--filter", dest="force_filter",
                              action="store_true", default=False,
                              help=("Whether command argument should be treated"

NO_REMEMBER_OPT = cli_option("--no-remember",
                             action="store_true", default=False,
                             help="Perform but do not record the change"
                             " in the configuration")

# Evacuation scope selectors.
PRIMARY_ONLY_OPT = cli_option("-p", "--primary-only",
                              default=False, action="store_true",
                              help="Evacuate primary instances only")

SECONDARY_ONLY_OPT = cli_option("-s", "--secondary-only",
                                default=False, action="store_true",
                                help="Evacuate secondary instances only"
                                " (applies only to internally mirrored"
                                " disk templates, e.g. %s)" %
                                utils.CommaJoin(constants.DTS_INT_MIRROR))

STARTUP_PAUSED_OPT = cli_option("--paused", dest="startup_paused",
                                action="store_true", default=False,
                                help="Pause instance at startup")

TO_GROUP_OPT = cli_option("--to", dest="to", metavar="<group>",
                          help="Destination node group (name or uuid)",
                          default=None, action="append",
                          completion_suggest=OPT_COMPL_ONE_NODEGROUP)

IGNORE_ERRORS_OPT = cli_option("-I", "--ignore-errors", default=[],
                               action="append", dest="ignore_errors",
                               choices=list(constants.CV_ALL_ECODES_STRINGS),
                               help="Error code to be ignored")

DISK_STATE_OPT = cli_option("--disk-state", default=[], dest="disk_state",
                            help=("Specify disk state information in the format"
                                  " storage_type/identifier:option=value,..."),

HV_STATE_OPT = cli_option("--hypervisor-state", default=[], dest="hv_state",
                          help=("Specify hypervisor state information in the"
                                " format hypervisor:option=value,..."),

#: Options provided by all commands
COMMON_OPTS = [DEBUG_OPT]

# common options for creating instances. add and import then add their own
COMMON_CREATE_OPTS = [
  FILESTORE_DRIVER_OPT,
def _ParseArgs(argv, commands, aliases, env_override):
  """Parser for the command line arguments.

  This function parses the arguments and returns the function which
  must be executed together with its (modified) arguments.

  @param argv: the command line
  @param commands: dictionary with special contents, see the design
      doc for cmdline handling
  @param aliases: dictionary with command aliases {'alias': 'target, ...}
  @param env_override: list of env variables allowed for default args

  """
  # Every override environment variable must refer to a known command
  assert not (env_override - set(commands))

  binary = "<command>"
  binary = argv[0].split("/")[-1]

  if len(argv) > 1 and argv[1] == "--version":
    ToStdout("%s (ganeti %s) %s", binary, constants.VCS_VERSION,
             constants.RELEASE_VERSION)
    # Quit right away. That way we don't have to care about this special
    # argument. optparse.py does it the same.

  if len(argv) < 2 or not (argv[1] in commands or
                           argv[1] in aliases):
    # let's do a nice thing: print usage plus the full command list
    sortedcmds = commands.keys()

    ToStdout("Usage: %s {command} [options...] [argument...]", binary)
    ToStdout("%s <command> --help to see details, or man %s", binary, binary)

    # compute the max line length for cmd + usage
    mlen = max([len(" %s" % cmd) for cmd in commands])
    mlen = min(60, mlen) # should not get here...

    # and format a nice command list
    ToStdout("Commands:")
    for cmd in sortedcmds:
      cmdstr = " %s" % (cmd,)
      help_text = commands[cmd][4]
      # wrap the help text so continuation lines align under the first
      help_lines = textwrap.wrap(help_text, 79 - 3 - mlen)
      ToStdout("%-*s - %s", mlen, cmdstr, help_lines.pop(0))
      for line in help_lines:
        ToStdout("%-*s   %s", mlen, "", line)

    return None, None, None

  # get command, unalias it, and look it up in commands
  raise errors.ProgrammerError("Alias '%s' overrides an existing"
  if aliases[cmd] not in commands:
    raise errors.ProgrammerError("Alias '%s' maps to non-existing"
                                 " command '%s'" % (cmd, aliases[cmd]))

  # Allow default arguments to be injected from the environment, e.g.
  # GNT_INSTANCE_LIST for "gnt-instance list"
  if cmd in env_override:
    args_env_name = ("%s_%s" % (binary.replace("-", "_"), cmd)).upper()
    env_args = os.environ.get(args_env_name)
    argv = utils.InsertAtPos(argv, 1, shlex.split(env_args))

  func, args_def, parser_opts, usage, description = commands[cmd]
  parser = OptionParser(option_list=parser_opts + COMMON_OPTS,
                        description=description,
                        formatter=TitledHelpFormatter(),
                        usage="%%prog %s %s" % (cmd, usage))
  parser.disable_interspersed_args()
  options, args = parser.parse_args(args=argv[1:])

  if not _CheckArguments(cmd, args_def, args):
    return None, None, None

  return func, options, args
def _CheckArguments(cmd, args_def, args):
  """Verifies the arguments using the argument definition.

  Algorithm:

    1. Abort with error if values specified by user but none expected.

    1. For each argument in definition

      1. Keep running count of minimum number of values (min_count)
      1. Keep running count of maximum number of values (max_count)
      1. If it has an unlimited number of values

        1. Abort with error if it's not the last argument in the definition

    1. If last argument has limited number of values

      1. Abort with error if number of values doesn't match or is too large

    1. Abort with error if user didn't pass enough values (min_count)

  """
  if args and not args_def:
    ToStderr("Error: Command %s expects no arguments", cmd)

  last_idx = len(args_def) - 1

  # Accumulate min/max counts; None means "no constraint yet"/"unlimited"
  for idx, arg in enumerate(args_def):
    if min_count is None:
    elif arg.min is not None:
      min_count += arg.min

    if max_count is None:
    elif arg.max is not None:
      max_count += arg.max

      check_max = (arg.max is not None)

    elif arg.max is None:
      # An unlimited argument is only valid in the last position
      raise errors.ProgrammerError("Only the last argument can have max=None")

  # Command with exact number of arguments
  if (min_count is not None and max_count is not None and
      min_count == max_count and len(args) != min_count):
    ToStderr("Error: Command %s expects %d argument(s)", cmd, min_count)

  # Command with limited number of arguments
  if max_count is not None and len(args) > max_count:
    ToStderr("Error: Command %s expects only %d argument(s)",

  # Command with some required arguments
  if min_count is not None and len(args) < min_count:
    ToStderr("Error: Command %s expects at least %d argument(s)",
def SplitNodeOption(value):
  """Splits the value of a --node option.

  @return: a (primary, secondary) pair; when no colon-separated second
      part is present, the second element is None

  """
  # No value, or no separator: only a primary node was given
  if not value or ":" not in value:
    return (value, None)
  # Split on the first colon only; the remainder stays in one piece
  return value.split(":", 1)
def CalculateOSNames(os_name, os_variants):
  """Calculates all the names an OS can be called, according to its variants.

  @type os_name: string
  @param os_name: base name of the os
  @type os_variants: list or None
  @param os_variants: list of supported variants
  @rtype: list
  @return: list of valid names

  """
  # Each variant yields a "<os>+<variant>" name
  return ["%s+%s" % (os_name, v) for v in os_variants]
def ParseFields(selected, default):
  """Parses the values of "--field"-like options.

  @type selected: string or None
  @param selected: User-selected options
  @type default: list
  @param default: Default fields

  """
  if selected is None:

  # A leading "+" means "append to the defaults" instead of replacing them
  if selected.startswith("+"):
    return default + selected[1:].split(",")

  return selected.split(",")
1570 UsesRPC = rpc.RunWithRPC
def AskUser(text, choices=None):
  """Ask the user a question.

  @param text: the question to ask

  @param choices: list with elements tuples (input_char, return_value,
      description); if not given, it will default to: [('y', True,
      'Perform the operation'), ('n', False, 'Do not perform the
      operation')]; note that the '?' char is reserved for help

  @return: one of the return values from the choices list; if input is
      not possible (i.e. not running with a tty, we return the last
      entry from the list

  """
  # Default to a plain yes/no question when no choices were given
  choices = [("y", True, "Perform the operation"),
             ("n", False, "Do not perform the operation")]
  if not choices or not isinstance(choices, list):
    raise errors.ProgrammerError("Invalid choices argument to AskUser")
  for entry in choices:
    if not isinstance(entry, tuple) or len(entry) < 3 or entry[0] == "?":
      raise errors.ProgrammerError("Invalid choices element to AskUser")

  # The last choice doubles as the fallback answer (e.g. when no tty)
  answer = choices[-1][1]
  # Re-wrap the question text to 70 columns, line by line
  for line in text.splitlines():
    new_text.append(textwrap.fill(line, 70, replace_whitespace=False))
  text = "\n".join(new_text)
  # Talk to the controlling terminal directly, not stdin/stdout
  f = file("/dev/tty", "a+")
  chars = [entry[0] for entry in choices]
  chars[-1] = "[%s]" % chars[-1]
  maps = dict([(entry[0], entry[1]) for entry in choices])
  f.write("/".join(chars))
  # Read at most one character (plus newline) as the user's answer
  line = f.readline(2).strip().lower()
  # Print the help text (one line per choice)
  for entry in choices:
    f.write(" %s - %s\n" % (entry[0], entry[2]))
class JobSubmittedException(Exception):
  """Job was submitted, client should exit.

  This exception has one argument, the ID of the job that was
  submitted. The handler should print this ID.

  This is not an error, just a structured way to exit from clients.

  """
def SendJob(ops, cl=None):
  """Function to submit an opcode without waiting for the results.

  @type ops: list
  @param ops: list of opcodes
  @type cl: luxi.Client
  @param cl: the luxi client to use for communicating with the master;
             if None, a new client will be created

  """
  job_id = cl.SubmitJob(ops)
def GenericPollJob(job_id, cbs, report_cbs):
  """Generic job-polling function.

  @type job_id: number
  @param job_id: Job ID
  @type cbs: Instance of L{JobPollCbBase}
  @param cbs: Data callbacks
  @type report_cbs: Instance of L{JobPollReportCbBase}
  @param report_cbs: Reporting callbacks

  """
  prev_job_info = None
  prev_logmsg_serial = None

  result = cbs.WaitForJobChangeOnce(job_id, ["status"], prev_job_info,
  # job not found, go away!
  raise errors.JobLost("Job with id %s lost" % job_id)

  if result == constants.JOB_NOTCHANGED:
    report_cbs.ReportNotChanged(job_id, status)

  # Split result, a tuple of (field values, log entries)
  (job_info, log_entries) = result
  (status, ) = job_info

  for log_entry in log_entries:
    (serial, timestamp, log_type, message) = log_entry
    report_cbs.ReportLogMessage(job_id, serial, timestamp,
    # Track the highest serial seen so messages aren't re-reported
    prev_logmsg_serial = max(prev_logmsg_serial, serial)

  # TODO: Handle canceled and archived jobs
  elif status in (constants.JOB_STATUS_SUCCESS,
                  constants.JOB_STATUS_ERROR,
                  constants.JOB_STATUS_CANCELING,
                  constants.JOB_STATUS_CANCELED):

  prev_job_info = job_info

  # Polling is done; fetch the final status and per-opcode results
  jobs = cbs.QueryJobs([job_id], ["status", "opstatus", "opresult"])
  raise errors.JobLost("Job with id %s lost" % job_id)

  status, opstatus, result = jobs[0]

  if status == constants.JOB_STATUS_SUCCESS:

  if status in (constants.JOB_STATUS_CANCELING, constants.JOB_STATUS_CANCELED):
    raise errors.OpExecError("Job was canceled")

  # The job failed: find the first failed opcode and raise from it
  for idx, (status, msg) in enumerate(zip(opstatus, result)):
    if status == constants.OP_STATUS_SUCCESS:
    elif status == constants.OP_STATUS_ERROR:
      errors.MaybeRaise(msg)

      raise errors.OpExecError("partial failure (opcode %d): %s" %

  raise errors.OpExecError(str(msg))

  # default failure mode
  raise errors.OpExecError(result)
class JobPollCbBase:
  """Base class for L{GenericPollJob} callbacks.

  """
    """Initializes this class.

    """

  def WaitForJobChangeOnce(self, job_id, fields,
                           prev_job_info, prev_log_serial):
    """Waits for changes on a job.

    """
    raise NotImplementedError()

  def QueryJobs(self, job_ids, fields):
    """Returns the selected fields for the selected job IDs.

    @type job_ids: list of numbers
    @param job_ids: Job IDs
    @type fields: list of strings
    @param fields: Fields

    """
    raise NotImplementedError()
class JobPollReportCbBase:
  """Base class for L{GenericPollJob} reporting callbacks.

  """
    """Initializes this class.

    """

  def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
    """Handles a log message.

    """
    raise NotImplementedError()

  def ReportNotChanged(self, job_id, status):
    """Called if a job hasn't changed in a while.

    @type job_id: number
    @param job_id: Job ID
    @type status: string or None
    @param status: Job status if available

    """
    raise NotImplementedError()
class _LuxiJobPollCb(JobPollCbBase):
  # Data callbacks backed by a luxi client talking to the master daemon

  def __init__(self, cl):
    """Initializes this class.

    """
    JobPollCbBase.__init__(self)

  def WaitForJobChangeOnce(self, job_id, fields,
                           prev_job_info, prev_log_serial):
    """Waits for changes on a job.

    """
    # Delegate straight to the luxi client
    return self.cl.WaitForJobChangeOnce(job_id, fields,
                                        prev_job_info, prev_log_serial)

  def QueryJobs(self, job_ids, fields):
    """Returns the selected fields for the selected job IDs.

    """
    return self.cl.QueryJobs(job_ids, fields)
class FeedbackFnJobPollReportCb(JobPollReportCbBase):
  # Reporting callbacks that forward log messages to a feedback function

  def __init__(self, feedback_fn):
    """Initializes this class.

    """
    JobPollReportCbBase.__init__(self)

    # Callable receiving (timestamp, log_type, log_msg) tuples
    self.feedback_fn = feedback_fn

    assert callable(feedback_fn)

  def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
    """Handles a log message.

    """
    self.feedback_fn((timestamp, log_type, log_msg))

  def ReportNotChanged(self, job_id, status):
    """Called if a job hasn't changed in a while.

    """
class StdioJobPollReportCb(JobPollReportCbBase):
  # Reporting callbacks that print progress to stdout/stderr

    """Initializes this class.

    """
    JobPollReportCbBase.__init__(self)

    # One-shot flags so each wait state is only reported once
    self.notified_queued = False
    self.notified_waitlock = False

  def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
    """Handles a log message.

    """
    ToStdout("%s %s", time.ctime(utils.MergeTime(timestamp)),
             FormatLogMessage(log_type, log_msg))

  def ReportNotChanged(self, job_id, status):
    """Called if a job hasn't changed in a while.

    """
    if status == constants.JOB_STATUS_QUEUED and not self.notified_queued:
      ToStderr("Job %s is waiting in queue", job_id)
      self.notified_queued = True

    elif status == constants.JOB_STATUS_WAITING and not self.notified_waitlock:
      ToStderr("Job %s is trying to acquire all necessary locks", job_id)
      self.notified_waitlock = True
def FormatLogMessage(log_type, log_msg):
  """Formats a job message according to its type.

  """
  # Plain ELOG_MESSAGE entries are used verbatim; anything else gets
  # stringified first
  if log_type == constants.ELOG_MESSAGE:
    text = log_msg
  else:
    text = str(log_msg)

  return utils.SafeEncode(text)
def PollJob(job_id, cl=None, feedback_fn=None, reporter=None):
  """Function to poll for the result of a job.

  @type job_id: job identified
  @param job_id: the job to poll for results
  @type cl: luxi.Client
  @param cl: the luxi client to use for communicating with the master;
             if None, a new client will be created

  """
  # Build a reporter from feedback_fn unless one was passed explicitly
  if reporter is None:
    reporter = FeedbackFnJobPollReportCb(feedback_fn)
    reporter = StdioJobPollReportCb()
  # Passing both a reporter and a feedback function is ambiguous
  raise errors.ProgrammerError("Can't specify reporter and feedback function")

  return GenericPollJob(job_id, _LuxiJobPollCb(cl), reporter)
def SubmitOpCode(op, cl=None, feedback_fn=None, opts=None, reporter=None):
  """Legacy function to submit an opcode.

  This is just a simple wrapper over the construction of the processor
  instance. It should be extended to better handle feedback and
  interaction functions.

  """
  # Apply generic options (debug, priority, ...) before submitting
  SetGenericOpcodeOpts([op], opts)

  job_id = SendJob([op], cl=cl)

  # Wait for the single-opcode job and return that opcode's result
  op_results = PollJob(job_id, cl=cl, feedback_fn=feedback_fn,

  return op_results[0]
def SubmitOrSend(op, opts, cl=None, feedback_fn=None):
  """Wrapper around SubmitOpCode or SendJob.

  This function will decide, based on the 'opts' parameter, whether to
  submit and wait for the result of the opcode (and return it), or
  whether to just send the job and print its identifier. It is used in
  order to simplify the implementation of the '--submit' option.

  It will also process the opcodes if we're sending them via SendJob
  (otherwise SubmitOpCode does it).

  """
  if opts and opts.submit_only:
    SetGenericOpcodeOpts(job, opts)
    job_id = SendJob(job, cl=cl)
    # Not an error: signals to the caller that the job was submitted
    raise JobSubmittedException(job_id)

  return SubmitOpCode(op, cl=cl, feedback_fn=feedback_fn, opts=opts)
def SetGenericOpcodeOpts(opcode_list, options):
  """Processor for generic options.

  This function updates the given opcodes based on generic command
  line options (like debug, dry-run, etc.).

  @param opcode_list: list of opcodes
  @param options: command line options or None
  @return: None (in-place modification)

  """
  for op in opcode_list:
    op.debug_level = options.debug
    # dry_run/priority are only copied when the command defines them
    if hasattr(options, "dry_run"):
      op.dry_run = options.dry_run
    if getattr(options, "priority", None) is not None:
      op.priority = _PRIONAME_TO_VALUE[options.priority]
  # TODO: Cache object?
  client = luxi.Client()
  except luxi.NoMasterError:
    # No master daemon reachable: inspect ssconf to give a better error
    ss = ssconf.SimpleStore()

    # Try to read ssconf file
    except errors.ConfigurationError:
      raise errors.OpPrereqError("Cluster not initialized or this machine is"
                                 " not part of a cluster")

    # Tell the user where the master actually is, if it's not us
    master, myself = ssconf.GetMasterAndMyself(ss=ss)
    if master != myself:
      raise errors.OpPrereqError("This is not the master node, please connect"
                                 " to node '%s' and rerun the command" %
def FormatError(err):
  """Return a formatted error message for a given error.

  This function takes an exception instance and returns a tuple
  consisting of two values: first, the recommended exit code, and
  second, a string describing the error message (not
  newline-terminated).

  """
  # Dispatch on the exception type, most specific classes first
  if isinstance(err, errors.ConfigurationError):
    txt = "Corrupt configuration file: %s" % msg
    obuf.write(txt + "\n")
    obuf.write("Aborting.")
  elif isinstance(err, errors.HooksAbort):
    obuf.write("Failure: hooks execution failed:\n")
    for node, script, out in err.args[0]:
      obuf.write("  node: %s, script: %s, output: %s\n" %
                 (node, script, out))
      obuf.write("  node: %s, script: %s (no output)\n" %
  elif isinstance(err, errors.HooksFailure):
    obuf.write("Failure: hooks general failure: %s" % msg)
  elif isinstance(err, errors.ResolverError):
    # Distinguish "can't resolve myself" from a generic resolver failure
    this_host = netutils.Hostname.GetSysName()
    if err.args[0] == this_host:
      msg = "Failure: can't resolve my own hostname ('%s')"
      msg = "Failure: can't resolve hostname '%s'"
    obuf.write(msg % err.args[0])
  elif isinstance(err, errors.OpPrereqError):
    # Two-argument form carries a structured error type as well
    if len(err.args) == 2:
      obuf.write("Failure: prerequisites not met for this"
                 " operation:\nerror type: %s, error details:\n%s" %
                 (err.args[1], err.args[0]))
      obuf.write("Failure: prerequisites not met for this"
                 " operation:\n%s" % msg)
  elif isinstance(err, errors.OpExecError):
    obuf.write("Failure: command execution error:\n%s" % msg)
  elif isinstance(err, errors.TagError):
    obuf.write("Failure: invalid tag(s) given:\n%s" % msg)
  elif isinstance(err, errors.JobQueueDrainError):
    obuf.write("Failure: the job queue is marked for drain and doesn't"
               " accept new requests\n")
  elif isinstance(err, errors.JobQueueFull):
    obuf.write("Failure: the job queue is full and doesn't accept new"
               " job submissions until old jobs are archived\n")
  elif isinstance(err, errors.TypeEnforcementError):
    obuf.write("Parameter Error: %s" % msg)
  elif isinstance(err, errors.ParameterError):
    obuf.write("Failure: unknown/wrong parameter name '%s'" % msg)
  elif isinstance(err, luxi.NoMasterError):
    obuf.write("Cannot communicate with the master daemon.\nIs it running"
               " and listening for connections?")
  elif isinstance(err, luxi.TimeoutError):
    obuf.write("Timeout while talking to the master daemon. Jobs might have"
               " been submitted and will continue to run even if the call"
               " timed out. Useful commands in this situation are \"gnt-job"
               " list\", \"gnt-job cancel\" and \"gnt-job watch\". Error:\n")
  elif isinstance(err, luxi.PermissionError):
    obuf.write("It seems you don't have permissions to connect to the"
               " master daemon.\nPlease retry as a different user.")
  elif isinstance(err, luxi.ProtocolError):
    obuf.write("Unhandled protocol error while talking to the master daemon:\n"
  elif isinstance(err, errors.JobLost):
    obuf.write("Error checking job status: %s" % msg)
  elif isinstance(err, errors.QueryFilterParseError):
    obuf.write("Error while parsing query filter: %s\n" % err.args[0])
    obuf.write("\n".join(err.GetDetails()))
  elif isinstance(err, errors.GenericError):
    obuf.write("Unhandled Ganeti error: %s" % msg)
  elif isinstance(err, JobSubmittedException):
    # Not an error: just report the submitted job's ID
    obuf.write("JobID: %s\n" % err.args[0])
    obuf.write("Unhandled exception: %s" % msg)
  return retcode, obuf.getvalue().rstrip("\n")
def GenericMain(commands, override=None, aliases=None,
                env_override=frozenset()):
  """Generic main function for all the gnt-* commands.

  @param commands: a dictionary with a special structure, see the design doc
                   for command line handling.
  @param override: if not None, we expect a dictionary with keys that will
                   override command line options; this can be used to pass
                   options from the scripts to generic functions
  @param aliases: dictionary with command aliases {'alias': 'target, ...}
  @param env_override: list of environment names which are allowed to submit
                       default args for commands

  """
  # save the program name and the entire command line for later logging
  binary = os.path.basename(sys.argv[0]) or sys.argv[0]
  if len(sys.argv) >= 2:
    binary += " " + sys.argv[1]
    old_cmdline = " ".join(sys.argv[2:])
  binary = "<unknown program>"

  func, options, args = _ParseArgs(sys.argv, commands, aliases, env_override)
  except errors.ParameterError, err:
    result, err_msg = FormatError(err)

  if func is None: # parse error

  # Apply caller-supplied option overrides on top of the parsed ones
  if override is not None:
    for key, val in override.iteritems():
      setattr(options, key, val)

  utils.SetupLogging(constants.LOG_COMMANDS, binary, debug=options.debug,
                     stderr_logging=True)

  logging.info("run with arguments '%s'", old_cmdline)
  logging.info("run with no arguments")

  result = func(options, args)
  except (errors.GenericError, luxi.ProtocolError,
          JobSubmittedException), err:
    result, err_msg = FormatError(err)
    logging.exception("Error during command processing")
  except KeyboardInterrupt:
    result = constants.EXIT_FAILURE
    ToStderr("Aborted. Note that if the operation created any jobs, they"
             " might have been submitted and"
             " will continue to run in the background.")
  except IOError, err:
    if err.errno == errno.EPIPE:
      # our terminal went away, we'll exit
      sys.exit(constants.EXIT_FAILURE)
def ParseNicOption(optvalue):
  """Parses the value of the --net option(s).

  @param optvalue: list of (index, settings-dict) pairs as produced by
      the option parser

  """
  # Highest index + 1 determines how many NIC slots we need
  nic_max = max(int(nidx[0]) + 1 for nidx in optvalue)
  except (TypeError, ValueError), err:
    raise errors.OpPrereqError("Invalid NIC index passed: %s" % str(err))

  # NOTE(review): [{}] * nic_max puts the SAME dict object in every
  # slot; safe only if the untouched empty slots are never mutated
  # in place — verify against callers
  nics = [{}] * nic_max
  for nidx, ndict in optvalue:
    if not isinstance(ndict, dict):
      raise errors.OpPrereqError("Invalid nic/%d value: expected dict,"
                                 " got %s" % (nidx, ndict))

    utils.ForceDictType(ndict, constants.INIC_PARAMS_TYPES)
def GenericInstanceCreate(mode, opts, args):
  """Add an instance to the cluster via either creation or import.

  @param mode: constants.INSTANCE_CREATE or constants.INSTANCE_IMPORT
  @param opts: the command line options selected by the user
  @type args: list
  @param args: should contain only one element, the new instance name
  @rtype: int
  @return: the desired exit code

  """
  (pnode, snode) = SplitNodeOption(opts.node)

  hypervisor, hvparams = opts.hypervisor

  nics = ParseNicOption(opts.nics)
  elif mode == constants.INSTANCE_CREATE:
    # default of one nic, all auto

  # Validate disk specification for the various mode/option combinations
  if opts.disk_template == constants.DT_DISKLESS:
    if opts.disks or opts.sd_size is not None:
      raise errors.OpPrereqError("Diskless instance but disk"
                                 " information passed")

  if (not opts.disks and not opts.sd_size
      and mode == constants.INSTANCE_CREATE):
    raise errors.OpPrereqError("No disk information specified")
  if opts.disks and opts.sd_size is not None:
    raise errors.OpPrereqError("Please use either the '--disk' or"
  if opts.sd_size is not None:
    # Legacy single-disk option: translate to the --disk format
    opts.disks = [(0, {constants.IDISK_SIZE: opts.sd_size})]

  disk_max = max(int(didx[0]) + 1 for didx in opts.disks)
  except ValueError, err:
    raise errors.OpPrereqError("Invalid disk index passed: %s" % str(err))
  # NOTE(review): [{}] * disk_max puts the SAME dict object in every
  # slot; safe only if untouched empty slots are never mutated — verify
  disks = [{}] * disk_max

  for didx, ddict in opts.disks:
    if not isinstance(ddict, dict):
      msg = "Invalid disk/%d value: expected dict, got %s" % (didx, ddict)
      raise errors.OpPrereqError(msg)
    elif constants.IDISK_SIZE in ddict:
      # "size" and "adopt" are mutually exclusive per disk
      if constants.IDISK_ADOPT in ddict:
        raise errors.OpPrereqError("Only one of 'size' and 'adopt' allowed"
                                   " (disk %d)" % didx)
      ddict[constants.IDISK_SIZE] = \
        utils.ParseUnit(ddict[constants.IDISK_SIZE])
      except ValueError, err:
        raise errors.OpPrereqError("Invalid disk size for disk %d: %s" %
    elif constants.IDISK_ADOPT in ddict:
      if mode == constants.INSTANCE_IMPORT:
        raise errors.OpPrereqError("Disk adoption not allowed for instance"
      # Adopted disks keep their existing size; 0 is a placeholder
      ddict[constants.IDISK_SIZE] = 0
      raise errors.OpPrereqError("Missing size or adoption source for"

  if opts.tags is not None:
    tags = opts.tags.split(",")

  utils.ForceDictType(opts.beparams, constants.BES_PARAMETER_COMPAT)
  utils.ForceDictType(hvparams, constants.HVS_PARAMETER_TYPES)

  # Mode-specific parameters for the opcode
  if mode == constants.INSTANCE_CREATE:
    force_variant = opts.force_variant
    no_install = opts.no_install
    identify_defaults = False
  elif mode == constants.INSTANCE_IMPORT:
    force_variant = False
    src_node = opts.src_node
    src_path = opts.src_dir
    identify_defaults = opts.identify_defaults
  raise errors.ProgrammerError("Invalid creation mode %s" % mode)

  op = opcodes.OpInstanceCreate(instance_name=instance,
                                disk_template=opts.disk_template,
                                pnode=pnode, snode=snode,
                                ip_check=opts.ip_check,
                                name_check=opts.name_check,
                                wait_for_sync=opts.wait_for_sync,
                                file_storage_dir=opts.file_storage_dir,
                                file_driver=opts.file_driver,
                                iallocator=opts.iallocator,
                                hypervisor=hypervisor,
                                beparams=opts.beparams,
                                osparams=opts.osparams,
                                force_variant=force_variant,
                                no_install=no_install,
                                identify_defaults=identify_defaults)

  SubmitOrSend(op, opts)
class _RunWhileClusterStoppedHelper:
  """Helper class for L{RunWhileClusterStopped} to simplify state management

  """
  def __init__(self, feedback_fn, cluster_name, master_node, online_nodes):
    """Initializes this class.

    @type feedback_fn: callable
    @param feedback_fn: Feedback function
    @type cluster_name: string
    @param cluster_name: Cluster name
    @type master_node: string
    @param master_node: Master node name
    @type online_nodes: list
    @param online_nodes: List of names of online nodes

    """
    self.feedback_fn = feedback_fn
    self.cluster_name = cluster_name
    self.master_node = master_node
    self.online_nodes = online_nodes

    # SSH runner used for commands on remote (non-master) nodes
    self.ssh = ssh.SshRunner(self.cluster_name)

    self.nonmaster_nodes = [name for name in online_nodes
                            if name != master_node]

    assert self.master_node not in self.nonmaster_nodes

  def _RunCmd(self, node_name, cmd):
    """Runs a command on the local or a remote machine.

    @type node_name: string
    @param node_name: Machine name
    @type cmd: list
    @param cmd: Command

    """
    if node_name is None or node_name == self.master_node:
      # No need to use SSH
      result = utils.RunCmd(cmd)
      result = self.ssh.Run(node_name, "root", utils.ShellQuoteArgs(cmd))

    # Build a descriptive error message including node and exit details
    errmsg = ["Failed to run command %s" % result.cmd]
    errmsg.append("on node %s" % node_name)
    errmsg.append(": exitcode %s and error %s" %
                  (result.exit_code, result.output))
    raise errors.OpExecError(" ".join(errmsg))

  def Call(self, fn, *args):
    """Call function while all daemons are stopped.

    @type fn: callable
    @param fn: Function to be called

    """
    # Pause watcher by acquiring an exclusive lock on watcher state file
    self.feedback_fn("Blocking watcher")
    watcher_block = utils.FileLock.Open(constants.WATCHER_LOCK_FILE)
    # TODO: Currently, this just blocks. There's no timeout.
    # TODO: Should it be a shared lock?
    watcher_block.Exclusive(blocking=True)

    # Stop master daemons, so that no new jobs can come in and all running
    self.feedback_fn("Stopping master daemons")
    self._RunCmd(None, [constants.DAEMON_UTIL, "stop-master"])

    # Stop daemons on all nodes
    for node_name in self.online_nodes:
      self.feedback_fn("Stopping daemons on %s" % node_name)
      self._RunCmd(node_name, [constants.DAEMON_UTIL, "stop-all"])

    # All daemons are shut down now
    return fn(self, *args)
    except Exception, err:
      _, errmsg = FormatError(err)
      logging.exception("Caught exception")
      self.feedback_fn(errmsg)

    # Start cluster again, master node last
    for node_name in self.nonmaster_nodes + [self.master_node]:
      self.feedback_fn("Starting daemons on %s" % node_name)
      self._RunCmd(node_name, [constants.DAEMON_UTIL, "start-all"])

    # Resume watcher by releasing the state-file lock
    watcher_block.Close()
# Module-level entry point: gathers cluster name, master node and online node
# list from the master daemon, then delegates to
# _RunWhileClusterStoppedHelper.Call to run `fn` with all daemons stopped.
# NOTE(review): listing is elided -- the luxi client creation and its release
# (see "Don't keep a reference" comment) are missing from this view.
2408 def RunWhileClusterStopped(feedback_fn, fn, *args):
2409 """Calls a function while all cluster daemons are stopped.
2411 @type feedback_fn: callable
2412 @param feedback_fn: Feedback function
2414 @param fn: Function to be called when daemons are stopped
2417 feedback_fn("Gathering cluster information")
2419 # This ensures we're running on the master daemon
2422 (cluster_name, master_node) = \
2423 cl.QueryConfigValues(["cluster_name", "master_node"])
2425 online_nodes = GetOnlineNodes([], cl=cl)
2427 # Don't keep a reference to the client. The master daemon will go away.
2430 assert master_node in online_nodes
2432 return _RunWhileClusterStoppedHelper(feedback_fn, cluster_name, master_node,
2433 online_nodes).Call(fn, *args)
# Renders rows of data as a list of formatted text lines, either with a fixed
# separator or with a "smart" layout that computes per-column widths (mlens)
# over data and headers; numeric fields are right-aligned, unit fields are
# formatted via utils.FormatUnit.
# NOTE(review): listing is elided -- default assignments for numfields /
# unitfields, the header-emitting loop body and the final return are missing
# from this view.
2436 def GenerateTable(headers, fields, separator, data,
2437 numfields=None, unitfields=None,
2439 """Prints a table with headers and different fields.
2442 @param headers: dictionary mapping field names to headers for
2445 @param fields: the field names corresponding to each row in
2447 @param separator: the separator to be used; if this is None,
2448 the default 'smart' algorithm is used which computes optimal
2449 field width, otherwise just the separator is used between
2452 @param data: a list of lists, each sublist being one row to be output
2453 @type numfields: list
2454 @param numfields: a list with the fields that hold numeric
2455 values and thus should be right-aligned
2456 @type unitfields: list
2457 @param unitfields: a list with the fields that hold numeric
2458 values that should be formatted with the units field
2459 @type units: string or None
2460 @param units: the units we should use for formatting, or None for
2461 automatic choice (human-readable for non-separator usage, otherwise
2462 megabytes); this is a one-letter string
2471 if numfields is None:
2473 if unitfields is None:
2476 numfields = utils.FieldSet(*numfields) # pylint: disable=W0142
2477 unitfields = utils.FieldSet(*unitfields) # pylint: disable=W0142
2480 for field in fields:
2481 if headers and field not in headers:
2482 # TODO: handle better unknown fields (either revert to old
2483 # style of raising exception, or deal more intelligently with
2485 headers[field] = field
2486 if separator is not None:
2487 format_fields.append("%s")
2488 elif numfields.Matches(field):
2489 format_fields.append("%*s")
2491 format_fields.append("%-*s")
2493 if separator is None:
2494 mlens = [0 for name in fields]
2495 format_str = " ".join(format_fields)
2497 format_str = separator.replace("%", "%%").join(format_fields)
2502 for idx, val in enumerate(row):
2503 if unitfields.Matches(fields[idx]):
2506 except (TypeError, ValueError):
2509 val = row[idx] = utils.FormatUnit(val, units)
2510 val = row[idx] = str(val)
2511 if separator is None:
2512 mlens[idx] = max(mlens[idx], len(val))
2517 for idx, name in enumerate(fields):
2519 if separator is None:
2520 mlens[idx] = max(mlens[idx], len(hdr))
2521 args.append(mlens[idx])
2523 result.append(format_str % tuple(args))
2525 if separator is None:
2526 assert len(mlens) == len(fields)
2528 if fields and not numfields.Matches(fields[-1]):
2534 line = ["-" for _ in fields]
2535 for idx in range(len(fields)):
2536 if separator is None:
2537 args.append(mlens[idx])
2538 args.append(line[idx])
2539 result.append(format_str % tuple(args))
# Converts a boolean query value to its display string (used as the QFT_BOOL
# formatter in _DEFAULT_FORMAT_QUERY below).
# NOTE(review): the function body (return statement) is elided from this view.
2544 def _FormatBool(value):
2545 """Formats a boolean value as a string.
# Maps each query field type (constants.QFT_*) to a (formatter callable,
# right-align flag) pair; QFT_UNIT is intentionally absent and handled
# dynamically in _GetColumnFormatter because it depends on the chosen unit.
2553 #: Default formatting for query results; (callback, align right)
2554 _DEFAULT_FORMAT_QUERY = {
2555 constants.QFT_TEXT: (str, False),
2556 constants.QFT_BOOL: (_FormatBool, False),
2557 constants.QFT_NUMBER: (str, True),
2558 constants.QFT_TIMESTAMP: (utils.FormatTime, False),
2559 constants.QFT_OTHER: (str, False),
2560 constants.QFT_UNKNOWN: (str, False),
# Resolves the (formatter, align_right) pair for one field: per-field override
# first, then a dynamic unit formatter for QFT_UNIT, then the static defaults;
# raises NotImplementedError for unrecognized field kinds.
# NOTE(review): listing is elided -- the early return for a matching override
# and the return of the default formatter are missing from this view.
2564 def _GetColumnFormatter(fdef, override, unit):
2565 """Returns formatting function for a field.
2567 @type fdef: L{objects.QueryFieldDefinition}
2568 @type override: dict
2569 @param override: Dictionary for overriding field formatting functions,
2570 indexed by field name, contents like L{_DEFAULT_FORMAT_QUERY}
2572 @param unit: Unit used for formatting fields of type L{constants.QFT_UNIT}
2573 @rtype: tuple; (callable, bool)
2574 @return: Returns the function to format a value (takes one parameter) and a
2575 boolean for aligning the value on the right-hand side
2578 fmt = override.get(fdef.name, None)
2582 assert constants.QFT_UNIT not in _DEFAULT_FORMAT_QUERY
2584 if fdef.kind == constants.QFT_UNIT:
2585 # Can't keep this information in the static dictionary
2586 return (lambda value: utils.FormatUnit(value, unit), True)
2588 fmt = _DEFAULT_FORMAT_QUERY.get(fdef.kind, None)
2592 raise NotImplementedError("Can't format column type '%s'" % fdef.kind)
# Callable wrapper around a field formatter: records each value's result
# status via status_fn, formats RS_NORMAL values with the wrapped function,
# and renders abnormal statuses through FormatResultError.
# NOTE(review): listing is elided -- `self._fn = fn` in __init__ is missing
# from this view but __call__ clearly relies on it.
2595 class _QueryColumnFormatter:
2596 """Callable class for formatting fields of a query.
2599 def __init__(self, fn, status_fn, verbose):
2600 """Initializes this class.
2603 @param fn: Formatting function
2604 @type status_fn: callable
2605 @param status_fn: Function to report fields' status
2606 @type verbose: boolean
2607 @param verbose: whether to use verbose field descriptions or not
2611 self._status_fn = status_fn
2612 self._verbose = verbose
2614 def __call__(self, data):
2615 """Returns a field's string representation.
2618 (status, value) = data
2621 self._status_fn(status)
2623 if status == constants.RS_NORMAL:
2624 return self._fn(value)
2626 assert value is None, \
2627 "Found value %r for abnormal status %s" % (value, status)
2629 return FormatResultError(status, self._verbose)
# Translates an abnormal result status into display text, choosing between
# the verbose and the short description from constants.RSS_DESCRIPTION.
# NOTE(review): listing is elided -- the KeyError handling around the lookup
# and the return of verbose_text/normal_text are missing from this view.
2632 def FormatResultError(status, verbose):
2633 """Formats result status other than L{constants.RS_NORMAL}.
2635 @param status: The result status
2636 @type verbose: boolean
2637 @param verbose: Whether to return the verbose text
2638 @return: Text of result status
2641 assert status != constants.RS_NORMAL, \
2642 "FormatResultError called with status equal to constants.RS_NORMAL"
2644 (verbose_text, normal_text) = constants.RSS_DESCRIPTION[status]
2646 raise NotImplementedError("Unknown status %s" % status)
# Turns a QueryResponse into (overall status, list of table lines): builds a
# TableColumn per field (with per-status statistics collected through
# _RecordStatus), formats via FormatTable, then derives QR_UNKNOWN /
# QR_INCOMPLETE / normal from the collected status counts.
# NOTE(review): listing is elided -- the _RecordStatus body, the default-unit
# selection and the QR_* status assignments are partly missing from this view.
2653 def FormatQueryResult(result, unit=None, format_override=None, separator=None,
2654 header=False, verbose=False):
2655 """Formats data in L{objects.QueryResponse}.
2657 @type result: L{objects.QueryResponse}
2658 @param result: result of query operation
2660 @param unit: Unit used for formatting fields of type L{constants.QFT_UNIT},
2661 see L{utils.text.FormatUnit}
2662 @type format_override: dict
2663 @param format_override: Dictionary for overriding field formatting functions,
2664 indexed by field name, contents like L{_DEFAULT_FORMAT_QUERY}
2665 @type separator: string or None
2666 @param separator: String used to separate fields
2668 @param header: Whether to output header row
2669 @type verbose: boolean
2670 @param verbose: whether to use verbose field descriptions or not
2679 if format_override is None:
2680 format_override = {}
2682 stats = dict.fromkeys(constants.RS_ALL, 0)
2684 def _RecordStatus(status):
2689 for fdef in result.fields:
2690 assert fdef.title and fdef.name
2691 (fn, align_right) = _GetColumnFormatter(fdef, format_override, unit)
2692 columns.append(TableColumn(fdef.title,
2693 _QueryColumnFormatter(fn, _RecordStatus,
2697 table = FormatTable(result.data, columns, header, separator)
2699 # Collect statistics
2700 assert len(stats) == len(constants.RS_ALL)
2701 assert compat.all(count >= 0 for count in stats.values())
2703 # Determine overall status. If there was no data, unknown fields must be
2704 # detected via the field definitions.
2705 if (stats[constants.RS_UNKNOWN] or
2706 (not result.data and _GetUnknownFields(result.fields))):
2708 elif compat.any(count > 0 for key, count in stats.items()
2709 if key != constants.RS_NORMAL):
2710 status = QR_INCOMPLETE
2714 return (status, table)
# Filters the field definitions down to those the server flagged as unknown
# (kind == QFT_UNKNOWN); used by FormatQueryResult and _WarnUnknownFields.
2717 def _GetUnknownFields(fdefs):
2718 """Returns list of unknown fields included in C{fdefs}.
2720 @type fdefs: list of L{objects.QueryFieldDefinition}
2723 return [fdef for fdef in fdefs
2724 if fdef.kind == constants.QFT_UNKNOWN]
# Emits a stderr warning naming any unknown fields in a query's field
# definitions; callers use its result as a "found unknown fields" flag.
# NOTE(review): listing is elided -- the `if unknown:` guard and the return
# statement are missing from this view.
2727 def _WarnUnknownFields(fdefs):
2728 """Prints a warning to stderr if a query included unknown fields.
2730 @type fdefs: list of L{objects.QueryFieldDefinition}
2733 unknown = _GetUnknownFields(fdefs)
2735 ToStderr("Warning: Queried for unknown fields %s",
2736 utils.CommaJoin(fdef.name for fdef in unknown))
# Generic "gnt-* list" implementation: builds a filter from `names`, runs a
# query over LUXI, formats the response with FormatQueryResult and returns an
# exit code (EXIT_UNKNOWN_FIELD when unknown fields were requested).
# NOTE(review): listing is elided -- the default-client creation and the
# ToStdout loop printing the table are missing from this view.
2742 def GenericList(resource, fields, names, unit, separator, header, cl=None,
2743 format_override=None, verbose=False, force_filter=False):
2744 """Generic implementation for listing all items of a resource.
2746 @param resource: One of L{constants.QR_VIA_LUXI}
2747 @type fields: list of strings
2748 @param fields: List of fields to query for
2749 @type names: list of strings
2750 @param names: Names of items to query for
2751 @type unit: string or None
2752 @param unit: Unit used for formatting fields of type L{constants.QFT_UNIT} or
2753 None for automatic choice (human-readable for non-separator usage,
2754 otherwise megabytes); this is a one-letter string
2755 @type separator: string or None
2756 @param separator: String used to separate fields
2758 @param header: Whether to show header row
2759 @type force_filter: bool
2760 @param force_filter: Whether to always treat names as filter
2761 @type format_override: dict
2762 @param format_override: Dictionary for overriding field formatting functions,
2763 indexed by field name, contents like L{_DEFAULT_FORMAT_QUERY}
2764 @type verbose: boolean
2765 @param verbose: whether to use verbose field descriptions or not
2771 qfilter = qlang.MakeFilter(names, force_filter)
2776 response = cl.Query(resource, fields, qfilter)
2778 found_unknown = _WarnUnknownFields(response.fields)
2780 (status, data) = FormatQueryResult(response, unit=unit, separator=separator,
2782 format_override=format_override,
2788 assert ((found_unknown and status == QR_UNKNOWN) or
2789 (not found_unknown and status != QR_UNKNOWN))
2791 if status == QR_UNKNOWN:
2792 return constants.EXIT_UNKNOWN_FIELD
2794 # TODO: Should the list command fail if not all data could be collected?
2795 return constants.EXIT_SUCCESS
# Generic "gnt-* list-fields" implementation: queries the available field
# definitions for a resource and prints a Name/Title/Description table,
# returning EXIT_UNKNOWN_FIELD if any requested field was unknown.
# NOTE(review): listing is elided -- the default-client creation, the sort of
# response.fields, the ToStdout call and the `if found_unknown:` guard are
# missing from this view.
2798 def GenericListFields(resource, fields, separator, header, cl=None):
2799 """Generic implementation for listing fields for a resource.
2801 @param resource: One of L{constants.QR_VIA_LUXI}
2802 @type fields: list of strings
2803 @param fields: List of fields to query for
2804 @type separator: string or None
2805 @param separator: String used to separate fields
2807 @param header: Whether to show header row
2816 response = cl.QueryFields(resource, fields)
2818 found_unknown = _WarnUnknownFields(response.fields)
2821 TableColumn("Name", str, False),
2822 TableColumn("Title", str, False),
2823 TableColumn("Description", str, False),
2826 rows = [[fdef.name, fdef.title, fdef.doc] for fdef in response.fields]
2828 for line in FormatTable(rows, columns, header, separator):
2832 return constants.EXIT_UNKNOWN_FIELD
2834 return constants.EXIT_SUCCESS
2838 """Describes a column for L{FormatTable}.
# Stores a column's title, per-value formatting callable and alignment flag
# for use by FormatTable.
# NOTE(review): listing is elided -- the assignments of `self.title` and the
# formatting callable (used as `col.format` / `col.title` in FormatTable)
# are missing from this view.
2841 def __init__(self, title, fn, align_right):
2842 """Initializes this class.
2845 @param title: Column title
2847 @param fn: Formatting function
2848 @type align_right: bool
2849 @param align_right: Whether to align values on the right-hand side
2854 self.align_right = align_right
# Builds a printf-style format string ("%<width>s" or "%-<width>s") for one
# table column.
# NOTE(review): listing is elided -- the computation of `sign` (presumably ""
# for right-aligned, "-" otherwise) is missing from this view.
2857 def _GetColFormatString(width, align_right):
2858 """Returns the format string for a field.
2866 return "%%%s%ss" % (sign, width)
# Formats rows into text lines: with a separator, simply joins formatted
# values; without one, tracks the maximum width per column and builds a
# space-joined format string, avoiding trailing spaces when the last column
# is left-aligned.
# NOTE(review): listing is elided -- the `else:` for the no-header case, the
# row loop header and the last-column width reset are missing from this view.
2869 def FormatTable(rows, columns, header, separator):
2870 """Formats data as a table.
2872 @type rows: list of lists
2873 @param rows: Row data, one list per row
2874 @type columns: list of L{TableColumn}
2875 @param columns: Column descriptions
2877 @param header: Whether to show header row
2878 @type separator: string or None
2879 @param separator: String used to separate columns
2883 data = [[col.title for col in columns]]
2884 colwidth = [len(col.title) for col in columns]
2887 colwidth = [0 for _ in columns]
2891 assert len(row) == len(columns)
2893 formatted = [col.format(value) for value, col in zip(row, columns)]
2895 if separator is None:
2896 # Update column widths
2897 for idx, (oldwidth, value) in enumerate(zip(colwidth, formatted)):
2898 # Modifying a list's items while iterating is fine
2899 colwidth[idx] = max(oldwidth, len(value))
2901 data.append(formatted)
2903 if separator is not None:
2904 # Return early if a separator is used
2905 return [separator.join(row) for row in data]
2907 if columns and not columns[-1].align_right:
2908 # Avoid unnecessary spaces at end of line
2911 # Build format string
2912 fmt = " ".join([_GetColFormatString(width, col.align_right)
2913 for col, width in zip(columns, colwidth)])
2915 return [fmt % tuple(row) for row in data]
# Renders a (seconds, microseconds) pair as "YYYY-MM-DD HH:MM:SS.uuuuuu" in
# local time; malformed input is handled on elided lines.
# NOTE(review): listing is elided -- the fallback return for invalid input and
# the unpacking of `sec`/`usec` are missing from this view.
2918 def FormatTimestamp(ts):
2919 """Formats a given timestamp.
2922 @param ts: a timeval-type timestamp, a tuple of seconds and microseconds
2925 @return: a string with the formatted timestamp
2928 if not isinstance(ts, (tuple, list)) or len(ts) != 2:
2931 return time.strftime("%F %T", time.localtime(sec)) + ".%06d" % usec
# Parses a time specification with an optional single-character suffix
# (multipliers defined in the elided `suffix_map`); a bare number is taken as
# seconds. Raises errors.OpPrereqError on empty or malformed input.
# NOTE(review): listing is elided -- the suffix_map definition, the
# suffix-stripping of `value` and the final return are missing from this view.
2934 def ParseTimespec(value):
2935 """Parse a time specification.
2937 The following suffixed will be recognized:
2945 Without any suffix, the value will be taken to be in seconds.
2950 raise errors.OpPrereqError("Empty time specification passed")
2958 if value[-1] not in suffix_map:
2961 except (TypeError, ValueError):
2962 raise errors.OpPrereqError("Invalid time specification '%s'" % value)
2964 multiplier = suffix_map[value[-1]]
2966 if not value: # no data left after stripping the suffix
2967 raise errors.OpPrereqError("Invalid time specification (only"
2970 value = int(value) * multiplier
2971 except (TypeError, ValueError):
2972 raise errors.OpPrereqError("Invalid time specification '%s'" % value)
# Queries node names (or secondary IPs) of online nodes via a LUXI query,
# optionally restricted to a node subset / node group and optionally excluding
# the master; warns on stderr about skipped offline nodes unless `nowarn`.
# NOTE(review): listing is elided -- the qfilter initialization, the `if nodes:`
# and `if filter_master:` guards, the _GetName/_GetSip helper headers and the
# fn selection before the final map() are missing from this view.
2976 def GetOnlineNodes(nodes, cl=None, nowarn=False, secondary_ips=False,
2977 filter_master=False, nodegroup=None):
2978 """Returns the names of online nodes.
2980 This function will also log a warning on stderr with the names of
2983 @param nodes: if not empty, use only this subset of nodes (minus the
2985 @param cl: if not None, luxi client to use
2986 @type nowarn: boolean
2987 @param nowarn: by default, this function will output a note with the
2988 offline nodes that are skipped; if this parameter is True the
2989 note is not displayed
2990 @type secondary_ips: boolean
2991 @param secondary_ips: if True, return the secondary IPs instead of the
2992 names, useful for doing network traffic over the replication interface
2994 @type filter_master: boolean
2995 @param filter_master: if True, do not return the master node in the list
2996 (useful in coordination with secondary_ips where we cannot check our
2997 node name against the list)
2998 @type nodegroup: string
2999 @param nodegroup: If set, only return nodes in this node group
3008 qfilter.append(qlang.MakeSimpleFilter("name", nodes))
3010 if nodegroup is not None:
3011 qfilter.append([qlang.OP_OR, [qlang.OP_EQUAL, "group", nodegroup],
3012 [qlang.OP_EQUAL, "group.uuid", nodegroup]])
3015 qfilter.append([qlang.OP_NOT, [qlang.OP_TRUE, "master"]])
3018 if len(qfilter) > 1:
3019 final_filter = [qlang.OP_AND] + qfilter
3021 assert len(qfilter) == 1
3022 final_filter = qfilter[0]
3026 result = cl.Query(constants.QR_NODE, ["name", "offline", "sip"], final_filter)
3028 def _IsOffline(row):
3029 (_, (_, offline), _) = row
3033 ((_, name), _, _) = row
3037 (_, _, (_, sip)) = row
3040 (offline, online) = compat.partition(result.data, _IsOffline)
3042 if offline and not nowarn:
3043 ToStderr("Note: skipping offline node(s): %s" %
3044 utils.CommaJoin(map(_GetName, offline)))
3051 return map(fn, online)
# Writes a printf-style message directly to a stream (bypassing logging),
# exiting cleanly with EXIT_FAILURE when the stream's pipe is gone (EPIPE).
# NOTE(review): listing is elided -- the `try:` line, the no-args write branch,
# the newline write/flush and the non-EPIPE re-raise are missing from this view.
3054 def _ToStream(stream, txt, *args):
3055 """Write a message to a stream, bypassing the logging system
3057 @type stream: file object
3058 @param stream: the file to which we should write
3060 @param txt: the message
3066 stream.write(txt % args)
3071 except IOError, err:
3072 if err.errno == errno.EPIPE:
3073 # our terminal went away, we'll exit
3074 sys.exit(constants.EXIT_FAILURE)
# Convenience wrapper: write a formatted message to stdout via _ToStream.
3079 def ToStdout(txt, *args):
3080 """Write a message to stdout only, bypassing the logging system
3082 This is just a wrapper over _ToStream.
3085 @param txt: the message
3088 _ToStream(sys.stdout, txt, *args)
# Convenience wrapper: write a formatted message to stderr via _ToStream.
3091 def ToStderr(txt, *args):
3092 """Write a message to stderr only, bypassing the logging system
3094 This is just a wrapper over _ToStream.
3097 @param txt: the message
3100 _ToStream(sys.stderr, txt, *args)
# Manages queuing, (bulk) submission and result collection of multiple LUXI
# jobs for a CLI command. Internal state (partly assigned on elided lines):
# self.queue holds (index, name, ops) tuples not yet submitted; self.jobs
# holds (index, submit-success, job_id-or-error, name) tuples once submitted;
# self._counter preserves submission order for the final result sort.
# NOTE(review): listing is elided throughout -- __init__ assignments for
# self.queue/self.jobs/self.cl/self.opts, the _IfName body, the SubmitPending
# `if each:` / `else:` lines, loop headers in GetResults and several guards
# are missing from this view.
3103 class JobExecutor(object):
3104 """Class which manages the submission and execution of multiple jobs.
3106 Note that instances of this class should not be reused between
3110 def __init__(self, cl=None, verbose=True, opts=None, feedback_fn=None):
3115 self.verbose = verbose
3118 self.feedback_fn = feedback_fn
3119 self._counter = itertools.count()
3122 def _IfName(name, fmt):
3123 """Helper function for formatting name.
3131 def QueueJob(self, name, *ops):
3132 """Record a job for later submit.
3135 @param name: a description of the job, will be used in WaitJobSet
3138 SetGenericOpcodeOpts(ops, self.opts)
3139 self.queue.append((self._counter.next(), name, ops))
3141 def AddJobId(self, name, status, job_id):
3142 """Adds a job ID to the internal queue.
3145 self.jobs.append((self._counter.next(), status, job_id, name))
3147 def SubmitPending(self, each=False):
3148 """Submit all pending jobs.
3153 for (_, _, ops) in self.queue:
3154 # SubmitJob will remove the success status, but raise an exception if
3155 # the submission fails, so we'll notice that anyway.
3156 results.append([True, self.cl.SubmitJob(ops)[0]])
3158 results = self.cl.SubmitManyJobs([ops for (_, _, ops) in self.queue])
3159 for ((status, data), (idx, name, _)) in zip(results, self.queue):
3160 self.jobs.append((idx, status, data, name))
3162 def _ChooseJob(self):
3163 """Choose a non-waiting/queued job to poll next.
3166 assert self.jobs, "_ChooseJob called with empty job list"
3168 result = self.cl.QueryJobs([i[2] for i in self.jobs[:_CHOOSE_BATCH]],
3172 for job_data, status in zip(self.jobs, result):
3173 if (isinstance(status, list) and status and
3174 status[0] in (constants.JOB_STATUS_QUEUED,
3175 constants.JOB_STATUS_WAITING,
3176 constants.JOB_STATUS_CANCELING)):
3177 # job is still present and waiting
3179 # good candidate found (either running job or lost job)
3180 self.jobs.remove(job_data)
3184 return self.jobs.pop(0)
3186 def GetResults(self):
3187 """Wait for and return the results of all jobs.
3190 @return: list of tuples (success, job results), in the same order
3191 as the submitted jobs; if a job has failed, instead of the result
3192 there will be the error message
3196 self.SubmitPending()
3199 ok_jobs = [row[2] for row in self.jobs if row[1]]
3201 ToStdout("Submitted jobs %s", utils.CommaJoin(ok_jobs))
3203 # first, remove any non-submitted jobs
3204 self.jobs, failures = compat.partition(self.jobs, lambda x: x[1])
3205 for idx, _, jid, name in failures:
3206 ToStderr("Failed to submit job%s: %s", self._IfName(name, " for %s"), jid)
3207 results.append((idx, False, jid))
3210 (idx, _, jid, name) = self._ChooseJob()
3211 ToStdout("Waiting for job %s%s ...", jid, self._IfName(name, " for %s"))
3213 job_result = PollJob(jid, cl=self.cl, feedback_fn=self.feedback_fn)
3215 except errors.JobLost, err:
3216 _, job_result = FormatError(err)
3217 ToStderr("Job %s%s has been archived, cannot check its result",
3218 jid, self._IfName(name, " for %s"))
3220 except (errors.GenericError, luxi.ProtocolError), err:
3221 _, job_result = FormatError(err)
3223 # the error message will always be shown, verbose or not
3224 ToStderr("Job %s%s has failed: %s",
3225 jid, self._IfName(name, " for %s"), job_result)
3227 results.append((idx, success, job_result))
3229 # sort based on the index, then drop it
3231 results = [i[1:] for i in results]
3235 def WaitOrShow(self, wait):
3236 """Wait for job results or only print the job IDs.
3239 @param wait: whether to wait or not
3243 return self.GetResults()
3246 self.SubmitPending()
3247 for _, status, result, name in self.jobs:
3249 ToStdout("%s: %s", result, name)
3251 ToStderr("Failure for %s: %s", name, result)
3252 return [row[1:3] for row in self.jobs]
# Writes one "- key: value" line per effective parameter into `buf`, indented
# by `level`; keys missing from the own parameters are shown as
# "default (<effective value>)" taken from `actual`.
3255 def FormatParameterDict(buf, param_dict, actual, level=1):
3256 """Formats a parameter dictionary.
3258 @type buf: L{StringIO}
3259 @param buf: the buffer into which to write
3260 @type param_dict: dict
3261 @param param_dict: the own parameters
3263 @param actual: the current parameter set (including defaults)
3264 @param level: Level of indent
3267 indent = "  " * level
3268 for key in sorted(actual):
3269 val = param_dict.get(key, "default (%s)" % actual[key])
3270 buf.write("%s- %s: %s\n" % (indent, key, val))
3273 def ConfirmOperation(names, list_type, text, extra=""):
3274 """Ask the user to confirm an operation on a list of list_type.
3276 This function is used to request confirmation for doing an operation
3277 on a given list of list_type.
3280 @param names: the list of names that we display when
3281 we ask for confirmation
3282 @type list_type: str
3283 @param list_type: Human readable name for elements in the list (e.g. nodes)
3285 @param text: the operation that the user should confirm
3287 @return: True or False depending on user's confirmation.
3291 msg = ("The %s will operate on %d %s.\n%s"
3292 "Do you want to continue?" % (text, count, list_type, extra))
3293 affected = (("\nAffected %s:\n" % list_type) +
3294 "\n".join([" %s" % name for name in names]))
3296 choices = [("y", True, "Yes, execute the %s" % text),
3297 ("n", False, "No, abort the %s" % text)]
3300 choices.insert(1, ("v", "v", "View the list of affected %s" % list_type))
3303 question = msg + affected
3305 choice = AskUser(question, choices)
3308 choice = AskUser(msg + affected, choices)