4 # Copyright (C) 2006, 2007 Google Inc.
6 # This program is free software; you can redistribute it and/or modify
7 # it under the terms of the GNU General Public License as published by
8 # the Free Software Foundation; either version 2 of the License, or
9 # (at your option) any later version.
11 # This program is distributed in the hope that it will be useful, but
12 # WITHOUT ANY WARRANTY; without even the implied warranty of
13 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 # General Public License for more details.
16 # You should have received a copy of the GNU General Public License
17 # along with this program; if not, write to the Free Software
18 # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
22 """Module dealing with command line parsing"""
30 from cStringIO import StringIO
32 from ganeti import utils
33 from ganeti import errors
34 from ganeti import constants
35 from ganeti import opcodes
36 from ganeti import luxi
37 from ganeti import ssconf
38 from ganeti import rpc
40 from optparse import (OptionParser, TitledHelpFormatter,
41 Option, OptionValueError)
45 # Command line options
64 "FILESTORE_DRIVER_OPT",
73 "IGNORE_FAILURES_OPT",
74 "IGNORE_SECONDARIES_OPT",
88 "NOMODIFY_ETCHOSTS_OPT",
89 "NOMODIFY_SSH_SETUP_OPT",
109 "SHUTDOWN_TIMEOUT_OPT",
122 # Generic functions for CLI programs
124 "GenericInstanceCreate",
128 "JobSubmittedException",
133 # Formatting functions
134 "ToStderr", "ToStdout",
143 # command line options support infrastructure
144 "ARGS_MANY_INSTANCES",
158 "OPT_COMPL_INST_ADD_NODES",
159 "OPT_COMPL_MANY_NODES",
160 "OPT_COMPL_ONE_IALLOCATOR",
161 "OPT_COMPL_ONE_INSTANCE",
162 "OPT_COMPL_ONE_NODE",
174 def __init__(self, min=0, max=None): # pylint: disable-msg=W0622
179 return ("<%s min=%s max=%s>" %
180 (self.__class__.__name__, self.min, self.max))
# NOTE(review): sampled listing -- blank lines, docstring closers and the
# bodies of most Arg* subclasses below are elided.  From what is visible,
# each subclass only tags an argument *kind* for the CLI machinery; no
# extra behavior beyond ArgSuggest's __init__/__repr__ is shown here.
# Confirm against the full file before relying on that.
183 class ArgSuggest(_Argument):
184   """Suggesting argument.
186   Value can be any of the ones passed to the constructor.
189   # pylint: disable-msg=W0622
190   def __init__(self, min=0, max=None, choices=None):
191     _Argument.__init__(self, min=min, max=max)
192     self.choices = choices
195     return ("<%s min=%s max=%s choices=%r>" %
196             (self.__class__.__name__, self.min, self.max, self.choices))
199 class ArgChoice(ArgSuggest):
202   Value can be any of the ones passed to the constructor. Like L{ArgSuggest},
203   but value must be one of the choices.
208 class ArgUnknown(_Argument):
209   """Unknown argument to program (e.g. determined at runtime).
214 class ArgInstance(_Argument):
215   """Instances argument.
220 class ArgNode(_Argument):
225 class ArgJobId(_Argument):
231 class ArgFile(_Argument):
232   """File path argument.
237 class ArgCommand(_Argument):
243 class ArgHost(_Argument):
# Predefined argument-definition lists shared by the command tables.
# The "MANY" variants use the _Argument defaults (min=0, max=None, i.e. any
# count); the "ONE" variants require exactly one value (min=1, max=1).
250 ARGS_MANY_INSTANCES = [ArgInstance()]
251 ARGS_MANY_NODES = [ArgNode()]
252 ARGS_ONE_INSTANCE = [ArgInstance(min=1, max=1)]
253 ARGS_ONE_NODE = [ArgNode(min=1, max=1)]
256 def _ExtractTagsObject(opts, args):
257   """Extract the tag type object.
259   Note that this function will modify its args parameter.
# NOTE(review): callers (ListTags/AddTags/RemoveTags) unpack the result as
# (kind, name), so this returns that tuple; for node/instance tags the name
# is presumably taken (popped) from args -- the relevant lines are missing
# from this sampled listing, confirm against the full file.
262   if not hasattr(opts, "tag_type"):
263     raise errors.ProgrammerError("tag_type not passed to _ExtractTagsObject")
# The cluster tag needs no name argument; node/instance tags require one.
265   if kind == constants.TAG_CLUSTER:
267   elif kind == constants.TAG_NODE or kind == constants.TAG_INSTANCE:
269       raise errors.OpPrereqError("no arguments passed to the command")
# Any other tag kind is a programming error, not a user error.
273     raise errors.ProgrammerError("Unhandled tag type '%s'" % kind)
277 def _ExtendTags(opts, args):
278   """Extend the args if a source file has been given.
280   This function will extend the tags with the contents of the file
281   passed in the 'tags_source' attribute of the opts parameter. A file
282   named '-' will be replaced by stdin.
285   fname = opts.tags_source
# NOTE(review): the branch handling fname is None / '-' is elided in this
# sampled listing; only the regular-file path is visible.
291     new_fh = open(fname, "r")
294     # we don't use the nice 'new_data = [line.strip() for line in fh]'
295     # because of python bug 1633941
# Read line-by-line via readline() (see bug note above); each stripped
# line becomes one tag.
297         line = new_fh.readline()
300         new_data.append(line.strip())
# Tags from the file are appended after any tags given on the command line.
303   args.extend(new_data)
306 def ListTags(opts, args):
307   """List the tags on a given object.
309   This is a generic implementation that knows how to deal with all
310   three cases of tag objects (cluster, node, instance). The opts
311   argument is expected to contain a tag_type field denoting what
312   object type we work on.
315   kind, name = _ExtractTagsObject(opts, args)
# NOTE(review): 'cl' (a luxi client, per its QueryTags call) is created on
# an elided line in this sampled listing.
317   result = cl.QueryTags(kind, name)
# Materialize and (presumably, on elided lines) sort/print the tag names.
318   result = list(result)
324 def AddTags(opts, args):
325   """Add tags on a given object.
327   This is a generic implementation that knows how to deal with all
328   three cases of tag objects (cluster, node, instance). The opts
329   argument is expected to contain a tag_type field denoting what
330   object type we work on.
333   kind, name = _ExtractTagsObject(opts, args)
# Merge in tags read from opts.tags_source (if any); after this, args
# holds the full tag list.
334   _ExtendTags(opts, args)
336     raise errors.OpPrereqError("No tags to be added")
337   op = opcodes.OpAddTags(kind=kind, name=name, tags=args)
341 def RemoveTags(opts, args):
342   """Remove tags from a given object.
344   This is a generic implementation that knows how to deal with all
345   three cases of tag objects (cluster, node, instance). The opts
346   argument is expected to contain a tag_type field denoting what
347   object type we work on.
350   kind, name = _ExtractTagsObject(opts, args)
# Same flow as AddTags, but builds an OpDelTags opcode instead.
351   _ExtendTags(opts, args)
353     raise errors.OpPrereqError("No tags to be removed")
354   op = opcodes.OpDelTags(kind=kind, name=name, tags=args)
358 def check_unit(option, opt, value): # pylint: disable-msg=W0613
359   """OptParsers custom converter for units.
# Delegate parsing to utils.ParseUnit; re-raise parse failures as
# OptionValueError so optparse reports a proper usage error to the user.
363     return utils.ParseUnit(value)
364   except errors.UnitParseError, err:
365     raise OptionValueError("option %s: %s" % (opt, err))
368 def _SplitKeyVal(opt, data):
369   """Convert a KeyVal string into a dict.
371   This function will convert a key=val[,...] string into a dict. Empty
372   values will be converted specially: keys which have the prefix 'no_'
373   will have the value=False and the prefix stripped, the others will
377   @param opt: a string holding the option name for which we process the
378     data, used in building error messages
380   @param data: a string of the format key=val,key=val,...
382   @return: {key=val, key=val}
383   @raises errors.ParameterError: if there are duplicate keys
# NOTE(review): NO_PREFIX/UN_PREFIX are module-level constants defined on
# lines elided from this sampled listing.
388   for elem in utils.UnescapeAndSplit(data, sep=","):
# Element with '=': explicit key=value pair.
390       key, val = elem.split("=", 1)
# Bare element: 'no_key' -> key=False, 'un'-prefixed -> key=None (reset to
# default), anything else -> key=True.
392       if elem.startswith(NO_PREFIX):
393         key, val = elem[len(NO_PREFIX):], False
394       elif elem.startswith(UN_PREFIX):
395         key, val = elem[len(UN_PREFIX):], None
397         key, val = elem, True
399       raise errors.ParameterError("Duplicate key '%s' in option %s" %
405 def check_ident_key_val(option, opt, value): # pylint: disable-msg=W0613
406   """Custom parser for ident:key=val,key=val options.
408   This will store the parsed values as a tuple (ident, {key: val}). As such,
409   multiple uses of this option via action=append is possible.
# No ':' in the value means a bare ident with an empty parameter string.
413     ident, rest = value, ''
415     ident, rest = value.split(":", 1)
# 'no_'-prefixed ident removes a parameter group -> (ident, False);
# 'un'-prefixed resets it -> (ident, None).  Neither form may carry
# key=val options.
417   if ident.startswith(NO_PREFIX):
419       msg = "Cannot pass options when removing parameter groups: %s" % value
420       raise errors.ParameterError(msg)
421     retval = (ident[len(NO_PREFIX):], False)
422   elif ident.startswith(UN_PREFIX):
424       msg = "Cannot pass options when removing parameter groups: %s" % value
425       raise errors.ParameterError(msg)
426     retval = (ident[len(UN_PREFIX):], None)
# Regular case: parse the key=val list into a dict.
428     kv_dict = _SplitKeyVal(opt, rest)
429     retval = (ident, kv_dict)
433 def check_key_val(option, opt, value): # pylint: disable-msg=W0613
434   """Custom parser class for key=val,key=val options.
436   This will store the parsed values as a dict {key: val}.
# Thin optparse type-checker wrapper around _SplitKeyVal.
439   return _SplitKeyVal(opt, value)
442 # completion_suggestion is normally a list. Using numeric values not evaluating
443 # to False for dynamic completion.
# NOTE(review): range(100, 106) yields six values, but only four names are
# visible in this sampled listing -- two tuple members (and two frozenset
# members) are on elided lines.
444 (OPT_COMPL_MANY_NODES,
446  OPT_COMPL_ONE_INSTANCE,
448  OPT_COMPL_ONE_IALLOCATOR,
449  OPT_COMPL_INST_ADD_NODES) = range(100, 106)
# Set of all valid completion markers, used for validation elsewhere.
451 OPT_COMPL_ALL = frozenset([
452   OPT_COMPL_MANY_NODES,
454   OPT_COMPL_ONE_INSTANCE,
456   OPT_COMPL_ONE_IALLOCATOR,
457   OPT_COMPL_INST_ADD_NODES,
461 class CliOption(Option):
462   """Custom option class for optparse.
# Extend optparse.Option with a "completion_suggest" attribute (used by
# shell-completion code) and three custom value types: "identkeyval",
# "keyval" and "unit", wired to the check_* converters above.
465   ATTRS = Option.ATTRS + [
466     "completion_suggest",
468   TYPES = Option.TYPES + (
473   TYPE_CHECKER = Option.TYPE_CHECKER.copy()
474   TYPE_CHECKER["identkeyval"] = check_ident_key_val
475   TYPE_CHECKER["keyval"] = check_key_val
476   TYPE_CHECKER["unit"] = check_unit
479 # optparse.py sets make_option, so we do it for our own option class, too
480 cli_option = CliOption
483 _YESNO = ("yes", "no")
486 DEBUG_OPT = cli_option("-d", "--debug", default=0, action="count",
487 help="Increase debugging level")
489 NOHDR_OPT = cli_option("--no-headers", default=False,
490 action="store_true", dest="no_headers",
491 help="Don't display column headers")
493 SEP_OPT = cli_option("--separator", default=None,
494 action="store", dest="separator",
495 help=("Separator between output fields"
496 " (defaults to one space)"))
498 USEUNITS_OPT = cli_option("--units", default=None,
499 dest="units", choices=('h', 'm', 'g', 't'),
500 help="Specify units for output (one of hmgt)")
502 FIELDS_OPT = cli_option("-o", "--output", dest="output", action="store",
503 type="string", metavar="FIELDS",
504 help="Comma separated list of output fields")
506 FORCE_OPT = cli_option("-f", "--force", dest="force", action="store_true",
507 default=False, help="Force the operation")
509 CONFIRM_OPT = cli_option("--yes", dest="confirm", action="store_true",
510 default=False, help="Do not require confirmation")
512 TAG_SRC_OPT = cli_option("--from", dest="tags_source",
513 default=None, help="File with tag names")
515 SUBMIT_OPT = cli_option("--submit", dest="submit_only",
516 default=False, action="store_true",
517 help=("Submit the job and return the job ID, but"
518 " don't wait for the job to finish"))
520 SYNC_OPT = cli_option("--sync", dest="do_locking",
521 default=False, action="store_true",
522 help=("Grab locks while doing the queries"
523 " in order to ensure more consistent results"))
525 _DRY_RUN_OPT = cli_option("--dry-run", default=False,
527 help=("Do not execute the operation, just run the"
528 " check steps and verify it it could be"
531 VERBOSE_OPT = cli_option("-v", "--verbose", default=False,
533 help="Increase the verbosity of the operation")
535 DEBUG_SIMERR_OPT = cli_option("--debug-simulate-errors", default=False,
536 action="store_true", dest="simulate_errors",
537 help="Debugging option that makes the operation"
538 " treat most runtime checks as failed")
540 NWSYNC_OPT = cli_option("--no-wait-for-sync", dest="wait_for_sync",
541 default=True, action="store_false",
542 help="Don't wait for sync (DANGEROUS!)")
544 DISK_TEMPLATE_OPT = cli_option("-t", "--disk-template", dest="disk_template",
545 help="Custom disk setup (diskless, file,"
547 default=None, metavar="TEMPL",
548 choices=list(constants.DISK_TEMPLATES))
550 NONICS_OPT = cli_option("--no-nics", default=False, action="store_true",
551 help="Do not create any network cards for"
554 FILESTORE_DIR_OPT = cli_option("--file-storage-dir", dest="file_storage_dir",
555 help="Relative path under default cluster-wide"
556 " file storage dir to store file-based disks",
557 default=None, metavar="<DIR>")
559 FILESTORE_DRIVER_OPT = cli_option("--file-driver", dest="file_driver",
560 help="Driver to use for image files",
561 default="loop", metavar="<DRIVER>",
562 choices=list(constants.FILE_DRIVER))
564 IALLOCATOR_OPT = cli_option("-I", "--iallocator", metavar="<NAME>",
565 help="Select nodes for the instance automatically"
566 " using the <NAME> iallocator plugin",
567 default=None, type="string",
568 completion_suggest=OPT_COMPL_ONE_IALLOCATOR)
570 OS_OPT = cli_option("-o", "--os-type", dest="os", help="What OS to run",
572 completion_suggest=OPT_COMPL_ONE_OS)
574 FORCE_VARIANT_OPT = cli_option("--force-variant", dest="force_variant",
575 action="store_true", default=False,
576 help="Force an unknown variant")
578 BACKEND_OPT = cli_option("-B", "--backend-parameters", dest="beparams",
579 type="keyval", default={},
580 help="Backend parameters")
582 HVOPTS_OPT = cli_option("-H", "--hypervisor-parameters", type="keyval",
583 default={}, dest="hvparams",
584 help="Hypervisor parameters")
586 HYPERVISOR_OPT = cli_option("-H", "--hypervisor-parameters", dest="hypervisor",
587 help="Hypervisor and hypervisor options, in the"
588 " format hypervisor:option=value,option=value,...",
589 default=None, type="identkeyval")
591 HVLIST_OPT = cli_option("-H", "--hypervisor-parameters", dest="hvparams",
592 help="Hypervisor and hypervisor options, in the"
593 " format hypervisor:option=value,option=value,...",
594 default=[], action="append", type="identkeyval")
596 NOIPCHECK_OPT = cli_option("--no-ip-check", dest="ip_check", default=True,
597 action="store_false",
598 help="Don't check that the instance's IP"
601 NONAMECHECK_OPT = cli_option("--no-name-check", dest="name_check",
602 default=True, action="store_false",
603 help="Don't check that the instance's name"
606 NET_OPT = cli_option("--net",
607 help="NIC parameters", default=[],
608 dest="nics", action="append", type="identkeyval")
610 DISK_OPT = cli_option("--disk", help="Disk parameters", default=[],
611 dest="disks", action="append", type="identkeyval")
613 DISKIDX_OPT = cli_option("--disks", dest="disks", default=None,
614 help="Comma-separated list of disks"
615 " indices to act on (e.g. 0,2) (optional,"
616 " defaults to all disks)")
618 OS_SIZE_OPT = cli_option("-s", "--os-size", dest="sd_size",
619 help="Enforces a single-disk configuration using the"
620 " given disk size, in MiB unless a suffix is used",
621 default=None, type="unit", metavar="<size>")
623 IGNORE_CONSIST_OPT = cli_option("--ignore-consistency",
624 dest="ignore_consistency",
625 action="store_true", default=False,
626 help="Ignore the consistency of the disks on"
629 NONLIVE_OPT = cli_option("--non-live", dest="live",
630 default=True, action="store_false",
631 help="Do a non-live migration (this usually means"
632 " freeze the instance, save the state, transfer and"
633 " only then resume running on the secondary node)")
635 NODE_PLACEMENT_OPT = cli_option("-n", "--node", dest="node",
636 help="Target node and optional secondary node",
637 metavar="<pnode>[:<snode>]",
638 completion_suggest=OPT_COMPL_INST_ADD_NODES)
640 NODE_LIST_OPT = cli_option("-n", "--node", dest="nodes", default=[],
641 action="append", metavar="<node>",
642 help="Use only this node (can be used multiple"
643 " times, if not given defaults to all nodes)",
644 completion_suggest=OPT_COMPL_ONE_NODE)
646 SINGLE_NODE_OPT = cli_option("-n", "--node", dest="node", help="Target node",
648 completion_suggest=OPT_COMPL_ONE_NODE)
650 NOSTART_OPT = cli_option("--no-start", dest="start", default=True,
651 action="store_false",
652 help="Don't start the instance after creation")
654 SHOWCMD_OPT = cli_option("--show-cmd", dest="show_command",
655 action="store_true", default=False,
656 help="Show command instead of executing it")
658 CLEANUP_OPT = cli_option("--cleanup", dest="cleanup",
659 default=False, action="store_true",
660 help="Instead of performing the migration, try to"
661 " recover from a failed cleanup. This is safe"
662 " to run even if the instance is healthy, but it"
663 " will create extra replication traffic and "
664 " disrupt briefly the replication (like during the"
667 STATIC_OPT = cli_option("-s", "--static", dest="static",
668 action="store_true", default=False,
669 help="Only show configuration data, not runtime data")
671 ALL_OPT = cli_option("--all", dest="show_all",
672 default=False, action="store_true",
673 help="Show info on all instances on the cluster."
674 " This can take a long time to run, use wisely")
676 SELECT_OS_OPT = cli_option("--select-os", dest="select_os",
677 action="store_true", default=False,
678 help="Interactive OS reinstall, lists available"
679 " OS templates for selection")
681 IGNORE_FAILURES_OPT = cli_option("--ignore-failures", dest="ignore_failures",
682 action="store_true", default=False,
683 help="Remove the instance from the cluster"
684 " configuration even if there are failures"
685 " during the removal process")
687 NEW_SECONDARY_OPT = cli_option("-n", "--new-secondary", dest="dst_node",
688 help="Specifies the new secondary node",
689 metavar="NODE", default=None,
690 completion_suggest=OPT_COMPL_ONE_NODE)
692 ON_PRIMARY_OPT = cli_option("-p", "--on-primary", dest="on_primary",
693 default=False, action="store_true",
694 help="Replace the disk(s) on the primary"
695 " node (only for the drbd template)")
697 ON_SECONDARY_OPT = cli_option("-s", "--on-secondary", dest="on_secondary",
698 default=False, action="store_true",
699 help="Replace the disk(s) on the secondary"
700 " node (only for the drbd template)")
702 AUTO_REPLACE_OPT = cli_option("-a", "--auto", dest="auto",
703 default=False, action="store_true",
704 help="Automatically replace faulty disks"
705 " (only for the drbd template)")
707 IGNORE_SIZE_OPT = cli_option("--ignore-size", dest="ignore_size",
708 default=False, action="store_true",
709 help="Ignore current recorded size"
710 " (useful for forcing activation when"
711 " the recorded size is wrong)")
713 SRC_NODE_OPT = cli_option("--src-node", dest="src_node", help="Source node",
715 completion_suggest=OPT_COMPL_ONE_NODE)
717 SRC_DIR_OPT = cli_option("--src-dir", dest="src_dir", help="Source directory",
720 SECONDARY_IP_OPT = cli_option("-s", "--secondary-ip", dest="secondary_ip",
721 help="Specify the secondary ip for the node",
722 metavar="ADDRESS", default=None)
724 READD_OPT = cli_option("--readd", dest="readd",
725 default=False, action="store_true",
726 help="Readd old node after replacing it")
728 NOSSH_KEYCHECK_OPT = cli_option("--no-ssh-key-check", dest="ssh_key_check",
729 default=True, action="store_false",
730 help="Disable SSH key fingerprint checking")
733 MC_OPT = cli_option("-C", "--master-candidate", dest="master_candidate",
734 choices=_YESNO, default=None, metavar=_YORNO,
735 help="Set the master_candidate flag on the node")
737 OFFLINE_OPT = cli_option("-O", "--offline", dest="offline", metavar=_YORNO,
738 choices=_YESNO, default=None,
739 help="Set the offline flag on the node")
741 DRAINED_OPT = cli_option("-D", "--drained", dest="drained", metavar=_YORNO,
742 choices=_YESNO, default=None,
743 help="Set the drained flag on the node")
745 ALLOCATABLE_OPT = cli_option("--allocatable", dest="allocatable",
746 choices=_YESNO, default=None, metavar=_YORNO,
747 help="Set the allocatable flag on a volume")
749 NOLVM_STORAGE_OPT = cli_option("--no-lvm-storage", dest="lvm_storage",
750 help="Disable support for lvm based instances"
752 action="store_false", default=True)
754 ENABLED_HV_OPT = cli_option("--enabled-hypervisors",
755 dest="enabled_hypervisors",
756 help="Comma-separated list of hypervisors",
757 type="string", default=None)
759 NIC_PARAMS_OPT = cli_option("-N", "--nic-parameters", dest="nicparams",
760 type="keyval", default={},
761 help="NIC parameters")
763 CP_SIZE_OPT = cli_option("-C", "--candidate-pool-size", default=None,
764 dest="candidate_pool_size", type="int",
765 help="Set the candidate pool size")
767 VG_NAME_OPT = cli_option("-g", "--vg-name", dest="vg_name",
768 help="Enables LVM and specifies the volume group"
769 " name (cluster-wide) for disk allocation [xenvg]",
770 metavar="VG", default=None)
772 YES_DOIT_OPT = cli_option("--yes-do-it", dest="yes_do_it",
773 help="Destroy cluster", action="store_true")
775 NOVOTING_OPT = cli_option("--no-voting", dest="no_voting",
776 help="Skip node agreement check (dangerous)",
777 action="store_true", default=False)
779 MAC_PREFIX_OPT = cli_option("-m", "--mac-prefix", dest="mac_prefix",
780 help="Specify the mac prefix for the instance IP"
781 " addresses, in the format XX:XX:XX",
785 MASTER_NETDEV_OPT = cli_option("--master-netdev", dest="master_netdev",
786 help="Specify the node interface (cluster-wide)"
787 " on which the master IP address will be added "
788 " [%s]" % constants.DEFAULT_BRIDGE,
790 default=constants.DEFAULT_BRIDGE)
793 GLOBAL_FILEDIR_OPT = cli_option("--file-storage-dir", dest="file_storage_dir",
794 help="Specify the default directory (cluster-"
795 "wide) for storing the file-based disks [%s]" %
796 constants.DEFAULT_FILE_STORAGE_DIR,
798 default=constants.DEFAULT_FILE_STORAGE_DIR)
800 NOMODIFY_ETCHOSTS_OPT = cli_option("--no-etc-hosts", dest="modify_etc_hosts",
801 help="Don't modify /etc/hosts",
802 action="store_false", default=True)
804 NOMODIFY_SSH_SETUP_OPT = cli_option("--no-ssh-init", dest="modify_ssh_setup",
805 help="Don't initialize SSH keys",
806 action="store_false", default=True)
808 ERROR_CODES_OPT = cli_option("--error-codes", dest="error_codes",
809 help="Enable parseable error messages",
810 action="store_true", default=False)
812 NONPLUS1_OPT = cli_option("--no-nplus1-mem", dest="skip_nplusone_mem",
813 help="Skip N+1 memory redundancy tests",
814 action="store_true", default=False)
816 REBOOT_TYPE_OPT = cli_option("-t", "--type", dest="reboot_type",
817 help="Type of reboot: soft/hard/full",
818 default=constants.INSTANCE_REBOOT_HARD,
820 choices=list(constants.REBOOT_TYPES))
822 IGNORE_SECONDARIES_OPT = cli_option("--ignore-secondaries",
823 dest="ignore_secondaries",
824 default=False, action="store_true",
825 help="Ignore errors from secondaries")
827 NOSHUTDOWN_OPT = cli_option("--noshutdown", dest="shutdown",
828 action="store_false", default=True,
829 help="Don't shutdown the instance (unsafe)")
831 TIMEOUT_OPT = cli_option("--timeout", dest="timeout", type="int",
832 default=constants.DEFAULT_SHUTDOWN_TIMEOUT,
833 help="Maximum time to wait")
835 SHUTDOWN_TIMEOUT_OPT = cli_option("--shutdown-timeout",
836 dest="shutdown_timeout", type="int",
837 default=constants.DEFAULT_SHUTDOWN_TIMEOUT,
838 help="Maximum time to wait for instance shutdown")
840 EARLY_RELEASE_OPT = cli_option("--early-release",
841 dest="early_release", default=False,
843 help="Release the locks on the secondary"
847 def _ParseArgs(argv, commands, aliases):
848   """Parser for the command line arguments.
850   This function parses the arguments and returns the function which
851   must be executed together with its (modified) arguments.
853   @param argv: the command line
854   @param commands: dictionary with special contents, see the design
855     doc for cmdline handling
856   @param aliases: dictionary with command aliases {'alias': 'target', ...}
862   binary = argv[0].split("/")[-1]
# Special-case "--version" before any command dispatch.
864   if len(argv) > 1 and argv[1] == "--version":
865     ToStdout("%s (ganeti) %s", binary, constants.RELEASE_VERSION)
866     # Quit right away. That way we don't have to care about this special
867     # argument. optparse.py does it the same.
# Unknown or missing command: print usage plus the formatted command list.
870   if len(argv) < 2 or not (argv[1] in commands or
872     # let's do a nice thing
873     sortedcmds = commands.keys()
876     ToStdout("Usage: %s {command} [options...] [argument...]", binary)
877     ToStdout("%s <command> --help to see details, or man %s", binary, binary)
880     # compute the max line length for cmd + usage
881     mlen = max([len(" %s" % cmd) for cmd in commands])
882     mlen = min(60, mlen) # should not get here...
884     # and format a nice command list
885     ToStdout("Commands:")
886     for cmd in sortedcmds:
887       cmdstr = " %s" % (cmd,)
# commands[cmd] is (func, args_def, parser_opts, usage, description);
# index 4 is the help text (see the unpacking below).
888       help_text = commands[cmd][4]
# Wrap help text to fit next to the left-hand command column.
889       help_lines = textwrap.wrap(help_text, 79 - 3 - mlen)
890       ToStdout("%-*s - %s", mlen, cmdstr, help_lines.pop(0))
891       for line in help_lines:
892         ToStdout("%-*s %s", mlen, "", line)
896     return None, None, None
898   # get command, unalias it, and look it up in commands
902       raise errors.ProgrammerError("Alias '%s' overrides an existing"
905     if aliases[cmd] not in commands:
906       raise errors.ProgrammerError("Alias '%s' maps to non-existing"
907                                    " command '%s'" % (cmd, aliases[cmd]))
# Build a per-command OptionParser; the generic dry-run and debug options
# are appended to every command's own option list.
911   func, args_def, parser_opts, usage, description = commands[cmd]
912   parser = OptionParser(option_list=parser_opts + [_DRY_RUN_OPT, DEBUG_OPT],
913                         description=description,
914                         formatter=TitledHelpFormatter(),
915                         usage="%%prog %s %s" % (cmd, usage))
916   parser.disable_interspersed_args()
917   options, args = parser.parse_args()
# Validate positional argument counts; (None, None, None) signals failure.
919   if not _CheckArguments(cmd, args_def, args):
920     return None, None, None
922   return func, options, args
925 def _CheckArguments(cmd, args_def, args):
926   """Verifies the arguments using the argument definition.
930   1. Abort with error if values specified by user but none expected.
932   1. For each argument in definition
934     1. Keep running count of minimum number of values (min_count)
935     1. Keep running count of maximum number of values (max_count)
936     1. If it has an unlimited number of values
938       1. Abort with error if it's not the last argument in the definition
940   1. If last argument has limited number of values
942     1. Abort with error if number of values doesn't match or is too large
944   1. Abort with error if user didn't pass enough values (min_count)
947   if args and not args_def:
948     ToStderr("Error: Command %s expects no arguments", cmd)
955   last_idx = len(args_def) - 1
# Accumulate min/max counts over the argument definitions; None means
# "unbounded".  Only the last definition may have max=None.
957   for idx, arg in enumerate(args_def):
958     if min_count is None:
960     elif arg.min is not None:
963     if max_count is None:
965     elif arg.max is not None:
969       check_max = (arg.max is not None)
971     elif arg.max is None:
972       raise errors.ProgrammerError("Only the last argument can have max=None")
975   # Command with exact number of arguments
976   if (min_count is not None and max_count is not None and
977       min_count == max_count and len(args) != min_count):
978     ToStderr("Error: Command %s expects %d argument(s)", cmd, min_count)
981   # Command with limited number of arguments
982   if max_count is not None and len(args) > max_count:
983     ToStderr("Error: Command %s expects only %d argument(s)",
987   # Command with some required arguments
988   if min_count is not None and len(args) < min_count:
989     ToStderr("Error: Command %s expects at least %d argument(s)",
996 def SplitNodeOption(value):
997   """Splits the value of a --node option.
# "pnode:snode" -> [pnode, snode]; otherwise -> (value, None), i.e. no
# secondary node given.
1000   if value and ':' in value:
1001     return value.split(':', 1)
1003     return (value, None)
1006 def CalculateOSNames(os_name, os_variants):
1007   """Calculates all the names an OS can be called, according to its variants.
1009   @type os_name: string
1010   @param os_name: base name of the os
1011   @type os_variants: list or None
1012   @param os_variants: list of supported variants
1014   @return: list of valid names
# Variant names are formed as "<os_name>+<variant>".
1018     return ['%s+%s' % (os_name, v) for v in os_variants]
1024 def wrapper(*args, **kwargs):
1027 return fn(*args, **kwargs)
1033 def AskUser(text, choices=None):
1034   """Ask the user a question.
1036   @param text: the question to ask
1038   @param choices: list with elements tuples (input_char, return_value,
1039     description); if not given, it will default to: [('y', True,
1040     'Perform the operation'), ('n', False, 'Do not perform the operation')];
1041     note that the '?' char is reserved for help
1043   @return: one of the return values from the choices list; if input is
1044     not possible (i.e. not running with a tty, we return the last
1049     choices = [('y', True, 'Perform the operation'),
1050                ('n', False, 'Do not perform the operation')]
1051   if not choices or not isinstance(choices, list):
1052     raise errors.ProgrammerError("Invalid choices argument to AskUser")
1053   for entry in choices:
1054     if not isinstance(entry, tuple) or len(entry) < 3 or entry[0] == '?':
1055       raise errors.ProgrammerError("Invalid choices element to AskUser")
# Default answer (tty unavailable / elided EOF paths): last choice's value.
1057   answer = choices[-1][1]
# Re-wrap the question text to 70 columns, preserving line breaks.
1059     for line in text.splitlines():
1060       new_text.append(textwrap.fill(line, 70, replace_whitespace=False))
1061     text = "\n".join(new_text)
# Talk to the controlling terminal directly (Python 2 file() builtin),
# so prompts work even when stdin/stdout are redirected.
1063     f = file("/dev/tty", "a+")
# Build the prompt: choice characters joined by '/', default in brackets.
1067     chars = [entry[0] for entry in choices]
1068     chars[-1] = "[%s]" % chars[-1]
1070     maps = dict([(entry[0], entry[1]) for entry in choices])
1074       f.write("/".join(chars))
# readline(2): at most two chars, i.e. the answer character plus newline.
1076       line = f.readline(2).strip().lower()
# '?' (reserved above) prints each choice with its description.
1081         for entry in choices:
1082           f.write(" %s - %s\n" % (entry[0], entry[2]))
1090 class JobSubmittedException(Exception):
1091   """Job was submitted, client should exit.
1093   This exception has one argument, the ID of the job that was
1094   submitted. The handler should print this ID.
1096   This is not an error, just a structured way to exit from clients.
1101 def SendJob(ops, cl=None):
1102   """Function to submit an opcode without waiting for the results.
1105   @param ops: list of opcodes
1106   @type cl: luxi.Client
1107   @param cl: the luxi client to use for communicating with the master;
1108     if None, a new client will be created
# Submit and return immediately with the new job's ID (no polling).
1114   job_id = cl.SubmitJob(ops)
1119 def PollJob(job_id, cl=None, feedback_fn=None):
1120   """Function to poll for the result of a job.
1122   @type job_id: job identified
1123   @param job_id: the job to poll for results
1124   @type cl: luxi.Client
1125   @param cl: the luxi client to use for communicating with the master;
1126     if None, a new client will be created
# Track previous state so WaitForJobChangeOnce can long-poll for changes.
1132   prev_job_info = None
1133   prev_logmsg_serial = None
# Remember which "still waiting" notices were already printed, to avoid
# repeating them on every poll iteration.
1137   notified_queued = False
1138   notified_waitlock = False
1141     result = cl.WaitForJobChangeOnce(job_id, ["status"], prev_job_info,
1144       # job not found, go away!
1145       raise errors.JobLost("Job with id %s lost" % job_id)
1146     elif result == constants.JOB_NOTCHANGED:
# Only print progress notices when no feedback callback is installed.
1147       if status is not None and not callable(feedback_fn):
1148         if status == constants.JOB_STATUS_QUEUED and not notified_queued:
1149           ToStderr("Job %s is waiting in queue", job_id)
1150           notified_queued = True
1151         elif status == constants.JOB_STATUS_WAITLOCK and not notified_waitlock:
1152           ToStderr("Job %s is trying to acquire all necessary locks", job_id)
1153           notified_waitlock = True
1158     # Split result, a tuple of (field values, log entries)
1159     (job_info, log_entries) = result
1160     (status, ) = job_info
# Forward each new log entry either to the callback or to stdout.
1163       for log_entry in log_entries:
1164         (serial, timestamp, _, message) = log_entry
1165         if callable(feedback_fn):
1166           feedback_fn(log_entry[1:])
1168           encoded = utils.SafeEncode(message)
1169           ToStdout("%s %s", time.ctime(utils.MergeTime(timestamp)), encoded)
# Advance the log cursor to the highest serial seen so far.
1170       prev_logmsg_serial = max(prev_logmsg_serial, serial)
1172     # TODO: Handle canceled and archived jobs
# A terminal status ends the polling loop.
1173     elif status in (constants.JOB_STATUS_SUCCESS,
1174                     constants.JOB_STATUS_ERROR,
1175                     constants.JOB_STATUS_CANCELING,
1176                     constants.JOB_STATUS_CANCELED):
1179     prev_job_info = job_info
# Fetch the final per-opcode status/results for the finished job.
1181   jobs = cl.QueryJobs([job_id], ["status", "opstatus", "opresult"])
1183     raise errors.JobLost("Job with id %s lost" % job_id)
1185   status, opstatus, result = jobs[0]
1186   if status == constants.JOB_STATUS_SUCCESS:
1188   elif status in (constants.JOB_STATUS_CANCELING,
1189                   constants.JOB_STATUS_CANCELED):
1190     raise errors.OpExecError("Job was canceled")
# On error, locate the first failed opcode and re-raise its encoded
# exception if possible (errors.MaybeRaise), otherwise wrap the message.
1193     for idx, (status, msg) in enumerate(zip(opstatus, result)):
1194       if status == constants.OP_STATUS_SUCCESS:
1196       elif status == constants.OP_STATUS_ERROR:
1197         errors.MaybeRaise(msg)
1199           raise errors.OpExecError("partial failure (opcode %d): %s" %
1202           raise errors.OpExecError(str(msg))
1203     # default failure mode
1204     raise errors.OpExecError(result)
1207 def SubmitOpCode(op, cl=None, feedback_fn=None, opts=None):
1208   """Legacy function to submit an opcode.
1210   This is just a simple wrapper over the construction of the processor
1211   instance. It should be extended to better handle feedback and
1212   interaction functions.
# Apply the generic options (debug/dry-run) before submitting.
1218   SetGenericOpcodeOpts([op], opts)
1220   job_id = SendJob([op], cl)
# Block until the single-opcode job finishes, then return its result.
1222   op_results = PollJob(job_id, cl=cl, feedback_fn=feedback_fn)
1224   return op_results[0]
1227 def SubmitOrSend(op, opts, cl=None, feedback_fn=None):
1228   """Wrapper around SubmitOpCode or SendJob.
1230   This function will decide, based on the 'opts' parameter, whether to
1231   submit and wait for the result of the opcode (and return it), or
1232   whether to just send the job and print its identifier. It is used in
1233   order to simplify the implementation of the '--submit' option.
1235   It will also process the opcodes if we're sending it via SendJob
1236   (otherwise SubmitOpCode does it).
1239   if opts and opts.submit_only:
1241     SetGenericOpcodeOpts(job, opts)
1242     job_id = SendJob(job, cl=cl)
# Not an error: signals the caller to print the job ID and exit.
1243     raise JobSubmittedException(job_id)
1245     return SubmitOpCode(op, cl=cl, feedback_fn=feedback_fn, opts=opts)
1248 def SetGenericOpcodeOpts(opcode_list, options):
1249   """Processor for generic options.
1251   This function updates the given opcodes based on generic command
1252   line options (like debug, dry-run, etc.).
1254   @param opcode_list: list of opcodes
1255   @param options: command line options or None
1256   @return: None (in-place modification)
# Copy the generic flags from the parsed options onto every opcode.
1261   for op in opcode_list:
1262     op.dry_run = options.dry_run
1263     op.debug_level = options.debug
# Body fragment of GetClient() — the enclosing "def GetClient():" line and
# the opening "try:" are elided from this listing.  Builds a luxi client;
# when no master daemon answers, it inspects the local ssconf files to
# produce a more helpful error: either "not part of a cluster" or "this is
# not the master node".
1267 # TODO: Cache object?
1269 client = luxi.Client()
1270 except luxi.NoMasterError:
1271 ss = ssconf.SimpleStore()
1273 # Try to read ssconf file
# If ssconf itself is unreadable, this machine is not (properly) part of
# a cluster at all.
1276 except errors.ConfigurationError:
1277 raise errors.OpPrereqError("Cluster not initialized or this machine is"
1278 " not part of a cluster")
# ssconf was readable, so we are in a cluster: check whether the problem
# is simply that the command was run on a non-master node.
1280 master, myself = ssconf.GetMasterAndMyself(ss=ss)
1281 if master != myself:
1282 raise errors.OpPrereqError("This is not the master node, please connect"
1283 " to node '%s' and rerun the command" %
1289 def FormatError(err):
1290 """Return a formatted error message for a given error.
1292 This function takes an exception instance and returns a tuple
1293 consisting of two values: first, the recommended exit code, and
1294 second, a string describing the error message (not
1295 newline-terminated).
# NOTE(review): the initialization of 'retcode', 'obuf' (a StringIO) and
# 'msg' (presumably str(err)) is on elided lines of this listing.
# The isinstance chain below is ordered most-specific first: concrete
# errors.GenericError subclasses are matched before the GenericError
# catch-all at line 1347.
1301 if isinstance(err, errors.ConfigurationError):
1302 txt = "Corrupt configuration file: %s" % msg
1304 obuf.write(txt + "\n")
1305 obuf.write("Aborting.")
1307 elif isinstance(err, errors.HooksAbort):
1308 obuf.write("Failure: hooks execution failed:\n")
# err.args[0] is a list of (node, script, output) tuples describing each
# failed hook.
1309 for node, script, out in err.args[0]:
1311 obuf.write(" node: %s, script: %s, output: %s\n" %
1312 (node, script, out))
# Branch for hooks that produced no output (the guarding condition is on
# an elided line).
1314 obuf.write(" node: %s, script: %s (no output)\n" %
1316 elif isinstance(err, errors.HooksFailure):
1317 obuf.write("Failure: hooks general failure: %s" % msg)
1318 elif isinstance(err, errors.ResolverError):
# Special-case resolving our own hostname vs. a remote one.
1319 this_host = utils.HostInfo.SysName()
1320 if err.args[0] == this_host:
1321 msg = "Failure: can't resolve my own hostname ('%s')"
1323 msg = "Failure: can't resolve hostname '%s'"
1324 obuf.write(msg % err.args[0])
1325 elif isinstance(err, errors.OpPrereqError):
# Two-argument OpPrereqError carries (details, error-type).
1326 if len(err.args) == 2:
1327 obuf.write("Failure: prerequisites not met for this"
1328 " operation:\nerror type: %s, error details:\n%s" %
1329 (err.args[1], err.args[0]))
1331 obuf.write("Failure: prerequisites not met for this"
1332 " operation:\n%s" % msg)
1333 elif isinstance(err, errors.OpExecError):
1334 obuf.write("Failure: command execution error:\n%s" % msg)
1335 elif isinstance(err, errors.TagError):
1336 obuf.write("Failure: invalid tag(s) given:\n%s" % msg)
1337 elif isinstance(err, errors.JobQueueDrainError):
1338 obuf.write("Failure: the job queue is marked for drain and doesn't"
1339 " accept new requests\n")
1340 elif isinstance(err, errors.JobQueueFull):
1341 obuf.write("Failure: the job queue is full and doesn't accept new"
1342 " job submissions until old jobs are archived\n")
1343 elif isinstance(err, errors.TypeEnforcementError):
1344 obuf.write("Parameter Error: %s" % msg)
1345 elif isinstance(err, errors.ParameterError):
1346 obuf.write("Failure: unknown/wrong parameter name '%s'" % msg)
# Catch-all for any other Ganeti-defined error.
1347 elif isinstance(err, errors.GenericError):
1348 obuf.write("Unhandled Ganeti error: %s" % msg)
1349 elif isinstance(err, luxi.NoMasterError):
1350 obuf.write("Cannot communicate with the master daemon.\nIs it running"
1351 " and listening for connections?")
1352 elif isinstance(err, luxi.TimeoutError):
1353 obuf.write("Timeout while talking to the master daemon. Error:\n"
1355 elif isinstance(err, luxi.ProtocolError):
1356 obuf.write("Unhandled protocol error while talking to the master daemon:\n"
# --submit path: not a real error, just report the submitted job's id.
1358 elif isinstance(err, JobSubmittedException):
1359 obuf.write("JobID: %s\n" % err.args[0])
# Final fallback for non-Ganeti exceptions (the 'else' line is elided).
1362 obuf.write("Unhandled exception: %s" % msg)
# Strip the trailing newline so callers control line termination.
1363 return retcode, obuf.getvalue().rstrip('\n')
1366 def GenericMain(commands, override=None, aliases=None):
1367 """Generic main function for all the gnt-* commands.
1370 - commands: a dictionary with a special structure, see the design doc
1371 for command line handling.
1372 - override: if not None, we expect a dictionary with keys that will
1373 override command line options; this can be used to pass
1374 options from the scripts to generic functions
1375 - aliases: dictionary with command aliases {'alias': 'target', ...}
1378 # save the program name and the entire command line for later logging
# "gnt-foo cmd" is logged as the binary name, the rest as arguments.
1380 binary = os.path.basename(sys.argv[0]) or sys.argv[0]
1381 if len(sys.argv) >= 2:
1382 binary += " " + sys.argv[1]
1383 old_cmdline = " ".join(sys.argv[2:])
# Fallback when sys.argv is unusable (guarding condition elided here).
1387 binary = "<unknown program>"
# Parse the command line; _ParseArgs maps the sub-command to a handler.
1394 func, options, args = _ParseArgs(sys.argv, commands, aliases)
1395 except errors.ParameterError, err:
1396 result, err_msg = FormatError(err)
1400 if func is None: # parse error
# Apply script-supplied option overrides on top of the parsed options.
1403 if override is not None:
1404 for key, val in override.iteritems():
1405 setattr(options, key, val)
# Log to the commands log file and mirror to stderr.
1407 utils.SetupLogging(constants.LOG_COMMANDS, debug=options.debug,
1408 stderr_logging=True, program=binary)
1411 logging.info("run with arguments '%s'", old_cmdline)
1413 logging.info("run with no arguments")
# Run the selected sub-command; its return value becomes the exit code.
1416 result = func(options, args)
# JobSubmittedException is the (non-error) --submit path; FormatError
# turns all of these into an exit code plus a printable message.
1417 except (errors.GenericError, luxi.ProtocolError,
1418 JobSubmittedException), err:
1419 result, err_msg = FormatError(err)
1420 logging.exception("Error during command processing")
1426 def GenericInstanceCreate(mode, opts, args):
1427 """Add an instance to the cluster via either creation or import.
1429 @param mode: constants.INSTANCE_CREATE or constants.INSTANCE_IMPORT
1430 @param opts: the command line options selected by the user
1432 @param args: should contain only one element, the new instance name
1434 @return: the desired exit code
# Split "pnode:snode" into primary/secondary node names.
1439 (pnode, snode) = SplitNodeOption(opts.node)
1444 hypervisor, hvparams = opts.hypervisor
# --- NIC handling: opts.nics is a list of (index, dict) pairs ---
# Size the nics list from the highest index mentioned.
1448 nic_max = max(int(nidx[0]) + 1 for nidx in opts.nics)
1449 except ValueError, err:
1450 raise errors.OpPrereqError("Invalid NIC index passed: %s" % str(err))
# NOTE(review): [{}] * n repeats the SAME dict object n times; this is
# only safe if the placeholder dicts are never mutated downstream —
# confirm against the consumers of the opcode.
1451 nics = [{}] * nic_max
1452 for nidx, ndict in opts.nics:
1454 if not isinstance(ndict, dict):
1455 msg = "Invalid nic/%d value: expected dict, got %s" % (nidx, ndict)
1456 raise errors.OpPrereqError(msg)
1462 # default of one nic, all auto
# --- Disk handling: mutually exclusive --disk / -s (sd_size) options ---
1465 if opts.disk_template == constants.DT_DISKLESS:
1466 if opts.disks or opts.sd_size is not None:
1467 raise errors.OpPrereqError("Diskless instance but disk"
1468 " information passed")
1471 if not opts.disks and not opts.sd_size:
1472 raise errors.OpPrereqError("No disk information specified")
1473 if opts.disks and opts.sd_size is not None:
1474 raise errors.OpPrereqError("Please use either the '--disk' or"
# Normalize the legacy single-size option into the --disk format.
1476 if opts.sd_size is not None:
1477 opts.disks = [(0, {"size": opts.sd_size})]
1479 disk_max = max(int(didx[0]) + 1 for didx in opts.disks)
1480 except ValueError, err:
1481 raise errors.OpPrereqError("Invalid disk index passed: %s" % str(err))
# NOTE(review): same shared-dict caveat as for 'nics' above.
1482 disks = [{}] * disk_max
1483 for didx, ddict in opts.disks:
1485 if not isinstance(ddict, dict):
1486 msg = "Invalid disk/%d value: expected dict, got %s" % (didx, ddict)
1487 raise errors.OpPrereqError(msg)
1488 elif "size" not in ddict:
1489 raise errors.OpPrereqError("Missing size for disk %d" % didx)
# Convert human-readable sizes ("10g") to mebibytes, in place.
1491 ddict["size"] = utils.ParseUnit(ddict["size"])
1492 except ValueError, err:
1493 raise errors.OpPrereqError("Invalid disk size for disk %d: %s" %
# Validate/coerce backend and hypervisor parameter types early.
1497 utils.ForceDictType(opts.beparams, constants.BES_PARAMETER_TYPES)
1498 utils.ForceDictType(hvparams, constants.HVS_PARAMETER_TYPES)
# Mode-specific source parameters (create-from-OS vs. import-from-export).
1500 if mode == constants.INSTANCE_CREATE:
1505 elif mode == constants.INSTANCE_IMPORT:
1508 src_node = opts.src_node
1509 src_path = opts.src_dir
1511 raise errors.ProgrammerError("Invalid creation mode %s" % mode)
# Assemble the opcode; keyword set elided in part in this listing.
1513 op = opcodes.OpCreateInstance(instance_name=instance,
1515 disk_template=opts.disk_template,
1517 pnode=pnode, snode=snode,
1518 ip_check=opts.ip_check,
1519 name_check=opts.name_check,
1520 wait_for_sync=opts.wait_for_sync,
1521 file_storage_dir=opts.file_storage_dir,
1522 file_driver=opts.file_driver,
1523 iallocator=opts.iallocator,
1524 hypervisor=hypervisor,
1526 beparams=opts.beparams,
# Honour --submit: either wait for the result or just print the job id.
1533 SubmitOrSend(op, opts)
1537 def GenerateTable(headers, fields, separator, data,
1538 numfields=None, unitfields=None,
1540 """Prints a table with headers and different fields.
1543 @param headers: dictionary mapping field names to headers for
1546 @param fields: the field names corresponding to each row in
1548 @param separator: the separator to be used; if this is None,
1549 the default 'smart' algorithm is used which computes optimal
1550 field width, otherwise just the separator is used between
1553 @param data: a list of lists, each sublist being one row to be output
1554 @type numfields: list
1555 @param numfields: a list with the fields that hold numeric
1556 values and thus should be right-aligned
1557 @type unitfields: list
1558 @param unitfields: a list with the fields that hold numeric
1559 values that should be formatted with the units field
1560 @type units: string or None
1561 @param units: the units we should use for formatting, or None for
1562 automatic choice (human-readable for non-separator usage, otherwise
1563 megabytes); this is a one-letter string
# Normalize None into empty field sets (the assignments themselves are on
# elided lines of this listing).
1572 if numfields is None:
1574 if unitfields is None:
# FieldSet supports wildcard matching of field names via Matches().
1577 numfields = utils.FieldSet(*numfields) # pylint: disable-msg=W0142
1578 unitfields = utils.FieldSet(*unitfields) # pylint: disable-msg=W0142
# Build one printf-style conversion per column.
1581 for field in fields:
1582 if headers and field not in headers:
1583 # TODO: handle better unknown fields (either revert to old
1584 # style of raising exception, or deal more intelligently with
# Unknown field: fall back to using the field name as its own header.
1586 headers[field] = field
1587 if separator is not None:
# With an explicit separator no padding is done at all.
1588 format_fields.append("%s")
1589 elif numfields.Matches(field):
# Numeric columns are right-aligned; '*' takes the width argument.
1590 format_fields.append("%*s")
1592 format_fields.append("%-*s")
1594 if separator is None:
# mlens accumulates the maximum width seen per column.
1595 mlens = [0 for name in fields]
1596 format = ' '.join(format_fields)
# Escape literal % in the separator so it survives the later % operator.
1598 format = separator.replace("%", "%%").join(format_fields)
# First pass over the data: stringify values (and unit-format where
# requested), tracking column widths for the smart layout.
1603 for idx, val in enumerate(row):
1604 if unitfields.Matches(fields[idx]):
# Non-numeric values in a unit field are passed through unformatted.
1607 except (TypeError, ValueError):
1610 val = row[idx] = utils.FormatUnit(val, units)
1611 val = row[idx] = str(val)
1612 if separator is None:
1613 mlens[idx] = max(mlens[idx], len(val))
# Emit the header row, still updating widths in smart mode.
1618 for idx, name in enumerate(fields):
1620 if separator is None:
1621 mlens[idx] = max(mlens[idx], len(hdr))
1622 args.append(mlens[idx])
1624 result.append(format % tuple(args))
1626 if separator is None:
1627 assert len(mlens) == len(fields)
# Let the last column run free unless it is right-aligned (numeric).
1629 if fields and not numfields.Matches(fields[-1]):
# NOTE(review): purpose of the '-' row is not fully visible here
# (surrounding lines elided) — presumably a separator line variant.
1635 line = ['-' for _ in fields]
1636 for idx in range(len(fields)):
1637 if separator is None:
1638 args.append(mlens[idx])
1639 args.append(line[idx])
1640 result.append(format % tuple(args))
1645 def FormatTimestamp(ts):
1646 """Formats a given timestamp.
1649 @param ts: a timeval-type timestamp, a tuple of seconds and microseconds
1652 @return: a string with the formatted timestamp
# Reject anything that is not a (seconds, microseconds) pair; the
# fallback return value and the "sec, usec = ts" unpacking are on elided
# lines of this listing.
1655 if not isinstance (ts, (tuple, list)) or len(ts) != 2:
# %F %T == "YYYY-MM-DD HH:MM:SS"; microseconds appended zero-padded.
1658 return time.strftime("%F %T", time.localtime(sec)) + ".%06d" % usec
1661 def ParseTimespec(value):
1662 """Parse a time specification.
1664 The following suffixes will be recognized:
1672 Without any suffix, the value will be taken to be in seconds.
# Empty input is rejected up front (the guarding "if not value:" is on an
# elided line of this listing).
1677 raise errors.OpPrereqError("Empty time specification passed")
# No recognized suffix: interpret the whole string as plain seconds
# (the conversion itself is on elided lines, wrapped in try/except).
1685 if value[-1] not in suffix_map:
1688 except (TypeError, ValueError):
1689 raise errors.OpPrereqError("Invalid time specification '%s'" % value)
# Recognized suffix: look up its multiplier, then strip it (the
# "value = value[:-1]" strip is on an elided line).
1691 multiplier = suffix_map[value[-1]]
1693 if not value: # no data left after stripping the suffix
1694 raise errors.OpPrereqError("Invalid time specification (only"
# Scale the numeric part into seconds.
1697 value = int(value) * multiplier
1698 except (TypeError, ValueError):
1699 raise errors.OpPrereqError("Invalid time specification '%s'" % value)
1703 def GetOnlineNodes(nodes, cl=None, nowarn=False):
1704 """Returns the names of online nodes.
1706 This function will also log a warning on stderr with the names of
1709 @param nodes: if not empty, use only this subset of nodes (minus the
1711 @param cl: if not None, luxi client to use
1712 @type nowarn: boolean
1713 @param nowarn: by default, this function will output a note with the
1714 offline nodes that are skipped; if this parameter is True the
1715 note is not displayed
# Query name + offline flag for the requested nodes (default-client
# handling is on elided lines of this listing).
1721 result = cl.QueryNodes(names=nodes, fields=["name", "offline"],
# row layout follows the requested fields: row[0]=name, row[1]=offline.
1723 offline = [row[0] for row in result if row[1]]
1724 if offline and not nowarn:
1725 ToStderr("Note: skipping offline node(s): %s" % utils.CommaJoin(offline))
# Return only the names of nodes not marked offline.
1726 return [row[0] for row in result if not row[1]]
1729 def _ToStream(stream, txt, *args):
1730 """Write a message to a stream, bypassing the logging system
1732 @type stream: file object
1733 @param stream: the file to which we should write
1735 @param txt: the message
# With positional args, txt is treated as a %-format string (the no-args
# branch and trailing-newline write are on elided lines of this listing).
1740 stream.write(txt % args)
1747 def ToStdout(txt, *args):
1748 """Write a message to stdout only, bypassing the logging system
1750 This is just a wrapper over _ToStream.
1753 @param txt: the message
# Delegate to the shared stream writer with sys.stdout.
1756 _ToStream(sys.stdout, txt, *args)
1759 def ToStderr(txt, *args):
1760 """Write a message to stderr only, bypassing the logging system
1762 This is just a wrapper over _ToStream.
1765 @param txt: the message
# Delegate to the shared stream writer with sys.stderr.
1768 _ToStream(sys.stderr, txt, *args)
1771 class JobExecutor(object):
1772 """Class which manages the submission and execution of multiple jobs.
1774 Note that instances of this class should not be reused between
# NOTE(review): several attribute initializations (self.queue, self.jobs,
# self.cl, self.opts) are on elided lines of this listing; the methods
# below rely on them.
1778 def __init__(self, cl=None, verbose=True, opts=None):
# verbose controls the informational ToStdout messages in GetResults.
1783 self.verbose = verbose
1787 def QueueJob(self, name, *ops):
1788 """Record a job for later submit.
1791 @param name: a description of the job, will be used in WaitJobSet
# Apply generic CLI flags to each opcode, then queue (name, ops) for a
# later batched SubmitManyJobs call.
1793 SetGenericOpcodeOpts(ops, self.opts)
1794 self.queue.append((name, ops))
1796 def SubmitPending(self):
1797 """Submit all pending jobs.
# One luxi round-trip for the whole queue; each result is a
# (status, data) pair — data is the job id on success, error otherwise.
1800 results = self.cl.SubmitManyJobs([row[1] for row in self.queue])
1801 for ((status, data), (name, _)) in zip(results, self.queue):
1802 self.jobs.append((status, data, name))
1804 def GetResults(self):
1805 """Wait for and return the results of all jobs.
1808 @return: list of tuples (success, job results), in the same order
1809 as the submitted jobs; if a job has failed, instead of the result
1810 there will be the error message
# Lazily submit anything still queued (guard condition elided here).
1814 self.SubmitPending()
# Announce the ids of the successfully submitted jobs when verbose.
1817 ok_jobs = [row[1] for row in self.jobs if row[0]]
1819 ToStdout("Submitted jobs %s", utils.CommaJoin(ok_jobs))
1820 for submit_status, jid, name in self.jobs:
# Submission failures: 'jid' actually holds the error message here.
1821 if not submit_status:
1822 ToStderr("Failed to submit job for %s: %s", name, jid)
1823 results.append((False, jid))
1826 ToStdout("Waiting for job %s for %s...", jid, name)
# Poll the job to completion; a failure is converted into its
# formatted error message instead of propagating.
1828 job_result = PollJob(jid, cl=self.cl)
1830 except (errors.GenericError, luxi.ProtocolError), err:
1831 _, job_result = FormatError(err)
1833 # the error message will always be shown, verbose or not
1834 ToStderr("Job %s for %s has failed: %s", jid, name, job_result)
1836 results.append((success, job_result))
1839 def WaitOrShow(self, wait):
1840 """Wait for job results or only print the job IDs.
1843 @param wait: whether to wait or not
1847 return self.GetResults()
# Non-waiting mode: submit if needed, then print "job-id: name" for
# successes and an error line for failed submissions.
1850 self.SubmitPending()
1851 for status, result, name in self.jobs:
1853 ToStdout("%s: %s", result, name)
1855 ToStderr("Failure for %s: %s", name, result)