4 # Copyright (C) 2006, 2007 Google Inc.
6 # This program is free software; you can redistribute it and/or modify
7 # it under the terms of the GNU General Public License as published by
8 # the Free Software Foundation; either version 2 of the License, or
9 # (at your option) any later version.
11 # This program is distributed in the hope that it will be useful, but
12 # WITHOUT ANY WARRANTY; without even the implied warranty of
13 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 # General Public License for more details.
16 # You should have received a copy of the GNU General Public License
17 # along with this program; if not, write to the Free Software
18 # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
22 """Module dealing with command line parsing"""
30 from cStringIO import StringIO
32 from ganeti import utils
33 from ganeti import errors
34 from ganeti import constants
35 from ganeti import opcodes
36 from ganeti import luxi
37 from ganeti import ssconf
38 from ganeti import rpc
40 from optparse import (OptionParser, TitledHelpFormatter,
41 Option, OptionValueError)
45 # Command line options
63 "FILESTORE_DRIVER_OPT",
72 "IGNORE_FAILURES_OPT",
73 "IGNORE_SECONDARIES_OPT",
86 "NOMODIFY_ETCHOSTS_OPT",
87 "NOMODIFY_SSH_SETUP_OPT",
107 "SHUTDOWN_TIMEOUT_OPT",
120 # Generic functions for CLI programs
122 "GenericInstanceCreate",
126 "JobSubmittedException",
131 # Formatting functions
132 "ToStderr", "ToStdout",
141 # command line options support infrastructure
142 "ARGS_MANY_INSTANCES",
156 "OPT_COMPL_INST_ADD_NODES",
157 "OPT_COMPL_MANY_NODES",
158 "OPT_COMPL_ONE_IALLOCATOR",
159 "OPT_COMPL_ONE_INSTANCE",
160 "OPT_COMPL_ONE_NODE",
172 def __init__(self, min=0, max=None):
177 return ("<%s min=%s max=%s>" %
178 (self.__class__.__name__, self.min, self.max))
181 class ArgSuggest(_Argument):
182 """Suggesting argument.
184 Value can be any of the ones passed to the constructor.
187 def __init__(self, min=0, max=None, choices=None):
188 _Argument.__init__(self, min=min, max=max)
189 self.choices = choices
192 return ("<%s min=%s max=%s choices=%r>" %
193 (self.__class__.__name__, self.min, self.max, self.choices))
196 class ArgChoice(ArgSuggest):
199 Value can be any of the ones passed to the constructor. Like L{ArgSuggest},
200 but value must be one of the choices.
205 class ArgUnknown(_Argument):
206 """Unknown argument to program (e.g. determined at runtime).
211 class ArgInstance(_Argument):
212 """Instances argument.
217 class ArgNode(_Argument):
222 class ArgJobId(_Argument):
228 class ArgFile(_Argument):
229 """File path argument.
234 class ArgCommand(_Argument):
240 class ArgHost(_Argument):
247 ARGS_MANY_INSTANCES = [ArgInstance()]
248 ARGS_MANY_NODES = [ArgNode()]
249 ARGS_ONE_INSTANCE = [ArgInstance(min=1, max=1)]
250 ARGS_ONE_NODE = [ArgNode(min=1, max=1)]
254 def _ExtractTagsObject(opts, args):
255 """Extract the tag type object.
257 Note that this function will modify its args parameter.
260 if not hasattr(opts, "tag_type"):
261 raise errors.ProgrammerError("tag_type not passed to _ExtractTagsObject")
263 if kind == constants.TAG_CLUSTER:
265 elif kind == constants.TAG_NODE or kind == constants.TAG_INSTANCE:
267 raise errors.OpPrereqError("no arguments passed to the command")
271 raise errors.ProgrammerError("Unhandled tag type '%s'" % kind)
275 def _ExtendTags(opts, args):
276 """Extend the args if a source file has been given.
278 This function will extend the tags with the contents of the file
279 passed in the 'tags_source' attribute of the opts parameter. A file
280 named '-' will be replaced by stdin.
283 fname = opts.tags_source
289 new_fh = open(fname, "r")
292 # we don't use the nice 'new_data = [line.strip() for line in fh]'
293 # because of python bug 1633941
295 line = new_fh.readline()
298 new_data.append(line.strip())
301 args.extend(new_data)
304 def ListTags(opts, args):
305 """List the tags on a given object.
307 This is a generic implementation that knows how to deal with all
308 three cases of tag objects (cluster, node, instance). The opts
309 argument is expected to contain a tag_type field denoting what
310 object type we work on.
313 kind, name = _ExtractTagsObject(opts, args)
314 op = opcodes.OpGetTags(kind=kind, name=name)
315 result = SubmitOpCode(op)
316 result = list(result)
322 def AddTags(opts, args):
323 """Add tags on a given object.
325 This is a generic implementation that knows how to deal with all
326 three cases of tag objects (cluster, node, instance). The opts
327 argument is expected to contain a tag_type field denoting what
328 object type we work on.
331 kind, name = _ExtractTagsObject(opts, args)
332 _ExtendTags(opts, args)
334 raise errors.OpPrereqError("No tags to be added")
335 op = opcodes.OpAddTags(kind=kind, name=name, tags=args)
339 def RemoveTags(opts, args):
340 """Remove tags from a given object.
342 This is a generic implementation that knows how to deal with all
343 three cases of tag objects (cluster, node, instance). The opts
344 argument is expected to contain a tag_type field denoting what
345 object type we work on.
348 kind, name = _ExtractTagsObject(opts, args)
349 _ExtendTags(opts, args)
351 raise errors.OpPrereqError("No tags to be removed")
352 op = opcodes.OpDelTags(kind=kind, name=name, tags=args)
356 def check_unit(option, opt, value):
357 """OptParsers custom converter for units.
361 return utils.ParseUnit(value)
362 except errors.UnitParseError, err:
363 raise OptionValueError("option %s: %s" % (opt, err))
366 def _SplitKeyVal(opt, data):
367 """Convert a KeyVal string into a dict.
369 This function will convert a key=val[,...] string into a dict. Empty
370 values will be converted specially: keys which have the prefix 'no_'
371 will have the value=False and the prefix stripped, the others will
375 @param opt: a string holding the option name for which we process the
376 data, used in building error messages
378 @param data: a string of the format key=val,key=val,...
380 @return: {key=val, key=val}
381 @raises errors.ParameterError: if there are duplicate keys
386 for elem in data.split(","):
388 key, val = elem.split("=", 1)
390 if elem.startswith(NO_PREFIX):
391 key, val = elem[len(NO_PREFIX):], False
392 elif elem.startswith(UN_PREFIX):
393 key, val = elem[len(UN_PREFIX):], None
395 key, val = elem, True
397 raise errors.ParameterError("Duplicate key '%s' in option %s" %
403 def check_ident_key_val(option, opt, value):
404 """Custom parser for ident:key=val,key=val options.
406 This will store the parsed values as a tuple (ident, {key: val}). As such,
407 multiple uses of this option via action=append is possible.
411 ident, rest = value, ''
413 ident, rest = value.split(":", 1)
415 if ident.startswith(NO_PREFIX):
417 msg = "Cannot pass options when removing parameter groups: %s" % value
418 raise errors.ParameterError(msg)
419 retval = (ident[len(NO_PREFIX):], False)
420 elif ident.startswith(UN_PREFIX):
422 msg = "Cannot pass options when removing parameter groups: %s" % value
423 raise errors.ParameterError(msg)
424 retval = (ident[len(UN_PREFIX):], None)
426 kv_dict = _SplitKeyVal(opt, rest)
427 retval = (ident, kv_dict)
def check_key_val(option, opt, value):
  """Custom parser class for key=val,key=val options.

  This will store the parsed values as a dict {key: val}.

  @param option: the optparse Option instance (unused, required by the
      optparse type-checker calling convention)
  @param opt: the option name, used for error messages
  @param value: the raw "key=val,key=val" string to parse

  """
  return _SplitKeyVal(opt, value)
440 # completion_suggestion is normally a list. Using numeric values not evaluating
441 # to False for dynamic completion.
442 (OPT_COMPL_MANY_NODES,
444 OPT_COMPL_ONE_INSTANCE,
446 OPT_COMPL_ONE_IALLOCATOR,
447 OPT_COMPL_INST_ADD_NODES) = range(100, 106)
449 OPT_COMPL_ALL = frozenset([
450 OPT_COMPL_MANY_NODES,
452 OPT_COMPL_ONE_INSTANCE,
454 OPT_COMPL_ONE_IALLOCATOR,
455 OPT_COMPL_INST_ADD_NODES,
459 class CliOption(Option):
460 """Custom option class for optparse.
463 ATTRS = Option.ATTRS + [
464 "completion_suggest",
466 TYPES = Option.TYPES + (
471 TYPE_CHECKER = Option.TYPE_CHECKER.copy()
472 TYPE_CHECKER["identkeyval"] = check_ident_key_val
473 TYPE_CHECKER["keyval"] = check_key_val
474 TYPE_CHECKER["unit"] = check_unit
477 # optparse.py sets make_option, so we do it for our own option class, too
478 cli_option = CliOption
481 _YESNO = ("yes", "no")
484 DEBUG_OPT = cli_option("-d", "--debug", default=False,
486 help="Turn debugging on")
# Options controlling the tabular output of the various "list" commands.

NOHDR_OPT = cli_option("--no-headers", dest="no_headers",
                       action="store_true", default=False,
                       help="Don't display column headers")

SEP_OPT = cli_option("--separator", dest="separator",
                     action="store", default=None,
                     help=("Separator between output fields"
                           " (defaults to one space)"))

USEUNITS_OPT = cli_option("--units", dest="units",
                          choices=('h', 'm', 'g', 't'), default=None,
                          help="Specify units for output (one of hmgt)")

FIELDS_OPT = cli_option("-o", "--output", metavar="FIELDS",
                        dest="output", action="store", type="string",
                        help="Comma separated list of output fields")

# Generic flags shared by many commands.

FORCE_OPT = cli_option("-f", "--force", action="store_true",
                       dest="force", default=False,
                       help="Force the operation")

CONFIRM_OPT = cli_option("--yes", action="store_true",
                         dest="confirm", default=False,
                         help="Do not require confirmation")

TAG_SRC_OPT = cli_option("--from", dest="tags_source",
                         default=None, help="File with tag names")

SUBMIT_OPT = cli_option("--submit", action="store_true",
                        dest="submit_only", default=False,
                        help=("Submit the job and return the job ID, but"
                              " don't wait for the job to finish"))

SYNC_OPT = cli_option("--sync", action="store_true",
                      dest="do_locking", default=False,
                      help=("Grab locks while doing the queries"
                            " in order to ensure more consistent results"))
524 _DRY_RUN_OPT = cli_option("--dry-run", default=False,
526 help=("Do not execute the operation, just run the"
527 " check steps and verify it it could be"
530 VERBOSE_OPT = cli_option("-v", "--verbose", default=False,
532 help="Increase the verbosity of the operation")
534 DEBUG_SIMERR_OPT = cli_option("--debug-simulate-errors", default=False,
535 action="store_true", dest="simulate_errors",
536 help="Debugging option that makes the operation"
537 " treat most runtime checks as failed")
539 NWSYNC_OPT = cli_option("--no-wait-for-sync", dest="wait_for_sync",
540 default=True, action="store_false",
541 help="Don't wait for sync (DANGEROUS!)")
543 DISK_TEMPLATE_OPT = cli_option("-t", "--disk-template", dest="disk_template",
544 help="Custom disk setup (diskless, file,"
546 default=None, metavar="TEMPL",
547 choices=list(constants.DISK_TEMPLATES))
549 NONICS_OPT = cli_option("--no-nics", default=False, action="store_true",
550 help="Do not create any network cards for"
# File-based disk storage options (instance creation).
FILESTORE_DIR_OPT = cli_option("--file-storage-dir", dest="file_storage_dir",
                               help="Relative path under default cluster-wide"
                               " file storage dir to store file-based disks",
                               default=None, metavar="<DIR>")

FILESTORE_DRIVER_OPT = cli_option("--file-driver", dest="file_driver",
                                  help="Driver to use for image files",
                                  default="loop", metavar="<DRIVER>",
                                  choices=list(constants.FILE_DRIVER))

# Automatic node selection via an iallocator plugin; the value is
# shell-completed dynamically (see OPT_COMPL_ONE_IALLOCATOR).
IALLOCATOR_OPT = cli_option("-I", "--iallocator", metavar="<NAME>",
                            help="Select nodes for the instance automatically"
                            " using the <NAME> iallocator plugin",
                            default=None, type="string",
                            completion_suggest=OPT_COMPL_ONE_IALLOCATOR)
569 OS_OPT = cli_option("-o", "--os-type", dest="os", help="What OS to run",
571 completion_suggest=OPT_COMPL_ONE_OS)
573 FORCE_VARIANT_OPT = cli_option("--force-variant", dest="force_variant",
574 action="store_true", default=False,
575 help="Force an unknown variant")
# Parameter-group options parsed with the custom "keyval"/"identkeyval"
# types defined on CliOption above.
BACKEND_OPT = cli_option("-B", "--backend-parameters", dest="beparams",
                         type="keyval", default={},
                         help="Backend parameters")

# NOTE(review): the three options below all claim "-H"; presumably no
# single command registers more than one of them — confirm at call sites.
HVOPTS_OPT = cli_option("-H", "--hypervisor-parameters", type="keyval",
                        default={}, dest="hvparams",
                        help="Hypervisor parameters")

# Single "hypervisor:option=value,..." specification.
HYPERVISOR_OPT = cli_option("-H", "--hypervisor-parameters", dest="hypervisor",
                            help="Hypervisor and hypervisor options, in the"
                            " format hypervisor:option=value,option=value,...",
                            default=None, type="identkeyval")

# Repeatable variant accumulating one (hypervisor, dict) entry per use.
# NOTE(review): the mutable defaults ({} / []) are shared objects; benign
# for a one-shot CLI process, but worth keeping in mind.
HVLIST_OPT = cli_option("-H", "--hypervisor-parameters", dest="hvparams",
                        help="Hypervisor and hypervisor options, in the"
                        " format hypervisor:option=value,option=value,...",
                        default=[], action="append", type="identkeyval")
595 NOIPCHECK_OPT = cli_option("--no-ip-check", dest="ip_check", default=True,
596 action="store_false",
597 help="Don't check that the instance's IP"
600 NET_OPT = cli_option("--net",
601 help="NIC parameters", default=[],
602 dest="nics", action="append", type="identkeyval")
604 DISK_OPT = cli_option("--disk", help="Disk parameters", default=[],
605 dest="disks", action="append", type="identkeyval")
607 DISKIDX_OPT = cli_option("--disks", dest="disks", default=None,
608 help="Comma-separated list of disks"
609 " indices to act on (e.g. 0,2) (optional,"
610 " defaults to all disks)")
612 OS_SIZE_OPT = cli_option("-s", "--os-size", dest="sd_size",
613 help="Enforces a single-disk configuration using the"
614 " given disk size, in MiB unless a suffix is used",
615 default=None, type="unit", metavar="<size>")
617 IGNORE_CONSIST_OPT = cli_option("--ignore-consistency",
618 dest="ignore_consistency",
619 action="store_true", default=False,
620 help="Ignore the consistency of the disks on"
623 NONLIVE_OPT = cli_option("--non-live", dest="live",
624 default=True, action="store_false",
625 help="Do a non-live migration (this usually means"
626 " freeze the instance, save the state, transfer and"
627 " only then resume running on the secondary node)")
629 NODE_PLACEMENT_OPT = cli_option("-n", "--node", dest="node",
630 help="Target node and optional secondary node",
631 metavar="<pnode>[:<snode>]",
632 completion_suggest=OPT_COMPL_INST_ADD_NODES)
634 NODE_LIST_OPT = cli_option("-n", "--node", dest="nodes", default=[],
635 action="append", metavar="<node>",
636 help="Use only this node (can be used multiple"
637 " times, if not given defaults to all nodes)",
638 completion_suggest=OPT_COMPL_ONE_NODE)
640 SINGLE_NODE_OPT = cli_option("-n", "--node", dest="node", help="Target node",
642 completion_suggest=OPT_COMPL_ONE_NODE)
644 NOSTART_OPT = cli_option("--no-start", dest="start", default=True,
645 action="store_false",
646 help="Don't start the instance after creation")
648 SHOWCMD_OPT = cli_option("--show-cmd", dest="show_command",
649 action="store_true", default=False,
650 help="Show command instead of executing it")
652 CLEANUP_OPT = cli_option("--cleanup", dest="cleanup",
653 default=False, action="store_true",
654 help="Instead of performing the migration, try to"
655 " recover from a failed cleanup. This is safe"
656 " to run even if the instance is healthy, but it"
657 " will create extra replication traffic and "
658 " disrupt briefly the replication (like during the"
661 STATIC_OPT = cli_option("-s", "--static", dest="static",
662 action="store_true", default=False,
663 help="Only show configuration data, not runtime data")
665 ALL_OPT = cli_option("--all", dest="show_all",
666 default=False, action="store_true",
667 help="Show info on all instances on the cluster."
668 " This can take a long time to run, use wisely")
670 SELECT_OS_OPT = cli_option("--select-os", dest="select_os",
671 action="store_true", default=False,
672 help="Interactive OS reinstall, lists available"
673 " OS templates for selection")
675 IGNORE_FAILURES_OPT = cli_option("--ignore-failures", dest="ignore_failures",
676 action="store_true", default=False,
677 help="Remove the instance from the cluster"
678 " configuration even if there are failures"
679 " during the removal process")
# Disk replacement / activation options.
NEW_SECONDARY_OPT = cli_option("-n", "--new-secondary", dest="dst_node",
                               help="Specifies the new secondary node",
                               metavar="NODE", default=None,
                               completion_suggest=OPT_COMPL_ONE_NODE)

ON_PRIMARY_OPT = cli_option("-p", "--on-primary", dest="on_primary",
                            default=False, action="store_true",
                            help="Replace the disk(s) on the primary"
                            " node (only for the drbd template)")

ON_SECONDARY_OPT = cli_option("-s", "--on-secondary", dest="on_secondary",
                              default=False, action="store_true",
                              help="Replace the disk(s) on the secondary"
                              " node (only for the drbd template)")

AUTO_REPLACE_OPT = cli_option("-a", "--auto", dest="auto",
                              default=False, action="store_true",
                              help="Automatically replace faulty disks"
                              " (only for the drbd template)")

# Force disk activation even when the recorded size disagrees with the
# actual device size.
IGNORE_SIZE_OPT = cli_option("--ignore-size", dest="ignore_size",
                             default=False, action="store_true",
                             help="Ignore current recorded size"
                             " (useful for forcing activation when"
                             " the recorded size is wrong)")
707 SRC_NODE_OPT = cli_option("--src-node", dest="src_node", help="Source node",
709 completion_suggest=OPT_COMPL_ONE_NODE)
711 SRC_DIR_OPT = cli_option("--src-dir", dest="src_dir", help="Source directory",
714 SECONDARY_IP_OPT = cli_option("-s", "--secondary-ip", dest="secondary_ip",
715 help="Specify the secondary ip for the node",
716 metavar="ADDRESS", default=None)
718 READD_OPT = cli_option("--readd", dest="readd",
719 default=False, action="store_true",
720 help="Readd old node after replacing it")
722 NOSSH_KEYCHECK_OPT = cli_option("--no-ssh-key-check", dest="ssh_key_check",
723 default=True, action="store_false",
724 help="Disable SSH key fingerprint checking")
727 MC_OPT = cli_option("-C", "--master-candidate", dest="master_candidate",
728 choices=_YESNO, default=None, metavar=_YORNO,
729 help="Set the master_candidate flag on the node")
731 OFFLINE_OPT = cli_option("-O", "--offline", dest="offline", metavar=_YORNO,
732 choices=_YESNO, default=None,
733 help="Set the offline flag on the node")
735 DRAINED_OPT = cli_option("-D", "--drained", dest="drained", metavar=_YORNO,
736 choices=_YESNO, default=None,
737 help="Set the drained flag on the node")
739 ALLOCATABLE_OPT = cli_option("--allocatable", dest="allocatable",
740 choices=_YESNO, default=None, metavar=_YORNO,
741 help="Set the allocatable flag on a volume")
743 NOLVM_STORAGE_OPT = cli_option("--no-lvm-storage", dest="lvm_storage",
744 help="Disable support for lvm based instances"
746 action="store_false", default=True)
748 ENABLED_HV_OPT = cli_option("--enabled-hypervisors",
749 dest="enabled_hypervisors",
750 help="Comma-separated list of hypervisors",
751 type="string", default=None)
753 NIC_PARAMS_OPT = cli_option("-N", "--nic-parameters", dest="nicparams",
754 type="keyval", default={},
755 help="NIC parameters")
757 CP_SIZE_OPT = cli_option("-C", "--candidate-pool-size", default=None,
758 dest="candidate_pool_size", type="int",
759 help="Set the candidate pool size")
761 VG_NAME_OPT = cli_option("-g", "--vg-name", dest="vg_name",
762 help="Enables LVM and specifies the volume group"
763 " name (cluster-wide) for disk allocation [xenvg]",
764 metavar="VG", default=None)
766 YES_DOIT_OPT = cli_option("--yes-do-it", dest="yes_do_it",
767 help="Destroy cluster", action="store_true")
769 NOVOTING_OPT = cli_option("--no-voting", dest="no_voting",
770 help="Skip node agreement check (dangerous)",
771 action="store_true", default=False)
773 MAC_PREFIX_OPT = cli_option("-m", "--mac-prefix", dest="mac_prefix",
774 help="Specify the mac prefix for the instance IP"
775 " addresses, in the format XX:XX:XX",
779 MASTER_NETDEV_OPT = cli_option("--master-netdev", dest="master_netdev",
780 help="Specify the node interface (cluster-wide)"
781 " on which the master IP address will be added "
782 " [%s]" % constants.DEFAULT_BRIDGE,
784 default=constants.DEFAULT_BRIDGE)
787 GLOBAL_FILEDIR_OPT = cli_option("--file-storage-dir", dest="file_storage_dir",
788 help="Specify the default directory (cluster-"
789 "wide) for storing the file-based disks [%s]" %
790 constants.DEFAULT_FILE_STORAGE_DIR,
792 default=constants.DEFAULT_FILE_STORAGE_DIR)
# Cluster-init options: the negative flags store False into
# positively-named destinations (default True).
NOMODIFY_ETCHOSTS_OPT = cli_option("--no-etc-hosts", dest="modify_etc_hosts",
                                   help="Don't modify /etc/hosts",
                                   action="store_false", default=True)

NOMODIFY_SSH_SETUP_OPT = cli_option("--no-ssh-init", dest="modify_ssh_setup",
                                    help="Don't initialize SSH keys",
                                    action="store_false", default=True)

# Cluster-verify options.
ERROR_CODES_OPT = cli_option("--error-codes", dest="error_codes",
                             help="Enable parseable error messages",
                             action="store_true", default=False)

NONPLUS1_OPT = cli_option("--no-nplus1-mem", dest="skip_nplusone_mem",
                          help="Skip N+1 memory redundancy tests",
                          action="store_true", default=False)
810 REBOOT_TYPE_OPT = cli_option("-t", "--type", dest="reboot_type",
811 help="Type of reboot: soft/hard/full",
812 default=constants.INSTANCE_REBOOT_HARD,
814 choices=list(constants.REBOOT_TYPES))
# Reboot / shutdown options.
IGNORE_SECONDARIES_OPT = cli_option("--ignore-secondaries",
                                    dest="ignore_secondaries",
                                    default=False, action="store_true",
                                    help="Ignore errors from secondaries")

# Negative flag: stores False into the positively-named "shutdown".
NOSHUTDOWN_OPT = cli_option("--noshutdown", dest="shutdown",
                            action="store_false", default=True,
                            help="Don't shutdown the instance (unsafe)")

TIMEOUT_OPT = cli_option("--timeout", dest="timeout", type="int",
                         default=constants.DEFAULT_SHUTDOWN_TIMEOUT,
                         help="Maximum time to wait")

SHUTDOWN_TIMEOUT_OPT = cli_option("--shutdown-timeout",
                                  dest="shutdown_timeout", type="int",
                                  default=constants.DEFAULT_SHUTDOWN_TIMEOUT,
                                  help="Maximum time to wait for instance shutdown")
835 def _ParseArgs(argv, commands, aliases):
836 """Parser for the command line arguments.
838 This function parses the arguments and returns the function which
839 must be executed together with its (modified) arguments.
841 @param argv: the command line
842 @param commands: dictionary with special contents, see the design
843 doc for cmdline handling
844 @param aliases: dictionary with command aliases {'alias': 'target, ...}
850 binary = argv[0].split("/")[-1]
852 if len(argv) > 1 and argv[1] == "--version":
853 ToStdout("%s (ganeti) %s", binary, constants.RELEASE_VERSION)
854 # Quit right away. That way we don't have to care about this special
855 # argument. optparse.py does it the same.
858 if len(argv) < 2 or not (argv[1] in commands or
860 # let's do a nice thing
861 sortedcmds = commands.keys()
864 ToStdout("Usage: %s {command} [options...] [argument...]", binary)
865 ToStdout("%s <command> --help to see details, or man %s", binary, binary)
868 # compute the max line length for cmd + usage
869 mlen = max([len(" %s" % cmd) for cmd in commands])
870 mlen = min(60, mlen) # should not get here...
872 # and format a nice command list
873 ToStdout("Commands:")
874 for cmd in sortedcmds:
875 cmdstr = " %s" % (cmd,)
876 help_text = commands[cmd][4]
877 help_lines = textwrap.wrap(help_text, 79 - 3 - mlen)
878 ToStdout("%-*s - %s", mlen, cmdstr, help_lines.pop(0))
879 for line in help_lines:
880 ToStdout("%-*s %s", mlen, "", line)
884 return None, None, None
886 # get command, unalias it, and look it up in commands
890 raise errors.ProgrammerError("Alias '%s' overrides an existing"
893 if aliases[cmd] not in commands:
894 raise errors.ProgrammerError("Alias '%s' maps to non-existing"
895 " command '%s'" % (cmd, aliases[cmd]))
899 func, args_def, parser_opts, usage, description = commands[cmd]
900 parser = OptionParser(option_list=parser_opts + [_DRY_RUN_OPT, DEBUG_OPT],
901 description=description,
902 formatter=TitledHelpFormatter(),
903 usage="%%prog %s %s" % (cmd, usage))
904 parser.disable_interspersed_args()
905 options, args = parser.parse_args()
907 if not _CheckArguments(cmd, args_def, args):
908 return None, None, None
910 return func, options, args
913 def _CheckArguments(cmd, args_def, args):
914 """Verifies the arguments using the argument definition.
918 1. Abort with error if values specified by user but none expected.
920 1. For each argument in definition
922 1. Keep running count of minimum number of values (min_count)
923 1. Keep running count of maximum number of values (max_count)
924 1. If it has an unlimited number of values
926 1. Abort with error if it's not the last argument in the definition
928 1. If last argument has limited number of values
930 1. Abort with error if number of values doesn't match or is too large
932 1. Abort with error if user didn't pass enough values (min_count)
935 if args and not args_def:
936 ToStderr("Error: Command %s expects no arguments", cmd)
943 last_idx = len(args_def) - 1
945 for idx, arg in enumerate(args_def):
946 if min_count is None:
948 elif arg.min is not None:
951 if max_count is None:
953 elif arg.max is not None:
957 check_max = (arg.max is not None)
959 elif arg.max is None:
960 raise errors.ProgrammerError("Only the last argument can have max=None")
963 # Command with exact number of arguments
964 if (min_count is not None and max_count is not None and
965 min_count == max_count and len(args) != min_count):
966 ToStderr("Error: Command %s expects %d argument(s)", cmd, min_count)
969 # Command with limited number of arguments
970 if max_count is not None and len(args) > max_count:
971 ToStderr("Error: Command %s expects only %d argument(s)",
975 # Command with some required arguments
976 if min_count is not None and len(args) < min_count:
977 ToStderr("Error: Command %s expects at least %d argument(s)",
984 def SplitNodeOption(value):
985 """Splits the value of a --node option.
988 if value and ':' in value:
989 return value.split(':', 1)
994 def CalculateOSNames(os_name, os_variants):
995 """Calculates all the names an OS can be called, according to its variants.
997 @type os_name: string
998 @param os_name: base name of the os
999 @type os_variants: list or None
1000 @param os_variants: list of supported variants
1002 @return: list of valid names
1006 return ['%s+%s' % (os_name, v) for v in os_variants]
1012 def wrapper(*args, **kwargs):
1015 return fn(*args, **kwargs)
1021 def AskUser(text, choices=None):
1022 """Ask the user a question.
1024 @param text: the question to ask
1026 @param choices: list with elements tuples (input_char, return_value,
1027 description); if not given, it will default to: [('y', True,
1028 'Perform the operation'), ('n', False, 'Do no do the operation')];
1029 note that the '?' char is reserved for help
1031 @return: one of the return values from the choices list; if input is
1032 not possible (i.e. not running with a tty, we return the last
1037 choices = [('y', True, 'Perform the operation'),
1038 ('n', False, 'Do not perform the operation')]
1039 if not choices or not isinstance(choices, list):
1040 raise errors.ProgrammerError("Invalid choices argument to AskUser")
1041 for entry in choices:
1042 if not isinstance(entry, tuple) or len(entry) < 3 or entry[0] == '?':
1043 raise errors.ProgrammerError("Invalid choices element to AskUser")
1045 answer = choices[-1][1]
1047 for line in text.splitlines():
1048 new_text.append(textwrap.fill(line, 70, replace_whitespace=False))
1049 text = "\n".join(new_text)
1051 f = file("/dev/tty", "a+")
1055 chars = [entry[0] for entry in choices]
1056 chars[-1] = "[%s]" % chars[-1]
1058 maps = dict([(entry[0], entry[1]) for entry in choices])
1062 f.write("/".join(chars))
1064 line = f.readline(2).strip().lower()
1069 for entry in choices:
1070 f.write(" %s - %s\n" % (entry[0], entry[2]))
1078 class JobSubmittedException(Exception):
1079 """Job was submitted, client should exit.
1081 This exception has one argument, the ID of the job that was
1082 submitted. The handler should print this ID.
1084 This is not an error, just a structured way to exit from clients.
1089 def SendJob(ops, cl=None):
1090 """Function to submit an opcode without waiting for the results.
1093 @param ops: list of opcodes
1094 @type cl: luxi.Client
1095 @param cl: the luxi client to use for communicating with the master;
1096 if None, a new client will be created
1102 job_id = cl.SubmitJob(ops)
1107 def PollJob(job_id, cl=None, feedback_fn=None):
1108 """Function to poll for the result of a job.
1110 @type job_id: job identified
1111 @param job_id: the job to poll for results
1112 @type cl: luxi.Client
1113 @param cl: the luxi client to use for communicating with the master;
1114 if None, a new client will be created
1120 prev_job_info = None
1121 prev_logmsg_serial = None
1124 result = cl.WaitForJobChange(job_id, ["status"], prev_job_info,
1127 # job not found, go away!
1128 raise errors.JobLost("Job with id %s lost" % job_id)
1130 # Split result, a tuple of (field values, log entries)
1131 (job_info, log_entries) = result
1132 (status, ) = job_info
1135 for log_entry in log_entries:
1136 (serial, timestamp, _, message) = log_entry
1137 if callable(feedback_fn):
1138 feedback_fn(log_entry[1:])
1140 encoded = utils.SafeEncode(message)
1141 ToStdout("%s %s", time.ctime(utils.MergeTime(timestamp)), encoded)
1142 prev_logmsg_serial = max(prev_logmsg_serial, serial)
1144 # TODO: Handle canceled and archived jobs
1145 elif status in (constants.JOB_STATUS_SUCCESS,
1146 constants.JOB_STATUS_ERROR,
1147 constants.JOB_STATUS_CANCELING,
1148 constants.JOB_STATUS_CANCELED):
1151 prev_job_info = job_info
1153 jobs = cl.QueryJobs([job_id], ["status", "opstatus", "opresult"])
1155 raise errors.JobLost("Job with id %s lost" % job_id)
1157 status, opstatus, result = jobs[0]
1158 if status == constants.JOB_STATUS_SUCCESS:
1160 elif status in (constants.JOB_STATUS_CANCELING,
1161 constants.JOB_STATUS_CANCELED):
1162 raise errors.OpExecError("Job was canceled")
1165 for idx, (status, msg) in enumerate(zip(opstatus, result)):
1166 if status == constants.OP_STATUS_SUCCESS:
1168 elif status == constants.OP_STATUS_ERROR:
1169 errors.MaybeRaise(msg)
1171 raise errors.OpExecError("partial failure (opcode %d): %s" %
1174 raise errors.OpExecError(str(msg))
1175 # default failure mode
1176 raise errors.OpExecError(result)
1179 def SubmitOpCode(op, cl=None, feedback_fn=None):
1180 """Legacy function to submit an opcode.
1182 This is just a simple wrapper over the construction of the processor
1183 instance. It should be extended to better handle feedback and
1184 interaction functions.
1190 job_id = SendJob([op], cl)
1192 op_results = PollJob(job_id, cl=cl, feedback_fn=feedback_fn)
1194 return op_results[0]
def SubmitOrSend(op, opts, cl=None, feedback_fn=None):
  """Wrapper around SubmitOpCode or SendJob.

  This function will decide, based on the 'opts' parameter, whether to
  submit and wait for the result of the opcode (and return it), or
  whether to just send the job and print its identifier. It is used in
  order to simplify the implementation of the '--submit' option.

  It will also add the dry-run parameter from the options passed, if true.

  """
  # Propagate --dry-run into the opcode so only the check steps run.
  if opts and opts.dry_run:
    op.dry_run = opts.dry_run
  if opts and opts.submit_only:
    job_id = SendJob([op], cl=cl)
    # Not an error: structured way for the caller to print the job ID
    # and exit without waiting for the job.
    raise JobSubmittedException(job_id)
  return SubmitOpCode(op, cl=cl, feedback_fn=feedback_fn)
1218 # TODO: Cache object?
1220 client = luxi.Client()
1221 except luxi.NoMasterError:
1222 master, myself = ssconf.GetMasterAndMyself()
1223 if master != myself:
1224 raise errors.OpPrereqError("This is not the master node, please connect"
1225 " to node '%s' and rerun the command" %
def FormatError(err):
  """Return a formatted error message for a given error.

  This function takes an exception instance and returns a tuple
  consisting of two values: first, the recommended exit code, and
  second, a string describing the error message (not
  newline-terminated).

  """
  # NOTE(review): 'retcode' (exit code), 'obuf' (output buffer) and
  # 'msg' (stringified error) are set up by lines not visible here
  if isinstance(err, errors.ConfigurationError):
    # corrupt configuration is fatal; tell the user we are giving up
    txt = "Corrupt configuration file: %s" % msg
    obuf.write(txt + "\n")
    obuf.write("Aborting.")
  elif isinstance(err, errors.HooksAbort):
    obuf.write("Failure: hooks execution failed:\n")
    # err.args[0] holds (node, script, output) tuples
    for node, script, out in err.args[0]:
        obuf.write("  node: %s, script: %s, output: %s\n" %
                   (node, script, out))
        obuf.write("  node: %s, script: %s (no output)\n" %
  elif isinstance(err, errors.HooksFailure):
    obuf.write("Failure: hooks general failure: %s" % msg)
  elif isinstance(err, errors.ResolverError):
    # distinguish failing to resolve our own name from other hosts'
    this_host = utils.HostInfo.SysName()
    if err.args[0] == this_host:
      msg = "Failure: can't resolve my own hostname ('%s')"
      msg = "Failure: can't resolve hostname '%s'"
    obuf.write(msg % err.args[0])
  elif isinstance(err, errors.OpPrereqError):
    if len(err.args) == 2:
      # two-argument form carries (details, error type)
      obuf.write("Failure: prerequisites not met for this"
                 " operation:\nerror type: %s, error details:\n%s" %
                 (err.args[1], err.args[0]))
      obuf.write("Failure: prerequisites not met for this"
                 " operation:\n%s" % msg)
  elif isinstance(err, errors.OpExecError):
    obuf.write("Failure: command execution error:\n%s" % msg)
  elif isinstance(err, errors.TagError):
    obuf.write("Failure: invalid tag(s) given:\n%s" % msg)
  elif isinstance(err, errors.JobQueueDrainError):
    obuf.write("Failure: the job queue is marked for drain and doesn't"
               " accept new requests\n")
  elif isinstance(err, errors.JobQueueFull):
    obuf.write("Failure: the job queue is full and doesn't accept new"
               " job submissions until old jobs are archived\n")
  elif isinstance(err, errors.TypeEnforcementError):
    obuf.write("Parameter Error: %s" % msg)
  elif isinstance(err, errors.ParameterError):
    obuf.write("Failure: unknown/wrong parameter name '%s'" % msg)
  elif isinstance(err, errors.GenericError):
    # keep this after the more specific GenericError subclasses above
    obuf.write("Unhandled Ganeti error: %s" % msg)
  elif isinstance(err, luxi.NoMasterError):
    obuf.write("Cannot communicate with the master daemon.\nIs it running"
               " and listening for connections?")
  elif isinstance(err, luxi.TimeoutError):
    obuf.write("Timeout while talking to the master daemon. Error:\n"
  elif isinstance(err, luxi.ProtocolError):
    obuf.write("Unhandled protocol error while talking to the master daemon:\n"
  elif isinstance(err, JobSubmittedException):
    # not a real error: used by '--submit' to report the new job's ID
    obuf.write("JobID: %s\n" % err.args[0])
    obuf.write("Unhandled exception: %s" % msg)
  return retcode, obuf.getvalue().rstrip('\n')
def GenericMain(commands, override=None, aliases=None):
  """Generic main function for all the gnt-* commands.

  - commands: a dictionary with a special structure, see the design doc
              for command line handling.
  - override: if not None, we expect a dictionary with keys that will
              override command line options; this can be used to pass
              options from the scripts to generic functions
  - aliases: dictionary with command aliases {'alias': 'target, ...}

  """
  # save the program name and the entire command line for later logging
    binary = os.path.basename(sys.argv[0]) or sys.argv[0]
    if len(sys.argv) >= 2:
      binary += " " + sys.argv[1]
      old_cmdline = " ".join(sys.argv[2:])
    # fallback name when the real binary name is unavailable
    binary = "<unknown program>"

    func, options, args = _ParseArgs(sys.argv, commands, aliases)
  except errors.ParameterError, err:
    result, err_msg = FormatError(err)

  if func is None: # parse error

  # let the caller override selected command line options
  if override is not None:
    for key, val in override.iteritems():
      setattr(options, key, val)

  utils.SetupLogging(constants.LOG_COMMANDS, debug=options.debug,
                     stderr_logging=True, program=binary)

    logging.info("run with arguments '%s'", old_cmdline)
    logging.info("run with no arguments")

    result = func(options, args)
  except (errors.GenericError, luxi.ProtocolError,
          JobSubmittedException), err:
    result, err_msg = FormatError(err)
    logging.exception("Error during command processing")
def GenericInstanceCreate(mode, opts, args):
  """Add an instance to the cluster via either creation or import.

  @param mode: constants.INSTANCE_CREATE or constants.INSTANCE_IMPORT
  @param opts: the command line options selected by the user
  @param args: should contain only one element, the new instance name

  @return: the desired exit code

  """
  (pnode, snode) = SplitNodeOption(opts.node)

  hypervisor, hvparams = opts.hypervisor

      nic_max = max(int(nidx[0])+1 for nidx in opts.nics)
    except ValueError, err:
      raise errors.OpPrereqError("Invalid NIC index passed: %s" % str(err))
    # NOTE(review): '[{}] * nic_max' repeats a reference to one shared
    # dict; the loop below is expected to replace entries by index --
    # confirm untouched slots are never mutated in place
    nics = [{}] * nic_max
    for nidx, ndict in opts.nics:
      if not isinstance(ndict, dict):
        msg = "Invalid nic/%d value: expected dict, got %s" % (nidx, ndict)
        raise errors.OpPrereqError(msg)
    # default of one nic, all auto

  if opts.disk_template == constants.DT_DISKLESS:
    # a diskless template must not come with any disk specification
    if opts.disks or opts.sd_size is not None:
      raise errors.OpPrereqError("Diskless instance but disk"
                                 " information passed")
    if not opts.disks and not opts.sd_size:
      raise errors.OpPrereqError("No disk information specified")
    if opts.disks and opts.sd_size is not None:
      raise errors.OpPrereqError("Please use either the '--disk' or"
    if opts.sd_size is not None:
      # convert the simple '-s' size into the generic disk list format
      opts.disks = [(0, {"size": opts.sd_size})]
      disk_max = max(int(didx[0])+1 for didx in opts.disks)
    except ValueError, err:
      raise errors.OpPrereqError("Invalid disk index passed: %s" % str(err))
    disks = [{}] * disk_max
    for didx, ddict in opts.disks:
      if not isinstance(ddict, dict):
        msg = "Invalid disk/%d value: expected dict, got %s" % (didx, ddict)
        raise errors.OpPrereqError(msg)
      elif "size" not in ddict:
        raise errors.OpPrereqError("Missing size for disk %d" % didx)
        # disk sizes may carry units; normalize via ParseUnit
        ddict["size"] = utils.ParseUnit(ddict["size"])
      except ValueError, err:
        raise errors.OpPrereqError("Invalid disk size for disk %d: %s" %

  utils.ForceDictType(opts.beparams, constants.BES_PARAMETER_TYPES)
  utils.ForceDictType(hvparams, constants.HVS_PARAMETER_TYPES)

  if mode == constants.INSTANCE_CREATE:
  elif mode == constants.INSTANCE_IMPORT:
    src_node = opts.src_node
    src_path = opts.src_dir
    raise errors.ProgrammerError("Invalid creation mode %s" % mode)

  op = opcodes.OpCreateInstance(instance_name=instance,
                                disk_template=opts.disk_template,
                                pnode=pnode, snode=snode,
                                ip_check=opts.ip_check,
                                wait_for_sync=opts.wait_for_sync,
                                file_storage_dir=opts.file_storage_dir,
                                file_driver=opts.file_driver,
                                iallocator=opts.iallocator,
                                hypervisor=hypervisor,
                                beparams=opts.beparams,

  SubmitOrSend(op, opts)
def GenerateTable(headers, fields, separator, data,
                  numfields=None, unitfields=None,
  """Prints a table with headers and different fields.

  @param headers: dictionary mapping field names to headers for
  @param fields: the field names corresponding to each row in
  @param separator: the separator to be used; if this is None,
      the default 'smart' algorithm is used which computes optimal
      field width, otherwise just the separator is used between
  @param data: a list of lists, each sublist being one row to be output
  @type numfields: list
  @param numfields: a list with the fields that hold numeric
      values and thus should be right-aligned
  @type unitfields: list
  @param unitfields: a list with the fields that hold numeric
      values that should be formatted with the units field
  @type units: string or None
  @param units: the units we should use for formatting, or None for
      automatic choice (human-readable for non-separator usage, otherwise
      megabytes); this is a one-letter string

  """
  if numfields is None:
  if unitfields is None:
  numfields = utils.FieldSet(*numfields)
  unitfields = utils.FieldSet(*unitfields)

  # build one %-style format specifier per column
  for field in fields:
    if headers and field not in headers:
      # TODO: handle better unknown fields (either revert to old
      # style of raising exception, or deal more intelligently with
      headers[field] = field
    if separator is not None:
      format_fields.append("%s")
    elif numfields.Matches(field):
      # numeric columns are right-aligned with a computed width
      format_fields.append("%*s")
      format_fields.append("%-*s")

  if separator is None:
    # smart mode: track the maximum length seen per column in 'mlens'
    mlens = [0 for name in fields]
    format = ' '.join(format_fields)
    format = separator.replace("%", "%%").join(format_fields)

    for idx, val in enumerate(row):
      if unitfields.Matches(fields[idx]):
          val = row[idx] = utils.FormatUnit(val, units)
      # all cell values are stringified in place
      val = row[idx] = str(val)
      if separator is None:
        mlens[idx] = max(mlens[idx], len(val))

    for idx, name in enumerate(fields):
      if separator is None:
        mlens[idx] = max(mlens[idx], len(hdr))
        args.append(mlens[idx])
    result.append(format % tuple(args))

      line = ['-' for _ in fields]
    for idx in range(len(fields)):
      if separator is None:
        args.append(mlens[idx])
      args.append(line[idx])
    result.append(format % tuple(args))
def FormatTimestamp(ts):
  """Formats a given timestamp.

  @type ts: tuple or list
  @param ts: a timeval-type timestamp, a tuple of seconds and microseconds

  @rtype: string
  @return: a string with the formatted timestamp

  """
  # reject anything that is not a (seconds, microseconds) pair
  if not isinstance (ts, (tuple, list)) or len(ts) != 2:
  # NOTE(review): 'sec' and 'usec' are unpacked from ts by a line not
  # visible in this chunk
  return time.strftime("%F %T", time.localtime(sec)) + ".%06d" % usec
def ParseTimespec(value):
  """Parse a time specification.

  The following suffixes will be recognized:

  Without any suffix, the value will be taken to be in seconds.

  """
    raise errors.OpPrereqError("Empty time specification passed")
  # NOTE(review): 'suffix_map' (suffix character -> multiplier) is
  # defined by lines not visible in this chunk
  if value[-1] not in suffix_map:
      # no recognized suffix: the whole value must be a plain integer
      raise errors.OpPrereqError("Invalid time specification '%s'" % value)
    multiplier = suffix_map[value[-1]]
    if value: # no data left after stripping the suffix
      raise errors.OpPrereqError("Invalid time specification (only"
      value = int(value) * multiplier
      raise errors.OpPrereqError("Invalid time specification '%s'" % value)
def GetOnlineNodes(nodes, cl=None, nowarn=False):
  """Returns the names of online nodes.

  This function will also log a warning on stderr with the names of
  the offline nodes that were skipped, unless 'nowarn' is set.

  @param nodes: if not empty, use only this subset of nodes (minus the
      offline ones)
  @param cl: if not None, luxi client to use
  @type nowarn: boolean
  @param nowarn: by default, this function will output a note with the
      offline nodes that are skipped; if this parameter is True the
      note is not displayed

  """
  # NOTE(review): the default-client setup (when cl is None) is handled
  # by lines not visible in this chunk
  result = cl.QueryNodes(names=nodes, fields=["name", "offline"],
  # second query column is the boolean 'offline' flag
  offline = [row[0] for row in result if row[1]]
  if offline and not nowarn:
    ToStderr("Note: skipping offline node(s): %s" % ", ".join(offline))
  return [row[0] for row in result if not row[1]]
def _ToStream(stream, txt, *args):
  """Write a message to a stream, bypassing the logging system

  @type stream: file object
  @param stream: the file to which we should write
  @type txt: str
  @param txt: the message

  """
    # 'txt' is %-formatted with the extra arguments when any are given
    stream.write(txt % args)
def ToStdout(txt, *args):
  """Print a message on standard output, bypassing the logging system.

  Thin convenience wrapper over L{_ToStream}.

  @type txt: str
  @param txt: the message (used as a format string when args are given)

  """
  _ToStream(sys.stdout, txt, *args)
def ToStderr(txt, *args):
  """Print a message on standard error, bypassing the logging system.

  Thin convenience wrapper over L{_ToStream}.

  @type txt: str
  @param txt: the message (used as a format string when args are given)

  """
  _ToStream(sys.stderr, txt, *args)
class JobExecutor(object):
  """Class which manages the submission and execution of multiple jobs.

  Note that instances of this class should not be reused between
  GetResults() calls.

  """
  def __init__(self, cl=None, verbose=True):
    # NOTE(review): the queue/jobs bookkeeping attributes are set up by
    # lines not visible in this chunk
    # whether to print per-job progress information
    self.verbose = verbose

  def QueueJob(self, name, *ops):
    """Record a job for later submit.

    @type name: string
    @param name: a description of the job, will be used in WaitJobSet

    """
    self.queue.append((name, ops))

  def SubmitPending(self):
    """Submit all pending jobs.

    """
    results = self.cl.SubmitManyJobs([row[1] for row in self.queue])
    # pair each (status, data) submission result with the queued name
    for ((status, data), (name, _)) in zip(results, self.queue):
      self.jobs.append((status, data, name))

  def GetResults(self):
    """Wait for and return the results of all jobs.

    @rtype: list
    @return: list of tuples (success, job results), in the same order
        as the submitted jobs; if a job has failed, instead of the result
        there will be the error message

    """
      self.SubmitPending()
      ok_jobs = [row[1] for row in self.jobs if row[0]]
        ToStdout("Submitted jobs %s", ", ".join(ok_jobs))
    for submit_status, jid, name in self.jobs:
      if not submit_status:
        # submission itself failed; 'jid' then holds the error message
        ToStderr("Failed to submit job for %s: %s", name, jid)
        results.append((False, jid))
        ToStdout("Waiting for job %s for %s...", jid, name)
        job_result = PollJob(jid, cl=self.cl)
      except (errors.GenericError, luxi.ProtocolError), err:
        _, job_result = FormatError(err)
        # the error message will always be shown, verbose or not
        ToStderr("Job %s for %s has failed: %s", jid, name, job_result)

      results.append((success, job_result))

  def WaitOrShow(self, wait):
    """Wait for job results or only print the job IDs.

    @type wait: boolean
    @param wait: whether to wait or not

    """
      return self.GetResults()
      self.SubmitPending()
      for status, result, name in self.jobs:
          ToStdout("%s: %s", result, name)
          ToStderr("Failure for %s: %s", name, result)