4 # Copyright (C) 2006, 2007 Google Inc.
6 # This program is free software; you can redistribute it and/or modify
7 # it under the terms of the GNU General Public License as published by
8 # the Free Software Foundation; either version 2 of the License, or
9 # (at your option) any later version.
11 # This program is distributed in the hope that it will be useful, but
12 # WITHOUT ANY WARRANTY; without even the implied warranty of
13 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 # General Public License for more details.
16 # You should have received a copy of the GNU General Public License
17 # along with this program; if not, write to the Free Software
18 # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
22 """Module dealing with command line parsing"""
30 from cStringIO import StringIO
32 from ganeti import utils
33 from ganeti import errors
34 from ganeti import constants
35 from ganeti import opcodes
36 from ganeti import luxi
37 from ganeti import ssconf
38 from ganeti import rpc
40 from optparse import (OptionParser, TitledHelpFormatter,
41 Option, OptionValueError)
45 # Command line options
63 "FILESTORE_DRIVER_OPT",
72 "IGNORE_FAILURES_OPT",
73 "IGNORE_SECONDARIES_OPT",
87 "NOMODIFY_ETCHOSTS_OPT",
88 "NOMODIFY_SSH_SETUP_OPT",
108 "SHUTDOWN_TIMEOUT_OPT",
121 # Generic functions for CLI programs
123 "GenericInstanceCreate",
127 "JobSubmittedException",
132 # Formatting functions
133 "ToStderr", "ToStdout",
142 # command line options support infrastructure
143 "ARGS_MANY_INSTANCES",
157 "OPT_COMPL_INST_ADD_NODES",
158 "OPT_COMPL_MANY_NODES",
159 "OPT_COMPL_ONE_IALLOCATOR",
160 "OPT_COMPL_ONE_INSTANCE",
161 "OPT_COMPL_ONE_NODE",
173 def __init__(self, min=0, max=None): # pylint: disable-msg=W0622
178 return ("<%s min=%s max=%s>" %
179 (self.__class__.__name__, self.min, self.max))
class ArgSuggest(_Argument):
  """Suggesting argument.

  Value can be any of the ones passed to the constructor.

  """
  # pylint: disable-msg=W0622
  def __init__(self, min=0, max=None, choices=None):
    _Argument.__init__(self, min=min, max=max)
    # choices are offered as shell-completion suggestions; unlike ArgChoice,
    # any value is still accepted
    self.choices = choices

    # NOTE(review): the 'def __repr__(self):' line is not visible in this
    # excerpt; this return statement clearly belongs to that method.
    return ("<%s min=%s max=%s choices=%r>" %
            (self.__class__.__name__, self.min, self.max, self.choices))
class ArgChoice(ArgSuggest):
  """Choice argument.

  Value can be any of the ones passed to the constructor. Like L{ArgSuggest},
  but value must be one of the choices.

  """


class ArgUnknown(_Argument):
  """Unknown argument to program (e.g. determined at runtime).

  """


class ArgInstance(_Argument):
  """Instances argument.

  """


class ArgNode(_Argument):
  """Node argument.

  """


class ArgJobId(_Argument):
  """Job ID argument.

  """


class ArgFile(_Argument):
  """File path argument.

  """


class ArgCommand(_Argument):
  """Command argument.

  """


class ArgHost(_Argument):
  """Host argument.

  """
# Pre-built argument-definition lists for common command signatures:
# "many" variants accept any number of names, "one" variants exactly one.
ARGS_MANY_INSTANCES = [ArgInstance()]
ARGS_MANY_NODES = [ArgNode()]
ARGS_ONE_INSTANCE = [ArgInstance(min=1, max=1)]
ARGS_ONE_NODE = [ArgNode(min=1, max=1)]
def _ExtractTagsObject(opts, args):
  """Extract the tag type object.

  Returns a (kind, name) pair describing the object whose tags are
  being manipulated, based on the ``tag_type`` attribute of the options
  (callers unpack it as ``kind, name = _ExtractTagsObject(...)``).

  Note that this function will modify its args parameter.

  """
  # tag_type must be injected by the command definition; its absence is a
  # programming error, not a user error
  if not hasattr(opts, "tag_type"):
    raise errors.ProgrammerError("tag_type not passed to _ExtractTagsObject")
  # NOTE(review): the assignment of 'kind' (presumably kind = opts.tag_type)
  # is not visible in this excerpt — confirm against the full file.
  if kind == constants.TAG_CLUSTER:
  elif kind == constants.TAG_NODE or kind == constants.TAG_INSTANCE:
    # node and instance tags need the target name as the first argument
    raise errors.OpPrereqError("no arguments passed to the command")
  # any other tag kind is unhandled and indicates a programming error
  raise errors.ProgrammerError("Unhandled tag type '%s'" % kind)
def _ExtendTags(opts, args):
  """Extend the args if a source file has been given.

  This function will extend the tags with the contents of the file
  passed in the 'tags_source' attribute of the opts parameter. A file
  named '-' will be replaced by stdin.

  """
  fname = opts.tags_source
  # NOTE(review): the early-return for fname=None and the '-'-means-stdin
  # handling are not visible in this excerpt — confirm against the full file.
  new_fh = open(fname, "r")
  # we don't use the nice 'new_data = [line.strip() for line in fh]'
  # because of python bug 1633941
  line = new_fh.readline()
  new_data.append(line.strip())
  # the tags read from the file are appended to the positional arguments
  args.extend(new_data)
def ListTags(opts, args):
  """List the tags on a given object.

  This is a generic implementation that knows how to deal with all
  three cases of tag objects (cluster, node, instance). The opts
  argument is expected to contain a tag_type field denoting what
  object type we work on.

  """
  kind, name = _ExtractTagsObject(opts, args)
  # NOTE(review): 'cl' is presumably a luxi client; its creation is not
  # visible in this excerpt — confirm against the full file.
  result = cl.QueryTags(kind, name)
  # QueryTags returns an iterable; materialize it before further processing
  result = list(result)
def AddTags(opts, args):
  """Add tags on a given object.

  This is a generic implementation that knows how to deal with all
  three cases of tag objects (cluster, node, instance). The opts
  argument is expected to contain a tag_type field denoting what
  object type we work on.

  """
  kind, name = _ExtractTagsObject(opts, args)
  # pull additional tags from --from, if given
  _ExtendTags(opts, args)
  # NOTE(review): the guard (presumably 'if not args:') for this raise is
  # not visible in this excerpt.
    raise errors.OpPrereqError("No tags to be added")
  op = opcodes.OpAddTags(kind=kind, name=name, tags=args)
def RemoveTags(opts, args):
  """Remove tags from a given object.

  This is a generic implementation that knows how to deal with all
  three cases of tag objects (cluster, node, instance). The opts
  argument is expected to contain a tag_type field denoting what
  object type we work on.

  """
  kind, name = _ExtractTagsObject(opts, args)
  # pull additional tags from --from, if given
  _ExtendTags(opts, args)
  # NOTE(review): the guard (presumably 'if not args:') for this raise is
  # not visible in this excerpt.
    raise errors.OpPrereqError("No tags to be removed")
  op = opcodes.OpDelTags(kind=kind, name=name, tags=args)
def check_unit(option, opt, value): # pylint: disable-msg=W0613
  """OptParsers custom converter for units.

  Converts a size value such as '4G' into an integer amount via
  L{utils.ParseUnit}.

  """
  # NOTE(review): the 'try:' opening this block is not visible in this
  # excerpt.
    return utils.ParseUnit(value)
  except errors.UnitParseError, err:
    # re-raise as the exception type optparse expects for bad option values
    raise OptionValueError("option %s: %s" % (opt, err))
def _SplitKeyVal(opt, data):
  """Convert a KeyVal string into a dict.

  This function will convert a key=val[,...] string into a dict. Empty
  values will be converted specially: keys which have the prefix 'no_'
  will have the value=False and the prefix stripped, the others will
  have value=True.

  @param opt: a string holding the option name for which we process the
      data, used in building error messages
  @param data: a string of the format key=val,key=val,...

  @return: {key=val, key=val}
  @raises errors.ParameterError: if there are duplicate keys

  """
  for elem in utils.UnescapeAndSplit(data, sep=","):
      # explicit key=value pair
      key, val = elem.split("=", 1)
    if elem.startswith(NO_PREFIX):
      # bare 'no_foo' becomes foo=False
      key, val = elem[len(NO_PREFIX):], False
    elif elem.startswith(UN_PREFIX):
      # bare 'un_foo' becomes foo=None — presumably meaning "unset/reset";
      # NOTE(review): confirm against the full docstring, which is elided here
      key, val = elem[len(UN_PREFIX):], None
      # bare 'foo' becomes foo=True
      key, val = elem, True
      raise errors.ParameterError("Duplicate key '%s' in option %s" %
def check_ident_key_val(option, opt, value): # pylint: disable-msg=W0613
  """Custom parser for ident:key=val,key=val options.

  This will store the parsed values as a tuple (ident, {key: val}). As such,
  multiple uses of this option via action=append is possible.

  """
    # no ':' separator: the whole value is the identifier, no option data
    ident, rest = value, ''
    ident, rest = value.split(":", 1)

  if ident.startswith(NO_PREFIX):
      msg = "Cannot pass options when removing parameter groups: %s" % value
      raise errors.ParameterError(msg)
    # 'no_ident' requests removal; represented as (ident, False)
    retval = (ident[len(NO_PREFIX):], False)
  elif ident.startswith(UN_PREFIX):
      msg = "Cannot pass options when removing parameter groups: %s" % value
      raise errors.ParameterError(msg)
    # 'un_ident' requests a reset to defaults; represented as (ident, None)
    retval = (ident[len(UN_PREFIX):], None)
    # normal case: parse the key=val part into a dict
    kv_dict = _SplitKeyVal(opt, rest)
    retval = (ident, kv_dict)
def check_key_val(option, opt, value): # pylint: disable-msg=W0613
  """Custom parser class for key=val,key=val options.

  This will store the parsed values as a dict {key: val}.

  """
  return _SplitKeyVal(opt, value)
# completion_suggestion is normally a list. Using numeric values not evaluating
# to False for dynamic completion.
# NOTE(review): range(100, 106) yields six values but only four names are
# visible here — OPT_COMPL_ONE_NODE and OPT_COMPL_ONE_OS (both referenced
# elsewhere in this file) appear elided; confirm against the full file.
(OPT_COMPL_MANY_NODES,
 OPT_COMPL_ONE_INSTANCE,
 OPT_COMPL_ONE_IALLOCATOR,
 OPT_COMPL_INST_ADD_NODES) = range(100, 106)

# set of all dynamic-completion markers, for membership tests
OPT_COMPL_ALL = frozenset([
  OPT_COMPL_MANY_NODES,
  OPT_COMPL_ONE_INSTANCE,
  OPT_COMPL_ONE_IALLOCATOR,
  OPT_COMPL_INST_ADD_NODES,
class CliOption(Option):
  """Custom option class for optparse.

  Extends optparse's Option with the 'completion_suggest' attribute (used
  for shell completion) and the custom value types 'identkeyval', 'keyval'
  and 'unit', wired to the converter functions defined above.

  """
  # extra attribute accepted by this option class
  ATTRS = Option.ATTRS + [
    "completion_suggest",
  # NOTE(review): the closing brackets of ATTRS and the members of TYPES
  # are not visible in this excerpt.
  TYPES = Option.TYPES + (
  TYPE_CHECKER = Option.TYPE_CHECKER.copy()
  # map the custom type names to their converter functions
  TYPE_CHECKER["identkeyval"] = check_ident_key_val
  TYPE_CHECKER["keyval"] = check_key_val
  TYPE_CHECKER["unit"] = check_unit
# optparse.py sets make_option, so we do it for our own option class, too
cli_option = CliOption

# canonical choice values for yes/no ternary flags (see MC_OPT, OFFLINE_OPT,
# DRAINED_OPT, ALLOCATABLE_OPT below)
_YESNO = ("yes", "no")
# Generic output/behaviour options shared by many commands.
# NOTE(review): several definitions in this excerpt are missing lines
# (e.g. DEBUG_OPT's action= argument, _DRY_RUN_OPT's closing lines); the
# visible arguments are preserved verbatim.

DEBUG_OPT = cli_option("-d", "--debug", default=False,
                       help="Turn debugging on")

NOHDR_OPT = cli_option("--no-headers", default=False,
                       action="store_true", dest="no_headers",
                       help="Don't display column headers")

SEP_OPT = cli_option("--separator", default=None,
                     action="store", dest="separator",
                     help=("Separator between output fields"
                           " (defaults to one space)"))

USEUNITS_OPT = cli_option("--units", default=None,
                          dest="units", choices=('h', 'm', 'g', 't'),
                          help="Specify units for output (one of hmgt)")

FIELDS_OPT = cli_option("-o", "--output", dest="output", action="store",
                        type="string", metavar="FIELDS",
                        help="Comma separated list of output fields")

FORCE_OPT = cli_option("-f", "--force", dest="force", action="store_true",
                       default=False, help="Force the operation")

CONFIRM_OPT = cli_option("--yes", dest="confirm", action="store_true",
                         default=False, help="Do not require confirmation")

TAG_SRC_OPT = cli_option("--from", dest="tags_source",
                         default=None, help="File with tag names")

SUBMIT_OPT = cli_option("--submit", dest="submit_only",
                        default=False, action="store_true",
                        help=("Submit the job and return the job ID, but"
                              " don't wait for the job to finish"))

SYNC_OPT = cli_option("--sync", dest="do_locking",
                      default=False, action="store_true",
                      help=("Grab locks while doing the queries"
                            " in order to ensure more consistent results"))

# added automatically to every command's parser in _ParseArgs
_DRY_RUN_OPT = cli_option("--dry-run", default=False,
                          help=("Do not execute the operation, just run the"
                                " check steps and verify it it could be"

VERBOSE_OPT = cli_option("-v", "--verbose", default=False,
                         help="Increase the verbosity of the operation")

DEBUG_SIMERR_OPT = cli_option("--debug-simulate-errors", default=False,
                              action="store_true", dest="simulate_errors",
                              help="Debugging option that makes the operation"
                              " treat most runtime checks as failed")

NWSYNC_OPT = cli_option("--no-wait-for-sync", dest="wait_for_sync",
                        default=True, action="store_false",
                        help="Don't wait for sync (DANGEROUS!)")

# Instance creation / configuration options

DISK_TEMPLATE_OPT = cli_option("-t", "--disk-template", dest="disk_template",
                               help="Custom disk setup (diskless, file,"
                               default=None, metavar="TEMPL",
                               choices=list(constants.DISK_TEMPLATES))

NONICS_OPT = cli_option("--no-nics", default=False, action="store_true",
                        help="Do not create any network cards for"

FILESTORE_DIR_OPT = cli_option("--file-storage-dir", dest="file_storage_dir",
                               help="Relative path under default cluster-wide"
                               " file storage dir to store file-based disks",
                               default=None, metavar="<DIR>")

FILESTORE_DRIVER_OPT = cli_option("--file-driver", dest="file_driver",
                                  help="Driver to use for image files",
                                  default="loop", metavar="<DRIVER>",
                                  choices=list(constants.FILE_DRIVER))

IALLOCATOR_OPT = cli_option("-I", "--iallocator", metavar="<NAME>",
                            help="Select nodes for the instance automatically"
                            " using the <NAME> iallocator plugin",
                            default=None, type="string",
                            completion_suggest=OPT_COMPL_ONE_IALLOCATOR)

OS_OPT = cli_option("-o", "--os-type", dest="os", help="What OS to run",
                    completion_suggest=OPT_COMPL_ONE_OS)

FORCE_VARIANT_OPT = cli_option("--force-variant", dest="force_variant",
                               action="store_true", default=False,
                               help="Force an unknown variant")

BACKEND_OPT = cli_option("-B", "--backend-parameters", dest="beparams",
                         type="keyval", default={},
                         help="Backend parameters")

HVOPTS_OPT = cli_option("-H", "--hypervisor-parameters", type="keyval",
                        default={}, dest="hvparams",
                        help="Hypervisor parameters")

# NOTE: -H is reused by three different options below; commands must take
# care to include only one of them in their option list.
HYPERVISOR_OPT = cli_option("-H", "--hypervisor-parameters", dest="hypervisor",
                            help="Hypervisor and hypervisor options, in the"
                            " format hypervisor:option=value,option=value,...",
                            default=None, type="identkeyval")

HVLIST_OPT = cli_option("-H", "--hypervisor-parameters", dest="hvparams",
                        help="Hypervisor and hypervisor options, in the"
                        " format hypervisor:option=value,option=value,...",
                        default=[], action="append", type="identkeyval")
# Instance networking / disk / migration options.
# NOTE(review): some help strings below are truncated in this excerpt
# (e.g. NOIPCHECK_OPT, IGNORE_CONSIST_OPT, CLEANUP_OPT); the visible
# arguments are preserved verbatim.

NOIPCHECK_OPT = cli_option("--no-ip-check", dest="ip_check", default=True,
                           action="store_false",
                           help="Don't check that the instance's IP"

NONAMECHECK_OPT = cli_option("--no-name-check", dest="name_check",
                             default=True, action="store_false",
                             help="Don't check that the instance's name"

NET_OPT = cli_option("--net",
                     help="NIC parameters", default=[],
                     dest="nics", action="append", type="identkeyval")

DISK_OPT = cli_option("--disk", help="Disk parameters", default=[],
                      dest="disks", action="append", type="identkeyval")

DISKIDX_OPT = cli_option("--disks", dest="disks", default=None,
                         help="Comma-separated list of disks"
                         " indices to act on (e.g. 0,2) (optional,"
                         " defaults to all disks)")

OS_SIZE_OPT = cli_option("-s", "--os-size", dest="sd_size",
                         help="Enforces a single-disk configuration using the"
                         " given disk size, in MiB unless a suffix is used",
                         default=None, type="unit", metavar="<size>")

IGNORE_CONSIST_OPT = cli_option("--ignore-consistency",
                                dest="ignore_consistency",
                                action="store_true", default=False,
                                help="Ignore the consistency of the disks on"

# note the inverted dest: --non-live stores live=False
NONLIVE_OPT = cli_option("--non-live", dest="live",
                         default=True, action="store_false",
                         help="Do a non-live migration (this usually means"
                         " freeze the instance, save the state, transfer and"
                         " only then resume running on the secondary node)")

# -n/--node is reused with different semantics by the next three options;
# commands include only the variant they need
NODE_PLACEMENT_OPT = cli_option("-n", "--node", dest="node",
                                help="Target node and optional secondary node",
                                metavar="<pnode>[:<snode>]",
                                completion_suggest=OPT_COMPL_INST_ADD_NODES)

NODE_LIST_OPT = cli_option("-n", "--node", dest="nodes", default=[],
                           action="append", metavar="<node>",
                           help="Use only this node (can be used multiple"
                           " times, if not given defaults to all nodes)",
                           completion_suggest=OPT_COMPL_ONE_NODE)

SINGLE_NODE_OPT = cli_option("-n", "--node", dest="node", help="Target node",
                             completion_suggest=OPT_COMPL_ONE_NODE)

NOSTART_OPT = cli_option("--no-start", dest="start", default=True,
                         action="store_false",
                         help="Don't start the instance after creation")

SHOWCMD_OPT = cli_option("--show-cmd", dest="show_command",
                         action="store_true", default=False,
                         help="Show command instead of executing it")

CLEANUP_OPT = cli_option("--cleanup", dest="cleanup",
                         default=False, action="store_true",
                         help="Instead of performing the migration, try to"
                         " recover from a failed cleanup. This is safe"
                         " to run even if the instance is healthy, but it"
                         " will create extra replication traffic and "
                         " disrupt briefly the replication (like during the"

STATIC_OPT = cli_option("-s", "--static", dest="static",
                        action="store_true", default=False,
                        help="Only show configuration data, not runtime data")

ALL_OPT = cli_option("--all", dest="show_all",
                     default=False, action="store_true",
                     help="Show info on all instances on the cluster."
                     " This can take a long time to run, use wisely")

SELECT_OS_OPT = cli_option("--select-os", dest="select_os",
                           action="store_true", default=False,
                           help="Interactive OS reinstall, lists available"
                           " OS templates for selection")

IGNORE_FAILURES_OPT = cli_option("--ignore-failures", dest="ignore_failures",
                                 action="store_true", default=False,
                                 help="Remove the instance from the cluster"
                                 " configuration even if there are failures"
                                 " during the removal process")

NEW_SECONDARY_OPT = cli_option("-n", "--new-secondary", dest="dst_node",
                               help="Specifies the new secondary node",
                               metavar="NODE", default=None,
                               completion_suggest=OPT_COMPL_ONE_NODE)

ON_PRIMARY_OPT = cli_option("-p", "--on-primary", dest="on_primary",
                            default=False, action="store_true",
                            help="Replace the disk(s) on the primary"
                            " node (only for the drbd template)")

ON_SECONDARY_OPT = cli_option("-s", "--on-secondary", dest="on_secondary",
                              default=False, action="store_true",
                              help="Replace the disk(s) on the secondary"
                              " node (only for the drbd template)")

AUTO_REPLACE_OPT = cli_option("-a", "--auto", dest="auto",
                              default=False, action="store_true",
                              help="Automatically replace faulty disks"
                              " (only for the drbd template)")

IGNORE_SIZE_OPT = cli_option("--ignore-size", dest="ignore_size",
                             default=False, action="store_true",
                             help="Ignore current recorded size"
                             " (useful for forcing activation when"
                             " the recorded size is wrong)")
# Node-management and cluster-level options.
# NOTE(review): SRC_DIR_OPT and MAC_PREFIX_OPT are missing their closing
# lines in this excerpt; _YORNO (metavar for the yes/no flags) is defined
# elsewhere in the file and not visible here.

SRC_NODE_OPT = cli_option("--src-node", dest="src_node", help="Source node",
                          completion_suggest=OPT_COMPL_ONE_NODE)

SRC_DIR_OPT = cli_option("--src-dir", dest="src_dir", help="Source directory",

SECONDARY_IP_OPT = cli_option("-s", "--secondary-ip", dest="secondary_ip",
                              help="Specify the secondary ip for the node",
                              metavar="ADDRESS", default=None)

READD_OPT = cli_option("--readd", dest="readd",
                       default=False, action="store_true",
                       help="Readd old node after replacing it")

NOSSH_KEYCHECK_OPT = cli_option("--no-ssh-key-check", dest="ssh_key_check",
                                default=True, action="store_false",
                                help="Disable SSH key fingerprint checking")

# the following four are yes/no ternary flags: None (default) means
# "leave unchanged"
MC_OPT = cli_option("-C", "--master-candidate", dest="master_candidate",
                    choices=_YESNO, default=None, metavar=_YORNO,
                    help="Set the master_candidate flag on the node")

OFFLINE_OPT = cli_option("-O", "--offline", dest="offline", metavar=_YORNO,
                         choices=_YESNO, default=None,
                         help="Set the offline flag on the node")

DRAINED_OPT = cli_option("-D", "--drained", dest="drained", metavar=_YORNO,
                         choices=_YESNO, default=None,
                         help="Set the drained flag on the node")

ALLOCATABLE_OPT = cli_option("--allocatable", dest="allocatable",
                             choices=_YESNO, default=None, metavar=_YORNO,
                             help="Set the allocatable flag on a volume")

NOLVM_STORAGE_OPT = cli_option("--no-lvm-storage", dest="lvm_storage",
                               help="Disable support for lvm based instances"
                               action="store_false", default=True)

ENABLED_HV_OPT = cli_option("--enabled-hypervisors",
                            dest="enabled_hypervisors",
                            help="Comma-separated list of hypervisors",
                            type="string", default=None)

NIC_PARAMS_OPT = cli_option("-N", "--nic-parameters", dest="nicparams",
                            type="keyval", default={},
                            help="NIC parameters")

CP_SIZE_OPT = cli_option("-C", "--candidate-pool-size", default=None,
                         dest="candidate_pool_size", type="int",
                         help="Set the candidate pool size")

VG_NAME_OPT = cli_option("-g", "--vg-name", dest="vg_name",
                         help="Enables LVM and specifies the volume group"
                         " name (cluster-wide) for disk allocation [xenvg]",
                         metavar="VG", default=None)

YES_DOIT_OPT = cli_option("--yes-do-it", dest="yes_do_it",
                          help="Destroy cluster", action="store_true")

NOVOTING_OPT = cli_option("--no-voting", dest="no_voting",
                          help="Skip node agreement check (dangerous)",
                          action="store_true", default=False)

MAC_PREFIX_OPT = cli_option("-m", "--mac-prefix", dest="mac_prefix",
                            help="Specify the mac prefix for the instance IP"
                            " addresses, in the format XX:XX:XX",

MASTER_NETDEV_OPT = cli_option("--master-netdev", dest="master_netdev",
                               help="Specify the node interface (cluster-wide)"
                               " on which the master IP address will be added "
                               " [%s]" % constants.DEFAULT_BRIDGE,
                               default=constants.DEFAULT_BRIDGE)

GLOBAL_FILEDIR_OPT = cli_option("--file-storage-dir", dest="file_storage_dir",
                                help="Specify the default directory (cluster-"
                                "wide) for storing the file-based disks [%s]" %
                                constants.DEFAULT_FILE_STORAGE_DIR,
                                default=constants.DEFAULT_FILE_STORAGE_DIR)

NOMODIFY_ETCHOSTS_OPT = cli_option("--no-etc-hosts", dest="modify_etc_hosts",
                                   help="Don't modify /etc/hosts",
                                   action="store_false", default=True)

NOMODIFY_SSH_SETUP_OPT = cli_option("--no-ssh-init", dest="modify_ssh_setup",
                                    help="Don't initialize SSH keys",
                                    action="store_false", default=True)

ERROR_CODES_OPT = cli_option("--error-codes", dest="error_codes",
                             help="Enable parseable error messages",
                             action="store_true", default=False)

NONPLUS1_OPT = cli_option("--no-nplus1-mem", dest="skip_nplusone_mem",
                          help="Skip N+1 memory redundancy tests",
                          action="store_true", default=False)

REBOOT_TYPE_OPT = cli_option("-t", "--type", dest="reboot_type",
                             help="Type of reboot: soft/hard/full",
                             default=constants.INSTANCE_REBOOT_HARD,
                             choices=list(constants.REBOOT_TYPES))

IGNORE_SECONDARIES_OPT = cli_option("--ignore-secondaries",
                                    dest="ignore_secondaries",
                                    default=False, action="store_true",
                                    help="Ignore errors from secondaries")

NOSHUTDOWN_OPT = cli_option("--noshutdown", dest="shutdown",
                            action="store_false", default=True,
                            help="Don't shutdown the instance (unsafe)")

TIMEOUT_OPT = cli_option("--timeout", dest="timeout", type="int",
                         default=constants.DEFAULT_SHUTDOWN_TIMEOUT,
                         help="Maximum time to wait")

SHUTDOWN_TIMEOUT_OPT = cli_option("--shutdown-timeout",
                                  dest="shutdown_timeout", type="int",
                                  default=constants.DEFAULT_SHUTDOWN_TIMEOUT,
                                  help="Maximum time to wait for instance shutdown")
def _ParseArgs(argv, commands, aliases):
  """Parser for the command line arguments.

  This function parses the arguments and returns the function which
  must be executed together with its (modified) arguments.

  @param argv: the command line
  @param commands: dictionary with special contents, see the design
      doc for cmdline handling
  @param aliases: dictionary with command aliases {'alias': 'target', ...}

  @return: (function, options, args) on success, or (None, None, None)
      when no valid command was given

  """
  # program name, without leading directory components
  binary = argv[0].split("/")[-1]

  if len(argv) > 1 and argv[1] == "--version":
    ToStdout("%s (ganeti) %s", binary, constants.RELEASE_VERSION)
    # Quit right away. That way we don't have to care about this special
    # argument. optparse.py does it the same.

  # no command, or an unknown one: print a usage summary and bail out
  if len(argv) < 2 or not (argv[1] in commands or
    # let's do a nice thing
    sortedcmds = commands.keys()

    ToStdout("Usage: %s {command} [options...] [argument...]", binary)
    ToStdout("%s <command> --help to see details, or man %s", binary, binary)

    # compute the max line length for cmd + usage
    mlen = max([len(" %s" % cmd) for cmd in commands])
    mlen = min(60, mlen) # should not get here...

    # and format a nice command list
    ToStdout("Commands:")
    for cmd in sortedcmds:
      cmdstr = " %s" % (cmd,)
      # commands[cmd] is (func, args_def, parser_opts, usage, description);
      # index 4 is the description used as help text
      help_text = commands[cmd][4]
      help_lines = textwrap.wrap(help_text, 79 - 3 - mlen)
      ToStdout("%-*s - %s", mlen, cmdstr, help_lines.pop(0))
      for line in help_lines:
        ToStdout("%-*s %s", mlen, "", line)

    return None, None, None

  # get command, unalias it, and look it up in commands
    raise errors.ProgrammerError("Alias '%s' overrides an existing"
    if aliases[cmd] not in commands:
      raise errors.ProgrammerError("Alias '%s' maps to non-existing"
                                   " command '%s'" % (cmd, aliases[cmd]))

  func, args_def, parser_opts, usage, description = commands[cmd]
  # every command automatically gets --dry-run and --debug
  parser = OptionParser(option_list=parser_opts + [_DRY_RUN_OPT, DEBUG_OPT],
                        description=description,
                        formatter=TitledHelpFormatter(),
                        usage="%%prog %s %s" % (cmd, usage))
  parser.disable_interspersed_args()
  options, args = parser.parse_args()

  if not _CheckArguments(cmd, args_def, args):
    return None, None, None

  return func, options, args
def _CheckArguments(cmd, args_def, args):
  """Verifies the arguments using the argument definition.

  Algorithm:

    1. Abort with error if values specified by user but none expected.

    1. For each argument in definition

      1. Keep running count of minimum number of values (min_count)
      1. Keep running count of maximum number of values (max_count)
      1. If it has an unlimited number of values

        1. Abort with error if it's not the last argument in the definition

    1. If last argument has limited number of values

      1. Abort with error if number of values doesn't match or is too large

    1. Abort with error if user didn't pass enough values (min_count)

  """
  if args and not args_def:
    ToStderr("Error: Command %s expects no arguments", cmd)

  last_idx = len(args_def) - 1

  # accumulate min_count/max_count over the argument definitions;
  # NOTE(review): their initialization and the per-branch bodies are not
  # fully visible in this excerpt.
  for idx, arg in enumerate(args_def):
    if min_count is None:
    elif arg.min is not None:

    if max_count is None:
    elif arg.max is not None:

      check_max = (arg.max is not None)
    elif arg.max is None:
      # an unlimited argument anywhere but last would be ambiguous
      raise errors.ProgrammerError("Only the last argument can have max=None")

  # Command with exact number of arguments
  if (min_count is not None and max_count is not None and
      min_count == max_count and len(args) != min_count):
    ToStderr("Error: Command %s expects %d argument(s)", cmd, min_count)

  # Command with limited number of arguments
  if max_count is not None and len(args) > max_count:
    ToStderr("Error: Command %s expects only %d argument(s)",

  # Command with some required arguments
  if min_count is not None and len(args) < min_count:
    ToStderr("Error: Command %s expects at least %d argument(s)",
def SplitNodeOption(value):
  """Splits the value of a --node option.

  """
  # 'pnode:snode' is split on the first colon only; the fallback for values
  # without a colon is not visible in this excerpt
  if value and ':' in value:
    return value.split(':', 1)
def CalculateOSNames(os_name, os_variants):
  """Calculates all the names an OS can be called, according to its variants.

  @type os_name: string
  @param os_name: base name of the os
  @type os_variants: list or None
  @param os_variants: list of supported variants

  @return: list of valid names

  """
  # each variant yields a 'name+variant' form; the guard for the
  # no-variants case is not visible in this excerpt
    return ['%s+%s' % (os_name, v) for v in os_variants]
1018 def wrapper(*args, **kwargs):
1021 return fn(*args, **kwargs)
def AskUser(text, choices=None):
  """Ask the user a question.

  @param text: the question to ask

  @param choices: list with elements tuples (input_char, return_value,
      description); if not given, it will default to: [('y', True,
      'Perform the operation'), ('n', False, 'Do not do the operation')];
      note that the '?' char is reserved for help

  @return: one of the return values from the choices list; if input is
      not possible (i.e. not running with a tty), we return the last
      entry from the list

  """
  # default to a simple yes/no question; the 'if choices is None:' guard is
  # not visible in this excerpt
    choices = [('y', True, 'Perform the operation'),
               ('n', False, 'Do not perform the operation')]
  # validate the choices structure before using it
  if not choices or not isinstance(choices, list):
    raise errors.ProgrammerError("Invalid choices argument to AskUser")
  for entry in choices:
    if not isinstance(entry, tuple) or len(entry) < 3 or entry[0] == '?':
      raise errors.ProgrammerError("Invalid choices element to AskUser")

  # the last entry doubles as the fallback answer when no tty is available
  answer = choices[-1][1]
  # wrap the question text to 70 columns, preserving existing line breaks
  for line in text.splitlines():
    new_text.append(textwrap.fill(line, 70, replace_whitespace=False))
  text = "\n".join(new_text)
    # talk directly to the controlling terminal, not stdin/stdout
    f = file("/dev/tty", "a+")
  # build the prompt: e.g. "y/[n]" with the default in brackets
  chars = [entry[0] for entry in choices]
  chars[-1] = "[%s]" % chars[-1]
  maps = dict([(entry[0], entry[1]) for entry in choices])
    f.write("/".join(chars))
    # read at most one character (plus newline) of input
    line = f.readline(2).strip().lower()
      # '?' help: list all choices with their descriptions
      for entry in choices:
        f.write(" %s - %s\n" % (entry[0], entry[2]))
class JobSubmittedException(Exception):
  """Job was submitted, client should exit.

  This exception has one argument, the ID of the job that was
  submitted. The handler should print this ID.

  This is not an error, just a structured way to exit from clients.

  """
def SendJob(ops, cl=None):
  """Function to submit an opcode without waiting for the results.

  @param ops: list of opcodes
  @type cl: luxi.Client
  @param cl: the luxi client to use for communicating with the master;
      if None, a new client will be created

  """
  # submit all opcodes as a single job; the creation of a default client
  # and the return of job_id are not visible in this excerpt
  job_id = cl.SubmitJob(ops)
def PollJob(job_id, cl=None, feedback_fn=None):
  """Function to poll for the result of a job.

  @type job_id: job identified
  @param job_id: the job to poll for results
  @type cl: luxi.Client
  @param cl: the luxi client to use for communicating with the master;
      if None, a new client will be created

  """
  # NOTE(review): the polling loop's scaffolding (the 'while True:',
  # status checks and 'break') is only partially visible in this excerpt;
  # the visible statements are preserved verbatim.
  prev_job_info = None
  prev_logmsg_serial = None

    # block until the job's status (or log) changes
    result = cl.WaitForJobChange(job_id, ["status"], prev_job_info,

      # job not found, go away!
      raise errors.JobLost("Job with id %s lost" % job_id)

    # Split result, a tuple of (field values, log entries)
    (job_info, log_entries) = result
    (status, ) = job_info

      # print/forward any log messages newer than the last seen serial
      for log_entry in log_entries:
        (serial, timestamp, _, message) = log_entry
          if callable(feedback_fn):
            feedback_fn(log_entry[1:])
            encoded = utils.SafeEncode(message)
            ToStdout("%s %s", time.ctime(utils.MergeTime(timestamp)), encoded)
        prev_logmsg_serial = max(prev_logmsg_serial, serial)

    # TODO: Handle canceled and archived jobs
    elif status in (constants.JOB_STATUS_SUCCESS,
                    constants.JOB_STATUS_ERROR,
                    constants.JOB_STATUS_CANCELING,
                    constants.JOB_STATUS_CANCELED):

    prev_job_info = job_info

  # job finished: fetch the final status and per-opcode results
  jobs = cl.QueryJobs([job_id], ["status", "opstatus", "opresult"])
    raise errors.JobLost("Job with id %s lost" % job_id)

  status, opstatus, result = jobs[0]
  if status == constants.JOB_STATUS_SUCCESS:
  elif status in (constants.JOB_STATUS_CANCELING,
                  constants.JOB_STATUS_CANCELED):
    raise errors.OpExecError("Job was canceled")
  # find and report the first failed opcode
  for idx, (status, msg) in enumerate(zip(opstatus, result)):
    if status == constants.OP_STATUS_SUCCESS:
    elif status == constants.OP_STATUS_ERROR:
      # re-raise a structured (encoded) exception if msg carries one
      errors.MaybeRaise(msg)
        raise errors.OpExecError("partial failure (opcode %d): %s" %
      raise errors.OpExecError(str(msg))
  # default failure mode
  raise errors.OpExecError(result)
def SubmitOpCode(op, cl=None, feedback_fn=None):
  """Legacy function to submit an opcode.

  This is just a simple wrapper over the construction of the processor
  instance. It should be extended to better handle feedback and
  interaction functions.

  """
  # submit the single opcode as a one-opcode job and wait for it
  job_id = SendJob([op], cl)

  op_results = PollJob(job_id, cl=cl, feedback_fn=feedback_fn)

  # one opcode submitted, so exactly one result is expected
  return op_results[0]
def SubmitOrSend(op, opts, cl=None, feedback_fn=None):
  """Wrapper around SubmitOpCode or SendJob.

  This function will decide, based on the 'opts' parameter, whether to
  submit and wait for the result of the opcode (and return it), or
  whether to just send the job and print its identifier. It is used in
  order to simplify the implementation of the '--submit' option.

  It will also add the dry-run parameter from the options passed, if true.

  """
  # propagate --dry-run into the opcode itself
  if opts and opts.dry_run:
    op.dry_run = opts.dry_run
  if opts and opts.submit_only:
    job_id = SendJob([op], cl=cl)
    # not an error: callers catch this and print the job ID
    raise JobSubmittedException(job_id)

  return SubmitOpCode(op, cl=cl, feedback_fn=feedback_fn)
1224 # TODO: Cache object?
1226 client = luxi.Client()
1227 except luxi.NoMasterError:
1228 ss = ssconf.SimpleStore()
1230 # Try to read ssconf file
1233 except errors.ConfigurationError:
1234 raise errors.OpPrereqError("Cluster not initialized or this machine is"
1235 " not part of a cluster")
1237 master, myself = ssconf.GetMasterAndMyself(ss=ss)
1238 if master != myself:
1239 raise errors.OpPrereqError("This is not the master node, please connect"
1240 " to node '%s' and rerun the command" %
def FormatError(err):
  """Return a formatted error message for a given error.

  This function takes an exception instance and returns a tuple
  consisting of two values: first, the recommended exit code, and
  second, a string describing the error message (not
  newline-terminated).

  @param err: the exception to format
  @return: tuple of (exit code, error message string)

  """
  retcode = 1
  obuf = StringIO()
  msg = str(err)
  if isinstance(err, errors.ConfigurationError):
    txt = "Corrupt configuration file: %s" % msg
    logging.error(txt)
    obuf.write(txt + "\n")
    obuf.write("Aborting.")
    # configuration corruption is more severe than an ordinary failure
    retcode = 2
  elif isinstance(err, errors.HooksAbort):
    obuf.write("Failure: hooks execution failed:\n")
    for node, script, out in err.args[0]:
      if out:
        obuf.write("  node: %s, script: %s, output: %s\n" %
                   (node, script, out))
      else:
        obuf.write("  node: %s, script: %s (no output)\n" %
                   (node, script))
  elif isinstance(err, errors.HooksFailure):
    obuf.write("Failure: hooks general failure: %s" % msg)
  elif isinstance(err, errors.ResolverError):
    this_host = utils.HostInfo.SysName()
    if err.args[0] == this_host:
      msg = "Failure: can't resolve my own hostname ('%s')"
    else:
      msg = "Failure: can't resolve hostname '%s'"
    obuf.write(msg % err.args[0])
  elif isinstance(err, errors.OpPrereqError):
    if len(err.args) == 2:
      obuf.write("Failure: prerequisites not met for this"
                 " operation:\nerror type: %s, error details:\n%s" %
                 (err.args[1], err.args[0]))
    else:
      obuf.write("Failure: prerequisites not met for this"
                 " operation:\n%s" % msg)
  elif isinstance(err, errors.OpExecError):
    obuf.write("Failure: command execution error:\n%s" % msg)
  elif isinstance(err, errors.TagError):
    obuf.write("Failure: invalid tag(s) given:\n%s" % msg)
  elif isinstance(err, errors.JobQueueDrainError):
    obuf.write("Failure: the job queue is marked for drain and doesn't"
               " accept new requests\n")
  elif isinstance(err, errors.JobQueueFull):
    obuf.write("Failure: the job queue is full and doesn't accept new"
               " job submissions until old jobs are archived\n")
  elif isinstance(err, errors.TypeEnforcementError):
    obuf.write("Parameter Error: %s" % msg)
  elif isinstance(err, errors.ParameterError):
    obuf.write("Failure: unknown/wrong parameter name '%s'" % msg)
  elif isinstance(err, errors.GenericError):
    obuf.write("Unhandled Ganeti error: %s" % msg)
  elif isinstance(err, luxi.NoMasterError):
    obuf.write("Cannot communicate with the master daemon.\nIs it running"
               " and listening for connections?")
  elif isinstance(err, luxi.TimeoutError):
    obuf.write("Timeout while talking to the master daemon. Error:\n"
               "%s" % msg)
  elif isinstance(err, luxi.ProtocolError):
    obuf.write("Unhandled protocol error while talking to the master daemon:\n"
               "%s" % msg)
  elif isinstance(err, JobSubmittedException):
    # not an error: the job was submitted on purpose via '--submit'
    obuf.write("JobID: %s\n" % err.args[0])
    retcode = 0
  else:
    obuf.write("Unhandled exception: %s" % msg)
  return retcode, obuf.getvalue().rstrip('\n')
def GenericMain(commands, override=None, aliases=None):
  """Generic main function for all the gnt-* commands.

  Arguments:
    - commands: a dictionary with a special structure, see the design doc
                for command line handling.
    - override: if not None, we expect a dictionary with keys that will
                override command line options; this can be used to pass
                options from the scripts to generic functions
    - aliases: dictionary with command aliases {'alias': 'target, ...}

  @return: the exit code of the command, suitable for sys.exit()

  """
  # save the program name and the entire command line for later logging
  if sys.argv:
    binary = os.path.basename(sys.argv[0]) or sys.argv[0]
    if len(sys.argv) >= 2:
      binary += " " + sys.argv[1]
      old_cmdline = " ".join(sys.argv[2:])
    else:
      old_cmdline = ""
  else:
    binary = "<unknown program>"
    old_cmdline = ""

  if aliases is None:
    aliases = {}

  try:
    func, options, args = _ParseArgs(sys.argv, commands, aliases)
  except errors.ParameterError as err:
    result, err_msg = FormatError(err)
    ToStderr(err_msg)
    return 1

  if func is None: # parse error
    return 1

  if override is not None:
    for key, val in override.items():
      setattr(options, key, val)

  utils.SetupLogging(constants.LOG_COMMANDS, debug=options.debug,
                     stderr_logging=True, program=binary)

  if old_cmdline:
    logging.info("run with arguments '%s'", old_cmdline)
  else:
    logging.info("run with no arguments")

  try:
    result = func(options, args)
  except (errors.GenericError, luxi.ProtocolError,
          JobSubmittedException) as err:
    result, err_msg = FormatError(err)
    logging.exception("Error during command processing")
    ToStderr(err_msg)

  return result
def GenericInstanceCreate(mode, opts, args):
  """Add an instance to the cluster via either creation or import.

  @param mode: constants.INSTANCE_CREATE or constants.INSTANCE_IMPORT
  @param opts: the command line options selected by the user
  @type args: list
  @param args: should contain only one element, the new instance name
  @rtype: int
  @return: the desired exit code

  """
  instance = args[0]

  (pnode, snode) = SplitNodeOption(opts.node)

  hypervisor = None
  hvparams = {}
  if opts.hypervisor:
    hypervisor, hvparams = opts.hypervisor

  if opts.nics:
    try:
      nic_max = max(int(nidx[0]) + 1 for nidx in opts.nics)
    except ValueError as err:
      raise errors.OpPrereqError("Invalid NIC index passed: %s" % str(err))
    nics = [{}] * nic_max
    for nidx, ndict in opts.nics:
      nidx = int(nidx)
      if not isinstance(ndict, dict):
        msg = "Invalid nic/%d value: expected dict, got %s" % (nidx, ndict)
        raise errors.OpPrereqError(msg)
      nics[nidx] = ndict
  elif opts.no_nics:
    # no nics requested explicitly
    nics = []
  else:
    # default of one nic, all auto
    nics = [{}]

  if opts.disk_template == constants.DT_DISKLESS:
    if opts.disks or opts.sd_size is not None:
      raise errors.OpPrereqError("Diskless instance but disk"
                                 " information passed")
    disks = []
  else:
    if not opts.disks and not opts.sd_size:
      raise errors.OpPrereqError("No disk information specified")
    if opts.disks and opts.sd_size is not None:
      raise errors.OpPrereqError("Please use either the '--disk' or"
                                 " '-s' option")
    if opts.sd_size is not None:
      opts.disks = [(0, {"size": opts.sd_size})]
    try:
      disk_max = max(int(didx[0]) + 1 for didx in opts.disks)
    except ValueError as err:
      raise errors.OpPrereqError("Invalid disk index passed: %s" % str(err))
    disks = [{}] * disk_max
    for didx, ddict in opts.disks:
      didx = int(didx)
      if not isinstance(ddict, dict):
        msg = "Invalid disk/%d value: expected dict, got %s" % (didx, ddict)
        raise errors.OpPrereqError(msg)
      elif "size" not in ddict:
        raise errors.OpPrereqError("Missing size for disk %d" % didx)
      try:
        ddict["size"] = utils.ParseUnit(ddict["size"])
      except ValueError as err:
        raise errors.OpPrereqError("Invalid disk size for disk %d: %s" %
                                   (didx, err))
      disks[didx] = ddict

  utils.ForceDictType(opts.beparams, constants.BES_PARAMETER_TYPES)
  utils.ForceDictType(hvparams, constants.HVS_PARAMETER_TYPES)

  # mode-dependent parameters of the opcode
  if mode == constants.INSTANCE_CREATE:
    start = opts.start
    os_type = opts.os
    src_node = None
    src_path = None
  elif mode == constants.INSTANCE_IMPORT:
    start = False
    os_type = None
    src_node = opts.src_node
    src_path = opts.src_dir
  else:
    raise errors.ProgrammerError("Invalid creation mode %s" % mode)

  op = opcodes.OpCreateInstance(instance_name=instance,
                                disks=disks,
                                disk_template=opts.disk_template,
                                nics=nics,
                                pnode=pnode, snode=snode,
                                ip_check=opts.ip_check,
                                name_check=opts.name_check,
                                wait_for_sync=opts.wait_for_sync,
                                file_storage_dir=opts.file_storage_dir,
                                file_driver=opts.file_driver,
                                iallocator=opts.iallocator,
                                hypervisor=hypervisor,
                                hvparams=hvparams,
                                beparams=opts.beparams,
                                mode=mode,
                                start=start,
                                os_type=os_type,
                                src_node=src_node,
                                src_path=src_path)

  SubmitOrSend(op, opts)
  return 0
def GenerateTable(headers, fields, separator, data,
                  numfields=None, unitfields=None,
                  units=None):
  """Prints a table with headers and different fields.

  @type headers: dict
  @param headers: dictionary mapping field names to headers for
      the table
  @type fields: list
  @param fields: the field names corresponding to each row in
      the data field
  @param separator: the separator to be used; if this is None,
      the default 'smart' algorithm is used which computes optimal
      field width, otherwise just the separator is used between
      each field
  @type data: list
  @param data: a list of lists, each sublist being one row to be output
  @type numfields: list
  @param numfields: a list with the fields that hold numeric
      values and thus should be right-aligned
  @type unitfields: list
  @param unitfields: a list with the fields that hold numeric
      values that should be formatted with the units field
  @type units: string or None
  @param units: the units we should use for formatting, or None for
      automatic choice (human-readable for non-separator usage, otherwise
      megabytes); this is a one-letter string
  @rtype: list
  @return: the formatted table lines

  """
  if units is None:
    if separator:
      units = "m"
    else:
      units = "h"

  if numfields is None:
    numfields = []
  if unitfields is None:
    unitfields = []

  numfields = utils.FieldSet(*numfields)   # pylint: disable-msg=W0142
  unitfields = utils.FieldSet(*unitfields) # pylint: disable-msg=W0142

  format_fields = []
  for field in fields:
    if headers and field not in headers:
      # TODO: handle better unknown fields (either revert to old
      # style of raising exception, or deal more intelligently with
      # variable fields)
      headers[field] = field
    if separator is not None:
      format_fields.append("%s")
    elif numfields.Matches(field):
      format_fields.append("%*s")
    else:
      format_fields.append("%-*s")

  if separator is None:
    mlens = [0 for name in fields]
    format = ' '.join(format_fields)
  else:
    # escape literal '%' in the separator before joining the %-directives
    format = separator.replace("%", "%%").join(format_fields)

  for row in data:
    if row is None:
      continue
    for idx, val in enumerate(row):
      if unitfields.Matches(fields[idx]):
        try:
          val = int(val)
        except (TypeError, ValueError):
          pass
        else:
          val = row[idx] = utils.FormatUnit(val, units)
      val = row[idx] = str(val)
      if separator is None:
        mlens[idx] = max(mlens[idx], len(val))

  result = []
  if headers:
    args = []
    for idx, name in enumerate(fields):
      hdr = headers[name]
      if separator is None:
        mlens[idx] = max(mlens[idx], len(hdr))
        args.append(mlens[idx])
      args.append(hdr)
    result.append(format % tuple(args))

  if separator is None:
    assert len(mlens) == len(fields)
    # the last field doesn't need padding unless right-aligned
    if fields and not numfields.Matches(fields[-1]):
      mlens[-1] = 0

  for line in data:
    args = []
    if line is None:
      # None rows are rendered as a separator line of dashes
      line = ['-' for _ in fields]
    for idx in range(len(fields)):
      if separator is None:
        args.append(mlens[idx])
      args.append(line[idx])
    result.append(format % tuple(args))

  return result
def FormatTimestamp(ts):
  """Formats a given timestamp.

  @type ts: timestamp
  @param ts: a timeval-type timestamp, a tuple of seconds and microseconds

  @rtype: string
  @return: a string with the formatted timestamp, or '?' when the input
      is not a valid (seconds, microseconds) pair

  """
  if not isinstance(ts, (tuple, list)) or len(ts) != 2:
    return '?'
  sec, usec = ts
  return time.strftime("%F %T", time.localtime(sec)) + ".%06d" % usec
def ParseTimespec(value):
  """Parse a time specification.

  The following suffixed will be recognized:

    - s: seconds
    - m: minutes
    - h: hours
    - d: day
    - w: weeks

  Without any suffix, the value will be taken to be in seconds.

  @param value: the time specification to parse
  @rtype: int
  @return: the number of seconds the specification denotes
  @raise errors.OpPrereqError: on empty or invalid specifications

  """
  value = str(value)
  if not value:
    raise errors.OpPrereqError("Empty time specification passed")
  suffix_map = {
    's': 1,
    'm': 60,
    'h': 3600,
    'd': 86400,
    'w': 604800,
    }
  if value[-1] not in suffix_map:
    try:
      value = int(value)
    except (TypeError, ValueError):
      raise errors.OpPrereqError("Invalid time specification '%s'" % value)
  else:
    multiplier = suffix_map[value[-1]]
    value = value[:-1]
    if not value: # no data left after stripping the suffix
      raise errors.OpPrereqError("Invalid time specification (only"
                                 " suffix passed)")
    try:
      value = int(value) * multiplier
    except (TypeError, ValueError):
      raise errors.OpPrereqError("Invalid time specification '%s'" % value)
  return value
def GetOnlineNodes(nodes, cl=None, nowarn=False):
  """Returns the names of online nodes.

  This function will also log a warning on stderr with the names of
  the online nodes.

  @param nodes: if not empty, use only this subset of nodes (minus the
      offline ones)
  @param cl: if not None, luxi client to use
  @type nowarn: boolean
  @param nowarn: by default, this function will output a note with the
      offline nodes that are skipped; if this parameter is True the
      note is not displayed

  """
  if cl is None:
    cl = GetClient()

  result = cl.QueryNodes(names=nodes, fields=["name", "offline"],
                         use_locking=False)
  offline = [row[0] for row in result if row[1]]
  if offline and not nowarn:
    ToStderr("Note: skipping offline node(s): %s" % utils.CommaJoin(offline))
  return [row[0] for row in result if not row[1]]
1686 def _ToStream(stream, txt, *args):
1687 """Write a message to a stream, bypassing the logging system
1689 @type stream: file object
1690 @param stream: the file to which we should write
1692 @param txt: the message
1697 stream.write(txt % args)
def ToStdout(txt, *args):
  """Write a message to stdout only, bypassing the logging system

  This is just a wrapper over _ToStream.

  @type txt: str
  @param txt: the message

  """
  _ToStream(sys.stdout, txt, *args)
def ToStderr(txt, *args):
  """Write a message to stderr only, bypassing the logging system

  This is just a wrapper over _ToStream.

  @type txt: str
  @param txt: the message

  """
  _ToStream(sys.stderr, txt, *args)
class JobExecutor(object):
  """Class which manages the submission and execution of multiple jobs.

  Note that instances of this class should not be reused between
  GetResults() calls.

  """
  def __init__(self, cl=None, verbose=True):
    # queue of (name, ops) tuples not yet submitted
    self.queue = []
    if cl is None:
      cl = GetClient()
    self.cl = cl
    self.verbose = verbose
    # list of (submit_status, job_id_or_error, name) for submitted jobs
    self.jobs = []

  def QueueJob(self, name, *ops):
    """Record a job for later submit.

    @type name: string
    @param name: a description of the job, will be used in WaitJobSet

    """
    self.queue.append((name, ops))

  def SubmitPending(self):
    """Submit all pending jobs.

    """
    results = self.cl.SubmitManyJobs([row[1] for row in self.queue])
    for ((status, data), (name, _)) in zip(results, self.queue):
      self.jobs.append((status, data, name))

  def GetResults(self):
    """Wait for and return the results of all jobs.

    @rtype: list
    @return: list of tuples (success, job results), in the same order
        as the submitted jobs; if a job has failed, instead of the result
        there will be the error message

    """
    if not self.jobs:
      self.SubmitPending()
    results = []
    if self.verbose:
      ok_jobs = [row[1] for row in self.jobs if row[0]]
      if ok_jobs:
        ToStdout("Submitted jobs %s", utils.CommaJoin(ok_jobs))
    for submit_status, jid, name in self.jobs:
      if not submit_status:
        # submission itself failed; jid holds the error message here
        ToStderr("Failed to submit job for %s: %s", name, jid)
        results.append((False, jid))
        continue
      if self.verbose:
        ToStdout("Waiting for job %s for %s...", jid, name)
      try:
        job_result = PollJob(jid, cl=self.cl)
        success = True
      except (errors.GenericError, luxi.ProtocolError) as err:
        _, job_result = FormatError(err)
        success = False
        # the error message will always be shown, verbose or not
        ToStderr("Job %s for %s has failed: %s", jid, name, job_result)

      results.append((success, job_result))
    return results

  def WaitOrShow(self, wait):
    """Wait for job results or only print the job IDs.

    @type wait: boolean
    @param wait: whether to wait or not

    """
    if wait:
      return self.GetResults()
    else:
      if not self.jobs:
        self.SubmitPending()
      for status, result, name in self.jobs:
        if status:
          ToStdout("%s: %s", result, name)
        else:
          ToStderr("Failure for %s: %s", name, result)