4 # Copyright (C) 2006, 2007 Google Inc.
6 # This program is free software; you can redistribute it and/or modify
7 # it under the terms of the GNU General Public License as published by
8 # the Free Software Foundation; either version 2 of the License, or
9 # (at your option) any later version.
11 # This program is distributed in the hope that it will be useful, but
12 # WITHOUT ANY WARRANTY; without even the implied warranty of
13 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 # General Public License for more details.
16 # You should have received a copy of the GNU General Public License
17 # along with this program; if not, write to the Free Software
18 # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
22 """Module dealing with command line parsing"""
31 from cStringIO import StringIO
33 from ganeti import utils
34 from ganeti import errors
35 from ganeti import constants
36 from ganeti import opcodes
37 from ganeti import luxi
38 from ganeti import ssconf
39 from ganeti import rpc
41 from optparse import (OptionParser, TitledHelpFormatter,
42 Option, OptionValueError)
46 # Command line options
64 "FILESTORE_DRIVER_OPT",
71 "IGNORE_FAILURES_OPT",
72 "IGNORE_SECONDARIES_OPT",
86 "NOMODIFY_ETCHOSTS_OPT",
117 # Generic functions for CLI programs
119 "GenericInstanceCreate",
123 "JobSubmittedException",
128 # Formatting functions
129 "ToStderr", "ToStdout",
138 # command line options support infrastructure
139 "ARGS_MANY_INSTANCES",
153 "OPT_COMPL_INST_ADD_NODES",
154 "OPT_COMPL_MANY_NODES",
155 "OPT_COMPL_ONE_IALLOCATOR",
156 "OPT_COMPL_ONE_INSTANCE",
157 "OPT_COMPL_ONE_NODE",
168 def __init__(self, min=0, max=None):
173 return ("<%s min=%s max=%s>" %
174 (self.__class__.__name__, self.min, self.max))
177 class ArgSuggest(_Argument):
178 """Suggesting argument.
180 Value can be any of the ones passed to the constructor.
183 def __init__(self, min=0, max=None, choices=None):
184 _Argument.__init__(self, min=min, max=max)
185 self.choices = choices
188 return ("<%s min=%s max=%s choices=%r>" %
189 (self.__class__.__name__, self.min, self.max, self.choices))
192 class ArgChoice(ArgSuggest):
195 Value can be any of the ones passed to the constructor. Like L{ArgSuggest},
196 but value must be one of the choices.
201 class ArgUnknown(_Argument):
202 """Unknown argument to program (e.g. determined at runtime).
207 class ArgInstance(_Argument):
208 """Instances argument.
213 class ArgNode(_Argument):
218 class ArgJobId(_Argument):
224 class ArgFile(_Argument):
225 """File path argument.
230 class ArgCommand(_Argument):
236 class ArgHost(_Argument):
# Predefined positional-argument specifications shared by the commands in
# this module: the *_MANY_* variants accept any number of names
# (min=0, max=None), the *_ONE_* variants require exactly one name.
ARGS_MANY_INSTANCES = [ArgInstance()]
ARGS_MANY_NODES = [ArgNode()]
ARGS_ONE_INSTANCE = [ArgInstance(min=1, max=1)]
ARGS_ONE_NODE = [ArgNode(min=1, max=1)]
250 def _ExtractTagsObject(opts, args):
251 """Extract the tag type object.
253 Note that this function will modify its args parameter.
256 if not hasattr(opts, "tag_type"):
257 raise errors.ProgrammerError("tag_type not passed to _ExtractTagsObject")
259 if kind == constants.TAG_CLUSTER:
261 elif kind == constants.TAG_NODE or kind == constants.TAG_INSTANCE:
263 raise errors.OpPrereqError("no arguments passed to the command")
267 raise errors.ProgrammerError("Unhandled tag type '%s'" % kind)
271 def _ExtendTags(opts, args):
272 """Extend the args if a source file has been given.
274 This function will extend the tags with the contents of the file
275 passed in the 'tags_source' attribute of the opts parameter. A file
276 named '-' will be replaced by stdin.
279 fname = opts.tags_source
285 new_fh = open(fname, "r")
288 # we don't use the nice 'new_data = [line.strip() for line in fh]'
289 # because of python bug 1633941
291 line = new_fh.readline()
294 new_data.append(line.strip())
297 args.extend(new_data)
300 def ListTags(opts, args):
301 """List the tags on a given object.
303 This is a generic implementation that knows how to deal with all
304 three cases of tag objects (cluster, node, instance). The opts
305 argument is expected to contain a tag_type field denoting what
306 object type we work on.
309 kind, name = _ExtractTagsObject(opts, args)
310 op = opcodes.OpGetTags(kind=kind, name=name)
311 result = SubmitOpCode(op)
312 result = list(result)
318 def AddTags(opts, args):
319 """Add tags on a given object.
321 This is a generic implementation that knows how to deal with all
322 three cases of tag objects (cluster, node, instance). The opts
323 argument is expected to contain a tag_type field denoting what
324 object type we work on.
327 kind, name = _ExtractTagsObject(opts, args)
328 _ExtendTags(opts, args)
330 raise errors.OpPrereqError("No tags to be added")
331 op = opcodes.OpAddTags(kind=kind, name=name, tags=args)
335 def RemoveTags(opts, args):
336 """Remove tags from a given object.
338 This is a generic implementation that knows how to deal with all
339 three cases of tag objects (cluster, node, instance). The opts
340 argument is expected to contain a tag_type field denoting what
341 object type we work on.
344 kind, name = _ExtractTagsObject(opts, args)
345 _ExtendTags(opts, args)
347 raise errors.OpPrereqError("No tags to be removed")
348 op = opcodes.OpDelTags(kind=kind, name=name, tags=args)
352 def check_unit(option, opt, value):
353 """OptParsers custom converter for units.
357 return utils.ParseUnit(value)
358 except errors.UnitParseError, err:
359 raise OptionValueError("option %s: %s" % (opt, err))
362 def _SplitKeyVal(opt, data):
363 """Convert a KeyVal string into a dict.
365 This function will convert a key=val[,...] string into a dict. Empty
366 values will be converted specially: keys which have the prefix 'no_'
367 will have the value=False and the prefix stripped, the others will
371 @param opt: a string holding the option name for which we process the
372 data, used in building error messages
374 @param data: a string of the format key=val,key=val,...
376 @return: {key=val, key=val}
377 @raises errors.ParameterError: if there are duplicate keys
382 for elem in data.split(","):
384 key, val = elem.split("=", 1)
386 if elem.startswith(NO_PREFIX):
387 key, val = elem[len(NO_PREFIX):], False
388 elif elem.startswith(UN_PREFIX):
389 key, val = elem[len(UN_PREFIX):], None
391 key, val = elem, True
393 raise errors.ParameterError("Duplicate key '%s' in option %s" %
399 def check_ident_key_val(option, opt, value):
400 """Custom parser for ident:key=val,key=val options.
402 This will store the parsed values as a tuple (ident, {key: val}). As such,
403 multiple uses of this option via action=append is possible.
407 ident, rest = value, ''
409 ident, rest = value.split(":", 1)
411 if ident.startswith(NO_PREFIX):
413 msg = "Cannot pass options when removing parameter groups: %s" % value
414 raise errors.ParameterError(msg)
415 retval = (ident[len(NO_PREFIX):], False)
416 elif ident.startswith(UN_PREFIX):
418 msg = "Cannot pass options when removing parameter groups: %s" % value
419 raise errors.ParameterError(msg)
420 retval = (ident[len(UN_PREFIX):], None)
422 kv_dict = _SplitKeyVal(opt, rest)
423 retval = (ident, kv_dict)
def check_key_val(option, opt, value):
  """Custom parser class for key=val,key=val options.

  This will store the parsed values as a dict {key: val}.

  @param option: the C{Option} instance for which the value is parsed
    (part of the optparse type-checker calling convention)
  @param opt: a string holding the option name, used in error messages
  @param value: a string of the format key=val,key=val,...
  @return: a dictionary mapping keys to values, as built by
    L{_SplitKeyVal}

  """
  return _SplitKeyVal(opt, value)
436 # completion_suggestion is normally a list. Using numeric values not evaluating
437 # to False for dynamic completion.
438 (OPT_COMPL_MANY_NODES,
440 OPT_COMPL_ONE_INSTANCE,
442 OPT_COMPL_ONE_IALLOCATOR,
443 OPT_COMPL_INST_ADD_NODES) = range(100, 106)
445 OPT_COMPL_ALL = frozenset([
446 OPT_COMPL_MANY_NODES,
448 OPT_COMPL_ONE_INSTANCE,
450 OPT_COMPL_ONE_IALLOCATOR,
451 OPT_COMPL_INST_ADD_NODES,
455 class CliOption(Option):
456 """Custom option class for optparse.
459 ATTRS = Option.ATTRS + [
460 "completion_suggest",
462 TYPES = Option.TYPES + (
467 TYPE_CHECKER = Option.TYPE_CHECKER.copy()
468 TYPE_CHECKER["identkeyval"] = check_ident_key_val
469 TYPE_CHECKER["keyval"] = check_key_val
470 TYPE_CHECKER["unit"] = check_unit
473 # optparse.py sets make_option, so we do it for our own option class, too
474 cli_option = CliOption
477 _YESNO = ("yes", "no")
480 DEBUG_OPT = cli_option("-d", "--debug", default=False,
482 help="Turn debugging on")
# Generic output-formatting and job-submission options shared by most of
# the gnt-* commands; each is a module-level constant so command tables
# can reference them directly.

NOHDR_OPT = cli_option("--no-headers", default=False,
                       action="store_true", dest="no_headers",
                       help="Don't display column headers")

SEP_OPT = cli_option("--separator", default=None,
                     action="store", dest="separator",
                     help=("Separator between output fields"
                           " (defaults to one space)"))

USEUNITS_OPT = cli_option("--units", default=None,
                          dest="units", choices=('h', 'm', 'g', 't'),
                          help="Specify units for output (one of hmgt)")

FIELDS_OPT = cli_option("-o", "--output", dest="output", action="store",
                        type="string", metavar="FIELDS",
                        help="Comma separated list of output fields")

FORCE_OPT = cli_option("-f", "--force", dest="force", action="store_true",
                       default=False, help="Force the operation")

CONFIRM_OPT = cli_option("--yes", dest="confirm", action="store_true",
                         default=False, help="Do not require confirmation")

TAG_SRC_OPT = cli_option("--from", dest="tags_source",
                         default=None, help="File with tag names")

SUBMIT_OPT = cli_option("--submit", dest="submit_only",
                        default=False, action="store_true",
                        help=("Submit the job and return the job ID, but"
                              " don't wait for the job to finish"))

SYNC_OPT = cli_option("--sync", dest="do_locking",
                      default=False, action="store_true",
                      help=("Grab locks while doing the queries"
                            " in order to ensure more consistent results"))
520 _DRY_RUN_OPT = cli_option("--dry-run", default=False,
522 help=("Do not execute the operation, just run the"
523 " check steps and verify it it could be"
526 VERBOSE_OPT = cli_option("-v", "--verbose", default=False,
528 help="Increase the verbosity of the operation")
530 DEBUG_SIMERR_OPT = cli_option("--debug-simulate-errors", default=False,
531 action="store_true", dest="simulate_errors",
532 help="Debugging option that makes the operation"
533 " treat most runtime checks as failed")
535 NWSYNC_OPT = cli_option("--no-wait-for-sync", dest="wait_for_sync",
536 default=True, action="store_false",
537 help="Don't wait for sync (DANGEROUS!)")
539 DISK_TEMPLATE_OPT = cli_option("-t", "--disk-template", dest="disk_template",
540 help="Custom disk setup (diskless, file,"
542 default=None, metavar="TEMPL",
543 choices=list(constants.DISK_TEMPLATES))
545 NONICS_OPT = cli_option("--no-nics", default=False, action="store_true",
546 help="Do not create any network cards for"
549 FILESTORE_DIR_OPT = cli_option("--file-storage-dir", dest="file_storage_dir",
550 help="Relative path under default cluster-wide"
551 " file storage dir to store file-based disks",
552 default=None, metavar="<DIR>")
554 FILESTORE_DRIVER_OPT = cli_option("--file-driver", dest="file_driver",
555 help="Driver to use for image files",
556 default="loop", metavar="<DRIVER>",
557 choices=list(constants.FILE_DRIVER))
559 IALLOCATOR_OPT = cli_option("-I", "--iallocator", metavar="<NAME>",
560 help="Select nodes for the instance automatically"
561 " using the <NAME> iallocator plugin",
562 default=None, type="string",
563 completion_suggest=OPT_COMPL_ONE_IALLOCATOR)
565 OS_OPT = cli_option("-o", "--os-type", dest="os", help="What OS to run",
567 completion_suggest=OPT_COMPL_ONE_OS)
569 BACKEND_OPT = cli_option("-B", "--backend-parameters", dest="beparams",
570 type="keyval", default={},
571 help="Backend parameters")
573 HVOPTS_OPT = cli_option("-H", "--hypervisor-parameters", type="keyval",
574 default={}, dest="hvparams",
575 help="Hypervisor parameters")
577 HYPERVISOR_OPT = cli_option("-H", "--hypervisor-parameters", dest="hypervisor",
578 help="Hypervisor and hypervisor options, in the"
579 " format hypervisor:option=value,option=value,...",
580 default=None, type="identkeyval")
582 HVLIST_OPT = cli_option("-H", "--hypervisor-parameters", dest="hvparams",
583 help="Hypervisor and hypervisor options, in the"
584 " format hypervisor:option=value,option=value,...",
585 default=[], action="append", type="identkeyval")
587 NOIPCHECK_OPT = cli_option("--no-ip-check", dest="ip_check", default=True,
588 action="store_false",
589 help="Don't check that the instance's IP"
592 NET_OPT = cli_option("--net",
593 help="NIC parameters", default=[],
594 dest="nics", action="append", type="identkeyval")
596 DISK_OPT = cli_option("--disk", help="Disk parameters", default=[],
597 dest="disks", action="append", type="identkeyval")
599 DISKIDX_OPT = cli_option("--disks", dest="disks", default=None,
600 help="Comma-separated list of disks"
601 " indices to act on (e.g. 0,2) (optional,"
602 " defaults to all disks)")
604 OS_SIZE_OPT = cli_option("-s", "--os-size", dest="sd_size",
605 help="Enforces a single-disk configuration using the"
606 " given disk size, in MiB unless a suffix is used",
607 default=None, type="unit", metavar="<size>")
609 IGNORE_CONSIST_OPT = cli_option("--ignore-consistency",
610 dest="ignore_consistency",
611 action="store_true", default=False,
612 help="Ignore the consistency of the disks on"
615 NONLIVE_OPT = cli_option("--non-live", dest="live",
616 default=True, action="store_false",
617 help="Do a non-live migration (this usually means"
618 " freeze the instance, save the state, transfer and"
619 " only then resume running on the secondary node)")
621 NODE_PLACEMENT_OPT = cli_option("-n", "--node", dest="node",
622 help="Target node and optional secondary node",
623 metavar="<pnode>[:<snode>]",
624 completion_suggest=OPT_COMPL_INST_ADD_NODES)
626 NODE_LIST_OPT = cli_option("-n", "--node", dest="nodes", default=[],
627 action="append", metavar="<node>",
628 help="Use only this node (can be used multiple"
629 " times, if not given defaults to all nodes)",
630 completion_suggest=OPT_COMPL_ONE_NODE)
632 SINGLE_NODE_OPT = cli_option("-n", "--node", dest="node", help="Target node",
634 completion_suggest=OPT_COMPL_ONE_NODE)
636 NOSTART_OPT = cli_option("--no-start", dest="start", default=True,
637 action="store_false",
638 help="Don't start the instance after creation")
640 SHOWCMD_OPT = cli_option("--show-cmd", dest="show_command",
641 action="store_true", default=False,
642 help="Show command instead of executing it")
644 CLEANUP_OPT = cli_option("--cleanup", dest="cleanup",
645 default=False, action="store_true",
646 help="Instead of performing the migration, try to"
647 " recover from a failed cleanup. This is safe"
648 " to run even if the instance is healthy, but it"
649 " will create extra replication traffic and "
650 " disrupt briefly the replication (like during the"
653 STATIC_OPT = cli_option("-s", "--static", dest="static",
654 action="store_true", default=False,
655 help="Only show configuration data, not runtime data")
657 ALL_OPT = cli_option("--all", dest="show_all",
658 default=False, action="store_true",
659 help="Show info on all instances on the cluster."
660 " This can take a long time to run, use wisely")
662 SELECT_OS_OPT = cli_option("--select-os", dest="select_os",
663 action="store_true", default=False,
664 help="Interactive OS reinstall, lists available"
665 " OS templates for selection")
667 IGNORE_FAILURES_OPT = cli_option("--ignore-failures", dest="ignore_failures",
668 action="store_true", default=False,
669 help="Remove the instance from the cluster"
670 " configuration even if there are failures"
671 " during the removal process")
673 NEW_SECONDARY_OPT = cli_option("-n", "--new-secondary", dest="dst_node",
674 help="Specifies the new secondary node",
675 metavar="NODE", default=None,
676 completion_suggest=OPT_COMPL_ONE_NODE)
678 ON_PRIMARY_OPT = cli_option("-p", "--on-primary", dest="on_primary",
679 default=False, action="store_true",
680 help="Replace the disk(s) on the primary"
681 " node (only for the drbd template)")
683 ON_SECONDARY_OPT = cli_option("-s", "--on-secondary", dest="on_secondary",
684 default=False, action="store_true",
685 help="Replace the disk(s) on the secondary"
686 " node (only for the drbd template)")
688 AUTO_REPLACE_OPT = cli_option("-a", "--auto", dest="auto",
689 default=False, action="store_true",
690 help="Automatically replace faulty disks"
691 " (only for the drbd template)")
693 IGNORE_SIZE_OPT = cli_option("--ignore-size", dest="ignore_size",
694 default=False, action="store_true",
695 help="Ignore current recorded size"
696 " (useful for forcing activation when"
697 " the recorded size is wrong)")
699 SRC_NODE_OPT = cli_option("--src-node", dest="src_node", help="Source node",
701 completion_suggest=OPT_COMPL_ONE_NODE)
703 SRC_DIR_OPT = cli_option("--src-dir", dest="src_dir", help="Source directory",
706 SECONDARY_IP_OPT = cli_option("-s", "--secondary-ip", dest="secondary_ip",
707 help="Specify the secondary ip for the node",
708 metavar="ADDRESS", default=None)
710 READD_OPT = cli_option("--readd", dest="readd",
711 default=False, action="store_true",
712 help="Readd old node after replacing it")
714 NOSSH_KEYCHECK_OPT = cli_option("--no-ssh-key-check", dest="ssh_key_check",
715 default=True, action="store_false",
716 help="Disable SSH key fingerprint checking")
# Per-node and per-volume flag options; these take an explicit "yes"/"no"
# value (see _YESNO) rather than acting as plain booleans, and default to
# None so that "not specified" can be distinguished from either choice.

MC_OPT = cli_option("-C", "--master-candidate", dest="master_candidate",
                    choices=_YESNO, default=None, metavar=_YORNO,
                    help="Set the master_candidate flag on the node")

OFFLINE_OPT = cli_option("-O", "--offline", dest="offline", metavar=_YORNO,
                         choices=_YESNO, default=None,
                         help="Set the offline flag on the node")

DRAINED_OPT = cli_option("-D", "--drained", dest="drained", metavar=_YORNO,
                         choices=_YESNO, default=None,
                         help="Set the drained flag on the node")

ALLOCATABLE_OPT = cli_option("--allocatable", dest="allocatable",
                             choices=_YESNO, default=None, metavar=_YORNO,
                             help="Set the allocatable flag on a volume")
735 NOLVM_STORAGE_OPT = cli_option("--no-lvm-storage", dest="lvm_storage",
736 help="Disable support for lvm based instances"
738 action="store_false", default=True)
740 ENABLED_HV_OPT = cli_option("--enabled-hypervisors",
741 dest="enabled_hypervisors",
742 help="Comma-separated list of hypervisors",
743 type="string", default=None)
745 NIC_PARAMS_OPT = cli_option("-N", "--nic-parameters", dest="nicparams",
746 type="keyval", default={},
747 help="NIC parameters")
749 CP_SIZE_OPT = cli_option("-C", "--candidate-pool-size", default=None,
750 dest="candidate_pool_size", type="int",
751 help="Set the candidate pool size")
753 VG_NAME_OPT = cli_option("-g", "--vg-name", dest="vg_name",
754 help="Enables LVM and specifies the volume group"
755 " name (cluster-wide) for disk allocation [xenvg]",
756 metavar="VG", default=None)
758 YES_DOIT_OPT = cli_option("--yes-do-it", dest="yes_do_it",
759 help="Destroy cluster", action="store_true")
761 NOVOTING_OPT = cli_option("--no-voting", dest="no_voting",
762 help="Skip node agreement check (dangerous)",
763 action="store_true", default=False)
765 MAC_PREFIX_OPT = cli_option("-m", "--mac-prefix", dest="mac_prefix",
766 help="Specify the mac prefix for the instance IP"
767 " addresses, in the format XX:XX:XX",
771 MASTER_NETDEV_OPT = cli_option("--master-netdev", dest="master_netdev",
772 help="Specify the node interface (cluster-wide)"
773 " on which the master IP address will be added "
774 " [%s]" % constants.DEFAULT_BRIDGE,
776 default=constants.DEFAULT_BRIDGE)
779 GLOBAL_FILEDIR_OPT = cli_option("--file-storage-dir", dest="file_storage_dir",
780 help="Specify the default directory (cluster-"
781 "wide) for storing the file-based disks [%s]" %
782 constants.DEFAULT_FILE_STORAGE_DIR,
784 default=constants.DEFAULT_FILE_STORAGE_DIR)
786 NOMODIFY_ETCHOSTS_OPT = cli_option("--no-etc-hosts", dest="modify_etc_hosts",
787 help="Don't modify /etc/hosts",
788 action="store_false", default=True)
# Boolean flag (off by default) switching error output to a
# machine-parseable format.
ERROR_CODES_OPT = cli_option("--error-codes",
                             action="store_true",
                             default=False,
                             dest="error_codes",
                             help="Enable parseable error messages")
794 NONPLUS1_OPT = cli_option("--no-nplus1-mem", dest="skip_nplusone_mem",
795 help="Skip N+1 memory redundancy tests",
796 action="store_true", default=False)
798 REBOOT_TYPE_OPT = cli_option("-t", "--type", dest="reboot_type",
799 help="Type of reboot: soft/hard/full",
800 default=constants.INSTANCE_REBOOT_HARD,
802 choices=list(constants.REBOOT_TYPES))
804 IGNORE_SECONDARIES_OPT = cli_option("--ignore-secondaries",
805 dest="ignore_secondaries",
806 default=False, action="store_true",
807 help="Ignore errors from secondaries")
# Keep the instance running while acting on it. Note: the previous
# version passed "" as a first (short) option string; optparse happens to
# drop empty option strings in Option._check_opt_strings (it filters
# falsy values), so omitting it entirely is equivalent and less fragile.
NOSHUTDOWN_OPT = cli_option("--noshutdown", dest="shutdown",
                            action="store_false", default=True,
                            help="Don't shutdown the instance (unsafe)")
815 def _ParseArgs(argv, commands, aliases):
816 """Parser for the command line arguments.
818 This function parses the arguments and returns the function which
819 must be executed together with its (modified) arguments.
821 @param argv: the command line
822 @param commands: dictionary with special contents, see the design
823 doc for cmdline handling
824 @param aliases: dictionary with command aliases {'alias': 'target, ...}
830 binary = argv[0].split("/")[-1]
832 if len(argv) > 1 and argv[1] == "--version":
833 ToStdout("%s (ganeti) %s", binary, constants.RELEASE_VERSION)
834 # Quit right away. That way we don't have to care about this special
835 # argument. optparse.py does it the same.
838 if len(argv) < 2 or not (argv[1] in commands or
840 # let's do a nice thing
841 sortedcmds = commands.keys()
844 ToStdout("Usage: %s {command} [options...] [argument...]", binary)
845 ToStdout("%s <command> --help to see details, or man %s", binary, binary)
848 # compute the max line length for cmd + usage
849 mlen = max([len(" %s" % cmd) for cmd in commands])
850 mlen = min(60, mlen) # should not get here...
852 # and format a nice command list
853 ToStdout("Commands:")
854 for cmd in sortedcmds:
855 cmdstr = " %s" % (cmd,)
856 help_text = commands[cmd][4]
857 help_lines = textwrap.wrap(help_text, 79 - 3 - mlen)
858 ToStdout("%-*s - %s", mlen, cmdstr, help_lines.pop(0))
859 for line in help_lines:
860 ToStdout("%-*s %s", mlen, "", line)
864 return None, None, None
866 # get command, unalias it, and look it up in commands
870 raise errors.ProgrammerError("Alias '%s' overrides an existing"
873 if aliases[cmd] not in commands:
874 raise errors.ProgrammerError("Alias '%s' maps to non-existing"
875 " command '%s'" % (cmd, aliases[cmd]))
879 func, args_def, parser_opts, usage, description = commands[cmd]
880 parser = OptionParser(option_list=parser_opts + [_DRY_RUN_OPT, DEBUG_OPT],
881 description=description,
882 formatter=TitledHelpFormatter(),
883 usage="%%prog %s %s" % (cmd, usage))
884 parser.disable_interspersed_args()
885 options, args = parser.parse_args()
887 if not _CheckArguments(cmd, args_def, args):
888 return None, None, None
890 return func, options, args
893 def _CheckArguments(cmd, args_def, args):
894 """Verifies the arguments using the argument definition.
898 1. Abort with error if values specified by user but none expected.
900 1. For each argument in definition
902 1. Keep running count of minimum number of values (min_count)
903 1. Keep running count of maximum number of values (max_count)
904 1. If it has an unlimited number of values
906 1. Abort with error if it's not the last argument in the definition
908 1. If last argument has limited number of values
910 1. Abort with error if number of values doesn't match or is too large
912 1. Abort with error if user didn't pass enough values (min_count)
915 if args and not args_def:
916 ToStderr("Error: Command %s expects no arguments", cmd)
923 last_idx = len(args_def) - 1
925 for idx, arg in enumerate(args_def):
926 if min_count is None:
928 elif arg.min is not None:
931 if max_count is None:
933 elif arg.max is not None:
937 check_max = (arg.max is not None)
939 elif arg.max is None:
940 raise errors.ProgrammerError("Only the last argument can have max=None")
943 # Command with exact number of arguments
944 if (min_count is not None and max_count is not None and
945 min_count == max_count and len(args) != min_count):
946 ToStderr("Error: Command %s expects %d argument(s)", cmd, min_count)
949 # Command with limited number of arguments
950 if max_count is not None and len(args) > max_count:
951 ToStderr("Error: Command %s expects only %d argument(s)",
955 # Command with some required arguments
956 if min_count is not None and len(args) < min_count:
957 ToStderr("Error: Command %s expects at least %d argument(s)",
964 def SplitNodeOption(value):
965 """Splits the value of a --node option.
968 if value and ':' in value:
969 return value.split(':', 1)
975 def wrapper(*args, **kwargs):
978 return fn(*args, **kwargs)
984 def AskUser(text, choices=None):
985 """Ask the user a question.
987 @param text: the question to ask
989 @param choices: list with elements tuples (input_char, return_value,
990 description); if not given, it will default to: [('y', True,
991 'Perform the operation'), ('n', False, 'Do no do the operation')];
992 note that the '?' char is reserved for help
994 @return: one of the return values from the choices list; if input is
995 not possible (i.e. not running with a tty, we return the last
1000 choices = [('y', True, 'Perform the operation'),
1001 ('n', False, 'Do not perform the operation')]
1002 if not choices or not isinstance(choices, list):
1003 raise errors.ProgrammerError("Invalid choices argument to AskUser")
1004 for entry in choices:
1005 if not isinstance(entry, tuple) or len(entry) < 3 or entry[0] == '?':
1006 raise errors.ProgrammerError("Invalid choices element to AskUser")
1008 answer = choices[-1][1]
1010 for line in text.splitlines():
1011 new_text.append(textwrap.fill(line, 70, replace_whitespace=False))
1012 text = "\n".join(new_text)
1014 f = file("/dev/tty", "a+")
1018 chars = [entry[0] for entry in choices]
1019 chars[-1] = "[%s]" % chars[-1]
1021 maps = dict([(entry[0], entry[1]) for entry in choices])
1025 f.write("/".join(chars))
1027 line = f.readline(2).strip().lower()
1032 for entry in choices:
1033 f.write(" %s - %s\n" % (entry[0], entry[2]))
class JobSubmittedException(Exception):
  """Job was submitted, client should exit.

  This exception has one argument, the ID of the job that was
  submitted. The handler should print this ID.

  This is not an error, just a structured way to exit from clients.

  """
1052 def SendJob(ops, cl=None):
1053 """Function to submit an opcode without waiting for the results.
1056 @param ops: list of opcodes
1057 @type cl: luxi.Client
1058 @param cl: the luxi client to use for communicating with the master;
1059 if None, a new client will be created
1065 job_id = cl.SubmitJob(ops)
1070 def PollJob(job_id, cl=None, feedback_fn=None):
1071 """Function to poll for the result of a job.
1073 @type job_id: job identified
1074 @param job_id: the job to poll for results
1075 @type cl: luxi.Client
1076 @param cl: the luxi client to use for communicating with the master;
1077 if None, a new client will be created
1083 prev_job_info = None
1084 prev_logmsg_serial = None
1087 result = cl.WaitForJobChange(job_id, ["status"], prev_job_info,
1090 # job not found, go away!
1091 raise errors.JobLost("Job with id %s lost" % job_id)
1093 # Split result, a tuple of (field values, log entries)
1094 (job_info, log_entries) = result
1095 (status, ) = job_info
1098 for log_entry in log_entries:
1099 (serial, timestamp, _, message) = log_entry
1100 if callable(feedback_fn):
1101 feedback_fn(log_entry[1:])
1103 encoded = utils.SafeEncode(message)
1104 ToStdout("%s %s", time.ctime(utils.MergeTime(timestamp)), encoded)
1105 prev_logmsg_serial = max(prev_logmsg_serial, serial)
1107 # TODO: Handle canceled and archived jobs
1108 elif status in (constants.JOB_STATUS_SUCCESS,
1109 constants.JOB_STATUS_ERROR,
1110 constants.JOB_STATUS_CANCELING,
1111 constants.JOB_STATUS_CANCELED):
1114 prev_job_info = job_info
1116 jobs = cl.QueryJobs([job_id], ["status", "opstatus", "opresult"])
1118 raise errors.JobLost("Job with id %s lost" % job_id)
1120 status, opstatus, result = jobs[0]
1121 if status == constants.JOB_STATUS_SUCCESS:
1123 elif status in (constants.JOB_STATUS_CANCELING,
1124 constants.JOB_STATUS_CANCELED):
1125 raise errors.OpExecError("Job was canceled")
1128 for idx, (status, msg) in enumerate(zip(opstatus, result)):
1129 if status == constants.OP_STATUS_SUCCESS:
1131 elif status == constants.OP_STATUS_ERROR:
1132 errors.MaybeRaise(msg)
1134 raise errors.OpExecError("partial failure (opcode %d): %s" %
1137 raise errors.OpExecError(str(msg))
1138 # default failure mode
1139 raise errors.OpExecError(result)
1142 def SubmitOpCode(op, cl=None, feedback_fn=None):
1143 """Legacy function to submit an opcode.
1145 This is just a simple wrapper over the construction of the processor
1146 instance. It should be extended to better handle feedback and
1147 interaction functions.
1153 job_id = SendJob([op], cl)
1155 op_results = PollJob(job_id, cl=cl, feedback_fn=feedback_fn)
1157 return op_results[0]
1160 def SubmitOrSend(op, opts, cl=None, feedback_fn=None):
1161 """Wrapper around SubmitOpCode or SendJob.
1163 This function will decide, based on the 'opts' parameter, whether to
1164 submit and wait for the result of the opcode (and return it), or
1165 whether to just send the job and print its identifier. It is used in
1166 order to simplify the implementation of the '--submit' option.
1168 It will also add the dry-run parameter from the options passed, if true.
1171 if opts and opts.dry_run:
1172 op.dry_run = opts.dry_run
1173 if opts and opts.submit_only:
1174 job_id = SendJob([op], cl=cl)
1175 raise JobSubmittedException(job_id)
1177 return SubmitOpCode(op, cl=cl, feedback_fn=feedback_fn)
1181 # TODO: Cache object?
1183 client = luxi.Client()
1184 except luxi.NoMasterError:
1185 master, myself = ssconf.GetMasterAndMyself()
1186 if master != myself:
1187 raise errors.OpPrereqError("This is not the master node, please connect"
1188 " to node '%s' and rerun the command" %
def FormatError(err):
  """Return a formatted error message for a given error.

  This function takes an exception instance and returns a tuple
  consisting of two values: first, the recommended exit code, and
  second, a string describing the error message (not
  newline-terminated).

  @type err: Exception
  @param err: the exception instance to format
  @rtype: tuple
  @return: (recommended exit code, error description string)

  """
  # Dispatch on the exception type, most specific first; note that
  # errors.GenericError must be tested after its subclasses, and the
  # luxi transport errors are handled separately afterwards.
  if isinstance(err, errors.ConfigurationError):
    txt = "Corrupt configuration file: %s" % msg
    obuf.write(txt + "\n")
    obuf.write("Aborting.")
  elif isinstance(err, errors.HooksAbort):
    obuf.write("Failure: hooks execution failed:\n")
    # err.args[0] holds a list of (node, script, output) tuples
    for node, script, out in err.args[0]:
      obuf.write(" node: %s, script: %s, output: %s\n" %
                 (node, script, out))
      # variant for hooks that produced no output
      obuf.write(" node: %s, script: %s (no output)\n" %
  elif isinstance(err, errors.HooksFailure):
    obuf.write("Failure: hooks general failure: %s" % msg)
  elif isinstance(err, errors.ResolverError):
    # special-case resolving our own hostname vs. a remote one
    this_host = utils.HostInfo.SysName()
    if err.args[0] == this_host:
      msg = "Failure: can't resolve my own hostname ('%s')"
      msg = "Failure: can't resolve hostname '%s'"
    obuf.write(msg % err.args[0])
  elif isinstance(err, errors.OpPrereqError):
    obuf.write("Failure: prerequisites not met for this"
               " operation:\n%s" % msg)
  elif isinstance(err, errors.OpExecError):
    obuf.write("Failure: command execution error:\n%s" % msg)
  elif isinstance(err, errors.TagError):
    obuf.write("Failure: invalid tag(s) given:\n%s" % msg)
  elif isinstance(err, errors.JobQueueDrainError):
    obuf.write("Failure: the job queue is marked for drain and doesn't"
               " accept new requests\n")
  elif isinstance(err, errors.JobQueueFull):
    obuf.write("Failure: the job queue is full and doesn't accept new"
               " job submissions until old jobs are archived\n")
  elif isinstance(err, errors.TypeEnforcementError):
    obuf.write("Parameter Error: %s" % msg)
  elif isinstance(err, errors.ParameterError):
    obuf.write("Failure: unknown/wrong parameter name '%s'" % msg)
  elif isinstance(err, errors.GenericError):
    obuf.write("Unhandled Ganeti error: %s" % msg)
  elif isinstance(err, luxi.NoMasterError):
    obuf.write("Cannot communicate with the master daemon.\nIs it running"
               " and listening for connections?")
  elif isinstance(err, luxi.TimeoutError):
    obuf.write("Timeout while talking to the master daemon. Error:\n"
  elif isinstance(err, luxi.ProtocolError):
    obuf.write("Unhandled protocol error while talking to the master daemon:\n"
  elif isinstance(err, JobSubmittedException):
    # not a real error: report the submitted job ID for later polling
    obuf.write("JobID: %s\n" % err.args[0])
    obuf.write("Unhandled exception: %s" % msg)
  # strip only trailing newlines so embedded newlines stay intact
  return retcode, obuf.getvalue().rstrip('\n')
def GenericMain(commands, override=None, aliases=None):
  """Generic main function for all the gnt-* commands.

  Arguments:
    - commands: a dictionary with a special structure, see the design doc
      for command line handling.
    - override: if not None, we expect a dictionary with keys that will
      override command line options; this can be used to pass
      options from the scripts to generic functions
    - aliases: dictionary with command aliases {'alias': 'target, ...}

  """
  # save the program name and the entire command line for later logging
  binary = os.path.basename(sys.argv[0]) or sys.argv[0]
  if len(sys.argv) >= 2:
    # log as "<binary> <subcommand>", keeping the remaining arguments
    # separate so they can be logged below
    binary += " " + sys.argv[1]
    old_cmdline = " ".join(sys.argv[2:])
    binary = "<unknown program>"
    func, options, args = _ParseArgs(sys.argv, commands, aliases)
  except errors.ParameterError, err:
    # bad command line: show the formatted error and bail out
    result, err_msg = FormatError(err)
  if func is None: # parse error
  # apply caller-supplied option overrides on top of the parsed options
  if override is not None:
    for key, val in override.iteritems():
      setattr(options, key, val)
  # set up logging before running the actual command
  utils.SetupLogging(constants.LOG_COMMANDS, debug=options.debug,
                     stderr_logging=True, program=binary)
  logging.info("run with arguments '%s'", old_cmdline)
  logging.info("run with no arguments")
    result = func(options, args)
  except (errors.GenericError, luxi.ProtocolError,
          JobSubmittedException), err:
    result, err_msg = FormatError(err)
    logging.exception("Error during command processing")
def GenericInstanceCreate(mode, opts, args):
  """Add an instance to the cluster via either creation or import.

  @param mode: constants.INSTANCE_CREATE or constants.INSTANCE_IMPORT
  @param opts: the command line options selected by the user
  @type args: list
  @param args: should contain only one element, the new instance name
  @rtype: int
  @return: the desired exit code

  """
  # --node may be "pnode" or "pnode:snode"
  (pnode, snode) = SplitNodeOption(opts.node)
  hypervisor, hvparams = opts.hypervisor
    # the highest NIC index given determines how many NICs to create
    nic_max = max(int(nidx[0])+1 for nidx in opts.nics)
  except ValueError, err:
    raise errors.OpPrereqError("Invalid NIC index passed: %s" % str(err))
  # NOTE(review): [{}] * nic_max repeats one shared dict object; this is
  # only safe if the loop below replaces entries rather than mutating
  # them in place -- confirm against the full function body
  nics = [{}] * nic_max
  for nidx, ndict in opts.nics:
    if not isinstance(ndict, dict):
      msg = "Invalid nic/%d value: expected dict, got %s" % (nidx, ndict)
      raise errors.OpPrereqError(msg)
  # default of one nic, all auto
  # validate disk options; --disk and -s (sd_size) are mutually exclusive
  if opts.disk_template == constants.DT_DISKLESS:
    if opts.disks or opts.sd_size is not None:
      raise errors.OpPrereqError("Diskless instance but disk"
                                 " information passed")
    if not opts.disks and not opts.sd_size:
      raise errors.OpPrereqError("No disk information specified")
    if opts.disks and opts.sd_size is not None:
      raise errors.OpPrereqError("Please use either the '--disk' or"
    if opts.sd_size is not None:
      # convert the single-disk shorthand into the --disk format
      opts.disks = [(0, {"size": opts.sd_size})]
      # the highest disk index given determines how many disks to create
      disk_max = max(int(didx[0])+1 for didx in opts.disks)
    except ValueError, err:
      raise errors.OpPrereqError("Invalid disk index passed: %s" % str(err))
    # NOTE(review): same shared-dict aliasing caveat as for nics above
    disks = [{}] * disk_max
    for didx, ddict in opts.disks:
      if not isinstance(ddict, dict):
        msg = "Invalid disk/%d value: expected dict, got %s" % (didx, ddict)
        raise errors.OpPrereqError(msg)
      elif "size" not in ddict:
        raise errors.OpPrereqError("Missing size for disk %d" % didx)
        # parse human-readable sizes ("4G" etc.) into mebibytes
        ddict["size"] = utils.ParseUnit(ddict["size"])
      except ValueError, err:
        raise errors.OpPrereqError("Invalid disk size for disk %d: %s" %
  # normalize/validate the be/hv parameter dicts in place
  utils.ForceDictType(opts.beparams, constants.BES_PARAMETER_TYPES)
  utils.ForceDictType(hvparams, constants.HVS_PARAMETER_TYPES)
  if mode == constants.INSTANCE_CREATE:
  elif mode == constants.INSTANCE_IMPORT:
    src_node = opts.src_node
    src_path = opts.src_dir
    raise errors.ProgrammerError("Invalid creation mode %s" % mode)
  op = opcodes.OpCreateInstance(instance_name=instance,
                                disk_template=opts.disk_template,
                                pnode=pnode, snode=snode,
                                ip_check=opts.ip_check,
                                wait_for_sync=opts.wait_for_sync,
                                file_storage_dir=opts.file_storage_dir,
                                file_driver=opts.file_driver,
                                iallocator=opts.iallocator,
                                hypervisor=hypervisor,
                                beparams=opts.beparams,
  # either execute directly or just queue the job (--submit)
  SubmitOrSend(op, opts)
def GenerateTable(headers, fields, separator, data,
                  numfields=None, unitfields=None,
  """Prints a table with headers and different fields.

  @type headers: dict
  @param headers: dictionary mapping field names to headers for
      the table
  @type fields: list
  @param fields: the field names corresponding to each row in
      the data field
  @param separator: the separator to be used; if this is None,
      the default 'smart' algorithm is used which computes optimal
      field width, otherwise just the separator is used between
      each field
  @type data: list
  @param data: a list of lists, each sublist being one row to be output
  @type numfields: list
  @param numfields: a list with the fields that hold numeric
      values and thus should be right-aligned
  @type unitfields: list
  @param unitfields: a list with the fields that hold numeric
      values that should be formatted with the units field
  @type units: string or None
  @param units: the units we should use for formatting, or None for
      automatic choice (human-readable for non-separator usage, otherwise
      megabytes); this is a one-letter string

  """
  if numfields is None:
  if unitfields is None:
  # wrap the field lists for name matching below
  numfields = utils.FieldSet(*numfields)
  unitfields = utils.FieldSet(*unitfields)
  # build one printf-style conversion per field
  for field in fields:
    if headers and field not in headers:
      # TODO: handle better unknown fields (either revert to old
      # style of raising exception, or deal more intelligently with
      headers[field] = field
    if separator is not None:
      # separator mode: fixed-width alignment is not needed
      format_fields.append("%s")
    elif numfields.Matches(field):
      # numeric fields are right-aligned, width filled in later
      format_fields.append("%*s")
      format_fields.append("%-*s")
  if separator is None:
    # track the maximum length seen per column for alignment
    mlens = [0 for name in fields]
    format = ' '.join(format_fields)
    # escape literal % so the separator survives %-formatting
    format = separator.replace("%", "%%").join(format_fields)
    # first pass: stringify cells and collect column widths
    for idx, val in enumerate(row):
      if unitfields.Matches(fields[idx]):
          val = row[idx] = utils.FormatUnit(val, units)
      val = row[idx] = str(val)
      if separator is None:
        mlens[idx] = max(mlens[idx], len(val))
  # emit the header row, widening columns for long headers too
  for idx, name in enumerate(fields):
    if separator is None:
      mlens[idx] = max(mlens[idx], len(hdr))
      args.append(mlens[idx])
    result.append(format % tuple(args))
    # header underline made of dashes, one per column
    line = ['-' for _ in fields]
    for idx in range(len(fields)):
      if separator is None:
        args.append(mlens[idx])
      args.append(line[idx])
      result.append(format % tuple(args))
def FormatTimestamp(ts):
  """Formats a given timestamp.

  @type ts: timestamp
  @param ts: a timeval-type timestamp, a tuple of seconds and microseconds

  @rtype: string
  @return: a string with the formatted timestamp

  """
  # reject anything that is not a (seconds, microseconds) pair
  if not isinstance (ts, (tuple, list)) or len(ts) != 2:
  # "%F %T" is "YYYY-MM-DD HH:MM:SS"; microseconds appended zero-padded
  return time.strftime("%F %T", time.localtime(sec)) + ".%06d" % usec
def ParseTimespec(value):
  """Parse a time specification.

  The following suffixed will be recognized:

  Without any suffix, the value will be taken to be in seconds.

  """
    raise errors.OpPrereqError("Empty time specification passed")
  # if the last character is not a known suffix, the whole value must
  # be a plain number of seconds
  if value[-1] not in suffix_map:
      raise errors.OpPrereqError("Invalid time specification '%s'" % value)
    multiplier = suffix_map[value[-1]]
    if not value: # no data left after stripping the suffix
      raise errors.OpPrereqError("Invalid time specification (only"
      # convert to seconds using the suffix's multiplier
      value = int(value) * multiplier
    raise errors.OpPrereqError("Invalid time specification '%s'" % value)
def GetOnlineNodes(nodes, cl=None, nowarn=False):
  """Returns the names of online nodes.

  This function will also log a warning on stderr with the names of
  the offline nodes that were skipped.

  @type nodes: list
  @param nodes: if not empty, use only this subset of nodes (minus the
      offline ones)
  @param cl: if not None, luxi client to use
  @type nowarn: boolean
  @param nowarn: by default, this function will output a note with the
      offline nodes that are skipped; if this parameter is True the
      note is not displayed
  @rtype: list
  @return: the names of the nodes that are not offline

  """
  result = cl.QueryNodes(names=nodes, fields=["name", "offline"],
  # row layout matches the fields list: (name, offline)
  offline = [row[0] for row in result if row[1]]
  if offline and not nowarn:
    ToStderr("Note: skipping offline node(s): %s" % ", ".join(offline))
  return [row[0] for row in result if not row[1]]
def _ToStream(stream, txt, *args):
  """Write a message to a stream, bypassing the logging system

  @type stream: file object
  @param stream: the file to which we should write
  @type txt: str
  @param txt: the message
  @param args: optional %-interpolation arguments for txt

  """
    # interpolate only when arguments were actually passed, so a bare
    # message containing '%' is written verbatim by the other branch
    stream.write(txt % args)
def ToStdout(txt, *args):
  """Write a message to stdout only, bypassing the logging system

  This is just a wrapper over _ToStream.

  @type txt: str
  @param txt: the message
  @param args: optional %-interpolation arguments for txt

  """
  _ToStream(sys.stdout, txt, *args)
def ToStderr(txt, *args):
  """Write a message to stderr only, bypassing the logging system

  This is just a wrapper over _ToStream.

  @type txt: str
  @param txt: the message
  @param args: optional %-interpolation arguments for txt

  """
  _ToStream(sys.stderr, txt, *args)
class JobExecutor(object):
  """Class which manages the submission and execution of multiple jobs.

  Note that instances of this class should not be reused between
  submission batches, since internal job state accumulates.

  """
  def __init__(self, cl=None, verbose=True):
    # whether to print per-job progress messages
    self.verbose = verbose

  def QueueJob(self, name, *ops):
    """Record a job for later submit.

    @type name: string
    @param name: a description of the job, will be used in WaitJobSet
    @param ops: the opcodes making up this job

    """
    self.queue.append((name, ops))

  def SubmitPending(self):
    """Submit all pending jobs.

    """
    # submit the queued opcode lists in a single luxi call; each result
    # is a (submit status, job id or error) pair, stored together with
    # the job's description for later reporting
    results = self.cl.SubmitManyJobs([row[1] for row in self.queue])
    for ((status, data), (name, _)) in zip(results, self.queue):
      self.jobs.append((status, data, name))

  def GetResults(self):
    """Wait for and return the results of all jobs.

    @rtype: list
    @return: list of tuples (success, job results), in the same order
        as the submitted jobs; if a job has failed, instead of the result
        there will be the error message

    """
    self.SubmitPending()
    ok_jobs = [row[1] for row in self.jobs if row[0]]
    ToStdout("Submitted jobs %s", ", ".join(ok_jobs))
    for submit_status, jid, name in self.jobs:
      if not submit_status:
        # submission failed: jid holds the error message, not a job id
        ToStderr("Failed to submit job for %s: %s", name, jid)
        results.append((False, jid))
        ToStdout("Waiting for job %s for %s...", jid, name)
        job_result = PollJob(jid, cl=self.cl)
      except (errors.GenericError, luxi.ProtocolError), err:
        _, job_result = FormatError(err)
        # the error message will always be shown, verbose or not
        ToStderr("Job %s for %s has failed: %s", jid, name, job_result)
      results.append((success, job_result))

  def WaitOrShow(self, wait):
    """Wait for job results or only print the job IDs.

    @type wait: boolean
    @param wait: whether to wait or not

    """
      return self.GetResults()
      self.SubmitPending()
      for status, result, name in self.jobs:
          # on success, result is the job id
          ToStdout("%s: %s", result, name)
          ToStderr("Failure for %s: %s", name, result)