4 # Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011 Google Inc.
6 # This program is free software; you can redistribute it and/or modify
7 # it under the terms of the GNU General Public License as published by
8 # the Free Software Foundation; either version 2 of the License, or
9 # (at your option) any later version.
11 # This program is distributed in the hope that it will be useful, but
12 # WITHOUT ANY WARRANTY; without even the implied warranty of
13 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 # General Public License for more details.
16 # You should have received a copy of the GNU General Public License
17 # along with this program; if not, write to the Free Software
18 # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
22 """Module dealing with command line parsing"""
30 from cStringIO import StringIO
32 from ganeti import utils
33 from ganeti import errors
34 from ganeti import constants
35 from ganeti import opcodes
36 from ganeti import luxi
37 from ganeti import ssconf
38 from ganeti import rpc
39 from ganeti import ssh
40 from ganeti import compat
41 from ganeti import netutils
42 from ganeti import qlang
44 from optparse import (OptionParser, TitledHelpFormatter,
45 Option, OptionValueError)
49 # Command line options
62 "CLUSTER_DOMAIN_SECRET_OPT",
79 "FILESTORE_DRIVER_OPT",
85 "GLOBAL_SHARED_FILEDIR_OPT",
90 "DEFAULT_IALLOCATOR_OPT",
91 "IDENTIFY_DEFAULTS_OPT",
93 "IGNORE_FAILURES_OPT",
95 "IGNORE_REMOVE_FAILURES_OPT",
96 "IGNORE_SECONDARIES_OPT",
100 "MAINTAIN_NODE_HEALTH_OPT",
103 "MIGRATION_MODE_OPT",
105 "NEW_CLUSTER_CERT_OPT",
106 "NEW_CLUSTER_DOMAIN_SECRET_OPT",
107 "NEW_CONFD_HMAC_KEY_OPT",
111 "NODE_FORCE_JOIN_OPT",
113 "NODE_PLACEMENT_OPT",
117 "NODRBD_STORAGE_OPT",
123 "NOMODIFY_ETCHOSTS_OPT",
124 "NOMODIFY_SSH_SETUP_OPT",
130 "NOSSH_KEYCHECK_OPT",
141 "PREALLOC_WIPE_DISKS_OPT",
142 "PRIMARY_IP_VERSION_OPT",
147 "REMOVE_INSTANCE_OPT",
155 "SHUTDOWN_TIMEOUT_OPT",
170 # Generic functions for CLI programs
173 "GenericInstanceCreate",
179 "JobSubmittedException",
181 "RunWhileClusterStopped",
185 # Formatting functions
186 "ToStderr", "ToStdout",
189 "FormatParameterDict",
198 # command line options support infrastructure
199 "ARGS_MANY_INSTANCES",
218 "OPT_COMPL_INST_ADD_NODES",
219 "OPT_COMPL_MANY_NODES",
220 "OPT_COMPL_ONE_IALLOCATOR",
221 "OPT_COMPL_ONE_INSTANCE",
222 "OPT_COMPL_ONE_NODE",
223 "OPT_COMPL_ONE_NODEGROUP",
229 "COMMON_CREATE_OPTS",
235 #: Priorities (sorted)
237 ("low", constants.OP_PRIO_LOW),
238 ("normal", constants.OP_PRIO_NORMAL),
239 ("high", constants.OP_PRIO_HIGH),
242 #: Priority dictionary for easier lookup
243 # TODO: Replace this and _PRIORITY_NAMES with a single sorted dictionary once
244 # we migrate to Python 2.6
245 _PRIONAME_TO_VALUE = dict(_PRIORITY_NAMES)
247 # Query result status for clients
250 QR_INCOMPLETE) = range(3)
254 def __init__(self, min=0, max=None): # pylint: disable-msg=W0622
259 return ("<%s min=%s max=%s>" %
260 (self.__class__.__name__, self.min, self.max))
263 class ArgSuggest(_Argument):
264 """Suggesting argument.
266 Value can be any of the ones passed to the constructor.
269 # pylint: disable-msg=W0622
270 def __init__(self, min=0, max=None, choices=None):
271 _Argument.__init__(self, min=min, max=max)
272 self.choices = choices
275 return ("<%s min=%s max=%s choices=%r>" %
276 (self.__class__.__name__, self.min, self.max, self.choices))
279 class ArgChoice(ArgSuggest):
282 Value can be any of the ones passed to the constructor. Like L{ArgSuggest},
283 but value must be one of the choices.
288 class ArgUnknown(_Argument):
289 """Unknown argument to program (e.g. determined at runtime).
294 class ArgInstance(_Argument):
295 """Instances argument.
300 class ArgNode(_Argument):
306 class ArgGroup(_Argument):
307 """Node group argument.
312 class ArgJobId(_Argument):
318 class ArgFile(_Argument):
319 """File path argument.
324 class ArgCommand(_Argument):
330 class ArgHost(_Argument):
336 class ArgOs(_Argument):
#: Zero or more instance names
ARGS_MANY_INSTANCES = [ArgInstance()]
#: Zero or more node names
ARGS_MANY_NODES = [ArgNode()]
#: Zero or more node group names
ARGS_MANY_GROUPS = [ArgGroup()]
#: Exactly one instance name
ARGS_ONE_INSTANCE = [ArgInstance(min=1, max=1)]
#: Exactly one node name
ARGS_ONE_NODE = [ArgNode(min=1, max=1)]
#: Exactly one node group name
# FIX: this was built from ArgInstance, which made single-group commands
# treat (and shell-complete) their argument as an instance name; each
# ARGS_* constant must use the argument class matching its object type.
ARGS_ONE_GROUP = [ArgGroup(min=1, max=1)]
#: Exactly one OS name
ARGS_ONE_OS = [ArgOs(min=1, max=1)]
def _ExtractTagsObject(opts, args):
  """Extract the tag type object.

  Note that this function will modify its args parameter.

  @param opts: parsed command line options; must carry a C{tag_type}
      attribute identifying the class of object the tags belong to
  @param args: positional arguments; the target object name (when one
      is required) is consumed from here

  """
  # Guard against commands that forgot to attach tag_type to the parser.
  if not hasattr(opts, "tag_type"):
    raise errors.ProgrammerError("tag_type not passed to _ExtractTagsObject")
  # NOTE(review): 'kind' is bound from opts.tag_type on an elided line.
  if kind == constants.TAG_CLUSTER:
    # (elided: cluster tags need no object name)
  elif kind in (constants.TAG_NODEGROUP,
                constants.TAG_INSTANCE):
    # (elided: these kinds take the object name from args; the raise
    # below fires when no name was passed -- TODO(review) confirm)
    raise errors.OpPrereqError("no arguments passed to the command")
  # (elided: 'else:' -- any other kind is a programming error)
  raise errors.ProgrammerError("Unhandled tag type '%s'" % kind)
def _ExtendTags(opts, args):
  """Extend the args if a source file has been given.

  This function will extend the tags with the contents of the file
  passed in the 'tags_source' attribute of the opts parameter. A file
  named '-' will be replaced by stdin.

  @param opts: parsed command line options; C{tags_source} names the
      file to read additional tags from (or None)
  @param args: list of tags, extended in place

  """
  fname = opts.tags_source
  # (elided: early return when no source file was given, and the
  # stdin substitution for "-" described in the docstring)
  new_fh = open(fname, "r")
  # (elided: 'new_data' initialisation and the read-loop header)
  # we don't use the nice 'new_data = [line.strip() for line in fh]'
  # because of python bug 1633941
  line = new_fh.readline()
  new_data.append(line.strip())
  # (elided: loop termination and closing of new_fh)
  args.extend(new_data)
def ListTags(opts, args):
  """List the tags on a given object.

  This is a generic implementation that knows how to deal with all
  three cases of tag objects (cluster, node, instance). The opts
  argument is expected to contain a tag_type field denoting what
  object type we work on.

  """
  kind, name = _ExtractTagsObject(opts, args)
  # (elided: creation of 'cl', the client object queried below)
  result = cl.QueryTags(kind, name)
  # Materialise the result so it can be iterated/output below.
  result = list(result)
  # (elided: output of the collected tags -- presumably sorted and
  # printed; TODO(review) confirm against the full body)
def AddTags(opts, args):
  """Add tags on a given object.

  This is a generic implementation that knows how to deal with all
  three cases of tag objects (cluster, node, instance). The opts
  argument is expected to contain a tag_type field denoting what
  object type we work on.

  """
  kind, name = _ExtractTagsObject(opts, args)
  # Pull in extra tags from opts.tags_source, if any.
  _ExtendTags(opts, args)
  # (elided guard: the raise below only fires when args ended up empty)
  raise errors.OpPrereqError("No tags to be added")
  op = opcodes.OpTagsSet(kind=kind, name=name, tags=args)
  SubmitOpCode(op, opts=opts)
def RemoveTags(opts, args):
  """Remove tags from a given object.

  This is a generic implementation that knows how to deal with all
  three cases of tag objects (cluster, node, instance). The opts
  argument is expected to contain a tag_type field denoting what
  object type we work on.

  """
  kind, name = _ExtractTagsObject(opts, args)
  # Pull in extra tags from opts.tags_source, if any.
  _ExtendTags(opts, args)
  # (elided guard: the raise below only fires when args ended up empty)
  raise errors.OpPrereqError("No tags to be removed")
  op = opcodes.OpTagsDel(kind=kind, name=name, tags=args)
  SubmitOpCode(op, opts=opts)
def check_unit(option, opt, value): # pylint: disable-msg=W0613
  """OptParsers custom converter for units.

  Delegates to L{utils.ParseUnit}; parse failures are re-raised as the
  error type optparse expects for bad option values.

  @raise OptionValueError: if the value is not a valid unit specification

  """
  # (elided: 'try:' line wrapping the conversion below)
    return utils.ParseUnit(value)
  except errors.UnitParseError, err:
    raise OptionValueError("option %s: %s" % (opt, err))
def _SplitKeyVal(opt, data):
  """Convert a KeyVal string into a dict.

  This function will convert a key=val[,...] string into a dict. Empty
  values will be converted specially: keys which have the prefix 'no_'
  will have the value=False and the prefix stripped, the others will
  have value=True.

  @type opt: string
  @param opt: a string holding the option name for which we process the
      data, used in building error messages
  @type data: string
  @param data: a string of the format key=val,key=val,...
  @return: {key=val, key=val}
  @raises errors.ParameterError: if there are duplicate keys

  """
  for elem in utils.UnescapeAndSplit(data, sep=","):
    # (elided: branch on whether elem contains '=')
    key, val = elem.split("=", 1)
    # The prefix handling below sits in the elided no-'=' branch.
    if elem.startswith(NO_PREFIX):
      key, val = elem[len(NO_PREFIX):], False
    elif elem.startswith(UN_PREFIX):
      key, val = elem[len(UN_PREFIX):], None
    # (elided: 'else:' -- a bare key means True)
    key, val = elem, True
    # (elided: duplicate-key check guarding the raise below)
    raise errors.ParameterError("Duplicate key '%s' in option %s" %
    # (elided: remainder of the raise arguments and the final return)
def check_ident_key_val(option, opt, value): # pylint: disable-msg=W0613
  """Custom parser for ident:key=val,key=val options.

  This will store the parsed values as a tuple (ident, {key: val}). As such,
  multiple uses of this option via action=append is possible.

  """
  # (elided: branch on whether value contains ':')
  ident, rest = value, ''
  ident, rest = value.split(":", 1)
  if ident.startswith(NO_PREFIX):
    # (elided: 'if rest:' guard -- key=val options make no sense when
    # removing a parameter group)
    msg = "Cannot pass options when removing parameter groups: %s" % value
    raise errors.ParameterError(msg)
    retval = (ident[len(NO_PREFIX):], False)
  elif ident.startswith(UN_PREFIX):
    # (elided: 'if rest:' guard, as above)
    msg = "Cannot pass options when removing parameter groups: %s" % value
    raise errors.ParameterError(msg)
    retval = (ident[len(UN_PREFIX):], None)
  # (elided: 'else:' -- the normal ident:key=val case)
  kv_dict = _SplitKeyVal(opt, rest)
  retval = (ident, kv_dict)
  # (elided: return of retval)
def check_key_val(option, opt, value): # pylint: disable-msg=W0613
  """Custom parser class for key=val,key=val options.

  This will store the parsed values as a dict {key: val}.

  """
  # All the heavy lifting (splitting, no_/- prefixes, duplicate
  # detection) lives in the shared helper.
  parsed = _SplitKeyVal(opt, value)
  return parsed
def check_bool(option, opt, value): # pylint: disable-msg=W0613
  """Custom parser for yes/no options.

  This will store the parsed value as either True or False.

  @raise errors.ParameterError: for any value outside the accepted set

  """
  # Comparison is case-insensitive.
  value = value.lower()
  if value == constants.VALUE_FALSE or value == "no":
    # (elided branch body -- presumably returns False)
  elif value == constants.VALUE_TRUE or value == "yes":
    # (elided branch body -- presumably returns True)
  # (elided: 'else:' preceding the raise below)
  raise errors.ParameterError("Invalid boolean value '%s'" % value)
555 # completion_suggestion is normally a list. Using numeric values not evaluating
556 # to False for dynamic completion.
557 (OPT_COMPL_MANY_NODES,
559 OPT_COMPL_ONE_INSTANCE,
561 OPT_COMPL_ONE_IALLOCATOR,
562 OPT_COMPL_INST_ADD_NODES,
563 OPT_COMPL_ONE_NODEGROUP) = range(100, 107)
565 OPT_COMPL_ALL = frozenset([
566 OPT_COMPL_MANY_NODES,
568 OPT_COMPL_ONE_INSTANCE,
570 OPT_COMPL_ONE_IALLOCATOR,
571 OPT_COMPL_INST_ADD_NODES,
572 OPT_COMPL_ONE_NODEGROUP,
class CliOption(Option):
  """Custom option class for optparse.

  Extends the stock optparse Option with extra value types (registered
  in TYPE_CHECKER below) and a "completion_suggest" attribute,
  presumably consumed by the shell-completion machinery -- TODO(review)
  confirm.

  """
  ATTRS = Option.ATTRS + [
    "completion_suggest",
    # (elided: closing bracket of the ATTRS list)
  TYPES = Option.TYPES + (
    # (elided: the custom type name entries; TYPE_CHECKER below
    # registers "identkeyval", "keyval", "unit" and "bool")
  TYPE_CHECKER = Option.TYPE_CHECKER.copy()
  TYPE_CHECKER["identkeyval"] = check_ident_key_val
  TYPE_CHECKER["keyval"] = check_key_val
  TYPE_CHECKER["unit"] = check_unit
  TYPE_CHECKER["bool"] = check_bool
# optparse.py sets make_option, so we do it for our own option class, too;
# all the *_OPT constants below are built through this factory alias.
cli_option = CliOption
602 DEBUG_OPT = cli_option("-d", "--debug", default=0, action="count",
603 help="Increase debugging level")
605 NOHDR_OPT = cli_option("--no-headers", default=False,
606 action="store_true", dest="no_headers",
607 help="Don't display column headers")
609 SEP_OPT = cli_option("--separator", default=None,
610 action="store", dest="separator",
611 help=("Separator between output fields"
612 " (defaults to one space)"))
614 USEUNITS_OPT = cli_option("--units", default=None,
615 dest="units", choices=('h', 'm', 'g', 't'),
616 help="Specify units for output (one of h/m/g/t)")
618 FIELDS_OPT = cli_option("-o", "--output", dest="output", action="store",
619 type="string", metavar="FIELDS",
620 help="Comma separated list of output fields")
622 FORCE_OPT = cli_option("-f", "--force", dest="force", action="store_true",
623 default=False, help="Force the operation")
625 CONFIRM_OPT = cli_option("--yes", dest="confirm", action="store_true",
626 default=False, help="Do not require confirmation")
628 IGNORE_OFFLINE_OPT = cli_option("--ignore-offline", dest="ignore_offline",
629 action="store_true", default=False,
630 help=("Ignore offline nodes and do as much"
633 TAG_SRC_OPT = cli_option("--from", dest="tags_source",
634 default=None, help="File with tag names")
636 SUBMIT_OPT = cli_option("--submit", dest="submit_only",
637 default=False, action="store_true",
638 help=("Submit the job and return the job ID, but"
639 " don't wait for the job to finish"))
641 SYNC_OPT = cli_option("--sync", dest="do_locking",
642 default=False, action="store_true",
643 help=("Grab locks while doing the queries"
644 " in order to ensure more consistent results"))
646 DRY_RUN_OPT = cli_option("--dry-run", default=False,
648 help=("Do not execute the operation, just run the"
649 " check steps and verify it it could be"
652 VERBOSE_OPT = cli_option("-v", "--verbose", default=False,
654 help="Increase the verbosity of the operation")
656 DEBUG_SIMERR_OPT = cli_option("--debug-simulate-errors", default=False,
657 action="store_true", dest="simulate_errors",
658 help="Debugging option that makes the operation"
659 " treat most runtime checks as failed")
661 NWSYNC_OPT = cli_option("--no-wait-for-sync", dest="wait_for_sync",
662 default=True, action="store_false",
663 help="Don't wait for sync (DANGEROUS!)")
665 DISK_TEMPLATE_OPT = cli_option("-t", "--disk-template", dest="disk_template",
666 help="Custom disk setup (diskless, file,"
668 default=None, metavar="TEMPL",
669 choices=list(constants.DISK_TEMPLATES))
671 NONICS_OPT = cli_option("--no-nics", default=False, action="store_true",
672 help="Do not create any network cards for"
675 FILESTORE_DIR_OPT = cli_option("--file-storage-dir", dest="file_storage_dir",
676 help="Relative path under default cluster-wide"
677 " file storage dir to store file-based disks",
678 default=None, metavar="<DIR>")
680 FILESTORE_DRIVER_OPT = cli_option("--file-driver", dest="file_driver",
681 help="Driver to use for image files",
682 default="loop", metavar="<DRIVER>",
683 choices=list(constants.FILE_DRIVER))
685 IALLOCATOR_OPT = cli_option("-I", "--iallocator", metavar="<NAME>",
686 help="Select nodes for the instance automatically"
687 " using the <NAME> iallocator plugin",
688 default=None, type="string",
689 completion_suggest=OPT_COMPL_ONE_IALLOCATOR)
691 DEFAULT_IALLOCATOR_OPT = cli_option("-I", "--default-iallocator",
693 help="Set the default instance allocator plugin",
694 default=None, type="string",
695 completion_suggest=OPT_COMPL_ONE_IALLOCATOR)
697 OS_OPT = cli_option("-o", "--os-type", dest="os", help="What OS to run",
699 completion_suggest=OPT_COMPL_ONE_OS)
701 OSPARAMS_OPT = cli_option("-O", "--os-parameters", dest="osparams",
702 type="keyval", default={},
703 help="OS parameters")
705 FORCE_VARIANT_OPT = cli_option("--force-variant", dest="force_variant",
706 action="store_true", default=False,
707 help="Force an unknown variant")
709 NO_INSTALL_OPT = cli_option("--no-install", dest="no_install",
710 action="store_true", default=False,
711 help="Do not install the OS (will"
714 BACKEND_OPT = cli_option("-B", "--backend-parameters", dest="beparams",
715 type="keyval", default={},
716 help="Backend parameters")
718 HVOPTS_OPT = cli_option("-H", "--hypervisor-parameters", type="keyval",
719 default={}, dest="hvparams",
720 help="Hypervisor parameters")
722 HYPERVISOR_OPT = cli_option("-H", "--hypervisor-parameters", dest="hypervisor",
723 help="Hypervisor and hypervisor options, in the"
724 " format hypervisor:option=value,option=value,...",
725 default=None, type="identkeyval")
727 HVLIST_OPT = cli_option("-H", "--hypervisor-parameters", dest="hvparams",
728 help="Hypervisor and hypervisor options, in the"
729 " format hypervisor:option=value,option=value,...",
730 default=[], action="append", type="identkeyval")
732 NOIPCHECK_OPT = cli_option("--no-ip-check", dest="ip_check", default=True,
733 action="store_false",
734 help="Don't check that the instance's IP"
737 NONAMECHECK_OPT = cli_option("--no-name-check", dest="name_check",
738 default=True, action="store_false",
739 help="Don't check that the instance's name"
742 NET_OPT = cli_option("--net",
743 help="NIC parameters", default=[],
744 dest="nics", action="append", type="identkeyval")
746 DISK_OPT = cli_option("--disk", help="Disk parameters", default=[],
747 dest="disks", action="append", type="identkeyval")
749 DISKIDX_OPT = cli_option("--disks", dest="disks", default=None,
750 help="Comma-separated list of disks"
751 " indices to act on (e.g. 0,2) (optional,"
752 " defaults to all disks)")
754 OS_SIZE_OPT = cli_option("-s", "--os-size", dest="sd_size",
755 help="Enforces a single-disk configuration using the"
756 " given disk size, in MiB unless a suffix is used",
757 default=None, type="unit", metavar="<size>")
759 IGNORE_CONSIST_OPT = cli_option("--ignore-consistency",
760 dest="ignore_consistency",
761 action="store_true", default=False,
762 help="Ignore the consistency of the disks on"
765 ALLOW_FAILOVER_OPT = cli_option("--allow-failover",
766 dest="allow_failover",
767 action="store_true", default=False,
768 help="If migration is not possible fallback to"
771 NONLIVE_OPT = cli_option("--non-live", dest="live",
772 default=True, action="store_false",
773 help="Do a non-live migration (this usually means"
774 " freeze the instance, save the state, transfer and"
775 " only then resume running on the secondary node)")
# FIX: the help text opened a parenthesis ("(choose ...") that was never
# closed; terminate it after "non-live".
MIGRATION_MODE_OPT = cli_option("--migration-mode", dest="migration_mode",
                                choices=list(constants.HT_MIGRATION_MODES),
                                help="Override default migration mode (choose"
                                " either live or non-live)")
783 NODE_PLACEMENT_OPT = cli_option("-n", "--node", dest="node",
784 help="Target node and optional secondary node",
785 metavar="<pnode>[:<snode>]",
786 completion_suggest=OPT_COMPL_INST_ADD_NODES)
788 NODE_LIST_OPT = cli_option("-n", "--node", dest="nodes", default=[],
789 action="append", metavar="<node>",
790 help="Use only this node (can be used multiple"
791 " times, if not given defaults to all nodes)",
792 completion_suggest=OPT_COMPL_ONE_NODE)
794 NODEGROUP_OPT = cli_option("-g", "--node-group",
796 help="Node group (name or uuid)",
797 metavar="<nodegroup>",
798 default=None, type="string",
799 completion_suggest=OPT_COMPL_ONE_NODEGROUP)
801 SINGLE_NODE_OPT = cli_option("-n", "--node", dest="node", help="Target node",
803 completion_suggest=OPT_COMPL_ONE_NODE)
805 NOSTART_OPT = cli_option("--no-start", dest="start", default=True,
806 action="store_false",
807 help="Don't start the instance after creation")
809 SHOWCMD_OPT = cli_option("--show-cmd", dest="show_command",
810 action="store_true", default=False,
811 help="Show command instead of executing it")
813 CLEANUP_OPT = cli_option("--cleanup", dest="cleanup",
814 default=False, action="store_true",
815 help="Instead of performing the migration, try to"
816 " recover from a failed cleanup. This is safe"
817 " to run even if the instance is healthy, but it"
818 " will create extra replication traffic and "
819 " disrupt briefly the replication (like during the"
822 STATIC_OPT = cli_option("-s", "--static", dest="static",
823 action="store_true", default=False,
824 help="Only show configuration data, not runtime data")
826 ALL_OPT = cli_option("--all", dest="show_all",
827 default=False, action="store_true",
828 help="Show info on all instances on the cluster."
829 " This can take a long time to run, use wisely")
831 SELECT_OS_OPT = cli_option("--select-os", dest="select_os",
832 action="store_true", default=False,
833 help="Interactive OS reinstall, lists available"
834 " OS templates for selection")
836 IGNORE_FAILURES_OPT = cli_option("--ignore-failures", dest="ignore_failures",
837 action="store_true", default=False,
838 help="Remove the instance from the cluster"
839 " configuration even if there are failures"
840 " during the removal process")
842 IGNORE_REMOVE_FAILURES_OPT = cli_option("--ignore-remove-failures",
843 dest="ignore_remove_failures",
844 action="store_true", default=False,
845 help="Remove the instance from the"
846 " cluster configuration even if there"
847 " are failures during the removal"
850 REMOVE_INSTANCE_OPT = cli_option("--remove-instance", dest="remove_instance",
851 action="store_true", default=False,
852 help="Remove the instance from the cluster")
854 DST_NODE_OPT = cli_option("-n", "--target-node", dest="dst_node",
855 help="Specifies the new node for the instance",
856 metavar="NODE", default=None,
857 completion_suggest=OPT_COMPL_ONE_NODE)
859 NEW_SECONDARY_OPT = cli_option("-n", "--new-secondary", dest="dst_node",
860 help="Specifies the new secondary node",
861 metavar="NODE", default=None,
862 completion_suggest=OPT_COMPL_ONE_NODE)
864 ON_PRIMARY_OPT = cli_option("-p", "--on-primary", dest="on_primary",
865 default=False, action="store_true",
866 help="Replace the disk(s) on the primary"
867 " node (only for the drbd template)")
869 ON_SECONDARY_OPT = cli_option("-s", "--on-secondary", dest="on_secondary",
870 default=False, action="store_true",
871 help="Replace the disk(s) on the secondary"
872 " node (only for the drbd template)")
874 AUTO_PROMOTE_OPT = cli_option("--auto-promote", dest="auto_promote",
875 default=False, action="store_true",
876 help="Lock all nodes and auto-promote as needed"
879 AUTO_REPLACE_OPT = cli_option("-a", "--auto", dest="auto",
880 default=False, action="store_true",
881 help="Automatically replace faulty disks"
882 " (only for the drbd template)")
884 IGNORE_SIZE_OPT = cli_option("--ignore-size", dest="ignore_size",
885 default=False, action="store_true",
886 help="Ignore current recorded size"
887 " (useful for forcing activation when"
888 " the recorded size is wrong)")
890 SRC_NODE_OPT = cli_option("--src-node", dest="src_node", help="Source node",
892 completion_suggest=OPT_COMPL_ONE_NODE)
894 SRC_DIR_OPT = cli_option("--src-dir", dest="src_dir", help="Source directory",
897 SECONDARY_IP_OPT = cli_option("-s", "--secondary-ip", dest="secondary_ip",
898 help="Specify the secondary ip for the node",
899 metavar="ADDRESS", default=None)
901 READD_OPT = cli_option("--readd", dest="readd",
902 default=False, action="store_true",
903 help="Readd old node after replacing it")
905 NOSSH_KEYCHECK_OPT = cli_option("--no-ssh-key-check", dest="ssh_key_check",
906 default=True, action="store_false",
907 help="Disable SSH key fingerprint checking")
909 NODE_FORCE_JOIN_OPT = cli_option("--force-join", dest="force_join",
910 default=False, action="store_true",
911 help="Force the joining of a node")
913 MC_OPT = cli_option("-C", "--master-candidate", dest="master_candidate",
914 type="bool", default=None, metavar=_YORNO,
915 help="Set the master_candidate flag on the node")
917 OFFLINE_OPT = cli_option("-O", "--offline", dest="offline", metavar=_YORNO,
918 type="bool", default=None,
919 help=("Set the offline flag on the node"
920 " (cluster does not communicate with offline"
923 DRAINED_OPT = cli_option("-D", "--drained", dest="drained", metavar=_YORNO,
924 type="bool", default=None,
925 help=("Set the drained flag on the node"
926 " (excluded from allocation operations)"))
928 CAPAB_MASTER_OPT = cli_option("--master-capable", dest="master_capable",
929 type="bool", default=None, metavar=_YORNO,
930 help="Set the master_capable flag on the node")
932 CAPAB_VM_OPT = cli_option("--vm-capable", dest="vm_capable",
933 type="bool", default=None, metavar=_YORNO,
934 help="Set the vm_capable flag on the node")
936 ALLOCATABLE_OPT = cli_option("--allocatable", dest="allocatable",
937 type="bool", default=None, metavar=_YORNO,
938 help="Set the allocatable flag on a volume")
940 NOLVM_STORAGE_OPT = cli_option("--no-lvm-storage", dest="lvm_storage",
941 help="Disable support for lvm based instances"
943 action="store_false", default=True)
945 ENABLED_HV_OPT = cli_option("--enabled-hypervisors",
946 dest="enabled_hypervisors",
947 help="Comma-separated list of hypervisors",
948 type="string", default=None)
950 NIC_PARAMS_OPT = cli_option("-N", "--nic-parameters", dest="nicparams",
951 type="keyval", default={},
952 help="NIC parameters")
954 CP_SIZE_OPT = cli_option("-C", "--candidate-pool-size", default=None,
955 dest="candidate_pool_size", type="int",
956 help="Set the candidate pool size")
958 VG_NAME_OPT = cli_option("--vg-name", dest="vg_name",
959 help=("Enables LVM and specifies the volume group"
960 " name (cluster-wide) for disk allocation"
961 " [%s]" % constants.DEFAULT_VG),
962 metavar="VG", default=None)
964 YES_DOIT_OPT = cli_option("--yes-do-it", dest="yes_do_it",
965 help="Destroy cluster", action="store_true")
967 NOVOTING_OPT = cli_option("--no-voting", dest="no_voting",
968 help="Skip node agreement check (dangerous)",
969 action="store_true", default=False)
971 MAC_PREFIX_OPT = cli_option("-m", "--mac-prefix", dest="mac_prefix",
972 help="Specify the mac prefix for the instance IP"
973 " addresses, in the format XX:XX:XX",
977 MASTER_NETDEV_OPT = cli_option("--master-netdev", dest="master_netdev",
978 help="Specify the node interface (cluster-wide)"
979 " on which the master IP address will be added"
980 " (cluster init default: %s)" %
981 constants.DEFAULT_BRIDGE,
985 GLOBAL_FILEDIR_OPT = cli_option("--file-storage-dir", dest="file_storage_dir",
986 help="Specify the default directory (cluster-"
987 "wide) for storing the file-based disks [%s]" %
988 constants.DEFAULT_FILE_STORAGE_DIR,
990 default=constants.DEFAULT_FILE_STORAGE_DIR)
992 GLOBAL_SHARED_FILEDIR_OPT = cli_option("--shared-file-storage-dir",
993 dest="shared_file_storage_dir",
994 help="Specify the default directory (cluster-"
995 "wide) for storing the shared file-based"
997 constants.DEFAULT_SHARED_FILE_STORAGE_DIR,
999 default=constants.DEFAULT_SHARED_FILE_STORAGE_DIR)
1001 NOMODIFY_ETCHOSTS_OPT = cli_option("--no-etc-hosts", dest="modify_etc_hosts",
1002 help="Don't modify /etc/hosts",
1003 action="store_false", default=True)
1005 NOMODIFY_SSH_SETUP_OPT = cli_option("--no-ssh-init", dest="modify_ssh_setup",
1006 help="Don't initialize SSH keys",
1007 action="store_false", default=True)
1009 ERROR_CODES_OPT = cli_option("--error-codes", dest="error_codes",
1010 help="Enable parseable error messages",
1011 action="store_true", default=False)
1013 NONPLUS1_OPT = cli_option("--no-nplus1-mem", dest="skip_nplusone_mem",
1014 help="Skip N+1 memory redundancy tests",
1015 action="store_true", default=False)
1017 REBOOT_TYPE_OPT = cli_option("-t", "--type", dest="reboot_type",
1018 help="Type of reboot: soft/hard/full",
1019 default=constants.INSTANCE_REBOOT_HARD,
1021 choices=list(constants.REBOOT_TYPES))
1023 IGNORE_SECONDARIES_OPT = cli_option("--ignore-secondaries",
1024 dest="ignore_secondaries",
1025 default=False, action="store_true",
1026 help="Ignore errors from secondaries")
1028 NOSHUTDOWN_OPT = cli_option("--noshutdown", dest="shutdown",
1029 action="store_false", default=True,
1030 help="Don't shutdown the instance (unsafe)")
1032 TIMEOUT_OPT = cli_option("--timeout", dest="timeout", type="int",
1033 default=constants.DEFAULT_SHUTDOWN_TIMEOUT,
1034 help="Maximum time to wait")
1036 SHUTDOWN_TIMEOUT_OPT = cli_option("--shutdown-timeout",
1037 dest="shutdown_timeout", type="int",
1038 default=constants.DEFAULT_SHUTDOWN_TIMEOUT,
1039 help="Maximum time to wait for instance shutdown")
1041 INTERVAL_OPT = cli_option("--interval", dest="interval", type="int",
1043 help=("Number of seconds between repetions of the"
1046 EARLY_RELEASE_OPT = cli_option("--early-release",
1047 dest="early_release", default=False,
1048 action="store_true",
1049 help="Release the locks on the secondary"
1052 NEW_CLUSTER_CERT_OPT = cli_option("--new-cluster-certificate",
1053 dest="new_cluster_cert",
1054 default=False, action="store_true",
1055 help="Generate a new cluster certificate")
1057 RAPI_CERT_OPT = cli_option("--rapi-certificate", dest="rapi_cert",
1059 help="File containing new RAPI certificate")
1061 NEW_RAPI_CERT_OPT = cli_option("--new-rapi-certificate", dest="new_rapi_cert",
1062 default=None, action="store_true",
1063 help=("Generate a new self-signed RAPI"
1066 NEW_CONFD_HMAC_KEY_OPT = cli_option("--new-confd-hmac-key",
1067 dest="new_confd_hmac_key",
1068 default=False, action="store_true",
1069 help=("Create a new HMAC key for %s" %
# FIX: duplicated word in the help text ("Load new new cluster domain").
CLUSTER_DOMAIN_SECRET_OPT = cli_option("--cluster-domain-secret",
                                       dest="cluster_domain_secret",
                                       help=("Load new cluster domain"
                                             " secret from file"))
1078 NEW_CLUSTER_DOMAIN_SECRET_OPT = cli_option("--new-cluster-domain-secret",
1079 dest="new_cluster_domain_secret",
1080 default=False, action="store_true",
1081 help=("Create a new cluster domain"
1084 USE_REPL_NET_OPT = cli_option("--use-replication-network",
1085 dest="use_replication_network",
1086 help="Whether to use the replication network"
1087 " for talking to the nodes",
1088 action="store_true", default=False)
1090 MAINTAIN_NODE_HEALTH_OPT = \
1091 cli_option("--maintain-node-health", dest="maintain_node_health",
1092 metavar=_YORNO, default=None, type="bool",
1093 help="Configure the cluster to automatically maintain node"
1094 " health, by shutting down unknown instances, shutting down"
1095 " unknown DRBD devices, etc.")
1097 IDENTIFY_DEFAULTS_OPT = \
1098 cli_option("--identify-defaults", dest="identify_defaults",
1099 default=False, action="store_true",
1100 help="Identify which saved instance parameters are equal to"
1101 " the current cluster defaults and set them as such, instead"
1102 " of marking them as overridden")
1104 UIDPOOL_OPT = cli_option("--uid-pool", default=None,
1105 action="store", dest="uid_pool",
1106 help=("A list of user-ids or user-id"
1107 " ranges separated by commas"))
1109 ADD_UIDS_OPT = cli_option("--add-uids", default=None,
1110 action="store", dest="add_uids",
1111 help=("A list of user-ids or user-id"
1112 " ranges separated by commas, to be"
1113 " added to the user-id pool"))
1115 REMOVE_UIDS_OPT = cli_option("--remove-uids", default=None,
1116 action="store", dest="remove_uids",
1117 help=("A list of user-ids or user-id"
1118 " ranges separated by commas, to be"
1119 " removed from the user-id pool"))
1121 RESERVED_LVS_OPT = cli_option("--reserved-lvs", default=None,
1122 action="store", dest="reserved_lvs",
1123 help=("A comma-separated list of reserved"
1124 " logical volumes names, that will be"
1125 " ignored by cluster verify"))
1127 ROMAN_OPT = cli_option("--roman",
1128 dest="roman_integers", default=False,
1129 action="store_true",
1130 help="Use roman numbers for positive integers")
1132 DRBD_HELPER_OPT = cli_option("--drbd-usermode-helper", dest="drbd_helper",
1133 action="store", default=None,
1134 help="Specifies usermode helper for DRBD")
1136 NODRBD_STORAGE_OPT = cli_option("--no-drbd-storage", dest="drbd_storage",
1137 action="store_false", default=True,
1138 help="Disable support for DRBD")
#: Cluster-wide IP version (4 or 6) used for nodes' primary addresses.
PRIMARY_IP_VERSION_OPT = \
    cli_option("--primary-ip-version", default=constants.IP4_VERSION,
               action="store", dest="primary_ip_version",
               metavar="%d|%d" % (constants.IP4_VERSION,
                                  constants.IP6_VERSION),
               help="Cluster-wide IP version for primary IP")

#: Submission priority for the generated opcode(s); valid choices come from
#: the static priority name-to-value map.
PRIORITY_OPT = cli_option("--priority", default=None, dest="priority",
                          metavar="|".join(name for name, _ in _PRIORITY_NAMES),
                          choices=_PRIONAME_TO_VALUE.keys(),
                          help="Priority for opcode processing")

#: OS definition flags; tri-state booleans where None means "leave unchanged".
HID_OS_OPT = cli_option("--hidden", dest="hidden",
                        type="bool", default=None, metavar=_YORNO,
                        help="Sets the hidden flag on the OS")

BLK_OS_OPT = cli_option("--blacklisted", dest="blacklisted",
                        type="bool", default=None, metavar=_YORNO,
                        help="Sets the blacklisted flag on the OS")

PREALLOC_WIPE_DISKS_OPT = cli_option("--prealloc-wipe-disks", default=None,
                                     type="bool", metavar=_YORNO,
                                     dest="prealloc_wipe_disks",
                                     help=("Wipe disks prior to instance"

#: Generic key=value node parameters (parsed by the "keyval" option type).
NODE_PARAMS_OPT = cli_option("--node-parameters", dest="ndparams",
                             type="keyval", default=None,
                             help="Node parameters")

ALLOC_POLICY_OPT = cli_option("--alloc-policy", dest="alloc_policy",
                              action="store", metavar="POLICY", default=None,
                              help="Allocation policy for the node group")

#: "State of Record" power flag for a node (tri-state, see _YORNO).
NODE_POWERED_OPT = cli_option("--node-powered", default=None,
                              type="bool", metavar=_YORNO,
                              dest="node_powered",
                              help="Specify if the SoR for node is powered")

#: Out-of-band helper timeouts/delays, defaulting to cluster constants.
OOB_TIMEOUT_OPT = cli_option("--oob-timeout", dest="oob_timeout", type="int",
                             default=constants.OOB_TIMEOUT,
                             help="Maximum time to wait for out-of-band helper")

POWER_DELAY_OPT = cli_option("--power-delay", dest="power_delay", type="float",
                             default=constants.OOB_POWER_DELAY,
                             help="Time in seconds to wait between power-ons")

FORCE_FILTER_OPT = cli_option("-F", "--filter", dest="force_filter",
                              action="store_true", default=False,
                              help=("Whether command argument should be treated"

#: Options provided by all commands
COMMON_OPTS = [DEBUG_OPT]

# common options for creating instances. add and import then add their own
COMMON_CREATE_OPTS = [
  FILESTORE_DRIVER_OPT,
def _ParseArgs(argv, commands, aliases):
  """Parser for the command line arguments.

  This function parses the arguments and returns the function which
  must be executed together with its (modified) arguments.

  @param argv: the command line
  @param commands: dictionary with special contents, see the design
      doc for cmdline handling
  @param aliases: dictionary with command aliases {'alias': 'target, ...}
  @return: a tuple of (function, options, args); all three are None when
      the command is unknown or its arguments fail validation

  """
  # Placeholder used when no program name can be determined
  binary = "<command>"
  binary = argv[0].split("/")[-1]

  if len(argv) > 1 and argv[1] == "--version":
    ToStdout("%s (ganeti %s) %s", binary, constants.VCS_VERSION,
             constants.RELEASE_VERSION)
    # Quit right away. That way we don't have to care about this special
    # argument. optparse.py does it the same.

  if len(argv) < 2 or not (argv[1] in commands or
                           argv[1] in aliases):
    # let's do a nice thing
    sortedcmds = commands.keys()

    ToStdout("Usage: %s {command} [options...] [argument...]", binary)
    ToStdout("%s <command> --help to see details, or man %s", binary, binary)

    # compute the max line length for cmd + usage
    mlen = max([len(" %s" % cmd) for cmd in commands])
    mlen = min(60, mlen) # should not get here...

    # and format a nice command list
    ToStdout("Commands:")
    for cmd in sortedcmds:
      cmdstr = " %s" % (cmd,)
      # the fifth element of a command definition is its help text
      help_text = commands[cmd][4]
      help_lines = textwrap.wrap(help_text, 79 - 3 - mlen)
      ToStdout("%-*s - %s", mlen, cmdstr, help_lines.pop(0))
      for line in help_lines:
        ToStdout("%-*s %s", mlen, "", line)

    return None, None, None

  # get command, unalias it, and look it up in commands
  raise errors.ProgrammerError("Alias '%s' overrides an existing"

  if aliases[cmd] not in commands:
    raise errors.ProgrammerError("Alias '%s' maps to non-existing"
                                 " command '%s'" % (cmd, aliases[cmd]))

  # Unpack the command definition and build a per-command option parser
  func, args_def, parser_opts, usage, description = commands[cmd]
  parser = OptionParser(option_list=parser_opts + COMMON_OPTS,
                        description=description,
                        formatter=TitledHelpFormatter(),
                        usage="%%prog %s %s" % (cmd, usage))
  # Stop parsing options at the first positional argument
  parser.disable_interspersed_args()
  options, args = parser.parse_args()

  if not _CheckArguments(cmd, args_def, args):
    return None, None, None

  return func, options, args
def _CheckArguments(cmd, args_def, args):
  """Verifies the arguments using the argument definition.

  Algorithm:

  1. Abort with error if values specified by user but none expected.

  1. For each argument in definition

    1. Keep running count of minimum number of values (min_count)

    1. Keep running count of maximum number of values (max_count)

    1. If it has an unlimited number of values

      1. Abort with error if it's not the last argument in the definition

  1. If last argument has limited number of values

    1. Abort with error if number of values doesn't match or is too large

  1. Abort with error if user didn't pass enough values (min_count)

  @param cmd: the command name, used only in error messages
  @param args_def: list of argument definitions with min/max attributes
  @param args: the actual arguments passed by the user

  """
  if args and not args_def:
    ToStderr("Error: Command %s expects no arguments", cmd)

  last_idx = len(args_def) - 1

  for idx, arg in enumerate(args_def):
    # Accumulate the minimum count; None propagates "unbounded"
    if min_count is None:
    elif arg.min is not None:
      min_count += arg.min

    # Accumulate the maximum count; None propagates "unlimited"
    if max_count is None:
    elif arg.max is not None:
      max_count += arg.max

    check_max = (arg.max is not None)
    # Only the last argument may accept an unlimited number of values
    elif arg.max is None:
      raise errors.ProgrammerError("Only the last argument can have max=None")

  # Command with exact number of arguments
  if (min_count is not None and max_count is not None and
      min_count == max_count and len(args) != min_count):
    ToStderr("Error: Command %s expects %d argument(s)", cmd, min_count)

  # Command with limited number of arguments
  if max_count is not None and len(args) > max_count:
    ToStderr("Error: Command %s expects only %d argument(s)",

  # Command with some required arguments
  if min_count is not None and len(args) < min_count:
    ToStderr("Error: Command %s expects at least %d argument(s)",
def SplitNodeOption(value):
  """Splits the value of a --node option.

  @param value: the option value, possibly of the form "primary:secondary"
  @return: a two-element list when the value contains a colon (split on
      the first one only), otherwise the tuple (value, None)

  """
  has_secondary = value and ":" in value
  if not has_secondary:
    return (value, None)
  return value.split(":", 1)
def CalculateOSNames(os_name, os_variants):
  """Calculates all the names an OS can be called, according to its variants.

  @type os_name: string
  @param os_name: base name of the os
  @type os_variants: list or None
  @param os_variants: list of supported variants
  @rtype: list
  @return: list of valid names

  """
  # Guard against a missing/empty variant list: per the contract above
  # os_variants may be None, and iterating over None would raise TypeError
  # (and an empty list would yield no names at all).
  if not os_variants:
    return [os_name]
  return ['%s+%s' % (os_name, v) for v in os_variants]
def ParseFields(selected, default):
  """Parses the values of "--field"-like options.

  @type selected: string or None
  @param selected: User-selected options
  @type default: list
  @param default: Default fields
  @rtype: list
  @return: the list of field names to use

  """
  if selected is None:
    # No selection made: use the caller-provided defaults (the visible
    # code fell through here, which would crash on the startswith below)
    return default

  if selected.startswith("+"):
    # A leading "+" means "append these fields to the defaults"
    return default + selected[1:].split(",")

  return selected.split(",")
1415 UsesRPC = rpc.RunWithRPC
def AskUser(text, choices=None):
  """Ask the user a question.

  @param text: the question to ask
  @param choices: list with elements tuples (input_char, return_value,
      description); if not given, it will default to: [('y', True,
      'Perform the operation'), ('n', False, 'Do no do the operation')];
      note that the '?' char is reserved for help
  @return: one of the return values from the choices list; if input is
      not possible (i.e. not running with a tty, we return the last
      entry of the list)

  """
  # Default yes/no question
  choices = [('y', True, 'Perform the operation'),
             ('n', False, 'Do not perform the operation')]
  if not choices or not isinstance(choices, list):
    raise errors.ProgrammerError("Invalid choices argument to AskUser")
  for entry in choices:
    if not isinstance(entry, tuple) or len(entry) < 3 or entry[0] == '?':
      raise errors.ProgrammerError("Invalid choices element to AskUser")

  # The last choice doubles as the default/fallback answer
  answer = choices[-1][1]

  # Re-wrap the question text to at most 70 columns per input line
  for line in text.splitlines():
    new_text.append(textwrap.fill(line, 70, replace_whitespace=False))
  text = "\n".join(new_text)

  # Talk to the controlling terminal directly, not stdin/stdout
  f = file("/dev/tty", "a+")

  # Build the "y/n/[q]" style prompt; the default answer is bracketed
  chars = [entry[0] for entry in choices]
  chars[-1] = "[%s]" % chars[-1]

  # Map input characters to their return values
  maps = dict([(entry[0], entry[1]) for entry in choices])

  f.write("/".join(chars))

  line = f.readline(2).strip().lower()

  # '?' help: describe each available choice
  for entry in choices:
    f.write(" %s - %s\n" % (entry[0], entry[2]))
class JobSubmittedException(Exception):
  """Job was submitted, client should exit.

  This exception has one argument, the ID of the job that was
  submitted. The handler should print this ID.

  This is not an error, just a structured way to exit from clients.

  """
def SendJob(ops, cl=None):
  """Function to submit an opcode without waiting for the results.

  @type ops: list
  @param ops: list of opcodes
  @type cl: luxi.Client
  @param cl: the luxi client to use for communicating with the master;
      if None, a new client will be created

  """
  # Submit all opcodes as a single job and hand back its ID
  job_id = cl.SubmitJob(ops)
def GenericPollJob(job_id, cbs, report_cbs):
  """Generic job-polling function.

  @type job_id: number
  @param job_id: Job ID
  @type cbs: Instance of L{JobPollCbBase}
  @param cbs: Data callbacks
  @type report_cbs: Instance of L{JobPollReportCbBase}
  @param report_cbs: Reporting callbacks
  @return: the opcode results of the finished job
  @raise errors.JobLost: if the job can no longer be found
  @raise errors.OpExecError: if the job failed or was canceled

  """
  prev_job_info = None
  prev_logmsg_serial = None

  # Wait for the job's status field to change (or new log entries)
  result = cbs.WaitForJobChangeOnce(job_id, ["status"], prev_job_info,
  # job not found, go away!
  raise errors.JobLost("Job with id %s lost" % job_id)

  if result == constants.JOB_NOTCHANGED:
    report_cbs.ReportNotChanged(job_id, status)

  # Split result, a tuple of (field values, log entries)
  (job_info, log_entries) = result
  (status, ) = job_info

  # Forward any new log entries to the reporting callbacks, keeping track
  # of the highest serial already seen
  for log_entry in log_entries:
    (serial, timestamp, log_type, message) = log_entry
    report_cbs.ReportLogMessage(job_id, serial, timestamp,
    prev_logmsg_serial = max(prev_logmsg_serial, serial)

  # TODO: Handle canceled and archived jobs
  elif status in (constants.JOB_STATUS_SUCCESS,
                  constants.JOB_STATUS_ERROR,
                  constants.JOB_STATUS_CANCELING,
                  constants.JOB_STATUS_CANCELED):

  prev_job_info = job_info

  # The job reached a final state: fetch its detailed results
  jobs = cbs.QueryJobs([job_id], ["status", "opstatus", "opresult"])
  raise errors.JobLost("Job with id %s lost" % job_id)

  status, opstatus, result = jobs[0]

  if status == constants.JOB_STATUS_SUCCESS:
  if status in (constants.JOB_STATUS_CANCELING, constants.JOB_STATUS_CANCELED):
    raise errors.OpExecError("Job was canceled")

  # Job failed: find the failing opcode and raise an appropriate error
  for idx, (status, msg) in enumerate(zip(opstatus, result)):
    if status == constants.OP_STATUS_SUCCESS:
    elif status == constants.OP_STATUS_ERROR:
      # Re-raise an encapsulated exception if one is embedded in msg
      errors.MaybeRaise(msg)

      raise errors.OpExecError("partial failure (opcode %d): %s" %

  raise errors.OpExecError(str(msg))

  # default failure mode
  raise errors.OpExecError(result)
class JobPollCbBase:
  """Base class for L{GenericPollJob} data callbacks.

  """
    """Initializes this class.

    """
  def WaitForJobChangeOnce(self, job_id, fields,
                           prev_job_info, prev_log_serial):
    """Waits for changes on a job.

    Must be implemented by subclasses.

    """
    raise NotImplementedError()

  def QueryJobs(self, job_ids, fields):
    """Returns the selected fields for the selected job IDs.

    Must be implemented by subclasses.

    @type job_ids: list of numbers
    @param job_ids: Job IDs
    @type fields: list of strings
    @param fields: Fields

    """
    raise NotImplementedError()
class JobPollReportCbBase:
  """Base class for L{GenericPollJob} reporting callbacks.

  """
    """Initializes this class.

    """
  def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
    """Handles a log message.

    Must be implemented by subclasses.

    """
    raise NotImplementedError()

  def ReportNotChanged(self, job_id, status):
    """Called if a job hasn't changed in a while.

    Must be implemented by subclasses.

    @type job_id: number
    @param job_id: Job ID
    @type status: string or None
    @param status: Job status if available

    """
    raise NotImplementedError()
class _LuxiJobPollCb(JobPollCbBase):
  # Data callbacks backed by a luxi client.
  def __init__(self, cl):
    """Initializes this class.

    @param cl: luxi client used by the callbacks below (as C{self.cl})

    """
    JobPollCbBase.__init__(self)
    # NOTE(review): the full source presumably stores cl here (self.cl);
    # not visible in this excerpt — confirm against upstream.

  def WaitForJobChangeOnce(self, job_id, fields,
                           prev_job_info, prev_log_serial):
    """Waits for changes on a job.

    """
    # Delegate directly to the luxi client
    return self.cl.WaitForJobChangeOnce(job_id, fields,
                                        prev_job_info, prev_log_serial)

  def QueryJobs(self, job_ids, fields):
    """Returns the selected fields for the selected job IDs.

    """
    return self.cl.QueryJobs(job_ids, fields)
class FeedbackFnJobPollReportCb(JobPollReportCbBase):
  # Reporting callbacks that forward log messages to a feedback function.
  def __init__(self, feedback_fn):
    """Initializes this class.

    @type feedback_fn: callable
    @param feedback_fn: Function called for each log message

    """
    JobPollReportCbBase.__init__(self)

    self.feedback_fn = feedback_fn

    assert callable(feedback_fn)

  def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
    """Handles a log message.

    """
    # Pass the raw (timestamp, type, message) triple to the feedback function
    self.feedback_fn((timestamp, log_type, log_msg))

  def ReportNotChanged(self, job_id, status):
    """Called if a job hasn't changed in a while.

    """
class StdioJobPollReportCb(JobPollReportCbBase):
  # Reporting callbacks writing to stdout/stderr.
    """Initializes this class.

    """
    JobPollReportCbBase.__init__(self)

    # Ensure each "waiting" notice below is printed only once per job
    self.notified_queued = False
    self.notified_waitlock = False

  def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
    """Handles a log message.

    """
    # Print "<ctime> <formatted message>" on stdout
    ToStdout("%s %s", time.ctime(utils.MergeTime(timestamp)),
             FormatLogMessage(log_type, log_msg))

  def ReportNotChanged(self, job_id, status):
    """Called if a job hasn't changed in a while.

    """
    if status == constants.JOB_STATUS_QUEUED and not self.notified_queued:
      ToStderr("Job %s is waiting in queue", job_id)
      self.notified_queued = True

    elif status == constants.JOB_STATUS_WAITLOCK and not self.notified_waitlock:
      ToStderr("Job %s is trying to acquire all necessary locks", job_id)
      self.notified_waitlock = True
def FormatLogMessage(log_type, log_msg):
  """Formats a job message according to its type.

  @param log_type: message type (compared against C{constants.ELOG_MESSAGE})
  @param log_msg: the message payload
  @return: the message, passed through L{utils.SafeEncode}

  """
  # Anything that isn't a plain-text message gets stringified first
  if log_type == constants.ELOG_MESSAGE:
    text = log_msg
  else:
    text = str(log_msg)
  return utils.SafeEncode(text)
def PollJob(job_id, cl=None, feedback_fn=None, reporter=None):
  """Function to poll for the result of a job.

  @type job_id: job identified
  @param job_id: the job to poll for results
  @type cl: luxi.Client
  @param cl: the luxi client to use for communicating with the master;
      if None, a new client will be created
  @param feedback_fn: if given, log messages are passed to this function
  @param reporter: an explicit reporting callback instance; mutually
      exclusive with feedback_fn

  """
  if reporter is None:
    # Choose the reporter: feedback-function based if one was given,
    # stdio-based otherwise
    reporter = FeedbackFnJobPollReportCb(feedback_fn)
    reporter = StdioJobPollReportCb()
  raise errors.ProgrammerError("Can't specify reporter and feedback function")

  return GenericPollJob(job_id, _LuxiJobPollCb(cl), reporter)
def SubmitOpCode(op, cl=None, feedback_fn=None, opts=None, reporter=None):
  """Legacy function to submit an opcode.

  This is just a simple wrapper over the construction of the processor
  instance. It should be extended to better handle feedback and
  interaction functions.

  @param op: the opcode to submit
  @return: the result of the opcode

  """
  # Apply generic command-line options (debug, dry-run, priority) first
  SetGenericOpcodeOpts([op], opts)

  job_id = SendJob([op], cl=cl)

  # Wait for the job; a single opcode yields a single-element result list
  op_results = PollJob(job_id, cl=cl, feedback_fn=feedback_fn,

  return op_results[0]
def SubmitOrSend(op, opts, cl=None, feedback_fn=None):
  """Wrapper around SubmitOpCode or SendJob.

  This function will decide, based on the 'opts' parameter, whether to
  submit and wait for the result of the opcode (and return it), or
  whether to just send the job and print its identifier. It is used in
  order to simplify the implementation of the '--submit' option.

  It will also process the opcodes if we're sending them via SendJob
  (otherwise SubmitOpCode does it).

  """
  if opts and opts.submit_only:
    SetGenericOpcodeOpts(job, opts)
    job_id = SendJob(job, cl=cl)
    # Structured exit: the caller catches this, prints the job ID and quits
    raise JobSubmittedException(job_id)

  return SubmitOpCode(op, cl=cl, feedback_fn=feedback_fn, opts=opts)
def SetGenericOpcodeOpts(opcode_list, options):
  """Processor for generic options.

  This function updates the given opcodes based on generic command
  line options (like debug, dry-run, etc.).

  @param opcode_list: list of opcodes
  @param options: command line options or None
  @return: None (in-place modification)

  """
  for op in opcode_list:
    op.debug_level = options.debug
    # dry_run is only propagated when the command actually defines it
    if hasattr(options, "dry_run"):
      op.dry_run = options.dry_run
    # Translate the symbolic priority name into its numeric value
    if getattr(options, "priority", None) is not None:
      op.priority = _PRIONAME_TO_VALUE[options.priority]
  # NOTE(review): interior of a client-construction helper (presumably
  # GetClient()); the enclosing "def"/"try" lines are not in this excerpt.
  # TODO: Cache object?
  client = luxi.Client()
  # No master daemon reachable: figure out whether the cluster exists and
  # whether we are on the master node, to give a precise error
  except luxi.NoMasterError:
    ss = ssconf.SimpleStore()

    # Try to read ssconf file
    except errors.ConfigurationError:
      raise errors.OpPrereqError("Cluster not initialized or this machine is"
                                 " not part of a cluster")

    master, myself = ssconf.GetMasterAndMyself(ss=ss)
    if master != myself:
      raise errors.OpPrereqError("This is not the master node, please connect"
                                 " to node '%s' and rerun the command" %
def FormatError(err):
  """Return a formatted error message for a given error.

  This function takes an exception instance and returns a tuple
  consisting of two values: first, the recommended exit code, and
  second, a string describing the error message (not
  newline-terminated).

  @param err: the exception instance to format

  """
  # Each branch below maps a known exception type to a user-readable
  # message accumulated in obuf
  if isinstance(err, errors.ConfigurationError):
    txt = "Corrupt configuration file: %s" % msg
    obuf.write(txt + "\n")
    obuf.write("Aborting.")
  elif isinstance(err, errors.HooksAbort):
    obuf.write("Failure: hooks execution failed:\n")
    # err.args[0] is a list of (node, script, output) tuples
    for node, script, out in err.args[0]:
      obuf.write(" node: %s, script: %s, output: %s\n" %
                 (node, script, out))
      obuf.write(" node: %s, script: %s (no output)\n" %
  elif isinstance(err, errors.HooksFailure):
    obuf.write("Failure: hooks general failure: %s" % msg)
  elif isinstance(err, errors.ResolverError):
    # Distinguish failing to resolve ourselves from other hosts
    this_host = netutils.Hostname.GetSysName()
    if err.args[0] == this_host:
      msg = "Failure: can't resolve my own hostname ('%s')"
      msg = "Failure: can't resolve hostname '%s'"
    obuf.write(msg % err.args[0])
  elif isinstance(err, errors.OpPrereqError):
    # Two-argument form carries a structured error type as well
    if len(err.args) == 2:
      obuf.write("Failure: prerequisites not met for this"
                 " operation:\nerror type: %s, error details:\n%s" %
                 (err.args[1], err.args[0]))
      obuf.write("Failure: prerequisites not met for this"
                 " operation:\n%s" % msg)
  elif isinstance(err, errors.OpExecError):
    obuf.write("Failure: command execution error:\n%s" % msg)
  elif isinstance(err, errors.TagError):
    obuf.write("Failure: invalid tag(s) given:\n%s" % msg)
  elif isinstance(err, errors.JobQueueDrainError):
    obuf.write("Failure: the job queue is marked for drain and doesn't"
               " accept new requests\n")
  elif isinstance(err, errors.JobQueueFull):
    obuf.write("Failure: the job queue is full and doesn't accept new"
               " job submissions until old jobs are archived\n")
  elif isinstance(err, errors.TypeEnforcementError):
    obuf.write("Parameter Error: %s" % msg)
  elif isinstance(err, errors.ParameterError):
    obuf.write("Failure: unknown/wrong parameter name '%s'" % msg)
  elif isinstance(err, luxi.NoMasterError):
    obuf.write("Cannot communicate with the master daemon.\nIs it running"
               " and listening for connections?")
  elif isinstance(err, luxi.TimeoutError):
    obuf.write("Timeout while talking to the master daemon. Jobs might have"
               " been submitted and will continue to run even if the call"
               " timed out. Useful commands in this situation are \"gnt-job"
               " list\", \"gnt-job cancel\" and \"gnt-job watch\". Error:\n")
  elif isinstance(err, luxi.PermissionError):
    obuf.write("It seems you don't have permissions to connect to the"
               " master daemon.\nPlease retry as a different user.")
  elif isinstance(err, luxi.ProtocolError):
    obuf.write("Unhandled protocol error while talking to the master daemon:\n"
  elif isinstance(err, errors.JobLost):
    obuf.write("Error checking job status: %s" % msg)
  elif isinstance(err, errors.QueryFilterParseError):
    obuf.write("Error while parsing query filter: %s\n" % err.args[0])
    obuf.write("\n".join(err.GetDetails()))
  elif isinstance(err, errors.GenericError):
    obuf.write("Unhandled Ganeti error: %s" % msg)
  elif isinstance(err, JobSubmittedException):
    # Not an error: a job was submitted in --submit mode
    obuf.write("JobID: %s\n" % err.args[0])
    obuf.write("Unhandled exception: %s" % msg)
  return retcode, obuf.getvalue().rstrip('\n')
def GenericMain(commands, override=None, aliases=None):
  """Generic main function for all the gnt-* commands.

  - commands: a dictionary with a special structure, see the design doc
    for command line handling.
  - override: if not None, we expect a dictionary with keys that will
    override command line options; this can be used to pass
    options from the scripts to generic functions
  - aliases: dictionary with command aliases {'alias': 'target, ...}

  @return: the exit code of the executed command

  """
  # save the program name and the entire command line for later logging
  binary = os.path.basename(sys.argv[0]) or sys.argv[0]
  if len(sys.argv) >= 2:
    binary += " " + sys.argv[1]
    old_cmdline = " ".join(sys.argv[2:])

  binary = "<unknown program>"

  func, options, args = _ParseArgs(sys.argv, commands, aliases)
  except errors.ParameterError, err:
    result, err_msg = FormatError(err)

  if func is None: # parse error

  # Script-supplied option overrides take precedence over the parsed ones
  if override is not None:
    for key, val in override.iteritems():
      setattr(options, key, val)

  utils.SetupLogging(constants.LOG_COMMANDS, binary, debug=options.debug,
                     stderr_logging=True)

  # Log the invocation for auditing purposes
  logging.info("run with arguments '%s'", old_cmdline)
  logging.info("run with no arguments")

  result = func(options, args)
  except (errors.GenericError, luxi.ProtocolError,
          JobSubmittedException), err:
    result, err_msg = FormatError(err)
    logging.exception("Error during command processing")
  except KeyboardInterrupt:
    result = constants.EXIT_FAILURE
    ToStderr("Aborted. Note that if the operation created any jobs, they"
             " might have been submitted and"
             " will continue to run in the background.")
def ParseNicOption(optvalue):
  """Parses the value of the --net option(s).

  @param optvalue: list of (index, params-dict) pairs as produced by
      the option parser
  @raise errors.OpPrereqError: on invalid indices or parameter dicts

  """
  # The highest index given determines how many NICs are defined
  nic_max = max(int(nidx[0]) + 1 for nidx in optvalue)
  except (TypeError, ValueError), err:
    raise errors.OpPrereqError("Invalid NIC index passed: %s" % str(err))

  # One (initially empty) parameter dict per NIC slot
  nics = [{}] * nic_max
  for nidx, ndict in optvalue:

    if not isinstance(ndict, dict):
      raise errors.OpPrereqError("Invalid nic/%d value: expected dict,"
                                 " got %s" % (nidx, ndict))

    # Validate/coerce the per-NIC parameter types
    utils.ForceDictType(ndict, constants.INIC_PARAMS_TYPES)
def GenericInstanceCreate(mode, opts, args):
  """Add an instance to the cluster via either creation or import.

  @param mode: constants.INSTANCE_CREATE or constants.INSTANCE_IMPORT
  @param opts: the command line options selected by the user
  @type args: list
  @param args: should contain only one element, the new instance name
  @rtype: int
  @return: the desired exit code

  """
  (pnode, snode) = SplitNodeOption(opts.node)

  hypervisor, hvparams = opts.hypervisor

  nics = ParseNicOption(opts.nics)
  elif mode == constants.INSTANCE_CREATE:
    # default of one nic, all auto

  if opts.disk_template == constants.DT_DISKLESS:
    # Diskless instances must not carry any disk information
    if opts.disks or opts.sd_size is not None:
      raise errors.OpPrereqError("Diskless instance but disk"
                                 " information passed")

    if (not opts.disks and not opts.sd_size
        and mode == constants.INSTANCE_CREATE):
      raise errors.OpPrereqError("No disk information specified")
    # --disk and the legacy single-disk --os-size are mutually exclusive
    if opts.disks and opts.sd_size is not None:
      raise errors.OpPrereqError("Please use either the '--disk' or"
    if opts.sd_size is not None:
      # Convert the legacy single-disk size into the --disk representation
      opts.disks = [(0, {constants.IDISK_SIZE: opts.sd_size})]

    # The highest disk index given determines the number of disks
    disk_max = max(int(didx[0]) + 1 for didx in opts.disks)
    except ValueError, err:
      raise errors.OpPrereqError("Invalid disk index passed: %s" % str(err))
    disks = [{}] * disk_max

    for didx, ddict in opts.disks:

      if not isinstance(ddict, dict):
        msg = "Invalid disk/%d value: expected dict, got %s" % (didx, ddict)
        raise errors.OpPrereqError(msg)
      elif constants.IDISK_SIZE in ddict:
        # "size" and "adopt" are mutually exclusive per disk
        if constants.IDISK_ADOPT in ddict:
          raise errors.OpPrereqError("Only one of 'size' and 'adopt' allowed"
                                     " (disk %d)" % didx)
          # Normalize the size (accepts unit suffixes) to mebibytes
          ddict[constants.IDISK_SIZE] = \
            utils.ParseUnit(ddict[constants.IDISK_SIZE])
        except ValueError, err:
          raise errors.OpPrereqError("Invalid disk size for disk %d: %s" %
      elif constants.IDISK_ADOPT in ddict:
        if mode == constants.INSTANCE_IMPORT:
          raise errors.OpPrereqError("Disk adoption not allowed for instance"
        # Adopted disks get their size filled in later
        ddict[constants.IDISK_SIZE] = 0
        raise errors.OpPrereqError("Missing size or adoption source for"

  # Validate/coerce backend and hypervisor parameter types
  utils.ForceDictType(opts.beparams, constants.BES_PARAMETER_TYPES)
  utils.ForceDictType(hvparams, constants.HVS_PARAMETER_TYPES)

  # Mode-specific opcode parameters
  if mode == constants.INSTANCE_CREATE:
    force_variant = opts.force_variant
    no_install = opts.no_install
    identify_defaults = False
  elif mode == constants.INSTANCE_IMPORT:
    force_variant = False
    src_node = opts.src_node
    src_path = opts.src_dir
    identify_defaults = opts.identify_defaults
  raise errors.ProgrammerError("Invalid creation mode %s" % mode)

  op = opcodes.OpInstanceCreate(instance_name=instance,
                                disk_template=opts.disk_template,
                                pnode=pnode, snode=snode,
                                ip_check=opts.ip_check,
                                name_check=opts.name_check,
                                wait_for_sync=opts.wait_for_sync,
                                file_storage_dir=opts.file_storage_dir,
                                file_driver=opts.file_driver,
                                iallocator=opts.iallocator,
                                hypervisor=hypervisor,
                                beparams=opts.beparams,
                                osparams=opts.osparams,
                                force_variant=force_variant,
                                no_install=no_install,
                                identify_defaults=identify_defaults)

  SubmitOrSend(op, opts)
class _RunWhileClusterStoppedHelper:
  """Helper class for L{RunWhileClusterStopped} to simplify state management

  """
  def __init__(self, feedback_fn, cluster_name, master_node, online_nodes):
    """Initializes this class.

    @type feedback_fn: callable
    @param feedback_fn: Feedback function
    @type cluster_name: string
    @param cluster_name: Cluster name
    @type master_node: string
    @param master_node: Master node name
    @type online_nodes: list
    @param online_nodes: List of names of online nodes

    """
    self.feedback_fn = feedback_fn
    self.cluster_name = cluster_name
    self.master_node = master_node
    self.online_nodes = online_nodes

    self.ssh = ssh.SshRunner(self.cluster_name)

    # All online nodes except the master; used for remote commands and
    # for restarting daemons before the master
    self.nonmaster_nodes = [name for name in online_nodes
                            if name != master_node]

    assert self.master_node not in self.nonmaster_nodes

  def _RunCmd(self, node_name, cmd):
    """Runs a command on the local or a remote machine.

    @type node_name: string
    @param node_name: Machine name; None or the master node name means
        "run locally"

    """
    if node_name is None or node_name == self.master_node:
      # No need to use SSH
      result = utils.RunCmd(cmd)
      result = self.ssh.Run(node_name, "root", utils.ShellQuoteArgs(cmd))

    # Build a descriptive error message on failure
    errmsg = ["Failed to run command %s" % result.cmd]
    errmsg.append("on node %s" % node_name)
    errmsg.append(": exitcode %s and error %s" %
                  (result.exit_code, result.output))
    raise errors.OpExecError(" ".join(errmsg))

  def Call(self, fn, *args):
    """Call function while all daemons are stopped.

    @type fn: callable
    @param fn: Function to be called

    """
    # Pause watcher by acquiring an exclusive lock on watcher state file
    self.feedback_fn("Blocking watcher")
    watcher_block = utils.FileLock.Open(constants.WATCHER_STATEFILE)
    # TODO: Currently, this just blocks. There's no timeout.
    # TODO: Should it be a shared lock?
    watcher_block.Exclusive(blocking=True)

    # Stop master daemons, so that no new jobs can come in and all running
    # ones are finished
    self.feedback_fn("Stopping master daemons")
    self._RunCmd(None, [constants.DAEMON_UTIL, "stop-master"])

    # Stop daemons on all nodes
    for node_name in self.online_nodes:
      self.feedback_fn("Stopping daemons on %s" % node_name)
      self._RunCmd(node_name, [constants.DAEMON_UTIL, "stop-all"])

    # All daemons are shut down now
    return fn(self, *args)
    except Exception, err:
      _, errmsg = FormatError(err)
      logging.exception("Caught exception")
      self.feedback_fn(errmsg)

    # Start cluster again, master node last
    for node_name in self.nonmaster_nodes + [self.master_node]:
      self.feedback_fn("Starting daemons on %s" % node_name)
      self._RunCmd(node_name, [constants.DAEMON_UTIL, "start-all"])

    # Resume watcher
    watcher_block.Close()
def RunWhileClusterStopped(feedback_fn, fn, *args):
  """Calls a function while all cluster daemons are stopped.

  @type feedback_fn: callable
  @param feedback_fn: Feedback function
  @type fn: callable
  @param fn: Function to be called when daemons are stopped
  @return: the return value of fn

  """
  feedback_fn("Gathering cluster information")

  # This ensures we're running on the master daemon
  (cluster_name, master_node) = \
    cl.QueryConfigValues(["cluster_name", "master_node"])

  online_nodes = GetOnlineNodes([], cl=cl)

  # Don't keep a reference to the client. The master daemon will go away.

  assert master_node in online_nodes

  return _RunWhileClusterStoppedHelper(feedback_fn, cluster_name, master_node,
                                       online_nodes).Call(fn, *args)
def GenerateTable(headers, fields, separator, data,
                  numfields=None, unitfields=None,
  """Prints a table with headers and different fields.

  @type headers: dict
  @param headers: dictionary mapping field names to headers for
      the table
  @type fields: list
  @param fields: the field names corresponding to each row in
      the data field
  @param separator: the separator to be used; if this is None,
      the default 'smart' algorithm is used which computes optimal
      field width, otherwise just the separator is used between
      each field
  @type data: list
  @param data: a list of lists, each sublist being one row to be output
  @type numfields: list
  @param numfields: a list with the fields that hold numeric
      values and thus should be right-aligned
  @type unitfields: list
  @param unitfields: a list with the fields that hold numeric
      values that should be formatted with the units field
  @type units: string or None
  @param units: the units we should use for formatting, or None for
      automatic choice (human-readable for non-separator usage, otherwise
      megabytes); this is a one-letter string

  """
  # Normalize the field sets (empty when not given)
  if numfields is None:
  if unitfields is None:

  numfields = utils.FieldSet(*numfields) # pylint: disable-msg=W0142
  unitfields = utils.FieldSet(*unitfields) # pylint: disable-msg=W0142

  # Build a per-column printf-style format specification
  for field in fields:
    if headers and field not in headers:
      # TODO: handle better unknown fields (either revert to old
      # style of raising exception, or deal more intelligently with
      # unknown fields)
      headers[field] = field
    if separator is not None:
      format_fields.append("%s")
    elif numfields.Matches(field):
      # numeric fields are right-aligned ...
      format_fields.append("%*s")
      # ... everything else is left-aligned
      format_fields.append("%-*s")

  if separator is None:
    # smart mode: track the maximum width per column
    mlens = [0 for name in fields]
    format_str = ' '.join(format_fields)
    # escape literal '%' in user-supplied separators
    format_str = separator.replace("%", "%%").join(format_fields)

  # First pass over the data: stringify values and record column widths
  for idx, val in enumerate(row):
    if unitfields.Matches(fields[idx]):
      except (TypeError, ValueError):
      val = row[idx] = utils.FormatUnit(val, units)
    val = row[idx] = str(val)
    if separator is None:
      mlens[idx] = max(mlens[idx], len(val))

  # Emit the header row, widening columns as needed
  for idx, name in enumerate(fields):
    if separator is None:
      mlens[idx] = max(mlens[idx], len(hdr))
      args.append(mlens[idx])
    result.append(format_str % tuple(args))

  if separator is None:
    assert len(mlens) == len(fields)
    # the last column doesn't need padding unless it is right-aligned
    if fields and not numfields.Matches(fields[-1]):

  # Emit the data rows using the computed widths
  line = ['-' for _ in fields]
  for idx in range(len(fields)):
    if separator is None:
      args.append(mlens[idx])
      args.append(line[idx])
    result.append(format_str % tuple(args))
2375 def _FormatBool(value):
2376 """Formats a boolean value as a string.
#: Default formatting for query results; (callback, align right)
_DEFAULT_FORMAT_QUERY = {
  constants.QFT_TEXT: (str, False),
  constants.QFT_BOOL: (_FormatBool, False),
  # numbers are right-aligned
  constants.QFT_NUMBER: (str, True),
  constants.QFT_TIMESTAMP: (utils.FormatTime, False),
  constants.QFT_OTHER: (str, False),
  constants.QFT_UNKNOWN: (str, False),
def _GetColumnFormatter(fdef, override, unit):
  """Returns formatting function for a field.

  @type fdef: L{objects.QueryFieldDefinition}
  @type override: dict
  @param override: Dictionary for overriding field formatting functions,
      indexed by field name, contents like L{_DEFAULT_FORMAT_QUERY}
  @type unit: string
  @param unit: Unit used for formatting fields of type L{constants.QFT_UNIT}
  @rtype: tuple; (callable, bool)
  @return: Returns the function to format a value (takes one parameter) and a
      boolean for aligning the value on the right-hand side

  """
  # Per-field overrides take precedence over type-based defaults
  fmt = override.get(fdef.name, None)

  assert constants.QFT_UNIT not in _DEFAULT_FORMAT_QUERY

  if fdef.kind == constants.QFT_UNIT:
    # Can't keep this information in the static dictionary
    return (lambda value: utils.FormatUnit(value, unit), True)

  fmt = _DEFAULT_FORMAT_QUERY.get(fdef.kind, None)

  raise NotImplementedError("Can't format column type '%s'" % fdef.kind)
class _QueryColumnFormatter:
  """Callable class for formatting fields of a query.

  """
  def __init__(self, fn, status_fn, verbose):
    """Initializes this class.

    @type fn: callable
    @param fn: Formatting function
    @type status_fn: callable
    @param status_fn: Function to report fields' status
    @type verbose: boolean
    @param verbose: whether to use verbose field descriptions or not

    """
    # NOTE(review): the full source presumably also stores fn here (used
    # below as self._fn); not visible in this excerpt.
    self._status_fn = status_fn
    self._verbose = verbose

  def __call__(self, data):
    """Returns a field's string representation.

    """
    # data is a (result status, value) pair
    (status, value) = data

    # Report the status to the callback before rendering anything
    self._status_fn(status)

    if status == constants.RS_NORMAL:
      return self._fn(value)

    assert value is None, \
      "Found value %r for abnormal status %s" % (value, status)

    return FormatResultError(status, self._verbose)
def FormatResultError(status, verbose):
  """Formats result status other than L{constants.RS_NORMAL}.

  @param status: The result status
  @type verbose: boolean
  @param verbose: Whether to return the verbose text
  @return: Text of result status

  """
  assert status != constants.RS_NORMAL, \
         "FormatResultError called with status equal to constants.RS_NORMAL"
  try:
    (verbose_text, normal_text) = constants.RSS_DESCRIPTION[status]
  except KeyError:
    raise NotImplementedError("Unknown status %s" % status)
  else:
    if verbose:
      return verbose_text
    return normal_text
def FormatQueryResult(result, unit=None, format_override=None, separator=None,
                      header=False, verbose=False):
  """Formats data in L{objects.QueryResponse}.

  @type result: L{objects.QueryResponse}
  @param result: result of query operation
  @type unit: string
  @param unit: Unit used for formatting fields of type L{constants.QFT_UNIT},
    see L{utils.text.FormatUnit}; if C{None}, a default is chosen depending
    on whether a separator is used
  @type format_override: dict
  @param format_override: Dictionary for overriding field formatting functions,
    indexed by field name, contents like L{_DEFAULT_FORMAT_QUERY}
  @type separator: string or None
  @param separator: String used to separate fields
  @type header: bool
  @param header: Whether to output header row
  @type verbose: boolean
  @param verbose: whether to use verbose field descriptions or not
  @rtype: tuple; (one of L{QR_NORMAL}, L{QR_UNKNOWN} or L{QR_INCOMPLETE},
    list of formatted lines)

  """
  if unit is None:
    # Machine-parseable output (separator set) defaults to megabytes,
    # human-readable output to automatic unit choice
    if separator:
      unit = "m"
    else:
      unit = "h"

  if format_override is None:
    format_override = {}

  stats = dict.fromkeys(constants.RS_ALL, 0)

  def _RecordStatus(status):
    """Counts per-status occurrences while formatting fields."""
    if status in stats:
      stats[status] += 1

  columns = []
  for fdef in result.fields:
    assert fdef.title and fdef.name
    (fn, align_right) = _GetColumnFormatter(fdef, format_override, unit)
    columns.append(TableColumn(fdef.title,
                               _QueryColumnFormatter(fn, _RecordStatus,
                                                     verbose),
                               align_right))

  table = FormatTable(result.data, columns, header, separator)

  # Collect statistics
  assert len(stats) == len(constants.RS_ALL)
  assert compat.all(count >= 0 for count in stats.values())

  # Determine overall status. If there was no data, unknown fields must be
  # detected via the field definitions.
  if (stats[constants.RS_UNKNOWN] or
      (not result.data and _GetUnknownFields(result.fields))):
    status = QR_UNKNOWN
  elif compat.any(count > 0 for key, count in stats.items()
                  if key != constants.RS_NORMAL):
    status = QR_INCOMPLETE
  else:
    status = QR_NORMAL

  return (status, table)
def _GetUnknownFields(fdefs):
  """Returns list of unknown fields included in C{fdefs}.

  @type fdefs: list of L{objects.QueryFieldDefinition}
  @rtype: list
  @return: the field definitions whose kind is L{constants.QFT_UNKNOWN}

  """
  return [fdef for fdef in fdefs
          if fdef.kind == constants.QFT_UNKNOWN]
def _WarnUnknownFields(fdefs):
  """Prints a warning to stderr if a query included unknown fields.

  @type fdefs: list of L{objects.QueryFieldDefinition}
  @rtype: bool
  @return: whether any unknown fields were found

  """
  unknown = _GetUnknownFields(fdefs)
  if not unknown:
    return False

  ToStderr("Warning: Queried for unknown fields %s",
           utils.CommaJoin(fdef.name for fdef in unknown))
  return True
def GenericList(resource, fields, names, unit, separator, header, cl=None,
                format_override=None, verbose=False, force_filter=False):
  """Generic implementation for listing all items of a resource.

  @param resource: One of L{constants.QR_VIA_LUXI}
  @type fields: list of strings
  @param fields: List of fields to query for
  @type names: list of strings
  @param names: Names of items to query for
  @type unit: string or None
  @param unit: Unit used for formatting fields of type L{constants.QFT_UNIT} or
    None for automatic choice (human-readable for non-separator usage,
    otherwise megabytes); this is a one-letter string
  @type separator: string or None
  @param separator: String used to separate fields
  @type header: bool
  @param header: Whether to show header row
  @type force_filter: bool
  @param force_filter: Whether to always treat names as filter
  @type cl: luxi client or None
  @param cl: if not None, luxi client to use
  @type format_override: dict
  @param format_override: Dictionary for overriding field formatting functions,
    indexed by field name, contents like L{_DEFAULT_FORMAT_QUERY}
  @type verbose: boolean
  @param verbose: whether to use verbose field descriptions or not
  @rtype: int
  @return: L{constants.EXIT_UNKNOWN_FIELD} if unknown fields were queried,
    otherwise L{constants.EXIT_SUCCESS}

  """
  if not names:
    names = None

  if (force_filter or
      (names and len(names) == 1 and qlang.MaybeFilter(names[0]))):
    try:
      (filter_text, ) = names
    except (TypeError, ValueError):
      raise errors.OpPrereqError("Exactly one argument must be given as a"
                                 " filter")

    logging.debug("Parsing '%s' as filter", filter_text)
    filter_ = qlang.ParseFilter(filter_text)
  else:
    filter_ = qlang.MakeSimpleFilter("name", names)

  if cl is None:
    cl = GetClient()

  response = cl.Query(resource, fields, filter_)

  found_unknown = _WarnUnknownFields(response.fields)

  (status, data) = FormatQueryResult(response, unit=unit, separator=separator,
                                     header=header,
                                     format_override=format_override,
                                     verbose=verbose)

  for line in data:
    ToStdout(line)

  assert ((found_unknown and status == QR_UNKNOWN) or
          (not found_unknown and status != QR_UNKNOWN))

  if status == QR_UNKNOWN:
    return constants.EXIT_UNKNOWN_FIELD

  # TODO: Should the list command fail if not all data could be collected?
  return constants.EXIT_SUCCESS
def GenericListFields(resource, fields, separator, header, cl=None):
  """Generic implementation for listing fields for a resource.

  @param resource: One of L{constants.QR_VIA_LUXI}
  @type fields: list of strings
  @param fields: List of fields to query for
  @type separator: string or None
  @param separator: String used to separate fields
  @type header: bool
  @param header: Whether to show header row
  @type cl: luxi client or None
  @param cl: if not None, luxi client to use
  @rtype: int
  @return: L{constants.EXIT_UNKNOWN_FIELD} if unknown fields were queried,
    otherwise L{constants.EXIT_SUCCESS}

  """
  if cl is None:
    cl = GetClient()

  if not fields:
    fields = None

  response = cl.QueryFields(resource, fields)

  found_unknown = _WarnUnknownFields(response.fields)

  columns = [
    TableColumn("Name", str, False),
    TableColumn("Title", str, False),
    TableColumn("Description", str, False),
    ]

  rows = [[fdef.name, fdef.title, fdef.doc] for fdef in response.fields]

  for line in FormatTable(rows, columns, header, separator):
    ToStdout(line)

  if found_unknown:
    return constants.EXIT_UNKNOWN_FIELD

  return constants.EXIT_SUCCESS
class TableColumn:
  """Describes a column for L{FormatTable}.

  """
  def __init__(self, title, fn, align_right):
    """Initializes this class.

    @type title: string
    @param title: Column title
    @type fn: callable
    @param fn: Formatting function
    @type align_right: bool
    @param align_right: Whether to align values on the right-hand side

    """
    self.title = title
    self.format = fn
    self.align_right = align_right
2699 def _GetColFormatString(width, align_right):
2700 """Returns the format string for a field.
2708 return "%%%s%ss" % (sign, width)
def FormatTable(rows, columns, header, separator):
  """Formats data as a table.

  @type rows: list of lists
  @param rows: Row data, one list per row
  @type columns: list of L{TableColumn}
  @param columns: Column descriptions
  @type header: bool
  @param header: Whether to show header row
  @type separator: string or None
  @param separator: String used to separate columns
  @rtype: list of strings
  @return: the formatted table lines

  """
  if header:
    data = [[col.title for col in columns]]
    colwidth = [len(col.title) for col in columns]
  else:
    data = []
    colwidth = [0 for _ in columns]

  # Format rows
  for row in rows:
    assert len(row) == len(columns)

    formatted = [col.format(value) for value, col in zip(row, columns)]

    if separator is None:
      # Update column widths
      for idx, (oldwidth, value) in enumerate(zip(colwidth, formatted)):
        # Modifying a list's items while iterating is fine
        colwidth[idx] = max(oldwidth, len(value))

    data.append(formatted)

  if separator is not None:
    # Return early if a separator is used
    return [separator.join(row) for row in data]

  if columns and not columns[-1].align_right:
    # Avoid unnecessary spaces at end of line
    colwidth[-1] = 0

  # Build format string
  fmt = " ".join([_GetColFormatString(width, col.align_right)
                  for col, width in zip(columns, colwidth)])

  return [fmt % tuple(row) for row in data]
def FormatTimestamp(ts):
  """Formats a given timestamp.

  @type ts: timestamp
  @param ts: a timeval-type timestamp, a tuple of seconds and microseconds

  @rtype: string
  @return: a string with the formatted timestamp, or "?" if the input is
    not a two-element tuple/list

  """
  if not isinstance(ts, (tuple, list)) or len(ts) != 2:
    return "?"
  (sec, usec) = ts
  return time.strftime("%F %T", time.localtime(sec)) + ".%06d" % usec
def ParseTimespec(value):
  """Parse a time specification.

  The following suffixes will be recognized:

    - s: seconds
    - m: minutes
    - h: hours
    - d: day
    - w: weeks

  Without any suffix, the value will be taken to be in seconds.

  @rtype: int
  @return: the equivalent number of seconds
  @raise errors.OpPrereqError: if the specification is empty or invalid

  """
  value = str(value)
  if not value:
    raise errors.OpPrereqError("Empty time specification passed")
  suffix_map = {
    "s": 1,
    "m": 60,
    "h": 3600,
    "d": 86400,
    "w": 604800,
    }
  if value[-1] not in suffix_map:
    try:
      value = int(value)
    except (TypeError, ValueError):
      raise errors.OpPrereqError("Invalid time specification '%s'" % value)
  else:
    multiplier = suffix_map[value[-1]]
    value = value[:-1]
    if not value: # no data left after stripping the suffix
      raise errors.OpPrereqError("Invalid time specification (only"
                                 " suffix passed)")
    try:
      value = int(value) * multiplier
    except (TypeError, ValueError):
      raise errors.OpPrereqError("Invalid time specification '%s'" % value)
  return value
def GetOnlineNodes(nodes, cl=None, nowarn=False, secondary_ips=False,
                   filter_master=False):
  """Returns the names of online nodes.

  This function will also log a warning on stderr with the names of
  the offline nodes that were skipped.

  @param nodes: if not empty, use only this subset of nodes (minus the
      offline ones)
  @param cl: if not None, luxi client to use
  @type nowarn: boolean
  @param nowarn: by default, this function will output a note with the
      offline nodes that are skipped; if this parameter is True the
      note is not displayed
  @type secondary_ips: boolean
  @param secondary_ips: if True, return the secondary IPs instead of the
      names, useful for doing network traffic over the replication interface
      (if any)
  @type filter_master: boolean
  @param filter_master: if True, do not return the master node in the list
      (useful in coordination with secondary_ips where we cannot check our
      node name against the list)

  """
  if cl is None:
    cl = GetClient()

  # Column index in the query result: node name or secondary IP
  if secondary_ips:
    name_idx = 2
  else:
    name_idx = 0

  if filter_master:
    master_node = cl.QueryConfigValues(["master_node"])[0]
    filter_fn = lambda x: x != master_node
  else:
    filter_fn = lambda _: True

  result = cl.QueryNodes(names=nodes, fields=["name", "offline", "sip"],
                         use_locking=False)
  offline = [row[0] for row in result if row[1]]
  if offline and not nowarn:
    ToStderr("Note: skipping offline node(s): %s" % utils.CommaJoin(offline))
  return [row[name_idx] for row in result if not row[1] and filter_fn(row[0])]
2864 def _ToStream(stream, txt, *args):
2865 """Write a message to a stream, bypassing the logging system
2867 @type stream: file object
2868 @param stream: the file to which we should write
2870 @param txt: the message
2875 stream.write(txt % args)
def ToStdout(txt, *args):
  """Write a message to stdout only, bypassing the logging system

  This is just a wrapper over _ToStream.

  @type txt: str
  @param txt: the message
  @param args: optional C{%}-interpolation arguments for C{txt}

  """
  _ToStream(sys.stdout, txt, *args)
def ToStderr(txt, *args):
  """Write a message to stderr only, bypassing the logging system

  This is just a wrapper over _ToStream.

  @type txt: str
  @param txt: the message
  @param args: optional C{%}-interpolation arguments for C{txt}

  """
  _ToStream(sys.stderr, txt, *args)
class JobExecutor(object):
  """Class which manages the submission and execution of multiple jobs.

  Note that instances of this class should not be reused between
  GetResults() calls.

  """
  def __init__(self, cl=None, verbose=True, opts=None, feedback_fn=None):
    """Initializes this class.

    @param cl: if not None, luxi client to use
    @type verbose: boolean
    @param verbose: whether to report submitted job IDs on stdout
    @param opts: parsed command line options, passed to
        L{SetGenericOpcodeOpts} for queued opcodes
    @param feedback_fn: feedback function passed to L{PollJob}

    """
    self.queue = []
    if cl is None:
      cl = GetClient()
    self.cl = cl
    self.verbose = verbose
    self.jobs = []
    self.opts = opts
    self.feedback_fn = feedback_fn

  def QueueJob(self, name, *ops):
    """Record a job for later submit.

    @type name: string
    @param name: a description of the job, will be used in WaitJobSet

    """
    SetGenericOpcodeOpts(ops, self.opts)
    self.queue.append((name, ops))

  def SubmitPending(self, each=False):
    """Submit all pending jobs.

    @type each: boolean
    @param each: if True, submit jobs one by one instead of via
        SubmitManyJobs

    """
    if each:
      results = []
      for row in self.queue:
        # SubmitJob will remove the success status, but raise an exception if
        # the submission fails, so we'll notice that anyway.
        results.append([True, self.cl.SubmitJob(row[1])])
    else:
      results = self.cl.SubmitManyJobs([row[1] for row in self.queue])
    for (idx, ((status, data), (name, _))) in enumerate(zip(results,
                                                            self.queue)):
      self.jobs.append((idx, status, data, name))

  def _ChooseJob(self):
    """Choose a non-waiting/queued job to poll next.

    """
    assert self.jobs, "_ChooseJob called with empty job list"

    result = self.cl.QueryJobs([i[2] for i in self.jobs], ["status"])
    assert result

    for job_data, status in zip(self.jobs, result):
      if (isinstance(status, list) and status and
          status[0] in (constants.JOB_STATUS_QUEUED,
                        constants.JOB_STATUS_WAITLOCK,
                        constants.JOB_STATUS_CANCELING)):
        # job is still present and waiting
        continue
      # good candidate found (either running job or lost job)
      self.jobs.remove(job_data)
      return job_data

    # no job found; fall back to polling the first one
    return self.jobs.pop(0)

  def GetResults(self):
    """Wait for and return the results of all jobs.

    @rtype: list
    @return: list of tuples (success, job results), in the same order
        as the submitted jobs; if a job has failed, instead of the result
        there will be the error message

    """
    if not self.jobs:
      self.SubmitPending()
    results = []
    if self.verbose:
      ok_jobs = [row[2] for row in self.jobs if row[1]]
      if ok_jobs:
        ToStdout("Submitted jobs %s", utils.CommaJoin(ok_jobs))

    # first, remove any non-submitted jobs
    self.jobs, failures = compat.partition(self.jobs, lambda x: x[1])
    for idx, _, jid, name in failures:
      ToStderr("Failed to submit job for %s: %s", name, jid)
      results.append((idx, False, jid))

    while self.jobs:
      (idx, _, jid, name) = self._ChooseJob()
      ToStdout("Waiting for job %s for %s...", jid, name)
      try:
        job_result = PollJob(jid, cl=self.cl, feedback_fn=self.feedback_fn)
        success = True
      except errors.JobLost as err:
        _, job_result = FormatError(err)
        ToStderr("Job %s for %s has been archived, cannot check its result",
                 jid, name)
        success = False
      except (errors.GenericError, luxi.ProtocolError) as err:
        _, job_result = FormatError(err)
        success = False
        # the error message will always be shown, verbose or not
        ToStderr("Job %s for %s has failed: %s", jid, name, job_result)

      results.append((idx, success, job_result))

    # sort based on the index, then drop it
    results.sort()
    results = [i[1:] for i in results]

    return results

  def WaitOrShow(self, wait):
    """Wait for job results or only print the job IDs.

    @type wait: boolean
    @param wait: whether to wait or not

    """
    if wait:
      return self.GetResults()
    else:
      if not self.jobs:
        self.SubmitPending()
      for _, status, result, name in self.jobs:
        if status:
          ToStdout("%s: %s", result, name)
        else:
          ToStderr("Failure for %s: %s", name, result)
      return [row[1:3] for row in self.jobs]
def FormatParameterDict(buf, param_dict, actual, level=1):
  """Formats a parameter dictionary.

  @type buf: L{StringIO}
  @param buf: the buffer into which to write
  @type param_dict: dict
  @param param_dict: the own parameters
  @type actual: dict
  @param actual: the current parameter set (including defaults)
  @param level: Level of indent

  """
  indent = "  " * level
  for key in sorted(actual):
    # Values not set explicitly fall back to the effective default
    val = param_dict.get(key, "default (%s)" % actual[key])
    buf.write("%s- %s: %s\n" % (indent, key, val))
3057 def ConfirmOperation(names, list_type, text, extra=""):
3058 """Ask the user to confirm an operation on a list of list_type.
3060 This function is used to request confirmation for doing an operation
3061 on a given list of list_type.
3064 @param names: the list of names that we display when
3065 we ask for confirmation
3066 @type list_type: str
3067 @param list_type: Human readable name for elements in the list (e.g. nodes)
3069 @param text: the operation that the user should confirm
3071 @return: True or False depending on user's confirmation.
3075 msg = ("The %s will operate on %d %s.\n%s"
3076 "Do you want to continue?" % (text, count, list_type, extra))
3077 affected = (("\nAffected %s:\n" % list_type) +
3078 "\n".join([" %s" % name for name in names]))
3080 choices = [("y", True, "Yes, execute the %s" % text),
3081 ("n", False, "No, abort the %s" % text)]
3084 choices.insert(1, ("v", "v", "View the list of affected %s" % list_type))
3087 question = msg + affected
3089 choice = AskUser(question, choices)
3092 choice = AskUser(msg + affected, choices)