4 # Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011 Google Inc.
6 # This program is free software; you can redistribute it and/or modify
7 # it under the terms of the GNU General Public License as published by
8 # the Free Software Foundation; either version 2 of the License, or
9 # (at your option) any later version.
11 # This program is distributed in the hope that it will be useful, but
12 # WITHOUT ANY WARRANTY; without even the implied warranty of
13 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 # General Public License for more details.
16 # You should have received a copy of the GNU General Public License
17 # along with this program; if not, write to the Free Software
18 # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
22 """Module dealing with command line parsing"""
32 from cStringIO import StringIO
34 from ganeti import utils
35 from ganeti import errors
36 from ganeti import constants
37 from ganeti import opcodes
38 from ganeti import luxi
39 from ganeti import ssconf
40 from ganeti import rpc
41 from ganeti import ssh
42 from ganeti import compat
43 from ganeti import netutils
44 from ganeti import qlang
46 from optparse import (OptionParser, TitledHelpFormatter,
47 Option, OptionValueError)
# NOTE(review): the entries below look like members of this module's __all__
# export list, but the "__all__ = [" opener and the closing "]" are not
# visible in this excerpt (the listing is sub-sampled) — confirm against the
# complete file before editing.
51 # Command line options
64 "CLUSTER_DOMAIN_SECRET_OPT",
81 "FILESTORE_DRIVER_OPT",
87 "GLOBAL_SHARED_FILEDIR_OPT",
92 "DEFAULT_IALLOCATOR_OPT",
93 "IDENTIFY_DEFAULTS_OPT",
95 "IGNORE_FAILURES_OPT",
97 "IGNORE_REMOVE_FAILURES_OPT",
98 "IGNORE_SECONDARIES_OPT",
102 "MAINTAIN_NODE_HEALTH_OPT",
105 "MIGRATION_MODE_OPT",
107 "NEW_CLUSTER_CERT_OPT",
108 "NEW_CLUSTER_DOMAIN_SECRET_OPT",
109 "NEW_CONFD_HMAC_KEY_OPT",
113 "NODE_FORCE_JOIN_OPT",
115 "NODE_PLACEMENT_OPT",
119 "NODRBD_STORAGE_OPT",
125 "NOMODIFY_ETCHOSTS_OPT",
126 "NOMODIFY_SSH_SETUP_OPT",
132 "NOSSH_KEYCHECK_OPT",
144 "PREALLOC_WIPE_DISKS_OPT",
145 "PRIMARY_IP_VERSION_OPT",
150 "REMOVE_INSTANCE_OPT",
158 "SHUTDOWN_TIMEOUT_OPT",
174 # Generic functions for CLI programs
177 "GenericInstanceCreate",
183 "JobSubmittedException",
185 "RunWhileClusterStopped",
189 # Formatting functions
190 "ToStderr", "ToStdout",
193 "FormatParameterDict",
202 # command line options support infrastructure
203 "ARGS_MANY_INSTANCES",
222 "OPT_COMPL_INST_ADD_NODES",
223 "OPT_COMPL_MANY_NODES",
224 "OPT_COMPL_ONE_IALLOCATOR",
225 "OPT_COMPL_ONE_INSTANCE",
226 "OPT_COMPL_ONE_NODE",
227 "OPT_COMPL_ONE_NODEGROUP",
233 "COMMON_CREATE_OPTS",
# Opcode-priority name/value pairs and query-result status codes.
# NOTE(review): the "_PRIORITY_NAMES = [" opener and list closer, and the
# "(QR_NORMAL, QR_UNKNOWN," part of the three-way status tuple, are missing
# from this excerpt — only fragments are visible. Verify against the full
# file before editing.
239 #: Priorities (sorted)
241 ("low", constants.OP_PRIO_LOW),
242 ("normal", constants.OP_PRIO_NORMAL),
243 ("high", constants.OP_PRIO_HIGH),
246 #: Priority dictionary for easier lookup
247 # TODO: Replace this and _PRIORITY_NAMES with a single sorted dictionary once
248 # we migrate to Python 2.6
249 _PRIONAME_TO_VALUE = dict(_PRIORITY_NAMES)
251 # Query result status for clients
254 QR_INCOMPLETE) = range(3)
# NOTE(review): the enclosing class header (presumably "class _Argument") is
# not visible in this excerpt, nor are the __init__ body assignments or the
# "def __repr__(self):" line — __repr__ reads self.min/self.max, so __init__
# presumably stores them; confirm against the full file.
258 def __init__(self, min=0, max=None): # pylint: disable-msg=W0622
263 return ("<%s min=%s max=%s>" %
264 (self.__class__.__name__, self.min, self.max))
# Argument type whose values can be suggested (e.g. for shell completion).
# NOTE(review): the docstring terminator and the "def __repr__(self):" line
# are missing from this excerpt; lines 279-280 are the __repr__ body.
267 class ArgSuggest(_Argument):
268 """Suggesting argument.
270 Value can be any of the ones passed to the constructor.
273 # pylint: disable-msg=W0622
274 def __init__(self, min=0, max=None, choices=None):
275 _Argument.__init__(self, min=min, max=max)
276 self.choices = choices
279 return ("<%s min=%s max=%s choices=%r>" %
280 (self.__class__.__name__, self.min, self.max, self.choices))
# Marker subclasses of _Argument/ArgSuggest used to tag the kind of
# positional argument a command accepts (instance, node, group, job id,
# file, command, host, OS).
# NOTE(review): each class below is missing its docstring terminator and/or
# body in this excerpt; several docstrings are cut off entirely. The classes
# presumably carry no behavior beyond their type — confirm against the full
# file.
283 class ArgChoice(ArgSuggest):
286 Value can be any of the ones passed to the constructor. Like L{ArgSuggest},
287 but value must be one of the choices.
292 class ArgUnknown(_Argument):
293 """Unknown argument to program (e.g. determined at runtime).
298 class ArgInstance(_Argument):
299 """Instances argument.
304 class ArgNode(_Argument):
310 class ArgGroup(_Argument):
311 """Node group argument.
316 class ArgJobId(_Argument):
322 class ArgFile(_Argument):
323 """File path argument.
328 class ArgCommand(_Argument):
334 class ArgHost(_Argument):
340 class ArgOs(_Argument):
# Prebuilt positional-argument specifications shared by command definitions.
# "MANY" variants accept any number of arguments; "ONE" variants require
# exactly one (min=1, max=1).
# NOTE(review): neighbouring definitions (e.g. an ARGS_NONE or one-OS/node
# variants at the original lines 346/352) are not visible in this excerpt.
347 ARGS_MANY_INSTANCES = [ArgInstance()]
348 ARGS_MANY_NODES = [ArgNode()]
349 ARGS_MANY_GROUPS = [ArgGroup()]
350 ARGS_ONE_INSTANCE = [ArgInstance(min=1, max=1)]
351 ARGS_ONE_NODE = [ArgNode(min=1, max=1)]
353 ARGS_ONE_GROUP = [ArgGroup(min=1, max=1)]
354 ARGS_ONE_OS = [ArgOs(min=1, max=1)]
# Resolve the (tag kind, object name) pair from parsed options/arguments.
# Raises ProgrammerError when the command was wired up without a tag_type,
# and OpPrereqError when the user gave no target object.
# NOTE(review): the "kind = opts.tag_type" assignment, the per-kind name
# extraction (args.pop etc.) and the return statement are missing from this
# excerpt — the body below is incomplete; confirm against the full file.
357 def _ExtractTagsObject(opts, args):
358 """Extract the tag type object.
360 Note that this function will modify its args parameter.
363 if not hasattr(opts, "tag_type"):
364 raise errors.ProgrammerError("tag_type not passed to _ExtractTagsObject")
366 if kind == constants.TAG_CLUSTER:
368 elif kind in (constants.TAG_NODEGROUP,
370 constants.TAG_INSTANCE):
372 raise errors.OpPrereqError("no arguments passed to the command")
376 raise errors.ProgrammerError("Unhandled tag type '%s'" % kind)
# Append tag names read from opts.tags_source (a file path, or "-" for
# stdin) onto the args list in place.
# NOTE(review): the early return for a missing source, the stdin branch, the
# read-loop condition/termination and the file close are missing from this
# excerpt — the body below is incomplete; confirm against the full file.
# The manual readline loop (instead of a list comprehension over the file)
# is deliberate per the original comment about Python bug 1633941.
380 def _ExtendTags(opts, args):
381 """Extend the args if a source file has been given.
383 This function will extend the tags with the contents of the file
384 passed in the 'tags_source' attribute of the opts parameter. A file
385 named '-' will be replaced by stdin.
388 fname = opts.tags_source
394 new_fh = open(fname, "r")
397 # we don't use the nice 'new_data = [line.strip() for line in fh]'
398 # because of python bug 1633941
400 line = new_fh.readline()
403 new_data.append(line.strip())
406 args.extend(new_data)
# CLI handler: query and print the tags of one cluster/node/instance object.
# NOTE(review): the client construction (presumably "cl = GetClient()"), the
# sort of the result list and the printing loop are missing from this
# excerpt — confirm against the full file.
409 def ListTags(opts, args):
410 """List the tags on a given object.
412 This is a generic implementation that knows how to deal with all
413 three cases of tag objects (cluster, node, instance). The opts
414 argument is expected to contain a tag_type field denoting what
415 object type we work on.
418 kind, name = _ExtractTagsObject(opts, args)
420 result = cl.QueryTags(kind, name)
421 result = list(result)
# CLI handler: add the tags given as arguments (plus any read from
# --from/tags_source via _ExtendTags) to one cluster/node/instance object
# by submitting an OpTagsSet opcode.
# NOTE(review): the emptiness check guarding the OpPrereqError raise
# (presumably "if not args:") is missing from this excerpt.
427 def AddTags(opts, args):
428 """Add tags on a given object.
430 This is a generic implementation that knows how to deal with all
431 three cases of tag objects (cluster, node, instance). The opts
432 argument is expected to contain a tag_type field denoting what
433 object type we work on.
436 kind, name = _ExtractTagsObject(opts, args)
437 _ExtendTags(opts, args)
439 raise errors.OpPrereqError("No tags to be added")
440 op = opcodes.OpTagsSet(kind=kind, name=name, tags=args)
441 SubmitOpCode(op, opts=opts)
# CLI handler: mirror image of AddTags — removes the given tags via an
# OpTagsDel opcode. Kept structurally parallel to AddTags on purpose.
# NOTE(review): the emptiness check guarding the OpPrereqError raise
# (presumably "if not args:") is missing from this excerpt.
444 def RemoveTags(opts, args):
445 """Remove tags from a given object.
447 This is a generic implementation that knows how to deal with all
448 three cases of tag objects (cluster, node, instance). The opts
449 argument is expected to contain a tag_type field denoting what
450 object type we work on.
453 kind, name = _ExtractTagsObject(opts, args)
454 _ExtendTags(opts, args)
456 raise errors.OpPrereqError("No tags to be removed")
457 op = opcodes.OpTagsDel(kind=kind, name=name, tags=args)
458 SubmitOpCode(op, opts=opts)
# optparse type-checker: converts a size string with unit suffix into an
# integer via utils.ParseUnit, re-raising parse failures as the
# OptionValueError that optparse expects.
# NOTE(review): the docstring terminator and the "try:" opener are missing
# from this excerpt. The "except errors.UnitParseError, err:" form is
# Python 2 syntax, consistent with the cStringIO import at the top of file.
461 def check_unit(option, opt, value): # pylint: disable-msg=W0613
462 """OptParsers custom converter for units.
466 return utils.ParseUnit(value)
467 except errors.UnitParseError, err:
468 raise OptionValueError("option %s: %s" % (opt, err))
# Parse "key=val,key=val,..." into a dict; bare "no_<key>" becomes
# {key: False}, bare "-<key>" (UN_PREFIX) becomes {key: None}, any other
# bare key becomes {key: True}. Duplicate keys raise ParameterError.
# NOTE(review): the "kv_dict = {}" initialisation, the "if '=' in elem"
# branching, the duplicate-key membership test, the dict insertion and the
# return statement are missing from this excerpt — the body below is
# incomplete. NO_PREFIX/UN_PREFIX are module constants not visible here.
471 def _SplitKeyVal(opt, data):
472 """Convert a KeyVal string into a dict.
474 This function will convert a key=val[,...] string into a dict. Empty
475 values will be converted specially: keys which have the prefix 'no_'
476 will have the value=False and the prefix stripped, the others will
480 @param opt: a string holding the option name for which we process the
481 data, used in building error messages
483 @param data: a string of the format key=val,key=val,...
485 @return: {key=val, key=val}
486 @raises errors.ParameterError: if there are duplicate keys
491 for elem in utils.UnescapeAndSplit(data, sep=","):
493 key, val = elem.split("=", 1)
495 if elem.startswith(NO_PREFIX):
496 key, val = elem[len(NO_PREFIX):], False
497 elif elem.startswith(UN_PREFIX):
498 key, val = elem[len(UN_PREFIX):], None
500 key, val = elem, True
502 raise errors.ParameterError("Duplicate key '%s' in option %s" %
# optparse type-checker: parses "ident:key=val,key=val" into a tuple
# (ident, {key: val}); a bare "no_<ident>" / "-<ident>" (with no options)
# maps to (ident, False) / (ident, None) for group removal. Passing options
# together with a removal prefix is rejected.
# NOTE(review): the "if ':' not in value" branching, the "if rest:" guards
# inside the prefix branches, the final else branch and the return statement
# are missing from this excerpt — the body below is incomplete.
508 def check_ident_key_val(option, opt, value): # pylint: disable-msg=W0613
509 """Custom parser for ident:key=val,key=val options.
511 This will store the parsed values as a tuple (ident, {key: val}). As such,
512 multiple uses of this option via action=append is possible.
516 ident, rest = value, ''
518 ident, rest = value.split(":", 1)
520 if ident.startswith(NO_PREFIX):
522 msg = "Cannot pass options when removing parameter groups: %s" % value
523 raise errors.ParameterError(msg)
524 retval = (ident[len(NO_PREFIX):], False)
525 elif ident.startswith(UN_PREFIX):
527 msg = "Cannot pass options when removing parameter groups: %s" % value
528 raise errors.ParameterError(msg)
529 retval = (ident[len(UN_PREFIX):], None)
531 kv_dict = _SplitKeyVal(opt, rest)
532 retval = (ident, kv_dict)
# optparse type-checker: thin wrapper delegating "key=val,..." parsing to
# _SplitKeyVal, returning the resulting dict.
# NOTE(review): the docstring terminator is missing from this excerpt.
536 def check_key_val(option, opt, value): # pylint: disable-msg=W0613
537 """Custom parser class for key=val,key=val options.
539 This will store the parsed values as a dict {key: val}.
542 return _SplitKeyVal(opt, value)
# optparse type-checker: case-insensitively maps constants.VALUE_FALSE/"no"
# and constants.VALUE_TRUE/"yes" to False/True; anything else raises
# ParameterError.
# NOTE(review): the docstring terminator, the "return False"/"return True"
# statements and the final "else:" are missing from this excerpt — the body
# below is incomplete.
545 def check_bool(option, opt, value): # pylint: disable-msg=W0613
546 """Custom parser for yes/no options.
548 This will store the parsed value as either True or False.
551 value = value.lower()
552 if value == constants.VALUE_FALSE or value == "no":
554 elif value == constants.VALUE_TRUE or value == "yes":
557 raise errors.ParameterError("Invalid boolean value '%s'" % value)
# Sentinel integers for dynamic shell-completion hints on options.
# NOTE(review): range(100, 107) yields seven values but only five tuple
# targets are visible — two members (presumably OPT_COMPL_ONE_NODE and
# OPT_COMPL_ONE_OS, which the frozenset and options below reference) plus
# the frozenset's closing "])" are missing from this excerpt.
560 # completion_suggestion is normally a list. Using numeric values not evaluating
561 # to False for dynamic completion.
562 (OPT_COMPL_MANY_NODES,
564 OPT_COMPL_ONE_INSTANCE,
566 OPT_COMPL_ONE_IALLOCATOR,
567 OPT_COMPL_INST_ADD_NODES,
568 OPT_COMPL_ONE_NODEGROUP) = range(100, 107)
570 OPT_COMPL_ALL = frozenset([
571 OPT_COMPL_MANY_NODES,
573 OPT_COMPL_ONE_INSTANCE,
575 OPT_COMPL_ONE_IALLOCATOR,
576 OPT_COMPL_INST_ADD_NODES,
577 OPT_COMPL_ONE_NODEGROUP,
# optparse Option subclass: adds the "completion_suggest" attribute and
# registers the custom types wired to the check_* converters above
# ("identkeyval", "keyval", "unit", "bool").
# NOTE(review): the ATTRS list closer, the TYPES tuple contents/closer and
# the docstring terminator are missing from this excerpt — the class body is
# incomplete; the TYPES tuple presumably names the four custom type strings.
581 class CliOption(Option):
582 """Custom option class for optparse.
585 ATTRS = Option.ATTRS + [
586 "completion_suggest",
588 TYPES = Option.TYPES + (
594 TYPE_CHECKER = Option.TYPE_CHECKER.copy()
595 TYPE_CHECKER["identkeyval"] = check_ident_key_val
596 TYPE_CHECKER["keyval"] = check_key_val
597 TYPE_CHECKER["unit"] = check_unit
598 TYPE_CHECKER["bool"] = check_bool
601 # optparse.py sets make_option, so we do it for our own option class, too
602 cli_option = CliOption
# Shared command-line option objects: generic output/behaviour flags and
# instance-creation basics.
# NOTE(review): several help strings and closing parentheses are truncated
# in this excerpt (e.g. IGNORE_OFFLINE_OPT, TAG_ADD_OPT, DRY_RUN_OPT,
# VERBOSE_OPT are cut off mid-call). Also "verify it it could be" in the
# DRY_RUN_OPT help string looks like a typo ("if it") — it is a runtime
# string, so it is only flagged here, not changed.
607 DEBUG_OPT = cli_option("-d", "--debug", default=0, action="count",
608 help="Increase debugging level")
610 NOHDR_OPT = cli_option("--no-headers", default=False,
611 action="store_true", dest="no_headers",
612 help="Don't display column headers")
614 SEP_OPT = cli_option("--separator", default=None,
615 action="store", dest="separator",
616 help=("Separator between output fields"
617 " (defaults to one space)"))
619 USEUNITS_OPT = cli_option("--units", default=None,
620 dest="units", choices=('h', 'm', 'g', 't'),
621 help="Specify units for output (one of h/m/g/t)")
623 FIELDS_OPT = cli_option("-o", "--output", dest="output", action="store",
624 type="string", metavar="FIELDS",
625 help="Comma separated list of output fields")
627 FORCE_OPT = cli_option("-f", "--force", dest="force", action="store_true",
628 default=False, help="Force the operation")
630 CONFIRM_OPT = cli_option("--yes", dest="confirm", action="store_true",
631 default=False, help="Do not require confirmation")
633 IGNORE_OFFLINE_OPT = cli_option("--ignore-offline", dest="ignore_offline",
634 action="store_true", default=False,
635 help=("Ignore offline nodes and do as much"
638 TAG_ADD_OPT = cli_option("--tags", dest="tags",
639 default=None, help="Comma-separated list of instance"
642 TAG_SRC_OPT = cli_option("--from", dest="tags_source",
643 default=None, help="File with tag names")
645 SUBMIT_OPT = cli_option("--submit", dest="submit_only",
646 default=False, action="store_true",
647 help=("Submit the job and return the job ID, but"
648 " don't wait for the job to finish"))
650 SYNC_OPT = cli_option("--sync", dest="do_locking",
651 default=False, action="store_true",
652 help=("Grab locks while doing the queries"
653 " in order to ensure more consistent results"))
655 DRY_RUN_OPT = cli_option("--dry-run", default=False,
657 help=("Do not execute the operation, just run the"
658 " check steps and verify it it could be"
661 VERBOSE_OPT = cli_option("-v", "--verbose", default=False,
663 help="Increase the verbosity of the operation")
665 DEBUG_SIMERR_OPT = cli_option("--debug-simulate-errors", default=False,
666 action="store_true", dest="simulate_errors",
667 help="Debugging option that makes the operation"
668 " treat most runtime checks as failed")
670 NWSYNC_OPT = cli_option("--no-wait-for-sync", dest="wait_for_sync",
671 default=True, action="store_false",
672 help="Don't wait for sync (DANGEROUS!)")
674 DISK_TEMPLATE_OPT = cli_option("-t", "--disk-template", dest="disk_template",
675 help=("Custom disk setup (%s)" %
676 utils.CommaJoin(constants.DISK_TEMPLATES)),
677 default=None, metavar="TEMPL",
678 choices=list(constants.DISK_TEMPLATES))
680 NONICS_OPT = cli_option("--no-nics", default=False, action="store_true",
681 help="Do not create any network cards for"
684 FILESTORE_DIR_OPT = cli_option("--file-storage-dir", dest="file_storage_dir",
685 help="Relative path under default cluster-wide"
686 " file storage dir to store file-based disks",
687 default=None, metavar="<DIR>")
689 FILESTORE_DRIVER_OPT = cli_option("--file-driver", dest="file_driver",
690 help="Driver to use for image files",
691 default="loop", metavar="<DRIVER>",
692 choices=list(constants.FILE_DRIVER))
694 IALLOCATOR_OPT = cli_option("-I", "--iallocator", metavar="<NAME>",
695 help="Select nodes for the instance automatically"
696 " using the <NAME> iallocator plugin",
697 default=None, type="string",
698 completion_suggest=OPT_COMPL_ONE_IALLOCATOR)
700 DEFAULT_IALLOCATOR_OPT = cli_option("-I", "--default-iallocator",
702 help="Set the default instance allocator plugin",
703 default=None, type="string",
704 completion_suggest=OPT_COMPL_ONE_IALLOCATOR)
706 OS_OPT = cli_option("-o", "--os-type", dest="os", help="What OS to run",
708 completion_suggest=OPT_COMPL_ONE_OS)
710 OSPARAMS_OPT = cli_option("-O", "--os-parameters", dest="osparams",
711 type="keyval", default={},
712 help="OS parameters")
714 FORCE_VARIANT_OPT = cli_option("--force-variant", dest="force_variant",
715 action="store_true", default=False,
716 help="Force an unknown variant")
718 NO_INSTALL_OPT = cli_option("--no-install", dest="no_install",
719 action="store_true", default=False,
720 help="Do not install the OS (will"
723 BACKEND_OPT = cli_option("-B", "--backend-parameters", dest="beparams",
724 type="keyval", default={},
725 help="Backend parameters")
727 HVOPTS_OPT = cli_option("-H", "--hypervisor-parameters", type="keyval",
728 default={}, dest="hvparams",
729 help="Hypervisor parameters")
731 HYPERVISOR_OPT = cli_option("-H", "--hypervisor-parameters", dest="hypervisor",
732 help="Hypervisor and hypervisor options, in the"
733 " format hypervisor:option=value,option=value,...",
734 default=None, type="identkeyval")
736 HVLIST_OPT = cli_option("-H", "--hypervisor-parameters", dest="hvparams",
737 help="Hypervisor and hypervisor options, in the"
738 " format hypervisor:option=value,option=value,...",
739 default=[], action="append", type="identkeyval")
741 NOIPCHECK_OPT = cli_option("--no-ip-check", dest="ip_check", default=True,
742 action="store_false",
743 help="Don't check that the instance's IP"
746 NONAMECHECK_OPT = cli_option("--no-name-check", dest="name_check",
747 default=True, action="store_false",
748 help="Don't check that the instance's name"
751 NET_OPT = cli_option("--net",
752 help="NIC parameters", default=[],
753 dest="nics", action="append", type="identkeyval")
755 DISK_OPT = cli_option("--disk", help="Disk parameters", default=[],
756 dest="disks", action="append", type="identkeyval")
758 DISKIDX_OPT = cli_option("--disks", dest="disks", default=None,
759 help="Comma-separated list of disks"
760 " indices to act on (e.g. 0,2) (optional,"
761 " defaults to all disks)")
763 OS_SIZE_OPT = cli_option("-s", "--os-size", dest="sd_size",
764 help="Enforces a single-disk configuration using the"
765 " given disk size, in MiB unless a suffix is used",
766 default=None, type="unit", metavar="<size>")
# Shared command-line option objects: migration/failover, node placement,
# instance removal/export and node-addition flags.
# NOTE(review): several help strings and closing parentheses are truncated
# in this excerpt (e.g. IGNORE_CONSIST_OPT, MIGRATION_MODE_OPT, CLEANUP_OPT,
# SRC_DIR_OPT are cut off mid-call).
768 IGNORE_CONSIST_OPT = cli_option("--ignore-consistency",
769 dest="ignore_consistency",
770 action="store_true", default=False,
771 help="Ignore the consistency of the disks on"
774 ALLOW_FAILOVER_OPT = cli_option("--allow-failover",
775 dest="allow_failover",
776 action="store_true", default=False,
777 help="If migration is not possible fallback to"
780 NONLIVE_OPT = cli_option("--non-live", dest="live",
781 default=True, action="store_false",
782 help="Do a non-live migration (this usually means"
783 " freeze the instance, save the state, transfer and"
784 " only then resume running on the secondary node)")
786 MIGRATION_MODE_OPT = cli_option("--migration-mode", dest="migration_mode",
788 choices=list(constants.HT_MIGRATION_MODES),
789 help="Override default migration mode (choose"
790 " either live or non-live")
792 NODE_PLACEMENT_OPT = cli_option("-n", "--node", dest="node",
793 help="Target node and optional secondary node",
794 metavar="<pnode>[:<snode>]",
795 completion_suggest=OPT_COMPL_INST_ADD_NODES)
797 NODE_LIST_OPT = cli_option("-n", "--node", dest="nodes", default=[],
798 action="append", metavar="<node>",
799 help="Use only this node (can be used multiple"
800 " times, if not given defaults to all nodes)",
801 completion_suggest=OPT_COMPL_ONE_NODE)
803 NODEGROUP_OPT = cli_option("-g", "--node-group",
805 help="Node group (name or uuid)",
806 metavar="<nodegroup>",
807 default=None, type="string",
808 completion_suggest=OPT_COMPL_ONE_NODEGROUP)
810 SINGLE_NODE_OPT = cli_option("-n", "--node", dest="node", help="Target node",
812 completion_suggest=OPT_COMPL_ONE_NODE)
814 NOSTART_OPT = cli_option("--no-start", dest="start", default=True,
815 action="store_false",
816 help="Don't start the instance after creation")
818 SHOWCMD_OPT = cli_option("--show-cmd", dest="show_command",
819 action="store_true", default=False,
820 help="Show command instead of executing it")
822 CLEANUP_OPT = cli_option("--cleanup", dest="cleanup",
823 default=False, action="store_true",
824 help="Instead of performing the migration, try to"
825 " recover from a failed cleanup. This is safe"
826 " to run even if the instance is healthy, but it"
827 " will create extra replication traffic and "
828 " disrupt briefly the replication (like during the"
831 STATIC_OPT = cli_option("-s", "--static", dest="static",
832 action="store_true", default=False,
833 help="Only show configuration data, not runtime data")
835 ALL_OPT = cli_option("--all", dest="show_all",
836 default=False, action="store_true",
837 help="Show info on all instances on the cluster."
838 " This can take a long time to run, use wisely")
840 SELECT_OS_OPT = cli_option("--select-os", dest="select_os",
841 action="store_true", default=False,
842 help="Interactive OS reinstall, lists available"
843 " OS templates for selection")
845 IGNORE_FAILURES_OPT = cli_option("--ignore-failures", dest="ignore_failures",
846 action="store_true", default=False,
847 help="Remove the instance from the cluster"
848 " configuration even if there are failures"
849 " during the removal process")
851 IGNORE_REMOVE_FAILURES_OPT = cli_option("--ignore-remove-failures",
852 dest="ignore_remove_failures",
853 action="store_true", default=False,
854 help="Remove the instance from the"
855 " cluster configuration even if there"
856 " are failures during the removal"
859 REMOVE_INSTANCE_OPT = cli_option("--remove-instance", dest="remove_instance",
860 action="store_true", default=False,
861 help="Remove the instance from the cluster")
863 DST_NODE_OPT = cli_option("-n", "--target-node", dest="dst_node",
864 help="Specifies the new node for the instance",
865 metavar="NODE", default=None,
866 completion_suggest=OPT_COMPL_ONE_NODE)
868 NEW_SECONDARY_OPT = cli_option("-n", "--new-secondary", dest="dst_node",
869 help="Specifies the new secondary node",
870 metavar="NODE", default=None,
871 completion_suggest=OPT_COMPL_ONE_NODE)
873 ON_PRIMARY_OPT = cli_option("-p", "--on-primary", dest="on_primary",
874 default=False, action="store_true",
875 help="Replace the disk(s) on the primary"
876 " node (applies only to internally mirrored"
877 " disk templates, e.g. %s)" %
878 utils.CommaJoin(constants.DTS_INT_MIRROR))
880 ON_SECONDARY_OPT = cli_option("-s", "--on-secondary", dest="on_secondary",
881 default=False, action="store_true",
882 help="Replace the disk(s) on the secondary"
883 " node (applies only to internally mirrored"
884 " disk templates, e.g. %s)" %
885 utils.CommaJoin(constants.DTS_INT_MIRROR))
887 AUTO_PROMOTE_OPT = cli_option("--auto-promote", dest="auto_promote",
888 default=False, action="store_true",
889 help="Lock all nodes and auto-promote as needed"
892 AUTO_REPLACE_OPT = cli_option("-a", "--auto", dest="auto",
893 default=False, action="store_true",
894 help="Automatically replace faulty disks"
895 " (applies only to internally mirrored"
896 " disk templates, e.g. %s)" %
897 utils.CommaJoin(constants.DTS_INT_MIRROR))
899 IGNORE_SIZE_OPT = cli_option("--ignore-size", dest="ignore_size",
900 default=False, action="store_true",
901 help="Ignore current recorded size"
902 " (useful for forcing activation when"
903 " the recorded size is wrong)")
905 SRC_NODE_OPT = cli_option("--src-node", dest="src_node", help="Source node",
907 completion_suggest=OPT_COMPL_ONE_NODE)
909 SRC_DIR_OPT = cli_option("--src-dir", dest="src_dir", help="Source directory",
912 SECONDARY_IP_OPT = cli_option("-s", "--secondary-ip", dest="secondary_ip",
913 help="Specify the secondary ip for the node",
914 metavar="ADDRESS", default=None)
916 READD_OPT = cli_option("--readd", dest="readd",
917 default=False, action="store_true",
918 help="Readd old node after replacing it")
920 NOSSH_KEYCHECK_OPT = cli_option("--no-ssh-key-check", dest="ssh_key_check",
921 default=True, action="store_false",
922 help="Disable SSH key fingerprint checking")
924 NODE_FORCE_JOIN_OPT = cli_option("--force-join", dest="force_join",
925 default=False, action="store_true",
926 help="Force the joining of a node")
# Shared command-line option objects: node role flags, cluster-init
# parameters, storage directories and certificate/secret renewal flags.
# NOTE(review): several calls are truncated in this excerpt (e.g.
# MAC_PREFIX_OPT, MASTER_NETDEV_OPT, GLOBAL_SHARED_FILEDIR_OPT,
# INTERVAL_OPT, EARLY_RELEASE_OPT are cut off mid-call). "Load new new
# cluster domain" in CLUSTER_DOMAIN_SECRET_OPT's help string looks like a
# duplicated word — it is a runtime string, so it is only flagged here, not
# changed.
928 MC_OPT = cli_option("-C", "--master-candidate", dest="master_candidate",
929 type="bool", default=None, metavar=_YORNO,
930 help="Set the master_candidate flag on the node")
932 OFFLINE_OPT = cli_option("-O", "--offline", dest="offline", metavar=_YORNO,
933 type="bool", default=None,
934 help=("Set the offline flag on the node"
935 " (cluster does not communicate with offline"
938 DRAINED_OPT = cli_option("-D", "--drained", dest="drained", metavar=_YORNO,
939 type="bool", default=None,
940 help=("Set the drained flag on the node"
941 " (excluded from allocation operations)"))
943 CAPAB_MASTER_OPT = cli_option("--master-capable", dest="master_capable",
944 type="bool", default=None, metavar=_YORNO,
945 help="Set the master_capable flag on the node")
947 CAPAB_VM_OPT = cli_option("--vm-capable", dest="vm_capable",
948 type="bool", default=None, metavar=_YORNO,
949 help="Set the vm_capable flag on the node")
951 ALLOCATABLE_OPT = cli_option("--allocatable", dest="allocatable",
952 type="bool", default=None, metavar=_YORNO,
953 help="Set the allocatable flag on a volume")
955 NOLVM_STORAGE_OPT = cli_option("--no-lvm-storage", dest="lvm_storage",
956 help="Disable support for lvm based instances"
958 action="store_false", default=True)
960 ENABLED_HV_OPT = cli_option("--enabled-hypervisors",
961 dest="enabled_hypervisors",
962 help="Comma-separated list of hypervisors",
963 type="string", default=None)
965 NIC_PARAMS_OPT = cli_option("-N", "--nic-parameters", dest="nicparams",
966 type="keyval", default={},
967 help="NIC parameters")
969 CP_SIZE_OPT = cli_option("-C", "--candidate-pool-size", default=None,
970 dest="candidate_pool_size", type="int",
971 help="Set the candidate pool size")
973 VG_NAME_OPT = cli_option("--vg-name", dest="vg_name",
974 help=("Enables LVM and specifies the volume group"
975 " name (cluster-wide) for disk allocation"
976 " [%s]" % constants.DEFAULT_VG),
977 metavar="VG", default=None)
979 YES_DOIT_OPT = cli_option("--yes-do-it", dest="yes_do_it",
980 help="Destroy cluster", action="store_true")
982 NOVOTING_OPT = cli_option("--no-voting", dest="no_voting",
983 help="Skip node agreement check (dangerous)",
984 action="store_true", default=False)
986 MAC_PREFIX_OPT = cli_option("-m", "--mac-prefix", dest="mac_prefix",
987 help="Specify the mac prefix for the instance IP"
988 " addresses, in the format XX:XX:XX",
992 MASTER_NETDEV_OPT = cli_option("--master-netdev", dest="master_netdev",
993 help="Specify the node interface (cluster-wide)"
994 " on which the master IP address will be added"
995 " (cluster init default: %s)" %
996 constants.DEFAULT_BRIDGE,
1000 GLOBAL_FILEDIR_OPT = cli_option("--file-storage-dir", dest="file_storage_dir",
1001 help="Specify the default directory (cluster-"
1002 "wide) for storing the file-based disks [%s]" %
1003 constants.DEFAULT_FILE_STORAGE_DIR,
1005 default=constants.DEFAULT_FILE_STORAGE_DIR)
1007 GLOBAL_SHARED_FILEDIR_OPT = cli_option("--shared-file-storage-dir",
1008 dest="shared_file_storage_dir",
1009 help="Specify the default directory (cluster-"
1010 "wide) for storing the shared file-based"
1012 constants.DEFAULT_SHARED_FILE_STORAGE_DIR,
1013 metavar="SHAREDDIR",
1014 default=constants.DEFAULT_SHARED_FILE_STORAGE_DIR)
1016 NOMODIFY_ETCHOSTS_OPT = cli_option("--no-etc-hosts", dest="modify_etc_hosts",
1017 help="Don't modify /etc/hosts",
1018 action="store_false", default=True)
1020 NOMODIFY_SSH_SETUP_OPT = cli_option("--no-ssh-init", dest="modify_ssh_setup",
1021 help="Don't initialize SSH keys",
1022 action="store_false", default=True)
1024 ERROR_CODES_OPT = cli_option("--error-codes", dest="error_codes",
1025 help="Enable parseable error messages",
1026 action="store_true", default=False)
1028 NONPLUS1_OPT = cli_option("--no-nplus1-mem", dest="skip_nplusone_mem",
1029 help="Skip N+1 memory redundancy tests",
1030 action="store_true", default=False)
1032 REBOOT_TYPE_OPT = cli_option("-t", "--type", dest="reboot_type",
1033 help="Type of reboot: soft/hard/full",
1034 default=constants.INSTANCE_REBOOT_HARD,
1036 choices=list(constants.REBOOT_TYPES))
1038 IGNORE_SECONDARIES_OPT = cli_option("--ignore-secondaries",
1039 dest="ignore_secondaries",
1040 default=False, action="store_true",
1041 help="Ignore errors from secondaries")
1043 NOSHUTDOWN_OPT = cli_option("--noshutdown", dest="shutdown",
1044 action="store_false", default=True,
1045 help="Don't shutdown the instance (unsafe)")
1047 TIMEOUT_OPT = cli_option("--timeout", dest="timeout", type="int",
1048 default=constants.DEFAULT_SHUTDOWN_TIMEOUT,
1049 help="Maximum time to wait")
1051 SHUTDOWN_TIMEOUT_OPT = cli_option("--shutdown-timeout",
1052 dest="shutdown_timeout", type="int",
1053 default=constants.DEFAULT_SHUTDOWN_TIMEOUT,
1054 help="Maximum time to wait for instance shutdown")
1056 INTERVAL_OPT = cli_option("--interval", dest="interval", type="int",
1058 help=("Number of seconds between repetions of the"
1061 EARLY_RELEASE_OPT = cli_option("--early-release",
1062 dest="early_release", default=False,
1063 action="store_true",
1064 help="Release the locks on the secondary"
1067 NEW_CLUSTER_CERT_OPT = cli_option("--new-cluster-certificate",
1068 dest="new_cluster_cert",
1069 default=False, action="store_true",
1070 help="Generate a new cluster certificate")
1072 RAPI_CERT_OPT = cli_option("--rapi-certificate", dest="rapi_cert",
1074 help="File containing new RAPI certificate")
1076 NEW_RAPI_CERT_OPT = cli_option("--new-rapi-certificate", dest="new_rapi_cert",
1077 default=None, action="store_true",
1078 help=("Generate a new self-signed RAPI"
1081 NEW_CONFD_HMAC_KEY_OPT = cli_option("--new-confd-hmac-key",
1082 dest="new_confd_hmac_key",
1083 default=False, action="store_true",
1084 help=("Create a new HMAC key for %s" %
1087 CLUSTER_DOMAIN_SECRET_OPT = cli_option("--cluster-domain-secret",
1088 dest="cluster_domain_secret",
1090 help=("Load new new cluster domain"
1091 " secret from file"))
1093 NEW_CLUSTER_DOMAIN_SECRET_OPT = cli_option("--new-cluster-domain-secret",
1094 dest="new_cluster_domain_secret",
1095 default=False, action="store_true",
1096 help=("Create a new cluster domain"
1099 USE_REPL_NET_OPT = cli_option("--use-replication-network",
1100 dest="use_replication_network",
1101 help="Whether to use the replication network"
1102 " for talking to the nodes",
1103 action="store_true", default=False)
# Shared command-line option objects: cluster maintenance, uid-pool,
# DRBD/IP-version settings, OS flags and out-of-band controls.
# NOTE(review): a few calls are truncated in this excerpt (e.g.
# PREALLOC_WIPE_DISKS_OPT, FORCE_FILTER_OPT, NO_REMEMBER_OPT's dest line).
1105 MAINTAIN_NODE_HEALTH_OPT = \
1106 cli_option("--maintain-node-health", dest="maintain_node_health",
1107 metavar=_YORNO, default=None, type="bool",
1108 help="Configure the cluster to automatically maintain node"
1109 " health, by shutting down unknown instances, shutting down"
1110 " unknown DRBD devices, etc.")
1112 IDENTIFY_DEFAULTS_OPT = \
1113 cli_option("--identify-defaults", dest="identify_defaults",
1114 default=False, action="store_true",
1115 help="Identify which saved instance parameters are equal to"
1116 " the current cluster defaults and set them as such, instead"
1117 " of marking them as overridden")
1119 UIDPOOL_OPT = cli_option("--uid-pool", default=None,
1120 action="store", dest="uid_pool",
1121 help=("A list of user-ids or user-id"
1122 " ranges separated by commas"))
1124 ADD_UIDS_OPT = cli_option("--add-uids", default=None,
1125 action="store", dest="add_uids",
1126 help=("A list of user-ids or user-id"
1127 " ranges separated by commas, to be"
1128 " added to the user-id pool"))
1130 REMOVE_UIDS_OPT = cli_option("--remove-uids", default=None,
1131 action="store", dest="remove_uids",
1132 help=("A list of user-ids or user-id"
1133 " ranges separated by commas, to be"
1134 " removed from the user-id pool"))
1136 RESERVED_LVS_OPT = cli_option("--reserved-lvs", default=None,
1137 action="store", dest="reserved_lvs",
1138 help=("A comma-separated list of reserved"
1139 " logical volumes names, that will be"
1140 " ignored by cluster verify"))
1142 ROMAN_OPT = cli_option("--roman",
1143 dest="roman_integers", default=False,
1144 action="store_true",
1145 help="Use roman numbers for positive integers")
1147 DRBD_HELPER_OPT = cli_option("--drbd-usermode-helper", dest="drbd_helper",
1148 action="store", default=None,
1149 help="Specifies usermode helper for DRBD")
1151 NODRBD_STORAGE_OPT = cli_option("--no-drbd-storage", dest="drbd_storage",
1152 action="store_false", default=True,
1153 help="Disable support for DRBD")
1155 PRIMARY_IP_VERSION_OPT = \
1156 cli_option("--primary-ip-version", default=constants.IP4_VERSION,
1157 action="store", dest="primary_ip_version",
1158 metavar="%d|%d" % (constants.IP4_VERSION,
1159 constants.IP6_VERSION),
1160 help="Cluster-wide IP version for primary IP")
1162 PRIORITY_OPT = cli_option("--priority", default=None, dest="priority",
1163 metavar="|".join(name for name, _ in _PRIORITY_NAMES),
1164 choices=_PRIONAME_TO_VALUE.keys(),
1165 help="Priority for opcode processing")
1167 HID_OS_OPT = cli_option("--hidden", dest="hidden",
1168 type="bool", default=None, metavar=_YORNO,
1169 help="Sets the hidden flag on the OS")
1171 BLK_OS_OPT = cli_option("--blacklisted", dest="blacklisted",
1172 type="bool", default=None, metavar=_YORNO,
1173 help="Sets the blacklisted flag on the OS")
1175 PREALLOC_WIPE_DISKS_OPT = cli_option("--prealloc-wipe-disks", default=None,
1176 type="bool", metavar=_YORNO,
1177 dest="prealloc_wipe_disks",
1178 help=("Wipe disks prior to instance"
1181 NODE_PARAMS_OPT = cli_option("--node-parameters", dest="ndparams",
1182 type="keyval", default=None,
1183 help="Node parameters")
1185 ALLOC_POLICY_OPT = cli_option("--alloc-policy", dest="alloc_policy",
1186 action="store", metavar="POLICY", default=None,
1187 help="Allocation policy for the node group")
1189 NODE_POWERED_OPT = cli_option("--node-powered", default=None,
1190 type="bool", metavar=_YORNO,
1191 dest="node_powered",
1192 help="Specify if the SoR for node is powered")
1194 OOB_TIMEOUT_OPT = cli_option("--oob-timeout", dest="oob_timeout", type="int",
1195 default=constants.OOB_TIMEOUT,
1196 help="Maximum time to wait for out-of-band helper")
1198 POWER_DELAY_OPT = cli_option("--power-delay", dest="power_delay", type="float",
1199 default=constants.OOB_POWER_DELAY,
1200 help="Time in seconds to wait between power-ons")
1202 FORCE_FILTER_OPT = cli_option("-F", "--filter", dest="force_filter",
1203 action="store_true", default=False,
1204 help=("Whether command argument should be treated"
1207 NO_REMEMBER_OPT = cli_option("--no-remember",
1209 action="store_true", default=False,
1210 help="Perform but do not record the change"
1211 " in the configuration")
1214 #: Options provided by all commands
1215 COMMON_OPTS = [DEBUG_OPT]
1217 # common options for creating instances. add and import then add their own
1219 COMMON_CREATE_OPTS = [
1224 FILESTORE_DRIVER_OPT,
# NOTE(review): the body below is an elided listing — guards such as the
# argv emptiness check (before orig. 1255), the sys.exit after --version
# (orig. 1264), the sortedcmds.sort() call and the alias-resolution lines
# (orig. 1295-1306) are not visible here. Restore from VCS before editing.
1242 def _ParseArgs(argv, commands, aliases):
1243 """Parser for the command line arguments.
1245 This function parses the arguments and returns the function which
1246 must be executed together with its (modified) arguments.
1248 @param argv: the command line
1249 @param commands: dictionary with special contents, see the design
1250 doc for cmdline handling
1251 @param aliases: dictionary with command aliases {'alias': 'target', ...}
1255 binary = "<command>"
1257 binary = argv[0].split("/")[-1]
1259 if len(argv) > 1 and argv[1] == "--version":
1260 ToStdout("%s (ganeti %s) %s", binary, constants.VCS_VERSION,
1261 constants.RELEASE_VERSION)
1262 # Quit right away. That way we don't have to care about this special
1263 # argument. optparse.py does it the same.
# Unknown or missing command: print usage plus the full command list.
1266 if len(argv) < 2 or not (argv[1] in commands or
1267 argv[1] in aliases):
1268 # let's do a nice thing
1269 sortedcmds = commands.keys()
1272 ToStdout("Usage: %s {command} [options...] [argument...]", binary)
1273 ToStdout("%s <command> --help to see details, or man %s", binary, binary)
1276 # compute the max line length for cmd + usage
1277 mlen = max([len(" %s" % cmd) for cmd in commands])
1278 mlen = min(60, mlen) # should not get here...
1280 # and format a nice command list
1281 ToStdout("Commands:")
1282 for cmd in sortedcmds:
1283 cmdstr = " %s" % (cmd,)
1284 help_text = commands[cmd][4]
1285 help_lines = textwrap.wrap(help_text, 79 - 3 - mlen)
1286 ToStdout("%-*s - %s", mlen, cmdstr, help_lines.pop(0))
1287 for line in help_lines:
1288 ToStdout("%-*s %s", mlen, "", line)
# (None, None, None) signals a parse error to the caller (GenericMain).
1292 return None, None, None
1294 # get command, unalias it, and look it up in commands
1298 raise errors.ProgrammerError("Alias '%s' overrides an existing"
1301 if aliases[cmd] not in commands:
1302 raise errors.ProgrammerError("Alias '%s' maps to non-existing"
1303 " command '%s'" % (cmd, aliases[cmd]))
# commands[cmd] is a 5-tuple; index 4 (help text) was used above.
1307 func, args_def, parser_opts, usage, description = commands[cmd]
1308 parser = OptionParser(option_list=parser_opts + COMMON_OPTS,
1309 description=description,
1310 formatter=TitledHelpFormatter(),
1311 usage="%%prog %s %s" % (cmd, usage))
1312 parser.disable_interspersed_args()
1313 options, args = parser.parse_args()
1315 if not _CheckArguments(cmd, args_def, args):
1316 return None, None, None
1318 return func, options, args
# NOTE(review): elided listing — the min_count/max_count initializations,
# several `return False` statements after the ToStderr calls, and the final
# `return True` (orig. lines between the visible ones) are missing from view.
1321 def _CheckArguments(cmd, args_def, args):
1322 """Verifies the arguments using the argument definition.
1326 1. Abort with error if values specified by user but none expected.
1328 1. For each argument in definition
1330 1. Keep running count of minimum number of values (min_count)
1331 1. Keep running count of maximum number of values (max_count)
1332 1. If it has an unlimited number of values
1334 1. Abort with error if it's not the last argument in the definition
1336 1. If last argument has limited number of values
1338 1. Abort with error if number of values doesn't match or is too large
1340 1. Abort with error if user didn't pass enough values (min_count)
1343 if args and not args_def:
1344 ToStderr("Error: Command %s expects no arguments", cmd)
1351 last_idx = len(args_def) - 1
1353 for idx, arg in enumerate(args_def):
1354 if min_count is None:
1356 elif arg.min is not None:
1357 min_count += arg.min
1359 if max_count is None:
1361 elif arg.max is not None:
1362 max_count += arg.max
# Only the final argument definition may be unbounded (max=None).
1365 check_max = (arg.max is not None)
1367 elif arg.max is None:
1368 raise errors.ProgrammerError("Only the last argument can have max=None")
1371 # Command with exact number of arguments
1372 if (min_count is not None and max_count is not None and
1373 min_count == max_count and len(args) != min_count):
1374 ToStderr("Error: Command %s expects %d argument(s)", cmd, min_count)
1377 # Command with limited number of arguments
1378 if max_count is not None and len(args) > max_count:
1379 ToStderr("Error: Command %s expects only %d argument(s)",
1383 # Command with some required arguments
1384 if min_count is not None and len(args) < min_count:
1385 ToStderr("Error: Command %s expects at least %d argument(s)",
# Splits "pnode:snode" into a 2-tuple; a value with no colon yields
# (value, None). NOTE(review): the `else:` line (orig. 1398) is elided.
1392 def SplitNodeOption(value):
1393 """Splits the value of a --node option.
1396 if value and ':' in value:
1397 return value.split(':', 1)
1399 return (value, None)
# NOTE(review): the branch returning just [os_name] when os_variants is
# empty/None (orig. lines 1411-1413) is elided from this listing.
1402 def CalculateOSNames(os_name, os_variants):
1403 """Calculates all the names an OS can be called, according to its variants.
1405 @type os_name: string
1406 @param os_name: base name of the os
1407 @type os_variants: list or None
1408 @param os_variants: list of supported variants
1410 @return: list of valid names
# Variant names are "<os>+<variant>".
1414 return ['%s+%s' % (os_name, v) for v in os_variants]
# NOTE(review): the `return default` under the None check (orig. 1429/1430)
# is elided from this listing.
1419 def ParseFields(selected, default):
1420 """Parses the values of "--field"-like options.
1422 @type selected: string or None
1423 @param selected: User-selected options
1425 @param default: Default fields
1428 if selected is None:
# A leading "+" means "append these fields to the defaults".
1431 if selected.startswith("+"):
1432 return default + selected[1:].split(",")
1434 return selected.split(",")
# Decorator alias: wraps a function so it runs with the RPC layer
# initialized (implementation lives in ganeti.rpc).
1437 UsesRPC = rpc.RunWithRPC
# NOTE(review): elided listing — the try/except around opening /dev/tty,
# the input loop, and the final return (orig. lines between/after the
# visible ones) are missing from view; restore from VCS before editing.
1440 def AskUser(text, choices=None):
1441 """Ask the user a question.
1443 @param text: the question to ask
1445 @param choices: list with elements tuples (input_char, return_value,
1446 description); if not given, it will default to: [('y', True,
1447 'Perform the operation'), ('n', False, 'Do not do the operation')];
1448 note that the '?' char is reserved for help
1450 @return: one of the return values from the choices list; if input is
1451 not possible (i.e. not running with a tty, we return the last
1456 choices = [('y', True, 'Perform the operation'),
1457 ('n', False, 'Do not perform the operation')]
1458 if not choices or not isinstance(choices, list):
1459 raise errors.ProgrammerError("Invalid choices argument to AskUser")
1460 for entry in choices:
1461 if not isinstance(entry, tuple) or len(entry) < 3 or entry[0] == '?':
1462 raise errors.ProgrammerError("Invalid choices element to AskUser")
# Default answer when no tty is available: last choice's return value.
1464 answer = choices[-1][1]
1466 for line in text.splitlines():
1467 new_text.append(textwrap.fill(line, 70, replace_whitespace=False))
1468 text = "\n".join(new_text)
# Python 2 builtin `file`; opened append+read so we can prompt and read back.
1470 f = file("/dev/tty", "a+")
1474 chars = [entry[0] for entry in choices]
1475 chars[-1] = "[%s]" % chars[-1]
1477 maps = dict([(entry[0], entry[1]) for entry in choices])
1481 f.write("/".join(chars))
# readline(2): read at most one answer char plus newline.
1483 line = f.readline(2).strip().lower()
1488 for entry in choices:
1489 f.write(" %s - %s\n" % (entry[0], entry[2]))
# Raised by SubmitOrSend when --submit is given; carries the job ID.
1497 class JobSubmittedException(Exception):
1498 """Job was submitted, client should exit.
1500 This exception has one argument, the ID of the job that was
1501 submitted. The handler should print this ID.
1503 This is not an error, just a structured way to exit from clients.
# NOTE(review): elided — the default-client creation (when cl is None) and
# the `return job_id` (orig. lines around 1519/1523) are missing from view.
1508 def SendJob(ops, cl=None):
1509 """Function to submit an opcode without waiting for the results.
1512 @param ops: list of opcodes
1513 @type cl: luxi.Client
1514 @param cl: the luxi client to use for communicating with the master;
1515 if None, a new client will be created
1521 job_id = cl.SubmitJob(ops)
# NOTE(review): elided listing — the polling `while True:` loop header, the
# `break` on terminal status, and several `continue` statements are not
# visible; the structure below must be read together with the VCS original.
1526 def GenericPollJob(job_id, cbs, report_cbs):
1527 """Generic job-polling function.
1529 @type job_id: number
1530 @param job_id: Job ID
1531 @type cbs: Instance of L{JobPollCbBase}
1532 @param cbs: Data callbacks
1533 @type report_cbs: Instance of L{JobPollReportCbBase}
1534 @param report_cbs: Reporting callbacks
1537 prev_job_info = None
1538 prev_logmsg_serial = None
1543 result = cbs.WaitForJobChangeOnce(job_id, ["status"], prev_job_info,
1546 # job not found, go away!
1547 raise errors.JobLost("Job with id %s lost" % job_id)
1549 if result == constants.JOB_NOTCHANGED:
1550 report_cbs.ReportNotChanged(job_id, status)
1555 # Split result, a tuple of (field values, log entries)
1556 (job_info, log_entries) = result
1557 (status, ) = job_info
1560 for log_entry in log_entries:
1561 (serial, timestamp, log_type, message) = log_entry
1562 report_cbs.ReportLogMessage(job_id, serial, timestamp,
# Track the highest log serial seen so WaitForJobChangeOnce can skip old ones.
1564 prev_logmsg_serial = max(prev_logmsg_serial, serial)
1566 # TODO: Handle canceled and archived jobs
1567 elif status in (constants.JOB_STATUS_SUCCESS,
1568 constants.JOB_STATUS_ERROR,
1569 constants.JOB_STATUS_CANCELING,
1570 constants.JOB_STATUS_CANCELED):
1573 prev_job_info = job_info
# Job reached a terminal state: fetch per-opcode status and results.
1575 jobs = cbs.QueryJobs([job_id], ["status", "opstatus", "opresult"])
1577 raise errors.JobLost("Job with id %s lost" % job_id)
1579 status, opstatus, result = jobs[0]
1581 if status == constants.JOB_STATUS_SUCCESS:
1584 if status in (constants.JOB_STATUS_CANCELING, constants.JOB_STATUS_CANCELED):
1585 raise errors.OpExecError("Job was canceled")
# Find the first failed opcode and raise the most specific error possible.
1588 for idx, (status, msg) in enumerate(zip(opstatus, result)):
1589 if status == constants.OP_STATUS_SUCCESS:
1591 elif status == constants.OP_STATUS_ERROR:
1592 errors.MaybeRaise(msg)
1595 raise errors.OpExecError("partial failure (opcode %d): %s" %
1598 raise errors.OpExecError(str(msg))
1600 # default failure mode
1601 raise errors.OpExecError(result)
# Abstract data-access callbacks for GenericPollJob; subclasses supply the
# actual transport (see _LuxiJobPollCb below).
1604 class JobPollCbBase:
1605 """Base class for L{GenericPollJob} callbacks.
1609 """Initializes this class.
1613 def WaitForJobChangeOnce(self, job_id, fields,
1614 prev_job_info, prev_log_serial):
1615 """Waits for changes on a job.
1618 raise NotImplementedError()
1620 def QueryJobs(self, job_ids, fields):
1621 """Returns the selected fields for the selected job IDs.
1623 @type job_ids: list of numbers
1624 @param job_ids: Job IDs
1625 @type fields: list of strings
1626 @param fields: Fields
1629 raise NotImplementedError()
# Abstract reporting callbacks for GenericPollJob (log messages and
# "still waiting" notifications).
1632 class JobPollReportCbBase:
1633 """Base class for L{GenericPollJob} reporting callbacks.
1637 """Initializes this class.
1641 def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
1642 """Handles a log message.
1645 raise NotImplementedError()
1647 def ReportNotChanged(self, job_id, status):
1648 """Called if a job hasn't changed in a while.
1650 @type job_id: number
1651 @param job_id: Job ID
1652 @type status: string or None
1653 @param status: Job status if available
1656 raise NotImplementedError()
# JobPollCbBase implementation that delegates to a luxi client.
# NOTE(review): the `self.cl = cl` assignment (orig. ~1665) is elided.
1659 class _LuxiJobPollCb(JobPollCbBase):
1660 def __init__(self, cl):
1661 """Initializes this class.
1664 JobPollCbBase.__init__(self)
1667 def WaitForJobChangeOnce(self, job_id, fields,
1668 prev_job_info, prev_log_serial):
1669 """Waits for changes on a job.
1672 return self.cl.WaitForJobChangeOnce(job_id, fields,
1673 prev_job_info, prev_log_serial)
1675 def QueryJobs(self, job_ids, fields):
1676 """Returns the selected fields for the selected job IDs.
1679 return self.cl.QueryJobs(job_ids, fields)
# Reporting callbacks that forward log messages to a caller-supplied
# feedback function; ReportNotChanged is intentionally quiet (body elided).
1682 class FeedbackFnJobPollReportCb(JobPollReportCbBase):
1683 def __init__(self, feedback_fn):
1684 """Initializes this class.
1687 JobPollReportCbBase.__init__(self)
1689 self.feedback_fn = feedback_fn
1691 assert callable(feedback_fn)
1693 def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
1694 """Handles a log message.
# feedback_fn receives the raw (timestamp, type, message) triple.
1697 self.feedback_fn((timestamp, log_type, log_msg))
1699 def ReportNotChanged(self, job_id, status):
1700 """Called if a job hasn't changed in a while.
# Reporting callbacks that print to stdout/stderr for interactive use.
# NOTE(review): the `def __init__(self):` line (orig. 1707) is elided from
# this listing; the two flags below ensure each notice is printed only once.
1706 class StdioJobPollReportCb(JobPollReportCbBase):
1708 """Initializes this class.
1711 JobPollReportCbBase.__init__(self)
1713 self.notified_queued = False
1714 self.notified_waitlock = False
1716 def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
1717 """Handles a log message.
1720 ToStdout("%s %s", time.ctime(utils.MergeTime(timestamp)),
1721 FormatLogMessage(log_type, log_msg))
1723 def ReportNotChanged(self, job_id, status):
1724 """Called if a job hasn't changed in a while.
1730 if status == constants.JOB_STATUS_QUEUED and not self.notified_queued:
1731 ToStderr("Job %s is waiting in queue", job_id)
1732 self.notified_queued = True
1734 elif status == constants.JOB_STATUS_WAITLOCK and not self.notified_waitlock:
1735 ToStderr("Job %s is trying to acquire all necessary locks", job_id)
1736 self.notified_waitlock = True
# Non-ELOG_MESSAGE payloads are stringified first; everything goes through
# utils.SafeEncode before being shown to the user.
1739 def FormatLogMessage(log_type, log_msg):
1740 """Formats a job message according to its type.
1743 if log_type != constants.ELOG_MESSAGE:
1744 log_msg = str(log_msg)
1746 return utils.SafeEncode(log_msg)
# NOTE(review): elided — the default-client creation and the elif/else
# structure around the reporter selection (orig. 1763/1765/1767) are not
# visible; the visible lines show: feedback_fn wins if given, else stdio,
# and passing both reporter and feedback_fn is a programmer error.
1749 def PollJob(job_id, cl=None, feedback_fn=None, reporter=None):
1750 """Function to poll for the result of a job.
1752 @type job_id: job identifier
1753 @param job_id: the job to poll for results
1754 @type cl: luxi.Client
1755 @param cl: the luxi client to use for communicating with the master;
1756 if None, a new client will be created
1762 if reporter is None:
1764 reporter = FeedbackFnJobPollReportCb(feedback_fn)
1766 reporter = StdioJobPollReportCb()
1768 raise errors.ProgrammerError("Can't specify reporter and feedback function")
1770 return GenericPollJob(job_id, _LuxiJobPollCb(cl), reporter)
# Submit a single opcode and block until it finishes; returns the first
# (only) opcode's result. NOTE(review): default-client creation (orig.
# ~1781-1783) is elided from this listing.
1773 def SubmitOpCode(op, cl=None, feedback_fn=None, opts=None, reporter=None):
1774 """Legacy function to submit an opcode.
1776 This is just a simple wrapper over the construction of the processor
1777 instance. It should be extended to better handle feedback and
1778 interaction functions.
1784 SetGenericOpcodeOpts([op], opts)
1786 job_id = SendJob([op], cl=cl)
1788 op_results = PollJob(job_id, cl=cl, feedback_fn=feedback_fn,
1791 return op_results[0]
# NOTE(review): the `job = [op]` assignment (orig. ~1807) is elided; the
# visible code raises JobSubmittedException (not an error — see its class
# docstring) so the caller prints the job ID and exits.
1794 def SubmitOrSend(op, opts, cl=None, feedback_fn=None):
1795 """Wrapper around SubmitOpCode or SendJob.
1797 This function will decide, based on the 'opts' parameter, whether to
1798 submit and wait for the result of the opcode (and return it), or
1799 whether to just send the job and print its identifier. It is used in
1800 order to simplify the implementation of the '--submit' option.
1802 It will also process the opcodes if we're sending them via SendJob
1803 (otherwise SubmitOpCode does it).
1806 if opts and opts.submit_only:
1808 SetGenericOpcodeOpts(job, opts)
1809 job_id = SendJob(job, cl=cl)
1810 raise JobSubmittedException(job_id)
1812 return SubmitOpCode(op, cl=cl, feedback_fn=feedback_fn, opts=opts)
# NOTE(review): the early `if not options: return` guard (orig. ~1826-1827)
# is elided from this listing.
1815 def SetGenericOpcodeOpts(opcode_list, options):
1816 """Processor for generic options.
1818 This function updates the given opcodes based on generic command
1819 line options (like debug, dry-run, etc.).
1821 @param opcode_list: list of opcodes
1822 @param options: command line options or None
1823 @return: None (in-place modification)
1828 for op in opcode_list:
1829 op.debug_level = options.debug
# dry_run/priority are optional attributes — set only when present.
1830 if hasattr(options, "dry_run"):
1831 op.dry_run = options.dry_run
1832 if getattr(options, "priority", None) is not None:
1833 op.priority = _PRIONAME_TO_VALUE[options.priority]
# NOTE(review): the enclosing `def` line for this body is elided from the
# listing (orig. ~1836) — presumably a client-construction helper (e.g.
# GetClient); confirm against VCS. On NoMasterError it distinguishes
# "not a cluster member" from "not the master node" via ssconf.
1837 # TODO: Cache object?
1839 client = luxi.Client()
1840 except luxi.NoMasterError:
1841 ss = ssconf.SimpleStore()
1843 # Try to read ssconf file
1846 except errors.ConfigurationError:
1847 raise errors.OpPrereqError("Cluster not initialized or this machine is"
1848 " not part of a cluster")
1850 master, myself = ssconf.GetMasterAndMyself(ss=ss)
1851 if master != myself:
1852 raise errors.OpPrereqError("This is not the master node, please connect"
1853 " to node '%s' and rerun the command" %
# NOTE(review): elided — the `retcode`/`obuf`/`msg` initializations (orig.
# ~1866-1870) and several retcode assignments between the visible branches
# are missing from this listing.
1859 def FormatError(err):
1860 """Return a formatted error message for a given error.
1862 This function takes an exception instance and returns a tuple
1863 consisting of two values: first, the recommended exit code, and
1864 second, a string describing the error message (not
1865 newline-terminated).
# Long isinstance chain: most-specific error types first, generic last.
1871 if isinstance(err, errors.ConfigurationError):
1872 txt = "Corrupt configuration file: %s" % msg
1874 obuf.write(txt + "\n")
1875 obuf.write("Aborting.")
1877 elif isinstance(err, errors.HooksAbort):
1878 obuf.write("Failure: hooks execution failed:\n")
1879 for node, script, out in err.args[0]:
1881 obuf.write(" node: %s, script: %s, output: %s\n" %
1882 (node, script, out))
1884 obuf.write(" node: %s, script: %s (no output)\n" %
1886 elif isinstance(err, errors.HooksFailure):
1887 obuf.write("Failure: hooks general failure: %s" % msg)
1888 elif isinstance(err, errors.ResolverError):
1889 this_host = netutils.Hostname.GetSysName()
1890 if err.args[0] == this_host:
1891 msg = "Failure: can't resolve my own hostname ('%s')"
1893 msg = "Failure: can't resolve hostname '%s'"
1894 obuf.write(msg % err.args[0])
1895 elif isinstance(err, errors.OpPrereqError):
# Two-argument OpPrereqError carries (details, error_type).
1896 if len(err.args) == 2:
1897 obuf.write("Failure: prerequisites not met for this"
1898 " operation:\nerror type: %s, error details:\n%s" %
1899 (err.args[1], err.args[0]))
1901 obuf.write("Failure: prerequisites not met for this"
1902 " operation:\n%s" % msg)
1903 elif isinstance(err, errors.OpExecError):
1904 obuf.write("Failure: command execution error:\n%s" % msg)
1905 elif isinstance(err, errors.TagError):
1906 obuf.write("Failure: invalid tag(s) given:\n%s" % msg)
1907 elif isinstance(err, errors.JobQueueDrainError):
1908 obuf.write("Failure: the job queue is marked for drain and doesn't"
1909 " accept new requests\n")
1910 elif isinstance(err, errors.JobQueueFull):
1911 obuf.write("Failure: the job queue is full and doesn't accept new"
1912 " job submissions until old jobs are archived\n")
1913 elif isinstance(err, errors.TypeEnforcementError):
1914 obuf.write("Parameter Error: %s" % msg)
1915 elif isinstance(err, errors.ParameterError):
1916 obuf.write("Failure: unknown/wrong parameter name '%s'" % msg)
1917 elif isinstance(err, luxi.NoMasterError):
1918 obuf.write("Cannot communicate with the master daemon.\nIs it running"
1919 " and listening for connections?")
1920 elif isinstance(err, luxi.TimeoutError):
1921 obuf.write("Timeout while talking to the master daemon. Jobs might have"
1922 " been submitted and will continue to run even if the call"
1923 " timed out. Useful commands in this situation are \"gnt-job"
1924 " list\", \"gnt-job cancel\" and \"gnt-job watch\". Error:\n")
1926 elif isinstance(err, luxi.PermissionError):
1927 obuf.write("It seems you don't have permissions to connect to the"
1928 " master daemon.\nPlease retry as a different user.")
1929 elif isinstance(err, luxi.ProtocolError):
1930 obuf.write("Unhandled protocol error while talking to the master daemon:\n"
1932 elif isinstance(err, errors.JobLost):
1933 obuf.write("Error checking job status: %s" % msg)
1934 elif isinstance(err, errors.QueryFilterParseError):
1935 obuf.write("Error while parsing query filter: %s\n" % err.args[0])
1936 obuf.write("\n".join(err.GetDetails()))
1937 elif isinstance(err, errors.GenericError):
1938 obuf.write("Unhandled Ganeti error: %s" % msg)
1939 elif isinstance(err, JobSubmittedException):
# Not a real error: report the job ID of the submitted job.
1940 obuf.write("JobID: %s\n" % err.args[0])
1943 obuf.write("Unhandled exception: %s" % msg)
1944 return retcode, obuf.getvalue().rstrip('\n')
# NOTE(review): elided — the try/except wrappers around argv handling and
# _ParseArgs, the ToStderr+return after FormatError, and the final
# `return result` (orig. lines between the visible ones) are missing here.
1947 def GenericMain(commands, override=None, aliases=None):
1948 """Generic main function for all the gnt-* commands.
1951 - commands: a dictionary with a special structure, see the design doc
1952 for command line handling.
1953 - override: if not None, we expect a dictionary with keys that will
1954 override command line options; this can be used to pass
1955 options from the scripts to generic functions
1956 - aliases: dictionary with command aliases {'alias': 'target', ...}
1959 # save the program name and the entire command line for later logging
1961 binary = os.path.basename(sys.argv[0]) or sys.argv[0]
1962 if len(sys.argv) >= 2:
1963 binary += " " + sys.argv[1]
1964 old_cmdline = " ".join(sys.argv[2:])
1968 binary = "<unknown program>"
1975 func, options, args = _ParseArgs(sys.argv, commands, aliases)
1976 except errors.ParameterError, err:
1977 result, err_msg = FormatError(err)
1981 if func is None: # parse error
# Caller-supplied overrides win over parsed command line options.
1984 if override is not None:
1985 for key, val in override.iteritems():
1986 setattr(options, key, val)
1988 utils.SetupLogging(constants.LOG_COMMANDS, binary, debug=options.debug,
1989 stderr_logging=True)
1992 logging.info("run with arguments '%s'", old_cmdline)
1994 logging.info("run with no arguments")
1997 result = func(options, args)
1998 except (errors.GenericError, luxi.ProtocolError,
1999 JobSubmittedException), err:
2000 result, err_msg = FormatError(err)
2001 logging.exception("Error during command processing")
2003 except KeyboardInterrupt:
2004 result = constants.EXIT_FAILURE
2005 ToStderr("Aborted. Note that if the operation created any jobs, they"
2006 " might have been submitted and"
2007 " will continue to run in the background.")
2008 except IOError, err:
2009 if err.errno == errno.EPIPE:
2010 # our terminal went away, we'll exit
2011 sys.exit(constants.EXIT_FAILURE)
# NOTE(review): elided — the `try:` before the max() call and the per-NIC
# assignment into `nics` (orig. ~2022, 2029-2030, 2036+) are missing.
2018 def ParseNicOption(optvalue):
2019 """Parses the value of the --net option(s).
2023 nic_max = max(int(nidx[0]) + 1 for nidx in optvalue)
2024 except (TypeError, ValueError), err:
2025 raise errors.OpPrereqError("Invalid NIC index passed: %s" % str(err))
# NOTE(review): [{}] * n makes every slot the SAME dict object — presumably
# each indexed slot is reassigned in the elided loop body; confirm from VCS.
2027 nics = [{}] * nic_max
2028 for nidx, ndict in optvalue:
2031 if not isinstance(ndict, dict):
2032 raise errors.OpPrereqError("Invalid nic/%d value: expected dict,"
2033 " got %s" % (nidx, ndict))
2035 utils.ForceDictType(ndict, constants.INIC_PARAMS_TYPES)
# NOTE(review): heavily elided listing — argument extraction, the `try:`
# lines around parsing, the per-disk assignment into `disks`, the
# mode-specific branches' remaining assignments, and the final return
# (plus several OpInstanceCreate keyword lines) are not visible here.
2042 def GenericInstanceCreate(mode, opts, args):
2043 """Add an instance to the cluster via either creation or import.
2045 @param mode: constants.INSTANCE_CREATE or constants.INSTANCE_IMPORT
2046 @param opts: the command line options selected by the user
2048 @param args: should contain only one element, the new instance name
2050 @return: the desired exit code
2055 (pnode, snode) = SplitNodeOption(opts.node)
2060 hypervisor, hvparams = opts.hypervisor
2063 nics = ParseNicOption(opts.nics)
2067 elif mode == constants.INSTANCE_CREATE:
2068 # default of one nic, all auto
# Disk handling: diskless forbids any disk info; otherwise either --disk
# or -s (sd_size) must be given for creation, but not both.
2074 if opts.disk_template == constants.DT_DISKLESS:
2075 if opts.disks or opts.sd_size is not None:
2076 raise errors.OpPrereqError("Diskless instance but disk"
2077 " information passed")
2080 if (not opts.disks and not opts.sd_size
2081 and mode == constants.INSTANCE_CREATE):
2082 raise errors.OpPrereqError("No disk information specified")
2083 if opts.disks and opts.sd_size is not None:
2084 raise errors.OpPrereqError("Please use either the '--disk' or"
2086 if opts.sd_size is not None:
2087 opts.disks = [(0, {constants.IDISK_SIZE: opts.sd_size})]
2091 disk_max = max(int(didx[0]) + 1 for didx in opts.disks)
2092 except ValueError, err:
2093 raise errors.OpPrereqError("Invalid disk index passed: %s" % str(err))
# NOTE(review): same shared-dict caveat as in ParseNicOption — presumably
# each slot is reassigned in the elided loop body; confirm from VCS.
2094 disks = [{}] * disk_max
2097 for didx, ddict in opts.disks:
2099 if not isinstance(ddict, dict):
2100 msg = "Invalid disk/%d value: expected dict, got %s" % (didx, ddict)
2101 raise errors.OpPrereqError(msg)
2102 elif constants.IDISK_SIZE in ddict:
2103 if constants.IDISK_ADOPT in ddict:
2104 raise errors.OpPrereqError("Only one of 'size' and 'adopt' allowed"
2105 " (disk %d)" % didx)
2107 ddict[constants.IDISK_SIZE] = \
2108 utils.ParseUnit(ddict[constants.IDISK_SIZE])
2109 except ValueError, err:
2110 raise errors.OpPrereqError("Invalid disk size for disk %d: %s" %
2112 elif constants.IDISK_ADOPT in ddict:
2113 if mode == constants.INSTANCE_IMPORT:
2114 raise errors.OpPrereqError("Disk adoption not allowed for instance"
# Adopted disks get size 0 here; the real size is read during adoption.
2116 ddict[constants.IDISK_SIZE] = 0
2118 raise errors.OpPrereqError("Missing size or adoption source for"
2122 if opts.tags is not None:
2123 tags = opts.tags.split(",")
2127 utils.ForceDictType(opts.beparams, constants.BES_PARAMETER_TYPES)
2128 utils.ForceDictType(hvparams, constants.HVS_PARAMETER_TYPES)
2130 if mode == constants.INSTANCE_CREATE:
2133 force_variant = opts.force_variant
2136 no_install = opts.no_install
2137 identify_defaults = False
2138 elif mode == constants.INSTANCE_IMPORT:
2141 force_variant = False
2142 src_node = opts.src_node
2143 src_path = opts.src_dir
2145 identify_defaults = opts.identify_defaults
2147 raise errors.ProgrammerError("Invalid creation mode %s" % mode)
2149 op = opcodes.OpInstanceCreate(instance_name=instance,
2151 disk_template=opts.disk_template,
2153 pnode=pnode, snode=snode,
2154 ip_check=opts.ip_check,
2155 name_check=opts.name_check,
2156 wait_for_sync=opts.wait_for_sync,
2157 file_storage_dir=opts.file_storage_dir,
2158 file_driver=opts.file_driver,
2159 iallocator=opts.iallocator,
2160 hypervisor=hypervisor,
2162 beparams=opts.beparams,
2163 osparams=opts.osparams,
2167 force_variant=force_variant,
2171 no_install=no_install,
2172 identify_defaults=identify_defaults)
2174 SubmitOrSend(op, opts)
# NOTE(review): elided listing — the `else:` branches, the success-path
# return of _RunCmd, and the try/finally structure of Call() (daemons are
# restarted in a finally block in the original) are not fully visible.
2178 class _RunWhileClusterStoppedHelper:
2179 """Helper class for L{RunWhileClusterStopped} to simplify state management
2182 def __init__(self, feedback_fn, cluster_name, master_node, online_nodes):
2183 """Initializes this class.
2185 @type feedback_fn: callable
2186 @param feedback_fn: Feedback function
2187 @type cluster_name: string
2188 @param cluster_name: Cluster name
2189 @type master_node: string
2190 @param master_node: Master node name
2191 @type online_nodes: list
2192 @param online_nodes: List of names of online nodes
2195 self.feedback_fn = feedback_fn
2196 self.cluster_name = cluster_name
2197 self.master_node = master_node
2198 self.online_nodes = online_nodes
2200 self.ssh = ssh.SshRunner(self.cluster_name)
2202 self.nonmaster_nodes = [name for name in online_nodes
2203 if name != master_node]
2205 assert self.master_node not in self.nonmaster_nodes
2207 def _RunCmd(self, node_name, cmd):
2208 """Runs a command on the local or a remote machine.
2210 @type node_name: string
2211 @param node_name: Machine name
# node_name None means "run locally" (we are on the master).
2216 if node_name is None or node_name == self.master_node:
2217 # No need to use SSH
2218 result = utils.RunCmd(cmd)
2220 result = self.ssh.Run(node_name, "root", utils.ShellQuoteArgs(cmd))
2223 errmsg = ["Failed to run command %s" % result.cmd]
2225 errmsg.append("on node %s" % node_name)
2226 errmsg.append(": exitcode %s and error %s" %
2227 (result.exit_code, result.output))
2228 raise errors.OpExecError(" ".join(errmsg))
2230 def Call(self, fn, *args):
2231 """Call function while all daemons are stopped.
2234 @param fn: Function to be called
2237 # Pause watcher by acquiring an exclusive lock on watcher state file
2238 self.feedback_fn("Blocking watcher")
2239 watcher_block = utils.FileLock.Open(constants.WATCHER_STATEFILE)
2241 # TODO: Currently, this just blocks. There's no timeout.
2242 # TODO: Should it be a shared lock?
2243 watcher_block.Exclusive(blocking=True)
2245 # Stop master daemons, so that no new jobs can come in and all running
2247 self.feedback_fn("Stopping master daemons")
2248 self._RunCmd(None, [constants.DAEMON_UTIL, "stop-master"])
2250 # Stop daemons on all nodes
2251 for node_name in self.online_nodes:
2252 self.feedback_fn("Stopping daemons on %s" % node_name)
2253 self._RunCmd(node_name, [constants.DAEMON_UTIL, "stop-all"])
2255 # All daemons are shut down now
# fn receives this helper as its first argument so it can use _RunCmd.
2257 return fn(self, *args)
2258 except Exception, err:
2259 _, errmsg = FormatError(err)
2260 logging.exception("Caught exception")
2261 self.feedback_fn(errmsg)
2264 # Start cluster again, master node last
2265 for node_name in self.nonmaster_nodes + [self.master_node]:
2266 self.feedback_fn("Starting daemons on %s" % node_name)
2267 self._RunCmd(node_name, [constants.DAEMON_UTIL, "start-all"])
2270 watcher_block.Close()
# NOTE(review): the client creation (`cl = GetClient()`-style, orig. ~2285)
# and `del cl` (orig. ~2293) are elided from this listing.
2273 def RunWhileClusterStopped(feedback_fn, fn, *args):
2274 """Calls a function while all cluster daemons are stopped.
2276 @type feedback_fn: callable
2277 @param feedback_fn: Feedback function
2279 @param fn: Function to be called when daemons are stopped
2282 feedback_fn("Gathering cluster information")
2284 # This ensures we're running on the master daemon
2287 (cluster_name, master_node) = \
2288 cl.QueryConfigValues(["cluster_name", "master_node"])
2290 online_nodes = GetOnlineNodes([], cl=cl)
2292 # Don't keep a reference to the client. The master daemon will go away.
2295 assert master_node in online_nodes
2297 return _RunWhileClusterStoppedHelper(feedback_fn, cluster_name, master_node,
2298 online_nodes).Call(fn, *args)
# NOTE(review): elided listing — the `units` parameter and its defaulting,
# the `format_fields`/`result`/`args` initializations, the try: around the
# unit conversion, the header row construction, and the final return are
# missing from view. Restore from VCS before editing this block.
2301 def GenerateTable(headers, fields, separator, data,
2302 numfields=None, unitfields=None,
2304 """Prints a table with headers and different fields.
2307 @param headers: dictionary mapping field names to headers for
2310 @param fields: the field names corresponding to each row in
2312 @param separator: the separator to be used; if this is None,
2313 the default 'smart' algorithm is used which computes optimal
2314 field width, otherwise just the separator is used between
2317 @param data: a list of lists, each sublist being one row to be output
2318 @type numfields: list
2319 @param numfields: a list with the fields that hold numeric
2320 values and thus should be right-aligned
2321 @type unitfields: list
2322 @param unitfields: a list with the fields that hold numeric
2323 values that should be formatted with the units field
2324 @type units: string or None
2325 @param units: the units we should use for formatting, or None for
2326 automatic choice (human-readable for non-separator usage, otherwise
2327 megabytes); this is a one-letter string
2336 if numfields is None:
2338 if unitfields is None:
2341 numfields = utils.FieldSet(*numfields) # pylint: disable-msg=W0142
2342 unitfields = utils.FieldSet(*unitfields) # pylint: disable-msg=W0142
# Build a printf-style format per column; width-padded only in "smart" mode.
2345 for field in fields:
2346 if headers and field not in headers:
2347 # TODO: handle better unknown fields (either revert to old
2348 # style of raising exception, or deal more intelligently with
2350 headers[field] = field
2351 if separator is not None:
2352 format_fields.append("%s")
2353 elif numfields.Matches(field):
2354 format_fields.append("%*s")
2356 format_fields.append("%-*s")
2358 if separator is None:
2359 mlens = [0 for name in fields]
2360 format_str = ' '.join(format_fields)
# Escape "%" in a literal separator so it survives the final % formatting.
2362 format_str = separator.replace("%", "%%").join(format_fields)
2367 for idx, val in enumerate(row):
2368 if unitfields.Matches(fields[idx]):
2371 except (TypeError, ValueError):
2374 val = row[idx] = utils.FormatUnit(val, units)
2375 val = row[idx] = str(val)
2376 if separator is None:
2377 mlens[idx] = max(mlens[idx], len(val))
2382 for idx, name in enumerate(fields):
2384 if separator is None:
2385 mlens[idx] = max(mlens[idx], len(hdr))
2386 args.append(mlens[idx])
2388 result.append(format_str % tuple(args))
2390 if separator is None:
2391 assert len(mlens) == len(fields)
# Don't pad the last column unless it is right-aligned (numeric).
2393 if fields and not numfields.Matches(fields[-1]):
2399 line = ['-' for _ in fields]
2400 for idx in range(len(fields)):
2401 if separator is None:
2402 args.append(mlens[idx])
2403 args.append(line[idx])
2404 result.append(format_str % tuple(args))
# NOTE(review): the function body (orig. 2411-2415) is entirely elided from
# this listing — presumably returns fixed strings for True/False; confirm.
2409 def _FormatBool(value):
2410 """Formats a boolean value as a string.
# Maps query field kinds to (formatter callable, right-align flag); used by
# _GetColumnFormatter. QFT_UNIT is deliberately absent (handled dynamically
# there). NOTE(review): the closing brace (orig. 2426) is elided.
2418 #: Default formatting for query results; (callback, align right)
2419 _DEFAULT_FORMAT_QUERY = {
2420 constants.QFT_TEXT: (str, False),
2421 constants.QFT_BOOL: (_FormatBool, False),
2422 constants.QFT_NUMBER: (str, True),
2423 constants.QFT_TIMESTAMP: (utils.FormatTime, False),
2424 constants.QFT_OTHER: (str, False),
2425 constants.QFT_UNKNOWN: (str, False),
# NOTE(review): elided — the `if fmt is not None: return fmt` checks after
# the two lookups (orig. ~2444-2445, 2454-2455) are missing from view.
2429 def _GetColumnFormatter(fdef, override, unit):
2430 """Returns formatting function for a field.
2432 @type fdef: L{objects.QueryFieldDefinition}
2433 @type override: dict
2434 @param override: Dictionary for overriding field formatting functions,
2435 indexed by field name, contents like L{_DEFAULT_FORMAT_QUERY}
2437 @param unit: Unit used for formatting fields of type L{constants.QFT_UNIT}
2438 @rtype: tuple; (callable, bool)
2439 @return: Returns the function to format a value (takes one parameter) and a
2440 boolean for aligning the value on the right-hand side
# Per-field override wins over the kind-based default.
2443 fmt = override.get(fdef.name, None)
2447 assert constants.QFT_UNIT not in _DEFAULT_FORMAT_QUERY
2449 if fdef.kind == constants.QFT_UNIT:
2450 # Can't keep this information in the static dictionary
# Closure captures `unit` so the same table can format different units.
2451 return (lambda value: utils.FormatUnit(value, unit), True)
2453 fmt = _DEFAULT_FORMAT_QUERY.get(fdef.kind, None)
2457 raise NotImplementedError("Can't format column type '%s'" % fdef.kind)
class _QueryColumnFormatter:
  """Callable class for formatting fields of a query.

  """
  def __init__(self, fn, status_fn, verbose):
    """Initializes this class.

    @type fn: callable
    @param fn: Formatting function
    @type status_fn: callable
    @param status_fn: Function to report fields' status
    @type verbose: boolean
    @param verbose: whether to use verbose field descriptions or not

    """
    self._fn = fn
    self._status_fn = status_fn
    self._verbose = verbose

  def __call__(self, data):
    """Returns a field's string representation.

    """
    (status, value) = data

    # Report status so callers can collect per-status statistics
    self._status_fn(status)

    if status == constants.RS_NORMAL:
      return self._fn(value)

    assert value is None, \
           "Found value %r for abnormal status %s" % (value, status)

    return FormatResultError(status, self._verbose)
def FormatResultError(status, verbose):
  """Formats result status other than L{constants.RS_NORMAL}.

  @param status: The result status
  @type verbose: boolean
  @param verbose: Whether to return the verbose text
  @return: Text of result status

  """
  assert status != constants.RS_NORMAL, \
         "FormatResultError called with status equal to constants.RS_NORMAL"
  try:
    (verbose_text, normal_text) = constants.RSS_DESCRIPTION[status]
  except KeyError:
    raise NotImplementedError("Unknown status %s" % status)
  else:
    if verbose:
      return verbose_text
    return normal_text
def FormatQueryResult(result, unit=None, format_override=None, separator=None,
                      header=False, verbose=False):
  """Formats data in L{objects.QueryResponse}.

  @type result: L{objects.QueryResponse}
  @param result: result of query operation
  @type unit: string
  @param unit: Unit used for formatting fields of type L{constants.QFT_UNIT},
    see L{utils.text.FormatUnit}
  @type format_override: dict
  @param format_override: Dictionary for overriding field formatting functions,
    indexed by field name, contents like L{_DEFAULT_FORMAT_QUERY}
  @type separator: string or None
  @param separator: String used to separate fields
  @type header: bool
  @param header: Whether to output header row
  @type verbose: boolean
  @param verbose: whether to use verbose field descriptions or not

  """
  if unit is None:
    # Human-readable units for screen output, megabytes for parseable
    # (separator-based) output
    if separator:
      unit = "m"
    else:
      unit = "h"

  if format_override is None:
    format_override = {}

  stats = dict.fromkeys(constants.RS_ALL, 0)

  def _RecordStatus(status):
    if status in stats:
      stats[status] += 1

  columns = []
  for fdef in result.fields:
    assert fdef.title and fdef.name
    (fn, align_right) = _GetColumnFormatter(fdef, format_override, unit)
    columns.append(TableColumn(fdef.title,
                               _QueryColumnFormatter(fn, _RecordStatus,
                                                     verbose),
                               align_right))

  table = FormatTable(result.data, columns, header, separator)

  # Collect statistics
  assert len(stats) == len(constants.RS_ALL)
  assert compat.all(count >= 0 for count in stats.values())

  # Determine overall status. If there was no data, unknown fields must be
  # detected via the field definitions.
  if (stats[constants.RS_UNKNOWN] or
      (not result.data and _GetUnknownFields(result.fields))):
    status = QR_UNKNOWN
  elif compat.any(count > 0 for key, count in stats.items()
                  if key != constants.RS_NORMAL):
    status = QR_INCOMPLETE
  else:
    status = QR_NORMAL

  return (status, table)
2582 def _GetUnknownFields(fdefs):
2583 """Returns list of unknown fields included in C{fdefs}.
2585 @type fdefs: list of L{objects.QueryFieldDefinition}
2588 return [fdef for fdef in fdefs
2589 if fdef.kind == constants.QFT_UNKNOWN]
def _WarnUnknownFields(fdefs):
  """Prints a warning to stderr if a query included unknown fields.

  @type fdefs: list of L{objects.QueryFieldDefinition}
  @rtype: bool
  @return: whether any unknown field was queried for

  """
  unknown = _GetUnknownFields(fdefs)
  if unknown:
    ToStderr("Warning: Queried for unknown fields %s",
             utils.CommaJoin(fdef.name for fdef in unknown))
    return True

  return False
def GenericList(resource, fields, names, unit, separator, header, cl=None,
                format_override=None, verbose=False, force_filter=False):
  """Generic implementation for listing all items of a resource.

  @param resource: One of L{constants.QR_VIA_LUXI}
  @type fields: list of strings
  @param fields: List of fields to query for
  @type names: list of strings
  @param names: Names of items to query for
  @type unit: string or None
  @param unit: Unit used for formatting fields of type L{constants.QFT_UNIT} or
    None for automatic choice (human-readable for non-separator usage,
    otherwise megabytes); this is a one-letter string
  @type separator: string or None
  @param separator: String used to separate fields
  @type header: bool
  @param header: Whether to show header row
  @type force_filter: bool
  @param force_filter: Whether to always treat names as filter
  @type format_override: dict
  @param format_override: Dictionary for overriding field formatting functions,
    indexed by field name, contents like L{_DEFAULT_FORMAT_QUERY}
  @type verbose: boolean
  @param verbose: whether to use verbose field descriptions or not

  """
  if not names:
    names = None

  # A single argument that looks like a filter (or force_filter) is parsed as
  # a query language filter; otherwise names are matched literally
  if (force_filter or
      (names and len(names) == 1 and qlang.MaybeFilter(names[0]))):
    try:
      (filter_text, ) = names
    except (TypeError, ValueError):
      raise errors.OpPrereqError("Exactly one argument must be given as a"
                                 " filter")

    logging.debug("Parsing '%s' as filter", filter_text)
    filter_ = qlang.ParseFilter(filter_text)
  else:
    filter_ = qlang.MakeSimpleFilter("name", names)

  if cl is None:
    cl = GetClient()

  response = cl.Query(resource, fields, filter_)

  found_unknown = _WarnUnknownFields(response.fields)

  (status, data) = FormatQueryResult(response, unit=unit, separator=separator,
                                     header=header,
                                     format_override=format_override,
                                     verbose=verbose)

  for line in data:
    ToStdout(line)

  assert ((found_unknown and status == QR_UNKNOWN) or
          (not found_unknown and status != QR_UNKNOWN))

  if status == QR_UNKNOWN:
    return constants.EXIT_UNKNOWN_FIELD

  # TODO: Should the list command fail if not all data could be collected?
  return constants.EXIT_SUCCESS
def GenericListFields(resource, fields, separator, header, cl=None):
  """Generic implementation for listing fields for a resource.

  @param resource: One of L{constants.QR_VIA_LUXI}
  @type fields: list of strings
  @param fields: List of fields to query for
  @type separator: string or None
  @param separator: String used to separate fields
  @type header: bool
  @param header: Whether to show header row

  """
  if cl is None:
    cl = GetClient()

  if not fields:
    # An empty list means "all fields" to the query layer
    fields = None

  response = cl.QueryFields(resource, fields)

  found_unknown = _WarnUnknownFields(response.fields)

  columns = [
    TableColumn("Name", str, False),
    TableColumn("Title", str, False),
    TableColumn("Description", str, False),
    ]

  rows = [[fdef.name, fdef.title, fdef.doc] for fdef in response.fields]

  for line in FormatTable(rows, columns, header, separator):
    ToStdout(line)

  if found_unknown:
    return constants.EXIT_UNKNOWN_FIELD

  return constants.EXIT_SUCCESS
class TableColumn:
  """Describes a column for L{FormatTable}.

  """
  def __init__(self, title, fn, align_right):
    """Initializes this class.

    @type title: string
    @param title: Column title
    @type fn: callable
    @param fn: Formatting function
    @type align_right: bool
    @param align_right: Whether to align values on the right-hand side

    """
    self.title = title
    self.format = fn
    self.align_right = align_right
2733 def _GetColFormatString(width, align_right):
2734 """Returns the format string for a field.
2742 return "%%%s%ss" % (sign, width)
def FormatTable(rows, columns, header, separator):
  """Formats data as a table.

  @type rows: list of lists
  @param rows: Row data, one list per row
  @type columns: list of L{TableColumn}
  @param columns: Column descriptions
  @type header: bool
  @param header: Whether to show header row
  @type separator: string or None
  @param separator: String used to separate columns
  @rtype: list of strings
  @return: the formatted table, one string per line

  """
  if header:
    data = [[col.title for col in columns]]
    colwidth = [len(col.title) for col in columns]
  else:
    data = []
    colwidth = [0 for _ in columns]

  # Format row data
  for row in rows:
    assert len(row) == len(columns)

    formatted = [col.format(value) for value, col in zip(row, columns)]

    if separator is None:
      # Update column widths
      for idx, (oldwidth, value) in enumerate(zip(colwidth, formatted)):
        # Modifying a list's items while iterating is fine
        colwidth[idx] = max(oldwidth, len(value))

    data.append(formatted)

  if separator is not None:
    # Return early if a separator is used
    return [separator.join(row) for row in data]

  if columns and not columns[-1].align_right:
    # Avoid unnecessary spaces at end of line
    colwidth[-1] = 0

  # Build format string
  fmt = " ".join([_GetColFormatString(width, col.align_right)
                  for col, width in zip(columns, colwidth)])

  return [fmt % tuple(row) for row in data]
def FormatTimestamp(ts):
  """Formats a given timestamp.

  @type ts: timestamp
  @param ts: a timeval-type timestamp, a tuple of seconds and microseconds

  @rtype: string
  @return: a string with the formatted timestamp, or "?" when the input is
      not a two-element tuple/list

  """
  if not isinstance(ts, (tuple, list)) or len(ts) != 2:
    return "?"

  (sec, usecs) = ts
  # %F/%T are the ISO date and time; microseconds are appended manually
  return time.strftime("%F %T", time.localtime(sec)) + ".%06d" % usecs
def ParseTimespec(value):
  """Parse a time specification.

  The following suffixed will be recognized:

    - s: seconds
    - m: minutes
    - h: hours
    - d: day
    - w: weeks

  Without any suffix, the value will be taken to be in seconds.

  @rtype: int
  @return: the number of seconds described by C{value}
  @raise errors.OpPrereqError: when the specification is empty or invalid

  """
  value = str(value)
  if not value:
    raise errors.OpPrereqError("Empty time specification passed")

  suffix_map = {
    "s": 1,
    "m": 60,
    "h": 3600,
    "d": 86400,
    "w": 604800,
    }

  if value[-1] not in suffix_map:
    try:
      value = int(value)
    except (TypeError, ValueError):
      raise errors.OpPrereqError("Invalid time specification '%s'" % value)
  else:
    multiplier = suffix_map[value[-1]]
    value = value[:-1]
    if not value: # no data left after stripping the suffix
      raise errors.OpPrereqError("Invalid time specification (only"
                                 " suffix passed)")
    try:
      value = int(value) * multiplier
    except (TypeError, ValueError):
      raise errors.OpPrereqError("Invalid time specification '%s'" % value)
  return value
def GetOnlineNodes(nodes, cl=None, nowarn=False, secondary_ips=False,
                   filter_master=False):
  """Returns the names of online nodes.

  This function will also log a warning on stderr with the names of
  the online nodes.

  @param nodes: if not empty, use only this subset of nodes (minus the
      offline ones)
  @param cl: if not None, luxi client to use
  @type nowarn: boolean
  @param nowarn: by default, this function will output a note with the
      offline nodes that are skipped; if this parameter is True the
      note is not displayed
  @type secondary_ips: boolean
  @param secondary_ips: if True, return the secondary IPs instead of the
      names, useful for doing network traffic over the replication interface
      (if any)
  @type filter_master: boolean
  @param filter_master: if True, do not return the master node in the list
      (useful in coordination with secondary_ips where we cannot check our
      node name against the list)

  """
  if cl is None:
    cl = GetClient()

  # Index 2 of the query result holds the secondary IP ("sip")
  if secondary_ips:
    name_idx = 2
  else:
    name_idx = 0

  if filter_master:
    master_node = cl.QueryConfigValues(["master_node"])[0]
    filter_fn = lambda x: x != master_node
  else:
    filter_fn = lambda _: True

  result = cl.QueryNodes(names=nodes, fields=["name", "offline", "sip"],
                         use_locking=False)
  offline = [row[0] for row in result if row[1]]
  if offline and not nowarn:
    ToStderr("Note: skipping offline node(s): %s" % utils.CommaJoin(offline))
  return [row[name_idx] for row in result if not row[1] and filter_fn(row[0])]
2898 def _ToStream(stream, txt, *args):
2899 """Write a message to a stream, bypassing the logging system
2901 @type stream: file object
2902 @param stream: the file to which we should write
2904 @param txt: the message
2910 stream.write(txt % args)
2915 except IOError, err:
2916 if err.errno == errno.EPIPE:
2917 # our terminal went away, we'll exit
2918 sys.exit(constants.EXIT_FAILURE)
def ToStdout(txt, *args):
  """Write a message to stdout only, bypassing the logging system

  This is just a wrapper over _ToStream.

  @type txt: str
  @param txt: the message

  """
  _ToStream(sys.stdout, txt, *args)
def ToStderr(txt, *args):
  """Write a message to stderr only, bypassing the logging system

  This is just a wrapper over _ToStream.

  @type txt: str
  @param txt: the message

  """
  _ToStream(sys.stderr, txt, *args)
class JobExecutor(object):
  """Class which manages the submission and execution of multiple jobs.

  Note that instances of this class should not be reused between
  GetResults() calls.

  """
  def __init__(self, cl=None, verbose=True, opts=None, feedback_fn=None):
    # jobs queued but not yet submitted: (index, name, ops)
    self.queue = []
    if cl is None:
      cl = GetClient()
    self.cl = cl
    self.verbose = verbose
    # submitted jobs: (index, submit status, job id/error, name)
    self.jobs = []
    self.opts = opts
    self.feedback_fn = feedback_fn
    # monotonically increasing index to restore submission order later
    self._counter = itertools.count()

  @staticmethod
  def _IfName(name, fmt):
    """Helper function for formatting name.

    Returns C{fmt % name} when a name is given and an empty string
    otherwise.

    """
    if name:
      return fmt % name

    return ""

  def QueueJob(self, name, *ops):
    """Record a job for later submit.

    @type name: string
    @param name: a description of the job, will be used in WaitJobSet

    """
    SetGenericOpcodeOpts(ops, self.opts)
    self.queue.append((next(self._counter), name, ops))

  def AddJobId(self, name, status, job_id):
    """Adds a job ID to the internal queue.

    """
    self.jobs.append((next(self._counter), status, job_id, name))

  def SubmitPending(self, each=False):
    """Submit all pending jobs.

    @type each: bool
    @param each: if True, jobs are submitted one by one instead of using
        SubmitManyJobs

    """
    if each:
      results = []
      for (_, _, ops) in self.queue:
        # SubmitJob will remove the success status, but raise an exception if
        # the submission fails, so we'll notice that anyway.
        results.append([True, self.cl.SubmitJob(ops)])
    else:
      results = self.cl.SubmitManyJobs([ops for (_, _, ops) in self.queue])
    for ((status, data), (idx, name, _)) in zip(results, self.queue):
      self.jobs.append((idx, status, data, name))

  def _ChooseJob(self):
    """Choose a non-waiting/queued job to poll next.

    """
    assert self.jobs, "_ChooseJob called with empty job list"

    result = self.cl.QueryJobs([i[2] for i in self.jobs], ["status"])
    assert result

    for job_data, status in zip(self.jobs, result):
      if (isinstance(status, list) and status and
          status[0] in (constants.JOB_STATUS_QUEUED,
                        constants.JOB_STATUS_WAITLOCK,
                        constants.JOB_STATUS_CANCELING)):
        # job is still present and waiting
        continue
      # good candidate found (either running job or lost job)
      self.jobs.remove(job_data)
      return job_data

    # no job found; fall back to polling the first one
    return self.jobs.pop(0)

  def GetResults(self):
    """Wait for and return the results of all jobs.

    @rtype: list
    @return: list of tuples (success, job results), in the same order
        as the submitted jobs; if a job has failed, instead of the result
        there will be the error message

    """
    if self.queue:
      self.SubmitPending()
    results = []
    if self.verbose:
      ok_jobs = [row[2] for row in self.jobs if row[1]]
      if ok_jobs:
        ToStdout("Submitted jobs %s", utils.CommaJoin(ok_jobs))

    # first, remove any non-submitted jobs
    self.jobs, failures = compat.partition(self.jobs, lambda x: x[1])
    for idx, _, jid, name in failures:
      ToStderr("Failed to submit job%s: %s", self._IfName(name, " for %s"), jid)
      results.append((idx, False, jid))

    while self.jobs:
      (idx, _, jid, name) = self._ChooseJob()
      ToStdout("Waiting for job %s%s ...", jid, self._IfName(name, " for %s"))
      try:
        job_result = PollJob(jid, cl=self.cl, feedback_fn=self.feedback_fn)
        success = True
      except errors.JobLost as err:
        _, job_result = FormatError(err)
        ToStderr("Job %s%s has been archived, cannot check its result",
                 jid, self._IfName(name, " for %s"))
        success = False
      except (errors.GenericError, luxi.ProtocolError) as err:
        _, job_result = FormatError(err)
        success = False
        # the error message will always be shown, verbose or not
        ToStderr("Job %s%s has failed: %s",
                 jid, self._IfName(name, " for %s"), job_result)

      results.append((idx, success, job_result))

    # sort based on the index, then drop it
    results.sort()
    results = [i[1:] for i in results]

    return results

  def WaitOrShow(self, wait):
    """Wait for job results or only print the job IDs.

    @type wait: boolean
    @param wait: whether to wait or not

    """
    if wait:
      return self.GetResults()
    else:
      if not self.jobs:
        self.SubmitPending()
      for _, status, result, name in self.jobs:
        if status:
          ToStdout("%s: %s", result, name)
        else:
          ToStderr("Failure for %s: %s", name, result)
      return [row[1:3] for row in self.jobs]
def FormatParameterDict(buf, param_dict, actual, level=1):
  """Formats a parameter dictionary.

  @type buf: L{StringIO}
  @param buf: the buffer into which to write
  @type param_dict: dict
  @param param_dict: the own parameters
  @type actual: dict
  @param actual: the current parameter set (including defaults)
  @param level: Level of indent

  """
  indent = "  " * level
  for key in sorted(actual):
    # Values missing from param_dict are inherited defaults
    val = param_dict.get(key, "default (%s)" % actual[key])
    buf.write("%s- %s: %s\n" % (indent, key, val))
3116 def ConfirmOperation(names, list_type, text, extra=""):
3117 """Ask the user to confirm an operation on a list of list_type.
3119 This function is used to request confirmation for doing an operation
3120 on a given list of list_type.
3123 @param names: the list of names that we display when
3124 we ask for confirmation
3125 @type list_type: str
3126 @param list_type: Human readable name for elements in the list (e.g. nodes)
3128 @param text: the operation that the user should confirm
3130 @return: True or False depending on user's confirmation.
3134 msg = ("The %s will operate on %d %s.\n%s"
3135 "Do you want to continue?" % (text, count, list_type, extra))
3136 affected = (("\nAffected %s:\n" % list_type) +
3137 "\n".join([" %s" % name for name in names]))
3139 choices = [("y", True, "Yes, execute the %s" % text),
3140 ("n", False, "No, abort the %s" % text)]
3143 choices.insert(1, ("v", "v", "View the list of affected %s" % list_type))
3146 question = msg + affected
3148 choice = AskUser(question, choices)
3151 choice = AskUser(msg + affected, choices)