# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.


"""Module dealing with command line parsing"""
import sys

from cStringIO import StringIO

from ganeti import utils
from ganeti import errors
from ganeti import constants
from ganeti import opcodes
from ganeti import luxi
from ganeti import ssconf
from ganeti import rpc
from ganeti import ssh
from ganeti import compat
from ganeti import netutils
from ganeti import qlang

from optparse import (OptionParser, TitledHelpFormatter,
                      Option, OptionValueError)
52 # Command line options
65 "CLUSTER_DOMAIN_SECRET_OPT",
82 "FILESTORE_DRIVER_OPT",
88 "GLOBAL_SHARED_FILEDIR_OPT",
93 "DEFAULT_IALLOCATOR_OPT",
94 "IDENTIFY_DEFAULTS_OPT",
97 "IGNORE_FAILURES_OPT",
99 "IGNORE_REMOVE_FAILURES_OPT",
100 "IGNORE_SECONDARIES_OPT",
104 "MAINTAIN_NODE_HEALTH_OPT",
106 "MASTER_NETMASK_OPT",
108 "MIGRATION_MODE_OPT",
110 "NEW_CLUSTER_CERT_OPT",
111 "NEW_CLUSTER_DOMAIN_SECRET_OPT",
112 "NEW_CONFD_HMAC_KEY_OPT",
115 "NEW_SPICE_CERT_OPT",
117 "NODE_FORCE_JOIN_OPT",
119 "NODE_PLACEMENT_OPT",
123 "NODRBD_STORAGE_OPT",
129 "NOMODIFY_ETCHOSTS_OPT",
130 "NOMODIFY_SSH_SETUP_OPT",
136 "NOSSH_KEYCHECK_OPT",
150 "PREALLOC_WIPE_DISKS_OPT",
151 "PRIMARY_IP_VERSION_OPT",
157 "REMOVE_INSTANCE_OPT",
162 "SECONDARY_ONLY_OPT",
166 "SHUTDOWN_TIMEOUT_OPT",
173 "STARTUP_PAUSED_OPT",
182 "USE_EXTERNAL_MIP_SCRIPT",
187 # Generic functions for CLI programs
190 "GenericInstanceCreate",
196 "JobSubmittedException",
198 "RunWhileClusterStopped",
202 # Formatting functions
203 "ToStderr", "ToStdout",
206 "FormatParameterDict",
215 # command line options support infrastructure
216 "ARGS_MANY_INSTANCES",
235 "OPT_COMPL_INST_ADD_NODES",
236 "OPT_COMPL_MANY_NODES",
237 "OPT_COMPL_ONE_IALLOCATOR",
238 "OPT_COMPL_ONE_INSTANCE",
239 "OPT_COMPL_ONE_NODE",
240 "OPT_COMPL_ONE_NODEGROUP",
246 "COMMON_CREATE_OPTS",
#: Priorities (sorted from lowest to highest opcode priority)
_PRIORITY_NAMES = [
  ("low", constants.OP_PRIO_LOW),
  ("normal", constants.OP_PRIO_NORMAL),
  ("high", constants.OP_PRIO_HIGH),
  ]
#: Priority dictionary for easier lookup
# TODO: Replace this and _PRIORITY_NAMES with a single sorted dictionary once
# we migrate to Python 2.6
_PRIONAME_TO_VALUE = dict(_PRIORITY_NAMES)
# Query result status for clients
(QR_NORMAL,
 QR_UNKNOWN,
 QR_INCOMPLETE) = range(3)
269 #: Maximum batch size for ChooseJob
274 def __init__(self, min=0, max=None): # pylint: disable=W0622
279 return ("<%s min=%s max=%s>" %
280 (self.__class__.__name__, self.min, self.max))
class ArgSuggest(_Argument):
  """Suggesting argument.

  Value can be any of the ones passed to the constructor.

  """
  # pylint: disable=W0622
  def __init__(self, min=0, max=None, choices=None):
    _Argument.__init__(self, min=min, max=max)
    # choices is only a suggestion (e.g. for shell completion), not
    # validated here; ArgChoice adds the validation semantics
    self.choices = choices

  def __repr__(self):
    return ("<%s min=%s max=%s choices=%r>" %
            (self.__class__.__name__, self.min, self.max, self.choices))
class ArgChoice(ArgSuggest):
  """Choice argument.

  Value can be any of the ones passed to the constructor. Like L{ArgSuggest},
  but value must be one of the choices.

  """
class ArgUnknown(_Argument):
  """Unknown argument to program (e.g. determined at runtime).

  """
class ArgInstance(_Argument):
  """Instances argument.

  """
class ArgNode(_Argument):
  """Node argument.

  """
class ArgGroup(_Argument):
  """Node group argument.

  """
class ArgJobId(_Argument):
  """Job ID argument.

  """
class ArgFile(_Argument):
  """File path argument.

  """
class ArgCommand(_Argument):
  """Command argument.

  """
class ArgHost(_Argument):
  """Host argument.

  """
class ArgOs(_Argument):
  """OS argument.

  """
# Argument-list shortcuts shared by the individual command definitions.
ARGS_MANY_INSTANCES = [ArgInstance()]
ARGS_MANY_NODES = [ArgNode()]
ARGS_MANY_GROUPS = [ArgGroup()]
ARGS_ONE_INSTANCE = [ArgInstance(min=1, max=1)]
ARGS_ONE_NODE = [ArgNode(min=1, max=1)]
# NOTE(review): restored from a gap in the dump; confirm ARGS_NONE exists
ARGS_NONE = []
ARGS_ONE_GROUP = [ArgGroup(min=1, max=1)]
ARGS_ONE_OS = [ArgOs(min=1, max=1)]
def _ExtractTagsObject(opts, args):
  """Extract the tag type object.

  Note that this function will modify its args parameter.

  @param opts: parsed command line options; must carry a C{tag_type}
      attribute denoting the object type operated on
  @param args: positional arguments; for node group/node/instance tags
      the first element is popped off and used as the object name
  @return: a (kind, name) tuple
  @raise errors.ProgrammerError: if C{tag_type} is missing or unhandled
  @raise errors.OpPrereqError: if a name argument is required but missing

  """
  if not hasattr(opts, "tag_type"):
    raise errors.ProgrammerError("tag_type not passed to _ExtractTagsObject")
  kind = opts.tag_type
  if kind == constants.TAG_CLUSTER:
    # the cluster is a singleton, so the kind doubles as the name
    retval = kind, kind
  elif kind in (constants.TAG_NODEGROUP,
                constants.TAG_NODE,
                constants.TAG_INSTANCE):
    if not args:
      raise errors.OpPrereqError("no arguments passed to the command")
    name = args.pop(0)
    retval = kind, name
  else:
    raise errors.ProgrammerError("Unhandled tag type '%s'" % kind)
  return retval
def _ExtendTags(opts, args):
  """Extend the args if a source file has been given.

  This function will extend the tags with the contents of the file
  passed in the 'tags_source' attribute of the opts parameter. A file
  named '-' will be replaced by stdin.

  """
  fname = opts.tags_source
  if fname is None:
    return
  if fname == "-":
    new_fh = sys.stdin
  else:
    new_fh = open(fname, "r")
  new_data = []
  try:
    # we don't use the nice 'new_data = [line.strip() for line in fh]'
    # because of python bug 1633941
    while True:
      line = new_fh.readline()
      if not line:
        break
      new_data.append(line.strip())
  finally:
    new_fh.close()
  args.extend(new_data)
def ListTags(opts, args):
  """List the tags on a given object.

  This is a generic implementation that knows how to deal with all
  three cases of tag objects (cluster, node, instance). The opts
  argument is expected to contain a tag_type field denoting what
  object type we work on.

  """
  kind, name = _ExtractTagsObject(opts, args)
  cl = GetClient()
  result = cl.QueryTags(kind, name)
  # sort for stable, user-friendly output
  result = list(result)
  result.sort()
  for tag in result:
    ToStdout(tag)
def AddTags(opts, args):
  """Add tags on a given object.

  This is a generic implementation that knows how to deal with all
  three cases of tag objects (cluster, node, instance). The opts
  argument is expected to contain a tag_type field denoting what
  object type we work on.

  """
  kind, name = _ExtractTagsObject(opts, args)
  # pull in extra tags from the --from file, if any
  _ExtendTags(opts, args)
  if not args:
    raise errors.OpPrereqError("No tags to be added")
  op = opcodes.OpTagsSet(kind=kind, name=name, tags=args)
  SubmitOpCode(op, opts=opts)
def RemoveTags(opts, args):
  """Remove tags from a given object.

  This is a generic implementation that knows how to deal with all
  three cases of tag objects (cluster, node, instance). The opts
  argument is expected to contain a tag_type field denoting what
  object type we work on.

  """
  kind, name = _ExtractTagsObject(opts, args)
  # pull in extra tags from the --from file, if any
  _ExtendTags(opts, args)
  if not args:
    raise errors.OpPrereqError("No tags to be removed")
  op = opcodes.OpTagsDel(kind=kind, name=name, tags=args)
  SubmitOpCode(op, opts=opts)
477 def check_unit(option, opt, value): # pylint: disable=W0613
478 """OptParsers custom converter for units.
482 return utils.ParseUnit(value)
483 except errors.UnitParseError, err:
484 raise OptionValueError("option %s: %s" % (opt, err))
def _SplitKeyVal(opt, data):
  """Convert a KeyVal string into a dict.

  This function will convert a key=val[,...] string into a dict. Empty
  values will be converted specially: keys which have the prefix 'no_'
  will have the value=False and the prefix stripped, keys with the
  prefix of L{UN_PREFIX} will get value=None, and the others will
  have value=True.

  @param opt: a string holding the option name for which we process the
      data, used in building error messages
  @param data: a string of the format key=val,key=val,...
  @return: {key=val, key=val}
  @raises errors.ParameterError: if there are duplicate keys

  """
  kv_dict = {}
  if data:
    for elem in utils.UnescapeAndSplit(data, sep=","):
      if "=" in elem:
        key, val = elem.split("=", 1)
      else:
        # valueless entry: derive the value from the key prefix
        if elem.startswith(NO_PREFIX):
          key, val = elem[len(NO_PREFIX):], False
        elif elem.startswith(UN_PREFIX):
          key, val = elem[len(UN_PREFIX):], None
        else:
          key, val = elem, True
      if key in kv_dict:
        raise errors.ParameterError("Duplicate key '%s' in option %s" %
                                    (key, opt))
      kv_dict[key] = val
  return kv_dict
def check_ident_key_val(option, opt, value):  # pylint: disable=W0613
  """Custom parser for ident:key=val,key=val options.

  This will store the parsed values as a tuple (ident, {key: val}). As such,
  multiple uses of this option via action=append is possible.

  """
  if ":" not in value:
    ident, rest = value, ""
  else:
    ident, rest = value.split(":", 1)

  if ident.startswith(NO_PREFIX):
    # "no_ident" means removal of the whole parameter group
    if rest:
      msg = "Cannot pass options when removing parameter groups: %s" % value
      raise errors.ParameterError(msg)
    retval = (ident[len(NO_PREFIX):], False)
  elif ident.startswith(UN_PREFIX):
    # "-ident" means resetting the parameter group to defaults
    if rest:
      msg = "Cannot pass options when removing parameter groups: %s" % value
      raise errors.ParameterError(msg)
    retval = (ident[len(UN_PREFIX):], None)
  else:
    kv_dict = _SplitKeyVal(opt, rest)
    retval = (ident, kv_dict)
  return retval
def check_key_val(option, opt, value):  # pylint: disable=W0613
  """Custom parser class for key=val,key=val options.

  This will store the parsed values as a dict {key: val}.

  """
  return _SplitKeyVal(opt, value)
def check_bool(option, opt, value): # pylint: disable=W0613
  """Custom parser for yes/no options.

  This will store the parsed value as either True or False.

  """
  value = value.lower()
  if value == constants.VALUE_FALSE or value == "no":
    return False
  elif value == constants.VALUE_TRUE or value == "yes":
    return True
  else:
    raise errors.ParameterError("Invalid boolean value '%s'" % value)
# completion_suggestion is normally a list. Using numeric values not evaluating
# to False for dynamic completion.
(OPT_COMPL_MANY_NODES,
 OPT_COMPL_ONE_NODE,
 OPT_COMPL_ONE_INSTANCE,
 OPT_COMPL_ONE_OS,
 OPT_COMPL_ONE_IALLOCATOR,
 OPT_COMPL_INST_ADD_NODES,
 OPT_COMPL_ONE_NODEGROUP) = range(100, 107)

#: All completion markers, for validation by the completion machinery
OPT_COMPL_ALL = frozenset([
  OPT_COMPL_MANY_NODES,
  OPT_COMPL_ONE_NODE,
  OPT_COMPL_ONE_INSTANCE,
  OPT_COMPL_ONE_OS,
  OPT_COMPL_ONE_IALLOCATOR,
  OPT_COMPL_INST_ADD_NODES,
  OPT_COMPL_ONE_NODEGROUP,
  ])
class CliOption(Option):
  """Custom option class for optparse.

  Registers the extra option types (identkeyval, keyval, unit, bool)
  and the completion_suggest attribute used for shell completion.

  """
  ATTRS = Option.ATTRS + [
    "completion_suggest",
    ]
  TYPES = Option.TYPES + (
    "identkeyval",
    "keyval",
    "unit",
    "bool",
    )
  TYPE_CHECKER = Option.TYPE_CHECKER.copy()
  TYPE_CHECKER["identkeyval"] = check_ident_key_val
  TYPE_CHECKER["keyval"] = check_key_val
  TYPE_CHECKER["unit"] = check_unit
  TYPE_CHECKER["bool"] = check_bool
# optparse.py sets make_option, so we do it for our own option class, too
cli_option = CliOption
DEBUG_OPT = cli_option("-d", "--debug", default=0, action="count",
                       help="Increase debugging level")
# Generic output-formatting and confirmation options
NOHDR_OPT = cli_option("--no-headers", default=False,
                       action="store_true", dest="no_headers",
                       help="Don't display column headers")

SEP_OPT = cli_option("--separator", default=None,
                     action="store", dest="separator",
                     help=("Separator between output fields"
                           " (defaults to one space)"))

USEUNITS_OPT = cli_option("--units", default=None,
                          dest="units", choices=("h", "m", "g", "t"),
                          help="Specify units for output (one of h/m/g/t)")

FIELDS_OPT = cli_option("-o", "--output", dest="output", action="store",
                        type="string", metavar="FIELDS",
                        help="Comma separated list of output fields")

FORCE_OPT = cli_option("-f", "--force", dest="force", action="store_true",
                       default=False, help="Force the operation")

CONFIRM_OPT = cli_option("--yes", dest="confirm", action="store_true",
                         default=False, help="Do not require confirmation")
649 IGNORE_OFFLINE_OPT = cli_option("--ignore-offline", dest="ignore_offline",
650 action="store_true", default=False,
651 help=("Ignore offline nodes and do as much"
654 TAG_ADD_OPT = cli_option("--tags", dest="tags",
655 default=None, help="Comma-separated list of instance"
TAG_SRC_OPT = cli_option("--from", dest="tags_source",
                         default=None, help="File with tag names")

SUBMIT_OPT = cli_option("--submit", dest="submit_only",
                        default=False, action="store_true",
                        help=("Submit the job and return the job ID, but"
                              " don't wait for the job to finish"))

SYNC_OPT = cli_option("--sync", dest="do_locking",
                      default=False, action="store_true",
                      help=("Grab locks while doing the queries"
                            " in order to ensure more consistent results"))
671 DRY_RUN_OPT = cli_option("--dry-run", default=False,
673 help=("Do not execute the operation, just run the"
674 " check steps and verify it it could be"
677 VERBOSE_OPT = cli_option("-v", "--verbose", default=False,
679 help="Increase the verbosity of the operation")
DEBUG_SIMERR_OPT = cli_option("--debug-simulate-errors", default=False,
                              action="store_true", dest="simulate_errors",
                              help="Debugging option that makes the operation"
                              " treat most runtime checks as failed")

NWSYNC_OPT = cli_option("--no-wait-for-sync", dest="wait_for_sync",
                        default=True, action="store_false",
                        help="Don't wait for sync (DANGEROUS!)")

ONLINE_INST_OPT = cli_option("--online", dest="online_inst",
                             action="store_true", default=False,
                             help="Enable offline instance")

OFFLINE_INST_OPT = cli_option("--offline", dest="offline_inst",
                              action="store_true", default=False,
                              help="Disable down instance")

DISK_TEMPLATE_OPT = cli_option("-t", "--disk-template", dest="disk_template",
                               help=("Custom disk setup (%s)" %
                                     utils.CommaJoin(constants.DISK_TEMPLATES)),
                               default=None, metavar="TEMPL",
                               choices=list(constants.DISK_TEMPLATES))
704 NONICS_OPT = cli_option("--no-nics", default=False, action="store_true",
705 help="Do not create any network cards for"
FILESTORE_DIR_OPT = cli_option("--file-storage-dir", dest="file_storage_dir",
                               help="Relative path under default cluster-wide"
                               " file storage dir to store file-based disks",
                               default=None, metavar="<DIR>")

FILESTORE_DRIVER_OPT = cli_option("--file-driver", dest="file_driver",
                                  help="Driver to use for image files",
                                  default="loop", metavar="<DRIVER>",
                                  choices=list(constants.FILE_DRIVER))

IALLOCATOR_OPT = cli_option("-I", "--iallocator", metavar="<NAME>",
                            help="Select nodes for the instance automatically"
                            " using the <NAME> iallocator plugin",
                            default=None, type="string",
                            completion_suggest=OPT_COMPL_ONE_IALLOCATOR)
724 DEFAULT_IALLOCATOR_OPT = cli_option("-I", "--default-iallocator",
726 help="Set the default instance allocator plugin",
727 default=None, type="string",
728 completion_suggest=OPT_COMPL_ONE_IALLOCATOR)
730 OS_OPT = cli_option("-o", "--os-type", dest="os", help="What OS to run",
732 completion_suggest=OPT_COMPL_ONE_OS)
OSPARAMS_OPT = cli_option("-O", "--os-parameters", dest="osparams",
                          type="keyval", default={},
                          help="OS parameters")

FORCE_VARIANT_OPT = cli_option("--force-variant", dest="force_variant",
                               action="store_true", default=False,
                               help="Force an unknown variant")
742 NO_INSTALL_OPT = cli_option("--no-install", dest="no_install",
743 action="store_true", default=False,
744 help="Do not install the OS (will"
BACKEND_OPT = cli_option("-B", "--backend-parameters", dest="beparams",
                         type="keyval", default={},
                         help="Backend parameters")

HVOPTS_OPT = cli_option("-H", "--hypervisor-parameters", type="keyval",
                        default={}, dest="hvparams",
                        help="Hypervisor parameters")

HYPERVISOR_OPT = cli_option("-H", "--hypervisor-parameters", dest="hypervisor",
                            help="Hypervisor and hypervisor options, in the"
                            " format hypervisor:option=value,option=value,...",
                            default=None, type="identkeyval")

HVLIST_OPT = cli_option("-H", "--hypervisor-parameters", dest="hvparams",
                        help="Hypervisor and hypervisor options, in the"
                        " format hypervisor:option=value,option=value,...",
                        default=[], action="append", type="identkeyval")
765 NOIPCHECK_OPT = cli_option("--no-ip-check", dest="ip_check", default=True,
766 action="store_false",
767 help="Don't check that the instance's IP"
770 NONAMECHECK_OPT = cli_option("--no-name-check", dest="name_check",
771 default=True, action="store_false",
772 help="Don't check that the instance's name"
NET_OPT = cli_option("--net",
                     help="NIC parameters", default=[],
                     dest="nics", action="append", type="identkeyval")

DISK_OPT = cli_option("--disk", help="Disk parameters", default=[],
                      dest="disks", action="append", type="identkeyval")

DISKIDX_OPT = cli_option("--disks", dest="disks", default=None,
                         help="Comma-separated list of disks"
                         " indices to act on (e.g. 0,2) (optional,"
                         " defaults to all disks)")

OS_SIZE_OPT = cli_option("-s", "--os-size", dest="sd_size",
                         help="Enforces a single-disk configuration using the"
                         " given disk size, in MiB unless a suffix is used",
                         default=None, type="unit", metavar="<size>")
792 IGNORE_CONSIST_OPT = cli_option("--ignore-consistency",
793 dest="ignore_consistency",
794 action="store_true", default=False,
795 help="Ignore the consistency of the disks on"
798 ALLOW_FAILOVER_OPT = cli_option("--allow-failover",
799 dest="allow_failover",
800 action="store_true", default=False,
801 help="If migration is not possible fallback to"
NONLIVE_OPT = cli_option("--non-live", dest="live",
                         default=True, action="store_false",
                         help="Do a non-live migration (this usually means"
                         " freeze the instance, save the state, transfer and"
                         " only then resume running on the secondary node)")
810 MIGRATION_MODE_OPT = cli_option("--migration-mode", dest="migration_mode",
812 choices=list(constants.HT_MIGRATION_MODES),
813 help="Override default migration mode (choose"
814 " either live or non-live")
NODE_PLACEMENT_OPT = cli_option("-n", "--node", dest="node",
                                help="Target node and optional secondary node",
                                metavar="<pnode>[:<snode>]",
                                completion_suggest=OPT_COMPL_INST_ADD_NODES)

NODE_LIST_OPT = cli_option("-n", "--node", dest="nodes", default=[],
                           action="append", metavar="<node>",
                           help="Use only this node (can be used multiple"
                           " times, if not given defaults to all nodes)",
                           completion_suggest=OPT_COMPL_ONE_NODE)

NODEGROUP_OPT_NAME = "--node-group"
828 NODEGROUP_OPT = cli_option("-g", NODEGROUP_OPT_NAME,
830 help="Node group (name or uuid)",
831 metavar="<nodegroup>",
832 default=None, type="string",
833 completion_suggest=OPT_COMPL_ONE_NODEGROUP)
835 SINGLE_NODE_OPT = cli_option("-n", "--node", dest="node", help="Target node",
837 completion_suggest=OPT_COMPL_ONE_NODE)
NOSTART_OPT = cli_option("--no-start", dest="start", default=True,
                         action="store_false",
                         help="Don't start the instance after creation")

SHOWCMD_OPT = cli_option("--show-cmd", dest="show_command",
                         action="store_true", default=False,
                         help="Show command instead of executing it")
847 CLEANUP_OPT = cli_option("--cleanup", dest="cleanup",
848 default=False, action="store_true",
849 help="Instead of performing the migration, try to"
850 " recover from a failed cleanup. This is safe"
851 " to run even if the instance is healthy, but it"
852 " will create extra replication traffic and "
853 " disrupt briefly the replication (like during the"
STATIC_OPT = cli_option("-s", "--static", dest="static",
                        action="store_true", default=False,
                        help="Only show configuration data, not runtime data")

ALL_OPT = cli_option("--all", dest="show_all",
                     default=False, action="store_true",
                     help="Show info on all instances on the cluster."
                     " This can take a long time to run, use wisely")

SELECT_OS_OPT = cli_option("--select-os", dest="select_os",
                           action="store_true", default=False,
                           help="Interactive OS reinstall, lists available"
                           " OS templates for selection")

IGNORE_FAILURES_OPT = cli_option("--ignore-failures", dest="ignore_failures",
                                 action="store_true", default=False,
                                 help="Remove the instance from the cluster"
                                 " configuration even if there are failures"
                                 " during the removal process")
876 IGNORE_REMOVE_FAILURES_OPT = cli_option("--ignore-remove-failures",
877 dest="ignore_remove_failures",
878 action="store_true", default=False,
879 help="Remove the instance from the"
880 " cluster configuration even if there"
881 " are failures during the removal"
REMOVE_INSTANCE_OPT = cli_option("--remove-instance", dest="remove_instance",
                                 action="store_true", default=False,
                                 help="Remove the instance from the cluster")

DST_NODE_OPT = cli_option("-n", "--target-node", dest="dst_node",
                          help="Specifies the new node for the instance",
                          metavar="NODE", default=None,
                          completion_suggest=OPT_COMPL_ONE_NODE)

NEW_SECONDARY_OPT = cli_option("-n", "--new-secondary", dest="dst_node",
                               help="Specifies the new secondary node",
                               metavar="NODE", default=None,
                               completion_suggest=OPT_COMPL_ONE_NODE)

ON_PRIMARY_OPT = cli_option("-p", "--on-primary", dest="on_primary",
                            default=False, action="store_true",
                            help="Replace the disk(s) on the primary"
                            " node (applies only to internally mirrored"
                            " disk templates, e.g. %s)" %
                            utils.CommaJoin(constants.DTS_INT_MIRROR))

ON_SECONDARY_OPT = cli_option("-s", "--on-secondary", dest="on_secondary",
                              default=False, action="store_true",
                              help="Replace the disk(s) on the secondary"
                              " node (applies only to internally mirrored"
                              " disk templates, e.g. %s)" %
                              utils.CommaJoin(constants.DTS_INT_MIRROR))
912 AUTO_PROMOTE_OPT = cli_option("--auto-promote", dest="auto_promote",
913 default=False, action="store_true",
914 help="Lock all nodes and auto-promote as needed"
AUTO_REPLACE_OPT = cli_option("-a", "--auto", dest="auto",
                              default=False, action="store_true",
                              help="Automatically replace faulty disks"
                              " (applies only to internally mirrored"
                              " disk templates, e.g. %s)" %
                              utils.CommaJoin(constants.DTS_INT_MIRROR))

IGNORE_SIZE_OPT = cli_option("--ignore-size", dest="ignore_size",
                             default=False, action="store_true",
                             help="Ignore current recorded size"
                             " (useful for forcing activation when"
                             " the recorded size is wrong)")
930 SRC_NODE_OPT = cli_option("--src-node", dest="src_node", help="Source node",
932 completion_suggest=OPT_COMPL_ONE_NODE)
934 SRC_DIR_OPT = cli_option("--src-dir", dest="src_dir", help="Source directory",
SECONDARY_IP_OPT = cli_option("-s", "--secondary-ip", dest="secondary_ip",
                              help="Specify the secondary ip for the node",
                              metavar="ADDRESS", default=None)

READD_OPT = cli_option("--readd", dest="readd",
                       default=False, action="store_true",
                       help="Readd old node after replacing it")

NOSSH_KEYCHECK_OPT = cli_option("--no-ssh-key-check", dest="ssh_key_check",
                                default=True, action="store_false",
                                help="Disable SSH key fingerprint checking")

NODE_FORCE_JOIN_OPT = cli_option("--force-join", dest="force_join",
                                 default=False, action="store_true",
                                 help="Force the joining of a node")

MC_OPT = cli_option("-C", "--master-candidate", dest="master_candidate",
                    type="bool", default=None, metavar=_YORNO,
                    help="Set the master_candidate flag on the node")
957 OFFLINE_OPT = cli_option("-O", "--offline", dest="offline", metavar=_YORNO,
958 type="bool", default=None,
959 help=("Set the offline flag on the node"
960 " (cluster does not communicate with offline"
DRAINED_OPT = cli_option("-D", "--drained", dest="drained", metavar=_YORNO,
                         type="bool", default=None,
                         help=("Set the drained flag on the node"
                               " (excluded from allocation operations)"))

CAPAB_MASTER_OPT = cli_option("--master-capable", dest="master_capable",
                              type="bool", default=None, metavar=_YORNO,
                              help="Set the master_capable flag on the node")

CAPAB_VM_OPT = cli_option("--vm-capable", dest="vm_capable",
                          type="bool", default=None, metavar=_YORNO,
                          help="Set the vm_capable flag on the node")

ALLOCATABLE_OPT = cli_option("--allocatable", dest="allocatable",
                             type="bool", default=None, metavar=_YORNO,
                             help="Set the allocatable flag on a volume")
980 NOLVM_STORAGE_OPT = cli_option("--no-lvm-storage", dest="lvm_storage",
981 help="Disable support for lvm based instances"
983 action="store_false", default=True)
ENABLED_HV_OPT = cli_option("--enabled-hypervisors",
                            dest="enabled_hypervisors",
                            help="Comma-separated list of hypervisors",
                            type="string", default=None)

NIC_PARAMS_OPT = cli_option("-N", "--nic-parameters", dest="nicparams",
                            type="keyval", default={},
                            help="NIC parameters")

CP_SIZE_OPT = cli_option("-C", "--candidate-pool-size", default=None,
                         dest="candidate_pool_size", type="int",
                         help="Set the candidate pool size")

VG_NAME_OPT = cli_option("--vg-name", dest="vg_name",
                         help=("Enables LVM and specifies the volume group"
                               " name (cluster-wide) for disk allocation"
                               " [%s]" % constants.DEFAULT_VG),
                         metavar="VG", default=None)

YES_DOIT_OPT = cli_option("--yes-do-it", "--ya-rly", dest="yes_do_it",
                          help="Destroy cluster", action="store_true")

NOVOTING_OPT = cli_option("--no-voting", dest="no_voting",
                          help="Skip node agreement check (dangerous)",
                          action="store_true", default=False)
1011 MAC_PREFIX_OPT = cli_option("-m", "--mac-prefix", dest="mac_prefix",
1012 help="Specify the mac prefix for the instance IP"
1013 " addresses, in the format XX:XX:XX",
1017 MASTER_NETDEV_OPT = cli_option("--master-netdev", dest="master_netdev",
1018 help="Specify the node interface (cluster-wide)"
1019 " on which the master IP address will be added"
1020 " (cluster init default: %s)" %
1021 constants.DEFAULT_BRIDGE,
1025 MASTER_NETMASK_OPT = cli_option("--master-netmask", dest="master_netmask",
1026 help="Specify the netmask of the master IP",
USE_EXTERNAL_MIP_SCRIPT = cli_option("--use-external-mip-script",
                                     dest="use_external_mip_script",
                                     help="Specify whether to run a"
                                     " user-provided script for the master"
                                     " IP address turnup and"
                                     " turndown operations",
                                     type="bool", metavar=_YORNO, default=None)
1037 GLOBAL_FILEDIR_OPT = cli_option("--file-storage-dir", dest="file_storage_dir",
1038 help="Specify the default directory (cluster-"
1039 "wide) for storing the file-based disks [%s]" %
1040 constants.DEFAULT_FILE_STORAGE_DIR,
1042 default=constants.DEFAULT_FILE_STORAGE_DIR)
1044 GLOBAL_SHARED_FILEDIR_OPT = cli_option("--shared-file-storage-dir",
1045 dest="shared_file_storage_dir",
1046 help="Specify the default directory (cluster-"
1047 "wide) for storing the shared file-based"
1049 constants.DEFAULT_SHARED_FILE_STORAGE_DIR,
1050 metavar="SHAREDDIR",
1051 default=constants.DEFAULT_SHARED_FILE_STORAGE_DIR)
NOMODIFY_ETCHOSTS_OPT = cli_option("--no-etc-hosts", dest="modify_etc_hosts",
                                   help="Don't modify /etc/hosts",
                                   action="store_false", default=True)

NOMODIFY_SSH_SETUP_OPT = cli_option("--no-ssh-init", dest="modify_ssh_setup",
                                    help="Don't initialize SSH keys",
                                    action="store_false", default=True)

ERROR_CODES_OPT = cli_option("--error-codes", dest="error_codes",
                             help="Enable parseable error messages",
                             action="store_true", default=False)

NONPLUS1_OPT = cli_option("--no-nplus1-mem", dest="skip_nplusone_mem",
                          help="Skip N+1 memory redundancy tests",
                          action="store_true", default=False)
1069 REBOOT_TYPE_OPT = cli_option("-t", "--type", dest="reboot_type",
1070 help="Type of reboot: soft/hard/full",
1071 default=constants.INSTANCE_REBOOT_HARD,
1073 choices=list(constants.REBOOT_TYPES))
IGNORE_SECONDARIES_OPT = cli_option("--ignore-secondaries",
                                    dest="ignore_secondaries",
                                    default=False, action="store_true",
                                    help="Ignore errors from secondaries")

NOSHUTDOWN_OPT = cli_option("--noshutdown", dest="shutdown",
                            action="store_false", default=True,
                            help="Don't shutdown the instance (unsafe)")

TIMEOUT_OPT = cli_option("--timeout", dest="timeout", type="int",
                         default=constants.DEFAULT_SHUTDOWN_TIMEOUT,
                         help="Maximum time to wait")

SHUTDOWN_TIMEOUT_OPT = cli_option("--shutdown-timeout",
                                  dest="shutdown_timeout", type="int",
                                  default=constants.DEFAULT_SHUTDOWN_TIMEOUT,
                                  help="Maximum time to wait for instance"
                                  " shutdown")
1093 INTERVAL_OPT = cli_option("--interval", dest="interval", type="int",
1095 help=("Number of seconds between repetions of the"
1098 EARLY_RELEASE_OPT = cli_option("--early-release",
1099 dest="early_release", default=False,
1100 action="store_true",
1101 help="Release the locks on the secondary"
NEW_CLUSTER_CERT_OPT = cli_option("--new-cluster-certificate",
                                  dest="new_cluster_cert",
                                  default=False, action="store_true",
                                  help="Generate a new cluster certificate")
1109 RAPI_CERT_OPT = cli_option("--rapi-certificate", dest="rapi_cert",
1111 help="File containing new RAPI certificate")
1113 NEW_RAPI_CERT_OPT = cli_option("--new-rapi-certificate", dest="new_rapi_cert",
1114 default=None, action="store_true",
1115 help=("Generate a new self-signed RAPI"
1118 SPICE_CERT_OPT = cli_option("--spice-certificate", dest="spice_cert",
1120 help="File containing new SPICE certificate")
1122 SPICE_CACERT_OPT = cli_option("--spice-ca-certificate", dest="spice_cacert",
1124 help="File containing the certificate of the CA"
1125 " which signed the SPICE certificate")
1127 NEW_SPICE_CERT_OPT = cli_option("--new-spice-certificate",
1128 dest="new_spice_cert", default=None,
1129 action="store_true",
1130 help=("Generate a new self-signed SPICE"
1133 NEW_CONFD_HMAC_KEY_OPT = cli_option("--new-confd-hmac-key",
1134 dest="new_confd_hmac_key",
1135 default=False, action="store_true",
1136 help=("Create a new HMAC key for %s" %
1139 CLUSTER_DOMAIN_SECRET_OPT = cli_option("--cluster-domain-secret",
1140 dest="cluster_domain_secret",
1142 help=("Load new new cluster domain"
1143 " secret from file"))
1145 NEW_CLUSTER_DOMAIN_SECRET_OPT = cli_option("--new-cluster-domain-secret",
1146 dest="new_cluster_domain_secret",
1147 default=False, action="store_true",
1148 help=("Create a new cluster domain"
USE_REPL_NET_OPT = cli_option("--use-replication-network",
                              dest="use_replication_network",
                              help="Whether to use the replication network"
                              " for talking to the nodes",
                              action="store_true", default=False)

MAINTAIN_NODE_HEALTH_OPT = \
    cli_option("--maintain-node-health", dest="maintain_node_health",
               metavar=_YORNO, default=None, type="bool",
               help="Configure the cluster to automatically maintain node"
               " health, by shutting down unknown instances, shutting down"
               " unknown DRBD devices, etc.")

IDENTIFY_DEFAULTS_OPT = \
    cli_option("--identify-defaults", dest="identify_defaults",
               default=False, action="store_true",
               help="Identify which saved instance parameters are equal to"
               " the current cluster defaults and set them as such, instead"
               " of marking them as overridden")

UIDPOOL_OPT = cli_option("--uid-pool", default=None,
                         action="store", dest="uid_pool",
                         help=("A list of user-ids or user-id"
                               " ranges separated by commas"))

ADD_UIDS_OPT = cli_option("--add-uids", default=None,
                          action="store", dest="add_uids",
                          help=("A list of user-ids or user-id"
                                " ranges separated by commas, to be"
                                " added to the user-id pool"))

REMOVE_UIDS_OPT = cli_option("--remove-uids", default=None,
                             action="store", dest="remove_uids",
                             help=("A list of user-ids or user-id"
                                   " ranges separated by commas, to be"
                                   " removed from the user-id pool"))

RESERVED_LVS_OPT = cli_option("--reserved-lvs", default=None,
                              action="store", dest="reserved_lvs",
                              help=("A comma-separated list of reserved"
                                    " logical volumes names, that will be"
                                    " ignored by cluster verify"))

ROMAN_OPT = cli_option("--roman",
                       dest="roman_integers", default=False,
                       action="store_true",
                       help="Use roman numbers for positive integers")

DRBD_HELPER_OPT = cli_option("--drbd-usermode-helper", dest="drbd_helper",
                             action="store", default=None,
                             help="Specifies usermode helper for DRBD")

NODRBD_STORAGE_OPT = cli_option("--no-drbd-storage", dest="drbd_storage",
                                action="store_false", default=True,
                                help="Disable support for DRBD")

PRIMARY_IP_VERSION_OPT = \
    cli_option("--primary-ip-version", default=constants.IP4_VERSION,
               action="store", dest="primary_ip_version",
               metavar="%d|%d" % (constants.IP4_VERSION,
                                  constants.IP6_VERSION),
               help="Cluster-wide IP version for primary IP")

PRIORITY_OPT = cli_option("--priority", default=None, dest="priority",
                          metavar="|".join(name for name, _ in _PRIORITY_NAMES),
                          choices=_PRIONAME_TO_VALUE.keys(),
                          help="Priority for opcode processing")

HID_OS_OPT = cli_option("--hidden", dest="hidden",
                        type="bool", default=None, metavar=_YORNO,
                        help="Sets the hidden flag on the OS")

BLK_OS_OPT = cli_option("--blacklisted", dest="blacklisted",
                        type="bool", default=None, metavar=_YORNO,
                        help="Sets the blacklisted flag on the OS")
1227 PREALLOC_WIPE_DISKS_OPT = cli_option("--prealloc-wipe-disks", default=None,
1228 type="bool", metavar=_YORNO,
1229 dest="prealloc_wipe_disks",
1230 help=("Wipe disks prior to instance"
1233 NODE_PARAMS_OPT = cli_option("--node-parameters", dest="ndparams",
1234 type="keyval", default=None,
1235 help="Node parameters")
1237 ALLOC_POLICY_OPT = cli_option("--alloc-policy", dest="alloc_policy",
1238 action="store", metavar="POLICY", default=None,
1239 help="Allocation policy for the node group")
1241 NODE_POWERED_OPT = cli_option("--node-powered", default=None,
1242 type="bool", metavar=_YORNO,
1243 dest="node_powered",
1244 help="Specify if the SoR for node is powered")
1246 OOB_TIMEOUT_OPT = cli_option("--oob-timeout", dest="oob_timeout", type="int",
1247 default=constants.OOB_TIMEOUT,
1248 help="Maximum time to wait for out-of-band helper")
1250 POWER_DELAY_OPT = cli_option("--power-delay", dest="power_delay", type="float",
1251 default=constants.OOB_POWER_DELAY,
1252 help="Time in seconds to wait between power-ons")
1254 FORCE_FILTER_OPT = cli_option("-F", "--filter", dest="force_filter",
1255 action="store_true", default=False,
1256 help=("Whether command argument should be treated"
1259 NO_REMEMBER_OPT = cli_option("--no-remember",
1261 action="store_true", default=False,
1262 help="Perform but do not record the change"
1263 " in the configuration")
1265 PRIMARY_ONLY_OPT = cli_option("-p", "--primary-only",
1266 default=False, action="store_true",
1267 help="Evacuate primary instances only")
1269 SECONDARY_ONLY_OPT = cli_option("-s", "--secondary-only",
1270 default=False, action="store_true",
1271 help="Evacuate secondary instances only"
1272 " (applies only to internally mirrored"
1273 " disk templates, e.g. %s)" %
1274 utils.CommaJoin(constants.DTS_INT_MIRROR))
1276 STARTUP_PAUSED_OPT = cli_option("--paused", dest="startup_paused",
1277 action="store_true", default=False,
1278 help="Pause instance at startup")
1280 TO_GROUP_OPT = cli_option("--to", dest="to", metavar="<group>",
1281 help="Destination node group (name or uuid)",
1282 default=None, action="append",
1283 completion_suggest=OPT_COMPL_ONE_NODEGROUP)
1285 IGNORE_ERRORS_OPT = cli_option("-I", "--ignore-errors", default=[],
1286 action="append", dest="ignore_errors",
1287 choices=list(constants.CV_ALL_ECODES_STRINGS),
1288 help="Error code to be ignored")
1291 #: Options provided by all commands
1292 COMMON_OPTS = [DEBUG_OPT]
1294 # common options for creating instances. add and import then add their own
1296 COMMON_CREATE_OPTS = [
1301 FILESTORE_DRIVER_OPT,
1319 def _ParseArgs(argv, commands, aliases, env_override):
1320 """Parser for the command line arguments.
1322 This function parses the arguments and returns the function which
1323 must be executed together with its (modified) arguments.
1325 @param argv: the command line
1326 @param commands: dictionary with special contents, see the design
1327 doc for cmdline handling
1328 @param aliases: dictionary with command aliases {'alias': 'target, ...}
1329 @param env_override: list of env variables allowed for default args
# Env-var overrides only make sense for real commands, not aliases.
1332 assert not (env_override - set(commands))
1335 binary = "<command>"
1337 binary = argv[0].split("/")[-1]
1339 if len(argv) > 1 and argv[1] == "--version":
1340 ToStdout("%s (ganeti %s) %s", binary, constants.VCS_VERSION,
1341 constants.RELEASE_VERSION)
1342 # Quit right away. That way we don't have to care about this special
1343 # argument. optparse.py does it the same.
# Unknown or missing command: print a usage summary with all commands.
1346 if len(argv) < 2 or not (argv[1] in commands or
1347 argv[1] in aliases):
1348 # let's do a nice thing
1349 sortedcmds = commands.keys()
1352 ToStdout("Usage: %s {command} [options...] [argument...]", binary)
1353 ToStdout("%s <command> --help to see details, or man %s", binary, binary)
1356 # compute the max line length for cmd + usage
1357 mlen = max([len(" %s" % cmd) for cmd in commands])
1358 mlen = min(60, mlen) # should not get here...
1360 # and format a nice command list
1361 ToStdout("Commands:")
1362 for cmd in sortedcmds:
1363 cmdstr = " %s" % (cmd,)
1364 help_text = commands[cmd][4]
1365 help_lines = textwrap.wrap(help_text, 79 - 3 - mlen)
1366 ToStdout("%-*s - %s", mlen, cmdstr, help_lines.pop(0))
1367 for line in help_lines:
1368 ToStdout("%-*s %s", mlen, "", line)
1372 return None, None, None
1374 # get command, unalias it, and look it up in commands
1378 raise errors.ProgrammerError("Alias '%s' overrides an existing"
1381 if aliases[cmd] not in commands:
1382 raise errors.ProgrammerError("Alias '%s' maps to non-existing"
1383 " command '%s'" % (cmd, aliases[cmd]))
# Allow e.g. GNT_INSTANCE_LIST to inject default arguments for
# "gnt-instance list"; they are spliced in right after the command name.
1387 if cmd in env_override:
1388 args_env_name = ("%s_%s" % (binary.replace("-", "_"), cmd)).upper()
1389 env_args = os.environ.get(args_env_name)
1391 argv = utils.InsertAtPos(argv, 1, shlex.split(env_args))
1393 func, args_def, parser_opts, usage, description = commands[cmd]
1394 parser = OptionParser(option_list=parser_opts + COMMON_OPTS,
1395 description=description,
1396 formatter=TitledHelpFormatter(),
1397 usage="%%prog %s %s" % (cmd, usage))
1398 parser.disable_interspersed_args()
1399 options, args = parser.parse_args(args=argv[1:])
1401 if not _CheckArguments(cmd, args_def, args):
1402 return None, None, None
1404 return func, options, args
1407 def _CheckArguments(cmd, args_def, args):
1408 """Verifies the arguments using the argument definition.
1412 1. Abort with error if values specified by user but none expected.
1414 1. For each argument in definition
1416 1. Keep running count of minimum number of values (min_count)
1417 1. Keep running count of maximum number of values (max_count)
1418 1. If it has an unlimited number of values
1420 1. Abort with error if it's not the last argument in the definition
1422 1. If last argument has limited number of values
1424 1. Abort with error if number of values doesn't match or is too large
1426 1. Abort with error if user didn't pass enough values (min_count)
1429 if args and not args_def:
1430 ToStderr("Error: Command %s expects no arguments", cmd)
1437 last_idx = len(args_def) - 1
# Accumulate min/max argument counts; a None max on a non-final
# argument would make the definition ambiguous, hence the check below.
1439 for idx, arg in enumerate(args_def):
1440 if min_count is None:
1442 elif arg.min is not None:
1443 min_count += arg.min
1445 if max_count is None:
1447 elif arg.max is not None:
1448 max_count += arg.max
1451 check_max = (arg.max is not None)
1453 elif arg.max is None:
1454 raise errors.ProgrammerError("Only the last argument can have max=None")
1457 # Command with exact number of arguments
1458 if (min_count is not None and max_count is not None and
1459 min_count == max_count and len(args) != min_count):
1460 ToStderr("Error: Command %s expects %d argument(s)", cmd, min_count)
1463 # Command with limited number of arguments
1464 if max_count is not None and len(args) > max_count:
1465 ToStderr("Error: Command %s expects only %d argument(s)",
1469 # Command with some required arguments
1470 if min_count is not None and len(args) < min_count:
1471 ToStderr("Error: Command %s expects at least %d argument(s)",
def SplitNodeOption(value):
  """Splits the value of a --node option.

  A "pnode:snode" value is split on the first colon; anything else
  (including None or an empty string) is returned with a None secondary.

  """
  if not value or ":" not in value:
    return (value, None)
  return value.split(":", 1)
def CalculateOSNames(os_name, os_variants):
  """Calculates all the names an OS can be called, according to its variants.

  @type os_name: string
  @param os_name: base name of the os
  @type os_variants: list or None
  @param os_variants: list of supported variants
  @rtype: list
  @return: list of valid names

  """
  # The docstring allows os_variants to be None, but the visible body
  # iterated it unconditionally; guard and fall back to the bare OS name.
  if os_variants:
    return ["%s+%s" % (os_name, v) for v in os_variants]
  else:
    return [os_name]
def ParseFields(selected, default):
  """Parses the values of "--field"-like options.

  @type selected: string or None
  @param selected: User-selected options
  @type default: list
  @param default: Default fields

  @rtype: list
  @return: the list of fields to query for

  """
  # No explicit selection: use the defaults (the visible body fell
  # through this case without returning them).
  if selected is None:
    return default

  # A leading "+" extends the default field list instead of replacing it.
  if selected.startswith("+"):
    return default + selected[1:].split(",")

  return selected.split(",")
# Decorator alias: wraps a function so it runs with RPC initialized.
1523 UsesRPC = rpc.RunWithRPC
1526 def AskUser(text, choices=None):
1527 """Ask the user a question.
1529 @param text: the question to ask
1531 @param choices: list with elements tuples (input_char, return_value,
1532 description); if not given, it will default to: [('y', True,
1533 'Perform the operation'), ('n', False, 'Do no do the operation')];
1534 note that the '?' char is reserved for help
1536 @return: one of the return values from the choices list; if input is
1537 not possible (i.e. not running with a tty, we return the last
1542 choices = [("y", True, "Perform the operation"),
1543 ("n", False, "Do not perform the operation")]
# Validate the choices structure before prompting.
1544 if not choices or not isinstance(choices, list):
1545 raise errors.ProgrammerError("Invalid choices argument to AskUser")
1546 for entry in choices:
1547 if not isinstance(entry, tuple) or len(entry) < 3 or entry[0] == "?":
1548 raise errors.ProgrammerError("Invalid choices element to AskUser")
# Default answer is the last choice's return value (used when no tty).
1550 answer = choices[-1][1]
1552 for line in text.splitlines():
1553 new_text.append(textwrap.fill(line, 70, replace_whitespace=False))
1554 text = "\n".join(new_text)
# Prompt on the controlling terminal, not stdin/stdout.
1556 f = file("/dev/tty", "a+")
1560 chars = [entry[0] for entry in choices]
1561 chars[-1] = "[%s]" % chars[-1]
1563 maps = dict([(entry[0], entry[1]) for entry in choices])
1567 f.write("/".join(chars))
1569 line = f.readline(2).strip().lower()
1574 for entry in choices:
1575 f.write(" %s - %s\n" % (entry[0], entry[2]))
1583 class JobSubmittedException(Exception):
1584 """Job was submitted, client should exit.
1586 This exception has one argument, the ID of the job that was
1587 submitted. The handler should print this ID.
1589 This is not an error, just a structured way to exit from clients.
1594 def SendJob(ops, cl=None):
1595 """Function to submit an opcode without waiting for the results.
1598 @param ops: list of opcodes
1599 @type cl: luxi.Client
1600 @param cl: the luxi client to use for communicating with the master;
1601 if None, a new client will be created
# Fire-and-forget submission; the caller receives the job id.
1607 job_id = cl.SubmitJob(ops)
1612 def GenericPollJob(job_id, cbs, report_cbs):
1613 """Generic job-polling function.
1615 @type job_id: number
1616 @param job_id: Job ID
1617 @type cbs: Instance of L{JobPollCbBase}
1618 @param cbs: Data callbacks
1619 @type report_cbs: Instance of L{JobPollReportCbBase}
1620 @param report_cbs: Reporting callbacks
# Track last-seen job info and log serial so the wait call only
# returns genuinely new data.
1623 prev_job_info = None
1624 prev_logmsg_serial = None
1629 result = cbs.WaitForJobChangeOnce(job_id, ["status"], prev_job_info,
1632 # job not found, go away!
1633 raise errors.JobLost("Job with id %s lost" % job_id)
1635 if result == constants.JOB_NOTCHANGED:
1636 report_cbs.ReportNotChanged(job_id, status)
1641 # Split result, a tuple of (field values, log entries)
1642 (job_info, log_entries) = result
1643 (status, ) = job_info
# Forward any new log entries to the reporting callbacks and advance
# the serial high-water mark.
1646 for log_entry in log_entries:
1647 (serial, timestamp, log_type, message) = log_entry
1648 report_cbs.ReportLogMessage(job_id, serial, timestamp,
1650 prev_logmsg_serial = max(prev_logmsg_serial, serial)
1652 # TODO: Handle canceled and archived jobs
1653 elif status in (constants.JOB_STATUS_SUCCESS,
1654 constants.JOB_STATUS_ERROR,
1655 constants.JOB_STATUS_CANCELING,
1656 constants.JOB_STATUS_CANCELED):
1659 prev_job_info = job_info
# Polling loop left: fetch the final status and per-opcode results.
1661 jobs = cbs.QueryJobs([job_id], ["status", "opstatus", "opresult"])
1663 raise errors.JobLost("Job with id %s lost" % job_id)
1665 status, opstatus, result = jobs[0]
1667 if status == constants.JOB_STATUS_SUCCESS:
1670 if status in (constants.JOB_STATUS_CANCELING, constants.JOB_STATUS_CANCELED):
1671 raise errors.OpExecError("Job was canceled")
# Job failed: locate the failing opcode and raise a useful error.
1674 for idx, (status, msg) in enumerate(zip(opstatus, result)):
1675 if status == constants.OP_STATUS_SUCCESS:
1677 elif status == constants.OP_STATUS_ERROR:
1678 errors.MaybeRaise(msg)
1681 raise errors.OpExecError("partial failure (opcode %d): %s" %
1684 raise errors.OpExecError(str(msg))
1686 # default failure mode
1687 raise errors.OpExecError(result)
1690 class JobPollCbBase:
1691 """Base class for L{GenericPollJob} callbacks.
1695 """Initializes this class.
# Abstract data-retrieval interface; subclasses implement the two
# methods below against a concrete transport (e.g. luxi).
1699 def WaitForJobChangeOnce(self, job_id, fields,
1700 prev_job_info, prev_log_serial):
1701 """Waits for changes on a job.
1704 raise NotImplementedError()
1706 def QueryJobs(self, job_ids, fields):
1707 """Returns the selected fields for the selected job IDs.
1709 @type job_ids: list of numbers
1710 @param job_ids: Job IDs
1711 @type fields: list of strings
1712 @param fields: Fields
1715 raise NotImplementedError()
1718 class JobPollReportCbBase:
1719 """Base class for L{GenericPollJob} reporting callbacks.
1723 """Initializes this class.
# Abstract reporting interface used while polling a job.
1727 def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
1728 """Handles a log message.
1731 raise NotImplementedError()
1733 def ReportNotChanged(self, job_id, status):
1734 """Called for if a job hasn't changed in a while.
1736 @type job_id: number
1737 @param job_id: Job ID
1738 @type status: string or None
1739 @param status: Job status if available
1742 raise NotImplementedError()
1745 class _LuxiJobPollCb(JobPollCbBase):
1746 def __init__(self, cl):
1747 """Initializes this class.
1750 JobPollCbBase.__init__(self)
# Data callbacks backed by a luxi client; both methods just delegate.
1753 def WaitForJobChangeOnce(self, job_id, fields,
1754 prev_job_info, prev_log_serial):
1755 """Waits for changes on a job.
1758 return self.cl.WaitForJobChangeOnce(job_id, fields,
1759 prev_job_info, prev_log_serial)
1761 def QueryJobs(self, job_ids, fields):
1762 """Returns the selected fields for the selected job IDs.
1765 return self.cl.QueryJobs(job_ids, fields)
1768 class FeedbackFnJobPollReportCb(JobPollReportCbBase):
1769 def __init__(self, feedback_fn):
1770 """Initializes this class.
1773 JobPollReportCbBase.__init__(self)
1775 self.feedback_fn = feedback_fn
1777 assert callable(feedback_fn)
1779 def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
1780 """Handles a log message.
# Forward the log entry to the user-supplied feedback function.
1783 self.feedback_fn((timestamp, log_type, log_msg))
1785 def ReportNotChanged(self, job_id, status):
1786 """Called if a job hasn't changed in a while.
1792 class StdioJobPollReportCb(JobPollReportCbBase):
1794 """Initializes this class.
1797 JobPollReportCbBase.__init__(self)
# Remember which one-time notices have already been printed.
1799 self.notified_queued = False
1800 self.notified_waitlock = False
1802 def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
1803 """Handles a log message.
1806 ToStdout("%s %s", time.ctime(utils.MergeTime(timestamp)),
1807 FormatLogMessage(log_type, log_msg))
1809 def ReportNotChanged(self, job_id, status):
1810 """Called if a job hasn't changed in a while.
# Print each "still waiting" notice at most once per job.
1816 if status == constants.JOB_STATUS_QUEUED and not self.notified_queued:
1817 ToStderr("Job %s is waiting in queue", job_id)
1818 self.notified_queued = True
1820 elif status == constants.JOB_STATUS_WAITING and not self.notified_waitlock:
1821 ToStderr("Job %s is trying to acquire all necessary locks", job_id)
1822 self.notified_waitlock = True
def FormatLogMessage(log_type, log_msg):
  """Formats a job message according to its type.

  Non-message entries may carry arbitrary payloads, so they are first
  rendered with str(); the result is always safely encoded.

  """
  text = log_msg if log_type == constants.ELOG_MESSAGE else str(log_msg)
  return utils.SafeEncode(text)
1835 def PollJob(job_id, cl=None, feedback_fn=None, reporter=None):
1836 """Function to poll for the result of a job.
1838 @type job_id: job identified
1839 @param job_id: the job to poll for results
1840 @type cl: luxi.Client
1841 @param cl: the luxi client to use for communicating with the master;
1842 if None, a new client will be created
# Choose a reporter: either the caller's feedback function or stdio;
# passing both a reporter and a feedback function is a caller bug.
1848 if reporter is None:
1850 reporter = FeedbackFnJobPollReportCb(feedback_fn)
1852 reporter = StdioJobPollReportCb()
1854 raise errors.ProgrammerError("Can't specify reporter and feedback function")
1856 return GenericPollJob(job_id, _LuxiJobPollCb(cl), reporter)
1859 def SubmitOpCode(op, cl=None, feedback_fn=None, opts=None, reporter=None):
1860 """Legacy function to submit an opcode.
1862 This is just a simple wrapper over the construction of the processor
1863 instance. It should be extended to better handle feedback and
1864 interaction functions.
# Apply generic options (debug/dry-run/priority), submit, then poll
# until completion and return the single opcode's result.
1870 SetGenericOpcodeOpts([op], opts)
1872 job_id = SendJob([op], cl=cl)
1874 op_results = PollJob(job_id, cl=cl, feedback_fn=feedback_fn,
1877 return op_results[0]
1880 def SubmitOrSend(op, opts, cl=None, feedback_fn=None):
1881 """Wrapper around SubmitOpCode or SendJob.
1883 This function will decide, based on the 'opts' parameter, whether to
1884 submit and wait for the result of the opcode (and return it), or
1885 whether to just send the job and print its identifier. It is used in
1886 order to simplify the implementation of the '--submit' option.
1888 It will also process the opcodes if we're sending the via SendJob
1889 (otherwise SubmitOpCode does it).
# --submit: fire the job and exit via JobSubmittedException so the
# generic handler prints the job id.
1892 if opts and opts.submit_only:
1894 SetGenericOpcodeOpts(job, opts)
1895 job_id = SendJob(job, cl=cl)
1896 raise JobSubmittedException(job_id)
1898 return SubmitOpCode(op, cl=cl, feedback_fn=feedback_fn, opts=opts)
1901 def SetGenericOpcodeOpts(opcode_list, options):
1902 """Processor for generic options.
1904 This function updates the given opcodes based on generic command
1905 line options (like debug, dry-run, etc.).
1907 @param opcode_list: list of opcodes
1908 @param options: command line options or None
1909 @return: None (in-place modification)
# Copy the generic flags onto every opcode; dry_run/priority are only
# set when the parsed options actually carry them.
1914 for op in opcode_list:
1915 op.debug_level = options.debug
1916 if hasattr(options, "dry_run"):
1917 op.dry_run = options.dry_run
1918 if getattr(options, "priority", None) is not None:
1919 op.priority = _PRIONAME_TO_VALUE[options.priority]
1923 # TODO: Cache object?
# Try to create a luxi client; on NoMasterError fall back to ssconf to
# produce a more helpful diagnostic for the user.
1925 client = luxi.Client()
1926 except luxi.NoMasterError:
1927 ss = ssconf.SimpleStore()
1929 # Try to read ssconf file
1932 except errors.ConfigurationError:
1933 raise errors.OpPrereqError("Cluster not initialized or this machine is"
1934 " not part of a cluster")
1936 master, myself = ssconf.GetMasterAndMyself(ss=ss)
1937 if master != myself:
1938 raise errors.OpPrereqError("This is not the master node, please connect"
1939 " to node '%s' and rerun the command" %
1945 def FormatError(err):
1946 """Return a formatted error message for a given error.
1948 This function takes an exception instance and returns a tuple
1949 consisting of two values: first, the recommended exit code, and
1950 second, a string describing the error message (not
1951 newline-terminated).
# Dispatch on the exception type, most specific classes first, writing
# a user-facing description into the output buffer.
1957 if isinstance(err, errors.ConfigurationError):
1958 txt = "Corrupt configuration file: %s" % msg
1960 obuf.write(txt + "\n")
1961 obuf.write("Aborting.")
1963 elif isinstance(err, errors.HooksAbort):
1964 obuf.write("Failure: hooks execution failed:\n")
1965 for node, script, out in err.args[0]:
1967 obuf.write(" node: %s, script: %s, output: %s\n" %
1968 (node, script, out))
1970 obuf.write(" node: %s, script: %s (no output)\n" %
1972 elif isinstance(err, errors.HooksFailure):
1973 obuf.write("Failure: hooks general failure: %s" % msg)
1974 elif isinstance(err, errors.ResolverError):
1975 this_host = netutils.Hostname.GetSysName()
1976 if err.args[0] == this_host:
1977 msg = "Failure: can't resolve my own hostname ('%s')"
1979 msg = "Failure: can't resolve hostname '%s'"
1980 obuf.write(msg % err.args[0])
1981 elif isinstance(err, errors.OpPrereqError):
1982 if len(err.args) == 2:
1983 obuf.write("Failure: prerequisites not met for this"
1984 " operation:\nerror type: %s, error details:\n%s" %
1985 (err.args[1], err.args[0]))
1987 obuf.write("Failure: prerequisites not met for this"
1988 " operation:\n%s" % msg)
1989 elif isinstance(err, errors.OpExecError):
1990 obuf.write("Failure: command execution error:\n%s" % msg)
1991 elif isinstance(err, errors.TagError):
1992 obuf.write("Failure: invalid tag(s) given:\n%s" % msg)
1993 elif isinstance(err, errors.JobQueueDrainError):
1994 obuf.write("Failure: the job queue is marked for drain and doesn't"
1995 " accept new requests\n")
1996 elif isinstance(err, errors.JobQueueFull):
1997 obuf.write("Failure: the job queue is full and doesn't accept new"
1998 " job submissions until old jobs are archived\n")
1999 elif isinstance(err, errors.TypeEnforcementError):
2000 obuf.write("Parameter Error: %s" % msg)
2001 elif isinstance(err, errors.ParameterError):
2002 obuf.write("Failure: unknown/wrong parameter name '%s'" % msg)
2003 elif isinstance(err, luxi.NoMasterError):
2004 obuf.write("Cannot communicate with the master daemon.\nIs it running"
2005 " and listening for connections?")
2006 elif isinstance(err, luxi.TimeoutError):
2007 obuf.write("Timeout while talking to the master daemon. Jobs might have"
2008 " been submitted and will continue to run even if the call"
2009 " timed out. Useful commands in this situation are \"gnt-job"
2010 " list\", \"gnt-job cancel\" and \"gnt-job watch\". Error:\n")
2012 elif isinstance(err, luxi.PermissionError):
2013 obuf.write("It seems you don't have permissions to connect to the"
2014 " master daemon.\nPlease retry as a different user.")
2015 elif isinstance(err, luxi.ProtocolError):
2016 obuf.write("Unhandled protocol error while talking to the master daemon:\n"
2018 elif isinstance(err, errors.JobLost):
2019 obuf.write("Error checking job status: %s" % msg)
2020 elif isinstance(err, errors.QueryFilterParseError):
2021 obuf.write("Error while parsing query filter: %s\n" % err.args[0])
2022 obuf.write("\n".join(err.GetDetails()))
2023 elif isinstance(err, errors.GenericError):
2024 obuf.write("Unhandled Ganeti error: %s" % msg)
2025 elif isinstance(err, JobSubmittedException):
2026 obuf.write("JobID: %s\n" % err.args[0])
2029 obuf.write("Unhandled exception: %s" % msg)
2030 return retcode, obuf.getvalue().rstrip("\n")
2033 def GenericMain(commands, override=None, aliases=None,
2034 env_override=frozenset()):
2035 """Generic main function for all the gnt-* commands.
2037 @param commands: a dictionary with a special structure, see the design doc
2038 for command line handling.
2039 @param override: if not None, we expect a dictionary with keys that will
2040 override command line options; this can be used to pass
2041 options from the scripts to generic functions
2042 @param aliases: dictionary with command aliases {'alias': 'target, ...}
2043 @param env_override: list of environment names which are allowed to submit
2044 default args for commands
2047 # save the program name and the entire command line for later logging
2049 binary = os.path.basename(sys.argv[0]) or sys.argv[0]
2050 if len(sys.argv) >= 2:
2051 binary += " " + sys.argv[1]
2052 old_cmdline = " ".join(sys.argv[2:])
2056 binary = "<unknown program>"
2063 func, options, args = _ParseArgs(sys.argv, commands, aliases, env_override)
2064 except errors.ParameterError, err:
2065 result, err_msg = FormatError(err)
2069 if func is None: # parse error
# Apply caller-supplied option overrides on top of the parsed options.
2072 if override is not None:
2073 for key, val in override.iteritems():
2074 setattr(options, key, val)
2076 utils.SetupLogging(constants.LOG_COMMANDS, binary, debug=options.debug,
2077 stderr_logging=True)
2080 logging.info("run with arguments '%s'", old_cmdline)
2082 logging.info("run with no arguments")
# Run the selected command, translating known exception types into
# exit codes and user-facing messages.
2085 result = func(options, args)
2086 except (errors.GenericError, luxi.ProtocolError,
2087 JobSubmittedException), err:
2088 result, err_msg = FormatError(err)
2089 logging.exception("Error during command processing")
2091 except KeyboardInterrupt:
2092 result = constants.EXIT_FAILURE
2093 ToStderr("Aborted. Note that if the operation created any jobs, they"
2094 " might have been submitted and"
2095 " will continue to run in the background.")
2096 except IOError, err:
2097 if err.errno == errno.EPIPE:
2098 # our terminal went away, we'll exit
2099 sys.exit(constants.EXIT_FAILURE)
2106 def ParseNicOption(optvalue):
2107 """Parses the value of the --net option(s).
# Highest NIC index determines how many NIC slots to allocate.
2111 nic_max = max(int(nidx[0]) + 1 for nidx in optvalue)
2112 except (TypeError, ValueError), err:
2113 raise errors.OpPrereqError("Invalid NIC index passed: %s" % str(err))
# NOTE(review): [{}] * n repeats the same dict object; harmless only if
# every used slot is later reassigned rather than mutated in place.
2115 nics = [{}] * nic_max
2116 for nidx, ndict in optvalue:
2119 if not isinstance(ndict, dict):
2120 raise errors.OpPrereqError("Invalid nic/%d value: expected dict,"
2121 " got %s" % (nidx, ndict))
2123 utils.ForceDictType(ndict, constants.INIC_PARAMS_TYPES)
2130 def GenericInstanceCreate(mode, opts, args):
2131 """Add an instance to the cluster via either creation or import.
2133 @param mode: constants.INSTANCE_CREATE or constants.INSTANCE_IMPORT
2134 @param opts: the command line options selected by the user
2136 @param args: should contain only one element, the new instance name
2138 @return: the desired exit code
2143 (pnode, snode) = SplitNodeOption(opts.node)
2148 hypervisor, hvparams = opts.hypervisor
# Build the NIC list from --net options (or the mode-specific default).
2151 nics = ParseNicOption(opts.nics)
2155 elif mode == constants.INSTANCE_CREATE:
2156 # default of one nic, all auto
# Validate the mutually-exclusive disk specifications (--disk vs -s,
# and diskless template vs any disk info).
2162 if opts.disk_template == constants.DT_DISKLESS:
2163 if opts.disks or opts.sd_size is not None:
2164 raise errors.OpPrereqError("Diskless instance but disk"
2165 " information passed")
2168 if (not opts.disks and not opts.sd_size
2169 and mode == constants.INSTANCE_CREATE):
2170 raise errors.OpPrereqError("No disk information specified")
2171 if opts.disks and opts.sd_size is not None:
2172 raise errors.OpPrereqError("Please use either the '--disk' or"
2174 if opts.sd_size is not None:
2175 opts.disks = [(0, {constants.IDISK_SIZE: opts.sd_size})]
2179 disk_max = max(int(didx[0]) + 1 for didx in opts.disks)
2180 except ValueError, err:
2181 raise errors.OpPrereqError("Invalid disk index passed: %s" % str(err))
2182 disks = [{}] * disk_max
# Each disk needs either a size (create) or an adoption source.
2185 for didx, ddict in opts.disks:
2187 if not isinstance(ddict, dict):
2188 msg = "Invalid disk/%d value: expected dict, got %s" % (didx, ddict)
2189 raise errors.OpPrereqError(msg)
2190 elif constants.IDISK_SIZE in ddict:
2191 if constants.IDISK_ADOPT in ddict:
2192 raise errors.OpPrereqError("Only one of 'size' and 'adopt' allowed"
2193 " (disk %d)" % didx)
2195 ddict[constants.IDISK_SIZE] = \
2196 utils.ParseUnit(ddict[constants.IDISK_SIZE])
2197 except ValueError, err:
2198 raise errors.OpPrereqError("Invalid disk size for disk %d: %s" %
2200 elif constants.IDISK_ADOPT in ddict:
2201 if mode == constants.INSTANCE_IMPORT:
2202 raise errors.OpPrereqError("Disk adoption not allowed for instance"
2204 ddict[constants.IDISK_SIZE] = 0
2206 raise errors.OpPrereqError("Missing size or adoption source for"
2210 if opts.tags is not None:
2211 tags = opts.tags.split(",")
2215 utils.ForceDictType(opts.beparams, constants.BES_PARAMETER_TYPES)
2216 utils.ForceDictType(hvparams, constants.HVS_PARAMETER_TYPES)
# Mode-specific parameters for the opcode below.
2218 if mode == constants.INSTANCE_CREATE:
2221 force_variant = opts.force_variant
2224 no_install = opts.no_install
2225 identify_defaults = False
2226 elif mode == constants.INSTANCE_IMPORT:
2229 force_variant = False
2230 src_node = opts.src_node
2231 src_path = opts.src_dir
2233 identify_defaults = opts.identify_defaults
2235 raise errors.ProgrammerError("Invalid creation mode %s" % mode)
2237 op = opcodes.OpInstanceCreate(instance_name=instance,
2239 disk_template=opts.disk_template,
2241 pnode=pnode, snode=snode,
2242 ip_check=opts.ip_check,
2243 name_check=opts.name_check,
2244 wait_for_sync=opts.wait_for_sync,
2245 file_storage_dir=opts.file_storage_dir,
2246 file_driver=opts.file_driver,
2247 iallocator=opts.iallocator,
2248 hypervisor=hypervisor,
2250 beparams=opts.beparams,
2251 osparams=opts.osparams,
2255 force_variant=force_variant,
2259 no_install=no_install,
2260 identify_defaults=identify_defaults)
2262 SubmitOrSend(op, opts)
2266 class _RunWhileClusterStoppedHelper:
2267 """Helper class for L{RunWhileClusterStopped} to simplify state management
2270 def __init__(self, feedback_fn, cluster_name, master_node, online_nodes):
2271 """Initializes this class.
2273 @type feedback_fn: callable
2274 @param feedback_fn: Feedback function
2275 @type cluster_name: string
2276 @param cluster_name: Cluster name
2277 @type master_node: string
2278 @param master_node Master node name
2279 @type online_nodes: list
2280 @param online_nodes: List of names of online nodes
2283 self.feedback_fn = feedback_fn
2284 self.cluster_name = cluster_name
2285 self.master_node = master_node
2286 self.online_nodes = online_nodes
2288 self.ssh = ssh.SshRunner(self.cluster_name)
# Remote nodes are every online node except the master itself.
2290 self.nonmaster_nodes = [name for name in online_nodes
2291 if name != master_node]
2293 assert self.master_node not in self.nonmaster_nodes
2295 def _RunCmd(self, node_name, cmd):
2296 """Runs a command on the local or a remote machine.
2298 @type node_name: string
2299 @param node_name: Machine name
2304 if node_name is None or node_name == self.master_node:
2305 # No need to use SSH
2306 result = utils.RunCmd(cmd)
2308 result = self.ssh.Run(node_name, "root", utils.ShellQuoteArgs(cmd))
# Any failure is fatal; include node, exit code and output in the error.
2311 errmsg = ["Failed to run command %s" % result.cmd]
2313 errmsg.append("on node %s" % node_name)
2314 errmsg.append(": exitcode %s and error %s" %
2315 (result.exit_code, result.output))
2316 raise errors.OpExecError(" ".join(errmsg))
2318 def Call(self, fn, *args):
2319 """Call function while all daemons are stopped.
2322 @param fn: Function to be called
2325 # Pause watcher by acquiring an exclusive lock on watcher state file
2326 self.feedback_fn("Blocking watcher")
2327 watcher_block = utils.FileLock.Open(constants.WATCHER_LOCK_FILE)
2329 # TODO: Currently, this just blocks. There's no timeout.
2330 # TODO: Should it be a shared lock?
2331 watcher_block.Exclusive(blocking=True)
2333 # Stop master daemons, so that no new jobs can come in and all running
2335 self.feedback_fn("Stopping master daemons")
2336 self._RunCmd(None, [constants.DAEMON_UTIL, "stop-master"])
2338 # Stop daemons on all nodes
2339 for node_name in self.online_nodes:
2340 self.feedback_fn("Stopping daemons on %s" % node_name)
2341 self._RunCmd(node_name, [constants.DAEMON_UTIL, "stop-all"])
2343 # All daemons are shut down now
2345 return fn(self, *args)
2346 except Exception, err:
2347 _, errmsg = FormatError(err)
2348 logging.exception("Caught exception")
2349 self.feedback_fn(errmsg)
2352 # Start cluster again, master node last
2353 for node_name in self.nonmaster_nodes + [self.master_node]:
2354 self.feedback_fn("Starting daemons on %s" % node_name)
2355 self._RunCmd(node_name, [constants.DAEMON_UTIL, "start-all"])
# Resume the watcher once daemons are back.
2358 watcher_block.Close()
2361 def RunWhileClusterStopped(feedback_fn, fn, *args):
2362 """Calls a function while all cluster daemons are stopped.
2364 @type feedback_fn: callable
2365 @param feedback_fn: Feedback function
2367 @param fn: Function to be called when daemons are stopped
2370 feedback_fn("Gathering cluster information")
2372 # This ensures we're running on the master daemon
2375 (cluster_name, master_node) = \
2376 cl.QueryConfigValues(["cluster_name", "master_node"])
2378 online_nodes = GetOnlineNodes([], cl=cl)
2380 # Don't keep a reference to the client. The master daemon will go away.
2383 assert master_node in online_nodes
# Delegate the actual stop/run/restart sequence to the helper class.
2385 return _RunWhileClusterStoppedHelper(feedback_fn, cluster_name, master_node,
2386 online_nodes).Call(fn, *args)
# NOTE(review): gap-sampled block.  Not visible here: the `units` parameter in
# the signature (documented at 2412-2415), the initializations of
# `format_fields` and `result`, the defaulting of numfields/unitfields to []
# (bodies of the `if ... is None` tests at 2424/2426), and the final
# `return result`.  The visible logic builds a printf-style format string per
# field, then formats each data row and a header row with it.
2389 def GenerateTable(headers, fields, separator, data,
2390 numfields=None, unitfields=None,
2392 """Prints a table with headers and different fields.
2395 @param headers: dictionary mapping field names to headers for
2398 @param fields: the field names corresponding to each row in
2400 @param separator: the separator to be used; if this is None,
2401 the default 'smart' algorithm is used which computes optimal
2402 field width, otherwise just the separator is used between
2405 @param data: a list of lists, each sublist being one row to be output
2406 @type numfields: list
2407 @param numfields: a list with the fields that hold numeric
2408 values and thus should be right-aligned
2409 @type unitfields: list
2410 @param unitfields: a list with the fields that hold numeric
2411 values that should be formatted with the units field
2412 @type units: string or None
2413 @param units: the units we should use for formatting, or None for
2414 automatic choice (human-readable for non-separator usage, otherwise
2415 megabytes); this is a one-letter string
2424 if numfields is None:
2426 if unitfields is None:
# Wrap the plain lists in FieldSets so per-field membership tests below
# (numfields.Matches / unitfields.Matches) support the FieldSet semantics.
2429 numfields = utils.FieldSet(*numfields) # pylint: disable=W0142
2430 unitfields = utils.FieldSet(*unitfields) # pylint: disable=W0142
2433 for field in fields:
2434 if headers and field not in headers:
2435 # TODO: handle better unknown fields (either revert to old
2436 # style of raising exception, or deal more intelligently with
2438 headers[field] = field
# Choose a conversion per field: plain "%s" when a separator is used,
# right-aligned "%*s" for numeric fields, left-aligned "%-*s" otherwise.
2439 if separator is not None:
2440 format_fields.append("%s")
2441 elif numfields.Matches(field):
2442 format_fields.append("%*s")
2444 format_fields.append("%-*s")
2446 if separator is None:
2447 mlens = [0 for name in fields]
2448 format_str = " ".join(format_fields)
# Escape "%" in the user-supplied separator so it survives %-formatting.
2450 format_str = separator.replace("%", "%%").join(format_fields)
2455 for idx, val in enumerate(row):
2456 if unitfields.Matches(fields[idx]):
2459 except (TypeError, ValueError):
2462 val = row[idx] = utils.FormatUnit(val, units)
2463 val = row[idx] = str(val)
2464 if separator is None:
# Track the widest value per column for the smart-width algorithm.
2465 mlens[idx] = max(mlens[idx], len(val))
2470 for idx, name in enumerate(fields):
2472 if separator is None:
2473 mlens[idx] = max(mlens[idx], len(hdr))
2474 args.append(mlens[idx])
2476 result.append(format_str % tuple(args))
2478 if separator is None:
2479 assert len(mlens) == len(fields)
2481 if fields and not numfields.Matches(fields[-1]):
2487 line = ["-" for _ in fields]
2488 for idx in range(len(fields)):
2489 if separator is None:
2490 args.append(mlens[idx])
2491 args.append(line[idx])
2492 result.append(format_str % tuple(args))
# NOTE(review): only the signature and the docstring opener survived the
# sampling; the function body (lines 2499+) is not visible here.
2497 def _FormatBool(value):
2498 """Formats a boolean value as a string.
# Maps a query field type (QFT_*) to a (formatter callable, align-right) pair.
# QFT_UNIT is deliberately absent: it needs the runtime `unit` argument and is
# handled specially in _GetColumnFormatter (asserted at line 2535).
# NOTE(review): the closing brace of this dict literal is sampled out.
2506 #: Default formatting for query results; (callback, align right)
2507 _DEFAULT_FORMAT_QUERY = {
2508 constants.QFT_TEXT: (str, False),
2509 constants.QFT_BOOL: (_FormatBool, False),
2510 constants.QFT_NUMBER: (str, True),
2511 constants.QFT_TIMESTAMP: (utils.FormatTime, False),
2512 constants.QFT_OTHER: (str, False),
2513 constants.QFT_UNKNOWN: (str, False),
# NOTE(review): gap-sampled.  Not visible: the early `return fmt` when the
# override lookup at 2531 succeeds, and the `return fmt` for a successful
# _DEFAULT_FORMAT_QUERY lookup before the NotImplementedError fallback.
2517 def _GetColumnFormatter(fdef, override, unit):
2518 """Returns formatting function for a field.
2520 @type fdef: L{objects.QueryFieldDefinition}
2521 @type override: dict
2522 @param override: Dictionary for overriding field formatting functions,
2523 indexed by field name, contents like L{_DEFAULT_FORMAT_QUERY}
2525 @param unit: Unit used for formatting fields of type L{constants.QFT_UNIT}
2526 @rtype: tuple; (callable, bool)
2527 @return: Returns the function to format a value (takes one parameter) and a
2528 boolean for aligning the value on the right-hand side
# Per-field override wins over the kind-based defaults.
2531 fmt = override.get(fdef.name, None)
2535 assert constants.QFT_UNIT not in _DEFAULT_FORMAT_QUERY
2537 if fdef.kind == constants.QFT_UNIT:
2538 # Can't keep this information in the static dictionary
# Close over `unit` so the caller-chosen unit is used; units are
# right-aligned like other numeric output.
2539 return (lambda value: utils.FormatUnit(value, unit), True)
2541 fmt = _DEFAULT_FORMAT_QUERY.get(fdef.kind, None)
2545 raise NotImplementedError("Can't format column type '%s'" % fdef.kind)
# NOTE(review): gap-sampled class.  The `self._fn = fn` assignment in
# __init__ (around line 2563) is not visible but `self._fn` is used at 2577.
2548 class _QueryColumnFormatter:
2549 """Callable class for formatting fields of a query.
2552 def __init__(self, fn, status_fn, verbose):
2553 """Initializes this class.
2556 @param fn: Formatting function
2557 @type status_fn: callable
2558 @param status_fn: Function to report fields' status
2559 @type verbose: boolean
2560 @param verbose: whether to use verbose field descriptions or not
2564 self._status_fn = status_fn
2565 self._verbose = verbose
2567 def __call__(self, data):
2568 """Returns a field's string representation.
# `data` is a (result-status, value) pair as produced by the query layer.
2571 (status, value) = data
# Report the status so callers (FormatQueryResult) can collect statistics.
2574 self._status_fn(status)
2576 if status == constants.RS_NORMAL:
2577 return self._fn(value)
# Any abnormal status must carry no payload.
2579 assert value is None, \
2580 "Found value %r for abnormal status %s" % (value, status)
2582 return FormatResultError(status, self._verbose)
# NOTE(review): gap-sampled.  The control flow connecting the dict lookup at
# 2597 to the NotImplementedError at 2599 (presumably a try/except KeyError)
# and the returns of verbose_text/normal_text are not visible here — confirm
# against the full source.
2585 def FormatResultError(status, verbose):
2586 """Formats result status other than L{constants.RS_NORMAL}.
2588 @param status: The result status
2589 @type verbose: boolean
2590 @param verbose: Whether to return the verbose text
2591 @return: Text of result status
2594 assert status != constants.RS_NORMAL, \
2595 "FormatResultError called with status equal to constants.RS_NORMAL"
2597 (verbose_text, normal_text) = constants.RSS_DESCRIPTION[status]
2599 raise NotImplementedError("Unknown status %s" % status)
# NOTE(review): gap-sampled.  Not visible: the `columns = []` init, the body
# of _RecordStatus (it increments `stats`, which 2653-2654 then validate),
# the closing arguments of the TableColumn(...) call at 2645-2646, and the
# assignments `status = QR_UNKNOWN` / `status = QR_NORMAL` implied by the
# if/elif chain at 2658-2663.
2606 def FormatQueryResult(result, unit=None, format_override=None, separator=None,
2607 header=False, verbose=False):
2608 """Formats data in L{objects.QueryResponse}.
2610 @type result: L{objects.QueryResponse}
2611 @param result: result of query operation
2613 @param unit: Unit used for formatting fields of type L{constants.QFT_UNIT},
2614 see L{utils.text.FormatUnit}
2615 @type format_override: dict
2616 @param format_override: Dictionary for overriding field formatting functions,
2617 indexed by field name, contents like L{_DEFAULT_FORMAT_QUERY}
2618 @type separator: string or None
2619 @param separator: String used to separate fields
2621 @param header: Whether to output header row
2622 @type verbose: boolean
2623 @param verbose: whether to use verbose field descriptions or not
2632 if format_override is None:
2633 format_override = {}
# One counter per possible result status; filled in by _RecordStatus as
# _QueryColumnFormatter reports each field's status.
2635 stats = dict.fromkeys(constants.RS_ALL, 0)
2637 def _RecordStatus(status):
2642 for fdef in result.fields:
2643 assert fdef.title and fdef.name
2644 (fn, align_right) = _GetColumnFormatter(fdef, format_override, unit)
2645 columns.append(TableColumn(fdef.title,
2646 _QueryColumnFormatter(fn, _RecordStatus,
# Formatting the table drives the formatters, which populate `stats`.
2650 table = FormatTable(result.data, columns, header, separator)
2652 # Collect statistics
2653 assert len(stats) == len(constants.RS_ALL)
2654 assert compat.all(count >= 0 for count in stats.values())
2656 # Determine overall status. If there was no data, unknown fields must be
2657 # detected via the field definitions.
2658 if (stats[constants.RS_UNKNOWN] or
2659 (not result.data and _GetUnknownFields(result.fields))):
2661 elif compat.any(count > 0 for key, count in stats.items()
2662 if key != constants.RS_NORMAL):
2663 status = QR_INCOMPLETE
2667 return (status, table)
# Filters the given field definitions down to those with kind QFT_UNKNOWN.
# NOTE(review): docstring closing lines are sampled out.
2670 def _GetUnknownFields(fdefs):
2671 """Returns list of unknown fields included in C{fdefs}.
2673 @type fdefs: list of L{objects.QueryFieldDefinition}
2676 return [fdef for fdef in fdefs
2677 if fdef.kind == constants.QFT_UNKNOWN]
# NOTE(review): gap-sampled.  The guard around the ToStderr call and the
# function's return statement are not visible here; callers (GenericList at
# 2731) use the return value as a boolean `found_unknown`.
2680 def _WarnUnknownFields(fdefs):
2681 """Prints a warning to stderr if a query included unknown fields.
2683 @type fdefs: list of L{objects.QueryFieldDefinition}
2686 unknown = _GetUnknownFields(fdefs)
2688 ToStderr("Warning: Queried for unknown fields %s",
2689 utils.CommaJoin(fdef.name for fdef in unknown))
# NOTE(review): gap-sampled.  Not visible: the client defaulting (the `cl`
# used at 2729 when the caller passes cl=None), some keyword arguments of the
# FormatQueryResult call (2733-2735), and the loop printing `data`.
2695 def GenericList(resource, fields, names, unit, separator, header, cl=None,
2696 format_override=None, verbose=False, force_filter=False):
2697 """Generic implementation for listing all items of a resource.
2699 @param resource: One of L{constants.QR_VIA_LUXI}
2700 @type fields: list of strings
2701 @param fields: List of fields to query for
2702 @type names: list of strings
2703 @param names: Names of items to query for
2704 @type unit: string or None
2705 @param unit: Unit used for formatting fields of type L{constants.QFT_UNIT} or
2706 None for automatic choice (human-readable for non-separator usage,
2707 otherwise megabytes); this is a one-letter string
2708 @type separator: string or None
2709 @param separator: String used to separate fields
2711 @param header: Whether to show header row
2712 @type force_filter: bool
2713 @param force_filter: Whether to always treat names as filter
2714 @type format_override: dict
2715 @param format_override: Dictionary for overriding field formatting functions,
2716 indexed by field name, contents like L{_DEFAULT_FORMAT_QUERY}
2717 @type verbose: boolean
2718 @param verbose: whether to use verbose field descriptions or not
# Turn the name list into a query filter (or treat it as a raw filter).
2724 qfilter = qlang.MakeFilter(names, force_filter)
2729 response = cl.Query(resource, fields, qfilter)
2731 found_unknown = _WarnUnknownFields(response.fields)
2733 (status, data) = FormatQueryResult(response, unit=unit, separator=separator,
2735 format_override=format_override,
# Unknown fields must be reported consistently by both code paths.
2741 assert ((found_unknown and status == QR_UNKNOWN) or
2742 (not found_unknown and status != QR_UNKNOWN))
2744 if status == QR_UNKNOWN:
2745 return constants.EXIT_UNKNOWN_FIELD
2747 # TODO: Should the list command fail if not all data could be collected?
2748 return constants.EXIT_SUCCESS
# NOTE(review): gap-sampled.  Not visible: client defaulting for `cl`, the
# sorting of response.fields, the `columns = [` opener for 2774-2776, the
# ToStdout call printing each `line`, and the `if found_unknown:` guard
# implied before the EXIT_UNKNOWN_FIELD return at 2785.
2751 def GenericListFields(resource, fields, separator, header, cl=None):
2752 """Generic implementation for listing fields for a resource.
2754 @param resource: One of L{constants.QR_VIA_LUXI}
2755 @type fields: list of strings
2756 @param fields: List of fields to query for
2757 @type separator: string or None
2758 @param separator: String used to separate fields
2760 @param header: Whether to show header row
2769 response = cl.QueryFields(resource, fields)
2771 found_unknown = _WarnUnknownFields(response.fields)
2774 TableColumn("Name", str, False),
2775 TableColumn("Title", str, False),
2776 TableColumn("Description", str, False),
2779 rows = [[fdef.name, fdef.title, fdef.doc] for fdef in response.fields]
2781 for line in FormatTable(rows, columns, header, separator):
2785 return constants.EXIT_UNKNOWN_FIELD
2787 return constants.EXIT_SUCCESS
# NOTE(review): the `class TableColumn` header line (2790) and the
# assignments of self.title / self.format in __init__ are sampled out;
# FormatTable reads col.title and col.format, so they exist in the original.
2791 """Describes a column for L{FormatTable}.
2794 def __init__(self, title, fn, align_right):
2795 """Initializes this class.
2798 @param title: Column title
2800 @param fn: Formatting function
2801 @type align_right: bool
2802 @param align_right: Whether to align values on the right-hand side
2807 self.align_right = align_right
# Builds a "%<sign><width>s" conversion; the assignment of `sign` (empty for
# right alignment, "-" for left) is sampled out of this view.
2810 def _GetColFormatString(width, align_right):
2811 """Returns the format string for a field.
2819 return "%%%s%ss" % (sign, width)
# NOTE(review): gap-sampled.  Not visible: the `if header:` / `else:` frame
# around 2836-2840, the `for row in rows:` loop opener before 2844, and the
# body of the `if columns and not columns[-1].align_right:` branch at 2860.
2822 def FormatTable(rows, columns, header, separator):
2823 """Formats data as a table.
2825 @type rows: list of lists
2826 @param rows: Row data, one list per row
2827 @type columns: list of L{TableColumn}
2828 @param columns: Column descriptions
2830 @param header: Whether to show header row
2831 @type separator: string or None
2832 @param separator: String used to separate columns
# Seed the output with the header row and its widths...
2836 data = [[col.title for col in columns]]
2837 colwidth = [len(col.title) for col in columns]
# ...or start from zero widths when no header is requested.
2840 colwidth = [0 for _ in columns]
2844 assert len(row) == len(columns)
2846 formatted = [col.format(value) for value, col in zip(row, columns)]
2848 if separator is None:
2849 # Update column widths
2850 for idx, (oldwidth, value) in enumerate(zip(colwidth, formatted)):
2851 # Modifying a list's items while iterating is fine
2852 colwidth[idx] = max(oldwidth, len(value))
2854 data.append(formatted)
2856 if separator is not None:
2857 # Return early if a separator is used
2858 return [separator.join(row) for row in data]
2860 if columns and not columns[-1].align_right:
2861 # Avoid unnecessary spaces at end of line
2864 # Build format string
2865 fmt = " ".join([_GetColFormatString(width, col.align_right)
2866 for col, width in zip(columns, colwidth)])
2868 return [fmt % tuple(row) for row in data]
# NOTE(review): gap-sampled.  The non-tuple fallback (body of the isinstance
# guard at 2881) and the `(sec, usec) = ts` unpacking that defines the names
# used at 2884 are not visible here.
2871 def FormatTimestamp(ts):
2872 """Formats a given timestamp.
2875 @param ts: a timeval-type timestamp, a tuple of seconds and microseconds
2878 @return: a string with the formatted timestamp
2881 if not isinstance(ts, (tuple, list)) or len(ts) != 2:
# "%F %T" is "YYYY-MM-DD HH:MM:SS"; microseconds are appended manually.
2884 return time.strftime("%F %T", time.localtime(sec)) + ".%06d" % usec
# NOTE(review): gap-sampled.  Not visible: the empty-value guard before 2903,
# the suffix_map definition, the try/int() conversion paths that the two
# `except (TypeError, ValueError)` clauses at 2914 and 2924 belong to, the
# suffix-stripping before 2919, and the final `return value`.
2887 def ParseTimespec(value):
2888 """Parse a time specification.
2890 The following suffixes will be recognized:
2898 Without any suffix, the value will be taken to be in seconds.
2903 raise errors.OpPrereqError("Empty time specification passed")
2911 if value[-1] not in suffix_map:
2914 except (TypeError, ValueError):
2915 raise errors.OpPrereqError("Invalid time specification '%s'" % value)
2917 multiplier = suffix_map[value[-1]]
2919 if not value: # no data left after stripping the suffix
2920 raise errors.OpPrereqError("Invalid time specification (only"
2923 value = int(value) * multiplier
2924 except (TypeError, ValueError):
2925 raise errors.OpPrereqError("Invalid time specification '%s'" % value)
# NOTE(review): gap-sampled.  Not visible: the `qfilter = []` init and the
# `if nodes:` guard before 2961, the `if filter_master:` guard before 2968,
# the `else:` between 2972 and 2974, client defaulting for `cl`, the bodies
# of _IsOffline/_GetName/_GetSip (their defs/returns are partially shown),
# and the selection of `fn` (name vs secondary-IP extractor) used at 3004.
2929 def GetOnlineNodes(nodes, cl=None, nowarn=False, secondary_ips=False,
2930 filter_master=False, nodegroup=None):
2931 """Returns the names of online nodes.
2933 This function will also log a warning on stderr with the names of
2936 @param nodes: if not empty, use only this subset of nodes (minus the
2938 @param cl: if not None, luxi client to use
2939 @type nowarn: boolean
2940 @param nowarn: by default, this function will output a note with the
2941 offline nodes that are skipped; if this parameter is True the
2942 note is not displayed
2943 @type secondary_ips: boolean
2944 @param secondary_ips: if True, return the secondary IPs instead of the
2945 names, useful for doing network traffic over the replication interface
2947 @type filter_master: boolean
2948 @param filter_master: if True, do not return the master node in the list
2949 (useful in coordination with secondary_ips where we cannot check our
2950 node name against the list)
2951 @type nodegroup: string
2952 @param nodegroup: If set, only return nodes in this node group
2961 qfilter.append(qlang.MakeSimpleFilter("name", nodes))
2963 if nodegroup is not None:
# Accept the node group by either its name or its UUID.
2964 qfilter.append([qlang.OP_OR, [qlang.OP_EQUAL, "group", nodegroup],
2965 [qlang.OP_EQUAL, "group.uuid", nodegroup]])
2968 qfilter.append([qlang.OP_NOT, [qlang.OP_TRUE, "master"]])
# Combine the accumulated sub-filters with a logical AND when needed.
2971 if len(qfilter) > 1:
2972 final_filter = [qlang.OP_AND] + qfilter
2974 assert len(qfilter) == 1
2975 final_filter = qfilter[0]
2979 result = cl.Query(constants.QR_NODE, ["name", "offline", "sip"], final_filter)
# Each query row is [(status, name), (status, offline), (status, sip)].
2981 def _IsOffline(row):
2982 (_, (_, offline), _) = row
2986 ((_, name), _, _) = row
2990 (_, _, (_, sip)) = row
2993 (offline, online) = compat.partition(result.data, _IsOffline)
2995 if offline and not nowarn:
2996 ToStderr("Note: skipping offline node(s): %s" %
2997 utils.CommaJoin(map(_GetName, offline)))
3004 return map(fn, online)
# NOTE(review): gap-sampled.  Not visible: the `try:` that the IOError
# handler at 3024 belongs to, the fallback `stream.write(txt)` when no args
# are given, the trailing newline write, and the re-raise for non-EPIPE
# errors.
3007 def _ToStream(stream, txt, *args):
3008 """Write a message to a stream, bypassing the logging system
3010 @type stream: file object
3011 @param stream: the file to which we should write
3013 @param txt: the message
3019 stream.write(txt % args)
3024 except IOError, err:
3025 if err.errno == errno.EPIPE:
3026 # our terminal went away, we'll exit
3027 sys.exit(constants.EXIT_FAILURE)
# Convenience wrapper: format txt with args and write it to sys.stdout.
3032 def ToStdout(txt, *args):
3033 """Write a message to stdout only, bypassing the logging system
3035 This is just a wrapper over _ToStream.
3038 @param txt: the message
3041 _ToStream(sys.stdout, txt, *args)
# Convenience wrapper: format txt with args and write it to sys.stderr.
3044 def ToStderr(txt, *args):
3045 """Write a message to stderr only, bypassing the logging system
3047 This is just a wrapper over _ToStream.
3050 @param txt: the message
3053 _ToStream(sys.stderr, txt, *args)
# NOTE(review): gap-sampled class.  Not visible among others: __init__'s
# assignments of self.cl / self.opts / self.queue / self.jobs, the bodies of
# _IfName, the `if each:` / `else:` frame in SubmitPending, the `results = []`
# inits, the `while self.jobs:` loop and `success` assignments in GetResults,
# the result sorting at 3183, and the `if wait:` frame in WaitOrShow.
# Internal queue entry: (idx, name, ops); job entry: (idx, status, id, name).
3056 class JobExecutor(object):
3057 """Class which manages the submission and execution of multiple jobs.
3059 Note that instances of this class should not be reused between
3063 def __init__(self, cl=None, verbose=True, opts=None, feedback_fn=None):
3068 self.verbose = verbose
3071 self.feedback_fn = feedback_fn
# Monotonic counter used to preserve submission order in results.
3072 self._counter = itertools.count()
3075 def _IfName(name, fmt):
3076 """Helper function for formatting name.
3084 def QueueJob(self, name, *ops):
3085 """Record a job for later submit.
3088 @param name: a description of the job, will be used in WaitJobSet
3091 SetGenericOpcodeOpts(ops, self.opts)
3092 self.queue.append((self._counter.next(), name, ops))
3094 def AddJobId(self, name, status, job_id):
3095 """Adds a job ID to the internal queue.
3098 self.jobs.append((self._counter.next(), status, job_id, name))
3100 def SubmitPending(self, each=False):
3101 """Submit all pending jobs.
3106 for (_, _, ops) in self.queue:
3107 # SubmitJob will remove the success status, but raise an exception if
3108 # the submission fails, so we'll notice that anyway.
3109 results.append([True, self.cl.SubmitJob(ops)])
# Bulk path: one SubmitManyJobs call returns per-job (status, data) pairs.
3111 results = self.cl.SubmitManyJobs([ops for (_, _, ops) in self.queue])
3112 for ((status, data), (idx, name, _)) in zip(results, self.queue):
3113 self.jobs.append((idx, status, data, name))
3115 def _ChooseJob(self):
3116 """Choose a non-waiting/queued job to poll next.
3119 assert self.jobs, "_ChooseJob called with empty job list"
# Only query a bounded batch of job IDs at a time.
3121 result = self.cl.QueryJobs([i[2] for i in self.jobs[:_CHOOSE_BATCH]],
3125 for job_data, status in zip(self.jobs, result):
3126 if (isinstance(status, list) and status and
3127 status[0] in (constants.JOB_STATUS_QUEUED,
3128 constants.JOB_STATUS_WAITING,
3129 constants.JOB_STATUS_CANCELING)):
3130 # job is still present and waiting
3132 # good candidate found (either running job or lost job)
3133 self.jobs.remove(job_data)
# Nothing better found: fall back to the oldest job.
3137 return self.jobs.pop(0)
3139 def GetResults(self):
3140 """Wait for and return the results of all jobs.
3143 @return: list of tuples (success, job results), in the same order
3144 as the submitted jobs; if a job has failed, instead of the result
3145 there will be the error message
3149 self.SubmitPending()
3152 ok_jobs = [row[2] for row in self.jobs if row[1]]
3154 ToStdout("Submitted jobs %s", utils.CommaJoin(ok_jobs))
3156 # first, remove any non-submitted jobs
3157 self.jobs, failures = compat.partition(self.jobs, lambda x: x[1])
3158 for idx, _, jid, name in failures:
3159 ToStderr("Failed to submit job%s: %s", self._IfName(name, " for %s"), jid)
3160 results.append((idx, False, jid))
3163 (idx, _, jid, name) = self._ChooseJob()
3164 ToStdout("Waiting for job %s%s ...", jid, self._IfName(name, " for %s"))
3166 job_result = PollJob(jid, cl=self.cl, feedback_fn=self.feedback_fn)
3168 except errors.JobLost, err:
3169 _, job_result = FormatError(err)
3170 ToStderr("Job %s%s has been archived, cannot check its result",
3171 jid, self._IfName(name, " for %s"))
3173 except (errors.GenericError, luxi.ProtocolError), err:
3174 _, job_result = FormatError(err)
3176 # the error message will always be shown, verbose or not
3177 ToStderr("Job %s%s has failed: %s",
3178 jid, self._IfName(name, " for %s"), job_result)
3180 results.append((idx, success, job_result))
3182 # sort based on the index, then drop it
3184 results = [i[1:] for i in results]
3188 def WaitOrShow(self, wait):
3189 """Wait for job results or only print the job IDs.
3192 @param wait: whether to wait or not
3196 return self.GetResults()
3199 self.SubmitPending()
3200 for _, status, result, name in self.jobs:
3202 ToStdout("%s: %s", result, name)
3204 ToStderr("Failure for %s: %s", name, result)
3205 return [row[1:3] for row in self.jobs]
# Writes one "- key: value" line per parameter into `buf`, indenting by
# `level` spaces; parameters missing from param_dict are shown as
# "default (<actual value>)".
3208 def FormatParameterDict(buf, param_dict, actual, level=1):
3209 """Formats a parameter dictionary.
3211 @type buf: L{StringIO}
3212 @param buf: the buffer into which to write
3213 @type param_dict: dict
3214 @param param_dict: the own parameters
3216 @param actual: the current parameter set (including defaults)
3217 @param level: Level of indent
3220 indent = " " * level
3221 for key in sorted(actual):
3222 val = param_dict.get(key, "default (%s)" % actual[key])
3223 buf.write("%s- %s: %s\n" % (indent, key, val))
3226 def ConfirmOperation(names, list_type, text, extra=""):
3227 """Ask the user to confirm an operation on a list of list_type.
3229 This function is used to request confirmation for doing an operation
3230 on a given list of list_type.
3233 @param names: the list of names that we display when
3234 we ask for confirmation
3235 @type list_type: str
3236 @param list_type: Human readable name for elements in the list (e.g. nodes)
3238 @param text: the operation that the user should confirm
3240 @return: True or False depending on user's confirmation.
3244 msg = ("The %s will operate on %d %s.\n%s"
3245 "Do you want to continue?" % (text, count, list_type, extra))
3246 affected = (("\nAffected %s:\n" % list_type) +
3247 "\n".join([" %s" % name for name in names]))
3249 choices = [("y", True, "Yes, execute the %s" % text),
3250 ("n", False, "No, abort the %s" % text)]
3253 choices.insert(1, ("v", "v", "View the list of affected %s" % list_type))
3256 question = msg + affected
3258 choice = AskUser(question, choices)
3261 choice = AskUser(msg + affected, choices)