# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.
22 """Module dealing with command line parsing"""
33 from cStringIO import StringIO
35 from ganeti import utils
36 from ganeti import errors
37 from ganeti import constants
38 from ganeti import opcodes
39 from ganeti import luxi
40 from ganeti import ssconf
41 from ganeti import rpc
42 from ganeti import ssh
43 from ganeti import compat
44 from ganeti import netutils
45 from ganeti import qlang
47 from optparse import (OptionParser, TitledHelpFormatter,
48 Option, OptionValueError)
52 # Command line options
65 "CLUSTER_DOMAIN_SECRET_OPT",
83 "FILESTORE_DRIVER_OPT",
89 "GLOBAL_SHARED_FILEDIR_OPT",
94 "DEFAULT_IALLOCATOR_OPT",
95 "IDENTIFY_DEFAULTS_OPT",
98 "IGNORE_FAILURES_OPT",
100 "IGNORE_REMOVE_FAILURES_OPT",
101 "IGNORE_SECONDARIES_OPT",
105 "MAINTAIN_NODE_HEALTH_OPT",
107 "MASTER_NETMASK_OPT",
109 "MIGRATION_MODE_OPT",
111 "NEW_CLUSTER_CERT_OPT",
112 "NEW_CLUSTER_DOMAIN_SECRET_OPT",
113 "NEW_CONFD_HMAC_KEY_OPT",
116 "NEW_SPICE_CERT_OPT",
118 "NODE_FORCE_JOIN_OPT",
120 "NODE_PLACEMENT_OPT",
124 "NODRBD_STORAGE_OPT",
130 "NOMODIFY_ETCHOSTS_OPT",
131 "NOMODIFY_SSH_SETUP_OPT",
137 "NOSSH_KEYCHECK_OPT",
151 "PREALLOC_WIPE_DISKS_OPT",
152 "PRIMARY_IP_VERSION_OPT",
158 "REMOVE_INSTANCE_OPT",
163 "SECONDARY_ONLY_OPT",
167 "SHUTDOWN_TIMEOUT_OPT",
174 "STARTUP_PAUSED_OPT",
183 "USE_EXTERNAL_MIP_SCRIPT",
188 # Generic functions for CLI programs
191 "GenericInstanceCreate",
197 "JobSubmittedException",
199 "RunWhileClusterStopped",
203 # Formatting functions
204 "ToStderr", "ToStdout",
207 "FormatParameterDict",
216 # command line options support infrastructure
217 "ARGS_MANY_INSTANCES",
236 "OPT_COMPL_INST_ADD_NODES",
237 "OPT_COMPL_MANY_NODES",
238 "OPT_COMPL_ONE_IALLOCATOR",
239 "OPT_COMPL_ONE_INSTANCE",
240 "OPT_COMPL_ONE_NODE",
241 "OPT_COMPL_ONE_NODEGROUP",
247 "COMMON_CREATE_OPTS",
#: Priorities (sorted), mapping user-visible names to opcode priority values
_PRIORITY_NAMES = [
  ("low", constants.OP_PRIO_LOW),
  ("normal", constants.OP_PRIO_NORMAL),
  ("high", constants.OP_PRIO_HIGH),
  ]

#: Priority dictionary for easier lookup
# TODO: Replace this and _PRIORITY_NAMES with a single sorted dictionary once
# we migrate to Python 2.6
_PRIONAME_TO_VALUE = dict(_PRIORITY_NAMES)
265 # Query result status for clients
268 QR_INCOMPLETE) = range(3)
270 #: Maximum batch size for ChooseJob
275 def __init__(self, min=0, max=None): # pylint: disable=W0622
280 return ("<%s min=%s max=%s>" %
281 (self.__class__.__name__, self.min, self.max))
class ArgSuggest(_Argument):
  """Suggesting argument.

  Value can be any of the ones passed to the constructor.

  """
  # pylint: disable=W0622
  def __init__(self, min=0, max=None, choices=None):
    _Argument.__init__(self, min=min, max=max)
    # choices are only suggestions for completion, not enforced
    self.choices = choices

  def __repr__(self):
    return ("<%s min=%s max=%s choices=%r>" %
            (self.__class__.__name__, self.min, self.max, self.choices))
class ArgChoice(ArgSuggest):
  """Choice argument.

  Value can be any of the ones passed to the constructor. Like L{ArgSuggest},
  but value must be one of the choices.

  """
class ArgUnknown(_Argument):
  """Unknown argument to program (e.g. determined at runtime).

  """


class ArgInstance(_Argument):
  """Instances argument.

  """


class ArgNode(_Argument):
  """Node argument.

  """


class ArgGroup(_Argument):
  """Node group argument.

  """


class ArgJobId(_Argument):
  """Job ID argument.

  """


class ArgFile(_Argument):
  """File path argument.

  """


class ArgCommand(_Argument):
  """Command argument.

  """


class ArgHost(_Argument):
  """Host argument.

  """


class ArgOs(_Argument):
  """OS argument.

  """
# Pre-built argument descriptor lists for common command signatures
ARGS_MANY_INSTANCES = [ArgInstance()]
ARGS_MANY_NODES = [ArgNode()]
ARGS_MANY_GROUPS = [ArgGroup()]
ARGS_ONE_INSTANCE = [ArgInstance(min=1, max=1)]
ARGS_ONE_NODE = [ArgNode(min=1, max=1)]
ARGS_ONE_GROUP = [ArgGroup(min=1, max=1)]
ARGS_ONE_OS = [ArgOs(min=1, max=1)]
def _ExtractTagsObject(opts, args):
  """Extract the tag type object.

  Note that this function will modify its args parameter.

  @param opts: parsed command line options; must carry a C{tag_type} field
  @param args: positional arguments; the object name (if any) is popped
  @return: a (kind, name) tuple

  """
  if not hasattr(opts, "tag_type"):
    raise errors.ProgrammerError("tag_type not passed to _ExtractTagsObject")
  kind = opts.tag_type
  if kind == constants.TAG_CLUSTER:
    # the cluster is a singleton, no name argument is consumed
    # NOTE(review): reconstructed from truncated source — confirm the value
    # used as "name" for the cluster case against the original file
    retval = kind, kind
  elif kind in (constants.TAG_NODEGROUP,
                constants.TAG_NODE,
                constants.TAG_INSTANCE):
    if not args:
      raise errors.OpPrereqError("no arguments passed to the command")
    name = args.pop(0)
    retval = kind, name
  else:
    raise errors.ProgrammerError("Unhandled tag type '%s'" % kind)
  return retval
397 def _ExtendTags(opts, args):
398 """Extend the args if a source file has been given.
400 This function will extend the tags with the contents of the file
401 passed in the 'tags_source' attribute of the opts parameter. A file
402 named '-' will be replaced by stdin.
405 fname = opts.tags_source
411 new_fh = open(fname, "r")
414 # we don't use the nice 'new_data = [line.strip() for line in fh]'
415 # because of python bug 1633941
417 line = new_fh.readline()
420 new_data.append(line.strip())
423 args.extend(new_data)
def ListTags(opts, args):
  """List the tags on a given object.

  This is a generic implementation that knows how to deal with all
  three cases of tag objects (cluster, node, instance). The opts
  argument is expected to contain a tag_type field denoting what
  object type we work on.

  """
  kind, name = _ExtractTagsObject(opts, args)
  cl = GetClient()
  result = cl.QueryTags(kind, name)
  # sort for stable, user-friendly output
  result = list(result)
  result.sort()
  for tag in result:
    ToStdout(tag)
def AddTags(opts, args):
  """Add tags on a given object.

  This is a generic implementation that knows how to deal with all
  three cases of tag objects (cluster, node, instance). The opts
  argument is expected to contain a tag_type field denoting what
  object type we work on.

  """
  kind, name = _ExtractTagsObject(opts, args)
  # tags may also come from a file given via --from
  _ExtendTags(opts, args)
  if not args:
    raise errors.OpPrereqError("No tags to be added")
  op = opcodes.OpTagsSet(kind=kind, name=name, tags=args)
  SubmitOpCode(op, opts=opts)
def RemoveTags(opts, args):
  """Remove tags from a given object.

  This is a generic implementation that knows how to deal with all
  three cases of tag objects (cluster, node, instance). The opts
  argument is expected to contain a tag_type field denoting what
  object type we work on.

  """
  kind, name = _ExtractTagsObject(opts, args)
  # tags may also come from a file given via --from
  _ExtendTags(opts, args)
  if not args:
    raise errors.OpPrereqError("No tags to be removed")
  op = opcodes.OpTagsDel(kind=kind, name=name, tags=args)
  SubmitOpCode(op, opts=opts)
478 def check_unit(option, opt, value): # pylint: disable=W0613
479 """OptParsers custom converter for units.
483 return utils.ParseUnit(value)
484 except errors.UnitParseError, err:
485 raise OptionValueError("option %s: %s" % (opt, err))
def _SplitKeyVal(opt, data):
  """Convert a KeyVal string into a dict.

  This function will convert a key=val[,...] string into a dict. Empty
  values will be converted specially: keys which have the prefix 'no_'
  will have the value=False and the prefix stripped, keys with the
  prefix 'un_' will have value=None and the prefix stripped, and the
  others will have value=True.

  @type opt: string
  @param opt: a string holding the option name for which we process the
      data, used in building error messages
  @type data: string
  @param data: a string of the format key=val,key=val,...
  @rtype: dict
  @return: {key=val, key=val}
  @raises errors.ParameterError: if there are duplicate keys

  """
  kv_dict = {}
  if data:
    for elem in utils.UnescapeAndSplit(data, sep=","):
      if "=" in elem:
        key, val = elem.split("=", 1)
      else:
        # valueless entries: interpret the no_/un_ prefixes specially
        if elem.startswith(NO_PREFIX):
          key, val = elem[len(NO_PREFIX):], False
        elif elem.startswith(UN_PREFIX):
          key, val = elem[len(UN_PREFIX):], None
        else:
          key, val = elem, True
      if key in kv_dict:
        raise errors.ParameterError("Duplicate key '%s' in option %s" %
                                    (key, opt))
      kv_dict[key] = val
  return kv_dict
def check_ident_key_val(option, opt, value): # pylint: disable=W0613
  """Custom parser for ident:key=val,key=val options.

  This will store the parsed values as a tuple (ident, {key: val}). As such,
  multiple uses of this option via action=append is possible.

  """
  if ":" not in value:
    ident, rest = value, ""
  else:
    ident, rest = value.split(":", 1)

  if ident.startswith(NO_PREFIX):
    # "no_ident" means removal of the whole group; options make no sense
    if rest:
      msg = "Cannot pass options when removing parameter groups: %s" % value
      raise errors.ParameterError(msg)
    retval = (ident[len(NO_PREFIX):], False)
  elif ident.startswith(UN_PREFIX):
    # "un_ident" resets the group to defaults; options make no sense either
    if rest:
      msg = "Cannot pass options when removing parameter groups: %s" % value
      raise errors.ParameterError(msg)
    retval = (ident[len(UN_PREFIX):], None)
  else:
    kv_dict = _SplitKeyVal(opt, rest)
    retval = (ident, kv_dict)
  return retval
def check_key_val(option, opt, value): # pylint: disable=W0613
  """Custom parser class for key=val,key=val options.

  This will store the parsed values as a dict {key: val}.

  """
  return _SplitKeyVal(opt, value)
def check_bool(option, opt, value): # pylint: disable=W0613
  """Custom parser for yes/no options.

  This will store the parsed value as either True or False.

  """
  value = value.lower()
  if value == constants.VALUE_FALSE or value == "no":
    return False
  elif value == constants.VALUE_TRUE or value == "yes":
    return True
  else:
    raise errors.ParameterError("Invalid boolean value '%s'" % value)
# completion_suggestion is normally a list. Using numeric values not evaluating
# to False for dynamic completion.
(OPT_COMPL_MANY_NODES,
 OPT_COMPL_ONE_NODE,
 OPT_COMPL_ONE_INSTANCE,
 OPT_COMPL_ONE_OS,
 OPT_COMPL_ONE_IALLOCATOR,
 OPT_COMPL_INST_ADD_NODES,
 OPT_COMPL_ONE_NODEGROUP) = range(100, 107)

#: All dynamic-completion markers, for validation
OPT_COMPL_ALL = frozenset([
  OPT_COMPL_MANY_NODES,
  OPT_COMPL_ONE_NODE,
  OPT_COMPL_ONE_INSTANCE,
  OPT_COMPL_ONE_OS,
  OPT_COMPL_ONE_IALLOCATOR,
  OPT_COMPL_INST_ADD_NODES,
  OPT_COMPL_ONE_NODEGROUP,
  ])
class CliOption(Option):
  """Custom option class for optparse.

  Extends optparse's Option with a completion_suggest attribute and the
  custom value types used by Ganeti option definitions.

  """
  ATTRS = Option.ATTRS + [
    "completion_suggest",
    ]
  TYPES = Option.TYPES + (
    "identkeyval",
    "keyval",
    "unit",
    "bool",
    )
  TYPE_CHECKER = Option.TYPE_CHECKER.copy()
  TYPE_CHECKER["identkeyval"] = check_ident_key_val
  TYPE_CHECKER["keyval"] = check_key_val
  TYPE_CHECKER["unit"] = check_unit
  TYPE_CHECKER["bool"] = check_bool


# optparse.py sets make_option, so we do it for our own option class, too
cli_option = CliOption
DEBUG_OPT = cli_option("-d", "--debug", default=0, action="count",
                       help="Increase debugging level")

NOHDR_OPT = cli_option("--no-headers", default=False,
                       action="store_true", dest="no_headers",
                       help="Don't display column headers")

SEP_OPT = cli_option("--separator", default=None,
                     action="store", dest="separator",
                     help=("Separator between output fields"
                           " (defaults to one space)"))

USEUNITS_OPT = cli_option("--units", default=None,
                          dest="units", choices=("h", "m", "g", "t"),
                          help="Specify units for output (one of h/m/g/t)")

FIELDS_OPT = cli_option("-o", "--output", dest="output", action="store",
                        type="string", metavar="FIELDS",
                        help="Comma separated list of output fields")

FORCE_OPT = cli_option("-f", "--force", dest="force", action="store_true",
                       default=False, help="Force the operation")

CONFIRM_OPT = cli_option("--yes", dest="confirm", action="store_true",
                         default=False, help="Do not require confirmation")
IGNORE_OFFLINE_OPT = cli_option("--ignore-offline", dest="ignore_offline",
                                action="store_true", default=False,
                                help=("Ignore offline nodes and do as much"
                                      " as possible"))

TAG_ADD_OPT = cli_option("--tags", dest="tags",
                         default=None, help="Comma-separated list of instance"
                                            " tags")

TAG_SRC_OPT = cli_option("--from", dest="tags_source",
                         default=None, help="File with tag names")
SUBMIT_OPT = cli_option("--submit", dest="submit_only",
                        default=False, action="store_true",
                        help=("Submit the job and return the job ID, but"
                              " don't wait for the job to finish"))

SYNC_OPT = cli_option("--sync", dest="do_locking",
                      default=False, action="store_true",
                      help=("Grab locks while doing the queries"
                            " in order to ensure more consistent results"))
DRY_RUN_OPT = cli_option("--dry-run", default=False,
                         action="store_true", dest="dry_run",
                         # typo fixed: original read "verify it it could be"
                         help=("Do not execute the operation, just run the"
                               " check steps and verify if it could be"
                               " executed"))

VERBOSE_OPT = cli_option("-v", "--verbose", default=False,
                         action="store_true", dest="verbose",
                         help="Increase the verbosity of the operation")
DEBUG_SIMERR_OPT = cli_option("--debug-simulate-errors", default=False,
                              action="store_true", dest="simulate_errors",
                              help="Debugging option that makes the operation"
                              " treat most runtime checks as failed")

NWSYNC_OPT = cli_option("--no-wait-for-sync", dest="wait_for_sync",
                        default=True, action="store_false",
                        help="Don't wait for sync (DANGEROUS!)")

ONLINE_INST_OPT = cli_option("--online", dest="online_inst",
                             action="store_true", default=False,
                             help="Enable offline instance")

OFFLINE_INST_OPT = cli_option("--offline", dest="offline_inst",
                              action="store_true", default=False,
                              help="Disable down instance")
DISK_TEMPLATE_OPT = cli_option("-t", "--disk-template", dest="disk_template",
                               help=("Custom disk setup (%s)" %
                                     utils.CommaJoin(constants.DISK_TEMPLATES)),
                               default=None, metavar="TEMPL",
                               choices=list(constants.DISK_TEMPLATES))

NONICS_OPT = cli_option("--no-nics", default=False, action="store_true",
                        help="Do not create any network cards for"
                        " the instance")

FILESTORE_DIR_OPT = cli_option("--file-storage-dir", dest="file_storage_dir",
                               help="Relative path under default cluster-wide"
                               " file storage dir to store file-based disks",
                               default=None, metavar="<DIR>")

FILESTORE_DRIVER_OPT = cli_option("--file-driver", dest="file_driver",
                                  help="Driver to use for image files",
                                  default="loop", metavar="<DRIVER>",
                                  choices=list(constants.FILE_DRIVER))
719 IALLOCATOR_OPT = cli_option("-I", "--iallocator", metavar="<NAME>",
720 help="Select nodes for the instance automatically"
721 " using the <NAME> iallocator plugin",
722 default=None, type="string",
723 completion_suggest=OPT_COMPL_ONE_IALLOCATOR)
725 DEFAULT_IALLOCATOR_OPT = cli_option("-I", "--default-iallocator",
727 help="Set the default instance allocator plugin",
728 default=None, type="string",
729 completion_suggest=OPT_COMPL_ONE_IALLOCATOR)
731 OS_OPT = cli_option("-o", "--os-type", dest="os", help="What OS to run",
733 completion_suggest=OPT_COMPL_ONE_OS)
735 OSPARAMS_OPT = cli_option("-O", "--os-parameters", dest="osparams",
736 type="keyval", default={},
737 help="OS parameters")
739 FORCE_VARIANT_OPT = cli_option("--force-variant", dest="force_variant",
740 action="store_true", default=False,
741 help="Force an unknown variant")
743 NO_INSTALL_OPT = cli_option("--no-install", dest="no_install",
744 action="store_true", default=False,
745 help="Do not install the OS (will"
748 BACKEND_OPT = cli_option("-B", "--backend-parameters", dest="beparams",
749 type="keyval", default={},
750 help="Backend parameters")
752 HVOPTS_OPT = cli_option("-H", "--hypervisor-parameters", type="keyval",
753 default={}, dest="hvparams",
754 help="Hypervisor parameters")
756 DISK_PARAMS_OPT = cli_option("-D", "--disk-parameters", dest="diskparams",
757 help="Disk template parameters, in the format"
758 " template:option=value,option=value,...",
759 type="identkeyval", action="append", default=[])
761 HYPERVISOR_OPT = cli_option("-H", "--hypervisor-parameters", dest="hypervisor",
762 help="Hypervisor and hypervisor options, in the"
763 " format hypervisor:option=value,option=value,...",
764 default=None, type="identkeyval")
766 HVLIST_OPT = cli_option("-H", "--hypervisor-parameters", dest="hvparams",
767 help="Hypervisor and hypervisor options, in the"
768 " format hypervisor:option=value,option=value,...",
769 default=[], action="append", type="identkeyval")
771 NOIPCHECK_OPT = cli_option("--no-ip-check", dest="ip_check", default=True,
772 action="store_false",
773 help="Don't check that the instance's IP"
776 NONAMECHECK_OPT = cli_option("--no-name-check", dest="name_check",
777 default=True, action="store_false",
778 help="Don't check that the instance's name"
781 NET_OPT = cli_option("--net",
782 help="NIC parameters", default=[],
783 dest="nics", action="append", type="identkeyval")
785 DISK_OPT = cli_option("--disk", help="Disk parameters", default=[],
786 dest="disks", action="append", type="identkeyval")
788 DISKIDX_OPT = cli_option("--disks", dest="disks", default=None,
789 help="Comma-separated list of disks"
790 " indices to act on (e.g. 0,2) (optional,"
791 " defaults to all disks)")
793 OS_SIZE_OPT = cli_option("-s", "--os-size", dest="sd_size",
794 help="Enforces a single-disk configuration using the"
795 " given disk size, in MiB unless a suffix is used",
796 default=None, type="unit", metavar="<size>")
798 IGNORE_CONSIST_OPT = cli_option("--ignore-consistency",
799 dest="ignore_consistency",
800 action="store_true", default=False,
801 help="Ignore the consistency of the disks on"
804 ALLOW_FAILOVER_OPT = cli_option("--allow-failover",
805 dest="allow_failover",
806 action="store_true", default=False,
807 help="If migration is not possible fallback to"
810 NONLIVE_OPT = cli_option("--non-live", dest="live",
811 default=True, action="store_false",
812 help="Do a non-live migration (this usually means"
813 " freeze the instance, save the state, transfer and"
814 " only then resume running on the secondary node)")
816 MIGRATION_MODE_OPT = cli_option("--migration-mode", dest="migration_mode",
818 choices=list(constants.HT_MIGRATION_MODES),
819 help="Override default migration mode (choose"
820 " either live or non-live")
822 NODE_PLACEMENT_OPT = cli_option("-n", "--node", dest="node",
823 help="Target node and optional secondary node",
824 metavar="<pnode>[:<snode>]",
825 completion_suggest=OPT_COMPL_INST_ADD_NODES)
827 NODE_LIST_OPT = cli_option("-n", "--node", dest="nodes", default=[],
828 action="append", metavar="<node>",
829 help="Use only this node (can be used multiple"
830 " times, if not given defaults to all nodes)",
831 completion_suggest=OPT_COMPL_ONE_NODE)
833 NODEGROUP_OPT_NAME = "--node-group"
834 NODEGROUP_OPT = cli_option("-g", NODEGROUP_OPT_NAME,
836 help="Node group (name or uuid)",
837 metavar="<nodegroup>",
838 default=None, type="string",
839 completion_suggest=OPT_COMPL_ONE_NODEGROUP)
841 SINGLE_NODE_OPT = cli_option("-n", "--node", dest="node", help="Target node",
843 completion_suggest=OPT_COMPL_ONE_NODE)
845 NOSTART_OPT = cli_option("--no-start", dest="start", default=True,
846 action="store_false",
847 help="Don't start the instance after creation")
849 SHOWCMD_OPT = cli_option("--show-cmd", dest="show_command",
850 action="store_true", default=False,
851 help="Show command instead of executing it")
853 CLEANUP_OPT = cli_option("--cleanup", dest="cleanup",
854 default=False, action="store_true",
855 help="Instead of performing the migration, try to"
856 " recover from a failed cleanup. This is safe"
857 " to run even if the instance is healthy, but it"
858 " will create extra replication traffic and "
859 " disrupt briefly the replication (like during the"
862 STATIC_OPT = cli_option("-s", "--static", dest="static",
863 action="store_true", default=False,
864 help="Only show configuration data, not runtime data")
866 ALL_OPT = cli_option("--all", dest="show_all",
867 default=False, action="store_true",
868 help="Show info on all instances on the cluster."
869 " This can take a long time to run, use wisely")
871 SELECT_OS_OPT = cli_option("--select-os", dest="select_os",
872 action="store_true", default=False,
873 help="Interactive OS reinstall, lists available"
874 " OS templates for selection")
876 IGNORE_FAILURES_OPT = cli_option("--ignore-failures", dest="ignore_failures",
877 action="store_true", default=False,
878 help="Remove the instance from the cluster"
879 " configuration even if there are failures"
880 " during the removal process")
882 IGNORE_REMOVE_FAILURES_OPT = cli_option("--ignore-remove-failures",
883 dest="ignore_remove_failures",
884 action="store_true", default=False,
885 help="Remove the instance from the"
886 " cluster configuration even if there"
887 " are failures during the removal"
890 REMOVE_INSTANCE_OPT = cli_option("--remove-instance", dest="remove_instance",
891 action="store_true", default=False,
892 help="Remove the instance from the cluster")
DST_NODE_OPT = cli_option("-n", "--target-node", dest="dst_node",
                          help="Specifies the new node for the instance",
                          metavar="NODE", default=None,
                          completion_suggest=OPT_COMPL_ONE_NODE)

NEW_SECONDARY_OPT = cli_option("-n", "--new-secondary", dest="dst_node",
                               help="Specifies the new secondary node",
                               metavar="NODE", default=None,
                               completion_suggest=OPT_COMPL_ONE_NODE)

ON_PRIMARY_OPT = cli_option("-p", "--on-primary", dest="on_primary",
                            default=False, action="store_true",
                            help="Replace the disk(s) on the primary"
                            " node (applies only to internally mirrored"
                            " disk templates, e.g. %s)" %
                            utils.CommaJoin(constants.DTS_INT_MIRROR))

ON_SECONDARY_OPT = cli_option("-s", "--on-secondary", dest="on_secondary",
                              default=False, action="store_true",
                              help="Replace the disk(s) on the secondary"
                              " node (applies only to internally mirrored"
                              " disk templates, e.g. %s)" %
                              utils.CommaJoin(constants.DTS_INT_MIRROR))
918 AUTO_PROMOTE_OPT = cli_option("--auto-promote", dest="auto_promote",
919 default=False, action="store_true",
920 help="Lock all nodes and auto-promote as needed"
923 AUTO_REPLACE_OPT = cli_option("-a", "--auto", dest="auto",
924 default=False, action="store_true",
925 help="Automatically replace faulty disks"
926 " (applies only to internally mirrored"
927 " disk templates, e.g. %s)" %
928 utils.CommaJoin(constants.DTS_INT_MIRROR))
930 IGNORE_SIZE_OPT = cli_option("--ignore-size", dest="ignore_size",
931 default=False, action="store_true",
932 help="Ignore current recorded size"
933 " (useful for forcing activation when"
934 " the recorded size is wrong)")
936 SRC_NODE_OPT = cli_option("--src-node", dest="src_node", help="Source node",
938 completion_suggest=OPT_COMPL_ONE_NODE)
940 SRC_DIR_OPT = cli_option("--src-dir", dest="src_dir", help="Source directory",
SECONDARY_IP_OPT = cli_option("-s", "--secondary-ip", dest="secondary_ip",
                              help="Specify the secondary ip for the node",
                              metavar="ADDRESS", default=None)

READD_OPT = cli_option("--readd", dest="readd",
                       default=False, action="store_true",
                       help="Readd old node after replacing it")

NOSSH_KEYCHECK_OPT = cli_option("--no-ssh-key-check", dest="ssh_key_check",
                                default=True, action="store_false",
                                help="Disable SSH key fingerprint checking")

NODE_FORCE_JOIN_OPT = cli_option("--force-join", dest="force_join",
                                 default=False, action="store_true",
                                 help="Force the joining of a node")

MC_OPT = cli_option("-C", "--master-candidate", dest="master_candidate",
                    type="bool", default=None, metavar=_YORNO,
                    help="Set the master_candidate flag on the node")
963 OFFLINE_OPT = cli_option("-O", "--offline", dest="offline", metavar=_YORNO,
964 type="bool", default=None,
965 help=("Set the offline flag on the node"
966 " (cluster does not communicate with offline"
969 DRAINED_OPT = cli_option("-D", "--drained", dest="drained", metavar=_YORNO,
970 type="bool", default=None,
971 help=("Set the drained flag on the node"
972 " (excluded from allocation operations)"))
974 CAPAB_MASTER_OPT = cli_option("--master-capable", dest="master_capable",
975 type="bool", default=None, metavar=_YORNO,
976 help="Set the master_capable flag on the node")
978 CAPAB_VM_OPT = cli_option("--vm-capable", dest="vm_capable",
979 type="bool", default=None, metavar=_YORNO,
980 help="Set the vm_capable flag on the node")
982 ALLOCATABLE_OPT = cli_option("--allocatable", dest="allocatable",
983 type="bool", default=None, metavar=_YORNO,
984 help="Set the allocatable flag on a volume")
986 NOLVM_STORAGE_OPT = cli_option("--no-lvm-storage", dest="lvm_storage",
987 help="Disable support for lvm based instances"
989 action="store_false", default=True)
991 ENABLED_HV_OPT = cli_option("--enabled-hypervisors",
992 dest="enabled_hypervisors",
993 help="Comma-separated list of hypervisors",
994 type="string", default=None)
996 NIC_PARAMS_OPT = cli_option("-N", "--nic-parameters", dest="nicparams",
997 type="keyval", default={},
998 help="NIC parameters")
1000 CP_SIZE_OPT = cli_option("-C", "--candidate-pool-size", default=None,
1001 dest="candidate_pool_size", type="int",
1002 help="Set the candidate pool size")
1004 VG_NAME_OPT = cli_option("--vg-name", dest="vg_name",
1005 help=("Enables LVM and specifies the volume group"
1006 " name (cluster-wide) for disk allocation"
1007 " [%s]" % constants.DEFAULT_VG),
1008 metavar="VG", default=None)
1010 YES_DOIT_OPT = cli_option("--yes-do-it", "--ya-rly", dest="yes_do_it",
1011 help="Destroy cluster", action="store_true")
1013 NOVOTING_OPT = cli_option("--no-voting", dest="no_voting",
1014 help="Skip node agreement check (dangerous)",
1015 action="store_true", default=False)
1017 MAC_PREFIX_OPT = cli_option("-m", "--mac-prefix", dest="mac_prefix",
1018 help="Specify the mac prefix for the instance IP"
1019 " addresses, in the format XX:XX:XX",
1023 MASTER_NETDEV_OPT = cli_option("--master-netdev", dest="master_netdev",
1024 help="Specify the node interface (cluster-wide)"
1025 " on which the master IP address will be added"
1026 " (cluster init default: %s)" %
1027 constants.DEFAULT_BRIDGE,
1031 MASTER_NETMASK_OPT = cli_option("--master-netmask", dest="master_netmask",
1032 help="Specify the netmask of the master IP",
1036 USE_EXTERNAL_MIP_SCRIPT = cli_option("--use-external-mip-script",
1037 dest="use_external_mip_script",
1038 help="Specify whether to run a user-provided"
1039 " script for the master IP address turnup and"
1040 " turndown operations",
1041 type="bool", metavar=_YORNO, default=None)
1043 GLOBAL_FILEDIR_OPT = cli_option("--file-storage-dir", dest="file_storage_dir",
1044 help="Specify the default directory (cluster-"
1045 "wide) for storing the file-based disks [%s]" %
1046 constants.DEFAULT_FILE_STORAGE_DIR,
1048 default=constants.DEFAULT_FILE_STORAGE_DIR)
1050 GLOBAL_SHARED_FILEDIR_OPT = cli_option("--shared-file-storage-dir",
1051 dest="shared_file_storage_dir",
1052 help="Specify the default directory (cluster-"
1053 "wide) for storing the shared file-based"
1055 constants.DEFAULT_SHARED_FILE_STORAGE_DIR,
1056 metavar="SHAREDDIR",
1057 default=constants.DEFAULT_SHARED_FILE_STORAGE_DIR)
NOMODIFY_ETCHOSTS_OPT = cli_option("--no-etc-hosts", dest="modify_etc_hosts",
                                   help="Don't modify /etc/hosts",
                                   action="store_false", default=True)

NOMODIFY_SSH_SETUP_OPT = cli_option("--no-ssh-init", dest="modify_ssh_setup",
                                    help="Don't initialize SSH keys",
                                    action="store_false", default=True)

ERROR_CODES_OPT = cli_option("--error-codes", dest="error_codes",
                             help="Enable parseable error messages",
                             action="store_true", default=False)

NONPLUS1_OPT = cli_option("--no-nplus1-mem", dest="skip_nplusone_mem",
                          help="Skip N+1 memory redundancy tests",
                          action="store_true", default=False)
1075 REBOOT_TYPE_OPT = cli_option("-t", "--type", dest="reboot_type",
1076 help="Type of reboot: soft/hard/full",
1077 default=constants.INSTANCE_REBOOT_HARD,
1079 choices=list(constants.REBOOT_TYPES))
IGNORE_SECONDARIES_OPT = cli_option("--ignore-secondaries",
                                    dest="ignore_secondaries",
                                    default=False, action="store_true",
                                    help="Ignore errors from secondaries")

NOSHUTDOWN_OPT = cli_option("--noshutdown", dest="shutdown",
                            action="store_false", default=True,
                            help="Don't shutdown the instance (unsafe)")

TIMEOUT_OPT = cli_option("--timeout", dest="timeout", type="int",
                         default=constants.DEFAULT_SHUTDOWN_TIMEOUT,
                         help="Maximum time to wait")

SHUTDOWN_TIMEOUT_OPT = cli_option("--shutdown-timeout",
                                  dest="shutdown_timeout", type="int",
                                  default=constants.DEFAULT_SHUTDOWN_TIMEOUT,
                                  help="Maximum time to wait for instance shutdown")
1099 INTERVAL_OPT = cli_option("--interval", dest="interval", type="int",
1101 help=("Number of seconds between repetions of the"
1104 EARLY_RELEASE_OPT = cli_option("--early-release",
1105 dest="early_release", default=False,
1106 action="store_true",
1107 help="Release the locks on the secondary"
1110 NEW_CLUSTER_CERT_OPT = cli_option("--new-cluster-certificate",
1111 dest="new_cluster_cert",
1112 default=False, action="store_true",
1113 help="Generate a new cluster certificate")
1115 RAPI_CERT_OPT = cli_option("--rapi-certificate", dest="rapi_cert",
1117 help="File containing new RAPI certificate")
1119 NEW_RAPI_CERT_OPT = cli_option("--new-rapi-certificate", dest="new_rapi_cert",
1120 default=None, action="store_true",
1121 help=("Generate a new self-signed RAPI"
1124 SPICE_CERT_OPT = cli_option("--spice-certificate", dest="spice_cert",
1126 help="File containing new SPICE certificate")
1128 SPICE_CACERT_OPT = cli_option("--spice-ca-certificate", dest="spice_cacert",
1130 help="File containing the certificate of the CA"
1131 " which signed the SPICE certificate")
1133 NEW_SPICE_CERT_OPT = cli_option("--new-spice-certificate",
1134 dest="new_spice_cert", default=None,
1135 action="store_true",
1136 help=("Generate a new self-signed SPICE"
1139 NEW_CONFD_HMAC_KEY_OPT = cli_option("--new-confd-hmac-key",
1140 dest="new_confd_hmac_key",
1141 default=False, action="store_true",
1142 help=("Create a new HMAC key for %s" %
1145 CLUSTER_DOMAIN_SECRET_OPT = cli_option("--cluster-domain-secret",
1146 dest="cluster_domain_secret",
1148 help=("Load new new cluster domain"
1149 " secret from file"))
1151 NEW_CLUSTER_DOMAIN_SECRET_OPT = cli_option("--new-cluster-domain-secret",
1152 dest="new_cluster_domain_secret",
1153 default=False, action="store_true",
1154 help=("Create a new cluster domain"
# Module-level option singletons shared by the gnt-* scripts; each is
# exported through __all__ at the top of this module.

USE_REPL_NET_OPT = cli_option("--use-replication-network",
                              dest="use_replication_network",
                              help="Whether to use the replication network"
                              " for talking to the nodes",
                              action="store_true", default=False)

# Tri-state ("bool" custom type): None means "leave cluster setting as is".
MAINTAIN_NODE_HEALTH_OPT = \
    cli_option("--maintain-node-health", dest="maintain_node_health",
               metavar=_YORNO, default=None, type="bool",
               help="Configure the cluster to automatically maintain node"
               " health, by shutting down unknown instances, shutting down"
               " unknown DRBD devices, etc.")

IDENTIFY_DEFAULTS_OPT = \
    cli_option("--identify-defaults", dest="identify_defaults",
               default=False, action="store_true",
               help="Identify which saved instance parameters are equal to"
               " the current cluster defaults and set them as such, instead"
               " of marking them as overridden")

# User-id pool management options (cluster init/modify).
UIDPOOL_OPT = cli_option("--uid-pool", default=None,
                         action="store", dest="uid_pool",
                         help=("A list of user-ids or user-id"
                               " ranges separated by commas"))

ADD_UIDS_OPT = cli_option("--add-uids", default=None,
                          action="store", dest="add_uids",
                          help=("A list of user-ids or user-id"
                                " ranges separated by commas, to be"
                                " added to the user-id pool"))

REMOVE_UIDS_OPT = cli_option("--remove-uids", default=None,
                             action="store", dest="remove_uids",
                             help=("A list of user-ids or user-id"
                                   " ranges separated by commas, to be"
                                   " removed from the user-id pool"))

RESERVED_LVS_OPT = cli_option("--reserved-lvs", default=None,
                              action="store", dest="reserved_lvs",
                              help=("A comma-separated list of reserved"
                                    " logical volumes names, that will be"
                                    " ignored by cluster verify"))

ROMAN_OPT = cli_option("--roman",
                       dest="roman_integers", default=False,
                       action="store_true",
                       help="Use roman numbers for positive integers")

DRBD_HELPER_OPT = cli_option("--drbd-usermode-helper", dest="drbd_helper",
                             action="store", default=None,
                             help="Specifies usermode helper for DRBD")

# Note the inverted sense: the flag *disables*, the dest defaults to True.
NODRBD_STORAGE_OPT = cli_option("--no-drbd-storage", dest="drbd_storage",
                                action="store_false", default=True,
                                help="Disable support for DRBD")

PRIMARY_IP_VERSION_OPT = \
    cli_option("--primary-ip-version", default=constants.IP4_VERSION,
               action="store", dest="primary_ip_version",
               metavar="%d|%d" % (constants.IP4_VERSION,
                                  constants.IP6_VERSION),
               help="Cluster-wide IP version for primary IP")

# Accepts a symbolic name which callers map to a numeric priority via
# _PRIONAME_TO_VALUE (see SetGenericOpcodeOpts).
PRIORITY_OPT = cli_option("--priority", default=None, dest="priority",
                          metavar="|".join(name for name, _ in _PRIORITY_NAMES),
                          choices=_PRIONAME_TO_VALUE.keys(),
                          help="Priority for opcode processing")

HID_OS_OPT = cli_option("--hidden", dest="hidden",
                        type="bool", default=None, metavar=_YORNO,
                        help="Sets the hidden flag on the OS")

BLK_OS_OPT = cli_option("--blacklisted", dest="blacklisted",
                        type="bool", default=None, metavar=_YORNO,
                        help="Sets the blacklisted flag on the OS")

PREALLOC_WIPE_DISKS_OPT = cli_option("--prealloc-wipe-disks", default=None,
                                     type="bool", metavar=_YORNO,
                                     dest="prealloc_wipe_disks",
                                     help=("Wipe disks prior to instance"

NODE_PARAMS_OPT = cli_option("--node-parameters", dest="ndparams",
                             type="keyval", default=None,
                             help="Node parameters")

ALLOC_POLICY_OPT = cli_option("--alloc-policy", dest="alloc_policy",
                              action="store", metavar="POLICY", default=None,
                              help="Allocation policy for the node group")

NODE_POWERED_OPT = cli_option("--node-powered", default=None,
                              type="bool", metavar=_YORNO,
                              dest="node_powered",
                              help="Specify if the SoR for node is powered")

OOB_TIMEOUT_OPT = cli_option("--oob-timeout", dest="oob_timeout", type="int",
                             default=constants.OOB_TIMEOUT,
                             help="Maximum time to wait for out-of-band helper")

POWER_DELAY_OPT = cli_option("--power-delay", dest="power_delay", type="float",
                             default=constants.OOB_POWER_DELAY,
                             help="Time in seconds to wait between power-ons")

FORCE_FILTER_OPT = cli_option("-F", "--filter", dest="force_filter",
                              action="store_true", default=False,
                              help=("Whether command argument should be treated

NO_REMEMBER_OPT = cli_option("--no-remember",
                             action="store_true", default=False,
                             help="Perform but do not record the change"
                             " in the configuration")

PRIMARY_ONLY_OPT = cli_option("-p", "--primary-only",
                              default=False, action="store_true",
                              help="Evacuate primary instances only")

SECONDARY_ONLY_OPT = cli_option("-s", "--secondary-only",
                                default=False, action="store_true",
                                help="Evacuate secondary instances only"
                                " (applies only to internally mirrored"
                                " disk templates, e.g. %s)" %
                                utils.CommaJoin(constants.DTS_INT_MIRROR))

STARTUP_PAUSED_OPT = cli_option("--paused", dest="startup_paused",
                                action="store_true", default=False,
                                help="Pause instance at startup")

TO_GROUP_OPT = cli_option("--to", dest="to", metavar="<group>",
                          help="Destination node group (name or uuid)",
                          default=None, action="append",
                          completion_suggest=OPT_COMPL_ONE_NODEGROUP)

# NOTE(review): default=[] with action="append" shares one mutable list
# across parses of the same Option object — harmless for one-shot CLI
# invocations, but worth confirming if the parser is ever reused.
IGNORE_ERRORS_OPT = cli_option("-I", "--ignore-errors", default=[],
                               action="append", dest="ignore_errors",
                               choices=list(constants.CV_ALL_ECODES_STRINGS),
                               help="Error code to be ignored")

#: Options provided by all commands
COMMON_OPTS = [DEBUG_OPT]

# common options for creating instances. add and import then add their own
COMMON_CREATE_OPTS = [
  FILESTORE_DRIVER_OPT,
def _ParseArgs(argv, commands, aliases, env_override):
  """Parser for the command line arguments.

  This function parses the arguments and returns the function which
  must be executed together with its (modified) arguments.

  @param argv: the command line
  @param commands: dictionary with special contents, see the design
      doc for cmdline handling
  @param aliases: dictionary with command aliases {'alias': 'target, ...}
  @param env_override: list of env variables allowed for default args
  @rtype: tuple
  @return: (function, options, args) on success, (None, None, None) on
      usage/parse error

  """
  # Every env-overridable command must actually exist in the command table
  assert not (env_override - set(commands))

  binary = "<command>"
  binary = argv[0].split("/")[-1]

  # "--version" is handled before normal command dispatch
  if len(argv) > 1 and argv[1] == "--version":
    ToStdout("%s (ganeti %s) %s", binary, constants.VCS_VERSION,
             constants.RELEASE_VERSION)
    # Quit right away. That way we don't have to care about this special
    # argument. optparse.py does it the same.

  if len(argv) < 2 or not (argv[1] in commands or
                           argv[1] in aliases):
    # let's do a nice thing
    sortedcmds = commands.keys()
    ToStdout("Usage: %s {command} [options...] [argument...]", binary)
    ToStdout("%s <command> --help to see details, or man %s", binary, binary)

    # compute the max line length for cmd + usage
    mlen = max([len(" %s" % cmd) for cmd in commands])
    mlen = min(60, mlen) # should not get here...

    # and format a nice command list
    ToStdout("Commands:")
    for cmd in sortedcmds:
      cmdstr = " %s" % (cmd,)
      # commands[cmd] is (function, args_def, parser_opts, usage, description)
      help_text = commands[cmd][4]
      help_lines = textwrap.wrap(help_text, 79 - 3 - mlen)
      ToStdout("%-*s - %s", mlen, cmdstr, help_lines.pop(0))
      for line in help_lines:
        # continuation lines are indented under the command column
        ToStdout("%-*s   %s", mlen, "", line)

    return None, None, None

  # get command, unalias it, and look it up in commands
  raise errors.ProgrammerError("Alias '%s' overrides an existing"
  if aliases[cmd] not in commands:
    raise errors.ProgrammerError("Alias '%s' maps to non-existing"
                                 " command '%s'" % (cmd, aliases[cmd]))

  # Allow e.g. GNT_INSTANCE_ADD to inject default arguments for "gnt-instance add"
  if cmd in env_override:
    args_env_name = ("%s_%s" % (binary.replace("-", "_"), cmd)).upper()
    env_args = os.environ.get(args_env_name)
    argv = utils.InsertAtPos(argv, 1, shlex.split(env_args))

  func, args_def, parser_opts, usage, description = commands[cmd]
  parser = OptionParser(option_list=parser_opts + COMMON_OPTS,
                        description=description,
                        formatter=TitledHelpFormatter(),
                        usage="%%prog %s %s" % (cmd, usage))
  # options must come before positional arguments
  parser.disable_interspersed_args()
  options, args = parser.parse_args(args=argv[1:])

  if not _CheckArguments(cmd, args_def, args):
    return None, None, None

  return func, options, args
def _CheckArguments(cmd, args_def, args):
  """Verifies the arguments using the argument definition.

  Algorithm:

  1. Abort with error if values specified by user but none expected.

  1. For each argument in definition

    1. Keep running count of minimum number of values (min_count)

    1. Keep running count of maximum number of values (max_count)

    1. If it has an unlimited number of values

      1. Abort with error if it's not the last argument in the definition

  1. If last argument has limited number of values

    1. Abort with error if number of values doesn't match or is too large

  1. Abort with error if user didn't pass enough values (min_count)

  @rtype: bool
  @return: True if the arguments are acceptable, False otherwise (after
      printing an error to stderr)

  """
  if args and not args_def:
    ToStderr("Error: Command %s expects no arguments", cmd)

  last_idx = len(args_def) - 1

  for idx, arg in enumerate(args_def):
    # min_count/max_count accumulate over all argument definitions;
    # None acts as the initial/unbounded marker
    if min_count is None:
    elif arg.min is not None:
      min_count += arg.min

    if max_count is None:
    elif arg.max is not None:
      max_count += arg.max

      # only the final argument may be unbounded (max=None)
      check_max = (arg.max is not None)
    elif arg.max is None:
      raise errors.ProgrammerError("Only the last argument can have max=None")

  # Command with exact number of arguments
  if (min_count is not None and max_count is not None and
      min_count == max_count and len(args) != min_count):
    ToStderr("Error: Command %s expects %d argument(s)", cmd, min_count)

  # Command with limited number of arguments
  if max_count is not None and len(args) > max_count:
    ToStderr("Error: Command %s expects only %d argument(s)",

  # Command with some required arguments
  if min_count is not None and len(args) < min_count:
    ToStderr("Error: Command %s expects at least %d argument(s)",
def SplitNodeOption(value):
  """Splits the value of a --node option.

  Splits "pnode:snode" into its two components; a value without a colon
  (or an empty/None value) yields (value, None).

  """
  if not (value and ":" in value):
    return (value, None)
  # Note: split() returns a list here, matching historical behavior
  return value.split(":", 1)
def CalculateOSNames(os_name, os_variants):
  """Calculates all the names an OS can be called, according to its variants.

  @type os_name: string
  @param os_name: base name of the os
  @type os_variants: list or None
  @param os_variants: list of supported variants
  @rtype: list
  @return: list of valid names

  """
  # Without variants the plain name is the only valid one
  if not os_variants:
    return [os_name]
  return ["%s+%s" % (os_name, variant) for variant in os_variants]
def ParseFields(selected, default):
  """Parses the values of "--field"-like options.

  @type selected: string or None
  @param selected: User-selected options
  @type default: list
  @param default: Default fields

  """
  # No selection at all: use the defaults unchanged
  if selected is None:
    return default

  # "+a,b" means "defaults plus these extra fields"
  if selected.startswith("+"):
    extra = selected[1:].split(",")
    return default + extra

  # Plain "a,b" replaces the defaults entirely
  return selected.split(",")
# Decorator alias: wraps a function so the RPC layer is initialized/shut
# down around the call (see rpc.RunWithRPC).
UsesRPC = rpc.RunWithRPC
def AskUser(text, choices=None):
  """Ask the user a question.

  @param text: the question to ask
  @param choices: list with elements tuples (input_char, return_value,
      description); if not given, it will default to: [('y', True,
      'Perform the operation'), ('n', False, 'Do no do the operation')];
      note that the '?' char is reserved for help

  @return: one of the return values from the choices list; if input is
      not possible (i.e. not running with a tty, we return the last
      entry from the choices list)

  """
  choices = [("y", True, "Perform the operation"),
             ("n", False, "Do not perform the operation")]
  # Validate the caller-supplied choices structure
  if not choices or not isinstance(choices, list):
    raise errors.ProgrammerError("Invalid choices argument to AskUser")
  for entry in choices:
    if not isinstance(entry, tuple) or len(entry) < 3 or entry[0] == "?":
      raise errors.ProgrammerError("Invalid choices element to AskUser")

  # Default answer (returned when no tty is available) is the last choice
  answer = choices[-1][1]
  for line in text.splitlines():
    # wrap the question text to 70 columns for terminal display
    new_text.append(textwrap.fill(line, 70, replace_whitespace=False))
  text = "\n".join(new_text)
  # Python 2 file() builtin; talk directly to the controlling terminal
  f = file("/dev/tty", "a+")
  chars = [entry[0] for entry in choices]
  # last (default) choice is displayed bracketed, e.g. y/[n]/?
  chars[-1] = "[%s]" % chars[-1]
  maps = dict([(entry[0], entry[1]) for entry in choices])
  f.write("/".join(chars))
  # read at most one character (plus newline) from the tty
  line = f.readline(2).strip().lower()
  # '?' prints the per-choice descriptions as help
  for entry in choices:
    f.write(" %s - %s\n" % (entry[0], entry[2]))
class JobSubmittedException(Exception):
  """Job was submitted, client should exit.

  This exception has one argument, the ID of the job that was
  submitted. The handler should print this ID.

  This is not an error, just a structured way to exit from clients.

  """
def SendJob(ops, cl=None):
  """Function to submit an opcode without waiting for the results.

  @type ops: list
  @param ops: list of opcodes
  @type cl: luxi.Client
  @param cl: the luxi client to use for communicating with the master;
             if None, a new client will be created
  @return: the job ID assigned by the master daemon

  """
  # Submit all opcodes as a single job; the caller receives the job ID
  job_id = cl.SubmitJob(ops)
def GenericPollJob(job_id, cbs, report_cbs):
  """Generic job-polling function.

  @type job_id: number
  @param job_id: Job ID
  @type cbs: Instance of L{JobPollCbBase}
  @param cbs: Data callbacks
  @type report_cbs: Instance of L{JobPollReportCbBase}
  @param report_cbs: Reporting callbacks
  @return: the list of opcode results on success
  @raise errors.JobLost: if the job disappears from the queue
  @raise errors.OpExecError: if the job or any opcode failed

  """
  prev_job_info = None
  prev_logmsg_serial = None

  # Poll until the job reaches a final status
  result = cbs.WaitForJobChangeOnce(job_id, ["status"], prev_job_info,
    # job not found, go away!
    raise errors.JobLost("Job with id %s lost" % job_id)

  if result == constants.JOB_NOTCHANGED:
    report_cbs.ReportNotChanged(job_id, status)

  # Split result, a tuple of (field values, log entries)
  (job_info, log_entries) = result
  (status, ) = job_info

  for log_entry in log_entries:
    (serial, timestamp, log_type, message) = log_entry
    report_cbs.ReportLogMessage(job_id, serial, timestamp,
    # remember the highest serial so old messages are not re-reported
    prev_logmsg_serial = max(prev_logmsg_serial, serial)

  # TODO: Handle canceled and archived jobs
  elif status in (constants.JOB_STATUS_SUCCESS,
                  constants.JOB_STATUS_ERROR,
                  constants.JOB_STATUS_CANCELING,
                  constants.JOB_STATUS_CANCELED):

  prev_job_info = job_info

  # Final status reached: fetch per-opcode status/results
  jobs = cbs.QueryJobs([job_id], ["status", "opstatus", "opresult"])
    raise errors.JobLost("Job with id %s lost" % job_id)

  status, opstatus, result = jobs[0]

  if status == constants.JOB_STATUS_SUCCESS:

  if status in (constants.JOB_STATUS_CANCELING, constants.JOB_STATUS_CANCELED):
    raise errors.OpExecError("Job was canceled")

  # Job failed: find the first failing opcode and raise accordingly
  for idx, (status, msg) in enumerate(zip(opstatus, result)):
    if status == constants.OP_STATUS_SUCCESS:
    elif status == constants.OP_STATUS_ERROR:
      # re-raise encapsulated exceptions when possible
      errors.MaybeRaise(msg)
        raise errors.OpExecError("partial failure (opcode %d): %s" %
      raise errors.OpExecError(str(msg))

  # default failure mode
  raise errors.OpExecError(result)
class JobPollCbBase:
  """Base class for L{GenericPollJob} callbacks.

  Subclasses provide the data-access side of job polling (waiting for
  changes and querying job fields).

  """
    """Initializes this class.

  def WaitForJobChangeOnce(self, job_id, fields,
                           prev_job_info, prev_log_serial):
    """Waits for changes on a job.

    """
    # abstract: must be implemented by subclasses
    raise NotImplementedError()

  def QueryJobs(self, job_ids, fields):
    """Returns the selected fields for the selected job IDs.

    @type job_ids: list of numbers
    @param job_ids: Job IDs
    @type fields: list of strings
    @param fields: Fields

    """
    # abstract: must be implemented by subclasses
    raise NotImplementedError()
class JobPollReportCbBase:
  """Base class for L{GenericPollJob} reporting callbacks.

  Subclasses decide how polling progress (log messages, "still waiting"
  notifications) is presented to the user.

  """
    """Initializes this class.

  def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
    """Handles a log message.

    """
    # abstract: must be implemented by subclasses
    raise NotImplementedError()

  def ReportNotChanged(self, job_id, status):
    """Called for if a job hasn't changed in a while.

    @type job_id: number
    @param job_id: Job ID
    @type status: string or None
    @param status: Job status if available

    """
    # abstract: must be implemented by subclasses
    raise NotImplementedError()
class _LuxiJobPollCb(JobPollCbBase):
  # Data callbacks backed by a luxi client: every call is delegated to
  # the master daemon via self.cl.
  def __init__(self, cl):
    """Initializes this class.

    @param cl: luxi client used for all job queries

    """
    JobPollCbBase.__init__(self)

  def WaitForJobChangeOnce(self, job_id, fields,
                           prev_job_info, prev_log_serial):
    """Waits for changes on a job.

    """
    # straight delegation to the luxi client
    return self.cl.WaitForJobChangeOnce(job_id, fields,
                                        prev_job_info, prev_log_serial)

  def QueryJobs(self, job_ids, fields):
    """Returns the selected fields for the selected job IDs.

    """
    # straight delegation to the luxi client
    return self.cl.QueryJobs(job_ids, fields)
class FeedbackFnJobPollReportCb(JobPollReportCbBase):
  # Reporting callbacks that forward log messages to a caller-supplied
  # feedback function; "not changed" notifications are ignored.
  def __init__(self, feedback_fn):
    """Initializes this class.

    @param feedback_fn: callable receiving (timestamp, log_type, log_msg)

    """
    JobPollReportCbBase.__init__(self)

    self.feedback_fn = feedback_fn

    assert callable(feedback_fn)

  def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
    """Handles a log message.

    """
    # forward as a (timestamp, type, message) tuple
    self.feedback_fn((timestamp, log_type, log_msg))

  def ReportNotChanged(self, job_id, status):
    """Called if a job hasn't changed in a while.

    """
class StdioJobPollReportCb(JobPollReportCbBase):
  # Reporting callbacks that print to stdout/stderr; used when no
  # feedback function is supplied to PollJob.
    """Initializes this class.

    """
    JobPollReportCbBase.__init__(self)

    # one-shot flags so each queue/lock notification is printed only once
    self.notified_queued = False
    self.notified_waitlock = False

  def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
    """Handles a log message.

    """
    # human-readable timestamp plus the (safely encoded) message
    ToStdout("%s %s", time.ctime(utils.MergeTime(timestamp)),
             FormatLogMessage(log_type, log_msg))

  def ReportNotChanged(self, job_id, status):
    """Called if a job hasn't changed in a while.

    """
    if status == constants.JOB_STATUS_QUEUED and not self.notified_queued:
      ToStderr("Job %s is waiting in queue", job_id)
      self.notified_queued = True

    elif status == constants.JOB_STATUS_WAITING and not self.notified_waitlock:
      ToStderr("Job %s is trying to acquire all necessary locks", job_id)
      self.notified_waitlock = True
def FormatLogMessage(log_type, log_msg):
  """Formats a job message according to its type.

  """
  if log_type == constants.ELOG_MESSAGE:
    # plain message payloads are used as-is
    formatted = log_msg
  else:
    # other entry types may carry arbitrary objects; stringify them first
    formatted = str(log_msg)

  return utils.SafeEncode(formatted)
def PollJob(job_id, cl=None, feedback_fn=None, reporter=None):
  """Function to poll for the result of a job.

  @type job_id: job identified
  @param job_id: the job to poll for results
  @type cl: luxi.Client
  @param cl: the luxi client to use for communicating with the master;
             if None, a new client will be created
  @param feedback_fn: if given, used to build a FeedbackFnJobPollReportCb;
      mutually exclusive with C{reporter}
  @param reporter: optional pre-built JobPollReportCbBase instance

  """
  if reporter is None:
      # feedback function given: route messages through it
      reporter = FeedbackFnJobPollReportCb(feedback_fn)
      # otherwise fall back to printing on stdout/stderr
      reporter = StdioJobPollReportCb()
    # both reporter and feedback_fn given is a programming error
    raise errors.ProgrammerError("Can't specify reporter and feedback function")

  return GenericPollJob(job_id, _LuxiJobPollCb(cl), reporter)
def SubmitOpCode(op, cl=None, feedback_fn=None, opts=None, reporter=None):
  """Legacy function to submit an opcode.

  This is just a simple wrapper over the construction of the processor
  instance. It should be extended to better handle feedback and
  interaction functions.

  @return: the result of the first (and only) opcode of the job

  """
  # apply generic options (debug, dry-run, priority) to the opcode
  SetGenericOpcodeOpts([op], opts)

  job_id = SendJob([op], cl=cl)

  # wait for the single-opcode job and return its result
  op_results = PollJob(job_id, cl=cl, feedback_fn=feedback_fn,

  return op_results[0]
def SubmitOrSend(op, opts, cl=None, feedback_fn=None):
  """Wrapper around SubmitOpCode or SendJob.

  This function will decide, based on the 'opts' parameter, whether to
  submit and wait for the result of the opcode (and return it), or
  whether to just send the job and print its identifier. It is used in
  order to simplify the implementation of the '--submit' option.

  It will also process the opcodes if we're sending the via SendJob
  (otherwise SubmitOpCode does it).

  @raise JobSubmittedException: in --submit mode, carrying the job ID

  """
  if opts and opts.submit_only:
    SetGenericOpcodeOpts(job, opts)
    job_id = SendJob(job, cl=cl)
    # not an error: structured early exit carrying the job ID
    raise JobSubmittedException(job_id)

  return SubmitOpCode(op, cl=cl, feedback_fn=feedback_fn, opts=opts)
def SetGenericOpcodeOpts(opcode_list, options):
  """Processor for generic options.

  This function updates the given opcodes based on generic command
  line options (like debug, dry-run, etc.).

  @param opcode_list: list of opcodes
  @param options: command line options or None
  @return: None (in-place modification)

  """
  for op in opcode_list:
    op.debug_level = options.debug
    # dry_run/priority are only copied when present on the options object
    if hasattr(options, "dry_run"):
      op.dry_run = options.dry_run
    if getattr(options, "priority", None) is not None:
      # translate symbolic priority name to its numeric value
      op.priority = _PRIONAME_TO_VALUE[options.priority]
  # TODO: Cache object?
  # Create a luxi client; failure to connect usually means we are not on
  # the master node (or the cluster is not initialized).
  client = luxi.Client()
  except luxi.NoMasterError:
    ss = ssconf.SimpleStore()

    # Try to read ssconf file
    except errors.ConfigurationError:
      raise errors.OpPrereqError("Cluster not initialized or this machine is"
                                 " not part of a cluster")

    # Determine whether we are on the master and give a helpful hint if not
    master, myself = ssconf.GetMasterAndMyself(ss=ss)
    if master != myself:
      raise errors.OpPrereqError("This is not the master node, please connect"
                                 " to node '%s' and rerun the command" %
def FormatError(err):
  """Return a formatted error message for a given error.

  This function takes an exception instance and returns a tuple
  consisting of two values: first, the recommended exit code, and
  second, a string describing the error message (not
  newline-terminated).

  """
  # The chain below maps each known exception type to a user-facing text;
  # the generic fallbacks come last.
  if isinstance(err, errors.ConfigurationError):
    txt = "Corrupt configuration file: %s" % msg
    obuf.write(txt + "\n")
    obuf.write("Aborting.")
  elif isinstance(err, errors.HooksAbort):
    obuf.write("Failure: hooks execution failed:\n")
    # err.args[0] is a list of (node, script, output) tuples
    for node, script, out in err.args[0]:
        obuf.write("  node: %s, script: %s, output: %s\n" %
                   (node, script, out))
        obuf.write("  node: %s, script: %s (no output)\n" %
  elif isinstance(err, errors.HooksFailure):
    obuf.write("Failure: hooks general failure: %s" % msg)
  elif isinstance(err, errors.ResolverError):
    # distinguish "cannot resolve myself" from a remote hostname
    this_host = netutils.Hostname.GetSysName()
    if err.args[0] == this_host:
      msg = "Failure: can't resolve my own hostname ('%s')"
      msg = "Failure: can't resolve hostname '%s'"
    obuf.write(msg % err.args[0])
  elif isinstance(err, errors.OpPrereqError):
    # two-argument form carries a structured (message, error-type) pair
    if len(err.args) == 2:
      obuf.write("Failure: prerequisites not met for this"
                 " operation:\nerror type: %s, error details:\n%s" %
                 (err.args[1], err.args[0]))
      obuf.write("Failure: prerequisites not met for this"
                 " operation:\n%s" % msg)
  elif isinstance(err, errors.OpExecError):
    obuf.write("Failure: command execution error:\n%s" % msg)
  elif isinstance(err, errors.TagError):
    obuf.write("Failure: invalid tag(s) given:\n%s" % msg)
  elif isinstance(err, errors.JobQueueDrainError):
    obuf.write("Failure: the job queue is marked for drain and doesn't"
               " accept new requests\n")
  elif isinstance(err, errors.JobQueueFull):
    obuf.write("Failure: the job queue is full and doesn't accept new"
               " job submissions until old jobs are archived\n")
  elif isinstance(err, errors.TypeEnforcementError):
    obuf.write("Parameter Error: %s" % msg)
  elif isinstance(err, errors.ParameterError):
    obuf.write("Failure: unknown/wrong parameter name '%s'" % msg)
  elif isinstance(err, luxi.NoMasterError):
    obuf.write("Cannot communicate with the master daemon.\nIs it running"
               " and listening for connections?")
  elif isinstance(err, luxi.TimeoutError):
    obuf.write("Timeout while talking to the master daemon. Jobs might have"
               " been submitted and will continue to run even if the call"
               " timed out. Useful commands in this situation are \"gnt-job"
               " list\", \"gnt-job cancel\" and \"gnt-job watch\". Error:\n")
  elif isinstance(err, luxi.PermissionError):
    obuf.write("It seems you don't have permissions to connect to the"
               " master daemon.\nPlease retry as a different user.")
  elif isinstance(err, luxi.ProtocolError):
    obuf.write("Unhandled protocol error while talking to the master daemon:\n"
  elif isinstance(err, errors.JobLost):
    obuf.write("Error checking job status: %s" % msg)
  elif isinstance(err, errors.QueryFilterParseError):
    obuf.write("Error while parsing query filter: %s\n" % err.args[0])
    obuf.write("\n".join(err.GetDetails()))
  elif isinstance(err, errors.GenericError):
    obuf.write("Unhandled Ganeti error: %s" % msg)
  elif isinstance(err, JobSubmittedException):
    # not an error: report the submitted job's ID with a zero exit code
    obuf.write("JobID: %s\n" % err.args[0])
    obuf.write("Unhandled exception: %s" % msg)
  return retcode, obuf.getvalue().rstrip("\n")
def GenericMain(commands, override=None, aliases=None,
                env_override=frozenset()):
  """Generic main function for all the gnt-* commands.

  @param commands: a dictionary with a special structure, see the design doc
                   for command line handling.
  @param override: if not None, we expect a dictionary with keys that will
                   override command line options; this can be used to pass
                   options from the scripts to generic functions
  @param aliases: dictionary with command aliases {'alias': 'target, ...}
  @param env_override: list of environment names which are allowed to submit
                       default args for commands
  @return: the exit code of the executed command

  """
  # save the program name and the entire command line for later logging
  binary = os.path.basename(sys.argv[0]) or sys.argv[0]
  if len(sys.argv) >= 2:
    binary += " " + sys.argv[1]
    old_cmdline = " ".join(sys.argv[2:])
    binary = "<unknown program>"

  func, options, args = _ParseArgs(sys.argv, commands, aliases, env_override)
  # Python 2 except syntax
  except errors.ParameterError, err:
    result, err_msg = FormatError(err)

  if func is None: # parse error

  # apply caller-supplied option overrides on top of the parsed options
  if override is not None:
    for key, val in override.iteritems():
      setattr(options, key, val)

  utils.SetupLogging(constants.LOG_COMMANDS, binary, debug=options.debug,
                     stderr_logging=True)

    logging.info("run with arguments '%s'", old_cmdline)
    logging.info("run with no arguments")

  result = func(options, args)
  except (errors.GenericError, luxi.ProtocolError,
          JobSubmittedException), err:
    result, err_msg = FormatError(err)
    logging.exception("Error during command processing")

  except KeyboardInterrupt:
    result = constants.EXIT_FAILURE
    ToStderr("Aborted. Note that if the operation created any jobs, they"
             " might have been submitted and"
             " will continue to run in the background.")
  except IOError, err:
    if err.errno == errno.EPIPE:
      # our terminal went away, we'll exit
      sys.exit(constants.EXIT_FAILURE)
def ParseNicOption(optvalue):
  """Parses the value of the --net option(s).

  @param optvalue: list of (index, params-dict) pairs as produced by the
      option parser
  @return: list of NIC parameter dictionaries, indexed by NIC number

  """
  nic_max = max(int(nidx[0]) + 1 for nidx in optvalue)
  except (TypeError, ValueError), err:
    raise errors.OpPrereqError("Invalid NIC index passed: %s" % str(err))

  # NOTE(review): [{}] * n makes every slot alias the SAME dict; slots are
  # presumably all overwritten below — confirm no gap index is mutated.
  nics = [{}] * nic_max
  for nidx, ndict in optvalue:
    if not isinstance(ndict, dict):
      raise errors.OpPrereqError("Invalid nic/%d value: expected dict,"
                                 " got %s" % (nidx, ndict))

    # validate/coerce the per-NIC parameter types in place
    utils.ForceDictType(ndict, constants.INIC_PARAMS_TYPES)
def GenericInstanceCreate(mode, opts, args):
  """Add an instance to the cluster via either creation or import.

  @param mode: constants.INSTANCE_CREATE or constants.INSTANCE_IMPORT
  @param opts: the command line options selected by the user
  @type args: list
  @param args: should contain only one element, the new instance name
  @rtype: int
  @return: the desired exit code

  """
  (pnode, snode) = SplitNodeOption(opts.node)

  hypervisor, hvparams = opts.hypervisor

  # NIC specification: explicit --net options, --no-nics, or one auto NIC
  nics = ParseNicOption(opts.nics)
  elif mode == constants.INSTANCE_CREATE:
    # default of one nic, all auto

  # Disk specification handling; --disk and -s (sd_size) are exclusive
  if opts.disk_template == constants.DT_DISKLESS:
    if opts.disks or opts.sd_size is not None:
      raise errors.OpPrereqError("Diskless instance but disk"
                                 " information passed")
    if (not opts.disks and not opts.sd_size
        and mode == constants.INSTANCE_CREATE):
      raise errors.OpPrereqError("No disk information specified")
    if opts.disks and opts.sd_size is not None:
      raise errors.OpPrereqError("Please use either the '--disk' or"
    if opts.sd_size is not None:
      # translate the single-disk shortcut into the generic --disk form
      opts.disks = [(0, {constants.IDISK_SIZE: opts.sd_size})]

    disk_max = max(int(didx[0]) + 1 for didx in opts.disks)
    except ValueError, err:
      raise errors.OpPrereqError("Invalid disk index passed: %s" % str(err))
    # NOTE(review): [{}] * n aliases one dict per slot; presumably every
    # used index is replaced below — confirm gap indices are never mutated.
    disks = [{}] * disk_max

    for didx, ddict in opts.disks:
      if not isinstance(ddict, dict):
        msg = "Invalid disk/%d value: expected dict, got %s" % (didx, ddict)
        raise errors.OpPrereqError(msg)
      elif constants.IDISK_SIZE in ddict:
        # "size" and "adopt" are mutually exclusive per disk
        if constants.IDISK_ADOPT in ddict:
          raise errors.OpPrereqError("Only one of 'size' and 'adopt' allowed"
                                     " (disk %d)" % didx)
          ddict[constants.IDISK_SIZE] = \
            utils.ParseUnit(ddict[constants.IDISK_SIZE])
        except ValueError, err:
          raise errors.OpPrereqError("Invalid disk size for disk %d: %s" %
      elif constants.IDISK_ADOPT in ddict:
        if mode == constants.INSTANCE_IMPORT:
          raise errors.OpPrereqError("Disk adoption not allowed for instance"
        # adopted disks get a placeholder size of 0
        ddict[constants.IDISK_SIZE] = 0
        raise errors.OpPrereqError("Missing size or adoption source for"

  if opts.tags is not None:
    tags = opts.tags.split(",")

  # type-check/coerce backend and hypervisor parameters in place
  utils.ForceDictType(opts.beparams, constants.BES_PARAMETER_COMPAT)
  utils.ForceDictType(hvparams, constants.HVS_PARAMETER_TYPES)

  # mode-specific parameters for the opcode
  if mode == constants.INSTANCE_CREATE:
    force_variant = opts.force_variant
    no_install = opts.no_install
    identify_defaults = False
  elif mode == constants.INSTANCE_IMPORT:
    force_variant = False
    src_node = opts.src_node
    src_path = opts.src_dir
    identify_defaults = opts.identify_defaults
    raise errors.ProgrammerError("Invalid creation mode %s" % mode)

  op = opcodes.OpInstanceCreate(instance_name=instance,
                                disk_template=opts.disk_template,
                                pnode=pnode, snode=snode,
                                ip_check=opts.ip_check,
                                name_check=opts.name_check,
                                wait_for_sync=opts.wait_for_sync,
                                file_storage_dir=opts.file_storage_dir,
                                file_driver=opts.file_driver,
                                iallocator=opts.iallocator,
                                hypervisor=hypervisor,
                                beparams=opts.beparams,
                                osparams=opts.osparams,
                                force_variant=force_variant,
                                no_install=no_install,
                                identify_defaults=identify_defaults)

  SubmitOrSend(op, opts)
class _RunWhileClusterStoppedHelper:
  """Helper class for L{RunWhileClusterStopped} to simplify state management

  """
  def __init__(self, feedback_fn, cluster_name, master_node, online_nodes):
    """Initializes this class.

    @type feedback_fn: callable
    @param feedback_fn: Feedback function
    @type cluster_name: string
    @param cluster_name: Cluster name
    @type master_node: string
    @param master_node Master node name
    @type online_nodes: list
    @param online_nodes: List of names of online nodes

    """
    self.feedback_fn = feedback_fn
    self.cluster_name = cluster_name
    self.master_node = master_node
    self.online_nodes = online_nodes

    # SSH runner used for remote (non-master) commands
    self.ssh = ssh.SshRunner(self.cluster_name)

    self.nonmaster_nodes = [name for name in online_nodes
                            if name != master_node]

    assert self.master_node not in self.nonmaster_nodes

  def _RunCmd(self, node_name, cmd):
    """Runs a command on the local or a remote machine.

    @type node_name: string
    @param node_name: Machine name
    @type cmd: list
    @param cmd: Command and its arguments
    @raise errors.OpExecError: if the command fails

    """
    if node_name is None or node_name == self.master_node:
      # No need to use SSH
      result = utils.RunCmd(cmd)
      result = self.ssh.Run(node_name, "root", utils.ShellQuoteArgs(cmd))

      errmsg = ["Failed to run command %s" % result.cmd]
        errmsg.append("on node %s" % node_name)
      errmsg.append(": exitcode %s and error %s" %
                    (result.exit_code, result.output))
      raise errors.OpExecError(" ".join(errmsg))

  def Call(self, fn, *args):
    """Call function while all daemons are stopped.

    @type fn: callable
    @param fn: Function to be called

    """
    # Pause watcher by acquiring an exclusive lock on watcher state file
    self.feedback_fn("Blocking watcher")
    watcher_block = utils.FileLock.Open(constants.WATCHER_LOCK_FILE)
    # TODO: Currently, this just blocks. There's no timeout.
    # TODO: Should it be a shared lock?
    watcher_block.Exclusive(blocking=True)

    # Stop master daemons, so that no new jobs can come in and all running
    self.feedback_fn("Stopping master daemons")
    self._RunCmd(None, [constants.DAEMON_UTIL, "stop-master"])

    # Stop daemons on all nodes
    for node_name in self.online_nodes:
      self.feedback_fn("Stopping daemons on %s" % node_name)
      self._RunCmd(node_name, [constants.DAEMON_UTIL, "stop-all"])

    # All daemons are shut down now
    return fn(self, *args)
    except Exception, err:
      # report the failure but continue to the restart path below
      _, errmsg = FormatError(err)
      logging.exception("Caught exception")
      self.feedback_fn(errmsg)

    # Start cluster again, master node last
    for node_name in self.nonmaster_nodes + [self.master_node]:
      self.feedback_fn("Starting daemons on %s" % node_name)
      self._RunCmd(node_name, [constants.DAEMON_UTIL, "start-all"])

    # Resume watcher
    watcher_block.Close()
def RunWhileClusterStopped(feedback_fn, fn, *args):
  """Calls a function while all cluster daemons are stopped.

  @type feedback_fn: callable
  @param feedback_fn: Feedback function
  @type fn: callable
  @param fn: Function to be called when daemons are stopped
  @return: the return value of C{fn}

  """
  feedback_fn("Gathering cluster information")

  # This ensures we're running on the master daemon
  (cluster_name, master_node) = \
    cl.QueryConfigValues(["cluster_name", "master_node"])

  online_nodes = GetOnlineNodes([], cl=cl)

  # Don't keep a reference to the client. The master daemon will go away.

  assert master_node in online_nodes

  # Delegate the stop/call/restart cycle to the helper class
  return _RunWhileClusterStoppedHelper(feedback_fn, cluster_name, master_node,
                                       online_nodes).Call(fn, *args)
def GenerateTable(headers, fields, separator, data,
                  numfields=None, unitfields=None,
                  # NOTE(review): the signature appears truncated in this
                  # excerpt; a `units` parameter is documented below but its
                  # declaration is not visible -- confirm.
  """Prints a table with headers and different fields.

  @type headers: dict
  @param headers: dictionary mapping field names to headers for
      the table
  @type fields: list
  @param fields: the field names corresponding to each row in
      the data field
  @param separator: the separator to be used; if this is None,
      the default 'smart' algorithm is used which computes optimal
      field width, otherwise just the separator is used between
      each field
  @type data: list
  @param data: a list of lists, each sublist being one row to be output
  @type numfields: list
  @param numfields: a list with the fields that hold numeric
      values and thus should be right-aligned
  @type unitfields: list
  @param unitfields: a list with the fields that hold numeric
      values that should be formatted with the units field
  @type units: string or None
  @param units: the units we should use for formatting, or None for
      automatic choice (human-readable for non-separator usage, otherwise
      megabytes); this is a one-letter string

  """
  # NOTE(review): several statements (if-bodies, else branches, loop headers
  # over the data rows, and variable initializations such as format_fields,
  # result and args) are missing from this excerpt; comments below describe
  # only the visible fragments.
  if numfields is None:
  if unitfields is None:

  # Wrap in FieldSets to allow wildcard matching of field names
  numfields = utils.FieldSet(*numfields) # pylint: disable=W0142
  unitfields = utils.FieldSet(*unitfields) # pylint: disable=W0142

  # Build one printf-style conversion per field: plain "%s" when an explicit
  # separator is used, "%*s" (dynamic width, right-aligned) for numeric
  # fields, "%-*s" (left-aligned) otherwise
  for field in fields:
    if headers and field not in headers:
      # TODO: handle better unknown fields (either revert to old
      # style of raising exception, or deal more intelligently with
      # variable fields)
      headers[field] = field
    if separator is not None:
      format_fields.append("%s")
    elif numfields.Matches(field):
      format_fields.append("%*s")
      format_fields.append("%-*s")

  if separator is None:
    # Smart mode: mlens tracks the maximum width seen for each column
    mlens = [0 for name in fields]
    format_str = " ".join(format_fields)
    # Escape "%" in the separator so the final %-formatting is safe
    format_str = separator.replace("%", "%%").join(format_fields)

  # Stringify all cells (formatting unit fields with FormatUnit) and record
  # column widths for smart mode
  for idx, val in enumerate(row):
    if unitfields.Matches(fields[idx]):
      except (TypeError, ValueError):
      val = row[idx] = utils.FormatUnit(val, units)
    val = row[idx] = str(val)
    if separator is None:
      mlens[idx] = max(mlens[idx], len(val))

  # Header row: column widths must cover the header strings too
  for idx, name in enumerate(fields):
    if separator is None:
      mlens[idx] = max(mlens[idx], len(hdr))
      args.append(mlens[idx])
  result.append(format_str % tuple(args))

  if separator is None:
    assert len(mlens) == len(fields)
    # Avoid trailing padding after the last column unless it is right-aligned
    if fields and not numfields.Matches(fields[-1]):

  # Emit each data row; a None row is rendered as "-" placeholders
  line = ["-" for _ in fields]
  for idx in range(len(fields)):
    if separator is None:
      args.append(mlens[idx])
    args.append(line[idx])
  result.append(format_str % tuple(args))
2503 def _FormatBool(value):
2504 """Formats a boolean value as a string.
#: Default formatting for query results; (callback, align right)
_DEFAULT_FORMAT_QUERY = {
  constants.QFT_TEXT: (str, False),
  constants.QFT_BOOL: (_FormatBool, False),
  constants.QFT_NUMBER: (str, True),
  constants.QFT_TIMESTAMP: (utils.FormatTime, False),
  constants.QFT_OTHER: (str, False),
  constants.QFT_UNKNOWN: (str, False),
  # QFT_UNIT is deliberately absent: it needs the runtime `unit` argument and
  # is handled specially in _GetColumnFormatter.
  }
def _GetColumnFormatter(fdef, override, unit):
  """Returns formatting function for a field.

  @type fdef: L{objects.QueryFieldDefinition}
  @type override: dict
  @param override: Dictionary for overriding field formatting functions,
      indexed by field name, contents like L{_DEFAULT_FORMAT_QUERY}
  @type unit: string
  @param unit: Unit used for formatting fields of type L{constants.QFT_UNIT}
  @rtype: tuple; (callable, bool)
  @return: Returns the function to format a value (takes one parameter) and a
      boolean for aligning the value on the right-hand side

  """
  # Caller-supplied overrides take precedence over the defaults
  # NOTE(review): the early return for a non-None `fmt` (both here and after
  # the default lookup below) is not visible in this excerpt -- confirm.
  fmt = override.get(fdef.name, None)

  # QFT_UNIT must not appear in the static table; it is handled below
  assert constants.QFT_UNIT not in _DEFAULT_FORMAT_QUERY

  if fdef.kind == constants.QFT_UNIT:
    # Can't keep this information in the static dictionary
    return (lambda value: utils.FormatUnit(value, unit), True)

  fmt = _DEFAULT_FORMAT_QUERY.get(fdef.kind, None)

  # No formatter known for this field kind
  raise NotImplementedError("Can't format column type '%s'" % fdef.kind)
class _QueryColumnFormatter:
  """Callable class for formatting fields of a query.

  """
  def __init__(self, fn, status_fn, verbose):
    """Initializes this class.

    @type fn: callable
    @param fn: Formatting function
    @type status_fn: callable
    @param status_fn: Function to report fields' status
    @type verbose: boolean
    @param verbose: whether to use verbose field descriptions or not

    """
    # NOTE(review): the assignment storing `fn` (as self._fn, used in
    # __call__ below) is not visible in this excerpt -- confirm.
    self._status_fn = status_fn
    self._verbose = verbose

  def __call__(self, data):
    """Returns a field's string representation.

    """
    # Each datum is a (status, value) pair as returned by the query layer
    (status, value) = data

    # Report the status so the caller can collect per-status statistics
    self._status_fn(status)

    if status == constants.RS_NORMAL:
      return self._fn(value)

    # Abnormal statuses must not carry a payload
    assert value is None, \
           "Found value %r for abnormal status %s" % (value, status)

    return FormatResultError(status, self._verbose)
def FormatResultError(status, verbose):
  """Formats result status other than L{constants.RS_NORMAL}.

  @param status: The result status
  @type verbose: boolean
  @param verbose: Whether to return the verbose text
  @return: Text of result status

  """
  assert status != constants.RS_NORMAL, \
         "FormatResultError called with status equal to constants.RS_NORMAL"
  # Look up the (verbose, terse) description pair for this status
  # NOTE(review): the try/except wrapping this lookup (the raise below is
  # presumably its KeyError handler) and the returns choosing between
  # verbose_text and normal_text are not visible in this excerpt.
  (verbose_text, normal_text) = constants.RSS_DESCRIPTION[status]
  raise NotImplementedError("Unknown status %s" % status)
def FormatQueryResult(result, unit=None, format_override=None, separator=None,
                      header=False, verbose=False):
  """Formats data in L{objects.QueryResponse}.

  @type result: L{objects.QueryResponse}
  @param result: result of query operation
  @type unit: string
  @param unit: Unit used for formatting fields of type L{constants.QFT_UNIT},
      see L{utils.text.FormatUnit}
  @type format_override: dict
  @param format_override: Dictionary for overriding field formatting functions,
      indexed by field name, contents like L{_DEFAULT_FORMAT_QUERY}
  @type separator: string or None
  @param separator: String used to separate fields
  @type header: bool
  @param header: Whether to output header row
  @type verbose: boolean
  @param verbose: whether to use verbose field descriptions or not

  """
  # NOTE(review): several statements are missing from this excerpt (the
  # `columns` initialization, the body of _RecordStatus, the tail of the
  # columns.append call, and branch bodies assigning `status`); comments
  # below describe only the visible code.
  if format_override is None:
    format_override = {}

  # Per-status counters, filled in via _RecordStatus during formatting
  stats = dict.fromkeys(constants.RS_ALL, 0)

  def _RecordStatus(status):

  # Build one TableColumn per queried field
  for fdef in result.fields:
    assert fdef.title and fdef.name
    (fn, align_right) = _GetColumnFormatter(fdef, format_override, unit)
    columns.append(TableColumn(fdef.title,
                               _QueryColumnFormatter(fn, _RecordStatus,

  table = FormatTable(result.data, columns, header, separator)

  # Collect statistics
  assert len(stats) == len(constants.RS_ALL)
  assert compat.all(count >= 0 for count in stats.values())

  # Determine overall status. If there was no data, unknown fields must be
  # detected via the field definitions.
  if (stats[constants.RS_UNKNOWN] or
      (not result.data and _GetUnknownFields(result.fields))):
  elif compat.any(count > 0 for key, count in stats.items()
                  if key != constants.RS_NORMAL):
    status = QR_INCOMPLETE

  return (status, table)
def _GetUnknownFields(fdefs):
  """Returns list of unknown fields included in C{fdefs}.

  @type fdefs: list of L{objects.QueryFieldDefinition}

  """
  unknown = []
  for fdef in fdefs:
    if fdef.kind == constants.QFT_UNKNOWN:
      unknown.append(fdef)
  return unknown
def _WarnUnknownFields(fdefs):
  """Prints a warning to stderr if a query included unknown fields.

  @type fdefs: list of L{objects.QueryFieldDefinition}

  """
  unknown = _GetUnknownFields(fdefs)
  # NOTE(review): the guard (presumably `if unknown:`) around this warning
  # and the function's return statement are not visible in this excerpt;
  # callers (GenericList) use the return value as a boolean.
  ToStderr("Warning: Queried for unknown fields %s",
           utils.CommaJoin(fdef.name for fdef in unknown))
def GenericList(resource, fields, names, unit, separator, header, cl=None,
                format_override=None, verbose=False, force_filter=False):
  """Generic implementation for listing all items of a resource.

  @param resource: One of L{constants.QR_VIA_LUXI}
  @type fields: list of strings
  @param fields: List of fields to query for
  @type names: list of strings
  @param names: Names of items to query for
  @type unit: string or None
  @param unit: Unit used for formatting fields of type L{constants.QFT_UNIT} or
      None for automatic choice (human-readable for non-separator usage,
      otherwise megabytes); this is a one-letter string
  @type separator: string or None
  @param separator: String used to separate fields
  @type header: bool
  @param header: Whether to show header row
  @type force_filter: bool
  @param force_filter: Whether to always treat names as filter
  @type format_override: dict
  @param format_override: Dictionary for overriding field formatting functions,
      indexed by field name, contents like L{_DEFAULT_FORMAT_QUERY}
  @type verbose: boolean
  @param verbose: whether to use verbose field descriptions or not

  """
  # Convert the requested names into a query filter
  qfilter = qlang.MakeFilter(names, force_filter)

  # NOTE(review): the creation of a default luxi client when `cl` is None,
  # the tail of the FormatQueryResult call, and the loop printing `data` are
  # not visible in this excerpt.
  response = cl.Query(resource, fields, qfilter)

  found_unknown = _WarnUnknownFields(response.fields)

  (status, data) = FormatQueryResult(response, unit=unit, separator=separator,
                                     format_override=format_override,

  # Either unknown fields were found and reported, or there were none
  assert ((found_unknown and status == QR_UNKNOWN) or
          (not found_unknown and status != QR_UNKNOWN))

  if status == QR_UNKNOWN:
    return constants.EXIT_UNKNOWN_FIELD

  # TODO: Should the list command fail if not all data could be collected?
  return constants.EXIT_SUCCESS
def GenericListFields(resource, fields, separator, header, cl=None):
  """Generic implementation for listing fields for a resource.

  @param resource: One of L{constants.QR_VIA_LUXI}
  @type fields: list of strings
  @param fields: List of fields to query for
  @type separator: string or None
  @param separator: String used to separate fields
  @type header: bool
  @param header: Whether to show header row

  """
  # NOTE(review): several statements are missing from this excerpt: the
  # default client creation, the opening of the `columns` list literal, the
  # statement printing each formatted line, and the unknown-field check
  # guarding the first return.
  response = cl.QueryFields(resource, fields)

  found_unknown = _WarnUnknownFields(response.fields)

    TableColumn("Name", str, False),
    TableColumn("Title", str, False),
    TableColumn("Description", str, False),

  # One output row per field definition
  rows = [[fdef.name, fdef.title, fdef.doc] for fdef in response.fields]

  for line in FormatTable(rows, columns, header, separator):

    return constants.EXIT_UNKNOWN_FIELD

  return constants.EXIT_SUCCESS
2797 """Describes a column for L{FormatTable}.
  def __init__(self, title, fn, align_right):
    """Initializes this class.

    @type title: string
    @param title: Column title
    @type fn: callable
    @param fn: Formatting function
    @type align_right: bool
    @param align_right: Whether to align values on the right-hand side

    """
    # NOTE(review): the assignments storing `title` and `fn` on the instance
    # (FormatTable reads col.title and col.format) are not visible in this
    # excerpt -- confirm against the full source.
    self.align_right = align_right
2816 def _GetColFormatString(width, align_right):
2817 """Returns the format string for a field.
2825 return "%%%s%ss" % (sign, width)
def FormatTable(rows, columns, header, separator):
  """Formats data as a table.

  @type rows: list of lists
  @param rows: Row data, one list per row
  @type columns: list of L{TableColumn}
  @param columns: Column descriptions
  @type header: bool
  @param header: Whether to show header row
  @type separator: string or None
  @param separator: String used to separate columns

  """
  # NOTE(review): the if/else choosing between the header-seeded and the
  # empty initializations of data/colwidth, and the loop header iterating
  # over `rows`, are not visible in this excerpt.
  # Seed output with the header row; initial widths cover the titles
  data = [[col.title for col in columns]]
  colwidth = [len(col.title) for col in columns]
  colwidth = [0 for _ in columns]

    assert len(row) == len(columns)

    # Format each cell with its column's formatting callback
    formatted = [col.format(value) for value, col in zip(row, columns)]

    if separator is None:
      # Update column widths
      for idx, (oldwidth, value) in enumerate(zip(colwidth, formatted)):
        # Modifying a list's items while iterating is fine
        colwidth[idx] = max(oldwidth, len(value))

    data.append(formatted)

  if separator is not None:
    # Return early if a separator is used
    return [separator.join(row) for row in data]

  if columns and not columns[-1].align_right:
    # Avoid unnecessary spaces at end of line

  # Build format string
  fmt = " ".join([_GetColFormatString(width, col.align_right)
                  for col, width in zip(columns, colwidth)])

  return [fmt % tuple(row) for row in data]
def FormatTimestamp(ts):
  """Formats a given timestamp.

  @param ts: a timeval-type timestamp, a tuple of seconds and microseconds

  @rtype: string
  @return: a string with the formatted timestamp

  """
  if not isinstance(ts, (tuple, list)) or len(ts) != 2:
    # Not a (seconds, microseconds) pair; nothing sensible to format.
    # NOTE(review): the original fallback value was elided in the reviewed
    # excerpt; "?" matches the placeholder style used elsewhere in this file.
    return "?"

  # The unpacking of `ts` was missing in the reviewed excerpt, leaving
  # sec/usec undefined; restore it from the documented tuple layout.
  (sec, usec) = ts
  return time.strftime("%F %T", time.localtime(sec)) + ".%06d" % usec
def ParseTimespec(value):
  """Parse a time specification.

  The following suffixes will be recognized:

    - s: seconds
    - m: minutes
    - h: hours
    - d: day
    - w: weeks

  Without any suffix, the value will be taken to be in seconds.

  """
  # NOTE(review): the definition of suffix_map, the `try:` statements paired
  # with the except clauses below, the suffix-stripping assignment and the
  # final return are not visible in this excerpt.
  if not value:
    raise errors.OpPrereqError("Empty time specification passed")
  # No recognized suffix: interpret the whole value as seconds
  if value[-1] not in suffix_map:
    except (TypeError, ValueError):
      raise errors.OpPrereqError("Invalid time specification '%s'" % value)
  # Suffix present: scale the numeric part by the matching multiplier
  multiplier = suffix_map[value[-1]]
  if not value: # no data left after stripping the suffix
    raise errors.OpPrereqError("Invalid time specification (only"
  value = int(value) * multiplier
  except (TypeError, ValueError):
    raise errors.OpPrereqError("Invalid time specification '%s'" % value)
def GetOnlineNodes(nodes, cl=None, nowarn=False, secondary_ips=False,
                   filter_master=False, nodegroup=None):
  """Returns the names of online nodes.

  This function will also log a warning on stderr with the names of
  the offline nodes that are skipped.

  @param nodes: if not empty, use only this subset of nodes (minus the
      offline ones)
  @param cl: if not None, luxi client to use
  @type nowarn: boolean
  @param nowarn: by default, this function will output a note with the
      offline nodes that are skipped; if this parameter is True the
      note is not displayed
  @type secondary_ips: boolean
  @param secondary_ips: if True, return the secondary IPs instead of the
      names, useful for doing network traffic over the replication interface
  @type filter_master: boolean
  @param filter_master: if True, do not return the master node in the list
      (useful in coordination with secondary_ips where we cannot check our
      node name against the list)
  @type nodegroup: string
  @param nodegroup: If set, only return nodes in this node group

  """
  # NOTE(review): several statements are missing from this excerpt (default
  # client creation, `qfilter = []`, the `if nodes:` and `if filter_master:`
  # guards, else branches, the _GetName/_GetSip helper headers and the
  # selection of `fn`); comments below describe only the visible code.
  qfilter.append(qlang.MakeSimpleFilter("name", nodes))

  if nodegroup is not None:
    # A group may be given either by name or by UUID
    qfilter.append([qlang.OP_OR, [qlang.OP_EQUAL, "group", nodegroup],
                                 [qlang.OP_EQUAL, "group.uuid", nodegroup]])

  # Exclude the master node (presumably under the filter_master guard)
  qfilter.append([qlang.OP_NOT, [qlang.OP_TRUE, "master"]])

  if len(qfilter) > 1:
    # Multiple conditions must all hold
    final_filter = [qlang.OP_AND] + qfilter
    assert len(qfilter) == 1
    final_filter = qfilter[0]

  result = cl.Query(constants.QR_NODE, ["name", "offline", "sip"], final_filter)

  def _IsOffline(row):
    # Each cell is a (status, value) pair; second column is "offline"
    (_, (_, offline), _) = row

    # First column is the node name
    ((_, name), _, _) = row

    # Third column is the secondary IP
    (_, _, (_, sip)) = row

  # Split the result rows into offline and online nodes
  (offline, online) = compat.partition(result.data, _IsOffline)

  if offline and not nowarn:
    ToStderr("Note: skipping offline node(s): %s" %
             utils.CommaJoin(map(_GetName, offline)))

  return map(fn, online)
def _ToStream(stream, txt, *args):
  """Write a message to a stream, bypassing the logging system

  @type stream: file object
  @param stream: the file to which we should write
  @type txt: str
  @param txt: the message

  """
  # NOTE(review): the `try:` statement paired with the except clause below,
  # the branch handling an empty `args`, the trailing newline/flush, and the
  # re-raise for non-EPIPE IOErrors are not visible in this excerpt.
  stream.write(txt % args)
  except IOError, err:
    if err.errno == errno.EPIPE:
      # our terminal went away, we'll exit
      sys.exit(constants.EXIT_FAILURE)
def ToStdout(txt, *args):
  """Write a message to stdout only, bypassing the logging system

  This is just a wrapper over _ToStream.

  @type txt: str
  @param txt: the message

  """
  target = sys.stdout
  _ToStream(target, txt, *args)
def ToStderr(txt, *args):
  """Write a message to stderr only, bypassing the logging system

  This is just a wrapper over _ToStream.

  @type txt: str
  @param txt: the message

  """
  target = sys.stderr
  _ToStream(target, txt, *args)
class JobExecutor(object):
  """Class which manages the submission and execution of multiple jobs.

  Note that instances of this class should not be reused between
  GetResults() calls.

  """
  # NOTE(review): many statements are missing from this class in the
  # reviewed excerpt (attribute initializations in __init__, if/else headers
  # and branches, try statements paired with the except clauses, and several
  # return statements); the comments below describe only the visible code.
  def __init__(self, cl=None, verbose=True, opts=None, feedback_fn=None):
    self.verbose = verbose
    self.feedback_fn = feedback_fn
    # Monotonic counter used to keep job results in submission order
    self._counter = itertools.count()

  # NOTE(review): presumably decorated with @staticmethod (takes no self)
  def _IfName(name, fmt):
    """Helper function for formatting name.

    """

  def QueueJob(self, name, *ops):
    """Record a job for later submit.

    @type name: string
    @param name: a description of the job, will be used in WaitJobSet

    """
    SetGenericOpcodeOpts(ops, self.opts)
    self.queue.append((self._counter.next(), name, ops))

  def AddJobId(self, name, status, job_id):
    """Adds a job ID to the internal queue.

    """
    self.jobs.append((self._counter.next(), status, job_id, name))

  def SubmitPending(self, each=False):
    """Submit all pending jobs.

    """
      for (_, _, ops) in self.queue:
        # SubmitJob will remove the success status, but raise an exception if
        # the submission fails, so we'll notice that anyway.
        results.append([True, self.cl.SubmitJob(ops)])
      # Batch submission path: one luxi call for all queued jobs
      results = self.cl.SubmitManyJobs([ops for (_, _, ops) in self.queue])
    # Pair each submission result with its queued (index, name) entry
    for ((status, data), (idx, name, _)) in zip(results, self.queue):
      self.jobs.append((idx, status, data, name))

  def _ChooseJob(self):
    """Choose a non-waiting/queued job to poll next.

    """
    assert self.jobs, "_ChooseJob called with empty job list"

    # Query the status of (at most) the first _CHOOSE_BATCH jobs
    result = self.cl.QueryJobs([i[2] for i in self.jobs[:_CHOOSE_BATCH]],

    for job_data, status in zip(self.jobs, result):
      if (isinstance(status, list) and status and
          status[0] in (constants.JOB_STATUS_QUEUED,
                        constants.JOB_STATUS_WAITING,
                        constants.JOB_STATUS_CANCELING)):
        # job is still present and waiting
      # good candidate found (either running job or lost job)
      self.jobs.remove(job_data)

    # No obviously-finished job: fall back to the oldest one
    return self.jobs.pop(0)

  def GetResults(self):
    """Wait for and return the results of all jobs.

    @rtype: list
    @return: list of tuples (success, job results), in the same order
        as the submitted jobs; if a job has failed, instead of the result
        there will be the error message

    """
    self.SubmitPending()
    ok_jobs = [row[2] for row in self.jobs if row[1]]
    ToStdout("Submitted jobs %s", utils.CommaJoin(ok_jobs))

    # first, remove any non-submitted jobs
    self.jobs, failures = compat.partition(self.jobs, lambda x: x[1])
    for idx, _, jid, name in failures:
      ToStderr("Failed to submit job%s: %s", self._IfName(name, " for %s"), jid)
      results.append((idx, False, jid))

      (idx, _, jid, name) = self._ChooseJob()
      ToStdout("Waiting for job %s%s ...", jid, self._IfName(name, " for %s"))
        job_result = PollJob(jid, cl=self.cl, feedback_fn=self.feedback_fn)
      except errors.JobLost, err:
        _, job_result = FormatError(err)
        ToStderr("Job %s%s has been archived, cannot check its result",
                 jid, self._IfName(name, " for %s"))
      except (errors.GenericError, luxi.ProtocolError), err:
        _, job_result = FormatError(err)
        # the error message will always be shown, verbose or not
        ToStderr("Job %s%s has failed: %s",
                 jid, self._IfName(name, " for %s"), job_result)

      results.append((idx, success, job_result))

    # sort based on the index, then drop it
    results = [i[1:] for i in results]

  def WaitOrShow(self, wait):
    """Wait for job results or only print the job IDs.

    @type wait: boolean
    @param wait: whether to wait or not

    """
      return self.GetResults()
      self.SubmitPending()
      for _, status, result, name in self.jobs:
          ToStdout("%s: %s", result, name)
          ToStderr("Failure for %s: %s", name, result)
      return [row[1:3] for row in self.jobs]
def FormatParameterDict(buf, param_dict, actual, level=1):
  """Formats a parameter dictionary.

  @type buf: L{StringIO}
  @param buf: the buffer into which to write
  @type param_dict: dict
  @param param_dict: the own parameters
  @type actual: dict
  @param actual: the current parameter set (including defaults)
  @param level: Level of indent

  """
  pad = " " * level
  lines = []
  for name in sorted(actual):
    # Parameters not set explicitly are shown with their effective default
    shown = param_dict.get(name, "default (%s)" % actual[name])
    lines.append("%s- %s: %s\n" % (pad, name, shown))
  buf.write("".join(lines))
def ConfirmOperation(names, list_type, text, extra=""):
  """Ask the user to confirm an operation on a list of list_type.

  This function is used to request confirmation for doing an operation
  on a given list of list_type.

  @type names: list
  @param names: the list of names that we display when
      we ask for confirmation
  @type list_type: str
  @param list_type: Human readable name for elements in the list (e.g. nodes)
  @type text: str
  @param text: the operation that the user should confirm
  @return: True or False depending on user's confirmation.

  """
  # NOTE(review): the computation of `count`, the guard deciding whether to
  # offer the "view" choice instead of inlining the affected list, the
  # `if choice == "v"` check before the second prompt, and the final return
  # are not visible in this excerpt.
  msg = ("The %s will operate on %d %s.\n%s"
         "Do you want to continue?" % (text, count, list_type, extra))
  affected = (("\nAffected %s:\n" % list_type) +
              "\n".join([" %s" % name for name in names]))

  choices = [("y", True, "Yes, execute the %s" % text),
             ("n", False, "No, abort the %s" % text)]

  # For long lists an extra "view" option is offered instead of printing
  # the names inline
  choices.insert(1, ("v", "v", "View the list of affected %s" % list_type))

  question = msg + affected

  choice = AskUser(question, choices)
  # If the user asked to view the list, show it and ask again
  choice = AskUser(msg + affected, choices)