# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.

"""Module dealing with command line parsing"""
import sys

from cStringIO import StringIO

from ganeti import utils
from ganeti import errors
from ganeti import constants
from ganeti import opcodes
from ganeti import luxi
from ganeti import ssconf
from ganeti import rpc
from ganeti import ssh
from ganeti import compat
from ganeti import netutils
from ganeti import qlang

from optparse import (OptionParser, TitledHelpFormatter,
                      Option, OptionValueError)
51 # Command line options
64 "CLUSTER_DOMAIN_SECRET_OPT",
81 "FILESTORE_DRIVER_OPT",
87 "GLOBAL_SHARED_FILEDIR_OPT",
92 "DEFAULT_IALLOCATOR_OPT",
93 "IDENTIFY_DEFAULTS_OPT",
96 "IGNORE_FAILURES_OPT",
98 "IGNORE_REMOVE_FAILURES_OPT",
99 "IGNORE_SECONDARIES_OPT",
103 "MAINTAIN_NODE_HEALTH_OPT",
105 "MASTER_NETMASK_OPT",
107 "MIGRATION_MODE_OPT",
109 "NEW_CLUSTER_CERT_OPT",
110 "NEW_CLUSTER_DOMAIN_SECRET_OPT",
111 "NEW_CONFD_HMAC_KEY_OPT",
114 "NEW_SPICE_CERT_OPT",
116 "NODE_FORCE_JOIN_OPT",
118 "NODE_PLACEMENT_OPT",
122 "NODRBD_STORAGE_OPT",
128 "NOMODIFY_ETCHOSTS_OPT",
129 "NOMODIFY_SSH_SETUP_OPT",
135 "NOSSH_KEYCHECK_OPT",
147 "PREALLOC_WIPE_DISKS_OPT",
148 "PRIMARY_IP_VERSION_OPT",
154 "REMOVE_INSTANCE_OPT",
159 "SECONDARY_ONLY_OPT",
163 "SHUTDOWN_TIMEOUT_OPT",
170 "STARTUP_PAUSED_OPT",
183 # Generic functions for CLI programs
186 "GenericInstanceCreate",
192 "JobSubmittedException",
194 "RunWhileClusterStopped",
198 # Formatting functions
199 "ToStderr", "ToStdout",
202 "FormatParameterDict",
211 # command line options support infrastructure
212 "ARGS_MANY_INSTANCES",
231 "OPT_COMPL_INST_ADD_NODES",
232 "OPT_COMPL_MANY_NODES",
233 "OPT_COMPL_ONE_IALLOCATOR",
234 "OPT_COMPL_ONE_INSTANCE",
235 "OPT_COMPL_ONE_NODE",
236 "OPT_COMPL_ONE_NODEGROUP",
242 "COMMON_CREATE_OPTS",
#: Priorities (sorted), as (name, value) pairs usable for option choices
_PRIORITY_NAMES = [
  ("low", constants.OP_PRIO_LOW),
  ("normal", constants.OP_PRIO_NORMAL),
  ("high", constants.OP_PRIO_HIGH),
  ]

#: Priority dictionary for easier lookup
# TODO: Replace this and _PRIORITY_NAMES with a single sorted dictionary once
# we migrate to Python 2.6
_PRIONAME_TO_VALUE = dict(_PRIORITY_NAMES)
# Query result status for clients
(QR_NORMAL,
 QR_UNKNOWN,
 QR_INCOMPLETE) = range(3)
265 #: Maximum batch size for ChooseJob
270 def __init__(self, min=0, max=None): # pylint: disable=W0622
275 return ("<%s min=%s max=%s>" %
276 (self.__class__.__name__, self.min, self.max))
class ArgSuggest(_Argument):
  """Suggesting argument.

  Value can be any of the ones passed to the constructor.

  """
  # pylint: disable=W0622
  def __init__(self, min=0, max=None, choices=None):
    _Argument.__init__(self, min=min, max=max)
    # choices is advisory only: used for shell completion suggestions
    self.choices = choices

  def __repr__(self):
    return ("<%s min=%s max=%s choices=%r>" %
            (self.__class__.__name__, self.min, self.max, self.choices))
class ArgChoice(ArgSuggest):
  """Choice argument.

  Value can be any of the ones passed to the constructor. Like L{ArgSuggest},
  but value must be one of the choices.

  """
class ArgUnknown(_Argument):
  """Unknown argument to program (e.g. determined at runtime).

  """


class ArgInstance(_Argument):
  """Instances argument.

  """


class ArgNode(_Argument):
  """Node argument.

  """


class ArgGroup(_Argument):
  """Node group argument.

  """


class ArgJobId(_Argument):
  """Job ID argument.

  """


class ArgFile(_Argument):
  """File path argument.

  """


class ArgCommand(_Argument):
  """Command argument.

  """


class ArgHost(_Argument):
  """Host argument.

  """


class ArgOs(_Argument):
  """OS argument.

  """
# Pre-built argument specifications for the most common command shapes
ARGS_MANY_INSTANCES = [ArgInstance()]
ARGS_MANY_NODES = [ArgNode()]
ARGS_MANY_GROUPS = [ArgGroup()]
ARGS_ONE_INSTANCE = [ArgInstance(min=1, max=1)]
ARGS_ONE_NODE = [ArgNode(min=1, max=1)]
ARGS_ONE_GROUP = [ArgGroup(min=1, max=1)]
ARGS_ONE_OS = [ArgOs(min=1, max=1)]
def _ExtractTagsObject(opts, args):
  """Extract the tag type object.

  Note that this function will modify its args parameter.

  @param opts: parsed options; must carry a C{tag_type} attribute
  @param args: list of command arguments; for node-group/node/instance tags
      the first element is popped off as the object name
  @return: (kind, name) tuple identifying the tagged object
  @raise errors.ProgrammerError: if C{tag_type} is missing or unhandled
  @raise errors.OpPrereqError: if a name argument is required but missing

  """
  if not hasattr(opts, "tag_type"):
    raise errors.ProgrammerError("tag_type not passed to _ExtractTagsObject")
  kind = opts.tag_type
  if kind == constants.TAG_CLUSTER:
    # the cluster is a singleton, its kind doubles as its name
    retval = kind, kind
  elif kind in (constants.TAG_NODEGROUP,
                constants.TAG_NODE,
                constants.TAG_INSTANCE):
    if not args:
      raise errors.OpPrereqError("no arguments passed to the command")
    name = args.pop(0)
    retval = kind, name
  else:
    raise errors.ProgrammerError("Unhandled tag type '%s'" % kind)
  return retval
def _ExtendTags(opts, args):
  """Extend the args if a source file has been given.

  This function will extend the tags with the contents of the file
  passed in the 'tags_source' attribute of the opts parameter. A file
  named '-' will be replaced by stdin.

  """
  fname = opts.tags_source
  if fname is None:
    return
  if fname == "-":
    new_fh = sys.stdin
  else:
    new_fh = open(fname, "r")
  new_data = []
  try:
    # we don't use the nice 'new_data = [line.strip() for line in fh]'
    # because of python bug 1633941
    while True:
      line = new_fh.readline()
      if not line:
        break
      new_data.append(line.strip())
  finally:
    # only close streams we opened ourselves; closing sys.stdin would
    # break any later read from standard input
    if new_fh is not sys.stdin:
      new_fh.close()
  args.extend(new_data)
def ListTags(opts, args):
  """List the tags on a given object.

  This is a generic implementation that knows how to deal with all
  three cases of tag objects (cluster, node, instance). The opts
  argument is expected to contain a tag_type field denoting what
  object type we work on.

  """
  kind, name = _ExtractTagsObject(opts, args)
  cl = GetClient()
  result = cl.QueryTags(kind, name)
  # sort for stable, user-friendly output
  result = list(result)
  result.sort()
  for tag in result:
    ToStdout(tag)
def AddTags(opts, args):
  """Add tags on a given object.

  This is a generic implementation that knows how to deal with all
  three cases of tag objects (cluster, node, instance). The opts
  argument is expected to contain a tag_type field denoting what
  object type we work on.

  """
  kind, name = _ExtractTagsObject(opts, args)
  # pull additional tags from opts.tags_source, if given
  _ExtendTags(opts, args)
  if not args:
    raise errors.OpPrereqError("No tags to be added")
  op = opcodes.OpTagsSet(kind=kind, name=name, tags=args)
  SubmitOpCode(op, opts=opts)
def RemoveTags(opts, args):
  """Remove tags from a given object.

  This is a generic implementation that knows how to deal with all
  three cases of tag objects (cluster, node, instance). The opts
  argument is expected to contain a tag_type field denoting what
  object type we work on.

  """
  kind, name = _ExtractTagsObject(opts, args)
  # pull additional tags from opts.tags_source, if given
  _ExtendTags(opts, args)
  if not args:
    raise errors.OpPrereqError("No tags to be removed")
  op = opcodes.OpTagsDel(kind=kind, name=name, tags=args)
  SubmitOpCode(op, opts=opts)
def check_unit(option, opt, value):  # pylint: disable=W0613
  """OptParsers custom converter for units.

  Translates a value such as "512m" into an integer via
  L{utils.ParseUnit}, converting parse failures into the
  L{OptionValueError} optparse expects from a type checker.

  """
  try:
    return utils.ParseUnit(value)
  except errors.UnitParseError as err:
    # "as" form instead of "except E, err": valid on Python 2.6+ and 3.x
    raise OptionValueError("option %s: %s" % (opt, err))
def _SplitKeyVal(opt, data):
  """Convert a KeyVal string into a dict.

  This function will convert a key=val[,...] string into a dict. Empty
  values will be converted specially: keys which have the prefix 'no_'
  will have the value=False and the prefix stripped, keys with the prefix
  'un_' will have the value=None and the prefix stripped, the others will
  have value=True.

  @type opt: string
  @param opt: a string holding the option name for which we process the
      data, used in building error messages
  @type data: string
  @param data: a string of the format key=val,key=val,...
  @rtype: dict
  @return: {key=val, key=val}
  @raises errors.ParameterError: if there are duplicate keys

  """
  kv_dict = {}
  if data:
    for elem in utils.UnescapeAndSplit(data, sep=","):
      if "=" in elem:
        key, val = elem.split("=", 1)
      else:
        # valueless entries: the prefix decides the implied value
        if elem.startswith(NO_PREFIX):
          key, val = elem[len(NO_PREFIX):], False
        elif elem.startswith(UN_PREFIX):
          key, val = elem[len(UN_PREFIX):], None
        else:
          key, val = elem, True
      if key in kv_dict:
        raise errors.ParameterError("Duplicate key '%s' in option %s" %
                                    (key, opt))
      kv_dict[key] = val
  return kv_dict
def check_ident_key_val(option, opt, value):  # pylint: disable=W0613
  """Custom parser for ident:key=val,key=val options.

  This will store the parsed values as a tuple (ident, {key: val}). As such,
  multiple uses of this option via action=append is possible.

  """
  if ":" not in value:
    ident, rest = value, ""
  else:
    ident, rest = value.split(":", 1)

  if ident.startswith(NO_PREFIX):
    # "no_<ident>" requests removal; extra options make no sense then
    if rest:
      msg = "Cannot pass options when removing parameter groups: %s" % value
      raise errors.ParameterError(msg)
    retval = (ident[len(NO_PREFIX):], False)
  elif ident.startswith(UN_PREFIX):
    # "un_<ident>" requests a reset to defaults; same restriction
    if rest:
      msg = "Cannot pass options when removing parameter groups: %s" % value
      raise errors.ParameterError(msg)
    retval = (ident[len(UN_PREFIX):], None)
  else:
    kv_dict = _SplitKeyVal(opt, rest)
    retval = (ident, kv_dict)
  return retval
def check_key_val(option, opt, value):  # pylint: disable=W0613
  """Custom parser class for key=val,key=val options.

  This will store the parsed values as a dict {key: val}.

  """
  return _SplitKeyVal(opt, value)
def check_bool(option, opt, value):  # pylint: disable=W0613
  """Custom parser for yes/no options.

  This will store the parsed value as either True or False.

  @raises errors.ParameterError: for any value other than the recognized
      true/false spellings

  """
  value = value.lower()
  if value == constants.VALUE_FALSE or value == "no":
    return False
  elif value == constants.VALUE_TRUE or value == "yes":
    return True
  else:
    raise errors.ParameterError("Invalid boolean value '%s'" % value)
# completion_suggestion is normally a list. Using numeric values not evaluating
# to False for dynamic completion.
(OPT_COMPL_MANY_NODES,
 OPT_COMPL_ONE_NODE,
 OPT_COMPL_ONE_INSTANCE,
 OPT_COMPL_ONE_OS,
 OPT_COMPL_ONE_IALLOCATOR,
 OPT_COMPL_INST_ADD_NODES,
 OPT_COMPL_ONE_NODEGROUP) = range(100, 107)

#: all known dynamic-completion markers, for validation
OPT_COMPL_ALL = frozenset([
  OPT_COMPL_MANY_NODES,
  OPT_COMPL_ONE_NODE,
  OPT_COMPL_ONE_INSTANCE,
  OPT_COMPL_ONE_OS,
  OPT_COMPL_ONE_IALLOCATOR,
  OPT_COMPL_INST_ADD_NODES,
  OPT_COMPL_ONE_NODEGROUP,
  ])
class CliOption(Option):
  """Custom option class for optparse.

  Extends optparse's Option with a C{completion_suggest} attribute and
  the extra value types used throughout this module.

  """
  ATTRS = Option.ATTRS + [
    "completion_suggest",
    ]
  # extra types must be declared here and wired to a checker below
  TYPES = Option.TYPES + (
    "identkeyval",
    "keyval",
    "unit",
    "bool",
    )
  TYPE_CHECKER = Option.TYPE_CHECKER.copy()
  TYPE_CHECKER["identkeyval"] = check_ident_key_val
  TYPE_CHECKER["keyval"] = check_key_val
  TYPE_CHECKER["unit"] = check_unit
  TYPE_CHECKER["bool"] = check_bool
# optparse.py sets make_option, so we do it for our own option class, too
cli_option = CliOption
DEBUG_OPT = cli_option("-d", "--debug", default=0, action="count",
                       help="Increase debugging level")

NOHDR_OPT = cli_option("--no-headers", default=False,
                       action="store_true", dest="no_headers",
                       help="Don't display column headers")

SEP_OPT = cli_option("--separator", default=None,
                     action="store", dest="separator",
                     help=("Separator between output fields"
                           " (defaults to one space)"))

USEUNITS_OPT = cli_option("--units", default=None,
                          dest="units", choices=("h", "m", "g", "t"),
                          help="Specify units for output (one of h/m/g/t)")

FIELDS_OPT = cli_option("-o", "--output", dest="output", action="store",
                        type="string", metavar="FIELDS",
                        help="Comma separated list of output fields")

FORCE_OPT = cli_option("-f", "--force", dest="force", action="store_true",
                       default=False, help="Force the operation")

CONFIRM_OPT = cli_option("--yes", dest="confirm", action="store_true",
                         default=False, help="Do not require confirmation")
645 IGNORE_OFFLINE_OPT = cli_option("--ignore-offline", dest="ignore_offline",
646 action="store_true", default=False,
647 help=("Ignore offline nodes and do as much"
650 TAG_ADD_OPT = cli_option("--tags", dest="tags",
651 default=None, help="Comma-separated list of instance"
TAG_SRC_OPT = cli_option("--from", dest="tags_source",
                         default=None, help="File with tag names")

SUBMIT_OPT = cli_option("--submit", dest="submit_only",
                        default=False, action="store_true",
                        help=("Submit the job and return the job ID, but"
                              " don't wait for the job to finish"))

SYNC_OPT = cli_option("--sync", dest="do_locking",
                      default=False, action="store_true",
                      help=("Grab locks while doing the queries"
                            " in order to ensure more consistent results"))
667 DRY_RUN_OPT = cli_option("--dry-run", default=False,
669 help=("Do not execute the operation, just run the"
670 " check steps and verify it it could be"
673 VERBOSE_OPT = cli_option("-v", "--verbose", default=False,
675 help="Increase the verbosity of the operation")
DEBUG_SIMERR_OPT = cli_option("--debug-simulate-errors", default=False,
                              action="store_true", dest="simulate_errors",
                              help="Debugging option that makes the operation"
                              " treat most runtime checks as failed")

NWSYNC_OPT = cli_option("--no-wait-for-sync", dest="wait_for_sync",
                        default=True, action="store_false",
                        help="Don't wait for sync (DANGEROUS!)")

DISK_TEMPLATE_OPT = cli_option("-t", "--disk-template", dest="disk_template",
                               help=("Custom disk setup (%s)" %
                                     utils.CommaJoin(constants.DISK_TEMPLATES)),
                               default=None, metavar="TEMPL",
                               choices=list(constants.DISK_TEMPLATES))
692 NONICS_OPT = cli_option("--no-nics", default=False, action="store_true",
693 help="Do not create any network cards for"
FILESTORE_DIR_OPT = cli_option("--file-storage-dir", dest="file_storage_dir",
                               help="Relative path under default cluster-wide"
                               " file storage dir to store file-based disks",
                               default=None, metavar="<DIR>")

FILESTORE_DRIVER_OPT = cli_option("--file-driver", dest="file_driver",
                                  help="Driver to use for image files",
                                  default="loop", metavar="<DRIVER>",
                                  choices=list(constants.FILE_DRIVER))

IALLOCATOR_OPT = cli_option("-I", "--iallocator", metavar="<NAME>",
                            help="Select nodes for the instance automatically"
                            " using the <NAME> iallocator plugin",
                            default=None, type="string",
                            completion_suggest=OPT_COMPL_ONE_IALLOCATOR)
712 DEFAULT_IALLOCATOR_OPT = cli_option("-I", "--default-iallocator",
714 help="Set the default instance allocator plugin",
715 default=None, type="string",
716 completion_suggest=OPT_COMPL_ONE_IALLOCATOR)
718 OS_OPT = cli_option("-o", "--os-type", dest="os", help="What OS to run",
720 completion_suggest=OPT_COMPL_ONE_OS)
OSPARAMS_OPT = cli_option("-O", "--os-parameters", dest="osparams",
                          type="keyval", default={},
                          help="OS parameters")

FORCE_VARIANT_OPT = cli_option("--force-variant", dest="force_variant",
                               action="store_true", default=False,
                               help="Force an unknown variant")
730 NO_INSTALL_OPT = cli_option("--no-install", dest="no_install",
731 action="store_true", default=False,
732 help="Do not install the OS (will"
BACKEND_OPT = cli_option("-B", "--backend-parameters", dest="beparams",
                         type="keyval", default={},
                         help="Backend parameters")

HVOPTS_OPT = cli_option("-H", "--hypervisor-parameters", type="keyval",
                        default={}, dest="hvparams",
                        help="Hypervisor parameters")

HYPERVISOR_OPT = cli_option("-H", "--hypervisor-parameters", dest="hypervisor",
                            help="Hypervisor and hypervisor options, in the"
                            " format hypervisor:option=value,option=value,...",
                            default=None, type="identkeyval")

HVLIST_OPT = cli_option("-H", "--hypervisor-parameters", dest="hvparams",
                        help="Hypervisor and hypervisor options, in the"
                        " format hypervisor:option=value,option=value,...",
                        default=[], action="append", type="identkeyval")
753 NOIPCHECK_OPT = cli_option("--no-ip-check", dest="ip_check", default=True,
754 action="store_false",
755 help="Don't check that the instance's IP"
758 NONAMECHECK_OPT = cli_option("--no-name-check", dest="name_check",
759 default=True, action="store_false",
760 help="Don't check that the instance's name"
NET_OPT = cli_option("--net",
                     help="NIC parameters", default=[],
                     dest="nics", action="append", type="identkeyval")

DISK_OPT = cli_option("--disk", help="Disk parameters", default=[],
                      dest="disks", action="append", type="identkeyval")

DISKIDX_OPT = cli_option("--disks", dest="disks", default=None,
                         help="Comma-separated list of disks"
                         " indices to act on (e.g. 0,2) (optional,"
                         " defaults to all disks)")

OS_SIZE_OPT = cli_option("-s", "--os-size", dest="sd_size",
                         help="Enforces a single-disk configuration using the"
                         " given disk size, in MiB unless a suffix is used",
                         default=None, type="unit", metavar="<size>")
780 IGNORE_CONSIST_OPT = cli_option("--ignore-consistency",
781 dest="ignore_consistency",
782 action="store_true", default=False,
783 help="Ignore the consistency of the disks on"
786 ALLOW_FAILOVER_OPT = cli_option("--allow-failover",
787 dest="allow_failover",
788 action="store_true", default=False,
789 help="If migration is not possible fallback to"
NONLIVE_OPT = cli_option("--non-live", dest="live",
                         default=True, action="store_false",
                         help="Do a non-live migration (this usually means"
                         " freeze the instance, save the state, transfer and"
                         " only then resume running on the secondary node)")
798 MIGRATION_MODE_OPT = cli_option("--migration-mode", dest="migration_mode",
800 choices=list(constants.HT_MIGRATION_MODES),
801 help="Override default migration mode (choose"
802 " either live or non-live")
NODE_PLACEMENT_OPT = cli_option("-n", "--node", dest="node",
                                help="Target node and optional secondary node",
                                metavar="<pnode>[:<snode>]",
                                completion_suggest=OPT_COMPL_INST_ADD_NODES)

NODE_LIST_OPT = cli_option("-n", "--node", dest="nodes", default=[],
                           action="append", metavar="<node>",
                           help="Use only this node (can be used multiple"
                           " times, if not given defaults to all nodes)",
                           completion_suggest=OPT_COMPL_ONE_NODE)
815 NODEGROUP_OPT_NAME = "--node-group"
816 NODEGROUP_OPT = cli_option("-g", NODEGROUP_OPT_NAME,
818 help="Node group (name or uuid)",
819 metavar="<nodegroup>",
820 default=None, type="string",
821 completion_suggest=OPT_COMPL_ONE_NODEGROUP)
823 SINGLE_NODE_OPT = cli_option("-n", "--node", dest="node", help="Target node",
825 completion_suggest=OPT_COMPL_ONE_NODE)
NOSTART_OPT = cli_option("--no-start", dest="start", default=True,
                         action="store_false",
                         help="Don't start the instance after creation")

SHOWCMD_OPT = cli_option("--show-cmd", dest="show_command",
                         action="store_true", default=False,
                         help="Show command instead of executing it")
835 CLEANUP_OPT = cli_option("--cleanup", dest="cleanup",
836 default=False, action="store_true",
837 help="Instead of performing the migration, try to"
838 " recover from a failed cleanup. This is safe"
839 " to run even if the instance is healthy, but it"
840 " will create extra replication traffic and "
841 " disrupt briefly the replication (like during the"
STATIC_OPT = cli_option("-s", "--static", dest="static",
                        action="store_true", default=False,
                        help="Only show configuration data, not runtime data")

ALL_OPT = cli_option("--all", dest="show_all",
                     default=False, action="store_true",
                     help="Show info on all instances on the cluster."
                     " This can take a long time to run, use wisely")

SELECT_OS_OPT = cli_option("--select-os", dest="select_os",
                           action="store_true", default=False,
                           help="Interactive OS reinstall, lists available"
                           " OS templates for selection")

IGNORE_FAILURES_OPT = cli_option("--ignore-failures", dest="ignore_failures",
                                 action="store_true", default=False,
                                 help="Remove the instance from the cluster"
                                 " configuration even if there are failures"
                                 " during the removal process")
864 IGNORE_REMOVE_FAILURES_OPT = cli_option("--ignore-remove-failures",
865 dest="ignore_remove_failures",
866 action="store_true", default=False,
867 help="Remove the instance from the"
868 " cluster configuration even if there"
869 " are failures during the removal"
REMOVE_INSTANCE_OPT = cli_option("--remove-instance", dest="remove_instance",
                                 action="store_true", default=False,
                                 help="Remove the instance from the cluster")

DST_NODE_OPT = cli_option("-n", "--target-node", dest="dst_node",
                          help="Specifies the new node for the instance",
                          metavar="NODE", default=None,
                          completion_suggest=OPT_COMPL_ONE_NODE)

NEW_SECONDARY_OPT = cli_option("-n", "--new-secondary", dest="dst_node",
                               help="Specifies the new secondary node",
                               metavar="NODE", default=None,
                               completion_suggest=OPT_COMPL_ONE_NODE)

ON_PRIMARY_OPT = cli_option("-p", "--on-primary", dest="on_primary",
                            default=False, action="store_true",
                            help="Replace the disk(s) on the primary"
                            " node (applies only to internally mirrored"
                            " disk templates, e.g. %s)" %
                            utils.CommaJoin(constants.DTS_INT_MIRROR))

ON_SECONDARY_OPT = cli_option("-s", "--on-secondary", dest="on_secondary",
                              default=False, action="store_true",
                              help="Replace the disk(s) on the secondary"
                              " node (applies only to internally mirrored"
                              " disk templates, e.g. %s)" %
                              utils.CommaJoin(constants.DTS_INT_MIRROR))
900 AUTO_PROMOTE_OPT = cli_option("--auto-promote", dest="auto_promote",
901 default=False, action="store_true",
902 help="Lock all nodes and auto-promote as needed"
AUTO_REPLACE_OPT = cli_option("-a", "--auto", dest="auto",
                              default=False, action="store_true",
                              help="Automatically replace faulty disks"
                              " (applies only to internally mirrored"
                              " disk templates, e.g. %s)" %
                              utils.CommaJoin(constants.DTS_INT_MIRROR))

IGNORE_SIZE_OPT = cli_option("--ignore-size", dest="ignore_size",
                             default=False, action="store_true",
                             help="Ignore current recorded size"
                             " (useful for forcing activation when"
                             " the recorded size is wrong)")
918 SRC_NODE_OPT = cli_option("--src-node", dest="src_node", help="Source node",
920 completion_suggest=OPT_COMPL_ONE_NODE)
922 SRC_DIR_OPT = cli_option("--src-dir", dest="src_dir", help="Source directory",
SECONDARY_IP_OPT = cli_option("-s", "--secondary-ip", dest="secondary_ip",
                              help="Specify the secondary ip for the node",
                              metavar="ADDRESS", default=None)

READD_OPT = cli_option("--readd", dest="readd",
                       default=False, action="store_true",
                       help="Readd old node after replacing it")

NOSSH_KEYCHECK_OPT = cli_option("--no-ssh-key-check", dest="ssh_key_check",
                                default=True, action="store_false",
                                help="Disable SSH key fingerprint checking")

NODE_FORCE_JOIN_OPT = cli_option("--force-join", dest="force_join",
                                 default=False, action="store_true",
                                 help="Force the joining of a node")

MC_OPT = cli_option("-C", "--master-candidate", dest="master_candidate",
                    type="bool", default=None, metavar=_YORNO,
                    help="Set the master_candidate flag on the node")
945 OFFLINE_OPT = cli_option("-O", "--offline", dest="offline", metavar=_YORNO,
946 type="bool", default=None,
947 help=("Set the offline flag on the node"
948 " (cluster does not communicate with offline"
DRAINED_OPT = cli_option("-D", "--drained", dest="drained", metavar=_YORNO,
                         type="bool", default=None,
                         help=("Set the drained flag on the node"
                               " (excluded from allocation operations)"))

CAPAB_MASTER_OPT = cli_option("--master-capable", dest="master_capable",
                              type="bool", default=None, metavar=_YORNO,
                              help="Set the master_capable flag on the node")

CAPAB_VM_OPT = cli_option("--vm-capable", dest="vm_capable",
                          type="bool", default=None, metavar=_YORNO,
                          help="Set the vm_capable flag on the node")

ALLOCATABLE_OPT = cli_option("--allocatable", dest="allocatable",
                             type="bool", default=None, metavar=_YORNO,
                             help="Set the allocatable flag on a volume")
968 NOLVM_STORAGE_OPT = cli_option("--no-lvm-storage", dest="lvm_storage",
969 help="Disable support for lvm based instances"
971 action="store_false", default=True)
ENABLED_HV_OPT = cli_option("--enabled-hypervisors",
                            dest="enabled_hypervisors",
                            help="Comma-separated list of hypervisors",
                            type="string", default=None)

NIC_PARAMS_OPT = cli_option("-N", "--nic-parameters", dest="nicparams",
                            type="keyval", default={},
                            help="NIC parameters")

CP_SIZE_OPT = cli_option("-C", "--candidate-pool-size", default=None,
                         dest="candidate_pool_size", type="int",
                         help="Set the candidate pool size")

VG_NAME_OPT = cli_option("--vg-name", dest="vg_name",
                         help=("Enables LVM and specifies the volume group"
                               " name (cluster-wide) for disk allocation"
                               " [%s]" % constants.DEFAULT_VG),
                         metavar="VG", default=None)

YES_DOIT_OPT = cli_option("--yes-do-it", "--ya-rly", dest="yes_do_it",
                          help="Destroy cluster", action="store_true")

NOVOTING_OPT = cli_option("--no-voting", dest="no_voting",
                          help="Skip node agreement check (dangerous)",
                          action="store_true", default=False)
999 MAC_PREFIX_OPT = cli_option("-m", "--mac-prefix", dest="mac_prefix",
1000 help="Specify the mac prefix for the instance IP"
1001 " addresses, in the format XX:XX:XX",
1005 MASTER_NETDEV_OPT = cli_option("--master-netdev", dest="master_netdev",
1006 help="Specify the node interface (cluster-wide)"
1007 " on which the master IP address will be added"
1008 " (cluster init default: %s)" %
1009 constants.DEFAULT_BRIDGE,
1013 MASTER_NETMASK_OPT = cli_option("--master-netmask", dest="master_netmask",
1014 help="Specify the netmask of the master IP",
1018 GLOBAL_FILEDIR_OPT = cli_option("--file-storage-dir", dest="file_storage_dir",
1019 help="Specify the default directory (cluster-"
1020 "wide) for storing the file-based disks [%s]" %
1021 constants.DEFAULT_FILE_STORAGE_DIR,
1023 default=constants.DEFAULT_FILE_STORAGE_DIR)
1025 GLOBAL_SHARED_FILEDIR_OPT = cli_option("--shared-file-storage-dir",
1026 dest="shared_file_storage_dir",
1027 help="Specify the default directory (cluster-"
1028 "wide) for storing the shared file-based"
1030 constants.DEFAULT_SHARED_FILE_STORAGE_DIR,
1031 metavar="SHAREDDIR",
1032 default=constants.DEFAULT_SHARED_FILE_STORAGE_DIR)
NOMODIFY_ETCHOSTS_OPT = cli_option("--no-etc-hosts", dest="modify_etc_hosts",
                                   help="Don't modify /etc/hosts",
                                   action="store_false", default=True)

NOMODIFY_SSH_SETUP_OPT = cli_option("--no-ssh-init", dest="modify_ssh_setup",
                                    help="Don't initialize SSH keys",
                                    action="store_false", default=True)

ERROR_CODES_OPT = cli_option("--error-codes", dest="error_codes",
                             help="Enable parseable error messages",
                             action="store_true", default=False)

NONPLUS1_OPT = cli_option("--no-nplus1-mem", dest="skip_nplusone_mem",
                          help="Skip N+1 memory redundancy tests",
                          action="store_true", default=False)
1050 REBOOT_TYPE_OPT = cli_option("-t", "--type", dest="reboot_type",
1051 help="Type of reboot: soft/hard/full",
1052 default=constants.INSTANCE_REBOOT_HARD,
1054 choices=list(constants.REBOOT_TYPES))
IGNORE_SECONDARIES_OPT = cli_option("--ignore-secondaries",
                                    dest="ignore_secondaries",
                                    default=False, action="store_true",
                                    help="Ignore errors from secondaries")

NOSHUTDOWN_OPT = cli_option("--noshutdown", dest="shutdown",
                            action="store_false", default=True,
                            help="Don't shutdown the instance (unsafe)")

TIMEOUT_OPT = cli_option("--timeout", dest="timeout", type="int",
                         default=constants.DEFAULT_SHUTDOWN_TIMEOUT,
                         help="Maximum time to wait")

SHUTDOWN_TIMEOUT_OPT = cli_option("--shutdown-timeout",
                                  dest="shutdown_timeout", type="int",
                                  default=constants.DEFAULT_SHUTDOWN_TIMEOUT,
                                  help="Maximum time to wait for instance"
                                  " shutdown")
1074 INTERVAL_OPT = cli_option("--interval", dest="interval", type="int",
1076 help=("Number of seconds between repetions of the"
1079 EARLY_RELEASE_OPT = cli_option("--early-release",
1080 dest="early_release", default=False,
1081 action="store_true",
1082 help="Release the locks on the secondary"
NEW_CLUSTER_CERT_OPT = cli_option("--new-cluster-certificate",
                                  dest="new_cluster_cert",
                                  default=False, action="store_true",
                                  help="Generate a new cluster certificate")
1090 RAPI_CERT_OPT = cli_option("--rapi-certificate", dest="rapi_cert",
1092 help="File containing new RAPI certificate")
1094 NEW_RAPI_CERT_OPT = cli_option("--new-rapi-certificate", dest="new_rapi_cert",
1095 default=None, action="store_true",
1096 help=("Generate a new self-signed RAPI"
1099 SPICE_CERT_OPT = cli_option("--spice-certificate", dest="spice_cert",
1101 help="File containing new SPICE certificate")
1103 SPICE_CACERT_OPT = cli_option("--spice-ca-certificate", dest="spice_cacert",
1105 help="File containing the certificate of the CA"
1106 " which signed the SPICE certificate")
1108 NEW_SPICE_CERT_OPT = cli_option("--new-spice-certificate",
1109 dest="new_spice_cert", default=None,
1110 action="store_true",
1111 help=("Generate a new self-signed SPICE"
1114 NEW_CONFD_HMAC_KEY_OPT = cli_option("--new-confd-hmac-key",
1115 dest="new_confd_hmac_key",
1116 default=False, action="store_true",
1117 help=("Create a new HMAC key for %s" %
1120 CLUSTER_DOMAIN_SECRET_OPT = cli_option("--cluster-domain-secret",
1121 dest="cluster_domain_secret",
1123 help=("Load new new cluster domain"
1124 " secret from file"))
1126 NEW_CLUSTER_DOMAIN_SECRET_OPT = cli_option("--new-cluster-domain-secret",
1127 dest="new_cluster_domain_secret",
1128 default=False, action="store_true",
1129 help=("Create a new cluster domain"
USE_REPL_NET_OPT = cli_option("--use-replication-network",
                              dest="use_replication_network",
                              help="Whether to use the replication network"
                              " for talking to the nodes",
                              action="store_true", default=False)

MAINTAIN_NODE_HEALTH_OPT = \
    cli_option("--maintain-node-health", dest="maintain_node_health",
               metavar=_YORNO, default=None, type="bool",
               help="Configure the cluster to automatically maintain node"
               " health, by shutting down unknown instances, shutting down"
               " unknown DRBD devices, etc.")

IDENTIFY_DEFAULTS_OPT = \
    cli_option("--identify-defaults", dest="identify_defaults",
               default=False, action="store_true",
               help="Identify which saved instance parameters are equal to"
               " the current cluster defaults and set them as such, instead"
               " of marking them as overridden")

UIDPOOL_OPT = cli_option("--uid-pool", default=None,
                         action="store", dest="uid_pool",
                         help=("A list of user-ids or user-id"
                               " ranges separated by commas"))

ADD_UIDS_OPT = cli_option("--add-uids", default=None,
                          action="store", dest="add_uids",
                          help=("A list of user-ids or user-id"
                                " ranges separated by commas, to be"
                                " added to the user-id pool"))

REMOVE_UIDS_OPT = cli_option("--remove-uids", default=None,
                             action="store", dest="remove_uids",
                             help=("A list of user-ids or user-id"
                                   " ranges separated by commas, to be"
                                   " removed from the user-id pool"))

RESERVED_LVS_OPT = cli_option("--reserved-lvs", default=None,
                              action="store", dest="reserved_lvs",
                              help=("A comma-separated list of reserved"
                                    " logical volumes names, that will be"
                                    " ignored by cluster verify"))

ROMAN_OPT = cli_option("--roman",
                       dest="roman_integers", default=False,
                       action="store_true",
                       help="Use roman numbers for positive integers")

DRBD_HELPER_OPT = cli_option("--drbd-usermode-helper", dest="drbd_helper",
                             action="store", default=None,
                             help="Specifies usermode helper for DRBD")

NODRBD_STORAGE_OPT = cli_option("--no-drbd-storage", dest="drbd_storage",
                                action="store_false", default=True,
                                help="Disable support for DRBD")
PRIMARY_IP_VERSION_OPT = \
    cli_option("--primary-ip-version", default=constants.IP4_VERSION,
               action="store", dest="primary_ip_version",
               metavar="%d|%d" % (constants.IP4_VERSION,
                                  constants.IP6_VERSION),
               help="Cluster-wide IP version for primary IP")

PRIORITY_OPT = cli_option("--priority", default=None, dest="priority",
                          metavar="|".join(name for name, _ in _PRIORITY_NAMES),
                          choices=_PRIONAME_TO_VALUE.keys(),
                          help="Priority for opcode processing")

HID_OS_OPT = cli_option("--hidden", dest="hidden",
                        type="bool", default=None, metavar=_YORNO,
                        help="Sets the hidden flag on the OS")

BLK_OS_OPT = cli_option("--blacklisted", dest="blacklisted",
                        type="bool", default=None, metavar=_YORNO,
                        help="Sets the blacklisted flag on the OS")
1208 PREALLOC_WIPE_DISKS_OPT = cli_option("--prealloc-wipe-disks", default=None,
1209 type="bool", metavar=_YORNO,
1210 dest="prealloc_wipe_disks",
1211 help=("Wipe disks prior to instance"
1214 NODE_PARAMS_OPT = cli_option("--node-parameters", dest="ndparams",
1215 type="keyval", default=None,
1216 help="Node parameters")
1218 ALLOC_POLICY_OPT = cli_option("--alloc-policy", dest="alloc_policy",
1219 action="store", metavar="POLICY", default=None,
1220 help="Allocation policy for the node group")
1222 NODE_POWERED_OPT = cli_option("--node-powered", default=None,
1223 type="bool", metavar=_YORNO,
1224 dest="node_powered",
1225 help="Specify if the SoR for node is powered")
1227 OOB_TIMEOUT_OPT = cli_option("--oob-timeout", dest="oob_timeout", type="int",
1228 default=constants.OOB_TIMEOUT,
1229 help="Maximum time to wait for out-of-band helper")
1231 POWER_DELAY_OPT = cli_option("--power-delay", dest="power_delay", type="float",
1232 default=constants.OOB_POWER_DELAY,
1233 help="Time in seconds to wait between power-ons")
1235 FORCE_FILTER_OPT = cli_option("-F", "--filter", dest="force_filter",
1236 action="store_true", default=False,
1237 help=("Whether command argument should be treated"
1240 NO_REMEMBER_OPT = cli_option("--no-remember",
1242 action="store_true", default=False,
1243 help="Perform but do not record the change"
1244 " in the configuration")
1246 PRIMARY_ONLY_OPT = cli_option("-p", "--primary-only",
1247 default=False, action="store_true",
1248 help="Evacuate primary instances only")
1250 SECONDARY_ONLY_OPT = cli_option("-s", "--secondary-only",
1251 default=False, action="store_true",
1252 help="Evacuate secondary instances only"
1253 " (applies only to internally mirrored"
1254 " disk templates, e.g. %s)" %
1255 utils.CommaJoin(constants.DTS_INT_MIRROR))
1257 STARTUP_PAUSED_OPT = cli_option("--paused", dest="startup_paused",
1258 action="store_true", default=False,
1259 help="Pause instance at startup")
1261 TO_GROUP_OPT = cli_option("--to", dest="to", metavar="<group>",
1262 help="Destination node group (name or uuid)",
1263 default=None, action="append",
1264 completion_suggest=OPT_COMPL_ONE_NODEGROUP)
1266 IGNORE_ERRORS_OPT = cli_option("-I", "--ignore-errors", default=[],
1267 action="append", dest="ignore_errors",
1268 choices=list(constants.CV_ALL_ECODES_STRINGS),
1269 help="Error code to be ignored")
#: Options provided by all commands
COMMON_OPTS = [DEBUG_OPT]

# common options for creating instances. add and import then add their own
# NOTE(review): the remaining entries of this list are elided in this excerpt.
COMMON_CREATE_OPTS = [
  FILESTORE_DRIVER_OPT,
def _ParseArgs(argv, commands, aliases):
  """Parser for the command line arguments.

  This function parses the arguments and returns the function which
  must be executed together with its (modified) arguments.

  @param argv: the command line
  @param commands: dictionary with special contents, see the design
      doc for cmdline handling
  @param aliases: dictionary with command aliases {'alias': 'target, ...}

  """
  # NOTE(review): several lines of this function are elided in this
  # excerpt (e.g. the if/else picking the binary name, sys.exit after
  # --version, sorting of the command list and the alias resolution);
  # the comments below only describe the visible code.
  binary = "<command>"
  binary = argv[0].split("/")[-1]

  if len(argv) > 1 and argv[1] == "--version":
    ToStdout("%s (ganeti %s) %s", binary, constants.VCS_VERSION,
             constants.RELEASE_VERSION)
    # Quit right away. That way we don't have to care about this special
    # argument. optparse.py does it the same.

  if len(argv) < 2 or not (argv[1] in commands or
                           argv[1] in aliases):
    # let's do a nice thing
    sortedcmds = commands.keys()

    ToStdout("Usage: %s {command} [options...] [argument...]", binary)
    ToStdout("%s <command> --help to see details, or man %s", binary, binary)

    # compute the max line length for cmd + usage
    mlen = max([len(" %s" % cmd) for cmd in commands])
    mlen = min(60, mlen)  # should not get here...

    # and format a nice command list
    ToStdout("Commands:")
    for cmd in sortedcmds:
      cmdstr = " %s" % (cmd,)
      # commands[cmd] is (function, args_def, options, usage, description)
      help_text = commands[cmd][4]
      help_lines = textwrap.wrap(help_text, 79 - 3 - mlen)
      ToStdout("%-*s - %s", mlen, cmdstr, help_lines.pop(0))
      for line in help_lines:
        ToStdout("%-*s   %s", mlen, "", line)

    return None, None, None

  # get command, unalias it, and look it up in commands
  raise errors.ProgrammerError("Alias '%s' overrides an existing"
  if aliases[cmd] not in commands:
    raise errors.ProgrammerError("Alias '%s' maps to non-existing"
                                 " command '%s'" % (cmd, aliases[cmd]))

  # commands[cmd] unpacks into the handler and its parser configuration
  func, args_def, parser_opts, usage, description = commands[cmd]
  parser = OptionParser(option_list=parser_opts + COMMON_OPTS,
                        description=description,
                        formatter=TitledHelpFormatter(),
                        usage="%%prog %s %s" % (cmd, usage))
  # stop option parsing at the first non-option argument
  parser.disable_interspersed_args()
  options, args = parser.parse_args()

  if not _CheckArguments(cmd, args_def, args):
    return None, None, None

  return func, options, args
def _CheckArguments(cmd, args_def, args):
  """Verifies the arguments using the argument definition.

  Algorithm:

  1. Abort with error if values specified by user but none expected.

  1. For each argument in definition

    1. Keep running count of minimum number of values (min_count)
    1. Keep running count of maximum number of values (max_count)
    1. If it has an unlimited number of values

      1. Abort with error if it's not the last argument in the definition

  1. If last argument has limited number of values

    1. Abort with error if number of values doesn't match or is too large

  1. Abort with error if user didn't pass enough values (min_count)

  """
  # NOTE(review): several lines are elided in this excerpt (the
  # initialization of min_count/max_count, the bodies of some branches
  # and the "return False"/"return True" statements).
  if args and not args_def:
    ToStderr("Error: Command %s expects no arguments", cmd)

  last_idx = len(args_def) - 1

  for idx, arg in enumerate(args_def):
    if min_count is None:
    elif arg.min is not None:
      min_count += arg.min

    if max_count is None:
    elif arg.max is not None:
      max_count += arg.max

    # only the last argument may be unbounded (max=None)
    check_max = (arg.max is not None)

    elif arg.max is None:
      raise errors.ProgrammerError("Only the last argument can have max=None")

  # Command with exact number of arguments
  if (min_count is not None and max_count is not None and
      min_count == max_count and len(args) != min_count):
    ToStderr("Error: Command %s expects %d argument(s)", cmd, min_count)

  # Command with limited number of arguments
  if max_count is not None and len(args) > max_count:
    ToStderr("Error: Command %s expects only %d argument(s)",

  # Command with some required arguments
  if min_count is not None and len(args) < min_count:
    ToStderr("Error: Command %s expects at least %d argument(s)",
def SplitNodeOption(value):
  """Splits the value of a --node option.

  The value is expected to be either "pnode" or "pnode:snode"; the
  latter is split on the first colon only.

  """
  if not value or ":" not in value:
    return (value, None)
  return value.split(":", 1)
def CalculateOSNames(os_name, os_variants):
  """Calculates all the names an OS can be called, according to its variants.

  @type os_name: string
  @param os_name: base name of the os
  @type os_variants: list or None
  @param os_variants: list of supported variants
  @rtype: list
  @return: list of valid names

  """
  # Without variants the OS is only known under its base name; with
  # variants, each name is "<os>+<variant>".
  if os_variants:
    return ["%s+%s" % (os_name, v) for v in os_variants]
  else:
    return [os_name]
def ParseFields(selected, default):
  """Parses the values of "--field"-like options.

  @type selected: string or None
  @param selected: User-selected options
  @type default: list
  @param default: Default fields
  @rtype: list
  @return: the default fields if nothing was selected, the default
      fields extended by the user's selection if it starts with "+",
      otherwise just the user's comma-separated selection

  """
  if selected is None:
    return default

  # A leading "+" means "append to the defaults" instead of replacing.
  if selected.startswith("+"):
    return default + selected[1:].split(",")

  return selected.split(",")
1495 UsesRPC = rpc.RunWithRPC
def AskUser(text, choices=None):
  """Ask the user a question.

  @param text: the question to ask

  @param choices: list with elements tuples (input_char, return_value,
      description); if not given, it will default to: [('y', True,
      'Perform the operation'), ('n', False, 'Do no do the operation')];
      note that the '?' char is reserved for help

  @return: one of the return values from the choices list; if input is
      not possible (i.e. not running with a tty, we return the last
      entry of the list)

  """
  # NOTE(review): this function is heavily elided in this excerpt:
  # the "if choices is None" guard, the try/except around opening
  # /dev/tty, and the input loop structure are missing; the comments
  # below describe only the visible code.
  choices = [("y", True, "Perform the operation"),
             ("n", False, "Do not perform the operation")]
  if not choices or not isinstance(choices, list):
    raise errors.ProgrammerError("Invalid choices argument to AskUser")
  for entry in choices:
    if not isinstance(entry, tuple) or len(entry) < 3 or entry[0] == "?":
      raise errors.ProgrammerError("Invalid choices element to AskUser")

  # default answer: the last choice's return value
  answer = choices[-1][1]
  # re-wrap the question text to 70 columns, preserving line breaks
  for line in text.splitlines():
    new_text.append(textwrap.fill(line, 70, replace_whitespace=False))
  text = "\n".join(new_text)
  f = file("/dev/tty", "a+")
  # build the "y/n/[?]" prompt; the default choice is bracketed
  chars = [entry[0] for entry in choices]
  chars[-1] = "[%s]" % chars[-1]
  # maps input characters to return values
  maps = dict([(entry[0], entry[1]) for entry in choices])
  f.write("/".join(chars))
  # read at most one character (plus newline) from the tty
  line = f.readline(2).strip().lower()
  # help output: one line per choice
  for entry in choices:
    f.write(" %s - %s\n" % (entry[0], entry[2]))
class JobSubmittedException(Exception):
  """Job was submitted, client should exit.

  This exception has one argument, the ID of the job that was
  submitted. The handler should print this ID.

  This is not an error, just a structured way to exit from clients.

  """
def SendJob(ops, cl=None):
  """Function to submit an opcode without waiting for the results.

  @type ops: list
  @param ops: list of opcodes
  @type cl: luxi.Client
  @param cl: the luxi client to use for communicating with the master;
             if None, a new client will be created

  """
  # NOTE(review): the client creation for cl=None, the generic opcode
  # pre-processing and the return of the job id are elided in this
  # excerpt; only the actual submission is visible.
  job_id = cl.SubmitJob(ops)
def GenericPollJob(job_id, cbs, report_cbs):
  """Generic job-polling function.

  @type job_id: number
  @param job_id: Job ID
  @type cbs: Instance of L{JobPollCbBase}
  @param cbs: Data callbacks
  @type report_cbs: Instance of L{JobPollReportCbBase}
  @param report_cbs: Reporting callbacks

  """
  # NOTE(review): the polling loop header, several if/elif/while lines
  # and some continuation lines are elided in this excerpt; the comments
  # below describe only the visible code.
  prev_job_info = None
  prev_logmsg_serial = None

  result = cbs.WaitForJobChangeOnce(job_id, ["status"], prev_job_info,
  # job not found, go away!
  raise errors.JobLost("Job with id %s lost" % job_id)

  if result == constants.JOB_NOTCHANGED:
    report_cbs.ReportNotChanged(job_id, status)

  # Split result, a tuple of (field values, log entries)
  (job_info, log_entries) = result
  (status, ) = job_info

  # forward each new log entry to the reporting callback, remembering the
  # highest serial so it is not reported twice
  for log_entry in log_entries:
    (serial, timestamp, log_type, message) = log_entry
    report_cbs.ReportLogMessage(job_id, serial, timestamp,
    prev_logmsg_serial = max(prev_logmsg_serial, serial)

  # TODO: Handle canceled and archived jobs
  elif status in (constants.JOB_STATUS_SUCCESS,
                  constants.JOB_STATUS_ERROR,
                  constants.JOB_STATUS_CANCELING,
                  constants.JOB_STATUS_CANCELED):

  prev_job_info = job_info

  # job reached a final state: fetch the per-opcode status and results
  jobs = cbs.QueryJobs([job_id], ["status", "opstatus", "opresult"])
  raise errors.JobLost("Job with id %s lost" % job_id)

  status, opstatus, result = jobs[0]

  if status == constants.JOB_STATUS_SUCCESS:
  if status in (constants.JOB_STATUS_CANCELING, constants.JOB_STATUS_CANCELED):
    raise errors.OpExecError("Job was canceled")

  # locate the first failed opcode and raise an appropriate error
  for idx, (status, msg) in enumerate(zip(opstatus, result)):
    if status == constants.OP_STATUS_SUCCESS:
    elif status == constants.OP_STATUS_ERROR:
      # re-raise an encapsulated exception if there is one
      errors.MaybeRaise(msg)
      raise errors.OpExecError("partial failure (opcode %d): %s" %
      raise errors.OpExecError(str(msg))

  # default failure mode
  raise errors.OpExecError(result)
class JobPollCbBase:
  """Base class for L{GenericPollJob} callbacks.

  """
  # NOTE(review): the "def __init__(self):" header is elided in this
  # excerpt; the docstring below belongs to it.
    """Initializes this class.

    """
  def WaitForJobChangeOnce(self, job_id, fields,
                           prev_job_info, prev_log_serial):
    """Waits for changes on a job.

    """
    raise NotImplementedError()

  def QueryJobs(self, job_ids, fields):
    """Returns the selected fields for the selected job IDs.

    @type job_ids: list of numbers
    @param job_ids: Job IDs
    @type fields: list of strings
    @param fields: Fields

    """
    raise NotImplementedError()
class JobPollReportCbBase:
  """Base class for L{GenericPollJob} reporting callbacks.

  """
  # NOTE(review): the "def __init__(self):" header is elided in this
  # excerpt; the docstring below belongs to it.
    """Initializes this class.

    """
  def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
    """Handles a log message.

    """
    raise NotImplementedError()

  def ReportNotChanged(self, job_id, status):
    """Called if a job hasn't changed in a while.

    @type job_id: number
    @param job_id: Job ID
    @type status: string or None
    @param status: Job status if available

    """
    raise NotImplementedError()
class _LuxiJobPollCb(JobPollCbBase):
  # Data callbacks backed by a luxi client.
  def __init__(self, cl):
    """Initializes this class.

    """
    JobPollCbBase.__init__(self)
    # NOTE(review): the "self.cl = cl" assignment is elided in this
    # excerpt; the methods below rely on it.

  def WaitForJobChangeOnce(self, job_id, fields,
                           prev_job_info, prev_log_serial):
    """Waits for changes on a job.

    """
    return self.cl.WaitForJobChangeOnce(job_id, fields,
                                        prev_job_info, prev_log_serial)

  def QueryJobs(self, job_ids, fields):
    """Returns the selected fields for the selected job IDs.

    """
    return self.cl.QueryJobs(job_ids, fields)
class FeedbackFnJobPollReportCb(JobPollReportCbBase):
  # Reporting callbacks that forward log messages to a feedback function.
  def __init__(self, feedback_fn):
    """Initializes this class.

    """
    JobPollReportCbBase.__init__(self)

    self.feedback_fn = feedback_fn

    assert callable(feedback_fn)

  def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
    """Handles a log message.

    """
    self.feedback_fn((timestamp, log_type, log_msg))

  def ReportNotChanged(self, job_id, status):
    """Called if a job hasn't changed in a while.

    """
    # NOTE(review): the (empty) body of this method is elided in this
    # excerpt; status changes are deliberately not reported here.
class StdioJobPollReportCb(JobPollReportCbBase):
  # Reporting callbacks that write to standard output/error.
  # NOTE(review): the "def __init__(self):" header is elided in this
  # excerpt; the docstring below belongs to it.
    """Initializes this class.

    """
    JobPollReportCbBase.__init__(self)

    # one-shot flags so each state is announced at most once
    self.notified_queued = False
    self.notified_waitlock = False

  def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
    """Handles a log message.

    """
    ToStdout("%s %s", time.ctime(utils.MergeTime(timestamp)),
             FormatLogMessage(log_type, log_msg))

  def ReportNotChanged(self, job_id, status):
    """Called if a job hasn't changed in a while.

    """
    if status == constants.JOB_STATUS_QUEUED and not self.notified_queued:
      ToStderr("Job %s is waiting in queue", job_id)
      self.notified_queued = True

    elif status == constants.JOB_STATUS_WAITING and not self.notified_waitlock:
      ToStderr("Job %s is trying to acquire all necessary locks", job_id)
      self.notified_waitlock = True
def FormatLogMessage(log_type, log_msg):
  """Formats a job message according to its type.

  @param log_type: the type of the message (e.g. C{constants.ELOG_MESSAGE})
  @param log_msg: the message payload
  @rtype: string
  @return: the safely-encoded message text

  """
  # Non-message payloads are stringified before encoding.
  if log_type != constants.ELOG_MESSAGE:
    log_msg = str(log_msg)

  return utils.SafeEncode(log_msg)
def PollJob(job_id, cl=None, feedback_fn=None, reporter=None):
  """Function to poll for the result of a job.

  @type job_id: job identified
  @param job_id: the job to poll for results
  @type cl: luxi.Client
  @param cl: the luxi client to use for communicating with the master;
             if None, a new client will be created

  """
  # NOTE(review): the client creation and the if/elif ladder choosing
  # between a feedback-function reporter and the stdio reporter are
  # partially elided in this excerpt.
  if reporter is None:
    reporter = FeedbackFnJobPollReportCb(feedback_fn)
    reporter = StdioJobPollReportCb()
  raise errors.ProgrammerError("Can't specify reporter and feedback function")

  return GenericPollJob(job_id, _LuxiJobPollCb(cl), reporter)
def SubmitOpCode(op, cl=None, feedback_fn=None, opts=None, reporter=None):
  """Legacy function to submit an opcode.

  This is just a simple wrapper over the construction of the processor
  instance. It should be extended to better handle feedback and
  interaction functions.

  """
  # NOTE(review): the client creation for cl=None and the "reporter="
  # continuation of the PollJob call are elided in this excerpt.
  SetGenericOpcodeOpts([op], opts)

  job_id = SendJob([op], cl=cl)

  # wait for the single opcode's result and return it
  op_results = PollJob(job_id, cl=cl, feedback_fn=feedback_fn,
  return op_results[0]
def SubmitOrSend(op, opts, cl=None, feedback_fn=None):
  """Wrapper around SubmitOpCode or SendJob.

  This function will decide, based on the 'opts' parameter, whether to
  submit and wait for the result of the opcode (and return it), or
  whether to just send the job and print its identifier. It is used in
  order to simplify the implementation of the '--submit' option.

  It will also process the opcodes if we're sending them via SendJob
  (otherwise SubmitOpCode does it).

  """
  # NOTE(review): the "job = [op]" line is elided in this excerpt.
  if opts and opts.submit_only:
    SetGenericOpcodeOpts(job, opts)
    job_id = SendJob(job, cl=cl)
    # not an error: structured way for the client to print the id and exit
    raise JobSubmittedException(job_id)

  return SubmitOpCode(op, cl=cl, feedback_fn=feedback_fn, opts=opts)
def SetGenericOpcodeOpts(opcode_list, options):
  """Processor for generic options.

  This function updates the given opcodes based on generic command
  line options (like debug, dry-run, etc.).

  @param opcode_list: list of opcodes
  @param options: command line options or None
  @return: None (in-place modification)

  """
  # NOTE(review): the early return for "options" being None is elided
  # in this excerpt.
  for op in opcode_list:
    op.debug_level = options.debug
    if hasattr(options, "dry_run"):
      op.dry_run = options.dry_run
    # only set a priority when one was explicitly requested
    if getattr(options, "priority", None) is not None:
      op.priority = _PRIONAME_TO_VALUE[options.priority]
  # NOTE(review): these lines belong to a function (the luxi client
  # factory) whose "def" line, "try:" lines and return statement are
  # elided in this excerpt.
  # TODO: Cache object?
  client = luxi.Client()
  except luxi.NoMasterError:
    ss = ssconf.SimpleStore()

    # Try to read ssconf file
    except errors.ConfigurationError:
      raise errors.OpPrereqError("Cluster not initialized or this machine is"
                                 " not part of a cluster")

    # a master exists but it is not this node: tell the user where to go
    master, myself = ssconf.GetMasterAndMyself(ss=ss)
    if master != myself:
      raise errors.OpPrereqError("This is not the master node, please connect"
                                 " to node '%s' and rerun the command" %
def FormatError(err):
  """Return a formatted error message for a given error.

  This function takes an exception instance and returns a tuple
  consisting of two values: first, the recommended exit code, and
  second, a string describing the error message (not
  newline-terminated).

  """
  # NOTE(review): the initialization of "retcode", "obuf" (a StringIO)
  # and "msg" (the stringified exception), plus some else-branches, are
  # elided in this excerpt.
  if isinstance(err, errors.ConfigurationError):
    txt = "Corrupt configuration file: %s" % msg
    obuf.write(txt + "\n")
    obuf.write("Aborting.")
  elif isinstance(err, errors.HooksAbort):
    obuf.write("Failure: hooks execution failed:\n")
    for node, script, out in err.args[0]:
      obuf.write("  node: %s, script: %s, output: %s\n" %
                 (node, script, out))
      obuf.write("  node: %s, script: %s (no output)\n" %
  elif isinstance(err, errors.HooksFailure):
    obuf.write("Failure: hooks general failure: %s" % msg)
  elif isinstance(err, errors.ResolverError):
    # distinguish "can't resolve myself" from a generic resolution failure
    this_host = netutils.Hostname.GetSysName()
    if err.args[0] == this_host:
      msg = "Failure: can't resolve my own hostname ('%s')"
      msg = "Failure: can't resolve hostname '%s'"
    obuf.write(msg % err.args[0])
  elif isinstance(err, errors.OpPrereqError):
    # two-argument form carries a structured error type as second element
    if len(err.args) == 2:
      obuf.write("Failure: prerequisites not met for this"
                 " operation:\nerror type: %s, error details:\n%s" %
                 (err.args[1], err.args[0]))
      obuf.write("Failure: prerequisites not met for this"
                 " operation:\n%s" % msg)
  elif isinstance(err, errors.OpExecError):
    obuf.write("Failure: command execution error:\n%s" % msg)
  elif isinstance(err, errors.TagError):
    obuf.write("Failure: invalid tag(s) given:\n%s" % msg)
  elif isinstance(err, errors.JobQueueDrainError):
    obuf.write("Failure: the job queue is marked for drain and doesn't"
               " accept new requests\n")
  elif isinstance(err, errors.JobQueueFull):
    obuf.write("Failure: the job queue is full and doesn't accept new"
               " job submissions until old jobs are archived\n")
  elif isinstance(err, errors.TypeEnforcementError):
    obuf.write("Parameter Error: %s" % msg)
  elif isinstance(err, errors.ParameterError):
    obuf.write("Failure: unknown/wrong parameter name '%s'" % msg)
  elif isinstance(err, luxi.NoMasterError):
    obuf.write("Cannot communicate with the master daemon.\nIs it running"
               " and listening for connections?")
  elif isinstance(err, luxi.TimeoutError):
    obuf.write("Timeout while talking to the master daemon. Jobs might have"
               " been submitted and will continue to run even if the call"
               " timed out. Useful commands in this situation are \"gnt-job"
               " list\", \"gnt-job cancel\" and \"gnt-job watch\". Error:\n")
  elif isinstance(err, luxi.PermissionError):
    obuf.write("It seems you don't have permissions to connect to the"
               " master daemon.\nPlease retry as a different user.")
  elif isinstance(err, luxi.ProtocolError):
    obuf.write("Unhandled protocol error while talking to the master daemon:\n"
  elif isinstance(err, errors.JobLost):
    obuf.write("Error checking job status: %s" % msg)
  elif isinstance(err, errors.QueryFilterParseError):
    obuf.write("Error while parsing query filter: %s\n" % err.args[0])
    obuf.write("\n".join(err.GetDetails()))
  elif isinstance(err, errors.GenericError):
    obuf.write("Unhandled Ganeti error: %s" % msg)
  elif isinstance(err, JobSubmittedException):
    # not a failure: the client just has to print the job id
    obuf.write("JobID: %s\n" % err.args[0])
    obuf.write("Unhandled exception: %s" % msg)
  return retcode, obuf.getvalue().rstrip("\n")
def GenericMain(commands, override=None, aliases=None):
  """Generic main function for all the gnt-* commands.

  - commands: a dictionary with a special structure, see the design doc
              for command line handling.
  - override: if not None, we expect a dictionary with keys that will
              override command line options; this can be used to pass
              options from the scripts to generic functions
  - aliases: dictionary with command aliases {'alias': 'target, ...}

  """
  # NOTE(review): several try:/except, sys.exit and return statements of
  # this function are elided in this excerpt.
  # save the program name and the entire command line for later logging
  binary = os.path.basename(sys.argv[0]) or sys.argv[0]
  if len(sys.argv) >= 2:
    binary += " " + sys.argv[1]
    old_cmdline = " ".join(sys.argv[2:])

  binary = "<unknown program>"

  func, options, args = _ParseArgs(sys.argv, commands, aliases)
  except errors.ParameterError, err:
    result, err_msg = FormatError(err)

  if func is None: # parse error
  if override is not None:
    # script-supplied values take precedence over command line options
    for key, val in override.iteritems():
      setattr(options, key, val)

  utils.SetupLogging(constants.LOG_COMMANDS, binary, debug=options.debug,
                     stderr_logging=True)

  logging.info("run with arguments '%s'", old_cmdline)
    logging.info("run with no arguments")

  result = func(options, args)
  except (errors.GenericError, luxi.ProtocolError,
          JobSubmittedException), err:
    result, err_msg = FormatError(err)
    logging.exception("Error during command processing")
  except KeyboardInterrupt:
    result = constants.EXIT_FAILURE
    ToStderr("Aborted. Note that if the operation created any jobs, they"
             " might have been submitted and"
             " will continue to run in the background.")
  except IOError, err:
    if err.errno == errno.EPIPE:
      # our terminal went away, we'll exit
      sys.exit(constants.EXIT_FAILURE)
def ParseNicOption(optvalue):
  """Parses the value of the --net option(s).

  @param optvalue: list of (nic_index, settings-dict) pairs as produced
      by the option parser
  @rtype: list of dicts
  @return: one parameter dictionary per NIC, positioned by NIC index;
      unspecified NICs get an empty dictionary
  @raise errors.OpPrereqError: if a NIC index or a settings value is
      invalid

  """
  try:
    nic_max = max(int(nidx[0]) + 1 for nidx in optvalue)
  except (TypeError, ValueError) as err:
    raise errors.OpPrereqError("Invalid NIC index passed: %s" % str(err))

  # Use independent dictionaries: "[{}] * nic_max" would make all
  # unspecified slots share a single dict object, so mutating one entry
  # would silently change the others.
  nics = [{} for _ in range(nic_max)]

  for nidx, ndict in optvalue:
    nidx = int(nidx)

    if not isinstance(ndict, dict):
      raise errors.OpPrereqError("Invalid nic/%d value: expected dict,"
                                 " got %s" % (nidx, ndict))

    utils.ForceDictType(ndict, constants.INIC_PARAMS_TYPES)

    nics[nidx] = ndict

  return nics
def GenericInstanceCreate(mode, opts, args):
  """Add an instance to the cluster via either creation or import.

  @param mode: constants.INSTANCE_CREATE or constants.INSTANCE_IMPORT
  @param opts: the command line options selected by the user
  @type args: list
  @param args: should contain only one element, the new instance name
  @rtype: int
  @return: the desired exit code

  """
  # NOTE(review): many lines of this function (instance name extraction,
  # try:/except headers, several branches and parts of the OpInstanceCreate
  # keyword list) are elided in this excerpt; comments below describe
  # only the visible code.
  (pnode, snode) = SplitNodeOption(opts.node)

  hypervisor, hvparams = opts.hypervisor

  nics = ParseNicOption(opts.nics)
  elif mode == constants.INSTANCE_CREATE:
    # default of one nic, all auto

  # disk template / disk option consistency checks
  if opts.disk_template == constants.DT_DISKLESS:
    if opts.disks or opts.sd_size is not None:
      raise errors.OpPrereqError("Diskless instance but disk"
                                 " information passed")
  if (not opts.disks and not opts.sd_size
      and mode == constants.INSTANCE_CREATE):
    raise errors.OpPrereqError("No disk information specified")
  if opts.disks and opts.sd_size is not None:
    raise errors.OpPrereqError("Please use either the '--disk' or"
  if opts.sd_size is not None:
    # legacy single-disk syntax, converted to the --disk form
    opts.disks = [(0, {constants.IDISK_SIZE: opts.sd_size})]

  disk_max = max(int(didx[0]) + 1 for didx in opts.disks)
  except ValueError, err:
    raise errors.OpPrereqError("Invalid disk index passed: %s" % str(err))
  disks = [{}] * disk_max

  for didx, ddict in opts.disks:
    if not isinstance(ddict, dict):
      msg = "Invalid disk/%d value: expected dict, got %s" % (didx, ddict)
      raise errors.OpPrereqError(msg)
    elif constants.IDISK_SIZE in ddict:
      # "size" and "adopt" are mutually exclusive per disk
      if constants.IDISK_ADOPT in ddict:
        raise errors.OpPrereqError("Only one of 'size' and 'adopt' allowed"
                                   " (disk %d)" % didx)
      ddict[constants.IDISK_SIZE] = \
        utils.ParseUnit(ddict[constants.IDISK_SIZE])
      except ValueError, err:
        raise errors.OpPrereqError("Invalid disk size for disk %d: %s" %
    elif constants.IDISK_ADOPT in ddict:
      if mode == constants.INSTANCE_IMPORT:
        raise errors.OpPrereqError("Disk adoption not allowed for instance"
      # adopted disks have no explicit size; it is determined later
      ddict[constants.IDISK_SIZE] = 0
      raise errors.OpPrereqError("Missing size or adoption source for"

  if opts.tags is not None:
    tags = opts.tags.split(",")

  utils.ForceDictType(opts.beparams, constants.BES_PARAMETER_TYPES)
  utils.ForceDictType(hvparams, constants.HVS_PARAMETER_TYPES)

  # mode-specific parameters for the opcode
  if mode == constants.INSTANCE_CREATE:
    force_variant = opts.force_variant
    no_install = opts.no_install
    identify_defaults = False
  elif mode == constants.INSTANCE_IMPORT:
    force_variant = False
    src_node = opts.src_node
    src_path = opts.src_dir
    identify_defaults = opts.identify_defaults
    raise errors.ProgrammerError("Invalid creation mode %s" % mode)

  op = opcodes.OpInstanceCreate(instance_name=instance,
                                disk_template=opts.disk_template,
                                pnode=pnode, snode=snode,
                                ip_check=opts.ip_check,
                                name_check=opts.name_check,
                                wait_for_sync=opts.wait_for_sync,
                                file_storage_dir=opts.file_storage_dir,
                                file_driver=opts.file_driver,
                                iallocator=opts.iallocator,
                                hypervisor=hypervisor,
                                beparams=opts.beparams,
                                osparams=opts.osparams,
                                force_variant=force_variant,
                                no_install=no_install,
                                identify_defaults=identify_defaults)

  SubmitOrSend(op, opts)
class _RunWhileClusterStoppedHelper:
  """Helper class for L{RunWhileClusterStopped} to simplify state management

  """
  def __init__(self, feedback_fn, cluster_name, master_node, online_nodes):
    """Initializes this class.

    @type feedback_fn: callable
    @param feedback_fn: Feedback function
    @type cluster_name: string
    @param cluster_name: Cluster name
    @type master_node: string
    @param master_node: Master node name
    @type online_nodes: list
    @param online_nodes: List of names of online nodes

    """
    self.feedback_fn = feedback_fn
    self.cluster_name = cluster_name
    self.master_node = master_node
    self.online_nodes = online_nodes

    self.ssh = ssh.SshRunner(self.cluster_name)

    # all online nodes except the master; daemons are restarted on these
    # first, master last
    self.nonmaster_nodes = [name for name in online_nodes
                            if name != master_node]

    assert self.master_node not in self.nonmaster_nodes

  def _RunCmd(self, node_name, cmd):
    """Runs a command on the local or a remote machine.

    @type node_name: string
    @param node_name: Machine name
    @param cmd: Command

    """
    # NOTE(review): the else-branch header and the failure check header
    # are elided in this excerpt.
    if node_name is None or node_name == self.master_node:
      # No need to use SSH
      result = utils.RunCmd(cmd)
      result = self.ssh.Run(node_name, "root", utils.ShellQuoteArgs(cmd))

      # build a descriptive error message including node and exit status
      errmsg = ["Failed to run command %s" % result.cmd]
      errmsg.append("on node %s" % node_name)
      errmsg.append(": exitcode %s and error %s" %
                    (result.exit_code, result.output))
      raise errors.OpExecError(" ".join(errmsg))

  def Call(self, fn, *args):
    """Call function while all daemons are stopped.

    @type fn: callable
    @param fn: Function to be called

    """
    # NOTE(review): the try/finally structure around the daemon restart
    # is partially elided in this excerpt.
    # Pause watcher by acquiring an exclusive lock on watcher state file
    self.feedback_fn("Blocking watcher")
    watcher_block = utils.FileLock.Open(constants.WATCHER_LOCK_FILE)
    # TODO: Currently, this just blocks. There's no timeout.
    # TODO: Should it be a shared lock?
    watcher_block.Exclusive(blocking=True)

    # Stop master daemons, so that no new jobs can come in and all running
    # ones are finished
    self.feedback_fn("Stopping master daemons")
    self._RunCmd(None, [constants.DAEMON_UTIL, "stop-master"])

    # Stop daemons on all nodes
    for node_name in self.online_nodes:
      self.feedback_fn("Stopping daemons on %s" % node_name)
      self._RunCmd(node_name, [constants.DAEMON_UTIL, "stop-all"])

    # All daemons are shut down now
    return fn(self, *args)
    except Exception, err:
      _, errmsg = FormatError(err)
      logging.exception("Caught exception")
      self.feedback_fn(errmsg)

    # Start cluster again, master node last
    for node_name in self.nonmaster_nodes + [self.master_node]:
      self.feedback_fn("Starting daemons on %s" % node_name)
      self._RunCmd(node_name, [constants.DAEMON_UTIL, "start-all"])

    # Resume watcher
    watcher_block.Close()
def RunWhileClusterStopped(feedback_fn, fn, *args):
  """Calls a function while all cluster daemons are stopped.

  @type feedback_fn: callable
  @param feedback_fn: Feedback function
  @type fn: callable
  @param fn: Function to be called when daemons are stopped

  """
  feedback_fn("Gathering cluster information")

  # This ensures we're running on the master daemon
  # NOTE(review): the client creation ("cl = GetClient()") and its later
  # "del cl" are elided in this excerpt.
  (cluster_name, master_node) = \
    cl.QueryConfigValues(["cluster_name", "master_node"])

  online_nodes = GetOnlineNodes([], cl=cl)

  # Don't keep a reference to the client. The master daemon will go away.
  assert master_node in online_nodes

  return _RunWhileClusterStoppedHelper(feedback_fn, cluster_name, master_node,
                                       online_nodes).Call(fn, *args)
def GenerateTable(headers, fields, separator, data,
                  numfields=None, unitfields=None,
                  # NOTE(review): the final signature line ("units=None):")
                  # is elided in this excerpt, as are several try:/else
                  # lines and the final "return result" further below.
  """Prints a table with headers and different fields.

  @param headers: dictionary mapping field names to headers for
      the table
  @param fields: the field names corresponding to each row in
      the data field
  @param separator: the separator to be used; if this is None,
      the default 'smart' algorithm is used which computes optimal
      field width, otherwise just the separator is used between
      each field
  @param data: a list of lists, each sublist being one row to be output
  @type numfields: list
  @param numfields: a list with the fields that hold numeric
      values and thus should be right-aligned
  @type unitfields: list
  @param unitfields: a list with the fields that hold numeric
      values that should be formatted with the units field
  @type units: string or None
  @param units: the units we should use for formatting, or None for
      automatic choice (human-readable for non-separator usage, otherwise
      megabytes); this is a one-letter string

  """
  if numfields is None:
  if unitfields is None:
  numfields = utils.FieldSet(*numfields) # pylint: disable=W0142
  unitfields = utils.FieldSet(*unitfields) # pylint: disable=W0142

  # build the per-column printf-style format specifiers
  for field in fields:
    if headers and field not in headers:
      # TODO: handle better unknown fields (either revert to old
      # style of raising exception, or deal more intelligently with
      # variable fields)
      headers[field] = field
    if separator is not None:
      format_fields.append("%s")
    elif numfields.Matches(field):
      # numeric fields are right-aligned with a computed width
      format_fields.append("%*s")
      format_fields.append("%-*s")

  if separator is None:
    # smart mode: track the maximum width of each column
    mlens = [0 for name in fields]
    format_str = " ".join(format_fields)
    format_str = separator.replace("%", "%%").join(format_fields)

  # convert cell values to strings, formatting unit fields and tracking
  # column widths
  for idx, val in enumerate(row):
    if unitfields.Matches(fields[idx]):
      except (TypeError, ValueError):
      val = row[idx] = utils.FormatUnit(val, units)
    val = row[idx] = str(val)
    if separator is None:
      mlens[idx] = max(mlens[idx], len(val))

  # emit the header row, updating widths to fit the header text
  for idx, name in enumerate(fields):
    if separator is None:
      mlens[idx] = max(mlens[idx], len(hdr))
    args.append(mlens[idx])
  result.append(format_str % tuple(args))

  if separator is None:
    assert len(mlens) == len(fields)
    if fields and not numfields.Matches(fields[-1]):
    # emit a separator line of dashes under the headers
    line = ["-" for _ in fields]
    for idx in range(len(fields)):
      if separator is None:
        args.append(mlens[idx])
      args.append(line[idx])
    result.append(format_str % tuple(args))
2467 def _FormatBool(value):
2468 """Formats a boolean value as a string.
#: Default formatting for query results; (callback, align right)
_DEFAULT_FORMAT_QUERY = {
  constants.QFT_TEXT: (str, False),
  constants.QFT_BOOL: (_FormatBool, False),
  constants.QFT_NUMBER: (str, True),
  constants.QFT_TIMESTAMP: (utils.FormatTime, False),
  constants.QFT_OTHER: (str, False),
  constants.QFT_UNKNOWN: (str, False),
  }
def _GetColumnFormatter(fdef, override, unit):
  """Returns formatting function for a field.

  @type fdef: L{objects.QueryFieldDefinition}
  @type override: dict
  @param override: Dictionary for overriding field formatting functions,
    indexed by field name, contents like L{_DEFAULT_FORMAT_QUERY}
  @type unit: string
  @param unit: Unit used for formatting fields of type L{constants.QFT_UNIT}
  @rtype: tuple; (callable, bool)
  @return: Returns the function to format a value (takes one parameter) and a
    boolean for aligning the value on the right-hand side

  """
  fmt = override.get(fdef.name, None)
  if fmt is not None:
    return fmt

  assert constants.QFT_UNIT not in _DEFAULT_FORMAT_QUERY

  if fdef.kind == constants.QFT_UNIT:
    # Can't keep this information in the static dictionary
    return (lambda value: utils.FormatUnit(value, unit), True)

  fmt = _DEFAULT_FORMAT_QUERY.get(fdef.kind, None)
  if fmt is not None:
    return fmt

  raise NotImplementedError("Can't format column type '%s'" % fdef.kind)
2518 class _QueryColumnFormatter:
2519 """Callable class for formatting fields of a query.
2522 def __init__(self, fn, status_fn, verbose):
2523 """Initializes this class.
2526 @param fn: Formatting function
2527 @type status_fn: callable
2528 @param status_fn: Function to report fields' status
2529 @type verbose: boolean
2530 @param verbose: whether to use verbose field descriptions or not
2534 self._status_fn = status_fn
2535 self._verbose = verbose
2537 def __call__(self, data):
2538 """Returns a field's string representation.
2541 (status, value) = data
2544 self._status_fn(status)
2546 if status == constants.RS_NORMAL:
2547 return self._fn(value)
2549 assert value is None, \
2550 "Found value %r for abnormal status %s" % (value, status)
2552 return FormatResultError(status, self._verbose)
def FormatResultError(status, verbose):
  """Formats result status other than L{constants.RS_NORMAL}.

  @param status: The result status
  @type verbose: boolean
  @param verbose: Whether to return the verbose text
  @return: Text of result status

  """
  assert status != constants.RS_NORMAL, \
         "FormatResultError called with status equal to constants.RS_NORMAL"
  try:
    (verbose_text, normal_text) = constants.RSS_DESCRIPTION[status]
  except KeyError:
    raise NotImplementedError("Unknown status %s" % status)
  else:
    if verbose:
      return verbose_text
    return normal_text
def FormatQueryResult(result, unit=None, format_override=None, separator=None,
                      header=False, verbose=False):
  """Formats data in L{objects.QueryResponse}.

  @type result: L{objects.QueryResponse}
  @param result: result of query operation
  @type unit: string
  @param unit: Unit used for formatting fields of type L{constants.QFT_UNIT},
    see L{utils.text.FormatUnit}
  @type format_override: dict
  @param format_override: Dictionary for overriding field formatting functions,
    indexed by field name, contents like L{_DEFAULT_FORMAT_QUERY}
  @type separator: string or None
  @param separator: String used to separate fields
  @type header: bool
  @param header: Whether to output header row
  @type verbose: boolean
  @param verbose: whether to use verbose field descriptions or not

  """
  if unit is None:
    # automatic choice: human-readable without separator, megabytes with
    if separator:
      unit = "m"
    else:
      unit = "h"

  if format_override is None:
    format_override = {}

  stats = dict.fromkeys(constants.RS_ALL, 0)

  def _RecordStatus(status):
    if status in stats:
      stats[status] += 1

  columns = []
  for fdef in result.fields:
    assert fdef.title and fdef.name
    (fn, align_right) = _GetColumnFormatter(fdef, format_override, unit)
    columns.append(TableColumn(fdef.title,
                               _QueryColumnFormatter(fn, _RecordStatus,
                                                     verbose),
                               align_right))

  table = FormatTable(result.data, columns, header, separator)

  # Collect statistics
  assert len(stats) == len(constants.RS_ALL)
  assert compat.all(count >= 0 for count in stats.values())

  # Determine overall status. If there was no data, unknown fields must be
  # detected via the field definitions.
  if (stats[constants.RS_UNKNOWN] or
      (not result.data and _GetUnknownFields(result.fields))):
    status = QR_UNKNOWN
  elif compat.any(count > 0 for key, count in stats.items()
                  if key != constants.RS_NORMAL):
    status = QR_INCOMPLETE
  else:
    status = QR_NORMAL

  return (status, table)
def _GetUnknownFields(fdefs):
  """Returns list of unknown fields included in C{fdefs}.

  @type fdefs: list of L{objects.QueryFieldDefinition}
  @rtype: list
  @return: field definitions whose kind is L{constants.QFT_UNKNOWN}

  """
  return [fdef for fdef in fdefs
          if fdef.kind == constants.QFT_UNKNOWN]
def _WarnUnknownFields(fdefs):
  """Prints a warning to stderr if a query included unknown fields.

  @type fdefs: list of L{objects.QueryFieldDefinition}
  @rtype: bool
  @return: whether any unknown fields were found

  """
  unknown = _GetUnknownFields(fdefs)
  if unknown:
    ToStderr("Warning: Queried for unknown fields %s",
             utils.CommaJoin(fdef.name for fdef in unknown))
    return True

  return False
def GenericList(resource, fields, names, unit, separator, header, cl=None,
                format_override=None, verbose=False, force_filter=False):
  """Generic implementation for listing all items of a resource.

  @param resource: One of L{constants.QR_VIA_LUXI}
  @type fields: list of strings
  @param fields: List of fields to query for
  @type names: list of strings
  @param names: Names of items to query for
  @type unit: string or None
  @param unit: Unit used for formatting fields of type L{constants.QFT_UNIT} or
    None for automatic choice (human-readable for non-separator usage,
    otherwise megabytes); this is a one-letter string
  @type separator: string or None
  @param separator: String used to separate fields
  @type header: bool
  @param header: Whether to show header row
  @type force_filter: bool
  @param force_filter: Whether to always treat names as filter
  @type format_override: dict
  @param format_override: Dictionary for overriding field formatting functions,
    indexed by field name, contents like L{_DEFAULT_FORMAT_QUERY}
  @type verbose: boolean
  @param verbose: whether to use verbose field descriptions or not

  """
  if not names:
    names = None

  qfilter = qlang.MakeFilter(names, force_filter)

  if cl is None:
    cl = GetClient()

  response = cl.Query(resource, fields, qfilter)

  found_unknown = _WarnUnknownFields(response.fields)

  (status, data) = FormatQueryResult(response, unit=unit, separator=separator,
                                     header=header,
                                     format_override=format_override,
                                     verbose=verbose)

  for line in data:
    ToStdout(line)

  assert ((found_unknown and status == QR_UNKNOWN) or
          (not found_unknown and status != QR_UNKNOWN))

  if status == QR_UNKNOWN:
    return constants.EXIT_UNKNOWN_FIELD

  # TODO: Should the list command fail if not all data could be collected?
  return constants.EXIT_SUCCESS
def GenericListFields(resource, fields, separator, header, cl=None):
  """Generic implementation for listing fields for a resource.

  @param resource: One of L{constants.QR_VIA_LUXI}
  @type fields: list of strings
  @param fields: List of fields to query for
  @type separator: string or None
  @param separator: String used to separate fields
  @type header: bool
  @param header: Whether to show header row

  """
  if cl is None:
    cl = GetClient()

  response = cl.QueryFields(resource, fields)

  found_unknown = _WarnUnknownFields(response.fields)

  columns = [
    TableColumn("Name", str, False),
    TableColumn("Title", str, False),
    TableColumn("Description", str, False),
    ]

  rows = [[fdef.name, fdef.title, fdef.doc] for fdef in response.fields]

  for line in FormatTable(rows, columns, header, separator):
    ToStdout(line)

  if found_unknown:
    return constants.EXIT_UNKNOWN_FIELD

  return constants.EXIT_SUCCESS
class TableColumn:
  """Describes a column for L{FormatTable}.

  """
  def __init__(self, title, fn, align_right):
    """Initializes this class.

    @type title: string
    @param title: Column title
    @type fn: callable
    @param fn: Formatting function
    @type align_right: bool
    @param align_right: Whether to align values on the right-hand side

    """
    self.title = title
    self.format = fn
    self.align_right = align_right
2780 def _GetColFormatString(width, align_right):
2781 """Returns the format string for a field.
2789 return "%%%s%ss" % (sign, width)
def FormatTable(rows, columns, header, separator):
  """Formats data as a table.

  @type rows: list of lists
  @param rows: Row data, one list per row
  @type columns: list of L{TableColumn}
  @param columns: Column descriptions
  @type header: bool
  @param header: Whether to show header row
  @type separator: string or None
  @param separator: String used to separate columns

  """
  if header:
    data = [[col.title for col in columns]]
    colwidth = [len(col.title) for col in columns]
  else:
    data = []
    colwidth = [0 for _ in columns]

  # Format row data
  for row in rows:
    assert len(row) == len(columns)

    formatted = [col.format(value) for value, col in zip(row, columns)]

    if separator is None:
      # Update column widths
      for idx, (oldwidth, value) in enumerate(zip(colwidth, formatted)):
        # Modifying a list's items while iterating is fine
        colwidth[idx] = max(oldwidth, len(value))

    data.append(formatted)

  if separator is not None:
    # Return early if a separator is used
    return [separator.join(row) for row in data]

  if columns and not columns[-1].align_right:
    # Avoid unnecessary spaces at end of line
    colwidth[-1] = 0

  # Build format string
  fmt = " ".join([_GetColFormatString(width, col.align_right)
                  for col, width in zip(columns, colwidth)])

  return [fmt % tuple(row) for row in data]
def FormatTimestamp(ts):
  """Formats a given timestamp.

  @type ts: timestamp
  @param ts: a timeval-type timestamp, a tuple of seconds and microseconds

  @rtype: string
  @return: a string with the formatted timestamp

  """
  # anything that is not a two-element sequence cannot be formatted
  if not isinstance(ts, (tuple, list)) or len(ts) != 2:
    return "?"
  (sec, usec) = ts
  return time.strftime("%F %T", time.localtime(sec)) + ".%06d" % usec
def ParseTimespec(value):
  """Parse a time specification.

  The following suffixed will be recognized:

    - s: seconds
    - m: minutes
    - h: hours
    - d: day
    - w: weeks

  Without any suffix, the value will be taken to be in seconds.

  """
  value = str(value)
  if not value:
    raise errors.OpPrereqError("Empty time specification passed")
  suffix_map = {
    "s": 1,
    "m": 60,
    "h": 3600,
    "d": 86400,
    "w": 604800,
    }
  if value[-1] not in suffix_map:
    try:
      value = int(value)
    except (TypeError, ValueError):
      raise errors.OpPrereqError("Invalid time specification '%s'" % value)
  else:
    multiplier = suffix_map[value[-1]]
    value = value[:-1]
    if not value: # no data left after stripping the suffix
      raise errors.OpPrereqError("Invalid time specification (only"
                                 " suffix passed)")
    try:
      value = int(value) * multiplier
    except (TypeError, ValueError):
      raise errors.OpPrereqError("Invalid time specification '%s'" % value)
  return value
def GetOnlineNodes(nodes, cl=None, nowarn=False, secondary_ips=False,
                   filter_master=False, nodegroup=None):
  """Returns the names of online nodes.

  This function will also log a warning on stderr with the names of
  the online nodes.

  @param nodes: if not empty, use only this subset of nodes (minus the
      offline ones)
  @param cl: if not None, luxi client to use
  @type nowarn: boolean
  @param nowarn: by default, this function will output a note with the
      offline nodes that are skipped; if this parameter is True the
      note is not displayed
  @type secondary_ips: boolean
  @param secondary_ips: if True, return the secondary IPs instead of the
      names, useful for doing network traffic over the replication interface
      (if any)
  @type filter_master: boolean
  @param filter_master: if True, do not return the master node in the list
      (useful in coordination with secondary_ips where we cannot check our
      node name against the list)
  @type nodegroup: string
  @param nodegroup: If set, only return nodes in this node group

  """
  if cl is None:
    cl = GetClient()

  qfilter = []

  if nodes:
    qfilter.append(qlang.MakeSimpleFilter("name", nodes))

  if nodegroup is not None:
    # match the group either by name or by UUID
    qfilter.append([qlang.OP_OR, [qlang.OP_EQUAL, "group", nodegroup],
                                 [qlang.OP_EQUAL, "group.uuid", nodegroup]])

  if filter_master:
    qfilter.append([qlang.OP_NOT, [qlang.OP_TRUE, "master"]])

  if qfilter:
    if len(qfilter) > 1:
      final_filter = [qlang.OP_AND] + qfilter
    else:
      assert len(qfilter) == 1
      final_filter = qfilter[0]
  else:
    final_filter = None

  result = cl.Query(constants.QR_NODE, ["name", "offline", "sip"], final_filter)

  def _IsOffline(row):
    (_, (_, offline), _) = row
    return offline

  def _GetName(row):
    ((_, name), _, _) = row
    return name

  def _GetSip(row):
    (_, _, (_, sip)) = row
    return sip

  (offline, online) = compat.partition(result.data, _IsOffline)

  if offline and not nowarn:
    ToStderr("Note: skipping offline node(s): %s" %
             utils.CommaJoin(map(_GetName, offline)))

  if secondary_ips:
    fn = _GetSip
  else:
    fn = _GetName

  return map(fn, online)
2977 def _ToStream(stream, txt, *args):
2978 """Write a message to a stream, bypassing the logging system
2980 @type stream: file object
2981 @param stream: the file to which we should write
2983 @param txt: the message
2989 stream.write(txt % args)
2994 except IOError, err:
2995 if err.errno == errno.EPIPE:
2996 # our terminal went away, we'll exit
2997 sys.exit(constants.EXIT_FAILURE)
def ToStdout(txt, *args):
  """Write a message to stdout only, bypassing the logging system

  This is just a wrapper over _ToStream.

  @type txt: str
  @param txt: the message

  """
  _ToStream(sys.stdout, txt, *args)
def ToStderr(txt, *args):
  """Write a message to stderr only, bypassing the logging system

  This is just a wrapper over _ToStream.

  @type txt: str
  @param txt: the message

  """
  _ToStream(sys.stderr, txt, *args)
class JobExecutor(object):
  """Class which manages the submission and execution of multiple jobs.

  Note that instances of this class should not be reused between
  GetResults() calls.

  """
  def __init__(self, cl=None, verbose=True, opts=None, feedback_fn=None):
    self.queue = []
    if cl is None:
      cl = GetClient()
    self.cl = cl
    self.verbose = verbose
    self.jobs = []
    self.opts = opts
    self.feedback_fn = feedback_fn
    self._counter = itertools.count()

  @staticmethod
  def _IfName(name, fmt):
    """Helper function for formatting name.

    """
    if name:
      return fmt % name

    return ""

  def QueueJob(self, name, *ops):
    """Record a job for later submit.

    @type name: string
    @param name: a description of the job, will be used in WaitJobSet

    """
    SetGenericOpcodeOpts(ops, self.opts)
    self.queue.append((next(self._counter), name, ops))

  def AddJobId(self, name, status, job_id):
    """Adds a job ID to the internal queue.

    """
    self.jobs.append((next(self._counter), status, job_id, name))

  def SubmitPending(self, each=False):
    """Submit all pending jobs.

    """
    if each:
      results = []
      for (_, _, ops) in self.queue:
        # SubmitJob will remove the success status, but raise an exception if
        # the submission fails, so we'll notice that anyway.
        results.append([True, self.cl.SubmitJob(ops)])
    else:
      results = self.cl.SubmitManyJobs([ops for (_, _, ops) in self.queue])
    for ((status, data), (idx, name, _)) in zip(results, self.queue):
      self.jobs.append((idx, status, data, name))

  def _ChooseJob(self):
    """Choose a non-waiting/queued job to poll next.

    """
    assert self.jobs, "_ChooseJob called with empty job list"

    result = self.cl.QueryJobs([i[2] for i in self.jobs[:_CHOOSE_BATCH]],
                               ["status"])
    assert result

    for job_data, status in zip(self.jobs, result):
      if (isinstance(status, list) and status and
          status[0] in (constants.JOB_STATUS_QUEUED,
                        constants.JOB_STATUS_WAITING,
                        constants.JOB_STATUS_CANCELING)):
        # job is still present and waiting
        continue
      # good candidate found (either running job or lost job)
      self.jobs.remove(job_data)
      return job_data

    # no job found
    return self.jobs.pop(0)

  def GetResults(self):
    """Wait for and return the results of all jobs.

    @rtype: list
    @return: list of tuples (success, job results), in the same order
        as the submitted jobs; if a job has failed, instead of the result
        there will be the error message

    """
    if not self.jobs:
      self.SubmitPending()
    results = []
    if self.verbose:
      ok_jobs = [row[2] for row in self.jobs if row[1]]
      if ok_jobs:
        ToStdout("Submitted jobs %s", utils.CommaJoin(ok_jobs))

    # first, remove any non-submitted jobs
    self.jobs, failures = compat.partition(self.jobs, lambda x: x[1])
    for idx, _, jid, name in failures:
      ToStderr("Failed to submit job%s: %s", self._IfName(name, " for %s"), jid)
      results.append((idx, False, jid))

    while self.jobs:
      (idx, _, jid, name) = self._ChooseJob()
      ToStdout("Waiting for job %s%s ...", jid, self._IfName(name, " for %s"))
      try:
        job_result = PollJob(jid, cl=self.cl, feedback_fn=self.feedback_fn)
        success = True
      except errors.JobLost as err:
        _, job_result = FormatError(err)
        ToStderr("Job %s%s has been archived, cannot check its result",
                 jid, self._IfName(name, " for %s"))
        success = False
      except (errors.GenericError, luxi.ProtocolError) as err:
        _, job_result = FormatError(err)
        success = False
        # the error message will always be shown, verbose or not
        ToStderr("Job %s%s has failed: %s",
                 jid, self._IfName(name, " for %s"), job_result)

      results.append((idx, success, job_result))

    # sort based on the index, then drop it
    results.sort()
    results = [i[1:] for i in results]

    return results

  def WaitOrShow(self, wait):
    """Wait for job results or only print the job IDs.

    @type wait: boolean
    @param wait: whether to wait or not

    """
    if wait:
      return self.GetResults()
    else:
      if not self.jobs:
        self.SubmitPending()
      for _, status, result, name in self.jobs:
        if status:
          ToStdout("%s: %s", result, name)
        else:
          ToStderr("Failure for %s: %s", name, result)

      return [row[1:3] for row in self.jobs]
def FormatParameterDict(buf, param_dict, actual, level=1):
  """Formats a parameter dictionary.

  @type buf: L{StringIO}
  @param buf: the buffer into which to write
  @type param_dict: dict
  @param param_dict: the own parameters
  @type actual: dict
  @param actual: the current parameter set (including defaults)
  @param level: Level of indent

  """
  # NOTE(review): the damaged listing shows a single-space literal here; two
  # spaces per level matches the file's indent convention — confirm upstream
  indent = "  " * level
  for key in sorted(actual):
    # values not explicitly set are shown as coming from the defaults
    val = param_dict.get(key, "default (%s)" % actual[key])
    buf.write("%s- %s: %s\n" % (indent, key, val))
3196 def ConfirmOperation(names, list_type, text, extra=""):
3197 """Ask the user to confirm an operation on a list of list_type.
3199 This function is used to request confirmation for doing an operation
3200 on a given list of list_type.
3203 @param names: the list of names that we display when
3204 we ask for confirmation
3205 @type list_type: str
3206 @param list_type: Human readable name for elements in the list (e.g. nodes)
3208 @param text: the operation that the user should confirm
3210 @return: True or False depending on user's confirmation.
3214 msg = ("The %s will operate on %d %s.\n%s"
3215 "Do you want to continue?" % (text, count, list_type, extra))
3216 affected = (("\nAffected %s:\n" % list_type) +
3217 "\n".join([" %s" % name for name in names]))
3219 choices = [("y", True, "Yes, execute the %s" % text),
3220 ("n", False, "No, abort the %s" % text)]
3223 choices.insert(1, ("v", "v", "View the list of affected %s" % list_type))
3226 question = msg + affected
3228 choice = AskUser(question, choices)
3231 choice = AskUser(msg + affected, choices)