4 # Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011 Google Inc.
6 # This program is free software; you can redistribute it and/or modify
7 # it under the terms of the GNU General Public License as published by
8 # the Free Software Foundation; either version 2 of the License, or
9 # (at your option) any later version.
11 # This program is distributed in the hope that it will be useful, but
12 # WITHOUT ANY WARRANTY; without even the implied warranty of
13 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 # General Public License for more details.
16 # You should have received a copy of the GNU General Public License
17 # along with this program; if not, write to the Free Software
18 # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
22 """Module dealing with command line parsing"""
33 from cStringIO import StringIO
35 from ganeti import utils
36 from ganeti import errors
37 from ganeti import constants
38 from ganeti import opcodes
39 from ganeti import luxi
40 from ganeti import ssconf
41 from ganeti import rpc
42 from ganeti import ssh
43 from ganeti import compat
44 from ganeti import netutils
45 from ganeti import qlang
47 from optparse import (OptionParser, TitledHelpFormatter,
48 Option, OptionValueError)
52 # Command line options
65 "CLUSTER_DOMAIN_SECRET_OPT",
83 "FILESTORE_DRIVER_OPT",
89 "GLOBAL_SHARED_FILEDIR_OPT",
94 "DEFAULT_IALLOCATOR_OPT",
95 "IDENTIFY_DEFAULTS_OPT",
98 "IGNORE_FAILURES_OPT",
100 "IGNORE_REMOVE_FAILURES_OPT",
101 "IGNORE_SECONDARIES_OPT",
105 "MAINTAIN_NODE_HEALTH_OPT",
107 "MASTER_NETMASK_OPT",
109 "MIGRATION_MODE_OPT",
111 "NEW_CLUSTER_CERT_OPT",
112 "NEW_CLUSTER_DOMAIN_SECRET_OPT",
113 "NEW_CONFD_HMAC_KEY_OPT",
116 "NEW_SPICE_CERT_OPT",
118 "NODE_FORCE_JOIN_OPT",
120 "NODE_PLACEMENT_OPT",
124 "NODRBD_STORAGE_OPT",
130 "NOMODIFY_ETCHOSTS_OPT",
131 "NOMODIFY_SSH_SETUP_OPT",
137 "NOSSH_KEYCHECK_OPT",
151 "PREALLOC_WIPE_DISKS_OPT",
152 "PRIMARY_IP_VERSION_OPT",
158 "REMOVE_INSTANCE_OPT",
163 "SECONDARY_ONLY_OPT",
167 "SHUTDOWN_TIMEOUT_OPT",
169 "SPECS_CPU_COUNT_OPT",
170 "SPECS_DISK_COUNT_OPT",
171 "SPECS_DISK_SIZE_OPT",
172 "SPECS_MEM_SIZE_OPT",
173 "SPECS_NIC_COUNT_OPT",
179 "STARTUP_PAUSED_OPT",
188 "USE_EXTERNAL_MIP_SCRIPT",
195 "IGNORE_IPOLICY_OPT",
196 # Generic functions for CLI programs
199 "GenericInstanceCreate",
205 "JobSubmittedException",
207 "RunWhileClusterStopped",
211 # Formatting functions
212 "ToStderr", "ToStdout",
215 "FormatParameterDict",
224 # command line options support infrastructure
225 "ARGS_MANY_INSTANCES",
244 "OPT_COMPL_INST_ADD_NODES",
245 "OPT_COMPL_MANY_NODES",
246 "OPT_COMPL_ONE_IALLOCATOR",
247 "OPT_COMPL_ONE_INSTANCE",
248 "OPT_COMPL_ONE_NODE",
249 "OPT_COMPL_ONE_NODEGROUP",
255 "COMMON_CREATE_OPTS",
261 #: Priorities (sorted)
263 ("low", constants.OP_PRIO_LOW),
264 ("normal", constants.OP_PRIO_NORMAL),
265 ("high", constants.OP_PRIO_HIGH),
268 #: Priority dictionary for easier lookup
269 # TODO: Replace this and _PRIORITY_NAMES with a single sorted dictionary once
270 # we migrate to Python 2.6
271 _PRIONAME_TO_VALUE = dict(_PRIORITY_NAMES)
273 # Query result status for clients
276 QR_INCOMPLETE) = range(3)
278 #: Maximum batch size for ChooseJob
283 def __init__(self, min=0, max=None): # pylint: disable=W0622
288 return ("<%s min=%s max=%s>" %
289 (self.__class__.__name__, self.min, self.max))
292 class ArgSuggest(_Argument):
293 """Suggesting argument.
295 Value can be any of the ones passed to the constructor.
298 # pylint: disable=W0622
299 def __init__(self, min=0, max=None, choices=None):
300 _Argument.__init__(self, min=min, max=max)
301 self.choices = choices
304 return ("<%s min=%s max=%s choices=%r>" %
305 (self.__class__.__name__, self.min, self.max, self.choices))
308 class ArgChoice(ArgSuggest):
311 Value can be any of the ones passed to the constructor. Like L{ArgSuggest},
312 but value must be one of the choices.
317 class ArgUnknown(_Argument):
318 """Unknown argument to program (e.g. determined at runtime).
323 class ArgInstance(_Argument):
324 """Instances argument.
329 class ArgNode(_Argument):
335 class ArgGroup(_Argument):
336 """Node group argument.
341 class ArgJobId(_Argument):
347 class ArgFile(_Argument):
348 """File path argument.
353 class ArgCommand(_Argument):
359 class ArgHost(_Argument):
365 class ArgOs(_Argument):
# Canned positional-argument specifications shared by the CLI commands:
# the "MANY_*" variants accept any number of names (min defaults to 0),
# while the "ONE_*" variants require exactly one (min=1, max=1).
ARGS_MANY_INSTANCES = [ArgInstance()]
ARGS_MANY_NODES = [ArgNode()]
ARGS_MANY_GROUPS = [ArgGroup()]
ARGS_ONE_INSTANCE = [ArgInstance(min=1, max=1)]
ARGS_ONE_NODE = [ArgNode(min=1, max=1)]
ARGS_ONE_GROUP = [ArgGroup(min=1, max=1)]
ARGS_ONE_OS = [ArgOs(min=1, max=1)]
382 def _ExtractTagsObject(opts, args):
383 """Extract the tag type object.
385 Note that this function will modify its args parameter.
388 if not hasattr(opts, "tag_type"):
389 raise errors.ProgrammerError("tag_type not passed to _ExtractTagsObject")
391 if kind == constants.TAG_CLUSTER:
393 elif kind in (constants.TAG_NODEGROUP,
395 constants.TAG_INSTANCE):
397 raise errors.OpPrereqError("no arguments passed to the command")
401 raise errors.ProgrammerError("Unhandled tag type '%s'" % kind)
405 def _ExtendTags(opts, args):
406 """Extend the args if a source file has been given.
408 This function will extend the tags with the contents of the file
409 passed in the 'tags_source' attribute of the opts parameter. A file
410 named '-' will be replaced by stdin.
413 fname = opts.tags_source
419 new_fh = open(fname, "r")
422 # we don't use the nice 'new_data = [line.strip() for line in fh]'
423 # because of python bug 1633941
425 line = new_fh.readline()
428 new_data.append(line.strip())
431 args.extend(new_data)
434 def ListTags(opts, args):
435 """List the tags on a given object.
437 This is a generic implementation that knows how to deal with all
438 three cases of tag objects (cluster, node, instance). The opts
439 argument is expected to contain a tag_type field denoting what
440 object type we work on.
443 kind, name = _ExtractTagsObject(opts, args)
445 result = cl.QueryTags(kind, name)
446 result = list(result)
452 def AddTags(opts, args):
453 """Add tags on a given object.
455 This is a generic implementation that knows how to deal with all
456 three cases of tag objects (cluster, node, instance). The opts
457 argument is expected to contain a tag_type field denoting what
458 object type we work on.
461 kind, name = _ExtractTagsObject(opts, args)
462 _ExtendTags(opts, args)
464 raise errors.OpPrereqError("No tags to be added")
465 op = opcodes.OpTagsSet(kind=kind, name=name, tags=args)
466 SubmitOpCode(op, opts=opts)
469 def RemoveTags(opts, args):
470 """Remove tags from a given object.
472 This is a generic implementation that knows how to deal with all
473 three cases of tag objects (cluster, node, instance). The opts
474 argument is expected to contain a tag_type field denoting what
475 object type we work on.
478 kind, name = _ExtractTagsObject(opts, args)
479 _ExtendTags(opts, args)
481 raise errors.OpPrereqError("No tags to be removed")
482 op = opcodes.OpTagsDel(kind=kind, name=name, tags=args)
483 SubmitOpCode(op, opts=opts)
486 def check_unit(option, opt, value): # pylint: disable=W0613
487 """OptParsers custom converter for units.
491 return utils.ParseUnit(value)
492 except errors.UnitParseError, err:
493 raise OptionValueError("option %s: %s" % (opt, err))
496 def _SplitKeyVal(opt, data):
497 """Convert a KeyVal string into a dict.
499 This function will convert a key=val[,...] string into a dict. Empty
500 values will be converted specially: keys which have the prefix 'no_'
501 will have the value=False and the prefix stripped, the others will
505 @param opt: a string holding the option name for which we process the
506 data, used in building error messages
508 @param data: a string of the format key=val,key=val,...
510 @return: {key=val, key=val}
511 @raises errors.ParameterError: if there are duplicate keys
516 for elem in utils.UnescapeAndSplit(data, sep=","):
518 key, val = elem.split("=", 1)
520 if elem.startswith(NO_PREFIX):
521 key, val = elem[len(NO_PREFIX):], False
522 elif elem.startswith(UN_PREFIX):
523 key, val = elem[len(UN_PREFIX):], None
525 key, val = elem, True
527 raise errors.ParameterError("Duplicate key '%s' in option %s" %
533 def check_ident_key_val(option, opt, value): # pylint: disable=W0613
534 """Custom parser for ident:key=val,key=val options.
536 This will store the parsed values as a tuple (ident, {key: val}). As such,
537 multiple uses of this option via action=append is possible.
541 ident, rest = value, ""
543 ident, rest = value.split(":", 1)
545 if ident.startswith(NO_PREFIX):
547 msg = "Cannot pass options when removing parameter groups: %s" % value
548 raise errors.ParameterError(msg)
549 retval = (ident[len(NO_PREFIX):], False)
550 elif ident.startswith(UN_PREFIX):
552 msg = "Cannot pass options when removing parameter groups: %s" % value
553 raise errors.ParameterError(msg)
554 retval = (ident[len(UN_PREFIX):], None)
556 kv_dict = _SplitKeyVal(opt, rest)
557 retval = (ident, kv_dict)
561 def check_key_val(option, opt, value): # pylint: disable=W0613
562 """Custom parser class for key=val,key=val options.
564 This will store the parsed values as a dict {key: val}.
567 return _SplitKeyVal(opt, value)
570 def check_bool(option, opt, value): # pylint: disable=W0613
571 """Custom parser for yes/no options.
573 This will store the parsed value as either True or False.
576 value = value.lower()
577 if value == constants.VALUE_FALSE or value == "no":
579 elif value == constants.VALUE_TRUE or value == "yes":
582 raise errors.ParameterError("Invalid boolean value '%s'" % value)
585 def check_list(option, opt, value): # pylint: disable=W0613
586 """Custom parser for comma-separated lists.
589 # we have to make this explicit check since "".split(",") is [""],
590 # not an empty list :(
594 return utils.UnescapeAndSplit(value)
597 # completion_suggestion is normally a list. Using numeric values not evaluating
598 # to False for dynamic completion.
599 (OPT_COMPL_MANY_NODES,
601 OPT_COMPL_ONE_INSTANCE,
603 OPT_COMPL_ONE_IALLOCATOR,
604 OPT_COMPL_INST_ADD_NODES,
605 OPT_COMPL_ONE_NODEGROUP) = range(100, 107)
607 OPT_COMPL_ALL = frozenset([
608 OPT_COMPL_MANY_NODES,
610 OPT_COMPL_ONE_INSTANCE,
612 OPT_COMPL_ONE_IALLOCATOR,
613 OPT_COMPL_INST_ADD_NODES,
614 OPT_COMPL_ONE_NODEGROUP,
618 class CliOption(Option):
619 """Custom option class for optparse.
622 ATTRS = Option.ATTRS + [
623 "completion_suggest",
625 TYPES = Option.TYPES + (
632 TYPE_CHECKER = Option.TYPE_CHECKER.copy()
633 TYPE_CHECKER["identkeyval"] = check_ident_key_val
634 TYPE_CHECKER["keyval"] = check_key_val
635 TYPE_CHECKER["unit"] = check_unit
636 TYPE_CHECKER["bool"] = check_bool
637 TYPE_CHECKER["list"] = check_list
640 # optparse.py sets make_option, so we do it for our own option class, too
641 cli_option = CliOption
# Generic "-d" flag; each repetition on the command line bumps the
# debug level by one (action="count").
DEBUG_OPT = cli_option("-d", "--debug",
                       action="count",
                       default=0,
                       help="Increase debugging level")
649 NOHDR_OPT = cli_option("--no-headers", default=False,
650 action="store_true", dest="no_headers",
651 help="Don't display column headers")
# Field separator for list-style output; None selects the built-in
# single-space default.
SEP_OPT = cli_option("--separator",
                     dest="separator",
                     action="store",
                     default=None,
                     help=("Separator between output fields"
                           " (defaults to one space)"))
658 USEUNITS_OPT = cli_option("--units", default=None,
659 dest="units", choices=("h", "m", "g", "t"),
660 help="Specify units for output (one of h/m/g/t)")
662 FIELDS_OPT = cli_option("-o", "--output", dest="output", action="store",
663 type="string", metavar="FIELDS",
664 help="Comma separated list of output fields")
666 FORCE_OPT = cli_option("-f", "--force", dest="force", action="store_true",
667 default=False, help="Force the operation")
669 CONFIRM_OPT = cli_option("--yes", dest="confirm", action="store_true",
670 default=False, help="Do not require confirmation")
672 IGNORE_OFFLINE_OPT = cli_option("--ignore-offline", dest="ignore_offline",
673 action="store_true", default=False,
674 help=("Ignore offline nodes and do as much"
677 TAG_ADD_OPT = cli_option("--tags", dest="tags",
678 default=None, help="Comma-separated list of instance"
681 TAG_SRC_OPT = cli_option("--from", dest="tags_source",
682 default=None, help="File with tag names")
# "--submit" makes a command fire-and-forget: queue the job and print
# its ID instead of waiting for completion.
SUBMIT_OPT = cli_option("--submit",
                        action="store_true",
                        dest="submit_only",
                        default=False,
                        help=("Submit the job and return the job ID, but"
                              " don't wait for the job to finish"))
689 SYNC_OPT = cli_option("--sync", dest="do_locking",
690 default=False, action="store_true",
691 help=("Grab locks while doing the queries"
692 " in order to ensure more consistent results"))
694 DRY_RUN_OPT = cli_option("--dry-run", default=False,
696 help=("Do not execute the operation, just run the"
697 " check steps and verify it it could be"
700 VERBOSE_OPT = cli_option("-v", "--verbose", default=False,
702 help="Increase the verbosity of the operation")
704 DEBUG_SIMERR_OPT = cli_option("--debug-simulate-errors", default=False,
705 action="store_true", dest="simulate_errors",
706 help="Debugging option that makes the operation"
707 " treat most runtime checks as failed")
709 NWSYNC_OPT = cli_option("--no-wait-for-sync", dest="wait_for_sync",
710 default=True, action="store_false",
711 help="Don't wait for sync (DANGEROUS!)")
713 ONLINE_INST_OPT = cli_option("--online", dest="online_inst",
714 action="store_true", default=False,
715 help="Enable offline instance")
717 OFFLINE_INST_OPT = cli_option("--offline", dest="offline_inst",
718 action="store_true", default=False,
719 help="Disable down instance")
# "-t": selects one of the cluster's known disk templates; the help
# string lists the valid choices dynamically.
DISK_TEMPLATE_OPT = cli_option("-t", "--disk-template",
                               dest="disk_template",
                               metavar="TEMPL",
                               default=None,
                               choices=list(constants.DISK_TEMPLATES),
                               help=("Custom disk setup (%s)" %
                                     utils.CommaJoin(constants.DISK_TEMPLATES)))
727 NONICS_OPT = cli_option("--no-nics", default=False, action="store_true",
728 help="Do not create any network cards for"
731 FILESTORE_DIR_OPT = cli_option("--file-storage-dir", dest="file_storage_dir",
732 help="Relative path under default cluster-wide"
733 " file storage dir to store file-based disks",
734 default=None, metavar="<DIR>")
# Driver used to access file-based instance disks; defaults to "loop".
FILESTORE_DRIVER_OPT = cli_option("--file-driver",
                                  dest="file_driver",
                                  metavar="<DRIVER>",
                                  default="loop",
                                  choices=list(constants.FILE_DRIVER),
                                  help="Driver to use for image files")
# "-I": delegate node selection to the named iallocator plugin instead
# of naming target nodes explicitly.
IALLOCATOR_OPT = cli_option("-I", "--iallocator",
                            type="string",
                            metavar="<NAME>",
                            default=None,
                            completion_suggest=OPT_COMPL_ONE_IALLOCATOR,
                            help="Select nodes for the instance automatically"
                            " using the <NAME> iallocator plugin")
747 DEFAULT_IALLOCATOR_OPT = cli_option("-I", "--default-iallocator",
749 help="Set the default instance allocator plugin",
750 default=None, type="string",
751 completion_suggest=OPT_COMPL_ONE_IALLOCATOR)
753 OS_OPT = cli_option("-o", "--os-type", dest="os", help="What OS to run",
755 completion_suggest=OPT_COMPL_ONE_OS)
757 OSPARAMS_OPT = cli_option("-O", "--os-parameters", dest="osparams",
758 type="keyval", default={},
759 help="OS parameters")
761 FORCE_VARIANT_OPT = cli_option("--force-variant", dest="force_variant",
762 action="store_true", default=False,
763 help="Force an unknown variant")
765 NO_INSTALL_OPT = cli_option("--no-install", dest="no_install",
766 action="store_true", default=False,
767 help="Do not install the OS (will"
770 BACKEND_OPT = cli_option("-B", "--backend-parameters", dest="beparams",
771 type="keyval", default={},
772 help="Backend parameters")
774 HVOPTS_OPT = cli_option("-H", "--hypervisor-parameters", type="keyval",
775 default={}, dest="hvparams",
776 help="Hypervisor parameters")
778 DISK_PARAMS_OPT = cli_option("-D", "--disk-parameters", dest="diskparams",
779 help="Disk template parameters, in the format"
780 " template:option=value,option=value,...",
781 type="identkeyval", action="append", default=[])
783 SPECS_MEM_SIZE_OPT = cli_option("--specs-mem-size", dest="ispecs_mem_size",
784 type="keyval", default={},
785 help="Memory count specs: min, max, std"
788 SPECS_CPU_COUNT_OPT = cli_option("--specs-cpu-count", dest="ispecs_cpu_count",
789 type="keyval", default={},
790 help="CPU count specs: min, max, std")
792 SPECS_DISK_COUNT_OPT = cli_option("--specs-disk-count",
793 dest="ispecs_disk_count",
794 type="keyval", default={},
795 help="Disk count specs: min, max, std")
797 SPECS_DISK_SIZE_OPT = cli_option("--specs-disk-size", dest="ispecs_disk_size",
798 type="keyval", default={},
799 help="Disk size specs: min, max, std (in MB)")
801 SPECS_NIC_COUNT_OPT = cli_option("--specs-nic-count", dest="ispecs_nic_count",
802 type="keyval", default={},
803 help="NIC count specs: min, max, std")
805 HYPERVISOR_OPT = cli_option("-H", "--hypervisor-parameters", dest="hypervisor",
806 help="Hypervisor and hypervisor options, in the"
807 " format hypervisor:option=value,option=value,...",
808 default=None, type="identkeyval")
810 HVLIST_OPT = cli_option("-H", "--hypervisor-parameters", dest="hvparams",
811 help="Hypervisor and hypervisor options, in the"
812 " format hypervisor:option=value,option=value,...",
813 default=[], action="append", type="identkeyval")
815 NOIPCHECK_OPT = cli_option("--no-ip-check", dest="ip_check", default=True,
816 action="store_false",
817 help="Don't check that the instance's IP"
820 NONAMECHECK_OPT = cli_option("--no-name-check", dest="name_check",
821 default=True, action="store_false",
822 help="Don't check that the instance's name"
825 NET_OPT = cli_option("--net",
826 help="NIC parameters", default=[],
827 dest="nics", action="append", type="identkeyval")
829 DISK_OPT = cli_option("--disk", help="Disk parameters", default=[],
830 dest="disks", action="append", type="identkeyval")
832 DISKIDX_OPT = cli_option("--disks", dest="disks", default=None,
833 help="Comma-separated list of disks"
834 " indices to act on (e.g. 0,2) (optional,"
835 " defaults to all disks)")
837 OS_SIZE_OPT = cli_option("-s", "--os-size", dest="sd_size",
838 help="Enforces a single-disk configuration using the"
839 " given disk size, in MiB unless a suffix is used",
840 default=None, type="unit", metavar="<size>")
842 IGNORE_CONSIST_OPT = cli_option("--ignore-consistency",
843 dest="ignore_consistency",
844 action="store_true", default=False,
845 help="Ignore the consistency of the disks on"
848 ALLOW_FAILOVER_OPT = cli_option("--allow-failover",
849 dest="allow_failover",
850 action="store_true", default=False,
851 help="If migration is not possible fallback to"
854 NONLIVE_OPT = cli_option("--non-live", dest="live",
855 default=True, action="store_false",
856 help="Do a non-live migration (this usually means"
857 " freeze the instance, save the state, transfer and"
858 " only then resume running on the secondary node)")
# Overrides the hypervisor's default migration mode for this operation.
# Fix: the help text previously opened a parenthesis ("(choose either
# live or non-live") without closing it.
MIGRATION_MODE_OPT = cli_option("--migration-mode", dest="migration_mode",
                                choices=list(constants.HT_MIGRATION_MODES),
                                help="Override default migration mode (choose"
                                " either live or non-live)")
866 NODE_PLACEMENT_OPT = cli_option("-n", "--node", dest="node",
867 help="Target node and optional secondary node",
868 metavar="<pnode>[:<snode>]",
869 completion_suggest=OPT_COMPL_INST_ADD_NODES)
871 NODE_LIST_OPT = cli_option("-n", "--node", dest="nodes", default=[],
872 action="append", metavar="<node>",
873 help="Use only this node (can be used multiple"
874 " times, if not given defaults to all nodes)",
875 completion_suggest=OPT_COMPL_ONE_NODE)
# Long option string kept as a named constant; it is reused directly
# below when building the option object.
NODEGROUP_OPT_NAME = "--node-group"
# "-g"/"--node-group": selects a node group by name or uuid, with shell
# completion of existing groups.
NODEGROUP_OPT = cli_option("-g", NODEGROUP_OPT_NAME,
                           help="Node group (name or uuid)",
                           metavar="<nodegroup>",
                           default=None, type="string",
                           completion_suggest=OPT_COMPL_ONE_NODEGROUP)
885 SINGLE_NODE_OPT = cli_option("-n", "--node", dest="node", help="Target node",
887 completion_suggest=OPT_COMPL_ONE_NODE)
889 NOSTART_OPT = cli_option("--no-start", dest="start", default=True,
890 action="store_false",
891 help="Don't start the instance after creation")
893 SHOWCMD_OPT = cli_option("--show-cmd", dest="show_command",
894 action="store_true", default=False,
895 help="Show command instead of executing it")
897 CLEANUP_OPT = cli_option("--cleanup", dest="cleanup",
898 default=False, action="store_true",
899 help="Instead of performing the migration, try to"
900 " recover from a failed cleanup. This is safe"
901 " to run even if the instance is healthy, but it"
902 " will create extra replication traffic and "
903 " disrupt briefly the replication (like during the"
906 STATIC_OPT = cli_option("-s", "--static", dest="static",
907 action="store_true", default=False,
908 help="Only show configuration data, not runtime data")
910 ALL_OPT = cli_option("--all", dest="show_all",
911 default=False, action="store_true",
912 help="Show info on all instances on the cluster."
913 " This can take a long time to run, use wisely")
915 SELECT_OS_OPT = cli_option("--select-os", dest="select_os",
916 action="store_true", default=False,
917 help="Interactive OS reinstall, lists available"
918 " OS templates for selection")
920 IGNORE_FAILURES_OPT = cli_option("--ignore-failures", dest="ignore_failures",
921 action="store_true", default=False,
922 help="Remove the instance from the cluster"
923 " configuration even if there are failures"
924 " during the removal process")
926 IGNORE_REMOVE_FAILURES_OPT = cli_option("--ignore-remove-failures",
927 dest="ignore_remove_failures",
928 action="store_true", default=False,
929 help="Remove the instance from the"
930 " cluster configuration even if there"
931 " are failures during the removal"
934 REMOVE_INSTANCE_OPT = cli_option("--remove-instance", dest="remove_instance",
935 action="store_true", default=False,
936 help="Remove the instance from the cluster")
938 DST_NODE_OPT = cli_option("-n", "--target-node", dest="dst_node",
939 help="Specifies the new node for the instance",
940 metavar="NODE", default=None,
941 completion_suggest=OPT_COMPL_ONE_NODE)
943 NEW_SECONDARY_OPT = cli_option("-n", "--new-secondary", dest="dst_node",
944 help="Specifies the new secondary node",
945 metavar="NODE", default=None,
946 completion_suggest=OPT_COMPL_ONE_NODE)
948 ON_PRIMARY_OPT = cli_option("-p", "--on-primary", dest="on_primary",
949 default=False, action="store_true",
950 help="Replace the disk(s) on the primary"
951 " node (applies only to internally mirrored"
952 " disk templates, e.g. %s)" %
953 utils.CommaJoin(constants.DTS_INT_MIRROR))
955 ON_SECONDARY_OPT = cli_option("-s", "--on-secondary", dest="on_secondary",
956 default=False, action="store_true",
957 help="Replace the disk(s) on the secondary"
958 " node (applies only to internally mirrored"
959 " disk templates, e.g. %s)" %
960 utils.CommaJoin(constants.DTS_INT_MIRROR))
962 AUTO_PROMOTE_OPT = cli_option("--auto-promote", dest="auto_promote",
963 default=False, action="store_true",
964 help="Lock all nodes and auto-promote as needed"
967 AUTO_REPLACE_OPT = cli_option("-a", "--auto", dest="auto",
968 default=False, action="store_true",
969 help="Automatically replace faulty disks"
970 " (applies only to internally mirrored"
971 " disk templates, e.g. %s)" %
972 utils.CommaJoin(constants.DTS_INT_MIRROR))
974 IGNORE_SIZE_OPT = cli_option("--ignore-size", dest="ignore_size",
975 default=False, action="store_true",
976 help="Ignore current recorded size"
977 " (useful for forcing activation when"
978 " the recorded size is wrong)")
980 SRC_NODE_OPT = cli_option("--src-node", dest="src_node", help="Source node",
982 completion_suggest=OPT_COMPL_ONE_NODE)
984 SRC_DIR_OPT = cli_option("--src-dir", dest="src_dir", help="Source directory",
987 SECONDARY_IP_OPT = cli_option("-s", "--secondary-ip", dest="secondary_ip",
988 help="Specify the secondary ip for the node",
989 metavar="ADDRESS", default=None)
991 READD_OPT = cli_option("--readd", dest="readd",
992 default=False, action="store_true",
993 help="Readd old node after replacing it")
995 NOSSH_KEYCHECK_OPT = cli_option("--no-ssh-key-check", dest="ssh_key_check",
996 default=True, action="store_false",
997 help="Disable SSH key fingerprint checking")
999 NODE_FORCE_JOIN_OPT = cli_option("--force-join", dest="force_join",
1000 default=False, action="store_true",
1001 help="Force the joining of a node")
1003 MC_OPT = cli_option("-C", "--master-candidate", dest="master_candidate",
1004 type="bool", default=None, metavar=_YORNO,
1005 help="Set the master_candidate flag on the node")
1007 OFFLINE_OPT = cli_option("-O", "--offline", dest="offline", metavar=_YORNO,
1008 type="bool", default=None,
1009 help=("Set the offline flag on the node"
1010 " (cluster does not communicate with offline"
1013 DRAINED_OPT = cli_option("-D", "--drained", dest="drained", metavar=_YORNO,
1014 type="bool", default=None,
1015 help=("Set the drained flag on the node"
1016 " (excluded from allocation operations)"))
1018 CAPAB_MASTER_OPT = cli_option("--master-capable", dest="master_capable",
1019 type="bool", default=None, metavar=_YORNO,
1020 help="Set the master_capable flag on the node")
1022 CAPAB_VM_OPT = cli_option("--vm-capable", dest="vm_capable",
1023 type="bool", default=None, metavar=_YORNO,
1024 help="Set the vm_capable flag on the node")
1026 ALLOCATABLE_OPT = cli_option("--allocatable", dest="allocatable",
1027 type="bool", default=None, metavar=_YORNO,
1028 help="Set the allocatable flag on a volume")
1030 NOLVM_STORAGE_OPT = cli_option("--no-lvm-storage", dest="lvm_storage",
1031 help="Disable support for lvm based instances"
1033 action="store_false", default=True)
1035 ENABLED_HV_OPT = cli_option("--enabled-hypervisors",
1036 dest="enabled_hypervisors",
1037 help="Comma-separated list of hypervisors",
1038 type="string", default=None)
1040 NIC_PARAMS_OPT = cli_option("-N", "--nic-parameters", dest="nicparams",
1041 type="keyval", default={},
1042 help="NIC parameters")
1044 CP_SIZE_OPT = cli_option("-C", "--candidate-pool-size", default=None,
1045 dest="candidate_pool_size", type="int",
1046 help="Set the candidate pool size")
1048 VG_NAME_OPT = cli_option("--vg-name", dest="vg_name",
1049 help=("Enables LVM and specifies the volume group"
1050 " name (cluster-wide) for disk allocation"
1051 " [%s]" % constants.DEFAULT_VG),
1052 metavar="VG", default=None)
1054 YES_DOIT_OPT = cli_option("--yes-do-it", "--ya-rly", dest="yes_do_it",
1055 help="Destroy cluster", action="store_true")
1057 NOVOTING_OPT = cli_option("--no-voting", dest="no_voting",
1058 help="Skip node agreement check (dangerous)",
1059 action="store_true", default=False)
1061 MAC_PREFIX_OPT = cli_option("-m", "--mac-prefix", dest="mac_prefix",
1062 help="Specify the mac prefix for the instance IP"
1063 " addresses, in the format XX:XX:XX",
1067 MASTER_NETDEV_OPT = cli_option("--master-netdev", dest="master_netdev",
1068 help="Specify the node interface (cluster-wide)"
1069 " on which the master IP address will be added"
1070 " (cluster init default: %s)" %
1071 constants.DEFAULT_BRIDGE,
1075 MASTER_NETMASK_OPT = cli_option("--master-netmask", dest="master_netmask",
1076 help="Specify the netmask of the master IP",
1080 USE_EXTERNAL_MIP_SCRIPT = cli_option("--use-external-mip-script",
1081 dest="use_external_mip_script",
1082 help="Specify whether to run a user-provided"
1083 " script for the master IP address turnup and"
1084 " turndown operations",
1085 type="bool", metavar=_YORNO, default=None)
1087 GLOBAL_FILEDIR_OPT = cli_option("--file-storage-dir", dest="file_storage_dir",
1088 help="Specify the default directory (cluster-"
1089 "wide) for storing the file-based disks [%s]" %
1090 constants.DEFAULT_FILE_STORAGE_DIR,
1092 default=constants.DEFAULT_FILE_STORAGE_DIR)
1094 GLOBAL_SHARED_FILEDIR_OPT = cli_option("--shared-file-storage-dir",
1095 dest="shared_file_storage_dir",
1096 help="Specify the default directory (cluster-"
1097 "wide) for storing the shared file-based"
1099 constants.DEFAULT_SHARED_FILE_STORAGE_DIR,
1100 metavar="SHAREDDIR",
1101 default=constants.DEFAULT_SHARED_FILE_STORAGE_DIR)
1103 NOMODIFY_ETCHOSTS_OPT = cli_option("--no-etc-hosts", dest="modify_etc_hosts",
1104 help="Don't modify /etc/hosts",
1105 action="store_false", default=True)
1107 NOMODIFY_SSH_SETUP_OPT = cli_option("--no-ssh-init", dest="modify_ssh_setup",
1108 help="Don't initialize SSH keys",
1109 action="store_false", default=True)
1111 ERROR_CODES_OPT = cli_option("--error-codes", dest="error_codes",
1112 help="Enable parseable error messages",
1113 action="store_true", default=False)
1115 NONPLUS1_OPT = cli_option("--no-nplus1-mem", dest="skip_nplusone_mem",
1116 help="Skip N+1 memory redundancy tests",
1117 action="store_true", default=False)
1119 REBOOT_TYPE_OPT = cli_option("-t", "--type", dest="reboot_type",
1120 help="Type of reboot: soft/hard/full",
1121 default=constants.INSTANCE_REBOOT_HARD,
1123 choices=list(constants.REBOOT_TYPES))
1125 IGNORE_SECONDARIES_OPT = cli_option("--ignore-secondaries",
1126 dest="ignore_secondaries",
1127 default=False, action="store_true",
1128 help="Ignore errors from secondaries")
1130 NOSHUTDOWN_OPT = cli_option("--noshutdown", dest="shutdown",
1131 action="store_false", default=True,
1132 help="Don't shutdown the instance (unsafe)")
1134 TIMEOUT_OPT = cli_option("--timeout", dest="timeout", type="int",
1135 default=constants.DEFAULT_SHUTDOWN_TIMEOUT,
1136 help="Maximum time to wait")
1138 SHUTDOWN_TIMEOUT_OPT = cli_option("--shutdown-timeout",
1139 dest="shutdown_timeout", type="int",
1140 default=constants.DEFAULT_SHUTDOWN_TIMEOUT,
1141 help="Maximum time to wait for instance shutdown")
1143 INTERVAL_OPT = cli_option("--interval", dest="interval", type="int",
1145 help=("Number of seconds between repetions of the"
1148 EARLY_RELEASE_OPT = cli_option("--early-release",
1149 dest="early_release", default=False,
1150 action="store_true",
1151 help="Release the locks on the secondary"
1154 NEW_CLUSTER_CERT_OPT = cli_option("--new-cluster-certificate",
1155 dest="new_cluster_cert",
1156 default=False, action="store_true",
1157 help="Generate a new cluster certificate")
1159 RAPI_CERT_OPT = cli_option("--rapi-certificate", dest="rapi_cert",
1161 help="File containing new RAPI certificate")
1163 NEW_RAPI_CERT_OPT = cli_option("--new-rapi-certificate", dest="new_rapi_cert",
1164 default=None, action="store_true",
1165 help=("Generate a new self-signed RAPI"
1168 SPICE_CERT_OPT = cli_option("--spice-certificate", dest="spice_cert",
1170 help="File containing new SPICE certificate")
1172 SPICE_CACERT_OPT = cli_option("--spice-ca-certificate", dest="spice_cacert",
1174 help="File containing the certificate of the CA"
1175 " which signed the SPICE certificate")
1177 NEW_SPICE_CERT_OPT = cli_option("--new-spice-certificate",
1178 dest="new_spice_cert", default=None,
1179 action="store_true",
1180 help=("Generate a new self-signed SPICE"
1183 NEW_CONFD_HMAC_KEY_OPT = cli_option("--new-confd-hmac-key",
1184 dest="new_confd_hmac_key",
1185 default=False, action="store_true",
1186 help=("Create a new HMAC key for %s" %
# Reads a replacement cluster domain secret from the given file.
# Fix: the help text repeated the word "new" ("Load new new cluster
# domain secret").
CLUSTER_DOMAIN_SECRET_OPT = cli_option("--cluster-domain-secret",
                                       dest="cluster_domain_secret",
                                       help=("Load new cluster domain"
                                             " secret from file"))
1195 NEW_CLUSTER_DOMAIN_SECRET_OPT = cli_option("--new-cluster-domain-secret",
1196 dest="new_cluster_domain_secret",
1197 default=False, action="store_true",
1198 help=("Create a new cluster domain"
# Cluster-level option constants shared by the gnt-* command definitions.
# Each cli_option() call mirrors optparse's Option interface.

USE_REPL_NET_OPT = cli_option("--use-replication-network",
                              dest="use_replication_network",
                              help="Whether to use the replication network"
                              " for talking to the nodes",
                              action="store_true", default=False)

MAINTAIN_NODE_HEALTH_OPT = \
  cli_option("--maintain-node-health", dest="maintain_node_health",
             metavar=_YORNO, default=None, type="bool",
             help="Configure the cluster to automatically maintain node"
             " health, by shutting down unknown instances, shutting down"
             " unknown DRBD devices, etc.")

IDENTIFY_DEFAULTS_OPT = \
  cli_option("--identify-defaults", dest="identify_defaults",
             default=False, action="store_true",
             help="Identify which saved instance parameters are equal to"
             " the current cluster defaults and set them as such, instead"
             " of marking them as overridden")

UIDPOOL_OPT = cli_option("--uid-pool", default=None,
                         action="store", dest="uid_pool",
                         help=("A list of user-ids or user-id"
                               " ranges separated by commas"))

ADD_UIDS_OPT = cli_option("--add-uids", default=None,
                          action="store", dest="add_uids",
                          help=("A list of user-ids or user-id"
                                " ranges separated by commas, to be"
                                " added to the user-id pool"))

REMOVE_UIDS_OPT = cli_option("--remove-uids", default=None,
                             action="store", dest="remove_uids",
                             help=("A list of user-ids or user-id"
                                   " ranges separated by commas, to be"
                                   " removed from the user-id pool"))

RESERVED_LVS_OPT = cli_option("--reserved-lvs", default=None,
                              action="store", dest="reserved_lvs",
                              help=("A comma-separated list of reserved"
                                    " logical volumes names, that will be"
                                    " ignored by cluster verify"))

ROMAN_OPT = cli_option("--roman",
                       dest="roman_integers", default=False,
                       action="store_true",
                       help="Use roman numbers for positive integers")

DRBD_HELPER_OPT = cli_option("--drbd-usermode-helper", dest="drbd_helper",
                             action="store", default=None,
                             help="Specifies usermode helper for DRBD")

# Note: negative-sense flag; dest defaults to True (DRBD enabled).
NODRBD_STORAGE_OPT = cli_option("--no-drbd-storage", dest="drbd_storage",
                                action="store_false", default=True,
                                help="Disable support for DRBD")

PRIMARY_IP_VERSION_OPT = \
  cli_option("--primary-ip-version", default=constants.IP4_VERSION,
             action="store", dest="primary_ip_version",
             metavar="%d|%d" % (constants.IP4_VERSION,
                                constants.IP6_VERSION),
             help="Cluster-wide IP version for primary IP")

# Valid names come from the module-level _PRIORITY_NAMES /
# _PRIONAME_TO_VALUE tables defined earlier in this file.
PRIORITY_OPT = cli_option("--priority", default=None, dest="priority",
                          metavar="|".join(name for name, _ in _PRIORITY_NAMES),
                          choices=_PRIONAME_TO_VALUE.keys(),
                          help="Priority for opcode processing")

HID_OS_OPT = cli_option("--hidden", dest="hidden",
                        type="bool", default=None, metavar=_YORNO,
                        help="Sets the hidden flag on the OS")

BLK_OS_OPT = cli_option("--blacklisted", dest="blacklisted",
                        type="bool", default=None, metavar=_YORNO,
                        help="Sets the blacklisted flag on the OS")
1277 PREALLOC_WIPE_DISKS_OPT = cli_option("--prealloc-wipe-disks", default=None,
1278 type="bool", metavar=_YORNO,
1279 dest="prealloc_wipe_disks",
1280 help=("Wipe disks prior to instance"
NODE_PARAMS_OPT = cli_option("--node-parameters", dest="ndparams",
                             type="keyval", default=None,
                             help="Node parameters")

ALLOC_POLICY_OPT = cli_option("--alloc-policy", dest="alloc_policy",
                              action="store", metavar="POLICY", default=None,
                              help="Allocation policy for the node group")

# "SoR" = State of Record, i.e. the configured/recorded power state.
NODE_POWERED_OPT = cli_option("--node-powered", default=None,
                              type="bool", metavar=_YORNO,
                              dest="node_powered",
                              help="Specify if the SoR for node is powered")

# Timeouts for out-of-band (OOB) helper invocations, in seconds.
OOB_TIMEOUT_OPT = cli_option("--oob-timeout", dest="oob_timeout", type="int",
                             default=constants.OOB_TIMEOUT,
                             help="Maximum time to wait for out-of-band helper")

POWER_DELAY_OPT = cli_option("--power-delay", dest="power_delay", type="float",
                             default=constants.OOB_POWER_DELAY,
                             help="Time in seconds to wait between power-ons")
1304 FORCE_FILTER_OPT = cli_option("-F", "--filter", dest="force_filter",
1305 action="store_true", default=False,
1306 help=("Whether command argument should be treated"
1309 NO_REMEMBER_OPT = cli_option("--no-remember",
1311 action="store_true", default=False,
1312 help="Perform but do not record the change"
1313 " in the configuration")
PRIMARY_ONLY_OPT = cli_option("-p", "--primary-only",
                              default=False, action="store_true",
                              help="Evacuate primary instances only")

SECONDARY_ONLY_OPT = cli_option("-s", "--secondary-only",
                                default=False, action="store_true",
                                help="Evacuate secondary instances only"
                                " (applies only to internally mirrored"
                                " disk templates, e.g. %s)" %
                                utils.CommaJoin(constants.DTS_INT_MIRROR))

STARTUP_PAUSED_OPT = cli_option("--paused", dest="startup_paused",
                                action="store_true", default=False,
                                help="Pause instance at startup")

# "append" action: the option may be given multiple times, collecting a
# list of destination groups.
TO_GROUP_OPT = cli_option("--to", dest="to", metavar="<group>",
                          help="Destination node group (name or uuid)",
                          default=None, action="append",
                          completion_suggest=OPT_COMPL_ONE_NODEGROUP)

IGNORE_ERRORS_OPT = cli_option("-I", "--ignore-errors", default=[],
                               action="append", dest="ignore_errors",
                               choices=list(constants.CV_ALL_ECODES_STRINGS),
                               help="Error code to be ignored")
1340 DISK_STATE_OPT = cli_option("--disk-state", default=[], dest="disk_state",
1342 help=("Specify disk state information in the format"
1343 " storage_type/identifier:option=value,..."),
1346 HV_STATE_OPT = cli_option("--hypervisor-state", default=[], dest="hv_state",
1348 help=("Specify hypervisor state information in the"
1349 " format hypervisor:option=value,..."),
IGNORE_IPOLICY_OPT = cli_option("--ignore-ipolicy", dest="ignore_ipolicy",
                                action="store_true", default=False,
                                help="Ignore instance policy violations")
#: Options provided by all commands
COMMON_OPTS = [DEBUG_OPT]
1360 # common options for creating instances. add and import then add their own
1362 COMMON_CREATE_OPTS = [
1367 FILESTORE_DRIVER_OPT,
def _ParseArgs(argv, commands, aliases, env_override):
  """Parser for the command line arguments.

  This function parses the arguments and returns the function which
  must be executed together with its (modified) arguments.

  @param argv: the command line
  @param commands: dictionary with special contents, see the design
      doc for cmdline handling
  @param aliases: dictionary with command aliases {'alias': 'target, ...}
  @param env_override: list of env variables allowed for default args
  @return: (function, options, args) on success, or (None, None, None)
      when the command is unknown or its arguments do not validate

  """
  # Every env-overridable name must be a known command.
  assert not (env_override - set(commands))

  # NOTE(review): the source extraction dropped the if/else around the
  # next two assignments (empty argv vs. normal invocation), the
  # sys.exit() after the version printout, the sortedcmds.sort() call and
  # the command/alias lookup lines -- verify against upstream.
  binary = "<command>"
  binary = argv[0].split("/")[-1]

  if len(argv) > 1 and argv[1] == "--version":
    ToStdout("%s (ganeti %s) %s", binary, constants.VCS_VERSION,
             constants.RELEASE_VERSION)
    # Quit right away. That way we don't have to care about this special
    # argument. optparse.py does it the same.

  if len(argv) < 2 or not (argv[1] in commands or
                           argv[1] in aliases):
    # let's do a nice thing
    sortedcmds = commands.keys()

    ToStdout("Usage: %s {command} [options...] [argument...]", binary)
    ToStdout("%s <command> --help to see details, or man %s", binary, binary)

    # compute the max line length for cmd + usage
    mlen = max([len(" %s" % cmd) for cmd in commands])
    mlen = min(60, mlen) # should not get here...

    # and format a nice command list
    ToStdout("Commands:")
    for cmd in sortedcmds:
      cmdstr = " %s" % (cmd,)
      help_text = commands[cmd][4]
      help_lines = textwrap.wrap(help_text, 79 - 3 - mlen)
      ToStdout("%-*s - %s", mlen, cmdstr, help_lines.pop(0))
      for line in help_lines:
        ToStdout("%-*s %s", mlen, "", line)

    return None, None, None

  # get command, unalias it, and look it up in commands
  # NOTE(review): the `cmd = argv.pop(1)` and the alias-membership test
  # preceding these raises were dropped by the extraction.
    raise errors.ProgrammerError("Alias '%s' overrides an existing"
    if aliases[cmd] not in commands:
      raise errors.ProgrammerError("Alias '%s' maps to non-existing"
                                   " command '%s'" % (cmd, aliases[cmd]))

  # Allow a per-command environment variable to prepend default arguments.
  if cmd in env_override:
    args_env_name = ("%s_%s" % (binary.replace("-", "_"), cmd)).upper()
    env_args = os.environ.get(args_env_name)
    argv = utils.InsertAtPos(argv, 1, shlex.split(env_args))

  func, args_def, parser_opts, usage, description = commands[cmd]
  parser = OptionParser(option_list=parser_opts + COMMON_OPTS,
                        description=description,
                        formatter=TitledHelpFormatter(),
                        usage="%%prog %s %s" % (cmd, usage))
  parser.disable_interspersed_args()
  options, args = parser.parse_args(args=argv[1:])

  if not _CheckArguments(cmd, args_def, args):
    return None, None, None

  return func, options, args
def _CheckArguments(cmd, args_def, args):
  """Verifies the arguments using the argument definition.

  Algorithm:

  1. Abort with error if values specified by user but none expected.

  1. For each argument in definition

    1. Keep running count of minimum number of values (min_count)
    1. Keep running count of maximum number of values (max_count)
    1. If it has an unlimited number of values

      1. Abort with error if it's not the last argument in the definition

  1. If last argument has limited number of values

    1. Abort with error if number of values doesn't match or is too large

  1. Abort with error if user didn't pass enough values (min_count)

  @return: True when the arguments validate, False otherwise

  """
  # NOTE(review): the min_count/max_count initialisation, several branch
  # bodies and the early/final `return` statements were dropped by the
  # source extraction; only the surviving lines are listed below.
  if args and not args_def:
    ToStderr("Error: Command %s expects no arguments", cmd)

  last_idx = len(args_def) - 1

  for idx, arg in enumerate(args_def):
    if min_count is None:
    elif arg.min is not None:
      min_count += arg.min

    if max_count is None:
    elif arg.max is not None:
      max_count += arg.max

    # Only the last argument definition may be open-ended (max=None).
    check_max = (arg.max is not None)
    elif arg.max is None:
      raise errors.ProgrammerError("Only the last argument can have max=None")

  # Command with exact number of arguments
  if (min_count is not None and max_count is not None and
      min_count == max_count and len(args) != min_count):
    ToStderr("Error: Command %s expects %d argument(s)", cmd, min_count)

  # Command with limited number of arguments
  if max_count is not None and len(args) > max_count:
    ToStderr("Error: Command %s expects only %d argument(s)",

  # Command with some required arguments
  if min_count is not None and len(args) < min_count:
    ToStderr("Error: Command %s expects at least %d argument(s)",
def SplitNodeOption(value):
  """Splits the value of a --node option.

  A "pnode:snode" value is split at the first colon and both parts are
  returned (as a list, preserving the historical return type of this
  branch); any other value -- including None or the empty string --
  yields the tuple C{(value, None)}.

  @param value: string value of the --node option, or None
  @return: (primary, secondary) pair

  """
  if value and ":" in value:
    # maxsplit=1 so secondary names containing colons survive intact
    return value.split(":", 1)
  else:
    return (value, None)
def CalculateOSNames(os_name, os_variants):
  """Calculates all the names an OS can be called, according to its variants.

  @type os_name: string
  @param os_name: base name of the os
  @type os_variants: list or None
  @param os_variants: list of supported variants
  @rtype: list
  @return: list of valid names

  """
  # NOTE(review): the source extraction dropped the branch handling an
  # empty/None os_variants (upstream returns [os_name] in that case) --
  # verify against upstream before relying on this listing.
  return ["%s+%s" % (os_name, v) for v in os_variants]
def ParseFields(selected, default):
  """Parses the values of "--field"-like options.

  @type selected: string or None
  @param selected: User-selected options
  @type default: list
  @param default: Default fields
  @rtype: list
  @return: the list of field names to use

  """
  # No explicit selection: use the caller-supplied defaults as-is.
  if selected is None:
    return default

  # A leading "+" means "extend the defaults" instead of replacing them.
  if selected.startswith("+"):
    return default + selected[1:].split(",")

  return selected.split(",")
# Decorator alias: wraps a function so RPC infrastructure is initialised
# before the call and shut down afterwards (see rpc.RunWithRPC).
UsesRPC = rpc.RunWithRPC
def AskUser(text, choices=None):
  """Ask the user a question.

  @param text: the question to ask

  @param choices: list with elements tuples (input_char, return_value,
      description); if not given, it will default to: [('y', True,
      'Perform the operation'), ('n', False, 'Do no do the operation')];
      note that the '?' char is reserved for help

  @return: one of the return values from the choices list; if input is
      not possible (i.e. not running with a tty, we return the last
      entry from the list

  """
  # NOTE(review): the source extraction dropped several lines here (the
  # choices-is-None guard, new_text initialisation, the tty open
  # try/except and the interactive read/dispatch loop); only the
  # surviving lines are listed below -- verify against upstream.
  choices = [("y", True, "Perform the operation"),
             ("n", False, "Do not perform the operation")]
  if not choices or not isinstance(choices, list):
    raise errors.ProgrammerError("Invalid choices argument to AskUser")
  for entry in choices:
    if not isinstance(entry, tuple) or len(entry) < 3 or entry[0] == "?":
      raise errors.ProgrammerError("Invalid choices element to AskUser")

  # Default answer when no interactive input is possible: the last choice.
  answer = choices[-1][1]
  for line in text.splitlines():
    new_text.append(textwrap.fill(line, 70, replace_whitespace=False))
  text = "\n".join(new_text)
  # file() is the Python 2 builtin; opens the controlling terminal.
  f = file("/dev/tty", "a+")
  chars = [entry[0] for entry in choices]
  # The last choice is the default; mark it with brackets in the prompt.
  chars[-1] = "[%s]" % chars[-1]
  maps = dict([(entry[0], entry[1]) for entry in choices])
  f.write("/".join(chars))
  # readline(2): read at most one answer character plus the newline.
  line = f.readline(2).strip().lower()
  for entry in choices:
    f.write(" %s - %s\n" % (entry[0], entry[2]))
class JobSubmittedException(Exception):
  """Job was submitted, client should exit.

  This exception has one argument, the ID of the job that was
  submitted. The handler should print this ID.

  This is not an error, just a structured way to exit from clients.

  """
def SendJob(ops, cl=None):
  """Function to submit an opcode without waiting for the results.

  @type ops: list
  @param ops: list of opcodes
  @type cl: luxi.Client
  @param cl: the luxi client to use for communicating with the master;
      if None, a new client will be created
  @return: the ID of the submitted job

  """
  # NOTE(review): the source extraction dropped the cl-is-None default
  # handling and the final `return job_id` line -- verify against
  # upstream.
  job_id = cl.SubmitJob(ops)
def GenericPollJob(job_id, cbs, report_cbs):
  """Generic job-polling function.

  Polls via the data callbacks until the job reaches a final status,
  forwarding log messages and status reports to the reporting callbacks,
  then raises on failure or returns the opcode results on success.

  @type job_id: number
  @param job_id: Job ID
  @type cbs: Instance of L{JobPollCbBase}
  @param cbs: Data callbacks
  @type report_cbs: Instance of L{JobPollReportCbBase}
  @param report_cbs: Reporting callbacks

  """
  # NOTE(review): the polling loop header ("while True:"), several
  # continuation lines and some if/elif/break statements were dropped by
  # the source extraction; only the surviving lines are listed below --
  # verify against upstream.
  prev_job_info = None
  prev_logmsg_serial = None

  result = cbs.WaitForJobChangeOnce(job_id, ["status"], prev_job_info,
    # job not found, go away!
    raise errors.JobLost("Job with id %s lost" % job_id)

  if result == constants.JOB_NOTCHANGED:
    report_cbs.ReportNotChanged(job_id, status)

  # Split result, a tuple of (field values, log entries)
  (job_info, log_entries) = result
  (status, ) = job_info

  for log_entry in log_entries:
    (serial, timestamp, log_type, message) = log_entry
    report_cbs.ReportLogMessage(job_id, serial, timestamp,
    # Track the highest serial seen so known messages aren't re-fetched.
    prev_logmsg_serial = max(prev_logmsg_serial, serial)

  # TODO: Handle canceled and archived jobs
  elif status in (constants.JOB_STATUS_SUCCESS,
                  constants.JOB_STATUS_ERROR,
                  constants.JOB_STATUS_CANCELING,
                  constants.JOB_STATUS_CANCELED):

  prev_job_info = job_info

  # Job finished: fetch the per-opcode status and results.
  jobs = cbs.QueryJobs([job_id], ["status", "opstatus", "opresult"])
    raise errors.JobLost("Job with id %s lost" % job_id)

  status, opstatus, result = jobs[0]

  if status == constants.JOB_STATUS_SUCCESS:

  if status in (constants.JOB_STATUS_CANCELING, constants.JOB_STATUS_CANCELED):
    raise errors.OpExecError("Job was canceled")

  for idx, (status, msg) in enumerate(zip(opstatus, result)):
    if status == constants.OP_STATUS_SUCCESS:
    elif status == constants.OP_STATUS_ERROR:
      errors.MaybeRaise(msg)

      raise errors.OpExecError("partial failure (opcode %d): %s" %

      raise errors.OpExecError(str(msg))

  # default failure mode
  raise errors.OpExecError(result)
class JobPollCbBase:
  """Base class for L{GenericPollJob} callbacks.

  """
  # NOTE(review): the `def __init__(self):` line was dropped by the
  # source extraction; its docstring survives below.
  """Initializes this class.

  """
  def WaitForJobChangeOnce(self, job_id, fields,
                           prev_job_info, prev_log_serial):
    """Waits for changes on a job.

    """
    raise NotImplementedError()

  def QueryJobs(self, job_ids, fields):
    """Returns the selected fields for the selected job IDs.

    @type job_ids: list of numbers
    @param job_ids: Job IDs
    @type fields: list of strings
    @param fields: Fields

    """
    raise NotImplementedError()
class JobPollReportCbBase:
  """Base class for L{GenericPollJob} reporting callbacks.

  """
  # NOTE(review): the `def __init__(self):` line was dropped by the
  # source extraction; its docstring survives below.
  """Initializes this class.

  """
  def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
    """Handles a log message.

    """
    raise NotImplementedError()

  def ReportNotChanged(self, job_id, status):
    """Called if a job hasn't changed in a while.

    @type job_id: number
    @param job_id: Job ID
    @type status: string or None
    @param status: Job status if available

    """
    raise NotImplementedError()
class _LuxiJobPollCb(JobPollCbBase):
  # Data callbacks for L{GenericPollJob} backed by a luxi client.
  def __init__(self, cl):
    """Initializes this class.

    @param cl: luxi client used for all job queries

    """
    JobPollCbBase.__init__(self)
    # NOTE(review): the `self.cl = cl` assignment was dropped by the
    # source extraction; the methods below rely on self.cl.

  def WaitForJobChangeOnce(self, job_id, fields,
                           prev_job_info, prev_log_serial):
    """Waits for changes on a job.

    """
    return self.cl.WaitForJobChangeOnce(job_id, fields,
                                        prev_job_info, prev_log_serial)

  def QueryJobs(self, job_ids, fields):
    """Returns the selected fields for the selected job IDs.

    """
    return self.cl.QueryJobs(job_ids, fields)
class FeedbackFnJobPollReportCb(JobPollReportCbBase):
  # Reporting callbacks that forward log messages to a feedback function.
  def __init__(self, feedback_fn):
    """Initializes this class.

    @type feedback_fn: callable
    @param feedback_fn: function receiving (timestamp, log_type, log_msg)
        tuples for every job log message

    """
    JobPollReportCbBase.__init__(self)

    self.feedback_fn = feedback_fn

    assert callable(feedback_fn)

  def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
    """Handles a log message.

    """
    self.feedback_fn((timestamp, log_type, log_msg))

  def ReportNotChanged(self, job_id, status):
    """Called if a job hasn't changed in a while.

    """
    # NOTE(review): the (presumably empty) body of this method was
    # dropped by the source extraction -- verify against upstream.
class StdioJobPollReportCb(JobPollReportCbBase):
  # Reporting callbacks that print to stdout/stderr.
  # NOTE(review): the `def __init__(self):` line was dropped by the
  # source extraction; its docstring and body survive below.
    """Initializes this class.

    """
    JobPollReportCbBase.__init__(self)

    # Track which one-time notices have already been printed.
    self.notified_queued = False
    self.notified_waitlock = False

  def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
    """Handles a log message.

    """
    ToStdout("%s %s", time.ctime(utils.MergeTime(timestamp)),
             FormatLogMessage(log_type, log_msg))

  def ReportNotChanged(self, job_id, status):
    """Called if a job hasn't changed in a while.

    @type job_id: number
    @param job_id: Job ID
    @type status: string or None
    @param status: Job status if available

    """
    # Each notice is printed at most once per job.
    if status == constants.JOB_STATUS_QUEUED and not self.notified_queued:
      ToStderr("Job %s is waiting in queue", job_id)
      self.notified_queued = True

    elif status == constants.JOB_STATUS_WAITING and not self.notified_waitlock:
      ToStderr("Job %s is trying to acquire all necessary locks", job_id)
      self.notified_waitlock = True
def FormatLogMessage(log_type, log_msg):
  """Formats a job message according to its type.

  Entries that are not plain messages are stringified first; the result
  is always passed through utils.SafeEncode.

  """
  # Only ELOG_MESSAGE entries are passed through unchanged.
  text = log_msg if log_type == constants.ELOG_MESSAGE else str(log_msg)
  return utils.SafeEncode(text)
def PollJob(job_id, cl=None, feedback_fn=None, reporter=None):
  """Function to poll for the result of a job.

  @type job_id: job identified
  @param job_id: the job to poll for results
  @type cl: luxi.Client
  @param cl: the luxi client to use for communicating with the master;
      if None, a new client will be created
  @param feedback_fn: optional function receiving job log messages
  @param reporter: optional L{JobPollReportCbBase} instance; mutually
      exclusive with feedback_fn

  """
  # NOTE(review): the branches selecting between a feedback_fn-based and
  # a stdio-based reporter (and the cl default handling) were partly
  # dropped by the source extraction; only the surviving lines are listed
  # below -- verify against upstream.
  if reporter is None:
    reporter = FeedbackFnJobPollReportCb(feedback_fn)
    reporter = StdioJobPollReportCb()
  raise errors.ProgrammerError("Can't specify reporter and feedback function")

  return GenericPollJob(job_id, _LuxiJobPollCb(cl), reporter)
def SubmitOpCode(op, cl=None, feedback_fn=None, opts=None, reporter=None):
  """Legacy function to submit an opcode.

  This is just a simple wrapper over the construction of the processor
  instance. It should be extended to better handle feedback and
  interaction functions.

  @return: the result of the submitted opcode

  """
  # NOTE(review): the cl default handling and the closing arguments of
  # the PollJob call were dropped by the source extraction.
  SetGenericOpcodeOpts([op], opts)

  job_id = SendJob([op], cl=cl)

  op_results = PollJob(job_id, cl=cl, feedback_fn=feedback_fn,

  # Single opcode submitted, so the single result is returned directly.
  return op_results[0]
def SubmitOrSend(op, opts, cl=None, feedback_fn=None):
  """Wrapper around SubmitOpCode or SendJob.

  This function will decide, based on the 'opts' parameter, whether to
  submit and wait for the result of the opcode (and return it), or
  whether to just send the job and print its identifier. It is used in
  order to simplify the implementation of the '--submit' option.

  It will also process the opcodes if we're sending them via SendJob
  (otherwise SubmitOpCode does it).

  """
  # NOTE(review): the `job = [op]` line was dropped by the source
  # extraction; `job` below is otherwise undefined.
  if opts and opts.submit_only:
    SetGenericOpcodeOpts(job, opts)
    job_id = SendJob(job, cl=cl)
    # Not an error: structured signal for the caller to print the job ID
    # and exit without waiting.
    raise JobSubmittedException(job_id)

  return SubmitOpCode(op, cl=cl, feedback_fn=feedback_fn, opts=opts)
def SetGenericOpcodeOpts(opcode_list, options):
  """Processor for generic options.

  This function updates the given opcodes based on generic command
  line options (like debug, dry-run, etc.).

  @param opcode_list: list of opcodes
  @param options: command line options or None
  @return: None (in-place modification)

  """
  # NOTE(review): the early return for `options` being None was dropped
  # by the source extraction; as listed, a None `options` would raise
  # AttributeError below.
  for op in opcode_list:
    op.debug_level = options.debug
    if hasattr(options, "dry_run"):
      op.dry_run = options.dry_run
    if getattr(options, "priority", None) is not None:
      # Map the symbolic priority name to its numeric value.
      op.priority = _PRIONAME_TO_VALUE[options.priority]
  # NOTE(review): the function header (presumably `def GetClient():`),
  # its docstring and the opening `try:` lines were dropped by the source
  # extraction; the lines below appear to be the body of a function that
  # creates a luxi client, with an ssconf-based diagnosis when no master
  # daemon is reachable -- verify against upstream.
  # TODO: Cache object?
  client = luxi.Client()
  except luxi.NoMasterError:
    ss = ssconf.SimpleStore()

    # Try to read ssconf file
    except errors.ConfigurationError:
      raise errors.OpPrereqError("Cluster not initialized or this machine is"
                                 " not part of a cluster")

    master, myself = ssconf.GetMasterAndMyself(ss=ss)
    if master != myself:
      raise errors.OpPrereqError("This is not the master node, please connect"
                                 " to node '%s' and rerun the command" %
def FormatError(err):
  """Return a formatted error message for a given error.

  This function takes an exception instance and returns a tuple
  consisting of two values: first, the recommended exit code, and
  second, a string describing the error message (not
  newline-terminated).

  """
  # NOTE(review): the initialisation of `retcode`, the `obuf` StringIO
  # buffer and `msg` (presumably str(err)), plus several `else:` lines
  # and continuation lines, were dropped by the source extraction; only
  # the surviving lines are listed below -- verify against upstream.
  if isinstance(err, errors.ConfigurationError):
    txt = "Corrupt configuration file: %s" % msg
    obuf.write(txt + "\n")
    obuf.write("Aborting.")
  elif isinstance(err, errors.HooksAbort):
    obuf.write("Failure: hooks execution failed:\n")
    for node, script, out in err.args[0]:
      obuf.write(" node: %s, script: %s, output: %s\n" %
                 (node, script, out))
      obuf.write(" node: %s, script: %s (no output)\n" %
  elif isinstance(err, errors.HooksFailure):
    obuf.write("Failure: hooks general failure: %s" % msg)
  elif isinstance(err, errors.ResolverError):
    this_host = netutils.Hostname.GetSysName()
    # Distinguish between failing to resolve ourselves and another host.
    if err.args[0] == this_host:
      msg = "Failure: can't resolve my own hostname ('%s')"
      msg = "Failure: can't resolve hostname '%s'"
    obuf.write(msg % err.args[0])
  elif isinstance(err, errors.OpPrereqError):
    # Two args means (message, error-type); one arg is just a message.
    if len(err.args) == 2:
      obuf.write("Failure: prerequisites not met for this"
                 " operation:\nerror type: %s, error details:\n%s" %
                 (err.args[1], err.args[0]))
      obuf.write("Failure: prerequisites not met for this"
                 " operation:\n%s" % msg)
  elif isinstance(err, errors.OpExecError):
    obuf.write("Failure: command execution error:\n%s" % msg)
  elif isinstance(err, errors.TagError):
    obuf.write("Failure: invalid tag(s) given:\n%s" % msg)
  elif isinstance(err, errors.JobQueueDrainError):
    obuf.write("Failure: the job queue is marked for drain and doesn't"
               " accept new requests\n")
  elif isinstance(err, errors.JobQueueFull):
    obuf.write("Failure: the job queue is full and doesn't accept new"
               " job submissions until old jobs are archived\n")
  elif isinstance(err, errors.TypeEnforcementError):
    obuf.write("Parameter Error: %s" % msg)
  elif isinstance(err, errors.ParameterError):
    obuf.write("Failure: unknown/wrong parameter name '%s'" % msg)
  elif isinstance(err, luxi.NoMasterError):
    obuf.write("Cannot communicate with the master daemon.\nIs it running"
               " and listening for connections?")
  elif isinstance(err, luxi.TimeoutError):
    obuf.write("Timeout while talking to the master daemon. Jobs might have"
               " been submitted and will continue to run even if the call"
               " timed out. Useful commands in this situation are \"gnt-job"
               " list\", \"gnt-job cancel\" and \"gnt-job watch\". Error:\n")
  elif isinstance(err, luxi.PermissionError):
    obuf.write("It seems you don't have permissions to connect to the"
               " master daemon.\nPlease retry as a different user.")
  elif isinstance(err, luxi.ProtocolError):
    obuf.write("Unhandled protocol error while talking to the master daemon:\n"
  elif isinstance(err, errors.JobLost):
    obuf.write("Error checking job status: %s" % msg)
  elif isinstance(err, errors.QueryFilterParseError):
    obuf.write("Error while parsing query filter: %s\n" % err.args[0])
    obuf.write("\n".join(err.GetDetails()))
  elif isinstance(err, errors.GenericError):
    obuf.write("Unhandled Ganeti error: %s" % msg)
  elif isinstance(err, JobSubmittedException):
    # Not an error: report the ID of the submitted job.
    obuf.write("JobID: %s\n" % err.args[0])
  obuf.write("Unhandled exception: %s" % msg)
  return retcode, obuf.getvalue().rstrip("\n")
def GenericMain(commands, override=None, aliases=None,
                env_override=frozenset()):
  """Generic main function for all the gnt-* commands.

  @param commands: a dictionary with a special structure, see the design doc
                   for command line handling.
  @param override: if not None, we expect a dictionary with keys that will
                   override command line options; this can be used to pass
                   options from the scripts to generic functions
  @param aliases: dictionary with command aliases {'alias': 'target, ...}
  @param env_override: list of environment names which are allowed to submit
                       default args for commands
  @return: the exit code of the command

  """
  # NOTE(review): the surrounding try/except blocks, `else:` branches and
  # the final exit/return handling were dropped by the source extraction;
  # only the surviving lines are listed below -- verify against upstream.
  # save the program name and the entire command line for later logging
  binary = os.path.basename(sys.argv[0]) or sys.argv[0]
  if len(sys.argv) >= 2:
    binary += " " + sys.argv[1]
    old_cmdline = " ".join(sys.argv[2:])
  binary = "<unknown program>"

  func, options, args = _ParseArgs(sys.argv, commands, aliases, env_override)
  except errors.ParameterError, err:
    result, err_msg = FormatError(err)

  if func is None: # parse error

  # Script-supplied overrides win over command-line options.
  if override is not None:
    for key, val in override.iteritems():
      setattr(options, key, val)

  utils.SetupLogging(constants.LOG_COMMANDS, binary, debug=options.debug,
                     stderr_logging=True)

  logging.info("run with arguments '%s'", old_cmdline)
  logging.info("run with no arguments")

  result = func(options, args)
  except (errors.GenericError, luxi.ProtocolError,
          JobSubmittedException), err:
    result, err_msg = FormatError(err)
    logging.exception("Error during command processing")
  except KeyboardInterrupt:
    result = constants.EXIT_FAILURE
    ToStderr("Aborted. Note that if the operation created any jobs, they"
             " might have been submitted and"
             " will continue to run in the background.")
  except IOError, err:
    if err.errno == errno.EPIPE:
      # our terminal went away, we'll exit
      sys.exit(constants.EXIT_FAILURE)
def ParseNicOption(optvalue):
  """Parses the value of the --net option(s).

  @param optvalue: list of (index, settings-dict) pairs from the command
      line
  @return: list of per-NIC parameter dictionaries

  """
  # NOTE(review): the `try:` opening this block, the int() conversion of
  # each index and the final slot assignment/return were dropped by the
  # source extraction -- verify against upstream.
  nic_max = max(int(nidx[0]) + 1 for nidx in optvalue)
  except (TypeError, ValueError), err:
    raise errors.OpPrereqError("Invalid NIC index passed: %s" % str(err))

  # NOTE(review): [{}] * nic_max places nic_max references to ONE shared
  # dict in the list; this is safe only if every used slot is reassigned
  # (not mutated in place) -- confirm against the dropped lines/upstream.
  nics = [{}] * nic_max
  for nidx, ndict in optvalue:
    if not isinstance(ndict, dict):
      raise errors.OpPrereqError("Invalid nic/%d value: expected dict,"
                                 " got %s" % (nidx, ndict))
    utils.ForceDictType(ndict, constants.INIC_PARAMS_TYPES)
def GenericInstanceCreate(mode, opts, args):
  """Add an instance to the cluster via either creation or import.

  @param mode: constants.INSTANCE_CREATE or constants.INSTANCE_IMPORT
  @param opts: the command line options selected by the user
  @param args: should contain only one element, the new instance name
  @rtype: int
  @return: the desired exit code

  """
  # NOTE(review): many structural lines (try/except headers, `else:`
  # branches, several assignments such as `instance = args[0]`, nic
  # defaults, tags default and parts of the OpInstanceCreate keyword
  # list) were dropped by the source extraction; only the surviving lines
  # are listed below -- verify against upstream.
  (pnode, snode) = SplitNodeOption(opts.node)
  hypervisor, hvparams = opts.hypervisor
  nics = ParseNicOption(opts.nics)
  elif mode == constants.INSTANCE_CREATE:
    # default of one nic, all auto
  if opts.disk_template == constants.DT_DISKLESS:
    if opts.disks or opts.sd_size is not None:
      raise errors.OpPrereqError("Diskless instance but disk"
                                 " information passed")
  if (not opts.disks and not opts.sd_size
      and mode == constants.INSTANCE_CREATE):
    raise errors.OpPrereqError("No disk information specified")
  if opts.disks and opts.sd_size is not None:
    raise errors.OpPrereqError("Please use either the '--disk' or"
  if opts.sd_size is not None:
    # Legacy single-disk syntax: convert to the generic disk list form.
    opts.disks = [(0, {constants.IDISK_SIZE: opts.sd_size})]
  disk_max = max(int(didx[0]) + 1 for didx in opts.disks)
  except ValueError, err:
    raise errors.OpPrereqError("Invalid disk index passed: %s" % str(err))
  # NOTE(review): [{}] * disk_max shares one dict across all slots; see
  # the matching note in ParseNicOption.
  disks = [{}] * disk_max
  for didx, ddict in opts.disks:
    if not isinstance(ddict, dict):
      msg = "Invalid disk/%d value: expected dict, got %s" % (didx, ddict)
      raise errors.OpPrereqError(msg)
    elif constants.IDISK_SIZE in ddict:
      # 'size' and 'adopt' are mutually exclusive per disk.
      if constants.IDISK_ADOPT in ddict:
        raise errors.OpPrereqError("Only one of 'size' and 'adopt' allowed"
                                   " (disk %d)" % didx)
      ddict[constants.IDISK_SIZE] = \
        utils.ParseUnit(ddict[constants.IDISK_SIZE])
      except ValueError, err:
        raise errors.OpPrereqError("Invalid disk size for disk %d: %s" %
    elif constants.IDISK_ADOPT in ddict:
      if mode == constants.INSTANCE_IMPORT:
        raise errors.OpPrereqError("Disk adoption not allowed for instance"
      # Adopted disks carry no size; it is determined from the volume.
      ddict[constants.IDISK_SIZE] = 0
      raise errors.OpPrereqError("Missing size or adoption source for"
  if opts.tags is not None:
    tags = opts.tags.split(",")
  utils.ForceDictType(opts.beparams, constants.BES_PARAMETER_COMPAT)
  utils.ForceDictType(hvparams, constants.HVS_PARAMETER_TYPES)
  # Mode-specific parameters for the opcode below.
  if mode == constants.INSTANCE_CREATE:
    force_variant = opts.force_variant
    no_install = opts.no_install
    identify_defaults = False
  elif mode == constants.INSTANCE_IMPORT:
    force_variant = False
    src_node = opts.src_node
    src_path = opts.src_dir
    identify_defaults = opts.identify_defaults
  raise errors.ProgrammerError("Invalid creation mode %s" % mode)
  op = opcodes.OpInstanceCreate(instance_name=instance,
                                disk_template=opts.disk_template,
                                pnode=pnode, snode=snode,
                                ip_check=opts.ip_check,
                                name_check=opts.name_check,
                                wait_for_sync=opts.wait_for_sync,
                                file_storage_dir=opts.file_storage_dir,
                                file_driver=opts.file_driver,
                                iallocator=opts.iallocator,
                                hypervisor=hypervisor,
                                beparams=opts.beparams,
                                osparams=opts.osparams,
                                force_variant=force_variant,
                                no_install=no_install,
                                identify_defaults=identify_defaults,
                                ignore_ipolicy=opts.ignore_ipolicy)
  SubmitOrSend(op, opts)
class _RunWhileClusterStoppedHelper:
  """Helper class for L{RunWhileClusterStopped} to simplify state management

  """
  def __init__(self, feedback_fn, cluster_name, master_node, online_nodes):
    """Initializes this class.

    @type feedback_fn: callable
    @param feedback_fn: Feedback function
    @type cluster_name: string
    @param cluster_name: Cluster name
    @type master_node: string
    @param master_node: Master node name
    @type online_nodes: list
    @param online_nodes: List of names of online nodes

    """
    self.feedback_fn = feedback_fn
    self.cluster_name = cluster_name
    self.master_node = master_node
    self.online_nodes = online_nodes

    self.ssh = ssh.SshRunner(self.cluster_name)

    self.nonmaster_nodes = [name for name in online_nodes
                            if name != master_node]

    assert self.master_node not in self.nonmaster_nodes

  def _RunCmd(self, node_name, cmd):
    """Runs a command on the local or a remote machine.

    @type node_name: string
    @param node_name: Machine name
    @type cmd: list
    @param cmd: Command and its arguments
    @raise errors.OpExecError: when the command fails

    """
    # NOTE(review): the `else:` and the `if result.failed:` /
    # `if node_name:` guard lines were dropped by the source extraction;
    # only the surviving lines are listed below.
    if node_name is None or node_name == self.master_node:
      # No need to use SSH
      result = utils.RunCmd(cmd)
      result = self.ssh.Run(node_name, "root", utils.ShellQuoteArgs(cmd))

      errmsg = ["Failed to run command %s" % result.cmd]
        errmsg.append("on node %s" % node_name)
      errmsg.append(": exitcode %s and error %s" %
                    (result.exit_code, result.output))
      raise errors.OpExecError(" ".join(errmsg))

  def Call(self, fn, *args):
    """Call function while all daemons are stopped.

    @type fn: callable
    @param fn: Function to be called

    """
    # NOTE(review): the try/finally structure (restarting daemons and
    # releasing the watcher lock on all paths) was dropped by the source
    # extraction; only the surviving lines are listed below.
    # Pause watcher by acquiring an exclusive lock on watcher state file
    self.feedback_fn("Blocking watcher")
    watcher_block = utils.FileLock.Open(constants.WATCHER_LOCK_FILE)
    # TODO: Currently, this just blocks. There's no timeout.
    # TODO: Should it be a shared lock?
    watcher_block.Exclusive(blocking=True)

    # Stop master daemons, so that no new jobs can come in and all running
    self.feedback_fn("Stopping master daemons")
    self._RunCmd(None, [constants.DAEMON_UTIL, "stop-master"])

    # Stop daemons on all nodes
    for node_name in self.online_nodes:
      self.feedback_fn("Stopping daemons on %s" % node_name)
      self._RunCmd(node_name, [constants.DAEMON_UTIL, "stop-all"])

    # All daemons are shut down now
      return fn(self, *args)
    except Exception, err:
      _, errmsg = FormatError(err)
      logging.exception("Caught exception")
      self.feedback_fn(errmsg)

    # Start cluster again, master node last
    for node_name in self.nonmaster_nodes + [self.master_node]:
      self.feedback_fn("Starting daemons on %s" % node_name)
      self._RunCmd(node_name, [constants.DAEMON_UTIL, "start-all"])

    watcher_block.Close()
2428 def RunWhileClusterStopped(feedback_fn, fn, *args):
2429 """Calls a function while all cluster daemons are stopped.
2431 @type feedback_fn: callable
2432 @param feedback_fn: Feedback function
2434 @param fn: Function to be called when daemons are stopped
2437 feedback_fn("Gathering cluster information")
2439 # This ensures we're running on the master daemon
2442 (cluster_name, master_node) = \
2443 cl.QueryConfigValues(["cluster_name", "master_node"])
2445 online_nodes = GetOnlineNodes([], cl=cl)
2447 # Don't keep a reference to the client. The master daemon will go away.
2450 assert master_node in online_nodes
2452 return _RunWhileClusterStoppedHelper(feedback_fn, cluster_name, master_node,
2453 online_nodes).Call(fn, *args)
2456 def GenerateTable(headers, fields, separator, data,
2457 numfields=None, unitfields=None,
2459 """Prints a table with headers and different fields.
2462 @param headers: dictionary mapping field names to headers for
2465 @param fields: the field names corresponding to each row in
2467 @param separator: the separator to be used; if this is None,
2468 the default 'smart' algorithm is used which computes optimal
2469 field width, otherwise just the separator is used between
2472 @param data: a list of lists, each sublist being one row to be output
2473 @type numfields: list
2474 @param numfields: a list with the fields that hold numeric
2475 values and thus should be right-aligned
2476 @type unitfields: list
2477 @param unitfields: a list with the fields that hold numeric
2478 values that should be formatted with the units field
2479 @type units: string or None
2480 @param units: the units we should use for formatting, or None for
2481 automatic choice (human-readable for non-separator usage, otherwise
2482 megabytes); this is a one-letter string
2491 if numfields is None:
2493 if unitfields is None:
2496 numfields = utils.FieldSet(*numfields) # pylint: disable=W0142
2497 unitfields = utils.FieldSet(*unitfields) # pylint: disable=W0142
2500 for field in fields:
2501 if headers and field not in headers:
2502 # TODO: handle better unknown fields (either revert to old
2503 # style of raising exception, or deal more intelligently with
2505 headers[field] = field
2506 if separator is not None:
2507 format_fields.append("%s")
2508 elif numfields.Matches(field):
2509 format_fields.append("%*s")
2511 format_fields.append("%-*s")
2513 if separator is None:
2514 mlens = [0 for name in fields]
2515 format_str = " ".join(format_fields)
2517 format_str = separator.replace("%", "%%").join(format_fields)
2522 for idx, val in enumerate(row):
2523 if unitfields.Matches(fields[idx]):
2526 except (TypeError, ValueError):
2529 val = row[idx] = utils.FormatUnit(val, units)
2530 val = row[idx] = str(val)
2531 if separator is None:
2532 mlens[idx] = max(mlens[idx], len(val))
2537 for idx, name in enumerate(fields):
2539 if separator is None:
2540 mlens[idx] = max(mlens[idx], len(hdr))
2541 args.append(mlens[idx])
2543 result.append(format_str % tuple(args))
2545 if separator is None:
2546 assert len(mlens) == len(fields)
2548 if fields and not numfields.Matches(fields[-1]):
2554 line = ["-" for _ in fields]
2555 for idx in range(len(fields)):
2556 if separator is None:
2557 args.append(mlens[idx])
2558 args.append(line[idx])
2559 result.append(format_str % tuple(args))
2564 def _FormatBool(value):
2565 """Formats a boolean value as a string.
2573 #: Default formatting for query results; (callback, align right)
2574 _DEFAULT_FORMAT_QUERY = {
2575 constants.QFT_TEXT: (str, False),
2576 constants.QFT_BOOL: (_FormatBool, False),
2577 constants.QFT_NUMBER: (str, True),
2578 constants.QFT_TIMESTAMP: (utils.FormatTime, False),
2579 constants.QFT_OTHER: (str, False),
2580 constants.QFT_UNKNOWN: (str, False),
2584 def _GetColumnFormatter(fdef, override, unit):
2585 """Returns formatting function for a field.
2587 @type fdef: L{objects.QueryFieldDefinition}
2588 @type override: dict
2589 @param override: Dictionary for overriding field formatting functions,
2590 indexed by field name, contents like L{_DEFAULT_FORMAT_QUERY}
2592 @param unit: Unit used for formatting fields of type L{constants.QFT_UNIT}
2593 @rtype: tuple; (callable, bool)
2594 @return: Returns the function to format a value (takes one parameter) and a
2595 boolean for aligning the value on the right-hand side
2598 fmt = override.get(fdef.name, None)
2602 assert constants.QFT_UNIT not in _DEFAULT_FORMAT_QUERY
2604 if fdef.kind == constants.QFT_UNIT:
2605 # Can't keep this information in the static dictionary
2606 return (lambda value: utils.FormatUnit(value, unit), True)
2608 fmt = _DEFAULT_FORMAT_QUERY.get(fdef.kind, None)
2612 raise NotImplementedError("Can't format column type '%s'" % fdef.kind)
2615 class _QueryColumnFormatter:
2616 """Callable class for formatting fields of a query.
2619 def __init__(self, fn, status_fn, verbose):
2620 """Initializes this class.
2623 @param fn: Formatting function
2624 @type status_fn: callable
2625 @param status_fn: Function to report fields' status
2626 @type verbose: boolean
2627 @param verbose: whether to use verbose field descriptions or not
2631 self._status_fn = status_fn
2632 self._verbose = verbose
2634 def __call__(self, data):
2635 """Returns a field's string representation.
2638 (status, value) = data
2641 self._status_fn(status)
2643 if status == constants.RS_NORMAL:
2644 return self._fn(value)
2646 assert value is None, \
2647 "Found value %r for abnormal status %s" % (value, status)
2649 return FormatResultError(status, self._verbose)
2652 def FormatResultError(status, verbose):
2653 """Formats result status other than L{constants.RS_NORMAL}.
2655 @param status: The result status
2656 @type verbose: boolean
2657 @param verbose: Whether to return the verbose text
2658 @return: Text of result status
2661 assert status != constants.RS_NORMAL, \
2662 "FormatResultError called with status equal to constants.RS_NORMAL"
2664 (verbose_text, normal_text) = constants.RSS_DESCRIPTION[status]
2666 raise NotImplementedError("Unknown status %s" % status)
2673 def FormatQueryResult(result, unit=None, format_override=None, separator=None,
2674 header=False, verbose=False):
2675 """Formats data in L{objects.QueryResponse}.
2677 @type result: L{objects.QueryResponse}
2678 @param result: result of query operation
2680 @param unit: Unit used for formatting fields of type L{constants.QFT_UNIT},
2681 see L{utils.text.FormatUnit}
2682 @type format_override: dict
2683 @param format_override: Dictionary for overriding field formatting functions,
2684 indexed by field name, contents like L{_DEFAULT_FORMAT_QUERY}
2685 @type separator: string or None
2686 @param separator: String used to separate fields
2688 @param header: Whether to output header row
2689 @type verbose: boolean
2690 @param verbose: whether to use verbose field descriptions or not
2699 if format_override is None:
2700 format_override = {}
2702 stats = dict.fromkeys(constants.RS_ALL, 0)
2704 def _RecordStatus(status):
2709 for fdef in result.fields:
2710 assert fdef.title and fdef.name
2711 (fn, align_right) = _GetColumnFormatter(fdef, format_override, unit)
2712 columns.append(TableColumn(fdef.title,
2713 _QueryColumnFormatter(fn, _RecordStatus,
2717 table = FormatTable(result.data, columns, header, separator)
2719 # Collect statistics
2720 assert len(stats) == len(constants.RS_ALL)
2721 assert compat.all(count >= 0 for count in stats.values())
2723 # Determine overall status. If there was no data, unknown fields must be
2724 # detected via the field definitions.
2725 if (stats[constants.RS_UNKNOWN] or
2726 (not result.data and _GetUnknownFields(result.fields))):
2728 elif compat.any(count > 0 for key, count in stats.items()
2729 if key != constants.RS_NORMAL):
2730 status = QR_INCOMPLETE
2734 return (status, table)
def _GetUnknownFields(fdefs):
  """Returns list of unknown fields included in C{fdefs}.

  @type fdefs: list of L{objects.QueryFieldDefinition}
  @param fdefs: field definitions to inspect
  @rtype: list
  @return: the subset of C{fdefs} whose kind is L{constants.QFT_UNKNOWN}

  """
  unknown = []
  for fdef in fdefs:
    if fdef.kind == constants.QFT_UNKNOWN:
      unknown.append(fdef)
  return unknown
2747 def _WarnUnknownFields(fdefs):
2748 """Prints a warning to stderr if a query included unknown fields.
2750 @type fdefs: list of L{objects.QueryFieldDefinition}
2753 unknown = _GetUnknownFields(fdefs)
2755 ToStderr("Warning: Queried for unknown fields %s",
2756 utils.CommaJoin(fdef.name for fdef in unknown))
2762 def GenericList(resource, fields, names, unit, separator, header, cl=None,
2763 format_override=None, verbose=False, force_filter=False):
2764 """Generic implementation for listing all items of a resource.
2766 @param resource: One of L{constants.QR_VIA_LUXI}
2767 @type fields: list of strings
2768 @param fields: List of fields to query for
2769 @type names: list of strings
2770 @param names: Names of items to query for
2771 @type unit: string or None
2772 @param unit: Unit used for formatting fields of type L{constants.QFT_UNIT} or
2773 None for automatic choice (human-readable for non-separator usage,
2774 otherwise megabytes); this is a one-letter string
2775 @type separator: string or None
2776 @param separator: String used to separate fields
2778 @param header: Whether to show header row
2779 @type force_filter: bool
2780 @param force_filter: Whether to always treat names as filter
2781 @type format_override: dict
2782 @param format_override: Dictionary for overriding field formatting functions,
2783 indexed by field name, contents like L{_DEFAULT_FORMAT_QUERY}
2784 @type verbose: boolean
2785 @param verbose: whether to use verbose field descriptions or not
2791 qfilter = qlang.MakeFilter(names, force_filter)
2796 response = cl.Query(resource, fields, qfilter)
2798 found_unknown = _WarnUnknownFields(response.fields)
2800 (status, data) = FormatQueryResult(response, unit=unit, separator=separator,
2802 format_override=format_override,
2808 assert ((found_unknown and status == QR_UNKNOWN) or
2809 (not found_unknown and status != QR_UNKNOWN))
2811 if status == QR_UNKNOWN:
2812 return constants.EXIT_UNKNOWN_FIELD
2814 # TODO: Should the list command fail if not all data could be collected?
2815 return constants.EXIT_SUCCESS
2818 def GenericListFields(resource, fields, separator, header, cl=None):
2819 """Generic implementation for listing fields for a resource.
2821 @param resource: One of L{constants.QR_VIA_LUXI}
2822 @type fields: list of strings
2823 @param fields: List of fields to query for
2824 @type separator: string or None
2825 @param separator: String used to separate fields
2827 @param header: Whether to show header row
2836 response = cl.QueryFields(resource, fields)
2838 found_unknown = _WarnUnknownFields(response.fields)
2841 TableColumn("Name", str, False),
2842 TableColumn("Title", str, False),
2843 TableColumn("Description", str, False),
2846 rows = [[fdef.name, fdef.title, fdef.doc] for fdef in response.fields]
2848 for line in FormatTable(rows, columns, header, separator):
2852 return constants.EXIT_UNKNOWN_FIELD
2854 return constants.EXIT_SUCCESS
2858 """Describes a column for L{FormatTable}.
2861 def __init__(self, title, fn, align_right):
2862 """Initializes this class.
2865 @param title: Column title
2867 @param fn: Formatting function
2868 @type align_right: bool
2869 @param align_right: Whether to align values on the right-hand side
2874 self.align_right = align_right
2877 def _GetColFormatString(width, align_right):
2878 """Returns the format string for a field.
2886 return "%%%s%ss" % (sign, width)
2889 def FormatTable(rows, columns, header, separator):
2890 """Formats data as a table.
2892 @type rows: list of lists
2893 @param rows: Row data, one list per row
2894 @type columns: list of L{TableColumn}
2895 @param columns: Column descriptions
2897 @param header: Whether to show header row
2898 @type separator: string or None
2899 @param separator: String used to separate columns
2903 data = [[col.title for col in columns]]
2904 colwidth = [len(col.title) for col in columns]
2907 colwidth = [0 for _ in columns]
2911 assert len(row) == len(columns)
2913 formatted = [col.format(value) for value, col in zip(row, columns)]
2915 if separator is None:
2916 # Update column widths
2917 for idx, (oldwidth, value) in enumerate(zip(colwidth, formatted)):
2918 # Modifying a list's items while iterating is fine
2919 colwidth[idx] = max(oldwidth, len(value))
2921 data.append(formatted)
2923 if separator is not None:
2924 # Return early if a separator is used
2925 return [separator.join(row) for row in data]
2927 if columns and not columns[-1].align_right:
2928 # Avoid unnecessary spaces at end of line
2931 # Build format string
2932 fmt = " ".join([_GetColFormatString(width, col.align_right)
2933 for col, width in zip(columns, colwidth)])
2935 return [fmt % tuple(row) for row in data]
2938 def FormatTimestamp(ts):
2939 """Formats a given timestamp.
2942 @param ts: a timeval-type timestamp, a tuple of seconds and microseconds
2945 @return: a string with the formatted timestamp
2948 if not isinstance(ts, (tuple, list)) or len(ts) != 2:
2951 return time.strftime("%F %T", time.localtime(sec)) + ".%06d" % usec
2954 def ParseTimespec(value):
2955 """Parse a time specification.
  The following suffixes will be recognized:
2965 Without any suffix, the value will be taken to be in seconds.
2970 raise errors.OpPrereqError("Empty time specification passed")
2978 if value[-1] not in suffix_map:
2981 except (TypeError, ValueError):
2982 raise errors.OpPrereqError("Invalid time specification '%s'" % value)
2984 multiplier = suffix_map[value[-1]]
2986 if not value: # no data left after stripping the suffix
2987 raise errors.OpPrereqError("Invalid time specification (only"
2990 value = int(value) * multiplier
2991 except (TypeError, ValueError):
2992 raise errors.OpPrereqError("Invalid time specification '%s'" % value)
2996 def GetOnlineNodes(nodes, cl=None, nowarn=False, secondary_ips=False,
2997 filter_master=False, nodegroup=None):
2998 """Returns the names of online nodes.
3000 This function will also log a warning on stderr with the names of
3003 @param nodes: if not empty, use only this subset of nodes (minus the
3005 @param cl: if not None, luxi client to use
3006 @type nowarn: boolean
3007 @param nowarn: by default, this function will output a note with the
3008 offline nodes that are skipped; if this parameter is True the
3009 note is not displayed
3010 @type secondary_ips: boolean
3011 @param secondary_ips: if True, return the secondary IPs instead of the
3012 names, useful for doing network traffic over the replication interface
3014 @type filter_master: boolean
3015 @param filter_master: if True, do not return the master node in the list
3016 (useful in coordination with secondary_ips where we cannot check our
3017 node name against the list)
3018 @type nodegroup: string
3019 @param nodegroup: If set, only return nodes in this node group
3028 qfilter.append(qlang.MakeSimpleFilter("name", nodes))
3030 if nodegroup is not None:
3031 qfilter.append([qlang.OP_OR, [qlang.OP_EQUAL, "group", nodegroup],
3032 [qlang.OP_EQUAL, "group.uuid", nodegroup]])
3035 qfilter.append([qlang.OP_NOT, [qlang.OP_TRUE, "master"]])
3038 if len(qfilter) > 1:
3039 final_filter = [qlang.OP_AND] + qfilter
3041 assert len(qfilter) == 1
3042 final_filter = qfilter[0]
3046 result = cl.Query(constants.QR_NODE, ["name", "offline", "sip"], final_filter)
3048 def _IsOffline(row):
3049 (_, (_, offline), _) = row
3053 ((_, name), _, _) = row
3057 (_, _, (_, sip)) = row
3060 (offline, online) = compat.partition(result.data, _IsOffline)
3062 if offline and not nowarn:
3063 ToStderr("Note: skipping offline node(s): %s" %
3064 utils.CommaJoin(map(_GetName, offline)))
3071 return map(fn, online)
3074 def _ToStream(stream, txt, *args):
3075 """Write a message to a stream, bypassing the logging system
3077 @type stream: file object
3078 @param stream: the file to which we should write
3080 @param txt: the message
3086 stream.write(txt % args)
3091 except IOError, err:
3092 if err.errno == errno.EPIPE:
3093 # our terminal went away, we'll exit
3094 sys.exit(constants.EXIT_FAILURE)
def ToStdout(txt, *args):
  """Write a message to stdout only, bypassing the logging system

  This is just a wrapper over _ToStream.

  @type txt: str
  @param txt: the message
  @param args: optional arguments for the message's format string

  """
  _ToStream(sys.stdout, txt, *args)
def ToStderr(txt, *args):
  """Write a message to stderr only, bypassing the logging system

  This is just a wrapper over _ToStream.

  @type txt: str
  @param txt: the message
  @param args: optional arguments for the message's format string

  """
  _ToStream(sys.stderr, txt, *args)
3123 class JobExecutor(object):
3124 """Class which manages the submission and execution of multiple jobs.
3126 Note that instances of this class should not be reused between
3130 def __init__(self, cl=None, verbose=True, opts=None, feedback_fn=None):
3135 self.verbose = verbose
3138 self.feedback_fn = feedback_fn
3139 self._counter = itertools.count()
3142 def _IfName(name, fmt):
3143 """Helper function for formatting name.
3151 def QueueJob(self, name, *ops):
3152 """Record a job for later submit.
3155 @param name: a description of the job, will be used in WaitJobSet
3158 SetGenericOpcodeOpts(ops, self.opts)
3159 self.queue.append((self._counter.next(), name, ops))
3161 def AddJobId(self, name, status, job_id):
3162 """Adds a job ID to the internal queue.
3165 self.jobs.append((self._counter.next(), status, job_id, name))
3167 def SubmitPending(self, each=False):
3168 """Submit all pending jobs.
3173 for (_, _, ops) in self.queue:
3174 # SubmitJob will remove the success status, but raise an exception if
3175 # the submission fails, so we'll notice that anyway.
3176 results.append([True, self.cl.SubmitJob(ops)[0]])
3178 results = self.cl.SubmitManyJobs([ops for (_, _, ops) in self.queue])
3179 for ((status, data), (idx, name, _)) in zip(results, self.queue):
3180 self.jobs.append((idx, status, data, name))
3182 def _ChooseJob(self):
3183 """Choose a non-waiting/queued job to poll next.
3186 assert self.jobs, "_ChooseJob called with empty job list"
3188 result = self.cl.QueryJobs([i[2] for i in self.jobs[:_CHOOSE_BATCH]],
3192 for job_data, status in zip(self.jobs, result):
3193 if (isinstance(status, list) and status and
3194 status[0] in (constants.JOB_STATUS_QUEUED,
3195 constants.JOB_STATUS_WAITING,
3196 constants.JOB_STATUS_CANCELING)):
3197 # job is still present and waiting
3199 # good candidate found (either running job or lost job)
3200 self.jobs.remove(job_data)
3204 return self.jobs.pop(0)
3206 def GetResults(self):
3207 """Wait for and return the results of all jobs.
3210 @return: list of tuples (success, job results), in the same order
3211 as the submitted jobs; if a job has failed, instead of the result
3212 there will be the error message
3216 self.SubmitPending()
3219 ok_jobs = [row[2] for row in self.jobs if row[1]]
3221 ToStdout("Submitted jobs %s", utils.CommaJoin(ok_jobs))
3223 # first, remove any non-submitted jobs
3224 self.jobs, failures = compat.partition(self.jobs, lambda x: x[1])
3225 for idx, _, jid, name in failures:
3226 ToStderr("Failed to submit job%s: %s", self._IfName(name, " for %s"), jid)
3227 results.append((idx, False, jid))
3230 (idx, _, jid, name) = self._ChooseJob()
3231 ToStdout("Waiting for job %s%s ...", jid, self._IfName(name, " for %s"))
3233 job_result = PollJob(jid, cl=self.cl, feedback_fn=self.feedback_fn)
3235 except errors.JobLost, err:
3236 _, job_result = FormatError(err)
3237 ToStderr("Job %s%s has been archived, cannot check its result",
3238 jid, self._IfName(name, " for %s"))
3240 except (errors.GenericError, luxi.ProtocolError), err:
3241 _, job_result = FormatError(err)
3243 # the error message will always be shown, verbose or not
3244 ToStderr("Job %s%s has failed: %s",
3245 jid, self._IfName(name, " for %s"), job_result)
3247 results.append((idx, success, job_result))
3249 # sort based on the index, then drop it
3251 results = [i[1:] for i in results]
3255 def WaitOrShow(self, wait):
3256 """Wait for job results or only print the job IDs.
3259 @param wait: whether to wait or not
3263 return self.GetResults()
3266 self.SubmitPending()
3267 for _, status, result, name in self.jobs:
3269 ToStdout("%s: %s", result, name)
3271 ToStderr("Failure for %s: %s", name, result)
3272 return [row[1:3] for row in self.jobs]
def FormatParameterDict(buf, param_dict, actual, level=1):
  """Formats a parameter dictionary.

  Writes one "- key: value" line per key in C{actual}; keys not
  explicitly set in C{param_dict} are shown as "default (...)".

  @type buf: L{StringIO}
  @param buf: the buffer into which to write
  @type param_dict: dict
  @param param_dict: the own parameters
  @type actual: dict
  @param actual: the current parameter set (including defaults)
  @param level: Level of indent

  """
  prefix = " " * level
  for name in sorted(actual):
    shown = param_dict.get(name, "default (%s)" % actual[name])
    buf.write("%s- %s: %s\n" % (prefix, name, shown))
3293 def ConfirmOperation(names, list_type, text, extra=""):
3294 """Ask the user to confirm an operation on a list of list_type.
3296 This function is used to request confirmation for doing an operation
3297 on a given list of list_type.
3300 @param names: the list of names that we display when
3301 we ask for confirmation
3302 @type list_type: str
3303 @param list_type: Human readable name for elements in the list (e.g. nodes)
3305 @param text: the operation that the user should confirm
3307 @return: True or False depending on user's confirmation.
3311 msg = ("The %s will operate on %d %s.\n%s"
3312 "Do you want to continue?" % (text, count, list_type, extra))
3313 affected = (("\nAffected %s:\n" % list_type) +
3314 "\n".join([" %s" % name for name in names]))
3316 choices = [("y", True, "Yes, execute the %s" % text),
3317 ("n", False, "No, abort the %s" % text)]
3320 choices.insert(1, ("v", "v", "View the list of affected %s" % list_type))
3323 question = msg + affected
3325 choice = AskUser(question, choices)
3328 choice = AskUser(msg + affected, choices)