4 # Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011 Google Inc.
6 # This program is free software; you can redistribute it and/or modify
7 # it under the terms of the GNU General Public License as published by
8 # the Free Software Foundation; either version 2 of the License, or
9 # (at your option) any later version.
11 # This program is distributed in the hope that it will be useful, but
12 # WITHOUT ANY WARRANTY; without even the implied warranty of
13 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 # General Public License for more details.
16 # You should have received a copy of the GNU General Public License
17 # along with this program; if not, write to the Free Software
18 # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
22 """Module dealing with command line parsing"""
32 from cStringIO import StringIO
34 from ganeti import utils
35 from ganeti import errors
36 from ganeti import constants
37 from ganeti import opcodes
38 from ganeti import luxi
39 from ganeti import ssconf
40 from ganeti import rpc
41 from ganeti import ssh
42 from ganeti import compat
43 from ganeti import netutils
44 from ganeti import qlang
46 from optparse import (OptionParser, TitledHelpFormatter,
47 Option, OptionValueError)
# NOTE(review): this extraction is missing many lines here — the
# "__all__ = [" list opener and a large number of its entries are not
# visible; the fragments below are preserved as found.  Verify against
# the original file before editing.
# Command line options
"CLUSTER_DOMAIN_SECRET_OPT",
"FILESTORE_DRIVER_OPT",
"GLOBAL_SHARED_FILEDIR_OPT",
"DEFAULT_IALLOCATOR_OPT",
"IDENTIFY_DEFAULTS_OPT",
"IGNORE_FAILURES_OPT",
"IGNORE_REMOVE_FAILURES_OPT",
"IGNORE_SECONDARIES_OPT",
"MAINTAIN_NODE_HEALTH_OPT",
"MIGRATION_MODE_OPT",
"NEW_CLUSTER_CERT_OPT",
"NEW_CLUSTER_DOMAIN_SECRET_OPT",
"NEW_CONFD_HMAC_KEY_OPT",
"NODE_FORCE_JOIN_OPT",
"NODE_PLACEMENT_OPT",
"NODRBD_STORAGE_OPT",
"NOMODIFY_ETCHOSTS_OPT",
"NOMODIFY_SSH_SETUP_OPT",
"NOSSH_KEYCHECK_OPT",
"PREALLOC_WIPE_DISKS_OPT",
"PRIMARY_IP_VERSION_OPT",
"REMOVE_INSTANCE_OPT",
"SECONDARY_ONLY_OPT",
"SHUTDOWN_TIMEOUT_OPT",
"STARTUP_PAUSED_OPT",
# Generic functions for CLI programs
"GenericInstanceCreate",
"JobSubmittedException",
"RunWhileClusterStopped",
# Formatting functions
"ToStderr", "ToStdout",
"FormatParameterDict",
# command line options support infrastructure
"ARGS_MANY_INSTANCES",
"OPT_COMPL_INST_ADD_NODES",
"OPT_COMPL_MANY_NODES",
"OPT_COMPL_ONE_IALLOCATOR",
"OPT_COMPL_ONE_INSTANCE",
"OPT_COMPL_ONE_NODE",
"OPT_COMPL_ONE_NODEGROUP",
"COMMON_CREATE_OPTS",

#: Priorities (sorted)
# NOTE(review): these (name, value) pairs are the entries of the
# _PRIORITY_NAMES list (see its use below); the list opener is not visible
# in this extraction.
("low", constants.OP_PRIO_LOW),
("normal", constants.OP_PRIO_NORMAL),
("high", constants.OP_PRIO_HIGH),

#: Priority dictionary for easier lookup
# TODO: Replace this and _PRIORITY_NAMES with a single sorted dictionary once
# we migrate to Python 2.6
_PRIONAME_TO_VALUE = dict(_PRIORITY_NAMES)

# Query result status for clients
# NOTE(review): the opening of this tuple assignment (the names preceding
# QR_INCOMPLETE) is missing from this extraction.
QR_INCOMPLETE) = range(3)
262 def __init__(self, min=0, max=None): # pylint: disable-msg=W0622
267 return ("<%s min=%s max=%s>" %
268 (self.__class__.__name__, self.min, self.max))
class ArgSuggest(_Argument):
  """Suggesting argument.

  Value can be any of the ones passed to the constructor.

  NOTE(review): the ``def __repr__(self):`` line was dropped by the
  extraction of this file; it is restored here, as proven by the orphaned
  ``return`` statement that mirrors ``_Argument.__repr__``.

  """
  # pylint: disable-msg=W0622
  def __init__(self, min=0, max=None, choices=None):
    _Argument.__init__(self, min=min, max=max)
    self.choices = choices

  def __repr__(self):
    return ("<%s min=%s max=%s choices=%r>" %
            (self.__class__.__name__, self.min, self.max, self.choices))
# NOTE(review): the extraction of this file dropped most docstring
# delimiter lines in the classes below; the docstrings are restored/closed
# here (documentation only — no code lines were changed or added).
class ArgChoice(ArgSuggest):
  """Choice argument.

  Value can be any of the ones passed to the constructor. Like L{ArgSuggest},
  but value must be one of the choices.

  """


class ArgUnknown(_Argument):
  """Unknown argument to program (e.g. determined at runtime).

  """


class ArgInstance(_Argument):
  """Instances argument.

  """


class ArgNode(_Argument):
  """Node argument.

  """


class ArgGroup(_Argument):
  """Node group argument.

  """


class ArgJobId(_Argument):
  """Job ID argument.

  """


class ArgFile(_Argument):
  """File path argument.

  """


class ArgCommand(_Argument):
  """Command argument.

  """


class ArgHost(_Argument):
  """Host argument.

  """


class ArgOs(_Argument):
  """OS argument.

  """
# Canned argument specifications reused by the per-command definitions.
# NOTE(review): the extraction appears to omit at least one line in this
# group; verify against the original file.
ARGS_MANY_INSTANCES = [ArgInstance()]
ARGS_MANY_NODES = [ArgNode()]
ARGS_MANY_GROUPS = [ArgGroup()]
ARGS_ONE_INSTANCE = [ArgInstance(min=1, max=1)]
ARGS_ONE_NODE = [ArgNode(min=1, max=1)]
ARGS_ONE_GROUP = [ArgGroup(min=1, max=1)]
ARGS_ONE_OS = [ArgOs(min=1, max=1)]
def _ExtractTagsObject(opts, args):
  """Extract the tag type object.

  Note that this function will modify its args parameter.

  """
  # NOTE(review): several code lines of this function are missing from this
  # extraction (e.g. the assignment of 'kind' from opts.tag_type and the
  # bodies of the branches below); preserved as found — do not run as-is.
  if not hasattr(opts, "tag_type"):
    raise errors.ProgrammerError("tag_type not passed to _ExtractTagsObject")
  if kind == constants.TAG_CLUSTER:
  elif kind in (constants.TAG_NODEGROUP,
                constants.TAG_INSTANCE):
    raise errors.OpPrereqError("no arguments passed to the command")
  raise errors.ProgrammerError("Unhandled tag type '%s'" % kind)
def _ExtendTags(opts, args):
  """Extend the args if a source file has been given.

  This function will extend the tags with the contents of the file
  passed in the 'tags_source' attribute of the opts parameter. A file
  named '-' will be replaced by stdin.

  """
  # NOTE(review): code lines are missing from this extraction (the early
  # return when no source is given, the stdin branch, the initialization of
  # new_data, and the read-loop control flow); preserved as found.
  fname = opts.tags_source
  new_fh = open(fname, "r")
  # we don't use the nice 'new_data = [line.strip() for line in fh]'
  # because of python bug 1633941
  line = new_fh.readline()
  new_data.append(line.strip())
  args.extend(new_data)
def ListTags(opts, args):
  """List the tags on a given object.

  This is a generic implementation that knows how to deal with all
  three cases of tag objects (cluster, node, instance). The opts
  argument is expected to contain a tag_type field denoting what
  object type we work on.

  """
  # NOTE(review): the creation of the luxi client ('cl') and the sorting /
  # output lines are missing from this extraction; preserved as found.
  kind, name = _ExtractTagsObject(opts, args)
  result = cl.QueryTags(kind, name)
  result = list(result)
def AddTags(opts, args):
  """Add tags on a given object.

  This is a generic implementation that knows how to deal with all
  three cases of tag objects (cluster, node, instance). The opts
  argument is expected to contain a tag_type field denoting what
  object type we work on.

  """
  kind, name = _ExtractTagsObject(opts, args)
  _ExtendTags(opts, args)
  # NOTE(review): the guard that conditions this raise (presumably
  # "if not args:") is missing from this extraction — confirm against the
  # original file.
  raise errors.OpPrereqError("No tags to be added")
  op = opcodes.OpTagsSet(kind=kind, name=name, tags=args)
  SubmitOpCode(op, opts=opts)
def RemoveTags(opts, args):
  """Remove tags from a given object.

  This is a generic implementation that knows how to deal with all
  three cases of tag objects (cluster, node, instance). The opts
  argument is expected to contain a tag_type field denoting what
  object type we work on.

  """
  kind, name = _ExtractTagsObject(opts, args)
  _ExtendTags(opts, args)
  # NOTE(review): the guard that conditions this raise (presumably
  # "if not args:") is missing from this extraction — confirm against the
  # original file.
  raise errors.OpPrereqError("No tags to be removed")
  op = opcodes.OpTagsDel(kind=kind, name=name, tags=args)
  SubmitOpCode(op, opts=opts)
465 def check_unit(option, opt, value): # pylint: disable-msg=W0613
466 """OptParsers custom converter for units.
470 return utils.ParseUnit(value)
471 except errors.UnitParseError, err:
472 raise OptionValueError("option %s: %s" % (opt, err))
def _SplitKeyVal(opt, data):
  """Convert a KeyVal string into a dict.

  This function will convert a key=val[,...] string into a dict. Empty
  values will be converted specially: keys which have the prefix 'no_'
  will have the value=False and the prefix stripped, the others will

  @param opt: a string holding the option name for which we process the
      data, used in building error messages

  @param data: a string of the format key=val,key=val,...

  @return: {key=val, key=val}
  @raises errors.ParameterError: if there are duplicate keys

  """
  # NOTE(review): code lines are missing from this extraction (the result
  # dict initialization, the "=" membership test and its else branch, the
  # duplicate-key check guarding the raise, the raise's continuation line
  # and the final return); preserved as found — do not run as-is.
  for elem in utils.UnescapeAndSplit(data, sep=","):
    key, val = elem.split("=", 1)
    if elem.startswith(NO_PREFIX):
      key, val = elem[len(NO_PREFIX):], False
    elif elem.startswith(UN_PREFIX):
      key, val = elem[len(UN_PREFIX):], None
    key, val = elem, True
    raise errors.ParameterError("Duplicate key '%s' in option %s" %
def check_ident_key_val(option, opt, value): # pylint: disable-msg=W0613
  """Custom parser for ident:key=val,key=val options.

  This will store the parsed values as a tuple (ident, {key: val}). As such,
  multiple uses of this option via action=append is possible.

  """
  # NOTE(review): code lines are missing from this extraction (the ":"
  # membership test guarding the split, the guards around the two error
  # branches, and the final return of retval); preserved as found — do not
  # run as-is.
  ident, rest = value, ""
  ident, rest = value.split(":", 1)
  if ident.startswith(NO_PREFIX):
    msg = "Cannot pass options when removing parameter groups: %s" % value
    raise errors.ParameterError(msg)
    retval = (ident[len(NO_PREFIX):], False)
  elif ident.startswith(UN_PREFIX):
    msg = "Cannot pass options when removing parameter groups: %s" % value
    raise errors.ParameterError(msg)
    retval = (ident[len(UN_PREFIX):], None)
  kv_dict = _SplitKeyVal(opt, rest)
  retval = (ident, kv_dict)
def check_key_val(option, opt, value): # pylint: disable-msg=W0613
  """Custom parser for key=val,key=val options.

  This will store the parsed values as a dict {key: val}.

  """
  return _SplitKeyVal(opt, value)
def check_bool(option, opt, value): # pylint: disable-msg=W0613
  """Custom parser for yes/no options.

  This will store the parsed value as either True or False.

  NOTE(review): the ``return False``/``return True`` lines and the ``else:``
  were dropped by the extraction of this file; they are restored here as
  pinned by the docstring contract and the symmetric if/elif structure.

  """
  value = value.lower()
  if value == constants.VALUE_FALSE or value == "no":
    return False
  elif value == constants.VALUE_TRUE or value == "yes":
    return True
  else:
    raise errors.ParameterError("Invalid boolean value '%s'" % value)
# completion_suggestion is normally a list. Using numeric values not evaluating
# to False for dynamic completion.
# NOTE(review): range(100, 107) yields seven values but only five names are
# visible here — tuple members (and matching OPT_COMPL_ALL entries, plus the
# frozenset's closing bracket) appear to have been dropped by this
# extraction; preserved as found.
(OPT_COMPL_MANY_NODES,
 OPT_COMPL_ONE_INSTANCE,
 OPT_COMPL_ONE_IALLOCATOR,
 OPT_COMPL_INST_ADD_NODES,
 OPT_COMPL_ONE_NODEGROUP) = range(100, 107)

OPT_COMPL_ALL = frozenset([
  OPT_COMPL_MANY_NODES,
  OPT_COMPL_ONE_INSTANCE,
  OPT_COMPL_ONE_IALLOCATOR,
  OPT_COMPL_INST_ADD_NODES,
  OPT_COMPL_ONE_NODEGROUP,


class CliOption(Option):
  """Custom option class for optparse.

  """
  # NOTE(review): the closing bracket of ATTRS and the members (and closer)
  # of the TYPES tuple are missing from this extraction; preserved as found.
  ATTRS = Option.ATTRS + [
    "completion_suggest",
  TYPES = Option.TYPES + (
  # Register the custom converters defined above for the new option types.
  TYPE_CHECKER = Option.TYPE_CHECKER.copy()
  TYPE_CHECKER["identkeyval"] = check_ident_key_val
  TYPE_CHECKER["keyval"] = check_key_val
  TYPE_CHECKER["unit"] = check_unit
  TYPE_CHECKER["bool"] = check_bool


# optparse.py sets make_option, so we do it for our own option class, too
cli_option = CliOption
# Generic output/behavior options and instance-creation options.
# NOTE(review): a number of statements in this section are missing their
# continuation lines (help-text fragments, closing parentheses or keyword
# arguments) in this extraction; preserved as found — verify against the
# original file.
DEBUG_OPT = cli_option("-d", "--debug", default=0, action="count",
                       help="Increase debugging level")

NOHDR_OPT = cli_option("--no-headers", default=False,
                       action="store_true", dest="no_headers",
                       help="Don't display column headers")

SEP_OPT = cli_option("--separator", default=None,
                     action="store", dest="separator",
                     help=("Separator between output fields"
                           " (defaults to one space)"))

USEUNITS_OPT = cli_option("--units", default=None,
                          dest="units", choices=("h", "m", "g", "t"),
                          help="Specify units for output (one of h/m/g/t)")

FIELDS_OPT = cli_option("-o", "--output", dest="output", action="store",
                        type="string", metavar="FIELDS",
                        help="Comma separated list of output fields")

FORCE_OPT = cli_option("-f", "--force", dest="force", action="store_true",
                       default=False, help="Force the operation")

CONFIRM_OPT = cli_option("--yes", dest="confirm", action="store_true",
                         default=False, help="Do not require confirmation")

IGNORE_OFFLINE_OPT = cli_option("--ignore-offline", dest="ignore_offline",
                                action="store_true", default=False,
                                help=("Ignore offline nodes and do as much"

TAG_ADD_OPT = cli_option("--tags", dest="tags",
                         default=None, help="Comma-separated list of instance"

TAG_SRC_OPT = cli_option("--from", dest="tags_source",
                         default=None, help="File with tag names")

SUBMIT_OPT = cli_option("--submit", dest="submit_only",
                        default=False, action="store_true",
                        help=("Submit the job and return the job ID, but"
                              " don't wait for the job to finish"))

SYNC_OPT = cli_option("--sync", dest="do_locking",
                      default=False, action="store_true",
                      help=("Grab locks while doing the queries"
                            " in order to ensure more consistent results"))

DRY_RUN_OPT = cli_option("--dry-run", default=False,
                         help=("Do not execute the operation, just run the"
                               " check steps and verify it it could be"

VERBOSE_OPT = cli_option("-v", "--verbose", default=False,
                         help="Increase the verbosity of the operation")

DEBUG_SIMERR_OPT = cli_option("--debug-simulate-errors", default=False,
                              action="store_true", dest="simulate_errors",
                              help="Debugging option that makes the operation"
                              " treat most runtime checks as failed")

NWSYNC_OPT = cli_option("--no-wait-for-sync", dest="wait_for_sync",
                        default=True, action="store_false",
                        help="Don't wait for sync (DANGEROUS!)")

DISK_TEMPLATE_OPT = cli_option("-t", "--disk-template", dest="disk_template",
                               help=("Custom disk setup (%s)" %
                                     utils.CommaJoin(constants.DISK_TEMPLATES)),
                               default=None, metavar="TEMPL",
                               choices=list(constants.DISK_TEMPLATES))

NONICS_OPT = cli_option("--no-nics", default=False, action="store_true",
                        help="Do not create any network cards for"

FILESTORE_DIR_OPT = cli_option("--file-storage-dir", dest="file_storage_dir",
                               help="Relative path under default cluster-wide"
                               " file storage dir to store file-based disks",
                               default=None, metavar="<DIR>")

FILESTORE_DRIVER_OPT = cli_option("--file-driver", dest="file_driver",
                                  help="Driver to use for image files",
                                  default="loop", metavar="<DRIVER>",
                                  choices=list(constants.FILE_DRIVER))

IALLOCATOR_OPT = cli_option("-I", "--iallocator", metavar="<NAME>",
                            help="Select nodes for the instance automatically"
                            " using the <NAME> iallocator plugin",
                            default=None, type="string",
                            completion_suggest=OPT_COMPL_ONE_IALLOCATOR)

DEFAULT_IALLOCATOR_OPT = cli_option("-I", "--default-iallocator",
                                    help="Set the default instance allocator plugin",
                                    default=None, type="string",
                                    completion_suggest=OPT_COMPL_ONE_IALLOCATOR)

OS_OPT = cli_option("-o", "--os-type", dest="os", help="What OS to run",
                    completion_suggest=OPT_COMPL_ONE_OS)

OSPARAMS_OPT = cli_option("-O", "--os-parameters", dest="osparams",
                          type="keyval", default={},
                          help="OS parameters")

FORCE_VARIANT_OPT = cli_option("--force-variant", dest="force_variant",
                               action="store_true", default=False,
                               help="Force an unknown variant")

NO_INSTALL_OPT = cli_option("--no-install", dest="no_install",
                            action="store_true", default=False,
                            help="Do not install the OS (will"

BACKEND_OPT = cli_option("-B", "--backend-parameters", dest="beparams",
                         type="keyval", default={},
                         help="Backend parameters")

HVOPTS_OPT = cli_option("-H", "--hypervisor-parameters", type="keyval",
                        default={}, dest="hvparams",
                        help="Hypervisor parameters")

HYPERVISOR_OPT = cli_option("-H", "--hypervisor-parameters", dest="hypervisor",
                            help="Hypervisor and hypervisor options, in the"
                            " format hypervisor:option=value,option=value,...",
                            default=None, type="identkeyval")

HVLIST_OPT = cli_option("-H", "--hypervisor-parameters", dest="hvparams",
                        help="Hypervisor and hypervisor options, in the"
                        " format hypervisor:option=value,option=value,...",
                        default=[], action="append", type="identkeyval")

NOIPCHECK_OPT = cli_option("--no-ip-check", dest="ip_check", default=True,
                           action="store_false",
                           help="Don't check that the instance's IP"

NONAMECHECK_OPT = cli_option("--no-name-check", dest="name_check",
                             default=True, action="store_false",
                             help="Don't check that the instance's name"

NET_OPT = cli_option("--net",
                     help="NIC parameters", default=[],
                     dest="nics", action="append", type="identkeyval")

DISK_OPT = cli_option("--disk", help="Disk parameters", default=[],
                      dest="disks", action="append", type="identkeyval")

DISKIDX_OPT = cli_option("--disks", dest="disks", default=None,
                         help="Comma-separated list of disks"
                         " indices to act on (e.g. 0,2) (optional,"
                         " defaults to all disks)")

OS_SIZE_OPT = cli_option("-s", "--os-size", dest="sd_size",
                         help="Enforces a single-disk configuration using the"
                         " given disk size, in MiB unless a suffix is used",
                         default=None, type="unit", metavar="<size>")
# Migration, disk-replacement, instance-removal, export and node-add options.
# NOTE(review): several statements in this section are missing continuation
# lines in this extraction; preserved as found — verify against the
# original file.
IGNORE_CONSIST_OPT = cli_option("--ignore-consistency",
                                dest="ignore_consistency",
                                action="store_true", default=False,
                                help="Ignore the consistency of the disks on"

ALLOW_FAILOVER_OPT = cli_option("--allow-failover",
                                dest="allow_failover",
                                action="store_true", default=False,
                                help="If migration is not possible fallback to"

NONLIVE_OPT = cli_option("--non-live", dest="live",
                         default=True, action="store_false",
                         help="Do a non-live migration (this usually means"
                         " freeze the instance, save the state, transfer and"
                         " only then resume running on the secondary node)")

MIGRATION_MODE_OPT = cli_option("--migration-mode", dest="migration_mode",
                                choices=list(constants.HT_MIGRATION_MODES),
                                help="Override default migration mode (choose"
                                " either live or non-live")

NODE_PLACEMENT_OPT = cli_option("-n", "--node", dest="node",
                                help="Target node and optional secondary node",
                                metavar="<pnode>[:<snode>]",
                                completion_suggest=OPT_COMPL_INST_ADD_NODES)

NODE_LIST_OPT = cli_option("-n", "--node", dest="nodes", default=[],
                           action="append", metavar="<node>",
                           help="Use only this node (can be used multiple"
                           " times, if not given defaults to all nodes)",
                           completion_suggest=OPT_COMPL_ONE_NODE)

NODEGROUP_OPT_NAME = "--node-group"
NODEGROUP_OPT = cli_option("-g", NODEGROUP_OPT_NAME,
                           help="Node group (name or uuid)",
                           metavar="<nodegroup>",
                           default=None, type="string",
                           completion_suggest=OPT_COMPL_ONE_NODEGROUP)

SINGLE_NODE_OPT = cli_option("-n", "--node", dest="node", help="Target node",
                             completion_suggest=OPT_COMPL_ONE_NODE)

NOSTART_OPT = cli_option("--no-start", dest="start", default=True,
                         action="store_false",
                         help="Don't start the instance after creation")

SHOWCMD_OPT = cli_option("--show-cmd", dest="show_command",
                         action="store_true", default=False,
                         help="Show command instead of executing it")

CLEANUP_OPT = cli_option("--cleanup", dest="cleanup",
                         default=False, action="store_true",
                         help="Instead of performing the migration, try to"
                         " recover from a failed cleanup. This is safe"
                         " to run even if the instance is healthy, but it"
                         " will create extra replication traffic and "
                         " disrupt briefly the replication (like during the"

STATIC_OPT = cli_option("-s", "--static", dest="static",
                        action="store_true", default=False,
                        help="Only show configuration data, not runtime data")

ALL_OPT = cli_option("--all", dest="show_all",
                     default=False, action="store_true",
                     help="Show info on all instances on the cluster."
                     " This can take a long time to run, use wisely")

SELECT_OS_OPT = cli_option("--select-os", dest="select_os",
                           action="store_true", default=False,
                           help="Interactive OS reinstall, lists available"
                           " OS templates for selection")

IGNORE_FAILURES_OPT = cli_option("--ignore-failures", dest="ignore_failures",
                                 action="store_true", default=False,
                                 help="Remove the instance from the cluster"
                                 " configuration even if there are failures"
                                 " during the removal process")

IGNORE_REMOVE_FAILURES_OPT = cli_option("--ignore-remove-failures",
                                        dest="ignore_remove_failures",
                                        action="store_true", default=False,
                                        help="Remove the instance from the"
                                        " cluster configuration even if there"
                                        " are failures during the removal"

REMOVE_INSTANCE_OPT = cli_option("--remove-instance", dest="remove_instance",
                                 action="store_true", default=False,
                                 help="Remove the instance from the cluster")

DST_NODE_OPT = cli_option("-n", "--target-node", dest="dst_node",
                          help="Specifies the new node for the instance",
                          metavar="NODE", default=None,
                          completion_suggest=OPT_COMPL_ONE_NODE)

NEW_SECONDARY_OPT = cli_option("-n", "--new-secondary", dest="dst_node",
                               help="Specifies the new secondary node",
                               metavar="NODE", default=None,
                               completion_suggest=OPT_COMPL_ONE_NODE)

ON_PRIMARY_OPT = cli_option("-p", "--on-primary", dest="on_primary",
                            default=False, action="store_true",
                            help="Replace the disk(s) on the primary"
                            " node (applies only to internally mirrored"
                            " disk templates, e.g. %s)" %
                            utils.CommaJoin(constants.DTS_INT_MIRROR))

ON_SECONDARY_OPT = cli_option("-s", "--on-secondary", dest="on_secondary",
                              default=False, action="store_true",
                              help="Replace the disk(s) on the secondary"
                              " node (applies only to internally mirrored"
                              " disk templates, e.g. %s)" %
                              utils.CommaJoin(constants.DTS_INT_MIRROR))

AUTO_PROMOTE_OPT = cli_option("--auto-promote", dest="auto_promote",
                              default=False, action="store_true",
                              help="Lock all nodes and auto-promote as needed"

AUTO_REPLACE_OPT = cli_option("-a", "--auto", dest="auto",
                              default=False, action="store_true",
                              help="Automatically replace faulty disks"
                              " (applies only to internally mirrored"
                              " disk templates, e.g. %s)" %
                              utils.CommaJoin(constants.DTS_INT_MIRROR))

IGNORE_SIZE_OPT = cli_option("--ignore-size", dest="ignore_size",
                             default=False, action="store_true",
                             help="Ignore current recorded size"
                             " (useful for forcing activation when"
                             " the recorded size is wrong)")

SRC_NODE_OPT = cli_option("--src-node", dest="src_node", help="Source node",
                          completion_suggest=OPT_COMPL_ONE_NODE)

SRC_DIR_OPT = cli_option("--src-dir", dest="src_dir", help="Source directory",

SECONDARY_IP_OPT = cli_option("-s", "--secondary-ip", dest="secondary_ip",
                              help="Specify the secondary ip for the node",
                              metavar="ADDRESS", default=None)

READD_OPT = cli_option("--readd", dest="readd",
                       default=False, action="store_true",
                       help="Readd old node after replacing it")

NOSSH_KEYCHECK_OPT = cli_option("--no-ssh-key-check", dest="ssh_key_check",
                                default=True, action="store_false",
                                help="Disable SSH key fingerprint checking")

NODE_FORCE_JOIN_OPT = cli_option("--force-join", dest="force_join",
                                 default=False, action="store_true",
                                 help="Force the joining of a node")
# Node-flag options and cluster init/modify/renew-crypto options.
# NOTE(review): several statements in this section are missing continuation
# lines in this extraction; preserved as found — verify against the
# original file.  _YORNO is defined elsewhere in this file (outside this
# excerpt).
MC_OPT = cli_option("-C", "--master-candidate", dest="master_candidate",
                    type="bool", default=None, metavar=_YORNO,
                    help="Set the master_candidate flag on the node")

OFFLINE_OPT = cli_option("-O", "--offline", dest="offline", metavar=_YORNO,
                         type="bool", default=None,
                         help=("Set the offline flag on the node"
                               " (cluster does not communicate with offline"

DRAINED_OPT = cli_option("-D", "--drained", dest="drained", metavar=_YORNO,
                         type="bool", default=None,
                         help=("Set the drained flag on the node"
                               " (excluded from allocation operations)"))

CAPAB_MASTER_OPT = cli_option("--master-capable", dest="master_capable",
                              type="bool", default=None, metavar=_YORNO,
                              help="Set the master_capable flag on the node")

CAPAB_VM_OPT = cli_option("--vm-capable", dest="vm_capable",
                          type="bool", default=None, metavar=_YORNO,
                          help="Set the vm_capable flag on the node")

ALLOCATABLE_OPT = cli_option("--allocatable", dest="allocatable",
                             type="bool", default=None, metavar=_YORNO,
                             help="Set the allocatable flag on a volume")

NOLVM_STORAGE_OPT = cli_option("--no-lvm-storage", dest="lvm_storage",
                               help="Disable support for lvm based instances"
                               action="store_false", default=True)

ENABLED_HV_OPT = cli_option("--enabled-hypervisors",
                            dest="enabled_hypervisors",
                            help="Comma-separated list of hypervisors",
                            type="string", default=None)

NIC_PARAMS_OPT = cli_option("-N", "--nic-parameters", dest="nicparams",
                            type="keyval", default={},
                            help="NIC parameters")

CP_SIZE_OPT = cli_option("-C", "--candidate-pool-size", default=None,
                         dest="candidate_pool_size", type="int",
                         help="Set the candidate pool size")

VG_NAME_OPT = cli_option("--vg-name", dest="vg_name",
                         help=("Enables LVM and specifies the volume group"
                               " name (cluster-wide) for disk allocation"
                               " [%s]" % constants.DEFAULT_VG),
                         metavar="VG", default=None)

YES_DOIT_OPT = cli_option("--yes-do-it", "--ya-rly", dest="yes_do_it",
                          help="Destroy cluster", action="store_true")

NOVOTING_OPT = cli_option("--no-voting", dest="no_voting",
                          help="Skip node agreement check (dangerous)",
                          action="store_true", default=False)

MAC_PREFIX_OPT = cli_option("-m", "--mac-prefix", dest="mac_prefix",
                            help="Specify the mac prefix for the instance IP"
                            " addresses, in the format XX:XX:XX",

MASTER_NETDEV_OPT = cli_option("--master-netdev", dest="master_netdev",
                               help="Specify the node interface (cluster-wide)"
                               " on which the master IP address will be added"
                               " (cluster init default: %s)" %
                               constants.DEFAULT_BRIDGE,

GLOBAL_FILEDIR_OPT = cli_option("--file-storage-dir", dest="file_storage_dir",
                                help="Specify the default directory (cluster-"
                                "wide) for storing the file-based disks [%s]" %
                                constants.DEFAULT_FILE_STORAGE_DIR,
                                default=constants.DEFAULT_FILE_STORAGE_DIR)

GLOBAL_SHARED_FILEDIR_OPT = cli_option("--shared-file-storage-dir",
                                       dest="shared_file_storage_dir",
                                       help="Specify the default directory (cluster-"
                                       "wide) for storing the shared file-based"
                                       constants.DEFAULT_SHARED_FILE_STORAGE_DIR,
                                       metavar="SHAREDDIR",
                                       default=constants.DEFAULT_SHARED_FILE_STORAGE_DIR)

NOMODIFY_ETCHOSTS_OPT = cli_option("--no-etc-hosts", dest="modify_etc_hosts",
                                   help="Don't modify /etc/hosts",
                                   action="store_false", default=True)

NOMODIFY_SSH_SETUP_OPT = cli_option("--no-ssh-init", dest="modify_ssh_setup",
                                    help="Don't initialize SSH keys",
                                    action="store_false", default=True)

ERROR_CODES_OPT = cli_option("--error-codes", dest="error_codes",
                             help="Enable parseable error messages",
                             action="store_true", default=False)

NONPLUS1_OPT = cli_option("--no-nplus1-mem", dest="skip_nplusone_mem",
                          help="Skip N+1 memory redundancy tests",
                          action="store_true", default=False)

REBOOT_TYPE_OPT = cli_option("-t", "--type", dest="reboot_type",
                             help="Type of reboot: soft/hard/full",
                             default=constants.INSTANCE_REBOOT_HARD,
                             choices=list(constants.REBOOT_TYPES))

IGNORE_SECONDARIES_OPT = cli_option("--ignore-secondaries",
                                    dest="ignore_secondaries",
                                    default=False, action="store_true",
                                    help="Ignore errors from secondaries")

NOSHUTDOWN_OPT = cli_option("--noshutdown", dest="shutdown",
                            action="store_false", default=True,
                            help="Don't shutdown the instance (unsafe)")

TIMEOUT_OPT = cli_option("--timeout", dest="timeout", type="int",
                         default=constants.DEFAULT_SHUTDOWN_TIMEOUT,
                         help="Maximum time to wait")

SHUTDOWN_TIMEOUT_OPT = cli_option("--shutdown-timeout",
                                  dest="shutdown_timeout", type="int",
                                  default=constants.DEFAULT_SHUTDOWN_TIMEOUT,
                                  help="Maximum time to wait for instance shutdown")

INTERVAL_OPT = cli_option("--interval", dest="interval", type="int",
                          help=("Number of seconds between repetions of the"

EARLY_RELEASE_OPT = cli_option("--early-release",
                               dest="early_release", default=False,
                               action="store_true",
                               help="Release the locks on the secondary"

NEW_CLUSTER_CERT_OPT = cli_option("--new-cluster-certificate",
                                  dest="new_cluster_cert",
                                  default=False, action="store_true",
                                  help="Generate a new cluster certificate")

RAPI_CERT_OPT = cli_option("--rapi-certificate", dest="rapi_cert",
                           help="File containing new RAPI certificate")

NEW_RAPI_CERT_OPT = cli_option("--new-rapi-certificate", dest="new_rapi_cert",
                               default=None, action="store_true",
                               help=("Generate a new self-signed RAPI"

NEW_CONFD_HMAC_KEY_OPT = cli_option("--new-confd-hmac-key",
                                    dest="new_confd_hmac_key",
                                    default=False, action="store_true",
                                    help=("Create a new HMAC key for %s" %

CLUSTER_DOMAIN_SECRET_OPT = cli_option("--cluster-domain-secret",
                                       dest="cluster_domain_secret",
                                       help=("Load new new cluster domain"
                                             " secret from file"))

NEW_CLUSTER_DOMAIN_SECRET_OPT = cli_option("--new-cluster-domain-secret",
                                           dest="new_cluster_domain_secret",
                                           default=False, action="store_true",
                                           help=("Create a new cluster domain"

USE_REPL_NET_OPT = cli_option("--use-replication-network",
                              dest="use_replication_network",
                              help="Whether to use the replication network"
                              " for talking to the nodes",
                              action="store_true", default=False)
1110 MAINTAIN_NODE_HEALTH_OPT = \
1111 cli_option("--maintain-node-health", dest="maintain_node_health",
1112 metavar=_YORNO, default=None, type="bool",
1113 help="Configure the cluster to automatically maintain node"
1114 " health, by shutting down unknown instances, shutting down"
1115 " unknown DRBD devices, etc.")
1117 IDENTIFY_DEFAULTS_OPT = \
1118 cli_option("--identify-defaults", dest="identify_defaults",
1119 default=False, action="store_true",
1120 help="Identify which saved instance parameters are equal to"
1121 " the current cluster defaults and set them as such, instead"
1122 " of marking them as overridden")
1124 UIDPOOL_OPT = cli_option("--uid-pool", default=None,
1125 action="store", dest="uid_pool",
1126 help=("A list of user-ids or user-id"
1127 " ranges separated by commas"))
1129 ADD_UIDS_OPT = cli_option("--add-uids", default=None,
1130 action="store", dest="add_uids",
1131 help=("A list of user-ids or user-id"
1132 " ranges separated by commas, to be"
1133 " added to the user-id pool"))
1135 REMOVE_UIDS_OPT = cli_option("--remove-uids", default=None,
1136 action="store", dest="remove_uids",
1137 help=("A list of user-ids or user-id"
1138 " ranges separated by commas, to be"
1139 " removed from the user-id pool"))
1141 RESERVED_LVS_OPT = cli_option("--reserved-lvs", default=None,
1142 action="store", dest="reserved_lvs",
1143 help=("A comma-separated list of reserved"
1144 " logical volumes names, that will be"
1145 " ignored by cluster verify"))
# Output formatting: render positive integers as roman numerals
ROMAN_OPT = cli_option("--roman",
                       dest="roman_integers", default=False,
                       action="store_true",
                       help="Use roman numbers for positive integers")

# Cluster-wide DRBD usermode helper binary
DRBD_HELPER_OPT = cli_option("--drbd-usermode-helper", dest="drbd_helper",
                             action="store", default=None,
                             help="Specifies usermode helper for DRBD")

# Disable DRBD as an available disk template
NODRBD_STORAGE_OPT = cli_option("--no-drbd-storage", dest="drbd_storage",
                                action="store_false", default=True,
                                help="Disable support for DRBD")

# IP version (4 or 6) used for the cluster's primary addresses
PRIMARY_IP_VERSION_OPT = \
    cli_option("--primary-ip-version", default=constants.IP4_VERSION,
               action="store", dest="primary_ip_version",
               metavar="%d|%d" % (constants.IP4_VERSION,
                                  constants.IP6_VERSION),
               help="Cluster-wide IP version for primary IP")

# Opcode processing priority; valid names come from _PRIORITY_NAMES
PRIORITY_OPT = cli_option("--priority", default=None, dest="priority",
                          metavar="|".join(name for name, _ in _PRIORITY_NAMES),
                          choices=_PRIONAME_TO_VALUE.keys(),
                          help="Priority for opcode processing")

# OS definition flags (tri-state booleans: None means "leave unchanged")
HID_OS_OPT = cli_option("--hidden", dest="hidden",
                        type="bool", default=None, metavar=_YORNO,
                        help="Sets the hidden flag on the OS")

BLK_OS_OPT = cli_option("--blacklisted", dest="blacklisted",
                        type="bool", default=None, metavar=_YORNO,
                        help="Sets the blacklisted flag on the OS")

PREALLOC_WIPE_DISKS_OPT = cli_option("--prealloc-wipe-disks", default=None,
                                     type="bool", metavar=_YORNO,
                                     dest="prealloc_wipe_disks",
                                     help=("Wipe disks prior to instance"

# Node group / node parameter dictionaries ("key=value[,...]" syntax)
NODE_PARAMS_OPT = cli_option("--node-parameters", dest="ndparams",
                             type="keyval", default=None,
                             help="Node parameters")

NODE_POWERED_OPT = cli_option("--node-powered", default=None,
                              type="bool", metavar=_YORNO,
                              dest="node_powered",
                              help="Specify if the SoR for node is powered")

# Allocation policy for node groups (e.g. preferred/last_resort/unallocable)
ALLOC_POLICY_OPT = cli_option("--alloc-policy", dest="alloc_policy",
                              action="store", metavar="POLICY", default=None,
                              help="Allocation policy for the node group")

# Out-of-band command handling
OOB_TIMEOUT_OPT = cli_option("--oob-timeout", dest="oob_timeout", type="int",
                             default=constants.OOB_TIMEOUT,
                             help="Maximum time to wait for out-of-band helper")

POWER_DELAY_OPT = cli_option("--power-delay", dest="power_delay", type="float",
                             default=constants.OOB_POWER_DELAY,
                             help="Time in seconds to wait between power-ons")

FORCE_FILTER_OPT = cli_option("-F", "--filter", dest="force_filter",
                              action="store_true", default=False,
                              help=("Whether command argument should be treated"

NO_REMEMBER_OPT = cli_option("--no-remember",
                             action="store_true", default=False,
                             help="Perform but do not record the change"
                             " in the configuration")

# Evacuation scope selectors
PRIMARY_ONLY_OPT = cli_option("-p", "--primary-only",
                              default=False, action="store_true",
                              help="Evacuate primary instances only")

SECONDARY_ONLY_OPT = cli_option("-s", "--secondary-only",
                                default=False, action="store_true",
                                help="Evacuate secondary instances only"
                                " (applies only to internally mirrored"
                                " disk templates, e.g. %s)" %
                                utils.CommaJoin(constants.DTS_INT_MIRROR))

# Start the instance but keep it paused (useful for debugging boot issues)
STARTUP_PAUSED_OPT = cli_option("--paused", dest="startup_paused",
                                action="store_true", default=False,
                                help="Pause instance at startup")

# Target node group(s) for group-move operations; may be given repeatedly
TO_GROUP_OPT = cli_option("--to", dest="to", metavar="<group>",
                          help="Destination node group (name or uuid)",
                          default=None, action="append",
                          completion_suggest=OPT_COMPL_ONE_NODEGROUP)
#: Options provided by all commands
COMMON_OPTS = [DEBUG_OPT]

# common options for creating instances. add and import then add their own
#: Shared option list for "gnt-instance add" and "gnt-backup import"
COMMON_CREATE_OPTS = [
  FILESTORE_DRIVER_OPT,
def _ParseArgs(argv, commands, aliases):
  """Parser for the command line arguments.

  This function parses the arguments and returns the function which
  must be executed together with its (modified) arguments.

  @param argv: the command line
  @param commands: dictionary with special contents, see the design
      doc for cmdline handling
  @param aliases: dictionary with command aliases {'alias': 'target, ...}
  @return: (function, options, args) on success, or (None, None, None)
      when the arguments could not be handled

  """
  # Derive the program name shown in usage/version messages
  binary = "<command>"
  binary = argv[0].split("/")[-1]

  if len(argv) > 1 and argv[1] == "--version":
    ToStdout("%s (ganeti %s) %s", binary, constants.VCS_VERSION,
             constants.RELEASE_VERSION)
    # Quit right away. That way we don't have to care about this special
    # argument. optparse.py does it the same.

  # Unknown or missing sub-command: print usage and the full command list
  if len(argv) < 2 or not (argv[1] in commands or
                           argv[1] in aliases):
    # let's do a nice thing
    sortedcmds = commands.keys()

    ToStdout("Usage: %s {command} [options...] [argument...]", binary)
    ToStdout("%s <command> --help to see details, or man %s", binary, binary)

    # compute the max line length for cmd + usage
    mlen = max([len(" %s" % cmd) for cmd in commands])
    mlen = min(60, mlen) # should not get here...

    # and format a nice command list
    ToStdout("Commands:")
    for cmd in sortedcmds:
      cmdstr = " %s" % (cmd,)
      # commands[cmd] is (function, args_def, parser_opts, usage, description)
      help_text = commands[cmd][4]
      help_lines = textwrap.wrap(help_text, 79 - 3 - mlen)
      ToStdout("%-*s - %s", mlen, cmdstr, help_lines.pop(0))
      for line in help_lines:
        ToStdout("%-*s   %s", mlen, "", line)

    return None, None, None

  # get command, unalias it, and look it up in commands
    # an alias must never shadow a real command
    raise errors.ProgrammerError("Alias '%s' overrides an existing"
  if aliases[cmd] not in commands:
    raise errors.ProgrammerError("Alias '%s' maps to non-existing"
                                 " command '%s'" % (cmd, aliases[cmd]))

  # Build an option parser specific to the chosen command
  func, args_def, parser_opts, usage, description = commands[cmd]
  parser = OptionParser(option_list=parser_opts + COMMON_OPTS,
                        description=description,
                        formatter=TitledHelpFormatter(),
                        usage="%%prog %s %s" % (cmd, usage))
  parser.disable_interspersed_args()
  options, args = parser.parse_args()

  if not _CheckArguments(cmd, args_def, args):
    return None, None, None

  return func, options, args
def _CheckArguments(cmd, args_def, args):
  """Verifies the arguments using the argument definition.

  Algorithm:

    1. Abort with error if values specified by user but none expected.

    1. For each argument in definition

      1. Keep running count of minimum number of values (min_count)

      1. Keep running count of maximum number of values (max_count)

      1. If it has an unlimited number of values

        1. Abort with error if it's not the last argument in the definition

    1. If last argument has limited number of values

      1. Abort with error if number of values doesn't match or is too large

    1. Abort with error if user didn't pass enough values (min_count)

  """
  if args and not args_def:
    ToStderr("Error: Command %s expects no arguments", cmd)

  last_idx = len(args_def) - 1

  for idx, arg in enumerate(args_def):
    # accumulate the minimum number of values the user must provide
    if min_count is None:
    elif arg.min is not None:
      min_count += arg.min

    # accumulate the maximum number of values accepted
    if max_count is None:
    elif arg.max is not None:
      max_count += arg.max

      check_max = (arg.max is not None)
    elif arg.max is None:
      raise errors.ProgrammerError("Only the last argument can have max=None")

  # Command with exact number of arguments
  if (min_count is not None and max_count is not None and
      min_count == max_count and len(args) != min_count):
    ToStderr("Error: Command %s expects %d argument(s)", cmd, min_count)

  # Command with limited number of arguments
  if max_count is not None and len(args) > max_count:
    ToStderr("Error: Command %s expects only %d argument(s)",

  # Command with some required arguments
  if min_count is not None and len(args) < min_count:
    ToStderr("Error: Command %s expects at least %d argument(s)",
def SplitNodeOption(value):
  """Splits the value of a --node option.

  Accepts either "pnode" or "pnode:snode" syntax. When a colon is
  present, only the first one is significant; everything after it is
  the secondary node. Note: with a colon the result is a two-element
  list, without one it is a (value, None) tuple — callers unpack both
  the same way.

  """
  if not value or ":" not in value:
    return (value, None)
  return value.split(":", 1)
def CalculateOSNames(os_name, os_variants):
  """Calculates all the names an OS can be called, according to its variants.

  @type os_name: string
  @param os_name: base name of the os
  @type os_variants: list or None
  @param os_variants: list of supported variants
  @rtype: list
  @return: list of valid names

  """
    # each variant yields a "<os>+<variant>" name
    return ["%s+%s" % (os_name, v) for v in os_variants]
def ParseFields(selected, default):
  """Parses the values of "--field"-like options.

  @type selected: string or None
  @param selected: User-selected options
  @type default: list
  @param default: Default fields

  """
  if selected is None:

  # a leading "+" means "append to the default fields"
  if selected.startswith("+"):
    return default + selected[1:].split(",")

  return selected.split(",")
#: Decorator ensuring RPC is initialized around the wrapped function
#: (alias for L{rpc.RunWithRPC})
UsesRPC = rpc.RunWithRPC
def AskUser(text, choices=None):
  """Ask the user a question.

  @param text: the question to ask
  @param choices: list with elements tuples (input_char, return_value,
      description); if not given, it will default to: [('y', True,
      'Perform the operation'), ('n', False, 'Do no do the operation')];
      note that the '?' char is reserved for help

  @return: one of the return values from the choices list; if input is
      not possible (i.e. not running with a tty, we return the last
      entry from the list

  """
    choices = [("y", True, "Perform the operation"),
               ("n", False, "Do not perform the operation")]
  # validate the choices structure before prompting
  if not choices or not isinstance(choices, list):
    raise errors.ProgrammerError("Invalid choices argument to AskUser")
  for entry in choices:
    if not isinstance(entry, tuple) or len(entry) < 3 or entry[0] == "?":
      raise errors.ProgrammerError("Invalid choices element to AskUser")

  # default answer: last choice's return value (used when no tty)
  answer = choices[-1][1]
  # re-wrap the question text to 70 columns, keeping existing newlines
  for line in text.splitlines():
    new_text.append(textwrap.fill(line, 70, replace_whitespace=False))
  text = "\n".join(new_text)
  # talk to the controlling terminal directly, not stdin/stdout
  # (Python 2 ``file`` builtin)
  f = file("/dev/tty", "a+")
  chars = [entry[0] for entry in choices]
  chars[-1] = "[%s]" % chars[-1]
  maps = dict([(entry[0], entry[1]) for entry in choices])
  f.write("/".join(chars))
  # read at most one character plus newline
  line = f.readline(2).strip().lower()
  # '?' help: print one line per available choice
  for entry in choices:
    f.write(" %s - %s\n" % (entry[0], entry[2]))
class JobSubmittedException(Exception):
  """Job was submitted, client should exit.

  This exception has one argument, the ID of the job that was
  submitted. The handler should print this ID.

  This is not an error, just a structured way to exit from clients.

  """
def SendJob(ops, cl=None):
  """Function to submit an opcode without waiting for the results.

  @type ops: list
  @param ops: list of opcodes
  @type cl: luxi.Client
  @param cl: the luxi client to use for communicating with the master;
      if None, a new client will be created

  """
  job_id = cl.SubmitJob(ops)
def GenericPollJob(job_id, cbs, report_cbs):
  """Generic job-polling function.

  Repeatedly waits for job changes via the data callbacks, forwarding
  log messages to the reporting callbacks, until the job reaches a
  final state; then inspects the per-opcode results and either returns
  them or raises an appropriate error.

  @type job_id: number
  @param job_id: Job ID
  @type cbs: Instance of L{JobPollCbBase}
  @param cbs: Data callbacks
  @type report_cbs: Instance of L{JobPollReportCbBase}
  @param report_cbs: Reporting callbacks

  """
  prev_job_info = None
  prev_logmsg_serial = None

    result = cbs.WaitForJobChangeOnce(job_id, ["status"], prev_job_info,
      # job not found, go away!
      raise errors.JobLost("Job with id %s lost" % job_id)

    if result == constants.JOB_NOTCHANGED:
      report_cbs.ReportNotChanged(job_id, status)

    # Split result, a tuple of (field values, log entries)
    (job_info, log_entries) = result
    (status, ) = job_info

      for log_entry in log_entries:
        (serial, timestamp, log_type, message) = log_entry
        report_cbs.ReportLogMessage(job_id, serial, timestamp,
        # only ever report a given log serial once
        prev_logmsg_serial = max(prev_logmsg_serial, serial)

    # TODO: Handle canceled and archived jobs
    elif status in (constants.JOB_STATUS_SUCCESS,
                    constants.JOB_STATUS_ERROR,
                    constants.JOB_STATUS_CANCELING,
                    constants.JOB_STATUS_CANCELED):

    prev_job_info = job_info

  # job finished: fetch the final per-opcode status and results
  jobs = cbs.QueryJobs([job_id], ["status", "opstatus", "opresult"])
    raise errors.JobLost("Job with id %s lost" % job_id)

  status, opstatus, result = jobs[0]

  if status == constants.JOB_STATUS_SUCCESS:

  if status in (constants.JOB_STATUS_CANCELING, constants.JOB_STATUS_CANCELED):
    raise errors.OpExecError("Job was canceled")

  # job failed: find the first failed opcode and raise a useful error
  for idx, (status, msg) in enumerate(zip(opstatus, result)):
    if status == constants.OP_STATUS_SUCCESS:
    elif status == constants.OP_STATUS_ERROR:
      errors.MaybeRaise(msg)

        raise errors.OpExecError("partial failure (opcode %d): %s" %

      raise errors.OpExecError(str(msg))

  # default failure mode
  raise errors.OpExecError(result)
class JobPollCbBase:
  """Base class for L{GenericPollJob} callbacks.

  Subclasses supply the data-access side of job polling.

  """
    """Initializes this class.

    """
  def WaitForJobChangeOnce(self, job_id, fields,
                           prev_job_info, prev_log_serial):
    """Waits for changes on a job.

    """
    raise NotImplementedError()

  def QueryJobs(self, job_ids, fields):
    """Returns the selected fields for the selected job IDs.

    @type job_ids: list of numbers
    @param job_ids: Job IDs
    @type fields: list of strings
    @param fields: Fields

    """
    raise NotImplementedError()
class JobPollReportCbBase:
  """Base class for L{GenericPollJob} reporting callbacks.

  Subclasses decide how polling progress is shown to the user.

  """
    """Initializes this class.

    """
  def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
    """Handles a log message.

    """
    raise NotImplementedError()

  def ReportNotChanged(self, job_id, status):
    """Called for if a job hasn't changed in a while.

    @type job_id: number
    @param job_id: Job ID
    @type status: string or None
    @param status: Job status if available

    """
    raise NotImplementedError()
class _LuxiJobPollCb(JobPollCbBase):
  # Data callbacks backed by a luxi client talking to the master daemon.
  def __init__(self, cl):
    """Initializes this class.

    @param cl: luxi client used for all queries

    """
    JobPollCbBase.__init__(self)

  def WaitForJobChangeOnce(self, job_id, fields,
                           prev_job_info, prev_log_serial):
    """Waits for changes on a job.

    """
    # delegate straight to the luxi client
    return self.cl.WaitForJobChangeOnce(job_id, fields,
                                        prev_job_info, prev_log_serial)

  def QueryJobs(self, job_ids, fields):
    """Returns the selected fields for the selected job IDs.

    """
    return self.cl.QueryJobs(job_ids, fields)
class FeedbackFnJobPollReportCb(JobPollReportCbBase):
  # Reporting callbacks that forward log messages to a feedback function.
  def __init__(self, feedback_fn):
    """Initializes this class.

    @type feedback_fn: callable
    @param feedback_fn: function called with each log message tuple

    """
    JobPollReportCbBase.__init__(self)

    self.feedback_fn = feedback_fn

    assert callable(feedback_fn)

  def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
    """Handles a log message.

    """
    # feedback functions receive (timestamp, type, message) tuples
    self.feedback_fn((timestamp, log_type, log_msg))

  def ReportNotChanged(self, job_id, status):
    """Called if a job hasn't changed in a while.

    """
class StdioJobPollReportCb(JobPollReportCbBase):
  # Reporting callbacks printing to stdout/stderr (used by CLI scripts).
    """Initializes this class.

    """
    JobPollReportCbBase.__init__(self)

    # remember which one-time notifications were already shown
    self.notified_queued = False
    self.notified_waitlock = False

  def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
    """Handles a log message.

    """
    ToStdout("%s %s", time.ctime(utils.MergeTime(timestamp)),
             FormatLogMessage(log_type, log_msg))

  def ReportNotChanged(self, job_id, status):
    """Called if a job hasn't changed in a while.

    """
    # notify about each wait state only once per job
    if status == constants.JOB_STATUS_QUEUED and not self.notified_queued:
      ToStderr("Job %s is waiting in queue", job_id)
      self.notified_queued = True

    elif status == constants.JOB_STATUS_WAITING and not self.notified_waitlock:
      ToStderr("Job %s is trying to acquire all necessary locks", job_id)
      self.notified_waitlock = True
def FormatLogMessage(log_type, log_msg):
  """Formats a job message according to its type.

  Messages of type ELOG_MESSAGE are passed through as-is; any other
  payload is stringified first. The result always goes through
  L{utils.SafeEncode} before being returned.

  """
  if log_type == constants.ELOG_MESSAGE:
    text = log_msg
  else:
    text = str(log_msg)

  return utils.SafeEncode(text)
def PollJob(job_id, cl=None, feedback_fn=None, reporter=None):
  """Function to poll for the result of a job.

  @type job_id: job identified
  @param job_id: the job to poll for results
  @type cl: luxi.Client
  @param cl: the luxi client to use for communicating with the master;
      if None, a new client will be created
  @param feedback_fn: optional callable receiving log message tuples;
      mutually exclusive with C{reporter}
  @param reporter: optional L{JobPollReportCbBase} instance

  """
  if reporter is None:
      # wrap the feedback function in a reporting callback
      reporter = FeedbackFnJobPollReportCb(feedback_fn)
      reporter = StdioJobPollReportCb()
    raise errors.ProgrammerError("Can't specify reporter and feedback function")

  return GenericPollJob(job_id, _LuxiJobPollCb(cl), reporter)
def SubmitOpCode(op, cl=None, feedback_fn=None, opts=None, reporter=None):
  """Legacy function to submit an opcode.

  This is just a simple wrapper over the construction of the processor
  instance. It should be extended to better handle feedback and
  interaction functions.

  @param op: the opcode to submit
  @param cl: luxi client; created if None
  @return: the result of the first (and only) opcode of the job

  """
  # apply generic CLI options (debug, dry-run, priority) to the opcode
  SetGenericOpcodeOpts([op], opts)

  job_id = SendJob([op], cl=cl)

  op_results = PollJob(job_id, cl=cl, feedback_fn=feedback_fn,

  return op_results[0]
def SubmitOrSend(op, opts, cl=None, feedback_fn=None):
  """Wrapper around SubmitOpCode or SendJob.

  This function will decide, based on the 'opts' parameter, whether to
  submit and wait for the result of the opcode (and return it), or
  whether to just send the job and print its identifier. It is used in
  order to simplify the implementation of the '--submit' option.

  It will also process the opcodes if we're sending the via SendJob
  (otherwise SubmitOpCode does it).

  @raise JobSubmittedException: in --submit mode, carrying the job ID

  """
  if opts and opts.submit_only:
    SetGenericOpcodeOpts(job, opts)
    job_id = SendJob(job, cl=cl)
    # structured exit: the caller prints the job ID and returns
    raise JobSubmittedException(job_id)

  return SubmitOpCode(op, cl=cl, feedback_fn=feedback_fn, opts=opts)
def SetGenericOpcodeOpts(opcode_list, options):
  """Processor for generic options.

  This function updates the given opcodes based on generic command
  line options (like debug, dry-run, etc.).

  @param opcode_list: list of opcodes
  @param options: command line options or None
  @return: None (in-place modification)

  """
  for op in opcode_list:
    op.debug_level = options.debug
    # dry_run and priority are only copied when the command defines them
    if hasattr(options, "dry_run"):
      op.dry_run = options.dry_run
    if getattr(options, "priority", None) is not None:
      op.priority = _PRIONAME_TO_VALUE[options.priority]
  # TODO: Cache object?
    client = luxi.Client()
  except luxi.NoMasterError:
    # no master daemon reachable: diagnose why before failing
    ss = ssconf.SimpleStore()

    # Try to read ssconf file
    except errors.ConfigurationError:
      raise errors.OpPrereqError("Cluster not initialized or this machine is"
                                 " not part of a cluster")

    # if we are on a non-master node, point the user at the master
    master, myself = ssconf.GetMasterAndMyself(ss=ss)
    if master != myself:
      raise errors.OpPrereqError("This is not the master node, please connect"
                                 " to node '%s' and rerun the command" %
def FormatError(err):
  """Return a formatted error message for a given error.

  This function takes an exception instance and returns a tuple
  consisting of two values: first, the recommended exit code, and
  second, a string describing the error message (not
  newline-terminated).

  @param err: exception instance to describe
  @rtype: tuple; (int, string)

  """
  # dispatch on the exception type, most specific types first
  if isinstance(err, errors.ConfigurationError):
    txt = "Corrupt configuration file: %s" % msg
    obuf.write(txt + "\n")
    obuf.write("Aborting.")
  elif isinstance(err, errors.HooksAbort):
    obuf.write("Failure: hooks execution failed:\n")
    for node, script, out in err.args[0]:
        obuf.write("  node: %s, script: %s, output: %s\n" %
                   (node, script, out))
        obuf.write("  node: %s, script: %s (no output)\n" %
  elif isinstance(err, errors.HooksFailure):
    obuf.write("Failure: hooks general failure: %s" % msg)
  elif isinstance(err, errors.ResolverError):
    this_host = netutils.Hostname.GetSysName()
    if err.args[0] == this_host:
      msg = "Failure: can't resolve my own hostname ('%s')"
      msg = "Failure: can't resolve hostname '%s'"
    obuf.write(msg % err.args[0])
  elif isinstance(err, errors.OpPrereqError):
    # two-argument form carries a structured error type
    if len(err.args) == 2:
      obuf.write("Failure: prerequisites not met for this"
                 " operation:\nerror type: %s, error details:\n%s" %
                 (err.args[1], err.args[0]))
      obuf.write("Failure: prerequisites not met for this"
                 " operation:\n%s" % msg)
  elif isinstance(err, errors.OpExecError):
    obuf.write("Failure: command execution error:\n%s" % msg)
  elif isinstance(err, errors.TagError):
    obuf.write("Failure: invalid tag(s) given:\n%s" % msg)
  elif isinstance(err, errors.JobQueueDrainError):
    obuf.write("Failure: the job queue is marked for drain and doesn't"
               " accept new requests\n")
  elif isinstance(err, errors.JobQueueFull):
    obuf.write("Failure: the job queue is full and doesn't accept new"
               " job submissions until old jobs are archived\n")
  elif isinstance(err, errors.TypeEnforcementError):
    obuf.write("Parameter Error: %s" % msg)
  elif isinstance(err, errors.ParameterError):
    obuf.write("Failure: unknown/wrong parameter name '%s'" % msg)
  elif isinstance(err, luxi.NoMasterError):
    obuf.write("Cannot communicate with the master daemon.\nIs it running"
               " and listening for connections?")
  elif isinstance(err, luxi.TimeoutError):
    obuf.write("Timeout while talking to the master daemon. Jobs might have"
               " been submitted and will continue to run even if the call"
               " timed out. Useful commands in this situation are \"gnt-job"
               " list\", \"gnt-job cancel\" and \"gnt-job watch\". Error:\n")
  elif isinstance(err, luxi.PermissionError):
    obuf.write("It seems you don't have permissions to connect to the"
               " master daemon.\nPlease retry as a different user.")
  elif isinstance(err, luxi.ProtocolError):
    obuf.write("Unhandled protocol error while talking to the master daemon:\n"
  elif isinstance(err, errors.JobLost):
    obuf.write("Error checking job status: %s" % msg)
  elif isinstance(err, errors.QueryFilterParseError):
    obuf.write("Error while parsing query filter: %s\n" % err.args[0])
    obuf.write("\n".join(err.GetDetails()))
  elif isinstance(err, errors.GenericError):
    obuf.write("Unhandled Ganeti error: %s" % msg)
  elif isinstance(err, JobSubmittedException):
    # not an error: the job was submitted in --submit mode
    obuf.write("JobID: %s\n" % err.args[0])
    obuf.write("Unhandled exception: %s" % msg)
  return retcode, obuf.getvalue().rstrip("\n")
def GenericMain(commands, override=None, aliases=None):
  """Generic main function for all the gnt-* commands.

  @param commands: a dictionary with a special structure, see the design doc
      for command line handling.
  @param override: if not None, we expect a dictionary with keys that will
      override command line options; this can be used to pass
      options from the scripts to generic functions
  @param aliases: dictionary with command aliases {'alias': 'target, ...}

  """
  # save the program name and the entire command line for later logging
    binary = os.path.basename(sys.argv[0]) or sys.argv[0]

    if len(sys.argv) >= 2:
      binary += " " + sys.argv[1]
      old_cmdline = " ".join(sys.argv[2:])
    binary = "<unknown program>"

    func, options, args = _ParseArgs(sys.argv, commands, aliases)
  except errors.ParameterError, err:
    result, err_msg = FormatError(err)

  if func is None: # parse error

  # script-supplied option overrides take precedence over the parsed ones
  if override is not None:
    for key, val in override.iteritems():
      setattr(options, key, val)

  utils.SetupLogging(constants.LOG_COMMANDS, binary, debug=options.debug,
                     stderr_logging=True)

    logging.info("run with arguments '%s'", old_cmdline)
    logging.info("run with no arguments")

    result = func(options, args)
  except (errors.GenericError, luxi.ProtocolError,
          JobSubmittedException), err:
    result, err_msg = FormatError(err)
    logging.exception("Error during command processing")
  except KeyboardInterrupt:
    result = constants.EXIT_FAILURE
    ToStderr("Aborted. Note that if the operation created any jobs, they"
             " might have been submitted and"
             " will continue to run in the background.")
  except IOError, err:
    if err.errno == errno.EPIPE:
      # our terminal went away, we'll exit
      sys.exit(constants.EXIT_FAILURE)
def ParseNicOption(optvalue):
  """Parses the value of the --net option(s).

  @param optvalue: list of (index, settings-dict) pairs as parsed from
      the command line

  """
    nic_max = max(int(nidx[0]) + 1 for nidx in optvalue)
  except (TypeError, ValueError), err:
    raise errors.OpPrereqError("Invalid NIC index passed: %s" % str(err))

  # NOTE(review): ``[{}] * n`` makes every slot alias the SAME dict;
  # this is only safe if each used slot is later replaced by assignment
  # — confirm before mutating any entry in place.
  nics = [{}] * nic_max
  for nidx, ndict in optvalue:

    if not isinstance(ndict, dict):
      raise errors.OpPrereqError("Invalid nic/%d value: expected dict,"
                                 " got %s" % (nidx, ndict))

    # enforce the declared types of all NIC parameters
    utils.ForceDictType(ndict, constants.INIC_PARAMS_TYPES)
def GenericInstanceCreate(mode, opts, args):
  """Add an instance to the cluster via either creation or import.

  @param mode: constants.INSTANCE_CREATE or constants.INSTANCE_IMPORT
  @param opts: the command line options selected by the user
  @param args: should contain only one element, the new instance name
  @return: the desired exit code

  """
  (pnode, snode) = SplitNodeOption(opts.node)

    hypervisor, hvparams = opts.hypervisor

    nics = ParseNicOption(opts.nics)
  elif mode == constants.INSTANCE_CREATE:
    # default of one nic, all auto

  # validate the disk specification against the chosen disk template
  if opts.disk_template == constants.DT_DISKLESS:
    if opts.disks or opts.sd_size is not None:
      raise errors.OpPrereqError("Diskless instance but disk"
                                 " information passed")
    if (not opts.disks and not opts.sd_size
        and mode == constants.INSTANCE_CREATE):
      raise errors.OpPrereqError("No disk information specified")
    if opts.disks and opts.sd_size is not None:
      raise errors.OpPrereqError("Please use either the '--disk' or"
    if opts.sd_size is not None:
      # legacy single-disk syntax: convert to the --disk representation
      opts.disks = [(0, {constants.IDISK_SIZE: opts.sd_size})]

      disk_max = max(int(didx[0]) + 1 for didx in opts.disks)
    except ValueError, err:
      raise errors.OpPrereqError("Invalid disk index passed: %s" % str(err))
    # NOTE(review): ``[{}] * n`` aliases one shared dict across slots;
    # safe only if every used slot is later reassigned — confirm.
    disks = [{}] * disk_max

    for didx, ddict in opts.disks:
      if not isinstance(ddict, dict):
        msg = "Invalid disk/%d value: expected dict, got %s" % (didx, ddict)
        raise errors.OpPrereqError(msg)
      elif constants.IDISK_SIZE in ddict:
        # 'size' and 'adopt' are mutually exclusive per disk
        if constants.IDISK_ADOPT in ddict:
          raise errors.OpPrereqError("Only one of 'size' and 'adopt' allowed"
                                     " (disk %d)" % didx)
          ddict[constants.IDISK_SIZE] = \
            utils.ParseUnit(ddict[constants.IDISK_SIZE])
        except ValueError, err:
          raise errors.OpPrereqError("Invalid disk size for disk %d: %s" %
      elif constants.IDISK_ADOPT in ddict:
        if mode == constants.INSTANCE_IMPORT:
          raise errors.OpPrereqError("Disk adoption not allowed for instance"
        ddict[constants.IDISK_SIZE] = 0
      else:
        raise errors.OpPrereqError("Missing size or adoption source for"

  if opts.tags is not None:
    tags = opts.tags.split(",")

  # enforce declared types for backend and hypervisor parameters
  utils.ForceDictType(opts.beparams, constants.BES_PARAMETER_TYPES)
  utils.ForceDictType(hvparams, constants.HVS_PARAMETER_TYPES)

  # mode-specific settings for the opcode
  if mode == constants.INSTANCE_CREATE:
    force_variant = opts.force_variant
    no_install = opts.no_install
    identify_defaults = False
  elif mode == constants.INSTANCE_IMPORT:
    force_variant = False
    src_node = opts.src_node
    src_path = opts.src_dir
    identify_defaults = opts.identify_defaults
    raise errors.ProgrammerError("Invalid creation mode %s" % mode)

  op = opcodes.OpInstanceCreate(instance_name=instance,
                                disk_template=opts.disk_template,
                                pnode=pnode, snode=snode,
                                ip_check=opts.ip_check,
                                name_check=opts.name_check,
                                wait_for_sync=opts.wait_for_sync,
                                file_storage_dir=opts.file_storage_dir,
                                file_driver=opts.file_driver,
                                iallocator=opts.iallocator,
                                hypervisor=hypervisor,
                                beparams=opts.beparams,
                                osparams=opts.osparams,
                                force_variant=force_variant,
                                no_install=no_install,
                                identify_defaults=identify_defaults)

  SubmitOrSend(op, opts)
class _RunWhileClusterStoppedHelper:
  """Helper class for L{RunWhileClusterStopped} to simplify state management

  """
  def __init__(self, feedback_fn, cluster_name, master_node, online_nodes):
    """Initializes this class.

    @type feedback_fn: callable
    @param feedback_fn: Feedback function
    @type cluster_name: string
    @param cluster_name: Cluster name
    @type master_node: string
    @param master_node Master node name
    @type online_nodes: list
    @param online_nodes: List of names of online nodes

    """
    self.feedback_fn = feedback_fn
    self.cluster_name = cluster_name
    self.master_node = master_node
    self.online_nodes = online_nodes

    self.ssh = ssh.SshRunner(self.cluster_name)

    # all online nodes except the master (restarted before the master)
    self.nonmaster_nodes = [name for name in online_nodes
                            if name != master_node]

    assert self.master_node not in self.nonmaster_nodes

  def _RunCmd(self, node_name, cmd):
    """Runs a command on the local or a remote machine.

    @type node_name: string
    @param node_name: Machine name
    @param cmd: command as a list of arguments
    @raise errors.OpExecError: if the command fails

    """
    if node_name is None or node_name == self.master_node:
      # No need to use SSH
      result = utils.RunCmd(cmd)
      result = self.ssh.Run(node_name, "root", utils.ShellQuoteArgs(cmd))

      errmsg = ["Failed to run command %s" % result.cmd]
        errmsg.append("on node %s" % node_name)
      errmsg.append(": exitcode %s and error %s" %
                    (result.exit_code, result.output))
      raise errors.OpExecError(" ".join(errmsg))

  def Call(self, fn, *args):
    """Call function while all daemons are stopped.

    @type fn: callable
    @param fn: Function to be called

    """
    # Pause watcher by acquiring an exclusive lock on watcher state file
    self.feedback_fn("Blocking watcher")
    watcher_block = utils.FileLock.Open(constants.WATCHER_LOCK_FILE)
      # TODO: Currently, this just blocks. There's no timeout.
      # TODO: Should it be a shared lock?
      watcher_block.Exclusive(blocking=True)

      # Stop master daemons, so that no new jobs can come in and all running
      self.feedback_fn("Stopping master daemons")
      self._RunCmd(None, [constants.DAEMON_UTIL, "stop-master"])

        # Stop daemons on all nodes
        for node_name in self.online_nodes:
          self.feedback_fn("Stopping daemons on %s" % node_name)
          self._RunCmd(node_name, [constants.DAEMON_UTIL, "stop-all"])

        # All daemons are shut down now
          return fn(self, *args)
        except Exception, err:
          _, errmsg = FormatError(err)
          logging.exception("Caught exception")
          self.feedback_fn(errmsg)

        # Start cluster again, master node last
        for node_name in self.nonmaster_nodes + [self.master_node]:
          self.feedback_fn("Starting daemons on %s" % node_name)
          self._RunCmd(node_name, [constants.DAEMON_UTIL, "start-all"])

      # release the watcher lock regardless of the outcome
      watcher_block.Close()
def RunWhileClusterStopped(feedback_fn, fn, *args):
  """Calls a function while all cluster daemons are stopped.

  @type feedback_fn: callable
  @param feedback_fn: Feedback function
  @type fn: callable
  @param fn: Function to be called when daemons are stopped
  @return: the return value of C{fn}

  """
  feedback_fn("Gathering cluster information")

  # This ensures we're running on the master daemon

  (cluster_name, master_node) = \
    cl.QueryConfigValues(["cluster_name", "master_node"])

  online_nodes = GetOnlineNodes([], cl=cl)

  # Don't keep a reference to the client. The master daemon will go away.

  assert master_node in online_nodes

  return _RunWhileClusterStoppedHelper(feedback_fn, cluster_name, master_node,
                                       online_nodes).Call(fn, *args)
def GenerateTable(headers, fields, separator, data,
                  numfields=None, unitfields=None,
  """Prints a table with headers and different fields.

  @type headers: dict
  @param headers: dictionary mapping field names to headers for
      the table
  @type fields: list
  @param fields: the field names corresponding to each row in
      the data field
  @param separator: the separator to be used; if this is None,
      the default 'smart' algorithm is used which computes optimal
      field width, otherwise just the separator is used between
      each field
  @type data: list
  @param data: a list of lists, each sublist being one row to be output
  @type numfields: list
  @param numfields: a list with the fields that hold numeric
      values and thus should be right-aligned
  @type unitfields: list
  @param unitfields: a list with the fields that hold numeric
      values that should be formatted with the units field
  @type units: string or None
  @param units: the units we should use for formatting, or None for
      automatic choice (human-readable for non-separator usage, otherwise
      megabytes); this is a one-letter string

  """
  if numfields is None:
  if unitfields is None:

  numfields = utils.FieldSet(*numfields) # pylint: disable-msg=W0142
  unitfields = utils.FieldSet(*unitfields) # pylint: disable-msg=W0142

  # build one printf-style conversion per output column
  for field in fields:
    if headers and field not in headers:
      # TODO: handle better unknown fields (either revert to old
      # style of raising exception, or deal more intelligently with
      # variable fields)
      headers[field] = field
    if separator is not None:
      format_fields.append("%s")
    elif numfields.Matches(field):
      # numeric columns are right-aligned with a computed width
      format_fields.append("%*s")
      format_fields.append("%-*s")

  if separator is None:
    mlens = [0 for name in fields]
    format_str = " ".join(format_fields)
    # escape literal '%' in the separator for the later % formatting
    format_str = separator.replace("%", "%%").join(format_fields)

    # stringify all values, converting unit fields, and track column widths
    for idx, val in enumerate(row):
      if unitfields.Matches(fields[idx]):
        except (TypeError, ValueError):
          val = row[idx] = utils.FormatUnit(val, units)
      val = row[idx] = str(val)
      if separator is None:
        mlens[idx] = max(mlens[idx], len(val))

    # header row: widths may grow to fit the header text
    for idx, name in enumerate(fields):
      if separator is None:
        mlens[idx] = max(mlens[idx], len(hdr))
        args.append(mlens[idx])
    result.append(format_str % tuple(args))

  if separator is None:
    assert len(mlens) == len(fields)
    # don't pad the last column unless it is right-aligned
    if fields and not numfields.Matches(fields[-1]):

    # None rows are rendered as all-dashes separators
      line = ["-" for _ in fields]
    for idx in range(len(fields)):
      if separator is None:
        args.append(mlens[idx])
      args.append(line[idx])
    result.append(format_str % tuple(args))
# Converts a boolean query value to its display string.
# NOTE(review): the function body is elided in this excerpt; presumably it
# returns fixed Y/N-style strings -- confirm against the full source.
2434 def _FormatBool(value):
2435 """Formats a boolean value as a string.
# Per-field-type formatting table: maps a query field type (QFT_*) to a
# tuple of (formatter callable, right-align flag).  QFT_UNIT is deliberately
# absent -- it needs the runtime "unit" argument and is handled dynamically
# in _GetColumnFormatter (see the assert there).
2443 #: Default formatting for query results; (callback, align right)
2444 _DEFAULT_FORMAT_QUERY = {
2445 constants.QFT_TEXT: (str, False),
2446 constants.QFT_BOOL: (_FormatBool, False),
2447 constants.QFT_NUMBER: (str, True),
2448 constants.QFT_TIMESTAMP: (utils.FormatTime, False),
2449 constants.QFT_OTHER: (str, False),
2450 constants.QFT_UNKNOWN: (str, False),
# NOTE(review): closing brace of the dict literal is elided in this excerpt.
# Resolves the (formatter, right-align) pair for one query field:
# caller-supplied override first, then the QFT_UNIT special case (closure
# over "unit"), then the static _DEFAULT_FORMAT_QUERY table.
2454 def _GetColumnFormatter(fdef, override, unit):
2455 """Returns formatting function for a field.
2457 @type fdef: L{objects.QueryFieldDefinition}
2458 @type override: dict
2459 @param override: Dictionary for overriding field formatting functions,
2460 indexed by field name, contents like L{_DEFAULT_FORMAT_QUERY}
2462 @param unit: Unit used for formatting fields of type L{constants.QFT_UNIT}
2463 @rtype: tuple; (callable, bool)
2464 @return: Returns the function to format a value (takes one parameter) and a
2465 boolean for aligning the value on the right-hand side
# Overrides are keyed by field *name*, not by field type.
2468 fmt = override.get(fdef.name, None)
# NOTE(review): the "return fmt if not None" line is elided in this excerpt.
# QFT_UNIT must not appear in the static table -- it is built per-call below.
2472 assert constants.QFT_UNIT not in _DEFAULT_FORMAT_QUERY
2474 if fdef.kind == constants.QFT_UNIT:
2475 # Can't keep this information in the static dictionary
# Unit fields are numeric, hence right-aligned (True).
2476 return (lambda value: utils.FormatUnit(value, unit), True)
2478 fmt = _DEFAULT_FORMAT_QUERY.get(fdef.kind, None)
# Unhandled field kinds are a programming error, not a user error.
2482 raise NotImplementedError("Can't format column type '%s'" % fdef.kind)
# Callable wrapper used as a TableColumn formatter: it unpacks the
# (status, value) pairs produced by the query layer, records the status via
# a callback, and renders either the value or an error description.
2485 class _QueryColumnFormatter:
2486 """Callable class for formatting fields of a query.
2489 def __init__(self, fn, status_fn, verbose):
2490 """Initializes this class.
2493 @param fn: Formatting function
2494 @type status_fn: callable
2495 @param status_fn: Function to report fields' status
2496 @type verbose: boolean
2497 @param verbose: whether to use verbose field descriptions or not
# NOTE(review): assignment of self._fn is elided in this excerpt.
2501 self._status_fn = status_fn
2502 self._verbose = verbose
2504 def __call__(self, data):
2505 """Returns a field's string representation.
# Each cell arrives as a (result-status, value) tuple.
2508 (status, value) = data
# Always report the status so FormatQueryResult can aggregate statistics.
2511 self._status_fn(status)
2513 if status == constants.RS_NORMAL:
2514 return self._fn(value)
# Abnormal statuses must not carry data; render a status placeholder instead.
2516 assert value is None, \
2517 "Found value %r for abnormal status %s" % (value, status)
2519 return FormatResultError(status, self._verbose)
# Maps an abnormal result status (RS_*) to its human-readable description,
# choosing the verbose or terse variant from constants.RSS_DESCRIPTION.
2522 def FormatResultError(status, verbose):
2523 """Formats result status other than L{constants.RS_NORMAL}.
2525 @param status: The result status
2526 @type verbose: boolean
2527 @param verbose: Whether to return the verbose text
2528 @return: Text of result status
# RS_NORMAL values are formatted by the regular field formatter, never here.
2531 assert status != constants.RS_NORMAL, \
2532 "FormatResultError called with status equal to constants.RS_NORMAL"
2534 (verbose_text, normal_text) = constants.RSS_DESCRIPTION[status]
# NOTE(review): the return choosing verbose_text/normal_text and the
# try/except around the lookup are elided in this excerpt; an unknown
# status falls through to the error below.
2536 raise NotImplementedError("Unknown status %s" % status)
# Renders a QueryResponse into table lines and an overall status
# (QR_NORMAL / QR_UNKNOWN / QR_INCOMPLETE), aggregating per-cell result
# statuses via the _RecordStatus callback threaded through the formatters.
2543 def FormatQueryResult(result, unit=None, format_override=None, separator=None,
2544 header=False, verbose=False):
2545 """Formats data in L{objects.QueryResponse}.
2547 @type result: L{objects.QueryResponse}
2548 @param result: result of query operation
2550 @param unit: Unit used for formatting fields of type L{constants.QFT_UNIT},
2551 see L{utils.text.FormatUnit}
2552 @type format_override: dict
2553 @param format_override: Dictionary for overriding field formatting functions,
2554 indexed by field name, contents like L{_DEFAULT_FORMAT_QUERY}
2555 @type separator: string or None
2556 @param separator: String used to separate fields
2558 @param header: Whether to output header row
2559 @type verbose: boolean
2560 @param verbose: whether to use verbose field descriptions or not
2569 if format_override is None:
2570 format_override = {}
# One counter per possible result status; bumped by _RecordStatus.
2572 stats = dict.fromkeys(constants.RS_ALL, 0)
2574 def _RecordStatus(status):
# NOTE(review): _RecordStatus body and the "columns = []" init are elided.
# Build one TableColumn per field, wiring the status-recording formatter.
2579 for fdef in result.fields:
2580 assert fdef.title and fdef.name
2581 (fn, align_right) = _GetColumnFormatter(fdef, format_override, unit)
2582 columns.append(TableColumn(fdef.title,
2583 _QueryColumnFormatter(fn, _RecordStatus,
# Formatting the table runs every cell formatter, populating "stats".
2587 table = FormatTable(result.data, columns, header, separator)
2589 # Collect statistics
2590 assert len(stats) == len(constants.RS_ALL)
2591 assert compat.all(count >= 0 for count in stats.values())
2593 # Determine overall status. If there was no data, unknown fields must be
2594 # detected via the field definitions.
2595 if (stats[constants.RS_UNKNOWN] or
2596 (not result.data and _GetUnknownFields(result.fields))):
# NOTE(review): assignment "status = QR_UNKNOWN" is elided in this excerpt.
2598 elif compat.any(count > 0 for key, count in stats.items()
2599 if key != constants.RS_NORMAL):
2600 status = QR_INCOMPLETE
# NOTE(review): the final "else: status = QR_NORMAL" branch is elided.
2604 return (status, table)
# Filters a list of field definitions down to those the server did not
# recognize (kind == QFT_UNKNOWN).
2607 def _GetUnknownFields(fdefs):
2608 """Returns list of unknown fields included in C{fdefs}.
2610 @type fdefs: list of L{objects.QueryFieldDefinition}
2613 return [fdef for fdef in fdefs
2614 if fdef.kind == constants.QFT_UNKNOWN]
# Emits a stderr warning naming any unknown fields in a query result.
# NOTE(review): callers use the return value as a boolean "found unknown"
# flag (see GenericList/GenericListFields); the elided tail presumably
# returns bool(unknown) -- confirm against the full source.
2617 def _WarnUnknownFields(fdefs):
2618 """Prints a warning to stderr if a query included unknown fields.
2620 @type fdefs: list of L{objects.QueryFieldDefinition}
2623 unknown = _GetUnknownFields(fdefs)
2625 ToStderr("Warning: Queried for unknown fields %s",
2626 utils.CommaJoin(fdef.name for fdef in unknown))
# Shared implementation behind the various "gnt-* list" commands: builds a
# filter from the requested names, runs the query over luxi, formats the
# response and prints it, returning a process exit code.
2632 def GenericList(resource, fields, names, unit, separator, header, cl=None,
2633 format_override=None, verbose=False, force_filter=False):
2634 """Generic implementation for listing all items of a resource.
2636 @param resource: One of L{constants.QR_VIA_LUXI}
2637 @type fields: list of strings
2638 @param fields: List of fields to query for
2639 @type names: list of strings
2640 @param names: Names of items to query for
2641 @type unit: string or None
2642 @param unit: Unit used for formatting fields of type L{constants.QFT_UNIT} or
2643 None for automatic choice (human-readable for non-separator usage,
2644 otherwise megabytes); this is a one-letter string
2645 @type separator: string or None
2646 @param separator: String used to separate fields
2648 @param header: Whether to show header row
2649 @type force_filter: bool
2650 @param force_filter: Whether to always treat names as filter
2651 @type format_override: dict
2652 @param format_override: Dictionary for overriding field formatting functions,
2653 indexed by field name, contents like L{_DEFAULT_FORMAT_QUERY}
2654 @type verbose: boolean
2655 @param verbose: whether to use verbose field descriptions or not
# NOTE(review): default luxi-client creation for cl=None is elided here.
# Names become a query filter; force_filter bypasses name auto-detection.
2664 filter_ = qlang.MakeFilter(names, force_filter)
2666 response = cl.Query(resource, fields, filter_)
2668 found_unknown = _WarnUnknownFields(response.fields)
2670 (status, data) = FormatQueryResult(response, unit=unit, separator=separator,
2672 format_override=format_override,
# NOTE(review): the loop printing each formatted line is elided here.
# Unknown fields and QR_UNKNOWN status must agree by construction.
2678 assert ((found_unknown and status == QR_UNKNOWN) or
2679 (not found_unknown and status != QR_UNKNOWN))
2681 if status == QR_UNKNOWN:
2682 return constants.EXIT_UNKNOWN_FIELD
2684 # TODO: Should the list command fail if not all data could be collected?
2685 return constants.EXIT_SUCCESS
# Shared implementation behind the "gnt-* list-fields" commands: queries
# field definitions for a resource and prints a Name/Title/Description
# table, returning a process exit code.
2688 def GenericListFields(resource, fields, separator, header, cl=None):
2689 """Generic implementation for listing fields for a resource.
2691 @param resource: One of L{constants.QR_VIA_LUXI}
2692 @type fields: list of strings
2693 @param fields: List of fields to query for
2694 @type separator: string or None
2695 @param separator: String used to separate fields
2697 @param header: Whether to show header row
# NOTE(review): default luxi-client creation for cl=None is elided here.
2706 response = cl.QueryFields(resource, fields)
2708 found_unknown = _WarnUnknownFields(response.fields)
# Fixed three-column layout; plain str formatting, never right-aligned.
2711 TableColumn("Name", str, False),
2712 TableColumn("Title", str, False),
2713 TableColumn("Description", str, False),
2716 rows = [[fdef.name, fdef.title, fdef.doc] for fdef in response.fields]
2718 for line in FormatTable(rows, columns, header, separator):
# NOTE(review): the ToStdout call for each line and the "if found_unknown"
# guard before the next return are elided in this excerpt.
2722 return constants.EXIT_UNKNOWN_FIELD
2724 return constants.EXIT_SUCCESS
# NOTE(review): the "class TableColumn" header line is elided in this
# excerpt; these are the class docstring and constructor.  A column bundles
# a title, a per-cell formatting callable and an alignment flag.
2728 """Describes a column for L{FormatTable}.
2731 def __init__(self, title, fn, align_right):
2732 """Initializes this class.
2735 @param title: Column title
2737 @param fn: Formatting function
2738 @type align_right: bool
2739 @param align_right: Whether to align values on the right-hand side
# NOTE(review): assignments of self.title and self.format are elided here.
2744 self.align_right = align_right
# Builds a printf-style format string ("%<width>s" or "%-<width>s") for one
# column; "sign" is presumably "" or "-" depending on align_right (the
# assignment is elided in this excerpt -- confirm against the full source).
2747 def _GetColFormatString(width, align_right):
2748 """Returns the format string for a field.
2756 return "%%%s%ss" % (sign, width)
# Formats rows of data as text lines.  With a separator, columns are simply
# joined; without one, column widths are computed and cells are padded per
# each column's alignment.
2759 def FormatTable(rows, columns, header, separator):
2760 """Formats data as a table.
2762 @type rows: list of lists
2763 @param rows: Row data, one list per row
2764 @type columns: list of L{TableColumn}
2765 @param columns: Column descriptions
2767 @param header: Whether to show header row
2768 @type separator: string or None
2769 @param separator: String used to separate columns
# Header row seeds both the data and the initial column widths.
2773 data = [[col.title for col in columns]]
2774 colwidth = [len(col.title) for col in columns]
# NOTE(review): the "else" branch for header=False (data = []) is elided.
2777 colwidth = [0 for _ in columns]
# NOTE(review): the "for row in rows:" loop header is elided in this excerpt.
2781 assert len(row) == len(columns)
2783 formatted = [col.format(value) for value, col in zip(row, columns)]
2785 if separator is None:
2786 # Update column widths
2787 for idx, (oldwidth, value) in enumerate(zip(colwidth, formatted)):
2788 # Modifying a list's items while iterating is fine
2789 colwidth[idx] = max(oldwidth, len(value))
2791 data.append(formatted)
2793 if separator is not None:
2794 # Return early if a separator is used
2795 return [separator.join(row) for row in data]
2797 if columns and not columns[-1].align_right:
2798 # Avoid unnecessary spaces at end of line
# NOTE(review): zeroing of the last column width is elided in this excerpt.
2801 # Build format string
2802 fmt = " ".join([_GetColFormatString(width, col.align_right)
2803 for col, width in zip(columns, colwidth)])
2805 return [fmt % tuple(row) for row in data]
# Renders a (seconds, microseconds) pair as "YYYY-MM-DD HH:MM:SS.uuuuuu"
# in local time.
2808 def FormatTimestamp(ts):
2809 """Formats a given timestamp.
2812 @param ts: a timeval-type timestamp, a tuple of seconds and microseconds
2815 @return: a string with the formatted timestamp
# Malformed input is tolerated; the elided branch presumably returns a
# placeholder string rather than raising -- confirm against the full source.
2818 if not isinstance (ts, (tuple, list)) or len(ts) != 2:
# NOTE(review): unpacking of (sec, usec) from ts is elided in this excerpt.
2821 return time.strftime("%F %T", time.localtime(sec)) + ".%06d" % usec
# Parses a duration string with an optional single-letter suffix into a
# number of seconds; a bare number is taken as seconds already.
2824 def ParseTimespec(value):
2825 """Parse a time specification.
2827 The following suffixed will be recognized:
2835 Without any suffix, the value will be taken to be in seconds.
2840 raise errors.OpPrereqError("Empty time specification passed")
# NOTE(review): the suffix_map literal and the enclosing try are elided in
# this excerpt; suffix_map presumably maps suffix letters to multipliers.
2848 if value[-1] not in suffix_map:
# No recognized suffix: the whole string must parse as an integer.
2851 except (TypeError, ValueError):
2852 raise errors.OpPrereqError("Invalid time specification '%s'" % value)
2854 multiplier = suffix_map[value[-1]]
# NOTE(review): stripping of the suffix character is elided in this excerpt.
2856 if not value: # no data left after stripping the suffix
2857 raise errors.OpPrereqError("Invalid time specification (only"
2860 value = int(value) * multiplier
2861 except (TypeError, ValueError):
2862 raise errors.OpPrereqError("Invalid time specification '%s'" % value)
# Queries the node list over luxi, partitions it into offline/online,
# optionally warns about skipped offline nodes, and returns the online
# nodes' names (or secondary IPs when secondary_ips=True).
2866 def GetOnlineNodes(nodes, cl=None, nowarn=False, secondary_ips=False,
2867 filter_master=False, nodegroup=None):
2868 """Returns the names of online nodes.
2870 This function will also log a warning on stderr with the names of
2873 @param nodes: if not empty, use only this subset of nodes (minus the
2875 @param cl: if not None, luxi client to use
2876 @type nowarn: boolean
2877 @param nowarn: by default, this function will output a note with the
2878 offline nodes that are skipped; if this parameter is True the
2879 note is not displayed
2880 @type secondary_ips: boolean
2881 @param secondary_ips: if True, return the secondary IPs instead of the
2882 names, useful for doing network traffic over the replication interface
2884 @type filter_master: boolean
2885 @param filter_master: if True, do not return the master node in the list
2886 (useful in coordination with secondary_ips where we cannot check our
2887 node name against the list)
2888 @type nodegroup: string
2889 @param nodegroup: If set, only return nodes in this node group
# Build the filter incrementally; each restriction is ANDed together below.
2898 filter_.append(qlang.MakeSimpleFilter("name", nodes))
2900 if nodegroup is not None:
# Accept the nodegroup argument as either a group name or a group UUID.
2901 filter_.append([qlang.OP_OR, [qlang.OP_EQUAL, "group", nodegroup],
2902 [qlang.OP_EQUAL, "group.uuid", nodegroup]])
# NOTE(review): the "if filter_master:" guard for this line is elided.
2905 filter_.append([qlang.OP_NOT, [qlang.OP_TRUE, "master"]])
2908 if len(filter_) > 1:
2909 final_filter = [qlang.OP_AND] + filter_
# NOTE(review): the elif/else handling for 0 or 1 sub-filters is partially
# elided in this excerpt.
2911 assert len(filter_) == 1
2912 final_filter = filter_[0]
2916 result = cl.Query(constants.QR_NODE, ["name", "offline", "sip"], final_filter)
# Row layout is [(status, name), (status, offline), (status, sip)]; the
# helpers below destructure by position.
2918 def _IsOffline(row):
2919 (_, (_, offline), _) = row
# NOTE(review): the return statements of _IsOffline/_GetName/_GetSip and
# their def lines are partially elided in this excerpt.
2923 ((_, name), _, _) = row
2927 (_, _, (_, sip)) = row
2930 (offline, online) = compat.partition(result.data, _IsOffline)
2932 if offline and not nowarn:
2933 ToStderr("Note: skipping offline node(s): %s" %
2934 utils.CommaJoin(map(_GetName, offline)))
# NOTE(review): selection of fn (_GetSip vs _GetName based on
# secondary_ips) is elided in this excerpt.
2941 return map(fn, online)
# Low-level output helper shared by ToStdout/ToStderr: %-formats when args
# are given and exits quietly on EPIPE (terminal gone).
# NOTE(review): uses Python 2 "except IOError, err" syntax -- this module
# is Python 2 only.
2944 def _ToStream(stream, txt, *args):
2945 """Write a message to a stream, bypassing the logging system
2947 @type stream: file object
2948 @param stream: the file to which we should write
2950 @param txt: the message
# NOTE(review): the enclosing try, the no-args branch and the trailing
# newline/flush calls are elided in this excerpt.
2956 stream.write(txt % args)
2961 except IOError, err:
2962 if err.errno == errno.EPIPE:
2963 # our terminal went away, we'll exit
2964 sys.exit(constants.EXIT_FAILURE)
# Thin convenience wrapper: routes a message to stdout via _ToStream.
2969 def ToStdout(txt, *args):
2970 """Write a message to stdout only, bypassing the logging system
2972 This is just a wrapper over _ToStream.
2975 @param txt: the message
2978 _ToStream(sys.stdout, txt, *args)
# Thin convenience wrapper: routes a message to stderr via _ToStream.
2981 def ToStderr(txt, *args):
2982 """Write a message to stderr only, bypassing the logging system
2984 This is just a wrapper over _ToStream.
2987 @param txt: the message
2990 _ToStream(sys.stderr, txt, *args)
# Batches opcode submissions into luxi jobs, then polls them to completion.
# Internal queues (visible from usage below):
#   self.queue -- (index, name, ops) tuples not yet submitted
#   self.jobs  -- (index, submit-status, job-id-or-error, name) tuples
# The itertools counter preserves submission order for the final results.
2993 class JobExecutor(object):
2994 """Class which manages the submission and execution of multiple jobs.
2996 Note that instances of this class should not be reused between
3000 def __init__(self, cl=None, verbose=True, opts=None, feedback_fn=None):
# NOTE(review): initialization of self.queue, self.jobs, self.cl and
# self.opts is elided in this excerpt.
3005 self.verbose = verbose
3008 self.feedback_fn = feedback_fn
3009 self._counter = itertools.count()
# NOTE(review): the @staticmethod decorator line for _IfName is elided.
3012 def _IfName(name, fmt):
3013 """Helper function for formatting name.
# NOTE(review): _IfName's return (fmt % name when name is set, else "")
# is elided -- confirm against the full source.
3021 def QueueJob(self, name, *ops):
3022 """Record a job for later submit.
3025 @param name: a description of the job, will be used in WaitJobSet
# Opts (dry-run, priority, ...) are stamped onto the opcodes at queue time.
3028 SetGenericOpcodeOpts(ops, self.opts)
3029 self.queue.append((self._counter.next(), name, ops))
3031 def AddJobId(self, name, status, job_id):
3032 """Adds a job ID to the internal queue.
3035 self.jobs.append((self._counter.next(), status, job_id, name))
3037 def SubmitPending(self, each=False):
3038 """Submit all pending jobs.
# each=True submits one luxi call per job; the default batches them all
# through SubmitManyJobs.
3043 for (_, _, ops) in self.queue:
3044 # SubmitJob will remove the success status, but raise an exception if
3045 # the submission fails, so we'll notice that anyway.
3046 results.append([True, self.cl.SubmitJob(ops)])
3048 results = self.cl.SubmitManyJobs([ops for (_, _, ops) in self.queue])
3049 for ((status, data), (idx, name, _)) in self.jobs and zip(results, self.queue):
3050 self.jobs.append((idx, status, data, name))
3052 def _ChooseJob(self):
3053 """Choose a non-waiting/queued job to poll next.
3056 assert self.jobs, "_ChooseJob called with empty job list"
# Query all outstanding jobs at once; jobs no longer known to the master
# come back as non-list entries and are treated as pollable ("lost").
3058 result = self.cl.QueryJobs([i[2] for i in self.jobs], ["status"])
3061 for job_data, status in zip(self.jobs, result):
3062 if (isinstance(status, list) and status and
3063 status[0] in (constants.JOB_STATUS_QUEUED,
3064 constants.JOB_STATUS_WAITING,
3065 constants.JOB_STATUS_CANCELING)):
3066 # job is still present and waiting
# NOTE(review): the "continue" for the waiting case is elided here.
3068 # good candidate found (either running job or lost job)
3069 self.jobs.remove(job_data)
# NOTE(review): "return job_data" after the removal is elided.
# Fallback: everything is still waiting, poll the first job anyway.
3073 return self.jobs.pop(0)
3075 def GetResults(self):
3076 """Wait for and return the results of all jobs.
3079 @return: list of tuples (success, job results), in the same order
3080 as the submitted jobs; if a job has failed, instead of the result
3081 there will be the error message
# NOTE(review): the "if self.queue:" guard before this call is elided.
3085 self.SubmitPending()
3088 ok_jobs = [row[2] for row in self.jobs if row[1]]
3090 ToStdout("Submitted jobs %s", utils.CommaJoin(ok_jobs))
3092 # first, remove any non-submitted jobs
3093 self.jobs, failures = compat.partition(self.jobs, lambda x: x[1])
3094 for idx, _, jid, name in failures:
3095 ToStderr("Failed to submit job%s: %s", self._IfName(name, " for %s"), jid)
3096 results.append((idx, False, jid))
# NOTE(review): the "while self.jobs:" loop header is elided here.
3099 (idx, _, jid, name) = self._ChooseJob()
3100 ToStdout("Waiting for job %s%s ...", jid, self._IfName(name, " for %s"))
# NOTE(review): the enclosing try and "success = True/False" assignments
# around PollJob are elided in this excerpt.
3102 job_result = PollJob(jid, cl=self.cl, feedback_fn=self.feedback_fn)
3104 except errors.JobLost, err:
3105 _, job_result = FormatError(err)
3106 ToStderr("Job %s%s has been archived, cannot check its result",
3107 jid, self._IfName(name, " for %s"))
3109 except (errors.GenericError, luxi.ProtocolError), err:
3110 _, job_result = FormatError(err)
3112 # the error message will always be shown, verbose or not
3113 ToStderr("Job %s%s has failed: %s",
3114 jid, self._IfName(name, " for %s"), job_result)
3116 results.append((idx, success, job_result))
3118 # sort based on the index, then drop it
3120 results = [i[1:] for i in results]
# NOTE(review): the results.sort() call and final "return results" are
# elided in this excerpt.
3124 def WaitOrShow(self, wait):
3125 """Wait for job results or only print the job IDs.
3128 @param wait: whether to wait or not
3132 return self.GetResults()
# NOTE(review): the else-branch header and the per-job status test are
# partially elided below.
3135 self.SubmitPending()
3136 for _, status, result, name in self.jobs:
3138 ToStdout("%s: %s", result, name)
3140 ToStderr("Failure for %s: %s", name, result)
3141 return [row[1:3] for row in self.jobs]
# Writes an indented "key: value" listing of parameters into a buffer,
# labelling values not explicitly set as "default (<effective value>)".
3144 def FormatParameterDict(buf, param_dict, actual, level=1):
3145 """Formats a parameter dictionary.
3147 @type buf: L{StringIO}
3148 @param buf: the buffer into which to write
3149 @type param_dict: dict
3150 @param param_dict: the own parameters
3152 @param actual: the current parameter set (including defaults)
3153 @param level: Level of indent
# Two spaces of indent per nesting level.
3156 indent = "  " * level
# Iterate over "actual" so every effective parameter appears, whether or
# not it was explicitly set in param_dict.
3157 for key in sorted(actual):
3158 val = param_dict.get(key, "default (%s)" % actual[key])
3159 buf.write("%s- %s: %s\n" % (indent, key, val))
3162 def ConfirmOperation(names, list_type, text, extra=""):
3163 """Ask the user to confirm an operation on a list of list_type.
3165 This function is used to request confirmation for doing an operation
3166 on a given list of list_type.
3169 @param names: the list of names that we display when
3170 we ask for confirmation
3171 @type list_type: str
3172 @param list_type: Human readable name for elements in the list (e.g. nodes)
3174 @param text: the operation that the user should confirm
3176 @return: True or False depending on user's confirmation.
3180 msg = ("The %s will operate on %d %s.\n%s"
3181 "Do you want to continue?" % (text, count, list_type, extra))
3182 affected = (("\nAffected %s:\n" % list_type) +
3183 "\n".join([" %s" % name for name in names]))
3185 choices = [("y", True, "Yes, execute the %s" % text),
3186 ("n", False, "No, abort the %s" % text)]
3189 choices.insert(1, ("v", "v", "View the list of affected %s" % list_type))
3192 question = msg + affected
3194 choice = AskUser(question, choices)
3197 choice = AskUser(msg + affected, choices)