4 # Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011 Google Inc.
6 # This program is free software; you can redistribute it and/or modify
7 # it under the terms of the GNU General Public License as published by
8 # the Free Software Foundation; either version 2 of the License, or
9 # (at your option) any later version.
11 # This program is distributed in the hope that it will be useful, but
12 # WITHOUT ANY WARRANTY; without even the implied warranty of
13 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 # General Public License for more details.
16 # You should have received a copy of the GNU General Public License
17 # along with this program; if not, write to the Free Software
18 # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
22 """Module dealing with command line parsing"""
32 from cStringIO import StringIO
34 from ganeti import utils
35 from ganeti import errors
36 from ganeti import constants
37 from ganeti import opcodes
38 from ganeti import luxi
39 from ganeti import ssconf
40 from ganeti import rpc
41 from ganeti import ssh
42 from ganeti import compat
43 from ganeti import netutils
44 from ganeti import qlang
46 from optparse import (OptionParser, TitledHelpFormatter,
47 Option, OptionValueError)
51 # Command line options
64 "CLUSTER_DOMAIN_SECRET_OPT",
81 "FILESTORE_DRIVER_OPT",
87 "GLOBAL_SHARED_FILEDIR_OPT",
92 "DEFAULT_IALLOCATOR_OPT",
93 "IDENTIFY_DEFAULTS_OPT",
95 "IGNORE_FAILURES_OPT",
97 "IGNORE_REMOVE_FAILURES_OPT",
98 "IGNORE_SECONDARIES_OPT",
102 "MAINTAIN_NODE_HEALTH_OPT",
105 "MIGRATION_MODE_OPT",
107 "NEW_CLUSTER_CERT_OPT",
108 "NEW_CLUSTER_DOMAIN_SECRET_OPT",
109 "NEW_CONFD_HMAC_KEY_OPT",
113 "NODE_FORCE_JOIN_OPT",
115 "NODE_PLACEMENT_OPT",
119 "NODRBD_STORAGE_OPT",
125 "NOMODIFY_ETCHOSTS_OPT",
126 "NOMODIFY_SSH_SETUP_OPT",
132 "NOSSH_KEYCHECK_OPT",
144 "PREALLOC_WIPE_DISKS_OPT",
145 "PRIMARY_IP_VERSION_OPT",
151 "REMOVE_INSTANCE_OPT",
156 "SECONDARY_ONLY_OPT",
160 "SHUTDOWN_TIMEOUT_OPT",
165 "STARTUP_PAUSED_OPT",
178 # Generic functions for CLI programs
181 "GenericInstanceCreate",
187 "JobSubmittedException",
189 "RunWhileClusterStopped",
193 # Formatting functions
194 "ToStderr", "ToStdout",
197 "FormatParameterDict",
206 # command line options support infrastructure
207 "ARGS_MANY_INSTANCES",
226 "OPT_COMPL_INST_ADD_NODES",
227 "OPT_COMPL_MANY_NODES",
228 "OPT_COMPL_ONE_IALLOCATOR",
229 "OPT_COMPL_ONE_INSTANCE",
230 "OPT_COMPL_ONE_NODE",
231 "OPT_COMPL_ONE_NODEGROUP",
237 "COMMON_CREATE_OPTS",
243 #: Priorities (sorted)
245 ("low", constants.OP_PRIO_LOW),
246 ("normal", constants.OP_PRIO_NORMAL),
247 ("high", constants.OP_PRIO_HIGH),
250 #: Priority dictionary for easier lookup
251 # TODO: Replace this and _PRIORITY_NAMES with a single sorted dictionary once
252 # we migrate to Python 2.6
253 _PRIONAME_TO_VALUE = dict(_PRIORITY_NAMES)
255 # Query result status for clients
258 QR_INCOMPLETE) = range(3)
  def __init__(self, min=0, max=None): # pylint: disable-msg=W0622
    # "min"/"max" deliberately shadow the builtins (hence the pylint
    # disable); they bound how many command line values this argument
    # may consume

    # NOTE(review): the following return is the body of __repr__ (its
    # "def" line is not visible here) -- confirm against the full file
    return ("<%s min=%s max=%s>" %
            (self.__class__.__name__, self.min, self.max))
class ArgSuggest(_Argument):
  """Suggesting argument.

  Value can be any of the ones passed to the constructor.

  """
  # pylint: disable-msg=W0622
  def __init__(self, min=0, max=None, choices=None):
    # forward the count limits to the base class and keep the
    # suggestion list (used for shell completion)
    _Argument.__init__(self, min=min, max=max)
    self.choices = choices

    # NOTE(review): the following return is the body of __repr__ (its
    # "def" line is not visible here); it extends the base repr with
    # the suggestion choices
    return ("<%s min=%s max=%s choices=%r>" %
            (self.__class__.__name__, self.min, self.max, self.choices))
class ArgChoice(ArgSuggest):
  """Choice argument.

  Value can be any of the ones passed to the constructor. Like L{ArgSuggest},
  but value must be one of the choices.

  """
class ArgUnknown(_Argument):
  """Unknown argument to program (e.g. determined at runtime).

  """
class ArgInstance(_Argument):
  """Instances argument.

  """
308 class ArgNode(_Argument):
class ArgGroup(_Argument):
  """Node group argument.

  """
320 class ArgJobId(_Argument):
class ArgFile(_Argument):
  """File path argument.

  """
332 class ArgCommand(_Argument):
338 class ArgHost(_Argument):
344 class ArgOs(_Argument):
# Argument-specification shortcuts shared by the command tables:
# "MANY" variants accept any number of names, "ONE" variants require
# exactly one (min=1, max=1).
ARGS_MANY_INSTANCES = [ArgInstance()]
ARGS_MANY_NODES = [ArgNode()]
ARGS_MANY_GROUPS = [ArgGroup()]
ARGS_ONE_INSTANCE = [ArgInstance(min=1, max=1)]
ARGS_ONE_NODE = [ArgNode(min=1, max=1)]
ARGS_ONE_GROUP = [ArgGroup(min=1, max=1)]
ARGS_ONE_OS = [ArgOs(min=1, max=1)]
def _ExtractTagsObject(opts, args):
  """Extract the tag type object.

  Note that this function will modify its args parameter.

  @param opts: parsed command line options, expected to carry the
      C{tag_type} attribute set by the command definition
  @param args: positional arguments; for per-object tag kinds the
      target name is taken from here

  """
  # tag_type is set by the command table, so its absence is a bug in
  # the caller, not a user error
  if not hasattr(opts, "tag_type"):
    raise errors.ProgrammerError("tag_type not passed to _ExtractTagsObject")
  # cluster-level tags need no object name; node group and instance
  # tags identify their target via a positional argument
  if kind == constants.TAG_CLUSTER:
  elif kind in (constants.TAG_NODEGROUP,
                constants.TAG_INSTANCE):
    # per-object tag kinds require a target name on the command line
    raise errors.OpPrereqError("no arguments passed to the command")
  # any other tag kind is unsupported and indicates a programming error
  raise errors.ProgrammerError("Unhandled tag type '%s'" % kind)
def _ExtendTags(opts, args):
  """Extend the args if a source file has been given.

  This function will extend the tags with the contents of the file
  passed in the 'tags_source' attribute of the opts parameter. A file
  named '-' will be replaced by stdin.

  @param opts: parsed options, read for the C{tags_source} attribute
  @param args: list of tags, extended in place with one entry per
      (stripped) line of the source file

  """
  fname = opts.tags_source
  new_fh = open(fname, "r")
  # we don't use the nice 'new_data = [line.strip() for line in fh]'
  # because of python bug 1633941
  line = new_fh.readline()
  # strip the trailing newline (and surrounding whitespace) from each
  # tag read from the file
  new_data.append(line.strip())
  args.extend(new_data)
def ListTags(opts, args):
  """List the tags on a given object.

  This is a generic implementation that knows how to deal with all
  three cases of tag objects (cluster, node, instance). The opts
  argument is expected to contain a tag_type field denoting what
  object type we work on.

  """
  kind, name = _ExtractTagsObject(opts, args)
  # NOTE(review): "cl" is presumably a luxi client obtained just above
  # (e.g. via GetClient()) -- confirm against the full file
  result = cl.QueryTags(kind, name)
  # materialize the result so it can be sorted/printed
  result = list(result)
def AddTags(opts, args):
  """Add tags on a given object.

  This is a generic implementation that knows how to deal with all
  three cases of tag objects (cluster, node, instance). The opts
  argument is expected to contain a tag_type field denoting what
  object type we work on.

  """
  kind, name = _ExtractTagsObject(opts, args)
  # pull in additional tags from the --from source file, if any
  _ExtendTags(opts, args)
  # NOTE(review): presumably guarded by an "if not args:" check --
  # adding an empty tag list is a user error
  raise errors.OpPrereqError("No tags to be added")
  op = opcodes.OpTagsSet(kind=kind, name=name, tags=args)
  SubmitOpCode(op, opts=opts)
def RemoveTags(opts, args):
  """Remove tags from a given object.

  This is a generic implementation that knows how to deal with all
  three cases of tag objects (cluster, node, instance). The opts
  argument is expected to contain a tag_type field denoting what
  object type we work on.

  """
  kind, name = _ExtractTagsObject(opts, args)
  # pull in additional tags from the --from source file, if any
  _ExtendTags(opts, args)
  # NOTE(review): presumably guarded by an "if not args:" check --
  # removing an empty tag list is a user error
  raise errors.OpPrereqError("No tags to be removed")
  op = opcodes.OpTagsDel(kind=kind, name=name, tags=args)
  SubmitOpCode(op, opts=opts)
def check_unit(option, opt, value): # pylint: disable-msg=W0613
  """OptParsers custom converter for units.

  Converts values such as "512m" into an amount of mebibytes.

  """
    # delegate the actual unit parsing to the utils library
    return utils.ParseUnit(value)
  except errors.UnitParseError, err:
    # re-raise as optparse's own error type so the failure is reported
    # as a bad option value instead of crashing the parser
    raise OptionValueError("option %s: %s" % (opt, err))
def _SplitKeyVal(opt, data):
  """Convert a KeyVal string into a dict.

  This function will convert a key=val[,...] string into a dict. Empty
  values will be converted specially: keys which have the prefix 'no_'
  will have the value=False and the prefix stripped, the others will
  have value=True.

  @type opt: string
  @param opt: a string holding the option name for which we process the
      data, used in building error messages
  @type data: string
  @param data: a string of the format key=val,key=val,...

  @rtype: dict
  @return: {key=val, key=val}
  @raises errors.ParameterError: if there are duplicate keys

  """
  # commas inside values can be escaped; UnescapeAndSplit handles that
  for elem in utils.UnescapeAndSplit(data, sep=","):
      # elements with an explicit value: split on the first "=" only,
      # so values may themselves contain "="
      key, val = elem.split("=", 1)
    # value-less elements: a "no_" prefix means False, an "un_" prefix
    # means "reset to default" (None), anything else means True
    if elem.startswith(NO_PREFIX):
      key, val = elem[len(NO_PREFIX):], False
    elif elem.startswith(UN_PREFIX):
      key, val = elem[len(UN_PREFIX):], None
      key, val = elem, True
    # duplicate keys are rejected rather than silently overwritten
    raise errors.ParameterError("Duplicate key '%s' in option %s" %
def check_ident_key_val(option, opt, value): # pylint: disable-msg=W0613
  """Custom parser for ident:key=val,key=val options.

  This will store the parsed values as a tuple (ident, {key: val}). As such,
  multiple uses of this option via action=append is possible.

  """
  # no ":" separator given: the whole value is the identifier and
  # there are no parameters
    ident, rest = value, ""
    ident, rest = value.split(":", 1)

  # "no_"-prefixed identifier: the whole parameter group is being
  # removed, so trailing key=val options make no sense
  if ident.startswith(NO_PREFIX):
      msg = "Cannot pass options when removing parameter groups: %s" % value
      raise errors.ParameterError(msg)
    retval = (ident[len(NO_PREFIX):], False)
  # "un_" ("-") prefix: the parameter group is reset to its defaults;
  # the same restriction on trailing options applies
  elif ident.startswith(UN_PREFIX):
      msg = "Cannot pass options when removing parameter groups: %s" % value
      raise errors.ParameterError(msg)
    retval = (ident[len(UN_PREFIX):], None)
    # normal case: parse the key=val part into a dict
    kv_dict = _SplitKeyVal(opt, rest)
    retval = (ident, kv_dict)
def check_key_val(option, opt, value): # pylint: disable-msg=W0613
  """Custom parser class for key=val,key=val options.

  This will store the parsed values as a dict {key: val}.

  """
  return _SplitKeyVal(opt, value)
def check_bool(option, opt, value): # pylint: disable-msg=W0613
  """Custom parser for yes/no options.

  This will store the parsed value as either True or False.

  """
  # accept any casing of the recognized literals
  value = value.lower()
  if value == constants.VALUE_FALSE or value == "no":
  elif value == constants.VALUE_TRUE or value == "yes":
    # anything other than the recognized true/false literals is an error
    raise errors.ParameterError("Invalid boolean value '%s'" % value)
# completion_suggest is normally a list. The numeric values below do not
# evaluate to False and mark an option for dynamic completion.
# Sentinels stored in an option's completion_suggest attribute; the
# concrete values are arbitrary, they only need to be distinct and
# truthy so completion code can recognize them.
(OPT_COMPL_MANY_NODES,
 OPT_COMPL_ONE_INSTANCE,
 OPT_COMPL_ONE_IALLOCATOR,
 OPT_COMPL_INST_ADD_NODES,
 OPT_COMPL_ONE_NODEGROUP) = range(100, 107)

# NOTE(review): the unpacking above must name exactly as many constants
# as range(100, 107) yields -- confirm against the full file.
OPT_COMPL_ALL = frozenset([
  OPT_COMPL_MANY_NODES,
  OPT_COMPL_ONE_INSTANCE,
  OPT_COMPL_ONE_IALLOCATOR,
  OPT_COMPL_INST_ADD_NODES,
  OPT_COMPL_ONE_NODEGROUP,
class CliOption(Option):
  """Custom option class for optparse.

  Adds the C{completion_suggest} attribute and registers the custom
  value types whose checkers are defined above in this module.

  """
  ATTRS = Option.ATTRS + [
    "completion_suggest",
  # custom value types; each is validated and converted by the
  # matching TYPE_CHECKER entry below
  TYPES = Option.TYPES + (
  TYPE_CHECKER = Option.TYPE_CHECKER.copy()
  TYPE_CHECKER["identkeyval"] = check_ident_key_val
  TYPE_CHECKER["keyval"] = check_key_val
  TYPE_CHECKER["unit"] = check_unit
  TYPE_CHECKER["bool"] = check_bool
605 # optparse.py sets make_option, so we do it for our own option class, too
606 cli_option = CliOption
# Generic output and confirmation options shared by most commands

DEBUG_OPT = cli_option("-d", "--debug", default=0, action="count",
                       help="Increase debugging level")

NOHDR_OPT = cli_option("--no-headers", default=False,
                       action="store_true", dest="no_headers",
                       help="Don't display column headers")

SEP_OPT = cli_option("--separator", default=None,
                     action="store", dest="separator",
                     help=("Separator between output fields"
                           " (defaults to one space)"))

USEUNITS_OPT = cli_option("--units", default=None,
                          dest="units", choices=("h", "m", "g", "t"),
                          help="Specify units for output (one of h/m/g/t)")

FIELDS_OPT = cli_option("-o", "--output", dest="output", action="store",
                        type="string", metavar="FIELDS",
                        help="Comma separated list of output fields")

FORCE_OPT = cli_option("-f", "--force", dest="force", action="store_true",
                       default=False, help="Force the operation")

CONFIRM_OPT = cli_option("--yes", dest="confirm", action="store_true",
                         default=False, help="Do not require confirmation")
637 IGNORE_OFFLINE_OPT = cli_option("--ignore-offline", dest="ignore_offline",
638 action="store_true", default=False,
639 help=("Ignore offline nodes and do as much"
642 TAG_ADD_OPT = cli_option("--tags", dest="tags",
643 default=None, help="Comma-separated list of instance"
646 TAG_SRC_OPT = cli_option("--from", dest="tags_source",
647 default=None, help="File with tag names")
649 SUBMIT_OPT = cli_option("--submit", dest="submit_only",
650 default=False, action="store_true",
651 help=("Submit the job and return the job ID, but"
652 " don't wait for the job to finish"))
654 SYNC_OPT = cli_option("--sync", dest="do_locking",
655 default=False, action="store_true",
656 help=("Grab locks while doing the queries"
657 " in order to ensure more consistent results"))
# Run only the opcode's check phase, never the execution phase; the
# user-visible help previously read "verify it it could be" (typo).
DRY_RUN_OPT = cli_option("--dry-run", default=False,
                         action="store_true", dest="dry_run",
                         help=("Do not execute the operation, just run the"
                               " check steps and verify if it could be"
                               " executed"))
665 VERBOSE_OPT = cli_option("-v", "--verbose", default=False,
667 help="Increase the verbosity of the operation")
669 DEBUG_SIMERR_OPT = cli_option("--debug-simulate-errors", default=False,
670 action="store_true", dest="simulate_errors",
671 help="Debugging option that makes the operation"
672 " treat most runtime checks as failed")
674 NWSYNC_OPT = cli_option("--no-wait-for-sync", dest="wait_for_sync",
675 default=True, action="store_false",
676 help="Don't wait for sync (DANGEROUS!)")
678 DISK_TEMPLATE_OPT = cli_option("-t", "--disk-template", dest="disk_template",
679 help=("Custom disk setup (%s)" %
680 utils.CommaJoin(constants.DISK_TEMPLATES)),
681 default=None, metavar="TEMPL",
682 choices=list(constants.DISK_TEMPLATES))
684 NONICS_OPT = cli_option("--no-nics", default=False, action="store_true",
685 help="Do not create any network cards for"
688 FILESTORE_DIR_OPT = cli_option("--file-storage-dir", dest="file_storage_dir",
689 help="Relative path under default cluster-wide"
690 " file storage dir to store file-based disks",
691 default=None, metavar="<DIR>")
693 FILESTORE_DRIVER_OPT = cli_option("--file-driver", dest="file_driver",
694 help="Driver to use for image files",
695 default="loop", metavar="<DRIVER>",
696 choices=list(constants.FILE_DRIVER))
698 IALLOCATOR_OPT = cli_option("-I", "--iallocator", metavar="<NAME>",
699 help="Select nodes for the instance automatically"
700 " using the <NAME> iallocator plugin",
701 default=None, type="string",
702 completion_suggest=OPT_COMPL_ONE_IALLOCATOR)
704 DEFAULT_IALLOCATOR_OPT = cli_option("-I", "--default-iallocator",
706 help="Set the default instance allocator plugin",
707 default=None, type="string",
708 completion_suggest=OPT_COMPL_ONE_IALLOCATOR)
710 OS_OPT = cli_option("-o", "--os-type", dest="os", help="What OS to run",
712 completion_suggest=OPT_COMPL_ONE_OS)
714 OSPARAMS_OPT = cli_option("-O", "--os-parameters", dest="osparams",
715 type="keyval", default={},
716 help="OS parameters")
718 FORCE_VARIANT_OPT = cli_option("--force-variant", dest="force_variant",
719 action="store_true", default=False,
720 help="Force an unknown variant")
722 NO_INSTALL_OPT = cli_option("--no-install", dest="no_install",
723 action="store_true", default=False,
724 help="Do not install the OS (will"
727 BACKEND_OPT = cli_option("-B", "--backend-parameters", dest="beparams",
728 type="keyval", default={},
729 help="Backend parameters")
731 HVOPTS_OPT = cli_option("-H", "--hypervisor-parameters", type="keyval",
732 default={}, dest="hvparams",
733 help="Hypervisor parameters")
735 HYPERVISOR_OPT = cli_option("-H", "--hypervisor-parameters", dest="hypervisor",
736 help="Hypervisor and hypervisor options, in the"
737 " format hypervisor:option=value,option=value,...",
738 default=None, type="identkeyval")
740 HVLIST_OPT = cli_option("-H", "--hypervisor-parameters", dest="hvparams",
741 help="Hypervisor and hypervisor options, in the"
742 " format hypervisor:option=value,option=value,...",
743 default=[], action="append", type="identkeyval")
745 NOIPCHECK_OPT = cli_option("--no-ip-check", dest="ip_check", default=True,
746 action="store_false",
747 help="Don't check that the instance's IP"
750 NONAMECHECK_OPT = cli_option("--no-name-check", dest="name_check",
751 default=True, action="store_false",
752 help="Don't check that the instance's name"
755 NET_OPT = cli_option("--net",
756 help="NIC parameters", default=[],
757 dest="nics", action="append", type="identkeyval")
759 DISK_OPT = cli_option("--disk", help="Disk parameters", default=[],
760 dest="disks", action="append", type="identkeyval")
762 DISKIDX_OPT = cli_option("--disks", dest="disks", default=None,
763 help="Comma-separated list of disks"
764 " indices to act on (e.g. 0,2) (optional,"
765 " defaults to all disks)")
767 OS_SIZE_OPT = cli_option("-s", "--os-size", dest="sd_size",
768 help="Enforces a single-disk configuration using the"
769 " given disk size, in MiB unless a suffix is used",
770 default=None, type="unit", metavar="<size>")
772 IGNORE_CONSIST_OPT = cli_option("--ignore-consistency",
773 dest="ignore_consistency",
774 action="store_true", default=False,
775 help="Ignore the consistency of the disks on"
778 ALLOW_FAILOVER_OPT = cli_option("--allow-failover",
779 dest="allow_failover",
780 action="store_true", default=False,
781 help="If migration is not possible fallback to"
784 NONLIVE_OPT = cli_option("--non-live", dest="live",
785 default=True, action="store_false",
786 help="Do a non-live migration (this usually means"
787 " freeze the instance, save the state, transfer and"
788 " only then resume running on the secondary node)")
# Migration mode override; the help text previously left the opening
# parenthesis of "(choose ..." unbalanced.
MIGRATION_MODE_OPT = cli_option("--migration-mode", dest="migration_mode",
                                default=None,
                                choices=list(constants.HT_MIGRATION_MODES),
                                help="Override default migration mode (choose"
                                " either live or non-live)")
796 NODE_PLACEMENT_OPT = cli_option("-n", "--node", dest="node",
797 help="Target node and optional secondary node",
798 metavar="<pnode>[:<snode>]",
799 completion_suggest=OPT_COMPL_INST_ADD_NODES)
801 NODE_LIST_OPT = cli_option("-n", "--node", dest="nodes", default=[],
802 action="append", metavar="<node>",
803 help="Use only this node (can be used multiple"
804 " times, if not given defaults to all nodes)",
805 completion_suggest=OPT_COMPL_ONE_NODE)
807 NODEGROUP_OPT = cli_option("-g", "--node-group",
809 help="Node group (name or uuid)",
810 metavar="<nodegroup>",
811 default=None, type="string",
812 completion_suggest=OPT_COMPL_ONE_NODEGROUP)
814 SINGLE_NODE_OPT = cli_option("-n", "--node", dest="node", help="Target node",
816 completion_suggest=OPT_COMPL_ONE_NODE)
818 NOSTART_OPT = cli_option("--no-start", dest="start", default=True,
819 action="store_false",
820 help="Don't start the instance after creation")
822 SHOWCMD_OPT = cli_option("--show-cmd", dest="show_command",
823 action="store_true", default=False,
824 help="Show command instead of executing it")
826 CLEANUP_OPT = cli_option("--cleanup", dest="cleanup",
827 default=False, action="store_true",
828 help="Instead of performing the migration, try to"
829 " recover from a failed cleanup. This is safe"
830 " to run even if the instance is healthy, but it"
831 " will create extra replication traffic and "
832 " disrupt briefly the replication (like during the"
835 STATIC_OPT = cli_option("-s", "--static", dest="static",
836 action="store_true", default=False,
837 help="Only show configuration data, not runtime data")
839 ALL_OPT = cli_option("--all", dest="show_all",
840 default=False, action="store_true",
841 help="Show info on all instances on the cluster."
842 " This can take a long time to run, use wisely")
844 SELECT_OS_OPT = cli_option("--select-os", dest="select_os",
845 action="store_true", default=False,
846 help="Interactive OS reinstall, lists available"
847 " OS templates for selection")
849 IGNORE_FAILURES_OPT = cli_option("--ignore-failures", dest="ignore_failures",
850 action="store_true", default=False,
851 help="Remove the instance from the cluster"
852 " configuration even if there are failures"
853 " during the removal process")
855 IGNORE_REMOVE_FAILURES_OPT = cli_option("--ignore-remove-failures",
856 dest="ignore_remove_failures",
857 action="store_true", default=False,
858 help="Remove the instance from the"
859 " cluster configuration even if there"
860 " are failures during the removal"
863 REMOVE_INSTANCE_OPT = cli_option("--remove-instance", dest="remove_instance",
864 action="store_true", default=False,
865 help="Remove the instance from the cluster")
867 DST_NODE_OPT = cli_option("-n", "--target-node", dest="dst_node",
868 help="Specifies the new node for the instance",
869 metavar="NODE", default=None,
870 completion_suggest=OPT_COMPL_ONE_NODE)
872 NEW_SECONDARY_OPT = cli_option("-n", "--new-secondary", dest="dst_node",
873 help="Specifies the new secondary node",
874 metavar="NODE", default=None,
875 completion_suggest=OPT_COMPL_ONE_NODE)
877 ON_PRIMARY_OPT = cli_option("-p", "--on-primary", dest="on_primary",
878 default=False, action="store_true",
879 help="Replace the disk(s) on the primary"
880 " node (applies only to internally mirrored"
881 " disk templates, e.g. %s)" %
882 utils.CommaJoin(constants.DTS_INT_MIRROR))
884 ON_SECONDARY_OPT = cli_option("-s", "--on-secondary", dest="on_secondary",
885 default=False, action="store_true",
886 help="Replace the disk(s) on the secondary"
887 " node (applies only to internally mirrored"
888 " disk templates, e.g. %s)" %
889 utils.CommaJoin(constants.DTS_INT_MIRROR))
891 AUTO_PROMOTE_OPT = cli_option("--auto-promote", dest="auto_promote",
892 default=False, action="store_true",
893 help="Lock all nodes and auto-promote as needed"
896 AUTO_REPLACE_OPT = cli_option("-a", "--auto", dest="auto",
897 default=False, action="store_true",
898 help="Automatically replace faulty disks"
899 " (applies only to internally mirrored"
900 " disk templates, e.g. %s)" %
901 utils.CommaJoin(constants.DTS_INT_MIRROR))
903 IGNORE_SIZE_OPT = cli_option("--ignore-size", dest="ignore_size",
904 default=False, action="store_true",
905 help="Ignore current recorded size"
906 " (useful for forcing activation when"
907 " the recorded size is wrong)")
909 SRC_NODE_OPT = cli_option("--src-node", dest="src_node", help="Source node",
911 completion_suggest=OPT_COMPL_ONE_NODE)
913 SRC_DIR_OPT = cli_option("--src-dir", dest="src_dir", help="Source directory",
916 SECONDARY_IP_OPT = cli_option("-s", "--secondary-ip", dest="secondary_ip",
917 help="Specify the secondary ip for the node",
918 metavar="ADDRESS", default=None)
920 READD_OPT = cli_option("--readd", dest="readd",
921 default=False, action="store_true",
922 help="Readd old node after replacing it")
924 NOSSH_KEYCHECK_OPT = cli_option("--no-ssh-key-check", dest="ssh_key_check",
925 default=True, action="store_false",
926 help="Disable SSH key fingerprint checking")
928 NODE_FORCE_JOIN_OPT = cli_option("--force-join", dest="force_join",
929 default=False, action="store_true",
930 help="Force the joining of a node")
932 MC_OPT = cli_option("-C", "--master-candidate", dest="master_candidate",
933 type="bool", default=None, metavar=_YORNO,
934 help="Set the master_candidate flag on the node")
936 OFFLINE_OPT = cli_option("-O", "--offline", dest="offline", metavar=_YORNO,
937 type="bool", default=None,
938 help=("Set the offline flag on the node"
939 " (cluster does not communicate with offline"
942 DRAINED_OPT = cli_option("-D", "--drained", dest="drained", metavar=_YORNO,
943 type="bool", default=None,
944 help=("Set the drained flag on the node"
945 " (excluded from allocation operations)"))
947 CAPAB_MASTER_OPT = cli_option("--master-capable", dest="master_capable",
948 type="bool", default=None, metavar=_YORNO,
949 help="Set the master_capable flag on the node")
951 CAPAB_VM_OPT = cli_option("--vm-capable", dest="vm_capable",
952 type="bool", default=None, metavar=_YORNO,
953 help="Set the vm_capable flag on the node")
955 ALLOCATABLE_OPT = cli_option("--allocatable", dest="allocatable",
956 type="bool", default=None, metavar=_YORNO,
957 help="Set the allocatable flag on a volume")
959 NOLVM_STORAGE_OPT = cli_option("--no-lvm-storage", dest="lvm_storage",
960 help="Disable support for lvm based instances"
962 action="store_false", default=True)
964 ENABLED_HV_OPT = cli_option("--enabled-hypervisors",
965 dest="enabled_hypervisors",
966 help="Comma-separated list of hypervisors",
967 type="string", default=None)
969 NIC_PARAMS_OPT = cli_option("-N", "--nic-parameters", dest="nicparams",
970 type="keyval", default={},
971 help="NIC parameters")
973 CP_SIZE_OPT = cli_option("-C", "--candidate-pool-size", default=None,
974 dest="candidate_pool_size", type="int",
975 help="Set the candidate pool size")
977 VG_NAME_OPT = cli_option("--vg-name", dest="vg_name",
978 help=("Enables LVM and specifies the volume group"
979 " name (cluster-wide) for disk allocation"
980 " [%s]" % constants.DEFAULT_VG),
981 metavar="VG", default=None)
983 YES_DOIT_OPT = cli_option("--yes-do-it", "--ya-rly", dest="yes_do_it",
984 help="Destroy cluster", action="store_true")
986 NOVOTING_OPT = cli_option("--no-voting", dest="no_voting",
987 help="Skip node agreement check (dangerous)",
988 action="store_true", default=False)
990 MAC_PREFIX_OPT = cli_option("-m", "--mac-prefix", dest="mac_prefix",
991 help="Specify the mac prefix for the instance IP"
992 " addresses, in the format XX:XX:XX",
996 MASTER_NETDEV_OPT = cli_option("--master-netdev", dest="master_netdev",
997 help="Specify the node interface (cluster-wide)"
998 " on which the master IP address will be added"
999 " (cluster init default: %s)" %
1000 constants.DEFAULT_BRIDGE,
1004 GLOBAL_FILEDIR_OPT = cli_option("--file-storage-dir", dest="file_storage_dir",
1005 help="Specify the default directory (cluster-"
1006 "wide) for storing the file-based disks [%s]" %
1007 constants.DEFAULT_FILE_STORAGE_DIR,
1009 default=constants.DEFAULT_FILE_STORAGE_DIR)
1011 GLOBAL_SHARED_FILEDIR_OPT = cli_option("--shared-file-storage-dir",
1012 dest="shared_file_storage_dir",
1013 help="Specify the default directory (cluster-"
1014 "wide) for storing the shared file-based"
1016 constants.DEFAULT_SHARED_FILE_STORAGE_DIR,
1017 metavar="SHAREDDIR",
1018 default=constants.DEFAULT_SHARED_FILE_STORAGE_DIR)
1020 NOMODIFY_ETCHOSTS_OPT = cli_option("--no-etc-hosts", dest="modify_etc_hosts",
1021 help="Don't modify /etc/hosts",
1022 action="store_false", default=True)
1024 NOMODIFY_SSH_SETUP_OPT = cli_option("--no-ssh-init", dest="modify_ssh_setup",
1025 help="Don't initialize SSH keys",
1026 action="store_false", default=True)
1028 ERROR_CODES_OPT = cli_option("--error-codes", dest="error_codes",
1029 help="Enable parseable error messages",
1030 action="store_true", default=False)
1032 NONPLUS1_OPT = cli_option("--no-nplus1-mem", dest="skip_nplusone_mem",
1033 help="Skip N+1 memory redundancy tests",
1034 action="store_true", default=False)
1036 REBOOT_TYPE_OPT = cli_option("-t", "--type", dest="reboot_type",
1037 help="Type of reboot: soft/hard/full",
1038 default=constants.INSTANCE_REBOOT_HARD,
1040 choices=list(constants.REBOOT_TYPES))
1042 IGNORE_SECONDARIES_OPT = cli_option("--ignore-secondaries",
1043 dest="ignore_secondaries",
1044 default=False, action="store_true",
1045 help="Ignore errors from secondaries")
1047 NOSHUTDOWN_OPT = cli_option("--noshutdown", dest="shutdown",
1048 action="store_false", default=True,
1049 help="Don't shutdown the instance (unsafe)")
1051 TIMEOUT_OPT = cli_option("--timeout", dest="timeout", type="int",
1052 default=constants.DEFAULT_SHUTDOWN_TIMEOUT,
1053 help="Maximum time to wait")
1055 SHUTDOWN_TIMEOUT_OPT = cli_option("--shutdown-timeout",
1056 dest="shutdown_timeout", type="int",
1057 default=constants.DEFAULT_SHUTDOWN_TIMEOUT,
1058 help="Maximum time to wait for instance shutdown")
1060 INTERVAL_OPT = cli_option("--interval", dest="interval", type="int",
1062 help=("Number of seconds between repetions of the"
1065 EARLY_RELEASE_OPT = cli_option("--early-release",
1066 dest="early_release", default=False,
1067 action="store_true",
1068 help="Release the locks on the secondary"
1071 NEW_CLUSTER_CERT_OPT = cli_option("--new-cluster-certificate",
1072 dest="new_cluster_cert",
1073 default=False, action="store_true",
1074 help="Generate a new cluster certificate")
1076 RAPI_CERT_OPT = cli_option("--rapi-certificate", dest="rapi_cert",
1078 help="File containing new RAPI certificate")
1080 NEW_RAPI_CERT_OPT = cli_option("--new-rapi-certificate", dest="new_rapi_cert",
1081 default=None, action="store_true",
1082 help=("Generate a new self-signed RAPI"
1085 NEW_CONFD_HMAC_KEY_OPT = cli_option("--new-confd-hmac-key",
1086 dest="new_confd_hmac_key",
1087 default=False, action="store_true",
1088 help=("Create a new HMAC key for %s" %
# Read the cluster domain secret from a file; the help text previously
# duplicated the word "new" ("Load new new cluster domain").
CLUSTER_DOMAIN_SECRET_OPT = cli_option("--cluster-domain-secret",
                                       dest="cluster_domain_secret",
                                       default=None,
                                       help=("Load new cluster domain"
                                             " secret from file"))
1097 NEW_CLUSTER_DOMAIN_SECRET_OPT = cli_option("--new-cluster-domain-secret",
1098 dest="new_cluster_domain_secret",
1099 default=False, action="store_true",
1100 help=("Create a new cluster domain"
1103 USE_REPL_NET_OPT = cli_option("--use-replication-network",
1104 dest="use_replication_network",
1105 help="Whether to use the replication network"
1106 " for talking to the nodes",
1107 action="store_true", default=False)
1109 MAINTAIN_NODE_HEALTH_OPT = \
1110 cli_option("--maintain-node-health", dest="maintain_node_health",
1111 metavar=_YORNO, default=None, type="bool",
1112 help="Configure the cluster to automatically maintain node"
1113 " health, by shutting down unknown instances, shutting down"
1114 " unknown DRBD devices, etc.")
1116 IDENTIFY_DEFAULTS_OPT = \
1117 cli_option("--identify-defaults", dest="identify_defaults",
1118 default=False, action="store_true",
1119 help="Identify which saved instance parameters are equal to"
1120 " the current cluster defaults and set them as such, instead"
1121 " of marking them as overridden")
1123 UIDPOOL_OPT = cli_option("--uid-pool", default=None,
1124 action="store", dest="uid_pool",
1125 help=("A list of user-ids or user-id"
1126 " ranges separated by commas"))
1128 ADD_UIDS_OPT = cli_option("--add-uids", default=None,
1129 action="store", dest="add_uids",
1130 help=("A list of user-ids or user-id"
1131 " ranges separated by commas, to be"
1132 " added to the user-id pool"))
1134 REMOVE_UIDS_OPT = cli_option("--remove-uids", default=None,
1135 action="store", dest="remove_uids",
1136 help=("A list of user-ids or user-id"
1137 " ranges separated by commas, to be"
1138 " removed from the user-id pool"))
1140 RESERVED_LVS_OPT = cli_option("--reserved-lvs", default=None,
1141 action="store", dest="reserved_lvs",
1142 help=("A comma-separated list of reserved"
1143 " logical volumes names, that will be"
1144 " ignored by cluster verify"))
# Command line option definitions shared by the gnt-* scripts.  Each is a
# cli_option() instance; the destination attribute ends up on the parsed
# "opts" object.
# NOTE(review): a few help strings below appear truncated in this chunk
# (PREALLOC_WIPE_DISKS_OPT, FORCE_FILTER_OPT) — verify against upstream.
ROMAN_OPT = cli_option("--roman",
                       dest="roman_integers", default=False,
                       action="store_true",
                       help="Use roman numbers for positive integers")

DRBD_HELPER_OPT = cli_option("--drbd-usermode-helper", dest="drbd_helper",
                             action="store", default=None,
                             help="Specifies usermode helper for DRBD")

NODRBD_STORAGE_OPT = cli_option("--no-drbd-storage", dest="drbd_storage",
                                action="store_false", default=True,
                                help="Disable support for DRBD")

PRIMARY_IP_VERSION_OPT = \
    cli_option("--primary-ip-version", default=constants.IP4_VERSION,
               action="store", dest="primary_ip_version",
               metavar="%d|%d" % (constants.IP4_VERSION,
                                  constants.IP6_VERSION),
               help="Cluster-wide IP version for primary IP")

PRIORITY_OPT = cli_option("--priority", default=None, dest="priority",
                          metavar="|".join(name for name, _ in _PRIORITY_NAMES),
                          choices=_PRIONAME_TO_VALUE.keys(),
                          help="Priority for opcode processing")

HID_OS_OPT = cli_option("--hidden", dest="hidden",
                        type="bool", default=None, metavar=_YORNO,
                        help="Sets the hidden flag on the OS")

BLK_OS_OPT = cli_option("--blacklisted", dest="blacklisted",
                        type="bool", default=None, metavar=_YORNO,
                        help="Sets the blacklisted flag on the OS")

PREALLOC_WIPE_DISKS_OPT = cli_option("--prealloc-wipe-disks", default=None,
                                     type="bool", metavar=_YORNO,
                                     dest="prealloc_wipe_disks",
                                     help=("Wipe disks prior to instance"

NODE_PARAMS_OPT = cli_option("--node-parameters", dest="ndparams",
                             type="keyval", default=None,
                             help="Node parameters")

ALLOC_POLICY_OPT = cli_option("--alloc-policy", dest="alloc_policy",
                              action="store", metavar="POLICY", default=None,
                              help="Allocation policy for the node group")

NODE_POWERED_OPT = cli_option("--node-powered", default=None,
                              type="bool", metavar=_YORNO,
                              dest="node_powered",
                              help="Specify if the SoR for node is powered")

OOB_TIMEOUT_OPT = cli_option("--oob-timeout", dest="oob_timeout", type="int",
                             default=constants.OOB_TIMEOUT,
                             help="Maximum time to wait for out-of-band helper")

POWER_DELAY_OPT = cli_option("--power-delay", dest="power_delay", type="float",
                             default=constants.OOB_POWER_DELAY,
                             help="Time in seconds to wait between power-ons")

FORCE_FILTER_OPT = cli_option("-F", "--filter", dest="force_filter",
                              action="store_true", default=False,
                              help=("Whether command argument should be treated"

NO_REMEMBER_OPT = cli_option("--no-remember",
                             action="store_true", default=False,
                             help="Perform but do not record the change"
                             " in the configuration")

PRIMARY_ONLY_OPT = cli_option("-p", "--primary-only",
                              default=False, action="store_true",
                              help="Evacuate primary instances only")

SECONDARY_ONLY_OPT = cli_option("-s", "--secondary-only",
                                default=False, action="store_true",
                                help="Evacuate secondary instances only"
                                " (applies only to internally mirrored"
                                " disk templates, e.g. %s)" %
                                utils.CommaJoin(constants.DTS_INT_MIRROR))

STARTUP_PAUSED_OPT = cli_option("--paused", dest="startup_paused",
                                action="store_true", default=False,
                                help="Pause instance at startup")

TO_GROUP_OPT = cli_option("--to", dest="to", metavar="<group>",
                          help="Destination node group (name or uuid)",
                          default=None, action="append",
                          completion_suggest=OPT_COMPL_ONE_NODEGROUP)


#: Options provided by all commands
COMMON_OPTS = [DEBUG_OPT]

# common options for creating instances. add and import then add their own
# NOTE(review): this list appears truncated in this chunk (no closing bracket)
COMMON_CREATE_OPTS = [
  FILESTORE_DRIVER_OPT,
def _ParseArgs(argv, commands, aliases):
  """Parser for the command line arguments.

  This function parses the arguments and returns the function which
  must be executed together with its (modified) arguments.

  @param argv: the command line
  @param commands: dictionary with special contents, see the design
      doc for cmdline handling
  @param aliases: dictionary with command aliases {'alias': 'target, ...}

  """
  # NOTE(review): several lines appear elided in this chunk (the
  # binary-name if/else, sys.exit after --version, sortedcmds.sort(),
  # and the "cmd = argv.pop(1)" / alias lookup scaffolding) — verify
  # against upstream before relying on this block.
  binary = "<command>"
  binary = argv[0].split("/")[-1]

  if len(argv) > 1 and argv[1] == "--version":
    ToStdout("%s (ganeti %s) %s", binary, constants.VCS_VERSION,
             constants.RELEASE_VERSION)
    # Quit right away. That way we don't have to care about this special
    # argument. optparse.py does it the same.

  if len(argv) < 2 or not (argv[1] in commands or
                           argv[1] in aliases):
    # let's do a nice thing
    sortedcmds = commands.keys()

    ToStdout("Usage: %s {command} [options...] [argument...]", binary)
    ToStdout("%s <command> --help to see details, or man %s", binary, binary)

    # compute the max line length for cmd + usage
    mlen = max([len(" %s" % cmd) for cmd in commands])
    mlen = min(60, mlen)  # should not get here...

    # and format a nice command list
    ToStdout("Commands:")
    for cmd in sortedcmds:
      cmdstr = " %s" % (cmd,)
      help_text = commands[cmd][4]
      help_lines = textwrap.wrap(help_text, 79 - 3 - mlen)
      ToStdout("%-*s - %s", mlen, cmdstr, help_lines.pop(0))
      for line in help_lines:
        ToStdout("%-*s %s", mlen, "", line)

    return None, None, None

  # get command, unalias it, and look it up in commands
  raise errors.ProgrammerError("Alias '%s' overrides an existing"
  if aliases[cmd] not in commands:
    raise errors.ProgrammerError("Alias '%s' maps to non-existing"
                                 " command '%s'" % (cmd, aliases[cmd]))

  func, args_def, parser_opts, usage, description = commands[cmd]
  parser = OptionParser(option_list=parser_opts + COMMON_OPTS,
                        description=description,
                        formatter=TitledHelpFormatter(),
                        usage="%%prog %s %s" % (cmd, usage))
  parser.disable_interspersed_args()
  options, args = parser.parse_args()

  if not _CheckArguments(cmd, args_def, args):
    return None, None, None

  return func, options, args
def _CheckArguments(cmd, args_def, args):
  """Verifies the arguments using the argument definition.

  Algorithm:

    1. Abort with error if values specified by user but none expected.

    1. For each argument in definition

      1. Keep running count of minimum number of values (min_count)
      1. Keep running count of maximum number of values (max_count)
      1. If it has an unlimited number of values

        1. Abort with error if it's not the last argument in the definition

    1. If last argument has limited number of values

      1. Abort with error if number of values doesn't match or is too large

    1. Abort with error if user didn't pass enough values (min_count)

  """
  # NOTE(review): min_count/max_count initialization, the bodies of the
  # "is None" branches and the "return False" lines after each ToStderr
  # appear elided in this chunk — verify against upstream.
  if args and not args_def:
    ToStderr("Error: Command %s expects no arguments", cmd)

  last_idx = len(args_def) - 1

  for idx, arg in enumerate(args_def):
    if min_count is None:
    elif arg.min is not None:
      min_count += arg.min

    if max_count is None:
    elif arg.max is not None:
      max_count += arg.max

    check_max = (arg.max is not None)

    elif arg.max is None:
      raise errors.ProgrammerError("Only the last argument can have max=None")

  # Command with exact number of arguments
  if (min_count is not None and max_count is not None and
      min_count == max_count and len(args) != min_count):
    ToStderr("Error: Command %s expects %d argument(s)", cmd, min_count)

  # Command with limited number of arguments
  if max_count is not None and len(args) > max_count:
    ToStderr("Error: Command %s expects only %d argument(s)",

  # Command with some required arguments
  if min_count is not None and len(args) < min_count:
    ToStderr("Error: Command %s expects at least %d argument(s)",
def SplitNodeOption(value):
  """Splits the value of a --node option.

  @type value: string or None
  @param value: either "pnode", "pnode:snode" or None
  @return: a (primary, secondary) pair; secondary is C{None} when no
      ":" separator is present (note: the two-name case is the list
      produced by C{str.split}, the one-name case is a tuple)

  """
  # Only the first ":" separates primary from secondary; further colons
  # remain part of the secondary name.
  if value and ":" in value:
    return value.split(":", 1)
  return (value, None)
def CalculateOSNames(os_name, os_variants):
  """Calculates all the names an OS can be called, according to its variants.

  @type os_name: string
  @param os_name: base name of the os
  @type os_variants: list or None
  @param os_variants: list of supported variants
  @rtype: list
  @return: list of valid names

  """
  # With variants, each valid name is "<os>+<variant>"; without any
  # variants the base name itself is the only valid name.
  if os_variants:
    return ["%s+%s" % (os_name, v) for v in os_variants]
  else:
    return [os_name]
def ParseFields(selected, default):
  """Parses the values of "--field"-like options.

  @type selected: string or None
  @param selected: User-selected options
  @type default: list
  @param default: Default fields
  @rtype: list
  @return: the default fields when nothing was selected; the defaults
      extended by the user's comma-separated fields when C{selected}
      starts with "+"; otherwise just the user's fields

  """
  if selected is None:
    return default

  # A leading "+" means "extend the defaults", not "replace them"
  if selected.startswith("+"):
    return default + selected[1:].split(",")

  return selected.split(",")
# Decorator alias provided by ganeti.rpc; see rpc.RunWithRPC for the exact
# semantics (presumably sets up/tears down the RPC layer around the call).
UsesRPC = rpc.RunWithRPC
def AskUser(text, choices=None):
  """Ask the user a question.

  @param text: the question to ask

  @param choices: list with elements tuples (input_char, return_value,
      description); if not given, it will default to: [('y', True,
      'Perform the operation'), ('n', False, 'Do no do the operation')];
      note that the '?' char is reserved for help

  @return: one of the return values from the choices list; if input is
      not possible (i.e. not running with a tty, we return the last

  """
  # NOTE(review): the "if choices is None:" guard, the try/finally around
  # the tty handling, the prompt loop and the final return all appear
  # elided in this chunk — verify against upstream.
  choices = [("y", True, "Perform the operation"),
             ("n", False, "Do not perform the operation")]
  if not choices or not isinstance(choices, list):
    raise errors.ProgrammerError("Invalid choices argument to AskUser")
  for entry in choices:
    if not isinstance(entry, tuple) or len(entry) < 3 or entry[0] == "?":
      raise errors.ProgrammerError("Invalid choices element to AskUser")

  # default answer when no interactive input is possible
  answer = choices[-1][1]

  for line in text.splitlines():
    new_text.append(textwrap.fill(line, 70, replace_whitespace=False))
  text = "\n".join(new_text)

  f = file("/dev/tty", "a+")

  chars = [entry[0] for entry in choices]
  chars[-1] = "[%s]" % chars[-1]

  maps = dict([(entry[0], entry[1]) for entry in choices])

  f.write("/".join(chars))

  # readline(2): read at most two characters of the answer
  line = f.readline(2).strip().lower()

  for entry in choices:
    f.write(" %s - %s\n" % (entry[0], entry[2]))
class JobSubmittedException(Exception):
  """Job was submitted, client should exit.

  This exception has one argument, the ID of the job that was
  submitted. The handler should print this ID.

  This is not an error, just a structured way to exit from clients.

  """
def SendJob(ops, cl=None):
  """Function to submit an opcode without waiting for the results.

  @param ops: list of opcodes
  @type cl: luxi.Client
  @param cl: the luxi client to use for communicating with the master;
      if None, a new client will be created

  """
  # NOTE(review): the "if cl is None: cl = GetClient()" fallback and the
  # "return job_id" line appear elided in this chunk — verify upstream.
  job_id = cl.SubmitJob(ops)
def GenericPollJob(job_id, cbs, report_cbs):
  """Generic job-polling function.

  @type job_id: number
  @param job_id: Job ID
  @type cbs: Instance of L{JobPollCbBase}
  @param cbs: Data callbacks
  @type report_cbs: Instance of L{JobPollReportCbBase}
  @param report_cbs: Reporting callbacks

  """
  # NOTE(review): the polling "while True:" loop, several if/continue
  # guards and error-branch scaffolding appear elided in this chunk —
  # verify against upstream before relying on this block.
  prev_job_info = None
  prev_logmsg_serial = None

  result = cbs.WaitForJobChangeOnce(job_id, ["status"], prev_job_info,
  # job not found, go away!
  raise errors.JobLost("Job with id %s lost" % job_id)

  if result == constants.JOB_NOTCHANGED:
    report_cbs.ReportNotChanged(job_id, status)

  # Split result, a tuple of (field values, log entries)
  (job_info, log_entries) = result
  (status, ) = job_info

  for log_entry in log_entries:
    (serial, timestamp, log_type, message) = log_entry
    report_cbs.ReportLogMessage(job_id, serial, timestamp,
    prev_logmsg_serial = max(prev_logmsg_serial, serial)

  # TODO: Handle canceled and archived jobs
  elif status in (constants.JOB_STATUS_SUCCESS,
                  constants.JOB_STATUS_ERROR,
                  constants.JOB_STATUS_CANCELING,
                  constants.JOB_STATUS_CANCELED):

  prev_job_info = job_info

  jobs = cbs.QueryJobs([job_id], ["status", "opstatus", "opresult"])
  raise errors.JobLost("Job with id %s lost" % job_id)

  status, opstatus, result = jobs[0]

  if status == constants.JOB_STATUS_SUCCESS:

  if status in (constants.JOB_STATUS_CANCELING, constants.JOB_STATUS_CANCELED):
    raise errors.OpExecError("Job was canceled")

  for idx, (status, msg) in enumerate(zip(opstatus, result)):
    if status == constants.OP_STATUS_SUCCESS:
    elif status == constants.OP_STATUS_ERROR:
      errors.MaybeRaise(msg)

      raise errors.OpExecError("partial failure (opcode %d): %s" %

      raise errors.OpExecError(str(msg))

  # default failure mode
  raise errors.OpExecError(result)
class JobPollCbBase:
  """Base class for L{GenericPollJob} callbacks.

  """
  # NOTE(review): a "def __init__(self):" line appears to be missing
  # before the following docstring — verify against upstream.
  """Initializes this class.

  """
  def WaitForJobChangeOnce(self, job_id, fields,
                           prev_job_info, prev_log_serial):
    """Waits for changes on a job.

    """
    raise NotImplementedError()

  def QueryJobs(self, job_ids, fields):
    """Returns the selected fields for the selected job IDs.

    @type job_ids: list of numbers
    @param job_ids: Job IDs
    @type fields: list of strings
    @param fields: Fields

    """
    raise NotImplementedError()
class JobPollReportCbBase:
  """Base class for L{GenericPollJob} reporting callbacks.

  """
  # NOTE(review): a "def __init__(self):" line appears to be missing
  # before the following docstring — verify against upstream.
  """Initializes this class.

  """
  def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
    """Handles a log message.

    """
    raise NotImplementedError()

  def ReportNotChanged(self, job_id, status):
    """Called for if a job hasn't changed in a while.

    @type job_id: number
    @param job_id: Job ID
    @type status: string or None
    @param status: Job status if available

    """
    raise NotImplementedError()
class _LuxiJobPollCb(JobPollCbBase):
  def __init__(self, cl):
    """Initializes this class.

    """
    JobPollCbBase.__init__(self)
    # NOTE(review): the "self.cl = cl" assignment appears elided here —
    # the methods below read self.cl; verify against upstream.

  def WaitForJobChangeOnce(self, job_id, fields,
                           prev_job_info, prev_log_serial):
    """Waits for changes on a job.

    """
    return self.cl.WaitForJobChangeOnce(job_id, fields,
                                        prev_job_info, prev_log_serial)

  def QueryJobs(self, job_ids, fields):
    """Returns the selected fields for the selected job IDs.

    """
    return self.cl.QueryJobs(job_ids, fields)
class FeedbackFnJobPollReportCb(JobPollReportCbBase):
  def __init__(self, feedback_fn):
    """Initializes this class.

    """
    JobPollReportCbBase.__init__(self)

    self.feedback_fn = feedback_fn

    assert callable(feedback_fn)

  def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
    """Handles a log message.

    """
    # Forward the raw (timestamp, type, message) triple to the callback
    self.feedback_fn((timestamp, log_type, log_msg))

  def ReportNotChanged(self, job_id, status):
    """Called if a job hasn't changed in a while.

    """
class StdioJobPollReportCb(JobPollReportCbBase):
  # NOTE(review): a "def __init__(self):" line appears to be missing
  # before the following docstring/body — verify against upstream.
  """Initializes this class.

  """
  JobPollReportCbBase.__init__(self)

  # one-shot flags so each waiting state is only reported once
  self.notified_queued = False
  self.notified_waitlock = False

  def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
    """Handles a log message.

    """
    ToStdout("%s %s", time.ctime(utils.MergeTime(timestamp)),
             FormatLogMessage(log_type, log_msg))

  def ReportNotChanged(self, job_id, status):
    """Called if a job hasn't changed in a while.

    """
    if status == constants.JOB_STATUS_QUEUED and not self.notified_queued:
      ToStderr("Job %s is waiting in queue", job_id)
      self.notified_queued = True

    elif status == constants.JOB_STATUS_WAITING and not self.notified_waitlock:
      ToStderr("Job %s is trying to acquire all necessary locks", job_id)
      self.notified_waitlock = True
def FormatLogMessage(log_type, log_msg):
  """Formats a job message according to its type.

  @param log_type: message type (compared against
      C{constants.ELOG_MESSAGE}); anything else is stringified first
  @param log_msg: the message payload
  @return: the message passed through L{utils.SafeEncode}

  """
  # Non-plain messages may be arbitrary objects; make them printable first
  if log_type != constants.ELOG_MESSAGE:
    log_msg = str(log_msg)

  return utils.SafeEncode(log_msg)
def PollJob(job_id, cl=None, feedback_fn=None, reporter=None):
  """Function to poll for the result of a job.

  @type job_id: job identified
  @param job_id: the job to poll for results
  @type cl: luxi.Client
  @param cl: the luxi client to use for communicating with the master;
      if None, a new client will be created

  """
  # NOTE(review): the "if cl is None: cl = GetClient()" fallback and the
  # feedback_fn/reporter if/elif scaffolding appear elided in this chunk
  # — verify against upstream.
  if reporter is None:
    reporter = FeedbackFnJobPollReportCb(feedback_fn)
    reporter = StdioJobPollReportCb()
  raise errors.ProgrammerError("Can't specify reporter and feedback function")

  return GenericPollJob(job_id, _LuxiJobPollCb(cl), reporter)
def SubmitOpCode(op, cl=None, feedback_fn=None, opts=None, reporter=None):
  """Legacy function to submit an opcode.

  This is just a simple wrapper over the construction of the processor
  instance. It should be extended to better handle feedback and
  interaction functions.

  """
  # NOTE(review): the "if cl is None: cl = GetClient()" fallback and the
  # closing "reporter=reporter)" argument of PollJob appear elided here.
  SetGenericOpcodeOpts([op], opts)

  job_id = SendJob([op], cl=cl)

  op_results = PollJob(job_id, cl=cl, feedback_fn=feedback_fn,

  # A single opcode was submitted, so its result is the first entry
  return op_results[0]
def SubmitOrSend(op, opts, cl=None, feedback_fn=None):
  """Wrapper around SubmitOpCode or SendJob.

  This function will decide, based on the 'opts' parameter, whether to
  submit and wait for the result of the opcode (and return it), or
  whether to just send the job and print its identifier. It is used in
  order to simplify the implementation of the '--submit' option.

  It will also process the opcodes if we're sending the via SendJob
  (otherwise SubmitOpCode does it).

  """
  if opts and opts.submit_only:
    # NOTE(review): a "job = [op]" line appears elided here — verify
    # against upstream.
    SetGenericOpcodeOpts(job, opts)
    job_id = SendJob(job, cl=cl)
    # Structured exit: the caller's handler prints the job ID
    raise JobSubmittedException(job_id)

  return SubmitOpCode(op, cl=cl, feedback_fn=feedback_fn, opts=opts)
def SetGenericOpcodeOpts(opcode_list, options):
  """Processor for generic options.

  This function updates the given opcodes based on generic command
  line options (like debug, dry-run, etc.).

  @param opcode_list: list of opcodes
  @param options: command line options or None
  @return: None (in-place modification)

  """
  # NOTE(review): an "if not options: return" guard appears elided here
  # ("options" may be None per the docstring) — verify against upstream.
  for op in opcode_list:
    op.debug_level = options.debug
    if hasattr(options, "dry_run"):
      op.dry_run = options.dry_run
    if getattr(options, "priority", None) is not None:
      op.priority = _PRIONAME_TO_VALUE[options.priority]
# NOTE(review): this fragment appears to be the body of a GetClient()-style
# function whose "def" line and try/return scaffolding are missing from
# this chunk — verify against upstream before relying on it.
# TODO: Cache object?
  client = luxi.Client()
except luxi.NoMasterError:
  ss = ssconf.SimpleStore()

  # Try to read ssconf file
  except errors.ConfigurationError:
    raise errors.OpPrereqError("Cluster not initialized or this machine is"
                               " not part of a cluster")

  master, myself = ssconf.GetMasterAndMyself(ss=ss)
  if master != myself:
    raise errors.OpPrereqError("This is not the master node, please connect"
                               " to node '%s' and rerun the command" %
def FormatError(err):
  """Return a formatted error message for a given error.

  This function takes an exception instance and returns a tuple
  consisting of two values: first, the recommended exit code, and
  second, a string describing the error message (not
  newline-terminated).

  """
  # NOTE(review): the initialization of retcode/obuf/msg and some
  # "else:" lines inside branches appear elided in this chunk — verify
  # against upstream before relying on this block.
  if isinstance(err, errors.ConfigurationError):
    txt = "Corrupt configuration file: %s" % msg
    obuf.write(txt + "\n")
    obuf.write("Aborting.")
  elif isinstance(err, errors.HooksAbort):
    obuf.write("Failure: hooks execution failed:\n")
    for node, script, out in err.args[0]:
      obuf.write(" node: %s, script: %s, output: %s\n" %
                 (node, script, out))
      obuf.write(" node: %s, script: %s (no output)\n" %
  elif isinstance(err, errors.HooksFailure):
    obuf.write("Failure: hooks general failure: %s" % msg)
  elif isinstance(err, errors.ResolverError):
    this_host = netutils.Hostname.GetSysName()
    if err.args[0] == this_host:
      msg = "Failure: can't resolve my own hostname ('%s')"
      msg = "Failure: can't resolve hostname '%s'"
    obuf.write(msg % err.args[0])
  elif isinstance(err, errors.OpPrereqError):
    if len(err.args) == 2:
      obuf.write("Failure: prerequisites not met for this"
                 " operation:\nerror type: %s, error details:\n%s" %
                 (err.args[1], err.args[0]))
      obuf.write("Failure: prerequisites not met for this"
                 " operation:\n%s" % msg)
  elif isinstance(err, errors.OpExecError):
    obuf.write("Failure: command execution error:\n%s" % msg)
  elif isinstance(err, errors.TagError):
    obuf.write("Failure: invalid tag(s) given:\n%s" % msg)
  elif isinstance(err, errors.JobQueueDrainError):
    obuf.write("Failure: the job queue is marked for drain and doesn't"
               " accept new requests\n")
  elif isinstance(err, errors.JobQueueFull):
    obuf.write("Failure: the job queue is full and doesn't accept new"
               " job submissions until old jobs are archived\n")
  elif isinstance(err, errors.TypeEnforcementError):
    obuf.write("Parameter Error: %s" % msg)
  elif isinstance(err, errors.ParameterError):
    obuf.write("Failure: unknown/wrong parameter name '%s'" % msg)
  elif isinstance(err, luxi.NoMasterError):
    obuf.write("Cannot communicate with the master daemon.\nIs it running"
               " and listening for connections?")
  elif isinstance(err, luxi.TimeoutError):
    obuf.write("Timeout while talking to the master daemon. Jobs might have"
               " been submitted and will continue to run even if the call"
               " timed out. Useful commands in this situation are \"gnt-job"
               " list\", \"gnt-job cancel\" and \"gnt-job watch\". Error:\n")
  elif isinstance(err, luxi.PermissionError):
    obuf.write("It seems you don't have permissions to connect to the"
               " master daemon.\nPlease retry as a different user.")
  elif isinstance(err, luxi.ProtocolError):
    obuf.write("Unhandled protocol error while talking to the master daemon:\n"
  elif isinstance(err, errors.JobLost):
    obuf.write("Error checking job status: %s" % msg)
  elif isinstance(err, errors.QueryFilterParseError):
    obuf.write("Error while parsing query filter: %s\n" % err.args[0])
    obuf.write("\n".join(err.GetDetails()))
  elif isinstance(err, errors.GenericError):
    obuf.write("Unhandled Ganeti error: %s" % msg)
  elif isinstance(err, JobSubmittedException):
    obuf.write("JobID: %s\n" % err.args[0])
    obuf.write("Unhandled exception: %s" % msg)
  return retcode, obuf.getvalue().rstrip("\n")
def GenericMain(commands, override=None, aliases=None):
  """Generic main function for all the gnt-* commands.

  - commands: a dictionary with a special structure, see the design doc
    for command line handling.
  - override: if not None, we expect a dictionary with keys that will
    override command line options; this can be used to pass
    options from the scripts to generic functions
  - aliases: dictionary with command aliases {'alias': 'target, ...}

  """
  # NOTE(review): try/except scaffolding, the "if aliases is None" guard,
  # several sys.exit/return lines and the final return appear elided in
  # this chunk — verify against upstream before relying on this block.
  # save the program name and the entire command line for later logging
  binary = os.path.basename(sys.argv[0]) or sys.argv[0]
  if len(sys.argv) >= 2:
    binary += " " + sys.argv[1]
    old_cmdline = " ".join(sys.argv[2:])
  binary = "<unknown program>"

  func, options, args = _ParseArgs(sys.argv, commands, aliases)
  except errors.ParameterError, err:
    result, err_msg = FormatError(err)

  if func is None:  # parse error

  if override is not None:
    for key, val in override.iteritems():
      setattr(options, key, val)

  utils.SetupLogging(constants.LOG_COMMANDS, binary, debug=options.debug,
                     stderr_logging=True)

  logging.info("run with arguments '%s'", old_cmdline)
  logging.info("run with no arguments")

  result = func(options, args)
  except (errors.GenericError, luxi.ProtocolError,
          JobSubmittedException), err:
    result, err_msg = FormatError(err)
    logging.exception("Error during command processing")
  except KeyboardInterrupt:
    result = constants.EXIT_FAILURE
    ToStderr("Aborted. Note that if the operation created any jobs, they"
             " might have been submitted and"
             " will continue to run in the background.")
  except IOError, err:
    if err.errno == errno.EPIPE:
      # our terminal went away, we'll exit
      sys.exit(constants.EXIT_FAILURE)
def ParseNicOption(optvalue):
  """Parses the value of the --net option(s).

  """
  # NOTE(review): the "try:" opening this block, the int() conversion of
  # nidx, the per-slot assignment and the "return nics" line appear
  # elided in this chunk — verify against upstream.
  nic_max = max(int(nidx[0]) + 1 for nidx in optvalue)
  except (TypeError, ValueError), err:
    raise errors.OpPrereqError("Invalid NIC index passed: %s" % str(err))

  # NOTE(review): all slots of this list initially reference the SAME
  # empty dict; this is only safe if every used slot is later reassigned.
  nics = [{}] * nic_max
  for nidx, ndict in optvalue:
    if not isinstance(ndict, dict):
      raise errors.OpPrereqError("Invalid nic/%d value: expected dict,"
                                 " got %s" % (nidx, ndict))
    utils.ForceDictType(ndict, constants.INIC_PARAMS_TYPES)
def GenericInstanceCreate(mode, opts, args):
  """Add an instance to the cluster via either creation or import.

  @param mode: constants.INSTANCE_CREATE or constants.INSTANCE_IMPORT
  @param opts: the command line options selected by the user
  @param args: should contain only one element, the new instance name
  @return: the desired exit code

  """
  # NOTE(review): numerous lines appear elided in this chunk (the
  # "instance = args[0]" extraction, if/else and try/except scaffolding,
  # several keyword arguments of OpInstanceCreate, and the final return)
  # — verify against upstream before relying on this block.
  (pnode, snode) = SplitNodeOption(opts.node)

  hypervisor, hvparams = opts.hypervisor

  nics = ParseNicOption(opts.nics)
  elif mode == constants.INSTANCE_CREATE:
    # default of one nic, all auto

  if opts.disk_template == constants.DT_DISKLESS:
    if opts.disks or opts.sd_size is not None:
      raise errors.OpPrereqError("Diskless instance but disk"
                                 " information passed")
  if (not opts.disks and not opts.sd_size
      and mode == constants.INSTANCE_CREATE):
    raise errors.OpPrereqError("No disk information specified")
  if opts.disks and opts.sd_size is not None:
    raise errors.OpPrereqError("Please use either the '--disk' or"
  if opts.sd_size is not None:
    # Convert the legacy single-disk size option into the --disk format
    opts.disks = [(0, {constants.IDISK_SIZE: opts.sd_size})]

  disk_max = max(int(didx[0]) + 1 for didx in opts.disks)
  except ValueError, err:
    raise errors.OpPrereqError("Invalid disk index passed: %s" % str(err))
  disks = [{}] * disk_max

  for didx, ddict in opts.disks:
    if not isinstance(ddict, dict):
      msg = "Invalid disk/%d value: expected dict, got %s" % (didx, ddict)
      raise errors.OpPrereqError(msg)
    elif constants.IDISK_SIZE in ddict:
      # "size" and "adopt" are mutually exclusive per disk
      if constants.IDISK_ADOPT in ddict:
        raise errors.OpPrereqError("Only one of 'size' and 'adopt' allowed"
                                   " (disk %d)" % didx)
      ddict[constants.IDISK_SIZE] = \
        utils.ParseUnit(ddict[constants.IDISK_SIZE])
      except ValueError, err:
        raise errors.OpPrereqError("Invalid disk size for disk %d: %s" %
    elif constants.IDISK_ADOPT in ddict:
      if mode == constants.INSTANCE_IMPORT:
        raise errors.OpPrereqError("Disk adoption not allowed for instance"
      ddict[constants.IDISK_SIZE] = 0
      raise errors.OpPrereqError("Missing size or adoption source for"

  if opts.tags is not None:
    tags = opts.tags.split(",")

  utils.ForceDictType(opts.beparams, constants.BES_PARAMETER_TYPES)
  utils.ForceDictType(hvparams, constants.HVS_PARAMETER_TYPES)

  if mode == constants.INSTANCE_CREATE:
    force_variant = opts.force_variant
    no_install = opts.no_install
    identify_defaults = False
  elif mode == constants.INSTANCE_IMPORT:
    force_variant = False
    src_node = opts.src_node
    src_path = opts.src_dir
    identify_defaults = opts.identify_defaults
    raise errors.ProgrammerError("Invalid creation mode %s" % mode)

  op = opcodes.OpInstanceCreate(instance_name=instance,
                                disk_template=opts.disk_template,
                                pnode=pnode, snode=snode,
                                ip_check=opts.ip_check,
                                name_check=opts.name_check,
                                wait_for_sync=opts.wait_for_sync,
                                file_storage_dir=opts.file_storage_dir,
                                file_driver=opts.file_driver,
                                iallocator=opts.iallocator,
                                hypervisor=hypervisor,
                                beparams=opts.beparams,
                                osparams=opts.osparams,
                                force_variant=force_variant,
                                no_install=no_install,
                                identify_defaults=identify_defaults)

  SubmitOrSend(op, opts)
class _RunWhileClusterStoppedHelper:
  """Helper class for L{RunWhileClusterStopped} to simplify state management

  """
  def __init__(self, feedback_fn, cluster_name, master_node, online_nodes):
    """Initializes this class.

    @type feedback_fn: callable
    @param feedback_fn: Feedback function
    @type cluster_name: string
    @param cluster_name: Cluster name
    @type master_node: string
    @param master_node Master node name
    @type online_nodes: list
    @param online_nodes: List of names of online nodes

    """
    self.feedback_fn = feedback_fn
    self.cluster_name = cluster_name
    self.master_node = master_node
    self.online_nodes = online_nodes

    self.ssh = ssh.SshRunner(self.cluster_name)

    self.nonmaster_nodes = [name for name in online_nodes
                            if name != master_node]

    assert self.master_node not in self.nonmaster_nodes

  def _RunCmd(self, node_name, cmd):
    """Runs a command on the local or a remote machine.

    @type node_name: string
    @param node_name: Machine name

    """
    # NOTE(review): the "else:" for the SSH path and the "if
    # result.failed:" guard around the error handling appear elided in
    # this chunk — verify against upstream.
    if node_name is None or node_name == self.master_node:
      # No need to use SSH
      result = utils.RunCmd(cmd)
      result = self.ssh.Run(node_name, "root", utils.ShellQuoteArgs(cmd))

    errmsg = ["Failed to run command %s" % result.cmd]
    errmsg.append("on node %s" % node_name)
    errmsg.append(": exitcode %s and error %s" %
                  (result.exit_code, result.output))
    raise errors.OpExecError(" ".join(errmsg))

  def Call(self, fn, *args):
    """Call function while all daemons are stopped.

    @param fn: Function to be called

    """
    # NOTE(review): the try/finally blocks that restart the daemons and
    # release the watcher lock appear elided in this chunk — verify.
    # Pause watcher by acquiring an exclusive lock on watcher state file
    self.feedback_fn("Blocking watcher")
    watcher_block = utils.FileLock.Open(constants.WATCHER_STATEFILE)
    # TODO: Currently, this just blocks. There's no timeout.
    # TODO: Should it be a shared lock?
    watcher_block.Exclusive(blocking=True)

    # Stop master daemons, so that no new jobs can come in and all running
    self.feedback_fn("Stopping master daemons")
    self._RunCmd(None, [constants.DAEMON_UTIL, "stop-master"])

    # Stop daemons on all nodes
    for node_name in self.online_nodes:
      self.feedback_fn("Stopping daemons on %s" % node_name)
      self._RunCmd(node_name, [constants.DAEMON_UTIL, "stop-all"])

    # All daemons are shut down now
    return fn(self, *args)
    except Exception, err:
      _, errmsg = FormatError(err)
      logging.exception("Caught exception")
      self.feedback_fn(errmsg)

    # Start cluster again, master node last
    for node_name in self.nonmaster_nodes + [self.master_node]:
      self.feedback_fn("Starting daemons on %s" % node_name)
      self._RunCmd(node_name, [constants.DAEMON_UTIL, "start-all"])

    watcher_block.Close()
def RunWhileClusterStopped(feedback_fn, fn, *args):
  """Calls a function while all cluster daemons are stopped.

  @type feedback_fn: callable
  @param feedback_fn: Feedback function
  @param fn: Function to be called when daemons are stopped

  """
  feedback_fn("Gathering cluster information")

  # This ensures we're running on the master daemon
  # NOTE(review): the client creation (e.g. "cl = GetClient()") and its
  # later "del cl" appear elided in this chunk — verify against upstream.
  (cluster_name, master_node) = \
    cl.QueryConfigValues(["cluster_name", "master_node"])

  online_nodes = GetOnlineNodes([], cl=cl)

  # Don't keep a reference to the client. The master daemon will go away.
  assert master_node in online_nodes

  return _RunWhileClusterStoppedHelper(feedback_fn, cluster_name, master_node,
                                       online_nodes).Call(fn, *args)
def GenerateTable(headers, fields, separator, data,
                  numfields=None, unitfields=None,
  """Prints a table with headers and different fields.

  @param headers: dictionary mapping field names to headers for
  @param fields: the field names corresponding to each row in
  @param separator: the separator to be used; if this is None,
      the default 'smart' algorithm is used which computes optimal
      field width, otherwise just the separator is used between
  @param data: a list of lists, each sublist being one row to be output
  @type numfields: list
  @param numfields: a list with the fields that hold numeric
      values and thus should be right-aligned
  @type unitfields: list
  @param unitfields: a list with the fields that hold numeric
      values that should be formatted with the units field
  @type units: string or None
  @param units: the units we should use for formatting, or None for
      automatic choice (human-readable for non-separator usage, otherwise
      megabytes); this is a one-letter string

  """
  # NOTE(review): the signature terminator (a "units=None):" line), the
  # initialization of format_fields/result, several else branches and
  # the final "return result" appear elided in this chunk — verify
  # against upstream before relying on this block.
  if numfields is None:
  if unitfields is None:

  numfields = utils.FieldSet(*numfields) # pylint: disable-msg=W0142
  unitfields = utils.FieldSet(*unitfields) # pylint: disable-msg=W0142

  for field in fields:
    if headers and field not in headers:
      # TODO: handle better unknown fields (either revert to old
      # style of raising exception, or deal more intelligently with
      headers[field] = field
    if separator is not None:
      format_fields.append("%s")
    elif numfields.Matches(field):
      format_fields.append("%*s")
      format_fields.append("%-*s")

  if separator is None:
    mlens = [0 for name in fields]
    format_str = " ".join(format_fields)
    format_str = separator.replace("%", "%%").join(format_fields)

  for idx, val in enumerate(row):
    if unitfields.Matches(fields[idx]):
      except (TypeError, ValueError):
      val = row[idx] = utils.FormatUnit(val, units)
    val = row[idx] = str(val)
    if separator is None:
      mlens[idx] = max(mlens[idx], len(val))

  for idx, name in enumerate(fields):
    if separator is None:
      mlens[idx] = max(mlens[idx], len(hdr))
    args.append(mlens[idx])
  result.append(format_str % tuple(args))

  if separator is None:
    assert len(mlens) == len(fields)
    if fields and not numfields.Matches(fields[-1]):

  line = ["-" for _ in fields]
  for idx in range(len(fields)):
    if separator is None:
      args.append(mlens[idx])
      args.append(line[idx])
  result.append(format_str % tuple(args))
2433 def _FormatBool(value):
2434 """Formats a boolean value as a string.
#: Default formatting for query results; (callback, align right)
_DEFAULT_FORMAT_QUERY = {
  constants.QFT_TEXT: (str, False),
  constants.QFT_BOOL: (_FormatBool, False),
  constants.QFT_NUMBER: (str, True),
  constants.QFT_TIMESTAMP: (utils.FormatTime, False),
  constants.QFT_OTHER: (str, False),
  constants.QFT_UNKNOWN: (str, False),
  }
def _GetColumnFormatter(fdef, override, unit):
  """Returns formatting function for a field.

  @type fdef: L{objects.QueryFieldDefinition}
  @type override: dict
  @param override: Dictionary for overriding field formatting functions,
    indexed by field name, contents like L{_DEFAULT_FORMAT_QUERY}
  @type unit: string
  @param unit: Unit used for formatting fields of type L{constants.QFT_UNIT}
  @rtype: tuple; (callable, bool)
  @return: Returns the function to format a value (takes one parameter) and a
    boolean for aligning the value on the right-hand side

  """
  # Caller-supplied overrides take precedence over the defaults
  fmt = override.get(fdef.name, None)
  if fmt is not None:
    return fmt

  assert constants.QFT_UNIT not in _DEFAULT_FORMAT_QUERY

  if fdef.kind == constants.QFT_UNIT:
    # Can't keep this information in the static dictionary
    return (lambda value: utils.FormatUnit(value, unit), True)

  fmt = _DEFAULT_FORMAT_QUERY.get(fdef.kind, None)
  if fmt is not None:
    return fmt

  raise NotImplementedError("Can't format column type '%s'" % fdef.kind)
2484 class _QueryColumnFormatter:
2485 """Callable class for formatting fields of a query.
2488 def __init__(self, fn, status_fn, verbose):
2489 """Initializes this class.
2492 @param fn: Formatting function
2493 @type status_fn: callable
2494 @param status_fn: Function to report fields' status
2495 @type verbose: boolean
2496 @param verbose: whether to use verbose field descriptions or not
2500 self._status_fn = status_fn
2501 self._verbose = verbose
2503 def __call__(self, data):
2504 """Returns a field's string representation.
2507 (status, value) = data
2510 self._status_fn(status)
2512 if status == constants.RS_NORMAL:
2513 return self._fn(value)
2515 assert value is None, \
2516 "Found value %r for abnormal status %s" % (value, status)
2518 return FormatResultError(status, self._verbose)
def FormatResultError(status, verbose):
  """Formats result status other than L{constants.RS_NORMAL}.

  @param status: The result status
  @type verbose: boolean
  @param verbose: Whether to return the verbose text
  @return: Text of result status
  @raise NotImplementedError: if the status has no description in
    L{constants.RSS_DESCRIPTION}

  """
  assert status != constants.RS_NORMAL, \
         "FormatResultError called with status equal to constants.RS_NORMAL"
  try:
    (verbose_text, normal_text) = constants.RSS_DESCRIPTION[status]
  except KeyError:
    raise NotImplementedError("Unknown status %s" % status)
  else:
    if verbose:
      return verbose_text
    return normal_text
def FormatQueryResult(result, unit=None, format_override=None, separator=None,
                      header=False, verbose=False):
  """Formats data in L{objects.QueryResponse}.

  @type result: L{objects.QueryResponse}
  @param result: result of query operation
  @type unit: string
  @param unit: Unit used for formatting fields of type L{constants.QFT_UNIT},
    see L{utils.text.FormatUnit}
  @type format_override: dict
  @param format_override: Dictionary for overriding field formatting functions,
    indexed by field name, contents like L{_DEFAULT_FORMAT_QUERY}
  @type separator: string or None
  @param separator: String used to separate fields
  @type header: bool
  @param header: Whether to output header row
  @type verbose: boolean
  @param verbose: whether to use verbose field descriptions or not
  @rtype: tuple; (overall result status, list of formatted lines)

  """
  if unit is None:
    # Automatic choice: human-readable for non-separator usage, otherwise
    # megabytes
    if separator:
      unit = "m"
    else:
      unit = "h"

  if format_override is None:
    format_override = {}

  stats = dict.fromkeys(constants.RS_ALL, 0)

  def _RecordStatus(status):
    if status in stats:
      stats[status] += 1

  columns = []
  for fdef in result.fields:
    assert fdef.title and fdef.name
    (fn, align_right) = _GetColumnFormatter(fdef, format_override, unit)
    columns.append(TableColumn(fdef.title,
                               _QueryColumnFormatter(fn, _RecordStatus,
                                                     verbose),
                               align_right))

  table = FormatTable(result.data, columns, header, separator)

  # Collect statistics
  assert len(stats) == len(constants.RS_ALL)
  assert compat.all(count >= 0 for count in stats.values())

  # Determine overall status. If there was no data, unknown fields must be
  # detected via the field definitions.
  if (stats[constants.RS_UNKNOWN] or
      (not result.data and _GetUnknownFields(result.fields))):
    status = QR_UNKNOWN
  elif compat.any(count > 0 for key, count in stats.items()
                  if key != constants.RS_NORMAL):
    status = QR_INCOMPLETE
  else:
    status = QR_NORMAL

  return (status, table)
def _GetUnknownFields(fdefs):
  """Returns list of unknown fields included in C{fdefs}.

  @type fdefs: list of L{objects.QueryFieldDefinition}
  @rtype: list
  @return: field definitions whose kind is L{constants.QFT_UNKNOWN}

  """
  return [fdef for fdef in fdefs
          if fdef.kind == constants.QFT_UNKNOWN]
def _WarnUnknownFields(fdefs):
  """Prints a warning to stderr if a query included unknown fields.

  @type fdefs: list of L{objects.QueryFieldDefinition}
  @rtype: bool
  @return: whether any unknown fields were found

  """
  unknown = _GetUnknownFields(fdefs)
  if unknown:
    ToStderr("Warning: Queried for unknown fields %s",
             utils.CommaJoin(fdef.name for fdef in unknown))
    return True

  return False
def GenericList(resource, fields, names, unit, separator, header, cl=None,
                format_override=None, verbose=False, force_filter=False):
  """Generic implementation for listing all items of a resource.

  @param resource: One of L{constants.QR_VIA_LUXI}
  @type fields: list of strings
  @param fields: List of fields to query for
  @type names: list of strings
  @param names: Names of items to query for
  @type unit: string or None
  @param unit: Unit used for formatting fields of type L{constants.QFT_UNIT} or
    None for automatic choice (human-readable for non-separator usage,
    otherwise megabytes); this is a one-letter string
  @type separator: string or None
  @param separator: String used to separate fields
  @type header: bool
  @param header: Whether to show header row
  @type force_filter: bool
  @param force_filter: Whether to always treat names as filter
  @type format_override: dict
  @param format_override: Dictionary for overriding field formatting functions,
    indexed by field name, contents like L{_DEFAULT_FORMAT_QUERY}
  @type verbose: boolean
  @param verbose: whether to use verbose field descriptions or not
  @rtype: int
  @return: L{constants.EXIT_SUCCESS} or L{constants.EXIT_UNKNOWN_FIELD}

  """
  if cl is None:
    cl = GetClient()

  if not names:
    names = None

  if (force_filter or
      (names and len(names) == 1 and qlang.MaybeFilter(names[0]))):
    try:
      (filter_text, ) = names
    except (TypeError, ValueError):
      raise errors.OpPrereqError("Exactly one argument must be given as a"
                                 " filter")

    logging.debug("Parsing '%s' as filter", filter_text)
    filter_ = qlang.ParseFilter(filter_text)
  else:
    filter_ = qlang.MakeSimpleFilter("name", names)

  response = cl.Query(resource, fields, filter_)

  found_unknown = _WarnUnknownFields(response.fields)

  (status, data) = FormatQueryResult(response, unit=unit, separator=separator,
                                     header=header,
                                     format_override=format_override,
                                     verbose=verbose)

  for line in data:
    ToStdout(line)

  assert ((found_unknown and status == QR_UNKNOWN) or
          (not found_unknown and status != QR_UNKNOWN))

  if status == QR_UNKNOWN:
    return constants.EXIT_UNKNOWN_FIELD

  # TODO: Should the list command fail if not all data could be collected?
  return constants.EXIT_SUCCESS
def GenericListFields(resource, fields, separator, header, cl=None):
  """Generic implementation for listing fields for a resource.

  @param resource: One of L{constants.QR_VIA_LUXI}
  @type fields: list of strings
  @param fields: List of fields to query for
  @type separator: string or None
  @param separator: String used to separate fields
  @type header: bool
  @param header: Whether to show header row
  @rtype: int
  @return: L{constants.EXIT_SUCCESS} or L{constants.EXIT_UNKNOWN_FIELD}

  """
  if cl is None:
    cl = GetClient()

  if not fields:
    fields = None

  response = cl.QueryFields(resource, fields)

  found_unknown = _WarnUnknownFields(response.fields)

  columns = [
    TableColumn("Name", str, False),
    TableColumn("Title", str, False),
    TableColumn("Description", str, False),
    ]

  rows = [[fdef.name, fdef.title, fdef.doc] for fdef in response.fields]

  for line in FormatTable(rows, columns, header, separator):
    ToStdout(line)

  if found_unknown:
    return constants.EXIT_UNKNOWN_FIELD

  return constants.EXIT_SUCCESS
class TableColumn(object):
  """Describes a column for L{FormatTable}.

  """
  def __init__(self, title, fn, align_right):
    """Initializes this class.

    @type title: string
    @param title: Column title
    @type fn: callable
    @param fn: Formatting function
    @type align_right: bool
    @param align_right: Whether to align values on the right-hand side

    """
    self.title = title
    self.format = fn
    self.align_right = align_right
2757 def _GetColFormatString(width, align_right):
2758 """Returns the format string for a field.
2766 return "%%%s%ss" % (sign, width)
def FormatTable(rows, columns, header, separator):
  """Formats data as a table.

  @type rows: list of lists
  @param rows: Row data, one list per row
  @type columns: list of L{TableColumn}
  @param columns: Column descriptions
  @type header: bool
  @param header: Whether to show header row
  @type separator: string or None
  @param separator: String used to separate columns
  @rtype: list of strings
  @return: the formatted table lines

  """
  if header:
    data = [[col.title for col in columns]]
    colwidth = [len(col.title) for col in columns]
  else:
    data = []
    colwidth = [0 for _ in columns]

  # Format row data
  for row in rows:
    assert len(row) == len(columns)

    formatted = [col.format(value) for value, col in zip(row, columns)]

    if separator is None:
      # Update column widths
      for idx, (oldwidth, value) in enumerate(zip(colwidth, formatted)):
        # Modifying a list's items while iterating is fine
        colwidth[idx] = max(oldwidth, len(value))

    data.append(formatted)

  if separator is not None:
    # Return early if a separator is used
    return [separator.join(row) for row in data]

  if columns and not columns[-1].align_right:
    # Avoid unnecessary spaces at end of line
    colwidth[-1] = 0

  # Build format string
  fmt = " ".join([_GetColFormatString(width, col.align_right)
                  for col, width in zip(columns, colwidth)])

  return [fmt % tuple(row) for row in data]
def FormatTimestamp(ts):
  """Formats a given timestamp.

  @type ts: timestamp
  @param ts: a timeval-type timestamp, a tuple of seconds and microseconds
  @rtype: string
  @return: a string with the formatted timestamp, or "?" if the input is
    not a two-element tuple or list

  """
  if not isinstance(ts, (tuple, list)) or len(ts) != 2:
    return "?"

  (sec, usecs) = ts
  return time.strftime("%F %T", time.localtime(sec)) + ".%06d" % usecs
def ParseTimespec(value):
  """Parse a time specification.

  The following suffixes will be recognized:

    - s: seconds
    - m: minutes
    - h: hours
    - d: days
    - w: weeks

  Without any suffix, the value will be taken to be in seconds.

  @rtype: int
  @return: the number of seconds represented by the specification
  @raise errors.OpPrereqError: if the specification is empty or invalid

  """
  value = str(value)
  if not value:
    raise errors.OpPrereqError("Empty time specification passed")

  suffix_map = {
    "s": 1,
    "m": 60,
    "h": 3600,
    "d": 86400,
    "w": 604800,
    }

  if value[-1] not in suffix_map:
    try:
      value = int(value)
    except (TypeError, ValueError):
      raise errors.OpPrereqError("Invalid time specification '%s'" % value)
  else:
    multiplier = suffix_map[value[-1]]
    value = value[:-1]
    if not value: # no data left after stripping the suffix
      raise errors.OpPrereqError("Invalid time specification (only"
                                 " suffix passed)")
    try:
      value = int(value) * multiplier
    except (TypeError, ValueError):
      raise errors.OpPrereqError("Invalid time specification '%s'" % value)
  return value
def GetOnlineNodes(nodes, cl=None, nowarn=False, secondary_ips=False,
                   filter_master=False, nodegroup=None):
  """Returns the names of online nodes.

  This function will also log a warning on stderr with the names of
  the online nodes.

  @param nodes: if not empty, use only this subset of nodes (minus the
    offline ones)
  @param cl: if not None, luxi client to use
  @type nowarn: boolean
  @param nowarn: by default, this function will output a note with the
    offline nodes that are skipped; if this parameter is True the
    note is not displayed
  @type secondary_ips: boolean
  @param secondary_ips: if True, return the secondary IPs instead of the
    names, useful for doing network traffic over the replication interface
    (if any)
  @type filter_master: boolean
  @param filter_master: if True, do not return the master node in the list
    (useful in coordination with secondary_ips where we cannot check our
    node name against the list)
  @type nodegroup: string
  @param nodegroup: If set, only return nodes in this node group

  """
  if cl is None:
    cl = GetClient()

  filter_ = []

  if nodes:
    filter_.append(qlang.MakeSimpleFilter("name", nodes))

  if nodegroup is not None:
    filter_.append([qlang.OP_OR, [qlang.OP_EQUAL, "group", nodegroup],
                                 [qlang.OP_EQUAL, "group.uuid", nodegroup]])

  if filter_master:
    filter_.append([qlang.OP_NOT, [qlang.OP_TRUE, "master"]])

  if filter_:
    if len(filter_) > 1:
      final_filter = [qlang.OP_AND] + filter_
    else:
      assert len(filter_) == 1
      final_filter = filter_[0]
  else:
    final_filter = None

  result = cl.Query(constants.QR_NODE, ["name", "offline", "sip"], final_filter)

  def _IsOffline(row):
    (_, (_, offline), _) = row
    return offline

  def _GetName(row):
    ((_, name), _, _) = row
    return name

  def _GetSip(row):
    (_, _, (_, sip)) = row
    return sip

  (offline, online) = compat.partition(result.data, _IsOffline)

  if offline and not nowarn:
    ToStderr("Note: skipping offline node(s): %s" %
             utils.CommaJoin(map(_GetName, offline)))

  if secondary_ips:
    fn = _GetSip
  else:
    fn = _GetName

  return map(fn, online)
2954 def _ToStream(stream, txt, *args):
2955 """Write a message to a stream, bypassing the logging system
2957 @type stream: file object
2958 @param stream: the file to which we should write
2960 @param txt: the message
2966 stream.write(txt % args)
2971 except IOError, err:
2972 if err.errno == errno.EPIPE:
2973 # our terminal went away, we'll exit
2974 sys.exit(constants.EXIT_FAILURE)
def ToStdout(txt, *args):
  """Write a message to stdout only, bypassing the logging system

  This is just a wrapper over _ToStream.

  @type txt: str
  @param txt: the message

  """
  _ToStream(sys.stdout, txt, *args)
def ToStderr(txt, *args):
  """Write a message to stderr only, bypassing the logging system

  This is just a wrapper over _ToStream.

  @type txt: str
  @param txt: the message

  """
  _ToStream(sys.stderr, txt, *args)
class JobExecutor(object):
  """Class which manages the submission and execution of multiple jobs.

  Note that instances of this class should not be reused between
  GetResults() calls.

  """
  def __init__(self, cl=None, verbose=True, opts=None, feedback_fn=None):
    self.queue = []
    if cl is None:
      cl = GetClient()
    self.cl = cl
    self.verbose = verbose
    self.jobs = []
    self.opts = opts
    self.feedback_fn = feedback_fn
    # Informational messages go through the caller-supplied callback when
    # available, otherwise straight to stdout
    if feedback_fn:
      self._info_fn = feedback_fn
    else:
      self._info_fn = ToStdout
    self._counter = itertools.count()

  @staticmethod
  def _IfName(name, fmt):
    """Helper function for formatting name.

    @type name: string
    @param name: name or empty string
    @type fmt: string
    @param fmt: %-style format string applied to C{name} when not empty
    @rtype: string
    @return: the formatted name, or the empty string for an empty name

    """
    if name:
      return fmt % name

    return ""

  def QueueJob(self, name, *ops):
    """Record a job for later submit.

    @type name: string
    @param name: a description of the job, will be used in WaitJobSet

    """
    SetGenericOpcodeOpts(ops, self.opts)
    self.queue.append((self._counter.next(), name, ops))

  def AddJobId(self, name, status, job_id):
    """Adds a job ID to the internal queue.

    """
    self.jobs.append((self._counter.next(), status, job_id, name))

  def SubmitPending(self, each=False):
    """Submit all pending jobs.

    @type each: boolean
    @param each: whether to submit jobs one by one instead of in one go

    """
    if each:
      results = []
      for (_, _, ops) in self.queue:
        # SubmitJob will remove the success status, but raise an exception if
        # the submission fails, so we'll notice that anyway.
        results.append([True, self.cl.SubmitJob(ops)])
    else:
      results = self.cl.SubmitManyJobs([ops for (_, _, ops) in self.queue])
    for ((status, data), (idx, name, _)) in zip(results, self.queue):
      self.jobs.append((idx, status, data, name))

  def _ChooseJob(self):
    """Choose a non-waiting/queued job to poll next.

    """
    assert self.jobs, "_ChooseJob called with empty job list"

    result = self.cl.QueryJobs([i[2] for i in self.jobs], ["status"])
    assert result

    for job_data, status in zip(self.jobs, result):
      if (isinstance(status, list) and status and
          status[0] in (constants.JOB_STATUS_QUEUED,
                        constants.JOB_STATUS_WAITING,
                        constants.JOB_STATUS_CANCELING)):
        # job is still present and waiting
        continue
      # good candidate found (either running job or lost job)
      self.jobs.remove(job_data)
      return job_data

    # no job found; poll the oldest one
    return self.jobs.pop(0)

  def GetResults(self):
    """Wait for and return the results of all jobs.

    @rtype: list
    @return: list of tuples (success, job results), in the same order
        as the submitted jobs; if a job has failed, instead of the result
        there will be the error message

    """
    if not self.jobs:
      self.SubmitPending()
    results = []
    if self.verbose:
      ok_jobs = [row[2] for row in self.jobs if row[1]]
      if ok_jobs:
        self._info_fn("Submitted jobs %s", utils.CommaJoin(ok_jobs))

    # first, remove any non-submitted jobs
    self.jobs, failures = compat.partition(self.jobs, lambda x: x[1])
    for idx, _, jid, name in failures:
      self._info_fn("Failed to submit job%s: %s",
                    self._IfName(name, " for %s"), jid)
      results.append((idx, False, jid))

    while self.jobs:
      (idx, _, jid, name) = self._ChooseJob()
      self._info_fn("Waiting for job %s%s ...",
                    jid, self._IfName(name, " for %s"))
      try:
        job_result = PollJob(jid, cl=self.cl, feedback_fn=self.feedback_fn)
        success = True
      except errors.JobLost as err:
        _, job_result = FormatError(err)
        self._info_fn("Job %s%s has been archived, cannot check its result",
                      jid, self._IfName(name, " for %s"))
        success = False
      except (errors.GenericError, luxi.ProtocolError) as err:
        _, job_result = FormatError(err)
        success = False
        # the error message will always be shown, verbose or not
        self._info_fn("Job %s%s has failed: %s",
                      jid, self._IfName(name, " for %s"), job_result)

      results.append((idx, success, job_result))

    # sort based on the index, then drop it
    results.sort()
    results = [i[1:] for i in results]

    return results

  def WaitOrShow(self, wait):
    """Wait for job results or only print the job IDs.

    @type wait: boolean
    @param wait: whether to wait or not

    """
    if wait:
      return self.GetResults()
    else:
      if not self.jobs:
        self.SubmitPending()
      for _, status, result, name in self.jobs:
        if status:
          self._info_fn("%s: %s", result, name)
        else:
          self._info_fn("Failure for %s: %s", name, result)
      return [row[1:3] for row in self.jobs]
def FormatParameterDict(buf, param_dict, actual, level=1):
  """Formats a parameter dictionary.

  @type buf: L{StringIO}
  @param buf: the buffer into which to write
  @type param_dict: dict
  @param param_dict: the own parameters
  @type actual: dict
  @param actual: the current parameter set (including defaults)
  @param level: Level of indent

  """
  indent = "  " * level
  for key in sorted(actual):
    # Parameters not set explicitly fall back to the default from "actual"
    val = param_dict.get(key, "default (%s)" % actual[key])
    buf.write("%s- %s: %s\n" % (indent, key, val))
3178 def ConfirmOperation(names, list_type, text, extra=""):
3179 """Ask the user to confirm an operation on a list of list_type.
3181 This function is used to request confirmation for doing an operation
3182 on a given list of list_type.
3185 @param names: the list of names that we display when
3186 we ask for confirmation
3187 @type list_type: str
3188 @param list_type: Human readable name for elements in the list (e.g. nodes)
3190 @param text: the operation that the user should confirm
3192 @return: True or False depending on user's confirmation.
3196 msg = ("The %s will operate on %d %s.\n%s"
3197 "Do you want to continue?" % (text, count, list_type, extra))
3198 affected = (("\nAffected %s:\n" % list_type) +
3199 "\n".join([" %s" % name for name in names]))
3201 choices = [("y", True, "Yes, execute the %s" % text),
3202 ("n", False, "No, abort the %s" % text)]
3205 choices.insert(1, ("v", "v", "View the list of affected %s" % list_type))
3208 question = msg + affected
3210 choice = AskUser(question, choices)
3213 choice = AskUser(msg + affected, choices)