# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.


"""Module dealing with command line parsing"""
import sys

from cStringIO import StringIO

from ganeti import utils
from ganeti import errors
from ganeti import constants
from ganeti import opcodes
from ganeti import luxi
from ganeti import ssconf
from ganeti import rpc
from ganeti import ssh
from ganeti import compat
from ganeti import netutils
from ganeti import qlang

from optparse import (OptionParser, TitledHelpFormatter,
                      Option, OptionValueError)
51 # Command line options
64 "CLUSTER_DOMAIN_SECRET_OPT",
81 "FILESTORE_DRIVER_OPT",
87 "GLOBAL_SHARED_FILEDIR_OPT",
92 "DEFAULT_IALLOCATOR_OPT",
93 "IDENTIFY_DEFAULTS_OPT",
95 "IGNORE_FAILURES_OPT",
97 "IGNORE_REMOVE_FAILURES_OPT",
98 "IGNORE_SECONDARIES_OPT",
102 "MAINTAIN_NODE_HEALTH_OPT",
105 "MIGRATION_MODE_OPT",
107 "NEW_CLUSTER_CERT_OPT",
108 "NEW_CLUSTER_DOMAIN_SECRET_OPT",
109 "NEW_CONFD_HMAC_KEY_OPT",
112 "NEW_SPICE_CERT_OPT",
114 "NODE_FORCE_JOIN_OPT",
116 "NODE_PLACEMENT_OPT",
120 "NODRBD_STORAGE_OPT",
126 "NOMODIFY_ETCHOSTS_OPT",
127 "NOMODIFY_SSH_SETUP_OPT",
133 "NOSSH_KEYCHECK_OPT",
145 "PREALLOC_WIPE_DISKS_OPT",
146 "PRIMARY_IP_VERSION_OPT",
152 "REMOVE_INSTANCE_OPT",
157 "SECONDARY_ONLY_OPT",
161 "SHUTDOWN_TIMEOUT_OPT",
168 "STARTUP_PAUSED_OPT",
181 # Generic functions for CLI programs
184 "GenericInstanceCreate",
190 "JobSubmittedException",
192 "RunWhileClusterStopped",
196 # Formatting functions
197 "ToStderr", "ToStdout",
200 "FormatParameterDict",
209 # command line options support infrastructure
210 "ARGS_MANY_INSTANCES",
229 "OPT_COMPL_INST_ADD_NODES",
230 "OPT_COMPL_MANY_NODES",
231 "OPT_COMPL_ONE_IALLOCATOR",
232 "OPT_COMPL_ONE_INSTANCE",
233 "OPT_COMPL_ONE_NODE",
234 "OPT_COMPL_ONE_NODEGROUP",
240 "COMMON_CREATE_OPTS",
#: Priorities (sorted from lowest to highest opcode priority)
_PRIORITY_NAMES = [
  ("low", constants.OP_PRIO_LOW),
  ("normal", constants.OP_PRIO_NORMAL),
  ("high", constants.OP_PRIO_HIGH),
  ]

#: Priority dictionary for easier lookup
# TODO: Replace this and _PRIORITY_NAMES with a single sorted dictionary once
# we migrate to Python 2.6
_PRIONAME_TO_VALUE = dict(_PRIORITY_NAMES)
# Query result status for clients: normal field, field unknown to the
# server, or value could not be retrieved
(QR_NORMAL,
 QR_UNKNOWN,
 QR_INCOMPLETE) = range(3)
class _Argument:
  """Base class for command line argument definitions.

  Holds the minimum and maximum number of values the argument accepts;
  C{max=None} means unlimited.

  """
  def __init__(self, min=0, max=None): # pylint: disable=W0622
    self.min = min
    self.max = max

  def __repr__(self):
    return ("<%s min=%s max=%s>" %
            (self.__class__.__name__, self.min, self.max))
class ArgSuggest(_Argument):
  """Suggesting argument.

  Value can be any of the ones passed to the constructor.

  """
  # pylint: disable=W0622
  def __init__(self, min=0, max=None, choices=None):
    _Argument.__init__(self, min=min, max=max)
    # choices are completion suggestions only; any value is accepted
    self.choices = choices

  def __repr__(self):
    return ("<%s min=%s max=%s choices=%r>" %
            (self.__class__.__name__, self.min, self.max, self.choices))
class ArgChoice(ArgSuggest):
  """Choice argument.

  Value can be any of the ones passed to the constructor. Like L{ArgSuggest},
  but value must be one of the choices.

  """
class ArgUnknown(_Argument):
  """Unknown argument to program (e.g. determined at runtime).

  """


class ArgInstance(_Argument):
  """Instances argument.

  """


class ArgNode(_Argument):
  """Node argument.

  """


class ArgGroup(_Argument):
  """Node group argument.

  """


class ArgJobId(_Argument):
  """Job ID argument.

  """


class ArgFile(_Argument):
  """File path argument.

  """


class ArgCommand(_Argument):
  """Command argument.

  """


class ArgHost(_Argument):
  """Host argument.

  """


class ArgOs(_Argument):
  """OS argument.

  """
# Common argument-list shorthands used by the per-command definitions
ARGS_MANY_INSTANCES = [ArgInstance()]
ARGS_MANY_NODES = [ArgNode()]
ARGS_MANY_GROUPS = [ArgGroup()]
ARGS_ONE_INSTANCE = [ArgInstance(min=1, max=1)]
ARGS_ONE_NODE = [ArgNode(min=1, max=1)]
ARGS_ONE_GROUP = [ArgGroup(min=1, max=1)]
ARGS_ONE_OS = [ArgOs(min=1, max=1)]
def _ExtractTagsObject(opts, args):
  """Extract the tag type object.

  Note that this function will modify its args parameter.

  @param opts: parsed command line options; must carry a C{tag_type} field
  @param args: positional arguments; for node/group/instance tags the first
      one is popped off and used as the object name
  @return: a (kind, name) tuple
  @raise errors.ProgrammerError: if C{tag_type} is missing or unhandled
  @raise errors.OpPrereqError: if a name argument is required but missing

  """
  if not hasattr(opts, "tag_type"):
    raise errors.ProgrammerError("tag_type not passed to _ExtractTagsObject")
  kind = opts.tag_type
  if kind == constants.TAG_CLUSTER:
    # NOTE(review): cluster tags need no object name; the kind itself is
    # reused as the name -- confirm against callers
    retval = kind, kind
  elif kind in (constants.TAG_NODEGROUP,
                constants.TAG_NODE,
                constants.TAG_INSTANCE):
    if not args:
      raise errors.OpPrereqError("no arguments passed to the command")
    name = args.pop(0)
    retval = kind, name
  else:
    raise errors.ProgrammerError("Unhandled tag type '%s'" % kind)
  return retval
def _ExtendTags(opts, args):
  """Extend the args if a source file has been given.

  This function will extend the tags with the contents of the file
  passed in the 'tags_source' attribute of the opts parameter. A file
  named '-' will be replaced by stdin.

  @param opts: parsed command line options; C{tags_source} is the file name
      or None for no extension
  @param args: list of tags, extended in place

  """
  fname = opts.tags_source
  if fname is None:
    return
  if fname == "-":
    new_fh = sys.stdin
  else:
    new_fh = open(fname, "r")
  new_data = []
  try:
    # we don't use the nice 'new_data = [line.strip() for line in fh]'
    # because of python bug 1633941
    while True:
      line = new_fh.readline()
      if not line:
        break
      new_data.append(line.strip())
  finally:
    new_fh.close()
  args.extend(new_data)
def ListTags(opts, args):
  """List the tags on a given object.

  This is a generic implementation that knows how to deal with all
  three cases of tag objects (cluster, node, instance). The opts
  argument is expected to contain a tag_type field denoting what
  object type we work on.

  """
  kind, name = _ExtractTagsObject(opts, args)
  # NOTE(review): cl comes from the module-level client factory -- confirm
  cl = GetClient()
  result = cl.QueryTags(kind, name)
  result = list(result)
  result.sort()
  for tag in result:
    ToStdout(tag)
def AddTags(opts, args):
  """Add tags on a given object.

  This is a generic implementation that knows how to deal with all
  three cases of tag objects (cluster, node, instance). The opts
  argument is expected to contain a tag_type field denoting what
  object type we work on.

  """
  kind, name = _ExtractTagsObject(opts, args)
  _ExtendTags(opts, args)
  if not args:
    raise errors.OpPrereqError("No tags to be added")
  op = opcodes.OpTagsSet(kind=kind, name=name, tags=args)
  SubmitOpCode(op, opts=opts)
def RemoveTags(opts, args):
  """Remove tags from a given object.

  This is a generic implementation that knows how to deal with all
  three cases of tag objects (cluster, node, instance). The opts
  argument is expected to contain a tag_type field denoting what
  object type we work on.

  """
  kind, name = _ExtractTagsObject(opts, args)
  _ExtendTags(opts, args)
  if not args:
    raise errors.OpPrereqError("No tags to be removed")
  op = opcodes.OpTagsDel(kind=kind, name=name, tags=args)
  SubmitOpCode(op, opts=opts)
468 def check_unit(option, opt, value): # pylint: disable=W0613
469 """OptParsers custom converter for units.
473 return utils.ParseUnit(value)
474 except errors.UnitParseError, err:
475 raise OptionValueError("option %s: %s" % (opt, err))
def _SplitKeyVal(opt, data):
  """Convert a KeyVal string into a dict.

  This function will convert a key=val[,...] string into a dict. Empty
  values will be converted specially: keys which have the prefix 'no_'
  will have the value=False and the prefix stripped, keys with the
  prefix 'un_' will have value=None and the prefix stripped, the others
  will have value=True.

  @type opt: string
  @param opt: a string holding the option name for which we process the
      data, used in building error messages
  @type data: string
  @param data: a string of the format key=val,key=val,...
  @rtype: dict
  @return: {key=val, key=val}
  @raises errors.ParameterError: if there are duplicate keys

  """
  kv_dict = {}
  if data:
    for elem in utils.UnescapeAndSplit(data, sep=","):
      if "=" in elem:
        key, val = elem.split("=", 1)
      else:
        # value-less entries: interpret the no_/un_ prefixes specially
        if elem.startswith(NO_PREFIX):
          key, val = elem[len(NO_PREFIX):], False
        elif elem.startswith(UN_PREFIX):
          key, val = elem[len(UN_PREFIX):], None
        else:
          key, val = elem, True
      if key in kv_dict:
        raise errors.ParameterError("Duplicate key '%s' in option %s" %
                                    (key, opt))
      kv_dict[key] = val
  return kv_dict
def check_ident_key_val(option, opt, value): # pylint: disable=W0613
  """Custom parser for ident:key=val,key=val options.

  This will store the parsed values as a tuple (ident, {key: val}). As such,
  multiple uses of this option via action=append is possible.

  """
  if ":" not in value:
    ident, rest = value, ""
  else:
    ident, rest = value.split(":", 1)

  if ident.startswith(NO_PREFIX):
    if rest:
      msg = "Cannot pass options when removing parameter groups: %s" % value
      raise errors.ParameterError(msg)
    retval = (ident[len(NO_PREFIX):], False)
  elif ident.startswith(UN_PREFIX):
    if rest:
      msg = "Cannot pass options when removing parameter groups: %s" % value
      raise errors.ParameterError(msg)
    retval = (ident[len(UN_PREFIX):], None)
  else:
    kv_dict = _SplitKeyVal(opt, rest)
    retval = (ident, kv_dict)
  return retval
def check_key_val(option, opt, value): # pylint: disable=W0613
  """Custom parser class for key=val,key=val options.

  This will store the parsed values as a dict {key: val}.

  """
  return _SplitKeyVal(opt, value)
def check_bool(option, opt, value): # pylint: disable=W0613
  """Custom parser for yes/no options.

  This will store the parsed value as either True or False.

  """
  value = value.lower()
  if value == constants.VALUE_FALSE or value == "no":
    return False
  elif value == constants.VALUE_TRUE or value == "yes":
    return True
  else:
    raise errors.ParameterError("Invalid boolean value '%s'" % value)
# completion_suggestion is normally a list. Using numeric values not evaluating
# to False for dynamic completion.
(OPT_COMPL_MANY_NODES,
 OPT_COMPL_ONE_NODE,
 OPT_COMPL_ONE_INSTANCE,
 OPT_COMPL_ONE_OS,
 OPT_COMPL_ONE_IALLOCATOR,
 OPT_COMPL_INST_ADD_NODES,
 OPT_COMPL_ONE_NODEGROUP) = range(100, 107)

OPT_COMPL_ALL = frozenset([
  OPT_COMPL_MANY_NODES,
  OPT_COMPL_ONE_NODE,
  OPT_COMPL_ONE_INSTANCE,
  OPT_COMPL_ONE_OS,
  OPT_COMPL_ONE_IALLOCATOR,
  OPT_COMPL_INST_ADD_NODES,
  OPT_COMPL_ONE_NODEGROUP,
  ])
class CliOption(Option):
  """Custom option class for optparse.

  Adds the C{completion_suggest} attribute and registers the custom
  value types with their checker functions.

  """
  ATTRS = Option.ATTRS + [
    "completion_suggest",
    ]
  # type names must match the TYPE_CHECKER keys registered below
  TYPES = Option.TYPES + (
    "identkeyval",
    "keyval",
    "unit",
    "bool",
    )
  TYPE_CHECKER = Option.TYPE_CHECKER.copy()
  TYPE_CHECKER["identkeyval"] = check_ident_key_val
  TYPE_CHECKER["keyval"] = check_key_val
  TYPE_CHECKER["unit"] = check_unit
  TYPE_CHECKER["bool"] = check_bool
# optparse.py sets make_option, so we do it for our own option class, too
cli_option = CliOption
# Generic output/behaviour options shared by most commands
DEBUG_OPT = cli_option("-d", "--debug", default=0, action="count",
                       help="Increase debugging level")

NOHDR_OPT = cli_option("--no-headers", default=False,
                       action="store_true", dest="no_headers",
                       help="Don't display column headers")

SEP_OPT = cli_option("--separator", default=None,
                     action="store", dest="separator",
                     help=("Separator between output fields"
                           " (defaults to one space)"))

USEUNITS_OPT = cli_option("--units", default=None,
                          dest="units", choices=("h", "m", "g", "t"),
                          help="Specify units for output (one of h/m/g/t)")

FIELDS_OPT = cli_option("-o", "--output", dest="output", action="store",
                        type="string", metavar="FIELDS",
                        help="Comma separated list of output fields")

FORCE_OPT = cli_option("-f", "--force", dest="force", action="store_true",
                       default=False, help="Force the operation")

CONFIRM_OPT = cli_option("--yes", dest="confirm", action="store_true",
                         default=False, help="Do not require confirmation")
640 IGNORE_OFFLINE_OPT = cli_option("--ignore-offline", dest="ignore_offline",
641 action="store_true", default=False,
642 help=("Ignore offline nodes and do as much"
645 TAG_ADD_OPT = cli_option("--tags", dest="tags",
646 default=None, help="Comma-separated list of instance"
TAG_SRC_OPT = cli_option("--from", dest="tags_source",
                         default=None, help="File with tag names")

SUBMIT_OPT = cli_option("--submit", dest="submit_only",
                        default=False, action="store_true",
                        help=("Submit the job and return the job ID, but"
                              " don't wait for the job to finish"))

SYNC_OPT = cli_option("--sync", dest="do_locking",
                      default=False, action="store_true",
                      help=("Grab locks while doing the queries"
                            " in order to ensure more consistent results"))
662 DRY_RUN_OPT = cli_option("--dry-run", default=False,
664 help=("Do not execute the operation, just run the"
665 " check steps and verify it it could be"
668 VERBOSE_OPT = cli_option("-v", "--verbose", default=False,
670 help="Increase the verbosity of the operation")
672 DEBUG_SIMERR_OPT = cli_option("--debug-simulate-errors", default=False,
673 action="store_true", dest="simulate_errors",
674 help="Debugging option that makes the operation"
675 " treat most runtime checks as failed")
677 NWSYNC_OPT = cli_option("--no-wait-for-sync", dest="wait_for_sync",
678 default=True, action="store_false",
679 help="Don't wait for sync (DANGEROUS!)")
681 DISK_TEMPLATE_OPT = cli_option("-t", "--disk-template", dest="disk_template",
682 help=("Custom disk setup (%s)" %
683 utils.CommaJoin(constants.DISK_TEMPLATES)),
684 default=None, metavar="TEMPL",
685 choices=list(constants.DISK_TEMPLATES))
687 NONICS_OPT = cli_option("--no-nics", default=False, action="store_true",
688 help="Do not create any network cards for"
691 FILESTORE_DIR_OPT = cli_option("--file-storage-dir", dest="file_storage_dir",
692 help="Relative path under default cluster-wide"
693 " file storage dir to store file-based disks",
694 default=None, metavar="<DIR>")
696 FILESTORE_DRIVER_OPT = cli_option("--file-driver", dest="file_driver",
697 help="Driver to use for image files",
698 default="loop", metavar="<DRIVER>",
699 choices=list(constants.FILE_DRIVER))
701 IALLOCATOR_OPT = cli_option("-I", "--iallocator", metavar="<NAME>",
702 help="Select nodes for the instance automatically"
703 " using the <NAME> iallocator plugin",
704 default=None, type="string",
705 completion_suggest=OPT_COMPL_ONE_IALLOCATOR)
707 DEFAULT_IALLOCATOR_OPT = cli_option("-I", "--default-iallocator",
709 help="Set the default instance allocator plugin",
710 default=None, type="string",
711 completion_suggest=OPT_COMPL_ONE_IALLOCATOR)
713 OS_OPT = cli_option("-o", "--os-type", dest="os", help="What OS to run",
715 completion_suggest=OPT_COMPL_ONE_OS)
717 OSPARAMS_OPT = cli_option("-O", "--os-parameters", dest="osparams",
718 type="keyval", default={},
719 help="OS parameters")
721 FORCE_VARIANT_OPT = cli_option("--force-variant", dest="force_variant",
722 action="store_true", default=False,
723 help="Force an unknown variant")
725 NO_INSTALL_OPT = cli_option("--no-install", dest="no_install",
726 action="store_true", default=False,
727 help="Do not install the OS (will"
730 BACKEND_OPT = cli_option("-B", "--backend-parameters", dest="beparams",
731 type="keyval", default={},
732 help="Backend parameters")
734 HVOPTS_OPT = cli_option("-H", "--hypervisor-parameters", type="keyval",
735 default={}, dest="hvparams",
736 help="Hypervisor parameters")
738 HYPERVISOR_OPT = cli_option("-H", "--hypervisor-parameters", dest="hypervisor",
739 help="Hypervisor and hypervisor options, in the"
740 " format hypervisor:option=value,option=value,...",
741 default=None, type="identkeyval")
743 HVLIST_OPT = cli_option("-H", "--hypervisor-parameters", dest="hvparams",
744 help="Hypervisor and hypervisor options, in the"
745 " format hypervisor:option=value,option=value,...",
746 default=[], action="append", type="identkeyval")
748 NOIPCHECK_OPT = cli_option("--no-ip-check", dest="ip_check", default=True,
749 action="store_false",
750 help="Don't check that the instance's IP"
753 NONAMECHECK_OPT = cli_option("--no-name-check", dest="name_check",
754 default=True, action="store_false",
755 help="Don't check that the instance's name"
758 NET_OPT = cli_option("--net",
759 help="NIC parameters", default=[],
760 dest="nics", action="append", type="identkeyval")
762 DISK_OPT = cli_option("--disk", help="Disk parameters", default=[],
763 dest="disks", action="append", type="identkeyval")
765 DISKIDX_OPT = cli_option("--disks", dest="disks", default=None,
766 help="Comma-separated list of disks"
767 " indices to act on (e.g. 0,2) (optional,"
768 " defaults to all disks)")
770 OS_SIZE_OPT = cli_option("-s", "--os-size", dest="sd_size",
771 help="Enforces a single-disk configuration using the"
772 " given disk size, in MiB unless a suffix is used",
773 default=None, type="unit", metavar="<size>")
775 IGNORE_CONSIST_OPT = cli_option("--ignore-consistency",
776 dest="ignore_consistency",
777 action="store_true", default=False,
778 help="Ignore the consistency of the disks on"
781 ALLOW_FAILOVER_OPT = cli_option("--allow-failover",
782 dest="allow_failover",
783 action="store_true", default=False,
784 help="If migration is not possible fallback to"
787 NONLIVE_OPT = cli_option("--non-live", dest="live",
788 default=True, action="store_false",
789 help="Do a non-live migration (this usually means"
790 " freeze the instance, save the state, transfer and"
791 " only then resume running on the secondary node)")
793 MIGRATION_MODE_OPT = cli_option("--migration-mode", dest="migration_mode",
795 choices=list(constants.HT_MIGRATION_MODES),
796 help="Override default migration mode (choose"
797 " either live or non-live")
799 NODE_PLACEMENT_OPT = cli_option("-n", "--node", dest="node",
800 help="Target node and optional secondary node",
801 metavar="<pnode>[:<snode>]",
802 completion_suggest=OPT_COMPL_INST_ADD_NODES)
804 NODE_LIST_OPT = cli_option("-n", "--node", dest="nodes", default=[],
805 action="append", metavar="<node>",
806 help="Use only this node (can be used multiple"
807 " times, if not given defaults to all nodes)",
808 completion_suggest=OPT_COMPL_ONE_NODE)
810 NODEGROUP_OPT_NAME = "--node-group"
811 NODEGROUP_OPT = cli_option("-g", NODEGROUP_OPT_NAME,
813 help="Node group (name or uuid)",
814 metavar="<nodegroup>",
815 default=None, type="string",
816 completion_suggest=OPT_COMPL_ONE_NODEGROUP)
818 SINGLE_NODE_OPT = cli_option("-n", "--node", dest="node", help="Target node",
820 completion_suggest=OPT_COMPL_ONE_NODE)
822 NOSTART_OPT = cli_option("--no-start", dest="start", default=True,
823 action="store_false",
824 help="Don't start the instance after creation")
826 SHOWCMD_OPT = cli_option("--show-cmd", dest="show_command",
827 action="store_true", default=False,
828 help="Show command instead of executing it")
830 CLEANUP_OPT = cli_option("--cleanup", dest="cleanup",
831 default=False, action="store_true",
832 help="Instead of performing the migration, try to"
833 " recover from a failed cleanup. This is safe"
834 " to run even if the instance is healthy, but it"
835 " will create extra replication traffic and "
836 " disrupt briefly the replication (like during the"
839 STATIC_OPT = cli_option("-s", "--static", dest="static",
840 action="store_true", default=False,
841 help="Only show configuration data, not runtime data")
843 ALL_OPT = cli_option("--all", dest="show_all",
844 default=False, action="store_true",
845 help="Show info on all instances on the cluster."
846 " This can take a long time to run, use wisely")
848 SELECT_OS_OPT = cli_option("--select-os", dest="select_os",
849 action="store_true", default=False,
850 help="Interactive OS reinstall, lists available"
851 " OS templates for selection")
853 IGNORE_FAILURES_OPT = cli_option("--ignore-failures", dest="ignore_failures",
854 action="store_true", default=False,
855 help="Remove the instance from the cluster"
856 " configuration even if there are failures"
857 " during the removal process")
859 IGNORE_REMOVE_FAILURES_OPT = cli_option("--ignore-remove-failures",
860 dest="ignore_remove_failures",
861 action="store_true", default=False,
862 help="Remove the instance from the"
863 " cluster configuration even if there"
864 " are failures during the removal"
867 REMOVE_INSTANCE_OPT = cli_option("--remove-instance", dest="remove_instance",
868 action="store_true", default=False,
869 help="Remove the instance from the cluster")
871 DST_NODE_OPT = cli_option("-n", "--target-node", dest="dst_node",
872 help="Specifies the new node for the instance",
873 metavar="NODE", default=None,
874 completion_suggest=OPT_COMPL_ONE_NODE)
876 NEW_SECONDARY_OPT = cli_option("-n", "--new-secondary", dest="dst_node",
877 help="Specifies the new secondary node",
878 metavar="NODE", default=None,
879 completion_suggest=OPT_COMPL_ONE_NODE)
881 ON_PRIMARY_OPT = cli_option("-p", "--on-primary", dest="on_primary",
882 default=False, action="store_true",
883 help="Replace the disk(s) on the primary"
884 " node (applies only to internally mirrored"
885 " disk templates, e.g. %s)" %
886 utils.CommaJoin(constants.DTS_INT_MIRROR))
888 ON_SECONDARY_OPT = cli_option("-s", "--on-secondary", dest="on_secondary",
889 default=False, action="store_true",
890 help="Replace the disk(s) on the secondary"
891 " node (applies only to internally mirrored"
892 " disk templates, e.g. %s)" %
893 utils.CommaJoin(constants.DTS_INT_MIRROR))
895 AUTO_PROMOTE_OPT = cli_option("--auto-promote", dest="auto_promote",
896 default=False, action="store_true",
897 help="Lock all nodes and auto-promote as needed"
900 AUTO_REPLACE_OPT = cli_option("-a", "--auto", dest="auto",
901 default=False, action="store_true",
902 help="Automatically replace faulty disks"
903 " (applies only to internally mirrored"
904 " disk templates, e.g. %s)" %
905 utils.CommaJoin(constants.DTS_INT_MIRROR))
907 IGNORE_SIZE_OPT = cli_option("--ignore-size", dest="ignore_size",
908 default=False, action="store_true",
909 help="Ignore current recorded size"
910 " (useful for forcing activation when"
911 " the recorded size is wrong)")
913 SRC_NODE_OPT = cli_option("--src-node", dest="src_node", help="Source node",
915 completion_suggest=OPT_COMPL_ONE_NODE)
917 SRC_DIR_OPT = cli_option("--src-dir", dest="src_dir", help="Source directory",
920 SECONDARY_IP_OPT = cli_option("-s", "--secondary-ip", dest="secondary_ip",
921 help="Specify the secondary ip for the node",
922 metavar="ADDRESS", default=None)
924 READD_OPT = cli_option("--readd", dest="readd",
925 default=False, action="store_true",
926 help="Readd old node after replacing it")
928 NOSSH_KEYCHECK_OPT = cli_option("--no-ssh-key-check", dest="ssh_key_check",
929 default=True, action="store_false",
930 help="Disable SSH key fingerprint checking")
932 NODE_FORCE_JOIN_OPT = cli_option("--force-join", dest="force_join",
933 default=False, action="store_true",
934 help="Force the joining of a node")
936 MC_OPT = cli_option("-C", "--master-candidate", dest="master_candidate",
937 type="bool", default=None, metavar=_YORNO,
938 help="Set the master_candidate flag on the node")
940 OFFLINE_OPT = cli_option("-O", "--offline", dest="offline", metavar=_YORNO,
941 type="bool", default=None,
942 help=("Set the offline flag on the node"
943 " (cluster does not communicate with offline"
946 DRAINED_OPT = cli_option("-D", "--drained", dest="drained", metavar=_YORNO,
947 type="bool", default=None,
948 help=("Set the drained flag on the node"
949 " (excluded from allocation operations)"))
951 CAPAB_MASTER_OPT = cli_option("--master-capable", dest="master_capable",
952 type="bool", default=None, metavar=_YORNO,
953 help="Set the master_capable flag on the node")
955 CAPAB_VM_OPT = cli_option("--vm-capable", dest="vm_capable",
956 type="bool", default=None, metavar=_YORNO,
957 help="Set the vm_capable flag on the node")
959 ALLOCATABLE_OPT = cli_option("--allocatable", dest="allocatable",
960 type="bool", default=None, metavar=_YORNO,
961 help="Set the allocatable flag on a volume")
963 NOLVM_STORAGE_OPT = cli_option("--no-lvm-storage", dest="lvm_storage",
964 help="Disable support for lvm based instances"
966 action="store_false", default=True)
968 ENABLED_HV_OPT = cli_option("--enabled-hypervisors",
969 dest="enabled_hypervisors",
970 help="Comma-separated list of hypervisors",
971 type="string", default=None)
973 NIC_PARAMS_OPT = cli_option("-N", "--nic-parameters", dest="nicparams",
974 type="keyval", default={},
975 help="NIC parameters")
977 CP_SIZE_OPT = cli_option("-C", "--candidate-pool-size", default=None,
978 dest="candidate_pool_size", type="int",
979 help="Set the candidate pool size")
981 VG_NAME_OPT = cli_option("--vg-name", dest="vg_name",
982 help=("Enables LVM and specifies the volume group"
983 " name (cluster-wide) for disk allocation"
984 " [%s]" % constants.DEFAULT_VG),
985 metavar="VG", default=None)
987 YES_DOIT_OPT = cli_option("--yes-do-it", "--ya-rly", dest="yes_do_it",
988 help="Destroy cluster", action="store_true")
990 NOVOTING_OPT = cli_option("--no-voting", dest="no_voting",
991 help="Skip node agreement check (dangerous)",
992 action="store_true", default=False)
994 MAC_PREFIX_OPT = cli_option("-m", "--mac-prefix", dest="mac_prefix",
995 help="Specify the mac prefix for the instance IP"
996 " addresses, in the format XX:XX:XX",
1000 MASTER_NETDEV_OPT = cli_option("--master-netdev", dest="master_netdev",
1001 help="Specify the node interface (cluster-wide)"
1002 " on which the master IP address will be added"
1003 " (cluster init default: %s)" %
1004 constants.DEFAULT_BRIDGE,
1008 GLOBAL_FILEDIR_OPT = cli_option("--file-storage-dir", dest="file_storage_dir",
1009 help="Specify the default directory (cluster-"
1010 "wide) for storing the file-based disks [%s]" %
1011 constants.DEFAULT_FILE_STORAGE_DIR,
1013 default=constants.DEFAULT_FILE_STORAGE_DIR)
1015 GLOBAL_SHARED_FILEDIR_OPT = cli_option("--shared-file-storage-dir",
1016 dest="shared_file_storage_dir",
1017 help="Specify the default directory (cluster-"
1018 "wide) for storing the shared file-based"
1020 constants.DEFAULT_SHARED_FILE_STORAGE_DIR,
1021 metavar="SHAREDDIR",
1022 default=constants.DEFAULT_SHARED_FILE_STORAGE_DIR)
1024 NOMODIFY_ETCHOSTS_OPT = cli_option("--no-etc-hosts", dest="modify_etc_hosts",
1025 help="Don't modify /etc/hosts",
1026 action="store_false", default=True)
1028 NOMODIFY_SSH_SETUP_OPT = cli_option("--no-ssh-init", dest="modify_ssh_setup",
1029 help="Don't initialize SSH keys",
1030 action="store_false", default=True)
1032 ERROR_CODES_OPT = cli_option("--error-codes", dest="error_codes",
1033 help="Enable parseable error messages",
1034 action="store_true", default=False)
1036 NONPLUS1_OPT = cli_option("--no-nplus1-mem", dest="skip_nplusone_mem",
1037 help="Skip N+1 memory redundancy tests",
1038 action="store_true", default=False)
1040 REBOOT_TYPE_OPT = cli_option("-t", "--type", dest="reboot_type",
1041 help="Type of reboot: soft/hard/full",
1042 default=constants.INSTANCE_REBOOT_HARD,
1044 choices=list(constants.REBOOT_TYPES))
1046 IGNORE_SECONDARIES_OPT = cli_option("--ignore-secondaries",
1047 dest="ignore_secondaries",
1048 default=False, action="store_true",
1049 help="Ignore errors from secondaries")
1051 NOSHUTDOWN_OPT = cli_option("--noshutdown", dest="shutdown",
1052 action="store_false", default=True,
1053 help="Don't shutdown the instance (unsafe)")
1055 TIMEOUT_OPT = cli_option("--timeout", dest="timeout", type="int",
1056 default=constants.DEFAULT_SHUTDOWN_TIMEOUT,
1057 help="Maximum time to wait")
1059 SHUTDOWN_TIMEOUT_OPT = cli_option("--shutdown-timeout",
1060 dest="shutdown_timeout", type="int",
1061 default=constants.DEFAULT_SHUTDOWN_TIMEOUT,
1062 help="Maximum time to wait for instance shutdown")
1064 INTERVAL_OPT = cli_option("--interval", dest="interval", type="int",
1066 help=("Number of seconds between repetions of the"
1069 EARLY_RELEASE_OPT = cli_option("--early-release",
1070 dest="early_release", default=False,
1071 action="store_true",
1072 help="Release the locks on the secondary"
1075 NEW_CLUSTER_CERT_OPT = cli_option("--new-cluster-certificate",
1076 dest="new_cluster_cert",
1077 default=False, action="store_true",
1078 help="Generate a new cluster certificate")
1080 RAPI_CERT_OPT = cli_option("--rapi-certificate", dest="rapi_cert",
1082 help="File containing new RAPI certificate")
1084 NEW_RAPI_CERT_OPT = cli_option("--new-rapi-certificate", dest="new_rapi_cert",
1085 default=None, action="store_true",
1086 help=("Generate a new self-signed RAPI"
1089 SPICE_CERT_OPT = cli_option("--spice-certificate", dest="spice_cert",
1091 help="File containing new SPICE certificate")
1093 SPICE_CACERT_OPT = cli_option("--spice-ca-certificate", dest="spice_cacert",
1095 help="File containing the certificate of the CA"
1096 " which signed the SPICE certificate")
1098 NEW_SPICE_CERT_OPT = cli_option("--new-spice-certificate",
1099 dest="new_spice_cert", default=None,
1100 action="store_true",
1101 help=("Generate a new self-signed SPICE"
1104 NEW_CONFD_HMAC_KEY_OPT = cli_option("--new-confd-hmac-key",
1105 dest="new_confd_hmac_key",
1106 default=False, action="store_true",
1107 help=("Create a new HMAC key for %s" %
1110 CLUSTER_DOMAIN_SECRET_OPT = cli_option("--cluster-domain-secret",
1111 dest="cluster_domain_secret",
1113 help=("Load new new cluster domain"
1114 " secret from file"))
1116 NEW_CLUSTER_DOMAIN_SECRET_OPT = cli_option("--new-cluster-domain-secret",
1117 dest="new_cluster_domain_secret",
1118 default=False, action="store_true",
1119 help=("Create a new cluster domain"
1122 USE_REPL_NET_OPT = cli_option("--use-replication-network",
1123 dest="use_replication_network",
1124 help="Whether to use the replication network"
1125 " for talking to the nodes",
1126 action="store_true", default=False)
1128 MAINTAIN_NODE_HEALTH_OPT = \
1129 cli_option("--maintain-node-health", dest="maintain_node_health",
1130 metavar=_YORNO, default=None, type="bool",
1131 help="Configure the cluster to automatically maintain node"
1132 " health, by shutting down unknown instances, shutting down"
1133 " unknown DRBD devices, etc.")
1135 IDENTIFY_DEFAULTS_OPT = \
1136 cli_option("--identify-defaults", dest="identify_defaults",
1137 default=False, action="store_true",
1138 help="Identify which saved instance parameters are equal to"
1139 " the current cluster defaults and set them as such, instead"
1140 " of marking them as overridden")
1142 UIDPOOL_OPT = cli_option("--uid-pool", default=None,
1143 action="store", dest="uid_pool",
1144 help=("A list of user-ids or user-id"
1145 " ranges separated by commas"))
1147 ADD_UIDS_OPT = cli_option("--add-uids", default=None,
1148 action="store", dest="add_uids",
1149 help=("A list of user-ids or user-id"
1150 " ranges separated by commas, to be"
1151 " added to the user-id pool"))
1153 REMOVE_UIDS_OPT = cli_option("--remove-uids", default=None,
1154 action="store", dest="remove_uids",
1155 help=("A list of user-ids or user-id"
1156 " ranges separated by commas, to be"
1157 " removed from the user-id pool"))
1159 RESERVED_LVS_OPT = cli_option("--reserved-lvs", default=None,
1160 action="store", dest="reserved_lvs",
1161 help=("A comma-separated list of reserved"
1162 " logical volumes names, that will be"
1163 " ignored by cluster verify"))
1165 ROMAN_OPT = cli_option("--roman",
1166 dest="roman_integers", default=False,
1167 action="store_true",
1168 help="Use roman numbers for positive integers")
1170 DRBD_HELPER_OPT = cli_option("--drbd-usermode-helper", dest="drbd_helper",
1171 action="store", default=None,
1172 help="Specifies usermode helper for DRBD")
1174 NODRBD_STORAGE_OPT = cli_option("--no-drbd-storage", dest="drbd_storage",
1175 action="store_false", default=True,
1176 help="Disable support for DRBD")
1178 PRIMARY_IP_VERSION_OPT = \
1179 cli_option("--primary-ip-version", default=constants.IP4_VERSION,
1180 action="store", dest="primary_ip_version",
1181 metavar="%d|%d" % (constants.IP4_VERSION,
1182 constants.IP6_VERSION),
1183 help="Cluster-wide IP version for primary IP")
1185 PRIORITY_OPT = cli_option("--priority", default=None, dest="priority",
1186 metavar="|".join(name for name, _ in _PRIORITY_NAMES),
1187 choices=_PRIONAME_TO_VALUE.keys(),
1188 help="Priority for opcode processing")
1190 HID_OS_OPT = cli_option("--hidden", dest="hidden",
1191 type="bool", default=None, metavar=_YORNO,
1192 help="Sets the hidden flag on the OS")
1194 BLK_OS_OPT = cli_option("--blacklisted", dest="blacklisted",
1195 type="bool", default=None, metavar=_YORNO,
1196 help="Sets the blacklisted flag on the OS")
1198 PREALLOC_WIPE_DISKS_OPT = cli_option("--prealloc-wipe-disks", default=None,
1199 type="bool", metavar=_YORNO,
1200 dest="prealloc_wipe_disks",
1201 help=("Wipe disks prior to instance"
# Generic node parameters as key=value pairs (custom "keyval" option type).
NODE_PARAMS_OPT = cli_option("--node-parameters", dest="ndparams",
                             type="keyval", default=None,
                             help="Node parameters")

# Instance allocation policy for a node group.
ALLOC_POLICY_OPT = cli_option("--alloc-policy", dest="alloc_policy",
                              action="store", metavar="POLICY", default=None,
                              help="Allocation policy for the node group")

# State-of-Record power flag for a node.
NODE_POWERED_OPT = cli_option("--node-powered", default=None,
                              type="bool", metavar=_YORNO,
                              dest="node_powered",
                              help="Specify if the SoR for node is powered")

# Out-of-band command timing parameters (defaults come from constants).
OOB_TIMEOUT_OPT = cli_option("--oob-timeout", dest="oob_timeout", type="int",
                             default=constants.OOB_TIMEOUT,
                             help="Maximum time to wait for out-of-band helper")

POWER_DELAY_OPT = cli_option("--power-delay", dest="power_delay", type="float",
                             default=constants.OOB_POWER_DELAY,
                             help="Time in seconds to wait between power-ons")
1225 FORCE_FILTER_OPT = cli_option("-F", "--filter", dest="force_filter",
1226 action="store_true", default=False,
1227 help=("Whether command argument should be treated"
1230 NO_REMEMBER_OPT = cli_option("--no-remember",
1232 action="store_true", default=False,
1233 help="Perform but do not record the change"
1234 " in the configuration")
# Instance-selection switches for evacuation-style commands.
PRIMARY_ONLY_OPT = cli_option("-p", "--primary-only",
                              default=False, action="store_true",
                              help="Evacuate primary instances only")

SECONDARY_ONLY_OPT = cli_option("-s", "--secondary-only",
                                default=False, action="store_true",
                                help="Evacuate secondary instances only"
                                     " (applies only to internally mirrored"
                                     " disk templates, e.g. %s)" %
                                utils.CommaJoin(constants.DTS_INT_MIRROR))

STARTUP_PAUSED_OPT = cli_option("--paused", dest="startup_paused",
                                action="store_true", default=False,
                                help="Pause instance at startup")

# Destination node group; action="append" allows the option to be given
# multiple times, collecting all values in a list.
TO_GROUP_OPT = cli_option("--to", dest="to", metavar="<group>",
                          help="Destination node group (name or uuid)",
                          default=None, action="append",
                          completion_suggest=OPT_COMPL_ONE_NODEGROUP)
#: Options provided by all commands; appended to each command's own option
#: list when the OptionParser is built in L{_ParseArgs}
COMMON_OPTS = [DEBUG_OPT]
1260 # common options for creating instances. add and import then add their own
1262 COMMON_CREATE_OPTS = [
1267 FILESTORE_DRIVER_OPT,
1285 def _ParseArgs(argv, commands, aliases):
1286 """Parser for the command line arguments.
1288 This function parses the arguments and returns the function which
1289 must be executed together with its (modified) arguments.
1291 @param argv: the command line
1292 @param commands: dictionary with special contents, see the design
1293 doc for cmdline handling
1294 @param aliases: dictionary with command aliases {'alias': 'target, ...}
1298 binary = "<command>"
1300 binary = argv[0].split("/")[-1]
1302 if len(argv) > 1 and argv[1] == "--version":
1303 ToStdout("%s (ganeti %s) %s", binary, constants.VCS_VERSION,
1304 constants.RELEASE_VERSION)
1305 # Quit right away. That way we don't have to care about this special
1306 # argument. optparse.py does it the same.
1309 if len(argv) < 2 or not (argv[1] in commands or
1310 argv[1] in aliases):
1311 # let's do a nice thing
1312 sortedcmds = commands.keys()
1315 ToStdout("Usage: %s {command} [options...] [argument...]", binary)
1316 ToStdout("%s <command> --help to see details, or man %s", binary, binary)
1319 # compute the max line length for cmd + usage
1320 mlen = max([len(" %s" % cmd) for cmd in commands])
1321 mlen = min(60, mlen) # should not get here...
1323 # and format a nice command list
1324 ToStdout("Commands:")
1325 for cmd in sortedcmds:
1326 cmdstr = " %s" % (cmd,)
1327 help_text = commands[cmd][4]
1328 help_lines = textwrap.wrap(help_text, 79 - 3 - mlen)
1329 ToStdout("%-*s - %s", mlen, cmdstr, help_lines.pop(0))
1330 for line in help_lines:
1331 ToStdout("%-*s %s", mlen, "", line)
1335 return None, None, None
1337 # get command, unalias it, and look it up in commands
1341 raise errors.ProgrammerError("Alias '%s' overrides an existing"
1344 if aliases[cmd] not in commands:
1345 raise errors.ProgrammerError("Alias '%s' maps to non-existing"
1346 " command '%s'" % (cmd, aliases[cmd]))
1350 func, args_def, parser_opts, usage, description = commands[cmd]
1351 parser = OptionParser(option_list=parser_opts + COMMON_OPTS,
1352 description=description,
1353 formatter=TitledHelpFormatter(),
1354 usage="%%prog %s %s" % (cmd, usage))
1355 parser.disable_interspersed_args()
1356 options, args = parser.parse_args()
1358 if not _CheckArguments(cmd, args_def, args):
1359 return None, None, None
1361 return func, options, args
1364 def _CheckArguments(cmd, args_def, args):
1365 """Verifies the arguments using the argument definition.
1369 1. Abort with error if values specified by user but none expected.
1371 1. For each argument in definition
1373 1. Keep running count of minimum number of values (min_count)
1374 1. Keep running count of maximum number of values (max_count)
1375 1. If it has an unlimited number of values
1377 1. Abort with error if it's not the last argument in the definition
1379 1. If last argument has limited number of values
1381 1. Abort with error if number of values doesn't match or is too large
1383 1. Abort with error if user didn't pass enough values (min_count)
1386 if args and not args_def:
1387 ToStderr("Error: Command %s expects no arguments", cmd)
1394 last_idx = len(args_def) - 1
1396 for idx, arg in enumerate(args_def):
1397 if min_count is None:
1399 elif arg.min is not None:
1400 min_count += arg.min
1402 if max_count is None:
1404 elif arg.max is not None:
1405 max_count += arg.max
1408 check_max = (arg.max is not None)
1410 elif arg.max is None:
1411 raise errors.ProgrammerError("Only the last argument can have max=None")
1414 # Command with exact number of arguments
1415 if (min_count is not None and max_count is not None and
1416 min_count == max_count and len(args) != min_count):
1417 ToStderr("Error: Command %s expects %d argument(s)", cmd, min_count)
1420 # Command with limited number of arguments
1421 if max_count is not None and len(args) > max_count:
1422 ToStderr("Error: Command %s expects only %d argument(s)",
1426 # Command with some required arguments
1427 if min_count is not None and len(args) < min_count:
1428 ToStderr("Error: Command %s expects at least %d argument(s)",
def SplitNodeOption(value):
  """Splits the value of a --node option.

  A value of the form "pnode:snode" is split at the first colon; any
  other value is paired with C{None} as the second element.

  """
  if not value or ":" not in value:
    return (value, None)
  return value.split(":", 1)
def CalculateOSNames(os_name, os_variants):
  """Calculates all the names an OS can be called, according to its variants.

  @type os_name: string
  @param os_name: base name of the os
  @type os_variants: list or None
  @param os_variants: list of supported variants
  @rtype: list
  @return: list of valid names

  """
  # No variants: the bare OS name is the only valid name
  if not os_variants:
    return [os_name]
  return ["%s+%s" % (os_name, variant) for variant in os_variants]
def ParseFields(selected, default):
  """Parses the values of "--field"-like options.

  @type selected: string or None
  @param selected: User-selected options
  @type default: list
  @param default: Default fields

  """
  if selected is None:
    return default

  if not selected.startswith("+"):
    return selected.split(",")

  # A leading "+" means: default fields plus the listed ones
  return default + selected[1:].split(",")
1480 UsesRPC = rpc.RunWithRPC
1483 def AskUser(text, choices=None):
1484 """Ask the user a question.
1486 @param text: the question to ask
1488 @param choices: list with elements tuples (input_char, return_value,
1489 description); if not given, it will default to: [('y', True,
1490 'Perform the operation'), ('n', False, 'Do no do the operation')];
1491 note that the '?' char is reserved for help
1493 @return: one of the return values from the choices list; if input is
1494 not possible (i.e. not running with a tty, we return the last
1499 choices = [("y", True, "Perform the operation"),
1500 ("n", False, "Do not perform the operation")]
1501 if not choices or not isinstance(choices, list):
1502 raise errors.ProgrammerError("Invalid choices argument to AskUser")
1503 for entry in choices:
1504 if not isinstance(entry, tuple) or len(entry) < 3 or entry[0] == "?":
1505 raise errors.ProgrammerError("Invalid choices element to AskUser")
1507 answer = choices[-1][1]
1509 for line in text.splitlines():
1510 new_text.append(textwrap.fill(line, 70, replace_whitespace=False))
1511 text = "\n".join(new_text)
1513 f = file("/dev/tty", "a+")
1517 chars = [entry[0] for entry in choices]
1518 chars[-1] = "[%s]" % chars[-1]
1520 maps = dict([(entry[0], entry[1]) for entry in choices])
1524 f.write("/".join(chars))
1526 line = f.readline(2).strip().lower()
1531 for entry in choices:
1532 f.write(" %s - %s\n" % (entry[0], entry[2]))
class JobSubmittedException(Exception):
  """Job was submitted, client should exit.

  This exception has one argument, the ID of the job that was
  submitted. The handler should print this ID.

  This is not an error, just a structured way to exit from clients.

  """
def SendJob(ops, cl=None):
  """Function to submit an opcode without waiting for the results.

  @type ops: list
  @param ops: list of opcodes
  @type cl: luxi.Client
  @param cl: the luxi client to use for communicating with the master;
             if None, a new client will be created
  @return: the job ID assigned by the master daemon

  """
  if cl is None:
    cl = GetClient()

  job_id = cl.SubmitJob(ops)

  return job_id
1569 def GenericPollJob(job_id, cbs, report_cbs):
1570 """Generic job-polling function.
1572 @type job_id: number
1573 @param job_id: Job ID
1574 @type cbs: Instance of L{JobPollCbBase}
1575 @param cbs: Data callbacks
1576 @type report_cbs: Instance of L{JobPollReportCbBase}
1577 @param report_cbs: Reporting callbacks
1580 prev_job_info = None
1581 prev_logmsg_serial = None
1586 result = cbs.WaitForJobChangeOnce(job_id, ["status"], prev_job_info,
1589 # job not found, go away!
1590 raise errors.JobLost("Job with id %s lost" % job_id)
1592 if result == constants.JOB_NOTCHANGED:
1593 report_cbs.ReportNotChanged(job_id, status)
1598 # Split result, a tuple of (field values, log entries)
1599 (job_info, log_entries) = result
1600 (status, ) = job_info
1603 for log_entry in log_entries:
1604 (serial, timestamp, log_type, message) = log_entry
1605 report_cbs.ReportLogMessage(job_id, serial, timestamp,
1607 prev_logmsg_serial = max(prev_logmsg_serial, serial)
1609 # TODO: Handle canceled and archived jobs
1610 elif status in (constants.JOB_STATUS_SUCCESS,
1611 constants.JOB_STATUS_ERROR,
1612 constants.JOB_STATUS_CANCELING,
1613 constants.JOB_STATUS_CANCELED):
1616 prev_job_info = job_info
1618 jobs = cbs.QueryJobs([job_id], ["status", "opstatus", "opresult"])
1620 raise errors.JobLost("Job with id %s lost" % job_id)
1622 status, opstatus, result = jobs[0]
1624 if status == constants.JOB_STATUS_SUCCESS:
1627 if status in (constants.JOB_STATUS_CANCELING, constants.JOB_STATUS_CANCELED):
1628 raise errors.OpExecError("Job was canceled")
1631 for idx, (status, msg) in enumerate(zip(opstatus, result)):
1632 if status == constants.OP_STATUS_SUCCESS:
1634 elif status == constants.OP_STATUS_ERROR:
1635 errors.MaybeRaise(msg)
1638 raise errors.OpExecError("partial failure (opcode %d): %s" %
1641 raise errors.OpExecError(str(msg))
1643 # default failure mode
1644 raise errors.OpExecError(result)
class JobPollCbBase:
  """Base class for L{GenericPollJob} callbacks.

  Subclasses implement the data-retrieval half of job polling; all
  methods here are abstract.

  """
  def __init__(self):
    """Initializes this class.

    """

  def WaitForJobChangeOnce(self, job_id, fields,
                           prev_job_info, prev_log_serial):
    """Waits for changes on a job.

    """
    raise NotImplementedError()

  def QueryJobs(self, job_ids, fields):
    """Returns the selected fields for the selected job IDs.

    @type job_ids: list of numbers
    @param job_ids: Job IDs
    @type fields: list of strings
    @param fields: Fields

    """
    raise NotImplementedError()
class JobPollReportCbBase:
  """Base class for L{GenericPollJob} reporting callbacks.

  Subclasses implement the user-visible half of job polling; all
  methods here are abstract.

  """
  def __init__(self):
    """Initializes this class.

    """

  def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
    """Handles a log message.

    """
    raise NotImplementedError()

  def ReportNotChanged(self, job_id, status):
    """Called if a job hasn't changed in a while.

    @type job_id: number
    @param job_id: Job ID
    @type status: string or None
    @param status: Job status if available

    """
    raise NotImplementedError()
class _LuxiJobPollCb(JobPollCbBase):
  """Job-polling data callbacks backed by a LUXI client.

  """
  def __init__(self, cl):
    """Initializes this class.

    @param cl: LUXI client used for the actual queries

    """
    JobPollCbBase.__init__(self)
    self.cl = cl

  def WaitForJobChangeOnce(self, job_id, fields,
                           prev_job_info, prev_log_serial):
    """Waits for changes on a job.

    """
    return self.cl.WaitForJobChangeOnce(job_id, fields,
                                        prev_job_info, prev_log_serial)

  def QueryJobs(self, job_ids, fields):
    """Returns the selected fields for the selected job IDs.

    """
    return self.cl.QueryJobs(job_ids, fields)
class FeedbackFnJobPollReportCb(JobPollReportCbBase):
  """Reporting callbacks forwarding log messages to a feedback function.

  """
  def __init__(self, feedback_fn):
    """Initializes this class.

    @type feedback_fn: callable
    @param feedback_fn: Function called with every log message

    """
    JobPollReportCbBase.__init__(self)

    self.feedback_fn = feedback_fn

    assert callable(feedback_fn)

  def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
    """Handles a log message.

    """
    self.feedback_fn((timestamp, log_type, log_msg))

  def ReportNotChanged(self, job_id, status):
    """Called if a job hasn't changed in a while.

    """
    # Ignore
class StdioJobPollReportCb(JobPollReportCbBase):
  """Reporting callbacks that write directly to stdout/stderr.

  """
  def __init__(self):
    """Initializes this class.

    """
    JobPollReportCbBase.__init__(self)

    # Remember what was already reported so each notification is only
    # printed once per job
    self.notified_queued = False
    self.notified_waitlock = False

  def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
    """Handles a log message.

    """
    ToStdout("%s %s", time.ctime(utils.MergeTime(timestamp)),
             FormatLogMessage(log_type, log_msg))

  def ReportNotChanged(self, job_id, status):
    """Called if a job hasn't changed in a while.

    """
    if status == constants.JOB_STATUS_QUEUED and not self.notified_queued:
      ToStderr("Job %s is waiting in queue", job_id)
      self.notified_queued = True

    elif status == constants.JOB_STATUS_WAITING and not self.notified_waitlock:
      ToStderr("Job %s is trying to acquire all necessary locks", job_id)
      self.notified_waitlock = True
def FormatLogMessage(log_type, log_msg):
  """Formats a job message according to its type.

  Plain messages (C{ELOG_MESSAGE}) are passed through unchanged; any
  other payload is stringified first. The result is safely encoded for
  terminal output.

  """
  if log_type == constants.ELOG_MESSAGE:
    formatted = log_msg
  else:
    formatted = str(log_msg)

  return utils.SafeEncode(formatted)
def PollJob(job_id, cl=None, feedback_fn=None, reporter=None):
  """Function to poll for the result of a job.

  @type job_id: job identified
  @param job_id: the job to poll for results
  @type cl: luxi.Client
  @param cl: the luxi client to use for communicating with the master;
             if None, a new client will be created
  @param feedback_fn: optional function called with every log message;
      mutually exclusive with C{reporter}
  @param reporter: optional L{JobPollReportCbBase} instance

  """
  if cl is None:
    cl = GetClient()

  if reporter is None:
    if feedback_fn:
      reporter = FeedbackFnJobPollReportCb(feedback_fn)
    else:
      reporter = StdioJobPollReportCb()
  elif feedback_fn:
    raise errors.ProgrammerError("Can't specify reporter and feedback function")

  return GenericPollJob(job_id, _LuxiJobPollCb(cl), reporter)
def SubmitOpCode(op, cl=None, feedback_fn=None, opts=None, reporter=None):
  """Legacy function to submit an opcode.

  This is just a simple wrapper over the construction of the processor
  instance. It should be extended to better handle feedback and
  interaction functions.

  @param op: the opcode to submit
  @type cl: luxi.Client
  @param cl: luxi client to use; a new one is created when C{None}
  @param feedback_fn: passed on to L{PollJob}
  @param opts: command line options applied via L{SetGenericOpcodeOpts}
  @param reporter: passed on to L{PollJob}
  @return: the result of the single opcode in the submitted job

  """
  if cl is None:
    cl = GetClient()

  SetGenericOpcodeOpts([op], opts)

  job_id = SendJob([op], cl=cl)

  op_results = PollJob(job_id, cl=cl, feedback_fn=feedback_fn,
                       reporter=reporter)

  # Single-opcode job, so the first result is the only one
  return op_results[0]
def SubmitOrSend(op, opts, cl=None, feedback_fn=None):
  """Wrapper around SubmitOpCode or SendJob.

  This function will decide, based on the 'opts' parameter, whether to
  submit and wait for the result of the opcode (and return it), or
  whether to just send the job and print its identifier. It is used in
  order to simplify the implementation of the '--submit' option.

  It will also process the opcodes if we're sending them via SendJob
  (otherwise SubmitOpCode does it).

  """
  if opts and opts.submit_only:
    job = [op]
    SetGenericOpcodeOpts(job, opts)
    job_id = SendJob(job, cl=cl)
    # Not an error: structured way to hand the job ID back to the caller
    raise JobSubmittedException(job_id)
  else:
    return SubmitOpCode(op, cl=cl, feedback_fn=feedback_fn, opts=opts)
def SetGenericOpcodeOpts(opcode_list, options):
  """Processor for generic options.

  This function updates the given opcodes based on generic command
  line options (like debug, dry-run, etc.).

  @param opcode_list: list of opcodes
  @param options: command line options or None
  @return: None (in-place modification)

  """
  if not options:
    return
  for op in opcode_list:
    op.debug_level = options.debug
    if hasattr(options, "dry_run"):
      op.dry_run = options.dry_run
    if getattr(options, "priority", None) is not None:
      op.priority = _PRIONAME_TO_VALUE[options.priority]
1880 # TODO: Cache object?
1882 client = luxi.Client()
1883 except luxi.NoMasterError:
1884 ss = ssconf.SimpleStore()
1886 # Try to read ssconf file
1889 except errors.ConfigurationError:
1890 raise errors.OpPrereqError("Cluster not initialized or this machine is"
1891 " not part of a cluster")
1893 master, myself = ssconf.GetMasterAndMyself(ss=ss)
1894 if master != myself:
1895 raise errors.OpPrereqError("This is not the master node, please connect"
1896 " to node '%s' and rerun the command" %
1902 def FormatError(err):
1903 """Return a formatted error message for a given error.
1905 This function takes an exception instance and returns a tuple
1906 consisting of two values: first, the recommended exit code, and
1907 second, a string describing the error message (not
1908 newline-terminated).
1914 if isinstance(err, errors.ConfigurationError):
1915 txt = "Corrupt configuration file: %s" % msg
1917 obuf.write(txt + "\n")
1918 obuf.write("Aborting.")
1920 elif isinstance(err, errors.HooksAbort):
1921 obuf.write("Failure: hooks execution failed:\n")
1922 for node, script, out in err.args[0]:
1924 obuf.write(" node: %s, script: %s, output: %s\n" %
1925 (node, script, out))
1927 obuf.write(" node: %s, script: %s (no output)\n" %
1929 elif isinstance(err, errors.HooksFailure):
1930 obuf.write("Failure: hooks general failure: %s" % msg)
1931 elif isinstance(err, errors.ResolverError):
1932 this_host = netutils.Hostname.GetSysName()
1933 if err.args[0] == this_host:
1934 msg = "Failure: can't resolve my own hostname ('%s')"
1936 msg = "Failure: can't resolve hostname '%s'"
1937 obuf.write(msg % err.args[0])
1938 elif isinstance(err, errors.OpPrereqError):
1939 if len(err.args) == 2:
1940 obuf.write("Failure: prerequisites not met for this"
1941 " operation:\nerror type: %s, error details:\n%s" %
1942 (err.args[1], err.args[0]))
1944 obuf.write("Failure: prerequisites not met for this"
1945 " operation:\n%s" % msg)
1946 elif isinstance(err, errors.OpExecError):
1947 obuf.write("Failure: command execution error:\n%s" % msg)
1948 elif isinstance(err, errors.TagError):
1949 obuf.write("Failure: invalid tag(s) given:\n%s" % msg)
1950 elif isinstance(err, errors.JobQueueDrainError):
1951 obuf.write("Failure: the job queue is marked for drain and doesn't"
1952 " accept new requests\n")
1953 elif isinstance(err, errors.JobQueueFull):
1954 obuf.write("Failure: the job queue is full and doesn't accept new"
1955 " job submissions until old jobs are archived\n")
1956 elif isinstance(err, errors.TypeEnforcementError):
1957 obuf.write("Parameter Error: %s" % msg)
1958 elif isinstance(err, errors.ParameterError):
1959 obuf.write("Failure: unknown/wrong parameter name '%s'" % msg)
1960 elif isinstance(err, luxi.NoMasterError):
1961 obuf.write("Cannot communicate with the master daemon.\nIs it running"
1962 " and listening for connections?")
1963 elif isinstance(err, luxi.TimeoutError):
1964 obuf.write("Timeout while talking to the master daemon. Jobs might have"
1965 " been submitted and will continue to run even if the call"
1966 " timed out. Useful commands in this situation are \"gnt-job"
1967 " list\", \"gnt-job cancel\" and \"gnt-job watch\". Error:\n")
1969 elif isinstance(err, luxi.PermissionError):
1970 obuf.write("It seems you don't have permissions to connect to the"
1971 " master daemon.\nPlease retry as a different user.")
1972 elif isinstance(err, luxi.ProtocolError):
1973 obuf.write("Unhandled protocol error while talking to the master daemon:\n"
1975 elif isinstance(err, errors.JobLost):
1976 obuf.write("Error checking job status: %s" % msg)
1977 elif isinstance(err, errors.QueryFilterParseError):
1978 obuf.write("Error while parsing query filter: %s\n" % err.args[0])
1979 obuf.write("\n".join(err.GetDetails()))
1980 elif isinstance(err, errors.GenericError):
1981 obuf.write("Unhandled Ganeti error: %s" % msg)
1982 elif isinstance(err, JobSubmittedException):
1983 obuf.write("JobID: %s\n" % err.args[0])
1986 obuf.write("Unhandled exception: %s" % msg)
1987 return retcode, obuf.getvalue().rstrip("\n")
1990 def GenericMain(commands, override=None, aliases=None):
1991 """Generic main function for all the gnt-* commands.
1994 - commands: a dictionary with a special structure, see the design doc
1995 for command line handling.
1996 - override: if not None, we expect a dictionary with keys that will
1997 override command line options; this can be used to pass
1998 options from the scripts to generic functions
1999 - aliases: dictionary with command aliases {'alias': 'target, ...}
2002 # save the program name and the entire command line for later logging
2004 binary = os.path.basename(sys.argv[0]) or sys.argv[0]
2005 if len(sys.argv) >= 2:
2006 binary += " " + sys.argv[1]
2007 old_cmdline = " ".join(sys.argv[2:])
2011 binary = "<unknown program>"
2018 func, options, args = _ParseArgs(sys.argv, commands, aliases)
2019 except errors.ParameterError, err:
2020 result, err_msg = FormatError(err)
2024 if func is None: # parse error
2027 if override is not None:
2028 for key, val in override.iteritems():
2029 setattr(options, key, val)
2031 utils.SetupLogging(constants.LOG_COMMANDS, binary, debug=options.debug,
2032 stderr_logging=True)
2035 logging.info("run with arguments '%s'", old_cmdline)
2037 logging.info("run with no arguments")
2040 result = func(options, args)
2041 except (errors.GenericError, luxi.ProtocolError,
2042 JobSubmittedException), err:
2043 result, err_msg = FormatError(err)
2044 logging.exception("Error during command processing")
2046 except KeyboardInterrupt:
2047 result = constants.EXIT_FAILURE
2048 ToStderr("Aborted. Note that if the operation created any jobs, they"
2049 " might have been submitted and"
2050 " will continue to run in the background.")
2051 except IOError, err:
2052 if err.errno == errno.EPIPE:
2053 # our terminal went away, we'll exit
2054 sys.exit(constants.EXIT_FAILURE)
2061 def ParseNicOption(optvalue):
2062 """Parses the value of the --net option(s).
2066 nic_max = max(int(nidx[0]) + 1 for nidx in optvalue)
2067 except (TypeError, ValueError), err:
2068 raise errors.OpPrereqError("Invalid NIC index passed: %s" % str(err))
2070 nics = [{}] * nic_max
2071 for nidx, ndict in optvalue:
2074 if not isinstance(ndict, dict):
2075 raise errors.OpPrereqError("Invalid nic/%d value: expected dict,"
2076 " got %s" % (nidx, ndict))
2078 utils.ForceDictType(ndict, constants.INIC_PARAMS_TYPES)
2085 def GenericInstanceCreate(mode, opts, args):
2086 """Add an instance to the cluster via either creation or import.
2088 @param mode: constants.INSTANCE_CREATE or constants.INSTANCE_IMPORT
2089 @param opts: the command line options selected by the user
2091 @param args: should contain only one element, the new instance name
2093 @return: the desired exit code
2098 (pnode, snode) = SplitNodeOption(opts.node)
2103 hypervisor, hvparams = opts.hypervisor
2106 nics = ParseNicOption(opts.nics)
2110 elif mode == constants.INSTANCE_CREATE:
2111 # default of one nic, all auto
2117 if opts.disk_template == constants.DT_DISKLESS:
2118 if opts.disks or opts.sd_size is not None:
2119 raise errors.OpPrereqError("Diskless instance but disk"
2120 " information passed")
2123 if (not opts.disks and not opts.sd_size
2124 and mode == constants.INSTANCE_CREATE):
2125 raise errors.OpPrereqError("No disk information specified")
2126 if opts.disks and opts.sd_size is not None:
2127 raise errors.OpPrereqError("Please use either the '--disk' or"
2129 if opts.sd_size is not None:
2130 opts.disks = [(0, {constants.IDISK_SIZE: opts.sd_size})]
2134 disk_max = max(int(didx[0]) + 1 for didx in opts.disks)
2135 except ValueError, err:
2136 raise errors.OpPrereqError("Invalid disk index passed: %s" % str(err))
2137 disks = [{}] * disk_max
2140 for didx, ddict in opts.disks:
2142 if not isinstance(ddict, dict):
2143 msg = "Invalid disk/%d value: expected dict, got %s" % (didx, ddict)
2144 raise errors.OpPrereqError(msg)
2145 elif constants.IDISK_SIZE in ddict:
2146 if constants.IDISK_ADOPT in ddict:
2147 raise errors.OpPrereqError("Only one of 'size' and 'adopt' allowed"
2148 " (disk %d)" % didx)
2150 ddict[constants.IDISK_SIZE] = \
2151 utils.ParseUnit(ddict[constants.IDISK_SIZE])
2152 except ValueError, err:
2153 raise errors.OpPrereqError("Invalid disk size for disk %d: %s" %
2155 elif constants.IDISK_ADOPT in ddict:
2156 if mode == constants.INSTANCE_IMPORT:
2157 raise errors.OpPrereqError("Disk adoption not allowed for instance"
2159 ddict[constants.IDISK_SIZE] = 0
2161 raise errors.OpPrereqError("Missing size or adoption source for"
2165 if opts.tags is not None:
2166 tags = opts.tags.split(",")
2170 utils.ForceDictType(opts.beparams, constants.BES_PARAMETER_TYPES)
2171 utils.ForceDictType(hvparams, constants.HVS_PARAMETER_TYPES)
2173 if mode == constants.INSTANCE_CREATE:
2176 force_variant = opts.force_variant
2179 no_install = opts.no_install
2180 identify_defaults = False
2181 elif mode == constants.INSTANCE_IMPORT:
2184 force_variant = False
2185 src_node = opts.src_node
2186 src_path = opts.src_dir
2188 identify_defaults = opts.identify_defaults
2190 raise errors.ProgrammerError("Invalid creation mode %s" % mode)
2192 op = opcodes.OpInstanceCreate(instance_name=instance,
2194 disk_template=opts.disk_template,
2196 pnode=pnode, snode=snode,
2197 ip_check=opts.ip_check,
2198 name_check=opts.name_check,
2199 wait_for_sync=opts.wait_for_sync,
2200 file_storage_dir=opts.file_storage_dir,
2201 file_driver=opts.file_driver,
2202 iallocator=opts.iallocator,
2203 hypervisor=hypervisor,
2205 beparams=opts.beparams,
2206 osparams=opts.osparams,
2210 force_variant=force_variant,
2214 no_install=no_install,
2215 identify_defaults=identify_defaults)
2217 SubmitOrSend(op, opts)
2221 class _RunWhileClusterStoppedHelper:
2222 """Helper class for L{RunWhileClusterStopped} to simplify state management
2225 def __init__(self, feedback_fn, cluster_name, master_node, online_nodes):
2226 """Initializes this class.
2228 @type feedback_fn: callable
2229 @param feedback_fn: Feedback function
2230 @type cluster_name: string
2231 @param cluster_name: Cluster name
2232 @type master_node: string
2233 @param master_node Master node name
2234 @type online_nodes: list
2235 @param online_nodes: List of names of online nodes
2238 self.feedback_fn = feedback_fn
2239 self.cluster_name = cluster_name
2240 self.master_node = master_node
2241 self.online_nodes = online_nodes
2243 self.ssh = ssh.SshRunner(self.cluster_name)
2245 self.nonmaster_nodes = [name for name in online_nodes
2246 if name != master_node]
2248 assert self.master_node not in self.nonmaster_nodes
2250 def _RunCmd(self, node_name, cmd):
2251 """Runs a command on the local or a remote machine.
2253 @type node_name: string
2254 @param node_name: Machine name
2259 if node_name is None or node_name == self.master_node:
2260 # No need to use SSH
2261 result = utils.RunCmd(cmd)
2263 result = self.ssh.Run(node_name, "root", utils.ShellQuoteArgs(cmd))
2266 errmsg = ["Failed to run command %s" % result.cmd]
2268 errmsg.append("on node %s" % node_name)
2269 errmsg.append(": exitcode %s and error %s" %
2270 (result.exit_code, result.output))
2271 raise errors.OpExecError(" ".join(errmsg))
2273 def Call(self, fn, *args):
2274 """Call function while all daemons are stopped.
2277 @param fn: Function to be called
2280 # Pause watcher by acquiring an exclusive lock on watcher state file
2281 self.feedback_fn("Blocking watcher")
2282 watcher_block = utils.FileLock.Open(constants.WATCHER_LOCK_FILE)
2284 # TODO: Currently, this just blocks. There's no timeout.
2285 # TODO: Should it be a shared lock?
2286 watcher_block.Exclusive(blocking=True)
2288 # Stop master daemons, so that no new jobs can come in and all running
2290 self.feedback_fn("Stopping master daemons")
2291 self._RunCmd(None, [constants.DAEMON_UTIL, "stop-master"])
2293 # Stop daemons on all nodes
2294 for node_name in self.online_nodes:
2295 self.feedback_fn("Stopping daemons on %s" % node_name)
2296 self._RunCmd(node_name, [constants.DAEMON_UTIL, "stop-all"])
2298 # All daemons are shut down now
2300 return fn(self, *args)
2301 except Exception, err:
2302 _, errmsg = FormatError(err)
2303 logging.exception("Caught exception")
2304 self.feedback_fn(errmsg)
2307 # Start cluster again, master node last
2308 for node_name in self.nonmaster_nodes + [self.master_node]:
2309 self.feedback_fn("Starting daemons on %s" % node_name)
2310 self._RunCmd(node_name, [constants.DAEMON_UTIL, "start-all"])
2313 watcher_block.Close()
def RunWhileClusterStopped(feedback_fn, fn, *args):
  """Calls a function while all cluster daemons are stopped.

  @type feedback_fn: callable
  @param feedback_fn: Feedback function
  @type fn: callable
  @param fn: Function to be called when daemons are stopped

  """
  feedback_fn("Gathering cluster information")

  # This ensures we're running on the master daemon
  cl = GetClient()

  (cluster_name, master_node) = \
    cl.QueryConfigValues(["cluster_name", "master_node"])

  online_nodes = GetOnlineNodes([], cl=cl)

  # Don't keep a reference to the client. The master daemon will go away.
  del cl

  assert master_node in online_nodes

  return _RunWhileClusterStoppedHelper(feedback_fn, cluster_name, master_node,
                                       online_nodes).Call(fn, *args)
2344 def GenerateTable(headers, fields, separator, data,
2345 numfields=None, unitfields=None,
2347 """Prints a table with headers and different fields.
2350 @param headers: dictionary mapping field names to headers for
2353 @param fields: the field names corresponding to each row in
2355 @param separator: the separator to be used; if this is None,
2356 the default 'smart' algorithm is used which computes optimal
2357 field width, otherwise just the separator is used between
2360 @param data: a list of lists, each sublist being one row to be output
2361 @type numfields: list
2362 @param numfields: a list with the fields that hold numeric
2363 values and thus should be right-aligned
2364 @type unitfields: list
2365 @param unitfields: a list with the fields that hold numeric
2366 values that should be formatted with the units field
2367 @type units: string or None
2368 @param units: the units we should use for formatting, or None for
2369 automatic choice (human-readable for non-separator usage, otherwise
2370 megabytes); this is a one-letter string
2379 if numfields is None:
2381 if unitfields is None:
2384 numfields = utils.FieldSet(*numfields) # pylint: disable=W0142
2385 unitfields = utils.FieldSet(*unitfields) # pylint: disable=W0142
2388 for field in fields:
2389 if headers and field not in headers:
2390 # TODO: handle better unknown fields (either revert to old
2391 # style of raising exception, or deal more intelligently with
2393 headers[field] = field
2394 if separator is not None:
2395 format_fields.append("%s")
2396 elif numfields.Matches(field):
2397 format_fields.append("%*s")
2399 format_fields.append("%-*s")
2401 if separator is None:
2402 mlens = [0 for name in fields]
2403 format_str = " ".join(format_fields)
2405 format_str = separator.replace("%", "%%").join(format_fields)
2410 for idx, val in enumerate(row):
2411 if unitfields.Matches(fields[idx]):
2414 except (TypeError, ValueError):
2417 val = row[idx] = utils.FormatUnit(val, units)
2418 val = row[idx] = str(val)
2419 if separator is None:
2420 mlens[idx] = max(mlens[idx], len(val))
2425 for idx, name in enumerate(fields):
2427 if separator is None:
2428 mlens[idx] = max(mlens[idx], len(hdr))
2429 args.append(mlens[idx])
2431 result.append(format_str % tuple(args))
2433 if separator is None:
2434 assert len(mlens) == len(fields)
2436 if fields and not numfields.Matches(fields[-1]):
2442 line = ["-" for _ in fields]
2443 for idx in range(len(fields)):
2444 if separator is None:
2445 args.append(mlens[idx])
2446 args.append(line[idx])
2447 result.append(format_str % tuple(args))
2452 def _FormatBool(value):
2453 """Formats a boolean value as a string.
#: Default formatting for query results; (callback, align right)
_DEFAULT_FORMAT_QUERY = {
  constants.QFT_TEXT: (str, False),
  constants.QFT_BOOL: (_FormatBool, False),
  constants.QFT_NUMBER: (str, True),
  constants.QFT_TIMESTAMP: (utils.FormatTime, False),
  constants.QFT_OTHER: (str, False),
  constants.QFT_UNKNOWN: (str, False),
  }
def _GetColumnFormatter(fdef, override, unit):
  """Returns formatting function for a field.

  @type fdef: L{objects.QueryFieldDefinition}
  @type override: dict
  @param override: Dictionary for overriding field formatting functions,
    indexed by field name, contents like L{_DEFAULT_FORMAT_QUERY}
  @type unit: string
  @param unit: Unit used for formatting fields of type L{constants.QFT_UNIT}
  @rtype: tuple; (callable, bool)
  @return: Returns the function to format a value (takes one parameter) and a
    boolean for aligning the value on the right-hand side

  """
  fmt = override.get(fdef.name, None)
  if fmt is not None:
    return fmt

  assert constants.QFT_UNIT not in _DEFAULT_FORMAT_QUERY

  if fdef.kind == constants.QFT_UNIT:
    # Can't keep this information in the static dictionary, as the unit is
    # only known at call time
    return (lambda value: utils.FormatUnit(value, unit), True)

  fmt = _DEFAULT_FORMAT_QUERY.get(fdef.kind, None)
  if fmt is not None:
    return fmt

  raise NotImplementedError("Can't format column type '%s'" % fdef.kind)
class _QueryColumnFormatter:
  """Callable class for formatting fields of a query.

  """
  def __init__(self, fn, status_fn, verbose):
    """Initializes this class.

    @type fn: callable
    @param fn: Formatting function
    @type status_fn: callable
    @param status_fn: Function to report fields' status
    @type verbose: boolean
    @param verbose: whether to use verbose field descriptions or not

    """
    self._fn = fn
    self._status_fn = status_fn
    self._verbose = verbose

  def __call__(self, data):
    """Returns a field's string representation.

    """
    (status, value) = data

    # Report field status so callers can collect statistics
    self._status_fn(status)

    if status == constants.RS_NORMAL:
      return self._fn(value)

    assert value is None, \
           "Found value %r for abnormal status %s" % (value, status)

    return FormatResultError(status, self._verbose)
def FormatResultError(status, verbose):
  """Formats result status other than L{constants.RS_NORMAL}.

  @param status: The result status
  @type verbose: boolean
  @param verbose: Whether to return the verbose text
  @return: Text of result status

  """
  assert status != constants.RS_NORMAL, \
         "FormatResultError called with status equal to constants.RS_NORMAL"
  try:
    (verbose_text, normal_text) = constants.RSS_DESCRIPTION[status]
  except KeyError:
    raise NotImplementedError("Unknown status %s" % status)
  else:
    if verbose:
      return verbose_text
    return normal_text
def FormatQueryResult(result, unit=None, format_override=None, separator=None,
                      header=False, verbose=False):
  """Formats data in L{objects.QueryResponse}.

  @type result: L{objects.QueryResponse}
  @param result: result of query operation
  @type unit: string
  @param unit: Unit used for formatting fields of type L{constants.QFT_UNIT},
    see L{utils.text.FormatUnit}
  @type format_override: dict
  @param format_override: Dictionary for overriding field formatting functions,
    indexed by field name, contents like L{_DEFAULT_FORMAT_QUERY}
  @type separator: string or None
  @param separator: String used to separate fields
  @type header: bool
  @param header: Whether to output header row
  @type verbose: boolean
  @param verbose: whether to use verbose field descriptions or not
  @rtype: tuple; (overall status, list of strings)
  @return: overall result status (one of the C{QR_*} constants) and the
    formatted table rows

  """
  if unit is None:
    # Human-readable units for interactive use, megabytes when the output is
    # meant to be parsed (separator given)
    if separator:
      unit = "m"
    else:
      unit = "h"

  if format_override is None:
    format_override = {}

  stats = dict.fromkeys(constants.RS_ALL, 0)

  def _RecordStatus(status):
    if status in stats:
      stats[status] += 1

  columns = []
  for fdef in result.fields:
    assert fdef.title and fdef.name
    (fn, align_right) = _GetColumnFormatter(fdef, format_override, unit)
    columns.append(TableColumn(fdef.title,
                               _QueryColumnFormatter(fn, _RecordStatus,
                                                     verbose),
                               align_right))

  table = FormatTable(result.data, columns, header, separator)

  # Collect statistics
  assert len(stats) == len(constants.RS_ALL)
  assert compat.all(count >= 0 for count in stats.values())

  # Determine overall status. If there was no data, unknown fields must be
  # detected via the field definitions.
  if (stats[constants.RS_UNKNOWN] or
      (not result.data and _GetUnknownFields(result.fields))):
    status = QR_UNKNOWN
  elif compat.any(count > 0 for key, count in stats.items()
                  if key != constants.RS_NORMAL):
    status = QR_INCOMPLETE
  else:
    status = QR_NORMAL

  return (status, table)
def _GetUnknownFields(fdefs):
  """Returns list of unknown fields included in C{fdefs}.

  @type fdefs: list of L{objects.QueryFieldDefinition}

  """
  return [fdef for fdef in fdefs
          if fdef.kind == constants.QFT_UNKNOWN]
def _WarnUnknownFields(fdefs):
  """Prints a warning to stderr if a query included unknown fields.

  @type fdefs: list of L{objects.QueryFieldDefinition}
  @rtype: bool
  @return: whether any unknown fields were found

  """
  unknown = _GetUnknownFields(fdefs)
  if unknown:
    ToStderr("Warning: Queried for unknown fields %s",
             utils.CommaJoin(fdef.name for fdef in unknown))
    return True

  return False
def GenericList(resource, fields, names, unit, separator, header, cl=None,
                format_override=None, verbose=False, force_filter=False):
  """Generic implementation for listing all items of a resource.

  @param resource: One of L{constants.QR_VIA_LUXI}
  @type fields: list of strings
  @param fields: List of fields to query for
  @type names: list of strings
  @param names: Names of items to query for
  @type unit: string or None
  @param unit: Unit used for formatting fields of type L{constants.QFT_UNIT} or
    None for automatic choice (human-readable for non-separator usage,
    otherwise megabytes); this is a one-letter string
  @type separator: string or None
  @param separator: String used to separate fields
  @type header: bool
  @param header: Whether to show header row
  @type force_filter: bool
  @param force_filter: Whether to always treat names as filter
  @type format_override: dict
  @param format_override: Dictionary for overriding field formatting functions,
    indexed by field name, contents like L{_DEFAULT_FORMAT_QUERY}
  @type verbose: boolean
  @param verbose: whether to use verbose field descriptions or not
  @rtype: int
  @return: L{constants.EXIT_SUCCESS} or L{constants.EXIT_UNKNOWN_FIELD}

  """
  if not names:
    names = None

  filter_ = qlang.MakeFilter(names, force_filter)

  if cl is None:
    cl = GetClient()

  response = cl.Query(resource, fields, filter_)

  found_unknown = _WarnUnknownFields(response.fields)

  (status, data) = FormatQueryResult(response, unit=unit, separator=separator,
                                     header=header,
                                     format_override=format_override,
                                     verbose=verbose)

  for line in data:
    ToStdout(line)

  assert ((found_unknown and status == QR_UNKNOWN) or
          (not found_unknown and status != QR_UNKNOWN))

  if status == QR_UNKNOWN:
    return constants.EXIT_UNKNOWN_FIELD

  # TODO: Should the list command fail if not all data could be collected?
  return constants.EXIT_SUCCESS
def GenericListFields(resource, fields, separator, header, cl=None):
  """Generic implementation for listing fields for a resource.

  @param resource: One of L{constants.QR_VIA_LUXI}
  @type fields: list of strings
  @param fields: List of fields to query for
  @type separator: string or None
  @param separator: String used to separate fields
  @type header: bool
  @param header: Whether to show header row
  @rtype: int
  @return: L{constants.EXIT_SUCCESS} or L{constants.EXIT_UNKNOWN_FIELD}

  """
  if cl is None:
    cl = GetClient()

  if not fields:
    fields = None

  response = cl.QueryFields(resource, fields)

  found_unknown = _WarnUnknownFields(response.fields)

  columns = [
    TableColumn("Name", str, False),
    TableColumn("Title", str, False),
    TableColumn("Description", str, False),
    ]

  rows = [[fdef.name, fdef.title, fdef.doc] for fdef in response.fields]

  for line in FormatTable(rows, columns, header, separator):
    ToStdout(line)

  if found_unknown:
    return constants.EXIT_UNKNOWN_FIELD

  return constants.EXIT_SUCCESS
class TableColumn:
  """Describes a column for L{FormatTable}.

  """
  def __init__(self, title, fn, align_right):
    """Initializes this class.

    @type title: string
    @param title: Column title
    @type fn: callable
    @param fn: Formatting function
    @type align_right: bool
    @param align_right: Whether to align values on the right-hand side

    """
    self.title = title
    self.format = fn
    self.align_right = align_right
2765 def _GetColFormatString(width, align_right):
2766 """Returns the format string for a field.
2774 return "%%%s%ss" % (sign, width)
def FormatTable(rows, columns, header, separator):
  """Formats data as a table.

  @type rows: list of lists
  @param rows: Row data, one list per row
  @type columns: list of L{TableColumn}
  @param columns: Column descriptions
  @type header: bool
  @param header: Whether to show header row
  @type separator: string or None
  @param separator: String used to separate columns
  @rtype: list of strings
  @return: the formatted table lines

  """
  if header:
    data = [[col.title for col in columns]]
    colwidth = [len(col.title) for col in columns]
  else:
    data = []
    colwidth = [0 for _ in columns]

  # Format row data
  for row in rows:
    assert len(row) == len(columns)

    formatted = [col.format(value) for value, col in zip(row, columns)]

    if separator is None:
      # Update column widths
      for idx, (oldwidth, value) in enumerate(zip(colwidth, formatted)):
        # Modifying a list's items while iterating is fine
        colwidth[idx] = max(oldwidth, len(value))

    data.append(formatted)

  if separator is not None:
    # Return early if a separator is used
    return [separator.join(row) for row in data]

  if columns and not columns[-1].align_right:
    # Avoid unnecessary spaces at end of line
    colwidth[-1] = 0

  # Build format string
  fmt = " ".join([_GetColFormatString(width, col.align_right)
                  for col, width in zip(columns, colwidth)])

  return [fmt % tuple(row) for row in data]
def FormatTimestamp(ts):
  """Formats a given timestamp.

  @type ts: timestamp
  @param ts: a timeval-type timestamp, a tuple of seconds and microseconds

  @rtype: string
  @return: a string with the formatted timestamp, or "?" if C{ts} is not
    a two-element tuple/list

  """
  if not isinstance(ts, (tuple, list)) or len(ts) != 2:
    return "?"

  (sec, usec) = ts
  return time.strftime("%F %T", time.localtime(sec)) + ".%06d" % usec
def ParseTimespec(value):
  """Parse a time specification.

  The following suffixes will be recognized:

    - s: seconds
    - m: minutes
    - h: hours
    - d: day
    - w: weeks

  Without any suffix, the value will be taken to be in seconds.

  @rtype: int
  @return: the time specification converted to seconds
  @raise errors.OpPrereqError: if the specification is empty or invalid

  """
  value = str(value)
  if not value:
    raise errors.OpPrereqError("Empty time specification passed")
  suffix_map = {
    "s": 1,
    "m": 60,
    "h": 3600,
    "d": 86400,
    "w": 604800,
    }
  if value[-1] not in suffix_map:
    try:
      value = int(value)
    except (TypeError, ValueError):
      raise errors.OpPrereqError("Invalid time specification '%s'" % value)
  else:
    multiplier = suffix_map[value[-1]]
    value = value[:-1]
    if not value: # no data left after stripping the suffix
      raise errors.OpPrereqError("Invalid time specification (only"
                                 " suffix passed)")
    try:
      value = int(value) * multiplier
    except (TypeError, ValueError):
      raise errors.OpPrereqError("Invalid time specification '%s'" % value)
  return value
def GetOnlineNodes(nodes, cl=None, nowarn=False, secondary_ips=False,
                   filter_master=False, nodegroup=None):
  """Returns the names of online nodes.

  This function will also log a warning on stderr with the names of
  the online nodes.

  @param nodes: if not empty, use only this subset of nodes (minus the
      offline ones)
  @param cl: if not None, luxi client to use
  @type nowarn: boolean
  @param nowarn: by default, this function will output a note with the
      offline nodes that are skipped; if this parameter is True the
      note is not displayed
  @type secondary_ips: boolean
  @param secondary_ips: if True, return the secondary IPs instead of the
      names, useful for doing network traffic over the replication interface
      (if any)
  @type filter_master: boolean
  @param filter_master: if True, do not return the master node in the list
      (useful in coordination with secondary_ips where we cannot check our
      node name against the list)
  @type nodegroup: string
  @param nodegroup: If set, only return nodes in this node group

  """
  # NOTE(review): restored from a truncated source extract -- verify against
  # the upstream implementation
  if cl is None:
    cl = GetClient()

  filter_ = []

  if nodes:
    filter_.append(qlang.MakeSimpleFilter("name", nodes))

  if nodegroup is not None:
    filter_.append([qlang.OP_OR, [qlang.OP_EQUAL, "group", nodegroup],
                                 [qlang.OP_EQUAL, "group.uuid", nodegroup]])

  if filter_master:
    filter_.append([qlang.OP_NOT, [qlang.OP_TRUE, "master"]])

  if filter_:
    if len(filter_) > 1:
      final_filter = [qlang.OP_AND] + filter_
    else:
      assert len(filter_) == 1
      final_filter = filter_[0]
  else:
    final_filter = None

  result = cl.Query(constants.QR_NODE, ["name", "offline", "sip"], final_filter)

  def _IsOffline(row):
    (_, (_, offline), _) = row
    return offline

  def _GetName(row):
    ((_, name), _, _) = row
    return name

  def _GetSip(row):
    (_, _, (_, sip)) = row
    return sip

  (offline, online) = compat.partition(result.data, _IsOffline)

  if offline and not nowarn:
    ToStderr("Note: skipping offline node(s): %s" %
             utils.CommaJoin(map(_GetName, offline)))

  if secondary_ips:
    fn = _GetSip
  else:
    fn = _GetName

  return map(fn, online)
2962 def _ToStream(stream, txt, *args):
2963 """Write a message to a stream, bypassing the logging system
2965 @type stream: file object
2966 @param stream: the file to which we should write
2968 @param txt: the message
2974 stream.write(txt % args)
2979 except IOError, err:
2980 if err.errno == errno.EPIPE:
2981 # our terminal went away, we'll exit
2982 sys.exit(constants.EXIT_FAILURE)
def ToStdout(txt, *args):
  """Write a message to stdout only, bypassing the logging system

  This is just a wrapper over _ToStream.

  @type txt: str
  @param txt: the message

  """
  _ToStream(sys.stdout, txt, *args)
def ToStderr(txt, *args):
  """Write a message to stderr only, bypassing the logging system

  This is just a wrapper over _ToStream.

  @type txt: str
  @param txt: the message

  """
  _ToStream(sys.stderr, txt, *args)
class JobExecutor(object):
  """Class which manages the submission and execution of multiple jobs.

  Note that instances of this class should not be reused between
  GetResults() calls.

  """
  def __init__(self, cl=None, verbose=True, opts=None, feedback_fn=None):
    # NOTE(review): restored from a truncated source extract -- verify
    # against the upstream implementation
    self.queue = []
    if cl is None:
      cl = GetClient()
    self.cl = cl
    self.verbose = verbose
    self.jobs = []
    self.opts = opts
    self.feedback_fn = feedback_fn
    # monotonically increasing index to preserve submission order in results
    self._counter = itertools.count()

  @staticmethod
  def _IfName(name, fmt):
    """Helper function for formatting name.

    """
    if name:
      return fmt % name

    return ""

  def QueueJob(self, name, *ops):
    """Record a job for later submit.

    @type name: string
    @param name: a description of the job, will be used in WaitJobSet

    """
    SetGenericOpcodeOpts(ops, self.opts)
    self.queue.append((next(self._counter), name, ops))

  def AddJobId(self, name, status, job_id):
    """Adds a job ID to the internal queue.

    """
    self.jobs.append((next(self._counter), status, job_id, name))

  def SubmitPending(self, each=False):
    """Submit all pending jobs.

    """
    if each:
      results = []
      for (_, _, ops) in self.queue:
        # SubmitJob will remove the success status, but raise an exception if
        # the submission fails, so we'll notice that anyway.
        results.append([True, self.cl.SubmitJob(ops)])
    else:
      results = self.cl.SubmitManyJobs([ops for (_, _, ops) in self.queue])
    for ((status, data), (idx, name, _)) in zip(results, self.queue):
      self.jobs.append((idx, status, data, name))

  def _ChooseJob(self):
    """Choose a non-waiting/queued job to poll next.

    """
    assert self.jobs, "_ChooseJob called with empty job list"

    result = self.cl.QueryJobs([i[2] for i in self.jobs], ["status"])
    assert result

    for job_data, status in zip(self.jobs, result):
      if (isinstance(status, list) and status and
          status[0] in (constants.JOB_STATUS_QUEUED,
                        constants.JOB_STATUS_WAITING,
                        constants.JOB_STATUS_CANCELING)):
        # job is still present and waiting
        continue
      # good candidate found (either running job or lost job)
      self.jobs.remove(job_data)
      return job_data

    # no job found yet, just pick the first one
    return self.jobs.pop(0)

  def GetResults(self):
    """Wait for and return the results of all jobs.

    @rtype: list
    @return: list of tuples (success, job results), in the same order
        as the submitted jobs; if a job has failed, instead of the result
        there will be the error message

    """
    if not self.jobs:
      self.SubmitPending()
    results = []
    if self.verbose:
      ok_jobs = [row[2] for row in self.jobs if row[1]]
      if ok_jobs:
        ToStdout("Submitted jobs %s", utils.CommaJoin(ok_jobs))

    # first, remove any non-submitted jobs
    self.jobs, failures = compat.partition(self.jobs, lambda x: x[1])
    for idx, _, jid, name in failures:
      ToStderr("Failed to submit job%s: %s", self._IfName(name, " for %s"), jid)
      results.append((idx, False, jid))

    while self.jobs:
      (idx, _, jid, name) = self._ChooseJob()
      ToStdout("Waiting for job %s%s ...", jid, self._IfName(name, " for %s"))
      try:
        job_result = PollJob(jid, cl=self.cl, feedback_fn=self.feedback_fn)
        success = True
      except errors.JobLost as err:
        _, job_result = FormatError(err)
        ToStderr("Job %s%s has been archived, cannot check its result",
                 jid, self._IfName(name, " for %s"))
        success = False
      except (errors.GenericError, luxi.ProtocolError) as err:
        _, job_result = FormatError(err)
        success = False
        # the error message will always be shown, verbose or not
        ToStderr("Job %s%s has failed: %s",
                 jid, self._IfName(name, " for %s"), job_result)

      results.append((idx, success, job_result))

    # sort based on the index, then drop it
    results.sort()
    results = [i[1:] for i in results]

    return results

  def WaitOrShow(self, wait):
    """Wait for job results or only print the job IDs.

    @type wait: boolean
    @param wait: whether to wait or not

    """
    if wait:
      return self.GetResults()
    else:
      if not self.jobs:
        self.SubmitPending()
      for _, status, result, name in self.jobs:
        if status:
          ToStdout("%s: %s", result, name)
        else:
          ToStderr("Failure for %s: %s", name, result)

      return [row[1:3] for row in self.jobs]
def FormatParameterDict(buf, param_dict, actual, level=1):
  """Formats a parameter dictionary.

  @type buf: L{StringIO}
  @param buf: the buffer into which to write
  @type param_dict: dict
  @param param_dict: the own parameters
  @type actual: dict
  @param actual: the current parameter set (including defaults)
  @param level: Level of indent

  """
  indent = "  " * level
  for key in sorted(actual):
    # Parameters not set explicitly fall back to the effective default
    val = param_dict.get(key, "default (%s)" % actual[key])
    buf.write("%s- %s: %s\n" % (indent, key, val))
3180 def ConfirmOperation(names, list_type, text, extra=""):
3181 """Ask the user to confirm an operation on a list of list_type.
3183 This function is used to request confirmation for doing an operation
3184 on a given list of list_type.
3187 @param names: the list of names that we display when
3188 we ask for confirmation
3189 @type list_type: str
3190 @param list_type: Human readable name for elements in the list (e.g. nodes)
3192 @param text: the operation that the user should confirm
3194 @return: True or False depending on user's confirmation.
3198 msg = ("The %s will operate on %d %s.\n%s"
3199 "Do you want to continue?" % (text, count, list_type, extra))
3200 affected = (("\nAffected %s:\n" % list_type) +
3201 "\n".join([" %s" % name for name in names]))
3203 choices = [("y", True, "Yes, execute the %s" % text),
3204 ("n", False, "No, abort the %s" % text)]
3207 choices.insert(1, ("v", "v", "View the list of affected %s" % list_type))
3210 question = msg + affected
3212 choice = AskUser(question, choices)
3215 choice = AskUser(msg + affected, choices)