4 # Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011 Google Inc.
6 # This program is free software; you can redistribute it and/or modify
7 # it under the terms of the GNU General Public License as published by
8 # the Free Software Foundation; either version 2 of the License, or
9 # (at your option) any later version.
11 # This program is distributed in the hope that it will be useful, but
12 # WITHOUT ANY WARRANTY; without even the implied warranty of
13 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 # General Public License for more details.
16 # You should have received a copy of the GNU General Public License
17 # along with this program; if not, write to the Free Software
18 # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
22 """Module dealing with command line parsing"""
import sys

from cStringIO import StringIO

from ganeti import utils
from ganeti import errors
from ganeti import constants
from ganeti import opcodes
from ganeti import luxi
from ganeti import ssconf
from ganeti import rpc
from ganeti import ssh
from ganeti import compat
from ganeti import netutils
from ganeti import qlang

from optparse import (OptionParser, TitledHelpFormatter,
                      Option, OptionValueError)
51 # Command line options
64 "CLUSTER_DOMAIN_SECRET_OPT",
81 "FILESTORE_DRIVER_OPT",
87 "GLOBAL_SHARED_FILEDIR_OPT",
92 "DEFAULT_IALLOCATOR_OPT",
93 "IDENTIFY_DEFAULTS_OPT",
95 "IGNORE_FAILURES_OPT",
97 "IGNORE_REMOVE_FAILURES_OPT",
98 "IGNORE_SECONDARIES_OPT",
102 "MAINTAIN_NODE_HEALTH_OPT",
105 "MIGRATION_MODE_OPT",
107 "NEW_CLUSTER_CERT_OPT",
108 "NEW_CLUSTER_DOMAIN_SECRET_OPT",
109 "NEW_CONFD_HMAC_KEY_OPT",
113 "NODE_FORCE_JOIN_OPT",
115 "NODE_PLACEMENT_OPT",
119 "NODRBD_STORAGE_OPT",
125 "NOMODIFY_ETCHOSTS_OPT",
126 "NOMODIFY_SSH_SETUP_OPT",
132 "NOSSH_KEYCHECK_OPT",
144 "PREALLOC_WIPE_DISKS_OPT",
145 "PRIMARY_IP_VERSION_OPT",
151 "REMOVE_INSTANCE_OPT",
156 "SECONDARY_ONLY_OPT",
160 "SHUTDOWN_TIMEOUT_OPT",
165 "STARTUP_PAUSED_OPT",
178 # Generic functions for CLI programs
181 "GenericInstanceCreate",
187 "JobSubmittedException",
189 "RunWhileClusterStopped",
193 # Formatting functions
194 "ToStderr", "ToStdout",
197 "FormatParameterDict",
206 # command line options support infrastructure
207 "ARGS_MANY_INSTANCES",
226 "OPT_COMPL_INST_ADD_NODES",
227 "OPT_COMPL_MANY_NODES",
228 "OPT_COMPL_ONE_IALLOCATOR",
229 "OPT_COMPL_ONE_INSTANCE",
230 "OPT_COMPL_ONE_NODE",
231 "OPT_COMPL_ONE_NODEGROUP",
237 "COMMON_CREATE_OPTS",
#: Priorities (sorted from lowest to highest)
_PRIORITY_NAMES = [
  ("low", constants.OP_PRIO_LOW),
  ("normal", constants.OP_PRIO_NORMAL),
  ("high", constants.OP_PRIO_HIGH),
  ]

#: Priority dictionary for easier lookup
# TODO: Replace this and _PRIORITY_NAMES with a single sorted dictionary once
# we migrate to Python 2.6
_PRIONAME_TO_VALUE = dict(_PRIORITY_NAMES)
# Query result status for clients; QR_NORMAL means the whole result was
# retrieved, QR_UNKNOWN that a field was not recognized, QR_INCOMPLETE
# that the result is partial
(QR_NORMAL,
 QR_UNKNOWN,
 QR_INCOMPLETE) = range(3)
260 #: Maximum batch size for ChooseJob
265 def __init__(self, min=0, max=None): # pylint: disable=W0622
270 return ("<%s min=%s max=%s>" %
271 (self.__class__.__name__, self.min, self.max))
class ArgSuggest(_Argument):
  """Suggesting argument.

  Value can be any of the ones passed to the constructor.

  """
  # pylint: disable=W0622
  def __init__(self, min=0, max=None, choices=None):
    _Argument.__init__(self, min=min, max=max)
    # choices are only suggestions (e.g. for shell completion), not enforced
    self.choices = choices

  def __repr__(self):
    return ("<%s min=%s max=%s choices=%r>" %
            (self.__class__.__name__, self.min, self.max, self.choices))
class ArgChoice(ArgSuggest):
  """Choice argument.

  Value can be any of the ones passed to the constructor. Like L{ArgSuggest},
  but value must be one of the choices.

  """
class ArgUnknown(_Argument):
  """Unknown argument to program (e.g. determined at runtime).

  """


class ArgInstance(_Argument):
  """Instances argument.

  """


class ArgNode(_Argument):
  """Node argument.

  """


class ArgGroup(_Argument):
  """Node group argument.

  """


class ArgJobId(_Argument):
  """Job ID argument.

  """


class ArgFile(_Argument):
  """File path argument.

  """


class ArgCommand(_Argument):
  """Command argument.

  """


class ArgHost(_Argument):
  """Host argument.

  """


class ArgOs(_Argument):
  """OS argument.

  """
# Shorthand argument-specification lists for the most common command shapes
ARGS_MANY_INSTANCES = [ArgInstance()]
ARGS_MANY_NODES = [ArgNode()]
ARGS_MANY_GROUPS = [ArgGroup()]
ARGS_ONE_INSTANCE = [ArgInstance(min=1, max=1)]
ARGS_ONE_NODE = [ArgNode(min=1, max=1)]
ARGS_ONE_GROUP = [ArgGroup(min=1, max=1)]
ARGS_ONE_OS = [ArgOs(min=1, max=1)]
def _ExtractTagsObject(opts, args):
  """Extract the tag type object.

  Note that this function will modify its args parameter.

  @param opts: parsed command line options; must carry a C{tag_type} field
  @param args: remaining command line arguments; for node/group/instance
      tags the first element (the object name) is popped off
  @return: a (kind, name) tuple
  @raises errors.OpPrereqError: if a name argument is required but missing

  """
  if not hasattr(opts, "tag_type"):
    raise errors.ProgrammerError("tag_type not passed to _ExtractTagsObject")
  kind = opts.tag_type
  if kind == constants.TAG_CLUSTER:
    # the cluster is a singleton, so the kind doubles as the name
    retval = kind, kind
  elif kind in (constants.TAG_NODEGROUP,
                constants.TAG_NODE,
                constants.TAG_INSTANCE):
    if not args:
      raise errors.OpPrereqError("no arguments passed to the command")
    name = args.pop(0)
    retval = kind, name
  else:
    raise errors.ProgrammerError("Unhandled tag type '%s'" % kind)
  return retval
def _ExtendTags(opts, args):
  """Extend the args if a source file has been given.

  This function will extend the tags with the contents of the file
  passed in the 'tags_source' attribute of the opts parameter. A file
  named '-' will be replaced by stdin.

  @param opts: parsed command line options; C{tags_source} may be None
  @param args: list of tags, extended in place with one tag per
      (stripped) line of the source file

  """
  fname = opts.tags_source
  if fname is None:
    return
  if fname == "-":
    new_fh = sys.stdin
  else:
    new_fh = open(fname, "r")
  new_data = []
  try:
    # we don't use the nice 'new_data = [line.strip() for line in fh]'
    # because of python bug 1633941
    while True:
      line = new_fh.readline()
      if not line:
        break
      new_data.append(line.strip())
  finally:
    new_fh.close()
  args.extend(new_data)
def ListTags(opts, args):
  """List the tags on a given object.

  This is a generic implementation that knows how to deal with all
  three cases of tag objects (cluster, node, instance). The opts
  argument is expected to contain a tag_type field denoting what
  object type we work on.

  """
  kind, name = _ExtractTagsObject(opts, args)
  # NOTE(review): client creation reconstructed — confirm against upstream
  cl = GetClient()
  result = cl.QueryTags(kind, name)
  result = list(result)
  result.sort()
  for tag in result:
    ToStdout(tag)
def AddTags(opts, args):
  """Add tags on a given object.

  This is a generic implementation that knows how to deal with all
  three cases of tag objects (cluster, node, instance). The opts
  argument is expected to contain a tag_type field denoting what
  object type we work on.

  """
  kind, name = _ExtractTagsObject(opts, args)
  _ExtendTags(opts, args)
  if not args:
    raise errors.OpPrereqError("No tags to be added")
  op = opcodes.OpTagsSet(kind=kind, name=name, tags=args)
  SubmitOpCode(op, opts=opts)
def RemoveTags(opts, args):
  """Remove tags from a given object.

  This is a generic implementation that knows how to deal with all
  three cases of tag objects (cluster, node, instance). The opts
  argument is expected to contain a tag_type field denoting what
  object type we work on.

  """
  kind, name = _ExtractTagsObject(opts, args)
  _ExtendTags(opts, args)
  if not args:
    raise errors.OpPrereqError("No tags to be removed")
  op = opcodes.OpTagsDel(kind=kind, name=name, tags=args)
  SubmitOpCode(op, opts=opts)
468 def check_unit(option, opt, value): # pylint: disable=W0613
469 """OptParsers custom converter for units.
473 return utils.ParseUnit(value)
474 except errors.UnitParseError, err:
475 raise OptionValueError("option %s: %s" % (opt, err))
def _SplitKeyVal(opt, data):
  """Convert a KeyVal string into a dict.

  This function will convert a key=val[,...] string into a dict. Empty
  values will be converted specially: keys which have the prefix 'no_'
  will have the value=False and the prefix stripped, keys with the
  prefix 'un_' will have the value=None and the prefix stripped, the
  others will have value=True.

  @type opt: string
  @param opt: a string holding the option name for which we process the
      data, used in building error messages
  @type data: string
  @param data: a string of the format key=val,key=val,...

  @rtype: dict
  @return: {key=val, key=val}
  @raises errors.ParameterError: if there are duplicate keys

  """
  kv_dict = {}
  if data:
    # UnescapeAndSplit honours backslash-escaped commas inside values
    for elem in utils.UnescapeAndSplit(data, sep=","):
      if "=" in elem:
        key, val = elem.split("=", 1)
      else:
        if elem.startswith(NO_PREFIX):
          key, val = elem[len(NO_PREFIX):], False
        elif elem.startswith(UN_PREFIX):
          key, val = elem[len(UN_PREFIX):], None
        else:
          key, val = elem, True
      if key in kv_dict:
        raise errors.ParameterError("Duplicate key '%s' in option %s" %
                                    (key, opt))
      kv_dict[key] = val
  return kv_dict
def check_ident_key_val(option, opt, value): # pylint: disable=W0613
  """Custom parser for ident:key=val,key=val options.

  This will store the parsed values as a tuple (ident, {key: val}). As such,
  multiple uses of this option via action=append is possible.

  """
  if ":" not in value:
    ident, rest = value, ""
  else:
    ident, rest = value.split(":", 1)

  if ident.startswith(NO_PREFIX):
    # "no_ident" means removal: no key=val part may follow
    if rest:
      msg = "Cannot pass options when removing parameter groups: %s" % value
      raise errors.ParameterError(msg)
    retval = (ident[len(NO_PREFIX):], False)
  elif ident.startswith(UN_PREFIX):
    # "un_ident" means reset-to-default: no key=val part may follow
    if rest:
      msg = "Cannot pass options when removing parameter groups: %s" % value
      raise errors.ParameterError(msg)
    retval = (ident[len(UN_PREFIX):], None)
  else:
    kv_dict = _SplitKeyVal(opt, rest)
    retval = (ident, kv_dict)
  return retval
def check_key_val(option, opt, value): # pylint: disable=W0613
  """Custom parser class for key=val,key=val options.

  This will store the parsed values as a dict {key: val}.

  """
  return _SplitKeyVal(opt, value)
def check_bool(option, opt, value): # pylint: disable=W0613
  """Custom parser for yes/no options.

  This will store the parsed value as either True or False.

  """
  value = value.lower()
  if value == constants.VALUE_FALSE or value == "no":
    return False
  elif value == constants.VALUE_TRUE or value == "yes":
    return True
  else:
    raise errors.ParameterError("Invalid boolean value '%s'" % value)
# completion_suggestion is normally a list. Using numeric values not evaluating
# to False for dynamic completion.
(OPT_COMPL_MANY_NODES,
 OPT_COMPL_ONE_NODE,
 OPT_COMPL_ONE_INSTANCE,
 OPT_COMPL_ONE_OS,
 OPT_COMPL_ONE_IALLOCATOR,
 OPT_COMPL_INST_ADD_NODES,
 OPT_COMPL_ONE_NODEGROUP) = range(100, 107)

#: All the completion-suggestion markers known to the option machinery
OPT_COMPL_ALL = frozenset([
  OPT_COMPL_MANY_NODES,
  OPT_COMPL_ONE_NODE,
  OPT_COMPL_ONE_INSTANCE,
  OPT_COMPL_ONE_OS,
  OPT_COMPL_ONE_IALLOCATOR,
  OPT_COMPL_INST_ADD_NODES,
  OPT_COMPL_ONE_NODEGROUP,
  ])
class CliOption(Option):
  """Custom option class for optparse.

  Extends the standard optparse Option with a "completion_suggest"
  attribute and with the unit/keyval/identkeyval/bool value types used
  throughout the Ganeti command line tools.

  """
  ATTRS = Option.ATTRS + [
    "completion_suggest",
    ]
  TYPES = Option.TYPES + (
    "identkeyval",
    "keyval",
    "unit",
    "bool",
    )
  TYPE_CHECKER = Option.TYPE_CHECKER.copy()
  TYPE_CHECKER["identkeyval"] = check_ident_key_val
  TYPE_CHECKER["keyval"] = check_key_val
  TYPE_CHECKER["unit"] = check_unit
  TYPE_CHECKER["bool"] = check_bool


# optparse.py sets make_option, so we do it for our own option class, too
cli_option = CliOption
614 DEBUG_OPT = cli_option("-d", "--debug", default=0, action="count",
615 help="Increase debugging level")
617 NOHDR_OPT = cli_option("--no-headers", default=False,
618 action="store_true", dest="no_headers",
619 help="Don't display column headers")
621 SEP_OPT = cli_option("--separator", default=None,
622 action="store", dest="separator",
623 help=("Separator between output fields"
624 " (defaults to one space)"))
626 USEUNITS_OPT = cli_option("--units", default=None,
627 dest="units", choices=("h", "m", "g", "t"),
628 help="Specify units for output (one of h/m/g/t)")
630 FIELDS_OPT = cli_option("-o", "--output", dest="output", action="store",
631 type="string", metavar="FIELDS",
632 help="Comma separated list of output fields")
634 FORCE_OPT = cli_option("-f", "--force", dest="force", action="store_true",
635 default=False, help="Force the operation")
637 CONFIRM_OPT = cli_option("--yes", dest="confirm", action="store_true",
638 default=False, help="Do not require confirmation")
640 IGNORE_OFFLINE_OPT = cli_option("--ignore-offline", dest="ignore_offline",
641 action="store_true", default=False,
642 help=("Ignore offline nodes and do as much"
645 TAG_ADD_OPT = cli_option("--tags", dest="tags",
646 default=None, help="Comma-separated list of instance"
649 TAG_SRC_OPT = cli_option("--from", dest="tags_source",
650 default=None, help="File with tag names")
652 SUBMIT_OPT = cli_option("--submit", dest="submit_only",
653 default=False, action="store_true",
654 help=("Submit the job and return the job ID, but"
655 " don't wait for the job to finish"))
657 SYNC_OPT = cli_option("--sync", dest="do_locking",
658 default=False, action="store_true",
659 help=("Grab locks while doing the queries"
660 " in order to ensure more consistent results"))
662 DRY_RUN_OPT = cli_option("--dry-run", default=False,
664 help=("Do not execute the operation, just run the"
665 " check steps and verify it it could be"
668 VERBOSE_OPT = cli_option("-v", "--verbose", default=False,
670 help="Increase the verbosity of the operation")
672 DEBUG_SIMERR_OPT = cli_option("--debug-simulate-errors", default=False,
673 action="store_true", dest="simulate_errors",
674 help="Debugging option that makes the operation"
675 " treat most runtime checks as failed")
677 NWSYNC_OPT = cli_option("--no-wait-for-sync", dest="wait_for_sync",
678 default=True, action="store_false",
679 help="Don't wait for sync (DANGEROUS!)")
681 DISK_TEMPLATE_OPT = cli_option("-t", "--disk-template", dest="disk_template",
682 help=("Custom disk setup (%s)" %
683 utils.CommaJoin(constants.DISK_TEMPLATES)),
684 default=None, metavar="TEMPL",
685 choices=list(constants.DISK_TEMPLATES))
687 NONICS_OPT = cli_option("--no-nics", default=False, action="store_true",
688 help="Do not create any network cards for"
691 FILESTORE_DIR_OPT = cli_option("--file-storage-dir", dest="file_storage_dir",
692 help="Relative path under default cluster-wide"
693 " file storage dir to store file-based disks",
694 default=None, metavar="<DIR>")
696 FILESTORE_DRIVER_OPT = cli_option("--file-driver", dest="file_driver",
697 help="Driver to use for image files",
698 default="loop", metavar="<DRIVER>",
699 choices=list(constants.FILE_DRIVER))
701 IALLOCATOR_OPT = cli_option("-I", "--iallocator", metavar="<NAME>",
702 help="Select nodes for the instance automatically"
703 " using the <NAME> iallocator plugin",
704 default=None, type="string",
705 completion_suggest=OPT_COMPL_ONE_IALLOCATOR)
707 DEFAULT_IALLOCATOR_OPT = cli_option("-I", "--default-iallocator",
709 help="Set the default instance allocator plugin",
710 default=None, type="string",
711 completion_suggest=OPT_COMPL_ONE_IALLOCATOR)
713 OS_OPT = cli_option("-o", "--os-type", dest="os", help="What OS to run",
715 completion_suggest=OPT_COMPL_ONE_OS)
717 OSPARAMS_OPT = cli_option("-O", "--os-parameters", dest="osparams",
718 type="keyval", default={},
719 help="OS parameters")
721 FORCE_VARIANT_OPT = cli_option("--force-variant", dest="force_variant",
722 action="store_true", default=False,
723 help="Force an unknown variant")
725 NO_INSTALL_OPT = cli_option("--no-install", dest="no_install",
726 action="store_true", default=False,
727 help="Do not install the OS (will"
730 BACKEND_OPT = cli_option("-B", "--backend-parameters", dest="beparams",
731 type="keyval", default={},
732 help="Backend parameters")
734 HVOPTS_OPT = cli_option("-H", "--hypervisor-parameters", type="keyval",
735 default={}, dest="hvparams",
736 help="Hypervisor parameters")
738 HYPERVISOR_OPT = cli_option("-H", "--hypervisor-parameters", dest="hypervisor",
739 help="Hypervisor and hypervisor options, in the"
740 " format hypervisor:option=value,option=value,...",
741 default=None, type="identkeyval")
743 HVLIST_OPT = cli_option("-H", "--hypervisor-parameters", dest="hvparams",
744 help="Hypervisor and hypervisor options, in the"
745 " format hypervisor:option=value,option=value,...",
746 default=[], action="append", type="identkeyval")
748 NOIPCHECK_OPT = cli_option("--no-ip-check", dest="ip_check", default=True,
749 action="store_false",
750 help="Don't check that the instance's IP"
753 NONAMECHECK_OPT = cli_option("--no-name-check", dest="name_check",
754 default=True, action="store_false",
755 help="Don't check that the instance's name"
758 NET_OPT = cli_option("--net",
759 help="NIC parameters", default=[],
760 dest="nics", action="append", type="identkeyval")
762 DISK_OPT = cli_option("--disk", help="Disk parameters", default=[],
763 dest="disks", action="append", type="identkeyval")
765 DISKIDX_OPT = cli_option("--disks", dest="disks", default=None,
766 help="Comma-separated list of disks"
767 " indices to act on (e.g. 0,2) (optional,"
768 " defaults to all disks)")
770 OS_SIZE_OPT = cli_option("-s", "--os-size", dest="sd_size",
771 help="Enforces a single-disk configuration using the"
772 " given disk size, in MiB unless a suffix is used",
773 default=None, type="unit", metavar="<size>")
775 IGNORE_CONSIST_OPT = cli_option("--ignore-consistency",
776 dest="ignore_consistency",
777 action="store_true", default=False,
778 help="Ignore the consistency of the disks on"
781 ALLOW_FAILOVER_OPT = cli_option("--allow-failover",
782 dest="allow_failover",
783 action="store_true", default=False,
784 help="If migration is not possible fallback to"
787 NONLIVE_OPT = cli_option("--non-live", dest="live",
788 default=True, action="store_false",
789 help="Do a non-live migration (this usually means"
790 " freeze the instance, save the state, transfer and"
791 " only then resume running on the secondary node)")
793 MIGRATION_MODE_OPT = cli_option("--migration-mode", dest="migration_mode",
795 choices=list(constants.HT_MIGRATION_MODES),
796 help="Override default migration mode (choose"
797 " either live or non-live")
799 NODE_PLACEMENT_OPT = cli_option("-n", "--node", dest="node",
800 help="Target node and optional secondary node",
801 metavar="<pnode>[:<snode>]",
802 completion_suggest=OPT_COMPL_INST_ADD_NODES)
804 NODE_LIST_OPT = cli_option("-n", "--node", dest="nodes", default=[],
805 action="append", metavar="<node>",
806 help="Use only this node (can be used multiple"
807 " times, if not given defaults to all nodes)",
808 completion_suggest=OPT_COMPL_ONE_NODE)
810 NODEGROUP_OPT_NAME = "--node-group"
811 NODEGROUP_OPT = cli_option("-g", NODEGROUP_OPT_NAME,
813 help="Node group (name or uuid)",
814 metavar="<nodegroup>",
815 default=None, type="string",
816 completion_suggest=OPT_COMPL_ONE_NODEGROUP)
818 SINGLE_NODE_OPT = cli_option("-n", "--node", dest="node", help="Target node",
820 completion_suggest=OPT_COMPL_ONE_NODE)
822 NOSTART_OPT = cli_option("--no-start", dest="start", default=True,
823 action="store_false",
824 help="Don't start the instance after creation")
826 SHOWCMD_OPT = cli_option("--show-cmd", dest="show_command",
827 action="store_true", default=False,
828 help="Show command instead of executing it")
830 CLEANUP_OPT = cli_option("--cleanup", dest="cleanup",
831 default=False, action="store_true",
832 help="Instead of performing the migration, try to"
833 " recover from a failed cleanup. This is safe"
834 " to run even if the instance is healthy, but it"
835 " will create extra replication traffic and "
836 " disrupt briefly the replication (like during the"
839 STATIC_OPT = cli_option("-s", "--static", dest="static",
840 action="store_true", default=False,
841 help="Only show configuration data, not runtime data")
843 ALL_OPT = cli_option("--all", dest="show_all",
844 default=False, action="store_true",
845 help="Show info on all instances on the cluster."
846 " This can take a long time to run, use wisely")
848 SELECT_OS_OPT = cli_option("--select-os", dest="select_os",
849 action="store_true", default=False,
850 help="Interactive OS reinstall, lists available"
851 " OS templates for selection")
853 IGNORE_FAILURES_OPT = cli_option("--ignore-failures", dest="ignore_failures",
854 action="store_true", default=False,
855 help="Remove the instance from the cluster"
856 " configuration even if there are failures"
857 " during the removal process")
859 IGNORE_REMOVE_FAILURES_OPT = cli_option("--ignore-remove-failures",
860 dest="ignore_remove_failures",
861 action="store_true", default=False,
862 help="Remove the instance from the"
863 " cluster configuration even if there"
864 " are failures during the removal"
867 REMOVE_INSTANCE_OPT = cli_option("--remove-instance", dest="remove_instance",
868 action="store_true", default=False,
869 help="Remove the instance from the cluster")
871 DST_NODE_OPT = cli_option("-n", "--target-node", dest="dst_node",
872 help="Specifies the new node for the instance",
873 metavar="NODE", default=None,
874 completion_suggest=OPT_COMPL_ONE_NODE)
876 NEW_SECONDARY_OPT = cli_option("-n", "--new-secondary", dest="dst_node",
877 help="Specifies the new secondary node",
878 metavar="NODE", default=None,
879 completion_suggest=OPT_COMPL_ONE_NODE)
881 ON_PRIMARY_OPT = cli_option("-p", "--on-primary", dest="on_primary",
882 default=False, action="store_true",
883 help="Replace the disk(s) on the primary"
884 " node (applies only to internally mirrored"
885 " disk templates, e.g. %s)" %
886 utils.CommaJoin(constants.DTS_INT_MIRROR))
888 ON_SECONDARY_OPT = cli_option("-s", "--on-secondary", dest="on_secondary",
889 default=False, action="store_true",
890 help="Replace the disk(s) on the secondary"
891 " node (applies only to internally mirrored"
892 " disk templates, e.g. %s)" %
893 utils.CommaJoin(constants.DTS_INT_MIRROR))
895 AUTO_PROMOTE_OPT = cli_option("--auto-promote", dest="auto_promote",
896 default=False, action="store_true",
897 help="Lock all nodes and auto-promote as needed"
900 AUTO_REPLACE_OPT = cli_option("-a", "--auto", dest="auto",
901 default=False, action="store_true",
902 help="Automatically replace faulty disks"
903 " (applies only to internally mirrored"
904 " disk templates, e.g. %s)" %
905 utils.CommaJoin(constants.DTS_INT_MIRROR))
907 IGNORE_SIZE_OPT = cli_option("--ignore-size", dest="ignore_size",
908 default=False, action="store_true",
909 help="Ignore current recorded size"
910 " (useful for forcing activation when"
911 " the recorded size is wrong)")
913 SRC_NODE_OPT = cli_option("--src-node", dest="src_node", help="Source node",
915 completion_suggest=OPT_COMPL_ONE_NODE)
917 SRC_DIR_OPT = cli_option("--src-dir", dest="src_dir", help="Source directory",
920 SECONDARY_IP_OPT = cli_option("-s", "--secondary-ip", dest="secondary_ip",
921 help="Specify the secondary ip for the node",
922 metavar="ADDRESS", default=None)
924 READD_OPT = cli_option("--readd", dest="readd",
925 default=False, action="store_true",
926 help="Readd old node after replacing it")
928 NOSSH_KEYCHECK_OPT = cli_option("--no-ssh-key-check", dest="ssh_key_check",
929 default=True, action="store_false",
930 help="Disable SSH key fingerprint checking")
932 NODE_FORCE_JOIN_OPT = cli_option("--force-join", dest="force_join",
933 default=False, action="store_true",
934 help="Force the joining of a node")
936 MC_OPT = cli_option("-C", "--master-candidate", dest="master_candidate",
937 type="bool", default=None, metavar=_YORNO,
938 help="Set the master_candidate flag on the node")
940 OFFLINE_OPT = cli_option("-O", "--offline", dest="offline", metavar=_YORNO,
941 type="bool", default=None,
942 help=("Set the offline flag on the node"
943 " (cluster does not communicate with offline"
946 DRAINED_OPT = cli_option("-D", "--drained", dest="drained", metavar=_YORNO,
947 type="bool", default=None,
948 help=("Set the drained flag on the node"
949 " (excluded from allocation operations)"))
951 CAPAB_MASTER_OPT = cli_option("--master-capable", dest="master_capable",
952 type="bool", default=None, metavar=_YORNO,
953 help="Set the master_capable flag on the node")
955 CAPAB_VM_OPT = cli_option("--vm-capable", dest="vm_capable",
956 type="bool", default=None, metavar=_YORNO,
957 help="Set the vm_capable flag on the node")
959 ALLOCATABLE_OPT = cli_option("--allocatable", dest="allocatable",
960 type="bool", default=None, metavar=_YORNO,
961 help="Set the allocatable flag on a volume")
963 NOLVM_STORAGE_OPT = cli_option("--no-lvm-storage", dest="lvm_storage",
964 help="Disable support for lvm based instances"
966 action="store_false", default=True)
968 ENABLED_HV_OPT = cli_option("--enabled-hypervisors",
969 dest="enabled_hypervisors",
970 help="Comma-separated list of hypervisors",
971 type="string", default=None)
973 NIC_PARAMS_OPT = cli_option("-N", "--nic-parameters", dest="nicparams",
974 type="keyval", default={},
975 help="NIC parameters")
977 CP_SIZE_OPT = cli_option("-C", "--candidate-pool-size", default=None,
978 dest="candidate_pool_size", type="int",
979 help="Set the candidate pool size")
981 VG_NAME_OPT = cli_option("--vg-name", dest="vg_name",
982 help=("Enables LVM and specifies the volume group"
983 " name (cluster-wide) for disk allocation"
984 " [%s]" % constants.DEFAULT_VG),
985 metavar="VG", default=None)
987 YES_DOIT_OPT = cli_option("--yes-do-it", "--ya-rly", dest="yes_do_it",
988 help="Destroy cluster", action="store_true")
990 NOVOTING_OPT = cli_option("--no-voting", dest="no_voting",
991 help="Skip node agreement check (dangerous)",
992 action="store_true", default=False)
994 MAC_PREFIX_OPT = cli_option("-m", "--mac-prefix", dest="mac_prefix",
995 help="Specify the mac prefix for the instance IP"
996 " addresses, in the format XX:XX:XX",
1000 MASTER_NETDEV_OPT = cli_option("--master-netdev", dest="master_netdev",
1001 help="Specify the node interface (cluster-wide)"
1002 " on which the master IP address will be added"
1003 " (cluster init default: %s)" %
1004 constants.DEFAULT_BRIDGE,
1008 GLOBAL_FILEDIR_OPT = cli_option("--file-storage-dir", dest="file_storage_dir",
1009 help="Specify the default directory (cluster-"
1010 "wide) for storing the file-based disks [%s]" %
1011 constants.DEFAULT_FILE_STORAGE_DIR,
1013 default=constants.DEFAULT_FILE_STORAGE_DIR)
1015 GLOBAL_SHARED_FILEDIR_OPT = cli_option("--shared-file-storage-dir",
1016 dest="shared_file_storage_dir",
1017 help="Specify the default directory (cluster-"
1018 "wide) for storing the shared file-based"
1020 constants.DEFAULT_SHARED_FILE_STORAGE_DIR,
1021 metavar="SHAREDDIR",
1022 default=constants.DEFAULT_SHARED_FILE_STORAGE_DIR)
1024 NOMODIFY_ETCHOSTS_OPT = cli_option("--no-etc-hosts", dest="modify_etc_hosts",
1025 help="Don't modify /etc/hosts",
1026 action="store_false", default=True)
1028 NOMODIFY_SSH_SETUP_OPT = cli_option("--no-ssh-init", dest="modify_ssh_setup",
1029 help="Don't initialize SSH keys",
1030 action="store_false", default=True)
1032 ERROR_CODES_OPT = cli_option("--error-codes", dest="error_codes",
1033 help="Enable parseable error messages",
1034 action="store_true", default=False)
1036 NONPLUS1_OPT = cli_option("--no-nplus1-mem", dest="skip_nplusone_mem",
1037 help="Skip N+1 memory redundancy tests",
1038 action="store_true", default=False)
1040 REBOOT_TYPE_OPT = cli_option("-t", "--type", dest="reboot_type",
1041 help="Type of reboot: soft/hard/full",
1042 default=constants.INSTANCE_REBOOT_HARD,
1044 choices=list(constants.REBOOT_TYPES))
1046 IGNORE_SECONDARIES_OPT = cli_option("--ignore-secondaries",
1047 dest="ignore_secondaries",
1048 default=False, action="store_true",
1049 help="Ignore errors from secondaries")
1051 NOSHUTDOWN_OPT = cli_option("--noshutdown", dest="shutdown",
1052 action="store_false", default=True,
1053 help="Don't shutdown the instance (unsafe)")
1055 TIMEOUT_OPT = cli_option("--timeout", dest="timeout", type="int",
1056 default=constants.DEFAULT_SHUTDOWN_TIMEOUT,
1057 help="Maximum time to wait")
1059 SHUTDOWN_TIMEOUT_OPT = cli_option("--shutdown-timeout",
1060 dest="shutdown_timeout", type="int",
1061 default=constants.DEFAULT_SHUTDOWN_TIMEOUT,
1062 help="Maximum time to wait for instance shutdown")
1064 INTERVAL_OPT = cli_option("--interval", dest="interval", type="int",
1066 help=("Number of seconds between repetions of the"
1069 EARLY_RELEASE_OPT = cli_option("--early-release",
1070 dest="early_release", default=False,
1071 action="store_true",
1072 help="Release the locks on the secondary"
1075 NEW_CLUSTER_CERT_OPT = cli_option("--new-cluster-certificate",
1076 dest="new_cluster_cert",
1077 default=False, action="store_true",
1078 help="Generate a new cluster certificate")
1080 RAPI_CERT_OPT = cli_option("--rapi-certificate", dest="rapi_cert",
1082 help="File containing new RAPI certificate")
1084 NEW_RAPI_CERT_OPT = cli_option("--new-rapi-certificate", dest="new_rapi_cert",
1085 default=None, action="store_true",
1086 help=("Generate a new self-signed RAPI"
1089 NEW_CONFD_HMAC_KEY_OPT = cli_option("--new-confd-hmac-key",
1090 dest="new_confd_hmac_key",
1091 default=False, action="store_true",
1092 help=("Create a new HMAC key for %s" %
1095 CLUSTER_DOMAIN_SECRET_OPT = cli_option("--cluster-domain-secret",
1096 dest="cluster_domain_secret",
1098 help=("Load new new cluster domain"
1099 " secret from file"))
1101 NEW_CLUSTER_DOMAIN_SECRET_OPT = cli_option("--new-cluster-domain-secret",
1102 dest="new_cluster_domain_secret",
1103 default=False, action="store_true",
1104 help=("Create a new cluster domain"
1107 USE_REPL_NET_OPT = cli_option("--use-replication-network",
1108 dest="use_replication_network",
1109 help="Whether to use the replication network"
1110 " for talking to the nodes",
1111 action="store_true", default=False)
1113 MAINTAIN_NODE_HEALTH_OPT = \
1114 cli_option("--maintain-node-health", dest="maintain_node_health",
1115 metavar=_YORNO, default=None, type="bool",
1116 help="Configure the cluster to automatically maintain node"
1117 " health, by shutting down unknown instances, shutting down"
1118 " unknown DRBD devices, etc.")
1120 IDENTIFY_DEFAULTS_OPT = \
1121 cli_option("--identify-defaults", dest="identify_defaults",
1122 default=False, action="store_true",
1123 help="Identify which saved instance parameters are equal to"
1124 " the current cluster defaults and set them as such, instead"
1125 " of marking them as overridden")
1127 UIDPOOL_OPT = cli_option("--uid-pool", default=None,
1128 action="store", dest="uid_pool",
1129 help=("A list of user-ids or user-id"
1130 " ranges separated by commas"))
1132 ADD_UIDS_OPT = cli_option("--add-uids", default=None,
1133 action="store", dest="add_uids",
1134 help=("A list of user-ids or user-id"
1135 " ranges separated by commas, to be"
1136 " added to the user-id pool"))
1138 REMOVE_UIDS_OPT = cli_option("--remove-uids", default=None,
1139 action="store", dest="remove_uids",
1140 help=("A list of user-ids or user-id"
1141 " ranges separated by commas, to be"
1142 " removed from the user-id pool"))
1144 RESERVED_LVS_OPT = cli_option("--reserved-lvs", default=None,
1145 action="store", dest="reserved_lvs",
1146 help=("A comma-separated list of reserved"
1147 " logical volumes names, that will be"
1148 " ignored by cluster verify"))
# Output positive integers as roman numerals in command output.
ROMAN_OPT = cli_option("--roman",
                       dest="roman_integers", default=False,
                       action="store_true",
                       help="Use roman numbers for positive integers")

# Path of the usermode helper DRBD should use (cluster-wide setting).
DRBD_HELPER_OPT = cli_option("--drbd-usermode-helper", dest="drbd_helper",
                             action="store", default=None,
                             help="Specifies usermode helper for DRBD")

# Note: dest is "drbd_storage" (positive sense), the flag stores False.
NODRBD_STORAGE_OPT = cli_option("--no-drbd-storage", dest="drbd_storage",
                                action="store_false", default=True,
                                help="Disable support for DRBD")

# IP version (4 or 6) used for the cluster's primary addresses.
PRIMARY_IP_VERSION_OPT = \
    cli_option("--primary-ip-version", default=constants.IP4_VERSION,
               action="store", dest="primary_ip_version",
               metavar="%d|%d" % (constants.IP4_VERSION,
                                  constants.IP6_VERSION),
               help="Cluster-wide IP version for primary IP")

# Opcode priority; valid choices come from _PRIONAME_TO_VALUE.
PRIORITY_OPT = cli_option("--priority", default=None, dest="priority",
                          metavar="|".join(name for name, _ in _PRIORITY_NAMES),
                          choices=_PRIONAME_TO_VALUE.keys(),
                          help="Priority for opcode processing")

HID_OS_OPT = cli_option("--hidden", dest="hidden",
                        type="bool", default=None, metavar=_YORNO,
                        help="Sets the hidden flag on the OS")

BLK_OS_OPT = cli_option("--blacklisted", dest="blacklisted",
                        type="bool", default=None, metavar=_YORNO,
                        help="Sets the blacklisted flag on the OS")

# NOTE(review): help string continuation lost in extraction here.
PREALLOC_WIPE_DISKS_OPT = cli_option("--prealloc-wipe-disks", default=None,
                                     type="bool", metavar=_YORNO,
                                     dest="prealloc_wipe_disks",
                                     help=("Wipe disks prior to instance"

# Generic node group parameters, parsed as key=value pairs.
NODE_PARAMS_OPT = cli_option("--node-parameters", dest="ndparams",
                             type="keyval", default=None,
                             help="Node parameters")

ALLOC_POLICY_OPT = cli_option("--alloc-policy", dest="alloc_policy",
                              action="store", metavar="POLICY", default=None,
                              help="Allocation policy for the node group")

# "SoR" = State of Record, the recorded (not observed) power state.
NODE_POWERED_OPT = cli_option("--node-powered", default=None,
                              type="bool", metavar=_YORNO,
                              dest="node_powered",
                              help="Specify if the SoR for node is powered")

OOB_TIMEOUT_OPT = cli_option("--oob-timeout", dest="oob_timeout", type="int",
                             default=constants.OOB_TIMEOUT,
                             help="Maximum time to wait for out-of-band helper")

POWER_DELAY_OPT = cli_option("--power-delay", dest="power_delay", type="float",
                             default=constants.OOB_POWER_DELAY,
                             help="Time in seconds to wait between power-ons")

# NOTE(review): help string continuation lost in extraction here.
FORCE_FILTER_OPT = cli_option("-F", "--filter", dest="force_filter",
                              action="store_true", default=False,
                              help=("Whether command argument should be treated"

# Apply the change but do not persist it in the cluster configuration.
NO_REMEMBER_OPT = cli_option("--no-remember",
                             action="store_true", default=False,
                             help="Perform but do not record the change"
                             " in the configuration")

PRIMARY_ONLY_OPT = cli_option("-p", "--primary-only",
                              default=False, action="store_true",
                              help="Evacuate primary instances only")

SECONDARY_ONLY_OPT = cli_option("-s", "--secondary-only",
                                default=False, action="store_true",
                                help="Evacuate secondary instances only"
                                " (applies only to internally mirrored"
                                " disk templates, e.g. %s)" %
                                utils.CommaJoin(constants.DTS_INT_MIRROR))

STARTUP_PAUSED_OPT = cli_option("--paused", dest="startup_paused",
                                action="store_true", default=False,
                                help="Pause instance at startup")

# May be given multiple times (action="append") to target several groups.
TO_GROUP_OPT = cli_option("--to", dest="to", metavar="<group>",
                          help="Destination node group (name or uuid)",
                          default=None, action="append",
                          completion_suggest=OPT_COMPL_ONE_NODEGROUP)


#: Options provided by all commands
COMMON_OPTS = [DEBUG_OPT]

# common options for creating instances. add and import then add their own
# NOTE(review): remaining list entries lost in extraction here.
COMMON_CREATE_OPTS = [
  FILESTORE_DRIVER_OPT,
def _ParseArgs(argv, commands, aliases):
  """Parser for the command line arguments.

  This function parses the arguments and returns the function which
  must be executed together with its (modified) arguments.

  @param argv: the command line
  @param commands: dictionary with special contents, see the design
      doc for cmdline handling
  @param aliases: dictionary with command aliases {'alias': 'target, ...}
  @return: tuple of (function, options, args); all three are None on
      parse error or when only usage/version information was printed

  """
  binary = "<command>"
  binary = argv[0].split("/")[-1]

  # "--version" short-circuits everything else
  if len(argv) > 1 and argv[1] == "--version":
    ToStdout("%s (ganeti %s) %s", binary, constants.VCS_VERSION,
             constants.RELEASE_VERSION)
    # Quit right away. That way we don't have to care about this special
    # argument. optparse.py does it the same.

  # Unknown or missing command: print usage plus a formatted command list
  if len(argv) < 2 or not (argv[1] in commands or
                           argv[1] in aliases):
    # let's do a nice thing
    sortedcmds = commands.keys()

    ToStdout("Usage: %s {command} [options...] [argument...]", binary)
    ToStdout("%s <command> --help to see details, or man %s", binary, binary)

    # compute the max line length for cmd + usage
    mlen = max([len(" %s" % cmd) for cmd in commands])
    mlen = min(60, mlen) # should not get here...

    # and format a nice command list
    ToStdout("Commands:")
    for cmd in sortedcmds:
      cmdstr = " %s" % (cmd,)
      help_text = commands[cmd][4]
      # wrap help text so the two-column layout fits in 79 chars
      help_lines = textwrap.wrap(help_text, 79 - 3 - mlen)
      ToStdout("%-*s - %s", mlen, cmdstr, help_lines.pop(0))
      for line in help_lines:
        ToStdout("%-*s %s", mlen, "", line)

    return None, None, None

  # get command, unalias it, and look it up in commands
  # NOTE(review): alias-resolution lines lost in extraction here.
  raise errors.ProgrammerError("Alias '%s' overrides an existing"
  if aliases[cmd] not in commands:
    raise errors.ProgrammerError("Alias '%s' maps to non-existing"
                                 " command '%s'" % (cmd, aliases[cmd]))

  # commands[cmd] is (function, args definition, parser options, usage, desc)
  func, args_def, parser_opts, usage, description = commands[cmd]
  parser = OptionParser(option_list=parser_opts + COMMON_OPTS,
                        description=description,
                        formatter=TitledHelpFormatter(),
                        usage="%%prog %s %s" % (cmd, usage))
  parser.disable_interspersed_args()
  options, args = parser.parse_args()

  if not _CheckArguments(cmd, args_def, args):
    return None, None, None

  return func, options, args
def _CheckArguments(cmd, args_def, args):
  """Verifies the arguments using the argument definition.

  Algorithm:

  1. Abort with error if values specified by user but none expected.

  1. For each argument in definition

  1. Keep running count of minimum number of values (min_count)
  1. Keep running count of maximum number of values (max_count)

  1. If it has an unlimited number of values

  1. Abort with error if it's not the last argument in the definition

  1. If last argument has limited number of values

  1. Abort with error if number of values doesn't match or is too large

  1. Abort with error if user didn't pass enough values (min_count)

  @param cmd: command name, used only for error messages
  @param args_def: list of argument definitions (with min/max attributes)
  @param args: actual arguments passed by the user
  @return: True if the arguments are valid, False otherwise

  """
  if args and not args_def:
    ToStderr("Error: Command %s expects no arguments", cmd)

  last_idx = len(args_def) - 1

  # Accumulate min_count/max_count; None means "unlimited"
  # NOTE(review): the branch bodies for the None cases were lost in extraction.
  for idx, arg in enumerate(args_def):
    if min_count is None:
    elif arg.min is not None:
      min_count += arg.min

    if max_count is None:
    elif arg.max is not None:
      max_count += arg.max

    check_max = (arg.max is not None)
    elif arg.max is None:
      # only the final argument may consume an unbounded number of values
      raise errors.ProgrammerError("Only the last argument can have max=None")

  # Command with exact number of arguments
  if (min_count is not None and max_count is not None and
      min_count == max_count and len(args) != min_count):
    ToStderr("Error: Command %s expects %d argument(s)", cmd, min_count)

  # Command with limited number of arguments
  if max_count is not None and len(args) > max_count:
    ToStderr("Error: Command %s expects only %d argument(s)",

  # Command with some required arguments
  if min_count is not None and len(args) < min_count:
    ToStderr("Error: Command %s expects at least %d argument(s)",
def SplitNodeOption(value):
  """Splits the value of a --node option.

  @param value: user-supplied value of the form "primary[:secondary]",
      or None
  @return: pair of (primary, secondary); secondary is None when no ":"
      separator is present

  """
  if not value or ":" not in value:
    return (value, None)
  return value.split(":", 1)
def CalculateOSNames(os_name, os_variants):
  """Calculates all the names an OS can be called, according to its variants.

  @type os_name: string
  @param os_name: base name of the os
  @type os_variants: list or None
  @param os_variants: list of supported variants
  @rtype: list
  @return: list of valid names

  """
  # Guard against a missing/empty variant list: without it the
  # comprehension below would raise TypeError for os_variants=None and
  # return an empty list for [], instead of the plain OS name.
  if os_variants:
    return ["%s+%s" % (os_name, v) for v in os_variants]
  else:
    return [os_name]
def ParseFields(selected, default):
  """Parses the values of "--field"-like options.

  @type selected: string or None
  @param selected: User-selected options
  @type default: list
  @param default: Default fields
  @rtype: list
  @return: the default fields when nothing was selected; default plus
      the extra fields when C{selected} starts with "+"; otherwise the
      comma-split user selection

  """
  # No selection at all: fall back to the defaults (this return was the
  # missing piece — without it the function fell through and crashed on
  # selected.startswith for None input).
  if selected is None:
    return default

  # A leading "+" means "append these fields to the defaults"
  if selected.startswith("+"):
    return default + selected[1:].split(",")

  return selected.split(",")
1465 UsesRPC = rpc.RunWithRPC
def AskUser(text, choices=None):
  """Ask the user a question.

  @param text: the question to ask
  @param choices: list with elements tuples (input_char, return_value,
      description); if not given, it will default to: [('y', True,
      'Perform the operation'), ('n', False, 'Do no do the operation')];
      note that the '?' char is reserved for help
  @return: one of the return values from the choices list; if input is
      not possible (i.e. not running with a tty, we return the last
      entry from the list)

  """
    choices = [("y", True, "Perform the operation"),
               ("n", False, "Do not perform the operation")]
  # Validate the choices structure before using it
  if not choices or not isinstance(choices, list):
    raise errors.ProgrammerError("Invalid choices argument to AskUser")
  for entry in choices:
    if not isinstance(entry, tuple) or len(entry) < 3 or entry[0] == "?":
      raise errors.ProgrammerError("Invalid choices element to AskUser")

  # Default answer: the last choice's return value (used when no tty)
  answer = choices[-1][1]

  # Re-wrap the question text to 70 columns, keeping existing newlines
  for line in text.splitlines():
    new_text.append(textwrap.fill(line, 70, replace_whitespace=False))
  text = "\n".join(new_text)

  # Interact directly with the controlling terminal, not stdin/stdout
  f = file("/dev/tty", "a+")

  chars = [entry[0] for entry in choices]
  chars[-1] = "[%s]" % chars[-1]

  # Map input characters to their return values
  maps = dict([(entry[0], entry[1]) for entry in choices])

  f.write("/".join(chars))

  # readline(2): read at most the answer char plus the newline
  line = f.readline(2).strip().lower()

  # "?" help output: list each choice with its description
  for entry in choices:
    f.write(" %s - %s\n" % (entry[0], entry[2]))
class JobSubmittedException(Exception):
  """Job was submitted, client should exit.

  This exception has one argument, the ID of the job that was
  submitted. The handler should print this ID.

  This is not an error, just a structured way to exit from clients.

  """
def SendJob(ops, cl=None):
  """Function to submit an opcode without waiting for the results.

  @type ops: list
  @param ops: list of opcodes
  @type cl: luxi.Client
  @param cl: the luxi client to use for communicating with the master;
      if None, a new client will be created
  @return: the job ID assigned by the master daemon

  """
  job_id = cl.SubmitJob(ops)
def GenericPollJob(job_id, cbs, report_cbs):
  """Generic job-polling function.

  Repeatedly waits for job changes, forwarding log messages to the
  reporting callbacks, until the job reaches a final state; then the
  per-opcode results are checked and either returned or converted into
  an exception.

  @type job_id: number
  @param job_id: Job ID
  @type cbs: Instance of L{JobPollCbBase}
  @param cbs: Data callbacks
  @type report_cbs: Instance of L{JobPollReportCbBase}
  @param report_cbs: Reporting callbacks

  """
  prev_job_info = None
  prev_logmsg_serial = None

    result = cbs.WaitForJobChangeOnce(job_id, ["status"], prev_job_info,
    # job not found, go away!
    raise errors.JobLost("Job with id %s lost" % job_id)

    if result == constants.JOB_NOTCHANGED:
      report_cbs.ReportNotChanged(job_id, status)

    # Split result, a tuple of (field values, log entries)
    (job_info, log_entries) = result
    (status, ) = job_info

    # Forward every new log entry and remember the highest serial seen
    for log_entry in log_entries:
      (serial, timestamp, log_type, message) = log_entry
      report_cbs.ReportLogMessage(job_id, serial, timestamp,
      prev_logmsg_serial = max(prev_logmsg_serial, serial)

    # TODO: Handle canceled and archived jobs
    elif status in (constants.JOB_STATUS_SUCCESS,
                    constants.JOB_STATUS_ERROR,
                    constants.JOB_STATUS_CANCELING,
                    constants.JOB_STATUS_CANCELED):

    prev_job_info = job_info

  # Job finished; fetch the final per-opcode status and results
  jobs = cbs.QueryJobs([job_id], ["status", "opstatus", "opresult"])
  raise errors.JobLost("Job with id %s lost" % job_id)

  status, opstatus, result = jobs[0]

  if status == constants.JOB_STATUS_SUCCESS:

  if status in (constants.JOB_STATUS_CANCELING, constants.JOB_STATUS_CANCELED):
    raise errors.OpExecError("Job was canceled")

  # Job failed: find the first failed opcode and raise its error
  for idx, (status, msg) in enumerate(zip(opstatus, result)):
    if status == constants.OP_STATUS_SUCCESS:
    elif status == constants.OP_STATUS_ERROR:
      errors.MaybeRaise(msg)

      # a later opcode failed while earlier ones succeeded
      raise errors.OpExecError("partial failure (opcode %d): %s" %

  raise errors.OpExecError(str(msg))

  # default failure mode
  raise errors.OpExecError(result)
class JobPollCbBase:
  """Base class for L{GenericPollJob} callbacks.

  Subclasses provide the data-retrieval side of job polling.

  """
    """Initializes this class.

  """
  def WaitForJobChangeOnce(self, job_id, fields,
                           prev_job_info, prev_log_serial):
    """Waits for changes on a job.

    """
    raise NotImplementedError()

  def QueryJobs(self, job_ids, fields):
    """Returns the selected fields for the selected job IDs.

    @type job_ids: list of numbers
    @param job_ids: Job IDs
    @type fields: list of strings
    @param fields: Fields

    """
    raise NotImplementedError()
class JobPollReportCbBase:
  """Base class for L{GenericPollJob} reporting callbacks.

  Subclasses decide how log messages and status updates are shown.

  """
    """Initializes this class.

  """
  def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
    """Handles a log message.

    """
    raise NotImplementedError()

  def ReportNotChanged(self, job_id, status):
    """Called for if a job hasn't changed in a while.

    @type job_id: number
    @param job_id: Job ID
    @type status: string or None
    @param status: Job status if available

    """
    raise NotImplementedError()
class _LuxiJobPollCb(JobPollCbBase):
  # Data callbacks backed by a luxi client connection to the master daemon.
  def __init__(self, cl):
    """Initializes this class.

    @param cl: luxi client used for all queries

    """
    JobPollCbBase.__init__(self)

  def WaitForJobChangeOnce(self, job_id, fields,
                           prev_job_info, prev_log_serial):
    """Waits for changes on a job.

    """
    # Delegate directly to the luxi client
    return self.cl.WaitForJobChangeOnce(job_id, fields,
                                        prev_job_info, prev_log_serial)

  def QueryJobs(self, job_ids, fields):
    """Returns the selected fields for the selected job IDs.

    """
    return self.cl.QueryJobs(job_ids, fields)
class FeedbackFnJobPollReportCb(JobPollReportCbBase):
  # Reporting callbacks that forward log messages to a feedback function.
  def __init__(self, feedback_fn):
    """Initializes this class.

    @param feedback_fn: callable receiving (timestamp, log_type, log_msg)
        tuples

    """
    JobPollReportCbBase.__init__(self)

    self.feedback_fn = feedback_fn

    assert callable(feedback_fn)

  def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
    """Handles a log message.

    """
    self.feedback_fn((timestamp, log_type, log_msg))

  def ReportNotChanged(self, job_id, status):
    """Called if a job hasn't changed in a while.

    """
class StdioJobPollReportCb(JobPollReportCbBase):
  # Reporting callbacks that print to stdout/stderr.
    """Initializes this class.

    """
    JobPollReportCbBase.__init__(self)

    # Flags to ensure each "waiting" notice is printed only once
    self.notified_queued = False
    self.notified_waitlock = False

  def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
    """Handles a log message.

    """
    ToStdout("%s %s", time.ctime(utils.MergeTime(timestamp)),
             FormatLogMessage(log_type, log_msg))

  def ReportNotChanged(self, job_id, status):
    """Called if a job hasn't changed in a while.

    """
    # Tell the user (once per state) why the job appears stalled
    if status == constants.JOB_STATUS_QUEUED and not self.notified_queued:
      ToStderr("Job %s is waiting in queue", job_id)
      self.notified_queued = True

    elif status == constants.JOB_STATUS_WAITING and not self.notified_waitlock:
      ToStderr("Job %s is trying to acquire all necessary locks", job_id)
      self.notified_waitlock = True
def FormatLogMessage(log_type, log_msg):
  """Formats a job message according to its type.

  Non-message entries are stringified first; the result is always run
  through L{utils.SafeEncode}.

  """
  if log_type == constants.ELOG_MESSAGE:
    text = log_msg
  else:
    text = str(log_msg)
  return utils.SafeEncode(text)
def PollJob(job_id, cl=None, feedback_fn=None, reporter=None):
  """Function to poll for the result of a job.

  @type job_id: job identified
  @param job_id: the job to poll for results
  @type cl: luxi.Client
  @param cl: the luxi client to use for communicating with the master;
      if None, a new client will be created
  @param feedback_fn: if given, used to build the reporter (mutually
      exclusive with C{reporter})
  @param reporter: reporting callbacks instance; mutually exclusive with
      C{feedback_fn}

  """
  if reporter is None:
      reporter = FeedbackFnJobPollReportCb(feedback_fn)
      reporter = StdioJobPollReportCb()
    # Both reporter and feedback_fn were given
    raise errors.ProgrammerError("Can't specify reporter and feedback function")

  return GenericPollJob(job_id, _LuxiJobPollCb(cl), reporter)
def SubmitOpCode(op, cl=None, feedback_fn=None, opts=None, reporter=None):
  """Legacy function to submit an opcode.

  This is just a simple wrapper over the construction of the processor
  instance. It should be extended to better handle feedback and
  interaction functions.

  @return: the result of the first (and only) opcode in the job

  """
  # Propagate generic options (debug, dry-run, priority) onto the opcode
  SetGenericOpcodeOpts([op], opts)

  job_id = SendJob([op], cl=cl)

  op_results = PollJob(job_id, cl=cl, feedback_fn=feedback_fn,

  return op_results[0]
def SubmitOrSend(op, opts, cl=None, feedback_fn=None):
  """Wrapper around SubmitOpCode or SendJob.

  This function will decide, based on the 'opts' parameter, whether to
  submit and wait for the result of the opcode (and return it), or
  whether to just send the job and print its identifier. It is used in
  order to simplify the implementation of the '--submit' option.

  It will also process the opcodes if we're sending the via SendJob
  (otherwise SubmitOpCode does it).

  @raise JobSubmittedException: in submit-only mode, after the job was
      sent (carries the job ID)

  """
  if opts and opts.submit_only:
    SetGenericOpcodeOpts(job, opts)
    job_id = SendJob(job, cl=cl)
    # Structured exit: the caller prints the job ID and stops
    raise JobSubmittedException(job_id)

  return SubmitOpCode(op, cl=cl, feedback_fn=feedback_fn, opts=opts)
def SetGenericOpcodeOpts(opcode_list, options):
  """Processor for generic options.

  This function updates the given opcodes based on generic command
  line options (like debug, dry-run, etc.).

  @param opcode_list: list of opcodes
  @param options: command line options or None
  @return: None (in-place modification)

  """
  for op in opcode_list:
    op.debug_level = options.debug
    # dry_run/priority are only set when the command defines the option
    if hasattr(options, "dry_run"):
      op.dry_run = options.dry_run
    if getattr(options, "priority", None) is not None:
      op.priority = _PRIONAME_TO_VALUE[options.priority]
# NOTE(review): the enclosing "def GetClient():" and its try statements
# were lost in extraction; the lines below are its interior.
  # TODO: Cache object?
    client = luxi.Client()
  except luxi.NoMasterError:
    ss = ssconf.SimpleStore()

    # Try to read ssconf file
    except errors.ConfigurationError:
      raise errors.OpPrereqError("Cluster not initialized or this machine is"
                                 " not part of a cluster")

    # Commands must run on the master node; point the user there otherwise
    master, myself = ssconf.GetMasterAndMyself(ss=ss)
    if master != myself:
      raise errors.OpPrereqError("This is not the master node, please connect"
                                 " to node '%s' and rerun the command" %
def FormatError(err):
  """Return a formatted error message for a given error.

  This function takes an exception instance and returns a tuple
  consisting of two values: first, the recommended exit code, and
  second, a string describing the error message (not
  newline-terminated).

  """
  # Dispatch on the exception type, most specific first
  if isinstance(err, errors.ConfigurationError):
    txt = "Corrupt configuration file: %s" % msg
    obuf.write(txt + "\n")
    obuf.write("Aborting.")
  elif isinstance(err, errors.HooksAbort):
    obuf.write("Failure: hooks execution failed:\n")
    for node, script, out in err.args[0]:
        obuf.write(" node: %s, script: %s, output: %s\n" %
                   (node, script, out))
        obuf.write(" node: %s, script: %s (no output)\n" %
  elif isinstance(err, errors.HooksFailure):
    obuf.write("Failure: hooks general failure: %s" % msg)
  elif isinstance(err, errors.ResolverError):
    # Distinguish "can't resolve myself" from a generic resolution failure
    this_host = netutils.Hostname.GetSysName()
    if err.args[0] == this_host:
      msg = "Failure: can't resolve my own hostname ('%s')"
      msg = "Failure: can't resolve hostname '%s'"
    obuf.write(msg % err.args[0])
  elif isinstance(err, errors.OpPrereqError):
    if len(err.args) == 2:
      # Two-argument form carries (details, error type)
      obuf.write("Failure: prerequisites not met for this"
                 " operation:\nerror type: %s, error details:\n%s" %
                 (err.args[1], err.args[0]))
      obuf.write("Failure: prerequisites not met for this"
                 " operation:\n%s" % msg)
  elif isinstance(err, errors.OpExecError):
    obuf.write("Failure: command execution error:\n%s" % msg)
  elif isinstance(err, errors.TagError):
    obuf.write("Failure: invalid tag(s) given:\n%s" % msg)
  elif isinstance(err, errors.JobQueueDrainError):
    obuf.write("Failure: the job queue is marked for drain and doesn't"
               " accept new requests\n")
  elif isinstance(err, errors.JobQueueFull):
    obuf.write("Failure: the job queue is full and doesn't accept new"
               " job submissions until old jobs are archived\n")
  elif isinstance(err, errors.TypeEnforcementError):
    obuf.write("Parameter Error: %s" % msg)
  elif isinstance(err, errors.ParameterError):
    obuf.write("Failure: unknown/wrong parameter name '%s'" % msg)
  elif isinstance(err, luxi.NoMasterError):
    obuf.write("Cannot communicate with the master daemon.\nIs it running"
               " and listening for connections?")
  elif isinstance(err, luxi.TimeoutError):
    obuf.write("Timeout while talking to the master daemon. Jobs might have"
               " been submitted and will continue to run even if the call"
               " timed out. Useful commands in this situation are \"gnt-job"
               " list\", \"gnt-job cancel\" and \"gnt-job watch\". Error:\n")
  elif isinstance(err, luxi.PermissionError):
    obuf.write("It seems you don't have permissions to connect to the"
               " master daemon.\nPlease retry as a different user.")
  elif isinstance(err, luxi.ProtocolError):
    obuf.write("Unhandled protocol error while talking to the master daemon:\n"
  elif isinstance(err, errors.JobLost):
    obuf.write("Error checking job status: %s" % msg)
  elif isinstance(err, errors.QueryFilterParseError):
    obuf.write("Error while parsing query filter: %s\n" % err.args[0])
    obuf.write("\n".join(err.GetDetails()))
  elif isinstance(err, errors.GenericError):
    obuf.write("Unhandled Ganeti error: %s" % msg)
  elif isinstance(err, JobSubmittedException):
    # Not an error: report the submitted job's ID
    obuf.write("JobID: %s\n" % err.args[0])
    obuf.write("Unhandled exception: %s" % msg)
  return retcode, obuf.getvalue().rstrip("\n")
def GenericMain(commands, override=None, aliases=None):
  """Generic main function for all the gnt-* commands.

  The arguments are:
    - commands: a dictionary with a special structure, see the design doc
      for command line handling.
    - override: if not None, we expect a dictionary with keys that will
      override command line options; this can be used to pass
      options from the scripts to generic functions
    - aliases: dictionary with command aliases {'alias': 'target, ...}

  @return: the exit code of the command

  """
  # save the program name and the entire command line for later logging
    binary = os.path.basename(sys.argv[0]) or sys.argv[0]

    if len(sys.argv) >= 2:
      binary += " " + sys.argv[1]
      old_cmdline = " ".join(sys.argv[2:])
    # sys.argv was empty/unusable
    binary = "<unknown program>"

    func, options, args = _ParseArgs(sys.argv, commands, aliases)
  except errors.ParameterError, err:
    result, err_msg = FormatError(err)

  if func is None: # parse error

  # Apply script-supplied option overrides on top of the parsed options
  if override is not None:
    for key, val in override.iteritems():
      setattr(options, key, val)

  utils.SetupLogging(constants.LOG_COMMANDS, binary, debug=options.debug,
                     stderr_logging=True)

    logging.info("run with arguments '%s'", old_cmdline)
    logging.info("run with no arguments")

    result = func(options, args)
  except (errors.GenericError, luxi.ProtocolError,
          JobSubmittedException), err:
    result, err_msg = FormatError(err)
    logging.exception("Error during command processing")
  except KeyboardInterrupt:
    result = constants.EXIT_FAILURE
    ToStderr("Aborted. Note that if the operation created any jobs, they"
             " might have been submitted and"
             " will continue to run in the background.")
  except IOError, err:
    if err.errno == errno.EPIPE:
      # our terminal went away, we'll exit
      sys.exit(constants.EXIT_FAILURE)
def ParseNicOption(optvalue):
  """Parses the value of the --net option(s).

  @param optvalue: list of (index, settings-dict) pairs as produced by
      the option parser
  @return: list of per-NIC parameter dictionaries, indexed by NIC number

  """
    # Highest NIC index determines how many NICs to allocate
    nic_max = max(int(nidx[0]) + 1 for nidx in optvalue)
  except (TypeError, ValueError), err:
    raise errors.OpPrereqError("Invalid NIC index passed: %s" % str(err))

  nics = [{}] * nic_max
  for nidx, ndict in optvalue:

    if not isinstance(ndict, dict):
      raise errors.OpPrereqError("Invalid nic/%d value: expected dict,"
                                 " got %s" % (nidx, ndict))

    # Validate/coerce the per-NIC parameter types in place
    utils.ForceDictType(ndict, constants.INIC_PARAMS_TYPES)
def GenericInstanceCreate(mode, opts, args):
  """Add an instance to the cluster via either creation or import.

  @param mode: constants.INSTANCE_CREATE or constants.INSTANCE_IMPORT
  @param opts: the command line options selected by the user
  @type args: list
  @param args: should contain only one element, the new instance name
  @rtype: int
  @return: the desired exit code

  """
  # --node may be "pnode" or "pnode:snode"
  (pnode, snode) = SplitNodeOption(opts.node)

  hypervisor, hvparams = opts.hypervisor

    nics = ParseNicOption(opts.nics)
  elif mode == constants.INSTANCE_CREATE:
    # default of one nic, all auto

  # Build the disk specification from the various mutually-exclusive options
  if opts.disk_template == constants.DT_DISKLESS:
    if opts.disks or opts.sd_size is not None:
      raise errors.OpPrereqError("Diskless instance but disk"
                                 " information passed")
    if (not opts.disks and not opts.sd_size
        and mode == constants.INSTANCE_CREATE):
      raise errors.OpPrereqError("No disk information specified")
    if opts.disks and opts.sd_size is not None:
      raise errors.OpPrereqError("Please use either the '--disk' or"
    if opts.sd_size is not None:
      # Legacy single-disk syntax: convert to the --disk format
      opts.disks = [(0, {constants.IDISK_SIZE: opts.sd_size})]

      # Highest disk index determines how many disks to allocate
      disk_max = max(int(didx[0]) + 1 for didx in opts.disks)
    except ValueError, err:
      raise errors.OpPrereqError("Invalid disk index passed: %s" % str(err))
    disks = [{}] * disk_max

    for didx, ddict in opts.disks:
      if not isinstance(ddict, dict):
        msg = "Invalid disk/%d value: expected dict, got %s" % (didx, ddict)
        raise errors.OpPrereqError(msg)
      elif constants.IDISK_SIZE in ddict:
        # "size" and "adopt" are mutually exclusive per disk
        if constants.IDISK_ADOPT in ddict:
          raise errors.OpPrereqError("Only one of 'size' and 'adopt' allowed"
                                     " (disk %d)" % didx)
          ddict[constants.IDISK_SIZE] = \
            utils.ParseUnit(ddict[constants.IDISK_SIZE])
        except ValueError, err:
          raise errors.OpPrereqError("Invalid disk size for disk %d: %s" %
      elif constants.IDISK_ADOPT in ddict:
        if mode == constants.INSTANCE_IMPORT:
          raise errors.OpPrereqError("Disk adoption not allowed for instance"
        # Adopted disks keep their existing size; record 0 as placeholder
        ddict[constants.IDISK_SIZE] = 0
        raise errors.OpPrereqError("Missing size or adoption source for"

  if opts.tags is not None:
    tags = opts.tags.split(",")

  # Type-check backend and hypervisor parameter dictionaries
  utils.ForceDictType(opts.beparams, constants.BES_PARAMETER_TYPES)
  utils.ForceDictType(hvparams, constants.HVS_PARAMETER_TYPES)

  # Mode-specific parameters
  if mode == constants.INSTANCE_CREATE:
    force_variant = opts.force_variant
    no_install = opts.no_install
    identify_defaults = False
  elif mode == constants.INSTANCE_IMPORT:
    force_variant = False
    src_node = opts.src_node
    src_path = opts.src_dir
    identify_defaults = opts.identify_defaults
    raise errors.ProgrammerError("Invalid creation mode %s" % mode)

  op = opcodes.OpInstanceCreate(instance_name=instance,
                                disk_template=opts.disk_template,
                                pnode=pnode, snode=snode,
                                ip_check=opts.ip_check,
                                name_check=opts.name_check,
                                wait_for_sync=opts.wait_for_sync,
                                file_storage_dir=opts.file_storage_dir,
                                file_driver=opts.file_driver,
                                iallocator=opts.iallocator,
                                hypervisor=hypervisor,
                                beparams=opts.beparams,
                                osparams=opts.osparams,
                                force_variant=force_variant,
                                no_install=no_install,
                                identify_defaults=identify_defaults)

  SubmitOrSend(op, opts)
class _RunWhileClusterStoppedHelper:
  """Helper class for L{RunWhileClusterStopped} to simplify state management

  """
  def __init__(self, feedback_fn, cluster_name, master_node, online_nodes):
    """Initializes this class.

    @type feedback_fn: callable
    @param feedback_fn: Feedback function
    @type cluster_name: string
    @param cluster_name: Cluster name
    @type master_node: string
    @param master_node Master node name
    @type online_nodes: list
    @param online_nodes: List of names of online nodes

    """
    self.feedback_fn = feedback_fn
    self.cluster_name = cluster_name
    self.master_node = master_node
    self.online_nodes = online_nodes

    # SSH runner for executing commands on remote nodes
    self.ssh = ssh.SshRunner(self.cluster_name)

    self.nonmaster_nodes = [name for name in online_nodes
                            if name != master_node]

    assert self.master_node not in self.nonmaster_nodes

  def _RunCmd(self, node_name, cmd):
    """Runs a command on the local or a remote machine.

    @type node_name: string
    @param node_name: Machine name
    @type cmd: list
    @param cmd: Command and its arguments
    @raise errors.OpExecError: if the command fails

    """
    if node_name is None or node_name == self.master_node:
      # No need to use SSH
      result = utils.RunCmd(cmd)
      result = self.ssh.Run(node_name, "root", utils.ShellQuoteArgs(cmd))

      errmsg = ["Failed to run command %s" % result.cmd]
        errmsg.append("on node %s" % node_name)
      errmsg.append(": exitcode %s and error %s" %
                    (result.exit_code, result.output))
      raise errors.OpExecError(" ".join(errmsg))

  def Call(self, fn, *args):
    """Call function while all daemons are stopped.

    @type fn: callable
    @param fn: Function to be called

    """
    # Pause watcher by acquiring an exclusive lock on watcher state file
    self.feedback_fn("Blocking watcher")
    watcher_block = utils.FileLock.Open(constants.WATCHER_LOCK_FILE)
      # TODO: Currently, this just blocks. There's no timeout.
      # TODO: Should it be a shared lock?
      watcher_block.Exclusive(blocking=True)

      # Stop master daemons, so that no new jobs can come in and all running
      self.feedback_fn("Stopping master daemons")
      self._RunCmd(None, [constants.DAEMON_UTIL, "stop-master"])

      # Stop daemons on all nodes
      for node_name in self.online_nodes:
        self.feedback_fn("Stopping daemons on %s" % node_name)
        self._RunCmd(node_name, [constants.DAEMON_UTIL, "stop-all"])

      # All daemons are shut down now
        return fn(self, *args)
      except Exception, err:
        _, errmsg = FormatError(err)
        logging.exception("Caught exception")
        self.feedback_fn(errmsg)

      # Start cluster again, master node last
      for node_name in self.nonmaster_nodes + [self.master_node]:
        self.feedback_fn("Starting daemons on %s" % node_name)
        self._RunCmd(node_name, [constants.DAEMON_UTIL, "start-all"])

      # Resume watcher
      watcher_block.Close()
def RunWhileClusterStopped(feedback_fn, fn, *args):
  """Calls a function while all cluster daemons are stopped.

  @type feedback_fn: callable
  @param feedback_fn: Feedback function
  @type fn: callable
  @param fn: Function to be called when daemons are stopped

  """
  feedback_fn("Gathering cluster information")

  # This ensures we're running on the master daemon
  (cluster_name, master_node) = \
    cl.QueryConfigValues(["cluster_name", "master_node"])

  online_nodes = GetOnlineNodes([], cl=cl)

  # Don't keep a reference to the client. The master daemon will go away.

  assert master_node in online_nodes

  return _RunWhileClusterStoppedHelper(feedback_fn, cluster_name, master_node,
                                       online_nodes).Call(fn, *args)
def GenerateTable(headers, fields, separator, data,
                  numfields=None, unitfields=None,
  """Prints a table with headers and different fields.

  @type headers: dict
  @param headers: dictionary mapping field names to headers for
      the table
  @type fields: list
  @param fields: the field names corresponding to each row in
      the data field
  @param separator: the separator to be used; if this is None,
      the default 'smart' algorithm is used which computes optimal
      field width, otherwise just the separator is used between
      each field
  @type data: list
  @param data: a list of lists, each sublist being one row to be output
  @type numfields: list
  @param numfields: a list with the fields that hold numeric
      values and thus should be right-aligned
  @type unitfields: list
  @param unitfields: a list with the fields that hold numeric
      values that should be formatted with the units field
  @type units: string or None
  @param units: the units we should use for formatting, or None for
      automatic choice (human-readable for non-separator usage, otherwise
      megabytes); this is a one-letter string

  """
  if numfields is None:
  if unitfields is None:

  numfields = utils.FieldSet(*numfields) # pylint: disable=W0142
  unitfields = utils.FieldSet(*unitfields) # pylint: disable=W0142

  # Build a printf-style format element per column
  for field in fields:
    if headers and field not in headers:
      # TODO: handle better unknown fields (either revert to old
      # style of raising exception, or deal more intelligently with
      # variable fields)
      headers[field] = field
    if separator is not None:
      format_fields.append("%s")
    elif numfields.Matches(field):
      # right-align numeric columns
      format_fields.append("%*s")
      format_fields.append("%-*s")

  if separator is None:
    mlens = [0 for name in fields]
    format_str = " ".join(format_fields)
    # escape "%" in the user-supplied separator before joining
    format_str = separator.replace("%", "%%").join(format_fields)

  # First pass: stringify values and track column widths
    for idx, val in enumerate(row):
      if unitfields.Matches(fields[idx]):
        except (TypeError, ValueError):
          val = row[idx] = utils.FormatUnit(val, units)
      val = row[idx] = str(val)
      if separator is None:
        mlens[idx] = max(mlens[idx], len(val))

  # Header row: widths must also accommodate the header text
    for idx, name in enumerate(fields):
      if separator is None:
        mlens[idx] = max(mlens[idx], len(hdr))
        args.append(mlens[idx])
    result.append(format_str % tuple(args))

  if separator is None:
    assert len(mlens) == len(fields)

    # the last column doesn't need padding unless it is right-aligned
    if fields and not numfields.Matches(fields[-1]):

    line = ["-" for _ in fields]
    for idx in range(len(fields)):
      if separator is None:
        args.append(mlens[idx])
      args.append(line[idx])
    result.append(format_str % tuple(args))
def _FormatBool(value):
  """Formats a boolean value as a string.

  @type value: bool
  @param value: the value to render
  @rtype: string
  @return: string representation of C{value}

  """
  # NOTE(review): the return statement of this function is not visible in
  # this revision of the file -- restore it from upstream before use.
#: Default formatting for query results; maps each field type to a tuple of
#: (formatting callback, align right).  L{constants.QFT_UNIT} is deliberately
#: absent: it needs the runtime-selected unit, see L{_GetColumnFormatter}.
_DEFAULT_FORMAT_QUERY = {
  constants.QFT_TEXT: (str, False),
  constants.QFT_BOOL: (_FormatBool, False),
  constants.QFT_NUMBER: (str, True),
  constants.QFT_TIMESTAMP: (utils.FormatTime, False),
  constants.QFT_OTHER: (str, False),
  constants.QFT_UNKNOWN: (str, False),
def _GetColumnFormatter(fdef, override, unit):
  """Returns formatting function for a field.

  @type fdef: L{objects.QueryFieldDefinition}
  @param fdef: definition of the field to format
  @type override: dict
  @param override: Dictionary for overriding field formatting functions,
    indexed by field name, contents like L{_DEFAULT_FORMAT_QUERY}
  @type unit: string
  @param unit: Unit used for formatting fields of type L{constants.QFT_UNIT}
  @rtype: tuple; (callable, bool)
  @return: Returns the function to format a value (takes one parameter) and a
    boolean for aligning the value on the right-hand side

  """
  # Caller-supplied overrides take precedence over the static table
  fmt = override.get(fdef.name, None)

  assert constants.QFT_UNIT not in _DEFAULT_FORMAT_QUERY

  if fdef.kind == constants.QFT_UNIT:
    # Can't keep this information in the static dictionary
    return (lambda value: utils.FormatUnit(value, unit), True)

  fmt = _DEFAULT_FORMAT_QUERY.get(fdef.kind, None)

  # NOTE(review): the early "return fmt" for successful lookups (both the
  # override above and the default table here) is not visible in this
  # revision; as written the fallback below would always be reached.
  raise NotImplementedError("Can't format column type '%s'" % fdef.kind)
class _QueryColumnFormatter:
  """Callable class for formatting fields of a query.

  """
  def __init__(self, fn, status_fn, verbose):
    """Initializes this class.

    @type fn: callable
    @param fn: Formatting function
    @type status_fn: callable
    @param status_fn: Function to report fields' status
    @type verbose: boolean
    @param verbose: whether to use verbose field descriptions or not

    """
    # NOTE(review): the "self._fn = fn" assignment is not visible in this
    # revision although __call__ below reads self._fn -- confirm upstream.
    self._status_fn = status_fn
    self._verbose = verbose

  def __call__(self, data):
    """Returns a field's string representation.

    @param data: a (status, value) tuple as produced by the query layer

    """
    (status, value) = data

    # Report the status to the callback supplied at construction time so
    # the caller can compute overall statistics
    self._status_fn(status)

    if status == constants.RS_NORMAL:
      return self._fn(value)

    # Abnormal statuses carry no payload by contract
    assert value is None, \
           "Found value %r for abnormal status %s" % (value, status)

    return FormatResultError(status, self._verbose)
def FormatResultError(status, verbose):
  """Formats result status other than L{constants.RS_NORMAL}.

  @param status: The result status
  @type verbose: boolean
  @param verbose: Whether to return the verbose text
  @rtype: string
  @return: Text of result status

  """
  assert status != constants.RS_NORMAL, \
         "FormatResultError called with status equal to constants.RS_NORMAL"

  # Look up the (verbose, terse) description pair for this status
  (verbose_text, normal_text) = constants.RSS_DESCRIPTION[status]

  # NOTE(review): the branch returning verbose_text/normal_text depending on
  # C{verbose} is not visible in this revision; as written the fallback
  # below would always raise -- confirm against upstream.
  raise NotImplementedError("Unknown status %s" % status)
def FormatQueryResult(result, unit=None, format_override=None, separator=None,
                      header=False, verbose=False):
  """Formats data in L{objects.QueryResponse}.

  @type result: L{objects.QueryResponse}
  @param result: result of query operation
  @type unit: string or None
  @param unit: Unit used for formatting fields of type L{constants.QFT_UNIT},
    see L{utils.text.FormatUnit}
  @type format_override: dict
  @param format_override: Dictionary for overriding field formatting functions,
    indexed by field name, contents like L{_DEFAULT_FORMAT_QUERY}
  @type separator: string or None
  @param separator: String used to separate fields
  @type header: bool
  @param header: Whether to output header row
  @type verbose: boolean
  @param verbose: whether to use verbose field descriptions or not
  @rtype: tuple
  @return: (overall status, formatted table); status is presumably one of
    the QR_* constants -- not all assignments are visible in this revision

  """
  if format_override is None:
    format_override = {}

  # Per-status counters used below to derive the overall status
  stats = dict.fromkeys(constants.RS_ALL, 0)

  # NOTE(review): the body of _RecordStatus and the initialization of
  # "columns" are not visible in this revision; _RecordStatus presumably
  # increments stats[status].
  def _RecordStatus(status):

  for fdef in result.fields:
    assert fdef.title and fdef.name
    (fn, align_right) = _GetColumnFormatter(fdef, format_override, unit)
    columns.append(TableColumn(fdef.title,
                               _QueryColumnFormatter(fn, _RecordStatus,

  table = FormatTable(result.data, columns, header, separator)

  # Collect statistics
  assert len(stats) == len(constants.RS_ALL)
  assert compat.all(count >= 0 for count in stats.values())

  # Determine overall status. If there was no data, unknown fields must be
  # detected via the field definitions.
  if (stats[constants.RS_UNKNOWN] or
      (not result.data and _GetUnknownFields(result.fields))):
  elif compat.any(count > 0 for key, count in stats.items()
                  if key != constants.RS_NORMAL):
    status = QR_INCOMPLETE

  return (status, table)
def _GetUnknownFields(fdefs):
  """Returns list of unknown fields included in C{fdefs}.

  @type fdefs: list of L{objects.QueryFieldDefinition}
  @rtype: list of L{objects.QueryFieldDefinition}
  @return: all definitions whose kind is L{constants.QFT_UNKNOWN}

  """
  return [fdef for fdef in fdefs
          if fdef.kind == constants.QFT_UNKNOWN]
def _WarnUnknownFields(fdefs):
  """Prints a warning to stderr if a query included unknown fields.

  @type fdefs: list of L{objects.QueryFieldDefinition}

  """
  unknown = _GetUnknownFields(fdefs)
  # NOTE(review): callers (see GenericList) use the return value of this
  # function as a boolean; the guard and return statement are not visible
  # in this revision -- confirm against upstream.
  ToStderr("Warning: Queried for unknown fields %s",
           utils.CommaJoin(fdef.name for fdef in unknown))
def GenericList(resource, fields, names, unit, separator, header, cl=None,
                format_override=None, verbose=False, force_filter=False):
  """Generic implementation for listing all items of a resource.

  @param resource: One of L{constants.QR_VIA_LUXI}
  @type fields: list of strings
  @param fields: List of fields to query for
  @type names: list of strings
  @param names: Names of items to query for
  @type unit: string or None
  @param unit: Unit used for formatting fields of type L{constants.QFT_UNIT} or
    None for automatic choice (human-readable for non-separator usage,
    otherwise megabytes); this is a one-letter string
  @type separator: string or None
  @param separator: String used to separate fields
  @type header: bool
  @param header: Whether to show header row
  @type force_filter: bool
  @param force_filter: Whether to always treat names as filter
  @type format_override: dict
  @param format_override: Dictionary for overriding field formatting functions,
    indexed by field name, contents like L{_DEFAULT_FORMAT_QUERY}
  @type verbose: boolean
  @param verbose: whether to use verbose field descriptions or not
  @param cl: luxi client to use; presumably a default client is created when
    None, but that code is not visible in this revision

  """
  # Build a query filter from the requested names
  filter_ = qlang.MakeFilter(names, force_filter)

  response = cl.Query(resource, fields, filter_)

  found_unknown = _WarnUnknownFields(response.fields)

  # NOTE(review): the remaining keyword arguments and closing parenthesis of
  # this call are not visible in this revision.
  (status, data) = FormatQueryResult(response, unit=unit, separator=separator,
                                     format_override=format_override,

  # The warning above and the computed status must agree on whether unknown
  # fields were queried
  assert ((found_unknown and status == QR_UNKNOWN) or
          (not found_unknown and status != QR_UNKNOWN))

  if status == QR_UNKNOWN:
    return constants.EXIT_UNKNOWN_FIELD

  # TODO: Should the list command fail if not all data could be collected?
  return constants.EXIT_SUCCESS
def GenericListFields(resource, fields, separator, header, cl=None):
  """Generic implementation for listing fields for a resource.

  @param resource: One of L{constants.QR_VIA_LUXI}
  @type fields: list of strings
  @param fields: List of fields to query for
  @type separator: string or None
  @param separator: String used to separate fields
  @type header: bool
  @param header: Whether to show header row
  @param cl: luxi client to use; presumably a default client is created when
    None, but that code is not visible in this revision
  @rtype: int
  @return: one of the constants.EXIT_* codes

  """
  response = cl.QueryFields(resource, fields)

  found_unknown = _WarnUnknownFields(response.fields)

  # NOTE(review): the "columns = [" opener and closing bracket around these
  # three entries are not visible in this revision.
    TableColumn("Name", str, False),
    TableColumn("Title", str, False),
    TableColumn("Description", str, False),

  # One row per queried field definition
  rows = [[fdef.name, fdef.title, fdef.doc] for fdef in response.fields]

  # NOTE(review): the loop body printing each line and the guard selecting
  # between the two exit codes are not visible in this revision.
  for line in FormatTable(rows, columns, header, separator):

    return constants.EXIT_UNKNOWN_FIELD

  return constants.EXIT_SUCCESS
2731 """Describes a column for L{FormatTable}.
  def __init__(self, title, fn, align_right):
    """Initializes this class.

    @type title: string
    @param title: Column title
    @type fn: callable
    @param fn: Formatting function
    @type align_right: bool
    @param align_right: Whether to align values on the right-hand side

    """
    # NOTE(review): the assignments of self.title and self.format are not
    # visible in this revision; L{FormatTable} reads both attributes.
    self.align_right = align_right
def _GetColFormatString(width, align_right):
  """Returns the format string for a field.

  @type width: int
  @param width: field width in characters
  @type align_right: bool
  @param align_right: whether the value should be right-aligned
  @rtype: string
  @return: a "%-<width>s"/"%<width>s" style format string

  """
  # NOTE(review): the computation of "sign" is not visible in this revision;
  # for %-formatting, a "-" flag left-aligns while no flag right-aligns.
  return "%%%s%ss" % (sign, width)
def FormatTable(rows, columns, header, separator):
  """Formats data as a table.

  @type rows: list of lists
  @param rows: Row data, one list per row
  @type columns: list of L{TableColumn}
  @param columns: Column descriptions
  @type header: bool
  @param header: Whether to show header row
  @type separator: string or None
  @param separator: String used to separate columns
  @rtype: list of strings
  @return: the formatted rows, one string per line

  """
  # NOTE(review): the "if header:"/"else:" headers around these two
  # initializations and the "for row in rows:" loop header are not visible
  # in this revision; indentation below follows the apparent structure.
    data = [[col.title for col in columns]]
    colwidth = [len(col.title) for col in columns]

    colwidth = [0 for _ in columns]

    assert len(row) == len(columns)

    formatted = [col.format(value) for value, col in zip(row, columns)]

    if separator is None:
      # Update column widths
      for idx, (oldwidth, value) in enumerate(zip(colwidth, formatted)):
        # Modifying a list's items while iterating is fine
        colwidth[idx] = max(oldwidth, len(value))

    data.append(formatted)

  if separator is not None:
    # Return early if a separator is used
    return [separator.join(row) for row in data]

  if columns and not columns[-1].align_right:
    # Avoid unnecessary spaces at end of line

  # Build format string
  fmt = " ".join([_GetColFormatString(width, col.align_right)
                  for col, width in zip(columns, colwidth)])

  return [fmt % tuple(row) for row in data]
def FormatTimestamp(ts):
  """Formats a given timestamp.

  @type ts: timestamp
  @param ts: a timeval-type timestamp, a tuple of seconds and microseconds

  @rtype: string
  @return: a string with the formatted timestamp

  """
  # NOTE(review): the fallback return for malformed input and the
  # "(sec, usec) = ts" unpacking are not visible in this revision.
  if not isinstance(ts, (tuple, list)) or len(ts) != 2:

  return time.strftime("%F %T", time.localtime(sec)) + ".%06d" % usec
def ParseTimespec(value):
  """Parse a time specification.

  The following suffixes will be recognized (see the C{suffix_map} table,
  not visible in this revision -- presumably s/m/h/d/w):

  Without any suffix, the value will be taken to be in seconds.

  @type value: string
  @param value: the time specification to parse
  @rtype: int
  @return: the value in seconds
  @raise errors.OpPrereqError: for empty or invalid specifications

  """
  # NOTE(review): the guard "if not value:", the suffix_map definition, the
  # try/except around the suffix-less conversion, the suffix stripping and
  # the final return are not visible in this revision.
    raise errors.OpPrereqError("Empty time specification passed")

  if value[-1] not in suffix_map:

    except (TypeError, ValueError):
      raise errors.OpPrereqError("Invalid time specification '%s'" % value)

  multiplier = suffix_map[value[-1]]

  if not value: # no data left after stripping the suffix
    raise errors.OpPrereqError("Invalid time specification (only"

    value = int(value) * multiplier
  except (TypeError, ValueError):
    raise errors.OpPrereqError("Invalid time specification '%s'" % value)
def GetOnlineNodes(nodes, cl=None, nowarn=False, secondary_ips=False,
                   filter_master=False, nodegroup=None):
  """Returns the names of online nodes.

  This function will also log a warning on stderr with the names of
  the offline nodes that were skipped (unless C{nowarn} is set).

  @param nodes: if not empty, use only this subset of nodes (minus the
    offline ones)
  @param cl: if not None, luxi client to use
  @type nowarn: boolean
  @param nowarn: by default, this function will output a note with the
    offline nodes that are skipped; if this parameter is True the
    note is not displayed
  @type secondary_ips: boolean
  @param secondary_ips: if True, return the secondary IPs instead of the
    names, useful for doing network traffic over the replication interface
    (if any)
  @type filter_master: boolean
  @param filter_master: if True, do not return the master node in the list
    (useful in coordination with secondary_ips where we cannot check our
    node name against the list)
  @type nodegroup: string
  @param nodegroup: If set, only return nodes in this node group

  """
  # NOTE(review): initialization of "filter_" (and of "cl" when None), plus
  # the guards around the next two append calls, are not visible in this
  # revision of the file.
    filter_.append(qlang.MakeSimpleFilter("name", nodes))

  if nodegroup is not None:
    # Match on either group name or group UUID
    filter_.append([qlang.OP_OR, [qlang.OP_EQUAL, "group", nodegroup],
                    [qlang.OP_EQUAL, "group.uuid", nodegroup]])

    filter_.append([qlang.OP_NOT, [qlang.OP_TRUE, "master"]])

  # Combine sub-filters with a logical AND when more than one is present
  if len(filter_) > 1:
    final_filter = [qlang.OP_AND] + filter_

    assert len(filter_) == 1
    final_filter = filter_[0]

  result = cl.Query(constants.QR_NODE, ["name", "offline", "sip"], final_filter)

  # Row layout: ((status, name), (status, offline), (status, sip))
  def _IsOffline(row):
    (_, (_, offline), _) = row

    ((_, name), _, _) = row

    (_, _, (_, sip)) = row

  (offline, online) = compat.partition(result.data, _IsOffline)

  if offline and not nowarn:
    ToStderr("Note: skipping offline node(s): %s" %
             utils.CommaJoin(map(_GetName, offline)))

  # NOTE(review): the selection of "fn" (name vs. secondary IP extractor)
  # is not visible in this revision.
  return map(fn, online)
def _ToStream(stream, txt, *args):
  """Write a message to a stream, bypassing the logging system

  @type stream: file object
  @param stream: the file to which we should write
  @type txt: str
  @param txt: the message

  """
  # NOTE(review): the surrounding try block, the no-args write branch and
  # the flush call are not visible in this revision.
      stream.write(txt % args)

  except IOError, err:
    if err.errno == errno.EPIPE:
      # our terminal went away, we'll exit
      sys.exit(constants.EXIT_FAILURE)
def ToStdout(txt, *args):
  """Write a message to stdout only, bypassing the logging system

  This is just a wrapper over _ToStream.

  @type txt: str
  @param txt: the message

  """
  _ToStream(sys.stdout, txt, *args)
def ToStderr(txt, *args):
  """Write a message to stderr only, bypassing the logging system

  This is just a wrapper over _ToStream.

  @type txt: str
  @param txt: the message

  """
  _ToStream(sys.stderr, txt, *args)
class JobExecutor(object):
  """Class which manages the submission and execution of multiple jobs.

  Note that instances of this class should not be reused between
  GetResults() calls.

  """
  def __init__(self, cl=None, verbose=True, opts=None, feedback_fn=None):
    # NOTE(review): initialization of self.queue, self.jobs, self.cl and
    # self.opts is not visible in this revision; other methods read them.
    self.verbose = verbose
    self.feedback_fn = feedback_fn
    # Monotonic counter used to restore submission order in GetResults()
    self._counter = itertools.count()

  def _IfName(name, fmt):
    """Helper function for formatting name.

    """

  def QueueJob(self, name, *ops):
    """Record a job for later submit.

    @type name: string
    @param name: a description of the job, will be used in WaitJobSet

    """
    SetGenericOpcodeOpts(ops, self.opts)
    self.queue.append((self._counter.next(), name, ops))

  def AddJobId(self, name, status, job_id):
    """Adds a job ID to the internal queue.

    """
    self.jobs.append((self._counter.next(), status, job_id, name))

  def SubmitPending(self, each=False):
    """Submit all pending jobs.

    @type each: boolean
    @param each: if True, submit jobs one by one instead of in one batch

    """
    # NOTE(review): the "if each:" branch structure and the initialization
    # of "results" are not visible in this revision.
      for (_, _, ops) in self.queue:
        # SubmitJob will remove the success status, but raise an exception if
        # the submission fails, so we'll notice that anyway.
        results.append([True, self.cl.SubmitJob(ops)])

      results = self.cl.SubmitManyJobs([ops for (_, _, ops) in self.queue])

    for ((status, data), (idx, name, _)) in zip(results, self.queue):
      self.jobs.append((idx, status, data, name))

  def _ChooseJob(self):
    """Choose a non-waiting/queued job to poll next.

    """
    assert self.jobs, "_ChooseJob called with empty job list"

    # NOTE(review): the continuation of this call (fields argument and
    # closing parenthesis) is not visible in this revision.
    result = self.cl.QueryJobs([i[2] for i in self.jobs[:_CHOOSE_BATCH]],

    for job_data, status in zip(self.jobs, result):
      if (isinstance(status, list) and status and
          status[0] in (constants.JOB_STATUS_QUEUED,
                        constants.JOB_STATUS_WAITING,
                        constants.JOB_STATUS_CANCELING)):
        # job is still present and waiting

      # good candidate found (either running job or lost job)
      self.jobs.remove(job_data)

    # no job found; fall back to the first one
    return self.jobs.pop(0)

  def GetResults(self):
    """Wait for and return the results of all jobs.

    @rtype: list
    @return: list of tuples (success, job results), in the same order
        as the submitted jobs; if a job has failed, instead of the result
        there will be the error message

    """
    # NOTE(review): guards around SubmitPending/ToStdout, the "results"
    # initialization, the polling loop header, the try statement and the
    # success-branch assignments are not visible in this revision.
      self.SubmitPending()

    ok_jobs = [row[2] for row in self.jobs if row[1]]

      ToStdout("Submitted jobs %s", utils.CommaJoin(ok_jobs))

    # first, remove any non-submitted jobs
    self.jobs, failures = compat.partition(self.jobs, lambda x: x[1])
    for idx, _, jid, name in failures:
      ToStderr("Failed to submit job%s: %s", self._IfName(name, " for %s"), jid)
      results.append((idx, False, jid))

      (idx, _, jid, name) = self._ChooseJob()
      ToStdout("Waiting for job %s%s ...", jid, self._IfName(name, " for %s"))

        job_result = PollJob(jid, cl=self.cl, feedback_fn=self.feedback_fn)

      except errors.JobLost, err:
        _, job_result = FormatError(err)
        ToStderr("Job %s%s has been archived, cannot check its result",
                 jid, self._IfName(name, " for %s"))

      except (errors.GenericError, luxi.ProtocolError), err:
        _, job_result = FormatError(err)

        # the error message will always be shown, verbose or not
        ToStderr("Job %s%s has failed: %s",
                 jid, self._IfName(name, " for %s"), job_result)

      results.append((idx, success, job_result))

    # sort based on the index, then drop it
    results = [i[1:] for i in results]

  def WaitOrShow(self, wait):
    """Wait for job results or only print the job IDs.

    @type wait: boolean
    @param wait: whether to wait or not

    """
    # NOTE(review): the "if wait:"/"else:" structure and the per-job status
    # branches are not visible in this revision.
      return self.GetResults()

      self.SubmitPending()
      for _, status, result, name in self.jobs:
          ToStdout("%s: %s", result, name)

          ToStderr("Failure for %s: %s", name, result)

      return [row[1:3] for row in self.jobs]
def FormatParameterDict(buf, param_dict, actual, level=1):
  """Formats a parameter dictionary.

  Writes one "- key: value" line per entry of C{actual} into C{buf}; keys
  missing from C{param_dict} are shown with their default from C{actual}.

  @type buf: L{StringIO}
  @param buf: the buffer into which to write
  @type param_dict: dict
  @param param_dict: the own parameters
  @type actual: dict
  @param actual: the current parameter set (including defaults)
  @type level: int
  @param level: Level of indent

  """
  prefix = level * " "
  for name in sorted(actual):
    fallback = "default (%s)" % actual[name]
    buf.write("%s- %s: %s\n" % (prefix, name, param_dict.get(name, fallback)))
3166 def ConfirmOperation(names, list_type, text, extra=""):
3167 """Ask the user to confirm an operation on a list of list_type.
3169 This function is used to request confirmation for doing an operation
3170 on a given list of list_type.
3173 @param names: the list of names that we display when
3174 we ask for confirmation
3175 @type list_type: str
3176 @param list_type: Human readable name for elements in the list (e.g. nodes)
3178 @param text: the operation that the user should confirm
3180 @return: True or False depending on user's confirmation.
3184 msg = ("The %s will operate on %d %s.\n%s"
3185 "Do you want to continue?" % (text, count, list_type, extra))
3186 affected = (("\nAffected %s:\n" % list_type) +
3187 "\n".join([" %s" % name for name in names]))
3189 choices = [("y", True, "Yes, execute the %s" % text),
3190 ("n", False, "No, abort the %s" % text)]
3193 choices.insert(1, ("v", "v", "View the list of affected %s" % list_type))
3196 question = msg + affected
3198 choice = AskUser(question, choices)
3201 choice = AskUser(msg + affected, choices)