4 # Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011 Google Inc.
6 # This program is free software; you can redistribute it and/or modify
7 # it under the terms of the GNU General Public License as published by
8 # the Free Software Foundation; either version 2 of the License, or
9 # (at your option) any later version.
11 # This program is distributed in the hope that it will be useful, but
12 # WITHOUT ANY WARRANTY; without even the implied warranty of
13 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 # General Public License for more details.
16 # You should have received a copy of the GNU General Public License
17 # along with this program; if not, write to the Free Software
18 # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
22 """Module dealing with command line parsing"""
30 from cStringIO import StringIO
32 from ganeti import utils
33 from ganeti import errors
34 from ganeti import constants
35 from ganeti import opcodes
36 from ganeti import luxi
37 from ganeti import ssconf
38 from ganeti import rpc
39 from ganeti import ssh
40 from ganeti import compat
41 from ganeti import netutils
42 from ganeti import qlang
44 from optparse import (OptionParser, TitledHelpFormatter,
45 Option, OptionValueError)
49 # Command line options
61 "CLUSTER_DOMAIN_SECRET_OPT",
78 "FILESTORE_DRIVER_OPT",
83 "GLOBAL_SHARED_FILEDIR_OPT",
88 "DEFAULT_IALLOCATOR_OPT",
89 "IDENTIFY_DEFAULTS_OPT",
91 "IGNORE_FAILURES_OPT",
93 "IGNORE_REMOVE_FAILURES_OPT",
94 "IGNORE_SECONDARIES_OPT",
98 "MAINTAIN_NODE_HEALTH_OPT",
101 "MIGRATION_MODE_OPT",
103 "NEW_CLUSTER_CERT_OPT",
104 "NEW_CLUSTER_DOMAIN_SECRET_OPT",
105 "NEW_CONFD_HMAC_KEY_OPT",
109 "NODE_FORCE_JOIN_OPT",
111 "NODE_PLACEMENT_OPT",
115 "NODRBD_STORAGE_OPT",
121 "NOMODIFY_ETCHOSTS_OPT",
122 "NOMODIFY_SSH_SETUP_OPT",
128 "NOSSH_KEYCHECK_OPT",
139 "PREALLOC_WIPE_DISKS_OPT",
140 "PRIMARY_IP_VERSION_OPT",
145 "REMOVE_INSTANCE_OPT",
153 "SHUTDOWN_TIMEOUT_OPT",
168 # Generic functions for CLI programs
171 "GenericInstanceCreate",
177 "JobSubmittedException",
179 "RunWhileClusterStopped",
183 # Formatting functions
184 "ToStderr", "ToStdout",
187 "FormatParameterDict",
196 # command line options support infrastructure
197 "ARGS_MANY_INSTANCES",
216 "OPT_COMPL_INST_ADD_NODES",
217 "OPT_COMPL_MANY_NODES",
218 "OPT_COMPL_ONE_IALLOCATOR",
219 "OPT_COMPL_ONE_INSTANCE",
220 "OPT_COMPL_ONE_NODE",
221 "OPT_COMPL_ONE_NODEGROUP",
227 "COMMON_CREATE_OPTS",
#: Priorities (sorted), as (name, opcode priority value) pairs
_PRIORITY_NAMES = [
  ("low", constants.OP_PRIO_LOW),
  ("normal", constants.OP_PRIO_NORMAL),
  ("high", constants.OP_PRIO_HIGH),
  ]

#: Priority dictionary for easier lookup
# TODO: Replace this and _PRIORITY_NAMES with a single sorted dictionary once
# we migrate to Python 2.6
_PRIONAME_TO_VALUE = dict(_PRIORITY_NAMES)

# Query result status for clients
(QR_NORMAL,
 QR_UNKNOWN,
 QR_INCOMPLETE) = range(3)
class _Argument:
  """Base class for command line argument descriptors.

  @ivar min: minimum number of occurrences of the argument
  @ivar max: maximum number of occurrences (None means unlimited)

  """
  def __init__(self, min=0, max=None): # pylint: disable-msg=W0622
    self.min = min
    self.max = max

  def __repr__(self):
    return ("<%s min=%s max=%s>" %
            (self.__class__.__name__, self.min, self.max))
class ArgSuggest(_Argument):
  """Suggesting argument.

  Value can be any of the ones passed to the constructor.

  """
  # pylint: disable-msg=W0622
  def __init__(self, min=0, max=None, choices=None):
    _Argument.__init__(self, min=min, max=max)
    # list of suggested values offered for completion; any value is accepted
    self.choices = choices

  def __repr__(self):
    return ("<%s min=%s max=%s choices=%r>" %
            (self.__class__.__name__, self.min, self.max, self.choices))
class ArgChoice(ArgSuggest):
  """Choice argument.

  Value can be any of the ones passed to the constructor. Like L{ArgSuggest},
  but value must be one of the choices.

  """
class ArgUnknown(_Argument):
  """Unknown argument to program (e.g. determined at runtime).

  """


class ArgInstance(_Argument):
  """Instances argument.

  """


class ArgNode(_Argument):
  """Node argument.

  """


class ArgGroup(_Argument):
  """Node group argument.

  """


class ArgJobId(_Argument):
  """Job ID argument.

  """


class ArgFile(_Argument):
  """File path argument.

  """


class ArgCommand(_Argument):
  """Command argument.

  """


class ArgHost(_Argument):
  """Host argument.

  """


class ArgOs(_Argument):
  """OS argument.

  """
ARGS_MANY_INSTANCES = [ArgInstance()]
ARGS_MANY_NODES = [ArgNode()]
ARGS_MANY_GROUPS = [ArgGroup()]
ARGS_ONE_INSTANCE = [ArgInstance(min=1, max=1)]
ARGS_ONE_NODE = [ArgNode(min=1, max=1)]
# Bug fix: was built from ArgInstance, which made single-group commands
# complete/validate against instances instead of node groups
ARGS_ONE_GROUP = [ArgGroup(min=1, max=1)]
ARGS_ONE_OS = [ArgOs(min=1, max=1)]
def _ExtractTagsObject(opts, args):
  """Extract the tag type object.

  Note that this function will modify its args parameter.

  @param opts: parsed command line options; must carry a C{tag_type}
      attribute set by the command definition
  @param args: positional arguments; for node/instance tags the first
      element is popped and used as the object name
  @return: a (kind, name) tuple
  @raise errors.ProgrammerError: if C{tag_type} is missing or unhandled
  @raise errors.OpPrereqError: if a name argument is required but missing

  """
  if not hasattr(opts, "tag_type"):
    raise errors.ProgrammerError("tag_type not passed to _ExtractTagsObject")
  kind = opts.tag_type
  if kind == constants.TAG_CLUSTER:
    # the cluster is a singleton, its own kind doubles as its name
    retval = kind, kind
  elif kind == constants.TAG_NODE or kind == constants.TAG_INSTANCE:
    if not args:
      raise errors.OpPrereqError("no arguments passed to the command")
    name = args.pop(0)
    retval = kind, name
  else:
    raise errors.ProgrammerError("Unhandled tag type '%s'" % kind)
  return retval
def _ExtendTags(opts, args):
  """Extend the args if a source file has been given.

  This function will extend the tags with the contents of the file
  passed in the 'tags_source' attribute of the opts parameter. A file
  named '-' will be replaced by stdin.

  @param opts: parsed command line options; C{tags_source} is the file
      name or None
  @param args: list of tags, extended in place with one stripped tag
      per line of the source file

  """
  fname = opts.tags_source
  if fname is None:
    return
  if fname == "-":
    new_fh = sys.stdin
  else:
    new_fh = open(fname, "r")
  new_data = []
  try:
    # we don't use the nice 'new_data = [line.strip() for line in fh]'
    # because of python bug 1633941
    while True:
      line = new_fh.readline()
      if not line:
        break
      new_data.append(line.strip())
  finally:
    # don't close stdin, we don't own it
    if new_fh is not sys.stdin:
      new_fh.close()
  args.extend(new_data)
def ListTags(opts, args):
  """List the tags on a given object.

  This is a generic implementation that knows how to deal with all
  three cases of tag objects (cluster, node, instance). The opts
  argument is expected to contain a tag_type field denoting what
  object type we work on.

  @param opts: parsed command line options
  @param args: positional arguments (object name for node/instance)

  """
  kind, name = _ExtractTagsObject(opts, args)
  cl = GetClient()
  result = cl.QueryTags(kind, name)
  # sort for stable, readable output
  result = list(result)
  result.sort()
  for tag in result:
    ToStdout(tag)
def AddTags(opts, args):
  """Add tags on a given object.

  This is a generic implementation that knows how to deal with all
  three cases of tag objects (cluster, node, instance). The opts
  argument is expected to contain a tag_type field denoting what
  object type we work on.

  @param opts: parsed command line options
  @param args: positional arguments; remaining elements are the tags
  @raise errors.OpPrereqError: if no tags are given

  """
  kind, name = _ExtractTagsObject(opts, args)
  _ExtendTags(opts, args)
  if not args:
    raise errors.OpPrereqError("No tags to be added")
  op = opcodes.OpTagsSet(kind=kind, name=name, tags=args)
  SubmitOpCode(op, opts=opts)
def RemoveTags(opts, args):
  """Remove tags from a given object.

  This is a generic implementation that knows how to deal with all
  three cases of tag objects (cluster, node, instance). The opts
  argument is expected to contain a tag_type field denoting what
  object type we work on.

  @param opts: parsed command line options
  @param args: positional arguments; remaining elements are the tags
  @raise errors.OpPrereqError: if no tags are given

  """
  kind, name = _ExtractTagsObject(opts, args)
  _ExtendTags(opts, args)
  if not args:
    raise errors.OpPrereqError("No tags to be removed")
  op = opcodes.OpTagsDel(kind=kind, name=name, tags=args)
  SubmitOpCode(op, opts=opts)
452 def check_unit(option, opt, value): # pylint: disable-msg=W0613
453 """OptParsers custom converter for units.
457 return utils.ParseUnit(value)
458 except errors.UnitParseError, err:
459 raise OptionValueError("option %s: %s" % (opt, err))
def _SplitKeyVal(opt, data):
  """Convert a KeyVal string into a dict.

  This function will convert a key=val[,...] string into a dict. Empty
  values will be converted specially: keys which have the prefix 'no_'
  will have the value=False and the prefix stripped, the others will
  have value=True.

  @type opt: string
  @param opt: a string holding the option name for which we process the
      data, used in building error messages
  @type data: string
  @param data: a string of the format key=val,key=val,...
  @rtype: dict
  @return: {key=val, key=val}
  @raises errors.ParameterError: if there are duplicate keys

  """
  kv_dict = {}
  if data:
    for elem in utils.UnescapeAndSplit(data, sep=","):
      if "=" in elem:
        key, val = elem.split("=", 1)
      else:
        # no value given: interpret the key's prefix instead
        if elem.startswith(NO_PREFIX):
          key, val = elem[len(NO_PREFIX):], False
        elif elem.startswith(UN_PREFIX):
          key, val = elem[len(UN_PREFIX):], None
        else:
          key, val = elem, True
      if key in kv_dict:
        raise errors.ParameterError("Duplicate key '%s' in option %s" %
                                    (key, opt))
      kv_dict[key] = val
  return kv_dict
def check_ident_key_val(option, opt, value): # pylint: disable-msg=W0613
  """Custom parser for ident:key=val,key=val options.

  This will store the parsed values as a tuple (ident, {key: val}). As such,
  multiple uses of this option via action=append is possible.

  @raise errors.ParameterError: if options are passed together with a
      'no_'/'-' prefixed ident (which denotes removal of the whole group)

  """
  if ":" not in value:
    ident, rest = value, ''
  else:
    ident, rest = value.split(":", 1)

  if ident.startswith(NO_PREFIX):
    if rest:
      msg = "Cannot pass options when removing parameter groups: %s" % value
      raise errors.ParameterError(msg)
    retval = (ident[len(NO_PREFIX):], False)
  elif ident.startswith(UN_PREFIX):
    if rest:
      msg = "Cannot pass options when removing parameter groups: %s" % value
      raise errors.ParameterError(msg)
    retval = (ident[len(UN_PREFIX):], None)
  else:
    kv_dict = _SplitKeyVal(opt, rest)
    retval = (ident, kv_dict)
  return retval
def check_key_val(option, opt, value): # pylint: disable-msg=W0613
  """Custom parser class for key=val,key=val options.

  This will store the parsed values as a dict {key: val}.

  """
  return _SplitKeyVal(opt, value)
def check_bool(option, opt, value): # pylint: disable-msg=W0613
  """Custom parser for yes/no options.

  This will store the parsed value as either True or False.

  @raise errors.ParameterError: for any value other than the recognized
      true/false spellings

  """
  value = value.lower()
  if value == constants.VALUE_FALSE or value == "no":
    return False
  elif value == constants.VALUE_TRUE or value == "yes":
    return True
  else:
    raise errors.ParameterError("Invalid boolean value '%s'" % value)
# completion_suggestion is normally a list. Using numeric values not evaluating
# to False for dynamic completion.
(OPT_COMPL_MANY_NODES,
 OPT_COMPL_ONE_NODE,
 OPT_COMPL_ONE_INSTANCE,
 OPT_COMPL_ONE_OS,
 OPT_COMPL_ONE_IALLOCATOR,
 OPT_COMPL_INST_ADD_NODES,
 OPT_COMPL_ONE_NODEGROUP) = range(100, 107)

#: All dynamic-completion markers, for validity checks
OPT_COMPL_ALL = frozenset([
  OPT_COMPL_MANY_NODES,
  OPT_COMPL_ONE_NODE,
  OPT_COMPL_ONE_INSTANCE,
  OPT_COMPL_ONE_OS,
  OPT_COMPL_ONE_IALLOCATOR,
  OPT_COMPL_INST_ADD_NODES,
  OPT_COMPL_ONE_NODEGROUP,
  ])
class CliOption(Option):
  """Custom option class for optparse.

  Adds the C{completion_suggest} attribute and registers the custom
  value types/checkers defined above.

  """
  ATTRS = Option.ATTRS + [
    "completion_suggest",
    ]
  TYPES = Option.TYPES + (
    "identkeyval",
    "keyval",
    "unit",
    "bool",
    )
  TYPE_CHECKER = Option.TYPE_CHECKER.copy()
  TYPE_CHECKER["identkeyval"] = check_ident_key_val
  TYPE_CHECKER["keyval"] = check_key_val
  TYPE_CHECKER["unit"] = check_unit
  TYPE_CHECKER["bool"] = check_bool


# optparse.py sets make_option, so we do it for our own option class, too
cli_option = CliOption
598 DEBUG_OPT = cli_option("-d", "--debug", default=0, action="count",
599 help="Increase debugging level")
601 NOHDR_OPT = cli_option("--no-headers", default=False,
602 action="store_true", dest="no_headers",
603 help="Don't display column headers")
605 SEP_OPT = cli_option("--separator", default=None,
606 action="store", dest="separator",
607 help=("Separator between output fields"
608 " (defaults to one space)"))
610 USEUNITS_OPT = cli_option("--units", default=None,
611 dest="units", choices=('h', 'm', 'g', 't'),
612 help="Specify units for output (one of h/m/g/t)")
614 FIELDS_OPT = cli_option("-o", "--output", dest="output", action="store",
615 type="string", metavar="FIELDS",
616 help="Comma separated list of output fields")
618 FORCE_OPT = cli_option("-f", "--force", dest="force", action="store_true",
619 default=False, help="Force the operation")
621 CONFIRM_OPT = cli_option("--yes", dest="confirm", action="store_true",
622 default=False, help="Do not require confirmation")
624 IGNORE_OFFLINE_OPT = cli_option("--ignore-offline", dest="ignore_offline",
625 action="store_true", default=False,
626 help=("Ignore offline nodes and do as much"
629 TAG_SRC_OPT = cli_option("--from", dest="tags_source",
630 default=None, help="File with tag names")
632 SUBMIT_OPT = cli_option("--submit", dest="submit_only",
633 default=False, action="store_true",
634 help=("Submit the job and return the job ID, but"
635 " don't wait for the job to finish"))
637 SYNC_OPT = cli_option("--sync", dest="do_locking",
638 default=False, action="store_true",
639 help=("Grab locks while doing the queries"
640 " in order to ensure more consistent results"))
DRY_RUN_OPT = cli_option("--dry-run", default=False,
                         action="store_true", dest="dry_run",
                         # fixed help-text typo: "verify it it" -> "verify if it"
                         help=("Do not execute the operation, just run the"
                               " check steps and verify if it could be"
                               " executed"))
648 VERBOSE_OPT = cli_option("-v", "--verbose", default=False,
650 help="Increase the verbosity of the operation")
652 DEBUG_SIMERR_OPT = cli_option("--debug-simulate-errors", default=False,
653 action="store_true", dest="simulate_errors",
654 help="Debugging option that makes the operation"
655 " treat most runtime checks as failed")
657 NWSYNC_OPT = cli_option("--no-wait-for-sync", dest="wait_for_sync",
658 default=True, action="store_false",
659 help="Don't wait for sync (DANGEROUS!)")
661 DISK_TEMPLATE_OPT = cli_option("-t", "--disk-template", dest="disk_template",
662 help="Custom disk setup (diskless, file,"
664 default=None, metavar="TEMPL",
665 choices=list(constants.DISK_TEMPLATES))
667 NONICS_OPT = cli_option("--no-nics", default=False, action="store_true",
668 help="Do not create any network cards for"
671 FILESTORE_DIR_OPT = cli_option("--file-storage-dir", dest="file_storage_dir",
672 help="Relative path under default cluster-wide"
673 " file storage dir to store file-based disks",
674 default=None, metavar="<DIR>")
676 FILESTORE_DRIVER_OPT = cli_option("--file-driver", dest="file_driver",
677 help="Driver to use for image files",
678 default="loop", metavar="<DRIVER>",
679 choices=list(constants.FILE_DRIVER))
681 IALLOCATOR_OPT = cli_option("-I", "--iallocator", metavar="<NAME>",
682 help="Select nodes for the instance automatically"
683 " using the <NAME> iallocator plugin",
684 default=None, type="string",
685 completion_suggest=OPT_COMPL_ONE_IALLOCATOR)
687 DEFAULT_IALLOCATOR_OPT = cli_option("-I", "--default-iallocator",
689 help="Set the default instance allocator plugin",
690 default=None, type="string",
691 completion_suggest=OPT_COMPL_ONE_IALLOCATOR)
693 OS_OPT = cli_option("-o", "--os-type", dest="os", help="What OS to run",
695 completion_suggest=OPT_COMPL_ONE_OS)
697 OSPARAMS_OPT = cli_option("-O", "--os-parameters", dest="osparams",
698 type="keyval", default={},
699 help="OS parameters")
701 FORCE_VARIANT_OPT = cli_option("--force-variant", dest="force_variant",
702 action="store_true", default=False,
703 help="Force an unknown variant")
705 NO_INSTALL_OPT = cli_option("--no-install", dest="no_install",
706 action="store_true", default=False,
707 help="Do not install the OS (will"
710 BACKEND_OPT = cli_option("-B", "--backend-parameters", dest="beparams",
711 type="keyval", default={},
712 help="Backend parameters")
714 HVOPTS_OPT = cli_option("-H", "--hypervisor-parameters", type="keyval",
715 default={}, dest="hvparams",
716 help="Hypervisor parameters")
718 HYPERVISOR_OPT = cli_option("-H", "--hypervisor-parameters", dest="hypervisor",
719 help="Hypervisor and hypervisor options, in the"
720 " format hypervisor:option=value,option=value,...",
721 default=None, type="identkeyval")
723 HVLIST_OPT = cli_option("-H", "--hypervisor-parameters", dest="hvparams",
724 help="Hypervisor and hypervisor options, in the"
725 " format hypervisor:option=value,option=value,...",
726 default=[], action="append", type="identkeyval")
728 NOIPCHECK_OPT = cli_option("--no-ip-check", dest="ip_check", default=True,
729 action="store_false",
730 help="Don't check that the instance's IP"
733 NONAMECHECK_OPT = cli_option("--no-name-check", dest="name_check",
734 default=True, action="store_false",
735 help="Don't check that the instance's name"
738 NET_OPT = cli_option("--net",
739 help="NIC parameters", default=[],
740 dest="nics", action="append", type="identkeyval")
742 DISK_OPT = cli_option("--disk", help="Disk parameters", default=[],
743 dest="disks", action="append", type="identkeyval")
745 DISKIDX_OPT = cli_option("--disks", dest="disks", default=None,
746 help="Comma-separated list of disks"
747 " indices to act on (e.g. 0,2) (optional,"
748 " defaults to all disks)")
750 OS_SIZE_OPT = cli_option("-s", "--os-size", dest="sd_size",
751 help="Enforces a single-disk configuration using the"
752 " given disk size, in MiB unless a suffix is used",
753 default=None, type="unit", metavar="<size>")
755 IGNORE_CONSIST_OPT = cli_option("--ignore-consistency",
756 dest="ignore_consistency",
757 action="store_true", default=False,
758 help="Ignore the consistency of the disks on"
761 NONLIVE_OPT = cli_option("--non-live", dest="live",
762 default=True, action="store_false",
763 help="Do a non-live migration (this usually means"
764 " freeze the instance, save the state, transfer and"
765 " only then resume running on the secondary node)")
MIGRATION_MODE_OPT = cli_option("--migration-mode", dest="migration_mode",
                                default=None,
                                choices=list(constants.HT_MIGRATION_MODES),
                                # fixed unbalanced parenthesis in help text
                                help="Override default migration mode (choose"
                                " either live or non-live)")
773 NODE_PLACEMENT_OPT = cli_option("-n", "--node", dest="node",
774 help="Target node and optional secondary node",
775 metavar="<pnode>[:<snode>]",
776 completion_suggest=OPT_COMPL_INST_ADD_NODES)
778 NODE_LIST_OPT = cli_option("-n", "--node", dest="nodes", default=[],
779 action="append", metavar="<node>",
780 help="Use only this node (can be used multiple"
781 " times, if not given defaults to all nodes)",
782 completion_suggest=OPT_COMPL_ONE_NODE)
784 NODEGROUP_OPT = cli_option("-g", "--node-group",
786 help="Node group (name or uuid)",
787 metavar="<nodegroup>",
788 default=None, type="string",
789 completion_suggest=OPT_COMPL_ONE_NODEGROUP)
791 SINGLE_NODE_OPT = cli_option("-n", "--node", dest="node", help="Target node",
793 completion_suggest=OPT_COMPL_ONE_NODE)
795 NOSTART_OPT = cli_option("--no-start", dest="start", default=True,
796 action="store_false",
797 help="Don't start the instance after creation")
799 SHOWCMD_OPT = cli_option("--show-cmd", dest="show_command",
800 action="store_true", default=False,
801 help="Show command instead of executing it")
CLEANUP_OPT = cli_option("--cleanup", dest="cleanup",
                         default=False, action="store_true",
                         help="Instead of performing the migration, try to"
                         " recover from a failed cleanup. This is safe"
                         " to run even if the instance is healthy, but it"
                         # fixed doubled space between "and" and "disrupt"
                         " will create extra replication traffic and"
                         " disrupt briefly the replication (like during the"
                         " migration itself)")
812 STATIC_OPT = cli_option("-s", "--static", dest="static",
813 action="store_true", default=False,
814 help="Only show configuration data, not runtime data")
816 ALL_OPT = cli_option("--all", dest="show_all",
817 default=False, action="store_true",
818 help="Show info on all instances on the cluster."
819 " This can take a long time to run, use wisely")
821 SELECT_OS_OPT = cli_option("--select-os", dest="select_os",
822 action="store_true", default=False,
823 help="Interactive OS reinstall, lists available"
824 " OS templates for selection")
826 IGNORE_FAILURES_OPT = cli_option("--ignore-failures", dest="ignore_failures",
827 action="store_true", default=False,
828 help="Remove the instance from the cluster"
829 " configuration even if there are failures"
830 " during the removal process")
832 IGNORE_REMOVE_FAILURES_OPT = cli_option("--ignore-remove-failures",
833 dest="ignore_remove_failures",
834 action="store_true", default=False,
835 help="Remove the instance from the"
836 " cluster configuration even if there"
837 " are failures during the removal"
840 REMOVE_INSTANCE_OPT = cli_option("--remove-instance", dest="remove_instance",
841 action="store_true", default=False,
842 help="Remove the instance from the cluster")
844 DST_NODE_OPT = cli_option("-n", "--target-node", dest="dst_node",
845 help="Specifies the new node for the instance",
846 metavar="NODE", default=None,
847 completion_suggest=OPT_COMPL_ONE_NODE)
849 NEW_SECONDARY_OPT = cli_option("-n", "--new-secondary", dest="dst_node",
850 help="Specifies the new secondary node",
851 metavar="NODE", default=None,
852 completion_suggest=OPT_COMPL_ONE_NODE)
854 ON_PRIMARY_OPT = cli_option("-p", "--on-primary", dest="on_primary",
855 default=False, action="store_true",
856 help="Replace the disk(s) on the primary"
857 " node (only for the drbd template)")
859 ON_SECONDARY_OPT = cli_option("-s", "--on-secondary", dest="on_secondary",
860 default=False, action="store_true",
861 help="Replace the disk(s) on the secondary"
862 " node (only for the drbd template)")
864 AUTO_PROMOTE_OPT = cli_option("--auto-promote", dest="auto_promote",
865 default=False, action="store_true",
866 help="Lock all nodes and auto-promote as needed"
869 AUTO_REPLACE_OPT = cli_option("-a", "--auto", dest="auto",
870 default=False, action="store_true",
871 help="Automatically replace faulty disks"
872 " (only for the drbd template)")
874 IGNORE_SIZE_OPT = cli_option("--ignore-size", dest="ignore_size",
875 default=False, action="store_true",
876 help="Ignore current recorded size"
877 " (useful for forcing activation when"
878 " the recorded size is wrong)")
880 SRC_NODE_OPT = cli_option("--src-node", dest="src_node", help="Source node",
882 completion_suggest=OPT_COMPL_ONE_NODE)
884 SRC_DIR_OPT = cli_option("--src-dir", dest="src_dir", help="Source directory",
887 SECONDARY_IP_OPT = cli_option("-s", "--secondary-ip", dest="secondary_ip",
888 help="Specify the secondary ip for the node",
889 metavar="ADDRESS", default=None)
891 READD_OPT = cli_option("--readd", dest="readd",
892 default=False, action="store_true",
893 help="Readd old node after replacing it")
895 NOSSH_KEYCHECK_OPT = cli_option("--no-ssh-key-check", dest="ssh_key_check",
896 default=True, action="store_false",
897 help="Disable SSH key fingerprint checking")
899 NODE_FORCE_JOIN_OPT = cli_option("--force-join", dest="force_join",
900 default=False, action="store_true",
901 help="Force the joining of a node,"
902 " needed when merging clusters")
904 MC_OPT = cli_option("-C", "--master-candidate", dest="master_candidate",
905 type="bool", default=None, metavar=_YORNO,
906 help="Set the master_candidate flag on the node")
908 OFFLINE_OPT = cli_option("-O", "--offline", dest="offline", metavar=_YORNO,
909 type="bool", default=None,
910 help=("Set the offline flag on the node"
911 " (cluster does not communicate with offline"
914 DRAINED_OPT = cli_option("-D", "--drained", dest="drained", metavar=_YORNO,
915 type="bool", default=None,
916 help=("Set the drained flag on the node"
917 " (excluded from allocation operations)"))
919 CAPAB_MASTER_OPT = cli_option("--master-capable", dest="master_capable",
920 type="bool", default=None, metavar=_YORNO,
921 help="Set the master_capable flag on the node")
923 CAPAB_VM_OPT = cli_option("--vm-capable", dest="vm_capable",
924 type="bool", default=None, metavar=_YORNO,
925 help="Set the vm_capable flag on the node")
927 ALLOCATABLE_OPT = cli_option("--allocatable", dest="allocatable",
928 type="bool", default=None, metavar=_YORNO,
929 help="Set the allocatable flag on a volume")
931 NOLVM_STORAGE_OPT = cli_option("--no-lvm-storage", dest="lvm_storage",
932 help="Disable support for lvm based instances"
934 action="store_false", default=True)
936 ENABLED_HV_OPT = cli_option("--enabled-hypervisors",
937 dest="enabled_hypervisors",
938 help="Comma-separated list of hypervisors",
939 type="string", default=None)
941 NIC_PARAMS_OPT = cli_option("-N", "--nic-parameters", dest="nicparams",
942 type="keyval", default={},
943 help="NIC parameters")
945 CP_SIZE_OPT = cli_option("-C", "--candidate-pool-size", default=None,
946 dest="candidate_pool_size", type="int",
947 help="Set the candidate pool size")
949 VG_NAME_OPT = cli_option("--vg-name", dest="vg_name",
950 help=("Enables LVM and specifies the volume group"
951 " name (cluster-wide) for disk allocation"
952 " [%s]" % constants.DEFAULT_VG),
953 metavar="VG", default=None)
955 YES_DOIT_OPT = cli_option("--yes-do-it", dest="yes_do_it",
956 help="Destroy cluster", action="store_true")
958 NOVOTING_OPT = cli_option("--no-voting", dest="no_voting",
959 help="Skip node agreement check (dangerous)",
960 action="store_true", default=False)
962 MAC_PREFIX_OPT = cli_option("-m", "--mac-prefix", dest="mac_prefix",
963 help="Specify the mac prefix for the instance IP"
964 " addresses, in the format XX:XX:XX",
968 MASTER_NETDEV_OPT = cli_option("--master-netdev", dest="master_netdev",
969 help="Specify the node interface (cluster-wide)"
970 " on which the master IP address will be added"
971 " (cluster init default: %s)" %
972 constants.DEFAULT_BRIDGE,
976 GLOBAL_FILEDIR_OPT = cli_option("--file-storage-dir", dest="file_storage_dir",
977 help="Specify the default directory (cluster-"
978 "wide) for storing the file-based disks [%s]" %
979 constants.DEFAULT_FILE_STORAGE_DIR,
981 default=constants.DEFAULT_FILE_STORAGE_DIR)
983 GLOBAL_SHARED_FILEDIR_OPT = cli_option("--shared-file-storage-dir",
984 dest="shared_file_storage_dir",
985 help="Specify the default directory (cluster-"
986 "wide) for storing the shared file-based"
988 constants.DEFAULT_SHARED_FILE_STORAGE_DIR,
990 default=constants.DEFAULT_SHARED_FILE_STORAGE_DIR)
992 NOMODIFY_ETCHOSTS_OPT = cli_option("--no-etc-hosts", dest="modify_etc_hosts",
993 help="Don't modify /etc/hosts",
994 action="store_false", default=True)
996 NOMODIFY_SSH_SETUP_OPT = cli_option("--no-ssh-init", dest="modify_ssh_setup",
997 help="Don't initialize SSH keys",
998 action="store_false", default=True)
1000 ERROR_CODES_OPT = cli_option("--error-codes", dest="error_codes",
1001 help="Enable parseable error messages",
1002 action="store_true", default=False)
1004 NONPLUS1_OPT = cli_option("--no-nplus1-mem", dest="skip_nplusone_mem",
1005 help="Skip N+1 memory redundancy tests",
1006 action="store_true", default=False)
1008 REBOOT_TYPE_OPT = cli_option("-t", "--type", dest="reboot_type",
1009 help="Type of reboot: soft/hard/full",
1010 default=constants.INSTANCE_REBOOT_HARD,
1012 choices=list(constants.REBOOT_TYPES))
1014 IGNORE_SECONDARIES_OPT = cli_option("--ignore-secondaries",
1015 dest="ignore_secondaries",
1016 default=False, action="store_true",
1017 help="Ignore errors from secondaries")
1019 NOSHUTDOWN_OPT = cli_option("--noshutdown", dest="shutdown",
1020 action="store_false", default=True,
1021 help="Don't shutdown the instance (unsafe)")
1023 TIMEOUT_OPT = cli_option("--timeout", dest="timeout", type="int",
1024 default=constants.DEFAULT_SHUTDOWN_TIMEOUT,
1025 help="Maximum time to wait")
1027 SHUTDOWN_TIMEOUT_OPT = cli_option("--shutdown-timeout",
1028 dest="shutdown_timeout", type="int",
1029 default=constants.DEFAULT_SHUTDOWN_TIMEOUT,
1030 help="Maximum time to wait for instance shutdown")
1032 INTERVAL_OPT = cli_option("--interval", dest="interval", type="int",
1034 help=("Number of seconds between repetions of the"
1037 EARLY_RELEASE_OPT = cli_option("--early-release",
1038 dest="early_release", default=False,
1039 action="store_true",
1040 help="Release the locks on the secondary"
1043 NEW_CLUSTER_CERT_OPT = cli_option("--new-cluster-certificate",
1044 dest="new_cluster_cert",
1045 default=False, action="store_true",
1046 help="Generate a new cluster certificate")
1048 RAPI_CERT_OPT = cli_option("--rapi-certificate", dest="rapi_cert",
1050 help="File containing new RAPI certificate")
1052 NEW_RAPI_CERT_OPT = cli_option("--new-rapi-certificate", dest="new_rapi_cert",
1053 default=None, action="store_true",
1054 help=("Generate a new self-signed RAPI"
1057 NEW_CONFD_HMAC_KEY_OPT = cli_option("--new-confd-hmac-key",
1058 dest="new_confd_hmac_key",
1059 default=False, action="store_true",
1060 help=("Create a new HMAC key for %s" %
CLUSTER_DOMAIN_SECRET_OPT = cli_option("--cluster-domain-secret",
                                       dest="cluster_domain_secret",
                                       default=None,
                                       # fixed duplicated word "new new"
                                       help=("Load new cluster domain"
                                             " secret from file"))
1069 NEW_CLUSTER_DOMAIN_SECRET_OPT = cli_option("--new-cluster-domain-secret",
1070 dest="new_cluster_domain_secret",
1071 default=False, action="store_true",
1072 help=("Create a new cluster domain"
1075 USE_REPL_NET_OPT = cli_option("--use-replication-network",
1076 dest="use_replication_network",
1077 help="Whether to use the replication network"
1078 " for talking to the nodes",
1079 action="store_true", default=False)
1081 MAINTAIN_NODE_HEALTH_OPT = \
1082 cli_option("--maintain-node-health", dest="maintain_node_health",
1083 metavar=_YORNO, default=None, type="bool",
1084 help="Configure the cluster to automatically maintain node"
1085 " health, by shutting down unknown instances, shutting down"
1086 " unknown DRBD devices, etc.")
1088 IDENTIFY_DEFAULTS_OPT = \
1089 cli_option("--identify-defaults", dest="identify_defaults",
1090 default=False, action="store_true",
1091 help="Identify which saved instance parameters are equal to"
1092 " the current cluster defaults and set them as such, instead"
1093 " of marking them as overridden")
1095 UIDPOOL_OPT = cli_option("--uid-pool", default=None,
1096 action="store", dest="uid_pool",
1097 help=("A list of user-ids or user-id"
1098 " ranges separated by commas"))
1100 ADD_UIDS_OPT = cli_option("--add-uids", default=None,
1101 action="store", dest="add_uids",
1102 help=("A list of user-ids or user-id"
1103 " ranges separated by commas, to be"
1104 " added to the user-id pool"))
1106 REMOVE_UIDS_OPT = cli_option("--remove-uids", default=None,
1107 action="store", dest="remove_uids",
1108 help=("A list of user-ids or user-id"
1109 " ranges separated by commas, to be"
1110 " removed from the user-id pool"))
1112 RESERVED_LVS_OPT = cli_option("--reserved-lvs", default=None,
1113 action="store", dest="reserved_lvs",
1114 help=("A comma-separated list of reserved"
1115 " logical volumes names, that will be"
1116 " ignored by cluster verify"))
1118 ROMAN_OPT = cli_option("--roman",
1119 dest="roman_integers", default=False,
1120 action="store_true",
1121 help="Use roman numbers for positive integers")
1123 DRBD_HELPER_OPT = cli_option("--drbd-usermode-helper", dest="drbd_helper",
1124 action="store", default=None,
1125 help="Specifies usermode helper for DRBD")
1127 NODRBD_STORAGE_OPT = cli_option("--no-drbd-storage", dest="drbd_storage",
1128 action="store_false", default=True,
1129 help="Disable support for DRBD")
1131 PRIMARY_IP_VERSION_OPT = \
1132 cli_option("--primary-ip-version", default=constants.IP4_VERSION,
1133 action="store", dest="primary_ip_version",
1134 metavar="%d|%d" % (constants.IP4_VERSION,
1135 constants.IP6_VERSION),
1136 help="Cluster-wide IP version for primary IP")
1138 PRIORITY_OPT = cli_option("--priority", default=None, dest="priority",
1139 metavar="|".join(name for name, _ in _PRIORITY_NAMES),
1140 choices=_PRIONAME_TO_VALUE.keys(),
1141 help="Priority for opcode processing")
1143 HID_OS_OPT = cli_option("--hidden", dest="hidden",
1144 type="bool", default=None, metavar=_YORNO,
1145 help="Sets the hidden flag on the OS")
1147 BLK_OS_OPT = cli_option("--blacklisted", dest="blacklisted",
1148 type="bool", default=None, metavar=_YORNO,
1149 help="Sets the blacklisted flag on the OS")
1151 PREALLOC_WIPE_DISKS_OPT = cli_option("--prealloc-wipe-disks", default=None,
1152 type="bool", metavar=_YORNO,
1153 dest="prealloc_wipe_disks",
1154 help=("Wipe disks prior to instance"
1157 NODE_PARAMS_OPT = cli_option("--node-parameters", dest="ndparams",
1158 type="keyval", default=None,
1159 help="Node parameters")
1161 ALLOC_POLICY_OPT = cli_option("--alloc-policy", dest="alloc_policy",
1162 action="store", metavar="POLICY", default=None,
1163 help="Allocation policy for the node group")
1165 NODE_POWERED_OPT = cli_option("--node-powered", default=None,
1166 type="bool", metavar=_YORNO,
1167 dest="node_powered",
1168 help="Specify if the SoR for node is powered")
1170 OOB_TIMEOUT_OPT = cli_option("--oob-timeout", dest="oob_timeout", type="int",
1171 default=constants.OOB_TIMEOUT,
1172 help="Maximum time to wait for out-of-band helper")
1174 POWER_DELAY_OPT = cli_option("--power-delay", dest="power_delay", type="float",
1175 default=constants.OOB_POWER_DELAY,
1176 help="Time in seconds to wait between power-ons")
1179 #: Options provided by all commands
1180 COMMON_OPTS = [DEBUG_OPT]
1182 # common options for creating instances. add and import then add their own
1184 COMMON_CREATE_OPTS = [
1189 FILESTORE_DRIVER_OPT,
def _ParseArgs(argv, commands, aliases):
  """Parser for the command line arguments.

  This function parses the arguments and returns the function which
  must be executed together with its (modified) arguments.

  @param argv: the command line
  @param commands: dictionary with special contents, see the design
      doc for cmdline handling
  @param aliases: dictionary with command aliases {'alias': 'target, ...}
  @return: a (function, options, args) tuple on success; (None, None,
      None) when the command line could not be handled (a usage summary
      was printed instead)

  """
  # Default shown in usage output when no binary name is available;
  # normally replaced with the basename of the invoked program below.
  binary = "<command>"
  binary = argv[0].split("/")[-1]

  if len(argv) > 1 and argv[1] == "--version":
    ToStdout("%s (ganeti %s) %s", binary, constants.VCS_VERSION,
             constants.RELEASE_VERSION)
    # Quit right away. That way we don't have to care about this special
    # argument. optparse.py does it the same.

  # Unknown or missing command: print a usage summary listing all
  # available commands and return the "could not parse" sentinel.
  if len(argv) < 2 or not (argv[1] in commands or
                           argv[1] in aliases):
    # let's do a nice thing
    sortedcmds = commands.keys()

    ToStdout("Usage: %s {command} [options...] [argument...]", binary)
    ToStdout("%s <command> --help to see details, or man %s", binary, binary)

    # compute the max line length for cmd + usage
    mlen = max([len(" %s" % cmd) for cmd in commands])
    mlen = min(60, mlen) # should not get here...

    # and format a nice command list
    ToStdout("Commands:")
    for cmd in sortedcmds:
      cmdstr = " %s" % (cmd,)
      # commands[cmd] is a tuple; index 4 is the short description
      help_text = commands[cmd][4]
      help_lines = textwrap.wrap(help_text, 79 - 3 - mlen)
      ToStdout("%-*s - %s", mlen, cmdstr, help_lines.pop(0))
      for line in help_lines:
        ToStdout("%-*s %s", mlen, "", line)

    return None, None, None

  # get command, unalias it, and look it up in commands
  # An alias must never shadow an existing command.
  raise errors.ProgrammerError("Alias '%s' overrides an existing"
  if aliases[cmd] not in commands:
    raise errors.ProgrammerError("Alias '%s' maps to non-existing"
                                 " command '%s'" % (cmd, aliases[cmd]))

  # Build an optparse parser for the selected command and run it.
  func, args_def, parser_opts, usage, description = commands[cmd]
  parser = OptionParser(option_list=parser_opts + COMMON_OPTS,
                        description=description,
                        formatter=TitledHelpFormatter(),
                        usage="%%prog %s %s" % (cmd, usage))
  parser.disable_interspersed_args()
  options, args = parser.parse_args()

  if not _CheckArguments(cmd, args_def, args):
    return None, None, None

  return func, options, args
def _CheckArguments(cmd, args_def, args):
  """Verifies the arguments using the argument definition.

  Algorithm:

  1. Abort with error if values specified by user but none expected.

  1. For each argument in definition

    1. Keep running count of minimum number of values (min_count)

    1. Keep running count of maximum number of values (max_count)

    1. If it has an unlimited number of values

      1. Abort with error if it's not the last argument in the definition

  1. If last argument has limited number of values

    1. Abort with error if number of values doesn't match or is too large

  1. Abort with error if user didn't pass enough values (min_count)

  """
  # Command takes no arguments at all, yet the user supplied some.
  if args and not args_def:
    ToStderr("Error: Command %s expects no arguments", cmd)

  last_idx = len(args_def) - 1

  for idx, arg in enumerate(args_def):
    # Accumulate minimum and maximum value counts over all argument
    # definitions; None acts as "not yet set"/"unbounded".
    if min_count is None:
    elif arg.min is not None:
      min_count += arg.min

    if max_count is None:
    elif arg.max is not None:
      max_count += arg.max

    # Only the last argument definition may be unbounded (max=None).
    check_max = (arg.max is not None)

    elif arg.max is None:
      raise errors.ProgrammerError("Only the last argument can have max=None")

  # Command with exact number of arguments
  if (min_count is not None and max_count is not None and
      min_count == max_count and len(args) != min_count):
    ToStderr("Error: Command %s expects %d argument(s)", cmd, min_count)

  # Command with limited number of arguments
  if max_count is not None and len(args) > max_count:
    ToStderr("Error: Command %s expects only %d argument(s)",

  # Command with some required arguments
  if min_count is not None and len(args) < min_count:
    ToStderr("Error: Command %s expects at least %d argument(s)",
def SplitNodeOption(value):
  """Splits the value of a --node option.

  @type value: string or None
  @param value: option value; "pnode:snode" selects both a primary and
      a secondary node
  @return: a (primary, secondary) pair; secondary is None when the
      value contains no colon separator

  """
  # Guard clause: nothing to split when the value is empty/None or has
  # no separator.
  if not value or ':' not in value:
    return (value, None)
  # Split only on the first colon so node names stay intact.
  return value.split(':', 1)
def CalculateOSNames(os_name, os_variants):
  """Calculates all the names an OS can be called, according to its variants.

  @type os_name: string
  @param os_name: base name of the os
  @type os_variants: list or None
  @param os_variants: list of supported variants
  @rtype: list
  @return: list of valid names

  """
  # An OS without variants is known only under its base name; the
  # guard for this case was missing, making the list comprehension
  # fail on None.
  if not os_variants:
    return [os_name]
  return ['%s+%s' % (os_name, v) for v in os_variants]
def ParseFields(selected, default):
  """Parses the values of "--field"-like options.

  @type selected: string or None
  @param selected: User-selected options
  @type default: list
  @param default: Default fields
  @rtype: list
  @return: the default fields when nothing was selected; the defaults
      plus the user's additions when the selection starts with "+";
      otherwise exactly the comma-separated selected fields

  """
  # No selection at all: use the defaults unchanged (this return was
  # missing, leaving the None case to fall through and crash).
  if selected is None:
    return default

  # A leading "+" means "append to the defaults", not "replace them".
  if selected.startswith("+"):
    return default + selected[1:].split(",")

  return selected.split(",")
1401 UsesRPC = rpc.RunWithRPC
def AskUser(text, choices=None):
  """Ask the user a question.

  @param text: the question to ask
  @param choices: list with elements tuples (input_char, return_value,
      description); if not given, it will default to: [('y', True,
      'Perform the operation'), ('n', False, 'Do not perform the
      operation')]; note that the '?' char is reserved for help

  @return: one of the return values from the choices list; if input is
      not possible (i.e. not running with a tty), we return the last
      entry of the list

  """
  choices = [('y', True, 'Perform the operation'),
             ('n', False, 'Do not perform the operation')]
  # Validate the choices structure before interacting with the user.
  if not choices or not isinstance(choices, list):
    raise errors.ProgrammerError("Invalid choices argument to AskUser")
  for entry in choices:
    if not isinstance(entry, tuple) or len(entry) < 3 or entry[0] == '?':
      raise errors.ProgrammerError("Invalid choices element to AskUser")

  # The last choice doubles as the default answer (used without a tty).
  answer = choices[-1][1]
  # Re-wrap the question text to at most 70 columns per line.
  for line in text.splitlines():
    new_text.append(textwrap.fill(line, 70, replace_whitespace=False))
  text = "\n".join(new_text)
  # Talk to the controlling terminal directly, not stdin/stdout.
  f = file("/dev/tty", "a+")
  chars = [entry[0] for entry in choices]
  chars[-1] = "[%s]" % chars[-1]
  maps = dict([(entry[0], entry[1]) for entry in choices])
  f.write("/".join(chars))
  # Read at most one character (plus newline) as the answer.
  line = f.readline(2).strip().lower()
  # '?' help: print each choice with its description.
  for entry in choices:
    f.write(" %s - %s\n" % (entry[0], entry[2]))
class JobSubmittedException(Exception):
  """Job was submitted, client should exit.

  This exception has one argument, the ID of the job that was
  submitted. The handler should print this ID.

  This is not an error, just a structured way to exit from clients.

  """
def SendJob(ops, cl=None):
  """Function to submit an opcode without waiting for the results.

  @type ops: list
  @param ops: list of opcodes
  @type cl: luxi.Client
  @param cl: the luxi client to use for communicating with the master;
      if None, a new client will be created
  @return: the job ID assigned by the master daemon

  """
  # All opcodes are submitted together as one job.
  job_id = cl.SubmitJob(ops)
def GenericPollJob(job_id, cbs, report_cbs):
  """Generic job-polling function.

  @type job_id: number
  @param job_id: Job ID
  @type cbs: Instance of L{JobPollCbBase}
  @param cbs: Data callbacks
  @type report_cbs: Instance of L{JobPollReportCbBase}
  @param report_cbs: Reporting callbacks
  @return: the job's opcode results on success; raises OpExecError (or a
      more specific error) otherwise

  """
  # State carried between successive WaitForJobChangeOnce calls so that
  # only new job info and new log messages are reported.
  prev_job_info = None
  prev_logmsg_serial = None

  result = cbs.WaitForJobChangeOnce(job_id, ["status"], prev_job_info,
    # job not found, go away!
    raise errors.JobLost("Job with id %s lost" % job_id)

  if result == constants.JOB_NOTCHANGED:
    report_cbs.ReportNotChanged(job_id, status)

  # Split result, a tuple of (field values, log entries)
  (job_info, log_entries) = result
  (status, ) = job_info

  for log_entry in log_entries:
    (serial, timestamp, log_type, message) = log_entry
    report_cbs.ReportLogMessage(job_id, serial, timestamp,
    # Track the highest serial seen so messages are not repeated.
    prev_logmsg_serial = max(prev_logmsg_serial, serial)

  # TODO: Handle canceled and archived jobs
  elif status in (constants.JOB_STATUS_SUCCESS,
                  constants.JOB_STATUS_ERROR,
                  constants.JOB_STATUS_CANCELING,
                  constants.JOB_STATUS_CANCELED):

  prev_job_info = job_info

  # The job has finished; fetch per-opcode status and results.
  jobs = cbs.QueryJobs([job_id], ["status", "opstatus", "opresult"])
    raise errors.JobLost("Job with id %s lost" % job_id)

  status, opstatus, result = jobs[0]

  if status == constants.JOB_STATUS_SUCCESS:

  if status in (constants.JOB_STATUS_CANCELING, constants.JOB_STATUS_CANCELED):
    raise errors.OpExecError("Job was canceled")

  # Find the first failed opcode and raise a meaningful error for it.
  for idx, (status, msg) in enumerate(zip(opstatus, result)):
    if status == constants.OP_STATUS_SUCCESS:
    elif status == constants.OP_STATUS_ERROR:
      errors.MaybeRaise(msg)

      raise errors.OpExecError("partial failure (opcode %d): %s" %

      raise errors.OpExecError(str(msg))

  # default failure mode
  raise errors.OpExecError(result)
class JobPollCbBase:
  """Base class for L{GenericPollJob} callbacks.

  Subclasses provide the data-fetching side of job polling.

  """
  """Initializes this class.

  """
  def WaitForJobChangeOnce(self, job_id, fields,
                           prev_job_info, prev_log_serial):
    """Waits for changes on a job.

    Must be overridden by subclasses.

    """
    raise NotImplementedError()

  def QueryJobs(self, job_ids, fields):
    """Returns the selected fields for the selected job IDs.

    @type job_ids: list of numbers
    @param job_ids: Job IDs
    @type fields: list of strings
    @param fields: Fields

    """
    raise NotImplementedError()
class JobPollReportCbBase:
  """Base class for L{GenericPollJob} reporting callbacks.

  Subclasses provide the user-facing side of job polling.

  """
  """Initializes this class.

  """
  def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
    """Handles a log message.

    Must be overridden by subclasses.

    """
    raise NotImplementedError()

  def ReportNotChanged(self, job_id, status):
    """Called if a job hasn't changed in a while.

    @type job_id: number
    @param job_id: Job ID
    @type status: string or None
    @param status: Job status if available

    """
    raise NotImplementedError()
class _LuxiJobPollCb(JobPollCbBase):
  def __init__(self, cl):
    """Initializes this class.

    @param cl: the luxi client used for the actual queries

    """
    JobPollCbBase.__init__(self)
    # NOTE(review): the client is expected to be stored as self.cl (the
    # assignment is not visible in this view) — confirm before relying
    # on it.

  def WaitForJobChangeOnce(self, job_id, fields,
                           prev_job_info, prev_log_serial):
    """Waits for changes on a job.

    Pass-through to the luxi client.

    """
    return self.cl.WaitForJobChangeOnce(job_id, fields,
                                        prev_job_info, prev_log_serial)

  def QueryJobs(self, job_ids, fields):
    """Returns the selected fields for the selected job IDs.

    """
    return self.cl.QueryJobs(job_ids, fields)
class FeedbackFnJobPollReportCb(JobPollReportCbBase):
  def __init__(self, feedback_fn):
    """Initializes this class.

    @type feedback_fn: callable
    @param feedback_fn: function called with every log message

    """
    JobPollReportCbBase.__init__(self)

    self.feedback_fn = feedback_fn

    assert callable(feedback_fn)

  def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
    """Handles a log message.

    """
    # Forward the message as a (timestamp, type, message) tuple.
    self.feedback_fn((timestamp, log_type, log_msg))

  def ReportNotChanged(self, job_id, status):
    """Called if a job hasn't changed in a while.

    """
class StdioJobPollReportCb(JobPollReportCbBase):
  """Initializes this class.

  """
  JobPollReportCbBase.__init__(self)

  # Flags ensuring each waiting state is reported only once.
  self.notified_queued = False
  self.notified_waitlock = False

  def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
    """Handles a log message.

    Prints the timestamped, formatted message to standard output.

    """
    ToStdout("%s %s", time.ctime(utils.MergeTime(timestamp)),
             FormatLogMessage(log_type, log_msg))

  def ReportNotChanged(self, job_id, status):
    """Called if a job hasn't changed in a while.

    Tells the user (once per state) that the job is still waiting.

    """
    if status == constants.JOB_STATUS_QUEUED and not self.notified_queued:
      ToStderr("Job %s is waiting in queue", job_id)
      self.notified_queued = True

    elif status == constants.JOB_STATUS_WAITLOCK and not self.notified_waitlock:
      ToStderr("Job %s is trying to acquire all necessary locks", job_id)
      self.notified_waitlock = True
def FormatLogMessage(log_type, log_msg):
  """Formats a job message according to its type.

  Payloads of type L{constants.ELOG_MESSAGE} are used as-is; any other
  payload is first converted to a string. The result is always passed
  through L{utils.SafeEncode}.

  """
  if log_type == constants.ELOG_MESSAGE:
    return utils.SafeEncode(log_msg)
  return utils.SafeEncode(str(log_msg))
def PollJob(job_id, cl=None, feedback_fn=None, reporter=None):
  """Function to poll for the result of a job.

  @type job_id: job identified
  @param job_id: the job to poll for results
  @type cl: luxi.Client
  @param cl: the luxi client to use for communicating with the master;
      if None, a new client will be created
  @param feedback_fn: if given, log messages are passed to it; mutually
      exclusive with C{reporter}
  @param reporter: a L{JobPollReportCbBase} instance handling progress
      reporting; mutually exclusive with C{feedback_fn}

  """
  # Build a reporter from feedback_fn (or a stdio default) unless the
  # caller supplied one; giving both is a programming error.
  if reporter is None:
    reporter = FeedbackFnJobPollReportCb(feedback_fn)
    reporter = StdioJobPollReportCb()
  raise errors.ProgrammerError("Can't specify reporter and feedback function")

  return GenericPollJob(job_id, _LuxiJobPollCb(cl), reporter)
def SubmitOpCode(op, cl=None, feedback_fn=None, opts=None, reporter=None):
  """Legacy function to submit an opcode.

  This is just a simple wrapper over the construction of the processor
  instance. It should be extended to better handle feedback and
  interaction functions.

  @return: the result of the first (and only) opcode of the job

  """
  # Propagate generic options (debug level, priority, ...) to the opcode.
  SetGenericOpcodeOpts([op], opts)

  job_id = SendJob([op], cl=cl)

  op_results = PollJob(job_id, cl=cl, feedback_fn=feedback_fn,

  return op_results[0]
def SubmitOrSend(op, opts, cl=None, feedback_fn=None):
  """Wrapper around SubmitOpCode or SendJob.

  This function will decide, based on the 'opts' parameter, whether to
  submit and wait for the result of the opcode (and return it), or
  whether to just send the job and print its identifier. It is used in
  order to simplify the implementation of the '--submit' option.

  It will also process the opcodes if we're sending it via SendJob
  (otherwise SubmitOpCode does it).

  """
  # --submit: queue the job and surface its ID via JobSubmittedException
  # instead of waiting for the result.
  if opts and opts.submit_only:
    SetGenericOpcodeOpts(job, opts)
    job_id = SendJob(job, cl=cl)
    raise JobSubmittedException(job_id)

  return SubmitOpCode(op, cl=cl, feedback_fn=feedback_fn, opts=opts)
def SetGenericOpcodeOpts(opcode_list, options):
  """Processor for generic options.

  This function updates the given opcodes based on generic command
  line options (like debug, dry-run, etc.).

  @param opcode_list: list of opcodes
  @param options: command line options or None
  @return: None (in-place modification)

  """
  for op in opcode_list:
    op.debug_level = options.debug
    if hasattr(options, "dry_run"):
      op.dry_run = options.dry_run
    # Priority names from the command line are translated to their
    # internal numeric values.
    if getattr(options, "priority", None) is not None:
      op.priority = _PRIONAME_TO_VALUE[options.priority]
# NOTE(review): body of the master-client constructor; the enclosing
# def line is not visible in this view.
# TODO: Cache object?
  client = luxi.Client()
except luxi.NoMasterError:
  # No master daemon reachable: figure out whether the cluster exists
  # at all, and whether we are on the right node.
  ss = ssconf.SimpleStore()

  # Try to read ssconf file
  except errors.ConfigurationError:
    raise errors.OpPrereqError("Cluster not initialized or this machine is"
                               " not part of a cluster")

  master, myself = ssconf.GetMasterAndMyself(ss=ss)
  if master != myself:
    raise errors.OpPrereqError("This is not the master node, please connect"
                               " to node '%s' and rerun the command" %
def FormatError(err):
  """Return a formatted error message for a given error.

  This function takes an exception instance and returns a tuple
  consisting of two values: first, the recommended exit code, and
  second, a string describing the error message (not
  newline-terminated).

  """
  # Dispatch on the exception type, most specific first; each branch
  # writes a user-oriented description into the output buffer.
  if isinstance(err, errors.ConfigurationError):
    txt = "Corrupt configuration file: %s" % msg
    obuf.write(txt + "\n")
    obuf.write("Aborting.")
  elif isinstance(err, errors.HooksAbort):
    obuf.write("Failure: hooks execution failed:\n")
    for node, script, out in err.args[0]:
      # Per-hook diagnostic line; hooks without output get a shorter form.
      obuf.write(" node: %s, script: %s, output: %s\n" %
                 (node, script, out))
      obuf.write(" node: %s, script: %s (no output)\n" %
  elif isinstance(err, errors.HooksFailure):
    obuf.write("Failure: hooks general failure: %s" % msg)
  elif isinstance(err, errors.ResolverError):
    # Distinguish failing to resolve our own hostname from a remote one.
    this_host = netutils.Hostname.GetSysName()
    if err.args[0] == this_host:
      msg = "Failure: can't resolve my own hostname ('%s')"
      msg = "Failure: can't resolve hostname '%s'"
    obuf.write(msg % err.args[0])
  elif isinstance(err, errors.OpPrereqError):
    # Two-argument form carries a structured (message, error-type) pair.
    if len(err.args) == 2:
      obuf.write("Failure: prerequisites not met for this"
                 " operation:\nerror type: %s, error details:\n%s" %
                 (err.args[1], err.args[0]))
      obuf.write("Failure: prerequisites not met for this"
                 " operation:\n%s" % msg)
  elif isinstance(err, errors.OpExecError):
    obuf.write("Failure: command execution error:\n%s" % msg)
  elif isinstance(err, errors.TagError):
    obuf.write("Failure: invalid tag(s) given:\n%s" % msg)
  elif isinstance(err, errors.JobQueueDrainError):
    obuf.write("Failure: the job queue is marked for drain and doesn't"
               " accept new requests\n")
  elif isinstance(err, errors.JobQueueFull):
    obuf.write("Failure: the job queue is full and doesn't accept new"
               " job submissions until old jobs are archived\n")
  elif isinstance(err, errors.TypeEnforcementError):
    obuf.write("Parameter Error: %s" % msg)
  elif isinstance(err, errors.ParameterError):
    obuf.write("Failure: unknown/wrong parameter name '%s'" % msg)
  elif isinstance(err, luxi.NoMasterError):
    obuf.write("Cannot communicate with the master daemon.\nIs it running"
               " and listening for connections?")
  elif isinstance(err, luxi.TimeoutError):
    obuf.write("Timeout while talking to the master daemon. Jobs might have"
               " been submitted and will continue to run even if the call"
               " timed out. Useful commands in this situation are \"gnt-job"
               " list\", \"gnt-job cancel\" and \"gnt-job watch\". Error:\n")
  elif isinstance(err, luxi.PermissionError):
    obuf.write("It seems you don't have permissions to connect to the"
               " master daemon.\nPlease retry as a different user.")
  elif isinstance(err, luxi.ProtocolError):
    obuf.write("Unhandled protocol error while talking to the master daemon:\n"
  elif isinstance(err, errors.JobLost):
    obuf.write("Error checking job status: %s" % msg)
  elif isinstance(err, errors.GenericError):
    obuf.write("Unhandled Ganeti error: %s" % msg)
  elif isinstance(err, JobSubmittedException):
    # Not an error: report the submitted job's ID.
    obuf.write("JobID: %s\n" % err.args[0])
    obuf.write("Unhandled exception: %s" % msg)
  return retcode, obuf.getvalue().rstrip('\n')
def GenericMain(commands, override=None, aliases=None):
  """Generic main function for all the gnt-* commands.

  Arguments:
    - commands: a dictionary with a special structure, see the design doc
      for command line handling.
    - override: if not None, we expect a dictionary with keys that will
      override command line options; this can be used to pass
      options from the scripts to generic functions
    - aliases: dictionary with command aliases {'alias': 'target, ...}

  """
  # save the program name and the entire command line for later logging
  binary = os.path.basename(sys.argv[0]) or sys.argv[0]
  if len(sys.argv) >= 2:
    binary += " " + sys.argv[1]
    old_cmdline = " ".join(sys.argv[2:])

  binary = "<unknown program>"

  func, options, args = _ParseArgs(sys.argv, commands, aliases)
  except errors.ParameterError, err:
    result, err_msg = FormatError(err)

  if func is None: # parse error

  # Apply forced option overrides supplied by the calling script.
  if override is not None:
    for key, val in override.iteritems():
      setattr(options, key, val)

  utils.SetupLogging(constants.LOG_COMMANDS, binary, debug=options.debug,
                     stderr_logging=True)

  # Log the invocation (arguments were saved before parsing above).
  logging.info("run with arguments '%s'", old_cmdline)
  logging.info("run with no arguments")

  result = func(options, args)
  except (errors.GenericError, luxi.ProtocolError,
          JobSubmittedException), err:
    result, err_msg = FormatError(err)
    logging.exception("Error during command processing")
  except KeyboardInterrupt:
    result = constants.EXIT_FAILURE
    ToStderr("Aborted. Note that if the operation created any jobs, they"
             " might have been submitted and"
             " will continue to run in the background.")
def ParseNicOption(optvalue):
  """Parses the value of the --net option(s).

  @param optvalue: list of (nic_index, settings_dict) pairs produced by
      the option parser
  @return: a list of NIC parameter dictionaries, one entry per NIC index

  """
  # The highest index given determines how many NICs are configured.
    nic_max = max(int(nidx[0]) + 1 for nidx in optvalue)
  except (TypeError, ValueError), err:
    raise errors.OpPrereqError("Invalid NIC index passed: %s" % str(err))

  nics = [{}] * nic_max
  for nidx, ndict in optvalue:

    if not isinstance(ndict, dict):
      raise errors.OpPrereqError("Invalid nic/%d value: expected dict,"
                                 " got %s" % (nidx, ndict))

    # Enforce the declared types for all NIC parameters.
    utils.ForceDictType(ndict, constants.INIC_PARAMS_TYPES)
def GenericInstanceCreate(mode, opts, args):
  """Add an instance to the cluster via either creation or import.

  @param mode: constants.INSTANCE_CREATE or constants.INSTANCE_IMPORT
  @param opts: the command line options selected by the user
  @type args: list
  @param args: should contain only one element, the new instance name
  @rtype: int
  @return: the desired exit code

  """
  (pnode, snode) = SplitNodeOption(opts.node)

  hypervisor, hvparams = opts.hypervisor

  nics = ParseNicOption(opts.nics)

  elif mode == constants.INSTANCE_CREATE:
    # default of one nic, all auto

  # Disk sanity checks: diskless templates must not get any disk
  # information; everything else needs --disk or -s (but not both).
  if opts.disk_template == constants.DT_DISKLESS:
    if opts.disks or opts.sd_size is not None:
      raise errors.OpPrereqError("Diskless instance but disk"
                                 " information passed")

  if (not opts.disks and not opts.sd_size
      and mode == constants.INSTANCE_CREATE):
    raise errors.OpPrereqError("No disk information specified")
  if opts.disks and opts.sd_size is not None:
    raise errors.OpPrereqError("Please use either the '--disk' or"
  if opts.sd_size is not None:
    # -s SIZE is shorthand for a single disk 0 of the given size.
    opts.disks = [(0, {"size": opts.sd_size})]

    # The highest disk index given determines the number of disks.
    disk_max = max(int(didx[0]) + 1 for didx in opts.disks)
  except ValueError, err:
    raise errors.OpPrereqError("Invalid disk index passed: %s" % str(err))
  disks = [{}] * disk_max

  for didx, ddict in opts.disks:

    if not isinstance(ddict, dict):
      msg = "Invalid disk/%d value: expected dict, got %s" % (didx, ddict)
      raise errors.OpPrereqError(msg)
    elif "size" in ddict:
      # "size" and "adopt" are mutually exclusive per disk.
      if "adopt" in ddict:
        raise errors.OpPrereqError("Only one of 'size' and 'adopt' allowed"
                                   " (disk %d)" % didx)
        ddict["size"] = utils.ParseUnit(ddict["size"])
      except ValueError, err:
        raise errors.OpPrereqError("Invalid disk size for disk %d: %s" %
    elif "adopt" in ddict:
      if mode == constants.INSTANCE_IMPORT:
        raise errors.OpPrereqError("Disk adoption not allowed for instance"
      raise errors.OpPrereqError("Missing size or adoption source for"

  utils.ForceDictType(opts.beparams, constants.BES_PARAMETER_TYPES)
  utils.ForceDictType(hvparams, constants.HVS_PARAMETER_TYPES)

  # Mode-specific parameters used in the opcode below.
  if mode == constants.INSTANCE_CREATE:
    force_variant = opts.force_variant
    no_install = opts.no_install
    identify_defaults = False
  elif mode == constants.INSTANCE_IMPORT:
    force_variant = False
    src_node = opts.src_node
    src_path = opts.src_dir
    identify_defaults = opts.identify_defaults
    raise errors.ProgrammerError("Invalid creation mode %s" % mode)

  op = opcodes.OpInstanceCreate(instance_name=instance,
                                disk_template=opts.disk_template,
                                pnode=pnode, snode=snode,
                                ip_check=opts.ip_check,
                                name_check=opts.name_check,
                                wait_for_sync=opts.wait_for_sync,
                                file_storage_dir=opts.file_storage_dir,
                                file_driver=opts.file_driver,
                                iallocator=opts.iallocator,
                                hypervisor=hypervisor,
                                beparams=opts.beparams,
                                osparams=opts.osparams,
                                force_variant=force_variant,
                                no_install=no_install,
                                identify_defaults=identify_defaults)

  SubmitOrSend(op, opts)
class _RunWhileClusterStoppedHelper:
  """Helper class for L{RunWhileClusterStopped} to simplify state management

  """
  def __init__(self, feedback_fn, cluster_name, master_node, online_nodes):
    """Initializes this class.

    @type feedback_fn: callable
    @param feedback_fn: Feedback function
    @type cluster_name: string
    @param cluster_name: Cluster name
    @type master_node: string
    @param master_node: Master node name
    @type online_nodes: list
    @param online_nodes: List of names of online nodes

    """
    self.feedback_fn = feedback_fn
    self.cluster_name = cluster_name
    self.master_node = master_node
    self.online_nodes = online_nodes

    self.ssh = ssh.SshRunner(self.cluster_name)

    # All online nodes except the master; these are reached via SSH.
    self.nonmaster_nodes = [name for name in online_nodes
                            if name != master_node]

    assert self.master_node not in self.nonmaster_nodes

  def _RunCmd(self, node_name, cmd):
    """Runs a command on the local or a remote machine.

    @type node_name: string
    @param node_name: Machine name
    @param cmd: Command (list of arguments)

    """
    if node_name is None or node_name == self.master_node:
      # No need to use SSH
      result = utils.RunCmd(cmd)
      result = self.ssh.Run(node_name, "root", utils.ShellQuoteArgs(cmd))

      # Build an informative error message including the node, exit
      # code and captured output.
      errmsg = ["Failed to run command %s" % result.cmd]
      errmsg.append("on node %s" % node_name)
      errmsg.append(": exitcode %s and error %s" %
                    (result.exit_code, result.output))
      raise errors.OpExecError(" ".join(errmsg))

  def Call(self, fn, *args):
    """Call function while all daemons are stopped.

    @type fn: callable
    @param fn: Function to be called

    """
    # Pause watcher by acquiring an exclusive lock on watcher state file
    self.feedback_fn("Blocking watcher")
    watcher_block = utils.FileLock.Open(constants.WATCHER_STATEFILE)
    # TODO: Currently, this just blocks. There's no timeout.
    # TODO: Should it be a shared lock?
    watcher_block.Exclusive(blocking=True)

    # Stop master daemons, so that no new jobs can come in and all running
    # ones are finished
    self.feedback_fn("Stopping master daemons")
    self._RunCmd(None, [constants.DAEMON_UTIL, "stop-master"])

    # Stop daemons on all nodes
    for node_name in self.online_nodes:
      self.feedback_fn("Stopping daemons on %s" % node_name)
      self._RunCmd(node_name, [constants.DAEMON_UTIL, "stop-all"])

    # All daemons are shut down now
    return fn(self, *args)
    except Exception, err:
      _, errmsg = FormatError(err)
      logging.exception("Caught exception")
      self.feedback_fn(errmsg)

    # Start cluster again, master node last
    for node_name in self.nonmaster_nodes + [self.master_node]:
      self.feedback_fn("Starting daemons on %s" % node_name)
      self._RunCmd(node_name, [constants.DAEMON_UTIL, "start-all"])

    # Resume watcher by releasing the lock.
    watcher_block.Close()
def RunWhileClusterStopped(feedback_fn, fn, *args):
  """Calls a function while all cluster daemons are stopped.

  @type feedback_fn: callable
  @param feedback_fn: Feedback function
  @type fn: callable
  @param fn: Function to be called when daemons are stopped
  @return: the return value of C{fn}

  """
  feedback_fn("Gathering cluster information")

  # This ensures we're running on the master daemon
  (cluster_name, master_node) = \
    cl.QueryConfigValues(["cluster_name", "master_node"])

  online_nodes = GetOnlineNodes([], cl=cl)

  # Don't keep a reference to the client. The master daemon will go away.

  assert master_node in online_nodes

  return _RunWhileClusterStoppedHelper(feedback_fn, cluster_name, master_node,
                                       online_nodes).Call(fn, *args)
def GenerateTable(headers, fields, separator, data,
                  numfields=None, unitfields=None,
  """Prints a table with headers and different fields.

  @type headers: dict
  @param headers: dictionary mapping field names to headers for
      the table
  @type fields: list
  @param fields: the field names corresponding to each row in
      the data
  @param separator: the separator to be used; if this is None,
      the default 'smart' algorithm is used which computes optimal
      field width, otherwise just the separator is used between
      each field
  @type data: list
  @param data: a list of lists, each sublist being one row to be output
  @type numfields: list
  @param numfields: a list with the fields that hold numeric
      values and thus should be right-aligned
  @type unitfields: list
  @param unitfields: a list with the fields that hold numeric
      values that should be formatted with the units field
  @type units: string or None
  @param units: the units we should use for formatting, or None for
      automatic choice (human-readable for non-separator usage, otherwise
      megabytes); this is a one-letter string

  """
  # Missing numeric/unit field lists are treated as empty sets.
  if numfields is None:
  if unitfields is None:

  numfields = utils.FieldSet(*numfields) # pylint: disable-msg=W0142
  unitfields = utils.FieldSet(*unitfields) # pylint: disable-msg=W0142

  # Build the per-column format string: numeric columns are
  # right-aligned, everything else left-aligned.
  for field in fields:
    if headers and field not in headers:
      # TODO: handle better unknown fields (either revert to old
      # style of raising exception, or deal more intelligently with
      # variable fields)
      headers[field] = field
    if separator is not None:
      format_fields.append("%s")
    elif numfields.Matches(field):
      format_fields.append("%*s")
      format_fields.append("%-*s")

  if separator is None:
    mlens = [0 for name in fields]
    format_str = ' '.join(format_fields)
    format_str = separator.replace("%", "%%").join(format_fields)

  # First pass over the data: convert values to strings and, without a
  # separator, track the maximum width of each column.
  for idx, val in enumerate(row):
    if unitfields.Matches(fields[idx]):
      except (TypeError, ValueError):
      val = row[idx] = utils.FormatUnit(val, units)
    val = row[idx] = str(val)
    if separator is None:
      mlens[idx] = max(mlens[idx], len(val))

  # Emit the header row, widening columns for long headers if needed.
  for idx, name in enumerate(fields):
    if separator is None:
      mlens[idx] = max(mlens[idx], len(hdr))
      args.append(mlens[idx])
    result.append(format_str % tuple(args))

  if separator is None:
    assert len(mlens) == len(fields)
    # The last column needs no trailing padding unless right-aligned.
    if fields and not numfields.Matches(fields[-1]):

  line = ['-' for _ in fields]
  for idx in range(len(fields)):
    if separator is None:
      args.append(mlens[idx])
    args.append(line[idx])
  result.append(format_str % tuple(args))
2357 def _FormatBool(value):
2358 """Formats a boolean value as a string.
#: Default formatting for query results; (callback, align right)
# Each entry maps a query field type to the function used to render its
# values and whether the column should be right-aligned. The dict
# literal was left unterminated; closing brace restored.
_DEFAULT_FORMAT_QUERY = {
  constants.QFT_TEXT: (str, False),
  constants.QFT_BOOL: (_FormatBool, False),
  constants.QFT_NUMBER: (str, True),
  constants.QFT_TIMESTAMP: (utils.FormatTime, False),
  constants.QFT_OTHER: (str, False),
  constants.QFT_UNKNOWN: (str, False),
  }
def _GetColumnFormatter(fdef, override, unit):
  """Returns formatting function for a field.

  @type fdef: L{objects.QueryFieldDefinition}
  @type override: dict
  @param override: Dictionary for overriding field formatting functions,
      indexed by field name, contents like L{_DEFAULT_FORMAT_QUERY}
  @type unit: string
  @param unit: Unit used for formatting fields of type L{constants.QFT_UNIT}
  @rtype: tuple; (callable, bool)
  @return: Returns the function to format a value (takes one parameter) and a
      boolean for aligning the value on the right-hand side

  """
  # Caller-supplied overrides (by field name) take precedence.
  fmt = override.get(fdef.name, None)

  assert constants.QFT_UNIT not in _DEFAULT_FORMAT_QUERY

  if fdef.kind == constants.QFT_UNIT:
    # Can't keep this information in the static dictionary
    return (lambda value: utils.FormatUnit(value, unit), True)

  # Fall back to the type-based default formatters.
  fmt = _DEFAULT_FORMAT_QUERY.get(fdef.kind, None)

  raise NotImplementedError("Can't format column type '%s'" % fdef.kind)
class _QueryColumnFormatter:
  """Callable class for formatting fields of a query.

  """
  def __init__(self, fn, status_fn, verbose):
    """Initializes this class.

    @type fn: callable
    @param fn: Formatting function
    @type status_fn: callable
    @param status_fn: Function to report fields' status
    @type verbose: boolean
    @param verbose: whether to use verbose field descriptions or not

    """
    # NOTE(review): the formatting function is expected to be stored as
    # self._fn (assignment not visible in this view).
    self._status_fn = status_fn
    self._verbose = verbose

  def __call__(self, data):
    """Returns a field's string representation.

    """
    (status, value) = data

    # Report the result status to the collector callback first.
    self._status_fn(status)

    if status == constants.RS_NORMAL:
      return self._fn(value)

    # Abnormal statuses carry no value; render a status marker instead.
    assert value is None, \
           "Found value %r for abnormal status %s" % (value, status)

    return FormatResultError(status, self._verbose)
def FormatResultError(status, verbose):
  """Formats result status other than L{constants.RS_NORMAL}.

  @param status: The result status
  @type verbose: boolean
  @param verbose: Whether to return the verbose text
  @return: Text of result status

  """
  assert status != constants.RS_NORMAL, \
         "FormatResultError called with status equal to constants.RS_NORMAL"
  # Look up the (verbose, terse) description pair for the status; an
  # unknown status is a programming error.
  (verbose_text, normal_text) = constants.RSS_DESCRIPTION[status]
  raise NotImplementedError("Unknown status %s" % status)
def FormatQueryResult(result, unit=None, format_override=None, separator=None,
                      header=False, verbose=False):
  """Formats data in L{objects.QueryResponse}.

  @type result: L{objects.QueryResponse}
  @param result: result of query operation
  @type unit: string or None
  @param unit: Unit used for formatting fields of type L{constants.QFT_UNIT},
    see L{utils.text.FormatUnit}; chosen automatically when None
  @type format_override: dict
  @param format_override: Dictionary for overriding field formatting functions,
    indexed by field name, contents like L{_DEFAULT_FORMAT_QUERY}
  @type separator: string or None
  @param separator: String used to separate fields
  @type header: bool
  @param header: Whether to output header row
  @type verbose: boolean
  @param verbose: whether to use verbose field descriptions or not
  @rtype: tuple; (overall result status, list of formatted lines)

  """
  if unit is None:
    # Machine-readable output (with separator) defaults to megabytes,
    # human-readable output to automatic unit selection
    if separator:
      unit = "m"
    else:
      unit = "h"

  if format_override is None:
    format_override = {}

  # Per-status counters, filled in by the column formatters
  stats = dict.fromkeys(constants.RS_ALL, 0)

  def _RecordStatus(status):
    if status in stats:
      stats[status] += 1

  columns = []
  for fdef in result.fields:
    assert fdef.title and fdef.name
    (fn, align_right) = _GetColumnFormatter(fdef, format_override, unit)
    columns.append(TableColumn(fdef.title,
                               _QueryColumnFormatter(fn, _RecordStatus,
                                                     verbose),
                               align_right))

  table = FormatTable(result.data, columns, header, separator)

  # Collect statistics
  assert len(stats) == len(constants.RS_ALL)
  assert compat.all(count >= 0 for count in stats.values())

  # Determine overall status. If there was no data, unknown fields must be
  # detected via the field definitions.
  if (stats[constants.RS_UNKNOWN] or
      (not result.data and _GetUnknownFields(result.fields))):
    status = QR_UNKNOWN
  elif compat.any(count > 0 for key, count in stats.items()
                  if key != constants.RS_NORMAL):
    status = QR_INCOMPLETE
  else:
    status = QR_NORMAL

  return (status, table)
def _GetUnknownFields(fdefs):
  """Returns list of unknown fields included in C{fdefs}.

  @type fdefs: list of L{objects.QueryFieldDefinition}
  @param fdefs: field definitions to filter
  @rtype: list of L{objects.QueryFieldDefinition}

  """
  return list(filter(lambda fdef: fdef.kind == constants.QFT_UNKNOWN, fdefs))
def _WarnUnknownFields(fdefs):
  """Prints a warning to stderr if a query included unknown fields.

  @type fdefs: list of L{objects.QueryFieldDefinition}
  @param fdefs: field definitions returned by a query
  @rtype: bool
  @return: whether any unknown fields were found

  """
  unknown = _GetUnknownFields(fdefs)
  # Only warn when there actually are unknown fields
  if unknown:
    ToStderr("Warning: Queried for unknown fields %s",
             utils.CommaJoin(fdef.name for fdef in unknown))

  return bool(unknown)
def GenericList(resource, fields, names, unit, separator, header, cl=None,
                format_override=None, verbose=False):
  """Generic implementation for listing all items of a resource.

  @param resource: One of L{constants.QR_VIA_LUXI}
  @type fields: list of strings
  @param fields: List of fields to query for
  @type names: list of strings
  @param names: Names of items to query for
  @type unit: string or None
  @param unit: Unit used for formatting fields of type L{constants.QFT_UNIT} or
    None for automatic choice (human-readable for non-separator usage,
    otherwise megabytes); this is a one-letter string
  @type separator: string or None
  @param separator: String used to separate fields
  @type header: bool
  @param header: Whether to show header row
  @param cl: luxi client to use; a new one is created when None
  @type format_override: dict
  @param format_override: Dictionary for overriding field formatting functions,
    indexed by field name, contents like L{_DEFAULT_FORMAT_QUERY}
  @type verbose: boolean
  @param verbose: whether to use verbose field descriptions or not
  @rtype: int
  @return: exit code, L{constants.EXIT_SUCCESS} or
    L{constants.EXIT_UNKNOWN_FIELD}

  """
  if not names:
    # An empty name list means "query all items"
    names = None

  if cl is None:
    cl = GetClient()

  response = cl.Query(resource, fields, qlang.MakeSimpleFilter("name", names))

  found_unknown = _WarnUnknownFields(response.fields)

  (status, data) = FormatQueryResult(response, unit=unit, separator=separator,
                                     header=header,
                                     format_override=format_override,
                                     verbose=verbose)

  for line in data:
    ToStdout(line)

  assert ((found_unknown and status == QR_UNKNOWN) or
          (not found_unknown and status != QR_UNKNOWN))

  if status == QR_UNKNOWN:
    return constants.EXIT_UNKNOWN_FIELD

  # TODO: Should the list command fail if not all data could be collected?
  return constants.EXIT_SUCCESS
def GenericListFields(resource, fields, separator, header, cl=None):
  """Generic implementation for listing fields for a resource.

  @param resource: One of L{constants.QR_VIA_LUXI}
  @type fields: list of strings
  @param fields: List of fields to query for
  @type separator: string or None
  @param separator: String used to separate fields
  @type header: bool
  @param header: Whether to show header row
  @param cl: luxi client to use; a new one is created when None
  @rtype: int
  @return: exit code, L{constants.EXIT_SUCCESS} or
    L{constants.EXIT_UNKNOWN_FIELD}

  """
  if cl is None:
    cl = GetClient()

  if not fields:
    # An empty field list means "all fields"
    fields = None

  response = cl.QueryFields(resource, fields)

  found_unknown = _WarnUnknownFields(response.fields)

  columns = [
    TableColumn("Name", str, False),
    TableColumn("Title", str, False),
    TableColumn("Description", str, False),
    ]

  rows = [[fdef.name, fdef.title, fdef.doc] for fdef in response.fields]

  for line in FormatTable(rows, columns, header, separator):
    ToStdout(line)

  if found_unknown:
    return constants.EXIT_UNKNOWN_FIELD

  return constants.EXIT_SUCCESS
2647 """Describes a column for L{FormatTable}.
2650 def __init__(self, title, fn, align_right):
2651 """Initializes this class.
2654 @param title: Column title
2656 @param fn: Formatting function
2657 @type align_right: bool
2658 @param align_right: Whether to align values on the right-hand side
2663 self.align_right = align_right
def _GetColFormatString(width, align_right):
  """Returns the format string for a field.

  @type width: int
  @param width: Field width
  @type align_right: bool
  @param align_right: Whether to align values on the right-hand side
  @rtype: string
  @return: %-style format string padding to C{width} characters

  """
  if align_right:
    sign = ""
  else:
    # Negative field width in %-formatting means left alignment
    sign = "-"

  return "%%%s%ss" % (sign, width)
def FormatTable(rows, columns, header, separator):
  """Formats data as a table.

  @type rows: list of lists
  @param rows: Row data, one list per row
  @type columns: list of L{TableColumn}
  @param columns: Column descriptions
  @type header: bool
  @param header: Whether to show header row
  @type separator: string or None
  @param separator: String used to separate columns
  @rtype: list of strings
  @return: formatted lines, one string per row

  """
  if header:
    data = [[col.title for col in columns]]
    colwidth = [len(col.title) for col in columns]
  else:
    data = []
    colwidth = [0 for _ in columns]

  # Format row data
  for row in rows:
    assert len(row) == len(columns)

    formatted = [col.format(value) for value, col in zip(row, columns)]

    if separator is None:
      # Update column widths
      for idx, (oldwidth, value) in enumerate(zip(colwidth, formatted)):
        # Modifying a list's items while iterating is fine
        colwidth[idx] = max(oldwidth, len(value))

    data.append(formatted)

  if separator is not None:
    # Return early if a separator is used
    return [separator.join(row) for row in data]

  if columns and not columns[-1].align_right:
    # Avoid unnecessary spaces at end of line
    colwidth[-1] = 0

  # Build format string
  fmt = " ".join([_GetColFormatString(width, col.align_right)
                  for col, width in zip(columns, colwidth)])

  return [fmt % tuple(row) for row in data]
def FormatTimestamp(ts):
  """Formats a given timestamp.

  @type ts: timestamp
  @param ts: a timeval-type timestamp, a tuple of seconds and microseconds

  @rtype: string
  @return: a string with the formatted timestamp, or "?" if the input is
    not a two-element tuple/list

  """
  if not isinstance(ts, (tuple, list)) or len(ts) != 2:
    return "?"

  (sec, usec) = ts
  return time.strftime("%F %T", time.localtime(sec)) + ".%06d" % usec
def ParseTimespec(value):
  """Parse a time specification.

  The following suffixed will be recognized:

    - s: seconds
    - m: minutes
    - h: hours
    - d: day
    - w: weeks

  Without any suffix, the value will be taken to be in seconds.

  @rtype: int
  @return: the number of seconds represented by the specification
  @raise errors.OpPrereqError: if the value is empty or not parseable

  """
  value = str(value)
  if not value:
    raise errors.OpPrereqError("Empty time specification passed")

  suffix_map = {
    "s": 1,
    "m": 60,
    "h": 3600,
    "d": 86400,
    "w": 604800,
    }

  if value[-1] not in suffix_map:
    # No suffix: plain number of seconds
    try:
      value = int(value)
    except (TypeError, ValueError):
      raise errors.OpPrereqError("Invalid time specification '%s'" % value)
  else:
    multiplier = suffix_map[value[-1]]
    value = value[:-1]
    if not value: # no data left after stripping the suffix
      raise errors.OpPrereqError("Invalid time specification (only"
                                 " suffix passed)")
    try:
      value = int(value) * multiplier
    except (TypeError, ValueError):
      raise errors.OpPrereqError("Invalid time specification '%s'" % value)

  return value
def GetOnlineNodes(nodes, cl=None, nowarn=False, secondary_ips=False,
                   filter_master=False):
  """Returns the names of online nodes.

  This function will also log a warning on stderr with the names of
  the online nodes.

  @param nodes: if not empty, use only this subset of nodes (minus the
      offline ones)
  @param cl: if not None, luxi client to use
  @type nowarn: boolean
  @param nowarn: by default, this function will output a note with the
      offline nodes that are skipped; if this parameter is True the
      note is not displayed
  @type secondary_ips: boolean
  @param secondary_ips: if True, return the secondary IPs instead of the
      names, useful for doing network traffic over the replication interface
      (if any)
  @type filter_master: boolean
  @param filter_master: if True, do not return the master node in the list
      (useful in coordination with secondary_ips where we cannot check our
      node name against the list)

  """
  if cl is None:
    cl = GetClient()

  # Column 0 holds the name, column 2 the secondary IP
  if secondary_ips:
    name_idx = 2
  else:
    name_idx = 0

  if filter_master:
    master_node = cl.QueryConfigValues(["master_node"])[0]
    filter_fn = lambda x: x != master_node
  else:
    filter_fn = lambda _: True

  result = cl.QueryNodes(names=nodes, fields=["name", "offline", "sip"],
                         use_locking=False)
  offline = [row[0] for row in result if row[1]]
  if offline and not nowarn:
    ToStderr("Note: skipping offline node(s): %s" % utils.CommaJoin(offline))
  return [row[name_idx] for row in result if not row[1] and filter_fn(row[0])]
def _ToStream(stream, txt, *args):
  """Write a message to a stream, bypassing the logging system

  A newline is appended and the stream is flushed after each message.

  @type stream: file object
  @param stream: the file to which we should write
  @type txt: str
  @param txt: the message

  """
  if args:
    args = tuple(args)
    stream.write(txt % args)
  else:
    # Don't %-interpolate without arguments, so a literal '%' in the
    # message cannot break the write
    stream.write(txt)
  stream.write("\n")
  stream.flush()
def ToStdout(txt, *args):
  """Write a message to stdout only, bypassing the logging system

  This is just a wrapper over _ToStream.

  @type txt: str
  @param txt: the message
  @param args: optional %-interpolation arguments, passed on to _ToStream

  """
  _ToStream(sys.stdout, txt, *args)
def ToStderr(txt, *args):
  """Write a message to stderr only, bypassing the logging system

  This is just a wrapper over _ToStream.

  @type txt: str
  @param txt: the message
  @param args: optional %-interpolation arguments, passed on to _ToStream

  """
  _ToStream(sys.stderr, txt, *args)
2873 class JobExecutor(object):
2874 """Class which manages the submission and execution of multiple jobs.
2876 Note that instances of this class should not be reused between
2880 def __init__(self, cl=None, verbose=True, opts=None, feedback_fn=None):
2885 self.verbose = verbose
2888 self.feedback_fn = feedback_fn
2890 def QueueJob(self, name, *ops):
2891 """Record a job for later submit.
2894 @param name: a description of the job, will be used in WaitJobSet
2896 SetGenericOpcodeOpts(ops, self.opts)
2897 self.queue.append((name, ops))
2899 def SubmitPending(self, each=False):
2900 """Submit all pending jobs.
2905 for row in self.queue:
2906 # SubmitJob will remove the success status, but raise an exception if
2907 # the submission fails, so we'll notice that anyway.
2908 results.append([True, self.cl.SubmitJob(row[1])])
2910 results = self.cl.SubmitManyJobs([row[1] for row in self.queue])
2911 for (idx, ((status, data), (name, _))) in enumerate(zip(results,
2913 self.jobs.append((idx, status, data, name))
2915 def _ChooseJob(self):
2916 """Choose a non-waiting/queued job to poll next.
2919 assert self.jobs, "_ChooseJob called with empty job list"
2921 result = self.cl.QueryJobs([i[2] for i in self.jobs], ["status"])
2924 for job_data, status in zip(self.jobs, result):
2925 if (isinstance(status, list) and status and
2926 status[0] in (constants.JOB_STATUS_QUEUED,
2927 constants.JOB_STATUS_WAITLOCK,
2928 constants.JOB_STATUS_CANCELING)):
2929 # job is still present and waiting
2931 # good candidate found (either running job or lost job)
2932 self.jobs.remove(job_data)
2936 return self.jobs.pop(0)
2938 def GetResults(self):
2939 """Wait for and return the results of all jobs.
2942 @return: list of tuples (success, job results), in the same order
2943 as the submitted jobs; if a job has failed, instead of the result
2944 there will be the error message
2948 self.SubmitPending()
2951 ok_jobs = [row[2] for row in self.jobs if row[1]]
2953 ToStdout("Submitted jobs %s", utils.CommaJoin(ok_jobs))
2955 # first, remove any non-submitted jobs
2956 self.jobs, failures = compat.partition(self.jobs, lambda x: x[1])
2957 for idx, _, jid, name in failures:
2958 ToStderr("Failed to submit job for %s: %s", name, jid)
2959 results.append((idx, False, jid))
2962 (idx, _, jid, name) = self._ChooseJob()
2963 ToStdout("Waiting for job %s for %s...", jid, name)
2965 job_result = PollJob(jid, cl=self.cl, feedback_fn=self.feedback_fn)
2967 except errors.JobLost, err:
2968 _, job_result = FormatError(err)
2969 ToStderr("Job %s for %s has been archived, cannot check its result",
2972 except (errors.GenericError, luxi.ProtocolError), err:
2973 _, job_result = FormatError(err)
2975 # the error message will always be shown, verbose or not
2976 ToStderr("Job %s for %s has failed: %s", jid, name, job_result)
2978 results.append((idx, success, job_result))
2980 # sort based on the index, then drop it
2982 results = [i[1:] for i in results]
2986 def WaitOrShow(self, wait):
2987 """Wait for job results or only print the job IDs.
2990 @param wait: whether to wait or not
2994 return self.GetResults()
2997 self.SubmitPending()
2998 for _, status, result, name in self.jobs:
3000 ToStdout("%s: %s", result, name)
3002 ToStderr("Failure for %s: %s", name, result)
3003 return [row[1:3] for row in self.jobs]
3006 def FormatParameterDict(buf, param_dict, actual, level=1):
3007 """Formats a parameter dictionary.
3009 @type buf: L{StringIO}
3010 @param buf: the buffer into which to write
3011 @type param_dict: dict
3012 @param param_dict: the own parameters
3014 @param actual: the current parameter set (including defaults)
3015 @param level: Level of indent
3018 indent = " " * level
3019 for key in sorted(actual):
3020 val = param_dict.get(key, "default (%s)" % actual[key])
3021 buf.write("%s- %s: %s\n" % (indent, key, val))
3024 def ConfirmOperation(names, list_type, text, extra=""):
3025 """Ask the user to confirm an operation on a list of list_type.
3027 This function is used to request confirmation for doing an operation
3028 on a given list of list_type.
3031 @param names: the list of names that we display when
3032 we ask for confirmation
3033 @type list_type: str
3034 @param list_type: Human readable name for elements in the list (e.g. nodes)
3036 @param text: the operation that the user should confirm
3038 @return: True or False depending on user's confirmation.
3042 msg = ("The %s will operate on %d %s.\n%s"
3043 "Do you want to continue?" % (text, count, list_type, extra))
3044 affected = (("\nAffected %s:\n" % list_type) +
3045 "\n".join([" %s" % name for name in names]))
3047 choices = [("y", True, "Yes, execute the %s" % text),
3048 ("n", False, "No, abort the %s" % text)]
3051 choices.insert(1, ("v", "v", "View the list of affected %s" % list_type))
3054 question = msg + affected
3056 choice = AskUser(question, choices)
3059 choice = AskUser(msg + affected, choices)