4 # Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011 Google Inc.
6 # This program is free software; you can redistribute it and/or modify
7 # it under the terms of the GNU General Public License as published by
8 # the Free Software Foundation; either version 2 of the License, or
9 # (at your option) any later version.
11 # This program is distributed in the hope that it will be useful, but
12 # WITHOUT ANY WARRANTY; without even the implied warranty of
13 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 # General Public License for more details.
16 # You should have received a copy of the GNU General Public License
17 # along with this program; if not, write to the Free Software
18 # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
22 """Module dealing with command line parsing"""
30 from cStringIO import StringIO
32 from ganeti import utils
33 from ganeti import errors
34 from ganeti import constants
35 from ganeti import opcodes
36 from ganeti import luxi
37 from ganeti import ssconf
38 from ganeti import rpc
39 from ganeti import ssh
40 from ganeti import compat
41 from ganeti import netutils
42 from ganeti import qlang
44 from optparse import (OptionParser, TitledHelpFormatter,
45 Option, OptionValueError)
# NOTE(review): fragment of the module's public-export list (``__all__``) —
# the opening ``__all__ = [`` and many entries are not visible in this chunk.
49 # Command line options
62 "CLUSTER_DOMAIN_SECRET_OPT",
79 "FILESTORE_DRIVER_OPT",
85 "GLOBAL_SHARED_FILEDIR_OPT",
90 "DEFAULT_IALLOCATOR_OPT",
91 "IDENTIFY_DEFAULTS_OPT",
93 "IGNORE_FAILURES_OPT",
95 "IGNORE_REMOVE_FAILURES_OPT",
96 "IGNORE_SECONDARIES_OPT",
100 "MAINTAIN_NODE_HEALTH_OPT",
103 "MIGRATION_MODE_OPT",
105 "NEW_CLUSTER_CERT_OPT",
106 "NEW_CLUSTER_DOMAIN_SECRET_OPT",
107 "NEW_CONFD_HMAC_KEY_OPT",
111 "NODE_FORCE_JOIN_OPT",
113 "NODE_PLACEMENT_OPT",
117 "NODRBD_STORAGE_OPT",
123 "NOMODIFY_ETCHOSTS_OPT",
124 "NOMODIFY_SSH_SETUP_OPT",
130 "NOSSH_KEYCHECK_OPT",
141 "PREALLOC_WIPE_DISKS_OPT",
142 "PRIMARY_IP_VERSION_OPT",
147 "REMOVE_INSTANCE_OPT",
155 "SHUTDOWN_TIMEOUT_OPT",
170 # Generic functions for CLI programs
173 "GenericInstanceCreate",
179 "JobSubmittedException",
181 "RunWhileClusterStopped",
185 # Formatting functions
186 "ToStderr", "ToStdout",
189 "FormatParameterDict",
198 # command line options support infrastructure
199 "ARGS_MANY_INSTANCES",
218 "OPT_COMPL_INST_ADD_NODES",
219 "OPT_COMPL_MANY_NODES",
220 "OPT_COMPL_ONE_IALLOCATOR",
221 "OPT_COMPL_ONE_INSTANCE",
222 "OPT_COMPL_ONE_NODE",
223 "OPT_COMPL_ONE_NODEGROUP",
229 "COMMON_CREATE_OPTS",
235 #: Priorities (sorted)
# NOTE(review): fragment — the ``_PRIORITY_NAMES = [`` opener and the list
# closer are not visible here; entries map CLI names to opcode priorities.
237 ("low", constants.OP_PRIO_LOW),
238 ("normal", constants.OP_PRIO_NORMAL),
239 ("high", constants.OP_PRIO_HIGH),
242 #: Priority dictionary for easier lookup
243 # TODO: Replace this and _PRIORITY_NAMES with a single sorted dictionary once
244 # we migrate to Python 2.6
245 _PRIONAME_TO_VALUE = dict(_PRIORITY_NAMES)
247 # Query result status for clients
# NOTE(review): fragment — the tuple of QR_* names being unpacked from
# range(3) is partly sampled out (only the closing line is visible).
250 QR_INCOMPLETE) = range(3)
# NOTE(review): fragments of the argument-description class hierarchy.
# The ``class _Argument`` header, attribute assignments and several
# docstring closers are sampled out of this chunk; code kept verbatim.
# _Argument.__init__: stores min/max occurrence counts for an argument.
254 def __init__(self, min=0, max=None): # pylint: disable-msg=W0622
# _Argument.__repr__: debug representation showing the min/max bounds.
259 return ("<%s min=%s max=%s>" %
260 (self.__class__.__name__, self.min, self.max))
# ArgSuggest: argument with a list of suggested (but not enforced) values.
263 class ArgSuggest(_Argument):
264 """Suggesting argument.
266 Value can be any of the ones passed to the constructor.
269 # pylint: disable-msg=W0622
270 def __init__(self, min=0, max=None, choices=None):
271 _Argument.__init__(self, min=min, max=max)
272 self.choices = choices
275 return ("<%s min=%s max=%s choices=%r>" %
276 (self.__class__.__name__, self.min, self.max, self.choices))
# ArgChoice: like ArgSuggest, but the value MUST be one of the choices.
279 class ArgChoice(ArgSuggest):
282 Value can be any of the ones passed to the constructor. Like L{ArgSuggest},
283 but value must be one of the choices.
288 class ArgUnknown(_Argument):
289 """Unknown argument to program (e.g. determined at runtime).
294 class ArgInstance(_Argument):
295 """Instances argument.
300 class ArgNode(_Argument):
306 class ArgGroup(_Argument):
307 """Node group argument.
312 class ArgJobId(_Argument):
318 class ArgFile(_Argument):
319 """File path argument.
324 class ArgCommand(_Argument):
330 class ArgHost(_Argument):
336 class ArgOs(_Argument):
# Pre-built argument specifications, used when declaring CLI commands.
ARGS_MANY_INSTANCES = [ArgInstance()]
ARGS_MANY_NODES = [ArgNode()]
ARGS_MANY_GROUPS = [ArgGroup()]
ARGS_ONE_INSTANCE = [ArgInstance(min=1, max=1)]
ARGS_ONE_NODE = [ArgNode(min=1, max=1)]
# FIX: was ``[ArgInstance(min=1, max=1)]`` — a copy-paste bug. A single
# node-group argument must use ArgGroup (cf. ARGS_MANY_GROUPS above) so that
# completion/validation treats the argument as a group, not an instance.
ARGS_ONE_GROUP = [ArgGroup(min=1, max=1)]
ARGS_ONE_OS = [ArgOs(min=1, max=1)]
# Derive (tag kind, object name) from the parsed options/arguments.
# NOTE(review): sampled fragment — the assignment of ``kind``, the per-branch
# ``retval`` assignments and the final return are not visible in this chunk.
352 def _ExtractTagsObject(opts, args):
353 """Extract the tag type object.
355 Note that this function will modify its args parameter.
# opts.tag_type is mandatory; its absence is a programming error, not user error
358 if not hasattr(opts, "tag_type"):
359 raise errors.ProgrammerError("tag_type not passed to _ExtractTagsObject")
361 if kind == constants.TAG_CLUSTER:
# node/instance tags need the object name as the first positional argument
363 elif kind == constants.TAG_NODE or kind == constants.TAG_INSTANCE:
365 raise errors.OpPrereqError("no arguments passed to the command")
369 raise errors.ProgrammerError("Unhandled tag type '%s'" % kind)
# NOTE(review): sampled fragment — the '-' (stdin) handling, the read loop
# condition and the file-close are not visible in this chunk.
373 def _ExtendTags(opts, args):
374 """Extend the args if a source file has been given.
376 This function will extend the tags with the contents of the file
377 passed in the 'tags_source' attribute of the opts parameter. A file
378 named '-' will be replaced by stdin.
381 fname = opts.tags_source
387 new_fh = open(fname, "r")
390 # we don't use the nice 'new_data = [line.strip() for line in fh]'
391 # because of python bug 1633941
393 line = new_fh.readline()
396 new_data.append(line.strip())
# tags read from the file are appended to the positional args in place
399 args.extend(new_data)
# NOTE(review): sampled fragment — the client creation (``cl = ...``) and the
# sorting/printing of the result are not visible in this chunk.
402 def ListTags(opts, args):
403 """List the tags on a given object.
405 This is a generic implementation that knows how to deal with all
406 three cases of tag objects (cluster, node, instance). The opts
407 argument is expected to contain a tag_type field denoting what
408 object type we work on.
411 kind, name = _ExtractTagsObject(opts, args)
413 result = cl.QueryTags(kind, name)
414 result = list(result)
# NOTE(review): sampled fragment — the empty-args guard condition preceding
# the OpPrereqError is not visible in this chunk.
420 def AddTags(opts, args):
421 """Add tags on a given object.
423 This is a generic implementation that knows how to deal with all
424 three cases of tag objects (cluster, node, instance). The opts
425 argument is expected to contain a tag_type field denoting what
426 object type we work on.
429 kind, name = _ExtractTagsObject(opts, args)
# _ExtendTags may append tags read from opts.tags_source to args
430 _ExtendTags(opts, args)
432 raise errors.OpPrereqError("No tags to be added")
433 op = opcodes.OpTagsSet(kind=kind, name=name, tags=args)
434 SubmitOpCode(op, opts=opts)
# NOTE(review): sampled fragment — mirrors AddTags above but submits an
# OpTagsDel; the empty-args guard condition is not visible in this chunk.
437 def RemoveTags(opts, args):
438 """Remove tags from a given object.
440 This is a generic implementation that knows how to deal with all
441 three cases of tag objects (cluster, node, instance). The opts
442 argument is expected to contain a tag_type field denoting what
443 object type we work on.
446 kind, name = _ExtractTagsObject(opts, args)
447 _ExtendTags(opts, args)
449 raise errors.OpPrereqError("No tags to be removed")
450 op = opcodes.OpTagsDel(kind=kind, name=name, tags=args)
451 SubmitOpCode(op, opts=opts)
# optparse type-checker: parse a size with unit suffix into an int (MiB).
# NOTE(review): sampled fragment — the ``try:`` line is not visible here.
# Python 2 ``except X, err`` syntax confirms this module targets Python 2.
454 def check_unit(option, opt, value): # pylint: disable-msg=W0613
455 """OptParsers custom converter for units.
459 return utils.ParseUnit(value)
460 except errors.UnitParseError, err:
461 raise OptionValueError("option %s: %s" % (opt, err))
# NOTE(review): sampled fragment — the dict initialisation, the inner
# try/except around elem.split, the duplicate-key test and the return are
# not visible in this chunk.
464 def _SplitKeyVal(opt, data):
465 """Convert a KeyVal string into a dict.
467 This function will convert a key=val[,...] string into a dict. Empty
468 values will be converted specially: keys which have the prefix 'no_'
469 will have the value=False and the prefix stripped, the others will
473 @param opt: a string holding the option name for which we process the
474 data, used in building error messages
476 @param data: a string of the format key=val,key=val,...
478 @return: {key=val, key=val}
479 @raises errors.ParameterError: if there are duplicate keys
# UnescapeAndSplit honours escaped commas inside values
484 for elem in utils.UnescapeAndSplit(data, sep=","):
486 key, val = elem.split("=", 1)
# bare "no_key" -> (key, False); bare "-key" (UN_PREFIX) -> (key, None)
488 if elem.startswith(NO_PREFIX):
489 key, val = elem[len(NO_PREFIX):], False
490 elif elem.startswith(UN_PREFIX):
491 key, val = elem[len(UN_PREFIX):], None
493 key, val = elem, True
495 raise errors.ParameterError("Duplicate key '%s' in option %s" %
# optparse type-checker for "ident:key=val,..." options.
# NOTE(review): sampled fragment — the ``if ":" in value`` split guard, the
# "rest" emptiness checks in the prefix branches, the ``else:`` introducing
# the normal path, and the final return are not visible in this chunk.
501 def check_ident_key_val(option, opt, value): # pylint: disable-msg=W0613
502 """Custom parser for ident:key=val,key=val options.
504 This will store the parsed values as a tuple (ident, {key: val}). As such,
505 multiple uses of this option via action=append is possible.
509 ident, rest = value, ''
511 ident, rest = value.split(":", 1)
# "no_ident" removes the parameter group -> (ident, False); no kv allowed
513 if ident.startswith(NO_PREFIX):
515 msg = "Cannot pass options when removing parameter groups: %s" % value
516 raise errors.ParameterError(msg)
517 retval = (ident[len(NO_PREFIX):], False)
# UN_PREFIX resets the group to defaults -> (ident, None); no kv allowed
518 elif ident.startswith(UN_PREFIX):
520 msg = "Cannot pass options when removing parameter groups: %s" % value
521 raise errors.ParameterError(msg)
522 retval = (ident[len(UN_PREFIX):], None)
524 kv_dict = _SplitKeyVal(opt, rest)
525 retval = (ident, kv_dict)
# optparse type-checker for "key=val,key=val" options; thin wrapper around
# _SplitKeyVal using the option name for error messages.
529 def check_key_val(option, opt, value): # pylint: disable-msg=W0613
530 """Custom parser class for key=val,key=val options.
532 This will store the parsed values as a dict {key: val}.
535 return _SplitKeyVal(opt, value)
# optparse type-checker for yes/no flags.
# NOTE(review): sampled fragment — the ``return False`` / ``return True``
# lines and the final ``else:`` are not visible in this chunk.
538 def check_bool(option, opt, value): # pylint: disable-msg=W0613
539 """Custom parser for yes/no options.
541 This will store the parsed value as either True or False.
# comparison is case-insensitive
544 value = value.lower()
545 if value == constants.VALUE_FALSE or value == "no":
547 elif value == constants.VALUE_TRUE or value == "yes":
550 raise errors.ParameterError("Invalid boolean value '%s'" % value)
553 # completion_suggestion is normally a list. Using numeric values not evaluating
554 # to False for dynamic completion.
# NOTE(review): sampled fragment — range(100, 107) yields seven markers but
# only five names are visible here; OPT_COMPL_ONE_NODE and OPT_COMPL_ONE_OS
# (per __all__/usages elsewhere in this file) are sampled out.
555 (OPT_COMPL_MANY_NODES,
557 OPT_COMPL_ONE_INSTANCE,
559 OPT_COMPL_ONE_IALLOCATOR,
560 OPT_COMPL_INST_ADD_NODES,
561 OPT_COMPL_ONE_NODEGROUP) = range(100, 107)
# frozenset of all completion markers, used to validate completion_suggest
563 OPT_COMPL_ALL = frozenset([
564 OPT_COMPL_MANY_NODES,
566 OPT_COMPL_ONE_INSTANCE,
568 OPT_COMPL_ONE_IALLOCATOR,
569 OPT_COMPL_INST_ADD_NODES,
570 OPT_COMPL_ONE_NODEGROUP,
# Option subclass registering the custom types defined above with optparse.
# NOTE(review): sampled fragment — the closers of ATTRS/TYPES and the string
# entries of the TYPES tuple ("identkeyval", "keyval", "unit", "bool") are
# not visible in this chunk.
574 class CliOption(Option):
575 """Custom option class for optparse.
# extra attribute carrying a completion hint (list or OPT_COMPL_* marker)
578 ATTRS = Option.ATTRS + [
579 "completion_suggest",
581 TYPES = Option.TYPES + (
# map each custom type name to its checker function defined above
587 TYPE_CHECKER = Option.TYPE_CHECKER.copy()
588 TYPE_CHECKER["identkeyval"] = check_ident_key_val
589 TYPE_CHECKER["keyval"] = check_key_val
590 TYPE_CHECKER["unit"] = check_unit
591 TYPE_CHECKER["bool"] = check_bool
594 # optparse.py sets make_option, so we do it for our own option class, too
595 cli_option = CliOption
# --- generic output / behaviour options shared by many commands ---
# NOTE(review): several statements in this run are truncated mid-call by
# sampling (e.g. IGNORE_OFFLINE_OPT, DRY_RUN_OPT, VERBOSE_OPT, NONICS_OPT
# are missing continuation/closing lines); code kept verbatim.
600 DEBUG_OPT = cli_option("-d", "--debug", default=0, action="count",
601 help="Increase debugging level")
603 NOHDR_OPT = cli_option("--no-headers", default=False,
604 action="store_true", dest="no_headers",
605 help="Don't display column headers")
607 SEP_OPT = cli_option("--separator", default=None,
608 action="store", dest="separator",
609 help=("Separator between output fields"
610 " (defaults to one space)"))
612 USEUNITS_OPT = cli_option("--units", default=None,
613 dest="units", choices=('h', 'm', 'g', 't'),
614 help="Specify units for output (one of h/m/g/t)")
616 FIELDS_OPT = cli_option("-o", "--output", dest="output", action="store",
617 type="string", metavar="FIELDS",
618 help="Comma separated list of output fields")
620 FORCE_OPT = cli_option("-f", "--force", dest="force", action="store_true",
621 default=False, help="Force the operation")
623 CONFIRM_OPT = cli_option("--yes", dest="confirm", action="store_true",
624 default=False, help="Do not require confirmation")
626 IGNORE_OFFLINE_OPT = cli_option("--ignore-offline", dest="ignore_offline",
627 action="store_true", default=False,
628 help=("Ignore offline nodes and do as much"
631 TAG_SRC_OPT = cli_option("--from", dest="tags_source",
632 default=None, help="File with tag names")
634 SUBMIT_OPT = cli_option("--submit", dest="submit_only",
635 default=False, action="store_true",
636 help=("Submit the job and return the job ID, but"
637 " don't wait for the job to finish"))
639 SYNC_OPT = cli_option("--sync", dest="do_locking",
640 default=False, action="store_true",
641 help=("Grab locks while doing the queries"
642 " in order to ensure more consistent results"))
# NOTE(review): help text below reads "verify it it could be" — should be
# "verify if it could be"; the statement is truncated here so the string
# cannot be safely rewritten in this chunk.
644 DRY_RUN_OPT = cli_option("--dry-run", default=False,
646 help=("Do not execute the operation, just run the"
647 " check steps and verify it it could be"
650 VERBOSE_OPT = cli_option("-v", "--verbose", default=False,
652 help="Increase the verbosity of the operation")
654 DEBUG_SIMERR_OPT = cli_option("--debug-simulate-errors", default=False,
655 action="store_true", dest="simulate_errors",
656 help="Debugging option that makes the operation"
657 " treat most runtime checks as failed")
659 NWSYNC_OPT = cli_option("--no-wait-for-sync", dest="wait_for_sync",
660 default=True, action="store_false",
661 help="Don't wait for sync (DANGEROUS!)")
# --- instance creation options ---
663 DISK_TEMPLATE_OPT = cli_option("-t", "--disk-template", dest="disk_template",
664 help="Custom disk setup (diskless, file,"
666 default=None, metavar="TEMPL",
667 choices=list(constants.DISK_TEMPLATES))
669 NONICS_OPT = cli_option("--no-nics", default=False, action="store_true",
670 help="Do not create any network cards for"
673 FILESTORE_DIR_OPT = cli_option("--file-storage-dir", dest="file_storage_dir",
674 help="Relative path under default cluster-wide"
675 " file storage dir to store file-based disks",
676 default=None, metavar="<DIR>")
678 FILESTORE_DRIVER_OPT = cli_option("--file-driver", dest="file_driver",
679 help="Driver to use for image files",
680 default="loop", metavar="<DRIVER>",
681 choices=list(constants.FILE_DRIVER))
683 IALLOCATOR_OPT = cli_option("-I", "--iallocator", metavar="<NAME>",
684 help="Select nodes for the instance automatically"
685 " using the <NAME> iallocator plugin",
686 default=None, type="string",
687 completion_suggest=OPT_COMPL_ONE_IALLOCATOR)
# --- OS / hypervisor / NIC / disk options ---
# NOTE(review): some statements here are truncated by sampling (e.g. OS_OPT
# is missing its default/type line); code kept verbatim.
689 DEFAULT_IALLOCATOR_OPT = cli_option("-I", "--default-iallocator",
691 help="Set the default instance allocator plugin",
692 default=None, type="string",
693 completion_suggest=OPT_COMPL_ONE_IALLOCATOR)
695 OS_OPT = cli_option("-o", "--os-type", dest="os", help="What OS to run",
697 completion_suggest=OPT_COMPL_ONE_OS)
699 OSPARAMS_OPT = cli_option("-O", "--os-parameters", dest="osparams",
700 type="keyval", default={},
701 help="OS parameters")
703 FORCE_VARIANT_OPT = cli_option("--force-variant", dest="force_variant",
704 action="store_true", default=False,
705 help="Force an unknown variant")
707 NO_INSTALL_OPT = cli_option("--no-install", dest="no_install",
708 action="store_true", default=False,
709 help="Do not install the OS (will"
712 BACKEND_OPT = cli_option("-B", "--backend-parameters", dest="beparams",
713 type="keyval", default={},
714 help="Backend parameters")
715 
716 HVOPTS_OPT = cli_option("-H", "--hypervisor-parameters", type="keyval",
717 default={}, dest="hvparams",
718 help="Hypervisor parameters")
720 HYPERVISOR_OPT = cli_option("-H", "--hypervisor-parameters", dest="hypervisor",
721 help="Hypervisor and hypervisor options, in the"
722 " format hypervisor:option=value,option=value,...",
723 default=None, type="identkeyval")
725 HVLIST_OPT = cli_option("-H", "--hypervisor-parameters", dest="hvparams",
726 help="Hypervisor and hypervisor options, in the"
727 " format hypervisor:option=value,option=value,...",
728 default=[], action="append", type="identkeyval")
730 NOIPCHECK_OPT = cli_option("--no-ip-check", dest="ip_check", default=True,
731 action="store_false",
732 help="Don't check that the instance's IP"
735 NONAMECHECK_OPT = cli_option("--no-name-check", dest="name_check",
736 default=True, action="store_false",
737 help="Don't check that the instance's name"
740 NET_OPT = cli_option("--net",
741 help="NIC parameters", default=[],
742 dest="nics", action="append", type="identkeyval")
744 DISK_OPT = cli_option("--disk", help="Disk parameters", default=[],
745 dest="disks", action="append", type="identkeyval")
747 DISKIDX_OPT = cli_option("--disks", dest="disks", default=None,
748 help="Comma-separated list of disks"
749 " indices to act on (e.g. 0,2) (optional,"
750 " defaults to all disks)")
752 OS_SIZE_OPT = cli_option("-s", "--os-size", dest="sd_size",
753 help="Enforces a single-disk configuration using the"
754 " given disk size, in MiB unless a suffix is used",
755 default=None, type="unit", metavar="<size>")
# --- migration / failover options ---
757 IGNORE_CONSIST_OPT = cli_option("--ignore-consistency",
758 dest="ignore_consistency",
759 action="store_true", default=False,
760 help="Ignore the consistency of the disks on"
763 ALLOW_FAILOVER_OPT = cli_option("--allow-failover",
764 dest="allow_failover",
765 action="store_true", default=False,
766 help="If migration is not possible fallback to"
769 NONLIVE_OPT = cli_option("--non-live", dest="live",
770 default=True, action="store_false",
771 help="Do a non-live migration (this usually means"
772 " freeze the instance, save the state, transfer and"
773 " only then resume running on the secondary node)")
# Override the hypervisor's default migration mode (live vs. non-live).
# FIX: the help text opened a parenthesis "(choose ..." that was never
# closed; closed it after "non-live".
MIGRATION_MODE_OPT = cli_option("--migration-mode", dest="migration_mode",
                                default=None,
                                choices=list(constants.HT_MIGRATION_MODES),
                                help="Override default migration mode (choose"
                                " either live or non-live)")
# --- node placement / instance operation options ---
# NOTE(review): several statements are truncated by sampling (NODEGROUP_OPT,
# SINGLE_NODE_OPT, CLEANUP_OPT, IGNORE_REMOVE_FAILURES_OPT, AUTO_PROMOTE_OPT,
# SRC_NODE_OPT, SRC_DIR_OPT miss lines); code kept verbatim.
781 NODE_PLACEMENT_OPT = cli_option("-n", "--node", dest="node",
782 help="Target node and optional secondary node",
783 metavar="<pnode>[:<snode>]",
784 completion_suggest=OPT_COMPL_INST_ADD_NODES)
786 NODE_LIST_OPT = cli_option("-n", "--node", dest="nodes", default=[],
787 action="append", metavar="<node>",
788 help="Use only this node (can be used multiple"
789 " times, if not given defaults to all nodes)",
790 completion_suggest=OPT_COMPL_ONE_NODE)
792 NODEGROUP_OPT = cli_option("-g", "--node-group",
794 help="Node group (name or uuid)",
795 metavar="<nodegroup>",
796 default=None, type="string",
797 completion_suggest=OPT_COMPL_ONE_NODEGROUP)
799 SINGLE_NODE_OPT = cli_option("-n", "--node", dest="node", help="Target node",
801 completion_suggest=OPT_COMPL_ONE_NODE)
803 NOSTART_OPT = cli_option("--no-start", dest="start", default=True,
804 action="store_false",
805 help="Don't start the instance after creation")
807 SHOWCMD_OPT = cli_option("--show-cmd", dest="show_command",
808 action="store_true", default=False,
809 help="Show command instead of executing it")
811 CLEANUP_OPT = cli_option("--cleanup", dest="cleanup",
812 default=False, action="store_true",
813 help="Instead of performing the migration, try to"
814 " recover from a failed cleanup. This is safe"
815 " to run even if the instance is healthy, but it"
816 " will create extra replication traffic and "
817 " disrupt briefly the replication (like during the"
820 STATIC_OPT = cli_option("-s", "--static", dest="static",
821 action="store_true", default=False,
822 help="Only show configuration data, not runtime data")
824 ALL_OPT = cli_option("--all", dest="show_all",
825 default=False, action="store_true",
826 help="Show info on all instances on the cluster."
827 " This can take a long time to run, use wisely")
829 SELECT_OS_OPT = cli_option("--select-os", dest="select_os",
830 action="store_true", default=False,
831 help="Interactive OS reinstall, lists available"
832 " OS templates for selection")
# --- instance removal / export options ---
834 IGNORE_FAILURES_OPT = cli_option("--ignore-failures", dest="ignore_failures",
835 action="store_true", default=False,
836 help="Remove the instance from the cluster"
837 " configuration even if there are failures"
838 " during the removal process")
840 IGNORE_REMOVE_FAILURES_OPT = cli_option("--ignore-remove-failures",
841 dest="ignore_remove_failures",
842 action="store_true", default=False,
843 help="Remove the instance from the"
844 " cluster configuration even if there"
845 " are failures during the removal"
848 REMOVE_INSTANCE_OPT = cli_option("--remove-instance", dest="remove_instance",
849 action="store_true", default=False,
850 help="Remove the instance from the cluster")
852 DST_NODE_OPT = cli_option("-n", "--target-node", dest="dst_node",
853 help="Specifies the new node for the instance",
854 metavar="NODE", default=None,
855 completion_suggest=OPT_COMPL_ONE_NODE)
857 NEW_SECONDARY_OPT = cli_option("-n", "--new-secondary", dest="dst_node",
858 help="Specifies the new secondary node",
859 metavar="NODE", default=None,
860 completion_suggest=OPT_COMPL_ONE_NODE)
# --- replace-disks options ---
862 ON_PRIMARY_OPT = cli_option("-p", "--on-primary", dest="on_primary",
863 default=False, action="store_true",
864 help="Replace the disk(s) on the primary"
865 " node (only for the drbd template)")
867 ON_SECONDARY_OPT = cli_option("-s", "--on-secondary", dest="on_secondary",
868 default=False, action="store_true",
869 help="Replace the disk(s) on the secondary"
870 " node (only for the drbd template)")
872 AUTO_PROMOTE_OPT = cli_option("--auto-promote", dest="auto_promote",
873 default=False, action="store_true",
874 help="Lock all nodes and auto-promote as needed"
877 AUTO_REPLACE_OPT = cli_option("-a", "--auto", dest="auto",
878 default=False, action="store_true",
879 help="Automatically replace faulty disks"
880 " (only for the drbd template)")
882 IGNORE_SIZE_OPT = cli_option("--ignore-size", dest="ignore_size",
883 default=False, action="store_true",
884 help="Ignore current recorded size"
885 " (useful for forcing activation when"
886 " the recorded size is wrong)")
888 SRC_NODE_OPT = cli_option("--src-node", dest="src_node", help="Source node",
890 completion_suggest=OPT_COMPL_ONE_NODE)
892 SRC_DIR_OPT = cli_option("--src-dir", dest="src_dir", help="Source directory",
# --- node add/modify options ---
895 SECONDARY_IP_OPT = cli_option("-s", "--secondary-ip", dest="secondary_ip",
896 help="Specify the secondary ip for the node",
897 metavar="ADDRESS", default=None)
899 READD_OPT = cli_option("--readd", dest="readd",
900 default=False, action="store_true",
901 help="Readd old node after replacing it")
903 NOSSH_KEYCHECK_OPT = cli_option("--no-ssh-key-check", dest="ssh_key_check",
904 default=True, action="store_false",
905 help="Disable SSH key fingerprint checking")
# --- node flag and cluster-wide configuration options ---
# NOTE(review): truncated statements in this run include NOLVM_STORAGE_OPT,
# MAC_PREFIX_OPT, MASTER_NETDEV_OPT, GLOBAL_*_FILEDIR_OPT, REBOOT_TYPE_OPT,
# INTERVAL_OPT (whose help text also misspells "repetitions" as
# "repetions"), EARLY_RELEASE_OPT, RAPI_CERT_OPT, NEW_RAPI_CERT_OPT and
# NEW_CONFD_HMAC_KEY_OPT; code kept verbatim.
907 NODE_FORCE_JOIN_OPT = cli_option("--force-join", dest="force_join",
908 default=False, action="store_true",
909 help="Force the joining of a node")
911 MC_OPT = cli_option("-C", "--master-candidate", dest="master_candidate",
912 type="bool", default=None, metavar=_YORNO,
913 help="Set the master_candidate flag on the node")
915 OFFLINE_OPT = cli_option("-O", "--offline", dest="offline", metavar=_YORNO,
916 type="bool", default=None,
917 help=("Set the offline flag on the node"
918 " (cluster does not communicate with offline"
921 DRAINED_OPT = cli_option("-D", "--drained", dest="drained", metavar=_YORNO,
922 type="bool", default=None,
923 help=("Set the drained flag on the node"
924 " (excluded from allocation operations)"))
926 CAPAB_MASTER_OPT = cli_option("--master-capable", dest="master_capable",
927 type="bool", default=None, metavar=_YORNO,
928 help="Set the master_capable flag on the node")
930 CAPAB_VM_OPT = cli_option("--vm-capable", dest="vm_capable",
931 type="bool", default=None, metavar=_YORNO,
932 help="Set the vm_capable flag on the node")
934 ALLOCATABLE_OPT = cli_option("--allocatable", dest="allocatable",
935 type="bool", default=None, metavar=_YORNO,
936 help="Set the allocatable flag on a volume")
938 NOLVM_STORAGE_OPT = cli_option("--no-lvm-storage", dest="lvm_storage",
939 help="Disable support for lvm based instances"
941 action="store_false", default=True)
943 ENABLED_HV_OPT = cli_option("--enabled-hypervisors",
944 dest="enabled_hypervisors",
945 help="Comma-separated list of hypervisors",
946 type="string", default=None)
948 NIC_PARAMS_OPT = cli_option("-N", "--nic-parameters", dest="nicparams",
949 type="keyval", default={},
950 help="NIC parameters")
952 CP_SIZE_OPT = cli_option("-C", "--candidate-pool-size", default=None,
953 dest="candidate_pool_size", type="int",
954 help="Set the candidate pool size")
956 VG_NAME_OPT = cli_option("--vg-name", dest="vg_name",
957 help=("Enables LVM and specifies the volume group"
958 " name (cluster-wide) for disk allocation"
959 " [%s]" % constants.DEFAULT_VG),
960 metavar="VG", default=None)
962 YES_DOIT_OPT = cli_option("--yes-do-it", dest="yes_do_it",
963 help="Destroy cluster", action="store_true")
965 NOVOTING_OPT = cli_option("--no-voting", dest="no_voting",
966 help="Skip node agreement check (dangerous)",
967 action="store_true", default=False)
969 MAC_PREFIX_OPT = cli_option("-m", "--mac-prefix", dest="mac_prefix",
970 help="Specify the mac prefix for the instance IP"
971 " addresses, in the format XX:XX:XX",
975 MASTER_NETDEV_OPT = cli_option("--master-netdev", dest="master_netdev",
976 help="Specify the node interface (cluster-wide)"
977 " on which the master IP address will be added"
978 " (cluster init default: %s)" %
979 constants.DEFAULT_BRIDGE,
983 GLOBAL_FILEDIR_OPT = cli_option("--file-storage-dir", dest="file_storage_dir",
984 help="Specify the default directory (cluster-"
985 "wide) for storing the file-based disks [%s]" %
986 constants.DEFAULT_FILE_STORAGE_DIR,
988 default=constants.DEFAULT_FILE_STORAGE_DIR)
990 GLOBAL_SHARED_FILEDIR_OPT = cli_option("--shared-file-storage-dir",
991 dest="shared_file_storage_dir",
992 help="Specify the default directory (cluster-"
993 "wide) for storing the shared file-based"
995 constants.DEFAULT_SHARED_FILE_STORAGE_DIR,
997 default=constants.DEFAULT_SHARED_FILE_STORAGE_DIR)
999 NOMODIFY_ETCHOSTS_OPT = cli_option("--no-etc-hosts", dest="modify_etc_hosts",
1000 help="Don't modify /etc/hosts",
1001 action="store_false", default=True)
1003 NOMODIFY_SSH_SETUP_OPT = cli_option("--no-ssh-init", dest="modify_ssh_setup",
1004 help="Don't initialize SSH keys",
1005 action="store_false", default=True)
1007 ERROR_CODES_OPT = cli_option("--error-codes", dest="error_codes",
1008 help="Enable parseable error messages",
1009 action="store_true", default=False)
1011 NONPLUS1_OPT = cli_option("--no-nplus1-mem", dest="skip_nplusone_mem",
1012 help="Skip N+1 memory redundancy tests",
1013 action="store_true", default=False)
1015 REBOOT_TYPE_OPT = cli_option("-t", "--type", dest="reboot_type",
1016 help="Type of reboot: soft/hard/full",
1017 default=constants.INSTANCE_REBOOT_HARD,
1019 choices=list(constants.REBOOT_TYPES))
1021 IGNORE_SECONDARIES_OPT = cli_option("--ignore-secondaries",
1022 dest="ignore_secondaries",
1023 default=False, action="store_true",
1024 help="Ignore errors from secondaries")
1026 NOSHUTDOWN_OPT = cli_option("--noshutdown", dest="shutdown",
1027 action="store_false", default=True,
1028 help="Don't shutdown the instance (unsafe)")
1030 TIMEOUT_OPT = cli_option("--timeout", dest="timeout", type="int",
1031 default=constants.DEFAULT_SHUTDOWN_TIMEOUT,
1032 help="Maximum time to wait")
1034 SHUTDOWN_TIMEOUT_OPT = cli_option("--shutdown-timeout",
1035 dest="shutdown_timeout", type="int",
1036 default=constants.DEFAULT_SHUTDOWN_TIMEOUT,
1037 help="Maximum time to wait for instance shutdown")
1039 INTERVAL_OPT = cli_option("--interval", dest="interval", type="int",
1041 help=("Number of seconds between repetions of the"
1044 EARLY_RELEASE_OPT = cli_option("--early-release",
1045 dest="early_release", default=False,
1046 action="store_true",
1047 help="Release the locks on the secondary"
# --- renew-crypto options ---
1050 NEW_CLUSTER_CERT_OPT = cli_option("--new-cluster-certificate",
1051 dest="new_cluster_cert",
1052 default=False, action="store_true",
1053 help="Generate a new cluster certificate")
1055 RAPI_CERT_OPT = cli_option("--rapi-certificate", dest="rapi_cert",
1057 help="File containing new RAPI certificate")
1059 NEW_RAPI_CERT_OPT = cli_option("--new-rapi-certificate", dest="new_rapi_cert",
1060 default=None, action="store_true",
1061 help=("Generate a new self-signed RAPI"
1064 NEW_CONFD_HMAC_KEY_OPT = cli_option("--new-confd-hmac-key",
1065 dest="new_confd_hmac_key",
1066 default=False, action="store_true",
1067 help=("Create a new HMAC key for %s" %
# Load a cluster domain secret from a file (used by renew-crypto).
# FIX: help text read "Load new new cluster domain secret" — duplicated
# word removed.
CLUSTER_DOMAIN_SECRET_OPT = cli_option("--cluster-domain-secret",
                                       dest="cluster_domain_secret",
                                       default=None,
                                       help=("Load new cluster domain"
                                             " secret from file"))
# --- remaining cluster / node-group / OS options and option bundles ---
# NOTE(review): truncated statements in this run include
# NEW_CLUSTER_DOMAIN_SECRET_OPT, PREALLOC_WIPE_DISKS_OPT, FORCE_FILTER_OPT
# and the COMMON_CREATE_OPTS list (opener visible, most entries and the
# closer sampled out); code kept verbatim.
1076 NEW_CLUSTER_DOMAIN_SECRET_OPT = cli_option("--new-cluster-domain-secret",
1077 dest="new_cluster_domain_secret",
1078 default=False, action="store_true",
1079 help=("Create a new cluster domain"
1082 USE_REPL_NET_OPT = cli_option("--use-replication-network",
1083 dest="use_replication_network",
1084 help="Whether to use the replication network"
1085 " for talking to the nodes",
1086 action="store_true", default=False)
1088 MAINTAIN_NODE_HEALTH_OPT = \
1089 cli_option("--maintain-node-health", dest="maintain_node_health",
1090 metavar=_YORNO, default=None, type="bool",
1091 help="Configure the cluster to automatically maintain node"
1092 " health, by shutting down unknown instances, shutting down"
1093 " unknown DRBD devices, etc.")
1095 IDENTIFY_DEFAULTS_OPT = \
1096 cli_option("--identify-defaults", dest="identify_defaults",
1097 default=False, action="store_true",
1098 help="Identify which saved instance parameters are equal to"
1099 " the current cluster defaults and set them as such, instead"
1100 " of marking them as overridden")
1102 UIDPOOL_OPT = cli_option("--uid-pool", default=None,
1103 action="store", dest="uid_pool",
1104 help=("A list of user-ids or user-id"
1105 " ranges separated by commas"))
1107 ADD_UIDS_OPT = cli_option("--add-uids", default=None,
1108 action="store", dest="add_uids",
1109 help=("A list of user-ids or user-id"
1110 " ranges separated by commas, to be"
1111 " added to the user-id pool"))
1113 REMOVE_UIDS_OPT = cli_option("--remove-uids", default=None,
1114 action="store", dest="remove_uids",
1115 help=("A list of user-ids or user-id"
1116 " ranges separated by commas, to be"
1117 " removed from the user-id pool"))
1119 RESERVED_LVS_OPT = cli_option("--reserved-lvs", default=None,
1120 action="store", dest="reserved_lvs",
1121 help=("A comma-separated list of reserved"
1122 " logical volumes names, that will be"
1123 " ignored by cluster verify"))
1125 ROMAN_OPT = cli_option("--roman",
1126 dest="roman_integers", default=False,
1127 action="store_true",
1128 help="Use roman numbers for positive integers")
1130 DRBD_HELPER_OPT = cli_option("--drbd-usermode-helper", dest="drbd_helper",
1131 action="store", default=None,
1132 help="Specifies usermode helper for DRBD")
1134 NODRBD_STORAGE_OPT = cli_option("--no-drbd-storage", dest="drbd_storage",
1135 action="store_false", default=True,
1136 help="Disable support for DRBD")
1138 PRIMARY_IP_VERSION_OPT = \
1139 cli_option("--primary-ip-version", default=constants.IP4_VERSION,
1140 action="store", dest="primary_ip_version",
1141 metavar="%d|%d" % (constants.IP4_VERSION,
1142 constants.IP6_VERSION),
1143 help="Cluster-wide IP version for primary IP")
1145 PRIORITY_OPT = cli_option("--priority", default=None, dest="priority",
1146 metavar="|".join(name for name, _ in _PRIORITY_NAMES),
1147 choices=_PRIONAME_TO_VALUE.keys(),
1148 help="Priority for opcode processing")
1150 HID_OS_OPT = cli_option("--hidden", dest="hidden",
1151 type="bool", default=None, metavar=_YORNO,
1152 help="Sets the hidden flag on the OS")
1154 BLK_OS_OPT = cli_option("--blacklisted", dest="blacklisted",
1155 type="bool", default=None, metavar=_YORNO,
1156 help="Sets the blacklisted flag on the OS")
1158 PREALLOC_WIPE_DISKS_OPT = cli_option("--prealloc-wipe-disks", default=None,
1159 type="bool", metavar=_YORNO,
1160 dest="prealloc_wipe_disks",
1161 help=("Wipe disks prior to instance"
1164 NODE_PARAMS_OPT = cli_option("--node-parameters", dest="ndparams",
1165 type="keyval", default=None,
1166 help="Node parameters")
1168 ALLOC_POLICY_OPT = cli_option("--alloc-policy", dest="alloc_policy",
1169 action="store", metavar="POLICY", default=None,
1170 help="Allocation policy for the node group")
1172 NODE_POWERED_OPT = cli_option("--node-powered", default=None,
1173 type="bool", metavar=_YORNO,
1174 dest="node_powered",
1175 help="Specify if the SoR for node is powered")
1177 OOB_TIMEOUT_OPT = cli_option("--oob-timeout", dest="oob_timeout", type="int",
1178 default=constants.OOB_TIMEOUT,
1179 help="Maximum time to wait for out-of-band helper")
1181 POWER_DELAY_OPT = cli_option("--power-delay", dest="power_delay", type="float",
1182 default=constants.OOB_POWER_DELAY,
1183 help="Time in seconds to wait between power-ons")
1185 FORCE_FILTER_OPT = cli_option("-F", "--filter", dest="force_filter",
1186 action="store_true", default=False,
1187 help=("Whether command argument should be treated"
1191 #: Options provided by all commands
1192 COMMON_OPTS = [DEBUG_OPT]
1194 # common options for creating instances. add and import then add their own
1196 COMMON_CREATE_OPTS = [
1201 FILESTORE_DRIVER_OPT,
def _ParseArgs(argv, commands, aliases):
  """Parser for the command line arguments.

  This function parses the arguments and returns the function which
  must be executed together with its (modified) arguments.

  @param argv: the command line
  @param commands: dictionary with special contents, see the design
      doc for cmdline handling
  @param aliases: dictionary with command aliases {'alias': 'target', ...}
  @return: a (function, options, arguments) tuple; (None, None, None)
      is returned on usage/argument errors

  """
  # NOTE(review): several lines appear to be missing from this copy (the
  # branch choosing "binary", the sys.exit calls, the sorting of the
  # command list and the alias resolution) -- confirm against upstream.
    binary = "<command>"
    binary = argv[0].split("/")[-1]

  if len(argv) > 1 and argv[1] == "--version":
    ToStdout("%s (ganeti %s) %s", binary, constants.VCS_VERSION,
             constants.RELEASE_VERSION)
    # Quit right away. That way we don't have to care about this special
    # argument. optparse.py does it the same.

  if len(argv) < 2 or not (argv[1] in commands or
                           argv[1] in aliases):
    # let's do a nice thing
    sortedcmds = commands.keys()

    ToStdout("Usage: %s {command} [options...] [argument...]", binary)
    ToStdout("%s <command> --help to see details, or man %s", binary, binary)

    # compute the max line length for cmd + usage
    mlen = max([len(" %s" % cmd) for cmd in commands])
    mlen = min(60, mlen)  # should not get here...

    # and format a nice command list
    ToStdout("Commands:")
    for cmd in sortedcmds:
      cmdstr = " %s" % (cmd,)
      help_text = commands[cmd][4]
      # wrap the help text to fit next to the command-name column
      help_lines = textwrap.wrap(help_text, 79 - 3 - mlen)
      ToStdout("%-*s - %s", mlen, cmdstr, help_lines.pop(0))
      for line in help_lines:
        ToStdout("%-*s %s", mlen, "", line)

    return None, None, None

  # get command, unalias it, and look it up in commands
      raise errors.ProgrammerError("Alias '%s' overrides an existing"
    if aliases[cmd] not in commands:
      raise errors.ProgrammerError("Alias '%s' maps to non-existing"
                                   " command '%s'" % (cmd, aliases[cmd]))

  func, args_def, parser_opts, usage, description = commands[cmd]
  parser = OptionParser(option_list=parser_opts + COMMON_OPTS,
                        description=description,
                        formatter=TitledHelpFormatter(),
                        usage="%%prog %s %s" % (cmd, usage))
  # everything after the sub-command name belongs to the sub-command
  parser.disable_interspersed_args()
  options, args = parser.parse_args()

  if not _CheckArguments(cmd, args_def, args):
    return None, None, None

  return func, options, args
def _CheckArguments(cmd, args_def, args):
  """Verifies the arguments using the argument definition.

  Algorithm:

    1. Abort with error if values specified by user but none expected.

    1. For each argument in definition

      1. Keep running count of minimum number of values (min_count)

      1. Keep running count of maximum number of values (max_count)

      1. If it has an unlimited number of values

        1. Abort with error if it's not the last argument in the definition

    1. If last argument has limited number of values

      1. Abort with error if number of values doesn't match or is too large

    1. Abort with error if user didn't pass enough values (min_count)

  """
  # NOTE(review): several lines appear to be missing from this copy (the
  # "return False" after each error message, the counter initialisation,
  # the min/max assignments and the "if check_max:" guard) -- confirm
  # against upstream before relying on this body.
  if args and not args_def:
    ToStderr("Error: Command %s expects no arguments", cmd)

  last_idx = len(args_def) - 1

  for idx, arg in enumerate(args_def):
    if min_count is None:
    elif arg.min is not None:
      min_count += arg.min

    if max_count is None:
    elif arg.max is not None:
      max_count += arg.max

      # only the last argument may accept an unlimited number of values
      check_max = (arg.max is not None)

    elif arg.max is None:
      raise errors.ProgrammerError("Only the last argument can have max=None")

    # Command with exact number of arguments
    if (min_count is not None and max_count is not None and
        min_count == max_count and len(args) != min_count):
      ToStderr("Error: Command %s expects %d argument(s)", cmd, min_count)

    # Command with limited number of arguments
    if max_count is not None and len(args) > max_count:
      ToStderr("Error: Command %s expects only %d argument(s)",

  # Command with some required arguments
  if min_count is not None and len(args) < min_count:
    ToStderr("Error: Command %s expects at least %d argument(s)",
def SplitNodeOption(value):
  """Splits the value of a --node option.

  @type value: string or None
  @param value: the value passed to --node ("pnode" or "pnode:snode")
  @return: a two-element sequence; note that the split() branch returns
      a list [pnode, snode] while the fallback returns the tuple
      (value, None)

  """
  if value and ':' in value:
    return value.split(':', 1)
  return (value, None)
def CalculateOSNames(os_name, os_variants):
  """Calculates all the names an OS can be called, according to its variants.

  @type os_name: string
  @param os_name: base name of the os
  @type os_variants: list or None
  @param os_variants: list of supported variants
  @rtype: list
  @return: list of valid names

  """
  # Without variants the OS is only known under its base name; with
  # variants every "name+variant" combination is a valid name.  The copy
  # under review iterated os_variants unconditionally, which would raise
  # TypeError for the documented "None" case.
  if os_variants:
    return ['%s+%s' % (os_name, v) for v in os_variants]
  else:
    return [os_name]
def ParseFields(selected, default):
  """Parses the values of "--field"-like options.

  @type selected: string or None
  @param selected: User-selected options
  @type default: list
  @param default: Default fields
  @rtype: list
  @return: the fields to use: the defaults when nothing was selected,
      defaults plus extras for a "+"-prefixed selection, otherwise the
      comma-separated user selection

  """
  # The copy under review lost the "return default" after this guard,
  # which would have made a None selection fall through to .startswith()
  # and crash; restore the early return.
  if selected is None:
    return default

  if selected.startswith("+"):
    # "+a,b" means: the default fields plus a and b
    return default + selected[1:].split(",")

  return selected.split(",")
#: Decorator alias for L{rpc.RunWithRPC}.
UsesRPC = rpc.RunWithRPC
def AskUser(text, choices=None):
  """Ask the user a question.

  @param text: the question to ask

  @param choices: list with elements tuples (input_char, return_value,
      description); if not given, it will default to: [('y', True,
      'Perform the operation'), ('n', False, 'Do no do the operation')];
      note that the '?' char is reserved for help

  @return: one of the return values from the choices list; if input is
      not possible (i.e. not running with a tty, we return the last
      entry from the list)

  """
  # NOTE(review): several lines appear to be missing from this copy (the
  # "if choices is None:" guard, "new_text = []", the try/except around
  # opening the tty and the prompt/read loop) -- confirm against upstream.
    choices = [('y', True, 'Perform the operation'),
               ('n', False, 'Do not perform the operation')]
  if not choices or not isinstance(choices, list):
    raise errors.ProgrammerError("Invalid choices argument to AskUser")
  for entry in choices:
    if not isinstance(entry, tuple) or len(entry) < 3 or entry[0] == '?':
      raise errors.ProgrammerError("Invalid choices element to AskUser")

  # default answer: the last choice's return value (used when no tty)
  answer = choices[-1][1]
  for line in text.splitlines():
    new_text.append(textwrap.fill(line, 70, replace_whitespace=False))
  text = "\n".join(new_text)
  # the tty is used directly so prompting works even with redirected stdio
  f = file("/dev/tty", "a+")
    chars = [entry[0] for entry in choices]
    # mark the default (last) choice with brackets
    chars[-1] = "[%s]" % chars[-1]
    maps = dict([(entry[0], entry[1]) for entry in choices])
      f.write("/".join(chars))
      line = f.readline(2).strip().lower()
        # '?' prints the description of every choice
        for entry in choices:
          f.write(" %s - %s\n" % (entry[0], entry[2]))
class JobSubmittedException(Exception):
  """Job was submitted, client should exit.

  This exception has one argument, the ID of the job that was
  submitted. The handler should print this ID.

  This is not an error, just a structured way to exit from clients.

  """
def SendJob(ops, cl=None):
  """Function to submit an opcode without waiting for the results.

  @type ops: list
  @param ops: list of opcodes
  @type cl: luxi.Client
  @param cl: the luxi client to use for communicating with the master;
      if None, a new client will be created

  """
  # NOTE(review): the default-client creation and the "return job_id"
  # appear to be missing from this copy -- confirm against upstream.
  job_id = cl.SubmitJob(ops)
def GenericPollJob(job_id, cbs, report_cbs):
  """Generic job-polling function.

  @type job_id: number
  @param job_id: Job ID
  @type cbs: Instance of L{JobPollCbBase}
  @param cbs: Data callbacks
  @type report_cbs: Instance of L{JobPollReportCbBase}
  @param report_cbs: Reporting callbacks

  """
  # NOTE(review): several lines appear to be missing from this copy (the
  # "while True:" loop header, some continuation lines and the final
  # per-opcode error-handling details) -- confirm against upstream.
  prev_job_info = None
  prev_logmsg_serial = None

    # Ask for one change at a time; the previous info/serial let the
    # callback report only what is new.
    result = cbs.WaitForJobChangeOnce(job_id, ["status"], prev_job_info,
      # job not found, go away!
      raise errors.JobLost("Job with id %s lost" % job_id)

    if result == constants.JOB_NOTCHANGED:
      report_cbs.ReportNotChanged(job_id, status)

    # Split result, a tuple of (field values, log entries)
    (job_info, log_entries) = result
    (status, ) = job_info

      for log_entry in log_entries:
        (serial, timestamp, log_type, message) = log_entry
        report_cbs.ReportLogMessage(job_id, serial, timestamp,
        # remember the highest serial seen so messages are not repeated
        prev_logmsg_serial = max(prev_logmsg_serial, serial)

    # TODO: Handle canceled and archived jobs
    elif status in (constants.JOB_STATUS_SUCCESS,
                    constants.JOB_STATUS_ERROR,
                    constants.JOB_STATUS_CANCELING,
                    constants.JOB_STATUS_CANCELED):

    prev_job_info = job_info

  # the job has finished; fetch its final status and per-opcode results
  jobs = cbs.QueryJobs([job_id], ["status", "opstatus", "opresult"])
    raise errors.JobLost("Job with id %s lost" % job_id)

  status, opstatus, result = jobs[0]

  if status == constants.JOB_STATUS_SUCCESS:

  if status in (constants.JOB_STATUS_CANCELING, constants.JOB_STATUS_CANCELED):
    raise errors.OpExecError("Job was canceled")

  for idx, (status, msg) in enumerate(zip(opstatus, result)):
    if status == constants.OP_STATUS_SUCCESS:
    elif status == constants.OP_STATUS_ERROR:
      # re-raise an encapsulated exception if there is one
      errors.MaybeRaise(msg)

        raise errors.OpExecError("partial failure (opcode %d): %s" %

      raise errors.OpExecError(str(msg))

  # default failure mode
  raise errors.OpExecError(result)
class JobPollCbBase:
  """Base class for L{GenericPollJob} callbacks.

  """
  # NOTE(review): an empty "__init__" ("Initializes this class.") appears
  # to be truncated out of this copy.

  def WaitForJobChangeOnce(self, job_id, fields,
                           prev_job_info, prev_log_serial):
    """Waits for changes on a job.

    """
    raise NotImplementedError()

  def QueryJobs(self, job_ids, fields):
    """Returns the selected fields for the selected job IDs.

    @type job_ids: list of numbers
    @param job_ids: Job IDs
    @type fields: list of strings
    @param fields: Fields

    """
    raise NotImplementedError()
class JobPollReportCbBase:
  """Base class for L{GenericPollJob} reporting callbacks.

  """
  # NOTE(review): an empty "__init__" ("Initializes this class.") appears
  # to be truncated out of this copy.

  def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
    """Handles a log message.

    """
    raise NotImplementedError()

  def ReportNotChanged(self, job_id, status):
    """Called if a job hasn't changed in a while.

    @type job_id: number
    @param job_id: Job ID
    @type status: string or None
    @param status: Job status if available

    """
    raise NotImplementedError()
class _LuxiJobPollCb(JobPollCbBase):
  """L{JobPollCbBase} implementation that talks to the master via luxi."""

  def __init__(self, cl):
    """Initializes this class.

    @type cl: luxi.Client
    @param cl: luxi client used by the callbacks below

    """
    JobPollCbBase.__init__(self)
    # NOTE(review): the assignment of the client ("self.cl = cl") appears
    # to be missing from this copy; the methods below rely on self.cl.

  def WaitForJobChangeOnce(self, job_id, fields,
                           prev_job_info, prev_log_serial):
    """Waits for changes on a job.

    """
    return self.cl.WaitForJobChangeOnce(job_id, fields,
                                        prev_job_info, prev_log_serial)

  def QueryJobs(self, job_ids, fields):
    """Returns the selected fields for the selected job IDs.

    """
    return self.cl.QueryJobs(job_ids, fields)
class FeedbackFnJobPollReportCb(JobPollReportCbBase):
  """Reporting callbacks that forward log messages to a feedback function."""

  def __init__(self, feedback_fn):
    """Initializes this class.

    @type feedback_fn: callable
    @param feedback_fn: function called with each (timestamp, type, msg)

    """
    JobPollReportCbBase.__init__(self)

    self.feedback_fn = feedback_fn

    assert callable(feedback_fn)

  def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
    """Handles a log message.

    """
    self.feedback_fn((timestamp, log_type, log_msg))

  def ReportNotChanged(self, job_id, status):
    """Called if a job hasn't changed in a while.

    """
    # deliberately a no-op for feedback-function-based reporting
class StdioJobPollReportCb(JobPollReportCbBase):
  # NOTE(review): the "def __init__(self):" line appears to be missing
  # from this copy; the docstring and statements below belong to it.
    """Initializes this class.

    """
    JobPollReportCbBase.__init__(self)

    # flags so each "waiting" notification is printed only once
    self.notified_queued = False
    self.notified_waitlock = False

  def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
    """Handles a log message.

    """
    ToStdout("%s %s", time.ctime(utils.MergeTime(timestamp)),
             FormatLogMessage(log_type, log_msg))

  def ReportNotChanged(self, job_id, status):
    """Called if a job hasn't changed in a while.

    @type job_id: number
    @param job_id: Job ID
    @type status: string or None
    @param status: Job status if available

    """
    if status == constants.JOB_STATUS_QUEUED and not self.notified_queued:
      ToStderr("Job %s is waiting in queue", job_id)
      self.notified_queued = True

    elif status == constants.JOB_STATUS_WAITLOCK and not self.notified_waitlock:
      ToStderr("Job %s is trying to acquire all necessary locks", job_id)
      self.notified_waitlock = True
def FormatLogMessage(log_type, log_msg):
  """Formats a job message according to its type.

  @param log_type: the message type (one of the constants.ELOG_* values)
  @param log_msg: the message payload; non-ELOG_MESSAGE payloads are
      stringified first
  @return: the safely-encoded message text

  """
  if log_type != constants.ELOG_MESSAGE:
    log_msg = str(log_msg)

  return utils.SafeEncode(log_msg)
def PollJob(job_id, cl=None, feedback_fn=None, reporter=None):
  """Function to poll for the result of a job.

  @type job_id: job identified
  @param job_id: the job to poll for results
  @type cl: luxi.Client
  @param cl: the luxi client to use for communicating with the master;
      if None, a new client will be created

  """
  # NOTE(review): lines appear to be missing from this copy (the default
  # client creation and the feedback_fn/reporter branch conditions) --
  # confirm against upstream.

  if reporter is None:
      reporter = FeedbackFnJobPollReportCb(feedback_fn)
      reporter = StdioJobPollReportCb()
    raise errors.ProgrammerError("Can't specify reporter and feedback function")

  return GenericPollJob(job_id, _LuxiJobPollCb(cl), reporter)
def SubmitOpCode(op, cl=None, feedback_fn=None, opts=None, reporter=None):
  """Legacy function to submit an opcode.

  This is just a simple wrapper over the construction of the processor
  instance. It should be extended to better handle feedback and
  interaction functions.

  """
  # NOTE(review): the default-client creation and the continuation of the
  # PollJob call appear to be missing from this copy.

  SetGenericOpcodeOpts([op], opts)

  job_id = SendJob([op], cl=cl)

  # wait for the job and return the result of its (single) opcode
  op_results = PollJob(job_id, cl=cl, feedback_fn=feedback_fn,

  return op_results[0]
def SubmitOrSend(op, opts, cl=None, feedback_fn=None):
  """Wrapper around SubmitOpCode or SendJob.

  This function will decide, based on the 'opts' parameter, whether to
  submit and wait for the result of the opcode (and return it), or
  whether to just send the job and print its identifier. It is used in
  order to simplify the implementation of the '--submit' option.

  It will also process the opcodes if we're sending them via SendJob
  (otherwise SubmitOpCode does it).

  """
  if opts and opts.submit_only:
    # NOTE(review): the construction of the one-element job list ("job")
    # appears to be missing from this copy.
    SetGenericOpcodeOpts(job, opts)
    job_id = SendJob(job, cl=cl)
    # exit via exception carrying the job ID, for the caller to print
    raise JobSubmittedException(job_id)

  return SubmitOpCode(op, cl=cl, feedback_fn=feedback_fn, opts=opts)
def SetGenericOpcodeOpts(opcode_list, options):
  """Processor for generic options.

  This function updates the given opcodes based on generic command
  line options (like debug, dry-run, etc.).

  @param opcode_list: list of opcodes
  @param options: command line options or None
  @return: None (in-place modification)

  """
  # NOTE(review): the early guard for "options" being None appears to be
  # missing from this copy -- confirm against upstream.
  for op in opcode_list:
    op.debug_level = options.debug
    if hasattr(options, "dry_run"):
      op.dry_run = options.dry_run
    if getattr(options, "priority", None) is not None:
      # translate the symbolic priority name into its numeric value
      op.priority = _PRIONAME_TO_VALUE[options.priority]
  # NOTE(review): the enclosing "def GetClient():" header and its try
  # blocks appear to be missing from this copy; the statements below are
  # its body (fall back to ssconf diagnostics when no master is reachable).
  # TODO: Cache object?
    client = luxi.Client()
  except luxi.NoMasterError:
    ss = ssconf.SimpleStore()

    # Try to read ssconf file
    except errors.ConfigurationError:
      raise errors.OpPrereqError("Cluster not initialized or this machine is"
                                 " not part of a cluster")

    master, myself = ssconf.GetMasterAndMyself(ss=ss)
    if master != myself:
      raise errors.OpPrereqError("This is not the master node, please connect"
                                 " to node '%s' and rerun the command" %
def FormatError(err):
  """Return a formatted error message for a given error.

  This function takes an exception instance and returns a tuple
  consisting of two values: first, the recommended exit code, and
  second, a string describing the error message (not
  newline-terminated).

  """
  # NOTE(review): several lines appear to be missing from this copy (the
  # initialisation of the return code, output buffer and message string,
  # and a few branch/continuation lines below) -- confirm against
  # upstream before relying on this body.
  if isinstance(err, errors.ConfigurationError):
    txt = "Corrupt configuration file: %s" % msg
    obuf.write(txt + "\n")
    obuf.write("Aborting.")
  elif isinstance(err, errors.HooksAbort):
    obuf.write("Failure: hooks execution failed:\n")
    for node, script, out in err.args[0]:
        obuf.write("  node: %s, script: %s, output: %s\n" %
                   (node, script, out))
        obuf.write("  node: %s, script: %s (no output)\n" %
  elif isinstance(err, errors.HooksFailure):
    obuf.write("Failure: hooks general failure: %s" % msg)
  elif isinstance(err, errors.ResolverError):
    this_host = netutils.Hostname.GetSysName()
    if err.args[0] == this_host:
      msg = "Failure: can't resolve my own hostname ('%s')"
      msg = "Failure: can't resolve hostname '%s'"
    obuf.write(msg % err.args[0])
  elif isinstance(err, errors.OpPrereqError):
    if len(err.args) == 2:
      obuf.write("Failure: prerequisites not met for this"
                 " operation:\nerror type: %s, error details:\n%s" %
                 (err.args[1], err.args[0]))
      obuf.write("Failure: prerequisites not met for this"
                 " operation:\n%s" % msg)
  elif isinstance(err, errors.OpExecError):
    obuf.write("Failure: command execution error:\n%s" % msg)
  elif isinstance(err, errors.TagError):
    obuf.write("Failure: invalid tag(s) given:\n%s" % msg)
  elif isinstance(err, errors.JobQueueDrainError):
    obuf.write("Failure: the job queue is marked for drain and doesn't"
               " accept new requests\n")
  elif isinstance(err, errors.JobQueueFull):
    obuf.write("Failure: the job queue is full and doesn't accept new"
               " job submissions until old jobs are archived\n")
  elif isinstance(err, errors.TypeEnforcementError):
    obuf.write("Parameter Error: %s" % msg)
  elif isinstance(err, errors.ParameterError):
    obuf.write("Failure: unknown/wrong parameter name '%s'" % msg)
  elif isinstance(err, luxi.NoMasterError):
    obuf.write("Cannot communicate with the master daemon.\nIs it running"
               " and listening for connections?")
  elif isinstance(err, luxi.TimeoutError):
    obuf.write("Timeout while talking to the master daemon. Jobs might have"
               " been submitted and will continue to run even if the call"
               " timed out. Useful commands in this situation are \"gnt-job"
               " list\", \"gnt-job cancel\" and \"gnt-job watch\". Error:\n")
  elif isinstance(err, luxi.PermissionError):
    obuf.write("It seems you don't have permissions to connect to the"
               " master daemon.\nPlease retry as a different user.")
  elif isinstance(err, luxi.ProtocolError):
    obuf.write("Unhandled protocol error while talking to the master daemon:\n"
  elif isinstance(err, errors.JobLost):
    obuf.write("Error checking job status: %s" % msg)
  elif isinstance(err, errors.QueryFilterParseError):
    obuf.write("Error while parsing query filter: %s\n" % err.args[0])
    obuf.write("\n".join(err.GetDetails()))
  elif isinstance(err, errors.GenericError):
    obuf.write("Unhandled Ganeti error: %s" % msg)
  elif isinstance(err, JobSubmittedException):
    # not an error: carries the ID of the submitted job
    obuf.write("JobID: %s\n" % err.args[0])
    obuf.write("Unhandled exception: %s" % msg)
  return retcode, obuf.getvalue().rstrip('\n')
def GenericMain(commands, override=None, aliases=None):
  """Generic main function for all the gnt-* commands.

  Arguments:
    - commands: a dictionary with a special structure, see the design doc
      for command line handling.
    - override: if not None, we expect a dictionary with keys that will
      override command line options; this can be used to pass
      options from the scripts to generic functions
    - aliases: dictionary with command aliases {'alias': 'target', ...}

  """
  # save the program name and the entire command line for later logging
  # NOTE(review): surrounding branch lines (the sys.argv check, try
  # headers, error exits and the final "return result") appear to be
  # missing from this copy -- confirm against upstream.
    binary = os.path.basename(sys.argv[0]) or sys.argv[0]

    if len(sys.argv) >= 2:
      binary += " " + sys.argv[1]
      old_cmdline = " ".join(sys.argv[2:])
    binary = "<unknown program>"

    func, options, args = _ParseArgs(sys.argv, commands, aliases)
  except errors.ParameterError, err:
    result, err_msg = FormatError(err)

  if func is None: # parse error

  if override is not None:
    for key, val in override.iteritems():
      setattr(options, key, val)

  utils.SetupLogging(constants.LOG_COMMANDS, binary, debug=options.debug,
                     stderr_logging=True)

    logging.info("run with arguments '%s'", old_cmdline)
    logging.info("run with no arguments")

    result = func(options, args)
  except (errors.GenericError, luxi.ProtocolError,
          JobSubmittedException), err:
    result, err_msg = FormatError(err)
    logging.exception("Error during command processing")
  except KeyboardInterrupt:
    result = constants.EXIT_FAILURE
    ToStderr("Aborted. Note that if the operation created any jobs, they"
             " might have been submitted and"
             " will continue to run in the background.")
def ParseNicOption(optvalue):
  """Parses the value of the --net option(s).

  @param optvalue: list of (index, settings-dict) pairs as produced by
      the option parser
  @return: list of per-NIC parameter dictionaries, indexed by NIC number

  """
  # NOTE(review): the "try:" header, the per-entry assignment into the
  # nics list and the final return appear to be missing from this copy.
    nic_max = max(int(nidx[0]) + 1 for nidx in optvalue)
  except (TypeError, ValueError), err:
    raise errors.OpPrereqError("Invalid NIC index passed: %s" % str(err))

  # pre-size the result so entries can be stored by index
  nics = [{}] * nic_max
  for nidx, ndict in optvalue:

    if not isinstance(ndict, dict):
      raise errors.OpPrereqError("Invalid nic/%d value: expected dict,"
                                 " got %s" % (nidx, ndict))

    utils.ForceDictType(ndict, constants.INIC_PARAMS_TYPES)
def GenericInstanceCreate(mode, opts, args):
  """Add an instance to the cluster via either creation or import.

  @param mode: constants.INSTANCE_CREATE or constants.INSTANCE_IMPORT
  @param opts: the command line options selected by the user
  @param args: should contain only one element, the new instance name
  @return: the desired exit code

  """
  # NOTE(review): a large number of lines appear to be missing from this
  # copy (instance name extraction, nic defaults, try headers, several
  # opcode keyword arguments and the final return) -- treat the body
  # below as an excerpt and confirm against upstream.
  (pnode, snode) = SplitNodeOption(opts.node)

    hypervisor, hvparams = opts.hypervisor

    nics = ParseNicOption(opts.nics)
  elif mode == constants.INSTANCE_CREATE:
    # default of one nic, all auto

  if opts.disk_template == constants.DT_DISKLESS:
    if opts.disks or opts.sd_size is not None:
      raise errors.OpPrereqError("Diskless instance but disk"
                                 " information passed")
    if (not opts.disks and not opts.sd_size
        and mode == constants.INSTANCE_CREATE):
      raise errors.OpPrereqError("No disk information specified")
    if opts.disks and opts.sd_size is not None:
      raise errors.OpPrereqError("Please use either the '--disk' or"
    if opts.sd_size is not None:
      # legacy single-disk option: convert to the generic disks form
      opts.disks = [(0, {constants.IDISK_SIZE: opts.sd_size})]

      disk_max = max(int(didx[0]) + 1 for didx in opts.disks)
      except ValueError, err:
        raise errors.OpPrereqError("Invalid disk index passed: %s" % str(err))
      disks = [{}] * disk_max

    for didx, ddict in opts.disks:
      if not isinstance(ddict, dict):
        msg = "Invalid disk/%d value: expected dict, got %s" % (didx, ddict)
        raise errors.OpPrereqError(msg)
      elif constants.IDISK_SIZE in ddict:
        if constants.IDISK_ADOPT in ddict:
          raise errors.OpPrereqError("Only one of 'size' and 'adopt' allowed"
                                     " (disk %d)" % didx)
          ddict[constants.IDISK_SIZE] = \
            utils.ParseUnit(ddict[constants.IDISK_SIZE])
        except ValueError, err:
          raise errors.OpPrereqError("Invalid disk size for disk %d: %s" %
      elif constants.IDISK_ADOPT in ddict:
        if mode == constants.INSTANCE_IMPORT:
          raise errors.OpPrereqError("Disk adoption not allowed for instance"
        ddict[constants.IDISK_SIZE] = 0
        raise errors.OpPrereqError("Missing size or adoption source for"

  utils.ForceDictType(opts.beparams, constants.BES_PARAMETER_TYPES)
  utils.ForceDictType(hvparams, constants.HVS_PARAMETER_TYPES)

  if mode == constants.INSTANCE_CREATE:
    force_variant = opts.force_variant
    no_install = opts.no_install
    identify_defaults = False
  elif mode == constants.INSTANCE_IMPORT:
    force_variant = False
    src_node = opts.src_node
    src_path = opts.src_dir
    identify_defaults = opts.identify_defaults
    raise errors.ProgrammerError("Invalid creation mode %s" % mode)

  op = opcodes.OpInstanceCreate(instance_name=instance,
                                disk_template=opts.disk_template,
                                pnode=pnode, snode=snode,
                                ip_check=opts.ip_check,
                                name_check=opts.name_check,
                                wait_for_sync=opts.wait_for_sync,
                                file_storage_dir=opts.file_storage_dir,
                                file_driver=opts.file_driver,
                                iallocator=opts.iallocator,
                                hypervisor=hypervisor,
                                beparams=opts.beparams,
                                osparams=opts.osparams,
                                force_variant=force_variant,
                                no_install=no_install,
                                identify_defaults=identify_defaults)

  SubmitOrSend(op, opts)
class _RunWhileClusterStoppedHelper:
  """Helper class for L{RunWhileClusterStopped} to simplify state management

  """
  def __init__(self, feedback_fn, cluster_name, master_node, online_nodes):
    """Initializes this class.

    @type feedback_fn: callable
    @param feedback_fn: Feedback function
    @type cluster_name: string
    @param cluster_name: Cluster name
    @type master_node: string
    @param master_node: Master node name
    @type online_nodes: list
    @param online_nodes: List of names of online nodes

    """
    self.feedback_fn = feedback_fn
    self.cluster_name = cluster_name
    self.master_node = master_node
    self.online_nodes = online_nodes

    self.ssh = ssh.SshRunner(self.cluster_name)

    # all online nodes except the master (commands on the master are run
    # locally, see _RunCmd)
    self.nonmaster_nodes = [name for name in online_nodes
                            if name != master_node]

    assert self.master_node not in self.nonmaster_nodes

  def _RunCmd(self, node_name, cmd):
    """Runs a command on the local or a remote machine.

    @type node_name: string
    @param node_name: Machine name
    @param cmd: Command

    """
    # NOTE(review): the "else:" and "if result.failed:"/"if node_name:"
    # headers appear to be missing from this copy -- confirm against
    # upstream.
    if node_name is None or node_name == self.master_node:
      # No need to use SSH
      result = utils.RunCmd(cmd)
      result = self.ssh.Run(node_name, "root", utils.ShellQuoteArgs(cmd))

      errmsg = ["Failed to run command %s" % result.cmd]
        errmsg.append("on node %s" % node_name)
      errmsg.append(": exitcode %s and error %s" %
                    (result.exit_code, result.output))
      raise errors.OpExecError(" ".join(errmsg))

  def Call(self, fn, *args):
    """Call function while all daemons are stopped.

    @type fn: callable
    @param fn: Function to be called

    """
    # NOTE(review): the "try:"/"finally:" headers structuring this method
    # appear to be missing from this copy -- confirm against upstream.
    # Pause watcher by acquiring an exclusive lock on watcher state file
    self.feedback_fn("Blocking watcher")
    watcher_block = utils.FileLock.Open(constants.WATCHER_STATEFILE)
      # TODO: Currently, this just blocks. There's no timeout.
      # TODO: Should it be a shared lock?
      watcher_block.Exclusive(blocking=True)

      # Stop master daemons, so that no new jobs can come in and all running
      # ones are finished
      self.feedback_fn("Stopping master daemons")
      self._RunCmd(None, [constants.DAEMON_UTIL, "stop-master"])

        # Stop daemons on all nodes
        for node_name in self.online_nodes:
          self.feedback_fn("Stopping daemons on %s" % node_name)
          self._RunCmd(node_name, [constants.DAEMON_UTIL, "stop-all"])

        # All daemons are shut down now
          return fn(self, *args)
        except Exception, err:
          _, errmsg = FormatError(err)
          logging.exception("Caught exception")
          self.feedback_fn(errmsg)

        # Start cluster again, master node last
        for node_name in self.nonmaster_nodes + [self.master_node]:
          self.feedback_fn("Starting daemons on %s" % node_name)
          self._RunCmd(node_name, [constants.DAEMON_UTIL, "start-all"])

      # Resume watcher
      watcher_block.Close()
def RunWhileClusterStopped(feedback_fn, fn, *args):
  """Calls a function while all cluster daemons are stopped.

  @type feedback_fn: callable
  @param feedback_fn: Feedback function
  @type fn: callable
  @param fn: Function to be called when daemons are stopped

  """
  feedback_fn("Gathering cluster information")

  # This ensures we're running on the master daemon
  # NOTE(review): the client creation and its later release appear to be
  # missing from this copy -- confirm against upstream.

  (cluster_name, master_node) = \
    cl.QueryConfigValues(["cluster_name", "master_node"])

  online_nodes = GetOnlineNodes([], cl=cl)

  # Don't keep a reference to the client. The master daemon will go away.

  assert master_node in online_nodes

  return _RunWhileClusterStoppedHelper(feedback_fn, cluster_name, master_node,
                                       online_nodes).Call(fn, *args)
def GenerateTable(headers, fields, separator, data,
                  numfields=None, unitfields=None,
                  units=None):
  """Prints a table with headers and different fields.

  @type headers: dict
  @param headers: dictionary mapping field names to headers for
      the fields
  @type fields: list
  @param fields: the field names corresponding to each row in
      the data field
  @param separator: the separator to be used; if this is None,
      the default 'smart' algorithm is used which computes optimal
      field width, otherwise just the separator is used between
      each field
  @type data: list
  @param data: a list of lists, each sublist being one row to be output
  @type numfields: list
  @param numfields: a list with the fields that hold numeric
      values and thus should be right-aligned
  @type unitfields: list
  @param unitfields: a list with the fields that hold numeric
      values that should be formatted with the units field
  @type units: string or None
  @param units: the units we should use for formatting, or None for
      automatic choice (human-readable for non-separator usage, otherwise
      megabytes); this is a one-letter string

  """
  # NOTE(review): several lines appear to be missing from this copy
  # (units default selection, list initialisations, try/else headers,
  # the outer row loops and the final return) -- confirm against
  # upstream before relying on this body.
  if numfields is None:
  if unitfields is None:

  numfields = utils.FieldSet(*numfields) # pylint: disable-msg=W0142
  unitfields = utils.FieldSet(*unitfields) # pylint: disable-msg=W0142

  for field in fields:
    if headers and field not in headers:
      # TODO: handle better unknown fields (either revert to old
      # style of raising exception, or deal more intelligently with
      # variable fields)
      headers[field] = field
    if separator is not None:
      format_fields.append("%s")
    elif numfields.Matches(field):
      # numeric fields get right-aligned, width-parameterised format
      format_fields.append("%*s")
      format_fields.append("%-*s")

  if separator is None:
    mlens = [0 for name in fields]
    format_str = ' '.join(format_fields)
    # escape '%' in the user-supplied separator before using it in a format
    format_str = separator.replace("%", "%%").join(format_fields)

    for idx, val in enumerate(row):
      if unitfields.Matches(fields[idx]):
        except (TypeError, ValueError):
          val = row[idx] = utils.FormatUnit(val, units)
      val = row[idx] = str(val)
      if separator is None:
        # track the maximum width of each column
        mlens[idx] = max(mlens[idx], len(val))

  for idx, name in enumerate(fields):
      if separator is None:
        mlens[idx] = max(mlens[idx], len(hdr))
        args.append(mlens[idx])
    result.append(format_str % tuple(args))

  if separator is None:
    assert len(mlens) == len(fields)

    # do not pad the last column if it is not right-aligned
    if fields and not numfields.Matches(fields[-1]):

    line = ['-' for _ in fields]
    for idx in range(len(fields)):
      if separator is None:
        args.append(mlens[idx])
      args.append(line[idx])
    result.append(format_str % tuple(args))
2373 def _FormatBool(value):
2374 """Formats a boolean value as a string.
#: Default formatting for query results; (callback, align right)
# The closing brace of this literal was missing in the copy under review;
# restored so the module parses.  QFT_UNIT is deliberately absent: it
# needs the runtime "unit" argument, see _GetColumnFormatter.
_DEFAULT_FORMAT_QUERY = {
  constants.QFT_TEXT: (str, False),
  constants.QFT_BOOL: (_FormatBool, False),
  constants.QFT_NUMBER: (str, True),
  constants.QFT_TIMESTAMP: (utils.FormatTime, False),
  constants.QFT_OTHER: (str, False),
  constants.QFT_UNKNOWN: (str, False),
  }
def _GetColumnFormatter(fdef, override, unit):
  """Returns formatting function for a field.

  @type fdef: L{objects.QueryFieldDefinition}
  @type override: dict
  @param override: Dictionary for overriding field formatting functions,
      indexed by field name, contents like L{_DEFAULT_FORMAT_QUERY}
  @type unit: string
  @param unit: Unit used for formatting fields of type L{constants.QFT_UNIT}
  @rtype: tuple; (callable, bool)
  @return: Returns the function to format a value (takes one parameter) and a
      boolean for aligning the value on the right-hand side

  """
  # NOTE(review): the "if fmt is not None: return fmt" checks after each
  # lookup appear to be missing from this copy -- confirm against
  # upstream.
  fmt = override.get(fdef.name, None)

  assert constants.QFT_UNIT not in _DEFAULT_FORMAT_QUERY

  if fdef.kind == constants.QFT_UNIT:
    # Can't keep this information in the static dictionary
    return (lambda value: utils.FormatUnit(value, unit), True)

  fmt = _DEFAULT_FORMAT_QUERY.get(fdef.kind, None)

  raise NotImplementedError("Can't format column type '%s'" % fdef.kind)
class _QueryColumnFormatter:
  """Callable class for formatting fields of a query.

  """
  def __init__(self, fn, status_fn, verbose):
    """Initializes this class.

    @type fn: callable
    @param fn: Formatting function
    @type status_fn: callable
    @param status_fn: Function to report fields' status
    @type verbose: boolean
    @param verbose: whether to use verbose field descriptions or not

    """
    # NOTE(review): the assignment "self._fn = fn" appears to be missing
    # from this copy; __call__ below relies on it.
    self._status_fn = status_fn
    self._verbose = verbose

  def __call__(self, data):
    """Returns a field's string representation.

    """
    (status, value) = data

    # report the result status before formatting the value
    self._status_fn(status)

    if status == constants.RS_NORMAL:
      return self._fn(value)

    assert value is None, \
           "Found value %r for abnormal status %s" % (value, status)

    return FormatResultError(status, self._verbose)
2461 def FormatResultError(status, verbose):
2462 """Formats result status other than L{constants.RS_NORMAL}.
2464 @param status: The result status
2465 @type verbose: boolean
2466 @param verbose: Whether to return the verbose text
2467 @return: Text of result status
2470 assert status != constants.RS_NORMAL, \
2471 "FormatResultError called with status equal to constants.RS_NORMAL"
2473 (verbose_text, normal_text) = constants.RSS_DESCRIPTION[status]
2475 raise NotImplementedError("Unknown status %s" % status)
2482 def FormatQueryResult(result, unit=None, format_override=None, separator=None,
2483 header=False, verbose=False):
2484 """Formats data in L{objects.QueryResponse}.
2486 @type result: L{objects.QueryResponse}
2487 @param result: result of query operation
2489 @param unit: Unit used for formatting fields of type L{constants.QFT_UNIT},
2490 see L{utils.text.FormatUnit}
2491 @type format_override: dict
2492 @param format_override: Dictionary for overriding field formatting functions,
2493 indexed by field name, contents like L{_DEFAULT_FORMAT_QUERY}
2494 @type separator: string or None
2495 @param separator: String used to separate fields
2497 @param header: Whether to output header row
2498 @type verbose: boolean
2499 @param verbose: whether to use verbose field descriptions or not
2508 if format_override is None:
2509 format_override = {}
2511 stats = dict.fromkeys(constants.RS_ALL, 0)
2513 def _RecordStatus(status):
2518 for fdef in result.fields:
2519 assert fdef.title and fdef.name
2520 (fn, align_right) = _GetColumnFormatter(fdef, format_override, unit)
2521 columns.append(TableColumn(fdef.title,
2522 _QueryColumnFormatter(fn, _RecordStatus,
2526 table = FormatTable(result.data, columns, header, separator)
2528 # Collect statistics
2529 assert len(stats) == len(constants.RS_ALL)
2530 assert compat.all(count >= 0 for count in stats.values())
2532 # Determine overall status. If there was no data, unknown fields must be
2533 # detected via the field definitions.
2534 if (stats[constants.RS_UNKNOWN] or
2535 (not result.data and _GetUnknownFields(result.fields))):
2537 elif compat.any(count > 0 for key, count in stats.items()
2538 if key != constants.RS_NORMAL):
2539 status = QR_INCOMPLETE
2543 return (status, table)
2546 def _GetUnknownFields(fdefs):
2547 """Returns list of unknown fields included in C{fdefs}.
2549 @type fdefs: list of L{objects.QueryFieldDefinition}
2552 return [fdef for fdef in fdefs
2553 if fdef.kind == constants.QFT_UNKNOWN]
2556 def _WarnUnknownFields(fdefs):
2557 """Prints a warning to stderr if a query included unknown fields.
2559 @type fdefs: list of L{objects.QueryFieldDefinition}
2562 unknown = _GetUnknownFields(fdefs)
2564 ToStderr("Warning: Queried for unknown fields %s",
2565 utils.CommaJoin(fdef.name for fdef in unknown))
2571 def GenericList(resource, fields, names, unit, separator, header, cl=None,
2572 format_override=None, verbose=False):
2573 """Generic implementation for listing all items of a resource.
2575 @param resource: One of L{constants.QR_VIA_LUXI}
2576 @type fields: list of strings
2577 @param fields: List of fields to query for
2578 @type names: list of strings
2579 @param names: Names of items to query for
2580 @type unit: string or None
2581 @param unit: Unit used for formatting fields of type L{constants.QFT_UNIT} or
2582 None for automatic choice (human-readable for non-separator usage,
2583 otherwise megabytes); this is a one-letter string
2584 @type separator: string or None
2585 @param separator: String used to separate fields
2587 @param header: Whether to show header row
2588 @type format_override: dict
2589 @param format_override: Dictionary for overriding field formatting functions,
2590 indexed by field name, contents like L{_DEFAULT_FORMAT_QUERY}
2591 @type verbose: boolean
2592 @param verbose: whether to use verbose field descriptions or not
2601 response = cl.Query(resource, fields, qlang.MakeSimpleFilter("name", names))
2603 found_unknown = _WarnUnknownFields(response.fields)
2605 (status, data) = FormatQueryResult(response, unit=unit, separator=separator,
2607 format_override=format_override,
2613 assert ((found_unknown and status == QR_UNKNOWN) or
2614 (not found_unknown and status != QR_UNKNOWN))
2616 if status == QR_UNKNOWN:
2617 return constants.EXIT_UNKNOWN_FIELD
2619 # TODO: Should the list command fail if not all data could be collected?
2620 return constants.EXIT_SUCCESS
2623 def GenericListFields(resource, fields, separator, header, cl=None):
2624 """Generic implementation for listing fields for a resource.
2626 @param resource: One of L{constants.QR_VIA_LUXI}
2627 @type fields: list of strings
2628 @param fields: List of fields to query for
2629 @type separator: string or None
2630 @param separator: String used to separate fields
2632 @param header: Whether to show header row
2641 response = cl.QueryFields(resource, fields)
2643 found_unknown = _WarnUnknownFields(response.fields)
2646 TableColumn("Name", str, False),
2647 TableColumn("Title", str, False),
2648 TableColumn("Description", str, False),
2651 rows = [[fdef.name, fdef.title, fdef.doc] for fdef in response.fields]
2653 for line in FormatTable(rows, columns, header, separator):
2657 return constants.EXIT_UNKNOWN_FIELD
2659 return constants.EXIT_SUCCESS
2663 """Describes a column for L{FormatTable}.
2666 def __init__(self, title, fn, align_right):
2667 """Initializes this class.
2670 @param title: Column title
2672 @param fn: Formatting function
2673 @type align_right: bool
2674 @param align_right: Whether to align values on the right-hand side
2679 self.align_right = align_right
2682 def _GetColFormatString(width, align_right):
2683 """Returns the format string for a field.
2691 return "%%%s%ss" % (sign, width)
2694 def FormatTable(rows, columns, header, separator):
2695 """Formats data as a table.
2697 @type rows: list of lists
2698 @param rows: Row data, one list per row
2699 @type columns: list of L{TableColumn}
2700 @param columns: Column descriptions
2702 @param header: Whether to show header row
2703 @type separator: string or None
2704 @param separator: String used to separate columns
2708 data = [[col.title for col in columns]]
2709 colwidth = [len(col.title) for col in columns]
2712 colwidth = [0 for _ in columns]
2716 assert len(row) == len(columns)
2718 formatted = [col.format(value) for value, col in zip(row, columns)]
2720 if separator is None:
2721 # Update column widths
2722 for idx, (oldwidth, value) in enumerate(zip(colwidth, formatted)):
2723 # Modifying a list's items while iterating is fine
2724 colwidth[idx] = max(oldwidth, len(value))
2726 data.append(formatted)
2728 if separator is not None:
2729 # Return early if a separator is used
2730 return [separator.join(row) for row in data]
2732 if columns and not columns[-1].align_right:
2733 # Avoid unnecessary spaces at end of line
2736 # Build format string
2737 fmt = " ".join([_GetColFormatString(width, col.align_right)
2738 for col, width in zip(columns, colwidth)])
2740 return [fmt % tuple(row) for row in data]
2743 def FormatTimestamp(ts):
2744 """Formats a given timestamp.
2747 @param ts: a timeval-type timestamp, a tuple of seconds and microseconds
2750 @return: a string with the formatted timestamp
2753 if not isinstance (ts, (tuple, list)) or len(ts) != 2:
2756 return time.strftime("%F %T", time.localtime(sec)) + ".%06d" % usec
2759 def ParseTimespec(value):
2760 """Parse a time specification.
2762 The following suffixed will be recognized:
2770 Without any suffix, the value will be taken to be in seconds.
2775 raise errors.OpPrereqError("Empty time specification passed")
2783 if value[-1] not in suffix_map:
2786 except (TypeError, ValueError):
2787 raise errors.OpPrereqError("Invalid time specification '%s'" % value)
2789 multiplier = suffix_map[value[-1]]
2791 if not value: # no data left after stripping the suffix
2792 raise errors.OpPrereqError("Invalid time specification (only"
2795 value = int(value) * multiplier
2796 except (TypeError, ValueError):
2797 raise errors.OpPrereqError("Invalid time specification '%s'" % value)
2801 def GetOnlineNodes(nodes, cl=None, nowarn=False, secondary_ips=False,
2802 filter_master=False):
2803 """Returns the names of online nodes.
2805 This function will also log a warning on stderr with the names of
2808 @param nodes: if not empty, use only this subset of nodes (minus the
2810 @param cl: if not None, luxi client to use
2811 @type nowarn: boolean
2812 @param nowarn: by default, this function will output a note with the
2813 offline nodes that are skipped; if this parameter is True the
2814 note is not displayed
2815 @type secondary_ips: boolean
2816 @param secondary_ips: if True, return the secondary IPs instead of the
2817 names, useful for doing network traffic over the replication interface
2819 @type filter_master: boolean
2820 @param filter_master: if True, do not return the master node in the list
2821 (useful in coordination with secondary_ips where we cannot check our
2822 node name against the list)
2834 master_node = cl.QueryConfigValues(["master_node"])[0]
2835 filter_fn = lambda x: x != master_node
2837 filter_fn = lambda _: True
2839 result = cl.QueryNodes(names=nodes, fields=["name", "offline", "sip"],
2841 offline = [row[0] for row in result if row[1]]
2842 if offline and not nowarn:
2843 ToStderr("Note: skipping offline node(s): %s" % utils.CommaJoin(offline))
2844 return [row[name_idx] for row in result if not row[1] and filter_fn(row[0])]
2847 def _ToStream(stream, txt, *args):
2848 """Write a message to a stream, bypassing the logging system
2850 @type stream: file object
2851 @param stream: the file to which we should write
2853 @param txt: the message
2858 stream.write(txt % args)
2865 def ToStdout(txt, *args):
2866 """Write a message to stdout only, bypassing the logging system
2868 This is just a wrapper over _ToStream.
2871 @param txt: the message
2874 _ToStream(sys.stdout, txt, *args)
2877 def ToStderr(txt, *args):
2878 """Write a message to stderr only, bypassing the logging system
2880 This is just a wrapper over _ToStream.
2883 @param txt: the message
2886 _ToStream(sys.stderr, txt, *args)
2889 class JobExecutor(object):
2890 """Class which manages the submission and execution of multiple jobs.
2892 Note that instances of this class should not be reused between
2896 def __init__(self, cl=None, verbose=True, opts=None, feedback_fn=None):
2901 self.verbose = verbose
2904 self.feedback_fn = feedback_fn
2906 def QueueJob(self, name, *ops):
2907 """Record a job for later submit.
2910 @param name: a description of the job, will be used in WaitJobSet
2912 SetGenericOpcodeOpts(ops, self.opts)
2913 self.queue.append((name, ops))
2915 def SubmitPending(self, each=False):
2916 """Submit all pending jobs.
2921 for row in self.queue:
2922 # SubmitJob will remove the success status, but raise an exception if
2923 # the submission fails, so we'll notice that anyway.
2924 results.append([True, self.cl.SubmitJob(row[1])])
2926 results = self.cl.SubmitManyJobs([row[1] for row in self.queue])
2927 for (idx, ((status, data), (name, _))) in enumerate(zip(results,
2929 self.jobs.append((idx, status, data, name))
2931 def _ChooseJob(self):
2932 """Choose a non-waiting/queued job to poll next.
2935 assert self.jobs, "_ChooseJob called with empty job list"
2937 result = self.cl.QueryJobs([i[2] for i in self.jobs], ["status"])
2940 for job_data, status in zip(self.jobs, result):
2941 if (isinstance(status, list) and status and
2942 status[0] in (constants.JOB_STATUS_QUEUED,
2943 constants.JOB_STATUS_WAITLOCK,
2944 constants.JOB_STATUS_CANCELING)):
2945 # job is still present and waiting
2947 # good candidate found (either running job or lost job)
2948 self.jobs.remove(job_data)
2952 return self.jobs.pop(0)
2954 def GetResults(self):
2955 """Wait for and return the results of all jobs.
2958 @return: list of tuples (success, job results), in the same order
2959 as the submitted jobs; if a job has failed, instead of the result
2960 there will be the error message
2964 self.SubmitPending()
2967 ok_jobs = [row[2] for row in self.jobs if row[1]]
2969 ToStdout("Submitted jobs %s", utils.CommaJoin(ok_jobs))
2971 # first, remove any non-submitted jobs
2972 self.jobs, failures = compat.partition(self.jobs, lambda x: x[1])
2973 for idx, _, jid, name in failures:
2974 ToStderr("Failed to submit job for %s: %s", name, jid)
2975 results.append((idx, False, jid))
2978 (idx, _, jid, name) = self._ChooseJob()
2979 ToStdout("Waiting for job %s for %s...", jid, name)
2981 job_result = PollJob(jid, cl=self.cl, feedback_fn=self.feedback_fn)
2983 except errors.JobLost, err:
2984 _, job_result = FormatError(err)
2985 ToStderr("Job %s for %s has been archived, cannot check its result",
2988 except (errors.GenericError, luxi.ProtocolError), err:
2989 _, job_result = FormatError(err)
2991 # the error message will always be shown, verbose or not
2992 ToStderr("Job %s for %s has failed: %s", jid, name, job_result)
2994 results.append((idx, success, job_result))
2996 # sort based on the index, then drop it
2998 results = [i[1:] for i in results]
3002 def WaitOrShow(self, wait):
3003 """Wait for job results or only print the job IDs.
3006 @param wait: whether to wait or not
3010 return self.GetResults()
3013 self.SubmitPending()
3014 for _, status, result, name in self.jobs:
3016 ToStdout("%s: %s", result, name)
3018 ToStderr("Failure for %s: %s", name, result)
3019 return [row[1:3] for row in self.jobs]
3022 def FormatParameterDict(buf, param_dict, actual, level=1):
3023 """Formats a parameter dictionary.
3025 @type buf: L{StringIO}
3026 @param buf: the buffer into which to write
3027 @type param_dict: dict
3028 @param param_dict: the own parameters
3030 @param actual: the current parameter set (including defaults)
3031 @param level: Level of indent
3034 indent = " " * level
3035 for key in sorted(actual):
3036 val = param_dict.get(key, "default (%s)" % actual[key])
3037 buf.write("%s- %s: %s\n" % (indent, key, val))
3040 def ConfirmOperation(names, list_type, text, extra=""):
3041 """Ask the user to confirm an operation on a list of list_type.
3043 This function is used to request confirmation for doing an operation
3044 on a given list of list_type.
3047 @param names: the list of names that we display when
3048 we ask for confirmation
3049 @type list_type: str
3050 @param list_type: Human readable name for elements in the list (e.g. nodes)
3052 @param text: the operation that the user should confirm
3054 @return: True or False depending on user's confirmation.
3058 msg = ("The %s will operate on %d %s.\n%s"
3059 "Do you want to continue?" % (text, count, list_type, extra))
3060 affected = (("\nAffected %s:\n" % list_type) +
3061 "\n".join([" %s" % name for name in names]))
3063 choices = [("y", True, "Yes, execute the %s" % text),
3064 ("n", False, "No, abort the %s" % text)]
3067 choices.insert(1, ("v", "v", "View the list of affected %s" % list_type))
3070 question = msg + affected
3072 choice = AskUser(question, choices)
3075 choice = AskUser(msg + affected, choices)