4 # Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011 Google Inc.
6 # This program is free software; you can redistribute it and/or modify
7 # it under the terms of the GNU General Public License as published by
8 # the Free Software Foundation; either version 2 of the License, or
9 # (at your option) any later version.
11 # This program is distributed in the hope that it will be useful, but
12 # WITHOUT ANY WARRANTY; without even the implied warranty of
13 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 # General Public License for more details.
16 # You should have received a copy of the GNU General Public License
17 # along with this program; if not, write to the Free Software
18 # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
22 """Module dealing with command line parsing"""
30 from cStringIO import StringIO
32 from ganeti import utils
33 from ganeti import errors
34 from ganeti import constants
35 from ganeti import opcodes
36 from ganeti import luxi
37 from ganeti import ssconf
38 from ganeti import rpc
39 from ganeti import ssh
40 from ganeti import compat
41 from ganeti import netutils
42 from ganeti import qlang
44 from optparse import (OptionParser, TitledHelpFormatter,
45 Option, OptionValueError)
  # NOTE(review): these are entries of the module export list (__all__);
  # the opening "__all__ = [" is on a line not shown here — confirm.
  # Command line options
  "CLUSTER_DOMAIN_SECRET_OPT",
  "FILESTORE_DRIVER_OPT",
  "DEFAULT_IALLOCATOR_OPT",
  "IDENTIFY_DEFAULTS_OPT",
  "IGNORE_FAILURES_OPT",
  "IGNORE_REMOVE_FAILURES_OPT",
  "IGNORE_SECONDARIES_OPT",
  "MAINTAIN_NODE_HEALTH_OPT",
  "NEW_CLUSTER_CERT_OPT",
  "NEW_CLUSTER_DOMAIN_SECRET_OPT",
  "NEW_CONFD_HMAC_KEY_OPT",
  "NODE_PLACEMENT_OPT",
  "NODRBD_STORAGE_OPT",
  "NOMODIFY_ETCHOSTS_OPT",
  "NOMODIFY_SSH_SETUP_OPT",
  "NOSSH_KEYCHECK_OPT",
  "PREALLOC_WIPE_DISKS_OPT",
  "PRIMARY_IP_VERSION_OPT",
  "REMOVE_INSTANCE_OPT",
  "SHUTDOWN_TIMEOUT_OPT",
  # Generic functions for CLI programs
  "GenericInstanceCreate",
  "JobSubmittedException",
  "RunWhileClusterStopped",
  # Formatting functions
  "ToStderr", "ToStdout",
  "FormatParameterDict",
  # command line options support infrastructure
  "ARGS_MANY_INSTANCES",
  "OPT_COMPL_INST_ADD_NODES",
  "OPT_COMPL_MANY_NODES",
  "OPT_COMPL_ONE_IALLOCATOR",
  "OPT_COMPL_ONE_INSTANCE",
  "OPT_COMPL_ONE_NODE",
  "OPT_COMPL_ONE_NODEGROUP",
  "COMMON_CREATE_OPTS",
#: Priorities (sorted)
# NOTE(review): these tuples are the members of _PRIORITY_NAMES, an ordered
# (name, value) list — its opening line is not shown here; confirm.
  ("low", constants.OP_PRIO_LOW),
  ("normal", constants.OP_PRIO_NORMAL),
  ("high", constants.OP_PRIO_HIGH),

#: Priority dictionary for easier lookup
# TODO: Replace this and _PRIORITY_NAMES with a single sorted dictionary once
# we migrate to Python 2.6
_PRIONAME_TO_VALUE = dict(_PRIORITY_NAMES)

# Query result status for clients
# NOTE(review): tuple-unpacked from range(3); the first members of the tuple
# are on lines not shown here.
 QR_INCOMPLETE) = range(3)
  # Constructor for the base argument class; "min"/"max" deliberately shadow
  # the builtins (hence the pylint disable) and bound how many values the
  # argument accepts (max=None means unlimited).
  def __init__(self, min=0, max=None): # pylint: disable-msg=W0622
    # NOTE(review): the two lines below look like the body of a __repr__
    # method whose "def" line is not visible in this view — confirm against
    # the full file before editing.
    return ("<%s min=%s max=%s>" %
            (self.__class__.__name__, self.min, self.max))
class ArgSuggest(_Argument):
  """Suggesting argument.

  Value can be any of the ones passed to the constructor.

  """
  # pylint: disable-msg=W0622
  def __init__(self, min=0, max=None, choices=None):
    _Argument.__init__(self, min=min, max=max)
    # suggestions offered to the user (e.g. for shell completion)
    self.choices = choices

    # NOTE(review): the return below appears to belong to a __repr__ method
    # whose "def" line is not visible in this view — confirm.
    return ("<%s min=%s max=%s choices=%r>" %
            (self.__class__.__name__, self.min, self.max, self.choices))
class ArgChoice(ArgSuggest):
  """Choice argument.

  Value can be any of the ones passed to the constructor. Like L{ArgSuggest},
  but value must be one of the choices.

  """


class ArgUnknown(_Argument):
  """Unknown argument to program (e.g. determined at runtime).

  """


class ArgInstance(_Argument):
  """Instances argument.

  """


class ArgNode(_Argument):
  """Node argument.

  """


class ArgGroup(_Argument):
  """Node group argument.

  """


class ArgJobId(_Argument):
  """Job ID argument.

  """


class ArgFile(_Argument):
  """File path argument.

  """


class ArgCommand(_Argument):
  """Command argument.

  """


class ArgHost(_Argument):
  """Host argument.

  """


class ArgOs(_Argument):
  """OS argument.

  """
# Pre-built positional-argument specifications for the gnt-* commands:
# "MANY" variants accept any number of names (including none), while "ONE"
# variants require exactly one name on the command line.
ARGS_MANY_INSTANCES = [ArgInstance()]
ARGS_MANY_NODES = [ArgNode()]
ARGS_MANY_GROUPS = [ArgGroup()]
ARGS_ONE_INSTANCE = [ArgInstance(min=1, max=1)]
ARGS_ONE_NODE = [ArgNode(min=1, max=1)]
# FIX: this was "[ArgInstance(min=1, max=1)]", a copy/paste error — a single
# node-group argument must use ArgGroup so argument validation and shell
# completion treat the value as a group name, not an instance name.
ARGS_ONE_GROUP = [ArgGroup(min=1, max=1)]
ARGS_ONE_OS = [ArgOs(min=1, max=1)]
def _ExtractTagsObject(opts, args):
  """Extract the tag type object.

  Note that this function will modify its args parameter.

  @param opts: parsed command-line options; must carry a C{tag_type}
      attribute (one of the constants.TAG_* values)
  @param args: positional arguments; for node/instance tags the object
      name is consumed from this list

  """
  if not hasattr(opts, "tag_type"):
    raise errors.ProgrammerError("tag_type not passed to _ExtractTagsObject")
  # dispatch on the tag kind: cluster tags need no object name,
  # node/instance tags take the name from args
  if kind == constants.TAG_CLUSTER:
  elif kind == constants.TAG_NODE or kind == constants.TAG_INSTANCE:
      # node/instance tags require the object name as first argument
      raise errors.OpPrereqError("no arguments passed to the command")
    # any other tag kind is a programming error, not a user error
    raise errors.ProgrammerError("Unhandled tag type '%s'" % kind)
def _ExtendTags(opts, args):
  """Extend the args if a source file has been given.

  This function will extend the tags with the contents of the file
  passed in the 'tags_source' attribute of the opts parameter. A file
  named '-' will be replaced by stdin.

  @param opts: parsed options; C{tags_source} names the file to read
  @param args: list of tag names, extended in place

  """
  fname = opts.tags_source
    new_fh = open(fname, "r")
    # we don't use the nice 'new_data = [line.strip() for line in fh]'
    # because of python bug 1633941
      line = new_fh.readline()
      # one tag per line, whitespace-stripped
      new_data.append(line.strip())
  args.extend(new_data)
def ListTags(opts, args):
  """List the tags on a given object.

  This is a generic implementation that knows how to deal with all
  three cases of tag objects (cluster, node, instance). The opts
  argument is expected to contain a tag_type field denoting what
  object type we work on.

  """
  kind, name = _ExtractTagsObject(opts, args)
  # NOTE(review): "cl" is presumably a luxi client obtained on a line not
  # shown here (GetClient()) — confirm.
  result = cl.QueryTags(kind, name)
  # materialize the result so it can be sorted/printed
  result = list(result)
def AddTags(opts, args):
  """Add tags on a given object.

  This is a generic implementation that knows how to deal with all
  three cases of tag objects (cluster, node, instance). The opts
  argument is expected to contain a tag_type field denoting what
  object type we work on.

  """
  kind, name = _ExtractTagsObject(opts, args)
  # merge tags read from opts.tags_source (if any) into args
  _ExtendTags(opts, args)
    raise errors.OpPrereqError("No tags to be added")
  op = opcodes.OpTagsSet(kind=kind, name=name, tags=args)
  SubmitOpCode(op, opts=opts)
def RemoveTags(opts, args):
  """Remove tags from a given object.

  This is a generic implementation that knows how to deal with all
  three cases of tag objects (cluster, node, instance). The opts
  argument is expected to contain a tag_type field denoting what
  object type we work on.

  """
  kind, name = _ExtractTagsObject(opts, args)
  # merge tags read from opts.tags_source (if any) into args
  _ExtendTags(opts, args)
    raise errors.OpPrereqError("No tags to be removed")
  op = opcodes.OpTagsDel(kind=kind, name=name, tags=args)
  SubmitOpCode(op, opts=opts)
def check_unit(option, opt, value): # pylint: disable-msg=W0613
  """OptParsers custom converter for units.

  Converts a size string (e.g. "4g") into an integer via
  L{utils.ParseUnit}; parse failures are reported through optparse's
  own error mechanism.

  """
    return utils.ParseUnit(value)
  except errors.UnitParseError, err:
    # re-raise as the exception type optparse expects from a converter
    raise OptionValueError("option %s: %s" % (opt, err))
def _SplitKeyVal(opt, data):
  """Convert a KeyVal string into a dict.

  This function will convert a key=val[,...] string into a dict. Empty
  values will be converted specially: keys which have the prefix 'no_'
  will have the value=False and the prefix stripped, the others will
  have value=True.

  @type opt: string
  @param opt: a string holding the option name for which we process the
      data, used in building error messages
  @type data: string
  @param data: a string of the format key=val,key=val,...
  @rtype: dict
  @return: {key=val, key=val}
  @raises errors.ParameterError: if there are duplicate keys

  """
  for elem in utils.UnescapeAndSplit(data, sep=","):
      # explicit key=value form
      key, val = elem.split("=", 1)
      # bare key: "no_"-prefixed means False, "-"-prefixed (UN_PREFIX)
      # means "reset to default" (None), plain key means True
      if elem.startswith(NO_PREFIX):
        key, val = elem[len(NO_PREFIX):], False
      elif elem.startswith(UN_PREFIX):
        key, val = elem[len(UN_PREFIX):], None
        key, val = elem, True
      raise errors.ParameterError("Duplicate key '%s' in option %s" %
def check_ident_key_val(option, opt, value): # pylint: disable-msg=W0613
  """Custom parser for ident:key=val,key=val options.

  This will store the parsed values as a tuple (ident, {key: val}). As such,
  multiple uses of this option via action=append is possible.

  """
    # no colon: the whole value is the identifier, no parameters
    ident, rest = value, ''
    ident, rest = value.split(":", 1)

  if ident.startswith(NO_PREFIX):
      # "no_ident" means removal; parameters make no sense here
      msg = "Cannot pass options when removing parameter groups: %s" % value
      raise errors.ParameterError(msg)
    retval = (ident[len(NO_PREFIX):], False)
  elif ident.startswith(UN_PREFIX):
      # "-ident" (UN_PREFIX) means reset-to-default; likewise no parameters
      msg = "Cannot pass options when removing parameter groups: %s" % value
      raise errors.ParameterError(msg)
    retval = (ident[len(UN_PREFIX):], None)
    # normal case: parse the key=val list after the colon
    kv_dict = _SplitKeyVal(opt, rest)
    retval = (ident, kv_dict)
def check_key_val(option, opt, value): # pylint: disable-msg=W0613
  """Custom parser class for key=val,key=val options.

  This will store the parsed values as a dict {key: val}.

  """
  return _SplitKeyVal(opt, value)
def check_bool(option, opt, value): # pylint: disable-msg=W0613
  """Custom parser for yes/no options.

  This will store the parsed value as either True or False.

  """
  # accept any capitalization of the accepted literals
  value = value.lower()
  if value == constants.VALUE_FALSE or value == "no":
  elif value == constants.VALUE_TRUE or value == "yes":
    # anything other than the four accepted literals is rejected
    raise errors.ParameterError("Invalid boolean value '%s'" % value)
# completion_suggestion is normally a list. Using numeric values not evaluating
# to False for dynamic completion.
# NOTE(review): range(100, 107) yields seven values; the tuple members not
# shown here are on redacted lines — confirm against the full file.
(OPT_COMPL_MANY_NODES,
 OPT_COMPL_ONE_INSTANCE,
 OPT_COMPL_ONE_IALLOCATOR,
 OPT_COMPL_INST_ADD_NODES,
 OPT_COMPL_ONE_NODEGROUP) = range(100, 107)

#: Set of all known dynamic-completion markers, used for validation
OPT_COMPL_ALL = frozenset([
  OPT_COMPL_MANY_NODES,
  OPT_COMPL_ONE_INSTANCE,
  OPT_COMPL_ONE_IALLOCATOR,
  OPT_COMPL_INST_ADD_NODES,
  OPT_COMPL_ONE_NODEGROUP,
class CliOption(Option):
  """Custom option class for optparse.

  Extends optparse's Option with a C{completion_suggest} attribute (used
  for shell completion) and with extra value types backed by the custom
  converters defined above.

  """
  ATTRS = Option.ATTRS + [
    "completion_suggest",
  # custom value types understood by this option class
  TYPES = Option.TYPES + (
  # map each custom type to its converter function
  TYPE_CHECKER = Option.TYPE_CHECKER.copy()
  TYPE_CHECKER["identkeyval"] = check_ident_key_val
  TYPE_CHECKER["keyval"] = check_key_val
  TYPE_CHECKER["unit"] = check_unit
  TYPE_CHECKER["bool"] = check_bool


# optparse.py sets make_option, so we do it for our own option class, too
cli_option = CliOption
# Every "-d"/"--debug" occurrence on the command line bumps opts.debug by one.
DEBUG_OPT = cli_option("-d", "--debug", dest="debug",
                       action="count", default=0,
                       help="Increase debugging level")
# Generic output-formatting and confirmation options shared by many commands
NOHDR_OPT = cli_option("--no-headers", default=False,
                       action="store_true", dest="no_headers",
                       help="Don't display column headers")

SEP_OPT = cli_option("--separator", default=None,
                     action="store", dest="separator",
                     help=("Separator between output fields"
                           " (defaults to one space)"))

USEUNITS_OPT = cli_option("--units", default=None,
                          dest="units", choices=('h', 'm', 'g', 't'),
                          help="Specify units for output (one of h/m/g/t)")

FIELDS_OPT = cli_option("-o", "--output", dest="output", action="store",
                        type="string", metavar="FIELDS",
                        help="Comma separated list of output fields")

FORCE_OPT = cli_option("-f", "--force", dest="force", action="store_true",
                       default=False, help="Force the operation")

CONFIRM_OPT = cli_option("--yes", dest="confirm", action="store_true",
                         default=False, help="Do not require confirmation")

IGNORE_OFFLINE_OPT = cli_option("--ignore-offline", dest="ignore_offline",
                                action="store_true", default=False,
                                help=("Ignore offline nodes and do as much"

TAG_SRC_OPT = cli_option("--from", dest="tags_source",
                         default=None, help="File with tag names")

SUBMIT_OPT = cli_option("--submit", dest="submit_only",
                        default=False, action="store_true",
                        help=("Submit the job and return the job ID, but"
                              " don't wait for the job to finish"))

SYNC_OPT = cli_option("--sync", dest="do_locking",
                      default=False, action="store_true",
                      help=("Grab locks while doing the queries"
                            " in order to ensure more consistent results"))
# NOTE(review): "it it" in the help text below looks like a typo for "if it".
DRY_RUN_OPT = cli_option("--dry-run", default=False,
                         help=("Do not execute the operation, just run the"
                               " check steps and verify it it could be"

VERBOSE_OPT = cli_option("-v", "--verbose", default=False,
                         help="Increase the verbosity of the operation")

DEBUG_SIMERR_OPT = cli_option("--debug-simulate-errors", default=False,
                              action="store_true", dest="simulate_errors",
                              help="Debugging option that makes the operation"
                              " treat most runtime checks as failed")

NWSYNC_OPT = cli_option("--no-wait-for-sync", dest="wait_for_sync",
                        default=True, action="store_false",
                        help="Don't wait for sync (DANGEROUS!)")

DISK_TEMPLATE_OPT = cli_option("-t", "--disk-template", dest="disk_template",
                               help="Custom disk setup (diskless, file,"
                               default=None, metavar="TEMPL",
                               choices=list(constants.DISK_TEMPLATES))

NONICS_OPT = cli_option("--no-nics", default=False, action="store_true",
                        help="Do not create any network cards for"

FILESTORE_DIR_OPT = cli_option("--file-storage-dir", dest="file_storage_dir",
                               help="Relative path under default cluster-wide"
                               " file storage dir to store file-based disks",
                               default=None, metavar="<DIR>")

FILESTORE_DRIVER_OPT = cli_option("--file-driver", dest="file_driver",
                                  help="Driver to use for image files",
                                  default="loop", metavar="<DRIVER>",
                                  choices=list(constants.FILE_DRIVER))

IALLOCATOR_OPT = cli_option("-I", "--iallocator", metavar="<NAME>",
                            help="Select nodes for the instance automatically"
                            " using the <NAME> iallocator plugin",
                            default=None, type="string",
                            completion_suggest=OPT_COMPL_ONE_IALLOCATOR)

# Same short option "-I" as above: the two are used by different commands
DEFAULT_IALLOCATOR_OPT = cli_option("-I", "--default-iallocator",
                                    help="Set the default instance allocator plugin",
                                    default=None, type="string",
                                    completion_suggest=OPT_COMPL_ONE_IALLOCATOR)

OS_OPT = cli_option("-o", "--os-type", dest="os", help="What OS to run",
                    completion_suggest=OPT_COMPL_ONE_OS)

OSPARAMS_OPT = cli_option("-O", "--os-parameters", dest="osparams",
                          type="keyval", default={},
                          help="OS parameters")

FORCE_VARIANT_OPT = cli_option("--force-variant", dest="force_variant",
                               action="store_true", default=False,
                               help="Force an unknown variant")
# Instance-creation and migration related options
NO_INSTALL_OPT = cli_option("--no-install", dest="no_install",
                            action="store_true", default=False,
                            help="Do not install the OS (will"

BACKEND_OPT = cli_option("-B", "--backend-parameters", dest="beparams",
                         type="keyval", default={},
                         help="Backend parameters")

# NOTE(review): "-H" is reused by the next three options; they are attached
# to different commands, so optparse never sees the conflict — confirm.
HVOPTS_OPT = cli_option("-H", "--hypervisor-parameters", type="keyval",
                        default={}, dest="hvparams",
                        help="Hypervisor parameters")

HYPERVISOR_OPT = cli_option("-H", "--hypervisor-parameters", dest="hypervisor",
                            help="Hypervisor and hypervisor options, in the"
                            " format hypervisor:option=value,option=value,...",
                            default=None, type="identkeyval")

HVLIST_OPT = cli_option("-H", "--hypervisor-parameters", dest="hvparams",
                        help="Hypervisor and hypervisor options, in the"
                        " format hypervisor:option=value,option=value,...",
                        default=[], action="append", type="identkeyval")

NOIPCHECK_OPT = cli_option("--no-ip-check", dest="ip_check", default=True,
                           action="store_false",
                           help="Don't check that the instance's IP"

NONAMECHECK_OPT = cli_option("--no-name-check", dest="name_check",
                             default=True, action="store_false",
                             help="Don't check that the instance's name"

NET_OPT = cli_option("--net",
                     help="NIC parameters", default=[],
                     dest="nics", action="append", type="identkeyval")

DISK_OPT = cli_option("--disk", help="Disk parameters", default=[],
                      dest="disks", action="append", type="identkeyval")

DISKIDX_OPT = cli_option("--disks", dest="disks", default=None,
                         help="Comma-separated list of disks"
                         " indices to act on (e.g. 0,2) (optional,"
                         " defaults to all disks)")

OS_SIZE_OPT = cli_option("-s", "--os-size", dest="sd_size",
                         help="Enforces a single-disk configuration using the"
                         " given disk size, in MiB unless a suffix is used",
                         default=None, type="unit", metavar="<size>")

IGNORE_CONSIST_OPT = cli_option("--ignore-consistency",
                                dest="ignore_consistency",
                                action="store_true", default=False,
                                help="Ignore the consistency of the disks on"

NONLIVE_OPT = cli_option("--non-live", dest="live",
                         default=True, action="store_false",
                         help="Do a non-live migration (this usually means"
                         " freeze the instance, save the state, transfer and"
                         " only then resume running on the secondary node)")

# NOTE(review): the help text below opens a parenthesis that is never closed.
MIGRATION_MODE_OPT = cli_option("--migration-mode", dest="migration_mode",
                                choices=list(constants.HT_MIGRATION_MODES),
                                help="Override default migration mode (choose"
                                " either live or non-live")

NODE_PLACEMENT_OPT = cli_option("-n", "--node", dest="node",
                                help="Target node and optional secondary node",
                                metavar="<pnode>[:<snode>]",
                                completion_suggest=OPT_COMPL_INST_ADD_NODES)
# Node-selection and instance-listing / removal options
NODE_LIST_OPT = cli_option("-n", "--node", dest="nodes", default=[],
                           action="append", metavar="<node>",
                           help="Use only this node (can be used multiple"
                           " times, if not given defaults to all nodes)",
                           completion_suggest=OPT_COMPL_ONE_NODE)

NODEGROUP_OPT = cli_option("-g", "--node-group",
                           help="Node group (name or uuid)",
                           metavar="<nodegroup>",
                           default=None, type="string",
                           completion_suggest=OPT_COMPL_ONE_NODEGROUP)

SINGLE_NODE_OPT = cli_option("-n", "--node", dest="node", help="Target node",
                             completion_suggest=OPT_COMPL_ONE_NODE)

NOSTART_OPT = cli_option("--no-start", dest="start", default=True,
                         action="store_false",
                         help="Don't start the instance after creation")

SHOWCMD_OPT = cli_option("--show-cmd", dest="show_command",
                         action="store_true", default=False,
                         help="Show command instead of executing it")

# NOTE(review): "traffic and " below ends with a space and the next fragment
# starts with one, producing a double space in the rendered help text.
CLEANUP_OPT = cli_option("--cleanup", dest="cleanup",
                         default=False, action="store_true",
                         help="Instead of performing the migration, try to"
                         " recover from a failed cleanup. This is safe"
                         " to run even if the instance is healthy, but it"
                         " will create extra replication traffic and "
                         " disrupt briefly the replication (like during the"

STATIC_OPT = cli_option("-s", "--static", dest="static",
                        action="store_true", default=False,
                        help="Only show configuration data, not runtime data")

ALL_OPT = cli_option("--all", dest="show_all",
                     default=False, action="store_true",
                     help="Show info on all instances on the cluster."
                     " This can take a long time to run, use wisely")

SELECT_OS_OPT = cli_option("--select-os", dest="select_os",
                           action="store_true", default=False,
                           help="Interactive OS reinstall, lists available"
                           " OS templates for selection")

IGNORE_FAILURES_OPT = cli_option("--ignore-failures", dest="ignore_failures",
                                 action="store_true", default=False,
                                 help="Remove the instance from the cluster"
                                 " configuration even if there are failures"
                                 " during the removal process")

IGNORE_REMOVE_FAILURES_OPT = cli_option("--ignore-remove-failures",
                                        dest="ignore_remove_failures",
                                        action="store_true", default=False,
                                        help="Remove the instance from the"
                                        " cluster configuration even if there"
                                        " are failures during the removal"

REMOVE_INSTANCE_OPT = cli_option("--remove-instance", dest="remove_instance",
                                 action="store_true", default=False,
                                 help="Remove the instance from the cluster")
# Disk-replacement, import/export and node-flag options
NEW_SECONDARY_OPT = cli_option("-n", "--new-secondary", dest="dst_node",
                               help="Specifies the new secondary node",
                               metavar="NODE", default=None,
                               completion_suggest=OPT_COMPL_ONE_NODE)

ON_PRIMARY_OPT = cli_option("-p", "--on-primary", dest="on_primary",
                            default=False, action="store_true",
                            help="Replace the disk(s) on the primary"
                            " node (only for the drbd template)")

ON_SECONDARY_OPT = cli_option("-s", "--on-secondary", dest="on_secondary",
                              default=False, action="store_true",
                              help="Replace the disk(s) on the secondary"
                              " node (only for the drbd template)")

AUTO_PROMOTE_OPT = cli_option("--auto-promote", dest="auto_promote",
                              default=False, action="store_true",
                              help="Lock all nodes and auto-promote as needed"

AUTO_REPLACE_OPT = cli_option("-a", "--auto", dest="auto",
                              default=False, action="store_true",
                              help="Automatically replace faulty disks"
                              " (only for the drbd template)")

IGNORE_SIZE_OPT = cli_option("--ignore-size", dest="ignore_size",
                             default=False, action="store_true",
                             help="Ignore current recorded size"
                             " (useful for forcing activation when"
                             " the recorded size is wrong)")

SRC_NODE_OPT = cli_option("--src-node", dest="src_node", help="Source node",
                          completion_suggest=OPT_COMPL_ONE_NODE)

SRC_DIR_OPT = cli_option("--src-dir", dest="src_dir", help="Source directory",

SECONDARY_IP_OPT = cli_option("-s", "--secondary-ip", dest="secondary_ip",
                              help="Specify the secondary ip for the node",
                              metavar="ADDRESS", default=None)

READD_OPT = cli_option("--readd", dest="readd",
                       default=False, action="store_true",
                       help="Readd old node after replacing it")

NOSSH_KEYCHECK_OPT = cli_option("--no-ssh-key-check", dest="ssh_key_check",
                                default=True, action="store_false",
                                help="Disable SSH key fingerprint checking")

# Tristate node flags: type="bool" with default=None means "leave unchanged"
MC_OPT = cli_option("-C", "--master-candidate", dest="master_candidate",
                    type="bool", default=None, metavar=_YORNO,
                    help="Set the master_candidate flag on the node")

OFFLINE_OPT = cli_option("-O", "--offline", dest="offline", metavar=_YORNO,
                         type="bool", default=None,
                         help=("Set the offline flag on the node"
                               " (cluster does not communicate with offline"

DRAINED_OPT = cli_option("-D", "--drained", dest="drained", metavar=_YORNO,
                         type="bool", default=None,
                         help=("Set the drained flag on the node"
                               " (excluded from allocation operations)"))

CAPAB_MASTER_OPT = cli_option("--master-capable", dest="master_capable",
                              type="bool", default=None, metavar=_YORNO,
                              help="Set the master_capable flag on the node")

CAPAB_VM_OPT = cli_option("--vm-capable", dest="vm_capable",
                          type="bool", default=None, metavar=_YORNO,
                          help="Set the vm_capable flag on the node")

ALLOCATABLE_OPT = cli_option("--allocatable", dest="allocatable",
                             type="bool", default=None, metavar=_YORNO,
                             help="Set the allocatable flag on a volume")

NOLVM_STORAGE_OPT = cli_option("--no-lvm-storage", dest="lvm_storage",
                               help="Disable support for lvm based instances"
                               action="store_false", default=True)
# Cluster-init / cluster-modify options
ENABLED_HV_OPT = cli_option("--enabled-hypervisors",
                            dest="enabled_hypervisors",
                            help="Comma-separated list of hypervisors",
                            type="string", default=None)

NIC_PARAMS_OPT = cli_option("-N", "--nic-parameters", dest="nicparams",
                            type="keyval", default={},
                            help="NIC parameters")

CP_SIZE_OPT = cli_option("-C", "--candidate-pool-size", default=None,
                         dest="candidate_pool_size", type="int",
                         help="Set the candidate pool size")

VG_NAME_OPT = cli_option("--vg-name", dest="vg_name",
                         help=("Enables LVM and specifies the volume group"
                               " name (cluster-wide) for disk allocation"
                               " [%s]" % constants.DEFAULT_VG),
                         metavar="VG", default=None)

YES_DOIT_OPT = cli_option("--yes-do-it", dest="yes_do_it",
                          help="Destroy cluster", action="store_true")

NOVOTING_OPT = cli_option("--no-voting", dest="no_voting",
                          help="Skip node agreement check (dangerous)",
                          action="store_true", default=False)

MAC_PREFIX_OPT = cli_option("-m", "--mac-prefix", dest="mac_prefix",
                            help="Specify the mac prefix for the instance IP"
                            " addresses, in the format XX:XX:XX",

MASTER_NETDEV_OPT = cli_option("--master-netdev", dest="master_netdev",
                               help="Specify the node interface (cluster-wide)"
                               " on which the master IP address will be added"
                               " (cluster init default: %s)" %
                               constants.DEFAULT_BRIDGE,

GLOBAL_FILEDIR_OPT = cli_option("--file-storage-dir", dest="file_storage_dir",
                                help="Specify the default directory (cluster-"
                                "wide) for storing the file-based disks [%s]" %
                                constants.DEFAULT_FILE_STORAGE_DIR,
                                default=constants.DEFAULT_FILE_STORAGE_DIR)

NOMODIFY_ETCHOSTS_OPT = cli_option("--no-etc-hosts", dest="modify_etc_hosts",
                                   help="Don't modify /etc/hosts",
                                   action="store_false", default=True)

NOMODIFY_SSH_SETUP_OPT = cli_option("--no-ssh-init", dest="modify_ssh_setup",
                                    help="Don't initialize SSH keys",
                                    action="store_false", default=True)

ERROR_CODES_OPT = cli_option("--error-codes", dest="error_codes",
                             help="Enable parseable error messages",
                             action="store_true", default=False)

NONPLUS1_OPT = cli_option("--no-nplus1-mem", dest="skip_nplusone_mem",
                          help="Skip N+1 memory redundancy tests",
                          action="store_true", default=False)

REBOOT_TYPE_OPT = cli_option("-t", "--type", dest="reboot_type",
                             help="Type of reboot: soft/hard/full",
                             default=constants.INSTANCE_REBOOT_HARD,
                             choices=list(constants.REBOOT_TYPES))

IGNORE_SECONDARIES_OPT = cli_option("--ignore-secondaries",
                                    dest="ignore_secondaries",
                                    default=False, action="store_true",
                                    help="Ignore errors from secondaries")

NOSHUTDOWN_OPT = cli_option("--noshutdown", dest="shutdown",
                            action="store_false", default=True,
                            help="Don't shutdown the instance (unsafe)")

TIMEOUT_OPT = cli_option("--timeout", dest="timeout", type="int",
                         default=constants.DEFAULT_SHUTDOWN_TIMEOUT,
                         help="Maximum time to wait")

SHUTDOWN_TIMEOUT_OPT = cli_option("--shutdown-timeout",
                                  dest="shutdown_timeout", type="int",
                                  default=constants.DEFAULT_SHUTDOWN_TIMEOUT,
                                  help="Maximum time to wait for instance shutdown")
# Certificate/key renewal and uid-pool options
# NOTE(review): "repetions" in the help text below is a typo for "repetitions".
INTERVAL_OPT = cli_option("--interval", dest="interval", type="int",
                          help=("Number of seconds between repetions of the"

EARLY_RELEASE_OPT = cli_option("--early-release",
                               dest="early_release", default=False,
                               action="store_true",
                               help="Release the locks on the secondary"

NEW_CLUSTER_CERT_OPT = cli_option("--new-cluster-certificate",
                                  dest="new_cluster_cert",
                                  default=False, action="store_true",
                                  help="Generate a new cluster certificate")

RAPI_CERT_OPT = cli_option("--rapi-certificate", dest="rapi_cert",
                           help="File containing new RAPI certificate")

# NOTE(review): default=None with action="store_true" works but is
# inconsistent with the default=False used by the sibling options — confirm
# whether callers distinguish None from False here.
NEW_RAPI_CERT_OPT = cli_option("--new-rapi-certificate", dest="new_rapi_cert",
                               default=None, action="store_true",
                               help=("Generate a new self-signed RAPI"

NEW_CONFD_HMAC_KEY_OPT = cli_option("--new-confd-hmac-key",
                                    dest="new_confd_hmac_key",
                                    default=False, action="store_true",
                                    help=("Create a new HMAC key for %s" %

# NOTE(review): "new new" in the help text below looks like a duplicated word.
CLUSTER_DOMAIN_SECRET_OPT = cli_option("--cluster-domain-secret",
                                       dest="cluster_domain_secret",
                                       help=("Load new new cluster domain"
                                             " secret from file"))

NEW_CLUSTER_DOMAIN_SECRET_OPT = cli_option("--new-cluster-domain-secret",
                                           dest="new_cluster_domain_secret",
                                           default=False, action="store_true",
                                           help=("Create a new cluster domain"

USE_REPL_NET_OPT = cli_option("--use-replication-network",
                              dest="use_replication_network",
                              help="Whether to use the replication network"
                              " for talking to the nodes",
                              action="store_true", default=False)

MAINTAIN_NODE_HEALTH_OPT = \
    cli_option("--maintain-node-health", dest="maintain_node_health",
               metavar=_YORNO, default=None, type="bool",
               help="Configure the cluster to automatically maintain node"
               " health, by shutting down unknown instances, shutting down"
               " unknown DRBD devices, etc.")

IDENTIFY_DEFAULTS_OPT = \
    cli_option("--identify-defaults", dest="identify_defaults",
               default=False, action="store_true",
               help="Identify which saved instance parameters are equal to"
               " the current cluster defaults and set them as such, instead"
               " of marking them as overridden")

UIDPOOL_OPT = cli_option("--uid-pool", default=None,
                         action="store", dest="uid_pool",
                         help=("A list of user-ids or user-id"
                               " ranges separated by commas"))

ADD_UIDS_OPT = cli_option("--add-uids", default=None,
                          action="store", dest="add_uids",
                          help=("A list of user-ids or user-id"
                                " ranges separated by commas, to be"
                                " added to the user-id pool"))

REMOVE_UIDS_OPT = cli_option("--remove-uids", default=None,
                             action="store", dest="remove_uids",
                             help=("A list of user-ids or user-id"
                                   " ranges separated by commas, to be"
                                   " removed from the user-id pool"))

RESERVED_LVS_OPT = cli_option("--reserved-lvs", default=None,
                              action="store", dest="reserved_lvs",
                              help=("A comma-separated list of reserved"
                                    " logical volumes names, that will be"
                                    " ignored by cluster verify"))
# Miscellaneous display, storage-backend and OS-flag options
ROMAN_OPT = cli_option("--roman",
                       dest="roman_integers", default=False,
                       action="store_true",
                       help="Use roman numbers for positive integers")

DRBD_HELPER_OPT = cli_option("--drbd-usermode-helper", dest="drbd_helper",
                             action="store", default=None,
                             help="Specifies usermode helper for DRBD")

NODRBD_STORAGE_OPT = cli_option("--no-drbd-storage", dest="drbd_storage",
                                action="store_false", default=True,
                                help="Disable support for DRBD")

PRIMARY_IP_VERSION_OPT = \
    cli_option("--primary-ip-version", default=constants.IP4_VERSION,
               action="store", dest="primary_ip_version",
               metavar="%d|%d" % (constants.IP4_VERSION,
                                  constants.IP6_VERSION),
               help="Cluster-wide IP version for primary IP")

PRIORITY_OPT = cli_option("--priority", default=None, dest="priority",
                          metavar="|".join(name for name, _ in _PRIORITY_NAMES),
                          choices=_PRIONAME_TO_VALUE.keys(),
                          help="Priority for opcode processing")

HID_OS_OPT = cli_option("--hidden", dest="hidden",
                        type="bool", default=None, metavar=_YORNO,
                        help="Sets the hidden flag on the OS")

BLK_OS_OPT = cli_option("--blacklisted", dest="blacklisted",
                        type="bool", default=None, metavar=_YORNO,
                        help="Sets the blacklisted flag on the OS")

PREALLOC_WIPE_DISKS_OPT = cli_option("--prealloc-wipe-disks", default=None,
                                     type="bool", metavar=_YORNO,
                                     dest="prealloc_wipe_disks",
                                     help=("Wipe disks prior to instance"

NODE_PARAMS_OPT = cli_option("--node-parameters", dest="ndparams",
                             type="keyval", default=None,
                             help="Node parameters")

ALLOC_POLICY_OPT = cli_option("--alloc-policy", dest="alloc_policy",
                              action="store", metavar="POLICY", default=None,
                              help="Allocation policy for the node group")

NODE_POWERED_OPT = cli_option("--node-powered", default=None,
                              type="bool", metavar=_YORNO,
                              dest="node_powered",
                              help="Specify if the SoR for node is powered")
#: Options provided by all commands
COMMON_OPTS = [DEBUG_OPT]

# common options for creating instances. add and import then add their own
# options
COMMON_CREATE_OPTS = [
  FILESTORE_DRIVER_OPT,
def _ParseArgs(argv, commands, aliases):
  """Parser for the command line arguments.

  This function parses the arguments and returns the function which
  must be executed together with its (modified) arguments.

  @param argv: the command line
  @param commands: dictionary with special contents, see the design
      doc for cmdline handling
  @param aliases: dictionary with command aliases {'alias': 'target, ...}
  @return: a tuple of (function, options, args); (None, None, None) is
      returned when the caller should simply exit (usage printed or
      argument check failed)

  """
  # determine the binary name for usage/version messages
  binary = "<command>"
  binary = argv[0].split("/")[-1]

  if len(argv) > 1 and argv[1] == "--version":
    ToStdout("%s (ganeti %s) %s", binary, constants.VCS_VERSION,
             constants.RELEASE_VERSION)
    # Quit right away. That way we don't have to care about this special
    # argument. optparse.py does it the same.

  # unknown or missing command: print a usage summary with all commands
  if len(argv) < 2 or not (argv[1] in commands or
                           argv[1] in aliases):
    # let's do a nice thing
    sortedcmds = commands.keys()
    ToStdout("Usage: %s {command} [options...] [argument...]", binary)
    ToStdout("%s <command> --help to see details, or man %s", binary, binary)

    # compute the max line length for cmd + usage
    mlen = max([len(" %s" % cmd) for cmd in commands])
    mlen = min(60, mlen) # should not get here...

    # and format a nice command list
    ToStdout("Commands:")
    for cmd in sortedcmds:
      cmdstr = " %s" % (cmd,)
      help_text = commands[cmd][4]
      # wrap the per-command help to fit next to the command column
      help_lines = textwrap.wrap(help_text, 79 - 3 - mlen)
      ToStdout("%-*s - %s", mlen, cmdstr, help_lines.pop(0))
      for line in help_lines:
        ToStdout("%-*s %s", mlen, "", line)

    return None, None, None

  # get command, unalias it, and look it up in commands
      # an alias shadowing a real command is a programming error
      raise errors.ProgrammerError("Alias '%s' overrides an existing"
    if aliases[cmd] not in commands:
      raise errors.ProgrammerError("Alias '%s' maps to non-existing"
                                   " command '%s'" % (cmd, aliases[cmd]))

  # commands[cmd] is (function, args definition, per-command options,
  # usage string, description) — build the per-command option parser
  func, args_def, parser_opts, usage, description = commands[cmd]
  parser = OptionParser(option_list=parser_opts + COMMON_OPTS,
                        description=description,
                        formatter=TitledHelpFormatter(),
                        usage="%%prog %s %s" % (cmd, usage))
  parser.disable_interspersed_args()
  options, args = parser.parse_args()

  if not _CheckArguments(cmd, args_def, args):
    return None, None, None

  return func, options, args
def _CheckArguments(cmd, args_def, args):
  """Verifies the arguments using the argument definition.

  Algorithm:

    1. Abort with error if values specified by user but none expected.

    1. For each argument in definition

      1. Keep running count of minimum number of values (min_count)
      1. Keep running count of maximum number of values (max_count)
      1. If it has an unlimited number of values

        1. Abort with error if it's not the last argument in the definition

    1. If last argument has limited number of values

      1. Abort with error if number of values doesn't match or is too large

    1. Abort with error if user didn't pass enough values (min_count)

  """
  if args and not args_def:
    ToStderr("Error: Command %s expects no arguments", cmd)

  last_idx = len(args_def) - 1

  for idx, arg in enumerate(args_def):
    if min_count is None:
    elif arg.min is not None:
      min_count += arg.min

    if max_count is None:
    elif arg.max is not None:
      max_count += arg.max

      # only meaningful for the last argument in the definition
      check_max = (arg.max is not None)

    elif arg.max is None:
      raise errors.ProgrammerError("Only the last argument can have max=None")

    # Command with exact number of arguments
    if (min_count is not None and max_count is not None and
        min_count == max_count and len(args) != min_count):
      ToStderr("Error: Command %s expects %d argument(s)", cmd, min_count)

    # Command with limited number of arguments
    if max_count is not None and len(args) > max_count:
      ToStderr("Error: Command %s expects only %d argument(s)",

  # Command with some required arguments
  if min_count is not None and len(args) < min_count:
    ToStderr("Error: Command %s expects at least %d argument(s)",
def SplitNodeOption(value):
  """Splits the value of a --node option.

  A value of the form "pnode:snode" yields the two node names (as a
  two-element list, split on the first colon only); anything else is
  returned unsplit as (value, None).

  """
  if not value or ":" not in value:
    return (value, None)
  return value.split(":", 1)
def CalculateOSNames(os_name, os_variants):
  """Calculates all the names an OS can be called, according to its variants.

  @type os_name: string
  @param os_name: base name of the os
  @type os_variants: list or None
  @param os_variants: list of supported variants
  @rtype: list
  @return: list of valid names

  """
    # one "os_name+variant" entry per supported variant
    return ['%s+%s' % (os_name, v) for v in os_variants]
def ParseFields(selected, default):
  """Parses the values of "--field"-like options.

  @type selected: string or None
  @param selected: User-selected options
  @param selected: comma-separated field names; a leading "+" means
      "in addition to the defaults"
  @param default: Default fields

  """
  if selected is None:

  if selected.startswith("+"):
    # "+a,b" extends the default field list instead of replacing it
    return default + selected[1:].split(",")

  return selected.split(",")
# Alias for the rpc module's decorator that runs a function with RPC
# initialized around the call (presumably sets up/tears down the RPC
# runtime — confirm against ganeti.rpc.RunWithRPC).
UsesRPC = rpc.RunWithRPC
def AskUser(text, choices=None):
  """Ask the user a question.

  @param text: the question to ask

  @param choices: list with elements tuples (input_char, return_value,
      description); if not given, it will default to: [('y', True,
      'Perform the operation'), ('n', False, 'Do not do the operation')];
      note that the '?' char is reserved for help

  @return: one of the return values from the choices list; if input is
      not possible (i.e. not running with a tty), we return the last
      entry from the list

  """
    choices = [('y', True, 'Perform the operation'),
               ('n', False, 'Do not perform the operation')]
  if not choices or not isinstance(choices, list):
    raise errors.ProgrammerError("Invalid choices argument to AskUser")
  for entry in choices:
    if not isinstance(entry, tuple) or len(entry) < 3 or entry[0] == '?':
      raise errors.ProgrammerError("Invalid choices element to AskUser")

  # default answer: return value of the last choice
  answer = choices[-1][1]
  for line in text.splitlines():
    new_text.append(textwrap.fill(line, 70, replace_whitespace=False))
  text = "\n".join(new_text)
    # interact directly with the controlling terminal, not stdin/stdout
    f = file("/dev/tty", "a+")
    chars = [entry[0] for entry in choices]
    chars[-1] = "[%s]" % chars[-1]
    maps = dict([(entry[0], entry[1]) for entry in choices])
      f.write("/".join(chars))
      # read at most one char plus newline from the tty
      line = f.readline(2).strip().lower()
        # '?' prints the help text for each choice
        for entry in choices:
          f.write(" %s - %s\n" % (entry[0], entry[2]))
class JobSubmittedException(Exception):
  """Job was submitted, client should exit.

  This exception has one argument, the ID of the job that was
  submitted. The handler should print this ID.

  This is not an error, just a structured way to exit from clients.

  """
def SendJob(ops, cl=None):
  """Function to submit an opcode without waiting for the results.

  @type ops: list
  @param ops: list of opcodes
  @type cl: luxi.Client
  @param cl: the luxi client to use for communicating with the master;
             if None, a new client will be created
  @return: the job ID returned by the master (via SubmitJob)

  """
  job_id = cl.SubmitJob(ops)
def GenericPollJob(job_id, cbs, report_cbs):
  """Generic job-polling function.

  Repeatedly waits for job changes via the data callbacks, forwarding log
  messages to the reporting callbacks, until the job reaches a final
  status; then queries the final opcode results and either returns them
  or raises an appropriate error.

  @type job_id: number
  @param job_id: Job ID
  @type cbs: Instance of L{JobPollCbBase}
  @param cbs: Data callbacks
  @type report_cbs: Instance of L{JobPollReportCbBase}
  @param report_cbs: Reporting callbacks
  @raise errors.JobLost: if the job can no longer be found
  @raise errors.OpExecError: if the job or one of its opcodes failed

  """
  prev_job_info = None
  prev_logmsg_serial = None

    result = cbs.WaitForJobChangeOnce(job_id, ["status"], prev_job_info,
      # job not found, go away!
      raise errors.JobLost("Job with id %s lost" % job_id)

    if result == constants.JOB_NOTCHANGED:
      report_cbs.ReportNotChanged(job_id, status)

    # Split result, a tuple of (field values, log entries)
    (job_info, log_entries) = result
    (status, ) = job_info

      for log_entry in log_entries:
        (serial, timestamp, log_type, message) = log_entry
        report_cbs.ReportLogMessage(job_id, serial, timestamp,
        # remember the highest serial seen so messages aren't repeated
        prev_logmsg_serial = max(prev_logmsg_serial, serial)

    # TODO: Handle canceled and archived jobs
    elif status in (constants.JOB_STATUS_SUCCESS,
                    constants.JOB_STATUS_ERROR,
                    constants.JOB_STATUS_CANCELING,
                    constants.JOB_STATUS_CANCELED):

    prev_job_info = job_info

  jobs = cbs.QueryJobs([job_id], ["status", "opstatus", "opresult"])
    raise errors.JobLost("Job with id %s lost" % job_id)

  status, opstatus, result = jobs[0]

  if status == constants.JOB_STATUS_SUCCESS:

  if status in (constants.JOB_STATUS_CANCELING, constants.JOB_STATUS_CANCELED):
    raise errors.OpExecError("Job was canceled")

  # find the failing opcode and raise the most specific error available
  for idx, (status, msg) in enumerate(zip(opstatus, result)):
    if status == constants.OP_STATUS_SUCCESS:
    elif status == constants.OP_STATUS_ERROR:
      errors.MaybeRaise(msg)

        raise errors.OpExecError("partial failure (opcode %d): %s" %

      raise errors.OpExecError(str(msg))

  # default failure mode
  raise errors.OpExecError(result)
class JobPollCbBase:
  """Base class for L{GenericPollJob} data callbacks.

  Subclasses must implement WaitForJobChangeOnce and QueryJobs.

  """
    """Initializes this class.

    """
  def WaitForJobChangeOnce(self, job_id, fields,
                           prev_job_info, prev_log_serial):
    """Waits for changes on a job.

    """
    raise NotImplementedError()

  def QueryJobs(self, job_ids, fields):
    """Returns the selected fields for the selected job IDs.

    @type job_ids: list of numbers
    @param job_ids: Job IDs
    @type fields: list of strings
    @param fields: Fields

    """
    raise NotImplementedError()
class JobPollReportCbBase:
  """Base class for L{GenericPollJob} reporting callbacks.

  Subclasses must implement ReportLogMessage and ReportNotChanged.

  """
    """Initializes this class.

    """
  def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
    """Handles a log message.

    """
    raise NotImplementedError()

  def ReportNotChanged(self, job_id, status):
    """Called if a job hasn't changed in a while.

    @type job_id: number
    @param job_id: Job ID
    @type status: string or None
    @param status: Job status if available

    """
    raise NotImplementedError()
class _LuxiJobPollCb(JobPollCbBase):
  # Data callbacks backed by a luxi client (stored as self.cl).
  def __init__(self, cl):
    """Initializes this class.

    @param cl: luxi client used for all job queries

    """
    JobPollCbBase.__init__(self)

  def WaitForJobChangeOnce(self, job_id, fields,
                           prev_job_info, prev_log_serial):
    """Waits for changes on a job.

    """
    # delegate directly to the luxi client
    return self.cl.WaitForJobChangeOnce(job_id, fields,
                                        prev_job_info, prev_log_serial)

  def QueryJobs(self, job_ids, fields):
    """Returns the selected fields for the selected job IDs.

    """
    return self.cl.QueryJobs(job_ids, fields)
class FeedbackFnJobPollReportCb(JobPollReportCbBase):
  # Reporting callbacks that forward log messages to a feedback function.
  def __init__(self, feedback_fn):
    """Initializes this class.

    @type feedback_fn: callable
    @param feedback_fn: function called with each (timestamp, type, msg) tuple

    """
    JobPollReportCbBase.__init__(self)

    self.feedback_fn = feedback_fn

    assert callable(feedback_fn)

  def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
    """Handles a log message.

    """
    self.feedback_fn((timestamp, log_type, log_msg))

  def ReportNotChanged(self, job_id, status):
    """Called if a job hasn't changed in a while.

    """
class StdioJobPollReportCb(JobPollReportCbBase):
  # Reporting callbacks printing to stdout/stderr.
    """Initializes this class.

    """
    JobPollReportCbBase.__init__(self)

    # flags so the queued/waitlock notices are printed at most once
    self.notified_queued = False
    self.notified_waitlock = False

  def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
    """Handles a log message.

    """
    ToStdout("%s %s", time.ctime(utils.MergeTime(timestamp)),
             FormatLogMessage(log_type, log_msg))

  def ReportNotChanged(self, job_id, status):
    """Called if a job hasn't changed in a while.

    """
    if status == constants.JOB_STATUS_QUEUED and not self.notified_queued:
      ToStderr("Job %s is waiting in queue", job_id)
      self.notified_queued = True

    elif status == constants.JOB_STATUS_WAITLOCK and not self.notified_waitlock:
      ToStderr("Job %s is trying to acquire all necessary locks", job_id)
      self.notified_waitlock = True
def FormatLogMessage(log_type, log_msg):
  """Formats a job message according to its type.

  Payloads that are not plain ELOG_MESSAGE entries are stringified
  first; the result is always passed through L{utils.SafeEncode}.

  """
  if log_type == constants.ELOG_MESSAGE:
    text = log_msg
  else:
    text = str(log_msg)

  return utils.SafeEncode(text)
def PollJob(job_id, cl=None, feedback_fn=None, reporter=None):
  """Function to poll for the result of a job.

  @type job_id: job identified
  @param job_id: the job to poll for results
  @type cl: luxi.Client
  @param cl: the luxi client to use for communicating with the master;
             if None, a new client will be created
  @param feedback_fn: if given (and no reporter), log messages are
      delivered through a L{FeedbackFnJobPollReportCb}
  @param reporter: explicit reporting callback object; mutually
      exclusive with feedback_fn

  """
  if reporter is None:
      reporter = FeedbackFnJobPollReportCb(feedback_fn)
      # no feedback function: report on stdio
      reporter = StdioJobPollReportCb()
    raise errors.ProgrammerError("Can't specify reporter and feedback function")

  return GenericPollJob(job_id, _LuxiJobPollCb(cl), reporter)
def SubmitOpCode(op, cl=None, feedback_fn=None, opts=None, reporter=None):
  """Legacy function to submit an opcode.

  This is just a simple wrapper over the construction of the processor
  instance. It should be extended to better handle feedback and
  interaction functions.

  @return: the result of the single submitted opcode

  """
  SetGenericOpcodeOpts([op], opts)

  job_id = SendJob([op], cl=cl)

  # wait for the job and collect the (single-opcode) results
  op_results = PollJob(job_id, cl=cl, feedback_fn=feedback_fn,

  return op_results[0]
def SubmitOrSend(op, opts, cl=None, feedback_fn=None):
  """Wrapper around SubmitOpCode or SendJob.

  This function will decide, based on the 'opts' parameter, whether to
  submit and wait for the result of the opcode (and return it), or
  whether to just send the job and print its identifier. It is used in
  order to simplify the implementation of the '--submit' option.

  It will also process the opcodes if we're sending them via SendJob
  (otherwise SubmitOpCode does it).

  @raise JobSubmittedException: when --submit was given; carries the job ID

  """
  if opts and opts.submit_only:
    SetGenericOpcodeOpts(job, opts)
    job_id = SendJob(job, cl=cl)
    raise JobSubmittedException(job_id)

  return SubmitOpCode(op, cl=cl, feedback_fn=feedback_fn, opts=opts)
def SetGenericOpcodeOpts(opcode_list, options):
  """Processor for generic options.

  This function updates the given opcodes based on generic command
  line options (like debug, dry-run, etc.).

  @param opcode_list: list of opcodes
  @param options: command line options or None
  @return: None (in-place modification)

  """
  for op in opcode_list:
    op.debug_level = options.debug
    if hasattr(options, "dry_run"):
      op.dry_run = options.dry_run
    # priority is optional on the options object; translate name to value
    if getattr(options, "priority", None) is not None:
      op.priority = _PRIONAME_TO_VALUE[options.priority]
  # TODO: Cache object?
  # Create a luxi client; if no master daemon is reachable, produce a
  # more helpful error depending on whether this machine is in a cluster
  # and whether it is the master node.
    client = luxi.Client()
  except luxi.NoMasterError:
    ss = ssconf.SimpleStore()

    # Try to read ssconf file
    except errors.ConfigurationError:
      raise errors.OpPrereqError("Cluster not initialized or this machine is"
                                 " not part of a cluster")

    master, myself = ssconf.GetMasterAndMyself(ss=ss)
    if master != myself:
      raise errors.OpPrereqError("This is not the master node, please connect"
                                 " to node '%s' and rerun the command" %
def FormatError(err):
  """Return a formatted error message for a given error.

  This function takes an exception instance and returns a tuple
  consisting of two values: first, the recommended exit code, and
  second, a string describing the error message (not
  newline-terminated).

  @param err: exception instance to format
  @return: (exit_code, message) tuple

  """
  # dispatch on the exception type, most specific types first
  if isinstance(err, errors.ConfigurationError):
    txt = "Corrupt configuration file: %s" % msg
    obuf.write(txt + "\n")
    obuf.write("Aborting.")
  elif isinstance(err, errors.HooksAbort):
    obuf.write("Failure: hooks execution failed:\n")
    for node, script, out in err.args[0]:
        obuf.write("  node: %s, script: %s, output: %s\n" %
                   (node, script, out))
        obuf.write("  node: %s, script: %s (no output)\n" %
  elif isinstance(err, errors.HooksFailure):
    obuf.write("Failure: hooks general failure: %s" % msg)
  elif isinstance(err, errors.ResolverError):
    this_host = netutils.Hostname.GetSysName()
    if err.args[0] == this_host:
      msg = "Failure: can't resolve my own hostname ('%s')"
      msg = "Failure: can't resolve hostname '%s'"
    obuf.write(msg % err.args[0])
  elif isinstance(err, errors.OpPrereqError):
    if len(err.args) == 2:
      obuf.write("Failure: prerequisites not met for this"
                 " operation:\nerror type: %s, error details:\n%s" %
                 (err.args[1], err.args[0]))
      obuf.write("Failure: prerequisites not met for this"
                 " operation:\n%s" % msg)
  elif isinstance(err, errors.OpExecError):
    obuf.write("Failure: command execution error:\n%s" % msg)
  elif isinstance(err, errors.TagError):
    obuf.write("Failure: invalid tag(s) given:\n%s" % msg)
  elif isinstance(err, errors.JobQueueDrainError):
    obuf.write("Failure: the job queue is marked for drain and doesn't"
               " accept new requests\n")
  elif isinstance(err, errors.JobQueueFull):
    obuf.write("Failure: the job queue is full and doesn't accept new"
               " job submissions until old jobs are archived\n")
  elif isinstance(err, errors.TypeEnforcementError):
    obuf.write("Parameter Error: %s" % msg)
  elif isinstance(err, errors.ParameterError):
    obuf.write("Failure: unknown/wrong parameter name '%s'" % msg)
  elif isinstance(err, luxi.NoMasterError):
    obuf.write("Cannot communicate with the master daemon.\nIs it running"
               " and listening for connections?")
  elif isinstance(err, luxi.TimeoutError):
    obuf.write("Timeout while talking to the master daemon. Jobs might have"
               " been submitted and will continue to run even if the call"
               " timed out. Useful commands in this situation are \"gnt-job"
               " list\", \"gnt-job cancel\" and \"gnt-job watch\". Error:\n")
  elif isinstance(err, luxi.PermissionError):
    obuf.write("It seems you don't have permissions to connect to the"
               " master daemon.\nPlease retry as a different user.")
  elif isinstance(err, luxi.ProtocolError):
    obuf.write("Unhandled protocol error while talking to the master daemon:\n"
  elif isinstance(err, errors.JobLost):
    obuf.write("Error checking job status: %s" % msg)
  elif isinstance(err, errors.GenericError):
    obuf.write("Unhandled Ganeti error: %s" % msg)
  elif isinstance(err, JobSubmittedException):
    # not an error: just print the submitted job's ID
    obuf.write("JobID: %s\n" % err.args[0])
    obuf.write("Unhandled exception: %s" % msg)
  return retcode, obuf.getvalue().rstrip('\n')
def GenericMain(commands, override=None, aliases=None):
  """Generic main function for all the gnt-* commands.

  Arguments:
    - commands: a dictionary with a special structure, see the design doc
      for command line handling.
    - override: if not None, we expect a dictionary with keys that will
      override command line options; this can be used to pass
      options from the scripts to generic functions
    - aliases: dictionary with command aliases {'alias': 'target, ...}

  """
  # save the program name and the entire command line for later logging
    binary = os.path.basename(sys.argv[0]) or sys.argv[0]

    if len(sys.argv) >= 2:
      binary += " " + sys.argv[1]
      old_cmdline = " ".join(sys.argv[2:])
    binary = "<unknown program>"

    func, options, args = _ParseArgs(sys.argv, commands, aliases)
  except errors.ParameterError, err:
    result, err_msg = FormatError(err)

  if func is None: # parse error

  if override is not None:
    for key, val in override.iteritems():
      setattr(options, key, val)

  utils.SetupLogging(constants.LOG_COMMANDS, debug=options.debug,
                     stderr_logging=True, program=binary)

    logging.info("run with arguments '%s'", old_cmdline)
    logging.info("run with no arguments")

  try:
    result = func(options, args)
  except (errors.GenericError, luxi.ProtocolError,
          JobSubmittedException), err:
    result, err_msg = FormatError(err)
    logging.exception("Error during command processing")
def ParseNicOption(optvalue):
  """Parses the value of the --net option(s).

  @param optvalue: list of (index, settings-dict) pairs as collected
      from the command line
  @return: list of per-NIC parameter dictionaries, indexed by NIC number

  """
    nic_max = max(int(nidx[0]) + 1 for nidx in optvalue)
  except (TypeError, ValueError), err:
    raise errors.OpPrereqError("Invalid NIC index passed: %s" % str(err))

  # pre-fill one (empty) dict per NIC slot
  nics = [{}] * nic_max
  for nidx, ndict in optvalue:

    if not isinstance(ndict, dict):
      raise errors.OpPrereqError("Invalid nic/%d value: expected dict,"
                                 " got %s" % (nidx, ndict))

    utils.ForceDictType(ndict, constants.INIC_PARAMS_TYPES)
def GenericInstanceCreate(mode, opts, args):
  """Add an instance to the cluster via either creation or import.

  @param mode: constants.INSTANCE_CREATE or constants.INSTANCE_IMPORT
  @param opts: the command line options selected by the user
  @param args: should contain only one element, the new instance name
  @rtype: int
  @return: the desired exit code

  """
  (pnode, snode) = SplitNodeOption(opts.node)

    hypervisor, hvparams = opts.hypervisor

    nics = ParseNicOption(opts.nics)
  elif mode == constants.INSTANCE_CREATE:
    # default of one nic, all auto

  # Validate the disk specification against the chosen disk template
  if opts.disk_template == constants.DT_DISKLESS:
    if opts.disks or opts.sd_size is not None:
      raise errors.OpPrereqError("Diskless instance but disk"
                                 " information passed")
    if (not opts.disks and not opts.sd_size
        and mode == constants.INSTANCE_CREATE):
      raise errors.OpPrereqError("No disk information specified")
    if opts.disks and opts.sd_size is not None:
      raise errors.OpPrereqError("Please use either the '--disk' or"
    if opts.sd_size is not None:
      # translate the legacy single-disk option into the --disk form
      opts.disks = [(0, {"size": opts.sd_size})]

        disk_max = max(int(didx[0]) + 1 for didx in opts.disks)
      except ValueError, err:
        raise errors.OpPrereqError("Invalid disk index passed: %s" % str(err))
      disks = [{}] * disk_max

    for didx, ddict in opts.disks:
      if not isinstance(ddict, dict):
        msg = "Invalid disk/%d value: expected dict, got %s" % (didx, ddict)
        raise errors.OpPrereqError(msg)
      elif "size" in ddict:
        if "adopt" in ddict:
          raise errors.OpPrereqError("Only one of 'size' and 'adopt' allowed"
                                     " (disk %d)" % didx)
          ddict["size"] = utils.ParseUnit(ddict["size"])
        except ValueError, err:
          raise errors.OpPrereqError("Invalid disk size for disk %d: %s" %
      elif "adopt" in ddict:
        if mode == constants.INSTANCE_IMPORT:
          raise errors.OpPrereqError("Disk adoption not allowed for instance"
        raise errors.OpPrereqError("Missing size or adoption source for"

  utils.ForceDictType(opts.beparams, constants.BES_PARAMETER_TYPES)
  utils.ForceDictType(hvparams, constants.HVS_PARAMETER_TYPES)

  # mode-specific opcode parameters
  if mode == constants.INSTANCE_CREATE:
    force_variant = opts.force_variant
    no_install = opts.no_install
    identify_defaults = False
  elif mode == constants.INSTANCE_IMPORT:
    force_variant = False
    src_node = opts.src_node
    src_path = opts.src_dir
    identify_defaults = opts.identify_defaults
    raise errors.ProgrammerError("Invalid creation mode %s" % mode)

  op = opcodes.OpInstanceCreate(instance_name=instance,
                                disk_template=opts.disk_template,
                                pnode=pnode, snode=snode,
                                ip_check=opts.ip_check,
                                name_check=opts.name_check,
                                wait_for_sync=opts.wait_for_sync,
                                file_storage_dir=opts.file_storage_dir,
                                file_driver=opts.file_driver,
                                iallocator=opts.iallocator,
                                hypervisor=hypervisor,
                                beparams=opts.beparams,
                                osparams=opts.osparams,
                                force_variant=force_variant,
                                no_install=no_install,
                                identify_defaults=identify_defaults)

  SubmitOrSend(op, opts)
class _RunWhileClusterStoppedHelper:
  """Helper class for L{RunWhileClusterStopped} to simplify state management

  """
  def __init__(self, feedback_fn, cluster_name, master_node, online_nodes):
    """Initializes this class.

    @type feedback_fn: callable
    @param feedback_fn: Feedback function
    @type cluster_name: string
    @param cluster_name: Cluster name
    @type master_node: string
    @param master_node: Master node name
    @type online_nodes: list
    @param online_nodes: List of names of online nodes

    """
    self.feedback_fn = feedback_fn
    self.cluster_name = cluster_name
    self.master_node = master_node
    self.online_nodes = online_nodes

    self.ssh = ssh.SshRunner(self.cluster_name)

    # all online nodes except the master
    self.nonmaster_nodes = [name for name in online_nodes
                            if name != master_node]

    assert self.master_node not in self.nonmaster_nodes

  def _RunCmd(self, node_name, cmd):
    """Runs a command on the local or a remote machine.

    @type node_name: string
    @param node_name: Machine name
    @param cmd: command to run, as a list of arguments
    @raise errors.OpExecError: if the command fails

    """
    if node_name is None or node_name == self.master_node:
      # No need to use SSH
      result = utils.RunCmd(cmd)
      result = self.ssh.Run(node_name, "root", utils.ShellQuoteArgs(cmd))

      errmsg = ["Failed to run command %s" % result.cmd]
        errmsg.append("on node %s" % node_name)
        errmsg.append(": exitcode %s and error %s" %
                      (result.exit_code, result.output))
      raise errors.OpExecError(" ".join(errmsg))

  def Call(self, fn, *args):
    """Call function while all daemons are stopped.

    @type fn: callable
    @param fn: Function to be called

    """
    # Pause watcher by acquiring an exclusive lock on watcher state file
    self.feedback_fn("Blocking watcher")
    watcher_block = utils.FileLock.Open(constants.WATCHER_STATEFILE)
      # TODO: Currently, this just blocks. There's no timeout.
      # TODO: Should it be a shared lock?
      watcher_block.Exclusive(blocking=True)

      # Stop master daemons, so that no new jobs can come in and all running
      # ones are finished
      self.feedback_fn("Stopping master daemons")
      self._RunCmd(None, [constants.DAEMON_UTIL, "stop-master"])

      # Stop daemons on all nodes
      for node_name in self.online_nodes:
        self.feedback_fn("Stopping daemons on %s" % node_name)
        self._RunCmd(node_name, [constants.DAEMON_UTIL, "stop-all"])

      # All daemons are shut down now
        return fn(self, *args)
      except Exception, err:
        _, errmsg = FormatError(err)
        logging.exception("Caught exception")
        self.feedback_fn(errmsg)

        # Start cluster again, master node last
        for node_name in self.nonmaster_nodes + [self.master_node]:
          self.feedback_fn("Starting daemons on %s" % node_name)
          self._RunCmd(node_name, [constants.DAEMON_UTIL, "start-all"])

      watcher_block.Close()
def RunWhileClusterStopped(feedback_fn, fn, *args):
  """Calls a function while all cluster daemons are stopped.

  @type feedback_fn: callable
  @param feedback_fn: Feedback function
  @type fn: callable
  @param fn: Function to be called when daemons are stopped
  @return: whatever C{fn} returns

  """
  feedback_fn("Gathering cluster information")

  # This ensures we're running on the master daemon
  (cluster_name, master_node) = \
    cl.QueryConfigValues(["cluster_name", "master_node"])

  online_nodes = GetOnlineNodes([], cl=cl)

  # Don't keep a reference to the client. The master daemon will go away.

  assert master_node in online_nodes

  return _RunWhileClusterStoppedHelper(feedback_fn, cluster_name, master_node,
                                       online_nodes).Call(fn, *args)
def GenerateTable(headers, fields, separator, data,
                  numfields=None, unitfields=None,
  """Prints a table with headers and different fields.

  @type headers: dict
  @param headers: dictionary mapping field names to headers for
      the table
  @type fields: list
  @param fields: the field names corresponding to each row in
      the data field
  @param separator: the separator to be used; if this is None,
      the default 'smart' algorithm is used which computes optimal
      field width, otherwise just the separator is used between
      each field
  @type data: list
  @param data: a list of lists, each sublist being one row to be output
  @type numfields: list
  @param numfields: a list with the fields that hold numeric
      values and thus should be right-aligned
  @type unitfields: list
  @param unitfields: a list with the fields that hold numeric
      values that should be formatted with the units field
  @type units: string or None
  @param units: the units we should use for formatting, or None for
      automatic choice (human-readable for non-separator usage, otherwise
      megabytes); this is a one-letter string

  """
  if numfields is None:
  if unitfields is None:

  numfields = utils.FieldSet(*numfields) # pylint: disable-msg=W0142
  unitfields = utils.FieldSet(*unitfields) # pylint: disable-msg=W0142

  # build the per-column printf-style format specifiers
  for field in fields:
    if headers and field not in headers:
      # TODO: handle better unknown fields (either revert to old
      # style of raising exception, or deal more intelligently with
      # variable fields)
      headers[field] = field
    if separator is not None:
      format_fields.append("%s")
    elif numfields.Matches(field):
      # numeric columns are right-aligned
      format_fields.append("%*s")
      format_fields.append("%-*s")

  if separator is None:
    mlens = [0 for name in fields]
    format_str = ' '.join(format_fields)
    format_str = separator.replace("%", "%%").join(format_fields)

    for idx, val in enumerate(row):
      if unitfields.Matches(fields[idx]):
        except (TypeError, ValueError):
          val = row[idx] = utils.FormatUnit(val, units)
      val = row[idx] = str(val)
      if separator is None:
        # track the widest value per column for alignment
        mlens[idx] = max(mlens[idx], len(val))

    for idx, name in enumerate(fields):
      if separator is None:
        mlens[idx] = max(mlens[idx], len(hdr))
        args.append(mlens[idx])
    result.append(format_str % tuple(args))

    if separator is None:
      assert len(mlens) == len(fields)
      if fields and not numfields.Matches(fields[-1]):

      line = ['-' for _ in fields]
      for idx in range(len(fields)):
        if separator is None:
          args.append(mlens[idx])
        args.append(line[idx])
      result.append(format_str % tuple(args))
def _FormatBool(value):
  """Formats a boolean value as a string.

  """


#: Default formatting for query results; (callback, align right)
_DEFAULT_FORMAT_QUERY = {
  constants.QFT_TEXT: (str, False),
  constants.QFT_BOOL: (_FormatBool, False),
  constants.QFT_NUMBER: (str, True),
  constants.QFT_TIMESTAMP: (utils.FormatTime, False),
  constants.QFT_OTHER: (str, False),
  constants.QFT_UNKNOWN: (str, False),
def _GetColumnFormatter(fdef, override, unit):
  """Returns formatting function for a field.

  @type fdef: L{objects.QueryFieldDefinition}
  @type override: dict
  @param override: Dictionary for overriding field formatting functions,
      indexed by field name, contents like L{_DEFAULT_FORMAT_QUERY}
  @type unit: string
  @param unit: Unit used for formatting fields of type L{constants.QFT_UNIT}
  @rtype: tuple; (callable, bool)
  @return: Returns the function to format a value (takes one parameter) and a
      boolean for aligning the value on the right-hand side

  """
  # per-field overrides take precedence over the static defaults
  fmt = override.get(fdef.name, None)

  assert constants.QFT_UNIT not in _DEFAULT_FORMAT_QUERY

  if fdef.kind == constants.QFT_UNIT:
    # Can't keep this information in the static dictionary
    return (lambda value: utils.FormatUnit(value, unit), True)

  fmt = _DEFAULT_FORMAT_QUERY.get(fdef.kind, None)

  raise NotImplementedError("Can't format column type '%s'" % fdef.kind)
class _QueryColumnFormatter:
  """Callable class for formatting fields of a query.

  """
  def __init__(self, fn, status_fn):
    """Initializes this class.

    @type fn: callable
    @param fn: Formatting function
    @type status_fn: callable
    @param status_fn: Function to report fields' status

    """
    self._status_fn = status_fn

  def __call__(self, data):
    """Returns a field's string representation.

    @param data: (status, value) tuple as returned by a query

    """
    (status, value) = data

    # Report result status to the accumulator first
    self._status_fn(status)

    if status == constants.RS_NORMAL:
      return self._fn(value)

    assert value is None, \
           "Found value %r for abnormal status %s" % (value, status)

    # abnormal statuses are rendered as fixed placeholder strings
    if status == constants.RS_UNKNOWN:

    if status == constants.RS_NODATA:

    if status == constants.RS_UNAVAIL:

    if status == constants.RS_OFFLINE:

    raise NotImplementedError("Unknown status %s" % status)
def FormatQueryResult(result, unit=None, format_override=None, separator=None,
  """Formats data in L{objects.QueryResponse}.

  @type result: L{objects.QueryResponse}
  @param result: result of query operation
  @param unit: Unit used for formatting fields of type L{constants.QFT_UNIT},
      see L{utils.text.FormatUnit}
  @type format_override: dict
  @param format_override: Dictionary for overriding field formatting functions,
      indexed by field name, contents like L{_DEFAULT_FORMAT_QUERY}
  @type separator: string or None
  @param separator: String used to separate fields
  @param header: Whether to output header row
  @return: (overall status, formatted table) tuple

  """
  if format_override is None:
    format_override = {}

  # per-status counters, filled in by _RecordStatus below
  stats = dict.fromkeys(constants.RS_ALL, 0)

  def _RecordStatus(status):

  for fdef in result.fields:
    assert fdef.title and fdef.name
    (fn, align_right) = _GetColumnFormatter(fdef, format_override, unit)
    columns.append(TableColumn(fdef.title,
                               _QueryColumnFormatter(fn, _RecordStatus),

  table = FormatTable(result.data, columns, header, separator)

  # Collect statistics
  assert len(stats) == len(constants.RS_ALL)
  assert compat.all(count >= 0 for count in stats.values())

  # Determine overall status. If there was no data, unknown fields must be
  # detected via the field definitions.
  if (stats[constants.RS_UNKNOWN] or
      (not result.data and _GetUnknownFields(result.fields))):
  elif compat.any(count > 0 for key, count in stats.items()
                  if key != constants.RS_NORMAL):
    status = QR_INCOMPLETE

  return (status, table)
def _GetUnknownFields(fdefs):
  """Returns list of unknown fields included in C{fdefs}.

  @type fdefs: list of L{objects.QueryFieldDefinition}
  @param fdefs: Field definitions to check
  @rtype: list of L{objects.QueryFieldDefinition}
  @return: Definitions whose kind is L{constants.QFT_UNKNOWN}

  """
  return [fdef for fdef in fdefs
          if fdef.kind == constants.QFT_UNKNOWN]
def _WarnUnknownFields(fdefs):
  """Prints a warning to stderr if a query included unknown fields.

  @type fdefs: list of L{objects.QueryFieldDefinition}
  @param fdefs: Field definitions from a query result
  @rtype: bool
  @return: Whether any unknown fields were found

  """
  unknown = _GetUnknownFields(fdefs)
  # Only warn when there actually are unknown fields; callers rely on the
  # returned flag matching the QR_UNKNOWN overall status (see GenericList)
  if unknown:
    ToStderr("Warning: Queried for unknown fields %s",
             utils.CommaJoin(fdef.name for fdef in unknown))

  return bool(unknown)
def GenericList(resource, fields, names, unit, separator, header, cl=None,
                format_override=None):
  """Generic implementation for listing all items of a resource.

  @param resource: One of L{constants.QR_OP_LUXI}
  @type fields: list of strings
  @param fields: List of fields to query for
  @type names: list of strings
  @param names: Names of items to query for
  @type unit: string or None
  @param unit: Unit used for formatting fields of type L{constants.QFT_UNIT} or
    None for automatic choice (human-readable for non-separator usage,
    otherwise megabytes); this is a one-letter string
  @type separator: string or None
  @param separator: String used to separate fields
  @type header: bool
  @param header: Whether to show header row
  @type format_override: dict
  @param format_override: Dictionary for overriding field formatting functions,
    indexed by field name, contents like L{_DEFAULT_FORMAT_QUERY}

  """
  # NOTE(review): client setup (obtaining a luxi client when "cl" is None)
  # is not visible in this view -- confirm upstream.
  response = cl.Query(resource, fields, qlang.MakeSimpleFilter("name", names))

  found_unknown = _WarnUnknownFields(response.fields)

  # NOTE(review): the "header=header" argument of this call and the loop
  # printing the formatted rows in "data" are not visible here.
  (status, data) = FormatQueryResult(response, unit=unit, separator=separator,
                                     format_override=format_override)

  # The overall status must agree with whether unknown fields were reported
  assert ((found_unknown and status == QR_UNKNOWN) or
          (not found_unknown and status != QR_UNKNOWN))

  if status == QR_UNKNOWN:
    return constants.EXIT_UNKNOWN_FIELD

  # TODO: Should the list command fail if not all data could be collected?
  return constants.EXIT_SUCCESS
def GenericListFields(resource, fields, separator, header, cl=None):
  """Generic implementation for listing fields for a resource.

  @param resource: One of L{constants.QR_OP_LUXI}
  @type fields: list of strings
  @param fields: List of fields to query for
  @type separator: string or None
  @param separator: String used to separate fields
  @type header: bool
  @param header: Whether to show header row

  """
  # NOTE(review): client setup for "cl" is not visible in this view.
  response = cl.QueryFields(resource, fields)

  found_unknown = _WarnUnknownFields(response.fields)

  # NOTE(review): the "columns = [" list wrapper around the following two
  # entries is not visible here; they are passed to FormatTable below.
  TableColumn("Name", str, False),
  TableColumn("Title", str, False),
  # TODO: Add field description to master daemon

  rows = [[fdef.name, fdef.title] for fdef in response.fields]

  for line in FormatTable(rows, columns, header, separator):
    # NOTE(review): the output call for "line" is not visible here.

  # NOTE(review): the condition guarding the following return is not
  # visible; presumably it checks "found_unknown".
  return constants.EXIT_UNKNOWN_FIELD

  return constants.EXIT_SUCCESS
2591 """Describes a column for L{FormatTable}.
2594 def __init__(self, title, fn, align_right):
2595 """Initializes this class.
2598 @param title: Column title
2600 @param fn: Formatting function
2601 @type align_right: bool
2602 @param align_right: Whether to align values on the right-hand side
2607 self.align_right = align_right
2610 def _GetColFormatString(width, align_right):
2611 """Returns the format string for a field.
2619 return "%%%s%ss" % (sign, width)
def FormatTable(rows, columns, header, separator):
  """Formats data as a table.

  @type rows: list of lists
  @param rows: Row data, one list per row
  @type columns: list of L{TableColumn}
  @param columns: Column descriptions
  @type header: bool
  @param header: Whether to show header row
  @type separator: string or None
  @param separator: String used to separate columns
  @rtype: list of strings
  @return: The formatted table, one string per line

  """
  if header:
    data = [[col.title for col in columns]]
    colwidth = [len(col.title) for col in columns]
  else:
    data = []
    colwidth = [0 for _ in columns]

  # Format row data
  for row in rows:
    assert len(row) == len(columns)

    formatted = [col.format(value) for value, col in zip(row, columns)]

    if separator is None:
      # Update column widths
      for idx, (oldwidth, value) in enumerate(zip(colwidth, formatted)):
        # Modifying a list's items while iterating is fine
        colwidth[idx] = max(oldwidth, len(value))

    data.append(formatted)

  if separator is not None:
    # Return early if a separator is used
    return [separator.join(row) for row in data]

  if columns and not columns[-1].align_right:
    # Avoid unnecessary spaces at end of line
    colwidth[-1] = 0

  # Build format string
  fmt = " ".join([_GetColFormatString(width, col.align_right)
                  for col, width in zip(columns, colwidth)])

  return [fmt % tuple(row) for row in data]
def FormatTimestamp(ts):
  """Formats a given timestamp.

  @type ts: timestamp
  @param ts: a timeval-type timestamp, a tuple of seconds and microseconds

  @rtype: string
  @return: a string with the formatted timestamp

  """
  if not isinstance(ts, (tuple, list)) or len(ts) != 2:
    # Not a valid (seconds, microseconds) pair
    return "?"

  (sec, usec) = ts
  # Seconds via strftime, microseconds appended manually
  return time.strftime("%F %T", time.localtime(sec)) + ".%06d" % usec
def ParseTimespec(value):
  """Parse a time specification.

  The following suffixes will be recognized:

    - s: seconds
    - m: minutes
    - h: hours
    - d: days
    - w: weeks

  Without any suffix, the value will be taken to be in seconds.

  @rtype: int
  @return: The number of seconds represented by C{value}
  @raise errors.OpPrereqError: If the specification cannot be parsed

  """
  value = str(value)
  if not value:
    raise errors.OpPrereqError("Empty time specification passed")

  # Multipliers converting each suffix to seconds
  suffix_map = {
    "s": 1,
    "m": 60,
    "h": 3600,
    "d": 86400,
    "w": 604800,
    }

  if value[-1] not in suffix_map:
    # No suffix: plain number of seconds
    try:
      value = int(value)
    except (TypeError, ValueError):
      raise errors.OpPrereqError("Invalid time specification '%s'" % value)
  else:
    multiplier = suffix_map[value[-1]]
    value = value[:-1]
    if not value:  # no data left after stripping the suffix
      raise errors.OpPrereqError("Invalid time specification (only"
                                 " suffix passed)")
    try:
      value = int(value) * multiplier
    except (TypeError, ValueError):
      raise errors.OpPrereqError("Invalid time specification '%s'" % value)

  return value
def GetOnlineNodes(nodes, cl=None, nowarn=False, secondary_ips=False,
                   filter_master=False):
  """Returns the names of online nodes.

  This function will also log a warning on stderr with the names of
  the offline nodes that were skipped (unless C{nowarn} is set).

  @param nodes: if not empty, use only this subset of nodes (minus the
    offline ones)
  @param cl: if not None, luxi client to use
  @type nowarn: boolean
  @param nowarn: by default, this function will output a note with the
    offline nodes that are skipped; if this parameter is True the
    note is not displayed
  @type secondary_ips: boolean
  @param secondary_ips: if True, return the secondary IPs instead of the
    names, useful for doing network traffic over the replication interface
    (if any)
  @type filter_master: boolean
  @param filter_master: if True, do not return the master node in the list
    (useful in coordination with secondary_ips where we cannot check our
    node name against the list)

  """
  # NOTE(review): client setup for "cl", the selection of "name_idx"
  # (index into the queried fields: 0 for names, presumably 2 for
  # secondary IPs) and the "if filter_master:"/"else:" branching around
  # the two filter_fn assignments below are not visible in this view.
  master_node = cl.QueryConfigValues(["master_node"])[0]
  filter_fn = lambda x: x != master_node
  filter_fn = lambda _: True

  # NOTE(review): the tail of this call's argument list is not visible.
  result = cl.QueryNodes(names=nodes, fields=["name", "offline", "sip"],
  offline = [row[0] for row in result if row[1]]
  if offline and not nowarn:
    ToStderr("Note: skipping offline node(s): %s" % utils.CommaJoin(offline))
  # Keep only online nodes that pass the master filter
  return [row[name_idx] for row in result if not row[1] and filter_fn(row[0])]
2775 def _ToStream(stream, txt, *args):
2776 """Write a message to a stream, bypassing the logging system
2778 @type stream: file object
2779 @param stream: the file to which we should write
2781 @param txt: the message
2786 stream.write(txt % args)
def ToStdout(txt, *args):
  """Write a message to stdout only, bypassing the logging system

  This is just a wrapper over _ToStream.

  @type txt: str
  @param txt: the message

  """
  _ToStream(sys.stdout, txt, *args)
def ToStderr(txt, *args):
  """Write a message to stderr only, bypassing the logging system

  This is just a wrapper over _ToStream.

  @type txt: str
  @param txt: the message

  """
  _ToStream(sys.stderr, txt, *args)
class JobExecutor(object):
  """Class which manages the submission and execution of multiple jobs.

  Note that instances of this class should not be reused between
  different invocations; job state is tracked in instance attributes.

  """
  def __init__(self, cl=None, verbose=True, opts=None, feedback_fn=None):
    # NOTE(review): initialization of the other attributes used by the
    # methods below (self.cl, self.queue, self.jobs, self.opts) is not
    # visible in this view -- confirm upstream.
    self.verbose = verbose
    self.feedback_fn = feedback_fn

  def QueueJob(self, name, *ops):
    """Record a job for later submit.

    @type name: string
    @param name: a description of the job, will be used in WaitJobSet

    """
    SetGenericOpcodeOpts(ops, self.opts)
    self.queue.append((name, ops))

  def SubmitPending(self, each=False):
    """Submit all pending jobs.

    @type each: bool
    @param each: whether to submit jobs one by one or in a single call

    """
    # NOTE(review): the "if each:"/"else:" branching between the per-job
    # and bulk (SubmitManyJobs) submission paths, and the initialization
    # of "results", are not visible in this view.
    for row in self.queue:
      # SubmitJob will remove the success status, but raise an exception if
      # the submission fails, so we'll notice that anyway.
      results.append([True, self.cl.SubmitJob(row[1])])
      results = self.cl.SubmitManyJobs([row[1] for row in self.queue])
    # Record (index, submit status, job id or error, name) per queued job
    for (idx, ((status, data), (name, _))) in enumerate(zip(results,
      self.jobs.append((idx, status, data, name))

  def _ChooseJob(self):
    """Choose a non-waiting/queued job to poll next.

    """
    assert self.jobs, "_ChooseJob called with empty job list"

    # Query the master for the current status of all tracked jobs
    result = self.cl.QueryJobs([i[2] for i in self.jobs], ["status"])

    for job_data, status in zip(self.jobs, result):
      if (isinstance(status, list) and status and
          status[0] in (constants.JOB_STATUS_QUEUED,
                        constants.JOB_STATUS_WAITLOCK,
                        constants.JOB_STATUS_CANCELING)):
        # job is still present and waiting
      # good candidate found (either running job or lost job)
      self.jobs.remove(job_data)
      # NOTE(review): the "continue" for the waiting case and the return
      # of the chosen "job_data" are not visible in this view.

    # Fall back to the first tracked job
    return self.jobs.pop(0)

  def GetResults(self):
    """Wait for and return the results of all jobs.

    @rtype: list
    @return: list of tuples (success, job results), in the same order
        as the submitted jobs; if a job has failed, instead of the result
        there will be the error message

    """
    # Make sure everything queued has been submitted
    self.SubmitPending()

    # NOTE(review): the initialization of "results" and the verbosity
    # guard around the following output are not visible in this view.
    ok_jobs = [row[2] for row in self.jobs if row[1]]
    ToStdout("Submitted jobs %s", utils.CommaJoin(ok_jobs))

    # first, remove any non-submitted jobs
    self.jobs, failures = compat.partition(self.jobs, lambda x: x[1])
    for idx, _, jid, name in failures:
      ToStderr("Failed to submit job for %s: %s", name, jid)
      results.append((idx, False, jid))

    # NOTE(review): the polling loop and the "try:" statement to which the
    # "except" clauses below belong are not visible in this view, as is
    # the computation of "success".
    (idx, _, jid, name) = self._ChooseJob()
    ToStdout("Waiting for job %s for %s...", jid, name)
    job_result = PollJob(jid, cl=self.cl, feedback_fn=self.feedback_fn)
    except errors.JobLost, err:
      _, job_result = FormatError(err)
      ToStderr("Job %s for %s has been archived, cannot check its result",
    except (errors.GenericError, luxi.ProtocolError), err:
      _, job_result = FormatError(err)
    # the error message will always be shown, verbose or not
    ToStderr("Job %s for %s has failed: %s", jid, name, job_result)

    results.append((idx, success, job_result))

    # sort based on the index, then drop it
    results = [i[1:] for i in results]
    # NOTE(review): the final "return results" is not visible in this view.

  def WaitOrShow(self, wait):
    """Wait for job results or only print the job IDs.

    @type wait: boolean
    @param wait: whether to wait or not

    """
    # NOTE(review): the "if wait:"/"else:" branching around the two paths
    # below, and the per-job success check selecting between the stdout
    # and stderr output, are not visible in this view.
    return self.GetResults()
    self.SubmitPending()
    for _, status, result, name in self.jobs:
      ToStdout("%s: %s", result, name)
      ToStderr("Failure for %s: %s", name, result)
    return [row[1:3] for row in self.jobs]
def FormatParameterDict(buf, param_dict, actual, level=1):
  """Formats a parameter dictionary.

  @type buf: L{StringIO}
  @param buf: the buffer into which to write
  @type param_dict: dict
  @param param_dict: the own parameters
  @type actual: dict
  @param actual: the current parameter set (including defaults)
  @type level: int
  @param level: Level of indent

  """
  indent = " " * level
  for key in sorted(actual):
    # Show the explicitly-set value, or mark the inherited default
    val = param_dict.get(key, "default (%s)" % actual[key])
    buf.write("%s- %s: %s\n" % (indent, key, val))