4 # Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011 Google Inc.
6 # This program is free software; you can redistribute it and/or modify
7 # it under the terms of the GNU General Public License as published by
8 # the Free Software Foundation; either version 2 of the License, or
9 # (at your option) any later version.
11 # This program is distributed in the hope that it will be useful, but
12 # WITHOUT ANY WARRANTY; without even the implied warranty of
13 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 # General Public License for more details.
16 # You should have received a copy of the GNU General Public License
17 # along with this program; if not, write to the Free Software
18 # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
22 """Module dealing with command line parsing"""
30 from cStringIO import StringIO
32 from ganeti import utils
33 from ganeti import errors
34 from ganeti import constants
35 from ganeti import opcodes
36 from ganeti import luxi
37 from ganeti import ssconf
38 from ganeti import rpc
39 from ganeti import ssh
40 from ganeti import compat
41 from ganeti import netutils
42 from ganeti import qlang
44 from optparse import (OptionParser, TitledHelpFormatter,
45 Option, OptionValueError)
49 # Command line options
61 "CLUSTER_DOMAIN_SECRET_OPT",
77 "FILESTORE_DRIVER_OPT",
86 "DEFAULT_IALLOCATOR_OPT",
87 "IDENTIFY_DEFAULTS_OPT",
89 "IGNORE_FAILURES_OPT",
91 "IGNORE_REMOVE_FAILURES_OPT",
92 "IGNORE_SECONDARIES_OPT",
96 "MAINTAIN_NODE_HEALTH_OPT",
101 "NEW_CLUSTER_CERT_OPT",
102 "NEW_CLUSTER_DOMAIN_SECRET_OPT",
103 "NEW_CONFD_HMAC_KEY_OPT",
108 "NODE_PLACEMENT_OPT",
112 "NODRBD_STORAGE_OPT",
118 "NOMODIFY_ETCHOSTS_OPT",
119 "NOMODIFY_SSH_SETUP_OPT",
125 "NOSSH_KEYCHECK_OPT",
134 "PREALLOC_WIPE_DISKS_OPT",
135 "PRIMARY_IP_VERSION_OPT",
140 "REMOVE_INSTANCE_OPT",
148 "SHUTDOWN_TIMEOUT_OPT",
163 # Generic functions for CLI programs
165 "GenericInstanceCreate",
171 "JobSubmittedException",
173 "RunWhileClusterStopped",
177 # Formatting functions
178 "ToStderr", "ToStdout",
181 "FormatParameterDict",
190 # command line options support infrastructure
191 "ARGS_MANY_INSTANCES",
210 "OPT_COMPL_INST_ADD_NODES",
211 "OPT_COMPL_MANY_NODES",
212 "OPT_COMPL_ONE_IALLOCATOR",
213 "OPT_COMPL_ONE_INSTANCE",
214 "OPT_COMPL_ONE_NODE",
215 "OPT_COMPL_ONE_NODEGROUP",
221 "COMMON_CREATE_OPTS",
227 #: Priorities (sorted)
229 ("low", constants.OP_PRIO_LOW),
230 ("normal", constants.OP_PRIO_NORMAL),
231 ("high", constants.OP_PRIO_HIGH),
234 #: Priority dictionary for easier lookup
235 # TODO: Replace this and _PRIORITY_NAMES with a single sorted dictionary once
236 # we migrate to Python 2.6
237 _PRIONAME_TO_VALUE = dict(_PRIORITY_NAMES)
239 # Query result status for clients
242 QR_INCOMPLETE) = range(3)
246 def __init__(self, min=0, max=None): # pylint: disable-msg=W0622
251 return ("<%s min=%s max=%s>" %
252 (self.__class__.__name__, self.min, self.max))
255 class ArgSuggest(_Argument):
256 """Suggesting argument.
258 Value can be any of the ones passed to the constructor.
261 # pylint: disable-msg=W0622
262 def __init__(self, min=0, max=None, choices=None):
263 _Argument.__init__(self, min=min, max=max)
264 self.choices = choices
267 return ("<%s min=%s max=%s choices=%r>" %
268 (self.__class__.__name__, self.min, self.max, self.choices))
271 class ArgChoice(ArgSuggest):
274 Value can be any of the ones passed to the constructor. Like L{ArgSuggest},
275 but value must be one of the choices.
280 class ArgUnknown(_Argument):
281 """Unknown argument to program (e.g. determined at runtime).
286 class ArgInstance(_Argument):
287 """Instances argument.
292 class ArgNode(_Argument):
298 class ArgGroup(_Argument):
299 """Node group argument.
304 class ArgJobId(_Argument):
310 class ArgFile(_Argument):
311 """File path argument.
316 class ArgCommand(_Argument):
322 class ArgHost(_Argument):
328 class ArgOs(_Argument):
# Pre-built argument definitions for the command tables.
ARGS_MANY_INSTANCES = [ArgInstance()]
ARGS_MANY_NODES = [ArgNode()]
ARGS_MANY_GROUPS = [ArgGroup()]
ARGS_ONE_INSTANCE = [ArgInstance(min=1, max=1)]
ARGS_ONE_NODE = [ArgNode(min=1, max=1)]
# FIX: this was built from ArgInstance; a single node-group argument must use
# ArgGroup, otherwise shell completion offers instance names instead of groups.
ARGS_ONE_GROUP = [ArgGroup(min=1, max=1)]
ARGS_ONE_OS = [ArgOs(min=1, max=1)]
def _ExtractTagsObject(opts, args):
  """Extract the tag type object.

  Note that this function will modify its args parameter.

  @param opts: parsed command-line options; the command definition must
      have set a C{tag_type} attribute (one of the C{constants.TAG_*}
      values), otherwise this is a programmer error
  @param args: positional arguments; for node/instance tags the target
      name is presumably consumed from this list (the mutation noted
      above) -- the consuming code is elided here, confirm
  @return: callers (e.g. L{ListTags}) unpack the result as a
      C{(kind, name)} tuple

  """
  if not hasattr(opts, "tag_type"):
    raise errors.ProgrammerError("tag_type not passed to _ExtractTagsObject")
  # NOTE(review): 'kind' is presumably opts.tag_type; its assignment is not
  # visible in this excerpt -- confirm.
  if kind == constants.TAG_CLUSTER:
  elif kind == constants.TAG_NODE or kind == constants.TAG_INSTANCE:
    raise errors.OpPrereqError("no arguments passed to the command")
  raise errors.ProgrammerError("Unhandled tag type '%s'" % kind)
def _ExtendTags(opts, args):
  """Extend the args if a source file has been given.

  This function will extend the tags with the contents of the file
  passed in the 'tags_source' attribute of the opts parameter. A file
  named '-' will be replaced by stdin.

  @param opts: parsed options; C{tags_source} is presumably C{None}
      when no source file was requested -- the guard is elided here
  @param args: list of tags, extended in place with one tag per
      stripped line of the source file

  """
  fname = opts.tags_source
    new_fh = open(fname, "r")
    # we don't use the nice 'new_data = [line.strip() for line in fh]'
    # because of python bug 1633941
    line = new_fh.readline()
    new_data.append(line.strip())
  args.extend(new_data)
def ListTags(opts, args):
  """List the tags on a given object.

  This is a generic implementation that knows how to deal with all
  three cases of tag objects (cluster, node, instance). The opts
  argument is expected to contain a tag_type field denoting what
  object type we work on.

  """
  kind, name = _ExtractTagsObject(opts, args)
  # NOTE(review): 'cl' is presumably a luxi client created in code elided
  # from this excerpt -- confirm.
  result = cl.QueryTags(kind, name)
  # Materialize the result so it can be sorted/printed (the output code
  # follows in elided lines).
  result = list(result)
def AddTags(opts, args):
  """Add tags on a given object.

  This is a generic implementation that knows how to deal with all
  three cases of tag objects (cluster, node, instance). The opts
  argument is expected to contain a tag_type field denoting what
  object type we work on.

  @raise errors.OpPrereqError: if no tags are given

  """
  kind, name = _ExtractTagsObject(opts, args)
  # Merge in tags read from --from file (or stdin), if any.
  _ExtendTags(opts, args)
    raise errors.OpPrereqError("No tags to be added")
  # NOTE(review): opcode name is OpAddTags while RemoveTags uses OpTagsDel;
  # the naming is inconsistent -- confirm against the opcodes module.
  op = opcodes.OpAddTags(kind=kind, name=name, tags=args)
  SubmitOpCode(op, opts=opts)
def RemoveTags(opts, args):
  """Remove tags from a given object.

  This is a generic implementation that knows how to deal with all
  three cases of tag objects (cluster, node, instance). The opts
  argument is expected to contain a tag_type field denoting what
  object type we work on.

  @raise errors.OpPrereqError: if no tags were given to remove

  """
  kind, name = _ExtractTagsObject(opts, args)
  # Merge in tags read from --from file (or stdin), if any.
  _ExtendTags(opts, args)
    raise errors.OpPrereqError("No tags to be removed")
  op = opcodes.OpTagsDel(kind=kind, name=name, tags=args)
  SubmitOpCode(op, opts=opts)
def check_unit(option, opt, value): # pylint: disable-msg=W0613
  """OptParsers custom converter for units.

  Converts strings such as "512" or "4m" into a mebibyte count via
  L{utils.ParseUnit}; parse failures are re-raised as
  OptionValueError so optparse reports them nicely to the user.

  """
    return utils.ParseUnit(value)
  except errors.UnitParseError, err:
    raise OptionValueError("option %s: %s" % (opt, err))
def _SplitKeyVal(opt, data):
  """Convert a KeyVal string into a dict.

  This function will convert a key=val[,...] string into a dict. Empty
  values will be converted specially: keys which have the prefix 'no_'
  will have the value=False and the prefix stripped, the others will
  have value=True.

  @param opt: a string holding the option name for which we process the
      data, used in building error messages
  @param data: a string of the format key=val,key=val,...
  @return: {key=val, key=val}
  @raises errors.ParameterError: if there are duplicate keys

  """
  # Split on "," with escape handling, so values may contain escaped commas.
  for elem in utils.UnescapeAndSplit(data, sep=","):
      # "key=value" form: explicit value.
      key, val = elem.split("=", 1)
      # Bare-key form: NO_PREFIX means False (and the prefix is stripped),
      # UN_PREFIX means "reset to default" (None), anything else means True.
      if elem.startswith(NO_PREFIX):
        key, val = elem[len(NO_PREFIX):], False
      elif elem.startswith(UN_PREFIX):
        key, val = elem[len(UN_PREFIX):], None
        key, val = elem, True
      raise errors.ParameterError("Duplicate key '%s' in option %s" %
def check_ident_key_val(option, opt, value): # pylint: disable-msg=W0613
  """Custom parser for ident:key=val,key=val options.

  This will store the parsed values as a tuple (ident, {key: val}). As such,
  multiple uses of this option via action=append is possible.

  @return: C{(ident, False)} for a NO_PREFIX'd ident, C{(ident, None)}
      for an UN_PREFIX'd one, otherwise C{(ident, kv_dict)}

  """
  # No ":" separator: the whole value is the ident, with no options.
  ident, rest = value, ''
    ident, rest = value.split(":", 1)

  if ident.startswith(NO_PREFIX):
    # Removing a parameter group: trailing options make no sense here.
      msg = "Cannot pass options when removing parameter groups: %s" % value
      raise errors.ParameterError(msg)
    retval = (ident[len(NO_PREFIX):], False)
  elif ident.startswith(UN_PREFIX):
    # Resetting a parameter group to defaults: same restriction applies.
      msg = "Cannot pass options when removing parameter groups: %s" % value
      raise errors.ParameterError(msg)
    retval = (ident[len(UN_PREFIX):], None)
    kv_dict = _SplitKeyVal(opt, rest)
    retval = (ident, kv_dict)
def check_key_val(option, opt, value): # pylint: disable-msg=W0613
  """Custom parser function for key=val,key=val options.

  This will store the parsed values as a dict {key: val}.

  @return: the dict produced by L{_SplitKeyVal}; see there for the
      prefix handling and duplicate-key errors

  """
  return _SplitKeyVal(opt, value)
def check_bool(option, opt, value): # pylint: disable-msg=W0613
  """Custom parser for yes/no options.

  This will store the parsed value as either True or False.

  @raise errors.ParameterError: for any value not recognized as a
      true/false spelling

  """
  # Comparison is case-insensitive.
  value = value.lower()
  # NOTE(review): the True/False return statements are elided in this excerpt.
  if value == constants.VALUE_FALSE or value == "no":
  elif value == constants.VALUE_TRUE or value == "yes":
    raise errors.ParameterError("Invalid boolean value '%s'" % value)
545 # completion_suggestion is normally a list. Using numeric values not evaluating
546 # to False for dynamic completion.
547 (OPT_COMPL_MANY_NODES,
549 OPT_COMPL_ONE_INSTANCE,
551 OPT_COMPL_ONE_IALLOCATOR,
552 OPT_COMPL_INST_ADD_NODES,
553 OPT_COMPL_ONE_NODEGROUP) = range(100, 107)
555 OPT_COMPL_ALL = frozenset([
556 OPT_COMPL_MANY_NODES,
558 OPT_COMPL_ONE_INSTANCE,
560 OPT_COMPL_ONE_IALLOCATOR,
561 OPT_COMPL_INST_ADD_NODES,
562 OPT_COMPL_ONE_NODEGROUP,
566 class CliOption(Option):
567 """Custom option class for optparse.
570 ATTRS = Option.ATTRS + [
571 "completion_suggest",
573 TYPES = Option.TYPES + (
579 TYPE_CHECKER = Option.TYPE_CHECKER.copy()
580 TYPE_CHECKER["identkeyval"] = check_ident_key_val
581 TYPE_CHECKER["keyval"] = check_key_val
582 TYPE_CHECKER["unit"] = check_unit
583 TYPE_CHECKER["bool"] = check_bool
586 # optparse.py sets make_option, so we do it for our own option class, too
587 cli_option = CliOption
# Generic output-formatting and confirmation options, shared by most
# client scripts.

DEBUG_OPT = cli_option("-d", "--debug",
                       action="count", default=0,
                       help="Increase debugging level")

NOHDR_OPT = cli_option("--no-headers",
                       dest="no_headers", action="store_true", default=False,
                       help="Don't display column headers")

SEP_OPT = cli_option("--separator",
                     dest="separator", action="store", default=None,
                     help=("Separator between output fields"
                           " (defaults to one space)"))

USEUNITS_OPT = cli_option("--units",
                          dest="units", default=None,
                          choices=('h', 'm', 'g', 't'),
                          help="Specify units for output (one of h/m/g/t)")

FIELDS_OPT = cli_option("-o", "--output",
                        dest="output", action="store", type="string",
                        metavar="FIELDS",
                        help="Comma separated list of output fields")

FORCE_OPT = cli_option("-f", "--force",
                       dest="force", action="store_true", default=False,
                       help="Force the operation")

CONFIRM_OPT = cli_option("--yes",
                         dest="confirm", action="store_true", default=False,
                         help="Do not require confirmation")
618 IGNORE_OFFLINE_OPT = cli_option("--ignore-offline", dest="ignore_offline",
619 action="store_true", default=False,
620 help=("Ignore offline nodes and do as much"
623 TAG_SRC_OPT = cli_option("--from", dest="tags_source",
624 default=None, help="File with tag names")
626 SUBMIT_OPT = cli_option("--submit", dest="submit_only",
627 default=False, action="store_true",
628 help=("Submit the job and return the job ID, but"
629 " don't wait for the job to finish"))
631 SYNC_OPT = cli_option("--sync", dest="do_locking",
632 default=False, action="store_true",
633 help=("Grab locks while doing the queries"
634 " in order to ensure more consistent results"))
DRY_RUN_OPT = cli_option("--dry-run", default=False,
                         # TODO(review): help text reads "verify it it could
                         # be" -- the first "it" should be "if". Fixing needs
                         # the elided tail of this statement, so only flagged.
                         help=("Do not execute the operation, just run the"
                               " check steps and verify it it could be"
642 VERBOSE_OPT = cli_option("-v", "--verbose", default=False,
644 help="Increase the verbosity of the operation")
646 DEBUG_SIMERR_OPT = cli_option("--debug-simulate-errors", default=False,
647 action="store_true", dest="simulate_errors",
648 help="Debugging option that makes the operation"
649 " treat most runtime checks as failed")
651 NWSYNC_OPT = cli_option("--no-wait-for-sync", dest="wait_for_sync",
652 default=True, action="store_false",
653 help="Don't wait for sync (DANGEROUS!)")
655 DISK_TEMPLATE_OPT = cli_option("-t", "--disk-template", dest="disk_template",
656 help="Custom disk setup (diskless, file,"
658 default=None, metavar="TEMPL",
659 choices=list(constants.DISK_TEMPLATES))
661 NONICS_OPT = cli_option("--no-nics", default=False, action="store_true",
662 help="Do not create any network cards for"
665 FILESTORE_DIR_OPT = cli_option("--file-storage-dir", dest="file_storage_dir",
666 help="Relative path under default cluster-wide"
667 " file storage dir to store file-based disks",
668 default=None, metavar="<DIR>")
670 FILESTORE_DRIVER_OPT = cli_option("--file-driver", dest="file_driver",
671 help="Driver to use for image files",
672 default="loop", metavar="<DRIVER>",
673 choices=list(constants.FILE_DRIVER))
675 IALLOCATOR_OPT = cli_option("-I", "--iallocator", metavar="<NAME>",
676 help="Select nodes for the instance automatically"
677 " using the <NAME> iallocator plugin",
678 default=None, type="string",
679 completion_suggest=OPT_COMPL_ONE_IALLOCATOR)
681 DEFAULT_IALLOCATOR_OPT = cli_option("-I", "--default-iallocator",
683 help="Set the default instance allocator plugin",
684 default=None, type="string",
685 completion_suggest=OPT_COMPL_ONE_IALLOCATOR)
687 OS_OPT = cli_option("-o", "--os-type", dest="os", help="What OS to run",
689 completion_suggest=OPT_COMPL_ONE_OS)
691 OSPARAMS_OPT = cli_option("-O", "--os-parameters", dest="osparams",
692 type="keyval", default={},
693 help="OS parameters")
695 FORCE_VARIANT_OPT = cli_option("--force-variant", dest="force_variant",
696 action="store_true", default=False,
697 help="Force an unknown variant")
699 NO_INSTALL_OPT = cli_option("--no-install", dest="no_install",
700 action="store_true", default=False,
701 help="Do not install the OS (will"
704 BACKEND_OPT = cli_option("-B", "--backend-parameters", dest="beparams",
705 type="keyval", default={},
706 help="Backend parameters")
708 HVOPTS_OPT = cli_option("-H", "--hypervisor-parameters", type="keyval",
709 default={}, dest="hvparams",
710 help="Hypervisor parameters")
712 HYPERVISOR_OPT = cli_option("-H", "--hypervisor-parameters", dest="hypervisor",
713 help="Hypervisor and hypervisor options, in the"
714 " format hypervisor:option=value,option=value,...",
715 default=None, type="identkeyval")
717 HVLIST_OPT = cli_option("-H", "--hypervisor-parameters", dest="hvparams",
718 help="Hypervisor and hypervisor options, in the"
719 " format hypervisor:option=value,option=value,...",
720 default=[], action="append", type="identkeyval")
722 NOIPCHECK_OPT = cli_option("--no-ip-check", dest="ip_check", default=True,
723 action="store_false",
724 help="Don't check that the instance's IP"
727 NONAMECHECK_OPT = cli_option("--no-name-check", dest="name_check",
728 default=True, action="store_false",
729 help="Don't check that the instance's name"
732 NET_OPT = cli_option("--net",
733 help="NIC parameters", default=[],
734 dest="nics", action="append", type="identkeyval")
736 DISK_OPT = cli_option("--disk", help="Disk parameters", default=[],
737 dest="disks", action="append", type="identkeyval")
739 DISKIDX_OPT = cli_option("--disks", dest="disks", default=None,
740 help="Comma-separated list of disks"
741 " indices to act on (e.g. 0,2) (optional,"
742 " defaults to all disks)")
744 OS_SIZE_OPT = cli_option("-s", "--os-size", dest="sd_size",
745 help="Enforces a single-disk configuration using the"
746 " given disk size, in MiB unless a suffix is used",
747 default=None, type="unit", metavar="<size>")
749 IGNORE_CONSIST_OPT = cli_option("--ignore-consistency",
750 dest="ignore_consistency",
751 action="store_true", default=False,
752 help="Ignore the consistency of the disks on"
755 NONLIVE_OPT = cli_option("--non-live", dest="live",
756 default=True, action="store_false",
757 help="Do a non-live migration (this usually means"
758 " freeze the instance, save the state, transfer and"
759 " only then resume running on the secondary node)")
MIGRATION_MODE_OPT = cli_option("--migration-mode", dest="migration_mode",
                                choices=list(constants.HT_MIGRATION_MODES),
                                # TODO(review): the help text opens a paren at
                                # "(choose" that is never closed; it should end
                                # with "non-live)".
                                help="Override default migration mode (choose"
                                " either live or non-live")
767 NODE_PLACEMENT_OPT = cli_option("-n", "--node", dest="node",
768 help="Target node and optional secondary node",
769 metavar="<pnode>[:<snode>]",
770 completion_suggest=OPT_COMPL_INST_ADD_NODES)
772 NODE_LIST_OPT = cli_option("-n", "--node", dest="nodes", default=[],
773 action="append", metavar="<node>",
774 help="Use only this node (can be used multiple"
775 " times, if not given defaults to all nodes)",
776 completion_suggest=OPT_COMPL_ONE_NODE)
778 NODEGROUP_OPT = cli_option("-g", "--node-group",
780 help="Node group (name or uuid)",
781 metavar="<nodegroup>",
782 default=None, type="string",
783 completion_suggest=OPT_COMPL_ONE_NODEGROUP)
785 SINGLE_NODE_OPT = cli_option("-n", "--node", dest="node", help="Target node",
787 completion_suggest=OPT_COMPL_ONE_NODE)
789 NOSTART_OPT = cli_option("--no-start", dest="start", default=True,
790 action="store_false",
791 help="Don't start the instance after creation")
793 SHOWCMD_OPT = cli_option("--show-cmd", dest="show_command",
794 action="store_true", default=False,
795 help="Show command instead of executing it")
CLEANUP_OPT = cli_option("--cleanup", dest="cleanup",
                         default=False, action="store_true",
                         # TODO(review): "traffic and " + " disrupt" produces
                         # a double space in the displayed help text.
                         help="Instead of performing the migration, try to"
                         " recover from a failed cleanup. This is safe"
                         " to run even if the instance is healthy, but it"
                         " will create extra replication traffic and "
                         " disrupt briefly the replication (like during the"
806 STATIC_OPT = cli_option("-s", "--static", dest="static",
807 action="store_true", default=False,
808 help="Only show configuration data, not runtime data")
810 ALL_OPT = cli_option("--all", dest="show_all",
811 default=False, action="store_true",
812 help="Show info on all instances on the cluster."
813 " This can take a long time to run, use wisely")
815 SELECT_OS_OPT = cli_option("--select-os", dest="select_os",
816 action="store_true", default=False,
817 help="Interactive OS reinstall, lists available"
818 " OS templates for selection")
820 IGNORE_FAILURES_OPT = cli_option("--ignore-failures", dest="ignore_failures",
821 action="store_true", default=False,
822 help="Remove the instance from the cluster"
823 " configuration even if there are failures"
824 " during the removal process")
826 IGNORE_REMOVE_FAILURES_OPT = cli_option("--ignore-remove-failures",
827 dest="ignore_remove_failures",
828 action="store_true", default=False,
829 help="Remove the instance from the"
830 " cluster configuration even if there"
831 " are failures during the removal"
834 REMOVE_INSTANCE_OPT = cli_option("--remove-instance", dest="remove_instance",
835 action="store_true", default=False,
836 help="Remove the instance from the cluster")
838 NEW_SECONDARY_OPT = cli_option("-n", "--new-secondary", dest="dst_node",
839 help="Specifies the new secondary node",
840 metavar="NODE", default=None,
841 completion_suggest=OPT_COMPL_ONE_NODE)
843 ON_PRIMARY_OPT = cli_option("-p", "--on-primary", dest="on_primary",
844 default=False, action="store_true",
845 help="Replace the disk(s) on the primary"
846 " node (only for the drbd template)")
848 ON_SECONDARY_OPT = cli_option("-s", "--on-secondary", dest="on_secondary",
849 default=False, action="store_true",
850 help="Replace the disk(s) on the secondary"
851 " node (only for the drbd template)")
853 AUTO_PROMOTE_OPT = cli_option("--auto-promote", dest="auto_promote",
854 default=False, action="store_true",
855 help="Lock all nodes and auto-promote as needed"
858 AUTO_REPLACE_OPT = cli_option("-a", "--auto", dest="auto",
859 default=False, action="store_true",
860 help="Automatically replace faulty disks"
861 " (only for the drbd template)")
863 IGNORE_SIZE_OPT = cli_option("--ignore-size", dest="ignore_size",
864 default=False, action="store_true",
865 help="Ignore current recorded size"
866 " (useful for forcing activation when"
867 " the recorded size is wrong)")
869 SRC_NODE_OPT = cli_option("--src-node", dest="src_node", help="Source node",
871 completion_suggest=OPT_COMPL_ONE_NODE)
873 SRC_DIR_OPT = cli_option("--src-dir", dest="src_dir", help="Source directory",
876 SECONDARY_IP_OPT = cli_option("-s", "--secondary-ip", dest="secondary_ip",
877 help="Specify the secondary ip for the node",
878 metavar="ADDRESS", default=None)
880 READD_OPT = cli_option("--readd", dest="readd",
881 default=False, action="store_true",
882 help="Readd old node after replacing it")
884 NOSSH_KEYCHECK_OPT = cli_option("--no-ssh-key-check", dest="ssh_key_check",
885 default=True, action="store_false",
886 help="Disable SSH key fingerprint checking")
889 MC_OPT = cli_option("-C", "--master-candidate", dest="master_candidate",
890 type="bool", default=None, metavar=_YORNO,
891 help="Set the master_candidate flag on the node")
893 OFFLINE_OPT = cli_option("-O", "--offline", dest="offline", metavar=_YORNO,
894 type="bool", default=None,
895 help="Set the offline flag on the node")
897 DRAINED_OPT = cli_option("-D", "--drained", dest="drained", metavar=_YORNO,
898 type="bool", default=None,
899 help="Set the drained flag on the node")
901 CAPAB_MASTER_OPT = cli_option("--master-capable", dest="master_capable",
902 type="bool", default=None, metavar=_YORNO,
903 help="Set the master_capable flag on the node")
905 CAPAB_VM_OPT = cli_option("--vm-capable", dest="vm_capable",
906 type="bool", default=None, metavar=_YORNO,
907 help="Set the vm_capable flag on the node")
909 ALLOCATABLE_OPT = cli_option("--allocatable", dest="allocatable",
910 type="bool", default=None, metavar=_YORNO,
911 help="Set the allocatable flag on a volume")
913 NOLVM_STORAGE_OPT = cli_option("--no-lvm-storage", dest="lvm_storage",
914 help="Disable support for lvm based instances"
916 action="store_false", default=True)
918 ENABLED_HV_OPT = cli_option("--enabled-hypervisors",
919 dest="enabled_hypervisors",
920 help="Comma-separated list of hypervisors",
921 type="string", default=None)
923 NIC_PARAMS_OPT = cli_option("-N", "--nic-parameters", dest="nicparams",
924 type="keyval", default={},
925 help="NIC parameters")
927 CP_SIZE_OPT = cli_option("-C", "--candidate-pool-size", default=None,
928 dest="candidate_pool_size", type="int",
929 help="Set the candidate pool size")
931 VG_NAME_OPT = cli_option("--vg-name", dest="vg_name",
932 help="Enables LVM and specifies the volume group"
933 " name (cluster-wide) for disk allocation [xenvg]",
934 metavar="VG", default=None)
936 YES_DOIT_OPT = cli_option("--yes-do-it", dest="yes_do_it",
937 help="Destroy cluster", action="store_true")
939 NOVOTING_OPT = cli_option("--no-voting", dest="no_voting",
940 help="Skip node agreement check (dangerous)",
941 action="store_true", default=False)
943 MAC_PREFIX_OPT = cli_option("-m", "--mac-prefix", dest="mac_prefix",
944 help="Specify the mac prefix for the instance IP"
945 " addresses, in the format XX:XX:XX",
949 MASTER_NETDEV_OPT = cli_option("--master-netdev", dest="master_netdev",
950 help="Specify the node interface (cluster-wide)"
951 " on which the master IP address will be added"
952 " (cluster init default: %s)" %
953 constants.DEFAULT_BRIDGE,
957 GLOBAL_FILEDIR_OPT = cli_option("--file-storage-dir", dest="file_storage_dir",
958 help="Specify the default directory (cluster-"
959 "wide) for storing the file-based disks [%s]" %
960 constants.DEFAULT_FILE_STORAGE_DIR,
962 default=constants.DEFAULT_FILE_STORAGE_DIR)
964 NOMODIFY_ETCHOSTS_OPT = cli_option("--no-etc-hosts", dest="modify_etc_hosts",
965 help="Don't modify /etc/hosts",
966 action="store_false", default=True)
968 NOMODIFY_SSH_SETUP_OPT = cli_option("--no-ssh-init", dest="modify_ssh_setup",
969 help="Don't initialize SSH keys",
970 action="store_false", default=True)
972 ERROR_CODES_OPT = cli_option("--error-codes", dest="error_codes",
973 help="Enable parseable error messages",
974 action="store_true", default=False)
976 NONPLUS1_OPT = cli_option("--no-nplus1-mem", dest="skip_nplusone_mem",
977 help="Skip N+1 memory redundancy tests",
978 action="store_true", default=False)
980 REBOOT_TYPE_OPT = cli_option("-t", "--type", dest="reboot_type",
981 help="Type of reboot: soft/hard/full",
982 default=constants.INSTANCE_REBOOT_HARD,
984 choices=list(constants.REBOOT_TYPES))
986 IGNORE_SECONDARIES_OPT = cli_option("--ignore-secondaries",
987 dest="ignore_secondaries",
988 default=False, action="store_true",
989 help="Ignore errors from secondaries")
991 NOSHUTDOWN_OPT = cli_option("--noshutdown", dest="shutdown",
992 action="store_false", default=True,
993 help="Don't shutdown the instance (unsafe)")
995 TIMEOUT_OPT = cli_option("--timeout", dest="timeout", type="int",
996 default=constants.DEFAULT_SHUTDOWN_TIMEOUT,
997 help="Maximum time to wait")
999 SHUTDOWN_TIMEOUT_OPT = cli_option("--shutdown-timeout",
1000 dest="shutdown_timeout", type="int",
1001 default=constants.DEFAULT_SHUTDOWN_TIMEOUT,
1002 help="Maximum time to wait for instance shutdown")
1004 INTERVAL_OPT = cli_option("--interval", dest="interval", type="int",
1006 help=("Number of seconds between repetions of the"
1009 EARLY_RELEASE_OPT = cli_option("--early-release",
1010 dest="early_release", default=False,
1011 action="store_true",
1012 help="Release the locks on the secondary"
1015 NEW_CLUSTER_CERT_OPT = cli_option("--new-cluster-certificate",
1016 dest="new_cluster_cert",
1017 default=False, action="store_true",
1018 help="Generate a new cluster certificate")
1020 RAPI_CERT_OPT = cli_option("--rapi-certificate", dest="rapi_cert",
1022 help="File containing new RAPI certificate")
1024 NEW_RAPI_CERT_OPT = cli_option("--new-rapi-certificate", dest="new_rapi_cert",
1025 default=None, action="store_true",
1026 help=("Generate a new self-signed RAPI"
1029 NEW_CONFD_HMAC_KEY_OPT = cli_option("--new-confd-hmac-key",
1030 dest="new_confd_hmac_key",
1031 default=False, action="store_true",
1032 help=("Create a new HMAC key for %s" %
CLUSTER_DOMAIN_SECRET_OPT = cli_option("--cluster-domain-secret",
                                       dest="cluster_domain_secret",
                                       # TODO(review): duplicated word -- the
                                       # help should read "Load new cluster
                                       # domain secret from file".
                                       help=("Load new new cluster domain"
                                             " secret from file"))
1041 NEW_CLUSTER_DOMAIN_SECRET_OPT = cli_option("--new-cluster-domain-secret",
1042 dest="new_cluster_domain_secret",
1043 default=False, action="store_true",
1044 help=("Create a new cluster domain"
1047 USE_REPL_NET_OPT = cli_option("--use-replication-network",
1048 dest="use_replication_network",
1049 help="Whether to use the replication network"
1050 " for talking to the nodes",
1051 action="store_true", default=False)
1053 MAINTAIN_NODE_HEALTH_OPT = \
1054 cli_option("--maintain-node-health", dest="maintain_node_health",
1055 metavar=_YORNO, default=None, type="bool",
1056 help="Configure the cluster to automatically maintain node"
1057 " health, by shutting down unknown instances, shutting down"
1058 " unknown DRBD devices, etc.")
1060 IDENTIFY_DEFAULTS_OPT = \
1061 cli_option("--identify-defaults", dest="identify_defaults",
1062 default=False, action="store_true",
1063 help="Identify which saved instance parameters are equal to"
1064 " the current cluster defaults and set them as such, instead"
1065 " of marking them as overridden")
1067 UIDPOOL_OPT = cli_option("--uid-pool", default=None,
1068 action="store", dest="uid_pool",
1069 help=("A list of user-ids or user-id"
1070 " ranges separated by commas"))
1072 ADD_UIDS_OPT = cli_option("--add-uids", default=None,
1073 action="store", dest="add_uids",
1074 help=("A list of user-ids or user-id"
1075 " ranges separated by commas, to be"
1076 " added to the user-id pool"))
1078 REMOVE_UIDS_OPT = cli_option("--remove-uids", default=None,
1079 action="store", dest="remove_uids",
1080 help=("A list of user-ids or user-id"
1081 " ranges separated by commas, to be"
1082 " removed from the user-id pool"))
1084 RESERVED_LVS_OPT = cli_option("--reserved-lvs", default=None,
1085 action="store", dest="reserved_lvs",
1086 help=("A comma-separated list of reserved"
1087 " logical volumes names, that will be"
1088 " ignored by cluster verify"))
1090 ROMAN_OPT = cli_option("--roman",
1091 dest="roman_integers", default=False,
1092 action="store_true",
1093 help="Use roman numbers for positive integers")
1095 DRBD_HELPER_OPT = cli_option("--drbd-usermode-helper", dest="drbd_helper",
1096 action="store", default=None,
1097 help="Specifies usermode helper for DRBD")
1099 NODRBD_STORAGE_OPT = cli_option("--no-drbd-storage", dest="drbd_storage",
1100 action="store_false", default=True,
1101 help="Disable support for DRBD")
1103 PRIMARY_IP_VERSION_OPT = \
1104 cli_option("--primary-ip-version", default=constants.IP4_VERSION,
1105 action="store", dest="primary_ip_version",
1106 metavar="%d|%d" % (constants.IP4_VERSION,
1107 constants.IP6_VERSION),
1108 help="Cluster-wide IP version for primary IP")
1110 PRIORITY_OPT = cli_option("--priority", default=None, dest="priority",
1111 metavar="|".join(name for name, _ in _PRIORITY_NAMES),
1112 choices=_PRIONAME_TO_VALUE.keys(),
1113 help="Priority for opcode processing")
1115 HID_OS_OPT = cli_option("--hidden", dest="hidden",
1116 type="bool", default=None, metavar=_YORNO,
1117 help="Sets the hidden flag on the OS")
1119 BLK_OS_OPT = cli_option("--blacklisted", dest="blacklisted",
1120 type="bool", default=None, metavar=_YORNO,
1121 help="Sets the blacklisted flag on the OS")
1123 PREALLOC_WIPE_DISKS_OPT = cli_option("--prealloc-wipe-disks", default=None,
1124 type="bool", metavar=_YORNO,
1125 dest="prealloc_wipe_disks",
1126 help=("Wipe disks prior to instance"
1129 NODE_PARAMS_OPT = cli_option("--node-parameters", dest="ndparams",
1130 type="keyval", default=None,
1131 help="Node parameters")
1133 ALLOC_POLICY_OPT = cli_option("--alloc-policy", dest="alloc_policy",
1134 action="store", metavar="POLICY", default=None,
1135 help="Allocation policy for the node group")
1137 NODE_POWERED_OPT = cli_option("--node-powered", default=None,
1138 type="bool", metavar=_YORNO,
1139 dest="node_powered",
1140 help="Specify if the SoR for node is powered")
1143 #: Options provided by all commands
1144 COMMON_OPTS = [DEBUG_OPT]
1146 # common options for creating instances. add and import then add their own
1148 COMMON_CREATE_OPTS = [
1153 FILESTORE_DRIVER_OPT,
def _ParseArgs(argv, commands, aliases):
  """Parser for the command line arguments.

  This function parses the arguments and returns the function which
  must be executed together with its (modified) arguments.

  @param argv: the command line
  @param commands: dictionary with special contents, see the design
      doc for cmdline handling
  @param aliases: dictionary with command aliases {'alias': 'target, ...}
  @return: a (function, options, args) tuple; (None, None, None) is
      returned after printing usage/version or on failed argument checks

  """
  # Fallback name used in messages if argv is empty.
  binary = "<command>"
    binary = argv[0].split("/")[-1]

  if len(argv) > 1 and argv[1] == "--version":
    ToStdout("%s (ganeti %s) %s", binary, constants.VCS_VERSION,
             constants.RELEASE_VERSION)
    # Quit right away. That way we don't have to care about this special
    # argument. optparse.py does it the same.

  # Unknown or missing command: print a usage summary and bail out.
  if len(argv) < 2 or not (argv[1] in commands or
                           argv[1] in aliases):
    # let's do a nice thing
    sortedcmds = commands.keys()
    # NOTE(review): despite the name, the sort call on sortedcmds is not
    # visible in this excerpt -- confirm it is sorted before use.
    ToStdout("Usage: %s {command} [options...] [argument...]", binary)
    ToStdout("%s <command> --help to see details, or man %s", binary, binary)

    # compute the max line length for cmd + usage
    mlen = max([len(" %s" % cmd) for cmd in commands])
    mlen = min(60, mlen) # should not get here...

    # and format a nice command list
    ToStdout("Commands:")
    for cmd in sortedcmds:
      cmdstr = " %s" % (cmd,)
      # Index 4 of a command definition holds its help text.
      help_text = commands[cmd][4]
      # Wrap the help so the description column fits in 79 columns.
      help_lines = textwrap.wrap(help_text, 79 - 3 - mlen)
      ToStdout("%-*s - %s", mlen, cmdstr, help_lines.pop(0))
      for line in help_lines:
        ToStdout("%-*s %s", mlen, "", line)

    return None, None, None

  # get command, unalias it, and look it up in commands
    raise errors.ProgrammerError("Alias '%s' overrides an existing"
  if aliases[cmd] not in commands:
    raise errors.ProgrammerError("Alias '%s' maps to non-existing"
                                 " command '%s'" % (cmd, aliases[cmd]))

  func, args_def, parser_opts, usage, description = commands[cmd]
  parser = OptionParser(option_list=parser_opts + COMMON_OPTS,
                        description=description,
                        formatter=TitledHelpFormatter(),
                        usage="%%prog %s %s" % (cmd, usage))
  parser.disable_interspersed_args()
  # NOTE(review): parse_args() is called without arguments, so optparse reads
  # sys.argv; this presumably relies on elided code above having removed the
  # command name from the (shared) argv list -- confirm.
  options, args = parser.parse_args()

  if not _CheckArguments(cmd, args_def, args):
    return None, None, None

  return func, options, args
def _CheckArguments(cmd, args_def, args):
  """Verifies the arguments using the argument definition.

  Algorithm:

  1. Abort with error if values specified by user but none expected.

  1. For each argument in definition

    1. Keep running count of minimum number of values (min_count)
    1. Keep running count of maximum number of values (max_count)
    1. If it has an unlimited number of values

      1. Abort with error if it's not the last argument in the definition

  1. If last argument has limited number of values

    1. Abort with error if number of values doesn't match or is too large

  1. Abort with error if user didn't pass enough values (min_count)

  """
  if args and not args_def:
    ToStderr("Error: Command %s expects no arguments", cmd)

  last_idx = len(args_def) - 1

  for idx, arg in enumerate(args_def):
    # NOTE(review): the initializing branches of min_count/max_count appear
    # elided from this view
    if min_count is None:
    elif arg.min is not None:
      min_count += arg.min

    if max_count is None:
    elif arg.max is not None:
      max_count += arg.max

      check_max = (arg.max is not None)

    elif arg.max is None:
      raise errors.ProgrammerError("Only the last argument can have max=None")

  # Command with exact number of arguments
  if (min_count is not None and max_count is not None and
      min_count == max_count and len(args) != min_count):
    ToStderr("Error: Command %s expects %d argument(s)", cmd, min_count)

  # Command with limited number of arguments
  if max_count is not None and len(args) > max_count:
    ToStderr("Error: Command %s expects only %d argument(s)",

  # Command with some required arguments
  if min_count is not None and len(args) < min_count:
    ToStderr("Error: Command %s expects at least %d argument(s)",
def SplitNodeOption(value):
  """Splits the value of a --node option.

  @return: a pair of (primary, secondary); the secondary element is None
      when the value contains no colon separator

  """
  has_secondary = bool(value) and ":" in value
  if not has_secondary:
    return (value, None)
  return value.split(":", 1)
def CalculateOSNames(os_name, os_variants):
  """Calculates all the names an OS can be called, according to its variants.

  @type os_name: string
  @param os_name: base name of the os
  @type os_variants: list or None
  @param os_variants: list of supported variants
  @rtype: list
  @return: list of valid names

  """
  # Bug fix: the docstring allows os_variants to be None, but the visible
  # code iterated it unconditionally (TypeError on None); guard and fall
  # back to the plain OS name when there are no variants.
  if os_variants:
    return ["%s+%s" % (os_name, v) for v in os_variants]
  else:
    return [os_name]
def ParseFields(selected, default):
  """Parses the values of "--field"-like options.

  @type selected: string or None
  @param selected: User-selected options
  @type default: list
  @param default: Default fields
  @rtype: list
  @return: the default fields when nothing was selected, the defaults
      extended by the selection when it starts with "+", otherwise the
      comma-split selection

  """
  # Bug fix: the "if selected is None:" branch had an empty body (the
  # return of the defaults was missing), which is both a syntax error and
  # a loss of the documented default behaviour.
  if selected is None:
    return default

  # a leading "+" means "extend the defaults" rather than "replace them"
  if selected.startswith("+"):
    return default + selected[1:].split(",")

  return selected.split(",")
1365 UsesRPC = rpc.RunWithRPC
def AskUser(text, choices=None):
  """Ask the user a question.

  @param text: the question to ask

  @param choices: list with elements tuples (input_char, return_value,
      description); if not given, it will default to: [('y', True,
      'Perform the operation'), ('n', False, 'Do no do the operation')];
      note that the '?' char is reserved for help

  @return: one of the return values from the choices list; if input is
      not possible (i.e. not running with a tty, we return the last
      entry from the list)

  """
  # NOTE(review): the "choices is None" guard appears elided; this is the
  # default choice list described in the docstring
    choices = [('y', True, 'Perform the operation'),
               ('n', False, 'Do not perform the operation')]
  if not choices or not isinstance(choices, list):
    raise errors.ProgrammerError("Invalid choices argument to AskUser")
  for entry in choices:
    if not isinstance(entry, tuple) or len(entry) < 3 or entry[0] == '?':
      raise errors.ProgrammerError("Invalid choices element to AskUser")

  # fallback answer (last choice) used when no tty input is possible
  answer = choices[-1][1]

  for line in text.splitlines():
    new_text.append(textwrap.fill(line, 70, replace_whitespace=False))
  text = "\n".join(new_text)
  # talk directly to the controlling terminal
  f = file("/dev/tty", "a+")

  chars = [entry[0] for entry in choices]
  chars[-1] = "[%s]" % chars[-1]
  # map from input character to the corresponding return value
  maps = dict([(entry[0], entry[1]) for entry in choices])

  f.write("/".join(chars))

  # read at most one char plus newline from the user
  line = f.readline(2).strip().lower()

  # '?' help output: list each choice with its description
  for entry in choices:
    f.write(" %s - %s\n" % (entry[0], entry[2]))
class JobSubmittedException(Exception):
  """Job was submitted, client should exit.

  This exception has one argument, the ID of the job that was
  submitted. The handler should print this ID.

  This is not an error, just a structured way to exit from clients.

  """
def SendJob(ops, cl=None):
  """Function to submit an opcode without waiting for the results.

  @type ops: list
  @param ops: list of opcodes
  @type cl: luxi.Client
  @param cl: the luxi client to use for communicating with the master;
             if None, a new client will be created

  """
  # NOTE(review): client creation for the cl=None case and the return of
  # job_id appear elided from this view
  job_id = cl.SubmitJob(ops)
def GenericPollJob(job_id, cbs, report_cbs):
  """Generic job-polling function.

  Repeatedly waits for job changes via the data callbacks, reports log
  messages and status through the reporting callbacks, and finally raises
  or returns depending on the terminal job status.

  @type job_id: number
  @param job_id: Job ID
  @type cbs: Instance of L{JobPollCbBase}
  @param cbs: Data callbacks
  @type report_cbs: Instance of L{JobPollReportCbBase}
  @param report_cbs: Reporting callbacks

  """
  prev_job_info = None
  prev_logmsg_serial = None

  # NOTE(review): the polling loop header and several branch lines appear
  # elided from this view
  result = cbs.WaitForJobChangeOnce(job_id, ["status"], prev_job_info,
  # job not found, go away!
  raise errors.JobLost("Job with id %s lost" % job_id)

  if result == constants.JOB_NOTCHANGED:
    report_cbs.ReportNotChanged(job_id, status)

  # Split result, a tuple of (field values, log entries)
  (job_info, log_entries) = result
  (status, ) = job_info

  for log_entry in log_entries:
    (serial, timestamp, log_type, message) = log_entry
    report_cbs.ReportLogMessage(job_id, serial, timestamp,
    # remember the highest serial seen so entries aren't re-reported
    prev_logmsg_serial = max(prev_logmsg_serial, serial)

  # TODO: Handle canceled and archived jobs
  elif status in (constants.JOB_STATUS_SUCCESS,
                  constants.JOB_STATUS_ERROR,
                  constants.JOB_STATUS_CANCELING,
                  constants.JOB_STATUS_CANCELED):

  prev_job_info = job_info

  jobs = cbs.QueryJobs([job_id], ["status", "opstatus", "opresult"])
  raise errors.JobLost("Job with id %s lost" % job_id)

  status, opstatus, result = jobs[0]

  if status == constants.JOB_STATUS_SUCCESS:

  if status in (constants.JOB_STATUS_CANCELING, constants.JOB_STATUS_CANCELED):
    raise errors.OpExecError("Job was canceled")

  # inspect per-opcode results to give a more precise error
  for idx, (status, msg) in enumerate(zip(opstatus, result)):
    if status == constants.OP_STATUS_SUCCESS:
    elif status == constants.OP_STATUS_ERROR:
      errors.MaybeRaise(msg)

      raise errors.OpExecError("partial failure (opcode %d): %s" %

  raise errors.OpExecError(str(msg))

  # default failure mode
  raise errors.OpExecError(result)
class JobPollCbBase:
  """Base class for L{GenericPollJob} callbacks.

  """
  # NOTE(review): the "def __init__(self):" line appears elided here
    """Initializes this class.

    """

  def WaitForJobChangeOnce(self, job_id, fields,
                           prev_job_info, prev_log_serial):
    """Waits for changes on a job.

    """
    raise NotImplementedError()

  def QueryJobs(self, job_ids, fields):
    """Returns the selected fields for the selected job IDs.

    @type job_ids: list of numbers
    @param job_ids: Job IDs
    @type fields: list of strings
    @param fields: Fields

    """
    raise NotImplementedError()
class JobPollReportCbBase:
  """Base class for L{GenericPollJob} reporting callbacks.

  """
  # NOTE(review): the "def __init__(self):" line appears elided here
    """Initializes this class.

    """

  def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
    """Handles a log message.

    """
    raise NotImplementedError()

  def ReportNotChanged(self, job_id, status):
    """Called for if a job hasn't changed in a while.

    @type job_id: number
    @param job_id: Job ID
    @type status: string or None
    @param status: Job status if available

    """
    raise NotImplementedError()
class _LuxiJobPollCb(JobPollCbBase):
  # Data callbacks backed by a luxi client.
  def __init__(self, cl):
    """Initializes this class.

    """
    JobPollCbBase.__init__(self)
    # NOTE(review): the line storing the client (self.cl = cl) appears
    # elided from this view; the methods below read self.cl

  def WaitForJobChangeOnce(self, job_id, fields,
                           prev_job_info, prev_log_serial):
    """Waits for changes on a job.

    """
    return self.cl.WaitForJobChangeOnce(job_id, fields,
                                        prev_job_info, prev_log_serial)

  def QueryJobs(self, job_ids, fields):
    """Returns the selected fields for the selected job IDs.

    """
    return self.cl.QueryJobs(job_ids, fields)
class FeedbackFnJobPollReportCb(JobPollReportCbBase):
  # Reporting callbacks forwarding log messages to a feedback function.
  def __init__(self, feedback_fn):
    """Initializes this class.

    @param feedback_fn: callable receiving (timestamp, log_type, log_msg)
        tuples

    """
    JobPollReportCbBase.__init__(self)

    self.feedback_fn = feedback_fn

    assert callable(feedback_fn)

  def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
    """Handles a log message.

    """
    self.feedback_fn((timestamp, log_type, log_msg))

  def ReportNotChanged(self, job_id, status):
    """Called if a job hasn't changed in a while.

    """
class StdioJobPollReportCb(JobPollReportCbBase):
  # Reporting callbacks printing to stdout/stderr.
  # NOTE(review): the "def __init__(self):" line appears elided here
    """Initializes this class.

    """
    JobPollReportCbBase.__init__(self)

    # flags ensuring each "waiting" notification is printed only once
    self.notified_queued = False
    self.notified_waitlock = False

  def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
    """Handles a log message.

    """
    ToStdout("%s %s", time.ctime(utils.MergeTime(timestamp)),
             FormatLogMessage(log_type, log_msg))

  def ReportNotChanged(self, job_id, status):
    """Called if a job hasn't changed in a while.

    """
    if status == constants.JOB_STATUS_QUEUED and not self.notified_queued:
      ToStderr("Job %s is waiting in queue", job_id)
      self.notified_queued = True

    elif status == constants.JOB_STATUS_WAITLOCK and not self.notified_waitlock:
      ToStderr("Job %s is trying to acquire all necessary locks", job_id)
      self.notified_waitlock = True
def FormatLogMessage(log_type, log_msg):
  """Formats a job message according to its type.

  Non-message entries are converted to their string representation first;
  the result is always passed through L{utils.SafeEncode}.

  """
  if log_type == constants.ELOG_MESSAGE:
    text = log_msg
  else:
    text = str(log_msg)
  return utils.SafeEncode(text)
def PollJob(job_id, cl=None, feedback_fn=None, reporter=None):
  """Function to poll for the result of a job.

  @type job_id: job identified
  @param job_id: the job to poll for results
  @type cl: luxi.Client
  @param cl: the luxi client to use for communicating with the master;
             if None, a new client will be created

  """
  if reporter is None:
    # NOTE(review): the feedback_fn branch selection appears elided; a
    # reporter is built from feedback_fn, otherwise stdio is used
    reporter = FeedbackFnJobPollReportCb(feedback_fn)
    reporter = StdioJobPollReportCb()
  # passing both an explicit reporter and a feedback function is ambiguous
  raise errors.ProgrammerError("Can't specify reporter and feedback function")

  return GenericPollJob(job_id, _LuxiJobPollCb(cl), reporter)
def SubmitOpCode(op, cl=None, feedback_fn=None, opts=None, reporter=None):
  """Legacy function to submit an opcode.

  This is just a simple wrapper over the construction of the processor
  instance. It should be extended to better handle feedback and
  interaction functions.

  """
  # apply generic options (debug, dry-run, priority) to the opcode
  SetGenericOpcodeOpts([op], opts)

  job_id = SendJob([op], cl=cl)

  # NOTE(review): the continuation of this call (reporter argument and
  # closing parenthesis) appears elided from this view
  op_results = PollJob(job_id, cl=cl, feedback_fn=feedback_fn,

  return op_results[0]
def SubmitOrSend(op, opts, cl=None, feedback_fn=None):
  """Wrapper around SubmitOpCode or SendJob.

  This function will decide, based on the 'opts' parameter, whether to
  submit and wait for the result of the opcode (and return it), or
  whether to just send the job and print its identifier. It is used in
  order to simplify the implementation of the '--submit' option.

  It will also process the opcodes if we're sending the via SendJob
  (otherwise SubmitOpCode does it).

  """
  if opts and opts.submit_only:
    # NOTE(review): the line building "job" (presumably job = [op]) appears
    # elided from this view
    SetGenericOpcodeOpts(job, opts)
    job_id = SendJob(job, cl=cl)
    # not an error: structured way to tell the caller a job was submitted
    raise JobSubmittedException(job_id)

  return SubmitOpCode(op, cl=cl, feedback_fn=feedback_fn, opts=opts)
def SetGenericOpcodeOpts(opcode_list, options):
  """Processor for generic options.

  This function updates the given opcodes based on generic command
  line options (like debug, dry-run, etc.).

  @param opcode_list: list of opcodes
  @param options: command line options or None
  @return: None (in-place modification)

  """
  for op in opcode_list:
    op.debug_level = options.debug
    # dry_run/priority are optional attributes on the options object
    if hasattr(options, "dry_run"):
      op.dry_run = options.dry_run
    if getattr(options, "priority", None) is not None:
      op.priority = _PRIONAME_TO_VALUE[options.priority]
  # NOTE(review): the enclosing "def GetClient():" header and its try:
  # appear elided from this view; this is the body creating a luxi client
  # and producing helpful errors when no master daemon is reachable.
  # TODO: Cache object?
    client = luxi.Client()
  except luxi.NoMasterError:
    ss = ssconf.SimpleStore()

    # Try to read ssconf file
    except errors.ConfigurationError:
      raise errors.OpPrereqError("Cluster not initialized or this machine is"
                                 " not part of a cluster")

    master, myself = ssconf.GetMasterAndMyself(ss=ss)
    if master != myself:
      raise errors.OpPrereqError("This is not the master node, please connect"
                                 " to node '%s' and rerun the command" %
def FormatError(err):
  """Return a formatted error message for a given error.

  This function takes an exception instance and returns a tuple
  consisting of two values: first, the recommended exit code, and
  second, a string describing the error message (not
  newline-terminated).

  """
  # NOTE(review): the initialisation of retcode/obuf/msg appears elided
  # from this view
  if isinstance(err, errors.ConfigurationError):
    txt = "Corrupt configuration file: %s" % msg
    obuf.write(txt + "\n")
    obuf.write("Aborting.")
  elif isinstance(err, errors.HooksAbort):
    obuf.write("Failure: hooks execution failed:\n")
    for node, script, out in err.args[0]:
        obuf.write("  node: %s, script: %s, output: %s\n" %
                   (node, script, out))
        obuf.write("  node: %s, script: %s (no output)\n" %
  elif isinstance(err, errors.HooksFailure):
    obuf.write("Failure: hooks general failure: %s" % msg)
  elif isinstance(err, errors.ResolverError):
    # this error can happen for our own or remote hostnames
    this_host = netutils.Hostname.GetSysName()
    if err.args[0] == this_host:
      msg = "Failure: can't resolve my own hostname ('%s')"
      msg = "Failure: can't resolve hostname '%s'"
    obuf.write(msg % err.args[0])
  elif isinstance(err, errors.OpPrereqError):
    if len(err.args) == 2:
      obuf.write("Failure: prerequisites not met for this"
                 " operation:\nerror type: %s, error details:\n%s" %
                 (err.args[1], err.args[0]))
      obuf.write("Failure: prerequisites not met for this"
                 " operation:\n%s" % msg)
  elif isinstance(err, errors.OpExecError):
    obuf.write("Failure: command execution error:\n%s" % msg)
  elif isinstance(err, errors.TagError):
    obuf.write("Failure: invalid tag(s) given:\n%s" % msg)
  elif isinstance(err, errors.JobQueueDrainError):
    obuf.write("Failure: the job queue is marked for drain and doesn't"
               " accept new requests\n")
  elif isinstance(err, errors.JobQueueFull):
    obuf.write("Failure: the job queue is full and doesn't accept new"
               " job submissions until old jobs are archived\n")
  elif isinstance(err, errors.TypeEnforcementError):
    obuf.write("Parameter Error: %s" % msg)
  elif isinstance(err, errors.ParameterError):
    obuf.write("Failure: unknown/wrong parameter name '%s'" % msg)
  elif isinstance(err, luxi.NoMasterError):
    obuf.write("Cannot communicate with the master daemon.\nIs it running"
               " and listening for connections?")
  elif isinstance(err, luxi.TimeoutError):
    obuf.write("Timeout while talking to the master daemon. Jobs might have"
               " been submitted and will continue to run even if the call"
               " timed out. Useful commands in this situation are \"gnt-job"
               " list\", \"gnt-job cancel\" and \"gnt-job watch\". Error:\n")
  elif isinstance(err, luxi.PermissionError):
    obuf.write("It seems you don't have permissions to connect to the"
               " master daemon.\nPlease retry as a different user.")
  elif isinstance(err, luxi.ProtocolError):
    obuf.write("Unhandled protocol error while talking to the master daemon:\n"
  elif isinstance(err, errors.JobLost):
    obuf.write("Error checking job status: %s" % msg)
  elif isinstance(err, errors.GenericError):
    obuf.write("Unhandled Ganeti error: %s" % msg)
  elif isinstance(err, JobSubmittedException):
    # not an error: print the submitted job's ID
    obuf.write("JobID: %s\n" % err.args[0])
    obuf.write("Unhandled exception: %s" % msg)
  return retcode, obuf.getvalue().rstrip('\n')
def GenericMain(commands, override=None, aliases=None):
  """Generic main function for all the gnt-* commands.

  Arguments:
    - commands: a dictionary with a special structure, see the design doc
                for command line handling.
    - override: if not None, we expect a dictionary with keys that will
                override command line options; this can be used to pass
                options from the scripts to generic functions
    - aliases: dictionary with command aliases {'alias': 'target, ...}

  """
  # save the program name and the entire command line for later logging
    binary = os.path.basename(sys.argv[0]) or sys.argv[0]
    if len(sys.argv) >= 2:
      binary += " " + sys.argv[1]
      old_cmdline = " ".join(sys.argv[2:])
    binary = "<unknown program>"

    func, options, args = _ParseArgs(sys.argv, commands, aliases)
  except errors.ParameterError, err:
    result, err_msg = FormatError(err)

  if func is None: # parse error

  # apply caller-supplied overrides on top of the parsed options
  if override is not None:
    for key, val in override.iteritems():
      setattr(options, key, val)

  utils.SetupLogging(constants.LOG_COMMANDS, debug=options.debug,
                     stderr_logging=True, program=binary)

    logging.info("run with arguments '%s'", old_cmdline)
    logging.info("run with no arguments")

    result = func(options, args)
  except (errors.GenericError, luxi.ProtocolError,
          JobSubmittedException), err:
    result, err_msg = FormatError(err)
    logging.exception("Error during command processing")
def ParseNicOption(optvalue):
  """Parses the value of the --net option(s).

  @param optvalue: list of (index, settings-dict) pairs as produced by the
      option parser

  """
  # highest NIC index + 1 determines the size of the NIC list
    nic_max = max(int(nidx[0]) + 1 for nidx in optvalue)
  except (TypeError, ValueError), err:
    raise errors.OpPrereqError("Invalid NIC index passed: %s" % str(err))

  # NOTE(review): [{}] * nic_max makes every slot alias ONE dict; the loop
  # below presumably replaces each indexed slot — confirm no aliasing
  # remains after filling
  nics = [{}] * nic_max
  for nidx, ndict in optvalue:
    if not isinstance(ndict, dict):
      raise errors.OpPrereqError("Invalid nic/%d value: expected dict,"
                                 " got %s" % (nidx, ndict))

    utils.ForceDictType(ndict, constants.INIC_PARAMS_TYPES)
def GenericInstanceCreate(mode, opts, args):
  """Add an instance to the cluster via either creation or import.

  @param mode: constants.INSTANCE_CREATE or constants.INSTANCE_IMPORT
  @param opts: the command line options selected by the user
  @param args: should contain only one element, the new instance name
  @rtype: int
  @return: the desired exit code

  """
  (pnode, snode) = SplitNodeOption(opts.node)

  hypervisor, hvparams = opts.hypervisor

  nics = ParseNicOption(opts.nics)
  elif mode == constants.INSTANCE_CREATE:
    # default of one nic, all auto

  if opts.disk_template == constants.DT_DISKLESS:
    if opts.disks or opts.sd_size is not None:
      raise errors.OpPrereqError("Diskless instance but disk"
                                 " information passed")

    if (not opts.disks and not opts.sd_size
        and mode == constants.INSTANCE_CREATE):
      raise errors.OpPrereqError("No disk information specified")
    if opts.disks and opts.sd_size is not None:
      raise errors.OpPrereqError("Please use either the '--disk' or"
    if opts.sd_size is not None:
      # convert the legacy single-disk size into the generic disk-list form
      opts.disks = [(0, {"size": opts.sd_size})]

      disk_max = max(int(didx[0]) + 1 for didx in opts.disks)
    except ValueError, err:
      raise errors.OpPrereqError("Invalid disk index passed: %s" % str(err))
    disks = [{}] * disk_max

    for didx, ddict in opts.disks:
      if not isinstance(ddict, dict):
        msg = "Invalid disk/%d value: expected dict, got %s" % (didx, ddict)
        raise errors.OpPrereqError(msg)
      elif "size" in ddict:
        if "adopt" in ddict:
          # 'size' and 'adopt' are mutually exclusive per disk
          raise errors.OpPrereqError("Only one of 'size' and 'adopt' allowed"
                                     " (disk %d)" % didx)
          ddict["size"] = utils.ParseUnit(ddict["size"])
        except ValueError, err:
          raise errors.OpPrereqError("Invalid disk size for disk %d: %s" %
      elif "adopt" in ddict:
        if mode == constants.INSTANCE_IMPORT:
          raise errors.OpPrereqError("Disk adoption not allowed for instance"
        raise errors.OpPrereqError("Missing size or adoption source for"

  utils.ForceDictType(opts.beparams, constants.BES_PARAMETER_TYPES)
  utils.ForceDictType(hvparams, constants.HVS_PARAMETER_TYPES)

  # mode-specific settings for the opcode below
  if mode == constants.INSTANCE_CREATE:
    force_variant = opts.force_variant
    no_install = opts.no_install
    identify_defaults = False
  elif mode == constants.INSTANCE_IMPORT:
    force_variant = False
    src_node = opts.src_node
    src_path = opts.src_dir
    identify_defaults = opts.identify_defaults
    raise errors.ProgrammerError("Invalid creation mode %s" % mode)

  op = opcodes.OpInstanceCreate(instance_name=instance,
                                disk_template=opts.disk_template,
                                pnode=pnode, snode=snode,
                                ip_check=opts.ip_check,
                                name_check=opts.name_check,
                                wait_for_sync=opts.wait_for_sync,
                                file_storage_dir=opts.file_storage_dir,
                                file_driver=opts.file_driver,
                                iallocator=opts.iallocator,
                                hypervisor=hypervisor,
                                beparams=opts.beparams,
                                osparams=opts.osparams,
                                force_variant=force_variant,
                                no_install=no_install,
                                identify_defaults=identify_defaults)

  SubmitOrSend(op, opts)
class _RunWhileClusterStoppedHelper:
  """Helper class for L{RunWhileClusterStopped} to simplify state management

  """
  def __init__(self, feedback_fn, cluster_name, master_node, online_nodes):
    """Initializes this class.

    @type feedback_fn: callable
    @param feedback_fn: Feedback function
    @type cluster_name: string
    @param cluster_name: Cluster name
    @type master_node: string
    @param master_node: Master node name
    @type online_nodes: list
    @param online_nodes: List of names of online nodes

    """
    self.feedback_fn = feedback_fn
    self.cluster_name = cluster_name
    self.master_node = master_node
    self.online_nodes = online_nodes

    self.ssh = ssh.SshRunner(self.cluster_name)

    self.nonmaster_nodes = [name for name in online_nodes
                            if name != master_node]

    assert self.master_node not in self.nonmaster_nodes

  def _RunCmd(self, node_name, cmd):
    """Runs a command on the local or a remote machine.

    @type node_name: string
    @param node_name: Machine name; None or the master node name means
        "run locally"
    @type cmd: list
    @param cmd: Command as a list of arguments

    """
    if node_name is None or node_name == self.master_node:
      # No need to use SSH
      result = utils.RunCmd(cmd)
      result = self.ssh.Run(node_name, "root", utils.ShellQuoteArgs(cmd))

      # build a descriptive error message from the failed result
      errmsg = ["Failed to run command %s" % result.cmd]
        errmsg.append("on node %s" % node_name)
      errmsg.append(": exitcode %s and error %s" %
                    (result.exit_code, result.output))
      raise errors.OpExecError(" ".join(errmsg))

  def Call(self, fn, *args):
    """Call function while all daemons are stopped.

    @type fn: callable
    @param fn: Function to be called

    """
    # Pause watcher by acquiring an exclusive lock on watcher state file
    self.feedback_fn("Blocking watcher")
    watcher_block = utils.FileLock.Open(constants.WATCHER_STATEFILE)
    # TODO: Currently, this just blocks. There's no timeout.
    # TODO: Should it be a shared lock?
    watcher_block.Exclusive(blocking=True)

    # Stop master daemons, so that no new jobs can come in and all running
    # ones can finish
    self.feedback_fn("Stopping master daemons")
    self._RunCmd(None, [constants.DAEMON_UTIL, "stop-master"])

    # Stop daemons on all nodes
    for node_name in self.online_nodes:
      self.feedback_fn("Stopping daemons on %s" % node_name)
      self._RunCmd(node_name, [constants.DAEMON_UTIL, "stop-all"])

    # All daemons are shut down now
      return fn(self, *args)
    except Exception, err:
      _, errmsg = FormatError(err)
      logging.exception("Caught exception")
      self.feedback_fn(errmsg)

      # Start cluster again, master node last
      for node_name in self.nonmaster_nodes + [self.master_node]:
        self.feedback_fn("Starting daemons on %s" % node_name)
        self._RunCmd(node_name, [constants.DAEMON_UTIL, "start-all"])

    # resume the watcher
    watcher_block.Close()
def RunWhileClusterStopped(feedback_fn, fn, *args):
  """Calls a function while all cluster daemons are stopped.

  @type feedback_fn: callable
  @param feedback_fn: Feedback function
  @type fn: callable
  @param fn: Function to be called when daemons are stopped

  """
  feedback_fn("Gathering cluster information")

  # This ensures we're running on the master daemon
  # NOTE(review): client creation appears elided from this view

  (cluster_name, master_node) = \
    cl.QueryConfigValues(["cluster_name", "master_node"])

  online_nodes = GetOnlineNodes([], cl=cl)

  # Don't keep a reference to the client. The master daemon will go away.

  assert master_node in online_nodes

  return _RunWhileClusterStoppedHelper(feedback_fn, cluster_name, master_node,
                                       online_nodes).Call(fn, *args)
def GenerateTable(headers, fields, separator, data,
                  numfields=None, unitfields=None,
  """Prints a table with headers and different fields.

  @type headers: dict
  @param headers: dictionary mapping field names to headers for
      the table
  @type fields: list
  @param fields: the field names corresponding to each row in
      the data field
  @param separator: the separator to be used; if this is None,
      the default 'smart' algorithm is used which computes optimal
      field width, otherwise just the separator is used between
      each field
  @type data: list
  @param data: a list of lists, each sublist being one row to be output
  @type numfields: list
  @param numfields: a list with the fields that hold numeric
      values and thus should be right-aligned
  @type unitfields: list
  @param unitfields: a list with the fields that hold numeric
      values that should be formatted with the units field
  @type units: string or None
  @param units: the units we should use for formatting, or None for
      automatic choice (human-readable for non-separator usage, otherwise
      megabytes); this is a one-letter string

  """
  # NOTE(review): default-initialisation bodies appear elided here
  if numfields is None:
  if unitfields is None:

  numfields = utils.FieldSet(*numfields) # pylint: disable-msg=W0142
  unitfields = utils.FieldSet(*unitfields) # pylint: disable-msg=W0142

  # build one printf-style conversion per field
  for field in fields:
    if headers and field not in headers:
      # TODO: handle better unknown fields (either revert to old
      # style of raising exception, or deal more intelligently with
      # them)
      headers[field] = field
    if separator is not None:
      format_fields.append("%s")
    elif numfields.Matches(field):
      # numeric fields are right-aligned, others left-aligned
      format_fields.append("%*s")
      format_fields.append("%-*s")

  if separator is None:
    mlens = [0 for name in fields]
    format_str = ' '.join(format_fields)
    format_str = separator.replace("%", "%%").join(format_fields)

  # first pass: stringify values and record maximum column widths
  for idx, val in enumerate(row):
    if unitfields.Matches(fields[idx]):
      except (TypeError, ValueError):
        val = row[idx] = utils.FormatUnit(val, units)
    val = row[idx] = str(val)
    if separator is None:
      mlens[idx] = max(mlens[idx], len(val))

  for idx, name in enumerate(fields):
    if separator is None:
      mlens[idx] = max(mlens[idx], len(hdr))
    args.append(mlens[idx])
    result.append(format_str % tuple(args))

  if separator is None:
    assert len(mlens) == len(fields)
    if fields and not numfields.Matches(fields[-1]):

    # second pass: render each data row with the computed widths
    line = ['-' for _ in fields]
    for idx in range(len(fields)):
      if separator is None:
        args.append(mlens[idx])
      args.append(line[idx])
    result.append(format_str % tuple(args))
def _FormatBool(value):
  """Formats a boolean value as a string.

  """
  # NOTE(review): the function body appears elided from this view


#: Default formatting for query results; (callback, align right)
# NOTE(review): the closing brace of this dictionary appears elided
_DEFAULT_FORMAT_QUERY = {
  constants.QFT_TEXT: (str, False),
  constants.QFT_BOOL: (_FormatBool, False),
  constants.QFT_NUMBER: (str, True),
  constants.QFT_TIMESTAMP: (utils.FormatTime, False),
  constants.QFT_OTHER: (str, False),
  constants.QFT_UNKNOWN: (str, False),
def _GetColumnFormatter(fdef, override, unit):
  """Returns formatting function for a field.

  @type fdef: L{objects.QueryFieldDefinition}
  @type override: dict
  @param override: Dictionary for overriding field formatting functions,
    indexed by field name, contents like L{_DEFAULT_FORMAT_QUERY}
  @type unit: string
  @param unit: Unit used for formatting fields of type L{constants.QFT_UNIT}
  @rtype: tuple; (callable, bool)
  @return: Returns the function to format a value (takes one parameter) and a
    boolean for aligning the value on the right-hand side

  """
  # per-field override takes precedence over the defaults
  fmt = override.get(fdef.name, None)

  assert constants.QFT_UNIT not in _DEFAULT_FORMAT_QUERY

  if fdef.kind == constants.QFT_UNIT:
    # Can't keep this information in the static dictionary
    return (lambda value: utils.FormatUnit(value, unit), True)

  fmt = _DEFAULT_FORMAT_QUERY.get(fdef.kind, None)

  raise NotImplementedError("Can't format column type '%s'" % fdef.kind)
class _QueryColumnFormatter:
  """Callable class for formatting fields of a query.

  """
  def __init__(self, fn, status_fn):
    """Initializes this class.

    @type fn: callable
    @param fn: Formatting function
    @type status_fn: callable
    @param status_fn: Function to report fields' status

    """
    # NOTE(review): the line storing fn (self._fn = fn) appears elided
    self._status_fn = status_fn

  def __call__(self, data):
    """Returns a field's string representation.

    @param data: a (status, value) tuple

    """
    (status, value) = data

    # Report status
    self._status_fn(status)

    if status == constants.QRFS_NORMAL:
      return self._fn(value)

    assert value is None, \
           "Found value %r for abnormal status %s" % (value, status)

    # NOTE(review): the placeholder strings returned for each abnormal
    # status appear elided from this view
    if status == constants.QRFS_UNKNOWN:

    if status == constants.QRFS_NODATA:

    if status == constants.QRFS_UNAVAIL:

    if status == constants.QRFS_OFFLINE:

    raise NotImplementedError("Unknown status %s" % status)
def FormatQueryResult(result, unit=None, format_override=None, separator=None,
  """Formats data in L{objects.QueryResponse}.

  @type result: L{objects.QueryResponse}
  @param result: result of query operation
  @type unit: string
  @param unit: Unit used for formatting fields of type L{constants.QFT_UNIT},
    see L{utils.text.FormatUnit}
  @type format_override: dict
  @param format_override: Dictionary for overriding field formatting functions,
    indexed by field name, contents like L{_DEFAULT_FORMAT_QUERY}
  @type separator: string or None
  @param separator: String used to separate fields
  @type header: bool
  @param header: Whether to output header row

  """
  if format_override is None:
    format_override = {}

  # per-status counters for the summary below
  stats = dict.fromkeys(constants.QRFS_ALL, 0)

  def _RecordStatus(status):
    # NOTE(review): the counter-increment body appears elided

  for fdef in result.fields:
    assert fdef.title and fdef.name
    (fn, align_right) = _GetColumnFormatter(fdef, format_override, unit)
    columns.append(TableColumn(fdef.title,
                               _QueryColumnFormatter(fn, _RecordStatus),

  table = FormatTable(result.data, columns, header, separator)

  # Collect statistics
  assert len(stats) == len(constants.QRFS_ALL)
  assert compat.all(count >= 0 for count in stats.values())

  # Determine overall status. If there was no data, unknown fields must be
  # detected via the field definitions.
  if (stats[constants.QRFS_UNKNOWN] or
      (not result.data and _GetUnknownFields(result.fields))):
  elif compat.any(count > 0 for key, count in stats.items()
                  if key != constants.QRFS_NORMAL):
    status = QR_INCOMPLETE

  return (status, table)
2474 def _GetUnknownFields(fdefs):
2475 """Returns list of unknown fields included in C{fdefs}.
2477 @type fdefs: list of L{objects.QueryFieldDefinition}
2480 return [fdef for fdef in fdefs
2481 if fdef.kind == constants.QFT_UNKNOWN]
def _WarnUnknownFields(fdefs):
  """Prints a warning to stderr if a query included unknown fields.

  @type fdefs: list of L{objects.QueryFieldDefinition}
  @rtype: bool
  @return: whether any unknown fields were found

  """
  unknown = _GetUnknownFields(fdefs)
  if unknown:
    ToStderr("Warning: Queried for unknown fields %s",
             utils.CommaJoin(fdef.name for fdef in unknown))
    return True

  return False
def GenericList(resource, fields, names, unit, separator, header, cl=None,
                format_override=None):
  """Generic implementation for listing all items of a resource.

  @param resource: One of L{constants.QR_OP_LUXI}
  @type fields: list of strings
  @param fields: List of fields to query for
  @type names: list of strings
  @param names: Names of items to query for
  @type unit: string or None
  @param unit: Unit used for formatting fields of type L{constants.QFT_UNIT} or
    None for automatic choice (human-readable for non-separator usage,
    otherwise megabytes); this is a one-letter string
  @type separator: string or None
  @param separator: String used to separate fields
  @type header: bool
  @param header: Whether to show header row
  @type format_override: dict
  @param format_override: Dictionary for overriding field formatting functions,
    indexed by field name, contents like L{_DEFAULT_FORMAT_QUERY}
  @rtype: int
  @return: L{constants.EXIT_SUCCESS} or L{constants.EXIT_UNKNOWN_FIELD}

  """
  if cl is None:
    cl = GetClient()

  # An empty name list means "all items", which the filter expresses as None
  if not names:
    names = None

  response = cl.Query(resource, fields, qlang.MakeSimpleFilter("name", names))

  found_unknown = _WarnUnknownFields(response.fields)

  (status, data) = FormatQueryResult(response, unit=unit, separator=separator,
                                     header=header,
                                     format_override=format_override)

  for line in data:
    ToStdout(line)

  assert ((found_unknown and status == QR_UNKNOWN) or
          (not found_unknown and status != QR_UNKNOWN))

  if status == QR_UNKNOWN:
    return constants.EXIT_UNKNOWN_FIELD

  # TODO: Should the list command fail if not all data could be collected?
  return constants.EXIT_SUCCESS
def GenericListFields(resource, fields, separator, header, cl=None):
  """Generic implementation for listing fields for a resource.

  @param resource: One of L{constants.QR_OP_LUXI}
  @type fields: list of strings
  @param fields: List of fields to query for
  @type separator: string or None
  @param separator: String used to separate fields
  @type header: bool
  @param header: Whether to show header row
  @rtype: int
  @return: L{constants.EXIT_SUCCESS} or L{constants.EXIT_UNKNOWN_FIELD}

  """
  if cl is None:
    cl = GetClient()

  # An empty field list means "all fields"
  if not fields:
    fields = None

  response = cl.QueryFields(resource, fields)

  found_unknown = _WarnUnknownFields(response.fields)

  columns = [
    TableColumn("Name", str, False),
    TableColumn("Title", str, False),
    # TODO: Add field description to master daemon
    ]

  rows = [[fdef.name, fdef.title] for fdef in response.fields]

  for line in FormatTable(rows, columns, header, separator):
    ToStdout(line)

  if found_unknown:
    return constants.EXIT_UNKNOWN_FIELD

  return constants.EXIT_SUCCESS
2588 """Describes a column for L{FormatTable}.
2591 def __init__(self, title, fn, align_right):
2592 """Initializes this class.
2595 @param title: Column title
2597 @param fn: Formatting function
2598 @type align_right: bool
2599 @param align_right: Whether to align values on the right-hand side
2604 self.align_right = align_right
2607 def _GetColFormatString(width, align_right):
2608 """Returns the format string for a field.
2616 return "%%%s%ss" % (sign, width)
def FormatTable(rows, columns, header, separator):
  """Formats data as a table.

  @type rows: list of lists
  @param rows: Row data, one list per row
  @type columns: list of L{TableColumn}
  @param columns: Column descriptions
  @type header: bool
  @param header: Whether to show header row
  @type separator: string or None
  @param separator: String used to separate columns
  @rtype: list of strings
  @return: the rendered table, one string per line

  """
  if header:
    data = [[col.title for col in columns]]
    colwidth = [len(col.title) for col in columns]
  else:
    data = []
    colwidth = [0 for _ in columns]

  # Add rows
  for row in rows:
    assert len(row) == len(columns)

    formatted = [col.format(value) for value, col in zip(row, columns)]

    if separator is None:
      # Update column widths
      for idx, (oldwidth, value) in enumerate(zip(colwidth, formatted)):
        # Modifying a list's items while iterating is fine
        colwidth[idx] = max(oldwidth, len(value))

    data.append(formatted)

  if separator is not None:
    # Return early if a separator is used
    return [separator.join(row) for row in data]

  if columns and not columns[-1].align_right:
    # Avoid unnecessary spaces at end of line
    colwidth[-1] = 0

  # Build format string
  fmt = " ".join([_GetColFormatString(width, col.align_right)
                  for col, width in zip(columns, colwidth)])

  return [fmt % tuple(row) for row in data]
def FormatTimestamp(ts):
  """Formats a given timestamp.

  @type ts: timestamp
  @param ts: a timeval-type timestamp, a tuple of seconds and microseconds

  @rtype: string
  @return: a string with the formatted timestamp, or "?" if the input is not
    a (seconds, microseconds) pair

  """
  if not isinstance(ts, (tuple, list)) or len(ts) != 2:
    return "?"
  sec, usec = ts
  # %F/%T are shorthand for %Y-%m-%d and %H:%M:%S; microseconds are appended
  # manually since strftime has no directive for them
  return time.strftime("%F %T", time.localtime(sec)) + ".%06d" % usec
def ParseTimespec(value):
  """Parse a time specification.

  The following suffixes will be recognized:

    - s: seconds
    - m: minutes
    - h: hours
    - d: day
    - w: weeks

  Without any suffix, the value will be taken to be in seconds.

  @rtype: int
  @return: the number of seconds represented by the specification
  @raise errors.OpPrereqError: on empty or unparseable input

  """
  value = str(value)
  if not value:
    raise errors.OpPrereqError("Empty time specification passed")
  suffix_map = {
    "s": 1,
    "m": 60,
    "h": 3600,
    "d": 86400,
    "w": 604800,
    }
  if value[-1] not in suffix_map:
    try:
      value = int(value)
    except (TypeError, ValueError):
      raise errors.OpPrereqError("Invalid time specification '%s'" % value)
  else:
    multiplier = suffix_map[value[-1]]
    value = value[:-1]
    if not value:  # no data left after stripping the suffix
      raise errors.OpPrereqError("Invalid time specification (only"
                                 " suffix passed)")
    try:
      value = int(value) * multiplier
    except (TypeError, ValueError):
      raise errors.OpPrereqError("Invalid time specification '%s'" % value)
  return value
def GetOnlineNodes(nodes, cl=None, nowarn=False, secondary_ips=False,
                   filter_master=False):
  """Returns the names of online nodes.

  This function will also log a warning on stderr with the names of
  any offline nodes that were skipped.

  @param nodes: if not empty, use only this subset of nodes (minus the
      offline ones)
  @param cl: if not None, luxi client to use
  @type nowarn: boolean
  @param nowarn: by default, this function will output a note with the
      offline nodes that are skipped; if this parameter is True the
      note is not displayed
  @type secondary_ips: boolean
  @param secondary_ips: if True, return the secondary IPs instead of the
      names, useful for doing network traffic over the replication interface
      (if any)
  @type filter_master: boolean
  @param filter_master: if True, do not return the master node in the list
      (useful in coordination with secondary_ips where we cannot check our
      node name against the list)

  """
  if cl is None:
    cl = GetClient()

  # Result rows are [name, offline, sip]; pick which column to return
  if secondary_ips:
    name_idx = 2
  else:
    name_idx = 0

  if filter_master:
    master_node = cl.QueryConfigValues(["master_node"])[0]
    filter_fn = lambda x: x != master_node
  else:
    filter_fn = lambda _: True

  result = cl.QueryNodes(names=nodes, fields=["name", "offline", "sip"],
                         use_locking=False)
  offline = [row[0] for row in result if row[1]]
  if offline and not nowarn:
    ToStderr("Note: skipping offline node(s): %s" % utils.CommaJoin(offline))
  return [row[name_idx] for row in result if not row[1] and filter_fn(row[0])]
2772 def _ToStream(stream, txt, *args):
2773 """Write a message to a stream, bypassing the logging system
2775 @type stream: file object
2776 @param stream: the file to which we should write
2778 @param txt: the message
2783 stream.write(txt % args)
def ToStdout(txt, *args):
  """Print a message on standard output, bypassing the logging system.

  Thin convenience wrapper around L{_ToStream} bound to C{sys.stdout}.

  @type txt: str
  @param txt: the message

  """
  return _ToStream(sys.stdout, txt, *args)
def ToStderr(txt, *args):
  """Print a message on standard error, bypassing the logging system.

  Thin convenience wrapper around L{_ToStream} bound to C{sys.stderr}.

  @type txt: str
  @param txt: the message

  """
  return _ToStream(sys.stderr, txt, *args)
# NOTE(review): this chunk is mangled -- line-number prefixes are embedded in
# the code and interior lines are elided; comments below describe only what
# is visible here and should be re-checked against the complete source.
2814 class JobExecutor(object):
2815 """Class which manages the submission and execution of multiple jobs.
2817 Note that instances of this class should not be reused between
2821 def __init__(self, cl=None, verbose=True, opts=None, feedback_fn=None):
# Stores the luxi client, verbosity flag, generic opcode options and an
# optional feedback callback; other attribute initializations (presumably
# self.queue, self.jobs, self.cl, self.opts) are elided here -- TODO confirm.
2826 self.verbose = verbose
2829 self.feedback_fn = feedback_fn
2831 def QueueJob(self, name, *ops):
2832 """Record a job for later submit.
2835 @param name: a description of the job, will be used in WaitJobSet
# Applies the generic per-invocation options to the opcodes, then queues the
# (name, ops) pair for SubmitPending.
2837 SetGenericOpcodeOpts(ops, self.opts)
2838 self.queue.append((name, ops))
2840 def SubmitPending(self, each=False):
2841 """Submit all pending jobs.
# Visible branch: submit jobs one at a time, recording a True success flag
# per job; the alternative path submits everything in one luxi call.
2846 for row in self.queue:
2847 # SubmitJob will remove the success status, but raise an exception if
2848 # the submission fails, so we'll notice that anyway.
2849 results.append([True, self.cl.SubmitJob(row[1])])
2851 results = self.cl.SubmitManyJobs([row[1] for row in self.queue])
# Pair each (status, data) submission result with its queued (name, ops)
# entry; jobs are remembered as (index, status, job-id-or-error, name).
2852 for (idx, ((status, data), (name, _))) in enumerate(zip(results,
2854 self.jobs.append((idx, status, data, name))
2856 def _ChooseJob(self):
2857 """Choose a non-waiting/queued job to poll next.
2860 assert self.jobs, "_ChooseJob called with empty job list"
# Query the status of all tracked job IDs (element 2 of each job tuple)
2862 result = self.cl.QueryJobs([i[2] for i in self.jobs], ["status"])
2865 for job_data, status in zip(self.jobs, result):
# Jobs still queued/waiting/canceling are skipped; anything else (running
# or no longer known to the master) is a good candidate to poll.
2866 if (isinstance(status, list) and status and
2867 status[0] in (constants.JOB_STATUS_QUEUED,
2868 constants.JOB_STATUS_WAITLOCK,
2869 constants.JOB_STATUS_CANCELING)):
2870 # job is still present and waiting
2872 # good candidate found (either running job or lost job)
2873 self.jobs.remove(job_data)
# Fallback when everything is still waiting: poll the first job anyway
2877 return self.jobs.pop(0)
2879 def GetResults(self):
2880 """Wait for and return the results of all jobs.
2883 @return: list of tuples (success, job results), in the same order
2884 as the submitted jobs; if a job has failed, instead of the result
2885 there will be the error message
# Make sure anything still queued has actually been submitted first
2889 self.SubmitPending()
2892 ok_jobs = [row[2] for row in self.jobs if row[1]]
2894 ToStdout("Submitted jobs %s", utils.CommaJoin(ok_jobs))
2896 # first, remove any non-submitted jobs
2897 self.jobs, failures = compat.partition(self.jobs, lambda x: x[1])
2898 for idx, _, jid, name in failures:
2899 ToStderr("Failed to submit job for %s: %s", name, jid)
2900 results.append((idx, False, jid))
# Poll the remaining jobs one at a time, in whatever order they complete;
# archived or failed jobs yield a formatted error message instead of a result.
2903 (idx, _, jid, name) = self._ChooseJob()
2904 ToStdout("Waiting for job %s for %s...", jid, name)
2906 job_result = PollJob(jid, cl=self.cl, feedback_fn=self.feedback_fn)
2908 except errors.JobLost, err:
2909 _, job_result = FormatError(err)
2910 ToStderr("Job %s for %s has been archived, cannot check its result",
2913 except (errors.GenericError, luxi.ProtocolError), err:
2914 _, job_result = FormatError(err)
2916 # the error message will always be shown, verbose or not
2917 ToStderr("Job %s for %s has failed: %s", jid, name, job_result)
2919 results.append((idx, success, job_result))
2921 # sort based on the index, then drop it
2923 results = [i[1:] for i in results]
2927 def WaitOrShow(self, wait):
2928 """Wait for job results or only print the job IDs.
2931 @param wait: whether to wait or not
2935 return self.GetResults()
# Non-waiting path: submit pending jobs, print one line per job (job ID on
# success, error message on failure) and return (status, result) pairs.
2938 self.SubmitPending()
2939 for _, status, result, name in self.jobs:
2941 ToStdout("%s: %s", result, name)
2943 ToStderr("Failure for %s: %s", name, result)
2944 return [row[1:3] for row in self.jobs]
def FormatParameterDict(buf, param_dict, actual, level=1):
  """Formats a parameter dictionary.

  @type buf: L{StringIO}
  @param buf: the buffer into which to write
  @type param_dict: dict
  @param param_dict: the own parameters
  @type actual: dict
  @param actual: the current parameter set (including defaults)
  @param level: Level of indent

  """
  indent = "  " * level
  # Iterate over the full (actual) set so inherited defaults show up too;
  # values not explicitly set are annotated as defaults
  for key in sorted(actual):
    val = param_dict.get(key, "default (%s)" % actual[key])
    buf.write("%s- %s: %s\n" % (indent, key, val))