4 # Copyright (C) 2006, 2007, 2008, 2009, 2010 Google Inc.
6 # This program is free software; you can redistribute it and/or modify
7 # it under the terms of the GNU General Public License as published by
8 # the Free Software Foundation; either version 2 of the License, or
9 # (at your option) any later version.
11 # This program is distributed in the hope that it will be useful, but
12 # WITHOUT ANY WARRANTY; without even the implied warranty of
13 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 # General Public License for more details.
16 # You should have received a copy of the GNU General Public License
17 # along with this program; if not, write to the Free Software
18 # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
22 """Module dealing with command line parsing"""
30 from cStringIO import StringIO
32 from ganeti import utils
33 from ganeti import errors
34 from ganeti import constants
35 from ganeti import opcodes
36 from ganeti import luxi
37 from ganeti import ssconf
38 from ganeti import rpc
39 from ganeti import ssh
40 from ganeti import compat
41 from ganeti import netutils
42 from ganeti import qlang
44 from optparse import (OptionParser, TitledHelpFormatter,
45 Option, OptionValueError)
49 # Command line options
61 "CLUSTER_DOMAIN_SECRET_OPT",
77 "FILESTORE_DRIVER_OPT",
86 "DEFAULT_IALLOCATOR_OPT",
87 "IDENTIFY_DEFAULTS_OPT",
89 "IGNORE_FAILURES_OPT",
91 "IGNORE_REMOVE_FAILURES_OPT",
92 "IGNORE_SECONDARIES_OPT",
96 "MAINTAIN_NODE_HEALTH_OPT",
101 "NEW_CLUSTER_CERT_OPT",
102 "NEW_CLUSTER_DOMAIN_SECRET_OPT",
103 "NEW_CONFD_HMAC_KEY_OPT",
108 "NODE_PLACEMENT_OPT",
112 "NODRBD_STORAGE_OPT",
118 "NOMODIFY_ETCHOSTS_OPT",
119 "NOMODIFY_SSH_SETUP_OPT",
125 "NOSSH_KEYCHECK_OPT",
134 "PREALLOC_WIPE_DISKS_OPT",
135 "PRIMARY_IP_VERSION_OPT",
140 "REMOVE_INSTANCE_OPT",
148 "SHUTDOWN_TIMEOUT_OPT",
163 # Generic functions for CLI programs
165 "GenericInstanceCreate",
171 "JobSubmittedException",
173 "RunWhileClusterStopped",
177 # Formatting functions
178 "ToStderr", "ToStdout",
189 # command line options support infrastructure
190 "ARGS_MANY_INSTANCES",
209 "OPT_COMPL_INST_ADD_NODES",
210 "OPT_COMPL_MANY_NODES",
211 "OPT_COMPL_ONE_IALLOCATOR",
212 "OPT_COMPL_ONE_INSTANCE",
213 "OPT_COMPL_ONE_NODE",
214 "OPT_COMPL_ONE_NODEGROUP",
220 "COMMON_CREATE_OPTS",
226 #: Priorities (sorted)
228 ("low", constants.OP_PRIO_LOW),
229 ("normal", constants.OP_PRIO_NORMAL),
230 ("high", constants.OP_PRIO_HIGH),
233 #: Priority dictionary for easier lookup
234 # TODO: Replace this and _PRIORITY_NAMES with a single sorted dictionary once
235 # we migrate to Python 2.6
236 _PRIONAME_TO_VALUE = dict(_PRIORITY_NAMES)
238 # Query result status for clients
241 QR_INCOMPLETE) = range(3)
245 def __init__(self, min=0, max=None): # pylint: disable-msg=W0622
250 return ("<%s min=%s max=%s>" %
251 (self.__class__.__name__, self.min, self.max))
254 class ArgSuggest(_Argument):
255 """Suggesting argument.
257 Value can be any of the ones passed to the constructor.
260 # pylint: disable-msg=W0622
261 def __init__(self, min=0, max=None, choices=None):
262 _Argument.__init__(self, min=min, max=max)
263 self.choices = choices
266 return ("<%s min=%s max=%s choices=%r>" %
267 (self.__class__.__name__, self.min, self.max, self.choices))
270 class ArgChoice(ArgSuggest):
273 Value can be any of the ones passed to the constructor. Like L{ArgSuggest},
274 but value must be one of the choices.
279 class ArgUnknown(_Argument):
280 """Unknown argument to program (e.g. determined at runtime).
285 class ArgInstance(_Argument):
286 """Instances argument.
291 class ArgNode(_Argument):
297 class ArgGroup(_Argument):
298 """Node group argument.
303 class ArgJobId(_Argument):
309 class ArgFile(_Argument):
310 """File path argument.
315 class ArgCommand(_Argument):
321 class ArgHost(_Argument):
327 class ArgOs(_Argument):
# Prebuilt positional-argument definitions shared by the CLI commands.
ARGS_MANY_INSTANCES = [ArgInstance()]
ARGS_MANY_NODES = [ArgNode()]
ARGS_MANY_GROUPS = [ArgGroup()]
ARGS_ONE_INSTANCE = [ArgInstance(min=1, max=1)]
ARGS_ONE_NODE = [ArgNode(min=1, max=1)]
# FIX: this was previously built from ArgInstance, which made single-group
# commands complete/validate against instance names instead of group names
ARGS_ONE_GROUP = [ArgGroup(min=1, max=1)]
ARGS_ONE_OS = [ArgOs(min=1, max=1)]
343 def _ExtractTagsObject(opts, args):
344 """Extract the tag type object.
346 Note that this function will modify its args parameter.
349 if not hasattr(opts, "tag_type"):
350 raise errors.ProgrammerError("tag_type not passed to _ExtractTagsObject")
352 if kind == constants.TAG_CLUSTER:
354 elif kind == constants.TAG_NODE or kind == constants.TAG_INSTANCE:
356 raise errors.OpPrereqError("no arguments passed to the command")
360 raise errors.ProgrammerError("Unhandled tag type '%s'" % kind)
364 def _ExtendTags(opts, args):
365 """Extend the args if a source file has been given.
367 This function will extend the tags with the contents of the file
368 passed in the 'tags_source' attribute of the opts parameter. A file
369 named '-' will be replaced by stdin.
372 fname = opts.tags_source
378 new_fh = open(fname, "r")
381 # we don't use the nice 'new_data = [line.strip() for line in fh]'
382 # because of python bug 1633941
384 line = new_fh.readline()
387 new_data.append(line.strip())
390 args.extend(new_data)
393 def ListTags(opts, args):
394 """List the tags on a given object.
396 This is a generic implementation that knows how to deal with all
397 three cases of tag objects (cluster, node, instance). The opts
398 argument is expected to contain a tag_type field denoting what
399 object type we work on.
402 kind, name = _ExtractTagsObject(opts, args)
404 result = cl.QueryTags(kind, name)
405 result = list(result)
def AddTags(opts, args):
  """Add tags on a given object.

  This is a generic implementation that knows how to deal with all
  three cases of tag objects (cluster, node, instance). The opts
  argument is expected to contain a tag_type field denoting what
  object type we work on.

  """
  # Resolve which object (cluster/node/instance) the tags apply to; this
  # also consumes the object name from args where applicable
  tag_kind, tag_name = _ExtractTagsObject(opts, args)
  # Optionally pull additional tags from the file given via --from
  _ExtendTags(opts, args)
  if not args:
    raise errors.OpPrereqError("No tags to be added")
  add_op = opcodes.OpAddTags(kind=tag_kind, name=tag_name, tags=args)
  SubmitOpCode(add_op, opts=opts)
def RemoveTags(opts, args):
  """Remove tags from a given object.

  This is a generic implementation that knows how to deal with all
  three cases of tag objects (cluster, node, instance). The opts
  argument is expected to contain a tag_type field denoting what
  object type we work on.

  """
  # Resolve the target object and gather any extra tags from --from
  tag_kind, tag_name = _ExtractTagsObject(opts, args)
  _ExtendTags(opts, args)
  if not args:
    raise errors.OpPrereqError("No tags to be removed")
  del_op = opcodes.OpDelTags(kind=tag_kind, name=tag_name, tags=args)
  SubmitOpCode(del_op, opts=opts)
445 def check_unit(option, opt, value): # pylint: disable-msg=W0613
446 """OptParsers custom converter for units.
450 return utils.ParseUnit(value)
451 except errors.UnitParseError, err:
452 raise OptionValueError("option %s: %s" % (opt, err))
455 def _SplitKeyVal(opt, data):
456 """Convert a KeyVal string into a dict.
458 This function will convert a key=val[,...] string into a dict. Empty
459 values will be converted specially: keys which have the prefix 'no_'
460 will have the value=False and the prefix stripped, the others will
464 @param opt: a string holding the option name for which we process the
465 data, used in building error messages
467 @param data: a string of the format key=val,key=val,...
469 @return: {key=val, key=val}
470 @raises errors.ParameterError: if there are duplicate keys
475 for elem in utils.UnescapeAndSplit(data, sep=","):
477 key, val = elem.split("=", 1)
479 if elem.startswith(NO_PREFIX):
480 key, val = elem[len(NO_PREFIX):], False
481 elif elem.startswith(UN_PREFIX):
482 key, val = elem[len(UN_PREFIX):], None
484 key, val = elem, True
486 raise errors.ParameterError("Duplicate key '%s' in option %s" %
def check_ident_key_val(option, opt, value):  # pylint: disable-msg=W0613
  """Custom parser for ident:key=val,key=val options.

  This will store the parsed values as a tuple (ident, {key: val}). As such,
  multiple uses of this option via action=append is possible.

  """
  # Split off the identifier; everything after the first colon is the
  # key=val payload (empty when no colon is present)
  if ":" in value:
    ident, rest = value.split(":", 1)
  else:
    ident, rest = value, ""

  if ident.startswith(NO_PREFIX):
    # "no_<ident>" removes the whole parameter group, so a payload is invalid
    if rest:
      msg = "Cannot pass options when removing parameter groups: %s" % value
      raise errors.ParameterError(msg)
    retval = (ident[len(NO_PREFIX):], False)
  elif ident.startswith(UN_PREFIX):
    # "-<ident>" resets the group to defaults; again no payload allowed
    if rest:
      msg = "Cannot pass options when removing parameter groups: %s" % value
      raise errors.ParameterError(msg)
    retval = (ident[len(UN_PREFIX):], None)
  else:
    kv_dict = _SplitKeyVal(opt, rest)
    retval = (ident, kv_dict)

  return retval
def check_key_val(option, opt, value):  # pylint: disable-msg=W0613
  """Custom parser class for key=val,key=val options.

  This will store the parsed values as a dict {key: val}.

  """
  # All the heavy lifting (splitting, no_/- prefix handling, duplicate
  # detection) lives in _SplitKeyVal
  return _SplitKeyVal(opt, value)
def check_bool(option, opt, value): # pylint: disable-msg=W0613
  """Custom parser for yes/no options.

  This will store the parsed value as either True or False.

  """
  lowered = value.lower()
  if lowered in (constants.VALUE_FALSE, "no"):
    return False
  elif lowered in (constants.VALUE_TRUE, "yes"):
    return True
  else:
    raise errors.ParameterError("Invalid boolean value '%s'" % lowered)
544 # completion_suggestion is normally a list. Using numeric values not evaluating
545 # to False for dynamic completion.
546 (OPT_COMPL_MANY_NODES,
548 OPT_COMPL_ONE_INSTANCE,
550 OPT_COMPL_ONE_IALLOCATOR,
551 OPT_COMPL_INST_ADD_NODES,
552 OPT_COMPL_ONE_NODEGROUP) = range(100, 107)
554 OPT_COMPL_ALL = frozenset([
555 OPT_COMPL_MANY_NODES,
557 OPT_COMPL_ONE_INSTANCE,
559 OPT_COMPL_ONE_IALLOCATOR,
560 OPT_COMPL_INST_ADD_NODES,
561 OPT_COMPL_ONE_NODEGROUP,
565 class CliOption(Option):
566 """Custom option class for optparse.
569 ATTRS = Option.ATTRS + [
570 "completion_suggest",
572 TYPES = Option.TYPES + (
578 TYPE_CHECKER = Option.TYPE_CHECKER.copy()
579 TYPE_CHECKER["identkeyval"] = check_ident_key_val
580 TYPE_CHECKER["keyval"] = check_key_val
581 TYPE_CHECKER["unit"] = check_unit
582 TYPE_CHECKER["bool"] = check_bool
585 # optparse.py sets make_option, so we do it for our own option class, too
586 cli_option = CliOption
591 DEBUG_OPT = cli_option("-d", "--debug", default=0, action="count",
592 help="Increase debugging level")
594 NOHDR_OPT = cli_option("--no-headers", default=False,
595 action="store_true", dest="no_headers",
596 help="Don't display column headers")
598 SEP_OPT = cli_option("--separator", default=None,
599 action="store", dest="separator",
600 help=("Separator between output fields"
601 " (defaults to one space)"))
603 USEUNITS_OPT = cli_option("--units", default=None,
604 dest="units", choices=('h', 'm', 'g', 't'),
605 help="Specify units for output (one of hmgt)")
607 FIELDS_OPT = cli_option("-o", "--output", dest="output", action="store",
608 type="string", metavar="FIELDS",
609 help="Comma separated list of output fields")
611 FORCE_OPT = cli_option("-f", "--force", dest="force", action="store_true",
612 default=False, help="Force the operation")
614 CONFIRM_OPT = cli_option("--yes", dest="confirm", action="store_true",
615 default=False, help="Do not require confirmation")
617 IGNORE_OFFLINE_OPT = cli_option("--ignore-offline", dest="ignore_offline",
618 action="store_true", default=False,
619 help=("Ignore offline nodes and do as much"
622 TAG_SRC_OPT = cli_option("--from", dest="tags_source",
623 default=None, help="File with tag names")
625 SUBMIT_OPT = cli_option("--submit", dest="submit_only",
626 default=False, action="store_true",
627 help=("Submit the job and return the job ID, but"
628 " don't wait for the job to finish"))
630 SYNC_OPT = cli_option("--sync", dest="do_locking",
631 default=False, action="store_true",
632 help=("Grab locks while doing the queries"
633 " in order to ensure more consistent results"))
# FIX: help text typo "verify it it could be" -> "verify if it could be"
DRY_RUN_OPT = cli_option("--dry-run", default=False,
                         action="store_true",
                         help=("Do not execute the operation, just run the"
                               " check steps and verify if it could be"
                               " executed"))
641 VERBOSE_OPT = cli_option("-v", "--verbose", default=False,
643 help="Increase the verbosity of the operation")
645 DEBUG_SIMERR_OPT = cli_option("--debug-simulate-errors", default=False,
646 action="store_true", dest="simulate_errors",
647 help="Debugging option that makes the operation"
648 " treat most runtime checks as failed")
650 NWSYNC_OPT = cli_option("--no-wait-for-sync", dest="wait_for_sync",
651 default=True, action="store_false",
652 help="Don't wait for sync (DANGEROUS!)")
654 DISK_TEMPLATE_OPT = cli_option("-t", "--disk-template", dest="disk_template",
655 help="Custom disk setup (diskless, file,"
657 default=None, metavar="TEMPL",
658 choices=list(constants.DISK_TEMPLATES))
660 NONICS_OPT = cli_option("--no-nics", default=False, action="store_true",
661 help="Do not create any network cards for"
664 FILESTORE_DIR_OPT = cli_option("--file-storage-dir", dest="file_storage_dir",
665 help="Relative path under default cluster-wide"
666 " file storage dir to store file-based disks",
667 default=None, metavar="<DIR>")
669 FILESTORE_DRIVER_OPT = cli_option("--file-driver", dest="file_driver",
670 help="Driver to use for image files",
671 default="loop", metavar="<DRIVER>",
672 choices=list(constants.FILE_DRIVER))
674 IALLOCATOR_OPT = cli_option("-I", "--iallocator", metavar="<NAME>",
675 help="Select nodes for the instance automatically"
676 " using the <NAME> iallocator plugin",
677 default=None, type="string",
678 completion_suggest=OPT_COMPL_ONE_IALLOCATOR)
680 DEFAULT_IALLOCATOR_OPT = cli_option("-I", "--default-iallocator",
682 help="Set the default instance allocator plugin",
683 default=None, type="string",
684 completion_suggest=OPT_COMPL_ONE_IALLOCATOR)
686 OS_OPT = cli_option("-o", "--os-type", dest="os", help="What OS to run",
688 completion_suggest=OPT_COMPL_ONE_OS)
690 OSPARAMS_OPT = cli_option("-O", "--os-parameters", dest="osparams",
691 type="keyval", default={},
692 help="OS parameters")
694 FORCE_VARIANT_OPT = cli_option("--force-variant", dest="force_variant",
695 action="store_true", default=False,
696 help="Force an unknown variant")
698 NO_INSTALL_OPT = cli_option("--no-install", dest="no_install",
699 action="store_true", default=False,
700 help="Do not install the OS (will"
703 BACKEND_OPT = cli_option("-B", "--backend-parameters", dest="beparams",
704 type="keyval", default={},
705 help="Backend parameters")
707 HVOPTS_OPT = cli_option("-H", "--hypervisor-parameters", type="keyval",
708 default={}, dest="hvparams",
709 help="Hypervisor parameters")
711 HYPERVISOR_OPT = cli_option("-H", "--hypervisor-parameters", dest="hypervisor",
712 help="Hypervisor and hypervisor options, in the"
713 " format hypervisor:option=value,option=value,...",
714 default=None, type="identkeyval")
716 HVLIST_OPT = cli_option("-H", "--hypervisor-parameters", dest="hvparams",
717 help="Hypervisor and hypervisor options, in the"
718 " format hypervisor:option=value,option=value,...",
719 default=[], action="append", type="identkeyval")
721 NOIPCHECK_OPT = cli_option("--no-ip-check", dest="ip_check", default=True,
722 action="store_false",
723 help="Don't check that the instance's IP"
726 NONAMECHECK_OPT = cli_option("--no-name-check", dest="name_check",
727 default=True, action="store_false",
728 help="Don't check that the instance's name"
731 NET_OPT = cli_option("--net",
732 help="NIC parameters", default=[],
733 dest="nics", action="append", type="identkeyval")
735 DISK_OPT = cli_option("--disk", help="Disk parameters", default=[],
736 dest="disks", action="append", type="identkeyval")
738 DISKIDX_OPT = cli_option("--disks", dest="disks", default=None,
739 help="Comma-separated list of disks"
740 " indices to act on (e.g. 0,2) (optional,"
741 " defaults to all disks)")
743 OS_SIZE_OPT = cli_option("-s", "--os-size", dest="sd_size",
744 help="Enforces a single-disk configuration using the"
745 " given disk size, in MiB unless a suffix is used",
746 default=None, type="unit", metavar="<size>")
748 IGNORE_CONSIST_OPT = cli_option("--ignore-consistency",
749 dest="ignore_consistency",
750 action="store_true", default=False,
751 help="Ignore the consistency of the disks on"
754 NONLIVE_OPT = cli_option("--non-live", dest="live",
755 default=True, action="store_false",
756 help="Do a non-live migration (this usually means"
757 " freeze the instance, save the state, transfer and"
758 " only then resume running on the secondary node)")
# FIX: the help text opened a parenthesis ("(choose ...") that was never
# closed; balance it so --help output reads correctly
MIGRATION_MODE_OPT = cli_option("--migration-mode", dest="migration_mode",
                                default=None,
                                choices=list(constants.HT_MIGRATION_MODES),
                                help="Override default migration mode (choose"
                                " either live or non-live)")
766 NODE_PLACEMENT_OPT = cli_option("-n", "--node", dest="node",
767 help="Target node and optional secondary node",
768 metavar="<pnode>[:<snode>]",
769 completion_suggest=OPT_COMPL_INST_ADD_NODES)
771 NODE_LIST_OPT = cli_option("-n", "--node", dest="nodes", default=[],
772 action="append", metavar="<node>",
773 help="Use only this node (can be used multiple"
774 " times, if not given defaults to all nodes)",
775 completion_suggest=OPT_COMPL_ONE_NODE)
777 NODEGROUP_OPT = cli_option("-g", "--node-group",
779 help="Node group (name or uuid)",
780 metavar="<nodegroup>",
781 default=None, type="string",
782 completion_suggest=OPT_COMPL_ONE_NODEGROUP)
784 SINGLE_NODE_OPT = cli_option("-n", "--node", dest="node", help="Target node",
786 completion_suggest=OPT_COMPL_ONE_NODE)
788 NOSTART_OPT = cli_option("--no-start", dest="start", default=True,
789 action="store_false",
790 help="Don't start the instance after creation")
792 SHOWCMD_OPT = cli_option("--show-cmd", dest="show_command",
793 action="store_true", default=False,
794 help="Show command instead of executing it")
796 CLEANUP_OPT = cli_option("--cleanup", dest="cleanup",
797 default=False, action="store_true",
798 help="Instead of performing the migration, try to"
799 " recover from a failed cleanup. This is safe"
800 " to run even if the instance is healthy, but it"
801 " will create extra replication traffic and "
802 " disrupt briefly the replication (like during the"
805 STATIC_OPT = cli_option("-s", "--static", dest="static",
806 action="store_true", default=False,
807 help="Only show configuration data, not runtime data")
809 ALL_OPT = cli_option("--all", dest="show_all",
810 default=False, action="store_true",
811 help="Show info on all instances on the cluster."
812 " This can take a long time to run, use wisely")
814 SELECT_OS_OPT = cli_option("--select-os", dest="select_os",
815 action="store_true", default=False,
816 help="Interactive OS reinstall, lists available"
817 " OS templates for selection")
819 IGNORE_FAILURES_OPT = cli_option("--ignore-failures", dest="ignore_failures",
820 action="store_true", default=False,
821 help="Remove the instance from the cluster"
822 " configuration even if there are failures"
823 " during the removal process")
825 IGNORE_REMOVE_FAILURES_OPT = cli_option("--ignore-remove-failures",
826 dest="ignore_remove_failures",
827 action="store_true", default=False,
828 help="Remove the instance from the"
829 " cluster configuration even if there"
830 " are failures during the removal"
833 REMOVE_INSTANCE_OPT = cli_option("--remove-instance", dest="remove_instance",
834 action="store_true", default=False,
835 help="Remove the instance from the cluster")
837 NEW_SECONDARY_OPT = cli_option("-n", "--new-secondary", dest="dst_node",
838 help="Specifies the new secondary node",
839 metavar="NODE", default=None,
840 completion_suggest=OPT_COMPL_ONE_NODE)
842 ON_PRIMARY_OPT = cli_option("-p", "--on-primary", dest="on_primary",
843 default=False, action="store_true",
844 help="Replace the disk(s) on the primary"
845 " node (only for the drbd template)")
847 ON_SECONDARY_OPT = cli_option("-s", "--on-secondary", dest="on_secondary",
848 default=False, action="store_true",
849 help="Replace the disk(s) on the secondary"
850 " node (only for the drbd template)")
852 AUTO_PROMOTE_OPT = cli_option("--auto-promote", dest="auto_promote",
853 default=False, action="store_true",
854 help="Lock all nodes and auto-promote as needed"
857 AUTO_REPLACE_OPT = cli_option("-a", "--auto", dest="auto",
858 default=False, action="store_true",
859 help="Automatically replace faulty disks"
860 " (only for the drbd template)")
862 IGNORE_SIZE_OPT = cli_option("--ignore-size", dest="ignore_size",
863 default=False, action="store_true",
864 help="Ignore current recorded size"
865 " (useful for forcing activation when"
866 " the recorded size is wrong)")
868 SRC_NODE_OPT = cli_option("--src-node", dest="src_node", help="Source node",
870 completion_suggest=OPT_COMPL_ONE_NODE)
872 SRC_DIR_OPT = cli_option("--src-dir", dest="src_dir", help="Source directory",
875 SECONDARY_IP_OPT = cli_option("-s", "--secondary-ip", dest="secondary_ip",
876 help="Specify the secondary ip for the node",
877 metavar="ADDRESS", default=None)
879 READD_OPT = cli_option("--readd", dest="readd",
880 default=False, action="store_true",
881 help="Readd old node after replacing it")
883 NOSSH_KEYCHECK_OPT = cli_option("--no-ssh-key-check", dest="ssh_key_check",
884 default=True, action="store_false",
885 help="Disable SSH key fingerprint checking")
888 MC_OPT = cli_option("-C", "--master-candidate", dest="master_candidate",
889 type="bool", default=None, metavar=_YORNO,
890 help="Set the master_candidate flag on the node")
892 OFFLINE_OPT = cli_option("-O", "--offline", dest="offline", metavar=_YORNO,
893 type="bool", default=None,
894 help="Set the offline flag on the node")
896 DRAINED_OPT = cli_option("-D", "--drained", dest="drained", metavar=_YORNO,
897 type="bool", default=None,
898 help="Set the drained flag on the node")
900 CAPAB_MASTER_OPT = cli_option("--master-capable", dest="master_capable",
901 type="bool", default=None, metavar=_YORNO,
902 help="Set the master_capable flag on the node")
904 CAPAB_VM_OPT = cli_option("--vm-capable", dest="vm_capable",
905 type="bool", default=None, metavar=_YORNO,
906 help="Set the vm_capable flag on the node")
908 ALLOCATABLE_OPT = cli_option("--allocatable", dest="allocatable",
909 type="bool", default=None, metavar=_YORNO,
910 help="Set the allocatable flag on a volume")
912 NOLVM_STORAGE_OPT = cli_option("--no-lvm-storage", dest="lvm_storage",
913 help="Disable support for lvm based instances"
915 action="store_false", default=True)
917 ENABLED_HV_OPT = cli_option("--enabled-hypervisors",
918 dest="enabled_hypervisors",
919 help="Comma-separated list of hypervisors",
920 type="string", default=None)
922 NIC_PARAMS_OPT = cli_option("-N", "--nic-parameters", dest="nicparams",
923 type="keyval", default={},
924 help="NIC parameters")
926 CP_SIZE_OPT = cli_option("-C", "--candidate-pool-size", default=None,
927 dest="candidate_pool_size", type="int",
928 help="Set the candidate pool size")
930 VG_NAME_OPT = cli_option("--vg-name", dest="vg_name",
931 help="Enables LVM and specifies the volume group"
932 " name (cluster-wide) for disk allocation [xenvg]",
933 metavar="VG", default=None)
935 YES_DOIT_OPT = cli_option("--yes-do-it", dest="yes_do_it",
936 help="Destroy cluster", action="store_true")
938 NOVOTING_OPT = cli_option("--no-voting", dest="no_voting",
939 help="Skip node agreement check (dangerous)",
940 action="store_true", default=False)
942 MAC_PREFIX_OPT = cli_option("-m", "--mac-prefix", dest="mac_prefix",
943 help="Specify the mac prefix for the instance IP"
944 " addresses, in the format XX:XX:XX",
948 MASTER_NETDEV_OPT = cli_option("--master-netdev", dest="master_netdev",
949 help="Specify the node interface (cluster-wide)"
950 " on which the master IP address will be added "
951 " [%s]" % constants.DEFAULT_BRIDGE,
953 default=constants.DEFAULT_BRIDGE)
955 GLOBAL_FILEDIR_OPT = cli_option("--file-storage-dir", dest="file_storage_dir",
956 help="Specify the default directory (cluster-"
957 "wide) for storing the file-based disks [%s]" %
958 constants.DEFAULT_FILE_STORAGE_DIR,
960 default=constants.DEFAULT_FILE_STORAGE_DIR)
962 NOMODIFY_ETCHOSTS_OPT = cli_option("--no-etc-hosts", dest="modify_etc_hosts",
963 help="Don't modify /etc/hosts",
964 action="store_false", default=True)
966 NOMODIFY_SSH_SETUP_OPT = cli_option("--no-ssh-init", dest="modify_ssh_setup",
967 help="Don't initialize SSH keys",
968 action="store_false", default=True)
970 ERROR_CODES_OPT = cli_option("--error-codes", dest="error_codes",
971 help="Enable parseable error messages",
972 action="store_true", default=False)
974 NONPLUS1_OPT = cli_option("--no-nplus1-mem", dest="skip_nplusone_mem",
975 help="Skip N+1 memory redundancy tests",
976 action="store_true", default=False)
978 REBOOT_TYPE_OPT = cli_option("-t", "--type", dest="reboot_type",
979 help="Type of reboot: soft/hard/full",
980 default=constants.INSTANCE_REBOOT_HARD,
982 choices=list(constants.REBOOT_TYPES))
984 IGNORE_SECONDARIES_OPT = cli_option("--ignore-secondaries",
985 dest="ignore_secondaries",
986 default=False, action="store_true",
987 help="Ignore errors from secondaries")
989 NOSHUTDOWN_OPT = cli_option("--noshutdown", dest="shutdown",
990 action="store_false", default=True,
991 help="Don't shutdown the instance (unsafe)")
993 TIMEOUT_OPT = cli_option("--timeout", dest="timeout", type="int",
994 default=constants.DEFAULT_SHUTDOWN_TIMEOUT,
995 help="Maximum time to wait")
997 SHUTDOWN_TIMEOUT_OPT = cli_option("--shutdown-timeout",
998 dest="shutdown_timeout", type="int",
999 default=constants.DEFAULT_SHUTDOWN_TIMEOUT,
1000 help="Maximum time to wait for instance shutdown")
1002 INTERVAL_OPT = cli_option("--interval", dest="interval", type="int",
1004 help=("Number of seconds between repetions of the"
1007 EARLY_RELEASE_OPT = cli_option("--early-release",
1008 dest="early_release", default=False,
1009 action="store_true",
1010 help="Release the locks on the secondary"
1013 NEW_CLUSTER_CERT_OPT = cli_option("--new-cluster-certificate",
1014 dest="new_cluster_cert",
1015 default=False, action="store_true",
1016 help="Generate a new cluster certificate")
1018 RAPI_CERT_OPT = cli_option("--rapi-certificate", dest="rapi_cert",
1020 help="File containing new RAPI certificate")
1022 NEW_RAPI_CERT_OPT = cli_option("--new-rapi-certificate", dest="new_rapi_cert",
1023 default=None, action="store_true",
1024 help=("Generate a new self-signed RAPI"
1027 NEW_CONFD_HMAC_KEY_OPT = cli_option("--new-confd-hmac-key",
1028 dest="new_confd_hmac_key",
1029 default=False, action="store_true",
1030 help=("Create a new HMAC key for %s" %
# FIX: duplicated word in help text ("Load new new cluster domain secret")
CLUSTER_DOMAIN_SECRET_OPT = cli_option("--cluster-domain-secret",
                                       dest="cluster_domain_secret",
                                       default=None,
                                       help=("Load new cluster domain"
                                             " secret from file"))
1039 NEW_CLUSTER_DOMAIN_SECRET_OPT = cli_option("--new-cluster-domain-secret",
1040 dest="new_cluster_domain_secret",
1041 default=False, action="store_true",
1042 help=("Create a new cluster domain"
1045 USE_REPL_NET_OPT = cli_option("--use-replication-network",
1046 dest="use_replication_network",
1047 help="Whether to use the replication network"
1048 " for talking to the nodes",
1049 action="store_true", default=False)
1051 MAINTAIN_NODE_HEALTH_OPT = \
1052 cli_option("--maintain-node-health", dest="maintain_node_health",
1053 metavar=_YORNO, default=None, type="bool",
1054 help="Configure the cluster to automatically maintain node"
1055 " health, by shutting down unknown instances, shutting down"
1056 " unknown DRBD devices, etc.")
1058 IDENTIFY_DEFAULTS_OPT = \
1059 cli_option("--identify-defaults", dest="identify_defaults",
1060 default=False, action="store_true",
1061 help="Identify which saved instance parameters are equal to"
1062 " the current cluster defaults and set them as such, instead"
1063 " of marking them as overridden")
1065 UIDPOOL_OPT = cli_option("--uid-pool", default=None,
1066 action="store", dest="uid_pool",
1067 help=("A list of user-ids or user-id"
1068 " ranges separated by commas"))
1070 ADD_UIDS_OPT = cli_option("--add-uids", default=None,
1071 action="store", dest="add_uids",
1072 help=("A list of user-ids or user-id"
1073 " ranges separated by commas, to be"
1074 " added to the user-id pool"))
1076 REMOVE_UIDS_OPT = cli_option("--remove-uids", default=None,
1077 action="store", dest="remove_uids",
1078 help=("A list of user-ids or user-id"
1079 " ranges separated by commas, to be"
1080 " removed from the user-id pool"))
1082 RESERVED_LVS_OPT = cli_option("--reserved-lvs", default=None,
1083 action="store", dest="reserved_lvs",
1084 help=("A comma-separated list of reserved"
1085 " logical volumes names, that will be"
1086 " ignored by cluster verify"))
1088 ROMAN_OPT = cli_option("--roman",
1089 dest="roman_integers", default=False,
1090 action="store_true",
1091 help="Use roman numbers for positive integers")
1093 DRBD_HELPER_OPT = cli_option("--drbd-usermode-helper", dest="drbd_helper",
1094 action="store", default=None,
1095 help="Specifies usermode helper for DRBD")
1097 NODRBD_STORAGE_OPT = cli_option("--no-drbd-storage", dest="drbd_storage",
1098 action="store_false", default=True,
1099 help="Disable support for DRBD")
1101 PRIMARY_IP_VERSION_OPT = \
1102 cli_option("--primary-ip-version", default=constants.IP4_VERSION,
1103 action="store", dest="primary_ip_version",
1104 metavar="%d|%d" % (constants.IP4_VERSION,
1105 constants.IP6_VERSION),
1106 help="Cluster-wide IP version for primary IP")
1108 PRIORITY_OPT = cli_option("--priority", default=None, dest="priority",
1109 metavar="|".join(name for name, _ in _PRIORITY_NAMES),
1110 choices=_PRIONAME_TO_VALUE.keys(),
1111 help="Priority for opcode processing")
1113 HID_OS_OPT = cli_option("--hidden", dest="hidden",
1114 type="bool", default=None, metavar=_YORNO,
1115 help="Sets the hidden flag on the OS")
1117 BLK_OS_OPT = cli_option("--blacklisted", dest="blacklisted",
1118 type="bool", default=None, metavar=_YORNO,
1119 help="Sets the blacklisted flag on the OS")
1121 PREALLOC_WIPE_DISKS_OPT = cli_option("--prealloc-wipe-disks", default=None,
1122 type="bool", metavar=_YORNO,
1123 dest="prealloc_wipe_disks",
1124 help=("Wipe disks prior to instance"
1127 NODE_PARAMS_OPT = cli_option("--node-parameters", dest="ndparams",
1128 type="keyval", default=None,
1129 help="Node parameters")
1131 ALLOC_POLICY_OPT = cli_option("--alloc-policy", dest="alloc_policy",
1132 action="store", metavar="POLICY", default=None,
1133 help="Allocation policy for the node group")
1135 NODE_POWERED_OPT = cli_option("--node-powered", default=None,
1136 type="bool", metavar=_YORNO,
1137 dest="node_powered",
1138 help="Specify if the SoR for node is powered")
1141 #: Options provided by all commands
1142 COMMON_OPTS = [DEBUG_OPT]
1144 # common options for creating instances. add and import then add their own
1146 COMMON_CREATE_OPTS = [
1151 FILESTORE_DRIVER_OPT,
1168 def _ParseArgs(argv, commands, aliases):
1169 """Parser for the command line arguments.
1171 This function parses the arguments and returns the function which
1172 must be executed together with its (modified) arguments.
1174 @param argv: the command line
1175 @param commands: dictionary with special contents, see the design
1176 doc for cmdline handling
1177 @param aliases: dictionary with command aliases {'alias': 'target, ...}
1181 binary = "<command>"
1183 binary = argv[0].split("/")[-1]
# --version short-circuits all other processing, mirroring optparse itself.
1185 if len(argv) > 1 and argv[1] == "--version":
1186 ToStdout("%s (ganeti %s) %s", binary, constants.VCS_VERSION,
1187 constants.RELEASE_VERSION)
1188 # Quit right away. That way we don't have to care about this special
1189 # argument. optparse.py does it the same.
# Unknown or missing command: print usage and the full command list, then
# signal the error to the caller by returning (None, None, None).
1192 if len(argv) < 2 or not (argv[1] in commands or
1193 argv[1] in aliases):
1194 # let's do a nice thing
1195 sortedcmds = commands.keys()
1198 ToStdout("Usage: %s {command} [options...] [argument...]", binary)
1199 ToStdout("%s <command> --help to see details, or man %s", binary, binary)
1202 # compute the max line length for cmd + usage
1203 mlen = max([len(" %s" % cmd) for cmd in commands])
1204 mlen = min(60, mlen) # should not get here...
1206 # and format a nice command list
1207 ToStdout("Commands:")
1208 for cmd in sortedcmds:
1209 cmdstr = " %s" % (cmd,)
# commands[cmd] is a 5-tuple; index 4 is the one-line help text.
1210 help_text = commands[cmd][4]
1211 help_lines = textwrap.wrap(help_text, 79 - 3 - mlen)
1212 ToStdout("%-*s - %s", mlen, cmdstr, help_lines.pop(0))
1213 for line in help_lines:
1214 ToStdout("%-*s %s", mlen, "", line)
1218 return None, None, None
1220 # get command, unalias it, and look it up in commands
# Aliases must not shadow real commands and must point at existing ones;
# either situation is a programming error, not user error.
1224 raise errors.ProgrammerError("Alias '%s' overrides an existing"
1227 if aliases[cmd] not in commands:
1228 raise errors.ProgrammerError("Alias '%s' maps to non-existing"
1229 " command '%s'" % (cmd, aliases[cmd]))
# Unpack the command definition and parse its options with the shared
# COMMON_OPTS appended.
1233 func, args_def, parser_opts, usage, description = commands[cmd]
1234 parser = OptionParser(option_list=parser_opts + COMMON_OPTS,
1235 description=description,
1236 formatter=TitledHelpFormatter(),
1237 usage="%%prog %s %s" % (cmd, usage))
1238 parser.disable_interspersed_args()
1239 options, args = parser.parse_args()
1241 if not _CheckArguments(cmd, args_def, args):
1242 return None, None, None
1244 return func, options, args
1247 def _CheckArguments(cmd, args_def, args):
1248 """Verifies the arguments using the argument definition.
1252 1. Abort with error if values specified by user but none expected.
1254 1. For each argument in definition
1256 1. Keep running count of minimum number of values (min_count)
1257 1. Keep running count of maximum number of values (max_count)
1258 1. If it has an unlimited number of values
1260 1. Abort with error if it's not the last argument in the definition
1262 1. If last argument has limited number of values
1264 1. Abort with error if number of values doesn't match or is too large
1266 1. Abort with error if user didn't pass enough values (min_count)
# User passed arguments to a command that takes none: report and fail.
1269 if args and not args_def:
1270 ToStderr("Error: Command %s expects no arguments", cmd)
1277 last_idx = len(args_def) - 1
# Accumulate min/max counts; a None count means "already unbounded".
1279 for idx, arg in enumerate(args_def):
1280 if min_count is None:
1282 elif arg.min is not None:
1283 min_count += arg.min
1285 if max_count is None:
1287 elif arg.max is not None:
1288 max_count += arg.max
1291 check_max = (arg.max is not None)
# Only the final argument definition may be unbounded (max=None).
1293 elif arg.max is None:
1294 raise errors.ProgrammerError("Only the last argument can have max=None")
1297 # Command with exact number of arguments
1298 if (min_count is not None and max_count is not None and
1299 min_count == max_count and len(args) != min_count):
1300 ToStderr("Error: Command %s expects %d argument(s)", cmd, min_count)
1303 # Command with limited number of arguments
1304 if max_count is not None and len(args) > max_count:
1305 ToStderr("Error: Command %s expects only %d argument(s)",
1309 # Command with some required arguments
1310 if min_count is not None and len(args) < min_count:
1311 ToStderr("Error: Command %s expects at least %d argument(s)",
def SplitNodeOption(value):
  """Splits the value of a --node option.

  A value of the form "primary:secondary" yields the two-element list
  ["primary", "secondary"] (only the first colon splits); any other
  value (including None or the empty string) yields the tuple
  (value, None).

  """
  if not value or ":" not in value:
    return (value, None)
  return value.split(":", 1)
def CalculateOSNames(os_name, os_variants):
  """Calculates all the names an OS can be called, according to its variants.

  @type os_name: string
  @param os_name: base name of the os
  @type os_variants: list or None
  @param os_variants: list of supported variants
  @rtype: list
  @return: list of valid names

  """
  if not os_variants:
    return [os_name]
  return ["%s+%s" % (os_name, variant) for variant in os_variants]
def ParseFields(selected, default):
  """Parses the values of "--field"-like options.

  @type selected: string or None
  @param selected: User-selected options
  @param default: Default fields
  @return: the default fields when nothing was selected; the defaults
    extended by the listed fields when the selection starts with "+";
    otherwise just the comma-separated selected fields

  """
  if selected is None:
    fields = default
  elif selected.startswith("+"):
    fields = default + selected[1:].split(",")
  else:
    fields = selected.split(",")
  return fields
# Re-export of rpc.RunWithRPC under a CLI-oriented name; used as a decorator
# for command functions that need the RPC layer set up (see the rpc module).
1363 UsesRPC = rpc.RunWithRPC
1366 def AskUser(text, choices=None):
1367 """Ask the user a question.
1369 @param text: the question to ask
1371 @param choices: list with elements tuples (input_char, return_value,
1372 description); if not given, it will default to: [('y', True,
1373 'Perform the operation'), ('n', False, 'Do no do the operation')];
1374 note that the '?' char is reserved for help
1376 @return: one of the return values from the choices list; if input is
1377 not possible (i.e. not running with a tty, we return the last
1382 choices = [('y', True, 'Perform the operation'),
1383 ('n', False, 'Do not perform the operation')]
# Validate the choices structure up front; these are programmer errors.
1384 if not choices or not isinstance(choices, list):
1385 raise errors.ProgrammerError("Invalid choices argument to AskUser")
1386 for entry in choices:
1387 if not isinstance(entry, tuple) or len(entry) < 3 or entry[0] == '?':
1388 raise errors.ProgrammerError("Invalid choices element to AskUser")
# The last choice's return value doubles as the no-tty/default answer.
1390 answer = choices[-1][1]
# Re-wrap the question text to 70 columns, preserving explicit newlines.
1392 for line in text.splitlines():
1393 new_text.append(textwrap.fill(line, 70, replace_whitespace=False))
1394 text = "\n".join(new_text)
# Talk to the controlling terminal directly, not stdin/stdout, so the
# prompt works even when those are redirected.
1396 f = file("/dev/tty", "a+")
1400 chars = [entry[0] for entry in choices]
# Mark the default (last) choice with brackets in the prompt, e.g. y/[n].
1401 chars[-1] = "[%s]" % chars[-1]
1403 maps = dict([(entry[0], entry[1]) for entry in choices])
1407 f.write("/".join(chars))
# readline(2): read at most one answer char plus newline.
1409 line = f.readline(2).strip().lower()
# '?' (reserved above) prints the per-choice help descriptions.
1414 for entry in choices:
1415 f.write(" %s - %s\n" % (entry[0], entry[2]))
# Raised by SubmitOrSend (via --submit) to unwind back to GenericMain,
# whose FormatError handler prints the job ID; not an error condition.
1423 class JobSubmittedException(Exception):
1424 """Job was submitted, client should exit.
1426 This exception has one argument, the ID of the job that was
1427 submitted. The handler should print this ID.
1429 This is not an error, just a structured way to exit from clients.
1434 def SendJob(ops, cl=None):
1435 """Function to submit an opcode without waiting for the results.
1438 @param ops: list of opcodes
1439 @type cl: luxi.Client
1440 @param cl: the luxi client to use for communicating with the master;
1441 if None, a new client will be created
# Fire-and-forget: returns the job ID without polling for completion.
1447 job_id = cl.SubmitJob(ops)
1452 def GenericPollJob(job_id, cbs, report_cbs):
1453 """Generic job-polling function.
1455 @type job_id: number
1456 @param job_id: Job ID
1457 @type cbs: Instance of L{JobPollCbBase}
1458 @param cbs: Data callbacks
1459 @type report_cbs: Instance of L{JobPollReportCbBase}
1460 @param report_cbs: Reporting callbacks
# Track the last-seen job info and log serial so the wait call can block
# until something actually changed.
1463 prev_job_info = None
1464 prev_logmsg_serial = None
1469 result = cbs.WaitForJobChangeOnce(job_id, ["status"], prev_job_info,
1472 # job not found, go away!
1473 raise errors.JobLost("Job with id %s lost" % job_id)
# Timeout without change: let the reporter print a "still waiting" note.
1475 if result == constants.JOB_NOTCHANGED:
1476 report_cbs.ReportNotChanged(job_id, status)
1481 # Split result, a tuple of (field values, log entries)
1482 (job_info, log_entries) = result
1483 (status, ) = job_info
# Forward every new log entry and remember the highest serial seen.
1486 for log_entry in log_entries:
1487 (serial, timestamp, log_type, message) = log_entry
1488 report_cbs.ReportLogMessage(job_id, serial, timestamp,
1490 prev_logmsg_serial = max(prev_logmsg_serial, serial)
1492 # TODO: Handle canceled and archived jobs
# A terminal status ends the polling loop; final result is fetched below.
1493 elif status in (constants.JOB_STATUS_SUCCESS,
1494 constants.JOB_STATUS_ERROR,
1495 constants.JOB_STATUS_CANCELING,
1496 constants.JOB_STATUS_CANCELED):
1499 prev_job_info = job_info
# Fetch the per-opcode statuses and results for the finished job.
1501 jobs = cbs.QueryJobs([job_id], ["status", "opstatus", "opresult"])
1503 raise errors.JobLost("Job with id %s lost" % job_id)
1505 status, opstatus, result = jobs[0]
1507 if status == constants.JOB_STATUS_SUCCESS:
1510 if status in (constants.JOB_STATUS_CANCELING, constants.JOB_STATUS_CANCELED):
1511 raise errors.OpExecError("Job was canceled")
# Failed job: find the first failing opcode and re-raise its error, with
# an index so the user can tell which opcode of the job broke.
1514 for idx, (status, msg) in enumerate(zip(opstatus, result)):
1515 if status == constants.OP_STATUS_SUCCESS:
1517 elif status == constants.OP_STATUS_ERROR:
1518 errors.MaybeRaise(msg)
1521 raise errors.OpExecError("partial failure (opcode %d): %s" %
1524 raise errors.OpExecError(str(msg))
1526 # default failure mode
1527 raise errors.OpExecError(result)
# Abstract data-access interface for GenericPollJob; concrete subclasses
# (e.g. _LuxiJobPollCb below) wire it to a transport.
1530 class JobPollCbBase:
1531 """Base class for L{GenericPollJob} callbacks.
1535 """Initializes this class.
1539 def WaitForJobChangeOnce(self, job_id, fields,
1540 prev_job_info, prev_log_serial):
1541 """Waits for changes on a job.
1544 raise NotImplementedError()
1546 def QueryJobs(self, job_ids, fields):
1547 """Returns the selected fields for the selected job IDs.
1549 @type job_ids: list of numbers
1550 @param job_ids: Job IDs
1551 @type fields: list of strings
1552 @param fields: Fields
1555 raise NotImplementedError()
# Abstract reporting interface for GenericPollJob; subclasses decide where
# progress output goes (stdio, feedback function, ...).
1558 class JobPollReportCbBase:
1559 """Base class for L{GenericPollJob} reporting callbacks.
1563 """Initializes this class.
1567 def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
1568 """Handles a log message.
1571 raise NotImplementedError()
1573 def ReportNotChanged(self, job_id, status):
1574 """Called for if a job hasn't changed in a while.
1576 @type job_id: number
1577 @param job_id: Job ID
1578 @type status: string or None
1579 @param status: Job status if available
1582 raise NotImplementedError()
# JobPollCbBase implementation that delegates straight to a luxi client.
1585 class _LuxiJobPollCb(JobPollCbBase):
1586 def __init__(self, cl):
1587 """Initializes this class.
1590 JobPollCbBase.__init__(self)
1593 def WaitForJobChangeOnce(self, job_id, fields,
1594 prev_job_info, prev_log_serial):
1595 """Waits for changes on a job.
1598 return self.cl.WaitForJobChangeOnce(job_id, fields,
1599 prev_job_info, prev_log_serial)
1601 def QueryJobs(self, job_ids, fields):
1602 """Returns the selected fields for the selected job IDs.
1605 return self.cl.QueryJobs(job_ids, fields)
# Reporting callback that forwards log messages to a caller-supplied
# feedback function and deliberately ignores "not changed" events.
1608 class FeedbackFnJobPollReportCb(JobPollReportCbBase):
1609 def __init__(self, feedback_fn):
1610 """Initializes this class.
1613 JobPollReportCbBase.__init__(self)
1615 self.feedback_fn = feedback_fn
1617 assert callable(feedback_fn)
1619 def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
1620 """Handles a log message.
# Feedback function receives the raw (timestamp, type, message) tuple.
1623 self.feedback_fn((timestamp, log_type, log_msg))
1625 def ReportNotChanged(self, job_id, status):
1626 """Called if a job hasn't changed in a while.
# Reporting callback that prints job progress to stdout/stderr, notifying
# at most once each about "queued" and "waiting for locks" states.
1632 class StdioJobPollReportCb(JobPollReportCbBase):
1634 """Initializes this class.
1637 JobPollReportCbBase.__init__(self)
1639 self.notified_queued = False
1640 self.notified_waitlock = False
1642 def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
1643 """Handles a log message.
1646 ToStdout("%s %s", time.ctime(utils.MergeTime(timestamp)),
1647 FormatLogMessage(log_type, log_msg))
1649 def ReportNotChanged(self, job_id, status):
1650 """Called if a job hasn't changed in a while.
# Print each state notice only once to avoid flooding the console while
# the job sits in the queue or waits for locks.
1656 if status == constants.JOB_STATUS_QUEUED and not self.notified_queued:
1657 ToStderr("Job %s is waiting in queue", job_id)
1658 self.notified_queued = True
1660 elif status == constants.JOB_STATUS_WAITLOCK and not self.notified_waitlock:
1661 ToStderr("Job %s is trying to acquire all necessary locks", job_id)
1662 self.notified_waitlock = True
def FormatLogMessage(log_type, log_msg):
  """Formats a job message according to its type.

  Payloads that are not plain messages (log_type other than
  constants.ELOG_MESSAGE) are stringified first; the result is always
  passed through L{utils.SafeEncode}.

  """
  if log_type == constants.ELOG_MESSAGE:
    return utils.SafeEncode(log_msg)
  return utils.SafeEncode(str(log_msg))
1675 def PollJob(job_id, cl=None, feedback_fn=None, reporter=None):
1676 """Function to poll for the result of a job.
1678 @type job_id: job identified
1679 @param job_id: the job to poll for results
1680 @type cl: luxi.Client
1681 @param cl: the luxi client to use for communicating with the master;
1682 if None, a new client will be created
# Pick a reporter: explicit reporter wins; else wrap feedback_fn; else
# fall back to printing on stdio. Supplying both is a programming error.
1688 if reporter is None:
1690 reporter = FeedbackFnJobPollReportCb(feedback_fn)
1692 reporter = StdioJobPollReportCb()
1694 raise errors.ProgrammerError("Can't specify reporter and feedback function")
1696 return GenericPollJob(job_id, _LuxiJobPollCb(cl), reporter)
1699 def SubmitOpCode(op, cl=None, feedback_fn=None, opts=None, reporter=None):
1700 """Legacy function to submit an opcode.
1702 This is just a simple wrapper over the construction of the processor
1703 instance. It should be extended to better handle feedback and
1704 interaction functions.
# Apply generic options (debug, dry-run, priority) to the opcode, submit
# it as a single-opcode job and block until that job finishes.
1710 SetGenericOpcodeOpts([op], opts)
1712 job_id = SendJob([op], cl=cl)
1714 op_results = PollJob(job_id, cl=cl, feedback_fn=feedback_fn,
# Single opcode submitted, so only its (first) result is returned.
1717 return op_results[0]
1720 def SubmitOrSend(op, opts, cl=None, feedback_fn=None):
1721 """Wrapper around SubmitOpCode or SendJob.
1723 This function will decide, based on the 'opts' parameter, whether to
1724 submit and wait for the result of the opcode (and return it), or
1725 whether to just send the job and print its identifier. It is used in
1726 order to simplify the implementation of the '--submit' option.
1728 It will also process the opcodes if we're sending the via SendJob
1729 (otherwise SubmitOpCode does it).
# --submit path: enqueue and raise JobSubmittedException so the caller's
# top level prints the job ID and exits without waiting.
1732 if opts and opts.submit_only:
1734 SetGenericOpcodeOpts(job, opts)
1735 job_id = SendJob(job, cl=cl)
1736 raise JobSubmittedException(job_id)
1738 return SubmitOpCode(op, cl=cl, feedback_fn=feedback_fn, opts=opts)
1741 def SetGenericOpcodeOpts(opcode_list, options):
1742 """Processor for generic options.
1744 This function updates the given opcodes based on generic command
1745 line options (like debug, dry-run, etc.).
1747 @param opcode_list: list of opcodes
1748 @param options: command line options or None
1749 @return: None (in-place modification)
# Copy the generic CLI flags onto every opcode; dry_run/priority are only
# set when present/explicitly given on the command line.
1754 for op in opcode_list:
1755 op.debug_level = options.debug
1756 if hasattr(options, "dry_run"):
1757 op.dry_run = options.dry_run
1758 if getattr(options, "priority", None) is not None:
1759 op.priority = _PRIONAME_TO_VALUE[options.priority]
1763 # TODO: Cache object?
# Build a luxi client; if the master daemon is unreachable, inspect the
# local ssconf data to produce a more helpful error (uninitialized
# cluster vs. running on a non-master node).
1765 client = luxi.Client()
1766 except luxi.NoMasterError:
1767 ss = ssconf.SimpleStore()
1769 # Try to read ssconf file
1772 except errors.ConfigurationError:
1773 raise errors.OpPrereqError("Cluster not initialized or this machine is"
1774 " not part of a cluster")
1776 master, myself = ssconf.GetMasterAndMyself(ss=ss)
1777 if master != myself:
1778 raise errors.OpPrereqError("This is not the master node, please connect"
1779 " to node '%s' and rerun the command" %
1785 def FormatError(err):
1786 """Return a formatted error message for a given error.
1788 This function takes an exception instance and returns a tuple
1789 consisting of two values: first, the recommended exit code, and
1790 second, a string describing the error message (not
1791 newline-terminated).
# Dispatch on the exception type, most specific classes first; each branch
# writes a user-facing message into the output buffer.
1797 if isinstance(err, errors.ConfigurationError):
1798 txt = "Corrupt configuration file: %s" % msg
1800 obuf.write(txt + "\n")
1801 obuf.write("Aborting.")
1803 elif isinstance(err, errors.HooksAbort):
1804 obuf.write("Failure: hooks execution failed:\n")
# err.args[0] holds (node, script, output) triples for the failed hooks.
1805 for node, script, out in err.args[0]:
1807 obuf.write(" node: %s, script: %s, output: %s\n" %
1808 (node, script, out))
1810 obuf.write(" node: %s, script: %s (no output)\n" %
1812 elif isinstance(err, errors.HooksFailure):
1813 obuf.write("Failure: hooks general failure: %s" % msg)
1814 elif isinstance(err, errors.ResolverError):
# Distinguish "can't resolve myself" from a generic resolution failure.
1815 this_host = netutils.Hostname.GetSysName()
1816 if err.args[0] == this_host:
1817 msg = "Failure: can't resolve my own hostname ('%s')"
1819 msg = "Failure: can't resolve hostname '%s'"
1820 obuf.write(msg % err.args[0])
1821 elif isinstance(err, errors.OpPrereqError):
1822 if len(err.args) == 2:
1823 obuf.write("Failure: prerequisites not met for this"
1824 " operation:\nerror type: %s, error details:\n%s" %
1825 (err.args[1], err.args[0]))
1827 obuf.write("Failure: prerequisites not met for this"
1828 " operation:\n%s" % msg)
1829 elif isinstance(err, errors.OpExecError):
1830 obuf.write("Failure: command execution error:\n%s" % msg)
1831 elif isinstance(err, errors.TagError):
1832 obuf.write("Failure: invalid tag(s) given:\n%s" % msg)
1833 elif isinstance(err, errors.JobQueueDrainError):
1834 obuf.write("Failure: the job queue is marked for drain and doesn't"
1835 " accept new requests\n")
1836 elif isinstance(err, errors.JobQueueFull):
1837 obuf.write("Failure: the job queue is full and doesn't accept new"
1838 " job submissions until old jobs are archived\n")
1839 elif isinstance(err, errors.TypeEnforcementError):
1840 obuf.write("Parameter Error: %s" % msg)
1841 elif isinstance(err, errors.ParameterError):
1842 obuf.write("Failure: unknown/wrong parameter name '%s'" % msg)
1843 elif isinstance(err, luxi.NoMasterError):
1844 obuf.write("Cannot communicate with the master daemon.\nIs it running"
1845 " and listening for connections?")
1846 elif isinstance(err, luxi.TimeoutError):
1847 obuf.write("Timeout while talking to the master daemon. Error:\n"
1849 elif isinstance(err, luxi.PermissionError):
1850 obuf.write("It seems you don't have permissions to connect to the"
1851 " master daemon.\nPlease retry as a different user.")
1852 elif isinstance(err, luxi.ProtocolError):
1853 obuf.write("Unhandled protocol error while talking to the master daemon:\n"
1855 elif isinstance(err, errors.JobLost):
1856 obuf.write("Error checking job status: %s" % msg)
# GenericError last among Ganeti errors: it is the base class, so more
# specific subclasses above must be tested first.
1857 elif isinstance(err, errors.GenericError):
1858 obuf.write("Unhandled Ganeti error: %s" % msg)
# Not an error: --submit path reporting the submitted job's ID.
1859 elif isinstance(err, JobSubmittedException):
1860 obuf.write("JobID: %s\n" % err.args[0])
1863 obuf.write("Unhandled exception: %s" % msg)
1864 return retcode, obuf.getvalue().rstrip('\n')
1867 def GenericMain(commands, override=None, aliases=None):
1868 """Generic main function for all the gnt-* commands.
1871 - commands: a dictionary with a special structure, see the design doc
1872 for command line handling.
1873 - override: if not None, we expect a dictionary with keys that will
1874 override command line options; this can be used to pass
1875 options from the scripts to generic functions
1876 - aliases: dictionary with command aliases {'alias': 'target, ...}
1879 # save the program name and the entire command line for later logging
1881 binary = os.path.basename(sys.argv[0]) or sys.argv[0]
1882 if len(sys.argv) >= 2:
# Log "binary subcommand" plus the remaining arguments separately.
1883 binary += " " + sys.argv[1]
1884 old_cmdline = " ".join(sys.argv[2:])
1888 binary = "<unknown program>"
1895 func, options, args = _ParseArgs(sys.argv, commands, aliases)
1896 except errors.ParameterError, err:
1897 result, err_msg = FormatError(err)
1901 if func is None: # parse error
# Apply script-supplied option overrides after parsing.
1904 if override is not None:
1905 for key, val in override.iteritems():
1906 setattr(options, key, val)
1908 utils.SetupLogging(constants.LOG_COMMANDS, debug=options.debug,
1909 stderr_logging=True, program=binary)
1912 logging.info("run with arguments '%s'", old_cmdline)
1914 logging.info("run with no arguments")
# Run the selected command; JobSubmittedException is handled here too,
# since FormatError turns it into a "JobID: ..." message.
1917 result = func(options, args)
1918 except (errors.GenericError, luxi.ProtocolError,
1919 JobSubmittedException), err:
1920 result, err_msg = FormatError(err)
1921 logging.exception("Error during command processing")
1927 def ParseNicOption(optvalue):
1928 """Parses the value of the --net option(s).
1932 nic_max = max(int(nidx[0]) + 1 for nidx in optvalue)
1933 except (TypeError, ValueError), err:
1934 raise errors.OpPrereqError("Invalid NIC index passed: %s" % str(err))
1936 nics = [{}] * nic_max
1937 for nidx, ndict in optvalue:
1940 if not isinstance(ndict, dict):
1941 raise errors.OpPrereqError("Invalid nic/%d value: expected dict,"
1942 " got %s" % (nidx, ndict))
1944 utils.ForceDictType(ndict, constants.INIC_PARAMS_TYPES)
1951 def GenericInstanceCreate(mode, opts, args):
1952 """Add an instance to the cluster via either creation or import.
1954 @param mode: constants.INSTANCE_CREATE or constants.INSTANCE_IMPORT
1955 @param opts: the command line options selected by the user
1957 @param args: should contain only one element, the new instance name
1959 @return: the desired exit code
# --node may carry "primary:secondary"; split it here.
1964 (pnode, snode) = SplitNodeOption(opts.node)
1969 hypervisor, hvparams = opts.hypervisor
1972 nics = ParseNicOption(opts.nics)
1976 elif mode == constants.INSTANCE_CREATE:
1977 # default of one nic, all auto
# Disk handling: diskless forbids any disk info; otherwise either --disk
# or the single-disk -s/--os-size shortcut must be given (but not both).
1983 if opts.disk_template == constants.DT_DISKLESS:
1984 if opts.disks or opts.sd_size is not None:
1985 raise errors.OpPrereqError("Diskless instance but disk"
1986 " information passed")
1989 if (not opts.disks and not opts.sd_size
1990 and mode == constants.INSTANCE_CREATE):
1991 raise errors.OpPrereqError("No disk information specified")
1992 if opts.disks and opts.sd_size is not None:
1993 raise errors.OpPrereqError("Please use either the '--disk' or"
# Translate the -s shortcut into a one-entry disk list for disk 0.
1995 if opts.sd_size is not None:
1996 opts.disks = [(0, {"size": opts.sd_size})]
2000 disk_max = max(int(didx[0]) + 1 for didx in opts.disks)
2001 except ValueError, err:
2002 raise errors.OpPrereqError("Invalid disk index passed: %s" % str(err))
2003 disks = [{}] * disk_max
2006 for didx, ddict in opts.disks:
2008 if not isinstance(ddict, dict):
2009 msg = "Invalid disk/%d value: expected dict, got %s" % (didx, ddict)
2010 raise errors.OpPrereqError(msg)
# A disk entry is either sized ("size") or adopted ("adopt"), not both.
2011 elif "size" in ddict:
2012 if "adopt" in ddict:
2013 raise errors.OpPrereqError("Only one of 'size' and 'adopt' allowed"
2014 " (disk %d)" % didx)
2016 ddict["size"] = utils.ParseUnit(ddict["size"])
2017 except ValueError, err:
2018 raise errors.OpPrereqError("Invalid disk size for disk %d: %s" %
2020 elif "adopt" in ddict:
2021 if mode == constants.INSTANCE_IMPORT:
2022 raise errors.OpPrereqError("Disk adoption not allowed for instance"
2026 raise errors.OpPrereqError("Missing size or adoption source for"
2030 utils.ForceDictType(opts.beparams, constants.BES_PARAMETER_TYPES)
2031 utils.ForceDictType(hvparams, constants.HVS_PARAMETER_TYPES)
# Mode-specific knobs: create installs an OS, import pulls from src_node.
2033 if mode == constants.INSTANCE_CREATE:
2036 force_variant = opts.force_variant
2039 no_install = opts.no_install
2040 identify_defaults = False
2041 elif mode == constants.INSTANCE_IMPORT:
2044 force_variant = False
2045 src_node = opts.src_node
2046 src_path = opts.src_dir
2048 identify_defaults = opts.identify_defaults
2050 raise errors.ProgrammerError("Invalid creation mode %s" % mode)
2052 op = opcodes.OpCreateInstance(instance_name=instance,
2054 disk_template=opts.disk_template,
2056 pnode=pnode, snode=snode,
2057 ip_check=opts.ip_check,
2058 name_check=opts.name_check,
2059 wait_for_sync=opts.wait_for_sync,
2060 file_storage_dir=opts.file_storage_dir,
2061 file_driver=opts.file_driver,
2062 iallocator=opts.iallocator,
2063 hypervisor=hypervisor,
2065 beparams=opts.beparams,
2066 osparams=opts.osparams,
2070 force_variant=force_variant,
2073 no_install=no_install,
2074 identify_defaults=identify_defaults)
# Honors --submit via SubmitOrSend.
2076 SubmitOrSend(op, opts)
2080 class _RunWhileClusterStoppedHelper:
2081 """Helper class for L{RunWhileClusterStopped} to simplify state management
2084 def __init__(self, feedback_fn, cluster_name, master_node, online_nodes):
2085 """Initializes this class.
2087 @type feedback_fn: callable
2088 @param feedback_fn: Feedback function
2089 @type cluster_name: string
2090 @param cluster_name: Cluster name
2091 @type master_node: string
2092 @param master_node Master node name
2093 @type online_nodes: list
2094 @param online_nodes: List of names of online nodes
2097 self.feedback_fn = feedback_fn
2098 self.cluster_name = cluster_name
2099 self.master_node = master_node
2100 self.online_nodes = online_nodes
2102 self.ssh = ssh.SshRunner(self.cluster_name)
# Remote nodes are everything online except the master; the master is
# handled locally (and restarted last in Call()).
2104 self.nonmaster_nodes = [name for name in online_nodes
2105 if name != master_node]
2107 assert self.master_node not in self.nonmaster_nodes
2109 def _RunCmd(self, node_name, cmd):
2110 """Runs a command on the local or a remote machine.
2112 @type node_name: string
2113 @param node_name: Machine name
# node_name None means "run locally"; the master is local by definition.
2118 if node_name is None or node_name == self.master_node:
2119 # No need to use SSH
2120 result = utils.RunCmd(cmd)
2122 result = self.ssh.Run(node_name, "root", utils.ShellQuoteArgs(cmd))
# Any command failure is escalated as OpExecError with full context.
2125 errmsg = ["Failed to run command %s" % result.cmd]
2127 errmsg.append("on node %s" % node_name)
2128 errmsg.append(": exitcode %s and error %s" %
2129 (result.exit_code, result.output))
2130 raise errors.OpExecError(" ".join(errmsg))
2132 def Call(self, fn, *args):
2133 """Call function while all daemons are stopped.
2136 @param fn: Function to be called
2139 # Pause watcher by acquiring an exclusive lock on watcher state file
2140 self.feedback_fn("Blocking watcher")
2141 watcher_block = utils.FileLock.Open(constants.WATCHER_STATEFILE)
2143 # TODO: Currently, this just blocks. There's no timeout.
2144 # TODO: Should it be a shared lock?
2145 watcher_block.Exclusive(blocking=True)
2147 # Stop master daemons, so that no new jobs can come in and all running
2149 self.feedback_fn("Stopping master daemons")
2150 self._RunCmd(None, [constants.DAEMON_UTIL, "stop-master"])
2152 # Stop daemons on all nodes
2153 for node_name in self.online_nodes:
2154 self.feedback_fn("Stopping daemons on %s" % node_name)
2155 self._RunCmd(node_name, [constants.DAEMON_UTIL, "stop-all"])
2157 # All daemons are shut down now
2159 return fn(self, *args)
2160 except Exception, err:
# Report the failure via feedback, but carry on to restart the cluster.
2161 _, errmsg = FormatError(err)
2162 logging.exception("Caught exception")
2163 self.feedback_fn(errmsg)
2166 # Start cluster again, master node last
2167 for node_name in self.nonmaster_nodes + [self.master_node]:
2168 self.feedback_fn("Starting daemons on %s" % node_name)
2169 self._RunCmd(node_name, [constants.DAEMON_UTIL, "start-all"])
# Release the watcher lock only after daemons are back up.
2172 watcher_block.Close()
2175 def RunWhileClusterStopped(feedback_fn, fn, *args):
2176 """Calls a function while all cluster daemons are stopped.
2178 @type feedback_fn: callable
2179 @param feedback_fn: Feedback function
2181 @param fn: Function to be called when daemons are stopped
2184 feedback_fn("Gathering cluster information")
2186 # This ensures we're running on the master daemon
2189 (cluster_name, master_node) = \
2190 cl.QueryConfigValues(["cluster_name", "master_node"])
2192 online_nodes = GetOnlineNodes([], cl=cl)
2194 # Don't keep a reference to the client. The master daemon will go away.
2197 assert master_node in online_nodes
# Delegate the actual stop/call/restart choreography to the helper class.
2199 return _RunWhileClusterStoppedHelper(feedback_fn, cluster_name, master_node,
2200 online_nodes).Call(fn, *args)
2203 def GenerateTable(headers, fields, separator, data,
2204 numfields=None, unitfields=None,
2206 """Prints a table with headers and different fields.
2209 @param headers: dictionary mapping field names to headers for
2212 @param fields: the field names corresponding to each row in
2214 @param separator: the separator to be used; if this is None,
2215 the default 'smart' algorithm is used which computes optimal
2216 field width, otherwise just the separator is used between
2219 @param data: a list of lists, each sublist being one row to be output
2220 @type numfields: list
2221 @param numfields: a list with the fields that hold numeric
2222 values and thus should be right-aligned
2223 @type unitfields: list
2224 @param unitfields: a list with the fields that hold numeric
2225 values that should be formatted with the units field
2226 @type units: string or None
2227 @param units: the units we should use for formatting, or None for
2228 automatic choice (human-readable for non-separator usage, otherwise
2229 megabytes); this is a one-letter string
2238 if numfields is None:
2240 if unitfields is None:
# FieldSet supports pattern matching on field names, see Matches() below.
2243 numfields = utils.FieldSet(*numfields) # pylint: disable-msg=W0142
2244 unitfields = utils.FieldSet(*unitfields) # pylint: disable-msg=W0142
# Build one %-format specifier per column: plain for separator mode,
# width-padded (right for numeric, left otherwise) for smart mode.
2247 for field in fields:
2248 if headers and field not in headers:
2249 # TODO: handle better unknown fields (either revert to old
2250 # style of raising exception, or deal more intelligently with
2252 headers[field] = field
2253 if separator is not None:
2254 format_fields.append("%s")
2255 elif numfields.Matches(field):
2256 format_fields.append("%*s")
2258 format_fields.append("%-*s")
2260 if separator is None:
2261 mlens = [0 for name in fields]
2262 format_str = ' '.join(format_fields)
# Escape literal % in the separator so it survives %-formatting.
2264 format_str = separator.replace("%", "%%").join(format_fields)
# First pass: stringify cells (applying unit formatting) and record the
# maximum width per column for smart alignment.
2269 for idx, val in enumerate(row):
2270 if unitfields.Matches(fields[idx]):
2273 except (TypeError, ValueError):
2276 val = row[idx] = utils.FormatUnit(val, units)
2277 val = row[idx] = str(val)
2278 if separator is None:
2279 mlens[idx] = max(mlens[idx], len(val))
# Header row: widths also account for header label lengths.
2284 for idx, name in enumerate(fields):
2286 if separator is None:
2287 mlens[idx] = max(mlens[idx], len(hdr))
2288 args.append(mlens[idx])
2290 result.append(format_str % tuple(args))
2292 if separator is None:
2293 assert len(mlens) == len(fields)
# Let the last column bleed instead of padding it, unless numeric.
2295 if fields and not numfields.Matches(fields[-1]):
2301 line = ['-' for _ in fields]
2302 for idx in range(len(fields)):
2303 if separator is None:
2304 args.append(mlens[idx])
2305 args.append(line[idx])
2306 result.append(format_str % tuple(args))
# Used by _DEFAULT_FORMAT_QUERY below for QFT_BOOL fields; body elided in
# this listing.
2311 def _FormatBool(value):
2312 """Formats a boolean value as a string.
2320 #: Default formatting for query results; (callback, align right)
# QFT_UNIT is intentionally absent: it needs the runtime unit setting and
# is handled specially in _GetColumnFormatter.
2321 _DEFAULT_FORMAT_QUERY = {
2322 constants.QFT_TEXT: (str, False),
2323 constants.QFT_BOOL: (_FormatBool, False),
2324 constants.QFT_NUMBER: (str, True),
2325 constants.QFT_TIMESTAMP: (utils.FormatTime, False),
2326 constants.QFT_OTHER: (str, False),
2327 constants.QFT_UNKNOWN: (str, False),
2331 def _GetColumnFormatter(fdef, override, unit):
2332 """Returns formatting function for a field.

2334 @type fdef: L{objects.QueryFieldDefinition}
2335 @type override: dict
2336 @param override: Dictionary for overriding field formatting functions,
2337 indexed by field name, contents like L{_DEFAULT_FORMAT_QUERY}
2339 @param unit: Unit used for formatting fields of type L{constants.QFT_UNIT}
2340 @rtype: tuple; (callable, bool)
2341 @return: Returns the function to format a value (takes one parameter) and a
2342 boolean for aligning the value on the right-hand side
# Per-field override takes precedence over the per-type default.
2345 fmt = override.get(fdef.name, None)
2349 assert constants.QFT_UNIT not in _DEFAULT_FORMAT_QUERY
2351 if fdef.kind == constants.QFT_UNIT:
2352 # Can't keep this information in the static dictionary
# Closure captures the runtime unit; unit columns are right-aligned.
2353 return (lambda value: utils.FormatUnit(value, unit), True)
2355 fmt = _DEFAULT_FORMAT_QUERY.get(fdef.kind, None)
2359 raise NotImplementedError("Can't format column type '%s'" % fdef.kind)
2362 class _QueryColumnFormatter:
2363 """Callable class for formatting fields of a query.
2366 def __init__(self, fn, status_fn):
2367 """Initializes this class.
2370 @param fn: Formatting function
2371 @type status_fn: callable
2372 @param status_fn: Function to report fields' status
2376 self._status_fn = status_fn
2378 def __call__(self, data):
2379 """Returns a field's string representation.
# Each query cell arrives as a (status, value) pair.
2382 (status, value) = data
# Record the status first so overall-result statistics stay accurate
# even for abnormal cells.
2385 self._status_fn(status)
2387 if status == constants.QRFS_NORMAL:
2388 return self._fn(value)
# Abnormal statuses must not carry a value.
2390 assert value is None, \
2391 "Found value %r for abnormal status %s" % (value, status)
2393 if status == constants.QRFS_UNKNOWN:
2396 if status == constants.QRFS_NODATA:
2399 if status == constants.QRFS_UNAVAIL:
2402 raise NotImplementedError("Unknown status %s" % status)
2405 def FormatQueryResult(result, unit=None, format_override=None, separator=None,
2407 """Formats data in L{objects.QueryResponse}.
2409 @type result: L{objects.QueryResponse}
2410 @param result: result of query operation
2412 @param unit: Unit used for formatting fields of type L{constants.QFT_UNIT},
2413 see L{utils.FormatUnit}
2414 @type format_override: dict
2415 @param format_override: Dictionary for overriding field formatting functions,
2416 indexed by field name, contents like L{_DEFAULT_FORMAT_QUERY}
2417 @type separator: string or None
2418 @param separator: String used to separate fields
2420 @param header: Whether to output header row
2429 if format_override is None:
2430 format_override = {}
# Per-status counters, filled in by the formatter callback below.
2432 stats = dict.fromkeys(constants.QRFS_ALL, 0)
2434 def _RecordStatus(status):
# Build one TableColumn per field; the formatter also records statuses.
2439 for fdef in result.fields:
2440 assert fdef.title and fdef.name
2441 (fn, align_right) = _GetColumnFormatter(fdef, format_override, unit)
2442 columns.append(TableColumn(fdef.title,
2443 _QueryColumnFormatter(fn, _RecordStatus),
2446 table = FormatTable(result.data, columns, header, separator)
2448 # Collect statistics
2449 assert len(stats) == len(constants.QRFS_ALL)
2450 assert compat.all(count >= 0 for count in stats.values())
2452 # Determine overall status. If there was no data, unknown fields must be
2453 # detected via the field definitions.
2454 if (stats[constants.QRFS_UNKNOWN] or
2455 (not result.data and _GetUnknownFields(result.fields))):
2457 elif compat.any(count > 0 for key, count in stats.items()
2458 if key != constants.QRFS_NORMAL):
2459 status = QR_INCOMPLETE
2463 return (status, table)
def _GetUnknownFields(fdefs):
  """Returns list of unknown fields included in C{fdefs}.

  @type fdefs: list of L{objects.QueryFieldDefinition}

  """
  unknown = []
  for fdef in fdefs:
    if fdef.kind == constants.QFT_UNKNOWN:
      unknown.append(fdef)
  return unknown
def _WarnUnknownFields(fdefs):
  """Prints a warning to stderr if a query included unknown fields.

  @type fdefs: list of L{objects.QueryFieldDefinition}
  unknown = _GetUnknownFields(fdefs)
    # Tell the user which requested fields were not recognized
    ToStderr("Warning: Queried for unknown fields %s",
             utils.CommaJoin(fdef.name for fdef in unknown))
def GenericList(resource, fields, names, unit, separator, header, cl=None,
                format_override=None):
  """Generic implementation for listing all items of a resource.

  @param resource: One of L{constants.QR_OP_LUXI}
  @type fields: list of strings
  @param fields: List of fields to query for
  @type names: list of strings
  @param names: Names of items to query for
  @type unit: string or None
  @param unit: Unit used for formatting fields of type L{constants.QFT_UNIT} or
    None for automatic choice (human-readable for non-separator usage,
    otherwise megabytes); this is a one-letter string
  @type separator: string or None
  @param separator: String used to separate fields
  @param header: Whether to show header row
  @type format_override: dict
  @param format_override: Dictionary for overriding field formatting functions,
    indexed by field name, contents like L{_DEFAULT_FORMAT_QUERY}
  @rtype: int
  @return: one of the C{constants.EXIT_*} codes
  # Retrieve the items, restricted to the given names via a simple filter
  response = cl.Query(resource, fields, qlang.MakeSimpleFilter("name", names))

  found_unknown = _WarnUnknownFields(response.fields)

  (status, data) = FormatQueryResult(response, unit=unit, separator=separator,
                                     format_override=format_override)

  # A warning must have been printed if, and only if, the formatted result
  # reported unknown fields
  assert ((found_unknown and status == QR_UNKNOWN) or
          (not found_unknown and status != QR_UNKNOWN))

  if status == QR_UNKNOWN:
    return constants.EXIT_UNKNOWN_FIELD

  # TODO: Should the list command fail if not all data could be collected?
  return constants.EXIT_SUCCESS
def GenericListFields(resource, fields, separator, header, cl=None):
  """Generic implementation for listing fields for a resource.

  @param resource: One of L{constants.QR_OP_LUXI}
  @type fields: list of strings
  @param fields: List of fields to query for
  @type separator: string or None
  @param separator: String used to separate fields
  @param header: Whether to show header row
  @rtype: int
  @return: one of the C{constants.EXIT_*} codes
  response = cl.QueryFields(resource, fields)

  found_unknown = _WarnUnknownFields(response.fields)

    # Fixed two-column layout: field name and its human-readable title
    TableColumn("Name", str, False),
    TableColumn("Title", str, False),
    # TODO: Add field description to master daemon

  rows = [[fdef.name, fdef.title] for fdef in response.fields]

  for line in FormatTable(rows, columns, header, separator):

    # Fail if any of the requested fields was unknown
    return constants.EXIT_UNKNOWN_FIELD

  return constants.EXIT_SUCCESS
  """Describes a column for L{FormatTable}.

  def __init__(self, title, fn, align_right):
    """Initializes this class.

    @type title: string
    @param title: Column title
    @type fn: callable
    @param fn: Formatting function
    @type align_right: bool
    @param align_right: Whether to align values on the right-hand side
    # Whether cell values are padded on the left (right-aligned)
    self.align_right = align_right
2599 def _GetColFormatString(width, align_right):
2600 """Returns the format string for a field.
2608 return "%%%s%ss" % (sign, width)
def FormatTable(rows, columns, header, separator):
  """Formats data as a table.

  @type rows: list of lists
  @param rows: Row data, one list per row
  @type columns: list of L{TableColumn}
  @param columns: Column descriptions
  @param header: Whether to show header row
  @type separator: string or None
  @param separator: String used to separate columns
  @rtype: list of strings
  @return: the formatted table, one string per output line
    # Use the column titles as the first output row and seed the column
    # widths from the title lengths
    data = [[col.title for col in columns]]
    colwidth = [len(col.title) for col in columns]
    colwidth = [0 for _ in columns]

    assert len(row) == len(columns)

    # Apply each column's formatting function to its cell value
    formatted = [col.format(value) for value, col in zip(row, columns)]

    if separator is None:
      # Update column widths
      for idx, (oldwidth, value) in enumerate(zip(colwidth, formatted)):
        # Modifying a list's items while iterating is fine
        colwidth[idx] = max(oldwidth, len(value))

    data.append(formatted)

  if separator is not None:
    # Return early if a separator is used
    return [separator.join(row) for row in data]

  if columns and not columns[-1].align_right:
    # Avoid unnecessary spaces at end of line

  # Build format string
  fmt = " ".join([_GetColFormatString(width, col.align_right)
                  for col, width in zip(columns, colwidth)])

  return [fmt % tuple(row) for row in data]
def FormatTimestamp(ts):
  """Formats a given timestamp.

  @type ts: timestamp
  @param ts: a timeval-type timestamp, a tuple of seconds and microseconds

  @rtype: string
  @return: a string with the formatted timestamp, or C{"?"} if the input is
    not a two-element tuple/list

  """
  if not isinstance(ts, (tuple, list)) or len(ts) != 2:
    # Malformed input; return a placeholder instead of crashing
    return "?"

  (sec, usec) = ts
  # "%F %T" is "%Y-%m-%d %H:%M:%S"; microseconds are appended zero-padded
  return time.strftime("%F %T", time.localtime(sec)) + ".%06d" % usec
def ParseTimespec(value):
  """Parse a time specification.

  The following suffixes will be recognized:

  Without any suffix, the value will be taken to be in seconds.

  @rtype: int
  @return: the number of seconds described by C{value}
  @raise errors.OpPrereqError: if the specification is empty or invalid
    raise errors.OpPrereqError("Empty time specification passed")
  if value[-1] not in suffix_map:
    # No known suffix: the whole value must parse as a plain integer
    except (TypeError, ValueError):
      raise errors.OpPrereqError("Invalid time specification '%s'" % value)
    # Known suffix: strip it and scale the numeric part by its multiplier
    multiplier = suffix_map[value[-1]]
    if not value: # no data left after stripping the suffix
      raise errors.OpPrereqError("Invalid time specification (only"
      value = int(value) * multiplier
    except (TypeError, ValueError):
      raise errors.OpPrereqError("Invalid time specification '%s'" % value)
def GetOnlineNodes(nodes, cl=None, nowarn=False, secondary_ips=False,
                   filter_master=False):
  """Returns the names of online nodes.

  This function will also log a warning on stderr with the names of
  @param nodes: if not empty, use only this subset of nodes (minus the
  @param cl: if not None, luxi client to use
  @type nowarn: boolean
  @param nowarn: by default, this function will output a note with the
    offline nodes that are skipped; if this parameter is True the
    note is not displayed
  @type secondary_ips: boolean
  @param secondary_ips: if True, return the secondary IPs instead of the
    names, useful for doing network traffic over the replication interface
  @type filter_master: boolean
  @param filter_master: if True, do not return the master node in the list
    (useful in coordination with secondary_ips where we cannot check our
    node name against the list)
    # Exclude the master node from the result by comparing names
    master_node = cl.QueryConfigValues(["master_node"])[0]
    filter_fn = lambda x: x != master_node
    filter_fn = lambda _: True

  result = cl.QueryNodes(names=nodes, fields=["name", "offline", "sip"],
  # Nodes whose "offline" flag (second column) is set
  offline = [row[0] for row in result if row[1]]
  if offline and not nowarn:
    ToStderr("Note: skipping offline node(s): %s" % utils.CommaJoin(offline))
  # Keep only online nodes accepted by filter_fn; name_idx selects either the
  # "name" or the "sip" column -- presumably set from secondary_ips above
  return [row[name_idx] for row in result if not row[1] and filter_fn(row[0])]
def _ToStream(stream, txt, *args):
  """Write a message to a stream, bypassing the logging system

  @type stream: file object
  @param stream: the file to which we should write
  @type txt: str
  @param txt: the message
    # With positional arguments given, "txt" is used as a %-format string
    stream.write(txt % args)
def ToStdout(txt, *args):
  """Writes a message to standard output, bypassing the logging system

  This is just a wrapper over _ToStream.

  @type txt: str
  @param txt: the message

  """
  _ToStream(sys.stdout, txt, *args)
def ToStderr(txt, *args):
  """Writes a message to standard error, bypassing the logging system

  This is just a wrapper over _ToStream.

  @type txt: str
  @param txt: the message

  """
  _ToStream(sys.stderr, txt, *args)
class JobExecutor(object):
  """Class which manages the submission and execution of multiple jobs.

  Note that instances of this class should not be reused between
  def __init__(self, cl=None, verbose=True, opts=None, feedback_fn=None):
    # Whether to print per-job progress information to stdout
    self.verbose = verbose
    # Callback passed through to job polling for log/progress messages
    self.feedback_fn = feedback_fn

  def QueueJob(self, name, *ops):
    """Record a job for later submit.

    @type name: string
    @param name: a description of the job, will be used in WaitJobSet
    SetGenericOpcodeOpts(ops, self.opts)
    self.queue.append((name, ops))

  def SubmitPending(self, each=False):
    """Submit all pending jobs.

    @type each: bool
    @param each: if True, submit jobs one by one instead of in a single
      SubmitManyJobs call
      for row in self.queue:
        # SubmitJob will remove the success status, but raise an exception if
        # the submission fails, so we'll notice that anyway.
        results.append([True, self.cl.SubmitJob(row[1])])
      # Submit all queued jobs in one luxi call
      results = self.cl.SubmitManyJobs([row[1] for row in self.queue])
    # Pair each submission result with the queued job's name
    for (idx, ((status, data), (name, _))) in enumerate(zip(results,
      self.jobs.append((idx, status, data, name))

  def _ChooseJob(self):
    """Choose a non-waiting/queued job to poll next.

    @return: a (idx, status, data, name) tuple removed from the tracked
      job list

    assert self.jobs, "_ChooseJob called with empty job list"

    result = self.cl.QueryJobs([i[2] for i in self.jobs], ["status"])

    for job_data, status in zip(self.jobs, result):
      # status is a list for jobs the master still knows about; anything
      # else means the job is gone and is treated as a candidate below
      if (isinstance(status, list) and status and
          status[0] in (constants.JOB_STATUS_QUEUED,
                        constants.JOB_STATUS_WAITLOCK,
                        constants.JOB_STATUS_CANCELING)):
        # job is still present and waiting
      # good candidate found (either running job or lost job)
      self.jobs.remove(job_data)

    return self.jobs.pop(0)

  def GetResults(self):
    """Wait for and return the results of all jobs.

    @rtype: list
    @return: list of tuples (success, job results), in the same order
      as the submitted jobs; if a job has failed, instead of the result
      there will be the error message
      self.SubmitPending()
    ok_jobs = [row[2] for row in self.jobs if row[1]]
      ToStdout("Submitted jobs %s", utils.CommaJoin(ok_jobs))

    # first, remove any non-submitted jobs
    self.jobs, failures = compat.partition(self.jobs, lambda x: x[1])
    for idx, _, jid, name in failures:
      ToStderr("Failed to submit job for %s: %s", name, jid)
      # for a failed submission the third field holds the error, not a job ID
      results.append((idx, False, jid))

      # Poll jobs in an order that prefers ones no longer waiting/queued
      (idx, _, jid, name) = self._ChooseJob()
      ToStdout("Waiting for job %s for %s...", jid, name)
        job_result = PollJob(jid, cl=self.cl, feedback_fn=self.feedback_fn)
      except errors.JobLost, err:
        _, job_result = FormatError(err)
        ToStderr("Job %s for %s has been archived, cannot check its result",
      except (errors.GenericError, luxi.ProtocolError), err:
        _, job_result = FormatError(err)
        # the error message will always be shown, verbose or not
        ToStderr("Job %s for %s has failed: %s", jid, name, job_result)

      results.append((idx, success, job_result))

    # sort based on the index, then drop it
    results = [i[1:] for i in results]

  def WaitOrShow(self, wait):
    """Wait for job results or only print the job IDs.

    @type wait: boolean
    @param wait: whether to wait or not
      return self.GetResults()
      self.SubmitPending()
      for _, status, result, name in self.jobs:
        # for successfully submitted jobs "result" is the job ID
        ToStdout("%s: %s", result, name)
        ToStderr("Failure for %s: %s", name, result)
      return [row[1:3] for row in self.jobs]