4 # Copyright (C) 2006, 2007 Google Inc.
6 # This program is free software; you can redistribute it and/or modify
7 # it under the terms of the GNU General Public License as published by
8 # the Free Software Foundation; either version 2 of the License, or
9 # (at your option) any later version.
11 # This program is distributed in the hope that it will be useful, but
12 # WITHOUT ANY WARRANTY; without even the implied warranty of
13 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 # General Public License for more details.
16 # You should have received a copy of the GNU General Public License
17 # along with this program; if not, write to the Free Software
18 # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
22 """Module dealing with command line parsing"""
30 from cStringIO import StringIO
32 from ganeti import utils
33 from ganeti import errors
34 from ganeti import constants
35 from ganeti import opcodes
36 from ganeti import luxi
37 from ganeti import ssconf
38 from ganeti import rpc
39 from ganeti import ssh
41 from optparse import (OptionParser, TitledHelpFormatter,
42 Option, OptionValueError)
  # NOTE(review): the lines below appear to be entries of the module's
  # __all__ export list; the opening '__all__ = [' is not visible in this
  # excerpt — confirm.
  # Command line options
  "FILESTORE_DRIVER_OPT",
  "IGNORE_FAILURES_OPT",
  "IGNORE_SECONDARIES_OPT",
  "NEW_CLUSTER_CERT_OPT",
  "NEW_CONFD_HMAC_KEY_OPT",
  "NOMODIFY_ETCHOSTS_OPT",
  "NOMODIFY_SSH_SETUP_OPT",
  "NOSSH_KEYCHECK_OPT",
  "SHUTDOWN_TIMEOUT_OPT",
  # Generic functions for CLI programs
  "GenericInstanceCreate",
  "JobSubmittedException",
  "RunWhileClusterStopped",
  # Formatting functions
  "ToStderr", "ToStdout",
  # command line options support infrastructure
  "ARGS_MANY_INSTANCES",
  "OPT_COMPL_INST_ADD_NODES",
  "OPT_COMPL_MANY_NODES",
  "OPT_COMPL_ONE_IALLOCATOR",
  "OPT_COMPL_ONE_INSTANCE",
  "OPT_COMPL_ONE_NODE",
def __init__(self, min=0, max=None): # pylint: disable-msg=W0622
  """Stores the minimum/maximum number of values for this argument."""
  # NOTE(review): the attribute assignments were reconstructed — __repr__
  # below reads self.min/self.max and ArgSuggest.__init__ forwards
  # min/max here; confirm against the full file.
  self.min = min
  self.max = max

def __repr__(self):
  return ("<%s min=%s max=%s>" %
          (self.__class__.__name__, self.min, self.max))
class ArgSuggest(_Argument):
  """Suggesting argument.

  Value can be any of the ones passed to the constructor.

  """
  # pylint: disable-msg=W0622
  def __init__(self, min=0, max=None, choices=None):
    _Argument.__init__(self, min=min, max=max)
    # optional list of suggested values for shell completion
    self.choices = choices

  # NOTE(review): the 'def __repr__' line was lost in this excerpt; the
  # method body below was orphaned — restored to mirror _Argument.__repr__.
  def __repr__(self):
    return ("<%s min=%s max=%s choices=%r>" %
            (self.__class__.__name__, self.min, self.max, self.choices))
class ArgChoice(ArgSuggest):
  """Choice argument.

  Value can be any of the ones passed to the constructor. Like L{ArgSuggest},
  but value must be one of the choices.

  """


class ArgUnknown(_Argument):
  """Unknown argument to program (e.g. determined at runtime).

  """


class ArgInstance(_Argument):
  """Instances argument.

  """


class ArgNode(_Argument):
  """Node argument.

  """


class ArgJobId(_Argument):
  """Job ID argument.

  """


class ArgFile(_Argument):
  """File path argument.

  """


class ArgCommand(_Argument):
  """Command argument.

  """


class ArgHost(_Argument):
  """Host argument.

  """


class ArgOs(_Argument):
  """OS argument.

  """
# Shared argument-definition shortcuts used by the per-command tables:
# "many" variants accept any number of values, "one" variants exactly one.
ARGS_MANY_INSTANCES = [ArgInstance()]
ARGS_MANY_NODES = [ArgNode()]
ARGS_ONE_INSTANCE = [ArgInstance(min=1, max=1)]
ARGS_ONE_NODE = [ArgNode(min=1, max=1)]
ARGS_ONE_OS = [ArgOs(min=1, max=1)]
def _ExtractTagsObject(opts, args):
  """Extract the tag type object.

  Note that this function will modify its args parameter.

  """
  if not hasattr(opts, "tag_type"):
    raise errors.ProgrammerError("tag_type not passed to _ExtractTagsObject")
  # NOTE(review): 'kind' below is presumably opts.tag_type; its assignment
  # and the branch bodies are not visible in this excerpt — confirm.
  if kind == constants.TAG_CLUSTER:
  elif kind == constants.TAG_NODE or kind == constants.TAG_INSTANCE:
    # node/instance tags need the object name as first positional argument
    raise errors.OpPrereqError("no arguments passed to the command")
  raise errors.ProgrammerError("Unhandled tag type '%s'" % kind)
def _ExtendTags(opts, args):
  """Extend the args if a source file has been given.

  This function will extend the tags with the contents of the file
  passed in the 'tags_source' attribute of the opts parameter. A file
  named '-' will be replaced by stdin.

  """
  # path of the optional tag file (TAG_SRC_OPT, default None)
  fname = opts.tags_source
  # NOTE(review): the None/'-' guards and the try/finally around the file
  # handle are not visible in this excerpt — confirm.
  new_fh = open(fname, "r")
  # we don't use the nice 'new_data = [line.strip() for line in fh]'
  # because of python bug 1633941
  line = new_fh.readline()
  new_data.append(line.strip())
  # the collected tag names extend the positional arguments in place
  args.extend(new_data)
def ListTags(opts, args):
  """List the tags on a given object.

  This is a generic implementation that knows how to deal with all
  three cases of tag objects (cluster, node, instance). The opts
  argument is expected to contain a tag_type field denoting what
  object type we work on.

  """
  kind, name = _ExtractTagsObject(opts, args)
  # NOTE(review): 'cl' is presumably a luxi client obtained just before
  # this call; the assignment is not visible in this excerpt — confirm.
  result = cl.QueryTags(kind, name)
  # materialize the result so it can be sorted/printed
  result = list(result)
def AddTags(opts, args):
  """Add tags on a given object.

  This is a generic implementation that knows how to deal with all
  three cases of tag objects (cluster, node, instance). The opts
  argument is expected to contain a tag_type field denoting what
  object type we work on.

  """
  kind, name = _ExtractTagsObject(opts, args)
  # pull additional tag names from --from, if given
  _ExtendTags(opts, args)
  # NOTE(review): this raise is presumably guarded by an 'if not args:'
  # check that is not visible in this excerpt — confirm.
  raise errors.OpPrereqError("No tags to be added")
  op = opcodes.OpAddTags(kind=kind, name=name, tags=args)
def RemoveTags(opts, args):
  """Remove tags from a given object.

  This is a generic implementation that knows how to deal with all
  three cases of tag objects (cluster, node, instance). The opts
  argument is expected to contain a tag_type field denoting what
  object type we work on.

  """
  kind, name = _ExtractTagsObject(opts, args)
  # pull additional tag names from --from, if given
  _ExtendTags(opts, args)
  # NOTE(review): this raise is presumably guarded by an 'if not args:'
  # check that is not visible in this excerpt — confirm.
  raise errors.OpPrereqError("No tags to be removed")
  op = opcodes.OpDelTags(kind=kind, name=name, tags=args)
375 def check_unit(option, opt, value): # pylint: disable-msg=W0613
376 """OptParsers custom converter for units.
380 return utils.ParseUnit(value)
381 except errors.UnitParseError, err:
382 raise OptionValueError("option %s: %s" % (opt, err))
def _SplitKeyVal(opt, data):
  """Convert a KeyVal string into a dict.

  This function will convert a key=val[,...] string into a dict. Empty
  values will be converted specially: keys which have the prefix 'no_'
  will have the value=False and the prefix stripped, the others will
  have the value=True.

  @param opt: a string holding the option name for which we process the
      data, used in building error messages
  @param data: a string of the format key=val,key=val,...
  @return: {key=val, key=val}
  @raises errors.ParameterError: if there are duplicate keys

  """
  for elem in utils.UnescapeAndSplit(data, sep=","):
    key, val = elem.split("=", 1)
    # NOTE(review): the branches below presumably apply only to elements
    # without '='; the guarding condition, the result-dict bookkeeping and
    # the return are not visible in this excerpt — confirm.
    # NOTE(review): NO_PREFIX/UN_PREFIX are module-level constants defined
    # outside this excerpt ('no_' per the docstring; UN_PREFIX unknown).
    if elem.startswith(NO_PREFIX):
      key, val = elem[len(NO_PREFIX):], False
    elif elem.startswith(UN_PREFIX):
      key, val = elem[len(UN_PREFIX):], None
      key, val = elem, True
    raise errors.ParameterError("Duplicate key '%s' in option %s" %
def check_ident_key_val(option, opt, value): # pylint: disable-msg=W0613
  """Custom parser for ident:key=val,key=val options.

  This will store the parsed values as a tuple (ident, {key: val}). As such,
  multiple uses of this option via action=append is possible.

  """
  # default: no ":" separator — the whole value is the identifier
  ident, rest = value, ''
  ident, rest = value.split(":", 1)
  # NOTE(review): the surrounding ':'-presence and 'if rest:' conditions,
  # the final else branch and the return are not visible in this excerpt.
  if ident.startswith(NO_PREFIX):
    # removing a parameter group cannot be combined with key=val options
    msg = "Cannot pass options when removing parameter groups: %s" % value
    raise errors.ParameterError(msg)
    retval = (ident[len(NO_PREFIX):], False)
  elif ident.startswith(UN_PREFIX):
    # resetting a parameter group cannot be combined with key=val options
    msg = "Cannot pass options when removing parameter groups: %s" % value
    raise errors.ParameterError(msg)
    retval = (ident[len(UN_PREFIX):], None)
  kv_dict = _SplitKeyVal(opt, rest)
  retval = (ident, kv_dict)
def check_key_val(option, opt, value): # pylint: disable-msg=W0613
  """Custom parser class for key=val,key=val options.

  This will store the parsed values as a dict {key: val}.

  @param option: the option object being processed (unused)
  @param opt: the option string, used in error messages
  @param value: the user-supplied key=val,key=val,... string

  """
  # delegate the actual parsing (and duplicate detection) to _SplitKeyVal
  return _SplitKeyVal(opt, value)
# completion_suggestion is normally a list. Using numeric values not evaluating
# to False for dynamic completion.
# Six completion tags must be unpacked from range(100, 106); the
# OPT_COMPL_ONE_NODE and OPT_COMPL_ONE_OS entries were lost in this
# excerpt (both are referenced by option definitions below and exported
# via __all__) — restored here.
(OPT_COMPL_MANY_NODES,
 OPT_COMPL_ONE_NODE,
 OPT_COMPL_ONE_INSTANCE,
 OPT_COMPL_ONE_OS,
 OPT_COMPL_ONE_IALLOCATOR,
 OPT_COMPL_INST_ADD_NODES) = range(100, 106)

# Set of all known completion tags, for validating completion_suggest.
OPT_COMPL_ALL = frozenset([
  OPT_COMPL_MANY_NODES,
  OPT_COMPL_ONE_NODE,
  OPT_COMPL_ONE_INSTANCE,
  OPT_COMPL_ONE_OS,
  OPT_COMPL_ONE_IALLOCATOR,
  OPT_COMPL_INST_ADD_NODES,
  ])
class CliOption(Option):
  """Custom option class for optparse.

  Extends optparse's Option with a 'completion_suggest' attribute and
  three extra option types backed by the check_* converters above.

  """
  ATTRS = Option.ATTRS + [
    "completion_suggest",
    ]
  # NOTE(review): the TYPES entries were reconstructed from the
  # TYPE_CHECKER keys registered below — confirm against the full file.
  TYPES = Option.TYPES + (
    "identkeyval",
    "keyval",
    "unit",
    )
  TYPE_CHECKER = Option.TYPE_CHECKER.copy()
  TYPE_CHECKER["identkeyval"] = check_ident_key_val
  TYPE_CHECKER["keyval"] = check_key_val
  TYPE_CHECKER["unit"] = check_unit


# optparse.py sets make_option, so we do it for our own option class, too
cli_option = CliOption
# Choice values shared by the yes/no node flags further below.
_YESNO = ("yes", "no")
# NOTE(review): the _YORNO metavar string used by MC_OPT and friends is
# defined near here but is not visible in this excerpt — confirm.

DEBUG_OPT = cli_option("-d", "--debug", default=0, action="count",
                       help="Increase debugging level")

NOHDR_OPT = cli_option("--no-headers", default=False,
                       action="store_true", dest="no_headers",
                       help="Don't display column headers")

SEP_OPT = cli_option("--separator", default=None,
                     action="store", dest="separator",
                     help=("Separator between output fields"
                           " (defaults to one space)"))

USEUNITS_OPT = cli_option("--units", default=None,
                          dest="units", choices=('h', 'm', 'g', 't'),
                          help="Specify units for output (one of hmgt)")

FIELDS_OPT = cli_option("-o", "--output", dest="output", action="store",
                        type="string", metavar="FIELDS",
                        help="Comma separated list of output fields")

FORCE_OPT = cli_option("-f", "--force", dest="force", action="store_true",
                       default=False, help="Force the operation")

CONFIRM_OPT = cli_option("--yes", dest="confirm", action="store_true",
                         default=False, help="Do not require confirmation")

TAG_SRC_OPT = cli_option("--from", dest="tags_source",
                         default=None, help="File with tag names")

SUBMIT_OPT = cli_option("--submit", dest="submit_only",
                        default=False, action="store_true",
                        help=("Submit the job and return the job ID, but"
                              " don't wait for the job to finish"))

SYNC_OPT = cli_option("--sync", dest="do_locking",
                      default=False, action="store_true",
                      help=("Grab locks while doing the queries"
                            " in order to ensure more consistent results"))

# NOTE(review): "verify it it could be" is a typo in the help text
# ("verify if it could be") — fix separately.
_DRY_RUN_OPT = cli_option("--dry-run", default=False,
                          help=("Do not execute the operation, just run the"
                                " check steps and verify it it could be"

VERBOSE_OPT = cli_option("-v", "--verbose", default=False,
                         help="Increase the verbosity of the operation")

DEBUG_SIMERR_OPT = cli_option("--debug-simulate-errors", default=False,
                              action="store_true", dest="simulate_errors",
                              help="Debugging option that makes the operation"
                              " treat most runtime checks as failed")

NWSYNC_OPT = cli_option("--no-wait-for-sync", dest="wait_for_sync",
                        default=True, action="store_false",
                        help="Don't wait for sync (DANGEROUS!)")

DISK_TEMPLATE_OPT = cli_option("-t", "--disk-template", dest="disk_template",
                               help="Custom disk setup (diskless, file,"
                               default=None, metavar="TEMPL",
                               choices=list(constants.DISK_TEMPLATES))

NONICS_OPT = cli_option("--no-nics", default=False, action="store_true",
                        help="Do not create any network cards for"

FILESTORE_DIR_OPT = cli_option("--file-storage-dir", dest="file_storage_dir",
                               help="Relative path under default cluster-wide"
                               " file storage dir to store file-based disks",
                               default=None, metavar="<DIR>")

FILESTORE_DRIVER_OPT = cli_option("--file-driver", dest="file_driver",
                                  help="Driver to use for image files",
                                  default="loop", metavar="<DRIVER>",
                                  choices=list(constants.FILE_DRIVER))

IALLOCATOR_OPT = cli_option("-I", "--iallocator", metavar="<NAME>",
                            help="Select nodes for the instance automatically"
                            " using the <NAME> iallocator plugin",
                            default=None, type="string",
                            completion_suggest=OPT_COMPL_ONE_IALLOCATOR)

OS_OPT = cli_option("-o", "--os-type", dest="os", help="What OS to run",
                    completion_suggest=OPT_COMPL_ONE_OS)

FORCE_VARIANT_OPT = cli_option("--force-variant", dest="force_variant",
                               action="store_true", default=False,
                               help="Force an unknown variant")

NO_INSTALL_OPT = cli_option("--no-install", dest="no_install",
                            action="store_true", default=False,
                            help="Do not install the OS (will"
BACKEND_OPT = cli_option("-B", "--backend-parameters", dest="beparams",
                         type="keyval", default={},
                         help="Backend parameters")

HVOPTS_OPT = cli_option("-H", "--hypervisor-parameters", type="keyval",
                        default={}, dest="hvparams",
                        help="Hypervisor parameters")

# NOTE(review): HVOPTS_OPT, HYPERVISOR_OPT and HVLIST_OPT all claim
# "-H"/"--hypervisor-parameters"; presumably each one is used by a
# different command's parser — confirm they never share one parser.
HYPERVISOR_OPT = cli_option("-H", "--hypervisor-parameters", dest="hypervisor",
                            help="Hypervisor and hypervisor options, in the"
                            " format hypervisor:option=value,option=value,...",
                            default=None, type="identkeyval")

HVLIST_OPT = cli_option("-H", "--hypervisor-parameters", dest="hvparams",
                        help="Hypervisor and hypervisor options, in the"
                        " format hypervisor:option=value,option=value,...",
                        default=[], action="append", type="identkeyval")

NOIPCHECK_OPT = cli_option("--no-ip-check", dest="ip_check", default=True,
                           action="store_false",
                           help="Don't check that the instance's IP"

NONAMECHECK_OPT = cli_option("--no-name-check", dest="name_check",
                             default=True, action="store_false",
                             help="Don't check that the instance's name"

NET_OPT = cli_option("--net",
                     help="NIC parameters", default=[],
                     dest="nics", action="append", type="identkeyval")

DISK_OPT = cli_option("--disk", help="Disk parameters", default=[],
                      dest="disks", action="append", type="identkeyval")

DISKIDX_OPT = cli_option("--disks", dest="disks", default=None,
                         help="Comma-separated list of disks"
                         " indices to act on (e.g. 0,2) (optional,"
                         " defaults to all disks)")

OS_SIZE_OPT = cli_option("-s", "--os-size", dest="sd_size",
                         help="Enforces a single-disk configuration using the"
                         " given disk size, in MiB unless a suffix is used",
                         default=None, type="unit", metavar="<size>")

IGNORE_CONSIST_OPT = cli_option("--ignore-consistency",
                                dest="ignore_consistency",
                                action="store_true", default=False,
                                help="Ignore the consistency of the disks on"

NONLIVE_OPT = cli_option("--non-live", dest="live",
                         default=True, action="store_false",
                         help="Do a non-live migration (this usually means"
                         " freeze the instance, save the state, transfer and"
                         " only then resume running on the secondary node)")

NODE_PLACEMENT_OPT = cli_option("-n", "--node", dest="node",
                                help="Target node and optional secondary node",
                                metavar="<pnode>[:<snode>]",
                                completion_suggest=OPT_COMPL_INST_ADD_NODES)

NODE_LIST_OPT = cli_option("-n", "--node", dest="nodes", default=[],
                           action="append", metavar="<node>",
                           help="Use only this node (can be used multiple"
                           " times, if not given defaults to all nodes)",
                           completion_suggest=OPT_COMPL_ONE_NODE)

SINGLE_NODE_OPT = cli_option("-n", "--node", dest="node", help="Target node",
                             completion_suggest=OPT_COMPL_ONE_NODE)

NOSTART_OPT = cli_option("--no-start", dest="start", default=True,
                         action="store_false",
                         help="Don't start the instance after creation")

SHOWCMD_OPT = cli_option("--show-cmd", dest="show_command",
                         action="store_true", default=False,
                         help="Show command instead of executing it")

# NOTE(review): '...traffic and ' + ' disrupt...' produces a doubled
# space in the rendered help text — fix separately.
CLEANUP_OPT = cli_option("--cleanup", dest="cleanup",
                         default=False, action="store_true",
                         help="Instead of performing the migration, try to"
                         " recover from a failed cleanup. This is safe"
                         " to run even if the instance is healthy, but it"
                         " will create extra replication traffic and "
                         " disrupt briefly the replication (like during the"

STATIC_OPT = cli_option("-s", "--static", dest="static",
                        action="store_true", default=False,
                        help="Only show configuration data, not runtime data")

ALL_OPT = cli_option("--all", dest="show_all",
                     default=False, action="store_true",
                     help="Show info on all instances on the cluster."
                     " This can take a long time to run, use wisely")

SELECT_OS_OPT = cli_option("--select-os", dest="select_os",
                           action="store_true", default=False,
                           help="Interactive OS reinstall, lists available"
                           " OS templates for selection")

IGNORE_FAILURES_OPT = cli_option("--ignore-failures", dest="ignore_failures",
                                 action="store_true", default=False,
                                 help="Remove the instance from the cluster"
                                 " configuration even if there are failures"
                                 " during the removal process")
NEW_SECONDARY_OPT = cli_option("-n", "--new-secondary", dest="dst_node",
                               help="Specifies the new secondary node",
                               metavar="NODE", default=None,
                               completion_suggest=OPT_COMPL_ONE_NODE)

ON_PRIMARY_OPT = cli_option("-p", "--on-primary", dest="on_primary",
                            default=False, action="store_true",
                            help="Replace the disk(s) on the primary"
                            " node (only for the drbd template)")

ON_SECONDARY_OPT = cli_option("-s", "--on-secondary", dest="on_secondary",
                              default=False, action="store_true",
                              help="Replace the disk(s) on the secondary"
                              " node (only for the drbd template)")

AUTO_PROMOTE_OPT = cli_option("--auto-promote", dest="auto_promote",
                              default=False, action="store_true",
                              help="Lock all nodes and auto-promote as needed"

AUTO_REPLACE_OPT = cli_option("-a", "--auto", dest="auto",
                              default=False, action="store_true",
                              help="Automatically replace faulty disks"
                              " (only for the drbd template)")

IGNORE_SIZE_OPT = cli_option("--ignore-size", dest="ignore_size",
                             default=False, action="store_true",
                             help="Ignore current recorded size"
                             " (useful for forcing activation when"
                             " the recorded size is wrong)")

SRC_NODE_OPT = cli_option("--src-node", dest="src_node", help="Source node",
                          completion_suggest=OPT_COMPL_ONE_NODE)

SRC_DIR_OPT = cli_option("--src-dir", dest="src_dir", help="Source directory",

SECONDARY_IP_OPT = cli_option("-s", "--secondary-ip", dest="secondary_ip",
                              help="Specify the secondary ip for the node",
                              metavar="ADDRESS", default=None)

READD_OPT = cli_option("--readd", dest="readd",
                       default=False, action="store_true",
                       help="Readd old node after replacing it")

NOSSH_KEYCHECK_OPT = cli_option("--no-ssh-key-check", dest="ssh_key_check",
                                default=True, action="store_false",
                                help="Disable SSH key fingerprint checking")

# NOTE(review): _YORNO (the metavar below) is defined outside this excerpt.
MC_OPT = cli_option("-C", "--master-candidate", dest="master_candidate",
                    choices=_YESNO, default=None, metavar=_YORNO,
                    help="Set the master_candidate flag on the node")

OFFLINE_OPT = cli_option("-O", "--offline", dest="offline", metavar=_YORNO,
                         choices=_YESNO, default=None,
                         help="Set the offline flag on the node")

DRAINED_OPT = cli_option("-D", "--drained", dest="drained", metavar=_YORNO,
                         choices=_YESNO, default=None,
                         help="Set the drained flag on the node")

ALLOCATABLE_OPT = cli_option("--allocatable", dest="allocatable",
                             choices=_YESNO, default=None, metavar=_YORNO,
                             help="Set the allocatable flag on a volume")

NOLVM_STORAGE_OPT = cli_option("--no-lvm-storage", dest="lvm_storage",
                               help="Disable support for lvm based instances"
                               action="store_false", default=True)

ENABLED_HV_OPT = cli_option("--enabled-hypervisors",
                            dest="enabled_hypervisors",
                            help="Comma-separated list of hypervisors",
                            type="string", default=None)

NIC_PARAMS_OPT = cli_option("-N", "--nic-parameters", dest="nicparams",
                            type="keyval", default={},
                            help="NIC parameters")

# NOTE(review): "-C" is also claimed by MC_OPT above; presumably the two
# are never registered on the same parser — confirm.
CP_SIZE_OPT = cli_option("-C", "--candidate-pool-size", default=None,
                         dest="candidate_pool_size", type="int",
                         help="Set the candidate pool size")

VG_NAME_OPT = cli_option("-g", "--vg-name", dest="vg_name",
                         help="Enables LVM and specifies the volume group"
                         " name (cluster-wide) for disk allocation [xenvg]",
                         metavar="VG", default=None)

YES_DOIT_OPT = cli_option("--yes-do-it", dest="yes_do_it",
                          help="Destroy cluster", action="store_true")

NOVOTING_OPT = cli_option("--no-voting", dest="no_voting",
                          help="Skip node agreement check (dangerous)",
                          action="store_true", default=False)
MAC_PREFIX_OPT = cli_option("-m", "--mac-prefix", dest="mac_prefix",
                            help="Specify the mac prefix for the instance IP"
                            " addresses, in the format XX:XX:XX",

MASTER_NETDEV_OPT = cli_option("--master-netdev", dest="master_netdev",
                               help="Specify the node interface (cluster-wide)"
                               " on which the master IP address will be added "
                               " [%s]" % constants.DEFAULT_BRIDGE,
                               default=constants.DEFAULT_BRIDGE)

GLOBAL_FILEDIR_OPT = cli_option("--file-storage-dir", dest="file_storage_dir",
                                help="Specify the default directory (cluster-"
                                "wide) for storing the file-based disks [%s]" %
                                constants.DEFAULT_FILE_STORAGE_DIR,
                                default=constants.DEFAULT_FILE_STORAGE_DIR)

NOMODIFY_ETCHOSTS_OPT = cli_option("--no-etc-hosts", dest="modify_etc_hosts",
                                   help="Don't modify /etc/hosts",
                                   action="store_false", default=True)

NOMODIFY_SSH_SETUP_OPT = cli_option("--no-ssh-init", dest="modify_ssh_setup",
                                    help="Don't initialize SSH keys",
                                    action="store_false", default=True)

ERROR_CODES_OPT = cli_option("--error-codes", dest="error_codes",
                             help="Enable parseable error messages",
                             action="store_true", default=False)

NONPLUS1_OPT = cli_option("--no-nplus1-mem", dest="skip_nplusone_mem",
                          help="Skip N+1 memory redundancy tests",
                          action="store_true", default=False)

REBOOT_TYPE_OPT = cli_option("-t", "--type", dest="reboot_type",
                             help="Type of reboot: soft/hard/full",
                             default=constants.INSTANCE_REBOOT_HARD,
                             choices=list(constants.REBOOT_TYPES))

IGNORE_SECONDARIES_OPT = cli_option("--ignore-secondaries",
                                    dest="ignore_secondaries",
                                    default=False, action="store_true",
                                    help="Ignore errors from secondaries")

NOSHUTDOWN_OPT = cli_option("--noshutdown", dest="shutdown",
                            action="store_false", default=True,
                            help="Don't shutdown the instance (unsafe)")

TIMEOUT_OPT = cli_option("--timeout", dest="timeout", type="int",
                         default=constants.DEFAULT_SHUTDOWN_TIMEOUT,
                         help="Maximum time to wait")

SHUTDOWN_TIMEOUT_OPT = cli_option("--shutdown-timeout",
                                  dest="shutdown_timeout", type="int",
                                  default=constants.DEFAULT_SHUTDOWN_TIMEOUT,
                                  help="Maximum time to wait for instance shutdown")

EARLY_RELEASE_OPT = cli_option("--early-release",
                               dest="early_release", default=False,
                               help="Release the locks on the secondary"

NEW_CLUSTER_CERT_OPT = cli_option("--new-cluster-certificate",
                                  dest="new_cluster_cert",
                                  default=False, action="store_true",
                                  help="Generate a new cluster certificate")

RAPI_CERT_OPT = cli_option("--rapi-certificate", dest="rapi_cert",
                           help="File containing new RAPI certificate")

# NOTE(review): default=None (not False) with action="store_true" — the
# unset value of this flag is None here; presumably intentional, confirm.
NEW_RAPI_CERT_OPT = cli_option("--new-rapi-certificate", dest="new_rapi_cert",
                               default=None, action="store_true",
                               help=("Generate a new self-signed RAPI"

NEW_CONFD_HMAC_KEY_OPT = cli_option("--new-confd-hmac-key",
                                    dest="new_confd_hmac_key",
                                    default=False, action="store_true",
                                    help=("Create a new HMAC key for %s" %
def _ParseArgs(argv, commands, aliases):
  """Parser for the command line arguments.

  This function parses the arguments and returns the function which
  must be executed together with its (modified) arguments.

  @param argv: the command line
  @param commands: dictionary with special contents, see the design
      doc for cmdline handling
  @param aliases: dictionary with command aliases {'alias': 'target, ...}

  """
  # program name, used in all usage/help messages
  binary = argv[0].split("/")[-1]

  if len(argv) > 1 and argv[1] == "--version":
    ToStdout("%s (ganeti) %s", binary, constants.RELEASE_VERSION)
    # Quit right away. That way we don't have to care about this special
    # argument. optparse.py does it the same.

  # no command given, or an unknown one: print a usage summary and bail
  if len(argv) < 2 or not (argv[1] in commands or
    # let's do a nice thing
    sortedcmds = commands.keys()
    ToStdout("Usage: %s {command} [options...] [argument...]", binary)
    ToStdout("%s <command> --help to see details, or man %s", binary, binary)
    # compute the max line length for cmd + usage
    mlen = max([len(" %s" % cmd) for cmd in commands])
    mlen = min(60, mlen) # should not get here...
    # and format a nice command list
    ToStdout("Commands:")
    for cmd in sortedcmds:
      cmdstr = " %s" % (cmd,)
      # index 4 of each command entry is its one-line help text
      help_text = commands[cmd][4]
      help_lines = textwrap.wrap(help_text, 79 - 3 - mlen)
      ToStdout("%-*s - %s", mlen, cmdstr, help_lines.pop(0))
      for line in help_lines:
        ToStdout("%-*s %s", mlen, "", line)
    return None, None, None

  # get command, unalias it, and look it up in commands
  # NOTE(review): the alias-table sanity checks below are only partially
  # visible in this excerpt (their guarding conditions are missing).
  raise errors.ProgrammerError("Alias '%s' overrides an existing"
  if aliases[cmd] not in commands:
    raise errors.ProgrammerError("Alias '%s' maps to non-existing"
                                 " command '%s'" % (cmd, aliases[cmd]))

  # each command entry is (function, args definition, options, usage, help)
  func, args_def, parser_opts, usage, description = commands[cmd]
  # --dry-run and --debug are made available to every command
  parser = OptionParser(option_list=parser_opts + [_DRY_RUN_OPT, DEBUG_OPT],
                        description=description,
                        formatter=TitledHelpFormatter(),
                        usage="%%prog %s %s" % (cmd, usage))
  parser.disable_interspersed_args()
  options, args = parser.parse_args()

  if not _CheckArguments(cmd, args_def, args):
    return None, None, None

  return func, options, args
def _CheckArguments(cmd, args_def, args):
  """Verifies the arguments using the argument definition.

  Algorithm:

    1. Abort with error if values specified by user but none expected.
    1. For each argument in definition
      1. Keep running count of minimum number of values (min_count)
      1. Keep running count of maximum number of values (max_count)
      1. If it has an unlimited number of values
        1. Abort with error if it's not the last argument in the definition
    1. If last argument has limited number of values
      1. Abort with error if number of values doesn't match or is too large
    1. Abort with error if user didn't pass enough values (min_count)

  """
  # NOTE(review): the 'return False' statements after each error message
  # and the branch bodies of several conditions are not visible here.
  if args and not args_def:
    ToStderr("Error: Command %s expects no arguments", cmd)

  last_idx = len(args_def) - 1

  for idx, arg in enumerate(args_def):
    if min_count is None:
    elif arg.min is not None:
      min_count += arg.min

    if max_count is None:
    elif arg.max is not None:
      max_count += arg.max

    # only the last argument definition may be open-ended (max=None)
    check_max = (arg.max is not None)
    elif arg.max is None:
      raise errors.ProgrammerError("Only the last argument can have max=None")

  # Command with exact number of arguments
  if (min_count is not None and max_count is not None and
      min_count == max_count and len(args) != min_count):
    ToStderr("Error: Command %s expects %d argument(s)", cmd, min_count)

  # Command with limited number of arguments
  if max_count is not None and len(args) > max_count:
    ToStderr("Error: Command %s expects only %d argument(s)",

  # Command with some required arguments
  if min_count is not None and len(args) < min_count:
    ToStderr("Error: Command %s expects at least %d argument(s)",
def SplitNodeOption(value):
  """Splits the value of a --node option.

  @param value: a string of the form "pnode" or "pnode:snode", or None
  @return: a two-element sequence (primary, secondary); secondary is
      None when no ":" separator is present (note: a list is returned
      in the split case, a tuple otherwise — kept for compatibility)

  """
  if value and ':' in value:
    # only the first ":" separates primary from secondary
    return value.split(':', 1)
  else:
    return (value, None)
def CalculateOSNames(os_name, os_variants):
  """Calculates all the names an OS can be called, according to its variants.

  @type os_name: string
  @param os_name: base name of the os
  @type os_variants: list or None
  @param os_variants: list of supported variants
  @rtype: list
  @return: list of valid names

  """
  if os_variants:
    return ['%s+%s' % (os_name, v) for v in os_variants]
  else:
    # no variants (None or empty): the base name is the only valid name
    return [os_name]
def wrapper(*args, **kwargs):
  # NOTE(review): inner wrapper of an RPC-related decorator; the enclosing
  # decorator function and its setup/teardown lines are not visible in
  # this excerpt. The visible statement simply forwards the call.
  return fn(*args, **kwargs)
def AskUser(text, choices=None):
  """Ask the user a question.

  @param text: the question to ask
  @param choices: list with elements tuples (input_char, return_value,
      description); if not given, it will default to: [('y', True,
      'Perform the operation'), ('n', False, 'Do not perform the
      operation')]; note that the '?' char is reserved for help
  @return: one of the return values from the choices list; if input is
      not possible (i.e. not running with a tty), we return the last
      entry of the list

  """
  # NOTE(review): presumably guarded by an 'if choices is None:' check
  # that is not visible in this excerpt — confirm.
  choices = [('y', True, 'Perform the operation'),
             ('n', False, 'Do not perform the operation')]
  if not choices or not isinstance(choices, list):
    raise errors.ProgrammerError("Invalid choices argument to AskUser")
  for entry in choices:
    if not isinstance(entry, tuple) or len(entry) < 3 or entry[0] == '?':
      raise errors.ProgrammerError("Invalid choices element to AskUser")

  # default answer: the last choice's return value (used when no tty)
  answer = choices[-1][1]
  # wrap long question lines to 70 columns for terminal display
  for line in text.splitlines():
    new_text.append(textwrap.fill(line, 70, replace_whitespace=False))
  text = "\n".join(new_text)
  # Python 2 'file' builtin; talks to the controlling terminal directly
  f = file("/dev/tty", "a+")
  # display the choice characters, marking the default with brackets
  chars = [entry[0] for entry in choices]
  chars[-1] = "[%s]" % chars[-1]
  maps = dict([(entry[0], entry[1]) for entry in choices])
  f.write("/".join(chars))
  # one answer character plus the newline
  line = f.readline(2).strip().lower()
  # help output: list every choice with its description
  for entry in choices:
    f.write(" %s - %s\n" % (entry[0], entry[2]))
class JobSubmittedException(Exception):
  """Job was submitted, client should exit.

  This exception has one argument, the ID of the job that was
  submitted. The handler should print this ID.

  This is not an error, just a structured way to exit from clients.

  """
def SendJob(ops, cl=None):
  """Function to submit an opcode without waiting for the results.

  @type ops: list
  @param ops: list of opcodes
  @type cl: luxi.Client
  @param cl: the luxi client to use for communicating with the master;
      if None, a new client will be created

  """
  # NOTE(review): the 'cl = GetClient()' fallback for cl=None and the
  # final 'return job_id' are not visible in this excerpt — confirm.
  job_id = cl.SubmitJob(ops)
def PollJob(job_id, cl=None, feedback_fn=None):
  """Function to poll for the result of a job.

  @type job_id: job identified
  @param job_id: the job to poll for results
  @type cl: luxi.Client
  @param cl: the luxi client to use for communicating with the master;
      if None, a new client will be created

  """
  # NOTE(review): the polling while-loop and several guard conditions are
  # not visible in this excerpt; the comments below describe only the
  # visible statements.
  prev_job_info = None
  prev_logmsg_serial = None

  # remember which progress messages were already printed, so they are
  # emitted at most once
  notified_queued = False
  notified_waitlock = False

  result = cl.WaitForJobChangeOnce(job_id, ["status"], prev_job_info,
  # job not found, go away!
  raise errors.JobLost("Job with id %s lost" % job_id)
  elif result == constants.JOB_NOTCHANGED:
    if status is not None and not callable(feedback_fn):
      if status == constants.JOB_STATUS_QUEUED and not notified_queued:
        ToStderr("Job %s is waiting in queue", job_id)
        notified_queued = True
      elif status == constants.JOB_STATUS_WAITLOCK and not notified_waitlock:
        ToStderr("Job %s is trying to acquire all necessary locks", job_id)
        notified_waitlock = True

  # Split result, a tuple of (field values, log entries)
  (job_info, log_entries) = result
  (status, ) = job_info

  for log_entry in log_entries:
    (serial, timestamp, _, message) = log_entry
    if callable(feedback_fn):
      # structured feedback: pass everything except the serial
      feedback_fn(log_entry[1:])
      # plain output: timestamped, safely-encoded message
      encoded = utils.SafeEncode(message)
      ToStdout("%s %s", time.ctime(utils.MergeTime(timestamp)), encoded)
    # track the highest log serial seen so far
    prev_logmsg_serial = max(prev_logmsg_serial, serial)

  # TODO: Handle canceled and archived jobs
  elif status in (constants.JOB_STATUS_SUCCESS,
                  constants.JOB_STATUS_ERROR,
                  constants.JOB_STATUS_CANCELING,
                  constants.JOB_STATUS_CANCELED):

  prev_job_info = job_info

  # final state: fetch status and per-opcode status/results
  jobs = cl.QueryJobs([job_id], ["status", "opstatus", "opresult"])
  raise errors.JobLost("Job with id %s lost" % job_id)

  status, opstatus, result = jobs[0]
  if status == constants.JOB_STATUS_SUCCESS:
  elif status in (constants.JOB_STATUS_CANCELING,
                  constants.JOB_STATUS_CANCELED):
    raise errors.OpExecError("Job was canceled")

  # error case: find the failed opcode and re-raise its error
  for idx, (status, msg) in enumerate(zip(opstatus, result)):
    if status == constants.OP_STATUS_SUCCESS:
    elif status == constants.OP_STATUS_ERROR:
      # re-raise an encapsulated exception, if any
      errors.MaybeRaise(msg)
      raise errors.OpExecError("partial failure (opcode %d): %s" %
    raise errors.OpExecError(str(msg))
  # default failure mode
  raise errors.OpExecError(result)
def SubmitOpCode(op, cl=None, feedback_fn=None, opts=None):
  """Legacy function to submit an opcode.

  This is just a simple wrapper over the construction of the processor
  instance. It should be extended to better handle feedback and
  interaction functions.

  """
  # NOTE(review): a 'cl = GetClient()' fallback for cl=None presumably
  # sits here but is not visible in this excerpt — confirm.
  # propagate generic options (debug, dry-run) into the opcode
  SetGenericOpcodeOpts([op], opts)

  job_id = SendJob([op], cl)

  # wait for completion and return the single opcode's result
  op_results = PollJob(job_id, cl=cl, feedback_fn=feedback_fn)

  return op_results[0]
def SubmitOrSend(op, opts, cl=None, feedback_fn=None):
  """Wrapper around SubmitOpCode or SendJob.

  This function will decide, based on the 'opts' parameter, whether to
  submit and wait for the result of the opcode (and return it), or
  whether to just send the job and print its identifier. It is used in
  order to simplify the implementation of the '--submit' option.

  It will also process the opcodes if we're sending them via SendJob
  (otherwise SubmitOpCode does it).

  @param op: the opcode to submit
  @param opts: command line options; if C{opts.submit_only} is set, the
      job is only submitted, not waited for
  @param cl: luxi client to use (may be None)
  @param feedback_fn: feedback function, forwarded to L{SubmitOpCode}
  @raise JobSubmittedException: in submit-only mode, to carry the new
      job's ID back to the generic command-line machinery

  """
  if opts and opts.submit_only:
    # submit-only mode: send a single-opcode job ourselves and signal
    # the job ID to the caller via the dedicated exception
    job = [op]
    SetGenericOpcodeOpts(job, opts)
    job_id = SendJob(job, cl=cl)
    raise JobSubmittedException(job_id)
  else:
    return SubmitOpCode(op, cl=cl, feedback_fn=feedback_fn, opts=opts)
def SetGenericOpcodeOpts(opcode_list, options):
  """Processor for generic options.

  This function updates the given opcodes based on generic command
  line options (like debug, dry-run, etc.).

  @param opcode_list: list of opcodes
  @param options: command line options or None
  @return: None (in-place modification)

  """
  if not options:
    # no options object given (documented as legal): nothing to do
    return
  for op in opcode_list:
    op.dry_run = options.dry_run
    op.debug_level = options.debug
def GetClient():
  """Connects to the local luxi master daemon and returns a client.

  If no master daemon is listening, a diagnostic L{errors.OpPrereqError}
  is raised explaining whether the cluster is uninitialized or whether
  this is simply not the master node.

  @return: a L{luxi.Client} instance
  @raise errors.OpPrereqError: if the cluster is not initialized or this
      node is not the master

  """
  # TODO: Cache object?
  try:
    client = luxi.Client()
  except luxi.NoMasterError:
    ss = ssconf.SimpleStore()

    # Try to read ssconf file
    try:
      ss.GetMasterNode()
    except errors.ConfigurationError:
      raise errors.OpPrereqError("Cluster not initialized or this machine is"
                                 " not part of a cluster")

    master, myself = ssconf.GetMasterAndMyself(ss=ss)
    if master != myself:
      raise errors.OpPrereqError("This is not the master node, please connect"
                                 " to node '%s' and rerun the command" %
                                 master)
    # on the master but the daemon is down: propagate the original error
    raise
  return client
def FormatError(err):
  """Return a formatted error message for a given error.

  This function takes an exception instance and returns a tuple
  consisting of two values: first, the recommended exit code, and
  second, a string describing the error message (not
  newline-terminated).

  @param err: the exception instance to format
  @return: tuple of (exit code, error description)

  """
  # NOTE(review): 'retcode', 'obuf' (an output buffer) and 'msg'
  # (presumably str(err)) are initialized on lines elided from this
  # excerpt -- confirm against the full file
  if isinstance(err, errors.ConfigurationError):
    txt = "Corrupt configuration file: %s" % msg
    obuf.write(txt + "\n")
    obuf.write("Aborting.")
  elif isinstance(err, errors.HooksAbort):
    obuf.write("Failure: hooks execution failed:\n")
    for node, script, out in err.args[0]:
      # one report line per failed hook; the conditional separating
      # hooks with and without output is elided from this excerpt
      obuf.write("  node: %s, script: %s, output: %s\n" %
                 (node, script, out))
      obuf.write("  node: %s, script: %s (no output)\n" %
  elif isinstance(err, errors.HooksFailure):
    obuf.write("Failure: hooks general failure: %s" % msg)
  elif isinstance(err, errors.ResolverError):
    # distinguish a failure to resolve our own hostname from others
    this_host = utils.HostInfo.SysName()
    if err.args[0] == this_host:
      msg = "Failure: can't resolve my own hostname ('%s')"
      msg = "Failure: can't resolve hostname '%s'"
    obuf.write(msg % err.args[0])
  elif isinstance(err, errors.OpPrereqError):
    # the two-argument form carries (error details, error type)
    if len(err.args) == 2:
      obuf.write("Failure: prerequisites not met for this"
                 " operation:\nerror type: %s, error details:\n%s" %
                 (err.args[1], err.args[0]))
      obuf.write("Failure: prerequisites not met for this"
                 " operation:\n%s" % msg)
  elif isinstance(err, errors.OpExecError):
    obuf.write("Failure: command execution error:\n%s" % msg)
  elif isinstance(err, errors.TagError):
    obuf.write("Failure: invalid tag(s) given:\n%s" % msg)
  elif isinstance(err, errors.JobQueueDrainError):
    obuf.write("Failure: the job queue is marked for drain and doesn't"
               " accept new requests\n")
  elif isinstance(err, errors.JobQueueFull):
    obuf.write("Failure: the job queue is full and doesn't accept new"
               " job submissions until old jobs are archived\n")
  elif isinstance(err, errors.TypeEnforcementError):
    obuf.write("Parameter Error: %s" % msg)
  elif isinstance(err, errors.ParameterError):
    obuf.write("Failure: unknown/wrong parameter name '%s'" % msg)
  elif isinstance(err, errors.GenericError):
    obuf.write("Unhandled Ganeti error: %s" % msg)
  elif isinstance(err, luxi.NoMasterError):
    obuf.write("Cannot communicate with the master daemon.\nIs it running"
               " and listening for connections?")
  elif isinstance(err, luxi.TimeoutError):
    obuf.write("Timeout while talking to the master daemon. Error:\n"
  elif isinstance(err, luxi.ProtocolError):
    obuf.write("Unhandled protocol error while talking to the master daemon:\n"
  elif isinstance(err, JobSubmittedException):
    # not an actual error: carries the ID of a job sent with --submit
    obuf.write("JobID: %s\n" % err.args[0])
  obuf.write("Unhandled exception: %s" % msg)
  # strip the trailing newline so callers control final formatting
  return retcode, obuf.getvalue().rstrip('\n')
def GenericMain(commands, override=None, aliases=None):
  """Generic main function for all the gnt-* commands.

  - commands: a dictionary with a special structure, see the design doc
    for command line handling.
  - override: if not None, we expect a dictionary with keys that will
    override command line options; this can be used to pass
    options from the scripts to generic functions
  - aliases: dictionary with command aliases {'alias': 'target', ...}

  """
  # save the program name and the entire command line for later logging
  binary = os.path.basename(sys.argv[0]) or sys.argv[0]
  if len(sys.argv) >= 2:
    binary += " " + sys.argv[1]
    old_cmdline = " ".join(sys.argv[2:])
  # NOTE(review): the surrounding conditional structure (including the
  # branch setting old_cmdline when too few arguments are present) is
  # elided from this excerpt
  binary = "<unknown program>"
  func, options, args = _ParseArgs(sys.argv, commands, aliases)
  except errors.ParameterError, err:
    # argument parsing failed; format the error for the user
    result, err_msg = FormatError(err)
  if func is None: # parse error
  if override is not None:
    # let the calling script force specific option values
    for key, val in override.iteritems():
      setattr(options, key, val)
  # log to the commands log file, and also to stderr
  utils.SetupLogging(constants.LOG_COMMANDS, debug=options.debug,
                     stderr_logging=True, program=binary)
  logging.info("run with arguments '%s'", old_cmdline)
  logging.info("run with no arguments")
  result = func(options, args)
  except (errors.GenericError, luxi.ProtocolError,
          JobSubmittedException), err:
    result, err_msg = FormatError(err)
    logging.exception("Error during command processing")
def GenericInstanceCreate(mode, opts, args):
  """Add an instance to the cluster via either creation or import.

  @param mode: constants.INSTANCE_CREATE or constants.INSTANCE_IMPORT
  @param opts: the command line options selected by the user
  @param args: should contain only one element, the new instance name
  @return: the desired exit code

  """
  # NOTE(review): several lines (try statements, else branches and some
  # opcode parameters) are elided from this excerpt; comments describe
  # only what is visible
  (pnode, snode) = SplitNodeOption(opts.node)

  hypervisor, hvparams = opts.hypervisor

  # build the NIC definitions from the --net command line entries
  nic_max = max(int(nidx[0]) + 1 for nidx in opts.nics)
  except ValueError, err:
    raise errors.OpPrereqError("Invalid NIC index passed: %s" % str(err))
  nics = [{}] * nic_max
  for nidx, ndict in opts.nics:
    if not isinstance(ndict, dict):
      msg = "Invalid nic/%d value: expected dict, got %s" % (nidx, ndict)
      raise errors.OpPrereqError(msg)
  # default of one nic, all auto

  # disk handling: diskless templates must not receive any disk
  # information, other templates need either --disk entries or -s size
  if opts.disk_template == constants.DT_DISKLESS:
    if opts.disks or opts.sd_size is not None:
      raise errors.OpPrereqError("Diskless instance but disk"
                                 " information passed")
  if not opts.disks and not opts.sd_size:
    raise errors.OpPrereqError("No disk information specified")
  if opts.disks and opts.sd_size is not None:
    raise errors.OpPrereqError("Please use either the '--disk' or"
  if opts.sd_size is not None:
    # translate the legacy single-size option into the disk list format
    opts.disks = [(0, {"size": opts.sd_size})]
  disk_max = max(int(didx[0]) + 1 for didx in opts.disks)
  except ValueError, err:
    raise errors.OpPrereqError("Invalid disk index passed: %s" % str(err))
  disks = [{}] * disk_max
  for didx, ddict in opts.disks:
    if not isinstance(ddict, dict):
      msg = "Invalid disk/%d value: expected dict, got %s" % (didx, ddict)
      raise errors.OpPrereqError(msg)
    elif "size" in ddict:
      if "adopt" in ddict:
        # 'size' and 'adopt' are mutually exclusive per disk
        raise errors.OpPrereqError("Only one of 'size' and 'adopt' allowed"
                                   " (disk %d)" % didx)
      ddict["size"] = utils.ParseUnit(ddict["size"])
      except ValueError, err:
        raise errors.OpPrereqError("Invalid disk size for disk %d: %s" %
    elif "adopt" in ddict:
      if mode == constants.INSTANCE_IMPORT:
        raise errors.OpPrereqError("Disk adoption not allowed for instance"
      raise errors.OpPrereqError("Missing size or adoption source for"

  # enforce value types on the backend/hypervisor parameter dicts
  utils.ForceDictType(opts.beparams, constants.BES_PARAMETER_TYPES)
  utils.ForceDictType(hvparams, constants.HVS_PARAMETER_TYPES)

  # mode-specific settings (creation vs import); the final branch raises
  # for any unknown mode
  if mode == constants.INSTANCE_CREATE:
    no_install = opts.no_install
  elif mode == constants.INSTANCE_IMPORT:
    src_node = opts.src_node
    src_path = opts.src_dir
    raise errors.ProgrammerError("Invalid creation mode %s" % mode)

  op = opcodes.OpCreateInstance(instance_name=instance,
                                disk_template=opts.disk_template,
                                pnode=pnode, snode=snode,
                                ip_check=opts.ip_check,
                                name_check=opts.name_check,
                                wait_for_sync=opts.wait_for_sync,
                                file_storage_dir=opts.file_storage_dir,
                                file_driver=opts.file_driver,
                                iallocator=opts.iallocator,
                                hypervisor=hypervisor,
                                beparams=opts.beparams,
                                no_install=no_install)

  SubmitOrSend(op, opts)
class _RunWhileClusterStoppedHelper:
  """Helper class for L{RunWhileClusterStopped} to simplify state management

  """
  def __init__(self, feedback_fn, cluster_name, master_node, online_nodes):
    """Initializes this class.

    @type feedback_fn: callable
    @param feedback_fn: Feedback function
    @type cluster_name: string
    @param cluster_name: Cluster name
    @type master_node: string
    @param master_node: Master node name
    @type online_nodes: list
    @param online_nodes: List of names of online nodes

    """
    self.feedback_fn = feedback_fn
    self.cluster_name = cluster_name
    self.master_node = master_node
    self.online_nodes = online_nodes

    # SSH runner used for commands on non-master nodes
    self.ssh = ssh.SshRunner(self.cluster_name)

    self.nonmaster_nodes = [name for name in online_nodes
                            if name != master_node]

    assert self.master_node not in self.nonmaster_nodes

  def _RunCmd(self, node_name, cmd):
    """Runs a command on the local or a remote machine.

    @type node_name: string
    @param node_name: Machine name
    @type cmd: list
    @param cmd: command and its arguments

    """
    if node_name is None or node_name == self.master_node:
      # No need to use SSH
      result = utils.RunCmd(cmd)
      result = self.ssh.Run(node_name, "root", utils.ShellQuoteArgs(cmd))
    # NOTE(review): the failure check guarding the error path below is
    # elided from this excerpt
    errmsg = ["Failed to run command %s" % result.cmd]
    errmsg.append("on node %s" % node_name)
    errmsg.append(": exitcode %s and error %s" %
                  (result.exit_code, result.output))
    raise errors.OpExecError(" ".join(errmsg))

  def Call(self, fn, *args):
    """Call function while all daemons are stopped.

    @type fn: callable
    @param fn: Function to be called

    """
    # Pause watcher by acquiring an exclusive lock on watcher state file
    self.feedback_fn("Blocking watcher")
    watcher_block = utils.FileLock.Open(constants.WATCHER_STATEFILE)
    # TODO: Currently, this just blocks. There's no timeout.
    # TODO: Should it be a shared lock?
    watcher_block.Exclusive(blocking=True)

    # Stop master daemons, so that no new jobs can come in and all running
    # ones can finish
    self.feedback_fn("Stopping master daemons")
    self._RunCmd(None, [constants.DAEMON_UTIL, "stop-master"])

    # Stop daemons on all nodes
    for node_name in self.online_nodes:
      self.feedback_fn("Stopping daemons on %s" % node_name)
      self._RunCmd(node_name, [constants.DAEMON_UTIL, "stop-all"])

    # All daemons are shut down now
    # NOTE(review): the try/finally scaffolding around the call and the
    # restart sequence below is elided from this excerpt
    return fn(self, *args)
    except Exception, err:
      _, errmsg = FormatError(err)
      logging.exception("Caught exception")
      self.feedback_fn(errmsg)

    # Start cluster again, master node last
    for node_name in self.nonmaster_nodes + [self.master_node]:
      self.feedback_fn("Starting daemons on %s" % node_name)
      self._RunCmd(node_name, [constants.DAEMON_UTIL, "start-all"])

    # Resume watcher by releasing the exclusive lock
    watcher_block.Close()
def RunWhileClusterStopped(feedback_fn, fn, *args):
  """Calls a function while all cluster daemons are stopped.

  @type feedback_fn: callable
  @param feedback_fn: Feedback function
  @type fn: callable
  @param fn: Function to be called when daemons are stopped

  """
  feedback_fn("Gathering cluster information")

  # This ensures we're running on the master daemon
  # NOTE(review): the creation of 'cl' (a luxi client) is elided from
  # this excerpt -- confirm against the full file
  (cluster_name, master_node) = \
    cl.QueryConfigValues(["cluster_name", "master_node"])

  online_nodes = GetOnlineNodes([], cl=cl)

  # Don't keep a reference to the client. The master daemon will go away.
  assert master_node in online_nodes

  return _RunWhileClusterStoppedHelper(feedback_fn, cluster_name, master_node,
                                       online_nodes).Call(fn, *args)
def GenerateTable(headers, fields, separator, data,
                  numfields=None, unitfields=None,
  """Prints a table with headers and different fields.

  @type headers: dict
  @param headers: dictionary mapping field names to headers for
      the table
  @type fields: list
  @param fields: the field names corresponding to each row in
      the data field
  @param separator: the separator to be used; if this is None,
      the default 'smart' algorithm is used which computes optimal
      field width, otherwise just the separator is used between
      each field
  @type data: list
  @param data: a list of lists, each sublist being one row to be output
  @type numfields: list
  @param numfields: a list with the fields that hold numeric
      values and thus should be right-aligned
  @type unitfields: list
  @param unitfields: a list with the fields that hold numeric
      values that should be formatted with the units field
  @type units: string or None
  @param units: the units we should use for formatting, or None for
      automatic choice (human-readable for non-separator usage, otherwise
      megabytes); this is a one-letter string

  """
  # NOTE(review): default initialization of numfields/unitfields and
  # some loop scaffolding is elided from this excerpt
  if numfields is None:
  if unitfields is None:
  numfields = utils.FieldSet(*numfields) # pylint: disable-msg=W0142
  unitfields = utils.FieldSet(*unitfields) # pylint: disable-msg=W0142

  for field in fields:
    if headers and field not in headers:
      # TODO: handle better unknown fields (either revert to old
      # style of raising exception, or deal more intelligently with
      # unknown fields)
      headers[field] = field
    if separator is not None:
      format_fields.append("%s")
    elif numfields.Matches(field):
      # numeric fields get right-aligned, the rest left-aligned
      format_fields.append("%*s")
      format_fields.append("%-*s")

  if separator is None:
    mlens = [0 for name in fields]
    format = ' '.join(format_fields)
    # escape literal '%' in the separator before building the format
    format = separator.replace("%", "%%").join(format_fields)

  for idx, val in enumerate(row):
    if unitfields.Matches(fields[idx]):
      except (TypeError, ValueError):
      val = row[idx] = utils.FormatUnit(val, units)
    val = row[idx] = str(val)
    if separator is None:
      # track the maximum width seen for each column
      mlens[idx] = max(mlens[idx], len(val))

  for idx, name in enumerate(fields):
    if separator is None:
      mlens[idx] = max(mlens[idx], len(hdr))
      args.append(mlens[idx])
    result.append(format % tuple(args))

  if separator is None:
    assert len(mlens) == len(fields)
    # the last column doesn't need padding unless right-aligned
    if fields and not numfields.Matches(fields[-1]):

  line = ['-' for _ in fields]
  for idx in range(len(fields)):
    if separator is None:
      args.append(mlens[idx])
    args.append(line[idx])
  result.append(format % tuple(args))
def FormatTimestamp(ts):
  """Formats a given timestamp.

  @type ts: timestamp
  @param ts: a timeval-type timestamp, a tuple of seconds and microseconds

  @rtype: string
  @return: a string with the formatted timestamp

  """
  if not isinstance(ts, (tuple, list)) or len(ts) != 2:
    # anything that isn't a (seconds, microseconds) pair is unknown
    return "?"
  (sec, usec) = ts
  # "%F %T" is "YYYY-MM-DD HH:MM:SS"; append zero-padded microseconds
  return time.strftime("%F %T", time.localtime(sec)) + ".%06d" % usec
def ParseTimespec(value):
  """Parse a time specification.

  The following suffixed will be recognized:

    - s: seconds
    - m: minutes
    - h: hours
    - d: day
    - w: weeks

  Without any suffix, the value will be taken to be in seconds.

  @param value: the time specification (a number, optionally suffixed)
  @rtype: int
  @return: the number of seconds represented by the specification
  @raise errors.OpPrereqError: on empty or unparseable input

  """
  value = str(value)
  if not value:
    raise errors.OpPrereqError("Empty time specification passed")
  # multipliers (in seconds) for each recognized suffix
  suffix_map = {
    's': 1,
    'm': 60,
    'h': 3600,
    'd': 86400,
    'w': 604800,
    }
  if value[-1] not in suffix_map:
    try:
      value = int(value)
    except (TypeError, ValueError):
      raise errors.OpPrereqError("Invalid time specification '%s'" % value)
  else:
    multiplier = suffix_map[value[-1]]
    value = value[:-1]
    if not value: # no data left after stripping the suffix
      raise errors.OpPrereqError("Invalid time specification (only"
                                 " suffix passed)")
    try:
      value = int(value) * multiplier
    except (TypeError, ValueError):
      raise errors.OpPrereqError("Invalid time specification '%s'" % value)
  return value
def GetOnlineNodes(nodes, cl=None, nowarn=False):
  """Returns the names of online nodes.

  This function will also log a warning on stderr with the names of
  the offline nodes that were skipped.

  @param nodes: if not empty, use only this subset of nodes (minus the
      offline ones)
  @param cl: if not None, luxi client to use
  @type nowarn: boolean
  @param nowarn: by default, this function will output a note with the
      offline nodes that are skipped; if this parameter is True the
      note is not displayed

  """
  # NOTE(review): the default client creation when cl is None and the
  # remaining QueryNodes arguments are elided from this excerpt
  result = cl.QueryNodes(names=nodes, fields=["name", "offline"],
  # each row is (name, offline-flag); warn about and drop offline nodes
  offline = [row[0] for row in result if row[1]]
  if offline and not nowarn:
    ToStderr("Note: skipping offline node(s): %s" % utils.CommaJoin(offline))
  return [row[0] for row in result if not row[1]]
def _ToStream(stream, txt, *args):
  """Write a message to a stream, bypassing the logging system

  @type stream: file object
  @param stream: the file to which we should write
  @type txt: str
  @param txt: the message
  @param args: optional %-interpolation arguments for the message

  """
  # interpolate the arguments into the message; the no-argument branch
  # and the trailing-newline handling are elided from this excerpt
  stream.write(txt % args)
def ToStdout(txt, *args):
  """Write a message to stdout only, bypassing the logging system

  This is just a wrapper over _ToStream.

  @type txt: str
  @param txt: the message
  @param args: optional %-interpolation arguments for the message

  """
  out_stream = sys.stdout
  _ToStream(out_stream, txt, *args)
def ToStderr(txt, *args):
  """Write a message to stderr only, bypassing the logging system

  This is just a wrapper over _ToStream.

  @type txt: str
  @param txt: the message
  @param args: optional %-interpolation arguments for the message

  """
  err_stream = sys.stderr
  _ToStream(err_stream, txt, *args)
class JobExecutor(object):
  """Class which manages the submission and execution of multiple jobs.

  Note that instances of this class should not be reused between
  different runs.

  """
  def __init__(self, cl=None, verbose=True, opts=None, feedback_fn=None):
    # NOTE(review): initialization of the job queue, result list, luxi
    # client and opts attribute is elided from this excerpt
    self.verbose = verbose
    self.feedback_fn = feedback_fn

  def QueueJob(self, name, *ops):
    """Record a job for later submit.

    @type name: string
    @param name: a description of the job, will be used in WaitJobSet

    """
    SetGenericOpcodeOpts(ops, self.opts)
    self.queue.append((name, ops))

  def SubmitPending(self):
    """Submit all pending jobs.

    """
    results = self.cl.SubmitManyJobs([row[1] for row in self.queue])
    # pair each submission result with its queued (name, ops) entry
    for (idx, ((status, data), (name, _))) in enumerate(zip(results,
      self.jobs.append((idx, status, data, name))

  def _ChooseJob(self):
    """Choose a non-waiting/queued job to poll next.

    """
    assert self.jobs, "_ChooseJob called with empty job list"

    result = self.cl.QueryJobs([i[2] for i in self.jobs], ["status"])

    # prefer a job that has already left the waiting states, so that
    # polling it is likely to return quickly
    for job_data, status in zip(self.jobs, result):
      if status[0] in (constants.JOB_STATUS_QUEUED,
                       constants.JOB_STATUS_WAITLOCK,
                       constants.JOB_STATUS_CANCELING):
        # job is still waiting
      # good candidate found
      self.jobs.remove(job_data)

    # no job is finished yet; fall back to the first queued job
    return self.jobs.pop(0)

  def GetResults(self):
    """Wait for and return the results of all jobs.

    @rtype: list
    @return: list of tuples (success, job results), in the same order
        as the submitted jobs; if a job has failed, instead of the result
        there will be the error message

    """
    # NOTE(review): the surrounding conditionals/loop scaffolding and
    # initialization of 'results' and 'success' are elided from this
    # excerpt
    self.SubmitPending()
    ok_jobs = [row[2] for row in self.jobs if row[1]]
    ToStdout("Submitted jobs %s", utils.CommaJoin(ok_jobs))

    # first, remove any non-submitted jobs
    self.jobs, failures = utils.partition(self.jobs, lambda x: x[1])
    for idx, _, jid, name in failures:
      ToStderr("Failed to submit job for %s: %s", name, jid)
      results.append((idx, False, jid))

    (idx, _, jid, name) = self._ChooseJob()
    ToStdout("Waiting for job %s for %s...", jid, name)
    job_result = PollJob(jid, cl=self.cl, feedback_fn=self.feedback_fn)
    except (errors.GenericError, luxi.ProtocolError), err:
      _, job_result = FormatError(err)
      # the error message will always be shown, verbose or not
      ToStderr("Job %s for %s has failed: %s", jid, name, job_result)

    results.append((idx, success, job_result))

    # sort based on the index, then drop it
    results = [i[1:] for i in results]

  def WaitOrShow(self, wait):
    """Wait for job results or only print the job IDs.

    @type wait: boolean
    @param wait: whether to wait or not

    """
    # NOTE(review): the branch selecting between waiting and submit-only
    # behaviour is elided from this excerpt
    return self.GetResults()
    self.SubmitPending()
    for status, result, name in self.jobs:
      ToStdout("%s: %s", result, name)
      ToStderr("Failure for %s: %s", name, result)