]
-def _ExpandMultiNames(mode, names):
+def _ExpandMultiNames(mode, names, client=None):
"""Expand the given names using the passed mode.
For _SHUTDOWN_CLUSTER, all instances will be returned. For
@raise errors.OpPrereqError: for invalid input parameters
"""
+ if client is None:
+ client = GetClient()
if mode == _SHUTDOWN_CLUSTER:
if names:
raise errors.OpPrereqError("Cluster filter mode takes no arguments")
- client = GetClient()
- idata = client.QueryInstances([], ["name"])
+ idata = client.QueryInstances([], ["name"], False)
inames = [row[0] for row in idata]
elif mode in (_SHUTDOWN_NODES_BOTH,
_SHUTDOWN_NODES_SEC):
if not names:
raise errors.OpPrereqError("No node names passed")
- client = GetClient()
- ndata = client.QueryNodes(names, ["name", "pinst_list", "sinst_list"])
+ ndata = client.QueryNodes(names, ["name", "pinst_list", "sinst_list"],
+ False)
ipri = [row[1] for row in ndata]
pri_names = list(itertools.chain(*ipri))
isec = [row[2] for row in ndata]
elif mode == _SHUTDOWN_INSTANCES:
if not names:
raise errors.OpPrereqError("No instance names passed")
- client = GetClient()
- idata = client.QueryInstances(names, ["name"])
+ idata = client.QueryInstances(names, ["name"], False)
inames = [row[0] for row in idata]
else:
return inames
-def _ConfirmOperation(inames, text):
+def _ConfirmOperation(inames, text, extra=""):
"""Ask the user to confirm an operation on a list of instances.
This function is used to request confirmation for doing an operation
"""
count = len(inames)
- msg = ("The %s will operate on %d instances.\n"
- "Do you want to continue?" % (text, count))
+ msg = ("The %s will operate on %d instances.\n%s"
+ "Do you want to continue?" % (text, count, extra))
affected = ("\nAffected instances:\n" +
"\n".join([" %s" % name for name in inames]))
return choice
-def _TransformPath(user_input):
- """Transform a user path into a canonical value.
+def _EnsureInstancesExist(client, names):
+ """Check for and ensure the given instance names exist.
- This function transforms the a path passed as textual information
- into the constants that the LU code expects.
+ This function will raise an OpPrereqError in case they don't
+ exist. Otherwise it will exit cleanly.
- """
- if user_input:
- if user_input.lower() == "default":
- result_path = constants.VALUE_DEFAULT
- elif user_input.lower() == "none":
- result_path = constants.VALUE_NONE
- else:
- if not os.path.isabs(user_input):
- raise errors.OpPrereqError("Path '%s' is not an absolute filename" %
- user_input)
- result_path = user_input
- else:
- result_path = constants.VALUE_DEFAULT
+ @type client: L{ganeti.luxi.Client}
+ @param client: the client to use for the query
+ @type names: list
+ @param names: the list of instance names to query
+ @raise errors.OpPrereqError: in case any instance is missing
- return result_path
+ """
+  # TODO: change LUQueryInstances so that it actually returns None
+ # instead of raising an exception, or devise a better mechanism
+ result = client.QueryInstances(names, ["name"], False)
+ for orig_name, row in zip(names, result):
+ if row[0] is None:
+ raise errors.OpPrereqError("Instance '%s' does not exist" % orig_name)
def ListInstances(opts, args):
else:
selected_fields = opts.output.split(",")
- output = GetClient().QueryInstances([], selected_fields)
+ output = GetClient().QueryInstances(args, selected_fields, opts.do_locking)
if not opts.no_headers:
headers = {
"oper_state": "Running",
"oper_ram": "Memory", "disk_template": "Disk_template",
"ip": "IP_address", "mac": "MAC_address",
+ "nic_mode": "NIC_Mode", "nic_link": "NIC_Link",
"bridge": "Bridge",
"sda_size": "Disk/0", "sdb_size": "Disk/1",
+ "disk_usage": "DiskUsage",
"status": "Status", "tags": "Tags",
"network_port": "Network_port",
"hv/kernel_path": "Kernel_path",
"be/auto_balance": "Auto_balance",
"disk.count": "Disks", "disk.sizes": "Disk_sizes",
"nic.count": "NICs", "nic.ips": "NIC_IPs",
+ "nic.modes": "NIC_modes", "nic.links": "NIC_links",
"nic.bridges": "NIC_bridges", "nic.macs": "NIC_MACs",
}
else:
numfields = ["be/memory", "oper_ram", "sd(a|b)_size", "be/vcpus",
"serial_no", "(disk|nic)\.count", "disk\.size/.*"]
- list_type_fields = ("tags", "disk.sizes",
- "nic.macs", "nic.ips", "nic.bridges")
+ list_type_fields = ("tags", "disk.sizes", "nic.macs", "nic.ips",
+ "nic.modes", "nic.links", "nic.bridges")
# change raw values to nicer strings
for row in output:
for idx, field in enumerate(selected_fields):
except ValueError, err:
raise errors.OpPrereqError("Invalid NIC index passed: %s" % str(err))
nics = [{}] * nic_max
- for nidx, ndict in opts.nics.items():
+ for nidx, ndict in opts.nics:
nidx = int(nidx)
nics[nidx] = ndict
elif opts.no_nics:
nics = [{}]
if opts.disk_template == constants.DT_DISKLESS:
- if opts.disks:
+ if opts.disks or opts.sd_size is not None:
raise errors.OpPrereqError("Diskless instance but disk"
" information passed")
disks = []
else:
- if not opts.disks:
+ if not opts.disks and not opts.sd_size:
raise errors.OpPrereqError("No disk information specified")
+ if opts.disks and opts.sd_size is not None:
+ raise errors.OpPrereqError("Please use either the '--disk' or"
+ " '-s' option")
+ if opts.sd_size is not None:
+ opts.disks = [(0, {"size": opts.sd_size})]
try:
disk_max = max(int(didx[0])+1 for didx in opts.disks)
except ValueError, err:
(didx, err))
disks[didx] = ddict
- ValidateBeParams(opts.beparams)
-
-## kernel_path = _TransformPath(opts.kernel_path)
-## initrd_path = _TransformPath(opts.initrd_path)
-
-## hvm_acpi = opts.hvm_acpi == _VALUE_TRUE
-## hvm_pae = opts.hvm_pae == _VALUE_TRUE
-
-## if ((opts.hvm_cdrom_image_path is not None) and
-## (opts.hvm_cdrom_image_path.lower() == constants.VALUE_NONE)):
-## hvm_cdrom_image_path = None
-## else:
-## hvm_cdrom_image_path = opts.hvm_cdrom_image_path
+ utils.ForceDictType(opts.beparams, constants.BES_PARAMETER_TYPES)
+ utils.ForceDictType(hvparams, constants.HVS_PARAMETER_TYPES)
op = opcodes.OpCreateInstance(instance_name=instance,
disks=disks,
"iallocator": None,
"primary_node": None,
"secondary_node": None,
- "ip": 'none',
- "mac": 'auto',
- "bridge": None,
+ "nics": None,
"start": True,
"ip_check": True,
"hypervisor": None,
+ "hvparams": {},
"file_storage_dir": None,
"file_driver": 'loop'}
raise errors.OpPrereqError('You have to provide at least a primary_node'
' or an iallocator.')
- if (spec['hypervisor'] and
- not isinstance(spec['hypervisor'], dict)):
+ if (spec['hvparams'] and
+ not isinstance(spec['hvparams'], dict)):
raise errors.OpPrereqError('Hypervisor parameters must be a dict.')
json_filename = args[0]
- fd = open(json_filename, 'r')
try:
+ fd = open(json_filename, 'r')
instance_data = simplejson.load(fd)
- finally:
fd.close()
+ except Exception, err:
+ ToStderr("Can't parse the instance definition file: %s" % str(err))
+ return 1
+
+ jex = JobExecutor()
# Iterate over the instances and do:
# * Populate the specs with default value
specs = _PopulateWithDefaults(specs)
_Validate(specs)
- hypervisor = None
- hvparams = {}
- if specs['hypervisor']:
- hypervisor, hvparams = specs['hypervisor'].iteritems()
+ hypervisor = specs['hypervisor']
+ hvparams = specs['hvparams']
disks = []
for elem in specs['disk_size']:
(elem, name, err))
disks.append({"size": size})
- nic0 = {'ip': specs['ip'], 'bridge': specs['bridge'], 'mac': specs['mac']}
+ utils.ForceDictType(specs['backend'], constants.BES_PARAMETER_TYPES)
+ utils.ForceDictType(hvparams, constants.HVS_PARAMETER_TYPES)
+
+ tmp_nics = []
+ for field in ('ip', 'mac', 'mode', 'link', 'bridge'):
+ if field in specs:
+ if not tmp_nics:
+ tmp_nics.append({})
+ tmp_nics[0][field] = specs[field]
+
+ if specs['nics'] is not None and tmp_nics:
+ raise errors.OpPrereqError("'nics' list incompatible with using"
+ " individual nic fields as well")
+ elif specs['nics'] is not None:
+ tmp_nics = specs['nics']
+ elif not tmp_nics:
+ tmp_nics = [{}]
op = opcodes.OpCreateInstance(instance_name=name,
disks=disks,
os_type=specs['os'],
pnode=specs['primary_node'],
snode=specs['secondary_node'],
- nics=[nic0],
+ nics=tmp_nics,
start=specs['start'],
ip_check=specs['ip_check'],
wait_for_sync=True,
file_storage_dir=specs['file_storage_dir'],
file_driver=specs['file_driver'])
- ToStdout("%s: %s", name, cli.SendJob([op]))
+ jex.QueueJob(name, op)
+ # we never want to wait, just show the submitted job IDs
+ jex.WaitOrShow(False)
return 0
@return: the desired exit code
"""
- instance_name = args[0]
+ # first, compute the desired name list
+ if opts.multi_mode is None:
+ opts.multi_mode = _SHUTDOWN_INSTANCES
+ inames = _ExpandMultiNames(opts.multi_mode, args)
+ if not inames:
+ raise errors.OpPrereqError("Selection filter does not match any instances")
+
+ # second, if requested, ask for an OS
if opts.select_os is True:
op = opcodes.OpDiagnoseOS(output_fields=["name", "valid"], names=[])
result = SubmitOpCode(op)
number = number + 1
choices.append(('x', 'exit', 'Exit gnt-instance reinstall'))
- selected = AskUser("Enter OS template name or number (or x to abort):",
+ selected = AskUser("Enter OS template number (or x to abort):",
choices)
if selected == 'exit':
- ToStdout("User aborted reinstall, exiting")
+ ToStderr("User aborted reinstall, exiting")
return 1
os_name = selected
else:
os_name = opts.os
- if not opts.force:
- usertext = ("This will reinstall the instance %s and remove"
- " all data. Continue?") % instance_name
- if not AskUser(usertext):
+ # third, get confirmation: multi-reinstall requires --force-multi
+ # *and* --force, single-reinstall just --force
+ multi_on = opts.multi_mode != _SHUTDOWN_INSTANCES or len(inames) > 1
+ if multi_on:
+ warn_msg = "Note: this will remove *all* data for the below instances!\n"
+ if not ((opts.force_multi and opts.force) or
+ _ConfirmOperation(inames, "reinstall", extra=warn_msg)):
return 1
-
- op = opcodes.OpReinstallInstance(instance_name=instance_name,
- os_type=os_name)
- SubmitOrSend(op, opts)
-
+ else:
+ if not opts.force:
+ usertext = ("This will reinstall the instance %s and remove"
+ " all data. Continue?") % inames[0]
+ if not AskUser(usertext):
+ return 1
+
+ jex = JobExecutor(verbose=multi_on)
+ for instance_name in inames:
+ op = opcodes.OpReinstallInstance(instance_name=instance_name,
+ os_type=os_name)
+ jex.QueueJob(instance_name, op)
+
+ jex.WaitOrShow(not opts.submit_only)
return 0
"""
instance_name = args[0]
force = opts.force
+ cl = GetClient()
if not force:
+ _EnsureInstancesExist(cl, [instance_name])
+
usertext = ("This will remove the volumes of the instance %s"
" (including mirrors), thus removing all the data"
" of the instance. Continue?") % instance_name
op = opcodes.OpRemoveInstance(instance_name=instance_name,
ignore_failures=opts.ignore_failures)
- SubmitOrSend(op, opts)
+ SubmitOrSend(op, opts, cl=cl)
return 0
@return: the desired exit code
"""
+ cl = GetClient()
if opts.multi_mode is None:
opts.multi_mode = _SHUTDOWN_INSTANCES
- inames = _ExpandMultiNames(opts.multi_mode, args)
+ inames = _ExpandMultiNames(opts.multi_mode, args, client=cl)
if not inames:
raise errors.OpPrereqError("Selection filter does not match any instances")
multi_on = opts.multi_mode != _SHUTDOWN_INSTANCES or len(inames) > 1
if not (opts.force_multi or not multi_on
or _ConfirmOperation(inames, "startup")):
return 1
+ jex = cli.JobExecutor(verbose=multi_on, cl=cl)
for name in inames:
op = opcodes.OpStartupInstance(instance_name=name,
- force=opts.force,
- extra_args=opts.extra_args)
- if multi_on:
- ToStdout("Starting up %s", name)
- try:
- SubmitOrSend(op, opts)
- except JobSubmittedException, err:
- _, txt = FormatError(err)
- ToStdout("%s", txt)
+ force=opts.force)
+ # do not add these parameters to the opcode unless they're defined
+ if opts.hvparams:
+ op.hvparams = opts.hvparams
+ if opts.beparams:
+ op.beparams = opts.beparams
+ jex.QueueJob(name, op)
+ jex.WaitOrShow(not opts.submit_only)
return 0
@return: the desired exit code
"""
+ cl = GetClient()
if opts.multi_mode is None:
opts.multi_mode = _SHUTDOWN_INSTANCES
- inames = _ExpandMultiNames(opts.multi_mode, args)
+ inames = _ExpandMultiNames(opts.multi_mode, args, client=cl)
if not inames:
raise errors.OpPrereqError("Selection filter does not match any instances")
multi_on = opts.multi_mode != _SHUTDOWN_INSTANCES or len(inames) > 1
if not (opts.force_multi or not multi_on
or _ConfirmOperation(inames, "reboot")):
return 1
+ jex = JobExecutor(verbose=multi_on, cl=cl)
for name in inames:
op = opcodes.OpRebootInstance(instance_name=name,
reboot_type=opts.reboot_type,
ignore_secondaries=opts.ignore_secondaries)
-
- SubmitOrSend(op, opts)
+ jex.QueueJob(name, op)
+ jex.WaitOrShow(not opts.submit_only)
return 0
@return: the desired exit code
"""
+ cl = GetClient()
if opts.multi_mode is None:
opts.multi_mode = _SHUTDOWN_INSTANCES
- inames = _ExpandMultiNames(opts.multi_mode, args)
+ inames = _ExpandMultiNames(opts.multi_mode, args, client=cl)
if not inames:
raise errors.OpPrereqError("Selection filter does not match any instances")
multi_on = opts.multi_mode != _SHUTDOWN_INSTANCES or len(inames) > 1
if not (opts.force_multi or not multi_on
or _ConfirmOperation(inames, "shutdown")):
return 1
+
+ jex = cli.JobExecutor(verbose=multi_on, cl=cl)
for name in inames:
op = opcodes.OpShutdownInstance(instance_name=name)
- if multi_on:
- ToStdout("Shutting down %s", name)
- try:
- SubmitOrSend(op, opts)
- except JobSubmittedException, err:
- _, txt = FormatError(err)
- ToStdout("%s", txt)
+ jex.QueueJob(name, op)
+ jex.WaitOrShow(not opts.submit_only)
return 0
@return: the desired exit code
"""
+ cl = GetClient()
instance_name = args[0]
force = opts.force
if not force:
+ _EnsureInstancesExist(cl, [instance_name])
+
usertext = ("Failover will happen to image %s."
" This requires a shutdown of the instance. Continue?" %
(instance_name,))
op = opcodes.OpFailoverInstance(instance_name=instance_name,
ignore_consistency=opts.ignore_consistency)
- SubmitOrSend(op, opts)
+ SubmitOrSend(op, opts, cl=cl)
return 0
@return: the desired exit code
"""
+ cl = GetClient()
instance_name = args[0]
force = opts.force
if not force:
+ _EnsureInstancesExist(cl, [instance_name])
+
if opts.cleanup:
usertext = ("Instance %s will be recovered from a failed migration."
" Note that the migration procedure (including cleanup)" %
op = opcodes.OpMigrateInstance(instance_name=instance_name, live=opts.live,
cleanup=opts.cleanup)
- SubmitOpCode(op)
+ SubmitOpCode(op, cl=cl)
return 0
@return: the desired exit code
"""
+ if not args and not opts.show_all:
+ ToStderr("No instance selected."
+ " Please pass in --all if you want to query all instances.\n"
+ "Note that this can take a long time on a big cluster.")
+ return 1
+ elif args and opts.show_all:
+ ToStderr("Cannot use --all if you specify instance names.")
+ return 1
+
retcode = 0
op = opcodes.OpQueryInstanceData(instances=args, static=opts.static)
result = SubmitOpCode(op)
if instance.has_key("network_port"):
buf.write(" Allocated network port: %s\n" % instance["network_port"])
buf.write(" Hypervisor: %s\n" % instance["hypervisor"])
- if instance["hypervisor"] == constants.HT_XEN_PVM:
- hvattrs = ((constants.HV_KERNEL_PATH, "kernel path"),
- (constants.HV_INITRD_PATH, "initrd path"))
- elif instance["hypervisor"] == constants.HT_XEN_HVM:
- hvattrs = ((constants.HV_BOOT_ORDER, "boot order"),
- (constants.HV_ACPI, "ACPI"),
- (constants.HV_PAE, "PAE"),
- (constants.HV_CDROM_IMAGE_PATH, "virtual CDROM"),
- (constants.HV_NIC_TYPE, "NIC type"),
- (constants.HV_DISK_TYPE, "Disk type"),
- (constants.HV_VNC_BIND_ADDRESS, "VNC bind address"),
- )
- # custom console information for HVM
- vnc_bind_address = instance["hv_actual"][constants.HV_VNC_BIND_ADDRESS]
- if vnc_bind_address == constants.BIND_ADDRESS_GLOBAL:
- vnc_console_port = "%s:%s" % (instance["pnode"],
- instance["network_port"])
- elif vnc_bind_address == constants.LOCALHOST_IP_ADDRESS:
- vnc_console_port = "%s:%s on node %s" % (vnc_bind_address,
- instance["network_port"],
- instance["pnode"])
+
+ # custom VNC console information
+ vnc_bind_address = instance["hv_actual"].get(constants.HV_VNC_BIND_ADDRESS,
+ None)
+ if vnc_bind_address:
+ port = instance["network_port"]
+ display = int(port) - constants.VNC_BASE_PORT
+ if display > 0 and vnc_bind_address == constants.BIND_ADDRESS_GLOBAL:
+ vnc_console_port = "%s:%s (display %s)" % (instance["pnode"],
+ port,
+ display)
+ elif display > 0 and utils.IsValidIP(vnc_bind_address):
+ vnc_console_port = ("%s:%s (node %s) (display %s)" %
+ (vnc_bind_address, port,
+ instance["pnode"], display))
else:
- vnc_console_port = "%s:%s" % (vnc_bind_address,
- instance["network_port"])
+ # vnc bind address is a file
+ vnc_console_port = "%s:%s" % (instance["pnode"],
+ vnc_bind_address)
buf.write(" - console connection: vnc to %s\n" % vnc_console_port)
- else:
- # auto-handle other hypervisor types
- hvattrs = [(key, key) for key in instance["hv_actual"]]
-
- for key, desc in hvattrs:
+ for key in instance["hv_actual"]:
if key in instance["hv_instance"]:
val = instance["hv_instance"][key]
else:
val = "default (%s)" % instance["hv_actual"][key]
- buf.write(" - %s: %s\n" % (desc, val))
+ buf.write(" - %s: %s\n" % (key, val))
buf.write(" Hardware:\n")
buf.write(" - VCPUs: %d\n" %
instance["be_actual"][constants.BE_VCPUS])
buf.write(" - memory: %dMiB\n" %
instance["be_actual"][constants.BE_MEMORY])
buf.write(" - NICs:\n")
- for idx, (mac, ip, bridge) in enumerate(instance["nics"]):
- buf.write(" - nic/%d: MAC: %s, IP: %s, bridge: %s\n" %
- (idx, mac, ip, bridge))
+ for idx, (mac, ip, mode, link) in enumerate(instance["nics"]):
+ buf.write(" - nic/%d: MAC: %s, IP: %s, mode: %s, link: %s\n" %
+ (idx, mac, ip, mode, link))
buf.write(" Disks:\n")
for idx, device in enumerate(instance["disks"]):
return 1
for param in opts.beparams:
- if opts.beparams[param].lower() == "default":
- opts.beparams[param] = constants.VALUE_DEFAULT
- elif opts.beparams[param].lower() == "none":
- opts.beparams[param] = constants.VALUE_NONE
- elif param == constants.BE_MEMORY:
- opts.beparams[constants.BE_MEMORY] = \
- utils.ParseUnit(opts.beparams[constants.BE_MEMORY])
+ if isinstance(opts.beparams[param], basestring):
+ if opts.beparams[param].lower() == "default":
+ opts.beparams[param] = constants.VALUE_DEFAULT
+
+ utils.ForceDictType(opts.beparams, constants.BES_PARAMETER_TYPES,
+ allowed_values=[constants.VALUE_DEFAULT])
for param in opts.hypervisor:
- if opts.hypervisor[param].lower() == "default":
- opts.hypervisor[param] = constants.VALUE_DEFAULT
- elif opts.hypervisor[param].lower() == "none":
- opts.hypervisor[param] = constants.VALUE_NONE
+ if isinstance(opts.hypervisor[param], basestring):
+ if opts.hypervisor[param].lower() == "default":
+ opts.hypervisor[param] = constants.VALUE_DEFAULT
+
+ utils.ForceDictType(opts.hypervisor, constants.HVS_PARAMETER_TYPES,
+ allowed_values=[constants.VALUE_DEFAULT])
for idx, (nic_op, nic_dict) in enumerate(opts.nics):
try:
make_option("-t", "--disk-template", dest="disk_template",
help="Custom disk setup (diskless, file, plain or drbd)",
default=None, metavar="TEMPL"),
+ cli_option("-s", "--os-size", dest="sd_size", help="Disk size for a"
+ " single-disk configuration, when not using the --disk option,"
+ " in MiB unless a suffix is used",
+ default=None, type="unit", metavar="<size>"),
ikv_option("--disk", help="Disk information",
default=[], dest="disks",
action="append",
make_option("-s", "--static", dest="static",
action="store_true", default=False,
help="Only show configuration data, not runtime data"),
- ], "[-s] [<instance>...]",
+ make_option("--all", dest="show_all",
+ default=False, action="store_true",
+ help="Show info on all instances on the cluster."
+ " This can take a long time to run, use wisely."),
+ ], "[-s] {--all | <instance>...}",
"Show information on the specified instance(s)"),
- 'list': (ListInstances, ARGS_NONE,
- [DEBUG_OPT, NOHDR_OPT, SEP_OPT, USEUNITS_OPT, FIELDS_OPT], "",
+ 'list': (ListInstances, ARGS_ANY,
+ [DEBUG_OPT, NOHDR_OPT, SEP_OPT, USEUNITS_OPT, FIELDS_OPT, SYNC_OPT],
+ "[<instance>...]",
"Lists the instances and their status. The available fields are"
" (see the man page for details): status, oper_state, oper_ram,"
" name, os, pnode, snodes, admin_state, admin_ram, disk_template,"
- " ip, mac, bridge, sda_size, sdb_size, vcpus, serial_no,"
+ " ip, mac, mode, link, sda_size, sdb_size, vcpus, serial_no,"
" hypervisor."
" The default field"
" list is (in order): %s." % ", ".join(_LIST_DEF_FIELDS),
),
- 'reinstall': (ReinstallInstance, ARGS_ONE,
+ 'reinstall': (ReinstallInstance, ARGS_ANY,
[DEBUG_OPT, FORCE_OPT, os_opt,
+ m_force_multi,
+ m_node_opt, m_pri_node_opt, m_sec_node_opt,
+ m_clust_opt, m_inst_opt,
make_option("--select-os", dest="select_os",
action="store_true", default=False,
help="Interactive OS reinstall, lists available"
help=("Replace the disk(s) on the secondary"
" node (only for the drbd template)")),
make_option("--disks", dest="disks", default=None,
- help=("Comma-separated list of disks"
- " to replace (e.g. sda) (optional,"
- " defaults to all disks")),
- make_option("-i", "--iallocator", metavar="<NAME>",
+ help="Comma-separated list of disks"
+ " indices to replace (e.g. 0,2) (optional,"
+ " defaults to all disks)"),
+ make_option("-I", "--iallocator", metavar="<NAME>",
help="Select new secondary for the instance"
" automatically using the"
" <NAME> iallocator plugin (enables"
default=None, type="string"),
SUBMIT_OPT,
],
- "[-s|-p|-n NODE] <instance>",
+ "[-s|-p|-n NODE|-I NAME] <instance>",
"Replaces all disks for the instance"),
'modify': (SetInstanceParams, ARGS_ONE,
[DEBUG_OPT, FORCE_OPT,
"<instance>", "Stops an instance"),
'startup': (StartupInstance, ARGS_ANY,
[DEBUG_OPT, FORCE_OPT, m_force_multi,
- make_option("-e", "--extra", dest="extra_args",
- help="Extra arguments for the instance's kernel",
- default=None, type="string", metavar="<PARAMS>"),
m_node_opt, m_pri_node_opt, m_sec_node_opt,
m_clust_opt, m_inst_opt,
SUBMIT_OPT,
+ keyval_option("-H", "--hypervisor", type="keyval",
+ default={}, dest="hvparams",
+ help="Temporary hypervisor parameters"),
+ keyval_option("-B", "--backend", type="keyval",
+ default={}, dest="beparams",
+ help="Temporary backend parameters"),
],
- "<instance>", "Starts an instance"),
+ "<instance>", "Starts an instance"),
'reboot': (RebootInstance, ARGS_ANY,
[DEBUG_OPT, m_force_multi,
- make_option("-e", "--extra", dest="extra_args",
- help="Extra arguments for the instance's kernel",
- default=None, type="string", metavar="<PARAMS>"),
make_option("-t", "--type", dest="reboot_type",
help="Type of reboot: soft/hard/full",
default=constants.INSTANCE_REBOOT_HARD,