#
#
-# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011 Google Inc.
+# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
"""Instance related commands"""
-# pylint: disable-msg=W0401,W0614,C0103
+# pylint: disable=W0401,W0614,C0103
# W0401: Wildcard import ganeti.cli
# W0614: Unused import %s from wildcard import (since we need cli)
# C0103: Invalid name gnt-instance
+import copy
import itertools
import simplejson
import logging
-from cStringIO import StringIO
from ganeti.cli import *
from ganeti import opcodes
from ganeti import netutils
from ganeti import ssh
from ganeti import objects
+from ganeti import ht
_EXPAND_CLUSTER = "cluster"
_EXPAND_INSTANCES = "instances"
_EXPAND_INSTANCES_BY_TAGS = "instances-by-tags"
-_EXPAND_NODES_TAGS_MODES = frozenset([
+_EXPAND_NODES_TAGS_MODES = compat.UniqueFrozenset([
_EXPAND_NODES_BOTH_BY_TAGS,
_EXPAND_NODES_PRI_BY_TAGS,
_EXPAND_NODES_SEC_BY_TAGS,
])
-
#: default list of options for L{ListInstances}
_LIST_DEF_FIELDS = [
"name", "hypervisor", "os", "pnode", "status", "oper_ram",
]
+_MISSING = object()
+_ENV_OVERRIDE = compat.UniqueFrozenset(["list"])
+
+_INST_DATA_VAL = ht.TListOf(ht.TDict)
+
def _ExpandMultiNames(mode, names, client=None):
"""Expand the given names using the passed mode.
@raise errors.OpPrereqError: for invalid input parameters
"""
- # pylint: disable-msg=W0142
+ # pylint: disable=W0142
if client is None:
client = GetClient()
if not names:
raise errors.OpPrereqError("No node names passed", errors.ECODE_INVAL)
ndata = client.QueryNodes(names, ["name", "pinst_list", "sinst_list"],
- False)
+ False)
ipri = [row[1] for row in ndata]
pri_names = list(itertools.chain(*ipri))
fmtoverride = dict.fromkeys(["tags", "disk.sizes", "nic.macs", "nic.ips",
"nic.modes", "nic.links", "nic.bridges",
+ "nic.networks",
"snodes", "snodes.group", "snodes.group.uuid"],
(lambda value: ",".join(str(item)
for item in value),
def BatchCreate(opts, args):
"""Create instances using a definition file.
- This function reads a json file with instances defined
- in the form::
-
- {"instance-name":{
- "disk_size": [20480],
- "template": "drbd",
- "backend": {
- "memory": 512,
- "vcpus": 1 },
- "os": "debootstrap",
- "primary_node": "firstnode",
- "secondary_node": "secondnode",
- "iallocator": "dumb"}
- }
-
- Note that I{primary_node} and I{secondary_node} have precedence over
- I{iallocator}.
+ This function reads a json file with L{opcodes.OpInstanceCreate}
+ serialisations.
@param opts: the command line options selected by the user
@type args: list
@return: the desired exit code
"""
- _DEFAULT_SPECS = {"disk_size": [20 * 1024],
- "backend": {},
- "iallocator": None,
- "primary_node": None,
- "secondary_node": None,
- "nics": None,
- "start": True,
- "ip_check": True,
- "name_check": True,
- "hypervisor": None,
- "hvparams": {},
- "file_storage_dir": None,
- "force_variant": False,
- "file_driver": "loop"}
-
- def _PopulateWithDefaults(spec):
- """Returns a new hash combined with default values."""
- mydict = _DEFAULT_SPECS.copy()
- mydict.update(spec)
- return mydict
-
- def _Validate(spec):
- """Validate the instance specs."""
- # Validate fields required under any circumstances
- for required_field in ("os", "template"):
- if required_field not in spec:
- raise errors.OpPrereqError('Required field "%s" is missing.' %
- required_field, errors.ECODE_INVAL)
- # Validate special fields
- if spec["primary_node"] is not None:
- if (spec["template"] in constants.DTS_INT_MIRROR and
- spec["secondary_node"] is None):
- raise errors.OpPrereqError("Template requires secondary node, but"
- " there was no secondary provided.",
- errors.ECODE_INVAL)
- elif spec["iallocator"] is None:
- raise errors.OpPrereqError("You have to provide at least a primary_node"
- " or an iallocator.",
- errors.ECODE_INVAL)
-
- if (spec["hvparams"] and
- not isinstance(spec["hvparams"], dict)):
- raise errors.OpPrereqError("Hypervisor parameters must be a dict.",
- errors.ECODE_INVAL)
+ (json_filename,) = args
+ cl = GetClient()
- json_filename = args[0]
try:
instance_data = simplejson.loads(utils.ReadFile(json_filename))
- except Exception, err: # pylint: disable-msg=W0703
+ except Exception, err: # pylint: disable=W0703
ToStderr("Can't parse the instance definition file: %s" % str(err))
return 1
- if not isinstance(instance_data, dict):
- ToStderr("The instance definition file is not in dict format.")
+ if not _INST_DATA_VAL(instance_data):
+ ToStderr("The instance definition file is not %s" % _INST_DATA_VAL)
return 1
- jex = JobExecutor(opts=opts)
+ instances = []
+ possible_params = set(opcodes.OpInstanceCreate.GetAllSlots())
+ for (idx, inst) in enumerate(instance_data):
+ unknown = set(inst.keys()) - possible_params
- # Iterate over the instances and do:
- # * Populate the specs with default value
- # * Validate the instance specs
- i_names = utils.NiceSort(instance_data.keys()) # pylint: disable-msg=E1103
- for name in i_names:
- specs = instance_data[name]
- specs = _PopulateWithDefaults(specs)
- _Validate(specs)
+ if unknown:
+ # TODO: Suggest closest match for more user friendly experience
+ raise errors.OpPrereqError("Unknown fields in definition %s: %s" %
+ (idx, utils.CommaJoin(unknown)),
+ errors.ECODE_INVAL)
- hypervisor = specs["hypervisor"]
- hvparams = specs["hvparams"]
+ op = opcodes.OpInstanceCreate(**inst) # pylint: disable=W0142
+ op.Validate(False)
+ instances.append(op)
- disks = []
- for elem in specs["disk_size"]:
- try:
- size = utils.ParseUnit(elem)
- except (TypeError, ValueError), err:
- raise errors.OpPrereqError("Invalid disk size '%s' for"
- " instance %s: %s" %
- (elem, name, err), errors.ECODE_INVAL)
- disks.append({"size": size})
-
- utils.ForceDictType(specs["backend"], constants.BES_PARAMETER_TYPES)
- utils.ForceDictType(hvparams, constants.HVS_PARAMETER_TYPES)
-
- tmp_nics = []
- for field in constants.INIC_PARAMS:
- if field in specs:
- if not tmp_nics:
- tmp_nics.append({})
- tmp_nics[0][field] = specs[field]
-
- if specs["nics"] is not None and tmp_nics:
- raise errors.OpPrereqError("'nics' list incompatible with using"
- " individual nic fields as well",
- errors.ECODE_INVAL)
- elif specs["nics"] is not None:
- tmp_nics = specs["nics"]
- elif not tmp_nics:
- tmp_nics = [{}]
-
- op = opcodes.OpInstanceCreate(instance_name=name,
- disks=disks,
- disk_template=specs["template"],
- mode=constants.INSTANCE_CREATE,
- os_type=specs["os"],
- force_variant=specs["force_variant"],
- pnode=specs["primary_node"],
- snode=specs["secondary_node"],
- nics=tmp_nics,
- start=specs["start"],
- ip_check=specs["ip_check"],
- name_check=specs["name_check"],
- wait_for_sync=True,
- iallocator=specs["iallocator"],
- hypervisor=hypervisor,
- hvparams=hvparams,
- beparams=specs["backend"],
- file_storage_dir=specs["file_storage_dir"],
- file_driver=specs["file_driver"])
-
- jex.QueueJob(name, op)
- # we never want to wait, just show the submitted job IDs
- jex.WaitOrShow(False)
+ op = opcodes.OpInstanceMultiAlloc(iallocator=opts.iallocator,
+ instances=instances)
+ result = SubmitOrSend(op, opts, cl=cl)
- return 0
+ # Keep track of submitted jobs
+ jex = JobExecutor(cl=cl, opts=opts)
+
+ for (status, job_id) in result[constants.JOB_IDS_KEY]:
+ jex.AddJobId(None, status, job_id)
+
+ results = jex.GetResults()
+ bad_cnt = len([row for row in results if not row[0]])
+ if bad_cnt == 0:
+ ToStdout("All instances created successfully.")
+ rcode = constants.EXIT_SUCCESS
+ else:
+ ToStdout("There were %s errors during the creation.", bad_cnt)
+ rcode = constants.EXIT_FAILURE
+
+ return rcode
def ReinstallInstance(opts, args):
osparams=opts.osparams)
jex.QueueJob(instance_name, op)
- jex.WaitOrShow(not opts.submit_only)
- return 0
+ results = jex.WaitOrShow(not opts.submit_only)
+
+ if compat.all(map(compat.fst, results)):
+ return constants.EXIT_SUCCESS
+ else:
+ return constants.EXIT_FAILURE
def RemoveInstance(opts, args):
"""
instance_name = args[0]
op = opcodes.OpInstanceActivateDisks(instance_name=instance_name,
- ignore_size=opts.ignore_size)
+ ignore_size=opts.ignore_size,
+ wait_for_sync=opts.wait_for_sync)
disks_info = SubmitOrSend(op, opts)
for host, iname, nname in disks_info:
ToStdout("%s:%s:%s", host, iname, nname)
"""
instance_name = args[0]
+
+ disks = []
+
if opts.disks:
- try:
- opts.disks = [int(v) for v in opts.disks.split(",")]
- except (ValueError, TypeError), err:
- ToStderr("Invalid disks value: %s" % str(err))
- return 1
- else:
- opts.disks = []
+ for didx, ddict in opts.disks:
+ didx = int(didx)
+
+ if not ht.TDict(ddict):
+ msg = "Invalid disk/%d value: expected dict, got %s" % (didx, ddict)
+ raise errors.OpPrereqError(msg, errors.ECODE_INVAL)
+
+ if constants.IDISK_SIZE in ddict:
+ try:
+ ddict[constants.IDISK_SIZE] = \
+ utils.ParseUnit(ddict[constants.IDISK_SIZE])
+ except ValueError, err:
+ raise errors.OpPrereqError("Invalid disk size for disk %d: %s" %
+ (didx, err), errors.ECODE_INVAL)
+
+ disks.append((didx, ddict))
+
+ # TODO: Verify modifiable parameters (already done in
+ # LUInstanceRecreateDisks, but it'd be nice to have in the client)
if opts.node:
+ if opts.iallocator:
+ msg = "At most one of either --nodes or --iallocator can be passed"
+ raise errors.OpPrereqError(msg, errors.ECODE_INVAL)
pnode, snode = SplitNodeOption(opts.node)
nodes = [pnode]
if snode is not None:
nodes = []
op = opcodes.OpInstanceRecreateDisks(instance_name=instance_name,
- disks=opts.disks,
- nodes=nodes)
+ disks=disks, nodes=nodes,
+ iallocator=opts.iallocator)
SubmitOrSend(op, opts)
+
return 0
@param opts: the command line options selected by the user
@type args: list
- @param args: should contain two elements, the instance name
- whose disks we grow and the disk name, e.g. I{sda}
+ @param args: should contain three elements, the target instance name,
+ the target disk id, and the target growth
@rtype: int
@return: the desired exit code
except (TypeError, ValueError), err:
raise errors.OpPrereqError("Invalid disk index: %s" % str(err),
errors.ECODE_INVAL)
- amount = utils.ParseUnit(args[2])
+ try:
+ amount = utils.ParseUnit(args[2])
+ except errors.UnitParseError:
+ raise errors.OpPrereqError("Can't parse the given amount '%s'" % args[2],
+ errors.ECODE_INVAL)
op = opcodes.OpInstanceGrowDisk(instance_name=instance,
disk=disk, amount=amount,
- wait_for_sync=opts.wait_for_sync)
+ wait_for_sync=opts.wait_for_sync,
+ absolute=opts.absolute)
SubmitOrSend(op, opts)
return 0
"""
return opcodes.OpInstanceShutdown(instance_name=name,
+ force=opts.force,
timeout=opts.timeout,
ignore_offline_nodes=opts.ignore_offline,
no_remember=opts.no_remember)
op = opcodes.OpInstanceReplaceDisks(instance_name=args[0], disks=disks,
remote_node=new_2ndary, mode=mode,
iallocator=iallocator,
- early_release=opts.early_release)
+ early_release=opts.early_release,
+ ignore_ipolicy=opts.ignore_ipolicy)
SubmitOrSend(op, opts)
return 0
ignore_consistency=opts.ignore_consistency,
shutdown_timeout=opts.shutdown_timeout,
iallocator=iallocator,
- target_node=target_node)
+ target_node=target_node,
+ ignore_ipolicy=opts.ignore_ipolicy)
SubmitOrSend(op, opts, cl=cl)
return 0
op = opcodes.OpInstanceMigrate(instance_name=instance_name, mode=mode,
cleanup=opts.cleanup, iallocator=iallocator,
target_node=target_node,
- allow_failover=opts.allow_failover)
- SubmitOpCode(op, cl=cl, opts=opts)
+ allow_failover=opts.allow_failover,
+ allow_runtime_changes=opts.allow_runtime_chgs,
+ ignore_ipolicy=opts.ignore_ipolicy)
+ SubmitOrSend(op, cl=cl, opts=opts)
return 0
op = opcodes.OpInstanceMove(instance_name=instance_name,
target_node=opts.node,
shutdown_timeout=opts.shutdown_timeout,
- ignore_consistency=opts.ignore_consistency)
+ ignore_consistency=opts.ignore_consistency,
+ ignore_ipolicy=opts.ignore_ipolicy)
SubmitOrSend(op, opts, cl=cl)
return 0
" URL <vnc://%s:%s/>",
console.instance, console.host, console.port,
console.display, console.host, console.port)
+ elif console.kind == constants.CONS_SPICE:
+ feedback_fn("Instance %s has SPICE listening on %s:%s", console.instance,
+ console.host, console.port)
elif console.kind == constants.CONS_SSH:
# Convert to string if not already one
if isinstance(console.command, basestring):
return constants.EXIT_SUCCESS
-def _FormatLogicalID(dev_type, logical_id, roman):
+def _FormatDiskDetails(dev_type, dev, roman):
"""Formats the logical_id of a disk.
"""
if dev_type == constants.LD_DRBD8:
- node_a, node_b, port, minor_a, minor_b, key = logical_id
+ drbd_info = dev["drbd_info"]
data = [
- ("nodeA", "%s, minor=%s" % (node_a, compat.TryToRoman(minor_a,
- convert=roman))),
- ("nodeB", "%s, minor=%s" % (node_b, compat.TryToRoman(minor_b,
- convert=roman))),
- ("port", compat.TryToRoman(port, convert=roman)),
- ("auth key", key),
+ ("nodeA", "%s, minor=%s" %
+ (drbd_info["primary_node"],
+ compat.TryToRoman(drbd_info["primary_minor"],
+ convert=roman))),
+ ("nodeB", "%s, minor=%s" %
+ (drbd_info["secondary_node"],
+ compat.TryToRoman(drbd_info["secondary_minor"],
+ convert=roman))),
+ ("port", str(compat.TryToRoman(drbd_info["port"], convert=roman))),
+ ("auth key", str(drbd_info["secret"])),
]
elif dev_type == constants.LD_LV:
- vg_name, lv_name = logical_id
+ vg_name, lv_name = dev["logical_id"]
data = ["%s/%s" % (vg_name, lv_name)]
else:
- data = [str(logical_id)]
+ data = [str(dev["logical_id"])]
return data
+def _FormatListInfo(data):
+ return list(str(i) for i in data)
+
+
def _FormatBlockDevInfo(idx, top_level, dev, roman):
"""Show block device information.
if isinstance(dev["size"], int):
nice_size = utils.FormatUnit(dev["size"], "h")
else:
- nice_size = dev["size"]
- d1 = ["- %s: %s, size %s" % (txt, dev["dev_type"], nice_size)]
- data = []
+ nice_size = str(dev["size"])
+ data = [(txt, "%s, size %s" % (dev["dev_type"], nice_size))]
if top_level:
+ if dev["spindles"] is not None:
+ data.append(("spindles", dev["spindles"]))
data.append(("access mode", dev["mode"]))
if dev["logical_id"] is not None:
try:
- l_id = _FormatLogicalID(dev["dev_type"], dev["logical_id"], roman)
+ l_id = _FormatDiskDetails(dev["dev_type"], dev, roman)
except ValueError:
l_id = [str(dev["logical_id"])]
if len(l_id) == 1:
else:
data.extend(l_id)
elif dev["physical_id"] is not None:
- data.append("physical_id:")
- data.append([dev["physical_id"]])
+ data.append(("physical_id:", _FormatListInfo(dev["physical_id"])))
if dev["pstatus"]:
data.append(("on primary", helper(dev["dev_type"], dev["pstatus"])))
if dev["sstatus"]:
data.append(("on secondary", helper(dev["dev_type"], dev["sstatus"])))
- if dev["children"]:
- data.append("child devices:")
- for c_idx, child in enumerate(dev["children"]):
- data.append(_FormatBlockDevInfo(c_idx, False, child, roman))
- d1.append(data)
- return d1
-
+ data.append(("name", dev["name"]))
+ data.append(("UUID", dev["uuid"]))
-def _FormatList(buf, data, indent_level):
- """Formats a list of data at a given indent level.
-
- If the element of the list is:
- - a string, it is simply formatted as is
- - a tuple, it will be split into key, value and the all the
- values in a list will be aligned all at the same start column
- - a list, will be recursively formatted
+ if dev["children"]:
+ data.append(("child devices", [
+ _FormatBlockDevInfo(c_idx, False, child, roman)
+ for c_idx, child in enumerate(dev["children"])
+ ]))
+ return data
- @type buf: StringIO
- @param buf: the buffer into which we write the output
- @param data: the list to format
- @type indent_level: int
- @param indent_level: the indent level to format at
- """
- max_tlen = max([len(elem[0]) for elem in data
- if isinstance(elem, tuple)] or [0])
- for elem in data:
- if isinstance(elem, basestring):
- buf.write("%*s%s\n" % (2*indent_level, "", elem))
- elif isinstance(elem, tuple):
- key, value = elem
- spacer = "%*s" % (max_tlen - len(key), "")
- buf.write("%*s%s:%s %s\n" % (2*indent_level, "", key, spacer, value))
- elif isinstance(elem, list):
- _FormatList(buf, elem, indent_level+1)
+def _FormatInstanceNicInfo(idx, nic):
+ """Helper function for L{_FormatInstanceInfo()}"""
+ (name, uuid, ip, mac, mode, link, vlan, _, netinfo) = nic
+ network_name = None
+ if netinfo:
+ network_name = netinfo["name"]
+ return [
+ ("nic/%d" % idx, ""),
+ ("MAC", str(mac)),
+ ("IP", str(ip)),
+ ("mode", str(mode)),
+ ("link", str(link)),
+ ("vlan", str(vlan)),
+ ("network", str(network_name)),
+ ("UUID", str(uuid)),
+ ("name", str(name)),
+ ]
+
+
+def _FormatInstanceNodesInfo(instance):
+ """Helper function for L{_FormatInstanceInfo()}"""
+ pgroup = ("%s (UUID %s)" %
+ (instance["pnode_group_name"], instance["pnode_group_uuid"]))
+ secs = utils.CommaJoin(("%s (group %s, group UUID %s)" %
+ (name, group_name, group_uuid))
+ for (name, group_name, group_uuid) in
+ zip(instance["snodes"],
+ instance["snodes_group_names"],
+ instance["snodes_group_uuids"]))
+ return [
+ [
+ ("primary", instance["pnode"]),
+ ("group", pgroup),
+ ],
+ [("secondaries", secs)],
+ ]
+
+
+def _GetVncConsoleInfo(instance):
+ """Helper function for L{_FormatInstanceInfo()}"""
+ vnc_bind_address = instance["hv_actual"].get(constants.HV_VNC_BIND_ADDRESS,
+ None)
+ if vnc_bind_address:
+ port = instance["network_port"]
+ display = int(port) - constants.VNC_BASE_PORT
+ if display > 0 and vnc_bind_address == constants.IP4_ADDRESS_ANY:
+ vnc_console_port = "%s:%s (display %s)" % (instance["pnode"],
+ port,
+ display)
+ elif display > 0 and netutils.IP4Address.IsValid(vnc_bind_address):
+ vnc_console_port = ("%s:%s (node %s) (display %s)" %
+ (vnc_bind_address, port,
+ instance["pnode"], display))
+ else:
+ # vnc bind address is a file
+ vnc_console_port = "%s:%s" % (instance["pnode"],
+ vnc_bind_address)
+ ret = "vnc to %s" % vnc_console_port
+ else:
+ ret = None
+ return ret
+
+
+def _FormatInstanceInfo(instance, roman_integers):
+ """Format instance information for L{cli.PrintGenericInfo()}"""
+ istate = "configured to be %s" % instance["config_state"]
+ if instance["run_state"]:
+ istate += ", actual state is %s" % instance["run_state"]
+ info = [
+ ("Instance name", instance["name"]),
+ ("UUID", instance["uuid"]),
+ ("Serial number",
+ str(compat.TryToRoman(instance["serial_no"], convert=roman_integers))),
+ ("Creation time", utils.FormatTime(instance["ctime"])),
+ ("Modification time", utils.FormatTime(instance["mtime"])),
+ ("State", istate),
+ ("Nodes", _FormatInstanceNodesInfo(instance)),
+ ("Operating system", instance["os"]),
+ ("Operating system parameters",
+ FormatParamsDictInfo(instance["os_instance"], instance["os_actual"])),
+ ]
+
+ if "network_port" in instance:
+ info.append(("Allocated network port",
+ str(compat.TryToRoman(instance["network_port"],
+ convert=roman_integers))))
+ info.append(("Hypervisor", instance["hypervisor"]))
+ console = _GetVncConsoleInfo(instance)
+ if console:
+ info.append(("console connection", console))
+ # deprecated "memory" value, kept for one version for compatibility
+ # TODO(ganeti 2.7) remove.
+ be_actual = copy.deepcopy(instance["be_actual"])
+ be_actual["memory"] = be_actual[constants.BE_MAXMEM]
+ info.extend([
+ ("Hypervisor parameters",
+ FormatParamsDictInfo(instance["hv_instance"], instance["hv_actual"])),
+ ("Back-end parameters",
+ FormatParamsDictInfo(instance["be_instance"], be_actual)),
+ ("NICs", [
+ _FormatInstanceNicInfo(idx, nic)
+ for (idx, nic) in enumerate(instance["nics"])
+ ]),
+ ("Disk template", instance["disk_template"]),
+ ("Disks", [
+ _FormatBlockDevInfo(idx, True, device, roman_integers)
+ for (idx, device) in enumerate(instance["disks"])
+ ]),
+ ])
+ return info
def ShowInstanceConfig(opts, args):
ToStdout("No instances.")
return 1
- buf = StringIO()
- retcode = 0
- for instance_name in result:
- instance = result[instance_name]
- buf.write("Instance name: %s\n" % instance["name"])
- buf.write("UUID: %s\n" % instance["uuid"])
- buf.write("Serial number: %s\n" %
- compat.TryToRoman(instance["serial_no"],
- convert=opts.roman_integers))
- buf.write("Creation time: %s\n" % utils.FormatTime(instance["ctime"]))
- buf.write("Modification time: %s\n" % utils.FormatTime(instance["mtime"]))
- buf.write("State: configured to be %s" % instance["config_state"])
- if instance["run_state"]:
- buf.write(", actual state is %s" % instance["run_state"])
- buf.write("\n")
- ##buf.write("Considered for memory checks in cluster verify: %s\n" %
- ## instance["auto_balance"])
- buf.write(" Nodes:\n")
- buf.write(" - primary: %s\n" % instance["pnode"])
- buf.write(" - secondaries: %s\n" % utils.CommaJoin(instance["snodes"]))
- buf.write(" Operating system: %s\n" % instance["os"])
- FormatParameterDict(buf, instance["os_instance"], instance["os_actual"],
- level=2)
- if instance.has_key("network_port"):
- buf.write(" Allocated network port: %s\n" %
- compat.TryToRoman(instance["network_port"],
- convert=opts.roman_integers))
- buf.write(" Hypervisor: %s\n" % instance["hypervisor"])
-
- # custom VNC console information
- vnc_bind_address = instance["hv_actual"].get(constants.HV_VNC_BIND_ADDRESS,
- None)
- if vnc_bind_address:
- port = instance["network_port"]
- display = int(port) - constants.VNC_BASE_PORT
- if display > 0 and vnc_bind_address == constants.IP4_ADDRESS_ANY:
- vnc_console_port = "%s:%s (display %s)" % (instance["pnode"],
- port,
- display)
- elif display > 0 and netutils.IP4Address.IsValid(vnc_bind_address):
- vnc_console_port = ("%s:%s (node %s) (display %s)" %
- (vnc_bind_address, port,
- instance["pnode"], display))
- else:
- # vnc bind address is a file
- vnc_console_port = "%s:%s" % (instance["pnode"],
- vnc_bind_address)
- buf.write(" - console connection: vnc to %s\n" % vnc_console_port)
-
- FormatParameterDict(buf, instance["hv_instance"], instance["hv_actual"],
- level=2)
- buf.write(" Hardware:\n")
- buf.write(" - VCPUs: %s\n" %
- compat.TryToRoman(instance["be_actual"][constants.BE_VCPUS],
- convert=opts.roman_integers))
- buf.write(" - memory: %sMiB\n" %
- compat.TryToRoman(instance["be_actual"][constants.BE_MEMORY],
- convert=opts.roman_integers))
- buf.write(" - NICs:\n")
- for idx, (ip, mac, mode, link) in enumerate(instance["nics"]):
- buf.write(" - nic/%d: MAC: %s, IP: %s, mode: %s, link: %s\n" %
- (idx, mac, ip, mode, link))
- buf.write(" Disk template: %s\n" % instance["disk_template"])
- buf.write(" Disks:\n")
-
- for idx, device in enumerate(instance["disks"]):
- _FormatList(buf, _FormatBlockDevInfo(idx, True, device,
- opts.roman_integers), 2)
-
- ToStdout(buf.getvalue().rstrip("\n"))
+ PrintGenericInfo([
+ _FormatInstanceInfo(instance, opts.roman_integers)
+ for instance in result.values()
+ ])
return retcode
+def _ConvertNicDiskModifications(mods):
+ """Converts NIC/disk modifications from CLI to opcode.
+
+ When L{opcodes.OpInstanceSetParams} was changed to support adding/removing
+ disks at arbitrary indices, its parameter format changed. This function
+ converts legacy requests (e.g. "--net add" or "--disk add:size=4G") to the
+ newer format and adds support for new-style requests (e.g. "--net 4:add").
+
+ @type mods: list of tuples
+ @param mods: Modifications as given by command line parser
+ @rtype: list of tuples
+ @return: Modifications as understood by L{opcodes.OpInstanceSetParams}
+
+ """
+ result = []
+
+ for (identifier, params) in mods:
+ if identifier == constants.DDM_ADD:
+ # Add item as last item (legacy interface)
+ action = constants.DDM_ADD
+ identifier = -1
+ elif identifier == constants.DDM_REMOVE:
+ # Remove last item (legacy interface)
+ action = constants.DDM_REMOVE
+ identifier = -1
+ else:
+ # Modifications and adding/removing at arbitrary indices
+ add = params.pop(constants.DDM_ADD, _MISSING)
+ remove = params.pop(constants.DDM_REMOVE, _MISSING)
+ modify = params.pop(constants.DDM_MODIFY, _MISSING)
+
+ if modify is _MISSING:
+ if not (add is _MISSING or remove is _MISSING):
+ raise errors.OpPrereqError("Cannot add and remove at the same time",
+ errors.ECODE_INVAL)
+ elif add is not _MISSING:
+ action = constants.DDM_ADD
+ elif remove is not _MISSING:
+ action = constants.DDM_REMOVE
+ else:
+ action = constants.DDM_MODIFY
+
+ elif add is _MISSING and remove is _MISSING:
+ action = constants.DDM_MODIFY
+ else:
+ raise errors.OpPrereqError("Cannot modify and add/remove at the"
+ " same time", errors.ECODE_INVAL)
+
+ assert not (constants.DDMS_VALUES_WITH_MODIFY & set(params.keys()))
+
+ if action == constants.DDM_REMOVE and params:
+ raise errors.OpPrereqError("Not accepting parameters on removal",
+ errors.ECODE_INVAL)
+
+ result.append((action, identifier, params))
+
+ return result
+
+
+def _ParseDiskSizes(mods):
+ """Parses disk sizes in parameters.
+
+ """
+ for (action, _, params) in mods:
+ if params and constants.IDISK_SIZE in params:
+ params[constants.IDISK_SIZE] = \
+ utils.ParseUnit(params[constants.IDISK_SIZE])
+ elif action == constants.DDM_ADD:
+ raise errors.OpPrereqError("Missing required parameter 'size'",
+ errors.ECODE_INVAL)
+
+ return mods
+
+
def SetInstanceParams(opts, args):
"""Modifies an instance.
"""
if not (opts.nics or opts.disks or opts.disk_template or
- opts.hvparams or opts.beparams or opts.os or opts.osparams):
+ opts.hvparams or opts.beparams or opts.os or opts.osparams or
+ opts.offline_inst or opts.online_inst or opts.runtime_mem or
+ opts.new_primary_node):
ToStderr("Please give at least one of the parameters.")
return 1
if opts.beparams[param].lower() == "default":
opts.beparams[param] = constants.VALUE_DEFAULT
- utils.ForceDictType(opts.beparams, constants.BES_PARAMETER_TYPES,
+ utils.ForceDictType(opts.beparams, constants.BES_PARAMETER_COMPAT,
allowed_values=[constants.VALUE_DEFAULT])
for param in opts.hvparams:
utils.ForceDictType(opts.hvparams, constants.HVS_PARAMETER_TYPES,
allowed_values=[constants.VALUE_DEFAULT])
- for idx, (nic_op, nic_dict) in enumerate(opts.nics):
- try:
- nic_op = int(nic_op)
- opts.nics[idx] = (nic_op, nic_dict)
- except (TypeError, ValueError):
- pass
-
- for idx, (disk_op, disk_dict) in enumerate(opts.disks):
- try:
- disk_op = int(disk_op)
- opts.disks[idx] = (disk_op, disk_dict)
- except (TypeError, ValueError):
- pass
- if disk_op == constants.DDM_ADD:
- if "size" not in disk_dict:
- raise errors.OpPrereqError("Missing required parameter 'size'",
- errors.ECODE_INVAL)
- disk_dict["size"] = utils.ParseUnit(disk_dict["size"])
+ nics = _ConvertNicDiskModifications(opts.nics)
+ disks = _ParseDiskSizes(_ConvertNicDiskModifications(opts.disks))
if (opts.disk_template and
opts.disk_template in constants.DTS_INT_MIRROR and
" specifying a secondary node")
return 1
+ if opts.offline_inst:
+ offline = True
+ elif opts.online_inst:
+ offline = False
+ else:
+ offline = None
+
op = opcodes.OpInstanceSetParams(instance_name=args[0],
- nics=opts.nics,
- disks=opts.disks,
+ nics=nics,
+ disks=disks,
disk_template=opts.disk_template,
remote_node=opts.node,
+ pnode=opts.new_primary_node,
hvparams=opts.hvparams,
beparams=opts.beparams,
+ runtime_mem=opts.runtime_mem,
os_name=opts.os,
osparams=opts.osparams,
force_variant=opts.force_variant,
force=opts.force,
- wait_for_sync=opts.wait_for_sync)
+ wait_for_sync=opts.wait_for_sync,
+ offline=offline,
+ conflicts_check=opts.conflicts_check,
+ ignore_ipolicy=opts.ignore_ipolicy)
# even if here we process the result, we allow submit only
result = SubmitOrSend(op, opts)
for param, data in result:
ToStdout(" - %-5s -> %s", param, data)
ToStdout("Please don't forget that most parameters take effect"
- " only at the next start of the instance.")
+ " only at the next (re)start of the instance initiated by"
+ " ganeti; restarting from within the instance will"
+ " not be enough.")
return 0
iallocator=opts.iallocator,
target_groups=opts.to,
early_release=opts.early_release)
- result = SubmitOpCode(op, cl=cl, opts=opts)
+ result = SubmitOrSend(op, opts, cl=cl)
# Keep track of submitted jobs
jex = JobExecutor(cl=cl, opts=opts)
OS_OPT,
FORCE_VARIANT_OPT,
NO_INSTALL_OPT,
+ IGNORE_IPOLICY_OPT,
]
commands = {
"[...] -t disk-type -n node[:secondary-node] -o os-type <name>",
"Creates and adds a new instance to the cluster"),
"batch-create": (
- BatchCreate, [ArgFile(min=1, max=1)], [DRY_RUN_OPT, PRIORITY_OPT],
+ BatchCreate, [ArgFile(min=1, max=1)],
+ [DRY_RUN_OPT, PRIORITY_OPT, IALLOCATOR_OPT] + SUBMIT_OPTS,
"<instances.json>",
"Create a bunch of instances based on specs in the file."),
"console": (
"[--show-cmd] <instance>", "Opens a console on the specified instance"),
"failover": (
FailoverInstance, ARGS_ONE_INSTANCE,
- [FORCE_OPT, IGNORE_CONSIST_OPT, SUBMIT_OPT, SHUTDOWN_TIMEOUT_OPT,
- DRY_RUN_OPT, PRIORITY_OPT, DST_NODE_OPT, IALLOCATOR_OPT],
+ [FORCE_OPT, IGNORE_CONSIST_OPT] + SUBMIT_OPTS +
+ [SHUTDOWN_TIMEOUT_OPT,
+ DRY_RUN_OPT, PRIORITY_OPT, DST_NODE_OPT, IALLOCATOR_OPT,
+ IGNORE_IPOLICY_OPT, CLEANUP_OPT],
"[-f] <instance>", "Stops the instance, changes its primary node and"
" (if it was originally running) starts it on the new node"
" (the secondary for mirrored instances or any node"
"migrate": (
MigrateInstance, ARGS_ONE_INSTANCE,
[FORCE_OPT, NONLIVE_OPT, MIGRATION_MODE_OPT, CLEANUP_OPT, DRY_RUN_OPT,
- PRIORITY_OPT, DST_NODE_OPT, IALLOCATOR_OPT, ALLOW_FAILOVER_OPT],
+ PRIORITY_OPT, DST_NODE_OPT, IALLOCATOR_OPT, ALLOW_FAILOVER_OPT,
+ IGNORE_IPOLICY_OPT, NORUNTIME_CHGS_OPT] + SUBMIT_OPTS,
"[-f] <instance>", "Migrate instance to its secondary node"
" (only for mirrored instances)"),
"move": (
MoveInstance, ARGS_ONE_INSTANCE,
- [FORCE_OPT, SUBMIT_OPT, SINGLE_NODE_OPT, SHUTDOWN_TIMEOUT_OPT,
- DRY_RUN_OPT, PRIORITY_OPT, IGNORE_CONSIST_OPT],
+ [FORCE_OPT] + SUBMIT_OPTS +
+ [SINGLE_NODE_OPT,
+ SHUTDOWN_TIMEOUT_OPT, DRY_RUN_OPT, PRIORITY_OPT, IGNORE_CONSIST_OPT,
+ IGNORE_IPOLICY_OPT],
"[-f] <instance>", "Move instance to an arbitrary node"
" (only for instances of type file and lv)"),
"info": (
ReinstallInstance, [ArgInstance()],
[FORCE_OPT, OS_OPT, FORCE_VARIANT_OPT, m_force_multi, m_node_opt,
m_pri_node_opt, m_sec_node_opt, m_clust_opt, m_inst_opt, m_node_tags_opt,
- m_pri_node_tags_opt, m_sec_node_tags_opt, m_inst_tags_opt, SELECT_OS_OPT,
- SUBMIT_OPT, DRY_RUN_OPT, PRIORITY_OPT, OSPARAMS_OPT],
+ m_pri_node_tags_opt, m_sec_node_tags_opt, m_inst_tags_opt, SELECT_OS_OPT]
+ + SUBMIT_OPTS + [DRY_RUN_OPT, PRIORITY_OPT, OSPARAMS_OPT],
"[-f] <instance>", "Reinstall a stopped instance"),
"remove": (
RemoveInstance, ARGS_ONE_INSTANCE,
- [FORCE_OPT, SHUTDOWN_TIMEOUT_OPT, IGNORE_FAILURES_OPT, SUBMIT_OPT,
- DRY_RUN_OPT, PRIORITY_OPT],
+ [FORCE_OPT, SHUTDOWN_TIMEOUT_OPT, IGNORE_FAILURES_OPT] + SUBMIT_OPTS
+ + [DRY_RUN_OPT, PRIORITY_OPT],
"[-f] <instance>", "Shuts down the instance and removes it"),
"rename": (
RenameInstance,
[ArgInstance(min=1, max=1), ArgHost(min=1, max=1)],
- [NOIPCHECK_OPT, NONAMECHECK_OPT, SUBMIT_OPT, DRY_RUN_OPT, PRIORITY_OPT],
+ [NOIPCHECK_OPT, NONAMECHECK_OPT] + SUBMIT_OPTS
+ + [DRY_RUN_OPT, PRIORITY_OPT],
"<instance> <new_name>", "Rename the instance"),
"replace-disks": (
ReplaceDisks, ARGS_ONE_INSTANCE,
[AUTO_REPLACE_OPT, DISKIDX_OPT, IALLOCATOR_OPT, EARLY_RELEASE_OPT,
- NEW_SECONDARY_OPT, ON_PRIMARY_OPT, ON_SECONDARY_OPT, SUBMIT_OPT,
- DRY_RUN_OPT, PRIORITY_OPT],
- "[-s|-p|-n NODE|-I NAME] <instance>",
- "Replaces all disks for the instance"),
+ NEW_SECONDARY_OPT, ON_PRIMARY_OPT, ON_SECONDARY_OPT] + SUBMIT_OPTS
+ + [DRY_RUN_OPT, PRIORITY_OPT, IGNORE_IPOLICY_OPT],
+ "[-s|-p|-a|-n NODE|-I NAME] <instance>",
+ "Replaces disks for the instance"),
"modify": (
SetInstanceParams, ARGS_ONE_INSTANCE,
- [BACKEND_OPT, DISK_OPT, FORCE_OPT, HVOPTS_OPT, NET_OPT, SUBMIT_OPT,
- DISK_TEMPLATE_OPT, SINGLE_NODE_OPT, OS_OPT, FORCE_VARIANT_OPT,
- OSPARAMS_OPT, DRY_RUN_OPT, PRIORITY_OPT, NWSYNC_OPT],
+ [BACKEND_OPT, DISK_OPT, FORCE_OPT, HVOPTS_OPT, NET_OPT] + SUBMIT_OPTS +
+ [DISK_TEMPLATE_OPT, SINGLE_NODE_OPT, OS_OPT, FORCE_VARIANT_OPT,
+ OSPARAMS_OPT, DRY_RUN_OPT, PRIORITY_OPT, NWSYNC_OPT, OFFLINE_INST_OPT,
+ ONLINE_INST_OPT, IGNORE_IPOLICY_OPT, RUNTIME_MEM_OPT,
+ NOCONFLICTSCHECK_OPT, NEW_PRIMARY_OPT],
"<instance>", "Alters the parameters of an instance"),
"shutdown": (
GenericManyOps("shutdown", _ShutdownInstance), [ArgInstance()],
- [m_node_opt, m_pri_node_opt, m_sec_node_opt, m_clust_opt,
+ [FORCE_OPT, m_node_opt, m_pri_node_opt, m_sec_node_opt, m_clust_opt,
m_node_tags_opt, m_pri_node_tags_opt, m_sec_node_tags_opt,
- m_inst_tags_opt, m_inst_opt, m_force_multi, TIMEOUT_OPT, SUBMIT_OPT,
- DRY_RUN_OPT, PRIORITY_OPT, IGNORE_OFFLINE_OPT, NO_REMEMBER_OPT],
+ m_inst_tags_opt, m_inst_opt, m_force_multi, TIMEOUT_OPT] + SUBMIT_OPTS
+ + [DRY_RUN_OPT, PRIORITY_OPT, IGNORE_OFFLINE_OPT, NO_REMEMBER_OPT],
"<instance>", "Stops an instance"),
"startup": (
GenericManyOps("startup", _StartupInstance), [ArgInstance()],
[FORCE_OPT, m_force_multi, m_node_opt, m_pri_node_opt, m_sec_node_opt,
m_node_tags_opt, m_pri_node_tags_opt, m_sec_node_tags_opt,
- m_inst_tags_opt, m_clust_opt, m_inst_opt, SUBMIT_OPT, HVOPTS_OPT,
+ m_inst_tags_opt, m_clust_opt, m_inst_opt] + SUBMIT_OPTS +
+ [HVOPTS_OPT,
BACKEND_OPT, DRY_RUN_OPT, PRIORITY_OPT, IGNORE_OFFLINE_OPT,
NO_REMEMBER_OPT, STARTUP_PAUSED_OPT],
"<instance>", "Starts an instance"),
"reboot": (
GenericManyOps("reboot", _RebootInstance), [ArgInstance()],
[m_force_multi, REBOOT_TYPE_OPT, IGNORE_SECONDARIES_OPT, m_node_opt,
- m_pri_node_opt, m_sec_node_opt, m_clust_opt, m_inst_opt, SUBMIT_OPT,
- m_node_tags_opt, m_pri_node_tags_opt, m_sec_node_tags_opt,
+ m_pri_node_opt, m_sec_node_opt, m_clust_opt, m_inst_opt] + SUBMIT_OPTS +
+ [m_node_tags_opt, m_pri_node_tags_opt, m_sec_node_tags_opt,
m_inst_tags_opt, SHUTDOWN_TIMEOUT_OPT, DRY_RUN_OPT, PRIORITY_OPT],
"<instance>", "Reboots an instance"),
"activate-disks": (
ActivateDisks, ARGS_ONE_INSTANCE,
- [SUBMIT_OPT, IGNORE_SIZE_OPT, PRIORITY_OPT],
+ SUBMIT_OPTS + [IGNORE_SIZE_OPT, PRIORITY_OPT, WFSYNC_OPT],
"<instance>", "Activate an instance's disks"),
"deactivate-disks": (
DeactivateDisks, ARGS_ONE_INSTANCE,
- [FORCE_OPT, SUBMIT_OPT, DRY_RUN_OPT, PRIORITY_OPT],
+ [FORCE_OPT] + SUBMIT_OPTS + [DRY_RUN_OPT, PRIORITY_OPT],
"[-f] <instance>", "Deactivate an instance's disks"),
"recreate-disks": (
RecreateDisks, ARGS_ONE_INSTANCE,
- [SUBMIT_OPT, DISKIDX_OPT, NODE_PLACEMENT_OPT, DRY_RUN_OPT, PRIORITY_OPT],
+ SUBMIT_OPTS +
+ [DISK_OPT, NODE_PLACEMENT_OPT, DRY_RUN_OPT, PRIORITY_OPT,
+ IALLOCATOR_OPT],
"<instance>", "Recreate an instance's disks"),
"grow-disk": (
GrowDisk,
[ArgInstance(min=1, max=1), ArgUnknown(min=1, max=1),
ArgUnknown(min=1, max=1)],
- [SUBMIT_OPT, NWSYNC_OPT, DRY_RUN_OPT, PRIORITY_OPT],
+ SUBMIT_OPTS + [NWSYNC_OPT, DRY_RUN_OPT, PRIORITY_OPT, ABSOLUTE_OPT],
"<instance> <disk> <size>", "Grow an instance's disk"),
"change-group": (
ChangeGroup, ARGS_ONE_INSTANCE,
- [TO_GROUP_OPT, IALLOCATOR_OPT, EARLY_RELEASE_OPT],
+ [TO_GROUP_OPT, IALLOCATOR_OPT, EARLY_RELEASE_OPT, PRIORITY_OPT]
+ + SUBMIT_OPTS,
"[-I <iallocator>] [--to <group>]", "Change group of instance"),
"list-tags": (
- ListTags, ARGS_ONE_INSTANCE, [PRIORITY_OPT],
+ ListTags, ARGS_ONE_INSTANCE, [],
"<instance_name>", "List the tags of the given instance"),
"add-tags": (
AddTags, [ArgInstance(min=1, max=1), ArgUnknown()],
- [TAG_SRC_OPT, PRIORITY_OPT],
+ [TAG_SRC_OPT, PRIORITY_OPT] + SUBMIT_OPTS,
"<instance_name> tag...", "Add tags to the given instance"),
"remove-tags": (
RemoveTags, [ArgInstance(min=1, max=1), ArgUnknown()],
- [TAG_SRC_OPT, PRIORITY_OPT],
+ [TAG_SRC_OPT, PRIORITY_OPT] + SUBMIT_OPTS,
"<instance_name> tag...", "Remove tags from given instance"),
}
aliases = {
"start": "startup",
"stop": "shutdown",
+ "show": "info",
}
def Main():
return GenericMain(commands, aliases=aliases,
- override={"tag_type": constants.TAG_INSTANCE})
+ override={"tag_type": constants.TAG_INSTANCE},
+ env_override=_ENV_OVERRIDE)