Introduce --hotplug-if-possible option
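A minimal sketch of how the new flag travels from the command line into the opcode, based on the hunks below; the CLI spelling, instance name and NIC parameters are assumptions for illustration only and are not part of the patch.

    # Roughly what SetInstanceParams() below would build for a call like
    #   gnt-instance modify --net add:link=br0 --hotplug-if-possible inst1
    # (spelling assumed; the option definitions live in ganeti.cli).
    from ganeti import constants, opcodes

    op = opcodes.OpInstanceSetParams(
      instance_name="inst1.example.com",                 # hypothetical name
      nics=[(constants.DDM_ADD, -1, {"link": "br0"})],   # append one NIC
      hotplug_if_possible=True,  # hot-add if the hypervisor allows it,
                                 # otherwise fall back to a cold modification
      )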
lib/client/gnt_instance.py
index 5edf822..e71c0da 100644
@@ -1,7 +1,7 @@
 #
 #
 
-# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011 Google Inc.
+# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012 Google Inc.
 #
 # This program is free software; you can redistribute it and/or modify
 # it under the terms of the GNU General Public License as published by
 
 """Instance related commands"""
 
-# pylint: disable-msg=W0401,W0614,C0103
+# pylint: disable=W0401,W0614,C0103
 # W0401: Wildcard import ganeti.cli
 # W0614: Unused import %s from wildcard import (since we need cli)
 # C0103: Invalid name gnt-instance
 
+import copy
 import itertools
 import simplejson
 import logging
-from cStringIO import StringIO
 
 from ganeti.cli import *
 from ganeti import opcodes
@@ -39,43 +39,49 @@ from ganeti import errors
 from ganeti import netutils
 from ganeti import ssh
 from ganeti import objects
+from ganeti import ht
 
 
-_SHUTDOWN_CLUSTER = "cluster"
-_SHUTDOWN_NODES_BOTH = "nodes"
-_SHUTDOWN_NODES_PRI = "nodes-pri"
-_SHUTDOWN_NODES_SEC = "nodes-sec"
-_SHUTDOWN_NODES_BOTH_BY_TAGS = "nodes-by-tags"
-_SHUTDOWN_NODES_PRI_BY_TAGS = "nodes-pri-by-tags"
-_SHUTDOWN_NODES_SEC_BY_TAGS = "nodes-sec-by-tags"
-_SHUTDOWN_INSTANCES = "instances"
-_SHUTDOWN_INSTANCES_BY_TAGS = "instances-by-tags"
-
-_SHUTDOWN_NODES_TAGS_MODES = (
-    _SHUTDOWN_NODES_BOTH_BY_TAGS,
-    _SHUTDOWN_NODES_PRI_BY_TAGS,
-    _SHUTDOWN_NODES_SEC_BY_TAGS)
+_EXPAND_CLUSTER = "cluster"
+_EXPAND_NODES_BOTH = "nodes"
+_EXPAND_NODES_PRI = "nodes-pri"
+_EXPAND_NODES_SEC = "nodes-sec"
+_EXPAND_NODES_BOTH_BY_TAGS = "nodes-by-tags"
+_EXPAND_NODES_PRI_BY_TAGS = "nodes-pri-by-tags"
+_EXPAND_NODES_SEC_BY_TAGS = "nodes-sec-by-tags"
+_EXPAND_INSTANCES = "instances"
+_EXPAND_INSTANCES_BY_TAGS = "instances-by-tags"
 
+_EXPAND_NODES_TAGS_MODES = compat.UniqueFrozenset([
+  _EXPAND_NODES_BOTH_BY_TAGS,
+  _EXPAND_NODES_PRI_BY_TAGS,
+  _EXPAND_NODES_SEC_BY_TAGS,
+  ])
 
 #: default list of options for L{ListInstances}
 _LIST_DEF_FIELDS = [
   "name", "hypervisor", "os", "pnode", "status", "oper_ram",
   ]
 
+_MISSING = object()
+_ENV_OVERRIDE = compat.UniqueFrozenset(["list"])
+
+_INST_DATA_VAL = ht.TListOf(ht.TDict)
+
 
 def _ExpandMultiNames(mode, names, client=None):
   """Expand the given names using the passed mode.
 
-  For _SHUTDOWN_CLUSTER, all instances will be returned. For
-  _SHUTDOWN_NODES_PRI/SEC, all instances having those nodes as
-  primary/secondary will be returned. For _SHUTDOWN_NODES_BOTH, all
+  For _EXPAND_CLUSTER, all instances will be returned. For
+  _EXPAND_NODES_PRI/SEC, all instances having those nodes as
+  primary/secondary will be returned. For _EXPAND_NODES_BOTH, all
   instances having those nodes as either primary or secondary will be
-  returned. For _SHUTDOWN_INSTANCES, the given instances will be
+  returned. For _EXPAND_INSTANCES, the given instances will be
   returned.
 
-  @param mode: one of L{_SHUTDOWN_CLUSTER}, L{_SHUTDOWN_NODES_BOTH},
-      L{_SHUTDOWN_NODES_PRI}, L{_SHUTDOWN_NODES_SEC} or
-      L{_SHUTDOWN_INSTANCES}
+  @param mode: one of L{_EXPAND_CLUSTER}, L{_EXPAND_NODES_BOTH},
+      L{_EXPAND_NODES_PRI}, L{_EXPAND_NODES_SEC} or
+      L{_EXPAND_INSTANCES}
   @param names: a list of names; for cluster, it must be empty,
       and for node and instance it must be a list of valid item
       names (short names are valid as usual, e.g. node1 instead of
@@ -86,21 +92,20 @@ def _ExpandMultiNames(mode, names, client=None):
   @raise errors.OpPrereqError: for invalid input parameters
 
   """
-  # pylint: disable-msg=W0142
+  # pylint: disable=W0142
 
   if client is None:
     client = GetClient()
-  if mode == _SHUTDOWN_CLUSTER:
+  if mode == _EXPAND_CLUSTER:
     if names:
       raise errors.OpPrereqError("Cluster filter mode takes no arguments",
                                  errors.ECODE_INVAL)
     idata = client.QueryInstances([], ["name"], False)
     inames = [row[0] for row in idata]
 
-  elif mode in (_SHUTDOWN_NODES_BOTH,
-                _SHUTDOWN_NODES_PRI,
-                _SHUTDOWN_NODES_SEC) + _SHUTDOWN_NODES_TAGS_MODES:
-    if mode in _SHUTDOWN_NODES_TAGS_MODES:
+  elif (mode in _EXPAND_NODES_TAGS_MODES or
+        mode in (_EXPAND_NODES_BOTH, _EXPAND_NODES_PRI, _EXPAND_NODES_SEC)):
+    if mode in _EXPAND_NODES_TAGS_MODES:
       if not names:
         raise errors.OpPrereqError("No node tags passed", errors.ECODE_INVAL)
       ndata = client.QueryNodes([], ["name", "pinst_list",
@@ -110,27 +115,27 @@ def _ExpandMultiNames(mode, names, client=None):
       if not names:
         raise errors.OpPrereqError("No node names passed", errors.ECODE_INVAL)
       ndata = client.QueryNodes(names, ["name", "pinst_list", "sinst_list"],
-                              False)
+                                False)
 
     ipri = [row[1] for row in ndata]
     pri_names = list(itertools.chain(*ipri))
     isec = [row[2] for row in ndata]
     sec_names = list(itertools.chain(*isec))
-    if mode in (_SHUTDOWN_NODES_BOTH, _SHUTDOWN_NODES_BOTH_BY_TAGS):
+    if mode in (_EXPAND_NODES_BOTH, _EXPAND_NODES_BOTH_BY_TAGS):
       inames = pri_names + sec_names
-    elif mode in (_SHUTDOWN_NODES_PRI, _SHUTDOWN_NODES_PRI_BY_TAGS):
+    elif mode in (_EXPAND_NODES_PRI, _EXPAND_NODES_PRI_BY_TAGS):
       inames = pri_names
-    elif mode in (_SHUTDOWN_NODES_SEC, _SHUTDOWN_NODES_SEC_BY_TAGS):
+    elif mode in (_EXPAND_NODES_SEC, _EXPAND_NODES_SEC_BY_TAGS):
       inames = sec_names
     else:
       raise errors.ProgrammerError("Unhandled shutdown type")
-  elif mode == _SHUTDOWN_INSTANCES:
+  elif mode == _EXPAND_INSTANCES:
     if not names:
       raise errors.OpPrereqError("No instance names passed",
                                  errors.ECODE_INVAL)
     idata = client.QueryInstances(names, ["name"], False)
     inames = [row[0] for row in idata]
-  elif mode == _SHUTDOWN_INSTANCES_BY_TAGS:
+  elif mode == _EXPAND_INSTANCES_BY_TAGS:
     if not names:
       raise errors.OpPrereqError("No instance tags passed",
                                  errors.ECODE_INVAL)
@@ -142,44 +147,6 @@ def _ExpandMultiNames(mode, names, client=None):
   return inames
 
 
-def _ConfirmOperation(inames, text, extra=""):
-  """Ask the user to confirm an operation on a list of instances.
-
-  This function is used to request confirmation for doing an operation
-  on a given list of instances.
-
-  @type inames: list
-  @param inames: the list of names that we display when
-      we ask for confirmation
-  @type text: str
-  @param text: the operation that the user should confirm
-      (e.g. I{shutdown} or I{startup})
-  @rtype: boolean
-  @return: True or False depending on user's confirmation.
-
-  """
-  count = len(inames)
-  msg = ("The %s will operate on %d instances.\n%s"
-         "Do you want to continue?" % (text, count, extra))
-  affected = ("\nAffected instances:\n" +
-              "\n".join(["  %s" % name for name in inames]))
-
-  choices = [('y', True, 'Yes, execute the %s' % text),
-             ('n', False, 'No, abort the %s' % text)]
-
-  if count > 20:
-    choices.insert(1, ('v', 'v', 'View the list of affected instances'))
-    ask = msg
-  else:
-    ask = msg + affected
-
-  choice = AskUser(ask, choices)
-  if choice == 'v':
-    choices.pop(1)
-    choice = AskUser(msg + affected, choices)
-  return choice
-
-
 def _EnsureInstancesExist(client, names):
   """Check for and ensure the given instance names exist.
 
@@ -213,18 +180,18 @@ def GenericManyOps(operation, fn):
   """
   def realfn(opts, args):
     if opts.multi_mode is None:
-      opts.multi_mode = _SHUTDOWN_INSTANCES
+      opts.multi_mode = _EXPAND_INSTANCES
     cl = GetClient()
     inames = _ExpandMultiNames(opts.multi_mode, args, client=cl)
     if not inames:
-      if opts.multi_mode == _SHUTDOWN_CLUSTER:
+      if opts.multi_mode == _EXPAND_CLUSTER:
         ToStdout("Cluster is empty, no instances to shutdown")
         return 0
       raise errors.OpPrereqError("Selection filter does not match"
                                  " any instances", errors.ECODE_INVAL)
-    multi_on = opts.multi_mode != _SHUTDOWN_INSTANCES or len(inames) > 1
+    multi_on = opts.multi_mode != _EXPAND_INSTANCES or len(inames) > 1
     if not (opts.force_multi or not multi_on
-            or _ConfirmOperation(inames, operation)):
+            or ConfirmOperation(inames, "instances", operation)):
       return 1
     jex = JobExecutor(verbose=multi_on, cl=cl, opts=opts)
     for name in inames:
@@ -250,14 +217,16 @@ def ListInstances(opts, args):
 
   fmtoverride = dict.fromkeys(["tags", "disk.sizes", "nic.macs", "nic.ips",
                                "nic.modes", "nic.links", "nic.bridges",
-                               "snodes"],
+                               "nic.networks",
+                               "snodes", "snodes.group", "snodes.group.uuid"],
                               (lambda value: ",".join(str(item)
                                                       for item in value),
                                False))
 
   return GenericList(constants.QR_INSTANCE, selected_fields, args, opts.units,
                      opts.separator, not opts.no_headers,
-                     format_override=fmtoverride, verbose=opts.verbose)
+                     format_override=fmtoverride, verbose=opts.verbose,
+                     force_filter=opts.force_filter)
 
 
 def ListInstanceFields(opts, args):
@@ -286,23 +255,8 @@ def AddInstance(opts, args):
 def BatchCreate(opts, args):
   """Create instances using a definition file.
 
-  This function reads a json file with instances defined
-  in the form::
-
-    {"instance-name":{
-      "disk_size": [20480],
-      "template": "drbd",
-      "backend": {
-        "memory": 512,
-        "vcpus": 1 },
-      "os": "debootstrap",
-      "primary_node": "firstnode",
-      "secondary_node": "secondnode",
-      "iallocator": "dumb"}
-    }
-
-  Note that I{primary_node} and I{secondary_node} have precedence over
-  I{iallocator}.
+  This function reads a json file with L{opcodes.OpInstanceCreate}
+  serialisations.
 
   @param opts: the command line options selected by the user
   @type args: list
@@ -311,130 +265,54 @@ def BatchCreate(opts, args):
   @return: the desired exit code
 
   """
-  _DEFAULT_SPECS = {"disk_size": [20 * 1024],
-                    "backend": {},
-                    "iallocator": None,
-                    "primary_node": None,
-                    "secondary_node": None,
-                    "nics": None,
-                    "start": True,
-                    "ip_check": True,
-                    "name_check": True,
-                    "hypervisor": None,
-                    "hvparams": {},
-                    "file_storage_dir": None,
-                    "force_variant": False,
-                    "file_driver": 'loop'}
-
-  def _PopulateWithDefaults(spec):
-    """Returns a new hash combined with default values."""
-    mydict = _DEFAULT_SPECS.copy()
-    mydict.update(spec)
-    return mydict
-
-  def _Validate(spec):
-    """Validate the instance specs."""
-    # Validate fields required under any circumstances
-    for required_field in ('os', 'template'):
-      if required_field not in spec:
-        raise errors.OpPrereqError('Required field "%s" is missing.' %
-                                   required_field, errors.ECODE_INVAL)
-    # Validate special fields
-    if spec['primary_node'] is not None:
-      if (spec['template'] in constants.DTS_NET_MIRROR and
-          spec['secondary_node'] is None):
-        raise errors.OpPrereqError('Template requires secondary node, but'
-                                   ' there was no secondary provided.',
-                                   errors.ECODE_INVAL)
-    elif spec['iallocator'] is None:
-      raise errors.OpPrereqError('You have to provide at least a primary_node'
-                                 ' or an iallocator.',
-                                 errors.ECODE_INVAL)
-
-    if (spec['hvparams'] and
-        not isinstance(spec['hvparams'], dict)):
-      raise errors.OpPrereqError('Hypervisor parameters must be a dict.',
-                                 errors.ECODE_INVAL)
+  (json_filename,) = args
+  cl = GetClient()
 
-  json_filename = args[0]
   try:
     instance_data = simplejson.loads(utils.ReadFile(json_filename))
-  except Exception, err: # pylint: disable-msg=W0703
+  except Exception, err: # pylint: disable=W0703
     ToStderr("Can't parse the instance definition file: %s" % str(err))
     return 1
 
-  if not isinstance(instance_data, dict):
-    ToStderr("The instance definition file is not in dict format.")
+  if not _INST_DATA_VAL(instance_data):
+    ToStderr("The instance definition file is not %s" % _INST_DATA_VAL)
     return 1
 
-  jex = JobExecutor(opts=opts)
+  instances = []
+  possible_params = set(opcodes.OpInstanceCreate.GetAllSlots())
+  for (idx, inst) in enumerate(instance_data):
+    unknown = set(inst.keys()) - possible_params
 
-  # Iterate over the instances and do:
-  #  * Populate the specs with default value
-  #  * Validate the instance specs
-  i_names = utils.NiceSort(instance_data.keys()) # pylint: disable-msg=E1103
-  for name in i_names:
-    specs = instance_data[name]
-    specs = _PopulateWithDefaults(specs)
-    _Validate(specs)
+    if unknown:
+      # TODO: Suggest closest match for more user friendly experience
+      raise errors.OpPrereqError("Unknown fields in definition %s: %s" %
+                                 (idx, utils.CommaJoin(unknown)),
+                                 errors.ECODE_INVAL)
 
-    hypervisor = specs['hypervisor']
-    hvparams = specs['hvparams']
+    op = opcodes.OpInstanceCreate(**inst) # pylint: disable=W0142
+    op.Validate(False)
+    instances.append(op)
 
-    disks = []
-    for elem in specs['disk_size']:
-      try:
-        size = utils.ParseUnit(elem)
-      except (TypeError, ValueError), err:
-        raise errors.OpPrereqError("Invalid disk size '%s' for"
-                                   " instance %s: %s" %
-                                   (elem, name, err), errors.ECODE_INVAL)
-      disks.append({"size": size})
-
-    utils.ForceDictType(specs['backend'], constants.BES_PARAMETER_TYPES)
-    utils.ForceDictType(hvparams, constants.HVS_PARAMETER_TYPES)
-
-    tmp_nics = []
-    for field in ('ip', 'mac', 'mode', 'link', 'bridge'):
-      if field in specs:
-        if not tmp_nics:
-          tmp_nics.append({})
-        tmp_nics[0][field] = specs[field]
-
-    if specs['nics'] is not None and tmp_nics:
-      raise errors.OpPrereqError("'nics' list incompatible with using"
-                                 " individual nic fields as well",
-                                 errors.ECODE_INVAL)
-    elif specs['nics'] is not None:
-      tmp_nics = specs['nics']
-    elif not tmp_nics:
-      tmp_nics = [{}]
-
-    op = opcodes.OpInstanceCreate(instance_name=name,
-                                  disks=disks,
-                                  disk_template=specs['template'],
-                                  mode=constants.INSTANCE_CREATE,
-                                  os_type=specs['os'],
-                                  force_variant=specs["force_variant"],
-                                  pnode=specs['primary_node'],
-                                  snode=specs['secondary_node'],
-                                  nics=tmp_nics,
-                                  start=specs['start'],
-                                  ip_check=specs['ip_check'],
-                                  name_check=specs['name_check'],
-                                  wait_for_sync=True,
-                                  iallocator=specs['iallocator'],
-                                  hypervisor=hypervisor,
-                                  hvparams=hvparams,
-                                  beparams=specs['backend'],
-                                  file_storage_dir=specs['file_storage_dir'],
-                                  file_driver=specs['file_driver'])
-
-    jex.QueueJob(name, op)
-  # we never want to wait, just show the submitted job IDs
-  jex.WaitOrShow(False)
+  op = opcodes.OpInstanceMultiAlloc(iallocator=opts.iallocator,
+                                    instances=instances)
+  result = SubmitOrSend(op, opts, cl=cl)
 
-  return 0
+  # Keep track of submitted jobs
+  jex = JobExecutor(cl=cl, opts=opts)
+
+  for (status, job_id) in result[constants.JOB_IDS_KEY]:
+    jex.AddJobId(None, status, job_id)
+
+  results = jex.GetResults()
+  bad_cnt = len([row for row in results if not row[0]])
+  if bad_cnt == 0:
+    ToStdout("All instances created successfully.")
+    rcode = constants.EXIT_SUCCESS
+  else:
+    ToStdout("There were %s errors during the creation.", bad_cnt)
+    rcode = constants.EXIT_FAILURE
+
+  return rcode
 
 
 def ReinstallInstance(opts, args):
@@ -450,7 +328,7 @@ def ReinstallInstance(opts, args):
   """
   # first, compute the desired name list
   if opts.multi_mode is None:
-    opts.multi_mode = _SHUTDOWN_INSTANCES
+    opts.multi_mode = _EXPAND_INSTANCES
 
   inames = _ExpandMultiNames(opts.multi_mode, args)
   if not inames:
@@ -475,31 +353,37 @@ def ReinstallInstance(opts, args):
         choices.append(("%s" % number, entry, entry))
         number += 1
 
-    choices.append(('x', 'exit', 'Exit gnt-instance reinstall'))
+    choices.append(("x", "exit", "Exit gnt-instance reinstall"))
     selected = AskUser("Enter OS template number (or x to abort):",
                        choices)
 
-    if selected == 'exit':
+    if selected == "exit":
       ToStderr("User aborted reinstall, exiting")
       return 1
 
     os_name = selected
+    os_msg = "change the OS to '%s'" % selected
   else:
     os_name = opts.os
+    if opts.os is not None:
+      os_msg = "change the OS to '%s'" % os_name
+    else:
+      os_msg = "keep the same OS"
 
   # third, get confirmation: multi-reinstall requires --force-multi,
   # single-reinstall either --force or --force-multi (--force-multi is
   # a stronger --force)
-  multi_on = opts.multi_mode != _SHUTDOWN_INSTANCES or len(inames) > 1
+  multi_on = opts.multi_mode != _EXPAND_INSTANCES or len(inames) > 1
   if multi_on:
-    warn_msg = "Note: this will remove *all* data for the below instances!\n"
+    warn_msg = ("Note: this will remove *all* data for the"
+                " below instances! It will %s.\n" % os_msg)
     if not (opts.force_multi or
-            _ConfirmOperation(inames, "reinstall", extra=warn_msg)):
+            ConfirmOperation(inames, "instances", "reinstall", extra=warn_msg)):
       return 1
   else:
     if not (opts.force or opts.force_multi):
-      usertext = ("This will reinstall the instance %s and remove"
-                  " all data. Continue?") % inames[0]
+      usertext = ("This will reinstall the instance '%s' (and %s) which"
+                  " removes all data. Continue?") % (inames[0], os_msg)
       if not AskUser(usertext):
         return 1
 
@@ -511,9 +395,43 @@ def ReinstallInstance(opts, args):
                                      osparams=opts.osparams)
     jex.QueueJob(instance_name, op)
 
-  jex.WaitOrShow(not opts.submit_only)
-  return 0
+  results = jex.WaitOrShow(not opts.submit_only)
 
+  if compat.all(map(compat.fst, results)):
+    return constants.EXIT_SUCCESS
+  else:
+    return constants.EXIT_FAILURE
+
+
+def SnapshotInstance(opts, args):
+  """Snapshot an instance.
+
+  @param opts: the command line options selected by the user
+  @type args: list
+  @param args: should contain only one element, the name of the
+      instance to be snapshotted
+  @rtype: int
+  @return: the desired exit code
+
+  """
+  instance_name = args[0]
+  inames = _ExpandMultiNames(_EXPAND_INSTANCES, [instance_name])
+  if not inames:
+    raise errors.OpPrereqError("Selection filter does not match any instances",
+                               errors.ECODE_INVAL)
+  multi_on = len(inames) > 1
+  jex = JobExecutor(verbose=multi_on, opts=opts)
+  for instance_name in inames:
+    op = opcodes.OpInstanceSnapshot(instance_name=instance_name,
+                                    disks=opts.disks)
+    jex.QueueJob(instance_name, op)
+
+  results = jex.WaitOrShow(not opts.submit_only)
+
+  if compat.all(map(compat.fst, results)):
+    return constants.EXIT_SUCCESS
+  else:
+    return constants.EXIT_FAILURE
 
 def RemoveInstance(opts, args):
   """Remove an instance.
@@ -541,7 +459,8 @@ def RemoveInstance(opts, args):
 
   op = opcodes.OpInstanceRemove(instance_name=instance_name,
                                 ignore_failures=opts.ignore_failures,
-                                shutdown_timeout=opts.shutdown_timeout)
+                                shutdown_timeout=opts.shutdown_timeout,
+                                keep_disks=opts.keep_disks)
   SubmitOrSend(op, opts, cl=cl)
   return 0
 
@@ -591,7 +510,8 @@ def ActivateDisks(opts, args):
   """
   instance_name = args[0]
   op = opcodes.OpInstanceActivateDisks(instance_name=instance_name,
-                                       ignore_size=opts.ignore_size)
+                                       ignore_size=opts.ignore_size,
+                                       wait_for_sync=opts.wait_for_sync)
   disks_info = SubmitOrSend(op, opts)
   for host, iname, nname in disks_info:
     ToStdout("%s:%s:%s", host, iname, nname)
@@ -629,16 +549,34 @@ def RecreateDisks(opts, args):
 
   """
   instance_name = args[0]
+
+  disks = []
+
   if opts.disks:
-    try:
-      opts.disks = [int(v) for v in opts.disks.split(",")]
-    except (ValueError, TypeError), err:
-      ToStderr("Invalid disks value: %s" % str(err))
-      return 1
-  else:
-    opts.disks = []
+    for didx, ddict in opts.disks:
+      didx = int(didx)
+
+      if not ht.TDict(ddict):
+        msg = "Invalid disk/%d value: expected dict, got %s" % (didx, ddict)
+        raise errors.OpPrereqError(msg, errors.ECODE_INVAL)
+
+      if constants.IDISK_SIZE in ddict:
+        try:
+          ddict[constants.IDISK_SIZE] = \
+            utils.ParseUnit(ddict[constants.IDISK_SIZE])
+        except ValueError, err:
+          raise errors.OpPrereqError("Invalid disk size for disk %d: %s" %
+                                     (didx, err), errors.ECODE_INVAL)
+
+      disks.append((didx, ddict))
+
+    # TODO: Verify modifiable parameters (already done in
+    # LUInstanceRecreateDisks, but it'd be nice to have in the client)
 
   if opts.node:
+    if opts.iallocator:
+      msg = "At most one of either --nodes or --iallocator can be passed"
+      raise errors.OpPrereqError(msg, errors.ECODE_INVAL)
     pnode, snode = SplitNodeOption(opts.node)
     nodes = [pnode]
     if snode is not None:
@@ -647,9 +585,10 @@ def RecreateDisks(opts, args):
     nodes = []
 
   op = opcodes.OpInstanceRecreateDisks(instance_name=instance_name,
-                                       disks=opts.disks,
-                                       nodes=nodes)
+                                       disks=disks, nodes=nodes,
+                                       iallocator=opts.iallocator)
   SubmitOrSend(op, opts)
+
   return 0
 
 
@@ -658,8 +597,8 @@ def GrowDisk(opts, args):
 
   @param opts: the command line options selected by the user
   @type args: list
-  @param args: should contain two elements, the instance name
-      whose disks we grow and the disk name, e.g. I{sda}
+  @param args: should contain three elements, the target instance name,
+      the target disk id, and the target growth
   @rtype: int
   @return: the desired exit code
 
@@ -671,10 +610,15 @@ def GrowDisk(opts, args):
   except (TypeError, ValueError), err:
     raise errors.OpPrereqError("Invalid disk index: %s" % str(err),
                                errors.ECODE_INVAL)
-  amount = utils.ParseUnit(args[2])
+  try:
+    amount = utils.ParseUnit(args[2])
+  except errors.UnitParseError:
+    raise errors.OpPrereqError("Can't parse the given amount '%s'" % args[2],
+                               errors.ECODE_INVAL)
   op = opcodes.OpInstanceGrowDisk(instance_name=instance,
                                   disk=disk, amount=amount,
-                                  wait_for_sync=opts.wait_for_sync)
+                                  wait_for_sync=opts.wait_for_sync,
+                                  absolute=opts.absolute)
   SubmitOrSend(op, opts)
   return 0
 
@@ -692,7 +636,9 @@ def _StartupInstance(name, opts):
   """
   op = opcodes.OpInstanceStartup(instance_name=name,
                                  force=opts.force,
-                                 ignore_offline_nodes=opts.ignore_offline)
+                                 ignore_offline_nodes=opts.ignore_offline,
+                                 no_remember=opts.no_remember,
+                                 startup_paused=opts.startup_paused)
   # do not add these parameters to the opcode unless they're defined
   if opts.hvparams:
     op.hvparams = opts.hvparams
@@ -730,8 +676,10 @@ def _ShutdownInstance(name, opts):
 
   """
   return opcodes.OpInstanceShutdown(instance_name=name,
+                                    force=opts.force,
                                     timeout=opts.timeout,
-                                    ignore_offline_nodes=opts.ignore_offline)
+                                    ignore_offline_nodes=opts.ignore_offline,
+                                    no_remember=opts.no_remember)
 
 
 def ReplaceDisks(opts, args):
@@ -775,7 +723,8 @@ def ReplaceDisks(opts, args):
   op = opcodes.OpInstanceReplaceDisks(instance_name=args[0], disks=disks,
                                       remote_node=new_2ndary, mode=mode,
                                       iallocator=iallocator,
-                                      early_release=opts.early_release)
+                                      early_release=opts.early_release,
+                                      ignore_ipolicy=opts.ignore_ipolicy)
   SubmitOrSend(op, opts)
   return 0
 
@@ -796,6 +745,12 @@ def FailoverInstance(opts, args):
   cl = GetClient()
   instance_name = args[0]
   force = opts.force
+  iallocator = opts.iallocator
+  target_node = opts.dst_node
+
+  if iallocator and target_node:
+    raise errors.OpPrereqError("Specify either an iallocator (-I), or a target"
+                               " node (-n) but not both", errors.ECODE_INVAL)
 
   if not force:
     _EnsureInstancesExist(cl, [instance_name])
@@ -808,7 +763,10 @@ def FailoverInstance(opts, args):
 
   op = opcodes.OpInstanceFailover(instance_name=instance_name,
                                   ignore_consistency=opts.ignore_consistency,
-                                  shutdown_timeout=opts.shutdown_timeout)
+                                  shutdown_timeout=opts.shutdown_timeout,
+                                  iallocator=iallocator,
+                                  target_node=target_node,
+                                  ignore_ipolicy=opts.ignore_ipolicy)
   SubmitOrSend(op, opts, cl=cl)
   return 0
 
@@ -828,6 +786,12 @@ def MigrateInstance(opts, args):
   cl = GetClient()
   instance_name = args[0]
   force = opts.force
+  iallocator = opts.iallocator
+  target_node = opts.dst_node
+
+  if iallocator and target_node:
+    raise errors.OpPrereqError("Specify either an iallocator (-I), or a target"
+                               " node (-n) but not both", errors.ECODE_INVAL)
 
   if not force:
     _EnsureInstancesExist(cl, [instance_name])
@@ -855,8 +819,12 @@ def MigrateInstance(opts, args):
     mode = opts.migration_mode
 
   op = opcodes.OpInstanceMigrate(instance_name=instance_name, mode=mode,
-                                 cleanup=opts.cleanup)
-  SubmitOpCode(op, cl=cl, opts=opts)
+                                 cleanup=opts.cleanup, iallocator=iallocator,
+                                 target_node=target_node,
+                                 allow_failover=opts.allow_failover,
+                                 allow_runtime_changes=opts.allow_runtime_chgs,
+                                 ignore_ipolicy=opts.ignore_ipolicy)
+  SubmitOrSend(op, cl=cl, opts=opts)
   return 0
 
 
@@ -883,7 +851,9 @@ def MoveInstance(opts, args):
 
   op = opcodes.OpInstanceMove(instance_name=instance_name,
                               target_node=opts.node,
-                              shutdown_timeout=opts.shutdown_timeout)
+                              shutdown_timeout=opts.shutdown_timeout,
+                              ignore_consistency=opts.ignore_consistency,
+                              ignore_ipolicy=opts.ignore_ipolicy)
   SubmitOrSend(op, opts, cl=cl)
   return 0
 
@@ -900,18 +870,26 @@ def ConnectToInstanceConsole(opts, args):
   """
   instance_name = args[0]
 
-  op = opcodes.OpInstanceConsole(instance_name=instance_name)
-
   cl = GetClient()
   try:
     cluster_name = cl.QueryConfigValues(["cluster_name"])[0]
-    console_data = SubmitOpCode(op, opts=opts, cl=cl)
+    ((console_data, oper_state), ) = \
+      cl.QueryInstances([instance_name], ["console", "oper_state"], False)
   finally:
     # Ensure client connection is closed while external commands are run
     cl.Close()
 
   del cl
 
+  if not console_data:
+    if oper_state:
+      # Instance is running
+      raise errors.OpExecError("Console information for instance %s is"
+                               " unavailable" % instance_name)
+    else:
+      raise errors.OpExecError("Instance %s is not running, can't get console" %
+                               instance_name)
+
   return _DoConsole(objects.InstanceConsole.FromDict(console_data),
                     opts.show_command, cluster_name)
 
@@ -937,6 +915,9 @@ def _DoConsole(console, show_command, cluster_name, feedback_fn=ToStdout,
                 " URL <vnc://%s:%s/>",
                 console.instance, console.host, console.port,
                 console.display, console.host, console.port)
+  elif console.kind == constants.CONS_SPICE:
+    feedback_fn("Instance %s has SPICE listening on %s:%s", console.instance,
+                console.host, console.port)
   elif console.kind == constants.CONS_SSH:
     # Convert to string if not already one
     if isinstance(console.command, basestring):
@@ -976,8 +957,8 @@ def _FormatLogicalID(dev_type, logical_id, roman):
                                                             convert=roman))),
       ("nodeB", "%s, minor=%s" % (node_b, compat.TryToRoman(minor_b,
                                                             convert=roman))),
-      ("port", compat.TryToRoman(port, convert=roman)),
-      ("auth key", key),
+      ("port", str(compat.TryToRoman(port, convert=roman))),
+      ("auth key", str(key)),
       ]
   elif dev_type == constants.LD_LV:
     vg_name, lv_name = logical_id
@@ -988,7 +969,11 @@ def _FormatLogicalID(dev_type, logical_id, roman):
   return data
 
 
-def _FormatBlockDevInfo(idx, top_level, dev, static, roman):
+def _FormatListInfo(data):
+  return list(str(i) for i in data)
+
+
+def _FormatBlockDevInfo(idx, top_level, dev, roman):
   """Show block device information.
 
   This is only used by L{ShowInstanceConfig}, but it's too big to be
@@ -1000,9 +985,6 @@ def _FormatBlockDevInfo(idx, top_level, dev, static, roman):
   @param top_level: if this a top-level disk?
   @type dev: dict
   @param dev: dictionary with disk information
-  @type static: boolean
-  @param static: wheter the device information doesn't contain
-      runtime information but only static data
   @type roman: boolean
   @param roman: whether to try to use roman integers
   @return: a list of either strings, tuples or lists
@@ -1073,9 +1055,8 @@ def _FormatBlockDevInfo(idx, top_level, dev, static, roman):
   if isinstance(dev["size"], int):
     nice_size = utils.FormatUnit(dev["size"], "h")
   else:
-    nice_size = dev["size"]
-  d1 = ["- %s: %s, size %s" % (txt, dev["dev_type"], nice_size)]
-  data = []
+    nice_size = str(dev["size"])
+  data = [(txt, "%s, size %s" % (dev["dev_type"], nice_size))]
   if top_level:
     data.append(("access mode", dev["mode"]))
   if dev["logical_id"] is not None:
@@ -1088,48 +1069,134 @@ def _FormatBlockDevInfo(idx, top_level, dev, static, roman):
     else:
       data.extend(l_id)
   elif dev["physical_id"] is not None:
-    data.append("physical_id:")
-    data.append([dev["physical_id"]])
-  if not static:
-    data.append(("on primary", helper(dev["dev_type"], dev["pstatus"])))
-  if dev["sstatus"] and not static:
-    data.append(("on secondary", helper(dev["dev_type"], dev["sstatus"])))
+    data.append(("physical_id:", _FormatListInfo(dev["physical_id"])))
 
-  if dev["children"]:
-    data.append("child devices:")
-    for c_idx, child in enumerate(dev["children"]):
-      data.append(_FormatBlockDevInfo(c_idx, False, child, static, roman))
-  d1.append(data)
-  return d1
+  if dev["pstatus"]:
+    data.append(("on primary", helper(dev["dev_type"], dev["pstatus"])))
 
+  if dev["sstatus"]:
+    data.append(("on secondary", helper(dev["dev_type"], dev["sstatus"])))
 
-def _FormatList(buf, data, indent_level):
-  """Formats a list of data at a given indent level.
+  data.append(("name", dev["name"]))
+  data.append(("UUID", dev["uuid"]))
 
-  If the element of the list is:
-    - a string, it is simply formatted as is
-    - a tuple, it will be split into key, value and the all the
-      values in a list will be aligned all at the same start column
-    - a list, will be recursively formatted
+  if dev["children"]:
+    data.append(("child devices", [
+      _FormatBlockDevInfo(c_idx, False, child, roman)
+      for c_idx, child in enumerate(dev["children"])
+      ]))
+  return data
 
-  @type buf: StringIO
-  @param buf: the buffer into which we write the output
-  @param data: the list to format
-  @type indent_level: int
-  @param indent_level: the indent level to format at
 
-  """
-  max_tlen = max([len(elem[0]) for elem in data
-                 if isinstance(elem, tuple)] or [0])
-  for elem in data:
-    if isinstance(elem, basestring):
-      buf.write("%*s%s\n" % (2*indent_level, "", elem))
-    elif isinstance(elem, tuple):
-      key, value = elem
-      spacer = "%*s" % (max_tlen - len(key), "")
-      buf.write("%*s%s:%s %s\n" % (2*indent_level, "", key, spacer, value))
-    elif isinstance(elem, list):
-      _FormatList(buf, elem, indent_level+1)
+def _FormatInstanceNicInfo(idx, nic):
+  """Helper function for L{_FormatInstanceInfo()}"""
+  (name, uuid, ip, mac, mode, link, _, netinfo) = nic
+  network_name = None
+  if netinfo:
+    network_name = netinfo["name"]
+  return [
+    ("nic/%d" % idx, ""),
+    ("MAC", str(mac)),
+    ("IP", str(ip)),
+    ("mode", str(mode)),
+    ("link", str(link)),
+    ("network", str(network_name)),
+    ("UUID", str(uuid)),
+    ("name", str(name)),
+    ]
+
+
+def _FormatInstanceNodesInfo(instance):
+  """Helper function for L{_FormatInstanceInfo()}"""
+  pgroup = ("%s (UUID %s)" %
+            (instance["pnode_group_name"], instance["pnode_group_uuid"]))
+  secs = utils.CommaJoin(("%s (group %s, group UUID %s)" %
+                          (name, group_name, group_uuid))
+                         for (name, group_name, group_uuid) in
+                           zip(instance["snodes"],
+                               instance["snodes_group_names"],
+                               instance["snodes_group_uuids"]))
+  return [
+    [
+      ("primary", instance["pnode"]),
+      ("group", pgroup),
+      ],
+    [("secondaries", secs)],
+    ]
+
+
+def _GetVncConsoleInfo(instance):
+  """Helper function for L{_FormatInstanceInfo()}"""
+  vnc_bind_address = instance["hv_actual"].get(constants.HV_VNC_BIND_ADDRESS,
+                                               None)
+  if vnc_bind_address:
+    port = instance["network_port"]
+    display = int(port) - constants.VNC_BASE_PORT
+    if display > 0 and vnc_bind_address == constants.IP4_ADDRESS_ANY:
+      vnc_console_port = "%s:%s (display %s)" % (instance["pnode"],
+                                                 port,
+                                                 display)
+    elif display > 0 and netutils.IP4Address.IsValid(vnc_bind_address):
+      vnc_console_port = ("%s:%s (node %s) (display %s)" %
+                           (vnc_bind_address, port,
+                            instance["pnode"], display))
+    else:
+      # vnc bind address is a file
+      vnc_console_port = "%s:%s" % (instance["pnode"],
+                                    vnc_bind_address)
+    ret = "vnc to %s" % vnc_console_port
+  else:
+    ret = None
+  return ret
+
+
+def _FormatInstanceInfo(instance, roman_integers):
+  """Format instance information for L{cli.PrintGenericInfo()}"""
+  istate = "configured to be %s" % instance["config_state"]
+  if instance["run_state"]:
+    istate += ", actual state is %s" % instance["run_state"]
+  info = [
+    ("Instance name", instance["name"]),
+    ("UUID", instance["uuid"]),
+    ("Serial number",
+     str(compat.TryToRoman(instance["serial_no"], convert=roman_integers))),
+    ("Creation time", utils.FormatTime(instance["ctime"])),
+    ("Modification time", utils.FormatTime(instance["mtime"])),
+    ("State", istate),
+    ("Nodes", _FormatInstanceNodesInfo(instance)),
+    ("Operating system", instance["os"]),
+    ("Operating system parameters",
+     FormatParamsDictInfo(instance["os_instance"], instance["os_actual"])),
+    ]
+
+  if "network_port" in instance:
+    info.append(("Allocated network port",
+                 str(compat.TryToRoman(instance["network_port"],
+                                       convert=roman_integers))))
+  info.append(("Hypervisor", instance["hypervisor"]))
+  console = _GetVncConsoleInfo(instance)
+  if console:
+    info.append(("console connection", console))
+  # deprecated "memory" value, kept for one version for compatibility
+  # TODO(ganeti 2.7) remove.
+  be_actual = copy.deepcopy(instance["be_actual"])
+  be_actual["memory"] = be_actual[constants.BE_MAXMEM]
+  info.extend([
+    ("Hypervisor parameters",
+     FormatParamsDictInfo(instance["hv_instance"], instance["hv_actual"])),
+    ("Back-end parameters",
+     FormatParamsDictInfo(instance["be_instance"], be_actual)),
+    ("NICs", [
+      _FormatInstanceNicInfo(idx, nic)
+      for (idx, nic) in enumerate(instance["nics"])
+      ]),
+    ("Disk template", instance["disk_template"]),
+    ("Disks", [
+      _FormatBlockDevInfo(idx, True, device, roman_integers)
+      for (idx, device) in enumerate(instance["disks"])
+      ]),
+    ])
+  return info
 
 
 def ShowInstanceConfig(opts, args):
@@ -1160,79 +1227,87 @@ def ShowInstanceConfig(opts, args):
     ToStdout("No instances.")
     return 1
 
-  buf = StringIO()
-  retcode = 0
-  for instance_name in result:
-    instance = result[instance_name]
-    buf.write("Instance name: %s\n" % instance["name"])
-    buf.write("UUID: %s\n" % instance["uuid"])
-    buf.write("Serial number: %s\n" %
-              compat.TryToRoman(instance["serial_no"],
-                                convert=opts.roman_integers))
-    buf.write("Creation time: %s\n" % utils.FormatTime(instance["ctime"]))
-    buf.write("Modification time: %s\n" % utils.FormatTime(instance["mtime"]))
-    buf.write("State: configured to be %s" % instance["config_state"])
-    if not opts.static:
-      buf.write(", actual state is %s" % instance["run_state"])
-    buf.write("\n")
-    ##buf.write("Considered for memory checks in cluster verify: %s\n" %
-    ##          instance["auto_balance"])
-    buf.write("  Nodes:\n")
-    buf.write("    - primary: %s\n" % instance["pnode"])
-    buf.write("    - secondaries: %s\n" % utils.CommaJoin(instance["snodes"]))
-    buf.write("  Operating system: %s\n" % instance["os"])
-    FormatParameterDict(buf, instance["os_instance"], instance["os_actual"],
-                        level=2)
-    if instance.has_key("network_port"):
-      buf.write("  Allocated network port: %s\n" %
-                compat.TryToRoman(instance["network_port"],
-                                  convert=opts.roman_integers))
-    buf.write("  Hypervisor: %s\n" % instance["hypervisor"])
-
-    # custom VNC console information
-    vnc_bind_address = instance["hv_actual"].get(constants.HV_VNC_BIND_ADDRESS,
-                                                 None)
-    if vnc_bind_address:
-      port = instance["network_port"]
-      display = int(port) - constants.VNC_BASE_PORT
-      if display > 0 and vnc_bind_address == constants.IP4_ADDRESS_ANY:
-        vnc_console_port = "%s:%s (display %s)" % (instance["pnode"],
-                                                   port,
-                                                   display)
-      elif display > 0 and netutils.IP4Address.IsValid(vnc_bind_address):
-        vnc_console_port = ("%s:%s (node %s) (display %s)" %
-                             (vnc_bind_address, port,
-                              instance["pnode"], display))
-      else:
-        # vnc bind address is a file
-        vnc_console_port = "%s:%s" % (instance["pnode"],
-                                      vnc_bind_address)
-      buf.write("    - console connection: vnc to %s\n" % vnc_console_port)
-
-    FormatParameterDict(buf, instance["hv_instance"], instance["hv_actual"],
-                        level=2)
-    buf.write("  Hardware:\n")
-    buf.write("    - VCPUs: %s\n" %
-              compat.TryToRoman(instance["be_actual"][constants.BE_VCPUS],
-                                convert=opts.roman_integers))
-    buf.write("    - memory: %sMiB\n" %
-              compat.TryToRoman(instance["be_actual"][constants.BE_MEMORY],
-                                convert=opts.roman_integers))
-    buf.write("    - NICs:\n")
-    for idx, (ip, mac, mode, link) in enumerate(instance["nics"]):
-      buf.write("      - nic/%d: MAC: %s, IP: %s, mode: %s, link: %s\n" %
-                (idx, mac, ip, mode, link))
-    buf.write("  Disk template: %s\n" % instance["disk_template"])
-    buf.write("  Disks:\n")
-
-    for idx, device in enumerate(instance["disks"]):
-      _FormatList(buf, _FormatBlockDevInfo(idx, True, device, opts.static,
-                  opts.roman_integers), 2)
-
-  ToStdout(buf.getvalue().rstrip('\n'))
+  PrintGenericInfo([
+    _FormatInstanceInfo(instance, opts.roman_integers)
+    for instance in result.values()
+    ])
   return retcode
 
 
+def _ConvertNicDiskModifications(mods):
+  """Converts NIC/disk modifications from CLI to opcode.
+
+  When L{opcodes.OpInstanceSetParams} was changed to support adding/removing
+  disks at arbitrary indices, its parameter format changed. This function
+  converts legacy requests (e.g. "--net add" or "--disk add:size=4G") to the
+  newer format and adds support for new-style requests (e.g. "--net 4:add").
+
+  @type mods: list of tuples
+  @param mods: Modifications as given by command line parser
+  @rtype: list of tuples
+  @return: Modifications as understood by L{opcodes.OpInstanceSetParams}
+
+  """
+  result = []
+
+  for (identifier, params) in mods:
+    if identifier == constants.DDM_ADD:
+      # Add item as last item (legacy interface)
+      action = constants.DDM_ADD
+      identifier = -1
+    elif identifier == constants.DDM_REMOVE:
+      # Remove last item (legacy interface)
+      action = constants.DDM_REMOVE
+      identifier = -1
+    else:
+      # Modifications and adding/removing at arbitrary indices
+      add = params.pop(constants.DDM_ADD, _MISSING)
+      remove = params.pop(constants.DDM_REMOVE, _MISSING)
+      modify = params.pop(constants.DDM_MODIFY, _MISSING)
+
+      if modify is _MISSING:
+        if not (add is _MISSING or remove is _MISSING):
+          raise errors.OpPrereqError("Cannot add and remove at the same time",
+                                     errors.ECODE_INVAL)
+        elif add is not _MISSING:
+          action = constants.DDM_ADD
+        elif remove is not _MISSING:
+          action = constants.DDM_REMOVE
+        else:
+          action = constants.DDM_MODIFY
+
+      elif add is _MISSING and remove is _MISSING:
+        action = constants.DDM_MODIFY
+      else:
+        raise errors.OpPrereqError("Cannot modify and add/remove at the"
+                                   " same time", errors.ECODE_INVAL)
+
+      assert not (constants.DDMS_VALUES_WITH_MODIFY & set(params.keys()))
+
+    if action == constants.DDM_REMOVE and params:
+      raise errors.OpPrereqError("Not accepting parameters on removal",
+                                 errors.ECODE_INVAL)
+
+    result.append((action, identifier, params))
+
+  return result
+
+
+def _ParseDiskSizes(mods):
+  """Parses disk sizes in parameters.
+
+  """
+  for (action, _, params) in mods:
+    if params and constants.IDISK_SIZE in params:
+      params[constants.IDISK_SIZE] = \
+        utils.ParseUnit(params[constants.IDISK_SIZE])
+    elif action == constants.DDM_ADD:
+      raise errors.OpPrereqError("Missing required parameter 'size'",
+                                 errors.ECODE_INVAL)
+
+  return mods
+
+
 def SetInstanceParams(opts, args):
   """Modifies an instance.
 
@@ -1246,7 +1321,9 @@ def SetInstanceParams(opts, args):
 
   """
   if not (opts.nics or opts.disks or opts.disk_template or
-          opts.hvparams or opts.beparams or opts.os or opts.osparams):
+          opts.hvparams or opts.beparams or opts.os or opts.osparams or
+          opts.offline_inst or opts.online_inst or opts.runtime_mem or
+          opts.new_primary_node):
     ToStderr("Please give at least one of the parameters.")
     return 1
 
@@ -1255,7 +1332,7 @@ def SetInstanceParams(opts, args):
       if opts.beparams[param].lower() == "default":
         opts.beparams[param] = constants.VALUE_DEFAULT
 
-  utils.ForceDictType(opts.beparams, constants.BES_PARAMETER_TYPES,
+  utils.ForceDictType(opts.beparams, constants.BES_PARAMETER_COMPAT,
                       allowed_values=[constants.VALUE_DEFAULT])
 
   for param in opts.hvparams:
@@ -1266,43 +1343,51 @@ def SetInstanceParams(opts, args):
   utils.ForceDictType(opts.hvparams, constants.HVS_PARAMETER_TYPES,
                       allowed_values=[constants.VALUE_DEFAULT])
 
-  for idx, (nic_op, nic_dict) in enumerate(opts.nics):
-    try:
-      nic_op = int(nic_op)
-      opts.nics[idx] = (nic_op, nic_dict)
-    except (TypeError, ValueError):
-      pass
+  nics = _ConvertNicDiskModifications(opts.nics)
+  for action, _, __ in nics:
+    if action == constants.DDM_MODIFY and opts.hotplug:
+      usertext = ("You are about to hot-modify a NIC. This will be done"
+                  " by removing the exisiting and then adding a new one."
+                  " Network connection might be lost. Continue?")
+      if not AskUser(usertext):
+        return 1
 
-  for idx, (disk_op, disk_dict) in enumerate(opts.disks):
-    try:
-      disk_op = int(disk_op)
-      opts.disks[idx] = (disk_op, disk_dict)
-    except (TypeError, ValueError):
-      pass
-    if disk_op == constants.DDM_ADD:
-      if 'size' not in disk_dict:
-        raise errors.OpPrereqError("Missing required parameter 'size'",
-                                   errors.ECODE_INVAL)
-      disk_dict['size'] = utils.ParseUnit(disk_dict['size'])
+  disks = _ParseDiskSizes(_ConvertNicDiskModifications(opts.disks))
 
   if (opts.disk_template and
-      opts.disk_template in constants.DTS_NET_MIRROR and
+      opts.disk_template in constants.DTS_INT_MIRROR and
       not opts.node):
     ToStderr("Changing the disk template to a mirrored one requires"
              " specifying a secondary node")
     return 1
 
+  if opts.offline_inst:
+    offline = True
+  elif opts.online_inst:
+    offline = False
+  else:
+    offline = None
+
   op = opcodes.OpInstanceSetParams(instance_name=args[0],
-                                   nics=opts.nics,
-                                   disks=opts.disks,
+                                   nics=nics,
+                                   disks=disks,
+                                   hotplug=opts.hotplug,
+                                   hotplug_if_possible=opts.hotplug_if_possible,
+                                   keep_disks=opts.keep_disks,
                                    disk_template=opts.disk_template,
                                    remote_node=opts.node,
+                                   pnode=opts.new_primary_node,
                                    hvparams=opts.hvparams,
                                    beparams=opts.beparams,
+                                   runtime_mem=opts.runtime_mem,
                                    os_name=opts.os,
                                    osparams=opts.osparams,
                                    force_variant=opts.force_variant,
-                                   force=opts.force)
+                                   force=opts.force,
+                                   wait_for_sync=opts.wait_for_sync,
+                                   offline=offline,
+                                   conflicts_check=opts.conflicts_check,
+                                   ignore_ipolicy=opts.ignore_ipolicy)
 
   # even if here we process the result, we allow submit only
   result = SubmitOrSend(op, opts)
@@ -1311,11 +1396,47 @@ def SetInstanceParams(opts, args):
     ToStdout("Modified instance %s", args[0])
     for param, data in result:
       ToStdout(" - %-5s -> %s", param, data)
-    ToStdout("Please don't forget that most parameters take effect"
-             " only at the next start of the instance.")
+    if not opts.hotplug:
+      ToStdout("Please don't forget that most parameters take effect"
+               " only at the next (re)start of the instance initiated by"
+               " ganeti; restarting from within the instance will"
+               " not be enough.")
   return 0
 
 
+def ChangeGroup(opts, args):
+  """Moves an instance to another group.
+
+  """
+  (instance_name, ) = args
+
+  cl = GetClient()
+
+  op = opcodes.OpInstanceChangeGroup(instance_name=instance_name,
+                                     iallocator=opts.iallocator,
+                                     target_groups=opts.to,
+                                     early_release=opts.early_release)
+  result = SubmitOrSend(op, opts, cl=cl)
+
+  # Keep track of submitted jobs
+  jex = JobExecutor(cl=cl, opts=opts)
+
+  for (status, job_id) in result[constants.JOB_IDS_KEY]:
+    jex.AddJobId(None, status, job_id)
+
+  results = jex.GetResults()
+  bad_cnt = len([row for row in results if not row[0]])
+  if bad_cnt == 0:
+    ToStdout("Instance '%s' changed group successfully.", instance_name)
+    rcode = constants.EXIT_SUCCESS
+  else:
+    ToStdout("There were %s errors while changing group of instance '%s'.",
+             bad_cnt, instance_name)
+    rcode = constants.EXIT_FAILURE
+
+  return rcode
+
+
 # multi-instance selection options
 m_force_multi = cli_option("--force-multiple", dest="force_multi",
                            help="Do not ask for confirmation when more than"
@@ -1324,42 +1445,42 @@ m_force_multi = cli_option("--force-multiple", dest="force_multi",
 
 m_pri_node_opt = cli_option("--primary", dest="multi_mode",
                             help="Filter by nodes (primary only)",
-                            const=_SHUTDOWN_NODES_PRI, action="store_const")
+                            const=_EXPAND_NODES_PRI, action="store_const")
 
 m_sec_node_opt = cli_option("--secondary", dest="multi_mode",
                             help="Filter by nodes (secondary only)",
-                            const=_SHUTDOWN_NODES_SEC, action="store_const")
+                            const=_EXPAND_NODES_SEC, action="store_const")
 
 m_node_opt = cli_option("--node", dest="multi_mode",
                         help="Filter by nodes (primary and secondary)",
-                        const=_SHUTDOWN_NODES_BOTH, action="store_const")
+                        const=_EXPAND_NODES_BOTH, action="store_const")
 
 m_clust_opt = cli_option("--all", dest="multi_mode",
                          help="Select all instances in the cluster",
-                         const=_SHUTDOWN_CLUSTER, action="store_const")
+                         const=_EXPAND_CLUSTER, action="store_const")
 
 m_inst_opt = cli_option("--instance", dest="multi_mode",
                         help="Filter by instance name [default]",
-                        const=_SHUTDOWN_INSTANCES, action="store_const")
+                        const=_EXPAND_INSTANCES, action="store_const")
 
 m_node_tags_opt = cli_option("--node-tags", dest="multi_mode",
                              help="Filter by node tag",
-                             const=_SHUTDOWN_NODES_BOTH_BY_TAGS,
+                             const=_EXPAND_NODES_BOTH_BY_TAGS,
                              action="store_const")
 
 m_pri_node_tags_opt = cli_option("--pri-node-tags", dest="multi_mode",
                                  help="Filter by primary node tag",
-                                 const=_SHUTDOWN_NODES_PRI_BY_TAGS,
+                                 const=_EXPAND_NODES_PRI_BY_TAGS,
                                  action="store_const")
 
 m_sec_node_tags_opt = cli_option("--sec-node-tags", dest="multi_mode",
                                  help="Filter by secondary node tag",
-                                 const=_SHUTDOWN_NODES_SEC_BY_TAGS,
+                                 const=_EXPAND_NODES_SEC_BY_TAGS,
                                  action="store_const")
 
 m_inst_tags_opt = cli_option("--tags", dest="multi_mode",
                              help="Filter by instance tag",
-                             const=_SHUTDOWN_INSTANCES_BY_TAGS,
+                             const=_EXPAND_INSTANCES_BY_TAGS,
                              action="store_const")
 
 # this is defined separately due to readability only
@@ -1368,47 +1489,54 @@ add_opts = [
   OS_OPT,
   FORCE_VARIANT_OPT,
   NO_INSTALL_OPT,
+  IGNORE_IPOLICY_OPT,
   ]
 
 commands = {
-  'add': (
+  "add": (
     AddInstance, [ArgHost(min=1, max=1)], COMMON_CREATE_OPTS + add_opts,
     "[...] -t disk-type -n node[:secondary-node] -o os-type <name>",
     "Creates and adds a new instance to the cluster"),
-  'batch-create': (
-    BatchCreate, [ArgFile(min=1, max=1)], [DRY_RUN_OPT, PRIORITY_OPT],
+  "batch-create": (
+    BatchCreate, [ArgFile(min=1, max=1)],
+    [DRY_RUN_OPT, PRIORITY_OPT, IALLOCATOR_OPT, SUBMIT_OPT],
     "<instances.json>",
     "Create a bunch of instances based on specs in the file."),
-  'console': (
+  "console": (
     ConnectToInstanceConsole, ARGS_ONE_INSTANCE,
     [SHOWCMD_OPT, PRIORITY_OPT],
     "[--show-cmd] <instance>", "Opens a console on the specified instance"),
-  'failover': (
+  "failover": (
     FailoverInstance, ARGS_ONE_INSTANCE,
     [FORCE_OPT, IGNORE_CONSIST_OPT, SUBMIT_OPT, SHUTDOWN_TIMEOUT_OPT,
-     DRY_RUN_OPT, PRIORITY_OPT],
-    "[-f] <instance>", "Stops the instance and starts it on the backup node,"
-    " using the remote mirror (only for instances of type drbd)"),
-  'migrate': (
+     DRY_RUN_OPT, PRIORITY_OPT, DST_NODE_OPT, IALLOCATOR_OPT,
+     IGNORE_IPOLICY_OPT, CLEANUP_OPT],
+    "[-f] <instance>", "Stops the instance, changes its primary node and"
+    " (if it was originally running) starts it on the new node"
+    " (the secondary for mirrored instances or any node"
+    " for shared storage)."),
+  "migrate": (
     MigrateInstance, ARGS_ONE_INSTANCE,
     [FORCE_OPT, NONLIVE_OPT, MIGRATION_MODE_OPT, CLEANUP_OPT, DRY_RUN_OPT,
-     PRIORITY_OPT],
+     PRIORITY_OPT, DST_NODE_OPT, IALLOCATOR_OPT, ALLOW_FAILOVER_OPT,
+     IGNORE_IPOLICY_OPT, NORUNTIME_CHGS_OPT, SUBMIT_OPT],
     "[-f] <instance>", "Migrate instance to its secondary node"
-    " (only for instances of type drbd)"),
-  'move': (
+    " (only for mirrored instances)"),
+  "move": (
     MoveInstance, ARGS_ONE_INSTANCE,
     [FORCE_OPT, SUBMIT_OPT, SINGLE_NODE_OPT, SHUTDOWN_TIMEOUT_OPT,
-     DRY_RUN_OPT, PRIORITY_OPT],
+     DRY_RUN_OPT, PRIORITY_OPT, IGNORE_CONSIST_OPT, IGNORE_IPOLICY_OPT],
     "[-f] <instance>", "Move instance to an arbitrary node"
     " (only for instances of type file and lv)"),
-  'info': (
+  "info": (
     ShowInstanceConfig, ARGS_MANY_INSTANCES,
     [STATIC_OPT, ALL_OPT, ROMAN_OPT, PRIORITY_OPT],
     "[-s] {--all | <instance>...}",
     "Show information on the specified instance(s)"),
-  'list': (
+  "list": (
     ListInstances, ARGS_MANY_INSTANCES,
-    [NOHDR_OPT, SEP_OPT, USEUNITS_OPT, FIELDS_OPT, VERBOSE_OPT],
+    [NOHDR_OPT, SEP_OPT, USEUNITS_OPT, FIELDS_OPT, VERBOSE_OPT,
+     FORCE_FILTER_OPT],
     "[<instance>...]",
     "Lists the instances and their status. The available fields can be shown"
     " using the \"list-fields\" command (see the man page for details)."
@@ -1420,95 +1548,110 @@ commands = {
     [NOHDR_OPT, SEP_OPT],
     "[fields...]",
     "Lists all available fields for instances"),
-  'reinstall': (
+  "reinstall": (
     ReinstallInstance, [ArgInstance()],
     [FORCE_OPT, OS_OPT, FORCE_VARIANT_OPT, m_force_multi, m_node_opt,
      m_pri_node_opt, m_sec_node_opt, m_clust_opt, m_inst_opt, m_node_tags_opt,
      m_pri_node_tags_opt, m_sec_node_tags_opt, m_inst_tags_opt, SELECT_OS_OPT,
      SUBMIT_OPT, DRY_RUN_OPT, PRIORITY_OPT, OSPARAMS_OPT],
     "[-f] <instance>", "Reinstall a stopped instance"),
-  'remove': (
+  "snapshot": (
+    SnapshotInstance, [ArgInstance(min=1, max=1)],
+    [DISK_OPT, SUBMIT_OPT, DRY_RUN_OPT],
+    "<instance>", "Snapshot an instance's disk(s)"),
+  "remove": (
     RemoveInstance, ARGS_ONE_INSTANCE,
     [FORCE_OPT, SHUTDOWN_TIMEOUT_OPT, IGNORE_FAILURES_OPT, SUBMIT_OPT,
-     DRY_RUN_OPT, PRIORITY_OPT],
+     DRY_RUN_OPT, PRIORITY_OPT, KEEPDISKS_OPT],
     "[-f] <instance>", "Shuts down the instance and removes it"),
-  'rename': (
+  "rename": (
     RenameInstance,
     [ArgInstance(min=1, max=1), ArgHost(min=1, max=1)],
     [NOIPCHECK_OPT, NONAMECHECK_OPT, SUBMIT_OPT, DRY_RUN_OPT, PRIORITY_OPT],
     "<instance> <new_name>", "Rename the instance"),
-  'replace-disks': (
+  "replace-disks": (
     ReplaceDisks, ARGS_ONE_INSTANCE,
     [AUTO_REPLACE_OPT, DISKIDX_OPT, IALLOCATOR_OPT, EARLY_RELEASE_OPT,
      NEW_SECONDARY_OPT, ON_PRIMARY_OPT, ON_SECONDARY_OPT, SUBMIT_OPT,
-     DRY_RUN_OPT, PRIORITY_OPT],
-    "[-s|-p|-n NODE|-I NAME] <instance>",
-    "Replaces all disks for the instance"),
-  'modify': (
+     DRY_RUN_OPT, PRIORITY_OPT, IGNORE_IPOLICY_OPT],
+    "[-s|-p|-a|-n NODE|-I NAME] <instance>",
+    "Replaces disks for the instance"),
+  "modify": (
     SetInstanceParams, ARGS_ONE_INSTANCE,
     [BACKEND_OPT, DISK_OPT, FORCE_OPT, HVOPTS_OPT, NET_OPT, SUBMIT_OPT,
      DISK_TEMPLATE_OPT, SINGLE_NODE_OPT, OS_OPT, FORCE_VARIANT_OPT,
-     OSPARAMS_OPT, DRY_RUN_OPT, PRIORITY_OPT],
+     OSPARAMS_OPT, DRY_RUN_OPT, PRIORITY_OPT, NWSYNC_OPT, OFFLINE_INST_OPT,
+     ONLINE_INST_OPT, IGNORE_IPOLICY_OPT, RUNTIME_MEM_OPT,
+     NOCONFLICTSCHECK_OPT, NEW_PRIMARY_OPT, HOTPLUG_OPT, KEEPDISKS_OPT,
+     HOTPLUG_IF_POSSIBLE_OPT],
     "<instance>", "Alters the parameters of an instance"),
-  'shutdown': (
+  "shutdown": (
     GenericManyOps("shutdown", _ShutdownInstance), [ArgInstance()],
-    [m_node_opt, m_pri_node_opt, m_sec_node_opt, m_clust_opt,
+    [FORCE_OPT, m_node_opt, m_pri_node_opt, m_sec_node_opt, m_clust_opt,
      m_node_tags_opt, m_pri_node_tags_opt, m_sec_node_tags_opt,
      m_inst_tags_opt, m_inst_opt, m_force_multi, TIMEOUT_OPT, SUBMIT_OPT,
-     DRY_RUN_OPT, PRIORITY_OPT, IGNORE_OFFLINE_OPT],
+     DRY_RUN_OPT, PRIORITY_OPT, IGNORE_OFFLINE_OPT, NO_REMEMBER_OPT],
     "<instance>", "Stops an instance"),
-  'startup': (
+  "startup": (
     GenericManyOps("startup", _StartupInstance), [ArgInstance()],
     [FORCE_OPT, m_force_multi, m_node_opt, m_pri_node_opt, m_sec_node_opt,
      m_node_tags_opt, m_pri_node_tags_opt, m_sec_node_tags_opt,
      m_inst_tags_opt, m_clust_opt, m_inst_opt, SUBMIT_OPT, HVOPTS_OPT,
-     BACKEND_OPT, DRY_RUN_OPT, PRIORITY_OPT, IGNORE_OFFLINE_OPT],
+     BACKEND_OPT, DRY_RUN_OPT, PRIORITY_OPT, IGNORE_OFFLINE_OPT,
+     NO_REMEMBER_OPT, STARTUP_PAUSED_OPT],
     "<instance>", "Starts an instance"),
-  'reboot': (
+  "reboot": (
     GenericManyOps("reboot", _RebootInstance), [ArgInstance()],
     [m_force_multi, REBOOT_TYPE_OPT, IGNORE_SECONDARIES_OPT, m_node_opt,
      m_pri_node_opt, m_sec_node_opt, m_clust_opt, m_inst_opt, SUBMIT_OPT,
      m_node_tags_opt, m_pri_node_tags_opt, m_sec_node_tags_opt,
      m_inst_tags_opt, SHUTDOWN_TIMEOUT_OPT, DRY_RUN_OPT, PRIORITY_OPT],
     "<instance>", "Reboots an instance"),
-  'activate-disks': (
+  "activate-disks": (
     ActivateDisks, ARGS_ONE_INSTANCE,
-    [SUBMIT_OPT, IGNORE_SIZE_OPT, PRIORITY_OPT],
+    [SUBMIT_OPT, IGNORE_SIZE_OPT, PRIORITY_OPT, WFSYNC_OPT],
     "<instance>", "Activate an instance's disks"),
-  'deactivate-disks': (
+  "deactivate-disks": (
     DeactivateDisks, ARGS_ONE_INSTANCE,
     [FORCE_OPT, SUBMIT_OPT, DRY_RUN_OPT, PRIORITY_OPT],
     "[-f] <instance>", "Deactivate an instance's disks"),
-  'recreate-disks': (
+  "recreate-disks": (
     RecreateDisks, ARGS_ONE_INSTANCE,
-    [SUBMIT_OPT, DISKIDX_OPT, NODE_PLACEMENT_OPT, DRY_RUN_OPT, PRIORITY_OPT],
+    [SUBMIT_OPT, DISK_OPT, NODE_PLACEMENT_OPT, DRY_RUN_OPT, PRIORITY_OPT,
+     IALLOCATOR_OPT],
     "<instance>", "Recreate an instance's disks"),
-  'grow-disk': (
+  "grow-disk": (
     GrowDisk,
     [ArgInstance(min=1, max=1), ArgUnknown(min=1, max=1),
      ArgUnknown(min=1, max=1)],
-    [SUBMIT_OPT, NWSYNC_OPT, DRY_RUN_OPT, PRIORITY_OPT],
+    [SUBMIT_OPT, NWSYNC_OPT, DRY_RUN_OPT, PRIORITY_OPT, ABSOLUTE_OPT],
     "<instance> <disk> <size>", "Grow an instance's disk"),
-  'list-tags': (
-    ListTags, ARGS_ONE_INSTANCE, [PRIORITY_OPT],
+  "change-group": (
+    ChangeGroup, ARGS_ONE_INSTANCE,
+    [TO_GROUP_OPT, IALLOCATOR_OPT, EARLY_RELEASE_OPT, PRIORITY_OPT, SUBMIT_OPT],
+    "[-I <iallocator>] [--to <group>]", "Change group of instance"),
+  "list-tags": (
+    ListTags, ARGS_ONE_INSTANCE, [],
     "<instance_name>", "List the tags of the given instance"),
-  'add-tags': (
+  "add-tags": (
     AddTags, [ArgInstance(min=1, max=1), ArgUnknown()],
-    [TAG_SRC_OPT, PRIORITY_OPT],
+    [TAG_SRC_OPT, PRIORITY_OPT, SUBMIT_OPT],
     "<instance_name> tag...", "Add tags to the given instance"),
-  'remove-tags': (
+  "remove-tags": (
     RemoveTags, [ArgInstance(min=1, max=1), ArgUnknown()],
-    [TAG_SRC_OPT, PRIORITY_OPT],
+    [TAG_SRC_OPT, PRIORITY_OPT, SUBMIT_OPT],
     "<instance_name> tag...", "Remove tags from given instance"),
   }
 
 #: dictionary with aliases for commands
 aliases = {
-  'start': 'startup',
-  'stop': 'shutdown',
+  "start": "startup",
+  "stop": "shutdown",
+  "show": "info",
   }
 
 
 def Main():
   return GenericMain(commands, aliases=aliases,
-                     override={"tag_type": constants.TAG_INSTANCE})
+                     override={"tag_type": constants.TAG_INSTANCE},
+                     env_override=_ENV_OVERRIDE)
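
For reference, a short sketch of the (action, identifier, params) format that _ConvertNicDiskModifications above hands to opcodes.OpInstanceSetParams; the parser output shown is an assumption, since the --net/--disk option definitions live in ganeti.cli and are not part of this diff.

    from ganeti import constants

    # Assumed output of the option parser for two requests:
    mods = [
      (constants.DDM_ADD, {"size": "4G"}),   # legacy "--disk add:size=4G"
      ("1", {constants.DDM_REMOVE: True}),   # new-style request on disk 1
      ]
    # _ConvertNicDiskModifications(mods) would return:
    #   [(constants.DDM_ADD, -1, {"size": "4G"}),
    #    (constants.DDM_REMOVE, "1", {})]
    # For disks, _ParseDiskSizes() then converts "4G" into an integer number
    # of mebibytes via utils.ParseUnit() before the opcode is built.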