Add cleanup parameter to instance failover
lib/client/gnt_instance.py
index 402629d..8ee9ca9 100644
@@ -1,7 +1,7 @@
 #
 #
 
-# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011 Google Inc.
+# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012 Google Inc.
 #
 # This program is free software; you can redistribute it and/or modify
 # it under the terms of the GNU General Public License as published by
 
 """Instance related commands"""
 
-# pylint: disable-msg=W0401,W0614,C0103
+# pylint: disable=W0401,W0614,C0103
 # W0401: Wildcard import ganeti.cli
 # W0614: Unused import %s from wildcard import (since we need cli)
 # C0103: Invalid name gnt-instance
 
+import copy
 import itertools
 import simplejson
 import logging
-from cStringIO import StringIO
 
 from ganeti.cli import *
 from ganeti import opcodes
@@ -39,43 +39,49 @@ from ganeti import errors
 from ganeti import netutils
 from ganeti import ssh
 from ganeti import objects
+from ganeti import ht
 
 
-_SHUTDOWN_CLUSTER = "cluster"
-_SHUTDOWN_NODES_BOTH = "nodes"
-_SHUTDOWN_NODES_PRI = "nodes-pri"
-_SHUTDOWN_NODES_SEC = "nodes-sec"
-_SHUTDOWN_NODES_BOTH_BY_TAGS = "nodes-by-tags"
-_SHUTDOWN_NODES_PRI_BY_TAGS = "nodes-pri-by-tags"
-_SHUTDOWN_NODES_SEC_BY_TAGS = "nodes-sec-by-tags"
-_SHUTDOWN_INSTANCES = "instances"
-_SHUTDOWN_INSTANCES_BY_TAGS = "instances-by-tags"
-
-_SHUTDOWN_NODES_TAGS_MODES = (
-    _SHUTDOWN_NODES_BOTH_BY_TAGS,
-    _SHUTDOWN_NODES_PRI_BY_TAGS,
-    _SHUTDOWN_NODES_SEC_BY_TAGS)
+_EXPAND_CLUSTER = "cluster"
+_EXPAND_NODES_BOTH = "nodes"
+_EXPAND_NODES_PRI = "nodes-pri"
+_EXPAND_NODES_SEC = "nodes-sec"
+_EXPAND_NODES_BOTH_BY_TAGS = "nodes-by-tags"
+_EXPAND_NODES_PRI_BY_TAGS = "nodes-pri-by-tags"
+_EXPAND_NODES_SEC_BY_TAGS = "nodes-sec-by-tags"
+_EXPAND_INSTANCES = "instances"
+_EXPAND_INSTANCES_BY_TAGS = "instances-by-tags"
 
+_EXPAND_NODES_TAGS_MODES = compat.UniqueFrozenset([
+  _EXPAND_NODES_BOTH_BY_TAGS,
+  _EXPAND_NODES_PRI_BY_TAGS,
+  _EXPAND_NODES_SEC_BY_TAGS,
+  ])
 
 #: default list of options for L{ListInstances}
 _LIST_DEF_FIELDS = [
   "name", "hypervisor", "os", "pnode", "status", "oper_ram",
   ]
 
+_MISSING = object()
+_ENV_OVERRIDE = compat.UniqueFrozenset(["list"])
+
+_INST_DATA_VAL = ht.TListOf(ht.TDict)
+
 
 def _ExpandMultiNames(mode, names, client=None):
   """Expand the given names using the passed mode.
 
-  For _SHUTDOWN_CLUSTER, all instances will be returned. For
-  _SHUTDOWN_NODES_PRI/SEC, all instances having those nodes as
-  primary/secondary will be returned. For _SHUTDOWN_NODES_BOTH, all
+  For _EXPAND_CLUSTER, all instances will be returned. For
+  _EXPAND_NODES_PRI/SEC, all instances having those nodes as
+  primary/secondary will be returned. For _EXPAND_NODES_BOTH, all
   instances having those nodes as either primary or secondary will be
-  returned. For _SHUTDOWN_INSTANCES, the given instances will be
+  returned. For _EXPAND_INSTANCES, the given instances will be
   returned.
 
-  @param mode: one of L{_SHUTDOWN_CLUSTER}, L{_SHUTDOWN_NODES_BOTH},
-      L{_SHUTDOWN_NODES_PRI}, L{_SHUTDOWN_NODES_SEC} or
-      L{_SHUTDOWN_INSTANCES}
+  @param mode: one of L{_EXPAND_CLUSTER}, L{_EXPAND_NODES_BOTH},
+      L{_EXPAND_NODES_PRI}, L{_EXPAND_NODES_SEC} or
+      L{_EXPAND_INSTANCES}
   @param names: a list of names; for cluster, it must be empty,
       and for node and instance it must be a list of valid item
       names (short names are valid as usual, e.g. node1 instead of
@@ -86,21 +92,20 @@ def _ExpandMultiNames(mode, names, client=None):
   @raise errors.OpPrereqError: for invalid input parameters
 
   """
-  # pylint: disable-msg=W0142
+  # pylint: disable=W0142
 
   if client is None:
     client = GetClient()
-  if mode == _SHUTDOWN_CLUSTER:
+  if mode == _EXPAND_CLUSTER:
     if names:
       raise errors.OpPrereqError("Cluster filter mode takes no arguments",
                                  errors.ECODE_INVAL)
     idata = client.QueryInstances([], ["name"], False)
     inames = [row[0] for row in idata]
 
-  elif mode in (_SHUTDOWN_NODES_BOTH,
-                _SHUTDOWN_NODES_PRI,
-                _SHUTDOWN_NODES_SEC) + _SHUTDOWN_NODES_TAGS_MODES:
-    if mode in _SHUTDOWN_NODES_TAGS_MODES:
+  elif (mode in _EXPAND_NODES_TAGS_MODES or
+        mode in (_EXPAND_NODES_BOTH, _EXPAND_NODES_PRI, _EXPAND_NODES_SEC)):
+    if mode in _EXPAND_NODES_TAGS_MODES:
       if not names:
         raise errors.OpPrereqError("No node tags passed", errors.ECODE_INVAL)
       ndata = client.QueryNodes([], ["name", "pinst_list",
@@ -110,27 +115,27 @@ def _ExpandMultiNames(mode, names, client=None):
       if not names:
         raise errors.OpPrereqError("No node names passed", errors.ECODE_INVAL)
       ndata = client.QueryNodes(names, ["name", "pinst_list", "sinst_list"],
-                              False)
+                                False)
 
     ipri = [row[1] for row in ndata]
     pri_names = list(itertools.chain(*ipri))
     isec = [row[2] for row in ndata]
     sec_names = list(itertools.chain(*isec))
-    if mode in (_SHUTDOWN_NODES_BOTH, _SHUTDOWN_NODES_BOTH_BY_TAGS):
+    if mode in (_EXPAND_NODES_BOTH, _EXPAND_NODES_BOTH_BY_TAGS):
       inames = pri_names + sec_names
-    elif mode in (_SHUTDOWN_NODES_PRI, _SHUTDOWN_NODES_PRI_BY_TAGS):
+    elif mode in (_EXPAND_NODES_PRI, _EXPAND_NODES_PRI_BY_TAGS):
       inames = pri_names
-    elif mode in (_SHUTDOWN_NODES_SEC, _SHUTDOWN_NODES_SEC_BY_TAGS):
+    elif mode in (_EXPAND_NODES_SEC, _EXPAND_NODES_SEC_BY_TAGS):
       inames = sec_names
     else:
       raise errors.ProgrammerError("Unhandled shutdown type")
-  elif mode == _SHUTDOWN_INSTANCES:
+  elif mode == _EXPAND_INSTANCES:
     if not names:
       raise errors.OpPrereqError("No instance names passed",
                                  errors.ECODE_INVAL)
     idata = client.QueryInstances(names, ["name"], False)
     inames = [row[0] for row in idata]
-  elif mode == _SHUTDOWN_INSTANCES_BY_TAGS:
+  elif mode == _EXPAND_INSTANCES_BY_TAGS:
     if not names:
       raise errors.OpPrereqError("No instance tags passed",
                                  errors.ECODE_INVAL)
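(A minimal usage sketch of the expansion helper above, with hypothetical node
names:)

# all instances whose primary node is node1
inames = _ExpandMultiNames(_EXPAND_NODES_PRI, ["node1.example.com"])
# every instance in the cluster; the name list must be empty in this mode
inames = _ExpandMultiNames(_EXPAND_CLUSTER, [])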
@@ -175,16 +180,16 @@ def GenericManyOps(operation, fn):
   """
   def realfn(opts, args):
     if opts.multi_mode is None:
-      opts.multi_mode = _SHUTDOWN_INSTANCES
+      opts.multi_mode = _EXPAND_INSTANCES
     cl = GetClient()
     inames = _ExpandMultiNames(opts.multi_mode, args, client=cl)
     if not inames:
-      if opts.multi_mode == _SHUTDOWN_CLUSTER:
+      if opts.multi_mode == _EXPAND_CLUSTER:
         ToStdout("Cluster is empty, no instances to shutdown")
         return 0
       raise errors.OpPrereqError("Selection filter does not match"
                                  " any instances", errors.ECODE_INVAL)
-    multi_on = opts.multi_mode != _SHUTDOWN_INSTANCES or len(inames) > 1
+    multi_on = opts.multi_mode != _EXPAND_INSTANCES or len(inames) > 1
     if not (opts.force_multi or not multi_on
             or ConfirmOperation(inames, "instances", operation)):
       return 1
@@ -212,7 +217,8 @@ def ListInstances(opts, args):
 
   fmtoverride = dict.fromkeys(["tags", "disk.sizes", "nic.macs", "nic.ips",
                                "nic.modes", "nic.links", "nic.bridges",
-                               "snodes"],
+                               "nic.networks",
+                               "snodes", "snodes.group", "snodes.group.uuid"],
                               (lambda value: ",".join(str(item)
                                                       for item in value),
                                False))
@@ -249,23 +255,8 @@ def AddInstance(opts, args):
 def BatchCreate(opts, args):
   """Create instances using a definition file.
 
-  This function reads a json file with instances defined
-  in the form::
-
-    {"instance-name":{
-      "disk_size": [20480],
-      "template": "drbd",
-      "backend": {
-        "memory": 512,
-        "vcpus": 1 },
-      "os": "debootstrap",
-      "primary_node": "firstnode",
-      "secondary_node": "secondnode",
-      "iallocator": "dumb"}
-    }
-
-  Note that I{primary_node} and I{secondary_node} have precedence over
-  I{iallocator}.
+  This function reads a json file with L{opcodes.OpInstanceCreate}
+  serialisations.
 
   @param opts: the command line options selected by the user
   @type args: list
@@ -274,130 +265,54 @@ def BatchCreate(opts, args):
   @return: the desired exit code
 
   """
-  _DEFAULT_SPECS = {"disk_size": [20 * 1024],
-                    "backend": {},
-                    "iallocator": None,
-                    "primary_node": None,
-                    "secondary_node": None,
-                    "nics": None,
-                    "start": True,
-                    "ip_check": True,
-                    "name_check": True,
-                    "hypervisor": None,
-                    "hvparams": {},
-                    "file_storage_dir": None,
-                    "force_variant": False,
-                    "file_driver": 'loop'}
-
-  def _PopulateWithDefaults(spec):
-    """Returns a new hash combined with default values."""
-    mydict = _DEFAULT_SPECS.copy()
-    mydict.update(spec)
-    return mydict
-
-  def _Validate(spec):
-    """Validate the instance specs."""
-    # Validate fields required under any circumstances
-    for required_field in ('os', 'template'):
-      if required_field not in spec:
-        raise errors.OpPrereqError('Required field "%s" is missing.' %
-                                   required_field, errors.ECODE_INVAL)
-    # Validate special fields
-    if spec['primary_node'] is not None:
-      if (spec['template'] in constants.DTS_INT_MIRROR and
-          spec['secondary_node'] is None):
-        raise errors.OpPrereqError('Template requires secondary node, but'
-                                   ' there was no secondary provided.',
-                                   errors.ECODE_INVAL)
-    elif spec['iallocator'] is None:
-      raise errors.OpPrereqError('You have to provide at least a primary_node'
-                                 ' or an iallocator.',
-                                 errors.ECODE_INVAL)
-
-    if (spec['hvparams'] and
-        not isinstance(spec['hvparams'], dict)):
-      raise errors.OpPrereqError('Hypervisor parameters must be a dict.',
-                                 errors.ECODE_INVAL)
+  (json_filename,) = args
+  cl = GetClient()
 
-  json_filename = args[0]
   try:
     instance_data = simplejson.loads(utils.ReadFile(json_filename))
-  except Exception, err: # pylint: disable-msg=W0703
+  except Exception, err: # pylint: disable=W0703
     ToStderr("Can't parse the instance definition file: %s" % str(err))
     return 1
 
-  if not isinstance(instance_data, dict):
-    ToStderr("The instance definition file is not in dict format.")
+  if not _INST_DATA_VAL(instance_data):
+    ToStderr("The instance definition file is not %s" % _INST_DATA_VAL)
     return 1
 
-  jex = JobExecutor(opts=opts)
+  instances = []
+  possible_params = set(opcodes.OpInstanceCreate.GetAllSlots())
+  for (idx, inst) in enumerate(instance_data):
+    unknown = set(inst.keys()) - possible_params
 
-  # Iterate over the instances and do:
-  #  * Populate the specs with default value
-  #  * Validate the instance specs
-  i_names = utils.NiceSort(instance_data.keys()) # pylint: disable-msg=E1103
-  for name in i_names:
-    specs = instance_data[name]
-    specs = _PopulateWithDefaults(specs)
-    _Validate(specs)
+    if unknown:
+      # TODO: Suggest closest match for more user friendly experience
+      raise errors.OpPrereqError("Unknown fields in definition %s: %s" %
+                                 (idx, utils.CommaJoin(unknown)),
+                                 errors.ECODE_INVAL)
 
-    hypervisor = specs['hypervisor']
-    hvparams = specs['hvparams']
+    op = opcodes.OpInstanceCreate(**inst) # pylint: disable=W0142
+    op.Validate(False)
+    instances.append(op)
 
-    disks = []
-    for elem in specs['disk_size']:
-      try:
-        size = utils.ParseUnit(elem)
-      except (TypeError, ValueError), err:
-        raise errors.OpPrereqError("Invalid disk size '%s' for"
-                                   " instance %s: %s" %
-                                   (elem, name, err), errors.ECODE_INVAL)
-      disks.append({"size": size})
-
-    utils.ForceDictType(specs['backend'], constants.BES_PARAMETER_TYPES)
-    utils.ForceDictType(hvparams, constants.HVS_PARAMETER_TYPES)
-
-    tmp_nics = []
-    for field in constants.INIC_PARAMS:
-      if field in specs:
-        if not tmp_nics:
-          tmp_nics.append({})
-        tmp_nics[0][field] = specs[field]
-
-    if specs['nics'] is not None and tmp_nics:
-      raise errors.OpPrereqError("'nics' list incompatible with using"
-                                 " individual nic fields as well",
-                                 errors.ECODE_INVAL)
-    elif specs['nics'] is not None:
-      tmp_nics = specs['nics']
-    elif not tmp_nics:
-      tmp_nics = [{}]
-
-    op = opcodes.OpInstanceCreate(instance_name=name,
-                                  disks=disks,
-                                  disk_template=specs['template'],
-                                  mode=constants.INSTANCE_CREATE,
-                                  os_type=specs['os'],
-                                  force_variant=specs["force_variant"],
-                                  pnode=specs['primary_node'],
-                                  snode=specs['secondary_node'],
-                                  nics=tmp_nics,
-                                  start=specs['start'],
-                                  ip_check=specs['ip_check'],
-                                  name_check=specs['name_check'],
-                                  wait_for_sync=True,
-                                  iallocator=specs['iallocator'],
-                                  hypervisor=hypervisor,
-                                  hvparams=hvparams,
-                                  beparams=specs['backend'],
-                                  file_storage_dir=specs['file_storage_dir'],
-                                  file_driver=specs['file_driver'])
-
-    jex.QueueJob(name, op)
-  # we never want to wait, just show the submitted job IDs
-  jex.WaitOrShow(False)
+  op = opcodes.OpInstanceMultiAlloc(iallocator=opts.iallocator,
+                                    instances=instances)
+  result = SubmitOrSend(op, opts, cl=cl)
 
-  return 0
+  # Keep track of submitted jobs
+  jex = JobExecutor(cl=cl, opts=opts)
+
+  for (status, job_id) in result[constants.JOB_IDS_KEY]:
+    jex.AddJobId(None, status, job_id)
+
+  results = jex.GetResults()
+  bad_cnt = len([row for row in results if not row[0]])
+  if bad_cnt == 0:
+    ToStdout("All instances created successfully.")
+    rcode = constants.EXIT_SUCCESS
+  else:
+    ToStdout("There were %s errors during the creation.", bad_cnt)
+    rcode = constants.EXIT_FAILURE
+
+  return rcode
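(Since the old inline example was dropped from the docstring, here is a rough
sketch of what a definition file might now contain: a JSON list of
OpInstanceCreate parameter dicts. The field names are taken from the
parameters used elsewhere in this file; the values are purely illustrative.)

[
  {
    "instance_name": "instance1.example.com",
    "mode": "create",
    "disk_template": "drbd",
    "disks": [{"size": 20480}],
    "nics": [{}],
    "os_type": "debootstrap",
    "pnode": "node1.example.com",
    "snode": "node2.example.com",
    "beparams": {"memory": 512, "vcpus": 1}
  }
]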
 
 
 def ReinstallInstance(opts, args):
@@ -413,7 +328,7 @@ def ReinstallInstance(opts, args):
   """
   # first, compute the desired name list
   if opts.multi_mode is None:
-    opts.multi_mode = _SHUTDOWN_INSTANCES
+    opts.multi_mode = _EXPAND_INSTANCES
 
   inames = _ExpandMultiNames(opts.multi_mode, args)
   if not inames:
@@ -438,11 +353,11 @@ def ReinstallInstance(opts, args):
         choices.append(("%s" % number, entry, entry))
         number += 1
 
-    choices.append(('x', 'exit', 'Exit gnt-instance reinstall'))
+    choices.append(("x", "exit", "Exit gnt-instance reinstall"))
     selected = AskUser("Enter OS template number (or x to abort):",
                        choices)
 
-    if selected == 'exit':
+    if selected == "exit":
       ToStderr("User aborted reinstall, exiting")
       return 1
 
@@ -458,7 +373,7 @@ def ReinstallInstance(opts, args):
   # third, get confirmation: multi-reinstall requires --force-multi,
   # single-reinstall either --force or --force-multi (--force-multi is
   # a stronger --force)
-  multi_on = opts.multi_mode != _SHUTDOWN_INSTANCES or len(inames) > 1
+  multi_on = opts.multi_mode != _EXPAND_INSTANCES or len(inames) > 1
   if multi_on:
     warn_msg = ("Note: this will remove *all* data for the"
                 " below instances! It will %s.\n" % os_msg)
@@ -480,8 +395,12 @@ def ReinstallInstance(opts, args):
                                      osparams=opts.osparams)
     jex.QueueJob(instance_name, op)
 
-  jex.WaitOrShow(not opts.submit_only)
-  return 0
+  results = jex.WaitOrShow(not opts.submit_only)
+
+  if compat.all(map(compat.fst, results)):
+    return constants.EXIT_SUCCESS
+  else:
+    return constants.EXIT_FAILURE
 
 
 def RemoveInstance(opts, args):
@@ -560,7 +479,8 @@ def ActivateDisks(opts, args):
   """
   instance_name = args[0]
   op = opcodes.OpInstanceActivateDisks(instance_name=instance_name,
-                                       ignore_size=opts.ignore_size)
+                                       ignore_size=opts.ignore_size,
+                                       wait_for_sync=opts.wait_for_sync)
   disks_info = SubmitOrSend(op, opts)
   for host, iname, nname in disks_info:
     ToStdout("%s:%s:%s", host, iname, nname)
@@ -598,16 +518,34 @@ def RecreateDisks(opts, args):
 
   """
   instance_name = args[0]
+
+  disks = []
+
   if opts.disks:
-    try:
-      opts.disks = [int(v) for v in opts.disks.split(",")]
-    except (ValueError, TypeError), err:
-      ToStderr("Invalid disks value: %s" % str(err))
-      return 1
-  else:
-    opts.disks = []
+    for didx, ddict in opts.disks:
+      didx = int(didx)
+
+      if not ht.TDict(ddict):
+        msg = "Invalid disk/%d value: expected dict, got %s" % (didx, ddict)
+        raise errors.OpPrereqError(msg, errors.ECODE_INVAL)
+
+      if constants.IDISK_SIZE in ddict:
+        try:
+          ddict[constants.IDISK_SIZE] = \
+            utils.ParseUnit(ddict[constants.IDISK_SIZE])
+        except ValueError, err:
+          raise errors.OpPrereqError("Invalid disk size for disk %d: %s" %
+                                     (didx, err), errors.ECODE_INVAL)
+
+      disks.append((didx, ddict))
+
+    # TODO: Verify modifiable parameters (already done in
+    # LUInstanceRecreateDisks, but it'd be nice to have in the client)
 
   if opts.node:
+    if opts.iallocator:
+      msg = "At most one of either --nodes or --iallocator can be passed"
+      raise errors.OpPrereqError(msg, errors.ECODE_INVAL)
     pnode, snode = SplitNodeOption(opts.node)
     nodes = [pnode]
     if snode is not None:
@@ -616,9 +554,10 @@ def RecreateDisks(opts, args):
     nodes = []
 
   op = opcodes.OpInstanceRecreateDisks(instance_name=instance_name,
-                                       disks=opts.disks,
-                                       nodes=nodes)
+                                       disks=disks, nodes=nodes,
+                                       iallocator=opts.iallocator)
   SubmitOrSend(op, opts)
+
   return 0
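(A sketch of what the conversion above produces, assuming the --disk option
hands over (index, dict) pairs with string values, as the int()/ParseUnit()
calls suggest; the concrete input is hypothetical:)

# request to recreate disk 0 with a 2 GiB size override
opts_disks = [("0", {"size": "2G"})]
# after the loop: index coerced to int, size parsed into MiB
disks = [(0, {"size": 2048})]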
 
 
@@ -627,8 +566,8 @@ def GrowDisk(opts, args):
 
   @param opts: the command line options selected by the user
   @type args: list
-  @param args: should contain two elements, the instance name
-      whose disks we grow and the disk name, e.g. I{sda}
+  @param args: should contain three elements, the target instance name,
+      the target disk id, and the target growth
   @rtype: int
   @return: the desired exit code
 
@@ -640,10 +579,15 @@ def GrowDisk(opts, args):
   except (TypeError, ValueError), err:
     raise errors.OpPrereqError("Invalid disk index: %s" % str(err),
                                errors.ECODE_INVAL)
-  amount = utils.ParseUnit(args[2])
+  try:
+    amount = utils.ParseUnit(args[2])
+  except errors.UnitParseError:
+    raise errors.OpPrereqError("Can't parse the given amount '%s'" % args[2],
+                               errors.ECODE_INVAL)
   op = opcodes.OpInstanceGrowDisk(instance_name=instance,
                                   disk=disk, amount=amount,
-                                  wait_for_sync=opts.wait_for_sync)
+                                  wait_for_sync=opts.wait_for_sync,
+                                  absolute=opts.absolute)
   SubmitOrSend(op, opts)
   return 0
 
@@ -701,6 +645,7 @@ def _ShutdownInstance(name, opts):
 
   """
   return opcodes.OpInstanceShutdown(instance_name=name,
+                                    force=opts.force,
                                     timeout=opts.timeout,
                                     ignore_offline_nodes=opts.ignore_offline,
                                     no_remember=opts.no_remember)
@@ -747,7 +692,8 @@ def ReplaceDisks(opts, args):
   op = opcodes.OpInstanceReplaceDisks(instance_name=args[0], disks=disks,
                                       remote_node=new_2ndary, mode=mode,
                                       iallocator=iallocator,
-                                      early_release=opts.early_release)
+                                      early_release=opts.early_release,
+                                      ignore_ipolicy=opts.ignore_ipolicy)
   SubmitOrSend(op, opts)
   return 0
 
@@ -788,7 +734,8 @@ def FailoverInstance(opts, args):
                                   ignore_consistency=opts.ignore_consistency,
                                   shutdown_timeout=opts.shutdown_timeout,
                                   iallocator=iallocator,
-                                  target_node=target_node)
+                                  target_node=target_node,
+                                  ignore_ipolicy=opts.ignore_ipolicy)
   SubmitOrSend(op, opts, cl=cl)
   return 0
 
@@ -843,8 +790,10 @@ def MigrateInstance(opts, args):
   op = opcodes.OpInstanceMigrate(instance_name=instance_name, mode=mode,
                                  cleanup=opts.cleanup, iallocator=iallocator,
                                  target_node=target_node,
-                                 allow_failover=opts.allow_failover)
-  SubmitOpCode(op, cl=cl, opts=opts)
+                                 allow_failover=opts.allow_failover,
+                                 allow_runtime_changes=opts.allow_runtime_chgs,
+                                 ignore_ipolicy=opts.ignore_ipolicy)
+  SubmitOrSend(op, cl=cl, opts=opts)
   return 0
 
 
@@ -872,7 +821,8 @@ def MoveInstance(opts, args):
   op = opcodes.OpInstanceMove(instance_name=instance_name,
                               target_node=opts.node,
                               shutdown_timeout=opts.shutdown_timeout,
-                              ignore_consistency=opts.ignore_consistency)
+                              ignore_consistency=opts.ignore_consistency,
+                              ignore_ipolicy=opts.ignore_ipolicy)
   SubmitOrSend(op, opts, cl=cl)
   return 0
 
@@ -934,6 +884,9 @@ def _DoConsole(console, show_command, cluster_name, feedback_fn=ToStdout,
                 " URL <vnc://%s:%s/>",
                 console.instance, console.host, console.port,
                 console.display, console.host, console.port)
+  elif console.kind == constants.CONS_SPICE:
+    feedback_fn("Instance %s has SPICE listening on %s:%s", console.instance,
+                console.host, console.port)
   elif console.kind == constants.CONS_SSH:
     # Convert to string if not already one
     if isinstance(console.command, basestring):
@@ -973,8 +926,8 @@ def _FormatLogicalID(dev_type, logical_id, roman):
                                                             convert=roman))),
       ("nodeB", "%s, minor=%s" % (node_b, compat.TryToRoman(minor_b,
                                                             convert=roman))),
-      ("port", compat.TryToRoman(port, convert=roman)),
-      ("auth key", key),
+      ("port", str(compat.TryToRoman(port, convert=roman))),
+      ("auth key", str(key)),
       ]
   elif dev_type == constants.LD_LV:
     vg_name, lv_name = logical_id
@@ -985,6 +938,10 @@ def _FormatLogicalID(dev_type, logical_id, roman):
   return data
 
 
+def _FormatListInfo(data):
+  return list(str(i) for i in data)
+
+
 def _FormatBlockDevInfo(idx, top_level, dev, roman):
   """Show block device information.
 
@@ -1067,9 +1024,8 @@ def _FormatBlockDevInfo(idx, top_level, dev, roman):
   if isinstance(dev["size"], int):
     nice_size = utils.FormatUnit(dev["size"], "h")
   else:
-    nice_size = dev["size"]
-  d1 = ["- %s: %s, size %s" % (txt, dev["dev_type"], nice_size)]
-  data = []
+    nice_size = str(dev["size"])
+  data = [(txt, "%s, size %s" % (dev["dev_type"], nice_size))]
   if top_level:
     data.append(("access mode", dev["mode"]))
   if dev["logical_id"] is not None:
@@ -1082,8 +1038,7 @@ def _FormatBlockDevInfo(idx, top_level, dev, roman):
     else:
       data.extend(l_id)
   elif dev["physical_id"] is not None:
-    data.append("physical_id:")
-    data.append([dev["physical_id"]])
+    data.append(("physical_id:", _FormatListInfo(dev["physical_id"])))
 
   if dev["pstatus"]:
     data.append(("on primary", helper(dev["dev_type"], dev["pstatus"])))
@@ -1091,41 +1046,126 @@ def _FormatBlockDevInfo(idx, top_level, dev, roman):
   if dev["sstatus"]:
     data.append(("on secondary", helper(dev["dev_type"], dev["sstatus"])))
 
-  if dev["children"]:
-    data.append("child devices:")
-    for c_idx, child in enumerate(dev["children"]):
-      data.append(_FormatBlockDevInfo(c_idx, False, child, roman))
-  d1.append(data)
-  return d1
-
-
-def _FormatList(buf, data, indent_level):
-  """Formats a list of data at a given indent level.
+  data.append(("name", dev["name"]))
+  data.append(("UUID", dev["uuid"]))
 
-  If the element of the list is:
-    - a string, it is simply formatted as is
-    - a tuple, it will be split into key, value and the all the
-      values in a list will be aligned all at the same start column
-    - a list, will be recursively formatted
+  if dev["children"]:
+    data.append(("child devices", [
+      _FormatBlockDevInfo(c_idx, False, child, roman)
+      for c_idx, child in enumerate(dev["children"])
+      ]))
+  return data
 
-  @type buf: StringIO
-  @param buf: the buffer into which we write the output
-  @param data: the list to format
-  @type indent_level: int
-  @param indent_level: the indent level to format at
 
-  """
-  max_tlen = max([len(elem[0]) for elem in data
-                 if isinstance(elem, tuple)] or [0])
-  for elem in data:
-    if isinstance(elem, basestring):
-      buf.write("%*s%s\n" % (2*indent_level, "", elem))
-    elif isinstance(elem, tuple):
-      key, value = elem
-      spacer = "%*s" % (max_tlen - len(key), "")
-      buf.write("%*s%s:%s %s\n" % (2*indent_level, "", key, spacer, value))
-    elif isinstance(elem, list):
-      _FormatList(buf, elem, indent_level+1)
+def _FormatInstanceNicInfo(idx, nic):
+  """Helper function for L{_FormatInstanceInfo()}"""
+  (name, uuid, ip, mac, mode, link, _, netinfo) = nic
+  network_name = None
+  if netinfo:
+    network_name = netinfo["name"]
+  return [
+    ("nic/%d" % idx, ""),
+    ("MAC", str(mac)),
+    ("IP", str(ip)),
+    ("mode", str(mode)),
+    ("link", str(link)),
+    ("network", str(network_name)),
+    ("UUID", str(uuid)),
+    ("name", str(name)),
+    ]
+
+
+def _FormatInstanceNodesInfo(instance):
+  """Helper function for L{_FormatInstanceInfo()}"""
+  pgroup = ("%s (UUID %s)" %
+            (instance["pnode_group_name"], instance["pnode_group_uuid"]))
+  secs = utils.CommaJoin(("%s (group %s, group UUID %s)" %
+                          (name, group_name, group_uuid))
+                         for (name, group_name, group_uuid) in
+                           zip(instance["snodes"],
+                               instance["snodes_group_names"],
+                               instance["snodes_group_uuids"]))
+  return [
+    [
+      ("primary", instance["pnode"]),
+      ("group", pgroup),
+      ],
+    [("secondaries", secs)],
+    ]
+
+
+def _GetVncConsoleInfo(instance):
+  """Helper function for L{_FormatInstanceInfo()}"""
+  vnc_bind_address = instance["hv_actual"].get(constants.HV_VNC_BIND_ADDRESS,
+                                               None)
+  if vnc_bind_address:
+    port = instance["network_port"]
+    display = int(port) - constants.VNC_BASE_PORT
+    if display > 0 and vnc_bind_address == constants.IP4_ADDRESS_ANY:
+      vnc_console_port = "%s:%s (display %s)" % (instance["pnode"],
+                                                 port,
+                                                 display)
+    elif display > 0 and netutils.IP4Address.IsValid(vnc_bind_address):
+      vnc_console_port = ("%s:%s (node %s) (display %s)" %
+                           (vnc_bind_address, port,
+                            instance["pnode"], display))
+    else:
+      # vnc bind address is a file
+      vnc_console_port = "%s:%s" % (instance["pnode"],
+                                    vnc_bind_address)
+    ret = "vnc to %s" % vnc_console_port
+  else:
+    ret = None
+  return ret
+
+
+def _FormatInstanceInfo(instance, roman_integers):
+  """Format instance information for L{cli.PrintGenericInfo()}"""
+  istate = "configured to be %s" % instance["config_state"]
+  if instance["run_state"]:
+    istate += ", actual state is %s" % instance["run_state"]
+  info = [
+    ("Instance name", instance["name"]),
+    ("UUID", instance["uuid"]),
+    ("Serial number",
+     str(compat.TryToRoman(instance["serial_no"], convert=roman_integers))),
+    ("Creation time", utils.FormatTime(instance["ctime"])),
+    ("Modification time", utils.FormatTime(instance["mtime"])),
+    ("State", istate),
+    ("Nodes", _FormatInstanceNodesInfo(instance)),
+    ("Operating system", instance["os"]),
+    ("Operating system parameters",
+     FormatParamsDictInfo(instance["os_instance"], instance["os_actual"])),
+    ]
+
+  if "network_port" in instance:
+    info.append(("Allocated network port",
+                 str(compat.TryToRoman(instance["network_port"],
+                                       convert=roman_integers))))
+  info.append(("Hypervisor", instance["hypervisor"]))
+  console = _GetVncConsoleInfo(instance)
+  if console:
+    info.append(("console connection", console))
+  # deprecated "memory" value, kept for one version for compatibility
+  # TODO(ganeti 2.7) remove.
+  be_actual = copy.deepcopy(instance["be_actual"])
+  be_actual["memory"] = be_actual[constants.BE_MAXMEM]
+  info.extend([
+    ("Hypervisor parameters",
+     FormatParamsDictInfo(instance["hv_instance"], instance["hv_actual"])),
+    ("Back-end parameters",
+     FormatParamsDictInfo(instance["be_instance"], be_actual)),
+    ("NICs", [
+      _FormatInstanceNicInfo(idx, nic)
+      for (idx, nic) in enumerate(instance["nics"])
+      ]),
+    ("Disk template", instance["disk_template"]),
+    ("Disks", [
+      _FormatBlockDevInfo(idx, True, device, roman_integers)
+      for (idx, device) in enumerate(instance["disks"])
+      ]),
+    ])
+  return info
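(The formatting helpers above all build nested lists of (name, value) pairs; a
rough, made-up sketch of the structure eventually handed to
cli.PrintGenericInfo:)

info = [
  ("Instance name", "instance1.example.com"),
  ("State", "configured to be up, actual state is up"),
  ("Nodes", [
    [("primary", "node1.example.com"),
     ("group", "default (UUID ...)")],
    [("secondaries", "node2.example.com (group default, group UUID ...)")],
    ]),
  ]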
 
 
 def ShowInstanceConfig(opts, args):
@@ -1156,79 +1196,87 @@ def ShowInstanceConfig(opts, args):
     ToStdout("No instances.")
     return 1
 
-  buf = StringIO()
-  retcode = 0
-  for instance_name in result:
-    instance = result[instance_name]
-    buf.write("Instance name: %s\n" % instance["name"])
-    buf.write("UUID: %s\n" % instance["uuid"])
-    buf.write("Serial number: %s\n" %
-              compat.TryToRoman(instance["serial_no"],
-                                convert=opts.roman_integers))
-    buf.write("Creation time: %s\n" % utils.FormatTime(instance["ctime"]))
-    buf.write("Modification time: %s\n" % utils.FormatTime(instance["mtime"]))
-    buf.write("State: configured to be %s" % instance["config_state"])
-    if instance["run_state"]:
-      buf.write(", actual state is %s" % instance["run_state"])
-    buf.write("\n")
-    ##buf.write("Considered for memory checks in cluster verify: %s\n" %
-    ##          instance["auto_balance"])
-    buf.write("  Nodes:\n")
-    buf.write("    - primary: %s\n" % instance["pnode"])
-    buf.write("    - secondaries: %s\n" % utils.CommaJoin(instance["snodes"]))
-    buf.write("  Operating system: %s\n" % instance["os"])
-    FormatParameterDict(buf, instance["os_instance"], instance["os_actual"],
-                        level=2)
-    if instance.has_key("network_port"):
-      buf.write("  Allocated network port: %s\n" %
-                compat.TryToRoman(instance["network_port"],
-                                  convert=opts.roman_integers))
-    buf.write("  Hypervisor: %s\n" % instance["hypervisor"])
-
-    # custom VNC console information
-    vnc_bind_address = instance["hv_actual"].get(constants.HV_VNC_BIND_ADDRESS,
-                                                 None)
-    if vnc_bind_address:
-      port = instance["network_port"]
-      display = int(port) - constants.VNC_BASE_PORT
-      if display > 0 and vnc_bind_address == constants.IP4_ADDRESS_ANY:
-        vnc_console_port = "%s:%s (display %s)" % (instance["pnode"],
-                                                   port,
-                                                   display)
-      elif display > 0 and netutils.IP4Address.IsValid(vnc_bind_address):
-        vnc_console_port = ("%s:%s (node %s) (display %s)" %
-                             (vnc_bind_address, port,
-                              instance["pnode"], display))
-      else:
-        # vnc bind address is a file
-        vnc_console_port = "%s:%s" % (instance["pnode"],
-                                      vnc_bind_address)
-      buf.write("    - console connection: vnc to %s\n" % vnc_console_port)
-
-    FormatParameterDict(buf, instance["hv_instance"], instance["hv_actual"],
-                        level=2)
-    buf.write("  Hardware:\n")
-    buf.write("    - VCPUs: %s\n" %
-              compat.TryToRoman(instance["be_actual"][constants.BE_VCPUS],
-                                convert=opts.roman_integers))
-    buf.write("    - memory: %sMiB\n" %
-              compat.TryToRoman(instance["be_actual"][constants.BE_MEMORY],
-                                convert=opts.roman_integers))
-    buf.write("    - NICs:\n")
-    for idx, (ip, mac, mode, link) in enumerate(instance["nics"]):
-      buf.write("      - nic/%d: MAC: %s, IP: %s, mode: %s, link: %s\n" %
-                (idx, mac, ip, mode, link))
-    buf.write("  Disk template: %s\n" % instance["disk_template"])
-    buf.write("  Disks:\n")
-
-    for idx, device in enumerate(instance["disks"]):
-      _FormatList(buf, _FormatBlockDevInfo(idx, True, device,
-                  opts.roman_integers), 2)
-
-  ToStdout(buf.getvalue().rstrip('\n'))
+  PrintGenericInfo([
+    _FormatInstanceInfo(instance, opts.roman_integers)
+    for instance in result.values()
+    ])
   return retcode
 
 
+def _ConvertNicDiskModifications(mods):
+  """Converts NIC/disk modifications from CLI to opcode.
+
+  When L{opcodes.OpInstanceSetParams} was changed to support adding/removing
+  disks at arbitrary indices, its parameter format changed. This function
+  converts legacy requests (e.g. "--net add" or "--disk add:size=4G") to the
+  newer format and adds support for new-style requests (e.g. "--net 4:add").
+
+  @type mods: list of tuples
+  @param mods: Modifications as given by command line parser
+  @rtype: list of tuples
+  @return: Modifications as understood by L{opcodes.OpInstanceSetParams}
+
+  """
+  result = []
+
+  for (identifier, params) in mods:
+    if identifier == constants.DDM_ADD:
+      # Add item as last item (legacy interface)
+      action = constants.DDM_ADD
+      identifier = -1
+    elif identifier == constants.DDM_REMOVE:
+      # Remove last item (legacy interface)
+      action = constants.DDM_REMOVE
+      identifier = -1
+    else:
+      # Modifications and adding/removing at arbitrary indices
+      add = params.pop(constants.DDM_ADD, _MISSING)
+      remove = params.pop(constants.DDM_REMOVE, _MISSING)
+      modify = params.pop(constants.DDM_MODIFY, _MISSING)
+
+      if modify is _MISSING:
+        if not (add is _MISSING or remove is _MISSING):
+          raise errors.OpPrereqError("Cannot add and remove at the same time",
+                                     errors.ECODE_INVAL)
+        elif add is not _MISSING:
+          action = constants.DDM_ADD
+        elif remove is not _MISSING:
+          action = constants.DDM_REMOVE
+        else:
+          action = constants.DDM_MODIFY
+
+      elif add is _MISSING and remove is _MISSING:
+        action = constants.DDM_MODIFY
+      else:
+        raise errors.OpPrereqError("Cannot modify and add/remove at the"
+                                   " same time", errors.ECODE_INVAL)
+
+      assert not (constants.DDMS_VALUES_WITH_MODIFY & set(params.keys()))
+
+    if action == constants.DDM_REMOVE and params:
+      raise errors.OpPrereqError("Not accepting parameters on removal",
+                                 errors.ECODE_INVAL)
+
+    result.append((action, identifier, params))
+
+  return result
+
+
+def _ParseDiskSizes(mods):
+  """Parses disk sizes in parameters.
+
+  """
+  for (action, _, params) in mods:
+    if params and constants.IDISK_SIZE in params:
+      params[constants.IDISK_SIZE] = \
+        utils.ParseUnit(params[constants.IDISK_SIZE])
+    elif action == constants.DDM_ADD:
+      raise errors.OpPrereqError("Missing required parameter 'size'",
+                                 errors.ECODE_INVAL)
+
+  return mods
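(To illustrate the two helpers above; the exact tuples produced by the option
parser are an assumption here, only the conversion itself is taken from the
code:)

# legacy form, e.g. "--disk add:size=4G", arrives as ("add", {"size": "4G"})
_ConvertNicDiskModifications([(constants.DDM_ADD, {"size": "4G"})])
# -> [(constants.DDM_ADD, -1, {"size": "4G"})]   (append as last item)

# indexed form, e.g. "--disk 2:remove"
_ConvertNicDiskModifications([("2", {constants.DDM_REMOVE: None})])
# -> [(constants.DDM_REMOVE, "2", {})]   (remove item 2; no other parameters)

# _ParseDiskSizes then converts "size" values ("4G" -> 4096 MiB) and rejects
# additions that lack a size.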
+
+
 def SetInstanceParams(opts, args):
   """Modifies an instance.
 
@@ -1242,7 +1290,9 @@ def SetInstanceParams(opts, args):
 
   """
   if not (opts.nics or opts.disks or opts.disk_template or
-          opts.hvparams or opts.beparams or opts.os or opts.osparams):
+          opts.hvparams or opts.beparams or opts.os or opts.osparams or
+          opts.offline_inst or opts.online_inst or opts.runtime_mem or
+          opts.new_primary_node):
     ToStderr("Please give at least one of the parameters.")
     return 1
 
@@ -1251,7 +1301,7 @@ def SetInstanceParams(opts, args):
       if opts.beparams[param].lower() == "default":
         opts.beparams[param] = constants.VALUE_DEFAULT
 
-  utils.ForceDictType(opts.beparams, constants.BES_PARAMETER_TYPES,
+  utils.ForceDictType(opts.beparams, constants.BES_PARAMETER_COMPAT,
                       allowed_values=[constants.VALUE_DEFAULT])
 
   for param in opts.hvparams:
@@ -1262,24 +1312,8 @@ def SetInstanceParams(opts, args):
   utils.ForceDictType(opts.hvparams, constants.HVS_PARAMETER_TYPES,
                       allowed_values=[constants.VALUE_DEFAULT])
 
-  for idx, (nic_op, nic_dict) in enumerate(opts.nics):
-    try:
-      nic_op = int(nic_op)
-      opts.nics[idx] = (nic_op, nic_dict)
-    except (TypeError, ValueError):
-      pass
-
-  for idx, (disk_op, disk_dict) in enumerate(opts.disks):
-    try:
-      disk_op = int(disk_op)
-      opts.disks[idx] = (disk_op, disk_dict)
-    except (TypeError, ValueError):
-      pass
-    if disk_op == constants.DDM_ADD:
-      if 'size' not in disk_dict:
-        raise errors.OpPrereqError("Missing required parameter 'size'",
-                                   errors.ECODE_INVAL)
-      disk_dict['size'] = utils.ParseUnit(disk_dict['size'])
+  nics = _ConvertNicDiskModifications(opts.nics)
+  disks = _ParseDiskSizes(_ConvertNicDiskModifications(opts.disks))
 
   if (opts.disk_template and
       opts.disk_template in constants.DTS_INT_MIRROR and
@@ -1288,18 +1322,30 @@ def SetInstanceParams(opts, args):
              " specifying a secondary node")
     return 1
 
+  if opts.offline_inst:
+    offline = True
+  elif opts.online_inst:
+    offline = False
+  else:
+    offline = None
+
   op = opcodes.OpInstanceSetParams(instance_name=args[0],
-                                   nics=opts.nics,
-                                   disks=opts.disks,
+                                   nics=nics,
+                                   disks=disks,
                                    disk_template=opts.disk_template,
                                    remote_node=opts.node,
+                                   pnode=opts.new_primary_node,
                                    hvparams=opts.hvparams,
                                    beparams=opts.beparams,
+                                   runtime_mem=opts.runtime_mem,
                                    os_name=opts.os,
                                    osparams=opts.osparams,
                                    force_variant=opts.force_variant,
                                    force=opts.force,
-                                   wait_for_sync=opts.wait_for_sync)
+                                   wait_for_sync=opts.wait_for_sync,
+                                   offline=offline,
+                                   conflicts_check=opts.conflicts_check,
+                                   ignore_ipolicy=opts.ignore_ipolicy)
 
   # even if here we process the result, we allow submit only
   result = SubmitOrSend(op, opts)
@@ -1309,10 +1355,45 @@ def SetInstanceParams(opts, args):
     for param, data in result:
       ToStdout(" - %-5s -> %s", param, data)
     ToStdout("Please don't forget that most parameters take effect"
-             " only at the next start of the instance.")
+             " only at the next (re)start of the instance initiated by"
+             " ganeti; restarting from within the instance will"
+             " not be enough.")
   return 0
 
 
+def ChangeGroup(opts, args):
+  """Moves an instance to another group.
+
+  """
+  (instance_name, ) = args
+
+  cl = GetClient()
+
+  op = opcodes.OpInstanceChangeGroup(instance_name=instance_name,
+                                     iallocator=opts.iallocator,
+                                     target_groups=opts.to,
+                                     early_release=opts.early_release)
+  result = SubmitOrSend(op, opts, cl=cl)
+
+  # Keep track of submitted jobs
+  jex = JobExecutor(cl=cl, opts=opts)
+
+  for (status, job_id) in result[constants.JOB_IDS_KEY]:
+    jex.AddJobId(None, status, job_id)
+
+  results = jex.GetResults()
+  bad_cnt = len([row for row in results if not row[0]])
+  if bad_cnt == 0:
+    ToStdout("Instance '%s' changed group successfully.", instance_name)
+    rcode = constants.EXIT_SUCCESS
+  else:
+    ToStdout("There were %s errors while changing group of instance '%s'.",
+             bad_cnt, instance_name)
+    rcode = constants.EXIT_FAILURE
+
+  return rcode
+
+
 # multi-instance selection options
 m_force_multi = cli_option("--force-multiple", dest="force_multi",
                            help="Do not ask for confirmation when more than"
@@ -1321,42 +1402,42 @@ m_force_multi = cli_option("--force-multiple", dest="force_multi",
 
 m_pri_node_opt = cli_option("--primary", dest="multi_mode",
                             help="Filter by nodes (primary only)",
-                            const=_SHUTDOWN_NODES_PRI, action="store_const")
+                            const=_EXPAND_NODES_PRI, action="store_const")
 
 m_sec_node_opt = cli_option("--secondary", dest="multi_mode",
                             help="Filter by nodes (secondary only)",
-                            const=_SHUTDOWN_NODES_SEC, action="store_const")
+                            const=_EXPAND_NODES_SEC, action="store_const")
 
 m_node_opt = cli_option("--node", dest="multi_mode",
                         help="Filter by nodes (primary and secondary)",
-                        const=_SHUTDOWN_NODES_BOTH, action="store_const")
+                        const=_EXPAND_NODES_BOTH, action="store_const")
 
 m_clust_opt = cli_option("--all", dest="multi_mode",
                          help="Select all instances in the cluster",
-                         const=_SHUTDOWN_CLUSTER, action="store_const")
+                         const=_EXPAND_CLUSTER, action="store_const")
 
 m_inst_opt = cli_option("--instance", dest="multi_mode",
                         help="Filter by instance name [default]",
-                        const=_SHUTDOWN_INSTANCES, action="store_const")
+                        const=_EXPAND_INSTANCES, action="store_const")
 
 m_node_tags_opt = cli_option("--node-tags", dest="multi_mode",
                              help="Filter by node tag",
-                             const=_SHUTDOWN_NODES_BOTH_BY_TAGS,
+                             const=_EXPAND_NODES_BOTH_BY_TAGS,
                              action="store_const")
 
 m_pri_node_tags_opt = cli_option("--pri-node-tags", dest="multi_mode",
                                  help="Filter by primary node tag",
-                                 const=_SHUTDOWN_NODES_PRI_BY_TAGS,
+                                 const=_EXPAND_NODES_PRI_BY_TAGS,
                                  action="store_const")
 
 m_sec_node_tags_opt = cli_option("--sec-node-tags", dest="multi_mode",
                                  help="Filter by secondary node tag",
-                                 const=_SHUTDOWN_NODES_SEC_BY_TAGS,
+                                 const=_EXPAND_NODES_SEC_BY_TAGS,
                                  action="store_const")
 
 m_inst_tags_opt = cli_option("--tags", dest="multi_mode",
                              help="Filter by instance tag",
-                             const=_SHUTDOWN_INSTANCES_BY_TAGS,
+                             const=_EXPAND_INSTANCES_BY_TAGS,
                              action="store_const")
 
 # this is defined separately due to readability only
@@ -1365,45 +1446,51 @@ add_opts = [
   OS_OPT,
   FORCE_VARIANT_OPT,
   NO_INSTALL_OPT,
+  IGNORE_IPOLICY_OPT,
   ]
 
 commands = {
-  'add': (
+  "add": (
     AddInstance, [ArgHost(min=1, max=1)], COMMON_CREATE_OPTS + add_opts,
     "[...] -t disk-type -n node[:secondary-node] -o os-type <name>",
     "Creates and adds a new instance to the cluster"),
-  'batch-create': (
-    BatchCreate, [ArgFile(min=1, max=1)], [DRY_RUN_OPT, PRIORITY_OPT],
+  "batch-create": (
+    BatchCreate, [ArgFile(min=1, max=1)],
+    [DRY_RUN_OPT, PRIORITY_OPT, IALLOCATOR_OPT, SUBMIT_OPT],
     "<instances.json>",
     "Create a bunch of instances based on specs in the file."),
-  'console': (
+  "console": (
     ConnectToInstanceConsole, ARGS_ONE_INSTANCE,
     [SHOWCMD_OPT, PRIORITY_OPT],
     "[--show-cmd] <instance>", "Opens a console on the specified instance"),
-  'failover': (
+  "failover": (
     FailoverInstance, ARGS_ONE_INSTANCE,
     [FORCE_OPT, IGNORE_CONSIST_OPT, SUBMIT_OPT, SHUTDOWN_TIMEOUT_OPT,
-     DRY_RUN_OPT, PRIORITY_OPT, DST_NODE_OPT, IALLOCATOR_OPT],
-    "[-f] <instance>", "Stops the instance and starts it on the backup node,"
-    " using the remote mirror (only for mirrored instances)"),
-  'migrate': (
+     DRY_RUN_OPT, PRIORITY_OPT, DST_NODE_OPT, IALLOCATOR_OPT,
+     IGNORE_IPOLICY_OPT, CLEANUP_OPT],
+    "[-f] <instance>", "Stops the instance, changes its primary node and"
+    " (if it was originally running) starts it on the new node"
+    " (the secondary for mirrored instances or any node"
+    " for shared storage)."),
+  "migrate": (
     MigrateInstance, ARGS_ONE_INSTANCE,
     [FORCE_OPT, NONLIVE_OPT, MIGRATION_MODE_OPT, CLEANUP_OPT, DRY_RUN_OPT,
-     PRIORITY_OPT, DST_NODE_OPT, IALLOCATOR_OPT, ALLOW_FAILOVER_OPT],
+     PRIORITY_OPT, DST_NODE_OPT, IALLOCATOR_OPT, ALLOW_FAILOVER_OPT,
+     IGNORE_IPOLICY_OPT, NORUNTIME_CHGS_OPT, SUBMIT_OPT],
     "[-f] <instance>", "Migrate instance to its secondary node"
     " (only for mirrored instances)"),
-  'move': (
+  "move": (
     MoveInstance, ARGS_ONE_INSTANCE,
     [FORCE_OPT, SUBMIT_OPT, SINGLE_NODE_OPT, SHUTDOWN_TIMEOUT_OPT,
-     DRY_RUN_OPT, PRIORITY_OPT, IGNORE_CONSIST_OPT],
+     DRY_RUN_OPT, PRIORITY_OPT, IGNORE_CONSIST_OPT, IGNORE_IPOLICY_OPT],
     "[-f] <instance>", "Move instance to an arbitrary node"
     " (only for instances of type file and lv)"),
-  'info': (
+  "info": (
     ShowInstanceConfig, ARGS_MANY_INSTANCES,
     [STATIC_OPT, ALL_OPT, ROMAN_OPT, PRIORITY_OPT],
     "[-s] {--all | <instance>...}",
     "Show information on the specified instance(s)"),
-  'list': (
+  "list": (
     ListInstances, ARGS_MANY_INSTANCES,
     [NOHDR_OPT, SEP_OPT, USEUNITS_OPT, FIELDS_OPT, VERBOSE_OPT,
      FORCE_FILTER_OPT],
@@ -1418,44 +1505,46 @@ commands = {
     [NOHDR_OPT, SEP_OPT],
     "[fields...]",
     "Lists all available fields for instances"),
-  'reinstall': (
+  "reinstall": (
     ReinstallInstance, [ArgInstance()],
     [FORCE_OPT, OS_OPT, FORCE_VARIANT_OPT, m_force_multi, m_node_opt,
      m_pri_node_opt, m_sec_node_opt, m_clust_opt, m_inst_opt, m_node_tags_opt,
      m_pri_node_tags_opt, m_sec_node_tags_opt, m_inst_tags_opt, SELECT_OS_OPT,
      SUBMIT_OPT, DRY_RUN_OPT, PRIORITY_OPT, OSPARAMS_OPT],
     "[-f] <instance>", "Reinstall a stopped instance"),
-  'remove': (
+  "remove": (
     RemoveInstance, ARGS_ONE_INSTANCE,
     [FORCE_OPT, SHUTDOWN_TIMEOUT_OPT, IGNORE_FAILURES_OPT, SUBMIT_OPT,
      DRY_RUN_OPT, PRIORITY_OPT],
     "[-f] <instance>", "Shuts down the instance and removes it"),
-  'rename': (
+  "rename": (
     RenameInstance,
     [ArgInstance(min=1, max=1), ArgHost(min=1, max=1)],
     [NOIPCHECK_OPT, NONAMECHECK_OPT, SUBMIT_OPT, DRY_RUN_OPT, PRIORITY_OPT],
     "<instance> <new_name>", "Rename the instance"),
-  'replace-disks': (
+  "replace-disks": (
     ReplaceDisks, ARGS_ONE_INSTANCE,
     [AUTO_REPLACE_OPT, DISKIDX_OPT, IALLOCATOR_OPT, EARLY_RELEASE_OPT,
      NEW_SECONDARY_OPT, ON_PRIMARY_OPT, ON_SECONDARY_OPT, SUBMIT_OPT,
-     DRY_RUN_OPT, PRIORITY_OPT],
-    "[-s|-p|-n NODE|-I NAME] <instance>",
-    "Replaces all disks for the instance"),
-  'modify': (
+     DRY_RUN_OPT, PRIORITY_OPT, IGNORE_IPOLICY_OPT],
+    "[-s|-p|-a|-n NODE|-I NAME] <instance>",
+    "Replaces disks for the instance"),
+  "modify": (
     SetInstanceParams, ARGS_ONE_INSTANCE,
     [BACKEND_OPT, DISK_OPT, FORCE_OPT, HVOPTS_OPT, NET_OPT, SUBMIT_OPT,
      DISK_TEMPLATE_OPT, SINGLE_NODE_OPT, OS_OPT, FORCE_VARIANT_OPT,
-     OSPARAMS_OPT, DRY_RUN_OPT, PRIORITY_OPT, NWSYNC_OPT],
+     OSPARAMS_OPT, DRY_RUN_OPT, PRIORITY_OPT, NWSYNC_OPT, OFFLINE_INST_OPT,
+     ONLINE_INST_OPT, IGNORE_IPOLICY_OPT, RUNTIME_MEM_OPT,
+     NOCONFLICTSCHECK_OPT, NEW_PRIMARY_OPT],
     "<instance>", "Alters the parameters of an instance"),
-  'shutdown': (
+  "shutdown": (
     GenericManyOps("shutdown", _ShutdownInstance), [ArgInstance()],
-    [m_node_opt, m_pri_node_opt, m_sec_node_opt, m_clust_opt,
+    [FORCE_OPT, m_node_opt, m_pri_node_opt, m_sec_node_opt, m_clust_opt,
      m_node_tags_opt, m_pri_node_tags_opt, m_sec_node_tags_opt,
      m_inst_tags_opt, m_inst_opt, m_force_multi, TIMEOUT_OPT, SUBMIT_OPT,
      DRY_RUN_OPT, PRIORITY_OPT, IGNORE_OFFLINE_OPT, NO_REMEMBER_OPT],
     "<instance>", "Stops an instance"),
-  'startup': (
+  "startup": (
     GenericManyOps("startup", _StartupInstance), [ArgInstance()],
     [FORCE_OPT, m_force_multi, m_node_opt, m_pri_node_opt, m_sec_node_opt,
      m_node_tags_opt, m_pri_node_tags_opt, m_sec_node_tags_opt,
@@ -1463,51 +1552,58 @@ commands = {
      BACKEND_OPT, DRY_RUN_OPT, PRIORITY_OPT, IGNORE_OFFLINE_OPT,
      NO_REMEMBER_OPT, STARTUP_PAUSED_OPT],
     "<instance>", "Starts an instance"),
-  'reboot': (
+  "reboot": (
     GenericManyOps("reboot", _RebootInstance), [ArgInstance()],
     [m_force_multi, REBOOT_TYPE_OPT, IGNORE_SECONDARIES_OPT, m_node_opt,
      m_pri_node_opt, m_sec_node_opt, m_clust_opt, m_inst_opt, SUBMIT_OPT,
      m_node_tags_opt, m_pri_node_tags_opt, m_sec_node_tags_opt,
      m_inst_tags_opt, SHUTDOWN_TIMEOUT_OPT, DRY_RUN_OPT, PRIORITY_OPT],
     "<instance>", "Reboots an instance"),
-  'activate-disks': (
+  "activate-disks": (
     ActivateDisks, ARGS_ONE_INSTANCE,
-    [SUBMIT_OPT, IGNORE_SIZE_OPT, PRIORITY_OPT],
+    [SUBMIT_OPT, IGNORE_SIZE_OPT, PRIORITY_OPT, WFSYNC_OPT],
     "<instance>", "Activate an instance's disks"),
-  'deactivate-disks': (
+  "deactivate-disks": (
     DeactivateDisks, ARGS_ONE_INSTANCE,
     [FORCE_OPT, SUBMIT_OPT, DRY_RUN_OPT, PRIORITY_OPT],
     "[-f] <instance>", "Deactivate an instance's disks"),
-  'recreate-disks': (
+  "recreate-disks": (
     RecreateDisks, ARGS_ONE_INSTANCE,
-    [SUBMIT_OPT, DISKIDX_OPT, NODE_PLACEMENT_OPT, DRY_RUN_OPT, PRIORITY_OPT],
+    [SUBMIT_OPT, DISK_OPT, NODE_PLACEMENT_OPT, DRY_RUN_OPT, PRIORITY_OPT,
+     IALLOCATOR_OPT],
     "<instance>", "Recreate an instance's disks"),
-  'grow-disk': (
+  "grow-disk": (
     GrowDisk,
     [ArgInstance(min=1, max=1), ArgUnknown(min=1, max=1),
      ArgUnknown(min=1, max=1)],
-    [SUBMIT_OPT, NWSYNC_OPT, DRY_RUN_OPT, PRIORITY_OPT],
+    [SUBMIT_OPT, NWSYNC_OPT, DRY_RUN_OPT, PRIORITY_OPT, ABSOLUTE_OPT],
     "<instance> <disk> <size>", "Grow an instance's disk"),
-  'list-tags': (
-    ListTags, ARGS_ONE_INSTANCE, [PRIORITY_OPT],
+  "change-group": (
+    ChangeGroup, ARGS_ONE_INSTANCE,
+    [TO_GROUP_OPT, IALLOCATOR_OPT, EARLY_RELEASE_OPT, PRIORITY_OPT, SUBMIT_OPT],
+    "[-I <iallocator>] [--to <group>]", "Change group of instance"),
+  "list-tags": (
+    ListTags, ARGS_ONE_INSTANCE, [],
     "<instance_name>", "List the tags of the given instance"),
-  'add-tags': (
+  "add-tags": (
     AddTags, [ArgInstance(min=1, max=1), ArgUnknown()],
-    [TAG_SRC_OPT, PRIORITY_OPT],
+    [TAG_SRC_OPT, PRIORITY_OPT, SUBMIT_OPT],
     "<instance_name> tag...", "Add tags to the given instance"),
-  'remove-tags': (
+  "remove-tags": (
     RemoveTags, [ArgInstance(min=1, max=1), ArgUnknown()],
-    [TAG_SRC_OPT, PRIORITY_OPT],
+    [TAG_SRC_OPT, PRIORITY_OPT, SUBMIT_OPT],
     "<instance_name> tag...", "Remove tags from given instance"),
   }
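(Per the commit subject, the user-visible effect of adding CLEANUP_OPT to the
failover entry above is that a failed failover can now be cleaned up from the
command line, as migrate already allowed; assuming CLEANUP_OPT is the usual
--cleanup flag, the invocation would be roughly:)

gnt-instance failover --cleanup instance1.example.com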
 
 #: dictionary with aliases for commands
 aliases = {
-  'start': 'startup',
-  'stop': 'shutdown',
+  "start": "startup",
+  "stop": "shutdown",
+  "show": "info",
   }
 
 
 def Main():
   return GenericMain(commands, aliases=aliases,
-                     override={"tag_type": constants.TAG_INSTANCE})
+                     override={"tag_type": constants.TAG_INSTANCE},
+                     env_override=_ENV_OVERRIDE)