Front-end and doc to use allocator in recreate-disks
[ganeti-local] / lib / client / gnt_instance.py
index 2cf2510..4e1eb57 100644 (file)
@@ -1,7 +1,7 @@
 #
 #
 
-# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011 Google Inc.
+# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012 Google Inc.
 #
 # This program is free software; you can redistribute it and/or modify
 # it under the terms of the GNU General Public License as published by
 
 """Instance related commands"""
 
-# pylint: disable-msg=W0401,W0614,C0103
+# pylint: disable=W0401,W0614,C0103
 # W0401: Wildcard import ganeti.cli
 # W0614: Unused import %s from wildcard import (since we need cli)
 # C0103: Invalid name gnt-instance
 
+import copy
 import itertools
 import simplejson
 import logging
@@ -39,22 +40,24 @@ from ganeti import errors
 from ganeti import netutils
 from ganeti import ssh
 from ganeti import objects
+from ganeti import ht
 
 
-_SHUTDOWN_CLUSTER = "cluster"
-_SHUTDOWN_NODES_BOTH = "nodes"
-_SHUTDOWN_NODES_PRI = "nodes-pri"
-_SHUTDOWN_NODES_SEC = "nodes-sec"
-_SHUTDOWN_NODES_BOTH_BY_TAGS = "nodes-by-tags"
-_SHUTDOWN_NODES_PRI_BY_TAGS = "nodes-pri-by-tags"
-_SHUTDOWN_NODES_SEC_BY_TAGS = "nodes-sec-by-tags"
-_SHUTDOWN_INSTANCES = "instances"
-_SHUTDOWN_INSTANCES_BY_TAGS = "instances-by-tags"
+_EXPAND_CLUSTER = "cluster"
+_EXPAND_NODES_BOTH = "nodes"
+_EXPAND_NODES_PRI = "nodes-pri"
+_EXPAND_NODES_SEC = "nodes-sec"
+_EXPAND_NODES_BOTH_BY_TAGS = "nodes-by-tags"
+_EXPAND_NODES_PRI_BY_TAGS = "nodes-pri-by-tags"
+_EXPAND_NODES_SEC_BY_TAGS = "nodes-sec-by-tags"
+_EXPAND_INSTANCES = "instances"
+_EXPAND_INSTANCES_BY_TAGS = "instances-by-tags"
 
-_SHUTDOWN_NODES_TAGS_MODES = (
-    _SHUTDOWN_NODES_BOTH_BY_TAGS,
-    _SHUTDOWN_NODES_PRI_BY_TAGS,
-    _SHUTDOWN_NODES_SEC_BY_TAGS)
+_EXPAND_NODES_TAGS_MODES = frozenset([
+  _EXPAND_NODES_BOTH_BY_TAGS,
+  _EXPAND_NODES_PRI_BY_TAGS,
+  _EXPAND_NODES_SEC_BY_TAGS,
+  ])
 
 
 #: default list of options for L{ListInstances}
@@ -63,19 +66,23 @@ _LIST_DEF_FIELDS = [
   ]
 
 
+_MISSING = object()
+_ENV_OVERRIDE = frozenset(["list"])
+
+
 def _ExpandMultiNames(mode, names, client=None):
   """Expand the given names using the passed mode.
 
-  For _SHUTDOWN_CLUSTER, all instances will be returned. For
-  _SHUTDOWN_NODES_PRI/SEC, all instances having those nodes as
-  primary/secondary will be returned. For _SHUTDOWN_NODES_BOTH, all
+  For _EXPAND_CLUSTER, all instances will be returned. For
+  _EXPAND_NODES_PRI/SEC, all instances having those nodes as
+  primary/secondary will be returned. For _EXPAND_NODES_BOTH, all
   instances having those nodes as either primary or secondary will be
-  returned. For _SHUTDOWN_INSTANCES, the given instances will be
+  returned. For _EXPAND_INSTANCES, the given instances will be
   returned.
 
-  @param mode: one of L{_SHUTDOWN_CLUSTER}, L{_SHUTDOWN_NODES_BOTH},
-      L{_SHUTDOWN_NODES_PRI}, L{_SHUTDOWN_NODES_SEC} or
-      L{_SHUTDOWN_INSTANCES}
+  @param mode: one of L{_EXPAND_CLUSTER}, L{_EXPAND_NODES_BOTH},
+      L{_EXPAND_NODES_PRI}, L{_EXPAND_NODES_SEC} or
+      L{_EXPAND_INSTANCES}
   @param names: a list of names; for cluster, it must be empty,
       and for node and instance it must be a list of valid item
       names (short names are valid as usual, e.g. node1 instead of
@@ -86,21 +93,20 @@ def _ExpandMultiNames(mode, names, client=None):
   @raise errors.OpPrereqError: for invalid input parameters
 
   """
-  # pylint: disable-msg=W0142
+  # pylint: disable=W0142
 
   if client is None:
     client = GetClient()
-  if mode == _SHUTDOWN_CLUSTER:
+  if mode == _EXPAND_CLUSTER:
     if names:
       raise errors.OpPrereqError("Cluster filter mode takes no arguments",
                                  errors.ECODE_INVAL)
     idata = client.QueryInstances([], ["name"], False)
     inames = [row[0] for row in idata]
 
-  elif mode in (_SHUTDOWN_NODES_BOTH,
-                _SHUTDOWN_NODES_PRI,
-                _SHUTDOWN_NODES_SEC) + _SHUTDOWN_NODES_TAGS_MODES:
-    if mode in _SHUTDOWN_NODES_TAGS_MODES:
+  elif (mode in _EXPAND_NODES_TAGS_MODES or
+        mode in (_EXPAND_NODES_BOTH, _EXPAND_NODES_PRI, _EXPAND_NODES_SEC)):
+    if mode in _EXPAND_NODES_TAGS_MODES:
       if not names:
         raise errors.OpPrereqError("No node tags passed", errors.ECODE_INVAL)
       ndata = client.QueryNodes([], ["name", "pinst_list",
@@ -116,21 +122,21 @@ def _ExpandMultiNames(mode, names, client=None):
     pri_names = list(itertools.chain(*ipri))
     isec = [row[2] for row in ndata]
     sec_names = list(itertools.chain(*isec))
-    if mode in (_SHUTDOWN_NODES_BOTH, _SHUTDOWN_NODES_BOTH_BY_TAGS):
+    if mode in (_EXPAND_NODES_BOTH, _EXPAND_NODES_BOTH_BY_TAGS):
       inames = pri_names + sec_names
-    elif mode in (_SHUTDOWN_NODES_PRI, _SHUTDOWN_NODES_PRI_BY_TAGS):
+    elif mode in (_EXPAND_NODES_PRI, _EXPAND_NODES_PRI_BY_TAGS):
       inames = pri_names
-    elif mode in (_SHUTDOWN_NODES_SEC, _SHUTDOWN_NODES_SEC_BY_TAGS):
+    elif mode in (_EXPAND_NODES_SEC, _EXPAND_NODES_SEC_BY_TAGS):
       inames = sec_names
     else:
       raise errors.ProgrammerError("Unhandled shutdown type")
-  elif mode == _SHUTDOWN_INSTANCES:
+  elif mode == _EXPAND_INSTANCES:
     if not names:
       raise errors.OpPrereqError("No instance names passed",
                                  errors.ECODE_INVAL)
     idata = client.QueryInstances(names, ["name"], False)
     inames = [row[0] for row in idata]
-  elif mode == _SHUTDOWN_INSTANCES_BY_TAGS:
+  elif mode == _EXPAND_INSTANCES_BY_TAGS:
     if not names:
       raise errors.OpPrereqError("No instance tags passed",
                                  errors.ECODE_INVAL)
@@ -175,13 +181,16 @@ def GenericManyOps(operation, fn):
   """
   def realfn(opts, args):
     if opts.multi_mode is None:
-      opts.multi_mode = _SHUTDOWN_INSTANCES
+      opts.multi_mode = _EXPAND_INSTANCES
     cl = GetClient()
     inames = _ExpandMultiNames(opts.multi_mode, args, client=cl)
     if not inames:
+      if opts.multi_mode == _EXPAND_CLUSTER:
+        ToStdout("Cluster is empty, no instances to shutdown")
+        return 0
       raise errors.OpPrereqError("Selection filter does not match"
                                  " any instances", errors.ECODE_INVAL)
-    multi_on = opts.multi_mode != _SHUTDOWN_INSTANCES or len(inames) > 1
+    multi_on = opts.multi_mode != _EXPAND_INSTANCES or len(inames) > 1
     if not (opts.force_multi or not multi_on
             or ConfirmOperation(inames, "instances", operation)):
       return 1
@@ -209,14 +218,15 @@ def ListInstances(opts, args):
 
   fmtoverride = dict.fromkeys(["tags", "disk.sizes", "nic.macs", "nic.ips",
                                "nic.modes", "nic.links", "nic.bridges",
-                               "snodes"],
+                               "snodes", "snodes.group", "snodes.group.uuid"],
                               (lambda value: ",".join(str(item)
                                                       for item in value),
                                False))
 
   return GenericList(constants.QR_INSTANCE, selected_fields, args, opts.units,
                      opts.separator, not opts.no_headers,
-                     format_override=fmtoverride)
+                     format_override=fmtoverride, verbose=opts.verbose,
+                     force_filter=opts.force_filter)
 
 
 def ListInstanceFields(opts, args):
@@ -283,7 +293,7 @@ def BatchCreate(opts, args):
                     "hvparams": {},
                     "file_storage_dir": None,
                     "force_variant": False,
-                    "file_driver": 'loop'}
+                    "file_driver": "loop"}
 
   def _PopulateWithDefaults(spec):
     """Returns a new hash combined with default values."""
@@ -294,31 +304,31 @@ def BatchCreate(opts, args):
   def _Validate(spec):
     """Validate the instance specs."""
     # Validate fields required under any circumstances
-    for required_field in ('os', 'template'):
+    for required_field in ("os", "template"):
       if required_field not in spec:
         raise errors.OpPrereqError('Required field "%s" is missing.' %
                                    required_field, errors.ECODE_INVAL)
     # Validate special fields
-    if spec['primary_node'] is not None:
-      if (spec['template'] in constants.DTS_NET_MIRROR and
-          spec['secondary_node'] is None):
-        raise errors.OpPrereqError('Template requires secondary node, but'
-                                   ' there was no secondary provided.',
+    if spec["primary_node"] is not None:
+      if (spec["template"] in constants.DTS_INT_MIRROR and
+          spec["secondary_node"] is None):
+        raise errors.OpPrereqError("Template requires secondary node, but"
+                                   " there was no secondary provided.",
                                    errors.ECODE_INVAL)
-    elif spec['iallocator'] is None:
-      raise errors.OpPrereqError('You have to provide at least a primary_node'
-                                 ' or an iallocator.',
+    elif spec["iallocator"] is None:
+      raise errors.OpPrereqError("You have to provide at least a primary_node"
+                                 " or an iallocator.",
                                  errors.ECODE_INVAL)
 
-    if (spec['hvparams'] and
-        not isinstance(spec['hvparams'], dict)):
-      raise errors.OpPrereqError('Hypervisor parameters must be a dict.',
+    if (spec["hvparams"] and
+        not isinstance(spec["hvparams"], dict)):
+      raise errors.OpPrereqError("Hypervisor parameters must be a dict.",
                                  errors.ECODE_INVAL)
 
   json_filename = args[0]
   try:
     instance_data = simplejson.loads(utils.ReadFile(json_filename))
-  except Exception, err: # pylint: disable-msg=W0703
+  except Exception, err: # pylint: disable=W0703
     ToStderr("Can't parse the instance definition file: %s" % str(err))
     return 1
 
@@ -331,17 +341,17 @@ def BatchCreate(opts, args):
   # Iterate over the instances and do:
   #  * Populate the specs with default value
   #  * Validate the instance specs
-  i_names = utils.NiceSort(instance_data.keys()) # pylint: disable-msg=E1103
+  i_names = utils.NiceSort(instance_data.keys()) # pylint: disable=E1103
   for name in i_names:
     specs = instance_data[name]
     specs = _PopulateWithDefaults(specs)
     _Validate(specs)
 
-    hypervisor = specs['hypervisor']
-    hvparams = specs['hvparams']
+    hypervisor = specs["hypervisor"]
+    hvparams = specs["hvparams"]
 
     disks = []
-    for elem in specs['disk_size']:
+    for elem in specs["disk_size"]:
       try:
         size = utils.ParseUnit(elem)
       except (TypeError, ValueError), err:
@@ -350,44 +360,44 @@ def BatchCreate(opts, args):
                                    (elem, name, err), errors.ECODE_INVAL)
       disks.append({"size": size})
 
-    utils.ForceDictType(specs['backend'], constants.BES_PARAMETER_TYPES)
+    utils.ForceDictType(specs["backend"], constants.BES_PARAMETER_COMPAT)
     utils.ForceDictType(hvparams, constants.HVS_PARAMETER_TYPES)
 
     tmp_nics = []
-    for field in ('ip', 'mac', 'mode', 'link', 'bridge'):
+    for field in constants.INIC_PARAMS:
       if field in specs:
         if not tmp_nics:
           tmp_nics.append({})
         tmp_nics[0][field] = specs[field]
 
-    if specs['nics'] is not None and tmp_nics:
+    if specs["nics"] is not None and tmp_nics:
       raise errors.OpPrereqError("'nics' list incompatible with using"
                                  " individual nic fields as well",
                                  errors.ECODE_INVAL)
-    elif specs['nics'] is not None:
-      tmp_nics = specs['nics']
+    elif specs["nics"] is not None:
+      tmp_nics = specs["nics"]
     elif not tmp_nics:
       tmp_nics = [{}]
 
     op = opcodes.OpInstanceCreate(instance_name=name,
                                   disks=disks,
-                                  disk_template=specs['template'],
+                                  disk_template=specs["template"],
                                   mode=constants.INSTANCE_CREATE,
-                                  os_type=specs['os'],
+                                  os_type=specs["os"],
                                   force_variant=specs["force_variant"],
-                                  pnode=specs['primary_node'],
-                                  snode=specs['secondary_node'],
+                                  pnode=specs["primary_node"],
+                                  snode=specs["secondary_node"],
                                   nics=tmp_nics,
-                                  start=specs['start'],
-                                  ip_check=specs['ip_check'],
-                                  name_check=specs['name_check'],
+                                  start=specs["start"],
+                                  ip_check=specs["ip_check"],
+                                  name_check=specs["name_check"],
                                   wait_for_sync=True,
-                                  iallocator=specs['iallocator'],
+                                  iallocator=specs["iallocator"],
                                   hypervisor=hypervisor,
                                   hvparams=hvparams,
-                                  beparams=specs['backend'],
-                                  file_storage_dir=specs['file_storage_dir'],
-                                  file_driver=specs['file_driver'])
+                                  beparams=specs["backend"],
+                                  file_storage_dir=specs["file_storage_dir"],
+                                  file_driver=specs["file_driver"])
 
     jex.QueueJob(name, op)
   # we never want to wait, just show the submitted job IDs
@@ -409,7 +419,7 @@ def ReinstallInstance(opts, args):
   """
   # first, compute the desired name list
   if opts.multi_mode is None:
-    opts.multi_mode = _SHUTDOWN_INSTANCES
+    opts.multi_mode = _EXPAND_INSTANCES
 
   inames = _ExpandMultiNames(opts.multi_mode, args)
   if not inames:
@@ -434,31 +444,37 @@ def ReinstallInstance(opts, args):
         choices.append(("%s" % number, entry, entry))
         number += 1
 
-    choices.append(('x', 'exit', 'Exit gnt-instance reinstall'))
+    choices.append(("x", "exit", "Exit gnt-instance reinstall"))
     selected = AskUser("Enter OS template number (or x to abort):",
                        choices)
 
-    if selected == 'exit':
+    if selected == "exit":
       ToStderr("User aborted reinstall, exiting")
       return 1
 
     os_name = selected
+    os_msg = "change the OS to '%s'" % selected
   else:
     os_name = opts.os
+    if opts.os is not None:
+      os_msg = "change the OS to '%s'" % os_name
+    else:
+      os_msg = "keep the same OS"
 
   # third, get confirmation: multi-reinstall requires --force-multi,
   # single-reinstall either --force or --force-multi (--force-multi is
   # a stronger --force)
-  multi_on = opts.multi_mode != _SHUTDOWN_INSTANCES or len(inames) > 1
+  multi_on = opts.multi_mode != _EXPAND_INSTANCES or len(inames) > 1
   if multi_on:
-    warn_msg = "Note: this will remove *all* data for the below instances!\n"
+    warn_msg = ("Note: this will remove *all* data for the"
+                " below instances! It will %s.\n" % os_msg)
     if not (opts.force_multi or
             ConfirmOperation(inames, "instances", "reinstall", extra=warn_msg)):
       return 1
   else:
     if not (opts.force or opts.force_multi):
-      usertext = ("This will reinstall the instance %s and remove"
-                  " all data. Continue?") % inames[0]
+      usertext = ("This will reinstall the instance '%s' (and %s) which"
+                  " removes all data. Continue?") % (inames[0], os_msg)
       if not AskUser(usertext):
         return 1
 
@@ -550,7 +566,8 @@ def ActivateDisks(opts, args):
   """
   instance_name = args[0]
   op = opcodes.OpInstanceActivateDisks(instance_name=instance_name,
-                                       ignore_size=opts.ignore_size)
+                                       ignore_size=opts.ignore_size,
+                                       wait_for_sync=opts.wait_for_sync)
   disks_info = SubmitOrSend(op, opts)
   for host, iname, nname in disks_info:
     ToStdout("%s:%s:%s", host, iname, nname)
@@ -588,18 +605,46 @@ def RecreateDisks(opts, args):
 
   """
   instance_name = args[0]
+
+  disks = []
+
   if opts.disks:
-    try:
-      opts.disks = [int(v) for v in opts.disks.split(",")]
-    except (ValueError, TypeError), err:
-      ToStderr("Invalid disks value: %s" % str(err))
-      return 1
+    for didx, ddict in opts.disks:
+      didx = int(didx)
+
+      if not ht.TDict(ddict):
+        msg = "Invalid disk/%d value: expected dict, got %s" % (didx, ddict)
+        raise errors.OpPrereqError(msg)
+
+      if constants.IDISK_SIZE in ddict:
+        try:
+          ddict[constants.IDISK_SIZE] = \
+            utils.ParseUnit(ddict[constants.IDISK_SIZE])
+        except ValueError, err:
+          raise errors.OpPrereqError("Invalid disk size for disk %d: %s" %
+                                     (didx, err))
+
+      disks.append((didx, ddict))
+
+  # TODO: Verify modifiable parameters (already done in
+    # LUInstanceRecreateDisks, but it'd be nice to have in the client)
+
+  if opts.node:
+    if opts.iallocator:
+      msg = "At most one of either --nodes or --iallocator can be passed"
+      raise errors.OpPrereqError(msg, errors.ECODE_INVAL)
+    pnode, snode = SplitNodeOption(opts.node)
+    nodes = [pnode]
+    if snode is not None:
+      nodes.append(snode)
   else:
-    opts.disks = []
+    nodes = []
 
   op = opcodes.OpInstanceRecreateDisks(instance_name=instance_name,
-                                       disks=opts.disks)
+                                       disks=disks, nodes=nodes,
+                                       iallocator=opts.iallocator)
   SubmitOrSend(op, opts)
+
   return 0
 
 
@@ -608,8 +653,8 @@ def GrowDisk(opts, args):
 
   @param opts: the command line options selected by the user
   @type args: list
-  @param args: should contain two elements, the instance name
-      whose disks we grow and the disk name, e.g. I{sda}
+  @param args: should contain three elements, the target instance name,
+      the target disk id, and the target growth
   @rtype: int
   @return: the desired exit code
 
@@ -621,10 +666,15 @@ def GrowDisk(opts, args):
   except (TypeError, ValueError), err:
     raise errors.OpPrereqError("Invalid disk index: %s" % str(err),
                                errors.ECODE_INVAL)
-  amount = utils.ParseUnit(args[2])
+  try:
+    amount = utils.ParseUnit(args[2])
+  except errors.UnitParseError:
+    raise errors.OpPrereqError("Can't parse the given amount '%s'" % args[2],
+                               errors.ECODE_INVAL)
   op = opcodes.OpInstanceGrowDisk(instance_name=instance,
                                   disk=disk, amount=amount,
-                                  wait_for_sync=opts.wait_for_sync)
+                                  wait_for_sync=opts.wait_for_sync,
+                                  absolute=opts.absolute)
   SubmitOrSend(op, opts)
   return 0
 
@@ -642,7 +692,9 @@ def _StartupInstance(name, opts):
   """
   op = opcodes.OpInstanceStartup(instance_name=name,
                                  force=opts.force,
-                                 ignore_offline_nodes=opts.ignore_offline)
+                                 ignore_offline_nodes=opts.ignore_offline,
+                                 no_remember=opts.no_remember,
+                                 startup_paused=opts.startup_paused)
   # do not add these parameters to the opcode unless they're defined
   if opts.hvparams:
     op.hvparams = opts.hvparams
@@ -681,7 +733,8 @@ def _ShutdownInstance(name, opts):
   """
   return opcodes.OpInstanceShutdown(instance_name=name,
                                     timeout=opts.timeout,
-                                    ignore_offline_nodes=opts.ignore_offline)
+                                    ignore_offline_nodes=opts.ignore_offline,
+                                    no_remember=opts.no_remember)
 
 
 def ReplaceDisks(opts, args):
@@ -707,7 +760,7 @@ def ReplaceDisks(opts, args):
   cnt = [opts.on_primary, opts.on_secondary, opts.auto,
          new_2ndary is not None, iallocator is not None].count(True)
   if cnt != 1:
-    raise errors.OpPrereqError("One and only one of the -p, -s, -a, -n and -i"
+    raise errors.OpPrereqError("One and only one of the -p, -s, -a, -n and -I"
                                " options must be passed", errors.ECODE_INVAL)
   elif opts.on_primary:
     mode = constants.REPLACE_DISK_PRI
@@ -725,7 +778,8 @@ def ReplaceDisks(opts, args):
   op = opcodes.OpInstanceReplaceDisks(instance_name=args[0], disks=disks,
                                       remote_node=new_2ndary, mode=mode,
                                       iallocator=iallocator,
-                                      early_release=opts.early_release)
+                                      early_release=opts.early_release,
+                                      ignore_ipolicy=opts.ignore_ipolicy)
   SubmitOrSend(op, opts)
   return 0
 
@@ -746,6 +800,12 @@ def FailoverInstance(opts, args):
   cl = GetClient()
   instance_name = args[0]
   force = opts.force
+  iallocator = opts.iallocator
+  target_node = opts.dst_node
+
+  if iallocator and target_node:
+    raise errors.OpPrereqError("Specify either an iallocator (-I), or a target"
+                               " node (-n) but not both", errors.ECODE_INVAL)
 
   if not force:
     _EnsureInstancesExist(cl, [instance_name])
@@ -758,7 +818,10 @@ def FailoverInstance(opts, args):
 
   op = opcodes.OpInstanceFailover(instance_name=instance_name,
                                   ignore_consistency=opts.ignore_consistency,
-                                  shutdown_timeout=opts.shutdown_timeout)
+                                  shutdown_timeout=opts.shutdown_timeout,
+                                  iallocator=iallocator,
+                                  target_node=target_node,
+                                  ignore_ipolicy=opts.ignore_ipolicy)
   SubmitOrSend(op, opts, cl=cl)
   return 0
 
@@ -778,6 +841,12 @@ def MigrateInstance(opts, args):
   cl = GetClient()
   instance_name = args[0]
   force = opts.force
+  iallocator = opts.iallocator
+  target_node = opts.dst_node
+
+  if iallocator and target_node:
+    raise errors.OpPrereqError("Specify either an iallocator (-I), or a target"
+                               " node (-n) but not both", errors.ECODE_INVAL)
 
   if not force:
     _EnsureInstancesExist(cl, [instance_name])
@@ -805,8 +874,12 @@ def MigrateInstance(opts, args):
     mode = opts.migration_mode
 
   op = opcodes.OpInstanceMigrate(instance_name=instance_name, mode=mode,
-                                 cleanup=opts.cleanup)
-  SubmitOpCode(op, cl=cl, opts=opts)
+                                 cleanup=opts.cleanup, iallocator=iallocator,
+                                 target_node=target_node,
+                                 allow_failover=opts.allow_failover,
+                                 allow_runtime_changes=opts.allow_runtime_chgs,
+                                 ignore_ipolicy=opts.ignore_ipolicy)
+  SubmitOrSend(op, cl=cl, opts=opts)
   return 0
 
 
@@ -833,7 +906,9 @@ def MoveInstance(opts, args):
 
   op = opcodes.OpInstanceMove(instance_name=instance_name,
                               target_node=opts.node,
-                              shutdown_timeout=opts.shutdown_timeout)
+                              shutdown_timeout=opts.shutdown_timeout,
+                              ignore_consistency=opts.ignore_consistency,
+                              ignore_ipolicy=opts.ignore_ipolicy)
   SubmitOrSend(op, opts, cl=cl)
   return 0
 
@@ -850,18 +925,26 @@ def ConnectToInstanceConsole(opts, args):
   """
   instance_name = args[0]
 
-  op = opcodes.OpInstanceConsole(instance_name=instance_name)
-
   cl = GetClient()
   try:
     cluster_name = cl.QueryConfigValues(["cluster_name"])[0]
-    console_data = SubmitOpCode(op, opts=opts, cl=cl)
+    ((console_data, oper_state), ) = \
+      cl.QueryInstances([instance_name], ["console", "oper_state"], False)
   finally:
     # Ensure client connection is closed while external commands are run
     cl.Close()
 
   del cl
 
+  if not console_data:
+    if oper_state:
+      # Instance is running
+      raise errors.OpExecError("Console information for instance %s is"
+                               " unavailable" % instance_name)
+    else:
+      raise errors.OpExecError("Instance %s is not running, can't get console" %
+                               instance_name)
+
   return _DoConsole(objects.InstanceConsole.FromDict(console_data),
                     opts.show_command, cluster_name)
 
@@ -887,6 +970,9 @@ def _DoConsole(console, show_command, cluster_name, feedback_fn=ToStdout,
                 " URL <vnc://%s:%s/>",
                 console.instance, console.host, console.port,
                 console.display, console.host, console.port)
+  elif console.kind == constants.CONS_SPICE:
+    feedback_fn("Instance %s has SPICE listening on %s:%s", console.instance,
+                console.host, console.port)
   elif console.kind == constants.CONS_SSH:
     # Convert to string if not already one
     if isinstance(console.command, basestring):
@@ -938,7 +1024,7 @@ def _FormatLogicalID(dev_type, logical_id, roman):
   return data
 
 
-def _FormatBlockDevInfo(idx, top_level, dev, static, roman):
+def _FormatBlockDevInfo(idx, top_level, dev, roman):
   """Show block device information.
 
   This is only used by L{ShowInstanceConfig}, but it's too big to be
@@ -950,9 +1036,6 @@ def _FormatBlockDevInfo(idx, top_level, dev, static, roman):
   @param top_level: if this a top-level disk?
   @type dev: dict
   @param dev: dictionary with disk information
-  @type static: boolean
-  @param static: wheter the device information doesn't contain
-      runtime information but only static data
   @type roman: boolean
   @param roman: whether to try to use roman integers
   @return: a list of either strings, tuples or lists
@@ -1040,15 +1123,17 @@ def _FormatBlockDevInfo(idx, top_level, dev, static, roman):
   elif dev["physical_id"] is not None:
     data.append("physical_id:")
     data.append([dev["physical_id"]])
-  if not static:
+
+  if dev["pstatus"]:
     data.append(("on primary", helper(dev["dev_type"], dev["pstatus"])))
-  if dev["sstatus"] and not static:
+
+  if dev["sstatus"]:
     data.append(("on secondary", helper(dev["dev_type"], dev["sstatus"])))
 
   if dev["children"]:
     data.append("child devices:")
     for c_idx, child in enumerate(dev["children"]):
-      data.append(_FormatBlockDevInfo(c_idx, False, child, static, roman))
+      data.append(_FormatBlockDevInfo(c_idx, False, child, roman))
   d1.append(data)
   return d1
 
@@ -1073,13 +1158,13 @@ def _FormatList(buf, data, indent_level):
                  if isinstance(elem, tuple)] or [0])
   for elem in data:
     if isinstance(elem, basestring):
-      buf.write("%*s%s\n" % (2*indent_level, "", elem))
+      buf.write("%*s%s\n" % (2 * indent_level, "", elem))
     elif isinstance(elem, tuple):
       key, value = elem
       spacer = "%*s" % (max_tlen - len(key), "")
-      buf.write("%*s%s:%s %s\n" % (2*indent_level, "", key, spacer, value))
+      buf.write("%*s%s:%s %s\n" % (2 * indent_level, "", key, spacer, value))
     elif isinstance(elem, list):
-      _FormatList(buf, elem, indent_level+1)
+      _FormatList(buf, elem, indent_level + 1)
 
 
 def ShowInstanceConfig(opts, args):
@@ -1103,7 +1188,8 @@ def ShowInstanceConfig(opts, args):
     return 1
 
   retcode = 0
-  op = opcodes.OpInstanceQueryData(instances=args, static=opts.static)
+  op = opcodes.OpInstanceQueryData(instances=args, static=opts.static,
+                                   use_locking=not opts.static)
   result = SubmitOpCode(op, opts=opts)
   if not result:
     ToStdout("No instances.")
@@ -1121,18 +1207,26 @@ def ShowInstanceConfig(opts, args):
     buf.write("Creation time: %s\n" % utils.FormatTime(instance["ctime"]))
     buf.write("Modification time: %s\n" % utils.FormatTime(instance["mtime"]))
     buf.write("State: configured to be %s" % instance["config_state"])
-    if not opts.static:
+    if instance["run_state"]:
       buf.write(", actual state is %s" % instance["run_state"])
     buf.write("\n")
     ##buf.write("Considered for memory checks in cluster verify: %s\n" %
     ##          instance["auto_balance"])
     buf.write("  Nodes:\n")
     buf.write("    - primary: %s\n" % instance["pnode"])
-    buf.write("    - secondaries: %s\n" % utils.CommaJoin(instance["snodes"]))
+    buf.write("      group: %s (UUID %s)\n" %
+              (instance["pnode_group_name"], instance["pnode_group_uuid"]))
+    buf.write("    - secondaries: %s\n" %
+              utils.CommaJoin("%s (group %s, group UUID %s)" %
+                                (name, group_name, group_uuid)
+                              for (name, group_name, group_uuid) in
+                                zip(instance["snodes"],
+                                    instance["snodes_group_names"],
+                                    instance["snodes_group_uuids"])))
     buf.write("  Operating system: %s\n" % instance["os"])
     FormatParameterDict(buf, instance["os_instance"], instance["os_actual"],
                         level=2)
-    if instance.has_key("network_port"):
+    if "network_port" in instance:
       buf.write("  Allocated network port: %s\n" %
                 compat.TryToRoman(instance["network_port"],
                                   convert=opts.roman_integers))
@@ -1161,12 +1255,12 @@ def ShowInstanceConfig(opts, args):
     FormatParameterDict(buf, instance["hv_instance"], instance["hv_actual"],
                         level=2)
     buf.write("  Hardware:\n")
-    buf.write("    - VCPUs: %s\n" %
-              compat.TryToRoman(instance["be_actual"][constants.BE_VCPUS],
-                                convert=opts.roman_integers))
-    buf.write("    - memory: %sMiB\n" %
-              compat.TryToRoman(instance["be_actual"][constants.BE_MEMORY],
-                                convert=opts.roman_integers))
+    # deprecated "memory" value, kept for one version for compatibility
+    # TODO(ganeti 2.7) remove.
+    be_actual = copy.deepcopy(instance["be_actual"])
+    be_actual["memory"] = be_actual[constants.BE_MAXMEM]
+    FormatParameterDict(buf, instance["be_instance"], be_actual, level=2)
+    # TODO(ganeti 2.7) rework the NICs as well
     buf.write("    - NICs:\n")
     for idx, (ip, mac, mode, link) in enumerate(instance["nics"]):
       buf.write("      - nic/%d: MAC: %s, IP: %s, mode: %s, link: %s\n" %
@@ -1175,13 +1269,94 @@ def ShowInstanceConfig(opts, args):
     buf.write("  Disks:\n")
 
     for idx, device in enumerate(instance["disks"]):
-      _FormatList(buf, _FormatBlockDevInfo(idx, True, device, opts.static,
+      _FormatList(buf, _FormatBlockDevInfo(idx, True, device,
                   opts.roman_integers), 2)
 
-  ToStdout(buf.getvalue().rstrip('\n'))
+  ToStdout(buf.getvalue().rstrip("\n"))
   return retcode
 
 
def _ConvertNicDiskModifications(mods):
  """Converts NIC/disk modifications from CLI to opcode.

  When L{opcodes.OpInstanceSetParams} was changed to support adding/removing
  disks at arbitrary indices, its parameter format changed. This function
  converts legacy requests (e.g. "--net add" or "--disk add:size=4G") to the
  newer format and adds support for new-style requests (e.g. "--new 4:add").

  @type mods: list of tuples
  @param mods: Modifications as given by command line parser
  @rtype: list of tuples
  @return: Modifications as understood by L{opcodes.OpInstanceSetParams}

  """
  converted = []

  for (identifier, params) in mods:
    if identifier in (constants.DDM_ADD, constants.DDM_REMOVE):
      # Legacy interface: "add" appends after the last item, "remove" drops
      # the last item; both are expressed as index -1 in the new format
      action = identifier
      index = -1
    else:
      # New-style interface: a numeric index plus an action key in params
      try:
        index = int(identifier)
      except (TypeError, ValueError):
        raise errors.OpPrereqError("Non-numeric index '%s'" % identifier,
                                   errors.ECODE_INVAL)

      # Pop the action markers out of the parameter dict; what remains are
      # the actual NIC/disk parameters for the opcode
      wants_add = params.pop(constants.DDM_ADD, _MISSING) is not _MISSING
      wants_remove = params.pop(constants.DDM_REMOVE, _MISSING) is not _MISSING
      wants_modify = params.pop(constants.DDM_MODIFY, _MISSING) is not _MISSING

      if wants_modify and (wants_add or wants_remove):
        raise errors.OpPrereqError("Cannot modify and add/remove at the"
                                   " same time", errors.ECODE_INVAL)
      if wants_add and wants_remove:
        raise errors.OpPrereqError("Cannot add and remove at the same time",
                                   errors.ECODE_INVAL)

      if wants_add:
        action = constants.DDM_ADD
      elif wants_remove:
        action = constants.DDM_REMOVE
      else:
        # No explicit action requested defaults to modification
        action = constants.DDM_MODIFY

      # All action markers must have been removed from the parameters by now
      assert not (constants.DDMS_VALUES_WITH_MODIFY & set(params.keys()))

    if action == constants.DDM_REMOVE and params:
      raise errors.OpPrereqError("Not accepting parameters on removal",
                                 errors.ECODE_INVAL)

    converted.append((action, index, params))

  return converted
+
+
def _ParseDiskSizes(mods):
  """Parses disk sizes in parameters.

  Converts the textual C{size} parameter (e.g. "4G") of each modification
  into mebibytes, in place. A disk being added must always specify a size.

  @type mods: list of tuples
  @param mods: Disk modifications as returned by
    L{_ConvertNicDiskModifications}
  @rtype: list of tuples
  @return: The same list, with sizes parsed

  """
  for (action, _, params) in mods:
    if not (params and constants.IDISK_SIZE in params):
      # Newly added disks have no default size and must provide one
      if action == constants.DDM_ADD:
        raise errors.OpPrereqError("Missing required parameter 'size'",
                                   errors.ECODE_INVAL)
      continue
    params[constants.IDISK_SIZE] = \
      utils.ParseUnit(params[constants.IDISK_SIZE])

  return mods
+
+
 def SetInstanceParams(opts, args):
   """Modifies an instance.
 
@@ -1195,7 +1370,8 @@ def SetInstanceParams(opts, args):
 
   """
   if not (opts.nics or opts.disks or opts.disk_template or
-          opts.hvparams or opts.beparams or opts.os or opts.osparams):
+          opts.hvparams or opts.beparams or opts.os or opts.osparams or
+          opts.offline_inst or opts.online_inst or opts.runtime_mem):
     ToStderr("Please give at least one of the parameters.")
     return 1
 
@@ -1204,7 +1380,7 @@ def SetInstanceParams(opts, args):
       if opts.beparams[param].lower() == "default":
         opts.beparams[param] = constants.VALUE_DEFAULT
 
-  utils.ForceDictType(opts.beparams, constants.BES_PARAMETER_TYPES,
+  utils.ForceDictType(opts.beparams, constants.BES_PARAMETER_COMPAT,
                       allowed_values=[constants.VALUE_DEFAULT])
 
   for param in opts.hvparams:
@@ -1215,43 +1391,38 @@ def SetInstanceParams(opts, args):
   utils.ForceDictType(opts.hvparams, constants.HVS_PARAMETER_TYPES,
                       allowed_values=[constants.VALUE_DEFAULT])
 
-  for idx, (nic_op, nic_dict) in enumerate(opts.nics):
-    try:
-      nic_op = int(nic_op)
-      opts.nics[idx] = (nic_op, nic_dict)
-    except (TypeError, ValueError):
-      pass
-
-  for idx, (disk_op, disk_dict) in enumerate(opts.disks):
-    try:
-      disk_op = int(disk_op)
-      opts.disks[idx] = (disk_op, disk_dict)
-    except (TypeError, ValueError):
-      pass
-    if disk_op == constants.DDM_ADD:
-      if 'size' not in disk_dict:
-        raise errors.OpPrereqError("Missing required parameter 'size'",
-                                   errors.ECODE_INVAL)
-      disk_dict['size'] = utils.ParseUnit(disk_dict['size'])
+  nics = _ConvertNicDiskModifications(opts.nics)
+  disks = _ParseDiskSizes(_ConvertNicDiskModifications(opts.disks))
 
   if (opts.disk_template and
-      opts.disk_template in constants.DTS_NET_MIRROR and
+      opts.disk_template in constants.DTS_INT_MIRROR and
       not opts.node):
     ToStderr("Changing the disk template to a mirrored one requires"
              " specifying a secondary node")
     return 1
 
+  if opts.offline_inst:
+    offline = True
+  elif opts.online_inst:
+    offline = False
+  else:
+    offline = None
+
   op = opcodes.OpInstanceSetParams(instance_name=args[0],
-                                   nics=opts.nics,
-                                   disks=opts.disks,
+                                   nics=nics,
+                                   disks=disks,
                                    disk_template=opts.disk_template,
                                    remote_node=opts.node,
                                    hvparams=opts.hvparams,
                                    beparams=opts.beparams,
+                                   runtime_mem=opts.runtime_mem,
                                    os_name=opts.os,
                                    osparams=opts.osparams,
                                    force_variant=opts.force_variant,
-                                   force=opts.force)
+                                   force=opts.force,
+                                   wait_for_sync=opts.wait_for_sync,
+                                   offline=offline,
+                                   ignore_ipolicy=opts.ignore_ipolicy)
 
   # even if here we process the result, we allow submit only
   result = SubmitOrSend(op, opts)
@@ -1265,6 +1436,39 @@ def SetInstanceParams(opts, args):
   return 0
 
 
def ChangeGroup(opts, args):
  """Moves an instance to another node group.

  @param opts: the command line options selected by the user
  @type args: list
  @param args: should contain only one element, the instance name
  @rtype: int
  @return: the desired exit code

  """
  (instance_name, ) = args

  client = GetClient()

  op = opcodes.OpInstanceChangeGroup(instance_name=instance_name,
                                     iallocator=opts.iallocator,
                                     target_groups=opts.to,
                                     early_release=opts.early_release)
  result = SubmitOrSend(op, opts, cl=client)

  # The opcode spawns one or more jobs; collect and wait for all of them
  jex = JobExecutor(cl=client, opts=opts)
  for (status, job_id) in result[constants.JOB_IDS_KEY]:
    jex.AddJobId(None, status, job_id)

  num_failed = sum(1 for row in jex.GetResults() if not row[0])
  if num_failed:
    ToStdout("There were %s errors while changing group of instance '%s'.",
             num_failed, instance_name)
    return constants.EXIT_FAILURE

  ToStdout("Instance '%s' changed group successfully.", instance_name)
  return constants.EXIT_SUCCESS
+
+
 # multi-instance selection options
 m_force_multi = cli_option("--force-multiple", dest="force_multi",
                            help="Do not ask for confirmation when more than"
@@ -1273,42 +1477,42 @@ m_force_multi = cli_option("--force-multiple", dest="force_multi",
 
 m_pri_node_opt = cli_option("--primary", dest="multi_mode",
                             help="Filter by nodes (primary only)",
-                            const=_SHUTDOWN_NODES_PRI, action="store_const")
+                            const=_EXPAND_NODES_PRI, action="store_const")
 
 m_sec_node_opt = cli_option("--secondary", dest="multi_mode",
                             help="Filter by nodes (secondary only)",
-                            const=_SHUTDOWN_NODES_SEC, action="store_const")
+                            const=_EXPAND_NODES_SEC, action="store_const")
 
 m_node_opt = cli_option("--node", dest="multi_mode",
                         help="Filter by nodes (primary and secondary)",
-                        const=_SHUTDOWN_NODES_BOTH, action="store_const")
+                        const=_EXPAND_NODES_BOTH, action="store_const")
 
 m_clust_opt = cli_option("--all", dest="multi_mode",
                          help="Select all instances in the cluster",
-                         const=_SHUTDOWN_CLUSTER, action="store_const")
+                         const=_EXPAND_CLUSTER, action="store_const")
 
 m_inst_opt = cli_option("--instance", dest="multi_mode",
                         help="Filter by instance name [default]",
-                        const=_SHUTDOWN_INSTANCES, action="store_const")
+                        const=_EXPAND_INSTANCES, action="store_const")
 
 m_node_tags_opt = cli_option("--node-tags", dest="multi_mode",
                              help="Filter by node tag",
-                             const=_SHUTDOWN_NODES_BOTH_BY_TAGS,
+                             const=_EXPAND_NODES_BOTH_BY_TAGS,
                              action="store_const")
 
 m_pri_node_tags_opt = cli_option("--pri-node-tags", dest="multi_mode",
                                  help="Filter by primary node tag",
-                                 const=_SHUTDOWN_NODES_PRI_BY_TAGS,
+                                 const=_EXPAND_NODES_PRI_BY_TAGS,
                                  action="store_const")
 
 m_sec_node_tags_opt = cli_option("--sec-node-tags", dest="multi_mode",
                                  help="Filter by secondary node tag",
-                                 const=_SHUTDOWN_NODES_SEC_BY_TAGS,
+                                 const=_EXPAND_NODES_SEC_BY_TAGS,
                                  action="store_const")
 
 m_inst_tags_opt = cli_option("--tags", dest="multi_mode",
                              help="Filter by instance tag",
-                             const=_SHUTDOWN_INSTANCES_BY_TAGS,
+                             const=_EXPAND_INSTANCES_BY_TAGS,
                              action="store_const")
 
 # this is defined separately due to readability only
@@ -1317,47 +1521,53 @@ add_opts = [
   OS_OPT,
   FORCE_VARIANT_OPT,
   NO_INSTALL_OPT,
+  IGNORE_IPOLICY_OPT,
   ]
 
 commands = {
-  'add': (
+  "add": (
     AddInstance, [ArgHost(min=1, max=1)], COMMON_CREATE_OPTS + add_opts,
     "[...] -t disk-type -n node[:secondary-node] -o os-type <name>",
     "Creates and adds a new instance to the cluster"),
-  'batch-create': (
+  "batch-create": (
     BatchCreate, [ArgFile(min=1, max=1)], [DRY_RUN_OPT, PRIORITY_OPT],
     "<instances.json>",
     "Create a bunch of instances based on specs in the file."),
-  'console': (
+  "console": (
     ConnectToInstanceConsole, ARGS_ONE_INSTANCE,
     [SHOWCMD_OPT, PRIORITY_OPT],
     "[--show-cmd] <instance>", "Opens a console on the specified instance"),
-  'failover': (
+  "failover": (
     FailoverInstance, ARGS_ONE_INSTANCE,
     [FORCE_OPT, IGNORE_CONSIST_OPT, SUBMIT_OPT, SHUTDOWN_TIMEOUT_OPT,
-     DRY_RUN_OPT, PRIORITY_OPT],
-    "[-f] <instance>", "Stops the instance and starts it on the backup node,"
-    " using the remote mirror (only for instances of type drbd)"),
-  'migrate': (
+     DRY_RUN_OPT, PRIORITY_OPT, DST_NODE_OPT, IALLOCATOR_OPT,
+     IGNORE_IPOLICY_OPT],
+    "[-f] <instance>", "Stops the instance, changes its primary node and"
+    " (if it was originally running) starts it on the new node"
+    " (the secondary for mirrored instances or any node"
+    " for shared storage)."),
+  "migrate": (
     MigrateInstance, ARGS_ONE_INSTANCE,
     [FORCE_OPT, NONLIVE_OPT, MIGRATION_MODE_OPT, CLEANUP_OPT, DRY_RUN_OPT,
-     PRIORITY_OPT],
+     PRIORITY_OPT, DST_NODE_OPT, IALLOCATOR_OPT, ALLOW_FAILOVER_OPT,
+     IGNORE_IPOLICY_OPT, NORUNTIME_CHGS_OPT, SUBMIT_OPT],
     "[-f] <instance>", "Migrate instance to its secondary node"
-    " (only for instances of type drbd)"),
-  'move': (
+    " (only for mirrored instances)"),
+  "move": (
     MoveInstance, ARGS_ONE_INSTANCE,
     [FORCE_OPT, SUBMIT_OPT, SINGLE_NODE_OPT, SHUTDOWN_TIMEOUT_OPT,
-     DRY_RUN_OPT, PRIORITY_OPT],
+     DRY_RUN_OPT, PRIORITY_OPT, IGNORE_CONSIST_OPT, IGNORE_IPOLICY_OPT],
     "[-f] <instance>", "Move instance to an arbitrary node"
     " (only for instances of type file and lv)"),
-  'info': (
+  "info": (
     ShowInstanceConfig, ARGS_MANY_INSTANCES,
     [STATIC_OPT, ALL_OPT, ROMAN_OPT, PRIORITY_OPT],
     "[-s] {--all | <instance>...}",
     "Show information on the specified instance(s)"),
-  'list': (
+  "list": (
     ListInstances, ARGS_MANY_INSTANCES,
-    [NOHDR_OPT, SEP_OPT, USEUNITS_OPT, FIELDS_OPT],
+    [NOHDR_OPT, SEP_OPT, USEUNITS_OPT, FIELDS_OPT, VERBOSE_OPT,
+     FORCE_FILTER_OPT],
     "[<instance>...]",
     "Lists the instances and their status. The available fields can be shown"
     " using the \"list-fields\" command (see the man page for details)."
@@ -1369,95 +1579,104 @@ commands = {
     [NOHDR_OPT, SEP_OPT],
     "[fields...]",
     "Lists all available fields for instances"),
-  'reinstall': (
+  "reinstall": (
     ReinstallInstance, [ArgInstance()],
     [FORCE_OPT, OS_OPT, FORCE_VARIANT_OPT, m_force_multi, m_node_opt,
      m_pri_node_opt, m_sec_node_opt, m_clust_opt, m_inst_opt, m_node_tags_opt,
      m_pri_node_tags_opt, m_sec_node_tags_opt, m_inst_tags_opt, SELECT_OS_OPT,
      SUBMIT_OPT, DRY_RUN_OPT, PRIORITY_OPT, OSPARAMS_OPT],
     "[-f] <instance>", "Reinstall a stopped instance"),
-  'remove': (
+  "remove": (
     RemoveInstance, ARGS_ONE_INSTANCE,
     [FORCE_OPT, SHUTDOWN_TIMEOUT_OPT, IGNORE_FAILURES_OPT, SUBMIT_OPT,
      DRY_RUN_OPT, PRIORITY_OPT],
     "[-f] <instance>", "Shuts down the instance and removes it"),
-  'rename': (
+  "rename": (
     RenameInstance,
     [ArgInstance(min=1, max=1), ArgHost(min=1, max=1)],
     [NOIPCHECK_OPT, NONAMECHECK_OPT, SUBMIT_OPT, DRY_RUN_OPT, PRIORITY_OPT],
     "<instance> <new_name>", "Rename the instance"),
-  'replace-disks': (
+  "replace-disks": (
     ReplaceDisks, ARGS_ONE_INSTANCE,
     [AUTO_REPLACE_OPT, DISKIDX_OPT, IALLOCATOR_OPT, EARLY_RELEASE_OPT,
      NEW_SECONDARY_OPT, ON_PRIMARY_OPT, ON_SECONDARY_OPT, SUBMIT_OPT,
-     DRY_RUN_OPT, PRIORITY_OPT],
+     DRY_RUN_OPT, PRIORITY_OPT, IGNORE_IPOLICY_OPT],
     "[-s|-p|-n NODE|-I NAME] <instance>",
     "Replaces all disks for the instance"),
-  'modify': (
+  "modify": (
     SetInstanceParams, ARGS_ONE_INSTANCE,
     [BACKEND_OPT, DISK_OPT, FORCE_OPT, HVOPTS_OPT, NET_OPT, SUBMIT_OPT,
      DISK_TEMPLATE_OPT, SINGLE_NODE_OPT, OS_OPT, FORCE_VARIANT_OPT,
-     OSPARAMS_OPT, DRY_RUN_OPT, PRIORITY_OPT],
+     OSPARAMS_OPT, DRY_RUN_OPT, PRIORITY_OPT, NWSYNC_OPT, OFFLINE_INST_OPT,
+     ONLINE_INST_OPT, IGNORE_IPOLICY_OPT, RUNTIME_MEM_OPT],
     "<instance>", "Alters the parameters of an instance"),
-  'shutdown': (
+  "shutdown": (
     GenericManyOps("shutdown", _ShutdownInstance), [ArgInstance()],
     [m_node_opt, m_pri_node_opt, m_sec_node_opt, m_clust_opt,
      m_node_tags_opt, m_pri_node_tags_opt, m_sec_node_tags_opt,
      m_inst_tags_opt, m_inst_opt, m_force_multi, TIMEOUT_OPT, SUBMIT_OPT,
-     DRY_RUN_OPT, PRIORITY_OPT, IGNORE_OFFLINE_OPT],
+     DRY_RUN_OPT, PRIORITY_OPT, IGNORE_OFFLINE_OPT, NO_REMEMBER_OPT],
     "<instance>", "Stops an instance"),
-  'startup': (
+  "startup": (
     GenericManyOps("startup", _StartupInstance), [ArgInstance()],
     [FORCE_OPT, m_force_multi, m_node_opt, m_pri_node_opt, m_sec_node_opt,
      m_node_tags_opt, m_pri_node_tags_opt, m_sec_node_tags_opt,
      m_inst_tags_opt, m_clust_opt, m_inst_opt, SUBMIT_OPT, HVOPTS_OPT,
-     BACKEND_OPT, DRY_RUN_OPT, PRIORITY_OPT, IGNORE_OFFLINE_OPT],
+     BACKEND_OPT, DRY_RUN_OPT, PRIORITY_OPT, IGNORE_OFFLINE_OPT,
+     NO_REMEMBER_OPT, STARTUP_PAUSED_OPT],
     "<instance>", "Starts an instance"),
-  'reboot': (
+  "reboot": (
     GenericManyOps("reboot", _RebootInstance), [ArgInstance()],
     [m_force_multi, REBOOT_TYPE_OPT, IGNORE_SECONDARIES_OPT, m_node_opt,
      m_pri_node_opt, m_sec_node_opt, m_clust_opt, m_inst_opt, SUBMIT_OPT,
      m_node_tags_opt, m_pri_node_tags_opt, m_sec_node_tags_opt,
      m_inst_tags_opt, SHUTDOWN_TIMEOUT_OPT, DRY_RUN_OPT, PRIORITY_OPT],
     "<instance>", "Reboots an instance"),
-  'activate-disks': (
+  "activate-disks": (
     ActivateDisks, ARGS_ONE_INSTANCE,
-    [SUBMIT_OPT, IGNORE_SIZE_OPT, PRIORITY_OPT],
+    [SUBMIT_OPT, IGNORE_SIZE_OPT, PRIORITY_OPT, WFSYNC_OPT],
     "<instance>", "Activate an instance's disks"),
-  'deactivate-disks': (
+  "deactivate-disks": (
     DeactivateDisks, ARGS_ONE_INSTANCE,
     [FORCE_OPT, SUBMIT_OPT, DRY_RUN_OPT, PRIORITY_OPT],
     "[-f] <instance>", "Deactivate an instance's disks"),
-  'recreate-disks': (
+  "recreate-disks": (
     RecreateDisks, ARGS_ONE_INSTANCE,
-    [SUBMIT_OPT, DISKIDX_OPT, DRY_RUN_OPT, PRIORITY_OPT],
+    [SUBMIT_OPT, DISK_OPT, NODE_PLACEMENT_OPT, DRY_RUN_OPT, PRIORITY_OPT,
+     IALLOCATOR_OPT],
     "<instance>", "Recreate an instance's disks"),
-  'grow-disk': (
+  "grow-disk": (
     GrowDisk,
     [ArgInstance(min=1, max=1), ArgUnknown(min=1, max=1),
      ArgUnknown(min=1, max=1)],
-    [SUBMIT_OPT, NWSYNC_OPT, DRY_RUN_OPT, PRIORITY_OPT],
+    [SUBMIT_OPT, NWSYNC_OPT, DRY_RUN_OPT, PRIORITY_OPT, ABSOLUTE_OPT],
     "<instance> <disk> <size>", "Grow an instance's disk"),
-  'list-tags': (
-    ListTags, ARGS_ONE_INSTANCE, [PRIORITY_OPT],
+  "change-group": (
+    ChangeGroup, ARGS_ONE_INSTANCE,
+    [TO_GROUP_OPT, IALLOCATOR_OPT, EARLY_RELEASE_OPT, PRIORITY_OPT, SUBMIT_OPT],
+    "[-I <iallocator>] [--to <group>]", "Change group of instance"),
+  "list-tags": (
+    ListTags, ARGS_ONE_INSTANCE, [],
     "<instance_name>", "List the tags of the given instance"),
-  'add-tags': (
+  "add-tags": (
     AddTags, [ArgInstance(min=1, max=1), ArgUnknown()],
-    [TAG_SRC_OPT, PRIORITY_OPT],
+    [TAG_SRC_OPT, PRIORITY_OPT, SUBMIT_OPT],
     "<instance_name> tag...", "Add tags to the given instance"),
-  'remove-tags': (
+  "remove-tags": (
     RemoveTags, [ArgInstance(min=1, max=1), ArgUnknown()],
-    [TAG_SRC_OPT, PRIORITY_OPT],
+    [TAG_SRC_OPT, PRIORITY_OPT, SUBMIT_OPT],
     "<instance_name> tag...", "Remove tags from given instance"),
   }
 
 #: dictionary with aliases for commands
 aliases = {
-  'start': 'startup',
-  'stop': 'shutdown',
+  "start": "startup",
+  "stop": "shutdown",
+  "show": "info",
   }
 
 
 def Main():
   return GenericMain(commands, aliases=aliases,
-                     override={"tag_type": constants.TAG_INSTANCE})
+                     override={"tag_type": constants.TAG_INSTANCE},
+                     env_override=_ENV_OVERRIDE)