Export extractExTags and updateExclTags
[ganeti-local] lib/client/gnt_cluster.py
index 4945178..408fde7 100644
@@ -1,7 +1,7 @@
 #
 #
 
-# Copyright (C) 2006, 2007, 2010, 2011 Google Inc.
+# Copyright (C) 2006, 2007, 2010, 2011, 2012, 2013 Google Inc.
 #
 # This program is free software; you can redistribute it and/or modify
 # it under the terms of the GNU General Public License as published by
 
 """Cluster related commands"""
 
-# pylint: disable-msg=W0401,W0613,W0614,C0103
+# pylint: disable=W0401,W0613,W0614,C0103
 # W0401: Wildcard import ganeti.cli
 # W0613: Unused argument, since all functions follow the same API
 # W0614: Unused import %s from wildcard import (since we need cli)
 # C0103: Invalid name gnt-cluster
 
+from cStringIO import StringIO
 import os.path
 import time
 import OpenSSL
@@ -42,6 +43,7 @@ from ganeti import objects
 from ganeti import uidpool
 from ganeti import compat
 from ganeti import netutils
+from ganeti import pathutils
 
 
 ON_OPT = cli_option("--on", default=False,
@@ -49,14 +51,31 @@ ON_OPT = cli_option("--on", default=False,
                     help="Recover from an EPO")
 
 GROUPS_OPT = cli_option("--groups", default=False,
-                    action="store_true", dest="groups",
-                    help="Arguments are node groups instead of nodes")
+                        action="store_true", dest="groups",
+                        help="Arguments are node groups instead of nodes")
+
+FORCE_FAILOVER = cli_option("--yes-do-it", dest="yes_do_it",
+                            help="Override interactive check for --no-voting",
+                            default=False, action="store_true")
 
 _EPO_PING_INTERVAL = 30 # 30 seconds between pings
 _EPO_PING_TIMEOUT = 1 # 1 second
 _EPO_REACHABLE_TIMEOUT = 15 * 60 # 15 minutes
 
 
+def _CheckNoLvmStorageOptDeprecated(opts):
+  """Checks if the legacy option '--no-lvm-storage' is used.
+
+  """
+  if not opts.lvm_storage:
+    ToStderr("The option --no-lvm-storage is no longer supported. If you want"
+             " to disable lvm-based storage cluster-wide, use the option"
+             " --enabled-disk-templates to disable all of these lvm-base disk "
+             "  templates: %s" %
+             utils.CommaJoin(utils.GetLvmDiskTemplates()))
+    return 1
+
+
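Note that _CheckNoLvmStorageOptDeprecated returns 1 when the deprecated flag is present and falls through to an implicit None otherwise, so callers can propagate any truthy result directly as an exit code. A minimal sketch of the calling pattern, assuming an opts object with an lvm_storage attribute:

    rc = _CheckNoLvmStorageOptDeprecated(opts)
    if rc:
        return rc  # 1: the deprecated option was given; None means it was not
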
 @UsesRPC
 def InitCluster(opts, args):
   """Initialize the cluster.
@@ -69,13 +88,28 @@ def InitCluster(opts, args):
   @return: the desired exit code
 
   """
-  if not opts.lvm_storage and opts.vg_name:
-    ToStderr("Options --no-lvm-storage and --vg-name conflict.")
+  if _CheckNoLvmStorageOptDeprecated(opts):
     return 1
-
-  vg_name = opts.vg_name
-  if opts.lvm_storage and not opts.vg_name:
-    vg_name = constants.DEFAULT_VG
+  enabled_disk_templates = opts.enabled_disk_templates
+  if enabled_disk_templates:
+    enabled_disk_templates = enabled_disk_templates.split(",")
+  else:
+    enabled_disk_templates = constants.DEFAULT_ENABLED_DISK_TEMPLATES
+
+  vg_name = None
+  if opts.vg_name is not None:
+    vg_name = opts.vg_name
+    if vg_name:
+      if not utils.IsLvmEnabled(enabled_disk_templates):
+        ToStdout("You specified a volume group with --vg-name, but you did not"
+                 " enable any disk template that uses lvm.")
+    else:
+      if utils.IsLvmEnabled(enabled_disk_templates):
+        ToStderr("LVM disk templates are enabled, but vg name not set.")
+        return 1
+  else:
+    if utils.IsLvmEnabled(enabled_disk_templates):
+      vg_name = constants.DEFAULT_VG
 
   if not opts.drbd_storage and opts.drbd_helper:
     ToStderr("Options --no-drbd-storage and --drbd-usermode-helper conflict.")
@@ -98,9 +132,19 @@ def InitCluster(opts, args):
   beparams = opts.beparams
   nicparams = opts.nicparams
 
+  diskparams = dict(opts.diskparams)
+
+  # check the disk template names here, as we cannot rely on the type checks
+  # done on the opcode parameters
+  diskparams_keys = set(diskparams.keys())
+  if not (diskparams_keys <= constants.DISK_TEMPLATES):
+    unknown = utils.NiceSort(diskparams_keys - constants.DISK_TEMPLATES)
+    ToStderr("Disk templates unknown: %s" % utils.CommaJoin(unknown))
+    return 1
+
   # prepare beparams dict
   beparams = objects.FillDict(constants.BEC_DEFAULTS, beparams)
-  utils.ForceDictType(beparams, constants.BES_PARAMETER_TYPES)
+  utils.ForceDictType(beparams, constants.BES_PARAMETER_COMPAT)
 
   # prepare nicparams dict
   nicparams = objects.FillDict(constants.NICC_DEFAULTS, nicparams)
@@ -120,6 +164,28 @@ def InitCluster(opts, args):
     hvparams[hv] = objects.FillDict(constants.HVC_DEFAULTS[hv], hvparams[hv])
     utils.ForceDictType(hvparams[hv], constants.HVS_PARAMETER_TYPES)
 
+  # prepare diskparams dict
+  for templ in constants.DISK_TEMPLATES:
+    if templ not in diskparams:
+      diskparams[templ] = {}
+    diskparams[templ] = objects.FillDict(constants.DISK_DT_DEFAULTS[templ],
+                                         diskparams[templ])
+    utils.ForceDictType(diskparams[templ], constants.DISK_DT_TYPES)
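objects.FillDict overlays the user-supplied values on the per-template defaults, so every disk template ends up with a complete, type-checked parameter dict. An illustrative call (the parameter names are hypothetical):

    # FillDict(defaults, custom) returns a copy of defaults updated with custom
    objects.FillDict({"resync-rate": 1024, "barriers": "n"}, {"resync-rate": 2048})
    # -> {"resync-rate": 2048, "barriers": "n"}
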
+
+  # prepare ipolicy dict
+  ipolicy = CreateIPolicyFromOpts(
+    ispecs_mem_size=opts.ispecs_mem_size,
+    ispecs_cpu_count=opts.ispecs_cpu_count,
+    ispecs_disk_count=opts.ispecs_disk_count,
+    ispecs_disk_size=opts.ispecs_disk_size,
+    ispecs_nic_count=opts.ispecs_nic_count,
+    minmax_ispecs=opts.ipolicy_bounds_specs,
+    std_ispecs=opts.ipolicy_std_specs,
+    ipolicy_disk_templates=opts.ipolicy_disk_templates,
+    ipolicy_vcpu_ratio=opts.ipolicy_vcpu_ratio,
+    ipolicy_spindle_ratio=opts.ipolicy_spindle_ratio,
+    fill_all=True)
+
   if opts.candidate_pool_size is None:
     opts.candidate_pool_size = constants.MASTER_POOL_SIZE_DEFAULT
 
@@ -133,16 +199,36 @@ def InitCluster(opts, args):
   if opts.prealloc_wipe_disks is None:
     opts.prealloc_wipe_disks = False
 
+  external_ip_setup_script = opts.use_external_mip_script
+  if external_ip_setup_script is None:
+    external_ip_setup_script = False
+
   try:
     primary_ip_version = int(opts.primary_ip_version)
   except (ValueError, TypeError), err:
     ToStderr("Invalid primary ip version value: %s" % str(err))
     return 1
 
+  master_netmask = opts.master_netmask
+  try:
+    if master_netmask is not None:
+      master_netmask = int(master_netmask)
+  except (ValueError, TypeError), err:
+    ToStderr("Invalid master netmask value: %s" % str(err))
+    return 1
+
+  if opts.disk_state:
+    disk_state = utils.FlatToDict(opts.disk_state)
+  else:
+    disk_state = {}
+
+  hv_state = dict(opts.hv_state)
+
   bootstrap.InitCluster(cluster_name=args[0],
                         secondary_ip=opts.secondary_ip,
                         vg_name=vg_name,
                         mac_prefix=opts.mac_prefix,
+                        master_netmask=master_netmask,
                         master_netdev=master_netdev,
                         file_storage_dir=opts.file_storage_dir,
                         shared_file_storage_dir=opts.shared_file_storage_dir,
@@ -151,6 +237,8 @@ def InitCluster(opts, args):
                         beparams=beparams,
                         nicparams=nicparams,
                         ndparams=ndparams,
+                        diskparams=diskparams,
+                        ipolicy=ipolicy,
                         candidate_pool_size=opts.candidate_pool_size,
                         modify_etc_hosts=opts.modify_etc_hosts,
                         modify_ssh_setup=opts.modify_ssh_setup,
@@ -160,6 +248,10 @@ def InitCluster(opts, args):
                         default_iallocator=opts.default_iallocator,
                         primary_ip_version=primary_ip_version,
                         prealloc_wipe_disks=opts.prealloc_wipe_disks,
+                        use_external_mip_script=external_ip_setup_script,
+                        hv_state=hv_state,
+                        disk_state=disk_state,
+                        enabled_disk_templates=enabled_disk_templates,
                         )
   op = opcodes.OpClusterPostInit()
   SubmitOpCode(op, opts=opts)
@@ -183,10 +275,10 @@ def DestroyCluster(opts, args):
     return 1
 
   op = opcodes.OpClusterDestroy()
-  master = SubmitOpCode(op, opts=opts)
+  master_uuid = SubmitOpCode(op, opts=opts)
   # if we reached this, the opcode didn't fail; we can proceed to
   # shutdown all the daemons
-  bootstrap.FinalizeClusterDestroy(master)
+  bootstrap.FinalizeClusterDestroy(master_uuid)
   return 0
 
 
@@ -223,6 +315,32 @@ def RenameCluster(opts, args):
   return 0
 
 
+def ActivateMasterIp(opts, args):
+  """Activates the master IP.
+
+  """
+  op = opcodes.OpClusterActivateMasterIp()
+  SubmitOpCode(op)
+  return 0
+
+
+def DeactivateMasterIp(opts, args):
+  """Deactivates the master IP.
+
+  """
+  if not opts.confirm:
+    usertext = ("This will disable the master IP. All the open connections to"
+                " the master IP will be closed. To reach the master you will"
+                " need to use its node IP."
+                " Continue?")
+    if not AskUser(usertext):
+      return 1
+
+  op = opcodes.OpClusterDeactivateMasterIp()
+  SubmitOpCode(op)
+  return 0
+
+
 def RedistributeConfig(opts, args):
   """Forces push of the cluster configuration.
 
@@ -248,13 +366,14 @@ def ShowClusterVersion(opts, args):
   @return: the desired exit code
 
   """
-  cl = GetClient()
+  cl = GetClient(query=True)
   result = cl.QueryClusterInfo()
   ToStdout("Software version: %s", result["software_version"])
   ToStdout("Internode protocol: %s", result["protocol_version"])
   ToStdout("Configuration format: %s", result["config_version"])
   ToStdout("OS api version: %s", result["os_api_version"])
   ToStdout("Export interface: %s", result["export_version"])
+  ToStdout("VCS version: %s", result["vcs_version"])
   return 0
 
 
@@ -273,24 +392,24 @@ def ShowClusterMaster(opts, args):
   return 0
 
 
-def _PrintGroupedParams(paramsdict, level=1, roman=False):
-  """Print Grouped parameters (be, nic, disk) by group.
+def _FormatGroupedParams(paramsdict, roman=False):
+  """Format Grouped parameters (be, nic, disk) by group.
 
   @type paramsdict: dict of dicts
   @param paramsdict: {group: {param: value, ...}, ...}
-  @type level: int
-  @param level: Level of indention
+  @rtype: dict of dicts
+  @return: copy of the input dictionaries with strings as values
 
   """
-  indent = "  " * level
-  for item, val in sorted(paramsdict.items()):
+  ret = {}
+  for (item, val) in paramsdict.items():
     if isinstance(val, dict):
-      ToStdout("%s- %s:", indent, item)
-      _PrintGroupedParams(val, level=level + 1, roman=roman)
+      ret[item] = _FormatGroupedParams(val, roman=roman)
     elif roman and isinstance(val, int):
-      ToStdout("%s  %s: %s", indent, item, compat.TryToRoman(val))
+      ret[item] = compat.TryToRoman(val)
     else:
-      ToStdout("%s  %s: %s", indent, item, val)
+      ret[item] = str(val)
+  return ret
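Unlike the _PrintGroupedParams it replaces, the formatter returns a nested structure for PrintGenericInfo to render instead of writing to stdout. A minimal illustration with hypothetical input:

    params = {"xen-pvm": {"bootloader_path": "", "migration_port": 2}}
    _FormatGroupedParams(params, roman=True)
    # -> {"xen-pvm": {"bootloader_path": "", "migration_port": "II"}}
    # Leaf values are stringified; ints become roman numerals only when the
    # optional roman library is available (compat.TryToRoman passes them
    # through unchanged otherwise).
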
 
 
 def ShowClusterConfig(opts, args):
@@ -303,76 +422,91 @@ def ShowClusterConfig(opts, args):
   @return: the desired exit code
 
   """
-  cl = GetClient()
+  cl = GetClient(query=True)
   result = cl.QueryClusterInfo()
 
-  ToStdout("Cluster name: %s", result["name"])
-  ToStdout("Cluster UUID: %s", result["uuid"])
-
-  ToStdout("Creation time: %s", utils.FormatTime(result["ctime"]))
-  ToStdout("Modification time: %s", utils.FormatTime(result["mtime"]))
-
-  ToStdout("Master node: %s", result["master"])
-
-  ToStdout("Architecture (this node): %s (%s)",
-           result["architecture"][0], result["architecture"][1])
-
   if result["tags"]:
     tags = utils.CommaJoin(utils.NiceSort(result["tags"]))
   else:
     tags = "(none)"
+  if result["reserved_lvs"]:
+    reserved_lvs = utils.CommaJoin(result["reserved_lvs"])
+  else:
+    reserved_lvs = "(none)"
 
-  ToStdout("Tags: %s", tags)
+  enabled_hv = result["enabled_hypervisors"]
+  hvparams = dict((k, v) for k, v in result["hvparams"].iteritems()
+                  if k in enabled_hv)
 
-  ToStdout("Default hypervisor: %s", result["default_hypervisor"])
-  ToStdout("Enabled hypervisors: %s",
-           utils.CommaJoin(result["enabled_hypervisors"]))
+  info = [
+    ("Cluster name", result["name"]),
+    ("Cluster UUID", result["uuid"]),
 
-  ToStdout("Hypervisor parameters:")
-  _PrintGroupedParams(result["hvparams"])
+    ("Creation time", utils.FormatTime(result["ctime"])),
+    ("Modification time", utils.FormatTime(result["mtime"])),
 
-  ToStdout("OS-specific hypervisor parameters:")
-  _PrintGroupedParams(result["os_hvp"])
+    ("Master node", result["master"]),
 
-  ToStdout("OS parameters:")
-  _PrintGroupedParams(result["osparams"])
+    ("Architecture (this node)",
+     "%s (%s)" % (result["architecture"][0], result["architecture"][1])),
 
-  ToStdout("Hidden OSes: %s", utils.CommaJoin(result["hidden_os"]))
-  ToStdout("Blacklisted OSes: %s", utils.CommaJoin(result["blacklisted_os"]))
+    ("Tags", tags),
 
-  ToStdout("Cluster parameters:")
-  ToStdout("  - candidate pool size: %s",
-            compat.TryToRoman(result["candidate_pool_size"],
-                              convert=opts.roman_integers))
-  ToStdout("  - master netdev: %s", result["master_netdev"])
-  ToStdout("  - lvm volume group: %s", result["volume_group_name"])
-  if result["reserved_lvs"]:
-    reserved_lvs = utils.CommaJoin(result["reserved_lvs"])
-  else:
-    reserved_lvs = "(none)"
-  ToStdout("  - lvm reserved volumes: %s", reserved_lvs)
-  ToStdout("  - drbd usermode helper: %s", result["drbd_usermode_helper"])
-  ToStdout("  - file storage path: %s", result["file_storage_dir"])
-  ToStdout("  - shared file storage path: %s",
-           result["shared_file_storage_dir"])
-  ToStdout("  - maintenance of node health: %s",
-           result["maintain_node_health"])
-  ToStdout("  - uid pool: %s",
-            uidpool.FormatUidPool(result["uid_pool"],
-                                  roman=opts.roman_integers))
-  ToStdout("  - default instance allocator: %s", result["default_iallocator"])
-  ToStdout("  - primary ip version: %d", result["primary_ip_version"])
-  ToStdout("  - preallocation wipe disks: %s", result["prealloc_wipe_disks"])
-
-  ToStdout("Default node parameters:")
-  _PrintGroupedParams(result["ndparams"], roman=opts.roman_integers)
-
-  ToStdout("Default instance parameters:")
-  _PrintGroupedParams(result["beparams"], roman=opts.roman_integers)
-
-  ToStdout("Default nic parameters:")
-  _PrintGroupedParams(result["nicparams"], roman=opts.roman_integers)
+    ("Default hypervisor", result["default_hypervisor"]),
+    ("Enabled hypervisors", utils.CommaJoin(enabled_hv)),
+
+    ("Hypervisor parameters", _FormatGroupedParams(hvparams)),
+
+    ("OS-specific hypervisor parameters",
+     _FormatGroupedParams(result["os_hvp"])),
+
+    ("OS parameters", _FormatGroupedParams(result["osparams"])),
+
+    ("Hidden OSes", utils.CommaJoin(result["hidden_os"])),
+    ("Blacklisted OSes", utils.CommaJoin(result["blacklisted_os"])),
+
+    ("Cluster parameters", [
+      ("candidate pool size",
+       compat.TryToRoman(result["candidate_pool_size"],
+                         convert=opts.roman_integers)),
+      ("master netdev", result["master_netdev"]),
+      ("master netmask", result["master_netmask"]),
+      ("use external master IP address setup script",
+       result["use_external_mip_script"]),
+      ("lvm volume group", result["volume_group_name"]),
+      ("lvm reserved volumes", reserved_lvs),
+      ("drbd usermode helper", result["drbd_usermode_helper"]),
+      ("file storage path", result["file_storage_dir"]),
+      ("shared file storage path", result["shared_file_storage_dir"]),
+      ("maintenance of node health", result["maintain_node_health"]),
+      ("uid pool", uidpool.FormatUidPool(result["uid_pool"])),
+      ("default instance allocator", result["default_iallocator"]),
+      ("primary ip version", result["primary_ip_version"]),
+      ("preallocation wipe disks", result["prealloc_wipe_disks"]),
+      ("OS search path", utils.CommaJoin(pathutils.OS_SEARCH_PATH)),
+      ("ExtStorage Providers search path",
+       utils.CommaJoin(pathutils.ES_SEARCH_PATH)),
+      ("enabled disk templates",
+       utils.CommaJoin(result["enabled_disk_templates"])),
+      ]),
+
+    ("Default node parameters",
+     _FormatGroupedParams(result["ndparams"], roman=opts.roman_integers)),
 
+    ("Default instance parameters",
+     _FormatGroupedParams(result["beparams"], roman=opts.roman_integers)),
+
+    ("Default nic parameters",
+     _FormatGroupedParams(result["nicparams"], roman=opts.roman_integers)),
+
+    ("Default disk parameters",
+     _FormatGroupedParams(result["diskparams"], roman=opts.roman_integers)),
+
+    ("Instance policy - limits for instances",
+     FormatPolicyInfo(result["ipolicy"], None, True)),
+    ]
+
+  PrintGenericInfo(info)
   return 0
 
 
@@ -388,6 +522,8 @@ def ClusterCopyFile(opts, args):
 
   """
   filename = args[0]
+  filename = os.path.abspath(filename)
+
   if not os.path.exists(filename):
     raise errors.OpPrereqError("No such filename '%s'" % filename,
                                errors.ECODE_INVAL)
@@ -397,9 +533,10 @@ def ClusterCopyFile(opts, args):
   cluster_name = cl.QueryConfigValues(["cluster_name"])[0]
 
   results = GetOnlineNodes(nodes=opts.nodes, cl=cl, filter_master=True,
-                           secondary_ips=opts.use_replication_network)
+                           secondary_ips=opts.use_replication_network,
+                           nodegroup=opts.nodegroup)
 
-  srun = ssh.SshRunner(cluster_name=cluster_name)
+  srun = ssh.SshRunner(cluster_name)
   for node in results:
     if not srun.CopyFileToNode(node, filename):
       ToStderr("Copy of file %s to node %s failed", filename, node)
@@ -421,7 +558,7 @@ def RunClusterCommand(opts, args):
 
   command = " ".join(args)
 
-  nodes = GetOnlineNodes(nodes=opts.nodes, cl=cl)
+  nodes = GetOnlineNodes(nodes=opts.nodes, cl=cl, nodegroup=opts.nodegroup)
 
   cluster_name, master_node = cl.QueryConfigValues(["cluster_name",
                                                     "master_node"])
@@ -434,10 +571,19 @@ def RunClusterCommand(opts, args):
     nodes.append(master_node)
 
   for name in nodes:
-    result = srun.Run(name, "root", command)
+    result = srun.Run(name, constants.SSH_LOGIN_USER, command)
+
+    if opts.failure_only and result.exit_code == constants.EXIT_SUCCESS:
+      # Do not output anything for successful commands
+      continue
+
     ToStdout("------------------------------------------------")
-    ToStdout("node: %s", name)
-    ToStdout("%s", result.output)
+    if opts.show_machine_names:
+      for line in result.output.splitlines():
+        ToStdout("%s: %s", name, line)
+    else:
+      ToStdout("node: %s", name)
+      ToStdout("%s", result.output)
     ToStdout("return code = %s", result.exit_code)
 
   return 0
@@ -453,40 +599,47 @@ def VerifyCluster(opts, args):
   @return: the desired exit code
 
   """
-  simulate = opts.simulate_errors
   skip_checks = []
 
-  if opts.nodegroup is None:
-    # Verify cluster config.
-    op = opcodes.OpClusterVerifyConfig(verbose=opts.verbose,
-                                       error_codes=opts.error_codes,
-                                       debug_simulate_errors=simulate)
-
-    success, all_groups = SubmitOpCode(op, opts=opts)
-  else:
-    success = True
-    all_groups = [opts.nodegroup]
-
   if opts.skip_nplusone_mem:
     skip_checks.append(constants.VERIFY_NPLUSONE_MEM)
 
-  jex = JobExecutor(opts=opts, verbose=False)
+  cl = GetClient()
+
+  op = opcodes.OpClusterVerify(verbose=opts.verbose,
+                               error_codes=opts.error_codes,
+                               debug_simulate_errors=opts.simulate_errors,
+                               skip_checks=skip_checks,
+                               ignore_errors=opts.ignore_errors,
+                               group_name=opts.nodegroup)
+  result = SubmitOpCode(op, cl=cl, opts=opts)
+
+  # Keep track of submitted jobs
+  jex = JobExecutor(cl=cl, opts=opts)
 
-  for group in all_groups:
-    op = opcodes.OpClusterVerifyGroup(group_name=group,
-                                      skip_checks=skip_checks,
-                                      verbose=opts.verbose,
-                                      error_codes=opts.error_codes,
-                                      debug_simulate_errors=simulate)
-    jex.QueueJob("group " + group, op)
+  for (status, job_id) in result[constants.JOB_IDS_KEY]:
+    jex.AddJobId(None, status, job_id)
 
   results = jex.GetResults()
-  success &= compat.all(r[1][0] for r in results)
 
-  if success:
-    return constants.EXIT_SUCCESS
+  (bad_jobs, bad_results) = \
+    map(len,
+        # Convert iterators to lists
+        map(list,
+            # Count errors
+            map(compat.partial(itertools.ifilterfalse, bool),
+                # Convert result to booleans in a tuple
+                zip(*((job_success, len(op_results) == 1 and op_results[0])
+                      for (job_success, op_results) in results)))))
+
+  if bad_jobs == 0 and bad_results == 0:
+    rcode = constants.EXIT_SUCCESS
   else:
-    return constants.EXIT_FAILURE
+    rcode = constants.EXIT_FAILURE
+    if bad_jobs > 0:
+      ToStdout("%s job(s) failed while verifying the cluster.", bad_jobs)
+
+  return rcode
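The nested map/zip pipeline above counts failures along two axes at once: jobs that failed outright, and jobs whose single verify opcode reported problems. An equivalent explicit formulation, assuming results is a list of (job_success, op_results) pairs as returned by JobExecutor.GetResults:

    bad_jobs = bad_results = 0
    for (job_success, op_results) in results:
        if not job_success:
            bad_jobs += 1      # the job itself failed
        if not (len(op_results) == 1 and op_results[0]):
            bad_results += 1   # the verify opcode ran but reported errors
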
 
 
 def VerifyDisks(opts, args):
@@ -502,22 +655,30 @@ def VerifyDisks(opts, args):
   cl = GetClient()
 
   op = opcodes.OpClusterVerifyDisks()
-  result = SubmitOpCode(op, opts=opts, cl=cl)
-  if not isinstance(result, (list, tuple)) or len(result) != 3:
-    raise errors.ProgrammerError("Unknown result type for OpClusterVerifyDisks")
 
-  bad_nodes, instances, missing = result
+  result = SubmitOpCode(op, cl=cl, opts=opts)
+
+  # Keep track of submitted jobs
+  jex = JobExecutor(cl=cl, opts=opts)
+
+  for (status, job_id) in result[constants.JOB_IDS_KEY]:
+    jex.AddJobId(None, status, job_id)
 
   retcode = constants.EXIT_SUCCESS
 
-  if bad_nodes:
+  for (status, result) in jex.GetResults():
+    if not status:
+      ToStdout("Job failed: %s", result)
+      continue
+
+    ((bad_nodes, instances, missing), ) = result
+
     for node, text in bad_nodes.items():
       ToStdout("Error gathering data on node %s: %s",
                node, utils.SafeEncode(text[-400:]))
-      retcode |= 1
+      retcode = constants.EXIT_FAILURE
       ToStdout("You need to fix these nodes first before fixing instances")
 
-  if instances:
     for iname in instances:
       if iname in missing:
         continue
@@ -530,24 +691,26 @@ def VerifyDisks(opts, args):
         retcode |= nret
         ToStderr("Error activating disks for instance %s: %s", iname, msg)
 
-  if missing:
-    for iname, ival in missing.iteritems():
-      all_missing = compat.all(x[0] in bad_nodes for x in ival)
-      if all_missing:
-        ToStdout("Instance %s cannot be verified as it lives on"
-                 " broken nodes", iname)
-      else:
-        ToStdout("Instance %s has missing logical volumes:", iname)
-        ival.sort()
-        for node, vol in ival:
-          if node in bad_nodes:
-            ToStdout("\tbroken node %s /dev/%s", node, vol)
-          else:
-            ToStdout("\t%s /dev/%s", node, vol)
-
-    ToStdout("You need to run replace or recreate disks for all the above"
-             " instances, if this message persist after fixing nodes.")
-    retcode |= 1
+    if missing:
+      for iname, ival in missing.iteritems():
+        all_missing = compat.all(x[0] in bad_nodes for x in ival)
+        if all_missing:
+          ToStdout("Instance %s cannot be verified as it lives on"
+                   " broken nodes", iname)
+        else:
+          ToStdout("Instance %s has missing logical volumes:", iname)
+          ival.sort()
+          for node, vol in ival:
+            if node in bad_nodes:
+              ToStdout("\tbroken node %s /dev/%s", node, vol)
+            else:
+              ToStdout("\t%s /dev/%s", node, vol)
+
+      ToStdout("You need to replace or recreate disks for all the above"
+               " instances if this message persists after fixing broken nodes.")
+      retcode = constants.EXIT_FAILURE
+    elif not instances:
+      ToStdout("No disks need to be activated.")
 
   return retcode
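Each per-group verify-disks job returns a one-element list holding the familiar (bad_nodes, instances, missing) triple, hence the double unpacking `((bad_nodes, instances, missing), ) = result` above. A sketch of the expected shape, with hypothetical values:

    result = [({"node3": "rpc error text"},       # bad_nodes: node -> error
               ["inst1"],                         # instances with offline disks
               {"inst2": [("node3", "xenvg/lv1")]})]  # missing: inst -> (node, lv)
    ((bad_nodes, instances, missing), ) = result  # fails loudly if the shape changes
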
 
@@ -581,7 +744,7 @@ def MasterFailover(opts, args):
   @return: the desired exit code
 
   """
-  if opts.no_voting:
+  if opts.no_voting and not opts.yes_do_it:
     usertext = ("This will perform the failover even if most other nodes"
                 " are down, or if this node is outdated. This is dangerous"
                 " as it can lead to a non-consistent cluster. Check the"
@@ -606,7 +769,7 @@ def MasterPing(opts, args):
     cl = GetClient()
     cl.QueryClusterInfo()
     return 0
-  except Exception: # pylint: disable-msg=W0703
+  except Exception: # pylint: disable=W0703
     return 1
 
 
@@ -630,9 +793,45 @@ def SearchTags(opts, args):
     ToStdout("%s %s", path, tag)
 
 
-def _RenewCrypto(new_cluster_cert, new_rapi_cert, rapi_cert_filename,
-                 new_confd_hmac_key, new_cds, cds_filename,
-                 force):
+def _ReadAndVerifyCert(cert_filename, verify_private_key=False):
+  """Reads and verifies an X509 certificate.
+
+  @type cert_filename: string
+  @param cert_filename: the path of the file containing the certificate to
+                        verify encoded in PEM format
+  @type verify_private_key: bool
+  @param verify_private_key: whether to verify the private key in addition to
+                             the public certificate
+  @rtype: string
+  @return: a string containing the PEM-encoded certificate.
+
+  """
+  try:
+    pem = utils.ReadFile(cert_filename)
+  except IOError, err:
+    raise errors.X509CertError(cert_filename,
+                               "Unable to read certificate: %s" % str(err))
+
+  try:
+    OpenSSL.crypto.load_certificate(OpenSSL.crypto.FILETYPE_PEM, pem)
+  except Exception, err:
+    raise errors.X509CertError(cert_filename,
+                               "Unable to load certificate: %s" % str(err))
+
+  if verify_private_key:
+    try:
+      OpenSSL.crypto.load_privatekey(OpenSSL.crypto.FILETYPE_PEM, pem)
+    except Exception, err:
+      raise errors.X509CertError(cert_filename,
+                                 "Unable to load private key: %s" % str(err))
+
+  return pem
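The errors.X509CertError raised by the helper carries (filename, message) as its arguments, which the caller below indexes as err[0] and err[1]. A usage sketch with a hypothetical path:

    try:
        pem = _ReadAndVerifyCert("/tmp/rapi.pem", verify_private_key=True)
    except errors.X509CertError, err:
        ToStderr("Unable to load X509 certificate from %s: %s", err[0], err[1])
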
+
+
+def _RenewCrypto(new_cluster_cert, new_rapi_cert, # pylint: disable=R0911
+                 rapi_cert_filename, new_spice_cert, spice_cert_filename,
+                 spice_cacert_filename, new_confd_hmac_key, new_cds,
+                 cds_filename, force):
   """Renews cluster certificates, keys and secrets.
 
   @type new_cluster_cert: bool
@@ -641,6 +840,13 @@ def _RenewCrypto(new_cluster_cert, new_rapi_cert, rapi_cert_filename,
   @param new_rapi_cert: Whether to generate a new RAPI certificate
   @type rapi_cert_filename: string
   @param rapi_cert_filename: Path to file containing new RAPI certificate
+  @type new_spice_cert: bool
+  @param new_spice_cert: Whether to generate a new SPICE certificate
+  @type spice_cert_filename: string
+  @param spice_cert_filename: Path to file containing new SPICE certificate
+  @type spice_cacert_filename: string
+  @param spice_cacert_filename: Path to file containing the certificate of the
+                                CA that signed the SPICE certificate
   @type new_confd_hmac_key: bool
   @param new_confd_hmac_key: Whether to generate a new HMAC key
   @type new_cds: bool
@@ -652,7 +858,7 @@ def _RenewCrypto(new_cluster_cert, new_rapi_cert, rapi_cert_filename,
 
   """
   if new_rapi_cert and rapi_cert_filename:
-    ToStderr("Only one of the --new-rapi-certficate and --rapi-certificate"
+    ToStderr("Only one of the --new-rapi-certificate and --rapi-certificate"
              " options can be specified at the same time.")
     return 1
 
@@ -662,32 +868,31 @@ def _RenewCrypto(new_cluster_cert, new_rapi_cert, rapi_cert_filename,
              " the same time.")
     return 1
 
-  if rapi_cert_filename:
-    # Read and verify new certificate
-    try:
-      rapi_cert_pem = utils.ReadFile(rapi_cert_filename)
-
-      OpenSSL.crypto.load_certificate(OpenSSL.crypto.FILETYPE_PEM,
-                                      rapi_cert_pem)
-    except Exception, err: # pylint: disable-msg=W0703
-      ToStderr("Can't load new RAPI certificate from %s: %s" %
-               (rapi_cert_filename, str(err)))
-      return 1
+  if new_spice_cert and (spice_cert_filename or spice_cacert_filename):
+    ToStderr("When using --new-spice-certificate, the --spice-certificate"
+             " and --spice-ca-certificate must not be used.")
+    return 1
 
-    try:
-      OpenSSL.crypto.load_privatekey(OpenSSL.crypto.FILETYPE_PEM, rapi_cert_pem)
-    except Exception, err: # pylint: disable-msg=W0703
-      ToStderr("Can't load new RAPI private key from %s: %s" %
-               (rapi_cert_filename, str(err)))
-      return 1
+  if bool(spice_cacert_filename) ^ bool(spice_cert_filename):
+    ToStderr("Both --spice-certificate and --spice-ca-certificate must be"
+             " specified.")
+    return 1
 
-  else:
-    rapi_cert_pem = None
+  rapi_cert_pem, spice_cert_pem, spice_cacert_pem = (None, None, None)
+  try:
+    if rapi_cert_filename:
+      rapi_cert_pem = _ReadAndVerifyCert(rapi_cert_filename, True)
+    if spice_cert_filename:
+      spice_cert_pem = _ReadAndVerifyCert(spice_cert_filename, True)
+      spice_cacert_pem = _ReadAndVerifyCert(spice_cacert_filename)
+  except errors.X509CertError, err:
+    ToStderr("Unable to load X509 certificate from %s: %s", err[0], err[1])
+    return 1
 
   if cds_filename:
     try:
       cds = utils.ReadFile(cds_filename)
-    except Exception, err: # pylint: disable-msg=W0703
+    except Exception, err: # pylint: disable=W0703
       ToStderr("Can't load new cluster domain secret from %s: %s" %
                (cds_filename, str(err)))
       return 1
@@ -702,25 +907,33 @@ def _RenewCrypto(new_cluster_cert, new_rapi_cert, rapi_cert_filename,
 
   def _RenewCryptoInner(ctx):
     ctx.feedback_fn("Updating certificates and keys")
-    bootstrap.GenerateClusterCrypto(new_cluster_cert, new_rapi_cert,
+    bootstrap.GenerateClusterCrypto(new_cluster_cert,
+                                    new_rapi_cert,
+                                    new_spice_cert,
                                     new_confd_hmac_key,
                                     new_cds,
                                     rapi_cert_pem=rapi_cert_pem,
+                                    spice_cert_pem=spice_cert_pem,
+                                    spice_cacert_pem=spice_cacert_pem,
                                     cds=cds)
 
     files_to_copy = []
 
     if new_cluster_cert:
-      files_to_copy.append(constants.NODED_CERT_FILE)
+      files_to_copy.append(pathutils.NODED_CERT_FILE)
 
     if new_rapi_cert or rapi_cert_pem:
-      files_to_copy.append(constants.RAPI_CERT_FILE)
+      files_to_copy.append(pathutils.RAPI_CERT_FILE)
+
+    if new_spice_cert or spice_cert_pem:
+      files_to_copy.append(pathutils.SPICE_CERT_FILE)
+      files_to_copy.append(pathutils.SPICE_CACERT_FILE)
 
     if new_confd_hmac_key:
-      files_to_copy.append(constants.CONFD_HMAC_KEY)
+      files_to_copy.append(pathutils.CONFD_HMAC_KEY)
 
     if new_cds or cds:
-      files_to_copy.append(constants.CLUSTER_DOMAIN_SECRET_FILE)
+      files_to_copy.append(pathutils.CLUSTER_DOMAIN_SECRET_FILE)
 
     if files_to_copy:
       for node_name in ctx.nonmaster_nodes:
@@ -744,6 +957,9 @@ def RenewCrypto(opts, args):
   return _RenewCrypto(opts.new_cluster_cert,
                       opts.new_rapi_cert,
                       opts.rapi_cert,
+                      opts.new_spice_cert,
+                      opts.spice_cert,
+                      opts.spice_cacert,
                       opts.new_confd_hmac_key,
                       opts.new_cluster_domain_secret,
                       opts.cluster_domain_secret,
@@ -760,10 +976,10 @@ def SetClusterParams(opts, args):
   @return: the desired exit code
 
   """
-  if not (not opts.lvm_storage or opts.vg_name or
-          not opts.drbd_storage or opts.drbd_helper or
+  if not (opts.vg_name is not None or opts.drbd_helper or
           opts.enabled_hypervisors or opts.hvparams or
-          opts.beparams or opts.nicparams or opts.ndparams or
+          opts.beparams or opts.nicparams or
+          opts.ndparams or opts.diskparams or
           opts.candidate_pool_size is not None or
           opts.uid_pool is not None or
           opts.maintain_node_health is not None or
@@ -772,17 +988,38 @@ def SetClusterParams(opts, args):
           opts.default_iallocator is not None or
           opts.reserved_lvs is not None or
           opts.master_netdev is not None or
-          opts.prealloc_wipe_disks is not None):
+          opts.master_netmask is not None or
+          opts.use_external_mip_script is not None or
+          opts.prealloc_wipe_disks is not None or
+          opts.hv_state or
+          opts.enabled_disk_templates or
+          opts.disk_state or
+          opts.ipolicy_bounds_specs is not None or
+          opts.ipolicy_std_specs is not None or
+          opts.ipolicy_disk_templates is not None or
+          opts.ipolicy_vcpu_ratio is not None or
+          opts.ipolicy_spindle_ratio is not None or
+          opts.modify_etc_hosts is not None or
+          opts.file_storage_dir is not None):
     ToStderr("Please give at least one of the parameters.")
     return 1
 
-  vg_name = opts.vg_name
-  if not opts.lvm_storage and opts.vg_name:
-    ToStderr("Options --no-lvm-storage and --vg-name conflict.")
+  if _CheckNoLvmStorageOptDeprecated(opts):
     return 1
 
-  if not opts.lvm_storage:
-    vg_name = ""
+  enabled_disk_templates = None
+  if opts.enabled_disk_templates:
+    enabled_disk_templates = opts.enabled_disk_templates.split(",")
+
+  # consistency between vg name and enabled disk templates
+  vg_name = None
+  if opts.vg_name is not None:
+    vg_name = opts.vg_name
+  if enabled_disk_templates:
+    if vg_name and not utils.IsLvmEnabled(enabled_disk_templates):
+      ToStdout("You specified a volume group with --vg-name, but you did not"
+               " enable any of the following lvm-based disk templates: %s" %
+               utils.CommaJoin(utils.GetLvmDiskTemplates()))
 
   drbd_helper = opts.drbd_helper
   if not opts.drbd_storage and opts.drbd_helper:
@@ -801,8 +1038,13 @@ def SetClusterParams(opts, args):
   for hv_params in hvparams.values():
     utils.ForceDictType(hv_params, constants.HVS_PARAMETER_TYPES)
 
+  diskparams = dict(opts.diskparams)
+
+  for dt_params in diskparams.values():
+    utils.ForceDictType(dt_params, constants.DISK_DT_TYPES)
+
   beparams = opts.beparams
-  utils.ForceDictType(beparams, constants.BES_PARAMETER_TYPES)
+  utils.ForceDictType(beparams, constants.BES_PARAMETER_COMPAT)
 
   nicparams = opts.nicparams
   utils.ForceDictType(nicparams, constants.NICS_PARAMETER_TYPES)
@@ -811,6 +1053,14 @@ def SetClusterParams(opts, args):
   if ndparams is not None:
     utils.ForceDictType(ndparams, constants.NDS_PARAMETER_TYPES)
 
+  ipolicy = CreateIPolicyFromOpts(
+    minmax_ispecs=opts.ipolicy_bounds_specs,
+    std_ispecs=opts.ipolicy_std_specs,
+    ipolicy_disk_templates=opts.ipolicy_disk_templates,
+    ipolicy_vcpu_ratio=opts.ipolicy_vcpu_ratio,
+    ipolicy_spindle_ratio=opts.ipolicy_spindle_ratio,
+    )
+
   mnh = opts.maintain_node_health
 
   uid_pool = opts.uid_pool
@@ -831,24 +1081,52 @@ def SetClusterParams(opts, args):
     else:
       opts.reserved_lvs = utils.UnescapeAndSplit(opts.reserved_lvs, sep=",")
 
-  op = opcodes.OpClusterSetParams(vg_name=vg_name,
-                                  drbd_helper=drbd_helper,
-                                  enabled_hypervisors=hvlist,
-                                  hvparams=hvparams,
-                                  os_hvp=None,
-                                  beparams=beparams,
-                                  nicparams=nicparams,
-                                  ndparams=ndparams,
-                                  candidate_pool_size=opts.candidate_pool_size,
-                                  maintain_node_health=mnh,
-                                  uid_pool=uid_pool,
-                                  add_uids=add_uids,
-                                  remove_uids=remove_uids,
-                                  default_iallocator=opts.default_iallocator,
-                                  prealloc_wipe_disks=opts.prealloc_wipe_disks,
-                                  master_netdev=opts.master_netdev,
-                                  reserved_lvs=opts.reserved_lvs)
-  SubmitOpCode(op, opts=opts)
+  if opts.master_netmask is not None:
+    try:
+      opts.master_netmask = int(opts.master_netmask)
+    except ValueError:
+      ToStderr("The --master-netmask option expects an int parameter.")
+      return 1
+
+  ext_ip_script = opts.use_external_mip_script
+
+  if opts.disk_state:
+    disk_state = utils.FlatToDict(opts.disk_state)
+  else:
+    disk_state = {}
+
+  hv_state = dict(opts.hv_state)
+
+  op = opcodes.OpClusterSetParams(
+    vg_name=vg_name,
+    drbd_helper=drbd_helper,
+    enabled_hypervisors=hvlist,
+    hvparams=hvparams,
+    os_hvp=None,
+    beparams=beparams,
+    nicparams=nicparams,
+    ndparams=ndparams,
+    diskparams=diskparams,
+    ipolicy=ipolicy,
+    candidate_pool_size=opts.candidate_pool_size,
+    maintain_node_health=mnh,
+    modify_etc_hosts=opts.modify_etc_hosts,
+    uid_pool=uid_pool,
+    add_uids=add_uids,
+    remove_uids=remove_uids,
+    default_iallocator=opts.default_iallocator,
+    prealloc_wipe_disks=opts.prealloc_wipe_disks,
+    master_netdev=opts.master_netdev,
+    master_netmask=opts.master_netmask,
+    reserved_lvs=opts.reserved_lvs,
+    use_external_mip_script=ext_ip_script,
+    hv_state=hv_state,
+    disk_state=disk_state,
+    enabled_disk_templates=enabled_disk_templates,
+    force=opts.force,
+    file_storage_dir=opts.file_storage_dir,
+    )
+  SubmitOrSend(op, opts)
   return 0
 
 
@@ -960,12 +1238,13 @@ def _OobPower(opts, node_list, power):
   return True
 
 
-def _InstanceStart(opts, inst_list, start):
+def _InstanceStart(opts, inst_list, start, no_remember=False):
   """Puts the instances in the list to desired state.
 
   @param opts: The command line options selected by the user
   @param inst_list: The list of instances to operate on
   @param start: True if they should be started, False for shutdown
+  @param no_remember: If True, the resulting instance state is not remembered
   @return: The success of the operation (none failed)
 
   """
@@ -974,7 +1253,8 @@ def _InstanceStart(opts, inst_list, start):
     text_submit, text_success, text_failed = ("startup", "started", "starting")
   else:
     opcls = compat.partial(opcodes.OpInstanceShutdown,
-                           timeout=opts.shutdown_timeout)
+                           timeout=opts.shutdown_timeout,
+                           no_remember=no_remember)
     text_submit, text_success, text_failed = ("shutdown", "stopped", "stopping")
 
   jex = JobExecutor(opts=opts)
@@ -1154,7 +1434,7 @@ def _EpoOff(opts, node_list, inst_map):
   @return: The desired exit status
 
   """
-  if not _InstanceStart(opts, inst_map.keys(), False):
+  if not _InstanceStart(opts, inst_map.keys(), False, no_remember=True):
     ToStderr("Please investigate and stop instances manually before continuing")
     return constants.EXIT_FAILURE
 
@@ -1167,7 +1447,9 @@ def _EpoOff(opts, node_list, inst_map):
     return constants.EXIT_FAILURE
 
 
-def Epo(opts, args):
+def Epo(opts, args, cl=None, _on_fn=_EpoOn, _off_fn=_EpoOff,
+        _confirm_fn=ConfirmOperation,
+        _stdout_fn=ToStdout, _stderr_fn=ToStderr):
   """EPO operations.
 
   @param opts: the command line options selected by the user
@@ -1178,32 +1460,29 @@ def Epo(opts, args):
 
   """
   if opts.groups and opts.show_all:
-    ToStderr("Only one of --groups or --all are allowed")
+    _stderr_fn("Only one of --groups or --all are allowed")
     return constants.EXIT_FAILURE
   elif args and opts.show_all:
-    ToStderr("Arguments in combination with --all are not allowed")
+    _stderr_fn("Arguments in combination with --all are not allowed")
     return constants.EXIT_FAILURE
 
-  client = GetClient()
+  if cl is None:
+    cl = GetClient()
 
   if opts.groups:
-    node_query_list = itertools.chain(*client.QueryGroups(names=args,
-                                                          fields=["node_list"],
-                                                          use_locking=False))
+    node_query_list = \
+      itertools.chain(*cl.QueryGroups(args, ["node_list"], False))
   else:
     node_query_list = args
 
-  result = client.QueryNodes(names=node_query_list,
-                             fields=["name", "master", "pinst_list",
-                                     "sinst_list", "powered", "offline"],
-                             use_locking=False)
+  result = cl.QueryNodes(node_query_list, ["name", "master", "pinst_list",
+                                           "sinst_list", "powered", "offline"],
+                         False)
+
+  all_nodes = map(compat.fst, result)
   node_list = []
   inst_map = {}
-  for (idx, (node, master, pinsts, sinsts, powered,
-             offline)) in enumerate(result):
-    # Normalize the node_query_list as well
-    if not opts.show_all:
-      node_query_list[idx] = node
+  for (node, master, pinsts, sinsts, powered, offline) in result:
     if not offline:
       for inst in (pinsts + sinsts):
         if inst in inst_map:
@@ -1219,120 +1498,148 @@ def Epo(opts, args):
       # already operating on the master at this point :)
       continue
     elif master and not opts.show_all:
-      ToStderr("%s is the master node, please do a master-failover to another"
-               " node not affected by the EPO or use --all if you intend to"
-               " shutdown the whole cluster", node)
+      _stderr_fn("%s is the master node, please do a master-failover to another"
+                 " node not affected by the EPO or use --all if you intend to"
+                 " shutdown the whole cluster", node)
       return constants.EXIT_FAILURE
     elif powered is None:
-      ToStdout("Node %s does not support out-of-band handling, it can not be"
-               " handled in a fully automated manner", node)
+      _stdout_fn("Node %s does not support out-of-band handling, it can not be"
+                 " handled in a fully automated manner", node)
     elif powered == opts.on:
-      ToStdout("Node %s is already in desired power state, skipping", node)
+      _stdout_fn("Node %s is already in desired power state, skipping", node)
     elif not offline or (offline and powered):
       node_list.append(node)
 
-  if not opts.force and not ConfirmOperation(node_query_list, "nodes", "epo"):
+  if not (opts.force or _confirm_fn(all_nodes, "nodes", "epo")):
     return constants.EXIT_FAILURE
 
   if opts.on:
-    return _EpoOn(opts, node_query_list, node_list, inst_map)
+    return _on_fn(opts, all_nodes, node_list, inst_map)
   else:
-    return _EpoOff(opts, node_list, inst_map)
+    return _off_fn(opts, node_list, inst_map)
+
+
+def _GetCreateCommand(info):
+  buf = StringIO()
+  buf.write("gnt-cluster init")
+  PrintIPolicyCommand(buf, info["ipolicy"], False)
+  buf.write(" ")
+  buf.write(info["name"])
+  return buf.getvalue()
+
+
+def ShowCreateCommand(opts, args):
+  """Shows the command that can be used to re-create the cluster.
+
+  Currently it works only for ipolicy specs.
+
+  """
+  cl = GetClient(query=True)
+  result = cl.QueryClusterInfo()
+  ToStdout(_GetCreateCommand(result))
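The generated command has the shape `gnt-cluster init <ipolicy options> <cluster name>`, where the ipolicy options are whatever PrintIPolicyCommand emits for the configured policy. For a cluster named cluster.example.com the output would look roughly like (illustrative only; flags elided):

    gnt-cluster init --ipolicy-... cluster.example.com
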
 
 
 commands = {
-  'init': (
+  "init": (
     InitCluster, [ArgHost(min=1, max=1)],
     [BACKEND_OPT, CP_SIZE_OPT, ENABLED_HV_OPT, GLOBAL_FILEDIR_OPT,
-     HVLIST_OPT, MAC_PREFIX_OPT, MASTER_NETDEV_OPT, NIC_PARAMS_OPT,
-     NOLVM_STORAGE_OPT, NOMODIFY_ETCHOSTS_OPT, NOMODIFY_SSH_SETUP_OPT,
-     SECONDARY_IP_OPT, VG_NAME_OPT, MAINTAIN_NODE_HEALTH_OPT,
-     UIDPOOL_OPT, DRBD_HELPER_OPT, NODRBD_STORAGE_OPT,
+     HVLIST_OPT, MAC_PREFIX_OPT, MASTER_NETDEV_OPT, MASTER_NETMASK_OPT,
+     NIC_PARAMS_OPT, NOLVM_STORAGE_OPT, NOMODIFY_ETCHOSTS_OPT,
+     NOMODIFY_SSH_SETUP_OPT, SECONDARY_IP_OPT, VG_NAME_OPT,
+     MAINTAIN_NODE_HEALTH_OPT, UIDPOOL_OPT, DRBD_HELPER_OPT, NODRBD_STORAGE_OPT,
      DEFAULT_IALLOCATOR_OPT, PRIMARY_IP_VERSION_OPT, PREALLOC_WIPE_DISKS_OPT,
-     NODE_PARAMS_OPT, GLOBAL_SHARED_FILEDIR_OPT],
+     NODE_PARAMS_OPT, GLOBAL_SHARED_FILEDIR_OPT, USE_EXTERNAL_MIP_SCRIPT,
+     DISK_PARAMS_OPT, HV_STATE_OPT, DISK_STATE_OPT, ENABLED_DISK_TEMPLATES_OPT,
+     IPOLICY_STD_SPECS_OPT] + INSTANCE_POLICY_OPTS + SPLIT_ISPECS_OPTS,
     "[opts...] <cluster_name>", "Initialises a new cluster configuration"),
-  'destroy': (
+  "destroy": (
     DestroyCluster, ARGS_NONE, [YES_DOIT_OPT],
     "", "Destroy cluster"),
-  'rename': (
+  "rename": (
     RenameCluster, [ArgHost(min=1, max=1)],
     [FORCE_OPT, DRY_RUN_OPT],
     "<new_name>",
     "Renames the cluster"),
-  'redist-conf': (
-    RedistributeConfig, ARGS_NONE, [SUBMIT_OPT, DRY_RUN_OPT, PRIORITY_OPT],
+  "redist-conf": (
+    RedistributeConfig, ARGS_NONE, SUBMIT_OPTS + [DRY_RUN_OPT, PRIORITY_OPT],
     "", "Forces a push of the configuration file and ssconf files"
     " to the nodes in the cluster"),
-  'verify': (
+  "verify": (
     VerifyCluster, ARGS_NONE,
     [VERBOSE_OPT, DEBUG_SIMERR_OPT, ERROR_CODES_OPT, NONPLUS1_OPT,
-     DRY_RUN_OPT, PRIORITY_OPT, NODEGROUP_OPT],
+     DRY_RUN_OPT, PRIORITY_OPT, NODEGROUP_OPT, IGNORE_ERRORS_OPT],
     "", "Does a check on the cluster configuration"),
-  'verify-disks': (
+  "verify-disks": (
     VerifyDisks, ARGS_NONE, [PRIORITY_OPT],
     "", "Does a check on the cluster disk status"),
-  'repair-disk-sizes': (
+  "repair-disk-sizes": (
     RepairDiskSizes, ARGS_MANY_INSTANCES, [DRY_RUN_OPT, PRIORITY_OPT],
-    "", "Updates mismatches in recorded disk sizes"),
-  'master-failover': (
-    MasterFailover, ARGS_NONE, [NOVOTING_OPT],
+    "[instance...]", "Updates mismatches in recorded disk sizes"),
+  "master-failover": (
+    MasterFailover, ARGS_NONE, [NOVOTING_OPT, FORCE_FAILOVER],
     "", "Makes the current node the master"),
-  'master-ping': (
+  "master-ping": (
     MasterPing, ARGS_NONE, [],
     "", "Checks if the master is alive"),
-  'version': (
+  "version": (
     ShowClusterVersion, ARGS_NONE, [],
     "", "Shows the cluster version"),
-  'getmaster': (
+  "getmaster": (
     ShowClusterMaster, ARGS_NONE, [],
     "", "Shows the cluster master"),
-  'copyfile': (
+  "copyfile": (
     ClusterCopyFile, [ArgFile(min=1, max=1)],
-    [NODE_LIST_OPT, USE_REPL_NET_OPT],
+    [NODE_LIST_OPT, USE_REPL_NET_OPT, NODEGROUP_OPT],
     "[-n node...] <filename>", "Copies a file to all (or only some) nodes"),
-  'command': (
+  "command": (
     RunClusterCommand, [ArgCommand(min=1)],
-    [NODE_LIST_OPT],
+    [NODE_LIST_OPT, NODEGROUP_OPT, SHOW_MACHINE_OPT, FAILURE_ONLY_OPT],
     "[-n node...] <command>", "Runs a command on all (or only some) nodes"),
-  'info': (
+  "info": (
     ShowClusterConfig, ARGS_NONE, [ROMAN_OPT],
     "[--roman]", "Show cluster configuration"),
-  'list-tags': (
+  "list-tags": (
     ListTags, ARGS_NONE, [], "", "List the tags of the cluster"),
-  'add-tags': (
-    AddTags, [ArgUnknown()], [TAG_SRC_OPT, PRIORITY_OPT],
+  "add-tags": (
+    AddTags, [ArgUnknown()], [TAG_SRC_OPT, PRIORITY_OPT] + SUBMIT_OPTS,
     "tag...", "Add tags to the cluster"),
-  'remove-tags': (
-    RemoveTags, [ArgUnknown()], [TAG_SRC_OPT, PRIORITY_OPT],
+  "remove-tags": (
+    RemoveTags, [ArgUnknown()], [TAG_SRC_OPT, PRIORITY_OPT] + SUBMIT_OPTS,
     "tag...", "Remove tags from the cluster"),
-  'search-tags': (
+  "search-tags": (
     SearchTags, [ArgUnknown(min=1, max=1)], [PRIORITY_OPT], "",
     "Searches the tags on all objects on"
     " the cluster for a given pattern (regex)"),
-  'queue': (
+  "queue": (
     QueueOps,
     [ArgChoice(min=1, max=1, choices=["drain", "undrain", "info"])],
     [], "drain|undrain|info", "Change queue properties"),
-  'watcher': (
+  "watcher": (
     WatcherOps,
     [ArgChoice(min=1, max=1, choices=["pause", "continue", "info"]),
      ArgSuggest(min=0, max=1, choices=["30m", "1h", "4h"])],
     [],
     "{pause <timespec>|continue|info}", "Change watcher properties"),
-  'modify': (
+  "modify": (
     SetClusterParams, ARGS_NONE,
-    [BACKEND_OPT, CP_SIZE_OPT, ENABLED_HV_OPT, HVLIST_OPT, MASTER_NETDEV_OPT,
-     NIC_PARAMS_OPT, NOLVM_STORAGE_OPT, VG_NAME_OPT, MAINTAIN_NODE_HEALTH_OPT,
-     UIDPOOL_OPT, ADD_UIDS_OPT, REMOVE_UIDS_OPT, DRBD_HELPER_OPT,
-     NODRBD_STORAGE_OPT, DEFAULT_IALLOCATOR_OPT, RESERVED_LVS_OPT,
-     DRY_RUN_OPT, PRIORITY_OPT, PREALLOC_WIPE_DISKS_OPT, NODE_PARAMS_OPT],
+    [FORCE_OPT,
+     BACKEND_OPT, CP_SIZE_OPT, ENABLED_HV_OPT, HVLIST_OPT, MASTER_NETDEV_OPT,
+     MASTER_NETMASK_OPT, NIC_PARAMS_OPT, NOLVM_STORAGE_OPT, VG_NAME_OPT,
+     MAINTAIN_NODE_HEALTH_OPT, UIDPOOL_OPT, ADD_UIDS_OPT, REMOVE_UIDS_OPT,
+     DRBD_HELPER_OPT, NODRBD_STORAGE_OPT, DEFAULT_IALLOCATOR_OPT,
+     RESERVED_LVS_OPT, DRY_RUN_OPT, PRIORITY_OPT, PREALLOC_WIPE_DISKS_OPT,
+     NODE_PARAMS_OPT, USE_EXTERNAL_MIP_SCRIPT, DISK_PARAMS_OPT, HV_STATE_OPT,
+     DISK_STATE_OPT] + SUBMIT_OPTS +
+     [ENABLED_DISK_TEMPLATES_OPT, IPOLICY_STD_SPECS_OPT, MODIFY_ETCHOSTS_OPT] +
+     INSTANCE_POLICY_OPTS + [GLOBAL_FILEDIR_OPT],
     "[opts...]",
     "Alters the parameters of the cluster"),
   "renew-crypto": (
     RenewCrypto, ARGS_NONE,
     [NEW_CLUSTER_CERT_OPT, NEW_RAPI_CERT_OPT, RAPI_CERT_OPT,
      NEW_CONFD_HMAC_KEY_OPT, FORCE_OPT,
-     NEW_CLUSTER_DOMAIN_SECRET_OPT, CLUSTER_DOMAIN_SECRET_OPT],
+     NEW_CLUSTER_DOMAIN_SECRET_OPT, CLUSTER_DOMAIN_SECRET_OPT,
+     NEW_SPICE_CERT_OPT, SPICE_CERT_OPT, SPICE_CACERT_OPT],
     "[opts...]",
     "Renews cluster certificates, keys and secrets"),
   "epo": (
@@ -1341,12 +1648,21 @@ commands = {
      SHUTDOWN_TIMEOUT_OPT, POWER_DELAY_OPT],
     "[opts...] [args]",
     "Performs an emergency power-off on given args"),
+  "activate-master-ip": (
+    ActivateMasterIp, ARGS_NONE, [], "", "Activates the master IP"),
+  "deactivate-master-ip": (
+    DeactivateMasterIp, ARGS_NONE, [CONFIRM_OPT], "",
+    "Deactivates the master IP"),
+  "show-ispecs-cmd": (
+    ShowCreateCommand, ARGS_NONE, [], "",
+    "Show the command line to re-create the cluster"),
   }
 
 
 #: dictionary with aliases for commands
 aliases = {
-  'masterfailover': 'master-failover',
+  "masterfailover": "master-failover",
+  "show": "info",
 }