cmdlib: Extract instance operation functionality
author: Thomas Thrainer <thomasth@google.com>
Tue, 14 May 2013 11:52:28 +0000 (13:52 +0200)
committer: Thomas Thrainer <thomasth@google.com>
Fri, 17 May 2013 09:32:13 +0000 (11:32 +0200)
Split instance.py further by extracting instance operations
(start/stop/reboot/etc.) related logical units and functions to
instance_operation.py.

The extracted operations have in common that they affect the operating
system in a running instance directly.

Signed-off-by: Thomas Thrainer <thomasth@google.com>
Reviewed-by: Bernardo Dal Seno <bdalseno@google.com>

Makefile.am
lib/cmdlib/__init__.py
lib/cmdlib/instance.py
lib/cmdlib/instance_operation.py [new file with mode: 0644]
lib/cmdlib/instance_utils.py

index 9e0a6f4..4c8b3ac 100644 (file)
@@ -317,6 +317,7 @@ cmdlib_PYTHON = \
        lib/cmdlib/instance.py \
        lib/cmdlib/instance_storage.py \
        lib/cmdlib/instance_migration.py \
+       lib/cmdlib/instance_operation.py \
        lib/cmdlib/instance_utils.py \
        lib/cmdlib/backup.py \
        lib/cmdlib/query.py \
index 50353cc..d4c290e 100644 (file)
@@ -74,11 +74,6 @@ from ganeti.cmdlib.instance import \
   LUInstanceMove, \
   LUInstanceQuery, \
   LUInstanceQueryData, \
-  LUInstanceStartup, \
-  LUInstanceShutdown, \
-  LUInstanceReinstall, \
-  LUInstanceReboot, \
-  LUInstanceConsole, \
   LUInstanceMultiAlloc, \
   LUInstanceSetParams, \
   LUInstanceChangeGroup
@@ -91,6 +86,12 @@ from ganeti.cmdlib.instance_storage import \
 from ganeti.cmdlib.instance_migration import \
   LUInstanceFailover, \
   LUInstanceMigrate
+from ganeti.cmdlib.instance_operation import \
+  LUInstanceStartup, \
+  LUInstanceShutdown, \
+  LUInstanceReinstall, \
+  LUInstanceReboot, \
+  LUInstanceConsole
 from ganeti.cmdlib.backup import \
   LUBackupQuery, \
   LUBackupPrepare, \
index 6f51c50..3c0b136 100644 (file)
@@ -48,7 +48,7 @@ from ganeti import query
 from ganeti.cmdlib.base import NoHooksLU, LogicalUnit, _QueryBase, \
   ResultWithJobs
 
-from ganeti.cmdlib.common import INSTANCE_ONLINE, INSTANCE_DOWN, \
+from ganeti.cmdlib.common import INSTANCE_DOWN, \
   INSTANCE_NOT_RUNNING, CAN_CHANGE_INSTANCE_OFFLINE, _CheckNodeOnline, \
   _ShareAll, _GetDefaultIAllocator, _CheckInstanceNodeGroups, \
   _LoadNodeEvacResult, _CheckIAllocatorOrNode, _CheckParamsNotGlobal, \
@@ -62,12 +62,13 @@ from ganeti.cmdlib.instance_storage import _CreateDisks, \
   _CheckRADOSFreeSpace, _ComputeDiskSizePerVG, _GenerateDiskTemplate, \
   _CreateBlockDev, _StartInstanceDisks, _ShutdownInstanceDisks, \
   _AssembleInstanceDisks
+from ganeti.cmdlib.instance_operation import _GetInstanceConsole
 from ganeti.cmdlib.instance_utils import _BuildInstanceHookEnvByObject, \
   _GetClusterDomainSecret, _BuildInstanceHookEnv, _NICListToTuple, \
   _NICToTuple, _CheckNodeNotDrained, _RemoveInstance, _CopyLockList, \
   _ReleaseLocks, _CheckNodeVmCapable, _CheckTargetNodeIPolicy, \
   _GetInstanceInfoText, _RemoveDisks, _CheckNodeFreeMemory, \
-  _CheckInstanceBridgesExist, _CheckNicsBridgesExist
+  _CheckInstanceBridgesExist, _CheckNicsBridgesExist, _CheckNodeHasOS
 
 import ganeti.masterd.instance
 
@@ -320,24 +321,6 @@ def _CheckOSVariant(os_obj, name):
     raise errors.OpPrereqError("Unsupported OS variant", errors.ECODE_INVAL)
 
 
-def _CheckNodeHasOS(lu, node, os_name, force_variant):
-  """Ensure that a node supports a given OS.
-
-  @param lu: the LU on behalf of which we make the check
-  @param node: the node to check
-  @param os_name: the OS to query about
-  @param force_variant: whether to ignore variant errors
-  @raise errors.OpPrereqError: if the node is not supporting the OS
-
-  """
-  result = lu.rpc.call_os_get(node, os_name)
-  result.Raise("OS '%s' not in supported OS list for node %s" %
-               (os_name, node),
-               prereq=True, ecode=errors.ECODE_INVAL)
-  if not force_variant:
-    _CheckOSVariant(result.payload, os_name)
-
-
 class LUInstanceCreate(LogicalUnit):
   """Create an instance.
 
@@ -1826,27 +1809,6 @@ class LUInstanceMove(LogicalUnit):
                                  (instance.name, target_node, msg))
 
 
-def _GetInstanceConsole(cluster, instance):
-  """Returns console information for an instance.
-
-  @type cluster: L{objects.Cluster}
-  @type instance: L{objects.Instance}
-  @rtype: dict
-
-  """
-  hyper = hypervisor.GetHypervisorClass(instance.hypervisor)
-  # beparams and hvparams are passed separately, to avoid editing the
-  # instance and then saving the defaults in the instance itself.
-  hvparams = cluster.FillHV(instance)
-  beparams = cluster.FillBE(instance)
-  console = hyper.GetInstanceConsole(instance, hvparams, beparams)
-
-  assert console.instance == instance.name
-  assert console.Validate()
-
-  return console.ToDict()
-
-
 class _InstanceQuery(_QueryBase):
   FIELDS = query.INSTANCE_FIELDS
 
@@ -2241,443 +2203,6 @@ class LUInstanceQueryData(NoHooksLU):
     return result
 
 
-class LUInstanceStartup(LogicalUnit):
-  """Starts an instance.
-
-  """
-  HPATH = "instance-start"
-  HTYPE = constants.HTYPE_INSTANCE
-  REQ_BGL = False
-
-  def CheckArguments(self):
-    # extra beparams
-    if self.op.beparams:
-      # fill the beparams dict
-      objects.UpgradeBeParams(self.op.beparams)
-      utils.ForceDictType(self.op.beparams, constants.BES_PARAMETER_TYPES)
-
-  def ExpandNames(self):
-    self._ExpandAndLockInstance()
-    self.recalculate_locks[locking.LEVEL_NODE_RES] = constants.LOCKS_REPLACE
-
-  def DeclareLocks(self, level):
-    if level == locking.LEVEL_NODE_RES:
-      self._LockInstancesNodes(primary_only=True, level=locking.LEVEL_NODE_RES)
-
-  def BuildHooksEnv(self):
-    """Build hooks env.
-
-    This runs on master, primary and secondary nodes of the instance.
-
-    """
-    env = {
-      "FORCE": self.op.force,
-      }
-
-    env.update(_BuildInstanceHookEnvByObject(self, self.instance))
-
-    return env
-
-  def BuildHooksNodes(self):
-    """Build hooks nodes.
-
-    """
-    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
-    return (nl, nl)
-
-  def CheckPrereq(self):
-    """Check prerequisites.
-
-    This checks that the instance is in the cluster.
-
-    """
-    self.instance = instance = self.cfg.GetInstanceInfo(self.op.instance_name)
-    assert self.instance is not None, \
-      "Cannot retrieve locked instance %s" % self.op.instance_name
-
-    # extra hvparams
-    if self.op.hvparams:
-      # check hypervisor parameter syntax (locally)
-      cluster = self.cfg.GetClusterInfo()
-      utils.ForceDictType(self.op.hvparams, constants.HVS_PARAMETER_TYPES)
-      filled_hvp = cluster.FillHV(instance)
-      filled_hvp.update(self.op.hvparams)
-      hv_type = hypervisor.GetHypervisorClass(instance.hypervisor)
-      hv_type.CheckParameterSyntax(filled_hvp)
-      _CheckHVParams(self, instance.all_nodes, instance.hypervisor, filled_hvp)
-
-    _CheckInstanceState(self, instance, INSTANCE_ONLINE)
-
-    self.primary_offline = self.cfg.GetNodeInfo(instance.primary_node).offline
-
-    if self.primary_offline and self.op.ignore_offline_nodes:
-      self.LogWarning("Ignoring offline primary node")
-
-      if self.op.hvparams or self.op.beparams:
-        self.LogWarning("Overridden parameters are ignored")
-    else:
-      _CheckNodeOnline(self, instance.primary_node)
-
-      bep = self.cfg.GetClusterInfo().FillBE(instance)
-      bep.update(self.op.beparams)
-
-      # check bridges existence
-      _CheckInstanceBridgesExist(self, instance)
-
-      remote_info = self.rpc.call_instance_info(instance.primary_node,
-                                                instance.name,
-                                                instance.hypervisor)
-      remote_info.Raise("Error checking node %s" % instance.primary_node,
-                        prereq=True, ecode=errors.ECODE_ENVIRON)
-      if not remote_info.payload: # not running already
-        _CheckNodeFreeMemory(self, instance.primary_node,
-                             "starting instance %s" % instance.name,
-                             bep[constants.BE_MINMEM], instance.hypervisor)
-
-  def Exec(self, feedback_fn):
-    """Start the instance.
-
-    """
-    instance = self.instance
-    force = self.op.force
-    reason = self.op.reason
-
-    if not self.op.no_remember:
-      self.cfg.MarkInstanceUp(instance.name)
-
-    if self.primary_offline:
-      assert self.op.ignore_offline_nodes
-      self.LogInfo("Primary node offline, marked instance as started")
-    else:
-      node_current = instance.primary_node
-
-      _StartInstanceDisks(self, instance, force)
-
-      result = \
-        self.rpc.call_instance_start(node_current,
-                                     (instance, self.op.hvparams,
-                                      self.op.beparams),
-                                     self.op.startup_paused, reason)
-      msg = result.fail_msg
-      if msg:
-        _ShutdownInstanceDisks(self, instance)
-        raise errors.OpExecError("Could not start instance: %s" % msg)
-
-
-class LUInstanceShutdown(LogicalUnit):
-  """Shutdown an instance.
-
-  """
-  HPATH = "instance-stop"
-  HTYPE = constants.HTYPE_INSTANCE
-  REQ_BGL = False
-
-  def ExpandNames(self):
-    self._ExpandAndLockInstance()
-
-  def BuildHooksEnv(self):
-    """Build hooks env.
-
-    This runs on master, primary and secondary nodes of the instance.
-
-    """
-    env = _BuildInstanceHookEnvByObject(self, self.instance)
-    env["TIMEOUT"] = self.op.timeout
-    return env
-
-  def BuildHooksNodes(self):
-    """Build hooks nodes.
-
-    """
-    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
-    return (nl, nl)
-
-  def CheckPrereq(self):
-    """Check prerequisites.
-
-    This checks that the instance is in the cluster.
-
-    """
-    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
-    assert self.instance is not None, \
-      "Cannot retrieve locked instance %s" % self.op.instance_name
-
-    if not self.op.force:
-      _CheckInstanceState(self, self.instance, INSTANCE_ONLINE)
-    else:
-      self.LogWarning("Ignoring offline instance check")
-
-    self.primary_offline = \
-      self.cfg.GetNodeInfo(self.instance.primary_node).offline
-
-    if self.primary_offline and self.op.ignore_offline_nodes:
-      self.LogWarning("Ignoring offline primary node")
-    else:
-      _CheckNodeOnline(self, self.instance.primary_node)
-
-  def Exec(self, feedback_fn):
-    """Shutdown the instance.
-
-    """
-    instance = self.instance
-    node_current = instance.primary_node
-    timeout = self.op.timeout
-    reason = self.op.reason
-
-    # If the instance is offline we shouldn't mark it as down, as that
-    # resets the offline flag.
-    if not self.op.no_remember and instance.admin_state in INSTANCE_ONLINE:
-      self.cfg.MarkInstanceDown(instance.name)
-
-    if self.primary_offline:
-      assert self.op.ignore_offline_nodes
-      self.LogInfo("Primary node offline, marked instance as stopped")
-    else:
-      result = self.rpc.call_instance_shutdown(node_current, instance, timeout,
-                                               reason)
-      msg = result.fail_msg
-      if msg:
-        self.LogWarning("Could not shutdown instance: %s", msg)
-
-      _ShutdownInstanceDisks(self, instance)
-
-
-class LUInstanceReinstall(LogicalUnit):
-  """Reinstall an instance.
-
-  """
-  HPATH = "instance-reinstall"
-  HTYPE = constants.HTYPE_INSTANCE
-  REQ_BGL = False
-
-  def ExpandNames(self):
-    self._ExpandAndLockInstance()
-
-  def BuildHooksEnv(self):
-    """Build hooks env.
-
-    This runs on master, primary and secondary nodes of the instance.
-
-    """
-    return _BuildInstanceHookEnvByObject(self, self.instance)
-
-  def BuildHooksNodes(self):
-    """Build hooks nodes.
-
-    """
-    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
-    return (nl, nl)
-
-  def CheckPrereq(self):
-    """Check prerequisites.
-
-    This checks that the instance is in the cluster and is not running.
-
-    """
-    instance = self.cfg.GetInstanceInfo(self.op.instance_name)
-    assert instance is not None, \
-      "Cannot retrieve locked instance %s" % self.op.instance_name
-    _CheckNodeOnline(self, instance.primary_node, "Instance primary node"
-                     " offline, cannot reinstall")
-
-    if instance.disk_template == constants.DT_DISKLESS:
-      raise errors.OpPrereqError("Instance '%s' has no disks" %
-                                 self.op.instance_name,
-                                 errors.ECODE_INVAL)
-    _CheckInstanceState(self, instance, INSTANCE_DOWN, msg="cannot reinstall")
-
-    if self.op.os_type is not None:
-      # OS verification
-      pnode = _ExpandNodeName(self.cfg, instance.primary_node)
-      _CheckNodeHasOS(self, pnode, self.op.os_type, self.op.force_variant)
-      instance_os = self.op.os_type
-    else:
-      instance_os = instance.os
-
-    nodelist = list(instance.all_nodes)
-
-    if self.op.osparams:
-      i_osdict = _GetUpdatedParams(instance.osparams, self.op.osparams)
-      _CheckOSParams(self, True, nodelist, instance_os, i_osdict)
-      self.os_inst = i_osdict # the new dict (without defaults)
-    else:
-      self.os_inst = None
-
-    self.instance = instance
-
-  def Exec(self, feedback_fn):
-    """Reinstall the instance.
-
-    """
-    inst = self.instance
-
-    if self.op.os_type is not None:
-      feedback_fn("Changing OS to '%s'..." % self.op.os_type)
-      inst.os = self.op.os_type
-      # Write to configuration
-      self.cfg.Update(inst, feedback_fn)
-
-    _StartInstanceDisks(self, inst, None)
-    try:
-      feedback_fn("Running the instance OS create scripts...")
-      # FIXME: pass debug option from opcode to backend
-      result = self.rpc.call_instance_os_add(inst.primary_node,
-                                             (inst, self.os_inst), True,
-                                             self.op.debug_level)
-      result.Raise("Could not install OS for instance %s on node %s" %
-                   (inst.name, inst.primary_node))
-    finally:
-      _ShutdownInstanceDisks(self, inst)
-
-
-class LUInstanceReboot(LogicalUnit):
-  """Reboot an instance.
-
-  """
-  HPATH = "instance-reboot"
-  HTYPE = constants.HTYPE_INSTANCE
-  REQ_BGL = False
-
-  def ExpandNames(self):
-    self._ExpandAndLockInstance()
-
-  def BuildHooksEnv(self):
-    """Build hooks env.
-
-    This runs on master, primary and secondary nodes of the instance.
-
-    """
-    env = {
-      "IGNORE_SECONDARIES": self.op.ignore_secondaries,
-      "REBOOT_TYPE": self.op.reboot_type,
-      "SHUTDOWN_TIMEOUT": self.op.shutdown_timeout,
-      }
-
-    env.update(_BuildInstanceHookEnvByObject(self, self.instance))
-
-    return env
-
-  def BuildHooksNodes(self):
-    """Build hooks nodes.
-
-    """
-    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
-    return (nl, nl)
-
-  def CheckPrereq(self):
-    """Check prerequisites.
-
-    This checks that the instance is in the cluster.
-
-    """
-    self.instance = instance = self.cfg.GetInstanceInfo(self.op.instance_name)
-    assert self.instance is not None, \
-      "Cannot retrieve locked instance %s" % self.op.instance_name
-    _CheckInstanceState(self, instance, INSTANCE_ONLINE)
-    _CheckNodeOnline(self, instance.primary_node)
-
-    # check bridges existence
-    _CheckInstanceBridgesExist(self, instance)
-
-  def Exec(self, feedback_fn):
-    """Reboot the instance.
-
-    """
-    instance = self.instance
-    ignore_secondaries = self.op.ignore_secondaries
-    reboot_type = self.op.reboot_type
-    reason = self.op.reason
-
-    remote_info = self.rpc.call_instance_info(instance.primary_node,
-                                              instance.name,
-                                              instance.hypervisor)
-    remote_info.Raise("Error checking node %s" % instance.primary_node)
-    instance_running = bool(remote_info.payload)
-
-    node_current = instance.primary_node
-
-    if instance_running and reboot_type in [constants.INSTANCE_REBOOT_SOFT,
-                                            constants.INSTANCE_REBOOT_HARD]:
-      for disk in instance.disks:
-        self.cfg.SetDiskID(disk, node_current)
-      result = self.rpc.call_instance_reboot(node_current, instance,
-                                             reboot_type,
-                                             self.op.shutdown_timeout, reason)
-      result.Raise("Could not reboot instance")
-    else:
-      if instance_running:
-        result = self.rpc.call_instance_shutdown(node_current, instance,
-                                                 self.op.shutdown_timeout,
-                                                 reason)
-        result.Raise("Could not shutdown instance for full reboot")
-        _ShutdownInstanceDisks(self, instance)
-      else:
-        self.LogInfo("Instance %s was already stopped, starting now",
-                     instance.name)
-      _StartInstanceDisks(self, instance, ignore_secondaries)
-      result = self.rpc.call_instance_start(node_current,
-                                            (instance, None, None), False,
-                                            reason)
-      msg = result.fail_msg
-      if msg:
-        _ShutdownInstanceDisks(self, instance)
-        raise errors.OpExecError("Could not start instance for"
-                                 " full reboot: %s" % msg)
-
-    self.cfg.MarkInstanceUp(instance.name)
-
-
-class LUInstanceConsole(NoHooksLU):
-  """Connect to an instance's console.
-
-  This is somewhat special in that it returns the command line that
-  you need to run on the master node in order to connect to the
-  console.
-
-  """
-  REQ_BGL = False
-
-  def ExpandNames(self):
-    self.share_locks = _ShareAll()
-    self._ExpandAndLockInstance()
-
-  def CheckPrereq(self):
-    """Check prerequisites.
-
-    This checks that the instance is in the cluster.
-
-    """
-    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
-    assert self.instance is not None, \
-      "Cannot retrieve locked instance %s" % self.op.instance_name
-    _CheckNodeOnline(self, self.instance.primary_node)
-
-  def Exec(self, feedback_fn):
-    """Connect to the console of an instance
-
-    """
-    instance = self.instance
-    node = instance.primary_node
-
-    node_insts = self.rpc.call_instance_list([node],
-                                             [instance.hypervisor])[node]
-    node_insts.Raise("Can't get node information from %s" % node)
-
-    if instance.name not in node_insts.payload:
-      if instance.admin_state == constants.ADMINST_UP:
-        state = constants.INSTST_ERRORDOWN
-      elif instance.admin_state == constants.ADMINST_DOWN:
-        state = constants.INSTST_ADMINDOWN
-      else:
-        state = constants.INSTST_ADMINOFFLINE
-      raise errors.OpExecError("Instance %s is not running (state %s)" %
-                               (instance.name, state))
-
-    logging.debug("Connecting to console of %s on %s", instance.name, node)
-
-    return _GetInstanceConsole(self.cfg.GetClusterInfo(), instance)
-
-
 class LUInstanceMultiAlloc(NoHooksLU):
   """Allocates multiple instances at the same time.
 
diff --git a/lib/cmdlib/instance_operation.py b/lib/cmdlib/instance_operation.py
new file mode 100644 (file)
index 0000000..4129bb0
--- /dev/null
@@ -0,0 +1,502 @@
+#
+#
+
+# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013 Google Inc.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+# General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+# 02110-1301, USA.
+
+
+"""Logical units dealing with instance operations (start/stop/...).
+
+Those operations have in common that they affect the operating system in a
+running instance directly.
+
+"""
+
+import logging
+
+from ganeti import constants
+from ganeti import errors
+from ganeti import hypervisor
+from ganeti import locking
+from ganeti import objects
+from ganeti import utils
+from ganeti.cmdlib.base import LogicalUnit, NoHooksLU
+from ganeti.cmdlib.common import INSTANCE_ONLINE, INSTANCE_DOWN, \
+  _CheckHVParams, _CheckInstanceState, _CheckNodeOnline, _ExpandNodeName, \
+  _GetUpdatedParams, _CheckOSParams, _ShareAll
+from ganeti.cmdlib.instance_storage import _StartInstanceDisks, \
+  _ShutdownInstanceDisks
+from ganeti.cmdlib.instance_utils import _BuildInstanceHookEnvByObject, \
+  _CheckInstanceBridgesExist, _CheckNodeFreeMemory, _CheckNodeHasOS
+
+
+class LUInstanceStartup(LogicalUnit):
+  """Starts an instance.
+
+  """
+  HPATH = "instance-start"
+  HTYPE = constants.HTYPE_INSTANCE
+  REQ_BGL = False
+
+  def CheckArguments(self):
+    # extra beparams
+    if self.op.beparams:
+      # fill the beparams dict
+      objects.UpgradeBeParams(self.op.beparams)
+      utils.ForceDictType(self.op.beparams, constants.BES_PARAMETER_TYPES)
+
+  def ExpandNames(self):
+    self._ExpandAndLockInstance()
+    self.recalculate_locks[locking.LEVEL_NODE_RES] = constants.LOCKS_REPLACE
+
+  def DeclareLocks(self, level):
+    if level == locking.LEVEL_NODE_RES:
+      self._LockInstancesNodes(primary_only=True, level=locking.LEVEL_NODE_RES)
+
+  def BuildHooksEnv(self):
+    """Build hooks env.
+
+    This runs on master, primary and secondary nodes of the instance.
+
+    """
+    env = {
+      "FORCE": self.op.force,
+      }
+
+    env.update(_BuildInstanceHookEnvByObject(self, self.instance))
+
+    return env
+
+  def BuildHooksNodes(self):
+    """Build hooks nodes.
+
+    """
+    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
+    return (nl, nl)
+
+  def CheckPrereq(self):
+    """Check prerequisites.
+
+    This checks that the instance is in the cluster.
+
+    """
+    self.instance = instance = self.cfg.GetInstanceInfo(self.op.instance_name)
+    assert self.instance is not None, \
+      "Cannot retrieve locked instance %s" % self.op.instance_name
+
+    # extra hvparams
+    if self.op.hvparams:
+      # check hypervisor parameter syntax (locally)
+      cluster = self.cfg.GetClusterInfo()
+      utils.ForceDictType(self.op.hvparams, constants.HVS_PARAMETER_TYPES)
+      filled_hvp = cluster.FillHV(instance)
+      filled_hvp.update(self.op.hvparams)
+      hv_type = hypervisor.GetHypervisorClass(instance.hypervisor)
+      hv_type.CheckParameterSyntax(filled_hvp)
+      _CheckHVParams(self, instance.all_nodes, instance.hypervisor, filled_hvp)
+
+    _CheckInstanceState(self, instance, INSTANCE_ONLINE)
+
+    self.primary_offline = self.cfg.GetNodeInfo(instance.primary_node).offline
+
+    if self.primary_offline and self.op.ignore_offline_nodes:
+      self.LogWarning("Ignoring offline primary node")
+
+      if self.op.hvparams or self.op.beparams:
+        self.LogWarning("Overridden parameters are ignored")
+    else:
+      _CheckNodeOnline(self, instance.primary_node)
+
+      bep = self.cfg.GetClusterInfo().FillBE(instance)
+      bep.update(self.op.beparams)
+
+      # check bridges existence
+      _CheckInstanceBridgesExist(self, instance)
+
+      remote_info = self.rpc.call_instance_info(instance.primary_node,
+                                                instance.name,
+                                                instance.hypervisor)
+      remote_info.Raise("Error checking node %s" % instance.primary_node,
+                        prereq=True, ecode=errors.ECODE_ENVIRON)
+      if not remote_info.payload: # not running already
+        _CheckNodeFreeMemory(self, instance.primary_node,
+                             "starting instance %s" % instance.name,
+                             bep[constants.BE_MINMEM], instance.hypervisor)
+
+  def Exec(self, feedback_fn):
+    """Start the instance.
+
+    """
+    instance = self.instance
+    force = self.op.force
+    reason = self.op.reason
+
+    if not self.op.no_remember:
+      self.cfg.MarkInstanceUp(instance.name)
+
+    if self.primary_offline:
+      assert self.op.ignore_offline_nodes
+      self.LogInfo("Primary node offline, marked instance as started")
+    else:
+      node_current = instance.primary_node
+
+      _StartInstanceDisks(self, instance, force)
+
+      result = \
+        self.rpc.call_instance_start(node_current,
+                                     (instance, self.op.hvparams,
+                                      self.op.beparams),
+                                     self.op.startup_paused, reason)
+      msg = result.fail_msg
+      if msg:
+        _ShutdownInstanceDisks(self, instance)
+        raise errors.OpExecError("Could not start instance: %s" % msg)
+
+
+class LUInstanceShutdown(LogicalUnit):
+  """Shutdown an instance.
+
+  """
+  HPATH = "instance-stop"
+  HTYPE = constants.HTYPE_INSTANCE
+  REQ_BGL = False
+
+  def ExpandNames(self):
+    self._ExpandAndLockInstance()
+
+  def BuildHooksEnv(self):
+    """Build hooks env.
+
+    This runs on master, primary and secondary nodes of the instance.
+
+    """
+    env = _BuildInstanceHookEnvByObject(self, self.instance)
+    env["TIMEOUT"] = self.op.timeout
+    return env
+
+  def BuildHooksNodes(self):
+    """Build hooks nodes.
+
+    """
+    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
+    return (nl, nl)
+
+  def CheckPrereq(self):
+    """Check prerequisites.
+
+    This checks that the instance is in the cluster.
+
+    """
+    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
+    assert self.instance is not None, \
+      "Cannot retrieve locked instance %s" % self.op.instance_name
+
+    if not self.op.force:
+      _CheckInstanceState(self, self.instance, INSTANCE_ONLINE)
+    else:
+      self.LogWarning("Ignoring offline instance check")
+
+    self.primary_offline = \
+      self.cfg.GetNodeInfo(self.instance.primary_node).offline
+
+    if self.primary_offline and self.op.ignore_offline_nodes:
+      self.LogWarning("Ignoring offline primary node")
+    else:
+      _CheckNodeOnline(self, self.instance.primary_node)
+
+  def Exec(self, feedback_fn):
+    """Shutdown the instance.
+
+    """
+    instance = self.instance
+    node_current = instance.primary_node
+    timeout = self.op.timeout
+    reason = self.op.reason
+
+    # If the instance is offline we shouldn't mark it as down, as that
+    # resets the offline flag.
+    if not self.op.no_remember and instance.admin_state in INSTANCE_ONLINE:
+      self.cfg.MarkInstanceDown(instance.name)
+
+    if self.primary_offline:
+      assert self.op.ignore_offline_nodes
+      self.LogInfo("Primary node offline, marked instance as stopped")
+    else:
+      result = self.rpc.call_instance_shutdown(node_current, instance, timeout,
+                                               reason)
+      msg = result.fail_msg
+      if msg:
+        self.LogWarning("Could not shutdown instance: %s", msg)
+
+      _ShutdownInstanceDisks(self, instance)
+
+
+class LUInstanceReinstall(LogicalUnit):
+  """Reinstall an instance.
+
+  """
+  HPATH = "instance-reinstall"
+  HTYPE = constants.HTYPE_INSTANCE
+  REQ_BGL = False
+
+  def ExpandNames(self):
+    self._ExpandAndLockInstance()
+
+  def BuildHooksEnv(self):
+    """Build hooks env.
+
+    This runs on master, primary and secondary nodes of the instance.
+
+    """
+    return _BuildInstanceHookEnvByObject(self, self.instance)
+
+  def BuildHooksNodes(self):
+    """Build hooks nodes.
+
+    """
+    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
+    return (nl, nl)
+
+  def CheckPrereq(self):
+    """Check prerequisites.
+
+    This checks that the instance is in the cluster and is not running.
+
+    """
+    instance = self.cfg.GetInstanceInfo(self.op.instance_name)
+    assert instance is not None, \
+      "Cannot retrieve locked instance %s" % self.op.instance_name
+    _CheckNodeOnline(self, instance.primary_node, "Instance primary node"
+                     " offline, cannot reinstall")
+
+    if instance.disk_template == constants.DT_DISKLESS:
+      raise errors.OpPrereqError("Instance '%s' has no disks" %
+                                 self.op.instance_name,
+                                 errors.ECODE_INVAL)
+    _CheckInstanceState(self, instance, INSTANCE_DOWN, msg="cannot reinstall")
+
+    if self.op.os_type is not None:
+      # OS verification
+      pnode = _ExpandNodeName(self.cfg, instance.primary_node)
+      _CheckNodeHasOS(self, pnode, self.op.os_type, self.op.force_variant)
+      instance_os = self.op.os_type
+    else:
+      instance_os = instance.os
+
+    nodelist = list(instance.all_nodes)
+
+    if self.op.osparams:
+      i_osdict = _GetUpdatedParams(instance.osparams, self.op.osparams)
+      _CheckOSParams(self, True, nodelist, instance_os, i_osdict)
+      self.os_inst = i_osdict # the new dict (without defaults)
+    else:
+      self.os_inst = None
+
+    self.instance = instance
+
+  def Exec(self, feedback_fn):
+    """Reinstall the instance.
+
+    """
+    inst = self.instance
+
+    if self.op.os_type is not None:
+      feedback_fn("Changing OS to '%s'..." % self.op.os_type)
+      inst.os = self.op.os_type
+      # Write to configuration
+      self.cfg.Update(inst, feedback_fn)
+
+    _StartInstanceDisks(self, inst, None)
+    try:
+      feedback_fn("Running the instance OS create scripts...")
+      # FIXME: pass debug option from opcode to backend
+      result = self.rpc.call_instance_os_add(inst.primary_node,
+                                             (inst, self.os_inst), True,
+                                             self.op.debug_level)
+      result.Raise("Could not install OS for instance %s on node %s" %
+                   (inst.name, inst.primary_node))
+    finally:
+      _ShutdownInstanceDisks(self, inst)
+
+
+class LUInstanceReboot(LogicalUnit):
+  """Reboot an instance.
+
+  Soft and hard reboots of a running instance are delegated to the
+  hypervisor on the primary node; a full reboot (or rebooting a stopped
+  instance) is emulated by a shutdown followed by a disk re-activation
+  and start.
+
+  """
+  HPATH = "instance-reboot"
+  HTYPE = constants.HTYPE_INSTANCE
+  REQ_BGL = False
+
+  def ExpandNames(self):
+    self._ExpandAndLockInstance()
+
+  def BuildHooksEnv(self):
+    """Build hooks env.
+
+    This runs on master, primary and secondary nodes of the instance.
+
+    """
+    env = {
+      "IGNORE_SECONDARIES": self.op.ignore_secondaries,
+      "REBOOT_TYPE": self.op.reboot_type,
+      "SHUTDOWN_TIMEOUT": self.op.shutdown_timeout,
+      }
+
+    env.update(_BuildInstanceHookEnvByObject(self, self.instance))
+
+    return env
+
+  def BuildHooksNodes(self):
+    """Build hooks nodes.
+
+    Hooks run on the master node and on all nodes of the instance.
+
+    """
+    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
+    return (nl, nl)
+
+  def CheckPrereq(self):
+    """Check prerequisites.
+
+    This checks that the instance is in the cluster.
+
+    """
+    self.instance = instance = self.cfg.GetInstanceInfo(self.op.instance_name)
+    assert self.instance is not None, \
+      "Cannot retrieve locked instance %s" % self.op.instance_name
+    _CheckInstanceState(self, instance, INSTANCE_ONLINE)
+    _CheckNodeOnline(self, instance.primary_node)
+
+    # check bridges existence
+    _CheckInstanceBridgesExist(self, instance)
+
+  def Exec(self, feedback_fn):
+    """Reboot the instance.
+
+    """
+    instance = self.instance
+    ignore_secondaries = self.op.ignore_secondaries
+    reboot_type = self.op.reboot_type
+    reason = self.op.reason
+
+    # Ask the hypervisor whether the instance is actually running; the
+    # configuration's admin state may be out of date
+    remote_info = self.rpc.call_instance_info(instance.primary_node,
+                                              instance.name,
+                                              instance.hypervisor)
+    remote_info.Raise("Error checking node %s" % instance.primary_node)
+    instance_running = bool(remote_info.payload)
+
+    node_current = instance.primary_node
+
+    # Soft/hard reboots of a running instance are handled entirely by
+    # the hypervisor on the primary node
+    if instance_running and reboot_type in [constants.INSTANCE_REBOOT_SOFT,
+                                            constants.INSTANCE_REBOOT_HARD]:
+      for disk in instance.disks:
+        self.cfg.SetDiskID(disk, node_current)
+      result = self.rpc.call_instance_reboot(node_current, instance,
+                                             reboot_type,
+                                             self.op.shutdown_timeout, reason)
+      result.Raise("Could not reboot instance")
+    else:
+      # Full reboot, or the instance is not running: emulate the reboot
+      # with an explicit shutdown (if needed) and a fresh start
+      if instance_running:
+        result = self.rpc.call_instance_shutdown(node_current, instance,
+                                                 self.op.shutdown_timeout,
+                                                 reason)
+        result.Raise("Could not shutdown instance for full reboot")
+        _ShutdownInstanceDisks(self, instance)
+      else:
+        self.LogInfo("Instance %s was already stopped, starting now",
+                     instance.name)
+      _StartInstanceDisks(self, instance, ignore_secondaries)
+      result = self.rpc.call_instance_start(node_current,
+                                            (instance, None, None), False,
+                                            reason)
+      msg = result.fail_msg
+      if msg:
+        # Clean up the disks we just activated before reporting failure
+        _ShutdownInstanceDisks(self, instance)
+        raise errors.OpExecError("Could not start instance for"
+                                 " full reboot: %s" % msg)
+
+    self.cfg.MarkInstanceUp(instance.name)
+
+
+def _GetInstanceConsole(cluster, instance):
+  """Returns console information for an instance.
+
+  @type cluster: L{objects.Cluster}
+  @type instance: L{objects.Instance}
+  @rtype: dict
+
+  """
+  hyper = hypervisor.GetHypervisorClass(instance.hypervisor)
+  # beparams and hvparams are passed separately, to avoid editing the
+  # instance and then saving the defaults in the instance itself.
+  hvparams = cluster.FillHV(instance)
+  beparams = cluster.FillBE(instance)
+  console = hyper.GetInstanceConsole(instance, hvparams, beparams)
+
+  assert console.instance == instance.name
+  assert console.Validate()
+
+  return console.ToDict()
+
+
+class LUInstanceConsole(NoHooksLU):
+  """Connect to an instance's console.
+
+  This is somewhat special in that it returns the command line that
+  you need to run on the master node in order to connect to the
+  console.
+
+  """
+  REQ_BGL = False
+
+  def ExpandNames(self):
+    # Read-only operation: shared locks are sufficient
+    self.share_locks = _ShareAll()
+    self._ExpandAndLockInstance()
+
+  def CheckPrereq(self):
+    """Check prerequisites.
+
+    This checks that the instance is in the cluster.
+
+    """
+    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
+    assert self.instance is not None, \
+      "Cannot retrieve locked instance %s" % self.op.instance_name
+    _CheckNodeOnline(self, self.instance.primary_node)
+
+  def Exec(self, feedback_fn):
+    """Connect to the console of an instance
+
+    @rtype: dict
+    @return: console parameters as returned by L{_GetInstanceConsole}
+
+    """
+    instance = self.instance
+    node = instance.primary_node
+
+    # Verify via the hypervisor that the instance is actually running on
+    # its primary node before handing out console information
+    node_insts = self.rpc.call_instance_list([node],
+                                             [instance.hypervisor])[node]
+    node_insts.Raise("Can't get node information from %s" % node)
+
+    if instance.name not in node_insts.payload:
+      # Map the configured admin state to a user-visible instance state
+      # for the error message
+      if instance.admin_state == constants.ADMINST_UP:
+        state = constants.INSTST_ERRORDOWN
+      elif instance.admin_state == constants.ADMINST_DOWN:
+        state = constants.INSTST_ADMINDOWN
+      else:
+        state = constants.INSTST_ADMINOFFLINE
+      raise errors.OpExecError("Instance %s is not running (state %s)" %
+                               (instance.name, state))
+
+    logging.debug("Connecting to console of %s on %s", instance.name, node)
+
+    return _GetInstanceConsole(self.cfg.GetClusterInfo(), instance)
index 0d849b1..fd8cd26 100644 (file)
@@ -513,3 +513,45 @@ def _CheckNicsBridgesExist(lu, target_nics, target_node):
     result = lu.rpc.call_bridges_exist(target_node, brlist)
     result.Raise("Error checking bridges on destination node '%s'" %
                  target_node, prereq=True, ecode=errors.ECODE_ENVIRON)
+
+
+def _CheckNodeHasOS(lu, node, os_name, force_variant):
+  """Ensure that a node supports a given OS.
+
+  @param lu: the LU on behalf of which we make the check
+  @param node: the node to check
+  @param os_name: the OS to query about
+  @param force_variant: whether to ignore variant errors
+  @raise errors.OpPrereqError: if the node is not supporting the OS
+
+  """
+  result = lu.rpc.call_os_get(node, os_name)
+  result.Raise("OS '%s' not in supported OS list for node %s" %
+               (os_name, node),
+               prereq=True, ecode=errors.ECODE_INVAL)
+  if not force_variant:
+    _CheckOSVariant(result.payload, os_name)
+
+
+def _CheckOSVariant(os_obj, name):
+  """Check whether an OS name conforms to the os variants specification.
+
+  @type os_obj: L{objects.OS}
+  @param os_obj: OS object to check
+  @type name: string
+  @param name: OS name passed by the user, to check for validity
+
+  """
+  variant = objects.OS.GetVariant(name)
+  if not os_obj.supported_variants:
+    if variant:
+      raise errors.OpPrereqError("OS '%s' doesn't support variants ('%s'"
+                                 " passed)" % (os_obj.name, variant),
+                                 errors.ECODE_INVAL)
+    return
+  if not variant:
+    raise errors.OpPrereqError("OS name must include a variant",
+                               errors.ECODE_INVAL)
+
+  if variant not in os_obj.supported_variants:
+    raise errors.OpPrereqError("Unsupported OS variant", errors.ECODE_INVAL)