Simplify handling of regular fields in LUQuery*
diff --git a/lib/mcpu.py b/lib/mcpu.py
index 7c87c59..f9d5ab7 100644
--- a/lib/mcpu.py
+++ b/lib/mcpu.py
@@ -28,35 +28,66 @@ are two kinds of classes defined:
 
 """
 
 
 """
 
+import logging
 
 from ganeti import opcodes
 from ganeti import constants
 from ganeti import errors
 from ganeti import rpc
 from ganeti import cmdlib
-from ganeti import config
-from ganeti import ssconf
-from ganeti import logger
 from ganeti import locking
+from ganeti import utils
+
+
+class OpExecCbBase:
+  """Base class for OpCode execution callbacks.
+
+  """
+  def NotifyStart(self):
+    """Called when we are about to execute the LU.
+
+    This function is called when we're about to start the lu's Exec() method,
+    that is, after we have acquired all locks.
+
+    """
+
+  def Feedback(self, *args):
+    """Sends feedback from the LU code to the end-user.
+
+    """
+
+  def ReportLocks(self, msg):
+    """Report lock operations.
+
+    """
 
 
 class Processor(object):
   """Object which runs OpCodes"""
   DISPATCH_TABLE = {
     # Cluster
+    opcodes.OpPostInitCluster: cmdlib.LUPostInitCluster,
     opcodes.OpDestroyCluster: cmdlib.LUDestroyCluster,
     opcodes.OpQueryClusterInfo: cmdlib.LUQueryClusterInfo,
     opcodes.OpVerifyCluster: cmdlib.LUVerifyCluster,
-    opcodes.OpMasterFailover: cmdlib.LUMasterFailover,
-    opcodes.OpDumpClusterConfig: cmdlib.LUDumpClusterConfig,
+    opcodes.OpQueryConfigValues: cmdlib.LUQueryConfigValues,
     opcodes.OpRenameCluster: cmdlib.LURenameCluster,
     opcodes.OpVerifyDisks: cmdlib.LUVerifyDisks,
     opcodes.OpSetClusterParams: cmdlib.LUSetClusterParams,
+    opcodes.OpRedistributeConfig: cmdlib.LURedistributeConfig,
+    opcodes.OpRepairDiskSizes: cmdlib.LURepairDiskSizes,
     # node lu
     opcodes.OpAddNode: cmdlib.LUAddNode,
     opcodes.OpQueryNodes: cmdlib.LUQueryNodes,
     opcodes.OpQueryNodeVolumes: cmdlib.LUQueryNodeVolumes,
+    opcodes.OpQueryNodeStorage: cmdlib.LUQueryNodeStorage,
+    opcodes.OpModifyNodeStorage: cmdlib.LUModifyNodeStorage,
+    opcodes.OpRepairNodeStorage: cmdlib.LURepairNodeStorage,
     opcodes.OpRemoveNode: cmdlib.LURemoveNode,
+    opcodes.OpSetNodeParams: cmdlib.LUSetNodeParams,
+    opcodes.OpPowercycleNode: cmdlib.LUPowercycleNode,
+    opcodes.OpEvacuateNode: cmdlib.LUEvacuateNode,
+    opcodes.OpMigrateNode: cmdlib.LUMigrateNode,
     # instance lu
     opcodes.OpCreateInstance: cmdlib.LUCreateInstance,
     opcodes.OpReinstallInstance: cmdlib.LUReinstallInstance,
@@ -68,7 +99,10 @@ class Processor(object):
     opcodes.OpRebootInstance: cmdlib.LURebootInstance,
     opcodes.OpDeactivateInstanceDisks: cmdlib.LUDeactivateInstanceDisks,
     opcodes.OpReplaceDisks: cmdlib.LUReplaceDisks,
+    opcodes.OpRecreateInstanceDisks: cmdlib.LURecreateInstanceDisks,
     opcodes.OpFailoverInstance: cmdlib.LUFailoverInstance,
+    opcodes.OpMigrateInstance: cmdlib.LUMigrateInstance,
+    opcodes.OpMoveInstance: cmdlib.LUMoveInstance,
     opcodes.OpConnectConsole: cmdlib.LUConnectConsole,
     opcodes.OpQueryInstances: cmdlib.LUQueryInstances,
     opcodes.OpQueryInstanceData: cmdlib.LUQueryInstanceData,
@@ -90,129 +124,231 @@ class Processor(object):
     opcodes.OpTestAllocator: cmdlib.LUTestAllocator,
     }
 
-  def __init__(self, context, feedback=None):
+  def __init__(self, context):
     """Constructor for Processor
 
     """Constructor for Processor
 
-    Args:
-     - feedback_fn: the feedback function (taking one string) to be run when
-                    interesting events are happening
     """
     self.context = context
     """
     self.context = context
-    self._feedback_fn = feedback
+    self._cbs = None
     self.exclusive_BGL = False
+    self.rpc = rpc.RpcRunner(context.cfg)
+    self.hmclass = HooksMaster
 
-  def ExecOpCode(self, op):
-    """Execute an opcode.
+  def _ReportLocks(self, level, names, shared, acquired):
+    """Reports lock operations.
 
-    Args:
-      op: the opcode to be executed
+    @type level: int
+    @param level: Lock level
+    @type names: list or string
+    @param names: Lock names
+    @type shared: bool
+    @param shared: Whether the lock should be acquired in shared mode
+    @type acquired: bool
+    @param acquired: Whether the lock has already been acquired
 
     """
 
     """
-    if not isinstance(op, opcodes.OpCode):
-      raise errors.ProgrammerError("Non-opcode instance passed"
-                                   " to ExecOpcode")
+    parts = []
 
-    lu_class = self.DISPATCH_TABLE.get(op.__class__, None)
-    if lu_class is None:
-      raise errors.OpCodeUnknown("Unknown opcode")
+    # Build message
+    if acquired:
+      parts.append("acquired")
+    else:
+      parts.append("waiting")
+
+    parts.append(locking.LEVEL_NAMES[level])
+
+    if names == locking.ALL_SET:
+      parts.append("ALL")
+    elif isinstance(names, basestring):
+      parts.append(names)
+    else:
+      parts.append(",".join(names))
 
-    if lu_class.REQ_WSSTORE:
-      sstore = ssconf.WritableSimpleStore()
+    if shared:
+      parts.append("shared")
     else:
-      sstore = ssconf.SimpleStore()
+      parts.append("exclusive")
 
+    msg = "/".join(parts)
+
+    logging.debug("LU locks %s", msg)
+
+    if self._cbs:
+      self._cbs.ReportLocks(msg)
+
+  def _ExecLU(self, lu):
+    """Logical Unit execution sequence.
+
+    """
     write_count = self.context.cfg.write_count
+    lu.CheckPrereq()
+    hm = HooksMaster(self.rpc.call_hooks_runner, lu)
+    h_results = hm.RunPhase(constants.HOOKS_PHASE_PRE)
+    lu.HooksCallBack(constants.HOOKS_PHASE_PRE, h_results,
+                     self._Feedback, None)
+
+    if getattr(lu.op, "dry_run", False):
+      # in this mode, no post-hooks are run, and the config is not
+      # written (as it might have been modified by another LU, and we
+      # shouldn't do writeout on behalf of other threads)
+      self.LogInfo("dry-run mode requested, not actually executing"
+                   " the operation")
+      return lu.dry_run_result
 
-    # Acquire the Big Ganeti Lock exclusively if this LU requires it, and in a
-    # shared fashion otherwise (to prevent concurrent run with an exclusive LU.
-    self.context.GLM.acquire(locking.LEVEL_CLUSTER, [locking.BGL],
-                             shared=not lu_class.REQ_BGL)
     try:
-      self.exclusive_BGL = lu_class.REQ_BGL
-      lu = lu_class(self, op, self.context.cfg, sstore)
-      lu.CheckPrereq()
-      hm = HooksMaster(rpc.call_hooks_runner, self, lu)
-      h_results = hm.RunPhase(constants.HOOKS_PHASE_PRE)
-      lu.HooksCallBack(constants.HOOKS_PHASE_PRE, h_results,
-                       self._feedback_fn, None)
-      try:
-        result = lu.Exec(self._feedback_fn)
-        h_results = hm.RunPhase(constants.HOOKS_PHASE_POST)
-        result = lu.HooksCallBack(constants.HOOKS_PHASE_POST, h_results,
-                                  self._feedback_fn, result)
-      finally:
-        # FIXME: This needs locks if not lu_class.REQ_BGL
-        if write_count != self.context.cfg.write_count:
-          hm.RunConfigUpdate()
+      result = lu.Exec(self._Feedback)
+      h_results = hm.RunPhase(constants.HOOKS_PHASE_POST)
+      result = lu.HooksCallBack(constants.HOOKS_PHASE_POST, h_results,
+                                self._Feedback, result)
     finally:
-      self.context.GLM.release(locking.LEVEL_CLUSTER)
-      self.exclusive_BGL = False
+      # FIXME: This needs locks if not lu_class.REQ_BGL
+      if write_count != self.context.cfg.write_count:
+        hm.RunConfigUpdate()
 
     return result
 
-  def ChainOpCode(self, op):
-    """Chain and execute an opcode.
+  def _LockAndExecLU(self, lu, level):
+    """Execute a Logical Unit, with the needed locks.
 
-    This is used by LUs when they need to execute a child LU.
+    This is a recursive function that starts locking the given level, and
+    proceeds up, till there are no more locks to acquire. Then it executes the
+    given LU and its opcodes.
 
-    Args:
-     - opcode: the opcode to be executed
+    """
+    adding_locks = level in lu.add_locks
+    acquiring_locks = level in lu.needed_locks
+    if level not in locking.LEVELS:
+      if self._cbs:
+        self._cbs.NotifyStart()
+
+      result = self._ExecLU(lu)
+    elif adding_locks and acquiring_locks:
+      # We could both acquire and add locks at the same level, but for now we
+      # don't need this, so we'll avoid the complicated code needed.
+      raise NotImplementedError(
+        "Can't declare locks to acquire when adding others")
+    elif adding_locks or acquiring_locks:
+      lu.DeclareLocks(level)
+      share = lu.share_locks[level]
+      if acquiring_locks:
+        needed_locks = lu.needed_locks[level]
+
+        self._ReportLocks(level, needed_locks, share, False)
+        lu.acquired_locks[level] = self.context.glm.acquire(level,
+                                                            needed_locks,
+                                                            shared=share)
+        self._ReportLocks(level, needed_locks, share, True)
+
+      else: # adding_locks
+        add_locks = lu.add_locks[level]
+        lu.remove_locks[level] = add_locks
+        try:
+          self.context.glm.add(level, add_locks, acquired=1, shared=share)
+        except errors.LockError:
+          raise errors.OpPrereqError(
+            "Couldn't add locks (%s), probably because of a race condition"
+            " with another job, who added them first" % add_locks)
+      try:
+        try:
+          if adding_locks:
+            lu.acquired_locks[level] = add_locks
+          result = self._LockAndExecLU(lu, level + 1)
+        finally:
+          if level in lu.remove_locks:
+            self.context.glm.remove(level, lu.remove_locks[level])
+      finally:
+        if self.context.glm.is_owned(level):
+          self.context.glm.release(level)
+    else:
+      result = self._LockAndExecLU(lu, level + 1)
+
+    return result
+
+  def ExecOpCode(self, op, cbs):
+    """Execute an opcode.
+
+    @type op: an OpCode instance
+    @param op: the opcode to be executed
+    @type cbs: L{OpExecCbBase}
+    @param cbs: Runtime callbacks
 
     """
     if not isinstance(op, opcodes.OpCode):
       raise errors.ProgrammerError("Non-opcode instance passed"
                                    " to ExecOpcode")
 
 
     """
     if not isinstance(op, opcodes.OpCode):
       raise errors.ProgrammerError("Non-opcode instance passed"
                                    " to ExecOpcode")
 
-    lu_class = self.DISPATCH_TABLE.get(op.__class__, None)
-    if lu_class is None:
-      raise errors.OpCodeUnknown("Unknown opcode")
+    self._cbs = cbs
+    try:
+      lu_class = self.DISPATCH_TABLE.get(op.__class__, None)
+      if lu_class is None:
+        raise errors.OpCodeUnknown("Unknown opcode")
+
+      # Acquire the Big Ganeti Lock exclusively if this LU requires it, and in a
+      # shared fashion otherwise (to prevent concurrent run with an exclusive
+      # LU).
+      self._ReportLocks(locking.LEVEL_CLUSTER, [locking.BGL],
+                        not lu_class.REQ_BGL, False)
+      try:
+        self.context.glm.acquire(locking.LEVEL_CLUSTER, [locking.BGL],
+                                 shared=not lu_class.REQ_BGL)
+      finally:
+        self._ReportLocks(locking.LEVEL_CLUSTER, [locking.BGL],
+                          not lu_class.REQ_BGL, True)
+      try:
+        self.exclusive_BGL = lu_class.REQ_BGL
+        lu = lu_class(self, op, self.context, self.rpc)
+        lu.ExpandNames()
+        assert lu.needed_locks is not None, "needed_locks not set by LU"
+        result = self._LockAndExecLU(lu, locking.LEVEL_INSTANCE)
+      finally:
+        self.context.glm.release(locking.LEVEL_CLUSTER)
+        self.exclusive_BGL = False
+    finally:
+      self._cbs = None
 
-    if lu_class.REQ_BGL and not self.exclusive_BGL:
-      raise errors.ProgrammerError("LUs which require the BGL cannot"
-                                   " be chained to granular ones.")
+    return result
 
-    if lu_class.REQ_WSSTORE:
-      sstore = ssconf.WritableSimpleStore()
-    else:
-      sstore = ssconf.SimpleStore()
+  def _Feedback(self, *args):
+    """Forward call to feedback callback function.
 
-    #do_hooks = lu_class.HPATH is not None
-    lu = lu_class(self, op, self.context.cfg, sstore)
-    lu.CheckPrereq()
-    #if do_hooks:
-    #  hm = HooksMaster(rpc.call_hooks_runner, self, lu)
-    #  h_results = hm.RunPhase(constants.HOOKS_PHASE_PRE)
-    #  lu.HooksCallBack(constants.HOOKS_PHASE_PRE,
-    #                   h_results, self._feedback_fn, None)
-    result = lu.Exec(self._feedback_fn)
-    #if do_hooks:
-    #  h_results = hm.RunPhase(constants.HOOKS_PHASE_POST)
-    #  result = lu.HooksCallBack(constants.HOOKS_PHASE_POST,
-    #                   h_results, self._feedback_fn, result)
-    return result
+    """
+    if self._cbs:
+      self._cbs.Feedback(*args)
 
   def LogStep(self, current, total, message):
     """Log a change in LU execution progress.
 
     """
-    logger.Debug("Step %d/%d %s" % (current, total, message))
-    self._feedback_fn("STEP %d/%d %s" % (current, total, message))
+    logging.debug("Step %d/%d %s", current, total, message)
+    self._Feedback("STEP %d/%d %s" % (current, total, message))
 
-  def LogWarning(self, message, hint=None):
+  def LogWarning(self, message, *args, **kwargs):
     """Log a warning to the logs and the user.
 
     """Log a warning to the logs and the user.
 
-    """
-    logger.Error(message)
-    self._feedback_fn(" - WARNING: %s" % message)
-    if hint:
-      self._feedback_fn("      Hint: %s" % hint)
+    The optional keyword argument is 'hint' and can be used to show a
+    hint to the user (presumably related to the warning). If the
+    message is empty, it will not be printed at all, allowing one to
+    show only a hint.
 
-  def LogInfo(self, message):
+    """
+    assert not kwargs or (len(kwargs) == 1 and "hint" in kwargs), \
+           "Invalid keyword arguments for LogWarning (%s)" % str(kwargs)
+    if args:
+      message = message % tuple(args)
+    if message:
+      logging.warning(message)
+      self._Feedback(" - WARNING: %s" % message)
+    if "hint" in kwargs:
+      self._Feedback("      Hint: %s" % kwargs["hint"])
+
+  def LogInfo(self, message, *args):
     """Log an informational message to the logs and the user.
 
     """
     """Log an informational message to the logs and the user.
 
     """
-    logger.Info(message)
-    self._feedback_fn(" - INFO: %s" % message)
+    if args:
+      message = message % tuple(args)
+    logging.info(message)
+    self._Feedback(" - INFO: %s" % message)
 
 
 class HooksMaster(object):
@@ -227,9 +363,8 @@ class HooksMaster(object):
   which behaves the same works.
 
   """
-  def __init__(self, callfn, proc, lu):
+  def __init__(self, callfn, lu):
     self.callfn = callfn
-    self.proc = proc
     self.lu = lu
     self.op = lu.op
     self.env, node_list_pre, node_list_post = self._BuildEnv()
@@ -270,48 +405,65 @@ class HooksMaster(object):
     env = self.env.copy()
     env["GANETI_HOOKS_PHASE"] = phase
     env["GANETI_HOOKS_PATH"] = hpath
-    if self.lu.sstore is not None:
-      env["GANETI_CLUSTER"] = self.lu.sstore.GetClusterName()
-      env["GANETI_MASTER"] = self.lu.sstore.GetMasterNode()
+    if self.lu.cfg is not None:
+      env["GANETI_CLUSTER"] = self.lu.cfg.GetClusterName()
+      env["GANETI_MASTER"] = self.lu.cfg.GetMasterNode()
 
     env = dict([(str(key), str(val)) for key, val in env.iteritems()])
 
     return self.callfn(node_list, hpath, phase, env)
 
-  def RunPhase(self, phase):
+  def RunPhase(self, phase, nodes=None):
     """Run all the scripts for a phase.
 
     This is the main function of the HookMaster.
 
     """Run all the scripts for a phase.
 
     This is the main function of the HookMaster.
 
-    Args:
-      phase: the hooks phase to run
-
-    Returns:
-      the result of the hooks multi-node rpc call
+    @param phase: one of L{constants.HOOKS_PHASE_POST} or
+        L{constants.HOOKS_PHASE_PRE}; it denotes the hooks phase
+    @param nodes: overrides the predefined list of nodes for the given phase
+    @return: the processed results of the hooks multi-node rpc call
+    @raise errors.HooksFailure: on communication failure to the nodes
+    @raise errors.HooksAbort: on failure of one of the hooks
 
     """
 
     """
-    if not self.node_list[phase]:
+    if not self.node_list[phase] and not nodes:
       # empty node list, we should not attempt to run this as either
       # we're in the cluster init phase and the rpc client part can't
       # even attempt to run, or this LU doesn't do hooks at all
       return
     hpath = self.lu.HPATH
-    results = self._RunWrapper(self.node_list[phase], hpath, phase)
-    if phase == constants.HOOKS_PHASE_PRE:
-      errs = []
-      if not results:
-        raise errors.HooksFailure("Communication failure")
-      for node_name in results:
-        res = results[node_name]
-        if res is False or not isinstance(res, list):
-          self.proc.LogWarning("Communication failure to node %s" % node_name)
-          continue
-        for script, hkr, output in res:
-          if hkr == constants.HKR_FAIL:
-            output = output.strip().encode("string_escape")
+    if nodes is not None:
+      results = self._RunWrapper(nodes, hpath, phase)
+    else:
+      results = self._RunWrapper(self.node_list[phase], hpath, phase)
+    errs = []
+    if not results:
+      msg = "Communication Failure"
+      if phase == constants.HOOKS_PHASE_PRE:
+        raise errors.HooksFailure(msg)
+      else:
+        self.lu.LogWarning(msg)
+        return results
+    for node_name in results:
+      res = results[node_name]
+      if res.offline:
+        continue
+      msg = res.fail_msg
+      if msg:
+        self.lu.LogWarning("Communication failure to node %s: %s",
+                           node_name, msg)
+        continue
+      for script, hkr, output in res.payload:
+        if hkr == constants.HKR_FAIL:
+          if phase == constants.HOOKS_PHASE_PRE:
             errs.append((node_name, script, output))
-      if errs:
-        raise errors.HooksAbort(errs)
+          else:
+            if not output:
+              output = "(no output)"
+            self.lu.LogWarning("On %s script %s failed, output: %s" %
+                               (node_name, script, output))
+    if errs and phase == constants.HOOKS_PHASE_PRE:
+      raise errors.HooksAbort(errs)
     return results
 
   def RunConfigUpdate(self):
@@ -323,7 +475,5 @@ class HooksMaster(object):
     """
     phase = constants.HOOKS_PHASE_POST
     hpath = constants.HOOKS_NAME_CFGUPDATE
     """
     phase = constants.HOOKS_PHASE_POST
     hpath = constants.HOOKS_NAME_CFGUPDATE
-    if self.lu.sstore is None:
-      raise errors.ProgrammerError("Null sstore on config update hook")
-    nodes = [self.lu.sstore.GetMasterNode()]
-    results = self._RunWrapper(nodes, hpath, phase)
+    nodes = [self.lu.cfg.GetMasterNode()]
+    self._RunWrapper(nodes, hpath, phase)
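
For illustration, the refactored interface is driven roughly as follows: Processor is now constructed with only a context object, and an OpExecCbBase-style callback object is handed to ExecOpCode in place of the old feedback function. Below is a minimal sketch, assuming a GanetiContext-like object exposing .cfg and .glm as used above; the _JobCallbacks class, the run_opcode helper and the logging calls are illustrative assumptions, not part of the patch.

import logging

from ganeti import mcpu


class _JobCallbacks(mcpu.OpExecCbBase):
  """Example callback object; each method mirrors the base class above."""

  def NotifyStart(self):
    # Invoked once all locks are held, just before LU.Exec() runs
    logging.info("opcode execution starting")

  def Feedback(self, *args):
    # Receives whatever the LU passes to its feedback function
    logging.info("feedback: %s", args)

  def ReportLocks(self, msg):
    # msg is the "/"-joined string built by Processor._ReportLocks,
    # e.g. "acquired/<level name>/ALL/shared"
    logging.debug("locks: %s", msg)


def run_opcode(context, op):
  # Hypothetical helper: 'context' stands in for the master daemon's
  # GanetiContext (providing .cfg and .glm), 'op' is an opcodes.OpCode.
  proc = mcpu.Processor(context)      # no feedback_fn argument any more
  return proc.ExecOpCode(op, _JobCallbacks())

During execution, LogWarning/LogInfo calls on the processor are forwarded to the same callback object through _Feedback, so one callbacks instance sees all user-visible output for the opcode.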