root / lib / mcpu.py @ 131178b9

#
#

# Copyright (C) 2006, 2007 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.


"""Module implementing the logic behind the cluster operations

This module implements the logic for doing operations in the cluster. There
are two kinds of classes defined:
  - logical units, which know how to deal with their specific opcode only
  - the processor, which dispatches the opcodes to their logical units

"""

import logging

from ganeti import opcodes
from ganeti import constants
from ganeti import errors
from ganeti import rpc
from ganeti import cmdlib
from ganeti import locking


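# Rough sketch of the dispatch flow implemented below (orientation only; the
# authoritative sequence lives in ExecOpCode, _LockAndExecLU and _ExecLU):
#
#   lu_class = Processor.DISPATCH_TABLE[op.__class__]  # opcode -> logical unit
#   the Big Ganeti Lock is acquired (exclusively if lu_class.REQ_BGL, shared
#   otherwise), the LU is instantiated and lu.ExpandNames() is run, the
#   remaining lock levels are acquired recursively, and finally
#   lu.CheckPrereq(), the pre-hooks, lu.Exec() and the post-hooks are run.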
class Processor(object):
  """Object which runs OpCodes"""
  DISPATCH_TABLE = {
    # Cluster
    opcodes.OpPostInitCluster: cmdlib.LUPostInitCluster,
    opcodes.OpDestroyCluster: cmdlib.LUDestroyCluster,
    opcodes.OpQueryClusterInfo: cmdlib.LUQueryClusterInfo,
    opcodes.OpVerifyCluster: cmdlib.LUVerifyCluster,
    opcodes.OpQueryConfigValues: cmdlib.LUQueryConfigValues,
    opcodes.OpRenameCluster: cmdlib.LURenameCluster,
    opcodes.OpVerifyDisks: cmdlib.LUVerifyDisks,
    opcodes.OpSetClusterParams: cmdlib.LUSetClusterParams,
    opcodes.OpRedistributeConfig: cmdlib.LURedistributeConfig,
    # node lu
    opcodes.OpAddNode: cmdlib.LUAddNode,
    opcodes.OpQueryNodes: cmdlib.LUQueryNodes,
    opcodes.OpQueryNodeVolumes: cmdlib.LUQueryNodeVolumes,
    opcodes.OpQueryNodeStorage: cmdlib.LUQueryNodeStorage,
    opcodes.OpModifyNodeStorage: cmdlib.LUModifyNodeStorage,
    opcodes.OpRemoveNode: cmdlib.LURemoveNode,
    opcodes.OpSetNodeParams: cmdlib.LUSetNodeParams,
    opcodes.OpPowercycleNode: cmdlib.LUPowercycleNode,
    opcodes.OpEvacuateNode: cmdlib.LUEvacuateNode,
    opcodes.OpMigrateNode: cmdlib.LUMigrateNode,
    # instance lu
    opcodes.OpCreateInstance: cmdlib.LUCreateInstance,
    opcodes.OpReinstallInstance: cmdlib.LUReinstallInstance,
    opcodes.OpRemoveInstance: cmdlib.LURemoveInstance,
    opcodes.OpRenameInstance: cmdlib.LURenameInstance,
    opcodes.OpActivateInstanceDisks: cmdlib.LUActivateInstanceDisks,
    opcodes.OpShutdownInstance: cmdlib.LUShutdownInstance,
    opcodes.OpStartupInstance: cmdlib.LUStartupInstance,
    opcodes.OpRebootInstance: cmdlib.LURebootInstance,
    opcodes.OpDeactivateInstanceDisks: cmdlib.LUDeactivateInstanceDisks,
    opcodes.OpReplaceDisks: cmdlib.LUReplaceDisks,
    opcodes.OpFailoverInstance: cmdlib.LUFailoverInstance,
    opcodes.OpMigrateInstance: cmdlib.LUMigrateInstance,
    opcodes.OpConnectConsole: cmdlib.LUConnectConsole,
    opcodes.OpQueryInstances: cmdlib.LUQueryInstances,
    opcodes.OpQueryInstanceData: cmdlib.LUQueryInstanceData,
    opcodes.OpSetInstanceParams: cmdlib.LUSetInstanceParams,
    opcodes.OpGrowDisk: cmdlib.LUGrowDisk,
    # os lu
    opcodes.OpDiagnoseOS: cmdlib.LUDiagnoseOS,
    # exports lu
    opcodes.OpQueryExports: cmdlib.LUQueryExports,
    opcodes.OpExportInstance: cmdlib.LUExportInstance,
    opcodes.OpRemoveExport: cmdlib.LURemoveExport,
    # tags lu
    opcodes.OpGetTags: cmdlib.LUGetTags,
    opcodes.OpSearchTags: cmdlib.LUSearchTags,
    opcodes.OpAddTags: cmdlib.LUAddTags,
    opcodes.OpDelTags: cmdlib.LUDelTags,
    # test lu
    opcodes.OpTestDelay: cmdlib.LUTestDelay,
    opcodes.OpTestAllocator: cmdlib.LUTestAllocator,
    }

  def __init__(self, context):
    """Constructor for Processor

    Args:
     - context: the context object, giving access to the cluster
                configuration (context.cfg) and to the global lock
                manager (context.glm)

    """
    self.context = context
    self._feedback_fn = None
    self.exclusive_BGL = False
    self.rpc = rpc.RpcRunner(context.cfg)

  def _ExecLU(self, lu):
    """Logical Unit execution sequence.

    """
    write_count = self.context.cfg.write_count
    lu.CheckPrereq()
    hm = HooksMaster(self.rpc.call_hooks_runner, self, lu)
    h_results = hm.RunPhase(constants.HOOKS_PHASE_PRE)
    lu.HooksCallBack(constants.HOOKS_PHASE_PRE, h_results,
                     self._feedback_fn, None)

    if getattr(lu.op, "dry_run", False):
      # in this mode, no post-hooks are run, and the config is not
      # written (as it might have been modified by another LU, and we
      # shouldn't do writeout on behalf of other threads)
      self.LogInfo("dry-run mode requested, not actually executing"
                   " the operation")
      return lu.dry_run_result

    try:
      result = lu.Exec(self._feedback_fn)
      h_results = hm.RunPhase(constants.HOOKS_PHASE_POST)
      result = lu.HooksCallBack(constants.HOOKS_PHASE_POST, h_results,
                                self._feedback_fn, result)
    finally:
      # FIXME: This needs locks if not lu_class.REQ_BGL
      if write_count != self.context.cfg.write_count:
        hm.RunConfigUpdate()

    return result

  def _LockAndExecLU(self, lu, level):
    """Execute a Logical Unit, with the needed locks.

    This is a recursive function that starts locking the given level, and
    proceeds up, until there are no more locks to acquire. Then it executes
    the given LU and its opcodes.

    """
    adding_locks = level in lu.add_locks
    acquiring_locks = level in lu.needed_locks
    if level not in locking.LEVELS:
      if callable(self._run_notifier):
        self._run_notifier()
      result = self._ExecLU(lu)
    elif adding_locks and acquiring_locks:
      # We could both acquire and add locks at the same level, but for now we
      # don't need this, so we'll avoid the complicated code needed.
      raise NotImplementedError(
        "Can't declare locks to acquire when adding others")
    elif adding_locks or acquiring_locks:
      lu.DeclareLocks(level)
      share = lu.share_locks[level]
      if acquiring_locks:
        needed_locks = lu.needed_locks[level]
        lu.acquired_locks[level] = self.context.glm.acquire(level,
                                                            needed_locks,
                                                            shared=share)
      else: # adding_locks
        add_locks = lu.add_locks[level]
        lu.remove_locks[level] = add_locks
        try:
          self.context.glm.add(level, add_locks, acquired=1, shared=share)
        except errors.LockError:
          raise errors.OpPrereqError(
            "Couldn't add locks (%s), probably because of a race condition"
            " with another job, which added them first" % add_locks)
      try:
        try:
          if adding_locks:
            lu.acquired_locks[level] = add_locks
          result = self._LockAndExecLU(lu, level + 1)
        finally:
          if level in lu.remove_locks:
            self.context.glm.remove(level, lu.remove_locks[level])
      finally:
        if self.context.glm.is_owned(level):
          self.context.glm.release(level)
    else:
      result = self._LockAndExecLU(lu, level + 1)

    return result

  def ExecOpCode(self, op, feedback_fn, run_notifier):
    """Execute an opcode.

    @type op: an OpCode instance
    @param op: the opcode to be executed
    @type feedback_fn: a function that takes a single argument
    @param feedback_fn: this function will be used as feedback from the LU
                        code to the end-user
    @type run_notifier: callable (no arguments) or None
    @param run_notifier: this function (if callable) will be called when
                         we are about to call the lu's Exec() method, that
                         is, after we have acquired all locks

    """
    if not isinstance(op, opcodes.OpCode):
      raise errors.ProgrammerError("Non-opcode instance passed"
                                   " to ExecOpCode")

    self._feedback_fn = feedback_fn
    self._run_notifier = run_notifier
    lu_class = self.DISPATCH_TABLE.get(op.__class__, None)
    if lu_class is None:
      raise errors.OpCodeUnknown("Unknown opcode")

    # Acquire the Big Ganeti Lock exclusively if this LU requires it, and in a
    # shared fashion otherwise (to prevent concurrent run with an exclusive LU)
    self.context.glm.acquire(locking.LEVEL_CLUSTER, [locking.BGL],
                             shared=not lu_class.REQ_BGL)
    try:
      self.exclusive_BGL = lu_class.REQ_BGL
      lu = lu_class(self, op, self.context, self.rpc)
      lu.ExpandNames()
      assert lu.needed_locks is not None, "needed_locks not set by LU"
      result = self._LockAndExecLU(lu, locking.LEVEL_INSTANCE)
    finally:
      self.context.glm.release(locking.LEVEL_CLUSTER)
      self.exclusive_BGL = False

    return result
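
  # Illustrative (hypothetical) caller-side use of ExecOpCode; in the real
  # tree the Processor is driven by the job-processing code rather than
  # called directly like this:
  #
  #   proc = Processor(context)
  #   result = proc.ExecOpCode(opcodes.OpQueryClusterInfo(),
  #                            feedback_fn=logging.info, run_notifier=None)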

  def LogStep(self, current, total, message):
    """Log a change in LU execution progress.

    """
    logging.debug("Step %d/%d %s", current, total, message)
    self._feedback_fn("STEP %d/%d %s" % (current, total, message))

  def LogWarning(self, message, *args, **kwargs):
    """Log a warning to the logs and the user.

    The optional keyword argument is 'hint' and can be used to show a
    hint to the user (presumably related to the warning). If the
    message is empty, it will not be printed at all, allowing one to
    show only a hint.

    """
    assert not kwargs or (len(kwargs) == 1 and "hint" in kwargs), \
           "Invalid keyword arguments for LogWarning (%s)" % str(kwargs)
    if args:
      message = message % tuple(args)
    if message:
      logging.warning(message)
      self._feedback_fn(" - WARNING: %s" % message)
    if "hint" in kwargs:
      self._feedback_fn("      Hint: %s" % kwargs["hint"])

  def LogInfo(self, message, *args):
    """Log an informational message to the logs and the user.

    """
    if args:
      message = message % tuple(args)
    logging.info(message)
    self._feedback_fn(" - INFO: %s" % message)


class HooksMaster(object):
  """Hooks master.

  This class distributes the run commands to the nodes based on the
  specific LU class.

  In order to remove the direct dependency on the rpc module, the
  constructor needs a function which actually does the remote
  call. This will usually be rpc.call_hooks_runner, but any function
  that behaves the same way will work.

  """
  def __init__(self, callfn, proc, lu):
    self.callfn = callfn
    self.proc = proc
    self.lu = lu
    self.op = lu.op
    self.env, node_list_pre, node_list_post = self._BuildEnv()
    self.node_list = {constants.HOOKS_PHASE_PRE: node_list_pre,
                      constants.HOOKS_PHASE_POST: node_list_post}
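
  # The hooks master is normally created by Processor._ExecLU, e.g.:
  #
  #   hm = HooksMaster(self.rpc.call_hooks_runner, self, lu)
  #   hm.RunPhase(constants.HOOKS_PHASE_PRE)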

  def _BuildEnv(self):
    """Compute the environment and the target nodes.

    Based on the opcode and the current node list, this builds the
    environment for the hooks and the target node list for the run.

    """
    env = {
      "PATH": "/sbin:/bin:/usr/sbin:/usr/bin",
      "GANETI_HOOKS_VERSION": constants.HOOKS_VERSION,
      "GANETI_OP_CODE": self.op.OP_ID,
      "GANETI_OBJECT_TYPE": self.lu.HTYPE,
      "GANETI_DATA_DIR": constants.DATA_DIR,
      }

    if self.lu.HPATH is not None:
      lu_env, lu_nodes_pre, lu_nodes_post = self.lu.BuildHooksEnv()
      if lu_env:
        for key in lu_env:
          env["GANETI_" + key] = lu_env[key]
    else:
      lu_nodes_pre = lu_nodes_post = []

    return env, frozenset(lu_nodes_pre), frozenset(lu_nodes_post)
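
  # Example of the kind of environment _BuildEnv produces for an
  # instance-level LU (key names come from the code above and from
  # lu.BuildHooksEnv(); the concrete values are illustrative only):
  #
  #   {"PATH": "/sbin:/bin:/usr/sbin:/usr/bin",
  #    "GANETI_HOOKS_VERSION": constants.HOOKS_VERSION,
  #    "GANETI_OP_CODE": "OP_INSTANCE_STARTUP",
  #    "GANETI_OBJECT_TYPE": "INSTANCE",
  #    "GANETI_DATA_DIR": constants.DATA_DIR,
  #    "GANETI_INSTANCE_NAME": "instance1.example.com"}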

  def _RunWrapper(self, node_list, hpath, phase):
    """Simple wrapper over self.callfn.

    This method completes the environment (hooks phase and path, plus
    cluster and master name) before doing the RPC call.

    """
    env = self.env.copy()
    env["GANETI_HOOKS_PHASE"] = phase
    env["GANETI_HOOKS_PATH"] = hpath
    if self.lu.cfg is not None:
      env["GANETI_CLUSTER"] = self.lu.cfg.GetClusterName()
      env["GANETI_MASTER"] = self.lu.cfg.GetMasterNode()

    env = dict([(str(key), str(val)) for key, val in env.iteritems()])

    return self.callfn(node_list, hpath, phase, env)

  def RunPhase(self, phase):
    """Run all the scripts for a phase.

    This is the main function of the HooksMaster.

    @param phase: one of L{constants.HOOKS_PHASE_POST} or
        L{constants.HOOKS_PHASE_PRE}; it denotes the hooks phase
    @return: the processed results of the hooks multi-node rpc call
    @raise errors.HooksFailure: on communication failure to the nodes

    """
    if not self.node_list[phase]:
      # empty node list, we should not attempt to run this as either
      # we're in the cluster init phase and the rpc client part can't
      # even attempt to run, or this LU doesn't do hooks at all
      return
    hpath = self.lu.HPATH
    results = self._RunWrapper(self.node_list[phase], hpath, phase)
    if phase == constants.HOOKS_PHASE_PRE:
      errs = []
      if not results:
        raise errors.HooksFailure("Communication failure")
      for node_name in results:
        res = results[node_name]
        if res.offline:
          continue
        msg = res.RemoteFailMsg()
        if msg:
          self.proc.LogWarning("Communication failure to node %s: %s",
                               node_name, msg)
          continue
        for script, hkr, output in res.payload:
          if hkr == constants.HKR_FAIL:
            errs.append((node_name, script, output))
      if errs:
        raise errors.HooksAbort(errs)
    return results
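
  # Note on the pre-phase handling above: each per-node result carries a
  # payload of (script, status, output) tuples; any entry whose status is
  # constants.HKR_FAIL is collected and aborts the operation via
  # errors.HooksAbort, while offline nodes are skipped and per-node RPC
  # failures only produce a warning.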

  def RunConfigUpdate(self):
    """Run the special configuration update hook.

    This is a special hook that runs only on the master after each
    top-level LU if the configuration has been updated.

    """
    phase = constants.HOOKS_PHASE_POST
    hpath = constants.HOOKS_NAME_CFGUPDATE
    nodes = [self.lu.cfg.GetMasterNode()]
    self._RunWrapper(nodes, hpath, phase)