Statistics
| Branch: | Tag: | Revision:

root / lib / mcpu.py @ efb8da02

History | View | Annotate | Download (13.7 kB)

1
#
2
#
3

    
4
# Copyright (C) 2006, 2007 Google Inc.
5
#
6
# This program is free software; you can redistribute it and/or modify
7
# it under the terms of the GNU General Public License as published by
8
# the Free Software Foundation; either version 2 of the License, or
9
# (at your option) any later version.
10
#
11
# This program is distributed in the hope that it will be useful, but
12
# WITHOUT ANY WARRANTY; without even the implied warranty of
13
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14
# General Public License for more details.
15
#
16
# You should have received a copy of the GNU General Public License
17
# along with this program; if not, write to the Free Software
18
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
19
# 02110-1301, USA.
20

    
21

    
22
"""Module implementing the logic behind the cluster operations
23

24
This module implements the logic for doing operations in the cluster. There
25
are two kinds of classes defined:
26
  - logical units, which know how to deal with their specific opcode only
27
  - the processor, which dispatches the opcodes to their logical units
28

29
"""
30

    
31
import logging
32

    
33
from ganeti import opcodes
34
from ganeti import constants
35
from ganeti import errors
36
from ganeti import rpc
37
from ganeti import cmdlib
38
from ganeti import locking
39

    
40

    
41
class Processor(object):
  """Object which runs OpCodes"""
  # Maps each opcode class to the Logical Unit (LU) class that implements
  # it; ExecOpCode rejects any opcode not listed here.
  DISPATCH_TABLE = {
    # Cluster
    opcodes.OpDestroyCluster: cmdlib.LUDestroyCluster,
    opcodes.OpQueryClusterInfo: cmdlib.LUQueryClusterInfo,
    opcodes.OpVerifyCluster: cmdlib.LUVerifyCluster,
    opcodes.OpQueryConfigValues: cmdlib.LUQueryConfigValues,
    opcodes.OpRenameCluster: cmdlib.LURenameCluster,
    opcodes.OpVerifyDisks: cmdlib.LUVerifyDisks,
    opcodes.OpSetClusterParams: cmdlib.LUSetClusterParams,
    opcodes.OpRedistributeConfig: cmdlib.LURedistributeConfig,
    # node lu
    opcodes.OpAddNode: cmdlib.LUAddNode,
    opcodes.OpQueryNodes: cmdlib.LUQueryNodes,
    opcodes.OpQueryNodeVolumes: cmdlib.LUQueryNodeVolumes,
    opcodes.OpQueryNodeStorage: cmdlib.LUQueryNodeStorage,
    opcodes.OpModifyNodeStorage: cmdlib.LUModifyNodeStorage,
    opcodes.OpRemoveNode: cmdlib.LURemoveNode,
    opcodes.OpSetNodeParams: cmdlib.LUSetNodeParams,
    opcodes.OpPowercycleNode: cmdlib.LUPowercycleNode,
    opcodes.OpEvacuateNode: cmdlib.LUEvacuateNode,
    opcodes.OpMigrateNode: cmdlib.LUMigrateNode,
    # instance lu
    opcodes.OpCreateInstance: cmdlib.LUCreateInstance,
    opcodes.OpReinstallInstance: cmdlib.LUReinstallInstance,
    opcodes.OpRemoveInstance: cmdlib.LURemoveInstance,
    opcodes.OpRenameInstance: cmdlib.LURenameInstance,
    opcodes.OpActivateInstanceDisks: cmdlib.LUActivateInstanceDisks,
    opcodes.OpShutdownInstance: cmdlib.LUShutdownInstance,
    opcodes.OpStartupInstance: cmdlib.LUStartupInstance,
    opcodes.OpRebootInstance: cmdlib.LURebootInstance,
    opcodes.OpDeactivateInstanceDisks: cmdlib.LUDeactivateInstanceDisks,
    opcodes.OpReplaceDisks: cmdlib.LUReplaceDisks,
    opcodes.OpFailoverInstance: cmdlib.LUFailoverInstance,
    opcodes.OpMigrateInstance: cmdlib.LUMigrateInstance,
    opcodes.OpConnectConsole: cmdlib.LUConnectConsole,
    opcodes.OpQueryInstances: cmdlib.LUQueryInstances,
    opcodes.OpQueryInstanceData: cmdlib.LUQueryInstanceData,
    opcodes.OpSetInstanceParams: cmdlib.LUSetInstanceParams,
    opcodes.OpGrowDisk: cmdlib.LUGrowDisk,
    # os lu
    opcodes.OpDiagnoseOS: cmdlib.LUDiagnoseOS,
    # exports lu
    opcodes.OpQueryExports: cmdlib.LUQueryExports,
    opcodes.OpExportInstance: cmdlib.LUExportInstance,
    opcodes.OpRemoveExport: cmdlib.LURemoveExport,
    # tags lu
    opcodes.OpGetTags: cmdlib.LUGetTags,
    opcodes.OpSearchTags: cmdlib.LUSearchTags,
    opcodes.OpAddTags: cmdlib.LUAddTags,
    opcodes.OpDelTags: cmdlib.LUDelTags,
    # test lu
    opcodes.OpTestDelay: cmdlib.LUTestDelay,
    opcodes.OpTestAllocator: cmdlib.LUTestAllocator,
    }

  def __init__(self, context):
    """Constructor for Processor

    Args:
     - context: the shared execution context; it must provide a ``cfg``
                configuration object and a ``glm`` lock manager, both of
                which are used while executing opcodes

    """
    self.context = context
    # set per-opcode by ExecOpCode before any LU code runs
    self._feedback_fn = None
    # True only while an LU with REQ_BGL holds the Big Ganeti Lock
    # exclusively (managed by ExecOpCode)
    self.exclusive_BGL = False
    self.rpc = rpc.RpcRunner(context.cfg)

  def _ExecLU(self, lu):
    """Logical Unit execution sequence.

    Runs the LU's prerequisite checks and the pre-phase hooks, executes
    the LU (unless dry-run was requested), then runs the post-phase
    hooks and, if the configuration was written during execution, the
    special config-update hook.

    """
    # remember the config write counter so we can detect whether the LU
    # caused a configuration writeout (checked in the finally clause)
    write_count = self.context.cfg.write_count
    lu.CheckPrereq()
    hm = HooksMaster(self.rpc.call_hooks_runner, self, lu)
    h_results = hm.RunPhase(constants.HOOKS_PHASE_PRE)
    lu.HooksCallBack(constants.HOOKS_PHASE_PRE, h_results,
                     self._feedback_fn, None)

    if getattr(lu.op, "dry_run", False):
      # in this mode, no post-hooks are run, and the config is not
      # written (as it might have been modified by another LU, and we
      # shouldn't do writeout on behalf of other threads)
      self.LogInfo("dry-run mode requested, not actually executing"
                   " the operation")
      return lu.dry_run_result

    try:
      result = lu.Exec(self._feedback_fn)
      h_results = hm.RunPhase(constants.HOOKS_PHASE_POST)
      # the post-hook callback may transform the result before we
      # return it to the caller
      result = lu.HooksCallBack(constants.HOOKS_PHASE_POST, h_results,
                                self._feedback_fn, result)
    finally:
      # FIXME: This needs locks if not lu_class.REQ_BGL
      if write_count != self.context.cfg.write_count:
        hm.RunConfigUpdate()

    return result

  def _LockAndExecLU(self, lu, level):
    """Execute a Logical Unit, with the needed locks.

    This is a recursive function that starts locking the given level, and
    proceeds up, till there are no more locks to acquire. Then it executes the
    given LU and its opcodes.

    """
    adding_locks = level in lu.add_locks
    acquiring_locks = level in lu.needed_locks
    if level not in locking.LEVELS:
      # recursion base case: past the last locking level, every needed
      # lock is held; notify the caller (if requested) and run the LU
      if callable(self._run_notifier):
        self._run_notifier()
      result = self._ExecLU(lu)
    elif adding_locks and acquiring_locks:
      # We could both acquire and add locks at the same level, but for now we
      # don't need this, so we'll avoid the complicated code needed.
      raise NotImplementedError(
        "Can't declare locks to acquire when adding others")
    elif adding_locks or acquiring_locks:
      lu.DeclareLocks(level)
      share = lu.share_locks[level]
      if acquiring_locks:
        needed_locks = lu.needed_locks[level]
        lu.acquired_locks[level] = self.context.glm.acquire(level,
                                                            needed_locks,
                                                            shared=share)
      else: # adding_locks
        add_locks = lu.add_locks[level]
        # locks we add here are scheduled for removal on the way out
        lu.remove_locks[level] = add_locks
        try:
          self.context.glm.add(level, add_locks, acquired=1, shared=share)
        except errors.LockError:
          raise errors.OpPrereqError(
            "Couldn't add locks (%s), probably because of a race condition"
            " with another job, who added them first" % add_locks)
      try:
        try:
          if adding_locks:
            lu.acquired_locks[level] = add_locks
          # recurse into the next locking level
          result = self._LockAndExecLU(lu, level + 1)
        finally:
          if level in lu.remove_locks:
            self.context.glm.remove(level, lu.remove_locks[level])
      finally:
        # release anything we still own at this level, whether the LU
        # succeeded or raised
        if self.context.glm.is_owned(level):
          self.context.glm.release(level)
    else:
      # nothing to lock at this level, just descend
      result = self._LockAndExecLU(lu, level + 1)

    return result

  def ExecOpCode(self, op, feedback_fn, run_notifier):
    """Execute an opcode.

    @type op: an OpCode instance
    @param op: the opcode to be executed
    @type feedback_fn: a function that takes a single argument
    @param feedback_fn: this function will be used as feedback from the LU
                        code to the end-user
    @type run_notifier: callable (no arguments) or None
    @param run_notifier:  this function (if callable) will be called when
                          we are about to call the lu's Exec() method, that
                          is, after we have acquired all locks
    @raise errors.ProgrammerError: if op is not an OpCode instance
    @raise errors.OpCodeUnknown: if op has no entry in DISPATCH_TABLE

    """
    if not isinstance(op, opcodes.OpCode):
      raise errors.ProgrammerError("Non-opcode instance passed"
                                   " to ExecOpcode")

    self._feedback_fn = feedback_fn
    self._run_notifier = run_notifier
    lu_class = self.DISPATCH_TABLE.get(op.__class__, None)
    if lu_class is None:
      raise errors.OpCodeUnknown("Unknown opcode")

    # Acquire the Big Ganeti Lock exclusively if this LU requires it, and in a
    # shared fashion otherwise (to prevent concurrent run with an exclusive LU.
    self.context.glm.acquire(locking.LEVEL_CLUSTER, [locking.BGL],
                             shared=not lu_class.REQ_BGL)
    try:
      self.exclusive_BGL = lu_class.REQ_BGL
      lu = lu_class(self, op, self.context, self.rpc)
      lu.ExpandNames()
      assert lu.needed_locks is not None, "needed_locks not set by LU"
      result = self._LockAndExecLU(lu, locking.LEVEL_INSTANCE)
    finally:
      self.context.glm.release(locking.LEVEL_CLUSTER)
      self.exclusive_BGL = False

    return result

  def LogStep(self, current, total, message):
    """Log a change in LU execution progress.

    @param current: the number of the current step
    @param total: the total number of steps
    @param message: description of the step

    """
    logging.debug("Step %d/%d %s", current, total, message)
    self._feedback_fn("STEP %d/%d %s" % (current, total, message))

  def LogWarning(self, message, *args, **kwargs):
    """Log a warning to the logs and the user.

    The optional keyword argument is 'hint' and can be used to show a
    hint to the user (presumably related to the warning). If the
    message is empty, it will not be printed at all, allowing one to
    show only a hint.

    """
    assert not kwargs or (len(kwargs) == 1 and "hint" in kwargs), \
           "Invalid keyword arguments for LogWarning (%s)" % str(kwargs)
    if args:
      # old-style %-interpolation with the positional arguments
      message = message % tuple(args)
    if message:
      logging.warning(message)
      self._feedback_fn(" - WARNING: %s" % message)
    if "hint" in kwargs:
      self._feedback_fn("      Hint: %s" % kwargs["hint"])

  def LogInfo(self, message, *args):
    """Log an informational message to the logs and the user.

    """
    if args:
      message = message % tuple(args)
    logging.info(message)
    self._feedback_fn(" - INFO: %s" % message)
267

    
268

    
269
class HooksMaster(object):
  """Hooks master.

  This class distributes the run commands to the nodes based on the
  specific LU class.

  In order to remove the direct dependency on the rpc module, the
  constructor needs a function which actually does the remote
  call. This will usually be rpc.call_hooks_runner, but any function
  which behaves the same works.

  """
  def __init__(self, callfn, proc, lu):
    # callfn(node_list, hpath, phase, env) performs the actual remote
    # hooks call (normally rpc.call_hooks_runner)
    self.callfn = callfn
    # the Processor instance, used for user-visible warnings
    self.proc = proc
    self.lu = lu
    self.op = lu.op
    # the environment is shared by both phases; only the target node
    # lists differ per phase
    self.env, node_list_pre, node_list_post = self._BuildEnv()
    self.node_list = {constants.HOOKS_PHASE_PRE: node_list_pre,
                      constants.HOOKS_PHASE_POST: node_list_post}

  def _BuildEnv(self):
    """Compute the environment and the target nodes.

    Based on the opcode and the current node list, this builds the
    environment for the hooks and the target node list for the run.

    @return: tuple of (env dict, frozenset of pre-phase nodes,
        frozenset of post-phase nodes)

    """
    env = {
      "PATH": "/sbin:/bin:/usr/sbin:/usr/bin",
      "GANETI_HOOKS_VERSION": constants.HOOKS_VERSION,
      "GANETI_OP_CODE": self.op.OP_ID,
      "GANETI_OBJECT_TYPE": self.lu.HTYPE,
      "GANETI_DATA_DIR": constants.DATA_DIR,
      }

    if self.lu.HPATH is not None:
      # the LU contributes its own variables, namespaced under GANETI_
      lu_env, lu_nodes_pre, lu_nodes_post = self.lu.BuildHooksEnv()
      if lu_env:
        for key in lu_env:
          env["GANETI_" + key] = lu_env[key]
    else:
      # no hooks path declared: nothing to run on any node
      lu_nodes_pre = lu_nodes_post = []

    return env, frozenset(lu_nodes_pre), frozenset(lu_nodes_post)

  def _RunWrapper(self, node_list, hpath, phase):
    """Simple wrapper over self.callfn.

    This method fixes the environment before doing the rpc call.

    @param node_list: the nodes on which to run the hooks
    @param hpath: the hooks path (subdirectory) to run
    @param phase: the hooks phase (pre or post)
    @return: whatever self.callfn returns

    """
    env = self.env.copy()
    env["GANETI_HOOKS_PHASE"] = phase
    env["GANETI_HOOKS_PATH"] = hpath
    if self.lu.cfg is not None:
      env["GANETI_CLUSTER"] = self.lu.cfg.GetClusterName()
      env["GANETI_MASTER"] = self.lu.cfg.GetMasterNode()

    # hooks environments must be string-only
    env = dict([(str(key), str(val)) for key, val in env.iteritems()])

    return self.callfn(node_list, hpath, phase, env)

  def RunPhase(self, phase):
    """Run all the scripts for a phase.

    This is the main function of the HooksMaster.

    @param phase: one of L{constants.HOOKS_PHASE_POST} or
        L{constants.HOOKS_PHASE_PRE}; it denotes the hooks phase
    @return: the processed results of the hooks multi-node rpc call
    @raise errors.HooksFailure: on communication failure to the nodes
    @raise errors.HooksAbort: on pre-phase hook failure on any node

    """
    if not self.node_list[phase]:
      # empty node list, we should not attempt to run this as either
      # we're in the cluster init phase and the rpc client part can't
      # even attempt to run, or this LU doesn't do hooks at all
      return
    hpath = self.lu.HPATH
    results = self._RunWrapper(self.node_list[phase], hpath, phase)
    if phase == constants.HOOKS_PHASE_PRE:
      # pre-phase failures abort the operation; post-phase results are
      # only returned to the caller
      errs = []
      if not results:
        raise errors.HooksFailure("Communication failure")
      for node_name in results:
        res = results[node_name]
        if res.offline:
          # offline nodes are silently skipped
          continue
        msg = res.RemoteFailMsg()
        if msg:
          # rpc-level failure: warn, but don't abort on this node alone
          self.proc.LogWarning("Communication failure to node %s: %s",
                               node_name, msg)
          continue
        for script, hkr, output in res.payload:
          if hkr == constants.HKR_FAIL:
            errs.append((node_name, script, output))
      if errs:
        raise errors.HooksAbort(errs)
    return results

  def RunConfigUpdate(self):
    """Run the special configuration update hook

    This is a special hook that runs only on the master after each
    top-level LU if the configuration has been updated.

    """
    phase = constants.HOOKS_PHASE_POST
    hpath = constants.HOOKS_NAME_CFGUPDATE
    nodes = [self.lu.cfg.GetMasterNode()]
    self._RunWrapper(nodes, hpath, phase)