#
#

# Copyright (C) 2006, 2007 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.


"""Module implementing the logic behind the cluster operations

This module implements the logic for doing operations in the cluster. There
are two kinds of classes defined:
  - logical units, which know how to deal with their specific opcode only
  - the processor, which dispatches the opcodes to their logical units

"""

import logging

from ganeti import opcodes
from ganeti import constants
from ganeti import errors
from ganeti import rpc
from ganeti import cmdlib
from ganeti import locking


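# Illustrative overview (added comment, not part of the original module): the
# master daemon builds a Processor around its shared context and feeds it
# opcodes; the names "context" and "feedback_fn" below are placeholders for
# whatever the caller provides.
#
#   proc = Processor(context)
#   op = opcodes.OpQueryClusterInfo()
#   result = proc.ExecOpCode(op, feedback_fn, None)
#
# ExecOpCode looks up the LU class for the opcode in Processor.DISPATCH_TABLE,
# acquires the needed locks, runs the LU and returns its result.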
class Processor(object):
  """Object which runs OpCodes"""
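  # Maps each supported opcode class to the logical unit (LU) class that
  # implements it; ExecOpCode consults this table and raises OpCodeUnknown
  # for opcodes that have no entry here.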
  DISPATCH_TABLE = {
    # Cluster
    opcodes.OpPostInitCluster: cmdlib.LUPostInitCluster,
    opcodes.OpDestroyCluster: cmdlib.LUDestroyCluster,
    opcodes.OpQueryClusterInfo: cmdlib.LUQueryClusterInfo,
    opcodes.OpVerifyCluster: cmdlib.LUVerifyCluster,
    opcodes.OpQueryConfigValues: cmdlib.LUQueryConfigValues,
    opcodes.OpRenameCluster: cmdlib.LURenameCluster,
    opcodes.OpVerifyDisks: cmdlib.LUVerifyDisks,
    opcodes.OpSetClusterParams: cmdlib.LUSetClusterParams,
    opcodes.OpRedistributeConfig: cmdlib.LURedistributeConfig,
    opcodes.OpRepairDiskSizes: cmdlib.LURepairDiskSizes,
    # node lu
    opcodes.OpAddNode: cmdlib.LUAddNode,
    opcodes.OpQueryNodes: cmdlib.LUQueryNodes,
    opcodes.OpQueryNodeVolumes: cmdlib.LUQueryNodeVolumes,
    opcodes.OpQueryNodeStorage: cmdlib.LUQueryNodeStorage,
    opcodes.OpModifyNodeStorage: cmdlib.LUModifyNodeStorage,
    opcodes.OpRemoveNode: cmdlib.LURemoveNode,
    opcodes.OpSetNodeParams: cmdlib.LUSetNodeParams,
    opcodes.OpPowercycleNode: cmdlib.LUPowercycleNode,
    opcodes.OpEvacuateNode: cmdlib.LUEvacuateNode,
    opcodes.OpMigrateNode: cmdlib.LUMigrateNode,
    # instance lu
    opcodes.OpCreateInstance: cmdlib.LUCreateInstance,
    opcodes.OpReinstallInstance: cmdlib.LUReinstallInstance,
    opcodes.OpRemoveInstance: cmdlib.LURemoveInstance,
    opcodes.OpRenameInstance: cmdlib.LURenameInstance,
    opcodes.OpActivateInstanceDisks: cmdlib.LUActivateInstanceDisks,
    opcodes.OpShutdownInstance: cmdlib.LUShutdownInstance,
    opcodes.OpStartupInstance: cmdlib.LUStartupInstance,
    opcodes.OpRebootInstance: cmdlib.LURebootInstance,
    opcodes.OpDeactivateInstanceDisks: cmdlib.LUDeactivateInstanceDisks,
    opcodes.OpReplaceDisks: cmdlib.LUReplaceDisks,
    opcodes.OpRecreateInstanceDisks: cmdlib.LURecreateInstanceDisks,
    opcodes.OpFailoverInstance: cmdlib.LUFailoverInstance,
    opcodes.OpMigrateInstance: cmdlib.LUMigrateInstance,
    opcodes.OpConnectConsole: cmdlib.LUConnectConsole,
    opcodes.OpQueryInstances: cmdlib.LUQueryInstances,
    opcodes.OpQueryInstanceData: cmdlib.LUQueryInstanceData,
    opcodes.OpSetInstanceParams: cmdlib.LUSetInstanceParams,
    opcodes.OpGrowDisk: cmdlib.LUGrowDisk,
    # os lu
    opcodes.OpDiagnoseOS: cmdlib.LUDiagnoseOS,
    # exports lu
    opcodes.OpQueryExports: cmdlib.LUQueryExports,
    opcodes.OpExportInstance: cmdlib.LUExportInstance,
    opcodes.OpRemoveExport: cmdlib.LURemoveExport,
    # tags lu
    opcodes.OpGetTags: cmdlib.LUGetTags,
    opcodes.OpSearchTags: cmdlib.LUSearchTags,
    opcodes.OpAddTags: cmdlib.LUAddTags,
    opcodes.OpDelTags: cmdlib.LUDelTags,
    # test lu
    opcodes.OpTestDelay: cmdlib.LUTestDelay,
    opcodes.OpTestAllocator: cmdlib.LUTestAllocator,
    }

  def __init__(self, context):
    """Constructor for Processor

    Args:
     - context: the context object (providing the cluster configuration via
                its cfg attribute and the locking manager via glm) on whose
                behalf this processor runs opcodes
    """
    self.context = context
    self._feedback_fn = None
    self.exclusive_BGL = False
    self.rpc = rpc.RpcRunner(context.cfg)

  def _ExecLU(self, lu):
    """Logical Unit execution sequence.

    """
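    # Execution sequence: check prerequisites, run the pre-phase hooks,
    # execute the LU, run the post-phase hooks, and finally run the
    # config-update hook if the configuration was written in the meantime.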
    write_count = self.context.cfg.write_count
    lu.CheckPrereq()
    hm = HooksMaster(self.rpc.call_hooks_runner, self, lu)
    h_results = hm.RunPhase(constants.HOOKS_PHASE_PRE)
    lu.HooksCallBack(constants.HOOKS_PHASE_PRE, h_results,
                     self._feedback_fn, None)

    if getattr(lu.op, "dry_run", False):
      # in this mode, no post-hooks are run, and the config is not
      # written (as it might have been modified by another LU, and we
      # shouldn't do writeout on behalf of other threads)
      self.LogInfo("dry-run mode requested, not actually executing"
                   " the operation")
      return lu.dry_run_result

    try:
      result = lu.Exec(self._feedback_fn)
      h_results = hm.RunPhase(constants.HOOKS_PHASE_POST)
      result = lu.HooksCallBack(constants.HOOKS_PHASE_POST, h_results,
                                self._feedback_fn, result)
    finally:
      # FIXME: This needs locks if not lu_class.REQ_BGL
      if write_count != self.context.cfg.write_count:
        hm.RunConfigUpdate()

    return result

  def _LockAndExecLU(self, lu, level):
    """Execute a Logical Unit, with the needed locks.

    This is a recursive function that starts locking at the given level and
    proceeds up until there are no more levels left to lock. It then
    executes the given LU and its opcode.

    """
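    # Each recursion step handles one level of locking.LEVELS; once level is
    # past the last known level, all declared locks are held and the LU is
    # finally executed via _ExecLU.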
    adding_locks = level in lu.add_locks
    acquiring_locks = level in lu.needed_locks
    if level not in locking.LEVELS:
      if callable(self._run_notifier):
        self._run_notifier()
      result = self._ExecLU(lu)
    elif adding_locks and acquiring_locks:
      # We could both acquire and add locks at the same level, but for now we
      # don't need this, so we'll avoid the complicated code needed.
      raise NotImplementedError(
        "Can't declare locks to acquire when adding others")
    elif adding_locks or acquiring_locks:
      lu.DeclareLocks(level)
      share = lu.share_locks[level]
      if acquiring_locks:
        needed_locks = lu.needed_locks[level]
        lu.acquired_locks[level] = self.context.glm.acquire(level,
                                                            needed_locks,
                                                            shared=share)
      else: # adding_locks
        add_locks = lu.add_locks[level]
        lu.remove_locks[level] = add_locks
        try:
          self.context.glm.add(level, add_locks, acquired=1, shared=share)
        except errors.LockError:
          raise errors.OpPrereqError(
            "Couldn't add locks (%s), probably because of a race condition"
            " with another job, which added them first" % add_locks)
      try:
        try:
          if adding_locks:
            lu.acquired_locks[level] = add_locks
          result = self._LockAndExecLU(lu, level + 1)
        finally:
          if level in lu.remove_locks:
            self.context.glm.remove(level, lu.remove_locks[level])
      finally:
        if self.context.glm.is_owned(level):
          self.context.glm.release(level)
    else:
      result = self._LockAndExecLU(lu, level + 1)

    return result

  def ExecOpCode(self, op, feedback_fn, run_notifier):
    """Execute an opcode.

    @type op: an OpCode instance
    @param op: the opcode to be executed
    @type feedback_fn: a function that takes a single argument
    @param feedback_fn: this function will be used as feedback from the LU
                        code to the end-user
    @type run_notifier: callable (no arguments) or None
    @param run_notifier:  this function (if callable) will be called when
                          we are about to call the lu's Exec() method, that
                          is, after we have acquired all locks

    """
    if not isinstance(op, opcodes.OpCode):
      raise errors.ProgrammerError("Non-opcode instance passed"
                                   " to ExecOpCode")

    self._feedback_fn = feedback_fn
    self._run_notifier = run_notifier
    lu_class = self.DISPATCH_TABLE.get(op.__class__, None)
    if lu_class is None:
      raise errors.OpCodeUnknown("Unknown opcode")

    # Acquire the Big Ganeti Lock exclusively if this LU requires it, and in a
    # shared fashion otherwise (to prevent a concurrent run with an exclusive
    # LU).
    self.context.glm.acquire(locking.LEVEL_CLUSTER, [locking.BGL],
                             shared=not lu_class.REQ_BGL)
    try:
      self.exclusive_BGL = lu_class.REQ_BGL
      lu = lu_class(self, op, self.context, self.rpc)
      lu.ExpandNames()
      assert lu.needed_locks is not None, "needed_locks not set by LU"
      result = self._LockAndExecLU(lu, locking.LEVEL_INSTANCE)
    finally:
      self.context.glm.release(locking.LEVEL_CLUSTER)
      self.exclusive_BGL = False

    return result

  def LogStep(self, current, total, message):
    """Log a change in LU execution progress.

    """
    logging.debug("Step %d/%d %s", current, total, message)
    self._feedback_fn("STEP %d/%d %s" % (current, total, message))

  def LogWarning(self, message, *args, **kwargs):
    """Log a warning to the logs and the user.

    The optional keyword argument is 'hint' and can be used to show a
    hint to the user (presumably related to the warning). If the
    message is empty, it will not be printed at all, allowing one to
    show only a hint.

    """
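    # Illustrative call (added example; node_name is a placeholder):
    #   self.LogWarning("Communication failure to node %s", node_name,
    #                   hint="check the node's network connectivity")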
    assert not kwargs or (len(kwargs) == 1 and "hint" in kwargs), \
           "Invalid keyword arguments for LogWarning (%s)" % str(kwargs)
    if args:
      message = message % tuple(args)
    if message:
      logging.warning(message)
      self._feedback_fn(" - WARNING: %s" % message)
    if "hint" in kwargs:
      self._feedback_fn("      Hint: %s" % kwargs["hint"])

  def LogInfo(self, message, *args):
    """Log an informational message to the logs and the user.

    """
    if args:
      message = message % tuple(args)
    logging.info(message)
    self._feedback_fn(" - INFO: %s" % message)


class HooksMaster(object):
  """Hooks master.

  This class distributes the run commands to the nodes based on the
  specific LU class.

  In order to remove the direct dependency on the rpc module, the
  constructor needs a function which actually does the remote
  call. This will usually be rpc.call_hooks_runner, but any function
  which behaves the same works.

  """
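  # Illustrative sketch (added comment): because the remote call is injected,
  # any callable with the rpc.call_hooks_runner signature can be passed in,
  # e.g. a stub for testing; proc and lu below are placeholders.
  #
  #   def fake_hooks_runner(node_list, hpath, phase, env):
  #     return {}  # pretend no node returned hook results
  #
  #   hm = HooksMaster(fake_hooks_runner, proc, lu)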
  def __init__(self, callfn, proc, lu):
    self.callfn = callfn
    self.proc = proc
    self.lu = lu
    self.op = lu.op
    self.env, node_list_pre, node_list_post = self._BuildEnv()
    self.node_list = {constants.HOOKS_PHASE_PRE: node_list_pre,
                      constants.HOOKS_PHASE_POST: node_list_post}

  def _BuildEnv(self):
    """Compute the environment and the target nodes.

    Based on the opcode and the current node list, this builds the
    environment for the hooks and the target node list for the run.

    """
    env = {
      "PATH": "/sbin:/bin:/usr/sbin:/usr/bin",
      "GANETI_HOOKS_VERSION": constants.HOOKS_VERSION,
      "GANETI_OP_CODE": self.op.OP_ID,
      "GANETI_OBJECT_TYPE": self.lu.HTYPE,
      "GANETI_DATA_DIR": constants.DATA_DIR,
      }

    if self.lu.HPATH is not None:
      lu_env, lu_nodes_pre, lu_nodes_post = self.lu.BuildHooksEnv()
      if lu_env:
        for key in lu_env:
          env["GANETI_" + key] = lu_env[key]
    else:
      lu_nodes_pre = lu_nodes_post = []

    return env, frozenset(lu_nodes_pre), frozenset(lu_nodes_post)

  def _RunWrapper(self, node_list, hpath, phase):
    """Simple wrapper over self.callfn.

    This method fixes the environment before doing the rpc call.

    """
    env = self.env.copy()
    env["GANETI_HOOKS_PHASE"] = phase
    env["GANETI_HOOKS_PATH"] = hpath
    if self.lu.cfg is not None:
      env["GANETI_CLUSTER"] = self.lu.cfg.GetClusterName()
      env["GANETI_MASTER"] = self.lu.cfg.GetMasterNode()

    env = dict([(str(key), str(val)) for key, val in env.iteritems()])

    return self.callfn(node_list, hpath, phase, env)

  def RunPhase(self, phase):
    """Run all the scripts for a phase.

    This is the main function of the HooksMaster.

    @param phase: one of L{constants.HOOKS_PHASE_POST} or
        L{constants.HOOKS_PHASE_PRE}; it denotes the hooks phase
    @return: the processed results of the hooks multi-node rpc call
    @raise errors.HooksFailure: on communication failure to the nodes

    """
    if not self.node_list[phase]:
      # empty node list, we should not attempt to run this, as either
      # we're in the cluster init phase and the rpc client part can't
      # even attempt to run, or this LU doesn't do hooks at all
      return
    hpath = self.lu.HPATH
    results = self._RunWrapper(self.node_list[phase], hpath, phase)
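    # Only the pre-phase results are inspected here: a failed hook script
    # (HKR_FAIL) on any responding node aborts the LU via HooksAbort, while
    # post-phase results are returned to the caller unchanged.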
    if phase == constants.HOOKS_PHASE_PRE:
      errs = []
      if not results:
        raise errors.HooksFailure("Communication failure")
      for node_name in results:
        res = results[node_name]
        if res.offline:
          continue
        msg = res.RemoteFailMsg()
        if msg:
          self.proc.LogWarning("Communication failure to node %s: %s",
                               node_name, msg)
          continue
        for script, hkr, output in res.payload:
          if hkr == constants.HKR_FAIL:
            errs.append((node_name, script, output))
      if errs:
        raise errors.HooksAbort(errs)
    return results

  def RunConfigUpdate(self):
    """Run the special configuration update hook.

    This is a special hook that runs only on the master after each
    top-level LU if the configuration has been updated.

    """
    phase = constants.HOOKS_PHASE_POST
    hpath = constants.HOOKS_NAME_CFGUPDATE
    nodes = [self.lu.cfg.GetMasterNode()]
    self._RunWrapper(nodes, hpath, phase)