root / lib / mcpu.py @ 26d3fd2f

#
#

# Copyright (C) 2006, 2007 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.


"""Module implementing the logic behind the cluster operations

This module implements the logic for doing operations in the cluster. There
are two kinds of classes defined:
  - logical units, which know how to deal with their specific opcode only
  - the processor, which dispatches the opcodes to their logical units

"""

import logging
import random
import time

from ganeti import opcodes
from ganeti import constants
from ganeti import errors
from ganeti import rpc
from ganeti import cmdlib
from ganeti import locking


class LockAcquireTimeout(Exception):
  """Exception to report timeouts on acquiring locks.

  """


def _CalculateLockAttemptTimeouts():
  """Calculate timeouts for lock attempts.

  """
  result = [1.0]

  # Wait for a total of at least 150s before doing a blocking acquire
  while sum(result) < 150.0:
    timeout = (result[-1] * 1.05) ** 1.25

    # Cap timeout at 10 seconds. This gives other jobs a chance to run
    # even if we're still trying to get our locks, before finally moving
    # to a blocking acquire.
    if timeout > 10.0:
      timeout = 10.0

    elif timeout < 0.1:
      # Lower boundary for safety
      timeout = 0.1

    result.append(timeout)

  return result
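
# Illustration only (not part of the module's API): the schedule above starts
# at 1.0s and grows slowly (roughly 1.0, 1.06, 1.15, 1.26, ...), with each
# attempt capped at 10.0s, until the attempts add up to at least 150s; only
# then does the caller fall back to a blocking acquire. A hypothetical way to
# inspect the schedule from a Python shell:
#
#   from ganeti import mcpu
#   timeouts = mcpu._CalculateLockAttemptTimeouts()
#   print len(timeouts), round(sum(timeouts), 2), timeouts[:4]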


class LockAttemptTimeoutStrategy(object):
  """Class with lock acquire timeout strategy.

  """
  __slots__ = [
    "_timeouts",
    "_random_fn",
    "_time_fn",
    ]

  _TIMEOUT_PER_ATTEMPT = _CalculateLockAttemptTimeouts()

  def __init__(self, _time_fn=time.time, _random_fn=random.random):
    """Initializes this class.

    @param _time_fn: Time function for unittests
    @param _random_fn: Random number generator for unittests

    """
    object.__init__(self)

    self._timeouts = iter(self._TIMEOUT_PER_ATTEMPT)
    self._time_fn = _time_fn
    self._random_fn = _random_fn

  def NextAttempt(self):
    """Returns the timeout for the next attempt.

    """
    try:
      timeout = self._timeouts.next()
    except StopIteration:
      # No more timeouts, do blocking acquire
      timeout = None

    if timeout is not None:
      # Add a small variation (-/+ 5%) to timeout. This helps in situations
      # where two or more jobs are fighting for the same lock(s).
      variation_range = timeout * 0.1
      timeout += ((self._random_fn() * variation_range) -
                  (variation_range * 0.5))

    return timeout
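
# Illustrative sketch only (not part of this module): a caller typically asks
# the strategy for a per-attempt timeout until the lock manager reports
# success, falling back to a blocking acquire once NextAttempt() returns None.
# The "acquire" callable and "names" below are stand-ins for something like
# locking.GanetiLockManager.acquire and the lock names being requested:
#
#   strategy = LockAttemptTimeoutStrategy()
#   while True:
#     timeout = strategy.NextAttempt()  # None means "block until acquired"
#     if acquire(names, timeout=timeout) is not None:
#       break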


class OpExecCbBase: # pylint: disable-msg=W0232
  """Base class for OpCode execution callbacks.

  """
  def NotifyStart(self):
    """Called when we are about to execute the LU.

    This function is called when we're about to start the lu's Exec() method,
    that is, after we have acquired all locks.

    """

  def Feedback(self, *args):
    """Sends feedback from the LU code to the end-user.

    """

  def CheckCancel(self):
    """Check whether job has been cancelled.

    """


class Processor(object):
  """Object which runs OpCodes"""
  DISPATCH_TABLE = {
    # Cluster
    opcodes.OpPostInitCluster: cmdlib.LUPostInitCluster,
    opcodes.OpDestroyCluster: cmdlib.LUDestroyCluster,
    opcodes.OpQueryClusterInfo: cmdlib.LUQueryClusterInfo,
    opcodes.OpVerifyCluster: cmdlib.LUVerifyCluster,
    opcodes.OpQueryConfigValues: cmdlib.LUQueryConfigValues,
    opcodes.OpRenameCluster: cmdlib.LURenameCluster,
    opcodes.OpVerifyDisks: cmdlib.LUVerifyDisks,
    opcodes.OpSetClusterParams: cmdlib.LUSetClusterParams,
    opcodes.OpRedistributeConfig: cmdlib.LURedistributeConfig,
    opcodes.OpRepairDiskSizes: cmdlib.LURepairDiskSizes,
    # node lu
    opcodes.OpAddNode: cmdlib.LUAddNode,
    opcodes.OpQueryNodes: cmdlib.LUQueryNodes,
    opcodes.OpQueryNodeVolumes: cmdlib.LUQueryNodeVolumes,
    opcodes.OpQueryNodeStorage: cmdlib.LUQueryNodeStorage,
    opcodes.OpModifyNodeStorage: cmdlib.LUModifyNodeStorage,
    opcodes.OpRepairNodeStorage: cmdlib.LURepairNodeStorage,
    opcodes.OpRemoveNode: cmdlib.LURemoveNode,
    opcodes.OpSetNodeParams: cmdlib.LUSetNodeParams,
    opcodes.OpPowercycleNode: cmdlib.LUPowercycleNode,
    opcodes.OpMigrateNode: cmdlib.LUMigrateNode,
    opcodes.OpNodeEvacuationStrategy: cmdlib.LUNodeEvacuationStrategy,
    # instance lu
    opcodes.OpCreateInstance: cmdlib.LUCreateInstance,
    opcodes.OpReinstallInstance: cmdlib.LUReinstallInstance,
    opcodes.OpRemoveInstance: cmdlib.LURemoveInstance,
    opcodes.OpRenameInstance: cmdlib.LURenameInstance,
    opcodes.OpActivateInstanceDisks: cmdlib.LUActivateInstanceDisks,
    opcodes.OpShutdownInstance: cmdlib.LUShutdownInstance,
    opcodes.OpStartupInstance: cmdlib.LUStartupInstance,
    opcodes.OpRebootInstance: cmdlib.LURebootInstance,
    opcodes.OpDeactivateInstanceDisks: cmdlib.LUDeactivateInstanceDisks,
    opcodes.OpReplaceDisks: cmdlib.LUReplaceDisks,
    opcodes.OpRecreateInstanceDisks: cmdlib.LURecreateInstanceDisks,
    opcodes.OpFailoverInstance: cmdlib.LUFailoverInstance,
    opcodes.OpMigrateInstance: cmdlib.LUMigrateInstance,
    opcodes.OpMoveInstance: cmdlib.LUMoveInstance,
    opcodes.OpConnectConsole: cmdlib.LUConnectConsole,
    opcodes.OpQueryInstances: cmdlib.LUQueryInstances,
    opcodes.OpQueryInstanceData: cmdlib.LUQueryInstanceData,
    opcodes.OpSetInstanceParams: cmdlib.LUSetInstanceParams,
    opcodes.OpGrowDisk: cmdlib.LUGrowDisk,
    # os lu
    opcodes.OpDiagnoseOS: cmdlib.LUDiagnoseOS,
    # exports lu
    opcodes.OpQueryExports: cmdlib.LUQueryExports,
    opcodes.OpPrepareExport: cmdlib.LUPrepareExport,
    opcodes.OpExportInstance: cmdlib.LUExportInstance,
    opcodes.OpRemoveExport: cmdlib.LURemoveExport,
    # tags lu
    opcodes.OpGetTags: cmdlib.LUGetTags,
    opcodes.OpSearchTags: cmdlib.LUSearchTags,
    opcodes.OpAddTags: cmdlib.LUAddTags,
    opcodes.OpDelTags: cmdlib.LUDelTags,
    # test lu
    opcodes.OpTestDelay: cmdlib.LUTestDelay,
    opcodes.OpTestAllocator: cmdlib.LUTestAllocator,
    opcodes.OpTestJobqueue: cmdlib.LUTestJobqueue,
    }

  def __init__(self, context, ec_id):
    """Constructor for Processor

    @type context: GanetiContext
    @param context: global Ganeti context
    @type ec_id: string
    @param ec_id: execution context identifier

    """
    self.context = context
    self._ec_id = ec_id
    self._cbs = None
    self.rpc = rpc.RpcRunner(context.cfg)
    self.hmclass = HooksMaster

  def _AcquireLocks(self, level, names, shared, timeout):
    """Acquires locks via the Ganeti lock manager.

    @type level: int
    @param level: Lock level
    @type names: list or string
    @param names: Lock names
    @type shared: bool
    @param shared: Whether the locks should be acquired in shared mode
    @type timeout: None or float
    @param timeout: Timeout for acquiring the locks

    """
    if self._cbs:
      self._cbs.CheckCancel()

    acquired = self.context.glm.acquire(level, names, shared=shared,
                                        timeout=timeout)

    return acquired

  def _ExecLU(self, lu):
    """Logical Unit execution sequence.

    """
    write_count = self.context.cfg.write_count
    lu.CheckPrereq()
    hm = HooksMaster(self.rpc.call_hooks_runner, lu)
    h_results = hm.RunPhase(constants.HOOKS_PHASE_PRE)
    lu.HooksCallBack(constants.HOOKS_PHASE_PRE, h_results,
                     self.Log, None)

    if getattr(lu.op, "dry_run", False):
      # in this mode, no post-hooks are run, and the config is not
      # written (as it might have been modified by another LU, and we
      # shouldn't do writeout on behalf of other threads)
      self.LogInfo("dry-run mode requested, not actually executing"
                   " the operation")
      return lu.dry_run_result

    try:
      result = lu.Exec(self.Log)
      h_results = hm.RunPhase(constants.HOOKS_PHASE_POST)
      result = lu.HooksCallBack(constants.HOOKS_PHASE_POST, h_results,
                                self.Log, result)
    finally:
      # FIXME: This needs locks if not lu_class.REQ_BGL
      if write_count != self.context.cfg.write_count:
        hm.RunConfigUpdate()

    return result

  def _LockAndExecLU(self, lu, level, calc_timeout):
    """Execute a Logical Unit, with the needed locks.

    This is a recursive function that starts locking the given level, and
    proceeds up, till there are no more locks to acquire. Then it executes the
    given LU and its opcodes.

    """
    adding_locks = level in lu.add_locks
    acquiring_locks = level in lu.needed_locks
    if level not in locking.LEVELS:
      if self._cbs:
        self._cbs.NotifyStart()

      result = self._ExecLU(lu)

    elif adding_locks and acquiring_locks:
      # We could both acquire and add locks at the same level, but for now we
      # don't need this, so we'll avoid the complicated code needed.
      raise NotImplementedError("Can't declare locks to acquire when adding"
                                " others")

    elif adding_locks or acquiring_locks:
      lu.DeclareLocks(level)
      share = lu.share_locks[level]

      try:
        assert adding_locks ^ acquiring_locks, \
          "Locks must be either added or acquired"

        if acquiring_locks:
          # Acquiring locks
          needed_locks = lu.needed_locks[level]

          acquired = self._AcquireLocks(level, needed_locks, share,
                                        calc_timeout())

          if acquired is None:
            raise LockAcquireTimeout()

        else:
          # Adding locks
          add_locks = lu.add_locks[level]
          lu.remove_locks[level] = add_locks

          try:
            self.context.glm.add(level, add_locks, acquired=1, shared=share)
          except errors.LockError:
            raise errors.OpPrereqError(
              "Couldn't add locks (%s), probably because of a race condition"
              " with another job, which added them first" % add_locks,
              errors.ECODE_FAULT)

          acquired = add_locks

        try:
          lu.acquired_locks[level] = acquired

          result = self._LockAndExecLU(lu, level + 1, calc_timeout)
        finally:
          if level in lu.remove_locks:
            self.context.glm.remove(level, lu.remove_locks[level])
      finally:
        if self.context.glm.is_owned(level):
          self.context.glm.release(level)

    else:
      result = self._LockAndExecLU(lu, level + 1, calc_timeout)

    return result

  def ExecOpCode(self, op, cbs, timeout=None):
    """Execute an opcode.

    @type op: an OpCode instance
    @param op: the opcode to be executed
    @type cbs: L{OpExecCbBase}
    @param cbs: Runtime callbacks
    @type timeout: float or None
    @param timeout: Maximum time to acquire all locks, None for no timeout
    @raise LockAcquireTimeout: In case locks couldn't be acquired in specified
        amount of time

    """
    if not isinstance(op, opcodes.OpCode):
      raise errors.ProgrammerError("Non-opcode instance passed"
                                   " to ExecOpCode")

    lu_class = self.DISPATCH_TABLE.get(op.__class__, None)
    if lu_class is None:
      raise errors.OpCodeUnknown("Unknown opcode")

    if timeout is None:
      calc_timeout = lambda: None
    else:
      calc_timeout = locking.RunningTimeout(timeout, False).Remaining

    self._cbs = cbs
    try:
      # Acquire the Big Ganeti Lock exclusively if this LU requires it,
      # and in a shared fashion otherwise (to prevent concurrent run with
      # an exclusive LU).
      if self._AcquireLocks(locking.LEVEL_CLUSTER, locking.BGL,
                            not lu_class.REQ_BGL, calc_timeout()) is None:
        raise LockAcquireTimeout()

      try:
        lu = lu_class(self, op, self.context, self.rpc)
        lu.ExpandNames()
        assert lu.needed_locks is not None, "needed_locks not set by LU"

        try:
          return self._LockAndExecLU(lu, locking.LEVEL_INSTANCE, calc_timeout)
        finally:
          if self._ec_id:
            self.context.cfg.DropECReservations(self._ec_id)
      finally:
        self.context.glm.release(locking.LEVEL_CLUSTER)
    finally:
      self._cbs = None
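
  # Illustration only (simplified; all names other than Processor,
  # ExecOpCode and LockAcquireTimeout are placeholders): the job queue is
  # what normally drives this method, roughly along these lines:
  #
  #   proc = Processor(queue_context, ec_id)
  #   try:
  #     result = proc.ExecOpCode(op, callbacks, timeout=lock_timeout)
  #   except LockAcquireTimeout:
  #     ...  # re-queue the opcode or retry with a longer timeout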

  def Log(self, *args):
    """Forward call to feedback callback function.

    """
    if self._cbs:
      self._cbs.Feedback(*args)

  def LogStep(self, current, total, message):
    """Log a change in LU execution progress.

    """
    logging.debug("Step %d/%d %s", current, total, message)
    self.Log("STEP %d/%d %s" % (current, total, message))

  def LogWarning(self, message, *args, **kwargs):
    """Log a warning to the logs and the user.

    The optional keyword argument is 'hint' and can be used to show a
    hint to the user (presumably related to the warning). If the
    message is empty, it will not be printed at all, allowing one to
    show only a hint.

    """
    assert not kwargs or (len(kwargs) == 1 and "hint" in kwargs), \
           "Invalid keyword arguments for LogWarning (%s)" % str(kwargs)
    if args:
      message = message % tuple(args)
    if message:
      logging.warning(message)
      self.Log(" - WARNING: %s" % message)
    if "hint" in kwargs:
      self.Log("      Hint: %s" % kwargs["hint"])

  def LogInfo(self, message, *args):
    """Log an informational message to the logs and the user.

    """
    if args:
      message = message % tuple(args)
    logging.info(message)
    self.Log(" - INFO: %s" % message)

  def GetECId(self):
    """Return the current execution context ID.

    """
    if not self._ec_id:
      raise errors.ProgrammerError("Tried to use execution context id when"
                                   " not set")
    return self._ec_id


class HooksMaster(object):
  """Hooks master.

  This class distributes the run commands to the nodes based on the
  specific LU class.

  In order to remove the direct dependency on the rpc module, the
  constructor needs a function which actually does the remote
  call. This will usually be rpc.call_hooks_runner, but any function
  which behaves the same works.

  """
  def __init__(self, callfn, lu):
    self.callfn = callfn
    self.lu = lu
    self.op = lu.op
    self.env, node_list_pre, node_list_post = self._BuildEnv()
    self.node_list = {constants.HOOKS_PHASE_PRE: node_list_pre,
                      constants.HOOKS_PHASE_POST: node_list_post}

  def _BuildEnv(self):
    """Compute the environment and the target nodes.

    Based on the opcode and the current node list, this builds the
    environment for the hooks and the target node list for the run.

    """
    env = {
      "PATH": "/sbin:/bin:/usr/sbin:/usr/bin",
      "GANETI_HOOKS_VERSION": constants.HOOKS_VERSION,
      "GANETI_OP_CODE": self.op.OP_ID,
      "GANETI_OBJECT_TYPE": self.lu.HTYPE,
      "GANETI_DATA_DIR": constants.DATA_DIR,
      }

    if self.lu.HPATH is not None:
      lu_env, lu_nodes_pre, lu_nodes_post = self.lu.BuildHooksEnv()
      if lu_env:
        for key in lu_env:
          env["GANETI_" + key] = lu_env[key]
    else:
      lu_nodes_pre = lu_nodes_post = []

    return env, frozenset(lu_nodes_pre), frozenset(lu_nodes_post)

  def _RunWrapper(self, node_list, hpath, phase):
    """Simple wrapper over self.callfn.

    This method fixes the environment before doing the rpc call.

    """
    env = self.env.copy()
    env["GANETI_HOOKS_PHASE"] = phase
    env["GANETI_HOOKS_PATH"] = hpath
    if self.lu.cfg is not None:
      env["GANETI_CLUSTER"] = self.lu.cfg.GetClusterName()
      env["GANETI_MASTER"] = self.lu.cfg.GetMasterNode()

    env = dict([(str(key), str(val)) for key, val in env.iteritems()])

    return self.callfn(node_list, hpath, phase, env)
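
  # For reference (derived from the code above): a hook script ends up with an
  # environment containing PATH, GANETI_HOOKS_VERSION, GANETI_OP_CODE,
  # GANETI_OBJECT_TYPE, GANETI_DATA_DIR, GANETI_HOOKS_PHASE, GANETI_HOOKS_PATH,
  # GANETI_CLUSTER and GANETI_MASTER (when a config is available), plus one
  # GANETI_-prefixed variable for each key returned by the LU's BuildHooksEnv.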

  def RunPhase(self, phase, nodes=None):
    """Run all the scripts for a phase.

    This is the main function of the HooksMaster.

    @param phase: one of L{constants.HOOKS_PHASE_POST} or
        L{constants.HOOKS_PHASE_PRE}; it denotes the hooks phase
    @param nodes: overrides the predefined list of nodes for the given phase
    @return: the processed results of the hooks multi-node rpc call
    @raise errors.HooksFailure: on communication failure to the nodes
    @raise errors.HooksAbort: on failure of one of the hooks

    """
    if not self.node_list[phase] and not nodes:
      # empty node list, we should not attempt to run this as either
      # we're in the cluster init phase and the rpc client part can't
      # even attempt to run, or this LU doesn't do hooks at all
      return
    hpath = self.lu.HPATH
    if nodes is not None:
      results = self._RunWrapper(nodes, hpath, phase)
    else:
      results = self._RunWrapper(self.node_list[phase], hpath, phase)
    errs = []
    if not results:
      msg = "Communication Failure"
      if phase == constants.HOOKS_PHASE_PRE:
        raise errors.HooksFailure(msg)
      else:
        self.lu.LogWarning(msg)
        return results
    for node_name in results:
      res = results[node_name]
      if res.offline:
        continue
      msg = res.fail_msg
      if msg:
        self.lu.LogWarning("Communication failure to node %s: %s",
                           node_name, msg)
        continue
      for script, hkr, output in res.payload:
        if hkr == constants.HKR_FAIL:
          if phase == constants.HOOKS_PHASE_PRE:
            errs.append((node_name, script, output))
          else:
            if not output:
              output = "(no output)"
            self.lu.LogWarning("On %s script %s failed, output: %s" %
                               (node_name, script, output))
    if errs and phase == constants.HOOKS_PHASE_PRE:
      raise errors.HooksAbort(errs)
    return results

  def RunConfigUpdate(self):
    """Run the special configuration update hook.

    This is a special hook that runs only on the master after each
    top-level LU if the configuration has been updated.

    """
    phase = constants.HOOKS_PHASE_POST
    hpath = constants.HOOKS_NAME_CFGUPDATE
    nodes = [self.lu.cfg.GetMasterNode()]
    self._RunWrapper(nodes, hpath, phase)
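
# Illustration only: because HooksMaster needs nothing more than a callable
# with the signature (node_list, hpath, phase, env), any stand-in with that
# signature can replace rpc.call_hooks_runner (e.g. in unit tests). The
# Processor itself wires it up in _ExecLU along these lines:
#
#   hm = HooksMaster(self.rpc.call_hooks_runner, lu)
#   hm.RunPhase(constants.HOOKS_PHASE_PRE)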