Statistics
| Branch: | Tag: | Revision:

root / lib / mcpu.py @ b1ee5610

History | View | Annotate | Download (18.6 kB)

1
#
2
#
3

    
4
# Copyright (C) 2006, 2007 Google Inc.
5
#
6
# This program is free software; you can redistribute it and/or modify
7
# it under the terms of the GNU General Public License as published by
8
# the Free Software Foundation; either version 2 of the License, or
9
# (at your option) any later version.
10
#
11
# This program is distributed in the hope that it will be useful, but
12
# WITHOUT ANY WARRANTY; without even the implied warranty of
13
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14
# General Public License for more details.
15
#
16
# You should have received a copy of the GNU General Public License
17
# along with this program; if not, write to the Free Software
18
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
19
# 02110-1301, USA.
20

    
21

    
22
"""Module implementing the logic behind the cluster operations
23

24
This module implements the logic for doing operations in the cluster. There
25
are two kinds of classes defined:
26
  - logical units, which know how to deal with their specific opcode only
27
  - the processor, which dispatches the opcodes to their logical units
28

29
"""
30

    
31
import logging
32
import random
33
import time
34

    
35
from ganeti import opcodes
36
from ganeti import constants
37
from ganeti import errors
38
from ganeti import rpc
39
from ganeti import cmdlib
40
from ganeti import locking
41
from ganeti import utils
42

    
43

    
44
class LockAcquireTimeout(Exception):
  """Signals that the requested locks could not be acquired in time.

  """
48

    
49

    
50
def _CalculateLockAttemptTimeouts():
51
  """Calculate timeouts for lock attempts.
52

53
  """
54
  result = [1.0]
55

    
56
  # Wait for a total of at least 150s before doing a blocking acquire
57
  while sum(result) < 150.0:
58
    timeout = (result[-1] * 1.05) ** 1.25
59

    
60
    # Cap timeout at 10 seconds. This gives other jobs a chance to run
61
    # even if we're still trying to get our locks, before finally moving
62
    # to a blocking acquire.
63
    if timeout > 10.0:
64
      timeout = 10.0
65

    
66
    elif timeout < 0.1:
67
      # Lower boundary for safety
68
      timeout = 0.1
69

    
70
    result.append(timeout)
71

    
72
  return result
73

    
74

    
75
class LockAttemptTimeoutStrategy(object):
  """Class with lock acquire timeout strategy.

  Yields one timeout per acquisition attempt (with a small random
  variation), then C{None} once the attempts are exhausted, at which
  point callers should do a blocking acquire.

  """
  __slots__ = [
    "_timeouts",
    "_random_fn",
    "_time_fn",
    ]

  # Shared, precomputed base timeouts; each instance iterates its own copy
  _TIMEOUT_PER_ATTEMPT = _CalculateLockAttemptTimeouts()

  def __init__(self, _time_fn=time.time, _random_fn=random.random):
    """Initializes this class.

    @param _time_fn: Time function for unittests
    @param _random_fn: Random number generator for unittests

    """
    object.__init__(self)

    self._timeouts = iter(self._TIMEOUT_PER_ATTEMPT)
    # NOTE(review): _time_fn is stored but not read anywhere in this
    # class; presumably kept for unittests/callers — confirm before removal
    self._time_fn = _time_fn
    self._random_fn = _random_fn

  def NextAttempt(self):
    """Returns the timeout for the next attempt.

    @return: timeout in seconds for the next acquisition attempt, or
        C{None} once all per-attempt timeouts have been consumed

    """
    try:
      # Use the next() builtin instead of the Python-2-only ".next()"
      # iterator method; behavior is identical on Python 2.6+ and 3.x
      timeout = next(self._timeouts)
    except StopIteration:
      # No more timeouts, do blocking acquire
      timeout = None

    if timeout is not None:
      # Add a small variation (-/+ 5%) to timeout. This helps in situations
      # where two or more jobs are fighting for the same lock(s).
      variation_range = timeout * 0.1
      timeout += ((self._random_fn() * variation_range) -
                  (variation_range * 0.5))

    return timeout
118

    
119

    
120
class OpExecCbBase: # pylint: disable-msg=W0232
  """Base class for OpCode execution callbacks.

  All callbacks are no-ops here; subclasses override the ones they need.

  """
  def NotifyStart(self):
    """Notifies that the LU is about to be executed.

    Invoked just before the LU's Exec() method runs, i.e. once every
    required lock has been acquired.

    """

  def Feedback(self, *args):
    """Forwards feedback from the LU code to the end-user.

    """

  def CheckCancel(self):
    """Checks whether the job has been cancelled.

    """
141

    
142

    
143
class Processor(object):
  """Object which runs OpCodes.

  Maps each opcode class to its Logical Unit, acquires the needed locks
  and executes the LU, running hooks around the execution.

  """
  DISPATCH_TABLE = {
    # Cluster
    opcodes.OpPostInitCluster: cmdlib.LUPostInitCluster,
    opcodes.OpDestroyCluster: cmdlib.LUDestroyCluster,
    opcodes.OpQueryClusterInfo: cmdlib.LUQueryClusterInfo,
    opcodes.OpVerifyCluster: cmdlib.LUVerifyCluster,
    opcodes.OpQueryConfigValues: cmdlib.LUQueryConfigValues,
    opcodes.OpRenameCluster: cmdlib.LURenameCluster,
    opcodes.OpVerifyDisks: cmdlib.LUVerifyDisks,
    opcodes.OpSetClusterParams: cmdlib.LUSetClusterParams,
    opcodes.OpRedistributeConfig: cmdlib.LURedistributeConfig,
    opcodes.OpRepairDiskSizes: cmdlib.LURepairDiskSizes,
    opcodes.OpQuery: cmdlib.LUQuery,
    opcodes.OpQueryFields: cmdlib.LUQueryFields,
    # node lu
    opcodes.OpAddNode: cmdlib.LUAddNode,
    opcodes.OpQueryNodes: cmdlib.LUQueryNodes,
    opcodes.OpQueryNodeVolumes: cmdlib.LUQueryNodeVolumes,
    opcodes.OpQueryNodeStorage: cmdlib.LUQueryNodeStorage,
    opcodes.OpModifyNodeStorage: cmdlib.LUModifyNodeStorage,
    opcodes.OpRepairNodeStorage: cmdlib.LURepairNodeStorage,
    opcodes.OpRemoveNode: cmdlib.LURemoveNode,
    opcodes.OpSetNodeParams: cmdlib.LUSetNodeParams,
    opcodes.OpPowercycleNode: cmdlib.LUPowercycleNode,
    opcodes.OpMigrateNode: cmdlib.LUMigrateNode,
    opcodes.OpNodeEvacuationStrategy: cmdlib.LUNodeEvacuationStrategy,
    # instance lu
    opcodes.OpCreateInstance: cmdlib.LUCreateInstance,
    opcodes.OpReinstallInstance: cmdlib.LUReinstallInstance,
    opcodes.OpRemoveInstance: cmdlib.LURemoveInstance,
    opcodes.OpRenameInstance: cmdlib.LURenameInstance,
    opcodes.OpActivateInstanceDisks: cmdlib.LUActivateInstanceDisks,
    opcodes.OpShutdownInstance: cmdlib.LUShutdownInstance,
    opcodes.OpStartupInstance: cmdlib.LUStartupInstance,
    opcodes.OpRebootInstance: cmdlib.LURebootInstance,
    opcodes.OpDeactivateInstanceDisks: cmdlib.LUDeactivateInstanceDisks,
    opcodes.OpReplaceDisks: cmdlib.LUReplaceDisks,
    opcodes.OpRecreateInstanceDisks: cmdlib.LURecreateInstanceDisks,
    opcodes.OpFailoverInstance: cmdlib.LUFailoverInstance,
    opcodes.OpMigrateInstance: cmdlib.LUMigrateInstance,
    opcodes.OpMoveInstance: cmdlib.LUMoveInstance,
    opcodes.OpConnectConsole: cmdlib.LUConnectConsole,
    opcodes.OpQueryInstances: cmdlib.LUQueryInstances,
    opcodes.OpQueryInstanceData: cmdlib.LUQueryInstanceData,
    opcodes.OpSetInstanceParams: cmdlib.LUSetInstanceParams,
    opcodes.OpGrowDisk: cmdlib.LUGrowDisk,
    # node group lu
    opcodes.OpAddGroup: cmdlib.LUAddGroup,
    opcodes.OpQueryGroups: cmdlib.LUQueryGroups,
    # os lu
    opcodes.OpDiagnoseOS: cmdlib.LUDiagnoseOS,
    # exports lu
    opcodes.OpQueryExports: cmdlib.LUQueryExports,
    opcodes.OpPrepareExport: cmdlib.LUPrepareExport,
    opcodes.OpExportInstance: cmdlib.LUExportInstance,
    opcodes.OpRemoveExport: cmdlib.LURemoveExport,
    # tags lu
    opcodes.OpGetTags: cmdlib.LUGetTags,
    opcodes.OpSearchTags: cmdlib.LUSearchTags,
    opcodes.OpAddTags: cmdlib.LUAddTags,
    opcodes.OpDelTags: cmdlib.LUDelTags,
    # test lu
    opcodes.OpTestDelay: cmdlib.LUTestDelay,
    opcodes.OpTestAllocator: cmdlib.LUTestAllocator,
    opcodes.OpTestJobqueue: cmdlib.LUTestJobqueue,
    # OOB lu
    opcodes.OpOutOfBand: cmdlib.LUOutOfBand,
    }

  def __init__(self, context, ec_id):
    """Constructor for Processor

    @type context: GanetiContext
    @param context: global Ganeti context
    @type ec_id: string
    @param ec_id: execution context identifier

    """
    self.context = context
    self._ec_id = ec_id
    # Runtime callbacks (L{OpExecCbBase}); only set while ExecOpCode runs
    self._cbs = None
    self.rpc = rpc.RpcRunner(context.cfg)
    self.hmclass = HooksMaster

  def _AcquireLocks(self, level, names, shared, timeout, priority):
    """Acquires locks via the Ganeti lock manager.

    @type level: int
    @param level: Lock level
    @type names: list or string
    @param names: Lock names
    @type shared: bool
    @param shared: Whether the locks should be acquired in shared mode
    @type timeout: None or float
    @param timeout: Timeout for acquiring the locks
    @raise LockAcquireTimeout: In case locks couldn't be acquired in specified
        amount of time

    """
    # Give the job a chance to bail out before (possibly) blocking on locks
    if self._cbs:
      self._cbs.CheckCancel()

    acquired = self.context.glm.acquire(level, names, shared=shared,
                                        timeout=timeout, priority=priority)

    # The lock manager signals a timeout by returning None
    if acquired is None:
      raise LockAcquireTimeout()

    return acquired

  def _ExecLU(self, lu):
    """Logical Unit execution sequence.

    Runs prerequisites checks, pre-hooks, the LU itself and post-hooks,
    plus the config-update hook if the configuration was written.

    @param lu: the Logical Unit instance to execute
    @return: the LU's execution result (or its dry-run result)

    """
    # Remember the config generation so we can tell whether the LU wrote it
    write_count = self.context.cfg.write_count
    lu.CheckPrereq()
    hm = HooksMaster(self.rpc.call_hooks_runner, lu)
    h_results = hm.RunPhase(constants.HOOKS_PHASE_PRE)
    lu.HooksCallBack(constants.HOOKS_PHASE_PRE, h_results,
                     self.Log, None)

    if getattr(lu.op, "dry_run", False):
      # in this mode, no post-hooks are run, and the config is not
      # written (as it might have been modified by another LU, and we
      # shouldn't do writeout on behalf of other threads
      self.LogInfo("dry-run mode requested, not actually executing"
                   " the operation")
      return lu.dry_run_result

    try:
      result = lu.Exec(self.Log)
      h_results = hm.RunPhase(constants.HOOKS_PHASE_POST)
      result = lu.HooksCallBack(constants.HOOKS_PHASE_POST, h_results,
                                self.Log, result)
    finally:
      # FIXME: This needs locks if not lu_class.REQ_BGL
      if write_count != self.context.cfg.write_count:
        hm.RunConfigUpdate()

    return result

  def _LockAndExecLU(self, lu, level, calc_timeout, priority):
    """Execute a Logical Unit, with the needed locks.

    This is a recursive function that starts locking the given level, and
    proceeds up, till there are no more locks to acquire. Then it executes the
    given LU and its opcodes.

    """
    adding_locks = level in lu.add_locks
    acquiring_locks = level in lu.needed_locks
    if level not in locking.LEVELS:
      # Past the last level: all locks are held, run the LU
      if self._cbs:
        self._cbs.NotifyStart()

      result = self._ExecLU(lu)

    elif adding_locks and acquiring_locks:
      # We could both acquire and add locks at the same level, but for now we
      # don't need this, so we'll avoid the complicated code needed.
      raise NotImplementedError("Can't declare locks to acquire when adding"
                                " others")

    elif adding_locks or acquiring_locks:
      lu.DeclareLocks(level)
      share = lu.share_locks[level]

      try:
        assert adding_locks ^ acquiring_locks, \
          "Locks must be either added or acquired"

        if acquiring_locks:
          # Acquiring locks
          needed_locks = lu.needed_locks[level]

          acquired = self._AcquireLocks(level, needed_locks, share,
                                        calc_timeout(), priority)
        else:
          # Adding locks
          add_locks = lu.add_locks[level]
          # Locks added here are removed again after execution
          lu.remove_locks[level] = add_locks

          try:
            self.context.glm.add(level, add_locks, acquired=1, shared=share)
          except errors.LockError:
            raise errors.OpPrereqError(
              "Couldn't add locks (%s), probably because of a race condition"
              " with another job, who added them first" % add_locks,
              errors.ECODE_FAULT)

          acquired = add_locks

        try:
          lu.acquired_locks[level] = acquired

          # Recurse into the next lock level
          result = self._LockAndExecLU(lu, level + 1, calc_timeout, priority)
        finally:
          if level in lu.remove_locks:
            self.context.glm.remove(level, lu.remove_locks[level])
      finally:
        if self.context.glm.is_owned(level):
          self.context.glm.release(level)

    else:
      # Nothing to do at this level, continue with the next one
      result = self._LockAndExecLU(lu, level + 1, calc_timeout, priority)

    return result

  def ExecOpCode(self, op, cbs, timeout=None, priority=None):
    """Execute an opcode.

    @type op: an OpCode instance
    @param op: the opcode to be executed
    @type cbs: L{OpExecCbBase}
    @param cbs: Runtime callbacks
    @type timeout: float or None
    @param timeout: Maximum time to acquire all locks, None for no timeout
    @type priority: number or None
    @param priority: Priority for acquiring lock(s)
    @raise LockAcquireTimeout: In case locks couldn't be acquired in specified
        amount of time

    """
    if not isinstance(op, opcodes.OpCode):
      raise errors.ProgrammerError("Non-opcode instance passed"
                                   " to ExecOpcode")

    lu_class = self.DISPATCH_TABLE.get(op.__class__, None)
    if lu_class is None:
      raise errors.OpCodeUnknown("Unknown opcode")

    if timeout is None:
      calc_timeout = lambda: None
    else:
      # Remaining() returns the time left of the overall budget
      calc_timeout = utils.RunningTimeout(timeout, False).Remaining

    self._cbs = cbs
    try:
      # Acquire the Big Ganeti Lock exclusively if this LU requires it,
      # and in a shared fashion otherwise (to prevent concurrent run with
      # an exclusive LU)
      self._AcquireLocks(locking.LEVEL_CLUSTER, locking.BGL,
                         not lu_class.REQ_BGL, calc_timeout(),
                         priority)
      try:
        lu = lu_class(self, op, self.context, self.rpc)
        lu.ExpandNames()
        assert lu.needed_locks is not None, "needed_locks not set by LU"

        try:
          return self._LockAndExecLU(lu, locking.LEVEL_INSTANCE, calc_timeout,
                                     priority)
        finally:
          if self._ec_id:
            self.context.cfg.DropECReservations(self._ec_id)
      finally:
        self.context.glm.release(locking.LEVEL_CLUSTER)
    finally:
      self._cbs = None

  def Log(self, *args):
    """Forward call to feedback callback function.

    """
    if self._cbs:
      self._cbs.Feedback(*args)

  def LogStep(self, current, total, message):
    """Log a change in LU execution progress.

    """
    logging.debug("Step %d/%d %s", current, total, message)
    self.Log("STEP %d/%d %s" % (current, total, message))

  def LogWarning(self, message, *args, **kwargs):
    """Log a warning to the logs and the user.

    The optional keyword argument is 'hint' and can be used to show a
    hint to the user (presumably related to the warning). If the
    message is empty, it will not be printed at all, allowing one to
    show only a hint.

    """
    assert not kwargs or (len(kwargs) == 1 and "hint" in kwargs), \
           "Invalid keyword arguments for LogWarning (%s)" % str(kwargs)
    if args:
      message = message % tuple(args)
    if message:
      logging.warning(message)
      self.Log(" - WARNING: %s" % message)
    if "hint" in kwargs:
      self.Log("      Hint: %s" % kwargs["hint"])

  def LogInfo(self, message, *args):
    """Log an informational message to the logs and the user.

    """
    if args:
      message = message % tuple(args)
    logging.info(message)
    self.Log(" - INFO: %s" % message)

  def GetECId(self):
    """Returns the current execution context ID.

    @raise errors.ProgrammerError: if no execution context ID is set

    """
    if not self._ec_id:
      # BUGFIX: the exception was previously constructed but never
      # raised, silently ignoring the missing execution context
      raise errors.ProgrammerError("Tried to use execution context id"
                                   " when not set")
    return self._ec_id
451

    
452

    
453
class HooksMaster(object):
  """Hooks master.

  This class distributes the run commands to the nodes based on the
  specific LU class.

  In order to remove the direct dependency on the rpc module, the
  constructor needs a function which actually does the remote
  call. This will usually be rpc.call_hooks_runner, but any function
  which behaves the same works.

  """
  def __init__(self, callfn, lu):
    """Initializes the hooks master.

    @param callfn: function performing the actual multi-node hooks RPC
        (usually rpc.call_hooks_runner); called as
        callfn(node_list, hpath, phase, env)
    @param lu: the Logical Unit whose hooks should be run

    """
    self.callfn = callfn
    self.lu = lu
    self.op = lu.op
    # Environment is built once; per-phase keys are added in _RunWrapper
    self.env, node_list_pre, node_list_post = self._BuildEnv()
    self.node_list = {constants.HOOKS_PHASE_PRE: node_list_pre,
                      constants.HOOKS_PHASE_POST: node_list_post}

  def _BuildEnv(self):
    """Compute the environment and the target nodes.

    Based on the opcode and the current node list, this builds the
    environment for the hooks and the target node list for the run.

    @return: tuple of (environment dict, frozenset of pre-phase nodes,
        frozenset of post-phase nodes)

    """
    env = {
      "PATH": "/sbin:/bin:/usr/sbin:/usr/bin",
      "GANETI_HOOKS_VERSION": constants.HOOKS_VERSION,
      "GANETI_OP_CODE": self.op.OP_ID,
      "GANETI_OBJECT_TYPE": self.lu.HTYPE,
      "GANETI_DATA_DIR": constants.DATA_DIR,
      }

    if self.lu.HPATH is not None:
      lu_env, lu_nodes_pre, lu_nodes_post = self.lu.BuildHooksEnv()
      if lu_env:
        # LU-provided variables are namespaced with the GANETI_ prefix
        for key in lu_env:
          env["GANETI_" + key] = lu_env[key]
    else:
      # LUs without a hooks path run hooks on no node at all
      lu_nodes_pre = lu_nodes_post = []

    return env, frozenset(lu_nodes_pre), frozenset(lu_nodes_post)

  def _RunWrapper(self, node_list, hpath, phase):
    """Simple wrapper over self.callfn.

    This method fixes the environment before doing the rpc call.

    """
    env = self.env.copy()
    env["GANETI_HOOKS_PHASE"] = phase
    env["GANETI_HOOKS_PATH"] = hpath
    if self.lu.cfg is not None:
      env["GANETI_CLUSTER"] = self.lu.cfg.GetClusterName()
      env["GANETI_MASTER"] = self.lu.cfg.GetMasterNode()

    # Hooks only ever see string keys/values; "items" (unlike the
    # Python-2-only "iteritems") works identically here on 2.x and 3.x
    env = dict([(str(key), str(val)) for key, val in env.items()])

    return self.callfn(node_list, hpath, phase, env)

  def RunPhase(self, phase, nodes=None):
    """Run all the scripts for a phase.

    This is the main function of the HookMaster.

    @param phase: one of L{constants.HOOKS_PHASE_POST} or
        L{constants.HOOKS_PHASE_PRE}; it denotes the hooks phase
    @param nodes: overrides the predefined list of nodes for the given phase
    @return: the processed results of the hooks multi-node rpc call
    @raise errors.HooksFailure: on communication failure to the nodes
    @raise errors.HooksAbort: on failure of one of the hooks

    """
    if not self.node_list[phase] and not nodes:
      # empty node list, we should not attempt to run this as either
      # we're in the cluster init phase and the rpc client part can't
      # even attempt to run, or this LU doesn't do hooks at all
      return
    hpath = self.lu.HPATH
    if nodes is not None:
      results = self._RunWrapper(nodes, hpath, phase)
    else:
      results = self._RunWrapper(self.node_list[phase], hpath, phase)
    errs = []
    if not results:
      # Total RPC failure: abort pre-hooks, only warn for post-hooks
      msg = "Communication Failure"
      if phase == constants.HOOKS_PHASE_PRE:
        raise errors.HooksFailure(msg)
      else:
        self.lu.LogWarning(msg)
        return results
    for node_name in results:
      res = results[node_name]
      if res.offline:
        # Offline nodes are skipped entirely
        continue
      msg = res.fail_msg
      if msg:
        self.lu.LogWarning("Communication failure to node %s: %s",
                           node_name, msg)
        continue
      for script, hkr, output in res.payload:
        if hkr == constants.HKR_FAIL:
          if phase == constants.HOOKS_PHASE_PRE:
            # Pre-hook failures are collected and abort the operation
            errs.append((node_name, script, output))
          else:
            if not output:
              output = "(no output)"
            self.lu.LogWarning("On %s script %s failed, output: %s" %
                               (node_name, script, output))
    if errs and phase == constants.HOOKS_PHASE_PRE:
      raise errors.HooksAbort(errs)
    return results

  def RunConfigUpdate(self):
    """Run the special configuration update hook

    This is a special hook that runs only on the master after each
    top-level LU if the configuration has been updated.

    """
    phase = constants.HOOKS_PHASE_POST
    hpath = constants.HOOKS_NAME_CFGUPDATE
    nodes = [self.lu.cfg.GetMasterNode()]
    self._RunWrapper(nodes, hpath, phase)