Statistics
| Branch: | Tag: | Revision:

root / lib / mcpu.py @ 407339d0

History | View | Annotate | Download (18.9 kB)

1
#
2
#
3

    
4
# Copyright (C) 2006, 2007 Google Inc.
5
#
6
# This program is free software; you can redistribute it and/or modify
7
# it under the terms of the GNU General Public License as published by
8
# the Free Software Foundation; either version 2 of the License, or
9
# (at your option) any later version.
10
#
11
# This program is distributed in the hope that it will be useful, but
12
# WITHOUT ANY WARRANTY; without even the implied warranty of
13
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14
# General Public License for more details.
15
#
16
# You should have received a copy of the GNU General Public License
17
# along with this program; if not, write to the Free Software
18
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
19
# 02110-1301, USA.
20

    
21

    
22
"""Module implementing the logic behind the cluster operations
23

24
This module implements the logic for doing operations in the cluster. There
25
are two kinds of classes defined:
26
  - logical units, which know how to deal with their specific opcode only
27
  - the processor, which dispatches the opcodes to their logical units
28

29
"""
30

    
31
import logging
32
import random
33
import time
34

    
35
from ganeti import opcodes
36
from ganeti import constants
37
from ganeti import errors
38
from ganeti import rpc
39
from ganeti import cmdlib
40
from ganeti import locking
41
from ganeti import utils
42

    
43

    
44
class _LockAcquireTimeout(Exception):
45
  """Internal exception to report timeouts on acquiring locks.
46

47
  """
48

    
49

    
50
class _LockTimeoutStrategy(object):
51
  """Class with lock acquire timeout strategy.
52

53
  """
54
  __slots__ = [
55
    "_attempts",
56
    "_random_fn",
57
    "_start_time",
58
    ]
59

    
60
  _MAX_ATTEMPTS = 10
61
  """How many retries before going into blocking mode"""
62

    
63
  _ATTEMPT_FACTOR = 1.75
64
  """Factor between attempts"""
65

    
66
  def __init__(self, _random_fn=None):
67
    """Initializes this class.
68

69
    @param _random_fn: Random number generator for unittests
70

71
    """
72
    object.__init__(self)
73

    
74
    self._start_time = None
75
    self._attempts = 0
76

    
77
    if _random_fn is None:
78
      self._random_fn = random.random
79
    else:
80
      self._random_fn = _random_fn
81

    
82
  def NextAttempt(self):
83
    """Advances to the next attempt.
84

85
    """
86
    assert self._attempts >= 0
87
    self._attempts += 1
88

    
89
  def CalcRemainingTimeout(self):
90
    """Returns the remaining timeout.
91

92
    """
93
    assert self._attempts >= 0
94

    
95
    if self._attempts == self._MAX_ATTEMPTS:
96
      # Only blocking acquires after 10 retries
97
      return None
98

    
99
    if self._attempts > self._MAX_ATTEMPTS:
100
      raise RuntimeError("Blocking acquire ran into timeout")
101

    
102
    # Get start time on first calculation
103
    if self._start_time is None:
104
      self._start_time = time.time()
105

    
106
    # Calculate remaining time for this attempt
107
    timeout = (self._start_time + (self._ATTEMPT_FACTOR ** self._attempts) -
108
               time.time())
109

    
110
    if timeout > 10.0:
111
      # Cap timeout at 10 seconds. This gives other jobs a chance to run
112
      # even if we're still trying to get our locks, before finally moving
113
      # to a blocking acquire.
114
      timeout = 10.0
115

    
116
    elif timeout < 0.1:
117
      # Lower boundary
118
      timeout = 0.1
119

    
120
    # Add a small variation (-/+ 5%) to timeouts. This helps in situations
121
    # where two or more jobs are fighting for the same lock(s).
122
    variation_range = timeout * 0.1
123
    timeout += (self._random_fn() * variation_range) - (variation_range * 0.5)
124

    
125
    assert timeout >= 0.0, "Timeout must be positive"
126

    
127
    return timeout
128

    
129

    
130
class OpExecCbBase:
  """Base class for OpCode execution callbacks.

  """
  def NotifyStart(self):
    """Called right before the LU's Exec() method is invoked.

    At this point all required locks have already been acquired.

    """

  def Feedback(self, *args):
    """Forwards feedback from the LU code to the end-user.

    """

  def ReportLocks(self, msg):
    """Reports a lock operation to the user.

    """
151

    
152

    
153
class Processor(object):
  """Object which runs OpCodes"""
  # Maps each opcode class to the LogicalUnit class implementing it
  DISPATCH_TABLE = {
    # Cluster
    opcodes.OpPostInitCluster: cmdlib.LUPostInitCluster,
    opcodes.OpDestroyCluster: cmdlib.LUDestroyCluster,
    opcodes.OpQueryClusterInfo: cmdlib.LUQueryClusterInfo,
    opcodes.OpVerifyCluster: cmdlib.LUVerifyCluster,
    opcodes.OpQueryConfigValues: cmdlib.LUQueryConfigValues,
    opcodes.OpRenameCluster: cmdlib.LURenameCluster,
    opcodes.OpVerifyDisks: cmdlib.LUVerifyDisks,
    opcodes.OpSetClusterParams: cmdlib.LUSetClusterParams,
    opcodes.OpRedistributeConfig: cmdlib.LURedistributeConfig,
    opcodes.OpRepairDiskSizes: cmdlib.LURepairDiskSizes,
    # node lu
    opcodes.OpAddNode: cmdlib.LUAddNode,
    opcodes.OpQueryNodes: cmdlib.LUQueryNodes,
    opcodes.OpQueryNodeVolumes: cmdlib.LUQueryNodeVolumes,
    opcodes.OpQueryNodeStorage: cmdlib.LUQueryNodeStorage,
    opcodes.OpModifyNodeStorage: cmdlib.LUModifyNodeStorage,
    opcodes.OpRepairNodeStorage: cmdlib.LURepairNodeStorage,
    opcodes.OpRemoveNode: cmdlib.LURemoveNode,
    opcodes.OpSetNodeParams: cmdlib.LUSetNodeParams,
    opcodes.OpPowercycleNode: cmdlib.LUPowercycleNode,
    opcodes.OpEvacuateNode: cmdlib.LUEvacuateNode,
    opcodes.OpMigrateNode: cmdlib.LUMigrateNode,
    # instance lu
    opcodes.OpCreateInstance: cmdlib.LUCreateInstance,
    opcodes.OpReinstallInstance: cmdlib.LUReinstallInstance,
    opcodes.OpRemoveInstance: cmdlib.LURemoveInstance,
    opcodes.OpRenameInstance: cmdlib.LURenameInstance,
    opcodes.OpActivateInstanceDisks: cmdlib.LUActivateInstanceDisks,
    opcodes.OpShutdownInstance: cmdlib.LUShutdownInstance,
    opcodes.OpStartupInstance: cmdlib.LUStartupInstance,
    opcodes.OpRebootInstance: cmdlib.LURebootInstance,
    opcodes.OpDeactivateInstanceDisks: cmdlib.LUDeactivateInstanceDisks,
    opcodes.OpReplaceDisks: cmdlib.LUReplaceDisks,
    opcodes.OpRecreateInstanceDisks: cmdlib.LURecreateInstanceDisks,
    opcodes.OpFailoverInstance: cmdlib.LUFailoverInstance,
    opcodes.OpMigrateInstance: cmdlib.LUMigrateInstance,
    opcodes.OpMoveInstance: cmdlib.LUMoveInstance,
    opcodes.OpConnectConsole: cmdlib.LUConnectConsole,
    opcodes.OpQueryInstances: cmdlib.LUQueryInstances,
    opcodes.OpQueryInstanceData: cmdlib.LUQueryInstanceData,
    opcodes.OpSetInstanceParams: cmdlib.LUSetInstanceParams,
    opcodes.OpGrowDisk: cmdlib.LUGrowDisk,
    # os lu
    opcodes.OpDiagnoseOS: cmdlib.LUDiagnoseOS,
    # exports lu
    opcodes.OpQueryExports: cmdlib.LUQueryExports,
    opcodes.OpExportInstance: cmdlib.LUExportInstance,
    opcodes.OpRemoveExport: cmdlib.LURemoveExport,
    # tags lu
    opcodes.OpGetTags: cmdlib.LUGetTags,
    opcodes.OpSearchTags: cmdlib.LUSearchTags,
    opcodes.OpAddTags: cmdlib.LUAddTags,
    opcodes.OpDelTags: cmdlib.LUDelTags,
    # test lu
    opcodes.OpTestDelay: cmdlib.LUTestDelay,
    opcodes.OpTestAllocator: cmdlib.LUTestAllocator,
    }

  def __init__(self, context):
    """Constructor for Processor

    @param context: shared context object; this class uses its C{cfg}
        (cluster configuration) and C{glm} (Ganeti lock manager)
        attributes

    """
    self.context = context
    # Runtime callbacks (L{OpExecCbBase}); set for the duration of one
    # ExecOpCode call only
    self._cbs = None
    self.rpc = rpc.RpcRunner(context.cfg)
    self.hmclass = HooksMaster

  def _ReportLocks(self, level, names, shared, acquired):
    """Reports lock operations.

    Builds a human-readable "state/level/names/mode" message, logs it
    and forwards it to the runtime callbacks if any are registered.

    @type level: int
    @param level: Lock level
    @type names: list or string
    @param names: Lock names
    @type shared: bool
    @param shared: Whether the lock should be acquired in shared mode
    @type acquired: bool
    @param acquired: Whether the lock has already been acquired

    """
    parts = []

    # Build message
    if acquired:
      parts.append("acquired")
    else:
      parts.append("waiting")

    parts.append(locking.LEVEL_NAMES[level])

    if names == locking.ALL_SET:
      parts.append("ALL")
    elif isinstance(names, basestring):
      parts.append(names)
    else:
      parts.append(",".join(names))

    if shared:
      parts.append("shared")
    else:
      parts.append("exclusive")

    msg = "/".join(parts)

    logging.debug("LU locks %s", msg)

    if self._cbs:
      self._cbs.ReportLocks(msg)

  def _ExecLU(self, lu):
    """Logical Unit execution sequence.

    Runs CheckPrereq, the pre-phase hooks, the LU's Exec method and the
    post-phase hooks, and triggers the config update hook if the LU
    wrote the configuration.

    @param lu: the LogicalUnit instance to execute
    @return: the result of the LU's Exec (possibly modified by its
        HooksCallBack), or C{lu.dry_run_result} in dry-run mode

    """
    # Remember the config serial so we can tell below whether this LU
    # wrote the configuration
    write_count = self.context.cfg.write_count
    lu.CheckPrereq()
    hm = HooksMaster(self.rpc.call_hooks_runner, lu)
    h_results = hm.RunPhase(constants.HOOKS_PHASE_PRE)
    lu.HooksCallBack(constants.HOOKS_PHASE_PRE, h_results,
                     self._Feedback, None)

    if getattr(lu.op, "dry_run", False):
      # in this mode, no post-hooks are run, and the config is not
      # written (as it might have been modified by another LU, and we
      # shouldn't do writeout on behalf of other threads
      self.LogInfo("dry-run mode requested, not actually executing"
                   " the operation")
      return lu.dry_run_result

    try:
      result = lu.Exec(self._Feedback)
      h_results = hm.RunPhase(constants.HOOKS_PHASE_POST)
      result = lu.HooksCallBack(constants.HOOKS_PHASE_POST, h_results,
                                self._Feedback, result)
    finally:
      # FIXME: This needs locks if not lu_class.REQ_BGL
      if write_count != self.context.cfg.write_count:
        hm.RunConfigUpdate()

    return result

  def _LockAndExecLU(self, lu, level, calc_timeout):
    """Execute a Logical Unit, with the needed locks.

    This is a recursive function that starts locking the given level, and
    proceeds up, till there are no more locks to acquire. Then it executes the
    given LU and its opcodes.

    @param lu: the LogicalUnit instance
    @param level: current lock level; recursion ends once it leaves
        C{locking.LEVELS}
    @param calc_timeout: callable returning the remaining acquire
        timeout in seconds, or C{None} for a blocking acquire

    """
    adding_locks = level in lu.add_locks
    acquiring_locks = level in lu.needed_locks
    if level not in locking.LEVELS:
      # All levels processed; run the LU itself
      if self._cbs:
        self._cbs.NotifyStart()

      result = self._ExecLU(lu)

    elif adding_locks and acquiring_locks:
      # We could both acquire and add locks at the same level, but for now we
      # don't need this, so we'll avoid the complicated code needed.
      raise NotImplementedError("Can't declare locks to acquire when adding"
                                " others")

    elif adding_locks or acquiring_locks:
      lu.DeclareLocks(level)
      share = lu.share_locks[level]

      try:
        assert adding_locks ^ acquiring_locks, \
          "Locks must be either added or acquired"

        if acquiring_locks:
          # Acquiring locks
          needed_locks = lu.needed_locks[level]

          self._ReportLocks(level, needed_locks, share, False)
          acquired = self.context.glm.acquire(level,
                                              needed_locks,
                                              shared=share,
                                              timeout=calc_timeout())
          # TODO: Report timeout
          self._ReportLocks(level, needed_locks, share, True)

          # glm.acquire returns None when the timeout expired
          if acquired is None:
            raise _LockAcquireTimeout()

          lu.acquired_locks[level] = acquired

        else:
          # Adding locks
          add_locks = lu.add_locks[level]
          # Locks added at this level are removed again in the inner
          # finally block below
          lu.remove_locks[level] = add_locks

          try:
            self.context.glm.add(level, add_locks, acquired=1, shared=share)
          except errors.LockError:
            raise errors.OpPrereqError(
              "Couldn't add locks (%s), probably because of a race condition"
              " with another job, who added them first" % add_locks)

          lu.acquired_locks[level] = add_locks
        try:
          # Recurse into the next lock level
          result = self._LockAndExecLU(lu, level + 1, calc_timeout)
        finally:
          if level in lu.remove_locks:
            self.context.glm.remove(level, lu.remove_locks[level])
      finally:
        if self.context.glm.is_owned(level):
          self.context.glm.release(level)

    else:
      # Nothing to lock at this level; move on
      result = self._LockAndExecLU(lu, level + 1, calc_timeout)

    return result

  def ExecOpCode(self, op, cbs):
    """Execute an opcode.

    Dispatches the opcode to its LU, acquiring the Big Ganeti Lock
    first and retrying (with growing timeouts per
    L{_LockTimeoutStrategy}) whenever a lock acquire times out.

    @type op: an OpCode instance
    @param op: the opcode to be executed
    @type cbs: L{OpExecCbBase}
    @param cbs: Runtime callbacks

    """
    if not isinstance(op, opcodes.OpCode):
      raise errors.ProgrammerError("Non-opcode instance passed"
                                   " to ExecOpcode")

    self._cbs = cbs
    try:
      lu_class = self.DISPATCH_TABLE.get(op.__class__, None)
      if lu_class is None:
        raise errors.OpCodeUnknown("Unknown opcode")

      timeout_strategy = _LockTimeoutStrategy()
      calc_timeout = timeout_strategy.CalcRemainingTimeout

      while True:
        try:
          self._ReportLocks(locking.LEVEL_CLUSTER, [locking.BGL],
                            not lu_class.REQ_BGL, False)
          try:
            # Acquire the Big Ganeti Lock exclusively if this LU requires it,
            # and in a shared fashion otherwise (to prevent concurrent run with
            # an exclusive LU.
            acquired_bgl = self.context.glm.acquire(locking.LEVEL_CLUSTER,
                                                    [locking.BGL],
                                                    shared=not lu_class.REQ_BGL,
                                                    timeout=calc_timeout())
          finally:
            # TODO: Report timeout
            self._ReportLocks(locking.LEVEL_CLUSTER, [locking.BGL],
                              not lu_class.REQ_BGL, True)

          if acquired_bgl is None:
            raise _LockAcquireTimeout()

          try:
            lu = lu_class(self, op, self.context, self.rpc)
            lu.ExpandNames()
            assert lu.needed_locks is not None, "needed_locks not set by LU"

            return self._LockAndExecLU(lu, locking.LEVEL_INSTANCE, calc_timeout)
          finally:
            self.context.glm.release(locking.LEVEL_CLUSTER)

        except _LockAcquireTimeout:
          # Timeout while waiting for lock, try again
          pass

        timeout_strategy.NextAttempt()

    finally:
      # The callbacks are only valid for the duration of this opcode
      self._cbs = None

  def _Feedback(self, *args):
    """Forward call to feedback callback function.

    """
    if self._cbs:
      self._cbs.Feedback(*args)

  def LogStep(self, current, total, message):
    """Log a change in LU execution progress.

    @param current: number of the current step
    @param total: total number of steps
    @param message: description of the step

    """
    logging.debug("Step %d/%d %s", current, total, message)
    self._Feedback("STEP %d/%d %s" % (current, total, message))

  def LogWarning(self, message, *args, **kwargs):
    """Log a warning to the logs and the user.

    The optional keyword argument is 'hint' and can be used to show a
    hint to the user (presumably related to the warning). If the
    message is empty, it will not be printed at all, allowing one to
    show only a hint.

    """
    assert not kwargs or (len(kwargs) == 1 and "hint" in kwargs), \
           "Invalid keyword arguments for LogWarning (%s)" % str(kwargs)
    if args:
      message = message % tuple(args)
    if message:
      logging.warning(message)
      self._Feedback(" - WARNING: %s" % message)
    if "hint" in kwargs:
      self._Feedback("      Hint: %s" % kwargs["hint"])

  def LogInfo(self, message, *args):
    """Log an informational message to the logs and the user.

    @param message: the message, optionally a %-format string completed
        by C{args}

    """
    if args:
      message = message % tuple(args)
    logging.info(message)
    self._Feedback(" - INFO: %s" % message)
472

    
473

    
474
class HooksMaster(object):
  """Hooks master.

  This class distributes the run commands to the nodes based on the
  specific LU class.

  In order to remove the direct dependency on the rpc module, the
  constructor needs a function which actually does the remote
  call. This will usually be rpc.call_hooks_runner, but any function
  which behaves the same works.

  """
  def __init__(self, callfn, lu):
    """Initializes the hooks master.

    @param callfn: function performing the actual hooks rpc call,
        usually C{rpc.call_hooks_runner}
    @param lu: the LogicalUnit whose hooks should be run

    """
    self.callfn = callfn
    self.lu = lu
    self.op = lu.op
    self.env, node_list_pre, node_list_post = self._BuildEnv()
    # Target node names, keyed by hooks phase
    self.node_list = {constants.HOOKS_PHASE_PRE: node_list_pre,
                      constants.HOOKS_PHASE_POST: node_list_post}

  def _BuildEnv(self):
    """Compute the environment and the target nodes.

    Based on the opcode and the current node list, this builds the
    environment for the hooks and the target node list for the run.

    @return: tuple of (environment dict, frozenset of pre-phase nodes,
        frozenset of post-phase nodes)

    """
    env = {
      "PATH": "/sbin:/bin:/usr/sbin:/usr/bin",
      "GANETI_HOOKS_VERSION": constants.HOOKS_VERSION,
      "GANETI_OP_CODE": self.op.OP_ID,
      "GANETI_OBJECT_TYPE": self.lu.HTYPE,
      "GANETI_DATA_DIR": constants.DATA_DIR,
      }

    if self.lu.HPATH is not None:
      lu_env, lu_nodes_pre, lu_nodes_post = self.lu.BuildHooksEnv()
      if lu_env:
        # LU-provided variables are namespaced under GANETI_
        for key in lu_env:
          env["GANETI_" + key] = lu_env[key]
    else:
      # A LU without a hooks path runs hooks on no node at all
      lu_nodes_pre = lu_nodes_post = []

    return env, frozenset(lu_nodes_pre), frozenset(lu_nodes_post)

  def _RunWrapper(self, node_list, hpath, phase):
    """Simple wrapper over self.callfn.

    This method fixes the environment before doing the rpc call.

    @param node_list: nodes on which to run the hooks
    @param phase: the hooks phase being run
    @param hpath: the hooks path

    """
    env = self.env.copy()
    env["GANETI_HOOKS_PHASE"] = phase
    env["GANETI_HOOKS_PATH"] = hpath
    if self.lu.cfg is not None:
      env["GANETI_CLUSTER"] = self.lu.cfg.GetClusterName()
      env["GANETI_MASTER"] = self.lu.cfg.GetMasterNode()

    # Hooks only get plain string keys and values in their environment
    env = dict([(str(key), str(val)) for key, val in env.iteritems()])

    return self.callfn(node_list, hpath, phase, env)

  def RunPhase(self, phase, nodes=None):
    """Run all the scripts for a phase.

    This is the main function of the HookMaster.

    @param phase: one of L{constants.HOOKS_PHASE_POST} or
        L{constants.HOOKS_PHASE_PRE}; it denotes the hooks phase
    @param nodes: overrides the predefined list of nodes for the given phase
    @return: the processed results of the hooks multi-node rpc call
    @raise errors.HooksFailure: on communication failure to the nodes
    @raise errors.HooksAbort: on failure of one of the hooks

    """
    if not self.node_list[phase] and not nodes:
      # empty node list, we should not attempt to run this as either
      # we're in the cluster init phase and the rpc client part can't
      # even attempt to run, or this LU doesn't do hooks at all
      return
    hpath = self.lu.HPATH
    if nodes is not None:
      results = self._RunWrapper(nodes, hpath, phase)
    else:
      results = self._RunWrapper(self.node_list[phase], hpath, phase)
    errs = []
    if not results:
      # Total rpc failure: fatal in the pre phase, only a warning later
      msg = "Communication Failure"
      if phase == constants.HOOKS_PHASE_PRE:
        raise errors.HooksFailure(msg)
      else:
        self.lu.LogWarning(msg)
        return results
    for node_name in results:
      res = results[node_name]
      if res.offline:
        continue
      msg = res.fail_msg
      if msg:
        self.lu.LogWarning("Communication failure to node %s: %s",
                           node_name, msg)
        continue
      # Failed scripts abort the operation in the pre phase, but only
      # produce warnings afterwards
      for script, hkr, output in res.payload:
        if hkr == constants.HKR_FAIL:
          if phase == constants.HOOKS_PHASE_PRE:
            errs.append((node_name, script, output))
          else:
            if not output:
              output = "(no output)"
            self.lu.LogWarning("On %s script %s failed, output: %s" %
                               (node_name, script, output))
    if errs and phase == constants.HOOKS_PHASE_PRE:
      raise errors.HooksAbort(errs)
    return results

  def RunConfigUpdate(self):
    """Run the special configuration update hook

    This is a special hook that runs only on the master after each
    top-level LU if the configuration has been updated.

    """
    phase = constants.HOOKS_PHASE_POST
    hpath = constants.HOOKS_NAME_CFGUPDATE
    nodes = [self.lu.cfg.GetMasterNode()]
    self._RunWrapper(nodes, hpath, phase)