Statistics
| Branch: | Tag: | Revision:

root / lib / mcpu.py @ 211b6132

History | View | Annotate | Download (19.2 kB)

1
#
2
#
3

    
4
# Copyright (C) 2006, 2007 Google Inc.
5
#
6
# This program is free software; you can redistribute it and/or modify
7
# it under the terms of the GNU General Public License as published by
8
# the Free Software Foundation; either version 2 of the License, or
9
# (at your option) any later version.
10
#
11
# This program is distributed in the hope that it will be useful, but
12
# WITHOUT ANY WARRANTY; without even the implied warranty of
13
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14
# General Public License for more details.
15
#
16
# You should have received a copy of the GNU General Public License
17
# along with this program; if not, write to the Free Software
18
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
19
# 02110-1301, USA.
20

    
21

    
22
"""Module implementing the logic behind the cluster operations
23

24
This module implements the logic for doing operations in the cluster. There
25
are two kinds of classes defined:
26
  - logical units, which know how to deal with their specific opcode only
27
  - the processor, which dispatches the opcodes to their logical units
28

29
"""
30

    
31
import logging
32
import random
33
import time
34

    
35
from ganeti import opcodes
36
from ganeti import constants
37
from ganeti import errors
38
from ganeti import rpc
39
from ganeti import cmdlib
40
from ganeti import locking
41
from ganeti import utils
42

    
43

    
44
class _LockAcquireTimeout(Exception):
  """Internal exception to report timeouts on acquiring locks.

  Raised inside L{Processor._LockAndExecLU} and L{Processor.ExecOpCode}
  when a lock could not be acquired within the current attempt's timeout;
  caught in L{Processor.ExecOpCode}, which then retries the whole opcode
  with the next timeout from L{_LockTimeoutStrategy}.

  """
48

    
49

    
50
class _LockTimeoutStrategy(object):
  """Strategy for computing lock acquisition timeouts.

  Each retry gets an exponentially growing (but capped and jittered)
  timeout; after L{_MAX_ATTEMPTS} retries the caller is told to block
  indefinitely by returning C{None}.

  """
  __slots__ = [
    "_attempts",
    "_random_fn",
    "_start_time",
    ]

  _MAX_ATTEMPTS = 10
  """How many retries before going into blocking mode"""

  _ATTEMPT_FACTOR = 1.75
  """Factor between attempts"""

  def __init__(self, _random_fn=None):
    """Initializes this class.

    @param _random_fn: Random number generator for unittests

    """
    object.__init__(self)

    self._start_time = None
    self._attempts = 0

    # Use the supplied generator if given (unittests), otherwise the
    # standard PRNG
    if _random_fn is not None:
      self._random_fn = _random_fn
    else:
      self._random_fn = random.random

  def NextAttempt(self):
    """Advances to the next attempt.

    """
    assert self._attempts >= 0
    self._attempts += 1

  def CalcRemainingTimeout(self):
    """Returns the remaining timeout.

    @return: timeout in seconds for the current attempt, or C{None} once
        all retries are used up (meaning: block until acquired)

    """
    assert self._attempts >= 0

    if self._attempts == self._MAX_ATTEMPTS:
      # Only blocking acquires after 10 retries
      return None

    if self._attempts > self._MAX_ATTEMPTS:
      raise RuntimeError("Blocking acquire ran into timeout")

    # Get start time on first calculation
    if self._start_time is None:
      self._start_time = time.time()

    # Time left until this attempt's (exponentially growing) deadline
    deadline = self._start_time + self._ATTEMPT_FACTOR ** self._attempts
    remaining = deadline - time.time()

    # Clamp to [0.1s, 10s]: the upper cap gives other jobs a chance to
    # run while we keep retrying, the lower bound avoids near-zero waits
    remaining = max(0.1, min(remaining, 10.0))

    # Add a small variation (-/+ 5%) to timeouts. This helps in situations
    # where two or more jobs are fighting for the same lock(s).
    spread = remaining * 0.1
    remaining += (self._random_fn() - 0.5) * spread

    assert remaining >= 0.0, "Timeout must be positive"

    return remaining
128

    
129

    
130
class OpExecCbBase:
  """Base class for OpCode execution callbacks.

  All methods are no-ops by default; subclasses override the ones they
  care about. An instance is passed to L{Processor.ExecOpCode}.

  """
  def NotifyStart(self):
    """Called when we are about to execute the LU.

    This function is called when we're about to start the lu's Exec() method,
    that is, after we have acquired all locks.

    """

  def Feedback(self, *args):
    """Sends feedback from the LU code to the end-user.

    """

  def ReportLocks(self, msg):
    """Report lock operations.

    @type msg: string
    @param msg: Slash-separated description of the lock operation, as
        built by L{Processor._ReportLocks}

    """
151

    
152

    
153
class Processor(object):
  """Object which runs OpCodes"""
  # Maps each opcode class to the logical unit (LU) class implementing
  # it; ExecOpCode uses this table to dispatch incoming opcodes
  DISPATCH_TABLE = {
    # Cluster
    opcodes.OpPostInitCluster: cmdlib.LUPostInitCluster,
    opcodes.OpDestroyCluster: cmdlib.LUDestroyCluster,
    opcodes.OpQueryClusterInfo: cmdlib.LUQueryClusterInfo,
    opcodes.OpVerifyCluster: cmdlib.LUVerifyCluster,
    opcodes.OpQueryConfigValues: cmdlib.LUQueryConfigValues,
    opcodes.OpRenameCluster: cmdlib.LURenameCluster,
    opcodes.OpVerifyDisks: cmdlib.LUVerifyDisks,
    opcodes.OpSetClusterParams: cmdlib.LUSetClusterParams,
    opcodes.OpRedistributeConfig: cmdlib.LURedistributeConfig,
    opcodes.OpRepairDiskSizes: cmdlib.LURepairDiskSizes,
    # node lu
    opcodes.OpAddNode: cmdlib.LUAddNode,
    opcodes.OpQueryNodes: cmdlib.LUQueryNodes,
    opcodes.OpQueryNodeVolumes: cmdlib.LUQueryNodeVolumes,
    opcodes.OpQueryNodeStorage: cmdlib.LUQueryNodeStorage,
    opcodes.OpModifyNodeStorage: cmdlib.LUModifyNodeStorage,
    opcodes.OpRepairNodeStorage: cmdlib.LURepairNodeStorage,
    opcodes.OpRemoveNode: cmdlib.LURemoveNode,
    opcodes.OpSetNodeParams: cmdlib.LUSetNodeParams,
    opcodes.OpPowercycleNode: cmdlib.LUPowercycleNode,
    opcodes.OpEvacuateNode: cmdlib.LUEvacuateNode,
    opcodes.OpMigrateNode: cmdlib.LUMigrateNode,
    # instance lu
    opcodes.OpCreateInstance: cmdlib.LUCreateInstance,
    opcodes.OpReinstallInstance: cmdlib.LUReinstallInstance,
    opcodes.OpRemoveInstance: cmdlib.LURemoveInstance,
    opcodes.OpRenameInstance: cmdlib.LURenameInstance,
    opcodes.OpActivateInstanceDisks: cmdlib.LUActivateInstanceDisks,
    opcodes.OpShutdownInstance: cmdlib.LUShutdownInstance,
    opcodes.OpStartupInstance: cmdlib.LUStartupInstance,
    opcodes.OpRebootInstance: cmdlib.LURebootInstance,
    opcodes.OpDeactivateInstanceDisks: cmdlib.LUDeactivateInstanceDisks,
    opcodes.OpReplaceDisks: cmdlib.LUReplaceDisks,
    opcodes.OpRecreateInstanceDisks: cmdlib.LURecreateInstanceDisks,
    opcodes.OpFailoverInstance: cmdlib.LUFailoverInstance,
    opcodes.OpMigrateInstance: cmdlib.LUMigrateInstance,
    opcodes.OpMoveInstance: cmdlib.LUMoveInstance,
    opcodes.OpConnectConsole: cmdlib.LUConnectConsole,
    opcodes.OpQueryInstances: cmdlib.LUQueryInstances,
    opcodes.OpQueryInstanceData: cmdlib.LUQueryInstanceData,
    opcodes.OpSetInstanceParams: cmdlib.LUSetInstanceParams,
    opcodes.OpGrowDisk: cmdlib.LUGrowDisk,
    # os lu
    opcodes.OpDiagnoseOS: cmdlib.LUDiagnoseOS,
    # exports lu
    opcodes.OpQueryExports: cmdlib.LUQueryExports,
    opcodes.OpExportInstance: cmdlib.LUExportInstance,
    opcodes.OpRemoveExport: cmdlib.LURemoveExport,
    # tags lu
    opcodes.OpGetTags: cmdlib.LUGetTags,
    opcodes.OpSearchTags: cmdlib.LUSearchTags,
    opcodes.OpAddTags: cmdlib.LUAddTags,
    opcodes.OpDelTags: cmdlib.LUDelTags,
    # test lu
    opcodes.OpTestDelay: cmdlib.LUTestDelay,
    opcodes.OpTestAllocator: cmdlib.LUTestAllocator,
    }
214

    
215
  def __init__(self, context):
    """Constructor for Processor

    @param context: context object holding the cluster configuration
        (C{context.cfg}) and the Ganeti lock manager (C{context.glm})

    """
    self.context = context
    # Runtime callbacks (OpExecCbBase); only set for the duration of
    # ExecOpCode, None otherwise
    self._cbs = None
    self.rpc = rpc.RpcRunner(context.cfg)
    self.hmclass = HooksMaster
223

    
224
  def _ReportLocks(self, level, names, shared, timeout, acquired, result):
    """Reports lock operations to the log and the registered callbacks.

    Builds a slash-separated message of the form
    C{status/level/names/mode}, logs it at debug level and forwards it to
    the callbacks' C{ReportLocks} if any are registered.

    @type level: int
    @param level: Lock level
    @type names: list or string
    @param names: Lock names
    @type shared: bool
    @param shared: Whether the locks should be acquired in shared mode
    @type timeout: None or float
    @param timeout: Timeout for acquiring the locks
    @type acquired: bool
    @param acquired: Whether the locks have already been acquired
    @type result: None or set
    @param result: Result from L{locking.GanetiLockManager.acquire}

    """
    # Status part: after the acquire attempt a None result means timeout
    if acquired:
      if result is None:
        parts = ["timeout"]
      else:
        parts = ["acquired"]
    else:
      # Before the attempt: record how long we're willing to wait
      parts = ["waiting"]
      if timeout is None:
        parts.append("blocking")
      else:
        parts.append("timeout=%0.6fs" % timeout)

    # Lock level
    parts.append(locking.LEVEL_NAMES[level])

    # Lock name(s)
    if names == locking.ALL_SET:
      name_part = "ALL"
    elif isinstance(names, basestring):
      name_part = names
    else:
      name_part = ",".join(names)
    parts.append(name_part)

    # Sharing mode
    if shared:
      parts.append("shared")
    else:
      parts.append("exclusive")

    msg = "/".join(parts)
    logging.debug("LU locks %s", msg)

    if self._cbs:
      self._cbs.ReportLocks(msg)
276

    
277
  def _AcquireLocks(self, level, names, shared, timeout):
    """Acquires locks via the Ganeti lock manager.

    @type level: int
    @param level: Lock level
    @type names: list or string
    @param names: Lock names
    @type shared: bool
    @param shared: Whether the locks should be acquired in shared mode
    @type timeout: None or float
    @param timeout: Timeout for acquiring the locks
    @return: result of L{locking.GanetiLockManager.acquire}; C{None}
        means the timeout expired before the locks could be acquired

    """
    # Report "waiting" before (possibly) blocking in acquire()
    self._ReportLocks(level, names, shared, timeout, False, None)

    acquired = self.context.glm.acquire(level, names, shared=shared,
                                        timeout=timeout)

    # Report the outcome ("acquired" or "timeout")
    self._ReportLocks(level, names, shared, timeout, True, acquired)

    return acquired
298

    
299
  def _ExecLU(self, lu):
    """Logical Unit execution sequence.

    Runs the complete LU lifecycle: prerequisite check, pre-phase hooks,
    C{Exec}, post-phase hooks, and - if the configuration was written in
    the meantime - the config update hook.

    @param lu: the logical unit instance to execute
    @return: the LU's C{Exec} result (possibly modified by
        C{HooksCallBack}), or C{lu.dry_run_result} in dry-run mode

    """
    # Remember the configuration serial number so we can detect below
    # whether this LU caused a config write
    write_count = self.context.cfg.write_count
    lu.CheckPrereq()
    hm = HooksMaster(self.rpc.call_hooks_runner, lu)
    h_results = hm.RunPhase(constants.HOOKS_PHASE_PRE)
    # Give the LU a chance to inspect/react to the pre-hook results
    lu.HooksCallBack(constants.HOOKS_PHASE_PRE, h_results,
                     self._Feedback, None)

    if getattr(lu.op, "dry_run", False):
      # in this mode, no post-hooks are run, and the config is not
      # written (as it might have been modified by another LU, and we
      # shouldn't do writeout on behalf of other threads)
      self.LogInfo("dry-run mode requested, not actually executing"
                   " the operation")
      return lu.dry_run_result

    try:
      result = lu.Exec(self._Feedback)
      h_results = hm.RunPhase(constants.HOOKS_PHASE_POST)
      result = lu.HooksCallBack(constants.HOOKS_PHASE_POST, h_results,
                                self._Feedback, result)
    finally:
      # FIXME: This needs locks if not lu_class.REQ_BGL
      # Run the config update hook even if Exec raised, as long as the
      # configuration was actually modified
      if write_count != self.context.cfg.write_count:
        hm.RunConfigUpdate()

    return result
329

    
330
  def _LockAndExecLU(self, lu, level, calc_timeout):
    """Execute a Logical Unit, with the needed locks.

    This is a recursive function that starts locking the given level, and
    proceeds up, till there are no more locks to acquire. Then it executes the
    given LU and its opcodes.

    @param lu: the logical unit instance to execute
    @type level: int
    @param level: the current lock level (recursion proceeds to level + 1)
    @param calc_timeout: zero-argument callable returning the timeout for
        the next lock acquisition (None meaning a blocking acquire)
    @raise _LockAcquireTimeout: if a lock could not be acquired in time

    """
    adding_locks = level in lu.add_locks
    acquiring_locks = level in lu.needed_locks
    if level not in locking.LEVELS:
      # Past the last level: all locks are held, run the LU itself
      if self._cbs:
        self._cbs.NotifyStart()

      result = self._ExecLU(lu)

    elif adding_locks and acquiring_locks:
      # We could both acquire and add locks at the same level, but for now we
      # don't need this, so we'll avoid the complicated code needed.
      raise NotImplementedError("Can't declare locks to acquire when adding"
                                " others")

    elif adding_locks or acquiring_locks:
      lu.DeclareLocks(level)
      share = lu.share_locks[level]

      try:
        assert adding_locks ^ acquiring_locks, \
          "Locks must be either added or acquired"

        if acquiring_locks:
          # Acquiring locks
          needed_locks = lu.needed_locks[level]

          acquired = self._AcquireLocks(level, needed_locks, share,
                                        calc_timeout())

          # None means the timeout expired; caller (ExecOpCode) retries
          if acquired is None:
            raise _LockAcquireTimeout()

          lu.acquired_locks[level] = acquired

        else:
          # Adding locks
          add_locks = lu.add_locks[level]
          # Remember the added locks so they're removed again below
          lu.remove_locks[level] = add_locks

          try:
            self.context.glm.add(level, add_locks, acquired=1, shared=share)
          except errors.LockError:
            raise errors.OpPrereqError(
              "Couldn't add locks (%s), probably because of a race condition"
              " with another job, who added them first" % add_locks)

          lu.acquired_locks[level] = add_locks
        try:
          # Recurse into the next lock level
          result = self._LockAndExecLU(lu, level + 1, calc_timeout)
        finally:
          if level in lu.remove_locks:
            self.context.glm.remove(level, lu.remove_locks[level])
      finally:
        # Release whatever we still own at this level, even on errors
        if self.context.glm.is_owned(level):
          self.context.glm.release(level)

    else:
      # Nothing to do at this level, go on to the next one
      result = self._LockAndExecLU(lu, level + 1, calc_timeout)

    return result
398

    
399
  def ExecOpCode(self, op, cbs):
    """Execute an opcode.

    Dispatches the opcode to its logical unit via L{DISPATCH_TABLE} and
    runs it, retrying with growing timeouts (see L{_LockTimeoutStrategy})
    whenever lock acquisition times out.

    @type op: an OpCode instance
    @param op: the opcode to be executed
    @type cbs: L{OpExecCbBase}
    @param cbs: Runtime callbacks
    @raise errors.ProgrammerError: if C{op} is not an opcode instance
    @raise errors.OpCodeUnknown: if no LU is registered for the opcode

    """
    if not isinstance(op, opcodes.OpCode):
      raise errors.ProgrammerError("Non-opcode instance passed"
                                   " to ExecOpcode")

    self._cbs = cbs
    try:
      lu_class = self.DISPATCH_TABLE.get(op.__class__, None)
      if lu_class is None:
        raise errors.OpCodeUnknown("Unknown opcode")

      timeout_strategy = _LockTimeoutStrategy()
      calc_timeout = timeout_strategy.CalcRemainingTimeout

      while True:
        try:
          # Acquire the Big Ganeti Lock exclusively if this LU requires it,
          # and in a shared fashion otherwise (to prevent concurrent run with
          # an exclusive LU)
          if self._AcquireLocks(locking.LEVEL_CLUSTER, locking.BGL,
                                not lu_class.REQ_BGL, calc_timeout()) is None:
            raise _LockAcquireTimeout()

          try:
            lu = lu_class(self, op, self.context, self.rpc)
            lu.ExpandNames()
            assert lu.needed_locks is not None, "needed_locks not set by LU"

            return self._LockAndExecLU(lu, locking.LEVEL_INSTANCE, calc_timeout)
          finally:
            self.context.glm.release(locking.LEVEL_CLUSTER)

        except _LockAcquireTimeout:
          # Timeout while waiting for lock, try again
          pass

        timeout_strategy.NextAttempt()

    finally:
      # Always clear the callbacks, even on errors
      self._cbs = None
447

    
448
  def _Feedback(self, *args):
    """Forward call to feedback callback function.

    Does nothing when no callbacks are registered.

    """
    if not self._cbs:
      return
    self._cbs.Feedback(*args)
454

    
455
  def LogStep(self, current, total, message):
    """Log a change in LU execution progress.

    @param current: the number of the current step
    @param total: the total number of steps
    @param message: description of the step

    """
    step = "STEP %d/%d %s" % (current, total, message)
    logging.debug("Step %d/%d %s", current, total, message)
    self._Feedback(step)
461

    
462
  def LogWarning(self, message, *args, **kwargs):
    """Log a warning to the logs and the user.

    The optional keyword argument is 'hint' and can be used to show a
    hint to the user (presumably related to the warning). If the
    message is empty, it will not be printed at all, allowing one to
    show only a hint.

    """
    assert not kwargs or (len(kwargs) == 1 and "hint" in kwargs), \
           "Invalid keyword arguments for LogWarning (%s)" % str(kwargs)

    # Interpolate positional arguments, if any
    if args:
      message = message % tuple(args)

    # An empty message suppresses the warning itself (hint-only mode)
    if message:
      logging.warning(message)
      self._Feedback(" - WARNING: %s" % message)

    try:
      hint = kwargs["hint"]
    except KeyError:
      pass
    else:
      self._Feedback("      Hint: %s" % hint)
480

    
481
  def LogInfo(self, message, *args):
    """Log an informational message to the logs and the user.

    """
    # Interpolate positional arguments, if any
    text = message
    if args:
      text = text % tuple(args)
    logging.info(text)
    self._Feedback(" - INFO: %s" % text)
489

    
490

    
491
class HooksMaster(object):
  """Hooks master.

  This class distributes the run commands to the nodes based on the
  specific LU class.

  In order to remove the direct dependency on the rpc module, the
  constructor needs a function which actually does the remote
  call. This will usually be rpc.call_hooks_runner, but any function
  which behaves the same works.

  """
  def __init__(self, callfn, lu):
    """Initializes the hooks master.

    @param callfn: function doing the actual remote hook call (usually
        C{rpc.RpcRunner.call_hooks_runner})
    @param lu: the logical unit on whose behalf the hooks are run

    """
    self.callfn = callfn
    self.lu = lu
    self.op = lu.op
    # The environment is built once; the per-phase target node lists are
    # stored in a dict keyed by hook phase
    self.env, node_list_pre, node_list_post = self._BuildEnv()
    self.node_list = {constants.HOOKS_PHASE_PRE: node_list_pre,
                      constants.HOOKS_PHASE_POST: node_list_post}
510

    
511
  def _BuildEnv(self):
    """Compute the environment and the target nodes.

    Based on the opcode and the current node list, this builds the
    environment for the hooks and the target node list for the run.

    @return: tuple of (environment dict, frozenset of pre-phase nodes,
        frozenset of post-phase nodes)

    """
    # Static part of the environment, common to every hook run
    env = {
      "PATH": "/sbin:/bin:/usr/sbin:/usr/bin",
      "GANETI_HOOKS_VERSION": constants.HOOKS_VERSION,
      "GANETI_OP_CODE": self.op.OP_ID,
      "GANETI_OBJECT_TYPE": self.lu.HTYPE,
      "GANETI_DATA_DIR": constants.DATA_DIR,
      }

    if self.lu.HPATH is None:
      # LU without a hooks path: no target nodes at all
      pre_nodes = post_nodes = []
    else:
      lu_env, pre_nodes, post_nodes = self.lu.BuildHooksEnv()
      if lu_env:
        # LU-specific variables get the GANETI_ prefix
        for key, value in lu_env.items():
          env["GANETI_" + key] = value

    return env, frozenset(pre_nodes), frozenset(post_nodes)
535

    
536
  def _RunWrapper(self, node_list, hpath, phase):
    """Simple wrapper over self.callfn.

    This method fixes the environment before doing the rpc call.

    @param node_list: list of node names the hooks should run on
    @param hpath: the hooks path of the LU
    @param phase: one of the HOOKS_PHASE_* constants
    @return: the result of C{self.callfn}

    """
    env = self.env.copy()
    env["GANETI_HOOKS_PHASE"] = phase
    env["GANETI_HOOKS_PATH"] = hpath
    # cfg can be None during cluster init, before a configuration exists
    if self.lu.cfg is not None:
      env["GANETI_CLUSTER"] = self.lu.cfg.GetClusterName()
      env["GANETI_MASTER"] = self.lu.cfg.GetMasterNode()

    # Hook scripts only receive string keys and values
    env = dict([(str(key), str(val)) for key, val in env.iteritems()])

    return self.callfn(node_list, hpath, phase, env)
552

    
553
  def RunPhase(self, phase, nodes=None):
    """Run all the scripts for a phase.

    This is the main function of the HookMaster.

    @param phase: one of L{constants.HOOKS_PHASE_POST} or
        L{constants.HOOKS_PHASE_PRE}; it denotes the hooks phase
    @param nodes: overrides the predefined list of nodes for the given phase
    @return: the processed results of the hooks multi-node rpc call
    @raise errors.HooksFailure: on communication failure to the nodes
    @raise errors.HooksAbort: on failure of one of the hooks

    """
    if not self.node_list[phase] and not nodes:
      # empty node list, we should not attempt to run this as either
      # we're in the cluster init phase and the rpc client part can't
      # even attempt to run, or this LU doesn't do hooks at all
      return
    hpath = self.lu.HPATH
    if nodes is not None:
      results = self._RunWrapper(nodes, hpath, phase)
    else:
      results = self._RunWrapper(self.node_list[phase], hpath, phase)
    errs = []
    if not results:
      # A completely empty result means the rpc call itself failed
      msg = "Communication Failure"
      if phase == constants.HOOKS_PHASE_PRE:
        raise errors.HooksFailure(msg)
      else:
        # Post-phase failures only warn, the operation already ran
        self.lu.LogWarning(msg)
        return results
    for node_name in results:
      res = results[node_name]
      # Offline nodes are skipped silently
      if res.offline:
        continue
      msg = res.fail_msg
      if msg:
        self.lu.LogWarning("Communication failure to node %s: %s",
                           node_name, msg)
        continue
      # payload is a list of (script, hook result, output) tuples
      for script, hkr, output in res.payload:
        if hkr == constants.HKR_FAIL:
          if phase == constants.HOOKS_PHASE_PRE:
            # Pre-phase script failures abort the operation (below)
            errs.append((node_name, script, output))
          else:
            if not output:
              output = "(no output)"
            self.lu.LogWarning("On %s script %s failed, output: %s" %
                               (node_name, script, output))
    if errs and phase == constants.HOOKS_PHASE_PRE:
      raise errors.HooksAbort(errs)
    return results
605

    
606
  def RunConfigUpdate(self):
    """Run the special configuration update hook

    This is a special hook that runs only on the master after each
    top-level LU if the configuration has been updated.

    """
    phase = constants.HOOKS_PHASE_POST
    hpath = constants.HOOKS_NAME_CFGUPDATE
    # This hook runs only on the master node
    nodes = [self.lu.cfg.GetMasterNode()]
    self._RunWrapper(nodes, hpath, phase)