root / lib / mcpu.py @ 7260cfbe


#
#

# Copyright (C) 2006, 2007 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.


"""Module implementing the logic behind the cluster operations

This module implements the logic for doing operations in the cluster. There
are two kinds of classes defined:
  - logical units, which know how to deal with their specific opcode only
  - the processor, which dispatches the opcodes to their logical units

"""

import logging
import random
import time

from ganeti import opcodes
from ganeti import constants
from ganeti import errors
from ganeti import rpc
from ganeti import cmdlib
from ganeti import locking


class _LockAcquireTimeout(Exception):
  """Internal exception to report timeouts on acquiring locks.

  """


def _CalculateLockAttemptTimeouts():
  """Calculate timeouts for lock attempts.

  """
  result = [1.0]

  # Wait for a total of at least 150s before doing a blocking acquire
  while sum(result) < 150.0:
    timeout = (result[-1] * 1.05) ** 1.25

    # Cap timeout at 10 seconds. This gives other jobs a chance to run
    # even if we're still trying to get our locks, before finally moving
    # to a blocking acquire.
    if timeout > 10.0:
      timeout = 10.0

    elif timeout < 0.1:
      # Lower boundary for safety
      timeout = 0.1

    result.append(timeout)

  return result
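
# Illustrative check (not part of the original module): the series starts at
# 1.0s, ramps up slowly and is capped at 10.0s per attempt, with the total
# crossing the 150s threshold before the blocking acquire kicks in:
#
#   >>> timeouts = _CalculateLockAttemptTimeouts()
#   >>> timeouts[0], timeouts[-1]
#   (1.0, 10.0)
#   >>> sum(timeouts) >= 150.0
#   True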


class _LockAttemptTimeoutStrategy(object):
  """Class with lock acquire timeout strategy.

  """
  __slots__ = [
    "_attempt",
    "_random_fn",
    "_start_time",
    "_time_fn",
    "_running_timeout",
    ]

  _TIMEOUT_PER_ATTEMPT = _CalculateLockAttemptTimeouts()

  def __init__(self, attempt=0, _time_fn=time.time, _random_fn=random.random):
    """Initializes this class.

    @type attempt: int
    @param attempt: Current attempt number
    @param _time_fn: Time function for unittests
    @param _random_fn: Random number generator for unittests

    """
    object.__init__(self)

    if attempt < 0:
      raise ValueError("Attempt must be zero or positive")

    self._attempt = attempt
    self._time_fn = _time_fn
    self._random_fn = _random_fn

    try:
      timeout = self._TIMEOUT_PER_ATTEMPT[attempt]
    except IndexError:
      # No more timeouts, do blocking acquire
      timeout = None

    self._running_timeout = locking.RunningTimeout(timeout, False,
                                                   _time_fn=_time_fn)

  def NextAttempt(self):
    """Returns the strategy for the next attempt.

    """
    return _LockAttemptTimeoutStrategy(attempt=self._attempt + 1,
                                       _time_fn=self._time_fn,
                                       _random_fn=self._random_fn)

  def CalcRemainingTimeout(self):
    """Returns the remaining timeout.

    """
    timeout = self._running_timeout.Remaining()

    if timeout is not None:
      # Add a small variation (-/+ 5%) to timeout. This helps in situations
      # where two or more jobs are fighting for the same lock(s).
      variation_range = timeout * 0.1
      timeout += ((self._random_fn() * variation_range) -
                  (variation_range * 0.5))

    return timeout
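
# Illustrative sketch (not part of the original module) of how the processor
# below consumes this class: each retry gets a fresh strategy object via
# NextAttempt(), and once _TIMEOUT_PER_ATTEMPT is exhausted,
# CalcRemainingTimeout() returns None, which callers treat as "do a
# blocking acquire":
#
#   strategy = _LockAttemptTimeoutStrategy()
#   timeout = strategy.CalcRemainingTimeout()  # float (jittered) or None
#   strategy = strategy.NextAttempt()          # strategy for the next try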


class OpExecCbBase: # pylint: disable-msg=W0232
  """Base class for OpCode execution callbacks.

  """
  def NotifyStart(self):
    """Called when we are about to execute the LU.

    This function is called when we're about to start the lu's Exec() method,
    that is, after we have acquired all locks.

    """

  def Feedback(self, *args):
    """Sends feedback from the LU code to the end-user.

    """

  def ReportLocks(self, msg):
    """Report lock operations.

    """


class Processor(object):
  """Object which runs OpCodes"""
  DISPATCH_TABLE = {
    # Cluster
    opcodes.OpPostInitCluster: cmdlib.LUPostInitCluster,
    opcodes.OpDestroyCluster: cmdlib.LUDestroyCluster,
    opcodes.OpQueryClusterInfo: cmdlib.LUQueryClusterInfo,
    opcodes.OpVerifyCluster: cmdlib.LUVerifyCluster,
    opcodes.OpQueryConfigValues: cmdlib.LUQueryConfigValues,
    opcodes.OpRenameCluster: cmdlib.LURenameCluster,
    opcodes.OpVerifyDisks: cmdlib.LUVerifyDisks,
    opcodes.OpSetClusterParams: cmdlib.LUSetClusterParams,
    opcodes.OpRedistributeConfig: cmdlib.LURedistributeConfig,
    opcodes.OpRepairDiskSizes: cmdlib.LURepairDiskSizes,
    # node lu
    opcodes.OpAddNode: cmdlib.LUAddNode,
    opcodes.OpQueryNodes: cmdlib.LUQueryNodes,
    opcodes.OpQueryNodeVolumes: cmdlib.LUQueryNodeVolumes,
    opcodes.OpQueryNodeStorage: cmdlib.LUQueryNodeStorage,
    opcodes.OpModifyNodeStorage: cmdlib.LUModifyNodeStorage,
    opcodes.OpRepairNodeStorage: cmdlib.LURepairNodeStorage,
    opcodes.OpRemoveNode: cmdlib.LURemoveNode,
    opcodes.OpSetNodeParams: cmdlib.LUSetNodeParams,
    opcodes.OpPowercycleNode: cmdlib.LUPowercycleNode,
    opcodes.OpEvacuateNode: cmdlib.LUEvacuateNode,
    opcodes.OpMigrateNode: cmdlib.LUMigrateNode,
    # instance lu
    opcodes.OpCreateInstance: cmdlib.LUCreateInstance,
    opcodes.OpReinstallInstance: cmdlib.LUReinstallInstance,
    opcodes.OpRemoveInstance: cmdlib.LURemoveInstance,
    opcodes.OpRenameInstance: cmdlib.LURenameInstance,
    opcodes.OpActivateInstanceDisks: cmdlib.LUActivateInstanceDisks,
    opcodes.OpShutdownInstance: cmdlib.LUShutdownInstance,
    opcodes.OpStartupInstance: cmdlib.LUStartupInstance,
    opcodes.OpRebootInstance: cmdlib.LURebootInstance,
    opcodes.OpDeactivateInstanceDisks: cmdlib.LUDeactivateInstanceDisks,
    opcodes.OpReplaceDisks: cmdlib.LUReplaceDisks,
    opcodes.OpRecreateInstanceDisks: cmdlib.LURecreateInstanceDisks,
    opcodes.OpFailoverInstance: cmdlib.LUFailoverInstance,
    opcodes.OpMigrateInstance: cmdlib.LUMigrateInstance,
    opcodes.OpMoveInstance: cmdlib.LUMoveInstance,
    opcodes.OpConnectConsole: cmdlib.LUConnectConsole,
    opcodes.OpQueryInstances: cmdlib.LUQueryInstances,
    opcodes.OpQueryInstanceData: cmdlib.LUQueryInstanceData,
    opcodes.OpSetInstanceParams: cmdlib.LUSetInstanceParams,
    opcodes.OpGrowDisk: cmdlib.LUGrowDisk,
    # os lu
    opcodes.OpDiagnoseOS: cmdlib.LUDiagnoseOS,
    # exports lu
    opcodes.OpQueryExports: cmdlib.LUQueryExports,
    opcodes.OpExportInstance: cmdlib.LUExportInstance,
    opcodes.OpRemoveExport: cmdlib.LURemoveExport,
    # tags lu
    opcodes.OpGetTags: cmdlib.LUGetTags,
    opcodes.OpSearchTags: cmdlib.LUSearchTags,
    opcodes.OpAddTags: cmdlib.LUAddTags,
    opcodes.OpDelTags: cmdlib.LUDelTags,
    # test lu
    opcodes.OpTestDelay: cmdlib.LUTestDelay,
    opcodes.OpTestAllocator: cmdlib.LUTestAllocator,
    }

  def __init__(self, context, ec_id):
    """Constructor for Processor

    @type context: GanetiContext
    @param context: global Ganeti context
    @type ec_id: string
    @param ec_id: execution context identifier

    """
    self.context = context
    self._ec_id = ec_id
    self._cbs = None
    self.rpc = rpc.RpcRunner(context.cfg)
    self.hmclass = HooksMaster

  def _ReportLocks(self, level, names, shared, timeout, acquired, result):
    """Reports lock operations.

    @type level: int
    @param level: Lock level
    @type names: list or string
    @param names: Lock names
    @type shared: bool
    @param shared: Whether the locks should be acquired in shared mode
    @type timeout: None or float
    @param timeout: Timeout for acquiring the locks
    @type acquired: bool
    @param acquired: Whether the locks have already been acquired
    @type result: None or set
    @param result: Result from L{locking.GanetiLockManager.acquire}

    """
    parts = []

    # Build message
    if acquired:
      if result is None:
        parts.append("timeout")
      else:
        parts.append("acquired")
    else:
      parts.append("waiting")
      if timeout is None:
        parts.append("blocking")
      else:
        parts.append("timeout=%0.6fs" % timeout)

    parts.append(locking.LEVEL_NAMES[level])

    if names == locking.ALL_SET:
      parts.append("ALL")
    elif isinstance(names, basestring):
      parts.append(names)
    else:
      parts.append(",".join(names))

    if shared:
      parts.append("shared")
    else:
      parts.append("exclusive")

    msg = "/".join(parts)

    logging.debug("LU locks %s", msg)

    if self._cbs:
      self._cbs.ReportLocks(msg)
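
  # Illustrative example (not part of the original module) of a message
  # built above, for a shared acquire of two locks while still waiting with
  # a timeout ("<level name>" stands for the locking.LEVEL_NAMES entry):
  #
  #   waiting/timeout=9.932752s/<level name>/node1.example.com,node2.example.com/shared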

  def _AcquireLocks(self, level, names, shared, timeout):
    """Acquires locks via the Ganeti lock manager.

    @type level: int
    @param level: Lock level
    @type names: list or string
    @param names: Lock names
    @type shared: bool
    @param shared: Whether the locks should be acquired in shared mode
    @type timeout: None or float
    @param timeout: Timeout for acquiring the locks

    """
    self._ReportLocks(level, names, shared, timeout, False, None)

    acquired = self.context.glm.acquire(level, names, shared=shared,
                                        timeout=timeout)

    self._ReportLocks(level, names, shared, timeout, True, acquired)

    return acquired

  def _ExecLU(self, lu):
    """Logical Unit execution sequence.

    """
    write_count = self.context.cfg.write_count
    lu.CheckPrereq()
    hm = HooksMaster(self.rpc.call_hooks_runner, lu)
    h_results = hm.RunPhase(constants.HOOKS_PHASE_PRE)
    lu.HooksCallBack(constants.HOOKS_PHASE_PRE, h_results,
                     self._Feedback, None)

    if getattr(lu.op, "dry_run", False):
      # in this mode, no post-hooks are run, and the config is not
      # written (as it might have been modified by another LU, and we
      # shouldn't do writeout on behalf of other threads)
      self.LogInfo("dry-run mode requested, not actually executing"
                   " the operation")
      return lu.dry_run_result

    try:
      result = lu.Exec(self._Feedback)
      h_results = hm.RunPhase(constants.HOOKS_PHASE_POST)
      result = lu.HooksCallBack(constants.HOOKS_PHASE_POST, h_results,
                                self._Feedback, result)
    finally:
      # FIXME: This needs locks if not lu_class.REQ_BGL
      if write_count != self.context.cfg.write_count:
        hm.RunConfigUpdate()

    return result
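
  # Illustrative summary (not part of the original module) of the sequence
  # implemented above:
  #
  #   CheckPrereq -> pre-hooks -> Exec -> post-hooks -> config-update hook
  #
  # with dry_run short-circuiting right after the pre-hook phase, and the
  # config-update hook running only if the configuration was written.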

  def _LockAndExecLU(self, lu, level, calc_timeout):
    """Execute a Logical Unit, with the needed locks.

    This is a recursive function that starts locking the given level, and
    proceeds up, till there are no more locks to acquire. Then it executes the
    given LU and its opcodes.

    """
    adding_locks = level in lu.add_locks
    acquiring_locks = level in lu.needed_locks
    if level not in locking.LEVELS:
      if self._cbs:
        self._cbs.NotifyStart()

      result = self._ExecLU(lu)

    elif adding_locks and acquiring_locks:
      # We could both acquire and add locks at the same level, but for now we
      # don't need this, so we'll avoid the complicated code needed.
      raise NotImplementedError("Can't declare locks to acquire when adding"
                                " others")

    elif adding_locks or acquiring_locks:
      lu.DeclareLocks(level)
      share = lu.share_locks[level]

      try:
        assert adding_locks ^ acquiring_locks, \
          "Locks must be either added or acquired"

        if acquiring_locks:
          # Acquiring locks
          needed_locks = lu.needed_locks[level]

          acquired = self._AcquireLocks(level, needed_locks, share,
                                        calc_timeout())

          if acquired is None:
            raise _LockAcquireTimeout()

        else:
          # Adding locks
          add_locks = lu.add_locks[level]
          lu.remove_locks[level] = add_locks

          try:
            self.context.glm.add(level, add_locks, acquired=1, shared=share)
          except errors.LockError:
            raise errors.OpPrereqError(
              "Couldn't add locks (%s), probably because of a race condition"
              " with another job, which added them first" % add_locks,
              errors.ECODE_FAULT)

          acquired = add_locks

        try:
          lu.acquired_locks[level] = acquired

          result = self._LockAndExecLU(lu, level + 1, calc_timeout)
        finally:
          if level in lu.remove_locks:
            self.context.glm.remove(level, lu.remove_locks[level])
      finally:
        if self.context.glm.is_owned(level):
          self.context.glm.release(level)

    else:
      result = self._LockAndExecLU(lu, level + 1, calc_timeout)

    return result
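
  # Illustrative sketch (not part of the original module): the recursion
  # above walks the lock levels in increasing order; once the level is past
  # the last entry in locking.LEVELS, the LU itself is executed. Roughly:
  #
  #   _LockAndExecLU(lu, first_level)    # acquires/adds locks at this level
  #     _LockAndExecLU(lu, next_level)   # ... and so on per level
  #       _LockAndExecLU(lu, past_last)  # no more levels: runs _ExecLU(lu)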

  def ExecOpCode(self, op, cbs):
    """Execute an opcode.

    @type op: an OpCode instance
    @param op: the opcode to be executed
    @type cbs: L{OpExecCbBase}
    @param cbs: Runtime callbacks

    """
    if not isinstance(op, opcodes.OpCode):
      raise errors.ProgrammerError("Non-opcode instance passed"
                                   " to ExecOpCode")

    self._cbs = cbs
    try:
      lu_class = self.DISPATCH_TABLE.get(op.__class__, None)
      if lu_class is None:
        raise errors.OpCodeUnknown("Unknown opcode")

      timeout_strategy = _LockAttemptTimeoutStrategy()

      while True:
        try:
          acquire_timeout = timeout_strategy.CalcRemainingTimeout()

          # Acquire the Big Ganeti Lock exclusively if this LU requires it,
          # and in a shared fashion otherwise (to prevent concurrent run with
          # an exclusive LU)
          if self._AcquireLocks(locking.LEVEL_CLUSTER, locking.BGL,
                                not lu_class.REQ_BGL, acquire_timeout) is None:
            raise _LockAcquireTimeout()

          try:
            lu = lu_class(self, op, self.context, self.rpc)
            lu.ExpandNames()
            assert lu.needed_locks is not None, "needed_locks not set by LU"

            try:
              return self._LockAndExecLU(lu, locking.LEVEL_INSTANCE,
                                         timeout_strategy.CalcRemainingTimeout)
            finally:
              if self._ec_id:
                self.context.cfg.DropECReservations(self._ec_id)

          finally:
            self.context.glm.release(locking.LEVEL_CLUSTER)

        except _LockAcquireTimeout:
          # Timeout while waiting for lock, try again
          pass

        timeout_strategy = timeout_strategy.NextAttempt()

    finally:
      self._cbs = None
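
  # Illustrative sketch (not part of the original module): how a caller such
  # as the job queue might drive the processor. "ctx" is assumed to be a
  # fully initialized GanetiContext, the execution context id is made up,
  # and _StdoutCallbacks refers to the sketch earlier in this file:
  #
  #   proc = Processor(ctx, "some-unique-ec-id")
  #   result = proc.ExecOpCode(opcodes.OpTestDelay(duration=1.0,
  #                                                on_master=True,
  #                                                on_nodes=[]),
  #                            _StdoutCallbacks())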

  def _Feedback(self, *args):
    """Forward call to feedback callback function.

    """
    if self._cbs:
      self._cbs.Feedback(*args)

  def LogStep(self, current, total, message):
    """Log a change in LU execution progress.

    """
    logging.debug("Step %d/%d %s", current, total, message)
    self._Feedback("STEP %d/%d %s" % (current, total, message))

  def LogWarning(self, message, *args, **kwargs):
    """Log a warning to the logs and the user.

    The optional keyword argument is 'hint' and can be used to show a
    hint to the user (presumably related to the warning). If the
    message is empty, it will not be printed at all, allowing one to
    show only a hint.

    """
    assert not kwargs or (len(kwargs) == 1 and "hint" in kwargs), \
           "Invalid keyword arguments for LogWarning (%s)" % str(kwargs)
    if args:
      message = message % tuple(args)
    if message:
      logging.warning(message)
      self._Feedback(" - WARNING: %s" % message)
    if "hint" in kwargs:
      self._Feedback("      Hint: %s" % kwargs["hint"])

  def LogInfo(self, message, *args):
    """Log an informational message to the logs and the user.

    """
    if args:
      message = message % tuple(args)
    logging.info(message)
    self._Feedback(" - INFO: %s" % message)

  def GetECId(self):
    if not self._ec_id:
      raise errors.ProgrammerError("Tried to use execution context id when"
                                   " not set")
    return self._ec_id


class HooksMaster(object):
  """Hooks master.

  This class distributes the run commands to the nodes based on the
  specific LU class.

  In order to remove the direct dependency on the rpc module, the
  constructor needs a function which actually does the remote
  call. This will usually be rpc.call_hooks_runner, but any function
  which behaves the same works.

  """
  def __init__(self, callfn, lu):
    self.callfn = callfn
    self.lu = lu
    self.op = lu.op
    self.env, node_list_pre, node_list_post = self._BuildEnv()
    self.node_list = {constants.HOOKS_PHASE_PRE: node_list_pre,
                      constants.HOOKS_PHASE_POST: node_list_post}
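
  # Illustrative sketch (not part of the original module): any stand-in for
  # rpc.call_hooks_runner must accept the same four arguments that
  # _RunWrapper below passes, e.g. for unit tests:
  #
  #   def _FakeHooksRunner(node_list, hpath, phase, env):
  #     return {}  # pretend no node replied
  #
  #   hm = HooksMaster(_FakeHooksRunner, lu)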

  def _BuildEnv(self):
    """Compute the environment and the target nodes.

    Based on the opcode and the current node list, this builds the
    environment for the hooks and the target node list for the run.

    """
    env = {
      "PATH": "/sbin:/bin:/usr/sbin:/usr/bin",
      "GANETI_HOOKS_VERSION": constants.HOOKS_VERSION,
      "GANETI_OP_CODE": self.op.OP_ID,
      "GANETI_OBJECT_TYPE": self.lu.HTYPE,
      "GANETI_DATA_DIR": constants.DATA_DIR,
      }

    if self.lu.HPATH is not None:
      lu_env, lu_nodes_pre, lu_nodes_post = self.lu.BuildHooksEnv()
      if lu_env:
        for key in lu_env:
          env["GANETI_" + key] = lu_env[key]
    else:
      lu_nodes_pre = lu_nodes_post = []

    return env, frozenset(lu_nodes_pre), frozenset(lu_nodes_post)
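
  # Illustrative example (not part of the original module): if the LU
  # exports {"INSTANCE_NAME": "inst1"} via BuildHooksEnv(), the resulting
  # environment contains, among others:
  #
  #   GANETI_OP_CODE=<self.op.OP_ID>
  #   GANETI_INSTANCE_NAME=inst1
  #   GANETI_HOOKS_VERSION=<constants.HOOKS_VERSION>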

  def _RunWrapper(self, node_list, hpath, phase):
    """Simple wrapper over self.callfn.

    This method fixes the environment before doing the rpc call.

    """
    env = self.env.copy()
    env["GANETI_HOOKS_PHASE"] = phase
    env["GANETI_HOOKS_PATH"] = hpath
    if self.lu.cfg is not None:
      env["GANETI_CLUSTER"] = self.lu.cfg.GetClusterName()
      env["GANETI_MASTER"] = self.lu.cfg.GetMasterNode()

    env = dict([(str(key), str(val)) for key, val in env.iteritems()])

    return self.callfn(node_list, hpath, phase, env)

  def RunPhase(self, phase, nodes=None):
    """Run all the scripts for a phase.

    This is the main function of the HooksMaster.

    @param phase: one of L{constants.HOOKS_PHASE_POST} or
        L{constants.HOOKS_PHASE_PRE}; it denotes the hooks phase
    @param nodes: overrides the predefined list of nodes for the given phase
    @return: the processed results of the hooks multi-node rpc call
    @raise errors.HooksFailure: on communication failure to the nodes
    @raise errors.HooksAbort: on failure of one of the hooks

    """
    if not self.node_list[phase] and not nodes:
      # empty node list, we should not attempt to run this as either
      # we're in the cluster init phase and the rpc client part can't
      # even attempt to run, or this LU doesn't do hooks at all
      return
    hpath = self.lu.HPATH
    if nodes is not None:
      results = self._RunWrapper(nodes, hpath, phase)
    else:
      results = self._RunWrapper(self.node_list[phase], hpath, phase)
    errs = []
    if not results:
      msg = "Communication Failure"
      if phase == constants.HOOKS_PHASE_PRE:
        raise errors.HooksFailure(msg)
      else:
        self.lu.LogWarning(msg)
        return results
    for node_name in results:
      res = results[node_name]
      if res.offline:
        continue
      msg = res.fail_msg
      if msg:
        self.lu.LogWarning("Communication failure to node %s: %s",
                           node_name, msg)
        continue
      for script, hkr, output in res.payload:
        if hkr == constants.HKR_FAIL:
          if phase == constants.HOOKS_PHASE_PRE:
            errs.append((node_name, script, output))
          else:
            if not output:
              output = "(no output)"
            self.lu.LogWarning("On %s script %s failed, output: %s" %
                               (node_name, script, output))
    if errs and phase == constants.HOOKS_PHASE_PRE:
      raise errors.HooksAbort(errs)
    return results

  def RunConfigUpdate(self):
    """Run the special configuration update hook

    This is a special hook that runs only on the master after each
    top-level LU if the configuration has been updated.

    """
    phase = constants.HOOKS_PHASE_POST
    hpath = constants.HOOKS_NAME_CFGUPDATE
    nodes = [self.lu.cfg.GetMasterNode()]
    self._RunWrapper(nodes, hpath, phase)