#
#

# Copyright (C) 2006, 2007 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.


"""Module implementing the logic behind the cluster operations

This module implements the logic for doing operations in the cluster. There
are two kinds of classes defined:
  - logical units, which know how to deal with their specific opcode only
  - the processor, which dispatches the opcodes to their logical units

"""

import logging
import random
import time

from ganeti import opcodes
from ganeti import constants
from ganeti import errors
from ganeti import rpc
from ganeti import cmdlib
from ganeti import locking
from ganeti import utils


class LockAcquireTimeout(Exception):
  """Exception to report timeouts on acquiring locks.

  """


def _CalculateLockAttemptTimeouts():
  """Calculate timeouts for lock attempts.

  """
  result = [1.0]

  # Wait for a total of at least 150s before doing a blocking acquire
  while sum(result) < 150.0:
    timeout = (result[-1] * 1.05) ** 1.25

    # Cap timeout at 10 seconds. This gives other jobs a chance to run
    # even if we're still trying to get our locks, before finally moving
    # to a blocking acquire.
    if timeout > 10.0:
      timeout = 10.0

    elif timeout < 0.1:
      # Lower boundary for safety
      timeout = 0.1

    result.append(timeout)

  return result


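# Illustrative sketch (approximate, derived from the formula above): the
# per-attempt timeouts produced by _CalculateLockAttemptTimeouts() start
# at 1.0s and grow steadily up to the 10.0s cap, summing to at least 150s
# before the final blocking acquire:
#
#   >>> timeouts = _CalculateLockAttemptTimeouts()
#   >>> timeouts[0], timeouts[-1]
#   (1.0, 10.0)
#   >>> sum(timeouts) >= 150.0
#   True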
class LockAttemptTimeoutStrategy(object):
  """Class with lock acquire timeout strategy.

  """
  __slots__ = [
    "_timeouts",
    "_random_fn",
    "_time_fn",
    ]

  _TIMEOUT_PER_ATTEMPT = _CalculateLockAttemptTimeouts()

  def __init__(self, _time_fn=time.time, _random_fn=random.random):
    """Initializes this class.

    @param _time_fn: Time function for unittests
    @param _random_fn: Random number generator for unittests

    """
    object.__init__(self)

    self._timeouts = iter(self._TIMEOUT_PER_ATTEMPT)
    self._time_fn = _time_fn
    self._random_fn = _random_fn

  def NextAttempt(self):
    """Returns the timeout for the next attempt.

    """
    try:
      timeout = self._timeouts.next()
    except StopIteration:
      # No more timeouts, do blocking acquire
      timeout = None

    if timeout is not None:
      # Add a small variation (-/+ 5%) to timeout. This helps in situations
      # where two or more jobs are fighting for the same lock(s).
      variation_range = timeout * 0.1
      timeout += ((self._random_fn() * variation_range) -
                  (variation_range * 0.5))

    return timeout


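# Usage sketch (hypothetical caller, not part of this module): a job
# executor would retry lock acquisition with the increasing per-attempt
# timeouts until NextAttempt() returns None, which signals that the final
# attempt should block:
#
#   strategy = LockAttemptTimeoutStrategy()
#   while True:
#     timeout = strategy.NextAttempt()  # float, or None for the last try
#     try:
#       result = proc.ExecOpCode(op, cbs, timeout=timeout)  # names assumed
#       break
#     except LockAcquireTimeout:
#       continue  # not acquired in time; retry with a longer timeout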
class OpExecCbBase: # pylint: disable-msg=W0232
  """Base class for OpCode execution callbacks.

  """
  def NotifyStart(self):
    """Called when we are about to execute the LU.

    This function is called when we're about to start the lu's Exec() method,
    that is, after we have acquired all locks.

    """

  def Feedback(self, *args):
    """Sends feedback from the LU code to the end-user.

    """

  def CheckCancel(self):
    """Check whether job has been cancelled.

    """


class Processor(object):
  """Object which runs OpCodes"""
  DISPATCH_TABLE = {
    # Cluster
    opcodes.OpPostInitCluster: cmdlib.LUPostInitCluster,
    opcodes.OpDestroyCluster: cmdlib.LUDestroyCluster,
    opcodes.OpQueryClusterInfo: cmdlib.LUQueryClusterInfo,
    opcodes.OpVerifyCluster: cmdlib.LUVerifyCluster,
    opcodes.OpQueryConfigValues: cmdlib.LUQueryConfigValues,
    opcodes.OpRenameCluster: cmdlib.LURenameCluster,
    opcodes.OpVerifyDisks: cmdlib.LUVerifyDisks,
    opcodes.OpSetClusterParams: cmdlib.LUSetClusterParams,
    opcodes.OpRedistributeConfig: cmdlib.LURedistributeConfig,
    opcodes.OpRepairDiskSizes: cmdlib.LURepairDiskSizes,
    opcodes.OpQuery: cmdlib.LUQuery,
    opcodes.OpQueryFields: cmdlib.LUQueryFields,
    # node lu
    opcodes.OpAddNode: cmdlib.LUAddNode,
    opcodes.OpQueryNodes: cmdlib.LUQueryNodes,
    opcodes.OpQueryNodeVolumes: cmdlib.LUQueryNodeVolumes,
    opcodes.OpQueryNodeStorage: cmdlib.LUQueryNodeStorage,
    opcodes.OpModifyNodeStorage: cmdlib.LUModifyNodeStorage,
    opcodes.OpRepairNodeStorage: cmdlib.LURepairNodeStorage,
    opcodes.OpRemoveNode: cmdlib.LURemoveNode,
    opcodes.OpSetNodeParams: cmdlib.LUSetNodeParams,
    opcodes.OpPowercycleNode: cmdlib.LUPowercycleNode,
    opcodes.OpMigrateNode: cmdlib.LUMigrateNode,
    opcodes.OpNodeEvacuationStrategy: cmdlib.LUNodeEvacuationStrategy,
    # instance lu
    opcodes.OpCreateInstance: cmdlib.LUCreateInstance,
    opcodes.OpReinstallInstance: cmdlib.LUReinstallInstance,
    opcodes.OpRemoveInstance: cmdlib.LURemoveInstance,
    opcodes.OpRenameInstance: cmdlib.LURenameInstance,
    opcodes.OpActivateInstanceDisks: cmdlib.LUActivateInstanceDisks,
    opcodes.OpShutdownInstance: cmdlib.LUShutdownInstance,
    opcodes.OpStartupInstance: cmdlib.LUStartupInstance,
    opcodes.OpRebootInstance: cmdlib.LURebootInstance,
    opcodes.OpDeactivateInstanceDisks: cmdlib.LUDeactivateInstanceDisks,
    opcodes.OpReplaceDisks: cmdlib.LUReplaceDisks,
    opcodes.OpRecreateInstanceDisks: cmdlib.LURecreateInstanceDisks,
    opcodes.OpFailoverInstance: cmdlib.LUFailoverInstance,
    opcodes.OpMigrateInstance: cmdlib.LUMigrateInstance,
    opcodes.OpMoveInstance: cmdlib.LUMoveInstance,
    opcodes.OpConnectConsole: cmdlib.LUConnectConsole,
    opcodes.OpQueryInstances: cmdlib.LUQueryInstances,
    opcodes.OpQueryInstanceData: cmdlib.LUQueryInstanceData,
    opcodes.OpSetInstanceParams: cmdlib.LUSetInstanceParams,
    opcodes.OpGrowDisk: cmdlib.LUGrowDisk,
    # node group lu
    opcodes.OpAddGroup: cmdlib.LUAddGroup,
    opcodes.OpQueryGroups: cmdlib.LUQueryGroups,
    opcodes.OpSetGroupParams: cmdlib.LUSetGroupParams,
    opcodes.OpRemoveGroup: cmdlib.LURemoveGroup,
    opcodes.OpRenameGroup: cmdlib.LURenameGroup,
    # os lu
    opcodes.OpDiagnoseOS: cmdlib.LUDiagnoseOS,
    # exports lu
    opcodes.OpQueryExports: cmdlib.LUQueryExports,
    opcodes.OpPrepareExport: cmdlib.LUPrepareExport,
    opcodes.OpExportInstance: cmdlib.LUExportInstance,
    opcodes.OpRemoveExport: cmdlib.LURemoveExport,
    # tags lu
    opcodes.OpGetTags: cmdlib.LUGetTags,
    opcodes.OpSearchTags: cmdlib.LUSearchTags,
    opcodes.OpAddTags: cmdlib.LUAddTags,
    opcodes.OpDelTags: cmdlib.LUDelTags,
    # test lu
    opcodes.OpTestDelay: cmdlib.LUTestDelay,
    opcodes.OpTestAllocator: cmdlib.LUTestAllocator,
    opcodes.OpTestJobqueue: cmdlib.LUTestJobqueue,
    # OOB lu
    opcodes.OpOobCommand: cmdlib.LUOobCommand,
    }

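  # Example (illustrative): dispatch is a plain lookup on the opcode's
  # class, so e.g. an OpTestDelay instance is handled by cmdlib.LUTestDelay:
  #
  #   op = opcodes.OpTestDelay(duration=1.0)  # parameters indicative only
  #   lu_class = Processor.DISPATCH_TABLE[op.__class__]  # cmdlib.LUTestDelay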
  def __init__(self, context, ec_id):
    """Constructor for Processor

    @type context: GanetiContext
    @param context: global Ganeti context
    @type ec_id: string
    @param ec_id: execution context identifier

    """
    self.context = context
    self._ec_id = ec_id
    self._cbs = None
    self.rpc = rpc.RpcRunner(context.cfg)
    self.hmclass = HooksMaster

  def _AcquireLocks(self, level, names, shared, timeout, priority):
    """Acquires locks via the Ganeti lock manager.

    @type level: int
    @param level: Lock level
    @type names: list or string
    @param names: Lock names
    @type shared: bool
    @param shared: Whether the locks should be acquired in shared mode
    @type timeout: None or float
    @param timeout: Timeout for acquiring the locks
    @raise LockAcquireTimeout: In case locks couldn't be acquired in the
        specified amount of time

    """
    if self._cbs:
      self._cbs.CheckCancel()

    acquired = self.context.glm.acquire(level, names, shared=shared,
                                        timeout=timeout, priority=priority)

    if acquired is None:
      raise LockAcquireTimeout()

    return acquired

  def _ExecLU(self, lu):
    """Logical Unit execution sequence.

    """
    write_count = self.context.cfg.write_count
    lu.CheckPrereq()
    hm = HooksMaster(self.rpc.call_hooks_runner, lu)
    h_results = hm.RunPhase(constants.HOOKS_PHASE_PRE)
    lu.HooksCallBack(constants.HOOKS_PHASE_PRE, h_results,
                     self.Log, None)

    if getattr(lu.op, "dry_run", False):
      # in this mode, no post-hooks are run, and the config is not
      # written (as it might have been modified by another LU, and we
      # shouldn't do writeout on behalf of other threads)
      self.LogInfo("dry-run mode requested, not actually executing"
                   " the operation")
      return lu.dry_run_result

    try:
      result = lu.Exec(self.Log)
      h_results = hm.RunPhase(constants.HOOKS_PHASE_POST)
      result = lu.HooksCallBack(constants.HOOKS_PHASE_POST, h_results,
                                self.Log, result)
    finally:
      # FIXME: This needs locks if not lu_class.REQ_BGL
      if write_count != self.context.cfg.write_count:
        hm.RunConfigUpdate()

    return result

  def _LockAndExecLU(self, lu, level, calc_timeout, priority):
    """Execute a Logical Unit, with the needed locks.

    This is a recursive function that starts locking at the given level, and
    proceeds up until there are no more locks to acquire. Then it executes
    the given LU and its opcode.

    """
    adding_locks = level in lu.add_locks
    acquiring_locks = level in lu.needed_locks
    if level not in locking.LEVELS:
      if self._cbs:
        self._cbs.NotifyStart()

      result = self._ExecLU(lu)

    elif adding_locks and acquiring_locks:
      # We could both acquire and add locks at the same level, but for now we
      # don't need this, so we'll avoid the complicated code needed.
      raise NotImplementedError("Can't declare locks to acquire when adding"
                                " others")

    elif adding_locks or acquiring_locks:
      lu.DeclareLocks(level)
      share = lu.share_locks[level]

      try:
        assert adding_locks ^ acquiring_locks, \
          "Locks must be either added or acquired"

        if acquiring_locks:
          # Acquiring locks
          needed_locks = lu.needed_locks[level]

          acquired = self._AcquireLocks(level, needed_locks, share,
                                        calc_timeout(), priority)
        else:
          # Adding locks
          add_locks = lu.add_locks[level]
          lu.remove_locks[level] = add_locks

          try:
            self.context.glm.add(level, add_locks, acquired=1, shared=share)
          except errors.LockError:
            raise errors.OpPrereqError(
              "Couldn't add locks (%s), probably because of a race condition"
              " with another job, which added them first" % add_locks,
              errors.ECODE_FAULT)

          acquired = add_locks

        try:
          lu.acquired_locks[level] = acquired

          result = self._LockAndExecLU(lu, level + 1, calc_timeout, priority)
        finally:
          if level in lu.remove_locks:
            self.context.glm.remove(level, lu.remove_locks[level])
      finally:
        if self.context.glm.is_owned(level):
          self.context.glm.release(level)

    else:
      result = self._LockAndExecLU(lu, level + 1, calc_timeout, priority)

    return result

  def ExecOpCode(self, op, cbs, timeout=None, priority=None):
    """Execute an opcode.

    @type op: an OpCode instance
    @param op: the opcode to be executed
    @type cbs: L{OpExecCbBase}
    @param cbs: Runtime callbacks
    @type timeout: float or None
    @param timeout: Maximum time to acquire all locks, None for no timeout
    @type priority: number or None
    @param priority: Priority for acquiring lock(s)
    @raise LockAcquireTimeout: In case locks couldn't be acquired in the
        specified amount of time

    """
    if not isinstance(op, opcodes.OpCode):
      raise errors.ProgrammerError("Non-opcode instance passed"
                                   " to ExecOpCode")

    lu_class = self.DISPATCH_TABLE.get(op.__class__, None)
    if lu_class is None:
      raise errors.OpCodeUnknown("Unknown opcode")

    if timeout is None:
      calc_timeout = lambda: None
    else:
      calc_timeout = utils.RunningTimeout(timeout, False).Remaining

    self._cbs = cbs
    try:
      # Acquire the Big Ganeti Lock exclusively if this LU requires it,
      # and in a shared fashion otherwise (to prevent concurrent run with
      # an exclusive LU)
      self._AcquireLocks(locking.LEVEL_CLUSTER, locking.BGL,
                         not lu_class.REQ_BGL, calc_timeout(),
                         priority)
      try:
        lu = lu_class(self, op, self.context, self.rpc)
        lu.ExpandNames()
        assert lu.needed_locks is not None, "needed_locks not set by LU"

        try:
          return self._LockAndExecLU(lu, locking.LEVEL_INSTANCE, calc_timeout,
                                     priority)
        finally:
          if self._ec_id:
            self.context.cfg.DropECReservations(self._ec_id)
      finally:
        self.context.glm.release(locking.LEVEL_CLUSTER)
    finally:
      self._cbs = None

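  # Usage sketch (hypothetical values; `context` must be a GanetiContext):
  #
  #   proc = Processor(context, ec_id)
  #   result = proc.ExecOpCode(opcodes.OpQueryClusterInfo(), cbs=None,
  #                            timeout=10.0, priority=0)
  #
  # A LockAcquireTimeout escaping here means the locks could not all be
  # acquired within the ten seconds; callers (typically the job queue) are
  # expected to catch it and retry, usually with a longer timeout.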
  def Log(self, *args):
    """Forward call to feedback callback function.

    """
    if self._cbs:
      self._cbs.Feedback(*args)

  def LogStep(self, current, total, message):
    """Log a change in LU execution progress.

    """
    logging.debug("Step %d/%d %s", current, total, message)
    self.Log("STEP %d/%d %s" % (current, total, message))

  def LogWarning(self, message, *args, **kwargs):
    """Log a warning to the logs and the user.

    The optional keyword argument 'hint' can be used to show a hint to the
    user (presumably related to the warning). If the message is empty, it
    will not be printed at all, allowing one to show only a hint.

    """
    assert not kwargs or (len(kwargs) == 1 and "hint" in kwargs), \
           "Invalid keyword arguments for LogWarning (%s)" % str(kwargs)
    if args:
      message = message % tuple(args)
    if message:
      logging.warning(message)
      self.Log(" - WARNING: %s" % message)
    if "hint" in kwargs:
      self.Log("      Hint: %s" % kwargs["hint"])

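  # Example (illustrative; `proc` and `disk_name` are assumed names): since
  # an empty message is skipped, a hint can also be shown on its own:
  #
  #   proc.LogWarning("Disk %s is degraded", disk_name,
  #                   hint="run gnt-cluster verify-disks")
  #   proc.LogWarning("", hint="only this hint will be shown")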
  def LogInfo(self, message, *args):
    """Log an informational message to the logs and the user.

    """
    if args:
      message = message % tuple(args)
    logging.info(message)
    self.Log(" - INFO: %s" % message)

  def GetECId(self):
    """Returns the current execution context ID.

    """
    if not self._ec_id:
      raise errors.ProgrammerError("Tried to use execution context id when"
                                   " not set")
    return self._ec_id


class HooksMaster(object):
  """Hooks master.

  This class distributes the run commands to the nodes based on the
  specific LU class.

  In order to remove the direct dependency on the rpc module, the
  constructor needs a function which actually does the remote
  call. This will usually be rpc.call_hooks_runner, but any function
  which behaves the same works.

  """
  def __init__(self, callfn, lu):
    self.callfn = callfn
    self.lu = lu
    self.op = lu.op
    self.env, node_list_pre, node_list_post = self._BuildEnv()
    self.node_list = {constants.HOOKS_PHASE_PRE: node_list_pre,
                      constants.HOOKS_PHASE_POST: node_list_post}

  def _BuildEnv(self):
    """Compute the environment and the target nodes.

    Based on the opcode and the current node list, this builds the
    environment for the hooks and the target node list for the run.

    """
    env = {
      "PATH": "/sbin:/bin:/usr/sbin:/usr/bin",
      "GANETI_HOOKS_VERSION": constants.HOOKS_VERSION,
      "GANETI_OP_CODE": self.op.OP_ID,
      "GANETI_OBJECT_TYPE": self.lu.HTYPE,
      "GANETI_DATA_DIR": constants.DATA_DIR,
      }

    if self.lu.HPATH is not None:
      lu_env, lu_nodes_pre, lu_nodes_post = self.lu.BuildHooksEnv()
      if lu_env:
        for key in lu_env:
          env["GANETI_" + key] = lu_env[key]
    else:
      lu_nodes_pre = lu_nodes_post = []

    return env, frozenset(lu_nodes_pre), frozenset(lu_nodes_post)

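  # Illustrative result (approximate; the exact values come from the
  # constants module and from the LU's own BuildHooksEnv): for an instance
  # startup, _BuildEnv() would return an environment along the lines of
  #
  #   {"PATH": "/sbin:/bin:/usr/sbin:/usr/bin",
  #    "GANETI_HOOKS_VERSION": constants.HOOKS_VERSION,
  #    "GANETI_OP_CODE": "OP_INSTANCE_STARTUP",
  #    "GANETI_OBJECT_TYPE": "INSTANCE",
  #    "GANETI_DATA_DIR": constants.DATA_DIR,
  #    "GANETI_INSTANCE_NAME": ...}
  #
  # together with the frozensets of pre- and post-phase node names.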
  def _RunWrapper(self, node_list, hpath, phase):
    """Simple wrapper over self.callfn.

    This method fixes up the environment before doing the RPC call.

    """
    env = self.env.copy()
    env["GANETI_HOOKS_PHASE"] = phase
    env["GANETI_HOOKS_PATH"] = hpath
    if self.lu.cfg is not None:
      env["GANETI_CLUSTER"] = self.lu.cfg.GetClusterName()
      env["GANETI_MASTER"] = self.lu.cfg.GetMasterNode()

    env = dict([(str(key), str(val)) for key, val in env.iteritems()])

    return self.callfn(node_list, hpath, phase, env)

  def RunPhase(self, phase, nodes=None):
    """Run all the scripts for a phase.

    This is the main function of the HooksMaster.

    @param phase: one of L{constants.HOOKS_PHASE_POST} or
        L{constants.HOOKS_PHASE_PRE}; it denotes the hooks phase
    @param nodes: overrides the predefined list of nodes for the given phase
    @return: the processed results of the hooks multi-node rpc call
    @raise errors.HooksFailure: on communication failure to the nodes
    @raise errors.HooksAbort: on failure of one of the hooks

    """
    if not self.node_list[phase] and not nodes:
      # empty node list, we should not attempt to run this as either
      # we're in the cluster init phase and the rpc client part can't
      # even attempt to run, or this LU doesn't do hooks at all
      return
    hpath = self.lu.HPATH
    if nodes is not None:
      results = self._RunWrapper(nodes, hpath, phase)
    else:
      results = self._RunWrapper(self.node_list[phase], hpath, phase)
    errs = []
    if not results:
      msg = "Communication Failure"
      if phase == constants.HOOKS_PHASE_PRE:
        raise errors.HooksFailure(msg)
      else:
        self.lu.LogWarning(msg)
        return results
    for node_name in results:
      res = results[node_name]
      if res.offline:
        continue
      msg = res.fail_msg
      if msg:
        self.lu.LogWarning("Communication failure to node %s: %s",
                           node_name, msg)
        continue
      for script, hkr, output in res.payload:
        if hkr == constants.HKR_FAIL:
          if phase == constants.HOOKS_PHASE_PRE:
            errs.append((node_name, script, output))
          else:
            if not output:
              output = "(no output)"
            self.lu.LogWarning("On %s script %s failed, output: %s" %
                               (node_name, script, output))
    if errs and phase == constants.HOOKS_PHASE_PRE:
      raise errors.HooksAbort(errs)
    return results

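  # Note: as the loop above shows, each per-node result's payload is a list
  # of (script, status, output) tuples, where status is one of the
  # constants.HKR_* values. HKR_FAIL in the pre-phase aborts the operation
  # through errors.HooksAbort, while post-phase failures only produce
  # warnings.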
  def RunConfigUpdate(self):
    """Run the special configuration update hook.

    This is a special hook that runs only on the master after each
    top-level LU if the configuration has been updated.

    """
    phase = constants.HOOKS_PHASE_POST
    hpath = constants.HOOKS_NAME_CFGUPDATE
    nodes = [self.lu.cfg.GetMasterNode()]
    self._RunWrapper(nodes, hpath, phase)