opcodes.OpVerifyDisks: cmdlib.LUVerifyDisks,
opcodes.OpSetClusterParams: cmdlib.LUSetClusterParams,
opcodes.OpRedistributeConfig: cmdlib.LURedistributeConfig,
+ opcodes.OpRepairDiskSizes: cmdlib.LURepairDiskSizes,
# node lu
opcodes.OpAddNode: cmdlib.LUAddNode,
opcodes.OpQueryNodes: cmdlib.LUQueryNodes,
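For context, the table above maps each opcode class to the logical unit (LU) that implements it. The following is a minimal, self-contained sketch of that dispatch pattern; the toy classes and the ExecOpCode helper are illustrative stand-ins, not the real opcodes/cmdlib/Processor API.

# Illustrative only: toy opcode/LU pair mirroring the dispatch table above.
class OpRepairDiskSizes(object):
  """Toy opcode carrying its parameters."""
  def __init__(self, instances):
    self.instances = instances

class LURepairDiskSizes(object):
  """Toy logical unit implementing one opcode type."""
  def __init__(self, op):
    self.op = op

  def Exec(self):
    return "would repair disk sizes for %s" % (self.op.instances,)

# Opcode class -> LU class, as in the dispatch table above.
DISPATCH_TABLE = {
  OpRepairDiskSizes: LURepairDiskSizes,
}

def ExecOpCode(op):
  """Look up the LU for an opcode and run it (hypothetical helper)."""
  lu_class = DISPATCH_TABLE.get(type(op))
  if lu_class is None:
    raise NotImplementedError("Unsupported opcode %r" % type(op).__name__)
  return lu_class(op).Exec()

print(ExecOpCode(OpRepairDiskSizes(["inst1.example.com"])))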
"""
self.context = context
self._feedback_fn = None
- self.exclusive_BGL = False
+ self.exclusive_BGL = False # pylint: disable-msg=C0103
self.rpc = rpc.RpcRunner(context.cfg)
def _ExecLU(self, lu):
self.context.glm.add(level, add_locks, acquired=1, shared=share)
except errors.LockError:
raise errors.OpPrereqError(
- "Coudn't add locks (%s), probably because of a race condition"
+ "Couldn't add locks (%s), probably because of a race condition"
" with another job, who added them first" % add_locks)
try:
try:
@type run_notifier: callable (no arguments) or None
@param run_notifier: this function (if callable) will be called when
we are about to call the lu's Exec() method, that
- is, after we have aquired all locks
+ is, after we have acquired all locks
"""
if not isinstance(op, opcodes.OpCode):
res = results[node_name]
if res.failed or res.data is False or not isinstance(res.data, list):
if not res.offline:
- self.proc.LogWarning("Communication failure to node %s" %
- node_name)
+ self.proc.LogWarning("Communication failure to node %s", node_name)
continue
for script, hkr, output in res.data:
if hkr == constants.HKR_FAIL:
- output = output.strip().encode("string_escape")
errs.append((node_name, script, output))
if errs:
raise errors.HooksAbort(errs)
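For clarity, here is a self-contained sketch of the hook-result handling above: each node returns a list of (script, status, output) tuples, and every failing script is collected before aborting. HKR_FAIL and HooksAbort below are toy stand-ins for the constants/errors modules.

# Illustrative only: aggregate failed hook scripts across nodes.
HKR_FAIL = "FAIL"

class HooksAbort(Exception):
  pass

def CheckHookResults(results):
  """Collect (node, script, output) for every failed hook script."""
  errs = []
  for node_name, data in results.items():
    for script, status, output in data:
      if status == HKR_FAIL:
        errs.append((node_name, script, output))
  if errs:
    raise HooksAbort(errs)

# Example: one node, one successful script -> no exception raised.
CheckHookResults({"node1.example.com": [("10-check", "SUCCESS", "")]})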
phase = constants.HOOKS_PHASE_POST
hpath = constants.HOOKS_NAME_CFGUPDATE
nodes = [self.lu.cfg.GetMasterNode()]
- results = self._RunWrapper(nodes, hpath, phase)
+ self._RunWrapper(nodes, hpath, phase)