Revision 0c3c965a
Changes to b/lib/cmdlib.py (side-by-side table reconstructed below as a unified diff):
@@ -10570,11 +10570,16 @@
   def ExpandNames(self):
     self._ExpandAndLockInstance()
     self.needed_locks[locking.LEVEL_NODE] = []
-    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
+    self.needed_locks[locking.LEVEL_NODE_RES] = []
+    self.recalculate_locks[locking.LEVEL_NODE_RES] = constants.LOCKS_REPLACE
 
   def DeclareLocks(self, level):
     if level == locking.LEVEL_NODE:
       self._LockInstancesNodes()
+    elif level == locking.LEVEL_NODE_RES:
+      # Copy node locks
+      self.needed_locks[locking.LEVEL_NODE_RES] = \
+        self.needed_locks[locking.LEVEL_NODE][:]
 
   def BuildHooksEnv(self):
     """Build hooks env.
@@ -10631,10 +10636,18 @@
     instance = self.instance
     disk = self.disk
 
+    assert set([instance.name]) == self.owned_locks(locking.LEVEL_INSTANCE)
+    assert (self.owned_locks(locking.LEVEL_NODE) ==
+            self.owned_locks(locking.LEVEL_NODE_RES))
+
     disks_ok, _ = _AssembleInstanceDisks(self, self.instance, disks=[disk])
     if not disks_ok:
       raise errors.OpExecError("Cannot activate block device to grow")
 
+    feedback_fn("Growing disk %s of instance '%s' by %s" %
+                (self.op.disk, instance.name,
+                 utils.FormatUnit(self.op.amount, "h")))
+
     # First run all grow ops in dry-run mode
     for node in instance.all_nodes:
       self.cfg.SetDiskID(disk, node)
@@ -10657,6 +10670,13 @@
 
     disk.RecordGrow(self.op.amount)
     self.cfg.Update(instance, feedback_fn)
+
+    # Changes have been recorded, release node lock
+    _ReleaseLocks(self, locking.LEVEL_NODE)
+
+    # Downgrade lock while waiting for sync
+    self.glm.downgrade(locking.LEVEL_INSTANCE)
+
     if self.op.wait_for_sync:
       disk_abort = not _WaitForSync(self, instance, disks=[disk])
       if disk_abort:
@@ -10669,6 +10689,9 @@
                        " not supposed to be running because no wait for"
                        " sync mode was requested")
 
+    assert self.owned_locks(locking.LEVEL_NODE_RES)
+    assert set([instance.name]) == self.owned_locks(locking.LEVEL_INSTANCE)
+
 
 class LUInstanceQueryData(NoHooksLU):
   """Query runtime instance data.
Also available in: Unified diff