Revision ef628379 lib/cmdlib.py
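This revision threads an optional `disks` parameter through the disk helpers in cmdlib.py so callers can act on a subset of an instance's disks: `_WaitForSync`, `_AssembleInstanceDisks`, `_SafeShutdownInstanceDisks` and `_ShutdownInstanceDisks` all gain the parameter, and a new `_ExpandCheckDisks` helper validates the selection.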
--- a/lib/cmdlib.py
+++ b/lib/cmdlib.py
@@ -2588,19 +2588,21 @@
     _RedistributeAncillaryFiles(self)
 
 
-def _WaitForSync(lu, instance, oneshot=False):
+def _WaitForSync(lu, instance, disks=None, oneshot=False):
   """Sleep and poll for an instance's disk to sync.
 
   """
-  if not instance.disks:
+  if not instance.disks or disks is not None and not disks:
     return True
 
+  disks = _ExpandCheckDisks(instance, disks)
+
   if not oneshot:
     lu.proc.LogInfo("Waiting for instance %s to sync disks." % instance.name)
 
   node = instance.primary_node
 
-  for dev in instance.disks:
+  for dev in disks:
     lu.cfg.SetDiskID(dev, node)
 
   # TODO: Convert to utils.Retry
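A note on the new guard: since Python's `and` binds tighter than `or`, the condition parses as `not instance.disks or (disks is not None and not disks)` — bail out when the instance has no disks at all, or when the caller explicitly requested an empty subset, while `disks=None` still means "all disks". A minimal standalone sketch of the same logic, with illustrative names:

def _ShouldSkipSync(all_disks, requested):
  # Skip when the instance has no disks at all, or when the caller
  # explicitly passed an empty selection; None still means "all disks".
  return not all_disks or (requested is not None and not requested)

assert _ShouldSkipSync([], None)               # no disks at all
assert _ShouldSkipSync(["disk/0"], [])         # explicit empty selection
assert not _ShouldSkipSync(["disk/0"], None)   # None selects everything
assert not _ShouldSkipSync(["disk/0"], ["disk/0"])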
@@ -2611,7 +2613,7 @@
     max_time = 0
     done = True
     cumul_degraded = False
-    rstats = lu.rpc.call_blockdev_getmirrorstatus(node, instance.disks)
+    rstats = lu.rpc.call_blockdev_getmirrorstatus(node, disks)
     msg = rstats.fail_msg
     if msg:
       lu.LogWarning("Can't get any data from node %s: %s", node, msg)
@@ -2626,7 +2628,7 @@
     for i, mstat in enumerate(rstats):
       if mstat is None:
         lu.LogWarning("Can't compute data for node %s/%s",
-                      node, instance.disks[i].iv_name)
+                      node, disks[i].iv_name)
         continue
 
       cumul_degraded = (cumul_degraded or
@@ -2639,8 +2641,7 @@
       else:
         rem_time = "no time estimate"
       lu.proc.LogInfo("- device %s: %5.2f%% done, %s" %
-                      (instance.disks[i].iv_name, mstat.sync_percent,
-                       rem_time))
+                      (disks[i].iv_name, mstat.sync_percent, rem_time))
 
       # if we're done but degraded, let's do a few small retries, to
       # make sure we see a stable and not transient situation; therefore
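With these hunks `_WaitForSync` polls the mirror status of only the validated subset, and the status indices line up with `disks` rather than `instance.disks`. A hedged sketch of a caller waiting on a single device — `lu` and `instance` stand for a LogicalUnit and an `objects.Instance` as elsewhere in cmdlib.py, and the error handling shown is illustrative:

# Wait for just the first disk of an instance to finish syncing.
target = instance.disks[0]
if not _WaitForSync(lu, instance, disks=[target]):
  raise errors.OpExecError("Disk %s failed to reach a synced state" %
                           target.iv_name)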
@@ -3875,7 +3876,7 @@
   return disks_info
 
 
-def _AssembleInstanceDisks(lu, instance, ignore_secondaries=False,
+def _AssembleInstanceDisks(lu, instance, disks=None, ignore_secondaries=False,
                            ignore_size=False):
   """Prepare the block devices for an instance.
 
@@ -3885,6 +3886,8 @@
   @param lu: the logical unit on whose behalf we execute
   @type instance: L{objects.Instance}
   @param instance: the instance for whose disks we assemble
+  @type disks: list of L{objects.Disk} or None
+  @param disks: which disks to assemble (or all, if None)
   @type ignore_secondaries: boolean
   @param ignore_secondaries: if true, errors on secondary nodes
       won't result in an error return from the function
@@ -3900,6 +3903,8 @@
   device_info = []
   disks_ok = True
   iname = instance.name
+  disks = _ExpandCheckDisks(instance, disks)
+
   # With the two passes mechanism we try to reduce the window of
   # opportunity for the race condition of switching DRBD to primary
   # before handshaking occured, but we do not eliminate it
@@ -3910,7 +3915,7 @@
   # SyncSource, etc.)
 
   # 1st pass, assemble on all nodes in secondary mode
-  for inst_disk in instance.disks:
+  for inst_disk in disks:
     for node, node_disk in inst_disk.ComputeNodeTree(instance.primary_node):
       if ignore_size:
         node_disk = node_disk.Copy()
3928 | 3933 |
# FIXME: race condition on drbd migration to primary |
3929 | 3934 |
|
3930 | 3935 |
# 2nd pass, do only the primary node |
3931 |
for inst_disk in instance.disks:
|
|
3936 |
for inst_disk in disks: |
|
3932 | 3937 |
dev_path = None |
3933 | 3938 |
|
3934 | 3939 |
for node, node_disk in inst_disk.ComputeNodeTree(instance.primary_node): |
... | ... | |
3953 | 3958 |
# leave the disks configured for the primary node |
3954 | 3959 |
# this is a workaround that would be fixed better by |
3955 | 3960 |
# improving the logical/physical id handling |
3956 |
for disk in instance.disks:
|
|
3961 |
for disk in disks: |
|
3957 | 3962 |
lu.cfg.SetDiskID(disk, instance.primary_node) |
3958 | 3963 |
|
3959 | 3964 |
return disks_ok, device_info |
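Both assembly passes and the failure-cleanup loop now iterate over the validated subset, so partial activation becomes possible. A hedged usage sketch — the rollback shown is illustrative, not taken from this revision:

# Activate a single disk, tolerating secondary-node errors, and
# shut the same subset back down if assembly fails.
subset = [instance.disks[0]]
disks_ok, device_info = _AssembleInstanceDisks(lu, instance, disks=subset,
                                               ignore_secondaries=True)
if not disks_ok:
  _ShutdownInstanceDisks(lu, instance, disks=subset)
  raise errors.OpExecError("Cannot activate block device")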
@@ -4008,7 +4013,7 @@
     _SafeShutdownInstanceDisks(self, instance)
 
 
-def _SafeShutdownInstanceDisks(lu, instance):
+def _SafeShutdownInstanceDisks(lu, instance, disks=None):
   """Shutdown block devices of an instance.
 
   This function checks if an instance is running, before calling
@@ -4016,10 +4021,28 @@
 
   """
   _CheckInstanceDown(lu, instance, "cannot shutdown disks")
-  _ShutdownInstanceDisks(lu, instance)
+  _ShutdownInstanceDisks(lu, instance, disks=disks)
+
+
+def _ExpandCheckDisks(instance, disks):
+  """Return the instance disks selected by the disks list
 
+  @type disks: list of L{objects.Disk} or None
+  @param disks: selected disks
+  @rtype: list of L{objects.Disk}
+  @return: selected instance disks to act on
 
-def _ShutdownInstanceDisks(lu, instance, ignore_primary=False):
+  """
+  if disks is None:
+    return instance.disks
+  else:
+    if not set(disks).issubset(instance.disks):
+      raise errors.ProgrammerError("Can only act on disks belonging to the"
+                                   " target instance")
+    return disks
+
+
+def _ShutdownInstanceDisks(lu, instance, disks=None, ignore_primary=False):
   """Shutdown block devices of an instance.
 
   This does the shutdown on all nodes of the instance.
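`_ExpandCheckDisks` is the single validation point for the new parameter: `None` expands to all instance disks, anything else must be a subset of `instance.disks`. The membership test presumably compares the very `objects.Disk` instances held by the configuration, so callers should pass those objects rather than copies. The same pattern in a self-contained sketch, with plain strings standing in for disk objects:

def expand_check(owned, selected):
  # None selects everything; otherwise the selection must be a
  # subset of the items owned by the target.
  if selected is None:
    return owned
  if not set(selected).issubset(owned):
    raise ValueError("can only act on items belonging to the target")
  return selected

owned = ["disk/0", "disk/1"]
assert expand_check(owned, None) == owned
assert expand_check(owned, [owned[1]]) == [owned[1]]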
@@ -4029,7 +4052,9 @@
 
   """
   all_result = True
-  for disk in instance.disks:
+  disks = _ExpandCheckDisks(instance, disks)
+
+  for disk in disks:
     for node, top_disk in disk.ComputeNodeTree(instance.primary_node):
       lu.cfg.SetDiskID(top_disk, node)
       result = lu.rpc.call_blockdev_shutdown(node, top_disk)
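Taken together, the revision threads one optional `disks` argument through sync, assemble, and shutdown, with every path funnelled through `_ExpandCheckDisks`. A final hedged sketch:

# Shut down just the last disk of an instance that is already stopped;
# _SafeShutdownInstanceDisks re-checks that the instance is down before
# delegating the subset to _ShutdownInstanceDisks.
_SafeShutdownInstanceDisks(lu, instance, disks=[instance.disks[-1]])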