Revision fa6dd6bb
--- a/lib/cmdlib.py
+++ b/lib/cmdlib.py
@@ -8229,11 +8229,11 @@
                        (src_version, dst_version))
 
     self.feedback_fn("* checking disk consistency between source and target")
-    for dev in instance.disks:
+    for (idx, dev) in enumerate(instance.disks):
       if not _CheckDiskConsistency(self.lu, dev, target_node, False):
         raise errors.OpExecError("Disk %s is degraded or not fully"
                                  " synchronized on target node,"
-                                 " aborting migration" % dev.iv_name)
+                                 " aborting migration" % idx)
 
     if self.current_mem > self.tgt_free_mem:
       if not self.allow_runtime_changes:
@@ -8391,16 +8391,16 @@
 
     if instance.admin_state == constants.ADMINST_UP:
       self.feedback_fn("* checking disk consistency between source and target")
-      for dev in instance.disks:
+      for (idx, dev) in enumerate(instance.disks):
         # for drbd, these are drbd over lvm
         if not _CheckDiskConsistency(self.lu, dev, target_node, False):
           if primary_node.offline:
             self.feedback_fn("Node %s is offline, ignoring degraded disk %s on"
                              " target node %s" %
-                             (primary_node.name, dev.iv_name, target_node))
+                             (primary_node.name, idx, target_node))
           elif not self.ignore_consistency:
             raise errors.OpExecError("Disk %s is degraded on target node,"
-                                     " aborting failover" % dev.iv_name)
+                                     " aborting failover" % idx)
     else:
       self.feedback_fn("* not checking disk consistency as instance is not"
                        " running")
@@ -8914,8 +8914,7 @@
   for idx, device in enumerate(instance.disks):
     if to_skip and idx in to_skip:
       continue
-    logging.info("Creating volume %s for instance %s",
-                 device.iv_name, instance.name)
+    logging.info("Creating disk %s for instance '%s'", idx, instance.name)
     #HARDCODE
     for node in all_nodes:
       f_create = node == pnode
@@ -8943,7 +8942,7 @@
   logging.info("Removing block devices for instance %s", instance.name)
 
   all_result = True
-  for device in instance.disks:
+  for (idx, device) in instance.disks:
     if target_node:
       edata = [(target_node, device)]
     else:
@@ -8952,8 +8951,8 @@
       lu.cfg.SetDiskID(disk, node)
       msg = lu.rpc.call_blockdev_remove(node, disk).fail_msg
       if msg:
-        lu.LogWarning("Could not remove block device %s on node %s,"
-                      " continuing anyway: %s", device.iv_name, node, msg)
+        lu.LogWarning("Could not remove disk %s on node %s,"
+                      " continuing anyway: %s", idx, node, msg)
         all_result = False
 
   # if this is a DRBD disk, return its port to the pool
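
The change is the same in every hunk: iterate over instance.disks with enumerate and refer to each disk by its positional index in log and error messages instead of by its iv_name. A minimal standalone sketch of that pattern follows; Disk, disks and _check_consistency are illustrative stand-ins, not Ganeti APIs, and only the enumerate/index usage mirrors the hunks above.

# Sketch of the index-based reporting adopted in this revision.
# Disk and _check_consistency are simplified stand-ins, not Ganeti code.
class Disk(object):
  def __init__(self, iv_name, consistent):
    self.iv_name = iv_name      # e.g. "disk/0"; no longer used in messages
    self.consistent = consistent

def _check_consistency(disk):
  # Stand-in for _CheckDiskConsistency, which requires cluster RPC in Ganeti
  return disk.consistent

disks = [Disk("disk/0", True), Disk("disk/1", False)]

for (idx, dev) in enumerate(disks):
  if not _check_consistency(dev):
    # Messages now identify the disk by its index rather than by dev.iv_name
    print("Disk %s is degraded or not fully synchronized on target node" % idx)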