@@ -2004,7 +2004,8 @@
         else:
           rem_time = "no time estimate"
         lu.proc.LogInfo("- device %s: %5.2f%% done, %s" %
-                        (instance.disks[i].iv_name, mstat.sync_percent, rem_time))
+                        (instance.disks[i].iv_name, mstat.sync_percent,
+                         rem_time))
 
     # if we're done but degraded, let's do a few small retries, to
     # make sure we see a stable and not transient situation; therefore
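The wrapped progress message above combines a percent-done value with an optional remaining-time estimate. A minimal stand-alone sketch of that formatting, purely illustrative: MirrorStat and format_sync_progress are hypothetical stand-ins, not Ganeti code.

# Hypothetical sketch of the progress line built above; not Ganeti code.
import collections

MirrorStat = collections.namedtuple("MirrorStat",
                                    ["sync_percent", "estimated_time"])

def format_sync_progress(iv_name, mstat):
    """Return a human-readable sync progress line for one device."""
    if mstat.estimated_time is not None:
        rem_time = "%d estimated seconds remaining" % mstat.estimated_time
    else:
        rem_time = "no time estimate"
    return ("- device %s: %5.2f%% done, %s" %
            (iv_name, mstat.sync_percent, rem_time))

print(format_sync_progress("disk/0", MirrorStat(42.5, 180)))
print(format_sync_progress("disk/1", MirrorStat(99.9, None)))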
@@ -6463,7 +6464,8 @@
     for dev, old_lvs, new_lvs in iv_names.itervalues():
       self.lu.LogInfo("Detaching %s drbd from local storage" % dev.iv_name)
 
-      result = self.rpc.call_blockdev_removechildren(self.target_node, dev, old_lvs)
+      result = self.rpc.call_blockdev_removechildren(self.target_node, dev,
+                                                     old_lvs)
       result.Raise("Can't detach drbd from local storage on node"
                    " %s for device %s" % (self.target_node, dev.iv_name))
       #dev.children = []
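The detach call follows the RPC-result convention visible throughout this change: result.Raise(...) aborts with a combined message when the remote call failed, while result.fail_msg lets the caller handle the error inline. A rough stand-in illustrating that convention; FakeRpcResult and OpError are hypothetical, not Ganeti's real RpcResult class.

# Hypothetical stand-in for the result.fail_msg / result.Raise convention.
class OpError(Exception):
    pass

class FakeRpcResult(object):
    def __init__(self, fail_msg=None):
        # fail_msg is None (or empty) on success, an error string on failure
        self.fail_msg = fail_msg

    def Raise(self, msg):
        """Abort with a combined message if the remote call failed."""
        if self.fail_msg:
            raise OpError("%s: %s" % (msg, self.fail_msg))

# Success path: Raise() is a no-op.
FakeRpcResult().Raise("Can't detach drbd from local storage on node node1")

# Failure path: the caller either raises or inspects fail_msg directly.
failed = FakeRpcResult("device busy")
if failed.fail_msg:
    print("warning: %s" % failed.fail_msg)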
@@ -6489,14 +6491,16 @@
         rename_old_to_new.append((to_ren, ren_fn(to_ren, temp_suffix)))
 
       self.lu.LogInfo("Renaming the old LVs on the target node")
-      result = self.rpc.call_blockdev_rename(self.target_node, rename_old_to_new)
+      result = self.rpc.call_blockdev_rename(self.target_node,
+                                             rename_old_to_new)
       result.Raise("Can't rename old LVs on node %s" % self.target_node)
 
       # Now we rename the new LVs to the old LVs
       self.lu.LogInfo("Renaming the new LVs on the target node")
       rename_new_to_old = [(new, old.physical_id)
                            for old, new in zip(old_lvs, new_lvs)]
-      result = self.rpc.call_blockdev_rename(self.target_node, rename_new_to_old)
+      result = self.rpc.call_blockdev_rename(self.target_node,
+                                             rename_new_to_old)
       result.Raise("Can't rename new LVs on node %s" % self.target_node)
 
       for old, new in zip(old_lvs, new_lvs):
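The two call_blockdev_rename passes above swap names: the old LVs are first moved aside to temporary names so the new LVs can then take over the original names. A minimal sketch of that two-step swap on an in-memory mapping; the volume names and swap_names helper are illustrative only.

# Illustrative two-step rename swap: old LVs move aside to temporary names,
# then the new LVs take the old names.
def swap_names(names, old_lvs, new_lvs, temp_suffix="_replaced"):
    """Swap the names of paired old/new LVs in a name->volume mapping."""
    # step 1: rename the old LVs out of the way
    rename_old_to_new = [(old, old + temp_suffix) for old in old_lvs]
    for src, dst in rename_old_to_new:
        names[dst] = names.pop(src)
    # step 2: rename the new LVs to the now-free old names
    rename_new_to_old = [(new, old) for old, new in zip(old_lvs, new_lvs)]
    for src, dst in rename_new_to_old:
        names[dst] = names.pop(src)
    return names

volumes = {"disk0_data": "old", "disk0_data.new": "new"}
print(swap_names(volumes, ["disk0_data"], ["disk0_data.new"]))
# -> {'disk0_data_replaced': 'old', 'disk0_data': 'new'}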
@@ -6509,11 +6513,13 @@
 
       # Now that the new lvs have the old name, we can add them to the device
       self.lu.LogInfo("Adding new mirror component on %s" % self.target_node)
-      result = self.rpc.call_blockdev_addchildren(self.target_node, dev, new_lvs)
+      result = self.rpc.call_blockdev_addchildren(self.target_node, dev,
+                                                  new_lvs)
       msg = result.fail_msg
       if msg:
         for new_lv in new_lvs:
-          msg2 = self.rpc.call_blockdev_remove(self.target_node, new_lv).fail_msg
+          msg2 = self.rpc.call_blockdev_remove(self.target_node,
+                                               new_lv).fail_msg
           if msg2:
             self.lu.LogWarning("Can't rollback device %s: %s", dev, msg2,
                                hint=("cleanup manually the unused logical"
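If attaching the new children fails, the code above removes each freshly created LV again and only warns when that rollback itself fails, leaving cleanup to the operator. A compact sketch of that rollback pattern; the functions and device names are hypothetical, not Ganeti APIs.

# Hypothetical sketch of the rollback pattern above: if attaching the new
# components fails, try to remove them again and warn about leftovers.
import logging

def add_with_rollback(attach_fn, remove_fn, new_lvs):
    """Attach new_lvs; on failure, remove them again and report leftovers."""
    msg = attach_fn(new_lvs)      # None on success, error text on failure
    if not msg:
        return True
    for new_lv in new_lvs:
        msg2 = remove_fn(new_lv)  # None on success, error text on failure
        if msg2:
            logging.warning("Can't rollback device %s: %s;"
                            " clean up the unused logical volume manually",
                            new_lv, msg2)
    return False

# Example run: the attach fails, and one of the two removals also fails.
ok = add_with_rollback(lambda lvs: "drbd device busy",
                       lambda lv: "still open" if lv == "lv1" else None,
                       ["lv0", "lv1"])
print(ok)  # False; a warning was logged for lv1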
@@ -6581,13 +6587,15 @@
     # after this, we must manually remove the drbd minors on both the
     # error and the success paths
     self.lu.LogStep(4, steps_total, "Changing drbd configuration")
-    minors = self.cfg.AllocateDRBDMinor([self.new_node for dev in self.instance.disks],
+    minors = self.cfg.AllocateDRBDMinor([self.new_node
+                                         for dev in self.instance.disks],
                                         self.instance.name)
     logging.debug("Allocated minors %r" % (minors,))
 
     iv_names = {}
     for idx, (dev, new_minor) in enumerate(zip(self.instance.disks, minors)):
-      self.lu.LogInfo("activating a new drbd on %s for disk/%d" % (self.new_node, idx))
+      self.lu.LogInfo("activating a new drbd on %s for disk/%d" %
+                      (self.new_node, idx))
       # create new devices on new_node; note that we create two IDs:
       # one without port, so the drbd will be activated without
       # networking information on the new node at this stage, and one
@@ -6598,8 +6606,10 @@
       else:
         p_minor = o_minor2
 
-      new_alone_id = (self.instance.primary_node, self.new_node, None, p_minor, new_minor, o_secret)
-      new_net_id = (self.instance.primary_node, self.new_node, o_port, p_minor, new_minor, o_secret)
+      new_alone_id = (self.instance.primary_node, self.new_node, None,
+                      p_minor, new_minor, o_secret)
+      new_net_id = (self.instance.primary_node, self.new_node, o_port,
+                    p_minor, new_minor, o_secret)
 
       iv_names[idx] = (dev, dev.children, new_net_id)
       logging.debug("Allocated new_minor: %s, new_logical_id: %s", new_minor,
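As the comments in the previous two hunks explain, two logical IDs are built per disk: new_alone_id omits the port so the DRBD device can first be brought up on the new node without any networking, while new_net_id carries the full peer information used once both sides are reconnected. A small sketch of constructing the two tuples with the field order shown in the diff; all concrete values here are made up for illustration.

# Small sketch of the two logical IDs built above; values are made up.
def make_drbd_ids(primary_node, new_node, port, p_minor, new_minor, secret):
    # port slot is None: activate without networking first
    new_alone_id = (primary_node, new_node, None, p_minor, new_minor, secret)
    # full ID used once the drbd pair is reconnected
    new_net_id = (primary_node, new_node, port, p_minor, new_minor, secret)
    return new_alone_id, new_net_id

alone_id, net_id = make_drbd_ids("node1.example.com", "node3.example.com",
                                 11000, 0, 2, "s3cr3t")
print(alone_id)
print(net_id)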
@@ -6627,8 +6637,10 @@
                                  " soon as possible"))
 
     self.lu.LogInfo("Detaching primary drbds from the network (=> standalone)")
-    result = self.rpc.call_drbd_disconnect_net([self.instance.primary_node], self.node_secondary_ip,
-                                                self.instance.disks)[self.instance.primary_node]
+    result = self.rpc.call_drbd_disconnect_net([self.instance.primary_node],
+                                               self.node_secondary_ip,
+                                               self.instance.disks)\
+                                               [self.instance.primary_node]
 
     msg = result.fail_msg
     if msg:
@@ -6649,13 +6661,17 @@
     # and now perform the drbd attach
     self.lu.LogInfo("Attaching primary drbds to new secondary"
                     " (standalone => connected)")
-    result = self.rpc.call_drbd_attach_net([self.instance.primary_node, self.new_node], self.node_secondary_ip,
-                                            self.instance.disks, self.instance.name,
+    result = self.rpc.call_drbd_attach_net([self.instance.primary_node,
+                                            self.new_node],
+                                            self.node_secondary_ip,
+                                            self.instance.disks,
+                                            self.instance.name,
                                             False)
     for to_node, to_result in result.items():
       msg = to_result.fail_msg
       if msg:
-        self.lu.LogWarning("Can't attach drbd disks on node %s: %s", to_node, msg,
+        self.lu.LogWarning("Can't attach drbd disks on node %s: %s",
+                           to_node, msg,
                            hint=("please do a gnt-instance info to see the"
                                  " status of disks"))
 
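The last two hunks wrap the calls that first force the primary's DRBD devices into standalone mode and then reattach them to the new secondary, warning per node if the attach fails. A rough outline of that ordering with stand-in functions; none of these are Ganeti RPC calls, and the node and instance names are invented.

# Rough outline of the disconnect/attach sequence above, with stand-ins.
import logging
logging.basicConfig(level=logging.INFO)

def disconnect_net(node, disks):
    """Pretend to switch the drbd devices on 'node' to standalone mode."""
    logging.info("%s: disconnecting %d drbd device(s)", node, len(disks))
    return None  # no error

def attach_net(nodes, disks, instance_name, multimaster):
    """Pretend to reconnect the drbd devices between 'nodes'."""
    logging.info("%s: attaching drbds for %s (multimaster=%s)",
                 nodes, instance_name, multimaster)
    return dict((node, None) for node in nodes)  # per-node error map

def replace_secondary_net(primary, new_secondary, disks, instance_name):
    # 1. the primary goes standalone, cutting the link to the old secondary
    msg = disconnect_net(primary, disks)
    if msg:
        raise RuntimeError("cannot disconnect: %s" % msg)
    # 2. the primary and the new secondary are wired together again
    results = attach_net([primary, new_secondary], disks, instance_name, False)
    for node, err in results.items():
        if err:
            logging.warning("Can't attach drbd disks on node %s: %s", node, err)

replace_secondary_net("node1.example.com", "node3.example.com",
                      ["disk/0"], "instance1.example.com")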