Revision d418ebfb lib/cmdlib.py
b/lib/cmdlib.py

4041 4041       for disk_idx in self.op.disks:
4042 4042         if disk_idx < 0 or disk_idx >= len(instance.disks):
4043 4043           raise errors.OpPrereqError("Disk '%s' not found for instance '%s'" %
4044      -                                      (name, instance.name))
     4044 +                                      (disk_idx, instance.name))
4045 4045
4046 4046     def _ExecD8DiskOnly(self, feedback_fn):
4047 4047       """Replace a disk on the primary or secondary for dbrd8.
 ...   ...
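To make the intent of the hunk above concrete: the old error message interpolated a variable called name, which is not the loop variable, while the fixed line reports the offending disk_idx itself. A minimal, self-contained sketch of the same prereq check follows; OpPrereqError here is a local stand-in rather than Ganeti's errors module, and the sample values are invented.

```python
# Minimal sketch of the prereq check above; OpPrereqError and the sample
# data are stand-ins, not Ganeti's real classes or configuration.
class OpPrereqError(Exception):
  pass

def check_disk_indices(op_disks, instance_disks, instance_name):
  for disk_idx in op_disks:
    if disk_idx < 0 or disk_idx >= len(instance_disks):
      # the fix: report the offending index, not an unrelated "name"
      raise OpPrereqError("Disk '%s' not found for instance '%s'" %
                          (disk_idx, instance_name))

check_disk_indices([0], ["sda", "sdb"], "web1")    # passes
# check_disk_indices([2], ["sda", "sdb"], "web1")  # would raise OpPrereqError
```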
4256 4256         if not res or my_vg not in res:
4257 4257           raise errors.OpExecError("Volume group '%s' not found on %s" %
4258 4258                                    (my_vg, node))
4259      -     for dev in instance.disks:
4260      -       if not dev.iv_name in self.op.disks:
     4259 +     for idx, dev in enumerate(instance.disks):
     4260 +       if idx not in self.op.disks:
4261 4261           continue
4262      -       info("checking %s on %s" % (dev.iv_name, pri_node))
     4262 +       info("checking disk/%d on %s" % (idx, pri_node))
4263 4263         cfg.SetDiskID(dev, pri_node)
4264 4264         if not self.rpc.call_blockdev_find(pri_node, dev):
4265      -         raise errors.OpExecError("Can't find device %s on node %s" %
4266      -                                  (dev.iv_name, pri_node))
     4265 +         raise errors.OpExecError("Can't find disk/%d on node %s" %
     4266 +                                  (idx, pri_node))
4267 4267
4268 4268       # Step: check other node consistency
4269 4269       self.proc.LogStep(2, steps_total, "check peer consistency")
4270      -     for dev in instance.disks:
4271      -       if not dev.iv_name in self.op.disks:
     4270 +     for idx, dev in enumerate(instance.disks):
     4271 +       if idx not in self.op.disks:
4272 4272           continue
4273      -       info("checking %s consistency on %s" % (dev.iv_name, pri_node))
     4273 +       info("checking disk/%d consistency on %s" % (idx, pri_node))
4274 4274         if not _CheckDiskConsistency(self, dev, pri_node, True, ldisk=True):
4275 4275           raise errors.OpExecError("Primary node (%s) has degraded storage,"
4276 4276                                    " unsafe to replace the secondary" %
 ...   ...
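All of the loops above follow the same rewrite: iterate with enumerate() and match the positional index against self.op.disks instead of comparing dev.iv_name, so that messages can refer to disks uniformly as disk/<idx>. A rough standalone illustration of that selection pattern follows; Disk and the sample data are placeholders, not Ganeti's objects.Disk or real cluster configuration.

```python
# Rough illustration of the enumerate()-based disk selection used above;
# Disk and the sample values are placeholders, not Ganeti's own objects.
class Disk(object):
  def __init__(self, iv_name, size):
    self.iv_name = iv_name
    self.size = size

instance_disks = [Disk("sda", 1024), Disk("sdb", 2048)]
op_disks = [1]  # the opcode now selects disks by index, not by iv_name

for idx, dev in enumerate(instance_disks):
  if idx not in op_disks:
    continue
  # log messages now name the disk by position: disk/0, disk/1, ...
  print("checking disk/%d (formerly %s)" % (idx, dev.iv_name))
```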
4278 4278
4279 4279       # Step: create new storage
4280 4280       self.proc.LogStep(3, steps_total, "allocate new storage")
4281      -     for dev in instance.disks:
     4281 +     for idx, dev in enumerate(instance.disks):
4282 4282         size = dev.size
4283      -       info("adding new local storage on %s for %s" % (new_node, dev.iv_name))
     4283 +       info("adding new local storage on %s for disk/%d" %
     4284 +            (new_node, idx))
4284 4285         # since we *always* want to create this LV, we use the
4285 4286         # _Create...OnPrimary (which forces the creation), even if we
4286 4287         # are talking about the secondary node
 ...   ...
4291 4292                                    " node '%s'" %
4292 4293                                    (new_lv.logical_id[1], new_node))
4293 4294
4294      -
4295 4295       # Step 4: dbrd minors and drbd setups changes
4296 4296       # after this, we must manually remove the drbd minors on both the
4297 4297       # error and the success paths
 ...   ...
4299 4299                                        instance.name)
4300 4300       logging.debug("Allocated minors %s" % (minors,))
4301 4301       self.proc.LogStep(4, steps_total, "changing drbd configuration")
4302      -     for dev, new_minor in zip(instance.disks, minors):
     4302 +     for idx, (dev, new_minor) in enumerate(zip(instance.disks, minors)):
4303 4303         size = dev.size
4304      -       info("activating a new drbd on %s for %s" % (new_node, dev.iv_name))
     4304 +       info("activating a new drbd on %s for disk/%d" % (new_node, idx))
4305 4305         # create new devices on new_node
4306 4306         if pri_node == dev.logical_id[0]:
4307 4307           new_logical_id = (pri_node, new_node,
 ...   ...
4311 4311           new_logical_id = (new_node, pri_node,
4312 4312                             dev.logical_id[2], new_minor, dev.logical_id[4],
4313 4313                             dev.logical_id[5])
4314      -       iv_names[dev.iv_name] = (dev, dev.children, new_logical_id)
     4314 +       iv_names[idx] = (dev, dev.children, new_logical_id)
4315 4315         logging.debug("Allocated new_minor: %s, new_logical_id: %s", new_minor,
4316 4316                       new_logical_id)
4317 4317         new_drbd = objects.Disk(dev_type=constants.LD_DRBD8,
 ...   ...
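Step 4 pairs every disk with its freshly allocated DRBD minor and now keys the iv_names bookkeeping dict by the disk index rather than by iv_name, which is what the later verification and cleanup loops rely on. A simplified sketch of that pairing follows; the disk and minor values are made up purely for illustration.

```python
# Simplified sketch of the step-4 bookkeeping: disks and newly allocated
# DRBD minors are walked in lockstep and recorded under the disk index.
# The values below are invented, not real Ganeti objects or minors.
disks = ["drbd-disk-a", "drbd-disk-b"]   # stand-ins for objects.Disk
minors = [7, 8]                          # stand-ins for allocated minors

iv_names = {}
for idx, (dev, new_minor) in enumerate(zip(disks, minors)):
  iv_names[idx] = (dev, new_minor)

# later code can iterate the dict and report disks as disk/<idx>
for idx, (dev, new_minor) in sorted(iv_names.items()):
  print("disk/%d will use DRBD minor %d" % (idx, new_minor))
```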
4324 4324           raise errors.OpExecError("Failed to create new DRBD on"
4325 4325                                    " node '%s'" % new_node)
4326 4326
4327      -     for dev in instance.disks:
     4327 +     for idx, dev in enumerate(instance.disks):
4328 4328         # we have new devices, shutdown the drbd on the old secondary
4329      -       info("shutting down drbd for %s on old node" % dev.iv_name)
     4329 +       info("shutting down drbd for disk/%d on old node" % idx)
4330 4330         cfg.SetDiskID(dev, old_node)
4331 4331         if not self.rpc.call_blockdev_shutdown(old_node, dev):
4332      -         warning("Failed to shutdown drbd for %s on old node" % dev.iv_name,
     4332 +         warning("Failed to shutdown drbd for disk/%d on old node" % idx,
4333 4333                 hint="Please cleanup this device manually as soon as possible")
4334 4334
4335 4335       info("detaching primary drbds from the network (=> standalone)")
4336 4336       done = 0
4337      -     for dev in instance.disks:
     4337 +     for idx, dev in enumerate(instance.disks):
4338 4338         cfg.SetDiskID(dev, pri_node)
4339 4339         # set the network part of the physical (unique in bdev terms) id
4340 4340         # to None, meaning detach from network
 ...   ...
4344 4344         if self.rpc.call_blockdev_find(pri_node, dev):
4345 4345           done += 1
4346 4346         else:
4347      -         warning("Failed to detach drbd %s from network, unusual case" %
4348      -                 dev.iv_name)
     4347 +         warning("Failed to detach drbd disk/%d from network, unusual case" %
     4348 +                 idx)
4349 4349
4350 4350       if not done:
4351 4351         # no detaches succeeded (very unlikely)
 ...   ...
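The detach loop above tolerates per-disk failures but still needs at least one success before the operation can continue, hence the done counter and the if not done branch. A hedged, self-contained version of that control flow follows; find_device is a hypothetical stand-in for self.rpc.call_blockdev_find and the sample devices are invented.

```python
# Hedged sketch of the "count successful detaches" control flow above;
# find_device is a hypothetical stand-in for self.rpc.call_blockdev_find.
def detach_from_network(devices, find_device):
  done = 0
  for idx, dev in enumerate(devices):
    if find_device(dev):
      done += 1
    else:
      print("Failed to detach drbd disk/%d from network, unusual case" % idx)
  if not done:
    # mirrors the "no detaches succeeded (very unlikely)" branch
    raise RuntimeError("Can't detach at least one disk from the network")
  return done

# toy usage: the second device fails to detach, but one success is enough
print(detach_from_network(["d0", "d1"], lambda dev: dev == "d0"))  # prints 1
```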
4366 4366       # and now perform the drbd attach
4367 4367       info("attaching primary drbds to new secondary (standalone => connected)")
4368 4368       failures = []
4369      -     for dev in instance.disks:
4370      -       info("attaching primary drbd for %s to new secondary node" % dev.iv_name)
     4369 +     for idx, dev in enumerate(instance.disks):
     4370 +       info("attaching primary drbd for disk/%d to new secondary node" % idx)
4371 4371         # since the attach is smart, it's enough to 'find' the device,
4372 4372         # it will automatically activate the network, if the physical_id
4373 4373         # is correct
4374 4374         cfg.SetDiskID(dev, pri_node)
4375 4375         logging.debug("Disk to attach: %s", dev)
4376 4376         if not self.rpc.call_blockdev_find(pri_node, dev):
4377      -         warning("can't attach drbd %s to new secondary!" % dev.iv_name,
     4377 +         warning("can't attach drbd disk/%d to new secondary!" % idx,
4378 4378                 "please do a gnt-instance info to see the status of disks")
4379 4379
4380 4380       # this can fail as the old devices are degraded and _WaitForSync
 ...   ...
4384 4384       _WaitForSync(self, instance, unlock=True)
4385 4385
4386 4386       # so check manually all the devices
4387      -     for name, (dev, old_lvs, _) in iv_names.iteritems():
     4387 +     for idx, (dev, old_lvs, _) in iv_names.iteritems():
4388 4388         cfg.SetDiskID(dev, pri_node)
4389 4389         is_degr = self.rpc.call_blockdev_find(pri_node, dev)[5]
4390 4390         if is_degr:
4391      -         raise errors.OpExecError("DRBD device %s is degraded!" % name)
     4391 +         raise errors.OpExecError("DRBD device disk/%d is degraded!" % idx)
4392 4392
4393 4393       self.proc.LogStep(6, steps_total, "removing old storage")
4394      -     for name, (dev, old_lvs, _) in iv_names.iteritems():
4395      -       info("remove logical volumes for %s" % name)
     4394 +     for idx, (dev, old_lvs, _) in iv_names.iteritems():
     4395 +       info("remove logical volumes for disk/%d" % idx)
4396 4396         for lv in old_lvs:
4397 4397           cfg.SetDiskID(lv, old_node)
4398 4398           if not self.rpc.call_blockdev_remove(old_node, lv):
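After the sync, the last two loops re-check every replaced disk for degradation and then drop the old logical volumes, both now iterating iv_names by index. A rough sketch of those two passes follows; blockdev_find and blockdev_remove are hypothetical stand-ins for the RPC calls, and status[5] mirrors the degradation flag read above.

```python
# Rough sketch of the final verification and cleanup passes over iv_names;
# blockdev_find/blockdev_remove are hypothetical stand-ins for the RPCs,
# and status[5] mirrors the degradation flag read in the code above.
def finish_replace(iv_names, blockdev_find, blockdev_remove):
  for idx, (dev, old_lvs, _) in sorted(iv_names.items()):
    status = blockdev_find(dev)
    if status[5]:
      raise RuntimeError("DRBD device disk/%d is degraded!" % idx)
  for idx, (dev, old_lvs, _) in sorted(iv_names.items()):
    print("remove logical volumes for disk/%d" % idx)
    for lv in old_lvs:
      if not blockdev_remove(lv):
        print("Can't remove old LV %s for disk/%d, clean up manually" % (lv, idx))

# toy usage: one healthy disk with two old LVs to drop
finish_replace({0: ("drbd0", ["lv_data", "lv_meta"], None)},
               lambda dev: [None] * 5 + [False],
               lambda lv: True)
```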