i_non_redundant = [] # Non-redundant instances
i_non_a_balanced = [] # Non-auto-balanced instances
n_offline = [] # List of offline nodes
+ n_drained = [] # List of nodes being drained
node_volume = {}
node_instance = {}
node_info = {}
ntype = "master"
elif node_i.master_candidate:
ntype = "master candidate"
+ elif node_i.drained:
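+ # drained nodes stay in the cluster but must receive no new instances;
+ # they are collected here and reported in the summary below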
+ ntype = "drained"
+ n_drained.append(node)
else:
ntype = "regular"
feedback_fn("* Verifying node %s (%s)" % (node, ntype))
if n_offline:
feedback_fn(" - NOTICE: %d offline node(s) found." % len(n_offline))
+ if n_drained:
+ feedback_fn(" - NOTICE: %d drained node(s) found." % len(n_drained))
+
return not bad
def HooksCallBack(self, phase, hooks_results, feedback_fn, lu_result):
result = True
if on_primary or dev.AssembleOnSecondary():
rstats = lu.rpc.call_blockdev_find(node, dev)
- if rstats.failed or not rstats.data:
- logging.warning("Node %s: disk degraded, not found or node down", node)
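+ # with the new-style RPC result, RemoteFailMsg() returns an error
+ # message on failure, and the find output is carried in rstats.payload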
+ msg = rstats.RemoteFailMsg()
+ if msg:
+ lu.LogWarning("Can't find disk on node %s: %s", node, msg)
+ result = False
+ elif not rstats.payload:
+ lu.LogWarning("Can't find disk on node %s", node)
result = False
else:
- result = result and (not rstats.data[idx])
+ result = result and (not rstats.payload[idx])
if dev.children:
for child in dev.children:
result = result and _CheckDiskConsistency(lu, child, node, on_primary)
"master_candidate",
"master",
"offline",
+ "drained",
)
def ExpandNames(self):
val = node.name == master_node
elif field == "offline":
val = node.offline
+ elif field == "drained":
+ val = node.drained
elif self._FIELDS_DYNAMIC.Matches(field):
val = live_data[node.name].get(field, None)
else:
primary_ip=primary_ip,
secondary_ip=secondary_ip,
master_candidate=master_candidate,
- offline=False)
+ offline=False, drained=False)
def Exec(self, feedback_fn):
"""Adds the new node to the cluster.
for node, node_disk in inst_disk.ComputeNodeTree(instance.primary_node):
lu.cfg.SetDiskID(node_disk, node)
result = lu.rpc.call_blockdev_assemble(node, node_disk, iname, False)
- if result.failed or not result:
+ msg = result.RemoteFailMsg()
+ if msg:
lu.proc.LogWarning("Could not prepare block device %s on node %s"
- " (is_primary=False, pass=1)",
- inst_disk.iv_name, node)
+ " (is_primary=False, pass=1): %s",
+ inst_disk.iv_name, node, msg)
if not ignore_secondaries:
disks_ok = False
continue
lu.cfg.SetDiskID(node_disk, node)
result = lu.rpc.call_blockdev_assemble(node, node_disk, iname, True)
- if result.failed or not result:
+ msg = result.RemoteFailMsg()
+ if msg:
lu.proc.LogWarning("Could not prepare block device %s on node %s"
- " (is_primary=True, pass=2)",
- inst_disk.iv_name, node)
+ " (is_primary=True, pass=2): %s",
+ inst_disk.iv_name, node, msg)
disks_ok = False
- device_info.append((instance.primary_node, inst_disk.iv_name, result.data))
+ device_info.append((instance.primary_node, inst_disk.iv_name, result.payload))
ignored.
"""
- result = True
+ all_result = True
for disk in instance.disks:
for node, top_disk in disk.ComputeNodeTree(instance.primary_node):
lu.cfg.SetDiskID(top_disk, node)
result = lu.rpc.call_blockdev_shutdown(node, top_disk)
- if result.failed or not result.data:
- logging.error("Could not shutdown block device %s on node %s",
- disk.iv_name, node)
+ msg = result.RemoteFailMsg()
+ if msg:
+ lu.LogWarning("Could not shutdown block device %s on node %s: %s",
+ disk.iv_name, node, msg)
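+ # a failure on the primary node is forgiven only when ignore_primary
+ # is set; failures on any other node always mark the shutdown as failed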
if not ignore_primary or node != instance.primary_node:
- result = False
- return result
+ all_result = False
+ return all_result
def _CheckNodeFreeMemory(lu, node, reason, requested, hypervisor_name):
"sda_size", "sdb_size", "vcpus", "tags",
"network_port", "beparams",
"(disk).(size)/([0-9]+)",
- "(disk).(sizes)",
+ "(disk).(sizes)", "disk_usage",
"(nic).(mac|ip|bridge)/([0-9]+)",
"(nic).(macs|ips|bridges)",
"(disk|nic).(count)",
val = instance.FindDisk(idx).size
except errors.OpPrereqError:
val = None
+ elif field == "disk_usage": # total disk usage per node
+ disk_sizes = [{'size': disk.size} for disk in instance.disks]
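+ # reuse the same size computation applied when allocating instances,
+ # so the value reflects what the disk template consumes on the node(s)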
+ val = _ComputeDiskSize(instance.disk_template, disk_sizes)
elif field == "tags":
val = list(instance.GetTags())
elif field == "serial_no":
(this will be represented as a LVM tag)
@type force_open: boolean
@param force_open: this parameter will be passed to the
- L{backend.CreateBlockDevice} function where it specifies
+ L{backend.BlockdevCreate} function where it specifies
whether we run on primary or not, and it affects both
the child assembly and the device's own Open() execution
(this will be represented as a LVM tag)
@type force_open: boolean
@param force_open: this parameter will be passed to the
- L{backend.CreateBlockDevice} function where it specifies
+ L{backend.BlockdevCreate} function where it specifies
whether we run on primary or not, and it affects both
the child assembly and the device's own Open() execution
"""
logging.info("Removing block devices for instance %s", instance.name)
- result = True
+ all_result = True
for device in instance.disks:
for node, disk in device.ComputeNodeTree(instance.primary_node):
lu.cfg.SetDiskID(disk, node)
- result = lu.rpc.call_blockdev_remove(node, disk)
- if result.failed or not result.data:
- lu.proc.LogWarning("Could not remove block device %s on node %s,"
- " continuing anyway", device.iv_name, node)
- result = False
+ msg = lu.rpc.call_blockdev_remove(node, disk).RemoteFailMsg()
+ if msg:
+ lu.LogWarning("Could not remove block device %s on node %s,"
+ " continuing anyway: %s", device.iv_name, node, msg)
+ all_result = False
if instance.disk_template == constants.DT_FILE:
file_storage_dir = os.path.dirname(instance.disks[0].logical_id[1])
file_storage_dir)
if result.failed or not result.data:
logging.error("Could not remove directory '%s'", file_storage_dir)
- result = False
+ all_result = False
- return result
+ return all_result
def _ComputeDiskSize(disk_template, disks):
for node in tgt_node, oth_node:
info("checking disk/%d on %s" % (idx, node))
cfg.SetDiskID(dev, node)
- if not self.rpc.call_blockdev_find(node, dev):
- raise errors.OpExecError("Can't find disk/%d on node %s" %
- (idx, node))
+ result = self.rpc.call_blockdev_find(node, dev)
+ msg = result.RemoteFailMsg()
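+ # treat a missing device (empty find payload) like an RPC failure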
+ if not msg and not result.payload:
+ msg = "disk not found"
+ if msg:
+ raise errors.OpExecError("Can't find disk/%d on node %s: %s" %
+ (idx, node, msg))
# Step: check other node consistency
self.proc.LogStep(2, steps_total, "check peer consistency")
# build the rename list based on what LVs exist on the node
rlist = []
for to_ren in old_lvs:
- find_res = self.rpc.call_blockdev_find(tgt_node, to_ren)
- if not find_res.failed and find_res.data is not None: # device exists
+ result = self.rpc.call_blockdev_find(tgt_node, to_ren)
+ if not result.RemoteFailMsg() and result.payload:
+ # device exists
rlist.append((to_ren, ren_fn(to_ren, temp_suffix)))
info("renaming the old LVs on the target node")
result = self.rpc.call_blockdev_addchildren(tgt_node, dev, new_lvs)
if result.failed or not result.data:
for new_lv in new_lvs:
- result = self.rpc.call_blockdev_remove(tgt_node, new_lv)
- if result.failed or not result.data:
- warning("Can't rollback device %s", hint="manually cleanup unused"
- " logical volumes")
+ msg = self.rpc.call_blockdev_remove(tgt_node, new_lv).RemoteFailMsg()
+ if msg:
+ warning("Can't rollback device %s: %s", dev, msg,
+ hint="cleanup manually the unused logical volumes")
raise errors.OpExecError("Can't add local storage to drbd")
dev.children = new_lvs
for name, (dev, old_lvs, new_lvs) in iv_names.iteritems():
cfg.SetDiskID(dev, instance.primary_node)
result = self.rpc.call_blockdev_find(instance.primary_node, dev)
- if result.failed or result.data[5]:
+ msg = result.RemoteFailMsg()
+ if not msg and not result.payload:
+ msg = "disk not found"
+ if msg:
+ raise errors.OpExecError("Can't find DRBD device %s: %s" %
+ (name, msg))
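+ # index 5 of the blockdev_find payload is the is_degraded flag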
+ if result.payload[5]:
raise errors.OpExecError("DRBD device %s is degraded!" % name)
# Step: remove old storage
info("remove logical volumes for %s" % name)
for lv in old_lvs:
cfg.SetDiskID(lv, tgt_node)
- result = self.rpc.call_blockdev_remove(tgt_node, lv)
- if result.failed or not result.data:
- warning("Can't remove old LV", hint="manually remove unused LVs")
+ msg = self.rpc.call_blockdev_remove(tgt_node, lv).RemoteFailMsg()
+ if msg:
+ warning("Can't remove old LV: %s" % msg,
+ hint="manually remove unused LVs")
continue
def _ExecD8Secondary(self, feedback_fn):
info("checking disk/%d on %s" % (idx, pri_node))
cfg.SetDiskID(dev, pri_node)
result = self.rpc.call_blockdev_find(pri_node, dev)
- result.Raise()
- if not result.data:
- raise errors.OpExecError("Can't find disk/%d on node %s" %
- (idx, pri_node))
+ msg = result.RemoteFailMsg()
+ if not msg and not result.payload:
+ msg = "disk not found"
+ if msg:
+ raise errors.OpExecError("Can't find disk/%d on node %s: %s" %
+ (idx, pri_node, msg))
# Step: check other node consistency
self.proc.LogStep(2, steps_total, "check peer consistency")
# we have new devices, shutdown the drbd on the old secondary
info("shutting down drbd for disk/%d on old node" % idx)
cfg.SetDiskID(dev, old_node)
- result = self.rpc.call_blockdev_shutdown(old_node, dev)
- if result.failed or not result.data:
- warning("Failed to shutdown drbd for disk/%d on old node" % idx,
+ msg = self.rpc.call_blockdev_shutdown(old_node, dev).RemoteFailMsg()
+ if msg:
+ warning("Failed to shutdown drbd for disk/%d on old node: %s" %
+ (idx, msg),
hint="Please cleanup this device manually as soon as possible")
info("detaching primary drbds from the network (=> standalone)")
for idx, (dev, old_lvs, _) in iv_names.iteritems():
cfg.SetDiskID(dev, pri_node)
result = self.rpc.call_blockdev_find(pri_node, dev)
- result.Raise()
- if result.data[5]:
+ msg = result.RemoteFailMsg()
+ if not msg and not result.payload:
+ msg = "disk not found"
+ if msg:
+ raise errors.OpExecError("Can't find DRBD device disk/%d: %s" %
+ (idx, msg))
+ if result.payload[5]:
raise errors.OpExecError("DRBD device disk/%d is degraded!" % idx)
self.proc.LogStep(6, steps_total, "removing old storage")
info("remove logical volumes for disk/%d" % idx)
for lv in old_lvs:
cfg.SetDiskID(lv, old_node)
- result = self.rpc.call_blockdev_remove(old_node, lv)
- if result.failed or not result.data:
- warning("Can't remove LV on old secondary",
+ msg = self.rpc.call_blockdev_remove(old_node, lv).RemoteFailMsg()
+ if msg:
+ warning("Can't remove LV on old secondary: %s", msg,
hint="Cleanup stale volumes by hand")
def Exec(self, feedback_fn):
if not static:
self.cfg.SetDiskID(dev, instance.primary_node)
dev_pstatus = self.rpc.call_blockdev_find(instance.primary_node, dev)
- dev_pstatus.Raise()
- dev_pstatus = dev_pstatus.data
+ msg = dev_pstatus.RemoteFailMsg()
+ if msg:
+ raise errors.OpExecError("Can't compute disk status for %s: %s" %
+ (instance.name, msg))
+ dev_pstatus = dev_pstatus.payload
else:
dev_pstatus = None
if snode and not static:
self.cfg.SetDiskID(dev, snode)
dev_sstatus = self.rpc.call_blockdev_find(snode, dev)
- dev_sstatus.Raise()
- dev_sstatus = dev_sstatus.data
+ msg = dev_sstatus.RemoteFailMsg()
+ if msg:
+ raise errors.OpExecError("Can't compute disk status for %s: %s" %
+ (instance.name, msg))
+ dev_sstatus = dev_sstatus.payload
else:
dev_sstatus = None
device_idx = len(instance.disks)
for node, disk in device.ComputeNodeTree(instance.primary_node):
self.cfg.SetDiskID(disk, node)
- rpc_result = self.rpc.call_blockdev_remove(node, disk)
- if rpc_result.failed or not rpc_result.data:
- self.proc.LogWarning("Could not remove disk/%d on node %s,"
- " continuing anyway", device_idx, node)
+ msg = self.rpc.call_blockdev_remove(node, disk).RemoteFailMsg()
+ if msg:
+ self.LogWarning("Could not remove disk/%d on node %s: %s,"
+ " continuing anyway", device_idx, node, msg)
result.append(("disk/%d" % device_idx, "remove"))
elif disk_op == constants.DDM_ADD:
# add a new disk
# hvparams changes
if self.op.hvparams:
- instance.hvparams = self.hv_new
+ instance.hvparams = self.hv_inst
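+ # hv_inst should contain only the instance-level overrides; saving the
+ # merged hv_new would freeze cluster defaults into the instance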
for key, val in self.op.hvparams.iteritems():
result.append(("hv/%s" % key, val))
self.LogWarning("Could not export block device %s from node %s to"
" node %s", dev.logical_id[1], src_node,
dst_node.name)
- result = self.rpc.call_blockdev_remove(src_node, dev)
- if result.failed or not result.data:
+ msg = self.rpc.call_blockdev_remove(src_node, dev).RemoteFailMsg()
+ if msg:
self.LogWarning("Could not remove snapshot block device %s from node"
- " %s", dev.logical_id[1], src_node)
+ " %s: %s", dev.logical_id[1], src_node, msg)
result = self.rpc.call_finalize_export(dst_node.name, instance, snap_disks)
if result.failed or not result.data:
"primary_ip": ninfo.primary_ip,
"secondary_ip": ninfo.secondary_ip,
"offline": ninfo.offline,
+ "drained": ninfo.drained,
"master_candidate": ninfo.master_candidate,
}