return _BuildInstanceHookEnv(**args)
-def _AdjustCandidatePool(lu):
+def _AdjustCandidatePool(lu, exceptions):
"""Adjust the candidate pool after node operations.
"""
- mod_list = lu.cfg.MaintainCandidatePool()
+ mod_list = lu.cfg.MaintainCandidatePool(exceptions)
if mod_list:
lu.LogInfo("Promoted nodes to master candidate role: %s",
", ".join(node.name for node in mod_list))
for name in mod_list:
lu.context.ReaddNode(name)
- mc_now, mc_max = lu.cfg.GetMasterCandidateStats()
+ mc_now, mc_max, _ = lu.cfg.GetMasterCandidateStats(exceptions)
if mc_now > mc_max:
lu.LogInfo("Note: more nodes are candidates (%d) than desired (%d)" %
(mc_now, mc_max))
+def _DecideSelfPromotion(lu, exceptions=None):
+ """Decide whether I should promote myself as a master candidate.
+
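+  @rtype: boolean
+  @return: whether this node should be promoted to master candidate
+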
+ """
+ cp_size = lu.cfg.GetClusterInfo().candidate_pool_size
+ mc_now, mc_should, _ = lu.cfg.GetMasterCandidateStats(exceptions)
+  # the new node will increase mc_max by one, so:
+ mc_should = min(mc_should + 1, cp_size)
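+  # e.g. cp_size=10, mc_now=9, mc_should=9: the new node raises mc_should
+  # to min(9 + 1, 10) = 10 > mc_now, so promotion is advised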
+ return mc_now < mc_should
+
+
def _CheckNicsBridgesExist(lu, target_nics, target_node,
profile=constants.PP_DEFAULT):
"""Check that the brigdes needed by a list of nics exist.
if full_name is None:
raise errors.OpPrereqError("Instance '%s' not known" % name)
self.wanted_names.append(full_name)
- self.needed_locks[locking.LEVEL_INSTANCE] = self.wanted_names
+      self.needed_locks = {
+        locking.LEVEL_NODE: [],
+        locking.LEVEL_INSTANCE: self.wanted_names,
+        }
self.wanted_instances = [self.cfg.GetInstanceInfo(name) for name
in self.wanted_names]
+ def _EnsureChildSizes(self, disk):
+ """Ensure children of the disk have the needed disk size.
+
+    This is valid mainly for DRBD8 and fixes an issue where the
+    children have a smaller disk size than the parent.
+
+ @param disk: an L{ganeti.objects.Disk} object
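+    @rtype: boolean
+    @return: True if any child disk size had to be fixed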
+
+ """
+ if disk.dev_type == constants.LD_DRBD8:
+ assert disk.children, "Empty children for DRBD8?"
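+      # for DRBD8, children[0] is the data device and children[1] the
+      # metadata device; only the data device size must match the parent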
+ fchild = disk.children[0]
+ mismatch = fchild.size < disk.size
+ if mismatch:
+ self.LogInfo("Child disk has size %d, parent %d, fixing",
+ fchild.size, disk.size)
+ fchild.size = disk.size
+
+ # and we recurse on this child only, not on the metadev
+ return self._EnsureChildSizes(fchild) or mismatch
+ else:
+ return False
+
def Exec(self, feedback_fn):
"""Verify the size of cluster disks.
changed = []
for node, dskl in per_node_disks.items():
- result = self.rpc.call_blockdev_getsizes(node, [v[2] for v in dskl])
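+      # work on copies and set the per-node disk IDs, so that the RPC can
+      # identify the devices without modifying the configuration objects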
+ newl = [v[2].Copy() for v in dskl]
+ for dsk in newl:
+ self.cfg.SetDiskID(dsk, node)
+ result = self.rpc.call_blockdev_getsizes(node, newl)
if result.fail_msg:
self.LogWarning("Failure in blockdev_getsizes call to node"
" %s, ignoring", node)
disk.size = size
self.cfg.Update(instance)
changed.append((instance.name, idx, size))
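+        # also make sure DRBD children are not smaller than the parent disk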
+ if self._EnsureChildSizes(disk):
+ self.cfg.Update(instance)
+ changed.append((instance.name, idx, disk.size))
return changed
if self.op.candidate_pool_size is not None:
self.cluster.candidate_pool_size = self.op.candidate_pool_size
# we need to update the pool size here, otherwise the save will fail
- _AdjustCandidatePool(self)
+ _AdjustCandidatePool(self, [])
self.cfg.Update(self.cluster)
logging.info("Stopping the node daemon and removing configs from node %s",
node.name)
+ # Promote nodes to master candidate as needed
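+    # (the node being removed is passed as an exception, so it is no longer
+    # counted in the candidate pool)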
+ _AdjustCandidatePool(self, exceptions=[node.name])
self.context.RemoveNode(node.name)
# Run post hooks on the node before it's removed
self.LogWarning("Errors encountered on the remote node while leaving"
" the cluster: %s", msg)
- # Promote nodes to master candidate as needed
- _AdjustCandidatePool(self)
-
class LUQueryNodes(NoHooksLU):
"""Logical unit for querying nodes.
raise errors.OpPrereqError("Node secondary ip not reachable by TCP"
" based ping to noded port")
- cp_size = self.cfg.GetClusterInfo().candidate_pool_size
if self.op.readd:
exceptions = [node]
else:
exceptions = []
- mc_now, mc_max = self.cfg.GetMasterCandidateStats(exceptions)
- # the new node will increase mc_max with one, so:
- mc_max = min(mc_max + 1, cp_size)
- self.master_candidate = mc_now < mc_max
+
+ self.master_candidate = _DecideSelfPromotion(self, exceptions=exceptions)
if self.op.readd:
self.new_node = self.cfg.GetNodeInfo(node)
nl_payload = result[verifier].payload[constants.NV_NODELIST]
if nl_payload:
for failed in nl_payload:
- feedback_fn("ssh/hostname verification failed %s -> %s" %
+ feedback_fn("ssh/hostname verification failed"
+ " (checking from %s): %s" %
(verifier, nl_payload[failed]))
raise errors.OpExecError("ssh/hostname verification failed.")
raise errors.OpPrereqError("The master role can be changed"
" only via masterfailover")
- if ((self.op.master_candidate == False or self.op.offline == True or
- self.op.drained == True) and node.master_candidate):
+    # Boolean flags telling us whether we're offlining/draining the node
+    # or doing the opposite (de-offlining/un-draining)
+ offline_or_drain = self.op.offline == True or self.op.drained == True
+ deoffline_or_drain = self.op.offline == False or self.op.drained == False
+
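+    # demoting a master candidate, either explicitly or by offlining/draining
+    # it, must not leave the cluster with too few candidates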
+ if (node.master_candidate and
+ (self.op.master_candidate == False or offline_or_drain)):
cp_size = self.cfg.GetClusterInfo().candidate_pool_size
- num_candidates, _ = self.cfg.GetMasterCandidateStats()
- if num_candidates <= cp_size:
+ mc_now, mc_should, mc_max = self.cfg.GetMasterCandidateStats()
+ if mc_now <= cp_size:
msg = ("Not enough master candidates (desired"
- " %d, new value will be %d)" % (cp_size, num_candidates-1))
- if self.op.force:
+ " %d, new value will be %d)" % (cp_size, mc_now-1))
+ # Only allow forcing the operation if it's an offline/drain operation,
+ # and we could not possibly promote more nodes.
+      # FIXME: this can still lead to issues if another node which could
+      # be promoted appears in the meantime
+ if self.op.force and offline_or_drain and mc_should == mc_max:
self.LogWarning(msg)
else:
raise errors.OpPrereqError(msg)
raise errors.OpPrereqError("Node '%s' is offline or drained, can't set"
" to master_candidate" % node.name)
+    # If we're being de-offlined or un-drained, we'll MC ourselves if needed
+ if (deoffline_or_drain and not offline_or_drain and not
+ self.op.master_candidate == True):
+ self.op.master_candidate = _DecideSelfPromotion(self)
+ if self.op.master_candidate:
+ self.LogInfo("Autopromoting node to master candidate")
+
return
def Exec(self, feedback_fn):
"file_storage_dir": cluster.file_storage_dir,
"ctime": cluster.ctime,
"mtime": cluster.mtime,
+ "uuid": cluster.uuid,
"tags": list(cluster.GetTags()),
}
"serial_no": instance.serial_no,
"mtime": instance.mtime,
"ctime": instance.ctime,
+ "uuid": instance.uuid,
}
result[instance.name] = idict