Revision 601908d0: lib/cmdlib.py
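This revision adds an `auto_promote` parameter to the node-modify logical unit. When a requested change could demote a master candidate (offlining it, draining it, or explicitly clearing `master_candidate`), the operation can now lock all nodes and promote replacement candidates itself, instead of making the caller force the change past a too-small candidate pool. The first hunk validates the new opcode field next to the existing ones: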
```diff
--- a/lib/cmdlib.py
+++ b/lib/cmdlib.py
@@ -3158,6 +3158,7 @@
     _CheckBooleanOpField(self.op, 'master_candidate')
     _CheckBooleanOpField(self.op, 'offline')
     _CheckBooleanOpField(self.op, 'drained')
+    _CheckBooleanOpField(self.op, 'auto_promote')
     all_mods = [self.op.offline, self.op.master_candidate, self.op.drained]
     if all_mods.count(None) == 3:
       raise errors.OpPrereqError("Please pass at least one modification",
```
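`_CheckBooleanOpField` is defined elsewhere in cmdlib.py and is not part of this diff; a minimal standalone sketch of what such a tri-state validator does, assuming `None` means "leave unchanged" and substituting `ValueError` for Ganeti's `errors.OpPrereqError` so the example runs on its own:

```python
def check_boolean_op_field(op, name):
  """Reject values of op.<name> other than True, False or None.

  None means "leave this attribute unchanged"; a missing attribute is
  normalised to None so later code can rely on it existing.
  """
  val = getattr(op, name, None)
  if not (val is None or isinstance(val, bool)):
    raise ValueError("Invalid boolean parameter '%s' (%s)" % (name, val))
  setattr(op, name, val)
```

The next hunk computes the decision flags once in `CheckArguments` and lets the lock set depend on them: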
```diff
@@ -3167,8 +3168,22 @@
                                  " state at the same time",
                                  errors.ECODE_INVAL)
 
+    # Boolean value that tells us whether we're offlining or draining the node
+    self.offline_or_drain = (self.op.offline == True or
+                             self.op.drained == True)
+    self.deoffline_or_drain = (self.op.offline == False or
+                               self.op.drained == False)
+    self.might_demote = (self.op.master_candidate == False or
+                         self.offline_or_drain)
+
+    self.lock_all = self.op.auto_promote and self.might_demote
+
+
   def ExpandNames(self):
-    self.needed_locks = {locking.LEVEL_NODE: self.op.node_name}
+    if self.lock_all:
+      self.needed_locks = {locking.LEVEL_NODE: locking.ALL_SET}
+    else:
+      self.needed_locks = {locking.LEVEL_NODE: self.op.node_name}
 
   def BuildHooksEnv(self):
     """Build hooks env.
```
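Requesting `locking.ALL_SET` at `LEVEL_NODE` takes every node lock, which is what makes the later pool adjustment safe: no other operation can change node roles while replacements are chosen. The flag logic itself is plain boolean algebra over the tri-state fields; a standalone sketch with the same semantics (the function name is illustrative, not from the diff):

```python
def compute_lock_all(offline, drained, master_candidate, auto_promote):
  # "== True" is deliberate: the fields are tri-state and None (meaning
  # "no change") must not count as a request.
  offline_or_drain = offline == True or drained == True
  # Any of these three changes can cost the cluster a master candidate.
  might_demote = master_candidate == False or offline_or_drain
  # Lock everything only when a demotion is possible and the caller
  # asked for replacements to be promoted automatically.
  return auto_promote and might_demote

# Offlining with auto_promote: all node locks are needed.
assert compute_lock_all(offline=True, drained=None,
                        master_candidate=None, auto_promote=True)
# A plain promotion demotes nobody, so one node lock suffices.
assert not compute_lock_all(offline=None, drained=None,
                            master_candidate=True, auto_promote=True)
```

In `CheckPrereq`, the force-based escape hatch is replaced by a check against the candidate pool statistics: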
```diff
@@ -3203,25 +3218,17 @@
                                  " only via masterfailover",
                                  errors.ECODE_INVAL)
 
-    # Boolean value that tells us whether we're offlining or draining the node
-    offline_or_drain = self.op.offline == True or self.op.drained == True
-    deoffline_or_drain = self.op.offline == False or self.op.drained == False
-
-    if (node.master_candidate and
-        (self.op.master_candidate == False or offline_or_drain)):
-      cp_size = self.cfg.GetClusterInfo().candidate_pool_size
-      mc_now, mc_should, mc_max = self.cfg.GetMasterCandidateStats()
-      if mc_now <= cp_size:
-        msg = ("Not enough master candidates (desired"
-               " %d, new value will be %d)" % (cp_size, mc_now-1))
-        # Only allow forcing the operation if it's an offline/drain operation,
-        # and we could not possibly promote more nodes.
-        # FIXME: this can still lead to issues if in any way another node which
-        # could be promoted appears in the meantime.
-        if self.op.force and offline_or_drain and mc_should == mc_max:
-          self.LogWarning(msg)
-        else:
-          raise errors.OpPrereqError(msg, errors.ECODE_INVAL)
+
+    if node.master_candidate and self.might_demote and not self.lock_all:
+      assert not self.op.auto_promote, "auto-promote set but lock_all not"
+      # check if after removing the current node, we're missing master
+      # candidates
+      (mc_remaining, mc_should, _) = \
+          self.cfg.GetMasterCandidateStats(exceptions=[node.name])
+      if mc_remaining != mc_should:
+        raise errors.OpPrereqError("Not enough master candidates, please"
+                                   " pass auto_promote to allow promotion",
+                                   errors.ECODE_INVAL)
 
     if (self.op.master_candidate == True and
         ((node.offline and not self.op.offline == False) or
```
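Previously, demoting the last spare master candidate could only be pushed through with `force`, and only for offline/drain requests when no further node could possibly be promoted; the removed FIXME admits this was racy. The new code instead refuses unless `auto_promote` was given, in which case `lock_all` is already set and the shortfall is repaired during `Exec`. A sketch of the new decision as a pure function, again with `ValueError` standing in for `errors.OpPrereqError`:

```python
def check_demotion(is_mc, might_demote, lock_all, mc_remaining, mc_should):
  """Refuse a demotion that would leave the candidate pool short.

  mc_remaining and mc_should mirror what GetMasterCandidateStats(
  exceptions=[node.name]) returns: candidates left without this node,
  and how many the cluster wants.
  """
  if is_mc and might_demote and not lock_all:
    # Nobody will be promoted in our place, so the pool must already
    # be full without us.
    if mc_remaining != mc_should:
      raise ValueError("Not enough master candidates, please"
                       " pass auto_promote to allow promotion")
```

The self-promotion path then switches to the flags computed in `CheckArguments`: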
```diff
@@ -3231,7 +3238,7 @@
                                  errors.ECODE_INVAL)
 
     # If we're being deofflined/drained, we'll MC ourself if needed
-    if (deoffline_or_drain and not offline_or_drain and not
+    if (self.deoffline_or_drain and not self.offline_or_drain and not
         self.op.master_candidate == True and not node.master_candidate):
       self.op.master_candidate = _DecideSelfPromotion(self)
       if self.op.master_candidate:
```
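Only the spelling changes here: the condition reads `self.offline_or_drain` and `self.deoffline_or_drain` from `CheckArguments` instead of recomputing local copies, leaving a single source of truth for the demotion logic. The last hunk performs the actual promotions in `Exec`: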
```diff
@@ -3286,8 +3293,13 @@
       node.offline = False
       result.append(("offline", "clear offline status due to drain"))
 
+    # we locked all nodes, we adjust the CP before updating this node
+    if self.lock_all:
+      _AdjustCandidatePool(self, [node.name])
+
     # this will trigger configuration file update, if needed
     self.cfg.Update(node, feedback_fn)
+
     # this will trigger job queue propagation or cleanup
     if changed_mc:
       self.context.ReaddNode(node)
```
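The pool is adjusted before `self.cfg.Update(node, feedback_fn)` persists the node, and the modified node's name is passed along, presumably as an exception list matching the `GetMasterCandidateStats(exceptions=...)` call above, so the node being demoted is not immediately re-promoted. `_AdjustCandidatePool` itself lives elsewhere in cmdlib.py; conceptually, while all node locks are held, it promotes eligible nodes until the pool is full again. A hypothetical standalone sketch of that idea (the real helper works through the cluster configuration and logs what it promotes):

```python
class Node(object):
  def __init__(self, name, master_candidate=False, offline=False,
               drained=False):
    self.name = name
    self.master_candidate = master_candidate
    self.offline = offline
    self.drained = drained

def adjust_candidate_pool(nodes, pool_size, exceptions):
  """Promote eligible nodes until the candidate pool reaches pool_size."""
  mc_now = sum(1 for n in nodes if n.master_candidate)
  for node in nodes:
    if mc_now >= pool_size:
      break
    if (node.name not in exceptions and not node.master_candidate
        and not node.offline and not node.drained):
      node.master_candidate = True
      mc_now += 1

nodes = [Node("node1", master_candidate=True), Node("node2"), Node("node3")]
adjust_candidate_pool(nodes, pool_size=2, exceptions=["node1"])
assert [n.name for n in nodes if n.master_candidate] == ["node1", "node2"]
```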