Revision c9d443ea lib/cmdlib.py
--- a/lib/cmdlib.py
+++ b/lib/cmdlib.py
@@ -2240,11 +2240,13 @@
     self.op.node_name = node_name
     _CheckBooleanOpField(self.op, 'master_candidate')
     _CheckBooleanOpField(self.op, 'offline')
-    if self.op.master_candidate is None and self.op.offline is None:
+    _CheckBooleanOpField(self.op, 'drained')
+    all_mods = [self.op.offline, self.op.master_candidate, self.op.drained]
+    if all_mods.count(None) == 3:
       raise errors.OpPrereqError("Please pass at least one modification")
-    if self.op.offline == True and self.op.master_candidate == True:
-      raise errors.OpPrereqError("Can't set the node into offline and"
-                                 " master_candidate at the same time")
+    if all_mods.count(True) > 1:
+      raise errors.OpPrereqError("Can't set the node into more than one"
+                                 " state at the same time")
 
   def ExpandNames(self):
     self.needed_locks = {locking.LEVEL_NODE: self.op.node_name}
@@ -2259,6 +2261,7 @@
       "OP_TARGET": self.op.node_name,
       "MASTER_CANDIDATE": str(self.op.master_candidate),
       "OFFLINE": str(self.op.offline),
+      "DRAINED": str(self.op.drained),
       }
     nl = [self.cfg.GetMasterNode(),
           self.op.node_name]
@@ -2272,12 +2275,12 @@
     """
     node = self.node = self.cfg.GetNodeInfo(self.op.node_name)
 
-    if ((self.op.master_candidate == False or self.op.offline == True)
-        and node.master_candidate):
+    if ((self.op.master_candidate == False or self.op.offline == True or
+         self.op.drained == True) and node.master_candidate):
       # we will demote the node from master_candidate
       if self.op.node_name == self.cfg.GetMasterNode():
         raise errors.OpPrereqError("The master node has to be a"
-                                   " master candidate and online")
+                                   " master candidate, online and not drained")
       cp_size = self.cfg.GetClusterInfo().candidate_pool_size
       num_candidates, _ = self.cfg.GetMasterCandidateStats()
       if num_candidates <= cp_size:
@@ -2288,10 +2291,11 @@
       else:
         raise errors.OpPrereqError(msg)
 
-    if (self.op.master_candidate == True and node.offline and
-        not self.op.offline == False):
-      raise errors.OpPrereqError("Can't set an offline node to"
-                                 " master_candidate")
+    if (self.op.master_candidate == True and
+        ((node.offline and not self.op.offline == False) or
+         (node.drained and not self.op.drained == False))):
+      raise errors.OpPrereqError("Node '%s' is offline or drained, can't set"
+                                 " to master_candidate")
 
     return
 
@@ -2302,16 +2306,23 @@
     node = self.node
 
     result = []
+    changed_mc = False
 
     if self.op.offline is not None:
       node.offline = self.op.offline
       result.append(("offline", str(self.op.offline)))
-      if self.op.offline == True and node.master_candidate:
-        node.master_candidate = False
-        result.append(("master_candidate", "auto-demotion due to offline"))
+      if self.op.offline == True:
+        if node.master_candidate:
+          node.master_candidate = False
+          changed_mc = True
+          result.append(("master_candidate", "auto-demotion due to offline"))
+        if node.drained:
+          node.drained = False
+          result.append(("drained", "clear drained status due to offline"))
 
     if self.op.master_candidate is not None:
       node.master_candidate = self.op.master_candidate
+      changed_mc = True
       result.append(("master_candidate", str(self.op.master_candidate)))
       if self.op.master_candidate == False:
         rrc = self.rpc.call_node_demote_from_mc(node.name)
@@ -2319,10 +2330,21 @@
         if msg:
           self.LogWarning("Node failed to demote itself: %s" % msg)
 
+    if self.op.drained is not None:
+      node.drained = self.op.drained
+      if self.op.drained == True:
+        if node.master_candidate:
+          node.master_candidate = False
+          changed_mc = True
+          result.append(("master_candidate", "auto-demotion due to drain"))
+        if node.offline:
+          node.offline = False
+          result.append(("offline", "clear offline status due to drain"))
+
     # this will trigger configuration file update, if needed
     self.cfg.Update(node)
     # this will trigger job queue propagation or cleanup
-    if self.op.node_name != self.cfg.GetMasterNode():
+    if changed_mc:
       self.context.ReaddNode(node)
 
     return result