Revision d0d7d7cf lib/cmdlib/node.py
--- a/lib/cmdlib/node.py
+++ b/lib/cmdlib/node.py
@@ -151,23 +151,21 @@
     Any errors are signaled by raising errors.OpPrereqError.
 
     """
-    cfg = self.cfg
-    hostname = self.hostname
-    node_name = hostname.name
-    primary_ip = self.op.primary_ip = hostname.ip
+    node_name = self.hostname.name
+    self.op.primary_ip = self.hostname.ip
     if self.op.secondary_ip is None:
       if self.primary_ip_family == netutils.IP6Address.family:
         raise errors.OpPrereqError("When using a IPv6 primary address, a valid"
                                    " IPv4 address must be given as secondary",
                                    errors.ECODE_INVAL)
-      self.op.secondary_ip = primary_ip
+      self.op.secondary_ip = self.op.primary_ip
 
     secondary_ip = self.op.secondary_ip
     if not netutils.IP4Address.IsValid(secondary_ip):
       raise errors.OpPrereqError("Secondary IP (%s) needs to be a valid IPv4"
                                  " address" % secondary_ip, errors.ECODE_INVAL)
 
-    existing_node_info = cfg.GetNodeInfoByName(node_name)
+    existing_node_info = self.cfg.GetNodeInfoByName(node_name)
     if not self.op.readd and existing_node_info is not None:
       raise errors.OpPrereqError("Node %s is already in the configuration" %
                                  node_name, errors.ECODE_EXISTS)
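
Note on the hunk above: a node added without an explicit secondary IP simply reuses its primary address, except when the primary is IPv6, where a distinct IPv4 secondary is mandatory (the secondary is always validated as IPv4 just below). A minimal sketch of that defaulting rule, with a plain boolean standing in for the netutils address-family check:

def default_secondary_ip(primary_ip, secondary_ip, primary_is_ipv6):
  # Illustrative only: mirrors the CheckPrereq defaulting above.
  # An IPv6 primary requires an explicit IPv4 secondary; otherwise a
  # missing secondary falls back to the primary address itself.
  if secondary_ip is None:
    if primary_is_ipv6:
      raise ValueError("When using a IPv6 primary address, a valid"
                       " IPv4 address must be given as secondary")
    secondary_ip = primary_ip
  return secondary_ip

# Single-homed IPv4 node: the secondary defaults to the primary.
assert default_secondary_ip("192.0.2.10", None, False) == "192.0.2.10"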
@@ -177,19 +175,19 @@
 
     self.changed_primary_ip = False
 
-    for existing_node in cfg.GetAllNodesInfo().values():
+    for existing_node in self.cfg.GetAllNodesInfo().values():
       if self.op.readd and node_name == existing_node.name:
         if existing_node.secondary_ip != secondary_ip:
           raise errors.OpPrereqError("Readded node doesn't have the same IP"
                                      " address configuration as before",
                                      errors.ECODE_INVAL)
-        if existing_node.primary_ip != primary_ip:
+        if existing_node.primary_ip != self.op.primary_ip:
           self.changed_primary_ip = True
 
         continue
 
-      if (existing_node.primary_ip == primary_ip or
-          existing_node.secondary_ip == primary_ip or
+      if (existing_node.primary_ip == self.op.primary_ip or
+          existing_node.secondary_ip == self.op.primary_ip or
           existing_node.primary_ip == secondary_ip or
           existing_node.secondary_ip == secondary_ip):
         raise errors.OpPrereqError("New node ip address(es) conflict with"
@@ -210,7 +208,7 @@
       setattr(self.op, attr, True)
 
     if self.op.readd and not self.op.vm_capable:
-      pri, sec = cfg.GetNodeInstances(existing_node_info.uuid)
+      pri, sec = self.cfg.GetNodeInstances(existing_node_info.uuid)
       if pri or sec:
         raise errors.OpPrereqError("Node %s being re-added with vm_capable"
                                    " flag set to false, but it already holds"
@@ -219,9 +217,9 @@
 
     # check that the type of the node (single versus dual homed) is the
     # same as for the master
-    myself = cfg.GetNodeInfo(self.cfg.GetMasterNode())
+    myself = self.cfg.GetNodeInfo(self.cfg.GetMasterNode())
     master_singlehomed = myself.secondary_ip == myself.primary_ip
-    newbie_singlehomed = secondary_ip == primary_ip
+    newbie_singlehomed = secondary_ip == self.op.primary_ip
     if master_singlehomed != newbie_singlehomed:
       if master_singlehomed:
         raise errors.OpPrereqError("The master has no secondary ip but the"
@@ -233,7 +231,7 @@
                                    errors.ECODE_INVAL)
 
     # checks reachability
-    if not netutils.TcpPing(primary_ip, constants.DEFAULT_NODED_PORT):
+    if not netutils.TcpPing(self.op.primary_ip, constants.DEFAULT_NODED_PORT):
       raise errors.OpPrereqError("Node not reachable by ping",
                                  errors.ECODE_ENVIRON)
 
@@ -258,9 +256,9 @@
     if self.op.readd:
       self.new_node = existing_node_info
     else:
-      node_group = cfg.LookupNodeGroup(self.op.group)
+      node_group = self.cfg.LookupNodeGroup(self.op.group)
       self.new_node = objects.Node(name=node_name,
-                                   primary_ip=primary_ip,
+                                   primary_ip=self.op.primary_ip,
                                    secondary_ip=secondary_ip,
                                    master_candidate=self.master_candidate,
                                    offline=False, drained=False,
@@ -291,13 +289,14 @@
                                  (constants.PROTOCOL_VERSION, result.payload),
                                  errors.ECODE_ENVIRON)
 
-    vg_name = cfg.GetVGName()
+    vg_name = self.cfg.GetVGName()
     if vg_name is not None:
       vparams = {constants.NV_PVLIST: [vg_name]}
-      excl_stor = IsExclusiveStorageEnabledNode(cfg, self.new_node)
+      excl_stor = IsExclusiveStorageEnabledNode(self.cfg, self.new_node)
       cname = self.cfg.GetClusterName()
       result = rpcrunner.call_node_verify_light(
-        [node_name], vparams, cname, cfg.GetClusterInfo().hvparams)[node_name]
+        [node_name], vparams, cname,
+        self.cfg.GetClusterInfo().hvparams)[node_name]
       (errmsgs, _) = CheckNodePVs(result.payload, excl_stor)
       if errmsgs:
         raise errors.OpPrereqError("Checks on node PVs failed: %s" %
@@ -307,45 +306,42 @@
     """Adds the new node to the cluster.
 
     """
-    new_node = self.new_node
-    node_name = new_node.name
-
     assert locking.BGL in self.owned_locks(locking.LEVEL_CLUSTER), \
       "Not owning BGL"
 
     # We adding a new node so we assume it's powered
-    new_node.powered = True
+    self.new_node.powered = True
 
     # for re-adds, reset the offline/drained/master-candidate flags;
     # we need to reset here, otherwise offline would prevent RPC calls
     # later in the procedure; this also means that if the re-add
     # fails, we are left with a non-offlined, broken node
     if self.op.readd:
-      new_node.drained = new_node.offline = False  # pylint: disable=W0201
+      self.new_node.drained = False
       self.LogInfo("Readding a node, the offline/drained flags were reset")
       # if we demote the node, we do cleanup later in the procedure
-      new_node.master_candidate = self.master_candidate
+      self.new_node.master_candidate = self.master_candidate
       if self.changed_primary_ip:
-        new_node.primary_ip = self.op.primary_ip
+        self.new_node.primary_ip = self.op.primary_ip
 
     # copy the master/vm_capable flags
     for attr in self._NFLAGS:
-      setattr(new_node, attr, getattr(self.op, attr))
+      setattr(self.new_node, attr, getattr(self.op, attr))
 
     # notify the user about any possible mc promotion
-    if new_node.master_candidate:
+    if self.new_node.master_candidate:
       self.LogInfo("Node will be a master candidate")
 
     if self.op.ndparams:
-      new_node.ndparams = self.op.ndparams
+      self.new_node.ndparams = self.op.ndparams
     else:
-      new_node.ndparams = {}
+      self.new_node.ndparams = {}
 
     if self.op.hv_state:
-      new_node.hv_state_static = self.new_hv_state
+      self.new_node.hv_state_static = self.new_hv_state
 
     if self.op.disk_state:
-      new_node.disk_state_static = self.new_disk_state
+      self.new_node.disk_state_static = self.new_disk_state
 
     # Add node to our /etc/hosts, and add key to known_hosts
     if self.cfg.GetClusterInfo().modify_etc_hosts:
@@ -355,12 +351,13 @@
                                               self.hostname.ip)
       result.Raise("Can't update hosts file with new host data")
 
-    if new_node.secondary_ip != new_node.primary_ip:
-      _CheckNodeHasSecondaryIP(self, new_node, new_node.secondary_ip, False)
+    if self.new_node.secondary_ip != self.new_node.primary_ip:
+      _CheckNodeHasSecondaryIP(self, self.new_node, self.new_node.secondary_ip,
+                               False)
 
     node_verifier_uuids = [self.cfg.GetMasterNode()]
     node_verify_param = {
-      constants.NV_NODELIST: ([node_name], {}),
+      constants.NV_NODELIST: ([self.new_node.name], {}),
       # TODO: do a node-net-test as well?
       }
 
@@ -379,17 +376,17 @@
       raise errors.OpExecError("ssh/hostname verification failed")
 
     if self.op.readd:
-      self.context.ReaddNode(new_node)
+      self.context.ReaddNode(self.new_node)
       RedistributeAncillaryFiles(self)
       # make sure we redistribute the config
-      self.cfg.Update(new_node, feedback_fn)
+      self.cfg.Update(self.new_node, feedback_fn)
       # and make sure the new node will not have old files around
-      if not new_node.master_candidate:
-        result = self.rpc.call_node_demote_from_mc(new_node.uuid)
+      if not self.new_node.master_candidate:
+        result = self.rpc.call_node_demote_from_mc(self.new_node.uuid)
         result.Warn("Node failed to demote itself from master candidate status",
                     self.LogWarning)
     else:
-      self.context.AddNode(new_node, self.proc.GetECId())
+      self.context.AddNode(self.new_node, self.proc.GetECId())
       RedistributeAncillaryFiles(self)
 
 
@@ -707,9 +704,6 @@
 
     """
    node = self.cfg.GetNodeInfo(self.op.node_uuid)
-    old_role = self.old_role
-    new_role = self.new_role
-
     result = []
 
     if self.op.ndparams:
@@ -730,14 +724,15 @@
         setattr(node, attr, val)
         result.append((attr, str(val)))
 
-    if new_role != old_role:
+    if self.new_role != self.old_role:
       # Tell the node to demote itself, if no longer MC and not offline
-      if old_role == self._ROLE_CANDIDATE and new_role != self._ROLE_OFFLINE:
+      if self.old_role == self._ROLE_CANDIDATE and \
+          self.new_role != self._ROLE_OFFLINE:
         msg = self.rpc.call_node_demote_from_mc(node.name).fail_msg
         if msg:
           self.LogWarning("Node failed to demote itself: %s", msg)
 
-      new_flags = self._R2F[new_role]
+      new_flags = self._R2F[self.new_role]
       for of, nf, desc in zip(self.old_flags, new_flags, self._FLAGS):
         if of != nf:
           result.append((desc, str(nf)))
@@ -756,7 +751,7 @@
 
     # this will trigger job queue propagation or cleanup if the mc
     # flag changed
-    if [old_role, new_role].count(self._ROLE_CANDIDATE) == 1:
+    if [self.old_role, self.new_role].count(self._ROLE_CANDIDATE) == 1:
       self.context.ReaddNode(node)
 
     return result
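
The count(...) == 1 test in the hunk above acts as an exclusive-or on the master-candidate role: it is true exactly when one side of the old/new transition is a master candidate and the other is not, i.e. when the flag flipped in either direction. A small illustration, with hypothetical role constants in place of self._ROLE_CANDIDATE:

ROLE_CANDIDATE = "C"  # hypothetical stand-in for self._ROLE_CANDIDATE
ROLE_REGULAR = "R"

def mc_flag_changed(old_role, new_role):
  # True exactly when the node was promoted to or demoted from master
  # candidate, which is what triggers job queue propagation or cleanup.
  return [old_role, new_role].count(ROLE_CANDIDATE) == 1

assert mc_flag_changed(ROLE_REGULAR, ROLE_CANDIDATE)        # promotion
assert mc_flag_changed(ROLE_CANDIDATE, ROLE_REGULAR)        # demotion
assert not mc_flag_changed(ROLE_CANDIDATE, ROLE_CANDIDATE)  # unchanged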
@@ -1072,15 +1067,15 @@
 
   def Exec(self, feedback_fn):
     # Prepare jobs for migration instances
-    allow_runtime_changes = self.op.allow_runtime_changes
     jobs = [
-      [opcodes.OpInstanceMigrate(instance_name=inst.name,
-                                 mode=self.op.mode,
-                                 live=self.op.live,
-                                 iallocator=self.op.iallocator,
-                                 target_node=self.op.target_node,
-                                 allow_runtime_changes=allow_runtime_changes,
-                                 ignore_ipolicy=self.op.ignore_ipolicy)]
+      [opcodes.OpInstanceMigrate(
+          instance_name=inst.name,
+          mode=self.op.mode,
+          live=self.op.live,
+          iallocator=self.op.iallocator,
+          target_node=self.op.target_node,
+          allow_runtime_changes=self.op.allow_runtime_changes,
+          ignore_ipolicy=self.op.ignore_ipolicy)]
       for inst in _GetNodePrimaryInstances(self.cfg, self.op.node_uuid)]
 
     # TODO: Run iallocator in this opcode and pass correct placement options to
@@ -1499,9 +1494,8 @@
     """Removes the node from the cluster.
 
     """
-    node = self.node
     logging.info("Stopping the node daemon and removing configs from node %s",
-                 node.name)
+                 self.node.name)
 
     modify_ssh_setup = self.cfg.GetClusterInfo().modify_ssh_setup
 
@@ -1509,15 +1503,15 @@
       "Not owning BGL"
 
     # Promote nodes to master candidate as needed
-    AdjustCandidatePool(self, exceptions=[node.uuid])
-    self.context.RemoveNode(node)
+    AdjustCandidatePool(self, exceptions=[self.node.uuid])
+    self.context.RemoveNode(self.node)
 
     # Run post hooks on the node before it's removed
-    RunPostHook(self, node.name)
+    RunPostHook(self, self.node.name)
 
     # we have to call this by name rather than by UUID, as the node is no longer
     # in the config
-    result = self.rpc.call_node_leave_cluster(node.name, modify_ssh_setup)
+    result = self.rpc.call_node_leave_cluster(self.node.name, modify_ssh_setup)
     msg = result.fail_msg
     if msg:
       self.LogWarning("Errors encountered on the remote node while leaving"
@@ -1528,7 +1522,7 @@
       master_node_uuid = self.cfg.GetMasterNode()
       result = self.rpc.call_etc_hosts_modify(master_node_uuid,
                                               constants.ETC_HOSTS_REMOVE,
-                                              node.name, None)
+                                              self.node.name, None)
       result.Raise("Can't update hosts file with new host data")
       RedistributeAncillaryFiles(self)
 
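The common thread of this revision is the removal of short-lived local aliases (cfg, hostname, new_node, node, old_role, new_role) in favor of reading the attribute at the point of use, so each access is visibly tied to the LU instance. A before/after sketch of the pattern, using hypothetical class and method names rather than the actual LUs:

class LUExampleBefore:
  def Exec(self, feedback_fn):
    # Locals merely alias instance attributes, hiding where the data
    # lives and risking staleness if the attribute is reassigned later.
    cfg = self.cfg
    new_node = self.new_node
    cfg.Update(new_node, feedback_fn)

class LUExampleAfter:
  def Exec(self, feedback_fn):
    # Attribute access at the point of use; one fewer indirection to trace.
    self.cfg.Update(self.new_node, feedback_fn)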