Revision c7dd65be

Replace open-coded checks of result.fail_msg with the RpcResult.Warn helper (and, where a failure must abort the operation, RpcResult.Raise) throughout lib/cmdlib. Each call site now passes the bare warning text plus a reporting callable, and the helper appends the RPC failure message itself.
--- a/lib/cmdlib/cluster.py
+++ b/lib/cmdlib/cluster.py
@@ -168,10 +168,7 @@
     ems = self.cfg.GetUseExternalMipScript()
     result = self.rpc.call_node_deactivate_master_ip(master_params.name,
                                                      master_params, ems)
-    if result.fail_msg:
-      self.LogWarning("Error disabling the master IP address: %s",
-                      result.fail_msg)
-
+    result.Warn("Error disabling the master IP address", self.LogWarning)
     return master_params.name
 
 
@@ -430,10 +427,8 @@
       master_params.ip = new_ip
       result = self.rpc.call_node_activate_master_ip(master_params.name,
                                                      master_params, ems)
-      msg = result.fail_msg
-      if msg:
-        self.LogWarning("Could not re-enable the master role on"
-                        " the master, please restart manually: %s", msg)
+      result.Warn("Could not re-enable the master role on the master,"
+                  " please restart manually", self.LogWarning)
 
     return clustername
 
@@ -1115,10 +1110,7 @@
                                                         self.op.master_netmask,
                                                         master_params.ip,
                                                         master_params.netdev)
-      if result.fail_msg:
-        msg = "Could not change the master IP netmask: %s" % result.fail_msg
-        feedback_fn(msg)
-
+      result.Warn("Could not change the master IP netmask", feedback_fn)
       self.cluster.master_netmask = self.op.master_netmask
 
     self.cfg.Update(self.cluster, feedback_fn)
@@ -1130,10 +1122,8 @@
       ems = self.cfg.GetUseExternalMipScript()
       result = self.rpc.call_node_activate_master_ip(master_params.name,
                                                      master_params, ems)
-      if result.fail_msg:
-        self.LogWarning("Could not re-enable the master ip on"
-                        " the master, please restart manually: %s",
-                        result.fail_msg)
+      result.Warn("Could not re-enable the master ip on the master,"
+                  " please restart manually", self.LogWarning)
 
 
 class LUClusterVerify(NoHooksLU):
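
Note: every hunk in this revision leans on the Warn helper of RPC results, whose definition is not part of this diff. The sketch below captures only the behavior the call sites above assume (the real class lives in lib/rpc.py and carries much more state): when the result has a fail_msg, the helper appends it to the given message and hands the combined text to the supplied feedback callable; on success it does nothing.

  # Sketch of the assumed RpcResult.Warn behavior, not the real definition.
  class RpcResult(object):
    def __init__(self, fail_msg=None):
      self.fail_msg = fail_msg  # None or "" when the RPC succeeded

    def Warn(self, msg, feedback_fn):
      """Report an RPC failure through feedback_fn, but keep going."""
      if not self.fail_msg:
        return
      feedback_fn("%s: %s" % (msg, self.fail_msg))

This is why each call site now passes a plain message plus a reporting callable (self.LogWarning, feedback_fn, or logging.warning) instead of formatting fail_msg itself.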
--- a/lib/cmdlib/instance.py
+++ b/lib/cmdlib/instance.py
@@ -1390,9 +1390,8 @@
         result = self.rpc.call_instance_run_rename(pnode_name, iobj,
                                                    rename_from,
                                                    self.op.debug_level)
-        if result.fail_msg:
-          self.LogWarning("Failed to run rename script for %s on node"
-                          " %s: %s" % (instance, pnode_name, result.fail_msg))
+        result.Warn("Failed to run rename script for %s on node %s" %
+                    (instance, pnode_name), self.LogWarning)
 
     assert not self.owned_locks(locking.LEVEL_NODE_RES)
 
@@ -1512,18 +1511,14 @@
       for node in inst.all_nodes:
         self.cfg.SetDiskID(disk, node)
         result = self.rpc.call_blockdev_setinfo(node, disk, info)
-        if result.fail_msg:
-          self.LogWarning("Error setting info on node %s for disk %s: %s",
-                          node, idx, result.fail_msg)
+        result.Warn("Error setting info on node %s for disk %s" % (node, idx),
+                    self.LogWarning)
     try:
       result = self.rpc.call_instance_run_rename(inst.primary_node, inst,
                                                  old_name, self.op.debug_level)
-      msg = result.fail_msg
-      if msg:
-        msg = ("Could not run OS rename script for instance %s on node %s"
-               " (but the instance has been renamed in Ganeti): %s" %
-               (inst.name, inst.primary_node, msg))
-        self.LogWarning(msg)
+      result.Warn("Could not run OS rename script for instance %s on node %s"
+                  " (but the instance has been renamed in Ganeti)" %
+                  (inst.name, inst.primary_node), self.LogWarning)
     finally:
       ShutdownInstanceDisks(self, inst)
 
@@ -1591,14 +1586,11 @@
     result = self.rpc.call_instance_shutdown(instance.primary_node, instance,
                                              self.op.shutdown_timeout,
                                              self.op.reason)
-    msg = result.fail_msg
-    if msg:
-      if self.op.ignore_failures:
-        feedback_fn("Warning: can't shutdown instance: %s" % msg)
-      else:
-        raise errors.OpExecError("Could not shutdown instance %s on"
-                                 " node %s: %s" %
-                                 (instance.name, instance.primary_node, msg))
+    if self.op.ignore_failures:
+      result.Warn("Warning: can't shutdown instance", feedback_fn)
+    else:
+      result.Raise("Could not shutdown instance %s on node %s" %
+                   (instance.name, instance.primary_node))
 
     assert (self.owned_locks(locking.LEVEL_NODE) ==
             self.owned_locks(locking.LEVEL_NODE_RES))
@@ -1732,17 +1724,13 @@
     result = self.rpc.call_instance_shutdown(source_node, instance,
                                              self.op.shutdown_timeout,
                                              self.op.reason)
-    msg = result.fail_msg
-    if msg:
-      if self.op.ignore_consistency:
-        self.LogWarning("Could not shutdown instance %s on node %s."
-                        " Proceeding anyway. Please make sure node"
-                        " %s is down. Error details: %s",
-                        instance.name, source_node, source_node, msg)
-      else:
-        raise errors.OpExecError("Could not shutdown instance %s on"
-                                 " node %s: %s" %
-                                 (instance.name, source_node, msg))
+    if self.op.ignore_consistency:
+      result.Warn("Could not shutdown instance %s on node %s. Proceeding"
+                  " anyway. Please make sure node %s is down. Error details" %
+                  (instance.name, source_node, source_node), self.LogWarning)
+    else:
+      result.Raise("Could not shutdown instance %s on node %s" %
+                   (instance.name, source_node))
 
     # create the target disks
     try:
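
Note: the two instance-shutdown hunks additionally fold the raise errors.OpExecError branch into result.Raise, so the ignore_failures / ignore_consistency flag now merely selects the reporting policy. Under the same assumptions as the Warn sketch above, Raise behaves roughly as follows (the real method in lib/rpc.py takes more parameters; OpExecError is stubbed here to keep the snippet self-contained):

  class OpExecError(Exception):  # stand-in for ganeti.errors.OpExecError
    pass

  class RpcResult(object):
    def __init__(self, fail_msg=None):
      self.fail_msg = fail_msg

    def Raise(self, msg):
      """Abort with OpExecError when the RPC failed (sketch)."""
      if self.fail_msg:
        raise OpExecError("%s: %s" % (msg, self.fail_msg))

Either way the operator should see the same text as before; only the open-coded plumbing (msg = result.fail_msg and the nested if/else) disappears.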
--- a/lib/cmdlib/instance_storage.py
+++ b/lib/cmdlib/instance_storage.py
@@ -206,9 +206,8 @@
   for (node, disk) in disks_created:
     lu.cfg.SetDiskID(disk, node)
     result = lu.rpc.call_blockdev_remove(node, disk)
-    if result.fail_msg:
-      logging.warning("Failed to remove newly-created disk %s on node %s:"
-                      " %s", disk, node, result.fail_msg)
+    result.Warn("Failed to remove newly-created disk %s on node %s" %
+                (disk, node), logging.warning)
 
 
 def CreateDisks(lu, instance, to_skip=None, target_node=None, disks=None):
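
Note: as this hunk shows, the feedback argument does not have to be an LU method; any callable accepting one message string works, including module-level logging.warning. One subtle change here: the old code handed %-arguments to logging for lazy formatting, while the new code formats the message eagerly with % before Warn is even called. A hypothetical run, reusing the minimal Warn behavior sketched earlier:

  import logging

  class _FakeResult(object):  # hypothetical failed result, same Warn sketch
    fail_msg = "device busy"

    def Warn(self, msg, feedback_fn):
      if self.fail_msg:
        feedback_fn("%s: %s" % (msg, self.fail_msg))

  _FakeResult().Warn("Failed to remove newly-created disk %s on node %s" %
                     ("disk/0", "node1.example.com"), logging.warning)
  # prints something like:
  # WARNING:root:Failed to remove newly-created disk disk/0 on node1.example.com: device busy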
--- a/lib/cmdlib/node.py
+++ b/lib/cmdlib/node.py
@@ -379,10 +379,8 @@
       # and make sure the new node will not have old files around
       if not new_node.master_candidate:
         result = self.rpc.call_node_demote_from_mc(new_node.name)
-        msg = result.fail_msg
-        if msg:
-          self.LogWarning("Node failed to demote itself from master"
-                          " candidate status: %s" % msg)
+        result.Warn("Node failed to demote itself from master candidate status",
+                    self.LogWarning)
     else:
       RedistributeAncillaryFiles(self, additional_nodes=[node],
                                  additional_vm=self.op.vm_capable)
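
Note: a quick way to convince oneself that the node.py rewrite preserves the emitted text is to feed both styles the same failure and compare; list.append serves as the feedback callable. This check relies on the same assumed Warn behavior as the sketches above:

  class _Result(object):  # minimal stand-in for an RPC result
    def __init__(self, fail_msg):
      self.fail_msg = fail_msg

    def Warn(self, msg, feedback_fn):
      if self.fail_msg:
        feedback_fn("%s: %s" % (msg, self.fail_msg))

  messages = []
  res = _Result("node is drained")
  # Old open-coded style:
  if res.fail_msg:
    messages.append("Node failed to demote itself from master"
                    " candidate status: %s" % res.fail_msg)
  # New helper style:
  res.Warn("Node failed to demote itself from master candidate status",
           messages.append)
  assert messages[0] == messages[1]  # identical warning text either way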