Revision 9a4f63d1
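
Replaces the custom ganeti.logger module with the standard Python logging module throughout lib/cmdlib.py: the logger import is dropped, logger.Debug/Info/Error calls become logging.debug/info/warning/error calls with lazy argument passing instead of eager %-interpolation, and output meant for the user (formerly logger.ToStdout/ToStderr) is routed through the logical unit's feedback_fn callback or logged as a warning.

A minimal sketch of the call-style conversion applied in every hunk below (illustrative only; the node and lvs values are made up and not part of the revision):

    import logging

    node, lvs = "node1.example.com", "vgs: command failed"  # illustrative values

    # Old style: a single pre-formatted string, so the %-interpolation ran
    # even when the message was below the configured log level:
    #   logger.Info("error enumerating LVs on node %s: %s" % (node, lvs))

    # New style: the arguments are passed separately and interpolated only
    # if the record is actually emitted by a configured handler.
    logging.warning("Error enumerating LVs on node %s: %s", node, lvs)
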
--- a/lib/cmdlib.py
+++ b/lib/cmdlib.py
@@ -34,7 +34,6 @@
 import copy
 
 from ganeti import ssh
-from ganeti import logger
 from ganeti import utils
 from ganeti import errors
 from ganeti import hypervisor
@@ -995,11 +994,11 @@
       lvs = node_lvs[node]
 
       if isinstance(lvs, basestring):
-        logger.Info("error enumerating LVs on node %s: %s" % (node, lvs))
+        logging.warning("Error enumerating LVs on node %s: %s", node, lvs)
         res_nlvm[node] = lvs
       elif not isinstance(lvs, dict):
-        logger.Info("connection to node %s failed or invalid data returned" %
-                    (node,))
+        logging.warning("Connection to node %s failed or invalid data"
+                        " returned", node)
         res_nodes.append(node)
         continue
 
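Note that besides the mechanical conversion, this hunk raises the severity: failed LV enumeration and invalid node responses were previously logged at Info level and now go to logging.warning.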
@@ -1083,18 +1082,17 @@
       if myself.name in dist_nodes:
         dist_nodes.remove(myself.name)
 
-      logger.Debug("Copying updated ssconf data to all nodes")
+      logging.debug("Copying updated ssconf data to all nodes")
       for keyname in [ss.SS_CLUSTER_NAME, ss.SS_MASTER_IP]:
         fname = ss.KeyToFilename(keyname)
         result = self.rpc.call_upload_file(dist_nodes, fname)
         for to_node in dist_nodes:
           if not result[to_node]:
-            logger.Error("copy of file %s to node %s failed" %
-                         (fname, to_node))
+            logging.error("Copy of file %s to node %s failed", fname, to_node)
     finally:
       if not self.rpc.call_node_start_master(master, False):
-        logger.Error("Could not re-enable the master role on the master,"
-                     " please restart manually.")
+        logging.error("Could not re-enable the master role on the master,"
+                      " please restart manually.")
 
 
 def _RecursiveCheckIfLVMBased(disk):
@@ -1300,7 +1298,7 @@
   if on_primary or dev.AssembleOnSecondary():
     rstats = lu.rpc.call_blockdev_find(node, dev)
     if not rstats:
-      logger.ToStderr("Node %s: Disk degraded, not found or node down" % node)
+      logging.warning("Node %s: disk degraded, not found or node down", node)
       result = False
     else:
       result = result and (not rstats[idx])
@@ -1458,8 +1456,8 @@
 
     """
     node = self.node
-    logger.Info("stopping the node daemon and removing configs from node %s" %
-                node.name)
+    logging.info("Stopping the node daemon and removing configs from node %s",
+                 node.name)
 
     self.context.RemoveNode(node.name)
 
@@ -1797,8 +1795,8 @@
     result = self.rpc.call_version([node])[node]
     if result:
       if constants.PROTOCOL_VERSION == result:
-        logger.Info("communication to node %s fine, sw version %s match" %
-                    (node, result))
+        logging.info("Communication to node %s fine, sw version %s match",
+                     node, result)
       else:
         raise errors.OpExecError("Version mismatch master version %s,"
                                  " node version %s" %
@@ -1807,7 +1805,7 @@
       raise errors.OpExecError("Cannot get version from the new node")
 
     # setup ssh on node
-    logger.Info("copy ssh key to node %s" % node)
+    logging.info("Copy ssh key to node %s", node)
     priv_key, pub_key, _ = ssh.GetUserFiles(constants.GANETI_RUNAS)
     keyarray = []
     keyfiles = [constants.SSH_HOST_DSA_PRIV, constants.SSH_HOST_DSA_PUB,
@@ -1865,13 +1863,12 @@
     if myself.name in dist_nodes:
       dist_nodes.remove(myself.name)
 
-    logger.Debug("Copying hosts and known_hosts to all nodes")
+    logging.debug("Copying hosts and known_hosts to all nodes")
     for fname in (constants.ETC_HOSTS, constants.SSH_KNOWN_HOSTS_FILE):
       result = self.rpc.call_upload_file(dist_nodes, fname)
       for to_node in dist_nodes:
         if not result[to_node]:
-          logger.Error("copy of file %s to node %s failed" %
-                       (fname, to_node))
+          logging.error("Copy of file %s to node %s failed", fname, to_node)
 
     to_copy = []
     if constants.HT_XEN_HVM in self.cfg.GetClusterInfo().enabled_hypervisors:
@@ -1879,7 +1876,7 @@
     for fname in to_copy:
       result = self.rpc.call_upload_file([node], fname)
       if not result[node]:
-        logger.Error("could not copy file %s to node %s" % (fname, node))
+        logging.error("Could not copy file %s to node %s", fname, node)
 
     if self.op.readd:
       self.context.ReaddNode(new_node)
@@ -2036,8 +2033,8 @@
       lu.cfg.SetDiskID(node_disk, node)
       result = lu.rpc.call_blockdev_assemble(node, node_disk, iname, False)
       if not result:
-        logger.Error("could not prepare block device %s on node %s"
-                     " (is_primary=False, pass=1)" % (inst_disk.iv_name, node))
+        logging.error("Could not prepare block device %s on node %s"
+                      " (is_primary=False, pass=1)", inst_disk.iv_name, node)
         if not ignore_secondaries:
           disks_ok = False
 
@@ -2051,8 +2048,8 @@
       lu.cfg.SetDiskID(node_disk, node)
       result = lu.rpc.call_blockdev_assemble(node, node_disk, iname, True)
       if not result:
-        logger.Error("could not prepare block device %s on node %s"
-                     " (is_primary=True, pass=2)" % (inst_disk.iv_name, node))
+        logging.error("Could not prepare block device %s on node %s"
+                      " (is_primary=True, pass=2)", inst_disk.iv_name, node)
         disks_ok = False
     device_info.append((instance.primary_node, inst_disk.iv_name, result))
 
@@ -2074,8 +2071,8 @@
   if not disks_ok:
     _ShutdownInstanceDisks(lu, instance)
     if force is not None and not force:
-      logger.Error("If the message above refers to a secondary node,"
-                   " you can retry the operation using '--force'.")
+      logging.error("If the message above refers to a secondary node,"
+                    " you can retry the operation using '--force'.")
     raise errors.OpExecError("Disk consistency error")
 
 
@@ -2148,8 +2145,8 @@
     for node, top_disk in disk.ComputeNodeTree(instance.primary_node):
       lu.cfg.SetDiskID(top_disk, node)
       if not lu.rpc.call_blockdev_shutdown(node, top_disk):
-        logger.Error("could not shutdown block device %s on node %s" %
-                     (disk.iv_name, node))
+        logging.error("Could not shutdown block device %s on node %s",
+                      disk.iv_name, node)
         if not ignore_primary or node != instance.primary_node:
           result = False
   return result
@@ -2389,7 +2386,7 @@
     node_current = instance.primary_node
     self.cfg.MarkInstanceDown(instance.name)
     if not self.rpc.call_instance_shutdown(node_current, instance):
-      logger.Error("could not shutdown instance")
+      logging.error("Could not shutdown instance")
 
     _ShutdownInstanceDisks(self, instance)
 
@@ -2587,7 +2584,7 @@
         msg = ("Could not run OS rename script for instance %s on node %s"
                " (but the instance has been renamed in Ganeti)" %
                (inst.name, inst.primary_node))
-        logger.Error(msg)
+        logging.error(msg)
     finally:
       _ShutdownInstanceDisks(self, inst)
 
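This conversion keeps the pre-built string: msg is assembled with eager %-formatting just above, and logging.error(msg) emits it unchanged. Since no extra arguments are passed, logging performs no %-interpolation on the message, so percent signs in instance or node names are harmless.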
@@ -2635,8 +2632,8 @@
 
     """
     instance = self.instance
-    logger.Info("shutting down instance %s on node %s" %
-                (instance.name, instance.primary_node))
+    logging.info("Shutting down instance %s on node %s",
+                 instance.name, instance.primary_node)
 
     if not self.rpc.call_instance_shutdown(instance.primary_node, instance):
       if self.op.ignore_failures:
@@ -2645,7 +2642,7 @@
         raise errors.OpExecError("Could not shutdown instance %s on node %s" %
                                  (instance.name, instance.primary_node))
 
-    logger.Info("removing block devices for instance %s" % instance.name)
+    logging.info("Removing block devices for instance %s", instance.name)
 
     if not _RemoveDisks(self, instance):
       if self.op.ignore_failures:
@@ -2653,7 +2650,7 @@
       else:
         raise errors.OpExecError("Can't remove instance's disks")
 
-    logger.Info("removing instance %s out of cluster config" % instance.name)
+    logging.info("Removing instance %s out of cluster config", instance.name)
 
     self.cfg.RemoveInstance(instance.name)
     self.remove_locks[locking.LEVEL_INSTANCE] = instance.name
@@ -2919,14 +2916,14 @@
                                    " aborting failover." % dev.iv_name)
 
     feedback_fn("* shutting down instance on source node")
-    logger.Info("Shutting down instance %s on node %s" %
-                (instance.name, source_node))
+    logging.info("Shutting down instance %s on node %s",
+                 instance.name, source_node)
 
     if not self.rpc.call_instance_shutdown(source_node, instance):
       if self.op.ignore_consistency:
-        logger.Error("Could not shutdown instance %s on node %s. Proceeding"
-                     " anyway. Please make sure node %s is down" %
-                     (instance.name, source_node, source_node))
+        logging.error("Could not shutdown instance %s on node %s. Proceeding"
+                      " anyway. Please make sure node %s is down",
+                      instance.name, source_node, source_node)
       else:
         raise errors.OpExecError("Could not shutdown instance %s on node %s" %
                                  (instance.name, source_node))
@@ -2942,8 +2939,8 @@
     # Only start the instance if it's marked as up
     if instance.status == "up":
       feedback_fn("* activating the instance's disks on target node")
-      logger.Info("Starting instance %s on node %s" %
-                  (instance.name, target_node))
+      logging.info("Starting instance %s on node %s",
+                   instance.name, target_node)
 
       disks_ok, dummy = _AssembleInstanceDisks(self, instance,
                                                ignore_secondaries=True)
@@ -3126,28 +3123,27 @@
                                                  file_storage_dir)
 
     if not result:
-      logger.Error("Could not connect to node '%s'" % instance.primary_node)
+      logging.error("Could not connect to node '%s'", instance.primary_node)
       return False
 
     if not result[0]:
-      logger.Error("failed to create directory '%s'" % file_storage_dir)
+      logging.error("Failed to create directory '%s'", file_storage_dir)
       return False
 
   for device in instance.disks:
-    logger.Info("creating volume %s for instance %s" %
-                (device.iv_name, instance.name))
+    logging.info("Creating volume %s for instance %s",
+                 device.iv_name, instance.name)
     #HARDCODE
     for secondary_node in instance.secondary_nodes:
       if not _CreateBlockDevOnSecondary(lu, secondary_node, instance,
                                         device, False, info):
-        logger.Error("failed to create volume %s (%s) on secondary node %s!" %
-                     (device.iv_name, device, secondary_node))
+        logging.error("Failed to create volume %s (%s) on secondary node %s!",
+                      device.iv_name, device, secondary_node)
         return False
     #HARDCODE
     if not _CreateBlockDevOnPrimary(lu, instance.primary_node,
                                     instance, device, info):
-      logger.Error("failed to create volume %s on primary!" %
-                   device.iv_name)
+      logging.error("Failed to create volume %s on primary!", device.iv_name)
       return False
 
   return True
@@ -3168,23 +3164,22 @@
     True or False showing the success of the removal proces
 
   """
-  logger.Info("removing block devices for instance %s" % instance.name)
+  logging.info("Removing block devices for instance %s", instance.name)
 
   result = True
   for device in instance.disks:
     for node, disk in device.ComputeNodeTree(instance.primary_node):
       lu.cfg.SetDiskID(disk, node)
       if not lu.rpc.call_blockdev_remove(node, disk):
-        logger.Error("could not remove block device %s on node %s,"
-                     " continuing anyway" %
-                     (device.iv_name, node))
+        logging.error("Could not remove block device %s on node %s,"
+                      " continuing anyway", device.iv_name, node)
         result = False
 
   if instance.disk_template == constants.DT_FILE:
     file_storage_dir = os.path.dirname(instance.disks[0].logical_id[1])
     if not lu.rpc.call_file_storage_dir_remove(instance.primary_node,
                                                file_storage_dir):
-      logger.Error("could not remove directory '%s'" % file_storage_dir)
+      logging.error("Could not remove directory '%s'", file_storage_dir)
       result = False
 
   return result
@@ -3420,10 +3415,10 @@
                                (self.op.iallocator, len(ial.nodes),
                                 ial.required_nodes))
     self.op.pnode = ial.nodes[0]
-    logger.ToStdout("Selected nodes for the instance: %s" %
-                    (", ".join(ial.nodes),))
-    logger.Info("Selected nodes for instance %s via iallocator %s: %s" %
-                (self.op.instance_name, self.op.iallocator, ial.nodes))
+    feedback_fn("Selected nodes for the instance: %s" %
+                (", ".join(ial.nodes),))
+    logging.info("Selected nodes for instance %s via iallocator %s: %s",
+                 self.op.instance_name, self.op.iallocator, ial.nodes)
     if ial.required_nodes == 2:
       self.op.snode = ial.nodes[1]
 
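In this hunk (and in the secondary-node selection hunk further down) the user-visible message moves from logger.ToStdout to the feedback_fn callback passed to the logical unit, so it reaches the client that submitted the job rather than the daemon's stdout, while a parallel logging.info call keeps the event in the daemon log. Note that feedback_fn is a plain callable rather than a logging function, so it keeps the eager %-formatting; only the logging.info call uses lazy arguments.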
@@ -3703,7 +3698,7 @@
                                    % self.op.mode)
 
     if self.op.start:
-      logger.Info("starting instance %s on node %s" % (instance, pnode_name))
+      logging.info("Starting instance %s on node %s", instance, pnode_name)
       feedback_fn("* starting instance...")
       if not self.rpc.call_instance_start(pnode_name, iobj, None):
        raise errors.OpExecError("Could not start instance")
@@ -3748,7 +3743,7 @@
     if instance.name not in node_insts:
       raise errors.OpExecError("Instance %s is not running." % instance.name)
 
-    logger.Debug("connecting to console of %s on %s" % (instance.name, node))
+    logging.debug("Connecting to console of %s on %s", instance.name, node)
 
     hyper = hypervisor.GetHypervisor(instance.hypervisor)
     console_cmd = hyper.GetShellCommandForConsole(instance)
@@ -3817,8 +3812,8 @@
                                  " of nodes (%s), required %s" %
                                  (len(ial.nodes), ial.required_nodes))
       self.op.remote_node = ial.nodes[0]
-      logger.ToStdout("Selected new secondary for the instance: %s" %
-                      self.op.remote_node)
+      feedback_fn("Selected new secondary for the instance: %s" %
+                  self.op.remote_node)
 
   def BuildHooksEnv(self):
     """Build hooks env.
@@ -4388,8 +4383,8 @@
     if self.op.wait_for_sync:
       disk_abort = not _WaitForSync(self.cfg, instance, self.proc)
       if disk_abort:
-        logger.Error("Warning: disk sync-ing has not returned a good status.\n"
-                     " Please check the instance.")
+        logging.error("Warning: disk sync-ing has not returned a good"
+                      " status.\nPlease check the instance.")
 
 
 class LUQueryInstanceData(NoHooksLU):
@@ -4869,8 +4864,8 @@
         new_dev_name = self.rpc.call_blockdev_snapshot(src_node, disk)
 
         if not new_dev_name:
-          logger.Error("could not snapshot block device %s on node %s" %
-                       (disk.logical_id[1], src_node))
+          logging.error("Could not snapshot block device %s on node %s",
+                        disk.logical_id[1], src_node)
         else:
           new_dev = objects.Disk(dev_type=constants.LD_LV, size=disk.size,
                                  logical_id=(vgname, new_dev_name),
@@ -4890,15 +4885,15 @@
     for dev in snap_disks:
       if not self.rpc.call_snapshot_export(src_node, dev, dst_node.name,
                                            instance, cluster_name):
-        logger.Error("could not export block device %s from node %s to node %s"
-                     % (dev.logical_id[1], src_node, dst_node.name))
+        logging.error("Could not export block device %s from node %s to"
+                      " node %s", dev.logical_id[1], src_node, dst_node.name)
       if not self.rpc.call_blockdev_remove(src_node, dev):
-        logger.Error("could not remove snapshot block device %s from node %s" %
-                     (dev.logical_id[1], src_node))
+        logging.error("Could not remove snapshot block device %s from node"
+                      " %s", dev.logical_id[1], src_node)
 
     if not self.rpc.call_finalize_export(dst_node.name, instance, snap_disks):
-      logger.Error("could not finalize export for instance %s on node %s" %
-                   (instance.name, dst_node.name))
+      logging.error("Could not finalize export for instance %s on node %s",
+                    instance.name, dst_node.name)
 
     nodelist = self.cfg.GetNodeList()
     nodelist.remove(dst_node.name)
@@ -4911,8 +4906,8 @@
     for node in exportlist:
       if instance.name in exportlist[node]:
         if not self.rpc.call_export_remove(node, instance.name):
-          logger.Error("could not remove older export for instance %s"
-                       " on node %s" % (instance.name, node))
+          logging.error("Could not remove older export for instance %s"
+                        " on node %s", instance.name, node)
 
 
 class LURemoveExport(NoHooksLU):
@@ -4953,8 +4948,8 @@
       if instance_name in exportlist[node]:
         found = True
         if not self.rpc.call_export_remove(node, instance_name):
-          logger.Error("could not remove export for instance %s"
-                       " on node %s" % (instance_name, node))
+          logging.error("Could not remove export for instance %s"
+                        " on node %s", instance_name, node)
 
     if fqdn_warn and not found:
       feedback_fn("Export not found. If trying to remove an export belonging"