Revision b9bddb6b
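Every hunk in this revision follows one pattern: module-level helpers that used to take a ConfigWriter (`cfg`/`cfgw`), and in some cases the processor (`proc`), as separate arguments now take the calling LogicalUnit (`lu`) as their first argument and reach configuration and logging through `lu.cfg` and `lu.proc`; call sites inside LU methods simply pass `self`. A minimal sketch of the pattern, using illustrative stand-in classes rather than the real Ganeti `LogicalUnit`, ConfigWriter and processor:

# Illustrative stand-ins only; not the real Ganeti classes.
class Processor(object):
  def LogInfo(self, msg):
    print(msg)

class ConfigWriter(object):
  def GetVGName(self):
    return "xenvg"

class LogicalUnit(object):
  def __init__(self):
    # Every LU already carries the config writer and the processor.
    self.cfg = ConfigWriter()
    self.proc = Processor()

# Before: helpers received each dependency separately.
def _OldHelper(cfgw, instance_name, proc):
  proc.LogInfo("%s uses VG %s" % (instance_name, cfgw.GetVGName()))

# After: helpers receive the LU and pull what they need from it, so a
# helper can gain a new dependency without touching every call site.
def _NewHelper(lu, instance_name):
  lu.proc.LogInfo("%s uses VG %s" % (instance_name, lu.cfg.GetVGName()))

lu = LogicalUnit()
_OldHelper(lu.cfg, "instance1.example.com", lu.proc)
_NewHelper(lu, "instance1.example.com")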
--- a/lib/cmdlib.py
+++ b/lib/cmdlib.py
@@ -442,7 +442,7 @@
   return _BuildInstanceHookEnv(**args)
 
 
-def _CheckInstanceBridgesExist(instance):
+def _CheckInstanceBridgesExist(lu, instance):
   """Check that the brigdes needed by an instance exist.
 
   """
@@ -1164,7 +1164,7 @@
                    " state, not changing")
 
 
-def _WaitForSync(cfgw, instance, proc, oneshot=False, unlock=False):
+def _WaitForSync(lu, instance, oneshot=False, unlock=False):
   """Sleep and poll for an instance's disk to sync.
 
   """
@@ -1172,12 +1172,12 @@
     return True
 
   if not oneshot:
-    proc.LogInfo("Waiting for instance %s to sync disks." % instance.name)
+    lu.proc.LogInfo("Waiting for instance %s to sync disks." % instance.name)
 
   node = instance.primary_node
 
   for dev in instance.disks:
-    cfgw.SetDiskID(dev, node)
+    lu.cfg.SetDiskID(dev, node)
 
   retries = 0
   while True:
@@ -1186,7 +1186,7 @@
     cumul_degraded = False
     rstats = rpc.call_blockdev_getmirrorstatus(node, instance.disks)
     if not rstats:
-      proc.LogWarning("Can't get any data from node %s" % node)
+      lu.proc.LogWarning("Can't get any data from node %s" % node)
       retries += 1
       if retries >= 10:
         raise errors.RemoteError("Can't contact node %s for mirror data,"
@@ -1197,8 +1197,8 @@
     for i in range(len(rstats)):
       mstat = rstats[i]
       if mstat is None:
-        proc.LogWarning("Can't compute data for node %s/%s" %
-                        (node, instance.disks[i].iv_name))
+        lu.proc.LogWarning("Can't compute data for node %s/%s" %
+                           (node, instance.disks[i].iv_name))
         continue
       # we ignore the ldisk parameter
       perc_done, est_time, is_degraded, _ = mstat
@@ -1210,19 +1210,19 @@
         max_time = est_time
       else:
         rem_time = "no time estimate"
-      proc.LogInfo("- device %s: %5.2f%% done, %s" %
-                   (instance.disks[i].iv_name, perc_done, rem_time))
+      lu.proc.LogInfo("- device %s: %5.2f%% done, %s" %
+                      (instance.disks[i].iv_name, perc_done, rem_time))
     if done or oneshot:
       break
 
     time.sleep(min(60, max_time))
 
   if done:
-    proc.LogInfo("Instance %s's disks are in sync." % instance.name)
+    lu.proc.LogInfo("Instance %s's disks are in sync." % instance.name)
   return not cumul_degraded
 
 
-def _CheckDiskConsistency(cfgw, dev, node, on_primary, ldisk=False):
+def _CheckDiskConsistency(lu, dev, node, on_primary, ldisk=False):
   """Check that mirrors are not degraded.
 
   The ldisk parameter, if True, will change the test from the
@@ -1230,7 +1230,7 @@
   the device(s)) to the ldisk (representing the local storage status).
 
   """
-  cfgw.SetDiskID(dev, node)
+  lu.cfg.SetDiskID(dev, node)
   if ldisk:
     idx = 6
   else:
@@ -1246,7 +1246,7 @@
     result = result and (not rstats[idx])
   if dev.children:
     for child in dev.children:
-      result = result and _CheckDiskConsistency(cfgw, child, node, on_primary)
+      result = result and _CheckDiskConsistency(lu, child, node, on_primary)
 
   return result
 
@@ -1930,14 +1930,14 @@
     """Activate the disks.
 
     """
-    disks_ok, disks_info = _AssembleInstanceDisks(self.instance, self.cfg)
+    disks_ok, disks_info = _AssembleInstanceDisks(self, self.instance)
     if not disks_ok:
       raise errors.OpExecError("Cannot activate block devices")
 
     return disks_info
 
 
-def _AssembleInstanceDisks(instance, cfg, ignore_secondaries=False):
+def _AssembleInstanceDisks(lu, instance, ignore_secondaries=False):
   """Prepare the block devices for an instance.
 
   This sets up the block devices on all nodes.
@@ -1967,7 +1967,7 @@
   # 1st pass, assemble on all nodes in secondary mode
   for inst_disk in instance.disks:
     for node, node_disk in inst_disk.ComputeNodeTree(instance.primary_node):
-      cfg.SetDiskID(node_disk, node)
+      lu.cfg.SetDiskID(node_disk, node)
       result = rpc.call_blockdev_assemble(node, node_disk, iname, False)
       if not result:
         logger.Error("could not prepare block device %s on node %s"
@@ -1982,7 +1982,7 @@
     for node, node_disk in inst_disk.ComputeNodeTree(instance.primary_node):
       if node != instance.primary_node:
         continue
-      cfg.SetDiskID(node_disk, node)
+      lu.cfg.SetDiskID(node_disk, node)
       result = rpc.call_blockdev_assemble(node, node_disk, iname, True)
       if not result:
         logger.Error("could not prepare block device %s on node %s"
@@ -1994,19 +1994,19 @@
   # this is a workaround that would be fixed better by
   # improving the logical/physical id handling
   for disk in instance.disks:
-    cfg.SetDiskID(disk, instance.primary_node)
+    lu.cfg.SetDiskID(disk, instance.primary_node)
 
   return disks_ok, device_info
 
 
-def _StartInstanceDisks(cfg, instance, force):
+def _StartInstanceDisks(lu, instance, force):
   """Start the disks of an instance.
 
   """
-  disks_ok, dummy = _AssembleInstanceDisks(instance, cfg,
+  disks_ok, dummy = _AssembleInstanceDisks(lu, instance,
                                            ignore_secondaries=force)
   if not disks_ok:
-    _ShutdownInstanceDisks(instance, cfg)
+    _ShutdownInstanceDisks(lu, instance)
     if force is not None and not force:
       logger.Error("If the message above refers to a secondary node,"
                    " you can retry the operation using '--force'.")
@@ -2044,10 +2044,10 @@
 
     """
     instance = self.instance
-    _SafeShutdownInstanceDisks(instance, self.cfg)
+    _SafeShutdownInstanceDisks(self, instance)
 
 
-def _SafeShutdownInstanceDisks(instance, cfg):
+def _SafeShutdownInstanceDisks(lu, instance):
   """Shutdown block devices of an instance.
 
   This function checks if an instance is running, before calling
@@ -2065,10 +2065,10 @@
     raise errors.OpExecError("Instance is running, can't shutdown"
                              " block devices.")
 
-  _ShutdownInstanceDisks(instance, cfg)
+  _ShutdownInstanceDisks(lu, instance)
 
 
-def _ShutdownInstanceDisks(instance, cfg, ignore_primary=False):
+def _ShutdownInstanceDisks(lu, instance, ignore_primary=False):
   """Shutdown block devices of an instance.
 
   This does the shutdown on all nodes of the instance.
@@ -2080,7 +2080,7 @@
   result = True
   for disk in instance.disks:
     for node, top_disk in disk.ComputeNodeTree(instance.primary_node):
-      cfg.SetDiskID(top_disk, node)
+      lu.cfg.SetDiskID(top_disk, node)
       if not rpc.call_blockdev_shutdown(node, top_disk):
         logger.Error("could not shutdown block device %s on node %s" %
                      (disk.iv_name, node))
@@ -2089,7 +2089,7 @@
   return result
 
 
-def _CheckNodeFreeMemory(cfg, node, reason, requested, hypervisor):
+def _CheckNodeFreeMemory(lu, node, reason, requested, hypervisor):
   """Checks if a node has enough free memory.
 
   This function check if a given node has the needed amount of free
@@ -2097,8 +2097,8 @@
   information from the node, this function raise an OpPrereqError
   exception.
 
-  @type cfg: C{config.ConfigWriter}
-  @param cfg: the ConfigWriter instance from which we get configuration data
+  @type lu: C{LogicalUnit}
+  @param lu: a logical unit from which we get configuration data
   @type node: C{str}
   @param node: the node to check
   @type reason: C{str}
@@ -2111,7 +2111,7 @@
       we cannot check the node
 
   """
-  nodeinfo = rpc.call_node_info([node], cfg.GetVGName(), hypervisor)
+  nodeinfo = rpc.call_node_info([node], lu.cfg.GetVGName(), hypervisor)
   if not nodeinfo or not isinstance(nodeinfo, dict):
     raise errors.OpPrereqError("Could not contact node %s for resource"
                                " information" % (node,))
@@ -2169,9 +2169,9 @@
       "Cannot retrieve locked instance %s" % self.op.instance_name
 
     # check bridges existance
-    _CheckInstanceBridgesExist(instance)
+    _CheckInstanceBridgesExist(self, instance)
 
-    _CheckNodeFreeMemory(self.cfg, instance.primary_node,
+    _CheckNodeFreeMemory(self, instance.primary_node,
                          "starting instance %s" % instance.name,
                          instance.memory, instance.hypervisor)
 
@@ -2187,10 +2187,10 @@
 
     node_current = instance.primary_node
 
-    _StartInstanceDisks(self.cfg, instance, force)
+    _StartInstanceDisks(self, instance, force)
 
     if not rpc.call_instance_start(node_current, instance, extra_args):
-      _ShutdownInstanceDisks(instance, self.cfg)
+      _ShutdownInstanceDisks(self, instance)
       raise errors.OpExecError("Could not start instance")
 
 
@@ -2245,7 +2245,7 @@
       "Cannot retrieve locked instance %s" % self.op.instance_name
 
     # check bridges existance
-    _CheckInstanceBridgesExist(instance)
+    _CheckInstanceBridgesExist(self, instance)
 
   def Exec(self, feedback_fn):
     """Reboot the instance.
@@ -2266,10 +2266,10 @@
     else:
       if not rpc.call_instance_shutdown(node_current, instance):
         raise errors.OpExecError("could not shutdown instance for full reboot")
-      _ShutdownInstanceDisks(instance, self.cfg)
-      _StartInstanceDisks(self.cfg, instance, ignore_secondaries)
+      _ShutdownInstanceDisks(self, instance)
+      _StartInstanceDisks(self, instance, ignore_secondaries)
       if not rpc.call_instance_start(node_current, instance, extra_args):
-        _ShutdownInstanceDisks(instance, self.cfg)
+        _ShutdownInstanceDisks(self, instance)
         raise errors.OpExecError("Could not start instance for full reboot")
 
     self.cfg.MarkInstanceUp(instance.name)
@@ -2324,7 +2324,7 @@
     if not rpc.call_instance_shutdown(node_current, instance):
       logger.Error("could not shutdown instance")
 
-    _ShutdownInstanceDisks(instance, self.cfg)
+    _ShutdownInstanceDisks(self, instance)
 
 
 class LUReinstallInstance(LogicalUnit):
@@ -2405,7 +2405,7 @@
     inst.os = self.op.os_type
     self.cfg.Update(inst)
 
-    _StartInstanceDisks(self.cfg, inst, None)
+    _StartInstanceDisks(self, inst, None)
     try:
       feedback_fn("Running the instance OS create scripts...")
       if not rpc.call_instance_os_add(inst.primary_node, inst, "sda", "sdb"):
@@ -2413,7 +2413,7 @@
                                  " on node %s" %
                                  (inst.name, inst.primary_node))
     finally:
-      _ShutdownInstanceDisks(inst, self.cfg)
+      _ShutdownInstanceDisks(self, inst)
 
 
 class LURenameInstance(LogicalUnit):
@@ -2510,7 +2510,7 @@
                                  " Ganeti)" % (old_file_storage_dir,
                                                new_file_storage_dir))
 
-    _StartInstanceDisks(self.cfg, inst, None)
+    _StartInstanceDisks(self, inst, None)
     try:
       if not rpc.call_instance_run_rename(inst.primary_node, inst, old_name,
                                           "sda", "sdb"):
@@ -2519,7 +2519,7 @@
                (inst.name, inst.primary_node))
         logger.Error(msg)
     finally:
-      _ShutdownInstanceDisks(inst, self.cfg)
+      _ShutdownInstanceDisks(self, inst)
 
 
 class LURemoveInstance(LogicalUnit):
@@ -2577,7 +2577,7 @@
 
     logger.Info("removing block devices for instance %s" % instance.name)
 
-    if not _RemoveDisks(instance, self.cfg):
+    if not _RemoveDisks(self, instance):
       if self.op.ignore_failures:
         feedback_fn("Warning: can't remove instance's disks")
       else:
@@ -2814,7 +2814,7 @@
 
     target_node = secondary_nodes[0]
     # check memory requirements on the secondary node
-    _CheckNodeFreeMemory(self.cfg, target_node, "failing over instance %s" %
+    _CheckNodeFreeMemory(self, target_node, "failing over instance %s" %
                          instance.name, instance.memory,
                          instance.hypervisor)
 
@@ -2840,7 +2840,7 @@
     feedback_fn("* checking disk consistency between source and target")
     for dev in instance.disks:
       # for drbd, these are drbd over lvm
-      if not _CheckDiskConsistency(self.cfg, dev, target_node, False):
+      if not _CheckDiskConsistency(self, dev, target_node, False):
         if instance.status == "up" and not self.op.ignore_consistency:
           raise errors.OpExecError("Disk %s is degraded on target node,"
                                    " aborting failover." % dev.iv_name)
@@ -2859,7 +2859,7 @@
                 (instance.name, source_node))
 
     feedback_fn("* deactivating the instance's disks on source node")
-    if not _ShutdownInstanceDisks(instance, self.cfg, ignore_primary=True):
+    if not _ShutdownInstanceDisks(self, instance, ignore_primary=True):
       raise errors.OpExecError("Can't shut down the instance's disks.")
 
     instance.primary_node = target_node
@@ -2872,20 +2872,20 @@
     logger.Info("Starting instance %s on node %s" %
                 (instance.name, target_node))
 
-    disks_ok, dummy = _AssembleInstanceDisks(instance, self.cfg,
+    disks_ok, dummy = _AssembleInstanceDisks(self, instance,
                                              ignore_secondaries=True)
     if not disks_ok:
-      _ShutdownInstanceDisks(instance, self.cfg)
+      _ShutdownInstanceDisks(self, instance)
       raise errors.OpExecError("Can't activate the instance's disks")
 
     feedback_fn("* starting the instance on the target node")
     if not rpc.call_instance_start(target_node, instance, None):
-      _ShutdownInstanceDisks(instance, self.cfg)
+      _ShutdownInstanceDisks(self, instance)
       raise errors.OpExecError("Could not start instance %s on node %s." %
                                (instance.name, target_node))
 
 
-def _CreateBlockDevOnPrimary(cfg, node, instance, device, info):
+def _CreateBlockDevOnPrimary(lu, node, instance, device, info):
   """Create a tree of block devices on the primary node.
 
   This always creates all devices.
@@ -2893,10 +2893,10 @@
   """
   if device.children:
     for child in device.children:
-      if not _CreateBlockDevOnPrimary(cfg, node, instance, child, info):
+      if not _CreateBlockDevOnPrimary(lu, node, instance, child, info):
         return False
 
-  cfg.SetDiskID(device, node)
+  lu.cfg.SetDiskID(device, node)
   new_id = rpc.call_blockdev_create(node, device, device.size,
                                     instance.name, True, info)
   if not new_id:
@@ -2906,7 +2906,7 @@
   return True
 
 
-def _CreateBlockDevOnSecondary(cfg, node, instance, device, force, info):
+def _CreateBlockDevOnSecondary(lu, node, instance, device, force, info):
  """Create a tree of block devices on a secondary node.
 
   If this device type has to be created on secondaries, create it and
@@ -2919,13 +2919,13 @@
     force = True
   if device.children:
     for child in device.children:
-      if not _CreateBlockDevOnSecondary(cfg, node, instance,
+      if not _CreateBlockDevOnSecondary(lu, node, instance,
                                         child, force, info):
         return False
 
   if not force:
     return True
-  cfg.SetDiskID(device, node)
+  lu.cfg.SetDiskID(device, node)
   new_id = rpc.call_blockdev_create(node, device, device.size,
                                     instance.name, False, info)
   if not new_id:
@@ -2935,7 +2935,7 @@
   return True
 
 
-def _GenerateUniqueNames(cfg, exts):
+def _GenerateUniqueNames(lu, exts):
   """Generate a suitable LV name.
 
   This will generate a logical volume name for the given instance.
@@ -2943,19 +2943,19 @@
   """
   results = []
   for val in exts:
-    new_id = cfg.GenerateUniqueID()
+    new_id = lu.cfg.GenerateUniqueID()
     results.append("%s%s" % (new_id, val))
   return results
 
 
-def _GenerateDRBD8Branch(cfg, primary, secondary, size, names, iv_name,
+def _GenerateDRBD8Branch(lu, primary, secondary, size, names, iv_name,
                          p_minor, s_minor):
   """Generate a drbd8 device complete with its children.
 
   """
-  port = cfg.AllocatePort()
-  vgname = cfg.GetVGName()
-  shared_secret = cfg.GenerateDRBDSecret()
+  port = lu.cfg.AllocatePort()
+  vgname = lu.cfg.GetVGName()
+  shared_secret = lu.cfg.GenerateDRBDSecret()
   dev_data = objects.Disk(dev_type=constants.LD_LV, size=size,
                           logical_id=(vgname, names[0]))
   dev_meta = objects.Disk(dev_type=constants.LD_LV, size=128,
@@ -2969,7 +2969,7 @@
   return drbd_dev
 
 
-def _GenerateDiskTemplate(cfg, template_name,
+def _GenerateDiskTemplate(lu, template_name,
                           instance_name, primary_node,
                           secondary_nodes, disk_sz, swap_sz,
                           file_storage_dir, file_driver):
@@ -2978,14 +2978,14 @@
   """
   #TODO: compute space requirements
 
-  vgname = cfg.GetVGName()
+  vgname = lu.cfg.GetVGName()
   if template_name == constants.DT_DISKLESS:
     disks = []
   elif template_name == constants.DT_PLAIN:
     if len(secondary_nodes) != 0:
       raise errors.ProgrammerError("Wrong template configuration")
 
-    names = _GenerateUniqueNames(cfg, [".sda", ".sdb"])
+    names = _GenerateUniqueNames(lu, [".sda", ".sdb"])
     sda_dev = objects.Disk(dev_type=constants.LD_LV, size=disk_sz,
                            logical_id=(vgname, names[0]),
                            iv_name = "sda")
@@ -2998,15 +2998,15 @@
       raise errors.ProgrammerError("Wrong template configuration")
     remote_node = secondary_nodes[0]
     (minor_pa, minor_pb,
-     minor_sa, minor_sb) = cfg.AllocateDRBDMinor(
+     minor_sa, minor_sb) = lu.cfg.AllocateDRBDMinor(
       [primary_node, primary_node, remote_node, remote_node], instance_name)
 
-    names = _GenerateUniqueNames(cfg, [".sda_data", ".sda_meta",
-                                       ".sdb_data", ".sdb_meta"])
-    drbd_sda_dev = _GenerateDRBD8Branch(cfg, primary_node, remote_node,
+    names = _GenerateUniqueNames(lu, [".sda_data", ".sda_meta",
+                                      ".sdb_data", ".sdb_meta"])
+    drbd_sda_dev = _GenerateDRBD8Branch(lu, primary_node, remote_node,
                                         disk_sz, names[0:2], "sda",
                                         minor_pa, minor_sa)
-    drbd_sdb_dev = _GenerateDRBD8Branch(cfg, primary_node, remote_node,
+    drbd_sdb_dev = _GenerateDRBD8Branch(lu, primary_node, remote_node,
                                         swap_sz, names[2:4], "sdb",
                                         minor_pb, minor_sb)
     disks = [drbd_sda_dev, drbd_sdb_dev]
@@ -3033,7 +3033,7 @@
   return "originstname+%s" % instance.name
 
 
-def _CreateDisks(cfg, instance):
+def _CreateDisks(lu, instance):
   """Create all disks for an instance.
 
   This abstracts away some work from AddInstance.
@@ -3065,13 +3065,13 @@
                 (device.iv_name, instance.name))
     #HARDCODE
     for secondary_node in instance.secondary_nodes:
-      if not _CreateBlockDevOnSecondary(cfg, secondary_node, instance,
+      if not _CreateBlockDevOnSecondary(lu, secondary_node, instance,
                                         device, False, info):
         logger.Error("failed to create volume %s (%s) on secondary node %s!" %
                      (device.iv_name, device, secondary_node))
         return False
     #HARDCODE
-    if not _CreateBlockDevOnPrimary(cfg, instance.primary_node,
+    if not _CreateBlockDevOnPrimary(lu, instance.primary_node,
                                     instance, device, info):
       logger.Error("failed to create volume %s on primary!" %
                    device.iv_name)
@@ -3080,7 +3080,7 @@
   return True
 
 
-def _RemoveDisks(instance, cfg):
+def _RemoveDisks(lu, instance):
   """Remove all disks for an instance.
 
   This abstracts away some work from `AddInstance()` and
@@ -3100,7 +3100,7 @@
   result = True
   for device in instance.disks:
     for node, disk in device.ComputeNodeTree(instance.primary_node):
-      cfg.SetDiskID(disk, node)
+      lu.cfg.SetDiskID(disk, node)
      if not rpc.call_blockdev_remove(node, disk):
        logger.Error("could not remove block device %s on node %s,"
                     " continuing anyway" %
@@ -3468,7 +3468,7 @@
 
     # memory check on primary node
     if self.op.start:
-      _CheckNodeFreeMemory(self.cfg, self.pnode.name,
+      _CheckNodeFreeMemory(self, self.pnode.name,
                            "creating instance %s" % self.op.instance_name,
                            self.op.mem_size, self.op.hypervisor)
 
@@ -3543,7 +3543,7 @@
                                     string_file_storage_dir, instance))
 
 
-    disks = _GenerateDiskTemplate(self.cfg,
+    disks = _GenerateDiskTemplate(self,
                                   self.op.disk_template,
                                   instance, pnode_name,
                                   self.secondaries, self.op.disk_size,
@@ -3572,7 +3572,7 @@
                             )
 
     feedback_fn("* creating instance disks...")
-    if not _CreateDisks(self.cfg, iobj):
-      _RemoveDisks(iobj, self.cfg)
+    if not _CreateDisks(self, iobj):
+      _RemoveDisks(self, iobj)
       self.cfg.ReleaseDRBDMinors(instance)
       raise errors.OpExecError("Device creation failed, reverting...")
@@ -3587,17 +3587,17 @@
       self.cfg.ReleaseDRBDMinors(instance)
 
     if self.op.wait_for_sync:
-      disk_abort = not _WaitForSync(self.cfg, iobj, self.proc)
+      disk_abort = not _WaitForSync(self, iobj)
     elif iobj.disk_template in constants.DTS_NET_MIRROR:
       # make sure the disks are not degraded (still sync-ing is ok)
       time.sleep(15)
       feedback_fn("* checking mirrors status")
-      disk_abort = not _WaitForSync(self.cfg, iobj, self.proc, oneshot=True)
+      disk_abort = not _WaitForSync(self, iobj, oneshot=True)
     else:
       disk_abort = False
 
     if disk_abort:
-      _RemoveDisks(iobj, self.cfg)
+      _RemoveDisks(self, iobj)
       self.cfg.RemoveInstance(iobj.name)
       # Make sure the instance lock gets removed
       self.remove_locks[locking.LEVEL_INSTANCE] = iobj.name
@@ -3895,7 +3895,7 @@
       if not dev.iv_name in self.op.disks:
         continue
       info("checking %s consistency on %s" % (dev.iv_name, oth_node))
-      if not _CheckDiskConsistency(self.cfg, dev, oth_node,
+      if not _CheckDiskConsistency(self, dev, oth_node,
                                    oth_node==instance.primary_node):
         raise errors.OpExecError("Peer node (%s) has degraded storage, unsafe"
                                  " to replace disks on this node (%s)" %
@@ -3909,7 +3909,7 @@
       size = dev.size
       cfg.SetDiskID(dev, tgt_node)
       lv_names = [".%s_%s" % (dev.iv_name, suf) for suf in ["data", "meta"]]
-      names = _GenerateUniqueNames(cfg, lv_names)
+      names = _GenerateUniqueNames(self, lv_names)
       lv_data = objects.Disk(dev_type=constants.LD_LV, size=size,
                              logical_id=(vgname, names[0]))
       lv_meta = objects.Disk(dev_type=constants.LD_LV, size=128,
@@ -3923,7 +3923,7 @@
     # _Create...OnPrimary (which forces the creation), even if we
     # are talking about the secondary node
     for new_lv in new_lvs:
-      if not _CreateBlockDevOnPrimary(cfg, tgt_node, instance, new_lv,
+      if not _CreateBlockDevOnPrimary(self, tgt_node, instance, new_lv,
                                       _GetInstanceInfoText(instance)):
         raise errors.OpExecError("Failed to create new LV named '%s' on"
                                  " node '%s'" %
@@ -3991,7 +3991,7 @@
     # does a combined result over all disks, so we don't check its
     # return value
     self.proc.LogStep(5, steps_total, "sync devices")
-    _WaitForSync(cfg, instance, self.proc, unlock=True)
+    _WaitForSync(self, instance, unlock=True)
 
     # so check manually all the devices
     for name, (dev, old_lvs, new_lvs) in iv_names.iteritems():
@@ -4067,7 +4067,7 @@
       if not dev.iv_name in self.op.disks:
         continue
       info("checking %s consistency on %s" % (dev.iv_name, pri_node))
-      if not _CheckDiskConsistency(self.cfg, dev, pri_node, True, ldisk=True):
+      if not _CheckDiskConsistency(self, dev, pri_node, True, ldisk=True):
         raise errors.OpExecError("Primary node (%s) has degraded storage,"
                                  " unsafe to replace the secondary" %
                                  pri_node)
@@ -4081,7 +4081,7 @@
     # _Create...OnPrimary (which forces the creation), even if we
    # are talking about the secondary node
     for new_lv in dev.children:
-      if not _CreateBlockDevOnPrimary(cfg, new_node, instance, new_lv,
+      if not _CreateBlockDevOnPrimary(self, new_node, instance, new_lv,
                                       _GetInstanceInfoText(instance)):
         raise errors.OpExecError("Failed to create new LV named '%s' on"
                                  " node '%s'" %
@@ -4113,9 +4113,9 @@
       new_drbd = objects.Disk(dev_type=constants.LD_DRBD8,
                               logical_id=new_logical_id,
                               children=dev.children)
-      if not _CreateBlockDevOnSecondary(cfg, new_node, instance,
+      if not _CreateBlockDevOnSecondary(self, new_node, instance,
                                         new_drbd, False,
-                                        _GetInstanceInfoText(instance)):
+                                         _GetInstanceInfoText(instance)):
         self.cfg.ReleaseDRBDMinors(instance.name)
         raise errors.OpExecError("Failed to create new DRBD on"
                                  " node '%s'" % new_node)
@@ -4177,7 +4177,7 @@
     # does a combined result over all disks, so we don't check its
     # return value
     self.proc.LogStep(5, steps_total, "sync devices")
-    _WaitForSync(cfg, instance, self.proc, unlock=True)
+    _WaitForSync(self, instance, unlock=True)
 
     # so check manually all the devices
     for name, (dev, old_lvs, _) in iv_names.iteritems():
@@ -4205,7 +4205,7 @@
 
     # Activate the instance disks if we're replacing them on a down instance
     if instance.status == "down":
-      _StartInstanceDisks(self.cfg, instance, True)
+      _StartInstanceDisks(self, instance, True)
 
     if instance.disk_template == constants.DT_DRBD8:
       if self.op.remote_node is None:
@@ -4219,7 +4219,7 @@
 
     # Deactivate the instance disks if we're replacing them on a down instance
     if instance.status == "down":
-      _SafeShutdownInstanceDisks(instance, self.cfg)
+      _SafeShutdownInstanceDisks(self, instance)
 
     return ret
 
@@ -4870,7 +4870,7 @@
     finally:
       if self.op.shutdown and instance.status == "up":
         if not rpc.call_instance_start(src_node, instance, None):
-          _ShutdownInstanceDisks(instance, self.cfg)
+          _ShutdownInstanceDisks(self, instance)
           raise errors.OpExecError("Could not start instance")
 
     # TODO: check for size
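At the call sites the rewrite is mechanical: inside a LogicalUnit method, the former `self.cfg` (or `instance, self.cfg`) argument becomes `self`, since the LU is itself the `lu` the helpers now expect. A self-contained sketch of that convention, again with simplified stand-ins rather than the real cmdlib classes:

# Stand-ins mirroring the shape of the cmdlib call sites above.
class LogicalUnit(object):
  def __init__(self, cfg):
    self.cfg = cfg

def _StartInstanceDisks(lu, instance, force):
  # The helper digs the configuration out of the unit itself.
  print("starting disks for %s via %r (force=%s)" % (instance, lu.cfg, force))

class LUStartupInstance(LogicalUnit):
  def Exec(self, instance):
    # Old: _StartInstanceDisks(self.cfg, instance, None)
    _StartInstanceDisks(self, instance, None)

LUStartupInstance(cfg={"vg": "xenvg"}).Exec("instance1.example.com")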