Revision 13f6af81 lib/cmdlib/instance.py
--- a/lib/cmdlib/instance.py
+++ b/lib/cmdlib/instance.py
@@ -48,7 +48,7 @@
 from ganeti.cmdlib.base import NoHooksLU, LogicalUnit, _QueryBase, \
   ResultWithJobs
 
-from ganeti.cmdlib.common import INSTANCE_ONLINE, INSTANCE_DOWN, \
+from ganeti.cmdlib.common import INSTANCE_DOWN, \
   INSTANCE_NOT_RUNNING, CAN_CHANGE_INSTANCE_OFFLINE, _CheckNodeOnline, \
   _ShareAll, _GetDefaultIAllocator, _CheckInstanceNodeGroups, \
   _LoadNodeEvacResult, _CheckIAllocatorOrNode, _CheckParamsNotGlobal, \
@@ -62,12 +62,13 @@
   _CheckRADOSFreeSpace, _ComputeDiskSizePerVG, _GenerateDiskTemplate, \
   _CreateBlockDev, _StartInstanceDisks, _ShutdownInstanceDisks, \
   _AssembleInstanceDisks
+from ganeti.cmdlib.instance_operation import _GetInstanceConsole
 from ganeti.cmdlib.instance_utils import _BuildInstanceHookEnvByObject, \
   _GetClusterDomainSecret, _BuildInstanceHookEnv, _NICListToTuple, \
   _NICToTuple, _CheckNodeNotDrained, _RemoveInstance, _CopyLockList, \
   _ReleaseLocks, _CheckNodeVmCapable, _CheckTargetNodeIPolicy, \
   _GetInstanceInfoText, _RemoveDisks, _CheckNodeFreeMemory, \
-  _CheckInstanceBridgesExist, _CheckNicsBridgesExist
+  _CheckInstanceBridgesExist, _CheckNicsBridgesExist, _CheckNodeHasOS
 
 import ganeti.masterd.instance
 
@@ -320,24 +321,6 @@
     raise errors.OpPrereqError("Unsupported OS variant", errors.ECODE_INVAL)
 
 
-def _CheckNodeHasOS(lu, node, os_name, force_variant):
-  """Ensure that a node supports a given OS.
-
-  @param lu: the LU on behalf of which we make the check
-  @param node: the node to check
-  @param os_name: the OS to query about
-  @param force_variant: whether to ignore variant errors
-  @raise errors.OpPrereqError: if the node is not supporting the OS
-
-  """
-  result = lu.rpc.call_os_get(node, os_name)
-  result.Raise("OS '%s' not in supported OS list for node %s" %
-               (os_name, node),
-               prereq=True, ecode=errors.ECODE_INVAL)
-  if not force_variant:
-    _CheckOSVariant(result.payload, os_name)
-
-
 class LUInstanceCreate(LogicalUnit):
   """Create an instance.
 
@@ -1826,27 +1809,6 @@
                              (instance.name, target_node, msg))
 
 
-def _GetInstanceConsole(cluster, instance):
-  """Returns console information for an instance.
-
-  @type cluster: L{objects.Cluster}
-  @type instance: L{objects.Instance}
-  @rtype: dict
-
-  """
-  hyper = hypervisor.GetHypervisorClass(instance.hypervisor)
-  # beparams and hvparams are passed separately, to avoid editing the
-  # instance and then saving the defaults in the instance itself.
-  hvparams = cluster.FillHV(instance)
-  beparams = cluster.FillBE(instance)
-  console = hyper.GetInstanceConsole(instance, hvparams, beparams)
-
-  assert console.instance == instance.name
-  assert console.Validate()
-
-  return console.ToDict()
-
-
 class _InstanceQuery(_QueryBase):
   FIELDS = query.INSTANCE_FIELDS
 
@@ -2241,443 +2203,6 @@
     return result
 
 
-class LUInstanceStartup(LogicalUnit):
-  """Starts an instance.
-
-  """
-  HPATH = "instance-start"
-  HTYPE = constants.HTYPE_INSTANCE
-  REQ_BGL = False
-
-  def CheckArguments(self):
-    # extra beparams
-    if self.op.beparams:
-      # fill the beparams dict
-      objects.UpgradeBeParams(self.op.beparams)
-      utils.ForceDictType(self.op.beparams, constants.BES_PARAMETER_TYPES)
-
-  def ExpandNames(self):
-    self._ExpandAndLockInstance()
-    self.recalculate_locks[locking.LEVEL_NODE_RES] = constants.LOCKS_REPLACE
-
-  def DeclareLocks(self, level):
-    if level == locking.LEVEL_NODE_RES:
-      self._LockInstancesNodes(primary_only=True, level=locking.LEVEL_NODE_RES)
-
-  def BuildHooksEnv(self):
-    """Build hooks env.
-
-    This runs on master, primary and secondary nodes of the instance.
-
-    """
-    env = {
-      "FORCE": self.op.force,
-      }
-
-    env.update(_BuildInstanceHookEnvByObject(self, self.instance))
-
-    return env
-
-  def BuildHooksNodes(self):
-    """Build hooks nodes.
-
-    """
-    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
-    return (nl, nl)
-
-  def CheckPrereq(self):
-    """Check prerequisites.
-
-    This checks that the instance is in the cluster.
-
-    """
-    self.instance = instance = self.cfg.GetInstanceInfo(self.op.instance_name)
-    assert self.instance is not None, \
-      "Cannot retrieve locked instance %s" % self.op.instance_name
-
-    # extra hvparams
-    if self.op.hvparams:
-      # check hypervisor parameter syntax (locally)
-      cluster = self.cfg.GetClusterInfo()
-      utils.ForceDictType(self.op.hvparams, constants.HVS_PARAMETER_TYPES)
-      filled_hvp = cluster.FillHV(instance)
-      filled_hvp.update(self.op.hvparams)
-      hv_type = hypervisor.GetHypervisorClass(instance.hypervisor)
-      hv_type.CheckParameterSyntax(filled_hvp)
-      _CheckHVParams(self, instance.all_nodes, instance.hypervisor, filled_hvp)
-
-    _CheckInstanceState(self, instance, INSTANCE_ONLINE)
-
-    self.primary_offline = self.cfg.GetNodeInfo(instance.primary_node).offline
-
-    if self.primary_offline and self.op.ignore_offline_nodes:
-      self.LogWarning("Ignoring offline primary node")
-
-      if self.op.hvparams or self.op.beparams:
-        self.LogWarning("Overridden parameters are ignored")
-    else:
-      _CheckNodeOnline(self, instance.primary_node)
-
-      bep = self.cfg.GetClusterInfo().FillBE(instance)
-      bep.update(self.op.beparams)
-
-      # check bridges existence
-      _CheckInstanceBridgesExist(self, instance)
-
-      remote_info = self.rpc.call_instance_info(instance.primary_node,
-                                                instance.name,
-                                                instance.hypervisor)
-      remote_info.Raise("Error checking node %s" % instance.primary_node,
-                        prereq=True, ecode=errors.ECODE_ENVIRON)
-      if not remote_info.payload: # not running already
-        _CheckNodeFreeMemory(self, instance.primary_node,
-                             "starting instance %s" % instance.name,
-                             bep[constants.BE_MINMEM], instance.hypervisor)
-
-  def Exec(self, feedback_fn):
-    """Start the instance.
-
-    """
-    instance = self.instance
-    force = self.op.force
-    reason = self.op.reason
-
-    if not self.op.no_remember:
-      self.cfg.MarkInstanceUp(instance.name)
-
-    if self.primary_offline:
-      assert self.op.ignore_offline_nodes
-      self.LogInfo("Primary node offline, marked instance as started")
-    else:
-      node_current = instance.primary_node
-
-      _StartInstanceDisks(self, instance, force)
-
-      result = \
-        self.rpc.call_instance_start(node_current,
-                                     (instance, self.op.hvparams,
-                                      self.op.beparams),
-                                     self.op.startup_paused, reason)
-      msg = result.fail_msg
-      if msg:
-        _ShutdownInstanceDisks(self, instance)
-        raise errors.OpExecError("Could not start instance: %s" % msg)
-
-
-class LUInstanceShutdown(LogicalUnit):
-  """Shutdown an instance.
-
-  """
-  HPATH = "instance-stop"
-  HTYPE = constants.HTYPE_INSTANCE
-  REQ_BGL = False
-
-  def ExpandNames(self):
-    self._ExpandAndLockInstance()
-
-  def BuildHooksEnv(self):
-    """Build hooks env.
-
-    This runs on master, primary and secondary nodes of the instance.
-
-    """
-    env = _BuildInstanceHookEnvByObject(self, self.instance)
-    env["TIMEOUT"] = self.op.timeout
-    return env
-
-  def BuildHooksNodes(self):
-    """Build hooks nodes.
-
-    """
-    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
-    return (nl, nl)
-
-  def CheckPrereq(self):
-    """Check prerequisites.
-
-    This checks that the instance is in the cluster.
-
-    """
-    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
-    assert self.instance is not None, \
-      "Cannot retrieve locked instance %s" % self.op.instance_name
-
-    if not self.op.force:
-      _CheckInstanceState(self, self.instance, INSTANCE_ONLINE)
-    else:
-      self.LogWarning("Ignoring offline instance check")
-
-    self.primary_offline = \
-      self.cfg.GetNodeInfo(self.instance.primary_node).offline
-
-    if self.primary_offline and self.op.ignore_offline_nodes:
-      self.LogWarning("Ignoring offline primary node")
-    else:
-      _CheckNodeOnline(self, self.instance.primary_node)
-
-  def Exec(self, feedback_fn):
-    """Shutdown the instance.
-
-    """
-    instance = self.instance
-    node_current = instance.primary_node
-    timeout = self.op.timeout
-    reason = self.op.reason
-
-    # If the instance is offline we shouldn't mark it as down, as that
-    # resets the offline flag.
-    if not self.op.no_remember and instance.admin_state in INSTANCE_ONLINE:
-      self.cfg.MarkInstanceDown(instance.name)
-
-    if self.primary_offline:
-      assert self.op.ignore_offline_nodes
-      self.LogInfo("Primary node offline, marked instance as stopped")
-    else:
-      result = self.rpc.call_instance_shutdown(node_current, instance, timeout,
-                                               reason)
-      msg = result.fail_msg
-      if msg:
-        self.LogWarning("Could not shutdown instance: %s", msg)
-
-      _ShutdownInstanceDisks(self, instance)
-
-
-class LUInstanceReinstall(LogicalUnit):
-  """Reinstall an instance.
-
-  """
-  HPATH = "instance-reinstall"
-  HTYPE = constants.HTYPE_INSTANCE
-  REQ_BGL = False
-
-  def ExpandNames(self):
-    self._ExpandAndLockInstance()
-
-  def BuildHooksEnv(self):
-    """Build hooks env.
-
-    This runs on master, primary and secondary nodes of the instance.
-
-    """
-    return _BuildInstanceHookEnvByObject(self, self.instance)
-
-  def BuildHooksNodes(self):
-    """Build hooks nodes.
-
-    """
-    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
-    return (nl, nl)
-
-  def CheckPrereq(self):
-    """Check prerequisites.
-
-    This checks that the instance is in the cluster and is not running.
-
-    """
-    instance = self.cfg.GetInstanceInfo(self.op.instance_name)
-    assert instance is not None, \
-      "Cannot retrieve locked instance %s" % self.op.instance_name
-    _CheckNodeOnline(self, instance.primary_node, "Instance primary node"
-                     " offline, cannot reinstall")
-
-    if instance.disk_template == constants.DT_DISKLESS:
-      raise errors.OpPrereqError("Instance '%s' has no disks" %
-                                 self.op.instance_name,
-                                 errors.ECODE_INVAL)
-    _CheckInstanceState(self, instance, INSTANCE_DOWN, msg="cannot reinstall")
-
-    if self.op.os_type is not None:
-      # OS verification
-      pnode = _ExpandNodeName(self.cfg, instance.primary_node)
-      _CheckNodeHasOS(self, pnode, self.op.os_type, self.op.force_variant)
-      instance_os = self.op.os_type
-    else:
-      instance_os = instance.os
-
-    nodelist = list(instance.all_nodes)
-
-    if self.op.osparams:
-      i_osdict = _GetUpdatedParams(instance.osparams, self.op.osparams)
-      _CheckOSParams(self, True, nodelist, instance_os, i_osdict)
-      self.os_inst = i_osdict # the new dict (without defaults)
-    else:
-      self.os_inst = None
-
-    self.instance = instance
-
-  def Exec(self, feedback_fn):
-    """Reinstall the instance.
-
-    """
-    inst = self.instance
-
-    if self.op.os_type is not None:
-      feedback_fn("Changing OS to '%s'..." % self.op.os_type)
-      inst.os = self.op.os_type
-      # Write to configuration
-      self.cfg.Update(inst, feedback_fn)
-
-    _StartInstanceDisks(self, inst, None)
-    try:
-      feedback_fn("Running the instance OS create scripts...")
-      # FIXME: pass debug option from opcode to backend
-      result = self.rpc.call_instance_os_add(inst.primary_node,
-                                             (inst, self.os_inst), True,
-                                             self.op.debug_level)
-      result.Raise("Could not install OS for instance %s on node %s" %
-                   (inst.name, inst.primary_node))
-    finally:
-      _ShutdownInstanceDisks(self, inst)
-
-
-class LUInstanceReboot(LogicalUnit):
-  """Reboot an instance.
-
-  """
-  HPATH = "instance-reboot"
-  HTYPE = constants.HTYPE_INSTANCE
-  REQ_BGL = False
-
-  def ExpandNames(self):
-    self._ExpandAndLockInstance()
-
-  def BuildHooksEnv(self):
-    """Build hooks env.
-
-    This runs on master, primary and secondary nodes of the instance.
-
-    """
-    env = {
-      "IGNORE_SECONDARIES": self.op.ignore_secondaries,
-      "REBOOT_TYPE": self.op.reboot_type,
-      "SHUTDOWN_TIMEOUT": self.op.shutdown_timeout,
-      }
-
-    env.update(_BuildInstanceHookEnvByObject(self, self.instance))
-
-    return env
-
-  def BuildHooksNodes(self):
-    """Build hooks nodes.
-
-    """
-    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
-    return (nl, nl)
-
-  def CheckPrereq(self):
-    """Check prerequisites.
-
-    This checks that the instance is in the cluster.
-
-    """
-    self.instance = instance = self.cfg.GetInstanceInfo(self.op.instance_name)
-    assert self.instance is not None, \
-      "Cannot retrieve locked instance %s" % self.op.instance_name
-    _CheckInstanceState(self, instance, INSTANCE_ONLINE)
-    _CheckNodeOnline(self, instance.primary_node)
-
-    # check bridges existence
-    _CheckInstanceBridgesExist(self, instance)
-
-  def Exec(self, feedback_fn):
-    """Reboot the instance.
-
-    """
-    instance = self.instance
-    ignore_secondaries = self.op.ignore_secondaries
-    reboot_type = self.op.reboot_type
-    reason = self.op.reason
-
-    remote_info = self.rpc.call_instance_info(instance.primary_node,
-                                              instance.name,
-                                              instance.hypervisor)
-    remote_info.Raise("Error checking node %s" % instance.primary_node)
-    instance_running = bool(remote_info.payload)
-
-    node_current = instance.primary_node
-
-    if instance_running and reboot_type in [constants.INSTANCE_REBOOT_SOFT,
-                                            constants.INSTANCE_REBOOT_HARD]:
-      for disk in instance.disks:
-        self.cfg.SetDiskID(disk, node_current)
-      result = self.rpc.call_instance_reboot(node_current, instance,
-                                             reboot_type,
-                                             self.op.shutdown_timeout, reason)
-      result.Raise("Could not reboot instance")
-    else:
-      if instance_running:
-        result = self.rpc.call_instance_shutdown(node_current, instance,
-                                                 self.op.shutdown_timeout,
-                                                 reason)
-        result.Raise("Could not shutdown instance for full reboot")
-        _ShutdownInstanceDisks(self, instance)
-      else:
-        self.LogInfo("Instance %s was already stopped, starting now",
-                     instance.name)
-      _StartInstanceDisks(self, instance, ignore_secondaries)
-      result = self.rpc.call_instance_start(node_current,
-                                            (instance, None, None), False,
-                                            reason)
-      msg = result.fail_msg
-      if msg:
-        _ShutdownInstanceDisks(self, instance)
-        raise errors.OpExecError("Could not start instance for"
-                                 " full reboot: %s" % msg)
-
-    self.cfg.MarkInstanceUp(instance.name)
-
-
-class LUInstanceConsole(NoHooksLU):
-  """Connect to an instance's console.
-
-  This is somewhat special in that it returns the command line that
-  you need to run on the master node in order to connect to the
-  console.
-
-  """
-  REQ_BGL = False
-
-  def ExpandNames(self):
-    self.share_locks = _ShareAll()
-    self._ExpandAndLockInstance()
-
-  def CheckPrereq(self):
-    """Check prerequisites.
-
-    This checks that the instance is in the cluster.
-
-    """
-    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
-    assert self.instance is not None, \
-      "Cannot retrieve locked instance %s" % self.op.instance_name
-    _CheckNodeOnline(self, self.instance.primary_node)
-
-  def Exec(self, feedback_fn):
-    """Connect to the console of an instance
-
-    """
-    instance = self.instance
-    node = instance.primary_node
-
-    node_insts = self.rpc.call_instance_list([node],
-                                             [instance.hypervisor])[node]
-    node_insts.Raise("Can't get node information from %s" % node)
-
-    if instance.name not in node_insts.payload:
-      if instance.admin_state == constants.ADMINST_UP:
-        state = constants.INSTST_ERRORDOWN
-      elif instance.admin_state == constants.ADMINST_DOWN:
-        state = constants.INSTST_ADMINDOWN
-      else:
-        state = constants.INSTST_ADMINOFFLINE
-      raise errors.OpExecError("Instance %s is not running (state %s)" %
-                               (instance.name, state))
-
-    logging.debug("Connecting to console of %s on %s", instance.name, node)
-
-    return _GetInstanceConsole(self.cfg.GetClusterInfo(), instance)
-
-
 class LUInstanceMultiAlloc(NoHooksLU):
   """Allocates multiple instances at the same time.
 
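Net effect of this revision: the helpers _CheckNodeHasOS and _GetInstanceConsole, plus the five instance-operation logical units (LUInstanceStartup, LUInstanceShutdown, LUInstanceReinstall, LUInstanceReboot, LUInstanceConsole), are removed from lib/cmdlib/instance.py. The import hunk records where the two helpers now come from: _CheckNodeHasOS is imported from ganeti.cmdlib.instance_utils, and _GetInstanceConsole from the new ganeti.cmdlib.instance_operation module; the removed logical units presumably move into that module as well, though this per-file view does not show their destination. As a minimal sketch of the resulting import surface, assuming only the two import paths visible in the hunk above (the wrapper function below is a hypothetical illustration, not code from this revision):

# Sketch: post-refactor imports as established by the hunk at lines 48-74.
# Only the two import paths are taken from the diff; the wrapper function
# is a hypothetical illustration of callers that keep working unchanged.
from ganeti.cmdlib.instance_utils import _CheckNodeHasOS
from ganeti.cmdlib.instance_operation import _GetInstanceConsole

def _ConsoleIfOsSupported(lu, cluster, instance, os_name, force_variant):
  # Raise OpPrereqError if the primary node lacks the OS (helper now lives
  # in instance_utils), then build the console dict via the helper that
  # moved to instance_operation.
  _CheckNodeHasOS(lu, instance.primary_node, os_name, force_variant)
  return _GetInstanceConsole(cluster, instance)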