Revision 13f6af81
b/Makefile.am | ||
---|---|---|
317 | 317 |
lib/cmdlib/instance.py \ |
318 | 318 |
lib/cmdlib/instance_storage.py \ |
319 | 319 |
lib/cmdlib/instance_migration.py \ |
320 |
lib/cmdlib/instance_operation.py \ |
|
320 | 321 |
lib/cmdlib/instance_utils.py \ |
321 | 322 |
lib/cmdlib/backup.py \ |
322 | 323 |
lib/cmdlib/query.py \ |
b/lib/cmdlib/__init__.py | ||
---|---|---|
74 | 74 |
LUInstanceMove, \ |
75 | 75 |
LUInstanceQuery, \ |
76 | 76 |
LUInstanceQueryData, \ |
77 |
LUInstanceStartup, \ |
|
78 |
LUInstanceShutdown, \ |
|
79 |
LUInstanceReinstall, \ |
|
80 |
LUInstanceReboot, \ |
|
81 |
LUInstanceConsole, \ |
|
82 | 77 |
LUInstanceMultiAlloc, \ |
83 | 78 |
LUInstanceSetParams, \ |
84 | 79 |
LUInstanceChangeGroup |
... | ... | |
91 | 86 |
from ganeti.cmdlib.instance_migration import \ |
92 | 87 |
LUInstanceFailover, \ |
93 | 88 |
LUInstanceMigrate |
89 |
from ganeti.cmdlib.instance_operation import \ |
|
90 |
LUInstanceStartup, \ |
|
91 |
LUInstanceShutdown, \ |
|
92 |
LUInstanceReinstall, \ |
|
93 |
LUInstanceReboot, \ |
|
94 |
LUInstanceConsole |
|
94 | 95 |
from ganeti.cmdlib.backup import \ |
95 | 96 |
LUBackupQuery, \ |
96 | 97 |
LUBackupPrepare, \ |
b/lib/cmdlib/instance.py | ||
---|---|---|
48 | 48 |
from ganeti.cmdlib.base import NoHooksLU, LogicalUnit, _QueryBase, \ |
49 | 49 |
ResultWithJobs |
50 | 50 |
|
51 |
from ganeti.cmdlib.common import INSTANCE_ONLINE, INSTANCE_DOWN, \
|
|
51 |
from ganeti.cmdlib.common import INSTANCE_DOWN, \ |
|
52 | 52 |
INSTANCE_NOT_RUNNING, CAN_CHANGE_INSTANCE_OFFLINE, _CheckNodeOnline, \ |
53 | 53 |
_ShareAll, _GetDefaultIAllocator, _CheckInstanceNodeGroups, \ |
54 | 54 |
_LoadNodeEvacResult, _CheckIAllocatorOrNode, _CheckParamsNotGlobal, \ |
... | ... | |
62 | 62 |
_CheckRADOSFreeSpace, _ComputeDiskSizePerVG, _GenerateDiskTemplate, \ |
63 | 63 |
_CreateBlockDev, _StartInstanceDisks, _ShutdownInstanceDisks, \ |
64 | 64 |
_AssembleInstanceDisks |
65 |
from ganeti.cmdlib.instance_operation import _GetInstanceConsole |
|
65 | 66 |
from ganeti.cmdlib.instance_utils import _BuildInstanceHookEnvByObject, \ |
66 | 67 |
_GetClusterDomainSecret, _BuildInstanceHookEnv, _NICListToTuple, \ |
67 | 68 |
_NICToTuple, _CheckNodeNotDrained, _RemoveInstance, _CopyLockList, \ |
68 | 69 |
_ReleaseLocks, _CheckNodeVmCapable, _CheckTargetNodeIPolicy, \ |
69 | 70 |
_GetInstanceInfoText, _RemoveDisks, _CheckNodeFreeMemory, \ |
70 |
_CheckInstanceBridgesExist, _CheckNicsBridgesExist |
|
71 |
_CheckInstanceBridgesExist, _CheckNicsBridgesExist, _CheckNodeHasOS
|
|
71 | 72 |
|
72 | 73 |
import ganeti.masterd.instance |
73 | 74 |
|
... | ... | |
320 | 321 |
raise errors.OpPrereqError("Unsupported OS variant", errors.ECODE_INVAL) |
321 | 322 |
|
322 | 323 |
|
323 |
def _CheckNodeHasOS(lu, node, os_name, force_variant): |
|
324 |
"""Ensure that a node supports a given OS. |
|
325 |
|
|
326 |
@param lu: the LU on behalf of which we make the check |
|
327 |
@param node: the node to check |
|
328 |
@param os_name: the OS to query about |
|
329 |
@param force_variant: whether to ignore variant errors |
|
330 |
@raise errors.OpPrereqError: if the node is not supporting the OS |
|
331 |
|
|
332 |
""" |
|
333 |
result = lu.rpc.call_os_get(node, os_name) |
|
334 |
result.Raise("OS '%s' not in supported OS list for node %s" % |
|
335 |
(os_name, node), |
|
336 |
prereq=True, ecode=errors.ECODE_INVAL) |
|
337 |
if not force_variant: |
|
338 |
_CheckOSVariant(result.payload, os_name) |
|
339 |
|
|
340 |
|
|
341 | 324 |
class LUInstanceCreate(LogicalUnit): |
342 | 325 |
"""Create an instance. |
343 | 326 |
|
... | ... | |
1826 | 1809 |
(instance.name, target_node, msg)) |
1827 | 1810 |
|
1828 | 1811 |
|
1829 |
def _GetInstanceConsole(cluster, instance): |
|
1830 |
"""Returns console information for an instance. |
|
1831 |
|
|
1832 |
@type cluster: L{objects.Cluster} |
|
1833 |
@type instance: L{objects.Instance} |
|
1834 |
@rtype: dict |
|
1835 |
|
|
1836 |
""" |
|
1837 |
hyper = hypervisor.GetHypervisorClass(instance.hypervisor) |
|
1838 |
# beparams and hvparams are passed separately, to avoid editing the |
|
1839 |
# instance and then saving the defaults in the instance itself. |
|
1840 |
hvparams = cluster.FillHV(instance) |
|
1841 |
beparams = cluster.FillBE(instance) |
|
1842 |
console = hyper.GetInstanceConsole(instance, hvparams, beparams) |
|
1843 |
|
|
1844 |
assert console.instance == instance.name |
|
1845 |
assert console.Validate() |
|
1846 |
|
|
1847 |
return console.ToDict() |
|
1848 |
|
|
1849 |
|
|
1850 | 1812 |
class _InstanceQuery(_QueryBase): |
1851 | 1813 |
FIELDS = query.INSTANCE_FIELDS |
1852 | 1814 |
|
... | ... | |
2241 | 2203 |
return result |
2242 | 2204 |
|
2243 | 2205 |
|
2244 |
class LUInstanceStartup(LogicalUnit): |
|
2245 |
"""Starts an instance. |
|
2246 |
|
|
2247 |
""" |
|
2248 |
HPATH = "instance-start" |
|
2249 |
HTYPE = constants.HTYPE_INSTANCE |
|
2250 |
REQ_BGL = False |
|
2251 |
|
|
2252 |
def CheckArguments(self): |
|
2253 |
# extra beparams |
|
2254 |
if self.op.beparams: |
|
2255 |
# fill the beparams dict |
|
2256 |
objects.UpgradeBeParams(self.op.beparams) |
|
2257 |
utils.ForceDictType(self.op.beparams, constants.BES_PARAMETER_TYPES) |
|
2258 |
|
|
2259 |
def ExpandNames(self): |
|
2260 |
self._ExpandAndLockInstance() |
|
2261 |
self.recalculate_locks[locking.LEVEL_NODE_RES] = constants.LOCKS_REPLACE |
|
2262 |
|
|
2263 |
def DeclareLocks(self, level): |
|
2264 |
if level == locking.LEVEL_NODE_RES: |
|
2265 |
self._LockInstancesNodes(primary_only=True, level=locking.LEVEL_NODE_RES) |
|
2266 |
|
|
2267 |
def BuildHooksEnv(self): |
|
2268 |
"""Build hooks env. |
|
2269 |
|
|
2270 |
This runs on master, primary and secondary nodes of the instance. |
|
2271 |
|
|
2272 |
""" |
|
2273 |
env = { |
|
2274 |
"FORCE": self.op.force, |
|
2275 |
} |
|
2276 |
|
|
2277 |
env.update(_BuildInstanceHookEnvByObject(self, self.instance)) |
|
2278 |
|
|
2279 |
return env |
|
2280 |
|
|
2281 |
def BuildHooksNodes(self): |
|
2282 |
"""Build hooks nodes. |
|
2283 |
|
|
2284 |
""" |
|
2285 |
nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes) |
|
2286 |
return (nl, nl) |
|
2287 |
|
|
2288 |
def CheckPrereq(self): |
|
2289 |
"""Check prerequisites. |
|
2290 |
|
|
2291 |
This checks that the instance is in the cluster. |
|
2292 |
|
|
2293 |
""" |
|
2294 |
self.instance = instance = self.cfg.GetInstanceInfo(self.op.instance_name) |
|
2295 |
assert self.instance is not None, \ |
|
2296 |
"Cannot retrieve locked instance %s" % self.op.instance_name |
|
2297 |
|
|
2298 |
# extra hvparams |
|
2299 |
if self.op.hvparams: |
|
2300 |
# check hypervisor parameter syntax (locally) |
|
2301 |
cluster = self.cfg.GetClusterInfo() |
|
2302 |
utils.ForceDictType(self.op.hvparams, constants.HVS_PARAMETER_TYPES) |
|
2303 |
filled_hvp = cluster.FillHV(instance) |
|
2304 |
filled_hvp.update(self.op.hvparams) |
|
2305 |
hv_type = hypervisor.GetHypervisorClass(instance.hypervisor) |
|
2306 |
hv_type.CheckParameterSyntax(filled_hvp) |
|
2307 |
_CheckHVParams(self, instance.all_nodes, instance.hypervisor, filled_hvp) |
|
2308 |
|
|
2309 |
_CheckInstanceState(self, instance, INSTANCE_ONLINE) |
|
2310 |
|
|
2311 |
self.primary_offline = self.cfg.GetNodeInfo(instance.primary_node).offline |
|
2312 |
|
|
2313 |
if self.primary_offline and self.op.ignore_offline_nodes: |
|
2314 |
self.LogWarning("Ignoring offline primary node") |
|
2315 |
|
|
2316 |
if self.op.hvparams or self.op.beparams: |
|
2317 |
self.LogWarning("Overridden parameters are ignored") |
|
2318 |
else: |
|
2319 |
_CheckNodeOnline(self, instance.primary_node) |
|
2320 |
|
|
2321 |
bep = self.cfg.GetClusterInfo().FillBE(instance) |
|
2322 |
bep.update(self.op.beparams) |
|
2323 |
|
|
2324 |
# check bridges existence |
|
2325 |
_CheckInstanceBridgesExist(self, instance) |
|
2326 |
|
|
2327 |
remote_info = self.rpc.call_instance_info(instance.primary_node, |
|
2328 |
instance.name, |
|
2329 |
instance.hypervisor) |
|
2330 |
remote_info.Raise("Error checking node %s" % instance.primary_node, |
|
2331 |
prereq=True, ecode=errors.ECODE_ENVIRON) |
|
2332 |
if not remote_info.payload: # not running already |
|
2333 |
_CheckNodeFreeMemory(self, instance.primary_node, |
|
2334 |
"starting instance %s" % instance.name, |
|
2335 |
bep[constants.BE_MINMEM], instance.hypervisor) |
|
2336 |
|
|
2337 |
def Exec(self, feedback_fn): |
|
2338 |
"""Start the instance. |
|
2339 |
|
|
2340 |
""" |
|
2341 |
instance = self.instance |
|
2342 |
force = self.op.force |
|
2343 |
reason = self.op.reason |
|
2344 |
|
|
2345 |
if not self.op.no_remember: |
|
2346 |
self.cfg.MarkInstanceUp(instance.name) |
|
2347 |
|
|
2348 |
if self.primary_offline: |
|
2349 |
assert self.op.ignore_offline_nodes |
|
2350 |
self.LogInfo("Primary node offline, marked instance as started") |
|
2351 |
else: |
|
2352 |
node_current = instance.primary_node |
|
2353 |
|
|
2354 |
_StartInstanceDisks(self, instance, force) |
|
2355 |
|
|
2356 |
result = \ |
|
2357 |
self.rpc.call_instance_start(node_current, |
|
2358 |
(instance, self.op.hvparams, |
|
2359 |
self.op.beparams), |
|
2360 |
self.op.startup_paused, reason) |
|
2361 |
msg = result.fail_msg |
|
2362 |
if msg: |
|
2363 |
_ShutdownInstanceDisks(self, instance) |
|
2364 |
raise errors.OpExecError("Could not start instance: %s" % msg) |
|
2365 |
|
|
2366 |
|
|
2367 |
class LUInstanceShutdown(LogicalUnit): |
|
2368 |
"""Shutdown an instance. |
|
2369 |
|
|
2370 |
""" |
|
2371 |
HPATH = "instance-stop" |
|
2372 |
HTYPE = constants.HTYPE_INSTANCE |
|
2373 |
REQ_BGL = False |
|
2374 |
|
|
2375 |
def ExpandNames(self): |
|
2376 |
self._ExpandAndLockInstance() |
|
2377 |
|
|
2378 |
def BuildHooksEnv(self): |
|
2379 |
"""Build hooks env. |
|
2380 |
|
|
2381 |
This runs on master, primary and secondary nodes of the instance. |
|
2382 |
|
|
2383 |
""" |
|
2384 |
env = _BuildInstanceHookEnvByObject(self, self.instance) |
|
2385 |
env["TIMEOUT"] = self.op.timeout |
|
2386 |
return env |
|
2387 |
|
|
2388 |
def BuildHooksNodes(self): |
|
2389 |
"""Build hooks nodes. |
|
2390 |
|
|
2391 |
""" |
|
2392 |
nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes) |
|
2393 |
return (nl, nl) |
|
2394 |
|
|
2395 |
def CheckPrereq(self): |
|
2396 |
"""Check prerequisites. |
|
2397 |
|
|
2398 |
This checks that the instance is in the cluster. |
|
2399 |
|
|
2400 |
""" |
|
2401 |
self.instance = self.cfg.GetInstanceInfo(self.op.instance_name) |
|
2402 |
assert self.instance is not None, \ |
|
2403 |
"Cannot retrieve locked instance %s" % self.op.instance_name |
|
2404 |
|
|
2405 |
if not self.op.force: |
|
2406 |
_CheckInstanceState(self, self.instance, INSTANCE_ONLINE) |
|
2407 |
else: |
|
2408 |
self.LogWarning("Ignoring offline instance check") |
|
2409 |
|
|
2410 |
self.primary_offline = \ |
|
2411 |
self.cfg.GetNodeInfo(self.instance.primary_node).offline |
|
2412 |
|
|
2413 |
if self.primary_offline and self.op.ignore_offline_nodes: |
|
2414 |
self.LogWarning("Ignoring offline primary node") |
|
2415 |
else: |
|
2416 |
_CheckNodeOnline(self, self.instance.primary_node) |
|
2417 |
|
|
2418 |
def Exec(self, feedback_fn): |
|
2419 |
"""Shutdown the instance. |
|
2420 |
|
|
2421 |
""" |
|
2422 |
instance = self.instance |
|
2423 |
node_current = instance.primary_node |
|
2424 |
timeout = self.op.timeout |
|
2425 |
reason = self.op.reason |
|
2426 |
|
|
2427 |
# If the instance is offline we shouldn't mark it as down, as that |
|
2428 |
# resets the offline flag. |
|
2429 |
if not self.op.no_remember and instance.admin_state in INSTANCE_ONLINE: |
|
2430 |
self.cfg.MarkInstanceDown(instance.name) |
|
2431 |
|
|
2432 |
if self.primary_offline: |
|
2433 |
assert self.op.ignore_offline_nodes |
|
2434 |
self.LogInfo("Primary node offline, marked instance as stopped") |
|
2435 |
else: |
|
2436 |
result = self.rpc.call_instance_shutdown(node_current, instance, timeout, |
|
2437 |
reason) |
|
2438 |
msg = result.fail_msg |
|
2439 |
if msg: |
|
2440 |
self.LogWarning("Could not shutdown instance: %s", msg) |
|
2441 |
|
|
2442 |
_ShutdownInstanceDisks(self, instance) |
|
2443 |
|
|
2444 |
|
|
2445 |
class LUInstanceReinstall(LogicalUnit): |
|
2446 |
"""Reinstall an instance. |
|
2447 |
|
|
2448 |
""" |
|
2449 |
HPATH = "instance-reinstall" |
|
2450 |
HTYPE = constants.HTYPE_INSTANCE |
|
2451 |
REQ_BGL = False |
|
2452 |
|
|
2453 |
def ExpandNames(self): |
|
2454 |
self._ExpandAndLockInstance() |
|
2455 |
|
|
2456 |
def BuildHooksEnv(self): |
|
2457 |
"""Build hooks env. |
|
2458 |
|
|
2459 |
This runs on master, primary and secondary nodes of the instance. |
|
2460 |
|
|
2461 |
""" |
|
2462 |
return _BuildInstanceHookEnvByObject(self, self.instance) |
|
2463 |
|
|
2464 |
def BuildHooksNodes(self): |
|
2465 |
"""Build hooks nodes. |
|
2466 |
|
|
2467 |
""" |
|
2468 |
nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes) |
|
2469 |
return (nl, nl) |
|
2470 |
|
|
2471 |
def CheckPrereq(self): |
|
2472 |
"""Check prerequisites. |
|
2473 |
|
|
2474 |
This checks that the instance is in the cluster and is not running. |
|
2475 |
|
|
2476 |
""" |
|
2477 |
instance = self.cfg.GetInstanceInfo(self.op.instance_name) |
|
2478 |
assert instance is not None, \ |
|
2479 |
"Cannot retrieve locked instance %s" % self.op.instance_name |
|
2480 |
_CheckNodeOnline(self, instance.primary_node, "Instance primary node" |
|
2481 |
" offline, cannot reinstall") |
|
2482 |
|
|
2483 |
if instance.disk_template == constants.DT_DISKLESS: |
|
2484 |
raise errors.OpPrereqError("Instance '%s' has no disks" % |
|
2485 |
self.op.instance_name, |
|
2486 |
errors.ECODE_INVAL) |
|
2487 |
_CheckInstanceState(self, instance, INSTANCE_DOWN, msg="cannot reinstall") |
|
2488 |
|
|
2489 |
if self.op.os_type is not None: |
|
2490 |
# OS verification |
|
2491 |
pnode = _ExpandNodeName(self.cfg, instance.primary_node) |
|
2492 |
_CheckNodeHasOS(self, pnode, self.op.os_type, self.op.force_variant) |
|
2493 |
instance_os = self.op.os_type |
|
2494 |
else: |
|
2495 |
instance_os = instance.os |
|
2496 |
|
|
2497 |
nodelist = list(instance.all_nodes) |
|
2498 |
|
|
2499 |
if self.op.osparams: |
|
2500 |
i_osdict = _GetUpdatedParams(instance.osparams, self.op.osparams) |
|
2501 |
_CheckOSParams(self, True, nodelist, instance_os, i_osdict) |
|
2502 |
self.os_inst = i_osdict # the new dict (without defaults) |
|
2503 |
else: |
|
2504 |
self.os_inst = None |
|
2505 |
|
|
2506 |
self.instance = instance |
|
2507 |
|
|
2508 |
def Exec(self, feedback_fn): |
|
2509 |
"""Reinstall the instance. |
|
2510 |
|
|
2511 |
""" |
|
2512 |
inst = self.instance |
|
2513 |
|
|
2514 |
if self.op.os_type is not None: |
|
2515 |
feedback_fn("Changing OS to '%s'..." % self.op.os_type) |
|
2516 |
inst.os = self.op.os_type |
|
2517 |
# Write to configuration |
|
2518 |
self.cfg.Update(inst, feedback_fn) |
|
2519 |
|
|
2520 |
_StartInstanceDisks(self, inst, None) |
|
2521 |
try: |
|
2522 |
feedback_fn("Running the instance OS create scripts...") |
|
2523 |
# FIXME: pass debug option from opcode to backend |
|
2524 |
result = self.rpc.call_instance_os_add(inst.primary_node, |
|
2525 |
(inst, self.os_inst), True, |
|
2526 |
self.op.debug_level) |
|
2527 |
result.Raise("Could not install OS for instance %s on node %s" % |
|
2528 |
(inst.name, inst.primary_node)) |
|
2529 |
finally: |
|
2530 |
_ShutdownInstanceDisks(self, inst) |
|
2531 |
|
|
2532 |
|
|
2533 |
class LUInstanceReboot(LogicalUnit): |
|
2534 |
"""Reboot an instance. |
|
2535 |
|
|
2536 |
""" |
|
2537 |
HPATH = "instance-reboot" |
|
2538 |
HTYPE = constants.HTYPE_INSTANCE |
|
2539 |
REQ_BGL = False |
|
2540 |
|
|
2541 |
def ExpandNames(self): |
|
2542 |
self._ExpandAndLockInstance() |
|
2543 |
|
|
2544 |
def BuildHooksEnv(self): |
|
2545 |
"""Build hooks env. |
|
2546 |
|
|
2547 |
This runs on master, primary and secondary nodes of the instance. |
|
2548 |
|
|
2549 |
""" |
|
2550 |
env = { |
|
2551 |
"IGNORE_SECONDARIES": self.op.ignore_secondaries, |
|
2552 |
"REBOOT_TYPE": self.op.reboot_type, |
|
2553 |
"SHUTDOWN_TIMEOUT": self.op.shutdown_timeout, |
|
2554 |
} |
|
2555 |
|
|
2556 |
env.update(_BuildInstanceHookEnvByObject(self, self.instance)) |
|
2557 |
|
|
2558 |
return env |
|
2559 |
|
|
2560 |
def BuildHooksNodes(self): |
|
2561 |
"""Build hooks nodes. |
|
2562 |
|
|
2563 |
""" |
|
2564 |
nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes) |
|
2565 |
return (nl, nl) |
|
2566 |
|
|
2567 |
def CheckPrereq(self): |
|
2568 |
"""Check prerequisites. |
|
2569 |
|
|
2570 |
This checks that the instance is in the cluster. |
|
2571 |
|
|
2572 |
""" |
|
2573 |
self.instance = instance = self.cfg.GetInstanceInfo(self.op.instance_name) |
|
2574 |
assert self.instance is not None, \ |
|
2575 |
"Cannot retrieve locked instance %s" % self.op.instance_name |
|
2576 |
_CheckInstanceState(self, instance, INSTANCE_ONLINE) |
|
2577 |
_CheckNodeOnline(self, instance.primary_node) |
|
2578 |
|
|
2579 |
# check bridges existence |
|
2580 |
_CheckInstanceBridgesExist(self, instance) |
|
2581 |
|
|
2582 |
def Exec(self, feedback_fn): |
|
2583 |
"""Reboot the instance. |
|
2584 |
|
|
2585 |
""" |
|
2586 |
instance = self.instance |
|
2587 |
ignore_secondaries = self.op.ignore_secondaries |
|
2588 |
reboot_type = self.op.reboot_type |
|
2589 |
reason = self.op.reason |
|
2590 |
|
|
2591 |
remote_info = self.rpc.call_instance_info(instance.primary_node, |
|
2592 |
instance.name, |
|
2593 |
instance.hypervisor) |
|
2594 |
remote_info.Raise("Error checking node %s" % instance.primary_node) |
|
2595 |
instance_running = bool(remote_info.payload) |
|
2596 |
|
|
2597 |
node_current = instance.primary_node |
|
2598 |
|
|
2599 |
if instance_running and reboot_type in [constants.INSTANCE_REBOOT_SOFT, |
|
2600 |
constants.INSTANCE_REBOOT_HARD]: |
|
2601 |
for disk in instance.disks: |
|
2602 |
self.cfg.SetDiskID(disk, node_current) |
|
2603 |
result = self.rpc.call_instance_reboot(node_current, instance, |
|
2604 |
reboot_type, |
|
2605 |
self.op.shutdown_timeout, reason) |
|
2606 |
result.Raise("Could not reboot instance") |
|
2607 |
else: |
|
2608 |
if instance_running: |
|
2609 |
result = self.rpc.call_instance_shutdown(node_current, instance, |
|
2610 |
self.op.shutdown_timeout, |
|
2611 |
reason) |
|
2612 |
result.Raise("Could not shutdown instance for full reboot") |
|
2613 |
_ShutdownInstanceDisks(self, instance) |
|
2614 |
else: |
|
2615 |
self.LogInfo("Instance %s was already stopped, starting now", |
|
2616 |
instance.name) |
|
2617 |
_StartInstanceDisks(self, instance, ignore_secondaries) |
|
2618 |
result = self.rpc.call_instance_start(node_current, |
|
2619 |
(instance, None, None), False, |
|
2620 |
reason) |
|
2621 |
msg = result.fail_msg |
|
2622 |
if msg: |
|
2623 |
_ShutdownInstanceDisks(self, instance) |
|
2624 |
raise errors.OpExecError("Could not start instance for" |
|
2625 |
" full reboot: %s" % msg) |
|
2626 |
|
|
2627 |
self.cfg.MarkInstanceUp(instance.name) |
|
2628 |
|
|
2629 |
|
|
2630 |
class LUInstanceConsole(NoHooksLU): |
|
2631 |
"""Connect to an instance's console. |
|
2632 |
|
|
2633 |
This is somewhat special in that it returns the command line that |
|
2634 |
you need to run on the master node in order to connect to the |
|
2635 |
console. |
|
2636 |
|
|
2637 |
""" |
|
2638 |
REQ_BGL = False |
|
2639 |
|
|
2640 |
def ExpandNames(self): |
|
2641 |
self.share_locks = _ShareAll() |
|
2642 |
self._ExpandAndLockInstance() |
|
2643 |
|
|
2644 |
def CheckPrereq(self): |
|
2645 |
"""Check prerequisites. |
|
2646 |
|
|
2647 |
This checks that the instance is in the cluster. |
|
2648 |
|
|
2649 |
""" |
|
2650 |
self.instance = self.cfg.GetInstanceInfo(self.op.instance_name) |
|
2651 |
assert self.instance is not None, \ |
|
2652 |
"Cannot retrieve locked instance %s" % self.op.instance_name |
|
2653 |
_CheckNodeOnline(self, self.instance.primary_node) |
|
2654 |
|
|
2655 |
def Exec(self, feedback_fn): |
|
2656 |
"""Connect to the console of an instance |
|
2657 |
|
|
2658 |
""" |
|
2659 |
instance = self.instance |
|
2660 |
node = instance.primary_node |
|
2661 |
|
|
2662 |
node_insts = self.rpc.call_instance_list([node], |
|
2663 |
[instance.hypervisor])[node] |
|
2664 |
node_insts.Raise("Can't get node information from %s" % node) |
|
2665 |
|
|
2666 |
if instance.name not in node_insts.payload: |
|
2667 |
if instance.admin_state == constants.ADMINST_UP: |
|
2668 |
state = constants.INSTST_ERRORDOWN |
|
2669 |
elif instance.admin_state == constants.ADMINST_DOWN: |
|
2670 |
state = constants.INSTST_ADMINDOWN |
|
2671 |
else: |
|
2672 |
state = constants.INSTST_ADMINOFFLINE |
|
2673 |
raise errors.OpExecError("Instance %s is not running (state %s)" % |
|
2674 |
(instance.name, state)) |
|
2675 |
|
|
2676 |
logging.debug("Connecting to console of %s on %s", instance.name, node) |
|
2677 |
|
|
2678 |
return _GetInstanceConsole(self.cfg.GetClusterInfo(), instance) |
|
2679 |
|
|
2680 |
|
|
2681 | 2206 |
class LUInstanceMultiAlloc(NoHooksLU): |
2682 | 2207 |
"""Allocates multiple instances at the same time. |
2683 | 2208 |
|
b/lib/cmdlib/instance_operation.py | ||
---|---|---|
1 |
# |
|
2 |
# |
|
3 |
|
|
4 |
# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013 Google Inc. |
|
5 |
# |
|
6 |
# This program is free software; you can redistribute it and/or modify |
|
7 |
# it under the terms of the GNU General Public License as published by |
|
8 |
# the Free Software Foundation; either version 2 of the License, or |
|
9 |
# (at your option) any later version. |
|
10 |
# |
|
11 |
# This program is distributed in the hope that it will be useful, but |
|
12 |
# WITHOUT ANY WARRANTY; without even the implied warranty of |
|
13 |
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU |
|
14 |
# General Public License for more details. |
|
15 |
# |
|
16 |
# You should have received a copy of the GNU General Public License |
|
17 |
# along with this program; if not, write to the Free Software |
|
18 |
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA |
|
19 |
# 02110-1301, USA. |
|
20 |
|
|
21 |
|
|
22 |
"""Logical units dealing with instance operations (start/stop/...). |
|
23 |
|
|
24 |
Those operations have in common that they affect the operating system in a |
|
25 |
running instance directly. |
|
26 |
|
|
27 |
""" |
|
28 |
|
|
29 |
import logging |
|
30 |
|
|
31 |
from ganeti import constants |
|
32 |
from ganeti import errors |
|
33 |
from ganeti import hypervisor |
|
34 |
from ganeti import locking |
|
35 |
from ganeti import objects |
|
36 |
from ganeti import utils |
|
37 |
from ganeti.cmdlib.base import LogicalUnit, NoHooksLU |
|
38 |
from ganeti.cmdlib.common import INSTANCE_ONLINE, INSTANCE_DOWN, \ |
|
39 |
_CheckHVParams, _CheckInstanceState, _CheckNodeOnline, _ExpandNodeName, \ |
|
40 |
_GetUpdatedParams, _CheckOSParams, _ShareAll |
|
41 |
from ganeti.cmdlib.instance_storage import _StartInstanceDisks, \ |
|
42 |
_ShutdownInstanceDisks |
|
43 |
from ganeti.cmdlib.instance_utils import _BuildInstanceHookEnvByObject, \ |
|
44 |
_CheckInstanceBridgesExist, _CheckNodeFreeMemory, _CheckNodeHasOS |
|
45 |
|
|
46 |
|
|
47 |
class LUInstanceStartup(LogicalUnit): |
|
48 |
"""Starts an instance. |
|
49 |
|
|
50 |
""" |
|
51 |
HPATH = "instance-start" |
|
52 |
HTYPE = constants.HTYPE_INSTANCE |
|
53 |
REQ_BGL = False |
|
54 |
|
|
55 |
def CheckArguments(self): |
|
56 |
# extra beparams |
|
57 |
if self.op.beparams: |
|
58 |
# fill the beparams dict |
|
59 |
objects.UpgradeBeParams(self.op.beparams) |
|
60 |
utils.ForceDictType(self.op.beparams, constants.BES_PARAMETER_TYPES) |
|
61 |
|
|
62 |
def ExpandNames(self): |
|
63 |
self._ExpandAndLockInstance() |
|
64 |
self.recalculate_locks[locking.LEVEL_NODE_RES] = constants.LOCKS_REPLACE |
|
65 |
|
|
66 |
def DeclareLocks(self, level): |
|
67 |
if level == locking.LEVEL_NODE_RES: |
|
68 |
self._LockInstancesNodes(primary_only=True, level=locking.LEVEL_NODE_RES) |
|
69 |
|
|
70 |
def BuildHooksEnv(self): |
|
71 |
"""Build hooks env. |
|
72 |
|
|
73 |
This runs on master, primary and secondary nodes of the instance. |
|
74 |
|
|
75 |
""" |
|
76 |
env = { |
|
77 |
"FORCE": self.op.force, |
|
78 |
} |
|
79 |
|
|
80 |
env.update(_BuildInstanceHookEnvByObject(self, self.instance)) |
|
81 |
|
|
82 |
return env |
|
83 |
|
|
84 |
def BuildHooksNodes(self): |
|
85 |
"""Build hooks nodes. |
|
86 |
|
|
87 |
""" |
|
88 |
nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes) |
|
89 |
return (nl, nl) |
|
90 |
|
|
91 |
def CheckPrereq(self): |
|
92 |
"""Check prerequisites. |
|
93 |
|
|
94 |
This checks that the instance is in the cluster. |
|
95 |
|
|
96 |
""" |
|
97 |
self.instance = instance = self.cfg.GetInstanceInfo(self.op.instance_name) |
|
98 |
assert self.instance is not None, \ |
|
99 |
"Cannot retrieve locked instance %s" % self.op.instance_name |
|
100 |
|
|
101 |
# extra hvparams |
|
102 |
if self.op.hvparams: |
|
103 |
# check hypervisor parameter syntax (locally) |
|
104 |
cluster = self.cfg.GetClusterInfo() |
|
105 |
utils.ForceDictType(self.op.hvparams, constants.HVS_PARAMETER_TYPES) |
|
106 |
filled_hvp = cluster.FillHV(instance) |
|
107 |
filled_hvp.update(self.op.hvparams) |
|
108 |
hv_type = hypervisor.GetHypervisorClass(instance.hypervisor) |
|
109 |
hv_type.CheckParameterSyntax(filled_hvp) |
|
110 |
_CheckHVParams(self, instance.all_nodes, instance.hypervisor, filled_hvp) |
|
111 |
|
|
112 |
_CheckInstanceState(self, instance, INSTANCE_ONLINE) |
|
113 |
|
|
114 |
self.primary_offline = self.cfg.GetNodeInfo(instance.primary_node).offline |
|
115 |
|
|
116 |
if self.primary_offline and self.op.ignore_offline_nodes: |
|
117 |
self.LogWarning("Ignoring offline primary node") |
|
118 |
|
|
119 |
if self.op.hvparams or self.op.beparams: |
|
120 |
self.LogWarning("Overridden parameters are ignored") |
|
121 |
else: |
|
122 |
_CheckNodeOnline(self, instance.primary_node) |
|
123 |
|
|
124 |
bep = self.cfg.GetClusterInfo().FillBE(instance) |
|
125 |
bep.update(self.op.beparams) |
|
126 |
|
|
127 |
# check bridges existence |
|
128 |
_CheckInstanceBridgesExist(self, instance) |
|
129 |
|
|
130 |
remote_info = self.rpc.call_instance_info(instance.primary_node, |
|
131 |
instance.name, |
|
132 |
instance.hypervisor) |
|
133 |
remote_info.Raise("Error checking node %s" % instance.primary_node, |
|
134 |
prereq=True, ecode=errors.ECODE_ENVIRON) |
|
135 |
if not remote_info.payload: # not running already |
|
136 |
_CheckNodeFreeMemory(self, instance.primary_node, |
|
137 |
"starting instance %s" % instance.name, |
|
138 |
bep[constants.BE_MINMEM], instance.hypervisor) |
|
139 |
|
|
140 |
def Exec(self, feedback_fn): |
|
141 |
"""Start the instance. |
|
142 |
|
|
143 |
""" |
|
144 |
instance = self.instance |
|
145 |
force = self.op.force |
|
146 |
reason = self.op.reason |
|
147 |
|
|
148 |
if not self.op.no_remember: |
|
149 |
self.cfg.MarkInstanceUp(instance.name) |
|
150 |
|
|
151 |
if self.primary_offline: |
|
152 |
assert self.op.ignore_offline_nodes |
|
153 |
self.LogInfo("Primary node offline, marked instance as started") |
|
154 |
else: |
|
155 |
node_current = instance.primary_node |
|
156 |
|
|
157 |
_StartInstanceDisks(self, instance, force) |
|
158 |
|
|
159 |
result = \ |
|
160 |
self.rpc.call_instance_start(node_current, |
|
161 |
(instance, self.op.hvparams, |
|
162 |
self.op.beparams), |
|
163 |
self.op.startup_paused, reason) |
|
164 |
msg = result.fail_msg |
|
165 |
if msg: |
|
166 |
_ShutdownInstanceDisks(self, instance) |
|
167 |
raise errors.OpExecError("Could not start instance: %s" % msg) |
|
168 |
|
|
169 |
|
|
170 |
class LUInstanceShutdown(LogicalUnit): |
|
171 |
"""Shutdown an instance. |
|
172 |
|
|
173 |
""" |
|
174 |
HPATH = "instance-stop" |
|
175 |
HTYPE = constants.HTYPE_INSTANCE |
|
176 |
REQ_BGL = False |
|
177 |
|
|
178 |
def ExpandNames(self): |
|
179 |
self._ExpandAndLockInstance() |
|
180 |
|
|
181 |
def BuildHooksEnv(self): |
|
182 |
"""Build hooks env. |
|
183 |
|
|
184 |
This runs on master, primary and secondary nodes of the instance. |
|
185 |
|
|
186 |
""" |
|
187 |
env = _BuildInstanceHookEnvByObject(self, self.instance) |
|
188 |
env["TIMEOUT"] = self.op.timeout |
|
189 |
return env |
|
190 |
|
|
191 |
def BuildHooksNodes(self): |
|
192 |
"""Build hooks nodes. |
|
193 |
|
|
194 |
""" |
|
195 |
nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes) |
|
196 |
return (nl, nl) |
|
197 |
|
|
198 |
def CheckPrereq(self): |
|
199 |
"""Check prerequisites. |
|
200 |
|
|
201 |
This checks that the instance is in the cluster. |
|
202 |
|
|
203 |
""" |
|
204 |
self.instance = self.cfg.GetInstanceInfo(self.op.instance_name) |
|
205 |
assert self.instance is not None, \ |
|
206 |
"Cannot retrieve locked instance %s" % self.op.instance_name |
|
207 |
|
|
208 |
if not self.op.force: |
|
209 |
_CheckInstanceState(self, self.instance, INSTANCE_ONLINE) |
|
210 |
else: |
|
211 |
self.LogWarning("Ignoring offline instance check") |
|
212 |
|
|
213 |
self.primary_offline = \ |
|
214 |
self.cfg.GetNodeInfo(self.instance.primary_node).offline |
|
215 |
|
|
216 |
if self.primary_offline and self.op.ignore_offline_nodes: |
|
217 |
self.LogWarning("Ignoring offline primary node") |
|
218 |
else: |
|
219 |
_CheckNodeOnline(self, self.instance.primary_node) |
|
220 |
|
|
221 |
def Exec(self, feedback_fn): |
|
222 |
"""Shutdown the instance. |
|
223 |
|
|
224 |
""" |
|
225 |
instance = self.instance |
|
226 |
node_current = instance.primary_node |
|
227 |
timeout = self.op.timeout |
|
228 |
reason = self.op.reason |
|
229 |
|
|
230 |
# If the instance is offline we shouldn't mark it as down, as that |
|
231 |
# resets the offline flag. |
|
232 |
if not self.op.no_remember and instance.admin_state in INSTANCE_ONLINE: |
|
233 |
self.cfg.MarkInstanceDown(instance.name) |
|
234 |
|
|
235 |
if self.primary_offline: |
|
236 |
assert self.op.ignore_offline_nodes |
|
237 |
self.LogInfo("Primary node offline, marked instance as stopped") |
|
238 |
else: |
|
239 |
result = self.rpc.call_instance_shutdown(node_current, instance, timeout, |
|
240 |
reason) |
|
241 |
msg = result.fail_msg |
|
242 |
if msg: |
|
243 |
self.LogWarning("Could not shutdown instance: %s", msg) |
|
244 |
|
|
245 |
_ShutdownInstanceDisks(self, instance) |
|
246 |
|
|
247 |
|
|
248 |
class LUInstanceReinstall(LogicalUnit): |
|
249 |
"""Reinstall an instance. |
|
250 |
|
|
251 |
""" |
|
252 |
HPATH = "instance-reinstall" |
|
253 |
HTYPE = constants.HTYPE_INSTANCE |
|
254 |
REQ_BGL = False |
|
255 |
|
|
256 |
def ExpandNames(self): |
|
257 |
self._ExpandAndLockInstance() |
|
258 |
|
|
259 |
def BuildHooksEnv(self): |
|
260 |
"""Build hooks env. |
|
261 |
|
|
262 |
This runs on master, primary and secondary nodes of the instance. |
|
263 |
|
|
264 |
""" |
|
265 |
return _BuildInstanceHookEnvByObject(self, self.instance) |
|
266 |
|
|
267 |
def BuildHooksNodes(self): |
|
268 |
"""Build hooks nodes. |
|
269 |
|
|
270 |
""" |
|
271 |
nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes) |
|
272 |
return (nl, nl) |
|
273 |
|
|
274 |
def CheckPrereq(self): |
|
275 |
"""Check prerequisites. |
|
276 |
|
|
277 |
This checks that the instance is in the cluster and is not running. |
|
278 |
|
|
279 |
""" |
|
280 |
instance = self.cfg.GetInstanceInfo(self.op.instance_name) |
|
281 |
assert instance is not None, \ |
|
282 |
"Cannot retrieve locked instance %s" % self.op.instance_name |
|
283 |
_CheckNodeOnline(self, instance.primary_node, "Instance primary node" |
|
284 |
" offline, cannot reinstall") |
|
285 |
|
|
286 |
if instance.disk_template == constants.DT_DISKLESS: |
|
287 |
raise errors.OpPrereqError("Instance '%s' has no disks" % |
|
288 |
self.op.instance_name, |
|
289 |
errors.ECODE_INVAL) |
|
290 |
_CheckInstanceState(self, instance, INSTANCE_DOWN, msg="cannot reinstall") |
|
291 |
|
|
292 |
if self.op.os_type is not None: |
|
293 |
# OS verification |
|
294 |
pnode = _ExpandNodeName(self.cfg, instance.primary_node) |
|
295 |
_CheckNodeHasOS(self, pnode, self.op.os_type, self.op.force_variant) |
|
296 |
instance_os = self.op.os_type |
|
297 |
else: |
|
298 |
instance_os = instance.os |
|
299 |
|
|
300 |
nodelist = list(instance.all_nodes) |
|
301 |
|
|
302 |
if self.op.osparams: |
|
303 |
i_osdict = _GetUpdatedParams(instance.osparams, self.op.osparams) |
|
304 |
_CheckOSParams(self, True, nodelist, instance_os, i_osdict) |
|
305 |
self.os_inst = i_osdict # the new dict (without defaults) |
|
306 |
else: |
|
307 |
self.os_inst = None |
|
308 |
|
|
309 |
self.instance = instance |
|
310 |
|
|
311 |
def Exec(self, feedback_fn):
  """Reinstall the instance.

  Optionally records a new OS in the configuration, then runs the OS
  creation scripts with the instance's disks activated.

  """
  instance = self.instance

  if self.op.os_type is not None:
    feedback_fn("Changing OS to '%s'..." % self.op.os_type)
    instance.os = self.op.os_type
    # persist the OS change in the configuration
    self.cfg.Update(instance, feedback_fn)

  _StartInstanceDisks(self, instance, None)
  try:
    feedback_fn("Running the instance OS create scripts...")
    # FIXME: pass debug option from opcode to backend
    add_result = self.rpc.call_instance_os_add(instance.primary_node,
                                               (instance, self.os_inst),
                                               True, self.op.debug_level)
    add_result.Raise("Could not install OS for instance %s on node %s" %
                     (instance.name, instance.primary_node))
  finally:
    # deactivate the disks again even if the install failed
    _ShutdownInstanceDisks(self, instance)
|
334 |
|
|
335 |
|
|
336 |
class LUInstanceReboot(LogicalUnit):
  """Reboot an instance.

  """
  HPATH = "instance-reboot"
  HTYPE = constants.HTYPE_INSTANCE
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = {
      "IGNORE_SECONDARIES": self.op.ignore_secondaries,
      "REBOOT_TYPE": self.op.reboot_type,
      "SHUTDOWN_TIMEOUT": self.op.shutdown_timeout,
      }
    env.update(_BuildInstanceHookEnvByObject(self, self.instance))
    return env

  def BuildHooksNodes(self):
    """Build hooks nodes.

    """
    nodes = [self.cfg.GetMasterNode()]
    nodes.extend(self.instance.all_nodes)
    return (nodes, nodes)

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    inst = self.cfg.GetInstanceInfo(self.op.instance_name)
    self.instance = inst
    assert inst is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name
    _CheckInstanceState(self, inst, INSTANCE_ONLINE)
    _CheckNodeOnline(self, inst.primary_node)

    # check bridges existence
    _CheckInstanceBridgesExist(self, inst)

  def Exec(self, feedback_fn):
    """Reboot the instance.

    """
    inst = self.instance
    reboot_type = self.op.reboot_type
    reason = self.op.reason
    primary = inst.primary_node

    remote_info = self.rpc.call_instance_info(primary, inst.name,
                                              inst.hypervisor)
    remote_info.Raise("Error checking node %s" % primary)
    running = bool(remote_info.payload)

    hv_reboot = reboot_type in (constants.INSTANCE_REBOOT_SOFT,
                                constants.INSTANCE_REBOOT_HARD)
    if running and hv_reboot:
      # soft/hard reboot of a running instance is delegated to the
      # hypervisor on the primary node
      for disk in inst.disks:
        self.cfg.SetDiskID(disk, primary)
      result = self.rpc.call_instance_reboot(primary, inst, reboot_type,
                                             self.op.shutdown_timeout,
                                             reason)
      result.Raise("Could not reboot instance")
    else:
      # full reboot, or the instance is down: shut down (if needed),
      # then start it again from scratch
      if running:
        result = self.rpc.call_instance_shutdown(primary, inst,
                                                 self.op.shutdown_timeout,
                                                 reason)
        result.Raise("Could not shutdown instance for full reboot")
        _ShutdownInstanceDisks(self, inst)
      else:
        self.LogInfo("Instance %s was already stopped, starting now",
                     inst.name)
      _StartInstanceDisks(self, inst, self.op.ignore_secondaries)
      result = self.rpc.call_instance_start(primary, (inst, None, None),
                                            False, reason)
      msg = result.fail_msg
      if msg:
        _ShutdownInstanceDisks(self, inst)
        raise errors.OpExecError("Could not start instance for"
                                 " full reboot: %s" % msg)

    self.cfg.MarkInstanceUp(inst.name)
|
431 |
|
|
432 |
|
|
433 |
def _GetInstanceConsole(cluster, instance):
  """Returns console information for an instance.

  @type cluster: L{objects.Cluster}
  @param cluster: cluster configuration, used to fill parameter defaults
  @type instance: L{objects.Instance}
  @param instance: the instance whose console is queried
  @rtype: dict
  @return: the console object serialized to a dict

  """
  hv_class = hypervisor.GetHypervisorClass(instance.hypervisor)
  # The filled-in parameter dicts are passed separately instead of being
  # written back onto the instance, so cluster defaults are never saved
  # in the instance object itself.
  filled_hv = cluster.FillHV(instance)
  filled_be = cluster.FillBE(instance)
  console = hv_class.GetInstanceConsole(instance, filled_hv, filled_be)

  assert console.instance == instance.name
  assert console.Validate()

  return console.ToDict()
|
452 |
|
|
453 |
|
|
454 |
class LUInstanceConsole(NoHooksLU):
  """Connect to an instance's console.

  This is somewhat special in that it returns the command line that
  you need to run on the master node in order to connect to the
  console.

  """
  REQ_BGL = False

  def ExpandNames(self):
    self.share_locks = _ShareAll()
    self._ExpandAndLockInstance()

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name
    _CheckNodeOnline(self, self.instance.primary_node)

  def Exec(self, feedback_fn):
    """Connect to the console of an instance

    """
    inst = self.instance
    pnode = inst.primary_node

    running = self.rpc.call_instance_list([pnode],
                                          [inst.hypervisor])[pnode]
    running.Raise("Can't get node information from %s" % pnode)

    if inst.name not in running.payload:
      # not running: map the admin state to the instance state reported
      # in the error message
      state = {
        constants.ADMINST_UP: constants.INSTST_ERRORDOWN,
        constants.ADMINST_DOWN: constants.INSTST_ADMINDOWN,
        }.get(inst.admin_state, constants.INSTST_ADMINOFFLINE)
      raise errors.OpExecError("Instance %s is not running (state %s)" %
                               (inst.name, state))

    logging.debug("Connecting to console of %s on %s", inst.name, pnode)

    return _GetInstanceConsole(self.cfg.GetClusterInfo(), inst)
b/lib/cmdlib/instance_utils.py | ||
---|---|---|
513 | 513 |
result = lu.rpc.call_bridges_exist(target_node, brlist) |
514 | 514 |
result.Raise("Error checking bridges on destination node '%s'" % |
515 | 515 |
target_node, prereq=True, ecode=errors.ECODE_ENVIRON) |
516 |
|
|
517 |
|
|
518 |
def _CheckNodeHasOS(lu, node, os_name, force_variant):
  """Ensure that a node supports a given OS.

  @param lu: the LU on behalf of which we make the check
  @param node: the node to check
  @param os_name: the OS to query about
  @param force_variant: whether to ignore variant errors
  @raise errors.OpPrereqError: if the node is not supporting the OS

  """
  os_query = lu.rpc.call_os_get(node, os_name)
  os_query.Raise("OS '%s' not in supported OS list for node %s" %
                 (os_name, node),
                 prereq=True, ecode=errors.ECODE_INVAL)
  if force_variant:
    # caller asked to skip variant validation
    return
  _CheckOSVariant(os_query.payload, os_name)
|
534 |
|
|
535 |
|
|
536 |
def _CheckOSVariant(os_obj, name):
  """Check whether an OS name conforms to the os variants specification.

  @type os_obj: L{objects.OS}
  @param os_obj: OS object to check
  @type name: string
  @param name: OS name passed by the user, to check for validity
  @raise errors.OpPrereqError: if the name/variant combination is invalid

  """
  variant = objects.OS.GetVariant(name)
  supported = os_obj.supported_variants

  if supported:
    # a variant-aware OS requires a known variant suffix in the name
    if not variant:
      raise errors.OpPrereqError("OS name must include a variant",
                                 errors.ECODE_INVAL)
    if variant not in supported:
      raise errors.OpPrereqError("Unsupported OS variant",
                                 errors.ECODE_INVAL)
  elif variant:
    # a variant-less OS must not be given a variant suffix
    raise errors.OpPrereqError("OS '%s' doesn't support variants ('%s'"
                               " passed)" % (os_obj.name, variant),
                               errors.ECODE_INVAL)
Also available in: Unified diff