root / lib / cmdlib / instance_operation.py @ 83f54caa
History | View | Annotate | Download (19.2 kB)
1 |
#
|
---|---|
2 |
#
|
3 |
|
4 |
# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013, 2014 Google Inc.
|
5 |
#
|
6 |
# This program is free software; you can redistribute it and/or modify
|
7 |
# it under the terms of the GNU General Public License as published by
|
8 |
# the Free Software Foundation; either version 2 of the License, or
|
9 |
# (at your option) any later version.
|
10 |
#
|
11 |
# This program is distributed in the hope that it will be useful, but
|
12 |
# WITHOUT ANY WARRANTY; without even the implied warranty of
|
13 |
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
14 |
# General Public License for more details.
|
15 |
#
|
16 |
# You should have received a copy of the GNU General Public License
|
17 |
# along with this program; if not, write to the Free Software
|
18 |
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
|
19 |
# 02110-1301, USA.
|
20 |
|
21 |
|
22 |
"""Logical units dealing with instance operations (start/stop/...).
|
23 |
|
24 |
Those operations have in common that they affect the operating system in a
|
25 |
running instance directly.
|
26 |
|
27 |
"""
|
28 |
|
29 |
import logging |
30 |
|
31 |
from ganeti import constants |
32 |
from ganeti import errors |
33 |
from ganeti import hypervisor |
34 |
from ganeti import locking |
35 |
from ganeti import objects |
36 |
from ganeti import ssh |
37 |
from ganeti import utils |
38 |
from ganeti.cmdlib.base import LogicalUnit, NoHooksLU |
39 |
from ganeti.cmdlib.common import INSTANCE_ONLINE, INSTANCE_DOWN, \ |
40 |
CheckHVParams, CheckInstanceState, CheckNodeOnline, GetUpdatedParams, \ |
41 |
CheckOSParams, CheckOSImage, ShareAll |
42 |
from ganeti.cmdlib.instance_storage import StartInstanceDisks, \ |
43 |
ShutdownInstanceDisks, ImageDisks |
44 |
from ganeti.cmdlib.instance_utils import BuildInstanceHookEnvByObject, \ |
45 |
CheckInstanceBridgesExist, CheckNodeFreeMemory, CheckNodeHasOS |
46 |
from ganeti.hypervisor import hv_base |
47 |
|
48 |
|
49 |
class LUInstanceStartup(LogicalUnit):
  """Starts an instance.

  """
  HPATH = "instance-start"
  HTYPE = constants.HTYPE_INSTANCE
  REQ_BGL = False

  def CheckArguments(self):
    """Check the arguments: normalize any extra beparams given.

    """
    # extra beparams
    if self.op.beparams:
      # fill the beparams dict
      objects.UpgradeBeParams(self.op.beparams)
      utils.ForceDictType(self.op.beparams, constants.BES_PARAMETER_TYPES)

  def ExpandNames(self):
    """Expand and lock the instance; node-resource locks are computed later.

    """
    self._ExpandAndLockInstance()
    self.recalculate_locks[locking.LEVEL_NODE_RES] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    """Declare node-resource locks, restricted to the primary node only.

    """
    if level == locking.LEVEL_NODE_RES:
      self._LockInstancesNodes(primary_only=True, level=locking.LEVEL_NODE_RES)

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = {
      "FORCE": self.op.force,
      }

    env.update(BuildInstanceHookEnvByObject(self, self.instance))

    return env

  def BuildHooksNodes(self):
    """Build hooks nodes.

    """
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
    return (nl, nl)

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_uuid)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

    cluster = self.cfg.GetClusterInfo()
    # extra hvparams
    if self.op.hvparams:
      # check hypervisor parameter syntax (locally)
      utils.ForceDictType(self.op.hvparams, constants.HVS_PARAMETER_TYPES)
      filled_hvp = cluster.FillHV(self.instance)
      filled_hvp.update(self.op.hvparams)
      hv_type = hypervisor.GetHypervisorClass(self.instance.hypervisor)
      hv_type.CheckParameterSyntax(filled_hvp)
      # also validate the merged parameters on the instance's nodes
      CheckHVParams(self, self.instance.all_nodes, self.instance.hypervisor,
                    filled_hvp)

    CheckInstanceState(self, self.instance, INSTANCE_ONLINE)

    self.primary_offline = \
      self.cfg.GetNodeInfo(self.instance.primary_node).offline

    if self.primary_offline and self.op.ignore_offline_nodes:
      self.LogWarning("Ignoring offline primary node")

      # with an offline primary the instance cannot actually be started,
      # so any parameter overrides cannot take effect
      if self.op.hvparams or self.op.beparams:
        self.LogWarning("Overridden parameters are ignored")
    else:
      CheckNodeOnline(self, self.instance.primary_node)

      bep = self.cfg.GetClusterInfo().FillBE(self.instance)
      bep.update(self.op.beparams)

      # check bridges existence
      CheckInstanceBridgesExist(self, self.instance)

      remote_info = self.rpc.call_instance_info(
          self.instance.primary_node, self.instance.name,
          self.instance.hypervisor, cluster.hvparams[self.instance.hypervisor])
      remote_info.Raise("Error checking node %s" %
                        self.cfg.GetNodeName(self.instance.primary_node),
                        prereq=True, ecode=errors.ECODE_ENVIRON)
      if remote_info.payload:
        # instance is known to the hypervisor; refuse to "start" one that
        # the user shut down from within the guest
        if hv_base.HvInstanceState.IsShutdown(remote_info.payload["state"]):
          raise errors.OpPrereqError("Instance '%s' was shutdown by the user,"
                                     " please shutdown the instance before"
                                     " starting it again" % self.instance.name,
                                     errors.ECODE_INVAL)
      else: # not running already
        CheckNodeFreeMemory(
            self, self.instance.primary_node,
            "starting instance %s" % self.instance.name,
            bep[constants.BE_MINMEM], self.instance.hypervisor,
            self.cfg.GetClusterInfo().hvparams[self.instance.hypervisor])

  def Exec(self, feedback_fn):
    """Start the instance.

    """
    # record the desired state first, unless the caller asked not to
    if not self.op.no_remember:
      self.cfg.MarkInstanceUp(self.instance.uuid)

    if self.primary_offline:
      assert self.op.ignore_offline_nodes
      self.LogInfo("Primary node offline, marked instance as started")
    else:
      StartInstanceDisks(self, self.instance, self.op.force)

      result = \
        self.rpc.call_instance_start(self.instance.primary_node,
                                     (self.instance, self.op.hvparams,
                                      self.op.beparams),
                                     self.op.startup_paused, self.op.reason)
      msg = result.fail_msg
      if msg:
        # roll back the disk activation before failing
        ShutdownInstanceDisks(self, self.instance)
        raise errors.OpExecError("Could not start instance: %s" % msg)
175 |
|
176 |
|
177 |
class LUInstanceShutdown(LogicalUnit):
  """Shutdown an instance.

  """
  HPATH = "instance-stop"
  HTYPE = constants.HTYPE_INSTANCE
  REQ_BGL = False

  def ExpandNames(self):
    """Expand and lock the target instance.

    """
    self._ExpandAndLockInstance()

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = BuildInstanceHookEnvByObject(self, self.instance)
    env["TIMEOUT"] = self.op.timeout
    return env

  def BuildHooksNodes(self):
    """Build hooks nodes.

    """
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
    return (nl, nl)

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_uuid)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

    # with --force the instance-state check is skipped entirely
    if not self.op.force:
      CheckInstanceState(self, self.instance, INSTANCE_ONLINE)
    else:
      self.LogWarning("Ignoring offline instance check")

    self.primary_offline = \
      self.cfg.GetNodeInfo(self.instance.primary_node).offline

    if self.primary_offline and self.op.ignore_offline_nodes:
      self.LogWarning("Ignoring offline primary node")
    else:
      CheckNodeOnline(self, self.instance.primary_node)

  def Exec(self, feedback_fn):
    """Shutdown the instance.

    """
    # If the instance is offline we shouldn't mark it as down, as that
    # resets the offline flag.
    if not self.op.no_remember and self.instance.admin_state in INSTANCE_ONLINE:
      self.cfg.MarkInstanceDown(self.instance.uuid)

    if self.primary_offline:
      assert self.op.ignore_offline_nodes
      self.LogInfo("Primary node offline, marked instance as stopped")
    else:
      result = self.rpc.call_instance_shutdown(
        self.instance.primary_node,
        self.instance,
        self.op.timeout, self.op.reason)
      msg = result.fail_msg
      if msg:
        # best-effort: a failed shutdown RPC is only warned about,
        # the disks are deactivated regardless
        self.LogWarning("Could not shutdown instance: %s", msg)

      ShutdownInstanceDisks(self, self.instance)
250 |
|
251 |
|
252 |
class LUInstanceReinstall(LogicalUnit):
  """Reinstall an instance.

  """
  HPATH = "instance-reinstall"
  HTYPE = constants.HTYPE_INSTANCE
  REQ_BGL = False

  def CheckArguments(self):
    """Check arguments: validate the OS image declaration, if any.

    """
    CheckOSImage(self.op)

  def ExpandNames(self):
    """Expand and lock the target instance.

    """
    self._ExpandAndLockInstance()

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    return BuildInstanceHookEnvByObject(self, self.instance)

  def BuildHooksNodes(self):
    """Build hooks nodes.

    """
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
    return (nl, nl)

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster and is not running.

    """
    instance = self.cfg.GetInstanceInfo(self.op.instance_uuid)
    assert instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name
    CheckNodeOnline(self, instance.primary_node, "Instance primary node"
                    " offline, cannot reinstall")

    # a diskless instance has nothing to reinstall onto
    if instance.disk_template == constants.DT_DISKLESS:
      raise errors.OpPrereqError("Instance '%s' has no disks" %
                                 self.op.instance_name,
                                 errors.ECODE_INVAL)
    CheckInstanceState(self, instance, INSTANCE_DOWN, msg="cannot reinstall")

    # Handle OS parameters
    self._MergeValidateOsParams(instance)

    self.instance = instance

  def _MergeValidateOsParams(self, instance):
    """Handle the OS parameter merging and validation for the target instance.

    Merges the opcode-supplied public/private/secret OS parameters over the
    instance's current values and the cluster defaults, stores the result in
    C{self.osparams} and validates it on all of the instance's nodes.

    """
    if self.op.os_type is not None:
      # OS verification
      CheckNodeHasOS(self, instance.primary_node, self.op.os_type,
                     self.op.force_variant)
      instance_os = self.op.os_type
    else:
      instance_os = instance.os

    node_uuids = list(instance.all_nodes)

    # normalize unset parameter dicts to empty ones
    self.op.osparams = self.op.osparams or {}
    self.op.osparams_private = self.op.osparams_private or {}
    self.op.osparams_secret = self.op.osparams_secret or {}

    # Handle the use of 'default' values.
    params_public = GetUpdatedParams(instance.osparams, self.op.osparams)
    params_private = GetUpdatedParams(instance.osparams_private,
                                      self.op.osparams_private)
    params_secret = self.op.osparams_secret

    cluster = self.cfg.GetClusterInfo()
    self.osparams = cluster.SimpleFillOS(
      instance_os,
      params_public,
      os_params_private=params_private,
      os_params_secret=params_secret
    )

    CheckOSParams(self, True, node_uuids, instance_os, self.osparams)

  def _ReinstallOSScripts(self, instance, osparams, debug_level):
    """Reinstall OS scripts on an instance.

    @type instance: L{objects.Instance}
    @param instance: instance of which the OS scripts should run

    @type osparams: L{dict}
    @param osparams: OS parameters

    @type debug_level: non-negative int
    @param debug_level: debug level

    @rtype: NoneType
    @return: None
    @raise errors.OpExecError: in case of failure

    """
    self.LogInfo("Running instance OS create scripts...")
    result = self.rpc.call_instance_os_add(instance.primary_node,
                                           (instance, osparams),
                                           True,
                                           debug_level)
    result.Raise("Could not install OS for instance '%s' on node '%s'" %
                 (instance.name, self.cfg.GetNodeName(instance.primary_node)))

  def _ReinstallOSImage(self, instance, os_image):
    """Reinstall OS image on an instance.

    @type instance: L{objects.Instance}
    @param instance: instance on which the OS image should be installed

    @type os_image: string
    @param os_image: OS image URL or absolute file path

    @rtype: NoneType
    @return: None
    @raise errors.OpExecError: in case of failure

    """
    master = self.cfg.GetMasterNode()
    pnode = self.cfg.GetNodeInfo(instance.primary_node)

    # a local file path is only usable on the primary node; copy it
    # there via ssh when we are not already running on that node
    if not utils.IsUrl(os_image) and master != pnode.uuid:
      ssh_port = pnode.ndparams.get(constants.ND_SSH_PORT)
      srun = ssh.SshRunner(self.cfg.GetClusterName())
      srun.CopyFileToNode(pnode.name, ssh_port, os_image)

    ImageDisks(self, instance, os_image)

  def Exec(self, feedback_fn):
    """Reinstall the instance.

    """
    os_image = objects.GetOSImage(self.op.osparams)

    if os_image is not None:
      feedback_fn("Using OS image '%s', not changing instance"
                  " configuration" % os_image)
    else:
      # fall back to the image already recorded in the instance, if any
      os_image = objects.GetOSImage(self.instance.osparams)

    os_type = self.op.os_type

    if os_type is not None:
      feedback_fn("Changing OS scripts to '%s'..." % os_type)
      self.instance.os = os_type
      self.cfg.Update(self.instance, feedback_fn)
    else:
      os_type = self.instance.os

    if not os_image and not os_type:
      self.LogInfo("No OS scripts or OS image specified or found in the"
                   " instance's configuration, nothing to install")
    else:
      # disks must be active for imaging/scripts; always deactivate again
      StartInstanceDisks(self, self.instance, None)
      try:
        if os_image:
          self._ReinstallOSImage(self.instance, os_image)

        if os_type:
          self._ReinstallOSScripts(self.instance, self.osparams,
                                   self.op.debug_level)
      finally:
        ShutdownInstanceDisks(self, self.instance)
421 |
|
422 |
|
423 |
class LUInstanceReboot(LogicalUnit):
  """Reboot an instance.

  """
  HPATH = "instance-reboot"
  HTYPE = constants.HTYPE_INSTANCE
  REQ_BGL = False

  def ExpandNames(self):
    """Expand and lock the target instance.

    """
    self._ExpandAndLockInstance()

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = {
      "IGNORE_SECONDARIES": self.op.ignore_secondaries,
      "REBOOT_TYPE": self.op.reboot_type,
      "SHUTDOWN_TIMEOUT": self.op.shutdown_timeout,
      }

    env.update(BuildInstanceHookEnvByObject(self, self.instance))

    return env

  def BuildHooksNodes(self):
    """Build hooks nodes.

    """
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
    return (nl, nl)

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_uuid)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name
    CheckInstanceState(self, self.instance, INSTANCE_ONLINE)
    CheckNodeOnline(self, self.instance.primary_node)

    # check bridges existence
    CheckInstanceBridgesExist(self, self.instance)

  def Exec(self, feedback_fn):
    """Reboot the instance.

    """
    cluster = self.cfg.GetClusterInfo()
    remote_info = self.rpc.call_instance_info(
        self.instance.primary_node, self.instance.name,
        self.instance.hypervisor, cluster.hvparams[self.instance.hypervisor])
    remote_info.Raise("Error checking node %s" %
                      self.cfg.GetNodeName(self.instance.primary_node))
    instance_running = bool(remote_info.payload)

    current_node_uuid = self.instance.primary_node

    if instance_running and \
        self.op.reboot_type in [constants.INSTANCE_REBOOT_SOFT,
                                constants.INSTANCE_REBOOT_HARD]:
      # soft/hard reboot of a running instance: delegate to the hypervisor
      result = self.rpc.call_instance_reboot(current_node_uuid, self.instance,
                                             self.op.reboot_type,
                                             self.op.shutdown_timeout,
                                             self.op.reason)
      result.Raise("Could not reboot instance")
    else:
      # full reboot (or instance not running): stop if needed, then start
      if instance_running:
        result = self.rpc.call_instance_shutdown(current_node_uuid,
                                                 self.instance,
                                                 self.op.shutdown_timeout,
                                                 self.op.reason)
        result.Raise("Could not shutdown instance for full reboot")
        ShutdownInstanceDisks(self, self.instance)
      else:
        self.LogInfo("Instance %s was already stopped, starting now",
                     self.instance.name)
      StartInstanceDisks(self, self.instance, self.op.ignore_secondaries)
      result = self.rpc.call_instance_start(current_node_uuid,
                                            (self.instance, None, None), False,
                                            self.op.reason)
      msg = result.fail_msg
      if msg:
        # roll back disk activation before failing
        ShutdownInstanceDisks(self, self.instance)
        raise errors.OpExecError("Could not start instance for"
                                 " full reboot: %s" % msg)

    self.cfg.MarkInstanceUp(self.instance.uuid)
516 |
|
517 |
|
518 |
def GetInstanceConsole(cluster, instance, primary_node, node_group):
  """Returns console information for an instance.

  Asks the instance's hypervisor class for a console object, validates it
  and returns its dictionary representation.

  @type cluster: L{objects.Cluster}
  @type instance: L{objects.Instance}
  @type primary_node: L{objects.Node}
  @type node_group: L{objects.NodeGroup}
  @rtype: dict

  """
  # beparams and hvparams are passed separately, to avoid editing the
  # instance and then saving the defaults in the instance itself.
  filled_beparams = cluster.FillBE(instance)
  filled_hvparams = cluster.FillHV(instance)

  hv_class = hypervisor.GetHypervisorClass(instance.hypervisor)
  console = hv_class.GetInstanceConsole(instance, primary_node, node_group,
                                        filled_hvparams, filled_beparams)

  assert console.instance == instance.name

  console.Validate()
  return console.ToDict()
|
540 |
|
541 |
|
542 |
class LUInstanceConsole(NoHooksLU):
  """Connect to an instance's console.

  This is somewhat special in that it returns the command line that
  you need to run on the master node in order to connect to the
  console.

  """
  REQ_BGL = False

  def ExpandNames(self):
    """Expand and lock the instance; all locks are taken in shared mode.

    """
    self.share_locks = ShareAll()
    self._ExpandAndLockInstance()

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_uuid)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name
    CheckNodeOnline(self, self.instance.primary_node)

  def Exec(self, feedback_fn):
    """Connect to the console of an instance

    """
    node_uuid = self.instance.primary_node

    cluster_hvparams = self.cfg.GetClusterInfo().hvparams
    node_insts = self.rpc.call_instance_list(
      [node_uuid], [self.instance.hypervisor],
      cluster_hvparams)[node_uuid]
    node_insts.Raise("Can't get node information from %s" %
                     self.cfg.GetNodeName(node_uuid))

    if self.instance.name not in node_insts.payload:
      # instance not running on its primary node; report a state derived
      # from the configured admin state
      if self.instance.admin_state == constants.ADMINST_UP:
        state = constants.INSTST_ERRORDOWN
      elif self.instance.admin_state == constants.ADMINST_DOWN:
        state = constants.INSTST_ADMINDOWN
      else:
        state = constants.INSTST_ADMINOFFLINE
      raise errors.OpExecError("Instance %s is not running (state %s)" %
                               (self.instance.name, state))

    logging.debug("Connecting to console of %s on %s", self.instance.name,
                  self.cfg.GetNodeName(node_uuid))

    node = self.cfg.GetNodeInfo(self.instance.primary_node)
    group = self.cfg.GetNodeGroup(node.group)
    return GetInstanceConsole(self.cfg.GetClusterInfo(),
                              self.instance, node, group)
|