Revision d0d7d7cf lib/cmdlib/instance_operation.py
--- a/lib/cmdlib/instance_operation.py
+++ b/lib/cmdlib/instance_operation.py
     This checks that the instance is in the cluster.

     """
-    self.instance = instance = self.cfg.GetInstanceInfo(self.op.instance_name)
+    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
     assert self.instance is not None, \
       "Cannot retrieve locked instance %s" % self.op.instance_name

...
     if self.op.hvparams:
       # check hypervisor parameter syntax (locally)
       utils.ForceDictType(self.op.hvparams, constants.HVS_PARAMETER_TYPES)
-      filled_hvp = cluster.FillHV(instance)
+      filled_hvp = cluster.FillHV(self.instance)
       filled_hvp.update(self.op.hvparams)
-      hv_type = hypervisor.GetHypervisorClass(instance.hypervisor)
+      hv_type = hypervisor.GetHypervisorClass(self.instance.hypervisor)
       hv_type.CheckParameterSyntax(filled_hvp)
-      CheckHVParams(self, instance.all_nodes, instance.hypervisor, filled_hvp)
+      CheckHVParams(self, self.instance.all_nodes, self.instance.hypervisor,
+                    filled_hvp)

-    CheckInstanceState(self, instance, INSTANCE_ONLINE)
+    CheckInstanceState(self, self.instance, INSTANCE_ONLINE)

-    self.primary_offline = self.cfg.GetNodeInfo(instance.primary_node).offline
+    self.primary_offline = \
+      self.cfg.GetNodeInfo(self.instance.primary_node).offline

     if self.primary_offline and self.op.ignore_offline_nodes:
       self.LogWarning("Ignoring offline primary node")
...
       if self.op.hvparams or self.op.beparams:
         self.LogWarning("Overridden parameters are ignored")
     else:
-      CheckNodeOnline(self, instance.primary_node)
+      CheckNodeOnline(self, self.instance.primary_node)

-      bep = self.cfg.GetClusterInfo().FillBE(instance)
+      bep = self.cfg.GetClusterInfo().FillBE(self.instance)
       bep.update(self.op.beparams)

       # check bridges existence
-      CheckInstanceBridgesExist(self, instance)
+      CheckInstanceBridgesExist(self, self.instance)

       remote_info = self.rpc.call_instance_info(
-        instance.primary_node, instance.name, instance.hypervisor,
-        cluster.hvparams[instance.hypervisor])
+        self.instance.primary_node, self.instance.name,
+        self.instance.hypervisor, cluster.hvparams[self.instance.hypervisor])
       remote_info.Raise("Error checking node %s" %
-                        self.cfg.GetNodeName(instance.primary_node),
+                        self.cfg.GetNodeName(self.instance.primary_node),
                         prereq=True, ecode=errors.ECODE_ENVIRON)
       if not remote_info.payload:  # not running already
         CheckNodeFreeMemory(
-          self, instance.primary_node, "starting instance %s" % instance.name,
-          bep[constants.BE_MINMEM], instance.hypervisor,
-          self.cfg.GetClusterInfo().hvparams[instance.hypervisor])
+          self, self.instance.primary_node,
+          "starting instance %s" % self.instance.name,
+          bep[constants.BE_MINMEM], self.instance.hypervisor,
+          self.cfg.GetClusterInfo().hvparams[self.instance.hypervisor])

   def Exec(self, feedback_fn):
     """Start the instance.

     """
-    instance = self.instance
-    force = self.op.force
-    reason = self.op.reason
-
     if not self.op.no_remember:
-      self.cfg.MarkInstanceUp(instance.name)
+      self.cfg.MarkInstanceUp(self.instance.name)

     if self.primary_offline:
       assert self.op.ignore_offline_nodes
       self.LogInfo("Primary node offline, marked instance as started")
     else:
-      StartInstanceDisks(self, instance, force)
+      StartInstanceDisks(self, self.instance, self.op.force)

       result = \
-        self.rpc.call_instance_start(instance.primary_node,
-                                     (instance, self.op.hvparams,
+        self.rpc.call_instance_start(self.instance.primary_node,
+                                     (self.instance, self.op.hvparams,
                                       self.op.beparams),
-                                     self.op.startup_paused, reason)
+                                     self.op.startup_paused, self.op.reason)
       msg = result.fail_msg
       if msg:
-        ShutdownInstanceDisks(self, instance)
+        ShutdownInstanceDisks(self, self.instance)
         raise errors.OpExecError("Could not start instance: %s" % msg)

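The Exec hunk above keeps the existing rollback behaviour: the disks are activated before the start RPC is issued, and shut down again if the call reports a failure. A minimal sketch of that pattern, with hypothetical stand-in callables instead of Ganeti's StartInstanceDisks, ShutdownInstanceDisks and RPC layer:

class OpExecError(Exception):
  """Stand-in for errors.OpExecError (illustrative only)."""


def start_with_rollback(node, instance, start_disks, start_on_node, stop_disks):
  """Start `instance` on `node`, undoing disk activation if the start fails.

  All three callables are hypothetical placeholders for the real helpers;
  `start_on_node` returns an error message on failure, or None on success.
  """
  start_disks(instance)
  error = start_on_node(node, instance)
  if error:
    stop_disks(instance)  # roll back before propagating the failure
    raise OpExecError("Could not start instance: %s" % error)


# Example run: simulate a failing start RPC and observe the rollback.
log = []
try:
  start_with_rollback(
    "node1", "inst1",
    start_disks=lambda inst: log.append("disks up"),
    start_on_node=lambda node, inst: "hypervisor refused",
    stop_disks=lambda inst: log.append("disks down"))
except OpExecError as err:
  log.append(str(err))
print(log)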
...
     """Shutdown the instance.

     """
-    instance = self.instance
-    timeout = self.op.timeout
-    reason = self.op.reason
-
     # If the instance is offline we shouldn't mark it as down, as that
     # resets the offline flag.
-    if not self.op.no_remember and instance.admin_state in INSTANCE_ONLINE:
-      self.cfg.MarkInstanceDown(instance.name)
+    if not self.op.no_remember and self.instance.admin_state in INSTANCE_ONLINE:
+      self.cfg.MarkInstanceDown(self.instance.name)

     if self.primary_offline:
       assert self.op.ignore_offline_nodes
       self.LogInfo("Primary node offline, marked instance as stopped")
     else:
-      result = self.rpc.call_instance_shutdown(instance.primary_node, instance,
-                                               timeout, reason)
+      result = self.rpc.call_instance_shutdown(self.instance.primary_node,
+                                               self.instance,
+                                               self.op.timeout, self.op.reason)
       msg = result.fail_msg
       if msg:
         self.LogWarning("Could not shutdown instance: %s", msg)

-      ShutdownInstanceDisks(self, instance)
+      ShutdownInstanceDisks(self, self.instance)


 class LUInstanceReinstall(LogicalUnit):
...
     """Reinstall the instance.

     """
-    inst = self.instance
-
     if self.op.os_type is not None:
       feedback_fn("Changing OS to '%s'..." % self.op.os_type)
-      inst.os = self.op.os_type
+      self.instance.os = self.op.os_type
       # Write to configuration
-      self.cfg.Update(inst, feedback_fn)
+      self.cfg.Update(self.instance, feedback_fn)

-    StartInstanceDisks(self, inst, None)
+    StartInstanceDisks(self, self.instance, None)
     try:
       feedback_fn("Running the instance OS create scripts...")
       # FIXME: pass debug option from opcode to backend
-      result = self.rpc.call_instance_os_add(inst.primary_node,
-                                             (inst, self.os_inst), True,
-                                             self.op.debug_level)
+      result = self.rpc.call_instance_os_add(self.instance.primary_node,
+                                             (self.instance, self.os_inst),
+                                             True, self.op.debug_level)
       result.Raise("Could not install OS for instance %s on node %s" %
-                   (inst.name, self.cfg.GetNodeName(inst.primary_node)))
+                   (self.instance.name,
+                    self.cfg.GetNodeName(self.instance.primary_node)))
     finally:
-      ShutdownInstanceDisks(self, inst)
+      ShutdownInstanceDisks(self, self.instance)


 class LUInstanceReboot(LogicalUnit):
...
     This checks that the instance is in the cluster.

     """
-    self.instance = instance = self.cfg.GetInstanceInfo(self.op.instance_name)
+    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
     assert self.instance is not None, \
       "Cannot retrieve locked instance %s" % self.op.instance_name
-    CheckInstanceState(self, instance, INSTANCE_ONLINE)
-    CheckNodeOnline(self, instance.primary_node)
+    CheckInstanceState(self, self.instance, INSTANCE_ONLINE)
+    CheckNodeOnline(self, self.instance.primary_node)

     # check bridges existence
-    CheckInstanceBridgesExist(self, instance)
+    CheckInstanceBridgesExist(self, self.instance)

   def Exec(self, feedback_fn):
     """Reboot the instance.

     """
-    instance = self.instance
-    ignore_secondaries = self.op.ignore_secondaries
-    reboot_type = self.op.reboot_type
-    reason = self.op.reason
-
     cluster = self.cfg.GetClusterInfo()
     remote_info = self.rpc.call_instance_info(
-      instance.primary_node, instance.name, instance.hypervisor,
-      cluster.hvparams[instance.hypervisor])
+      self.instance.primary_node, self.instance.name,
+      self.instance.hypervisor, cluster.hvparams[self.instance.hypervisor])
     remote_info.Raise("Error checking node %s" %
-                      self.cfg.GetNodeName(instance.primary_node))
+                      self.cfg.GetNodeName(self.instance.primary_node))
     instance_running = bool(remote_info.payload)

-    current_node_uuid = instance.primary_node
+    current_node_uuid = self.instance.primary_node

-    if instance_running and reboot_type in [constants.INSTANCE_REBOOT_SOFT,
-                                            constants.INSTANCE_REBOOT_HARD]:
-      for disk in instance.disks:
+    if instance_running and \
+        self.op.reboot_type in [constants.INSTANCE_REBOOT_SOFT,
+                                constants.INSTANCE_REBOOT_HARD]:
+      for disk in self.instance.disks:
         self.cfg.SetDiskID(disk, current_node_uuid)
-      result = self.rpc.call_instance_reboot(current_node_uuid, instance,
-                                             reboot_type,
-                                             self.op.shutdown_timeout, reason)
+      result = self.rpc.call_instance_reboot(current_node_uuid, self.instance,
+                                             self.op.reboot_type,
+                                             self.op.shutdown_timeout,
+                                             self.op.reason)
       result.Raise("Could not reboot instance")
     else:
       if instance_running:
-        result = self.rpc.call_instance_shutdown(current_node_uuid, instance,
+        result = self.rpc.call_instance_shutdown(current_node_uuid,
+                                                 self.instance,
                                                  self.op.shutdown_timeout,
-                                                 reason)
+                                                 self.op.reason)
         result.Raise("Could not shutdown instance for full reboot")
-        ShutdownInstanceDisks(self, instance)
+        ShutdownInstanceDisks(self, self.instance)
       else:
         self.LogInfo("Instance %s was already stopped, starting now",
-                     instance.name)
-      StartInstanceDisks(self, instance, ignore_secondaries)
+                     self.instance.name)
+      StartInstanceDisks(self, self.instance, self.op.ignore_secondaries)
       result = self.rpc.call_instance_start(current_node_uuid,
-                                            (instance, None, None), False,
-                                            reason)
+                                            (self.instance, None, None), False,
+                                            self.op.reason)
       msg = result.fail_msg
       if msg:
-        ShutdownInstanceDisks(self, instance)
+        ShutdownInstanceDisks(self, self.instance)
         raise errors.OpExecError("Could not start instance for"
                                  " full reboot: %s" % msg)

-    self.cfg.MarkInstanceUp(instance.name)
+    self.cfg.MarkInstanceUp(self.instance.name)


 def GetInstanceConsole(cluster, instance, primary_node):
...
     """Connect to the console of an instance

     """
-    instance = self.instance
-    node_uuid = instance.primary_node
+    node_uuid = self.instance.primary_node

     cluster_hvparams = self.cfg.GetClusterInfo().hvparams
-    node_insts = self.rpc.call_instance_list([node_uuid],
-                                             [instance.hypervisor],
-                                             cluster_hvparams)[node_uuid]
+    node_insts = self.rpc.call_instance_list(
+      [node_uuid], [self.instance.hypervisor],
+      cluster_hvparams)[node_uuid]
     node_insts.Raise("Can't get node information from %s" %
                      self.cfg.GetNodeName(node_uuid))

-    if instance.name not in node_insts.payload:
-      if instance.admin_state == constants.ADMINST_UP:
+    if self.instance.name not in node_insts.payload:
+      if self.instance.admin_state == constants.ADMINST_UP:
         state = constants.INSTST_ERRORDOWN
-      elif instance.admin_state == constants.ADMINST_DOWN:
+      elif self.instance.admin_state == constants.ADMINST_DOWN:
         state = constants.INSTST_ADMINDOWN
       else:
         state = constants.INSTST_ADMINOFFLINE
       raise errors.OpExecError("Instance %s is not running (state %s)" %
-                               (instance.name, state))
+                               (self.instance.name, state))

-    logging.debug("Connecting to console of %s on %s", instance.name,
+    logging.debug("Connecting to console of %s on %s", self.instance.name,
                   self.cfg.GetNodeName(node_uuid))

-    return GetInstanceConsole(self.cfg.GetClusterInfo(), instance,
-                              self.cfg.GetNodeInfo(instance.primary_node))
+    return GetInstanceConsole(self.cfg.GetClusterInfo(), self.instance,
+                              self.cfg.GetNodeInfo(self.instance.primary_node))
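The change applied throughout this revision is mechanical: local aliases such as `instance = self.instance`, `force = self.op.force` or `reason = self.op.reason` are dropped and the attributes are read directly, so each method has a single obvious source of truth. A simplified, hypothetical sketch of the before/after pattern (these classes are illustrative only, not Ganeti's LogicalUnit or opcode types):

class FakeOp(object):
  """Hypothetical stand-in for an opcode carrying a 'force' parameter."""
  def __init__(self, force):
    self.force = force


class FakeLU(object):
  """Hypothetical stand-in for a LogicalUnit holding the instance and opcode."""
  def __init__(self, instance_name, op):
    self.instance = instance_name
    self.op = op

  def ExecOld(self):
    # Old style: bind shared objects to short local aliases first.
    instance = self.instance
    force = self.op.force
    return "start %s (force=%s)" % (instance, force)

  def ExecNew(self):
    # New style: read self.instance and self.op.* directly.
    return "start %s (force=%s)" % (self.instance, self.op.force)


lu = FakeLU("inst1", FakeOp(force=True))
assert lu.ExecOld() == lu.ExecNew()
print(lu.ExecNew())  # start inst1 (force=True)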