Revision f1ea1bef
b/lib/cmdlib.py | ||
---|---|---|
5905 | 5905 |
self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE |
5906 | 5906 |
|
5907 | 5907 |
self._migrater = TLMigrateInstance(self, self.op.instance_name, |
5908 |
self.op.cleanup, self.op.iallocator, |
|
5909 |
self.op.target_node) |
|
5908 |
self.op.cleanup) |
|
5910 | 5909 |
self.tasklets = [self._migrater] |
5911 | 5910 |
|
5912 | 5911 |
def DeclareLocks(self, level): |
... | ... | |
6152 | 6151 |
logging.debug("Migrating instance %s", inst.name) |
6153 | 6152 |
names.append(inst.name) |
6154 | 6153 |
|
6155 |
tasklets.append(TLMigrateInstance(self, inst.name, False, |
|
6156 |
self.op.iallocator, None)) |
|
6154 |
tasklets.append(TLMigrateInstance(self, inst.name, False)) |
|
6157 | 6155 |
|
6158 | 6156 |
if inst.disk_template in constants.DTS_EXT_MIRROR: |
6159 | 6157 |
# We need to lock all nodes, as the iallocator will choose the |
... | ... | |
6199 | 6197 |
this variable is initialized only after CheckPrereq has run
6200 | 6198 |
|
6201 | 6199 |
""" |
6202 |
def __init__(self, lu, instance_name, cleanup, |
|
6203 |
iallocator=None, target_node=None): |
|
6200 |
def __init__(self, lu, instance_name, cleanup): |
|
6204 | 6201 |
"""Initializes this class. |
6205 | 6202 |
|
6206 | 6203 |
""" |
... | ... | |
6210 | 6207 |
self.instance_name = instance_name |
6211 | 6208 |
self.cleanup = cleanup |
6212 | 6209 |
self.live = False # will be overridden later |
6213 |
self.iallocator = iallocator |
|
6214 |
self.target_node = target_node |
|
6215 | 6210 |
|
6216 | 6211 |
def CheckPrereq(self): |
6217 | 6212 |
"""Check prerequisites. |
... | ... | |
6232 | 6227 |
if instance.disk_template in constants.DTS_EXT_MIRROR: |
6233 | 6228 |
_CheckIAllocatorOrNode(self.lu, "iallocator", "target_node") |
6234 | 6229 |
|
6235 |
if self.iallocator: |
|
6230 |
if self.lu.op.iallocator:
|
|
6236 | 6231 |
self._RunAllocator() |
6232 |
else: |
|
6233 |
# We set self.target_node as it is required by |
|
6234 |
# BuildHooksEnv |
|
6235 |
self.target_node = self.lu.op.target_node |
|
6237 | 6236 |
|
6238 |
# self.target_node is already populated, either directly or by the |
|
6239 |
# iallocator run |
|
6240 | 6237 |
target_node = self.target_node |
6241 | 6238 |
|
6242 | 6239 |
if len(self.lu.tasklets) == 1: |
6243 | 6240 |
# It is safe to remove locks only when we're the only tasklet in the LU |
6244 |
nodes_keep = [instance.primary_node, self.target_node]
|
|
6241 |
nodes_keep = [instance.primary_node, target_node] |
|
6245 | 6242 |
nodes_rel = [node for node in self.lu.acquired_locks[locking.LEVEL_NODE] |
6246 | 6243 |
if node not in nodes_keep] |
6247 | 6244 |
self.lu.context.glm.release(locking.LEVEL_NODE, nodes_rel) |
... | ... | |
6292 | 6289 |
self.instance.primary_node], |
6293 | 6290 |
) |
6294 | 6291 |
|
6295 |
ial.Run(self.iallocator) |
|
6292 |
ial.Run(self.lu.op.iallocator)
|
|
6296 | 6293 |
|
6297 | 6294 |
if not ial.success: |
6298 | 6295 |
raise errors.OpPrereqError("Can't compute nodes using" |
6299 | 6296 |
" iallocator '%s': %s" % |
6300 |
(self.iallocator, ial.info), |
|
6297 |
(self.lu.op.iallocator, ial.info),
|
|
6301 | 6298 |
errors.ECODE_NORES) |
6302 | 6299 |
if len(ial.result) != ial.required_nodes: |
6303 | 6300 |
raise errors.OpPrereqError("iallocator '%s' returned invalid number" |
6304 | 6301 |
" of nodes (%s), required %s" % |
6305 |
(self.iallocator, len(ial.result), |
|
6302 |
(self.lu.op.iallocator, len(ial.result),
|
|
6306 | 6303 |
ial.required_nodes), errors.ECODE_FAULT) |
6307 | 6304 |
self.target_node = ial.result[0] |
6308 | 6305 |
self.lu.LogInfo("Selected nodes for instance %s via iallocator %s: %s", |
6309 |
self.instance_name, self.iallocator, |
|
6306 |
self.instance_name, self.lu.op.iallocator,
|
|
6310 | 6307 |
utils.CommaJoin(ial.result)) |
6311 | 6308 |
|
6312 | 6309 |
if self.lu.op.live is not None and self.lu.op.mode is not None: |
Also available in: Unified diff