ECLUSTERCFG = (TCLUSTER, "ECLUSTERCFG")
ECLUSTERCERT = (TCLUSTER, "ECLUSTERCERT")
+ ECLUSTERFILECHECK = (TCLUSTER, "ECLUSTERFILECHECK")
EINSTANCEBADNODE = (TINSTANCE, "EINSTANCEBADNODE")
EINSTANCEDOWN = (TINSTANCE, "EINSTANCEDOWN")
EINSTANCELAYOUT = (TINSTANCE, "EINSTANCELAYOUT")
test = n_img.mfree < needed_mem
self._ErrorIf(test, self.ENODEN1, node,
"not enough memory to accomodate instance failovers"
- " should node %s fail", prinode)
+ " should node %s fail (%dMiB needed, %dMiB available)",
+ prinode, needed_mem, n_img.mfree)
- def _VerifyNodeFiles(self, ninfo, nresult, file_list, local_cksum,
- master_files):
- """Verifies and computes the node required file checksums.
+ @classmethod
+ def _VerifyFiles(cls, errorif, nodeinfo, master_node, all_nvinfo,
+ (files_all, files_all_opt, files_mc, files_vm)):
+ """Verifies file checksums collected from all nodes.
- @type ninfo: L{objects.Node}
- @param ninfo: the node to check
- @param nresult: the remote results for the node
- @param file_list: required list of files
- @param local_cksum: dictionary of local files and their checksums
- @param master_files: list of files that only masters should have
+ @param errorif: Callback for reporting errors
+ @param nodeinfo: List of L{objects.Node} objects
+ @param master_node: Name of master node
+ @param all_nvinfo: RPC results
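+    @param files_all: Set of files that must exist on all nodes
+    @param files_all_opt: Set of files that must exist on all nodes or on none
+    @param files_mc: Set of files that should only be on master candidates
+    @param files_vm: Set of files that should only be on VM-capable nodes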
"""
- node = ninfo.name
- _ErrorIf = self._ErrorIf # pylint: disable-msg=C0103
+ node_names = frozenset(node.name for node in nodeinfo)
- remote_cksum = nresult.get(constants.NV_FILELIST, None)
- test = not isinstance(remote_cksum, dict)
- _ErrorIf(test, self.ENODEFILECHECK, node,
- "node hasn't returned file checksum data")
- if test:
- return
+ assert master_node in node_names
+ assert (len(files_all | files_all_opt | files_mc | files_vm) ==
+ sum(map(len, [files_all, files_all_opt, files_mc, files_vm]))), \
+ "Found file listed in more than one file list"
- for file_name in file_list:
- node_is_mc = ninfo.master_candidate
- must_have = (file_name not in master_files) or node_is_mc
- # missing
- test1 = file_name not in remote_cksum
- # invalid checksum
- test2 = not test1 and remote_cksum[file_name] != local_cksum[file_name]
- # existing and good
- test3 = not test1 and remote_cksum[file_name] == local_cksum[file_name]
- _ErrorIf(test1 and must_have, self.ENODEFILECHECK, node,
- "file '%s' missing", file_name)
- _ErrorIf(test2 and must_have, self.ENODEFILECHECK, node,
- "file '%s' has wrong checksum", file_name)
- # not candidate and this is not a must-have file
- _ErrorIf(test2 and not must_have, self.ENODEFILECHECK, node,
- "file '%s' should not exist on non master"
- " candidates (and the file is outdated)", file_name)
- # all good, except non-master/non-must have combination
- _ErrorIf(test3 and not must_have, self.ENODEFILECHECK, node,
- "file '%s' should not exist"
- " on non master candidates", file_name)
+ # Define functions determining which nodes to consider for a file
+ file2nodefn = dict([(filename, fn)
+ for (files, fn) in [(files_all, None),
+ (files_all_opt, None),
+ (files_mc, lambda node: (node.master_candidate or
+ node.name == master_node)),
+ (files_vm, lambda node: node.vm_capable)]
+ for filename in files])
+
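+    # fileinfo: filename -> {checksum: set of node names}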
+ fileinfo = dict((filename, {}) for filename in file2nodefn.keys())
+
+ for node in nodeinfo:
+ nresult = all_nvinfo[node.name]
+
+ if nresult.fail_msg or not nresult.payload:
+ node_files = None
+ else:
+ node_files = nresult.payload.get(constants.NV_FILELIST, None)
+
+ test = not (node_files and isinstance(node_files, dict))
+ errorif(test, cls.ENODEFILECHECK, node.name,
+ "Node did not return file checksum data")
+ if test:
+ continue
+
+ for (filename, checksum) in node_files.items():
+ # Check if the file should be considered for a node
+ fn = file2nodefn[filename]
+ if fn is None or fn(node):
+ fileinfo[filename].setdefault(checksum, set()).add(node.name)
+
+ for (filename, checksums) in fileinfo.items():
+ assert compat.all(len(i) > 10 for i in checksums), "Invalid checksum"
+
+ # Nodes having the file
+ with_file = frozenset(node_name
+ for nodes in fileinfo[filename].values()
+ for node_name in nodes)
+
+ # Nodes missing file
+ missing_file = node_names - with_file
+
+ if filename in files_all_opt:
+ # All or no nodes
+ errorif(missing_file and missing_file != node_names,
+ cls.ECLUSTERFILECHECK, None,
+ "File %s is optional, but it must exist on all or no nodes (not"
+ " found on %s)",
+ filename, utils.CommaJoin(utils.NiceSort(missing_file)))
+ else:
+ errorif(missing_file, cls.ECLUSTERFILECHECK, None,
+ "File %s is missing from node(s) %s", filename,
+ utils.CommaJoin(utils.NiceSort(missing_file)))
+
+ # See if there are multiple versions of the file
+ test = len(checksums) > 1
+ if test:
+ variants = ["variant %s on %s" %
+ (idx + 1, utils.CommaJoin(utils.NiceSort(nodes)))
+ for (idx, (checksum, nodes)) in
+ enumerate(sorted(checksums.items()))]
+ else:
+ variants = []
+
+ errorif(test, cls.ECLUSTERFILECHECK, None,
+ "File %s found with %s different checksums (%s)",
+ filename, len(checksums), "; ".join(variants))
def _VerifyNodeDrbd(self, ninfo, nresult, instanceinfo, drbd_helper,
drbd_map):
node_vol_should = {}
# FIXME: verify OS list
+
+ # File verification
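+    # filemap is a tuple of (files_all, files_all_opt, files_mc, files_vm)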
+ filemap = _ComputeAncillaryFiles(cluster, False)
+
# do local checksums
- master_files = [constants.CLUSTER_CONF_FILE]
master_node = self.master_node = self.cfg.GetMasterNode()
master_ip = self.cfg.GetMasterIP()
- file_names = ssconf.SimpleStore().GetFileList()
- file_names.extend(constants.ALL_CERT_FILES)
- file_names.extend(master_files)
- if cluster.modify_etc_hosts:
- file_names.append(constants.ETC_HOSTS)
-
- local_checksums = utils.FingerprintFiles(file_names)
-
# Compute the set of hypervisor parameters
hvp_data = []
for hv_name in hypervisors:
feedback_fn("* Gathering data (%d nodes)" % len(nodelist))
node_verify_param = {
- constants.NV_FILELIST: file_names,
+ constants.NV_FILELIST:
+ utils.UniqueSequence(filename
+ for files in filemap
+ for filename in files),
constants.NV_NODELIST: [node.name for node in nodeinfo
if not node.offline],
constants.NV_HYPERVISOR: hypervisors,
feedback_fn("* Gathering disk information (%s nodes)" % len(nodelist))
instdisk = self._CollectDiskInfo(nodelist, node_image, instanceinfo)
+ feedback_fn("* Verifying configuration file consistency")
+ self._VerifyFiles(_ErrorIf, nodeinfo, master_node, all_nvinfo, filemap)
+
feedback_fn("* Verifying node status")
refos_img = None
nimg.call_ok = self._VerifyNode(node_i, nresult)
self._VerifyNodeTime(node_i, nresult, nvinfo_starttime, nvinfo_endtime)
self._VerifyNodeNetwork(node_i, nresult)
- self._VerifyNodeFiles(node_i, nresult, file_names, local_checksums,
- master_files)
-
self._VerifyOob(node_i, nresult)
if nimg.vm_capable:
self.ENODERPC, pnode, "instance %s, connection to"
" primary node failed", instance)
- _ErrorIf(pnode_img.offline, self.EINSTANCEBADNODE, instance,
- "instance lives on offline node %s", inst_config.primary_node)
+ _ErrorIf(inst_config.admin_up and pnode_img.offline,
+ self.EINSTANCEBADNODE, instance,
+ "instance is marked as running and lives on offline node %s",
+ inst_config.primary_node)
# If the instance is non-redundant we cannot survive losing its primary
# node, so we are not N+1 compliant. On the other hand we have no disk
utils.ForceDictType(self.op.ndparams, constants.NDS_PARAMETER_TYPES)
self.new_ndparams = cluster.SimpleFillND(self.op.ndparams)
+ # TODO: we need a more general way to handle resetting
+ # cluster-level parameters to default values
+ if self.new_ndparams["oob_program"] == "":
+ self.new_ndparams["oob_program"] = \
+ constants.NDC_DEFAULTS[constants.ND_OOB_PROGRAM]
+
if self.op.nicparams:
utils.ForceDictType(self.op.nicparams, constants.NICS_PARAMETER_TYPES)
self.new_nicparams = cluster.SimpleFillNIC(self.op.nicparams)
lu.proc.LogWarning(msg)
+def _ComputeAncillaryFiles(cluster, redist):
+ """Compute files external to Ganeti which need to be consistent.
+
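+  @type cluster: L{objects.Cluster}
+  @param cluster: Cluster configuration object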
+ @type redist: boolean
+ @param redist: Whether to include files which need to be redistributed
+
+ """
+ # Compute files for all nodes
+ files_all = set([
+ constants.SSH_KNOWN_HOSTS_FILE,
+ constants.CONFD_HMAC_KEY,
+ constants.CLUSTER_DOMAIN_SECRET_FILE,
+ ])
+
+ if not redist:
+ files_all.update(constants.ALL_CERT_FILES)
+ files_all.update(ssconf.SimpleStore().GetFileList())
+
+ if cluster.modify_etc_hosts:
+ files_all.add(constants.ETC_HOSTS)
+
+ # Files which must either exist on all nodes or on none
+ files_all_opt = set([
+ constants.RAPI_USERS_FILE,
+ ])
+
+ # Files which should only be on master candidates
+ files_mc = set()
+ if not redist:
+ files_mc.add(constants.CLUSTER_CONF_FILE)
+
+ # Files which should only be on VM-capable nodes
+ files_vm = set(filename
+ for hv_name in cluster.enabled_hypervisors
+ for filename in hypervisor.GetHypervisor(hv_name).GetAncillaryFiles())
+
+ # Filenames must be unique
+ assert (len(files_all | files_all_opt | files_mc | files_vm) ==
+ sum(map(len, [files_all, files_all_opt, files_mc, files_vm]))), \
+ "Found file listed in more than one file list"
+
+ return (files_all, files_all_opt, files_mc, files_vm)
+
+
def _RedistributeAncillaryFiles(lu, additional_nodes=None, additional_vm=True):
"""Distribute additional files which are part of the cluster configuration.
@param additional_vm: whether the additional nodes are vm-capable or not
"""
- # 1. Gather target nodes
- myself = lu.cfg.GetNodeInfo(lu.cfg.GetMasterNode())
- dist_nodes = lu.cfg.GetOnlineNodeList()
- nvm_nodes = lu.cfg.GetNonVmCapableNodeList()
- vm_nodes = [name for name in dist_nodes if name not in nvm_nodes]
+ # Gather target nodes
+ cluster = lu.cfg.GetClusterInfo()
+ master_info = lu.cfg.GetNodeInfo(lu.cfg.GetMasterNode())
+
+ online_nodes = lu.cfg.GetOnlineNodeList()
+ vm_nodes = lu.cfg.GetVmCapableNodeList()
+
if additional_nodes is not None:
- dist_nodes.extend(additional_nodes)
+ online_nodes.extend(additional_nodes)
if additional_vm:
vm_nodes.extend(additional_nodes)
- if myself.name in dist_nodes:
- dist_nodes.remove(myself.name)
- if myself.name in vm_nodes:
- vm_nodes.remove(myself.name)
-
- # 2. Gather files to distribute
- dist_files = set([constants.ETC_HOSTS,
- constants.SSH_KNOWN_HOSTS_FILE,
- constants.RAPI_CERT_FILE,
- constants.RAPI_USERS_FILE,
- constants.CONFD_HMAC_KEY,
- constants.CLUSTER_DOMAIN_SECRET_FILE,
- ])
-
- vm_files = set()
- enabled_hypervisors = lu.cfg.GetClusterInfo().enabled_hypervisors
- for hv_name in enabled_hypervisors:
- hv_class = hypervisor.GetHypervisor(hv_name)
- vm_files.update(hv_class.GetAncillaryFiles())
-
- # 3. Perform the files upload
- for fname in dist_files:
- _UploadHelper(lu, dist_nodes, fname)
- for fname in vm_files:
- _UploadHelper(lu, vm_nodes, fname)
+
+ # Never distribute to master node
+ for nodelist in [online_nodes, vm_nodes]:
+ if master_info.name in nodelist:
+ nodelist.remove(master_info.name)
+
+ # Gather file lists
+ (files_all, files_all_opt, files_mc, files_vm) = \
+ _ComputeAncillaryFiles(cluster, True)
+
+ # Never re-distribute configuration file from here
+ assert not (constants.CLUSTER_CONF_FILE in files_all or
+ constants.CLUSTER_CONF_FILE in files_vm)
+ assert not files_mc, "Master candidates not handled in this function"
+
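+  # Optional files are distributed to the same nodes as the required ones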
+ filemap = [
+ (online_nodes, files_all),
+ (online_nodes, files_all_opt),
+ (vm_nodes, files_vm),
+ ]
+
+ # Upload the files
+ for (node_list, files) in filemap:
+ for fname in files:
+ _UploadHelper(lu, node_list, fname)
class LUClusterRedistConf(NoHooksLU):
bad_nodes.append(name)
elif result.payload:
for inst in result.payload:
- if all_info[inst].primary_node == name:
- live_data.update(result.payload)
+ if inst in all_info:
+ if all_info[inst].primary_node == name:
+ live_data.update(result.payload)
+ else:
+ wrongnode_inst.add(inst)
else:
- wrongnode_inst.add(inst)
+ # orphan instance; we don't list it here as we don't
+ # handle this case yet in the output of instance listing
+ logging.warning("Orphan instance '%s' found on node %s",
+ inst, name)
# else no instance is alive
else:
live_data = {}
if query.IQ_DISKUSAGE in self.requested_data:
disk_usage = dict((inst.name,
_ComputeDiskSize(inst.disk_template,
- [{"size": disk.size}
+ [{constants.IDISK_SIZE: disk.size}
for disk in inst.disks]))
for inst in instance_list)
else:
self.needed_locks[locking.LEVEL_NODE] = []
self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
+ ignore_consistency = self.op.ignore_consistency
+ shutdown_timeout = self.op.shutdown_timeout
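+    # Failover is implemented on top of the migration tasklet (failover=True)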
+ self._migrater = TLMigrateInstance(self, self.op.instance_name,
+ cleanup=False,
+ iallocator=self.op.iallocator,
+ target_node=self.op.target_node,
+ failover=True,
+ ignore_consistency=ignore_consistency,
+ shutdown_timeout=shutdown_timeout)
+ self.tasklets = [self._migrater]
+
def DeclareLocks(self, level):
if level == locking.LEVEL_NODE:
instance = self.context.cfg.GetInstanceInfo(self.op.instance_name)
This runs on master, primary and secondary nodes of the instance.
"""
- instance = self.instance
+ instance = self._migrater.instance
source_node = instance.primary_node
+ target_node = self._migrater.target_node
env = {
"IGNORE_CONSISTENCY": self.op.ignore_consistency,
"SHUTDOWN_TIMEOUT": self.op.shutdown_timeout,
"OLD_PRIMARY": source_node,
- "NEW_PRIMARY": self.op.target_node,
+ "NEW_PRIMARY": target_node,
}
if instance.disk_template in constants.DTS_INT_MIRROR:
"""Build hooks nodes.
"""
- nl = [self.cfg.GetMasterNode()] + list(self.instance.secondary_nodes)
- return (nl, nl + [self.instance.primary_node])
-
- def CheckPrereq(self):
- """Check prerequisites.
-
- This checks that the instance is in the cluster.
-
- """
- self.instance = instance = self.cfg.GetInstanceInfo(self.op.instance_name)
- assert self.instance is not None, \
- "Cannot retrieve locked instance %s" % self.op.instance_name
-
- bep = self.cfg.GetClusterInfo().FillBE(instance)
- if instance.disk_template not in constants.DTS_MIRRORED:
- raise errors.OpPrereqError("Instance's disk layout is not"
- " mirrored, cannot failover.",
- errors.ECODE_STATE)
-
- if instance.disk_template in constants.DTS_EXT_MIRROR:
- _CheckIAllocatorOrNode(self, "iallocator", "target_node")
- if self.op.iallocator:
- self._RunAllocator()
- # Release all unnecessary node locks
- nodes_keep = [instance.primary_node, self.op.target_node]
- nodes_rel = [node for node in self.acquired_locks[locking.LEVEL_NODE]
- if node not in nodes_keep]
- self.context.glm.release(locking.LEVEL_NODE, nodes_rel)
- self.acquired_locks[locking.LEVEL_NODE] = nodes_keep
-
- # self.op.target_node is already populated, either directly or by the
- # iallocator run
- target_node = self.op.target_node
-
- else:
- secondary_nodes = instance.secondary_nodes
- if not secondary_nodes:
- raise errors.ConfigurationError("No secondary node but using"
- " %s disk template" %
- instance.disk_template)
- target_node = secondary_nodes[0]
-
- if self.op.iallocator or (self.op.target_node and
- self.op.target_node != target_node):
- raise errors.OpPrereqError("Instances with disk template %s cannot"
- " be failed over to arbitrary nodes"
- " (neither an iallocator nor a target"
- " node can be passed)" %
- instance.disk_template, errors.ECODE_INVAL)
- _CheckNodeOnline(self, target_node)
- _CheckNodeNotDrained(self, target_node)
-
- # Save target_node so that we can use it in BuildHooksEnv
- self.op.target_node = target_node
-
- if instance.admin_up:
- # check memory requirements on the secondary node
- _CheckNodeFreeMemory(self, target_node, "failing over instance %s" %
- instance.name, bep[constants.BE_MEMORY],
- instance.hypervisor)
- else:
- self.LogInfo("Not checking memory on the secondary node as"
- " instance will not be started")
-
- # check bridge existance
- _CheckInstanceBridgesExist(self, instance, node=target_node)
-
- def Exec(self, feedback_fn):
- """Failover an instance.
-
- The failover is done by shutting it down on its present node and
- starting it on the secondary.
-
- """
- instance = self.instance
- primary_node = self.cfg.GetNodeInfo(instance.primary_node)
-
- source_node = instance.primary_node
- target_node = self.op.target_node
-
- if instance.admin_up:
- feedback_fn("* checking disk consistency between source and target")
- for dev in instance.disks:
- # for drbd, these are drbd over lvm
- if not _CheckDiskConsistency(self, dev, target_node, False):
- if not self.op.ignore_consistency:
- raise errors.OpExecError("Disk %s is degraded on target node,"
- " aborting failover." % dev.iv_name)
- else:
- feedback_fn("* not checking disk consistency as instance is not running")
-
- feedback_fn("* shutting down instance on source node")
- logging.info("Shutting down instance %s on node %s",
- instance.name, source_node)
-
- result = self.rpc.call_instance_shutdown(source_node, instance,
- self.op.shutdown_timeout)
- msg = result.fail_msg
- if msg:
- if self.op.ignore_consistency or primary_node.offline:
- self.proc.LogWarning("Could not shutdown instance %s on node %s."
- " Proceeding anyway. Please make sure node"
- " %s is down. Error details: %s",
- instance.name, source_node, source_node, msg)
- else:
- raise errors.OpExecError("Could not shutdown instance %s on"
- " node %s: %s" %
- (instance.name, source_node, msg))
-
- feedback_fn("* deactivating the instance's disks on source node")
- if not _ShutdownInstanceDisks(self, instance, ignore_primary=True):
- raise errors.OpExecError("Can't shut down the instance's disks.")
-
- instance.primary_node = target_node
- # distribute new instance config to the other nodes
- self.cfg.Update(instance, feedback_fn)
-
- # Only start the instance if it's marked as up
- if instance.admin_up:
- feedback_fn("* activating the instance's disks on target node")
- logging.info("Starting instance %s on node %s",
- instance.name, target_node)
-
- disks_ok, _ = _AssembleInstanceDisks(self, instance,
- ignore_secondaries=True)
- if not disks_ok:
- _ShutdownInstanceDisks(self, instance)
- raise errors.OpExecError("Can't activate the instance's disks")
-
- feedback_fn("* starting the instance on the target node")
- result = self.rpc.call_instance_start(target_node, instance, None, None)
- msg = result.fail_msg
- if msg:
- _ShutdownInstanceDisks(self, instance)
- raise errors.OpExecError("Could not start instance %s on node %s: %s" %
- (instance.name, target_node, msg))
-
- def _RunAllocator(self):
- """Run the allocator based on input opcode.
-
- """
- ial = IAllocator(self.cfg, self.rpc,
- mode=constants.IALLOCATOR_MODE_RELOC,
- name=self.instance.name,
- # TODO See why hail breaks with a single node below
- relocate_from=[self.instance.primary_node,
- self.instance.primary_node],
- )
-
- ial.Run(self.op.iallocator)
-
- if not ial.success:
- raise errors.OpPrereqError("Can't compute nodes using"
- " iallocator '%s': %s" %
- (self.op.iallocator, ial.info),
- errors.ECODE_NORES)
- if len(ial.result) != ial.required_nodes:
- raise errors.OpPrereqError("iallocator '%s' returned invalid number"
- " of nodes (%s), required %s" %
- (self.op.iallocator, len(ial.result),
- ial.required_nodes), errors.ECODE_FAULT)
- self.op.target_node = ial.result[0]
- self.LogInfo("Selected nodes for instance %s via iallocator %s: %s",
- self.instance.name, self.op.iallocator,
- utils.CommaJoin(ial.result))
+ instance = self._migrater.instance
+ nl = [self.cfg.GetMasterNode()] + list(instance.secondary_nodes)
+ return (nl, nl + [instance.primary_node])
class LUInstanceMigrate(LogicalUnit):
self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
self._migrater = TLMigrateInstance(self, self.op.instance_name,
- self.op.cleanup, self.op.iallocator,
- self.op.target_node)
+ cleanup=self.op.cleanup,
+ iallocator=self.op.iallocator,
+ target_node=self.op.target_node,
+ failover=False,
+ fallback=self.op.allow_failover)
self.tasklets = [self._migrater]
def DeclareLocks(self, level):
logging.debug("Migrating instance %s", inst.name)
names.append(inst.name)
- tasklets.append(TLMigrateInstance(self, inst.name, False,
- self.op.iallocator, None))
+ tasklets.append(TLMigrateInstance(self, inst.name, cleanup=False,
+ iallocator=self.op.iallocator,
+                                        target_node=None))
if inst.disk_template in constants.DTS_EXT_MIRROR:
# We need to lock all nodes, as the iallocator will choose the
@type live: boolean
@ivar live: whether the migration will be done live or non-live;
      this variable is initialized only after CheckPrereq has run
+ @type cleanup: boolean
+  @ivar cleanup: Whether we are cleaning up a failed migration
+ @type iallocator: string
+ @ivar iallocator: The iallocator used to determine target_node
+ @type target_node: string
+ @ivar target_node: If given, the target_node to reallocate the instance to
+ @type failover: boolean
+ @ivar failover: Whether operation results in failover or migration
+ @type fallback: boolean
+  @ivar fallback: Whether fallback to failover is allowed if migration is not
+ possible
+ @type ignore_consistency: boolean
+  @ivar ignore_consistency: Whether to ignore consistency between source
+ and target node
+ @type shutdown_timeout: int
+  @ivar shutdown_timeout: Timeout for the instance shutdown when failing over
"""
- def __init__(self, lu, instance_name, cleanup,
- iallocator=None, target_node=None):
+ def __init__(self, lu, instance_name, cleanup=False, iallocator=None,
+ target_node=None, failover=False, fallback=False,
+ ignore_consistency=False,
+ shutdown_timeout=constants.DEFAULT_SHUTDOWN_TIMEOUT):
"""Initializes this class.
"""
self.live = False # will be overridden later
self.iallocator = iallocator
self.target_node = target_node
+ self.failover = failover
+ self.fallback = fallback
+ self.ignore_consistency = ignore_consistency
+ self.shutdown_timeout = shutdown_timeout
def CheckPrereq(self):
"""Check prerequisites.
assert instance is not None
self.instance = instance
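+    # A stopped instance cannot be migrated; if a fallback to failover is
+    # allowed, switch to failover right away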
+ if (not self.cleanup and not instance.admin_up and not self.failover and
+ self.fallback):
+ self.lu.LogInfo("Instance is marked down, fallback allowed, switching"
+ " to failover")
+ self.failover = True
+
if instance.disk_template not in constants.DTS_MIRRORED:
+ if self.failover:
+ text = "failovers"
+ else:
+ text = "migrations"
raise errors.OpPrereqError("Instance's disk layout '%s' does not allow"
- " migrations" % instance.disk_template,
+ " %s" % (instance.disk_template, text),
errors.ECODE_STATE)
if instance.disk_template in constants.DTS_EXT_MIRROR:
" %s disk template" %
instance.disk_template)
target_node = secondary_nodes[0]
- if self.lu.op.iallocator or (self.lu.op.target_node and
- self.lu.op.target_node != target_node):
+ if self.iallocator or (self.target_node and
+ self.target_node != target_node):
+ if self.failover:
+ text = "failed over"
+ else:
+ text = "migrated"
raise errors.OpPrereqError("Instances with disk template %s cannot"
- " be migrated over to arbitrary nodes"
+                                  " be %s to arbitrary nodes"
" (neither an iallocator nor a target"
" node can be passed)" %
- instance.disk_template, errors.ECODE_INVAL)
+                                  (instance.disk_template, text),
+ errors.ECODE_INVAL)
i_be = self.cfg.GetClusterInfo().FillBE(instance)
# check memory requirements on the secondary node
- _CheckNodeFreeMemory(self.lu, target_node, "migrating instance %s" %
- instance.name, i_be[constants.BE_MEMORY],
- instance.hypervisor)
+ if not self.failover or instance.admin_up:
+ _CheckNodeFreeMemory(self.lu, target_node, "migrating instance %s" %
+ instance.name, i_be[constants.BE_MEMORY],
+ instance.hypervisor)
+ else:
+ self.lu.LogInfo("Not checking memory on the secondary node as"
+ " instance will not be started")
# check bridge existance
_CheckInstanceBridgesExist(self.lu, instance, node=target_node)
if not self.cleanup:
_CheckNodeNotDrained(self.lu, target_node)
- result = self.rpc.call_instance_migratable(instance.primary_node,
- instance)
- result.Raise("Can't migrate, please use failover",
- prereq=True, ecode=errors.ECODE_STATE)
+ if not self.failover:
+ result = self.rpc.call_instance_migratable(instance.primary_node,
+ instance)
+ if result.fail_msg and self.fallback:
+ self.lu.LogInfo("Can't migrate, instance offline, fallback to"
+ " failover")
+ self.failover = True
+ else:
+ result.Raise("Can't migrate, please use failover",
+ prereq=True, ecode=errors.ECODE_STATE)
+ assert not (self.failover and self.cleanup)
def _RunAllocator(self):
"""Run the allocator based on input opcode.
self.instance_name, self.iallocator,
utils.CommaJoin(ial.result))
- if self.lu.op.live is not None and self.lu.op.mode is not None:
- raise errors.OpPrereqError("Only one of the 'live' and 'mode'"
- " parameters are accepted",
- errors.ECODE_INVAL)
- if self.lu.op.live is not None:
- if self.lu.op.live:
- self.lu.op.mode = constants.HT_MIGRATION_LIVE
- else:
- self.lu.op.mode = constants.HT_MIGRATION_NONLIVE
- # reset the 'live' parameter to None so that repeated
- # invocations of CheckPrereq do not raise an exception
- self.lu.op.live = None
- elif self.lu.op.mode is None:
- # read the default value from the hypervisor
- i_hv = self.cfg.GetClusterInfo().FillHV(self.instance, skip_globals=False)
- self.lu.op.mode = i_hv[constants.HV_MIGRATION_MODE]
-
- self.live = self.lu.op.mode == constants.HT_MIGRATION_LIVE
+ if not self.failover:
+ if self.lu.op.live is not None and self.lu.op.mode is not None:
+ raise errors.OpPrereqError("Only one of the 'live' and 'mode'"
+ " parameters are accepted",
+ errors.ECODE_INVAL)
+ if self.lu.op.live is not None:
+ if self.lu.op.live:
+ self.lu.op.mode = constants.HT_MIGRATION_LIVE
+ else:
+ self.lu.op.mode = constants.HT_MIGRATION_NONLIVE
+ # reset the 'live' parameter to None so that repeated
+ # invocations of CheckPrereq do not raise an exception
+ self.lu.op.live = None
+ elif self.lu.op.mode is None:
+ # read the default value from the hypervisor
+ i_hv = self.cfg.GetClusterInfo().FillHV(self.instance,
+ skip_globals=False)
+ self.lu.op.mode = i_hv[constants.HV_MIGRATION_MODE]
+
+ self.live = self.lu.op.mode == constants.HT_MIGRATION_LIVE
+ else:
+ # Failover is never live
+ self.live = False
def _WaitUntilSync(self):
"""Poll with custom rpc for disk sync.
(instance.name, msg))
self.feedback_fn("* migrating instance to %s" % target_node)
- time.sleep(10)
result = self.rpc.call_instance_migrate(source_node, instance,
self.nodes_ip[target_node],
self.live)
self._RevertDiskStatus()
raise errors.OpExecError("Could not migrate instance %s: %s" %
(instance.name, msg))
- time.sleep(10)
instance.primary_node = target_node
# distribute new instance config to the other nodes
self.feedback_fn("* done")
+ def _ExecFailover(self):
+ """Failover an instance.
+
+ The failover is done by shutting it down on its present node and
+ starting it on the secondary.
+
+ """
+ instance = self.instance
+ primary_node = self.cfg.GetNodeInfo(instance.primary_node)
+
+ source_node = instance.primary_node
+ target_node = self.target_node
+
+ if instance.admin_up:
+ self.feedback_fn("* checking disk consistency between source and target")
+ for dev in instance.disks:
+ # for drbd, these are drbd over lvm
+ if not _CheckDiskConsistency(self, dev, target_node, False):
+ if not self.ignore_consistency:
+ raise errors.OpExecError("Disk %s is degraded on target node,"
+ " aborting failover." % dev.iv_name)
+ else:
+ self.feedback_fn("* not checking disk consistency as instance is not"
+ " running")
+
+ self.feedback_fn("* shutting down instance on source node")
+ logging.info("Shutting down instance %s on node %s",
+ instance.name, source_node)
+
+ result = self.rpc.call_instance_shutdown(source_node, instance,
+ self.shutdown_timeout)
+ msg = result.fail_msg
+ if msg:
+ if self.ignore_consistency or primary_node.offline:
+ self.lu.LogWarning("Could not shutdown instance %s on node %s."
+ " Proceeding anyway. Please make sure node"
+ " %s is down. Error details: %s",
+ instance.name, source_node, source_node, msg)
+ else:
+ raise errors.OpExecError("Could not shutdown instance %s on"
+ " node %s: %s" %
+ (instance.name, source_node, msg))
+
+ self.feedback_fn("* deactivating the instance's disks on source node")
+ if not _ShutdownInstanceDisks(self, instance, ignore_primary=True):
+ raise errors.OpExecError("Can't shut down the instance's disks.")
+
+ instance.primary_node = target_node
+ # distribute new instance config to the other nodes
+ self.cfg.Update(instance, self.feedback_fn)
+
+ # Only start the instance if it's marked as up
+ if instance.admin_up:
+ self.feedback_fn("* activating the instance's disks on target node")
+ logging.info("Starting instance %s on node %s",
+ instance.name, target_node)
+
+ disks_ok, _ = _AssembleInstanceDisks(self, instance,
+ ignore_secondaries=True)
+ if not disks_ok:
+ _ShutdownInstanceDisks(self, instance)
+ raise errors.OpExecError("Can't activate the instance's disks")
+
+ self.feedback_fn("* starting the instance on the target node")
+ result = self.rpc.call_instance_start(target_node, instance, None, None)
+ msg = result.fail_msg
+ if msg:
+ _ShutdownInstanceDisks(self, instance)
+ raise errors.OpExecError("Could not start instance %s on node %s: %s" %
+ (instance.name, target_node, msg))
+
def Exec(self, feedback_fn):
"""Perform the migration.
"""
- feedback_fn("Migrating instance %s" % self.instance.name)
-
self.feedback_fn = feedback_fn
-
self.source_node = self.instance.primary_node
# FIXME: if we implement migrate-to-any in DRBD, this needs fixing
self.target_node: self.cfg.GetNodeInfo(self.target_node).secondary_ip,
}
- if self.cleanup:
- return self._ExecCleanup()
+ if self.failover:
+      feedback_fn("Failing over instance %s" % self.instance.name)
+ self._ExecFailover()
else:
- return self._ExecMigration()
+ feedback_fn("Migrating instance %s" % self.instance.name)
+
+ if self.cleanup:
+ return self._ExecCleanup()
+ else:
+ return self._ExecMigration()
def _CreateBlockDev(lu, node, instance, device, force_create,
for i in range(disk_count)])
for idx, disk in enumerate(disk_info):
disk_index = idx + base_index
- vg = disk.get("vg", vgname)
+ vg = disk.get(constants.IDISK_VG, vgname)
feedback_fn("* disk %i, vg %s, name %s" % (idx, vg, names[idx]))
- disk_dev = objects.Disk(dev_type=constants.LD_LV, size=disk["size"],
+ disk_dev = objects.Disk(dev_type=constants.LD_LV,
+ size=disk[constants.IDISK_SIZE],
logical_id=(vg, names[idx]),
iv_name="disk/%d" % disk_index,
- mode=disk["mode"])
+ mode=disk[constants.IDISK_MODE])
disks.append(disk_dev)
elif template_name == constants.DT_DRBD8:
if len(secondary_nodes) != 1:
names.append(lv_prefix + "_meta")
for idx, disk in enumerate(disk_info):
disk_index = idx + base_index
- vg = disk.get("vg", vgname)
+ vg = disk.get(constants.IDISK_VG, vgname)
disk_dev = _GenerateDRBD8Branch(lu, primary_node, remote_node,
- disk["size"], vg, names[idx*2:idx*2+2],
+ disk[constants.IDISK_SIZE], vg,
+ names[idx * 2:idx * 2 + 2],
"disk/%d" % disk_index,
- minors[idx*2], minors[idx*2+1])
- disk_dev.mode = disk["mode"]
+ minors[idx * 2], minors[idx * 2 + 1])
+ disk_dev.mode = disk[constants.IDISK_MODE]
disks.append(disk_dev)
elif template_name == constants.DT_FILE:
if len(secondary_nodes) != 0:
for idx, disk in enumerate(disk_info):
disk_index = idx + base_index
- disk_dev = objects.Disk(dev_type=constants.LD_FILE, size=disk["size"],
+ disk_dev = objects.Disk(dev_type=constants.LD_FILE,
+ size=disk[constants.IDISK_SIZE],
iv_name="disk/%d" % disk_index,
logical_id=(file_driver,
"%s/disk%d" % (file_storage_dir,
disk_index)),
- mode=disk["mode"])
+ mode=disk[constants.IDISK_MODE])
disks.append(disk_dev)
elif template_name == constants.DT_SHARED_FILE:
if len(secondary_nodes) != 0:
for idx, disk in enumerate(disk_info):
disk_index = idx + base_index
- disk_dev = objects.Disk(dev_type=constants.LD_FILE, size=disk["size"],
+ disk_dev = objects.Disk(dev_type=constants.LD_FILE,
+ size=disk[constants.IDISK_SIZE],
iv_name="disk/%d" % disk_index,
logical_id=(file_driver,
"%s/disk%d" % (file_storage_dir,
disk_index)),
- mode=disk["mode"])
+ mode=disk[constants.IDISK_MODE])
disks.append(disk_dev)
elif template_name == constants.DT_BLOCK:
if len(secondary_nodes) != 0:
for idx, disk in enumerate(disk_info):
disk_index = idx + base_index
- disk_dev = objects.Disk(dev_type=constants.LD_BLOCKDEV, size=disk["size"],
+ disk_dev = objects.Disk(dev_type=constants.LD_BLOCKDEV,
+ size=disk[constants.IDISK_SIZE],
logical_id=(constants.BLOCKDEV_DRIVER_MANUAL,
- disk["adopt"]),
+ disk[constants.IDISK_ADOPT]),
iv_name="disk/%d" % disk_index,
- mode=disk["mode"])
+ mode=disk[constants.IDISK_MODE])
disks.append(disk_dev)
else:
"""
def _compute(disks, payload):
- """Universal algorithm
+ """Universal algorithm.
"""
vgs = {}
for disk in disks:
- vgs[disk["vg"]] = vgs.get("vg", 0) + disk["size"] + payload
+      vg = disk[constants.IDISK_VG]
+      vgs[vg] = vgs.get(vg, 0) + disk[constants.IDISK_SIZE] + payload
return vgs
# Required free disk space as a function of disk and swap space
req_size_dict = {
constants.DT_DISKLESS: None,
- constants.DT_PLAIN: sum(d["size"] for d in disks),
+ constants.DT_PLAIN: sum(d[constants.IDISK_SIZE] for d in disks),
# 128 MB are added for drbd metadata for each disk
- constants.DT_DRBD8: sum(d["size"] + 128 for d in disks),
+ constants.DT_DRBD8: sum(d[constants.IDISK_SIZE] + 128 for d in disks),
constants.DT_FILE: None,
constants.DT_SHARED_FILE: 0,
constants.DT_BLOCK: 0,
has_adopt = has_no_adopt = False
for disk in self.op.disks:
utils.ForceDictType(disk, constants.IDISK_PARAMS_TYPES)
- if "adopt" in disk:
+ if constants.IDISK_ADOPT in disk:
has_adopt = True
else:
has_no_adopt = True
vcpus=self.be_full[constants.BE_VCPUS],
nics=_NICListToTuple(self, self.nics),
disk_template=self.op.disk_template,
- disks=[(d["size"], d["mode"]) for d in self.disks],
+ disks=[(d[constants.IDISK_SIZE], d[constants.IDISK_MODE])
+ for d in self.disks],
bep=self.be_full,
hvp=self.hv_full,
hypervisor_name=self.op.hypervisor,
# TODO: import the disk iv_name too
for idx in range(einfo.getint(constants.INISECT_INS, "disk_count")):
disk_sz = einfo.getint(constants.INISECT_INS, "disk%d_size" % idx)
- disks.append({"size": disk_sz})
+ disks.append({constants.IDISK_SIZE: disk_sz})
self.op.disks = disks
else:
raise errors.OpPrereqError("No disk info specified and the export"
# NIC buildup
self.nics = []
for idx, nic in enumerate(self.op.nics):
- nic_mode_req = nic.get("mode", None)
+ nic_mode_req = nic.get(constants.INIC_MODE, None)
nic_mode = nic_mode_req
if nic_mode is None:
nic_mode = cluster.nicparams[constants.PP_DEFAULT][constants.NIC_MODE]
default_ip_mode = constants.VALUE_NONE
# ip validity checks
- ip = nic.get("ip", default_ip_mode)
+ ip = nic.get(constants.INIC_IP, default_ip_mode)
if ip is None or ip.lower() == constants.VALUE_NONE:
nic_ip = None
elif ip.lower() == constants.VALUE_AUTO:
errors.ECODE_INVAL)
# MAC address verification
- mac = nic.get("mac", constants.VALUE_AUTO)
+ mac = nic.get(constants.INIC_MAC, constants.VALUE_AUTO)
if mac not in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
mac = utils.NormalizeAndValidateMac(mac)
self.nics.append(objects.NIC(mac=mac, ip=nic_ip, nicparams=nicparams))
# disk checks/pre-build
+ default_vg = self.cfg.GetVGName()
self.disks = []
for disk in self.op.disks:
- mode = disk.get("mode", constants.DISK_RDWR)
+ mode = disk.get(constants.IDISK_MODE, constants.DISK_RDWR)
if mode not in constants.DISK_ACCESS_SET:
raise errors.OpPrereqError("Invalid disk access mode '%s'" %
mode, errors.ECODE_INVAL)
- size = disk.get("size", None)
+ size = disk.get(constants.IDISK_SIZE, None)
if size is None:
raise errors.OpPrereqError("Missing disk size", errors.ECODE_INVAL)
try:
except (TypeError, ValueError):
raise errors.OpPrereqError("Invalid disk size '%s'" % size,
errors.ECODE_INVAL)
- vg = disk.get("vg", self.cfg.GetVGName())
- new_disk = {"size": size, "mode": mode, "vg": vg}
- if "adopt" in disk:
- new_disk["adopt"] = disk["adopt"]
+ new_disk = {
+ constants.IDISK_SIZE: size,
+ constants.IDISK_MODE: mode,
+ constants.IDISK_VG: disk.get(constants.IDISK_VG, default_vg),
+ }
+ if constants.IDISK_ADOPT in disk:
+ new_disk[constants.IDISK_ADOPT] = disk[constants.IDISK_ADOPT]
self.disks.append(new_disk)
if self.op.mode == constants.INSTANCE_IMPORT:
_CheckNodesFreeDiskPerVG(self, nodenames, req_sizes)
elif self.op.disk_template == constants.DT_PLAIN: # Check the adoption data
- all_lvs = set([i["vg"] + "/" + i["adopt"] for i in self.disks])
+ all_lvs = set(["%s/%s" % (disk[constants.IDISK_VG],
+ disk[constants.IDISK_ADOPT])
+ for disk in self.disks])
if len(all_lvs) != len(self.disks):
raise errors.OpPrereqError("Duplicate volume names given for adoption",
errors.ECODE_INVAL)
errors.ECODE_STATE)
# update the size of disk based on what is found
for dsk in self.disks:
- dsk["size"] = int(float(node_lvs[dsk["vg"] + "/" + dsk["adopt"]][0]))
+ dsk[constants.IDISK_SIZE] = \
+ int(float(node_lvs["%s/%s" % (dsk[constants.IDISK_VG],
+ dsk[constants.IDISK_ADOPT])][0]))
elif self.op.disk_template == constants.DT_BLOCK:
# Normalize and de-duplicate device paths
- all_disks = set([os.path.abspath(i["adopt"]) for i in self.disks])
+ all_disks = set([os.path.abspath(disk[constants.IDISK_ADOPT])
+ for disk in self.disks])
if len(all_disks) != len(self.disks):
raise errors.OpPrereqError("Duplicate disk names given for adoption",
errors.ECODE_INVAL)
utils.CommaJoin(delta),
errors.ECODE_INVAL)
for dsk in self.disks:
- dsk["size"] = int(float(node_disks[dsk["adopt"]]))
+ dsk[constants.IDISK_SIZE] = \
+ int(float(node_disks[dsk[constants.IDISK_ADOPT]]))
_CheckHVParams(self, nodenames, self.op.hypervisor, self.op.hvparams)
rename_to = []
for t_dsk, a_dsk in zip (tmp_disks, self.disks):
rename_to.append(t_dsk.logical_id)
- t_dsk.logical_id = (t_dsk.logical_id[0], a_dsk["adopt"])
+ t_dsk.logical_id = (t_dsk.logical_id[0], a_dsk[constants.IDISK_ADOPT])
self.cfg.SetDiskID(t_dsk, pnode_name)
result = self.rpc.call_blockdev_rename(pnode_name,
zip(tmp_disks, rename_to))
def ExpandNames(self):
self.needed_locks = {}
- self.share_locks = dict.fromkeys(locking.LEVELS, 1)
- if self.op.instances:
- self.wanted_names = []
- for name in self.op.instances:
- full_name = _ExpandInstanceName(self.cfg, name)
- self.wanted_names.append(full_name)
- self.needed_locks[locking.LEVEL_INSTANCE] = self.wanted_names
+ # Use locking if requested or when non-static information is wanted
+ if not (self.op.static or self.op.use_locking):
+ self.LogWarning("Non-static data requested, locks need to be acquired")
+ self.op.use_locking = True
+
+ if self.op.instances or not self.op.use_locking:
+ # Expand instance names right here
+ self.wanted_names = _GetWantedInstances(self, self.op.instances)
else:
+ # Will use acquired locks
self.wanted_names = None
- self.needed_locks[locking.LEVEL_INSTANCE] = locking.ALL_SET
- self.needed_locks[locking.LEVEL_NODE] = []
- self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
+ if self.op.use_locking:
+ self.share_locks = dict.fromkeys(locking.LEVELS, 1)
+
+ if self.wanted_names is None:
+ self.needed_locks[locking.LEVEL_INSTANCE] = locking.ALL_SET
+ else:
+ self.needed_locks[locking.LEVEL_INSTANCE] = self.wanted_names
+
+ self.needed_locks[locking.LEVEL_NODE] = []
+ self.share_locks = dict.fromkeys(locking.LEVELS, 1)
+ self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
def DeclareLocks(self, level):
- if level == locking.LEVEL_NODE:
+ if self.op.use_locking and level == locking.LEVEL_NODE:
self._LockInstancesNodes()
def CheckPrereq(self):
"""
if self.wanted_names is None:
+ assert self.op.use_locking, "Locking was not used"
self.wanted_names = self.acquired_locks[locking.LEVEL_INSTANCE]
- self.wanted_instances = [self.cfg.GetInstanceInfo(name) for name
- in self.wanted_names]
+ self.wanted_instances = [self.cfg.GetInstanceInfo(name)
+ for name in self.wanted_names]
def _ComputeBlockdevStatus(self, node, instance_name, dev):
"""Returns the status of a block device
else:
dev_children = []
- data = {
+ return {
"iv_name": dev.iv_name,
"dev_type": dev.dev_type,
"logical_id": dev.logical_id,
"size": dev.size,
}
- return data
-
def Exec(self, feedback_fn):
"""Gather and return data"""
result = {}
disks = [self._ComputeDiskStatus(instance, None, device)
for device in instance.disks]
- idict = {
+ result[instance.name] = {
"name": instance.name,
"config_state": config_state,
"run_state": remote_state,
"uuid": instance.uuid,
}
- result[instance.name] = idict
-
return result
raise errors.OpPrereqError(msg, errors.ECODE_INVAL)
if disk_op == constants.DDM_ADD:
- mode = disk_dict.setdefault('mode', constants.DISK_RDWR)
+ mode = disk_dict.setdefault(constants.IDISK_MODE, constants.DISK_RDWR)
if mode not in constants.DISK_ACCESS_SET:
raise errors.OpPrereqError("Invalid disk access mode '%s'" % mode,
errors.ECODE_INVAL)
- size = disk_dict.get('size', None)
+ size = disk_dict.get(constants.IDISK_SIZE, None)
if size is None:
raise errors.OpPrereqError("Required disk parameter size missing",
errors.ECODE_INVAL)
except (TypeError, ValueError), err:
raise errors.OpPrereqError("Invalid disk size parameter: %s" %
str(err), errors.ECODE_INVAL)
- disk_dict['size'] = size
+ disk_dict[constants.IDISK_SIZE] = size
else:
# modification of disk
- if 'size' in disk_dict:
+ if constants.IDISK_SIZE in disk_dict:
raise errors.OpPrereqError("Disk size change not possible, use"
" grow-disk", errors.ECODE_INVAL)
raise errors.OpPrereqError(msg, errors.ECODE_INVAL)
# nic_dict should be a dict
- nic_ip = nic_dict.get('ip', None)
+ nic_ip = nic_dict.get(constants.INIC_IP, None)
if nic_ip is not None:
if nic_ip.lower() == constants.VALUE_NONE:
- nic_dict['ip'] = None
+ nic_dict[constants.INIC_IP] = None
else:
if not netutils.IPAddress.IsValid(nic_ip):
raise errors.OpPrereqError("Invalid IP address '%s'" % nic_ip,
errors.ECODE_INVAL)
nic_bridge = nic_dict.get('bridge', None)
- nic_link = nic_dict.get('link', None)
+ nic_link = nic_dict.get(constants.INIC_LINK, None)
if nic_bridge and nic_link:
raise errors.OpPrereqError("Cannot pass 'bridge' and 'link'"
" at the same time", errors.ECODE_INVAL)
elif nic_bridge and nic_bridge.lower() == constants.VALUE_NONE:
nic_dict['bridge'] = None
elif nic_link and nic_link.lower() == constants.VALUE_NONE:
- nic_dict['link'] = None
+ nic_dict[constants.INIC_LINK] = None
if nic_op == constants.DDM_ADD:
- nic_mac = nic_dict.get('mac', None)
+ nic_mac = nic_dict.get(constants.INIC_MAC, None)
if nic_mac is None:
- nic_dict['mac'] = constants.VALUE_AUTO
+ nic_dict[constants.INIC_MAC] = constants.VALUE_AUTO
- if 'mac' in nic_dict:
- nic_mac = nic_dict['mac']
+ if constants.INIC_MAC in nic_dict:
+ nic_mac = nic_dict[constants.INIC_MAC]
if nic_mac not in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
nic_mac = utils.NormalizeAndValidateMac(nic_mac)
this_nic_override = nic_override[idx]
else:
this_nic_override = {}
- if 'ip' in this_nic_override:
- ip = this_nic_override['ip']
+ if constants.INIC_IP in this_nic_override:
+ ip = this_nic_override[constants.INIC_IP]
else:
ip = nic.ip
- if 'mac' in this_nic_override:
- mac = this_nic_override['mac']
+ if constants.INIC_MAC in this_nic_override:
+ mac = this_nic_override[constants.INIC_MAC]
else:
mac = nic.mac
if idx in self.nic_pnew:
link = nicparams[constants.NIC_LINK]
args['nics'].append((ip, mac, mode, link))
if constants.DDM_ADD in nic_override:
- ip = nic_override[constants.DDM_ADD].get('ip', None)
- mac = nic_override[constants.DDM_ADD]['mac']
+ ip = nic_override[constants.DDM_ADD].get(constants.INIC_IP, None)
+ mac = nic_override[constants.DDM_ADD][constants.INIC_MAC]
nicparams = self.nic_pnew[constants.DDM_ADD]
mode = nicparams[constants.NIC_MODE]
link = nicparams[constants.NIC_LINK]
_CheckNodeNotDrained(self, self.op.remote_node)
# FIXME: here we assume that the old instance type is DT_PLAIN
assert instance.disk_template == constants.DT_PLAIN
- disks = [{"size": d.size, "vg": d.logical_id[0]}
+ disks = [{constants.IDISK_SIZE: d.size,
+ constants.IDISK_VG: d.logical_id[0]}
for d in instance.disks]
required = _ComputeDiskSizePerVG(self.op.disk_template, disks)
_CheckNodesFreeDiskPerVG(self, [self.op.remote_node], required)
else:
raise errors.OpPrereqError(msg, errors.ECODE_ENVIRON)
if new_nic_mode == constants.NIC_MODE_ROUTED:
- if 'ip' in nic_dict:
- nic_ip = nic_dict['ip']
+ if constants.INIC_IP in nic_dict:
+ nic_ip = nic_dict[constants.INIC_IP]
else:
nic_ip = old_nic_ip
if nic_ip is None:
raise errors.OpPrereqError('Cannot set the nic ip to None'
' on a routed nic', errors.ECODE_INVAL)
- if 'mac' in nic_dict:
- nic_mac = nic_dict['mac']
+ if constants.INIC_MAC in nic_dict:
+ nic_mac = nic_dict[constants.INIC_MAC]
if nic_mac is None:
raise errors.OpPrereqError('Cannot set the nic mac to None',
errors.ECODE_INVAL)
elif nic_mac in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
# otherwise generate the mac
- nic_dict['mac'] = self.cfg.GenerateMAC(self.proc.GetECId())
+ nic_dict[constants.INIC_MAC] = \
+ self.cfg.GenerateMAC(self.proc.GetECId())
else:
# or validate/reserve the current one
try:
snode = self.op.remote_node
# create a fake disk info for _GenerateDiskTemplate
- disk_info = [{"size": d.size, "mode": d.mode} for d in instance.disks]
+ disk_info = [{constants.IDISK_SIZE: d.size, constants.IDISK_MODE: d.mode}
+ for d in instance.disks]
new_disks = _GenerateDiskTemplate(self, self.op.disk_template,
instance.name, pnode, [snode],
disk_info, None, None, 0, feedback_fn)
(new_disk.size, new_disk.mode)))
else:
# change a given disk
- instance.disks[disk_op].mode = disk_dict['mode']
- result.append(("disk.mode/%d" % disk_op, disk_dict['mode']))
+ instance.disks[disk_op].mode = disk_dict[constants.IDISK_MODE]
+ result.append(("disk.mode/%d" % disk_op,
+ disk_dict[constants.IDISK_MODE]))
if self.op.disk_template:
r_shut = _ShutdownInstanceDisks(self, instance)
result.append(("nic.%d" % len(instance.nics), "remove"))
elif nic_op == constants.DDM_ADD:
# mac and bridge should be set, by now
- mac = nic_dict['mac']
- ip = nic_dict.get('ip', None)
+ mac = nic_dict[constants.INIC_MAC]
+ ip = nic_dict.get(constants.INIC_IP, None)
nicparams = self.nic_pinst[constants.DDM_ADD]
new_nic = objects.NIC(mac=mac, ip=ip, nicparams=nicparams)
instance.nics.append(new_nic)
self.nic_pnew[constants.DDM_ADD][constants.NIC_LINK]
)))
else:
- for key in 'mac', 'ip':
+ for key in (constants.INIC_MAC, constants.INIC_IP):
if key in nic_dict:
setattr(instance.nics[nic_op], key, nic_dict[key])
if nic_op in self.nic_pinst:
This is an abstract class which is the parent of all the other tags LUs.
"""
-
def ExpandNames(self):
+ self.group_uuid = None
self.needed_locks = {}
if self.op.kind == constants.TAG_NODE:
self.op.name = _ExpandNodeName(self.cfg, self.op.name)
elif self.op.kind == constants.TAG_INSTANCE:
self.op.name = _ExpandInstanceName(self.cfg, self.op.name)
self.needed_locks[locking.LEVEL_INSTANCE] = self.op.name
+ elif self.op.kind == constants.TAG_NODEGROUP:
+ self.group_uuid = self.cfg.LookupNodeGroup(self.op.name)
# FIXME: Acquire BGL for cluster tag operations (as of this writing it's
# not possible to acquire the BGL based on opcode parameters)
self.target = self.cfg.GetNodeInfo(self.op.name)
elif self.op.kind == constants.TAG_INSTANCE:
self.target = self.cfg.GetInstanceInfo(self.op.name)
+ elif self.op.kind == constants.TAG_NODEGROUP:
+ self.target = self.cfg.GetNodeGroup(self.group_uuid)
else:
raise errors.OpPrereqError("Wrong tag type requested (%s)" %
str(self.op.kind), errors.ECODE_INVAL)
tgts.extend([("/instances/%s" % i.name, i) for i in ilist])
nlist = cfg.GetAllNodesInfo().values()
tgts.extend([("/nodes/%s" % n.name, n) for n in nlist])
+ tgts.extend(("/nodegroup/%s" % n.name, n)
+ for n in cfg.GetAllNodeGroupsInfo().values())
results = []
for path, target in tgts:
for tag in target.GetTags():
"os": iinfo.os,
"nodes": [iinfo.primary_node] + list(iinfo.secondary_nodes),
"nics": nic_data,
- "disks": [{"size": dsk.size, "mode": dsk.mode} for dsk in iinfo.disks],
+ "disks": [{constants.IDISK_SIZE: dsk.size,
+ constants.IDISK_MODE: dsk.mode}
+ for dsk in iinfo.disks],
"disk_template": iinfo.disk_template,
"hypervisor": iinfo.hypervisor,
}
errors.ECODE_STATE)
self.required_nodes = 1
- disk_sizes = [{'size': disk.size} for disk in instance.disks]
+ disk_sizes = [{constants.IDISK_SIZE: disk.size} for disk in instance.disks]
disk_space = _ComputeDiskSize(instance.disk_template, disk_sizes)
request = {