def _BuildInstanceHookEnv(name, primary_node, secondary_nodes, os_type, status,
memory, vcpus, nics, disk_template, disks,
- bep, hvp, hypervisor_name):
+ bep, hvp, hypervisor_name, tags):
"""Builds instance related env variables for hooks
This builds the hook environment from individual variables.
@param hvp: the hypervisor parameters for the instance
@type hypervisor_name: string
@param hypervisor_name: the hypervisor for the instance
+ @type tags: list
+ @param tags: list of instance tags as strings
@rtype: dict
@return: the hook environment for this instance
env["INSTANCE_DISK_COUNT"] = disk_count
+ if not tags:
+ tags = []
+
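+ # Hook scripts receive the instance tags as a single space-separated string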
+ env["INSTANCE_TAGS"] = " ".join(tags)
+
for source, kind in [(bep, "BE"), (hvp, "HV")]:
for key, value in source.items():
env["INSTANCE_%s_%s" % (kind, key)] = value
'bep': bep,
'hvp': hvp,
'hypervisor_name': instance.hypervisor,
+ 'tags': instance.tags,
}
if override:
args.update(override)
def _VerifyCertificate(filename):
- """Verifies a certificate for LUClusterVerifyConfig.
+ """Verifies a certificate for L{LUClusterVerifyConfig}.
@type filename: string
@param filename: Path to PEM file
"""Verifies the cluster config.
"""
- REQ_BGL = False
+ REQ_BGL = True
def _VerifyHVP(self, hvp_data):
"""Verifies locally the syntax of the hypervisor parameters.
self._ErrorIf(True, self.ECLUSTERCFG, None, msg % str(err))
def ExpandNames(self):
+ # Information can be safely retrieved as the BGL is acquired in exclusive
+ # mode
self.all_group_info = self.cfg.GetAllNodeGroupsInfo()
self.all_node_info = self.cfg.GetAllNodesInfo()
self.all_inst_info = self.cfg.GetAllInstancesInfo()
feedback_fn("* Verifying all nodes belong to an existing group")
# We do this verification here because, should this bogus circumstance
- # occur, it would never be catched by VerifyGroup, which only acts on
+ # occur, it would never be caught by VerifyGroup, which only acts on
# nodes/instances reachable from existing node groups.
dangling_nodes = set(node.name for node in self.all_node_info.values()
# This raises errors.OpPrereqError on its own:
self.group_uuid = self.cfg.LookupNodeGroup(self.op.group_name)
- all_node_info = self.cfg.GetAllNodesInfo()
- all_inst_info = self.cfg.GetAllInstancesInfo()
-
- node_names = set(node.name
- for node in all_node_info.values()
- if node.group == self.group_uuid)
-
- inst_names = [inst.name
- for inst in all_inst_info.values()
- if inst.primary_node in node_names]
-
- # In Exec(), we warn about mirrored instances that have primary and
- # secondary living in separate node groups. To fully verify that
- # volumes for these instances are healthy, we will need to do an
- # extra call to their secondaries. We ensure here those nodes will
- # be locked.
- for inst in inst_names:
- if all_inst_info[inst].disk_template in constants.DTS_INT_MIRROR:
- node_names.update(all_inst_info[inst].secondary_nodes)
+ # Get instances in node group; this is unsafe and needs verification later
+ inst_names = self.cfg.GetNodeGroupInstances(self.group_uuid)
self.needed_locks = {
- locking.LEVEL_NODEGROUP: [self.group_uuid],
- locking.LEVEL_NODE: list(node_names),
locking.LEVEL_INSTANCE: inst_names,
- }
+ locking.LEVEL_NODEGROUP: [self.group_uuid],
+ locking.LEVEL_NODE: [],
+ }
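+ # Node locks are declared later, in DeclareLocks(), once the instance locks
+ # are owned and the secondary nodes of mirrored instances are known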
self.share_locks = dict.fromkeys(locking.LEVELS, 1)
- def CheckPrereq(self):
- self.all_node_info = self.cfg.GetAllNodesInfo()
- self.all_inst_info = self.cfg.GetAllInstancesInfo()
+ def DeclareLocks(self, level):
+ if level == locking.LEVEL_NODE:
+ # Get members of node group; this is unsafe and needs verification later
+ nodes = set(self.cfg.GetNodeGroup(self.group_uuid).members)
+
+ all_inst_info = self.cfg.GetAllInstancesInfo()
- group_nodes = set(node.name
- for node in self.all_node_info.values()
- if node.group == self.group_uuid)
+ # In Exec(), we warn about mirrored instances that have primary and
+ # secondary living in separate node groups. To fully verify that
+ # volumes for these instances are healthy, we will need to do an
+ # extra call to their secondaries. We ensure here those nodes will
+ # be locked.
+ for inst in self.glm.list_owned(locking.LEVEL_INSTANCE):
+ # Important: access only the instances whose lock is owned
+ if all_inst_info[inst].disk_template in constants.DTS_INT_MIRROR:
+ nodes.update(all_inst_info[inst].secondary_nodes)
- group_instances = set(inst.name
- for inst in self.all_inst_info.values()
- if inst.primary_node in group_nodes)
+ self.needed_locks[locking.LEVEL_NODE] = nodes
+
+ def CheckPrereq(self):
+ group_nodes = set(self.cfg.GetNodeGroup(self.group_uuid).members)
+ group_instances = self.cfg.GetNodeGroupInstances(self.group_uuid)
unlocked_nodes = \
group_nodes.difference(self.glm.list_owned(locking.LEVEL_NODE))
unlocked_instances = \
group_instances.difference(self.glm.list_owned(locking.LEVEL_INSTANCE))
if unlocked_nodes:
- raise errors.OpPrereqError("missing lock for nodes: %s" %
+ raise errors.OpPrereqError("Missing lock for nodes: %s" %
utils.CommaJoin(unlocked_nodes))
if unlocked_instances:
- raise errors.OpPrereqError("missing lock for instances: %s" %
+ raise errors.OpPrereqError("Missing lock for instances: %s" %
utils.CommaJoin(unlocked_instances))
+ self.all_node_info = self.cfg.GetAllNodesInfo()
+ self.all_inst_info = self.cfg.GetAllInstancesInfo()
+
self.my_node_names = utils.NiceSort(group_nodes)
self.my_inst_names = utils.NiceSort(group_instances)
all_nvinfo = self.rpc.call_node_verify(self.my_node_names,
node_verify_param,
self.cfg.GetClusterName())
+ nvinfo_endtime = time.time()
+
if self.extra_lv_nodes and vg_name is not None:
extra_lv_nvinfo = \
self.rpc.call_node_verify(self.extra_lv_nodes,
self.cfg.GetClusterName())
else:
extra_lv_nvinfo = {}
- nvinfo_endtime = time.time()
all_drbd_map = self.cfg.ComputeDRBDMap()
mode=constants.IALLOCATOR_MODE_ALLOC,
name=self.op.instance_name,
disk_template=self.op.disk_template,
- tags=[],
+ tags=self.op.tags,
os=self.op.os_type,
vcpus=self.be_full[constants.BE_VCPUS],
memory=self.be_full[constants.BE_MEMORY],
bep=self.be_full,
hvp=self.hv_full,
hypervisor_name=self.op.hypervisor,
+ tags=self.op.tags,
))
return env
nics.append(ndict)
self.op.nics = nics
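+ # Tags stored in the export are only applied when the opcode did not
+ # specify any tags explicitly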
+ if not self.op.tags and einfo.has_option(constants.INISECT_INS, "tags"):
+ self.op.tags = einfo.get(constants.INISECT_INS, "tags").split()
+
if (self.op.hypervisor is None and
einfo.has_option(constants.INISECT_INS, "hypervisor")):
self.op.hypervisor = einfo.get(constants.INISECT_INS, "hypervisor")
+
if einfo.has_section(constants.INISECT_HYP):
# use the export parameters but do not override the ones
# specified by the user
",".join(enabled_hvs)),
errors.ECODE_STATE)
+ # Check tag validity
+ for tag in self.op.tags:
+ objects.TaggableObject.ValidateTag(tag)
+
# check hypervisor parameter syntax (locally)
utils.ForceDictType(self.op.hvparams, constants.HVS_PARAMETER_TYPES)
filled_hvp = cluster.SimpleFillHV(self.op.hypervisor, self.op.os_type,
osparams=self.op.osparams,
)
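+ # Add the requested tags to the newly created instance object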
+ if self.op.tags:
+ for tag in self.op.tags:
+ iobj.AddTag(tag)
+
if self.adopt_disks:
if self.op.disk_template == constants.DT_PLAIN:
# rename LVs to the newly-generated names; we need to construct
self.name = None
self.evac_nodes = None
self.instances = None
- self.reloc_mode = None
+ self.evac_mode = None
self.target_groups = []
# computed fields
self.required_nodes = None
hypervisor_name = self.hypervisor
elif self.mode == constants.IALLOCATOR_MODE_RELOC:
hypervisor_name = cfg.GetInstanceInfo(self.name).hypervisor
- elif self.mode in (constants.IALLOCATOR_MODE_MEVAC,
- constants.IALLOCATOR_MODE_MRELOC):
+ else:
hypervisor_name = cluster_info.enabled_hypervisors[0]
node_data = self.rpc.call_node_info(node_list, cfg.GetVGName(),
}
return request
- def _AddMultiRelocate(self):
- """Get data for multi-relocate requests.
+ def _AddNodeEvacuate(self):
+ """Get data for node-evacuate requests.
+
+ """
+ return {
+ "instances": self.instances,
+ "evac_mode": self.evac_mode,
+ }
+
+ def _AddChangeGroup(self):
+ """Get data for node-evacuate requests.
"""
return {
"instances": self.instances,
- "reloc_mode": self.reloc_mode,
"target_groups": self.target_groups,
}
self.in_text = serializer.Dump(self.in_data)
_STRING_LIST = ht.TListOf(ht.TString)
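+ # Result format shared by node-evacuate and change-group requests: a list of
+ # job sets, each a list of opcodes restricted to the types listed below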
+ _JOBSET_LIST = ht.TListOf(ht.TListOf(ht.TStrictDict(True, False, {
+ # pylint: disable-msg=E1101
+ # Class '...' has no 'OP_ID' member
+ "OP_ID": ht.TElemOf([opcodes.OpInstanceFailover.OP_ID,
+ opcodes.OpInstanceMigrate.OP_ID,
+ opcodes.OpInstanceReplaceDisks.OP_ID])
+ })))
_MODE_DATA = {
constants.IALLOCATOR_MODE_ALLOC:
(_AddNewInstance,
constants.IALLOCATOR_MODE_MEVAC:
(_AddEvacuateNodes, [("evac_nodes", _STRING_LIST)],
ht.TListOf(ht.TAnd(ht.TIsLength(2), _STRING_LIST))),
- constants.IALLOCATOR_MODE_MRELOC:
- (_AddMultiRelocate, [
+ constants.IALLOCATOR_MODE_NODE_EVAC:
+ (_AddNodeEvacuate, [
+ ("instances", _STRING_LIST),
+ ("evac_mode", ht.TElemOf(constants.IALLOCATOR_NEVAC_MODES)),
+ ], _JOBSET_LIST),
+ constants.IALLOCATOR_MODE_CHG_GROUP:
+ (_AddChangeGroup, [
("instances", _STRING_LIST),
- ("reloc_mode", ht.TElemOf(constants.IALLOCATOR_MRELOC_MODES)),
("target_groups", _STRING_LIST),
- ],
- ht.TListOf(ht.TListOf(ht.TStrictDict(True, False, {
- # pylint: disable-msg=E1101
- # Class '...' has no 'OP_ID' member
- "OP_ID": ht.TElemOf([opcodes.OpInstanceFailover.OP_ID,
- opcodes.OpInstanceMigrate.OP_ID,
- opcodes.OpInstanceReplaceDisks.OP_ID])
- })))),
+ ], _JOBSET_LIST),
}
def Run(self, name, validate=True, call_fn=None):
if not hasattr(self.op, "evac_nodes"):
raise errors.OpPrereqError("Missing attribute 'evac_nodes' on"
" opcode input", errors.ECODE_INVAL)
- elif self.op.mode == constants.IALLOCATOR_MODE_MRELOC:
- if self.op.instances:
- self.op.instances = _GetWantedInstances(self, self.op.instances)
- else:
- raise errors.OpPrereqError("Missing instances to relocate",
- errors.ECODE_INVAL)
+ elif self.op.mode in (constants.IALLOCATOR_MODE_CHG_GROUP,
+ constants.IALLOCATOR_MODE_NODE_EVAC):
+ if not self.op.instances:
+ raise errors.OpPrereqError("Missing instances", errors.ECODE_INVAL)
+ self.op.instances = _GetWantedInstances(self, self.op.instances)
else:
raise errors.OpPrereqError("Invalid test allocator mode '%s'" %
self.op.mode, errors.ECODE_INVAL)
ial = IAllocator(self.cfg, self.rpc,
mode=self.op.mode,
evac_nodes=self.op.evac_nodes)
- elif self.op.mode == constants.IALLOCATOR_MODE_MRELOC:
+ elif self.op.mode == constants.IALLOCATOR_MODE_CHG_GROUP:
ial = IAllocator(self.cfg, self.rpc,
mode=self.op.mode,
instances=self.op.instances,
- reloc_mode=self.op.reloc_mode,
target_groups=self.op.target_groups)
+ elif self.op.mode == constants.IALLOCATOR_MODE_NODE_EVAC:
+ ial = IAllocator(self.cfg, self.rpc,
+ mode=self.op.mode,
+ instances=self.op.instances,
+ evac_mode=self.op.evac_mode)
else:
raise errors.ProgrammerError("Uncaught mode %s in"
" LUTestAllocator.Exec", self.op.mode)