X-Git-Url: https://code.grnet.gr/git/ganeti-local/blobdiff_plain/bc5d02156c3f3a3ee748b40e67309b6eb5be7fcf..c4929a8bcca4a43dc6434394a91a8ea67d854844:/lib/cmdlib.py diff --git a/lib/cmdlib.py b/lib/cmdlib.py index 5f257d3..04d26f9 100644 --- a/lib/cmdlib.py +++ b/lib/cmdlib.py @@ -721,6 +721,71 @@ def _GetUpdatedParams(old_params, update_dict, return params_copy +def _UpdateAndVerifySubDict(base, updates, type_check): + """Updates and verifies a dict with sub dicts of the same type. + + @param base: The dict with the old data + @param updates: The dict with the new data + @param type_check: Dict suitable to ForceDictType to verify correct types + @returns: A new dict with updated and verified values + + """ + def fn(old, value): + new = _GetUpdatedParams(old, value) + utils.ForceDictType(new, type_check) + return new + + ret = copy.deepcopy(base) + ret.update(dict((key, fn(base.get(key, {}), value)) + for key, value in updates.items())) + return ret + + +def _MergeAndVerifyHvState(op_input, obj_input): + """Combines the hv state from an opcode with the one of the object + + @param op_input: The input dict from the opcode + @param obj_input: The input dict from the objects + @return: The verified and updated dict + + """ + if op_input: + invalid_hvs = set(op_input) - constants.HYPER_TYPES + if invalid_hvs: + raise errors.OpPrereqError("Invalid hypervisor(s) in hypervisor state:" + " %s" % utils.CommaJoin(invalid_hvs), + errors.ECODE_INVAL) + if obj_input is None: + obj_input = {} + type_check = constants.HVSTS_PARAMETER_TYPES + return _UpdateAndVerifySubDict(obj_input, op_input, type_check) + + return None + + +def _MergeAndVerifyDiskState(op_input, obj_input): + """Combines the disk state from an opcode with the one of the object + + @param op_input: The input dict from the opcode + @param obj_input: The input dict from the objects + @return: The verified and updated dict + """ + if op_input: + invalid_dst = set(op_input) - constants.DS_VALID_TYPES + if invalid_dst: + raise errors.OpPrereqError("Invalid storage type(s) in disk state: %s" % + utils.CommaJoin(invalid_dst), + errors.ECODE_INVAL) + type_check = constants.DSS_PARAMETER_TYPES + if obj_input is None: + obj_input = {} + return dict((key, _UpdateAndVerifySubDict(obj_input.get(key, {}), value, + type_check)) + for key, value in op_input.items()) + + return None + + def _ReleaseLocks(lu, level, names=None, keep=None): """Releases locks owned by an LU. @@ -951,6 +1016,26 @@ def _CheckInstanceState(lu, instance, req_states, msg=None): (instance.name, msg), errors.ECODE_STATE) +def _CheckMinMaxSpecs(name, ipolicy, value): + """Checks if value is in the desired range. + + @param name: name of the parameter for which we perform the check + @param ipolicy: dictionary containing min, max and std values + @param value: actual value that we want to use + @return: None or element not meeting the criteria + + + """ + if value in [None, constants.VALUE_AUTO]: + return None + max_v = ipolicy[constants.ISPECS_MAX].get(name, value) + min_v = ipolicy[constants.ISPECS_MIN].get(name, value) + if value > max_v or min_v > value: + return ("%s value %s is not in range [%s, %s]" % + (name, value, min_v, max_v)) + return None + + def _ExpandItemName(fn, name, kind): """Expand an item name. @@ -1164,6 +1249,14 @@ def _DecideSelfPromotion(lu, exceptions=None): return mc_now < mc_should +def _CalculateGroupIPolicy(cfg, group): + """Calculate instance policy for group. 
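+
+  @type cfg: L{config.ConfigWriter}
+  @param cfg: the cluster configuration
+  @type group: L{objects.NodeGroup}
+  @param group: the node group for which the policy is computed
+  @return: the group's ipolicy, filled with cluster-wide defaults for any
+      unset values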
+ + """ + cluster = cfg.GetClusterInfo() + return cluster.SimpleFillIPolicy(group.ipolicy) + + def _CheckNicsBridgesExist(lu, target_nics, target_node): """Check that the brigdes needed by a list of nics exist. @@ -2042,6 +2135,34 @@ class LUClusterVerifyGroup(LogicalUnit, _VerifyErrors): msg = "cannot reach the master IP" _ErrorIf(True, constants.CV_ENODENET, node, msg) + def _VerifyInstancePolicy(self, instance): + """Verify instance specs against instance policy set on node group level. + + + """ + cluster = self.cfg.GetClusterInfo() + full_beparams = cluster.FillBE(instance) + ipolicy = cluster.SimpleFillIPolicy(self.group_info.ipolicy) + + mem_size = full_beparams.get(constants.BE_MAXMEM, None) + cpu_count = full_beparams.get(constants.BE_VCPUS, None) + disk_count = len(instance.disks) + disk_sizes = [disk.size for disk in instance.disks] + nic_count = len(instance.nics) + + test_settings = [ + (constants.ISPEC_MEM_SIZE, mem_size), + (constants.ISPEC_CPU_COUNT, cpu_count), + (constants.ISPEC_DISK_COUNT, disk_count), + (constants.ISPEC_NIC_COUNT, nic_count), + ] + map((lambda d: (constants.ISPEC_DISK_SIZE, d)), disk_sizes) + + for (name, value) in test_settings: + test_result = _CheckMinMaxSpecs(name, ipolicy, value) + self._ErrorIf(test_result is not None, + constants.CV_EINSTANCEPOLICY, instance.name, + test_result) + def _VerifyInstance(self, instance, instanceconfig, node_image, diskstatus): """Verify an instance. @@ -2056,6 +2177,8 @@ class LUClusterVerifyGroup(LogicalUnit, _VerifyErrors): node_vol_should = {} instanceconfig.MapLVsByNode(node_vol_should) + self._VerifyInstancePolicy(instanceconfig) + for node in node_vol_should: n_img = node_image[node] if n_img.offline or n_img.rpc_fail or n_img.lvm_fail: @@ -3595,6 +3718,29 @@ class LUClusterSetParams(LogicalUnit): self.new_ndparams["oob_program"] = \ constants.NDC_DEFAULTS[constants.ND_OOB_PROGRAM] + if self.op.hv_state: + new_hv_state = _MergeAndVerifyHvState(self.op.hv_state, + self.cluster.hv_state_static) + self.new_hv_state = dict((hv, cluster.SimpleFillHvState(values)) + for hv, values in new_hv_state.items()) + + if self.op.disk_state: + new_disk_state = _MergeAndVerifyDiskState(self.op.disk_state, + self.cluster.disk_state_static) + self.new_disk_state = \ + dict((storage, dict((name, cluster.SimpleFillDiskState(values)) + for name, values in svalues.items())) + for storage, svalues in new_disk_state.items()) + + if self.op.ipolicy: + ipolicy = {} + for key, value in self.op.ipolicy.items(): + utils.ForceDictType(value, constants.ISPECS_PARAMETER_TYPES) + ipolicy[key] = _GetUpdatedParams(cluster.ipolicy.get(key, {}), + value) + objects.InstancePolicy.CheckParameterSyntax(ipolicy) + self.new_ipolicy = ipolicy + if self.op.nicparams: utils.ForceDictType(self.op.nicparams, constants.NICS_PARAMETER_TYPES) self.new_nicparams = cluster.SimpleFillNIC(self.op.nicparams) @@ -3755,12 +3901,18 @@ class LUClusterSetParams(LogicalUnit): self.cluster.beparams[constants.PP_DEFAULT] = self.new_beparams if self.op.nicparams: self.cluster.nicparams[constants.PP_DEFAULT] = self.new_nicparams + if self.op.ipolicy: + self.cluster.ipolicy = self.new_ipolicy if self.op.osparams: self.cluster.osparams = self.new_osp if self.op.ndparams: self.cluster.ndparams = self.new_ndparams if self.op.diskparams: self.cluster.diskparams = self.new_diskparams + if self.op.hv_state: + self.cluster.hv_state_static = self.new_hv_state + if self.op.disk_state: + self.cluster.disk_state_static = self.new_disk_state if self.op.candidate_pool_size is not None: 
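       # note: in the full cmdlib.py, _AdjustCandidatePool runs right after
       # this assignment, so shrinking the pool immediately demotes any
       # excess master candidates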
self.cluster.candidate_pool_size = self.op.candidate_pool_size @@ -5349,7 +5501,8 @@ class LUNodeSetParams(LogicalUnit): self.op.node_name = _ExpandNodeName(self.cfg, self.op.node_name) all_mods = [self.op.offline, self.op.master_candidate, self.op.drained, self.op.master_capable, self.op.vm_capable, - self.op.secondary_ip, self.op.ndparams] + self.op.secondary_ip, self.op.ndparams, self.op.hv_state, + self.op.disk_state] if all_mods.count(None) == len(all_mods): raise errors.OpPrereqError("Please pass at least one modification", errors.ECODE_INVAL) @@ -5590,6 +5743,15 @@ class LUNodeSetParams(LogicalUnit): utils.ForceDictType(new_ndparams, constants.NDS_PARAMETER_TYPES) self.new_ndparams = new_ndparams + if self.op.hv_state: + self.new_hv_state = _MergeAndVerifyHvState(self.op.hv_state, + self.node.hv_state_static) + + if self.op.disk_state: + self.new_disk_state = \ + _MergeAndVerifyDiskState(self.op.disk_state, + self.node.disk_state_static) + def Exec(self, feedback_fn): """Modifies a node. @@ -5606,6 +5768,12 @@ class LUNodeSetParams(LogicalUnit): if self.op.powered is not None: node.powered = self.op.powered + if self.op.hv_state: + node.hv_state_static = self.new_hv_state + + if self.op.disk_state: + node.disk_state_static = self.new_disk_state + for attr in ["master_capable", "vm_capable"]: val = getattr(self.op, attr) if val is not None: @@ -5713,13 +5881,14 @@ class LUClusterQuery(NoHooksLU): "architecture": (platform.architecture()[0], platform.machine()), "name": cluster.cluster_name, "master": cluster.master_node, - "default_hypervisor": cluster.enabled_hypervisors[0], + "default_hypervisor": cluster.primary_hypervisor, "enabled_hypervisors": cluster.enabled_hypervisors, "hvparams": dict([(hypervisor_name, cluster.hvparams[hypervisor_name]) for hypervisor_name in cluster.enabled_hypervisors]), "os_hvp": os_hvp, "beparams": cluster.beparams, "osparams": cluster.osparams, + "ipolicy": cluster.ipolicy, "nicparams": cluster.nicparams, "ndparams": cluster.ndparams, "candidate_pool_size": cluster.candidate_pool_size, @@ -7448,6 +7617,18 @@ class TLMigrateInstance(Tasklet): self.lu.LogInfo("Not checking memory on the secondary node as" " instance will not be started") + # check if failover must be forced instead of migration + if (not self.cleanup and not self.failover and + i_be[constants.BE_ALWAYS_FAILOVER]): + if self.fallback: + self.lu.LogInfo("Instance configured to always failover; fallback" + " to failover") + self.failover = True + else: + raise errors.OpPrereqError("This instance has been configured to" + " always failover, please allow failover", + errors.ECODE_STATE) + # check bridge existance _CheckInstanceBridgesExist(self.lu, instance, node=target_node) @@ -8039,24 +8220,104 @@ def _GenerateUniqueNames(lu, exts): return results +def _ComputeLDParams(disk_template, disk_params): + """Computes Logical Disk parameters from Disk Template parameters. + + @type disk_template: string + @param disk_template: disk template, one of L{constants.DISK_TEMPLATES} + @type disk_params: dict + @param disk_params: disk template parameters; dict(template_name -> parameters + @rtype: list(dict) + @return: a list of dicts, one for each node of the disk hierarchy. Each dict + contains the LD parameters of the node. The tree is flattened in-order. 
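+      For DRBD8, for instance, the result is C{[drbd parameters, data LV
+      parameters, metadata LV parameters]}, i.e. the drbd device followed
+      by its two LV children.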
+ + """ + if disk_template not in constants.DISK_TEMPLATES: + raise errors.ProgrammerError("Unknown disk template %s" % disk_template) + + result = list() + dt_params = disk_params[disk_template] + if disk_template == constants.DT_DRBD8: + drbd_params = { + constants.LDP_RESYNC_RATE: dt_params[constants.DRBD_RESYNC_RATE], + constants.LDP_BARRIERS: dt_params[constants.DRBD_DISK_BARRIERS], + constants.LDP_NO_META_FLUSH: dt_params[constants.DRBD_META_BARRIERS], + constants.LDP_DEFAULT_METAVG: dt_params[constants.DRBD_DEFAULT_METAVG], + constants.LDP_DISK_CUSTOM: dt_params[constants.DRBD_DISK_CUSTOM], + constants.LDP_NET_CUSTOM: dt_params[constants.DRBD_NET_CUSTOM], + constants.LDP_DYNAMIC_RESYNC: dt_params[constants.DRBD_DYNAMIC_RESYNC], + constants.LDP_PLAN_AHEAD: dt_params[constants.DRBD_PLAN_AHEAD], + constants.LDP_FILL_TARGET: dt_params[constants.DRBD_FILL_TARGET], + constants.LDP_DELAY_TARGET: dt_params[constants.DRBD_DELAY_TARGET], + constants.LDP_MAX_RATE: dt_params[constants.DRBD_MAX_RATE], + constants.LDP_MIN_RATE: dt_params[constants.DRBD_MIN_RATE], + } + + drbd_params = \ + objects.FillDict(constants.DISK_LD_DEFAULTS[constants.LD_DRBD8], + drbd_params) + + result.append(drbd_params) + + # data LV + data_params = { + constants.LDP_STRIPES: dt_params[constants.DRBD_DATA_STRIPES], + } + data_params = \ + objects.FillDict(constants.DISK_LD_DEFAULTS[constants.LD_LV], + data_params) + result.append(data_params) + + # metadata LV + meta_params = { + constants.LDP_STRIPES: dt_params[constants.DRBD_META_STRIPES], + } + meta_params = \ + objects.FillDict(constants.DISK_LD_DEFAULTS[constants.LD_LV], + meta_params) + result.append(meta_params) + + elif (disk_template == constants.DT_FILE or + disk_template == constants.DT_SHARED_FILE): + result.append(constants.DISK_LD_DEFAULTS[constants.LD_FILE]) + + elif disk_template == constants.DT_PLAIN: + params = { + constants.LDP_STRIPES: dt_params[constants.LV_STRIPES], + } + params = \ + objects.FillDict(constants.DISK_LD_DEFAULTS[constants.LD_LV], + params) + result.append(params) + + elif disk_template == constants.DT_BLOCK: + result.append(constants.DISK_LD_DEFAULTS[constants.LD_BLOCKDEV]) + + return result + + def _GenerateDRBD8Branch(lu, primary, secondary, size, vgnames, names, - iv_name, p_minor, s_minor): + iv_name, p_minor, s_minor, drbd_params, data_params, + meta_params): """Generate a drbd8 device complete with its children. """ assert len(vgnames) == len(names) == 2 port = lu.cfg.AllocatePort() shared_secret = lu.cfg.GenerateDRBDSecret(lu.proc.GetECId()) + dev_data = objects.Disk(dev_type=constants.LD_LV, size=size, - logical_id=(vgnames[0], names[0])) + logical_id=(vgnames[0], names[0]), + params=data_params) dev_meta = objects.Disk(dev_type=constants.LD_LV, size=DRBD_META_SIZE, - logical_id=(vgnames[1], names[1])) + logical_id=(vgnames[1], names[1]), + params=meta_params) drbd_dev = objects.Disk(dev_type=constants.LD_DRBD8, size=size, logical_id=(primary, secondary, port, p_minor, s_minor, shared_secret), children=[dev_data, dev_meta], - iv_name=iv_name) + iv_name=iv_name, params=drbd_params) return drbd_dev @@ -8064,7 +8325,7 @@ def _GenerateDiskTemplate(lu, template_name, instance_name, primary_node, secondary_nodes, disk_info, file_storage_dir, file_driver, - base_index, feedback_fn): + base_index, feedback_fn, disk_params): """Generate the entire disk layout for a given template type. 
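+
+  @type disk_params: dict
+  @param disk_params: disk template parameters, keyed by template name; this
+      is passed to L{_ComputeLDParams} to derive the per-device LD parameters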
""" @@ -8073,6 +8334,7 @@ def _GenerateDiskTemplate(lu, template_name, vgname = lu.cfg.GetVGName() disk_count = len(disk_info) disks = [] + ld_params = _ComputeLDParams(template_name, disk_params) if template_name == constants.DT_DISKLESS: pass elif template_name == constants.DT_PLAIN: @@ -8089,9 +8351,11 @@ def _GenerateDiskTemplate(lu, template_name, size=disk[constants.IDISK_SIZE], logical_id=(vg, names[idx]), iv_name="disk/%d" % disk_index, - mode=disk[constants.IDISK_MODE]) + mode=disk[constants.IDISK_MODE], + params=ld_params[0]) disks.append(disk_dev) elif template_name == constants.DT_DRBD8: + drbd_params, data_params, meta_params = ld_params if len(secondary_nodes) != 1: raise errors.ProgrammerError("Wrong template configuration") remote_node = secondary_nodes[0] @@ -8105,14 +8369,16 @@ def _GenerateDiskTemplate(lu, template_name, names.append(lv_prefix + "_meta") for idx, disk in enumerate(disk_info): disk_index = idx + base_index + drbd_default_metavg = drbd_params[constants.LDP_DEFAULT_METAVG] data_vg = disk.get(constants.IDISK_VG, vgname) - meta_vg = disk.get(constants.IDISK_METAVG, data_vg) + meta_vg = disk.get(constants.IDISK_METAVG, drbd_default_metavg) disk_dev = _GenerateDRBD8Branch(lu, primary_node, remote_node, disk[constants.IDISK_SIZE], [data_vg, meta_vg], names[idx * 2:idx * 2 + 2], "disk/%d" % disk_index, - minors[idx * 2], minors[idx * 2 + 1]) + minors[idx * 2], minors[idx * 2 + 1], + drbd_params, data_params, meta_params) disk_dev.mode = disk[constants.IDISK_MODE] disks.append(disk_dev) elif template_name == constants.DT_FILE: @@ -8129,7 +8395,8 @@ def _GenerateDiskTemplate(lu, template_name, logical_id=(file_driver, "%s/disk%d" % (file_storage_dir, disk_index)), - mode=disk[constants.IDISK_MODE]) + mode=disk[constants.IDISK_MODE], + params=ld_params[0]) disks.append(disk_dev) elif template_name == constants.DT_SHARED_FILE: if len(secondary_nodes) != 0: @@ -8145,7 +8412,8 @@ def _GenerateDiskTemplate(lu, template_name, logical_id=(file_driver, "%s/disk%d" % (file_storage_dir, disk_index)), - mode=disk[constants.IDISK_MODE]) + mode=disk[constants.IDISK_MODE], + params=ld_params[0]) disks.append(disk_dev) elif template_name == constants.DT_BLOCK: if len(secondary_nodes) != 0: @@ -8158,7 +8426,8 @@ def _GenerateDiskTemplate(lu, template_name, logical_id=(constants.BLOCKDEV_DRIVER_MANUAL, disk[constants.IDISK_ADOPT]), iv_name="disk/%d" % disk_index, - mode=disk[constants.IDISK_MODE]) + mode=disk[constants.IDISK_MODE], + params=ld_params[0]) disks.append(disk_dev) else: @@ -9112,8 +9381,9 @@ class LUInstanceCreate(LogicalUnit): constants.IDISK_SIZE: size, constants.IDISK_MODE: mode, constants.IDISK_VG: data_vg, - constants.IDISK_METAVG: disk.get(constants.IDISK_METAVG, data_vg), } + if constants.IDISK_METAVG in disk: + new_disk[constants.IDISK_METAVG] = disk[constants.IDISK_METAVG] if constants.IDISK_ADOPT in disk: new_disk[constants.IDISK_ADOPT] = disk[constants.IDISK_ADOPT] self.disks.append(new_disk) @@ -9198,8 +9468,19 @@ class LUInstanceCreate(LogicalUnit): _CheckNodeVmCapable(self, self.op.snode) self.secondaries.append(self.op.snode) + snode = self.cfg.GetNodeInfo(self.op.snode) + if pnode.group != snode.group: + self.LogWarning("The primary and secondary nodes are in two" + " different node groups; the disk parameters" + " from the first disk's node group will be" + " used") + nodenames = [pnode.name] + self.secondaries + # disk parameters (not customizable at instance or node level) + # just use the primary node parameters, ignoring the secondary. 
+ self.diskparams = self.cfg.GetNodeGroup(pnode.group).diskparams + if not self.adopt_disks: # Check lv size requirements, if not adopting req_sizes = _ComputeDiskSizePerVG(self.op.disk_template, self.disks) @@ -9318,7 +9599,8 @@ class LUInstanceCreate(LogicalUnit): self.instance_file_storage_dir, self.op.file_driver, 0, - feedback_fn) + feedback_fn, + self.diskparams) iobj = objects.Instance(name=instance, os=self.op.os_type, primary_node=pnode_name, @@ -9948,6 +10230,16 @@ class TLReplaceDisks(Tasklet): if not self.disks: self.disks = range(len(self.instance.disks)) + # TODO: compute disk parameters + primary_node_info = self.cfg.GetNodeInfo(instance.primary_node) + secondary_node_info = self.cfg.GetNodeInfo(secondary_node) + if primary_node_info.group != secondary_node_info.group: + self.lu.LogInfo("The instance primary and secondary nodes are in two" + " different node groups; the disk parameters of the" + " primary node's group will be applied.") + + self.diskparams = self.cfg.GetNodeGroup(primary_node_info.group).diskparams + for node in check_nodes: _CheckNodeOnline(self.lu, node) @@ -10107,12 +10399,14 @@ class TLReplaceDisks(Tasklet): lv_names = [".disk%d_%s" % (idx, suffix) for suffix in ["data", "meta"]] names = _GenerateUniqueNames(self.lu, lv_names) + _, data_p, meta_p = _ComputeLDParams(constants.DT_DRBD8, self.diskparams) + vg_data = dev.children[0].logical_id[0] lv_data = objects.Disk(dev_type=constants.LD_LV, size=dev.size, - logical_id=(vg_data, names[0])) + logical_id=(vg_data, names[0]), params=data_p) vg_meta = dev.children[1].logical_id[0] lv_meta = objects.Disk(dev_type=constants.LD_LV, size=DRBD_META_SIZE, - logical_id=(vg_meta, names[1])) + logical_id=(vg_meta, names[1]), params=meta_p) new_lvs = [lv_data, lv_meta] old_lvs = [child.Copy() for child in dev.children] @@ -10369,10 +10663,12 @@ class TLReplaceDisks(Tasklet): iv_names[idx] = (dev, dev.children, new_net_id) logging.debug("Allocated new_minor: %s, new_logical_id: %s", new_minor, new_net_id) + drbd_params, _, _ = _ComputeLDParams(constants.DT_DRBD8, self.diskparams) new_drbd = objects.Disk(dev_type=constants.LD_DRBD8, logical_id=new_alone_id, children=dev.children, - size=dev.size) + size=dev.size, + params=drbd_params) try: _CreateSingleBlockDev(self.lu, self.new_node, self.instance, new_drbd, _GetInstanceInfoText(self.instance), False) @@ -11298,6 +11594,8 @@ class LUInstanceSetParams(LogicalUnit): "Cannot retrieve locked instance %s" % self.op.instance_name pnode = instance.primary_node nodelist = list(instance.all_nodes) + pnode_info = self.cfg.GetNodeInfo(pnode) + self.diskparams = self.cfg.GetNodeGroup(pnode_info.group).diskparams # OS change if self.op.os_name and not self.op.force: @@ -11335,6 +11633,13 @@ class LUInstanceSetParams(LogicalUnit): required = _ComputeDiskSizePerVG(self.op.disk_template, disks) _CheckNodesFreeDiskPerVG(self, [self.op.remote_node], required) + snode_info = self.cfg.GetNodeInfo(self.op.remote_node) + if pnode_info.group != snode_info.group: + self.LogWarning("The primary and secondary nodes are in two" + " different node groups; the disk parameters" + " from the first disk's node group will be" + " used") + # hvparams processing if self.op.hvparams: hv_type = instance.hypervisor @@ -11595,7 +11900,8 @@ class LUInstanceSetParams(LogicalUnit): for d in instance.disks] new_disks = _GenerateDiskTemplate(self, self.op.disk_template, instance.name, pnode, [snode], - disk_info, None, None, 0, feedback_fn) + disk_info, None, None, 0, feedback_fn, + self.diskparams) info = 
_GetInstanceInfoText(instance) feedback_fn("Creating aditional volumes...") # first, create the missing data and meta devices @@ -11741,7 +12047,9 @@ class LUInstanceSetParams(LogicalUnit): [disk_dict], file_path, file_driver, - disk_idx_base, feedback_fn)[0] + disk_idx_base, + feedback_fn, + self.diskparams)[0] instance.disks.append(new_disk) info = _GetInstanceInfoText(instance) @@ -12486,6 +12794,11 @@ class LUGroupAdd(LogicalUnit): else: self.op.diskparams = self.cfg.GetClusterInfo().diskparams + if self.op.ipolicy: + cluster = self.cfg.GetClusterInfo() + full_ipolicy = cluster.SimpleFillIPolicy(self.op.ipolicy) + objects.InstancePolicy.CheckParameterSyntax(full_ipolicy) + def BuildHooksEnv(self): """Build hooks env. @@ -12509,7 +12822,8 @@ class LUGroupAdd(LogicalUnit): uuid=self.group_uuid, alloc_policy=self.op.alloc_policy, ndparams=self.op.ndparams, - diskparams=self.op.diskparams) + diskparams=self.op.diskparams, + ipolicy=self.op.ipolicy) self.cfg.AddNodeGroup(group_obj, self.proc.GetECId(), check_uuid=False) del self.remove_locks[locking.LEVEL_NODEGROUP] @@ -12655,6 +12969,7 @@ class _GroupQuery(_QueryBase): lu.needed_locks = {} self._all_groups = lu.cfg.GetAllNodeGroupsInfo() + self._cluster = lu.cfg.GetClusterInfo() name_to_uuid = dict((g.name, g.uuid) for g in self._all_groups.values()) if not self.names: @@ -12720,7 +13035,8 @@ class _GroupQuery(_QueryBase): # Do not pass on node information if it was not requested. group_to_nodes = None - return query.GroupQueryData([self._all_groups[uuid] + return query.GroupQueryData(self._cluster, + [self._all_groups[uuid] for uuid in self.wanted], group_to_nodes, group_to_instances) @@ -12758,6 +13074,9 @@ class LUGroupSetParams(LogicalUnit): self.op.ndparams, self.op.diskparams, self.op.alloc_policy, + self.op.hv_state, + self.op.disk_state, + self.op.ipolicy, ] if all_changes.count(None) == len(all_changes): @@ -12797,6 +13116,25 @@ class LUGroupSetParams(LogicalUnit): utils.ForceDictType(new_templ_params, constants.DISK_DT_TYPES) self.new_diskparams[templ] = new_templ_params + if self.op.hv_state: + self.new_hv_state = _MergeAndVerifyHvState(self.op.hv_state, + self.group.hv_state_static) + + if self.op.disk_state: + self.new_disk_state = \ + _MergeAndVerifyDiskState(self.op.disk_state, + self.group.disk_state_static) + + if self.op.ipolicy: + g_ipolicy = {} + for key, value in self.op.ipolicy.iteritems(): + g_ipolicy[key] = _GetUpdatedParams(self.group.ipolicy.get(key, {}), + value, + use_none=True) + utils.ForceDictType(g_ipolicy[key], constants.ISPECS_PARAMETER_TYPES) + self.new_ipolicy = g_ipolicy + objects.InstancePolicy.CheckParameterSyntax(self.new_ipolicy) + def BuildHooksEnv(self): """Build hooks env. @@ -12830,6 +13168,15 @@ class LUGroupSetParams(LogicalUnit): if self.op.alloc_policy: self.group.alloc_policy = self.op.alloc_policy + if self.op.hv_state: + self.group.hv_state_static = self.new_hv_state + + if self.op.disk_state: + self.group.disk_state_static = self.new_disk_state + + if self.op.ipolicy: + self.group.ipolicy = self.new_ipolicy + self.cfg.Update(self.group, feedback_fn) return result @@ -13554,7 +13901,7 @@ class IAllocator(object): elif self.mode == constants.IALLOCATOR_MODE_RELOC: hypervisor_name = cfg.GetInstanceInfo(self.name).hypervisor else: - hypervisor_name = cluster_info.enabled_hypervisors[0] + hypervisor_name = cluster_info.primary_hypervisor node_data = self.rpc.call_node_info(node_list, [cfg.GetVGName()], [hypervisor_name])
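
A note on the ipolicy range check used throughout the hunks above:
_CheckMinMaxSpecs treats a missing bound as "no constraint" by defaulting it
to the value under test, and lets None/auto values pass unconditionally. A
minimal standalone sketch of that behaviour (the plain string keys stand in
for the real constants.ISPECS_MIN/constants.ISPECS_MAX names and spec names;
this is an illustration, not the Ganeti code itself):

  def check_min_max(name, ipolicy, value):
    # None and "auto" always pass, as in _CheckMinMaxSpecs
    if value in (None, "auto"):
      return None
    # an unset bound defaults to the value itself, i.e. no constraint
    max_v = ipolicy["max"].get(name, value)
    min_v = ipolicy["min"].get(name, value)
    if value > max_v or min_v > value:
      return ("%s value %s is not in range [%s, %s]" %
              (name, value, min_v, max_v))
    return None

  ipolicy = {"min": {"mem-size": 128}, "max": {"mem-size": 32768}}
  assert check_min_max("mem-size", ipolicy, 512) is None
  assert check_min_max("mem-size", ipolicy, 64) is not None   # below minimum
  assert check_min_max("cpu-count", ipolicy, 96) is None      # no bound set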
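Similarly, the hv_state/disk_state plumbing rests on _UpdateAndVerifySubDict,
which merges per-key parameter dicts without mutating its input. A simplified
model follows (it omits the utils.ForceDictType verification and the
_GetUpdatedParams value-removal handling; the hypervisor names and keys below
are illustrative, not the actual HVSTS parameter set):

  import copy

  def update_sub_dicts(base, updates):
    # deep-copy so the caller's dict is never modified, as in the original
    ret = copy.deepcopy(base)
    for key, value in updates.items():
      merged = dict(base.get(key, {}))
      merged.update(value)
      ret[key] = merged
    return ret

  base = {"xen-pvm": {"mem_node": 1024, "cpu_node": 1}}
  updates = {"xen-pvm": {"mem_node": 2048}, "kvm": {"cpu_node": 2}}
  result = update_sub_dicts(base, updates)
  assert result["xen-pvm"] == {"mem_node": 2048, "cpu_node": 1}
  assert result["kvm"] == {"cpu_node": 2}
  assert base["xen-pvm"]["mem_node"] == 1024  # the input survives intact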