64 |
64 |
import ganeti.masterd.instance # pylint: disable=W0611
|
65 |
65 |
|
66 |
66 |
|
67 |
|
#: Size of DRBD meta block device; added once per disk for DRBD8 metadata
#: (presumably in MiB, matching disk sizes elsewhere — confirm)
DRBD_META_SIZE = 128
|
69 |
|
|
70 |
67 |
# States of instance
|
71 |
68 |
INSTANCE_DOWN = [constants.ADMINST_DOWN]
|
72 |
69 |
INSTANCE_ONLINE = [constants.ADMINST_DOWN, constants.ADMINST_UP]
|
... | ... | |
1503 |
1500 |
return mc_now < mc_should
|
1504 |
1501 |
|
1505 |
1502 |
|
1506 |
|
def _CalculateGroupIPolicy(cluster, group):
|
1507 |
|
"""Calculate instance policy for group.
|
1508 |
|
|
1509 |
|
"""
|
1510 |
|
return cluster.SimpleFillIPolicy(group.ipolicy)
|
1511 |
|
|
1512 |
|
|
1513 |
1503 |
def _ComputeViolatingInstances(ipolicy, instances):
|
1514 |
1504 |
"""Computes a set of instances who violates given ipolicy.
|
1515 |
1505 |
|
... | ... | |
2427 |
2417 |
node_vol_should = {}
|
2428 |
2418 |
instanceconfig.MapLVsByNode(node_vol_should)
|
2429 |
2419 |
|
2430 |
|
ipolicy = _CalculateGroupIPolicy(self.cfg.GetClusterInfo(), self.group_info)
|
|
2420 |
cluster = self.cfg.GetClusterInfo()
|
|
2421 |
ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(cluster,
|
|
2422 |
self.group_info)
|
2431 |
2423 |
err = _ComputeIPolicyInstanceViolation(ipolicy, instanceconfig)
|
2432 |
2424 |
_ErrorIf(err, constants.CV_EINSTANCEPOLICY, instance, utils.CommaJoin(err))
|
2433 |
2425 |
|
... | ... | |
3997 |
3989 |
if compat.any(node in group.members
|
3998 |
3990 |
for node in inst.all_nodes)])
|
3999 |
3991 |
new_ipolicy = objects.FillIPolicy(self.new_ipolicy, group.ipolicy)
|
4000 |
|
new = _ComputeNewInstanceViolations(_CalculateGroupIPolicy(cluster,
|
4001 |
|
group),
|
|
3992 |
ipol = ganeti.masterd.instance.CalculateGroupIPolicy(cluster, group)
|
|
3993 |
new = _ComputeNewInstanceViolations(ipol,
|
4002 |
3994 |
new_ipolicy, instances)
|
4003 |
3995 |
if new:
|
4004 |
3996 |
violations.update(new)
|
... | ... | |
5377 |
5369 |
live_data = {}
|
5378 |
5370 |
|
5379 |
5371 |
if query.IQ_DISKUSAGE in self.requested_data:
|
|
5372 |
gmi = ganeti.masterd.instance
|
5380 |
5373 |
disk_usage = dict((inst.name,
|
5381 |
|
_ComputeDiskSize(inst.disk_template,
|
5382 |
|
[{constants.IDISK_SIZE: disk.size}
|
5383 |
|
for disk in inst.disks]))
|
|
5374 |
gmi.ComputeDiskSize(inst.disk_template,
|
|
5375 |
[{constants.IDISK_SIZE: disk.size}
|
|
5376 |
for disk in inst.disks]))
|
5384 |
5377 |
for inst in instance_list)
|
5385 |
5378 |
else:
|
5386 |
5379 |
disk_usage = None
|
... | ... | |
7863 |
7856 |
_CheckNodeOnline(self, target_node)
|
7864 |
7857 |
_CheckNodeNotDrained(self, target_node)
|
7865 |
7858 |
_CheckNodeVmCapable(self, target_node)
|
7866 |
|
ipolicy = _CalculateGroupIPolicy(self.cfg.GetClusterInfo(),
|
7867 |
|
self.cfg.GetNodeGroup(node.group))
|
|
7859 |
cluster = self.cfg.GetClusterInfo()
|
|
7860 |
group_info = self.cfg.GetNodeGroup(node.group)
|
|
7861 |
ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(cluster, group_info)
|
7868 |
7862 |
_CheckTargetNodeIPolicy(self, ipolicy, instance, node,
|
7869 |
7863 |
ignore=self.op.ignore_ipolicy)
|
7870 |
7864 |
|
... | ... | |
8140 |
8134 |
# Check that the target node is correct in terms of instance policy
|
8141 |
8135 |
nodeinfo = self.cfg.GetNodeInfo(self.target_node)
|
8142 |
8136 |
group_info = self.cfg.GetNodeGroup(nodeinfo.group)
|
8143 |
|
ipolicy = _CalculateGroupIPolicy(cluster, group_info)
|
|
8137 |
ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(cluster,
|
|
8138 |
group_info)
|
8144 |
8139 |
_CheckTargetNodeIPolicy(self.lu, ipolicy, instance, nodeinfo,
|
8145 |
8140 |
ignore=self.ignore_ipolicy)
|
8146 |
8141 |
|
... | ... | |
8180 |
8175 |
errors.ECODE_INVAL)
|
8181 |
8176 |
nodeinfo = self.cfg.GetNodeInfo(target_node)
|
8182 |
8177 |
group_info = self.cfg.GetNodeGroup(nodeinfo.group)
|
8183 |
|
ipolicy = _CalculateGroupIPolicy(cluster, group_info)
|
|
8178 |
ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(cluster,
|
|
8179 |
group_info)
|
8184 |
8180 |
_CheckTargetNodeIPolicy(self.lu, ipolicy, instance, nodeinfo,
|
8185 |
8181 |
ignore=self.ignore_ipolicy)
|
8186 |
8182 |
|
... | ... | |
8865 |
8861 |
dev_data = objects.Disk(dev_type=constants.LD_LV, size=size,
|
8866 |
8862 |
logical_id=(vgnames[0], names[0]),
|
8867 |
8863 |
params={})
|
8868 |
|
dev_meta = objects.Disk(dev_type=constants.LD_LV, size=DRBD_META_SIZE,
|
|
8864 |
dev_meta = objects.Disk(dev_type=constants.LD_LV,
|
|
8865 |
size=constants.DRBD_META_SIZE,
|
8869 |
8866 |
logical_id=(vgnames[1], names[1]),
|
8870 |
8867 |
params={})
|
8871 |
8868 |
drbd_dev = objects.Disk(dev_type=constants.LD_DRBD8, size=size,
|
... | ... | |
9211 |
9208 |
constants.DT_DISKLESS: {},
|
9212 |
9209 |
constants.DT_PLAIN: _compute(disks, 0),
|
9213 |
9210 |
# 128 MB are added for drbd metadata for each disk
|
9214 |
|
constants.DT_DRBD8: _compute(disks, DRBD_META_SIZE),
|
|
9211 |
constants.DT_DRBD8: _compute(disks, constants.DRBD_META_SIZE),
|
9215 |
9212 |
constants.DT_FILE: {},
|
9216 |
9213 |
constants.DT_SHARED_FILE: {},
|
9217 |
9214 |
}
|
... | ... | |
9223 |
9220 |
return req_size_dict[disk_template]
|
9224 |
9221 |
|
9225 |
9222 |
|
9226 |
|
def _ComputeDiskSize(disk_template, disks):
  """Compute disk size requirements according to disk template

  @param disk_template: the instance's disk template (one of the
      C{constants.DT_*} values)
  @param disks: list of disk definitions, each containing a
      L{constants.IDISK_SIZE} entry
  @return: the required free disk space for the template, or C{None}
      for diskless instances
  @raise errors.ProgrammerError: if the disk template is unknown

  """
  def _TotalWithOverhead(per_disk_extra):
    # Sum of all disk sizes, plus a fixed per-disk overhead
    return sum(d[constants.IDISK_SIZE] + per_disk_extra for d in disks)

  # Required free disk space as a function of disk and swap space
  required = {
    constants.DT_DISKLESS: None,
    constants.DT_PLAIN: _TotalWithOverhead(0),
    # 128 MB are added for drbd metadata for each disk
    constants.DT_DRBD8: _TotalWithOverhead(DRBD_META_SIZE),
    constants.DT_FILE: _TotalWithOverhead(0),
    constants.DT_SHARED_FILE: _TotalWithOverhead(0),
    constants.DT_BLOCK: 0,
    constants.DT_RBD: _TotalWithOverhead(0),
  }

  if disk_template not in required:
    raise errors.ProgrammerError("Disk template '%s' size requirement"
                                 " is unknown" % disk_template)

  return required[disk_template]
|
9248 |
|
|
9249 |
|
|
9250 |
9223 |
def _FilterVmNodes(lu, nodenames):
|
9251 |
9224 |
"""Filters out non-vm_capable nodes from a list.
|
9252 |
9225 |
|
... | ... | |
10067 |
10040 |
}
|
10068 |
10041 |
|
10069 |
10042 |
group_info = self.cfg.GetNodeGroup(pnode.group)
|
10070 |
|
ipolicy = _CalculateGroupIPolicy(cluster, group_info)
|
|
10043 |
ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(cluster, group_info)
|
10071 |
10044 |
res = _ComputeIPolicyInstanceSpecViolation(ipolicy, ispec)
|
10072 |
10045 |
if not self.op.ignore_ipolicy and res:
|
10073 |
10046 |
raise errors.OpPrereqError(("Instance allocation to group %s violates"
|
... | ... | |
10864 |
10837 |
if self.remote_node_info:
|
10865 |
10838 |
# We change the node, lets verify it still meets instance policy
|
10866 |
10839 |
new_group_info = self.cfg.GetNodeGroup(self.remote_node_info.group)
|
10867 |
|
ipolicy = _CalculateGroupIPolicy(self.cfg.GetClusterInfo(),
|
10868 |
|
new_group_info)
|
|
10840 |
cluster = self.cfg.GetClusterInfo()
|
|
10841 |
ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(cluster,
|
|
10842 |
new_group_info)
|
10869 |
10843 |
_CheckTargetNodeIPolicy(self, ipolicy, instance, self.remote_node_info,
|
10870 |
10844 |
ignore=self.ignore_ipolicy)
|
10871 |
10845 |
|
... | ... | |
11035 |
11009 |
logical_id=(vg_data, names[0]),
|
11036 |
11010 |
params=data_disk.params)
|
11037 |
11011 |
vg_meta = meta_disk.logical_id[0]
|
11038 |
|
lv_meta = objects.Disk(dev_type=constants.LD_LV, size=DRBD_META_SIZE,
|
|
11012 |
lv_meta = objects.Disk(dev_type=constants.LD_LV,
|
|
11013 |
size=constants.DRBD_META_SIZE,
|
11039 |
11014 |
logical_id=(vg_meta, names[1]),
|
11040 |
11015 |
params=meta_disk.params)
|
11041 |
11016 |
|
... | ... | |
12554 |
12529 |
|
12555 |
12530 |
snode_info = self.cfg.GetNodeInfo(self.op.remote_node)
|
12556 |
12531 |
snode_group = self.cfg.GetNodeGroup(snode_info.group)
|
12557 |
|
ipolicy = _CalculateGroupIPolicy(cluster, snode_group)
|
|
12532 |
ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(cluster,
|
|
12533 |
snode_group)
|
12558 |
12534 |
_CheckTargetNodeIPolicy(self, ipolicy, instance, snode_info,
|
12559 |
12535 |
ignore=self.op.ignore_ipolicy)
|
12560 |
12536 |
if pnode_info.group != snode_info.group:
|
... | ... | |
14154 |
14130 |
new_ipolicy = cluster.SimpleFillIPolicy(self.new_ipolicy)
|
14155 |
14131 |
inst_filter = lambda inst: inst.name in owned_instances
|
14156 |
14132 |
instances = self.cfg.GetInstancesInfoByFilter(inst_filter).values()
|
|
14133 |
gmi = ganeti.masterd.instance
|
14157 |
14134 |
violations = \
|
14158 |
|
_ComputeNewInstanceViolations(_CalculateGroupIPolicy(cluster,
|
14159 |
|
self.group),
|
|
14135 |
_ComputeNewInstanceViolations(gmi.CalculateGroupIPolicy(cluster,
|
|
14136 |
self.group),
|
14160 |
14137 |
new_ipolicy, instances)
|
14161 |
14138 |
|
14162 |
14139 |
if violations:
|