+ def _UnlockedComputeDRBDMap(self):
+ """Compute the used DRBD minor/nodes.
+
+ @rtype: (dict, list)
+ @return: dictionary of node_name: dict of minor: instance_name;
+ the returned dict will have all the nodes in it (even if with
+ an empty mapping), and a list of duplicate (node, minor,
+ new_instance, existing_instance) tuples; if the duplicates
+ list is not empty, the configuration is corrupted and the
+ caller should raise an exception
+
+ """
+ def _AppendUsedMinors(instance_name, disk, used):
+ """Helper: add the DRBD minors of a disk tree to the 'used' map."""
+ duplicates = []
+ if disk.dev_type == constants.LD_DRBD8 and len(disk.logical_id) >= 5:
+ nodeA, nodeB, dummy, minorA, minorB = disk.logical_id[:5]
+ for node, minor in ((nodeA, minorA), (nodeB, minorB)):
+ assert node in used, ("Node '%s' of instance '%s' not found"
+ " in node list" % (node, instance_name))
+ if minor in used[node]:
+ duplicates.append((node, minor, instance_name, used[node][minor]))
+ else:
+ used[node][minor] = instance_name
+ if disk.children:
+ for child in disk.children:
+ duplicates.extend(_AppendUsedMinors(instance_name, child, used))
+ return duplicates
+
+ duplicates = []
+ my_dict = dict((node, {}) for node in self._config_data.nodes)
+ for instance in self._config_data.instances.itervalues():
+ for disk in instance.disks:
+ duplicates.extend(_AppendUsedMinors(instance.name, disk, my_dict))
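+ # also account for minors reserved by AllocateDRBDMinor but not yet committed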
+ for (node, minor), instance in self._temporary_drbds.iteritems():
+ if minor in my_dict[node] and my_dict[node][minor] != instance:
+ duplicates.append((node, minor, instance, my_dict[node][minor]))
+ else:
+ my_dict[node][minor] = instance
+ return my_dict, duplicates
+
+ @locking.ssynchronized(_config_lock)
+ def ComputeDRBDMap(self):
+ """Compute the used DRBD minor/nodes.
+
+ This is just a wrapper over L{_UnlockedComputeDRBDMap}.
+
+ @return: dictionary of node_name: dict of minor: instance_name;
+ the returned dict will have all the nodes in it (even if with
+ an empty mapping)
+ @raise errors.ConfigurationError: if duplicate minors are found,
+ meaning the configuration is corrupted
+
+ """
+ d_map, duplicates = self._UnlockedComputeDRBDMap()
+ if duplicates:
+ raise errors.ConfigurationError("Duplicate DRBD minors detected: %s" %
+ str(duplicates))
+ return d_map
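As an illustration of the returned structure, a hypothetical three-node
cluster with one DRBD-based instance (all names below are made up) could
yield a map like this:

    # Possible shape of the map returned by ComputeDRBDMap():
    # every node is a key, even when it has no minors in use.
    d_map = {
      "node1.example.com": {0: "instance1.example.com"},
      "node2.example.com": {0: "instance1.example.com"},
      "node3.example.com": {},
    }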
+
+ @locking.ssynchronized(_config_lock)
+ def AllocateDRBDMinor(self, nodes, instance):
+ """Allocate a drbd minor.
+
+ The free minor will be automatically computed from the existing
+ devices. A node can be given multiple times in order to allocate
+ multiple minors. The result is the list of minors, in the same
+ order as the passed nodes.
+
+ @type instance: string
+ @param instance: the instance for which we allocate minors
+
+ """
+ assert isinstance(instance, basestring), \
+ "Invalid argument '%s' passed to AllocateDRBDMinor" % instance
+
+ d_map, duplicates = self._UnlockedComputeDRBDMap()
+ if duplicates:
+ raise errors.ConfigurationError("Duplicate DRBD minors detected: %s" %
+ str(duplicates))
+ result = []
+ for nname in nodes:
+ ndata = d_map[nname]
+ if not ndata:
+ # no minors used, we can start at 0
+ result.append(0)
+ ndata[0] = instance
+ self._temporary_drbds[(nname, 0)] = instance
+ continue
+ keys = ndata.keys()
+ keys.sort()
+ ffree = utils.FirstFree(keys)
+ if ffree is None:
+ # return the next minor
+ # TODO: implement high-limit check
+ minor = keys[-1] + 1
+ else:
+ minor = ffree
+ # double-check minor against current instances
+ assert minor not in d_map[nname], \
+ ("Attempt to reuse allocated DRBD minor %d on node %s,"
+ " already allocated to instance %s" %
+ (minor, nname, d_map[nname][minor]))
+ ndata[minor] = instance
+ # double-check minor against reservation
+ r_key = (nname, minor)
+ assert r_key not in self._temporary_drbds, \
+ ("Attempt to reuse reserved DRBD minor %d on node %s,"
+ " reserved for instance %s" %
+ (minor, nname, self._temporary_drbds[r_key]))
+ self._temporary_drbds[r_key] = instance
+ result.append(minor)
+ logging.debug("Request to allocate drbd minors, input: %s, returning %s",
+ nodes, result)
+ return result
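A minimal usage sketch, assuming 'cfg' is a ConfigWriter instance (node
and instance names are made up): allocating the two minors needed by a
new DRBD disk, one on each of its nodes:

    # Returns one minor per listed node; listing the same node twice
    # would yield two distinct minors on that node.
    minor_a, minor_b = cfg.AllocateDRBDMinor(
      ["node1.example.com", "node2.example.com"], "instance1.example.com")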
+
+ def _UnlockedReleaseDRBDMinors(self, instance):
+ """Release temporary drbd minors allocated for a given instance.
+
+ @type instance: string
+ @param instance: the instance for which temporary minors should be
+ released
+
+ """
+ assert isinstance(instance, basestring), \
+ "Invalid argument passed to ReleaseDRBDMinors"
+ for key, name in self._temporary_drbds.items():
+ if name == instance:
+ del self._temporary_drbds[key]
+
+ @locking.ssynchronized(_config_lock)
+ def ReleaseDRBDMinors(self, instance):
+ """Release temporary drbd minors allocated for a given instance.
+
+ This should be called on the error paths; on the success paths
+ it's called automatically by the ConfigWriter add and update
+ functions.
+
+ This function is just a wrapper over L{_UnlockedReleaseDRBDMinors}.
+
+ @type instance: string
+ @param instance: the instance for which temporary minors should be
+ released
+
+ """
+ self._UnlockedReleaseDRBDMinors(instance)
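Following the docstring above, a sketch of the intended error-path usage
(the function and variable names around the call are hypothetical):

    minors = cfg.AllocateDRBDMinor(node_list, instance.name)
    try:
      CreateInstanceDisks(instance, minors)  # any step that may fail
    except errors.OpExecError:
      # drop the reservations so the minors can be reused later
      cfg.ReleaseDRBDMinors(instance.name)
      raise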
+
+ @locking.ssynchronized(_config_lock, shared=1)
+ def GetConfigVersion(self):
+ """Get the configuration version.
+
+ @return: Config version
+
+ """
+ return self._config_data.version
+
+ @locking.ssynchronized(_config_lock, shared=1)
+ def GetClusterName(self):
+ """Get cluster name.
+
+ @return: Cluster name
+
+ """
+ return self._config_data.cluster.cluster_name
+
+ @locking.ssynchronized(_config_lock, shared=1)
+ def GetMasterNode(self):
+ """Get the hostname of the master node for this cluster.
+
+ @return: Master hostname
+
+ """
+ return self._config_data.cluster.master_node
+
+ @locking.ssynchronized(_config_lock, shared=1)
+ def GetMasterIP(self):
+ """Get the IP of the master node for this cluster.
+
+ @return: Master IP
+
+ """
+ return self._config_data.cluster.master_ip
+
+ @locking.ssynchronized(_config_lock, shared=1)
+ def GetMasterNetdev(self):
+ """Get the master network device for this cluster.
+
+ """
+ return self._config_data.cluster.master_netdev
+
+ @locking.ssynchronized(_config_lock, shared=1)
+ def GetFileStorageDir(self):
+ """Get the file storage dir for this cluster.
+
+ """
+ return self._config_data.cluster.file_storage_dir
+
+ @locking.ssynchronized(_config_lock, shared=1)
+ def GetHypervisorType(self):
+ """Get the hypervisor type for this cluster.
+
+ """
+ return self._config_data.cluster.default_hypervisor
+
+ @locking.ssynchronized(_config_lock, shared=1)