from ganeti import rpc
from ganeti import objects
from ganeti import serializer
+from ganeti import uidpool
_config_lock = locking.SharedLock()
class ConfigWriter:
"""The interface to the cluster configuration.
+ @ivar _temporary_lvs: reservation manager for temporary LVs
+ @ivar _all_rms: a list of all temporary reservation managers
+
"""
def __init__(self, cfg_file=None, offline=False):
self.write_count = 0
self._temporary_drbds = {}
self._temporary_macs = TemporaryReservationManager()
self._temporary_secrets = TemporaryReservationManager()
+ self._temporary_lvs = TemporaryReservationManager()
+ self._all_rms = [self._temporary_ids, self._temporary_macs,
+ self._temporary_secrets, self._temporary_lvs]
# Note: in order to prevent errors when resolving our name in
# _DistributeConfig, we compute it here once and reuse it; it's
# better to raise an error before starting to modify the config
self._temporary_macs.Reserve(mac, ec_id)
@locking.ssynchronized(_config_lock, shared=1)
def ReserveLV(self, lv_name, ec_id):
  """Reserve a VG/LV pair for an instance.

  Checks the logical volume name against all LVs currently present in
  the cluster configuration and, if it is free, records a temporary
  reservation for the given execution context.

  @type lv_name: string
  @param lv_name: the logical volume name to reserve
  @type ec_id: string
  @param ec_id: the execution context id the reservation belongs to;
      it is released via the per-EC reservation drop
  @raise errors.ReservationError: if the LV name is already in use

  """
  # No 'else' branch needed: the raise terminates the conflicting path
  if lv_name in self._AllLVs():
    raise errors.ReservationError("LV already in use")
  self._temporary_lvs.Reserve(lv_name, ec_id)
+
+ @locking.ssynchronized(_config_lock, shared=1)
def GenerateDRBDSecret(self, ec_id):
"""Generate a DRBD secret.
if invalid_hvs:
result.append("enabled hypervisors contains invalid entries: %s" %
invalid_hvs)
+ missing_hvp = (set(data.cluster.enabled_hypervisors) -
+ set(data.cluster.hvparams.keys()))
+ if missing_hvp:
+ result.append("hypervisor parameters missing for the enabled"
+ " hypervisor(s) %s" % utils.CommaJoin(missing_hvp))
if data.cluster.master_node not in data.nodes:
result.append("cluster has invalid primary node '%s'" %
# per-instance checks
for instance_name in data.instances:
instance = data.instances[instance_name]
+ if instance.name != instance_name:
+ result.append("instance '%s' is indexed by wrong name '%s'" %
+ (instance.name, instance_name))
if instance.primary_node not in data.nodes:
result.append("instance '%s' has invalid primary node '%s'" %
(instance_name, instance.primary_node))
(mc_now, mc_max))
# node checks
- for node in data.nodes.values():
+ for node_name, node in data.nodes.items():
+ if node.name != node_name:
+ result.append("Node '%s' is indexed by wrong name '%s'" %
+ (node.name, node_name))
if [node.master_candidate, node.drained, node.offline].count(True) > 1:
result.append("Node %s state is invalid: master_candidate=%s,"
" drain=%s, offline=%s" %
"""
if not item.uuid:
item.uuid = self._GenerateUniqueID(ec_id)
- elif item.uuid in self._AllIDs(temporary=True):
- raise errors.ConfigurationError("Cannot add '%s': UUID already in use" %
- (item.name, item.uuid))
+ elif item.uuid in self._AllIDs(include_temporary=True):
+ raise errors.ConfigurationError("Cannot add '%s': UUID %s already"
+ " in use" % (item.name, item.uuid))
def _SetInstanceStatus(self, instance_name, status):
"""Set the instance's status to a given value.
# rename the file paths in logical and physical id
file_storage_dir = os.path.dirname(os.path.dirname(disk.logical_id[1]))
disk.physical_id = disk.logical_id = (disk.logical_id[0],
- os.path.join(file_storage_dir,
- inst.name,
- disk.iv_name))
+ utils.PathJoin(file_storage_dir,
+ inst.name,
+ disk.iv_name))
self._config_data.instances[inst.name] = inst
self._WriteConfig()
"""
return self._UnlockedGetNodeList()
- @locking.ssynchronized(_config_lock, shared=1)
- def GetOnlineNodeList(self):
+ def _UnlockedGetOnlineNodeList(self):
"""Return the list of nodes which are online.
"""
return [node.name for node in all_nodes if not node.offline]
@locking.ssynchronized(_config_lock, shared=1)
def GetOnlineNodeList(self):
  """Return the list of nodes which are online.

  Locked wrapper around the internal unlocked variant; returns the
  names of all nodes whose C{offline} flag is not set.

  @rtype: list
  @return: node names of the online nodes

  """
  return self._UnlockedGetOnlineNodeList()
+
+ @locking.ssynchronized(_config_lock, shared=1)
def GetAllNodesInfo(self):
"""Get the configuration of all nodes.
if self._last_cluster_serial < self._config_data.cluster.serial_no:
if not self._offline:
result = rpc.RpcRunner.call_write_ssconf_files(
- self._UnlockedGetNodeList(),
+ self._UnlockedGetOnlineNodeList(),
self._UnlockedGetSsconfValues())
for nname, nresu in result.items():
cluster = self._config_data.cluster
cluster_tags = fn(cluster.GetTags())
+
+ hypervisor_list = fn(cluster.enabled_hypervisors)
+
+ uid_pool = uidpool.FormatUidPool(cluster.uid_pool, separator="\n")
+
return {
constants.SS_CLUSTER_NAME: cluster.cluster_name,
constants.SS_CLUSTER_TAGS: cluster_tags,
constants.SS_ONLINE_NODES: on_data,
constants.SS_INSTANCE_LIST: instance_data,
constants.SS_RELEASE_VERSION: constants.RELEASE_VERSION,
+ constants.SS_HYPERVISOR_LIST: hypervisor_list,
+ constants.SS_MAINTAIN_NODE_HEALTH: str(cluster.maintain_node_health),
+ constants.SS_UID_POOL: uid_pool,
}
@locking.ssynchronized(_config_lock, shared=1)
"""Drop per-execution-context reservations
"""
- self._temporary_ids.DropECReservations(ec_id)
- self._temporary_macs.DropECReservations(ec_id)
- self._temporary_secrets.DropECReservations(ec_id)
+ for rm in self._all_rms:
+ rm.DropECReservations(ec_id)