====
+Version 2.9.0 beta1
+-------------------
+
+*(unreleased)*
+
+Incompatible/important changes
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+- hroller now also plans for capacity to move non-redundant instances off
+ any node to be rebooted; the old behavior of completely ignoring any
+ non-redundant instances can be restored by adding the --ignore-non-redundant
+ option.
+- The cluster option '--no-lvm-storage' was removed in favor of the new option
+ '--enabled-disk-templates'.
+- On instance creation, disk templates no longer need to be specified
+ with '-t'. The default disk template will be taken from the list of
+ enabled disk templates.
+- The monitoring daemon now runs as root, in order to collect information
+ only available to root (such as the state of Xen instances).
+- File and shared file storage are no longer enabled or disabled at
+ configure time, but via the option '--enabled-disk-templates' at cluster
+ initialization and modification.
+- The default directories for file and shared file storage are no longer
+ specified at configure time, but taken from the cluster's configuration.
+ They can be set at cluster initialization and modification with
+ '--file-storage-dir' and '--shared-file-storage-dir'.
+- Cluster verification now includes stricter checks regarding the default
+ file and shared file storage directories: it verifies that the directories
+ are explicitly allowed in the 'file-storage-paths' file and that they
+ exist on all nodes (see the sketch after this list).
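+
+As an illustration of the stricter verification, a minimal Python sketch
+(the file location and helper name are assumptions for this sketch, not
+Ganeti's actual implementation)::
+
+  import os
+
+  def verify_storage_dir(path, allowed_file="/etc/ganeti/file-storage-paths"):
+    """Check that path is explicitly allowed and exists on this node."""
+    with open(allowed_file) as f:
+      # One allowed directory per line; skip blanks and comments.
+      allowed = [line.strip() for line in f
+                 if line.strip() and not line.lstrip().startswith("#")]
+    if path not in allowed:
+      raise ValueError("%s is not listed in %s" % (path, allowed_file))
+    if not os.path.isdir(path):
+      raise ValueError("%s does not exist on this node" % path)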
+
+New features
+~~~~~~~~~~~~
+
+- DRBD 8.4 support. Depending on the installed DRBD version, Ganeti now uses
+ the correct command syntax. It is possible to use different DRBD versions
+ on different nodes as long as they are compatible with each other. This
+ enables rolling upgrades of DRBD with no downtime. As permanent operation
+ of different DRBD versions within a node group is discouraged,
+ ``gnt-cluster verify`` will emit a warning if it detects such a situation
+ (a version-detection sketch follows this list).
+- New "inst-status-xen" data collector for the monitoring daemon, providing
+ information about the state of the xen instances on the nodes.
+- New "lv" data collector for the monitoring daemon, collecting data about the
+ logical volumes on the nodes, and pairing them with the name of the instances
+ they belong to.
+- New "diskstats" data collector, collecting the data from /proc/diskstats and
+ presenting them over the monitoring daemon interface.
+- The ConfD client is now IPv6 compatible.
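+
+As a rough illustration of the DRBD version detection (a sketch only; the
+regular expression and branching are assumptions, not Ganeti's code)::
+
+  import re
+
+  def drbd_version():
+    """Return (major, minor) parsed from the first line of /proc/drbd."""
+    with open("/proc/drbd") as f:
+      match = re.search(r"version: (\d+)\.(\d+)", f.readline())
+    if match is None:
+      raise RuntimeError("cannot parse DRBD version")
+    return (int(match.group(1)), int(match.group(2)))
+
+  # DRBD 8.4 changed the drbdsetup command syntax, so callers branch on it:
+  use_84_syntax = drbd_version() >= (8, 4)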
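+
+The /proc/diskstats layout is fixed by the kernel: major and minor device
+numbers, the device name, then the I/O counters. A minimal parsing sketch
+(not the collector's actual code)::
+
+  def parse_diskstats(path="/proc/diskstats"):
+    """Yield (device_name, counters) for each device line."""
+    with open(path) as f:
+      for line in f:
+        parts = line.split()
+        # parts[0:2] are major/minor, parts[2] is the device name, the
+        # rest are counters (reads completed, reads merged, sectors read,
+        # time spent reading, the write equivalents, and so on).
+        yield parts[2], [int(field) for field in parts[3:]]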
+
+New dependencies
+~~~~~~~~~~~~~~~~
+
+The following new dependencies have been added:
+
+Python
+
+- ``python-mock`` (http://www.voidspace.org.uk/python/mock/) is now required
+ for the unit tests (and only used for testing).
+
+
- Version 2.8.0 beta1
- -------------------
+ Version 2.8.0 rc1
+ -----------------
- *(Released Mon, 24 Jun 2013)*
+ *(Released Fri, 2 Aug 2013)*
Incompatible/important changes
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
"[--show-cmd] <instance>", "Opens a console on the specified instance"),
"failover": (
FailoverInstance, ARGS_ONE_INSTANCE,
- [FORCE_OPT, IGNORE_CONSIST_OPT, SUBMIT_OPT, SHUTDOWN_TIMEOUT_OPT,
+ [FORCE_OPT, IGNORE_CONSIST_OPT] + SUBMIT_OPTS +
+ [SHUTDOWN_TIMEOUT_OPT,
DRY_RUN_OPT, PRIORITY_OPT, DST_NODE_OPT, IALLOCATOR_OPT,
- IGNORE_IPOLICY_OPT],
+ IGNORE_IPOLICY_OPT, CLEANUP_OPT],
"[-f] <instance>", "Stops the instance, changes its primary node and"
" (if it was originally running) starts it on the new node"
" (the secondary for mirrored instances or any node"
"config_version": constants.CONFIG_VERSION,
"os_api_version": max(constants.OS_API_VERSIONS),
"export_version": constants.EXPORT_VERSION,
+ "vcs_version": constants.VCS_VERSION,
"architecture": runtime.GetArchInfo(),
"name": cluster.cluster_name,
- "master": cluster.master_node,
+ "master": self.cfg.GetMasterNodeName(),
"default_hypervisor": cluster.primary_hypervisor,
"enabled_hypervisors": cluster.enabled_hypervisors,
"hvparams": dict([(hypervisor_name, cluster.hvparams[hypervisor_name])
"""Check prerequisite.
"""
- cluster = self.cfg.GetClusterInfo()
- default_vg = self.cfg.GetVGName()
- ec_id = self.proc.GetECId()
+ if self.op.iallocator:
+ cluster = self.cfg.GetClusterInfo()
+ default_vg = self.cfg.GetVGName()
+ ec_id = self.proc.GetECId()
- if self.op.opportunistic_locking:
- # Only consider nodes for which a lock is held
- node_whitelist = self.cfg.GetNodeNames(
- list(self.owned_locks(locking.LEVEL_NODE)))
- else:
- node_whitelist = None
+ if self.op.opportunistic_locking:
+ # Only consider nodes for which a lock is held
- node_whitelist = list(self.owned_locks(locking.LEVEL_NODE))
++ node_whitelist = self.cfg.GetNodeNames(
++ list(self.owned_locks(locking.LEVEL_NODE)))
+ else:
+ node_whitelist = None
- insts = [_CreateInstanceAllocRequest(op, ComputeDisks(op, default_vg),
- _ComputeNics(op, cluster, None,
- self.cfg, ec_id),
- _ComputeFullBeParams(op, cluster),
- node_whitelist)
- for op in self.op.instances]
+ insts = [_CreateInstanceAllocRequest(op, ComputeDisks(op, default_vg),
+ _ComputeNics(op, cluster, None,
+ self.cfg, ec_id),
+ _ComputeFullBeParams(op, cluster),
+ node_whitelist)
+ for op in self.op.instances]
- req = iallocator.IAReqMultiInstanceAlloc(instances=insts)
- ial = iallocator.IAllocator(self.cfg, self.rpc, req)
+ req = iallocator.IAReqMultiInstanceAlloc(instances=insts)
+ ial = iallocator.IAllocator(self.cfg, self.rpc, req)
- ial.Run(self.op.iallocator)
+ ial.Run(self.op.iallocator)
- if not ial.success:
- raise errors.OpPrereqError("Can't compute nodes using"
- " iallocator '%s': %s" %
- (self.op.iallocator, ial.info),
- errors.ECODE_NORES)
+ if not ial.success:
+ raise errors.OpPrereqError("Can't compute nodes using"
+ " iallocator '%s': %s" %
+ (self.op.iallocator, ial.info),
+ errors.ECODE_NORES)
- self.ia_result = ial.result
+ self.ia_result = ial.result
if self.op.dry_run:
self.dry_run_result = objects.FillDict(self._ConstructPartialResult(), {
"""Executes the opcode.
"""
- op2inst = dict((op.instance_name, op) for op in self.op.instances)
- (allocatable, failed) = self.ia_result
-
jobs = []
- for (name, node_names) in allocatable:
- op = op2inst.pop(name)
+ if self.op.iallocator:
+ op2inst = dict((op.instance_name, op) for op in self.op.instances)
+ (allocatable, failed) = self.ia_result
- (op.pnode_uuid, op.pnode) = \
- ExpandNodeUuidAndName(self.cfg, None, node_names[0])
- if len(node_names) > 1:
- (op.snode_uuid, op.snode) = \
- ExpandNodeUuidAndName(self.cfg, None, node_names[1])
- for (name, nodes) in allocatable:
++ for (name, node_names) in allocatable:
+ op = op2inst.pop(name)
- jobs.append([op])
- if len(nodes) > 1:
- (op.pnode, op.snode) = nodes
- else:
- (op.pnode,) = nodes
++ (op.pnode_uuid, op.pnode) = \
++ ExpandNodeUuidAndName(self.cfg, None, node_names[0])
++ if len(node_names) > 1:
++ (op.snode_uuid, op.snode) = \
++ ExpandNodeUuidAndName(self.cfg, None, node_names[1])
- missing = set(op2inst.keys()) - set(failed)
- assert not missing, \
- "Iallocator did return incomplete result: %s" % utils.CommaJoin(missing)
- jobs.append([op])
++ jobs.append([op])
+
- missing = set(op2inst.keys()) - set(failed)
- assert not missing, \
- "Iallocator did return incomplete result: %s" % \
- utils.CommaJoin(missing)
++ missing = set(op2inst.keys()) - set(failed)
++ assert not missing, \
++ "Iallocator did return incomplete result: %s" % \
++ utils.CommaJoin(missing)
+ else:
+ jobs.extend([op] for op in self.op.instances)
return ResultWithJobs(jobs, **self._ConstructPartialResult())
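# Illustration (not part of the patch): with an iallocator, self.ia_result
# is a pair (allocatable, failed), where each allocatable entry names an
# instance and its chosen nodes. A simplified rendering of the dispatch
# above, with the ExpandNodeUuidAndName UUID expansion omitted:
def _jobs_from_result(ops, ia_result):
  """Turn an iallocator multi-alloc result into one job per instance."""
  op2inst = dict((op.instance_name, op) for op in ops)
  (allocatable, failed) = ia_result
  jobs = []
  for (name, node_names) in allocatable:
    op = op2inst.pop(name)
    op.pnode = node_names[0]        # primary node
    if len(node_names) > 1:
      op.snode = node_names[1]      # secondary, for mirrored disk templates
    jobs.append([op])
  # Every instance must be either allocated or reported as failed.
  assert not (set(op2inst) - set(failed))
  return jobs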
_ExpandNamesForMigration(self)
self._migrater = \
- TLMigrateInstance(self, self.op.instance_name, self.op.cleanup, True,
- False, self.op.ignore_consistency, True,
+ TLMigrateInstance(self, self.op.instance_uuid, self.op.instance_name,
- False, True, False, self.op.ignore_consistency, True,
++ self.op.cleanup, True, False,
++ self.op.ignore_consistency, True,
self.op.shutdown_timeout, self.op.ignore_ipolicy)
self.tasklets = [self._migrater]
env = {
"IGNORE_CONSISTENCY": self.op.ignore_consistency,
"SHUTDOWN_TIMEOUT": self.op.shutdown_timeout,
- "OLD_PRIMARY": source_node,
- "NEW_PRIMARY": target_node,
+ "OLD_PRIMARY": self.cfg.GetNodeName(source_node_uuid),
+ "NEW_PRIMARY": self.op.target_node,
+ "FAILOVER_CLEANUP": self.op.cleanup,
}
if instance.disk_template in constants.DTS_INT_MIRROR:
HV_KVM_MACHINE_VERSION = "machine_version"
HV_KVM_PATH = "kvm_path"
HV_VIF_TYPE = "vif_type"
+HV_VIF_SCRIPT = "vif_script"
+HV_XEN_CMD = "xen_cmd"
HV_VNET_HDR = "vnet_hdr"
+ HV_VIRIDIAN = "viridian"
HVS_PARAMETER_TYPES = {
HV_KVM_EXTRA: VTYPE_STRING,
HV_KVM_MACHINE_VERSION: VTYPE_STRING,
HV_VIF_TYPE: VTYPE_STRING,
+ HV_VIF_SCRIPT: VTYPE_STRING,
+ HV_XEN_CMD: VTYPE_STRING,
HV_VNET_HDR: VTYPE_BOOL,
+ HV_VIRIDIAN: VTYPE_BOOL,
}
HVS_PARAMETERS = frozenset(HVS_PARAMETER_TYPES.keys())
HV_CPU_CAP: 0,
HV_CPU_WEIGHT: 256,
HV_VIF_TYPE: HT_HVM_VIF_IOEMU,
+ HV_VIF_SCRIPT: "",
+ HV_VIRIDIAN: False,
+ HV_XEN_CMD: XEN_CMD_XM,
},
HT_KVM: {
HV_KVM_PATH: KVM_PATH,
(False, lambda x: 0 < x < 65535, "invalid weight", None, None),
constants.HV_VIF_TYPE:
hv_base.ParamInSet(False, constants.HT_HVM_VALID_VIF_TYPES),
+ constants.HV_VIF_SCRIPT: hv_base.OPT_FILE_CHECK,
+ constants.HV_VIRIDIAN: hv_base.NO_CHECK,
+ constants.HV_XEN_CMD:
+ hv_base.ParamInSet(True, constants.KNOWN_XEN_COMMANDS),
}
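# Illustration (not part of the patch): each entry above is a check tuple
# of the form (required, check_fn, error_message, ...), as in the
# HV_CPU_WEIGHT line. A ParamInSet-style helper can be sketched as:
def param_in_set(required, valid):
  """Build a check tuple that only accepts values from ``valid``."""
  return (required,
          lambda x: x in valid,
          "unknown value, valid values are: %s" % ", ".join(sorted(valid)),
          None, None)

# For example, validating the new HV_XEN_CMD parameter against the Xen
# command names ("xm" and "xl" here are assumptions for the sketch):
_, check_fn, errmsg, _, _ = param_in_set(True, frozenset(["xm", "xl"]))
assert check_fn("xl") and not check_fn("xenstore")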
def _GetConfig(self, instance, startup_memory, block_devices):
| **failover** [-f] [\--ignore-consistency] [\--ignore-ipolicy]
| [\--shutdown-timeout=*N*]
| [{-n|\--target-node} *node* \| {-I|\--iallocator} *name*]
-| [\--submit] [\--cleanup]
++| [\--cleanup]
+| [\--submit] [\--print-job-id]
| {*instance*}
Failover will stop the instance (if running), change its primary node,
| [\--network6=*NETWORK6*]
| [\--gateway6=*GATEWAY6*]
| [\--mac-prefix=*MACPREFIX*]
-| [\--submit]
+| [\--submit] [\--print-job-id]
+ | [\--no-conflicts-check]
| {*network*}
Creates a new network with the given name. The network will be unused
C.defaultShutdownTimeout
, opIgnoreConsistency = False
, opTargetNode = Nothing
+ , opTargetNodeUuid = Nothing
, opIgnoreIpolicy = False
, opIallocator = Nothing
+ , opMigrationCleanup = False
}
])
| offSec ->
, pShutdownTimeout
, pIgnoreConsistency
, pMigrationTargetNode
+ , pMigrationTargetNodeUuid
, pIgnoreIpolicy
, pIallocator
+ , pMigrationCleanup
])
, ("OpInstanceMigrate",
[ pInstanceName
, ("config_version", showJSON C.configVersion)
, ("os_api_version", showJSON $ maximum C.osApiVersions)
, ("export_version", showJSON C.exportVersion)
+ , ("vcs_version", showJSON C.vcsVersion)
, ("architecture", showJSON arch_tuple)
, ("name", showJSON $ clusterClusterName cluster)
- , ("master", showJSON $ clusterMasterNode cluster)
+ , ("master", showJSON (case master of
+ Ok name -> name
+ _ -> undefined))
, ("default_hypervisor", def_hv)
, ("enabled_hypervisors", showJSON hypervisors)
, ("hvparams", showJSON $ clusterHvparams cluster)
case op_id of
"OP_TEST_DELAY" ->
OpCodes.OpTestDelay <$> arbitrary <*> arbitrary <*>
- genNodeNamesNE <*> arbitrary
+ genNodeNamesNE <*> return Nothing <*> arbitrary
"OP_INSTANCE_REPLACE_DISKS" ->
- OpCodes.OpInstanceReplaceDisks <$> genFQDN <*> arbitrary <*>
- arbitrary <*> arbitrary <*> genDiskIndices <*>
- genMaybe genNodeNameNE <*> genMaybe genNameNE
+ OpCodes.OpInstanceReplaceDisks <$> genFQDN <*> return Nothing <*>
+ arbitrary <*> arbitrary <*> arbitrary <*> genDiskIndices <*>
+ genMaybe genNodeNameNE <*> return Nothing <*> genMaybe genNameNE
"OP_INSTANCE_FAILOVER" ->
- OpCodes.OpInstanceFailover <$> genFQDN <*> arbitrary <*> arbitrary <*>
- genMaybe genNodeNameNE <*> arbitrary <*> genMaybe genNameNE <*>
- arbitrary
+ OpCodes.OpInstanceFailover <$> genFQDN <*> return Nothing <*>
+ arbitrary <*> arbitrary <*> genMaybe genNodeNameNE <*>
- return Nothing <*> arbitrary <*> genMaybe genNameNE
++ return Nothing <*> arbitrary <*> genMaybe genNameNE <*> arbitrary
"OP_INSTANCE_MIGRATE" ->
- OpCodes.OpInstanceMigrate <$> genFQDN <*> arbitrary <*> arbitrary <*>
- genMaybe genNodeNameNE <*> arbitrary <*>
- arbitrary <*> arbitrary <*> genMaybe genNameNE <*> arbitrary
+ OpCodes.OpInstanceMigrate <$> genFQDN <*> return Nothing <*>
+ arbitrary <*> arbitrary <*> genMaybe genNodeNameNE <*>
+ return Nothing <*> arbitrary <*> arbitrary <*> arbitrary <*>
+ genMaybe genNameNE <*> arbitrary
"OP_TAGS_GET" ->
OpCodes.OpTagsGet <$> arbitrary <*> arbitrary
"OP_TAGS_SEARCH" ->