return params_copy
-def _UpdateMinMaxISpecs(ipolicy, new_minmax, group_policy):
- use_none = use_default = group_policy
- minmax = ipolicy.setdefault(constants.ISPECS_MINMAX, {})
- for (key, value) in new_minmax.items():
- if key not in constants.ISPECS_MINMAX_KEYS:
- raise errors.OpPrereqError("Invalid key in new ipolicy/%s: %s" %
- (constants.ISPECS_MINMAX, key),
- errors.ECODE_INVAL)
- old_spec = minmax.get(key, {})
- minmax[key] = _GetUpdatedParams(old_spec, value, use_none=use_none,
- use_default=use_default)
- utils.ForceDictType(minmax[key], constants.ISPECS_PARAMETER_TYPES)
-
-
def _GetUpdatedIPolicy(old_ipolicy, new_ipolicy, group_policy=False):
"""Return the new version of an instance policy.
we should support removal of policy entries
"""
- use_none = use_default = group_policy
ipolicy = copy.deepcopy(old_ipolicy)
for key, value in new_ipolicy.items():
if key not in constants.IPOLICY_ALL_KEYS:
raise errors.OpPrereqError("Invalid key in new ipolicy: %s" % key,
errors.ECODE_INVAL)
- if key == constants.ISPECS_MINMAX:
- _UpdateMinMaxISpecs(ipolicy, value, group_policy)
- elif key == constants.ISPECS_STD:
- ipolicy[key] = _GetUpdatedParams(old_ipolicy.get(key, {}), value,
- use_none=use_none,
- use_default=use_default)
- utils.ForceDictType(ipolicy[key], constants.ISPECS_PARAMETER_TYPES)
+ if (not value or value == [constants.VALUE_DEFAULT] or
+ value == constants.VALUE_DEFAULT):
+ if group_policy:
+ if key in ipolicy:
+ del ipolicy[key]
+ else:
+ raise errors.OpPrereqError("Can't unset ipolicy attribute '%s'"
+ " on the cluster" % key,
+ errors.ECODE_INVAL)
else:
- if (not value or value == [constants.VALUE_DEFAULT] or
- value == constants.VALUE_DEFAULT):
+ if key in constants.IPOLICY_PARAMETERS:
+ # FIXME: we assume all such values are float
+ try:
+ ipolicy[key] = float(value)
+ except (TypeError, ValueError), err:
+ raise errors.OpPrereqError("Invalid value for attribute"
+ " '%s': '%s', error: %s" %
+ (key, value, err), errors.ECODE_INVAL)
+ elif key == constants.ISPECS_MINMAX:
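+ # value is now a list of min/max spec pairs, e.g.
+ # [{ISPECS_MIN: {...}, ISPECS_MAX: {...}}]; enforce the ispec
+ # parameter types on every entry before storing it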
+ for minmax in value:
+ for k in minmax.keys():
+ utils.ForceDictType(minmax[k], constants.ISPECS_PARAMETER_TYPES)
+ ipolicy[key] = value
+ elif key == constants.ISPECS_STD:
if group_policy:
- del ipolicy[key]
- else:
- raise errors.OpPrereqError("Can't unset ipolicy attribute '%s'"
- " on the cluster'" % key,
- errors.ECODE_INVAL)
+ msg = "%s cannot appear in group instance specs" % key
+ raise errors.OpPrereqError(msg, errors.ECODE_INVAL)
+ ipolicy[key] = _GetUpdatedParams(old_ipolicy.get(key, {}), value,
+ use_none=False, use_default=False)
+ utils.ForceDictType(ipolicy[key], constants.ISPECS_PARAMETER_TYPES)
else:
- if key in constants.IPOLICY_PARAMETERS:
- # FIXME: we assume all such values are float
- try:
- ipolicy[key] = float(value)
- except (TypeError, ValueError), err:
- raise errors.OpPrereqError("Invalid value for attribute"
- " '%s': '%s', error: %s" %
- (key, value, err), errors.ECODE_INVAL)
- else:
- # FIXME: we assume all others are lists; this should be redone
- # in a nicer way
- ipolicy[key] = list(value)
+ # FIXME: we assume all others are lists; this should be redone
+ # in a nicer way
+ ipolicy[key] = list(value)
try:
objects.InstancePolicy.CheckParameterSyntax(ipolicy, not group_policy)
except errors.ConfigurationError, err:
ret.append("Disk template %s is not allowed (allowed templates: %s)" %
(disk_template, utils.CommaJoin(allowed_dts)))
- minmax = ipolicy[constants.ISPECS_MINMAX]
- return ret + filter(None,
- (_compute_fn(name, qualifier, minmax, value)
- for (name, qualifier, value) in test_settings))
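+ # an instance only needs to fit one of the min/max spec pairs; report
+ # the violations of the pair it comes closest to satisfying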
+ min_errs = None
+ for minmax in ipolicy[constants.ISPECS_MINMAX]:
+ errs = filter(None,
+ (_compute_fn(name, qualifier, minmax, value)
+ for (name, qualifier, value) in test_settings))
+ if min_errs is None or len(errs) < len(min_errs):
+ min_errs = errs
+ assert min_errs is not None
+ return ret + min_errs
def _ComputeIPolicyInstanceViolation(ipolicy, instance, cfg,
"""
instance = self.instance
force = self.op.force
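+ # forward the reason for this operation to the node RPC calls below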
+ reason = self.op.reason
if not self.op.no_remember:
self.cfg.MarkInstanceUp(instance.name)
self.rpc.call_instance_start(node_current,
(instance, self.op.hvparams,
self.op.beparams),
- self.op.startup_paused)
+ self.op.startup_paused, reason)
msg = result.fail_msg
if msg:
_ShutdownInstanceDisks(self, instance)
else:
if instance_running:
result = self.rpc.call_instance_shutdown(node_current, instance,
- self.op.shutdown_timeout)
+ self.op.shutdown_timeout,
+ reason)
result.Raise("Could not shutdown instance for full reboot")
_ShutdownInstanceDisks(self, instance)
else:
instance.name)
_StartInstanceDisks(self, instance, ignore_secondaries)
result = self.rpc.call_instance_start(node_current,
- (instance, None, None), False)
+ (instance, None, None), False,
+ reason)
msg = result.fail_msg
if msg:
_ShutdownInstanceDisks(self, instance)
instance = self.instance
node_current = instance.primary_node
timeout = self.op.timeout
+ reason = self.op.reason
# If the instance is offline we shouldn't mark it as down, as that
# resets the offline flag.
assert self.op.ignore_offline_nodes
self.LogInfo("Primary node offline, marked instance as stopped")
else:
- result = self.rpc.call_instance_shutdown(node_current, instance, timeout)
+ result = self.rpc.call_instance_shutdown(node_current, instance, timeout,
+ reason)
msg = result.fail_msg
if msg:
self.LogWarning("Could not shutdown instance: %s", msg)
instance.name, instance.primary_node)
result = self.rpc.call_instance_shutdown(instance.primary_node, instance,
- self.op.shutdown_timeout)
+ self.op.shutdown_timeout,
+ self.op.reason)
msg = result.fail_msg
if msg:
if self.op.ignore_failures:
self.owned_locks(locking.LEVEL_NODE_RES))
result = self.rpc.call_instance_shutdown(source_node, instance,
- self.op.shutdown_timeout)
+ self.op.shutdown_timeout,
+ self.op.reason)
msg = result.fail_msg
if msg:
if self.op.ignore_consistency:
raise errors.OpExecError("Can't activate the instance's disks")
result = self.rpc.call_instance_start(target_node,
- (instance, None, None), False)
+ (instance, None, None), False,
+ self.op.reason)
msg = result.fail_msg
if msg:
_ShutdownInstanceDisks(self, instance)
instance.name, source_node)
result = self.rpc.call_instance_shutdown(source_node, instance,
- self.shutdown_timeout)
+ self.shutdown_timeout,
+ self.lu.op.reason)
msg = result.fail_msg
if msg:
if self.ignore_consistency or primary_node.offline:
self.feedback_fn("* starting the instance on the target node %s" %
target_node)
result = self.rpc.call_instance_start(target_node, (instance, None, None),
- False)
+ False, self.lu.op.reason)
msg = result.fail_msg
if msg:
_ShutdownInstanceDisks(self.lu, instance)
@type excl_stor: boolean
@param excl_stor: Whether exclusive_storage is active for the node
+ @return: list of created devices, as (node, device) tuples
"""
- if device.CreateOnSecondary():
- force_create = True
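+ # keep track of the devices created so far, so they can be reported
+ # back to the caller if a later step fails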
+ created_devices = []
+ try:
+ if device.CreateOnSecondary():
+ force_create = True
- if device.children:
- for child in device.children:
- _CreateBlockDevInner(lu, node, instance, child, force_create,
- info, force_open, excl_stor)
+ if device.children:
+ for child in device.children:
+ devs = _CreateBlockDevInner(lu, node, instance, child, force_create,
+ info, force_open, excl_stor)
+ created_devices.extend(devs)
- if not force_create:
- return
+ if not force_create:
+ return created_devices
+
+ _CreateSingleBlockDev(lu, node, instance, device, info, force_open,
+ excl_stor)
+ # The device has been completely created, so there is no point in keeping
+ # its subdevices in the list. We just add the device itself instead.
+ created_devices = [(node, device)]
+ return created_devices
- _CreateSingleBlockDev(lu, node, instance, device, info, force_open,
- excl_stor)
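+ # on failure, attach the devices created so far to the exception so
+ # that the caller can clean them up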
+ except errors.DeviceCreationError, e:
+ e.created_devices.extend(created_devices)
+ raise e
+ except errors.OpExecError, e:
+ raise errors.DeviceCreationError(str(e), created_devices)
def _CreateSingleBlockDev(lu, node, instance, device, info, force_open,
except errors.OpExecError:
logging.warning("Creating disk %s for instance '%s' failed",
idx, instance.name)
+ except errors.DeviceCreationError, e:
+ logging.warning("Creating disk %s for instance '%s' failed",
+ idx, instance.name)
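+ # include the partially created devices, so the cleanup loop below
+ # removes them as well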
+ disks_created.extend(e.created_devices)
for (node, disk) in disks_created:
lu.cfg.SetDiskID(disk, node)
result = lu.rpc.call_blockdev_remove(node, disk)
if result.fail_msg:
logging.warning("Failed to remove newly-created disk %s on node %s:"
" %s", device, node, result.fail_msg)
- raise
+ raise errors.OpExecError(e.message)
def _RemoveDisks(lu, instance, target_node=None, ignore_failures=False):
logging.info("Starting instance %s on node %s", instance, pnode_name)
feedback_fn("* starting instance...")
result = self.rpc.call_instance_start(pnode_name, (iobj, None, None),
- False)
+ False, self.op.reason)
result.Raise("Could not start instance")
return list(iobj.all_nodes)
# shutdown the instance, but not the disks
feedback_fn("Shutting down instance %s" % instance.name)
result = self.rpc.call_instance_shutdown(src_node, instance,
- self.op.shutdown_timeout)
+ self.op.shutdown_timeout,
+ self.op.reason)
# TODO: Maybe ignore failures if ignore_remove_failures is set
result.Raise("Could not shutdown instance %s on"
" node %s" % (instance.name, src_node))
assert not activate_disks
feedback_fn("Starting instance %s" % instance.name)
result = self.rpc.call_instance_start(src_node,
- (instance, None, None), False)
+ (instance, None, None), False,
+ self.op.reason)
msg = result.fail_msg
if msg:
feedback_fn("Failed to start instance: %s" % msg)