_CheckNicsBridgesExist(lu, instance.nics, node)
+def _CheckOSVariant(os, name):
+  """Check whether an OS name conforms to the OS variants specification.
+
+ @type os: L{objects.OS}
+ @param os: OS object to check
+ @type name: string
+ @param name: OS name passed by the user, to check for validity
+
+ """
+ if not os.supported_variants:
+ return
+ try:
+ variant = name.split("+", 1)[1]
+ except IndexError:
+    raise errors.OpPrereqError("OS name '%s' must include a variant" % name)
+
+ if variant not in os.supported_variants:
+    raise errors.OpPrereqError("Unsupported OS variant '%s'" % variant)
+
+
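For reference, a minimal standalone sketch of how _CheckOSVariant behaves;
FakeOS below is a hypothetical stand-in for L{objects.OS}:

    class FakeOS(object):
      def __init__(self, supported_variants):
        self.supported_variants = supported_variants

    _CheckOSVariant(FakeOS([]), "debootstrap")             # ok: no variants declared
    _CheckOSVariant(FakeOS(["etch"]), "debootstrap+etch")  # ok: variant supported
    # _CheckOSVariant(FakeOS(["etch"]), "debootstrap")     -> OpPrereqError (no "+variant")
    # _CheckOSVariant(FakeOS(["etch"]), "debootstrap+sid") -> OpPrereqError (unsupported)
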
def _GetNodeInstancesInner(cfg, fn):
return [i for i in cfg.GetAllInstancesInfo().values() if fn(i)]
invalid_hvs = set(self.hv_list) - constants.HYPER_TYPES
if invalid_hvs:
raise errors.OpPrereqError("Enabled hypervisors contains invalid"
- " entries: %s" %
- utils.CommaJoin(invalid_hvs))
+                                 " entries: %s" % ", ".join(invalid_hvs))
else:
self.hv_list = cluster.enabled_hypervisors
_OP_REQP = ["output_fields", "names"]
REQ_BGL = False
_FIELDS_STATIC = utils.FieldSet()
- _FIELDS_DYNAMIC = utils.FieldSet("name", "valid", "node_status")
+ _FIELDS_DYNAMIC = utils.FieldSet("name", "valid", "node_status", "variants")
+  # Fields that require computing global OS validity
+ _FIELDS_NEEDVALID = frozenset(["valid", "variants"])
def ExpandNames(self):
if self.op.names:
for node_name, nr in rlist.items():
if nr.fail_msg or not nr.payload:
continue
- for name, path, status, diagnose in nr.payload:
+ for name, path, status, diagnose, variants in nr.payload:
if name not in all_os:
# build a list of nodes for this os containing empty lists
# for each node in node_list
all_os[name] = {}
for nname in good_nodes:
all_os[name][nname] = []
- all_os[name][node_name].append((path, status, diagnose))
+ all_os[name][node_name].append((path, status, diagnose, variants))
return all_os
def Exec(self, feedback_fn):
node_data = self.rpc.call_os_diagnose(valid_nodes)
pol = self._DiagnoseByOS(valid_nodes, node_data)
output = []
+ calc_valid = self._FIELDS_NEEDVALID.intersection(self.op.output_fields)
+ calc_variants = "variants" in self.op.output_fields
+
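The frozenset intersection above gates the per-OS validity scan on whether the
caller actually requested a field that needs it; a quick illustration:

    _FIELDS_NEEDVALID = frozenset(["valid", "variants"])

    bool(_FIELDS_NEEDVALID.intersection(["name", "node_status"]))  # False: scan skipped
    bool(_FIELDS_NEEDVALID.intersection(["name", "variants"]))     # True: validity computed
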
for os_name, os_data in pol.items():
row = []
+ if calc_valid:
+ valid = True
+ variants = None
+ for osl in os_data.values():
+        valid = bool(valid and osl and osl[0][1])
+ if not valid:
+ variants = None
+ break
+ if calc_variants:
+ node_variants = osl[0][3]
+ if variants is None:
+ variants = node_variants
+ else:
+ variants = [v for v in variants if v in node_variants]
+
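The comprehension above intersects the variant lists node by node while
preserving the first node's ordering; a small sketch with hypothetical
per-node data:

    # Hypothetical variant lists reported by three nodes for one OS
    per_node = [["etch", "lenny", "sid"], ["lenny", "sid"], ["etch", "lenny"]]

    variants = None
    for node_variants in per_node:
      if variants is None:
        variants = node_variants
      else:
        variants = [v for v in variants if v in node_variants]
    # variants == ["lenny"], the only variant every node supports
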
for field in self.op.output_fields:
if field == "name":
val = os_name
elif field == "valid":
- val = utils.all([osl and osl[0][1] for osl in os_data.values()])
+ val = valid
elif field == "node_status":
# this is just a copy of the dict
val = {}
for node_name, nos_list in os_data.items():
val[node_name] = nos_list
+ elif field == "variants":
+ val = variants
else:
raise errors.ParameterError(field)
row.append(val)
# if we don't request only static fields, we need to lock the nodes
self.needed_locks[locking.LEVEL_NODE] = self.wanted
-
def CheckPrereq(self):
"""Check prerequisites.
# Boolean value that tells us whether we're offlining or draining the node
offline_or_drain = self.op.offline == True or self.op.drained == True
+ deoffline_or_drain = self.op.offline == False or self.op.drained == False
if (node.master_candidate and
(self.op.master_candidate == False or offline_or_drain)):
raise errors.OpPrereqError("Node '%s' is offline or drained, can't set"
" to master_candidate" % node.name)
+    # If the node is being de-offlined or un-drained, auto-promote it to
+    # master candidate if needed
+    if (deoffline_or_drain and not offline_or_drain and
+        self.op.master_candidate != True):
+ self.op.master_candidate = _DecideSelfPromotion(self)
+ if self.op.master_candidate:
+ self.LogInfo("Autopromoting node to master candidate")
+
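_DecideSelfPromotion itself is not shown in this hunk; a plausible sketch of
what such a helper could check, assuming (both assumptions here) that the
cluster config exposes candidate_pool_size and per-node master_candidate
flags:

    def _DecideSelfPromotion(lu):
      """Hypothetical sketch: promote when the candidate pool has room."""
      pool_size = lu.cfg.GetClusterInfo().candidate_pool_size
      mc_now = len([node for node in lu.cfg.GetAllNodesInfo().values()
                    if node.master_candidate])
      return mc_now < pool_size
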
return
def Exec(self, feedback_fn):
_OP_REQP = ["instance_name", "ignore_secondaries", "reboot_type"]
REQ_BGL = False
+ def CheckArguments(self):
+ """Check the arguments.
+
+ """
+ self.shutdown_timeout = getattr(self.op, "shutdown_timeout",
+ constants.DEFAULT_SHUTDOWN_TIMEOUT)
+
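This getattr-with-default pattern (repeated for each LU below) lets older
opcodes that lack the new attribute keep working; in isolation, with an
assumed default value for illustration:

    DEFAULT_SHUTDOWN_TIMEOUT = 120  # assumed value, see constants.py

    class OldOp(object):
      pass                          # opcode from before the attribute existed

    class NewOp(object):
      shutdown_timeout = 60

    getattr(OldOp(), "shutdown_timeout", DEFAULT_SHUTDOWN_TIMEOUT)  # -> 120
    getattr(NewOp(), "shutdown_timeout", DEFAULT_SHUTDOWN_TIMEOUT)  # -> 60
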
def ExpandNames(self):
if self.op.reboot_type not in [constants.INSTANCE_REBOOT_SOFT,
constants.INSTANCE_REBOOT_HARD,
env = {
"IGNORE_SECONDARIES": self.op.ignore_secondaries,
"REBOOT_TYPE": self.op.reboot_type,
+ "SHUTDOWN_TIMEOUT": self.shutdown_timeout,
}
env.update(_BuildInstanceHookEnvByObject(self, self.instance))
nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
for disk in instance.disks:
self.cfg.SetDiskID(disk, node_current)
result = self.rpc.call_instance_reboot(node_current, instance,
- reboot_type)
+ reboot_type,
+ self.shutdown_timeout)
result.Raise("Could not reboot instance")
else:
- result = self.rpc.call_instance_shutdown(node_current, instance)
+ result = self.rpc.call_instance_shutdown(node_current, instance,
+ self.shutdown_timeout)
result.Raise("Could not shutdown instance for full reboot")
_ShutdownInstanceDisks(self, instance)
_StartInstanceDisks(self, instance, ignore_secondaries)
_OP_REQP = ["instance_name"]
REQ_BGL = False
+ def CheckArguments(self):
+ """Check the arguments.
+
+ """
+ self.timeout = getattr(self.op, "timeout",
+ constants.DEFAULT_SHUTDOWN_TIMEOUT)
+
def ExpandNames(self):
self._ExpandAndLockInstance()
"""
env = _BuildInstanceHookEnvByObject(self, self.instance)
+ env["TIMEOUT"] = self.timeout
nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
return env, nl, nl
"""
instance = self.instance
node_current = instance.primary_node
+ timeout = self.timeout
self.cfg.MarkInstanceDown(instance.name)
- result = self.rpc.call_instance_shutdown(node_current, instance)
+ result = self.rpc.call_instance_shutdown(node_current, instance, timeout)
msg = result.fail_msg
if msg:
self.proc.LogWarning("Could not shutdown instance: %s" % msg)
instance.primary_node))
self.op.os_type = getattr(self.op, "os_type", None)
+ self.op.force_variant = getattr(self.op, "force_variant", False)
if self.op.os_type is not None:
# OS verification
pnode = self.cfg.GetNodeInfo(
result = self.rpc.call_os_get(pnode.name, self.op.os_type)
result.Raise("OS '%s' not in supported OS list for primary node %s" %
(self.op.os_type, pnode.name), prereq=True)
+ if not self.op.force_variant:
+ _CheckOSVariant(result.payload, self.op.os_type)
self.instance = instance
_OP_REQP = ["instance_name", "ignore_failures"]
REQ_BGL = False
+ def CheckArguments(self):
+ """Check the arguments.
+
+ """
+ self.shutdown_timeout = getattr(self.op, "shutdown_timeout",
+ constants.DEFAULT_SHUTDOWN_TIMEOUT)
+
def ExpandNames(self):
self._ExpandAndLockInstance()
self.needed_locks[locking.LEVEL_NODE] = []
"""
env = _BuildInstanceHookEnvByObject(self, self.instance)
+ env["SHUTDOWN_TIMEOUT"] = self.shutdown_timeout
nl = [self.cfg.GetMasterNode()]
return env, nl, nl
logging.info("Shutting down instance %s on node %s",
instance.name, instance.primary_node)
- result = self.rpc.call_instance_shutdown(instance.primary_node, instance)
+ result = self.rpc.call_instance_shutdown(instance.primary_node, instance,
+ self.shutdown_timeout)
msg = result.fail_msg
if msg:
if self.op.ignore_failures:
_OP_REQP = ["instance_name", "ignore_consistency"]
REQ_BGL = False
+ def CheckArguments(self):
+ """Check the arguments.
+
+ """
+ self.shutdown_timeout = getattr(self.op, "shutdown_timeout",
+ constants.DEFAULT_SHUTDOWN_TIMEOUT)
+
def ExpandNames(self):
self._ExpandAndLockInstance()
self.needed_locks[locking.LEVEL_NODE] = []
"""
env = {
"IGNORE_CONSISTENCY": self.op.ignore_consistency,
+ "SHUTDOWN_TIMEOUT": self.shutdown_timeout,
}
env.update(_BuildInstanceHookEnvByObject(self, self.instance))
nl = [self.cfg.GetMasterNode()] + list(self.instance.secondary_nodes)
logging.info("Shutting down instance %s on node %s",
instance.name, source_node)
- result = self.rpc.call_instance_shutdown(source_node, instance)
+ result = self.rpc.call_instance_shutdown(source_node, instance,
+ self.shutdown_timeout)
msg = result.fail_msg
if msg:
if self.op.ignore_consistency:
_OP_REQP = ["instance_name", "target_node"]
REQ_BGL = False
+ def CheckArguments(self):
+ """Check the arguments.
+
+ """
+ self.shutdown_timeout = getattr(self.op, "shutdown_timeout",
+ constants.DEFAULT_SHUTDOWN_TIMEOUT)
+
def ExpandNames(self):
self._ExpandAndLockInstance()
target_node = self.cfg.ExpandNodeName(self.op.target_node)
"""
env = {
"TARGET_NODE": self.op.target_node,
+ "SHUTDOWN_TIMEOUT": self.shutdown_timeout,
}
env.update(_BuildInstanceHookEnvByObject(self, self.instance))
nl = [self.cfg.GetMasterNode()] + [self.instance.primary_node,
self.LogInfo("Shutting down instance %s on source node %s",
instance.name, source_node)
- result = self.rpc.call_instance_shutdown(source_node, instance)
+ result = self.rpc.call_instance_shutdown(source_node, instance,
+ self.shutdown_timeout)
msg = result.fail_msg
if msg:
if self.op.ignore_consistency:
self.op.src_path = src_path = \
os.path.join(constants.EXPORT_DIR, src_path)
+      # On import, force_variant must be True: if the variant was forced at
+      # the initial install, forcing it again is our only chance of getting
+      # the import to work
+ self.op.force_variant = True
+
else: # INSTANCE_CREATE
if getattr(self.op, "os_type", None) is None:
raise errors.OpPrereqError("No guest OS specified")
+ self.op.force_variant = getattr(self.op, "force_variant", False)
def _RunAllocator(self):
"""Run the allocator based on input opcode.
result = self.rpc.call_os_get(pnode.name, self.op.os_type)
result.Raise("OS '%s' not in supported os list for primary node %s" %
(self.op.os_type, pnode.name), prereq=True)
+ if not self.op.force_variant:
+ _CheckOSVariant(result.payload, self.op.os_type)
_CheckNicsBridgesExist(self, self.nics, self.pnode.name)
_OP_REQP = ["instance_name", "target_node", "shutdown"]
REQ_BGL = False
+ def CheckArguments(self):
+ """Check the arguments.
+
+ """
+ self.shutdown_timeout = getattr(self.op, "shutdown_timeout",
+ constants.DEFAULT_SHUTDOWN_TIMEOUT)
+
def ExpandNames(self):
self._ExpandAndLockInstance()
# FIXME: lock only instance primary and destination node
env = {
"EXPORT_NODE": self.op.target_node,
"EXPORT_DO_SHUTDOWN": self.op.shutdown,
+ "SHUTDOWN_TIMEOUT": self.shutdown_timeout,
}
env.update(_BuildInstanceHookEnvByObject(self, self.instance))
nl = [self.cfg.GetMasterNode(), self.instance.primary_node,
if self.op.shutdown:
# shutdown the instance, but not the disks
feedback_fn("Shutting down instance %s" % instance.name)
- result = self.rpc.call_instance_shutdown(src_node, instance)
+ result = self.rpc.call_instance_shutdown(src_node, instance,
+ self.shutdown_timeout)
result.Raise("Could not shutdown instance %s on"
" node %s" % (instance.name, src_node))