import os
import os.path
-import sha
import time
import tempfile
import re
wanted.append(instance)
else:
- wanted = lu.cfg.GetInstanceList()
- return utils.NiceSort(wanted)
+ wanted = utils.NiceSort(lu.cfg.GetInstanceList())
+ return wanted
def _CheckOutputFields(static, dynamic, selected):
@param lu: the LU on behalf of which we make the check
@param node: the node to check
- @raise errors.OpPrereqError: if the nodes is offline
+ @raise errors.OpPrereqError: if the node is offline
"""
if lu.cfg.GetNodeInfo(node).offline:
raise errors.OpPrereqError("Can't use offline node %s" % node)
+def _CheckNodeNotDrained(lu, node):
+ """Ensure that a given node is not drained.
+
+ @param lu: the LU on behalf of which we make the check
+ @param node: the node to check
+ @raise errors.OpPrereqError: if the node is drained
+
+ """
+ if lu.cfg.GetNodeInfo(node).drained:
+ raise errors.OpPrereqError("Can't use drained node %s" % node)
+
+
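# A minimal usage sketch: an LU guards a target node with both checks,
# each raising OpPrereqError on a bad node (the failover/replace-disks
# LUs below do exactly this):
#
#   _CheckNodeOnline(self, target_node)
#   _CheckNodeNotDrained(self, target_node)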
def _BuildInstanceHookEnv(name, primary_node, secondary_nodes, os_type, status,
- memory, vcpus, nics):
+ memory, vcpus, nics, disk_template, disks):
"""Builds instance related env variables for hooks
This builds the hook environment from individual variables.
@type nics: list
@param nics: list of tuples (ip, bridge, mac) representing
the NICs the instance has
+ @type disk_template: string
+  @param disk_template: the disk template of the instance
+ @type disks: list
+ @param disks: the list of (size, mode) pairs
@rtype: dict
@return: the hook environment for this instance
"INSTANCE_STATUS": str_status,
"INSTANCE_MEMORY": memory,
"INSTANCE_VCPUS": vcpus,
+ "INSTANCE_DISK_TEMPLATE": disk_template,
}
if nics:
nic_count = len(nics)
- for idx, (ip, bridge, mac) in enumerate(nics):
+ for idx, (ip, mac, mode, link) in enumerate(nics):
if ip is None:
ip = ""
env["INSTANCE_NIC%d_IP" % idx] = ip
- env["INSTANCE_NIC%d_BRIDGE" % idx] = bridge
- env["INSTANCE_NIC%d_HWADDR" % idx] = mac
+ env["INSTANCE_NIC%d_MAC" % idx] = mac
+ env["INSTANCE_NIC%d_MODE" % idx] = mode
+ env["INSTANCE_NIC%d_LINK" % idx] = link
+ if mode == constants.NIC_MODE_BRIDGED:
+ env["INSTANCE_NIC%d_BRIDGE" % idx] = link
else:
nic_count = 0
env["INSTANCE_NIC_COUNT"] = nic_count
+ if disks:
+ disk_count = len(disks)
+ for idx, (size, mode) in enumerate(disks):
+ env["INSTANCE_DISK%d_SIZE" % idx] = size
+ env["INSTANCE_DISK%d_MODE" % idx] = mode
+ else:
+ disk_count = 0
+
+ env["INSTANCE_DISK_COUNT"] = disk_count
+
return env
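# Shape of the new NIC/disk hook variables, for one bridged NIC on link
# "xen-br0" and a single 10 GiB rw disk (all values illustrative):
#
#   INSTANCE_NIC_COUNT=1          INSTANCE_DISK_COUNT=1
#   INSTANCE_NIC0_MAC=...         INSTANCE_DISK0_SIZE=10240
#   INSTANCE_NIC0_MODE=bridged    INSTANCE_DISK0_MODE=rw
#   INSTANCE_NIC0_LINK=xen-br0    INSTANCE_DISK_TEMPLATE=drbd
#   INSTANCE_NIC0_BRIDGE=xen-br0  (set only in bridged mode)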
+def _PreBuildNICHooksList(lu, nics):
+ """Build a list of nic information tuples.
+
+ This list is suitable to be passed to _BuildInstanceHookEnv.
+
+ @type lu: L{LogicalUnit}
+ @param lu: the logical unit on whose behalf we execute
+ @type nics: list of L{objects.NIC}
+ @param nics: list of nics to convert to hooks tuples
+
+ """
+ hooks_nics = []
+ c_nicparams = lu.cfg.GetClusterInfo().nicparams[constants.PP_DEFAULT]
+ for nic in nics:
+ ip = nic.ip
+ mac = nic.mac
+ filled_params = objects.FillDict(c_nicparams, nic.nicparams)
+ mode = filled_params[constants.NIC_MODE]
+ link = filled_params[constants.NIC_LINK]
+ hooks_nics.append((ip, mac, mode, link))
+ return hooks_nics
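# objects.FillDict is assumed here to overlay the custom values on a copy
# of the per-cluster defaults, leaving both inputs untouched; roughly:
#
#   def _FillDictSketch(defaults, custom):
#     filled = copy.deepcopy(defaults)
#     filled.update(custom)
#     return filled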
def _BuildInstanceHookEnvByObject(lu, instance, override=None):
"""Builds instance related env variables for hooks from an object.
'status': instance.admin_up,
'memory': bep[constants.BE_MEMORY],
'vcpus': bep[constants.BE_VCPUS],
- 'nics': [(nic.ip, nic.bridge, nic.mac) for nic in instance.nics],
+ 'nics': _PreBuildNICHooksList(lu, instance.nics),
+ 'disk_template': instance.disk_template,
+ 'disks': [(disk.size, disk.mode) for disk in instance.disks],
}
if override:
args.update(override)
(mc_now, mc_max))
-def _CheckInstanceBridgesExist(lu, instance):
+def _CheckNicsBridgesExist(lu, target_nics, target_node,
+ profile=constants.PP_DEFAULT):
+  """Check that the bridges needed by a list of nics exist.
+
+ """
+ c_nicparams = lu.cfg.GetClusterInfo().nicparams[profile]
+ paramslist = [objects.FillDict(c_nicparams, nic.nicparams)
+ for nic in target_nics]
+ brlist = [params[constants.NIC_LINK] for params in paramslist
+ if params[constants.NIC_MODE] == constants.NIC_MODE_BRIDGED]
+ if brlist:
+ result = lu.rpc.call_bridges_exist(target_node, brlist)
+ result.Raise()
+ if not result.data:
+    raise errors.OpPrereqError("One or more target bridges %s do not"
+ " exist on destination node '%s'" %
+ (brlist, target_node))
+
+
+def _CheckInstanceBridgesExist(lu, instance, node=None):
  """Check that the bridges needed by an instance exist.
"""
- # check bridges existance
- brlist = [nic.bridge for nic in instance.nics]
- result = lu.rpc.call_bridges_exist(instance.primary_node, brlist)
- result.Raise()
- if not result.data:
- raise errors.OpPrereqError("One or more target bridges %s does not"
- " exist on destination node '%s'" %
- (brlist, instance.primary_node))
+ if node is None:
+    node = instance.primary_node
+ _CheckNicsBridgesExist(lu, instance.nics, node)
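# With the optional node argument, the failover and migration checks
# below can validate the bridges on the *target* node with one call:
#
#   _CheckInstanceBridgesExist(self, instance, node=target_node)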
class LUDestroyCluster(NoHooksLU):
def _VerifyNode(self, nodeinfo, file_list, local_cksum,
node_result, feedback_fn, master_files,
- drbd_map):
+ drbd_map, vg_name):
"""Run multiple tests against a node.
Test list:
  @param drbd_map: the used drbd minors for this node, in
form of minor: (instance, must_exist) which correspond to instances
and their running status
+ @param vg_name: Ganeti Volume Group (result of self.cfg.GetVGName())
"""
node = nodeinfo.name
# compares ganeti version
local_version = constants.PROTOCOL_VERSION
remote_version = node_result.get('version', None)
- if not remote_version:
+ if not (remote_version and isinstance(remote_version, (list, tuple)) and
+ len(remote_version) == 2):
feedback_fn(" - ERROR: connection to %s failed" % (node))
return True
- if local_version != remote_version:
- feedback_fn(" - ERROR: sw version mismatch: master %s, node(%s) %s" %
- (local_version, node, remote_version))
+ if local_version != remote_version[0]:
+ feedback_fn(" - ERROR: incompatible protocol versions: master %s,"
+ " node %s %s" % (local_version, node, remote_version[0]))
return True
- # checks vg existance and size > 20G
+ # node seems compatible, we can actually try to look into its results
bad = False
- vglist = node_result.get(constants.NV_VGLIST, None)
- if not vglist:
- feedback_fn(" - ERROR: unable to check volume groups on node %s." %
- (node,))
- bad = True
- else:
- vgstatus = utils.CheckVolumeGroupSize(vglist, self.cfg.GetVGName(),
- constants.MIN_VG_SIZE)
- if vgstatus:
- feedback_fn(" - ERROR: %s on node %s" % (vgstatus, node))
+
+ # full package version
+ if constants.RELEASE_VERSION != remote_version[1]:
+ feedback_fn(" - WARNING: software version mismatch: master %s,"
+ " node %s %s" %
+ (constants.RELEASE_VERSION, node, remote_version[1]))
+
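    # The 'version' entry is now a (PROTOCOL_VERSION, RELEASE_VERSION)
    # pair, e.g. (20, "2.0.0") with values illustrative only: a protocol
    # mismatch aborts this node's verification, while a mere release
    # mismatch just warns.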
+ # checks vg existence and size > 20G
+ if vg_name is not None:
+ vglist = node_result.get(constants.NV_VGLIST, None)
+ if not vglist:
+ feedback_fn(" - ERROR: unable to check volume groups on node %s." %
+ (node,))
bad = True
+ else:
+ vgstatus = utils.CheckVolumeGroupSize(vglist, vg_name,
+ constants.MIN_VG_SIZE)
+ if vgstatus:
+ feedback_fn(" - ERROR: %s on node %s" % (vgstatus, node))
+ bad = True
# checks config file checksum
(hv_name, hv_result))
# check used drbd list
- used_minors = node_result.get(constants.NV_DRBDLIST, [])
- for minor, (iname, must_exist) in drbd_map.items():
- if minor not in used_minors and must_exist:
- feedback_fn(" - ERROR: drbd minor %d of instance %s is not active" %
- (minor, iname))
- bad = True
- for minor in used_minors:
- if minor not in drbd_map:
- feedback_fn(" - ERROR: unallocated drbd minor %d is in use" % minor)
- bad = True
+ if vg_name is not None:
+ used_minors = node_result.get(constants.NV_DRBDLIST, [])
+ if not isinstance(used_minors, (tuple, list)):
+ feedback_fn(" - ERROR: cannot parse drbd status file: %s" %
+ str(used_minors))
+ else:
+ for minor, (iname, must_exist) in drbd_map.items():
+ if minor not in used_minors and must_exist:
+ feedback_fn(" - ERROR: drbd minor %d of instance %s is"
+ " not active" % (minor, iname))
+ bad = True
+ for minor in used_minors:
+ if minor not in drbd_map:
+ feedback_fn(" - ERROR: unallocated drbd minor %d is in use" %
+ minor)
+ bad = True
return bad
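# drbd_map shape as consumed above (names illustrative):
#   {0: ("inst1.example.com", True), 3: ("ghost.example.com", False)}
# where the boolean is the instance's expected-running (admin_up) status.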
"""
all_nodes = self.cfg.GetNodeList()
- # TODO: populate the environment with useful information for verify hooks
- env = {}
+ env = {
+ "CLUSTER_TAGS": " ".join(self.cfg.GetClusterInfo().GetTags())
+ }
+ for node in self.cfg.GetAllNodesInfo().values():
+ env["NODE_TAGS_%s" % node.name] = " ".join(node.GetTags())
+
return env, [], all_nodes
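  # Tags are space-joined, so a cluster tagged "prod" with a node "node1"
  # tagged "rack1" and "lvm" (names assumed) gives hooks:
  #   CLUSTER_TAGS="prod"   NODE_TAGS_node1="rack1 lvm"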
def Exec(self, feedback_fn):
i_non_redundant = [] # Non redundant instances
i_non_a_balanced = [] # Non auto-balanced instances
n_offline = [] # List of offline nodes
+ n_drained = [] # List of nodes being drained
node_volume = {}
node_instance = {}
node_info = {}
constants.NV_NODENETTEST: [(node.name, node.primary_ip,
node.secondary_ip) for node in nodeinfo
if not node.offline],
- constants.NV_LVLIST: vg_name,
constants.NV_INSTANCELIST: hypervisors,
- constants.NV_VGLIST: None,
constants.NV_VERSION: None,
constants.NV_HVINFO: self.cfg.GetHypervisorType(),
- constants.NV_DRBDLIST: None,
}
+ if vg_name is not None:
+ node_verify_param[constants.NV_VGLIST] = None
+ node_verify_param[constants.NV_LVLIST] = vg_name
+ node_verify_param[constants.NV_DRBDLIST] = None
all_nvinfo = self.rpc.call_node_verify(nodelist, node_verify_param,
self.cfg.GetClusterName())
ntype = "master"
elif node_i.master_candidate:
ntype = "master candidate"
+ elif node_i.drained:
+ ntype = "drained"
+ n_drained.append(node)
else:
ntype = "regular"
feedback_fn("* Verifying node %s (%s)" % (node, ntype))
node_drbd = {}
for minor, instance in all_drbd_map[node].items():
- instance = instanceinfo[instance]
- node_drbd[minor] = (instance.name, instance.admin_up)
+ if instance not in instanceinfo:
+ feedback_fn(" - ERROR: ghost instance '%s' in temporary DRBD map" %
+ instance)
+ # ghost instance should not be running, but otherwise we
+ # don't give double warnings (both ghost instance and
+ # unallocated minor in use)
+ node_drbd[minor] = (instance, False)
+ else:
+ instance = instanceinfo[instance]
+ node_drbd[minor] = (instance.name, instance.admin_up)
result = self._VerifyNode(node_i, file_names, local_checksums,
nresult, feedback_fn, master_files,
- node_drbd)
+ node_drbd, vg_name)
bad = bad or result
lvdata = nresult.get(constants.NV_LVLIST, "Missing LV data")
- if isinstance(lvdata, basestring):
+ if vg_name is None:
+ node_volume[node] = {}
+ elif isinstance(lvdata, basestring):
feedback_fn(" - ERROR: LVM problem on node %s: %s" %
- (node, lvdata.encode('string_escape')))
+ (node, utils.SafeEncode(lvdata)))
bad = True
node_volume[node] = {}
elif not isinstance(lvdata, dict):
try:
node_info[node] = {
"mfree": int(nodeinfo['memory_free']),
- "dfree": int(nresult[constants.NV_VGLIST][vg_name]),
"pinst": [],
"sinst": [],
# dictionary holding all instances this node is secondary for,
# secondary.
"sinst-by-pnode": {},
}
- except ValueError:
- feedback_fn(" - ERROR: invalid value returned from node %s" % (node,))
+ # FIXME: devise a free space model for file based instances as well
+ if vg_name is not None:
+ if (constants.NV_VGLIST not in nresult or
+ vg_name not in nresult[constants.NV_VGLIST]):
+ feedback_fn(" - ERROR: node %s didn't return data for the"
+ " volume group '%s' - it is either missing or broken" %
+ (node, vg_name))
+ bad = True
+ continue
+ node_info[node]["dfree"] = int(nresult[constants.NV_VGLIST][vg_name])
+ except (ValueError, KeyError):
+ feedback_fn(" - ERROR: invalid nodeinfo value returned"
+ " from node %s" % (node,))
bad = True
continue
if n_offline:
feedback_fn(" - NOTICE: %d offline node(s) found." % len(n_offline))
+ if n_drained:
+ feedback_fn(" - NOTICE: %d drained node(s) found." % len(n_drained))
+
return not bad
def HooksCallBack(self, phase, hooks_results, feedback_fn, lu_result):
if isinstance(lvs, basestring):
logging.warning("Error enumerating LVs on node %s: %s", node, lvs)
res_nlvm[node] = lvs
+ continue
elif not isinstance(lvs, dict):
logging.warning("Connection to node %s failed or invalid data"
" returned", node)
result = self.rpc.call_upload_file(node_list,
constants.SSH_KNOWN_HOSTS_FILE)
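    # Error-handling convention introduced by this change (a sketch,
    # judging from the call sites): RemoteFailMsg() returns a readable
    # message on failure and nothing on success, in which case the useful
    # data sits in result.payload; this replaces the old
    # `result.failed or not result.data` checks.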
for to_node, to_result in result.iteritems():
- if to_result.failed or not to_result.data:
- logging.error("Copy of file %s to node %s failed",
- constants.SSH_KNOWN_HOSTS_FILE, to_node)
+ msg = to_result.RemoteFailMsg()
+ if msg:
+ msg = ("Copy of file %s to node %s failed: %s" %
+ (constants.SSH_KNOWN_HOSTS_FILE, to_node, msg))
+ self.proc.LogWarning(msg)
finally:
result = self.rpc.call_node_start_master(master, False)
_OP_REQP = []
REQ_BGL = False
- def CheckParameters(self):
+ def CheckArguments(self):
"""Check parameters
"""
if self.op.candidate_pool_size is not None:
try:
self.op.candidate_pool_size = int(self.op.candidate_pool_size)
- except ValueError, err:
+ except (ValueError, TypeError), err:
raise errors.OpPrereqError("Invalid candidate_pool_size value: %s" %
str(err))
if self.op.candidate_pool_size < 1:
if the given volume group is valid.
"""
- # FIXME: This only works because there is only one parameter that can be
- # changed or removed.
if self.op.vg_name is not None and not self.op.vg_name:
instances = self.cfg.GetAllInstancesInfo().values()
for inst in instances:
(node, vgstatus))
self.cluster = cluster = self.cfg.GetClusterInfo()
- # validate beparams changes
+ # validate params changes
if self.op.beparams:
- utils.CheckBEParams(self.op.beparams)
- self.new_beparams = cluster.FillDict(
- cluster.beparams[constants.BEGR_DEFAULT], self.op.beparams)
+ utils.ForceDictType(self.op.beparams, constants.BES_PARAMETER_TYPES)
+ self.new_beparams = objects.FillDict(
+ cluster.beparams[constants.PP_DEFAULT], self.op.beparams)
+
+ if self.op.nicparams:
+ utils.ForceDictType(self.op.nicparams, constants.NICS_PARAMETER_TYPES)
+ self.new_nicparams = objects.FillDict(
+ cluster.nicparams[constants.PP_DEFAULT], self.op.nicparams)
+ objects.NIC.CheckParameterSyntax(self.new_nicparams)
# hypervisor list/parameters
- self.new_hvparams = cluster.FillDict(cluster.hvparams, {})
+ self.new_hvparams = objects.FillDict(cluster.hvparams, {})
if self.op.hvparams:
if not isinstance(self.op.hvparams, dict):
raise errors.OpPrereqError("Invalid 'hvparams' parameter on input")
hv_name in self.op.enabled_hypervisors)):
# either this is a new hypervisor, or its parameters have changed
hv_class = hypervisor.GetHypervisor(hv_name)
+ utils.ForceDictType(hv_params, constants.HVS_PARAMETER_TYPES)
hv_class.CheckParameterSyntax(hv_params)
_CheckHVParams(self, node_list, hv_name, hv_params)
"""
if self.op.vg_name is not None:
- if self.op.vg_name != self.cfg.GetVGName():
- self.cfg.SetVGName(self.op.vg_name)
+ new_volume = self.op.vg_name
+ if not new_volume:
+ new_volume = None
+ if new_volume != self.cfg.GetVGName():
+ self.cfg.SetVGName(new_volume)
else:
feedback_fn("Cluster LVM configuration already in desired"
" state, not changing")
if self.op.enabled_hypervisors is not None:
self.cluster.enabled_hypervisors = self.op.enabled_hypervisors
if self.op.beparams:
- self.cluster.beparams[constants.BEGR_DEFAULT] = self.new_beparams
+ self.cluster.beparams[constants.PP_DEFAULT] = self.new_beparams
+ if self.op.nicparams:
+ self.cluster.nicparams[constants.PP_DEFAULT] = self.new_nicparams
+
if self.op.candidate_pool_size is not None:
self.cluster.candidate_pool_size = self.op.candidate_pool_size
_AdjustCandidatePool(self)
+def _RedistributeAncillaryFiles(lu, additional_nodes=None):
+ """Distribute additional files which are part of the cluster configuration.
+
+ ConfigWriter takes care of distributing the config and ssconf files, but
+ there are more files which should be distributed to all nodes. This function
+ makes sure those are copied.
+
+ @param lu: calling logical unit
+ @param additional_nodes: list of nodes not in the config to distribute to
+
+ """
+ # 1. Gather target nodes
+ myself = lu.cfg.GetNodeInfo(lu.cfg.GetMasterNode())
+ dist_nodes = lu.cfg.GetNodeList()
+ if additional_nodes is not None:
+ dist_nodes.extend(additional_nodes)
+ if myself.name in dist_nodes:
+ dist_nodes.remove(myself.name)
+ # 2. Gather files to distribute
+ dist_files = set([constants.ETC_HOSTS,
+ constants.SSH_KNOWN_HOSTS_FILE,
+ constants.RAPI_CERT_FILE,
+ constants.RAPI_USERS_FILE,
+ ])
+
+ enabled_hypervisors = lu.cfg.GetClusterInfo().enabled_hypervisors
+ for hv_name in enabled_hypervisors:
+ hv_class = hypervisor.GetHypervisor(hv_name)
+ dist_files.update(hv_class.GetAncillaryFiles())
+
+ # 3. Perform the files upload
+ for fname in dist_files:
+ if os.path.exists(fname):
+ result = lu.rpc.call_upload_file(dist_nodes, fname)
+ for to_node, to_result in result.items():
+ msg = to_result.RemoteFailMsg()
+ if msg:
+ msg = ("Copy of file %s to node %s failed: %s" %
+ (fname, to_node, msg))
+ lu.proc.LogWarning(msg)
+
+
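# As wired up below, LURedistributeConfig calls
# _RedistributeAncillaryFiles(self) after updating the config, and
# LUAddNode passes the freshly added node explicitly:
#
#   _RedistributeAncillaryFiles(self, additional_nodes=[node])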
class LURedistributeConfig(NoHooksLU):
"""Force the redistribution of cluster configuration.
"""
self.cfg.Update(self.cfg.GetClusterInfo())
+ _RedistributeAncillaryFiles(self)
def _WaitForSync(lu, instance, oneshot=False, unlock=False):
done = True
cumul_degraded = False
rstats = lu.rpc.call_blockdev_getmirrorstatus(node, instance.disks)
- if rstats.failed or not rstats.data:
- lu.LogWarning("Can't get any data from node %s", node)
+ msg = rstats.RemoteFailMsg()
+ if msg:
+ lu.LogWarning("Can't get any data from node %s: %s", node, msg)
retries += 1
if retries >= 10:
raise errors.RemoteError("Can't contact node %s for mirror data,"
" aborting." % node)
time.sleep(6)
continue
- rstats = rstats.data
+ rstats = rstats.payload
retries = 0
for i, mstat in enumerate(rstats):
if mstat is None:
result = True
if on_primary or dev.AssembleOnSecondary():
rstats = lu.rpc.call_blockdev_find(node, dev)
- if rstats.failed or not rstats.data:
- logging.warning("Node %s: disk degraded, not found or node down", node)
+ msg = rstats.RemoteFailMsg()
+ if msg:
+ lu.LogWarning("Can't find disk on node %s: %s", node, msg)
+ result = False
+ elif not rstats.payload:
+ lu.LogWarning("Can't find disk on node %s", node)
result = False
else:
- result = result and (not rstats.data[idx])
+ result = result and (not rstats.payload[idx])
if dev.children:
for child in dev.children:
result = result and _CheckDiskConsistency(lu, child, node, on_primary)
selected=self.op.output_fields)
# Lock all nodes, in shared mode
+ # Temporary removal of locks, should be reverted later
+ # TODO: reintroduce locks when they are lighter-weight
self.needed_locks = {}
- self.share_locks[locking.LEVEL_NODE] = 1
- self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
+ #self.share_locks[locking.LEVEL_NODE] = 1
+ #self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
def CheckPrereq(self):
"""Check prerequisites.
@param rlist: a map with node names as keys and OS objects as values
@rtype: dict
- @returns: a dictionary with osnames as keys and as value another map, with
+ @return: a dictionary with osnames as keys and as value another map, with
nodes as keys and list of OS objects as values, eg::
{"debian-etch": {"node1": [<object>,...],
"""
all_os = {}
+ # we build here the list of nodes that didn't fail the RPC (at RPC
+ # level), so that nodes with a non-responding node daemon don't
+ # make all OSes invalid
+ good_nodes = [node_name for node_name in rlist
+ if not rlist[node_name].failed]
for node_name, nr in rlist.iteritems():
if nr.failed or not nr.data:
continue
# build a list of nodes for this os containing empty lists
# for each node in node_list
all_os[os_obj.name] = {}
- for nname in node_list:
+ for nname in good_nodes:
all_os[os_obj.name][nname] = []
all_os[os_obj.name][node_name].append(os_obj)
return all_os
"""Compute the list of OSes.
"""
- node_list = self.acquired_locks[locking.LEVEL_NODE]
- valid_nodes = [node for node in self.cfg.GetOnlineNodeList()
- if node in node_list]
+    valid_nodes = self.cfg.GetOnlineNodeList()
node_data = self.rpc.call_os_diagnose(valid_nodes)
if node_data == False:
raise errors.OpExecError("Can't gather the list of OSes")
"""Logical unit for querying nodes.
"""
- _OP_REQP = ["output_fields", "names"]
+ _OP_REQP = ["output_fields", "names", "use_locking"]
REQ_BGL = False
_FIELDS_DYNAMIC = utils.FieldSet(
"dtotal", "dfree",
"mtotal", "mnode", "mfree",
"bootid",
- "ctotal",
+ "ctotal", "cnodes", "csockets",
)
_FIELDS_STATIC = utils.FieldSet(
"master_candidate",
"master",
"offline",
+ "drained",
)
def ExpandNames(self):
else:
self.wanted = locking.ALL_SET
- self.do_locking = self._FIELDS_STATIC.NonMatching(self.op.output_fields)
+ self.do_node_query = self._FIELDS_STATIC.NonMatching(self.op.output_fields)
+ self.do_locking = self.do_node_query and self.op.use_locking
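    # Net effect: live node data is gathered only when dynamic fields are
    # requested, and locks are taken only if the caller also set
    # use_locking; purely static queries run lock-free.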
if self.do_locking:
# if we don't request only static fields, we need to lock the nodes
self.needed_locks[locking.LEVEL_NODE] = self.wanted
# begin data gathering
- if self.do_locking:
+ if self.do_node_query:
live_data = {}
node_data = self.rpc.call_node_info(nodenames, self.cfg.GetVGName(),
self.cfg.GetHypervisorType())
"dfree": fn(int, nodeinfo.get('vg_free', None)),
"ctotal": fn(int, nodeinfo.get('cpu_total', None)),
"bootid": nodeinfo.get('bootid', None),
+ "cnodes": fn(int, nodeinfo.get('cpu_nodes', None)),
+ "csockets": fn(int, nodeinfo.get('cpu_sockets', None)),
}
else:
live_data[name] = {}
val = node.name == master_node
elif field == "offline":
val = node.offline
+ elif field == "drained":
+ val = node.drained
elif self._FIELDS_DYNAMIC.Matches(field):
val = live_data[node.name].get(field, None)
else:
primary_ip=primary_ip,
secondary_ip=secondary_ip,
master_candidate=master_candidate,
- offline=False)
+ offline=False, drained=False)
def Exec(self, feedback_fn):
"""Adds the new node to the cluster.
keyarray[2],
keyarray[3], keyarray[4], keyarray[5])
- if result.failed or not result.data:
- raise errors.OpExecError("Cannot transfer ssh keys to the new node")
+ msg = result.RemoteFailMsg()
+ if msg:
+ raise errors.OpExecError("Cannot transfer ssh keys to the"
+ " new node: %s" % msg)
# Add node to our /etc/hosts, and add key to known_hosts
- utils.AddHostToEtcHosts(new_node.name)
+ if self.cfg.GetClusterInfo().modify_etc_hosts:
+ utils.AddHostToEtcHosts(new_node.name)
if new_node.secondary_ip != new_node.primary_ip:
result = self.rpc.call_node_has_ip_address(new_node.name,
if result[verifier].data['nodelist']:
for failed in result[verifier].data['nodelist']:
feedback_fn("ssh/hostname verification failed %s -> %s" %
- (verifier, result[verifier]['nodelist'][failed]))
+ (verifier, result[verifier].data['nodelist'][failed]))
raise errors.OpExecError("ssh/hostname verification failed.")
- # Distribute updated /etc/hosts and known_hosts to all nodes,
- # including the node just added
- myself = self.cfg.GetNodeInfo(self.cfg.GetMasterNode())
- dist_nodes = self.cfg.GetNodeList()
- if not self.op.readd:
- dist_nodes.append(node)
- if myself.name in dist_nodes:
- dist_nodes.remove(myself.name)
-
- logging.debug("Copying hosts and known_hosts to all nodes")
- for fname in (constants.ETC_HOSTS, constants.SSH_KNOWN_HOSTS_FILE):
- result = self.rpc.call_upload_file(dist_nodes, fname)
- for to_node, to_result in result.iteritems():
- if to_result.failed or not to_result.data:
- logging.error("Copy of file %s to node %s failed", fname, to_node)
-
- to_copy = []
- if constants.HT_XEN_HVM in self.cfg.GetClusterInfo().enabled_hypervisors:
- to_copy.append(constants.VNC_PASSWORD_FILE)
- for fname in to_copy:
- result = self.rpc.call_upload_file([node], fname)
- if result[node].failed or not result[node]:
- logging.error("Could not copy file %s to node %s", fname, node)
-
if self.op.readd:
+ _RedistributeAncillaryFiles(self)
self.context.ReaddNode(new_node)
else:
+ _RedistributeAncillaryFiles(self, additional_nodes=[node])
self.context.AddNode(new_node)
self.op.node_name = node_name
_CheckBooleanOpField(self.op, 'master_candidate')
_CheckBooleanOpField(self.op, 'offline')
- if self.op.master_candidate is None and self.op.offline is None:
+ _CheckBooleanOpField(self.op, 'drained')
+ all_mods = [self.op.offline, self.op.master_candidate, self.op.drained]
+ if all_mods.count(None) == 3:
raise errors.OpPrereqError("Please pass at least one modification")
- if self.op.offline == True and self.op.master_candidate == True:
- raise errors.OpPrereqError("Can't set the node into offline and"
- " master_candidate at the same time")
+ if all_mods.count(True) > 1:
+ raise errors.OpPrereqError("Can't set the node into more than one"
+ " state at the same time")
def ExpandNames(self):
self.needed_locks = {locking.LEVEL_NODE: self.op.node_name}
"OP_TARGET": self.op.node_name,
"MASTER_CANDIDATE": str(self.op.master_candidate),
"OFFLINE": str(self.op.offline),
+ "DRAINED": str(self.op.drained),
}
nl = [self.cfg.GetMasterNode(),
self.op.node_name]
"""
node = self.node = self.cfg.GetNodeInfo(self.op.node_name)
- if ((self.op.master_candidate == False or self.op.offline == True)
- and node.master_candidate):
+ if ((self.op.master_candidate == False or self.op.offline == True or
+ self.op.drained == True) and node.master_candidate):
# we will demote the node from master_candidate
if self.op.node_name == self.cfg.GetMasterNode():
raise errors.OpPrereqError("The master node has to be a"
- " master candidate and online")
+ " master candidate, online and not drained")
cp_size = self.cfg.GetClusterInfo().candidate_pool_size
num_candidates, _ = self.cfg.GetMasterCandidateStats()
if num_candidates <= cp_size:
else:
raise errors.OpPrereqError(msg)
- if (self.op.master_candidate == True and node.offline and
- not self.op.offline == False):
- raise errors.OpPrereqError("Can't set an offline node to"
- " master_candidate")
+ if (self.op.master_candidate == True and
+ ((node.offline and not self.op.offline == False) or
+ (node.drained and not self.op.drained == False))):
+ raise errors.OpPrereqError("Node '%s' is offline or drained, can't set"
+ " to master_candidate" % node.name)
return
node = self.node
result = []
+ changed_mc = False
if self.op.offline is not None:
node.offline = self.op.offline
result.append(("offline", str(self.op.offline)))
- if self.op.offline == True and node.master_candidate:
- node.master_candidate = False
- result.append(("master_candidate", "auto-demotion due to offline"))
+ if self.op.offline == True:
+ if node.master_candidate:
+ node.master_candidate = False
+ changed_mc = True
+ result.append(("master_candidate", "auto-demotion due to offline"))
+ if node.drained:
+ node.drained = False
+ result.append(("drained", "clear drained status due to offline"))
if self.op.master_candidate is not None:
node.master_candidate = self.op.master_candidate
+ changed_mc = True
result.append(("master_candidate", str(self.op.master_candidate)))
if self.op.master_candidate == False:
rrc = self.rpc.call_node_demote_from_mc(node.name)
- if (rrc.failed or not isinstance(rrc.data, (tuple, list))
- or len(rrc.data) != 2):
- self.LogWarning("Node rpc error: %s" % rrc.error)
- elif not rrc.data[0]:
- self.LogWarning("Node failed to demote itself: %s" % rrc.data[1])
+ msg = rrc.RemoteFailMsg()
+ if msg:
+ self.LogWarning("Node failed to demote itself: %s" % msg)
+
+ if self.op.drained is not None:
+ node.drained = self.op.drained
+ result.append(("drained", str(self.op.drained)))
+ if self.op.drained == True:
+ if node.master_candidate:
+ node.master_candidate = False
+ changed_mc = True
+ result.append(("master_candidate", "auto-demotion due to drain"))
+ if node.offline:
+ node.offline = False
+ result.append(("offline", "clear offline status due to drain"))
# this will trigger configuration file update, if needed
self.cfg.Update(node)
# this will trigger job queue propagation or cleanup
- if self.op.node_name != self.cfg.GetMasterNode():
+ if changed_mc:
self.context.ReaddNode(node)
return result
+class LUPowercycleNode(NoHooksLU):
+ """Powercycles a node.
+
+ """
+ _OP_REQP = ["node_name", "force"]
+ REQ_BGL = False
+
+ def CheckArguments(self):
+ node_name = self.cfg.ExpandNodeName(self.op.node_name)
+ if node_name is None:
+ raise errors.OpPrereqError("Invalid node name '%s'" % self.op.node_name)
+ self.op.node_name = node_name
+ if node_name == self.cfg.GetMasterNode() and not self.op.force:
+ raise errors.OpPrereqError("The node is the master and the force"
+ " parameter was not set")
+
+ def ExpandNames(self):
+ """Locking for PowercycleNode.
+
+    This is a last-resort option and shouldn't block on other
+ jobs. Therefore, we grab no locks.
+
+ """
+ self.needed_locks = {}
+
+ def CheckPrereq(self):
+ """Check prerequisites.
+
+ This LU has no prereqs.
+
+ """
+ pass
+
+ def Exec(self, feedback_fn):
+ """Reboots a node.
+
+ """
+ result = self.rpc.call_node_powercycle(self.op.node_name,
+ self.cfg.GetHypervisorType())
+ msg = result.RemoteFailMsg()
+ if msg:
+ raise errors.OpExecError("Failed to schedule the reboot: %s" % msg)
+ return result.payload
+
+
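# Submitting a powercycle (sketch only; the matching opcode class is
# assumed to live in opcodes.py with the same required fields as
# _OP_REQP):
#
#   op = opcodes.OpPowercycleNode(node_name="node1.example.com", force=False)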
class LUQueryClusterInfo(NoHooksLU):
"""Query cluster configuration.
"master": cluster.master_node,
"default_hypervisor": cluster.default_hypervisor,
"enabled_hypervisors": cluster.enabled_hypervisors,
- "hvparams": cluster.hvparams,
+    "hvparams": dict([(hv_name, cluster.hvparams[hv_name])
+                      for hv_name in cluster.enabled_hypervisors]),
"beparams": cluster.beparams,
+ "nicparams": cluster.nicparams,
"candidate_pool_size": cluster.candidate_pool_size,
+ "master_netdev": cluster.master_netdev,
+ "volume_group_name": cluster.volume_group_name,
+ "file_storage_dir": cluster.file_storage_dir,
}
return result
for node, node_disk in inst_disk.ComputeNodeTree(instance.primary_node):
lu.cfg.SetDiskID(node_disk, node)
result = lu.rpc.call_blockdev_assemble(node, node_disk, iname, False)
- if result.failed or not result:
+ msg = result.RemoteFailMsg()
+ if msg:
lu.proc.LogWarning("Could not prepare block device %s on node %s"
- " (is_primary=False, pass=1)",
- inst_disk.iv_name, node)
+ " (is_primary=False, pass=1): %s",
+ inst_disk.iv_name, node, msg)
if not ignore_secondaries:
disks_ok = False
continue
lu.cfg.SetDiskID(node_disk, node)
result = lu.rpc.call_blockdev_assemble(node, node_disk, iname, True)
- if result.failed or not result:
+ msg = result.RemoteFailMsg()
+ if msg:
lu.proc.LogWarning("Could not prepare block device %s on node %s"
- " (is_primary=True, pass=2)",
- inst_disk.iv_name, node)
+ " (is_primary=True, pass=2): %s",
+ inst_disk.iv_name, node, msg)
disks_ok = False
- device_info.append((instance.primary_node, inst_disk.iv_name, result.data))
+ device_info.append((instance.primary_node, inst_disk.iv_name,
+ result.payload))
# leave the disks configured for the primary node
# this is a workaround that would be fixed better by
ignored.
"""
- result = True
+ all_result = True
for disk in instance.disks:
for node, top_disk in disk.ComputeNodeTree(instance.primary_node):
lu.cfg.SetDiskID(top_disk, node)
result = lu.rpc.call_blockdev_shutdown(node, top_disk)
- if result.failed or not result.data:
- logging.error("Could not shutdown block device %s on node %s",
- disk.iv_name, node)
+ msg = result.RemoteFailMsg()
+ if msg:
+ lu.LogWarning("Could not shutdown block device %s on node %s: %s",
+ disk.iv_name, node, msg)
if not ignore_primary or node != instance.primary_node:
- result = False
- return result
+ all_result = False
+ return all_result
def _CheckNodeFreeMemory(lu, node, reason, requested, hypervisor_name):
assert self.instance is not None, \
"Cannot retrieve locked instance %s" % self.op.instance_name
+ # extra beparams
+ self.beparams = getattr(self.op, "beparams", {})
+ if self.beparams:
+ if not isinstance(self.beparams, dict):
+ raise errors.OpPrereqError("Invalid beparams passed: %s, expected"
+ " dict" % (type(self.beparams), ))
+ # fill the beparams dict
+ utils.ForceDictType(self.beparams, constants.BES_PARAMETER_TYPES)
+ self.op.beparams = self.beparams
+
+ # extra hvparams
+ self.hvparams = getattr(self.op, "hvparams", {})
+ if self.hvparams:
+ if not isinstance(self.hvparams, dict):
+ raise errors.OpPrereqError("Invalid hvparams passed: %s, expected"
+ " dict" % (type(self.hvparams), ))
+
+ # check hypervisor parameter syntax (locally)
+ cluster = self.cfg.GetClusterInfo()
+ utils.ForceDictType(self.hvparams, constants.HVS_PARAMETER_TYPES)
+ filled_hvp = objects.FillDict(cluster.hvparams[instance.hypervisor],
+ instance.hvparams)
+ filled_hvp.update(self.hvparams)
+ hv_type = hypervisor.GetHypervisor(instance.hypervisor)
+ hv_type.CheckParameterSyntax(filled_hvp)
+ _CheckHVParams(self, instance.all_nodes, instance.hypervisor, filled_hvp)
+ self.op.hvparams = self.hvparams
+
_CheckNodeOnline(self, instance.primary_node)
bep = self.cfg.GetClusterInfo().FillBE(instance)
    # check bridges existence
_CheckInstanceBridgesExist(self, instance)
- _CheckNodeFreeMemory(self, instance.primary_node,
- "starting instance %s" % instance.name,
- bep[constants.BE_MEMORY], instance.hypervisor)
+ remote_info = self.rpc.call_instance_info(instance.primary_node,
+ instance.name,
+ instance.hypervisor)
+ remote_info.Raise()
+ if not remote_info.data:
+ _CheckNodeFreeMemory(self, instance.primary_node,
+ "starting instance %s" % instance.name,
+ bep[constants.BE_MEMORY], instance.hypervisor)
def Exec(self, feedback_fn):
"""Start the instance.
"""
instance = self.instance
force = self.op.force
- extra_args = getattr(self.op, "extra_args", "")
self.cfg.MarkInstanceUp(instance.name)
_StartInstanceDisks(self, instance, force)
- result = self.rpc.call_instance_start(node_current, instance, extra_args)
+ result = self.rpc.call_instance_start(node_current, instance,
+ self.hvparams, self.beparams)
msg = result.RemoteFailMsg()
if msg:
_ShutdownInstanceDisks(self, instance)
"""
env = {
"IGNORE_SECONDARIES": self.op.ignore_secondaries,
+ "REBOOT_TYPE": self.op.reboot_type,
}
env.update(_BuildInstanceHookEnvByObject(self, self.instance))
nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
instance = self.instance
ignore_secondaries = self.op.ignore_secondaries
reboot_type = self.op.reboot_type
- extra_args = getattr(self.op, "extra_args", "")
node_current = instance.primary_node
if reboot_type in [constants.INSTANCE_REBOOT_SOFT,
constants.INSTANCE_REBOOT_HARD]:
+ for disk in instance.disks:
+ self.cfg.SetDiskID(disk, node_current)
result = self.rpc.call_instance_reboot(node_current, instance,
- reboot_type, extra_args)
- if result.failed or not result.data:
- raise errors.OpExecError("Could not reboot instance")
+ reboot_type)
+ msg = result.RemoteFailMsg()
+ if msg:
+ raise errors.OpExecError("Could not reboot instance: %s" % msg)
else:
- if not self.rpc.call_instance_shutdown(node_current, instance):
- raise errors.OpExecError("could not shutdown instance for full reboot")
+ result = self.rpc.call_instance_shutdown(node_current, instance)
+ msg = result.RemoteFailMsg()
+ if msg:
+ raise errors.OpExecError("Could not shutdown instance for"
+ " full reboot: %s" % msg)
_ShutdownInstanceDisks(self, instance)
_StartInstanceDisks(self, instance, ignore_secondaries)
- result = self.rpc.call_instance_start(node_current, instance, extra_args)
+ result = self.rpc.call_instance_start(node_current, instance, None, None)
msg = result.RemoteFailMsg()
if msg:
_ShutdownInstanceDisks(self, instance)
node_current = instance.primary_node
self.cfg.MarkInstanceDown(instance.name)
result = self.rpc.call_instance_shutdown(node_current, instance)
- if result.failed or not result.data:
- self.proc.LogWarning("Could not shutdown instance")
+ msg = result.RemoteFailMsg()
+ if msg:
+ self.proc.LogWarning("Could not shutdown instance: %s" % msg)
_ShutdownInstanceDisks(self, instance)
remote_info = self.rpc.call_instance_info(instance.primary_node,
instance.name,
instance.hypervisor)
- if remote_info.failed or remote_info.data:
+ remote_info.Raise()
+ if remote_info.data:
raise errors.OpPrereqError("Instance '%s' is running on the node %s" %
(self.op.instance_name,
instance.primary_node))
_StartInstanceDisks(self, inst, None)
try:
feedback_fn("Running the instance OS create scripts...")
- result = self.rpc.call_instance_os_add(inst.primary_node, inst)
+ result = self.rpc.call_instance_os_add(inst.primary_node, inst, True)
msg = result.RemoteFailMsg()
if msg:
raise errors.OpExecError("Could not install OS for instance %s"
instance.name, instance.primary_node)
result = self.rpc.call_instance_shutdown(instance.primary_node, instance)
- if result.failed or not result.data:
+ msg = result.RemoteFailMsg()
+ if msg:
if self.op.ignore_failures:
- feedback_fn("Warning: can't shutdown instance")
+ feedback_fn("Warning: can't shutdown instance: %s" % msg)
else:
- raise errors.OpExecError("Could not shutdown instance %s on node %s" %
- (instance.name, instance.primary_node))
+ raise errors.OpExecError("Could not shutdown instance %s on"
+ " node %s: %s" %
+ (instance.name, instance.primary_node, msg))
logging.info("Removing block devices for instance %s", instance.name)
"""Logical unit for querying instances.
"""
- _OP_REQP = ["output_fields", "names"]
+ _OP_REQP = ["output_fields", "names", "use_locking"]
REQ_BGL = False
_FIELDS_STATIC = utils.FieldSet(*["name", "os", "pnode", "snodes",
- "admin_state", "admin_ram",
+ "admin_state",
"disk_template", "ip", "mac", "bridge",
"sda_size", "sdb_size", "vcpus", "tags",
"network_port", "beparams",
- "(disk).(size)/([0-9]+)",
- "(disk).(sizes)",
- "(nic).(mac|ip|bridge)/([0-9]+)",
- "(nic).(macs|ips|bridges)",
- "(disk|nic).(count)",
+ r"(disk)\.(size)/([0-9]+)",
+ r"(disk)\.(sizes)", "disk_usage",
+ r"(nic)\.(mac|ip|bridge)/([0-9]+)",
+ r"(nic)\.(macs|ips|bridges)",
+ r"(disk|nic)\.(count)",
"serial_no", "hypervisor", "hvparams",] +
["hv/%s" % name
for name in constants.HVS_PARAMETERS] +
else:
self.wanted = locking.ALL_SET
- self.do_locking = self._FIELDS_STATIC.NonMatching(self.op.output_fields)
+ self.do_node_query = self._FIELDS_STATIC.NonMatching(self.op.output_fields)
+ self.do_locking = self.do_node_query and self.op.use_locking
if self.do_locking:
self.needed_locks[locking.LEVEL_INSTANCE] = self.wanted
self.needed_locks[locking.LEVEL_NODE] = []
"""
all_info = self.cfg.GetAllInstancesInfo()
- if self.do_locking:
- instance_names = self.acquired_locks[locking.LEVEL_INSTANCE]
- elif self.wanted != locking.ALL_SET:
- instance_names = self.wanted
- missing = set(instance_names).difference(all_info.keys())
- if missing:
- raise errors.OpExecError(
- "Some instances were removed before retrieving their data: %s"
- % missing)
+ if self.wanted == locking.ALL_SET:
+ # caller didn't specify instance names, so ordering is not important
+ if self.do_locking:
+ instance_names = self.acquired_locks[locking.LEVEL_INSTANCE]
+ else:
+ instance_names = all_info.keys()
+ instance_names = utils.NiceSort(instance_names)
else:
- instance_names = all_info.keys()
+ # caller did specify names, so we must keep the ordering
+ if self.do_locking:
+ tgt_set = self.acquired_locks[locking.LEVEL_INSTANCE]
+ else:
+ tgt_set = all_info.keys()
+ missing = set(self.wanted).difference(tgt_set)
+ if missing:
+ raise errors.OpExecError("Some instances were removed before"
+ " retrieving their data: %s" % missing)
+ instance_names = self.wanted
- instance_names = utils.NiceSort(instance_names)
instance_list = [all_info[iname] for iname in instance_names]
# begin data gathering
bad_nodes = []
off_nodes = []
- if self.do_locking:
+ if self.do_node_query:
live_data = {}
node_data = self.rpc.call_all_instances_info(nodes, hv_list)
for name in nodes:
val = instance.FindDisk(idx).size
except errors.OpPrereqError:
val = None
+ elif field == "disk_usage": # total disk usage per node
+ disk_sizes = [{'size': disk.size} for disk in instance.disks]
+ val = _ComputeDiskSize(instance.disk_template, disk_sizes)
elif field == "tags":
val = list(instance.GetTags())
elif field == "serial_no":
target_node = secondary_nodes[0]
_CheckNodeOnline(self, target_node)
+ _CheckNodeNotDrained(self, target_node)
# check memory requirements on the secondary node
_CheckNodeFreeMemory(self, target_node, "failing over instance %s" %
instance.name, bep[constants.BE_MEMORY],
instance.hypervisor)
-
    # check bridge existence
- brlist = [nic.bridge for nic in instance.nics]
- result = self.rpc.call_bridges_exist(target_node, brlist)
- result.Raise()
- if not result.data:
- raise errors.OpPrereqError("One or more target bridges %s does not"
- " exist on destination node '%s'" %
- (brlist, target_node))
+ _CheckInstanceBridgesExist(self, instance, node=target_node)
def Exec(self, feedback_fn):
"""Failover an instance.
instance.name, source_node)
result = self.rpc.call_instance_shutdown(source_node, instance)
- if result.failed or not result.data:
+ msg = result.RemoteFailMsg()
+ if msg:
if self.op.ignore_consistency:
self.proc.LogWarning("Could not shutdown instance %s on node %s."
- " Proceeding"
- " anyway. Please make sure node %s is down",
- instance.name, source_node, source_node)
+ " Proceeding anyway. Please make sure node"
+ " %s is down. Error details: %s",
+ instance.name, source_node, source_node, msg)
else:
- raise errors.OpExecError("Could not shutdown instance %s on node %s" %
- (instance.name, source_node))
+ raise errors.OpExecError("Could not shutdown instance %s on"
+ " node %s: %s" %
+ (instance.name, source_node, msg))
feedback_fn("* deactivating the instance's disks on source node")
if not _ShutdownInstanceDisks(self, instance, ignore_primary=True):
raise errors.OpExecError("Can't activate the instance's disks")
feedback_fn("* starting the instance on the target node")
- result = self.rpc.call_instance_start(target_node, instance, None)
+ result = self.rpc.call_instance_start(target_node, instance, None, None)
msg = result.RemoteFailMsg()
if msg:
_ShutdownInstanceDisks(self, instance)
"""
env = _BuildInstanceHookEnvByObject(self, self.instance)
+ env["MIGRATE_LIVE"] = self.op.live
+ env["MIGRATE_CLEANUP"] = self.op.cleanup
nl = [self.cfg.GetMasterNode()] + list(self.instance.secondary_nodes)
return env, nl, nl
secondary_nodes = instance.secondary_nodes
if not secondary_nodes:
- raise errors.ProgrammerError("no secondary node but using "
- "drbd8 disk template")
+ raise errors.ConfigurationError("No secondary node but using"
+ " drbd8 disk template")
i_be = self.cfg.GetClusterInfo().FillBE(instance)
instance.hypervisor)
    # check bridge existence
- brlist = [nic.bridge for nic in instance.nics]
- result = self.rpc.call_bridges_exist(target_node, brlist)
- if result.failed or not result.data:
- raise errors.OpPrereqError("One or more target bridges %s does not"
- " exist on destination node '%s'" %
- (brlist, target_node))
+ _CheckInstanceBridgesExist(self, instance, node=target_node)
if not self.op.cleanup:
+ _CheckNodeNotDrained(self, target_node)
result = self.rpc.call_instance_migratable(instance.primary_node,
instance)
msg = result.RemoteFailMsg()
if msg:
raise errors.OpExecError("Cannot resync disks on node %s: %s" %
(node, msg))
- node_done, node_percent = nres.data[1]
+ node_done, node_percent = nres.payload
all_done = all_done and node_done
if node_percent is not None:
min_percent = min(min_percent, node_percent)
msg = result.RemoteFailMsg()
if msg:
log_err = ("Failed fetching source migration information from %s: %s" %
- (source_node, msg))
+ (source_node, msg))
logging.error(log_err)
raise errors.OpExecError(log_err)
- self.migration_info = migration_info = result.data[1]
+ self.migration_info = migration_info = result.payload
# Then switch the disks to master/master mode
self._EnsureSecondary(target_node)
(this will be represented as a LVM tag)
@type force_open: boolean
  @param force_open: this parameter will be passed to the
- L{backend.CreateBlockDevice} function where it specifies
+ L{backend.BlockdevCreate} function where it specifies
whether we run on primary or not, and it affects both
      the child assembly and the device's own Open() execution
(this will be represented as a LVM tag)
@type force_open: boolean
  @param force_open: this parameter will be passed to the
- L{backend.CreateBlockDevice} function where it specifies
+ L{backend.BlockdevCreate} function where it specifies
whether we run on primary or not, and it affects both
      the child assembly and the device's own Open() execution
" node %s for instance %s: %s" %
(device, node, instance.name, msg))
if device.physical_id is None:
- device.physical_id = result.data[1]
+ device.physical_id = result.payload
def _GenerateUniqueNames(lu, exts):
disk_index = idx + base_index
disk_dev = objects.Disk(dev_type=constants.LD_LV, size=disk["size"],
logical_id=(vgname, names[idx]),
- iv_name="disk/%d" % disk_index)
+ iv_name="disk/%d" % disk_index,
+ mode=disk["mode"])
disks.append(disk_dev)
elif template_name == constants.DT_DRBD8:
if len(secondary_nodes) != 1:
disk["size"], names[idx*2:idx*2+2],
"disk/%d" % disk_index,
minors[idx*2], minors[idx*2+1])
+ disk_dev.mode = disk["mode"]
disks.append(disk_dev)
elif template_name == constants.DT_FILE:
if len(secondary_nodes) != 0:
iv_name="disk/%d" % disk_index,
logical_id=(file_driver,
"%s/disk%d" % (file_storage_dir,
- idx)))
+ disk_index)),
+ mode=disk["mode"])
disks.append(disk_dev)
else:
raise errors.ProgrammerError("Invalid disk template '%s'" % template_name)
"""
logging.info("Removing block devices for instance %s", instance.name)
- result = True
+ all_result = True
for device in instance.disks:
for node, disk in device.ComputeNodeTree(instance.primary_node):
lu.cfg.SetDiskID(disk, node)
- result = lu.rpc.call_blockdev_remove(node, disk)
- if result.failed or not result.data:
- lu.proc.LogWarning("Could not remove block device %s on node %s,"
- " continuing anyway", device.iv_name, node)
- result = False
+ msg = lu.rpc.call_blockdev_remove(node, disk).RemoteFailMsg()
+ if msg:
+ lu.LogWarning("Could not remove block device %s on node %s,"
+ " continuing anyway: %s", device.iv_name, node, msg)
+ all_result = False
if instance.disk_template == constants.DT_FILE:
file_storage_dir = os.path.dirname(instance.disks[0].logical_id[1])
file_storage_dir)
if result.failed or not result.data:
logging.error("Could not remove directory '%s'", file_storage_dir)
- result = False
+ all_result = False
- return result
+ return all_result
def _ComputeDiskSize(disk_template, disks):
hvparams)
for node in nodenames:
info = hvinfo[node]
- info.Raise()
- if not info.data or not isinstance(info.data, (tuple, list)):
- raise errors.OpPrereqError("Cannot get current information"
- " from node '%s' (%s)" % (node, info.data))
- if not info.data[0]:
- raise errors.OpPrereqError("Hypervisor parameter validation failed:"
- " %s" % info.data[1])
+ if info.offline:
+ continue
+ msg = info.RemoteFailMsg()
+ if msg:
+ raise errors.OpPrereqError("Hypervisor parameter validation"
+ " failed on node %s: %s" % (node, msg))
class LUCreateInstance(LogicalUnit):
",".join(enabled_hvs)))
# check hypervisor parameter syntax (locally)
-
- filled_hvp = cluster.FillDict(cluster.hvparams[self.op.hypervisor],
+ utils.ForceDictType(self.op.hvparams, constants.HVS_PARAMETER_TYPES)
+ filled_hvp = objects.FillDict(cluster.hvparams[self.op.hypervisor],
self.op.hvparams)
hv_type = hypervisor.GetHypervisor(self.op.hypervisor)
hv_type.CheckParameterSyntax(filled_hvp)
# fill and remember the beparams dict
- utils.CheckBEParams(self.op.beparams)
- self.be_full = cluster.FillDict(cluster.beparams[constants.BEGR_DEFAULT],
+ utils.ForceDictType(self.op.beparams, constants.BES_PARAMETER_TYPES)
+ self.be_full = objects.FillDict(cluster.beparams[constants.PP_DEFAULT],
self.op.beparams)
#### instance parameters check
# NIC buildup
self.nics = []
- for nic in self.op.nics:
+ for idx, nic in enumerate(self.op.nics):
+ nic_mode_req = nic.get("mode", None)
+ nic_mode = nic_mode_req
+ if nic_mode is None:
+ nic_mode = cluster.nicparams[constants.PP_DEFAULT][constants.NIC_MODE]
+
+ # in routed mode, for the first nic, the default ip is 'auto'
+ if nic_mode == constants.NIC_MODE_ROUTED and idx == 0:
+ default_ip_mode = constants.VALUE_AUTO
+ else:
+ default_ip_mode = constants.VALUE_NONE
+
# ip validity checks
- ip = nic.get("ip", None)
- if ip is None or ip.lower() == "none":
+ ip = nic.get("ip", default_ip_mode)
+ if ip is None or ip.lower() == constants.VALUE_NONE:
nic_ip = None
elif ip.lower() == constants.VALUE_AUTO:
nic_ip = hostname1.ip
" like a valid IP" % ip)
nic_ip = ip
+ # TODO: check the ip for uniqueness !!
+ if nic_mode == constants.NIC_MODE_ROUTED and not nic_ip:
+ raise errors.OpPrereqError("Routed nic mode requires an ip address")
+
# MAC address verification
mac = nic.get("mac", constants.VALUE_AUTO)
if mac not in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
raise errors.OpPrereqError("Invalid MAC address specified: %s" %
mac)
# bridge verification
- bridge = nic.get("bridge", self.cfg.GetDefBridge())
- self.nics.append(objects.NIC(mac=mac, ip=nic_ip, bridge=bridge))
+ bridge = nic.get("bridge", None)
+ link = nic.get("link", None)
+ if bridge and link:
+      raise errors.OpPrereqError("Cannot pass 'bridge' and 'link'"
+                                 " at the same time")
+ elif bridge and nic_mode == constants.NIC_MODE_ROUTED:
+ raise errors.OpPrereqError("Cannot pass 'bridge' on a routed nic")
+ elif bridge:
+ link = bridge
+
+ nicparams = {}
+ if nic_mode_req:
+ nicparams[constants.NIC_MODE] = nic_mode_req
+ if link:
+ nicparams[constants.NIC_LINK] = link
+
+ check_params = objects.FillDict(cluster.nicparams[constants.PP_DEFAULT],
+ nicparams)
+ objects.NIC.CheckParameterSyntax(check_params)
+ self.nics.append(objects.NIC(mac=mac, ip=nic_ip, nicparams=nicparams))
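      # Only explicitly requested values are stored in nicparams: a spec
      # of {"mode": "bridged", "link": "br0"} (values assumed) stores both
      # keys, while an empty spec stores {} and the cluster defaults apply
      # when the dict is filled.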
# disk checks/pre-build
self.disks = []
"""
env = {
- "INSTANCE_DISK_TEMPLATE": self.op.disk_template,
- "INSTANCE_DISK_SIZE": ",".join(str(d["size"]) for d in self.disks),
- "INSTANCE_ADD_MODE": self.op.mode,
+ "ADD_MODE": self.op.mode,
}
if self.op.mode == constants.INSTANCE_IMPORT:
- env["INSTANCE_SRC_NODE"] = self.op.src_node
- env["INSTANCE_SRC_PATH"] = self.op.src_path
- env["INSTANCE_SRC_IMAGES"] = self.src_images
+ env["SRC_NODE"] = self.op.src_node
+ env["SRC_PATH"] = self.op.src_path
+ env["SRC_IMAGES"] = self.src_images
- env.update(_BuildInstanceHookEnv(name=self.op.instance_name,
+ env.update(_BuildInstanceHookEnv(
+ name=self.op.instance_name,
primary_node=self.op.pnode,
secondary_nodes=self.secondaries,
- status=self.instance_status,
+ status=self.op.start,
os_type=self.op.os_type,
memory=self.be_full[constants.BE_MEMORY],
vcpus=self.be_full[constants.BE_VCPUS],
- nics=[(n.ip, n.bridge, n.mac) for n in self.nics],
+ nics=_PreBuildNICHooksList(self, self.nics),
+ disk_template=self.op.disk_template,
+ disks=[(d["size"], d["mode"]) for d in self.disks],
))
nl = ([self.cfg.GetMasterNode(), self.op.pnode] +
raise errors.OpPrereqError("Cluster does not support lvm-based"
" instances")
-
if self.op.mode == constants.INSTANCE_IMPORT:
src_node = self.op.src_node
src_path = self.op.src_path
nic_mac_ini = 'nic%d_mac' % idx
nic.mac = export_info.get(constants.INISECT_INS, nic_mac_ini)
+ # ENDIF: self.op.mode == constants.INSTANCE_IMPORT
# ip ping checks (we use the same ip that was resolved in ExpandNames)
if self.op.start and not self.op.ip_check:
raise errors.OpPrereqError("Cannot ignore IP address conflicts when"
raise errors.OpPrereqError("IP %s of instance %s already in use" %
(self.check_ip, self.op.instance_name))
+ #### mac address generation
+ # By generating here the mac address both the allocator and the hooks get
+ # the real final mac address rather than the 'auto' or 'generate' value.
+ # There is a race condition between the generation and the instance object
+ # creation, which means that we know the mac is valid now, but we're not
+ # sure it will be when we actually add the instance. If things go bad
+ # adding the instance will abort because of a duplicate mac, and the
+ # creation job will fail.
+ for nic in self.nics:
+ if nic.mac in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
+ nic.mac = self.cfg.GenerateMAC()
+
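    # GenerateMAC behavior assumed here (see ConfigWriter): derive a new
    # address from the cluster's mac_prefix and retry until it does not
    # collide with any MAC already present in the configuration.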
#### allocator run
if self.op.iallocator is not None:
if pnode.offline:
raise errors.OpPrereqError("Cannot use offline primary node '%s'" %
pnode.name)
+ if pnode.drained:
+ raise errors.OpPrereqError("Cannot use drained primary node '%s'" %
+ pnode.name)
self.secondaries = []
if self.op.snode == pnode.name:
raise errors.OpPrereqError("The secondary node cannot be"
" the primary node.")
- self.secondaries.append(self.op.snode)
_CheckNodeOnline(self, self.op.snode)
+ _CheckNodeNotDrained(self, self.op.snode)
+ self.secondaries.append(self.op.snode)
nodenames = [pnode.name] + self.secondaries
raise errors.OpPrereqError("OS '%s' not in supported os list for"
" primary node" % self.op.os_type)
- # bridge check on primary node
- bridges = [n.bridge for n in self.nics]
- result = self.rpc.call_bridges_exist(self.pnode.name, bridges)
- result.Raise()
- if not result.data:
- raise errors.OpPrereqError("One of the target bridges '%s' does not"
- " exist on destination node '%s'" %
- (",".join(bridges), pnode.name))
+ _CheckNicsBridgesExist(self, self.nics, self.pnode.name)
# memory check on primary node
if self.op.start:
self.be_full[constants.BE_MEMORY],
self.op.hypervisor)
- self.instance_status = self.op.start
-
def Exec(self, feedback_fn):
"""Create and add the instance to the cluster.
instance = self.op.instance_name
pnode_name = self.pnode.name
- for nic in self.nics:
- if nic.mac in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
- nic.mac = self.cfg.GenerateMAC()
-
ht_kind = self.op.hypervisor
if ht_kind in constants.HTS_REQ_PORT:
network_port = self.cfg.AllocatePort()
primary_node=pnode_name,
nics=self.nics, disks=disks,
disk_template=self.op.disk_template,
- admin_up=self.instance_status,
+ admin_up=False,
network_port=network_port,
beparams=self.op.beparams,
hvparams=self.op.hvparams,
if iobj.disk_template != constants.DT_DISKLESS:
if self.op.mode == constants.INSTANCE_CREATE:
feedback_fn("* running the instance OS create scripts...")
- result = self.rpc.call_instance_os_add(pnode_name, iobj)
+ result = self.rpc.call_instance_os_add(pnode_name, iobj, False)
msg = result.RemoteFailMsg()
if msg:
raise errors.OpExecError("Could not add os for instance %s"
% self.op.mode)
if self.op.start:
+ iobj.admin_up = True
+ self.cfg.Update(iobj)
logging.info("Starting instance %s on node %s", instance, pnode_name)
feedback_fn("* starting instance...")
- result = self.rpc.call_instance_start(pnode_name, iobj, None)
+ result = self.rpc.call_instance_start(pnode_name, iobj, None, None)
msg = result.RemoteFailMsg()
if msg:
raise errors.OpExecError("Could not start instance: %s" % msg)
logging.debug("Connecting to console of %s on %s", instance.name, node)
hyper = hypervisor.GetHypervisor(instance.hypervisor)
- console_cmd = hyper.GetShellCommandForConsole(instance)
+ cluster = self.cfg.GetClusterInfo()
+ # beparams and hvparams are passed separately, to avoid editing the
+ # instance and then saving the defaults in the instance itself.
+ hvparams = cluster.FillHV(instance)
+ beparams = cluster.FillBE(instance)
+ console_cmd = hyper.GetShellCommandForConsole(instance, hvparams, beparams)
# build ssh cmdline
return self.ssh.BuildCmd(node, "root", console_cmd, batch=True, tty=True)
n1 = self.new_node = remote_node
n2 = self.oth_node = instance.primary_node
self.tgt_node = self.sec_node
+ _CheckNodeNotDrained(self, remote_node)
else:
raise errors.ProgrammerError("Unhandled disk replace mode")
for node in tgt_node, oth_node:
info("checking disk/%d on %s" % (idx, node))
cfg.SetDiskID(dev, node)
- if not self.rpc.call_blockdev_find(node, dev):
- raise errors.OpExecError("Can't find disk/%d on node %s" %
- (idx, node))
+ result = self.rpc.call_blockdev_find(node, dev)
+ msg = result.RemoteFailMsg()
+ if not msg and not result.payload:
+ msg = "disk not found"
+ if msg:
+ raise errors.OpExecError("Can't find disk/%d on node %s: %s" %
+ (idx, node, msg))
# Step: check other node consistency
self.proc.LogStep(2, steps_total, "check peer consistency")
for dev, old_lvs, new_lvs in iv_names.itervalues():
info("detaching %s drbd from local storage" % dev.iv_name)
result = self.rpc.call_blockdev_removechildren(tgt_node, dev, old_lvs)
- result.Raise()
- if not result.data:
+ msg = result.RemoteFailMsg()
+ if msg:
raise errors.OpExecError("Can't detach drbd from local storage on node"
- " %s for device %s" % (tgt_node, dev.iv_name))
+ " %s for device %s: %s" %
+ (tgt_node, dev.iv_name, msg))
#dev.children = []
#cfg.Update(instance)
# build the rename list based on what LVs exist on the node
rlist = []
for to_ren in old_lvs:
- find_res = self.rpc.call_blockdev_find(tgt_node, to_ren)
- if not find_res.failed and find_res.data is not None: # device exists
+ result = self.rpc.call_blockdev_find(tgt_node, to_ren)
+ if not result.RemoteFailMsg() and result.payload:
+ # device exists
rlist.append((to_ren, ren_fn(to_ren, temp_suffix)))
info("renaming the old LVs on the target node")
result = self.rpc.call_blockdev_rename(tgt_node, rlist)
- result.Raise()
- if not result.data:
- raise errors.OpExecError("Can't rename old LVs on node %s" % tgt_node)
+ msg = result.RemoteFailMsg()
+ if msg:
+ raise errors.OpExecError("Can't rename old LVs on node %s: %s" %
+ (tgt_node, msg))
# now we rename the new LVs to the old LVs
info("renaming the new LVs on the target node")
rlist = [(new, old.physical_id) for old, new in zip(old_lvs, new_lvs)]
result = self.rpc.call_blockdev_rename(tgt_node, rlist)
- result.Raise()
- if not result.data:
- raise errors.OpExecError("Can't rename new LVs on node %s" % tgt_node)
+ msg = result.RemoteFailMsg()
+ if msg:
+ raise errors.OpExecError("Can't rename new LVs on node %s: %s" %
+ (tgt_node, msg))
for old, new in zip(old_lvs, new_lvs):
new.logical_id = old.logical_id
# now that the new lvs have the old name, we can add them to the device
info("adding new mirror component on %s" % tgt_node)
result = self.rpc.call_blockdev_addchildren(tgt_node, dev, new_lvs)
- if result.failed or not result.data:
+ msg = result.RemoteFailMsg()
+ if msg:
for new_lv in new_lvs:
- result = self.rpc.call_blockdev_remove(tgt_node, new_lv)
- if result.failed or not result.data:
- warning("Can't rollback device %s", hint="manually cleanup unused"
- " logical volumes")
- raise errors.OpExecError("Can't add local storage to drbd")
+ msg = self.rpc.call_blockdev_remove(tgt_node, new_lv).RemoteFailMsg()
+ if msg:
+ warning("Can't rollback device %s: %s", dev, msg,
+ hint="cleanup manually the unused logical volumes")
+ raise errors.OpExecError("Can't add local storage to drbd: %s" % msg)
dev.children = new_lvs
cfg.Update(instance)
for name, (dev, old_lvs, new_lvs) in iv_names.iteritems():
cfg.SetDiskID(dev, instance.primary_node)
result = self.rpc.call_blockdev_find(instance.primary_node, dev)
- if result.failed or result.data[5]:
+ msg = result.RemoteFailMsg()
+ if not msg and not result.payload:
+ msg = "disk not found"
+ if msg:
+ raise errors.OpExecError("Can't find DRBD device %s: %s" %
+ (name, msg))
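+      # payload is the block device status tuple returned by
+      # blockdev_find; index 5 is assumed to be the is_degraded flag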
+ if result.payload[5]:
raise errors.OpExecError("DRBD device %s is degraded!" % name)
# Step: remove old storage
info("remove logical volumes for %s" % name)
for lv in old_lvs:
cfg.SetDiskID(lv, tgt_node)
- result = self.rpc.call_blockdev_remove(tgt_node, lv)
- if result.failed or not result.data:
- warning("Can't remove old LV", hint="manually remove unused LVs")
+ msg = self.rpc.call_blockdev_remove(tgt_node, lv).RemoteFailMsg()
+ if msg:
+ warning("Can't remove old LV: %s" % msg,
+ hint="manually remove unused LVs")
continue
def _ExecD8Secondary(self, feedback_fn):
info("checking disk/%d on %s" % (idx, pri_node))
cfg.SetDiskID(dev, pri_node)
result = self.rpc.call_blockdev_find(pri_node, dev)
- result.Raise()
- if not result.data:
- raise errors.OpExecError("Can't find disk/%d on node %s" %
- (idx, pri_node))
+ msg = result.RemoteFailMsg()
+ if not msg and not result.payload:
+ msg = "disk not found"
+ if msg:
+ raise errors.OpExecError("Can't find disk/%d on node %s: %s" %
+ (idx, pri_node, msg))
# Step: check other node consistency
self.proc.LogStep(2, steps_total, "check peer consistency")
try:
_CreateSingleBlockDev(self, new_node, instance, new_drbd,
_GetInstanceInfoText(instance), False)
- except errors.BlockDeviceError:
+ except errors.GenericError:
self.cfg.ReleaseDRBDMinors(instance.name)
raise
# we have new devices, shutdown the drbd on the old secondary
info("shutting down drbd for disk/%d on old node" % idx)
cfg.SetDiskID(dev, old_node)
- result = self.rpc.call_blockdev_shutdown(old_node, dev)
- if result.failed or not result.data:
- warning("Failed to shutdown drbd for disk/%d on old node" % idx,
+ msg = self.rpc.call_blockdev_shutdown(old_node, dev).RemoteFailMsg()
+ if msg:
+ warning("Failed to shutdown drbd for disk/%d on old node: %s" %
+ (idx, msg),
hint="Please cleanup this device manually as soon as possible")
info("detaching primary drbds from the network (=> standalone)")
for idx, (dev, old_lvs, _) in iv_names.iteritems():
cfg.SetDiskID(dev, pri_node)
result = self.rpc.call_blockdev_find(pri_node, dev)
- result.Raise()
- if result.data[5]:
+ msg = result.RemoteFailMsg()
+ if not msg and not result.payload:
+ msg = "disk not found"
+ if msg:
+ raise errors.OpExecError("Can't find DRBD device disk/%d: %s" %
+ (idx, msg))
+ if result.payload[5]:
raise errors.OpExecError("DRBD device disk/%d is degraded!" % idx)
self.proc.LogStep(6, steps_total, "removing old storage")
info("remove logical volumes for disk/%d" % idx)
for lv in old_lvs:
cfg.SetDiskID(lv, old_node)
- result = self.rpc.call_blockdev_remove(old_node, lv)
- if result.failed or not result.data:
- warning("Can't remove LV on old secondary",
+ msg = self.rpc.call_blockdev_remove(old_node, lv).RemoteFailMsg()
+ if msg:
+ warning("Can't remove LV on old secondary: %s", msg,
hint="Cleanup stale volumes by hand")
def Exec(self, feedback_fn):
for node in instance.all_nodes:
self.cfg.SetDiskID(disk, node)
result = self.rpc.call_blockdev_grow(node, disk, self.op.amount)
- result.Raise()
- if (not result.data or not isinstance(result.data, (list, tuple)) or
- len(result.data) != 2):
- raise errors.OpExecError("Grow request failed to node %s" % node)
- elif not result.data[0]:
+ msg = result.RemoteFailMsg()
+ if msg:
raise errors.OpExecError("Grow request failed to node %s: %s" %
- (node, result.data[1]))
+ (node, msg))
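+    # note: RecordGrow below only updates the size recorded on the
+    # disk object; the actual device was already grown by the RPC above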
disk.RecordGrow(self.op.amount)
self.cfg.Update(instance)
if self.op.wait_for_sync:
if not static:
self.cfg.SetDiskID(dev, instance.primary_node)
dev_pstatus = self.rpc.call_blockdev_find(instance.primary_node, dev)
- dev_pstatus.Raise()
- dev_pstatus = dev_pstatus.data
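+      # an offline node cannot return a device status; report None
+      # for it instead of failing the query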
+ if dev_pstatus.offline:
+ dev_pstatus = None
+ else:
+ msg = dev_pstatus.RemoteFailMsg()
+ if msg:
+ raise errors.OpExecError("Can't compute disk status for %s: %s" %
+ (instance.name, msg))
+ dev_pstatus = dev_pstatus.payload
else:
dev_pstatus = None
if snode and not static:
self.cfg.SetDiskID(dev, snode)
dev_sstatus = self.rpc.call_blockdev_find(snode, dev)
- dev_sstatus.Raise()
- dev_sstatus = dev_sstatus.data
+ if dev_sstatus.offline:
+ dev_sstatus = None
+ else:
+ msg = dev_sstatus.RemoteFailMsg()
+ if msg:
+ raise errors.OpExecError("Can't compute disk status for %s: %s" %
+ (instance.name, msg))
+ dev_sstatus = dev_sstatus.payload
else:
dev_sstatus = None
self.op.hvparams or self.op.beparams):
raise errors.OpPrereqError("No changes submitted")
- utils.CheckBEParams(self.op.beparams)
-
# Disk validation
disk_addremove = 0
for disk_op, disk_dict in self.op.disks:
raise errors.OpPrereqError("Invalid disk index")
if disk_op == constants.DDM_ADD:
mode = disk_dict.setdefault('mode', constants.DISK_RDWR)
- if mode not in (constants.DISK_RDONLY, constants.DISK_RDWR):
+ if mode not in constants.DISK_ACCESS_SET:
raise errors.OpPrereqError("Invalid disk access mode '%s'" % mode)
size = disk_dict.get('size', None)
if size is None:
# nic_dict should be a dict
nic_ip = nic_dict.get('ip', None)
if nic_ip is not None:
- if nic_ip.lower() == "none":
+ if nic_ip.lower() == constants.VALUE_NONE:
nic_dict['ip'] = None
else:
if not utils.IsValidIP(nic_ip):
raise errors.OpPrereqError("Invalid IP address '%s'" % nic_ip)
- # we can only check None bridges and assign the default one
+
nic_bridge = nic_dict.get('bridge', None)
- if nic_bridge is None:
- nic_dict['bridge'] = self.cfg.GetDefBridge()
- # but we can validate MACs
- nic_mac = nic_dict.get('mac', None)
- if nic_mac is not None:
- if self.cfg.IsMacInUse(nic_mac):
- raise errors.OpPrereqError("MAC address %s already in use"
- " in cluster" % nic_mac)
+ nic_link = nic_dict.get('link', None)
+ if nic_bridge and nic_link:
+ raise errors.OpPrereqError("Cannot pass 'bridge' and 'link' at the same time")
+ elif nic_bridge and nic_bridge.lower() == constants.VALUE_NONE:
+ nic_dict['bridge'] = None
+ elif nic_link and nic_link.lower() == constants.VALUE_NONE:
+ nic_dict['link'] = None
+
+ if nic_op == constants.DDM_ADD:
+ nic_mac = nic_dict.get('mac', None)
+ if nic_mac is None:
+ nic_dict['mac'] = constants.VALUE_AUTO
+
+ if 'mac' in nic_dict:
+ nic_mac = nic_dict['mac']
if nic_mac not in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
if not utils.IsValidMac(nic_mac):
raise errors.OpPrereqError("Invalid MAC address %s" % nic_mac)
+ if nic_op != constants.DDM_ADD and nic_mac == constants.VALUE_AUTO:
+ raise errors.OpPrereqError("'auto' is not a valid MAC address when"
+ " modifying an existing nic")
+
if nic_addremove > 1:
raise errors.OpPrereqError("Only one NIC add or remove operation"
" supported at a time")
args['memory'] = self.be_new[constants.BE_MEMORY]
if constants.BE_VCPUS in self.be_new:
args['vcpus'] = self.be_new[constants.BE_VCPUS]
- # FIXME: readd disk/nic changes
+ # TODO: export disk changes. Note: _BuildInstanceHookEnv* don't export disk
+ # information at all.
+ if self.op.nics:
+ args['nics'] = []
+ nic_override = dict(self.op.nics)
+ c_nicparams = self.cluster.nicparams[constants.PP_DEFAULT]
+ for idx, nic in enumerate(self.instance.nics):
+ if idx in nic_override:
+ this_nic_override = nic_override[idx]
+ else:
+ this_nic_override = {}
+ if 'ip' in this_nic_override:
+ ip = this_nic_override['ip']
+ else:
+ ip = nic.ip
+ if 'mac' in this_nic_override:
+ mac = this_nic_override['mac']
+ else:
+ mac = nic.mac
+ if idx in self.nic_pnew:
+ nicparams = self.nic_pnew[idx]
+ else:
+ nicparams = objects.FillDict(c_nicparams, nic.nicparams)
+ mode = nicparams[constants.NIC_MODE]
+ link = nicparams[constants.NIC_LINK]
+ args['nics'].append((ip, mac, mode, link))
+ if constants.DDM_ADD in nic_override:
+ ip = nic_override[constants.DDM_ADD].get('ip', None)
+ mac = nic_override[constants.DDM_ADD]['mac']
+ nicparams = self.nic_pnew[constants.DDM_ADD]
+ mode = nicparams[constants.NIC_MODE]
+ link = nicparams[constants.NIC_LINK]
+ args['nics'].append((ip, mac, mode, link))
+ elif constants.DDM_REMOVE in nic_override:
+ del args['nics'][-1]
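+      # args['nics'] now holds (ip, mac, mode, link) tuples, the same
+      # shape _PreBuildNICHooksList produces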
+
env = _BuildInstanceHookEnvByObject(self, self.instance, override=args)
nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
return env, nl, nl
+ def _GetUpdatedParams(self, old_params, update_dict,
+ default_values, parameter_types):
+ """Return the new params dict for the given params.
+
+    @type old_params: dict
+    @param old_params: old parameters
+    @type update_dict: dict
+    @param update_dict: dict containing new parameter values,
+                        or constants.VALUE_DEFAULT to reset the
+                        parameter to its default value
+ @type default_values: dict
+ @param default_values: default values for the filled parameters
+ @type parameter_types: dict
+ @param parameter_types: dict mapping target dict keys to types
+ in constants.ENFORCEABLE_TYPES
+ @rtype: (dict, dict)
+ @return: (new_parameters, filled_parameters)
+
+ """
+ params_copy = copy.deepcopy(old_params)
+ for key, val in update_dict.iteritems():
+ if val == constants.VALUE_DEFAULT:
+ try:
+ del params_copy[key]
+ except KeyError:
+ pass
+ else:
+ params_copy[key] = val
+ utils.ForceDictType(params_copy, parameter_types)
+ params_filled = objects.FillDict(default_values, params_copy)
+ return (params_copy, params_filled)
+
def CheckPrereq(self):
"""Check prerequisites.
# checking the new params on the primary/secondary nodes
instance = self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
+ cluster = self.cluster = self.cfg.GetClusterInfo()
assert self.instance is not None, \
"Cannot retrieve locked instance %s" % self.op.instance_name
pnode = instance.primary_node
# hvparams processing
if self.op.hvparams:
- i_hvdict = copy.deepcopy(instance.hvparams)
- for key, val in self.op.hvparams.iteritems():
- if val == constants.VALUE_DEFAULT:
- try:
- del i_hvdict[key]
- except KeyError:
- pass
- elif val == constants.VALUE_NONE:
- i_hvdict[key] = None
- else:
- i_hvdict[key] = val
- cluster = self.cfg.GetClusterInfo()
- hv_new = cluster.FillDict(cluster.hvparams[instance.hypervisor],
- i_hvdict)
+ i_hvdict, hv_new = self._GetUpdatedParams(
+ instance.hvparams, self.op.hvparams,
+ cluster.hvparams[instance.hypervisor],
+ constants.HVS_PARAMETER_TYPES)
# local check
hypervisor.GetHypervisor(
instance.hypervisor).CheckParameterSyntax(hv_new)
# beparams processing
if self.op.beparams:
- i_bedict = copy.deepcopy(instance.beparams)
- for key, val in self.op.beparams.iteritems():
- if val == constants.VALUE_DEFAULT:
- try:
- del i_bedict[key]
- except KeyError:
- pass
- else:
- i_bedict[key] = val
- cluster = self.cfg.GetClusterInfo()
- be_new = cluster.FillDict(cluster.beparams[constants.BEGR_DEFAULT],
- i_bedict)
+ i_bedict, be_new = self._GetUpdatedParams(
+ instance.beparams, self.op.beparams,
+ cluster.beparams[constants.PP_DEFAULT],
+ constants.BES_PARAMETER_TYPES)
self.be_new = be_new # the new actual values
self.be_inst = i_bedict # the new dict (without defaults)
else:
self.warn.append("Can't get info from primary node %s" % pnode)
else:
if not instance_info.failed and instance_info.data:
- current_mem = instance_info.data['memory']
+ current_mem = int(instance_info.data['memory'])
else:
# Assume instance not running
# (there is a slight race condition here, but it's not very probable,
" secondary node %s" % node)
# NIC processing
+ self.nic_pnew = {}
+ self.nic_pinst = {}
for nic_op, nic_dict in self.op.nics:
if nic_op == constants.DDM_REMOVE:
if not instance.nics:
raise errors.OpPrereqError("Invalid NIC index %s, valid values"
" are 0 to %d" %
(nic_op, len(instance.nics)))
- nic_bridge = nic_dict.get('bridge', None)
- if nic_bridge is not None:
- if not self.rpc.call_bridges_exist(pnode, [nic_bridge]):
+ old_nic_params = instance.nics[nic_op].nicparams
+ old_nic_ip = instance.nics[nic_op].ip
+ else:
+ old_nic_params = {}
+ old_nic_ip = None
+
+ update_params_dict = dict([(key, nic_dict[key])
+ for key in constants.NICS_PARAMETERS
+ if key in nic_dict])
+
+ if 'bridge' in nic_dict:
+ update_params_dict[constants.NIC_LINK] = nic_dict['bridge']
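+        # 'bridge' is accepted as a backwards-compatible alias and
+        # folded into the link parameter here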
+
+ new_nic_params, new_filled_nic_params = \
+ self._GetUpdatedParams(old_nic_params, update_params_dict,
+ cluster.nicparams[constants.PP_DEFAULT],
+ constants.NICS_PARAMETER_TYPES)
+ objects.NIC.CheckParameterSyntax(new_filled_nic_params)
+ self.nic_pinst[nic_op] = new_nic_params
+ self.nic_pnew[nic_op] = new_filled_nic_params
+ new_nic_mode = new_filled_nic_params[constants.NIC_MODE]
+
+ if new_nic_mode == constants.NIC_MODE_BRIDGED:
+ nic_bridge = new_filled_nic_params[constants.NIC_LINK]
+ result = self.rpc.call_bridges_exist(pnode, [nic_bridge])
+ result.Raise()
+ if not result.data:
msg = ("Bridge '%s' doesn't exist on one of"
" the instance nodes" % nic_bridge)
if self.force:
self.warn.append(msg)
else:
raise errors.OpPrereqError(msg)
+ if new_nic_mode == constants.NIC_MODE_ROUTED:
+ if 'ip' in nic_dict:
+ nic_ip = nic_dict['ip']
+ else:
+ nic_ip = old_nic_ip
+ if nic_ip is None:
+ raise errors.OpPrereqError('Cannot set the nic ip to None'
+ ' on a routed nic')
+ if 'mac' in nic_dict:
+ nic_mac = nic_dict['mac']
+ if nic_mac is None:
+ raise errors.OpPrereqError('Cannot set the nic mac to None')
+ elif nic_mac in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
+ # otherwise generate the mac
+ nic_dict['mac'] = self.cfg.GenerateMAC()
+ else:
+ # or validate/reserve the current one
+ if self.cfg.IsMacInUse(nic_mac):
+ raise errors.OpPrereqError("MAC address %s already in use"
+ " in cluster" % nic_mac)
# DISK processing
if self.op.disks and instance.disk_template == constants.DT_DISKLESS:
result = []
instance = self.instance
+ cluster = self.cluster
# disk changes
for disk_op, disk_dict in self.op.disks:
if disk_op == constants.DDM_REMOVE:
device_idx = len(instance.disks)
for node, disk in device.ComputeNodeTree(instance.primary_node):
self.cfg.SetDiskID(disk, node)
- rpc_result = self.rpc.call_blockdev_remove(node, disk)
- if rpc_result.failed or not rpc_result.data:
- self.proc.LogWarning("Could not remove disk/%d on node %s,"
- " continuing anyway", device_idx, node)
+ msg = self.rpc.call_blockdev_remove(node, disk).RemoteFailMsg()
+ if msg:
+ self.LogWarning("Could not remove disk/%d on node %s: %s,"
+ " continuing anyway", device_idx, node, msg)
result.append(("disk/%d" % device_idx, "remove"))
elif disk_op == constants.DDM_ADD:
# add a new disk
file_path,
file_driver,
disk_idx_base)[0]
- new_disk.mode = disk_dict['mode']
instance.disks.append(new_disk)
info = _GetInstanceInfoText(instance)
del instance.nics[-1]
result.append(("nic.%d" % len(instance.nics), "remove"))
elif nic_op == constants.DDM_ADD:
- # add a new nic
- if 'mac' not in nic_dict:
- mac = constants.VALUE_GENERATE
- else:
- mac = nic_dict['mac']
- if mac in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
- mac = self.cfg.GenerateMAC()
- new_nic = objects.NIC(mac=mac, ip=nic_dict.get('ip', None),
- bridge=nic_dict.get('bridge', None))
+        # mac and nicparams should be set by now
+ mac = nic_dict['mac']
+ ip = nic_dict.get('ip', None)
+ nicparams = self.nic_pinst[constants.DDM_ADD]
+ new_nic = objects.NIC(mac=mac, ip=ip, nicparams=nicparams)
instance.nics.append(new_nic)
result.append(("nic.%d" % (len(instance.nics) - 1),
- "add:mac=%s,ip=%s,bridge=%s" %
- (new_nic.mac, new_nic.ip, new_nic.bridge)))
+ "add:mac=%s,ip=%s,mode=%s,link=%s" %
+ (new_nic.mac, new_nic.ip,
+ self.nic_pnew[constants.DDM_ADD][constants.NIC_MODE],
+ self.nic_pnew[constants.DDM_ADD][constants.NIC_LINK]
+ )))
else:
- # change a given nic
- for key in 'mac', 'ip', 'bridge':
+ for key in 'mac', 'ip':
if key in nic_dict:
setattr(instance.nics[nic_op], key, nic_dict[key])
- result.append(("nic.%s/%d" % (key, nic_op), nic_dict[key]))
+ if nic_op in self.nic_pnew:
+ instance.nics[nic_op].nicparams = self.nic_pnew[nic_op]
+ for key, val in nic_dict.iteritems():
+ result.append(("nic.%s/%d" % (key, nic_op), val))
# hvparams changes
if self.op.hvparams:
- instance.hvparams = self.hv_new
+ instance.hvparams = self.hv_inst
for key, val in self.op.hvparams.iteritems():
result.append(("hv/%s" % key, val))
# This is a wrong node name, not a non-locked node
raise errors.OpPrereqError("Wrong node name %s" % self.op.target_node)
_CheckNodeOnline(self, self.dst_node.name)
+ _CheckNodeNotDrained(self, self.dst_node.name)
# instance disk type verification
for disk in self.instance.disks:
if self.op.shutdown:
# shutdown the instance, but not the disks
result = self.rpc.call_instance_shutdown(src_node, instance)
- result.Raise()
- if not result.data:
- raise errors.OpExecError("Could not shutdown instance %s on node %s" %
- (instance.name, src_node))
+ msg = result.RemoteFailMsg()
+ if msg:
+ raise errors.OpExecError("Could not shutdown instance %s on"
+ " node %s: %s" %
+ (instance.name, src_node, msg))
vgname = self.cfg.GetVGName()
try:
for disk in instance.disks:
- # new_dev_name will be a snapshot of an lvm leaf of the one we passed
- new_dev_name = self.rpc.call_blockdev_snapshot(src_node, disk)
- if new_dev_name.failed or not new_dev_name.data:
- self.LogWarning("Could not snapshot block device %s on node %s",
- disk.logical_id[1], src_node)
+ # result.payload will be a snapshot of an lvm leaf of the one we passed
+ result = self.rpc.call_blockdev_snapshot(src_node, disk)
+ msg = result.RemoteFailMsg()
+ if msg:
+ self.LogWarning("Could not snapshot block device %s on node %s: %s",
+ disk.logical_id[1], src_node, msg)
snap_disks.append(False)
else:
+ disk_id = (vgname, result.payload)
new_dev = objects.Disk(dev_type=constants.LD_LV, size=disk.size,
- logical_id=(vgname, new_dev_name.data),
- physical_id=(vgname, new_dev_name.data),
+ logical_id=disk_id, physical_id=disk_id,
iv_name=disk.iv_name)
snap_disks.append(new_dev)
finally:
if self.op.shutdown and instance.admin_up:
- result = self.rpc.call_instance_start(src_node, instance, None)
+ result = self.rpc.call_instance_start(src_node, instance, None, None)
msg = result.RemoteFailMsg()
if msg:
_ShutdownInstanceDisks(self, instance)
if dev:
result = self.rpc.call_snapshot_export(src_node, dev, dst_node.name,
instance, cluster_name, idx)
- if result.failed or not result.data:
+ msg = result.RemoteFailMsg()
+ if msg:
self.LogWarning("Could not export block device %s from node %s to"
- " node %s", dev.logical_id[1], src_node,
- dst_node.name)
- result = self.rpc.call_blockdev_remove(src_node, dev)
- if result.failed or not result.data:
+ " node %s: %s", dev.logical_id[1], src_node,
+ dst_node.name, msg)
+ msg = self.rpc.call_blockdev_remove(src_node, dev).RemoteFailMsg()
+ if msg:
self.LogWarning("Could not remove snapshot block device %s from node"
- " %s", dev.logical_id[1], src_node)
+ " %s: %s", dev.logical_id[1], src_node, msg)
result = self.rpc.call_finalize_export(dst_node.name, instance, snap_disks)
if result.failed or not result.data:
cluster_info = cfg.GetClusterInfo()
# cluster data
data = {
- "version": 1,
+ "version": constants.IALLOCATOR_VERSION,
"cluster_name": cfg.GetClusterName(),
"cluster_tags": list(cluster_info.GetTags()),
"enabled_hypervisors": list(cluster_info.enabled_hypervisors),
"primary_ip": ninfo.primary_ip,
"secondary_ip": ninfo.secondary_ip,
"offline": ninfo.offline,
+ "drained": ninfo.drained,
"master_candidate": ninfo.master_candidate,
}
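+      # the new drained flag is exported next to offline, presumably so
+      # allocators can avoid placing instances on nodes being evacuated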
# instance data
instance_data = {}
for iinfo, beinfo in i_list:
- nic_data = [{"mac": n.mac, "ip": n.ip, "bridge": n.bridge}
- for n in iinfo.nics]
+ nic_data = []
+ for nic in iinfo.nics:
+ filled_params = objects.FillDict(
+ cluster_info.nicparams[constants.PP_DEFAULT],
+ nic.nicparams)
+ nic_dict = {"mac": nic.mac,
+ "ip": nic.ip,
+ "mode": filled_params[constants.NIC_MODE],
+ "link": filled_params[constants.NIC_LINK],
+ }
+ if filled_params[constants.NIC_MODE] == constants.NIC_MODE_BRIDGED:
+ nic_dict["bridge"] = filled_params[constants.NIC_LINK]
+ nic_data.append(nic_dict)
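+        # each nic_data entry looks like (illustrative values):
+        # {"mac": "aa:00:00:31:cd:5f", "ip": None, "mode": "bridged",
+        #  "link": "xen-br0", "bridge": "xen-br0"}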
pir = {
"tags": list(iinfo.GetTags()),
"admin_up": iinfo.admin_up,
"disk_template": iinfo.disk_template,
"hypervisor": iinfo.hypervisor,
}
+ pir["disk_space_total"] = _ComputeDiskSize(iinfo.disk_template,
+ pir["disks"])
instance_data[iinfo.name] = pir
data["instances"] = instance_data