import shutil
import itertools
import operator
-import ipaddr
from ganeti import ssh
from ganeti import utils
self.owned_locks = context.glm.list_owned
self.context = context
self.rpc = rpc_runner
- # Dicts used to declare locking needs to mcpu
+
+ # Dictionaries used to declare locking needs to mcpu
self.needed_locks = None
self.share_locks = dict.fromkeys(locking.LEVELS, 0)
+ self.opportunistic_locks = dict.fromkeys(locking.LEVELS, False)
+
self.add_locks = {}
self.remove_locks = {}
+
# Used to force good behavior when calling helper functions
self.recalculate_locks = {}
+
# logging
self.Log = processor.Log # pylint: disable=C0103
self.LogWarning = processor.LogWarning # pylint: disable=C0103
try:
hm.RunPhase(constants.HOOKS_PHASE_POST, nodes=[node_name])
except Exception, err: # pylint: disable=W0703
- lu.LogWarning("Errors occurred running hooks on %s: %s" % (node_name, err))
+ lu.LogWarning("Errors occurred running hooks on %s: %s",
+ node_name, err)
def _CheckOutputFields(static, dynamic, selected):
"""
if msg is None:
- msg = "can't use instance from outside %s states" % ", ".join(req_states)
+ msg = ("can't use instance from outside %s states" %
+ utils.CommaJoin(req_states))
if instance.admin_state not in req_states:
raise errors.OpPrereqError("Instance '%s' is marked to be %s, %s" %
(instance.name, instance.admin_state, msg),
return _ExpandItemName(cfg.ExpandInstanceName, name, "Instance")
+def _BuildNetworkHookEnv(name, subnet, gateway, network6, gateway6,
+ network_type, mac_prefix, tags):
+ """Builds network related env variables for hooks
+
+ This builds the hook environment from individual variables.
+
+ @type name: string
+ @param name: the name of the network
+ @type subnet: string
+ @param subnet: the ipv4 subnet
+ @type gateway: string
+ @param gateway: the ipv4 gateway
+ @type network6: string
+ @param network6: the ipv6 subnet
+ @type gateway6: string
+ @param gateway6: the ipv6 gateway
+ @type network_type: string
+ @param network_type: the type of the network
+ @type mac_prefix: string
+ @param mac_prefix: the mac_prefix
+ @type tags: list
+ @param tags: the tags of the network
+
+ """
+ env = {}
+ if name:
+ env["NETWORK_NAME"] = name
+ if subnet:
+ env["NETWORK_SUBNET"] = subnet
+ if gateway:
+ env["NETWORK_GATEWAY"] = gateway
+ if network6:
+ env["NETWORK_SUBNET6"] = network6
+ if gateway6:
+ env["NETWORK_GATEWAY6"] = gateway6
+ if mac_prefix:
+ env["NETWORK_MAC_PREFIX"] = mac_prefix
+ if network_type:
+ env["NETWORK_TYPE"] = network_type
+ if tags:
+ env["NETWORK_TAGS"] = " ".join(tags)
+
+ return env
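+
+# Illustrative sketch (hypothetical values): for a network named "net1" with
+# subnet "192.0.2.0/24", gateway "192.0.2.1" and no other attributes set,
+# the helper above would return:
+#   {"NETWORK_NAME": "net1", "NETWORK_SUBNET": "192.0.2.0/24",
+#    "NETWORK_GATEWAY": "192.0.2.1"}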
+
+
+def _BuildNetworkHookEnvByObject(net):
+ """Builds network related env varliables for hooks
+
+ @type net: L{objects.Network}
+ @param net: the network object
+
+ """
+ args = {
+ "name": net.name,
+ "subnet": net.network,
+ "gateway": net.gateway,
+ "network6": net.network6,
+ "gateway6": net.gateway6,
+ "network_type": net.network_type,
+ "mac_prefix": net.mac_prefix,
+ "tags": net.tags,
+ }
+
+ return _BuildNetworkHookEnv(**args) # pylint: disable=W0142
+
+
def _BuildInstanceHookEnv(name, primary_node, secondary_nodes, os_type, status,
minmem, maxmem, vcpus, nics, disk_template, disks,
bep, hvp, hypervisor_name, tags):
@type vcpus: string
@param vcpus: the count of VCPUs the instance has
@type nics: list
- @param nics: list of tuples (ip, mac, mode, link) representing
+ @param nics: list of tuples (ip, mac, mode, link, network) representing
the NICs the instance has
@type disk_template: string
@param disk_template: the disk template of the instance
}
if nics:
nic_count = len(nics)
- for idx, (ip, mac, mode, link) in enumerate(nics):
+ for idx, (ip, mac, mode, link, net, netinfo) in enumerate(nics):
if ip is None:
ip = ""
env["INSTANCE_NIC%d_IP" % idx] = ip
env["INSTANCE_NIC%d_MAC" % idx] = mac
env["INSTANCE_NIC%d_MODE" % idx] = mode
env["INSTANCE_NIC%d_LINK" % idx] = link
+ if net:
+ env["INSTANCE_NIC%d_NETWORK" % idx] = net
+ if netinfo:
+ nobj = objects.Network.FromDict(netinfo)
+ if nobj.network:
+ env["INSTANCE_NIC%d_NETWORK_SUBNET" % idx] = nobj.network
+ if nobj.gateway:
+ env["INSTANCE_NIC%d_NETWORK_GATEWAY" % idx] = nobj.gateway
+ if nobj.network6:
+ env["INSTANCE_NIC%d_NETWORK_SUBNET6" % idx] = nobj.network6
+ if nobj.gateway6:
+ env["INSTANCE_NIC%d_NETWORK_GATEWAY6" % idx] = nobj.gateway6
+ if nobj.mac_prefix:
+ env["INSTANCE_NIC%d_NETWORK_MAC_PREFIX" % idx] = nobj.mac_prefix
+ if nobj.network_type:
+ env["INSTANCE_NIC%d_NETWORK_TYPE" % idx] = nobj.network_type
+ if nobj.tags:
+ env["INSTANCE_NIC%d_NETWORK_TAGS" % idx] = " ".join(nobj.tags)
if mode == constants.NIC_MODE_BRIDGED:
env["INSTANCE_NIC%d_BRIDGE" % idx] = link
else:
return env
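+# Illustrative sketch (hypothetical values): a single bridged NIC attached to
+# network "net1" (subnet "192.0.2.0/24") makes the loop above emit entries
+# such as INSTANCE_NIC0_MAC, INSTANCE_NIC0_MODE, INSTANCE_NIC0_LINK,
+# INSTANCE_NIC0_BRIDGE, INSTANCE_NIC0_NETWORK=net1 and
+# INSTANCE_NIC0_NETWORK_SUBNET=192.0.2.0/24.
+
+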
+def _NICToTuple(lu, nic):
+ """Build a tupple of nic information.
+
+ @type lu: L{LogicalUnit}
+ @param lu: the logical unit on whose behalf we execute
+ @type nic: L{objects.NIC}
+ @param nic: nic to convert to hooks tuple
+
+ """
+ ip = nic.ip
+ mac = nic.mac
+ cluster = lu.cfg.GetClusterInfo()
+ filled_params = cluster.SimpleFillNIC(nic.nicparams)
+ mode = filled_params[constants.NIC_MODE]
+ link = filled_params[constants.NIC_LINK]
+ net = nic.network
+ netinfo = None
+ if net:
+ net_uuid = lu.cfg.LookupNetwork(net)
+ if net_uuid:
+ nobj = lu.cfg.GetNetwork(net_uuid)
+ netinfo = objects.Network.ToDict(nobj)
+ return (ip, mac, mode, link, net, netinfo)
+
+
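+# Illustrative sketch (hypothetical values): each entry produced by
+# _NICToTuple, and thus by _NICListToTuple below, looks like
+#   ("192.0.2.10", "aa:00:00:41:5e:0d", "bridged", "br0", "net1", netinfo)
+# where netinfo is the serialized network object, or None when the NIC is
+# not attached to a known network.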
def _NICListToTuple(lu, nics):
"""Build a list of nic information tuples.
"""
hooks_nics = []
- cluster = lu.cfg.GetClusterInfo()
for nic in nics:
- ip = nic.ip
- mac = nic.mac
- filled_params = cluster.SimpleFillNIC(nic.nicparams)
- mode = filled_params[constants.NIC_MODE]
- link = filled_params[constants.NIC_LINK]
- hooks_nics.append((ip, mac, mode, link))
+ hooks_nics.append(_NICToTuple(lu, nic))
return hooks_nics
# Verify global configuration
jobs.append([
- opcodes.OpClusterVerifyConfig(ignore_errors=self.op.ignore_errors)
+ opcodes.OpClusterVerifyConfig(ignore_errors=self.op.ignore_errors),
])
# Always depend on global verification
ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(cluster,
self.group_info)
err = _ComputeIPolicyInstanceViolation(ipolicy, instanceconfig)
- _ErrorIf(err, constants.CV_EINSTANCEPOLICY, instance, utils.CommaJoin(err))
+ _ErrorIf(err, constants.CV_EINSTANCEPOLICY, instance, utils.CommaJoin(err),
+ code=self.ETYPE_WARNING)
for node in node_vol_should:
n_img = node_image[node]
"""
env = {
- "CLUSTER_TAGS": " ".join(self.cfg.GetClusterInfo().GetTags())
+ "CLUSTER_TAGS": " ".join(self.cfg.GetClusterInfo().GetTags()),
}
env.update(("NODE_TAGS_%s" % node.name, " ".join(node.GetTags()))
if msg:
msg = ("Copy of file %s to node %s failed: %s" %
(fname, to_node, msg))
- lu.proc.LogWarning(msg)
+ lu.LogWarning(msg)
def _ComputeAncillaryFiles(cluster, redist):
disks = _ExpandCheckDisks(instance, disks)
if not oneshot:
- lu.proc.LogInfo("Waiting for instance %s to sync disks." % instance.name)
+ lu.LogInfo("Waiting for instance %s to sync disks", instance.name)
node = instance.primary_node
max_time = mstat.estimated_time
else:
rem_time = "no time estimate"
- lu.proc.LogInfo("- device %s: %5.2f%% done, %s" %
- (disks[i].iv_name, mstat.sync_percent, rem_time))
+ lu.LogInfo("- device %s: %5.2f%% done, %s",
+ disks[i].iv_name, mstat.sync_percent, rem_time)
# if we're done but degraded, let's do a few small retries, to
# make sure we see a stable and not transient situation; therefore
time.sleep(min(60, max_time))
if done:
- lu.proc.LogInfo("Instance %s's disks are in sync." % instance.name)
+ lu.LogInfo("Instance %s's disks are in sync", instance.name)
+
return not cumul_degraded
def ExpandNames(self):
self.share_locks = _ShareAll()
- self.needed_locks = {}
if self.op.nodes:
- self.needed_locks[locking.LEVEL_NODE] = \
- _GetWantedNodes(self, self.op.nodes)
+ self.needed_locks = {
+ locking.LEVEL_NODE: _GetWantedNodes(self, self.op.nodes),
+ }
else:
- self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
+ self.needed_locks = {
+ locking.LEVEL_NODE: locking.ALL_SET,
+ locking.LEVEL_NODE_ALLOC: locking.ALL_SET,
+ }
def Exec(self, feedback_fn):
"""Computes the list of nodes and their attributes.
if master_singlehomed and self.op.secondary_ip != node.primary_ip:
if self.op.force and node.name == master.name:
self.LogWarning("Transitioning from single-homed to multi-homed"
- " cluster. All nodes will require a secondary ip.")
+ " cluster; all nodes will require a secondary IP"
+ " address")
else:
raise errors.OpPrereqError("Changing the secondary ip on a"
" single-homed cluster requires the"
elif not master_singlehomed and self.op.secondary_ip == node.primary_ip:
if self.op.force and node.name == master.name:
self.LogWarning("Transitioning from multi-homed to single-homed"
- " cluster. Secondary IPs will have to be removed.")
+ " cluster; secondary IP addresses will have to be"
+ " removed")
else:
raise errors.OpPrereqError("Cannot set the secondary IP to be the"
" same as the primary IP on a multi-homed"
if msg:
is_offline_secondary = (node in instance.secondary_nodes and
result.offline)
- lu.proc.LogWarning("Could not prepare block device %s on node %s"
- " (is_primary=False, pass=1): %s",
- inst_disk.iv_name, node, msg)
+ lu.LogWarning("Could not prepare block device %s on node %s"
+ " (is_primary=False, pass=1): %s",
+ inst_disk.iv_name, node, msg)
if not (ignore_secondaries or is_offline_secondary):
disks_ok = False
True, idx)
msg = result.fail_msg
if msg:
- lu.proc.LogWarning("Could not prepare block device %s on node %s"
- " (is_primary=True, pass=2): %s",
- inst_disk.iv_name, node, msg)
+ lu.LogWarning("Could not prepare block device %s on node %s"
+ " (is_primary=True, pass=2): %s",
+ inst_disk.iv_name, node, msg)
disks_ok = False
else:
dev_path = result.payload
if not disks_ok:
_ShutdownInstanceDisks(lu, instance)
if force is not None and not force:
- lu.proc.LogWarning("", hint="If the message above refers to a"
- " secondary node,"
- " you can retry the operation using '--force'.")
+ lu.LogWarning("",
+ hint=("If the message above refers to a secondary node,"
+ " you can retry the operation using '--force'"))
raise errors.OpExecError("Disk consistency error")
self.primary_offline = self.cfg.GetNodeInfo(instance.primary_node).offline
if self.primary_offline and self.op.ignore_offline_nodes:
- self.proc.LogWarning("Ignoring offline primary node")
+ self.LogWarning("Ignoring offline primary node")
if self.op.hvparams or self.op.beparams:
- self.proc.LogWarning("Overridden parameters are ignored")
+ self.LogWarning("Overridden parameters are ignored")
else:
_CheckNodeOnline(self, instance.primary_node)
if self.primary_offline:
assert self.op.ignore_offline_nodes
- self.proc.LogInfo("Primary node offline, marked instance as started")
+ self.LogInfo("Primary node offline, marked instance as started")
else:
node_current = instance.primary_node
self.cfg.GetNodeInfo(self.instance.primary_node).offline
if self.primary_offline and self.op.ignore_offline_nodes:
- self.proc.LogWarning("Ignoring offline primary node")
+ self.LogWarning("Ignoring offline primary node")
else:
_CheckNodeOnline(self, self.instance.primary_node)
if self.primary_offline:
assert self.op.ignore_offline_nodes
- self.proc.LogInfo("Primary node offline, marked instance as stopped")
+ self.LogInfo("Primary node offline, marked instance as stopped")
else:
result = self.rpc.call_instance_shutdown(node_current, instance, timeout)
msg = result.fail_msg
if msg:
- self.proc.LogWarning("Could not shutdown instance: %s" % msg)
+ self.LogWarning("Could not shutdown instance: %s", msg)
_ShutdownInstanceDisks(self, instance)
# Change the instance lock. This is definitely safe while we hold the BGL.
# Otherwise the new lock would have to be added in acquired mode.
assert self.REQ_BGL
+ assert locking.BGL in self.owned_locks(locking.LEVEL_CLUSTER)
self.glm.remove(locking.LEVEL_INSTANCE, old_name)
self.glm.add(locking.LEVEL_INSTANCE, self.op.new_name)
msg = ("Could not run OS rename script for instance %s on node %s"
" (but the instance has been renamed in Ganeti): %s" %
(inst.name, inst.primary_node, msg))
- self.proc.LogWarning(msg)
+ self.LogWarning(msg)
finally:
_ShutdownInstanceDisks(self, inst)
return self.iq.OldStyleQuery(self)
+def _ExpandNamesForMigration(lu):
+ """Expands names for use with L{TLMigrateInstance}.
+
+ @type lu: L{LogicalUnit}
+
+ """
+ if lu.op.target_node is not None:
+ lu.op.target_node = _ExpandNodeName(lu.cfg, lu.op.target_node)
+
+ lu.needed_locks[locking.LEVEL_NODE] = []
+ lu.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
+
+ lu.needed_locks[locking.LEVEL_NODE_RES] = []
+ lu.recalculate_locks[locking.LEVEL_NODE_RES] = constants.LOCKS_REPLACE
+
+
+def _DeclareLocksForMigration(lu, level):
+ """Declares locks for L{TLMigrateInstance}.
+
+ @type lu: L{LogicalUnit}
+ @param level: Lock level
+
+ """
+ if level == locking.LEVEL_NODE:
+ instance = lu.cfg.GetInstanceInfo(lu.op.instance_name)
+ if instance.disk_template in constants.DTS_EXT_MIRROR:
+ if lu.op.target_node is None:
+ lu.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
+ else:
+ lu.needed_locks[locking.LEVEL_NODE] = [instance.primary_node,
+ lu.op.target_node]
+ del lu.recalculate_locks[locking.LEVEL_NODE]
+ else:
+ lu._LockInstancesNodes() # pylint: disable=W0212
+ elif level == locking.LEVEL_NODE_RES:
+ # Copy node locks
+ lu.needed_locks[locking.LEVEL_NODE_RES] = \
+ _CopyLockList(lu.needed_locks[locking.LEVEL_NODE])
+
+
class LUInstanceFailover(LogicalUnit):
"""Failover an instance.
def ExpandNames(self):
self._ExpandAndLockInstance()
+ _ExpandNamesForMigration(self)
- if self.op.target_node is not None:
- self.op.target_node = _ExpandNodeName(self.cfg, self.op.target_node)
-
- self.needed_locks[locking.LEVEL_NODE] = []
- self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
-
- self.needed_locks[locking.LEVEL_NODE_RES] = []
- self.recalculate_locks[locking.LEVEL_NODE_RES] = constants.LOCKS_REPLACE
+ self._migrater = \
+ TLMigrateInstance(self, self.op.instance_name, False, True, False,
+ self.op.ignore_consistency, True,
+ self.op.shutdown_timeout, self.op.ignore_ipolicy)
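+ # (positional arguments above, per TLMigrateInstance.__init__ below:
+ # cleanup=False, failover=True, fallback=False,
+ # ignore_consistency=self.op.ignore_consistency, allow_runtime_changes=True,
+ # shutdown_timeout=self.op.shutdown_timeout, ignore_ipolicy)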
- ignore_consistency = self.op.ignore_consistency
- shutdown_timeout = self.op.shutdown_timeout
- self._migrater = TLMigrateInstance(self, self.op.instance_name,
- cleanup=False,
- failover=True,
- ignore_consistency=ignore_consistency,
- shutdown_timeout=shutdown_timeout,
- ignore_ipolicy=self.op.ignore_ipolicy)
self.tasklets = [self._migrater]
def DeclareLocks(self, level):
- if level == locking.LEVEL_NODE:
- instance = self.context.cfg.GetInstanceInfo(self.op.instance_name)
- if instance.disk_template in constants.DTS_EXT_MIRROR:
- if self.op.target_node is None:
- self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
- else:
- self.needed_locks[locking.LEVEL_NODE] = [instance.primary_node,
- self.op.target_node]
- del self.recalculate_locks[locking.LEVEL_NODE]
- else:
- self._LockInstancesNodes()
- elif level == locking.LEVEL_NODE_RES:
- # Copy node locks
- self.needed_locks[locking.LEVEL_NODE_RES] = \
- _CopyLockList(self.needed_locks[locking.LEVEL_NODE])
+ _DeclareLocksForMigration(self, level)
def BuildHooksEnv(self):
"""Build hooks env.
def ExpandNames(self):
self._ExpandAndLockInstance()
-
- if self.op.target_node is not None:
- self.op.target_node = _ExpandNodeName(self.cfg, self.op.target_node)
-
- self.needed_locks[locking.LEVEL_NODE] = []
- self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
-
- self.needed_locks[locking.LEVEL_NODE] = []
- self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
+ _ExpandNamesForMigration(self)
self._migrater = \
- TLMigrateInstance(self, self.op.instance_name,
- cleanup=self.op.cleanup,
- failover=False,
- fallback=self.op.allow_failover,
- allow_runtime_changes=self.op.allow_runtime_changes,
- ignore_ipolicy=self.op.ignore_ipolicy)
+ TLMigrateInstance(self, self.op.instance_name, self.op.cleanup,
+ False, self.op.allow_failover, False,
+ self.op.allow_runtime_changes,
+ constants.DEFAULT_SHUTDOWN_TIMEOUT,
+ self.op.ignore_ipolicy)
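+ # (positional arguments above, per TLMigrateInstance.__init__ below:
+ # cleanup=self.op.cleanup, failover=False, fallback=self.op.allow_failover,
+ # ignore_consistency=False, allow_runtime_changes,
+ # shutdown_timeout=DEFAULT_SHUTDOWN_TIMEOUT, ignore_ipolicy)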
+
self.tasklets = [self._migrater]
def DeclareLocks(self, level):
- if level == locking.LEVEL_NODE:
- instance = self.context.cfg.GetInstanceInfo(self.op.instance_name)
- if instance.disk_template in constants.DTS_EXT_MIRROR:
- if self.op.target_node is None:
- self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
- else:
- self.needed_locks[locking.LEVEL_NODE] = [instance.primary_node,
- self.op.target_node]
- del self.recalculate_locks[locking.LEVEL_NODE]
- else:
- self._LockInstancesNodes()
- elif level == locking.LEVEL_NODE_RES:
- # Copy node locks
- self.needed_locks[locking.LEVEL_NODE_RES] = \
- _CopyLockList(self.needed_locks[locking.LEVEL_NODE])
+ _DeclareLocksForMigration(self, level)
def BuildHooksEnv(self):
"""Build hooks env.
msg = result.fail_msg
if msg:
if self.op.ignore_consistency:
- self.proc.LogWarning("Could not shutdown instance %s on node %s."
- " Proceeding anyway. Please make sure node"
- " %s is down. Error details: %s",
- instance.name, source_node, source_node, msg)
+ self.LogWarning("Could not shutdown instance %s on node %s."
+ " Proceeding anyway. Please make sure node"
+ " %s is down. Error details: %s",
+ instance.name, source_node, source_node, msg)
else:
raise errors.OpExecError("Could not shutdown instance %s on"
" node %s: %s" %
target_node=self.op.target_node,
allow_runtime_changes=allow_runtime_changes,
ignore_ipolicy=self.op.ignore_ipolicy)]
- for inst in _GetNodePrimaryInstances(self.cfg, self.op.node_name)
- ]
+ for inst in _GetNodePrimaryInstances(self.cfg, self.op.node_name)]
# TODO: Run iallocator in this opcode and pass correct placement options to
# OpInstanceMigrate. Since other jobs can modify the cluster between
_MIGRATION_POLL_INTERVAL = 1 # seconds
_MIGRATION_FEEDBACK_INTERVAL = 10 # seconds
- def __init__(self, lu, instance_name, cleanup=False,
- failover=False, fallback=False,
- ignore_consistency=False,
- allow_runtime_changes=True,
- shutdown_timeout=constants.DEFAULT_SHUTDOWN_TIMEOUT,
- ignore_ipolicy=False):
+ def __init__(self, lu, instance_name, cleanup, failover, fallback,
+ ignore_consistency, allow_runtime_changes, shutdown_timeout,
+ ignore_ipolicy):
"""Initializes this class.
"""
for i in range(disk_count)])
if template_name == constants.DT_PLAIN:
+
def logical_id_fn(idx, _, disk):
vg = disk.get(constants.IDISK_VG, vgname)
return (vg, names[idx])
+
elif template_name in (constants.DT_FILE, constants.DT_SHARED_FILE):
logical_id_fn = \
lambda _, disk_index, disk: (file_driver,
hypervisor=op.hypervisor)
-def _ComputeNics(op, cluster, default_ip, cfg, proc):
+def _ComputeNics(op, cluster, default_ip, cfg, ec_id):
"""Computes the nics.
@param op: The instance opcode
@param cluster: Cluster configuration object
@param default_ip: The default ip to assign
@param cfg: An instance of the configuration object
- @param proc: The executer instance
+ @param ec_id: Execution context ID
@returns: The build up nics
"""
nics = []
- for idx, nic in enumerate(op.nics):
+ for nic in op.nics:
nic_mode_req = nic.get(constants.INIC_MODE, None)
nic_mode = nic_mode_req
if nic_mode is None or nic_mode == constants.VALUE_AUTO:
nic_mode = cluster.nicparams[constants.PP_DEFAULT][constants.NIC_MODE]
- # in routed mode, for the first nic, the default ip is 'auto'
- if nic_mode == constants.NIC_MODE_ROUTED and idx == 0:
- default_ip_mode = constants.VALUE_AUTO
+ net = nic.get(constants.INIC_NETWORK, None)
+ link = nic.get(constants.INIC_LINK, None)
+ ip = nic.get(constants.INIC_IP, None)
+
+ if net is None or net.lower() == constants.VALUE_NONE:
+ net = None
else:
- default_ip_mode = constants.VALUE_NONE
+ if nic_mode_req is not None or link is not None:
+ raise errors.OpPrereqError("If network is given, no mode or link"
+ " is allowed to be passed",
+ errors.ECODE_INVAL)
# ip validity checks
- ip = nic.get(constants.INIC_IP, default_ip_mode)
if ip is None or ip.lower() == constants.VALUE_NONE:
nic_ip = None
elif ip.lower() == constants.VALUE_AUTO:
errors.ECODE_INVAL)
nic_ip = default_ip
else:
- if not netutils.IPAddress.IsValid(ip):
+ # We defer pool operations until later, so that the iallocator has
+ # filled in the instance's node(s)
+ if ip.lower() == constants.NIC_IP_POOL:
+ if net is None:
+ raise errors.OpPrereqError("if ip=pool, parameter network"
+ " must be passed too",
+ errors.ECODE_INVAL)
+
+ elif not netutils.IPAddress.IsValid(ip):
raise errors.OpPrereqError("Invalid IP address '%s'" % ip,
errors.ECODE_INVAL)
+
nic_ip = ip
# TODO: check the ip address for uniqueness
try:
# TODO: We need to factor this out
- cfg.ReserveMAC(mac, proc.GetECId())
+ cfg.ReserveMAC(mac, ec_id)
except errors.ReservationError:
raise errors.OpPrereqError("MAC address %s already in use"
" in cluster" % mac,
errors.ECODE_NOTUNIQUE)
# Build nic parameters
- link = nic.get(constants.INIC_LINK, None)
- if link == constants.VALUE_AUTO:
- link = cluster.nicparams[constants.PP_DEFAULT][constants.NIC_LINK]
nicparams = {}
if nic_mode_req:
nicparams[constants.NIC_MODE] = nic_mode
check_params = cluster.SimpleFillNIC(nicparams)
objects.NIC.CheckParameterSyntax(check_params)
- nics.append(objects.NIC(mac=mac, ip=nic_ip, nicparams=nicparams))
+ nics.append(objects.NIC(mac=mac, ip=nic_ip,
+ network=net, nicparams=nicparams))
return nics
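+
+# Illustrative sketch (hypothetical values): an opcode NIC entry such as
+#   {"network": "net1", "ip": "pool", "mac": "auto"}
+# becomes an objects.NIC with network="net1", the literal ip "pool" (resolved
+# from net1's address pool later, once the primary node is known) and the mac
+# still "auto" (generated later via GenerateMAC). Note that mode/link may not
+# be passed together with a network, per the check above.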
"""Run the allocator based on input opcode.
"""
+ # TODO: Export network to iallocator so that it chooses a pnode
+ # in a nodegroup that has the desired network connected
req = _CreateInstanceAllocRequest(self.op, self.disks,
self.nics, self.be_full)
ial = iallocator.IAllocator(self.cfg, self.rpc, req)
# NIC buildup
self.nics = _ComputeNics(self.op, cluster, self.hostname1.ip, self.cfg,
- self.proc)
+ self.proc.GetECId())
# disk checks/pre-build
default_vg = self.cfg.GetVGName()
# creation job will fail.
for nic in self.nics:
if nic.mac in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
- nic.mac = self.cfg.GenerateMAC(self.proc.GetECId())
+ nic.mac = self.cfg.GenerateMAC(nic.network, self.proc.GetECId())
#### allocator run
self.secondaries = []
+ # Fill in any IPs from IP pools. This must happen here, because we need to
+ # know the nic's primary node, as specified by the iallocator
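+ # (illustrative example with hypothetical values: a NIC declared with
+ # network="net1" and ip="pool" inherits net1's netparams from the primary
+ # node's nodegroup and gets a free address, e.g. 192.0.2.17, reserved from
+ # net1's address pool)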
+ for idx, nic in enumerate(self.nics):
+ net = nic.network
+ if net is not None:
+ netparams = self.cfg.GetGroupNetParams(net, self.pnode.name)
+ if netparams is None:
+ raise errors.OpPrereqError("No netparams found for network"
+ " %s. Propably not connected to"
+ " node's %s nodegroup" %
+ (net, self.pnode.name),
+ errors.ECODE_INVAL)
+ self.LogInfo("NIC/%d inherits netparams %s" %
+ (idx, netparams.values()))
+ nic.nicparams = dict(netparams)
+ if nic.ip is not None:
+ if nic.ip.lower() == constants.NIC_IP_POOL:
+ try:
+ nic.ip = self.cfg.GenerateIp(net, self.proc.GetECId())
+ except errors.ReservationError:
+ raise errors.OpPrereqError("Unable to get a free IP for NIC %d"
+ " from the address pool" % idx,
+ errors.ECODE_STATE)
+ self.LogInfo("Chose IP %s from network %s", nic.ip, net)
+ else:
+ try:
+ self.cfg.ReserveIp(net, nic.ip, self.proc.GetECId())
+ except errors.ReservationError:
+ raise errors.OpPrereqError("IP address %s already in use"
+ " or does not belong to network %s" %
+ (nic.ip, net),
+ errors.ECODE_NOTUNIQUE)
+ else:
+ # net is None, ip None or given
+ if self.op.conflicts_check:
+ _CheckForConflictingIp(self, nic.ip, self.pnode.name)
+
# mirror node verification
if self.op.disk_template in constants.DTS_INT_MIRROR:
if self.op.snode == pnode.name:
if baddisks:
raise errors.OpPrereqError("Device node(s) %s lie outside %s and"
" cannot be adopted" %
- (", ".join(baddisks),
+ (utils.CommaJoin(baddisks),
constants.ADOPTABLE_BLOCKDEV_ROOT),
errors.ECODE_INVAL)
"""
cluster = self.cfg.GetClusterInfo()
default_vg = self.cfg.GetVGName()
+ ec_id = self.proc.GetECId()
+
insts = [_CreateInstanceAllocRequest(op, _ComputeDisks(op, default_vg),
_ComputeNics(op, cluster, None,
- self.cfg, self.proc),
+ self.cfg, ec_id),
_ComputeFullBeParams(op, cluster))
for op in self.op.instances]
+
req = iallocator.IAReqMultiInstanceAlloc(instances=insts)
ial = iallocator.IAllocator(self.cfg, self.rpc, req)
self.replacer = TLReplaceDisks(self, self.op.instance_name, self.op.mode,
self.op.iallocator, self.op.remote_node,
- self.op.disks, False, self.op.early_release,
+ self.op.disks, self.op.early_release,
self.op.ignore_ipolicy)
self.tasklets = [self.replacer]
"""
def __init__(self, lu, instance_name, mode, iallocator_name, remote_node,
- disks, delay_iallocator, early_release, ignore_ipolicy):
+ disks, early_release, ignore_ipolicy):
"""Initializes this class.
"""
self.iallocator_name = iallocator_name
self.remote_node = remote_node
self.disks = disks
- self.delay_iallocator = delay_iallocator
self.early_release = early_release
self.ignore_ipolicy = ignore_ipolicy
len(instance.secondary_nodes),
errors.ECODE_FAULT)
- if not self.delay_iallocator:
- self._CheckPrereq2()
-
- def _CheckPrereq2(self):
- """Check prerequisites, second part.
-
- This function should always be part of CheckPrereq. It was separated and is
- now called from Exec because during node evacuation iallocator was only
- called with an unmodified cluster model, not taking planned changes into
- account.
-
- """
instance = self.instance
secondary_node = instance.secondary_nodes[0]
This dispatches the disk replacement to the appropriate handler.
"""
- if self.delay_iallocator:
- self._CheckPrereq2()
-
if __debug__:
# Verify owned locks before starting operation
owned_nodes = self.lu.owned_locks(locking.LEVEL_NODE)
continue
for node in nodes:
- self.lu.LogInfo("Checking disk/%d on %s" % (idx, node))
+ self.lu.LogInfo("Checking disk/%d on %s", idx, node)
self.cfg.SetDiskID(dev, node)
result = _BlockdevFind(self, node, dev, self.instance)
if idx not in self.disks:
continue
- self.lu.LogInfo("Adding storage on %s for disk/%d" % (node_name, idx))
+ self.lu.LogInfo("Adding storage on %s for disk/%d", node_name, idx)
self.cfg.SetDiskID(dev, node_name)
def _RemoveOldStorage(self, node_name, iv_names):
for name, (_, old_lvs, _) in iv_names.iteritems():
- self.lu.LogInfo("Remove logical volumes for %s" % name)
+ self.lu.LogInfo("Remove logical volumes for %s", name)
for lv in old_lvs:
self.cfg.SetDiskID(lv, node_name)
msg = self.rpc.call_blockdev_remove(node_name, lv).fail_msg
if msg:
- self.lu.LogWarning("Can't remove old LV: %s" % msg,
+ self.lu.LogWarning("Can't remove old LV: %s", msg,
hint="remove unused LVs manually")
def _ExecDrbd8DiskOnly(self, feedback_fn): # pylint: disable=W0613
# Step: for each lv, detach+rename*2+attach
self.lu.LogStep(4, steps_total, "Changing drbd configuration")
for dev, old_lvs, new_lvs in iv_names.itervalues():
- self.lu.LogInfo("Detaching %s drbd from local storage" % dev.iv_name)
+ self.lu.LogInfo("Detaching %s drbd from local storage", dev.iv_name)
result = self.rpc.call_blockdev_removechildren(self.target_node, dev,
old_lvs)
self.cfg.SetDiskID(disk, self.target_node)
# Now that the new lvs have the old name, we can add them to the device
- self.lu.LogInfo("Adding new mirror component on %s" % self.target_node)
+ self.lu.LogInfo("Adding new mirror component on %s", self.target_node)
result = self.rpc.call_blockdev_addchildren(self.target_node,
(dev, self.instance), new_lvs)
msg = result.fail_msg
# We have new devices, shutdown the drbd on the old secondary
for idx, dev in enumerate(self.instance.disks):
- self.lu.LogInfo("Shutting down drbd for disk/%d on old node" % idx)
+ self.lu.LogInfo("Shutting down drbd for disk/%d on old node", idx)
self.cfg.SetDiskID(dev, self.target_node)
msg = self.rpc.call_blockdev_shutdown(self.target_node,
(dev, self.instance)).fail_msg
errors.ECODE_STATE)
except errors.OpPrereqError, err:
if self.op.ignore_consistency:
- self.proc.LogWarning(str(err.args[0]))
+ self.LogWarning(str(err.args[0]))
else:
raise
disks=[],
mode=constants.REPLACE_DISK_CHG,
early_release=self.op.early_release)]
- for instance_name in self.instance_names
- ]
+ for instance_name in self.instance_names]
else:
raise errors.ProgrammerError("No iallocator or remote node")
if self.op.wait_for_sync:
disk_abort = not _WaitForSync(self, instance, disks=[disk])
if disk_abort:
- self.proc.LogWarning("Disk sync-ing has not returned a good"
- " status; please check the instance")
+ self.LogWarning("Disk syncing has not returned a good status; check"
+ " the instance")
if instance.admin_state != constants.ADMINST_UP:
_SafeShutdownInstanceDisks(self, instance, disks=[disk])
elif instance.admin_state != constants.ADMINST_UP:
- self.proc.LogWarning("Not shutting down the disk even if the instance is"
- " not supposed to be running because no wait for"
- " sync mode was requested")
+ self.LogWarning("Not shutting down the disk even if the instance is"
+ " not supposed to be running because no wait for"
+ " sync mode was requested")
assert self.owned_locks(locking.LEVEL_NODE_RES)
assert set([instance.name]) == self.owned_locks(locking.LEVEL_INSTANCE)
"""
if op in (constants.DDM_ADD, constants.DDM_MODIFY):
ip = params.get(constants.INIC_IP, None)
- if ip is None:
- pass
- elif ip.lower() == constants.VALUE_NONE:
- params[constants.INIC_IP] = None
- elif not netutils.IPAddress.IsValid(ip):
- raise errors.OpPrereqError("Invalid IP address '%s'" % ip,
- errors.ECODE_INVAL)
-
- bridge = params.get("bridge", None)
- link = params.get(constants.INIC_LINK, None)
- if bridge and link:
- raise errors.OpPrereqError("Cannot pass 'bridge' and 'link'"
- " at the same time", errors.ECODE_INVAL)
- elif bridge and bridge.lower() == constants.VALUE_NONE:
- params["bridge"] = None
- elif link and link.lower() == constants.VALUE_NONE:
- params[constants.INIC_LINK] = None
+ req_net = params.get(constants.INIC_NETWORK, None)
+ link = params.get(constants.INIC_LINK, None)
+ mode = params.get(constants.INIC_MODE, None)
+ if req_net is not None:
+ if req_net.lower() == constants.VALUE_NONE:
+ params[constants.INIC_NETWORK] = None
+ req_net = None
+ elif link is not None or mode is not None:
+ raise errors.OpPrereqError("If network is given"
+ " mode or link should not",
+ errors.ECODE_INVAL)
if op == constants.DDM_ADD:
macaddr = params.get(constants.INIC_MAC, None)
if macaddr is None:
params[constants.INIC_MAC] = constants.VALUE_AUTO
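+ # ip handling below (summary): "none" clears the address, "pool" requests
+ # one from the NIC's network (so a network must be given when adding a
+ # NIC), anything else must be a syntactically valid IP address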
+ if ip is not None:
+ if ip.lower() == constants.VALUE_NONE:
+ params[constants.INIC_IP] = None
+ else:
+ if ip.lower() == constants.NIC_IP_POOL:
+ if op == constants.DDM_ADD and req_net is None:
+ raise errors.OpPrereqError("If ip=pool, parameter network"
+ " cannot be none",
+ errors.ECODE_INVAL)
+ else:
+ if not netutils.IPAddress.IsValid(ip):
+ raise errors.OpPrereqError("Invalid IP address '%s'" % ip,
+ errors.ECODE_INVAL)
+
if constants.INIC_MAC in params:
macaddr = params[constants.INIC_MAC]
if macaddr not in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
def ExpandNames(self):
self._ExpandAndLockInstance()
+ self.needed_locks[locking.LEVEL_NODEGROUP] = []
# Can't even acquire node locks in shared mode as upcoming changes in
# Ganeti 2.6 will start to modify the node object on disk conversion
self.needed_locks[locking.LEVEL_NODE] = []
self.needed_locks[locking.LEVEL_NODE_RES] = []
self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
+ # Lock the node group (shared) to look up the ipolicy
+ self.share_locks[locking.LEVEL_NODEGROUP] = 1
def DeclareLocks(self, level):
- # TODO: Acquire group lock in shared mode (disk parameters)
- if level == locking.LEVEL_NODE:
+ if level == locking.LEVEL_NODEGROUP:
+ assert not self.needed_locks[locking.LEVEL_NODEGROUP]
+ # Acquire locks for the instance's nodegroups optimistically. Needs
+ # to be verified in CheckPrereq
+ self.needed_locks[locking.LEVEL_NODEGROUP] = \
+ self.cfg.GetInstanceNodeGroups(self.op.instance_name)
+ elif level == locking.LEVEL_NODE:
self._LockInstancesNodes()
if self.op.disk_template and self.op.remote_node:
self.op.remote_node = _ExpandNodeName(self.cfg, self.op.remote_node)
This runs on the master, primary and secondaries.
"""
- args = dict()
+ args = {}
if constants.BE_MINMEM in self.be_new:
args["minmem"] = self.be_new[constants.BE_MINMEM]
if constants.BE_MAXMEM in self.be_new:
nics = []
for nic in self._new_nics:
- nicparams = self.cluster.SimpleFillNIC(nic.nicparams)
- mode = nicparams[constants.NIC_MODE]
- link = nicparams[constants.NIC_LINK]
- nics.append((nic.ip, nic.mac, mode, link))
+ n = copy.deepcopy(nic)
+ nicparams = self.cluster.SimpleFillNIC(n.nicparams)
+ n.nicparams = nicparams
+ nics.append(_NICToTuple(self, n))
args["nics"] = nics
nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
return (nl, nl)
- def _PrepareNicModification(self, params, private, old_ip, old_params,
- cluster, pnode):
+ def _PrepareNicModification(self, params, private, old_ip, old_net,
+ old_params, cluster, pnode):
+
update_params_dict = dict([(key, params[key])
for key in constants.NICS_PARAMETERS
if key in params])
- if "bridge" in params:
- update_params_dict[constants.NIC_LINK] = params["bridge"]
+ req_link = update_params_dict.get(constants.NIC_LINK, None)
+ req_mode = update_params_dict.get(constants.NIC_MODE, None)
+
+ new_net = params.get(constants.INIC_NETWORK, old_net)
+ if new_net is not None:
+ netparams = self.cfg.GetGroupNetParams(new_net, pnode)
+ if netparams is None:
+ raise errors.OpPrereqError("No netparams found for the network"
+ " %s, probably not connected" % new_net,
+ errors.ECODE_INVAL)
+ new_params = dict(netparams)
+ else:
+ new_params = _GetUpdatedParams(old_params, update_params_dict)
- new_params = _GetUpdatedParams(old_params, update_params_dict)
utils.ForceDictType(new_params, constants.NICS_PARAMETER_TYPES)
new_filled_params = cluster.SimpleFillNIC(new_params)
elif mac in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
# otherwise generate the MAC address
params[constants.INIC_MAC] = \
- self.cfg.GenerateMAC(self.proc.GetECId())
+ self.cfg.GenerateMAC(new_net, self.proc.GetECId())
else:
# or validate/reserve the current one
try:
raise errors.OpPrereqError("MAC address '%s' already in use"
" in cluster" % mac,
errors.ECODE_NOTUNIQUE)
+ elif new_net != old_net:
+
+ def get_net_prefix(net):
+ if net:
+ uuid = self.cfg.LookupNetwork(net)
+ if uuid:
+ nobj = self.cfg.GetNetwork(uuid)
+ return nobj.mac_prefix
+ return None
+
+ new_prefix = get_net_prefix(new_net)
+ old_prefix = get_net_prefix(old_net)
+ if old_prefix != new_prefix:
+ params[constants.INIC_MAC] = \
+ self.cfg.GenerateMAC(new_net, self.proc.GetECId())
+
+ # Handle a change in the NIC's (network, IP) configuration
+ new_ip = params.get(constants.INIC_IP, old_ip)
+ if (new_ip, new_net) != (old_ip, old_net):
+ if new_ip:
+ if new_net:
+ if new_ip.lower() == constants.NIC_IP_POOL:
+ try:
+ new_ip = self.cfg.GenerateIp(new_net, self.proc.GetECId())
+ except errors.ReservationError:
+ raise errors.OpPrereqError("Unable to get a free IP"
+ " from the address pool",
+ errors.ECODE_STATE)
+ self.LogInfo("Chose IP %s from pool %s", new_ip, new_net)
+ params[constants.INIC_IP] = new_ip
+ elif new_ip != old_ip or new_net != old_net:
+ try:
+ self.LogInfo("Reserving IP %s in pool %s", new_ip, new_net)
+ self.cfg.ReserveIp(new_net, new_ip, self.proc.GetECId())
+ except errors.ReservationError:
+ raise errors.OpPrereqError("IP %s not available in network %s" %
+ (new_ip, new_net),
+ errors.ECODE_NOTUNIQUE)
+ elif new_ip.lower() == constants.NIC_IP_POOL:
+ raise errors.OpPrereqError("ip=pool, but no network found",
+ errors.ECODE_INVAL)
+ else:
+ # new net is None
+ if self.op.conflicts_check:
+ _CheckForConflictingIp(self, new_ip, pnode)
+
+ if old_ip:
+ if old_net:
+ try:
+ self.cfg.ReleaseIp(old_net, old_ip, self.proc.GetECId())
+ except errors.AddressPoolError:
+ logging.warning("Release IP %s not contained in network %s",
+ old_ip, old_net)
+
+ # there are no changes in (net, ip) tuple
+ elif (old_net is not None and
+ (req_link is not None or req_mode is not None)):
+ raise errors.OpPrereqError("Not allowed to change link or mode of"
+ " a NIC that is connected to a network",
+ errors.ECODE_INVAL)
private.params = new_params
private.filled = new_filled_params
This only checks the instance list against the existing names.
"""
- # checking the new params on the primary/secondary nodes
-
+ assert self.op.instance_name in self.owned_locks(locking.LEVEL_INSTANCE)
instance = self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
+
cluster = self.cluster = self.cfg.GetClusterInfo()
assert self.instance is not None, \
"Cannot retrieve locked instance %s" % self.op.instance_name
+
pnode = instance.primary_node
+ assert pnode in self.owned_locks(locking.LEVEL_NODE)
nodelist = list(instance.all_nodes)
pnode_info = self.cfg.GetNodeInfo(pnode)
self.diskparams = self.cfg.GetInstanceDiskParams(instance)
+ #_CheckInstanceNodeGroups(self.cfg, self.op.instance_name, owned_groups)
+ assert pnode_info.group in self.owned_locks(locking.LEVEL_NODEGROUP)
+ group_info = self.cfg.GetNodeGroup(pnode_info.group)
+
+ # dictionary with instance information after the modification
+ ispec = {}
+
# Prepare disk/NIC modifications
self.diskmod = PrepareContainerMods(self.op.disks, None)
self.nicmod = PrepareContainerMods(self.op.nics, _InstNicModPrivate)
" diskless instances", errors.ECODE_INVAL)
def _PrepareNicCreate(_, params, private):
- self._PrepareNicModification(params, private, None, {}, cluster, pnode)
+ self._PrepareNicModification(params, private, None, None,
+ {}, cluster, pnode)
return (None, None)
def _PrepareNicMod(_, nic, params, private):
- self._PrepareNicModification(params, private, nic.ip,
+ self._PrepareNicModification(params, private, nic.ip, nic.network,
nic.nicparams, cluster, pnode)
return None
+ def _PrepareNicRemove(_, params, __):
+ ip = params.ip
+ net = params.network
+ if net is not None and ip is not None:
+ self.cfg.ReleaseIp(net, ip, self.proc.GetECId())
+
# Verify NIC changes (operating on copy)
nics = instance.nics[:]
ApplyContainerMods("NIC", nics, None, self.nicmod,
- _PrepareNicCreate, _PrepareNicMod, None)
+ _PrepareNicCreate, _PrepareNicMod, _PrepareNicRemove)
if len(nics) > constants.MAX_NICS:
raise errors.OpPrereqError("Instance has too many network interfaces"
" (%d), cannot add more" % constants.MAX_NICS,
raise errors.OpPrereqError("Instance has too many disks (%d), cannot add"
" more" % constants.MAX_DISKS,
errors.ECODE_STATE)
+ disk_sizes = [disk.size for disk in instance.disks]
+ disk_sizes.extend(params["size"] for (op, idx, params, private) in
+ self.diskmod if op == constants.DDM_ADD)
+ ispec[constants.ISPEC_DISK_COUNT] = len(disk_sizes)
+ ispec[constants.ISPEC_DISK_SIZE] = disk_sizes
if self.op.offline is not None:
if self.op.offline:
ApplyContainerMods("NIC", nics, self._nic_chgdesc, self.nicmod,
self._CreateNewNic, self._ApplyNicMods, None)
self._new_nics = nics
+ ispec[constants.ISPEC_NIC_COUNT] = len(self._new_nics)
else:
self._new_nics = None
+ ispec[constants.ISPEC_NIC_COUNT] = len(instance.nics)
+
+ if not self.op.ignore_ipolicy:
+ ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(cluster,
+ group_info)
+
+ # Fill ispec with backend parameters
+ ispec[constants.ISPEC_SPINDLE_USE] = \
+ self.be_new.get(constants.BE_SPINDLE_USE, None)
+ ispec[constants.ISPEC_CPU_COUNT] = self.be_new.get(constants.BE_VCPUS,
+ None)
+
+ # Copy ispec to verify parameters with min/max values separately
+ ispec_max = ispec.copy()
+ ispec_max[constants.ISPEC_MEM_SIZE] = \
+ self.be_new.get(constants.BE_MAXMEM, None)
+ res_max = _ComputeIPolicyInstanceSpecViolation(ipolicy, ispec_max)
+ ispec_min = ispec.copy()
+ ispec_min[constants.ISPEC_MEM_SIZE] = \
+ self.be_new.get(constants.BE_MINMEM, None)
+ res_min = _ComputeIPolicyInstanceSpecViolation(ipolicy, ispec_min)
+
+ if res_max or res_min:
+ # FIXME: Improve error message by including information about whether
+ # the upper or lower limit of the parameter fails the ipolicy.
+ msg = ("Instance allocation to group %s (%s) violates policy: %s" %
+ (group_info, group_info.name,
+ utils.CommaJoin(set(res_max + res_min))))
+ raise errors.OpPrereqError(msg, errors.ECODE_INVAL)
def _ConvertPlainToDrbd(self, feedback_fn):
"""Converts an instance from plain to drbd.
"""
mac = params[constants.INIC_MAC]
ip = params.get(constants.INIC_IP, None)
- nicparams = private.params
+ net = params.get(constants.INIC_NETWORK, None)
+ # TODO: is private.filled the right choice here? Can a NIC have no
+ # nicparams?
+ nicparams = private.filled
- return (objects.NIC(mac=mac, ip=ip, nicparams=nicparams), [
+ return (objects.NIC(mac=mac, ip=ip, network=net, nicparams=nicparams), [
("nic.%d" % idx,
- "add:mac=%s,ip=%s,mode=%s,link=%s" %
+ "add:mac=%s,ip=%s,mode=%s,link=%s,network=%s" %
(mac, ip, private.filled[constants.NIC_MODE],
- private.filled[constants.NIC_LINK])),
+ private.filled[constants.NIC_LINK],
+ net)),
])
@staticmethod
"""
changes = []
- for key in [constants.INIC_MAC, constants.INIC_IP]:
+ for key in [constants.INIC_MAC, constants.INIC_IP, constants.INIC_NETWORK]:
if key in params:
changes.append(("nic.%s/%d" % (key, idx), params[key]))
setattr(nic, key, params[key])
- if private.params:
- nic.nicparams = private.params
+ if private.filled:
+ nic.nicparams = private.filled
- for (key, val) in params.items():
+ for (key, val) in nic.nicparams.items():
changes.append(("nic.%s/%d" % (key, idx), val))
return changes
self.cfg.MarkInstanceDown(instance.name)
result.append(("admin_state", constants.ADMINST_DOWN))
- self.cfg.Update(instance, feedback_fn)
+ self.cfg.Update(instance, feedback_fn, self.proc.GetECId())
assert not (self.owned_locks(locking.LEVEL_NODE_RES) or
self.owned_locks(locking.LEVEL_NODE)), \
self.group_uuid = self.cfg.LookupNodeGroup(self.op.name)
lock_level = locking.LEVEL_NODEGROUP
lock_name = self.group_uuid
+ elif self.op.kind == constants.TAG_NETWORK:
+ self.network_uuid = self.cfg.LookupNetwork(self.op.name)
+ lock_level = locking.LEVEL_NETWORK
+ lock_name = self.network_uuid
else:
lock_level = None
lock_name = None
self.target = self.cfg.GetInstanceInfo(self.op.name)
elif self.op.kind == constants.TAG_NODEGROUP:
self.target = self.cfg.GetNodeGroup(self.group_uuid)
+ elif self.op.kind == constants.TAG_NETWORK:
+ self.target = self.cfg.GetNetwork(self.network_uuid)
else:
raise errors.OpPrereqError("Wrong tag type requested (%s)" %
str(self.op.kind), errors.ECODE_INVAL)
else:
top_value = self.op.repeat - 1
for i in range(self.op.repeat):
- self.LogInfo("Test delay iteration %d/%d" % (i, top_value))
+ self.LogInfo("Test delay iteration %d/%d", i, top_value)
self._TestDelay()
self.op.mode, errors.ECODE_INVAL)
if self.op.direction == constants.IALLOCATOR_DIR_OUT:
- if self.op.allocator is None:
+ if self.op.iallocator is None:
raise errors.OpPrereqError("Missing allocator name",
errors.ECODE_INVAL)
elif self.op.direction != constants.IALLOCATOR_DIR_IN:
if self.op.direction == constants.IALLOCATOR_DIR_IN:
result = ial.in_text
else:
- ial.Run(self.op.allocator, validate=False)
+ ial.Run(self.op.iallocator, validate=False)
result = ial.out_text
return result
-# Network LUs
+
class LUNetworkAdd(LogicalUnit):
"""Logical unit for creating networks.
def ExpandNames(self):
self.network_uuid = self.cfg.GenerateUniqueID(self.proc.GetECId())
- self.needed_locks = {}
+
+ if self.op.conflicts_check:
+ self.share_locks[locking.LEVEL_NODE] = 1
+ self.needed_locks = {
+ locking.LEVEL_NODE: locking.ALL_SET,
+ }
+ else:
+ self.needed_locks = {}
+
self.add_locks[locking.LEVEL_NETWORK] = self.network_uuid
def CheckPrereq(self):
raise errors.OpPrereqError("Network '%s' already defined" %
self.op.network, errors.ECODE_EXISTS)
+ if self.op.mac_prefix:
+ utils.NormalizeAndValidateMac(self.op.mac_prefix + ":00:00:00")
+
+ # Check tag validity
+ for tag in self.op.tags:
+ objects.TaggableObject.ValidateTag(tag)
def BuildHooksEnv(self):
"""Build hooks env.
"""
- env = {
- "NETWORK_NAME": self.op.network_name,
- "NETWORK_SUBNET": self.op.network,
- "NETWORK_GATEWAY": self.op.gateway,
- "NETWORK_SUBNET6": self.op.network6,
- "NETWORK_GATEWAY6": self.op.gateway6,
- "NETWORK_MAC_PREFIX": self.op.mac_prefix,
- "NETWORK_TYPE": self.op.network_type,
+ args = {
+ "name": self.op.network_name,
+ "subnet": self.op.network,
+ "gateway": self.op.gateway,
+ "network6": self.op.network6,
+ "gateway6": self.op.gateway6,
+ "mac_prefix": self.op.mac_prefix,
+ "network_type": self.op.network_type,
+ "tags": self.op.tags,
}
- return env
+ return _BuildNetworkHookEnv(**args) # pylint: disable=W0142
def Exec(self, feedback_fn):
"""Add the ip pool to the cluster.
mac_prefix=self.op.mac_prefix,
network_type=self.op.network_type,
uuid=self.network_uuid,
- family=4)
+ family=constants.IP4_VERSION)
# Initialize the associated address pool
try:
pool = network.AddressPool.InitializeNetwork(nobj)
# Check if we need to reserve the nodes and the cluster master IP
# These may not be allocated to any instances in routed mode, as
# they wouldn't function anyway.
- for node in self.cfg.GetAllNodesInfo().values():
- for ip in [node.primary_ip, node.secondary_ip]:
- try:
- pool.Reserve(ip)
- self.LogInfo("Reserved node %s's IP (%s)", node.name, ip)
-
- except errors.AddressPoolError:
- pass
-
- master_ip = self.cfg.GetClusterInfo().master_ip
- try:
- pool.Reserve(master_ip)
- self.LogInfo("Reserved cluster master IP (%s)", master_ip)
- except errors.AddressPoolError:
- pass
+ if self.op.conflicts_check:
+ for node in self.cfg.GetAllNodesInfo().values():
+ for ip in [node.primary_ip, node.secondary_ip]:
+ try:
+ if pool.Contains(ip):
+ pool.Reserve(ip)
+ self.LogInfo("Reserved IP address of node '%s' (%s)",
+ node.name, ip)
+ except errors.AddressPoolError:
+ self.LogWarning("Cannot reserve IP address of node '%s' (%s)",
+ node.name, ip)
+
+ master_ip = self.cfg.GetClusterInfo().master_ip
+ try:
+ if pool.Contains(master_ip):
+ pool.Reserve(master_ip)
+ self.LogInfo("Reserved cluster master IP address (%s)", master_ip)
+ except errors.AddressPoolError:
+ self.LogWarning("Cannot reserve cluster master IP address (%s)",
+ master_ip)
if self.op.add_reserved_ips:
for ip in self.op.add_reserved_ips:
except errors.AddressPoolError, e:
raise errors.OpExecError("Cannot reserve IP %s. %s " % (ip, e))
+ if self.op.tags:
+ for tag in self.op.tags:
+ nobj.AddTag(tag)
+
self.cfg.AddNetwork(nobj, self.proc.GetECId(), check_uuid=False)
del self.remove_locks[locking.LEVEL_NETWORK]
def ExpandNames(self):
self.network_uuid = self.cfg.LookupNetwork(self.op.network_name)
+ if not self.network_uuid:
+ raise errors.OpPrereqError("Network %s not found" % self.op.network_name,
+ errors.ECODE_INVAL)
+
+ self.share_locks[locking.LEVEL_NODEGROUP] = 1
self.needed_locks = {
locking.LEVEL_NETWORK: [self.network_uuid],
+ locking.LEVEL_NODEGROUP: locking.ALL_SET,
}
-
def CheckPrereq(self):
"""Check prerequisites.
cluster.
"""
- if not self.network_uuid:
- raise errors.OpPrereqError("Network %s not found" % self.op.network_name,
- errors.ECODE_INVAL)
# Verify that the network is not connected.
node_groups = [group.name
for group in self.cfg.GetAllNodeGroupsInfo().values()
- for network in group.networks.keys()
- if network == self.network_uuid]
+ for net in group.networks.keys()
+ if net == self.network_uuid]
if node_groups:
self.LogWarning("Nework '%s' is connected to the following"
raise errors.OpPrereqError("Cannot modify gateway and reserved ips"
" at once", errors.ECODE_INVAL)
-
def ExpandNames(self):
self.network_uuid = self.cfg.LookupNetwork(self.op.network_name)
self.network = self.cfg.GetNetwork(self.network_uuid)
- self.needed_locks = {
- locking.LEVEL_NETWORK: [self.network_uuid],
- }
-
-
if self.network is None:
raise errors.OpPrereqError("Could not retrieve network '%s' (UUID: %s)" %
(self.op.network_name, self.network_uuid),
errors.ECODE_INVAL)
+ self.needed_locks = {
+ locking.LEVEL_NETWORK: [self.network_uuid],
+ }
def CheckPrereq(self):
"""Check prerequisites.
self.mac_prefix = self.network.mac_prefix
self.network6 = self.network.network6
self.gateway6 = self.network.gateway6
+ self.tags = self.network.tags
self.pool = network.AddressPool(self.network)
if self.op.mac_prefix == constants.VALUE_NONE:
self.mac_prefix = None
else:
+ utils.NormalizeAndValidateMac(self.op.mac_prefix + ":00:00:00")
self.mac_prefix = self.op.mac_prefix
if self.op.gateway6:
else:
self.network6 = self.op.network6
-
-
def BuildHooksEnv(self):
"""Build hooks env.
"""
- env = {
- "NETWORK_NAME": self.op.network_name,
- "NETWORK_SUBNET": self.network.network,
- "NETWORK_GATEWAY": self.gateway,
- "NETWORK_SUBNET6": self.network6,
- "NETWORK_GATEWAY6": self.gateway6,
- "NETWORK_MAC_PREFIX": self.mac_prefix,
- "NETWORK_TYPE": self.network_type,
+ args = {
+ "name": self.op.network_name,
+ "subnet": self.network.network,
+ "gateway": self.gateway,
+ "network6": self.network6,
+ "gateway6": self.gateway6,
+ "mac_prefix": self.mac_prefix,
+ "network_type": self.network_type,
+ "tags": self.tags,
}
- return env
+ return _BuildNetworkHookEnv(**args) # pylint: disable=W0142
def BuildHooksNodes(self):
"""Build hooks nodes.
# extend cfg.ReserveIp/ReleaseIp with the external flag
if self.op.gateway:
if self.gateway == self.network.gateway:
- self.LogWarning("Gateway is already %s" % self.gateway)
+ self.LogWarning("Gateway is already %s", self.gateway)
else:
if self.gateway:
self.pool.Reserve(self.gateway, external=True)
for ip in self.op.add_reserved_ips:
try:
if self.pool.IsReserved(ip):
- self.LogWarning("IP %s is already reserved" % ip)
+ self.LogWarning("IP address %s is already reserved", ip)
else:
self.pool.Reserve(ip, external=True)
- except errors.AddressPoolError, e:
- self.LogWarning("Cannot reserve ip %s. %s" % (ip, e))
+ except errors.AddressPoolError, err:
+ self.LogWarning("Cannot reserve IP address %s: %s", ip, err)
if self.op.remove_reserved_ips:
for ip in self.op.remove_reserved_ips:
continue
try:
if not self.pool.IsReserved(ip):
- self.LogWarning("IP %s is already unreserved" % ip)
+ self.LogWarning("IP address %s is already unreserved", ip)
else:
self.pool.Release(ip, external=True)
- except errors.AddressPoolError, e:
- self.LogWarning("Cannot release ip %s. %s" % (ip, e))
+ except errors.AddressPoolError, err:
+ self.LogWarning("Cannot release IP address %s: %s", ip, err)
if self.op.mac_prefix:
self.network.mac_prefix = self.mac_prefix
do_instances = query.NETQ_INST in self.requested_data
do_groups = do_instances or (query.NETQ_GROUP in self.requested_data)
do_stats = query.NETQ_STATS in self.requested_data
- cluster = lu.cfg.GetClusterInfo()
network_to_groups = None
network_to_instances = None
if do_groups:
all_groups = lu.cfg.GetAllNodeGroupsInfo()
network_to_groups = dict((uuid, []) for uuid in self.wanted)
- default_nicpp = cluster.nicparams[constants.PP_DEFAULT]
if do_instances:
all_instances = lu.cfg.GetAllInstancesInfo()
all_nodes = lu.cfg.GetAllNodesInfo()
network_to_instances = dict((uuid, []) for uuid in self.wanted)
-
for group in all_groups.values():
if do_instances:
group_nodes = [node.name for node in all_nodes.values() if
netparams = group.networks[net_uuid]
mode = netparams[constants.NIC_MODE]
link = netparams[constants.NIC_LINK]
- info = group.name + '(' + mode + ', ' + link + ')'
+ info = group.name + "(" + mode + ", " + link + ")"
network_to_groups[net_uuid].append(info)
if do_instances:
"free_count": pool.GetFreeCount(),
"reserved_count": pool.GetReservedCount(),
"map": pool.GetMap(),
- "external_reservations": ", ".join(pool.GetExternalReservations()),
+ "external_reservations":
+ utils.CommaJoin(pool.GetExternalReservations()),
}
return query.NetworkQueryData([self._all_networks[uuid]
return self.nq.OldStyleQuery(self)
-
class LUNetworkConnect(LogicalUnit):
- def BuildHooksNodes(self):
- pass
+ """Connect a network to a nodegroup
+
+ """
+ HPATH = "network-connect"
+ HTYPE = constants.HTYPE_NETWORK
+ REQ_BGL = False
+
+ def ExpandNames(self):
+ self.network_name = self.op.network_name
+ self.group_name = self.op.group_name
+ self.network_mode = self.op.network_mode
+ self.network_link = self.op.network_link
+
+ self.network_uuid = self.cfg.LookupNetwork(self.network_name)
+ self.network = self.cfg.GetNetwork(self.network_uuid)
+ if self.network is None:
+ raise errors.OpPrereqError("Network %s does not exist" %
+ self.network_name, errors.ECODE_INVAL)
+
+ self.group_uuid = self.cfg.LookupNodeGroup(self.group_name)
+ self.group = self.cfg.GetNodeGroup(self.group_uuid)
+ if self.group is None:
+ raise errors.OpPrereqError("Group %s does not exist" %
+ self.group_name, errors.ECODE_INVAL)
+
+ self.share_locks[locking.LEVEL_INSTANCE] = 1
+ self.needed_locks = {
+ locking.LEVEL_INSTANCE: [],
+ locking.LEVEL_NODEGROUP: [self.group_uuid],
+ }
+
+ def DeclareLocks(self, level):
+ if level == locking.LEVEL_INSTANCE:
+ assert not self.needed_locks[locking.LEVEL_INSTANCE]
+
+ # Lock instances optimistically, needs verification once group lock has
+ # been acquired
+ if self.op.conflicts_check:
+ self.needed_locks[locking.LEVEL_INSTANCE] = \
+ self.cfg.GetNodeGroupInstances(self.group_uuid)
+ self.needed_locks[locking.LEVEL_NETWORK] = [self.network_uuid]
def BuildHooksEnv(self):
- pass
+ ret = {
+ "GROUP_NAME": self.group_name,
+ "GROUP_NETWORK_MODE": self.network_mode,
+ "GROUP_NETWORK_LINK": self.network_link,
+ }
+ ret.update(_BuildNetworkHookEnvByObject(self.network))
+ return ret
+
+ def BuildHooksNodes(self):
+ nodes = self.cfg.GetNodeGroup(self.group_uuid).members
+ return (nodes, nodes)
+
+ def CheckPrereq(self):
+ fmt = lambda value: utils.CommaJoin("%s: %s/%s" % (i[0], i[1], i[2])
+ for i in value)
+
+ self.netparams = {
+ constants.NIC_MODE: self.network_mode,
+ constants.NIC_LINK: self.network_link,
+ }
+ objects.NIC.CheckParameterSyntax(self.netparams)
+
+ #if self.network_mode == constants.NIC_MODE_BRIDGED:
+ # _CheckNodeGroupBridgesExist(self, self.network_link, self.group_uuid)
+ self.connected = False
+ if self.network_uuid in self.group.networks:
+ self.LogWarning("Network '%s' is already mapped to group '%s'" %
+ (self.network_name, self.group.name))
+ self.connected = True
+ return
+
+ pool = network.AddressPool(self.network)
+ if self.op.conflicts_check:
+ groupinstances = []
+ for n in self.cfg.GetNodeGroupInstances(self.group_uuid):
+ groupinstances.append(self.cfg.GetInstanceInfo(n))
+ instances = [(instance.name, idx, nic.ip)
+ for instance in groupinstances
+ for idx, nic in enumerate(instance.nics)
+ if (not nic.network and pool.Contains(nic.ip))]
+ if instances:
+ self.LogWarning("Following occurences use IPs from network %s"
+ " that is about to connect to nodegroup %s: %s" %
+ (self.network_name, self.group.name,
+ l(instances)))
+ raise errors.OpPrereqError("Conflicting IPs found."
+ " Please remove/modify"
+ " corresponding NICs",
+ errors.ECODE_INVAL)
+
+ def Exec(self, feedback_fn):
+ if self.connected:
+ return
+
+ self.group.networks[self.network_uuid] = self.netparams
+ self.cfg.Update(self.group, feedback_fn)
class LUNetworkDisconnect(LogicalUnit):
- def BuildHooksNodes(self):
- pass
+ """Disconnect a network to a nodegroup
+
+ """
+ HPATH = "network-disconnect"
+ HTYPE = constants.HTYPE_NETWORK
+ REQ_BGL = False
+
+ def ExpandNames(self):
+ self.network_name = self.op.network_name
+ self.group_name = self.op.group_name
+
+ self.network_uuid = self.cfg.LookupNetwork(self.network_name)
+ self.network = self.cfg.GetNetwork(self.network_uuid)
+ if self.network is None:
+ raise errors.OpPrereqError("Network %s does not exist" %
+ self.network_name, errors.ECODE_INVAL)
+
+ self.group_uuid = self.cfg.LookupNodeGroup(self.group_name)
+ self.group = self.cfg.GetNodeGroup(self.group_uuid)
+ if self.group is None:
+ raise errors.OpPrereqError("Group %s does not exist" %
+ self.group_name, errors.ECODE_INVAL)
+
+ self.needed_locks = {
+ locking.LEVEL_NODEGROUP: [self.group_uuid],
+ }
+ self.share_locks[locking.LEVEL_INSTANCE] = 1
+
+ def DeclareLocks(self, level):
+ if level == locking.LEVEL_INSTANCE:
+ assert not self.needed_locks[locking.LEVEL_INSTANCE]
+
+ # Lock instances optimistically, needs verification once group lock has
+ # been acquired
+ if self.op.conflicts_check:
+ self.needed_locks[locking.LEVEL_INSTANCE] = \
+ self.cfg.GetNodeGroupInstances(self.group_uuid)
def BuildHooksEnv(self):
- pass
+ ret = {
+ "GROUP_NAME": self.group_name,
+ }
+ ret.update(_BuildNetworkHookEnvByObject(self.network))
+ return ret
+
+ def BuildHooksNodes(self):
+ nodes = self.cfg.GetNodeGroup(self.group_uuid).members
+ return (nodes, nodes)
+
+ def CheckPrereq(self):
+ fmt = lambda value: utils.CommaJoin("%s: %s/%s" % (i[0], i[1], i[2])
+ for i in value)
+
+ self.connected = True
+ if self.network_uuid not in self.group.networks:
+ self.LogWarning("Network '%s' is not mapped to group '%s'",
+ self.network_name, self.group.name)
+ self.connected = False
+ return
+
+ if self.op.conflicts_check:
+ groupinstances = []
+ for n in self.cfg.GetNodeGroupInstances(self.group_uuid):
+ groupinstances.append(self.cfg.GetInstanceInfo(n))
+ instances = [(instance.name, idx, nic.ip)
+ for instance in groupinstances
+ for idx, nic in enumerate(instance.nics)
+ if nic.network == self.network_name]
+ if instances:
+ self.LogWarning("Following occurences use IPs from network %s"
+ " that is about to disconnected from the nodegroup"
+ " %s: %s" %
+ (self.network_name, self.group.name,
+ l(instances)))
+ raise errors.OpPrereqError("Conflicting IPs."
+ " Please remove/modify"
+ " corresponding NICS",
+ errors.ECODE_INVAL)
+
+ def Exec(self, feedback_fn):
+ if not self.connected:
+ return
+
+ del self.group.networks[self.network_uuid]
+ self.cfg.Update(self.group, feedback_fn)
#: Query type implementations
except KeyError:
raise errors.OpPrereqError("Unknown query resource '%s'" % name,
errors.ECODE_INVAL)
+
+
+def _CheckForConflictingIp(lu, ip, node):
+ """In case of conflicting ip raise error.
+
+ @type ip: string
+ @param ip: ip address
+ @type node: string
+ @param node: node name
+
+ """
+ (conf_net, _) = lu.cfg.CheckIPInNodeGroup(ip, node)
+ if conf_net is not None:
+ raise errors.OpPrereqError("Conflicting IP found:"
+ " %s <> %s." % (ip, conf_net),
+ errors.ECODE_INVAL)
+
+ return (None, None)
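+
+# Usage sketch, mirroring the call sites above (e.g. instance creation):
+#   _CheckForConflictingIp(self, nic.ip, self.pnode.name)
+# raises OpPrereqError when the given address falls inside a network that is
+# connected to the node's nodegroup.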