_PNodeName = ("node_name", _NoDefault, _TNonEmptyString)
#: the migration type (live/non-live)
-_PMigrationLive = ("live", None, _TOr(_TNone,
+_PMigrationMode = ("mode", None, _TOr(_TNone,
_TElemOf(constants.HT_MIGRATION_MODES)))
+#: the obsolete 'live' mode (boolean)
+_PMigrationLive = ("live", None, _TMaybeBool)
+
# End types
class LogicalUnit(object):
"""
master = self.cfg.GetMasterNode()
- modify_ssh_setup = self.cfg.GetClusterInfo().modify_ssh_setup
# Run post hooks on master node before it's removed
hm = self.proc.hmclass(self.rpc.call_hooks_runner, self)
result = self.rpc.call_node_stop_master(master, False)
result.Raise("Could not disable the master role")
- if modify_ssh_setup:
- priv_key, pub_key, _ = ssh.GetUserFiles(constants.GANETI_RUNAS)
- utils.CreateBackup(priv_key)
- utils.CreateBackup(pub_key)
-
return master
_ErrorIf(test, self.EINSTANCEWRONGNODE, instance,
"instance should not run on node %s", node)
- def _VerifyOrphanVolumes(self, node_vol_should, node_image):
+ def _VerifyOrphanVolumes(self, node_vol_should, node_image, reserved):
"""Verify if there are any unknown volumes in the cluster.
The .os, .swap and backup volumes are ignored. All other volumes are
reported as unknown.
+ @type reserved: L{ganeti.utils.FieldSet}
+ @param reserved: a FieldSet of reserved volume names
+
"""
for node, n_img in node_image.items():
if n_img.offline or n_img.rpc_fail or n_img.lvm_fail:
# skip non-healthy nodes
continue
for volume in n_img.volumes:
- test = (node not in node_vol_should or
- volume not in node_vol_should[node])
+ test = ((node not in node_vol_should or
+ volume not in node_vol_should[node]) and
+ not reserved.Matches(volume))
self._ErrorIf(test, self.ENODEORPHANLV, node,
"volume %s is unknown", volume)
"instance lives on ghost node %s", node)
feedback_fn("* Verifying orphan volumes")
- self._VerifyOrphanVolumes(node_vol_should, node_image)
+ reserved = utils.FieldSet(*cluster.reserved_lvs)
+ self._VerifyOrphanVolumes(node_vol_should, node_image, reserved)
feedback_fn("* Verifying orphan instances")
self._VerifyOrphanInstances(instancelist, node_image)
"""Verify that the passed name is a valid one.
"""
- hostname = netutils.GetHostInfo(self.op.name)
+ hostname = netutils.GetHostname(name=self.op.name,
+ family=self.cfg.GetPrimaryIPFamily())
new_name = hostname.name
self.ip = new_ip = hostname.ip
self.LogWarning("Could not re-enable the master role on"
" the master, please restart manually: %s", msg)
+ return clustername
+
class LUSetClusterParams(LogicalUnit):
"""Change the parameters of the cluster.
("nicparams", None, _TOr(_TDict, _TNone)),
("drbd_helper", None, _TOr(_TString, _TNone)),
("default_iallocator", None, _TMaybeString),
+ ("reserved_lvs", None, _TOr(_TListOf(_TNonEmptyString), _TNone)),
]
REQ_BGL = False
if self.op.default_iallocator is not None:
self.cluster.default_iallocator = self.op.default_iallocator
+ if self.op.reserved_lvs is not None:
+ self.cluster.reserved_lvs = self.op.reserved_lvs
+
self.cfg.Update(self.cluster, feedback_fn)
def CheckArguments(self):
# validate/normalize the node name
- self.op.node_name = netutils.HostInfo.NormalizeName(self.op.node_name)
+ self.hostname = netutils.GetHostname(name=self.op.node_name,
+ family=self.cfg.GetPrimaryIPFamily())
+ self.op.node_name = self.hostname.name
def BuildHooksEnv(self):
"""Build hooks env.
Any errors are signaled by raising errors.OpPrereqError.
"""
- node_name = self.op.node_name
cfg = self.cfg
-
- dns_data = netutils.GetHostInfo(node_name)
-
- node = dns_data.name
- primary_ip = self.op.primary_ip = dns_data.ip
+ hostname = self.hostname
+ node = hostname.name
+ primary_ip = self.op.primary_ip = hostname.ip
if self.op.secondary_ip is None:
self.op.secondary_ip = primary_ip
- if not netutils.IsValidIP4(self.op.secondary_ip):
- raise errors.OpPrereqError("Invalid secondary IP given",
- errors.ECODE_INVAL)
+
secondary_ip = self.op.secondary_ip
+ if not netutils.IP4Address.IsValid(secondary_ip):
+ raise errors.OpPrereqError("Secondary IP (%s) needs to be a valid IPv4"
+ " address" % secondary_ip, errors.ECODE_INVAL)
node_list = cfg.GetNodeList()
if not self.op.readd and node in node_list:
" node version %s" %
(constants.PROTOCOL_VERSION, result.payload))
- # setup ssh on node
- if self.cfg.GetClusterInfo().modify_ssh_setup:
- logging.info("Copy ssh key to node %s", node)
- priv_key, pub_key, _ = ssh.GetUserFiles(constants.GANETI_RUNAS)
- keyarray = []
- keyfiles = [constants.SSH_HOST_DSA_PRIV, constants.SSH_HOST_DSA_PUB,
- constants.SSH_HOST_RSA_PRIV, constants.SSH_HOST_RSA_PUB,
- priv_key, pub_key]
-
- for i in keyfiles:
- keyarray.append(utils.ReadFile(i))
-
- result = self.rpc.call_node_add(node, keyarray[0], keyarray[1],
- keyarray[2], keyarray[3], keyarray[4],
- keyarray[5])
- result.Raise("Cannot transfer ssh keys to the new node")
-
# Add node to our /etc/hosts, and add key to known_hosts
if self.cfg.GetClusterInfo().modify_etc_hosts:
# FIXME: this should be done via an rpc call to node daemon
- utils.AddHostToEtcHosts(new_node.name)
+ utils.AddHostToEtcHosts(self.hostname)
if new_node.secondary_ip != new_node.primary_ip:
result = self.rpc.call_node_has_ip_address(new_node.name,
# we can't change the master's node flags
if self.op.node_name == self.cfg.GetMasterNode():
raise errors.OpPrereqError("The master role can be changed"
- " only via masterfailover",
+ " only via master-failover",
errors.ECODE_INVAL)
if hv_name in cluster.enabled_hypervisors:
os_hvp[os_name][hv_name] = hv_params
+ # Convert ip_family to ip_version
+ primary_ip_version = constants.IP4_VERSION
+ if cluster.primary_ip_family == netutils.IP6Address.family:
+ primary_ip_version = constants.IP6_VERSION
+
result = {
"software_version": constants.RELEASE_VERSION,
"protocol_version": constants.PROTOCOL_VERSION,
"tags": list(cluster.GetTags()),
"uid_pool": cluster.uid_pool,
"default_iallocator": cluster.default_iallocator,
+ "reserved_lvs": cluster.reserved_lvs,
+ "primary_ip_version": primary_ip_version,
}
return result
_OP_PARAMS = [
_PInstanceName,
("new_name", _NoDefault, _TNonEmptyString),
- ("ignore_ip", False, _TBool),
- ("check_name", True, _TBool),
+ ("ip_check", False, _TBool),
+ ("name_check", True, _TBool),
]
+ def CheckArguments(self):
+ """Check arguments.
+
+ """
+ if self.op.ip_check and not self.op.name_check:
+ # TODO: make the ip check more flexible and not depend on the name check
+ raise errors.OpPrereqError("Cannot do ip check without a name check",
+ errors.ECODE_INVAL)
+
def BuildHooksEnv(self):
"""Build hooks env.
_CheckInstanceDown(self, instance, "cannot rename")
self.instance = instance
- # new name verification
- if self.op.check_name:
- name_info = netutils.GetHostInfo(self.op.new_name)
- self.op.new_name = name_info.name
-
new_name = self.op.new_name
+ if self.op.name_check:
+ hostname = netutils.GetHostname(name=new_name)
+ new_name = hostname.name
+ if (self.op.ip_check and
+ netutils.TcpPing(hostname.ip, constants.DEFAULT_NODED_PORT)):
+ raise errors.OpPrereqError("IP %s of instance %s already in use" %
+ (hostname.ip, new_name),
+ errors.ECODE_NOTUNIQUE)
instance_list = self.cfg.GetInstanceList()
if new_name in instance_list:
raise errors.OpPrereqError("Instance '%s' is already in the cluster" %
new_name, errors.ECODE_EXISTS)
- if not self.op.ignore_ip:
- if netutils.TcpPing(name_info.ip, constants.DEFAULT_NODED_PORT):
- raise errors.OpPrereqError("IP %s of instance %s already in use" %
- (name_info.ip, new_name),
- errors.ECODE_NOTUNIQUE)
-
def Exec(self, feedback_fn):
"""Reinstall the instance.
finally:
_ShutdownInstanceDisks(self, inst)
+ return inst.name
+
class LURemoveInstance(LogicalUnit):
"""Remove an instance.
HTYPE = constants.HTYPE_INSTANCE
_OP_PARAMS = [
_PInstanceName,
+ _PMigrationMode,
_PMigrationLive,
("cleanup", False, _TBool),
]
source_node = instance.primary_node
target_node = instance.secondary_nodes[0]
env = _BuildInstanceHookEnvByObject(self, instance)
- env["MIGRATE_LIVE"] = self.op.live
+ env["MIGRATE_LIVE"] = self._migrater.live
env["MIGRATE_CLEANUP"] = self.op.cleanup
env.update({
"OLD_PRIMARY": source_node,
HTYPE = constants.HTYPE_NODE
_OP_PARAMS = [
_PNodeName,
+ _PMigrationMode,
_PMigrationLive,
]
REQ_BGL = False
class TLMigrateInstance(Tasklet):
+ """Tasklet class for instance migration.
+
+ @type live: boolean
+ @ivar live: whether the migration will be done live or non-live;
+ this variable is initialized only after CheckPrereq has run
+
+ """
def __init__(self, lu, instance_name, cleanup):
"""Initializes this class.
self.instance = instance
- if self.lu.op.live is None:
+ if self.lu.op.live is not None and self.lu.op.mode is not None:
+ raise errors.OpPrereqError("Only one of the 'live' and 'mode'"
+ " parameters are accepted",
+ errors.ECODE_INVAL)
+ if self.lu.op.live is not None:
+ if self.lu.op.live:
+ self.lu.op.mode = constants.HT_MIGRATION_LIVE
+ else:
+ self.lu.op.mode = constants.HT_MIGRATION_NONLIVE
+ # reset the 'live' parameter to None so that repeated
+ # invocations of CheckPrereq do not raise an exception
+ self.lu.op.live = None
+ elif self.lu.op.mode is None:
# read the default value from the hypervisor
i_hv = self.cfg.GetClusterInfo().FillHV(instance, skip_globals=False)
- self.lu.op.live = i_hv[constants.HV_MIGRATION_MODE]
+ self.lu.op.mode = i_hv[constants.HV_MIGRATION_MODE]
- self.live = self.lu.op.live == constants.HT_MIGRATION_LIVE
+ self.live = self.lu.op.mode == constants.HT_MIGRATION_LIVE
def _WaitUntilSync(self):
"""Poll with custom rpc for disk sync.
("os_type", None, _TMaybeString),
("force_variant", False, _TBool),
("source_handshake", None, _TOr(_TList, _TNone)),
- ("source_x509_ca", None, _TOr(_TList, _TNone)),
+ ("source_x509_ca", None, _TMaybeString),
("source_instance_name", None, _TMaybeString),
("src_node", None, _TMaybeString),
("src_path", None, _TMaybeString),
self.op.start = False
# validate/normalize the instance name
self.op.instance_name = \
- netutils.HostInfo.NormalizeName(self.op.instance_name)
+ netutils.Hostname.GetNormalizedName(self.op.instance_name)
if self.op.ip_check and not self.op.name_check:
# TODO: make the ip check more flexible and not depend on the name check
- raise errors.OpPrereqError("Cannot do ip checks without a name check",
+ raise errors.OpPrereqError("Cannot do ip check without a name check",
errors.ECODE_INVAL)
# check nics' parameter names
# instance name verification
if self.op.name_check:
- self.hostname1 = netutils.GetHostInfo(self.op.instance_name)
+ self.hostname1 = netutils.GetHostname(name=self.op.instance_name)
self.op.instance_name = self.hostname1.name
# used in CheckPrereq for ip ping check
self.check_ip = self.hostname1.ip
raise errors.OpPrereqError("Missing source instance name",
errors.ECODE_INVAL)
- norm_name = netutils.HostInfo.NormalizeName(src_instance_name)
- self.source_instance_name = netutils.GetHostInfo(norm_name).name
+ self.source_instance_name = \
+ netutils.GetHostname(name=src_instance_name).name
else:
raise errors.OpPrereqError("Invalid instance creation mode %r" %
errors.ECODE_INVAL)
nic_ip = self.hostname1.ip
else:
- if not netutils.IsValidIP4(ip):
+ if not netutils.IP4Address.IsValid(ip):
raise errors.OpPrereqError("Given IP address '%s' doesn't look"
" like a valid IP" % ip,
errors.ECODE_INVAL)
if nic_ip.lower() == constants.VALUE_NONE:
nic_dict['ip'] = None
else:
- if not netutils.IsValidIP4(nic_ip):
+ if not netutils.IP4Address.IsValid(nic_ip):
raise errors.OpPrereqError("Invalid IP address '%s'" % nic_ip,
errors.ECODE_INVAL)
self.LogInfo("Executing")
if self.op.log_messages:
+ self._Notify(False, constants.JQT_STARTMSG, len(self.op.log_messages))
for idx, msg in enumerate(self.op.log_messages):
self.LogInfo("Sending log message %s", idx + 1)
feedback_fn(constants.JQT_MSGPREFIX + msg)