-#!/usr/bin/python
+#
#
# Copyright (C) 2006, 2007 Google Inc.
" use 'gnt-cluster init' first.")
if self.REQ_MASTER:
master = sstore.GetMasterNode()
- if master != socket.gethostname():
+ if master != utils.HostInfo().name:
raise errors.OpPrereqError("Commands must be run on the master"
" node %s" % master)
This is a no-op, since we don't run hooks.
"""
- return
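+ # hooks interface: return (env dict, nodes to run pre-hooks on, nodes to run post-hooks on)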
+ return {}, [], []
def _GetWantedNodes(lu, nodes):
- """Returns list of checked and expanded nodes.
+ """Returns list of checked and expanded node names.
Args:
nodes: List of nodes (strings) or None for all
"""
- if nodes is not None and not isinstance(nodes, list):
+ if not isinstance(nodes, list):
raise errors.OpPrereqError("Invalid argument type 'nodes'")
if nodes:
- wanted_nodes = []
+ wanted = []
for name in nodes:
- node = lu.cfg.GetNodeInfo(lu.cfg.ExpandNodeName(name))
+ node = lu.cfg.ExpandNodeName(name)
if node is None:
raise errors.OpPrereqError("No such node name '%s'" % name)
- wanted_nodes.append(node)
+ wanted.append(node)
- return wanted_nodes
else:
- return [lu.cfg.GetNodeInfo(name) for name in lu.cfg.GetNodeList()]
+ wanted = lu.cfg.GetNodeList()
+ return utils.NiceSort(wanted)
+
+
+def _GetWantedInstances(lu, instances):
+ """Returns list of checked and expanded instance names.
+
+ Args:
+ instances: List of instance names (strings) or an empty list for all
+
+ """
+ if not isinstance(instances, list):
+ raise errors.OpPrereqError("Invalid argument type 'instances'")
+
+ if instances:
+ wanted = []
+
+ for name in instances:
+ instance = lu.cfg.ExpandInstanceName(name)
+ if instance is None:
+ raise errors.OpPrereqError("No such instance name '%s'" % name)
+ wanted.append(instance)
+
+ else:
+ wanted = lu.cfg.GetInstanceList()
+ return utils.NiceSort(wanted)
def _CheckOutputFields(static, dynamic, selected):
secondary_nodes: List of secondary nodes as strings
"""
env = {
+ "OP_TARGET": name,
"INSTANCE_NAME": name,
"INSTANCE_PRIMARY": primary_node,
"INSTANCE_SECONDARIES": " ".join(secondary_nodes),
ourselves in the post-run node list.
"""
- env = {
- "CLUSTER": self.op.cluster_name,
- "MASTER": self.hostname['hostname_full'],
- }
- return env, [], [self.hostname['hostname_full']]
+ env = {"OP_TARGET": self.op.cluster_name}
+ return env, [], [self.hostname.name]
def CheckPrereq(self):
"""Verify that the passed name is a valid one.
if config.ConfigWriter.IsCluster():
raise errors.OpPrereqError("Cluster is already initialised")
- hostname_local = socket.gethostname()
- self.hostname = hostname = utils.LookupHostname(hostname_local)
- if not hostname:
- raise errors.OpPrereqError("Cannot resolve my own hostname ('%s')" %
- hostname_local)
+ self.hostname = hostname = utils.HostInfo()
- if hostname["hostname_full"] != hostname_local:
- raise errors.OpPrereqError("My own hostname (%s) does not match the"
- " resolver (%s): probably not using FQDN"
- " for hostname." %
- (hostname_local, hostname["hostname_full"]))
+ if hostname.ip.startswith("127."):
+ raise errors.OpPrereqError("This host's IP resolves to the loopback"
+ " range (%s). Please fix DNS or /etc/hosts." %
+ (hostname.ip,))
- self.clustername = clustername = utils.LookupHostname(self.op.cluster_name)
- if not clustername:
- raise errors.OpPrereqError("Cannot resolve given cluster name ('%s')"
- % self.op.cluster_name)
+ self.clustername = clustername = utils.HostInfo(self.op.cluster_name)
- result = utils.RunCmd(["fping", "-S127.0.0.1", "-q", hostname['ip']])
- if result.failed:
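+ # sourcing the TCP ping from localhost only succeeds if the resolved IP is configured on this host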
+ if not utils.TcpPing(constants.LOCALHOST_IP_ADDRESS, hostname.ip,
+ constants.DEFAULT_NODED_PORT):
raise errors.OpPrereqError("Inconsistency: this host's name resolves"
" to %s,\nbut this ip address does not"
" belong to this host."
- " Aborting." % hostname['ip'])
+ " Aborting." % hostname.ip)
secondary_ip = getattr(self.op, "secondary_ip", None)
if secondary_ip and not utils.IsValidIP(secondary_ip):
raise errors.OpPrereqError("Invalid secondary ip given")
- if secondary_ip and secondary_ip != hostname['ip']:
- result = utils.RunCmd(["fping", "-S127.0.0.1", "-q", secondary_ip])
- if result.failed:
- raise errors.OpPrereqError("You gave %s as secondary IP,\n"
- "but it does not belong to this host." %
- secondary_ip)
+ if (secondary_ip and
+ secondary_ip != hostname.ip and
+ (not utils.TcpPing(constants.LOCALHOST_IP_ADDRESS, secondary_ip,
+ constants.DEFAULT_NODED_PORT))):
+ raise errors.OpPrereqError("You gave %s as secondary IP,\n"
+ "but it does not belong to this host." %
+ secondary_ip)
self.secondary_ip = secondary_ip
# checks presence of the volume group given
hostname = self.hostname
# set up the simple store
- ss = ssconf.SimpleStore()
+ self.sstore = ss = ssconf.SimpleStore()
ss.SetKey(ss.SS_HYPERVISOR, self.op.hypervisor_type)
- ss.SetKey(ss.SS_MASTER_NODE, hostname['hostname_full'])
- ss.SetKey(ss.SS_MASTER_IP, clustername['ip'])
+ ss.SetKey(ss.SS_MASTER_NODE, hostname.name)
+ ss.SetKey(ss.SS_MASTER_IP, clustername.ip)
ss.SetKey(ss.SS_MASTER_NETDEV, self.op.master_netdev)
- ss.SetKey(ss.SS_CLUSTER_NAME, clustername['hostname'])
+ ss.SetKey(ss.SS_CLUSTER_NAME, clustername.name)
# set up the inter-node password and certificate
_InitGanetiServerSetup(ss)
# start the master ip
- rpc.call_node_start_master(hostname['hostname_full'])
+ rpc.call_node_start_master(hostname.name)
# set up ssh config and /etc/hosts
f = open('/etc/ssh/ssh_host_rsa_key.pub', 'r')
f.close()
sshkey = sshline.split(" ")[1]
- _UpdateEtcHosts(hostname['hostname_full'],
- hostname['ip'],
- )
+ _UpdateEtcHosts(hostname.name, hostname.ip)
- _UpdateKnownHosts(hostname['hostname_full'],
- hostname['ip'],
- sshkey,
- )
+ _UpdateKnownHosts(hostname.name, hostname.ip, sshkey)
- _InitSSHSetup(hostname['hostname'])
+ _InitSSHSetup(hostname.name)
# init of cluster config file
- cfgw = config.ConfigWriter()
- cfgw.InitConfig(hostname['hostname'], hostname['ip'], self.secondary_ip,
+ self.cfg = cfgw = config.ConfigWriter()
+ cfgw.InitConfig(hostname.name, hostname.ip, self.secondary_ip,
sshkey, self.op.mac_prefix,
self.op.vg_name, self.op.def_bridge)
return int(bad)
+class LURenameCluster(LogicalUnit):
+ """Rename the cluster.
+
+ """
+ HPATH = "cluster-rename"
+ HTYPE = constants.HTYPE_CLUSTER
+ _OP_REQP = ["name"]
+
+ def BuildHooksEnv(self):
+ """Build hooks env.
+
+ """
+ env = {
+ "OP_TARGET": self.sstore.GetClusterName(),
+ "NEW_NAME": self.op.name,
+ }
+ mn = self.sstore.GetMasterNode()
+ return env, [mn], [mn]
+
+ def CheckPrereq(self):
+ """Verify that the passed name is a valid one.
+
+ """
+ hostname = utils.HostInfo(self.op.name)
+
+ new_name = hostname.name
+ self.ip = new_ip = hostname.ip
+ old_name = self.sstore.GetClusterName()
+ old_ip = self.sstore.GetMasterIP()
+ if new_name == old_name and new_ip == old_ip:
+ raise errors.OpPrereqError("Neither the name nor the IP address of the"
+ " cluster has changed")
+ if new_ip != old_ip:
+ result = utils.RunCmd(["fping", "-q", new_ip])
+ if not result.failed:
+ raise errors.OpPrereqError("The given cluster IP address (%s) is"
+ " reachable on the network. Aborting." %
+ new_ip)
+
+ self.op.name = new_name
+
+ def Exec(self, feedback_fn):
+ """Rename the cluster.
+
+ """
+ clustername = self.op.name
+ ip = self.ip
+ ss = self.sstore
+
+ # shutdown the master IP
+ master = ss.GetMasterNode()
+ if not rpc.call_node_stop_master(master):
+ raise errors.OpExecError("Could not disable the master role")
+
+ try:
+ # modify the sstore
+ ss.SetKey(ss.SS_MASTER_IP, ip)
+ ss.SetKey(ss.SS_CLUSTER_NAME, clustername)
+
+ # Distribute updated ss config to all nodes
+ myself = self.cfg.GetNodeInfo(master)
+ dist_nodes = self.cfg.GetNodeList()
+ if myself.name in dist_nodes:
+ dist_nodes.remove(myself.name)
+
+ logger.Debug("Copying updated ssconf data to all nodes")
+ for keyname in [ss.SS_CLUSTER_NAME, ss.SS_MASTER_IP]:
+ fname = ss.KeyToFilename(keyname)
+ result = rpc.call_upload_file(dist_nodes, fname)
+ for to_node in dist_nodes:
+ if not result[to_node]:
+ logger.Error("copy of file %s to node %s failed" %
+ (fname, to_node))
+ finally:
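+ # restart the master IP in any case, even if updating or distributing ssconf failed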
+ if not rpc.call_node_start_master(master):
+ logger.Error("Could not re-enable the master role on the master,\n"
+ "please restart manually.")
+
+
def _WaitForSync(cfgw, instance, oneshot=False, unlock=False):
"""Sleep and poll for an instance's disk to sync.
"""
env = {
+ "OP_TARGET": self.op.node_name,
"NODE_NAME": self.op.node_name,
}
all_nodes = self.cfg.GetNodeList()
"""Logical unit for querying nodes.
"""
- _OP_REQP = ["output_fields"]
+ _OP_REQP = ["output_fields", "names"]
def CheckPrereq(self):
"""Check prerequisites.
"""
self.dynamic_fields = frozenset(["dtotal", "dfree",
- "mtotal", "mnode", "mfree"])
+ "mtotal", "mnode", "mfree",
+ "bootid"])
- _CheckOutputFields(static=["name", "pinst", "sinst", "pip", "sip"],
+ _CheckOutputFields(static=["name", "pinst_cnt", "sinst_cnt",
+ "pinst_list", "sinst_list",
+ "pip", "sip"],
dynamic=self.dynamic_fields,
selected=self.op.output_fields)
+ self.wanted = _GetWantedNodes(self, self.op.names)
def Exec(self, feedback_fn):
"""Computes the list of nodes and their attributes.
"""
- nodenames = utils.NiceSort(self.cfg.GetNodeList())
+ nodenames = self.wanted
nodelist = [self.cfg.GetNodeInfo(name) for name in nodenames]
-
# begin data gathering
if self.dynamic_fields.intersection(self.op.output_fields):
"mfree": utils.TryConvert(int, nodeinfo['memory_free']),
"dtotal": utils.TryConvert(int, nodeinfo['vg_size']),
"dfree": utils.TryConvert(int, nodeinfo['vg_free']),
+ "bootid": nodeinfo['bootid'],
}
else:
live_data[name] = {}
else:
live_data = dict.fromkeys(nodenames, {})
- node_to_primary = dict.fromkeys(nodenames, 0)
- node_to_secondary = dict.fromkeys(nodenames, 0)
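+ # track the actual instance names per node (sets), not just counts, so both the *_cnt and *_list fields can be served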
+ node_to_primary = dict([(name, set()) for name in nodenames])
+ node_to_secondary = dict([(name, set()) for name in nodenames])
- if "pinst" in self.op.output_fields or "sinst" in self.op.output_fields:
+ inst_fields = frozenset(("pinst_cnt", "pinst_list",
+ "sinst_cnt", "sinst_list"))
+ if inst_fields & frozenset(self.op.output_fields):
instancelist = self.cfg.GetInstanceList()
- for instance in instancelist:
- instanceinfo = self.cfg.GetInstanceInfo(instance)
- node_to_primary[instanceinfo.primary_node] += 1
- for secnode in instanceinfo.secondary_nodes:
- node_to_secondary[secnode] += 1
+ for instance_name in instancelist:
+ inst = self.cfg.GetInstanceInfo(instance_name)
+ if inst.primary_node in node_to_primary:
+ node_to_primary[inst.primary_node].add(inst.name)
+ for secnode in inst.secondary_nodes:
+ if secnode in node_to_secondary:
+ node_to_secondary[secnode].add(inst.name)
# end data gathering
for field in self.op.output_fields:
if field == "name":
val = node.name
- elif field == "pinst":
- val = node_to_primary[node.name]
- elif field == "sinst":
- val = node_to_secondary[node.name]
+ elif field == "pinst_list":
+ val = list(node_to_primary[node.name])
+ elif field == "sinst_list":
+ val = list(node_to_secondary[node.name])
+ elif field == "pinst_cnt":
+ val = len(node_to_primary[node.name])
+ elif field == "sinst_cnt":
+ val = len(node_to_secondary[node.name])
elif field == "pip":
val = node.primary_ip
elif field == "sip":
val = node.secondary_ip
elif field in self.dynamic_fields:
- val = live_data[node.name].get(field, "?")
+ val = live_data[node.name].get(field, None)
else:
raise errors.ParameterError(field)
- val = str(val)
node_output.append(val)
output.append(node_output)
"""Computes the list of nodes and their attributes.
"""
- nodenames = utils.NiceSort([node.name for node in self.nodes])
+ nodenames = self.nodes
volumes = rpc.call_node_volumes(nodenames)
ilist = [self.cfg.GetInstanceInfo(iname) for iname
"""
env = {
+ "OP_TARGET": self.op.node_name,
"NODE_NAME": self.op.node_name,
"NODE_PIP": self.op.primary_ip,
"NODE_SIP": self.op.secondary_ip,
node_name = self.op.node_name
cfg = self.cfg
- dns_data = utils.LookupHostname(node_name)
- if not dns_data:
- raise errors.OpPrereqError("Node %s is not resolvable" % node_name)
+ dns_data = utils.HostInfo(node_name)
- node = dns_data['hostname']
- primary_ip = self.op.primary_ip = dns_data['ip']
+ node = dns_data.name
+ primary_ip = self.op.primary_ip = dns_data.ip
secondary_ip = getattr(self.op, "secondary_ip", None)
if secondary_ip is None:
secondary_ip = primary_ip
" new node doesn't have one")
# checks reachability
- command = ["fping", "-q", primary_ip]
- result = utils.RunCmd(command)
- if result.failed:
+ if not utils.TcpPing(utils.HostInfo().name,
+ primary_ip,
+ constants.DEFAULT_NODED_PORT):
raise errors.OpPrereqError("Node not reachable by ping")
if not newbie_singlehomed:
# check reachability from my secondary ip to newbie's secondary ip
- command = ["fping", "-S%s" % myself.secondary_ip, "-q", secondary_ip]
- result = utils.RunCmd(command)
- if result.failed:
- raise errors.OpPrereqError("Node secondary ip not reachable by ping")
+ if not utils.TcpPing(myself.secondary_ip,
+ secondary_ip,
+ constants.DEFAULT_NODED_PORT):
+ raise errors.OpPrereqError(
+ "Node secondary ip not reachable by TCP based ping to noded port")
self.new_node = objects.Node(name=node,
primary_ip=primary_ip,
self.cfg.GetHostKey())
if new_node.secondary_ip != new_node.primary_ip:
- result = ssh.SSHCall(node, "root",
- "fping -S 127.0.0.1 -q %s" % new_node.secondary_ip)
- if result.failed:
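+ # ask the new node (via RPC instead of ssh) to ping its own secondary IP from localhost, proving the address is really configured there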
+ if not rpc.call_node_tcp_ping(new_node.name,
+ constants.LOCALHOST_IP_ADDRESS,
+ new_node.secondary_ip,
+ constants.DEFAULT_NODED_PORT,
+ 10, False):
raise errors.OpExecError("Node claims it doesn't have the"
" secondary ip you gave (%s).\n"
"Please fix and re-run this command." %
"""
env = {
+ "OP_TARGET": self.new_master,
"NEW_MASTER": self.new_master,
"OLD_MASTER": self.old_master,
}
This checks that we are not already the master.
"""
- self.new_master = socket.gethostname()
-
+ self.new_master = utils.HostInfo().name
self.old_master = self.sstore.GetMasterNode()
if self.old_master == self.new_master:
"""
filename = self.op.filename
- myname = socket.gethostname()
+ myname = utils.HostInfo().name
- for node in [node.name for node in self.nodes]:
+ for node in self.nodes:
if node == myname:
continue
if not ssh.CopyFileToNode(node, filename):
"""
data = []
for node in self.nodes:
- result = ssh.SSHCall(node.name, "root", self.op.command)
- data.append((node.name, result.output, result.exit_code))
+ result = ssh.SSHCall(node, "root", self.op.command)
+ data.append((node, result.output, result.exit_code))
return data
_ShutdownInstanceDisks(inst, self.cfg)
+class LURenameInstance(LogicalUnit):
+ """Rename an instance.
+
+ """
+ HPATH = "instance-rename"
+ HTYPE = constants.HTYPE_INSTANCE
+ _OP_REQP = ["instance_name", "new_name"]
+
+ def BuildHooksEnv(self):
+ """Build hooks env.
+
+ This runs on master, primary and secondary nodes of the instance.
+
+ """
+ env = _BuildInstanceHookEnvByObject(self.instance)
+ env["INSTANCE_NEW_NAME"] = self.op.new_name
+ nl = ([self.sstore.GetMasterNode(), self.instance.primary_node] +
+ list(self.instance.secondary_nodes))
+ return env, nl, nl
+
+ def CheckPrereq(self):
+ """Check prerequisites.
+
+ This checks that the instance is in the cluster and is not running.
+
+ """
+ instance = self.cfg.GetInstanceInfo(
+ self.cfg.ExpandInstanceName(self.op.instance_name))
+ if instance is None:
+ raise errors.OpPrereqError("Instance '%s' not known" %
+ self.op.instance_name)
+ if instance.status != "down":
+ raise errors.OpPrereqError("Instance '%s' is marked to be up" %
+ self.op.instance_name)
+ remote_info = rpc.call_instance_info(instance.primary_node, instance.name)
+ if remote_info:
+ raise errors.OpPrereqError("Instance '%s' is running on the node %s" %
+ (self.op.instance_name,
+ instance.primary_node))
+ self.instance = instance
+
+ # new name verification
+ name_info = utils.HostInfo(self.op.new_name)
+
+ self.op.new_name = new_name = name_info.name
+ if not getattr(self.op, "ignore_ip", False):
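+ # a successful fping means something already answers on the new name's IP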
+ command = ["fping", "-q", name_info.ip]
+ result = utils.RunCmd(command)
+ if not result.failed:
+ raise errors.OpPrereqError("IP %s of instance %s already in use" %
+ (name_info.ip, new_name))
+
+
+ def Exec(self, feedback_fn):
+ """Rename the instance.
+
+ """
+ inst = self.instance
+ old_name = inst.name
+
+ self.cfg.RenameInstance(inst.name, self.op.new_name)
+
+ # re-read the instance from the configuration after rename
+ inst = self.cfg.GetInstanceInfo(self.op.new_name)
+
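+ # the OS rename script needs the instance's disks active, so start them here and always shut them down again afterwards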
+ _StartInstanceDisks(self.cfg, inst, None)
+ try:
+ if not rpc.call_instance_run_rename(inst.primary_node, inst, old_name,
+ "sda", "sdb"):
+ msg = ("Could not run OS rename script for instance %s\n"
+ "on node %s\n"
+ "(but the instance has been renamed in Ganeti)" %
+ (inst.name, inst.primary_node))
+ logger.Error(msg)
+ finally:
+ _ShutdownInstanceDisks(inst, self.cfg)
+
+
class LURemoveInstance(LogicalUnit):
"""Remove an instance.
"""Logical unit for querying instances.
"""
- _OP_REQP = ["output_fields"]
+ _OP_REQP = ["output_fields", "names"]
def CheckPrereq(self):
"""Check prerequisites.
dynamic=self.dynamic_fields,
selected=self.op.output_fields)
+ self.wanted = _GetWantedInstances(self, self.op.names)
+
def Exec(self, feedback_fn):
"""Computes the list of nodes and their attributes.
"""
- instance_names = utils.NiceSort(self.cfg.GetInstanceList())
+ instance_names = self.wanted
instance_list = [self.cfg.GetInstanceInfo(iname) for iname
in instance_names]
elif field == "pnode":
val = instance.primary_node
elif field == "snodes":
- val = ",".join(instance.secondary_nodes) or "-"
+ val = list(instance.secondary_nodes)
elif field == "admin_state":
- if instance.status == "down":
- val = "no"
- else:
- val = "yes"
+ val = (instance.status != "down")
elif field == "oper_state":
if instance.primary_node in bad_nodes:
- val = "(node down)"
+ val = None
else:
- if live_data.get(instance.name):
- val = "running"
- else:
- val = "stopped"
+ val = bool(live_data.get(instance.name))
elif field == "admin_ram":
val = instance.memory
elif field == "oper_ram":
if instance.primary_node in bad_nodes:
- val = "(node down)"
+ val = None
elif instance.name in live_data:
val = live_data[instance.name].get("memory", "?")
else:
elif field == "sda_size" or field == "sdb_size":
disk = instance.FindDisk(field[:3])
if disk is None:
- val = "N/A"
+ val = None
else:
val = disk.size
else:
raise errors.ParameterError(field)
- val = str(val)
iout.append(val)
output.append(iout)
raise errors.OpPrereqError("Instance '%s' not known" %
self.op.instance_name)
+ if instance.disk_template != constants.DT_REMOTE_RAID1:
+ raise errors.OpPrereqError("Instance's disk layout is not"
+ " remote_raid1.")
+
+ secondary_nodes = instance.secondary_nodes
+ if not secondary_nodes:
+ raise errors.ProgrammerError("no secondary node but using "
+ "DT_REMOTE_RAID1 template")
+
# check memory requirements on the secondary node
- target_node = instance.secondary_nodes[0]
+ target_node = secondary_nodes[0]
nodeinfo = rpc.call_node_info([target_node], self.cfg.GetVGName())
info = nodeinfo.get(target_node, None)
if not info:
size=swap_sz,
children = [sdb_dev_m1, sdb_dev_m2])
disks = [md_sda_dev, md_sdb_dev]
- elif template_name == "remote_raid1":
+ elif template_name == constants.DT_REMOTE_RAID1:
if len(secondary_nodes) != 1:
raise errors.ProgrammerError("Wrong template configuration")
remote_node = secondary_nodes[0]
HTYPE = constants.HTYPE_INSTANCE
_OP_REQP = ["instance_name", "mem_size", "disk_size", "pnode",
"disk_template", "swap_size", "mode", "start", "vcpus",
- "wait_for_sync"]
+ "wait_for_sync", "ip_check"]
def BuildHooksEnv(self):
"""Build hooks env.
" primary node" % self.op.os_type)
# instance verification
- hostname1 = utils.LookupHostname(self.op.instance_name)
- if not hostname1:
- raise errors.OpPrereqError("Instance name '%s' not found in dns" %
- self.op.instance_name)
+ hostname1 = utils.HostInfo(self.op.instance_name)
- self.op.instance_name = instance_name = hostname1['hostname']
+ self.op.instance_name = instance_name = hostname1.name
instance_list = self.cfg.GetInstanceList()
if instance_name in instance_list:
raise errors.OpPrereqError("Instance '%s' is already in the cluster" %
if ip is None or ip.lower() == "none":
inst_ip = None
elif ip.lower() == "auto":
- inst_ip = hostname1['ip']
+ inst_ip = hostname1.ip
else:
if not utils.IsValidIP(ip):
raise errors.OpPrereqError("given IP address '%s' doesn't look"
inst_ip = ip
self.inst_ip = inst_ip
- command = ["fping", "-q", hostname1['ip']]
- result = utils.RunCmd(command)
- if not result.failed:
- raise errors.OpPrereqError("IP %s of instance %s already in use" %
- (hostname1['ip'], instance_name))
+ if self.op.start and not self.op.ip_check:
+ raise errors.OpPrereqError("Cannot ignore IP address conflicts when"
+ " adding an instance in start mode")
+
+ if self.op.ip_check:
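+ # anything answering a TCP ping on the instance's IP means the address is already taken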
+ if utils.TcpPing(utils.HostInfo().name, hostname1.ip,
+ constants.DEFAULT_NODED_PORT):
+ raise errors.OpPrereqError("IP %s of instance %s already in use" %
+ (hostname1.ip, instance_name))
# bridge verification
bridge = getattr(self.op, "bridge", None)
if self.op.wait_for_sync:
disk_abort = not _WaitForSync(self.cfg, iobj)
- elif iobj.disk_template == "remote_raid1":
+ elif iobj.disk_template == constants.DT_REMOTE_RAID1:
# make sure the disks are not degraded (still sync-ing is ok)
time.sleep(15)
feedback_fn("* checking mirrors status")
# start of work
remote_node = self.op.remote_node
cfg = self.cfg
- vgname = cfg.GetVGName()
for dev in instance.disks:
size = dev.size
lv_names = [".%s_%s" % (dev.iv_name, suf) for suf in ["data", "meta"]]
return result
-class LUQueryNodeData(NoHooksLU):
- """Logical unit for querying node data.
-
- """
- _OP_REQP = ["nodes"]
-
- def CheckPrereq(self):
- """Check prerequisites.
-
- This only checks the optional node list against the existing names.
-
- """
- self.wanted_nodes = _GetWantedNodes(self, self.op.nodes)
-
- def Exec(self, feedback_fn):
- """Compute and return the list of nodes.
-
- """
- ilist = [self.cfg.GetInstanceInfo(iname) for iname
- in self.cfg.GetInstanceList()]
- result = []
- for node in self.wanted_nodes:
- result.append((node.name, node.primary_ip, node.secondary_ip,
- [inst.name for inst in ilist
- if inst.primary_node == node.name],
- [inst.name for inst in ilist
- if node.name in inst.secondary_nodes],
- ))
- return result
-
-
class LUSetInstanceParms(LogicalUnit):
"""Modifies an instances's parameters.
that node.
"""
- return rpc.call_export_list([node.name for node in self.nodes])
+ return rpc.call_export_list(self.nodes)
class LUExportInstance(LogicalUnit):
self.op.name = name
self.target = self.cfg.GetNodeInfo(name)
elif self.op.kind == constants.TAG_INSTANCE:
- name = self.cfg.ExpandInstanceName(name)
+ name = self.cfg.ExpandInstanceName(self.op.name)
if name is None:
raise errors.OpPrereqError("Invalid instance name (%s)" %
(self.op.name,))
return self.target.GetTags()
-class LUAddTag(TagsLU):
+class LUAddTags(TagsLU):
"""Sets a tag on a given object.
"""
- _OP_REQP = ["kind", "name", "tag"]
+ _OP_REQP = ["kind", "name", "tags"]
def CheckPrereq(self):
"""Check prerequisites.
"""
TagsLU.CheckPrereq(self)
- objects.TaggableObject.ValidateTag(self.op.tag)
+ for tag in self.op.tags:
+ objects.TaggableObject.ValidateTag(tag)
def Exec(self, feedback_fn):
"""Sets the tag.
"""
try:
- self.target.AddTag(self.op.tag)
+ for tag in self.op.tags:
+ self.target.AddTag(tag)
except errors.TagError, err:
raise errors.OpExecError("Error while setting tag: %s" % str(err))
try:
" aborted. Please retry.")
-class LUDelTag(TagsLU):
- """Delete a tag from a given object.
+class LUDelTags(TagsLU):
+ """Delete a list of tags from a given object.
"""
- _OP_REQP = ["kind", "name", "tag"]
+ _OP_REQP = ["kind", "name", "tags"]
def CheckPrereq(self):
"""Check prerequisites.
"""
TagsLU.CheckPrereq(self)
- objects.TaggableObject.ValidateTag(self.op.tag)
- if self.op.tag not in self.target.GetTags():
- raise errors.OpPrereqError("Tag not found")
+ for tag in self.op.tags:
+ objects.TaggableObject.ValidateTag(tag)
+ del_tags = frozenset(self.op.tags)
+ cur_tags = self.target.GetTags()
+ if not del_tags <= cur_tags:
+ diff_tags = del_tags - cur_tags
+ diff_names = ["'%s'" % tag for tag in diff_tags]
+ diff_names.sort()
+ raise errors.OpPrereqError("Tag(s) %s not found" %
+ (",".join(diff_names)))
def Exec(self, feedback_fn):
"""Remove the tag from the object.
"""
- self.target.RemoveTag(self.op.tag)
+ for tag in self.op.tags:
+ self.target.RemoveTag(tag)
try:
self.cfg.Update(self.target)
except errors.ConfigurationError: