# 02110-1301, USA.
-"""Module implementing the commands used by gnt-* programs."""
+"""Module implementing the master-side code."""
# pylint: disable-msg=W0613,W0201
raise errors.OpPrereqError, ("Cluster not initialized yet,"
" use 'gnt-cluster init' first.")
if self.REQ_MASTER:
- master = cfg.GetMaster()
+ master = sstore.GetMasterNode()
if master != socket.gethostname():
raise errors.OpPrereqError, ("Commands must be run on the master"
" node %s" % master)
return
+def _GetWantedNodes(lu, nodes):
+ """Returns list of checked and expanded nodes.
+
+  Args:
+    lu: the LogicalUnit on whose behalf we execute (provides lu.cfg)
+    nodes: List of nodes (strings) or None for all
+ """
+ if nodes is not None and not isinstance(nodes, list):
+ raise errors.OpPrereqError, "Invalid argument type 'nodes'"
+
+ if nodes:
+ wanted_nodes = []
+
+ for name in nodes:
+ node = lu.cfg.GetNodeInfo(lu.cfg.ExpandNodeName(name))
+ if node is None:
+ raise errors.OpPrereqError, ("No such node name '%s'" % name)
+ wanted_nodes.append(node)
+
+ return wanted_nodes
+ else:
+ return [lu.cfg.GetNodeInfo(name) for name in lu.cfg.GetNodeList()]
+
+
+def _CheckOutputFields(static, dynamic, selected):
+ """Checks whether all selected fields are valid.
+
+ Args:
+ static: Static fields
+    dynamic: Dynamic fields
+    selected: Fields selected by the user, checked against static|dynamic
+ """
+ static_fields = frozenset(static)
+ dynamic_fields = frozenset(dynamic)
+
+ all_fields = static_fields | dynamic_fields
+
+ if not all_fields.issuperset(selected):
+ raise errors.OpPrereqError, ("Unknown output fields selected: %s"
+ % ",".join(frozenset(selected).
+ difference(all_fields)))
+
+
def _UpdateEtcHosts(fullnode, ip):
"""Ensure a node has a correct entry in /etc/hosts.
if vgsize is None:
return "volume group '%s' missing" % vgname
elif vgsize < 20480:
- return ("volume group '%s' too small (20480MiB required, %dMib found" %
- vgname, vgsize)
+    return ("volume group '%s' too small (20480MiB required, %dMiB found)" %
+ (vgname, vgsize))
return None
(result.cmd, result.exit_code, result.output))
-def _InitClusterInterface(fullname, name, ip):
- """Initialize the master startup script.
-
- """
- f = file(constants.CLUSTER_NAME_FILE, 'w')
- f.write("%s\n" % fullname)
- f.close()
-
- f = file(constants.MASTER_INITD_SCRIPT, 'w')
- f.write ("#!/bin/sh\n")
- f.write ("\n")
- f.write ("# Start Ganeti Master Virtual Address\n")
- f.write ("\n")
- f.write ("DESC=\"Ganeti Master IP\"\n")
- f.write ("MASTERNAME=\"%s\"\n" % name)
- f.write ("MASTERIP=\"%s\"\n" % ip)
- f.write ("case \"$1\" in\n")
- f.write (" start)\n")
- f.write (" if fping -q -c 3 ${MASTERIP} &>/dev/null; then\n")
- f.write (" echo \"$MASTERNAME no-go - there is already a master.\"\n")
- f.write (" rm -f %s\n" % constants.MASTER_CRON_LINK)
- f.write (" scp ${MASTERNAME}:%s %s\n" %
- (constants.CLUSTER_CONF_FILE, constants.CLUSTER_CONF_FILE))
- f.write (" else\n")
- f.write (" echo -n \"Starting $DESC: \"\n")
- f.write (" ip address add ${MASTERIP}/32 dev xen-br0"
- " label xen-br0:0\n")
- f.write (" arping -q -U -c 3 -I xen-br0 -s ${MASTERIP} ${MASTERIP}\n")
- f.write (" echo \"$MASTERNAME.\"\n")
- f.write (" fi\n")
- f.write (" ;;\n")
- f.write (" stop)\n")
- f.write (" echo -n \"Stopping $DESC: \"\n")
- f.write (" ip address del ${MASTERIP}/32 dev xen-br0\n")
- f.write (" echo \"$MASTERNAME.\"\n")
- f.write (" ;;\n")
- f.write (" *)\n")
- f.write (" echo \"Usage: $0 {start|stop}\" >&2\n")
- f.write (" exit 1\n")
- f.write (" ;;\n")
- f.write ("esac\n")
- f.write ("\n")
- f.write ("exit 0\n")
- f.flush()
- os.fsync(f.fileno())
- f.close()
- os.chmod(constants.MASTER_INITD_SCRIPT, 0755)
-
-
class LUInitCluster(LogicalUnit):
"""Initialise the cluster.
HPATH = "cluster-init"
HTYPE = constants.HTYPE_CLUSTER
_OP_REQP = ["cluster_name", "hypervisor_type", "vg_name", "mac_prefix",
- "def_bridge"]
+ "def_bridge", "master_netdev"]
REQ_CLUSTER = False
def BuildHooksEnv(self):
ourselves in the post-run node list.
"""
-
env = {"CLUSTER": self.op.cluster_name,
- "MASTER": self.hostname}
+ "MASTER": self.hostname['hostname_full']}
return env, [], [self.hostname['hostname_full']]
def CheckPrereq(self):
raise errors.OpPrereqError, ("Invalid hypervisor type given '%s'" %
self.op.hypervisor_type)
+ result = utils.RunCmd(["ip", "link", "show", "dev", self.op.master_netdev])
+ if result.failed:
+ raise errors.OpPrereqError, ("Invalid master netdev given (%s): '%s'" %
+ (self.op.master_netdev, result.output))
+
def Exec(self, feedback_fn):
"""Initialize the cluster.
clustername = self.clustername
hostname = self.hostname
- # adds the cluste name file and master startup script
- _InitClusterInterface(clustername['hostname_full'],
- clustername['hostname'],
- clustername['ip'])
-
# set up the simple store
ss = ssconf.SimpleStore()
ss.SetKey(ss.SS_HYPERVISOR, self.op.hypervisor_type)
+ ss.SetKey(ss.SS_MASTER_NODE, hostname['hostname_full'])
+ ss.SetKey(ss.SS_MASTER_IP, clustername['ip'])
+ ss.SetKey(ss.SS_MASTER_NETDEV, self.op.master_netdev)
# set up the inter-node password and certificate
_InitGanetiServerSetup(ss)
Any errors are signalled by raising errors.OpPrereqError.
"""
- master = self.cfg.GetMaster()
+ master = self.sstore.GetMasterNode()
nodelist = self.cfg.GetNodeList()
if len(nodelist) > 0 and nodelist != [master]:
- raise errors.OpPrereqError, ("There are still %d node(s) in "
- "this cluster." % (len(nodelist) - 1))
+ raise errors.OpPrereqError, ("There are still %d node(s) in "
+ "this cluster." % (len(nodelist) - 1))
def Exec(self, feedback_fn):
"""Destroys the cluster.
"""
utils.CreateBackup('/root/.ssh/id_dsa')
utils.CreateBackup('/root/.ssh/id_dsa.pub')
- rpc.call_node_leave_cluster(self.cfg.GetMaster())
+ rpc.call_node_leave_cluster(self.sstore.GetMasterNode())
class LUVerifyCluster(NoHooksLU):
node: name of the node to check
file_list: required list of files
local_cksum: dictionary of local files and their checksums
+
"""
# compares ganeti version
local_version = constants.PROTOCOL_VERSION
bad = True
return bad
-
def _VerifyOrphanInstances(self, instancelist, node_instance, feedback_fn):
"""Verify the list of running instances.
bad = True
return bad
- def _VerifyNodeConfigFiles(self, ismaster, node, file_list, feedback_fn):
- """Verify the list of node config files"""
-
- bad = False
- for file_name in constants.MASTER_CONFIGFILES:
- if ismaster and file_name not in file_list:
- feedback_fn(" - ERROR: master config file %s missing from master"
- " node %s" % (file_name, node))
- bad = True
- elif not ismaster and file_name in file_list:
- feedback_fn(" - ERROR: master config file %s should not exist"
- " on non-master node %s" % (file_name, node))
- bad = True
-
- for file_name in constants.NODE_CONFIGFILES:
- if file_name not in file_list:
- feedback_fn(" - ERROR: config file %s missing from node %s" %
- (file_name, node))
- bad = True
-
- return bad
-
def CheckPrereq(self):
"""Check prerequisites.
feedback_fn("* Verifying global settings")
self.cfg.VerifyConfig()
- master = self.cfg.GetMaster()
+ master = self.sstore.GetMasterNode()
vg_name = self.cfg.GetVGName()
nodelist = utils.NiceSort(self.cfg.GetNodeList())
instancelist = utils.NiceSort(self.cfg.GetInstanceList())
# FIXME: verify OS list
# do local checksums
- file_names = constants.CLUSTER_CONF_FILES
+ file_names = list(self.sstore.GetFileList())
+ file_names.append(constants.SSL_CERT_FILE)
+ file_names.append(constants.CLUSTER_CONF_FILE)
local_checksums = utils.FingerprintFiles(file_names)
feedback_fn("* Gathering data (%d nodes)" % len(nodelist))
- all_configfile = rpc.call_configfile_list(nodelist)
all_volumeinfo = rpc.call_volume_list(nodelist, vg_name)
all_instanceinfo = rpc.call_instance_list(nodelist)
all_vglist = rpc.call_vg_list(nodelist)
all_vglist[node], all_nvinfo[node],
all_rversion[node], feedback_fn)
bad = bad or result
- # node_configfile
- nodeconfigfile = all_configfile[node]
-
- if not nodeconfigfile:
- feedback_fn(" - ERROR: connection to %s failed" % (node))
- bad = True
- continue
-
- bad = bad or self._VerifyNodeConfigFiles(node==master, node,
- nodeconfigfile, feedback_fn)
# node_volume
volumeinfo = all_volumeinfo[node]
"""Check that mirrors are not degraded.
"""
-
cfgw.SetDiskID(dev, node)
result = True
Any errors are signalled by raising errors.OpPrereqError.
"""
-
node = self.cfg.GetNodeInfo(self.cfg.ExpandNodeName(self.op.node_name))
if node is None:
logger.Error("Error: Node '%s' is unknown." % self.op.node_name)
instance_list = self.cfg.GetInstanceList()
- masternode = self.cfg.GetMaster()
+ masternode = self.sstore.GetMasterNode()
if node.name == masternode:
raise errors.OpPrereqError, ("Node is the master node,"
" you need to failover first.")
This checks that the fields required are valid output fields.
"""
- self.static_fields = frozenset(["name", "pinst", "sinst", "pip", "sip"])
self.dynamic_fields = frozenset(["dtotal", "dfree",
"mtotal", "mnode", "mfree"])
- self.all_fields = self.static_fields | self.dynamic_fields
- if not self.all_fields.issuperset(self.op.output_fields):
- raise errors.OpPrereqError, ("Unknown output fields selected: %s"
- % ",".join(frozenset(self.op.output_fields).
- difference(self.all_fields)))
+ _CheckOutputFields(static=["name", "pinst", "sinst", "pip", "sip"],
+ dynamic=self.dynamic_fields,
+ selected=self.op.output_fields)
def Exec(self, feedback_fn):
return output
-def _CheckNodesDirs(node_list, paths):
- """Verify if the given nodes have the same files.
+class LUQueryNodeVolumes(NoHooksLU):
+ """Logical unit for getting volumes on node(s).
- Args:
- node_list: the list of node names to check
- paths: the list of directories to checksum and compare
+ """
+ _OP_REQP = ["nodes", "output_fields"]
- Returns:
- list of (node, different_file, message); if empty, the files are in sync
+ def CheckPrereq(self):
+ """Check prerequisites.
+
+ This checks that the fields required are valid output fields.
+
+ """
+ self.nodes = _GetWantedNodes(self, self.op.nodes)
+
+ _CheckOutputFields(static=["node"],
+ dynamic=["phys", "vg", "name", "size", "instance"],
+ selected=self.op.output_fields)
- """
- file_names = []
- for dir_name in paths:
- flist = [os.path.join(dir_name, name) for name in os.listdir(dir_name)]
- flist = [name for name in flist if os.path.isfile(name)]
- file_names.extend(flist)
-
- local_checksums = utils.FingerprintFiles(file_names)
-
- results = []
- verify_params = {'filelist': file_names}
- all_node_results = rpc.call_node_verify(node_list, verify_params)
- for node_name in node_list:
- node_result = all_node_results.get(node_name, False)
- if not node_result or 'filelist' not in node_result:
- results.append((node_name, "'all files'", "node communication error"))
- continue
- remote_checksums = node_result['filelist']
- for fname in local_checksums:
- if fname not in remote_checksums:
- results.append((node_name, fname, "missing file"))
- elif remote_checksums[fname] != local_checksums[fname]:
- results.append((node_name, fname, "wrong checksum"))
- return results
+
+ def Exec(self, feedback_fn):
+ """Computes the list of nodes and their attributes.
+
+ """
+ nodenames = utils.NiceSort([node.name for node in self.nodes])
+ volumes = rpc.call_node_volumes(nodenames)
+
+ ilist = [self.cfg.GetInstanceInfo(iname) for iname
+ in self.cfg.GetInstanceList()]
+
+ lv_by_node = dict([(inst, inst.MapLVsByNode()) for inst in ilist])
+
+ output = []
+ for node in nodenames:
+ node_vols = volumes[node][:]
+ node_vols.sort(key=lambda vol: vol['dev'])
+
+ for vol in node_vols:
+ node_output = []
+ for field in self.op.output_fields:
+ if field == "node":
+ val = node
+ elif field == "phys":
+ val = vol['dev']
+ elif field == "vg":
+ val = vol['vg']
+ elif field == "name":
+ val = vol['name']
+ elif field == "size":
+ val = int(float(vol['size']))
+ elif field == "instance":
+ for inst in ilist:
+ if node not in lv_by_node[inst]:
+ continue
+ if vol['name'] in lv_by_node[inst][node]:
+ val = inst.name
+ break
+ else:
+ val = '-'
+ else:
+ raise errors.ParameterError, field
+ node_output.append(str(val))
+
+ output.append(node_output)
+
+ return output
class LUAddNode(LogicalUnit):
# check that the type of the node (single versus dual homed) is the
# same as for the master
- myself = cfg.GetNodeInfo(cfg.GetMaster())
+ myself = cfg.GetNodeInfo(self.sstore.GetMasterNode())
master_singlehomed = myself.secondary_ip == myself.primary_ip
newbie_singlehomed = secondary_ip == primary_ip
if master_singlehomed != newbie_singlehomed:
# Distribute updated /etc/hosts and known_hosts to all nodes,
# including the node just added
- myself = self.cfg.GetNodeInfo(self.cfg.GetMaster())
+ myself = self.cfg.GetNodeInfo(self.sstore.GetMasterNode())
dist_nodes = self.cfg.GetNodeList() + [node]
if myself.name in dist_nodes:
dist_nodes.remove(myself.name)
logger.Error("copy of file %s to node %s failed" %
(fname, to_node))
- to_copy = [constants.MASTER_CRON_FILE,
- constants.MASTER_INITD_SCRIPT,
- constants.CLUSTER_NAME_FILE]
- to_copy.extend(ss.GetFileList())
+ to_copy = ss.GetFileList()
for fname in to_copy:
if not ssh.CopyFileToNode(node, fname):
logger.Error("could not copy file %s to node %s" % (fname, node))
"""
self.new_master = socket.gethostname()
- self.old_master = self.cfg.GetMaster()
+ self.old_master = self.sstore.GetMasterNode()
if self.old_master == self.new_master:
raise errors.OpPrereqError, ("This commands must be run on the node"
master.
"""
-
#TODO: do not rely on gethostname returning the FQDN
logger.Info("setting master to %s, old master: %s" %
(self.new_master, self.old_master))
logger.Error("could disable the master role on the old master"
" %s, please disable manually" % self.old_master)
+ ss = self.sstore
+ ss.SetKey(ss.SS_MASTER_NODE, self.new_master)
+ if not rpc.call_upload_file(self.cfg.GetNodeList(),
+ ss.KeyToFilename(ss.SS_MASTER_NODE)):
+ logger.Error("could not distribute the new simple store master file"
+ " to the other nodes, please check.")
+
if not rpc.call_node_start_master(self.new_master):
logger.Error("could not start the master role on the new master"
" %s, please check" % self.new_master)
+ feedback_fn("Error in activating the master IP on the new master,\n"
+ "please fix manually.")
- self.cfg.SetMaster(self.new_master)
class LUQueryClusterInfo(NoHooksLU):
"config_version": constants.CONFIG_VERSION,
"os_api_version": constants.OS_API_VERSION,
"export_version": constants.EXPORT_VERSION,
- "master": self.cfg.GetMaster(),
+ "master": self.sstore.GetMasterNode(),
"architecture": (platform.architecture()[0], platform.machine()),
"instances": [(instance.name, instance.primary_node)
for instance in instances],
"""
if not os.path.exists(self.op.filename):
raise errors.OpPrereqError("No such filename '%s'" % self.op.filename)
- if self.op.nodes:
- nodes = self.op.nodes
- else:
- nodes = self.cfg.GetNodeList()
- self.nodes = []
- for node in nodes:
- nname = self.cfg.ExpandNodeName(node)
- if nname is None:
- raise errors.OpPrereqError, ("Node '%s' is unknown." % node)
- self.nodes.append(nname)
+
+ self.nodes = _GetWantedNodes(self, self.op.nodes)
def Exec(self, feedback_fn):
"""Copy a file from master to some nodes.
It checks that the given list of nodes is valid.
"""
- if self.op.nodes:
- nodes = self.op.nodes
- else:
- nodes = self.cfg.GetNodeList()
- self.nodes = []
- for node in nodes:
- nname = self.cfg.ExpandNodeName(node)
- if nname is None:
- raise errors.OpPrereqError, ("Node '%s' is unknown." % node)
- self.nodes.append(nname)
+ self.nodes = _GetWantedNodes(self, self.op.nodes)
def Exec(self, feedback_fn):
"""Run a command on some nodes.
"""
data = []
for node in self.nodes:
- result = utils.RunCmd(["ssh", node, self.op.command])
- data.append((node, result.cmd, result.output, result.exit_code))
+ result = utils.RunCmd(["ssh", node.name, self.op.command])
+ data.append((node.name, result.cmd, result.output, result.exit_code))
return data
return disks_ok, device_info
+def _StartInstanceDisks(cfg, instance, force):
+ disks_ok, dummy = _AssembleInstanceDisks(instance, cfg,
+ ignore_secondaries=force)
+ if not disks_ok:
+ _ShutdownInstanceDisks(instance, cfg)
+ if force is not None and not force:
+ logger.Error("If the message above refers to a secondary node,"
+ " you can retry the operation using '--force'.")
+ raise errors.OpExecError, ("Disk consistency error")
+
+
class LUDeactivateInstanceDisks(NoHooksLU):
"""Shutdown an instance's disks.
"INSTANCE_SECONDARIES": " ".join(self.instance.secondary_nodes),
"FORCE": self.op.force,
}
- nl = ([self.cfg.GetMaster(), self.instance.primary_node] +
+ nl = ([self.sstore.GetMasterNode(), self.instance.primary_node] +
list(self.instance.secondary_nodes))
return env, nl, nl
(instance.name, node_current, memory,
freememory))
- disks_ok, dummy = _AssembleInstanceDisks(instance, self.cfg,
- ignore_secondaries=force)
- if not disks_ok:
- _ShutdownInstanceDisks(instance, self.cfg)
- if not force:
- logger.Error("If the message above refers to a secondary node,"
- " you can retry the operation using '--force'.")
- raise errors.OpExecError, ("Disk consistency error")
+ _StartInstanceDisks(self.cfg, instance, force)
if not rpc.call_instance_start(node_current, instance, extra_args):
_ShutdownInstanceDisks(instance, self.cfg)
"INSTANCE_PRIMARY": self.instance.primary_node,
"INSTANCE_SECONDARIES": " ".join(self.instance.secondary_nodes),
}
- nl = ([self.cfg.GetMaster(), self.instance.primary_node] +
+ nl = ([self.sstore.GetMasterNode(), self.instance.primary_node] +
list(self.instance.secondary_nodes))
return env, nl, nl
_ShutdownInstanceDisks(instance, self.cfg)
+class LUReinstallInstance(LogicalUnit):
+ """Reinstall an instance.
+
+ """
+ HPATH = "instance-reinstall"
+ HTYPE = constants.HTYPE_INSTANCE
+ _OP_REQP = ["instance_name"]
+
+ def BuildHooksEnv(self):
+ """Build hooks env.
+
+ This runs on master, primary and secondary nodes of the instance.
+
+ """
+ env = {
+ "INSTANCE_NAME": self.op.instance_name,
+ "INSTANCE_PRIMARY": self.instance.primary_node,
+ "INSTANCE_SECONDARIES": " ".join(self.instance.secondary_nodes),
+ }
+ nl = ([self.sstore.GetMasterNode(), self.instance.primary_node] +
+ list(self.instance.secondary_nodes))
+ return env, nl, nl
+
+ def CheckPrereq(self):
+ """Check prerequisites.
+
+ This checks that the instance is in the cluster and is not running.
+
+ """
+ instance = self.cfg.GetInstanceInfo(
+ self.cfg.ExpandInstanceName(self.op.instance_name))
+ if instance is None:
+ raise errors.OpPrereqError, ("Instance '%s' not known" %
+ self.op.instance_name)
+ if instance.disk_template == constants.DT_DISKLESS:
+ raise errors.OpPrereqError, ("Instance '%s' has no disks" %
+ self.op.instance_name)
+ if instance.status != "down":
+ raise errors.OpPrereqError, ("Instance '%s' is marked to be up" %
+ self.op.instance_name)
+ remote_info = rpc.call_instance_info(instance.primary_node, instance.name)
+ if remote_info:
+ raise errors.OpPrereqError, ("Instance '%s' is running on the node %s" %
+ (self.op.instance_name,
+ instance.primary_node))
+ self.instance = instance
+
+ def Exec(self, feedback_fn):
+ """Reinstall the instance.
+
+ """
+ inst = self.instance
+
+ _StartInstanceDisks(self.cfg, inst, None)
+ try:
+ feedback_fn("Running the instance OS create scripts...")
+ if not rpc.call_instance_os_add(inst.primary_node, inst, "sda", "sdb"):
+ raise errors.OpExecError, ("Could not install OS for instance %s "
+ "on node %s" %
+ (inst.name, inst.primary_node))
+ finally:
+ _ShutdownInstanceDisks(inst, self.cfg)
+
+
class LURemoveInstance(LogicalUnit):
"""Remove an instance.
"INSTANCE_PRIMARY": self.instance.primary_node,
"INSTANCE_SECONDARIES": " ".join(self.instance.secondary_nodes),
}
- nl = ([self.cfg.GetMaster(), self.instance.primary_node] +
+ nl = ([self.sstore.GetMasterNode(), self.instance.primary_node] +
list(self.instance.secondary_nodes))
return env, nl, nl
"""Logical unit for querying instances.
"""
- OP_REQP = ["output_fields"]
+ _OP_REQP = ["output_fields"]
def CheckPrereq(self):
"""Check prerequisites.
This checks that the fields required are valid output fields.
"""
-
- self.static_fields = frozenset(["name", "os", "pnode", "snodes",
- "admin_state", "admin_ram",
- "disk_template", "ip", "mac", "bridge"])
self.dynamic_fields = frozenset(["oper_state", "oper_ram"])
- self.all_fields = self.static_fields | self.dynamic_fields
-
- if not self.all_fields.issuperset(self.op.output_fields):
- raise errors.OpPrereqError, ("Unknown output fields selected: %s"
- % ",".join(frozenset(self.op.output_fields).
- difference(self.all_fields)))
+ _CheckOutputFields(static=["name", "os", "pnode", "snodes",
+ "admin_state", "admin_ram",
+ "disk_template", "ip", "mac", "bridge"],
+ dynamic=self.dynamic_fields,
+ selected=self.op.output_fields)
def Exec(self, feedback_fn):
"""Computes the list of nodes and their attributes.
"""
-
instance_names = utils.NiceSort(self.cfg.GetInstanceList())
instance_list = [self.cfg.GetInstanceInfo(iname) for iname
in instance_names]
"INSTANCE_SECONDARIES": " ".join(self.instance.secondary_nodes),
"IGNORE_CONSISTENCY": self.op.ignore_consistency,
}
- nl = [self.cfg.GetMaster()] + list(self.instance.secondary_nodes)
+ nl = [self.sstore.GetMasterNode()] + list(self.instance.secondary_nodes)
return env, nl, nl
def CheckPrereq(self):
if not rpc.call_instance_start(target_node, instance, None):
_ShutdownInstanceDisks(instance, self.cfg)
raise errors.OpExecError("Could not start instance %s on node %s." %
- (instance, target_node))
+ (instance.name, target_node))
def _CreateBlockDevOnPrimary(cfg, node, device):
This always creates all devices.
"""
-
if device.children:
for child in device.children:
if not _CreateBlockDevOnPrimary(cfg, node, child):
if self.inst_ip:
env["INSTANCE_IP"] = self.inst_ip
- nl = ([self.cfg.GetMaster(), self.op.pnode] +
+ nl = ([self.sstore.GetMasterNode(), self.op.pnode] +
self.secondaries)
return env, nl, nl
# check primary node
pnode = self.cfg.GetNodeInfo(self.cfg.ExpandNodeName(self.op.pnode))
if pnode is None:
- raise errors.OpPrereqError, ("Primary node '%s' is uknown" %
+ raise errors.OpPrereqError, ("Primary node '%s' is unknown" %
self.op.pnode)
self.op.pnode = pnode.name
self.pnode = pnode
"NEW_SECONDARY": self.op.remote_node,
"DISK_NAME": self.op.disk_name,
}
- nl = [self.cfg.GetMaster(), self.instance.primary_node,
+ nl = [self.sstore.GetMasterNode(), self.instance.primary_node,
self.op.remote_node,] + list(self.instance.secondary_nodes)
return env, nl, nl
"DISK_ID": self.op.disk_id,
"OLD_SECONDARY": self.old_secondary,
}
- nl = [self.cfg.GetMaster(),
+ nl = [self.sstore.GetMasterNode(),
self.instance.primary_node] + list(self.instance.secondary_nodes)
return env, nl, nl
"NEW_SECONDARY": self.op.remote_node,
"OLD_SECONDARY": self.instance.secondary_nodes[0],
}
- nl = [self.cfg.GetMaster(),
+ nl = [self.sstore.GetMasterNode(),
self.instance.primary_node] + list(self.instance.secondary_nodes)
return env, nl, nl
# start of work
remote_node = self.op.remote_node
cfg = self.cfg
+ vgname = cfg.GetVGName()
for dev in instance.disks:
size = dev.size
- new_drbd = _GenerateMDDRBDBranch(cfg, self.cfg.GetVGName(),
- instance.primary_node, remote_node, size,
+ new_drbd = _GenerateMDDRBDBranch(cfg, vgname, instance.primary_node,
+ remote_node, size,
"%s-%s" % (instance.name, dev.iv_name))
iv_names[dev.iv_name] = (dev, dev.children[0], new_drbd)
logger.Info("adding new mirror component on secondary for %s" %
# call the primary node to add the mirror to md
logger.Info("adding new mirror component to md")
if not rpc.call_blockdev_addchild(instance.primary_node, dev,
- new_drbd):
+ new_drbd):
logger.Error("Can't add mirror compoment to md!")
cfg.SetDiskID(new_drbd, remote_node)
if not rpc.call_blockdev_remove(remote_node, new_drbd):
def Exec(self, feedback_fn):
"""Gather and return data"""
-
result = {}
for instance in self.wanted_instances:
remote_info = rpc.call_instance_info(instance.primary_node,
This only checks the optional node list against the existing names.
"""
- if not isinstance(self.op.nodes, list):
- raise errors.OpPrereqError, "Invalid argument type 'nodes'"
- if self.op.nodes:
- self.wanted_nodes = []
- names = self.op.nodes
- for name in names:
- node = self.cfg.GetNodeInfo(self.cfg.ExpandNodeName(name))
- if node is None:
- raise errors.OpPrereqError, ("No such node name '%s'" % name)
- self.wanted_nodes.append(node)
- else:
- self.wanted_nodes = [self.cfg.GetNodeInfo(name) for name
- in self.cfg.GetNodeList()]
- return
+ self.wanted_nodes = _GetWantedNodes(self, self.op.nodes)
def Exec(self, feedback_fn):
"""Compute and return the list of nodes.
"""
-
ilist = [self.cfg.GetInstanceInfo(iname) for iname
in self.cfg.GetInstanceList()]
result = []
if self.bridge:
env["BRIDGE"] = self.bridge
- nl = [self.cfg.GetMaster(),
+ nl = [self.sstore.GetMasterNode(),
self.instance.primary_node] + list(self.instance.secondary_nodes)
return env, nl, nl
"""Check that the nodelist contains only existing nodes.
"""
- nodes = getattr(self.op, "nodes", None)
- if not nodes:
- self.op.nodes = self.cfg.GetNodeList()
- else:
- expnodes = [self.cfg.ExpandNodeName(node) for node in nodes]
- if expnodes.count(None) > 0:
- raise errors.OpPrereqError, ("At least one of the given nodes %s"
- " is unknown" % self.op.nodes)
- self.op.nodes = expnodes
+ self.nodes = _GetWantedNodes(self, getattr(self.op, "nodes", None))
def Exec(self, feedback_fn):
-
"""Compute the list of all the exported system images.
Returns:
that node.
"""
- return rpc.call_export_list(self.op.nodes)
+ return rpc.call_export_list([node.name for node in self.nodes])
class LUExportInstance(LogicalUnit):
"EXPORT_NODE": self.op.target_node,
"EXPORT_DO_SHUTDOWN": self.op.shutdown,
}
- nl = [self.cfg.GetMaster(), self.instance.primary_node,
+ nl = [self.sstore.GetMasterNode(), self.instance.primary_node,
self.op.target_node]
return env, nl, nl
self.dst_node = self.cfg.GetNodeInfo(dst_node_short)
if self.dst_node is None:
- raise errors.OpPrereqError, ("Destination node '%s' is uknown." %
+ raise errors.OpPrereqError, ("Destination node '%s' is unknown." %
self.op.target_node)
self.op.target_node = self.dst_node.name