--- /dev/null
+#!/usr/bin/python
+#
+
+# Copyright (C) 2006, 2007 Google Inc.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+# 02110-1301, USA.
+
+
+"""Ganeti master script
+
+Exit codes, for both start and stop:
+ - 0: master setup successful
+ - 1: some generic error (this exit code can also be thrown by exceptions)
+ - 11: node is not master, nothing to do
+ - 12: node setup incomplete, cannot start
+ - 13: node should be master, but someone has the ip address already
+
+Only exit codes 0 and 11 represent an ok state. Code 1 was left for
+generic errors as other python code can cause exit with code 1.
+
+"""
+
+import os
+import sys
+import socket
+
+from optparse import OptionParser
+
+from ganeti import logger
+from ganeti import constants
+from ganeti import errors
+from ganeti import ssconf
+from ganeti import utils
+
+EXIT_OK = 0
+EXIT_SOME_ERROR = 1
+EXIT_NOTMASTER = 11
+EXIT_NODESETUP_ERROR = 12
+EXIT_DUPLICATE_IP = 13
+EXIT_ARGS_ERROR = 14
+
+
def ParseOptions():
  """Parse the command line options.

  Returns:
    (options, args) as from OptionParser.parse_args()

  Exits with EXIT_ARGS_ERROR unless exactly one positional argument,
  "start" or "stop", was given.

  """
  version_string = "%%prog (ganeti) %s" % constants.RELEASE_VERSION
  parser = OptionParser(description="Ganeti master",
                        usage="%prog [-d]",
                        version=version_string)
  parser.add_option("-d", "--debug", action="store_true",
                    dest="debug", default=False,
                    help="Enable some debug messages")

  options, args = parser.parse_args()

  valid_actions = ("start", "stop")
  if len(args) != 1 or args[0] not in valid_actions:
    sys.stderr.write("Usage: %s [-d] start|stop\n" % sys.argv[0])
    sys.exit(EXIT_ARGS_ERROR)

  return options, args
+
+
+def CheckNodeSetup(debug):
+ """Checks the node setup.
+
+ If the node setup if ok, this function will return the tuple
+ (master_hostname, master_netdev, master_ip). Otherwise the return
+ value will be None.
+
+ """
+ for fname in (constants.SSL_CERT_FILE,):
+ if not os.path.isfile(fname):
+ if debug:
+ sys.stderr.write("Missing config file %s.\n" % fname)
+ return None
+ try:
+ ss = ssconf.SimpleStore()
+ port = ss.GetNodeDaemonPort()
+ pwdata = ss.GetNodeDaemonPassword()
+ master_name = ss.GetMasterNode()
+ master_netdev = ss.GetMasterNetdev()
+ master_ip = ss.GetMasterIP()
+ except errors.ConfigurationError, err:
+ if debug:
+ sys.stderr.write("Cluster configuration incomplete: '%s'\n" % str(err))
+ return None
+ return (master_name, master_netdev, master_ip)
+
+
def StartMaster(master_netdev, master_ip, debug):
  """Starts the master.

  If the master IP already answers on the network, a second ping
  sourced from 127.0.0.1 decides whether it is configured on this
  node (nothing to do) or on another machine (refuse to start).
  Otherwise the IP is added to master_netdev and gratuitous ARPs
  are sent to update neighbour caches.

  Returns:
    EXIT_OK, EXIT_DUPLICATE_IP or EXIT_SOME_ERROR

  """
  result = utils.RunCmd(["fping", "-q", master_ip])
  if not result.failed:
    # the IP answers; a ping sourced from 127.0.0.1 only succeeds if
    # the address is configured on the local machine
    r2 = utils.RunCmd(["fping", "-q", "-S127.0.0.1", master_ip])
    # BUGFIX: this used to re-test result.failed (always false here),
    # making EXIT_DUPLICATE_IP unreachable; check the second ping
    if not r2.failed:
      # we already have the ip:
      if debug:
        sys.stderr.write("Notice: already started.\n")
      return EXIT_OK
    else:
      return EXIT_DUPLICATE_IP
  result = utils.RunCmd(["ip", "address", "add", "%s/32" % master_ip,
                         "dev", master_netdev, "label",
                         "%s:0" % master_netdev])
  if result.failed:
    if debug:
      sys.stderr.write("Can't activate master IP: %s\n" % result.output)
    return EXIT_SOME_ERROR

  result = utils.RunCmd(["arping", "-q", "-U", "-c 3", "-I", master_netdev,
                         "-s", master_ip, master_ip])
  # we'll ignore the exit code of arping
  return EXIT_OK
+
+
def StopMaster(master_netdev, master_ip, debug):
  """Stops the master.

  Removes the master IP from master_netdev. A failure to remove the
  address is reported in debug mode but otherwise ignored, so the
  return value is always EXIT_OK.

  """
  result = utils.RunCmd(["ip", "address", "del", "%s/32" % master_ip,
                         "dev", master_netdev])
  if result.failed:
    if debug:
      # trailing newline added for consistency with the other debug
      # messages, so the output doesn't run into later writes
      sys.stderr.write("Can't remove the master IP, error: %s\n" %
                       result.output)
    # but otherwise ignore the failure
  return EXIT_OK
+
+
def main():
  """Main function.

  Parses the arguments, verifies the node setup and dispatches to
  the start or stop handler, returning its exit code.

  """
  options, args = ParseOptions()
  debug = options.debug

  setup = CheckNodeSetup(debug)
  if not setup:
    if debug:
      sys.stderr.write("Node configuration incomplete.\n")
    return EXIT_NODESETUP_ERROR

  master_node, master_netdev, master_ip = setup

  starting = (args[0] == "start")
  # only starting requires being the designated master; stopping is
  # allowed on any node
  if starting and socket.gethostname() != master_node:
    if debug:
      sys.stderr.write("Not master, ignoring request.\n")
    return EXIT_NOTMASTER

  if starting:
    return StartMaster(master_netdev, master_ip, debug)
  return StopMaster(master_netdev, master_ip, debug)
+
+
if __name__ == "__main__":
  sys.exit(main())
# ganeti node daemon starter script
# based on skeleton from Debian GNU/Linux
-PATH=/sbin:/bin:/usr/sbin:/usr/bin
+PATH=/sbin:/bin:/usr/sbin:/usr/bin:/usr/local/sbin:/usr/local/bin
DAEMON=/usr/local/sbin/ganeti-noded
NAME=ganeti-noded
SCRIPTNAME=/etc/init.d/ganeti
from ganeti import constants
from ganeti import bdev
from ganeti import objects
+from ganeti import ssconf
def ListConfigFiles():
"""Activate local node as master node.
There are two needed steps for this:
- - register the master init script, and also run it now
+ - run the master script
- register the cron script
"""
- result = utils.RunCmd(["update-rc.d", constants.MASTER_INITD_NAME,
- "defaults", "21", "79"])
-
- if result.failed:
- logger.Error("could not register the master init.d script with command"
- " %s, error %s" % (result.cmd, result.output))
- return False
-
- result = utils.RunCmd([constants.MASTER_INITD_SCRIPT, "start"])
+ result = utils.RunCmd([constants.MASTER_SCRIPT, "-d", "start"])
if result.failed:
logger.Error("could not activate cluster interface with command %s,"
- " error %s" % (result.cmd, result.output))
+ " error: '%s'" % (result.cmd, result.output))
return False
utils.RemoveFile(constants.MASTER_CRON_LINK)
"""Deactivate this node as master.
This does two things:
- - remove links to master's startup script
+ - run the master stop script
- remove link to master cron script.
"""
- result = utils.RunCmd(["update-rc.d", "-f",
- constants.MASTER_INITD_NAME, "remove"])
- if result.failed:
- logger.Error("could not unregister the master script with command"
- " %s, error %s" % (result.cmd, result.output))
- return False
-
- output = utils.RunCmd([constants.MASTER_INITD_SCRIPT, "stop"])
+ result = utils.RunCmd([constants.MASTER_SCRIPT, "-d", "stop"])
if result.failed:
logger.Error("could not deactivate cluster interface with command %s,"
- " error %s" % (result.cmd, result.output))
+ " error: '%s'" % (result.cmd, result.output))
return False
utils.RemoveFile(constants.MASTER_CRON_LINK)
if dirpath == constants.DATA_DIR:
for i in filenames:
os.unlink(os.path.join(dirpath, i))
- utils.RemoveFile(constants.CLUSTER_NAME_FILE)
f = open('/root/.ssh/id_dsa.pub', 'r')
try:
file_name)
return False
- if file_name not in [constants.CLUSTER_CONF_FILE, "/etc/hosts",
- "/etc/ssh/ssh_known_hosts"]:
+ allowed_files = [constants.CLUSTER_CONF_FILE, "/etc/hosts",
+ "/etc/ssh/ssh_known_hosts"]
+ allowed_files.extend(ssconf.SimpleStore().GetFileList())
+ if file_name not in allowed_files:
logger.Error("Filename passed to UploadFile not in allowed"
" upload targets: '%s'" % file_name)
return False
# 02110-1301, USA.
-"""Module implementing the commands used by gnt-* programs."""
+"""Module implementing the master-side code."""
# pylint: disable-msg=W0613,W0201
raise errors.OpPrereqError, ("Cluster not initialized yet,"
" use 'gnt-cluster init' first.")
if self.REQ_MASTER:
- master = cfg.GetMaster()
+ master = sstore.GetMasterNode()
if master != socket.gethostname():
raise errors.OpPrereqError, ("Commands must be run on the master"
" node %s" % master)
(result.cmd, result.exit_code, result.output))
-def _InitClusterInterface(fullname, name, ip):
- """Initialize the master startup script.
-
- """
- f = file(constants.CLUSTER_NAME_FILE, 'w')
- f.write("%s\n" % fullname)
- f.close()
-
- f = file(constants.MASTER_INITD_SCRIPT, 'w')
- f.write ("#!/bin/sh\n")
- f.write ("\n")
- f.write ("# Start Ganeti Master Virtual Address\n")
- f.write ("\n")
- f.write ("DESC=\"Ganeti Master IP\"\n")
- f.write ("MASTERNAME=\"%s\"\n" % name)
- f.write ("MASTERIP=\"%s\"\n" % ip)
- f.write ("case \"$1\" in\n")
- f.write (" start)\n")
- f.write (" if fping -q -c 3 ${MASTERIP} &>/dev/null; then\n")
- f.write (" echo \"$MASTERNAME no-go - there is already a master.\"\n")
- f.write (" rm -f %s\n" % constants.MASTER_CRON_LINK)
- f.write (" scp ${MASTERNAME}:%s %s\n" %
- (constants.CLUSTER_CONF_FILE, constants.CLUSTER_CONF_FILE))
- f.write (" else\n")
- f.write (" echo -n \"Starting $DESC: \"\n")
- f.write (" ip address add ${MASTERIP}/32 dev xen-br0"
- " label xen-br0:0\n")
- f.write (" arping -q -U -c 3 -I xen-br0 -s ${MASTERIP} ${MASTERIP}\n")
- f.write (" echo \"$MASTERNAME.\"\n")
- f.write (" fi\n")
- f.write (" ;;\n")
- f.write (" stop)\n")
- f.write (" echo -n \"Stopping $DESC: \"\n")
- f.write (" ip address del ${MASTERIP}/32 dev xen-br0\n")
- f.write (" echo \"$MASTERNAME.\"\n")
- f.write (" ;;\n")
- f.write (" *)\n")
- f.write (" echo \"Usage: $0 {start|stop}\" >&2\n")
- f.write (" exit 1\n")
- f.write (" ;;\n")
- f.write ("esac\n")
- f.write ("\n")
- f.write ("exit 0\n")
- f.flush()
- os.fsync(f.fileno())
- f.close()
- os.chmod(constants.MASTER_INITD_SCRIPT, 0755)
-
-
class LUInitCluster(LogicalUnit):
"""Initialise the cluster.
HPATH = "cluster-init"
HTYPE = constants.HTYPE_CLUSTER
_OP_REQP = ["cluster_name", "hypervisor_type", "vg_name", "mac_prefix",
- "def_bridge"]
+ "def_bridge", "master_netdev"]
REQ_CLUSTER = False
def BuildHooksEnv(self):
"""
env = {"CLUSTER": self.op.cluster_name,
- "MASTER": self.hostname}
+ "MASTER": self.hostname['hostname_full']}
return env, [], [self.hostname['hostname_full']]
def CheckPrereq(self):
raise errors.OpPrereqError, ("Invalid hypervisor type given '%s'" %
self.op.hypervisor_type)
+ result = utils.RunCmd(["ip", "link", "show", "dev", self.op.master_netdev])
+ if result.failed:
+ raise errors.OpPrereqError, ("Invalid master netdev given (%s): '%s'" %
+ (self.op.master_netdev, result.output))
+
def Exec(self, feedback_fn):
"""Initialize the cluster.
clustername = self.clustername
hostname = self.hostname
- # adds the cluste name file and master startup script
- _InitClusterInterface(clustername['hostname_full'],
- clustername['hostname'],
- clustername['ip'])
-
# set up the simple store
ss = ssconf.SimpleStore()
ss.SetKey(ss.SS_HYPERVISOR, self.op.hypervisor_type)
+ ss.SetKey(ss.SS_MASTER_NODE, hostname['hostname_full'])
+ ss.SetKey(ss.SS_MASTER_IP, clustername['ip'])
+ ss.SetKey(ss.SS_MASTER_NETDEV, self.op.master_netdev)
# set up the inter-node password and certificate
_InitGanetiServerSetup(ss)
Any errors are signalled by raising errors.OpPrereqError.
"""
- master = self.cfg.GetMaster()
+ master = self.sstore.GetMasterNode()
nodelist = self.cfg.GetNodeList()
if len(nodelist) > 0 and nodelist != [master]:
- raise errors.OpPrereqError, ("There are still %d node(s) in "
- "this cluster." % (len(nodelist) - 1))
+ raise errors.OpPrereqError, ("There are still %d node(s) in "
+ "this cluster." % (len(nodelist) - 1))
def Exec(self, feedback_fn):
"""Destroys the cluster.
"""
utils.CreateBackup('/root/.ssh/id_dsa')
utils.CreateBackup('/root/.ssh/id_dsa.pub')
- rpc.call_node_leave_cluster(self.cfg.GetMaster())
+ rpc.call_node_leave_cluster(self.sstore.GetMasterNode())
class LUVerifyCluster(NoHooksLU):
feedback_fn("* Verifying global settings")
self.cfg.VerifyConfig()
- master = self.cfg.GetMaster()
+ master = self.sstore.GetMasterNode()
vg_name = self.cfg.GetVGName()
nodelist = utils.NiceSort(self.cfg.GetNodeList())
instancelist = utils.NiceSort(self.cfg.GetInstanceList())
instance_list = self.cfg.GetInstanceList()
- masternode = self.cfg.GetMaster()
+ masternode = self.sstore.GetMasterNode()
if node.name == masternode:
raise errors.OpPrereqError, ("Node is the master node,"
" you need to failover first.")
# check that the type of the node (single versus dual homed) is the
# same as for the master
- myself = cfg.GetNodeInfo(cfg.GetMaster())
+ myself = cfg.GetNodeInfo(self.sstore.GetMasterNode())
master_singlehomed = myself.secondary_ip == myself.primary_ip
newbie_singlehomed = secondary_ip == primary_ip
if master_singlehomed != newbie_singlehomed:
# Distribute updated /etc/hosts and known_hosts to all nodes,
# including the node just added
- myself = self.cfg.GetNodeInfo(self.cfg.GetMaster())
+ myself = self.cfg.GetNodeInfo(self.sstore.GetMasterNode())
dist_nodes = self.cfg.GetNodeList() + [node]
if myself.name in dist_nodes:
dist_nodes.remove(myself.name)
logger.Error("copy of file %s to node %s failed" %
(fname, to_node))
- to_copy = [constants.MASTER_CRON_FILE,
- constants.MASTER_INITD_SCRIPT,
- constants.CLUSTER_NAME_FILE]
+ to_copy = [constants.MASTER_CRON_FILE]
to_copy.extend(ss.GetFileList())
for fname in to_copy:
if not ssh.CopyFileToNode(node, fname):
"""
self.new_master = socket.gethostname()
- self.old_master = self.cfg.GetMaster()
+ self.old_master = self.sstore.GetMasterNode()
if self.old_master == self.new_master:
raise errors.OpPrereqError, ("This commands must be run on the node"
logger.Error("could disable the master role on the old master"
" %s, please disable manually" % self.old_master)
+ ss = self.sstore
+ ss.SetKey(ss.SS_MASTER_NODE, self.new_master)
+ if not rpc.call_upload_file(self.cfg.GetNodeList(),
+ ss.KeyToFilename(ss.SS_MASTER_NODE)):
+ logger.Error("could not distribute the new simple store master file"
+ " to the other nodes, please check.")
+
if not rpc.call_node_start_master(self.new_master):
logger.Error("could not start the master role on the new master"
" %s, please check" % self.new_master)
+ feedback_fn("Error in activating the master IP on the new master,\n"
+ "please fix manually.")
- self.cfg.SetMaster(self.new_master)
class LUQueryClusterInfo(NoHooksLU):
"config_version": constants.CONFIG_VERSION,
"os_api_version": constants.OS_API_VERSION,
"export_version": constants.EXPORT_VERSION,
- "master": self.cfg.GetMaster(),
+ "master": self.sstore.GetMasterNode(),
"architecture": (platform.architecture()[0], platform.machine()),
"instances": [(instance.name, instance.primary_node)
for instance in instances],
"INSTANCE_SECONDARIES": " ".join(self.instance.secondary_nodes),
"FORCE": self.op.force,
}
- nl = ([self.cfg.GetMaster(), self.instance.primary_node] +
+ nl = ([self.sstore.GetMasterNode(), self.instance.primary_node] +
list(self.instance.secondary_nodes))
return env, nl, nl
"INSTANCE_PRIMARY": self.instance.primary_node,
"INSTANCE_SECONDARIES": " ".join(self.instance.secondary_nodes),
}
- nl = ([self.cfg.GetMaster(), self.instance.primary_node] +
+ nl = ([self.sstore.GetMasterNode(), self.instance.primary_node] +
list(self.instance.secondary_nodes))
return env, nl, nl
"INSTANCE_PRIMARY": self.instance.primary_node,
"INSTANCE_SECONDARIES": " ".join(self.instance.secondary_nodes),
}
- nl = ([self.cfg.GetMaster(), self.instance.primary_node] +
+ nl = ([self.sstore.GetMasterNode(), self.instance.primary_node] +
list(self.instance.secondary_nodes))
return env, nl, nl
"INSTANCE_SECONDARIES": " ".join(self.instance.secondary_nodes),
"IGNORE_CONSISTENCY": self.op.ignore_consistency,
}
- nl = [self.cfg.GetMaster()] + list(self.instance.secondary_nodes)
+ nl = [self.sstore.GetMasterNode()] + list(self.instance.secondary_nodes)
return env, nl, nl
def CheckPrereq(self):
if self.inst_ip:
env["INSTANCE_IP"] = self.inst_ip
- nl = ([self.cfg.GetMaster(), self.op.pnode] +
+ nl = ([self.sstore.GetMasterNode(), self.op.pnode] +
self.secondaries)
return env, nl, nl
"NEW_SECONDARY": self.op.remote_node,
"DISK_NAME": self.op.disk_name,
}
- nl = [self.cfg.GetMaster(), self.instance.primary_node,
+ nl = [self.sstore.GetMasterNode(), self.instance.primary_node,
self.op.remote_node,] + list(self.instance.secondary_nodes)
return env, nl, nl
"DISK_ID": self.op.disk_id,
"OLD_SECONDARY": self.old_secondary,
}
- nl = [self.cfg.GetMaster(),
+ nl = [self.sstore.GetMasterNode(),
self.instance.primary_node] + list(self.instance.secondary_nodes)
return env, nl, nl
"NEW_SECONDARY": self.op.remote_node,
"OLD_SECONDARY": self.instance.secondary_nodes[0],
}
- nl = [self.cfg.GetMaster(),
+ nl = [self.sstore.GetMasterNode(),
self.instance.primary_node] + list(self.instance.secondary_nodes)
return env, nl, nl
# start of work
remote_node = self.op.remote_node
cfg = self.cfg
+ vgname = cfg.GetVGName()
for dev in instance.disks:
size = dev.size
- new_drbd = _GenerateMDDRBDBranch(cfg, self.cfg.GetVGName(),
- instance.primary_node, remote_node, size,
+ new_drbd = _GenerateMDDRBDBranch(cfg, vgname, instance.primary_node,
+ remote_node, size,
"%s-%s" % (instance.name, dev.iv_name))
iv_names[dev.iv_name] = (dev, dev.children[0], new_drbd)
logger.Info("adding new mirror component on secondary for %s" %
# call the primary node to add the mirror to md
logger.Info("adding new mirror component to md")
if not rpc.call_blockdev_addchild(instance.primary_node, dev,
- new_drbd):
+ new_drbd):
logger.Error("Can't add mirror compoment to md!")
cfg.SetDiskID(new_drbd, remote_node)
if not rpc.call_blockdev_remove(remote_node, new_drbd):
if self.bridge:
env["BRIDGE"] = self.bridge
- nl = [self.cfg.GetMaster(),
+ nl = [self.sstore.GetMasterNode(),
self.instance.primary_node] + list(self.instance.secondary_nodes)
return env, nl, nl
"EXPORT_NODE": self.op.target_node,
"EXPORT_DO_SHUTDOWN": self.op.shutdown,
}
- nl = [self.cfg.GetMaster(), self.instance.primary_node,
+ nl = [self.sstore.GetMasterNode(), self.instance.primary_node,
self.op.target_node]
return env, nl, nl
self._OpenConfig()
self._ReleaseLock()
return self._config_data.cluster.mac_prefix
-
- def GetMaster(self):
- """Get the name of the master.
-
- """
- self._OpenConfig()
- self._ReleaseLock()
- return self._config_data.cluster.master_node
-
- def SetMaster(self, master_node):
- """Change the master of the cluster.
-
- As with all changes, the configuration data will be distributed to
- all nodes.
-
- This function is used for manual master failover.
-
- """
- self._OpenConfig()
- self._config_data.cluster.master_node = master_node
- self._WriteConfig()
- self._ReleaseLock()
# file paths
DATA_DIR = "/var/lib/ganeti"
CLUSTER_CONF_FILE = DATA_DIR + "/config.data"
-CLUSTER_NAME_FILE = DATA_DIR + "/cluster-name"
SSL_CERT_FILE = DATA_DIR + "/server.pem"
HYPERCONF_FILE = DATA_DIR + "/hypervisor"
WATCHER_STATEFILE = DATA_DIR + "/restart_state"
DEFAULT_NODED_PORT = 1811
FIRST_DRBD_PORT = 11000
LAST_DRBD_PORT = 14999
-MASTER_INITD_SCRIPT = "/etc/init.d/ganeti-master"
-MASTER_INITD_NAME = "ganeti-master"
+MASTER_SCRIPT = "ganeti-master"
LOG_DIR = "/var/log/ganeti"
LOG_OS_DIR = LOG_DIR + "/os"
MASTER_CRON_FILE,
]
-MASTER_CONFIGFILES = [MASTER_CRON_LINK,
- "/etc/rc2.d/S21%s" % MASTER_INITD_NAME]
+MASTER_CONFIGFILES = [MASTER_CRON_LINK,]
NODE_CONFIGFILES = [NODE_INITD_SCRIPT,
"/etc/rc2.d/S20%s" % NODE_INITD_NAME,
lu.CheckPrereq()
do_hooks = lu_class.HPATH is not None
if do_hooks:
- hm = HooksMaster(rpc.call_hooks_runner, self.cfg, lu)
+ hm = HooksMaster(rpc.call_hooks_runner, self.cfg, self.sstore, lu)
hm.RunPhase(constants.HOOKS_PHASE_PRE)
result = lu.Exec(feedback_fn)
if do_hooks:
lu = lu_class(self, op, self.cfg, self.sstore)
lu.CheckPrereq()
#if do_hooks:
- # hm = HooksMaster(rpc.call_hooks_runner, self.cfg, lu)
+ # hm = HooksMaster(rpc.call_hooks_runner, self.cfg, self.sstore, lu)
# hm.RunPhase(constants.HOOKS_PHASE_PRE)
result = lu.Exec(feedback_fn)
#if do_hooks:
which behaves the same works.
"""
- def __init__(self, callfn, cfg, lu):
+ def __init__(self, callfn, cfg, sstore, lu):
self.callfn = callfn
self.cfg = cfg
+ self.sstore = sstore
self.lu = lu
self.op = lu.op
self.hpath = self.lu.HPATH
if self.cfg is not None:
env["GANETI_CLUSTER"] = self.cfg.GetClusterName()
- env["GANETI_MASTER"] = self.cfg.GetMaster()
+ if self.sstore is not None:
+ env["GANETI_MASTER"] = self.sstore.GetMasterNode()
for key in env:
if not isinstance(env[key], str):
"""Initialise the cluster."""
OP_ID = "OP_CLUSTER_INIT"
__slots__ = ["cluster_name", "secondary_ip", "hypervisor_type",
- "vg_name", "mac_prefix", "def_bridge"]
+ "vg_name", "mac_prefix", "def_bridge", "master_netdev"]
class OpDestroyCluster(OpCode):
_SS_FILEPREFIX = "ssconf_"
SS_HYPERVISOR = "hypervisor"
SS_NODED_PASS = "node_pass"
- _VALID_KEYS = (SS_HYPERVISOR, SS_NODED_PASS,)
+ SS_MASTER_NODE = "master_node"
+ SS_MASTER_IP = "master_ip"
+ SS_MASTER_NETDEV = "master_netdev"
+ _VALID_KEYS = (SS_HYPERVISOR, SS_NODED_PASS, SS_MASTER_NODE, SS_MASTER_IP,
+ SS_MASTER_NETDEV)
_MAX_SIZE = 4096
def __init__(self, cfg_location=None):
"""
return self._ReadFile(self.SS_NODED_PASS)
+ def GetMasterNode(self):
+ """Get the hostname of the master node for this cluster.
+
+ """
+ return self._ReadFile(self.SS_MASTER_NODE)
+
+ def GetMasterIP(self):
+ """Get the IP of the master node for this cluster.
+
+ """
+ return self._ReadFile(self.SS_MASTER_IP)
+
+ def GetMasterNetdev(self):
+ """Get the netdev to which we'll add the master ip.
+
+ """
+ return self._ReadFile(self.SS_MASTER_NETDEV)
+
def SetKey(self, key, value):
"""Set the value of a key.
hypervisor_type=opts.hypervisor_type,
vg_name=opts.vg_name,
mac_prefix=opts.mac_prefix,
- def_bridge=opts.def_bridge)
+ def_bridge=opts.def_bridge,
+ master_netdev=opts.master_netdev)
SubmitOpCode(op)
return 0
" to connect the instances to [xen-br0]",
metavar="BRIDGE",
default="xen-br0",),
+ make_option("--master-netdev", dest="master_netdev",
+ help="Specify the node interface (cluster-wide)"
+ " on which the master IP address will be added "
+ " [xen-br0]",
+ metavar="NETDEV",
+ default="xen-br0",),
],
"[opts...] <cluster_name>",
"Initialises a new cluster configuration"),
from ganeti import cmdlib
from ganeti.constants import HKR_SUCCESS, HKR_FAIL, HKR_SKIP
-from fake_config import FakeConfig
+from mocks import FakeConfig, FakeSStore
class FakeLU(cmdlib.LogicalUnit):
HPATH = "test"
def testTotalFalse(self):
"""Test complete rpc failure"""
cfg = FakeConfig()
+ sstore = FakeSStore()
op = opcodes.OpCode()
- lu = FakeLU(None, op, cfg, None)
- hm = mcpu.HooksMaster(self._call_false, cfg, lu)
+ lu = FakeLU(None, op, cfg, sstore)
+ hm = mcpu.HooksMaster(self._call_false, cfg, sstore, lu)
self.failUnlessRaises(errors.HooksFailure,
hm.RunPhase, constants.HOOKS_PHASE_PRE)
hm.RunPhase(constants.HOOKS_PHASE_POST)
def testIndividualFalse(self):
"""Test individual rpc failure"""
cfg = FakeConfig()
+ sstore = FakeSStore()
op = opcodes.OpCode()
- lu = FakeLU(None, op, cfg, None)
- hm = mcpu.HooksMaster(self._call_nodes_false, cfg, lu)
+ lu = FakeLU(None, op, cfg, sstore)
+ hm = mcpu.HooksMaster(self._call_nodes_false, cfg, sstore, lu)
self.failUnlessRaises(errors.HooksFailure,
hm.RunPhase, constants.HOOKS_PHASE_PRE)
hm.RunPhase(constants.HOOKS_PHASE_POST)
"""Test individual rpc failure"""
cfg = FakeConfig()
op = opcodes.OpCode()
- lu = FakeLU(None, op, cfg, None)
- hm = mcpu.HooksMaster(self._call_script_fail, cfg, lu)
+ sstore = FakeSStore()
+ lu = FakeLU(None, op, cfg, sstore)
+ hm = mcpu.HooksMaster(self._call_script_fail, cfg, sstore, lu)
self.failUnlessRaises(errors.HooksAbort,
hm.RunPhase, constants.HOOKS_PHASE_PRE)
hm.RunPhase(constants.HOOKS_PHASE_POST)
"""Test individual rpc failure"""
cfg = FakeConfig()
op = opcodes.OpCode()
- lu = FakeLU(None, op, cfg, None)
- hm = mcpu.HooksMaster(self._call_script_succeed, cfg, lu)
+ sstore = FakeSStore()
+ lu = FakeLU(None, op, cfg, sstore)
+ hm = mcpu.HooksMaster(self._call_script_succeed, cfg, sstore, lu)
for phase in (constants.HOOKS_PHASE_PRE, constants.HOOKS_PHASE_POST):
hm.RunPhase(phase)
def GetMaster(self):
return socket.gethostname()
+
+
class FakeSStore:
  """Fake simple store for unit tests.

  Stands in for ssconf.SimpleStore in code paths that only need the
  master node name; reports the local host as the master.

  """

  def GetMasterNode(self):
    """Return the local hostname as the fake master node name."""
    return socket.gethostname()