TEST_FILES = \
test/data/bdev-both.txt \
+ test/data/bdev-8.3-both.txt \
test/data/bdev-disk.txt \
test/data/bdev-net.txt \
- test/data/proc_drbd8.txt
+ test/data/proc_drbd8.txt \
+ test/data/proc_drbd83.txt
dist_TESTS = \
test/ganeti.bdev_unittest.py \
TESTS = $(dist_TESTS) $(nodist_TESTS)
-TESTS_ENVIRONMENT = PYTHONPATH=.:$(top_builddir)
+TESTS_ENVIRONMENT = PYTHONPATH=.:$(top_builddir) $(PYTHON)
RAPI_RESOURCES = $(wildcard lib/rapi/*.py)
import Queue
import random
import signal
-import simplejson
import logging
from cStringIO import StringIO
from ganeti import workerpool
from ganeti import rpc
from ganeti import bootstrap
+from ganeti import serializer
CLIENT_REQUEST_WORKERS = 16
logging.debug("client closed connection")
break
- request = simplejson.loads(msg)
+ request = serializer.LoadJson(msg)
logging.debug("request: %s", request)
if not isinstance(request, dict):
logging.error("wrong request received: %s", msg)
luxi.KEY_RESULT: result,
}
logging.debug("response: %s", response)
- self.send_message(simplejson.dumps(response))
+ self.send_message(serializer.DumpJson(response))
def read_message(self):
while not self._msgs:
"""
parser = OptionParser(description="Ganeti node daemon",
- usage="%prog [-f] [-d]",
+ usage="%prog [-f] [-d] [-b ADDRESS]",
version="%%prog (ganeti) %s" %
constants.RELEASE_VERSION)
parser.add_option("-d", "--debug", dest="debug",
help="Enable some debug messages",
default=False, action="store_true")
+ parser.add_option("-b", "--bind", dest="bind_address",
+ help="Bind address",
+ default="", metavar="ADDRESS")
+
options, args = parser.parse_args()
return options, args
queue_lock = jstore.InitAndVerifyQueue(must_lock=False)
mainloop = daemon.Mainloop()
- server = NodeHttpServer(mainloop, "", port,
+ server = NodeHttpServer(mainloop, options.bind_address, port,
ssl_params=ssl_params, ssl_verify_peer=True)
server.Start()
try:
return not result.failed
+def EnsureDaemon(daemon):
+ """Check for and start daemon if not alive.
+
+ """
+ pidfile = utils.DaemonPidFileName(daemon)
+ pid = utils.ReadPidFile(pidfile)
+ if pid == 0 or not utils.IsProcessAlive(pid): # no file or dead pid
+ logging.debug("Daemon '%s' not alive, trying to restart", daemon)
+ result = utils.RunCmd([daemon])
+ if not result:
+ logging.error("Can't start daemon '%s', failure %s, output: %s",
+ daemon, result.fail_reason, result.output)
+
+
class WatcherState(object):
"""Interface to a state file recording restart attempts.
all_results = cli.PollJob(job_id, cl=client, feedback_fn=logging.debug)
+ logging.debug("Got data from cluster, writing instance status file")
+
result = all_results[0]
smap = {}
instances = {}
+
+ # write the upfile
+ up_data = "".join(["%s %s\n" % (fields[0], fields[1]) for fields in result])
+ utils.WriteFile(file_name=constants.INSTANCE_UPFILE, data=up_data)
+
for fields in result:
(name, status, autostart, snodes) = fields
master = client.QueryConfigValues(["master_node"])[0]
if master != utils.HostInfo().name:
raise NotMasterError("This is not the master node")
+ # first archive old jobs
+ self.ArchiveJobs(opts.job_age)
+ # and only then submit new ones
self.instances, self.bootids, self.smap = GetClusterData()
self.started_instances = set()
self.opts = opts
"""
notepad = self.notepad
- self.ArchiveJobs(self.opts.job_age)
self.CheckInstances(notepad)
self.CheckDisks(notepad)
self.VerifyDisks()
- def ArchiveJobs(self, age):
+ @staticmethod
+ def ArchiveJobs(age):
"""Archive old jobs.
"""
utils.SetupLogging(constants.LOG_WATCHER, debug=options.debug,
stderr_logging=options.debug)
- update_file = True
+ update_file = False
try:
+ # on master or not, try to start the node dameon (use _PID but is
+ # the same as daemon name)
+ EnsureDaemon(constants.NODED_PID)
+
notepad = WatcherState()
try:
try:
except errors.OpPrereqError:
# this is, from cli.GetClient, a not-master case
logging.debug("Not on master, exiting")
+ update_file = True
sys.exit(constants.EXIT_SUCCESS)
except luxi.NoMasterError, err:
logging.warning("Master seems to be down (%s), trying to restart",
str(err))
if not StartMaster():
logging.critical("Can't start the master, exiting")
- update_file = False
sys.exit(constants.EXIT_FAILURE)
# else retry the connection
client = cli.GetClient()
+ # we are on master now (use _PID but is the same as daemon name)
+ EnsureDaemon(constants.RAPI_PID)
+
try:
watcher = Watcher(options, notepad)
except errors.ConfigurationError:
# Just exit if there's no configuration
+ update_file = True
sys.exit(constants.EXIT_SUCCESS)
watcher.Run()
+ update_file = True
+
finally:
if update_file:
notepad.Save()
except errors.ResolverError, err:
logging.error("Cannot resolve hostname '%s', exiting.", err.args[0])
sys.exit(constants.EXIT_NODESETUP_ERROR)
+ except errors.JobQueueFull:
+ logging.error("Job queue is full, can't query cluster state")
+ except errors.JobQueueDrainError:
+ logging.error("Job queue is drained, can't maintain cluster state")
except Exception, err:
logging.error(str(err), exc_info=True)
sys.exit(constants.EXIT_FAILURE)
GANETIRUNDIR="@LOCALSTATEDIR@/run/ganeti"
+GANETI_DEFAULTS_FILE="@SYSCONFDIR@/default/ganeti"
+
NODED_NAME="ganeti-noded"
NODED="@PREFIX@/sbin/${NODED_NAME}"
NODED_PID="${GANETIRUNDIR}/${NODED_NAME}.pid"
+NODED_ARGS=""
MASTERD_NAME="ganeti-masterd"
MASTERD="@PREFIX@/sbin/${MASTERD_NAME}"
MASTERD_PID="${GANETIRUNDIR}/${MASTERD_NAME}.pid"
+MASTERD_ARGS=""
RAPI_NAME="ganeti-rapi"
RAPI="@PREFIX@/sbin/${RAPI_NAME}"
RAPI_PID="${GANETIRUNDIR}/${RAPI_NAME}.pid"
+RAPI_ARGS=""
SCRIPTNAME="@SYSCONFDIR@/init.d/ganeti"
. /lib/lsb/init-functions
+if [ -s $GANETI_DEFAULTS_FILE ]; then
+ . $GANETI_DEFAULTS_FILE
+fi
+
check_config() {
for fname in \
"@LOCALSTATEDIR@/lib/ganeti/server.pem"
start)
log_daemon_msg "Starting $DESC" "$NAME"
check_config
- start_action $NODED $NODED_PID
- start_action $MASTERD $MASTERD_PID
- start_action $RAPI $RAPI_PID
- ;;
+ start_action $NODED $NODED_PID $NODED_ARGS
+ start_action $MASTERD $MASTERD_PID $MASTERD_ARGS
+ start_action $RAPI $RAPI_PID $RAPI_ARGS
+ ;;
stop)
log_daemon_msg "Stopping $DESC" "$NAME"
stop_action $RAPI $RAPI_PID
stop_action $MASTERD $MASTERD_PID
stop_action $NODED $NODED_PID
- ;;
+ ;;
restart|force-reload)
log_daemon_msg "Reloading $DESC"
stop_action $RAPI $RAPI_PID
start_action $NODED $NODED_PID
start_action $MASTERD $MASTERD_PID
start_action $RAPI $RAPI_PID
- ;;
+ ;;
*)
log_success_msg "Usage: $SCRIPTNAME {start|stop|force-reload|restart}"
exit 1
- ;;
+ ;;
esac
exit 0
result['NIC_%d_FRONTEND_TYPE' % idx] = \
instance.hvparams[constants.HV_NIC_TYPE]
+ for source, kind in [(instance.beparams, "BE"), (instance.hvparams, "HV")]:
+ for key, value in source.items():
+ result["INSTANCE_%s_%s" % (kind, key)] = str(value)
+
return result
def BlockdevGrow(disk, amount):
"""
UNCONF_RE = re.compile(r"\s*[0-9]+:\s*cs:Unconfigured$")
- LINE_RE = re.compile(r"\s*[0-9]+:\s*cs:(\S+)\s+st:([^/]+)/(\S+)"
+ LINE_RE = re.compile(r"\s*[0-9]+:\s*cs:(\S+)\s+(?:st|ro):([^/]+)/(\S+)"
"\s+ds:([^/]+)/(\S+)\s+.*$")
SYNC_RE = re.compile(r"^.*\ssync'ed:\s*([0-9.]+)%.*"
"\sfinish: ([0-9]+):([0-9]+):([0-9]+)\s.*$")
# value types
value = pyp.Word(pyp.alphanums + '_-/.:')
quoted = dbl_quote + pyp.CharsNotIn('"') + dbl_quote
- addr_port = (pyp.Word(pyp.nums + '.') + pyp.Literal(':').suppress() +
- number)
+ addr_type = (pyp.Optional(pyp.Literal("ipv4")).suppress() +
+ pyp.Optional(pyp.Literal("ipv6")).suppress())
+ addr_port = (addr_type + pyp.Word(pyp.nums + '.') +
+ pyp.Literal(':').suppress() + number)
# meta device, extended syntax
meta_value = ((value ^ quoted) + pyp.Literal('[').suppress() +
number + pyp.Word(']').suppress())
+ # device name, extended syntax
+ device_value = pyp.Literal("minor").suppress() + number
# a statement
stmt = (~rbrace + keyword + ~lbrace +
- pyp.Optional(addr_port ^ value ^ quoted ^ meta_value) +
+ pyp.Optional(addr_port ^ value ^ quoted ^ meta_value ^
+ device_value) +
pyp.Optional(defa) + semi +
pyp.Optional(pyp.restOfLine).suppress())
import os
import os.path
-import sha
import re
import logging
import tempfile
format = separator.replace("%", "%%").join(format_fields)
for row in data:
+ if row is None:
+ continue
for idx, val in enumerate(row):
if unitfields.Matches(fields[idx]):
try:
for line in data:
args = []
+ if line is None:
+ line = ['-' for _ in fields]
for idx in xrange(len(fields)):
if separator is None:
args.append(mlens[idx])
import os
import os.path
-import sha
import time
import tempfile
import re
def _BuildInstanceHookEnv(name, primary_node, secondary_nodes, os_type, status,
- memory, vcpus, nics, disk_template, disks):
+ memory, vcpus, nics, disk_template, disks,
+ bep, hvp, hypervisor):
"""Builds instance related env variables for hooks
This builds the hook environment from individual variables.
@param disk_template: the distk template of the instance
@type disks: list
@param disks: the list of (size, mode) pairs
+ @type bep: dict
+ @param bep: the backend parameters for the instance
+ @type hvp: dict
+ @param hvp: the hypervisor parameters for the instance
+ @type hypervisor: string
+ @param hypervisor: the hypervisor for the instance
@rtype: dict
@return: the hook environment for this instance
"INSTANCE_MEMORY": memory,
"INSTANCE_VCPUS": vcpus,
"INSTANCE_DISK_TEMPLATE": disk_template,
+ "INSTANCE_HYPERVISOR": hypervisor,
}
if nics:
env["INSTANCE_DISK_COUNT"] = disk_count
+ for source, kind in [(bep, "BE"), (hvp, "HV")]:
+ for key, value in source.items():
+ env["INSTANCE_%s_%s" % (kind, key)] = value
+
return env
@return: the hook environment dictionary
"""
- bep = lu.cfg.GetClusterInfo().FillBE(instance)
+ cluster = lu.cfg.GetClusterInfo()
+ bep = cluster.FillBE(instance)
+ hvp = cluster.FillHV(instance)
args = {
'name': instance.name,
'primary_node': instance.primary_node,
'nics': [(nic.ip, nic.bridge, nic.mac) for nic in instance.nics],
'disk_template': instance.disk_template,
'disks': [(disk.size, disk.mode) for disk in instance.disks],
+ 'bep': bep,
+ 'hvp': hvp,
+ 'hypervisor': instance.hypervisor,
}
if override:
args.update(override)
"""
if self.op.vg_name is not None:
- if self.op.vg_name != self.cfg.GetVGName():
- self.cfg.SetVGName(self.op.vg_name)
+ new_volume = self.op.vg_name
+ if not new_volume:
+ new_volume = None
+ if new_volume != self.cfg.GetVGName():
+ self.cfg.SetVGName(new_volume)
else:
feedback_fn("Cluster LVM configuration already in desired"
" state, not changing")
for hypervisor in cluster.enabled_hypervisors]),
"beparams": cluster.beparams,
"candidate_pool_size": cluster.candidate_pool_size,
+ "default_bridge": cluster.default_bridge,
+ "master_netdev": cluster.master_netdev,
+ "volume_group_name": cluster.volume_group_name,
+ "file_storage_dir": cluster.file_storage_dir,
}
return result
assert self.instance is not None, \
"Cannot retrieve locked instance %s" % self.op.instance_name
+ # extra beparams
+ self.beparams = getattr(self.op, "beparams", {})
+ if self.beparams:
+ if not isinstance(self.beparams, dict):
+ raise errors.OpPrereqError("Invalid beparams passed: %s, expected"
+ " dict" % (type(self.beparams), ))
+ # fill the beparams dict
+ utils.ForceDictType(self.beparams, constants.BES_PARAMETER_TYPES)
+ self.op.beparams = self.beparams
+
+ # extra hvparams
+ self.hvparams = getattr(self.op, "hvparams", {})
+ if self.hvparams:
+ if not isinstance(self.hvparams, dict):
+ raise errors.OpPrereqError("Invalid hvparams passed: %s, expected"
+ " dict" % (type(self.hvparams), ))
+
+ # check hypervisor parameter syntax (locally)
+ cluster = self.cfg.GetClusterInfo()
+ utils.ForceDictType(self.hvparams, constants.HVS_PARAMETER_TYPES)
+ filled_hvp = cluster.FillDict(cluster.hvparams[instance.hypervisor],
+ instance.hvparams)
+ filled_hvp.update(self.hvparams)
+ hv_type = hypervisor.GetHypervisor(instance.hypervisor)
+ hv_type.CheckParameterSyntax(filled_hvp)
+ _CheckHVParams(self, instance.all_nodes, instance.hypervisor, filled_hvp)
+ self.op.hvparams = self.hvparams
+
_CheckNodeOnline(self, instance.primary_node)
bep = self.cfg.GetClusterInfo().FillBE(instance)
# check bridges existance
_CheckInstanceBridgesExist(self, instance)
- _CheckNodeFreeMemory(self, instance.primary_node,
- "starting instance %s" % instance.name,
- bep[constants.BE_MEMORY], instance.hypervisor)
+ remote_info = self.rpc.call_instance_info(instance.primary_node,
+ instance.name,
+ instance.hypervisor)
+ remote_info.Raise()
+ if not remote_info.data:
+ _CheckNodeFreeMemory(self, instance.primary_node,
+ "starting instance %s" % instance.name,
+ bep[constants.BE_MEMORY], instance.hypervisor)
def Exec(self, feedback_fn):
"""Start the instance.
_StartInstanceDisks(self, instance, force)
- result = self.rpc.call_instance_start(node_current, instance)
+ result = self.rpc.call_instance_start(node_current, instance,
+ self.hvparams, self.beparams)
msg = result.RemoteFailMsg()
if msg:
_ShutdownInstanceDisks(self, instance)
" full reboot: %s" % msg)
_ShutdownInstanceDisks(self, instance)
_StartInstanceDisks(self, instance, ignore_secondaries)
- result = self.rpc.call_instance_start(node_current, instance)
+ result = self.rpc.call_instance_start(node_current, instance, None, None)
msg = result.RemoteFailMsg()
if msg:
_ShutdownInstanceDisks(self, instance)
remote_info = self.rpc.call_instance_info(instance.primary_node,
instance.name,
instance.hypervisor)
- if remote_info.failed or remote_info.data:
+ remote_info.Raise()
+ if remote_info.data:
raise errors.OpPrereqError("Instance '%s' is running on the node %s" %
(self.op.instance_name,
instance.primary_node))
target_node = secondary_nodes[0]
_CheckNodeOnline(self, target_node)
_CheckNodeNotDrained(self, target_node)
- # check memory requirements on the secondary node
- _CheckNodeFreeMemory(self, target_node, "failing over instance %s" %
- instance.name, bep[constants.BE_MEMORY],
- instance.hypervisor)
+
+ if instance.admin_up:
+ # check memory requirements on the secondary node
+ _CheckNodeFreeMemory(self, target_node, "failing over instance %s" %
+ instance.name, bep[constants.BE_MEMORY],
+ instance.hypervisor)
+ else:
+ self.LogInfo("Not checking memory on the secondary node as"
+ " instance will not be started")
# check bridge existance
brlist = [nic.bridge for nic in instance.nics]
raise errors.OpExecError("Can't activate the instance's disks")
feedback_fn("* starting the instance on the target node")
- result = self.rpc.call_instance_start(target_node, instance)
+ result = self.rpc.call_instance_start(target_node, instance, None, None)
msg = result.RemoteFailMsg()
if msg:
_ShutdownInstanceDisks(self, instance)
self.op.hvparams)
hv_type = hypervisor.GetHypervisor(self.op.hypervisor)
hv_type.CheckParameterSyntax(filled_hvp)
+ self.hv_full = filled_hvp
# fill and remember the beparams dict
utils.ForceDictType(self.op.beparams, constants.BES_PARAMETER_TYPES)
nics=[(n.ip, n.bridge, n.mac) for n in self.nics],
disk_template=self.op.disk_template,
disks=[(d["size"], d["mode"]) for d in self.disks],
+ bep=self.be_full,
+ hvp=self.hv_full,
+ hypervisor=self.op.hypervisor,
))
nl = ([self.cfg.GetMasterNode(), self.op.pnode] +
self.cfg.Update(iobj)
logging.info("Starting instance %s on node %s", instance, pnode_name)
feedback_fn("* starting instance...")
- result = self.rpc.call_instance_start(pnode_name, iobj)
+ result = self.rpc.call_instance_start(pnode_name, iobj, None, None)
msg = result.RemoteFailMsg()
if msg:
raise errors.OpExecError("Could not start instance: %s" % msg)
finally:
if self.op.shutdown and instance.admin_up:
- result = self.rpc.call_instance_start(src_node, instance)
+ result = self.rpc.call_instance_start(src_node, instance, None, None)
msg = result.RemoteFailMsg()
if msg:
_ShutdownInstanceDisks(self, instance)
SSL_CERT_FILE = DATA_DIR + "/server.pem"
RAPI_CERT_FILE = DATA_DIR + "/rapi.pem"
WATCHER_STATEFILE = DATA_DIR + "/watcher.data"
+INSTANCE_UPFILE = RUN_GANETI_DIR + "/instance-status"
SSH_KNOWN_HOSTS_FILE = DATA_DIR + "/known_hosts"
RAPI_USERS_FILE = DATA_DIR + "/rapi_users"
QUEUE_DIR = DATA_DIR + "/queue"
VNC_PASSWORD_FILE = _autoconf.SYSCONFDIR + "/ganeti/vnc-cluster-password"
VNC_DEFAULT_BIND_ADDRESS = '0.0.0.0'
-# Device types
+# NIC types
HT_NIC_RTL8139 = "rtl8139"
HT_NIC_NE2K_PCI = "ne2k_pci"
HT_NIC_NE2K_ISA = "ne2k_isa"
HT_NIC_PCNET = "pcnet"
HT_NIC_E1000 = "e1000"
HT_NIC_PARAVIRTUAL = HT_DISK_PARAVIRTUAL = "paravirtual"
-HT_DISK_IOEMU = "ioemu"
-HT_DISK_IDE = "ide"
-HT_DISK_SCSI = "scsi"
-HT_DISK_SD = "sd"
-HT_DISK_MTD = "mtd"
-HT_DISK_PFLASH = "pflash"
HT_HVM_VALID_NIC_TYPES = frozenset([HT_NIC_RTL8139, HT_NIC_NE2K_PCI,
HT_NIC_NE2K_ISA, HT_NIC_PARAVIRTUAL])
-HT_HVM_VALID_DISK_TYPES = frozenset([HT_DISK_PARAVIRTUAL, HT_DISK_IOEMU])
HT_KVM_VALID_NIC_TYPES = frozenset([HT_NIC_RTL8139, HT_NIC_NE2K_PCI,
HT_NIC_NE2K_ISA, HT_NIC_I82551,
HT_NIC_I85557B, HT_NIC_I8259ER,
HT_NIC_PCNET, HT_NIC_E1000,
HT_NIC_PARAVIRTUAL])
+# Disk types
+HT_DISK_IOEMU = "ioemu"
+HT_DISK_IDE = "ide"
+HT_DISK_SCSI = "scsi"
+HT_DISK_SD = "sd"
+HT_DISK_MTD = "mtd"
+HT_DISK_PFLASH = "pflash"
+
+HT_HVM_VALID_DISK_TYPES = frozenset([HT_DISK_PARAVIRTUAL, HT_DISK_IOEMU])
HT_KVM_VALID_DISK_TYPES = frozenset([HT_DISK_PARAVIRTUAL, HT_DISK_IDE,
HT_DISK_SCSI, HT_DISK_SD, HT_DISK_MTD,
HT_DISK_PFLASH])
+# Mouse types:
+HT_MOUSE_MOUSE = "mouse"
+HT_MOUSE_TABLET = "tablet"
+
+HT_KVM_VALID_MOUSE_TYPES = frozenset([HT_MOUSE_MOUSE, HT_MOUSE_TABLET])
+
+# Boot order
+HT_BO_CDROM = "cdrom"
+HT_BO_DISK = "disk"
+HT_BO_NETWORK = "network"
+
+HT_KVM_VALID_BO_TYPES = frozenset([HT_BO_CDROM, HT_BO_DISK, HT_BO_NETWORK])
+
# Cluster Verify steps
VERIFY_NPLUSONE_MEM = 'nplusone_mem'
VERIFY_OPTIONAL_CHECKS = frozenset([VERIFY_NPLUSONE_MEM])
HV_VNC_X509: '',
HV_VNC_X509_VERIFY: False,
HV_CDROM_IMAGE_PATH: '',
- HV_BOOT_ORDER: "disk",
+ HV_BOOT_ORDER: HT_BO_DISK,
HV_NIC_TYPE: HT_NIC_PARAVIRTUAL,
HV_DISK_TYPE: HT_DISK_PARAVIRTUAL,
HV_USB_MOUSE: '',
"""
+import re
+
+
from ganeti import errors
"""
pass
+
+ def GetLinuxNodeInfo(self):
+ """For linux systems, return actual OS information.
+
+ This is an abstraction for all non-hypervisor-based classes, where
+ the node actually sees all the memory and CPUs via the /proc
+ interface and standard commands. The other case if for example
+ xen, where you only see the hardware resources via xen-specific
+ tools.
+
+ @return: a dict with the following keys (values in MiB):
+ - memory_total: the total memory size on the node
+ - memory_free: the available memory on the node for instances
+ - memory_dom0: the memory used by the node itself, if available
+
+ """
+ try:
+ fh = file("/proc/meminfo")
+ try:
+ data = fh.readlines()
+ finally:
+ fh.close()
+ except EnvironmentError, err:
+ raise errors.HypervisorError("Failed to list node info: %s" % (err,))
+
+ result = {}
+ sum_free = 0
+ try:
+ for line in data:
+ splitfields = line.split(":", 1)
+
+ if len(splitfields) > 1:
+ key = splitfields[0].strip()
+ val = splitfields[1].strip()
+ if key == 'MemTotal':
+ result['memory_total'] = int(val.split()[0])/1024
+ elif key in ('MemFree', 'Buffers', 'Cached'):
+ sum_free += int(val.split()[0])/1024
+ elif key == 'Active':
+ result['memory_dom0'] = int(val.split()[0])/1024
+ except (ValueError, TypeError), err:
+ raise errors.HypervisorError("Failed to compute memory usage: %s" %
+ (err,))
+ result['memory_free'] = sum_free
+
+ cpu_total = 0
+ try:
+ fh = open("/proc/cpuinfo")
+ try:
+ cpu_total = len(re.findall("(?m)^processor\s*:\s*[0-9]+\s*$",
+ fh.read()))
+ finally:
+ fh.close()
+ except EnvironmentError, err:
+ raise errors.HypervisorError("Failed to list node info: %s" % (err,))
+ result['cpu_total'] = cpu_total
+ # FIXME: export correct data here
+ result['cpu_nodes'] = 1
+ result['cpu_sockets'] = 1
+
+ return result
def GetNodeInfo(self):
"""Return information about the node.
+ This is just a wrapper over the base GetLinuxNodeInfo method.
+
@return: a dict with the following keys (values in MiB):
- memory_total: the total memory size on the node
- memory_free: the available memory on the node for instances
- memory_dom0: the memory used by the node itself, if available
"""
- # global ram usage from the xm info command
- # memory : 3583
- # free_memory : 747
- # note: in xen 3, memory has changed to total_memory
- try:
- fh = file("/proc/meminfo")
- try:
- data = fh.readlines()
- finally:
- fh.close()
- except IOError, err:
- raise errors.HypervisorError("Failed to list node info: %s" % err)
-
- result = {}
- sum_free = 0
- for line in data:
- splitfields = line.split(":", 1)
-
- if len(splitfields) > 1:
- key = splitfields[0].strip()
- val = splitfields[1].strip()
- if key == 'MemTotal':
- result['memory_total'] = int(val.split()[0])/1024
- elif key in ('MemFree', 'Buffers', 'Cached'):
- sum_free += int(val.split()[0])/1024
- elif key == 'Active':
- result['memory_dom0'] = int(val.split()[0])/1024
- result['memory_free'] = sum_free
-
+ result = self.GetLinuxNodeInfo()
# substract running instances
all_instances = self.GetAllInstancesInfo()
result['memory_free'] -= min(result['memory_free'],
sum([row[2] for row in all_instances]))
-
- cpu_total = 0
- try:
- fh = open("/proc/cpuinfo")
- try:
- cpu_total = len(re.findall("(?m)^processor\s*:\s*[0-9]+\s*$",
- fh.read()))
- finally:
- fh.close()
- except EnvironmentError, err:
- raise errors.HypervisorError("Failed to list node info: %s" % err)
- result['cpu_total'] = cpu_total
- # FIXME: export correct data here
- result['cpu_nodes'] = 1
- result['cpu_sockets'] = 1
-
return result
@classmethod
kvm_cmd.extend(['-no-acpi'])
hvp = instance.hvparams
- boot_disk = hvp[constants.HV_BOOT_ORDER] == "disk"
- boot_cdrom = hvp[constants.HV_BOOT_ORDER] == "cdrom"
- boot_network = hvp[constants.HV_BOOT_ORDER] == "network"
+ boot_disk = hvp[constants.HV_BOOT_ORDER] == constants.HT_BO_DISK
+ boot_cdrom = hvp[constants.HV_BOOT_ORDER] == constants.HT_BO_CDROM
+ boot_network = hvp[constants.HV_BOOT_ORDER] == constants.HT_BO_NETWORK
if boot_network:
kvm_cmd.extend(['-boot', 'n'])
def GetNodeInfo(self):
"""Return information about the node.
+ This is just a wrapper over the base GetLinuxNodeInfo method.
+
@return: a dict with the following keys (values in MiB):
- memory_total: the total memory size on the node
- memory_free: the available memory on the node for instances
- memory_dom0: the memory used by the node itself, if available
"""
- # global ram usage from the xm info command
- # memory : 3583
- # free_memory : 747
- # note: in xen 3, memory has changed to total_memory
- try:
- fh = file("/proc/meminfo")
- try:
- data = fh.readlines()
- finally:
- fh.close()
- except EnvironmentError, err:
- raise errors.HypervisorError("Failed to list node info: %s" % err)
-
- result = {}
- sum_free = 0
- for line in data:
- splitfields = line.split(":", 1)
-
- if len(splitfields) > 1:
- key = splitfields[0].strip()
- val = splitfields[1].strip()
- if key == 'MemTotal':
- result['memory_total'] = int(val.split()[0])/1024
- elif key in ('MemFree', 'Buffers', 'Cached'):
- sum_free += int(val.split()[0])/1024
- elif key == 'Active':
- result['memory_dom0'] = int(val.split()[0])/1024
- result['memory_free'] = sum_free
-
- cpu_total = 0
- try:
- fh = open("/proc/cpuinfo")
- try:
- cpu_total = len(re.findall("(?m)^processor\s*:\s*[0-9]+\s*$",
- fh.read()))
- finally:
- fh.close()
- except EnvironmentError, err:
- raise errors.HypervisorError("Failed to list node info: %s" % err)
- result['cpu_total'] = cpu_total
- # FIXME: export correct data here
- result['cpu_nodes'] = 1
- result['cpu_sockets'] = 1
-
- return result
+ return self.GetLinuxNodeInfo()
@classmethod
def GetShellCommandForConsole(cls, instance, hvparams, beparams):
" an absolute path, if defined")
boot_order = hvparams[constants.HV_BOOT_ORDER]
- if boot_order not in ('cdrom', 'disk', 'network'):
- raise errors.HypervisorError("The boot order must be 'cdrom', 'disk' or"
- " 'network'")
+ if boot_order not in constants.HT_KVM_VALID_BO_TYPES:
+ raise errors.HypervisorError(\
+ "The boot order must be one of %s" %
+ utils.CommaJoin(constants.HT_KVM_VALID_BO_TYPES))
- if boot_order == 'cdrom' and not iso_path:
- raise errors.HypervisorError("Cannot boot from cdrom without an ISO path")
+ if boot_order == constants.HT_BO_CDROM and not iso_path:
+ raise errors.HypervisorError("Cannot boot from cdrom without an"
+ " ISO path")
nic_type = hvparams[constants.HV_NIC_TYPE]
if nic_type not in constants.HT_KVM_VALID_NIC_TYPES:
- raise errors.HypervisorError("Invalid NIC type %s specified for the KVM"
- " hypervisor. Please choose one of: %s" %
- (nic_type,
- constants.HT_KVM_VALID_NIC_TYPES))
- elif boot_order == 'network' and nic_type == constants.HT_NIC_PARAVIRTUAL:
+ raise errors.HypervisorError(\
+ "Invalid NIC type %s specified for the KVM"
+ " hypervisor. Please choose one of: %s" %
+ (nic_type, utils.CommaJoin(constants.HT_KVM_VALID_NIC_TYPES)))
+ elif (boot_order == constants.HT_BO_NETWORK and
+ nic_type == constants.HT_NIC_PARAVIRTUAL):
raise errors.HypervisorError("Cannot boot from a paravirtual NIC. Please"
- " change the nic type.")
+ " change the NIC type.")
disk_type = hvparams[constants.HV_DISK_TYPE]
if disk_type not in constants.HT_KVM_VALID_DISK_TYPES:
- raise errors.HypervisorError("Invalid disk type %s specified for the KVM"
- " hypervisor. Please choose one of: %s" %
- (disk_type,
- constants.HT_KVM_VALID_DISK_TYPES))
+ raise errors.HypervisorError(\
+ "Invalid disk type %s specified for the KVM"
+ " hypervisor. Please choose one of: %s" %
+ (disk_type, utils.CommaJoin(constants.HT_KVM_VALID_DISK_TYPES)))
mouse_type = hvparams[constants.HV_USB_MOUSE]
- if mouse_type and mouse_type not in ('mouse', 'tablet'):
- raise errors.HypervisorError("Invalid usb mouse type %s specified for"
- " the KVM hyervisor. Please choose"
- " 'mouse' or 'tablet'" % mouse_type)
+ if mouse_type and mouse_type not in constants.HT_KVM_VALID_MOUSE_TYPES:
+ raise errors.HypervisorError(\
+ "Invalid usb mouse type %s specified for the KVM hypervisor. Please"
+ " choose one of %s" %
+ utils.CommaJoin(constants.HT_KVM_VALID_MOUSE_TYPES))
def ValidateParameters(self, hvparams):
"""Check the given parameters for validity.
# device type checks
nic_type = hvparams[constants.HV_NIC_TYPE]
if nic_type not in constants.HT_HVM_VALID_NIC_TYPES:
- raise errors.HypervisorError("Invalid NIC type %s specified for the Xen"
- " HVM hypervisor. Please choose one of: %s"
- % (nic_type,
- constants.HT_HVM_VALID_NIC_TYPES))
+ raise errors.HypervisorError(\
+ "Invalid NIC type %s specified for the Xen"
+ " HVM hypervisor. Please choose one of: %s"
+ % (nic_type, utils.CommaJoin(constants.HT_HVM_VALID_NIC_TYPES)))
disk_type = hvparams[constants.HV_DISK_TYPE]
if disk_type not in constants.HT_HVM_VALID_DISK_TYPES:
- raise errors.HypervisorError("Invalid disk type %s specified for the Xen"
- " HVM hypervisor. Please choose one of: %s"
- % (disk_type,
- constants.HT_HVM_VALID_DISK_TYPES))
+ raise errors.HypervisorError(\
+ "Invalid disk type %s specified for the Xen"
+ " HVM hypervisor. Please choose one of: %s"
+ % (disk_type, utils.CommaJoin(constants.HT_HVM_VALID_DISK_TYPES)))
# vnc_bind_address verification
vnc_bind_address = hvparams[constants.HV_VNC_BIND_ADDRESS]
if vnc_bind_address:
"""Startup an instance."""
OP_ID = "OP_INSTANCE_STARTUP"
OP_DSC_FIELD = "instance_name"
- __slots__ = ["instance_name", "force"]
+ __slots__ = ["instance_name", "force", "hvparams", "beparams"]
class OpShutdownInstance(OpCode):
"""
+import logging
+
import ganeti.cli
-import ganeti.opcodes
from ganeti import luxi
from ganeti import rapi
from ganeti import http
+from ganeti import ssconf
+from ganeti import constants
+from ganeti import opcodes
+from ganeti import errors
def BuildUriList(ids, uri_format, uri_fields=("name", "uri")):
"""Helper function to retrieve tags.
"""
- op = ganeti.opcodes.OpGetTags(kind=kind, name=name)
- tags = ganeti.cli.SubmitOpCode(op)
+ if kind == constants.TAG_INSTANCE or kind == constants.TAG_NODE:
+ if not name:
+ raise http.HttpBadRequest("Missing name on tag request")
+ cl = GetClient()
+ if kind == constants.TAG_INSTANCE:
+ fn = cl.QueryInstances
+ else:
+ fn = cl.QueryNodes
+ result = fn(names=[name], fields=["tags"], use_locking=False)
+ if not result or not result[0]:
+ raise http.HttpBadGateway("Invalid response from tag query")
+ tags = result[0][0]
+ elif kind == constants.TAG_CLUSTER:
+ ssc = ssconf.SimpleStore()
+ tags = ssc.GetClusterTags()
+
return list(tags)
"""Helper function to set tags.
"""
- cl = luxi.Client()
- return cl.SubmitJob([ganeti.opcodes.OpAddTags(kind=kind, name=name,
- tags=tags)])
+ return SubmitJob([opcodes.OpAddTags(kind=kind, name=name, tags=tags)])
def _Tags_DELETE(kind, tags, name=""):
"""Helper function to delete tags.
"""
- cl = luxi.Client()
- return cl.SubmitJob([ganeti.opcodes.OpDelTags(kind=kind, name=name,
- tags=tags)])
+ return SubmitJob([opcodes.OpDelTags(kind=kind, name=name, tags=tags)])
def MapBulkFields(itemslist, fields):
return result
+def SubmitJob(op, cl=None):
+ """Generic wrapper for submit job, for better http compatibility.
+
+ @type op: list
+ @param op: the list of opcodes for the job
+ @type cl: None or luxi.Client
+ @param cl: optional luxi client to use
+ @rtype: string
+ @return: the job ID
+
+ """
+ try:
+ if cl is None:
+ cl = GetClient()
+ return cl.SubmitJob(op)
+ except errors.JobQueueFull:
+ raise http.HttpServiceUnavailable("Job queue is full, needs archiving")
+ except errors.JobQueueDrainError:
+ raise http.HttpServiceUnavailable("Job queue is drained, cannot submit")
+ except luxi.NoMasterError, err:
+ raise http.HttpBadGateway("Master seems to unreachable: %s" % str(err))
+ except luxi.TimeoutError, err:
+ raise http.HttpGatewayTimeout("Timeout while talking to the master"
+ " daemon. Error: %s" % str(err))
+
+def GetClient():
+ """Geric wrapper for luxi.Client(), for better http compatiblity.
+
+ """
+ try:
+ return luxi.Client()
+ except luxi.NoMasterError, err:
+ raise http.HttpBadGateway("Master seems to unreachable: %s" % str(err))
+
+
+def FeedbackFn(ts, log_type, log_msg):
+ """Feedback logging function for http case.
+
+ We don't have a stdout for printing log messages, so log them to the
+ http log at least.
+
+ """
+ logging.info("%s: %s", log_type, log_msg)
+
+
class R_Generic(object):
"""Generic class for resources.
"""
-import ganeti.opcodes
+from ganeti import opcodes
from ganeti import http
-from ganeti import luxi
from ganeti import constants
+from ganeti import cli
from ganeti.rapi import baserlib
+
I_FIELDS = ["name", "admin_state", "os",
"pnode", "snodes",
"disk_template",
}
"""
- client = luxi.Client()
+ client = baserlib.GetClient()
return client.QueryClusterInfo()
Example: ["debian-etch"]
"""
- op = ganeti.opcodes.OpDiagnoseOS(output_fields=["name", "valid"],
- names=[])
- diagnose_data = ganeti.cli.SubmitOpCode(op)
+ cl = baserlib.GetClient()
+ op = opcodes.OpDiagnoseOS(output_fields=["name", "valid"], names=[])
+ job_id = baserlib.SubmitJob([op], cl)
+ # we use custom feedback function, instead of print we log the status
+ result = cli.PollJob(job_id, cl, feedback_fn=baserlib.FeedbackFn)
+ diagnose_data = result[0]
if not isinstance(diagnose_data, list):
- raise http.HttpInternalServerError(message="Can't get OS list")
+ raise http.HttpBadGateway(message="Can't get OS list")
return [row[0] for row in diagnose_data if row[1]]
"""
fields = ["id"]
+ cl = baserlib.GetClient()
# Convert the list of lists to the list of ids
- result = [job_id for [job_id] in luxi.Client().QueryJobs(None, fields)]
+ result = [job_id for [job_id] in cl.QueryJobs(None, fields)]
return baserlib.BuildUriList(result, "/2/jobs/%s",
uri_fields=("id", "uri"))
"received_ts", "start_ts", "end_ts",
]
job_id = self.items[0]
- result = luxi.Client().QueryJobs([job_id, ], fields)[0]
+ result = baserlib.GetClient().QueryJobs([job_id, ], fields)[0]
if result is None:
raise http.HttpNotFound()
return baserlib.MapFields(fields, result)
"""
job_id = self.items[0]
- result = luxi.Client().CancelJob(job_id)
+ result = baserlib.GetClient().CancelJob(job_id)
return result
@return: a dictionary with 'name' and 'uri' keys for each of them
"""
- client = luxi.Client()
+ client = baserlib.GetClient()
if self.useBulk():
bulkdata = client.QueryNodes([], N_FIELDS, False)
"""
node_name = self.items[0]
- client = luxi.Client()
+ client = baserlib.GetClient()
result = client.QueryNodes(names=[node_name], fields=N_FIELDS,
use_locking=self.useLocking())
@return: a dictionary with 'name' and 'uri' keys for each of them.
"""
- client = luxi.Client()
+ client = baserlib.GetClient()
use_locking = self.useLocking()
if self.useBulk():
"ip": fn("ip", None),
"bridge": fn("bridge", None)}]
- op = ganeti.opcodes.OpCreateInstance(
- mode=constants.INSTANCE_CREATE,
- instance_name=fn('name'),
- disks=disks,
- disk_template=fn('disk_template'),
- os_type=fn('os'),
- pnode=fn('pnode', None),
- snode=fn('snode', None),
- iallocator=fn('iallocator', None),
- nics=nics,
- start=fn('start', True),
- ip_check=fn('ip_check', True),
- wait_for_sync=True,
- hypervisor=fn('hypervisor', None),
- hvparams=hvparams,
- beparams=beparams,
- file_storage_dir=fn('file_storage_dir', None),
- file_driver=fn('file_driver', 'loop'),
- )
-
- job_id = ganeti.cli.SendJob([op])
- return job_id
+ op = opcodes.OpCreateInstance(
+ mode=constants.INSTANCE_CREATE,
+ instance_name=fn('name'),
+ disks=disks,
+ disk_template=fn('disk_template'),
+ os_type=fn('os'),
+ pnode=fn('pnode', None),
+ snode=fn('snode', None),
+ iallocator=fn('iallocator', None),
+ nics=nics,
+ start=fn('start', True),
+ ip_check=fn('ip_check', True),
+ wait_for_sync=True,
+ hypervisor=fn('hypervisor', None),
+ hvparams=hvparams,
+ beparams=beparams,
+ file_storage_dir=fn('file_storage_dir', None),
+ file_driver=fn('file_driver', 'loop'),
+ )
+
+ return baserlib.SubmitJob([op])
class R_2_instances_name(baserlib.R_Generic):
"""Send information about an instance.
"""
- client = luxi.Client()
+ client = baserlib.GetClient()
instance_name = self.items[0]
result = client.QueryInstances(names=[instance_name], fields=I_FIELDS,
use_locking=self.useLocking())
"""Delete an instance.
"""
- op = ganeti.opcodes.OpRemoveInstance(instance_name=self.items[0],
- ignore_failures=False)
- job_id = ganeti.cli.SendJob([op])
- return job_id
+ op = opcodes.OpRemoveInstance(instance_name=self.items[0],
+ ignore_failures=False)
+ return baserlib.SubmitJob([op])
class R_2_instances_name_reboot(baserlib.R_Generic):
[constants.INSTANCE_REBOOT_HARD])[0]
ignore_secondaries = bool(self.queryargs.get('ignore_secondaries',
[False])[0])
- op = ganeti.opcodes.OpRebootInstance(
- instance_name=instance_name,
- reboot_type=reboot_type,
- ignore_secondaries=ignore_secondaries)
-
- job_id = ganeti.cli.SendJob([op])
+ op = opcodes.OpRebootInstance(instance_name=instance_name,
+ reboot_type=reboot_type,
+ ignore_secondaries=ignore_secondaries)
- return job_id
+ return baserlib.SubmitJob([op])
class R_2_instances_name_startup(baserlib.R_Generic):
"""
instance_name = self.items[0]
force_startup = bool(self.queryargs.get('force', [False])[0])
- op = ganeti.opcodes.OpStartupInstance(instance_name=instance_name,
- force=force_startup)
+ op = opcodes.OpStartupInstance(instance_name=instance_name,
+ force=force_startup)
- job_id = ganeti.cli.SendJob([op])
-
- return job_id
+ return baserlib.SubmitJob([op])
class R_2_instances_name_shutdown(baserlib.R_Generic):
"""
instance_name = self.items[0]
- op = ganeti.opcodes.OpShutdownInstance(instance_name=instance_name)
-
- job_id = ganeti.cli.SendJob([op])
+ op = opcodes.OpShutdownInstance(instance_name=instance_name)
- return job_id
+ return baserlib.SubmitJob([op])
class _R_Tags(baserlib.R_Generic):
self._cfg = cfg
self.port = utils.GetNodeDaemonPort()
- def _InstDict(self, instance):
+ def _InstDict(self, instance, hvp=None, bep=None):
"""Convert the given instance to a dict.
This is done via the instance's ToDict() method and additionally
@type instance: L{objects.Instance}
@param instance: an Instance object
+ @type hvp: dict or None
+ @param hvp: a dictionary with overriden hypervisor parameters
+ @type bep: dict or None
+ @param bep: a dictionary with overriden backend parameters
@rtype: dict
@return: the instance dict, with the hvparams filled with the
cluster defaults
idict = instance.ToDict()
cluster = self._cfg.GetClusterInfo()
idict["hvparams"] = cluster.FillHV(instance)
+ if hvp is not None:
+ idict["hvparams"].update(hvp)
idict["beparams"] = cluster.FillBE(instance)
+ if bep is not None:
+ idict["beparams"].update(bep)
return idict
def _ConnectList(self, client, node_list, call):
"""
return self._SingleNodeCall(node, "bridges_exist", [bridges_list])
- def call_instance_start(self, node, instance):
+ def call_instance_start(self, node, instance, hvp, bep):
"""Starts an instance.
This is a single-node call.
"""
- return self._SingleNodeCall(node, "instance_start",
- [self._InstDict(instance)])
+ idict = self._InstDict(instance, hvp=hvp, bep=bep)
+ return self._SingleNodeCall(node, "instance_start", [idict])
def call_instance_shutdown(self, node, instance):
"""Stops an instance.
nl = data.splitlines(False)
return nl
+ def GetClusterTags(self):
+ """Return the cluster tags.
+
+ """
+ data = self._ReadFile(constants.SS_CLUSTER_TAGS)
+ nl = data.splitlines(False)
+ return nl
+
def GetMasterAndMyself(ss=None):
"""Get the master node and my own hostname.
if env is not None:
cmd_env.update(env)
- if output is None:
- out, err, status = _RunCmdPipe(cmd, cmd_env, shell, cwd)
- else:
- status = _RunCmdFile(cmd, cmd_env, shell, output, cwd)
- out = err = ""
+ try:
+ if output is None:
+ out, err, status = _RunCmdPipe(cmd, cmd_env, shell, cwd)
+ else:
+ status = _RunCmdFile(cmd, cmd_env, shell, output, cwd)
+ out = err = ""
+ except OSError, err:
+ if err.errno == errno.ENOENT:
+ raise errors.OpExecError("Can't execute '%s': not found (%s)" %
+ (strcmd, err))
+ else:
+ raise
if status >= 0:
exitcode = status
dir_name, base_name = os.path.split(file_name)
fd, new_name = tempfile.mkstemp('.new', base_name, dir_name)
+ do_remove = True
# here we need to make sure we remove the temp file, if any error
# leaves it in place
try:
os.utime(new_name, (atime, mtime))
if not dry_run:
os.rename(new_name, file_name)
+ do_remove = False
finally:
if close:
os.close(fd)
result = None
else:
result = fd
- RemoveFile(new_name)
+ if do_remove:
+ RemoveFile(new_name)
return result
"""Return a 'safe' version of a source string.
This function mangles the input string and returns a version that
- should be safe to disply/encode as ASCII. To this end, we first
+ should be safe to display/encode as ASCII. To this end, we first
convert it to ASCII using the 'backslashreplace' encoding which
- should get rid of any non-ASCII chars, and then we again encode it
- via 'string_escape' which converts '\n' into '\\n' so that log
- messages remain one-line.
+ should get rid of any non-ASCII chars, and then we process it
+ through a loop copied from the string repr sources in the python; we
+ don't use string_escape anymore since that escape single quotes and
+ backslashes too, and that is too much; and that escaping is not
+ stable, i.e. string_escape(string_escape(x)) != string_escape(x).
@type text: str or unicode
@param text: input data
@return: a safe version of text
"""
- text = text.encode('ascii', 'backslashreplace')
- text = text.encode('string_escape')
- return text
+ if isinstance(text, unicode):
+ # onli if unicode; if str already, we handle it below
+ text = text.encode('ascii', 'backslashreplace')
+ resu = ""
+ for char in text:
+ c = ord(char)
+ if char == '\t':
+ resu += r'\t'
+ elif char == '\n':
+ resu += r'\n'
+ elif char == '\r':
+ resu += r'\'r'
+ elif c < 32 or c >= 127: # non-printable
+ resu += "\\x%02x" % (c & 0xff)
+ else:
+ resu += char
+ return resu
+
+
+def CommaJoin(names):
+ """Nicely join a set of identifiers.
+
+ @param names: set, list or tuple
+ @return: a string with the formatted results
+
+ """
+ return ", ".join(["'%s'" % val for val in names])
def LockedMethod(fn):
<cmdsynopsis>
<command>submit-job</command>
- <arg choice="req">opcodes_file</arg>
+ <arg choice="req" rep="repeat">opcodes_file</arg>
</cmdsynopsis>
<para>
- This command builds a list of opcodes from a JSON-format file
- and submits them as a single job to the master daemon. It can
+ This command builds a list of opcodes from JSON-format files
+ and submits for each file a job to the master daemon. It can
be used to test some options that are not available via the
command line.
</para>
<arg>--all</arg>
</group>
<sbr>
+ <arg>-H <option>key=value...</option></arg>
+ <arg>-B <option>key=value...</option></arg>
+ <sbr>
<arg>--submit</arg>
<sbr>
<arg choice="opt"
</para>
<para>
+ The <option>-H</option> and <option>-B</option> options
+ specify extra, temporary hypervisor and backend parameters
+ that can be used to start an instance with modified
+ parameters. They can be useful for quick testing without
+ having to modify an instance back and forth, e.g.:
+ <screen>
+# gnt-instance start -H root_args="single" instance1
+# gnt-instance start -B memory=2048 instance2
+ </screen>
+ The first form will start the instance
+ <userinput>instance1</userinput> in single-user mode, and
+ the instance <userinput>instance2</userinput> with 2GB of
+ RAM (this time only, unless that is the actual instance
+ memory size already).
+ </para>
+
+ <para>
The <option>--submit</option> option is used to send the job to
the master daemon but not wait for its completion. The job
ID will be shown so that it can be examined via
ToStdout("Cluster parameters:")
ToStdout(" - candidate pool size: %s", result["candidate_pool_size"])
+ ToStdout(" - master netdev: %s", result["master_netdev"])
+ ToStdout(" - default bridge: %s", result["default_bridge"])
+ ToStdout(" - lvm volume group: %s", result["volume_group_name"])
+ ToStdout(" - file storage path: %s", result["file_storage_dir"])
ToStdout("Default instance parameters:")
for gr_name, gr_dict in result["beparams"].items():
if not opts.lvm_storage and opts.vg_name:
ToStdout("Options --no-lvm-storage and --vg-name conflict.")
return 1
+ elif not opts.lvm_storage:
+ vg_name = ''
hvlist = opts.enabled_hypervisors
if hvlist is not None:
beparams = opts.beparams
utils.ForceDictType(beparams, constants.BES_PARAMETER_TYPES)
- op = opcodes.OpSetClusterParams(vg_name=opts.vg_name,
+ op = opcodes.OpSetClusterParams(vg_name=vg_name,
enabled_hypervisors=hvlist,
hvparams=hvparams,
beparams=beparams,
"""
cl = cli.GetClient()
- fname = args[0]
- op_data = simplejson.loads(open(fname).read())
- op_list = [opcodes.OpCode.LoadOpCode(val) for val in op_data]
- jid = cli.SendJob(op_list, cl=cl)
- ToStdout("Job id: %s", jid)
- cli.PollJob(jid, cl=cl)
+ job_data = []
+ job_ids = []
+ for fname in args:
+ op_data = simplejson.loads(open(fname).read())
+ op_list = [opcodes.OpCode.LoadOpCode(val) for val in op_data]
+ job_data.append((fname, op_list))
+ for fname, op_list in job_data:
+ jid = cli.SendJob(op_list, cl=cl)
+ ToStdout("File '%s', job id: %s", fname, jid)
+ job_ids.append(jid)
+ for jid in job_ids:
+ ToStdout("Waiting for job id %s", jid)
+ cli.PollJob(jid, cl=cl)
return 0
help="Select nodes to sleep on"),
],
"[opts...] <duration>", "Executes a TestDelay OpCode"),
- 'submit-job': (GenericOpCodes, ARGS_ONE,
+ 'submit-job': (GenericOpCodes, ARGS_ATLEAST(1),
[DEBUG_OPT,
],
- "<op_list_file>", "Submits a job built from a json-file"
- " with a list of serialized opcodes"),
+ "<op_list_file...>", "Submits jobs built from json files"
+ " containing a list of serialized opcodes"),
'allocator': (TestAllocator, ARGS_ONE,
[DEBUG_OPT,
make_option("--dir", dest="direction",
for name in inames:
op = opcodes.OpStartupInstance(instance_name=name,
force=opts.force)
+ # do not add these parameters to the opcode unless they're defined
+ if opts.hvparams:
+ op.hvparams = opts.hvparams
+ if opts.beparams:
+ op.beparams = opts.beparams
jex.QueueJob(name, op)
jex.WaitOrShow(not opts.submit_only)
return 0
m_node_opt, m_pri_node_opt, m_sec_node_opt,
m_clust_opt, m_inst_opt,
SUBMIT_OPT,
+ keyval_option("-H", "--hypervisor", type="keyval",
+ default={}, dest="hvparams",
+ help="Temporary hypervisor parameters"),
+ keyval_option("-B", "--backend", type="keyval",
+ default={}, dest="beparams",
+ help="Temporary backend parameters"),
],
- "<instance>", "Starts an instance"),
+ "<instance>", "Starts an instance"),
'reboot': (RebootInstance, ARGS_ANY,
[DEBUG_OPT, m_force_multi,
headers = None
# change raw values to nicer strings
- for row in output:
+ for row_id, row in enumerate(output):
+ if row is None:
+ ToStderr("No such job: %s" % args[row_id])
+ continue
+
for idx, field in enumerate(selected_fields):
val = row[idx]
if field == "status":
--- /dev/null
+disk {
+ size 0s _is_default; # bytes
+ on-io-error detach;
+ fencing dont-care _is_default;
+ max-bio-bvecs 0 _is_default;
+}
+net {
+ timeout 60 _is_default; # 1/10 seconds
+ max-epoch-size 2048 _is_default;
+ max-buffers 2048 _is_default;
+ unplug-watermark 128 _is_default;
+ connect-int 10 _is_default; # seconds
+ ping-int 10 _is_default; # seconds
+ sndbuf-size 131070 _is_default; # bytes
+ ko-count 0 _is_default;
+ after-sb-0pri discard-zero-changes;
+ after-sb-1pri consensus;
+ after-sb-2pri disconnect _is_default;
+ rr-conflict disconnect _is_default;
+ ping-timeout 5 _is_default; # 1/10 seconds
+}
+syncer {
+ rate 61440k; # bytes/second
+ after -1 _is_default;
+ al-extents 257;
+}
+protocol C;
+_this_host {
+ device minor 0;
+ disk "/dev/xenvg/test.data";
+ meta-disk "/dev/xenvg/test.meta" [ 0 ];
+ address ipv4 192.168.1.1:11000;
+}
+_remote_host {
+ address ipv4 192.168.1.2:11000;
+}
"""Test drbdsetup show parser creation"""
bdev.DRBD8._GetShowParser()
- def testParserBoth(self):
+ def testParserBoth80(self):
"""Test drbdsetup show parser for disk and network"""
data = self._ReadTestData("bdev-both.txt")
result = bdev.DRBD8._GetDevInfo(data)
"Wrong local disk info")
self.failUnless(self._has_net(result, ("192.168.1.1", 11000),
("192.168.1.2", 11000)),
- "Wrong network info")
+ "Wrong network info (8.0.x)")
+
+ def testParserBoth83(self):
+ """Test drbdsetup show parser for disk and network"""
+ data = self._ReadTestData("bdev-8.3-both.txt")
+ result = bdev.DRBD8._GetDevInfo(data)
+ self.failUnless(self._has_disk(result, "/dev/xenvg/test.data",
+ "/dev/xenvg/test.meta"),
+ "Wrong local disk info")
+ self.failUnless(self._has_net(result, ("192.168.1.1", 11000),
+ ("192.168.1.2", 11000)),
+ "Wrong network info (8.2.x)")
def testParserNet(self):
"""Test drbdsetup show parser for disk and network"""
"""Read in txt data"""
testutils.GanetiTestCase.setUp(self)
proc_data = self._TestDataFilename("proc_drbd8.txt")
+ proc83_data = self._TestDataFilename("proc_drbd83.txt")
self.proc_data = bdev.DRBD8._GetProcData(filename=proc_data)
+ self.proc83_data = bdev.DRBD8._GetProcData(filename=proc83_data)
self.mass_data = bdev.DRBD8._MassageProcData(self.proc_data)
+ self.mass83_data = bdev.DRBD8._MassageProcData(self.proc83_data)
def testIOErrors(self):
"""Test handling of errors while reading the proc file."""
def testMinorNotFound(self):
"""Test not-found-minor in /proc"""
self.failUnless(9 not in self.mass_data)
+ self.failUnless(9 not in self.mass83_data)
def testLineNotMatch(self):
"""Test wrong line passed to DRBD8Status"""
def testMinor0(self):
"""Test connected, primary device"""
- stats = bdev.DRBD8Status(self.mass_data[0])
- self.failUnless(stats.is_in_use)
- self.failUnless(stats.is_connected and stats.is_primary and
- stats.peer_secondary and stats.is_disk_uptodate)
+ for data in [self.mass_data, self.mass83_data]:
+ stats = bdev.DRBD8Status(data[0])
+ self.failUnless(stats.is_in_use)
+ self.failUnless(stats.is_connected and stats.is_primary and
+ stats.peer_secondary and stats.is_disk_uptodate)
def testMinor1(self):
"""Test connected, secondary device"""
- stats = bdev.DRBD8Status(self.mass_data[1])
- self.failUnless(stats.is_in_use)
- self.failUnless(stats.is_connected and stats.is_secondary and
- stats.peer_primary and stats.is_disk_uptodate)
+ for data in [self.mass_data, self.mass83_data]:
+ stats = bdev.DRBD8Status(data[1])
+ self.failUnless(stats.is_in_use)
+ self.failUnless(stats.is_connected and stats.is_secondary and
+ stats.peer_primary and stats.is_disk_uptodate)
def testMinor2(self):
"""Test unconfigured device"""
- stats = bdev.DRBD8Status(self.mass_data[2])
- self.failIf(stats.is_in_use)
+ for data in [self.mass_data, self.mass83_data]:
+ stats = bdev.DRBD8Status(data[2])
+ self.failIf(stats.is_in_use)
def testMinor4(self):
"""Test WFconn device"""
- stats = bdev.DRBD8Status(self.mass_data[4])
- self.failUnless(stats.is_in_use)
- self.failUnless(stats.is_wfconn and stats.is_primary and
- stats.rrole == 'Unknown' and
- stats.is_disk_uptodate)
+ for data in [self.mass_data, self.mass83_data]:
+ stats = bdev.DRBD8Status(data[4])
+ self.failUnless(stats.is_in_use)
+ self.failUnless(stats.is_wfconn and stats.is_primary and
+ stats.rrole == 'Unknown' and
+ stats.is_disk_uptodate)
def testMinor6(self):
"""Test diskless device"""
- stats = bdev.DRBD8Status(self.mass_data[6])
- self.failUnless(stats.is_in_use)
- self.failUnless(stats.is_connected and stats.is_secondary and
- stats.peer_primary and stats.is_diskless)
+ for data in [self.mass_data, self.mass83_data]:
+ stats = bdev.DRBD8Status(data[6])
+ self.failUnless(stats.is_in_use)
+ self.failUnless(stats.is_connected and stats.is_secondary and
+ stats.peer_primary and stats.is_diskless)
def testMinor8(self):
"""Test standalone device"""
- stats = bdev.DRBD8Status(self.mass_data[8])
- self.failUnless(stats.is_in_use)
- self.failUnless(stats.is_standalone and
- stats.rrole == 'Unknown' and
- stats.is_disk_uptodate)
+ for data in [self.mass_data, self.mass83_data]:
+ stats = bdev.DRBD8Status(data[8])
+ self.failUnless(stats.is_in_use)
+ self.failUnless(stats.is_standalone and
+ stats.rrole == 'Unknown' and
+ stats.is_disk_uptodate)
if __name__ == '__main__':
unittest.main()
import unittest
+import re
from ganeti import constants
constants.CONFIG_REVISION))
+class TestParameterNames(unittest.TestCase):
+ """HV/BE parameter tests"""
+ VALID_NAME = re.compile("^[a-zA-Z_][a-zA-Z0-9_]*$")
+
+ def testNoDashes(self):
+ for kind, source in [('hypervisor', constants.HVS_PARAMETER_TYPES),
+ ('backend', constants.BES_PARAMETER_TYPES)]:
+ for key in source:
+ self.failUnless(self.VALID_NAME.match(key),
+ "The %s parameter '%s' contains invalid characters" %
+ (kind, key))
+
if __name__ == '__main__':
unittest.main()
import shutil
import re
import select
+import string
import ganeti
import testutils
ParseUnit, AddAuthorizedKey, RemoveAuthorizedKey, \
ShellQuote, ShellQuoteArgs, TcpPing, ListVisibleFiles, \
SetEtcHostsEntry, RemoveEtcHostsEntry, FirstFree, OwnIpAddress, \
- TailFile, ForceDictType
+ TailFile, ForceDictType, SafeEncode
from ganeti.errors import LockError, UnitParseError, GenericError, \
ProgrammerError
self.assertRaises(errors.TypeEnforcementError, self._fdt, {'d': '4 L'})
+class TestSafeEncode(unittest.TestCase):
+ """Test case for SafeEncode"""
+
+ def testAscii(self):
+ for txt in [string.digits, string.letters, string.punctuation]:
+ self.failUnlessEqual(txt, SafeEncode(txt))
+
+ def testDoubleEncode(self):
+ for i in range(255):
+ txt = SafeEncode(chr(i))
+ self.failUnlessEqual(txt, SafeEncode(txt))
+
+ def testUnicode(self):
+ # 1024 is high enough to catch non-direct ASCII mappings
+ for i in range(1024):
+ txt = SafeEncode(unichr(i))
+ self.failUnlessEqual(txt, SafeEncode(txt))
+
+
if __name__ == '__main__':
unittest.main()