TEST_FILES = \
test/data/bdev-both.txt \
+ test/data/bdev-8.3-both.txt \
test/data/bdev-disk.txt \
test/data/bdev-net.txt \
- test/data/proc_drbd8.txt
+ test/data/proc_drbd8.txt \
+ test/data/proc_drbd83.txt
dist_TESTS = \
test/ganeti.bdev_unittest.py \
TESTS = $(dist_TESTS) $(nodist_TESTS)
-TESTS_ENVIRONMENT = PYTHONPATH=.:$(top_builddir)
+TESTS_ENVIRONMENT = PYTHONPATH=.:$(top_builddir) $(PYTHON)
RAPI_RESOURCES = $(wildcard lib/rapi/*.py)
import Queue
import random
import signal
-import simplejson
import logging
from cStringIO import StringIO
from ganeti import workerpool
from ganeti import rpc
from ganeti import bootstrap
+from ganeti import serializer
CLIENT_REQUEST_WORKERS = 16
logging.debug("client closed connection")
break
- request = simplejson.loads(msg)
+ request = serializer.LoadJson(msg)
logging.debug("request: %s", request)
if not isinstance(request, dict):
logging.error("wrong request received: %s", msg)
luxi.KEY_RESULT: result,
}
logging.debug("response: %s", response)
- self.send_message(simplejson.dumps(response))
+ self.send_message(serializer.DumpJson(response))
def read_message(self):
while not self._msgs:
"""
parser = OptionParser(description="Ganeti node daemon",
- usage="%prog [-f] [-d]",
+ usage="%prog [-f] [-d] [-b ADDRESS]",
version="%%prog (ganeti) %s" %
constants.RELEASE_VERSION)
parser.add_option("-d", "--debug", dest="debug",
help="Enable some debug messages",
default=False, action="store_true")
+ parser.add_option("-b", "--bind", dest="bind_address",
+ help="Bind address",
+ default="", metavar="ADDRESS")
+
options, args = parser.parse_args()
return options, args
queue_lock = jstore.InitAndVerifyQueue(must_lock=False)
mainloop = daemon.Mainloop()
- server = NodeHttpServer(mainloop, "", port,
+ server = NodeHttpServer(mainloop, options.bind_address, port,
ssl_params=ssl_params, ssl_verify_peer=True)
server.Start()
try:
all_results = cli.PollJob(job_id, cl=client, feedback_fn=logging.debug)
+ logging.debug("Got data from cluster, writing instance status file")
+
result = all_results[0]
smap = {}
instances = {}
+
+ # write the upfile
+ up_data = "".join(["%s %s\n" % (fields[0], fields[1]) for fields in result])
+ utils.WriteFile(file_name=constants.INSTANCE_UPFILE, data=up_data)
+
for fields in result:
(name, status, autostart, snodes) = fields
GANETIRUNDIR="@LOCALSTATEDIR@/run/ganeti"
+GANETI_DEFAULTS_FILE="@SYSCONFDIR@/default/ganeti"
+
NODED_NAME="ganeti-noded"
NODED="@PREFIX@/sbin/${NODED_NAME}"
NODED_PID="${GANETIRUNDIR}/${NODED_NAME}.pid"
+NODED_ARGS=""
MASTERD_NAME="ganeti-masterd"
MASTERD="@PREFIX@/sbin/${MASTERD_NAME}"
MASTERD_PID="${GANETIRUNDIR}/${MASTERD_NAME}.pid"
+MASTERD_ARGS=""
RAPI_NAME="ganeti-rapi"
RAPI="@PREFIX@/sbin/${RAPI_NAME}"
RAPI_PID="${GANETIRUNDIR}/${RAPI_NAME}.pid"
+RAPI_ARGS=""
SCRIPTNAME="@SYSCONFDIR@/init.d/ganeti"
. /lib/lsb/init-functions
+if [ -s $GANETI_DEFAULTS_FILE ]; then
+ . $GANETI_DEFAULTS_FILE
+fi
+
check_config() {
for fname in \
"@LOCALSTATEDIR@/lib/ganeti/server.pem"
start)
log_daemon_msg "Starting $DESC" "$NAME"
check_config
- start_action $NODED $NODED_PID
- start_action $MASTERD $MASTERD_PID
- start_action $RAPI $RAPI_PID
- ;;
+ start_action $NODED $NODED_PID $NODED_ARGS
+ start_action $MASTERD $MASTERD_PID $MASTERD_ARGS
+ start_action $RAPI $RAPI_PID $RAPI_ARGS
+ ;;
stop)
log_daemon_msg "Stopping $DESC" "$NAME"
stop_action $RAPI $RAPI_PID
stop_action $MASTERD $MASTERD_PID
stop_action $NODED $NODED_PID
- ;;
+ ;;
restart|force-reload)
log_daemon_msg "Reloading $DESC"
stop_action $RAPI $RAPI_PID
start_action $NODED $NODED_PID
start_action $MASTERD $MASTERD_PID
start_action $RAPI $RAPI_PID
- ;;
+ ;;
*)
log_success_msg "Usage: $SCRIPTNAME {start|stop|force-reload|restart}"
exit 1
- ;;
+ ;;
esac
exit 0
"""
UNCONF_RE = re.compile(r"\s*[0-9]+:\s*cs:Unconfigured$")
- LINE_RE = re.compile(r"\s*[0-9]+:\s*cs:(\S+)\s+st:([^/]+)/(\S+)"
+ LINE_RE = re.compile(r"\s*[0-9]+:\s*cs:(\S+)\s+(?:st|ro):([^/]+)/(\S+)"
"\s+ds:([^/]+)/(\S+)\s+.*$")
SYNC_RE = re.compile(r"^.*\ssync'ed:\s*([0-9.]+)%.*"
"\sfinish: ([0-9]+):([0-9]+):([0-9]+)\s.*$")
# value types
value = pyp.Word(pyp.alphanums + '_-/.:')
quoted = dbl_quote + pyp.CharsNotIn('"') + dbl_quote
- addr_port = (pyp.Word(pyp.nums + '.') + pyp.Literal(':').suppress() +
- number)
+ addr_type = (pyp.Optional(pyp.Literal("ipv4")).suppress() +
+ pyp.Optional(pyp.Literal("ipv6")).suppress())
+ addr_port = (addr_type + pyp.Word(pyp.nums + '.') +
+ pyp.Literal(':').suppress() + number)
# meta device, extended syntax
meta_value = ((value ^ quoted) + pyp.Literal('[').suppress() +
number + pyp.Word(']').suppress())
+ # device name, extended syntax
+ device_value = pyp.Literal("minor").suppress() + number
# a statement
stmt = (~rbrace + keyword + ~lbrace +
- pyp.Optional(addr_port ^ value ^ quoted ^ meta_value) +
+ pyp.Optional(addr_port ^ value ^ quoted ^ meta_value ^
+ device_value) +
pyp.Optional(defa) + semi +
pyp.Optional(pyp.restOfLine).suppress())
import os
import os.path
-import sha
import re
import logging
import tempfile
format = separator.replace("%", "%%").join(format_fields)
for row in data:
+ if row is None:
+ continue
for idx, val in enumerate(row):
if unitfields.Matches(fields[idx]):
try:
for line in data:
args = []
+ if line is None:
+ line = ['-' for _ in fields]
for idx in xrange(len(fields)):
if separator is None:
args.append(mlens[idx])
import os
import os.path
-import sha
import time
import tempfile
import re
"""
if self.op.vg_name is not None:
- if self.op.vg_name != self.cfg.GetVGName():
- self.cfg.SetVGName(self.op.vg_name)
+ new_volume = self.op.vg_name
+ if not new_volume:
+ new_volume = None
+ if new_volume != self.cfg.GetVGName():
+ self.cfg.SetVGName(new_volume)
else:
feedback_fn("Cluster LVM configuration already in desired"
" state, not changing")
for hypervisor in cluster.enabled_hypervisors]),
"beparams": cluster.beparams,
"candidate_pool_size": cluster.candidate_pool_size,
+ "default_bridge": cluster.default_bridge,
+ "master_netdev": cluster.master_netdev,
+ "volume_group_name": cluster.volume_group_name,
+ "file_storage_dir": cluster.file_storage_dir,
}
return result
assert self.instance is not None, \
"Cannot retrieve locked instance %s" % self.op.instance_name
+ # extra beparams
+ self.beparams = getattr(self.op, "beparams", {})
+ if self.beparams:
+ if not isinstance(self.beparams, dict):
+ raise errors.OpPrereqError("Invalid beparams passed: %s, expected"
+ " dict" % (type(self.beparams), ))
+ # fill the beparams dict
+ utils.ForceDictType(self.beparams, constants.BES_PARAMETER_TYPES)
+ self.op.beparams = self.beparams
+
+ # extra hvparams
+ self.hvparams = getattr(self.op, "hvparams", {})
+ if self.hvparams:
+ if not isinstance(self.hvparams, dict):
+ raise errors.OpPrereqError("Invalid hvparams passed: %s, expected"
+ " dict" % (type(self.hvparams), ))
+
+ # check hypervisor parameter syntax (locally)
+ cluster = self.cfg.GetClusterInfo()
+ utils.ForceDictType(self.hvparams, constants.HVS_PARAMETER_TYPES)
+ filled_hvp = cluster.FillDict(cluster.hvparams[instance.hypervisor],
+ instance.hvparams)
+ filled_hvp.update(self.hvparams)
+ hv_type = hypervisor.GetHypervisor(instance.hypervisor)
+ hv_type.CheckParameterSyntax(filled_hvp)
+ _CheckHVParams(self, instance.all_nodes, instance.hypervisor, filled_hvp)
+ self.op.hvparams = self.hvparams
+
_CheckNodeOnline(self, instance.primary_node)
bep = self.cfg.GetClusterInfo().FillBE(instance)
# check bridges existance
_CheckInstanceBridgesExist(self, instance)
- _CheckNodeFreeMemory(self, instance.primary_node,
- "starting instance %s" % instance.name,
- bep[constants.BE_MEMORY], instance.hypervisor)
+ remote_info = self.rpc.call_instance_info(instance.primary_node,
+ instance.name,
+ instance.hypervisor)
+ remote_info.Raise()
+ if not remote_info.data:
+ _CheckNodeFreeMemory(self, instance.primary_node,
+ "starting instance %s" % instance.name,
+ bep[constants.BE_MEMORY], instance.hypervisor)
def Exec(self, feedback_fn):
"""Start the instance.
_StartInstanceDisks(self, instance, force)
- result = self.rpc.call_instance_start(node_current, instance)
+ result = self.rpc.call_instance_start(node_current, instance,
+ self.hvparams, self.beparams)
msg = result.RemoteFailMsg()
if msg:
_ShutdownInstanceDisks(self, instance)
" full reboot: %s" % msg)
_ShutdownInstanceDisks(self, instance)
_StartInstanceDisks(self, instance, ignore_secondaries)
- result = self.rpc.call_instance_start(node_current, instance)
+ result = self.rpc.call_instance_start(node_current, instance, None, None)
msg = result.RemoteFailMsg()
if msg:
_ShutdownInstanceDisks(self, instance)
remote_info = self.rpc.call_instance_info(instance.primary_node,
instance.name,
instance.hypervisor)
- if remote_info.failed or remote_info.data:
+ remote_info.Raise()
+ if remote_info.data:
raise errors.OpPrereqError("Instance '%s' is running on the node %s" %
(self.op.instance_name,
instance.primary_node))
raise errors.OpExecError("Can't activate the instance's disks")
feedback_fn("* starting the instance on the target node")
- result = self.rpc.call_instance_start(target_node, instance)
+ result = self.rpc.call_instance_start(target_node, instance, None, None)
msg = result.RemoteFailMsg()
if msg:
_ShutdownInstanceDisks(self, instance)
self.cfg.Update(iobj)
logging.info("Starting instance %s on node %s", instance, pnode_name)
feedback_fn("* starting instance...")
- result = self.rpc.call_instance_start(pnode_name, iobj)
+ result = self.rpc.call_instance_start(pnode_name, iobj, None, None)
msg = result.RemoteFailMsg()
if msg:
raise errors.OpExecError("Could not start instance: %s" % msg)
finally:
if self.op.shutdown and instance.admin_up:
- result = self.rpc.call_instance_start(src_node, instance)
+ result = self.rpc.call_instance_start(src_node, instance, None, None)
msg = result.RemoteFailMsg()
if msg:
_ShutdownInstanceDisks(self, instance)
SSL_CERT_FILE = DATA_DIR + "/server.pem"
RAPI_CERT_FILE = DATA_DIR + "/rapi.pem"
WATCHER_STATEFILE = DATA_DIR + "/watcher.data"
+INSTANCE_UPFILE = RUN_GANETI_DIR + "/instance-status"
SSH_KNOWN_HOSTS_FILE = DATA_DIR + "/known_hosts"
RAPI_USERS_FILE = DATA_DIR + "/rapi_users"
QUEUE_DIR = DATA_DIR + "/queue"
VNC_PASSWORD_FILE = _autoconf.SYSCONFDIR + "/ganeti/vnc-cluster-password"
VNC_DEFAULT_BIND_ADDRESS = '0.0.0.0'
-# Device types
+# NIC types
HT_NIC_RTL8139 = "rtl8139"
HT_NIC_NE2K_PCI = "ne2k_pci"
HT_NIC_NE2K_ISA = "ne2k_isa"
HT_NIC_PCNET = "pcnet"
HT_NIC_E1000 = "e1000"
HT_NIC_PARAVIRTUAL = HT_DISK_PARAVIRTUAL = "paravirtual"
-HT_DISK_IOEMU = "ioemu"
-HT_DISK_IDE = "ide"
-HT_DISK_SCSI = "scsi"
-HT_DISK_SD = "sd"
-HT_DISK_MTD = "mtd"
-HT_DISK_PFLASH = "pflash"
HT_HVM_VALID_NIC_TYPES = frozenset([HT_NIC_RTL8139, HT_NIC_NE2K_PCI,
HT_NIC_NE2K_ISA, HT_NIC_PARAVIRTUAL])
-HT_HVM_VALID_DISK_TYPES = frozenset([HT_DISK_PARAVIRTUAL, HT_DISK_IOEMU])
HT_KVM_VALID_NIC_TYPES = frozenset([HT_NIC_RTL8139, HT_NIC_NE2K_PCI,
HT_NIC_NE2K_ISA, HT_NIC_I82551,
HT_NIC_I85557B, HT_NIC_I8259ER,
HT_NIC_PCNET, HT_NIC_E1000,
HT_NIC_PARAVIRTUAL])
+# Disk types
+HT_DISK_IOEMU = "ioemu"
+HT_DISK_IDE = "ide"
+HT_DISK_SCSI = "scsi"
+HT_DISK_SD = "sd"
+HT_DISK_MTD = "mtd"
+HT_DISK_PFLASH = "pflash"
+
+HT_HVM_VALID_DISK_TYPES = frozenset([HT_DISK_PARAVIRTUAL, HT_DISK_IOEMU])
HT_KVM_VALID_DISK_TYPES = frozenset([HT_DISK_PARAVIRTUAL, HT_DISK_IDE,
HT_DISK_SCSI, HT_DISK_SD, HT_DISK_MTD,
HT_DISK_PFLASH])
+# Mouse types:
+HT_MOUSE_MOUSE = "mouse"
+HT_MOUSE_TABLET = "tablet"
+
+HT_KVM_VALID_MOUSE_TYPES = frozenset([HT_MOUSE_MOUSE, HT_MOUSE_TABLET])
+
+# Boot order
+HT_BO_CDROM = "cdrom"
+HT_BO_DISK = "disk"
+HT_BO_NETWORK = "network"
+
+HT_KVM_VALID_BO_TYPES = frozenset([HT_BO_CDROM, HT_BO_DISK, HT_BO_NETWORK])
+
# Cluster Verify steps
VERIFY_NPLUSONE_MEM = 'nplusone_mem'
VERIFY_OPTIONAL_CHECKS = frozenset([VERIFY_NPLUSONE_MEM])
HV_VNC_X509: '',
HV_VNC_X509_VERIFY: False,
HV_CDROM_IMAGE_PATH: '',
- HV_BOOT_ORDER: "disk",
+ HV_BOOT_ORDER: HT_BO_DISK,
HV_NIC_TYPE: HT_NIC_PARAVIRTUAL,
HV_DISK_TYPE: HT_DISK_PARAVIRTUAL,
HV_USB_MOUSE: '',
"""
+import re
+
+
from ganeti import errors
"""
pass
+
+ def GetLinuxNodeInfo(self):
+ """For linux systems, return actual OS information.
+
+ This is an abstraction for all non-hypervisor-based classes, where
+ the node actually sees all the memory and CPUs via the /proc
+ interface and standard commands. The other case if for example
+ xen, where you only see the hardware resources via xen-specific
+ tools.
+
+ @return: a dict with the following keys (values in MiB):
+ - memory_total: the total memory size on the node
+ - memory_free: the available memory on the node for instances
+ - memory_dom0: the memory used by the node itself, if available
+
+ """
+ try:
+ fh = file("/proc/meminfo")
+ try:
+ data = fh.readlines()
+ finally:
+ fh.close()
+ except EnvironmentError, err:
+ raise errors.HypervisorError("Failed to list node info: %s" % (err,))
+
+ result = {}
+ sum_free = 0
+ try:
+ for line in data:
+ splitfields = line.split(":", 1)
+
+ if len(splitfields) > 1:
+ key = splitfields[0].strip()
+ val = splitfields[1].strip()
+ if key == 'MemTotal':
+ result['memory_total'] = int(val.split()[0])/1024
+ elif key in ('MemFree', 'Buffers', 'Cached'):
+ sum_free += int(val.split()[0])/1024
+ elif key == 'Active':
+ result['memory_dom0'] = int(val.split()[0])/1024
+ except (ValueError, TypeError), err:
+ raise errors.HypervisorError("Failed to compute memory usage: %s" %
+ (err,))
+ result['memory_free'] = sum_free
+
+ cpu_total = 0
+ try:
+ fh = open("/proc/cpuinfo")
+ try:
+ cpu_total = len(re.findall("(?m)^processor\s*:\s*[0-9]+\s*$",
+ fh.read()))
+ finally:
+ fh.close()
+ except EnvironmentError, err:
+ raise errors.HypervisorError("Failed to list node info: %s" % (err,))
+ result['cpu_total'] = cpu_total
+ # FIXME: export correct data here
+ result['cpu_nodes'] = 1
+ result['cpu_sockets'] = 1
+
+ return result
def GetNodeInfo(self):
"""Return information about the node.
+ This is just a wrapper over the base GetLinuxNodeInfo method.
+
@return: a dict with the following keys (values in MiB):
- memory_total: the total memory size on the node
- memory_free: the available memory on the node for instances
- memory_dom0: the memory used by the node itself, if available
"""
- # global ram usage from the xm info command
- # memory : 3583
- # free_memory : 747
- # note: in xen 3, memory has changed to total_memory
- try:
- fh = file("/proc/meminfo")
- try:
- data = fh.readlines()
- finally:
- fh.close()
- except IOError, err:
- raise errors.HypervisorError("Failed to list node info: %s" % err)
-
- result = {}
- sum_free = 0
- for line in data:
- splitfields = line.split(":", 1)
-
- if len(splitfields) > 1:
- key = splitfields[0].strip()
- val = splitfields[1].strip()
- if key == 'MemTotal':
- result['memory_total'] = int(val.split()[0])/1024
- elif key in ('MemFree', 'Buffers', 'Cached'):
- sum_free += int(val.split()[0])/1024
- elif key == 'Active':
- result['memory_dom0'] = int(val.split()[0])/1024
- result['memory_free'] = sum_free
-
+ result = self.GetLinuxNodeInfo()
# substract running instances
all_instances = self.GetAllInstancesInfo()
result['memory_free'] -= min(result['memory_free'],
sum([row[2] for row in all_instances]))
-
- cpu_total = 0
- try:
- fh = open("/proc/cpuinfo")
- try:
- cpu_total = len(re.findall("(?m)^processor\s*:\s*[0-9]+\s*$",
- fh.read()))
- finally:
- fh.close()
- except EnvironmentError, err:
- raise errors.HypervisorError("Failed to list node info: %s" % err)
- result['cpu_total'] = cpu_total
- # FIXME: export correct data here
- result['cpu_nodes'] = 1
- result['cpu_sockets'] = 1
-
return result
@classmethod
kvm_cmd.extend(['-no-acpi'])
hvp = instance.hvparams
- boot_disk = hvp[constants.HV_BOOT_ORDER] == "disk"
- boot_cdrom = hvp[constants.HV_BOOT_ORDER] == "cdrom"
- boot_network = hvp[constants.HV_BOOT_ORDER] == "network"
+ boot_disk = hvp[constants.HV_BOOT_ORDER] == constants.HT_BO_DISK
+ boot_cdrom = hvp[constants.HV_BOOT_ORDER] == constants.HT_BO_CDROM
+ boot_network = hvp[constants.HV_BOOT_ORDER] == constants.HT_BO_NETWORK
if boot_network:
kvm_cmd.extend(['-boot', 'n'])
def GetNodeInfo(self):
"""Return information about the node.
+ This is just a wrapper over the base GetLinuxNodeInfo method.
+
@return: a dict with the following keys (values in MiB):
- memory_total: the total memory size on the node
- memory_free: the available memory on the node for instances
- memory_dom0: the memory used by the node itself, if available
"""
- # global ram usage from the xm info command
- # memory : 3583
- # free_memory : 747
- # note: in xen 3, memory has changed to total_memory
- try:
- fh = file("/proc/meminfo")
- try:
- data = fh.readlines()
- finally:
- fh.close()
- except EnvironmentError, err:
- raise errors.HypervisorError("Failed to list node info: %s" % err)
-
- result = {}
- sum_free = 0
- for line in data:
- splitfields = line.split(":", 1)
-
- if len(splitfields) > 1:
- key = splitfields[0].strip()
- val = splitfields[1].strip()
- if key == 'MemTotal':
- result['memory_total'] = int(val.split()[0])/1024
- elif key in ('MemFree', 'Buffers', 'Cached'):
- sum_free += int(val.split()[0])/1024
- elif key == 'Active':
- result['memory_dom0'] = int(val.split()[0])/1024
- result['memory_free'] = sum_free
-
- cpu_total = 0
- try:
- fh = open("/proc/cpuinfo")
- try:
- cpu_total = len(re.findall("(?m)^processor\s*:\s*[0-9]+\s*$",
- fh.read()))
- finally:
- fh.close()
- except EnvironmentError, err:
- raise errors.HypervisorError("Failed to list node info: %s" % err)
- result['cpu_total'] = cpu_total
- # FIXME: export correct data here
- result['cpu_nodes'] = 1
- result['cpu_sockets'] = 1
-
- return result
+ return self.GetLinuxNodeInfo()
@classmethod
def GetShellCommandForConsole(cls, instance, hvparams, beparams):
" an absolute path, if defined")
boot_order = hvparams[constants.HV_BOOT_ORDER]
- if boot_order not in ('cdrom', 'disk', 'network'):
- raise errors.HypervisorError("The boot order must be 'cdrom', 'disk' or"
- " 'network'")
+ if boot_order not in constants.HT_KVM_VALID_BO_TYPES:
+ raise errors.HypervisorError(\
+ "The boot order must be one of %s" %
+ utils.CommaJoin(constants.HT_KVM_VALID_BO_TYPES))
- if boot_order == 'cdrom' and not iso_path:
- raise errors.HypervisorError("Cannot boot from cdrom without an ISO path")
+ if boot_order == constants.HT_BO_CDROM and not iso_path:
+ raise errors.HypervisorError("Cannot boot from cdrom without an"
+ " ISO path")
nic_type = hvparams[constants.HV_NIC_TYPE]
if nic_type not in constants.HT_KVM_VALID_NIC_TYPES:
- raise errors.HypervisorError("Invalid NIC type %s specified for the KVM"
- " hypervisor. Please choose one of: %s" %
- (nic_type,
- constants.HT_KVM_VALID_NIC_TYPES))
- elif boot_order == 'network' and nic_type == constants.HT_NIC_PARAVIRTUAL:
+ raise errors.HypervisorError(\
+ "Invalid NIC type %s specified for the KVM"
+ " hypervisor. Please choose one of: %s" %
+ (nic_type, utils.CommaJoin(constants.HT_KVM_VALID_NIC_TYPES)))
+ elif (boot_order == constants.HT_BO_NETWORK and
+ nic_type == constants.HT_NIC_PARAVIRTUAL):
raise errors.HypervisorError("Cannot boot from a paravirtual NIC. Please"
- " change the nic type.")
+ " change the NIC type.")
disk_type = hvparams[constants.HV_DISK_TYPE]
if disk_type not in constants.HT_KVM_VALID_DISK_TYPES:
- raise errors.HypervisorError("Invalid disk type %s specified for the KVM"
- " hypervisor. Please choose one of: %s" %
- (disk_type,
- constants.HT_KVM_VALID_DISK_TYPES))
+ raise errors.HypervisorError(\
+ "Invalid disk type %s specified for the KVM"
+ " hypervisor. Please choose one of: %s" %
+ (disk_type, utils.CommaJoin(constants.HT_KVM_VALID_DISK_TYPES)))
mouse_type = hvparams[constants.HV_USB_MOUSE]
- if mouse_type and mouse_type not in ('mouse', 'tablet'):
- raise errors.HypervisorError("Invalid usb mouse type %s specified for"
- " the KVM hyervisor. Please choose"
- " 'mouse' or 'tablet'" % mouse_type)
+ if mouse_type and mouse_type not in constants.HT_KVM_VALID_MOUSE_TYPES:
+ raise errors.HypervisorError(\
+ "Invalid usb mouse type %s specified for the KVM hypervisor. Please"
+        " choose one of %s" %
+        (mouse_type, utils.CommaJoin(constants.HT_KVM_VALID_MOUSE_TYPES)))
def ValidateParameters(self, hvparams):
"""Check the given parameters for validity.
# device type checks
nic_type = hvparams[constants.HV_NIC_TYPE]
if nic_type not in constants.HT_HVM_VALID_NIC_TYPES:
- raise errors.HypervisorError("Invalid NIC type %s specified for the Xen"
- " HVM hypervisor. Please choose one of: %s"
- % (nic_type,
- constants.HT_HVM_VALID_NIC_TYPES))
+ raise errors.HypervisorError(\
+ "Invalid NIC type %s specified for the Xen"
+ " HVM hypervisor. Please choose one of: %s"
+ % (nic_type, utils.CommaJoin(constants.HT_HVM_VALID_NIC_TYPES)))
disk_type = hvparams[constants.HV_DISK_TYPE]
if disk_type not in constants.HT_HVM_VALID_DISK_TYPES:
- raise errors.HypervisorError("Invalid disk type %s specified for the Xen"
- " HVM hypervisor. Please choose one of: %s"
- % (disk_type,
- constants.HT_HVM_VALID_DISK_TYPES))
+ raise errors.HypervisorError(\
+ "Invalid disk type %s specified for the Xen"
+ " HVM hypervisor. Please choose one of: %s"
+ % (disk_type, utils.CommaJoin(constants.HT_HVM_VALID_DISK_TYPES)))
# vnc_bind_address verification
vnc_bind_address = hvparams[constants.HV_VNC_BIND_ADDRESS]
if vnc_bind_address:
"""Startup an instance."""
OP_ID = "OP_INSTANCE_STARTUP"
OP_DSC_FIELD = "instance_name"
- __slots__ = ["instance_name", "force"]
+ __slots__ = ["instance_name", "force", "hvparams", "beparams"]
class OpShutdownInstance(OpCode):
self._cfg = cfg
self.port = utils.GetNodeDaemonPort()
- def _InstDict(self, instance):
+ def _InstDict(self, instance, hvp=None, bep=None):
"""Convert the given instance to a dict.
This is done via the instance's ToDict() method and additionally
@type instance: L{objects.Instance}
@param instance: an Instance object
+ @type hvp: dict or None
+ @param hvp: a dictionary with overriden hypervisor parameters
+ @type bep: dict or None
+ @param bep: a dictionary with overriden backend parameters
@rtype: dict
@return: the instance dict, with the hvparams filled with the
cluster defaults
idict = instance.ToDict()
cluster = self._cfg.GetClusterInfo()
idict["hvparams"] = cluster.FillHV(instance)
+ if hvp is not None:
+ idict["hvparams"].update(hvp)
idict["beparams"] = cluster.FillBE(instance)
+ if bep is not None:
+ idict["beparams"].update(bep)
return idict
def _ConnectList(self, client, node_list, call):
"""
return self._SingleNodeCall(node, "bridges_exist", [bridges_list])
- def call_instance_start(self, node, instance):
+ def call_instance_start(self, node, instance, hvp, bep):
"""Starts an instance.
This is a single-node call.
"""
- return self._SingleNodeCall(node, "instance_start",
- [self._InstDict(instance)])
+ idict = self._InstDict(instance, hvp=hvp, bep=bep)
+ return self._SingleNodeCall(node, "instance_start", [idict])
def call_instance_shutdown(self, node, instance):
"""Stops an instance.
if env is not None:
cmd_env.update(env)
- if output is None:
- out, err, status = _RunCmdPipe(cmd, cmd_env, shell, cwd)
- else:
- status = _RunCmdFile(cmd, cmd_env, shell, output, cwd)
- out = err = ""
+ try:
+ if output is None:
+ out, err, status = _RunCmdPipe(cmd, cmd_env, shell, cwd)
+ else:
+ status = _RunCmdFile(cmd, cmd_env, shell, output, cwd)
+ out = err = ""
+ except OSError, err:
+ if err.errno == errno.ENOENT:
+ raise errors.OpExecError("Can't execute '%s': not found (%s)" %
+ (strcmd, err))
+ else:
+ raise
if status >= 0:
exitcode = status
dir_name, base_name = os.path.split(file_name)
fd, new_name = tempfile.mkstemp('.new', base_name, dir_name)
+ do_remove = True
# here we need to make sure we remove the temp file, if any error
# leaves it in place
try:
os.utime(new_name, (atime, mtime))
if not dry_run:
os.rename(new_name, file_name)
+ do_remove = False
finally:
if close:
os.close(fd)
result = None
else:
result = fd
- RemoveFile(new_name)
+ if do_remove:
+ RemoveFile(new_name)
return result
"""Return a 'safe' version of a source string.
This function mangles the input string and returns a version that
- should be safe to disply/encode as ASCII. To this end, we first
+ should be safe to display/encode as ASCII. To this end, we first
convert it to ASCII using the 'backslashreplace' encoding which
- should get rid of any non-ASCII chars, and then we again encode it
- via 'string_escape' which converts '\n' into '\\n' so that log
- messages remain one-line.
+ should get rid of any non-ASCII chars, and then we process it
+ through a loop copied from the string repr sources in the python; we
+ don't use string_escape anymore since that escape single quotes and
+ backslashes too, and that is too much; and that escaping is not
+ stable, i.e. string_escape(string_escape(x)) != string_escape(x).
@type text: str or unicode
@param text: input data
@return: a safe version of text
"""
- text = text.encode('ascii', 'backslashreplace')
- text = text.encode('string_escape')
- return text
+ if isinstance(text, unicode):
+    # only if unicode; if str already, we handle it below
+ text = text.encode('ascii', 'backslashreplace')
+ resu = ""
+ for char in text:
+ c = ord(char)
+ if char == '\t':
+ resu += r'\t'
+ elif char == '\n':
+ resu += r'\n'
+ elif char == '\r':
+      resu += r'\r'
+ elif c < 32 or c >= 127: # non-printable
+ resu += "\\x%02x" % (c & 0xff)
+ else:
+ resu += char
+ return resu
+
+
+def CommaJoin(names):
+ """Nicely join a set of identifiers.
+
+ @param names: set, list or tuple
+ @return: a string with the formatted results
+
+ """
+ return ", ".join(["'%s'" % val for val in names])
def LockedMethod(fn):
<cmdsynopsis>
<command>submit-job</command>
- <arg choice="req">opcodes_file</arg>
+ <arg choice="req" rep="repeat">opcodes_file</arg>
</cmdsynopsis>
<para>
- This command builds a list of opcodes from a JSON-format file
- and submits them as a single job to the master daemon. It can
+ This command builds a list of opcodes from JSON-format files
+ and submits for each file a job to the master daemon. It can
be used to test some options that are not available via the
command line.
</para>
<arg>--all</arg>
</group>
<sbr>
+ <arg>-H <option>key=value...</option></arg>
+ <arg>-B <option>key=value...</option></arg>
+ <sbr>
<arg>--submit</arg>
<sbr>
<arg choice="opt"
</para>
<para>
+ The <option>-H</option> and <option>-B</option> options
+ specify extra, temporary hypervisor and backend parameters
+ that can be used to start an instance with modified
+ parameters. They can be useful for quick testing without
+ having to modify an instance back and forth, e.g.:
+ <screen>
+# gnt-instance start -H root_args="single" instance1
+# gnt-instance start -B memory=2048 instance2
+ </screen>
+ The first form will start the instance
+ <userinput>instance1</userinput> in single-user mode, and
+ the instance <userinput>instance2</userinput> with 2GB of
+ RAM (this time only, unless that is the actual instance
+ memory size already).
+ </para>
+
+ <para>
The <option>--submit</option> option is used to send the job to
the master daemon but not wait for its completion. The job
ID will be shown so that it can be examined via
ToStdout("Cluster parameters:")
ToStdout(" - candidate pool size: %s", result["candidate_pool_size"])
+ ToStdout(" - master netdev: %s", result["master_netdev"])
+ ToStdout(" - default bridge: %s", result["default_bridge"])
+ ToStdout(" - lvm volume group: %s", result["volume_group_name"])
+ ToStdout(" - file storage path: %s", result["file_storage_dir"])
ToStdout("Default instance parameters:")
for gr_name, gr_dict in result["beparams"].items():
if not opts.lvm_storage and opts.vg_name:
ToStdout("Options --no-lvm-storage and --vg-name conflict.")
return 1
+ elif not opts.lvm_storage:
+ vg_name = ''
hvlist = opts.enabled_hypervisors
if hvlist is not None:
beparams = opts.beparams
utils.ForceDictType(beparams, constants.BES_PARAMETER_TYPES)
- op = opcodes.OpSetClusterParams(vg_name=opts.vg_name,
+ op = opcodes.OpSetClusterParams(vg_name=vg_name,
enabled_hypervisors=hvlist,
hvparams=hvparams,
beparams=beparams,
"""
cl = cli.GetClient()
- fname = args[0]
- op_data = simplejson.loads(open(fname).read())
- op_list = [opcodes.OpCode.LoadOpCode(val) for val in op_data]
- jid = cli.SendJob(op_list, cl=cl)
- ToStdout("Job id: %s", jid)
- cli.PollJob(jid, cl=cl)
+ job_data = []
+ job_ids = []
+ for fname in args:
+ op_data = simplejson.loads(open(fname).read())
+ op_list = [opcodes.OpCode.LoadOpCode(val) for val in op_data]
+ job_data.append((fname, op_list))
+ for fname, op_list in job_data:
+ jid = cli.SendJob(op_list, cl=cl)
+ ToStdout("File '%s', job id: %s", fname, jid)
+ job_ids.append(jid)
+ for jid in job_ids:
+ ToStdout("Waiting for job id %s", jid)
+ cli.PollJob(jid, cl=cl)
return 0
help="Select nodes to sleep on"),
],
"[opts...] <duration>", "Executes a TestDelay OpCode"),
- 'submit-job': (GenericOpCodes, ARGS_ONE,
+ 'submit-job': (GenericOpCodes, ARGS_ATLEAST(1),
[DEBUG_OPT,
],
- "<op_list_file>", "Submits a job built from a json-file"
- " with a list of serialized opcodes"),
+ "<op_list_file...>", "Submits jobs built from json files"
+ " containing a list of serialized opcodes"),
'allocator': (TestAllocator, ARGS_ONE,
[DEBUG_OPT,
make_option("--dir", dest="direction",
for name in inames:
op = opcodes.OpStartupInstance(instance_name=name,
force=opts.force)
+ # do not add these parameters to the opcode unless they're defined
+ if opts.hvparams:
+ op.hvparams = opts.hvparams
+ if opts.beparams:
+ op.beparams = opts.beparams
jex.QueueJob(name, op)
jex.WaitOrShow(not opts.submit_only)
return 0
m_node_opt, m_pri_node_opt, m_sec_node_opt,
m_clust_opt, m_inst_opt,
SUBMIT_OPT,
+ keyval_option("-H", "--hypervisor", type="keyval",
+ default={}, dest="hvparams",
+ help="Temporary hypervisor parameters"),
+ keyval_option("-B", "--backend", type="keyval",
+ default={}, dest="beparams",
+ help="Temporary backend parameters"),
],
- "<instance>", "Starts an instance"),
+ "<instance>", "Starts an instance"),
'reboot': (RebootInstance, ARGS_ANY,
[DEBUG_OPT, m_force_multi,
headers = None
# change raw values to nicer strings
- for row in output:
+ for row_id, row in enumerate(output):
+ if row is None:
+ ToStderr("No such job: %s" % args[row_id])
+ continue
+
for idx, field in enumerate(selected_fields):
val = row[idx]
if field == "status":
--- /dev/null
+disk {
+ size 0s _is_default; # bytes
+ on-io-error detach;
+ fencing dont-care _is_default;
+ max-bio-bvecs 0 _is_default;
+}
+net {
+ timeout 60 _is_default; # 1/10 seconds
+ max-epoch-size 2048 _is_default;
+ max-buffers 2048 _is_default;
+ unplug-watermark 128 _is_default;
+ connect-int 10 _is_default; # seconds
+ ping-int 10 _is_default; # seconds
+ sndbuf-size 131070 _is_default; # bytes
+ ko-count 0 _is_default;
+ after-sb-0pri discard-zero-changes;
+ after-sb-1pri consensus;
+ after-sb-2pri disconnect _is_default;
+ rr-conflict disconnect _is_default;
+ ping-timeout 5 _is_default; # 1/10 seconds
+}
+syncer {
+ rate 61440k; # bytes/second
+ after -1 _is_default;
+ al-extents 257;
+}
+protocol C;
+_this_host {
+ device minor 0;
+ disk "/dev/xenvg/test.data";
+ meta-disk "/dev/xenvg/test.meta" [ 0 ];
+ address ipv4 192.168.1.1:11000;
+}
+_remote_host {
+ address ipv4 192.168.1.2:11000;
+}
"""Test drbdsetup show parser creation"""
bdev.DRBD8._GetShowParser()
- def testParserBoth(self):
+ def testParserBoth80(self):
"""Test drbdsetup show parser for disk and network"""
data = self._ReadTestData("bdev-both.txt")
result = bdev.DRBD8._GetDevInfo(data)
"Wrong local disk info")
self.failUnless(self._has_net(result, ("192.168.1.1", 11000),
("192.168.1.2", 11000)),
- "Wrong network info")
+ "Wrong network info (8.0.x)")
+
+ def testParserBoth83(self):
+ """Test drbdsetup show parser for disk and network"""
+ data = self._ReadTestData("bdev-8.3-both.txt")
+ result = bdev.DRBD8._GetDevInfo(data)
+ self.failUnless(self._has_disk(result, "/dev/xenvg/test.data",
+ "/dev/xenvg/test.meta"),
+ "Wrong local disk info")
+ self.failUnless(self._has_net(result, ("192.168.1.1", 11000),
+ ("192.168.1.2", 11000)),
+                    "Wrong network info (8.3.x)")
def testParserNet(self):
"""Test drbdsetup show parser for disk and network"""
"""Read in txt data"""
testutils.GanetiTestCase.setUp(self)
proc_data = self._TestDataFilename("proc_drbd8.txt")
+ proc83_data = self._TestDataFilename("proc_drbd83.txt")
self.proc_data = bdev.DRBD8._GetProcData(filename=proc_data)
+ self.proc83_data = bdev.DRBD8._GetProcData(filename=proc83_data)
self.mass_data = bdev.DRBD8._MassageProcData(self.proc_data)
+ self.mass83_data = bdev.DRBD8._MassageProcData(self.proc83_data)
def testIOErrors(self):
"""Test handling of errors while reading the proc file."""
def testMinorNotFound(self):
"""Test not-found-minor in /proc"""
self.failUnless(9 not in self.mass_data)
+ self.failUnless(9 not in self.mass83_data)
def testLineNotMatch(self):
"""Test wrong line passed to DRBD8Status"""
def testMinor0(self):
"""Test connected, primary device"""
- stats = bdev.DRBD8Status(self.mass_data[0])
- self.failUnless(stats.is_in_use)
- self.failUnless(stats.is_connected and stats.is_primary and
- stats.peer_secondary and stats.is_disk_uptodate)
+ for data in [self.mass_data, self.mass83_data]:
+ stats = bdev.DRBD8Status(data[0])
+ self.failUnless(stats.is_in_use)
+ self.failUnless(stats.is_connected and stats.is_primary and
+ stats.peer_secondary and stats.is_disk_uptodate)
def testMinor1(self):
"""Test connected, secondary device"""
- stats = bdev.DRBD8Status(self.mass_data[1])
- self.failUnless(stats.is_in_use)
- self.failUnless(stats.is_connected and stats.is_secondary and
- stats.peer_primary and stats.is_disk_uptodate)
+ for data in [self.mass_data, self.mass83_data]:
+ stats = bdev.DRBD8Status(data[1])
+ self.failUnless(stats.is_in_use)
+ self.failUnless(stats.is_connected and stats.is_secondary and
+ stats.peer_primary and stats.is_disk_uptodate)
def testMinor2(self):
"""Test unconfigured device"""
- stats = bdev.DRBD8Status(self.mass_data[2])
- self.failIf(stats.is_in_use)
+ for data in [self.mass_data, self.mass83_data]:
+ stats = bdev.DRBD8Status(data[2])
+ self.failIf(stats.is_in_use)
def testMinor4(self):
"""Test WFconn device"""
- stats = bdev.DRBD8Status(self.mass_data[4])
- self.failUnless(stats.is_in_use)
- self.failUnless(stats.is_wfconn and stats.is_primary and
- stats.rrole == 'Unknown' and
- stats.is_disk_uptodate)
+ for data in [self.mass_data, self.mass83_data]:
+ stats = bdev.DRBD8Status(data[4])
+ self.failUnless(stats.is_in_use)
+ self.failUnless(stats.is_wfconn and stats.is_primary and
+ stats.rrole == 'Unknown' and
+ stats.is_disk_uptodate)
def testMinor6(self):
"""Test diskless device"""
- stats = bdev.DRBD8Status(self.mass_data[6])
- self.failUnless(stats.is_in_use)
- self.failUnless(stats.is_connected and stats.is_secondary and
- stats.peer_primary and stats.is_diskless)
+ for data in [self.mass_data, self.mass83_data]:
+ stats = bdev.DRBD8Status(data[6])
+ self.failUnless(stats.is_in_use)
+ self.failUnless(stats.is_connected and stats.is_secondary and
+ stats.peer_primary and stats.is_diskless)
def testMinor8(self):
"""Test standalone device"""
- stats = bdev.DRBD8Status(self.mass_data[8])
- self.failUnless(stats.is_in_use)
- self.failUnless(stats.is_standalone and
- stats.rrole == 'Unknown' and
- stats.is_disk_uptodate)
+ for data in [self.mass_data, self.mass83_data]:
+ stats = bdev.DRBD8Status(data[8])
+ self.failUnless(stats.is_in_use)
+ self.failUnless(stats.is_standalone and
+ stats.rrole == 'Unknown' and
+ stats.is_disk_uptodate)
if __name__ == '__main__':
unittest.main()
import shutil
import re
import select
+import string
import ganeti
import testutils
ParseUnit, AddAuthorizedKey, RemoveAuthorizedKey, \
ShellQuote, ShellQuoteArgs, TcpPing, ListVisibleFiles, \
SetEtcHostsEntry, RemoveEtcHostsEntry, FirstFree, OwnIpAddress, \
- TailFile, ForceDictType
+ TailFile, ForceDictType, SafeEncode
from ganeti.errors import LockError, UnitParseError, GenericError, \
ProgrammerError
self.assertRaises(errors.TypeEnforcementError, self._fdt, {'d': '4 L'})
+class TestSafeEncode(unittest.TestCase):
+ """Test case for SafeEncode"""
+
+ def testAscii(self):
+ for txt in [string.digits, string.letters, string.punctuation]:
+ self.failUnlessEqual(txt, SafeEncode(txt))
+
+ def testDoubleEncode(self):
+ for i in range(256):
+ txt = SafeEncode(chr(i))
+ self.failUnlessEqual(txt, SafeEncode(txt))
+
+ def testUnicode(self):
+ # 1024 is high enough to catch non-direct ASCII mappings
+ for i in range(1024):
+ txt = SafeEncode(unichr(i))
+ self.failUnlessEqual(txt, SafeEncode(txt))
+
+
if __name__ == '__main__':
unittest.main()