#
#
-# Copyright (C) 2008, 2009, 2010, 2011 Google Inc.
+# Copyright (C) 2008, 2009, 2010, 2011, 2012 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
import fcntl
import shutil
import socket
+import stat
import StringIO
+import fdsend
+from bitarray import bitarray
+try:
+ import affinity # pylint: disable=F0401
+except ImportError:
+ affinity = None
from ganeti import utils
from ganeti import constants
_KVM_NETWORK_SCRIPT = constants.SYSCONFDIR + "/ganeti/kvm-vif-bridge"
+_KVM_START_PAUSED_FLAG = "-S"
# TUN/TAP driver constants, taken from <linux/if_tun.h>
# They are architecture-independent and already hardcoded in qemu-kvm source,
IFF_NO_PI = 0x1000
IFF_VNET_HDR = 0x4000
+FREE = bitarray("0")
def _ProbeTapVnetHdr(fd):
"""Check whether to enable the IFF_VNET_HDR flag.
"""QEMU Messaging Protocol (QMP) message.
"""
-
def __init__(self, data):
"""Creates a new QMP message based on the passed data.
is not contained in the message
"""
-
- if field_name in self.data:
- return self.data[field_name]
-
- return None
+ return self.data.get(field_name, None)
def __setitem__(self, field_name, field_value):
"""Set the value of the required field_name to field_value.
return QmpMessage(data)
def __str__(self):
- # The protocol expects the JSON object to be sent as a single
- # line, hence the need for indent=False.
- return serializer.DumpJson(self.data, indent=False)
+ # The protocol expects the JSON object to be sent as a single line.
+ return serializer.DumpJson(self.data)
def __eq__(self, other):
# When comparing two QmpMessages, we are interested in comparing
_FIRST_MESSAGE_KEY = "QMP"
_EVENT_KEY = "event"
_ERROR_KEY = "error"
+ _RETURN_KEY = RETURN_KEY = "return"
+ _ACTUAL_KEY = ACTUAL_KEY = "actual"
_ERROR_CLASS_KEY = "class"
_ERROR_DATA_KEY = "data"
_ERROR_DESC_KEY = "desc"
self._connected = False
self._buf = ""
+ def _check_socket(self):
+ sock_stat = None
+ try:
+ sock_stat = os.stat(self.monitor_filename)
+ except EnvironmentError, err:
+ if err.errno == errno.ENOENT:
+ raise errors.HypervisorError("No qmp socket found")
+ else:
+ raise errors.HypervisorError("Error checking qmp socket: %s",
+ utils.ErrnoOrStr(err))
+ if not stat.S_ISSOCK(sock_stat.st_mode):
+ raise errors.HypervisorError("Qmp socket is not a socket")
+
def _check_connection(self):
"""Make sure that the connection is established.
@raise errors.ProgrammerError: when there are data serialization errors
"""
- self.sock.connect(self.monitor_filename)
+ if self._connected:
+ raise errors.ProgrammerError("Cannot connect twice")
+
+ self._check_socket()
+
+ # Check file existance/stuff
+ try:
+ self.sock.connect(self.monitor_filename)
+ except EnvironmentError:
+ raise errors.HypervisorError("Can't connect to qmp socket")
self._connected = True
# Check if we receive a correct greeting message from the server
class KVMHypervisor(hv_base.BaseHypervisor):
- """KVM hypervisor interface"""
+ """KVM hypervisor interface
+
+ """
CAN_MIGRATE = True
_ROOT_DIR = constants.RUN_GANETI_DIR + "/kvm-hypervisor"
# a separate directory, called 'chroot-quarantine'.
_CHROOT_QUARANTINE_DIR = _ROOT_DIR + "/chroot-quarantine"
_DIRS = [_ROOT_DIR, _PIDS_DIR, _UIDS_DIR, _CTRL_DIR, _CONF_DIR, _NICS_DIR,
- _CHROOT_DIR, _CHROOT_QUARANTINE_DIR]
+ _CHROOT_DIR, _CHROOT_QUARANTINE_DIR, _KEYMAP_DIR]
PARAMETERS = {
constants.HV_KERNEL_PATH: hv_base.OPT_FILE_CHECK,
constants.HT_KVM_SPICE_VALID_VIDEO_STREAM_DETECTION_OPTIONS),
constants.HV_KVM_SPICE_AUDIO_COMPR: hv_base.NO_CHECK,
constants.HV_KVM_SPICE_USE_TLS: hv_base.NO_CHECK,
+ constants.HV_KVM_SPICE_TLS_CIPHERS: hv_base.NO_CHECK,
+ constants.HV_KVM_SPICE_USE_VDAGENT: hv_base.NO_CHECK,
constants.HV_KVM_FLOPPY_IMAGE_PATH: hv_base.OPT_FILE_CHECK,
constants.HV_CDROM_IMAGE_PATH: hv_base.OPT_FILE_CHECK,
constants.HV_KVM_CDROM2_IMAGE_PATH: hv_base.OPT_FILE_CHECK,
constants.HV_KVM_USE_CHROOT: hv_base.NO_CHECK,
constants.HV_MEM_PATH: hv_base.OPT_DIR_CHECK,
constants.HV_REBOOT_BEHAVIOR:
- hv_base.ParamInSet(True, constants.REBOOT_BEHAVIORS)
+ hv_base.ParamInSet(True, constants.REBOOT_BEHAVIORS),
+ constants.HV_CPU_MASK: hv_base.OPT_MULTI_CPU_MASK_CHECK,
}
_MIGRATION_STATUS_RE = re.compile("Migration\s+status:\s+(\w+)",
re.M | re.I)
+ _MIGRATION_PROGRESS_RE = \
+ re.compile(r"\s*transferred\s+ram:\s+(?P<transferred>\d+)\s+kbytes\s*\n"
+ r"\s*remaining\s+ram:\s+(?P<remaining>\d+)\s+kbytes\s*\n"
+ r"\s*total\s+ram:\s+(?P<total>\d+)\s+kbytes\s*\n", re.I)
+
_MIGRATION_INFO_MAX_BAD_ANSWERS = 5
_MIGRATION_INFO_RETRY_DELAY = 2
- _VERSION_RE = re.compile(r"\b(\d+)\.(\d+)\.(\d+)\b")
+ _VERSION_RE = re.compile(r"\b(\d+)\.(\d+)(\.(\d+))?\b")
+
+ _CPU_INFO_RE = re.compile(r"cpu\s+\#(\d+).*thread_id\s*=\s*(\d+)", re.I)
+ _CPU_INFO_CMD = "info cpus"
+ _CONT_CMD = "cont"
+
+ _INFO_PCI_RE = re.compile(r'Bus.*device[ ]*(\d+).*')
+ _INFO_PCI_CMD = "info pci"
+
ANCILLARY_FILES = [
_KVM_NETWORK_SCRIPT,
]
+ ANCILLARY_FILES_OPT = [
+ _KVM_NETWORK_SCRIPT,
+ ]
def __init__(self):
hv_base.BaseHypervisor.__init__(self)
@type tap: str
"""
-
if instance.tags:
tags = " ".join(instance.tags)
else:
" Network configuration script output: %s" %
(tap, result.fail_reason, result.output))
+  @staticmethod
+  def _VerifyAffinityPackage():
+    # The optional "affinity" package is imported at module load time;
+    # when it is missing the module-level name is bound to None, and CPU
+    # pinning cannot be performed.
+    if affinity is None:
+      raise errors.HypervisorError("affinity Python package not"
+                                   " found; cannot use CPU pinning under KVM")
+
+  @staticmethod
+  def _BuildAffinityCpuMask(cpu_list):
+    """Create a CPU mask suitable for sched_setaffinity from a list of
+    CPUs.
+
+    See man taskset for more info on sched_setaffinity masks.
+    For example: [ 0, 2, 5, 6 ] will return 101 (0x65, 0..01100101).
+
+    @type cpu_list: list of int
+    @param cpu_list: list of physical CPU numbers to map to vCPUs in order
+    @rtype: int
+    @return: a bit mask of CPU affinities
+
+    """
+    if cpu_list == constants.CPU_PINNING_OFF:
+      # Pinning disabled: return the "all CPUs" mask value
+      return constants.CPU_PINNING_ALL_KVM
+    else:
+      # Set one bit per requested physical CPU number
+      return sum(2 ** cpu for cpu in cpu_list)
+
+ @classmethod
+ def _AssignCpuAffinity(cls, cpu_mask, process_id, thread_dict):
+ """Change CPU affinity for running VM according to given CPU mask.
+
+ @param cpu_mask: CPU mask as given by the user. e.g. "0-2,4:all:1,3"
+ @type cpu_mask: string
+ @param process_id: process ID of KVM process. Used to pin entire VM
+ to physical CPUs.
+ @type process_id: int
+ @param thread_dict: map of virtual CPUs to KVM thread IDs
+ @type thread_dict: dict int:int
+
+ """
+ # Convert the string CPU mask to a list of list of int's
+ cpu_list = utils.ParseMultiCpuMask(cpu_mask)
+
+ if len(cpu_list) == 1:
+ all_cpu_mapping = cpu_list[0]
+ if all_cpu_mapping == constants.CPU_PINNING_OFF:
+ # If CPU pinning has 1 entry that's "all", then do nothing
+ pass
+ else:
+ # If CPU pinning has one non-all entry, map the entire VM to
+ # one set of physical CPUs
+ cls._VerifyAffinityPackage()
+ affinity.set_process_affinity_mask(process_id,
+ cls._BuildAffinityCpuMask(all_cpu_mapping))
+ else:
+ # The number of vCPUs mapped should match the number of vCPUs
+ # reported by KVM. This was already verified earlier, so
+ # here only as a sanity check.
+ assert len(thread_dict) == len(cpu_list)
+ cls._VerifyAffinityPackage()
+
+ # For each vCPU, map it to the proper list of physical CPUs
+ for vcpu, i in zip(cpu_list, range(len(cpu_list))):
+ affinity.set_process_affinity_mask(thread_dict[i],
+ cls._BuildAffinityCpuMask(vcpu))
+
+  def _GetVcpuThreadIds(self, instance_name):
+    """Get a mapping of vCPU no. to thread IDs for the instance
+
+    @type instance_name: string
+    @param instance_name: instance in question
+    @rtype: dictionary of int:int
+    @return: a dictionary mapping vCPU numbers to thread IDs
+
+    """
+    result = {}
+    # Parse the "info cpus" monitor output; lines not matching the
+    # CPU-info pattern (banner lines etc.) are skipped.
+    output = self._CallMonitorCommand(instance_name, self._CPU_INFO_CMD)
+    for line in output.stdout.splitlines():
+      match = self._CPU_INFO_RE.search(line)
+      if not match:
+        continue
+      # Regexp groups are (vcpu_number, thread_id), both numeric
+      grp = map(int, match.groups())
+      result[grp[0]] = grp[1]
+
+    return result
+
+  def _ExecuteCpuAffinity(self, instance_name, cpu_mask):
+    """Complete CPU pinning.
+
+    @type instance_name: string
+    @param instance_name: name of instance
+    @type cpu_mask: string
+    @param cpu_mask: CPU pinning mask as entered by user
+
+    @note: NOTE(review): presumably called while the VM is started in a
+        paused state so pinning takes effect before the guest runs --
+        confirm against the caller in _ExecuteKVMRuntime.
+
+    """
+    # Get KVM process ID, to be used if need to pin entire VM
+    _, pid, _ = self._InstancePidAlive(instance_name)
+    # Get vCPU thread IDs, to be used if need to pin vCPUs separately
+    thread_dict = self._GetVcpuThreadIds(instance_name)
+    # Run CPU pinning, based on configured mask
+    self._AssignCpuAffinity(cpu_mask, pid, thread_dict)
+
def ListInstances(self):
"""Get the list of running instances.
return None
_, memory, vcpus = self._InstancePidInfo(pid)
- stat = "---b-"
+ istat = "---b-"
times = "0"
- return (instance_name, pid, memory, vcpus, stat, times)
+ try:
+ qmp = QmpConnection(self._InstanceQmpMonitor(instance_name))
+ qmp.connect()
+ vcpus = len(qmp.Execute("query-cpus")[qmp.RETURN_KEY])
+ # Will fail if ballooning is not enabled, but we can then just resort to
+ # the value above.
+ mem_bytes = qmp.Execute("query-balloon")[qmp.RETURN_KEY][qmp.ACTUAL_KEY]
+ memory = mem_bytes / 1048576
+ except errors.HypervisorError:
+ pass
+
+ return (instance_name, pid, memory, vcpus, istat, times)
def GetAllInstancesInfo(self):
"""Get properties of all instances.
try:
info = self.GetInstanceInfo(name)
except errors.HypervisorError:
+ # Ignore exceptions due to instances being shut down
continue
if info:
data.append(info)
return data
- def _GenerateKVMRuntime(self, instance, block_devices, startup_paused):
- """Generate KVM information to start an instance.
-
- """
- # pylint: disable=R0914,R0915
- _, v_major, v_min, _ = self._GetKVMVersion()
-
- pidfile = self._InstancePidFile(instance.name)
- kvm = constants.KVM_PATH
- kvm_cmd = [kvm]
- # used just by the vnc server, if enabled
- kvm_cmd.extend(["-name", instance.name])
- kvm_cmd.extend(["-m", instance.beparams[constants.BE_MEMORY]])
- kvm_cmd.extend(["-smp", instance.beparams[constants.BE_VCPUS]])
- kvm_cmd.extend(["-pidfile", pidfile])
- kvm_cmd.extend(["-daemonize"])
- if not instance.hvparams[constants.HV_ACPI]:
- kvm_cmd.extend(["-no-acpi"])
- if startup_paused:
- kvm_cmd.extend(["-S"])
- if instance.hvparams[constants.HV_REBOOT_BEHAVIOR] == \
- constants.INSTANCE_REBOOT_EXIT:
- kvm_cmd.extend(["-no-reboot"])
+ def _GenerateKVMBlockDevicesOptions(self, instance, kvm_cmd, block_devices):
hvp = instance.hvparams
boot_disk = hvp[constants.HV_BOOT_ORDER] == constants.HT_BO_DISK
- boot_cdrom = hvp[constants.HV_BOOT_ORDER] == constants.HT_BO_CDROM
- boot_floppy = hvp[constants.HV_BOOT_ORDER] == constants.HT_BO_FLOPPY
- boot_network = hvp[constants.HV_BOOT_ORDER] == constants.HT_BO_NETWORK
- self.ValidateParameters(hvp)
-
- if hvp[constants.HV_KVM_FLAG] == constants.HT_KVM_ENABLED:
- kvm_cmd.extend(["-enable-kvm"])
- elif hvp[constants.HV_KVM_FLAG] == constants.HT_KVM_DISABLED:
- kvm_cmd.extend(["-disable-kvm"])
+ _, v_major, v_min, _ = self._GetKVMVersion()
- if boot_network:
- kvm_cmd.extend(["-boot", "n"])
+ # whether this is an older KVM version that uses the boot=on flag
+ # on devices
+ needs_boot_flag = (v_major, v_min) < (0, 14)
disk_type = hvp[constants.HV_DISK_TYPE]
if disk_type == constants.HT_DISK_PARAVIRTUAL:
if_val = ",if=virtio"
+ if (v_major, v_min) >= (0, 12):
+ disk_model = "virtio-blk-pci"
+ else:
+ disk_model = "virtio"
else:
if_val = ",if=%s" % disk_type
+ disk_model = disk_type
# Cache mode
disk_cache = hvp[constants.HV_DISK_CACHE]
if instance.disk_template in constants.DTS_EXT_MIRROR:
if boot_disk:
kvm_cmd.extend(["-boot", "c"])
boot_disk = False
- if (v_major, v_min) < (0, 14) and disk_type != constants.HT_DISK_IDE:
+ if needs_boot_flag and disk_type != constants.HT_DISK_IDE:
boot_val = ",boot=on"
+ drive_val = "file=%s,format=raw%s%s" % \
+ (dev_path, boot_val, cache_val)
+ if cfdev.idx is not None:
+ #TODO: name id after model
+ drive_val += (",if=none,id=drive%d" % cfdev.idx)
+ if cfdev.pci is not None:
+ drive_val += (",bus=0,unit=%d" % cfdev.pci)
+ else:
+ drive_val += if_val
- drive_val = "file=%s,format=raw%s%s%s" % (dev_path, if_val, boot_val,
- cache_val)
kvm_cmd.extend(["-drive", drive_val])
+ if cfdev.idx is not None:
+ dev_val = ("%s,drive=drive%d,id=virtio-blk-pci.%d" %
+ (disk_model, cfdev.idx, cfdev.idx))
+ if cfdev.pci is not None:
+ dev_val += ",bus=pci.0,addr=%s" % hex(cfdev.pci)
+ kvm_cmd.extend(["-device", dev_val])
+
+ return kvm_cmd
+
+ def _GenerateKVMRuntime(self, instance, block_devices, startup_paused):
+ """Generate KVM information to start an instance.
+
+ @attention: this function must not have any side-effects; for
+ example, it must not write to the filesystem, or read values
+ from the current system the are expected to differ between
+ nodes, since it is only run once at instance startup;
+ actions/kvm arguments that can vary between systems should be
+ done in L{_ExecuteKVMRuntime}
+
+ """
+ # pylint: disable=R0914,R0915
+ _, v_major, v_min, _ = self._GetKVMVersion()
+
+ pidfile = self._InstancePidFile(instance.name)
+ kvm = constants.KVM_PATH
+ kvm_cmd = [kvm]
+ # used just by the vnc server, if enabled
+ kvm_cmd.extend(["-name", instance.name])
+ kvm_cmd.extend(["-m", instance.beparams[constants.BE_MAXMEM]])
+ kvm_cmd.extend(["-smp", instance.beparams[constants.BE_VCPUS]])
+ kvm_cmd.extend(["-pidfile", pidfile])
+ kvm_cmd.extend(["-balloon", "virtio"])
+ kvm_cmd.extend(["-daemonize"])
+ if not instance.hvparams[constants.HV_ACPI]:
+ kvm_cmd.extend(["-no-acpi"])
+ if instance.hvparams[constants.HV_REBOOT_BEHAVIOR] == \
+ constants.INSTANCE_REBOOT_EXIT:
+ kvm_cmd.extend(["-no-reboot"])
+
+ hvp = instance.hvparams
+ kernel_path = hvp[constants.HV_KERNEL_PATH]
+ if kernel_path:
+ boot_cdrom = boot_floppy = boot_network = False
+ else:
+ boot_cdrom = hvp[constants.HV_BOOT_ORDER] == constants.HT_BO_CDROM
+ boot_floppy = hvp[constants.HV_BOOT_ORDER] == constants.HT_BO_FLOPPY
+ boot_network = hvp[constants.HV_BOOT_ORDER] == constants.HT_BO_NETWORK
+
+ self.ValidateParameters(hvp)
+
+ if startup_paused:
+ kvm_cmd.extend([_KVM_START_PAUSED_FLAG])
+
+ if hvp[constants.HV_KVM_FLAG] == constants.HT_KVM_ENABLED:
+ kvm_cmd.extend(["-enable-kvm"])
+ elif hvp[constants.HV_KVM_FLAG] == constants.HT_KVM_DISABLED:
+ kvm_cmd.extend(["-disable-kvm"])
+
+ if boot_network:
+ kvm_cmd.extend(["-boot", "n"])
+
+ # whether this is an older KVM version that uses the boot=on flag
+ # on devices
+ needs_boot_flag = (v_major, v_min) < (0, 14)
+
+ disk_type = hvp[constants.HV_DISK_TYPE]
+ if not instance.hotplug_info:
+ kvm_cmd = self._GenerateKVMBlockDevicesOptions(instance, kvm_cmd,
+ block_devices)
+
#Now we can specify a different device type for CDROM devices.
cdrom_disk_type = hvp[constants.HV_KVM_CDROM_DISK_TYPE]
if not cdrom_disk_type:
iso_image = hvp[constants.HV_CDROM_IMAGE_PATH]
if iso_image:
options = ",format=raw,media=cdrom"
+ # set cdrom 'if' type
if boot_cdrom:
- kvm_cmd.extend(["-boot", "d"])
- if cdrom_disk_type != constants.HT_DISK_IDE:
- options = "%s,boot=on,if=%s" % (options, constants.HT_DISK_IDE)
- else:
- options = "%s,boot=on" % options
+ actual_cdrom_type = constants.HT_DISK_IDE
+ elif cdrom_disk_type == constants.HT_DISK_PARAVIRTUAL:
+ actual_cdrom_type = "virtio"
else:
- if cdrom_disk_type == constants.HT_DISK_PARAVIRTUAL:
- if_val = ",if=virtio"
- else:
- if_val = ",if=%s" % cdrom_disk_type
- options = "%s%s" % (options, if_val)
- drive_val = "file=%s%s" % (iso_image, options)
+ actual_cdrom_type = cdrom_disk_type
+ if_val = ",if=%s" % actual_cdrom_type
+ # set boot flag, if needed
+ boot_val = ""
+ if boot_cdrom:
+ kvm_cmd.extend(["-boot", "d"])
+ if needs_boot_flag:
+ boot_val = ",boot=on"
+ # and finally build the entire '-drive' value
+ drive_val = "file=%s%s%s%s" % (iso_image, options, if_val, boot_val)
kvm_cmd.extend(["-drive", drive_val])
iso_image2 = hvp[constants.HV_KVM_CDROM2_IMAGE_PATH]
if_val = ",if=virtio"
else:
if_val = ",if=%s" % cdrom_disk_type
- options = "%s%s" % (options, if_val)
- drive_val = "file=%s%s" % (iso_image2, options)
+ drive_val = "file=%s%s%s" % (iso_image2, options, if_val)
kvm_cmd.extend(["-drive", drive_val])
floppy_image = hvp[constants.HV_KVM_FLOPPY_IMAGE_PATH]
drive_val = "file=%s%s" % (floppy_image, options)
kvm_cmd.extend(["-drive", drive_val])
- kernel_path = hvp[constants.HV_KERNEL_PATH]
if kernel_path:
kvm_cmd.extend(["-kernel", kernel_path])
initrd_path = hvp[constants.HV_INITRD_PATH]
if mem_path:
kvm_cmd.extend(["-mem-path", mem_path, "-mem-prealloc"])
+ monitor_dev = ("unix:%s,server,nowait" %
+ self._InstanceMonitor(instance.name))
+ kvm_cmd.extend(["-monitor", monitor_dev])
+ if hvp[constants.HV_SERIAL_CONSOLE]:
+ serial_dev = ("unix:%s,server,nowait" %
+ self._InstanceSerial(instance.name))
+ kvm_cmd.extend(["-serial", serial_dev])
+ else:
+ kvm_cmd.extend(["-serial", "none"])
+
mouse_type = hvp[constants.HV_USB_MOUSE]
vnc_bind_address = hvp[constants.HV_VNC_BIND_ADDRESS]
+ spice_bind = hvp[constants.HV_KVM_SPICE_BIND]
+ spice_ip_version = None
if mouse_type:
kvm_cmd.extend(["-usb"])
elif vnc_bind_address:
kvm_cmd.extend(["-usbdevice", constants.HT_MOUSE_TABLET])
- keymap = hvp[constants.HV_KEYMAP]
- if keymap:
- keymap_path = self._InstanceKeymapFile(instance.name)
- # If a keymap file is specified, KVM won't use its internal defaults. By
- # first including the "en-us" layout, an error on loading the actual
- # layout (e.g. because it can't be found) won't lead to a non-functional
- # keyboard. A keyboard with incorrect keys is still better than none.
- utils.WriteFile(keymap_path, data="include en-us\ninclude %s\n" % keymap)
- kvm_cmd.extend(["-k", keymap_path])
-
if vnc_bind_address:
if netutils.IP4Address.IsValid(vnc_bind_address):
if instance.network_port > constants.VNC_BASE_PORT:
vnc_arg = "unix:%s/%s.vnc" % (vnc_bind_address, instance.name)
kvm_cmd.extend(["-vnc", vnc_arg])
- else:
- kvm_cmd.extend(["-nographic"])
-
- monitor_dev = ("unix:%s,server,nowait" %
- self._InstanceMonitor(instance.name))
- kvm_cmd.extend(["-monitor", monitor_dev])
- if hvp[constants.HV_SERIAL_CONSOLE]:
- serial_dev = ("unix:%s,server,nowait" %
- self._InstanceSerial(instance.name))
- kvm_cmd.extend(["-serial", serial_dev])
- else:
- kvm_cmd.extend(["-serial", "none"])
-
- spice_bind = hvp[constants.HV_KVM_SPICE_BIND]
- spice_ip_version = None
- if spice_bind:
+ elif spice_bind:
+ # FIXME: this is wrong here; the iface ip address differs
+ # between systems, so it should be done in _ExecuteKVMRuntime
if netutils.IsValidInterface(spice_bind):
# The user specified a network interface, we have to figure out the IP
# address.
# we have both ipv4 and ipv6, let's use the cluster default IP
# version
cluster_family = ssconf.SimpleStore().GetPrimaryIPFamily()
- spice_ip_version = netutils.IPAddress.GetVersionFromAddressFamily(
- cluster_family)
+ spice_ip_version = \
+ netutils.IPAddress.GetVersionFromAddressFamily(cluster_family)
elif addresses[constants.IP4_VERSION]:
spice_ip_version = constants.IP4_VERSION
elif addresses[constants.IP6_VERSION]:
instance.network_port, constants.SPICE_CACERT_FILE)
spice_arg = "%s,x509-key-file=%s,x509-cert-file=%s" % (spice_arg,
constants.SPICE_CERT_FILE, constants.SPICE_CERT_FILE)
+ tls_ciphers = hvp[constants.HV_KVM_SPICE_TLS_CIPHERS]
+ if tls_ciphers:
+ spice_arg = "%s,tls-ciphers=%s" % (spice_arg, tls_ciphers)
else:
spice_arg = "%s,port=%s" % (spice_arg, instance.network_port)
# Audio compression, by default in qemu-kvm it is on
if not hvp[constants.HV_KVM_SPICE_AUDIO_COMPR]:
spice_arg = "%s,playback-compression=off" % spice_arg
+ if not hvp[constants.HV_KVM_SPICE_USE_VDAGENT]:
+ spice_arg = "%s,agent-mouse=off" % spice_arg
+ else:
+ # Enable the spice agent communication channel between the host and the
+ # agent.
+ kvm_cmd.extend(["-device", "virtio-serial-pci"])
+ kvm_cmd.extend(["-device", "virtserialport,chardev=spicechannel0,"
+ "name=com.redhat.spice.0"])
+ kvm_cmd.extend(["-chardev", "spicevmc,id=spicechannel0,name=vdagent"])
logging.info("KVM: SPICE will listen on port %s", instance.network_port)
kvm_cmd.extend(["-spice", spice_arg])
# Tell kvm to use the paravirtualized graphic card, optimized for SPICE
kvm_cmd.extend(["-vga", "qxl"])
+ else:
+ kvm_cmd.extend(["-nographic"])
+
if hvp[constants.HV_USE_LOCALTIME]:
kvm_cmd.extend(["-localtime"])
kvm_nics = instance.nics
hvparams = hvp
- return (kvm_cmd, kvm_nics, hvparams)
+ if instance.hotplug_info:
+ return (kvm_cmd, kvm_nics, hvparams, block_devices)
+ else:
+ return (kvm_cmd, kvm_nics, hvparams)
def _WriteKVMRuntime(self, instance_name, data):
"""Write an instance's KVM runtime
"""Save an instance's KVM runtime
"""
- kvm_cmd, kvm_nics, hvparams = kvm_runtime
+ if instance.hotplug_info:
+ kvm_cmd, kvm_nics, hvparams, block_devices = kvm_runtime
+ serialized_blockdevs = [(blk.ToDict(), link)
+ for blk,link in block_devices]
+ else:
+ kvm_cmd, kvm_nics, hvparams = kvm_runtime
+
serialized_nics = [nic.ToDict() for nic in kvm_nics]
- serialized_form = serializer.Dump((kvm_cmd, serialized_nics, hvparams))
+
+ if instance.hotplug_info:
+ serialized_form = serializer.Dump((kvm_cmd, serialized_nics,
+ hvparams, serialized_blockdevs))
+ else:
+ serialized_form = serializer.Dump((kvm_cmd, serialized_nics, hvparams))
+
self._WriteKVMRuntime(instance.name, serialized_form)
def _LoadKVMRuntime(self, instance, serialized_runtime=None):
if not serialized_runtime:
serialized_runtime = self._ReadKVMRuntime(instance.name)
loaded_runtime = serializer.Load(serialized_runtime)
- kvm_cmd, serialized_nics, hvparams = loaded_runtime
+ if instance.hotplug_info:
+ kvm_cmd, serialized_nics, hvparams, serialized_blockdevs = loaded_runtime
+ block_devices = [(objects.Disk.FromDict(sdisk), link)
+ for sdisk, link in serialized_blockdevs]
+ else:
+ kvm_cmd, serialized_nics, hvparams = loaded_runtime
+
kvm_nics = [objects.NIC.FromDict(snic) for snic in serialized_nics]
- return (kvm_cmd, kvm_nics, hvparams)
+
+ if instance.hotplug_info:
+ return (kvm_cmd, kvm_nics, hvparams, block_devices)
+ else:
+ return (kvm_cmd, kvm_nics, hvparams)
def _RunKVMCmd(self, name, kvm_cmd, tap_fds=None):
"""Run the KVM cmd and check for errors
raise errors.HypervisorError("Failed to start instance %s" % name)
def _ExecuteKVMRuntime(self, instance, kvm_runtime, incoming=None):
- """Execute a KVM cmd, after completing it with some last minute data
+ """Execute a KVM cmd, after completing it with some last minute data.
@type incoming: tuple of strings
@param incoming: (target_host_ip, port)
temp_files = []
- kvm_cmd, kvm_nics, up_hvp = kvm_runtime
+ if instance.hotplug_info:
+ kvm_cmd, kvm_nics, up_hvp, block_devices = kvm_runtime
+ else:
+ kvm_cmd, kvm_nics, up_hvp = kvm_runtime
+
up_hvp = objects.FillDict(conf_hvp, up_hvp)
_, v_major, v_min, _ = self._GetKVMVersion()
if security_model == constants.HT_SM_USER:
kvm_cmd.extend(["-runas", conf_hvp[constants.HV_SECURITY_DOMAIN]])
+ keymap = conf_hvp[constants.HV_KEYMAP]
+ if keymap:
+ keymap_path = self._InstanceKeymapFile(name)
+ # If a keymap file is specified, KVM won't use its internal defaults. By
+ # first including the "en-us" layout, an error on loading the actual
+ # layout (e.g. because it can't be found) won't lead to a non-functional
+ # keyboard. A keyboard with incorrect keys is still better than none.
+ utils.WriteFile(keymap_path, data="include en-us\ninclude %s\n" % keymap)
+ kvm_cmd.extend(["-k", keymap_path])
+
+ if instance.hotplug_info:
+ kvm_cmd = self._GenerateKVMBlockDevicesOptions(instance, kvm_cmd,
+ block_devices)
+
# We have reasons to believe changing something like the nic driver/type
# upon migration won't exactly fly with the instance kernel, so for nic
# related parameters we'll use up_hvp
tapfds.append(tapfd)
taps.append(tapname)
if (v_major, v_min) >= (0, 12):
- nic_val = "%s,mac=%s,netdev=netdev%s" % (nic_model, nic.mac, nic_seq)
- tap_val = "type=tap,id=netdev%s,fd=%d%s" % (nic_seq, tapfd, tap_extra)
+ nic_val = "%s,mac=%s" % (nic_model, nic.mac)
+ if nic.idx:
+ nic_val += (",netdev=netdev%d,id=virtio-net-pci.%d" %
+ (nic.idx, nic.idx))
+ if nic.pci is not None:
+ nic_val += (",bus=pci.0,addr=%s" % hex(nic.pci))
+ else:
+ nic_val += (",netdev=netdev%d,id=virtio-net-pci.%d" %
+ (nic_seq, nic_seq))
+ tap_val = ("type=tap,id=netdev%d,fd=%d%s" %
+ (nic.idx or nic_seq, tapfd, tap_extra))
kvm_cmd.extend(["-netdev", tap_val, "-device", nic_val])
else:
nic_val = "nic,vlan=%s,macaddr=%s,model=%s" % (nic_seq,
continue
self._ConfigureNIC(instance, nic_seq, nic, taps[nic_seq])
+ # CPU affinity requires kvm to start paused, so we set this flag if the
+ # instance is not already paused and if we are not going to accept a
+ # migrating instance. In the latter case, pausing is not needed.
+ start_kvm_paused = not (_KVM_START_PAUSED_FLAG in kvm_cmd) and not incoming
+ if start_kvm_paused:
+ kvm_cmd.extend([_KVM_START_PAUSED_FLAG])
+
+ # Note: CPU pinning is using up_hvp since changes take effect
+ # during instance startup anyway, and to avoid problems when soft
+ # rebooting the instance.
+ cpu_pinning = False
+ if up_hvp.get(constants.HV_CPU_MASK, None):
+ cpu_pinning = True
+
if security_model == constants.HT_SM_POOL:
ss = ssconf.SimpleStore()
uid_pool = uidpool.ParseUidPool(ss.GetUidPool(), separator="\n")
# for connection.
spice_password_file = conf_hvp[constants.HV_KVM_SPICE_PASSWORD_FILE]
if spice_password_file:
+ spice_pwd = ""
try:
spice_pwd = utils.ReadOneLineFile(spice_password_file, strict=True)
- qmp = QmpConnection(self._InstanceQmpMonitor(instance.name))
- qmp.connect()
- arguments = {
- "protocol": "spice",
- "password": spice_pwd,
- }
- qmp.Execute("set_password", arguments)
except EnvironmentError, err:
raise errors.HypervisorError("Failed to open SPICE password file %s: %s"
% (spice_password_file, err))
+ qmp = QmpConnection(self._InstanceQmpMonitor(instance.name))
+ qmp.connect()
+ arguments = {
+ "protocol": "spice",
+ "password": spice_pwd,
+ }
+ qmp.Execute("set_password", arguments)
+
for filename in temp_files:
utils.RemoveFile(filename)
+ # If requested, set CPU affinity and resume instance execution
+ if cpu_pinning:
+ self._ExecuteCpuAffinity(instance.name, up_hvp[constants.HV_CPU_MASK])
+
+ start_memory = self._InstanceStartupMemory(instance)
+ if start_memory < instance.beparams[constants.BE_MAXMEM]:
+ self.BalloonInstanceMemory(instance, start_memory)
+
+ if start_kvm_paused:
+ # To control CPU pinning, ballooning, and vnc/spice passwords
+ # the VM was started in a frozen state. If freezing was not
+ # explicitly requested resume the vm status.
+ self._CallMonitorCommand(instance.name, self._CONT_CMD)
+
def StartInstance(self, instance, block_devices, startup_paused):
"""Start an instance.
return result
+  def _FindFreePCISlot(self, instance_name):
+    """Query the monitor and return the first unoccupied PCI slot.
+
+    @type instance_name: string
+    @param instance_name: name of the running instance to query
+    @rtype: int
+    @raise errors.HypervisorError: if all 32 slots are occupied
+
+    """
+    # One bit per possible PCI slot; bits are set for occupied slots as
+    # reported by the "info pci" monitor command.
+    slots = bitarray(32)
+    slots.setall(False)
+    output = self._CallMonitorCommand(instance_name, self._INFO_PCI_CMD)
+    for line in output.stdout.splitlines():
+      match = self._INFO_PCI_RE.search(line)
+      if match:
+        slot = int(match.group(1))
+        slots[slot] = True
+
+    # bitarray.search(pattern, limit): find occurrences of the FREE
+    # ("0") pattern, stopping after the first match -- the second
+    # argument is a result limit, not a start offset.
+    free = slots.search(FREE, 1)
+    if not free:
+      raise errors.HypervisorError("All PCI slots occupied")
+
+    return int(free[0])
+
+  def _HotplugEnabled(self, instance_name):
+    """Return whether device hotplug can be attempted for an instance.
+
+    Hotplug requires the instance to be running and KVM version >= 1.0.
+
+    @type instance_name: string
+    @param instance_name: name of the instance to check
+    @rtype: bool
+
+    """
+    if not self._InstancePidAlive(instance_name)[2]:
+      logging.info("Cannot hotplug. Instance %s not alive", instance_name)
+      return False
+
+    _, v_major, v_min, _ = self._GetKVMVersion()
+    return (v_major, v_min) >= (1, 0)
+
+  def HotAddDisk(self, instance, disk, dev_path, _):
+    """Hotadd new disk to the VM
+
+    @return: the PCI slot assigned to the disk.  NOTE(review): when
+        hotplug is not possible nothing is attached and disk.pci is
+        returned unmodified (presumably None) -- confirm callers handle
+        this case.
+
+    """
+    if self._HotplugEnabled(instance.name):
+      disk.pci = self._FindFreePCISlot(instance.name)
+      idx = disk.idx
+      # First register the backing drive without a guest interface
+      # ("if=none"), then attach the virtio-blk device exposing it.
+      command = ("drive_add dummy file=%s,if=none,id=drive%d,format=raw" %
+                 (dev_path, idx))
+
+      logging.info("Run cmd %s", command)
+      output = self._CallMonitorCommand(instance.name, command)
+
+      command = ("device_add virtio-blk-pci,bus=pci.0,addr=%s,"
+                 "drive=drive%d,id=virtio-blk-pci.%d"
+                 % (hex(disk.pci), idx, idx))
+      logging.info("Run cmd %s", command)
+      output = self._CallMonitorCommand(instance.name, command)
+      for line in output.stdout.splitlines():
+        logging.info("%s", line)
+
+      # Persist the new disk in the runtime file so that a later
+      # shutdown/startup cycle recreates it.
+      (kvm_cmd, kvm_nics,
+       hvparams, block_devices) = self._LoadKVMRuntime(instance)
+      block_devices.append((disk, dev_path))
+      new_kvm_runtime = (kvm_cmd, kvm_nics, hvparams, block_devices)
+      self._SaveKVMRuntime(instance, new_kvm_runtime)
+
+    return disk.pci
+
+  def HotDelDisk(self, instance, disk, _):
+    """Hotdel disk from the VM
+
+    """
+    if self._HotplugEnabled(instance.name):
+      idx = disk.idx
+
+      command = "device_del virtio-blk-pci.%d" % idx
+      logging.info("Run cmd %s", command)
+      output = self._CallMonitorCommand(instance.name, command)
+      for line in output.stdout.splitlines():
+        logging.info("%s", line)
+
+      # NOTE(review): the drive_del command below is only logged, never
+      # actually sent -- the monitor call is commented out.  Confirm
+      # whether drive_del is unsupported on the targeted KVM versions or
+      # whether this is an oversight.
+      command = "drive_del drive%d" % idx
+      logging.info("Run cmd %s", command)
+      #output = self._CallMonitorCommand(instance.name, command)
+      #for line in output.stdout.splitlines():
+      #  logging.info("%s" % line)
+
+      # Drop the disk from the runtime file so that a later restart does
+      # not recreate it.
+      (kvm_cmd, kvm_nics,
+       hvparams, block_devices) = self._LoadKVMRuntime(instance)
+      rem = [(d, p) for d, p in block_devices
+             if d.idx is not None and d.idx == idx]
+      try:
+        block_devices.remove(rem[0])
+      except (ValueError, IndexError):
+        logging.info("Disk with %d idx disappeared from runtime file", idx)
+      new_kvm_runtime = (kvm_cmd, kvm_nics, hvparams, block_devices)
+      self._SaveKVMRuntime(instance, new_kvm_runtime)
+
+  def HotAddNic(self, instance, nic, seq):
+    """Hotadd new nic to the VM
+
+    @return: the PCI slot assigned to the NIC.  NOTE(review): when
+        hotplug is not possible nothing is attached and nic.pci is
+        returned unmodified -- confirm callers handle this case.
+
+    """
+    if self._HotplugEnabled(instance.name):
+      nic.pci = self._FindFreePCISlot(instance.name)
+      mac = nic.mac
+      idx = nic.idx
+
+      # Open a tap device and hand its file descriptor to the running
+      # KVM process over the monitor socket (SCM_RIGHTS).
+      (tap, fd) = _OpenTap()
+      logging.info("%s %d", tap, fd)
+
+      self._PassTapFd(instance, fd, nic)
+
+      # "fd=netdev%d" names the descriptor previously registered with
+      # the "getfd" command in _PassTapFd; it is not a numeric fd.
+      command = ("netdev_add tap,id=netdev%d,fd=netdev%d"
+                 % (idx, idx))
+      logging.info("Run cmd %s", command)
+      output = self._CallMonitorCommand(instance.name, command)
+      for line in output.stdout.splitlines():
+        logging.info("%s", line)
+
+      command = ("device_add virtio-net-pci,bus=pci.0,addr=%s,mac=%s,"
+                 "netdev=netdev%d,id=virtio-net-pci.%d"
+                 % (hex(nic.pci), mac, idx, idx))
+      logging.info("Run cmd %s", command)
+      output = self._CallMonitorCommand(instance.name, command)
+      for line in output.stdout.splitlines():
+        logging.info("%s", line)
+
+      self._ConfigureNIC(instance, seq, nic, tap)
+
+      # Persist the new NIC in the runtime file so that a later restart
+      # recreates it.
+      (kvm_cmd, kvm_nics,
+       hvparams, block_devices) = self._LoadKVMRuntime(instance)
+      kvm_nics.append(nic)
+      new_kvm_runtime = (kvm_cmd, kvm_nics, hvparams, block_devices)
+      self._SaveKVMRuntime(instance, new_kvm_runtime)
+
+    return nic.pci
+
+  def HotDelNic(self, instance, nic, _):
+    """Hotdel nic from the VM
+
+    """
+    if self._HotplugEnabled(instance.name):
+      idx = nic.idx
+
+      # Remove the guest-visible device first, then its host backend
+      command = "device_del virtio-net-pci.%d" % idx
+      logging.info("Run cmd %s", command)
+      output = self._CallMonitorCommand(instance.name, command)
+      for line in output.stdout.splitlines():
+        logging.info("%s", line)
+
+      command = "netdev_del netdev%d" % idx
+      logging.info("Run cmd %s", command)
+      output = self._CallMonitorCommand(instance.name, command)
+      for line in output.stdout.splitlines():
+        logging.info("%s", line)
+
+      # Drop the NIC from the runtime file so that a later restart does
+      # not recreate it.
+      (kvm_cmd, kvm_nics,
+       hvparams, block_devices) = self._LoadKVMRuntime(instance)
+      rem = [n for n in kvm_nics if n.idx is not None and n.idx == nic.idx]
+      try:
+        kvm_nics.remove(rem[0])
+      except (ValueError, IndexError):
+        logging.info("NIC with %d idx disappeared from runtime file", nic.idx)
+      new_kvm_runtime = (kvm_cmd, kvm_nics, hvparams, block_devices)
+      self._SaveKVMRuntime(instance, new_kvm_runtime)
+
+
+ def _PassTapFd(self, instance, fd, nic):
+ monsock = utils.ShellQuote(self._InstanceMonitor(instance.name))
+ s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
+ s.connect(monsock)
+ idx = nic.idx
+ command = "getfd netdev%d\n" % idx
+ fds = [fd]
+ logging.info("%s", fds)
+ fdsend.sendfds(s, command, fds = fds)
+ s.close()
+
+  @classmethod
+  def _ParseKVMVersion(cls, text):
+    """Parse the KVM version from the --help output.
+
+    @type text: string
+    @param text: output of kvm --help
+    @return: (version, v_maj, v_min, v_rev)
+    @raise errors.HypervisorError: when the KVM version cannot be retrieved
+
+    """
+    # The version is expected on the first line of the help output
+    match = cls._VERSION_RE.search(text.splitlines()[0])
+    if not match:
+      raise errors.HypervisorError("Unable to get KVM version")
+
+    v_all = match.group(0)
+    v_maj = int(match.group(1))
+    v_min = int(match.group(2))
+    # The third version component is optional in the regexp (group 3 is
+    # ".rev", group 4 the bare number), e.g. "1.0" has no revision
+    if match.group(4):
+      v_rev = int(match.group(4))
+    else:
+      v_rev = 0
+    return (v_all, v_maj, v_min, v_rev)
+
@classmethod
def _GetKVMVersion(cls):
"""Return the installed KVM version.
@return: (version, v_maj, v_min, v_rev)
- @raise L{errors.HypervisorError}: when the KVM version cannot be retrieved
+ @raise errors.HypervisorError: when the KVM version cannot be retrieved
"""
result = utils.RunCmd([constants.KVM_PATH, "--help"])
if result.failed:
raise errors.HypervisorError("Unable to get KVM version")
- match = cls._VERSION_RE.search(result.output.splitlines()[0])
- if not match:
- raise errors.HypervisorError("Unable to get KVM version")
-
- return (match.group(0), int(match.group(1)), int(match.group(2)),
- int(match.group(3)))
+ return cls._ParseKVMVersion(result.output)
def StopInstance(self, instance, force=False, retry=False, name=None):
"""Stop an instance.
incoming_address = (target, instance.hvparams[constants.HV_MIGRATION_PORT])
self._ExecuteKVMRuntime(instance, kvm_runtime, incoming=incoming_address)
- def FinalizeMigration(self, instance, info, success):
- """Finalize an instance migration.
+ def FinalizeMigrationDst(self, instance, info, success):
+ """Finalize the instance migration on the target node.
Stop the incoming mode KVM.
"""
instance_name = instance.name
port = instance.hvparams[constants.HV_MIGRATION_PORT]
- pidfile, pid, alive = self._InstancePidAlive(instance_name)
+ _, _, alive = self._InstancePidAlive(instance_name)
if not alive:
raise errors.HypervisorError("Instance not running, cannot migrate")
migrate_command = "migrate -d tcp:%s:%s" % (target, port)
self._CallMonitorCommand(instance_name, migrate_command)
+ def FinalizeMigrationSource(self, instance, success, live):
+ """Finalize the instance migration on the source node.
+
+ @type instance: L{objects.Instance}
+ @param instance: the instance that was migrated
+ @type success: bool
+ @param success: whether the migration succeeded or not
+ @type live: bool
+ @param live: whether the user requested a live migration or not
+
+ """
+ if success:
+ pidfile, pid, _ = self._InstancePidAlive(instance.name)
+ utils.KillProcess(pid)
+ self._RemoveInstanceRuntimeFiles(pidfile, instance.name)
+ elif live:
+ self._CallMonitorCommand(instance.name, self._CONT_CMD)
+
+ def GetMigrationStatus(self, instance):
+    """Get the migration status.
+
+ @type instance: L{objects.Instance}
+ @param instance: the instance that is being migrated
+ @rtype: L{objects.MigrationStatus}
+ @return: the status of the current migration (one of
+ L{constants.HV_MIGRATION_VALID_STATUSES}), plus any additional
+ progress info that can be retrieved from the hypervisor
+
+ """
info_command = "info migrate"
- done = False
- broken_answers = 0
- while not done:
- result = self._CallMonitorCommand(instance_name, info_command)
+ for _ in range(self._MIGRATION_INFO_MAX_BAD_ANSWERS):
+ result = self._CallMonitorCommand(instance.name, info_command)
match = self._MIGRATION_STATUS_RE.search(result.stdout)
if not match:
- broken_answers += 1
if not result.stdout:
logging.info("KVM: empty 'info migrate' result")
else:
logging.warning("KVM: unknown 'info migrate' result: %s",
result.stdout)
- time.sleep(self._MIGRATION_INFO_RETRY_DELAY)
else:
status = match.group(1)
- if status == "completed":
- done = True
- elif status == "active":
- # reset the broken answers count
- broken_answers = 0
- time.sleep(self._MIGRATION_INFO_RETRY_DELAY)
- elif status == "failed" or status == "cancelled":
- if not live:
- self._CallMonitorCommand(instance_name, 'cont')
- raise errors.HypervisorError("Migration %s at the kvm level" %
- status)
- else:
- logging.warning("KVM: unknown migration status '%s'", status)
- broken_answers += 1
- time.sleep(self._MIGRATION_INFO_RETRY_DELAY)
- if broken_answers >= self._MIGRATION_INFO_MAX_BAD_ANSWERS:
- raise errors.HypervisorError("Too many 'info migrate' broken answers")
+ if status in constants.HV_KVM_MIGRATION_VALID_STATUSES:
+ migration_status = objects.MigrationStatus(status=status)
+ match = self._MIGRATION_PROGRESS_RE.search(result.stdout)
+ if match:
+ migration_status.transferred_ram = match.group("transferred")
+ migration_status.total_ram = match.group("total")
- utils.KillProcess(pid)
- self._RemoveInstanceRuntimeFiles(pidfile, instance_name)
+ return migration_status
+
+ logging.warning("KVM: unknown migration status '%s'", status)
+
+ time.sleep(self._MIGRATION_INFO_RETRY_DELAY)
+
+ return objects.MigrationStatus(status=constants.HV_MIGRATION_FAILED,
+ info="Too many 'info migrate' broken answers")
+
+ def BalloonInstanceMemory(self, instance, mem):
+ """Balloon an instance memory to a certain value.
+
+ @type instance: L{objects.Instance}
+ @param instance: instance to be accepted
+ @type mem: int
+ @param mem: actual memory size to use for instance runtime
+
+ """
+ self._CallMonitorCommand(instance.name, "balloon %d" % mem)
def GetNodeInfo(self):
"""Return information about the node.