import struct
import fcntl
import shutil
+import socket
+import stat
+import StringIO
+import fdsend
+from bitarray import bitarray
+try:
+ import affinity # pylint: disable=F0401
+except ImportError:
+ affinity = None
from ganeti import utils
from ganeti import constants
_KVM_NETWORK_SCRIPT = constants.SYSCONFDIR + "/ganeti/kvm-vif-bridge"
+_KVM_START_PAUSED_FLAG = "-S"
# TUN/TAP driver constants, taken from <linux/if_tun.h>
# They are architecture-independent and already hardcoded in qemu-kvm source,
IFF_NO_PI = 0x1000
IFF_VNET_HDR = 0x4000
+FREE = bitarray("0")
def _ProbeTapVnetHdr(fd):
"""Check whether to enable the IFF_VNET_HDR flag.
return (ifname, tapfd)
+class QmpMessage:
+ """QEMU Messaging Protocol (QMP) message.
+
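+ A rough usage sketch (round-tripping through the serializer, as the
+ methods below do):
+
+ msg = QmpMessage({"execute": "qmp_capabilities"})
+ msg["arguments"] = {}
+ text = str(msg) # one-line JSON, as the protocol expects
+ assert QmpMessage.BuildFromJsonString(text) == msg
+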
+ """
+ def __init__(self, data):
+ """Creates a new QMP message based on the passed data.
+
+ """
+ if not isinstance(data, dict):
+ raise TypeError("QmpMessage must be initialized with a dict")
+
+ self.data = data
+
+ def __getitem__(self, field_name):
+ """Get the value of the required field if present, or None.
+
+ Overrides the [] operator to provide access to the message data,
+ returning None if the required item is not in the message.
+
+ @return: the value of the field_name field, or None if field_name
+ is not contained in the message
+
+ """
+ return self.data.get(field_name, None)
+
+ def __setitem__(self, field_name, field_value):
+ """Set the value of the required field_name to field_value.
+
+ """
+ self.data[field_name] = field_value
+
+ @staticmethod
+ def BuildFromJsonString(json_string):
+ """Build a QmpMessage from a JSON encoded string.
+
+ @type json_string: str
+ @param json_string: JSON string representing the message
+ @rtype: L{QmpMessage}
+ @return: a L{QmpMessage} built from json_string
+
+ """
+ # Parse the string
+ data = serializer.LoadJson(json_string)
+ return QmpMessage(data)
+
+ def __str__(self):
+ # The protocol expects the JSON object to be sent as a single line.
+ return serializer.DumpJson(self.data)
+
+ def __eq__(self, other):
+ # When comparing two QmpMessages, we are interested in comparing
+ # their internal representation of the message data
+ return self.data == other.data
+
+
+class QmpConnection:
+ """Connection to the QEMU Monitor using the QEMU Monitor Protocol (QMP).
+
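+ Minimal usage sketch (the socket path is illustrative; see
+ _InstanceQmpMonitor for the real one):
+
+ qmp = QmpConnection("/var/run/ganeti/kvm-hypervisor/ctrl/inst1.qmp")
+ qmp.connect() # consumes the greeting and negotiates capabilities
+ response = qmp.Execute("query-status")
+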
+ """
+ _FIRST_MESSAGE_KEY = "QMP"
+ _EVENT_KEY = "event"
+ _ERROR_KEY = "error"
+ _RETURN_KEY = RETURN_KEY = "return"
+ _ACTUAL_KEY = ACTUAL_KEY = "actual"
+ _ERROR_CLASS_KEY = "class"
+ _ERROR_DATA_KEY = "data"
+ _ERROR_DESC_KEY = "desc"
+ _EXECUTE_KEY = "execute"
+ _ARGUMENTS_KEY = "arguments"
+ _CAPABILITIES_COMMAND = "qmp_capabilities"
+ _MESSAGE_END_TOKEN = "\r\n"
+ _SOCKET_TIMEOUT = 5
+
+ def __init__(self, monitor_filename):
+ """Instantiates the QmpConnection object.
+
+ @type monitor_filename: string
+ @param monitor_filename: the filename of the UNIX raw socket on which the
+ QMP monitor is listening
+
+ """
+ self.monitor_filename = monitor_filename
+ self.sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
+ # We want to fail if the server doesn't send a complete message
+ # in a reasonable amount of time
+ self.sock.settimeout(self._SOCKET_TIMEOUT)
+ self._connected = False
+ self._buf = ""
+
+ def _check_socket(self):
+ sock_stat = None
+ try:
+ sock_stat = os.stat(self.monitor_filename)
+ except EnvironmentError, err:
+ if err.errno == errno.ENOENT:
+ raise errors.HypervisorError("No qmp socket found")
+ else:
+ raise errors.HypervisorError("Error checking qmp socket: %s",
+ utils.ErrnoOrStr(err))
+ if not stat.S_ISSOCK(sock_stat.st_mode):
+ raise errors.HypervisorError("Qmp socket is not a socket")
+
+ def _check_connection(self):
+ """Make sure that the connection is established.
+
+ """
+ if not self._connected:
+ raise errors.ProgrammerError("To use a QmpConnection you need to first"
+ " invoke connect() on it")
+
+ def connect(self):
+ """Connects to the QMP monitor.
+
+ Connects to the UNIX socket and makes sure that we can actually send and
+ receive data to the kvm instance via QMP.
+
+ @raise errors.HypervisorError: when there are communication errors
+ @raise errors.ProgrammerError: when there are data serialization errors
+
+ """
+ if self._connected:
+ raise errors.ProgrammerError("Cannot connect twice")
+
+ self._check_socket()
+
+ try:
+ self.sock.connect(self.monitor_filename)
+ except EnvironmentError:
+ raise errors.HypervisorError("Can't connect to qmp socket")
+ self._connected = True
+
+ # Check if we receive a correct greeting message from the server
+ # (As per the QEMU Protocol Specification 0.1 - section 2.2)
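+ # The greeting looks roughly like this (abridged):
+ # {"QMP": {"version": {...}, "capabilities": []}}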
+ greeting = self._Recv()
+ if not greeting[self._FIRST_MESSAGE_KEY]:
+ self._connected = False
+ raise errors.HypervisorError("kvm: qmp communication error (wrong"
+ " server greeting")
+
+ # Let's put the monitor in command mode using the qmp_capabilities
+ # command, or else no command will be executable.
+ # (As per the QEMU Protocol Specification 0.1 - section 4)
+ self.Execute(self._CAPABILITIES_COMMAND)
+
+ def _ParseMessage(self, buf):
+ """Extract and parse a QMP message from the given buffer.
+
+ Seeks for a QMP message in the given buf. If found, it parses it and
+ returns it together with the rest of the characters in the buf.
+ If no message is found, returns None and the whole buffer.
+
+ @raise errors.ProgrammerError: when there are data serialization errors
+
+ """
+ message = None
+ # Check if we got the message end token (CRLF, as per the QEMU Protocol
+ # Specification 0.1 - Section 2.1.1)
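+ # E.g. for buf = '{"return": {}}\r\n{"event": ...' the first message is
+ # parsed and the remainder stays in the buffer for the next call.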
+ pos = buf.find(self._MESSAGE_END_TOKEN)
+ if pos >= 0:
+ try:
+ message = QmpMessage.BuildFromJsonString(buf[:pos + 1])
+ except Exception, err:
+ raise errors.ProgrammerError("QMP data serialization error: %s" % err)
+ buf = buf[pos + 1:]
+
+ return (message, buf)
+
+ def _Recv(self):
+ """Receives a message from QMP and decodes the received JSON object.
+
+ @rtype: QmpMessage
+ @return: the received message
+ @raise errors.HypervisorError: when there are communication errors
+ @raise errors.ProgrammerError: when there are data serialization errors
+
+ """
+ self._check_connection()
+
+ # Check if there is already a message in the buffer
+ (message, self._buf) = self._ParseMessage(self._buf)
+ if message:
+ return message
+
+ recv_buffer = StringIO.StringIO(self._buf)
+ recv_buffer.seek(len(self._buf))
+ try:
+ while True:
+ data = self.sock.recv(4096)
+ if not data:
+ break
+ recv_buffer.write(data)
+
+ (message, self._buf) = self._ParseMessage(recv_buffer.getvalue())
+ if message:
+ return message
+
+ except socket.timeout, err:
+ raise errors.HypervisorError("Timeout while receiving a QMP message: "
+ "%s" % (err))
+ except socket.error, err:
+ raise errors.HypervisorError("Unable to receive data from KVM using the"
+ " QMP protocol: %s" % err)
+
+ def _Send(self, message):
+ """Encodes and sends a message to KVM using QMP.
+
+ @type message: QmpMessage
+ @param message: message to send to KVM
+ @raise errors.HypervisorError: when there are communication errors
+ @raise errors.ProgrammerError: when there are data serialization errors
+
+ """
+ self._check_connection()
+ try:
+ message_str = str(message)
+ except Exception, err:
+ raise errors.ProgrammerError("QMP data deserialization error: %s" % err)
+
+ try:
+ self.sock.sendall(message_str)
+ except socket.timeout, err:
+ raise errors.HypervisorError("Timeout while sending a QMP message: "
+ "%s (%s)" % (err.string, err.errno))
+ except socket.error, err:
+ raise errors.HypervisorError("Unable to send data from KVM using the"
+ " QMP protocol: %s" % err)
+
+ def Execute(self, command, arguments=None):
+ """Executes a QMP command and returns the response of the server.
+
+ @type command: str
+ @param command: the command to execute
+ @type arguments: dict
+ @param arguments: dictionary of arguments to be passed to the command
+ @rtype: dict
+ @return: dictionary representing the received JSON object
+ @raise errors.HypervisorError: when there are communication errors
+ @raise errors.ProgrammerError: when there are data serialization errors
+
+ """
+ self._check_connection()
+ message = QmpMessage({self._EXECUTE_KEY: command})
+ if arguments:
+ message[self._ARGUMENTS_KEY] = arguments
+ self._Send(message)
+
+ # Events can occur between the sending of the command and the reception
+ # of the response, so we need to filter out messages with the event key.
+ while True:
+ response = self._Recv()
+ err = response[self._ERROR_KEY]
+ if err:
+ raise errors.HypervisorError("kvm: error executing the %s"
+ " command: %s (%s, %s):" %
+ (command,
+ err[self._ERROR_DESC_KEY],
+ err[self._ERROR_CLASS_KEY],
+ err[self._ERROR_DATA_KEY]))
+
+ elif not response[self._EVENT_KEY]:
+ return response
+
+
class KVMHypervisor(hv_base.BaseHypervisor):
- """KVM hypervisor interface"""
+ """KVM hypervisor interface
+
+ """
CAN_MIGRATE = True
_ROOT_DIR = constants.RUN_GANETI_DIR + "/kvm-hypervisor"
x in constants.VALID_IP_VERSIONS),
"the SPICE IP version should be 4 or 6",
None, None),
+ constants.HV_KVM_SPICE_PASSWORD_FILE: hv_base.OPT_FILE_CHECK,
+ constants.HV_KVM_SPICE_LOSSLESS_IMG_COMPR:
+ hv_base.ParamInSet(False,
+ constants.HT_KVM_SPICE_VALID_LOSSLESS_IMG_COMPR_OPTIONS),
+ constants.HV_KVM_SPICE_JPEG_IMG_COMPR:
+ hv_base.ParamInSet(False,
+ constants.HT_KVM_SPICE_VALID_LOSSY_IMG_COMPR_OPTIONS),
+ constants.HV_KVM_SPICE_ZLIB_GLZ_IMG_COMPR:
+ hv_base.ParamInSet(False,
+ constants.HT_KVM_SPICE_VALID_LOSSY_IMG_COMPR_OPTIONS),
+ constants.HV_KVM_SPICE_STREAMING_VIDEO_DETECTION:
+ hv_base.ParamInSet(False,
+ constants.HT_KVM_SPICE_VALID_VIDEO_STREAM_DETECTION_OPTIONS),
+ constants.HV_KVM_SPICE_AUDIO_COMPR: hv_base.NO_CHECK,
+ constants.HV_KVM_SPICE_USE_TLS: hv_base.NO_CHECK,
+ constants.HV_KVM_SPICE_TLS_CIPHERS: hv_base.NO_CHECK,
+ constants.HV_KVM_SPICE_USE_VDAGENT: hv_base.NO_CHECK,
constants.HV_KVM_FLOPPY_IMAGE_PATH: hv_base.OPT_FILE_CHECK,
constants.HV_CDROM_IMAGE_PATH: hv_base.OPT_FILE_CHECK,
constants.HV_KVM_CDROM2_IMAGE_PATH: hv_base.OPT_FILE_CHECK,
constants.HV_KVM_USE_CHROOT: hv_base.NO_CHECK,
constants.HV_MEM_PATH: hv_base.OPT_DIR_CHECK,
constants.HV_REBOOT_BEHAVIOR:
- hv_base.ParamInSet(True, constants.REBOOT_BEHAVIORS)
+ hv_base.ParamInSet(True, constants.REBOOT_BEHAVIORS),
+ constants.HV_CPU_MASK: hv_base.OPT_MULTI_CPU_MASK_CHECK,
}
_MIGRATION_STATUS_RE = re.compile("Migration\s+status:\s+(\w+)",
re.M | re.I)
+ _MIGRATION_PROGRESS_RE = \
+ re.compile(r"\s*transferred\s+ram:\s+(?P<transferred>\d+)\s+kbytes\s*\n"
+ r"\s*remaining\s+ram:\s+(?P<remaining>\d+)\s+kbytes\s*\n"
+ r"\s*total\s+ram:\s+(?P<total>\d+)\s+kbytes\s*\n", re.I)
+
_MIGRATION_INFO_MAX_BAD_ANSWERS = 5
_MIGRATION_INFO_RETRY_DELAY = 2
_VERSION_RE = re.compile(r"\b(\d+)\.(\d+)(\.(\d+))?\b")
+ _CPU_INFO_RE = re.compile(r"cpu\s+\#(\d+).*thread_id\s*=\s*(\d+)", re.I)
+ _CPU_INFO_CMD = "info cpus"
+ _CONT_CMD = "cont"
+
+ _INFO_PCI_RE = re.compile(r"Bus.*device[ ]*(\d+).*")
+ _INFO_PCI_CMD = "info pci"
+
ANCILLARY_FILES = [
_KVM_NETWORK_SCRIPT,
]
+ ANCILLARY_FILES_OPT = [
+ _KVM_NETWORK_SCRIPT,
+ ]
def __init__(self):
hv_base.BaseHypervisor.__init__(self)
"""
return utils.PathJoin(cls._CTRL_DIR, "%s.serial" % instance_name)
+ @classmethod
+ def _InstanceQmpMonitor(cls, instance_name):
+ """Returns the instance serial QMP socket name
+
+ """
+ return utils.PathJoin(cls._CTRL_DIR, "%s.qmp" % instance_name)
+
@staticmethod
def _SocatUnixConsoleParams():
"""Returns the correct parameters for socat
utils.RemoveFile(pidfile)
utils.RemoveFile(cls._InstanceMonitor(instance_name))
utils.RemoveFile(cls._InstanceSerial(instance_name))
+ utils.RemoveFile(cls._InstanceQmpMonitor(instance_name))
utils.RemoveFile(cls._InstanceKVMRuntime(instance_name))
utils.RemoveFile(cls._InstanceKeymapFile(instance_name))
uid_file = cls._InstanceUidFile(instance_name)
@type tap: str
"""
-
if instance.tags:
tags = " ".join(instance.tags)
else:
" Network configuration script output: %s" %
(tap, result.fail_reason, result.output))
+ @staticmethod
+ def _VerifyAffinityPackage():
+ if affinity is None:
+ raise errors.HypervisorError("affinity Python package not"
+ " found; cannot use CPU pinning under KVM")
+
+ @staticmethod
+ def _BuildAffinityCpuMask(cpu_list):
+ """Create a CPU mask suitable for sched_setaffinity from a list of
+ CPUs.
+
+ See man taskset for more info on sched_setaffinity masks.
+ For example: [ 0, 2, 5, 6 ] will return 101 (0x65, 0..01100101).
+
+ @type cpu_list: list of int
+ @param cpu_list: list of physical CPU numbers to map to vCPUs in order
+ @rtype: int
+ @return: a bit mask of CPU affinities
+
+ """
+ if cpu_list == constants.CPU_PINNING_OFF:
+ return constants.CPU_PINNING_ALL_KVM
+ else:
+ return sum(2 ** cpu for cpu in cpu_list)
+
+ @classmethod
+ def _AssignCpuAffinity(cls, cpu_mask, process_id, thread_dict):
+ """Change CPU affinity for running VM according to given CPU mask.
+
+ @type cpu_mask: string
+ @param cpu_mask: CPU mask as given by the user. e.g. "0-2,4:all:1,3"
+ @type process_id: int
+ @param process_id: process ID of KVM process. Used to pin entire VM
+ to physical CPUs.
+ @type thread_dict: dict int:int
+ @param thread_dict: map of virtual CPUs to KVM thread IDs
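+
+ For example, the hypothetical mask "0-2:all:1,3" pins vCPU 0 to
+ physical CPUs 0-2, leaves vCPU 1 unpinned, and pins vCPU 2 to
+ physical CPUs 1 and 3.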
+
+ """
+ # Convert the string CPU mask to a list of list of int's
+ cpu_list = utils.ParseMultiCpuMask(cpu_mask)
+
+ if len(cpu_list) == 1:
+ all_cpu_mapping = cpu_list[0]
+ if all_cpu_mapping == constants.CPU_PINNING_OFF:
+ # If CPU pinning has 1 entry that's "all", then do nothing
+ pass
+ else:
+ # If CPU pinning has one non-all entry, map the entire VM to
+ # one set of physical CPUs
+ cls._VerifyAffinityPackage()
+ affinity.set_process_affinity_mask(process_id,
+ cls._BuildAffinityCpuMask(all_cpu_mapping))
+ else:
+ # The number of vCPUs mapped should match the number of vCPUs
+ # reported by KVM. This was already verified earlier, so
+ # here only as a sanity check.
+ assert len(thread_dict) == len(cpu_list)
+ cls._VerifyAffinityPackage()
+
+ # For each vCPU, map it to the proper list of physical CPUs
+ for i, vcpu in enumerate(cpu_list):
+ affinity.set_process_affinity_mask(thread_dict[i],
+ cls._BuildAffinityCpuMask(vcpu))
+
+ def _GetVcpuThreadIds(self, instance_name):
+ """Get a mapping of vCPU no. to thread IDs for the instance
+
+ @type instance_name: string
+ @param instance_name: instance in question
+ @rtype: dictionary of int:int
+ @return: a dictionary mapping vCPU numbers to thread IDs
+
+ """
+ result = {}
+ output = self._CallMonitorCommand(instance_name, self._CPU_INFO_CMD)
+ for line in output.stdout.splitlines():
+ match = self._CPU_INFO_RE.search(line)
+ if not match:
+ continue
+ grp = map(int, match.groups())
+ result[grp[0]] = grp[1]
+
+ return result
+
+ def _ExecuteCpuAffinity(self, instance_name, cpu_mask):
+ """Complete CPU pinning.
+
+ @type instance_name: string
+ @param instance_name: name of instance
+ @type cpu_mask: string
+ @param cpu_mask: CPU pinning mask as entered by user
+
+ """
+ # Get KVM process ID, to be used if need to pin entire VM
+ _, pid, _ = self._InstancePidAlive(instance_name)
+ # Get vCPU thread IDs, to be used if need to pin vCPUs separately
+ thread_dict = self._GetVcpuThreadIds(instance_name)
+ # Run CPU pinning, based on configured mask
+ self._AssignCpuAffinity(cpu_mask, pid, thread_dict)
+
def ListInstances(self):
"""Get the list of running instances.
return None
_, memory, vcpus = self._InstancePidInfo(pid)
- stat = "---b-"
+ istat = "---b-"
times = "0"
- return (instance_name, pid, memory, vcpus, stat, times)
+ try:
+ qmp = QmpConnection(self._InstanceQmpMonitor(instance_name))
+ qmp.connect()
+ vcpus = len(qmp.Execute("query-cpus")[qmp.RETURN_KEY])
+ # Will fail if ballooning is not enabled, but we can then just resort to
+ # the value above.
+ mem_bytes = qmp.Execute("query-balloon")[qmp.RETURN_KEY][qmp.ACTUAL_KEY]
+ memory = mem_bytes / 1048576 # convert bytes to MiB
+ except errors.HypervisorError:
+ pass
+
+ return (instance_name, pid, memory, vcpus, istat, times)
def GetAllInstancesInfo(self):
"""Get properties of all instances.
try:
info = self.GetInstanceInfo(name)
except errors.HypervisorError:
+ # Ignore exceptions due to instances being shut down
continue
if info:
data.append(info)
return data
+ def _GenerateKVMBlockDevicesOptions(self, instance, kvm_cmd, block_devices):
+ """Extend kvm_cmd with the '-drive'/'-device' options for all disks.
+
+ """
+ hvp = instance.hvparams
+ boot_disk = hvp[constants.HV_BOOT_ORDER] == constants.HT_BO_DISK
+
+ _, v_major, v_min, _ = self._GetKVMVersion()
+
+ # whether this is an older KVM version that uses the boot=on flag
+ # on devices
+ needs_boot_flag = (v_major, v_min) < (0, 14)
+
+ disk_type = hvp[constants.HV_DISK_TYPE]
+ if disk_type == constants.HT_DISK_PARAVIRTUAL:
+ if_val = ",if=virtio"
+ if (v_major, v_min) >= (0, 12):
+ disk_model = "virtio-blk-pci"
+ else:
+ disk_model = "virtio"
+ else:
+ if_val = ",if=%s" % disk_type
+ disk_model = disk_type
+ # Cache mode
+ disk_cache = hvp[constants.HV_DISK_CACHE]
+ if instance.disk_template in constants.DTS_EXT_MIRROR:
+ if disk_cache != "none":
+ # TODO: make this a hard error, instead of a silent overwrite
+ logging.warning("KVM: overriding disk_cache setting '%s' with 'none'"
+ " to prevent shared storage corruption on migration",
+ disk_cache)
+ cache_val = ",cache=none"
+ elif disk_cache != constants.HT_CACHE_DEFAULT:
+ cache_val = ",cache=%s" % disk_cache
+ else:
+ cache_val = ""
+ for cfdev, dev_path in block_devices:
+ if cfdev.mode != constants.DISK_RDWR:
+ raise errors.HypervisorError("Instance has read-only disks which"
+ " are not supported by KVM")
+ # TODO: handle FD_LOOP and FD_BLKTAP (?)
+ boot_val = ""
+ if boot_disk:
+ kvm_cmd.extend(["-boot", "c"])
+ boot_disk = False
+ if needs_boot_flag and disk_type != constants.HT_DISK_IDE:
+ boot_val = ",boot=on"
+ drive_val = "file=%s,format=raw%s%s" % \
+ (dev_path, boot_val, cache_val)
+ if cfdev.idx is not None:
+ # TODO: name id after model
+ drive_val += (",if=none,id=drive%d" % cfdev.idx)
+ if cfdev.pci is not None:
+ drive_val += (",bus=0,unit=%d" % cfdev.pci)
+ else:
+ drive_val += if_val
+
+ kvm_cmd.extend(["-drive", drive_val])
+
+ if cfdev.idx is not None:
+ dev_val = ("%s,drive=drive%d,id=virtio-blk-pci.%d" %
+ (disk_model, cfdev.idx, cfdev.idx))
+ if cfdev.pci is not None:
+ dev_val += ",bus=pci.0,addr=%s" % hex(cfdev.pci)
+ kvm_cmd.extend(["-device", dev_val])
+
+ return kvm_cmd
+
def _GenerateKVMRuntime(self, instance, block_devices, startup_paused):
"""Generate KVM information to start an instance.
done in L{_ExecuteKVMRuntime}
"""
+ # pylint: disable=R0914,R0915
_, v_major, v_min, _ = self._GetKVMVersion()
pidfile = self._InstancePidFile(instance.name)
kvm_cmd = [kvm]
# used just by the vnc server, if enabled
kvm_cmd.extend(["-name", instance.name])
- kvm_cmd.extend(["-m", instance.beparams[constants.BE_MEMORY]])
+ kvm_cmd.extend(["-m", instance.beparams[constants.BE_MAXMEM]])
kvm_cmd.extend(["-smp", instance.beparams[constants.BE_VCPUS]])
kvm_cmd.extend(["-pidfile", pidfile])
+ kvm_cmd.extend(["-balloon", "virtio"])
kvm_cmd.extend(["-daemonize"])
if not instance.hvparams[constants.HV_ACPI]:
kvm_cmd.extend(["-no-acpi"])
- if startup_paused:
- kvm_cmd.extend(["-S"])
if instance.hvparams[constants.HV_REBOOT_BEHAVIOR] == \
constants.INSTANCE_REBOOT_EXIT:
kvm_cmd.extend(["-no-reboot"])
hvp = instance.hvparams
- boot_disk = hvp[constants.HV_BOOT_ORDER] == constants.HT_BO_DISK
- boot_cdrom = hvp[constants.HV_BOOT_ORDER] == constants.HT_BO_CDROM
- boot_floppy = hvp[constants.HV_BOOT_ORDER] == constants.HT_BO_FLOPPY
- boot_network = hvp[constants.HV_BOOT_ORDER] == constants.HT_BO_NETWORK
+ kernel_path = hvp[constants.HV_KERNEL_PATH]
+ if kernel_path:
+ boot_cdrom = boot_floppy = boot_network = False
+ else:
+ boot_cdrom = hvp[constants.HV_BOOT_ORDER] == constants.HT_BO_CDROM
+ boot_floppy = hvp[constants.HV_BOOT_ORDER] == constants.HT_BO_FLOPPY
+ boot_network = hvp[constants.HV_BOOT_ORDER] == constants.HT_BO_NETWORK
self.ValidateParameters(hvp)
+ if startup_paused:
+ kvm_cmd.extend([_KVM_START_PAUSED_FLAG])
+
if hvp[constants.HV_KVM_FLAG] == constants.HT_KVM_ENABLED:
kvm_cmd.extend(["-enable-kvm"])
elif hvp[constants.HV_KVM_FLAG] == constants.HT_KVM_DISABLED:
if boot_network:
kvm_cmd.extend(["-boot", "n"])
- disk_type = hvp[constants.HV_DISK_TYPE]
- if disk_type == constants.HT_DISK_PARAVIRTUAL:
- if_val = ",if=virtio"
- else:
- if_val = ",if=%s" % disk_type
- # Cache mode
- disk_cache = hvp[constants.HV_DISK_CACHE]
- if instance.disk_template in constants.DTS_EXT_MIRROR:
- if disk_cache != "none":
- # TODO: make this a hard error, instead of a silent overwrite
- logging.warning("KVM: overriding disk_cache setting '%s' with 'none'"
- " to prevent shared storage corruption on migration",
- disk_cache)
- cache_val = ",cache=none"
- elif disk_cache != constants.HT_CACHE_DEFAULT:
- cache_val = ",cache=%s" % disk_cache
- else:
- cache_val = ""
- for cfdev, dev_path in block_devices:
- if cfdev.mode != constants.DISK_RDWR:
- raise errors.HypervisorError("Instance has read-only disks which"
- " are not supported by KVM")
- # TODO: handle FD_LOOP and FD_BLKTAP (?)
- boot_val = ""
- if boot_disk:
- kvm_cmd.extend(["-boot", "c"])
- boot_disk = False
- if (v_major, v_min) < (0, 14) and disk_type != constants.HT_DISK_IDE:
- boot_val = ",boot=on"
+ # whether this is an older KVM version that uses the boot=on flag
+ # on devices
+ needs_boot_flag = (v_major, v_min) < (0, 14)
- drive_val = "file=%s,format=raw%s%s%s" % (dev_path, if_val, boot_val,
- cache_val)
- kvm_cmd.extend(["-drive", drive_val])
+ disk_type = hvp[constants.HV_DISK_TYPE]
+ if not instance.hotplug_info:
+ kvm_cmd = self._GenerateKVMBlockDevicesOptions(instance, kvm_cmd,
+ block_devices)
#Now we can specify a different device type for CDROM devices.
cdrom_disk_type = hvp[constants.HV_KVM_CDROM_DISK_TYPE]
iso_image = hvp[constants.HV_CDROM_IMAGE_PATH]
if iso_image:
options = ",format=raw,media=cdrom"
+ # set cdrom 'if' type
if boot_cdrom:
- kvm_cmd.extend(["-boot", "d"])
- if cdrom_disk_type != constants.HT_DISK_IDE:
- options = "%s,boot=on,if=%s" % (options, constants.HT_DISK_IDE)
- else:
- options = "%s,boot=on" % options
+ actual_cdrom_type = constants.HT_DISK_IDE
+ elif cdrom_disk_type == constants.HT_DISK_PARAVIRTUAL:
+ actual_cdrom_type = "virtio"
else:
- if cdrom_disk_type == constants.HT_DISK_PARAVIRTUAL:
- if_val = ",if=virtio"
- else:
- if_val = ",if=%s" % cdrom_disk_type
- options = "%s%s" % (options, if_val)
- drive_val = "file=%s%s" % (iso_image, options)
+ actual_cdrom_type = cdrom_disk_type
+ if_val = ",if=%s" % actual_cdrom_type
+ # set boot flag, if needed
+ boot_val = ""
+ if boot_cdrom:
+ kvm_cmd.extend(["-boot", "d"])
+ if needs_boot_flag:
+ boot_val = ",boot=on"
+ # and finally build the entire '-drive' value
+ drive_val = "file=%s%s%s%s" % (iso_image, options, if_val, boot_val)
kvm_cmd.extend(["-drive", drive_val])
iso_image2 = hvp[constants.HV_KVM_CDROM2_IMAGE_PATH]
if_val = ",if=virtio"
else:
if_val = ",if=%s" % cdrom_disk_type
- options = "%s%s" % (options, if_val)
- drive_val = "file=%s%s" % (iso_image2, options)
+ drive_val = "file=%s%s%s" % (iso_image2, options, if_val)
kvm_cmd.extend(["-drive", drive_val])
floppy_image = hvp[constants.HV_KVM_FLOPPY_IMAGE_PATH]
drive_val = "file=%s%s" % (floppy_image, options)
kvm_cmd.extend(["-drive", drive_val])
- kernel_path = hvp[constants.HV_KERNEL_PATH]
if kernel_path:
kvm_cmd.extend(["-kernel", kernel_path])
initrd_path = hvp[constants.HV_INITRD_PATH]
# we have both ipv4 and ipv6, let's use the cluster default IP
# version
cluster_family = ssconf.SimpleStore().GetPrimaryIPFamily()
- spice_ip_version = netutils.IPAddress.GetVersionFromAddressFamily(
- cluster_family)
+ spice_ip_version = \
+ netutils.IPAddress.GetVersionFromAddressFamily(cluster_family)
elif addresses[constants.IP4_VERSION]:
spice_ip_version = constants.IP4_VERSION
elif addresses[constants.IP6_VERSION]:
# ValidateParameters checked it.
spice_address = spice_bind
- spice_arg = "addr=%s,port=%s,disable-ticketing" % (spice_address,
- instance.network_port)
+ spice_arg = "addr=%s" % spice_address
+ if hvp[constants.HV_KVM_SPICE_USE_TLS]:
+ spice_arg = "%s,tls-port=%s,x509-cacert-file=%s" % (spice_arg,
+ instance.network_port, constants.SPICE_CACERT_FILE)
+ spice_arg = "%s,x509-key-file=%s,x509-cert-file=%s" % (spice_arg,
+ constants.SPICE_CERT_FILE, constants.SPICE_CERT_FILE)
+ tls_ciphers = hvp[constants.HV_KVM_SPICE_TLS_CIPHERS]
+ if tls_ciphers:
+ spice_arg = "%s,tls-ciphers=%s" % (spice_arg, tls_ciphers)
+ else:
+ spice_arg = "%s,port=%s" % (spice_arg, instance.network_port)
+
+ if not hvp[constants.HV_KVM_SPICE_PASSWORD_FILE]:
+ spice_arg = "%s,disable-ticketing" % spice_arg
+
if spice_ip_version:
spice_arg = "%s,ipv%s" % (spice_arg, spice_ip_version)
+ # Image compression options
+ img_lossless = hvp[constants.HV_KVM_SPICE_LOSSLESS_IMG_COMPR]
+ img_jpeg = hvp[constants.HV_KVM_SPICE_JPEG_IMG_COMPR]
+ img_zlib_glz = hvp[constants.HV_KVM_SPICE_ZLIB_GLZ_IMG_COMPR]
+ if img_lossless:
+ spice_arg = "%s,image-compression=%s" % (spice_arg, img_lossless)
+ if img_jpeg:
+ spice_arg = "%s,jpeg-wan-compression=%s" % (spice_arg, img_jpeg)
+ if img_zlib_glz:
+ spice_arg = "%s,zlib-glz-wan-compression=%s" % (spice_arg, img_zlib_glz)
+
+ # Video stream detection
+ video_streaming = hvp[constants.HV_KVM_SPICE_STREAMING_VIDEO_DETECTION]
+ if video_streaming:
+ spice_arg = "%s,streaming-video=%s" % (spice_arg, video_streaming)
+
+ # Audio compression, by default in qemu-kvm it is on
+ if not hvp[constants.HV_KVM_SPICE_AUDIO_COMPR]:
+ spice_arg = "%s,playback-compression=off" % spice_arg
+ if not hvp[constants.HV_KVM_SPICE_USE_VDAGENT]:
+ spice_arg = "%s,agent-mouse=off" % spice_arg
+ else:
+ # Enable the spice agent communication channel between the host and the
+ # agent.
+ kvm_cmd.extend(["-device", "virtio-serial-pci"])
+ kvm_cmd.extend(["-device", "virtserialport,chardev=spicechannel0,"
+ "name=com.redhat.spice.0"])
+ kvm_cmd.extend(["-chardev", "spicevmc,id=spicechannel0,name=vdagent"])
+
logging.info("KVM: SPICE will listen on port %s", instance.network_port)
kvm_cmd.extend(["-spice", spice_arg])
+ # Tell kvm to use the paravirtualized graphic card, optimized for SPICE
+ kvm_cmd.extend(["-vga", "qxl"])
+
else:
kvm_cmd.extend(["-nographic"])
kvm_nics = instance.nics
hvparams = hvp
- return (kvm_cmd, kvm_nics, hvparams)
+ if instance.hotplug_info:
+ return (kvm_cmd, kvm_nics, hvparams, block_devices)
+ else:
+ return (kvm_cmd, kvm_nics, hvparams)
def _WriteKVMRuntime(self, instance_name, data):
"""Write an instance's KVM runtime
"""Save an instance's KVM runtime
"""
- kvm_cmd, kvm_nics, hvparams = kvm_runtime
+ if instance.hotplug_info:
+ kvm_cmd, kvm_nics, hvparams, block_devices = kvm_runtime
+ serialized_blockdevs = [(blk.ToDict(), link)
+ for blk, link in block_devices]
+ else:
+ kvm_cmd, kvm_nics, hvparams = kvm_runtime
+
serialized_nics = [nic.ToDict() for nic in kvm_nics]
- serialized_form = serializer.Dump((kvm_cmd, serialized_nics, hvparams))
+
+ if instance.hotplug_info:
+ serialized_form = serializer.Dump((kvm_cmd, serialized_nics,
+ hvparams, serialized_blockdevs))
+ else:
+ serialized_form = serializer.Dump((kvm_cmd, serialized_nics, hvparams))
+
self._WriteKVMRuntime(instance.name, serialized_form)
def _LoadKVMRuntime(self, instance, serialized_runtime=None):
if not serialized_runtime:
serialized_runtime = self._ReadKVMRuntime(instance.name)
loaded_runtime = serializer.Load(serialized_runtime)
- kvm_cmd, serialized_nics, hvparams = loaded_runtime
+ if instance.hotplug_info:
+ kvm_cmd, serialized_nics, hvparams, serialized_blockdevs = loaded_runtime
+ block_devices = [(objects.Disk.FromDict(sdisk), link)
+ for sdisk, link in serialized_blockdevs]
+ else:
+ kvm_cmd, serialized_nics, hvparams = loaded_runtime
+
kvm_nics = [objects.NIC.FromDict(snic) for snic in serialized_nics]
- return (kvm_cmd, kvm_nics, hvparams)
+
+ if instance.hotplug_info:
+ return (kvm_cmd, kvm_nics, hvparams, block_devices)
+ else:
+ return (kvm_cmd, kvm_nics, hvparams)
def _RunKVMCmd(self, name, kvm_cmd, tap_fds=None):
"""Run the KVM cmd and check for errors
temp_files = []
- kvm_cmd, kvm_nics, up_hvp = kvm_runtime
+ if instance.hotplug_info:
+ kvm_cmd, kvm_nics, up_hvp, block_devices = kvm_runtime
+ else:
+ kvm_cmd, kvm_nics, up_hvp = kvm_runtime
+
up_hvp = objects.FillDict(conf_hvp, up_hvp)
_, v_major, v_min, _ = self._GetKVMVersion()
utils.WriteFile(keymap_path, data="include en-us\ninclude %s\n" % keymap)
kvm_cmd.extend(["-k", keymap_path])
+ if instance.hotplug_info:
+ kvm_cmd = self._GenerateKVMBlockDevicesOptions(instance, kvm_cmd,
+ block_devices)
+
# We have reasons to believe changing something like the nic driver/type
# upon migration won't exactly fly with the instance kernel, so for nic
# related parameters we'll use up_hvp
tapfds.append(tapfd)
taps.append(tapname)
if (v_major, v_min) >= (0, 12):
- nic_val = "%s,mac=%s,netdev=netdev%s" % (nic_model, nic.mac, nic_seq)
- tap_val = "type=tap,id=netdev%s,fd=%d%s" % (nic_seq, tapfd, tap_extra)
+ nic_val = "%s,mac=%s" % (nic_model, nic.mac)
+ if nic.idx is not None:
+ nic_val += (",netdev=netdev%d,id=virtio-net-pci.%d" %
+ (nic.idx, nic.idx))
+ if nic.pci is not None:
+ nic_val += (",bus=pci.0,addr=%s" % hex(nic.pci))
+ else:
+ nic_val += (",netdev=netdev%d,id=virtio-net-pci.%d" %
+ (nic_seq, nic_seq))
+ tap_val = ("type=tap,id=netdev%d,fd=%d%s" %
+ (nic.idx if nic.idx is not None else nic_seq, tapfd, tap_extra))
kvm_cmd.extend(["-netdev", tap_val, "-device", nic_val])
else:
nic_val = "nic,vlan=%s,macaddr=%s,model=%s" % (nic_seq,
utils.EnsureDirs([(self._InstanceChrootDir(name),
constants.SECURE_DIR_MODE)])
+ # Automatically enable QMP if version is >= 0.14
+ if (v_major, v_min) >= (0, 14):
+ logging.debug("Enabling QMP")
+ kvm_cmd.extend(["-qmp", "unix:%s,server,nowait" %
+ self._InstanceQmpMonitor(instance.name)])
+
# Configure the network now for starting instances and bridged interfaces,
# during FinalizeMigration for incoming instances' routed interfaces
for nic_seq, nic in enumerate(kvm_nics):
continue
self._ConfigureNIC(instance, nic_seq, nic, taps[nic_seq])
+ # CPU affinity requires kvm to start paused, so we set this flag if the
+ # instance is not already paused and if we are not going to accept a
+ # migrating instance. In the latter case, pausing is not needed.
+ start_kvm_paused = _KVM_START_PAUSED_FLAG not in kvm_cmd and not incoming
+ if start_kvm_paused:
+ kvm_cmd.extend([_KVM_START_PAUSED_FLAG])
+
+ # Note: CPU pinning is using up_hvp since changes take effect
+ # during instance startup anyway, and to avoid problems when soft
+ # rebooting the instance.
+ cpu_pinning = False
+ if up_hvp.get(constants.HV_CPU_MASK, None):
+ cpu_pinning = True
+
if security_model == constants.HT_SM_POOL:
ss = ssconf.SimpleStore()
uid_pool = uidpool.ParseUidPool(ss.GetUidPool(), separator="\n")
change_cmd = "change vnc password %s" % vnc_pwd
self._CallMonitorCommand(instance.name, change_cmd)
+ # Setting SPICE password. We are not vulnerable to malicious passwordless
+ # connection attempts because SPICE by default does not allow connections
+ # if neither a password nor the "disable_ticketing" options are specified.
+ # As soon as we send the password via QMP, that password is a valid ticket
+ # for connection.
+ spice_password_file = conf_hvp[constants.HV_KVM_SPICE_PASSWORD_FILE]
+ if spice_password_file:
+ spice_pwd = ""
+ try:
+ spice_pwd = utils.ReadOneLineFile(spice_password_file, strict=True)
+ except EnvironmentError, err:
+ raise errors.HypervisorError("Failed to open SPICE password file %s: %s"
+ % (spice_password_file, err))
+
+ qmp = QmpConnection(self._InstanceQmpMonitor(instance.name))
+ qmp.connect()
+ arguments = {
+ "protocol": "spice",
+ "password": spice_pwd,
+ }
+ qmp.Execute("set_password", arguments)
+
for filename in temp_files:
utils.RemoveFile(filename)
+ # If requested, set CPU affinity and resume instance execution
+ if cpu_pinning:
+ self._ExecuteCpuAffinity(instance.name, up_hvp[constants.HV_CPU_MASK])
+
+ start_memory = self._InstanceStartupMemory(instance)
+ if start_memory < instance.beparams[constants.BE_MAXMEM]:
+ self.BalloonInstanceMemory(instance, start_memory)
+
+ if start_kvm_paused:
+ # To control CPU pinning, ballooning, and vnc/spice passwords,
+ # the VM was started in a frozen state. If freezing was not
+ # explicitly requested, resume its execution.
+ self._CallMonitorCommand(instance.name, self._CONT_CMD)
+
def StartInstance(self, instance, block_devices, startup_paused):
"""Start an instance.
return result
+ def _FindFreePCISlot(self, instance_name):
+ """Find the first free PCI slot of a running instance.
+
+ """
+ slots = bitarray(32)
+ slots.setall(False)
+ output = self._CallMonitorCommand(instance_name, self._INFO_PCI_CMD)
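+ # A typical "info pci" output line matched by _INFO_PCI_RE looks like:
+ # Bus 0, device 3, function 0: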
+ for line in output.stdout.splitlines():
+ match = self._INFO_PCI_RE.search(line)
+ if match:
+ slot = int(match.group(1))
+ slots[slot] = True
+
+ free = slots.search(FREE, 1)
+ if not free:
+ raise errors.HypervisorError("All PCI slots occupied")
+
+ return int(free[0])
+
+ def _HotplugEnabled(self, instance_name):
+ """Check whether hotplug can be used (running instance, KVM >= 1.0).
+
+ """
+ if not self._InstancePidAlive(instance_name)[2]:
+ logging.info("Cannot hotplug. Instance %s not alive", instance_name)
+ return False
+
+ _, v_major, v_min, _ = self._GetKVMVersion()
+ return (v_major, v_min) >= (1, 0)
+
+ def HotAddDisk(self, instance, disk, dev_path, _):
+ """Hotadd new disk to the VM
+
+ """
+ if self._HotplugEnabled(instance.name):
+ disk.pci = self._FindFreePCISlot(instance.name)
+ idx = disk.idx
+ command = ("drive_add dummy file=%s,if=none,id=drive%d,format=raw" %
+ (dev_path, idx))
+
+ logging.info("Run cmd %s", command)
+ self._CallMonitorCommand(instance.name, command)
+
+ command = ("device_add virtio-blk-pci,bus=pci.0,addr=%s,"
+ "drive=drive%d,id=virtio-blk-pci.%d"
+ % (hex(disk.pci), idx, idx))
+ logging.info("Run cmd %s", command)
+ output = self._CallMonitorCommand(instance.name, command)
+ for line in output.stdout.splitlines():
+ logging.info("%s", line)
+
+ (kvm_cmd, kvm_nics,
+ hvparams, block_devices) = self._LoadKVMRuntime(instance)
+ block_devices.append((disk, dev_path))
+ new_kvm_runtime = (kvm_cmd, kvm_nics, hvparams, block_devices)
+ self._SaveKVMRuntime(instance, new_kvm_runtime)
+
+ return disk.pci
+
+ def HotDelDisk(self, instance, disk, _):
+ """Hotdel disk to the VM
+
+ """
+ if self._HotplugEnabled(instance.name):
+ idx = disk.idx
+
+ command = "device_del virtio-blk-pci.%d" % idx
+ logging.info("Run cmd %s", command)
+ output = self._CallMonitorCommand(instance.name, command)
+ for line in output.stdout.splitlines():
+ logging.info("%s", line)
+
+ command = "drive_del drive%d" % idx
+ logging.info("Run cmd %s", command)
+ output = self._CallMonitorCommand(instance.name, command)
+ for line in output.stdout.splitlines():
+ logging.info("%s", line)
+
+ (kvm_cmd, kvm_nics,
+ hvparams, block_devices) = self._LoadKVMRuntime(instance)
+ rem = [(d, p) for d, p in block_devices
+ if d.idx is not None and d.idx == idx]
+ try:
+ block_devices.remove(rem[0])
+ except (ValueError, IndexError):
+ logging.info("Disk with %d idx disappeared from runtime file", idx)
+ new_kvm_runtime = (kvm_cmd, kvm_nics, hvparams, block_devices)
+ self._SaveKVMRuntime(instance, new_kvm_runtime)
+
+ def HotAddNic(self, instance, nic, seq):
+ """Hotadd new nic to the VM
+
+ """
+ if self._HotplugEnabled(instance.name):
+ nic.pci = self._FindFreePCISlot(instance.name)
+ mac = nic.mac
+ idx = nic.idx
+
+ (tap, fd) = _OpenTap()
+ logging.info("%s %d", tap, fd)
+
+ self._PassTapFd(instance, fd, nic)
+
+ command = ("netdev_add tap,id=netdev%d,fd=netdev%d"
+ % (idx, idx))
+ logging.info("Run cmd %s", command)
+ output = self._CallMonitorCommand(instance.name, command)
+ for line in output.stdout.splitlines():
+ logging.info("%s", line)
+
+ command = ("device_add virtio-net-pci,bus=pci.0,addr=%s,mac=%s,"
+ "netdev=netdev%d,id=virtio-net-pci.%d"
+ % (hex(nic.pci), mac, idx, idx))
+ logging.info("Run cmd %s", command)
+ output = self._CallMonitorCommand(instance.name, command)
+ for line in output.stdout.splitlines():
+ logging.info("%s", line)
+
+ self._ConfigureNIC(instance, seq, nic, tap)
+
+ (kvm_cmd, kvm_nics,
+ hvparams, block_devices) = self._LoadKVMRuntime(instance)
+ kvm_nics.append(nic)
+ new_kvm_runtime = (kvm_cmd, kvm_nics, hvparams, block_devices)
+ self._SaveKVMRuntime(instance, new_kvm_runtime)
+
+ return nic.pci
+
+ def HotDelNic(self, instance, nic, _):
+ """Hotadd new nic to the VM
+
+ """
+ if self._HotplugEnabled(instance.name):
+ idx = nic.idx
+
+ command = "device_del virtio-net-pci.%d" % idx
+ logging.info("Run cmd %s", command)
+ output = self._CallMonitorCommand(instance.name, command)
+ for line in output.stdout.splitlines():
+ logging.info("%s", line)
+
+ command = "netdev_del netdev%d" % idx
+ logging.info("Run cmd %s", command)
+ output = self._CallMonitorCommand(instance.name, command)
+ for line in output.stdout.splitlines():
+ logging.info("%s", line)
+
+ (kvm_cmd, kvm_nics,
+ hvparams, block_devices) = self._LoadKVMRuntime(instance)
+ rem = [n for n in kvm_nics if n.idx is not None and n.idx == nic.idx]
+ try:
+ kvm_nics.remove(rem[0])
+ except (ValueError, IndexError):
+ logging.info("NIC with %d idx disappeared from runtime file", nic.idx)
+ new_kvm_runtime = (kvm_cmd, kvm_nics, hvparams, block_devices)
+ self._SaveKVMRuntime(instance, new_kvm_runtime)
+
+ def _PassTapFd(self, instance, fd, nic):
+ """Pass a tap file descriptor to a running KVM process via its monitor.
+
+ """
+ monsock = self._InstanceMonitor(instance.name)
+ s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
+ s.connect(monsock)
+ idx = nic.idx
+ command = "getfd netdev%d\n" % idx
+ fds = [fd]
+ logging.info("%s", fds)
+ fdsend.sendfds(s, command, fds=fds)
+ s.close()
+
@classmethod
def _ParseKVMVersion(cls, text):
"""Parse the KVM version from the --help output.
@type text: string
@param text: output of kvm --help
@return: (version, v_maj, v_min, v_rev)
- @raise L{errors.HypervisorError}: when the KVM version cannot be retrieved
+ @raise errors.HypervisorError: when the KVM version cannot be retrieved
"""
match = cls._VERSION_RE.search(text.splitlines()[0])
"""Return the installed KVM version.
@return: (version, v_maj, v_min, v_rev)
- @raise L{errors.HypervisorError}: when the KVM version cannot be retrieved
+ @raise errors.HypervisorError: when the KVM version cannot be retrieved
"""
result = utils.RunCmd([constants.KVM_PATH, "--help"])
incoming_address = (target, instance.hvparams[constants.HV_MIGRATION_PORT])
self._ExecuteKVMRuntime(instance, kvm_runtime, incoming=incoming_address)
- def FinalizeMigration(self, instance, info, success):
- """Finalize an instance migration.
+ def FinalizeMigrationDst(self, instance, info, success):
+ """Finalize the instance migration on the target node.
Stop the incoming mode KVM.
"""
instance_name = instance.name
port = instance.hvparams[constants.HV_MIGRATION_PORT]
- pidfile, pid, alive = self._InstancePidAlive(instance_name)
+ _, _, alive = self._InstancePidAlive(instance_name)
if not alive:
raise errors.HypervisorError("Instance not running, cannot migrate")
migrate_command = "migrate -d tcp:%s:%s" % (target, port)
self._CallMonitorCommand(instance_name, migrate_command)
+ def FinalizeMigrationSource(self, instance, success, live):
+ """Finalize the instance migration on the source node.
+
+ @type instance: L{objects.Instance}
+ @param instance: the instance that was migrated
+ @type success: bool
+ @param success: whether the migration succeeded or not
+ @type live: bool
+ @param live: whether the user requested a live migration or not
+
+ """
+ if success:
+ pidfile, pid, _ = self._InstancePidAlive(instance.name)
+ utils.KillProcess(pid)
+ self._RemoveInstanceRuntimeFiles(pidfile, instance.name)
+ elif live:
+ self._CallMonitorCommand(instance.name, self._CONT_CMD)
+
+ def GetMigrationStatus(self, instance):
+ """Get the migration status
+
+ @type instance: L{objects.Instance}
+ @param instance: the instance that is being migrated
+ @rtype: L{objects.MigrationStatus}
+ @return: the status of the current migration (one of
+ L{constants.HV_MIGRATION_VALID_STATUSES}), plus any additional
+ progress info that can be retrieved from the hypervisor
+
+ """
info_command = "info migrate"
- done = False
- broken_answers = 0
- while not done:
- result = self._CallMonitorCommand(instance_name, info_command)
+ for _ in range(self._MIGRATION_INFO_MAX_BAD_ANSWERS):
+ result = self._CallMonitorCommand(instance.name, info_command)
match = self._MIGRATION_STATUS_RE.search(result.stdout)
if not match:
- broken_answers += 1
if not result.stdout:
logging.info("KVM: empty 'info migrate' result")
else:
logging.warning("KVM: unknown 'info migrate' result: %s",
result.stdout)
- time.sleep(self._MIGRATION_INFO_RETRY_DELAY)
else:
status = match.group(1)
- if status == "completed":
- done = True
- elif status == "active":
- # reset the broken answers count
- broken_answers = 0
- time.sleep(self._MIGRATION_INFO_RETRY_DELAY)
- elif status == "failed" or status == "cancelled":
- if not live:
- self._CallMonitorCommand(instance_name, 'cont')
- raise errors.HypervisorError("Migration %s at the kvm level" %
- status)
- else:
- logging.warning("KVM: unknown migration status '%s'", status)
- broken_answers += 1
- time.sleep(self._MIGRATION_INFO_RETRY_DELAY)
- if broken_answers >= self._MIGRATION_INFO_MAX_BAD_ANSWERS:
- raise errors.HypervisorError("Too many 'info migrate' broken answers")
+ if status in constants.HV_KVM_MIGRATION_VALID_STATUSES:
+ migration_status = objects.MigrationStatus(status=status)
+ match = self._MIGRATION_PROGRESS_RE.search(result.stdout)
+ if match:
+ migration_status.transferred_ram = match.group("transferred")
+ migration_status.total_ram = match.group("total")
- utils.KillProcess(pid)
- self._RemoveInstanceRuntimeFiles(pidfile, instance_name)
+ return migration_status
+
+ logging.warning("KVM: unknown migration status '%s'", status)
+
+ time.sleep(self._MIGRATION_INFO_RETRY_DELAY)
+
+ return objects.MigrationStatus(status=constants.HV_MIGRATION_FAILED,
+ info="Too many 'info migrate' broken answers")
+
+ def BalloonInstanceMemory(self, instance, mem):
+ """Balloon an instance memory to a certain value.
+
+ @type instance: L{objects.Instance}
+ @param instance: instance to be accepted
+ @type mem: int
+ @param mem: actual memory size to use for instance runtime
+
+ """
+ self._CallMonitorCommand(instance.name, "balloon %d" % mem)
def GetNodeInfo(self):
"""Return information about the node.
- This is just a wrapper over the base GetLinuxNodeInfo method.
-
@return: a dict with the following keys (values in MiB):
- memory_total: the total memory size on the node
- memory_free: the available memory on the node for instances
- memory_dom0: the memory used by the node itself, if available
+ - hv_version: the hypervisor version in the form (major, minor,
+ revision)
"""
- return self.GetLinuxNodeInfo()
+ result = self.GetLinuxNodeInfo()
+ _, v_major, v_min, v_rev = self._GetKVMVersion()
+ result[constants.HV_NODEINFO_KEY_VERSION] = (v_major, v_min, v_rev)
+ return result
@classmethod
def GetInstanceConsole(cls, instance, hvparams, beparams):
port=instance.network_port,
display=display)
+ spice_bind = hvparams[constants.HV_KVM_SPICE_BIND]
+ if spice_bind:
+ return objects.InstanceConsole(instance=instance.name,
+ kind=constants.CONS_SPICE,
+ host=spice_bind,
+ port=instance.network_port)
+
return objects.InstanceConsole(instance=instance.name,
kind=constants.CONS_MESSAGE,
message=("No serial shell for instance %s" %
" security model is 'none' or 'pool'")
spice_bind = hvparams[constants.HV_KVM_SPICE_BIND]
+ spice_ip_version = hvparams[constants.HV_KVM_SPICE_IP_VERSION]
if spice_bind:
- spice_ip_version = hvparams[constants.HV_KVM_SPICE_IP_VERSION]
if spice_ip_version != constants.IFACE_NO_IP_VERSION_SPECIFIED:
# if an IP version is specified, the spice_bind parameter must be an
# IP of that family
raise errors.HypervisorError("spice: got an IPv6 address (%s), but"
" the specified IP version is %s" %
(spice_bind, spice_ip_version))
+ else:
+ # All the other SPICE parameters depend on spice_bind being set. Raise an
+ # error if any of them is set without it.
+ spice_additional_params = frozenset([
+ constants.HV_KVM_SPICE_IP_VERSION,
+ constants.HV_KVM_SPICE_PASSWORD_FILE,
+ constants.HV_KVM_SPICE_LOSSLESS_IMG_COMPR,
+ constants.HV_KVM_SPICE_JPEG_IMG_COMPR,
+ constants.HV_KVM_SPICE_ZLIB_GLZ_IMG_COMPR,
+ constants.HV_KVM_SPICE_STREAMING_VIDEO_DETECTION,
+ constants.HV_KVM_SPICE_USE_TLS,
+ ])
+ for param in spice_additional_params:
+ if hvparams[param]:
+ raise errors.HypervisorError("spice: %s requires %s to be set" %
+ (param, constants.HV_KVM_SPICE_BIND))
@classmethod
def ValidateParameters(cls, hvparams):