# One-letter device suffixes available for instance disks ("a", "b", ...);
# also bounds the maximum number of disks per instance.
_DISK_LETTERS = string.ascii_lowercase

# Maps Ganeti file-driver constants to the driver names Xen expects in the
# disk directive of its config file.
_FILE_DRIVER_MAP = {
  constants.FD_LOOP: "file",
  constants.FD_BLKTAP: "tap:aio",
  }
+
+
def _CreateConfigCpus(cpu_mask):
  """Create a CPU config string for Xen's config file.

  """
  # Turn the textual mask into a list of per-vCPU CPU lists
  cpu_list = utils.ParseMultiCpuMask(cpu_mask)

  if len(cpu_list) > 1:
    def _FormatOne(vcpu):
      # "all" is expressed with Xen's own wildcard notation
      if vcpu[0] == constants.CPU_PINNING_ALL_VAL:
        return "\"%s\"" % constants.CPU_PINNING_ALL_XEN
      return "\"%s\"" % ",".join(map(str, vcpu))

    # One entry per vCPU, in the form 'cpus = [ "c", "c", "c" ]', where
    # each c is a physical CPU number, a range, a list, or any
    # combination thereof
    return "cpus = [ %s ]" % ", ".join([_FormatOne(vcpu) for vcpu in cpu_list])

  all_cpu_mapping = cpu_list[0]
  if all_cpu_mapping == constants.CPU_PINNING_OFF:
    # A single "all" entry means pinning is disabled, so the parameter
    # is omitted from the config file altogether
    return None

  # A single non-"all" entry pins the entire VM (all vCPUs) to one set
  # of physical CPUs, using the format 'cpu = "C"'
  return "cpu = \"%s\"" % ",".join(map(str, all_cpu_mapping))
+
+
+def _RunInstanceList(fn, instance_list_errors):
+ """Helper function for L{_GetInstanceList} to retrieve the list of instances
+ from xen.
+
+ @type fn: callable
+ @param fn: Function to query xen for the list of instances
+ @type instance_list_errors: list
+ @param instance_list_errors: Error list
+ @rtype: list
+
+ """
+ result = fn()
+ if result.failed:
+ logging.error("Retrieving the instance list from xen failed (%s): %s",
+ result.fail_reason, result.output)
+ instance_list_errors.append(result)
+ raise utils.RetryAgain()
+
+ # skip over the heading
+ return result.stdout.splitlines()
+
+
+def _ParseXmList(lines, include_node):
+ """Parses the output of C{xm list}.
+
+ @type lines: list
+ @param lines: Output lines of C{xm list}
+ @type include_node: boolean
+ @param include_node: If True, return information for Dom0
+ @return: list of tuple containing (name, id, memory, vcpus, state, time
+ spent)
+
+ """
+ result = []
+
+ # Iterate through all lines while ignoring header
+ for line in lines[1:]:
+ # The format of lines is:
+ # Name ID Mem(MiB) VCPUs State Time(s)
+ # Domain-0 0 3418 4 r----- 266.2
+ data = line.split()
+ if len(data) != 6:
+ raise errors.HypervisorError("Can't parse output of xm list,"
+ " line: %s" % line)
+ try:
+ data[1] = int(data[1])
+ data[2] = int(data[2])
+ data[3] = int(data[3])
+ data[5] = float(data[5])
+ except (TypeError, ValueError), err:
+ raise errors.HypervisorError("Can't parse output of xm list,"
+ " line: %s, error: %s" % (line, err))
+
+ # skip the Domain-0 (optional)
+ if include_node or data[0] != _DOM0_NAME:
+ result.append(data)
+
+ return result
+
+
def _GetInstanceList(fn, include_node, _timeout=5):
  """Return the list of running instances.

  See L{_RunInstanceList} and L{_ParseXmList} for parameter details.

  """
  instance_list_errors = []

  try:
    lines = utils.Retry(_RunInstanceList, (0.3, 1.5, 1.0), _timeout,
                        args=(fn, instance_list_errors))
  except utils.RetryTimeout:
    if not instance_list_errors:
      raise errors.HypervisorError("listing instances failed")

    # Report the details of the most recent failed attempt
    last_error = instance_list_errors.pop()
    raise errors.HypervisorError("listing instances failed, timeout"
                                 " exceeded (%s): %s" %
                                 (last_error.fail_reason, last_error.output))

  return _ParseXmList(lines, include_node)
+
+
+def _ParseNodeInfo(info):
+ """Return information about the node.
+
+ @return: a dict with the following keys (memory values in MiB):
+ - memory_total: the total memory size on the node
+ - memory_free: the available memory on the node for instances
+ - nr_cpus: total number of CPUs
+ - nr_nodes: in a NUMA system, the number of domains
+ - nr_sockets: the number of physical CPU sockets in the node
+ - hv_version: the hypervisor version in the form (major, minor)
+
+ """
+ result = {}
+ cores_per_socket = threads_per_core = nr_cpus = None
+ xen_major, xen_minor = None, None
+ memory_total = None
+ memory_free = None
+
+ for line in info.splitlines():
+ fields = line.split(":", 1)
+
+ if len(fields) < 2:
+ continue
+
+ (key, val) = map(lambda s: s.strip(), fields)
+
+ # Note: in Xen 3, memory has changed to total_memory
+ if key in ("memory", "total_memory"):
+ memory_total = int(val)
+ elif key == "free_memory":
+ memory_free = int(val)
+ elif key == "nr_cpus":
+ nr_cpus = result["cpu_total"] = int(val)
+ elif key == "nr_nodes":
+ result["cpu_nodes"] = int(val)
+ elif key == "cores_per_socket":
+ cores_per_socket = int(val)
+ elif key == "threads_per_core":
+ threads_per_core = int(val)
+ elif key == "xen_major":
+ xen_major = int(val)
+ elif key == "xen_minor":
+ xen_minor = int(val)
+
+ if None not in [cores_per_socket, threads_per_core, nr_cpus]:
+ result["cpu_sockets"] = nr_cpus / (cores_per_socket * threads_per_core)
+
+ if memory_free is not None:
+ result["memory_free"] = memory_free
+
+ if memory_total is not None:
+ result["memory_total"] = memory_total
+
+ if not (xen_major is None or xen_minor is None):
+ result[constants.HV_NODEINFO_KEY_VERSION] = (xen_major, xen_minor)
+
+ return result
+
+
def _MergeInstanceInfo(info, fn):
  """Updates node information from L{_ParseNodeInfo} with instance info.

  @type info: dict
  @param info: Result from L{_ParseNodeInfo}
  @type fn: callable
  @param fn: Function returning result of running C{xm list}
  @rtype: dict

  """
  used_by_instances = 0

  # The listing includes Dom0, whose memory and vCPU count are also
  # recorded under their own keys
  for (name, _, mem, vcpus, _, _) in fn(True):
    if name == _DOM0_NAME:
      info["memory_dom0"] = mem
      info["dom0_cpus"] = vcpus
    used_by_instances += mem

  memory_free = info.get("memory_free")
  memory_total = info.get("memory_total")

  # The hypervisor's own memory footprint is whatever is neither free
  # nor assigned to a domain; it can only be derived when both totals
  # were reported
  if None not in [memory_total, memory_free, used_by_instances]:
    info["memory_hv"] = memory_total - memory_free - used_by_instances

  return info
+
+
def _GetNodeInfo(info, fn):
  """Combines L{_MergeInstanceInfo} and L{_ParseNodeInfo}.

  """
  node_info = _ParseNodeInfo(info)
  return _MergeInstanceInfo(node_info, fn)
+
+
def _GetConfigFileDiskData(block_devices, blockdev_prefix,
                           _letters=_DISK_LETTERS):
  """Get disk directives for Xen config file.

  This method builds the xen config disk directive according to the
  given disk_template and block_devices.

  @param block_devices: list of tuples (cfdev, rldev):
    - cfdev: dict containing ganeti config disk part
    - rldev: ganeti.block.bdev.BlockDev object
  @param blockdev_prefix: a string containing blockdevice prefix,
    e.g. "sd" for /dev/sda

  @return: string containing disk directive for xen instance config file

  """
  # Every disk needs a distinct one-letter device suffix
  if len(block_devices) > len(_letters):
    raise errors.HypervisorError("Too many disks")

  directives = []

  for suffix, (cfdev, dev_path) in zip(_letters, block_devices):
    target_name = blockdev_prefix + suffix

    access_mode = "w" if cfdev.mode == constants.DISK_RDWR else "r"

    # File-backed disks need the matching Xen file driver; anything
    # else is handed over as a physical device
    if cfdev.dev_type == constants.LD_FILE:
      driver = _FILE_DRIVER_MAP[cfdev.physical_id[0]]
    else:
      driver = "phy"

    directives.append("'%s:%s,%s,%s'" %
                      (driver, dev_path, target_name, access_mode))

  return directives