root / hw / xen_pt_config_init.c @ a1bc20df
History | View | Annotate | Download (57.2 kB)
1 |
/*
|
---|---|
2 |
* Copyright (c) 2007, Neocleus Corporation.
|
3 |
* Copyright (c) 2007, Intel Corporation.
|
4 |
*
|
5 |
* This work is licensed under the terms of the GNU GPL, version 2. See
|
6 |
* the COPYING file in the top-level directory.
|
7 |
*
|
8 |
* Alex Novik <alex@neocleus.com>
|
9 |
* Allen Kay <allen.m.kay@intel.com>
|
10 |
* Guy Zana <guy@neocleus.com>
|
11 |
*
|
12 |
* This file implements direct PCI assignment to a HVM guest
|
13 |
*/
|
14 |
|
15 |
#include "qemu-timer.h" |
16 |
#include "xen_backend.h" |
17 |
#include "xen_pt.h" |
18 |
|
19 |
#define XEN_PT_MERGE_VALUE(value, data, val_mask) \
|
20 |
(((value) & (val_mask)) | ((data) & ~(val_mask))) |
21 |
|
22 |
#define XEN_PT_INVALID_REG 0xFFFFFFFF /* invalid register value */ |
23 |
|
24 |
/* prototype */
|
25 |
|
26 |
static int xen_pt_ptr_reg_init(XenPCIPassthroughState *s, XenPTRegInfo *reg, |
27 |
uint32_t real_offset, uint32_t *data); |
28 |
|
29 |
|
30 |
/* helper */
|
31 |
|
32 |
/* A return value of 1 means the capability should NOT be exposed to guest. */
|
33 |
static int xen_pt_hide_dev_cap(const XenHostPCIDevice *d, uint8_t grp_id) |
34 |
{ |
35 |
switch (grp_id) {
|
36 |
case PCI_CAP_ID_EXP:
|
37 |
/* The PCI Express Capability Structure of the VF of Intel 82599 10GbE
|
38 |
* Controller looks trivial, e.g., the PCI Express Capabilities
|
39 |
* Register is 0. We should not try to expose it to guest.
|
40 |
*
|
41 |
* The datasheet is available at
|
42 |
* http://download.intel.com/design/network/datashts/82599_datasheet.pdf
|
43 |
*
|
44 |
* See 'Table 9.7. VF PCIe Configuration Space' of the datasheet, the
|
45 |
* PCI Express Capability Structure of the VF of Intel 82599 10GbE
|
46 |
* Controller looks trivial, e.g., the PCI Express Capabilities
|
47 |
* Register is 0, so the Capability Version is 0 and
|
48 |
* xen_pt_pcie_size_init() would fail.
|
49 |
*/
|
50 |
if (d->vendor_id == PCI_VENDOR_ID_INTEL &&
|
51 |
d->device_id == PCI_DEVICE_ID_INTEL_82599_SFP_VF) { |
52 |
return 1; |
53 |
} |
54 |
break;
|
55 |
} |
56 |
return 0; |
57 |
} |
58 |
|
59 |
/* find emulate register group entry */
|
60 |
/* Find the emulated register group covering config-space offset 'address'.
 * A group covers [base_offset, base_offset + size).  Returns NULL when no
 * group contains the offset. */
XenPTRegGroup *xen_pt_find_reg_grp(XenPCIPassthroughState *s, uint32_t address)
{
    XenPTRegGroup *entry = NULL;

    /* find register group entry */
    QLIST_FOREACH(entry, &s->reg_grps, entries) {
        /* check address */
        if ((entry->base_offset <= address)
            && ((entry->base_offset + entry->size) > address)) {
            return entry;
        }
    }

    /* group entry not found */
    return NULL;
}
76 |
|
77 |
/* find emulate register entry */
|
78 |
/* Find the emulated register entry within 'reg_grp' covering config-space
 * offset 'address'.  A register covers [real_offset, real_offset + size)
 * where real_offset = group base + register offset.  Returns NULL when no
 * emulated register contains the offset. */
XenPTReg *xen_pt_find_reg(XenPTRegGroup *reg_grp, uint32_t address)
{
    XenPTReg *reg_entry = NULL;
    XenPTRegInfo *reg = NULL;
    uint32_t real_offset = 0;

    /* find register entry */
    QLIST_FOREACH(reg_entry, &reg_grp->reg_tbl_list, entries) {
        reg = reg_entry->reg;
        real_offset = reg_grp->base_offset + reg->offset;
        /* check address */
        if ((real_offset <= address)
            && ((real_offset + reg->size) > address)) {
            return reg_entry;
        }
    }

    return NULL;
}
97 |
|
98 |
|
99 |
/****************
|
100 |
* general register functions
|
101 |
*/
|
102 |
|
103 |
/* register initialization function */
|
104 |
|
105 |
static int xen_pt_common_reg_init(XenPCIPassthroughState *s, |
106 |
XenPTRegInfo *reg, uint32_t real_offset, |
107 |
uint32_t *data) |
108 |
{ |
109 |
*data = reg->init_val; |
110 |
return 0; |
111 |
} |
112 |
|
113 |
/* Read register functions */
|
114 |
|
115 |
/* Read an emulated 8-bit register: for the bits we emulate (and that are
 * valid for this access), substitute the stored emulated value for the
 * value read from the device. */
static int xen_pt_byte_reg_read(XenPCIPassthroughState *s, XenPTReg *cfg_entry,
                                uint8_t *value, uint8_t valid_mask)
{
    uint8_t emu_bits = cfg_entry->reg->emu_mask & valid_mask;

    *value = XEN_PT_MERGE_VALUE(*value, cfg_entry->data, ~emu_bits);
    return 0;
}
127 |
/* Read an emulated 16-bit register: substitute the stored emulated value
 * for the emulated (and valid) bits of the device value. */
static int xen_pt_word_reg_read(XenPCIPassthroughState *s, XenPTReg *cfg_entry,
                                uint16_t *value, uint16_t valid_mask)
{
    uint16_t emu_bits = cfg_entry->reg->emu_mask & valid_mask;

    *value = XEN_PT_MERGE_VALUE(*value, cfg_entry->data, ~emu_bits);
    return 0;
}
139 |
/* Read an emulated 32-bit register: substitute the stored emulated value
 * for the emulated (and valid) bits of the device value. */
static int xen_pt_long_reg_read(XenPCIPassthroughState *s, XenPTReg *cfg_entry,
                                uint32_t *value, uint32_t valid_mask)
{
    uint32_t emu_bits = cfg_entry->reg->emu_mask & valid_mask;

    *value = XEN_PT_MERGE_VALUE(*value, cfg_entry->data, ~emu_bits);
    return 0;
}
151 |
|
152 |
/* Write register functions */
|
153 |
|
154 |
/* Write an emulated 8-bit register: emulated+writable+valid bits update the
 * stored emulated copy; non-emulated valid bits pass through to the device
 * (the rest keep the device's current value). */
static int xen_pt_byte_reg_write(XenPCIPassthroughState *s, XenPTReg *cfg_entry,
                                 uint8_t *val, uint8_t dev_value,
                                 uint8_t valid_mask)
{
    XenPTRegInfo *info = cfg_entry->reg;
    uint8_t wr_bits = info->emu_mask & ~info->ro_mask & valid_mask;
    uint8_t thru_bits = ~info->emu_mask & valid_mask;

    /* update the emulated copy */
    cfg_entry->data = XEN_PT_MERGE_VALUE(*val, cfg_entry->data, wr_bits);

    /* build the value actually written to the device register */
    *val = XEN_PT_MERGE_VALUE(*val, dev_value, thru_bits);
    return 0;
}
172 |
/* Write an emulated 16-bit register: emulated+writable+valid bits update the
 * stored emulated copy; non-emulated valid bits pass through to the device. */
static int xen_pt_word_reg_write(XenPCIPassthroughState *s, XenPTReg *cfg_entry,
                                 uint16_t *val, uint16_t dev_value,
                                 uint16_t valid_mask)
{
    XenPTRegInfo *info = cfg_entry->reg;
    uint16_t wr_bits = info->emu_mask & ~info->ro_mask & valid_mask;
    uint16_t thru_bits = ~info->emu_mask & valid_mask;

    /* update the emulated copy */
    cfg_entry->data = XEN_PT_MERGE_VALUE(*val, cfg_entry->data, wr_bits);

    /* build the value actually written to the device register */
    *val = XEN_PT_MERGE_VALUE(*val, dev_value, thru_bits);
    return 0;
}
190 |
/* Write an emulated 32-bit register: emulated+writable+valid bits update the
 * stored emulated copy; non-emulated valid bits pass through to the device. */
static int xen_pt_long_reg_write(XenPCIPassthroughState *s, XenPTReg *cfg_entry,
                                 uint32_t *val, uint32_t dev_value,
                                 uint32_t valid_mask)
{
    XenPTRegInfo *info = cfg_entry->reg;
    uint32_t wr_bits = info->emu_mask & ~info->ro_mask & valid_mask;
    uint32_t thru_bits = ~info->emu_mask & valid_mask;

    /* update the emulated copy */
    cfg_entry->data = XEN_PT_MERGE_VALUE(*val, cfg_entry->data, wr_bits);

    /* build the value actually written to the device register */
    *val = XEN_PT_MERGE_VALUE(*val, dev_value, thru_bits);
    return 0;
}
208 |
|
209 |
|
210 |
/* XenPTRegInfo declaration
|
211 |
* - only for emulated register (either a part or whole bit).
|
212 |
* - for passthrough register that need special behavior (like interacting with
|
213 |
* other component), set emu_mask to all 0 and specify r/w func properly.
|
214 |
* - do NOT use ALL F for init_val, otherwise the tbl will not be registered.
|
215 |
*/
|
216 |
|
217 |
/********************
|
218 |
* Header Type0
|
219 |
*/
|
220 |
|
221 |
static int xen_pt_vendor_reg_init(XenPCIPassthroughState *s, |
222 |
XenPTRegInfo *reg, uint32_t real_offset, |
223 |
uint32_t *data) |
224 |
{ |
225 |
*data = s->real_device.vendor_id; |
226 |
return 0; |
227 |
} |
228 |
static int xen_pt_device_reg_init(XenPCIPassthroughState *s, |
229 |
XenPTRegInfo *reg, uint32_t real_offset, |
230 |
uint32_t *data) |
231 |
{ |
232 |
*data = s->real_device.device_id; |
233 |
return 0; |
234 |
} |
235 |
/* Status register initializer.
 * Sets PCI_STATUS_CAP_LIST according to the already-initialized emulated
 * Capabilities Pointer register, so this entry must be declared AFTER the
 * Cap Ptr entry in xen_pt_emu_reg_header0[].  Shuts the guest down with a
 * fatal error (and returns -1) if the Header group or the Cap Ptr register
 * cannot be found, since that indicates broken internal state. */
static int xen_pt_status_reg_init(XenPCIPassthroughState *s,
                                  XenPTRegInfo *reg, uint32_t real_offset,
                                  uint32_t *data)
{
    XenPTRegGroup *reg_grp_entry = NULL;
    XenPTReg *reg_entry = NULL;
    uint32_t reg_field = 0;

    /* find Header register group */
    reg_grp_entry = xen_pt_find_reg_grp(s, PCI_CAPABILITY_LIST);
    if (reg_grp_entry) {
        /* find Capabilities Pointer register */
        reg_entry = xen_pt_find_reg(reg_grp_entry, PCI_CAPABILITY_LIST);
        if (reg_entry) {
            /* a non-zero emulated Cap Ptr means a capability list exists */
            if (reg_entry->data) {
                reg_field |= PCI_STATUS_CAP_LIST;
            } else {
                reg_field &= ~PCI_STATUS_CAP_LIST;
            }
        } else {
            xen_shutdown_fatal_error("Internal error: Couldn't find XenPTReg*"
                                     " for Capabilities Pointer register."
                                     " (%s)\n", __func__);
            return -1;
        }
    } else {
        xen_shutdown_fatal_error("Internal error: Couldn't find XenPTRegGroup"
                                 " for Header. (%s)\n", __func__);
        return -1;
    }

    *data = reg_field;
    return 0;
}
270 |
/* Header Type initializer: start from the table's init_val and force bit 7
 * (0x80) on — presumably the multi-function bit, so the guest always sees
 * a multi-function header; TODO(review) confirm that intent. */
static int xen_pt_header_type_reg_init(XenPCIPassthroughState *s,
                                       XenPTRegInfo *reg, uint32_t real_offset,
                                       uint32_t *data)
{
    *data = reg->init_val | 0x80;
    return 0;
}
278 |
|
279 |
/* initialize Interrupt Pin register */
|
280 |
/* Interrupt Pin initializer: take the value from the real device's INTx
 * pin (via xen_pt_pci_read_intx). */
static int xen_pt_irqpin_reg_init(XenPCIPassthroughState *s,
                                  XenPTRegInfo *reg, uint32_t real_offset,
                                  uint32_t *data)
{
    *data = xen_pt_pci_read_intx(s);
    return 0;
}
287 |
|
288 |
/* Command register */
|
289 |
/* Read the Command register.  For SR-IOV virtual functions the memory-
 * enable bit is additionally emulated, then the usual emulated-bit merge
 * is applied. */
static int xen_pt_cmd_reg_read(XenPCIPassthroughState *s, XenPTReg *cfg_entry,
                               uint16_t *value, uint16_t valid_mask)
{
    uint16_t emu = cfg_entry->reg->emu_mask;

    if (s->is_virtfn) {
        emu |= PCI_COMMAND_MEMORY;
    }

    emu &= valid_mask;
    *value = XEN_PT_MERGE_VALUE(*value, cfg_entry->data, ~emu);
    return 0;
}
306 |
/* Write the Command register.  For SR-IOV virtual functions the memory-
 * enable bit is additionally emulated.  All non-read-only valid bits update
 * the emulated copy; non-emulated valid bits pass through to the device,
 * and INTX_DISABLE is also passed through when the guest sets it or when a
 * machine irq is bound. */
static int xen_pt_cmd_reg_write(XenPCIPassthroughState *s, XenPTReg *cfg_entry,
                                uint16_t *val, uint16_t dev_value,
                                uint16_t valid_mask)
{
    XenPTRegInfo *info = cfg_entry->reg;
    uint16_t emu_mask = info->emu_mask;
    uint16_t writable, throughable;

    if (s->is_virtfn) {
        emu_mask |= PCI_COMMAND_MEMORY;
    }

    /* update the emulated copy */
    writable = ~info->ro_mask & valid_mask;
    cfg_entry->data = XEN_PT_MERGE_VALUE(*val, cfg_entry->data, writable);

    /* build the value actually written to the device register */
    throughable = ~emu_mask & valid_mask;
    if ((*val & PCI_COMMAND_INTX_DISABLE) || s->machine_irq) {
        throughable |= PCI_COMMAND_INTX_DISABLE;
    }

    *val = XEN_PT_MERGE_VALUE(*val, dev_value, throughable);
    return 0;
}
338 |
|
339 |
/* BAR */
|
340 |
#define XEN_PT_BAR_MEM_RO_MASK 0x0000000F /* BAR ReadOnly mask(Memory) */ |
341 |
#define XEN_PT_BAR_MEM_EMU_MASK 0xFFFFFFF0 /* BAR emul mask(Memory) */ |
342 |
#define XEN_PT_BAR_IO_RO_MASK 0x00000003 /* BAR ReadOnly mask(I/O) */ |
343 |
#define XEN_PT_BAR_IO_EMU_MASK 0xFFFFFFFC /* BAR emul mask(I/O) */ |
344 |
|
345 |
/* True when the BAR region is a 64-bit memory BAR. */
static bool is_64bit_bar(PCIIORegion *r)
{
    return (r->type & PCI_BASE_ADDRESS_MEM_TYPE_64) != 0;
}
349 |
|
350 |
/* Return the size of the BAR region 'r'.
 * A 64-bit BAR occupies two consecutive BAR slots: 'r' holds the low
 * 32 bits of the size and (r + 1) the high 32 bits — the caller must
 * ensure r is the lower slot of the pair. */
static uint64_t xen_pt_get_bar_size(PCIIORegion *r)
{
    if (is_64bit_bar(r)) {
        uint64_t size64;
        size64 = (r + 1)->size;
        size64 <<= 32;
        size64 += r->size;
        return size64;
    }
    return r->size;
}
361 |
|
362 |
/* Classify the BAR described by 'reg' as MEM / IO / UPPER (high half of a
 * 64-bit BAR) / UNUSED.
 * NOTE(review): relies on BARs being parsed in ascending index order — a
 * slot is UPPER only when the PREVIOUS slot is a 64-bit memory BAR whose
 * own flag is not already UPPER. */
static XenPTBarFlag xen_pt_bar_reg_parse(XenPCIPassthroughState *s,
                                         XenPTRegInfo *reg)
{
    PCIDevice *d = &s->dev;
    XenPTRegion *region = NULL;
    PCIIORegion *r;
    int index = 0;

    /* check 64bit BAR: if the previous slot is a 64-bit MEM BAR, this slot
     * is its upper half */
    index = xen_pt_bar_offset_to_index(reg->offset);
    if ((0 < index) && (index < PCI_ROM_SLOT)) {
        int type = s->real_device.io_regions[index - 1].type;

        if ((type & XEN_HOST_PCI_REGION_TYPE_MEM)
            && (type & XEN_HOST_PCI_REGION_TYPE_MEM_64)) {
            region = &s->bases[index - 1];
            if (region->bar_flag != XEN_PT_BAR_FLAG_UPPER) {
                return XEN_PT_BAR_FLAG_UPPER;
            }
        }
    }

    /* check unused BAR: size 0 means the BAR is not implemented */
    r = &d->io_regions[index];
    if (!xen_pt_get_bar_size(r)) {
        return XEN_PT_BAR_FLAG_UNUSED;
    }

    /* for ExpROM BAR: always treated as memory */
    if (index == PCI_ROM_SLOT) {
        return XEN_PT_BAR_FLAG_MEM;
    }

    /* check BAR I/O indicator */
    if (s->real_device.io_regions[index].type & XEN_HOST_PCI_REGION_TYPE_IO) {
        return XEN_PT_BAR_FLAG_IO;
    } else {
        return XEN_PT_BAR_FLAG_MEM;
    }
}
402 |
|
403 |
/* Combine a host region's base address with its low BAR flag bits,
 * using the I/O or memory address mask as appropriate for the region. */
static inline uint32_t base_address_with_flags(XenHostPCIIORegion *hr)
{
    uint32_t addr_mask = (hr->type & XEN_HOST_PCI_REGION_TYPE_IO)
                         ? PCI_BASE_ADDRESS_IO_MASK
                         : PCI_BASE_ADDRESS_MEM_MASK;

    return hr->base_addr | (hr->bus_flags & ~addr_mask);
}
411 |
|
412 |
/* BAR register initializer: classify the BAR and mark the emulated value
 * XEN_PT_INVALID_REG for an unused slot (0 otherwise). */
static int xen_pt_bar_reg_init(XenPCIPassthroughState *s, XenPTRegInfo *reg,
                               uint32_t real_offset, uint32_t *data)
{
    int index = xen_pt_bar_offset_to_index(reg->offset);

    if (index < 0 || index >= PCI_NUM_REGIONS) {
        XEN_PT_ERR(&s->dev, "Internal error: Invalid BAR index [%d].\n", index);
        return -1;
    }

    /* classify and record the BAR flag */
    s->bases[index].bar_flag = xen_pt_bar_reg_parse(s, reg);

    *data = (s->bases[index].bar_flag == XEN_PT_BAR_FLAG_UNUSED)
            ? XEN_PT_INVALID_REG : 0;
    return 0;
}
433 |
/* Read a BAR register.
 * The device-side value is taken from the kernel-sysfs fixed-up host
 * region rather than raw config space, then the emulated address bits
 * (selected per BAR kind) are merged in from the emulated copy. */
static int xen_pt_bar_reg_read(XenPCIPassthroughState *s, XenPTReg *cfg_entry,
                               uint32_t *value, uint32_t valid_mask)
{
    XenPTRegInfo *reg = cfg_entry->reg;
    uint32_t valid_emu_mask = 0;
    uint32_t bar_emu_mask = 0;
    int index;

    /* get BAR index */
    index = xen_pt_bar_offset_to_index(reg->offset);
    if (index < 0 || index >= PCI_NUM_REGIONS) {
        XEN_PT_ERR(&s->dev, "Internal error: Invalid BAR index [%d].\n", index);
        return -1;
    }

    /* use fixed-up value from kernel sysfs */
    *value = base_address_with_flags(&s->real_device.io_regions[index]);

    /* set emulate mask depend on BAR flag */
    switch (s->bases[index].bar_flag) {
    case XEN_PT_BAR_FLAG_MEM:
        bar_emu_mask = XEN_PT_BAR_MEM_EMU_MASK;
        break;
    case XEN_PT_BAR_FLAG_IO:
        bar_emu_mask = XEN_PT_BAR_IO_EMU_MASK;
        break;
    case XEN_PT_BAR_FLAG_UPPER:
        /* upper half of a 64-bit BAR: every bit is emulated */
        bar_emu_mask = XEN_PT_BAR_ALLF;
        break;
    default:
        break;
    }

    /* emulate BAR */
    valid_emu_mask = bar_emu_mask & valid_mask;
    *value = XEN_PT_MERGE_VALUE(*value, cfg_entry->data, ~valid_emu_mask);

    return 0;
}
472 |
/* Write a BAR register.
 * The emulated copy takes the writable emulated address bits (size-aligned
 * and excluding the type/indicator bits); non-emulated valid bits pass
 * through to the device.
 *
 * Fix: removed a no-op switch on bases[index].bar_flag ("check whether we
 * need to update the virtual region address") in which every arm was
 * empty — dead code with no effect. */
static int xen_pt_bar_reg_write(XenPCIPassthroughState *s, XenPTReg *cfg_entry,
                                uint32_t *val, uint32_t dev_value,
                                uint32_t valid_mask)
{
    XenPTRegInfo *reg = cfg_entry->reg;
    XenPTRegion *base = NULL;
    PCIDevice *d = &s->dev;
    const PCIIORegion *r;
    uint32_t writable_mask = 0;
    uint32_t throughable_mask = 0;
    uint32_t bar_emu_mask = 0;
    uint32_t bar_ro_mask = 0;
    uint32_t r_size = 0;
    int index = 0;

    index = xen_pt_bar_offset_to_index(reg->offset);
    if (index < 0 || index >= PCI_NUM_REGIONS) {
        XEN_PT_ERR(d, "Internal error: Invalid BAR index [%d].\n", index);
        return -1;
    }

    r = &d->io_regions[index];
    base = &s->bases[index];
    /* align the emulated size to the BAR kind */
    r_size = xen_pt_get_emul_size(base->bar_flag, r->size);

    /* set emulate mask and read-only mask values depend on the BAR flag */
    switch (s->bases[index].bar_flag) {
    case XEN_PT_BAR_FLAG_MEM:
        bar_emu_mask = XEN_PT_BAR_MEM_EMU_MASK;
        if (!r_size) {
            /* low 32 bits mask for 64 bit bars */
            bar_ro_mask = XEN_PT_BAR_ALLF;
        } else {
            bar_ro_mask = XEN_PT_BAR_MEM_RO_MASK | (r_size - 1);
        }
        break;
    case XEN_PT_BAR_FLAG_IO:
        bar_emu_mask = XEN_PT_BAR_IO_EMU_MASK;
        bar_ro_mask = XEN_PT_BAR_IO_RO_MASK | (r_size - 1);
        break;
    case XEN_PT_BAR_FLAG_UPPER:
        bar_emu_mask = XEN_PT_BAR_ALLF;
        bar_ro_mask = r_size ? r_size - 1 : 0;
        break;
    default:
        break;
    }

    /* modify emulate register */
    writable_mask = bar_emu_mask & ~bar_ro_mask & valid_mask;
    cfg_entry->data = XEN_PT_MERGE_VALUE(*val, cfg_entry->data, writable_mask);

    /* create value for writing to I/O device register */
    throughable_mask = ~bar_emu_mask & valid_mask;
    *val = XEN_PT_MERGE_VALUE(*val, dev_value, throughable_mask);

    return 0;
}
543 |
|
544 |
/* write Exp ROM BAR */
|
545 |
/* Write the Expansion ROM BAR.
 * The size-aligned address bits are read-only for the guest, but the
 * ROM-enable bit (PCI_ROM_ADDRESS_ENABLE) is explicitly kept writable. */
static int xen_pt_exp_rom_bar_reg_write(XenPCIPassthroughState *s,
                                        XenPTReg *cfg_entry, uint32_t *val,
                                        uint32_t dev_value, uint32_t valid_mask)
{
    XenPTRegInfo *reg = cfg_entry->reg;
    XenPTRegion *base = NULL;
    PCIDevice *d = (PCIDevice *)&s->dev;
    uint32_t writable_mask = 0;
    uint32_t throughable_mask = 0;
    pcibus_t r_size = 0;
    uint32_t bar_emu_mask = 0;
    uint32_t bar_ro_mask = 0;

    r_size = d->io_regions[PCI_ROM_SLOT].size;
    base = &s->bases[PCI_ROM_SLOT];
    /* align memory type resource size */
    r_size = xen_pt_get_emul_size(base->bar_flag, r_size);

    /* set emulate mask and read-only mask; address bits below the size are
     * RO, the enable bit stays writable */
    bar_emu_mask = reg->emu_mask;
    bar_ro_mask = (reg->ro_mask | (r_size - 1)) & ~PCI_ROM_ADDRESS_ENABLE;

    /* modify emulate register */
    writable_mask = ~bar_ro_mask & valid_mask;
    cfg_entry->data = XEN_PT_MERGE_VALUE(*val, cfg_entry->data, writable_mask);

    /* create value for writing to I/O device register */
    throughable_mask = ~bar_emu_mask & valid_mask;
    *val = XEN_PT_MERGE_VALUE(*val, dev_value, throughable_mask);

    return 0;
}
577 |
|
578 |
/* Header Type0 reg static information table */
|
579 |
/* Header Type0 reg static information table.
 * One entry per emulated type-0 header register; the array is terminated
 * by an entry with .size == 0. */
static XenPTRegInfo xen_pt_emu_reg_header0[] = {
    /* Vendor ID reg */
    {
        .offset = PCI_VENDOR_ID,
        .size = 2,
        .init_val = 0x0000,
        .ro_mask = 0xFFFF,
        .emu_mask = 0xFFFF,
        .init = xen_pt_vendor_reg_init,
        .u.w.read = xen_pt_word_reg_read,
        .u.w.write = xen_pt_word_reg_write,
    },
    /* Device ID reg */
    {
        .offset = PCI_DEVICE_ID,
        .size = 2,
        .init_val = 0x0000,
        .ro_mask = 0xFFFF,
        .emu_mask = 0xFFFF,
        .init = xen_pt_device_reg_init,
        .u.w.read = xen_pt_word_reg_read,
        .u.w.write = xen_pt_word_reg_write,
    },
    /* Command reg */
    {
        .offset = PCI_COMMAND,
        .size = 2,
        .init_val = 0x0000,
        .ro_mask = 0xF880,
        .emu_mask = 0x0740,
        .init = xen_pt_common_reg_init,
        .u.w.read = xen_pt_cmd_reg_read,
        .u.w.write = xen_pt_cmd_reg_write,
    },
    /* Capabilities Pointer reg */
    {
        .offset = PCI_CAPABILITY_LIST,
        .size = 1,
        .init_val = 0x00,
        .ro_mask = 0xFF,
        .emu_mask = 0xFF,
        .init = xen_pt_ptr_reg_init,
        .u.b.read = xen_pt_byte_reg_read,
        .u.b.write = xen_pt_byte_reg_write,
    },
    /* Status reg */
    /* use emulated Cap Ptr value to initialize,
     * so need to be declared after Cap Ptr reg
     */
    {
        .offset = PCI_STATUS,
        .size = 2,
        .init_val = 0x0000,
        .ro_mask = 0x06FF,
        .emu_mask = 0x0010,
        .init = xen_pt_status_reg_init,
        .u.w.read = xen_pt_word_reg_read,
        .u.w.write = xen_pt_word_reg_write,
    },
    /* Cache Line Size reg */
    {
        .offset = PCI_CACHE_LINE_SIZE,
        .size = 1,
        .init_val = 0x00,
        .ro_mask = 0x00,
        .emu_mask = 0xFF,
        .init = xen_pt_common_reg_init,
        .u.b.read = xen_pt_byte_reg_read,
        .u.b.write = xen_pt_byte_reg_write,
    },
    /* Latency Timer reg */
    {
        .offset = PCI_LATENCY_TIMER,
        .size = 1,
        .init_val = 0x00,
        .ro_mask = 0x00,
        .emu_mask = 0xFF,
        .init = xen_pt_common_reg_init,
        .u.b.read = xen_pt_byte_reg_read,
        .u.b.write = xen_pt_byte_reg_write,
    },
    /* Header Type reg */
    {
        .offset = PCI_HEADER_TYPE,
        .size = 1,
        .init_val = 0x00,
        .ro_mask = 0xFF,
        .emu_mask = 0x00,
        .init = xen_pt_header_type_reg_init,
        .u.b.read = xen_pt_byte_reg_read,
        .u.b.write = xen_pt_byte_reg_write,
    },
    /* Interrupt Line reg */
    {
        .offset = PCI_INTERRUPT_LINE,
        .size = 1,
        .init_val = 0x00,
        .ro_mask = 0x00,
        .emu_mask = 0xFF,
        .init = xen_pt_common_reg_init,
        .u.b.read = xen_pt_byte_reg_read,
        .u.b.write = xen_pt_byte_reg_write,
    },
    /* Interrupt Pin reg */
    {
        .offset = PCI_INTERRUPT_PIN,
        .size = 1,
        .init_val = 0x00,
        .ro_mask = 0xFF,
        .emu_mask = 0xFF,
        .init = xen_pt_irqpin_reg_init,
        .u.b.read = xen_pt_byte_reg_read,
        .u.b.write = xen_pt_byte_reg_write,
    },
    /* BAR 0 reg */
    /* mask of BAR need to be decided later, depends on IO/MEM type */
    {
        .offset = PCI_BASE_ADDRESS_0,
        .size = 4,
        .init_val = 0x00000000,
        .init = xen_pt_bar_reg_init,
        .u.dw.read = xen_pt_bar_reg_read,
        .u.dw.write = xen_pt_bar_reg_write,
    },
    /* BAR 1 reg */
    {
        .offset = PCI_BASE_ADDRESS_1,
        .size = 4,
        .init_val = 0x00000000,
        .init = xen_pt_bar_reg_init,
        .u.dw.read = xen_pt_bar_reg_read,
        .u.dw.write = xen_pt_bar_reg_write,
    },
    /* BAR 2 reg */
    {
        .offset = PCI_BASE_ADDRESS_2,
        .size = 4,
        .init_val = 0x00000000,
        .init = xen_pt_bar_reg_init,
        .u.dw.read = xen_pt_bar_reg_read,
        .u.dw.write = xen_pt_bar_reg_write,
    },
    /* BAR 3 reg */
    {
        .offset = PCI_BASE_ADDRESS_3,
        .size = 4,
        .init_val = 0x00000000,
        .init = xen_pt_bar_reg_init,
        .u.dw.read = xen_pt_bar_reg_read,
        .u.dw.write = xen_pt_bar_reg_write,
    },
    /* BAR 4 reg */
    {
        .offset = PCI_BASE_ADDRESS_4,
        .size = 4,
        .init_val = 0x00000000,
        .init = xen_pt_bar_reg_init,
        .u.dw.read = xen_pt_bar_reg_read,
        .u.dw.write = xen_pt_bar_reg_write,
    },
    /* BAR 5 reg */
    {
        .offset = PCI_BASE_ADDRESS_5,
        .size = 4,
        .init_val = 0x00000000,
        .init = xen_pt_bar_reg_init,
        .u.dw.read = xen_pt_bar_reg_read,
        .u.dw.write = xen_pt_bar_reg_write,
    },
    /* Expansion ROM BAR reg */
    {
        .offset = PCI_ROM_ADDRESS,
        .size = 4,
        .init_val = 0x00000000,
        .ro_mask = 0x000007FE,
        .emu_mask = 0xFFFFF800,
        .init = xen_pt_bar_reg_init,
        .u.dw.read = xen_pt_long_reg_read,
        .u.dw.write = xen_pt_exp_rom_bar_reg_write,
    },
    /* table terminator */
    {
        .size = 0,
    },
};
763 |
|
764 |
|
765 |
/*********************************
|
766 |
* Vital Product Data Capability
|
767 |
*/
|
768 |
|
769 |
/* Vital Product Data Capability Structure reg static information table */
|
770 |
/* Vital Product Data Capability Structure reg static information table.
 * Only the Next Pointer is emulated; terminated by .size == 0. */
static XenPTRegInfo xen_pt_emu_reg_vpd[] = {
    /* Next Pointer reg */
    {
        .offset = PCI_CAP_LIST_NEXT,
        .size = 1,
        .init_val = 0x00,
        .ro_mask = 0xFF,
        .emu_mask = 0xFF,
        .init = xen_pt_ptr_reg_init,
        .u.b.read = xen_pt_byte_reg_read,
        .u.b.write = xen_pt_byte_reg_write,
    },
    /* table terminator */
    {
        .size = 0,
    },
};
785 |
|
786 |
|
787 |
/**************************************
|
788 |
* Vendor Specific Capability
|
789 |
*/
|
790 |
|
791 |
/* Vendor Specific Capability Structure reg static information table */
|
792 |
/* Vendor Specific Capability Structure reg static information table.
 * Only the Next Pointer is emulated; terminated by .size == 0. */
static XenPTRegInfo xen_pt_emu_reg_vendor[] = {
    /* Next Pointer reg */
    {
        .offset = PCI_CAP_LIST_NEXT,
        .size = 1,
        .init_val = 0x00,
        .ro_mask = 0xFF,
        .emu_mask = 0xFF,
        .init = xen_pt_ptr_reg_init,
        .u.b.read = xen_pt_byte_reg_read,
        .u.b.write = xen_pt_byte_reg_write,
    },
    /* table terminator */
    {
        .size = 0,
    },
};
807 |
|
808 |
|
809 |
/*****************************
|
810 |
* PCI Express Capability
|
811 |
*/
|
812 |
|
813 |
/* Capability Version field of the PCIe Capabilities register at the
 * capability structure starting at 'offset' in guest config space. */
static inline uint8_t get_capability_version(XenPCIPassthroughState *s,
                                             uint32_t offset)
{
    uint8_t exp_flags = pci_get_byte(s->dev.config + offset + PCI_EXP_FLAGS);

    return exp_flags & PCI_EXP_FLAGS_VERS;
}
819 |
|
820 |
/* Device/Port Type field of the PCIe Capabilities register at the
 * capability structure starting at 'offset' in guest config space. */
static inline uint8_t get_device_type(XenPCIPassthroughState *s,
                                      uint32_t offset)
{
    uint8_t exp_flags = pci_get_byte(s->dev.config + offset + PCI_EXP_FLAGS);

    return (exp_flags & PCI_EXP_FLAGS_TYPE) >> 4;
}
826 |
|
827 |
/* initialize Link Control register */
|
828 |
static int xen_pt_linkctrl_reg_init(XenPCIPassthroughState *s, |
829 |
XenPTRegInfo *reg, uint32_t real_offset, |
830 |
uint32_t *data) |
831 |
{ |
832 |
uint8_t cap_ver = get_capability_version(s, real_offset - reg->offset); |
833 |
uint8_t dev_type = get_device_type(s, real_offset - reg->offset); |
834 |
|
835 |
/* no need to initialize in case of Root Complex Integrated Endpoint
|
836 |
* with cap_ver 1.x
|
837 |
*/
|
838 |
if ((dev_type == PCI_EXP_TYPE_RC_END) && (cap_ver == 1)) { |
839 |
*data = XEN_PT_INVALID_REG; |
840 |
} |
841 |
|
842 |
*data = reg->init_val; |
843 |
return 0; |
844 |
} |
845 |
/* initialize Device Control 2 register */
|
846 |
static int xen_pt_devctrl2_reg_init(XenPCIPassthroughState *s, |
847 |
XenPTRegInfo *reg, uint32_t real_offset, |
848 |
uint32_t *data) |
849 |
{ |
850 |
uint8_t cap_ver = get_capability_version(s, real_offset - reg->offset); |
851 |
|
852 |
/* no need to initialize in case of cap_ver 1.x */
|
853 |
if (cap_ver == 1) { |
854 |
*data = XEN_PT_INVALID_REG; |
855 |
} |
856 |
|
857 |
*data = reg->init_val; |
858 |
return 0; |
859 |
} |
860 |
/* initialize Link Control 2 register */
|
861 |
/* Initialize the PCIe Link Control 2 register.
 * Link Control 2 does not exist in cap-version-1.x capability structures,
 * so mark it invalid; otherwise seed the Target Link Speed field from the
 * device's Supported Link Speed (low bits of Link Capabilities). */
static int xen_pt_linkctrl2_reg_init(XenPCIPassthroughState *s,
                                     XenPTRegInfo *reg, uint32_t real_offset,
                                     uint32_t *data)
{
    uint8_t cap_ver = get_capability_version(s, real_offset - reg->offset);
    uint32_t reg_field = 0;

    /* no need to initialize in case of cap_ver 1.x */
    if (cap_ver == 1) {
        reg_field = XEN_PT_INVALID_REG;
    } else {
        /* set Supported Link Speed */
        uint8_t lnkcap = pci_get_byte(s->dev.config + real_offset - reg->offset
                                      + PCI_EXP_LNKCAP);
        reg_field |= PCI_EXP_LNKCAP_SLS & lnkcap;
    }

    *data = reg_field;
    return 0;
}
881 |
|
882 |
/* PCI Express Capability Structure reg static information table */
|
883 |
/* PCI Express Capability Structure reg static information table.
 * Terminated by an entry with .size == 0. */
static XenPTRegInfo xen_pt_emu_reg_pcie[] = {
    /* Next Pointer reg */
    {
        .offset = PCI_CAP_LIST_NEXT,
        .size = 1,
        .init_val = 0x00,
        .ro_mask = 0xFF,
        .emu_mask = 0xFF,
        .init = xen_pt_ptr_reg_init,
        .u.b.read = xen_pt_byte_reg_read,
        .u.b.write = xen_pt_byte_reg_write,
    },
    /* Device Capabilities reg */
    {
        .offset = PCI_EXP_DEVCAP,
        .size = 4,
        .init_val = 0x00000000,
        .ro_mask = 0x1FFCFFFF,
        .emu_mask = 0x10000000,
        .init = xen_pt_common_reg_init,
        .u.dw.read = xen_pt_long_reg_read,
        .u.dw.write = xen_pt_long_reg_write,
    },
    /* Device Control reg */
    {
        .offset = PCI_EXP_DEVCTL,
        .size = 2,
        .init_val = 0x2810,
        .ro_mask = 0x8400,
        .emu_mask = 0xFFFF,
        .init = xen_pt_common_reg_init,
        .u.w.read = xen_pt_word_reg_read,
        .u.w.write = xen_pt_word_reg_write,
    },
    /* Link Control reg */
    {
        .offset = PCI_EXP_LNKCTL,
        .size = 2,
        .init_val = 0x0000,
        .ro_mask = 0xFC34,
        .emu_mask = 0xFFFF,
        .init = xen_pt_linkctrl_reg_init,
        .u.w.read = xen_pt_word_reg_read,
        .u.w.write = xen_pt_word_reg_write,
    },
    /* Device Control 2 reg */
    {
        .offset = 0x28,
        .size = 2,
        .init_val = 0x0000,
        .ro_mask = 0xFFE0,
        .emu_mask = 0xFFFF,
        .init = xen_pt_devctrl2_reg_init,
        .u.w.read = xen_pt_word_reg_read,
        .u.w.write = xen_pt_word_reg_write,
    },
    /* Link Control 2 reg */
    {
        .offset = 0x30,
        .size = 2,
        .init_val = 0x0000,
        .ro_mask = 0xE040,
        .emu_mask = 0xFFFF,
        .init = xen_pt_linkctrl2_reg_init,
        .u.w.read = xen_pt_word_reg_read,
        .u.w.write = xen_pt_word_reg_write,
    },
    /* table terminator */
    {
        .size = 0,
    },
};
954 |
|
955 |
|
956 |
/*********************************
|
957 |
* Power Management Capability
|
958 |
*/
|
959 |
|
960 |
/* read Power Management Control/Status register */
|
961 |
/* Read the Power Management Control/Status register.  The power state and
 * No_Soft_Reset bits are always emulated in addition to the table mask. */
static int xen_pt_pmcsr_reg_read(XenPCIPassthroughState *s, XenPTReg *cfg_entry,
                                 uint16_t *value, uint16_t valid_mask)
{
    uint16_t emu = cfg_entry->reg->emu_mask
                   | PCI_PM_CTRL_STATE_MASK | PCI_PM_CTRL_NO_SOFT_RESET;

    emu &= valid_mask;
    *value = XEN_PT_MERGE_VALUE(*value, cfg_entry->data, ~emu);
    return 0;
}
974 |
/* write Power Management Control/Status register */
|
975 |
static int xen_pt_pmcsr_reg_write(XenPCIPassthroughState *s, |
976 |
XenPTReg *cfg_entry, uint16_t *val, |
977 |
uint16_t dev_value, uint16_t valid_mask) |
978 |
{ |
979 |
XenPTRegInfo *reg = cfg_entry->reg; |
980 |
uint16_t emu_mask = reg->emu_mask; |
981 |
uint16_t writable_mask = 0;
|
982 |
uint16_t throughable_mask = 0;
|
983 |
|
984 |
emu_mask |= PCI_PM_CTRL_STATE_MASK | PCI_PM_CTRL_NO_SOFT_RESET; |
985 |
|
986 |
/* modify emulate register */
|
987 |
writable_mask = emu_mask & ~reg->ro_mask & valid_mask; |
988 |
cfg_entry->data = XEN_PT_MERGE_VALUE(*val, cfg_entry->data, writable_mask); |
989 |
|
990 |
/* create value for writing to I/O device register */
|
991 |
throughable_mask = ~emu_mask & valid_mask; |
992 |
*val = XEN_PT_MERGE_VALUE(*val, dev_value, throughable_mask); |
993 |
|
994 |
return 0; |
995 |
} |
996 |
|
997 |
/* Power Management Capability reg static information table.
 * ro_mask marks bits a guest write may not change in the emulated cache;
 * emu_mask marks bits served from the cache instead of the device. */
static XenPTRegInfo xen_pt_emu_reg_pm[] = {
    /* Next Pointer reg */
    {
        .offset = PCI_CAP_LIST_NEXT,
        .size = 1,
        .init_val = 0x00,
        .ro_mask = 0xFF,
        .emu_mask = 0xFF,
        .init = xen_pt_ptr_reg_init,
        .u.b.read = xen_pt_byte_reg_read,
        .u.b.write = xen_pt_byte_reg_write,
    },
    /* Power Management Capabilities reg */
    {
        .offset = PCI_CAP_FLAGS,
        .size = 2,
        .init_val = 0x0000,
        .ro_mask = 0xFFFF,
        .emu_mask = 0xF9C8,
        .init = xen_pt_common_reg_init,
        .u.w.read = xen_pt_word_reg_read,
        .u.w.write = xen_pt_word_reg_write,
    },
    /* PCI Power Management Control/Status reg */
    {
        .offset = PCI_PM_CTRL,
        .size = 2,
        .init_val = 0x0008,
        .ro_mask = 0xE1FC,
        .emu_mask = 0x8100,
        .init = xen_pt_common_reg_init,
        .u.w.read = xen_pt_pmcsr_reg_read,
        .u.w.write = xen_pt_pmcsr_reg_write,
    },
    /* table terminator (size == 0) */
    {
        .size = 0,
    },
};
1036 |
|
1037 |
|
1038 |
/********************************
|
1039 |
* MSI Capability
|
1040 |
*/
|
1041 |
|
1042 |
/* Helper */
|
1043 |
static bool xen_pt_msgdata_check_type(uint32_t offset, uint16_t flags) |
1044 |
{ |
1045 |
/* check the offset whether matches the type or not */
|
1046 |
bool is_32 = (offset == PCI_MSI_DATA_32) && !(flags & PCI_MSI_FLAGS_64BIT);
|
1047 |
bool is_64 = (offset == PCI_MSI_DATA_64) && (flags & PCI_MSI_FLAGS_64BIT);
|
1048 |
return is_32 || is_64;
|
1049 |
} |
1050 |
|
1051 |
/* Message Control register */
/* Initialize the emulated MSI Message Control register and latch the
 * capability's control offset and hardware flags into s->msi. */
static int xen_pt_msgctrl_reg_init(XenPCIPassthroughState *s,
                                   XenPTRegInfo *reg, uint32_t real_offset,
                                   uint32_t *data)
{
    PCIDevice *d = &s->dev;
    XenPTMSI *msi = s->msi;
    uint16_t reg_field = 0;

    /* use I/O device register's value as initial value */
    reg_field = pci_get_word(d->config + real_offset);

    /* never start out with MSI already enabled on the physical device */
    if (reg_field & PCI_MSI_FLAGS_ENABLE) {
        XEN_PT_LOG(&s->dev, "MSI already enabled, disabling it first\n");
        xen_host_pci_set_word(&s->real_device, real_offset,
                              reg_field & ~PCI_MSI_FLAGS_ENABLE);
    }
    msi->flags |= reg_field;
    msi->ctrl_offset = real_offset;
    msi->initialized = false;
    msi->mapped = false;

    /* emulated register starts from the table's init_val, not the device */
    *data = reg->init_val;
    return 0;
}
1076 |
/* Handle a guest write to the MSI Message Control register: merge the
 * emulated bits, set up/tear down the physical MSI binding on an enable
 * transition, and pass the enable bit through to the device. */
static int xen_pt_msgctrl_reg_write(XenPCIPassthroughState *s,
                                    XenPTReg *cfg_entry, uint16_t *val,
                                    uint16_t dev_value, uint16_t valid_mask)
{
    XenPTRegInfo *reg = cfg_entry->reg;
    XenPTMSI *msi = s->msi;
    uint16_t writable_mask = 0;
    uint16_t throughable_mask = 0;
    uint16_t raw_val;

    /* Currently no support for multi-vector */
    if (*val & PCI_MSI_FLAGS_QSIZE) {
        XEN_PT_WARN(&s->dev, "Tries to set more than 1 vector ctrl %x\n", *val);
    }

    /* modify emulate register */
    writable_mask = reg->emu_mask & ~reg->ro_mask & valid_mask;
    cfg_entry->data = XEN_PT_MERGE_VALUE(*val, cfg_entry->data, writable_mask);
    /* mirror everything except the enable bit into the cached MSI flags */
    msi->flags |= cfg_entry->data & ~PCI_MSI_FLAGS_ENABLE;

    /* create value for writing to I/O device register */
    raw_val = *val;   /* keep the guest's raw write; *val is rewritten below */
    throughable_mask = ~reg->emu_mask & valid_mask;
    *val = XEN_PT_MERGE_VALUE(*val, dev_value, throughable_mask);

    /* update MSI */
    if (raw_val & PCI_MSI_FLAGS_ENABLE) {
        /* setup MSI pirq for the first time */
        if (!msi->initialized) {
            /* Init physical one */
            XEN_PT_LOG(&s->dev, "setup MSI\n");
            if (xen_pt_msi_setup(s)) {
                /* We do not broadcast the error to the framework code, so
                 * that MSI errors are contained in MSI emulation code and
                 * QEMU can go on running.
                 * Guest MSI would be actually not working.
                 */
                *val &= ~PCI_MSI_FLAGS_ENABLE;
                XEN_PT_WARN(&s->dev, "Can not map MSI.\n");
                return 0;
            }
            if (xen_pt_msi_update(s)) {
                *val &= ~PCI_MSI_FLAGS_ENABLE;
                XEN_PT_WARN(&s->dev, "Can not bind MSI\n");
                return 0;
            }
            msi->initialized = true;
            msi->mapped = true;
        }
        msi->flags |= PCI_MSI_FLAGS_ENABLE;
    } else {
        msi->flags &= ~PCI_MSI_FLAGS_ENABLE;
    }

    /* pass through MSI_ENABLE bit unchanged from the guest's write */
    *val &= ~PCI_MSI_FLAGS_ENABLE;
    *val |= raw_val & PCI_MSI_FLAGS_ENABLE;

    return 0;
}
1136 |
|
1137 |
/* initialize Message Upper Address register */
|
1138 |
static int xen_pt_msgaddr64_reg_init(XenPCIPassthroughState *s, |
1139 |
XenPTRegInfo *reg, uint32_t real_offset, |
1140 |
uint32_t *data) |
1141 |
{ |
1142 |
/* no need to initialize in case of 32 bit type */
|
1143 |
if (!(s->msi->flags & PCI_MSI_FLAGS_64BIT)) {
|
1144 |
*data = XEN_PT_INVALID_REG; |
1145 |
} else {
|
1146 |
*data = reg->init_val; |
1147 |
} |
1148 |
|
1149 |
return 0; |
1150 |
} |
1151 |
/* this function will be called twice (for 32 bit and 64 bit type) */
|
1152 |
/* initialize Message Data register */
|
1153 |
static int xen_pt_msgdata_reg_init(XenPCIPassthroughState *s, |
1154 |
XenPTRegInfo *reg, uint32_t real_offset, |
1155 |
uint32_t *data) |
1156 |
{ |
1157 |
uint32_t flags = s->msi->flags; |
1158 |
uint32_t offset = reg->offset; |
1159 |
|
1160 |
/* check the offset whether matches the type or not */
|
1161 |
if (xen_pt_msgdata_check_type(offset, flags)) {
|
1162 |
*data = reg->init_val; |
1163 |
} else {
|
1164 |
*data = XEN_PT_INVALID_REG; |
1165 |
} |
1166 |
return 0; |
1167 |
} |
1168 |
|
1169 |
/* write Message Address register */
static int xen_pt_msgaddr32_reg_write(XenPCIPassthroughState *s,
                                      XenPTReg *cfg_entry, uint32_t *val,
                                      uint32_t dev_value, uint32_t valid_mask)
{
    XenPTRegInfo *reg = cfg_entry->reg;
    uint32_t writable_mask = 0;
    uint32_t throughable_mask = 0;
    uint32_t old_addr = cfg_entry->data;  /* to detect an address change */

    /* modify emulate register */
    writable_mask = reg->emu_mask & ~reg->ro_mask & valid_mask;
    cfg_entry->data = XEN_PT_MERGE_VALUE(*val, cfg_entry->data, writable_mask);
    /* keep the cached MSI state in sync with the emulated register */
    s->msi->addr_lo = cfg_entry->data;

    /* create value for writing to I/O device register */
    throughable_mask = ~reg->emu_mask & valid_mask;
    *val = XEN_PT_MERGE_VALUE(*val, dev_value, throughable_mask);

    /* update MSI: rebind only if the address changed and MSI is mapped */
    if (cfg_entry->data != old_addr) {
        if (s->msi->mapped) {
            xen_pt_msi_update(s);
        }
    }

    return 0;
}
1197 |
/* write Message Upper Address register */
static int xen_pt_msgaddr64_reg_write(XenPCIPassthroughState *s,
                                      XenPTReg *cfg_entry, uint32_t *val,
                                      uint32_t dev_value, uint32_t valid_mask)
{
    XenPTRegInfo *reg = cfg_entry->reg;
    uint32_t writable_mask = 0;
    uint32_t throughable_mask = 0;
    uint32_t old_addr = cfg_entry->data;  /* to detect an address change */

    /* check whether the type is 64 bit or not */
    if (!(s->msi->flags & PCI_MSI_FLAGS_64BIT)) {
        XEN_PT_ERR(&s->dev,
                   "Can't write to the upper address without 64 bit support\n");
        return -1;
    }

    /* modify emulate register */
    writable_mask = reg->emu_mask & ~reg->ro_mask & valid_mask;
    cfg_entry->data = XEN_PT_MERGE_VALUE(*val, cfg_entry->data, writable_mask);
    /* update the msi_info too */
    s->msi->addr_hi = cfg_entry->data;

    /* create value for writing to I/O device register */
    throughable_mask = ~reg->emu_mask & valid_mask;
    *val = XEN_PT_MERGE_VALUE(*val, dev_value, throughable_mask);

    /* update MSI: rebind only if the address changed and MSI is mapped */
    if (cfg_entry->data != old_addr) {
        if (s->msi->mapped) {
            xen_pt_msi_update(s);
        }
    }

    return 0;
}
1233 |
|
1234 |
|
1235 |
/* this function will be called twice (for 32 bit and 64 bit type) */
/* write Message Data register */
static int xen_pt_msgdata_reg_write(XenPCIPassthroughState *s,
                                    XenPTReg *cfg_entry, uint16_t *val,
                                    uint16_t dev_value, uint16_t valid_mask)
{
    XenPTRegInfo *reg = cfg_entry->reg;
    XenPTMSI *msi = s->msi;
    uint16_t writable_mask = 0;
    uint16_t throughable_mask = 0;
    uint16_t old_data = cfg_entry->data;  /* to detect a data change */
    uint32_t offset = reg->offset;

    /* check the offset whether matches the type or not */
    if (!xen_pt_msgdata_check_type(offset, msi->flags)) {
        /* exit I/O emulator */
        XEN_PT_ERR(&s->dev, "the offset does not match the 32/64 bit type!\n");
        return -1;
    }

    /* modify emulate register */
    writable_mask = reg->emu_mask & ~reg->ro_mask & valid_mask;
    cfg_entry->data = XEN_PT_MERGE_VALUE(*val, cfg_entry->data, writable_mask);
    /* update the msi_info too */
    msi->data = cfg_entry->data;

    /* create value for writing to I/O device register */
    throughable_mask = ~reg->emu_mask & valid_mask;
    *val = XEN_PT_MERGE_VALUE(*val, dev_value, throughable_mask);

    /* update MSI: rebind only if the data changed and MSI is mapped */
    if (cfg_entry->data != old_data) {
        if (msi->mapped) {
            xen_pt_msi_update(s);
        }
    }

    return 0;
}
1274 |
|
1275 |
/* MSI Capability Structure reg static information table.
 * no_wb = 1 suppresses the write-back to the device for registers whose
 * physical update is handled by the MSI code itself. */
static XenPTRegInfo xen_pt_emu_reg_msi[] = {
    /* Next Pointer reg */
    {
        .offset = PCI_CAP_LIST_NEXT,
        .size = 1,
        .init_val = 0x00,
        .ro_mask = 0xFF,
        .emu_mask = 0xFF,
        .init = xen_pt_ptr_reg_init,
        .u.b.read = xen_pt_byte_reg_read,
        .u.b.write = xen_pt_byte_reg_write,
    },
    /* Message Control reg */
    {
        .offset = PCI_MSI_FLAGS,
        .size = 2,
        .init_val = 0x0000,
        .ro_mask = 0xFF8E,
        .emu_mask = 0x007F,
        .init = xen_pt_msgctrl_reg_init,
        .u.w.read = xen_pt_word_reg_read,
        .u.w.write = xen_pt_msgctrl_reg_write,
    },
    /* Message Address reg */
    {
        .offset = PCI_MSI_ADDRESS_LO,
        .size = 4,
        .init_val = 0x00000000,
        .ro_mask = 0x00000003,
        .emu_mask = 0xFFFFFFFF,
        .no_wb = 1,
        .init = xen_pt_common_reg_init,
        .u.dw.read = xen_pt_long_reg_read,
        .u.dw.write = xen_pt_msgaddr32_reg_write,
    },
    /* Message Upper Address reg (if PCI_MSI_FLAGS_64BIT set) */
    {
        .offset = PCI_MSI_ADDRESS_HI,
        .size = 4,
        .init_val = 0x00000000,
        .ro_mask = 0x00000000,
        .emu_mask = 0xFFFFFFFF,
        .no_wb = 1,
        .init = xen_pt_msgaddr64_reg_init,
        .u.dw.read = xen_pt_long_reg_read,
        .u.dw.write = xen_pt_msgaddr64_reg_write,
    },
    /* Message Data reg (16 bits of data for 32-bit devices) */
    {
        .offset = PCI_MSI_DATA_32,
        .size = 2,
        .init_val = 0x0000,
        .ro_mask = 0x0000,
        .emu_mask = 0xFFFF,
        .no_wb = 1,
        .init = xen_pt_msgdata_reg_init,
        .u.w.read = xen_pt_word_reg_read,
        .u.w.write = xen_pt_msgdata_reg_write,
    },
    /* Message Data reg (16 bits of data for 64-bit devices) */
    {
        .offset = PCI_MSI_DATA_64,
        .size = 2,
        .init_val = 0x0000,
        .ro_mask = 0x0000,
        .emu_mask = 0xFFFF,
        .no_wb = 1,
        .init = xen_pt_msgdata_reg_init,
        .u.w.read = xen_pt_word_reg_read,
        .u.w.write = xen_pt_msgdata_reg_write,
    },
    /* table terminator (size == 0) */
    {
        .size = 0,
    },
};
1351 |
|
1352 |
|
1353 |
/**************************************
|
1354 |
* MSI-X Capability
|
1355 |
*/
|
1356 |
|
1357 |
/* Message Control register for MSI-X */
/* Initialize the emulated MSI-X Message Control register; makes sure the
 * physical device does not start with MSI-X enabled. */
static int xen_pt_msixctrl_reg_init(XenPCIPassthroughState *s,
                                    XenPTRegInfo *reg, uint32_t real_offset,
                                    uint32_t *data)
{
    PCIDevice *d = &s->dev;
    uint16_t reg_field = 0;

    /* use I/O device register's value as initial value */
    reg_field = pci_get_word(d->config + real_offset);

    if (reg_field & PCI_MSIX_FLAGS_ENABLE) {
        XEN_PT_LOG(d, "MSIX already enabled, disabling it first\n");
        xen_host_pci_set_word(&s->real_device, real_offset,
                              reg_field & ~PCI_MSIX_FLAGS_ENABLE);
    }

    /* remember where the control word lives for later updates */
    s->msix->ctrl_offset = real_offset;

    *data = reg->init_val;
    return 0;
}
1379 |
/* Handle a guest write to the MSI-X Message Control register: merge the
 * emulated bits and push table updates to the device when MSI-X becomes
 * enabled and unmasked. */
static int xen_pt_msixctrl_reg_write(XenPCIPassthroughState *s,
                                     XenPTReg *cfg_entry, uint16_t *val,
                                     uint16_t dev_value, uint16_t valid_mask)
{
    XenPTRegInfo *reg = cfg_entry->reg;
    uint16_t writable_mask = 0;
    uint16_t throughable_mask = 0;
    int debug_msix_enabled_old;  /* previous enable state, for logging only */

    /* modify emulate register */
    writable_mask = reg->emu_mask & ~reg->ro_mask & valid_mask;
    cfg_entry->data = XEN_PT_MERGE_VALUE(*val, cfg_entry->data, writable_mask);

    /* create value for writing to I/O device register */
    throughable_mask = ~reg->emu_mask & valid_mask;
    *val = XEN_PT_MERGE_VALUE(*val, dev_value, throughable_mask);

    /* update MSI-X only when enabled and not globally masked */
    if ((*val & PCI_MSIX_FLAGS_ENABLE)
        && !(*val & PCI_MSIX_FLAGS_MASKALL)) {
        xen_pt_msix_update(s);
    }

    debug_msix_enabled_old = s->msix->enabled;
    s->msix->enabled = !!(*val & PCI_MSIX_FLAGS_ENABLE);
    if (s->msix->enabled != debug_msix_enabled_old) {
        XEN_PT_LOG(&s->dev, "%s MSI-X\n",
                   s->msix->enabled ? "enable" : "disable");
    }

    return 0;
}
1411 |
|
1412 |
/* MSI-X Capability Structure reg static information table.
 * NOTE(review): the Message Control entry reuses PCI_MSI_FLAGS as its
 * offset — presumably the control word sits at the same offset in both
 * capabilities; confirm against the PCI header definitions. */
static XenPTRegInfo xen_pt_emu_reg_msix[] = {
    /* Next Pointer reg */
    {
        .offset = PCI_CAP_LIST_NEXT,
        .size = 1,
        .init_val = 0x00,
        .ro_mask = 0xFF,
        .emu_mask = 0xFF,
        .init = xen_pt_ptr_reg_init,
        .u.b.read = xen_pt_byte_reg_read,
        .u.b.write = xen_pt_byte_reg_write,
    },
    /* Message Control reg */
    {
        .offset = PCI_MSI_FLAGS,
        .size = 2,
        .init_val = 0x0000,
        .ro_mask = 0x3FFF,
        .emu_mask = 0x0000,
        .init = xen_pt_msixctrl_reg_init,
        .u.w.read = xen_pt_word_reg_read,
        .u.w.write = xen_pt_msixctrl_reg_write,
    },
    /* table terminator (size == 0) */
    {
        .size = 0,
    },
};
1440 |
|
1441 |
|
1442 |
/****************************
|
1443 |
* Capabilities
|
1444 |
*/
|
1445 |
|
1446 |
/* capability structure register group size functions */
|
1447 |
|
1448 |
/* default size_init: the group size is fixed and taken straight from the
 * static group table entry */
static int xen_pt_reg_grp_size_init(XenPCIPassthroughState *s,
                                    const XenPTRegGroupInfo *grp_reg,
                                    uint32_t base_offset, uint8_t *size)
{
    *size = grp_reg->grp_size;
    return 0;
}
1455 |
/* get Vendor Specific Capability Structure register group size */
static int xen_pt_vendor_size_init(XenPCIPassthroughState *s,
                                   const XenPTRegGroupInfo *grp_reg,
                                   uint32_t base_offset, uint8_t *size)
{
    /* the vendor capability carries its own length byte at offset 0x02 */
    *size = pci_get_byte(s->dev.config + base_offset + 0x02);
    return 0;
}
1463 |
/* get PCI Express Capability Structure register group size */
static int xen_pt_pcie_size_init(XenPCIPassthroughState *s,
                                 const XenPTRegGroupInfo *grp_reg,
                                 uint32_t base_offset, uint8_t *size)
{
    PCIDevice *d = &s->dev;
    uint8_t version = get_capability_version(s, base_offset);
    uint8_t type = get_device_type(s, base_offset);
    uint8_t pcie_size = 0;


    /* calculate size depend on capability version and device/port type */
    /* in case of PCI Express Base Specification Rev 1.x */
    if (version == 1) {
        /* The PCI Express Capabilities, Device Capabilities, and Device
         * Status/Control registers are required for all PCI Express devices.
         * The Link Capabilities and Link Status/Control are required for all
         * Endpoints that are not Root Complex Integrated Endpoints. Endpoints
         * are not required to implement registers other than those listed
         * above and terminate the capability structure.
         */
        switch (type) {
        case PCI_EXP_TYPE_ENDPOINT:
        case PCI_EXP_TYPE_LEG_END:
            pcie_size = 0x14;
            break;
        case PCI_EXP_TYPE_RC_END:
            /* has no link */
            pcie_size = 0x0C;
            break;
        /* only EndPoint passthrough is supported */
        case PCI_EXP_TYPE_ROOT_PORT:
        case PCI_EXP_TYPE_UPSTREAM:
        case PCI_EXP_TYPE_DOWNSTREAM:
        case PCI_EXP_TYPE_PCI_BRIDGE:
        case PCI_EXP_TYPE_PCIE_BRIDGE:
        case PCI_EXP_TYPE_RC_EC:
        default:
            XEN_PT_ERR(d, "Unsupported device/port type %#x.\n", type);
            return -1;
        }
    }
    /* in case of PCI Express Base Specification Rev 2.0 */
    else if (version == 2) {
        switch (type) {
        case PCI_EXP_TYPE_ENDPOINT:
        case PCI_EXP_TYPE_LEG_END:
        case PCI_EXP_TYPE_RC_END:
            /* For Functions that do not implement the registers,
             * these spaces must be hardwired to 0b.
             */
            pcie_size = 0x3C;
            break;
        /* only EndPoint passthrough is supported */
        case PCI_EXP_TYPE_ROOT_PORT:
        case PCI_EXP_TYPE_UPSTREAM:
        case PCI_EXP_TYPE_DOWNSTREAM:
        case PCI_EXP_TYPE_PCI_BRIDGE:
        case PCI_EXP_TYPE_PCIE_BRIDGE:
        case PCI_EXP_TYPE_RC_EC:
        default:
            XEN_PT_ERR(d, "Unsupported device/port type %#x.\n", type);
            return -1;
        }
    } else {
        XEN_PT_ERR(d, "Unsupported capability version %#x.\n", version);
        return -1;
    }

    *size = pcie_size;
    return 0;
}
1535 |
/* get MSI Capability Structure register group size; also allocates the
 * per-device MSI state (s->msi) as a side effect */
static int xen_pt_msi_size_init(XenPCIPassthroughState *s,
                                const XenPTRegGroupInfo *grp_reg,
                                uint32_t base_offset, uint8_t *size)
{
    PCIDevice *d = &s->dev;
    uint16_t msg_ctrl = 0;
    uint8_t msi_size = 0xa;  /* base size: control + 32-bit address + data */

    msg_ctrl = pci_get_word(d->config + (base_offset + PCI_MSI_FLAGS));

    /* 64-bit capable: one extra dword for the upper address */
    if (msg_ctrl & PCI_MSI_FLAGS_64BIT) {
        msi_size += 4;
    }
    /* per-vector masking capable: extra mask/pending registers */
    if (msg_ctrl & PCI_MSI_FLAGS_MASKBIT) {
        msi_size += 10;
    }

    s->msi = g_new0(XenPTMSI, 1);
    s->msi->pirq = XEN_PT_UNASSIGNED_PIRQ;

    *size = msi_size;
    return 0;
}
1560 |
/* get MSI-X Capability Structure register group size; delegates the real
 * work (table mapping, s->msix allocation) to xen_pt_msix_init() */
static int xen_pt_msix_size_init(XenPCIPassthroughState *s,
                                 const XenPTRegGroupInfo *grp_reg,
                                 uint32_t base_offset, uint8_t *size)
{
    int rc = 0;

    rc = xen_pt_msix_init(s, base_offset);

    if (rc < 0) {
        XEN_PT_ERR(&s->dev, "Internal error: Invalid xen_pt_msix_init.\n");
        return rc;
    }

    /* MSI-X capability itself has a fixed size from the table */
    *size = grp_reg->grp_size;
    return 0;
}
1577 |
|
1578 |
|
1579 |
/* Master table of config-space register groups. EMU groups get per-register
 * emulation via emu_regs; HARDWIRED groups are hidden from the guest (their
 * capability reads as zero). grp_size 0xFF means size is computed by
 * size_init at run time. */
static const XenPTRegGroupInfo xen_pt_emu_reg_grps[] = {
    /* Header Type0 reg group */
    {
        .grp_id = 0xFF,   /* 0xFF: not a capability, always present */
        .grp_type = XEN_PT_GRP_TYPE_EMU,
        .grp_size = 0x40,
        .size_init = xen_pt_reg_grp_size_init,
        .emu_regs = xen_pt_emu_reg_header0,
    },
    /* PCI PowerManagement Capability reg group */
    {
        .grp_id = PCI_CAP_ID_PM,
        .grp_type = XEN_PT_GRP_TYPE_EMU,
        .grp_size = PCI_PM_SIZEOF,
        .size_init = xen_pt_reg_grp_size_init,
        .emu_regs = xen_pt_emu_reg_pm,
    },
    /* AGP Capability Structure reg group */
    {
        .grp_id = PCI_CAP_ID_AGP,
        .grp_type = XEN_PT_GRP_TYPE_HARDWIRED,
        .grp_size = 0x30,
        .size_init = xen_pt_reg_grp_size_init,
    },
    /* Vital Product Data Capability Structure reg group */
    {
        .grp_id = PCI_CAP_ID_VPD,
        .grp_type = XEN_PT_GRP_TYPE_EMU,
        .grp_size = 0x08,
        .size_init = xen_pt_reg_grp_size_init,
        .emu_regs = xen_pt_emu_reg_vpd,
    },
    /* Slot Identification reg group */
    {
        .grp_id = PCI_CAP_ID_SLOTID,
        .grp_type = XEN_PT_GRP_TYPE_HARDWIRED,
        .grp_size = 0x04,
        .size_init = xen_pt_reg_grp_size_init,
    },
    /* MSI Capability Structure reg group */
    {
        .grp_id = PCI_CAP_ID_MSI,
        .grp_type = XEN_PT_GRP_TYPE_EMU,
        .grp_size = 0xFF,   /* actual size computed by xen_pt_msi_size_init */
        .size_init = xen_pt_msi_size_init,
        .emu_regs = xen_pt_emu_reg_msi,
    },
    /* PCI-X Capabilities List Item reg group */
    {
        .grp_id = PCI_CAP_ID_PCIX,
        .grp_type = XEN_PT_GRP_TYPE_HARDWIRED,
        .grp_size = 0x18,
        .size_init = xen_pt_reg_grp_size_init,
    },
    /* Vendor Specific Capability Structure reg group */
    {
        .grp_id = PCI_CAP_ID_VNDR,
        .grp_type = XEN_PT_GRP_TYPE_EMU,
        .grp_size = 0xFF,   /* size read from the capability's length byte */
        .size_init = xen_pt_vendor_size_init,
        .emu_regs = xen_pt_emu_reg_vendor,
    },
    /* SHPC Capability List Item reg group */
    {
        .grp_id = PCI_CAP_ID_SHPC,
        .grp_type = XEN_PT_GRP_TYPE_HARDWIRED,
        .grp_size = 0x08,
        .size_init = xen_pt_reg_grp_size_init,
    },
    /* Subsystem ID and Subsystem Vendor ID Capability List Item reg group */
    {
        .grp_id = PCI_CAP_ID_SSVID,
        .grp_type = XEN_PT_GRP_TYPE_HARDWIRED,
        .grp_size = 0x08,
        .size_init = xen_pt_reg_grp_size_init,
    },
    /* AGP 8x Capability Structure reg group */
    {
        .grp_id = PCI_CAP_ID_AGP3,
        .grp_type = XEN_PT_GRP_TYPE_HARDWIRED,
        .grp_size = 0x30,
        .size_init = xen_pt_reg_grp_size_init,
    },
    /* PCI Express Capability Structure reg group */
    {
        .grp_id = PCI_CAP_ID_EXP,
        .grp_type = XEN_PT_GRP_TYPE_EMU,
        .grp_size = 0xFF,   /* size depends on cap version / device type */
        .size_init = xen_pt_pcie_size_init,
        .emu_regs = xen_pt_emu_reg_pcie,
    },
    /* MSI-X Capability Structure reg group */
    {
        .grp_id = PCI_CAP_ID_MSIX,
        .grp_type = XEN_PT_GRP_TYPE_EMU,
        .grp_size = 0x0C,
        .size_init = xen_pt_msix_size_init,
        .emu_regs = xen_pt_emu_reg_msix,
    },
    /* table terminator (grp_size == 0) */
    {
        .grp_size = 0,
    },
};
1682 |
|
1683 |
/* initialize Capabilities Pointer or Next Pointer register */
/* Walk the device's capability list starting from the value at real_offset
 * and return (via *data) the first capability that is emulated; hardwired
 * or hidden capabilities are skipped so the guest never sees them. */
static int xen_pt_ptr_reg_init(XenPCIPassthroughState *s,
                               XenPTRegInfo *reg, uint32_t real_offset,
                               uint32_t *data)
{
    int i;
    uint8_t *config = s->dev.config;
    uint32_t reg_field = pci_get_byte(config + real_offset);
    uint8_t cap_id = 0;

    /* find capability offset */
    while (reg_field) {
        for (i = 0; xen_pt_emu_reg_grps[i].grp_size != 0; i++) {
            /* capabilities hidden for this specific device don't count */
            if (xen_pt_hide_dev_cap(&s->real_device,
                                    xen_pt_emu_reg_grps[i].grp_id)) {
                continue;
            }

            cap_id = pci_get_byte(config + reg_field + PCI_CAP_LIST_ID);
            if (xen_pt_emu_reg_grps[i].grp_id == cap_id) {
                if (xen_pt_emu_reg_grps[i].grp_type == XEN_PT_GRP_TYPE_EMU) {
                    /* emulated capability found: expose this pointer */
                    goto out;
                }
                /* ignore the 0 hardwired capability, find next one */
                break;
            }
        }

        /* next capability */
        reg_field = pci_get_byte(config + reg_field + PCI_CAP_LIST_NEXT);
    }

out:
    /* 0 (end of list) if no emulated capability was found */
    *data = reg_field;
    return 0;
}
1719 |
|
1720 |
|
1721 |
/*************
|
1722 |
* Main
|
1723 |
*/
|
1724 |
|
1725 |
/* Find the config-space offset of capability 'cap' on the real device by
 * walking its capability list. Returns 0 if the capability is absent or
 * any config read fails. Bounded by PCI_CAP_MAX to survive broken lists. */
static uint8_t find_cap_offset(XenPCIPassthroughState *s, uint8_t cap)
{
    uint8_t id;
    unsigned max_cap = PCI_CAP_MAX;  /* loop bound against cyclic lists */
    uint8_t pos = PCI_CAPABILITY_LIST;
    uint8_t status = 0;

    if (xen_host_pci_get_byte(&s->real_device, PCI_STATUS, &status)) {
        return 0;
    }
    /* device advertises no capability list at all */
    if ((status & PCI_STATUS_CAP_LIST) == 0) {
        return 0;
    }

    while (max_cap--) {
        /* follow the pointer chain; each entry holds the next offset */
        if (xen_host_pci_get_byte(&s->real_device, pos, &pos)) {
            break;
        }
        /* pointers into the standard header terminate the walk */
        if (pos < PCI_CONFIG_HEADER_SIZE) {
            break;
        }

        pos &= ~3;  /* capability pointers are dword aligned */
        if (xen_host_pci_get_byte(&s->real_device,
                                  pos + PCI_CAP_LIST_ID, &id)) {
            break;
        }

        /* 0xff means the read hit unimplemented space */
        if (id == 0xff) {
            break;
        }
        if (id == cap) {
            return pos;
        }

        pos += PCI_CAP_LIST_NEXT;
    }
    return 0;
}
1764 |
|
1765 |
static int xen_pt_config_reg_init(XenPCIPassthroughState *s, |
1766 |
XenPTRegGroup *reg_grp, XenPTRegInfo *reg) |
1767 |
{ |
1768 |
XenPTReg *reg_entry; |
1769 |
uint32_t data = 0;
|
1770 |
int rc = 0; |
1771 |
|
1772 |
reg_entry = g_new0(XenPTReg, 1);
|
1773 |
reg_entry->reg = reg; |
1774 |
|
1775 |
if (reg->init) {
|
1776 |
/* initialize emulate register */
|
1777 |
rc = reg->init(s, reg_entry->reg, |
1778 |
reg_grp->base_offset + reg->offset, &data); |
1779 |
if (rc < 0) { |
1780 |
free(reg_entry); |
1781 |
return rc;
|
1782 |
} |
1783 |
if (data == XEN_PT_INVALID_REG) {
|
1784 |
/* free unused BAR register entry */
|
1785 |
free(reg_entry); |
1786 |
return 0; |
1787 |
} |
1788 |
/* set register value */
|
1789 |
reg_entry->data = data; |
1790 |
} |
1791 |
/* list add register entry */
|
1792 |
QLIST_INSERT_HEAD(®_grp->reg_tbl_list, reg_entry, entries); |
1793 |
|
1794 |
return 0; |
1795 |
} |
1796 |
|
1797 |
/* Build the full emulated config-space model for the device: one register
 * group per entry of xen_pt_emu_reg_grps that the device actually exposes,
 * plus the per-register entries for EMU groups. On any failure the partial
 * state is torn down with xen_pt_config_delete(). Returns 0 or a negative
 * error code. */
int xen_pt_config_init(XenPCIPassthroughState *s)
{
    int i, rc;

    QLIST_INIT(&s->reg_grps);

    for (i = 0; xen_pt_emu_reg_grps[i].grp_size != 0; i++) {
        uint32_t reg_grp_offset = 0;
        XenPTRegGroup *reg_grp_entry = NULL;

        /* 0xFF is the type-0 header pseudo-group: always present at
         * offset 0, so no capability lookup is needed */
        if (xen_pt_emu_reg_grps[i].grp_id != 0xFF) {
            if (xen_pt_hide_dev_cap(&s->real_device,
                                    xen_pt_emu_reg_grps[i].grp_id)) {
                continue;
            }

            reg_grp_offset = find_cap_offset(s, xen_pt_emu_reg_grps[i].grp_id);

            /* the real device does not implement this capability */
            if (!reg_grp_offset) {
                continue;
            }
        }

        reg_grp_entry = g_new0(XenPTRegGroup, 1);
        QLIST_INIT(&reg_grp_entry->reg_tbl_list);
        /* insert before init so xen_pt_config_delete() can clean it up */
        QLIST_INSERT_HEAD(&s->reg_grps, reg_grp_entry, entries);

        reg_grp_entry->base_offset = reg_grp_offset;
        reg_grp_entry->reg_grp = xen_pt_emu_reg_grps + i;
        if (xen_pt_emu_reg_grps[i].size_init) {
            /* get register group size */
            rc = xen_pt_emu_reg_grps[i].size_init(s, reg_grp_entry->reg_grp,
                                                  reg_grp_offset,
                                                  &reg_grp_entry->size);
            if (rc < 0) {
                xen_pt_config_delete(s);
                return rc;
            }
        }

        if (xen_pt_emu_reg_grps[i].grp_type == XEN_PT_GRP_TYPE_EMU) {
            if (xen_pt_emu_reg_grps[i].emu_regs) {
                int j = 0;
                XenPTRegInfo *regs = xen_pt_emu_reg_grps[i].emu_regs;
                /* initialize each capability register until the size == 0
                 * table terminator */
                for (j = 0; regs->size != 0; j++, regs++) {
                    /* initialize capability register */
                    rc = xen_pt_config_reg_init(s, reg_grp_entry, regs);
                    if (rc < 0) {
                        xen_pt_config_delete(s);
                        return rc;
                    }
                }
            }
        }
    }

    return 0;
}
1856 |
|
1857 |
/* delete all emulate register */
|
1858 |
void xen_pt_config_delete(XenPCIPassthroughState *s)
|
1859 |
{ |
1860 |
struct XenPTRegGroup *reg_group, *next_grp;
|
1861 |
struct XenPTReg *reg, *next_reg;
|
1862 |
|
1863 |
/* free MSI/MSI-X info table */
|
1864 |
if (s->msix) {
|
1865 |
xen_pt_msix_delete(s); |
1866 |
} |
1867 |
if (s->msi) {
|
1868 |
g_free(s->msi); |
1869 |
} |
1870 |
|
1871 |
/* free all register group entry */
|
1872 |
QLIST_FOREACH_SAFE(reg_group, &s->reg_grps, entries, next_grp) { |
1873 |
/* free all register entry */
|
1874 |
QLIST_FOREACH_SAFE(reg, ®_group->reg_tbl_list, entries, next_reg) { |
1875 |
QLIST_REMOVE(reg, entries); |
1876 |
g_free(reg); |
1877 |
} |
1878 |
|
1879 |
QLIST_REMOVE(reg_group, entries); |
1880 |
g_free(reg_group); |
1881 |
} |
1882 |
} |