root / hw / net / vmxnet3.c @ 49ab747f
History | View | Annotate | Download (71.5 kB)
1 |
/*
|
---|---|
2 |
* QEMU VMWARE VMXNET3 paravirtual NIC
|
3 |
*
|
4 |
* Copyright (c) 2012 Ravello Systems LTD (http://ravellosystems.com)
|
5 |
*
|
6 |
* Developed by Daynix Computing LTD (http://www.daynix.com)
|
7 |
*
|
8 |
* Authors:
|
9 |
* Dmitry Fleytman <dmitry@daynix.com>
|
10 |
* Tamir Shomer <tamirs@daynix.com>
|
11 |
* Yan Vugenfirer <yan@daynix.com>
|
12 |
*
|
13 |
* This work is licensed under the terms of the GNU GPL, version 2.
|
14 |
* See the COPYING file in the top-level directory.
|
15 |
*
|
16 |
*/
|
17 |
|
18 |
#include "hw/hw.h" |
19 |
#include "hw/pci/pci.h" |
20 |
#include "net/net.h" |
21 |
#include "net/tap.h" |
22 |
#include "net/checksum.h" |
23 |
#include "sysemu/sysemu.h" |
24 |
#include "qemu-common.h" |
25 |
#include "qemu/bswap.h" |
26 |
#include "hw/pci/msix.h" |
27 |
#include "hw/pci/msi.h" |
28 |
|
29 |
#include "vmxnet3.h" |
30 |
#include "vmxnet_debug.h" |
31 |
#include "vmware_utils.h" |
32 |
#include "vmxnet_tx_pkt.h" |
33 |
#include "vmxnet_rx_pkt.h" |
34 |
|
35 |
#define PCI_DEVICE_ID_VMWARE_VMXNET3_REVISION 0x1
/* Size of the BAR that hosts the MSI-X table and PBA. */
#define VMXNET3_MSIX_BAR_SIZE 0x2000

/* PCI BAR indices used by this device. */
#define VMXNET3_BAR0_IDX (0)
#define VMXNET3_BAR1_IDX (1)
#define VMXNET3_MSIX_BAR_IDX (2)

/* Offsets of the MSI-X table and pending-bit array within the MSI-X BAR. */
#define VMXNET3_OFF_MSIX_TABLE (0x000)
#define VMXNET3_OFF_MSIX_PBA (0x800)

/* Link speed in Mbps should be shifted by 16 */
#define VMXNET3_LINK_SPEED (1000 << 16)

/* Link status: 1 - up, 0 - down. */
#define VMXNET3_LINK_STATUS_UP 0x1

/* Least significant bit should be set for revision and version */
#define VMXNET3_DEVICE_VERSION 0x1
#define VMXNET3_DEVICE_REVISION 0x1
54 |
|
55 |
/* Macros for rings descriptors access */
/*
 * Load/store a field of a TX/RX queue descriptor structure located in
 * guest shared memory at guest-physical address 'dpa'.  The width
 * suffix (8/32/64) must match the field's declared size.
 */
#define VMXNET3_READ_TX_QUEUE_DESCR8(dpa, field) \
    (vmw_shmem_ld8(dpa + offsetof(struct Vmxnet3_TxQueueDesc, field)))

/*
 * BUGFIX: 'value' was previously passed as a (bogus) third argument to
 * offsetof() instead of as the second argument of vmw_shmem_st8(),
 * which fails to compile as soon as the macro is instantiated.
 */
#define VMXNET3_WRITE_TX_QUEUE_DESCR8(dpa, field, value) \
    (vmw_shmem_st8(dpa + offsetof(struct Vmxnet3_TxQueueDesc, field), value))

#define VMXNET3_READ_TX_QUEUE_DESCR32(dpa, field) \
    (vmw_shmem_ld32(dpa + offsetof(struct Vmxnet3_TxQueueDesc, field)))

#define VMXNET3_WRITE_TX_QUEUE_DESCR32(dpa, field, value) \
    (vmw_shmem_st32(dpa + offsetof(struct Vmxnet3_TxQueueDesc, field), value))

#define VMXNET3_READ_TX_QUEUE_DESCR64(dpa, field) \
    (vmw_shmem_ld64(dpa + offsetof(struct Vmxnet3_TxQueueDesc, field)))

#define VMXNET3_WRITE_TX_QUEUE_DESCR64(dpa, field, value) \
    (vmw_shmem_st64(dpa + offsetof(struct Vmxnet3_TxQueueDesc, field), value))

#define VMXNET3_READ_RX_QUEUE_DESCR64(dpa, field) \
    (vmw_shmem_ld64(dpa + offsetof(struct Vmxnet3_RxQueueDesc, field)))

#define VMXNET3_READ_RX_QUEUE_DESCR32(dpa, field) \
    (vmw_shmem_ld32(dpa + offsetof(struct Vmxnet3_RxQueueDesc, field)))

#define VMXNET3_WRITE_RX_QUEUE_DESCR64(dpa, field, value) \
    (vmw_shmem_st64(dpa + offsetof(struct Vmxnet3_RxQueueDesc, field), value))

#define VMXNET3_WRITE_RX_QUEUE_DESCR8(dpa, field, value) \
    (vmw_shmem_st8(dpa + offsetof(struct Vmxnet3_RxQueueDesc, field), value))
|
85 |
|
86 |
/* Macros for guest driver shared area access */
/*
 * Load/store a field of struct Vmxnet3_DriverShared located in guest
 * memory at guest-physical address 'shpa'.  Width suffix must match
 * the field's declared size.
 */
#define VMXNET3_READ_DRV_SHARED64(shpa, field) \
    (vmw_shmem_ld64(shpa + offsetof(struct Vmxnet3_DriverShared, field)))

#define VMXNET3_READ_DRV_SHARED32(shpa, field) \
    (vmw_shmem_ld32(shpa + offsetof(struct Vmxnet3_DriverShared, field)))

#define VMXNET3_WRITE_DRV_SHARED32(shpa, field, val) \
    (vmw_shmem_st32(shpa + offsetof(struct Vmxnet3_DriverShared, field), val))

#define VMXNET3_READ_DRV_SHARED16(shpa, field) \
    (vmw_shmem_ld16(shpa + offsetof(struct Vmxnet3_DriverShared, field)))

#define VMXNET3_READ_DRV_SHARED8(shpa, field) \
    (vmw_shmem_ld8(shpa + offsetof(struct Vmxnet3_DriverShared, field)))

/* Bulk-copy 'l' bytes starting at a shared-area field into buffer 'b'. */
#define VMXNET3_READ_DRV_SHARED(shpa, field, b, l) \
    (vmw_shmem_read(shpa + offsetof(struct Vmxnet3_DriverShared, field), b, l))

/* True iff every bit of 'flag' is set in 'field'. */
#define VMXNET_FLAG_IS_SET(field, flag) (((field) & (flag)) == (flag))
|
106 |
|
107 |
#define TYPE_VMXNET3 "vmxnet3"
#define VMXNET3(obj) OBJECT_CHECK(VMXNET3State, (obj), TYPE_VMXNET3)

/* Cyclic ring abstraction */
/*
 * A descriptor ring living in guest memory at 'pa': 'size' cells of
 * 'cell_size' bytes each.  'next' is the index of the current cell and
 * 'gen' is the generation bit that flips on every wrap-around (used to
 * distinguish device-owned from guest-owned descriptors).
 */
typedef struct {
    hwaddr pa;        /* guest-physical base address of the ring */
    size_t size;      /* number of cells in the ring */
    size_t cell_size; /* bytes per cell */
    size_t next;      /* index of the current cell */
    uint8_t gen;      /* current generation bit; toggles on wrap */
} Vmxnet3Ring;
118 |
|
119 |
static inline void vmxnet3_ring_init(Vmxnet3Ring *ring, |
120 |
hwaddr pa, |
121 |
size_t size, |
122 |
size_t cell_size, |
123 |
bool zero_region)
|
124 |
{ |
125 |
ring->pa = pa; |
126 |
ring->size = size; |
127 |
ring->cell_size = cell_size; |
128 |
ring->gen = VMXNET3_INIT_GEN; |
129 |
ring->next = 0;
|
130 |
|
131 |
if (zero_region) {
|
132 |
vmw_shmem_set(pa, 0, size * cell_size);
|
133 |
} |
134 |
} |
135 |
|
136 |
/*
 * Dump a ring's state through the given trace macro.
 * BUGFIX: size_t fields ('size', 'cell_size', 'next') were printed with
 * %lu, which is wrong on platforms where size_t is not unsigned long
 * (e.g. 64-bit Windows/LLP64); use the portable %zu instead.
 */
#define VMXNET3_RING_DUMP(macro, ring_name, ridx, r) \
    macro("%s#%d: base %" PRIx64 " size %zu cell_size %zu gen %d next %zu", \
          (ring_name), (ridx), \
          (r)->pa, (r)->size, (r)->cell_size, (r)->gen, (r)->next)
140 |
|
141 |
/* Advance the ring to the next cell; wrap to cell 0 and toggle the
 * generation bit when the end of the ring is passed. */
static inline void vmxnet3_ring_inc(Vmxnet3Ring *ring)
{
    ring->next++;
    if (ring->next >= ring->size) {
        ring->next = 0;
        ring->gen ^= 1;
    }
}
148 |
|
149 |
/* Step the ring back one cell; stepping back across cell 0 wraps to
 * the last cell and toggles the generation bit. */
static inline void vmxnet3_ring_dec(Vmxnet3Ring *ring)
{
    if (ring->next == 0) {
        ring->next = ring->size - 1;
        ring->gen ^= 1;
    } else {
        ring->next--;
    }
}
156 |
|
157 |
/* Guest-physical address of the ring's current cell. */
static inline hwaddr vmxnet3_ring_curr_cell_pa(Vmxnet3Ring *ring)
{
    return ring->pa + ring->next * ring->cell_size;
}

/* Copy the current cell (cell_size bytes) from guest memory into 'buff'. */
static inline void vmxnet3_ring_read_curr_cell(Vmxnet3Ring *ring, void *buff)
{
    vmw_shmem_read(vmxnet3_ring_curr_cell_pa(ring), buff, ring->cell_size);
}

/* Copy 'buff' (cell_size bytes) into the current cell in guest memory. */
static inline void vmxnet3_ring_write_curr_cell(Vmxnet3Ring *ring, void *buff)
{
    vmw_shmem_write(vmxnet3_ring_curr_cell_pa(ring), buff, ring->cell_size);
}

/* Index of the ring's current cell. */
static inline size_t vmxnet3_ring_curr_cell_idx(Vmxnet3Ring *ring)
{
    return ring->next;
}

/* Current generation bit of the ring. */
static inline uint8_t vmxnet3_ring_curr_gen(Vmxnet3Ring *ring)
{
    return ring->gen;
}
181 |
|
182 |
/* Debug trace-related functions */

/* Trace all fields of a TX descriptor (VMW_PKPRN may compile out). */
static inline void
vmxnet3_dump_tx_descr(struct Vmxnet3_TxDesc *descr)
{
    VMW_PKPRN("TX DESCR: "
              "addr %" PRIx64 ", len: %d, gen: %d, rsvd: %d, "
              "dtype: %d, ext1: %d, msscof: %d, hlen: %d, om: %d, "
              "eop: %d, cq: %d, ext2: %d, ti: %d, tci: %d",
              le64_to_cpu(descr->addr), descr->len, descr->gen, descr->rsvd,
              descr->dtype, descr->ext1, descr->msscof, descr->hlen, descr->om,
              descr->eop, descr->cq, descr->ext2, descr->ti, descr->tci);
}

/* Trace a virtio-net header used for offload information. */
static inline void
vmxnet3_dump_virt_hdr(struct virtio_net_hdr *vhdr)
{
    VMW_PKPRN("VHDR: flags 0x%x, gso_type: 0x%x, hdr_len: %d, gso_size: %d, "
              "csum_start: %d, csum_offset: %d",
              vhdr->flags, vhdr->gso_type, vhdr->hdr_len, vhdr->gso_size,
              vhdr->csum_start, vhdr->csum_offset);
}

/* Trace all fields of an RX descriptor. */
static inline void
vmxnet3_dump_rx_descr(struct Vmxnet3_RxDesc *descr)
{
    VMW_PKPRN("RX DESCR: addr %" PRIx64 ", len: %d, gen: %d, rsvd: %d, "
              "dtype: %d, ext1: %d, btype: %d",
              le64_to_cpu(descr->addr), descr->len, descr->gen,
              descr->rsvd, descr->dtype, descr->ext1, descr->btype);
}
212 |
|
213 |
/* Device state and helper functions */
#define VMXNET3_RX_RINGS_PER_QUEUE (2)

/*
 * Per-TX-queue state: the descriptor ring the guest fills, the
 * completion ring the device fills, the interrupt vector used for
 * completions and the guest address/shadow copy of the statistics.
 */
typedef struct {
    Vmxnet3Ring tx_ring;
    Vmxnet3Ring comp_ring;

    uint8_t intr_idx;              /* interrupt vector for this queue */
    hwaddr tx_stats_pa;            /* guest address of UPT1_TxStats */
    struct UPT1_TxStats txq_stats; /* device-side copy of the stats */
} Vmxnet3TxqDescr;

/*
 * Per-RX-queue state: two descriptor rings (head/body and body-only),
 * a completion ring, interrupt vector and statistics.
 */
typedef struct {
    Vmxnet3Ring rx_ring[VMXNET3_RX_RINGS_PER_QUEUE];
    Vmxnet3Ring comp_ring;
    uint8_t intr_idx;
    hwaddr rx_stats_pa;
    struct UPT1_RxStats rxq_stats;
} Vmxnet3RxqDescr;

/* Software-tracked state of a single interrupt vector/line. */
typedef struct {
    bool is_masked;   /* vector currently masked by the driver */
    bool is_pending;  /* an event awaits delivery */
    bool is_asserted; /* the (INTx) line is currently raised */
} Vmxnet3IntState;
238 |
|
239 |
/* Complete state of one emulated vmxnet3 NIC. */
typedef struct {
    PCIDevice parent_obj;
    NICState *nic;
    NICConf conf;
    MemoryRegion bar0;
    MemoryRegion bar1;
    MemoryRegion msix_bar;

    Vmxnet3RxqDescr rxq_descr[VMXNET3_DEVICE_MAX_RX_QUEUES];
    Vmxnet3TxqDescr txq_descr[VMXNET3_DEVICE_MAX_TX_QUEUES];

    /* Whether MSI-X support was installed successfully */
    bool msix_used;
    /* Whether MSI support was installed successfully */
    bool msi_used;
    /* Guest address of the driver-shared memory area */
    hwaddr drv_shmem;
    hwaddr temp_shared_guest_driver_memory;

    uint8_t txq_num;

    /* This boolean tells whether RX packet being indicated has to */
    /* be split into head and body chunks from different RX rings */
    bool rx_packets_compound;

    bool rx_vlan_stripping;
    bool lro_supported;

    uint8_t rxq_num;

    /* Network MTU */
    uint32_t mtu;

    /* Maximum number of fragments for indicated TX packets */
    uint32_t max_tx_frags;

    /* Maximum number of fragments for indicated RX packets */
    uint16_t max_rx_frags;

    /* Index for events interrupt */
    uint8_t event_int_idx;

    /* Whether automatic interrupts masking enabled */
    bool auto_int_masking;

    bool peer_has_vhdr;

    /* TX packets to QEMU interface */
    struct VmxnetTxPkt *tx_pkt;
    uint32_t offload_mode;    /* VMXNET3_OM_* mode of the packet in flight */
    uint32_t cso_or_gso_size; /* csum offset or GSO size (txd->msscof) */
    uint16_t tci;             /* VLAN tag of the packet in flight */
    bool needs_vlan;          /* whether a VLAN header must be inserted */

    struct VmxnetRxPkt *rx_pkt;

    bool tx_sop;              /* next TX descriptor starts a new packet */
    bool skip_current_tx_pkt; /* drop the packet currently being built */

    uint32_t device_active;
    uint32_t last_command;

    uint32_t link_status_and_speed;

    Vmxnet3IntState interrupt_states[VMXNET3_MAX_INTRS];

    uint32_t temp_mac; /* To store the low part first */

    MACAddr perm_mac;
    uint32_t vlan_table[VMXNET3_VFT_SIZE];
    uint32_t rx_mode;
    MACAddr *mcast_list;
    uint32_t mcast_list_len;
    uint32_t mcast_list_buff_size; /* needed for live migration. */
} VMXNET3State;
313 |
|
314 |
/* Interrupt management */

/*
 * Deliver interrupt 'int_idx' and return whether the line remains
 * asserted afterwards.  This depends on the interrupt type in use:
 * for INTx the line stays asserted until explicit deassertion; for
 * MSI(X) the line is considered deasserted automatically due to the
 * notification (message) semantics of MSI(X) interrupts.
 */
static bool _vmxnet3_assert_interrupt_line(VMXNET3State *s, uint32_t int_idx)
{
    PCIDevice *d = PCI_DEVICE(s);

    if (s->msix_used && msix_enabled(d)) {
        VMW_IRPRN("Sending MSI-X notification for vector %u", int_idx);
        msix_notify(d, int_idx);
        return false;
    }
    if (s->msi_used && msi_enabled(d)) {
        VMW_IRPRN("Sending MSI notification for vector %u", int_idx);
        msi_notify(d, int_idx);
        return false;
    }

    /* Legacy INTx: raise the line; it stays up until deasserted. */
    VMW_IRPRN("Asserting line for interrupt %u", int_idx);
    qemu_set_irq(d->irq[int_idx], 1);
    return true;
}
342 |
|
343 |
/* Lower the INTx line for interrupt 'lidx'. */
static void _vmxnet3_deassert_interrupt_line(VMXNET3State *s, int lidx)
{
    PCIDevice *d = PCI_DEVICE(s);

    /*
     * This function should never be called for MSI(X) interrupts
     * because deassertion never required for message interrupts
     */
    assert(!s->msix_used || !msix_enabled(d));
    /* Same invariant for plain MSI. */
    assert(!s->msi_used || !msi_enabled(d));

    VMW_IRPRN("Deasserting line for interrupt %u", lidx);
    qemu_set_irq(d->irq[lidx], 0);
}
361 |
|
362 |
/*
 * Re-evaluate the physical state of interrupt line 'lidx' from its
 * software state: deassert when nothing is pending any more, assert
 * when a pending, unmasked event exists and the line is still down.
 * The two conditions are mutually exclusive (they disagree on
 * is_pending), so they are checked as an else-if chain.
 */
static void vmxnet3_update_interrupt_line_state(VMXNET3State *s, int lidx)
{
    Vmxnet3IntState *st = &s->interrupt_states[lidx];

    if (!st->is_pending && st->is_asserted) {
        /* Event consumed -- take the line down. */
        VMW_IRPRN("New interrupt line state for index %d is DOWN", lidx);
        _vmxnet3_deassert_interrupt_line(s, lidx);
        st->is_asserted = false;
    } else if (st->is_pending && !st->is_masked && !st->is_asserted) {
        /* Deliverable event -- raise the line / send the message.
         * For MSI(X) the assert helper reports the line as not kept
         * asserted, and the pending flag is cleared by delivery. */
        VMW_IRPRN("New interrupt line state for index %d is UP", lidx);
        st->is_asserted = _vmxnet3_assert_interrupt_line(s, lidx);
        st->is_pending = false;
    }
}
382 |
|
383 |
/*
 * Raise interrupt 'lidx': mark it pending, update the line, and -- when
 * message-signalled interrupts are active together with automatic
 * interrupt masking -- immediately mask the vector again, mirroring the
 * hardware's auto-mask-on-delivery behaviour.
 */
static void vmxnet3_trigger_interrupt(VMXNET3State *s, int lidx)
{
    PCIDevice *d = PCI_DEVICE(s);
    bool msg_ints;

    s->interrupt_states[lidx].is_pending = true;
    vmxnet3_update_interrupt_line_state(s, lidx);

    /* Auto-masking only applies to MSI/MSI-X delivery. */
    msg_ints = (s->msix_used && msix_enabled(d)) ||
               (s->msi_used && msi_enabled(d));

    if (msg_ints && s->auto_int_masking) {
        s->interrupt_states[lidx].is_masked = true;
        vmxnet3_update_interrupt_line_state(s, lidx);
    }
}
403 |
|
404 |
/* Whether the line for interrupt 'lidx' is currently asserted. */
static bool vmxnet3_interrupt_asserted(VMXNET3State *s, int lidx)
{
    return s->interrupt_states[lidx].is_asserted;
}
408 |
|
409 |
/*
 * Acknowledge interrupt 'int_idx': drop the pending flag (also masking
 * the vector when auto-masking is enabled) and re-evaluate the line.
 */
static void vmxnet3_clear_interrupt(VMXNET3State *s, int int_idx)
{
    Vmxnet3IntState *st = &s->interrupt_states[int_idx];

    st->is_pending = false;
    if (s->auto_int_masking) {
        /* Auto-masking also masks the vector on acknowledge. */
        st->is_masked = true;
    }
    vmxnet3_update_interrupt_line_state(s, int_idx);
}
417 |
|
418 |
/* Driver (un)masked vector 'lidx' -- record it and update the line. */
static void
vmxnet3_on_interrupt_mask_changed(VMXNET3State *s, int lidx, bool is_masked)
{
    s->interrupt_states[lidx].is_masked = is_masked;
    vmxnet3_update_interrupt_line_state(s, lidx);
}
424 |
|
425 |
/* Check the revision-1 magic value in the guest driver shared area. */
static bool vmxnet3_verify_driver_magic(hwaddr dshmem)
{
    return (VMXNET3_READ_DRV_SHARED32(dshmem, magic) == VMXNET3_REV1_MAGIC);
}
429 |
|
430 |
/* Extract byte 'byte_num' (0 = least significant) of integer 'x'. */
#define VMXNET3_GET_BYTE(x, byte_num) (((x) >> (byte_num)*8) & 0xFF)
/* Place 'val' as byte 'byte_num' of a 32-bit value. */
#define VMXNET3_MAKE_BYTE(byte_num, val) \
    (((uint32_t)((val) & 0xFF)) << (byte_num)*8)
433 |
|
434 |
/*
 * Assemble the configured MAC address from the two words the guest
 * driver wrote: 'l' carries bytes 0-3, 'h' carries bytes 4-5.  The new
 * address is propagated to the NIC's info string.
 */
static void vmxnet3_set_variable_mac(VMXNET3State *s, uint32_t h, uint32_t l)
{
    s->conf.macaddr.a[0] = VMXNET3_GET_BYTE(l, 0);
    s->conf.macaddr.a[1] = VMXNET3_GET_BYTE(l, 1);
    s->conf.macaddr.a[2] = VMXNET3_GET_BYTE(l, 2);
    s->conf.macaddr.a[3] = VMXNET3_GET_BYTE(l, 3);
    s->conf.macaddr.a[4] = VMXNET3_GET_BYTE(h, 0);
    s->conf.macaddr.a[5] = VMXNET3_GET_BYTE(h, 1);

    VMW_CFPRN("Variable MAC: " VMXNET_MF, VMXNET_MA(s->conf.macaddr.a));

    qemu_format_nic_info_str(qemu_get_queue(s->nic), s->conf.macaddr.a);
}
447 |
|
448 |
/* Pack MAC bytes 0..3 little-endian into the low dword of the result. */
static uint64_t vmxnet3_get_mac_low(MACAddr *addr)
{
    uint64_t res = 0;
    int i;

    for (i = 0; i < 4; i++) {
        res |= VMXNET3_MAKE_BYTE(i, addr->a[i]);
    }
    return res;
}

/* Pack MAC bytes 4..5 into the low word of the result. */
static uint64_t vmxnet3_get_mac_high(MACAddr *addr)
{
    return VMXNET3_MAKE_BYTE(0, addr->a[4]) |
           VMXNET3_MAKE_BYTE(1, addr->a[5]);
}
461 |
|
462 |
/* Advance the TX descriptor ring consumption pointer of queue 'qidx'. */
static void
vmxnet3_inc_tx_consumption_counter(VMXNET3State *s, int qidx)
{
    vmxnet3_ring_inc(&s->txq_descr[qidx].tx_ring);
}

/* Advance RX ring 'ridx' consumption pointer of queue 'qidx'. */
static inline void
vmxnet3_inc_rx_consumption_counter(VMXNET3State *s, int qidx, int ridx)
{
    vmxnet3_ring_inc(&s->rxq_descr[qidx].rx_ring[ridx]);
}

/* Advance the TX completion ring pointer of queue 'qidx'. */
static inline void
vmxnet3_inc_tx_completion_counter(VMXNET3State *s, int qidx)
{
    vmxnet3_ring_inc(&s->txq_descr[qidx].comp_ring);
}

/* Advance the RX completion ring pointer of queue 'qidx'. */
static void
vmxnet3_inc_rx_completion_counter(VMXNET3State *s, int qidx)
{
    vmxnet3_ring_inc(&s->rxq_descr[qidx].comp_ring);
}

/* Step the RX completion ring pointer back (undo a speculative pop). */
static void
vmxnet3_dec_rx_completion_counter(VMXNET3State *s, int qidx)
{
    vmxnet3_ring_dec(&s->rxq_descr[qidx].comp_ring);
}
491 |
|
492 |
/*
 * Write a TX completion descriptor for descriptor index 'tx_ridx' of
 * queue 'qidx' into the completion ring and raise the queue's
 * interrupt.
 */
static void vmxnet3_complete_packet(VMXNET3State *s, int qidx, uint32 tx_ridx)
{
    struct Vmxnet3_TxCompDesc txcq_descr;

    VMXNET3_RING_DUMP(VMW_RIPRN, "TXC", qidx, &s->txq_descr[qidx].comp_ring);

    /*
     * BUGFIX: zero the whole descriptor before filling it.  Only
     * txdIdx and gen are set explicitly; without the memset the
     * remaining fields carried uninitialized stack bytes into
     * guest-visible memory.
     */
    memset(&txcq_descr, 0, sizeof(txcq_descr));
    txcq_descr.txdIdx = tx_ridx;
    txcq_descr.gen = vmxnet3_ring_curr_gen(&s->txq_descr[qidx].comp_ring);

    vmxnet3_ring_write_curr_cell(&s->txq_descr[qidx].comp_ring, &txcq_descr);

    /* Flush changes in TX descriptor before changing the counter value */
    smp_wmb();

    vmxnet3_inc_tx_completion_counter(s, qidx);
    vmxnet3_trigger_interrupt(s, s->txq_descr[qidx].intr_idx);
}
509 |
|
510 |
static bool |
511 |
vmxnet3_setup_tx_offloads(VMXNET3State *s) |
512 |
{ |
513 |
switch (s->offload_mode) {
|
514 |
case VMXNET3_OM_NONE:
|
515 |
vmxnet_tx_pkt_build_vheader(s->tx_pkt, false, false, 0); |
516 |
break;
|
517 |
|
518 |
case VMXNET3_OM_CSUM:
|
519 |
vmxnet_tx_pkt_build_vheader(s->tx_pkt, false, true, 0); |
520 |
VMW_PKPRN("L4 CSO requested\n");
|
521 |
break;
|
522 |
|
523 |
case VMXNET3_OM_TSO:
|
524 |
vmxnet_tx_pkt_build_vheader(s->tx_pkt, true, true, |
525 |
s->cso_or_gso_size); |
526 |
vmxnet_tx_pkt_update_ip_checksums(s->tx_pkt); |
527 |
VMW_PKPRN("GSO offload requested.");
|
528 |
break;
|
529 |
|
530 |
default:
|
531 |
assert(false);
|
532 |
return false; |
533 |
} |
534 |
|
535 |
return true; |
536 |
} |
537 |
|
538 |
static void |
539 |
vmxnet3_tx_retrieve_metadata(VMXNET3State *s, |
540 |
const struct Vmxnet3_TxDesc *txd) |
541 |
{ |
542 |
s->offload_mode = txd->om; |
543 |
s->cso_or_gso_size = txd->msscof; |
544 |
s->tci = txd->tci; |
545 |
s->needs_vlan = txd->ti; |
546 |
} |
547 |
|
548 |
/* Outcome of indicating a packet to the network / to the guest. */
typedef enum {
    VMXNET3_PKT_STATUS_OK,
    VMXNET3_PKT_STATUS_ERROR,
    VMXNET3_PKT_STATUS_DISCARD,/* only for tx */
    VMXNET3_PKT_STATUS_OUT_OF_BUF /* only for rx */
} Vmxnet3PktStatus;
554 |
|
555 |
/*
 * Update queue 'qidx' TX statistics for the packet currently held in
 * s->tx_pkt, according to its final transmit 'status'.
 */
static void
vmxnet3_on_tx_done_update_stats(VMXNET3State *s, int qidx,
                                Vmxnet3PktStatus status)
{
    size_t tot_len = vmxnet_tx_pkt_get_total_len(s->tx_pkt);
    struct UPT1_TxStats *stats = &s->txq_descr[qidx].txq_stats;

    switch (status) {
    case VMXNET3_PKT_STATUS_OK:
        /* Account by destination class (broadcast/multicast/unicast). */
        switch (vmxnet_tx_pkt_get_packet_type(s->tx_pkt)) {
        case ETH_PKT_BCAST:
            stats->bcastPktsTxOK++;
            stats->bcastBytesTxOK += tot_len;
            break;
        case ETH_PKT_MCAST:
            stats->mcastPktsTxOK++;
            stats->mcastBytesTxOK += tot_len;
            break;
        case ETH_PKT_UCAST:
            stats->ucastPktsTxOK++;
            stats->ucastBytesTxOK += tot_len;
            break;
        default:
            assert(false);
        }

        if (s->offload_mode == VMXNET3_OM_TSO) {
            /*
             * According to VMWARE headers this statistic is a number
             * of packets after segmentation but since we don't have
             * this information in QEMU model, the best we can do is to
             * provide number of non-segmented packets
             */
            stats->TSOPktsTxOK++;
            stats->TSOBytesTxOK += tot_len;
        }
        break;

    case VMXNET3_PKT_STATUS_DISCARD:
        stats->pktsTxDiscard++;
        break;

    case VMXNET3_PKT_STATUS_ERROR:
        stats->pktsTxError++;
        break;

    default:
        assert(false);
    }
}
605 |
|
606 |
/*
 * Update queue 'qidx' RX statistics for the packet currently held in
 * s->rx_pkt, according to its indication 'status'.
 */
static void
vmxnet3_on_rx_done_update_stats(VMXNET3State *s,
                                int qidx,
                                Vmxnet3PktStatus status)
{
    struct UPT1_RxStats *stats = &s->rxq_descr[qidx].rxq_stats;
    size_t tot_len = vmxnet_rx_pkt_get_total_len(s->rx_pkt);

    switch (status) {
    case VMXNET3_PKT_STATUS_OUT_OF_BUF:
        stats->pktsRxOutOfBuf++;
        break;

    case VMXNET3_PKT_STATUS_ERROR:
        stats->pktsRxError++;
        break;
    case VMXNET3_PKT_STATUS_OK:
        /* Account by destination class (broadcast/multicast/unicast). */
        switch (vmxnet_rx_pkt_get_packet_type(s->rx_pkt)) {
        case ETH_PKT_BCAST:
            stats->bcastPktsRxOK++;
            stats->bcastBytesRxOK += tot_len;
            break;
        case ETH_PKT_MCAST:
            stats->mcastPktsRxOK++;
            stats->mcastBytesRxOK += tot_len;
            break;
        case ETH_PKT_UCAST:
            stats->ucastPktsRxOK++;
            stats->ucastBytesRxOK += tot_len;
            break;
        default:
            assert(false);
        }

        /* Packets larger than the MTU are counted as LRO-coalesced. */
        if (tot_len > s->mtu) {
            stats->LROPktsRxOK++;
            stats->LROBytesRxOK += tot_len;
        }
        break;
    default:
        assert(false);
    }
}
649 |
|
650 |
/*
 * Pop the next TX descriptor of queue 'qidx' if the guest has made one
 * available (its generation bit matches the ring's).  On success the
 * descriptor is copied into *txd, its ring index stored in *descr_idx,
 * the consumption pointer is advanced and true is returned; false means
 * the ring is empty.
 */
static inline bool
vmxnet3_pop_next_tx_descr(VMXNET3State *s,
                          int qidx,
                          struct Vmxnet3_TxDesc *txd,
                          uint32_t *descr_idx)
{
    Vmxnet3Ring *ring = &s->txq_descr[qidx].tx_ring;

    vmxnet3_ring_read_curr_cell(ring, txd);
    if (txd->gen == vmxnet3_ring_curr_gen(ring)) {
        /* Only read after generation field verification */
        smp_rmb();
        /* Re-read to be sure we got the latest version */
        vmxnet3_ring_read_curr_cell(ring, txd);
        VMXNET3_RING_DUMP(VMW_RIPRN, "TX", qidx, ring);
        *descr_idx = vmxnet3_ring_curr_cell_idx(ring);
        vmxnet3_inc_tx_consumption_counter(s, qidx);
        return true;
    }

    return false;
}
672 |
|
673 |
static bool |
674 |
vmxnet3_send_packet(VMXNET3State *s, uint32_t qidx) |
675 |
{ |
676 |
Vmxnet3PktStatus status = VMXNET3_PKT_STATUS_OK; |
677 |
|
678 |
if (!vmxnet3_setup_tx_offloads(s)) {
|
679 |
status = VMXNET3_PKT_STATUS_ERROR; |
680 |
goto func_exit;
|
681 |
} |
682 |
|
683 |
/* debug prints */
|
684 |
vmxnet3_dump_virt_hdr(vmxnet_tx_pkt_get_vhdr(s->tx_pkt)); |
685 |
vmxnet_tx_pkt_dump(s->tx_pkt); |
686 |
|
687 |
if (!vmxnet_tx_pkt_send(s->tx_pkt, qemu_get_queue(s->nic))) {
|
688 |
status = VMXNET3_PKT_STATUS_DISCARD; |
689 |
goto func_exit;
|
690 |
} |
691 |
|
692 |
func_exit:
|
693 |
vmxnet3_on_tx_done_update_stats(s, qidx, status); |
694 |
return (status == VMXNET3_PKT_STATUS_OK);
|
695 |
} |
696 |
|
697 |
/*
 * Drain TX queue 'qidx': pop descriptors until the ring is empty,
 * collecting fragments into s->tx_pkt.  When an EOP descriptor is
 * reached the assembled packet is parsed, optionally VLAN-tagged and
 * sent, and a completion is written back to the guest.
 */
static void vmxnet3_process_tx_queue(VMXNET3State *s, int qidx)
{
    struct Vmxnet3_TxDesc txd;
    uint32_t txd_idx;
    uint32_t data_len;
    hwaddr data_pa;

    for (;;) {
        if (!vmxnet3_pop_next_tx_descr(s, qidx, &txd, &txd_idx)) {
            break;
        }

        vmxnet3_dump_tx_descr(&txd);

        if (!s->skip_current_tx_pkt) {
            /* len == 0 means the maximum buffer size. */
            data_len = (txd.len > 0) ? txd.len : VMXNET3_MAX_TX_BUF_SIZE;
            data_pa = le64_to_cpu(txd.addr);

            if (!vmxnet_tx_pkt_add_raw_fragment(s->tx_pkt,
                                                data_pa,
                                                data_len)) {
                /* Could not add the fragment: drop the rest of the packet. */
                s->skip_current_tx_pkt = true;
            }
        }

        if (s->tx_sop) {
            /* The first descriptor of a packet carries offload metadata. */
            vmxnet3_tx_retrieve_metadata(s, &txd);
            s->tx_sop = false;
        }

        if (txd.eop) {
            if (!s->skip_current_tx_pkt) {
                vmxnet_tx_pkt_parse(s->tx_pkt);

                if (s->needs_vlan) {
                    vmxnet_tx_pkt_setup_vlan_header(s->tx_pkt, s->tci);
                }

                vmxnet3_send_packet(s, qidx);
            } else {
                vmxnet3_on_tx_done_update_stats(s, qidx,
                                                VMXNET3_PKT_STATUS_ERROR);
            }

            /* Complete the packet and reset per-packet assembly state. */
            vmxnet3_complete_packet(s, qidx, txd_idx);
            s->tx_sop = true;
            s->skip_current_tx_pkt = false;
            vmxnet_tx_pkt_reset(s->tx_pkt);
        }
    }
}
748 |
|
749 |
/*
 * Copy the current descriptor of RX ring 'ridx' (queue 'qidx') into
 * *dbuf and report its cell index in *didx.  Does not advance the ring.
 */
static inline void
vmxnet3_read_next_rx_descr(VMXNET3State *s, int qidx, int ridx,
                           struct Vmxnet3_RxDesc *dbuf, uint32_t *didx)
{
    Vmxnet3Ring *ring = &s->rxq_descr[qidx].rx_ring[ridx];
    *didx = vmxnet3_ring_curr_cell_idx(ring);
    vmxnet3_ring_read_curr_cell(ring, dbuf);
}
757 |
|
758 |
/* Current generation bit of RX ring 'ridx' on queue 'qidx'. */
static inline uint8_t
vmxnet3_get_rx_ring_gen(VMXNET3State *s, int qidx, int ridx)
{
    return s->rxq_descr[qidx].rx_ring[ridx].gen;
}
763 |
|
764 |
/*
 * Try to claim the next RX completion descriptor of queue 'qidx'.
 * If the current cell's generation differs from the ring's (i.e. the
 * cell is available to the device), advances the completion ring,
 * stores the generation to write into *descr_gen and returns the
 * cell's guest-physical address; returns 0 otherwise.
 */
static inline hwaddr
vmxnet3_pop_rxc_descr(VMXNET3State *s, int qidx, uint32_t *descr_gen)
{
    uint8_t ring_gen;
    struct Vmxnet3_RxCompDesc rxcd;

    hwaddr daddr =
        vmxnet3_ring_curr_cell_pa(&s->rxq_descr[qidx].comp_ring);

    cpu_physical_memory_read(daddr, &rxcd, sizeof(struct Vmxnet3_RxCompDesc));
    ring_gen = vmxnet3_ring_curr_gen(&s->rxq_descr[qidx].comp_ring);

    if (rxcd.gen != ring_gen) {
        *descr_gen = ring_gen;
        vmxnet3_inc_rx_completion_counter(s, qidx);
        return daddr;
    }

    return 0;
}
784 |
|
785 |
/* Undo the completion-ring advance done by vmxnet3_pop_rxc_descr(). */
static inline void
vmxnet3_revert_rxc_descr(VMXNET3State *s, int qidx)
{
    vmxnet3_dec_rx_completion_counter(s, qidx);
}
790 |
|
791 |
/* The RX indication code below always operates on queue 0. */
#define RXQ_IDX (0)
/* Ring 0 carries head (and body) descriptors; ring 1 is body-only. */
#define RX_HEAD_BODY_RING (0)
#define RX_BODY_ONLY_RING (1)
794 |
|
795 |
/*
 * Find the next HEAD descriptor in the head/body RX ring, consuming
 * and skipping any non-head descriptors along the way.  Returns false
 * when the ring runs out of guest-provided descriptors.
 */
static bool
vmxnet3_get_next_head_rx_descr(VMXNET3State *s,
                               struct Vmxnet3_RxDesc *descr_buf,
                               uint32_t *descr_idx,
                               uint32_t *ridx)
{
    for (;;) {
        uint32_t ring_gen;
        vmxnet3_read_next_rx_descr(s, RXQ_IDX, RX_HEAD_BODY_RING,
                                   descr_buf, descr_idx);

        /* If no more free descriptors - return */
        ring_gen = vmxnet3_get_rx_ring_gen(s, RXQ_IDX, RX_HEAD_BODY_RING);
        if (descr_buf->gen != ring_gen) {
            return false;
        }

        /* Only read after generation field verification */
        smp_rmb();
        /* Re-read to be sure we got the latest version */
        vmxnet3_read_next_rx_descr(s, RXQ_IDX, RX_HEAD_BODY_RING,
                                   descr_buf, descr_idx);

        /* Mark current descriptor as used/skipped */
        vmxnet3_inc_rx_consumption_counter(s, RXQ_IDX, RX_HEAD_BODY_RING);

        /* If this is what we are looking for - return */
        if (descr_buf->btype == VMXNET3_RXD_BTYPE_HEAD) {
            *ridx = RX_HEAD_BODY_RING;
            return true;
        }
    }
}
828 |
|
829 |
/*
 * Find the next BODY descriptor for a continuation chunk of a compound
 * packet: first try the head/body ring, then fall back to the
 * body-only ring.  Returns false when neither ring has a usable
 * descriptor.
 */
static bool
vmxnet3_get_next_body_rx_descr(VMXNET3State *s,
                               struct Vmxnet3_RxDesc *d,
                               uint32_t *didx,
                               uint32_t *ridx)
{
    vmxnet3_read_next_rx_descr(s, RXQ_IDX, RX_HEAD_BODY_RING, d, didx);

    /* Try to find corresponding descriptor in head/body ring */
    if (d->gen == vmxnet3_get_rx_ring_gen(s, RXQ_IDX, RX_HEAD_BODY_RING)) {
        /* Only read after generation field verification */
        smp_rmb();
        /* Re-read to be sure we got the latest version */
        vmxnet3_read_next_rx_descr(s, RXQ_IDX, RX_HEAD_BODY_RING, d, didx);
        if (d->btype == VMXNET3_RXD_BTYPE_BODY) {
            vmxnet3_inc_rx_consumption_counter(s, RXQ_IDX, RX_HEAD_BODY_RING);
            *ridx = RX_HEAD_BODY_RING;
            return true;
        }
    }

    /*
     * If there is no free descriptors on head/body ring or next free
     * descriptor is a head descriptor switch to body only ring
     */
    vmxnet3_read_next_rx_descr(s, RXQ_IDX, RX_BODY_ONLY_RING, d, didx);

    /* If no more free descriptors - return */
    if (d->gen == vmxnet3_get_rx_ring_gen(s, RXQ_IDX, RX_BODY_ONLY_RING)) {
        /* Only read after generation field verification */
        smp_rmb();
        /* Re-read to be sure we got the latest version */
        vmxnet3_read_next_rx_descr(s, RXQ_IDX, RX_BODY_ONLY_RING, d, didx);
        /* The body-only ring must contain only BODY descriptors. */
        assert(d->btype == VMXNET3_RXD_BTYPE_BODY);
        *ridx = RX_BODY_ONLY_RING;
        vmxnet3_inc_rx_consumption_counter(s, RXQ_IDX, RX_BODY_ONLY_RING);
        return true;
    }

    return false;
}
870 |
|
871 |
static inline bool |
872 |
vmxnet3_get_next_rx_descr(VMXNET3State *s, bool is_head,
|
873 |
struct Vmxnet3_RxDesc *descr_buf,
|
874 |
uint32_t *descr_idx, |
875 |
uint32_t *ridx) |
876 |
{ |
877 |
if (is_head || !s->rx_packets_compound) {
|
878 |
return vmxnet3_get_next_head_rx_descr(s, descr_buf, descr_idx, ridx);
|
879 |
} else {
|
880 |
return vmxnet3_get_next_body_rx_descr(s, descr_buf, descr_idx, ridx);
|
881 |
} |
882 |
} |
883 |
|
884 |
/*
 * Fill the VLAN and checksum-related fields of an RX completion
 * descriptor from the packet 'pkt' being indicated to the guest.
 * Falls back to "checksum not calculated" (cnc=1) whenever no reliable
 * checksum information is available.
 */
static void vmxnet3_rx_update_descr(struct VmxnetRxPkt *pkt,
    struct Vmxnet3_RxCompDesc *rxcd)
{
    int csum_ok, is_gso;
    bool isip4, isip6, istcp, isudp;
    struct virtio_net_hdr *vhdr;
    uint8_t offload_type;

    /* Report a stripped VLAN tag, if any. */
    if (vmxnet_rx_pkt_is_vlan_stripped(pkt)) {
        rxcd->ts = 1;
        rxcd->tci = vmxnet_rx_pkt_get_vlan_tag(pkt);
    }

    if (!vmxnet_rx_pkt_has_virt_hdr(pkt)) {
        goto nocsum;
    }

    vhdr = vmxnet_rx_pkt_get_vhdr(pkt);
    /*
     * Checksum is valid when lower level tell so or when lower level
     * requires checksum offload telling that packet produced/bridged
     * locally and did travel over network after last checksum calculation
     * or production
     */
    csum_ok = VMXNET_FLAG_IS_SET(vhdr->flags, VIRTIO_NET_HDR_F_DATA_VALID) ||
              VMXNET_FLAG_IS_SET(vhdr->flags, VIRTIO_NET_HDR_F_NEEDS_CSUM);

    offload_type = vhdr->gso_type & ~VIRTIO_NET_HDR_GSO_ECN;
    is_gso = (offload_type != VIRTIO_NET_HDR_GSO_NONE) ? 1 : 0;

    if (!csum_ok && !is_gso) {
        goto nocsum;
    }

    /* Only TCP/UDP over IPv4/IPv6 can carry checksum information. */
    vmxnet_rx_pkt_get_protocols(pkt, &isip4, &isip6, &isudp, &istcp);
    if ((!istcp && !isudp) || (!isip4 && !isip6)) {
        goto nocsum;
    }

    rxcd->cnc = 0;
    rxcd->v4 = isip4 ? 1 : 0;
    rxcd->v6 = isip6 ? 1 : 0;
    rxcd->tcp = istcp ? 1 : 0;
    rxcd->udp = isudp ? 1 : 0;
    rxcd->fcs = rxcd->tuc = rxcd->ipc = 1;
    return;

nocsum:
    /* Tell the guest the checksum was not validated. */
    rxcd->cnc = 1;
    return;
}
935 |
|
936 |
/*
 * Scatter-gather write: copy 'bytes_to_copy' bytes taken from the iov
 * chain, starting at logical offset 'start_iov_off' into the chain,
 * to contiguous guest memory at 'target_addr'.  The caller must ensure
 * the chain holds at least start_iov_off + bytes_to_copy bytes.
 */
static void
vmxnet3_physical_memory_writev(const struct iovec *iov,
                               size_t start_iov_off,
                               hwaddr target_addr,
                               size_t bytes_to_copy)
{
    size_t curr_off = 0; /* logical offset of the start of *iov */
    size_t copied = 0;   /* bytes written to guest memory so far */

    while (bytes_to_copy) {
        if (start_iov_off < (curr_off + iov->iov_len)) {
            /* Current element overlaps the copy window. */
            size_t chunk_len =
                MIN((curr_off + iov->iov_len) - start_iov_off, bytes_to_copy);

            cpu_physical_memory_write(target_addr + copied,
                                      iov->iov_base + start_iov_off - curr_off,
                                      chunk_len);

            copied += chunk_len;
            start_iov_off += chunk_len;
            /* After a copy, start_iov_off is the new logical cursor. */
            curr_off = start_iov_off;
            bytes_to_copy -= chunk_len;
        } else {
            /* Element lies wholly before the requested offset: skip. */
            curr_off += iov->iov_len;
        }
        iov++;
    }
}
964 |
|
965 |
static bool |
966 |
vmxnet3_indicate_packet(VMXNET3State *s) |
967 |
{ |
968 |
struct Vmxnet3_RxDesc rxd;
|
969 |
bool is_head = true; |
970 |
uint32_t rxd_idx; |
971 |
uint32_t rx_ridx = 0;
|
972 |
|
973 |
struct Vmxnet3_RxCompDesc rxcd;
|
974 |
uint32_t new_rxcd_gen = VMXNET3_INIT_GEN; |
975 |
hwaddr new_rxcd_pa = 0;
|
976 |
hwaddr ready_rxcd_pa = 0;
|
977 |
struct iovec *data = vmxnet_rx_pkt_get_iovec(s->rx_pkt);
|
978 |
size_t bytes_copied = 0;
|
979 |
size_t bytes_left = vmxnet_rx_pkt_get_total_len(s->rx_pkt); |
980 |
uint16_t num_frags = 0;
|
981 |
size_t chunk_size; |
982 |
|
983 |
vmxnet_rx_pkt_dump(s->rx_pkt); |
984 |
|
985 |
while (bytes_left > 0) { |
986 |
|
987 |
/* cannot add more frags to packet */
|
988 |
if (num_frags == s->max_rx_frags) {
|
989 |
break;
|
990 |
} |
991 |
|
992 |
new_rxcd_pa = vmxnet3_pop_rxc_descr(s, RXQ_IDX, &new_rxcd_gen); |
993 |
if (!new_rxcd_pa) {
|
994 |
break;
|
995 |
} |
996 |
|
997 |
if (!vmxnet3_get_next_rx_descr(s, is_head, &rxd, &rxd_idx, &rx_ridx)) {
|
998 |
break;
|
999 |
} |
1000 |
|
1001 |
chunk_size = MIN(bytes_left, rxd.len); |
1002 |
vmxnet3_physical_memory_writev(data, bytes_copied, |
1003 |
le64_to_cpu(rxd.addr), chunk_size); |
1004 |
bytes_copied += chunk_size; |
1005 |
bytes_left -= chunk_size; |
1006 |
|
1007 |
vmxnet3_dump_rx_descr(&rxd); |
1008 |
|
1009 |
if (0 != ready_rxcd_pa) { |
1010 |
cpu_physical_memory_write(ready_rxcd_pa, &rxcd, sizeof(rxcd));
|
1011 |
} |
1012 |
|
1013 |
memset(&rxcd, 0, sizeof(struct Vmxnet3_RxCompDesc)); |
1014 |
rxcd.rxdIdx = rxd_idx; |
1015 |
rxcd.len = chunk_size; |
1016 |
rxcd.sop = is_head; |
1017 |
rxcd.gen = new_rxcd_gen; |
1018 |
rxcd.rqID = RXQ_IDX + rx_ridx * s->rxq_num; |
1019 |
|
1020 |
if (0 == bytes_left) { |
1021 |
vmxnet3_rx_update_descr(s->rx_pkt, &rxcd); |
1022 |
} |
1023 |
|
1024 |
VMW_RIPRN("RX Completion descriptor: rxRing: %lu rxIdx %lu len %lu "
|
1025 |
"sop %d csum_correct %lu",
|
1026 |
(unsigned long) rx_ridx, |
1027 |
(unsigned long) rxcd.rxdIdx, |
1028 |
(unsigned long) rxcd.len, |
1029 |
(int) rxcd.sop,
|
1030 |
(unsigned long) rxcd.tuc); |
1031 |
|
1032 |
is_head = false;
|
1033 |
ready_rxcd_pa = new_rxcd_pa; |
1034 |
new_rxcd_pa = 0;
|
1035 |
} |
1036 |
|
1037 |
if (0 != ready_rxcd_pa) { |
1038 |
rxcd.eop = 1;
|
1039 |
rxcd.err = (0 != bytes_left);
|
1040 |
cpu_physical_memory_write(ready_rxcd_pa, &rxcd, sizeof(rxcd));
|
1041 |
|
1042 |
/* Flush RX descriptor changes */
|
1043 |
smp_wmb(); |
1044 |
} |
1045 |
|
1046 |
if (0 != new_rxcd_pa) { |
1047 |
vmxnet3_revert_rxc_descr(s, RXQ_IDX); |
1048 |
} |
1049 |
|
1050 |
vmxnet3_trigger_interrupt(s, s->rxq_descr[RXQ_IDX].intr_idx); |
1051 |
|
1052 |
if (bytes_left == 0) { |
1053 |
vmxnet3_on_rx_done_update_stats(s, RXQ_IDX, VMXNET3_PKT_STATUS_OK); |
1054 |
return true; |
1055 |
} else if (num_frags == s->max_rx_frags) { |
1056 |
vmxnet3_on_rx_done_update_stats(s, RXQ_IDX, VMXNET3_PKT_STATUS_ERROR); |
1057 |
return false; |
1058 |
} else {
|
1059 |
vmxnet3_on_rx_done_update_stats(s, RXQ_IDX, |
1060 |
VMXNET3_PKT_STATUS_OUT_OF_BUF); |
1061 |
return false; |
1062 |
} |
1063 |
} |
1064 |
|
1065 |
static void |
1066 |
vmxnet3_io_bar0_write(void *opaque, hwaddr addr,
|
1067 |
uint64_t val, unsigned size)
|
1068 |
{ |
1069 |
VMXNET3State *s = opaque; |
1070 |
|
1071 |
if (VMW_IS_MULTIREG_ADDR(addr, VMXNET3_REG_TXPROD,
|
1072 |
VMXNET3_DEVICE_MAX_TX_QUEUES, VMXNET3_REG_ALIGN)) { |
1073 |
int tx_queue_idx =
|
1074 |
VMW_MULTIREG_IDX_BY_ADDR(addr, VMXNET3_REG_TXPROD, |
1075 |
VMXNET3_REG_ALIGN); |
1076 |
assert(tx_queue_idx <= s->txq_num); |
1077 |
vmxnet3_process_tx_queue(s, tx_queue_idx); |
1078 |
return;
|
1079 |
} |
1080 |
|
1081 |
if (VMW_IS_MULTIREG_ADDR(addr, VMXNET3_REG_IMR,
|
1082 |
VMXNET3_MAX_INTRS, VMXNET3_REG_ALIGN)) { |
1083 |
int l = VMW_MULTIREG_IDX_BY_ADDR(addr, VMXNET3_REG_IMR,
|
1084 |
VMXNET3_REG_ALIGN); |
1085 |
|
1086 |
VMW_CBPRN("Interrupt mask for line %d written: 0x%" PRIx64, l, val);
|
1087 |
|
1088 |
vmxnet3_on_interrupt_mask_changed(s, l, val); |
1089 |
return;
|
1090 |
} |
1091 |
|
1092 |
if (VMW_IS_MULTIREG_ADDR(addr, VMXNET3_REG_RXPROD,
|
1093 |
VMXNET3_DEVICE_MAX_RX_QUEUES, VMXNET3_REG_ALIGN) || |
1094 |
VMW_IS_MULTIREG_ADDR(addr, VMXNET3_REG_RXPROD2, |
1095 |
VMXNET3_DEVICE_MAX_RX_QUEUES, VMXNET3_REG_ALIGN)) { |
1096 |
return;
|
1097 |
} |
1098 |
|
1099 |
VMW_WRPRN("BAR0 unknown write [%" PRIx64 "] = %" PRIx64 ", size %d", |
1100 |
(uint64_t) addr, val, size); |
1101 |
} |
1102 |
|
1103 |
static uint64_t
|
1104 |
vmxnet3_io_bar0_read(void *opaque, hwaddr addr, unsigned size) |
1105 |
{ |
1106 |
if (VMW_IS_MULTIREG_ADDR(addr, VMXNET3_REG_IMR,
|
1107 |
VMXNET3_MAX_INTRS, VMXNET3_REG_ALIGN)) { |
1108 |
assert(false);
|
1109 |
} |
1110 |
|
1111 |
VMW_CBPRN("BAR0 unknown read [%" PRIx64 "], size %d", addr, size); |
1112 |
return 0; |
1113 |
} |
1114 |
|
1115 |
/* Return every interrupt line to its power-on state: idle and masked. */
static void vmxnet3_reset_interrupt_states(VMXNET3State *s)
{
    int line;

    for (line = 0; line < ARRAY_SIZE(s->interrupt_states); line++) {
        s->interrupt_states[line].is_asserted = false;
        s->interrupt_states[line].is_pending = false;
        s->interrupt_states[line].is_masked = true;
    }
}
1124 |
|
1125 |
/* Restore the active MAC address from the permanent (power-on) MAC. */
static void vmxnet3_reset_mac(VMXNET3State *s)
{
    memcpy(&s->conf.macaddr.a, &s->perm_mac.a, sizeof(s->perm_mac.a));
    VMW_CFPRN("MAC address set to: " VMXNET_MF, VMXNET_MA(s->conf.macaddr.a));
}
1130 |
|
1131 |
/* Stop packet processing; the device stays configured but inactive. */
static void vmxnet3_deactivate_device(VMXNET3State *s)
{
    VMW_CBPRN("Deactivating vmxnet3...");
    s->device_active = false;
}
1136 |
|
1137 |
/* Full device reset: deactivate, clear interrupt and TX state, and drop
 * the reference to driver shared memory. */
static void vmxnet3_reset(VMXNET3State *s)
{
    VMW_CBPRN("Resetting vmxnet3...");

    vmxnet3_deactivate_device(s);
    vmxnet3_reset_interrupt_states(s);
    vmxnet_tx_pkt_reset(s->tx_pkt);
    s->drv_shmem = 0;
    s->tx_sop = true;
    s->skip_current_tx_pkt = false;
}
1148 |
|
1149 |
/* Refresh the RX filter mode (promisc/mcast/bcast flags) from shared
 * memory written by the guest driver. */
static void vmxnet3_update_rx_mode(VMXNET3State *s)
{
    s->rx_mode = VMXNET3_READ_DRV_SHARED32(s->drv_shmem,
                                           devRead.rxFilterConf.rxMode);
    VMW_CFPRN("RX mode: 0x%08X", s->rx_mode);
}
1155 |
|
1156 |
/* Load the VLAN filter bitmap from driver shared memory, fixing up byte
 * order, and dump the enabled VLAN IDs for debugging. */
static void vmxnet3_update_vlan_filters(VMXNET3State *s)
{
    int idx;

    /* Pull the raw table out of guest shared memory */
    VMXNET3_READ_DRV_SHARED(s->drv_shmem,
                            devRead.rxFilterConf.vfTable,
                            s->vlan_table,
                            sizeof(s->vlan_table));

    /* Table is little-endian in shared memory; convert to host order */
    for (idx = 0; idx < ARRAY_SIZE(s->vlan_table); idx++) {
        s->vlan_table[idx] = le32_to_cpu(s->vlan_table[idx]);
    }

    /* Debug dump of every VLAN bit that is set */
    VMW_CFPRN("Configured VLANs:");
    for (idx = 0; idx < sizeof(s->vlan_table) * 8; idx++) {
        if (VMXNET3_VFTABLE_ENTRY_IS_SET(s->vlan_table, idx)) {
            VMW_CFPRN("\tVLAN %d is present", idx);
        }
    }
}
1179 |
|
1180 |
/*
 * Reload the multicast filter list from driver shared memory.
 *
 * Note on allocation: g_realloc() aborts the process on OOM and returns
 * NULL only when the requested size is zero, so a NULL result here simply
 * means the driver supplied an empty list - the old "failed to allocate"
 * branch was unreachable and has been folded away.
 */
static void vmxnet3_update_mcast_filters(VMXNET3State *s)
{
    uint16_t list_bytes =
        VMXNET3_READ_DRV_SHARED16(s->drv_shmem,
                                  devRead.rxFilterConf.mfTableLen);

    s->mcast_list_len = list_bytes / sizeof(s->mcast_list[0]);

    s->mcast_list = g_realloc(s->mcast_list, list_bytes);
    if (NULL == s->mcast_list) {
        /* Zero-size request: driver cleared the multicast list */
        VMW_CFPRN("Current multicast list is empty");
        s->mcast_list_len = 0;
    } else {
        int i;
        hwaddr mcast_list_pa =
            VMXNET3_READ_DRV_SHARED64(s->drv_shmem,
                                      devRead.rxFilterConf.mfTablePA);

        cpu_physical_memory_read(mcast_list_pa, s->mcast_list, list_bytes);
        VMW_CFPRN("Current multicast list len is %d:", s->mcast_list_len);
        for (i = 0; i < s->mcast_list_len; i++) {
            VMW_CFPRN("\t" VMXNET_MF, VMXNET_MA(s->mcast_list[i].a));
        }
    }
}
1210 |
|
1211 |
/* Refresh all RX filtering state (mode, VLAN table, multicast list) from
 * driver shared memory in one go. */
static void vmxnet3_setup_rx_filtering(VMXNET3State *s)
{
    vmxnet3_update_rx_mode(s);
    vmxnet3_update_vlan_filters(s);
    vmxnet3_update_mcast_filters(s);
}
1217 |
|
1218 |
/* Report the interrupt configuration: auto interrupt type, auto
 * interrupt-moderation mode. The state argument is unused but kept for
 * signature consistency with the other command handlers. */
static uint32_t vmxnet3_get_interrupt_config(VMXNET3State *s)
{
    uint32_t interrupt_mode = VMXNET3_IT_AUTO | (VMXNET3_IMM_AUTO << 2);

    VMW_CFPRN("Interrupt config is 0x%X", interrupt_mode);
    return interrupt_mode;
}
1224 |
|
1225 |
/* Copy per-queue TX and RX statistics back into the guest-visible queue
 * descriptors in physical memory. */
static void vmxnet3_fill_stats(VMXNET3State *s)
{
    int q;

    for (q = 0; q < s->txq_num; q++) {
        cpu_physical_memory_write(s->txq_descr[q].tx_stats_pa,
                                  &s->txq_descr[q].txq_stats,
                                  sizeof(s->txq_descr[q].txq_stats));
    }

    for (q = 0; q < s->rxq_num; q++) {
        cpu_physical_memory_write(s->rxq_descr[q].rx_stats_pa,
                                  &s->rxq_descr[q].rxq_stats,
                                  sizeof(s->rxq_descr[q].rxq_stats));
    }
}
1240 |
|
1241 |
/* Read the guest OS type from shared memory and adjust device behavior:
 * Windows guests do not get compound RX packets. */
static void vmxnet3_adjust_by_guest_type(VMXNET3State *s)
{
    struct Vmxnet3_GOSInfo gos;

    VMXNET3_READ_DRV_SHARED(s->drv_shmem, devRead.misc.driverInfo.gos,
                            &gos, sizeof(gos));
    s->rx_packets_compound = (gos.gosType != VMXNET3_GOS_TYPE_WIN);

    VMW_CFPRN("Guest type specifics: RXCOMPOUND: %d", s->rx_packets_compound);
}
1252 |
|
1253 |
static void |
1254 |
vmxnet3_dump_conf_descr(const char *name, |
1255 |
struct Vmxnet3_VariableLenConfDesc *pm_descr)
|
1256 |
{ |
1257 |
VMW_CFPRN("%s descriptor dump: Version %u, Length %u",
|
1258 |
name, pm_descr->confVer, pm_descr->confLen); |
1259 |
|
1260 |
}; |
1261 |
|
1262 |
/* Read the power-management configuration descriptor from shared memory.
 * Currently only dumped for debugging; no PM behavior is applied. */
static void vmxnet3_update_pm_state(VMXNET3State *s)
{
    struct Vmxnet3_VariableLenConfDesc pm_descr;

    pm_descr.confLen =
        VMXNET3_READ_DRV_SHARED32(s->drv_shmem, devRead.pmConfDesc.confLen);
    pm_descr.confVer =
        VMXNET3_READ_DRV_SHARED32(s->drv_shmem, devRead.pmConfDesc.confVer);
    pm_descr.confPA =
        VMXNET3_READ_DRV_SHARED64(s->drv_shmem, devRead.pmConfDesc.confPA);

    vmxnet3_dump_conf_descr("PM State", &pm_descr);
}
1275 |
|
1276 |
/* Read the guest-requested UPT feature bits (RX checksum, VLAN stripping,
 * LRO) and propagate the offload settings to a tap backend if present. */
static void vmxnet3_update_features(VMXNET3State *s)
{
    uint32_t guest_features;
    int rxcso_supported;

    guest_features = VMXNET3_READ_DRV_SHARED32(s->drv_shmem,
                                               devRead.misc.uptFeatures);

    rxcso_supported = VMXNET_FLAG_IS_SET(guest_features, UPT1_F_RXCSUM);
    s->rx_vlan_stripping = VMXNET_FLAG_IS_SET(guest_features, UPT1_F_RXVLAN);
    s->lro_supported = VMXNET_FLAG_IS_SET(guest_features, UPT1_F_LRO);

    VMW_CFPRN("Features configuration: LRO: %d, RXCSUM: %d, VLANSTRIP: %d",
              s->lro_supported, rxcso_supported,
              s->rx_vlan_stripping);

    if (s->peer_has_vhdr) {
        /* Mirror the offload configuration into the tap peer */
        tap_set_offload(qemu_get_queue(s->nic)->peer,
                        rxcso_supported,
                        s->lro_supported,
                        s->lro_supported,
                        0,
                        0);
    }
}
1300 |
|
1301 |
/*
 * Activate the device according to the configuration the guest driver
 * placed in shared memory: cache global parameters, set up all TX/RX
 * rings and their completion rings, then mark the device active.
 */
static void vmxnet3_activate_device(VMXNET3State *s)
{
    int i;
    static const uint32_t VMXNET3_DEF_TX_THRESHOLD = 1;
    hwaddr qdescr_table_pa;
    uint64_t pa;
    uint32_t size;

    /* Verify configuration consistency */
    if (!vmxnet3_verify_driver_magic(s->drv_shmem)) {
        VMW_ERPRN("Device configuration received from driver is invalid");
        return;
    }

    vmxnet3_adjust_by_guest_type(s);
    vmxnet3_update_features(s);
    vmxnet3_update_pm_state(s);
    vmxnet3_setup_rx_filtering(s);

    /* Cache fields from shared memory */
    s->mtu = VMXNET3_READ_DRV_SHARED32(s->drv_shmem, devRead.misc.mtu);
    VMW_CFPRN("MTU is %u", s->mtu);

    s->max_rx_frags =
        VMXNET3_READ_DRV_SHARED16(s->drv_shmem, devRead.misc.maxNumRxSG);

    VMW_CFPRN("Max RX fragments is %u", s->max_rx_frags);

    s->event_int_idx =
        VMXNET3_READ_DRV_SHARED8(s->drv_shmem, devRead.intrConf.eventIntrIdx);
    VMW_CFPRN("Events interrupt line is %u", s->event_int_idx);

    s->auto_int_masking =
        VMXNET3_READ_DRV_SHARED8(s->drv_shmem, devRead.intrConf.autoMask);
    VMW_CFPRN("Automatic interrupt masking is %d", (int)s->auto_int_masking);

    s->txq_num =
        VMXNET3_READ_DRV_SHARED8(s->drv_shmem, devRead.misc.numTxQueues);
    s->rxq_num =
        VMXNET3_READ_DRV_SHARED8(s->drv_shmem, devRead.misc.numRxQueues);

    VMW_CFPRN("Number of TX/RX queues %u/%u", s->txq_num, s->rxq_num);
    assert(s->txq_num <= VMXNET3_DEVICE_MAX_TX_QUEUES);
    /*
     * BUGFIX: rxq_num is equally guest-controlled and indexes the
     * fixed-size rxq_descr[] array below, but was never validated.
     */
    assert(s->rxq_num <= VMXNET3_DEVICE_MAX_RX_QUEUES);

    qdescr_table_pa =
        VMXNET3_READ_DRV_SHARED64(s->drv_shmem, devRead.misc.queueDescPA);
    VMW_CFPRN("TX queues descriptors table is at 0x%" PRIx64, qdescr_table_pa);

    /*
     * Worst-case scenario is a packet that holds all TX rings space so
     * we calculate total size of all TX rings for max TX fragments number
     */
    s->max_tx_frags = 0;

    /* TX queues */
    for (i = 0; i < s->txq_num; i++) {
        hwaddr qdescr_pa =
            qdescr_table_pa + i * sizeof(struct Vmxnet3_TxQueueDesc);

        /* Read interrupt number for this TX queue */
        s->txq_descr[i].intr_idx =
            VMXNET3_READ_TX_QUEUE_DESCR8(qdescr_pa, conf.intrIdx);

        VMW_CFPRN("TX Queue %d interrupt: %d", i, s->txq_descr[i].intr_idx);

        /* Read rings memory locations for TX queues */
        pa = VMXNET3_READ_TX_QUEUE_DESCR64(qdescr_pa, conf.txRingBasePA);
        size = VMXNET3_READ_TX_QUEUE_DESCR32(qdescr_pa, conf.txRingSize);

        vmxnet3_ring_init(&s->txq_descr[i].tx_ring, pa, size,
                          sizeof(struct Vmxnet3_TxDesc), false);
        VMXNET3_RING_DUMP(VMW_CFPRN, "TX", i, &s->txq_descr[i].tx_ring);

        s->max_tx_frags += size;

        /* TXC ring */
        pa = VMXNET3_READ_TX_QUEUE_DESCR64(qdescr_pa, conf.compRingBasePA);
        size = VMXNET3_READ_TX_QUEUE_DESCR32(qdescr_pa, conf.compRingSize);
        vmxnet3_ring_init(&s->txq_descr[i].comp_ring, pa, size,
                          sizeof(struct Vmxnet3_TxCompDesc), true);
        VMXNET3_RING_DUMP(VMW_CFPRN, "TXC", i, &s->txq_descr[i].comp_ring);

        s->txq_descr[i].tx_stats_pa =
            qdescr_pa + offsetof(struct Vmxnet3_TxQueueDesc, stats);

        memset(&s->txq_descr[i].txq_stats, 0,
               sizeof(s->txq_descr[i].txq_stats));

        /* Fill device-managed parameters for queues */
        VMXNET3_WRITE_TX_QUEUE_DESCR32(qdescr_pa,
                                       ctrl.txThreshold,
                                       VMXNET3_DEF_TX_THRESHOLD);
    }

    /* Preallocate TX packet wrapper */
    VMW_CFPRN("Max TX fragments is %u", s->max_tx_frags);
    vmxnet_tx_pkt_init(&s->tx_pkt, s->max_tx_frags, s->peer_has_vhdr);
    vmxnet_rx_pkt_init(&s->rx_pkt, s->peer_has_vhdr);

    /* Read rings memory locations for RX queues */
    for (i = 0; i < s->rxq_num; i++) {
        int j;
        hwaddr qd_pa =
            qdescr_table_pa + s->txq_num * sizeof(struct Vmxnet3_TxQueueDesc) +
            i * sizeof(struct Vmxnet3_RxQueueDesc);

        /* Read interrupt number for this RX queue */
        s->rxq_descr[i].intr_idx =
            VMXNET3_READ_TX_QUEUE_DESCR8(qd_pa, conf.intrIdx);

        VMW_CFPRN("RX Queue %d interrupt: %d", i, s->rxq_descr[i].intr_idx);

        /* Read rings memory locations */
        for (j = 0; j < VMXNET3_RX_RINGS_PER_QUEUE; j++) {
            /* RX rings */
            pa = VMXNET3_READ_RX_QUEUE_DESCR64(qd_pa, conf.rxRingBasePA[j]);
            size = VMXNET3_READ_RX_QUEUE_DESCR32(qd_pa, conf.rxRingSize[j]);
            vmxnet3_ring_init(&s->rxq_descr[i].rx_ring[j], pa, size,
                              sizeof(struct Vmxnet3_RxDesc), false);
            VMW_CFPRN("RX queue %d:%d: Base: %" PRIx64 ", Size: %d",
                      i, j, pa, size);
        }

        /* RXC ring */
        pa = VMXNET3_READ_RX_QUEUE_DESCR64(qd_pa, conf.compRingBasePA);
        size = VMXNET3_READ_RX_QUEUE_DESCR32(qd_pa, conf.compRingSize);
        vmxnet3_ring_init(&s->rxq_descr[i].comp_ring, pa, size,
                          sizeof(struct Vmxnet3_RxCompDesc), true);
        VMW_CFPRN("RXC queue %d: Base: %" PRIx64 ", Size: %d", i, pa, size);

        s->rxq_descr[i].rx_stats_pa =
            qd_pa + offsetof(struct Vmxnet3_RxQueueDesc, stats);
        memset(&s->rxq_descr[i].rxq_stats, 0,
               sizeof(s->rxq_descr[i].rxq_stats));
    }

    /* Make sure everything is in place before device activation */
    smp_wmb();

    vmxnet3_reset_mac(s);

    s->device_active = true;
}
1443 |
|
1444 |
/*
 * Dispatch a command written to VMXNET3_REG_CMD. The command is cached in
 * s->last_command so a subsequent read of the register can return the
 * matching status (see vmxnet3_get_command_status()).
 */
static void vmxnet3_handle_command(VMXNET3State *s, uint64_t cmd)
{
    s->last_command = cmd;

    switch (cmd) {
    case VMXNET3_CMD_GET_PERM_MAC_HI:
        VMW_CBPRN("Set: Get upper part of permanent MAC");
        break;

    case VMXNET3_CMD_GET_PERM_MAC_LO:
        VMW_CBPRN("Set: Get lower part of permanent MAC");
        break;

    case VMXNET3_CMD_GET_STATS:
        VMW_CBPRN("Set: Get device statistics");
        vmxnet3_fill_stats(s);
        break;

    case VMXNET3_CMD_ACTIVATE_DEV:
        VMW_CBPRN("Set: Activating vmxnet3 device");
        vmxnet3_activate_device(s);
        break;

    case VMXNET3_CMD_UPDATE_RX_MODE:
        VMW_CBPRN("Set: Update rx mode");
        vmxnet3_update_rx_mode(s);
        break;

    case VMXNET3_CMD_UPDATE_VLAN_FILTERS:
        VMW_CBPRN("Set: Update VLAN filters");
        vmxnet3_update_vlan_filters(s);
        break;

    case VMXNET3_CMD_UPDATE_MAC_FILTERS:
        VMW_CBPRN("Set: Update MAC filters");
        vmxnet3_update_mcast_filters(s);
        break;

    case VMXNET3_CMD_UPDATE_FEATURE:
        VMW_CBPRN("Set: Update features");
        vmxnet3_update_features(s);
        break;

    case VMXNET3_CMD_UPDATE_PMCFG:
        VMW_CBPRN("Set: Update power management config");
        vmxnet3_update_pm_state(s);
        break;

    case VMXNET3_CMD_GET_LINK:
        VMW_CBPRN("Set: Get link");
        break;

    case VMXNET3_CMD_RESET_DEV:
        VMW_CBPRN("Set: Reset device");
        vmxnet3_reset(s);
        break;

    case VMXNET3_CMD_QUIESCE_DEV:
        VMW_CBPRN("Set: VMXNET3_CMD_QUIESCE_DEV - pause the device");
        vmxnet3_deactivate_device(s);
        break;

    case VMXNET3_CMD_GET_CONF_INTR:
        VMW_CBPRN("Set: VMXNET3_CMD_GET_CONF_INTR - interrupt configuration");
        break;

    default:
        VMW_CBPRN("Received unknown command: %" PRIx64, cmd);
        break;
    }
}
1515 |
|
1516 |
/*
 * Produce the read-back value for VMXNET3_REG_CMD based on the command the
 * guest most recently wrote (s->last_command).
 */
static uint64_t vmxnet3_get_command_status(VMXNET3State *s)
{
    uint64_t ret;

    switch (s->last_command) {
    case VMXNET3_CMD_ACTIVATE_DEV:
        /* 0 on success, -1 (all ones) when activation failed */
        ret = (s->device_active) ? 0 : -1;
        VMW_CFPRN("Device active: %" PRIx64, ret);
        break;

    case VMXNET3_CMD_GET_LINK:
        ret = s->link_status_and_speed;
        VMW_CFPRN("Link and speed: %" PRIx64, ret);
        break;

    case VMXNET3_CMD_GET_PERM_MAC_LO:
        ret = vmxnet3_get_mac_low(&s->perm_mac);
        break;

    case VMXNET3_CMD_GET_PERM_MAC_HI:
        ret = vmxnet3_get_mac_high(&s->perm_mac);
        break;

    case VMXNET3_CMD_GET_CONF_INTR:
        ret = vmxnet3_get_interrupt_config(s);
        break;

    default:
        /*
         * BUGFIX: last_command is uint64_t; printing it with "%x" was a
         * format/argument mismatch (undefined behavior). Use PRIx64.
         */
        VMW_WRPRN("Received request for unknown command: %" PRIx64,
                  s->last_command);
        ret = -1;
        break;
    }

    return ret;
}
1551 |
|
1552 |
/* OR the given event bits into the event cause register (ECR) in driver
 * shared memory. */
static void vmxnet3_set_events(VMXNET3State *s, uint32_t val)
{
    uint32_t events;

    VMW_CBPRN("Setting events: 0x%x", val);
    events = VMXNET3_READ_DRV_SHARED32(s->drv_shmem, ecr) | val;
    VMXNET3_WRITE_DRV_SHARED32(s->drv_shmem, ecr, events);
}
1560 |
|
1561 |
/* Clear the given event bits from the event cause register (ECR) in
 * driver shared memory. */
static void vmxnet3_ack_events(VMXNET3State *s, uint32_t val)
{
    uint32_t events;

    VMW_CBPRN("Clearing events: 0x%x", val);
    events = VMXNET3_READ_DRV_SHARED32(s->drv_shmem, ecr) & ~val;
    VMXNET3_WRITE_DRV_SHARED32(s->drv_shmem, ecr, events);
}
1569 |
|
1570 |
static void |
1571 |
vmxnet3_io_bar1_write(void *opaque,
|
1572 |
hwaddr addr, |
1573 |
uint64_t val, |
1574 |
unsigned size)
|
1575 |
{ |
1576 |
VMXNET3State *s = opaque; |
1577 |
|
1578 |
switch (addr) {
|
1579 |
/* Vmxnet3 Revision Report Selection */
|
1580 |
case VMXNET3_REG_VRRS:
|
1581 |
VMW_CBPRN("Write BAR1 [VMXNET3_REG_VRRS] = %" PRIx64 ", size %d", |
1582 |
val, size); |
1583 |
break;
|
1584 |
|
1585 |
/* UPT Version Report Selection */
|
1586 |
case VMXNET3_REG_UVRS:
|
1587 |
VMW_CBPRN("Write BAR1 [VMXNET3_REG_UVRS] = %" PRIx64 ", size %d", |
1588 |
val, size); |
1589 |
break;
|
1590 |
|
1591 |
/* Driver Shared Address Low */
|
1592 |
case VMXNET3_REG_DSAL:
|
1593 |
VMW_CBPRN("Write BAR1 [VMXNET3_REG_DSAL] = %" PRIx64 ", size %d", |
1594 |
val, size); |
1595 |
/*
|
1596 |
* Guest driver will first write the low part of the shared
|
1597 |
* memory address. We save it to temp variable and set the
|
1598 |
* shared address only after we get the high part
|
1599 |
*/
|
1600 |
if (0 == val) { |
1601 |
s->device_active = false;
|
1602 |
} |
1603 |
s->temp_shared_guest_driver_memory = val; |
1604 |
s->drv_shmem = 0;
|
1605 |
break;
|
1606 |
|
1607 |
/* Driver Shared Address High */
|
1608 |
case VMXNET3_REG_DSAH:
|
1609 |
VMW_CBPRN("Write BAR1 [VMXNET3_REG_DSAH] = %" PRIx64 ", size %d", |
1610 |
val, size); |
1611 |
/*
|
1612 |
* Set the shared memory between guest driver and device.
|
1613 |
* We already should have low address part.
|
1614 |
*/
|
1615 |
s->drv_shmem = s->temp_shared_guest_driver_memory | (val << 32);
|
1616 |
break;
|
1617 |
|
1618 |
/* Command */
|
1619 |
case VMXNET3_REG_CMD:
|
1620 |
VMW_CBPRN("Write BAR1 [VMXNET3_REG_CMD] = %" PRIx64 ", size %d", |
1621 |
val, size); |
1622 |
vmxnet3_handle_command(s, val); |
1623 |
break;
|
1624 |
|
1625 |
/* MAC Address Low */
|
1626 |
case VMXNET3_REG_MACL:
|
1627 |
VMW_CBPRN("Write BAR1 [VMXNET3_REG_MACL] = %" PRIx64 ", size %d", |
1628 |
val, size); |
1629 |
s->temp_mac = val; |
1630 |
break;
|
1631 |
|
1632 |
/* MAC Address High */
|
1633 |
case VMXNET3_REG_MACH:
|
1634 |
VMW_CBPRN("Write BAR1 [VMXNET3_REG_MACH] = %" PRIx64 ", size %d", |
1635 |
val, size); |
1636 |
vmxnet3_set_variable_mac(s, val, s->temp_mac); |
1637 |
break;
|
1638 |
|
1639 |
/* Interrupt Cause Register */
|
1640 |
case VMXNET3_REG_ICR:
|
1641 |
VMW_CBPRN("Write BAR1 [VMXNET3_REG_ICR] = %" PRIx64 ", size %d", |
1642 |
val, size); |
1643 |
assert(false);
|
1644 |
break;
|
1645 |
|
1646 |
/* Event Cause Register */
|
1647 |
case VMXNET3_REG_ECR:
|
1648 |
VMW_CBPRN("Write BAR1 [VMXNET3_REG_ECR] = %" PRIx64 ", size %d", |
1649 |
val, size); |
1650 |
vmxnet3_ack_events(s, val); |
1651 |
break;
|
1652 |
|
1653 |
default:
|
1654 |
VMW_CBPRN("Unknown Write to BAR1 [%" PRIx64 "] = %" PRIx64 ", size %d", |
1655 |
addr, val, size); |
1656 |
break;
|
1657 |
} |
1658 |
} |
1659 |
|
1660 |
static uint64_t
|
1661 |
vmxnet3_io_bar1_read(void *opaque, hwaddr addr, unsigned size) |
1662 |
{ |
1663 |
VMXNET3State *s = opaque; |
1664 |
uint64_t ret = 0;
|
1665 |
|
1666 |
switch (addr) {
|
1667 |
/* Vmxnet3 Revision Report Selection */
|
1668 |
case VMXNET3_REG_VRRS:
|
1669 |
VMW_CBPRN("Read BAR1 [VMXNET3_REG_VRRS], size %d", size);
|
1670 |
ret = VMXNET3_DEVICE_REVISION; |
1671 |
break;
|
1672 |
|
1673 |
/* UPT Version Report Selection */
|
1674 |
case VMXNET3_REG_UVRS:
|
1675 |
VMW_CBPRN("Read BAR1 [VMXNET3_REG_UVRS], size %d", size);
|
1676 |
ret = VMXNET3_DEVICE_VERSION; |
1677 |
break;
|
1678 |
|
1679 |
/* Command */
|
1680 |
case VMXNET3_REG_CMD:
|
1681 |
VMW_CBPRN("Read BAR1 [VMXNET3_REG_CMD], size %d", size);
|
1682 |
ret = vmxnet3_get_command_status(s); |
1683 |
break;
|
1684 |
|
1685 |
/* MAC Address Low */
|
1686 |
case VMXNET3_REG_MACL:
|
1687 |
VMW_CBPRN("Read BAR1 [VMXNET3_REG_MACL], size %d", size);
|
1688 |
ret = vmxnet3_get_mac_low(&s->conf.macaddr); |
1689 |
break;
|
1690 |
|
1691 |
/* MAC Address High */
|
1692 |
case VMXNET3_REG_MACH:
|
1693 |
VMW_CBPRN("Read BAR1 [VMXNET3_REG_MACH], size %d", size);
|
1694 |
ret = vmxnet3_get_mac_high(&s->conf.macaddr); |
1695 |
break;
|
1696 |
|
1697 |
/*
|
1698 |
* Interrupt Cause Register
|
1699 |
* Used for legacy interrupts only so interrupt index always 0
|
1700 |
*/
|
1701 |
case VMXNET3_REG_ICR:
|
1702 |
VMW_CBPRN("Read BAR1 [VMXNET3_REG_ICR], size %d", size);
|
1703 |
if (vmxnet3_interrupt_asserted(s, 0)) { |
1704 |
vmxnet3_clear_interrupt(s, 0);
|
1705 |
ret = true;
|
1706 |
} else {
|
1707 |
ret = false;
|
1708 |
} |
1709 |
break;
|
1710 |
|
1711 |
default:
|
1712 |
VMW_CBPRN("Unknow read BAR1[%" PRIx64 "], %d bytes", addr, size); |
1713 |
break;
|
1714 |
} |
1715 |
|
1716 |
return ret;
|
1717 |
} |
1718 |
|
1719 |
static int |
1720 |
vmxnet3_can_receive(NetClientState *nc) |
1721 |
{ |
1722 |
VMXNET3State *s = qemu_get_nic_opaque(nc); |
1723 |
return s->device_active &&
|
1724 |
VMXNET_FLAG_IS_SET(s->link_status_and_speed, VMXNET3_LINK_STATUS_UP); |
1725 |
} |
1726 |
|
1727 |
static inline bool |
1728 |
vmxnet3_is_registered_vlan(VMXNET3State *s, const void *data) |
1729 |
{ |
1730 |
uint16_t vlan_tag = eth_get_pkt_tci(data) & VLAN_VID_MASK; |
1731 |
if (IS_SPECIAL_VLAN_ID(vlan_tag)) {
|
1732 |
return true; |
1733 |
} |
1734 |
|
1735 |
return VMXNET3_VFTABLE_ENTRY_IS_SET(s->vlan_table, vlan_tag);
|
1736 |
} |
1737 |
|
1738 |
static bool |
1739 |
vmxnet3_is_allowed_mcast_group(VMXNET3State *s, const uint8_t *group_mac)
|
1740 |
{ |
1741 |
int i;
|
1742 |
for (i = 0; i < s->mcast_list_len; i++) { |
1743 |
if (!memcmp(group_mac, s->mcast_list[i].a, sizeof(s->mcast_list[i]))) { |
1744 |
return true; |
1745 |
} |
1746 |
} |
1747 |
return false; |
1748 |
} |
1749 |
|
1750 |
static bool |
1751 |
vmxnet3_rx_filter_may_indicate(VMXNET3State *s, const void *data, |
1752 |
size_t size) |
1753 |
{ |
1754 |
struct eth_header *ehdr = PKT_GET_ETH_HDR(data);
|
1755 |
|
1756 |
if (VMXNET_FLAG_IS_SET(s->rx_mode, VMXNET3_RXM_PROMISC)) {
|
1757 |
return true; |
1758 |
} |
1759 |
|
1760 |
if (!vmxnet3_is_registered_vlan(s, data)) {
|
1761 |
return false; |
1762 |
} |
1763 |
|
1764 |
switch (vmxnet_rx_pkt_get_packet_type(s->rx_pkt)) {
|
1765 |
case ETH_PKT_UCAST:
|
1766 |
if (!VMXNET_FLAG_IS_SET(s->rx_mode, VMXNET3_RXM_UCAST)) {
|
1767 |
return false; |
1768 |
} |
1769 |
if (memcmp(s->conf.macaddr.a, ehdr->h_dest, ETH_ALEN)) {
|
1770 |
return false; |
1771 |
} |
1772 |
break;
|
1773 |
|
1774 |
case ETH_PKT_BCAST:
|
1775 |
if (!VMXNET_FLAG_IS_SET(s->rx_mode, VMXNET3_RXM_BCAST)) {
|
1776 |
return false; |
1777 |
} |
1778 |
break;
|
1779 |
|
1780 |
case ETH_PKT_MCAST:
|
1781 |
if (VMXNET_FLAG_IS_SET(s->rx_mode, VMXNET3_RXM_ALL_MULTI)) {
|
1782 |
return true; |
1783 |
} |
1784 |
if (!VMXNET_FLAG_IS_SET(s->rx_mode, VMXNET3_RXM_MCAST)) {
|
1785 |
return false; |
1786 |
} |
1787 |
if (!vmxnet3_is_allowed_mcast_group(s, ehdr->h_dest)) {
|
1788 |
return false; |
1789 |
} |
1790 |
break;
|
1791 |
|
1792 |
default:
|
1793 |
assert(false);
|
1794 |
} |
1795 |
|
1796 |
return true; |
1797 |
} |
1798 |
|
1799 |
static ssize_t
|
1800 |
vmxnet3_receive(NetClientState *nc, const uint8_t *buf, size_t size)
|
1801 |
{ |
1802 |
VMXNET3State *s = qemu_get_nic_opaque(nc); |
1803 |
size_t bytes_indicated; |
1804 |
|
1805 |
if (!vmxnet3_can_receive(nc)) {
|
1806 |
VMW_PKPRN("Cannot receive now");
|
1807 |
return -1; |
1808 |
} |
1809 |
|
1810 |
if (s->peer_has_vhdr) {
|
1811 |
vmxnet_rx_pkt_set_vhdr(s->rx_pkt, (struct virtio_net_hdr *)buf);
|
1812 |
buf += sizeof(struct virtio_net_hdr); |
1813 |
size -= sizeof(struct virtio_net_hdr); |
1814 |
} |
1815 |
|
1816 |
vmxnet_rx_pkt_set_packet_type(s->rx_pkt, |
1817 |
get_eth_packet_type(PKT_GET_ETH_HDR(buf))); |
1818 |
|
1819 |
if (vmxnet3_rx_filter_may_indicate(s, buf, size)) {
|
1820 |
vmxnet_rx_pkt_attach_data(s->rx_pkt, buf, size, s->rx_vlan_stripping); |
1821 |
bytes_indicated = vmxnet3_indicate_packet(s) ? size : -1;
|
1822 |
if (bytes_indicated < size) {
|
1823 |
VMW_PKPRN("RX: %lu of %lu bytes indicated", bytes_indicated, size);
|
1824 |
} |
1825 |
} else {
|
1826 |
VMW_PKPRN("Packet dropped by RX filter");
|
1827 |
bytes_indicated = size; |
1828 |
} |
1829 |
|
1830 |
assert(size > 0);
|
1831 |
assert(bytes_indicated != 0);
|
1832 |
return bytes_indicated;
|
1833 |
} |
1834 |
|
1835 |
/* NetClientInfo.cleanup callback: drop our reference to the NIC. */
static void vmxnet3_cleanup(NetClientState *nc)
{
    VMXNET3State *s = qemu_get_nic_opaque(nc);

    s->nic = NULL;
}
1840 |
|
1841 |
/* Propagate a backend link-state change to the guest: update the status
 * bit and raise a link-change event interrupt. */
static void vmxnet3_set_link_status(NetClientState *nc)
{
    VMXNET3State *s = qemu_get_nic_opaque(nc);

    if (nc->link_down) {
        s->link_status_and_speed &= ~VMXNET3_LINK_STATUS_UP;
    } else {
        s->link_status_and_speed |= VMXNET3_LINK_STATUS_UP;
    }

    vmxnet3_set_events(s, VMXNET3_ECR_LINK);
    vmxnet3_trigger_interrupt(s, s->event_int_idx);
}
1854 |
|
1855 |
static NetClientInfo net_vmxnet3_info = {
|
1856 |
.type = NET_CLIENT_OPTIONS_KIND_NIC, |
1857 |
.size = sizeof(NICState),
|
1858 |
.can_receive = vmxnet3_can_receive, |
1859 |
.receive = vmxnet3_receive, |
1860 |
.cleanup = vmxnet3_cleanup, |
1861 |
.link_status_changed = vmxnet3_set_link_status, |
1862 |
}; |
1863 |
|
1864 |
/* True when our peer is a tap backend that supports the virtio-net
 * header, i.e. offloads can be delegated to it. */
static bool vmxnet3_peer_has_vnet_hdr(VMXNET3State *s)
{
    NetClientState *peer = qemu_get_queue(s->nic)->peer;

    if (peer != NULL &&
        peer->info->type == NET_CLIENT_OPTIONS_KIND_TAP &&
        tap_has_vnet_hdr(peer)) {
        return true;
    }

    VMW_WRPRN("Peer has no virtio extension. Task offloads will be emulated.");
    return false;
}
1877 |
|
1878 |
/* Tear down networking state: free the multicast list, release TX/RX
 * packet wrappers and unregister the net client. */
static void vmxnet3_net_uninit(VMXNET3State *s)
{
    g_free(s->mcast_list);
    vmxnet_tx_pkt_reset(s->tx_pkt);
    vmxnet_tx_pkt_uninit(s->tx_pkt);
    vmxnet_rx_pkt_uninit(s->rx_pkt);
    qemu_del_net_client(qemu_get_queue(s->nic));
}
1886 |
|
1887 |
/*
 * One-time networking initialization: pick a MAC, register the NIC with
 * the net layer, probe the peer for virtio-header support and configure
 * the tap backend accordingly.
 */
static void vmxnet3_net_init(VMXNET3State *s)
{
    DeviceState *d = DEVICE(s);

    VMW_CBPRN("vmxnet3_net_init called...");

    qemu_macaddr_default_if_unset(&s->conf.macaddr);

    /* Windows guest will query the address that was set on init */
    memcpy(&s->perm_mac.a, &s->conf.macaddr.a, sizeof(s->perm_mac.a));

    s->mcast_list = NULL;
    s->mcast_list_len = 0;

    s->link_status_and_speed = VMXNET3_LINK_SPEED | VMXNET3_LINK_STATUS_UP;

    VMW_CFPRN("Permanent MAC: " MAC_FMT, MAC_ARG(s->perm_mac.a));

    s->nic = qemu_new_nic(&net_vmxnet3_info, &s->conf,
                          object_get_typename(OBJECT(s)),
                          d->id, s);

    s->peer_has_vhdr = vmxnet3_peer_has_vnet_hdr(s);
    s->tx_sop = true;
    s->skip_current_tx_pkt = false;
    s->tx_pkt = NULL;
    s->rx_pkt = NULL;
    s->rx_vlan_stripping = false;
    s->lro_supported = false;

    if (s->peer_has_vhdr) {
        /* Tell the tap backend to prepend a virtio-net header */
        tap_set_vnet_hdr_len(qemu_get_queue(s->nic)->peer,
                             sizeof(struct virtio_net_hdr));

        tap_using_vnet_hdr(qemu_get_queue(s->nic)->peer, 1);
    }

    qemu_format_nic_info_str(qemu_get_queue(s->nic), s->conf.macaddr.a);
}
1926 |
|
1927 |
static void |
1928 |
vmxnet3_unuse_msix_vectors(VMXNET3State *s, int num_vectors)
|
1929 |
{ |
1930 |
PCIDevice *d = PCI_DEVICE(s); |
1931 |
int i;
|
1932 |
for (i = 0; i < num_vectors; i++) { |
1933 |
msix_vector_unuse(d, i); |
1934 |
} |
1935 |
} |
1936 |
|
1937 |
static bool |
1938 |
vmxnet3_use_msix_vectors(VMXNET3State *s, int num_vectors)
|
1939 |
{ |
1940 |
PCIDevice *d = PCI_DEVICE(s); |
1941 |
int i;
|
1942 |
for (i = 0; i < num_vectors; i++) { |
1943 |
int res = msix_vector_use(d, i);
|
1944 |
if (0 > res) { |
1945 |
VMW_WRPRN("Failed to use MSI-X vector %d, error %d", i, res);
|
1946 |
vmxnet3_unuse_msix_vectors(s, i); |
1947 |
return false; |
1948 |
} |
1949 |
} |
1950 |
return true; |
1951 |
} |
1952 |
|
1953 |
static bool |
1954 |
vmxnet3_init_msix(VMXNET3State *s) |
1955 |
{ |
1956 |
PCIDevice *d = PCI_DEVICE(s); |
1957 |
int res = msix_init(d, VMXNET3_MAX_INTRS,
|
1958 |
&s->msix_bar, |
1959 |
VMXNET3_MSIX_BAR_IDX, VMXNET3_OFF_MSIX_TABLE, |
1960 |
&s->msix_bar, |
1961 |
VMXNET3_MSIX_BAR_IDX, VMXNET3_OFF_MSIX_PBA, |
1962 |
0);
|
1963 |
|
1964 |
if (0 > res) { |
1965 |
VMW_WRPRN("Failed to initialize MSI-X, error %d", res);
|
1966 |
s->msix_used = false;
|
1967 |
} else {
|
1968 |
if (!vmxnet3_use_msix_vectors(s, VMXNET3_MAX_INTRS)) {
|
1969 |
VMW_WRPRN("Failed to use MSI-X vectors, error %d", res);
|
1970 |
msix_uninit(d, &s->msix_bar, &s->msix_bar); |
1971 |
s->msix_used = false;
|
1972 |
} else {
|
1973 |
s->msix_used = true;
|
1974 |
} |
1975 |
} |
1976 |
return s->msix_used;
|
1977 |
} |
1978 |
|
1979 |
static void |
1980 |
vmxnet3_cleanup_msix(VMXNET3State *s) |
1981 |
{ |
1982 |
PCIDevice *d = PCI_DEVICE(s); |
1983 |
|
1984 |
if (s->msix_used) {
|
1985 |
msix_vector_unuse(d, VMXNET3_MAX_INTRS); |
1986 |
msix_uninit(d, &s->msix_bar, &s->msix_bar); |
1987 |
} |
1988 |
} |
1989 |
|
1990 |
#define VMXNET3_MSI_NUM_VECTORS (1) |
1991 |
#define VMXNET3_MSI_OFFSET (0x50) |
1992 |
#define VMXNET3_USE_64BIT (true) |
1993 |
#define VMXNET3_PER_VECTOR_MASK (false) |
1994 |
|
1995 |
static bool |
1996 |
vmxnet3_init_msi(VMXNET3State *s) |
1997 |
{ |
1998 |
PCIDevice *d = PCI_DEVICE(s); |
1999 |
int res;
|
2000 |
|
2001 |
res = msi_init(d, VMXNET3_MSI_OFFSET, VMXNET3_MSI_NUM_VECTORS, |
2002 |
VMXNET3_USE_64BIT, VMXNET3_PER_VECTOR_MASK); |
2003 |
if (0 > res) { |
2004 |
VMW_WRPRN("Failed to initialize MSI, error %d", res);
|
2005 |
s->msi_used = false;
|
2006 |
} else {
|
2007 |
s->msi_used = true;
|
2008 |
} |
2009 |
|
2010 |
return s->msi_used;
|
2011 |
} |
2012 |
|
2013 |
static void |
2014 |
vmxnet3_cleanup_msi(VMXNET3State *s) |
2015 |
{ |
2016 |
PCIDevice *d = PCI_DEVICE(s); |
2017 |
|
2018 |
if (s->msi_used) {
|
2019 |
msi_uninit(d); |
2020 |
} |
2021 |
} |
2022 |
|
2023 |
static void |
2024 |
vmxnet3_msix_save(QEMUFile *f, void *opaque)
|
2025 |
{ |
2026 |
PCIDevice *d = PCI_DEVICE(opaque); |
2027 |
msix_save(d, f); |
2028 |
} |
2029 |
|
2030 |
static int |
2031 |
vmxnet3_msix_load(QEMUFile *f, void *opaque, int version_id) |
2032 |
{ |
2033 |
PCIDevice *d = PCI_DEVICE(opaque); |
2034 |
msix_load(d, f); |
2035 |
return 0; |
2036 |
} |
2037 |
|
2038 |
/* BAR0 ("PT" register window) MMIO ops; all accesses are performed as
 * aligned 32-bit reads/writes regardless of guest access size. */
static const MemoryRegionOps b0_ops = {
    .read = vmxnet3_io_bar0_read,
    .write = vmxnet3_io_bar0_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .impl = {
            .min_access_size = 4,
            .max_access_size = 4,
    },
};
2047 |
|
2048 |
/* BAR1 ("VD" register window) MMIO ops; 32-bit access only, as above. */
static const MemoryRegionOps b1_ops = {
    .read = vmxnet3_io_bar1_read,
    .write = vmxnet3_io_bar1_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .impl = {
            .min_access_size = 4,
            .max_access_size = 4,
    },
};
2057 |
|
2058 |
/* PCI init (realize) callback: register the three BARs (PT registers,
 * VD registers, and the MSI-X table/PBA container), set up interrupt
 * state and delivery modes, create the NIC and register the separate
 * MSI-X savevm section. Always returns 0. */
static int vmxnet3_pci_init(PCIDevice *pci_dev)
{
    DeviceState *dev = DEVICE(pci_dev);
    VMXNET3State *s = VMXNET3(pci_dev);

    VMW_CBPRN("Starting init...");

    memory_region_init_io(&s->bar0, &b0_ops, s,
                          "vmxnet3-b0", VMXNET3_PT_REG_SIZE);
    pci_register_bar(pci_dev, VMXNET3_BAR0_IDX,
                     PCI_BASE_ADDRESS_SPACE_MEMORY, &s->bar0);

    memory_region_init_io(&s->bar1, &b1_ops, s,
                          "vmxnet3-b1", VMXNET3_VD_REG_SIZE);
    pci_register_bar(pci_dev, VMXNET3_BAR1_IDX,
                     PCI_BASE_ADDRESS_SPACE_MEMORY, &s->bar1);

    /* Plain container region; msix_init() maps the table and PBA into it. */
    memory_region_init(&s->msix_bar, "vmxnet3-msix-bar",
                       VMXNET3_MSIX_BAR_SIZE);
    pci_register_bar(pci_dev, VMXNET3_MSIX_BAR_IDX,
                     PCI_BASE_ADDRESS_SPACE_MEMORY, &s->msix_bar);

    vmxnet3_reset_interrupt_states(s);

    /* Interrupt pin A */
    pci_dev->config[PCI_INTERRUPT_PIN] = 0x01;

    /* MSI-X/MSI setup failures are logged but non-fatal; the device can
     * still signal interrupts via legacy INTx. */
    if (!vmxnet3_init_msix(s)) {
        VMW_WRPRN("Failed to initialize MSI-X, configuration is inconsistent.");
    }

    if (!vmxnet3_init_msi(s)) {
        VMW_WRPRN("Failed to initialize MSI, configuration is inconsistent.");
    }

    vmxnet3_net_init(s);

    /* MSI-X state migrates via its own savevm section, not vmstate_vmxnet3. */
    register_savevm(dev, "vmxnet3-msix", -1, 1,
                    vmxnet3_msix_save, vmxnet3_msix_load, s);

    add_boot_device_path(s->conf.bootindex, dev, "/ethernet-phy@0");

    return 0;
}
2102 |
|
2103 |
|
2104 |
/* PCI exit callback: unwind vmxnet3_pci_init() in reverse order —
 * savevm section, network state, MSI-X/MSI, then the BAR regions. */
static void vmxnet3_pci_uninit(PCIDevice *pci_dev)
{
    DeviceState *dev = DEVICE(pci_dev);
    VMXNET3State *s = VMXNET3(pci_dev);

    VMW_CBPRN("Starting uninit...");

    unregister_savevm(dev, "vmxnet3-msix", s);

    vmxnet3_net_uninit(s);

    vmxnet3_cleanup_msix(s);

    vmxnet3_cleanup_msi(s);

    memory_region_destroy(&s->bar0);
    memory_region_destroy(&s->bar1);
    memory_region_destroy(&s->msix_bar);
}
2123 |
|
2124 |
/* qdev reset hook — delegates to the device-level reset routine. */
static void vmxnet3_qdev_reset(DeviceState *dev)
{
    VMXNET3State *s = VMXNET3(PCI_DEVICE(dev));

    VMW_CBPRN("Starting QDEV reset...");
    vmxnet3_reset(s);
}
2132 |
|
2133 |
/* Subsection predicate: the multicast-list subsection is always sent. */
static bool vmxnet3_mc_list_needed(void *opaque)
{
    return true;
}
2137 |
|
2138 |
/* pre_load hook for the mcast_list subsection: allocate the destination
 * buffer (sized by the already-loaded mcast_list_buff_size field) before
 * VMSTATE_VBUFFER_UINT32 copies the migrated data into it. */
static int vmxnet3_mcast_list_pre_load(void *opaque)
{
    VMXNET3State *s = opaque;

    s->mcast_list = g_malloc(s->mcast_list_buff_size);

    return 0;
}
2146 |
|
2147 |
|
2148 |
/* pre_save hook: derive the byte size of the multicast list so the
 * variable-length buffer field can be serialised. */
static void vmxnet3_pre_save(void *opaque)
{
    VMXNET3State *s = opaque;

    s->mcast_list_buff_size = s->mcast_list_len * sizeof(MACAddr);
}
2154 |
|
2155 |
/* Migration subsection carrying the variable-length multicast MAC list.
 * mcast_list_buff_size (migrated in the main section) sizes the buffer;
 * pre_load allocates it before the data is read. */
static const VMStateDescription vmxstate_vmxnet3_mcast_list = {
    .name = "vmxnet3/mcast_list",
    .version_id = 1,
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .pre_load = vmxnet3_mcast_list_pre_load,
    .fields = (VMStateField[]) {
        VMSTATE_VBUFFER_UINT32(mcast_list, VMXNET3State, 0, NULL, 0,
            mcast_list_buff_size),
        VMSTATE_END_OF_LIST()
    }
};
2167 |
|
2168 |
/* Deserialise one Vmxnet3Ring. Field order defines the migration wire
 * format and must mirror vmxnet3_put_ring_to_file(). */
static void vmxnet3_get_ring_from_file(QEMUFile *f, Vmxnet3Ring *r)
{
    r->pa = qemu_get_be64(f);
    r->size = qemu_get_be32(f);
    r->cell_size = qemu_get_be32(f);
    r->next = qemu_get_be32(f);
    r->gen = qemu_get_byte(f);
}
2176 |
|
2177 |
/* Serialise one Vmxnet3Ring; must mirror vmxnet3_get_ring_from_file(). */
static void vmxnet3_put_ring_to_file(QEMUFile *f, Vmxnet3Ring *r)
{
    qemu_put_be64(f, r->pa);
    qemu_put_be32(f, r->size);
    qemu_put_be32(f, r->cell_size);
    qemu_put_be32(f, r->next);
    qemu_put_byte(f, r->gen);
}
2185 |
|
2186 |
/* Deserialise UPT1_TxStats; field order is the wire format and must
 * mirror vmxnet3_put_tx_stats_to_file(). */
static void vmxnet3_get_tx_stats_from_file(QEMUFile *f,
                                           struct UPT1_TxStats *tx_stat)
{
    tx_stat->TSOPktsTxOK = qemu_get_be64(f);
    tx_stat->TSOBytesTxOK = qemu_get_be64(f);
    tx_stat->ucastPktsTxOK = qemu_get_be64(f);
    tx_stat->ucastBytesTxOK = qemu_get_be64(f);
    tx_stat->mcastPktsTxOK = qemu_get_be64(f);
    tx_stat->mcastBytesTxOK = qemu_get_be64(f);
    tx_stat->bcastPktsTxOK = qemu_get_be64(f);
    tx_stat->bcastBytesTxOK = qemu_get_be64(f);
    tx_stat->pktsTxError = qemu_get_be64(f);
    tx_stat->pktsTxDiscard = qemu_get_be64(f);
}
2200 |
|
2201 |
/* Serialise UPT1_TxStats; must mirror vmxnet3_get_tx_stats_from_file(). */
static void vmxnet3_put_tx_stats_to_file(QEMUFile *f,
                                         struct UPT1_TxStats *tx_stat)
{
    qemu_put_be64(f, tx_stat->TSOPktsTxOK);
    qemu_put_be64(f, tx_stat->TSOBytesTxOK);
    qemu_put_be64(f, tx_stat->ucastPktsTxOK);
    qemu_put_be64(f, tx_stat->ucastBytesTxOK);
    qemu_put_be64(f, tx_stat->mcastPktsTxOK);
    qemu_put_be64(f, tx_stat->mcastBytesTxOK);
    qemu_put_be64(f, tx_stat->bcastPktsTxOK);
    qemu_put_be64(f, tx_stat->bcastBytesTxOK);
    qemu_put_be64(f, tx_stat->pktsTxError);
    qemu_put_be64(f, tx_stat->pktsTxDiscard);
}
2215 |
|
2216 |
/* VMStateInfo .get for one Vmxnet3TxqDescr (rings, interrupt index,
 * stats physical address, then stats block). Mirrors
 * vmxnet3_put_txq_descr(). */
static int vmxnet3_get_txq_descr(QEMUFile *f, void *pv, size_t size)
{
    Vmxnet3TxqDescr *r = pv;

    vmxnet3_get_ring_from_file(f, &r->tx_ring);
    vmxnet3_get_ring_from_file(f, &r->comp_ring);
    r->intr_idx = qemu_get_byte(f);
    r->tx_stats_pa = qemu_get_be64(f);

    vmxnet3_get_tx_stats_from_file(f, &r->txq_stats);

    return 0;
}
2229 |
|
2230 |
/* VMStateInfo .put for one Vmxnet3TxqDescr; mirrors
 * vmxnet3_get_txq_descr(). */
static void vmxnet3_put_txq_descr(QEMUFile *f, void *pv, size_t size)
{
    Vmxnet3TxqDescr *r = pv;

    vmxnet3_put_ring_to_file(f, &r->tx_ring);
    vmxnet3_put_ring_to_file(f, &r->comp_ring);
    qemu_put_byte(f, r->intr_idx);
    qemu_put_be64(f, r->tx_stats_pa);
    vmxnet3_put_tx_stats_to_file(f, &r->txq_stats);
}
2240 |
|
2241 |
/* Custom VMStateInfo used by VMSTATE_ARRAY(txq_descr, ...) below. */
const VMStateInfo txq_descr_info = {
    .name = "txq_descr",
    .get = vmxnet3_get_txq_descr,
    .put = vmxnet3_put_txq_descr
};
2246 |
|
2247 |
/* Deserialise UPT1_RxStats; field order is the wire format and must
 * mirror vmxnet3_put_rx_stats_to_file(). */
static void vmxnet3_get_rx_stats_from_file(QEMUFile *f,
                                           struct UPT1_RxStats *rx_stat)
{
    rx_stat->LROPktsRxOK = qemu_get_be64(f);
    rx_stat->LROBytesRxOK = qemu_get_be64(f);
    rx_stat->ucastPktsRxOK = qemu_get_be64(f);
    rx_stat->ucastBytesRxOK = qemu_get_be64(f);
    rx_stat->mcastPktsRxOK = qemu_get_be64(f);
    rx_stat->mcastBytesRxOK = qemu_get_be64(f);
    rx_stat->bcastPktsRxOK = qemu_get_be64(f);
    rx_stat->bcastBytesRxOK = qemu_get_be64(f);
    rx_stat->pktsRxOutOfBuf = qemu_get_be64(f);
    rx_stat->pktsRxError = qemu_get_be64(f);
}
2261 |
|
2262 |
/* Serialise UPT1_RxStats; must mirror vmxnet3_get_rx_stats_from_file(). */
static void vmxnet3_put_rx_stats_to_file(QEMUFile *f,
                                         struct UPT1_RxStats *rx_stat)
{
    qemu_put_be64(f, rx_stat->LROPktsRxOK);
    qemu_put_be64(f, rx_stat->LROBytesRxOK);
    qemu_put_be64(f, rx_stat->ucastPktsRxOK);
    qemu_put_be64(f, rx_stat->ucastBytesRxOK);
    qemu_put_be64(f, rx_stat->mcastPktsRxOK);
    qemu_put_be64(f, rx_stat->mcastBytesRxOK);
    qemu_put_be64(f, rx_stat->bcastPktsRxOK);
    qemu_put_be64(f, rx_stat->bcastBytesRxOK);
    qemu_put_be64(f, rx_stat->pktsRxOutOfBuf);
    qemu_put_be64(f, rx_stat->pktsRxError);
}
2276 |
|
2277 |
/* VMStateInfo .get for one Vmxnet3RxqDescr (all rx rings, completion
 * ring, interrupt index, stats PA, stats block). Mirrors
 * vmxnet3_put_rxq_descr(). */
static int vmxnet3_get_rxq_descr(QEMUFile *f, void *pv, size_t size)
{
    Vmxnet3RxqDescr *r = pv;
    int i;

    for (i = 0; i < VMXNET3_RX_RINGS_PER_QUEUE; i++) {
        vmxnet3_get_ring_from_file(f, &r->rx_ring[i]);
    }

    vmxnet3_get_ring_from_file(f, &r->comp_ring);
    r->intr_idx = qemu_get_byte(f);
    r->rx_stats_pa = qemu_get_be64(f);

    vmxnet3_get_rx_stats_from_file(f, &r->rxq_stats);

    return 0;
}
2294 |
|
2295 |
/* VMStateInfo .put for one Vmxnet3RxqDescr; mirrors
 * vmxnet3_get_rxq_descr(). */
static void vmxnet3_put_rxq_descr(QEMUFile *f, void *pv, size_t size)
{
    Vmxnet3RxqDescr *r = pv;
    int i;

    for (i = 0; i < VMXNET3_RX_RINGS_PER_QUEUE; i++) {
        vmxnet3_put_ring_to_file(f, &r->rx_ring[i]);
    }

    vmxnet3_put_ring_to_file(f, &r->comp_ring);
    qemu_put_byte(f, r->intr_idx);
    qemu_put_be64(f, r->rx_stats_pa);
    vmxnet3_put_rx_stats_to_file(f, &r->rxq_stats);
}
2309 |
|
2310 |
/* post_load hook: rebuild runtime state that is not migrated — the
 * tx/rx packet helpers — and, if MSI-X was active on the source,
 * re-claim its vectors on the destination. Returns -1 (failing the
 * migration) if the vectors cannot be re-used. */
static int vmxnet3_post_load(void *opaque, int version_id)
{
    VMXNET3State *s = opaque;
    PCIDevice *d = PCI_DEVICE(s);

    vmxnet_tx_pkt_init(&s->tx_pkt, s->max_tx_frags, s->peer_has_vhdr);
    vmxnet_rx_pkt_init(&s->rx_pkt, s->peer_has_vhdr);

    if (s->msix_used) {
        if (!vmxnet3_use_msix_vectors(s, VMXNET3_MAX_INTRS)) {
            VMW_WRPRN("Failed to re-use MSI-X vectors");
            msix_uninit(d, &s->msix_bar, &s->msix_bar);
            s->msix_used = false;
            return -1;
        }
    }

    return 0;
}
2329 |
|
2330 |
/* Custom VMStateInfo used by VMSTATE_ARRAY(rxq_descr, ...) below. */
const VMStateInfo rxq_descr_info = {
    .name = "rxq_descr",
    .get = vmxnet3_get_rxq_descr,
    .put = vmxnet3_put_rxq_descr
};
2335 |
|
2336 |
/* VMStateInfo .get for one interrupt's state; field order mirrors
 * vmxnet3_put_int_state(). */
static int vmxnet3_get_int_state(QEMUFile *f, void *pv, size_t size)
{
    Vmxnet3IntState *r = pv;

    r->is_masked = qemu_get_byte(f);
    r->is_pending = qemu_get_byte(f);
    r->is_asserted = qemu_get_byte(f);

    return 0;
}
2346 |
|
2347 |
/* VMStateInfo .put for one interrupt's state; mirrors
 * vmxnet3_get_int_state(). */
static void vmxnet3_put_int_state(QEMUFile *f, void *pv, size_t size)
{
    Vmxnet3IntState *r = pv;

    qemu_put_byte(f, r->is_masked);
    qemu_put_byte(f, r->is_pending);
    qemu_put_byte(f, r->is_asserted);
}
2355 |
|
2356 |
/* Custom VMStateInfo used by VMSTATE_ARRAY(interrupt_states, ...). */
const VMStateInfo int_state_info = {
    .name = "int_state",
    .get = vmxnet3_get_int_state,
    .put = vmxnet3_put_int_state
};
2361 |
|
2362 |
/* Main migration descriptor. Field order and types define the wire
 * format — do not reorder or retype without bumping version_id. The
 * multicast list travels in a subsection (always sent, see
 * vmxnet3_mc_list_needed). */
static const VMStateDescription vmstate_vmxnet3 = {
    .name = "vmxnet3",
    .version_id = 1,
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .pre_save = vmxnet3_pre_save,
    .post_load = vmxnet3_post_load,
    .fields = (VMStateField[]) {
            VMSTATE_PCI_DEVICE(parent_obj, VMXNET3State),
            VMSTATE_BOOL(rx_packets_compound, VMXNET3State),
            VMSTATE_BOOL(rx_vlan_stripping, VMXNET3State),
            VMSTATE_BOOL(lro_supported, VMXNET3State),
            VMSTATE_UINT32(rx_mode, VMXNET3State),
            VMSTATE_UINT32(mcast_list_len, VMXNET3State),
            VMSTATE_UINT32(mcast_list_buff_size, VMXNET3State),
            VMSTATE_UINT32_ARRAY(vlan_table, VMXNET3State, VMXNET3_VFT_SIZE),
            VMSTATE_UINT32(mtu, VMXNET3State),
            VMSTATE_UINT16(max_rx_frags, VMXNET3State),
            VMSTATE_UINT32(max_tx_frags, VMXNET3State),
            VMSTATE_UINT8(event_int_idx, VMXNET3State),
            VMSTATE_BOOL(auto_int_masking, VMXNET3State),
            VMSTATE_UINT8(txq_num, VMXNET3State),
            VMSTATE_UINT8(rxq_num, VMXNET3State),
            VMSTATE_UINT32(device_active, VMXNET3State),
            VMSTATE_UINT32(last_command, VMXNET3State),
            VMSTATE_UINT32(link_status_and_speed, VMXNET3State),
            VMSTATE_UINT32(temp_mac, VMXNET3State),
            VMSTATE_UINT64(drv_shmem, VMXNET3State),
            VMSTATE_UINT64(temp_shared_guest_driver_memory, VMXNET3State),

            /* Queue descriptors and interrupt states use the custom
             * VMStateInfo get/put pairs defined above. */
            VMSTATE_ARRAY(txq_descr, VMXNET3State,
                VMXNET3_DEVICE_MAX_TX_QUEUES, 0, txq_descr_info,
                Vmxnet3TxqDescr),
            VMSTATE_ARRAY(rxq_descr, VMXNET3State,
                VMXNET3_DEVICE_MAX_RX_QUEUES, 0, rxq_descr_info,
                Vmxnet3RxqDescr),
            VMSTATE_ARRAY(interrupt_states, VMXNET3State, VMXNET3_MAX_INTRS,
                          0, int_state_info, Vmxnet3IntState),

            VMSTATE_END_OF_LIST()
    },
    .subsections = (VMStateSubsection[]) {
        {
            .vmsd = &vmxstate_vmxnet3_mcast_list,
            .needed = vmxnet3_mc_list_needed
        },
        {
            /* empty element. */
        }
    }
};
2413 |
|
2414 |
/* Config-space write hook: apply the default handling first, then let
 * the MSI-X and MSI layers react to capability/enable-bit changes. */
static void
vmxnet3_write_config(PCIDevice *pci_dev, uint32_t addr, uint32_t val, int len)
{
    pci_default_write_config(pci_dev, addr, val, len);
    msix_write_config(pci_dev, addr, val, len);
    msi_write_config(pci_dev, addr, val, len);
}
2421 |
|
2422 |
/* qdev properties: just the standard NIC configuration (mac, netdev, ...). */
static Property vmxnet3_properties[] = {
    DEFINE_NIC_PROPERTIES(VMXNET3State, conf),
    DEFINE_PROP_END_OF_LIST(),
};
2426 |
|
2427 |
/* Class init: wire up PCI identity, lifecycle callbacks, migration
 * descriptor and properties for the vmxnet3 device type. */
static void vmxnet3_class_init(ObjectClass *class, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(class);
    PCIDeviceClass *c = PCI_DEVICE_CLASS(class);

    c->init = vmxnet3_pci_init;
    c->exit = vmxnet3_pci_uninit;
    c->vendor_id = PCI_VENDOR_ID_VMWARE;
    c->device_id = PCI_DEVICE_ID_VMWARE_VMXNET3;
    c->revision = PCI_DEVICE_ID_VMWARE_VMXNET3_REVISION;
    c->class_id = PCI_CLASS_NETWORK_ETHERNET;
    c->subsystem_vendor_id = PCI_VENDOR_ID_VMWARE;
    c->subsystem_id = PCI_DEVICE_ID_VMWARE_VMXNET3;
    /* Fix: the original ended this statement with a comma, silently
     * chaining the next assignment via the comma operator. Behavior was
     * unchanged only by accident; use a semicolon. */
    c->config_write = vmxnet3_write_config;
    dc->desc = "VMWare Paravirtualized Ethernet v3";
    dc->reset = vmxnet3_qdev_reset;
    dc->vmsd = &vmstate_vmxnet3;
    dc->props = vmxnet3_properties;
}
2446 |
|
2447 |
/* QOM type registration record for the vmxnet3 PCI device. */
static const TypeInfo vmxnet3_info = {
    .name = TYPE_VMXNET3,
    .parent = TYPE_PCI_DEVICE,
    .instance_size = sizeof(VMXNET3State),
    .class_init = vmxnet3_class_init,
};
2453 |
|
2454 |
/* type_init hook: register the vmxnet3 QOM type with the type system. */
static void vmxnet3_register_types(void)
{
    VMW_CBPRN("vmxnet3_register_types called...");
    type_register_static(&vmxnet3_info);
}
2459 |
|
2460 |
type_init(vmxnet3_register_types) |