hw/virtio-net.c @ 948ecf21
/*
 * Virtio Network Device
 *
 * Copyright IBM, Corp. 2007
 *
 * Authors:
 *  Anthony Liguori   <aliguori@us.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */

#include "qemu/iov.h"
#include "virtio.h"
#include "net/net.h"
#include "net/checksum.h"
#include "net/tap.h"
#include "qemu/error-report.h"
#include "qemu/timer.h"
#include "virtio-net.h"
#include "vhost_net.h"

#define VIRTIO_NET_VM_VERSION    11

#define MAC_TABLE_ENTRIES    64
#define MAX_VLAN    (1 << 12)   /* Per 802.1Q definition */

typedef struct VirtIONet
{
    VirtIODevice vdev;
    uint8_t mac[ETH_ALEN];
    uint16_t status;
    VirtQueue *rx_vq;
    VirtQueue *tx_vq;
    VirtQueue *ctrl_vq;
    NICState *nic;
    QEMUTimer *tx_timer;
    QEMUBH *tx_bh;
    uint32_t tx_timeout;
    int32_t tx_burst;
    int tx_waiting;
    uint32_t has_vnet_hdr;
    size_t host_hdr_len;
    size_t guest_hdr_len;
    uint8_t has_ufo;
    struct {
        VirtQueueElement elem;
        ssize_t len;
    } async_tx;
    int mergeable_rx_bufs;
    uint8_t promisc;
    uint8_t allmulti;
    uint8_t alluni;
    uint8_t nomulti;
    uint8_t nouni;
    uint8_t nobcast;
    uint8_t vhost_started;
    struct {
        int in_use;
        int first_multi;
        uint8_t multi_overflow;
        uint8_t uni_overflow;
        uint8_t *macs;
    } mac_table;
    uint32_t *vlans;
    DeviceState *qdev;
} VirtIONet;

/* TODO
 * - we could suppress RX interrupt if we were so inclined.
 */

static VirtIONet *to_virtio_net(VirtIODevice *vdev)
{
    return (VirtIONet *)vdev;
}

static void virtio_net_get_config(VirtIODevice *vdev, uint8_t *config)
{
    VirtIONet *n = to_virtio_net(vdev);
    struct virtio_net_config netcfg;

    stw_p(&netcfg.status, n->status);
    memcpy(netcfg.mac, n->mac, ETH_ALEN);
    memcpy(config, &netcfg, sizeof(netcfg));
}

static void virtio_net_set_config(VirtIODevice *vdev, const uint8_t *config)
{
    VirtIONet *n = to_virtio_net(vdev);
    struct virtio_net_config netcfg;

    memcpy(&netcfg, config, sizeof(netcfg));

    if (!(n->vdev.guest_features >> VIRTIO_NET_F_CTRL_MAC_ADDR & 1) &&
        memcmp(netcfg.mac, n->mac, ETH_ALEN)) {
        memcpy(n->mac, netcfg.mac, ETH_ALEN);
        qemu_format_nic_info_str(qemu_get_queue(n->nic), n->mac);
    }
}

static bool virtio_net_started(VirtIONet *n, uint8_t status)
{
    return (status & VIRTIO_CONFIG_S_DRIVER_OK) &&
        (n->status & VIRTIO_NET_S_LINK_UP) && n->vdev.vm_running;
}

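/* Keep the vhost-net backend in sync with the guest driver state: start
 * vhost while the driver is DRIVER_OK and the link is up, stop it (falling
 * back to userspace virtio) otherwise. */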
static void virtio_net_vhost_status(VirtIONet *n, uint8_t status)
{
    NetClientState *nc = qemu_get_queue(n->nic);

    if (!nc->peer) {
        return;
    }
    if (nc->peer->info->type != NET_CLIENT_OPTIONS_KIND_TAP) {
        return;
    }

    if (!tap_get_vhost_net(nc->peer)) {
        return;
    }
    if (!!n->vhost_started == virtio_net_started(n, status) &&
                              !nc->peer->link_down) {
        return;
    }
    if (!n->vhost_started) {
        int r;
        if (!vhost_net_query(tap_get_vhost_net(nc->peer), &n->vdev)) {
            return;
        }
        n->vhost_started = 1;
        r = vhost_net_start(tap_get_vhost_net(nc->peer), &n->vdev);
        if (r < 0) {
            error_report("unable to start vhost net: %d: "
                         "falling back on userspace virtio", -r);
            n->vhost_started = 0;
        }
    } else {
        vhost_net_stop(tap_get_vhost_net(nc->peer), &n->vdev);
        n->vhost_started = 0;
    }
}

static void virtio_net_set_status(struct VirtIODevice *vdev, uint8_t status)
{
    VirtIONet *n = to_virtio_net(vdev);

    virtio_net_vhost_status(n, status);

    if (!n->tx_waiting) {
        return;
    }

    if (virtio_net_started(n, status) && !n->vhost_started) {
        if (n->tx_timer) {
            qemu_mod_timer(n->tx_timer,
                           qemu_get_clock_ns(vm_clock) + n->tx_timeout);
        } else {
            qemu_bh_schedule(n->tx_bh);
        }
    } else {
        if (n->tx_timer) {
            qemu_del_timer(n->tx_timer);
        } else {
            qemu_bh_cancel(n->tx_bh);
        }
    }
}

static void virtio_net_set_link_status(NetClientState *nc)
{
    VirtIONet *n = qemu_get_nic_opaque(nc);
    uint16_t old_status = n->status;

    if (nc->link_down)
        n->status &= ~VIRTIO_NET_S_LINK_UP;
    else
        n->status |= VIRTIO_NET_S_LINK_UP;

    if (n->status != old_status)
        virtio_notify_config(&n->vdev);

    virtio_net_set_status(&n->vdev, n->vdev.status);
}

static void virtio_net_reset(VirtIODevice *vdev)
{
    VirtIONet *n = to_virtio_net(vdev);

    /* Reset back to compatibility mode */
    n->promisc = 1;
    n->allmulti = 0;
    n->alluni = 0;
    n->nomulti = 0;
    n->nouni = 0;
    n->nobcast = 0;

    /* Flush any MAC and VLAN filter table state */
    n->mac_table.in_use = 0;
    n->mac_table.first_multi = 0;
    n->mac_table.multi_overflow = 0;
    n->mac_table.uni_overflow = 0;
    memset(n->mac_table.macs, 0, MAC_TABLE_ENTRIES * ETH_ALEN);
    memcpy(&n->mac[0], &n->nic->conf->macaddr, sizeof(n->mac));
    memset(n->vlans, 0, MAX_VLAN >> 3);
}

static void peer_test_vnet_hdr(VirtIONet *n)
{
    NetClientState *nc = qemu_get_queue(n->nic);
    if (!nc->peer) {
        return;
    }

    if (nc->peer->info->type != NET_CLIENT_OPTIONS_KIND_TAP) {
        return;
    }

    n->has_vnet_hdr = tap_has_vnet_hdr(nc->peer);
}

static int peer_has_vnet_hdr(VirtIONet *n)
{
    return n->has_vnet_hdr;
}

static int peer_has_ufo(VirtIONet *n)
{
    if (!peer_has_vnet_hdr(n))
        return 0;

    n->has_ufo = tap_has_ufo(qemu_get_queue(n->nic)->peer);

    return n->has_ufo;
}

static void virtio_net_set_mrg_rx_bufs(VirtIONet *n, int mergeable_rx_bufs)
{
    n->mergeable_rx_bufs = mergeable_rx_bufs;

    n->guest_hdr_len = n->mergeable_rx_bufs ?
        sizeof(struct virtio_net_hdr_mrg_rxbuf) : sizeof(struct virtio_net_hdr);

    if (peer_has_vnet_hdr(n) &&
        tap_has_vnet_hdr_len(qemu_get_queue(n->nic)->peer, n->guest_hdr_len)) {
        tap_set_vnet_hdr_len(qemu_get_queue(n->nic)->peer, n->guest_hdr_len);
        n->host_hdr_len = n->guest_hdr_len;
    }
}

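/* Advertise only what the backend can actually handle: strip the checksum,
 * TSO and UFO offload bits when the tap peer lacks vnet header support, then
 * let vhost-net, if present, restrict the feature set further. */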
static uint32_t virtio_net_get_features(VirtIODevice *vdev, uint32_t features)
{
    VirtIONet *n = to_virtio_net(vdev);
    NetClientState *nc = qemu_get_queue(n->nic);

    features |= (1 << VIRTIO_NET_F_MAC);

    if (!peer_has_vnet_hdr(n)) {
        features &= ~(0x1 << VIRTIO_NET_F_CSUM);
        features &= ~(0x1 << VIRTIO_NET_F_HOST_TSO4);
        features &= ~(0x1 << VIRTIO_NET_F_HOST_TSO6);
        features &= ~(0x1 << VIRTIO_NET_F_HOST_ECN);

        features &= ~(0x1 << VIRTIO_NET_F_GUEST_CSUM);
        features &= ~(0x1 << VIRTIO_NET_F_GUEST_TSO4);
        features &= ~(0x1 << VIRTIO_NET_F_GUEST_TSO6);
        features &= ~(0x1 << VIRTIO_NET_F_GUEST_ECN);
    }

    if (!peer_has_vnet_hdr(n) || !peer_has_ufo(n)) {
        features &= ~(0x1 << VIRTIO_NET_F_GUEST_UFO);
        features &= ~(0x1 << VIRTIO_NET_F_HOST_UFO);
    }

    if (!nc->peer || nc->peer->info->type != NET_CLIENT_OPTIONS_KIND_TAP) {
        return features;
    }
    if (!tap_get_vhost_net(nc->peer)) {
        return features;
    }
    return vhost_net_get_features(tap_get_vhost_net(nc->peer), features);
}

static uint32_t virtio_net_bad_features(VirtIODevice *vdev)
{
    uint32_t features = 0;

    /* Linux kernel 2.6.25.  It understood MAC (as everyone must),
     * but also these: */
    features |= (1 << VIRTIO_NET_F_MAC);
    features |= (1 << VIRTIO_NET_F_CSUM);
    features |= (1 << VIRTIO_NET_F_HOST_TSO4);
    features |= (1 << VIRTIO_NET_F_HOST_TSO6);
    features |= (1 << VIRTIO_NET_F_HOST_ECN);

    return features;
}

static void virtio_net_set_features(VirtIODevice *vdev, uint32_t features)
{
    VirtIONet *n = to_virtio_net(vdev);
    NetClientState *nc = qemu_get_queue(n->nic);

    virtio_net_set_mrg_rx_bufs(n, !!(features & (1 << VIRTIO_NET_F_MRG_RXBUF)));

    if (n->has_vnet_hdr) {
        tap_set_offload(nc->peer,
                        (features >> VIRTIO_NET_F_GUEST_CSUM) & 1,
                        (features >> VIRTIO_NET_F_GUEST_TSO4) & 1,
                        (features >> VIRTIO_NET_F_GUEST_TSO6) & 1,
                        (features >> VIRTIO_NET_F_GUEST_ECN) & 1,
                        (features >> VIRTIO_NET_F_GUEST_UFO) & 1);
    }
    if (!nc->peer || nc->peer->info->type != NET_CLIENT_OPTIONS_KIND_TAP) {
        return;
    }
    if (!tap_get_vhost_net(nc->peer)) {
        return;
    }
    vhost_net_ack_features(tap_get_vhost_net(nc->peer), features);
}

static int virtio_net_handle_rx_mode(VirtIONet *n, uint8_t cmd,
                                     struct iovec *iov, unsigned int iov_cnt)
{
    uint8_t on;
    size_t s;

    s = iov_to_buf(iov, iov_cnt, 0, &on, sizeof(on));
    if (s != sizeof(on)) {
        return VIRTIO_NET_ERR;
    }

    if (cmd == VIRTIO_NET_CTRL_RX_PROMISC) {
        n->promisc = on;
    } else if (cmd == VIRTIO_NET_CTRL_RX_ALLMULTI) {
        n->allmulti = on;
    } else if (cmd == VIRTIO_NET_CTRL_RX_ALLUNI) {
        n->alluni = on;
    } else if (cmd == VIRTIO_NET_CTRL_RX_NOMULTI) {
        n->nomulti = on;
    } else if (cmd == VIRTIO_NET_CTRL_RX_NOUNI) {
        n->nouni = on;
    } else if (cmd == VIRTIO_NET_CTRL_RX_NOBCAST) {
        n->nobcast = on;
    } else {
        return VIRTIO_NET_ERR;
    }

    return VIRTIO_NET_OK;
}

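/* VIRTIO_NET_CTRL_MAC: either install a new primary MAC address or replace
 * the whole receive filter table (unicast entries first, then multicast);
 * oversized lists merely set the corresponding overflow flag. */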
static int virtio_net_handle_mac(VirtIONet *n, uint8_t cmd,
                                 struct iovec *iov, unsigned int iov_cnt)
{
    struct virtio_net_ctrl_mac mac_data;
    size_t s;

    if (cmd == VIRTIO_NET_CTRL_MAC_ADDR_SET) {
        if (iov_size(iov, iov_cnt) != sizeof(n->mac)) {
            return VIRTIO_NET_ERR;
        }
        s = iov_to_buf(iov, iov_cnt, 0, &n->mac, sizeof(n->mac));
        assert(s == sizeof(n->mac));
        qemu_format_nic_info_str(qemu_get_queue(n->nic), n->mac);
        return VIRTIO_NET_OK;
    }

    if (cmd != VIRTIO_NET_CTRL_MAC_TABLE_SET) {
        return VIRTIO_NET_ERR;
    }

    n->mac_table.in_use = 0;
    n->mac_table.first_multi = 0;
    n->mac_table.uni_overflow = 0;
    n->mac_table.multi_overflow = 0;
    memset(n->mac_table.macs, 0, MAC_TABLE_ENTRIES * ETH_ALEN);

    s = iov_to_buf(iov, iov_cnt, 0, &mac_data.entries,
                   sizeof(mac_data.entries));
    mac_data.entries = ldl_p(&mac_data.entries);
    if (s != sizeof(mac_data.entries)) {
        return VIRTIO_NET_ERR;
    }
    iov_discard_front(&iov, &iov_cnt, s);

    if (mac_data.entries * ETH_ALEN > iov_size(iov, iov_cnt)) {
        return VIRTIO_NET_ERR;
    }

    if (mac_data.entries <= MAC_TABLE_ENTRIES) {
        s = iov_to_buf(iov, iov_cnt, 0, n->mac_table.macs,
                       mac_data.entries * ETH_ALEN);
        if (s != mac_data.entries * ETH_ALEN) {
            return VIRTIO_NET_ERR;
        }
        n->mac_table.in_use += mac_data.entries;
    } else {
        n->mac_table.uni_overflow = 1;
    }

    iov_discard_front(&iov, &iov_cnt, mac_data.entries * ETH_ALEN);

    n->mac_table.first_multi = n->mac_table.in_use;

    s = iov_to_buf(iov, iov_cnt, 0, &mac_data.entries,
                   sizeof(mac_data.entries));
    mac_data.entries = ldl_p(&mac_data.entries);
    if (s != sizeof(mac_data.entries)) {
        return VIRTIO_NET_ERR;
    }

    iov_discard_front(&iov, &iov_cnt, s);

    if (mac_data.entries * ETH_ALEN != iov_size(iov, iov_cnt)) {
        return VIRTIO_NET_ERR;
    }

    if (n->mac_table.in_use + mac_data.entries <= MAC_TABLE_ENTRIES) {
        /* Append multicast entries after the unicast ones so that the
         * first_multi/in_use bookkeeping used by receive_filter() holds. */
        s = iov_to_buf(iov, iov_cnt, 0,
                       &n->mac_table.macs[n->mac_table.in_use * ETH_ALEN],
                       mac_data.entries * ETH_ALEN);
        if (s != mac_data.entries * ETH_ALEN) {
            return VIRTIO_NET_ERR;
        }
        n->mac_table.in_use += mac_data.entries;
    } else {
        n->mac_table.multi_overflow = 1;
    }

    return VIRTIO_NET_OK;
}

static int virtio_net_handle_vlan_table(VirtIONet *n, uint8_t cmd,
                                        struct iovec *iov, unsigned int iov_cnt)
{
    uint16_t vid;
    size_t s;

    s = iov_to_buf(iov, iov_cnt, 0, &vid, sizeof(vid));
    vid = lduw_p(&vid);
    if (s != sizeof(vid)) {
        return VIRTIO_NET_ERR;
    }

    if (vid >= MAX_VLAN)
        return VIRTIO_NET_ERR;

    if (cmd == VIRTIO_NET_CTRL_VLAN_ADD)
        n->vlans[vid >> 5] |= (1U << (vid & 0x1f));
    else if (cmd == VIRTIO_NET_CTRL_VLAN_DEL)
        n->vlans[vid >> 5] &= ~(1U << (vid & 0x1f));
    else
        return VIRTIO_NET_ERR;

    return VIRTIO_NET_OK;
}

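/* Control virtqueue handler: each request carries a virtio_net_ctrl_hdr
 * (class + command) in its out buffers and is answered with a one-byte
 * VIRTIO_NET_OK/VIRTIO_NET_ERR status written back to the in buffer. */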
static void virtio_net_handle_ctrl(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIONet *n = to_virtio_net(vdev);
    struct virtio_net_ctrl_hdr ctrl;
    virtio_net_ctrl_ack status = VIRTIO_NET_ERR;
    VirtQueueElement elem;
    size_t s;
    struct iovec *iov;
    unsigned int iov_cnt;

    while (virtqueue_pop(vq, &elem)) {
        if (iov_size(elem.in_sg, elem.in_num) < sizeof(status) ||
            iov_size(elem.out_sg, elem.out_num) < sizeof(ctrl)) {
            error_report("virtio-net ctrl missing headers");
            exit(1);
        }

        iov = elem.out_sg;
        iov_cnt = elem.out_num;
        s = iov_to_buf(iov, iov_cnt, 0, &ctrl, sizeof(ctrl));
        iov_discard_front(&iov, &iov_cnt, sizeof(ctrl));
        if (s != sizeof(ctrl)) {
            status = VIRTIO_NET_ERR;
        } else if (ctrl.class == VIRTIO_NET_CTRL_RX) {
            status = virtio_net_handle_rx_mode(n, ctrl.cmd, iov, iov_cnt);
        } else if (ctrl.class == VIRTIO_NET_CTRL_MAC) {
            status = virtio_net_handle_mac(n, ctrl.cmd, iov, iov_cnt);
        } else if (ctrl.class == VIRTIO_NET_CTRL_VLAN) {
            status = virtio_net_handle_vlan_table(n, ctrl.cmd, iov, iov_cnt);
        }

        s = iov_from_buf(elem.in_sg, elem.in_num, 0, &status, sizeof(status));
        assert(s == sizeof(status));

        virtqueue_push(vq, &elem, sizeof(status));
        virtio_notify(vdev, vq);
    }
}

/* RX */

static void virtio_net_handle_rx(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIONet *n = to_virtio_net(vdev);

    qemu_flush_queued_packets(qemu_get_queue(n->nic));
}

static int virtio_net_can_receive(NetClientState *nc)
{
    VirtIONet *n = qemu_get_nic_opaque(nc);
    if (!n->vdev.vm_running) {
        return 0;
    }

    if (!virtio_queue_ready(n->rx_vq) ||
        !(n->vdev.status & VIRTIO_CONFIG_S_DRIVER_OK))
        return 0;

    return 1;
}

static int virtio_net_has_buffers(VirtIONet *n, int bufsize)
{
    if (virtio_queue_empty(n->rx_vq) ||
        (n->mergeable_rx_bufs &&
         !virtqueue_avail_bytes(n->rx_vq, bufsize, 0))) {
        virtio_queue_set_notification(n->rx_vq, 1);

        /* To avoid a race condition where the guest has made some buffers
         * available after the above check but before notification was
         * enabled, check for available buffers again.
         */
        if (virtio_queue_empty(n->rx_vq) ||
            (n->mergeable_rx_bufs &&
             !virtqueue_avail_bytes(n->rx_vq, bufsize, 0)))
            return 0;
    }

    virtio_queue_set_notification(n->rx_vq, 0);
    return 1;
}

/* dhclient uses AF_PACKET but doesn't pass auxdata to the kernel so
 * it never finds out that the packets don't have valid checksums.  This
 * causes dhclient to get upset.  Fedora's carried a patch for ages to
 * fix this with Xen but it hasn't appeared in an upstream release of
 * dhclient yet.
 *
 * To avoid breaking existing guests, we catch udp packets and add
 * checksums.  This is terrible but it's better than hacking the guest
 * kernels.
 *
 * N.B. if we introduce a zero-copy API, this operation is no longer free so
 * we should provide a mechanism to disable it to avoid polluting the host
 * cache.
 */
static void work_around_broken_dhclient(struct virtio_net_hdr *hdr,
                                        uint8_t *buf, size_t size)
{
    if ((hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) && /* missing csum */
        (size > 27 && size < 1500) && /* normal sized MTU */
        (buf[12] == 0x08 && buf[13] == 0x00) && /* ethertype == IPv4 */
        (buf[23] == 17) && /* ip.protocol == UDP */
        (buf[34] == 0 && buf[35] == 67)) { /* udp.srcport == bootps */
        net_checksum_calculate(buf, size);
        hdr->flags &= ~VIRTIO_NET_HDR_F_NEEDS_CSUM;
    }
}

static void receive_header(VirtIONet *n, const struct iovec *iov, int iov_cnt,
                           const void *buf, size_t size)
{
    if (n->has_vnet_hdr) {
        /* FIXME this cast is evil */
        void *wbuf = (void *)buf;
        work_around_broken_dhclient(wbuf, wbuf + n->host_hdr_len,
                                    size - n->host_hdr_len);
        iov_from_buf(iov, iov_cnt, 0, buf, sizeof(struct virtio_net_hdr));
    } else {
        struct virtio_net_hdr hdr = {
            .flags = 0,
            .gso_type = VIRTIO_NET_HDR_GSO_NONE
        };
        iov_from_buf(iov, iov_cnt, 0, &hdr, sizeof hdr);
    }
}

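/* Decide whether an incoming packet is delivered to the guest, applying the
 * promiscuous/rx-mode flags, the VLAN bitmap and the unicast and multicast
 * MAC filter table.  Returns nonzero to accept the packet. */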
static int receive_filter(VirtIONet *n, const uint8_t *buf, int size)
{
    static const uint8_t bcast[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
    static const uint8_t vlan[] = {0x81, 0x00};
    uint8_t *ptr = (uint8_t *)buf;
    int i;

    if (n->promisc)
        return 1;

    ptr += n->host_hdr_len;

    if (!memcmp(&ptr[12], vlan, sizeof(vlan))) {
        int vid = be16_to_cpup((uint16_t *)(ptr + 14)) & 0xfff;
        if (!(n->vlans[vid >> 5] & (1U << (vid & 0x1f))))
            return 0;
    }

    if (ptr[0] & 1) { // multicast
        if (!memcmp(ptr, bcast, sizeof(bcast))) {
            return !n->nobcast;
        } else if (n->nomulti) {
            return 0;
        } else if (n->allmulti || n->mac_table.multi_overflow) {
            return 1;
        }

        for (i = n->mac_table.first_multi; i < n->mac_table.in_use; i++) {
            if (!memcmp(ptr, &n->mac_table.macs[i * ETH_ALEN], ETH_ALEN)) {
                return 1;
            }
        }
    } else { // unicast
        if (n->nouni) {
            return 0;
        } else if (n->alluni || n->mac_table.uni_overflow) {
            return 1;
        } else if (!memcmp(ptr, n->mac, ETH_ALEN)) {
            return 1;
        }

        for (i = 0; i < n->mac_table.first_multi; i++) {
            if (!memcmp(ptr, &n->mac_table.macs[i * ETH_ALEN], ETH_ALEN)) {
                return 1;
            }
        }
    }

    return 0;
}

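/* Copy one packet from the host into guest receive buffers.  With mergeable
 * rx buffers a packet may span several descriptor chains; num_buffers in the
 * first chain's header is patched at the end with the number of chains used. */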
static ssize_t virtio_net_receive(NetClientState *nc, const uint8_t *buf, size_t size)
{
    VirtIONet *n = qemu_get_nic_opaque(nc);
    struct iovec mhdr_sg[VIRTQUEUE_MAX_SIZE];
    struct virtio_net_hdr_mrg_rxbuf mhdr;
    unsigned mhdr_cnt = 0;
    size_t offset, i, guest_offset;

    if (!virtio_net_can_receive(qemu_get_queue(n->nic))) {
        return -1;
    }

    /* hdr_len refers to the header we supply to the guest */
    if (!virtio_net_has_buffers(n, size + n->guest_hdr_len - n->host_hdr_len))
        return 0;

    if (!receive_filter(n, buf, size))
        return size;

    offset = i = 0;

    while (offset < size) {
        VirtQueueElement elem;
        int len, total;
        const struct iovec *sg = elem.in_sg;

        total = 0;

        if (virtqueue_pop(n->rx_vq, &elem) == 0) {
            if (i == 0)
                return -1;
            error_report("virtio-net unexpected empty queue: "
                         "i %zd mergeable %d offset %zd, size %zd, "
                         "guest hdr len %zd, host hdr len %zd guest features 0x%x",
                         i, n->mergeable_rx_bufs, offset, size,
                         n->guest_hdr_len, n->host_hdr_len, n->vdev.guest_features);
            exit(1);
        }

        if (elem.in_num < 1) {
            error_report("virtio-net receive queue contains no in buffers");
            exit(1);
        }

        if (i == 0) {
            assert(offset == 0);
            if (n->mergeable_rx_bufs) {
                mhdr_cnt = iov_copy(mhdr_sg, ARRAY_SIZE(mhdr_sg),
                                    sg, elem.in_num,
                                    offsetof(typeof(mhdr), num_buffers),
                                    sizeof(mhdr.num_buffers));
            }

            receive_header(n, sg, elem.in_num, buf, size);
            offset = n->host_hdr_len;
            total += n->guest_hdr_len;
            guest_offset = n->guest_hdr_len;
        } else {
            guest_offset = 0;
        }

        /* copy in packet.  ugh */
        len = iov_from_buf(sg, elem.in_num, guest_offset,
                           buf + offset, size - offset);
        total += len;
        offset += len;
        /* If buffers can't be merged, at this point we
         * must have consumed the complete packet.
         * Otherwise, drop it. */
        if (!n->mergeable_rx_bufs && offset < size) {
#if 0
            error_report("virtio-net truncated non-mergeable packet: "
                         "i %zd mergeable %d offset %zd, size %zd, "
                         "guest hdr len %zd, host hdr len %zd",
                         i, n->mergeable_rx_bufs,
                         offset, size, n->guest_hdr_len, n->host_hdr_len);
#endif
            return size;
        }

        /* signal other side */
        virtqueue_fill(n->rx_vq, &elem, total, i++);
    }

    if (mhdr_cnt) {
        stw_p(&mhdr.num_buffers, i);
        iov_from_buf(mhdr_sg, mhdr_cnt,
                     0,
                     &mhdr.num_buffers, sizeof mhdr.num_buffers);
    }

    virtqueue_flush(n->rx_vq, i);
    virtio_notify(&n->vdev, n->rx_vq);

    return size;
}

static int32_t virtio_net_flush_tx(VirtIONet *n, VirtQueue *vq);

static void virtio_net_tx_complete(NetClientState *nc, ssize_t len)
{
    VirtIONet *n = qemu_get_nic_opaque(nc);

    virtqueue_push(n->tx_vq, &n->async_tx.elem, 0);
    virtio_notify(&n->vdev, n->tx_vq);

    n->async_tx.elem.out_num = n->async_tx.len = 0;

    virtio_queue_set_notification(n->tx_vq, 1);
    virtio_net_flush_tx(n, n->tx_vq);
}

/* TX */
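/* Drain the tx virtqueue, sending at most tx_burst packets per call.  Returns
 * the number of packets sent, or -EBUSY if the backend could not take a
 * packet and completion will arrive via virtio_net_tx_complete(). */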
static int32_t virtio_net_flush_tx(VirtIONet *n, VirtQueue *vq)
{
    VirtQueueElement elem;
    int32_t num_packets = 0;
    if (!(n->vdev.status & VIRTIO_CONFIG_S_DRIVER_OK)) {
        return num_packets;
    }

    assert(n->vdev.vm_running);

    if (n->async_tx.elem.out_num) {
        virtio_queue_set_notification(n->tx_vq, 0);
        return num_packets;
    }

    while (virtqueue_pop(vq, &elem)) {
        ssize_t ret, len;
        unsigned int out_num = elem.out_num;
        struct iovec *out_sg = &elem.out_sg[0];
        struct iovec sg[VIRTQUEUE_MAX_SIZE];

        if (out_num < 1) {
            error_report("virtio-net header not in first element");
            exit(1);
        }

        /*
         * If host wants to see the guest header as is, we can
         * pass it on unchanged. Otherwise, copy just the parts
         * that host is interested in.
         */
        assert(n->host_hdr_len <= n->guest_hdr_len);
        if (n->host_hdr_len != n->guest_hdr_len) {
            unsigned sg_num = iov_copy(sg, ARRAY_SIZE(sg),
                                       out_sg, out_num,
                                       0, n->host_hdr_len);
            sg_num += iov_copy(sg + sg_num, ARRAY_SIZE(sg) - sg_num,
                               out_sg, out_num,
                               n->guest_hdr_len, -1);
            out_num = sg_num;
            out_sg = sg;
        }

        len = n->guest_hdr_len;

        ret = qemu_sendv_packet_async(qemu_get_queue(n->nic), out_sg, out_num,
                                      virtio_net_tx_complete);
        if (ret == 0) {
            virtio_queue_set_notification(n->tx_vq, 0);
            n->async_tx.elem = elem;
            n->async_tx.len  = len;
            return -EBUSY;
        }

        len += ret;

        virtqueue_push(vq, &elem, 0);
        virtio_notify(&n->vdev, vq);

        if (++num_packets >= n->tx_burst) {
            break;
        }
    }
    return num_packets;
}

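/* Tx kick handlers: with tx=timer the flush is delayed by tx_timeout ns of
 * vm_clock time, with tx=bh it is deferred to a bottom half run from the
 * main loop. */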
static void virtio_net_handle_tx_timer(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIONet *n = to_virtio_net(vdev);

    /* This happens when device was stopped but VCPU wasn't. */
    if (!n->vdev.vm_running) {
        n->tx_waiting = 1;
        return;
    }

    if (n->tx_waiting) {
        virtio_queue_set_notification(vq, 1);
        qemu_del_timer(n->tx_timer);
        n->tx_waiting = 0;
        virtio_net_flush_tx(n, vq);
    } else {
        qemu_mod_timer(n->tx_timer,
                       qemu_get_clock_ns(vm_clock) + n->tx_timeout);
        n->tx_waiting = 1;
        virtio_queue_set_notification(vq, 0);
    }
}

static void virtio_net_handle_tx_bh(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIONet *n = to_virtio_net(vdev);

    if (unlikely(n->tx_waiting)) {
        return;
    }
    n->tx_waiting = 1;
    /* This happens when device was stopped but VCPU wasn't. */
    if (!n->vdev.vm_running) {
        return;
    }
    virtio_queue_set_notification(vq, 0);
    qemu_bh_schedule(n->tx_bh);
}

static void virtio_net_tx_timer(void *opaque)
{
    VirtIONet *n = opaque;
    assert(n->vdev.vm_running);

    n->tx_waiting = 0;

    /* Just in case the driver is not ready any more */
    if (!(n->vdev.status & VIRTIO_CONFIG_S_DRIVER_OK))
        return;

    virtio_queue_set_notification(n->tx_vq, 1);
    virtio_net_flush_tx(n, n->tx_vq);
}

static void virtio_net_tx_bh(void *opaque)
{
    VirtIONet *n = opaque;
    int32_t ret;

    assert(n->vdev.vm_running);

    n->tx_waiting = 0;

    /* Just in case the driver is not ready any more */
    if (unlikely(!(n->vdev.status & VIRTIO_CONFIG_S_DRIVER_OK)))
        return;

    ret = virtio_net_flush_tx(n, n->tx_vq);
    if (ret == -EBUSY) {
        return; /* Notification re-enable handled by tx_complete */
    }

    /* If we flush a full burst of packets, assume there are
     * more coming and immediately reschedule */
    if (ret >= n->tx_burst) {
        qemu_bh_schedule(n->tx_bh);
        n->tx_waiting = 1;
        return;
    }

    /* If less than a full burst, re-enable notification and flush
     * anything that may have come in while we weren't looking.  If
     * we find something, assume the guest is still active and reschedule */
    virtio_queue_set_notification(n->tx_vq, 1);
    if (virtio_net_flush_tx(n, n->tx_vq) > 0) {
        virtio_queue_set_notification(n->tx_vq, 0);
        qemu_bh_schedule(n->tx_bh);
        n->tx_waiting = 1;
    }
}

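/* Migration support: state is written in the fixed order below; fields added
 * in later stream versions are read back by virtio_net_load() only when
 * version_id permits (versions 2..VIRTIO_NET_VM_VERSION are accepted). */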
static void virtio_net_save(QEMUFile *f, void *opaque)
{
    VirtIONet *n = opaque;

    /* At this point, backend must be stopped, otherwise
     * it might keep writing to memory. */
    assert(!n->vhost_started);
    virtio_save(&n->vdev, f);

    qemu_put_buffer(f, n->mac, ETH_ALEN);
    qemu_put_be32(f, n->tx_waiting);
    qemu_put_be32(f, n->mergeable_rx_bufs);
    qemu_put_be16(f, n->status);
    qemu_put_byte(f, n->promisc);
    qemu_put_byte(f, n->allmulti);
    qemu_put_be32(f, n->mac_table.in_use);
    qemu_put_buffer(f, n->mac_table.macs, n->mac_table.in_use * ETH_ALEN);
    qemu_put_buffer(f, (uint8_t *)n->vlans, MAX_VLAN >> 3);
    qemu_put_be32(f, n->has_vnet_hdr);
    qemu_put_byte(f, n->mac_table.multi_overflow);
    qemu_put_byte(f, n->mac_table.uni_overflow);
    qemu_put_byte(f, n->alluni);
    qemu_put_byte(f, n->nomulti);
    qemu_put_byte(f, n->nouni);
    qemu_put_byte(f, n->nobcast);
    qemu_put_byte(f, n->has_ufo);
}

static int virtio_net_load(QEMUFile *f, void *opaque, int version_id)
{
    VirtIONet *n = opaque;
    int i;
    int ret;

    if (version_id < 2 || version_id > VIRTIO_NET_VM_VERSION)
        return -EINVAL;

    ret = virtio_load(&n->vdev, f);
    if (ret) {
        return ret;
    }

    qemu_get_buffer(f, n->mac, ETH_ALEN);
    n->tx_waiting = qemu_get_be32(f);

    virtio_net_set_mrg_rx_bufs(n, qemu_get_be32(f));

    if (version_id >= 3)
        n->status = qemu_get_be16(f);

    if (version_id >= 4) {
        if (version_id < 8) {
            n->promisc = qemu_get_be32(f);
            n->allmulti = qemu_get_be32(f);
        } else {
            n->promisc = qemu_get_byte(f);
            n->allmulti = qemu_get_byte(f);
        }
    }

    if (version_id >= 5) {
        n->mac_table.in_use = qemu_get_be32(f);
        /* MAC_TABLE_ENTRIES may be different from the saved image */
        if (n->mac_table.in_use <= MAC_TABLE_ENTRIES) {
            qemu_get_buffer(f, n->mac_table.macs,
                            n->mac_table.in_use * ETH_ALEN);
        } else if (n->mac_table.in_use) {
            uint8_t *buf = g_malloc0(n->mac_table.in_use);
            qemu_get_buffer(f, buf, n->mac_table.in_use * ETH_ALEN);
            g_free(buf);
            n->mac_table.multi_overflow = n->mac_table.uni_overflow = 1;
            n->mac_table.in_use = 0;
        }
    }

    if (version_id >= 6)
        qemu_get_buffer(f, (uint8_t *)n->vlans, MAX_VLAN >> 3);

    if (version_id >= 7) {
        if (qemu_get_be32(f) && !peer_has_vnet_hdr(n)) {
            error_report("virtio-net: saved image requires vnet_hdr=on");
            return -1;
        }

        if (n->has_vnet_hdr) {
            tap_set_offload(qemu_get_queue(n->nic)->peer,
                    (n->vdev.guest_features >> VIRTIO_NET_F_GUEST_CSUM) & 1,
                    (n->vdev.guest_features >> VIRTIO_NET_F_GUEST_TSO4) & 1,
                    (n->vdev.guest_features >> VIRTIO_NET_F_GUEST_TSO6) & 1,
                    (n->vdev.guest_features >> VIRTIO_NET_F_GUEST_ECN) & 1,
                    (n->vdev.guest_features >> VIRTIO_NET_F_GUEST_UFO) & 1);
        }
    }

    if (version_id >= 9) {
        n->mac_table.multi_overflow = qemu_get_byte(f);
        n->mac_table.uni_overflow = qemu_get_byte(f);
    }

    if (version_id >= 10) {
        n->alluni = qemu_get_byte(f);
        n->nomulti = qemu_get_byte(f);
        n->nouni = qemu_get_byte(f);
        n->nobcast = qemu_get_byte(f);
    }

    if (version_id >= 11) {
        if (qemu_get_byte(f) && !peer_has_ufo(n)) {
            error_report("virtio-net: saved image requires TUN_F_UFO support");
            return -1;
        }
    }

    /* Find the first multicast entry in the saved MAC filter */
    for (i = 0; i < n->mac_table.in_use; i++) {
        if (n->mac_table.macs[i * ETH_ALEN] & 1) {
            break;
        }
    }
    n->mac_table.first_multi = i;

    /* nc.link_down can't be migrated, so infer link_down according
     * to link status bit in n->status */
    qemu_get_queue(n->nic)->link_down = (n->status & VIRTIO_NET_S_LINK_UP) == 0;

    return 0;
}

static void virtio_net_cleanup(NetClientState *nc)
{
    VirtIONet *n = qemu_get_nic_opaque(nc);

    n->nic = NULL;
}

static NetClientInfo net_virtio_info = {
    .type = NET_CLIENT_OPTIONS_KIND_NIC,
    .size = sizeof(NICState),
    .can_receive = virtio_net_can_receive,
    .receive = virtio_net_receive,
    .cleanup = virtio_net_cleanup,
    .link_status_changed = virtio_net_set_link_status,
};

static bool virtio_net_guest_notifier_pending(VirtIODevice *vdev, int idx)
{
    VirtIONet *n = to_virtio_net(vdev);
    NetClientState *nc = qemu_get_queue(n->nic);
    assert(n->vhost_started);
    return vhost_net_virtqueue_pending(tap_get_vhost_net(nc->peer), idx);
}

static void virtio_net_guest_notifier_mask(VirtIODevice *vdev, int idx,
                                           bool mask)
{
    VirtIONet *n = to_virtio_net(vdev);
    NetClientState *nc = qemu_get_queue(n->nic);
    assert(n->vhost_started);
    vhost_net_virtqueue_mask(tap_get_vhost_net(nc->peer),
                             vdev, idx, mask);
}

VirtIODevice *virtio_net_init(DeviceState *dev, NICConf *conf,
                              virtio_net_conf *net)
{
    VirtIONet *n;

    n = (VirtIONet *)virtio_common_init("virtio-net", VIRTIO_ID_NET,
                                        sizeof(struct virtio_net_config),
                                        sizeof(VirtIONet));

    n->vdev.get_config = virtio_net_get_config;
    n->vdev.set_config = virtio_net_set_config;
    n->vdev.get_features = virtio_net_get_features;
    n->vdev.set_features = virtio_net_set_features;
    n->vdev.bad_features = virtio_net_bad_features;
    n->vdev.reset = virtio_net_reset;
    n->vdev.set_status = virtio_net_set_status;
    n->vdev.guest_notifier_mask = virtio_net_guest_notifier_mask;
    n->vdev.guest_notifier_pending = virtio_net_guest_notifier_pending;
    n->rx_vq = virtio_add_queue(&n->vdev, 256, virtio_net_handle_rx);

    if (net->tx && strcmp(net->tx, "timer") && strcmp(net->tx, "bh")) {
        error_report("virtio-net: "
                     "Unknown option tx=%s, valid options: \"timer\" \"bh\"",
                     net->tx);
        error_report("Defaulting to \"bh\"");
    }

    if (net->tx && !strcmp(net->tx, "timer")) {
        n->tx_vq = virtio_add_queue(&n->vdev, 256, virtio_net_handle_tx_timer);
        n->tx_timer = qemu_new_timer_ns(vm_clock, virtio_net_tx_timer, n);
        n->tx_timeout = net->txtimer;
    } else {
        n->tx_vq = virtio_add_queue(&n->vdev, 256, virtio_net_handle_tx_bh);
        n->tx_bh = qemu_bh_new(virtio_net_tx_bh, n);
    }
    n->ctrl_vq = virtio_add_queue(&n->vdev, 64, virtio_net_handle_ctrl);
    qemu_macaddr_default_if_unset(&conf->macaddr);
    memcpy(&n->mac[0], &conf->macaddr, sizeof(n->mac));
    n->status = VIRTIO_NET_S_LINK_UP;

    n->nic = qemu_new_nic(&net_virtio_info, conf, object_get_typename(OBJECT(dev)), dev->id, n);
    peer_test_vnet_hdr(n);
    if (peer_has_vnet_hdr(n)) {
        tap_using_vnet_hdr(qemu_get_queue(n->nic)->peer, true);
        n->host_hdr_len = sizeof(struct virtio_net_hdr);
    } else {
        n->host_hdr_len = 0;
    }

    qemu_format_nic_info_str(qemu_get_queue(n->nic), conf->macaddr.a);

    n->tx_waiting = 0;
    n->tx_burst = net->txburst;
    virtio_net_set_mrg_rx_bufs(n, 0);
    n->promisc = 1; /* for compatibility */

    n->mac_table.macs = g_malloc0(MAC_TABLE_ENTRIES * ETH_ALEN);

    n->vlans = g_malloc0(MAX_VLAN >> 3);

    n->qdev = dev;
    register_savevm(dev, "virtio-net", -1, VIRTIO_NET_VM_VERSION,
                    virtio_net_save, virtio_net_load, n);

    add_boot_device_path(conf->bootindex, dev, "/ethernet-phy@0");

    return &n->vdev;
}

void virtio_net_exit(VirtIODevice *vdev)
{
    VirtIONet *n = DO_UPCAST(VirtIONet, vdev, vdev);

    /* This will stop vhost backend if appropriate. */
    virtio_net_set_status(vdev, 0);

    qemu_purge_queued_packets(qemu_get_queue(n->nic));

    unregister_savevm(n->qdev, "virtio-net", n);

    g_free(n->mac_table.macs);
    g_free(n->vlans);

    if (n->tx_timer) {
        qemu_del_timer(n->tx_timer);
        qemu_free_timer(n->tx_timer);
    } else {
        qemu_bh_delete(n->tx_bh);
    }

    qemu_del_nic(n->nic);
    virtio_cleanup(&n->vdev);
}