hw/virtio-net.c @ e7b43f7e
/*
 * Virtio Network Device
 *
 * Copyright IBM, Corp. 2007
 *
 * Authors:
 *  Anthony Liguori   <aliguori@us.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */

#include "iov.h"
#include "virtio.h"
#include "net.h"
#include "net/checksum.h"
#include "net/tap.h"
#include "qemu-error.h"
#include "qemu-timer.h"
#include "virtio-net.h"
#include "vhost_net.h"

#define VIRTIO_NET_VM_VERSION    11

#define MAC_TABLE_ENTRIES    64
#define MAX_VLAN    (1 << 12)   /* Per 802.1Q definition */

typedef struct VirtIONet
{
    VirtIODevice vdev;
    uint8_t mac[ETH_ALEN];
    uint16_t status;
    VirtQueue *rx_vq;
    VirtQueue *tx_vq;
    VirtQueue *ctrl_vq;
    NICState *nic;
    QEMUTimer *tx_timer;
    QEMUBH *tx_bh;
    uint32_t tx_timeout;
    int32_t tx_burst;
    int tx_waiting;
    uint32_t has_vnet_hdr;
    uint8_t has_ufo;
    struct {
        VirtQueueElement elem;
        ssize_t len;
    } async_tx;
    int mergeable_rx_bufs;
    uint8_t promisc;
    uint8_t allmulti;
    uint8_t alluni;
    uint8_t nomulti;
    uint8_t nouni;
    uint8_t nobcast;
    uint8_t vhost_started;
    bool vm_running;
    VMChangeStateEntry *vmstate;
    struct {
        int in_use;
        int first_multi;
        uint8_t multi_overflow;
        uint8_t uni_overflow;
        uint8_t *macs;
    } mac_table;
    uint32_t *vlans;
    DeviceState *qdev;
} VirtIONet;

/* TODO
 * - we could suppress RX interrupt if we were so inclined.
 */

static VirtIONet *to_virtio_net(VirtIODevice *vdev)
{
    return (VirtIONet *)vdev;
}

static void virtio_net_get_config(VirtIODevice *vdev, uint8_t *config)
{
    VirtIONet *n = to_virtio_net(vdev);
    struct virtio_net_config netcfg;

    netcfg.status = n->status;
    memcpy(netcfg.mac, n->mac, ETH_ALEN);
    memcpy(config, &netcfg, sizeof(netcfg));
}

static void virtio_net_set_config(VirtIODevice *vdev, const uint8_t *config)
{
    VirtIONet *n = to_virtio_net(vdev);
    struct virtio_net_config netcfg;

    memcpy(&netcfg, config, sizeof(netcfg));

    if (memcmp(netcfg.mac, n->mac, ETH_ALEN)) {
        memcpy(n->mac, netcfg.mac, ETH_ALEN);
        qemu_format_nic_info_str(&n->nic->nc, n->mac);
    }
}

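/* Start or stop the vhost-net backend to match the current device state.
 * vhost is only engaged while the guest driver is ready (DRIVER_OK), the
 * link is up and the VM is running; otherwise packets go through the
 * userspace virtio path. */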
static void virtio_net_set_status(struct VirtIODevice *vdev, uint8_t status)
{
    VirtIONet *n = to_virtio_net(vdev);
    if (!n->nic->nc.peer) {
        return;
    }
    if (n->nic->nc.peer->info->type != NET_CLIENT_TYPE_TAP) {
        return;
    }

    if (!tap_get_vhost_net(n->nic->nc.peer)) {
        return;
    }
    if (!!n->vhost_started == ((status & VIRTIO_CONFIG_S_DRIVER_OK) &&
                               (n->status & VIRTIO_NET_S_LINK_UP) &&
                               n->vm_running)) {
        return;
    }
    if (!n->vhost_started) {
        int r = vhost_net_start(tap_get_vhost_net(n->nic->nc.peer), &n->vdev);
        if (r < 0) {
            error_report("unable to start vhost net: %d: "
                         "falling back on userspace virtio", -r);
        } else {
            n->vhost_started = 1;
        }
    } else {
        vhost_net_stop(tap_get_vhost_net(n->nic->nc.peer), &n->vdev);
        n->vhost_started = 0;
    }
}

static void virtio_net_set_link_status(VLANClientState *nc)
{
    VirtIONet *n = DO_UPCAST(NICState, nc, nc)->opaque;
    uint16_t old_status = n->status;

    if (nc->link_down)
        n->status &= ~VIRTIO_NET_S_LINK_UP;
    else
        n->status |= VIRTIO_NET_S_LINK_UP;

    if (n->status != old_status)
        virtio_notify_config(&n->vdev);

    virtio_net_set_status(&n->vdev, n->vdev.status);
}

static void virtio_net_reset(VirtIODevice *vdev)
{
    VirtIONet *n = to_virtio_net(vdev);

    /* Reset back to compatibility mode */
    n->promisc = 1;
    n->allmulti = 0;
    n->alluni = 0;
    n->nomulti = 0;
    n->nouni = 0;
    n->nobcast = 0;

    /* Flush any MAC and VLAN filter table state */
    n->mac_table.in_use = 0;
    n->mac_table.first_multi = 0;
    n->mac_table.multi_overflow = 0;
    n->mac_table.uni_overflow = 0;
    memset(n->mac_table.macs, 0, MAC_TABLE_ENTRIES * ETH_ALEN);
    memset(n->vlans, 0, MAX_VLAN >> 3);
}

static int peer_has_vnet_hdr(VirtIONet *n)
{
    if (!n->nic->nc.peer)
        return 0;

    if (n->nic->nc.peer->info->type != NET_CLIENT_TYPE_TAP)
        return 0;

    n->has_vnet_hdr = tap_has_vnet_hdr(n->nic->nc.peer);

    return n->has_vnet_hdr;
}

static int peer_has_ufo(VirtIONet *n)
{
    if (!peer_has_vnet_hdr(n))
        return 0;

    n->has_ufo = tap_has_ufo(n->nic->nc.peer);

    return n->has_ufo;
}

static uint32_t virtio_net_get_features(VirtIODevice *vdev, uint32_t features)
{
    VirtIONet *n = to_virtio_net(vdev);

    features |= (1 << VIRTIO_NET_F_MAC);

    if (peer_has_vnet_hdr(n)) {
        tap_using_vnet_hdr(n->nic->nc.peer, 1);
    } else {
        features &= ~(0x1 << VIRTIO_NET_F_CSUM);
        features &= ~(0x1 << VIRTIO_NET_F_HOST_TSO4);
        features &= ~(0x1 << VIRTIO_NET_F_HOST_TSO6);
        features &= ~(0x1 << VIRTIO_NET_F_HOST_ECN);

        features &= ~(0x1 << VIRTIO_NET_F_GUEST_CSUM);
        features &= ~(0x1 << VIRTIO_NET_F_GUEST_TSO4);
        features &= ~(0x1 << VIRTIO_NET_F_GUEST_TSO6);
        features &= ~(0x1 << VIRTIO_NET_F_GUEST_ECN);
    }

    if (!peer_has_vnet_hdr(n) || !peer_has_ufo(n)) {
        features &= ~(0x1 << VIRTIO_NET_F_GUEST_UFO);
        features &= ~(0x1 << VIRTIO_NET_F_HOST_UFO);
    }

    if (!n->nic->nc.peer ||
        n->nic->nc.peer->info->type != NET_CLIENT_TYPE_TAP) {
        return features;
    }
    if (!tap_get_vhost_net(n->nic->nc.peer)) {
        return features;
    }
    return vhost_net_get_features(tap_get_vhost_net(n->nic->nc.peer), features);
}

static uint32_t virtio_net_bad_features(VirtIODevice *vdev)
{
    uint32_t features = 0;

    /* Linux kernel 2.6.25.  It understood MAC (as everyone must),
     * but also these: */
    features |= (1 << VIRTIO_NET_F_MAC);
    features |= (1 << VIRTIO_NET_F_CSUM);
    features |= (1 << VIRTIO_NET_F_HOST_TSO4);
    features |= (1 << VIRTIO_NET_F_HOST_TSO6);
    features |= (1 << VIRTIO_NET_F_HOST_ECN);

    return features;
}

static void virtio_net_set_features(VirtIODevice *vdev, uint32_t features)
{
    VirtIONet *n = to_virtio_net(vdev);

    n->mergeable_rx_bufs = !!(features & (1 << VIRTIO_NET_F_MRG_RXBUF));

    if (n->has_vnet_hdr) {
        tap_set_offload(n->nic->nc.peer,
                        (features >> VIRTIO_NET_F_GUEST_CSUM) & 1,
                        (features >> VIRTIO_NET_F_GUEST_TSO4) & 1,
                        (features >> VIRTIO_NET_F_GUEST_TSO6) & 1,
                        (features >> VIRTIO_NET_F_GUEST_ECN) & 1,
                        (features >> VIRTIO_NET_F_GUEST_UFO) & 1);
    }
    if (!n->nic->nc.peer ||
        n->nic->nc.peer->info->type != NET_CLIENT_TYPE_TAP) {
        return;
    }
    if (!tap_get_vhost_net(n->nic->nc.peer)) {
        return;
    }
    vhost_net_ack_features(tap_get_vhost_net(n->nic->nc.peer), features);
}

static int virtio_net_handle_rx_mode(VirtIONet *n, uint8_t cmd,
                                     VirtQueueElement *elem)
{
    uint8_t on;

    if (elem->out_num != 2 || elem->out_sg[1].iov_len != sizeof(on)) {
        error_report("virtio-net ctrl invalid rx mode command");
        exit(1);
    }

    on = ldub_p(elem->out_sg[1].iov_base);

    if (cmd == VIRTIO_NET_CTRL_RX_MODE_PROMISC)
        n->promisc = on;
    else if (cmd == VIRTIO_NET_CTRL_RX_MODE_ALLMULTI)
        n->allmulti = on;
    else if (cmd == VIRTIO_NET_CTRL_RX_MODE_ALLUNI)
        n->alluni = on;
    else if (cmd == VIRTIO_NET_CTRL_RX_MODE_NOMULTI)
        n->nomulti = on;
    else if (cmd == VIRTIO_NET_CTRL_RX_MODE_NOUNI)
        n->nouni = on;
    else if (cmd == VIRTIO_NET_CTRL_RX_MODE_NOBCAST)
        n->nobcast = on;
    else
        return VIRTIO_NET_ERR;

    return VIRTIO_NET_OK;
}

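/* VIRTIO_NET_CTRL_MAC_TABLE_SET carries two virtio_net_ctrl_mac tables in
 * separate out buffers: unicast addresses first, then multicast.  Each table
 * starts with a 32-bit entry count followed by that many 6-byte MACs.  A
 * table that does not fit in MAC_TABLE_ENTRIES sets the corresponding
 * overflow flag, and receive_filter() then accepts that traffic class. */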
static int virtio_net_handle_mac(VirtIONet *n, uint8_t cmd,
                                 VirtQueueElement *elem)
{
    struct virtio_net_ctrl_mac mac_data;

    if (cmd != VIRTIO_NET_CTRL_MAC_TABLE_SET || elem->out_num != 3 ||
        elem->out_sg[1].iov_len < sizeof(mac_data) ||
        elem->out_sg[2].iov_len < sizeof(mac_data))
        return VIRTIO_NET_ERR;

    n->mac_table.in_use = 0;
    n->mac_table.first_multi = 0;
    n->mac_table.uni_overflow = 0;
    n->mac_table.multi_overflow = 0;
    memset(n->mac_table.macs, 0, MAC_TABLE_ENTRIES * ETH_ALEN);

    mac_data.entries = ldl_le_p(elem->out_sg[1].iov_base);

    if (sizeof(mac_data.entries) +
        (mac_data.entries * ETH_ALEN) > elem->out_sg[1].iov_len)
        return VIRTIO_NET_ERR;

    if (mac_data.entries <= MAC_TABLE_ENTRIES) {
        memcpy(n->mac_table.macs, elem->out_sg[1].iov_base + sizeof(mac_data),
               mac_data.entries * ETH_ALEN);
        n->mac_table.in_use += mac_data.entries;
    } else {
        n->mac_table.uni_overflow = 1;
    }

    n->mac_table.first_multi = n->mac_table.in_use;

    mac_data.entries = ldl_le_p(elem->out_sg[2].iov_base);

    if (sizeof(mac_data.entries) +
        (mac_data.entries * ETH_ALEN) > elem->out_sg[2].iov_len)
        return VIRTIO_NET_ERR;

    if (mac_data.entries) {
        if (n->mac_table.in_use + mac_data.entries <= MAC_TABLE_ENTRIES) {
            memcpy(n->mac_table.macs + (n->mac_table.in_use * ETH_ALEN),
                   elem->out_sg[2].iov_base + sizeof(mac_data),
                   mac_data.entries * ETH_ALEN);
            n->mac_table.in_use += mac_data.entries;
        } else {
            n->mac_table.multi_overflow = 1;
        }
    }

    return VIRTIO_NET_OK;
}

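/* The VLAN filter is a MAX_VLAN-bit bitmap in n->vlans: bit (vid & 0x1f) of
 * 32-bit word (vid >> 5) is set while that VLAN id is allowed. */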
static int virtio_net_handle_vlan_table(VirtIONet *n, uint8_t cmd,
                                        VirtQueueElement *elem)
{
    uint16_t vid;

    if (elem->out_num != 2 || elem->out_sg[1].iov_len != sizeof(vid)) {
        error_report("virtio-net ctrl invalid vlan command");
        return VIRTIO_NET_ERR;
    }

    vid = lduw_le_p(elem->out_sg[1].iov_base);

    if (vid >= MAX_VLAN)
        return VIRTIO_NET_ERR;

    if (cmd == VIRTIO_NET_CTRL_VLAN_ADD)
        n->vlans[vid >> 5] |= (1U << (vid & 0x1f));
    else if (cmd == VIRTIO_NET_CTRL_VLAN_DEL)
        n->vlans[vid >> 5] &= ~(1U << (vid & 0x1f));
    else
        return VIRTIO_NET_ERR;

    return VIRTIO_NET_OK;
}

static void virtio_net_handle_ctrl(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIONet *n = to_virtio_net(vdev);
    struct virtio_net_ctrl_hdr ctrl;
    virtio_net_ctrl_ack status = VIRTIO_NET_ERR;
    VirtQueueElement elem;

    while (virtqueue_pop(vq, &elem)) {
        if ((elem.in_num < 1) || (elem.out_num < 1)) {
            error_report("virtio-net ctrl missing headers");
            exit(1);
        }

        if (elem.out_sg[0].iov_len < sizeof(ctrl) ||
            elem.in_sg[elem.in_num - 1].iov_len < sizeof(status)) {
            error_report("virtio-net ctrl header not in correct element");
            exit(1);
        }

        ctrl.class = ldub_p(elem.out_sg[0].iov_base);
        ctrl.cmd = ldub_p(elem.out_sg[0].iov_base + sizeof(ctrl.class));

        if (ctrl.class == VIRTIO_NET_CTRL_RX_MODE)
            status = virtio_net_handle_rx_mode(n, ctrl.cmd, &elem);
        else if (ctrl.class == VIRTIO_NET_CTRL_MAC)
            status = virtio_net_handle_mac(n, ctrl.cmd, &elem);
        else if (ctrl.class == VIRTIO_NET_CTRL_VLAN)
            status = virtio_net_handle_vlan_table(n, ctrl.cmd, &elem);

        stb_p(elem.in_sg[elem.in_num - 1].iov_base, status);

        virtqueue_push(vq, &elem, sizeof(status));
        virtio_notify(vdev, vq);
    }
}

/* RX */

static void virtio_net_handle_rx(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIONet *n = to_virtio_net(vdev);

    qemu_flush_queued_packets(&n->nic->nc);

    /* We now have RX buffers, signal to the IO thread to break out of the
     * select to re-poll the tap file descriptor */
    qemu_notify_event();
}

static int virtio_net_can_receive(VLANClientState *nc)
{
    VirtIONet *n = DO_UPCAST(NICState, nc, nc)->opaque;

    if (!virtio_queue_ready(n->rx_vq) ||
        !(n->vdev.status & VIRTIO_CONFIG_S_DRIVER_OK))
        return 0;

    return 1;
}

static int virtio_net_has_buffers(VirtIONet *n, int bufsize)
{
    if (virtio_queue_empty(n->rx_vq) ||
        (n->mergeable_rx_bufs &&
         !virtqueue_avail_bytes(n->rx_vq, bufsize, 0))) {
        virtio_queue_set_notification(n->rx_vq, 1);

        /* To avoid a race condition where the guest has made some buffers
         * available after the above check but before notification was
         * enabled, check for available buffers again.
         */
        if (virtio_queue_empty(n->rx_vq) ||
            (n->mergeable_rx_bufs &&
             !virtqueue_avail_bytes(n->rx_vq, bufsize, 0)))
            return 0;
    }

    virtio_queue_set_notification(n->rx_vq, 0);
    return 1;
}

/* dhclient uses AF_PACKET but doesn't pass auxdata to the kernel so
 * it never finds out that the packets don't have valid checksums.  This
 * causes dhclient to get upset.  Fedora's carried a patch for ages to
 * fix this with Xen but it hasn't appeared in an upstream release of
 * dhclient yet.
 *
 * To avoid breaking existing guests, we catch udp packets and add
 * checksums.  This is terrible but it's better than hacking the guest
 * kernels.
 *
 * N.B. if we introduce a zero-copy API, this operation is no longer free so
 * we should provide a mechanism to disable it to avoid polluting the host
 * cache.
 */
static void work_around_broken_dhclient(struct virtio_net_hdr *hdr,
                                        const uint8_t *buf, size_t size)
{
    if ((hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) && /* missing csum */
        (size > 27 && size < 1500) && /* normal sized MTU */
        (buf[12] == 0x08 && buf[13] == 0x00) && /* ethertype == IPv4 */
        (buf[23] == 17) && /* ip.protocol == UDP */
        (buf[34] == 0 && buf[35] == 67)) { /* udp.srcport == bootps */
        /* FIXME this cast is evil */
        net_checksum_calculate((uint8_t *)buf, size);
        hdr->flags &= ~VIRTIO_NET_HDR_F_NEEDS_CSUM;
    }
}

static int receive_header(VirtIONet *n, struct iovec *iov, int iovcnt,
                          const void *buf, size_t size, size_t hdr_len)
{
    struct virtio_net_hdr *hdr = (struct virtio_net_hdr *)iov[0].iov_base;
    int offset = 0;

    hdr->flags = 0;
    hdr->gso_type = VIRTIO_NET_HDR_GSO_NONE;

    if (n->has_vnet_hdr) {
        memcpy(hdr, buf, sizeof(*hdr));
        offset = sizeof(*hdr);
        work_around_broken_dhclient(hdr, buf + offset, size - offset);
    }

    /* We only ever receive a struct virtio_net_hdr from the tapfd,
     * but we may be passing along a larger header to the guest.
     */
    iov[0].iov_base += hdr_len;
    iov[0].iov_len  -= hdr_len;

    return offset;
}

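/* Decide whether an incoming frame should be delivered to the guest:
 * promiscuous mode accepts everything, frames tagged with a VLAN id that is
 * not in the VLAN bitmap are dropped, and the rest are matched against the
 * rx mode flags and the unicast/multicast halves of the MAC filter table. */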
static int receive_filter(VirtIONet *n, const uint8_t *buf, int size)
{
    static const uint8_t bcast[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
    static const uint8_t vlan[] = {0x81, 0x00};
    uint8_t *ptr = (uint8_t *)buf;
    int i;

    if (n->promisc)
        return 1;

    if (n->has_vnet_hdr) {
        ptr += sizeof(struct virtio_net_hdr);
    }

    if (!memcmp(&ptr[12], vlan, sizeof(vlan))) {
        int vid = be16_to_cpup((uint16_t *)(ptr + 14)) & 0xfff;
        if (!(n->vlans[vid >> 5] & (1U << (vid & 0x1f))))
            return 0;
    }

    if (ptr[0] & 1) { // multicast
        if (!memcmp(ptr, bcast, sizeof(bcast))) {
            return !n->nobcast;
        } else if (n->nomulti) {
            return 0;
        } else if (n->allmulti || n->mac_table.multi_overflow) {
            return 1;
        }

        for (i = n->mac_table.first_multi; i < n->mac_table.in_use; i++) {
            if (!memcmp(ptr, &n->mac_table.macs[i * ETH_ALEN], ETH_ALEN)) {
                return 1;
            }
        }
    } else { // unicast
        if (n->nouni) {
            return 0;
        } else if (n->alluni || n->mac_table.uni_overflow) {
            return 1;
        } else if (!memcmp(ptr, n->mac, ETH_ALEN)) {
            return 1;
        }

        for (i = 0; i < n->mac_table.first_multi; i++) {
            if (!memcmp(ptr, &n->mac_table.macs[i * ETH_ALEN], ETH_ALEN)) {
                return 1;
            }
        }
    }

    return 0;
}

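/* Copy one frame from the backend into guest receive buffers.  Without
 * VIRTIO_NET_F_MRG_RXBUF the whole frame (plus virtio_net_hdr) must fit in a
 * single descriptor chain, otherwise the tail is silently dropped; with
 * mergeable buffers the frame may span several chains and num_buffers in the
 * first header records how many were used. */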
static ssize_t virtio_net_receive(VLANClientState *nc, const uint8_t *buf, size_t size)
{
    VirtIONet *n = DO_UPCAST(NICState, nc, nc)->opaque;
    struct virtio_net_hdr_mrg_rxbuf *mhdr = NULL;
    size_t guest_hdr_len, offset, i, host_hdr_len;

    if (!virtio_net_can_receive(&n->nic->nc))
        return -1;

    /* hdr_len refers to the header we supply to the guest */
    guest_hdr_len = n->mergeable_rx_bufs ?
        sizeof(struct virtio_net_hdr_mrg_rxbuf) : sizeof(struct virtio_net_hdr);

    host_hdr_len = n->has_vnet_hdr ? sizeof(struct virtio_net_hdr) : 0;
    if (!virtio_net_has_buffers(n, size + guest_hdr_len - host_hdr_len))
        return 0;

    if (!receive_filter(n, buf, size))
        return size;

    offset = i = 0;

    while (offset < size) {
        VirtQueueElement elem;
        int len, total;
        struct iovec sg[VIRTQUEUE_MAX_SIZE];

        total = 0;

        if (virtqueue_pop(n->rx_vq, &elem) == 0) {
            if (i == 0)
                return -1;
            error_report("virtio-net unexpected empty queue: "
                         "i %zd mergeable %d offset %zd, size %zd, "
                         "guest hdr len %zd, host hdr len %zd guest features 0x%x",
                         i, n->mergeable_rx_bufs, offset, size,
                         guest_hdr_len, host_hdr_len, n->vdev.guest_features);
            exit(1);
        }

        if (elem.in_num < 1) {
            error_report("virtio-net receive queue contains no in buffers");
            exit(1);
        }

        if (!n->mergeable_rx_bufs && elem.in_sg[0].iov_len != guest_hdr_len) {
            error_report("virtio-net header not in first element");
            exit(1);
        }

        memcpy(&sg, &elem.in_sg[0], sizeof(sg[0]) * elem.in_num);

        if (i == 0) {
            if (n->mergeable_rx_bufs)
                mhdr = (struct virtio_net_hdr_mrg_rxbuf *)sg[0].iov_base;

            offset += receive_header(n, sg, elem.in_num,
                                     buf + offset, size - offset, guest_hdr_len);
            total += guest_hdr_len;
        }

        /* copy in packet.  ugh */
        len = iov_from_buf(sg, elem.in_num,
                           buf + offset, size - offset);
        total += len;
        offset += len;
        /* If buffers can't be merged, at this point we
         * must have consumed the complete packet.
         * Otherwise, drop it. */
        if (!n->mergeable_rx_bufs && offset < size) {
#if 0
            error_report("virtio-net truncated non-mergeable packet: "
                         "i %zd mergeable %d offset %zd, size %zd, "
                         "guest hdr len %zd, host hdr len %zd",
                         i, n->mergeable_rx_bufs,
                         offset, size, guest_hdr_len, host_hdr_len);
#endif
            return size;
        }

        /* signal other side */
        virtqueue_fill(n->rx_vq, &elem, total, i++);
    }

    if (mhdr)
        mhdr->num_buffers = i;

    virtqueue_flush(n->rx_vq, i);
    virtio_notify(&n->vdev, n->rx_vq);

    return size;
}

static int32_t virtio_net_flush_tx(VirtIONet *n, VirtQueue *vq);

static void virtio_net_tx_complete(VLANClientState *nc, ssize_t len)
{
    VirtIONet *n = DO_UPCAST(NICState, nc, nc)->opaque;

    virtqueue_push(n->tx_vq, &n->async_tx.elem, n->async_tx.len);
    virtio_notify(&n->vdev, n->tx_vq);

    n->async_tx.elem.out_num = n->async_tx.len = 0;

    virtio_queue_set_notification(n->tx_vq, 1);
    virtio_net_flush_tx(n, n->tx_vq);
}

/* TX */
static int32_t virtio_net_flush_tx(VirtIONet *n, VirtQueue *vq)
{
    VirtQueueElement elem;
    int32_t num_packets = 0;

    if (!(n->vdev.status & VIRTIO_CONFIG_S_DRIVER_OK)) {
        return num_packets;
    }

    if (n->async_tx.elem.out_num) {
        virtio_queue_set_notification(n->tx_vq, 0);
        return num_packets;
    }

    while (virtqueue_pop(vq, &elem)) {
        ssize_t ret, len = 0;
        unsigned int out_num = elem.out_num;
        struct iovec *out_sg = &elem.out_sg[0];
        unsigned hdr_len;

        /* hdr_len refers to the header received from the guest */
        hdr_len = n->mergeable_rx_bufs ?
            sizeof(struct virtio_net_hdr_mrg_rxbuf) :
            sizeof(struct virtio_net_hdr);

        if (out_num < 1 || out_sg->iov_len != hdr_len) {
            error_report("virtio-net header not in first element");
            exit(1);
        }

        /* ignore the header if GSO is not supported */
        if (!n->has_vnet_hdr) {
            out_num--;
            out_sg++;
            len += hdr_len;
        } else if (n->mergeable_rx_bufs) {
            /* tapfd expects a struct virtio_net_hdr */
            hdr_len -= sizeof(struct virtio_net_hdr);
            out_sg->iov_len -= hdr_len;
            len += hdr_len;
        }

        ret = qemu_sendv_packet_async(&n->nic->nc, out_sg, out_num,
                                      virtio_net_tx_complete);
        if (ret == 0) {
            virtio_queue_set_notification(n->tx_vq, 0);
            n->async_tx.elem = elem;
            n->async_tx.len  = len;
            return -EBUSY;
        }

        len += ret;

        virtqueue_push(vq, &elem, len);
        virtio_notify(&n->vdev, vq);

        if (++num_packets >= n->tx_burst) {
            break;
        }
    }
    return num_packets;
}

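/* Two TX mitigation strategies are available, selected with the tx= device
 * option in virtio_net_init(): a vm_clock timer that delays the flush by
 * tx_timeout, and a bottom half scheduled as soon as the guest kicks the
 * queue.  Both disable further queue notifications while a flush is
 * pending. */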
static void virtio_net_handle_tx_timer(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIONet *n = to_virtio_net(vdev);

    if (n->tx_waiting) {
        virtio_queue_set_notification(vq, 1);
        qemu_del_timer(n->tx_timer);
        n->tx_waiting = 0;
        virtio_net_flush_tx(n, vq);
    } else {
        qemu_mod_timer(n->tx_timer,
                       qemu_get_clock(vm_clock) + n->tx_timeout);
        n->tx_waiting = 1;
        virtio_queue_set_notification(vq, 0);
    }
}

static void virtio_net_handle_tx_bh(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIONet *n = to_virtio_net(vdev);

    if (unlikely(n->tx_waiting)) {
        return;
    }
    virtio_queue_set_notification(vq, 0);
    qemu_bh_schedule(n->tx_bh);
    n->tx_waiting = 1;
}

static void virtio_net_tx_timer(void *opaque)
{
    VirtIONet *n = opaque;

    n->tx_waiting = 0;

    /* Just in case the driver is not ready any more */
    if (!(n->vdev.status & VIRTIO_CONFIG_S_DRIVER_OK))
        return;

    virtio_queue_set_notification(n->tx_vq, 1);
    virtio_net_flush_tx(n, n->tx_vq);
}

static void virtio_net_tx_bh(void *opaque)
{
    VirtIONet *n = opaque;
    int32_t ret;

    n->tx_waiting = 0;

    /* Just in case the driver is not ready any more */
    if (unlikely(!(n->vdev.status & VIRTIO_CONFIG_S_DRIVER_OK)))
        return;

    ret = virtio_net_flush_tx(n, n->tx_vq);
    if (ret == -EBUSY) {
        return; /* Notification re-enable handled by tx_complete */
    }

    /* If we flush a full burst of packets, assume there are
     * more coming and immediately reschedule */
    if (ret >= n->tx_burst) {
        qemu_bh_schedule(n->tx_bh);
        n->tx_waiting = 1;
        return;
    }

    /* If less than a full burst, re-enable notification and flush
     * anything that may have come in while we weren't looking.  If
     * we find something, assume the guest is still active and reschedule */
    virtio_queue_set_notification(n->tx_vq, 1);
    if (virtio_net_flush_tx(n, n->tx_vq) > 0) {
        virtio_queue_set_notification(n->tx_vq, 0);
        qemu_bh_schedule(n->tx_bh);
        n->tx_waiting = 1;
    }
}

static void virtio_net_save(QEMUFile *f, void *opaque)
{
    VirtIONet *n = opaque;

    /* At this point, backend must be stopped, otherwise
     * it might keep writing to memory. */
    assert(!n->vhost_started);
    virtio_save(&n->vdev, f);

    qemu_put_buffer(f, n->mac, ETH_ALEN);
    qemu_put_be32(f, n->tx_waiting);
    qemu_put_be32(f, n->mergeable_rx_bufs);
    qemu_put_be16(f, n->status);
    qemu_put_byte(f, n->promisc);
    qemu_put_byte(f, n->allmulti);
    qemu_put_be32(f, n->mac_table.in_use);
    qemu_put_buffer(f, n->mac_table.macs, n->mac_table.in_use * ETH_ALEN);
    qemu_put_buffer(f, (uint8_t *)n->vlans, MAX_VLAN >> 3);
    qemu_put_be32(f, n->has_vnet_hdr);
    qemu_put_byte(f, n->mac_table.multi_overflow);
    qemu_put_byte(f, n->mac_table.uni_overflow);
    qemu_put_byte(f, n->alluni);
    qemu_put_byte(f, n->nomulti);
    qemu_put_byte(f, n->nouni);
    qemu_put_byte(f, n->nobcast);
    qemu_put_byte(f, n->has_ufo);
}

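/* Restore state written by virtio_net_save.  Fields were appended to the
 * wire format over time, so everything past the base layout is guarded by
 * the version_id checks below; images newer than VIRTIO_NET_VM_VERSION or
 * older than version 2 are rejected. */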
static int virtio_net_load(QEMUFile *f, void *opaque, int version_id)
{
    VirtIONet *n = opaque;
    int i;

    if (version_id < 2 || version_id > VIRTIO_NET_VM_VERSION)
        return -EINVAL;

    virtio_load(&n->vdev, f);

    qemu_get_buffer(f, n->mac, ETH_ALEN);
    n->tx_waiting = qemu_get_be32(f);
    n->mergeable_rx_bufs = qemu_get_be32(f);

    if (version_id >= 3)
        n->status = qemu_get_be16(f);

    if (version_id >= 4) {
        if (version_id < 8) {
            n->promisc = qemu_get_be32(f);
            n->allmulti = qemu_get_be32(f);
        } else {
            n->promisc = qemu_get_byte(f);
            n->allmulti = qemu_get_byte(f);
        }
    }

    if (version_id >= 5) {
        n->mac_table.in_use = qemu_get_be32(f);
        /* MAC_TABLE_ENTRIES may be different from the saved image */
        if (n->mac_table.in_use <= MAC_TABLE_ENTRIES) {
            qemu_get_buffer(f, n->mac_table.macs,
                            n->mac_table.in_use * ETH_ALEN);
        } else if (n->mac_table.in_use) {
            qemu_fseek(f, n->mac_table.in_use * ETH_ALEN, SEEK_CUR);
            n->mac_table.multi_overflow = n->mac_table.uni_overflow = 1;
            n->mac_table.in_use = 0;
        }
    }

    if (version_id >= 6)
        qemu_get_buffer(f, (uint8_t *)n->vlans, MAX_VLAN >> 3);

    if (version_id >= 7) {
        if (qemu_get_be32(f) && !peer_has_vnet_hdr(n)) {
            error_report("virtio-net: saved image requires vnet_hdr=on");
            return -1;
        }

        if (n->has_vnet_hdr) {
            tap_using_vnet_hdr(n->nic->nc.peer, 1);
            tap_set_offload(n->nic->nc.peer,
                            (n->vdev.guest_features >> VIRTIO_NET_F_GUEST_CSUM) & 1,
                            (n->vdev.guest_features >> VIRTIO_NET_F_GUEST_TSO4) & 1,
                            (n->vdev.guest_features >> VIRTIO_NET_F_GUEST_TSO6) & 1,
                            (n->vdev.guest_features >> VIRTIO_NET_F_GUEST_ECN)  & 1,
                            (n->vdev.guest_features >> VIRTIO_NET_F_GUEST_UFO)  & 1);
        }
    }

    if (version_id >= 9) {
        n->mac_table.multi_overflow = qemu_get_byte(f);
        n->mac_table.uni_overflow = qemu_get_byte(f);
    }

    if (version_id >= 10) {
        n->alluni = qemu_get_byte(f);
        n->nomulti = qemu_get_byte(f);
        n->nouni = qemu_get_byte(f);
        n->nobcast = qemu_get_byte(f);
    }

    if (version_id >= 11) {
        if (qemu_get_byte(f) && !peer_has_ufo(n)) {
            error_report("virtio-net: saved image requires TUN_F_UFO support");
            return -1;
        }
    }

    /* Find the first multicast entry in the saved MAC filter */
    for (i = 0; i < n->mac_table.in_use; i++) {
        if (n->mac_table.macs[i * ETH_ALEN] & 1) {
            break;
        }
    }
    n->mac_table.first_multi = i;

    if (n->tx_waiting) {
        if (n->tx_timer) {
            qemu_mod_timer(n->tx_timer,
                           qemu_get_clock(vm_clock) + n->tx_timeout);
        } else {
            qemu_bh_schedule(n->tx_bh);
        }
    }
    return 0;
}

static void virtio_net_cleanup(VLANClientState *nc)
{
    VirtIONet *n = DO_UPCAST(NICState, nc, nc)->opaque;

    n->nic = NULL;
}

static NetClientInfo net_virtio_info = {
    .type = NET_CLIENT_TYPE_NIC,
    .size = sizeof(NICState),
    .can_receive = virtio_net_can_receive,
    .receive = virtio_net_receive,
    .cleanup = virtio_net_cleanup,
    .link_status_changed = virtio_net_set_link_status,
};

static void virtio_net_vmstate_change(void *opaque, int running, int reason)
{
    VirtIONet *n = opaque;
    n->vm_running = running;
    /* This is called when vm is started/stopped,
     * it will start/stop vhost backend if appropriate
     * e.g. after migration. */
    virtio_net_set_status(&n->vdev, n->vdev.status);
}

VirtIODevice *virtio_net_init(DeviceState *dev, NICConf *conf,
                              virtio_net_conf *net)
{
    VirtIONet *n;

    n = (VirtIONet *)virtio_common_init("virtio-net", VIRTIO_ID_NET,
                                        sizeof(struct virtio_net_config),
                                        sizeof(VirtIONet));

    n->vdev.get_config = virtio_net_get_config;
    n->vdev.set_config = virtio_net_set_config;
    n->vdev.get_features = virtio_net_get_features;
    n->vdev.set_features = virtio_net_set_features;
    n->vdev.bad_features = virtio_net_bad_features;
    n->vdev.reset = virtio_net_reset;
    n->vdev.set_status = virtio_net_set_status;
    n->rx_vq = virtio_add_queue(&n->vdev, 256, virtio_net_handle_rx);

    if (net->tx && strcmp(net->tx, "timer") && strcmp(net->tx, "bh")) {
        error_report("virtio-net: "
                     "Unknown option tx=%s, valid options: \"timer\" \"bh\"",
                     net->tx);
        error_report("Defaulting to \"bh\"");
    }

    if (net->tx && !strcmp(net->tx, "timer")) {
        n->tx_vq = virtio_add_queue(&n->vdev, 256, virtio_net_handle_tx_timer);
        n->tx_timer = qemu_new_timer(vm_clock, virtio_net_tx_timer, n);
        n->tx_timeout = net->txtimer;
    } else {
        n->tx_vq = virtio_add_queue(&n->vdev, 256, virtio_net_handle_tx_bh);
        n->tx_bh = qemu_bh_new(virtio_net_tx_bh, n);
    }
    n->ctrl_vq = virtio_add_queue(&n->vdev, 64, virtio_net_handle_ctrl);
    qemu_macaddr_default_if_unset(&conf->macaddr);
    memcpy(&n->mac[0], &conf->macaddr, sizeof(n->mac));
    n->status = VIRTIO_NET_S_LINK_UP;

    n->nic = qemu_new_nic(&net_virtio_info, conf, dev->info->name, dev->id, n);

    qemu_format_nic_info_str(&n->nic->nc, conf->macaddr.a);

    n->tx_waiting = 0;
    n->tx_burst = net->txburst;
    n->mergeable_rx_bufs = 0;
    n->promisc = 1; /* for compatibility */

    n->mac_table.macs = qemu_mallocz(MAC_TABLE_ENTRIES * ETH_ALEN);

    n->vlans = qemu_mallocz(MAX_VLAN >> 3);

    n->qdev = dev;
    register_savevm(dev, "virtio-net", -1, VIRTIO_NET_VM_VERSION,
                    virtio_net_save, virtio_net_load, n);
    n->vmstate = qemu_add_vm_change_state_handler(virtio_net_vmstate_change, n);

    return &n->vdev;
}

void virtio_net_exit(VirtIODevice *vdev)
{
    VirtIONet *n = DO_UPCAST(VirtIONet, vdev, vdev);
    qemu_del_vm_change_state_handler(n->vmstate);

    /* This will stop vhost backend if appropriate. */
    virtio_net_set_status(vdev, 0);

    qemu_purge_queued_packets(&n->nic->nc);

    unregister_savevm(n->qdev, "virtio-net", n);

    qemu_free(n->mac_table.macs);
    qemu_free(n->vlans);

    if (n->tx_timer) {
        qemu_del_timer(n->tx_timer);
        qemu_free_timer(n->tx_timer);
    } else {
        qemu_bh_delete(n->tx_bh);
    }

    virtio_cleanup(&n->vdev);
    qemu_del_vlan_client(&n->nic->nc);
}