root / net / queue.c @ a9899996
History | View | Annotate | Download (7.5 kB)
1 |
/*
|
---|---|
2 |
* Copyright (c) 2003-2008 Fabrice Bellard
|
3 |
* Copyright (c) 2009 Red Hat, Inc.
|
4 |
*
|
5 |
* Permission is hereby granted, free of charge, to any person obtaining a copy
|
6 |
* of this software and associated documentation files (the "Software"), to deal
|
7 |
* in the Software without restriction, including without limitation the rights
|
8 |
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
9 |
* copies of the Software, and to permit persons to whom the Software is
|
10 |
* furnished to do so, subject to the following conditions:
|
11 |
*
|
12 |
* The above copyright notice and this permission notice shall be included in
|
13 |
* all copies or substantial portions of the Software.
|
14 |
*
|
15 |
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
16 |
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
17 |
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
18 |
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
19 |
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
20 |
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
21 |
* THE SOFTWARE.
|
22 |
*/
|
23 |
|
24 |
#include "net/queue.h" |
25 |
#include "qemu-queue.h" |
26 |
|
27 |
/* The delivery handler may only return zero if it will call
|
28 |
* qemu_net_queue_flush() when it determines that it is once again able
|
29 |
* to deliver packets. It must also call qemu_net_queue_purge() in its
|
30 |
* cleanup path.
|
31 |
*
|
32 |
* If a sent callback is provided to send(), the caller must handle a
|
33 |
* zero return from the delivery handler by not sending any more packets
|
34 |
* until we have invoked the callback. Only in that case will we queue
|
35 |
* the packet.
|
36 |
*
|
37 |
* If a sent callback isn't provided, we just drop the packet to avoid
|
38 |
* unbounded queueing.
|
39 |
*/
|
40 |
|
41 |
struct NetPacket {
|
42 |
QTAILQ_ENTRY(NetPacket) entry; |
43 |
VLANClientState *sender; |
44 |
unsigned flags;
|
45 |
int size;
|
46 |
NetPacketSent *sent_cb; |
47 |
uint8_t data[0];
|
48 |
}; |
49 |
|
50 |
/* A FIFO of packets awaiting delivery to a receiver that may be
 * temporarily unable to accept them. */
struct NetQueue {
    NetPacketDeliver *deliver;         /* handler for flat-buffer packets */
    NetPacketDeliverIOV *deliver_iov;  /* handler for scatter-gather packets */
    void *opaque;                      /* passed through to both handlers */

    QTAILQ_HEAD(packets, NetPacket) packets;  /* pending packets, FIFO order */

    /* Set while a deliver handler is running; send() re-entered during
     * delivery queues instead of delivering, avoiding recursion. */
    unsigned delivering : 1;
};
59 |
|
60 |
NetQueue *qemu_new_net_queue(NetPacketDeliver *deliver, |
61 |
NetPacketDeliverIOV *deliver_iov, |
62 |
void *opaque)
|
63 |
{ |
64 |
NetQueue *queue; |
65 |
|
66 |
queue = qemu_mallocz(sizeof(NetQueue));
|
67 |
|
68 |
queue->deliver = deliver; |
69 |
queue->deliver_iov = deliver_iov; |
70 |
queue->opaque = opaque; |
71 |
|
72 |
QTAILQ_INIT(&queue->packets); |
73 |
|
74 |
queue->delivering = 0;
|
75 |
|
76 |
return queue;
|
77 |
} |
78 |
|
79 |
/* Free a queue and every packet still pending on it.
 * Pending packets are discarded; their sent_cb callbacks are NOT invoked. */
void qemu_del_net_queue(NetQueue *queue)
{
    NetPacket *pkt, *tmp;

    QTAILQ_FOREACH_SAFE(pkt, &queue->packets, entry, tmp) {
        QTAILQ_REMOVE(&queue->packets, pkt, entry);
        qemu_free(pkt);
    }

    qemu_free(queue);
}
90 |
|
91 |
static ssize_t qemu_net_queue_append(NetQueue *queue,
|
92 |
VLANClientState *sender, |
93 |
unsigned flags,
|
94 |
const uint8_t *buf,
|
95 |
size_t size, |
96 |
NetPacketSent *sent_cb) |
97 |
{ |
98 |
NetPacket *packet; |
99 |
|
100 |
packet = qemu_malloc(sizeof(NetPacket) + size);
|
101 |
packet->sender = sender; |
102 |
packet->flags = flags; |
103 |
packet->size = size; |
104 |
packet->sent_cb = sent_cb; |
105 |
memcpy(packet->data, buf, size); |
106 |
|
107 |
QTAILQ_INSERT_TAIL(&queue->packets, packet, entry); |
108 |
|
109 |
return size;
|
110 |
} |
111 |
|
112 |
static ssize_t qemu_net_queue_append_iov(NetQueue *queue,
|
113 |
VLANClientState *sender, |
114 |
unsigned flags,
|
115 |
const struct iovec *iov, |
116 |
int iovcnt,
|
117 |
NetPacketSent *sent_cb) |
118 |
{ |
119 |
NetPacket *packet; |
120 |
size_t max_len = 0;
|
121 |
int i;
|
122 |
|
123 |
for (i = 0; i < iovcnt; i++) { |
124 |
max_len += iov[i].iov_len; |
125 |
} |
126 |
|
127 |
packet = qemu_malloc(sizeof(NetPacket) + max_len);
|
128 |
packet->sender = sender; |
129 |
packet->sent_cb = sent_cb; |
130 |
packet->flags = flags; |
131 |
packet->size = 0;
|
132 |
|
133 |
for (i = 0; i < iovcnt; i++) { |
134 |
size_t len = iov[i].iov_len; |
135 |
|
136 |
memcpy(packet->data + packet->size, iov[i].iov_base, len); |
137 |
packet->size += len; |
138 |
} |
139 |
|
140 |
QTAILQ_INSERT_TAIL(&queue->packets, packet, entry); |
141 |
|
142 |
return packet->size;
|
143 |
} |
144 |
|
145 |
static ssize_t qemu_net_queue_deliver(NetQueue *queue,
|
146 |
VLANClientState *sender, |
147 |
unsigned flags,
|
148 |
const uint8_t *data,
|
149 |
size_t size) |
150 |
{ |
151 |
ssize_t ret = -1;
|
152 |
|
153 |
queue->delivering = 1;
|
154 |
ret = queue->deliver(sender, flags, data, size, queue->opaque); |
155 |
queue->delivering = 0;
|
156 |
|
157 |
return ret;
|
158 |
} |
159 |
|
160 |
static ssize_t qemu_net_queue_deliver_iov(NetQueue *queue,
|
161 |
VLANClientState *sender, |
162 |
unsigned flags,
|
163 |
const struct iovec *iov, |
164 |
int iovcnt)
|
165 |
{ |
166 |
ssize_t ret = -1;
|
167 |
|
168 |
queue->delivering = 1;
|
169 |
ret = queue->deliver_iov(sender, flags, iov, iovcnt, queue->opaque); |
170 |
queue->delivering = 0;
|
171 |
|
172 |
return ret;
|
173 |
} |
174 |
|
175 |
ssize_t qemu_net_queue_send(NetQueue *queue, |
176 |
VLANClientState *sender, |
177 |
unsigned flags,
|
178 |
const uint8_t *data,
|
179 |
size_t size, |
180 |
NetPacketSent *sent_cb) |
181 |
{ |
182 |
ssize_t ret; |
183 |
|
184 |
if (queue->delivering) {
|
185 |
return qemu_net_queue_append(queue, sender, flags, data, size, NULL); |
186 |
} |
187 |
|
188 |
ret = qemu_net_queue_deliver(queue, sender, flags, data, size); |
189 |
if (ret == 0) { |
190 |
qemu_net_queue_append(queue, sender, flags, data, size, sent_cb); |
191 |
return 0; |
192 |
} |
193 |
|
194 |
qemu_net_queue_flush(queue); |
195 |
|
196 |
return ret;
|
197 |
} |
198 |
|
199 |
ssize_t qemu_net_queue_send_iov(NetQueue *queue, |
200 |
VLANClientState *sender, |
201 |
unsigned flags,
|
202 |
const struct iovec *iov, |
203 |
int iovcnt,
|
204 |
NetPacketSent *sent_cb) |
205 |
{ |
206 |
ssize_t ret; |
207 |
|
208 |
if (queue->delivering) {
|
209 |
return qemu_net_queue_append_iov(queue, sender, flags, iov, iovcnt, NULL); |
210 |
} |
211 |
|
212 |
ret = qemu_net_queue_deliver_iov(queue, sender, flags, iov, iovcnt); |
213 |
if (ret == 0) { |
214 |
qemu_net_queue_append_iov(queue, sender, flags, iov, iovcnt, sent_cb); |
215 |
return 0; |
216 |
} |
217 |
|
218 |
qemu_net_queue_flush(queue); |
219 |
|
220 |
return ret;
|
221 |
} |
222 |
|
223 |
/* Drop every pending packet that originated from 'from'.
 * Dropped packets are freed without invoking their sent_cb. */
void qemu_net_queue_purge(NetQueue *queue, VLANClientState *from)
{
    NetPacket *pkt, *tmp;

    QTAILQ_FOREACH_SAFE(pkt, &queue->packets, entry, tmp) {
        if (pkt->sender != from) {
            continue;
        }
        QTAILQ_REMOVE(&queue->packets, pkt, entry);
        qemu_free(pkt);
    }
}
234 |
|
235 |
/* Attempt to deliver every pending packet in FIFO order.
 *
 * For each packet: deliver it; on a zero ("busy") result, push it back to
 * the head of the queue and stop so FIFO order is preserved. Otherwise
 * invoke its sent_cb (if any) with the delivery result and free it.
 */
void qemu_net_queue_flush(NetQueue *queue)
{
    while (!QTAILQ_EMPTY(&queue->packets)) {
        NetPacket *packet;
        /* Fix: was 'int'. qemu_net_queue_deliver() returns ssize_t and the
         * result is forwarded to sent_cb, so store it without narrowing. */
        ssize_t ret;

        packet = QTAILQ_FIRST(&queue->packets);
        QTAILQ_REMOVE(&queue->packets, packet, entry);

        ret = qemu_net_queue_deliver(queue,
                                     packet->sender,
                                     packet->flags,
                                     packet->data,
                                     packet->size);
        if (ret == 0) {
            /* Receiver busy again: requeue at the head and retry later. */
            QTAILQ_INSERT_HEAD(&queue->packets, packet, entry);
            break;
        }

        if (packet->sent_cb) {
            packet->sent_cb(packet->sender, ret);
        }

        qemu_free(packet);
    }
}