root / net / queue.c @ 1de7afc9
History | View | Annotate | Download (7.3 kB)
1 |
/*
|
---|---|
2 |
* Copyright (c) 2003-2008 Fabrice Bellard
|
3 |
* Copyright (c) 2009 Red Hat, Inc.
|
4 |
*
|
5 |
* Permission is hereby granted, free of charge, to any person obtaining a copy
|
6 |
* of this software and associated documentation files (the "Software"), to deal
|
7 |
* in the Software without restriction, including without limitation the rights
|
8 |
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
9 |
* copies of the Software, and to permit persons to whom the Software is
|
10 |
* furnished to do so, subject to the following conditions:
|
11 |
*
|
12 |
* The above copyright notice and this permission notice shall be included in
|
13 |
* all copies or substantial portions of the Software.
|
14 |
*
|
15 |
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
16 |
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
17 |
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
18 |
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
19 |
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
20 |
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
21 |
* THE SOFTWARE.
|
22 |
*/
|
23 |
|
24 |
#include "net/queue.h" |
25 |
#include "qemu/queue.h" |
26 |
#include "net/net.h" |
27 |
|
28 |
/* The delivery handler may only return zero if it will call
|
29 |
* qemu_net_queue_flush() when it determines that it is once again able
|
30 |
* to deliver packets. It must also call qemu_net_queue_purge() in its
|
31 |
* cleanup path.
|
32 |
*
|
33 |
* If a sent callback is provided to send(), the caller must handle a
|
34 |
* zero return from the delivery handler by not sending any more packets
|
35 |
* until we have invoked the callback. Only in that case will we queue
|
36 |
* the packet.
|
37 |
*
|
38 |
* If a sent callback isn't provided, we just drop the packet to avoid
|
39 |
* unbounded queueing.
|
40 |
*/
|
41 |
|
42 |
struct NetPacket {
|
43 |
QTAILQ_ENTRY(NetPacket) entry; |
44 |
NetClientState *sender; |
45 |
unsigned flags;
|
46 |
int size;
|
47 |
NetPacketSent *sent_cb; |
48 |
uint8_t data[0];
|
49 |
}; |
50 |
|
51 |
/* A FIFO of packets awaiting delivery to one receiver. */
struct NetQueue {
    void *opaque;  /* passed verbatim to qemu_deliver_packet{,_iov}() */

    QTAILQ_HEAD(packets, NetPacket) packets;  /* pending packets, FIFO order */

    unsigned delivering : 1;  /* set while a delivery handler is running, so a
                               * re-entrant send queues instead of recursing */
};
58 |
|
59 |
NetQueue *qemu_new_net_queue(void *opaque)
|
60 |
{ |
61 |
NetQueue *queue; |
62 |
|
63 |
queue = g_malloc0(sizeof(NetQueue));
|
64 |
|
65 |
queue->opaque = opaque; |
66 |
|
67 |
QTAILQ_INIT(&queue->packets); |
68 |
|
69 |
queue->delivering = 0;
|
70 |
|
71 |
return queue;
|
72 |
} |
73 |
|
74 |
/*
 * Free @queue and every packet still pending on it.  No sent
 * callbacks are invoked for the discarded packets.
 */
void qemu_del_net_queue(NetQueue *queue)
{
    while (!QTAILQ_EMPTY(&queue->packets)) {
        NetPacket *pkt = QTAILQ_FIRST(&queue->packets);

        QTAILQ_REMOVE(&queue->packets, pkt, entry);
        g_free(pkt);
    }

    g_free(queue);
}
85 |
|
86 |
static void qemu_net_queue_append(NetQueue *queue, |
87 |
NetClientState *sender, |
88 |
unsigned flags,
|
89 |
const uint8_t *buf,
|
90 |
size_t size, |
91 |
NetPacketSent *sent_cb) |
92 |
{ |
93 |
NetPacket *packet; |
94 |
|
95 |
packet = g_malloc(sizeof(NetPacket) + size);
|
96 |
packet->sender = sender; |
97 |
packet->flags = flags; |
98 |
packet->size = size; |
99 |
packet->sent_cb = sent_cb; |
100 |
memcpy(packet->data, buf, size); |
101 |
|
102 |
QTAILQ_INSERT_TAIL(&queue->packets, packet, entry); |
103 |
} |
104 |
|
105 |
static void qemu_net_queue_append_iov(NetQueue *queue, |
106 |
NetClientState *sender, |
107 |
unsigned flags,
|
108 |
const struct iovec *iov, |
109 |
int iovcnt,
|
110 |
NetPacketSent *sent_cb) |
111 |
{ |
112 |
NetPacket *packet; |
113 |
size_t max_len = 0;
|
114 |
int i;
|
115 |
|
116 |
for (i = 0; i < iovcnt; i++) { |
117 |
max_len += iov[i].iov_len; |
118 |
} |
119 |
|
120 |
packet = g_malloc(sizeof(NetPacket) + max_len);
|
121 |
packet->sender = sender; |
122 |
packet->sent_cb = sent_cb; |
123 |
packet->flags = flags; |
124 |
packet->size = 0;
|
125 |
|
126 |
for (i = 0; i < iovcnt; i++) { |
127 |
size_t len = iov[i].iov_len; |
128 |
|
129 |
memcpy(packet->data + packet->size, iov[i].iov_base, len); |
130 |
packet->size += len; |
131 |
} |
132 |
|
133 |
QTAILQ_INSERT_TAIL(&queue->packets, packet, entry); |
134 |
} |
135 |
|
136 |
static ssize_t qemu_net_queue_deliver(NetQueue *queue,
|
137 |
NetClientState *sender, |
138 |
unsigned flags,
|
139 |
const uint8_t *data,
|
140 |
size_t size) |
141 |
{ |
142 |
ssize_t ret = -1;
|
143 |
|
144 |
queue->delivering = 1;
|
145 |
ret = qemu_deliver_packet(sender, flags, data, size, queue->opaque); |
146 |
queue->delivering = 0;
|
147 |
|
148 |
return ret;
|
149 |
} |
150 |
|
151 |
static ssize_t qemu_net_queue_deliver_iov(NetQueue *queue,
|
152 |
NetClientState *sender, |
153 |
unsigned flags,
|
154 |
const struct iovec *iov, |
155 |
int iovcnt)
|
156 |
{ |
157 |
ssize_t ret = -1;
|
158 |
|
159 |
queue->delivering = 1;
|
160 |
ret = qemu_deliver_packet_iov(sender, flags, iov, iovcnt, queue->opaque); |
161 |
queue->delivering = 0;
|
162 |
|
163 |
return ret;
|
164 |
} |
165 |
|
166 |
ssize_t qemu_net_queue_send(NetQueue *queue, |
167 |
NetClientState *sender, |
168 |
unsigned flags,
|
169 |
const uint8_t *data,
|
170 |
size_t size, |
171 |
NetPacketSent *sent_cb) |
172 |
{ |
173 |
ssize_t ret; |
174 |
|
175 |
if (queue->delivering || !qemu_can_send_packet(sender)) {
|
176 |
qemu_net_queue_append(queue, sender, flags, data, size, sent_cb); |
177 |
return 0; |
178 |
} |
179 |
|
180 |
ret = qemu_net_queue_deliver(queue, sender, flags, data, size); |
181 |
if (ret == 0) { |
182 |
qemu_net_queue_append(queue, sender, flags, data, size, sent_cb); |
183 |
return 0; |
184 |
} |
185 |
|
186 |
qemu_net_queue_flush(queue); |
187 |
|
188 |
return ret;
|
189 |
} |
190 |
|
191 |
ssize_t qemu_net_queue_send_iov(NetQueue *queue, |
192 |
NetClientState *sender, |
193 |
unsigned flags,
|
194 |
const struct iovec *iov, |
195 |
int iovcnt,
|
196 |
NetPacketSent *sent_cb) |
197 |
{ |
198 |
ssize_t ret; |
199 |
|
200 |
if (queue->delivering || !qemu_can_send_packet(sender)) {
|
201 |
qemu_net_queue_append_iov(queue, sender, flags, iov, iovcnt, sent_cb); |
202 |
return 0; |
203 |
} |
204 |
|
205 |
ret = qemu_net_queue_deliver_iov(queue, sender, flags, iov, iovcnt); |
206 |
if (ret == 0) { |
207 |
qemu_net_queue_append_iov(queue, sender, flags, iov, iovcnt, sent_cb); |
208 |
return 0; |
209 |
} |
210 |
|
211 |
qemu_net_queue_flush(queue); |
212 |
|
213 |
return ret;
|
214 |
} |
215 |
|
216 |
/*
 * Drop every queued packet that @from submitted.  Packets from other
 * senders are left in place; no sent callbacks are invoked.
 */
void qemu_net_queue_purge(NetQueue *queue, NetClientState *from)
{
    NetPacket *pkt, *tmp;

    QTAILQ_FOREACH_SAFE(pkt, &queue->packets, entry, tmp) {
        if (pkt->sender != from) {
            continue;
        }
        QTAILQ_REMOVE(&queue->packets, pkt, entry);
        g_free(pkt);
    }
}
227 |
|
228 |
/*
 * Retry delivery of every queued packet, in FIFO order.
 *
 * Returns true when the queue was fully drained, false when the
 * receiver stalled again (the undelivered packet is put back at the
 * head so ordering is preserved).
 */
bool qemu_net_queue_flush(NetQueue *queue)
{
    while (!QTAILQ_EMPTY(&queue->packets)) {
        NetPacket *pkt = QTAILQ_FIRST(&queue->packets);
        int ret;

        QTAILQ_REMOVE(&queue->packets, pkt, entry);

        ret = qemu_net_queue_deliver(queue,
                                     pkt->sender,
                                     pkt->flags,
                                     pkt->data,
                                     pkt->size);
        if (ret == 0) {
            /* Receiver is busy again: requeue at the head and stop. */
            QTAILQ_INSERT_HEAD(&queue->packets, pkt, entry);
            return false;
        }

        /* Delivered (or failed hard): notify the sender, then free. */
        if (pkt->sent_cb) {
            pkt->sent_cb(pkt->sender, ret);
        }

        g_free(pkt);
    }
    return true;
}