Revision 460fec67 — side-by-side diff of slirp/if.c (left column: old line numbers, right column: new line numbers). This change removes the file-scope queue globals and moves them into the per-instance Slirp state structure.
7 | 7 |
|
8 | 8 |
#include <slirp.h> |
9 | 9 |
|
10 |
int if_queued = 0; /* Number of packets queued so far */ |
|
11 |
|
|
12 |
struct mbuf if_fastq; /* fast queue (for interactive data) */ |
|
13 |
struct mbuf if_batchq; /* queue for non-interactive data */ |
|
14 |
struct mbuf *next_m; /* Pointer to next mbuf to output */ |
|
15 |
|
|
16 | 10 |
#define ifs_init(ifm) ((ifm)->ifs_next = (ifm)->ifs_prev = (ifm)) |
17 | 11 |
|
18 | 12 |
static void |
... | ... | |
32 | 26 |
} |
33 | 27 |
|
34 | 28 |
void |
35 |
if_init(void)
|
|
29 |
if_init(Slirp *slirp)
|
|
36 | 30 |
{ |
37 |
if_fastq.ifq_next = if_fastq.ifq_prev = &if_fastq;
|
|
38 |
if_batchq.ifq_next = if_batchq.ifq_prev = &if_batchq;
|
|
39 |
next_m = &if_batchq;
|
|
31 |
slirp->if_fastq.ifq_next = slirp->if_fastq.ifq_prev = &slirp->if_fastq;
|
|
32 |
slirp->if_batchq.ifq_next = slirp->if_batchq.ifq_prev = &slirp->if_batchq;
|
|
33 |
slirp->next_m = &slirp->if_batchq;
|
|
40 | 34 |
} |
41 | 35 |
|
42 | 36 |
/* |
... | ... | |
55 | 49 |
void |
56 | 50 |
if_output(struct socket *so, struct mbuf *ifm) |
57 | 51 |
{ |
52 |
Slirp *slirp = ifm->slirp; |
|
58 | 53 |
struct mbuf *ifq; |
59 | 54 |
int on_fastq = 1; |
60 | 55 |
|
... | ... | |
79 | 74 |
* We mustn't put this packet back on the fastq (or we'll send it out of order) |
80 | 75 |
* XXX add cache here? |
81 | 76 |
*/ |
82 |
for (ifq = if_batchq.ifq_prev; ifq != &if_batchq; ifq = ifq->ifq_prev) { |
|
77 |
for (ifq = slirp->if_batchq.ifq_prev; ifq != &slirp->if_batchq; |
|
78 |
ifq = ifq->ifq_prev) { |
|
83 | 79 |
if (so == ifq->ifq_so) { |
84 | 80 |
/* A match! */ |
85 | 81 |
ifm->ifq_so = so; |
... | ... | |
90 | 86 |
|
91 | 87 |
/* No match, check which queue to put it on */ |
92 | 88 |
if (so && (so->so_iptos & IPTOS_LOWDELAY)) { |
93 |
ifq = if_fastq.ifq_prev; |
|
89 |
ifq = slirp->if_fastq.ifq_prev;
|
|
94 | 90 |
on_fastq = 1; |
95 | 91 |
/* |
96 | 92 |
* Check if this packet is a part of the last |
... | ... | |
102 | 98 |
goto diddit; |
103 | 99 |
} |
104 | 100 |
} else |
105 |
ifq = if_batchq.ifq_prev; |
|
101 |
ifq = slirp->if_batchq.ifq_prev;
|
|
106 | 102 |
|
107 | 103 |
/* Create a new doubly linked list for this session */ |
108 | 104 |
ifm->ifq_so = so; |
... | ... | |
110 | 106 |
insque(ifm, ifq); |
111 | 107 |
|
112 | 108 |
diddit: |
113 |
++if_queued;
|
|
109 |
slirp->if_queued++;
|
|
114 | 110 |
|
115 | 111 |
if (so) { |
116 | 112 |
/* Update *_queued */ |
... | ... | |
130 | 126 |
remque(ifm->ifs_next); |
131 | 127 |
|
132 | 128 |
/* ...And insert in the new. That'll teach ya! */ |
133 |
insque(ifm->ifs_next, &if_batchq); |
|
129 |
insque(ifm->ifs_next, &slirp->if_batchq);
|
|
134 | 130 |
} |
135 | 131 |
} |
136 | 132 |
|
... | ... | |
138 | 134 |
/* |
139 | 135 |
* This prevents us from malloc()ing too many mbufs |
140 | 136 |
*/ |
141 |
if_start(); |
|
137 |
if_start(ifm->slirp);
|
|
142 | 138 |
#endif |
143 | 139 |
} |
144 | 140 |
|
... | ... | |
155 | 151 |
* to the first, etc. etc. |
156 | 152 |
*/ |
157 | 153 |
void |
158 |
if_start(void)
|
|
154 |
if_start(Slirp *slirp)
|
|
159 | 155 |
{ |
160 | 156 |
struct mbuf *ifm, *ifqt; |
161 | 157 |
|
162 | 158 |
DEBUG_CALL("if_start"); |
163 | 159 |
|
164 |
if (if_queued == 0) |
|
160 |
if (slirp->if_queued == 0)
|
|
165 | 161 |
return; /* Nothing to do */ |
166 | 162 |
|
167 | 163 |
again: |
... | ... | |
173 | 169 |
* See which queue to get next packet from |
174 | 170 |
* If there's something in the fastq, select it immediately |
175 | 171 |
*/ |
176 |
if (if_fastq.ifq_next != &if_fastq) {
|
|
177 |
ifm = if_fastq.ifq_next; |
|
172 |
if (slirp->if_fastq.ifq_next != &slirp->if_fastq) {
|
|
173 |
ifm = slirp->if_fastq.ifq_next;
|
|
178 | 174 |
} else { |
179 | 175 |
/* Nothing on fastq, see if next_m is valid */ |
180 |
if (next_m != &if_batchq)
|
|
181 |
ifm = next_m; |
|
176 |
if (slirp->next_m != &slirp->if_batchq)
|
|
177 |
ifm = slirp->next_m;
|
|
182 | 178 |
else |
183 |
ifm = if_batchq.ifq_next; |
|
179 |
ifm = slirp->if_batchq.ifq_next;
|
|
184 | 180 |
|
185 | 181 |
/* Set which packet to send on next iteration */ |
186 |
next_m = ifm->ifq_next; |
|
182 |
slirp->next_m = ifm->ifq_next;
|
|
187 | 183 |
} |
188 | 184 |
/* Remove it from the queue */ |
189 | 185 |
ifqt = ifm->ifq_prev; |
190 | 186 |
remque(ifm); |
191 |
--if_queued;
|
|
187 |
slirp->if_queued--;
|
|
192 | 188 |
|
193 | 189 |
/* If there are more packets for this session, re-queue them */ |
194 | 190 |
if (ifm->ifs_next != /* ifm->ifs_prev != */ ifm) { |
... | ... | |
204 | 200 |
} |
205 | 201 |
|
206 | 202 |
/* Encapsulate the packet for sending */ |
207 |
if_encap((uint8_t *)ifm->m_data, ifm->m_len); |
|
203 |
if_encap(slirp, (uint8_t *)ifm->m_data, ifm->m_len);
|
|
208 | 204 |
|
209 | 205 |
m_free(ifm); |
210 | 206 |
|
211 |
if (if_queued) |
|
207 |
if (slirp->if_queued)
|
|
212 | 208 |
goto again; |
213 | 209 |
} |
Also available in: Unified diff