Statistics
| Branch: | Revision:

root / slirp / if.c @ 1de7afc9

History | View | Annotate | Download (6.8 kB)

1
/*
2
 * Copyright (c) 1995 Danny Gasparovski.
3
 *
4
 * Please read the file COPYRIGHT for the
5
 * terms and conditions of the copyright.
6
 */
7

    
8
#include <slirp.h>
9
#include "qemu/timer.h"
10

    
11
static void
12
ifs_insque(struct mbuf *ifm, struct mbuf *ifmhead)
13
{
14
        ifm->ifs_next = ifmhead->ifs_next;
15
        ifmhead->ifs_next = ifm;
16
        ifm->ifs_prev = ifmhead;
17
        ifm->ifs_next->ifs_prev = ifm;
18
}
19

    
20
static void
21
ifs_remque(struct mbuf *ifm)
22
{
23
        ifm->ifs_prev->ifs_next = ifm->ifs_next;
24
        ifm->ifs_next->ifs_prev = ifm->ifs_prev;
25
}
26

    
27
void
28
if_init(Slirp *slirp)
29
{
30
    slirp->if_fastq.ifq_next = slirp->if_fastq.ifq_prev = &slirp->if_fastq;
31
    slirp->if_batchq.ifq_next = slirp->if_batchq.ifq_prev = &slirp->if_batchq;
32
    slirp->next_m = &slirp->if_batchq;
33
}
34

    
35
/*
36
 * if_output: Queue packet into an output queue.
37
 * There are 2 output queue's, if_fastq and if_batchq.
38
 * Each output queue is a doubly linked list of double linked lists
39
 * of mbufs, each list belonging to one "session" (socket).  This
40
 * way, we can output packets fairly by sending one packet from each
41
 * session, instead of all the packets from one session, then all packets
42
 * from the next session, etc.  Packets on the if_fastq get absolute
43
 * priority, but if one session hogs the link, it gets "downgraded"
44
 * to the batchq until it runs out of packets, then it'll return
45
 * to the fastq (eg. if the user does an ls -alR in a telnet session,
46
 * it'll temporarily get downgraded to the batchq)
47
 */
48
/*
 * Queue the packet 'ifm' (owned by session 'so', which may be NULL for
 * sessionless traffic) onto if_fastq or if_batchq as described in the
 * comment above, then kick the transmitter (unless FULL_BOLT).
 */
void
if_output(struct socket *so, struct mbuf *ifm)
{
        Slirp *slirp = ifm->slirp;
        struct mbuf *ifq;
        int on_fastq = 1;

        DEBUG_CALL("if_output");
        DEBUG_ARG("so = %lx", (long)so);
        DEBUG_ARG("ifm = %lx", (long)ifm);

        /*
         * First remove the mbuf from m_usedlist,
         * since we're gonna use m_next and m_prev ourselves
         * XXX Shouldn't need this, gotta change dtom() etc.
         */
        if (ifm->m_flags & M_USEDLIST) {
                remque(ifm);
                ifm->m_flags &= ~M_USEDLIST;
        }

        /*
         * See if there's already a batchq list for this session.
         * This can include an interactive session, which should go on fastq,
         * but gets too greedy... hence it'll be downgraded from fastq to batchq.
         * We mustn't put this packet back on the fastq (or we'll send it out of order)
         * XXX add cache here?
         *
         * The walk is backwards (ifq_prev) from the batchq tail; on a hit the
         * packet is appended to that session's ifs ring, not given its own
         * queue entry.
         */
        for (ifq = slirp->if_batchq.ifq_prev; ifq != &slirp->if_batchq;
             ifq = ifq->ifq_prev) {
                if (so == ifq->ifq_so) {
                        /* A match! */
                        ifm->ifq_so = so;
                        ifs_insque(ifm, ifq->ifs_prev);
                        goto diddit;
                }
        }

        /* No match, check which queue to put it on */
        if (so && (so->so_iptos & IPTOS_LOWDELAY)) {
                ifq = slirp->if_fastq.ifq_prev;
                on_fastq = 1;
                /*
                 * Check if this packet is a part of the last
                 * packet's session
                 */
                if (ifq->ifq_so == so) {
                        ifm->ifq_so = so;
                        ifs_insque(ifm, ifq->ifs_prev);
                        goto diddit;
                }
        } else {
                ifq = slirp->if_batchq.ifq_prev;
                /* Set next_m if the queue was empty so far */
                if (slirp->next_m == &slirp->if_batchq) {
                    slirp->next_m = ifm;
                }
        }

        /* Create a new doubly linked list for this session */
        ifm->ifq_so = so;
        ifs_init(ifm);
        insque(ifm, ifq);

diddit:
        if (so) {
                /* Update *_queued */
                so->so_queued++;
                so->so_nqueued++;
                /*
                 * Check if the interactive session should be downgraded to
                 * the batchq.  A session is downgraded if it has queued 6
                 * packets without pausing, and at least 3 of those packets
                 * have been sent over the link
                 * (XXX These are arbitrary numbers, probably not optimal..)
                 *
                 * NOTE(review): on_fastq is initialized to 1 and never
                 * cleared on the batchq paths, so a packet that matched an
                 * existing batchq session (or was queued via the else branch
                 * above) can also take this branch; it then re-inserts the
                 * session within batchq, which looks harmless but confirm
                 * that is intended.
                 */
                if (on_fastq && ((so->so_nqueued >= 6) &&
                                 (so->so_nqueued - so->so_queued) >= 3)) {

                        /* Remove from current queue... */
                        remque(ifm->ifs_next);

                        /* ...And insert in the new.  That'll teach ya! */
                        insque(ifm->ifs_next, &slirp->if_batchq);
                }
        }

#ifndef FULL_BOLT
        /*
         * This prevents us from malloc()ing too many mbufs
         */
        if_start(ifm->slirp);
#endif
}
142

    
143
/*
144
 * Send a packet
145
 * We choose a packet based on it's position in the output queues;
146
 * If there are packets on the fastq, they are sent FIFO, before
147
 * everything else.  Otherwise we choose the first packet from the
148
 * batchq and send it.  the next packet chosen will be from the session
149
 * after this one, then the session after that one, and so on..  So,
150
 * for example, if there are 3 ftp session's fighting for bandwidth,
151
 * one packet will be sent from the first session, then one packet
152
 * from the second session, then one packet from the third, then back
153
 * to the first, etc. etc.
154
 */
155
/*
 * Drain the output queues: send everything on if_fastq FIFO first, then
 * walk if_batchq round-robin starting at the 'next_m' cursor, one packet
 * per session per pass (see the comment above).  Expired packets are
 * dropped; packets whose link-layer encapsulation is still pending (ARP
 * not resolved yet) are left queued and skipped this pass.
 */
void if_start(Slirp *slirp)
{
    uint64_t now = qemu_get_clock_ns(rt_clock);
    bool from_batchq, next_from_batchq;
    struct mbuf *ifm, *ifm_next, *ifqt;

    DEBUG_CALL("if_start");

    /*
     * Re-entrancy guard: if_start can be reached again while already
     * running (e.g. via if_output from code called below); the flag makes
     * the nested call a no-op instead of corrupting the queue walk.
     */
    if (slirp->if_start_busy) {
        return;
    }
    slirp->if_start_busy = true;

    /* Pick the first packet: fastq head if non-empty, else the batchq
     * cursor; next_m == &if_batchq means batchq is empty. */
    if (slirp->if_fastq.ifq_next != &slirp->if_fastq) {
        ifm_next = slirp->if_fastq.ifq_next;
        next_from_batchq = false;
    } else if (slirp->next_m != &slirp->if_batchq) {
        /* Nothing on fastq, pick up from batchq via next_m */
        ifm_next = slirp->next_m;
        next_from_batchq = true;
    } else {
        ifm_next = NULL;
    }

    while (ifm_next) {
        ifm = ifm_next;
        from_batchq = next_from_batchq;

        /* Compute the successor BEFORE ifm is unlinked/freed below. */
        ifm_next = ifm->ifq_next;
        if (ifm_next == &slirp->if_fastq) {
            /* No more packets in fastq, switch to batchq */
            ifm_next = slirp->next_m;
            next_from_batchq = true;
        }
        if (ifm_next == &slirp->if_batchq) {
            /* end of batchq */
            ifm_next = NULL;
        }

        /* Try to send packet unless it already expired */
        if (ifm->expiration_date >= now && !if_encap(slirp, ifm)) {
            /* Packet is delayed due to pending ARP resolution */
            continue;
        }

        if (ifm == slirp->next_m) {
            /* Set which packet to send on next iteration */
            slirp->next_m = ifm->ifq_next;
        }

        /* Remove it from the queue */
        ifqt = ifm->ifq_prev;
        remque(ifm);

        /* If there are more packets for this session, re-queue them:
         * promote the next ifs-ring packet into ifm's old queue slot. */
        if (ifm->ifs_next != ifm) {
            struct mbuf *next = ifm->ifs_next;

            insque(next, ifqt);
            ifs_remque(ifm);

            if (!from_batchq) {
                /* Next packet in fastq is from the same session */
                ifm_next = next;
                next_from_batchq = false;
            } else if (slirp->next_m == &slirp->if_batchq) {
                /* Set next_m and ifm_next if the session packet is now the
                 * only one on batchq */
                slirp->next_m = ifm_next = next;
            }
        }

        /* Update so_queued */
        if (ifm->ifq_so && --ifm->ifq_so->so_queued == 0) {
            /* If there's no more queued, reset nqueued */
            ifm->ifq_so->so_nqueued = 0;
        }

        m_free(ifm);
    }

    slirp->if_start_busy = false;
}
}