/* SPDX-License-Identifier: BSD-3-Clause */
/*
 * Copyright (c) 1995 Danny Gasparovski.
 */

#include "slirp.h"

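/*
 * Insert mbuf ifm into a session's (ifs_*) circular doubly linked
 * list, immediately after ifmhead.
 */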
static void ifs_insque(struct mbuf *ifm, struct mbuf *ifmhead)
{
    ifm->ifs_next = ifmhead->ifs_next;
    ifmhead->ifs_next = ifm;
    ifm->ifs_prev = ifmhead;
    ifm->ifs_next->ifs_prev = ifm;
}

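/*
 * Unlink mbuf ifm from its session's (ifs_*) list; ifm's own ifs
 * pointers are left as they were.
 */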
static void ifs_remque(struct mbuf *ifm)
{
    ifm->ifs_prev->ifs_next = ifm->ifs_next;
    ifm->ifs_next->ifs_prev = ifm->ifs_prev;
}

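/* Initialize both output queues as empty, self-referencing lists. */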
void if_init(Slirp *slirp)
{
    slirp->if_fastq.qh_link = slirp->if_fastq.qh_rlink = &slirp->if_fastq;
    slirp->if_batchq.qh_link = slirp->if_batchq.qh_rlink = &slirp->if_batchq;
}

/*
 * if_output: Queue packet into an output queue.
 * There are 2 output queues, if_fastq and if_batchq.
 * Each output queue is a doubly linked list of doubly linked lists
 * of mbufs, each list belonging to one "session" (socket). This
 * way, we can output packets fairly by sending one packet from each
 * session, instead of all the packets from one session, then all packets
 * from the next session, etc. Packets on the if_fastq get absolute
 * priority, but if one session hogs the link, it gets "downgraded"
 * to the batchq until it runs out of packets, then it'll return
 * to the fastq (e.g. if the user does an ls -alR in a telnet session,
 * it'll temporarily get downgraded to the batchq)
 */
void if_output(struct socket *so, struct mbuf *ifm)
{
    Slirp *slirp = ifm->slirp;
    M_DUP_DEBUG(slirp, ifm, 0, 0);

    struct mbuf *ifq;
    int on_fastq = 1;

    DEBUG_CALL("if_output");
    DEBUG_ARG("so = %p", so);
    DEBUG_ARG("ifm = %p", ifm);

    /*
     * First remove the mbuf from m_usedlist,
     * since we're gonna use m_next and m_prev ourselves
     * XXX Shouldn't need this, gotta change dtom() etc.
     */
    if (ifm->m_flags & M_USEDLIST) {
        remque(ifm);
        ifm->m_flags &= ~M_USEDLIST;
    }

    /*
     * See if there's already a batchq list for this session.
     * This can include an interactive session, which should go on fastq,
     * but gets too greedy... hence it'll be downgraded from fastq to batchq.
     * We mustn't put this packet back on the fastq (or we'll send it out of
     * order)
     * XXX add cache here?
     */
    if (so) {
        for (ifq = (struct mbuf *)slirp->if_batchq.qh_rlink;
             (struct quehead *)ifq != &slirp->if_batchq; ifq = ifq->ifq_prev) {
            if (so == ifq->ifq_so) {
                /* A match! */
                ifm->ifq_so = so;
                ifs_insque(ifm, ifq->ifs_prev);
                goto diddit;
            }
        }
    }

    /* No match, check which queue to put it on */
    if (so && (so->so_iptos & IPTOS_LOWDELAY)) {
        ifq = (struct mbuf *)slirp->if_fastq.qh_rlink;
        on_fastq = 1;
        /*
         * Check if this packet is a part of the last
         * packet's session
         */
        if (ifq->ifq_so == so) {
            ifm->ifq_so = so;
            ifs_insque(ifm, ifq->ifs_prev);
            goto diddit;
        }
    } else {
        ifq = (struct mbuf *)slirp->if_batchq.qh_rlink;
    }

    /* Create a new doubly linked list for this session */
    ifm->ifq_so = so;
    ifs_init(ifm);
    insque(ifm, ifq);

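/*
 * diddit: the packet is on a queue now; update the per-socket
 * accounting and possibly downgrade a greedy fastq session.
 */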
diddit:
    if (so) {
        /* Update *_queued */
        so->so_queued++;
        so->so_nqueued++;
        /*
         * Check if the interactive session should be downgraded to
         * the batchq. A session is downgraded if it has queued 6
         * packets without pausing, and at least 3 of those packets
         * have been sent over the link
         * (XXX These are arbitrary numbers, probably not optimal..)
         */
        if (on_fastq &&
            ((so->so_nqueued >= 6) && (so->so_nqueued - so->so_queued) >= 3)) {
            /* Remove from current queue... */
            remque(ifm->ifs_next);

            /* ...And insert in the new. That'll teach ya! */
            insque(ifm->ifs_next, &slirp->if_batchq);
        }
    }

    /*
     * This prevents us from malloc()ing too many mbufs
     */
    if_start(ifm->slirp);
}

/*
 * Send one packet from each session.
 * If there are packets on the fastq, they are sent FIFO, before
 * everything else. Then we choose the first packet from each
 * batchq session (socket) and send it.
 * For example, if there are 3 ftp sessions fighting for bandwidth,
 * one packet will be sent from the first session, then one packet
 * from the second session, then one packet from the third.
 */
void if_start(Slirp *slirp)
{
    uint64_t now = slirp->cb->clock_get_ns(slirp->opaque);
    bool from_batchq = false;
    struct mbuf *ifm, *ifm_next, *ifqt;

    DEBUG_VERBOSE_CALL("if_start");

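    /* Guard against reentrancy: a nested call simply returns. */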
    if (slirp->if_start_busy) {
        return;
    }
    slirp->if_start_busy = true;

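    /*
     * Remember the head of the batchq so we can continue there once
     * the fastq has been drained.
     */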
    struct mbuf *batch_head = NULL;
    if (slirp->if_batchq.qh_link != &slirp->if_batchq) {
        batch_head = (struct mbuf *)slirp->if_batchq.qh_link;
    }

    if (slirp->if_fastq.qh_link != &slirp->if_fastq) {
        ifm_next = (struct mbuf *)slirp->if_fastq.qh_link;
    } else if (batch_head) {
        /* Nothing on fastq, pick up from batchq */
        ifm_next = batch_head;
        from_batchq = true;
    } else {
        ifm_next = NULL;
    }

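    /*
     * Walk the fastq first, then the batchq; each iteration tries to
     * send one packet and advances ifm_next before the current mbuf
     * is unlinked.
     */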
    while (ifm_next) {
        ifm = ifm_next;

        ifm_next = ifm->ifq_next;
        if ((struct quehead *)ifm_next == &slirp->if_fastq) {
            /* No more packets in fastq, switch to batchq */
            ifm_next = batch_head;
            from_batchq = true;
        }
        if ((struct quehead *)ifm_next == &slirp->if_batchq) {
            /* end of batchq */
            ifm_next = NULL;
        }

        /* Try to send packet unless it already expired */
        if (ifm->expiration_date >= now && !if_encap(slirp, ifm)) {
            /* Packet is delayed due to pending ARP or NDP resolution */
            continue;
        }

        /* Remove it from the queue */
        ifqt = ifm->ifq_prev;
        remque(ifm);

        /* If there are more packets for this session, re-queue them */
        if (ifm->ifs_next != ifm) {
            struct mbuf *next = ifm->ifs_next;

            insque(next, ifqt);
            ifs_remque(ifm);
            if (!from_batchq) {
                ifm_next = next;
            }
        }

        /* Update so_queued */
        if (ifm->ifq_so && --ifm->ifq_so->so_queued == 0) {
            /* If there's no more queued, reset nqueued */
            ifm->ifq_so->so_nqueued = 0;
        }

        m_free(ifm);
    }

    slirp->if_start_busy = false;
}