/*-
 * Copyright (c) 2016-2018 Netflix, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */
#include <sys/cdefs.h>
#include "opt_inet.h"
#include "opt_inet6.h"
#include "opt_rss.h"

/**
 * Some notes about usage.
 *
 * The tcp_hpts system is designed to provide a high precision timer
 * system for tcp. Its main purpose is to provide a mechanism for
 * pacing packets out onto the wire. It can be used in two ways
 * by a given TCP stack (and those two methods can be used simultaneously).
 *
 * First, and probably the main way it is used by Rack and BBR, it can
 * be used to call tcp_output() of a transport stack at some time in the future.
 * The normal way this is done is that tcp_output() of the stack schedules
 * itself to be called again by calling tcp_hpts_insert(tcpcb, slot). The
 * slot is the time from now that the stack wants to be called, and it
 * must be converted to tcp_hpts's notion of slots. This is done with
 * one of the macros HPTS_MS_TO_SLOTS or HPTS_USEC_TO_SLOTS. So a typical
 * call from the tcp_output() routine might look like:
 *
 *	tcp_hpts_insert(tp, HPTS_USEC_TO_SLOTS(550));
 *
 * The above would schedule tcp_output() to be called in 550 useconds.
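 *
 * An equivalent millisecond-based request (illustrative only, using the
 * HPTS_MS_TO_SLOTS macro mentioned above) would be:
 *
 *	tcp_hpts_insert(tp, HPTS_MS_TO_SLOTS(5));
 *
 * which schedules the call roughly 5 milliseconds out.
 *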
 * Note that if using this mechanism the stack will want to add, near
 * its top, a check to prevent unwanted calls (from user land or the
 * arrival of incoming acks). So it would add something like:
 *
 *	if (tcp_in_hpts(inp))
 *		return;
 *
 * to prevent output processing until the time allotted has gone by.
 * Of course this is a bare-bones example and the stack will probably
 * have more considerations than just the above.
 *
 * In order to run input queued segments from the HPTS context the
 * tcp stack must define an input function for
 * tfb_do_queued_segments(). This function understands
 * how to dequeue an array of packets that were input and
 * knows how to call the correct processing routine.
 *
 * Locking in this is important as well, so most likely the
 * stack will need to define tfb_do_segment_nounlock(),
 * splitting tfb_do_segment() into two parts: a main processing
 * part that does not unlock the INP and returns a value of 1 or 0.
 * It returns 0 if all is well and the lock was not released. It
 * returns 1 if we had to destroy the TCB (a reset received, etc.).
 * The remainder of tfb_do_segment() then becomes just a simple call
 * to the tfb_do_segment_nounlock() function that checks the return
 * code and possibly unlocks.
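 *
 * A minimal sketch of that split (the foo_* names are hypothetical and
 * the argument lists are elided for illustration):
 *
 *	static int
 *	foo_do_segment_nounlock(...)
 *	{
 *		(full segment processing, INP left locked)
 *		return (0);
 *	}
 *
 *	static void
 *	foo_do_segment(...)
 *	{
 *		if (foo_do_segment_nounlock(...) == 0)
 *			INP_WUNLOCK(tptoinpcb(tp));
 *	}
 *
 * where a return of 1 from the nounlock variant means the TCB was
 * destroyed and the lock was already dropped.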
 *
 * The stack must also set the flag on the connection that it supports
 * this feature, i.e. TF2_SUPPORTS_MBUFQ. The LRO code recognizes
 * this flag as well and will queue packets when it is set.
 * There are other flags as well, TF2_MBUF_QUEUE_READY and
 * TF2_DONT_SACK_QUEUE. The first flag tells the LRO code
 * that we are in the pacer for output so there is no
 * need to wake up the hpts system to get immediate
 * input. The second tells the LRO code that it is okay,
 * if a SACK arrives, to still defer input and let
 * the current hpts timer run (this is usually set when
 * a rack timer is up so we know SACKs are happening
 * on the connection already and we don't want to wake up yet).
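 *
 * A sketch of that setup (the TF2_* flag names are the ones used
 * elsewhere in this file; the two if-checks are hypothetical
 * placeholders for the stack's own logic):
 *
 *	tp->t_flags2 |= TF2_SUPPORTS_MBUFQ;
 *	if (stack_is_pacing_output(tp))
 *		tp->t_flags2 |= TF2_MBUF_QUEUE_READY;
 *	if (stack_has_rack_timer_running(tp))
 *		tp->t_flags2 |= TF2_DONT_SACK_QUEUE;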
 *
 * There is a common function within the rack_bbr_common code,
 * i.e. ctf_do_queued_segments(). This function
 * knows how to take the input queue of packets from tp->t_inqueue
 * and process them, digging out all the arguments, calling any bpf tap and
 * calling into tfb_do_segment_nounlock(). The common
 * function (ctf_do_queued_segments()) requires that
 * you have defined the tfb_do_segment_nounlock() as
 * described above.
 */

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/interrupt.h>
#include <sys/module.h>
#include <sys/kernel.h>
#include <sys/hhook.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/proc.h>		/* for proc0 declaration */
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/sysctl.h>
#include <sys/systm.h>
#include <sys/refcount.h>
#include <sys/sched.h>
#include <sys/queue.h>
#include <sys/smp.h>
#include <sys/counter.h>
#include <sys/time.h>
#include <sys/kthread.h>
#include <sys/kern_prefetch.h>

#include <vm/uma.h>
#include <vm/vm.h>

#include <net/route.h>
#include <net/vnet.h>

#ifdef RSS
#include <net/netisr.h>
#include <net/rss_config.h>
#endif

#define TCPSTATES		/* for logging */

#include <netinet/in.h>
#include <netinet/in_kdtrace.h>
#include <netinet/in_pcb.h>
#include <netinet/ip.h>
#include <netinet/ip_icmp.h>	/* required for icmp_var.h */
#include <netinet/icmp_var.h>	/* for ICMP_BANDLIM */
#include <netinet/ip_var.h>
#include <netinet/ip6.h>
#include <netinet6/in6_pcb.h>
#include <netinet6/ip6_var.h>
#include <netinet/tcp.h>
#include <netinet/tcp_fsm.h>
#include <netinet/tcp_seq.h>
#include <netinet/tcp_timer.h>
#include <netinet/tcp_var.h>
#include <netinet/tcpip.h>
#include <netinet/cc/cc.h>
#include <netinet/tcp_hpts.h>
#include <netinet/tcp_log_buf.h>

#ifdef TCP_OFFLOAD
#include <netinet/tcp_offload.h>
#endif

/*
 * The hpts uses a 102400-slot wheel. The wheel
 * defines the time in 10 usec increments (102400 x 10).
 * This gives a range of 10 usec - 1024 ms to place
 * an entry within. If the user requests more than
 * 1.024 seconds, a remainder is attached and the hpts
 * when seeing the remainder will re-insert the
 * inpcb forward in time from where it is until
 * the remainder is zero.
 */

#define NUM_OF_HPTSI_SLOTS 102400
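
/*
 * Worked example of the arithmetic above (illustrative only): one slot
 * is 10 usec, so a request of 550 usec lands 55 slots out, and the
 * whole wheel spans 102400 * 10 usec = 1.024 seconds.  A request of
 * 2.0 seconds therefore cannot fit; the entry is placed as far out as
 * the wheel allows and the remaining ~0.976 seconds are carried in
 * t_hpts_request to be worked off on later passes over the wheel.
 */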

/* Each hpts has its own p_mtx which is used for locking */
#define	HPTS_MTX_ASSERT(hpts)	mtx_assert(&(hpts)->p_mtx, MA_OWNED)
#define	HPTS_LOCK(hpts)		mtx_lock(&(hpts)->p_mtx)
#define	HPTS_UNLOCK(hpts)	mtx_unlock(&(hpts)->p_mtx)
struct tcp_hpts_entry {
	/* Cache line 0x00 */
	struct mtx p_mtx;	/* Mutex for hpts */
	struct timeval p_mysleep;	/* Our min sleep time */
	uint64_t syscall_cnt;
	uint64_t sleeping;	/* What the actual sleep was (if sleeping) */
	uint16_t p_hpts_active;	/* Flag that says hpts is awake */
	uint8_t p_wheel_complete; /* have we completed the wheel arc walk? */
	uint32_t p_curtick;	/* Tick in 10 us the hpts is going to */
	uint32_t p_runningslot;	/* Current tick we are at if we are running */
	uint32_t p_prev_slot;	/* Previous slot we were on */
	uint32_t p_cur_slot;	/* Current slot in wheel hpts is draining */
	uint32_t p_nxt_slot;	/* The next slot outside the current range of
				 * slots that the hpts is running on. */
	int32_t p_on_queue_cnt;	/* Count on queue in this hpts */
	uint32_t p_lasttick;	/* Last tick before the current one */
	uint8_t p_direct_wake :1, /* boolean */
		p_on_min_sleep:1, /* boolean */
		p_hpts_wake_scheduled:1, /* boolean */
		hit_callout_thresh:1,
		p_avail:4;
	uint8_t p_fill[3];	/* Fill to 32 bits */
	/* Cache line 0x40 */
	struct hptsh {
		TAILQ_HEAD(, tcpcb)	head;
		uint32_t		count;
		uint32_t		gencnt;
	} *p_hptss;			/* Hptsi wheel */
	uint32_t p_hpts_sleep_time;	/* Current sleep interval having a max
					 * of 255ms */
	uint32_t overidden_sleep;	/* what was overridden by min-sleep for logging */
	uint32_t saved_lasttick;	/* for logging */
	uint32_t saved_curtick;		/* for logging */
	uint32_t saved_curslot;		/* for logging */
	uint32_t saved_prev_slot;	/* for logging */
	uint32_t p_delayed_by;	/* How much were we delayed by */
	/* Cache line 0x80 */
	struct sysctl_ctx_list hpts_ctx;
	struct sysctl_oid *hpts_root;
	struct intr_event *ie;
	void *ie_cookie;
	uint16_t p_num;		/* The hpts number one per cpu */
	uint16_t p_cpu;		/* The hpts CPU */
	/* There is extra space in here */
	/* Cache line 0x100 */
	struct callout co __aligned(CACHE_LINE_SIZE);
} __aligned(CACHE_LINE_SIZE);

static struct tcp_hptsi {
	struct cpu_group **grps;
	struct tcp_hpts_entry **rp_ent;	/* Array of hptss */
	uint32_t *cts_last_ran;
	uint32_t grp_cnt;
	uint32_t rp_num_hptss;	/* Number of hpts threads */
} tcp_pace;

static MALLOC_DEFINE(M_TCPHPTS, "tcp_hpts", "TCP hpts");
#ifdef RSS
static int tcp_bind_threads = 1;
#else
static int tcp_bind_threads = 2;
#endif
static int tcp_use_irq_cpu = 0;
static int hpts_does_tp_logging = 0;

static int32_t tcp_hptsi(struct tcp_hpts_entry *hpts, int from_callout);
static void tcp_hpts_thread(void *ctx);

int32_t tcp_min_hptsi_time = DEFAULT_MIN_SLEEP;
static int conn_cnt_thresh = DEFAULT_CONNECTION_THESHOLD;
static int32_t dynamic_min_sleep = DYNAMIC_MIN_SLEEP;
static int32_t dynamic_max_sleep = DYNAMIC_MAX_SLEEP;

SYSCTL_NODE(_net_inet_tcp, OID_AUTO, hpts, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
    "TCP Hpts controls");
SYSCTL_NODE(_net_inet_tcp_hpts, OID_AUTO, stats, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
    "TCP Hpts statistics");

#define	timersub(tvp, uvp, vvp)						\
	do {								\
		(vvp)->tv_sec = (tvp)->tv_sec - (uvp)->tv_sec;		\
		(vvp)->tv_usec = (tvp)->tv_usec - (uvp)->tv_usec;	\
		if ((vvp)->tv_usec < 0) {				\
			(vvp)->tv_sec--;				\
			(vvp)->tv_usec += 1000000;			\
		}							\
	} while (0)

static int32_t tcp_hpts_precision = 120;

static struct hpts_domain_info {
	int count;
	int cpu[MAXCPU];
} hpts_domains[MAXMEMDOM];

counter_u64_t hpts_hopelessly_behind;

SYSCTL_COUNTER_U64(_net_inet_tcp_hpts_stats, OID_AUTO, hopeless, CTLFLAG_RD,
    &hpts_hopelessly_behind,
    "Number of times hpts could not catch up and was behind hopelessly");

counter_u64_t hpts_loops;

SYSCTL_COUNTER_U64(_net_inet_tcp_hpts_stats, OID_AUTO, loops, CTLFLAG_RD,
    &hpts_loops, "Number of times hpts had to loop to catch up");

counter_u64_t back_tosleep;

SYSCTL_COUNTER_U64(_net_inet_tcp_hpts_stats, OID_AUTO, no_tcbsfound, CTLFLAG_RD,
    &back_tosleep, "Number of times hpts found no tcbs");

counter_u64_t combined_wheel_wrap;

SYSCTL_COUNTER_U64(_net_inet_tcp_hpts_stats, OID_AUTO, comb_wheel_wrap, CTLFLAG_RD,
    &combined_wheel_wrap, "Number of times the wheel lagged enough to have an insert see wrap");

counter_u64_t wheel_wrap;

SYSCTL_COUNTER_U64(_net_inet_tcp_hpts_stats, OID_AUTO, wheel_wrap, CTLFLAG_RD,
    &wheel_wrap, "Number of times the hpts fell a full wheel behind and had to run the entire wheel");

counter_u64_t hpts_direct_call;
SYSCTL_COUNTER_U64(_net_inet_tcp_hpts_stats, OID_AUTO, direct_call, CTLFLAG_RD,
    &hpts_direct_call, "Number of times hpts was called by syscall/trap or other entry");

counter_u64_t hpts_wake_timeout;

SYSCTL_COUNTER_U64(_net_inet_tcp_hpts_stats, OID_AUTO, timeout_wakeup, CTLFLAG_RD,
    &hpts_wake_timeout, "Number of times hpts threads woke up via the callout expiring");

counter_u64_t hpts_direct_awakening;

SYSCTL_COUNTER_U64(_net_inet_tcp_hpts_stats, OID_AUTO, direct_awakening, CTLFLAG_RD,
    &hpts_direct_awakening, "Number of times hpts threads woke up via a direct wakeup rather than the callout expiring");

counter_u64_t hpts_back_tosleep;

SYSCTL_COUNTER_U64(_net_inet_tcp_hpts_stats, OID_AUTO, back_tosleep, CTLFLAG_RD,
    &hpts_back_tosleep, "Number of times hpts threads woke up via the callout expiring and went back to sleep, finding no work");

counter_u64_t cpu_uses_flowid;
counter_u64_t cpu_uses_random;

SYSCTL_COUNTER_U64(_net_inet_tcp_hpts_stats, OID_AUTO, cpusel_flowid, CTLFLAG_RD,
    &cpu_uses_flowid, "Number of times when setting cpuid we used the flowid field");
SYSCTL_COUNTER_U64(_net_inet_tcp_hpts_stats, OID_AUTO, cpusel_random, CTLFLAG_RD,
    &cpu_uses_random, "Number of times when setting cpuid we used a random value");

TUNABLE_INT("net.inet.tcp.bind_hptss", &tcp_bind_threads);
TUNABLE_INT("net.inet.tcp.use_irq", &tcp_use_irq_cpu);
SYSCTL_INT(_net_inet_tcp_hpts, OID_AUTO, bind_hptss, CTLFLAG_RD,
    &tcp_bind_threads, 2,
    "Thread Binding tunable");
SYSCTL_INT(_net_inet_tcp_hpts, OID_AUTO, use_irq, CTLFLAG_RD,
    &tcp_use_irq_cpu, 0,
    "Use of irq CPU tunable");
SYSCTL_INT(_net_inet_tcp_hpts, OID_AUTO, precision, CTLFLAG_RW,
    &tcp_hpts_precision, 120,
    "Value for PRE() precision of callout");
SYSCTL_INT(_net_inet_tcp_hpts, OID_AUTO, cnt_thresh, CTLFLAG_RW,
    &conn_cnt_thresh, 0,
    "How many connections (below this number) make us use the callout based mechanism");
SYSCTL_INT(_net_inet_tcp_hpts, OID_AUTO, logging, CTLFLAG_RW,
    &hpts_does_tp_logging, 0,
    "Do we add pacer logs to any tp that has logging turned on");
SYSCTL_INT(_net_inet_tcp_hpts, OID_AUTO, dyn_minsleep, CTLFLAG_RW,
    &dynamic_min_sleep, 250,
    "What is the dynamic minsleep value?");
SYSCTL_INT(_net_inet_tcp_hpts, OID_AUTO, dyn_maxsleep, CTLFLAG_RW,
    &dynamic_max_sleep, 5000,
    "What is the dynamic maxsleep value?");

static int32_t max_pacer_loops = 10;
SYSCTL_INT(_net_inet_tcp_hpts, OID_AUTO, loopmax, CTLFLAG_RW,
    &max_pacer_loops, 10,
    "What is the maximum number of times the pacer will loop trying to catch up");

#define	HPTS_MAX_SLEEP_ALLOWED	(NUM_OF_HPTSI_SLOTS/2)

static uint32_t hpts_sleep_max = HPTS_MAX_SLEEP_ALLOWED;

static int
sysctl_net_inet_tcp_hpts_max_sleep(SYSCTL_HANDLER_ARGS)
{
	int error;
	uint32_t new;

	new = hpts_sleep_max;
	error = sysctl_handle_int(oidp, &new, 0, req);
	if (error == 0 && req->newptr) {
		if ((new < (dynamic_min_sleep/HPTS_TICKS_PER_SLOT)) ||
		    (new > HPTS_MAX_SLEEP_ALLOWED))
			error = EINVAL;
		else
			hpts_sleep_max = new;
	}
	return (error);
}

static int
sysctl_net_inet_tcp_hpts_min_sleep(SYSCTL_HANDLER_ARGS)
{
	int error;
	uint32_t new;

	new = tcp_min_hptsi_time;
	error = sysctl_handle_int(oidp, &new, 0, req);
	if (error == 0 && req->newptr) {
		if (new < LOWEST_SLEEP_ALLOWED)
			error = EINVAL;
		else
			tcp_min_hptsi_time = new;
	}
	return (error);
}

SYSCTL_PROC(_net_inet_tcp_hpts, OID_AUTO, maxsleep,
    CTLTYPE_UINT | CTLFLAG_RW,
    &hpts_sleep_max, 0,
    &sysctl_net_inet_tcp_hpts_max_sleep, "IU",
    "Maximum time hpts will sleep in slots");

SYSCTL_PROC(_net_inet_tcp_hpts, OID_AUTO, minsleep,
    CTLTYPE_UINT | CTLFLAG_RW,
    &tcp_min_hptsi_time, 0,
    &sysctl_net_inet_tcp_hpts_min_sleep, "IU",
    "The minimum time the hpts must sleep before processing more slots");

static int ticks_indicate_more_sleep = TICKS_INDICATE_MORE_SLEEP;
static int ticks_indicate_less_sleep = TICKS_INDICATE_LESS_SLEEP;
static int tcp_hpts_no_wake_over_thresh = 1;
static int hpts_that_need_softclock = 0;

SYSCTL_INT(_net_inet_tcp_hpts, OID_AUTO, more_sleep, CTLFLAG_RW,
    &ticks_indicate_more_sleep, 0,
    "If we only process this many or fewer on a timeout, we need longer sleep on the next callout");
SYSCTL_INT(_net_inet_tcp_hpts, OID_AUTO, less_sleep, CTLFLAG_RW,
    &ticks_indicate_less_sleep, 0,
    "If we process this many or more on a timeout, we need less sleep on the next callout");
SYSCTL_INT(_net_inet_tcp_hpts, OID_AUTO, nowake_over_thresh, CTLFLAG_RW,
    &tcp_hpts_no_wake_over_thresh, 0,
    "When we are over the threshold on the pacer do we prohibit wakeups?");

static uint16_t
hpts_random_cpu(void)
{
	uint16_t cpuid;
	uint32_t ran;

	ran = arc4random();
	cpuid = (((ran & 0xffff) % mp_ncpus) % tcp_pace.rp_num_hptss);
	return (cpuid);
}
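
/*
 * For example (illustrative): with mp_ncpus == 8 and 4 hpts threads,
 * a masked random value of 13 maps to CPU 13 % 8 == 5 and then to
 * hpts 5 % 4 == 1.
 */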

static void
tcp_hpts_log(struct tcp_hpts_entry *hpts, struct tcpcb *tp, struct timeval *tv,
    int slots_to_run, int idx, int from_callout)
{
	union tcp_log_stackspecific log;
	/*
	 * Unused logs are
	 * 64 bit - delRate, rttProp, bw_inuse
	 * 16 bit - cwnd_gain
	 *  8 bit - bbr_state, bbr_substate, inhpts;
	 */
	memset(&log.u_bbr, 0, sizeof(log.u_bbr));
	log.u_bbr.flex1 = hpts->p_nxt_slot;
	log.u_bbr.flex2 = hpts->p_cur_slot;
	log.u_bbr.flex3 = hpts->p_prev_slot;
	log.u_bbr.flex4 = idx;
	log.u_bbr.flex5 = hpts->p_curtick;
	log.u_bbr.flex6 = hpts->p_on_queue_cnt;
	log.u_bbr.flex7 = hpts->p_cpu;
	log.u_bbr.flex8 = (uint8_t)from_callout;
	log.u_bbr.inflight = slots_to_run;
	log.u_bbr.applimited = hpts->overidden_sleep;
	log.u_bbr.delivered = hpts->saved_curtick;
	log.u_bbr.timeStamp = tcp_tv_to_usectick(tv);
	log.u_bbr.epoch = hpts->saved_curslot;
	log.u_bbr.lt_epoch = hpts->saved_prev_slot;
	log.u_bbr.pkts_out = hpts->p_delayed_by;
	log.u_bbr.lost = hpts->p_hpts_sleep_time;
	log.u_bbr.pacing_gain = hpts->p_cpu;
	log.u_bbr.pkt_epoch = hpts->p_runningslot;
	log.u_bbr.use_lt_bw = 1;
	TCP_LOG_EVENTP(tp, NULL,
	    &tptosocket(tp)->so_rcv,
	    &tptosocket(tp)->so_snd,
	    BBR_LOG_HPTSDIAG, 0,
	    0, &log, false, tv);
}

static void
tcp_wakehpts(struct tcp_hpts_entry *hpts)
{
	HPTS_MTX_ASSERT(hpts);

	if (tcp_hpts_no_wake_over_thresh && (hpts->p_on_queue_cnt >= conn_cnt_thresh)) {
		hpts->p_direct_wake = 0;
		return;
	}
	if (hpts->p_hpts_wake_scheduled == 0) {
		hpts->p_hpts_wake_scheduled = 1;
		swi_sched(hpts->ie_cookie, 0);
	}
}

static void
hpts_timeout_swi(void *arg)
{
	struct tcp_hpts_entry *hpts;

	hpts = (struct tcp_hpts_entry *)arg;
	swi_sched(hpts->ie_cookie, 0);
}

static void
tcp_hpts_insert_internal(struct tcpcb *tp, struct tcp_hpts_entry *hpts)
{
	struct inpcb *inp = tptoinpcb(tp);
	struct hptsh *hptsh;

	INP_WLOCK_ASSERT(inp);
	HPTS_MTX_ASSERT(hpts);
	MPASS(hpts->p_cpu == tp->t_hpts_cpu);
	MPASS(!(inp->inp_flags & INP_DROPPED));

	hptsh = &hpts->p_hptss[tp->t_hpts_slot];

	if (tp->t_in_hpts == IHPTS_NONE) {
		tp->t_in_hpts = IHPTS_ONQUEUE;
		in_pcbref(inp);
	} else if (tp->t_in_hpts == IHPTS_MOVING) {
		tp->t_in_hpts = IHPTS_ONQUEUE;
	} else
		MPASS(tp->t_in_hpts == IHPTS_ONQUEUE);
	tp->t_hpts_gencnt = hptsh->gencnt;

	TAILQ_INSERT_TAIL(&hptsh->head, tp, t_hpts);
	hptsh->count++;
	hpts->p_on_queue_cnt++;
}

static struct tcp_hpts_entry *
tcp_hpts_lock(struct tcpcb *tp)
{
	struct tcp_hpts_entry *hpts;

	INP_LOCK_ASSERT(tptoinpcb(tp));

	hpts = tcp_pace.rp_ent[tp->t_hpts_cpu];
	HPTS_LOCK(hpts);

	return (hpts);
}

static void
tcp_hpts_release(struct tcpcb *tp)
{
	bool released __diagused;

	tp->t_in_hpts = IHPTS_NONE;
	released = in_pcbrele_wlocked(tptoinpcb(tp));
	MPASS(released == false);
}

/*
 * Initialize tcpcb to get ready for use with HPTS. We will know which CPU
 * is preferred on the first incoming packet. Before that we avoid crowding
 * a single CPU with newborn connections and use a random one.
 * This initialization is normally called on a newborn tcpcb, but potentially
 * can be called once again if the stack is switched. In that case we inherit
 * the CPU that the previous stack has set, be it random or not. In extreme
 * cases, e.g. syzkaller fuzzing, a tcpcb can already be in HPTS in the
 * IHPTS_MOVING state and never have received a first packet.
 */
void
tcp_hpts_init(struct tcpcb *tp)
{

	if (__predict_true(tp->t_hpts_cpu == HPTS_CPU_NONE)) {
		tp->t_hpts_cpu = hpts_random_cpu();
		MPASS(!(tp->t_flags2 & TF2_HPTS_CPU_SET));
	}
}

/*
 * Normally called with the INP write-locked, but it does not matter;
 * the hpts lock is the key, and the lock order allows us to hold the
 * INP lock and then acquire the hpts lock.
 */
void
tcp_hpts_remove(struct tcpcb *tp)
{
	struct tcp_hpts_entry *hpts;
	struct hptsh *hptsh;

	INP_WLOCK_ASSERT(tptoinpcb(tp));

	hpts = tcp_hpts_lock(tp);
	if (tp->t_in_hpts == IHPTS_ONQUEUE) {
		hptsh = &hpts->p_hptss[tp->t_hpts_slot];
		tp->t_hpts_request = 0;
		if (__predict_true(tp->t_hpts_gencnt == hptsh->gencnt)) {
			TAILQ_REMOVE(&hptsh->head, tp, t_hpts);
			MPASS(hptsh->count > 0);
			hptsh->count--;
			MPASS(hpts->p_on_queue_cnt > 0);
			hpts->p_on_queue_cnt--;
			tcp_hpts_release(tp);
		} else {
			/*
			 * tcp_hptsi() now owns the TAILQ head of this inp.
			 * Can't TAILQ_REMOVE, just mark it.
			 */
#ifdef INVARIANTS
			struct tcpcb *tmp;

			TAILQ_FOREACH(tmp, &hptsh->head, t_hpts)
				MPASS(tmp != tp);
#endif
			tp->t_in_hpts = IHPTS_MOVING;
			tp->t_hpts_slot = -1;
		}
	} else if (tp->t_in_hpts == IHPTS_MOVING) {
		/*
		 * Handle a special race condition:
		 * tcp_hptsi() moves inpcb to detached tailq
		 * tcp_hpts_remove() marks as IHPTS_MOVING, slot = -1
		 * tcp_hpts_insert() sets slot to a meaningful value
		 * tcp_hpts_remove() again (we are here!), then in_pcbdrop()
		 * tcp_hptsi() finds pcb with meaningful slot and INP_DROPPED
		 */
		tp->t_hpts_slot = -1;
	}
	HPTS_UNLOCK(hpts);
}

static inline int
hpts_slot(uint32_t wheel_slot, uint32_t plus)
{
	/*
	 * Given a slot on the wheel, what slot
	 * is that plus ticks out?
	 */
	KASSERT(wheel_slot < NUM_OF_HPTSI_SLOTS, ("Invalid tick %u not on wheel", wheel_slot));
	return ((wheel_slot + plus) % NUM_OF_HPTSI_SLOTS);
}
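
/*
 * For example (illustrative): hpts_slot(NUM_OF_HPTSI_SLOTS - 1, 2)
 * wraps past the end of the wheel and returns 1.
 */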

static inline int
tick_to_wheel(uint32_t cts_in_wticks)
{
	/*
	 * Given a timestamp in wheel ticks (so by default,
	 * to get to real time one would multiply by 10,
	 * i.e. the number of ticks in a slot), map it to
	 * our limited-space wheel.
	 */
	return (cts_in_wticks % NUM_OF_HPTSI_SLOTS);
}

static inline int
hpts_slots_diff(int prev_slot, int slot_now)
{
	/*
	 * Given two slots that are somewhere
	 * on our wheel, how far apart are they?
	 */
	if (slot_now > prev_slot)
		return (slot_now - prev_slot);
	else if (slot_now == prev_slot)
		/*
		 * Special case, same means we can go all of our
		 * wheel less one slot.
		 */
		return (NUM_OF_HPTSI_SLOTS - 1);
	else
		return ((NUM_OF_HPTSI_SLOTS - prev_slot) + slot_now);
}
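
/*
 * For example (illustrative): hpts_slots_diff(10, 25) is 15, while
 * hpts_slots_diff(25, 10) crosses the wrap point and yields
 * (102400 - 25) + 10 = 102385; equal arguments are special-cased to
 * the whole wheel less one, i.e. 102399.
 */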

/*
 * Given a slot on the wheel that is the current time
 * mapped to the wheel (wheel_slot), what is the maximum
 * distance forward that can be obtained without
 * wrapping past either prev_slot or running_slot
 * depending on the hpts state? Also, if passed
 * a uint32_t *, fill it with the slot location.
 *
 * Note that if you do not give this function the current
 * time (that you think it is) mapped to the wheel slot
 * then the results will not be what you expect and
 * could lead to invalid inserts.
 */
static inline int32_t
max_slots_available(struct tcp_hpts_entry *hpts, uint32_t wheel_slot, uint32_t *target_slot)
{
	uint32_t dis_to_travel, end_slot, pacer_to_now, avail_on_wheel;

	if ((hpts->p_hpts_active == 1) &&
	    (hpts->p_wheel_complete == 0)) {
		end_slot = hpts->p_runningslot;
		/* Back up one tick */
		if (end_slot == 0)
			end_slot = NUM_OF_HPTSI_SLOTS - 1;
		else
			end_slot--;
		if (target_slot)
			*target_slot = end_slot;
	} else {
		/*
		 * For the case where we are
		 * not active, or we have
		 * completed the pass over
		 * the wheel, we can use the
		 * prev slot and subtract one from it. This puts us
		 * as far out as possible on the wheel.
		 */
		end_slot = hpts->p_prev_slot;
		if (end_slot == 0)
			end_slot = NUM_OF_HPTSI_SLOTS - 1;
		else
			end_slot--;
		if (target_slot)
			*target_slot = end_slot;
		/*
		 * Now we have close to the full wheel left minus the
		 * time it has been since the pacer went to sleep. Note
		 * that wheel_slot, passed in, should be the current time
		 * from the perspective of the caller, mapped to the wheel.
		 */
		if (hpts->p_prev_slot != wheel_slot)
			dis_to_travel = hpts_slots_diff(hpts->p_prev_slot, wheel_slot);
		else
			dis_to_travel = 1;
		/*
		 * dis_to_travel in this case is the space from when the
		 * pacer stopped (p_prev_slot) and where our wheel_slot
		 * is now. To know how many slots we can put it in we
		 * subtract from the wheel size. We would not want
		 * to place something after p_prev_slot or it will
		 * get run too soon.
		 */
		return (NUM_OF_HPTSI_SLOTS - dis_to_travel);
	}
	/*
	 * So how many slots are open between p_runningslot -> p_cur_slot?
	 * That is what is currently un-available for insertion. Special
	 * case when we are at the last slot, this gets 1, so that
	 * the answer to how many slots are available is all but 1.
	 */
	if (hpts->p_runningslot == hpts->p_cur_slot)
		dis_to_travel = 1;
	else
		dis_to_travel = hpts_slots_diff(hpts->p_runningslot, hpts->p_cur_slot);
	/*
	 * How long has the pacer been running?
	 */
	if (hpts->p_cur_slot != wheel_slot) {
		/* The pacer is a bit late */
		pacer_to_now = hpts_slots_diff(hpts->p_cur_slot, wheel_slot);
	} else {
		/* The pacer is right on time, now == pacer's start time */
		pacer_to_now = 0;
	}
	/*
	 * To get the number left we can insert into we simply
	 * subtract the distance the pacer has to run from how
	 * many slots there are.
	 */
	avail_on_wheel = NUM_OF_HPTSI_SLOTS - dis_to_travel;
	/*
	 * Now how many of those we will eat due to the pacer's
	 * time (p_cur_slot) of start being behind the
	 * real time (wheel_slot)?
	 */
	if (avail_on_wheel <= pacer_to_now) {
		/*
		 * Wheel wrap. We can't fit on the wheel; that
		 * is unusual, the system must be way overloaded!
		 * Insert into the assured slot, and return special
		 * "0".
		 */
		counter_u64_add(combined_wheel_wrap, 1);
		if (target_slot)
			*target_slot = hpts->p_nxt_slot;
		return (0);
	} else {
		/*
		 * We know how many slots are open
		 * on the wheel (the reverse of what
		 * is left to run). Take away the time
		 * the pacer started to now (wheel_slot)
		 * and that tells you how many slots are
		 * open that can be inserted into that won't
		 * be touched by the pacer until later.
		 */
		return (avail_on_wheel - pacer_to_now);
	}
}
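
/*
 * Worked example (illustrative numbers): suppose the pacer is asleep
 * with p_prev_slot == 100 and the caller's current time maps to
 * wheel_slot == 110.  The distance from where the pacer stopped to now
 * is 10 slots, so at most 102400 - 10 == 102390 slots are available
 * for an insert, with the farthest target slot being 99 (one before
 * p_prev_slot).
 */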

#ifdef INVARIANTS
static void
check_if_slot_would_be_wrong(struct tcp_hpts_entry *hpts, struct tcpcb *tp,
    uint32_t hptsslot, int line)
{
	/*
	 * Sanity checks for the pacer with invariants
	 * on insert.
	 */
	KASSERT(hptsslot < NUM_OF_HPTSI_SLOTS,
	    ("hpts:%p tp:%p slot:%d > max", hpts, tp, hptsslot));
	if ((hpts->p_hpts_active) &&
	    (hpts->p_wheel_complete == 0)) {
		/*
		 * If the pacer is processing an arc
		 * of the wheel, we need to make
		 * sure we are not inserting within
		 * that arc.
		 */
		int distance, yet_to_run;

		distance = hpts_slots_diff(hpts->p_runningslot, hptsslot);
		if (hpts->p_runningslot != hpts->p_cur_slot)
			yet_to_run = hpts_slots_diff(hpts->p_runningslot, hpts->p_cur_slot);
		else
			yet_to_run = 0;	/* processing last slot */
		KASSERT(yet_to_run <= distance, ("hpts:%p tp:%p slot:%d "
		    "distance:%d yet_to_run:%d rs:%d cs:%d", hpts, tp,
		    hptsslot, distance, yet_to_run, hpts->p_runningslot,
		    hpts->p_cur_slot));
	}
}
#endif

uint32_t
tcp_hpts_insert_diag(struct tcpcb *tp, uint32_t slot, int32_t line, struct hpts_diag *diag)
{
	struct tcp_hpts_entry *hpts;
	struct timeval tv;
	uint32_t slot_on, wheel_cts, last_slot, need_new_to = 0;
	int32_t wheel_slot, maxslots;
	bool need_wakeup = false;

	INP_WLOCK_ASSERT(tptoinpcb(tp));
	MPASS(!(tptoinpcb(tp)->inp_flags & INP_DROPPED));
	MPASS(!(tp->t_in_hpts == IHPTS_ONQUEUE));

	/*
	 * We now return the next-slot the hpts will be on, beyond its
	 * current run (if up) or where it was when it stopped if it is
	 * sleeping.
	 */
	hpts = tcp_hpts_lock(tp);
	microuptime(&tv);
	if (diag) {
		memset(diag, 0, sizeof(struct hpts_diag));
		diag->p_hpts_active = hpts->p_hpts_active;
		diag->p_prev_slot = hpts->p_prev_slot;
		diag->p_runningslot = hpts->p_runningslot;
		diag->p_nxt_slot = hpts->p_nxt_slot;
		diag->p_cur_slot = hpts->p_cur_slot;
		diag->p_curtick = hpts->p_curtick;
		diag->p_lasttick = hpts->p_lasttick;
		diag->slot_req = slot;
		diag->p_on_min_sleep = hpts->p_on_min_sleep;
		diag->hpts_sleep_time = hpts->p_hpts_sleep_time;
	}
	if (slot == 0) {
		/* Ok we need to set it on the hpts in the current slot */
		tp->t_hpts_request = 0;
		if ((hpts->p_hpts_active == 0) || (hpts->p_wheel_complete)) {
			/*
			 * The hpts is sleeping; we want it to run in the
			 * next slot. Note that in this state
			 * p_prev_slot == p_cur_slot.
			 */
			tp->t_hpts_slot = hpts_slot(hpts->p_prev_slot, 1);
			if ((hpts->p_on_min_sleep == 0) &&
			    (hpts->p_hpts_active == 0))
				need_wakeup = true;
		} else
			tp->t_hpts_slot = hpts->p_runningslot;
		if (__predict_true(tp->t_in_hpts != IHPTS_MOVING))
			tcp_hpts_insert_internal(tp, hpts);
		if (need_wakeup) {
			/*
			 * Activate the hpts if it is sleeping and its
			 * timeout is not 1.
			 */
			hpts->p_direct_wake = 1;
			tcp_wakehpts(hpts);
		}
		slot_on = hpts->p_nxt_slot;
		HPTS_UNLOCK(hpts);

		return (slot_on);
	}
	/* Get the current time relative to the wheel */
	wheel_cts = tcp_tv_to_hptstick(&tv);
	/* Map it onto the wheel */
	wheel_slot = tick_to_wheel(wheel_cts);
	/* Now what's the max we can place it at? */
	maxslots = max_slots_available(hpts, wheel_slot, &last_slot);
	if (diag) {
		diag->wheel_slot = wheel_slot;
		diag->maxslots = maxslots;
		diag->wheel_cts = wheel_cts;
	}
	if (maxslots == 0) {
		/* The pacer is in a wheel wrap behind, yikes! */
		if (slot > 1) {
			/*
			 * Reduce by 1 to prevent a forever loop in
			 * case something else is wrong. Note this
			 * probably does not hurt because if the pacer
			 * truly is this far behind we will be more
			 * than 1 second late calling anyway.
			 */
			slot--;
		}
		tp->t_hpts_slot = last_slot;
		tp->t_hpts_request = slot;
	} else if (maxslots >= slot) {
		/* It all fits on the wheel */
		tp->t_hpts_request = 0;
		tp->t_hpts_slot = hpts_slot(wheel_slot, slot);
	} else {
		/* It does not fit */
		tp->t_hpts_request = slot - maxslots;
		tp->t_hpts_slot = last_slot;
	}
	if (diag) {
		diag->slot_remaining = tp->t_hpts_request;
		diag->inp_hptsslot = tp->t_hpts_slot;
	}
#ifdef INVARIANTS
	check_if_slot_would_be_wrong(hpts, tp, tp->t_hpts_slot, line);
#endif
	if (__predict_true(tp->t_in_hpts != IHPTS_MOVING))
		tcp_hpts_insert_internal(tp, hpts);
	if ((hpts->p_hpts_active == 0) &&
	    (tp->t_hpts_request == 0) &&
	    (hpts->p_on_min_sleep == 0)) {
		/*
		 * The hpts is sleeping and NOT on a minimum
		 * sleep time; we need to figure out where
		 * it will wake up and whether we need to
		 * reschedule its time-out.
		 */
		uint32_t have_slept, yet_to_sleep;

		/* Now do we need to restart the hpts's timer? */
		have_slept = hpts_slots_diff(hpts->p_prev_slot, wheel_slot);
		if (have_slept < hpts->p_hpts_sleep_time)
			yet_to_sleep = hpts->p_hpts_sleep_time - have_slept;
		else {
			/* We are over-due */
			yet_to_sleep = 0;
			need_wakeup = 1;
		}
		if (diag) {
			diag->have_slept = have_slept;
			diag->yet_to_sleep = yet_to_sleep;
		}
		if (yet_to_sleep &&
		    (yet_to_sleep > slot)) {
			/*
			 * We need to reschedule the hpts's time-out.
			 */
			hpts->p_hpts_sleep_time = slot;
			need_new_to = slot * HPTS_TICKS_PER_SLOT;
		}
	}
	/*
	 * Now how far is the hpts sleeping to? If active is 1, it is
	 * up and ticking and we do nothing; otherwise we may need to
	 * reschedule its callout if need_new_to is set from above.
	 */
	if (need_wakeup) {
		hpts->p_direct_wake = 1;
		tcp_wakehpts(hpts);
		if (diag) {
			diag->need_new_to = 0;
			diag->co_ret = 0xffff0000;
		}
	} else if (need_new_to) {
		int32_t co_ret;
		struct timeval tv;
		sbintime_t sb;

		tv.tv_sec = 0;
		tv.tv_usec = 0;
		while (need_new_to > HPTS_USEC_IN_SEC) {
			tv.tv_sec++;
			need_new_to -= HPTS_USEC_IN_SEC;
		}
		tv.tv_usec = need_new_to;
		sb = tvtosbt(tv);
		co_ret = callout_reset_sbt_on(&hpts->co, sb, 0,
		    hpts_timeout_swi, hpts, hpts->p_cpu,
		    (C_DIRECT_EXEC | C_PREL(tcp_hpts_precision)));
		if (diag) {
			diag->need_new_to = need_new_to;
			diag->co_ret = co_ret;
		}
	}
	slot_on = hpts->p_nxt_slot;
	HPTS_UNLOCK(hpts);

	return (slot_on);
}

static uint16_t
hpts_cpuid(struct tcpcb *tp, int *failed)
{
	struct inpcb *inp = tptoinpcb(tp);
	u_int cpuid;
#ifdef NUMA
	struct hpts_domain_info *di;
#endif

	*failed = 0;
	if (tp->t_flags2 & TF2_HPTS_CPU_SET) {
		return (tp->t_hpts_cpu);
	}
	/*
	 * If we are using the irq cpu set by LRO or
	 * the driver then it overrides all other domains.
	 */
	if (tcp_use_irq_cpu) {
		if (tp->t_lro_cpu == HPTS_CPU_NONE) {
			*failed = 1;
			return (0);
		}
		return (tp->t_lro_cpu);
	}
	/* If one is set the other must be the same */
#ifdef RSS
	cpuid = rss_hash2cpuid(inp->inp_flowid, inp->inp_flowtype);
	if (cpuid == NETISR_CPUID_NONE)
		return (hpts_random_cpu());
	else
		return (cpuid);
#endif
	/*
	 * We don't have a flowid -> cpuid mapping, so cheat and just map
	 * unhashed connections to a random hpts CPU. Not the best, but
	 * apparently better than defaulting to swi 0.
	 */
	if (inp->inp_flowtype == M_HASHTYPE_NONE) {
		counter_u64_add(cpu_uses_random, 1);
		return (hpts_random_cpu());
	}
	/*
	 * Hash to a thread based on the flowid. If we are using numa,
	 * then restrict the hash to the numa domain where the inp lives.
	 */

#ifdef NUMA
	if ((vm_ndomains == 1) ||
	    (inp->inp_numa_domain == M_NODOM)) {
#endif
		cpuid = inp->inp_flowid % mp_ncpus;
#ifdef NUMA
	} else {
		/* Hash into the cpu's that use that domain */
		di = &hpts_domains[inp->inp_numa_domain];
		cpuid = di->cpu[inp->inp_flowid % di->count];
	}
#endif
	counter_u64_add(cpu_uses_flowid, 1);
	return (cpuid);
}

static void
tcp_hpts_set_max_sleep(struct tcp_hpts_entry *hpts, int wrap_loop_cnt)
{
	uint32_t t = 0, i;

	if ((hpts->p_on_queue_cnt) && (wrap_loop_cnt < 2)) {
		/*
		 * Find the next slot that is occupied and use that to
		 * be the sleep time.
		 */
		for (i = 0, t = hpts_slot(hpts->p_cur_slot, 1); i < NUM_OF_HPTSI_SLOTS; i++) {
			if (TAILQ_EMPTY(&hpts->p_hptss[t].head) == 0) {
				break;
			}
			t = (t + 1) % NUM_OF_HPTSI_SLOTS;
		}
		KASSERT((i != NUM_OF_HPTSI_SLOTS), ("Hpts:%p cnt:%d but none found", hpts, hpts->p_on_queue_cnt));
		hpts->p_hpts_sleep_time = min((i + 1), hpts_sleep_max);
	} else {
		/* No one on the wheel, sleep for the max allowed */
		hpts->p_hpts_sleep_time = hpts_sleep_max;
	}
}
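
/*
 * For example (illustrative): with p_cur_slot == 4 and the nearest
 * occupied slot being 5, the scan above breaks on its first probe
 * (i == 0) and the sleep time becomes min(1, hpts_sleep_max), i.e.
 * one slot.
 */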

static int32_t
tcp_hptsi(struct tcp_hpts_entry *hpts, int from_callout)
{
	struct tcpcb *tp;
	struct timeval tv;
	int32_t slots_to_run, i, error;
	int32_t loop_cnt = 0;
	int32_t did_prefetch = 0;
	int32_t prefetch_tp = 0;
	int32_t wrap_loop_cnt = 0;
	int32_t slot_pos_of_endpoint = 0;
	int32_t orig_exit_slot;
	int8_t completed_measure = 0, seen_endpoint = 0;

	HPTS_MTX_ASSERT(hpts);
	NET_EPOCH_ASSERT();
	/* record previous info for any logging */
	hpts->saved_lasttick = hpts->p_lasttick;
	hpts->saved_curtick = hpts->p_curtick;
	hpts->saved_curslot = hpts->p_cur_slot;
	hpts->saved_prev_slot = hpts->p_prev_slot;

	hpts->p_lasttick = hpts->p_curtick;
	hpts->p_curtick = tcp_gethptstick(&tv);
	tcp_pace.cts_last_ran[hpts->p_num] = tcp_tv_to_usectick(&tv);
	orig_exit_slot = hpts->p_cur_slot = tick_to_wheel(hpts->p_curtick);
	if ((hpts->p_on_queue_cnt == 0) ||
	    (hpts->p_lasttick == hpts->p_curtick)) {
		/*
		 * No time has yet passed,
		 * or nothing to do.
		 */
		hpts->p_prev_slot = hpts->p_cur_slot;
		hpts->p_lasttick = hpts->p_curtick;
		goto no_run;
	}
again:
	hpts->p_wheel_complete = 0;
	HPTS_MTX_ASSERT(hpts);
	slots_to_run = hpts_slots_diff(hpts->p_prev_slot, hpts->p_cur_slot);
	if (((hpts->p_curtick - hpts->p_lasttick) >
	     ((NUM_OF_HPTSI_SLOTS-1) * HPTS_TICKS_PER_SLOT)) &&
	    (hpts->p_on_queue_cnt != 0)) {
		/*
		 * Wheel wrap is occurring: basically we
		 * are behind and the distance between
		 * runs has spread so much it has exceeded
		 * the time on the wheel (1.024 seconds). This
		 * is ugly and should NOT be happening. We
		 * need to run the entire wheel. We last processed
		 * p_prev_slot, so that needs to be the last slot
		 * we run. The next slot after that should be our
		 * reserved first slot for new, and then starts
		 * the running position. Now the problem is the
		 * reserved "not yet to run" slot does not exist,
		 * and there may be inp's in there that need
		 * running. We can merge those into the
		 * first slot at the head.
		 */
		wrap_loop_cnt++;
		hpts->p_nxt_slot = hpts_slot(hpts->p_prev_slot, 1);
		hpts->p_runningslot = hpts_slot(hpts->p_prev_slot, 2);
		/*
		 * Adjust p_cur_slot to be where we are starting from
		 * hopefully we will catch up (fat chance if something
		 * is broken this bad :( )
		 */
		hpts->p_cur_slot = hpts->p_prev_slot;
		/*
		 * The next slot has guys to run too, and that would
		 * be where we would normally start, let's move them into
		 * the next slot (p_prev_slot + 2) so that we will
		 * run them, the extra 10usecs of late (by being
		 * put behind) does not really matter in this situation.
		 */
		TAILQ_FOREACH(tp, &hpts->p_hptss[hpts->p_nxt_slot].head,
		    t_hpts) {
			MPASS(tp->t_hpts_slot == hpts->p_nxt_slot);
			MPASS(tp->t_hpts_gencnt ==
			    hpts->p_hptss[hpts->p_nxt_slot].gencnt);
			MPASS(tp->t_in_hpts == IHPTS_ONQUEUE);

			/*
			 * Update gencnt and nextslot accordingly to match
			 * the new location. This is safe since it takes both
			 * the INP lock and the pacer mutex to change the
			 * t_hptsslot and t_hpts_gencnt.
			 */
			tp->t_hpts_gencnt =
			    hpts->p_hptss[hpts->p_runningslot].gencnt;
			tp->t_hpts_slot = hpts->p_runningslot;
		}
		TAILQ_CONCAT(&hpts->p_hptss[hpts->p_runningslot].head,
		    &hpts->p_hptss[hpts->p_nxt_slot].head, t_hpts);
		hpts->p_hptss[hpts->p_runningslot].count +=
		    hpts->p_hptss[hpts->p_nxt_slot].count;
		hpts->p_hptss[hpts->p_nxt_slot].count = 0;
		hpts->p_hptss[hpts->p_nxt_slot].gencnt++;
		slots_to_run = NUM_OF_HPTSI_SLOTS - 1;
		counter_u64_add(wheel_wrap, 1);
	} else {
		/*
		 * Nxt slot is always one after p_runningslot, though
		 * it's not usually used unless we are doing wheel wrap.
		 */
		hpts->p_nxt_slot = hpts->p_prev_slot;
		hpts->p_runningslot = hpts_slot(hpts->p_prev_slot, 1);
	}
	if (hpts->p_on_queue_cnt == 0) {
		goto no_one;
	}
	for (i = 0; i < slots_to_run; i++) {
		struct tcpcb *tp, *ntp;
		TAILQ_HEAD(, tcpcb) head = TAILQ_HEAD_INITIALIZER(head);
		struct hptsh *hptsh;
		uint32_t runningslot;

		/*
		 * Calculate our delay, if there are no extra ticks there
		 * was not any (i.e. if slots_to_run == 1, no delay).
		 */
		hpts->p_delayed_by = (slots_to_run - (i + 1)) *
		    HPTS_TICKS_PER_SLOT;

		runningslot = hpts->p_runningslot;
		hptsh = &hpts->p_hptss[runningslot];
		TAILQ_SWAP(&head, &hptsh->head, tcpcb, t_hpts);
		hpts->p_on_queue_cnt -= hptsh->count;
		hptsh->count = 0;
		hptsh->gencnt++;

		HPTS_UNLOCK(hpts);

		TAILQ_FOREACH_SAFE(tp, &head, t_hpts, ntp) {
			struct inpcb *inp = tptoinpcb(tp);
			bool set_cpu;

			if (ntp != NULL) {
				/*
				 * If we have a next tcpcb, see if we can
				 * prefetch it. Note this may seem
				 * "risky" since we have no locks (other
				 * than the previous inp) and there is no
				 * assurance that ntp was not pulled while
				 * we were processing tp and freed. If this
				 * occurred it could mean that either:
				 *
				 * a) Its NULL (which is fine we won't go
				 * here) <or> b) Its valid (which is cool we
				 * will prefetch it) <or> c) The inp got
				 * freed back to the slab which was
				 * reallocated. Then the piece of memory was
				 * re-used and something else (not an
				 * address) is in inp_ppcb. If that occurs
				 * we don't crash, but take a TLB shootdown
				 * performance hit (same as if it was NULL
				 * and we tried to pre-fetch it).
				 *
				 * Considering that the likelihood of <c> is
				 * quite rare we will take a risk on doing
				 * this. If performance drops after testing
				 * we can always take this out. NB: the
				 * kern_prefetch on amd64 actually has
				 * protection against a bad address now via
				 * the DMAP_() tests. This will prevent the
				 * TLB hit, and instead if <c> occurs just
				 * cause us to load cache with a useless
				 * address (to us).
				 *
				 * XXXGL: this comment and the prefetch action
				 * could be outdated after tp == inp change.
				 */
				kern_prefetch(ntp, &prefetch_tp);
				prefetch_tp = 1;
			}

			/* For debugging */
			if (seen_endpoint == 0) {
				seen_endpoint = 1;
				orig_exit_slot = slot_pos_of_endpoint =
				    runningslot;
			} else if (completed_measure == 0) {
				/* Record the new position */
				orig_exit_slot = runningslot;
			}

			INP_WLOCK(inp);
			if ((tp->t_flags2 & TF2_HPTS_CPU_SET) == 0) {
				set_cpu = true;
			} else {
				set_cpu = false;
			}

			if (__predict_false(tp->t_in_hpts == IHPTS_MOVING)) {
				if (tp->t_hpts_slot == -1) {
					tp->t_in_hpts = IHPTS_NONE;
					if (in_pcbrele_wlocked(inp) == false)
						INP_WUNLOCK(inp);
				} else {
					HPTS_LOCK(hpts);
					tcp_hpts_insert_internal(tp, hpts);
					HPTS_UNLOCK(hpts);
					INP_WUNLOCK(inp);
				}
				continue;
			}

			MPASS(tp->t_in_hpts == IHPTS_ONQUEUE);
			MPASS(!(inp->inp_flags & INP_DROPPED));
			KASSERT(runningslot == tp->t_hpts_slot,
			    ("Hpts:%p inp:%p slot mis-aligned %u vs %u",
			    hpts, inp, runningslot, tp->t_hpts_slot));

			if (tp->t_hpts_request) {
				/*
				 * This guy is deferred out further in time
				 * than our wheel had available on it.
				 * Push him back on the wheel or run it,
				 * depending.
				 */
				uint32_t maxslots, last_slot, remaining_slots;

				remaining_slots = slots_to_run - (i + 1);
				if (tp->t_hpts_request > remaining_slots) {
					HPTS_LOCK(hpts);
					/*
					 * How far out can we go?
					 */
					maxslots = max_slots_available(hpts,
					    hpts->p_cur_slot, &last_slot);
					if (maxslots >= tp->t_hpts_request) {
						/* We can place it finally to
						 * be processed.  */
						tp->t_hpts_slot = hpts_slot(
						    hpts->p_runningslot,
						    tp->t_hpts_request);
						tp->t_hpts_request = 0;
					} else {
						/* Work off some more time */
						tp->t_hpts_slot = last_slot;
						tp->t_hpts_request -=
						    maxslots;
					}
					tcp_hpts_insert_internal(tp, hpts);
					HPTS_UNLOCK(hpts);
					INP_WUNLOCK(inp);
					continue;
				}
				tp->t_hpts_request = 0;
				/* Fall through, we will do it now */
			}

			tcp_hpts_release(tp);
			if (set_cpu) {
				/*
				 * Setup so the next time we will move to
				 * the right CPU. This should be a rare
				 * event. It will sometimes happen when we
				 * are the client side (usually not the
				 * server). Somehow tcp_output() gets called
				 * before the tcp_do_segment() sets the
				 * initial state. This means the r_cpu and
				 * r_hpts_cpu is 0. We get on the hpts, and
				 * then tcp_input() gets called setting up
				 * the r_cpu to the correct value. The hpts
				 * goes off and sees the mis-match. We
				 * simply correct it here and the CPU will
				 * switch to the new hpts next time the tcb
				 * gets added to the hpts (not this one)
				 * :-)
				 */
				tcp_set_hpts(tp);
			}
			CURVNET_SET(inp->inp_vnet);
			/* Lets do any logging that we might want to */
			if (hpts_does_tp_logging && tcp_bblogging_on(tp)) {
				tcp_hpts_log(hpts, tp, &tv, slots_to_run, i, from_callout);
			}

			if (tp->t_fb_ptr != NULL) {
				kern_prefetch(tp->t_fb_ptr, &did_prefetch);
				did_prefetch = 1;
			}
			/*
			 * We set TF2_HPTS_CALLS before any possible output.
			 * The contract with the transport is that if it cares
			 * about hpts calling it should clear the flag. That
			 * way next time it is called it will know it is hpts.
			 *
			 * We also only call tfb_do_queued_segments() <or>
			 * tcp_output(). It is expected that if segments are
			 * queued and come in that the final input mbuf will
			 * cause a call to output if it is needed so we do
			 * not need a second call to tcp_output(). So we do
			 * one or the other but not both.
			 */
			tp->t_flags2 |= TF2_HPTS_CALLS;
			if ((tp->t_flags2 & TF2_SUPPORTS_MBUFQ) &&
			    !STAILQ_EMPTY(&tp->t_inqueue)) {
				error = (*tp->t_fb->tfb_do_queued_segments)(tp, 0);
				/*
				 * A non-zero return from input queue
				 * processing means the lock was released
				 * and most likely the inp is gone.
				 */
				if (error)
					goto skip_pacing;
			} else
				error = tcp_output(tp);
			if (error < 0)
				goto skip_pacing;
			INP_WUNLOCK(inp);
		skip_pacing:
			CURVNET_RESTORE();
		}
		if (seen_endpoint) {
			/*
			 * We now have an accurate distance between
			 * slot_pos_of_endpoint <-> orig_exit_slot
			 * to tell us how late we were, orig_exit_slot
			 * is where we calculated the end of our cycle to
			 * be when we first entered.
			 */
			completed_measure = 1;
		}
		HPTS_LOCK(hpts);
		hpts->p_runningslot++;
		if (hpts->p_runningslot >= NUM_OF_HPTSI_SLOTS) {
			hpts->p_runningslot = 0;
		}
	}
no_one:
	HPTS_MTX_ASSERT(hpts);
	hpts->p_delayed_by = 0;
	/*
	 * Check to see if we took an excess amount of time and need to run
	 * more ticks (if we did not hit ENOBUFS).
	 */
	hpts->p_prev_slot = hpts->p_cur_slot;
	hpts->p_lasttick = hpts->p_curtick;
	if ((from_callout == 0) || (loop_cnt > max_pacer_loops)) {
		/*
		 * Something is seriously slow: we have
		 * looped through processing the wheel
		 * max_pacer_loops times, and by the time
		 * we cleared what needed to run we still
		 * needed to run. That means the system is
		 * hopelessly behind and can never catch up :(
		 *
		 * We will just lie to this thread
		 * and let it think p_curtick is
		 * correct. When it next awakens
		 * it will find itself further behind.
		 */
		if (from_callout)
			counter_u64_add(hpts_hopelessly_behind, 1);
		goto no_run;
	}
	hpts->p_curtick = tcp_gethptstick(&tv);
	hpts->p_cur_slot = tick_to_wheel(hpts->p_curtick);
	if (seen_endpoint == 0) {
		/* We saw no endpoint but we may be looping */
		orig_exit_slot = hpts->p_cur_slot;
	}
	if ((wrap_loop_cnt < 2) &&
	    (hpts->p_lasttick != hpts->p_curtick)) {
		counter_u64_add(hpts_loops, 1);
		loop_cnt++;
		goto again;
	}
no_run:
	tcp_pace.cts_last_ran[hpts->p_num] = tcp_tv_to_usectick(&tv);
	/*
	 * Set the flag to tell any insert that occurs
	 * from here on that we are done with the wheel.
	 */
	hpts->p_wheel_complete = 1;
	/*
	 * Now did we spend too long running input and need to run more ticks?
	 * Note that if wrap_loop_cnt < 2 then we should have the conditions
	 * in the KASSERTs true. But if the wheel is behind, i.e. wrap_loop_cnt
	 * is 2 or more, then the conditions most likely are *not* true.
	 * Also if we are called not from the callout, we don't run the wheel
	 * multiple times so the slots may not align either.
	 */
	KASSERT(((hpts->p_prev_slot == hpts->p_cur_slot) ||
	    (wrap_loop_cnt >= 2) || (from_callout == 0)),
	    ("H:%p p_prev_slot:%u not equal to p_cur_slot:%u", hpts,
	    hpts->p_prev_slot, hpts->p_cur_slot));
	KASSERT(((hpts->p_lasttick == hpts->p_curtick)
	    || (wrap_loop_cnt >= 2) || (from_callout == 0)),
	    ("H:%p p_lasttick:%u not equal to p_curtick:%u", hpts,
	    hpts->p_lasttick, hpts->p_curtick));
	if (from_callout && (hpts->p_lasttick != hpts->p_curtick)) {
		hpts->p_curtick = tcp_gethptstick(&tv);
		counter_u64_add(hpts_loops, 1);
		hpts->p_cur_slot = tick_to_wheel(hpts->p_curtick);
		goto again;
	}

	if (from_callout) {
		tcp_hpts_set_max_sleep(hpts, wrap_loop_cnt);
	}
	if (seen_endpoint)
		return (hpts_slots_diff(slot_pos_of_endpoint, orig_exit_slot));
	else
		return (0);
}

void
__tcp_set_hpts(struct tcpcb *tp, int32_t line)
{
	struct tcp_hpts_entry *hpts;
	int failed;

	INP_WLOCK_ASSERT(tptoinpcb(tp));

	hpts = tcp_hpts_lock(tp);
	if (tp->t_in_hpts == IHPTS_NONE && !(tp->t_flags2 & TF2_HPTS_CPU_SET)) {
		tp->t_hpts_cpu = hpts_cpuid(tp, &failed);
		if (failed == 0)
			tp->t_flags2 |= TF2_HPTS_CPU_SET;
	}
	mtx_unlock(&hpts->p_mtx);
}

static struct tcp_hpts_entry *
tcp_choose_hpts_to_run(void)
{
	int i, oldest_idx, start, end;
	uint32_t cts, time_since_ran, calc;

	cts = tcp_get_usecs(NULL);
	time_since_ran = 0;
	/* Default is all one group */
	start = 0;
	end = tcp_pace.rp_num_hptss;
	/*
	 * If we have more than one L3 group figure out which one
	 * this CPU is in.
	 */
	if (tcp_pace.grp_cnt > 1) {
		for (i = 0; i < tcp_pace.grp_cnt; i++) {
			if (CPU_ISSET(curcpu, &tcp_pace.grps[i]->cg_mask)) {
				start = tcp_pace.grps[i]->cg_first;
				end = (tcp_pace.grps[i]->cg_last + 1);
				break;
			}
		}
	}
	oldest_idx = -1;
	for (i = start; i < end; i++) {
		if (TSTMP_GT(cts, tcp_pace.cts_last_ran[i]))
			calc = cts - tcp_pace.cts_last_ran[i];
		else
			calc = 0;
		if (calc > time_since_ran) {
			oldest_idx = i;
			time_since_ran = calc;
		}
	}
	if (oldest_idx >= 0)
		return (tcp_pace.rp_ent[oldest_idx]);
	else
		return (tcp_pace.rp_ent[(curcpu % tcp_pace.rp_num_hptss)]);
}

static void
__tcp_run_hpts(void)
{
	struct epoch_tracker et;
	struct tcp_hpts_entry *hpts;
	int ticks_ran;

	hpts = tcp_choose_hpts_to_run();

	if (hpts->p_hpts_active) {
		/* Already active */
		return;
	}
	if (mtx_trylock(&hpts->p_mtx) == 0) {
		/* Someone else got the lock */
		return;
	}
	NET_EPOCH_ENTER(et);
	if (hpts->p_hpts_active)
		goto out_with_mtx;
	hpts->syscall_cnt++;
	counter_u64_add(hpts_direct_call, 1);
	hpts->p_hpts_active = 1;
	ticks_ran = tcp_hptsi(hpts, 0);
	/* We may want to adjust the sleep values here */
	if (hpts->p_on_queue_cnt >= conn_cnt_thresh) {
		if (ticks_ran > ticks_indicate_less_sleep) {
			struct timeval tv;
			sbintime_t sb;

			hpts->p_mysleep.tv_usec /= 2;
			if (hpts->p_mysleep.tv_usec < dynamic_min_sleep)
				hpts->p_mysleep.tv_usec = dynamic_min_sleep;
			/* Reschedule with new to value */
			tcp_hpts_set_max_sleep(hpts, 0);
			tv.tv_sec = 0;
			tv.tv_usec = hpts->p_hpts_sleep_time * HPTS_TICKS_PER_SLOT;
			/* Validate it's in the right ranges */
			if (tv.tv_usec < hpts->p_mysleep.tv_usec) {
				hpts->overidden_sleep = tv.tv_usec;
				tv.tv_usec = hpts->p_mysleep.tv_usec;
			} else if (tv.tv_usec > dynamic_max_sleep) {
				/* Lets not let sleep get above this value */
				hpts->overidden_sleep = tv.tv_usec;
				tv.tv_usec = dynamic_max_sleep;
			}
			/*
			 * In this mode the timer is a backstop to
			 * all the userret/lro_flushes so we use
			 * the dynamic value and set the on_min_sleep
			 * flag so we will not be awoken.
			 */
			sb = tvtosbt(tv);
			/* Store off to make visible the actual sleep time */
			hpts->sleeping = tv.tv_usec;
			callout_reset_sbt_on(&hpts->co, sb, 0,
			    hpts_timeout_swi, hpts, hpts->p_cpu,
			    (C_DIRECT_EXEC | C_PREL(tcp_hpts_precision)));
		} else if (ticks_ran < ticks_indicate_more_sleep) {
			/* For the further sleep, don't reschedule hpts */
			hpts->p_mysleep.tv_usec *= 2;
			if (hpts->p_mysleep.tv_usec > dynamic_max_sleep)
				hpts->p_mysleep.tv_usec = dynamic_max_sleep;
		}
		hpts->p_on_min_sleep = 1;
	}
	hpts->p_hpts_active = 0;
out_with_mtx:
	HPTS_MTX_ASSERT(hpts);
	mtx_unlock(&hpts->p_mtx);
	NET_EPOCH_EXIT(et);
}

static void
tcp_hpts_thread(void *ctx)
{
	struct tcp_hpts_entry *hpts;
	struct epoch_tracker et;
	struct timeval tv;
	sbintime_t sb;
	int ticks_ran;

	hpts = (struct tcp_hpts_entry *)ctx;
	mtx_lock(&hpts->p_mtx);
	if (hpts->p_direct_wake) {
		/* Signaled by input or output with low occupancy count. */
		callout_stop(&hpts->co);
		counter_u64_add(hpts_direct_awakening, 1);
	} else {
		/* Timed out, the normal case. */
		counter_u64_add(hpts_wake_timeout, 1);
		if (callout_pending(&hpts->co) ||
		    !callout_active(&hpts->co)) {
			mtx_unlock(&hpts->p_mtx);
			return;
		}
	}
	callout_deactivate(&hpts->co);
	hpts->p_hpts_wake_scheduled = 0;
	NET_EPOCH_ENTER(et);
1646 if (hpts->p_hpts_active) {
1647 /*
1648 * We are active already. This means that a syscall
1649 * trap or LRO is running in behalf of hpts. In that case
1650 * we need to double our timeout since there seems to be
1651 * enough activity in the system that we don't need to
1652 * run as often (if we were not directly woken).
1653 */
1654 tv.tv_sec = 0;
1655 if (hpts->p_direct_wake == 0) {
1656 counter_u64_add(hpts_back_tosleep, 1);
1657 if (hpts->p_on_queue_cnt >= conn_cnt_thresh) {
1658 hpts->p_mysleep.tv_usec *= 2;
1659 if (hpts->p_mysleep.tv_usec > dynamic_max_sleep)
1660 hpts->p_mysleep.tv_usec = dynamic_max_sleep;
1661 tv.tv_usec = hpts->p_mysleep.tv_usec;
1662 hpts->p_on_min_sleep = 1;
1663 } else {
1664 /*
1665 * Here we have low count on the wheel, but
1666 * somehow we still collided with one of the
1667 * connections. Lets go back to sleep for a
1668 * min sleep time, but clear the flag so we
1669 * can be awoken by insert.
1670 */
1671 hpts->p_on_min_sleep = 0;
1672 tv.tv_usec = tcp_min_hptsi_time;
1673 }
1674 } else {
1675 /*
1676 * Directly woken most likely to reset the
1677 * callout time.
1678 */
1679 tv.tv_usec = hpts->p_mysleep.tv_usec;
1680 }
1681 goto back_to_sleep;
1682 }
1683 hpts->sleeping = 0;
1684 hpts->p_hpts_active = 1;
1685 ticks_ran = tcp_hptsi(hpts, 1);
1686 tv.tv_sec = 0;
1687 tv.tv_usec = hpts->p_hpts_sleep_time * HPTS_TICKS_PER_SLOT;
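	/*
	 * Track whether this hpts has crossed the connection-count
	 * threshold, so that the global hpts_that_need_softclock count
	 * reflects how many wheels are busy enough to want the
	 * syscall/softclock backstop run on their behalf.
	 */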
	if ((hpts->p_on_queue_cnt > conn_cnt_thresh) &&
	    (hpts->hit_callout_thresh == 0)) {
		hpts->hit_callout_thresh = 1;
		atomic_add_int(&hpts_that_need_softclock, 1);
	} else if ((hpts->p_on_queue_cnt <= conn_cnt_thresh) &&
	    (hpts->hit_callout_thresh == 1)) {
		hpts->hit_callout_thresh = 0;
		atomic_subtract_int(&hpts_that_need_softclock, 1);
	}
	if (hpts->p_on_queue_cnt >= conn_cnt_thresh) {
		if (hpts->p_direct_wake == 0) {
			/*
			 * Only adjust sleep time if we were
			 * called from the callout, i.e. direct_wake == 0.
			 */
			if (ticks_ran < ticks_indicate_more_sleep) {
				hpts->p_mysleep.tv_usec *= 2;
				if (hpts->p_mysleep.tv_usec > dynamic_max_sleep)
					hpts->p_mysleep.tv_usec = dynamic_max_sleep;
			} else if (ticks_ran > ticks_indicate_less_sleep) {
				hpts->p_mysleep.tv_usec /= 2;
				if (hpts->p_mysleep.tv_usec < dynamic_min_sleep)
					hpts->p_mysleep.tv_usec = dynamic_min_sleep;
			}
		}
		if (tv.tv_usec < hpts->p_mysleep.tv_usec) {
			hpts->overidden_sleep = tv.tv_usec;
			tv.tv_usec = hpts->p_mysleep.tv_usec;
		} else if (tv.tv_usec > dynamic_max_sleep) {
			/* Let's not let the sleep time exceed this value */
			hpts->overidden_sleep = tv.tv_usec;
			tv.tv_usec = dynamic_max_sleep;
		}
		/*
		 * In this mode the timer is a backstop to
		 * all the userret/lro flushes, so we use
		 * the dynamic value and set the on_min_sleep
		 * flag so we will not be awoken.
		 */
		hpts->p_on_min_sleep = 1;
	} else if (hpts->p_on_queue_cnt == 0) {
		/*
		 * No one is on the wheel; please wake us up
		 * if you insert on the wheel.
		 */
		hpts->p_on_min_sleep = 0;
		hpts->overidden_sleep = 0;
	} else {
		/*
		 * We hit here when we have a low number of
		 * clients on the wheel (our else clause).
		 * We may need to go on min sleep; if we set
		 * the flag we will not be awoken if someone
		 * is inserted ahead of us. Clearing the flag
		 * means we can be awoken. This is the "old mode"
		 * where the timer is what mainly runs hpts.
		 */
		if (tv.tv_usec < tcp_min_hptsi_time) {
			/*
			 * Yes, we are on min sleep, which means
			 * we cannot be awoken.
			 */
			hpts->overidden_sleep = tv.tv_usec;
			tv.tv_usec = tcp_min_hptsi_time;
			hpts->p_on_min_sleep = 1;
		} else {
			/* Clear the min sleep flag */
			hpts->overidden_sleep = 0;
			hpts->p_on_min_sleep = 0;
		}
	}
	HPTS_MTX_ASSERT(hpts);
	hpts->p_hpts_active = 0;
back_to_sleep:
	hpts->p_direct_wake = 0;
	sb = tvtosbt(tv);
	/* Store off to make the actual sleep time visible */
	hpts->sleeping = tv.tv_usec;
	callout_reset_sbt_on(&hpts->co, sb, 0,
	    hpts_timeout_swi, hpts, hpts->p_cpu,
	    (C_DIRECT_EXEC | C_PREL(tcp_hpts_precision)));
	NET_EPOCH_EXIT(et);
	mtx_unlock(&hpts->p_mtx);
}

#undef timersub
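
/*
 * Count how many L3 cache domains exist in the CPU topology by walking
 * the cpu_group tree recursively. One hpts group is kept per L3 domain
 * when threads are bound with tcp_bind_threads == 2.
 */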
static int32_t
hpts_count_level(struct cpu_group *cg)
{
	int32_t count_l3, i;

	count_l3 = 0;
	if (cg->cg_level == CG_SHARE_L3)
		count_l3++;
	/* Walk all the children looking for L3 */
	for (i = 0; i < cg->cg_children; i++) {
		count_l3 += hpts_count_level(&cg->cg_child[i]);
	}
	return (count_l3);
}
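
/*
 * Gather a pointer to each L3 cache domain's cpu_group into grps[],
 * stopping once max entries have been filled in. The walk mirrors
 * hpts_count_level() above.
 */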
static void
hpts_gather_grps(struct cpu_group **grps, int32_t *at, int32_t max,
    struct cpu_group *cg)
{
	int32_t idx, i;

	idx = *at;
	if (cg->cg_level == CG_SHARE_L3) {
		grps[idx] = cg;
		idx++;
		if (idx == max) {
			*at = idx;
			return;
		}
	}
	*at = idx;
	/* Walk all the children looking for L3 */
	for (i = 0; i < cg->cg_children; i++) {
		hpts_gather_grps(grps, at, max, &cg->cg_child[i]);
	}
}
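
/*
 * Module load: allocate the per-CPU hpts entries and their wheels, the
 * stat counters and sysctl nodes, discover the L3/NUMA topology, start
 * one SWI thread per hpts (optionally bound to a CPU or cache domain),
 * and arm each entry's callout for the first time.
 */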
static void
tcp_hpts_mod_load(void)
{
	struct cpu_group *cpu_top;
	int32_t error __diagused;
	int32_t i, j, bound = 0, created = 0;
	size_t sz, asz;
	struct timeval tv;
	sbintime_t sb;
	struct tcp_hpts_entry *hpts;
	struct pcpu *pc;
	char unit[16];
	uint32_t ncpus = mp_ncpus ? mp_ncpus : MAXCPU;
	int count, domain;

#ifdef SMP
	cpu_top = smp_topo();
#else
	cpu_top = NULL;
#endif
	tcp_pace.rp_num_hptss = ncpus;
	hpts_hopelessly_behind = counter_u64_alloc(M_WAITOK);
	hpts_loops = counter_u64_alloc(M_WAITOK);
	back_tosleep = counter_u64_alloc(M_WAITOK);
	combined_wheel_wrap = counter_u64_alloc(M_WAITOK);
	wheel_wrap = counter_u64_alloc(M_WAITOK);
	hpts_wake_timeout = counter_u64_alloc(M_WAITOK);
	hpts_direct_awakening = counter_u64_alloc(M_WAITOK);
	hpts_back_tosleep = counter_u64_alloc(M_WAITOK);
	hpts_direct_call = counter_u64_alloc(M_WAITOK);
	cpu_uses_flowid = counter_u64_alloc(M_WAITOK);
	cpu_uses_random = counter_u64_alloc(M_WAITOK);

	sz = (tcp_pace.rp_num_hptss * sizeof(struct tcp_hpts_entry *));
	tcp_pace.rp_ent = malloc(sz, M_TCPHPTS, M_WAITOK | M_ZERO);
	sz = (sizeof(uint32_t) * tcp_pace.rp_num_hptss);
	tcp_pace.cts_last_ran = malloc(sz, M_TCPHPTS, M_WAITOK);
	tcp_pace.grp_cnt = 0;
	if (cpu_top == NULL) {
		tcp_pace.grp_cnt = 1;
	} else {
		/* Find out how many L3 cache domains we have */
		count = 0;
		tcp_pace.grp_cnt = hpts_count_level(cpu_top);
		if (tcp_pace.grp_cnt == 0) {
			tcp_pace.grp_cnt = 1;
		}
		sz = (tcp_pace.grp_cnt * sizeof(struct cpu_group *));
		tcp_pace.grps = malloc(sz, M_TCPHPTS, M_WAITOK);
		/* Now populate the groups */
		if (tcp_pace.grp_cnt == 1) {
			/*
			 * All we need is the top level: all CPUs are in
			 * the same cache, so when we use grps[0]->cg_mask
			 * with the cg_first <-> cg_last it will include
			 * every CPU. The level here is probably zero,
			 * which is ok.
			 */
			tcp_pace.grps[0] = cpu_top;
		} else {
			/*
			 * Here we must find all the level three cache
			 * domains and set up our pointers to them.
			 */
			count = 0;
			hpts_gather_grps(tcp_pace.grps, &count, tcp_pace.grp_cnt, cpu_top);
		}
	}
	asz = sizeof(struct hptsh) * NUM_OF_HPTSI_SLOTS;
	for (i = 0; i < tcp_pace.rp_num_hptss; i++) {
		tcp_pace.rp_ent[i] = malloc(sizeof(struct tcp_hpts_entry),
		    M_TCPHPTS, M_WAITOK | M_ZERO);
		tcp_pace.rp_ent[i]->p_hptss = malloc(asz, M_TCPHPTS, M_WAITOK);
		hpts = tcp_pace.rp_ent[i];
		/*
		 * Init all the hpts structures that are not specifically
		 * zero'd by the allocations. Also let's attach them to the
		 * appropriate sysctl block as well.
		 */
		mtx_init(&hpts->p_mtx, "tcp_hpts_lck",
		    "hpts", MTX_DEF | MTX_DUPOK);
		for (j = 0; j < NUM_OF_HPTSI_SLOTS; j++) {
			TAILQ_INIT(&hpts->p_hptss[j].head);
			hpts->p_hptss[j].count = 0;
			hpts->p_hptss[j].gencnt = 0;
		}
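		/*
		 * Each hpts entry exports its state under a numbered node,
		 * net.inet.tcp.hpts.<N>, in the sysctl tree.
		 */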
		sysctl_ctx_init(&hpts->hpts_ctx);
		sprintf(unit, "%d", i);
		hpts->hpts_root = SYSCTL_ADD_NODE(&hpts->hpts_ctx,
		    SYSCTL_STATIC_CHILDREN(_net_inet_tcp_hpts),
		    OID_AUTO,
		    unit,
		    CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
		    "");
		SYSCTL_ADD_INT(&hpts->hpts_ctx,
		    SYSCTL_CHILDREN(hpts->hpts_root),
		    OID_AUTO, "out_qcnt", CTLFLAG_RD,
		    &hpts->p_on_queue_cnt, 0,
		    "Count of TCBs awaiting output processing");
		SYSCTL_ADD_U16(&hpts->hpts_ctx,
		    SYSCTL_CHILDREN(hpts->hpts_root),
		    OID_AUTO, "active", CTLFLAG_RD,
		    &hpts->p_hpts_active, 0,
		    "Is the hpts active");
		SYSCTL_ADD_UINT(&hpts->hpts_ctx,
		    SYSCTL_CHILDREN(hpts->hpts_root),
		    OID_AUTO, "curslot", CTLFLAG_RD,
		    &hpts->p_cur_slot, 0,
		    "What the currently running pacer's goal slot is");
		SYSCTL_ADD_UINT(&hpts->hpts_ctx,
		    SYSCTL_CHILDREN(hpts->hpts_root),
		    OID_AUTO, "runtick", CTLFLAG_RD,
		    &hpts->p_runningslot, 0,
		    "What the running pacer's current slot is");
		SYSCTL_ADD_UINT(&hpts->hpts_ctx,
		    SYSCTL_CHILDREN(hpts->hpts_root),
		    OID_AUTO, "curtick", CTLFLAG_RD,
		    &hpts->p_curtick, 0,
		    "What the running pacer's last tick mapped to the wheel was");
		SYSCTL_ADD_UINT(&hpts->hpts_ctx,
		    SYSCTL_CHILDREN(hpts->hpts_root),
		    OID_AUTO, "lastran", CTLFLAG_RD,
		    &tcp_pace.cts_last_ran[i], 0,
		    "The last usec tick that this hpts ran");
		SYSCTL_ADD_LONG(&hpts->hpts_ctx,
		    SYSCTL_CHILDREN(hpts->hpts_root),
		    OID_AUTO, "cur_min_sleep", CTLFLAG_RD,
		    &hpts->p_mysleep.tv_usec,
		    "What the running pacer is using for p_mysleep.tv_usec");
		SYSCTL_ADD_U64(&hpts->hpts_ctx,
		    SYSCTL_CHILDREN(hpts->hpts_root),
		    OID_AUTO, "now_sleeping", CTLFLAG_RD,
		    &hpts->sleeping, 0,
		    "What the running pacer is actually sleeping for");
		SYSCTL_ADD_U64(&hpts->hpts_ctx,
		    SYSCTL_CHILDREN(hpts->hpts_root),
		    OID_AUTO, "syscall_cnt", CTLFLAG_RD,
		    &hpts->syscall_cnt, 0,
		    "How many times we had syscalls on this hpts");

		hpts->p_hpts_sleep_time = hpts_sleep_max;
		hpts->p_num = i;
		hpts->p_curtick = tcp_gethptstick(&tv);
		tcp_pace.cts_last_ran[i] = tcp_tv_to_usectick(&tv);
		hpts->p_prev_slot = hpts->p_cur_slot = tick_to_wheel(hpts->p_curtick);
		hpts->p_cpu = 0xffff;
		hpts->p_nxt_slot = hpts_slot(hpts->p_cur_slot, 1);
		callout_init(&hpts->co, 1);
	}
	/* Don't try to bind to NUMA domains if we don't have any */
	if (vm_ndomains == 1 && tcp_bind_threads == 2)
		tcp_bind_threads = 0;

	/*
	 * Now let's start ithreads to handle the hptss.
	 */
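	/*
	 * Binding policy, as implemented below: tcp_bind_threads == 1 pins
	 * each hpts ithread to its own CPU; tcp_bind_threads == 2 binds the
	 * ithread to the cpuset of the L3 cache group its CPU belongs to;
	 * any other value leaves the threads floating.
	 */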
	for (i = 0; i < tcp_pace.rp_num_hptss; i++) {
		hpts = tcp_pace.rp_ent[i];
		hpts->p_cpu = i;

		error = swi_add(&hpts->ie, "hpts",
		    tcp_hpts_thread, (void *)hpts,
		    SWI_NET, INTR_MPSAFE, &hpts->ie_cookie);
		KASSERT(error == 0,
		    ("Can't add hpts:%p i:%d err:%d",
		    hpts, i, error));
		created++;
		hpts->p_mysleep.tv_sec = 0;
		hpts->p_mysleep.tv_usec = tcp_min_hptsi_time;
		if (tcp_bind_threads == 1) {
			if (intr_event_bind(hpts->ie, i) == 0)
				bound++;
		} else if (tcp_bind_threads == 2) {
			/* Find the group for this CPU (i) and bind into it */
			for (j = 0; j < tcp_pace.grp_cnt; j++) {
				if (CPU_ISSET(i, &tcp_pace.grps[j]->cg_mask)) {
					if (intr_event_bind_ithread_cpuset(hpts->ie,
					    &tcp_pace.grps[j]->cg_mask) == 0) {
						bound++;
						pc = pcpu_find(i);
						domain = pc->pc_domain;
						count = hpts_domains[domain].count;
						hpts_domains[domain].cpu[count] = i;
						hpts_domains[domain].count++;
						break;
					}
				}
			}
		}
		tv.tv_sec = 0;
		tv.tv_usec = hpts->p_hpts_sleep_time * HPTS_TICKS_PER_SLOT;
		hpts->sleeping = tv.tv_usec;
		sb = tvtosbt(tv);
		callout_reset_sbt_on(&hpts->co, sb, 0,
		    hpts_timeout_swi, hpts, hpts->p_cpu,
		    (C_DIRECT_EXEC | C_PREL(tcp_hpts_precision)));
	}
	/*
	 * If we somehow have an empty domain, fall back to choosing
	 * among all hpts threads.
	 */
	for (i = 0; i < vm_ndomains; i++) {
		if (hpts_domains[i].count == 0) {
			tcp_bind_threads = 0;
			break;
		}
	}
	tcp_hpts_softclock = __tcp_run_hpts;
	tcp_lro_hpts_init();
	printf("TCP Hpts created %d swi interrupt threads and bound %d to %s\n",
	    created, bound,
	    tcp_bind_threads == 2 ? "NUMA domains" : "cpus");
}
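
/*
 * Module unload: detach the LRO hooks and the softclock pointer first so
 * no new work arrives, then drain each callout, remove its SWI, tear down
 * the sysctl nodes and mutex, and free all memory and counters.
 */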
static void
tcp_hpts_mod_unload(void)
{
	int rv __diagused;

	tcp_lro_hpts_uninit();
	atomic_store_ptr(&tcp_hpts_softclock, NULL);

	for (int i = 0; i < tcp_pace.rp_num_hptss; i++) {
		struct tcp_hpts_entry *hpts = tcp_pace.rp_ent[i];

		rv = callout_drain(&hpts->co);
		MPASS(rv != 0);

		rv = swi_remove(hpts->ie_cookie);
		MPASS(rv == 0);

		rv = sysctl_ctx_free(&hpts->hpts_ctx);
		MPASS(rv == 0);

		mtx_destroy(&hpts->p_mtx);
		free(hpts->p_hptss, M_TCPHPTS);
		free(hpts, M_TCPHPTS);
	}

	free(tcp_pace.rp_ent, M_TCPHPTS);
	free(tcp_pace.cts_last_ran, M_TCPHPTS);
#ifdef SMP
	free(tcp_pace.grps, M_TCPHPTS);
#endif

	counter_u64_free(hpts_hopelessly_behind);
	counter_u64_free(hpts_loops);
	counter_u64_free(back_tosleep);
	counter_u64_free(combined_wheel_wrap);
	counter_u64_free(wheel_wrap);
	counter_u64_free(hpts_wake_timeout);
	counter_u64_free(hpts_direct_awakening);
	counter_u64_free(hpts_back_tosleep);
	counter_u64_free(hpts_direct_call);
	counter_u64_free(cpu_uses_flowid);
	counter_u64_free(cpu_uses_random);
}

static int
tcp_hpts_modevent(module_t mod, int what, void *arg)
{

	switch (what) {
	case MOD_LOAD:
		tcp_hpts_mod_load();
		return (0);
	case MOD_QUIESCE:
		/*
		 * Since we are a dependency of the TCP stack modules, they
		 * should already be unloaded and the HPTS ring empty.
		 * However, function pointer manipulations aren't 100% safe:
		 * although tcp_hpts_mod_unload() uses atomic(9), the
		 * userret() path doesn't. Thus, allow only a forced unload
		 * of HPTS.
		 */
		return (EBUSY);
	case MOD_UNLOAD:
		tcp_hpts_mod_unload();
		return (0);
	default:
		return (EINVAL);
	}
}

static moduledata_t tcp_hpts_module = {
	.name = "tcphpts",
	.evhand = tcp_hpts_modevent,
};

DECLARE_MODULE(tcphpts, tcp_hpts_module, SI_SUB_SOFTINTR, SI_ORDER_ANY);
MODULE_VERSION(tcphpts, 1);
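
/*
 * Registration note: the module attaches at SI_SUB_SOFTINTR, early enough
 * that the TCP stack modules which depend on hpts (see the MOD_QUIESCE
 * comment above) find the pacer threads already in place; per that same
 * handling, it can only be removed via a forced unload.
 */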