/*-
 * Copyright (c) 2016-2018 Netflix, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_inet.h"
#include "opt_inet6.h"
#include "opt_rss.h"

/**
 * Some notes about usage.
 *
 * The tcp_hpts system is designed to provide a high precision timer
 * system for tcp. Its main purpose is to provide a mechanism for
 * pacing packets out onto the wire. It can be used in two ways
 * by a given TCP stack (and those two methods can be used simultaneously).
 *
 * First, and probably the main way it is used by Rack and BBR, it can
 * be used to call tcp_output() of a transport stack at some time in the future.
 * The normal way this is done is that tcp_output() of the stack schedules
 * itself to be called again by calling tcp_hpts_insert(tcpcb, slot). The
 * slot is the time from now that the stack wants to be called but it
 * must be converted to tcp_hpts's notion of slot. This is done with
 * one of the macros HPTS_MS_TO_SLOTS or HPTS_USEC_TO_SLOTS. So a typical
 * call from the tcp_output() routine might look like:
 *
 *	tcp_hpts_insert(tp, HPTS_USEC_TO_SLOTS(550));
 *
 * The above would schedule tcp_output() to be called in 550 microseconds.
 * Note that if using this mechanism the stack will want to add, near
 * its top, a check to prevent unwanted calls (from user land or the
 * arrival of incoming ACKs). So it would add something like:
 *
 *	if (tcp_in_hpts(inp))
 *		return;
 *
 * to prevent output processing until the time allotted has gone by.
 * Of course this is a bare bones example and the stack will probably
 * have more considerations than just the above.
 *
 * In order to run input queued segments from the HPTS context the
 * tcp stack must define an input function for
 * tfb_do_queued_segments(). This function understands
 * how to dequeue an array of packets that were input and
 * knows how to call the correct processing routine.
 *
 * Locking in this is important as well, so most likely the
 * stack will need to define the tfb_do_segment_nounlock(),
 * splitting tfb_do_segment() into two parts: a main processing
 * part that does not unlock the INP and returns a value of 1 or 0.
 * It returns 0 if all is well and the lock was not released. It
 * returns 1 if we had to destroy the TCB (a reset received etc).
 * The remains of tfb_do_segment() then become just a simple call
 * to the tfb_do_segment_nounlock() function and check the return
 * code and possibly unlock.
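 *
 * A minimal sketch of that wrapper (the names example_do_segment() and
 * example_do_segment_nounlock() are hypothetical, not functions of any
 * stack in the tree, and the trailing nxt_pkt/timeval arguments of the
 * nounlock variant are an assumption patterned on the description
 * above) might be:
 *
 *	static void
 *	example_do_segment(struct mbuf *m, struct tcphdr *th,
 *	    struct socket *so, struct tcpcb *tp, int drop_hdrlen,
 *	    int tlen, uint8_t iptos)
 *	{
 *		if (example_do_segment_nounlock(m, th, so, tp,
 *		    drop_hdrlen, tlen, iptos, 0, NULL) == 0)
 *			INP_WUNLOCK(tptoinpcb(tp));
 *	}
 *
 * i.e. the nounlock variant does all the processing, and the wrapper
 * only drops the INP lock when the return code says the TCB survived.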
 *
 * The stack must also set the flag on the INP that it supports this
 * feature i.e. INP_SUPPORTS_MBUFQ. The LRO code recognizes
 * this flag as well and will queue packets when it is set.
 * There are two other flags as well, INP_MBUF_QUEUE_READY and
 * INP_DONT_SACK_QUEUE. The first flag tells the LRO code
 * that we are in the pacer for output so there is no
 * need to wake up the hpts system to get immediate
 * input. The second tells the LRO code that it is okay
 * to defer input even if a SACK arrives, letting
 * the current hpts timer run (this is usually set when
 * a rack timer is up so we know SACKs are happening
 * on the connection already and don't want to wake up yet).
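 *
 * As a sketch (assuming, hypothetically, a stack setup path where the
 * inp is already write locked), enabling the queuing support is just a
 * flag set:
 *
 *	inp->inp_flags2 |= INP_SUPPORTS_MBUFQ;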
 *
 * There is a common function within the rack_bbr_common code,
 * i.e. ctf_do_queued_segments(). This function
 * knows how to take the input queue of packets from
 * tp->t_in_pkt and process them, digging out
 * all the arguments, calling any bpf tap and
 * calling into tfb_do_segment_nounlock(). The common
 * function (ctf_do_queued_segments()) requires that
 * you have defined the tfb_do_segment_nounlock() as
 * described above.
 */

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/interrupt.h>
#include <sys/module.h>
#include <sys/kernel.h>
#include <sys/hhook.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/proc.h>		/* for proc0 declaration */
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/sysctl.h>
#include <sys/systm.h>
#include <sys/refcount.h>
#include <sys/sched.h>
#include <sys/queue.h>
#include <sys/smp.h>
#include <sys/counter.h>
#include <sys/time.h>
#include <sys/kthread.h>
#include <sys/kern_prefetch.h>

#include <vm/uma.h>
#include <vm/vm.h>

#include <net/route.h>
#include <net/vnet.h>

#ifdef RSS
#include <net/netisr.h>
#include <net/rss_config.h>
#endif

#define TCPSTATES		/* for logging */

#include <netinet/in.h>
#include <netinet/in_kdtrace.h>
#include <netinet/in_pcb.h>
#include <netinet/ip.h>
#include <netinet/ip_icmp.h>	/* required for icmp_var.h */
#include <netinet/icmp_var.h>	/* for ICMP_BANDLIM */
#include <netinet/ip_var.h>
#include <netinet/ip6.h>
#include <netinet6/in6_pcb.h>
#include <netinet6/ip6_var.h>
#include <netinet/tcp.h>
#include <netinet/tcp_fsm.h>
#include <netinet/tcp_seq.h>
#include <netinet/tcp_timer.h>
#include <netinet/tcp_var.h>
#include <netinet/tcpip.h>
#include <netinet/cc/cc.h>
#include <netinet/tcp_hpts.h>
#include <netinet/tcp_log_buf.h>

#ifdef TCP_OFFLOAD
#include <netinet/tcp_offload.h>
#endif

/*
 * The hpts uses a 102400-slot wheel. The wheel
 * defines the time in 10 usec increments (102400 x 10).
 * This gives a range of 10 usec - 1.024 sec to place
 * an entry within. If the user requests more than
 * 1.024 seconds, a remainder is attached and the hpts,
 * when seeing the remainder, will re-insert the
 * inpcb forward in time from where it is until
 * the remainder is zero.
 */
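
/*
 * A quick worked example of that arithmetic (the numbers are
 * illustrative, not taken from the code below): a request of 500 usec
 * maps to 50 slots and fits directly, while a request of 2.5 seconds
 * maps to 250000 slots, which does not fit. Such an entry is placed
 * as far out on the wheel as possible, and the slots that did not fit
 * are kept in inp_hpts_request, to be worked off by re-insertion each
 * time the wheel reaches the entry.
 */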

#define NUM_OF_HPTSI_SLOTS 102400

/* Each hpts has its own p_mtx which is used for locking */
#define	HPTS_MTX_ASSERT(hpts)	mtx_assert(&(hpts)->p_mtx, MA_OWNED)
#define	HPTS_LOCK(hpts)		mtx_lock(&(hpts)->p_mtx)
#define	HPTS_UNLOCK(hpts)	mtx_unlock(&(hpts)->p_mtx)
struct tcp_hpts_entry {
	/* Cache line 0x00 */
	struct mtx p_mtx;		/* Mutex for hpts */
	struct timeval p_mysleep;	/* Our min sleep time */
	uint64_t syscall_cnt;
	uint64_t sleeping;		/* What the actual sleep was (if sleeping) */
	uint16_t p_hpts_active;		/* Flag that says hpts is awake */
	uint8_t p_wheel_complete;	/* have we completed the wheel arc walk? */
	uint32_t p_curtick;		/* Tick in 10 us the hpts is going to */
	uint32_t p_runningslot;		/* Current tick we are at if we are running */
	uint32_t p_prev_slot;		/* Previous slot we were on */
	uint32_t p_cur_slot;		/* Current slot in wheel hpts is draining */
	uint32_t p_nxt_slot;		/* The next slot outside the current range of
					 * slots that the hpts is running on. */
	int32_t p_on_queue_cnt;		/* Count on queue in this hpts */
	uint32_t p_lasttick;		/* Last tick before the current one */
	uint8_t p_direct_wake :1,	/* boolean */
		p_on_min_sleep:1,	/* boolean */
		p_hpts_wake_scheduled:1, /* boolean */
		p_avail:5;
	uint8_t p_fill[3];		/* Fill to 32 bits */
	/* Cache line 0x40 */
	struct hptsh {
		TAILQ_HEAD(, inpcb)	head;
		uint32_t		count;
		uint32_t		gencnt;
	} *p_hptss;			/* Hptsi wheel */
	uint32_t p_hpts_sleep_time;	/* Current sleep interval, in slots,
					 * capped at hpts_sleep_max */
	uint32_t overidden_sleep;	/* what was overridden by min-sleep for logging */
	uint32_t saved_lasttick;	/* for logging */
	uint32_t saved_curtick;		/* for logging */
	uint32_t saved_curslot;		/* for logging */
	uint32_t saved_prev_slot;	/* for logging */
	uint32_t p_delayed_by;		/* How much were we delayed by */
	/* Cache line 0x80 */
	struct sysctl_ctx_list hpts_ctx;
	struct sysctl_oid *hpts_root;
	struct intr_event *ie;
	void *ie_cookie;
	uint16_t p_num;			/* The hpts number, one per cpu */
	uint16_t p_cpu;			/* The hpts CPU */
	/* There is extra space in here */
	/* Cache line 0x100 */
	struct callout co __aligned(CACHE_LINE_SIZE);
} __aligned(CACHE_LINE_SIZE);

static struct tcp_hptsi {
	struct cpu_group **grps;
	struct tcp_hpts_entry **rp_ent;	/* Array of hptss */
	uint32_t *cts_last_ran;
	uint32_t grp_cnt;
	uint32_t rp_num_hptss;		/* Number of hpts threads */
} tcp_pace;

MALLOC_DEFINE(M_TCPHPTS, "tcp_hpts", "TCP hpts");
#ifdef RSS
static int tcp_bind_threads = 1;
#else
static int tcp_bind_threads = 2;
#endif
static int tcp_use_irq_cpu = 0;
static uint32_t *cts_last_ran;
static int hpts_does_tp_logging = 0;

static int32_t tcp_hptsi(struct tcp_hpts_entry *hpts, int from_callout);
static void tcp_hpts_thread(void *ctx);
static void tcp_init_hptsi(void *st);

int32_t tcp_min_hptsi_time = DEFAULT_MIN_SLEEP;
static int conn_cnt_thresh = DEFAULT_CONNECTION_THESHOLD;
static int32_t dynamic_min_sleep = DYNAMIC_MIN_SLEEP;
static int32_t dynamic_max_sleep = DYNAMIC_MAX_SLEEP;

SYSCTL_NODE(_net_inet_tcp, OID_AUTO, hpts, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
    "TCP Hpts controls");
SYSCTL_NODE(_net_inet_tcp_hpts, OID_AUTO, stats, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
    "TCP Hpts statistics");

#define	timersub(tvp, uvp, vvp)						\
	do {								\
		(vvp)->tv_sec = (tvp)->tv_sec - (uvp)->tv_sec;		\
		(vvp)->tv_usec = (tvp)->tv_usec - (uvp)->tv_usec;	\
		if ((vvp)->tv_usec < 0) {				\
			(vvp)->tv_sec--;				\
			(vvp)->tv_usec += 1000000;			\
		}							\
	} while (0)
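
/*
 * Just to illustrate the macro above (an example, not code used
 * below): timersub(&now, &start, &delta) leaves now - start in delta,
 * borrowing from tv_sec so that tv_usec stays normalized within
 * [0, 1000000).
 */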

static int32_t tcp_hpts_precision = 120;

static struct hpts_domain_info {
	int count;
	int cpu[MAXCPU];
} hpts_domains[MAXMEMDOM];

enum {
	IHPTS_NONE = 0,
	IHPTS_ONQUEUE,
	IHPTS_MOVING,
};

counter_u64_t hpts_hopelessly_behind;

SYSCTL_COUNTER_U64(_net_inet_tcp_hpts_stats, OID_AUTO, hopeless, CTLFLAG_RD,
    &hpts_hopelessly_behind,
    "Number of times hpts could not catch up and was behind hopelessly");

counter_u64_t hpts_loops;

SYSCTL_COUNTER_U64(_net_inet_tcp_hpts_stats, OID_AUTO, loops, CTLFLAG_RD,
    &hpts_loops, "Number of times hpts had to loop to catch up");

counter_u64_t back_tosleep;

SYSCTL_COUNTER_U64(_net_inet_tcp_hpts_stats, OID_AUTO, no_tcbsfound, CTLFLAG_RD,
    &back_tosleep, "Number of times hpts found no tcbs");

counter_u64_t combined_wheel_wrap;

SYSCTL_COUNTER_U64(_net_inet_tcp_hpts_stats, OID_AUTO, comb_wheel_wrap, CTLFLAG_RD,
    &combined_wheel_wrap, "Number of times the wheel lagged enough to have an insert see wrap");

counter_u64_t wheel_wrap;

SYSCTL_COUNTER_U64(_net_inet_tcp_hpts_stats, OID_AUTO, wheel_wrap, CTLFLAG_RD,
    &wheel_wrap, "Number of times the pacer fell more than a full wheel behind and had to run the entire wheel");

counter_u64_t hpts_direct_call;
SYSCTL_COUNTER_U64(_net_inet_tcp_hpts_stats, OID_AUTO, direct_call, CTLFLAG_RD,
    &hpts_direct_call, "Number of times hpts was called by syscall/trap or other entry");

counter_u64_t hpts_wake_timeout;

SYSCTL_COUNTER_U64(_net_inet_tcp_hpts_stats, OID_AUTO, timeout_wakeup, CTLFLAG_RD,
    &hpts_wake_timeout, "Number of times hpts threads woke up via the callout expiring");

counter_u64_t hpts_direct_awakening;

SYSCTL_COUNTER_U64(_net_inet_tcp_hpts_stats, OID_AUTO, direct_awakening, CTLFLAG_RD,
    &hpts_direct_awakening, "Number of times hpts threads were awakened directly rather than by the callout expiring");

counter_u64_t hpts_back_tosleep;

SYSCTL_COUNTER_U64(_net_inet_tcp_hpts_stats, OID_AUTO, back_tosleep, CTLFLAG_RD,
    &hpts_back_tosleep, "Number of times hpts threads woke up via the callout expiring and went back to sleep with no work");

counter_u64_t cpu_uses_flowid;
counter_u64_t cpu_uses_random;

SYSCTL_COUNTER_U64(_net_inet_tcp_hpts_stats, OID_AUTO, cpusel_flowid, CTLFLAG_RD,
    &cpu_uses_flowid, "Number of times when setting cpuid we used the flowid field");
SYSCTL_COUNTER_U64(_net_inet_tcp_hpts_stats, OID_AUTO, cpusel_random, CTLFLAG_RD,
    &cpu_uses_random, "Number of times when setting cpuid we used a random value");

TUNABLE_INT("net.inet.tcp.bind_hptss", &tcp_bind_threads);
TUNABLE_INT("net.inet.tcp.use_irq", &tcp_use_irq_cpu);
SYSCTL_INT(_net_inet_tcp_hpts, OID_AUTO, bind_hptss, CTLFLAG_RD,
    &tcp_bind_threads, 2,
    "Thread Binding tunable");
SYSCTL_INT(_net_inet_tcp_hpts, OID_AUTO, use_irq, CTLFLAG_RD,
    &tcp_use_irq_cpu, 0,
    "Use of irq CPU tunable");
SYSCTL_INT(_net_inet_tcp_hpts, OID_AUTO, precision, CTLFLAG_RW,
    &tcp_hpts_precision, 120,
    "Value for C_PREL() precision of callout");
SYSCTL_INT(_net_inet_tcp_hpts, OID_AUTO, cnt_thresh, CTLFLAG_RW,
    &conn_cnt_thresh, 0,
    "Connection count below which we use the callout based mechanism");
SYSCTL_INT(_net_inet_tcp_hpts, OID_AUTO, logging, CTLFLAG_RW,
    &hpts_does_tp_logging, 0,
    "Do we add pacer logs to any tp that has logging enabled");
SYSCTL_INT(_net_inet_tcp_hpts, OID_AUTO, dyn_minsleep, CTLFLAG_RW,
    &dynamic_min_sleep, 250,
    "What is the dynamic minsleep value?");
SYSCTL_INT(_net_inet_tcp_hpts, OID_AUTO, dyn_maxsleep, CTLFLAG_RW,
    &dynamic_max_sleep, 5000,
    "What is the dynamic maxsleep value?");

static int32_t max_pacer_loops = 10;
SYSCTL_INT(_net_inet_tcp_hpts, OID_AUTO, loopmax, CTLFLAG_RW,
    &max_pacer_loops, 10,
    "What is the maximum number of times the pacer will loop trying to catch up");

#define	HPTS_MAX_SLEEP_ALLOWED	(NUM_OF_HPTSI_SLOTS/2)

static uint32_t hpts_sleep_max = HPTS_MAX_SLEEP_ALLOWED;

static int
sysctl_net_inet_tcp_hpts_max_sleep(SYSCTL_HANDLER_ARGS)
{
	int error;
	uint32_t new;

	new = hpts_sleep_max;
	error = sysctl_handle_int(oidp, &new, 0, req);
	if (error == 0 && req->newptr) {
		if ((new < (dynamic_min_sleep/HPTS_TICKS_PER_SLOT)) ||
		    (new > HPTS_MAX_SLEEP_ALLOWED))
			error = EINVAL;
		else
			hpts_sleep_max = new;
	}
	return (error);
}

static int
sysctl_net_inet_tcp_hpts_min_sleep(SYSCTL_HANDLER_ARGS)
{
	int error;
	uint32_t new;

	new = tcp_min_hptsi_time;
	error = sysctl_handle_int(oidp, &new, 0, req);
	if (error == 0 && req->newptr) {
		if (new < LOWEST_SLEEP_ALLOWED)
			error = EINVAL;
		else
			tcp_min_hptsi_time = new;
	}
	return (error);
}

SYSCTL_PROC(_net_inet_tcp_hpts, OID_AUTO, maxsleep,
    CTLTYPE_UINT | CTLFLAG_RW,
    &hpts_sleep_max, 0,
    &sysctl_net_inet_tcp_hpts_max_sleep, "IU",
    "Maximum time hpts will sleep in slots");

SYSCTL_PROC(_net_inet_tcp_hpts, OID_AUTO, minsleep,
    CTLTYPE_UINT | CTLFLAG_RW,
    &tcp_min_hptsi_time, 0,
    &sysctl_net_inet_tcp_hpts_min_sleep, "IU",
    "The minimum time the hpts must sleep before processing more slots");
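
/*
 * As a usage sketch (units: maxsleep is in wheel slots of
 * HPTS_TICKS_PER_SLOT usec each, minsleep is in usec), an
 * administrator could set these from a shell with, e.g.:
 *
 *	sysctl net.inet.tcp.hpts.maxsleep=25600
 *	sysctl net.inet.tcp.hpts.minsleep=500
 *
 * Values outside the ranges checked by the handlers above are
 * rejected with EINVAL.
 */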

static int ticks_indicate_more_sleep = TICKS_INDICATE_MORE_SLEEP;
static int ticks_indicate_less_sleep = TICKS_INDICATE_LESS_SLEEP;
static int tcp_hpts_no_wake_over_thresh = 1;

SYSCTL_INT(_net_inet_tcp_hpts, OID_AUTO, more_sleep, CTLFLAG_RW,
    &ticks_indicate_more_sleep, 0,
    "If we only process this many or fewer on a timeout, we need longer sleep on the next callout");
SYSCTL_INT(_net_inet_tcp_hpts, OID_AUTO, less_sleep, CTLFLAG_RW,
    &ticks_indicate_less_sleep, 0,
    "If we process this many or more on a timeout, we need less sleep on the next callout");
SYSCTL_INT(_net_inet_tcp_hpts, OID_AUTO, nowake_over_thresh, CTLFLAG_RW,
    &tcp_hpts_no_wake_over_thresh, 0,
    "When we are over the threshold on the pacer do we prohibit wakeups?");

static void
tcp_hpts_log(struct tcp_hpts_entry *hpts, struct tcpcb *tp, struct timeval *tv,
    int slots_to_run, int idx, int from_callout)
{
	union tcp_log_stackspecific log;
	/*
	 * Unused logs are
	 * 64 bit - delRate, rttProp, bw_inuse
	 * 16 bit - cwnd_gain
	 *  8 bit - bbr_state, bbr_substate, inhpts;
	 */
	memset(&log.u_bbr, 0, sizeof(log.u_bbr));
	log.u_bbr.flex1 = hpts->p_nxt_slot;
	log.u_bbr.flex2 = hpts->p_cur_slot;
	log.u_bbr.flex3 = hpts->p_prev_slot;
	log.u_bbr.flex4 = idx;
	log.u_bbr.flex5 = hpts->p_curtick;
	log.u_bbr.flex6 = hpts->p_on_queue_cnt;
	log.u_bbr.flex7 = hpts->p_cpu;
	log.u_bbr.flex8 = (uint8_t)from_callout;
	log.u_bbr.inflight = slots_to_run;
	log.u_bbr.applimited = hpts->overidden_sleep;
	log.u_bbr.delivered = hpts->saved_curtick;
	log.u_bbr.timeStamp = tcp_tv_to_usectick(tv);
	log.u_bbr.epoch = hpts->saved_curslot;
	log.u_bbr.lt_epoch = hpts->saved_prev_slot;
	log.u_bbr.pkts_out = hpts->p_delayed_by;
	log.u_bbr.lost = hpts->p_hpts_sleep_time;
	log.u_bbr.pacing_gain = hpts->p_cpu;
	log.u_bbr.pkt_epoch = hpts->p_runningslot;
	log.u_bbr.use_lt_bw = 1;
	TCP_LOG_EVENTP(tp, NULL,
	    &tptosocket(tp)->so_rcv,
	    &tptosocket(tp)->so_snd,
	    BBR_LOG_HPTSDIAG, 0,
	    0, &log, false, tv);
}

static void
tcp_wakehpts(struct tcp_hpts_entry *hpts)
{
	HPTS_MTX_ASSERT(hpts);

	if (tcp_hpts_no_wake_over_thresh && (hpts->p_on_queue_cnt >= conn_cnt_thresh)) {
		hpts->p_direct_wake = 0;
		return;
	}
	if (hpts->p_hpts_wake_scheduled == 0) {
		hpts->p_hpts_wake_scheduled = 1;
		swi_sched(hpts->ie_cookie, 0);
	}
}

static void
hpts_timeout_swi(void *arg)
{
	struct tcp_hpts_entry *hpts;

	hpts = (struct tcp_hpts_entry *)arg;
	swi_sched(hpts->ie_cookie, 0);
}

static void
inp_hpts_insert(struct inpcb *inp, struct tcp_hpts_entry *hpts)
{
	struct hptsh *hptsh;

	INP_WLOCK_ASSERT(inp);
	HPTS_MTX_ASSERT(hpts);
	MPASS(hpts->p_cpu == inp->inp_hpts_cpu);
	MPASS(!(inp->inp_flags & INP_DROPPED));

	hptsh = &hpts->p_hptss[inp->inp_hptsslot];

	if (inp->inp_in_hpts == IHPTS_NONE) {
		inp->inp_in_hpts = IHPTS_ONQUEUE;
		in_pcbref(inp);
	} else if (inp->inp_in_hpts == IHPTS_MOVING) {
		inp->inp_in_hpts = IHPTS_ONQUEUE;
	} else
		MPASS(inp->inp_in_hpts == IHPTS_ONQUEUE);
	inp->inp_hpts_gencnt = hptsh->gencnt;

	TAILQ_INSERT_TAIL(&hptsh->head, inp, inp_hpts);
	hptsh->count++;
	hpts->p_on_queue_cnt++;
}

static struct tcp_hpts_entry *
tcp_hpts_lock(struct inpcb *inp)
{
	struct tcp_hpts_entry *hpts;

	INP_LOCK_ASSERT(inp);

	hpts = tcp_pace.rp_ent[inp->inp_hpts_cpu];
	HPTS_LOCK(hpts);

	return (hpts);
}

static void
inp_hpts_release(struct inpcb *inp)
{
	bool released __diagused;

	inp->inp_in_hpts = IHPTS_NONE;
	released = in_pcbrele_wlocked(inp);
	MPASS(released == false);
}

/*
 * Called normally with the INP write locked, but it
 * does not matter; the hpts lock is the key,
 * and the lock order allows us to hold the
 * INP lock and then get the hpts lock.
 */
void
tcp_hpts_remove(struct inpcb *inp)
{
	struct tcp_hpts_entry *hpts;
	struct hptsh *hptsh;

	INP_WLOCK_ASSERT(inp);

	hpts = tcp_hpts_lock(inp);
	if (inp->inp_in_hpts == IHPTS_ONQUEUE) {
		hptsh = &hpts->p_hptss[inp->inp_hptsslot];
		inp->inp_hpts_request = 0;
		if (__predict_true(inp->inp_hpts_gencnt == hptsh->gencnt)) {
			TAILQ_REMOVE(&hptsh->head, inp, inp_hpts);
			MPASS(hptsh->count > 0);
			hptsh->count--;
			MPASS(hpts->p_on_queue_cnt > 0);
			hpts->p_on_queue_cnt--;
			inp_hpts_release(inp);
		} else {
			/*
			 * tcp_hptsi() now owns the TAILQ head of this inp.
			 * Can't TAILQ_REMOVE, just mark it.
			 */
#ifdef INVARIANTS
			struct inpcb *tmp;

			TAILQ_FOREACH(tmp, &hptsh->head, inp_hpts)
				MPASS(tmp != inp);
#endif
			inp->inp_in_hpts = IHPTS_MOVING;
			inp->inp_hptsslot = -1;
		}
	} else if (inp->inp_in_hpts == IHPTS_MOVING) {
		/*
		 * Handle a special race condition:
		 * tcp_hptsi() moves inpcb to detached tailq
		 * tcp_hpts_remove() marks as IHPTS_MOVING, slot = -1
		 * tcp_hpts_insert() sets slot to a meaningful value
		 * tcp_hpts_remove() again (we are here!), then in_pcbdrop()
		 * tcp_hptsi() finds pcb with meaningful slot and INP_DROPPED
		 */
		inp->inp_hptsslot = -1;
	}
	HPTS_UNLOCK(hpts);
}

bool
tcp_in_hpts(struct inpcb *inp)
{

	return (inp->inp_in_hpts == IHPTS_ONQUEUE);
}

static inline int
hpts_slot(uint32_t wheel_slot, uint32_t plus)
{
	/*
	 * Given a slot on the wheel, what slot
	 * is that plus ticks out?
	 */
	KASSERT(wheel_slot < NUM_OF_HPTSI_SLOTS, ("Invalid slot %u not on wheel", wheel_slot));
	return ((wheel_slot + plus) % NUM_OF_HPTSI_SLOTS);
}

static inline int
tick_to_wheel(uint32_t cts_in_wticks)
{
	/*
	 * Given a timestamp in wheel ticks (so by
	 * default to get it to real time one
	 * would multiply by 10, i.e. the number
	 * of ticks in a slot), map it to our limited
	 * space wheel.
	 */
	return (cts_in_wticks % NUM_OF_HPTSI_SLOTS);
}
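
/*
 * For example (illustrative numbers): a timestamp of 204803 wheel
 * ticks maps to slot 204803 % 102400 = 3; the wheel has simply
 * wrapped twice.
 */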

static inline int
hpts_slots_diff(int prev_slot, int slot_now)
{
	/*
	 * Given two slots that are someplace
	 * on our wheel, how far are they apart?
	 */
	if (slot_now > prev_slot)
		return (slot_now - prev_slot);
	else if (slot_now == prev_slot)
		/*
		 * Special case, same means we can go all of our
		 * wheel less one slot.
		 */
		return (NUM_OF_HPTSI_SLOTS - 1);
	else
		return ((NUM_OF_HPTSI_SLOTS - prev_slot) + slot_now);
}
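
/*
 * E.g. hpts_slots_diff(102398, 1) returns (102400 - 102398) + 1 = 3:
 * from slot 102398 the wheel wraps through 102399 and 0 to reach
 * slot 1.
 */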

/*
 * Given a slot on the wheel that is the current time
 * mapped to the wheel (wheel_slot), what is the maximum
 * distance forward that can be obtained without
 * wrapping past either prev_slot or running_slot
 * depending on the hpts state? Also if passed
 * a uint32_t *, fill it with the slot location.
 *
 * Note if you do not give this function the current
 * time (that you think it is) mapped to the wheel slot
 * then the results will not be what you expect and
 * could lead to invalid inserts.
 */
static inline int32_t
max_slots_available(struct tcp_hpts_entry *hpts, uint32_t wheel_slot, uint32_t *target_slot)
{
	uint32_t dis_to_travel, end_slot, pacer_to_now, avail_on_wheel;

	if ((hpts->p_hpts_active == 1) &&
	    (hpts->p_wheel_complete == 0)) {
		end_slot = hpts->p_runningslot;
		/* Back up one slot */
		if (end_slot == 0)
			end_slot = NUM_OF_HPTSI_SLOTS - 1;
		else
			end_slot--;
		if (target_slot)
			*target_slot = end_slot;
	} else {
		/*
		 * For the case where we are
		 * not active, or we have
		 * completed the pass over
		 * the wheel, we can use the
		 * prev slot and subtract one from it. This puts us
		 * as far out as possible on the wheel.
		 */
		end_slot = hpts->p_prev_slot;
		if (end_slot == 0)
			end_slot = NUM_OF_HPTSI_SLOTS - 1;
		else
			end_slot--;
		if (target_slot)
			*target_slot = end_slot;
		/*
		 * Now we have close to the full wheel left minus the
		 * time it has been since the pacer went to sleep. Note
		 * that wheel_slot, passed in, should be the current time
		 * from the perspective of the caller, mapped to the wheel.
		 */
		if (hpts->p_prev_slot != wheel_slot)
			dis_to_travel = hpts_slots_diff(hpts->p_prev_slot, wheel_slot);
		else
			dis_to_travel = 1;
		/*
		 * dis_to_travel in this case is the space from when the
		 * pacer stopped (p_prev_slot) and where our wheel_slot
		 * is now. To know how many slots we can put it in we
		 * subtract from the wheel size. We would not want
		 * to place something after p_prev_slot or it will
		 * get run too soon.
		 */
		return (NUM_OF_HPTSI_SLOTS - dis_to_travel);
	}
	/*
	 * So how many slots are open between p_runningslot -> p_cur_slot?
	 * That is what is currently un-available for insertion. Special
	 * case when we are at the last slot, this gets 1, so that
	 * the answer to how many slots are available is all but 1.
	 */
	if (hpts->p_runningslot == hpts->p_cur_slot)
		dis_to_travel = 1;
	else
		dis_to_travel = hpts_slots_diff(hpts->p_runningslot, hpts->p_cur_slot);
	/*
	 * How long has the pacer been running?
	 */
	if (hpts->p_cur_slot != wheel_slot) {
		/* The pacer is a bit late */
		pacer_to_now = hpts_slots_diff(hpts->p_cur_slot, wheel_slot);
	} else {
		/* The pacer is right on time, now == pacers start time */
		pacer_to_now = 0;
	}
	/*
	 * To get the number left we can insert into we simply
	 * subtract the distance the pacer has to run from how
	 * many slots there are.
	 */
	avail_on_wheel = NUM_OF_HPTSI_SLOTS - dis_to_travel;
	/*
	 * Now how many of those we will eat due to the pacer's
	 * time (p_cur_slot) of start being behind the
	 * real time (wheel_slot)?
	 */
	if (avail_on_wheel <= pacer_to_now) {
		/*
		 * Wheel wrap; we can't fit on the wheel, that
		 * is unusual, the system must be way overloaded!
		 * Insert into the assured slot, and return special
		 * "0".
		 */
		counter_u64_add(combined_wheel_wrap, 1);
		*target_slot = hpts->p_nxt_slot;
		return (0);
	} else {
		/*
		 * We know how many slots are open
		 * on the wheel (the reverse of what
		 * is left to run). Take away the time
		 * the pacer started to now (wheel_slot)
		 * and that tells you how many slots are
		 * open that can be inserted into that won't
		 * be touched by the pacer until later.
		 */
		return (avail_on_wheel - pacer_to_now);
	}
}
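
/*
 * A worked example of the active case above (illustrative numbers,
 * not taken from the code): with the pacer active at
 * p_runningslot = 100 and p_cur_slot = wheel_slot = 200, the 100
 * slots still to be run are off limits, so the function sets
 * *target_slot to 99 (one slot behind the running position) and
 * returns NUM_OF_HPTSI_SLOTS - 100 - 0 = 102300 available slots.
 */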

#ifdef INVARIANTS
static void
check_if_slot_would_be_wrong(struct tcp_hpts_entry *hpts, struct inpcb *inp, uint32_t inp_hptsslot, int line)
{
	/*
	 * Sanity checks for the pacer with invariants
	 * on insert.
	 */
	KASSERT(inp_hptsslot < NUM_OF_HPTSI_SLOTS,
	    ("hpts:%p inp:%p slot:%d > max",
	     hpts, inp, inp_hptsslot));
	if ((hpts->p_hpts_active) &&
	    (hpts->p_wheel_complete == 0)) {
		/*
		 * If the pacer is processing an arc
		 * of the wheel, we need to make
		 * sure we are not inserting within
		 * that arc.
		 */
		int distance, yet_to_run;

		distance = hpts_slots_diff(hpts->p_runningslot, inp_hptsslot);
		if (hpts->p_runningslot != hpts->p_cur_slot)
			yet_to_run = hpts_slots_diff(hpts->p_runningslot, hpts->p_cur_slot);
		else
			yet_to_run = 0;	/* processing last slot */
		KASSERT(yet_to_run <= distance,
		    ("hpts:%p inp:%p slot:%d distance:%d yet_to_run:%d rs:%d cs:%d",
		     hpts, inp, inp_hptsslot,
		     distance, yet_to_run,
		     hpts->p_runningslot, hpts->p_cur_slot));
	}
}
#endif

uint32_t
tcp_hpts_insert_diag(struct inpcb *inp, uint32_t slot, int32_t line, struct hpts_diag *diag)
{
	struct tcp_hpts_entry *hpts;
	struct timeval tv;
	uint32_t slot_on, wheel_cts, last_slot, need_new_to = 0;
	int32_t wheel_slot, maxslots;
	bool need_wakeup = false;

	INP_WLOCK_ASSERT(inp);
	MPASS(!tcp_in_hpts(inp));
	MPASS(!(inp->inp_flags & INP_DROPPED));

	/*
	 * We now return the next-slot the hpts will be on, beyond its
	 * current run (if up) or where it was when it stopped if it is
	 * sleeping.
	 */
	hpts = tcp_hpts_lock(inp);
	microuptime(&tv);
	if (diag) {
		memset(diag, 0, sizeof(struct hpts_diag));
		diag->p_hpts_active = hpts->p_hpts_active;
		diag->p_prev_slot = hpts->p_prev_slot;
		diag->p_runningslot = hpts->p_runningslot;
		diag->p_nxt_slot = hpts->p_nxt_slot;
		diag->p_cur_slot = hpts->p_cur_slot;
		diag->p_curtick = hpts->p_curtick;
		diag->p_lasttick = hpts->p_lasttick;
		diag->slot_req = slot;
		diag->p_on_min_sleep = hpts->p_on_min_sleep;
		diag->hpts_sleep_time = hpts->p_hpts_sleep_time;
	}
	if (slot == 0) {
		/* Ok we need to set it on the hpts in the current slot */
		inp->inp_hpts_request = 0;
		if ((hpts->p_hpts_active == 0) || (hpts->p_wheel_complete)) {
			/*
			 * A sleeping hpts we want in next slot to run;
			 * note that in this state p_prev_slot == p_cur_slot.
			 */
			inp->inp_hptsslot = hpts_slot(hpts->p_prev_slot, 1);
			if ((hpts->p_on_min_sleep == 0) &&
			    (hpts->p_hpts_active == 0))
				need_wakeup = true;
		} else
			inp->inp_hptsslot = hpts->p_runningslot;
		if (__predict_true(inp->inp_in_hpts != IHPTS_MOVING))
			inp_hpts_insert(inp, hpts);
		if (need_wakeup) {
			/*
			 * Activate the hpts if it is sleeping and its
			 * timeout is not 1.
			 */
			hpts->p_direct_wake = 1;
			tcp_wakehpts(hpts);
		}
		slot_on = hpts->p_nxt_slot;
		HPTS_UNLOCK(hpts);

		return (slot_on);
	}
	/* Get the current time relative to the wheel */
	wheel_cts = tcp_tv_to_hptstick(&tv);
	/* Map it onto the wheel */
	wheel_slot = tick_to_wheel(wheel_cts);
	/* Now what's the max we can place it at? */
	maxslots = max_slots_available(hpts, wheel_slot, &last_slot);
	if (diag) {
		diag->wheel_slot = wheel_slot;
		diag->maxslots = maxslots;
		diag->wheel_cts = wheel_cts;
	}
	if (maxslots == 0) {
		/* The pacer is in a wheel wrap behind, yikes! */
		if (slot > 1) {
			/*
			 * Reduce by 1 to prevent a forever loop in
			 * case something else is wrong. Note this
			 * probably does not hurt because if the pacer
			 * really is that far behind, we will be
			 * > 1 second late calling anyway.
			 */
			slot--;
		}
		inp->inp_hptsslot = last_slot;
		inp->inp_hpts_request = slot;
	} else if (maxslots >= slot) {
		/* It all fits on the wheel */
		inp->inp_hpts_request = 0;
		inp->inp_hptsslot = hpts_slot(wheel_slot, slot);
	} else {
		/* It does not fit */
		inp->inp_hpts_request = slot - maxslots;
		inp->inp_hptsslot = last_slot;
	}
	if (diag) {
		diag->slot_remaining = inp->inp_hpts_request;
		diag->inp_hptsslot = inp->inp_hptsslot;
	}
#ifdef INVARIANTS
	check_if_slot_would_be_wrong(hpts, inp, inp->inp_hptsslot, line);
#endif
	if (__predict_true(inp->inp_in_hpts != IHPTS_MOVING))
		inp_hpts_insert(inp, hpts);
	if ((hpts->p_hpts_active == 0) &&
	    (inp->inp_hpts_request == 0) &&
	    (hpts->p_on_min_sleep == 0)) {
		/*
		 * The hpts is sleeping and NOT on a minimum
		 * sleep time; we need to figure out where
		 * it will wake up and if we need to reschedule
		 * its time-out.
		 */
		uint32_t have_slept, yet_to_sleep;

		/* Now do we need to restart the hpts's timer? */
		have_slept = hpts_slots_diff(hpts->p_prev_slot, wheel_slot);
		if (have_slept < hpts->p_hpts_sleep_time)
			yet_to_sleep = hpts->p_hpts_sleep_time - have_slept;
		else {
			/* We are over-due */
			yet_to_sleep = 0;
			need_wakeup = true;
		}
		if (diag) {
			diag->have_slept = have_slept;
			diag->yet_to_sleep = yet_to_sleep;
		}
		if (yet_to_sleep &&
		    (yet_to_sleep > slot)) {
			/*
			 * We need to reschedule the hpts's time-out.
			 */
			hpts->p_hpts_sleep_time = slot;
			need_new_to = slot * HPTS_TICKS_PER_SLOT;
		}
	}
	/*
	 * Now how far is the hpts sleeping to? If active is 1, it's
	 * up and ticking and we do nothing; otherwise we may need to
	 * reschedule its callout if need_new_to is set from above.
	 */
	if (need_wakeup) {
		hpts->p_direct_wake = 1;
		tcp_wakehpts(hpts);
		if (diag) {
			diag->need_new_to = 0;
			diag->co_ret = 0xffff0000;
		}
	} else if (need_new_to) {
		int32_t co_ret;
		struct timeval tv;
		sbintime_t sb;

		tv.tv_sec = 0;
		tv.tv_usec = 0;
		while (need_new_to > HPTS_USEC_IN_SEC) {
			tv.tv_sec++;
			need_new_to -= HPTS_USEC_IN_SEC;
		}
		tv.tv_usec = need_new_to;
		sb = tvtosbt(tv);
		co_ret = callout_reset_sbt_on(&hpts->co, sb, 0,
		    hpts_timeout_swi, hpts, hpts->p_cpu,
		    (C_DIRECT_EXEC | C_PREL(tcp_hpts_precision)));
		if (diag) {
			diag->need_new_to = need_new_to;
			diag->co_ret = co_ret;
		}
	}
	slot_on = hpts->p_nxt_slot;
	HPTS_UNLOCK(hpts);

	return (slot_on);
}

uint16_t
hpts_random_cpu(struct inpcb *inp)
{
	/*
	 * No flow type set, distribute the load randomly.
	 */
	uint16_t cpuid;
	uint32_t ran;

	/*
	 * Shortcut if it is already set. XXXGL: does it happen?
	 */
	if (inp->inp_hpts_cpu_set) {
		return (inp->inp_hpts_cpu);
	}
	/* Nothing set, use a random number */
	ran = arc4random();
	cpuid = (((ran & 0xffff) % mp_ncpus) % tcp_pace.rp_num_hptss);
	return (cpuid);
}

static uint16_t
hpts_cpuid(struct inpcb *inp, int *failed)
{
	u_int cpuid;
#ifdef NUMA
	struct hpts_domain_info *di;
#endif

	*failed = 0;
	if (inp->inp_hpts_cpu_set) {
		return (inp->inp_hpts_cpu);
	}
	/*
	 * If we are using the irq cpu set by LRO or
	 * the driver then it overrides all other domains.
	 */
	if (tcp_use_irq_cpu) {
		if (inp->inp_irq_cpu_set == 0) {
			*failed = 1;
			return (0);
		}
		return (inp->inp_irq_cpu);
	}
	/* If one is set the other must be the same */
#ifdef RSS
	cpuid = rss_hash2cpuid(inp->inp_flowid, inp->inp_flowtype);
	if (cpuid == NETISR_CPUID_NONE)
		return (hpts_random_cpu(inp));
	else
		return (cpuid);
#endif
	/*
	 * We don't have a flowid -> cpuid mapping, so cheat and just map
	 * unknown cpuids to curcpu. Not the best, but apparently better
	 * than defaulting to swi 0.
	 */
	if (inp->inp_flowtype == M_HASHTYPE_NONE) {
		counter_u64_add(cpu_uses_random, 1);
		return (hpts_random_cpu(inp));
	}
	/*
	 * Hash to a thread based on the flowid. If we are using numa,
	 * then restrict the hash to the numa domain where the inp lives.
	 */
#ifdef NUMA
	if ((vm_ndomains == 1) ||
	    (inp->inp_numa_domain == M_NODOM)) {
#endif
		cpuid = inp->inp_flowid % mp_ncpus;
#ifdef NUMA
	} else {
		/* Hash into the cpu's that use that domain */
		di = &hpts_domains[inp->inp_numa_domain];
		cpuid = di->cpu[inp->inp_flowid % di->count];
	}
#endif
	counter_u64_add(cpu_uses_flowid, 1);
	return (cpuid);
}
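
/*
 * E.g. (illustrative): with inp_flowid = 77 and a NUMA domain whose
 * di->count is 8, the connection lands on di->cpu[77 % 8], i.e.
 * di->cpu[5], keeping the hash within that domain's cpus.
 */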

#ifdef not_longer_used_gleb
static void
tcp_drop_in_pkts(struct tcpcb *tp)
{
	struct mbuf *m, *n;

	m = tp->t_in_pkt;
	if (m)
		n = m->m_nextpkt;
	else
		n = NULL;
	tp->t_in_pkt = NULL;
	while (m) {
		m_freem(m);
		m = n;
		if (m)
			n = m->m_nextpkt;
	}
}
#endif

static void
tcp_hpts_set_max_sleep(struct tcp_hpts_entry *hpts, int wrap_loop_cnt)
{
	uint32_t t = 0, i;

	if ((hpts->p_on_queue_cnt) && (wrap_loop_cnt < 2)) {
		/*
		 * Find next slot that is occupied and use that to
		 * be the sleep time.
		 */
		for (i = 0, t = hpts_slot(hpts->p_cur_slot, 1); i < NUM_OF_HPTSI_SLOTS; i++) {
			if (TAILQ_EMPTY(&hpts->p_hptss[t].head) == 0) {
				break;
			}
			t = (t + 1) % NUM_OF_HPTSI_SLOTS;
		}
		KASSERT((i != NUM_OF_HPTSI_SLOTS), ("Hpts:%p cnt:%d but none found", hpts, hpts->p_on_queue_cnt));
		hpts->p_hpts_sleep_time = min((i + 1), hpts_sleep_max);
	} else {
		/* No one on the wheel, or we are wheel wrapped; sleep for the max */
		hpts->p_hpts_sleep_time = hpts_sleep_max;
	}
}
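
/*
 * E.g. (illustrative): if the first occupied slot found above is the
 * third one probed (i == 2), the pacer sleeps min(3, hpts_sleep_max)
 * slots before its next pass over the wheel.
 */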

static int32_t
tcp_hptsi(struct tcp_hpts_entry *hpts, int from_callout)
{
	struct tcpcb *tp;
	struct inpcb *inp;
	struct timeval tv;
	int32_t slots_to_run, i, error;
	int32_t loop_cnt = 0;
	int32_t did_prefetch = 0;
	int32_t prefetch_ninp = 0;
	int32_t prefetch_tp = 0;
	int32_t wrap_loop_cnt = 0;
	int32_t slot_pos_of_endpoint = 0;
	int32_t orig_exit_slot;
	int8_t completed_measure = 0, seen_endpoint = 0;

	HPTS_MTX_ASSERT(hpts);
	NET_EPOCH_ASSERT();
	/* record previous info for any logging */
	hpts->saved_lasttick = hpts->p_lasttick;
	hpts->saved_curtick = hpts->p_curtick;
	hpts->saved_curslot = hpts->p_cur_slot;
	hpts->saved_prev_slot = hpts->p_prev_slot;

	hpts->p_lasttick = hpts->p_curtick;
	hpts->p_curtick = tcp_gethptstick(&tv);
	cts_last_ran[hpts->p_num] = tcp_tv_to_usectick(&tv);
	orig_exit_slot = hpts->p_cur_slot = tick_to_wheel(hpts->p_curtick);
	if ((hpts->p_on_queue_cnt == 0) ||
	    (hpts->p_lasttick == hpts->p_curtick)) {
		/*
		 * No time has yet passed,
		 * or nothing to do.
		 */
		hpts->p_prev_slot = hpts->p_cur_slot;
		hpts->p_lasttick = hpts->p_curtick;
		goto no_run;
	}
again:
	hpts->p_wheel_complete = 0;
	HPTS_MTX_ASSERT(hpts);
	slots_to_run = hpts_slots_diff(hpts->p_prev_slot, hpts->p_cur_slot);
	if (((hpts->p_curtick - hpts->p_lasttick) >
	     ((NUM_OF_HPTSI_SLOTS-1) * HPTS_TICKS_PER_SLOT)) &&
	    (hpts->p_on_queue_cnt != 0)) {
		/*
		 * Wheel wrap is occurring; basically we
		 * are behind and the distance between
		 * runs has spread so much it has exceeded
		 * the time on the wheel (1.024 seconds). This
		 * is ugly and should NOT be happening. We
		 * need to run the entire wheel. We last processed
		 * p_prev_slot, so that needs to be the last slot
		 * we run. The next slot after that should be our
		 * reserved first slot for new, and then starts
		 * the running position. Now the problem is that the
		 * reserved "not yet" slot does not exist,
		 * and there may be inp's in there that need
		 * running. We can merge those into the
		 * first slot at the head.
		 */
		wrap_loop_cnt++;
		hpts->p_nxt_slot = hpts_slot(hpts->p_prev_slot, 1);
		hpts->p_runningslot = hpts_slot(hpts->p_prev_slot, 2);
		/*
		 * Adjust p_cur_slot to be where we are starting from;
		 * hopefully we will catch up (fat chance if something
		 * is broken this bad :( )
		 */
		hpts->p_cur_slot = hpts->p_prev_slot;
		/*
		 * The next slot has entries to run too, and that would
		 * be where we would normally start; lets move them into
		 * the next slot (p_prev_slot + 2) so that we will
		 * run them. The extra 10 usecs of lateness (by being
		 * put behind) does not really matter in this situation.
		 */
		TAILQ_FOREACH(inp, &hpts->p_hptss[hpts->p_nxt_slot].head,
		    inp_hpts) {
			MPASS(inp->inp_hptsslot == hpts->p_nxt_slot);
			MPASS(inp->inp_hpts_gencnt ==
			    hpts->p_hptss[hpts->p_nxt_slot].gencnt);
			MPASS(inp->inp_in_hpts == IHPTS_ONQUEUE);

			/*
			 * Update gencnt and nextslot accordingly to match
			 * the new location. This is safe since it takes both
			 * the INP lock and the pacer mutex to change the
			 * inp_hptsslot and inp_hpts_gencnt.
			 */
			inp->inp_hpts_gencnt =
			    hpts->p_hptss[hpts->p_runningslot].gencnt;
			inp->inp_hptsslot = hpts->p_runningslot;
		}
		TAILQ_CONCAT(&hpts->p_hptss[hpts->p_runningslot].head,
		    &hpts->p_hptss[hpts->p_nxt_slot].head, inp_hpts);
		hpts->p_hptss[hpts->p_runningslot].count +=
		    hpts->p_hptss[hpts->p_nxt_slot].count;
		hpts->p_hptss[hpts->p_nxt_slot].count = 0;
		hpts->p_hptss[hpts->p_nxt_slot].gencnt++;
		slots_to_run = NUM_OF_HPTSI_SLOTS - 1;
		counter_u64_add(wheel_wrap, 1);
	} else {
		/*
		 * Nxt slot here trails p_runningslot by one, though
		 * it's not usually used unless we are doing wheel wrap.
		 */
		hpts->p_nxt_slot = hpts->p_prev_slot;
		hpts->p_runningslot = hpts_slot(hpts->p_prev_slot, 1);
	}
	if (hpts->p_on_queue_cnt == 0) {
		goto no_one;
	}
	for (i = 0; i < slots_to_run; i++) {
		struct inpcb *inp, *ninp;
		TAILQ_HEAD(, inpcb) head = TAILQ_HEAD_INITIALIZER(head);
		struct hptsh *hptsh;
		uint32_t runningslot;

		/*
		 * Calculate our delay; if there are no extra ticks there
		 * was no delay (i.e. if slots_to_run == 1, no delay).
		 */
		hpts->p_delayed_by = (slots_to_run - (i + 1)) *
		    HPTS_TICKS_PER_SLOT;

		runningslot = hpts->p_runningslot;
		hptsh = &hpts->p_hptss[runningslot];
		TAILQ_SWAP(&head, &hptsh->head, inpcb, inp_hpts);
		hpts->p_on_queue_cnt -= hptsh->count;
		hptsh->count = 0;
		hptsh->gencnt++;

		HPTS_UNLOCK(hpts);

		TAILQ_FOREACH_SAFE(inp, &head, inp_hpts, ninp) {
			bool set_cpu;

			if (ninp != NULL) {
				/* We prefetch the next inp if possible */
				kern_prefetch(ninp, &prefetch_ninp);
				prefetch_ninp = 1;
			}

			/* For debugging */
			if (seen_endpoint == 0) {
				seen_endpoint = 1;
				orig_exit_slot = slot_pos_of_endpoint =
				    runningslot;
			} else if (completed_measure == 0) {
				/* Record the new position */
				orig_exit_slot = runningslot;
			}

			INP_WLOCK(inp);
			if (inp->inp_hpts_cpu_set == 0) {
				set_cpu = true;
			} else {
				set_cpu = false;
			}

			if (__predict_false(inp->inp_in_hpts == IHPTS_MOVING)) {
				if (inp->inp_hptsslot == -1) {
					inp->inp_in_hpts = IHPTS_NONE;
					if (in_pcbrele_wlocked(inp) == false)
						INP_WUNLOCK(inp);
				} else {
					HPTS_LOCK(hpts);
					inp_hpts_insert(inp, hpts);
					HPTS_UNLOCK(hpts);
					INP_WUNLOCK(inp);
				}
				continue;
			}

			MPASS(inp->inp_in_hpts == IHPTS_ONQUEUE);
			MPASS(!(inp->inp_flags & INP_DROPPED));
			KASSERT(runningslot == inp->inp_hptsslot,
				("Hpts:%p inp:%p slot mis-aligned %u vs %u",
				 hpts, inp, runningslot, inp->inp_hptsslot));

			if (inp->inp_hpts_request) {
				/*
				 * This guy is deferred out further in time
				 * than our wheel had available on it.
				 * Push him back on the wheel or run it,
				 * depending.
				 */
				uint32_t maxslots, last_slot, remaining_slots;

				remaining_slots = slots_to_run - (i + 1);
				if (inp->inp_hpts_request > remaining_slots) {
					HPTS_LOCK(hpts);
					/*
					 * How far out can we go?
					 */
					maxslots = max_slots_available(hpts,
					    hpts->p_cur_slot, &last_slot);
					if (maxslots >= inp->inp_hpts_request) {
						/*
						 * We can place it finally to
						 * be processed.
						 */
						inp->inp_hptsslot = hpts_slot(
						    hpts->p_runningslot,
						    inp->inp_hpts_request);
						inp->inp_hpts_request = 0;
					} else {
						/* Work off some more time */
						inp->inp_hptsslot = last_slot;
						inp->inp_hpts_request -=
						    maxslots;
					}
					inp_hpts_insert(inp, hpts);
					HPTS_UNLOCK(hpts);
					INP_WUNLOCK(inp);
					continue;
				}
				inp->inp_hpts_request = 0;
				/* Fall through, we will do it now */
			}

			inp_hpts_release(inp);
			tp = intotcpcb(inp);
			MPASS(tp);
			if (set_cpu) {
				/*
				 * Setup so the next time we will move to
				 * the right CPU. This should be a rare
				 * event. It will sometimes happen when we
				 * are the client side (usually not the
				 * server). Somehow tcp_output() gets called
				 * before the tcp_do_segment() sets the
				 * initial state. This means the r_cpu and
				 * r_hpts_cpu is 0. We get on the hpts, and
				 * then tcp_input() gets called setting up
				 * the r_cpu to the correct value. The hpts
				 * goes off and sees the mis-match. We
				 * simply correct it here and the CPU will
				 * switch to the new hpts next time the tcb
				 * gets added to the hpts (not this one)
				 * :-)
				 */
				tcp_set_hpts(inp);
			}
			CURVNET_SET(inp->inp_vnet);
			/* Lets do any logging that we might want to */
			if (hpts_does_tp_logging && (tp->t_logstate != TCP_LOG_STATE_OFF)) {
				tcp_hpts_log(hpts, tp, &tv, slots_to_run, i, from_callout);
			}

			if (tp->t_fb_ptr != NULL) {
				kern_prefetch(tp->t_fb_ptr, &did_prefetch);
				did_prefetch = 1;
			}
			if ((inp->inp_flags2 & INP_SUPPORTS_MBUFQ) && tp->t_in_pkt) {
				error = (*tp->t_fb->tfb_do_queued_segments)(inp->inp_socket, tp, 0);
				if (error) {
					/* The input killed the connection */
					goto skip_pacing;
				}
			}
			inp->inp_hpts_calls = 1;
			error = tcp_output(tp);
			if (error < 0)
				goto skip_pacing;
			inp->inp_hpts_calls = 0;
			if (ninp) {
				/*
				 * If we have a nxt inp, see if we can
				 * prefetch it. Note this may seem
				 * "risky" since we have no locks (other
				 * than the previous inp) and there is no
				 * assurance that ninp was not pulled while
				 * we were processing inp and freed. If this
				 * occurred it could mean that either:
				 *
				 * a) Its NULL (which is fine we won't go
				 * here) <or> b) Its valid (which is cool we
				 * will prefetch it) <or> c) The inp got
				 * freed back to the slab which was
				 * reallocated. Then the piece of memory was
				 * re-used and something else (not an
				 * address) is in inp_ppcb. If that occurs
				 * we don't crash, but take a TLB shootdown
				 * performance hit (same as if it was NULL
				 * and we tried to pre-fetch it).
				 *
				 * Considering that the likelihood of <c> is
				 * quite rare, we will take a risk on doing
				 * this. If performance drops after testing
				 * we can always take this out. NB: the
				 * kern_prefetch on amd64 actually has
				 * protection against a bad address now via
				 * the DMAP_() tests. This will prevent the
				 * TLB hit, and instead if <c> occurs just
				 * cause us to load cache with a useless
				 * address (to us).
				 *
				 * XXXGL: with tcpcb == inpcb, I'm unsure this
				 * prefetch is still correct and useful.
				 */
				kern_prefetch(ninp, &prefetch_tp);
				prefetch_tp = 1;
			}
			INP_WUNLOCK(inp);
		skip_pacing:
			CURVNET_RESTORE();
		}
		if (seen_endpoint) {
			/*
			 * We now have an accurate distance between
			 * slot_pos_of_endpoint <-> orig_exit_slot
			 * to tell us how late we were; orig_exit_slot
			 * is where we calculated the end of our cycle to
			 * be when we first entered.
			 */
			completed_measure = 1;
		}
		HPTS_LOCK(hpts);
		hpts->p_runningslot++;
		if (hpts->p_runningslot >= NUM_OF_HPTSI_SLOTS) {
			hpts->p_runningslot = 0;
		}
	}
no_one:
	HPTS_MTX_ASSERT(hpts);
	hpts->p_delayed_by = 0;
	/*
	 * Check to see if we took an excess amount of time and need to run
	 * more ticks (if we did not hit ENOBUFS).
	 */
	hpts->p_prev_slot = hpts->p_cur_slot;
	hpts->p_lasttick = hpts->p_curtick;
	if ((from_callout == 0) || (loop_cnt > max_pacer_loops)) {
		/*
		 * Something is seriously slow: we have
		 * looped through processing the wheel,
		 * and by the time we cleared what was
		 * needed to run, max_pacer_loops times,
		 * we still needed to run. That means
		 * the system is hopelessly behind and
		 * can never catch up :(
		 *
		 * We will just lie to this thread
		 * and let it think p_curtick is
		 * correct. When it next awakens
		 * it will find itself further behind.
		 */
		if (from_callout)
			counter_u64_add(hpts_hopelessly_behind, 1);
		goto no_run;
	}
	hpts->p_curtick = tcp_gethptstick(&tv);
	hpts->p_cur_slot = tick_to_wheel(hpts->p_curtick);
	if (seen_endpoint == 0) {
		/* We saw no endpoint but we may be looping */
		orig_exit_slot = hpts->p_cur_slot;
	}
	if ((wrap_loop_cnt < 2) &&
	    (hpts->p_lasttick != hpts->p_curtick)) {
		counter_u64_add(hpts_loops, 1);
		loop_cnt++;
		goto again;
	}
no_run:
	cts_last_ran[hpts->p_num] = tcp_tv_to_usectick(&tv);
	/*
	 * Set the flag to tell inserts that occur from here
	 * on that we have completed our pass over the wheel.
	 */
	hpts->p_wheel_complete = 1;
	/*
	 * Now did we spend too long running input and need to run more ticks?
	 * Note that if wrap_loop_cnt < 2 then we should have the conditions
	 * in the KASSERT's true. But if the wheel is behind, i.e. wrap_loop_cnt
	 * is 2 or more, then the conditions most likely are *not* true.
	 * Also if we are called not from the callout, we don't run the wheel
	 * multiple times so the slots may not align either.
	 */
	KASSERT(((hpts->p_prev_slot == hpts->p_cur_slot) ||
	    (wrap_loop_cnt >= 2) || (from_callout == 0)),
	    ("H:%p p_prev_slot:%u not equal to p_cur_slot:%u", hpts,
	     hpts->p_prev_slot, hpts->p_cur_slot));
	KASSERT(((hpts->p_lasttick == hpts->p_curtick)
	    || (wrap_loop_cnt >= 2) || (from_callout == 0)),
	    ("H:%p p_lasttick:%u not equal to p_curtick:%u", hpts,
	     hpts->p_lasttick, hpts->p_curtick));
	if (from_callout && (hpts->p_lasttick != hpts->p_curtick)) {
		hpts->p_curtick = tcp_gethptstick(&tv);
		counter_u64_add(hpts_loops, 1);
		hpts->p_cur_slot = tick_to_wheel(hpts->p_curtick);
		goto again;
	}

	if (from_callout) {
		tcp_hpts_set_max_sleep(hpts, wrap_loop_cnt);
	}
	if (seen_endpoint)
		return (hpts_slots_diff(slot_pos_of_endpoint, orig_exit_slot));
	else
		return (0);
}
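
/*
 * A note on the return value, with a quick illustrative example: the
 * value returned is the distance from the first slot where tcp_hptsi()
 * ran an endpoint to the slot where it exited. If it first ran an
 * endpoint at slot 10 and stopped with the wheel at slot 25, it
 * returns hpts_slots_diff(10, 25) = 15, which the callers below
 * compare against ticks_indicate_more_sleep and
 * ticks_indicate_less_sleep to adapt the dynamic sleep interval.
 */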

void
__tcp_set_hpts(struct inpcb *inp, int32_t line)
{
	struct tcp_hpts_entry *hpts;
	int failed;

	INP_WLOCK_ASSERT(inp);
	hpts = tcp_hpts_lock(inp);
	if ((inp->inp_in_hpts == IHPTS_NONE) &&
	    (inp->inp_hpts_cpu_set == 0)) {
		inp->inp_hpts_cpu = hpts_cpuid(inp, &failed);
		if (failed == 0)
			inp->inp_hpts_cpu_set = 1;
	}
	HPTS_UNLOCK(hpts);
}

static void
__tcp_run_hpts(struct tcp_hpts_entry *hpts)
{
	int ticks_ran;

	if (hpts->p_hpts_active) {
		/* Already active */
		return;
	}
	if (mtx_trylock(&hpts->p_mtx) == 0) {
		/* Someone else got the lock */
		return;
	}
	if (hpts->p_hpts_active)
		goto out_with_mtx;
	hpts->syscall_cnt++;
	counter_u64_add(hpts_direct_call, 1);
	hpts->p_hpts_active = 1;
	ticks_ran = tcp_hptsi(hpts, 0);
	/* We may want to adjust the sleep values here */
	if (hpts->p_on_queue_cnt >= conn_cnt_thresh) {
		if (ticks_ran > ticks_indicate_less_sleep) {
			struct timeval tv;
			sbintime_t sb;

			hpts->p_mysleep.tv_usec /= 2;
			if (hpts->p_mysleep.tv_usec < dynamic_min_sleep)
				hpts->p_mysleep.tv_usec = dynamic_min_sleep;
			/* Reschedule with new timeout value */
			tcp_hpts_set_max_sleep(hpts, 0);
			tv.tv_sec = 0;
			tv.tv_usec = hpts->p_hpts_sleep_time * HPTS_TICKS_PER_SLOT;
			/* Validate it's in the right ranges */
			if (tv.tv_usec < hpts->p_mysleep.tv_usec) {
				hpts->overidden_sleep = tv.tv_usec;
				tv.tv_usec = hpts->p_mysleep.tv_usec;
			} else if (tv.tv_usec > dynamic_max_sleep) {
				/* Lets not let sleep get above this value */
				hpts->overidden_sleep = tv.tv_usec;
				tv.tv_usec = dynamic_max_sleep;
			}
			/*
			 * In this mode the timer is a backstop to
			 * all the userret/lro_flushes so we use
			 * the dynamic value and set the on_min_sleep
			 * flag so we will not be awoken.
			 */
			sb = tvtosbt(tv);
			/* Store off to make visible the actual sleep time */
			hpts->sleeping = tv.tv_usec;
			callout_reset_sbt_on(&hpts->co, sb, 0,
			    hpts_timeout_swi, hpts, hpts->p_cpu,
			    (C_DIRECT_EXEC | C_PREL(tcp_hpts_precision)));
		} else if (ticks_ran < ticks_indicate_more_sleep) {
			/* For the further sleep, don't reschedule hpts */
			hpts->p_mysleep.tv_usec *= 2;
			if (hpts->p_mysleep.tv_usec > dynamic_max_sleep)
				hpts->p_mysleep.tv_usec = dynamic_max_sleep;
		}
		hpts->p_on_min_sleep = 1;
	}
	hpts->p_hpts_active = 0;
out_with_mtx:
	HPTS_MTX_ASSERT(hpts);
	mtx_unlock(&hpts->p_mtx);
}

static struct tcp_hpts_entry *
tcp_choose_hpts_to_run(void)
{
	int i, oldest_idx, start, end;
	uint32_t cts, time_since_ran, calc;

	cts = tcp_get_usecs(NULL);
	time_since_ran = 0;
	/* Default is all one group */
	start = 0;
	end = tcp_pace.rp_num_hptss;
	/*
	 * If we have more than one L3 group figure out which one
	 * this CPU is in.
	 */
	if (tcp_pace.grp_cnt > 1) {
		for (i = 0; i < tcp_pace.grp_cnt; i++) {
			if (CPU_ISSET(curcpu, &tcp_pace.grps[i]->cg_mask)) {
				start = tcp_pace.grps[i]->cg_first;
				end = (tcp_pace.grps[i]->cg_last + 1);
				break;
			}
		}
	}
	oldest_idx = -1;
	for (i = start; i < end; i++) {
		if (TSTMP_GT(cts, cts_last_ran[i]))
			calc = cts - cts_last_ran[i];
		else
			calc = 0;
		if (calc > time_since_ran) {
			oldest_idx = i;
			time_since_ran = calc;
		}
	}
	if (oldest_idx >= 0)
		return (tcp_pace.rp_ent[oldest_idx]);
	else
		return (tcp_pace.rp_ent[(curcpu % tcp_pace.rp_num_hptss)]);
}

void
tcp_run_hpts(void)
{
	struct tcp_hpts_entry *hpts;
	struct epoch_tracker et;

	NET_EPOCH_ENTER(et);
	hpts = tcp_choose_hpts_to_run();
	__tcp_run_hpts(hpts);
	NET_EPOCH_EXIT(et);
}
1641
1642
1643 static void
1644 tcp_hpts_thread(void *ctx)
1645 {
1646 struct tcp_hpts_entry *hpts;
1647 struct epoch_tracker et;
1648 struct timeval tv;
1649 sbintime_t sb;
1650 int ticks_ran;
1651
1652 hpts = (struct tcp_hpts_entry *)ctx;
1653 mtx_lock(&hpts->p_mtx);
1654 if (hpts->p_direct_wake) {
1655 /* Signaled by input or output with low occupancy count. */
1656 callout_stop(&hpts->co);
1657 counter_u64_add(hpts_direct_awakening, 1);
1658 } else {
1659 /* Timed out, the normal case. */
1660 counter_u64_add(hpts_wake_timeout, 1);
1661 if (callout_pending(&hpts->co) ||
1662 !callout_active(&hpts->co)) {
1663 mtx_unlock(&hpts->p_mtx);
1664 return;
1665 }
1666 }
1667 callout_deactivate(&hpts->co);
1668 hpts->p_hpts_wake_scheduled = 0;
1669 NET_EPOCH_ENTER(et);
1670 if (hpts->p_hpts_active) {
1671 /*
1672 * We are active already. This means that a syscall
1673 * trap or LRO is running in behalf of hpts. In that case
1674 * we need to double our timeout since there seems to be
1675 * enough activity in the system that we don't need to
1676 * run as often (if we were not directly woken).
1677 */
1678 if (hpts->p_direct_wake == 0) {
1679 counter_u64_add(hpts_back_tosleep, 1);
1680 if (hpts->p_on_queue_cnt >= conn_cnt_thresh) {
1681 hpts->p_mysleep.tv_usec *= 2;
1682 if (hpts->p_mysleep.tv_usec > dynamic_max_sleep)
1683 hpts->p_mysleep.tv_usec = dynamic_max_sleep;
1684 tv.tv_usec = hpts->p_mysleep.tv_usec;
1685 hpts->p_on_min_sleep = 1;
1686 } else {
1687 /*
1688 * Here we have low count on the wheel, but
1689 * somehow we still collided with one of the
1690 * connections. Lets go back to sleep for a
1691 * min sleep time, but clear the flag so we
1692 * can be awoken by insert.
1693 */
1694 hpts->p_on_min_sleep = 0;
1695 tv.tv_usec = tcp_min_hptsi_time;
1696 }
1697 } else {
1698 /*
1699 * Directly woken most likely to reset the
1700 * callout time.
1701 */
1702 tv.tv_sec = 0;
1703 tv.tv_usec = hpts->p_mysleep.tv_usec;
1704 }
1705 goto back_to_sleep;
1706 }
1707 hpts->sleeping = 0;
1708 hpts->p_hpts_active = 1;
1709 ticks_ran = tcp_hptsi(hpts, 1);
	tv.tv_sec = 0;
	tv.tv_usec = hpts->p_hpts_sleep_time * HPTS_TICKS_PER_SLOT;
	if (hpts->p_on_queue_cnt >= conn_cnt_thresh) {
		if (hpts->p_direct_wake == 0) {
			/*
			 * Only adjust sleep time if we were
			 * called from the callout, i.e. direct_wake == 0.
			 */
			if (ticks_ran < ticks_indicate_more_sleep) {
				hpts->p_mysleep.tv_usec *= 2;
				if (hpts->p_mysleep.tv_usec > dynamic_max_sleep)
					hpts->p_mysleep.tv_usec = dynamic_max_sleep;
			} else if (ticks_ran > ticks_indicate_less_sleep) {
				hpts->p_mysleep.tv_usec /= 2;
				if (hpts->p_mysleep.tv_usec < dynamic_min_sleep)
					hpts->p_mysleep.tv_usec = dynamic_min_sleep;
			}
		}
		if (tv.tv_usec < hpts->p_mysleep.tv_usec) {
			hpts->overidden_sleep = tv.tv_usec;
			tv.tv_usec = hpts->p_mysleep.tv_usec;
		} else if (tv.tv_usec > dynamic_max_sleep) {
			/* Let's not let the sleep time get above this value. */
			hpts->overidden_sleep = tv.tv_usec;
			tv.tv_usec = dynamic_max_sleep;
		}
		/*
		 * In this mode the timer is a backstop to
		 * all the userret/lro_flushes, so we use
		 * the dynamic value and set the on_min_sleep
		 * flag so we will not be awoken.
		 */
		hpts->p_on_min_sleep = 1;
	} else if (hpts->p_on_queue_cnt == 0) {
		/*
		 * No one is on the wheel; please wake us up
		 * if you insert on the wheel.
		 */
		hpts->p_on_min_sleep = 0;
		hpts->overidden_sleep = 0;
	} else {
		/*
		 * We hit here when we have a low number of
		 * clients on the wheel (our else clause).
		 * We may need to go on min sleep; if we set
		 * the flag we will not be awoken if someone
		 * is inserted ahead of us. Clearing the flag
		 * means we can be awoken. This is "old mode"
		 * where the timer is what runs hpts mainly.
		 */
		if (tv.tv_usec < tcp_min_hptsi_time) {
			/*
			 * Yes, we are on min sleep, which means
			 * we cannot be awoken.
			 */
			hpts->overidden_sleep = tv.tv_usec;
			tv.tv_usec = tcp_min_hptsi_time;
			hpts->p_on_min_sleep = 1;
		} else {
			/* Clear the min sleep flag. */
			hpts->overidden_sleep = 0;
			hpts->p_on_min_sleep = 0;
		}
	}
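	/*
	 * To summarize the three cases above: a heavily loaded wheel
	 * sleeps on the self-tuned dynamic value and refuses wakeups
	 * (the timer is only a backstop), an empty wheel sleeps until
	 * an insert wakes it, and a lightly loaded wheel runs in "old
	 * mode" where this timer drives hpts and wakeups are allowed
	 * unless we had to clamp to the minimum sleep time.
	 */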
	HPTS_MTX_ASSERT(hpts);
	hpts->p_hpts_active = 0;
back_to_sleep:
	hpts->p_direct_wake = 0;
	sb = tvtosbt(tv);
	/* Store off to make the actual sleep time visible. */
	hpts->sleeping = tv.tv_usec;
	callout_reset_sbt_on(&hpts->co, sb, 0,
	    hpts_timeout_swi, hpts, hpts->p_cpu,
	    (C_DIRECT_EXEC | C_PREL(tcp_hpts_precision)));
	NET_EPOCH_EXIT(et);
	mtx_unlock(&hpts->p_mtx);
}

#undef timersub

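/*
 * Recursively count the CG_SHARE_L3 groups in the CPU topology
 * tree. As an illustration, a two-socket machine with one shared
 * L3 cache per socket would report 2, while a small single-die
 * part with one L3 would report 1.
 */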
static int32_t
hpts_count_level(struct cpu_group *cg)
{
	int32_t count_l3, i;

	count_l3 = 0;
	if (cg->cg_level == CG_SHARE_L3)
		count_l3++;
	/* Walk all the children looking for L3. */
	for (i = 0; i < cg->cg_children; i++) {
		count_l3 += hpts_count_level(&cg->cg_child[i]);
	}
	return (count_l3);
}

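/*
 * Collect a pointer to every CG_SHARE_L3 group into grps[],
 * stopping once max entries have been stored. On return *at is
 * the number of groups gathered; tcp_init_hptsi() below sizes
 * grps[] using hpts_count_level(), so the two walks agree.
 */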
static void
hpts_gather_grps(struct cpu_group **grps, int32_t *at, int32_t max, struct cpu_group *cg)
{
	int32_t idx, i;

	idx = *at;
	if (cg->cg_level == CG_SHARE_L3) {
		grps[idx] = cg;
		idx++;
		if (idx == max) {
			*at = idx;
			return;
		}
	}
	*at = idx;
	/* Walk all the children looking for L3. */
	for (i = 0; i < cg->cg_children; i++) {
		hpts_gather_grps(grps, at, max, &cg->cg_child[i]);
	}
}

static void
tcp_init_hptsi(void *st)
{
	struct cpu_group *cpu_top;
	int32_t error __diagused;
	int32_t i, j, bound = 0, created = 0;
	size_t sz, asz;
	struct timeval tv;
	sbintime_t sb;
	struct tcp_hpts_entry *hpts;
	struct pcpu *pc;
	char unit[16];
	uint32_t ncpus = mp_ncpus ? mp_ncpus : MAXCPU;
	int count, domain;

#ifdef SMP
	cpu_top = smp_topo();
#else
	cpu_top = NULL;
#endif
	tcp_pace.rp_num_hptss = ncpus;
	hpts_hopelessly_behind = counter_u64_alloc(M_WAITOK);
	hpts_loops = counter_u64_alloc(M_WAITOK);
	back_tosleep = counter_u64_alloc(M_WAITOK);
	combined_wheel_wrap = counter_u64_alloc(M_WAITOK);
	wheel_wrap = counter_u64_alloc(M_WAITOK);
	hpts_wake_timeout = counter_u64_alloc(M_WAITOK);
	hpts_direct_awakening = counter_u64_alloc(M_WAITOK);
	hpts_back_tosleep = counter_u64_alloc(M_WAITOK);
	hpts_direct_call = counter_u64_alloc(M_WAITOK);
	cpu_uses_flowid = counter_u64_alloc(M_WAITOK);
	cpu_uses_random = counter_u64_alloc(M_WAITOK);

	sz = (tcp_pace.rp_num_hptss * sizeof(struct tcp_hpts_entry *));
	tcp_pace.rp_ent = malloc(sz, M_TCPHPTS, M_WAITOK | M_ZERO);
	sz = (sizeof(uint32_t) * tcp_pace.rp_num_hptss);
	cts_last_ran = malloc(sz, M_TCPHPTS, M_WAITOK);
	tcp_pace.grp_cnt = 0;
	if (cpu_top == NULL) {
		tcp_pace.grp_cnt = 1;
	} else {
		/* Find out how many level-3 cache domains we have. */
		tcp_pace.grp_cnt = hpts_count_level(cpu_top);
		if (tcp_pace.grp_cnt == 0) {
			tcp_pace.grp_cnt = 1;
		}
		sz = (tcp_pace.grp_cnt * sizeof(struct cpu_group *));
		tcp_pace.grps = malloc(sz, M_TCPHPTS, M_WAITOK);
		/* Now populate the groups. */
		if (tcp_pace.grp_cnt == 1) {
			/*
			 * All we need is the top level; all cpus are in
			 * the same cache, so when we use grps[0]->cg_mask
			 * with the cg_first <-> cg_last range it will
			 * include every cpu. The level here is probably
			 * zero, which is ok.
			 */
			tcp_pace.grps[0] = cpu_top;
		} else {
			/*
			 * Here we must find all the level-3 cache domains
			 * and set up our pointers to them.
			 */
			count = 0;
			hpts_gather_grps(tcp_pace.grps, &count, tcp_pace.grp_cnt, cpu_top);
		}
	}
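	/*
	 * Illustrative outcomes of the topology walk above: a UP kernel
	 * (cpu_top == NULL) or a machine with a single shared L3 ends up
	 * with grp_cnt == 1, while, say, a two-socket box with one L3
	 * per socket ends up with grp_cnt == 2 and one cpu_group pointer
	 * per socket in tcp_pace.grps[].
	 */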
	asz = sizeof(struct hptsh) * NUM_OF_HPTSI_SLOTS;
	for (i = 0; i < tcp_pace.rp_num_hptss; i++) {
		tcp_pace.rp_ent[i] = malloc(sizeof(struct tcp_hpts_entry),
		    M_TCPHPTS, M_WAITOK | M_ZERO);
		tcp_pace.rp_ent[i]->p_hptss = malloc(asz, M_TCPHPTS, M_WAITOK);
		hpts = tcp_pace.rp_ent[i];
		/*
		 * Init all the hpts structures that are not specifically
		 * zero'd by the allocations. Also let's attach them to the
		 * appropriate sysctl block as well.
		 */
		mtx_init(&hpts->p_mtx, "tcp_hpts_lck",
		    "hpts", MTX_DEF | MTX_DUPOK);
		for (j = 0; j < NUM_OF_HPTSI_SLOTS; j++) {
			TAILQ_INIT(&hpts->p_hptss[j].head);
			hpts->p_hptss[j].count = 0;
			hpts->p_hptss[j].gencnt = 0;
		}
		sysctl_ctx_init(&hpts->hpts_ctx);
		snprintf(unit, sizeof(unit), "%d", i);
		hpts->hpts_root = SYSCTL_ADD_NODE(&hpts->hpts_ctx,
		    SYSCTL_STATIC_CHILDREN(_net_inet_tcp_hpts),
		    OID_AUTO,
		    unit,
		    CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
		    "");
		SYSCTL_ADD_INT(&hpts->hpts_ctx,
		    SYSCTL_CHILDREN(hpts->hpts_root),
		    OID_AUTO, "out_qcnt", CTLFLAG_RD,
		    &hpts->p_on_queue_cnt, 0,
		    "Count of TCBs awaiting output processing");
		SYSCTL_ADD_U16(&hpts->hpts_ctx,
		    SYSCTL_CHILDREN(hpts->hpts_root),
		    OID_AUTO, "active", CTLFLAG_RD,
		    &hpts->p_hpts_active, 0,
		    "Is the hpts active");
		SYSCTL_ADD_UINT(&hpts->hpts_ctx,
		    SYSCTL_CHILDREN(hpts->hpts_root),
		    OID_AUTO, "curslot", CTLFLAG_RD,
		    &hpts->p_cur_slot, 0,
		    "What the current running pacer's goal slot is");
		SYSCTL_ADD_UINT(&hpts->hpts_ctx,
		    SYSCTL_CHILDREN(hpts->hpts_root),
		    OID_AUTO, "runtick", CTLFLAG_RD,
		    &hpts->p_runningslot, 0,
		    "What the running pacer's current slot is");
		SYSCTL_ADD_UINT(&hpts->hpts_ctx,
		    SYSCTL_CHILDREN(hpts->hpts_root),
		    OID_AUTO, "curtick", CTLFLAG_RD,
		    &hpts->p_curtick, 0,
		    "What the running pacer's last tick mapped to the wheel was");
		SYSCTL_ADD_UINT(&hpts->hpts_ctx,
		    SYSCTL_CHILDREN(hpts->hpts_root),
		    OID_AUTO, "lastran", CTLFLAG_RD,
		    &cts_last_ran[i], 0,
		    "The last usec tick that this hpts ran");
		SYSCTL_ADD_LONG(&hpts->hpts_ctx,
		    SYSCTL_CHILDREN(hpts->hpts_root),
		    OID_AUTO, "cur_min_sleep", CTLFLAG_RD,
		    &hpts->p_mysleep.tv_usec,
		    "What the running pacer is using for p_mysleep.tv_usec");
		SYSCTL_ADD_U64(&hpts->hpts_ctx,
		    SYSCTL_CHILDREN(hpts->hpts_root),
		    OID_AUTO, "now_sleeping", CTLFLAG_RD,
		    &hpts->sleeping, 0,
		    "What the running pacer is actually sleeping for");
		SYSCTL_ADD_U64(&hpts->hpts_ctx,
		    SYSCTL_CHILDREN(hpts->hpts_root),
		    OID_AUTO, "syscall_cnt", CTLFLAG_RD,
		    &hpts->syscall_cnt, 0,
		    "How many times we had syscalls on this hpts");

		hpts->p_hpts_sleep_time = hpts_sleep_max;
		hpts->p_num = i;
		hpts->p_curtick = tcp_gethptstick(&tv);
		cts_last_ran[i] = tcp_tv_to_usectick(&tv);
		hpts->p_prev_slot = hpts->p_cur_slot = tick_to_wheel(hpts->p_curtick);
		hpts->p_cpu = 0xffff;
		hpts->p_nxt_slot = hpts_slot(hpts->p_cur_slot, 1);
		callout_init(&hpts->co, 1);
	}
	/* Don't try to bind to NUMA domains if we don't have any. */
	if (vm_ndomains == 1 && tcp_bind_threads == 2)
		tcp_bind_threads = 0;

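	/*
	 * tcp_bind_threads selects the binding policy applied below:
	 * 0 leaves the ithreads unbound, 1 binds each ithread to its
	 * own CPU, and 2 binds each ithread to the cpuset of the
	 * L3/NUMA group containing its CPU.
	 */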
	/*
	 * Now let's start ithreads to handle the hptss.
	 */
	for (i = 0; i < tcp_pace.rp_num_hptss; i++) {
		hpts = tcp_pace.rp_ent[i];
		hpts->p_cpu = i;

		error = swi_add(&hpts->ie, "hpts",
		    tcp_hpts_thread, (void *)hpts,
		    SWI_NET, INTR_MPSAFE, &hpts->ie_cookie);
		KASSERT(error == 0,
		    ("Can't add hpts:%p i:%d err:%d",
		    hpts, i, error));
		created++;
		hpts->p_mysleep.tv_sec = 0;
		hpts->p_mysleep.tv_usec = tcp_min_hptsi_time;
		if (tcp_bind_threads == 1) {
			if (intr_event_bind(hpts->ie, i) == 0)
				bound++;
		} else if (tcp_bind_threads == 2) {
			/* Find the group for this CPU (i) and bind into it. */
			for (j = 0; j < tcp_pace.grp_cnt; j++) {
				if (CPU_ISSET(i, &tcp_pace.grps[j]->cg_mask)) {
					if (intr_event_bind_ithread_cpuset(hpts->ie,
					    &tcp_pace.grps[j]->cg_mask) == 0) {
						bound++;
						pc = pcpu_find(i);
						domain = pc->pc_domain;
						count = hpts_domains[domain].count;
						hpts_domains[domain].cpu[count] = i;
						hpts_domains[domain].count++;
						break;
					}
				}
			}
		}
		tv.tv_sec = 0;
		tv.tv_usec = hpts->p_hpts_sleep_time * HPTS_TICKS_PER_SLOT;
		hpts->sleeping = tv.tv_usec;
		sb = tvtosbt(tv);
		callout_reset_sbt_on(&hpts->co, sb, 0,
		    hpts_timeout_swi, hpts, hpts->p_cpu,
		    (C_DIRECT_EXEC | C_PREL(tcp_hpts_precision)));
	}
	/*
	 * If we somehow have an empty domain, fall back to choosing
	 * among all hpts threads.
	 */
	for (i = 0; i < vm_ndomains; i++) {
		if (hpts_domains[i].count == 0) {
			tcp_bind_threads = 0;
			break;
		}
	}
	printf("TCP Hpts created %d swi interrupt threads and bound %d to %s\n",
	    created, bound,
	    tcp_bind_threads == 2 ? "NUMA domains" : "cpus");
#ifdef INVARIANTS
	printf("HPTS is in INVARIANT mode!!\n");
#endif
}

SYSINIT(tcphptsi, SI_SUB_SOFTINTR, SI_ORDER_ANY, tcp_init_hptsi, NULL);
MODULE_VERSION(tcphpts, 1);