sys/kern/kern_poll.c
/*-
 * Copyright (c) 2001-2002 Luigi Rizzo
 *
 * Supported by: the Xorp Project (www.xorp.org)
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: releng/10.2/sys/kern/kern_poll.c 261276 2014-01-29 21:57:00Z brooks $");

#include "opt_device_polling.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/proc.h>
#include <sys/eventhandler.h>
#include <sys/resourcevar.h>
#include <sys/socket.h>			/* needed by net/if.h */
#include <sys/sockio.h>
#include <sys/sysctl.h>
#include <sys/syslog.h>

#include <net/if.h>			/* for IFF_* flags */
#include <net/netisr.h>			/* for NETISR_POLL */
#include <net/vnet.h>

void hardclock_device_poll(void);	/* hook from hardclock */

static struct mtx	poll_mtx;

/*
 * Polling support for [network] device drivers.
 *
 * Drivers which support this feature can register with the
 * polling code.
 *
 * If registration is successful, the driver must disable interrupts,
 * and further I/O is performed through the handler, which is invoked
 * (at least once per clock tick) with 3 arguments: the "arg" passed at
 * register time (a struct ifnet pointer), a command, and a "count" limit.
 *
 * The command can be one of the following:
 *  POLL_ONLY: quick move of "count" packets from input/output queues.
 *  POLL_AND_CHECK_STATUS: as above, plus check status registers or do
 *	other more expensive operations. This command is issued periodically
 *	but less frequently than POLL_ONLY.
 *
 * The count limit specifies how much work the handler can do during the
 * call -- typically this is the number of packets to be received, or
 * transmitted, etc. (drivers are free to interpret this number, as long
 * as the max time spent in the function grows roughly linearly with the
 * count).
 *
 * Polling is enabled and disabled by setting the IFCAP_POLLING flag on
 * the interface. The driver's ioctl handler should register the interface
 * with the polling code and disable interrupts if the registration was
 * successful.
 *
 * A second variable controls the sharing of CPU between polling/kernel
 * network processing, and other activities (typically userlevel tasks):
 * kern.polling.user_frac (between 0 and 100, default 50) sets the share
 * of CPU allocated to user tasks. CPU is allocated proportionally to the
 * shares, by dynamically adjusting the "count" (poll_burst).
 *
 * Other parameters should be left at their default values.
 * The following constraints hold:
 *
 *	1 <= poll_each_burst <= poll_burst <= poll_burst_max
 *	MIN_POLL_BURST_MAX <= poll_burst_max <= MAX_POLL_BURST_MAX
 */
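
/*
 * For illustration, a minimal sketch of a driver poll handler matching the
 * contract above. The foo(4) driver, its softc layout and the foo_rxeof()/
 * foo_txeof()/foo_status_check() helpers are hypothetical; real drivers
 * such as em(4) or fxp(4) follow the same shape. (In this kernel,
 * poll_handler_t returns the number of packets processed on receive.)
 */
#if 0	/* example only, not compiled */
static int
foo_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
{
	struct foo_softc *sc = ifp->if_softc;
	int rx_npkts = 0;

	FOO_LOCK(sc);
	if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
		/* Move at most "count" packets from the receive ring. */
		rx_npkts = foo_rxeof(sc, count);
		/* Reclaim completed transmit descriptors. */
		foo_txeof(sc);
		if (cmd == POLL_AND_CHECK_STATUS) {
			/* Periodic, more expensive work: status and link. */
			foo_status_check(sc);
		}
	}
	FOO_UNLOCK(sc);
	return (rx_npkts);
}
#endif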

#define MIN_POLL_BURST_MAX	10
#define MAX_POLL_BURST_MAX	20000

static uint32_t poll_burst = 5;
static uint32_t poll_burst_max = 150;	/* good for 100Mbit net and HZ=1000 */
static uint32_t poll_each_burst = 5;

static SYSCTL_NODE(_kern, OID_AUTO, polling, CTLFLAG_RW, 0,
    "Device polling parameters");

SYSCTL_UINT(_kern_polling, OID_AUTO, burst, CTLFLAG_RD,
    &poll_burst, 0, "Current polling burst size");

static int	netisr_poll_scheduled;
static int	netisr_pollmore_scheduled;
static int	poll_shutting_down;

static int poll_burst_max_sysctl(SYSCTL_HANDLER_ARGS)
{
	uint32_t val = poll_burst_max;
	int error;

	error = sysctl_handle_int(oidp, &val, 0, req);
	if (error || !req->newptr)
		return (error);
	if (val < MIN_POLL_BURST_MAX || val > MAX_POLL_BURST_MAX)
		return (EINVAL);

	mtx_lock(&poll_mtx);
	poll_burst_max = val;
	if (poll_burst > poll_burst_max)
		poll_burst = poll_burst_max;
	if (poll_each_burst > poll_burst_max)
		poll_each_burst = MIN_POLL_BURST_MAX;
	mtx_unlock(&poll_mtx);

	return (0);
}
SYSCTL_PROC(_kern_polling, OID_AUTO, burst_max, CTLTYPE_UINT | CTLFLAG_RW,
    0, sizeof(uint32_t), poll_burst_max_sysctl, "I", "Max Polling burst size");

static int poll_each_burst_sysctl(SYSCTL_HANDLER_ARGS)
{
	uint32_t val = poll_each_burst;
	int error;

	error = sysctl_handle_int(oidp, &val, 0, req);
	if (error || !req->newptr)
		return (error);
	if (val < 1)
		return (EINVAL);

	mtx_lock(&poll_mtx);
	if (val > poll_burst_max) {
		mtx_unlock(&poll_mtx);
		return (EINVAL);
	}
	poll_each_burst = val;
	mtx_unlock(&poll_mtx);

	return (0);
}
SYSCTL_PROC(_kern_polling, OID_AUTO, each_burst, CTLTYPE_UINT | CTLFLAG_RW,
    0, sizeof(uint32_t), poll_each_burst_sysctl, "I",
    "Max size of each burst");

static uint32_t poll_in_idle_loop = 0;	/* do we poll in idle loop? */
SYSCTL_UINT(_kern_polling, OID_AUTO, idle_poll, CTLFLAG_RW,
    &poll_in_idle_loop, 0, "Enable device polling in idle loop");

static uint32_t user_frac = 50;
static int user_frac_sysctl(SYSCTL_HANDLER_ARGS)
{
	uint32_t val = user_frac;
	int error;

	error = sysctl_handle_int(oidp, &val, 0, req);
	if (error || !req->newptr)
		return (error);
	if (val > 99)
		return (EINVAL);

	mtx_lock(&poll_mtx);
	user_frac = val;
	mtx_unlock(&poll_mtx);

	return (0);
}
SYSCTL_PROC(_kern_polling, OID_AUTO, user_frac, CTLTYPE_UINT | CTLFLAG_RW,
    0, sizeof(uint32_t), user_frac_sysctl, "I",
    "Desired user fraction of cpu time");

static uint32_t reg_frac_count = 0;
static uint32_t reg_frac = 20;
static int reg_frac_sysctl(SYSCTL_HANDLER_ARGS)
{
	uint32_t val = reg_frac;
	int error;

	error = sysctl_handle_int(oidp, &val, 0, req);
	if (error || !req->newptr)
		return (error);
	if (val < 1 || val > hz)
		return (EINVAL);

	mtx_lock(&poll_mtx);
	reg_frac = val;
	if (reg_frac_count >= reg_frac)
		reg_frac_count = 0;
	mtx_unlock(&poll_mtx);

	return (0);
}
SYSCTL_PROC(_kern_polling, OID_AUTO, reg_frac, CTLTYPE_UINT | CTLFLAG_RW,
    0, sizeof(uint32_t), reg_frac_sysctl, "I",
    "Check status registers every this many polling cycles");

static uint32_t short_ticks;
SYSCTL_UINT(_kern_polling, OID_AUTO, short_ticks, CTLFLAG_RD,
    &short_ticks, 0, "Hardclock ticks shorter than they should be");

static uint32_t lost_polls;
SYSCTL_UINT(_kern_polling, OID_AUTO, lost_polls, CTLFLAG_RD,
    &lost_polls, 0, "How many times we would have lost a poll tick");

static uint32_t pending_polls;
SYSCTL_UINT(_kern_polling, OID_AUTO, pending_polls, CTLFLAG_RD,
    &pending_polls, 0, "Do we need to poll again");

static int residual_burst = 0;
SYSCTL_INT(_kern_polling, OID_AUTO, residual_burst, CTLFLAG_RD,
    &residual_burst, 0, "# of residual cycles in burst");

static uint32_t poll_handlers;		/* next free entry in pr[]. */
SYSCTL_UINT(_kern_polling, OID_AUTO, handlers, CTLFLAG_RD,
    &poll_handlers, 0, "Number of registered poll handlers");

static uint32_t phase;
SYSCTL_UINT(_kern_polling, OID_AUTO, phase, CTLFLAG_RD,
    &phase, 0, "Polling phase");

static uint32_t suspect;
SYSCTL_UINT(_kern_polling, OID_AUTO, suspect, CTLFLAG_RD,
    &suspect, 0, "suspect event");

static uint32_t stalled;
SYSCTL_UINT(_kern_polling, OID_AUTO, stalled, CTLFLAG_RD,
    &stalled, 0, "potential stalls");

static uint32_t idlepoll_sleeping;	/* idlepoll is sleeping */
SYSCTL_UINT(_kern_polling, OID_AUTO, idlepoll_sleeping, CTLFLAG_RD,
    &idlepoll_sleeping, 0, "idlepoll is sleeping");

#define POLL_LIST_LEN	128
struct pollrec {
	poll_handler_t	*handler;
	struct ifnet	*ifp;
};

static struct pollrec pr[POLL_LIST_LEN];

static void
poll_shutdown(void *arg, int howto)
{

	poll_shutting_down = 1;
}

static void
init_device_poll(void)
{

	mtx_init(&poll_mtx, "polling", NULL, MTX_DEF);
	EVENTHANDLER_REGISTER(shutdown_post_sync, poll_shutdown, NULL,
	    SHUTDOWN_PRI_LAST);
}
SYSINIT(device_poll, SI_SUB_SOFTINTR, SI_ORDER_MIDDLE, init_device_poll, NULL);

/*
 * Hook from hardclock. Tries to schedule a netisr, but keeps track
 * of lost ticks due to the previous handler taking too long.
 * Normally, this should not happen, because the polling handler should
 * run for a short time. However, in some cases (e.g. when there are
 * changes in link status etc.) the drivers take a very long time
 * (even in the order of milliseconds) to reset and reconfigure the
 * device, causing apparent lost polls.
 *
 * The first part of the code is just for debugging purposes, and tries
 * to count how often hardclock ticks are shorter than they should be,
 * meaning either stray interrupts or delayed events.
 */
void
hardclock_device_poll(void)
{
	static struct timeval prev_t, t;
	int delta;

	if (poll_handlers == 0 || poll_shutting_down)
		return;

	microuptime(&t);
	delta = (t.tv_usec - prev_t.tv_usec) +
	    (t.tv_sec - prev_t.tv_sec) * 1000000;
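	/*
	 * A full tick is 1000000 / hz microseconds, so the test below
	 * flags ticks shorter than half their nominal length.
	 */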
	if (delta * hz < 500000)
		short_ticks++;
	else
		prev_t = t;

	if (pending_polls > 100) {
		/*
		 * Too much, assume it has stalled (not always true,
		 * see comment above).
		 */
		stalled++;
		pending_polls = 0;
		phase = 0;
	}

	if (phase <= 2) {
		if (phase != 0)
			suspect++;
		phase = 1;
		netisr_poll_scheduled = 1;
		netisr_pollmore_scheduled = 1;
		netisr_sched_poll();
		phase = 2;
	}
	if (pending_polls++ > 0)
		lost_polls++;
}

/*
 * ether_poll is called from the idle loop.
 */
static void
ether_poll(int count)
{
	int i;

	mtx_lock(&poll_mtx);

	if (count > poll_each_burst)
		count = poll_each_burst;

	for (i = 0; i < poll_handlers; i++)
		pr[i].handler(pr[i].ifp, POLL_ONLY, count);

	mtx_unlock(&poll_mtx);
}

/*
 * netisr_pollmore is called after other netisrs, possibly scheduling
 * another NETISR_POLL call, or adapting the burst size for the next cycle.
 *
 * It is very bad to fetch large bursts of packets from a single card at once,
 * because the burst could take a long time to be completely processed, or
 * could saturate the intermediate queue (ipintrq or similar) leading to
 * losses or unfairness. To reduce the problem, and also to account better for
 * time spent in network-related processing, we split the burst in smaller
 * chunks of fixed size, giving control to the other netisrs between chunks.
 * This helps improve fairness and reduce livelock (because we emulate more
 * closely the "process to completion" that we have with fastforwarding),
 * and accounts for the work performed in low level handling and forwarding.
 */
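
/*
 * A worked example of the chunking described above: once poll_burst has
 * grown to its default maximum of 150 and with poll_each_burst = 5, one
 * tick's burst is delivered as 30 NETISR_POLL passes of 5 packets per
 * interface, with the other netisrs run between passes, rather than as
 * one long uninterrupted pass.
 */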

static struct timeval poll_start_t;

void
netisr_pollmore(void)
{
	struct timeval t;
	int kern_load;

	mtx_lock(&poll_mtx);
	if (!netisr_pollmore_scheduled) {
		mtx_unlock(&poll_mtx);
		return;
	}
	netisr_pollmore_scheduled = 0;
	phase = 5;
	if (residual_burst > 0) {
		netisr_poll_scheduled = 1;
		netisr_pollmore_scheduled = 1;
		netisr_sched_poll();
		mtx_unlock(&poll_mtx);
		/* will run immediately on return, followed by netisrs */
		return;
	}
	/* here we can account time spent in netisrs in this tick */
	microuptime(&t);
	kern_load = (t.tv_usec - poll_start_t.tv_usec) +
	    (t.tv_sec - poll_start_t.tv_sec) * 1000000;	/* us */
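	/*
	 * Convert microseconds into a percentage of one tick: a tick is
	 * 1000000 / hz us, so kern_load * hz / 10000 equals
	 * kern_load / (1000000 / hz) * 100.
	 */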
	kern_load = (kern_load * hz) / 10000;		/* 0..100 */
	if (kern_load > (100 - user_frac)) {	/* try decrease ticks */
		if (poll_burst > 1)
			poll_burst--;
	} else {
		if (poll_burst < poll_burst_max)
			poll_burst++;
	}

	pending_polls--;
	if (pending_polls == 0)	/* we are done */
		phase = 0;
	else {
		/*
		 * Last cycle was long and caused us to miss one or more
		 * hardclock ticks. Restart processing again, but slightly
		 * reduce the burst size to prevent this from happening again.
		 */
		poll_burst -= (poll_burst / 8);
		if (poll_burst < 1)
			poll_burst = 1;
		netisr_poll_scheduled = 1;
		netisr_pollmore_scheduled = 1;
		netisr_sched_poll();
		phase = 6;
	}
	mtx_unlock(&poll_mtx);
}

/*
 * netisr_poll is typically scheduled once per tick.
 */
void
netisr_poll(void)
{
	int i, cycles;
	enum poll_cmd arg = POLL_ONLY;

	mtx_lock(&poll_mtx);
	if (!netisr_poll_scheduled) {
		mtx_unlock(&poll_mtx);
		return;
	}
	netisr_poll_scheduled = 0;
	phase = 3;
	if (residual_burst == 0) { /* first call in this tick */
		microuptime(&poll_start_t);
		if (++reg_frac_count == reg_frac) {
			arg = POLL_AND_CHECK_STATUS;
			reg_frac_count = 0;
		}

		residual_burst = poll_burst;
	}
	cycles = (residual_burst < poll_each_burst) ?
	    residual_burst : poll_each_burst;
	residual_burst -= cycles;

	for (i = 0; i < poll_handlers; i++)
		pr[i].handler(pr[i].ifp, arg, cycles);

	phase = 4;
	mtx_unlock(&poll_mtx);
}

/*
 * Try to register a routine for polling. Returns 0 if successful
 * (and polling should be enabled), error code otherwise.
 * A device is not supposed to register itself multiple times.
 *
 * This is called from within the *_ioctl() functions.
 */
int
ether_poll_register(poll_handler_t *h, struct ifnet *ifp)
{
	int i;

	KASSERT(h != NULL, ("%s: handler is NULL", __func__));
	KASSERT(ifp != NULL, ("%s: ifp is NULL", __func__));

	mtx_lock(&poll_mtx);
	if (poll_handlers >= POLL_LIST_LEN) {
		/*
		 * List full, cannot register more entries.
		 * This should never happen; if it does, it is probably a
		 * broken driver trying to register multiple times. Checking
		 * this at runtime is expensive, and won't solve the problem
		 * anyway, so just report a few times and then give up.
		 */
		static int verbose = 10;
		if (verbose > 0) {
			log(LOG_ERR, "poll handlers list full, "
			    "maybe a broken driver?\n");
			verbose--;
		}
		mtx_unlock(&poll_mtx);
		return (ENOMEM); /* no polling for you */
	}

	for (i = 0; i < poll_handlers; i++)
		if (pr[i].ifp == ifp && pr[i].handler != NULL) {
			mtx_unlock(&poll_mtx);
			log(LOG_DEBUG, "ether_poll_register: %s: handler"
			    " already registered\n", ifp->if_xname);
			return (EEXIST);
		}

	pr[poll_handlers].handler = h;
	pr[poll_handlers].ifp = ifp;
	poll_handlers++;
	mtx_unlock(&poll_mtx);
	if (idlepoll_sleeping)
		wakeup(&idlepoll_sleeping);
	return (0);
}

/*
 * Remove interface from the polling list. Called from *_ioctl(), too.
 */
int
ether_poll_deregister(struct ifnet *ifp)
{
	int i;

	KASSERT(ifp != NULL, ("%s: ifp is NULL", __func__));

	mtx_lock(&poll_mtx);

	for (i = 0; i < poll_handlers; i++)
		if (pr[i].ifp == ifp) /* found it */
			break;
	if (i == poll_handlers) {
		log(LOG_DEBUG, "ether_poll_deregister: %s: not found!\n",
		    ifp->if_xname);
		mtx_unlock(&poll_mtx);
		return (ENOENT);
	}
	poll_handlers--;
	if (i < poll_handlers) { /* Last entry replaces this one. */
		pr[i].handler = pr[poll_handlers].handler;
		pr[i].ifp = pr[poll_handlers].ifp;
	}
	mtx_unlock(&poll_mtx);
	return (0);
}
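
/*
 * A sketch of how a driver's ioctl handler is expected to drive the two
 * functions above when IFCAP_POLLING is toggled via SIOCSIFCAP. The
 * foo_poll() handler and the FOO_LOCK()/foo_intr_*() helpers are
 * hypothetical; the overall shape follows existing drivers such as em(4)
 * and fxp(4):
 */
#if 0	/* example only, not compiled */
static int
foo_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct foo_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	int error = 0, mask;

	switch (cmd) {
	case SIOCSIFCAP:
		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
		if (mask & IFCAP_POLLING) {
			if (ifr->ifr_reqcap & IFCAP_POLLING) {
				error = ether_poll_register(foo_poll, ifp);
				if (error != 0)
					break;
				FOO_LOCK(sc);
				foo_intr_disable(sc);	/* rely on polling */
				ifp->if_capenable |= IFCAP_POLLING;
				FOO_UNLOCK(sc);
			} else {
				error = ether_poll_deregister(ifp);
				FOO_LOCK(sc);
				foo_intr_enable(sc);	/* back to interrupts */
				ifp->if_capenable &= ~IFCAP_POLLING;
				FOO_UNLOCK(sc);
			}
		}
		break;
	default:
		error = ether_ioctl(ifp, cmd, data);
		break;
	}
	return (error);
}
#endif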

static void
poll_idle(void)
{
	struct thread *td = curthread;
	struct rtprio rtp;

	rtp.prio = RTP_PRIO_MAX;	/* lowest priority */
	rtp.type = RTP_PRIO_IDLE;
	PROC_SLOCK(td->td_proc);
	rtp_to_pri(&rtp, td);
	PROC_SUNLOCK(td->td_proc);

	for (;;) {
		if (poll_in_idle_loop && poll_handlers > 0) {
			idlepoll_sleeping = 0;
			ether_poll(poll_each_burst);
			thread_lock(td);
			mi_switch(SW_VOL, NULL);
			thread_unlock(td);
		} else {
			idlepoll_sleeping = 1;
			tsleep(&idlepoll_sleeping, 0, "pollid", hz * 3);
		}
	}
}

static struct proc *idlepoll;
static struct kproc_desc idlepoll_kp = {
	"idlepoll",
	poll_idle,
	&idlepoll
};
SYSINIT(idlepoll, SI_SUB_KTHREAD_VM, SI_ORDER_ANY, kproc_start,
    &idlepoll_kp);