FreeBSD/Linux Kernel Cross Reference
sys/kern/kern_poll.c
/*-
 * Copyright (c) 2001-2002 Luigi Rizzo
 *
 * Supported by: the Xorp Project (www.xorp.org)
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/socket.h>			/* needed by net/if.h */
#include <sys/sysctl.h>

#include <machine/md_var.h>		/* for vm_page_zero_idle() */
#include <net/if.h>			/* for IFF_* flags */
#include <net/netisr.h>			/* for NETISR_POLL */

#ifdef SMP
#include "opt_lint.h"
#ifndef COMPILING_LINT
#error DEVICE_POLLING is not compatible with SMP
#endif
#endif

static void netisr_poll(void);		/* the two netisr handlers */
static void netisr_pollmore(void);

void init_device_poll(void);		/* init routine */
void hardclock_device_poll(void);	/* hook from hardclock */
void ether_poll(int);			/* polling while in trap */
int idle_poll(void);			/* poll while in idle loop */

/*
 * Polling support for [network] device drivers.
 *
 * Drivers which support this feature try to register with the
 * polling code.
 *
 * If registration is successful, the driver must disable interrupts,
 * and further I/O is performed through the handler, which is invoked
 * (at least once per clock tick) with 3 arguments: the "arg" passed at
 * register time (a struct ifnet pointer), a command, and a "count" limit.
 *
 * The command can be one of the following:
 *  POLL_ONLY: quick move of "count" packets from input/output queues.
 *  POLL_AND_CHECK_STATUS: as above, plus check status registers or do
 *	other more expensive operations. This command is issued periodically
 *	but less frequently than POLL_ONLY.
 *  POLL_DEREGISTER: deregister and return to interrupt mode.
 *
 * The first two commands are only issued if the interface is marked as
 * 'IFF_UP and IFF_RUNNING', the last one only if IFF_RUNNING is set.
 *
 * The count limit specifies how much work the handler can do during the
 * call -- typically this is the number of packets to be received, or
 * transmitted, etc. (drivers are free to interpret this number, as long
 * as the max time spent in the function grows roughly linearly with the
 * count).
 *
 * Deregistration can be requested by the driver itself (typically in the
 * *_stop() routine), or by the polling code, by invoking the handler.
 *
 * Polling can be globally enabled or disabled with the sysctl variable
 * kern.polling.enable (default is 0, disabled).
 *
 * A second variable controls the sharing of CPU between polling/kernel
 * network processing, and other activities (typically userlevel tasks):
 * kern.polling.user_frac (between 0 and 100, default 50) sets the share
 * of CPU allocated to user tasks. CPU is allocated proportionally to the
 * shares, by dynamically adjusting the "count" (poll_burst).
 *
 * Other parameters should be left at their default values.
 * The following constraints hold:
 *
 *	1 <= poll_each_burst <= poll_burst <= poll_burst_max
 *	0 <= poll_in_trap <= poll_each_burst
 *	MIN_POLL_BURST_MAX <= poll_burst_max <= MAX_POLL_BURST_MAX
 */
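
/*
 * As an illustration, a driver's polling handler typically looks like
 * the sketch below (not part of this file; foo_poll() and the foo_*
 * helpers are hypothetical):
 *
 *	static void
 *	foo_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
 *	{
 *		struct foo_softc *sc = ifp->if_softc;
 *
 *		if (cmd == POLL_DEREGISTER) {
 *			foo_enable_intr(sc);	... back to interrupt mode
 *			return;
 *		}
 *		foo_rxeof(sc, count);	... receive at most "count" packets
 *		foo_txeof(sc);		... reclaim completed transmissions
 *		if (cmd == POLL_AND_CHECK_STATUS)
 *			foo_check_status(sc);	... rarer, expensive checks
 *	}
 */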

#define MIN_POLL_BURST_MAX	10
#define MAX_POLL_BURST_MAX	1000

SYSCTL_NODE(_kern, OID_AUTO, polling, CTLFLAG_RW, 0,
	"Device polling parameters");

static u_int32_t poll_burst = 5;
SYSCTL_UINT(_kern_polling, OID_AUTO, burst, CTLFLAG_RW,
	&poll_burst, 0, "Current polling burst size");

static u_int32_t poll_each_burst = 5;
SYSCTL_UINT(_kern_polling, OID_AUTO, each_burst, CTLFLAG_RW,
	&poll_each_burst, 0, "Max size of each burst");

static u_int32_t poll_burst_max = 150;	/* good for 100Mbit net and HZ=1000 */
SYSCTL_UINT(_kern_polling, OID_AUTO, burst_max, CTLFLAG_RW,
	&poll_burst_max, 0, "Max Polling burst size");

static u_int32_t poll_in_idle_loop = 1;	/* do we poll in idle loop ? */
SYSCTL_UINT(_kern_polling, OID_AUTO, idle_poll, CTLFLAG_RW,
	&poll_in_idle_loop, 0, "Enable device polling in idle loop");

u_int32_t poll_in_trap;			/* used in trap.c */
SYSCTL_UINT(_kern_polling, OID_AUTO, poll_in_trap, CTLFLAG_RW,
	&poll_in_trap, 0, "Poll burst size during a trap");

static u_int32_t user_frac = 50;
SYSCTL_UINT(_kern_polling, OID_AUTO, user_frac, CTLFLAG_RW,
	&user_frac, 0, "Desired user fraction of cpu time");

static u_int32_t reg_frac = 20;
SYSCTL_UINT(_kern_polling, OID_AUTO, reg_frac, CTLFLAG_RW,
	&reg_frac, 0, "Every this many cycles poll register");

static u_int32_t short_ticks;
SYSCTL_UINT(_kern_polling, OID_AUTO, short_ticks, CTLFLAG_RW,
	&short_ticks, 0, "Hardclock ticks shorter than they should be");

static u_int32_t lost_polls;
SYSCTL_UINT(_kern_polling, OID_AUTO, lost_polls, CTLFLAG_RW,
	&lost_polls, 0, "How many times we would have lost a poll tick");

static u_int32_t pending_polls;
SYSCTL_UINT(_kern_polling, OID_AUTO, pending_polls, CTLFLAG_RW,
	&pending_polls, 0, "Do we need to poll again");

static int residual_burst = 0;
SYSCTL_INT(_kern_polling, OID_AUTO, residual_burst, CTLFLAG_RW,
	&residual_burst, 0, "# of residual cycles in burst");

static u_int32_t poll_handlers;		/* next free entry in pr[]. */
SYSCTL_UINT(_kern_polling, OID_AUTO, handlers, CTLFLAG_RD,
	&poll_handlers, 0, "Number of registered poll handlers");

static int polling = 0;			/* global polling enable */
SYSCTL_UINT(_kern_polling, OID_AUTO, enable, CTLFLAG_RW,
	&polling, 0, "Polling enabled");

static u_int32_t phase;
SYSCTL_UINT(_kern_polling, OID_AUTO, phase, CTLFLAG_RW,
	&phase, 0, "Polling phase");

static u_int32_t suspect;
SYSCTL_UINT(_kern_polling, OID_AUTO, suspect, CTLFLAG_RW,
	&suspect, 0, "suspect event");

static u_int32_t stalled;
SYSCTL_UINT(_kern_polling, OID_AUTO, stalled, CTLFLAG_RW,
	&stalled, 0, "potential stalls");

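/*
 * Typical tuning from userland (illustrative):
 *
 *	sysctl kern.polling.enable=1		# turn polling on
 *	sysctl kern.polling.user_frac=50	# CPU share for user tasks
 */
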
#define POLL_LIST_LEN	128
struct pollrec {
	poll_handler_t	*handler;
	struct ifnet	*ifp;
};

static struct pollrec pr[POLL_LIST_LEN];

/*
 * Register the relevant netisrs. Called from kern_clock.c:
 */
void
init_device_poll(void)
{
	register_netisr(NETISR_POLL, netisr_poll);
	register_netisr(NETISR_POLLMORE, netisr_pollmore);
}

/*
 * Hook from hardclock. Tries to schedule a netisr, but keeps track
 * of lost ticks due to the previous handler taking too long.
 * Normally, this should not happen, because the polling handler should
 * run for a short time. However, in some cases (e.g. when there are
 * changes in link status etc.) the drivers take a very long time
 * (even on the order of milliseconds) to reset and reconfigure the
 * device, causing apparent lost polls.
 *
 * The first part of the code is just for debugging purposes, and tries
 * to count how often hardclock ticks are shorter than they should be,
 * meaning either stray interrupts or delayed events.
 */
void
hardclock_device_poll(void)
{
	static struct timeval prev_t, t;
	int delta;

	if (poll_handlers == 0)
		return;

	microuptime(&t);
	delta = (t.tv_usec - prev_t.tv_usec) +
		(t.tv_sec - prev_t.tv_sec)*1000000;
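	/*
	 * A nominal tick lasts 1000000/hz us, so the test below flags
	 * ticks that arrive less than half a tick after the previous one.
	 */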
	if (delta * hz < 500000)
		short_ticks++;
	else
		prev_t = t;

	if (pending_polls > 100) {
		/*
		 * Too much, assume it has stalled (not always true,
		 * see comment above).
		 */
		stalled++;
		pending_polls = 0;
		phase = 0;
	}

	if (phase <= 2) {
		if (phase != 0)
			suspect++;
		phase = 1;
		schednetisr(NETISR_POLL);
		phase = 2;
	}
	if (pending_polls++ > 0)
		lost_polls++;
}

/*
 * ether_poll is called from the idle loop or from the trap handler.
 */
void
ether_poll(int count)
{
	int i;
	int s = splimp();

	if (count > poll_each_burst)
		count = poll_each_burst;
	for (i = 0 ; i < poll_handlers ; i++)
		if (pr[i].handler && (IFF_UP|IFF_RUNNING) ==
		    (pr[i].ifp->if_flags & (IFF_UP|IFF_RUNNING)) )
			pr[i].handler(pr[i].ifp, 0, count); /* quick check */
	splx(s);
}

/*
 * idle_poll replaces the body of the idle loop when DEVICE_POLLING
 * is used.
 */
int
idle_poll(void)
{
	if (poll_in_idle_loop && poll_handlers > 0) {
		int s = splimp();
		enable_intr();
		ether_poll(poll_each_burst);
		disable_intr();
		splx(s);
		vm_page_zero_idle();
		return 1;
	} else
		return vm_page_zero_idle();
}

/*
 * netisr_pollmore is called after other netisrs, possibly scheduling
 * another NETISR_POLL call, or adapting the burst size for the next cycle.
 *
 * It is very bad to fetch large bursts of packets from a single card at once,
 * because the burst could take a long time to be completely processed, or
 * could saturate the intermediate queue (ipintrq or similar) leading to
 * losses or unfairness. To reduce the problem, and also to account better for
 * time spent in network-related processing, we split the burst in smaller
 * chunks of fixed size, giving control to the other netisrs between chunks.
 * This helps in improving the fairness, reducing livelock (because we
 * emulate more closely the "process to completion" that we have with
 * fastforwarding) and accounting for the work performed in low level
 * handling and forwarding.
 */
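
/*
 * For example, with poll_burst = 150 and poll_each_burst = 5
 * (illustrative values), a tick's work is split into 30 chunks;
 * each chunk polls every interface for at most 5 packets, and the
 * other netisrs get a chance to run between chunks.
 */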

static struct timeval poll_start_t;

static void
netisr_pollmore(void)
{
	struct timeval t;
	int kern_load;
	int s = splhigh();

	phase = 5;
	if (residual_burst > 0) {
		schednetisr(NETISR_POLL);
		/* will run immediately on return, followed by netisrs */
		splx(s);
		return;
	}
	/* here we can account time spent in netisrs in this tick */
	microuptime(&t);
	kern_load = (t.tv_usec - poll_start_t.tv_usec) +
		(t.tv_sec - poll_start_t.tv_sec)*1000000;	/* us */
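	/*
	 * One tick lasts 1000000/hz us, so multiplying the elapsed us
	 * by hz and dividing by 10000 expresses the kernel share of
	 * this tick as a percentage.
	 */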
	kern_load = (kern_load * hz) / 10000;		/* 0..100 */
	if (kern_load > (100 - user_frac)) {		/* try decrease ticks */
		if (poll_burst > 1)
			poll_burst--;
	} else {
		if (poll_burst < poll_burst_max)
			poll_burst++;
	}

	pending_polls--;
	if (pending_polls == 0)		/* we are done */
		phase = 0;
	else {
		/*
		 * Last cycle was long and caused us to miss one or more
		 * hardclock ticks. Restart processing again, but slightly
		 * reduce the burst size to prevent this from happening again.
		 */
		poll_burst -= (poll_burst / 8);
		if (poll_burst < 1)
			poll_burst = 1;
		schednetisr(NETISR_POLL);
		phase = 6;
	}
	splx(s);
}

/*
 * netisr_poll is scheduled by schednetisr when appropriate, typically once
 * per tick. It is called at splnet() so the first thing to do is to upgrade
 * to splimp(), and call all registered handlers.
 */
static void
netisr_poll(void)
{
	static int reg_frac_count;
	int i, cycles;
	enum poll_cmd arg = POLL_ONLY;
	int s = splimp();

	phase = 3;
	if (residual_burst == 0) {	/* first call in this tick */
		microuptime(&poll_start_t);
		/*
		 * Check that parameters are consistent with runtime
		 * variables. Some of these tests could be done at sysctl
		 * time, but the savings would be very limited because we
		 * still have to check against reg_frac_count and
		 * poll_each_burst. So, instead of writing separate sysctl
		 * handlers, we do it all here.
		 */

		if (reg_frac > hz)
			reg_frac = hz;
		else if (reg_frac < 1)
			reg_frac = 1;
		if (reg_frac_count > reg_frac)
			reg_frac_count = reg_frac - 1;
		if (reg_frac_count-- == 0) {
			arg = POLL_AND_CHECK_STATUS;
			reg_frac_count = reg_frac - 1;
		}
		if (poll_burst_max < MIN_POLL_BURST_MAX)
			poll_burst_max = MIN_POLL_BURST_MAX;
		else if (poll_burst_max > MAX_POLL_BURST_MAX)
			poll_burst_max = MAX_POLL_BURST_MAX;

		if (poll_each_burst < 1)
			poll_each_burst = 1;
		else if (poll_each_burst > poll_burst_max)
			poll_each_burst = poll_burst_max;

		if (poll_burst > poll_burst_max)
			poll_burst = poll_burst_max;
		residual_burst = poll_burst;
	}
	cycles = (residual_burst < poll_each_burst) ?
		residual_burst : poll_each_burst;
	residual_burst -= cycles;

	if (polling) {
		for (i = 0 ; i < poll_handlers ; i++)
			if (pr[i].handler && (IFF_UP|IFF_RUNNING) ==
			    (pr[i].ifp->if_flags & (IFF_UP|IFF_RUNNING)) )
				pr[i].handler(pr[i].ifp, arg, cycles);
	} else {	/* unregister */
		for (i = 0 ; i < poll_handlers ; i++) {
			if (pr[i].handler &&
			    pr[i].ifp->if_flags & IFF_RUNNING) {
				pr[i].ifp->if_ipending &= ~IFF_POLLING;
				pr[i].handler(pr[i].ifp, POLL_DEREGISTER, 1);
			}
			pr[i].handler = NULL;
		}
		residual_burst = 0;
		poll_handlers = 0;
	}
	schednetisr(NETISR_POLLMORE);
	phase = 4;
	splx(s);
}

/*
 * Try to register a routine for polling. Returns 1 if successful
 * (and polling should be enabled), 0 otherwise.
 * A device is not supposed to register itself multiple times.
 *
 * This is called from within the *_intr() functions, so we do not need
 * further locking.
 */
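/*
 * Illustrative use from a driver's interrupt handler (foo_poll and
 * foo_disable_intr are hypothetical):
 *
 *	if (ether_poll_register(foo_poll, ifp)) {
 *		foo_disable_intr(sc);	... polling takes over from here
 *		foo_poll(ifp, POLL_ONLY, 1);
 *		return;
 *	}
 */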
int
ether_poll_register(poll_handler_t *h, struct ifnet *ifp)
{
	int s;

	if (polling == 0)	/* polling disabled, cannot register */
		return 0;
	if (h == NULL || ifp == NULL)	/* bad arguments */
		return 0;
	if ( !(ifp->if_flags & IFF_UP) )	/* must be up */
		return 0;
	if (ifp->if_ipending & IFF_POLLING)	/* already polling */
		return 0;

	s = splhigh();
	if (poll_handlers >= POLL_LIST_LEN) {
		/*
		 * List full, cannot register more entries.
		 * This should never happen; if it does, it is probably a
		 * broken driver trying to register multiple times. Checking
		 * this at runtime is expensive, and won't solve the problem
		 * anyway, so just report a few times and then give up.
		 */
		static int verbose = 10;
		splx(s);
		if (verbose > 0) {
			printf("poll handlers list full, "
			    "maybe a broken driver ?\n");
			verbose--;
		}
		return 0;	/* no polling for you */
	}

	pr[poll_handlers].handler = h;
	pr[poll_handlers].ifp = ifp;
	poll_handlers++;
	ifp->if_ipending |= IFF_POLLING;
	splx(s);
	return 1;	/* polling enabled in next call */
}

/*
 * Remove the interface from the polling list. Normally called by *_stop().
 * It is not an error to call it with IFF_POLLING clear; the call is
 * sufficiently rare that saving the space of an extra test in each
 * driver is worth the one additional function call.
 */
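/*
 * Illustrative use from a hypothetical foo_stop():
 *
 *	static void
 *	foo_stop(struct foo_softc *sc)
 *	{
 *		ether_poll_deregister(sc->ifp);
 *		foo_enable_intr(sc);	... back to interrupt mode
 *	}
 */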
int
ether_poll_deregister(struct ifnet *ifp)
{
	int i;
	int s = splimp();

	if ( !ifp || !(ifp->if_ipending & IFF_POLLING) ) {
		splx(s);
		return 0;
	}
	for (i = 0 ; i < poll_handlers ; i++)
		if (pr[i].ifp == ifp)	/* found it */
			break;
	ifp->if_ipending &= ~IFF_POLLING;	/* found or not... */
	if (i == poll_handlers) {
		splx(s);
		printf("ether_poll_deregister: ifp not found!!!\n");
		return 0;
	}
	poll_handlers--;
	if (i < poll_handlers) {	/* Last entry replaces this one. */
		pr[i].handler = pr[poll_handlers].handler;
		pr[i].ifp = pr[poll_handlers].ifp;
	}
	splx(s);
	return 1;
}