FreeBSD/Linux Kernel Cross Reference
sys/kern/kern_event.c
1 /*-
2 * Copyright (c) 1999,2000,2001 Jonathan Lemon <jlemon@FreeBSD.org>
3 * Copyright 2004 John-Mark Gurney <jmg@FreeBSD.org>
4 * Copyright (c) 2009 Apple, Inc.
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 *
16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
17 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
20 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26 * SUCH DAMAGE.
27 */
28
29 #include <sys/cdefs.h>
30 __FBSDID("$FreeBSD: releng/8.4/sys/kern/kern_event.c 239916 2012-08-30 18:30:08Z jhb $");
31
32 #include "opt_ktrace.h"
33
34 #include <sys/param.h>
35 #include <sys/systm.h>
36 #include <sys/kernel.h>
37 #include <sys/lock.h>
38 #include <sys/mutex.h>
39 #include <sys/proc.h>
40 #include <sys/malloc.h>
41 #include <sys/unistd.h>
42 #include <sys/file.h>
43 #include <sys/filedesc.h>
44 #include <sys/filio.h>
45 #include <sys/fcntl.h>
46 #include <sys/kthread.h>
47 #include <sys/selinfo.h>
48 #include <sys/queue.h>
49 #include <sys/event.h>
50 #include <sys/eventvar.h>
51 #include <sys/poll.h>
52 #include <sys/protosw.h>
53 #include <sys/sigio.h>
54 #include <sys/signalvar.h>
55 #include <sys/socket.h>
56 #include <sys/socketvar.h>
57 #include <sys/stat.h>
58 #include <sys/sysctl.h>
59 #include <sys/sysproto.h>
60 #include <sys/syscallsubr.h>
61 #include <sys/taskqueue.h>
62 #include <sys/uio.h>
63 #ifdef KTRACE
64 #include <sys/ktrace.h>
65 #endif
66
67 #include <vm/uma.h>
68
69 static MALLOC_DEFINE(M_KQUEUE, "kqueue", "memory for kqueue system");
70
71 /*
    72  * This lock is used when multiple kq locks must be held.  It should
    73  * possibly be made into a per-process lock.
74 */
75 static struct mtx kq_global;
76 MTX_SYSINIT(kq_global, &kq_global, "kqueue order", MTX_DEF);
77 #define KQ_GLOBAL_LOCK(lck, haslck) do { \
78 if (!haslck) \
79 mtx_lock(lck); \
80 haslck = 1; \
81 } while (0)
82 #define KQ_GLOBAL_UNLOCK(lck, haslck) do { \
83 if (haslck) \
84 mtx_unlock(lck); \
85 haslck = 0; \
86 } while (0)
87
88 TASKQUEUE_DEFINE_THREAD(kqueue);
89
90 static int kevent_copyout(void *arg, struct kevent *kevp, int count);
91 static int kevent_copyin(void *arg, struct kevent *kevp, int count);
92 static int kqueue_register(struct kqueue *kq, struct kevent *kev,
93 struct thread *td, int waitok);
94 static int kqueue_acquire(struct file *fp, struct kqueue **kqp);
95 static void kqueue_release(struct kqueue *kq, int locked);
96 static int kqueue_expand(struct kqueue *kq, struct filterops *fops,
97 uintptr_t ident, int waitok);
98 static void kqueue_task(void *arg, int pending);
99 static int kqueue_scan(struct kqueue *kq, int maxevents,
100 struct kevent_copyops *k_ops,
101 const struct timespec *timeout,
102 struct kevent *keva, struct thread *td);
103 static void kqueue_wakeup(struct kqueue *kq);
104 static struct filterops *kqueue_fo_find(int filt);
105 static void kqueue_fo_release(int filt);
106
107 static fo_rdwr_t kqueue_read;
108 static fo_rdwr_t kqueue_write;
109 static fo_truncate_t kqueue_truncate;
110 static fo_ioctl_t kqueue_ioctl;
111 static fo_poll_t kqueue_poll;
112 static fo_kqfilter_t kqueue_kqfilter;
113 static fo_stat_t kqueue_stat;
114 static fo_close_t kqueue_close;
115
116 static struct fileops kqueueops = {
117 .fo_read = kqueue_read,
118 .fo_write = kqueue_write,
119 .fo_truncate = kqueue_truncate,
120 .fo_ioctl = kqueue_ioctl,
121 .fo_poll = kqueue_poll,
122 .fo_kqfilter = kqueue_kqfilter,
123 .fo_stat = kqueue_stat,
124 .fo_close = kqueue_close,
125 };
126
127 static int knote_attach(struct knote *kn, struct kqueue *kq);
128 static void knote_drop(struct knote *kn, struct thread *td);
129 static void knote_enqueue(struct knote *kn);
130 static void knote_dequeue(struct knote *kn);
131 static void knote_init(void);
132 static struct knote *knote_alloc(int waitok);
133 static void knote_free(struct knote *kn);
134
135 static void filt_kqdetach(struct knote *kn);
136 static int filt_kqueue(struct knote *kn, long hint);
137 static int filt_procattach(struct knote *kn);
138 static void filt_procdetach(struct knote *kn);
139 static int filt_proc(struct knote *kn, long hint);
140 static int filt_fileattach(struct knote *kn);
141 static void filt_timerexpire(void *knx);
142 static int filt_timerattach(struct knote *kn);
143 static void filt_timerdetach(struct knote *kn);
144 static int filt_timer(struct knote *kn, long hint);
145 static int filt_userattach(struct knote *kn);
146 static void filt_userdetach(struct knote *kn);
147 static int filt_user(struct knote *kn, long hint);
148 static void filt_usertouch(struct knote *kn, struct kevent *kev,
149 u_long type);
150
151 static struct filterops file_filtops =
152 { 1, filt_fileattach, NULL, NULL };
153 static struct filterops kqread_filtops =
154 { 1, NULL, filt_kqdetach, filt_kqueue };
155 /* XXX - move to kern_proc.c? */
156 static struct filterops proc_filtops =
157 { 0, filt_procattach, filt_procdetach, filt_proc };
158 static struct filterops timer_filtops =
159 { 0, filt_timerattach, filt_timerdetach, filt_timer };
160 static struct filterops user_filtops = {
161 .f_attach = filt_userattach,
162 .f_detach = filt_userdetach,
163 .f_event = filt_user,
164 .f_touch = filt_usertouch,
165 };
166
167 static uma_zone_t knote_zone;
168 static int kq_ncallouts = 0;
169 static int kq_calloutmax = (4 * 1024);
170 SYSCTL_INT(_kern, OID_AUTO, kq_calloutmax, CTLFLAG_RW,
171 &kq_calloutmax, 0, "Maximum number of callouts allocated for kqueue");
172
173 /* XXX - ensure not KN_INFLUX?? */
174 #define KNOTE_ACTIVATE(kn, islock) do { \
175 if ((islock)) \
176 mtx_assert(&(kn)->kn_kq->kq_lock, MA_OWNED); \
177 else \
178 KQ_LOCK((kn)->kn_kq); \
179 (kn)->kn_status |= KN_ACTIVE; \
180 if (((kn)->kn_status & (KN_QUEUED | KN_DISABLED)) == 0) \
181 knote_enqueue((kn)); \
182 if (!(islock)) \
183 KQ_UNLOCK((kn)->kn_kq); \
184 } while(0)
185 #define KQ_LOCK(kq) do { \
186 mtx_lock(&(kq)->kq_lock); \
187 } while (0)
188 #define KQ_FLUX_WAKEUP(kq) do { \
189 if (((kq)->kq_state & KQ_FLUXWAIT) == KQ_FLUXWAIT) { \
190 (kq)->kq_state &= ~KQ_FLUXWAIT; \
191 wakeup((kq)); \
192 } \
193 } while (0)
194 #define KQ_UNLOCK_FLUX(kq) do { \
195 KQ_FLUX_WAKEUP(kq); \
196 mtx_unlock(&(kq)->kq_lock); \
197 } while (0)
198 #define KQ_UNLOCK(kq) do { \
199 mtx_unlock(&(kq)->kq_lock); \
200 } while (0)
201 #define KQ_OWNED(kq) do { \
202 mtx_assert(&(kq)->kq_lock, MA_OWNED); \
203 } while (0)
204 #define KQ_NOTOWNED(kq) do { \
205 mtx_assert(&(kq)->kq_lock, MA_NOTOWNED); \
206 } while (0)
207 #define KN_LIST_LOCK(kn) do { \
208 if (kn->kn_knlist != NULL) \
209 kn->kn_knlist->kl_lock(kn->kn_knlist->kl_lockarg); \
210 } while (0)
211 #define KN_LIST_UNLOCK(kn) do { \
212 if (kn->kn_knlist != NULL) \
213 kn->kn_knlist->kl_unlock(kn->kn_knlist->kl_lockarg); \
214 } while (0)
215 #define KNL_ASSERT_LOCK(knl, islocked) do { \
216 if (islocked) \
217 KNL_ASSERT_LOCKED(knl); \
218 else \
219 KNL_ASSERT_UNLOCKED(knl); \
220 } while (0)
221 #ifdef INVARIANTS
222 #define KNL_ASSERT_LOCKED(knl) do { \
223 knl->kl_assert_locked((knl)->kl_lockarg); \
224 } while (0)
225 #define KNL_ASSERT_UNLOCKED(knl) do { \
226 knl->kl_assert_unlocked((knl)->kl_lockarg); \
227 } while (0)
228 #else /* !INVARIANTS */
229 #define KNL_ASSERT_LOCKED(knl) do {} while(0)
230 #define KNL_ASSERT_UNLOCKED(knl) do {} while (0)
231 #endif /* INVARIANTS */
232
233 #define KN_HASHSIZE 64 /* XXX should be tunable */
234 #define KN_HASH(val, mask) (((val) ^ (val >> 8)) & (mask))
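/*
 * Worked example: with the default mask KN_HASHSIZE - 1 (0x3f), an
 * ident of 0x1234 hashes to (0x1234 ^ 0x12) & 0x3f = 0x1226 & 0x3f =
 * 0x26 (bucket 38); folding in the second byte spreads out idents
 * that differ only above the low bits.
 */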
235
236 static int
237 filt_nullattach(struct knote *kn)
238 {
239
240 return (ENXIO);
    241 }
242
243 struct filterops null_filtops =
244 { 0, filt_nullattach, NULL, NULL };
245
246 /* XXX - make SYSINIT to add these, and move into respective modules. */
247 extern struct filterops sig_filtops;
248 extern struct filterops fs_filtops;
249
250 /*
    251  * Table for all system-defined filters.
252 */
253 static struct mtx filterops_lock;
254 MTX_SYSINIT(kqueue_filterops, &filterops_lock, "protect sysfilt_ops",
255 MTX_DEF);
256 static struct {
257 struct filterops *for_fop;
258 int for_refcnt;
259 } sysfilt_ops[EVFILT_SYSCOUNT] = {
260 { &file_filtops }, /* EVFILT_READ */
261 { &file_filtops }, /* EVFILT_WRITE */
262 { &null_filtops }, /* EVFILT_AIO */
263 { &file_filtops }, /* EVFILT_VNODE */
264 { &proc_filtops }, /* EVFILT_PROC */
265 { &sig_filtops }, /* EVFILT_SIGNAL */
266 { &timer_filtops }, /* EVFILT_TIMER */
267 { &null_filtops }, /* former EVFILT_NETDEV */
268 { &fs_filtops }, /* EVFILT_FS */
269 { &null_filtops }, /* EVFILT_LIO */
270 { &user_filtops }, /* EVFILT_USER */
271 };
272
273 /*
274 * Simple redirection for all cdevsw style objects to call their fo_kqfilter
275 * method.
276 */
277 static int
278 filt_fileattach(struct knote *kn)
279 {
280
281 return (fo_kqfilter(kn->kn_fp, kn));
282 }
283
284 /*ARGSUSED*/
285 static int
286 kqueue_kqfilter(struct file *fp, struct knote *kn)
287 {
288 struct kqueue *kq = kn->kn_fp->f_data;
289
290 if (kn->kn_filter != EVFILT_READ)
291 return (EINVAL);
292
293 kn->kn_status |= KN_KQUEUE;
294 kn->kn_fop = &kqread_filtops;
295 knlist_add(&kq->kq_sel.si_note, kn, 0);
296
297 return (0);
298 }
299
300 static void
301 filt_kqdetach(struct knote *kn)
302 {
303 struct kqueue *kq = kn->kn_fp->f_data;
304
305 knlist_remove(&kq->kq_sel.si_note, kn, 0);
306 }
307
308 /*ARGSUSED*/
309 static int
310 filt_kqueue(struct knote *kn, long hint)
311 {
312 struct kqueue *kq = kn->kn_fp->f_data;
313
314 kn->kn_data = kq->kq_count;
315 return (kn->kn_data > 0);
316 }
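/*
 * Illustrative userland sketch (not part of this file): the routines
 * above let one kqueue be monitored by another.  Only EVFILT_READ is
 * supported, and kn_data reports the number of events pending on the
 * inner queue.
 */
#if 0
	int outer = kqueue(), inner = kqueue();
	struct kevent kev;

	EV_SET(&kev, inner, EVFILT_READ, EV_ADD, 0, 0, NULL);
	kevent(outer, &kev, 1, NULL, 0, NULL);
	/* waiting on `outer' now also reports readiness of `inner' */
#endif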
317
318 /* XXX - move to kern_proc.c? */
319 static int
320 filt_procattach(struct knote *kn)
321 {
322 struct proc *p;
323 int immediate;
324 int error;
325
326 immediate = 0;
327 p = pfind(kn->kn_id);
328 if (p == NULL && (kn->kn_sfflags & NOTE_EXIT)) {
329 p = zpfind(kn->kn_id);
330 immediate = 1;
331 } else if (p != NULL && (p->p_flag & P_WEXIT)) {
332 immediate = 1;
333 }
334
335 if (p == NULL)
336 return (ESRCH);
337 if ((error = p_cansee(curthread, p))) {
338 PROC_UNLOCK(p);
339 return (error);
340 }
341
342 kn->kn_ptr.p_proc = p;
343 kn->kn_flags |= EV_CLEAR; /* automatically set */
344
345 /*
346 * internal flag indicating registration done by kernel
347 */
348 if (kn->kn_flags & EV_FLAG1) {
349 kn->kn_data = kn->kn_sdata; /* ppid */
350 kn->kn_fflags = NOTE_CHILD;
351 kn->kn_flags &= ~EV_FLAG1;
352 }
353
354 if (immediate == 0)
355 knlist_add(&p->p_klist, kn, 1);
356
357 /*
358 * Immediately activate any exit notes if the target process is a
359 * zombie. This is necessary to handle the case where the target
360 * process, e.g. a child, dies before the kevent is registered.
361 */
362 if (immediate && filt_proc(kn, NOTE_EXIT))
363 KNOTE_ACTIVATE(kn, 0);
364
365 PROC_UNLOCK(p);
366
367 return (0);
368 }
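/*
 * Illustrative userland sketch (not part of this file): waiting for a
 * child to exit via the filter above.  Because zombies are handled in
 * filt_procattach, this works even if the child exits before the
 * kevent is registered.
 */
#if 0
	struct kevent kev, ev;
	int kq = kqueue();
	pid_t pid;

	if ((pid = fork()) == 0)
		_exit(0);
	EV_SET(&kev, pid, EVFILT_PROC, EV_ADD, NOTE_EXIT, 0, NULL);
	kevent(kq, &kev, 1, &ev, 1, NULL);
	/* ev.data now holds the wait(2)-style exit status (p_xstat) */
#endif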
369
370 /*
371 * The knote may be attached to a different process, which may exit,
372 * leaving nothing for the knote to be attached to. So when the process
373 * exits, the knote is marked as DETACHED and also flagged as ONESHOT so
374 * it will be deleted when read out. However, as part of the knote deletion,
375 * this routine is called, so a check is needed to avoid actually performing
376 * a detach, because the original process does not exist any more.
377 */
378 /* XXX - move to kern_proc.c? */
379 static void
380 filt_procdetach(struct knote *kn)
381 {
382 struct proc *p;
383
384 p = kn->kn_ptr.p_proc;
385 knlist_remove(&p->p_klist, kn, 0);
386 kn->kn_ptr.p_proc = NULL;
387 }
388
389 /* XXX - move to kern_proc.c? */
390 static int
391 filt_proc(struct knote *kn, long hint)
392 {
393 struct proc *p = kn->kn_ptr.p_proc;
394 u_int event;
395
396 /*
397 * mask off extra data
398 */
399 event = (u_int)hint & NOTE_PCTRLMASK;
400
401 /*
402 * if the user is interested in this event, record it.
403 */
404 if (kn->kn_sfflags & event)
405 kn->kn_fflags |= event;
406
407 /*
408 * process is gone, so flag the event as finished.
409 */
410 if (event == NOTE_EXIT) {
411 if (!(kn->kn_status & KN_DETACHED))
412 knlist_remove_inevent(&p->p_klist, kn);
413 kn->kn_flags |= (EV_EOF | EV_ONESHOT);
414 kn->kn_data = p->p_xstat;
415 kn->kn_ptr.p_proc = NULL;
416 return (1);
417 }
418
419 return (kn->kn_fflags != 0);
420 }
421
422 /*
    423  * Called when a process forks.  It mostly does the same as knote(),
    424  * activating all knotes registered to be activated when the process
    425  * forks.  Additionally, for each knote attached to the parent, check
    426  * whether the user wants to track the new process.  If so, attach a
    427  * new knote to the child and immediately report an event with the
    428  * child's pid.
429 */
430 void
431 knote_fork(struct knlist *list, int pid)
432 {
433 struct kqueue *kq;
434 struct knote *kn;
435 struct kevent kev;
436 int error;
437
438 if (list == NULL)
439 return;
440 list->kl_lock(list->kl_lockarg);
441
442 SLIST_FOREACH(kn, &list->kl_list, kn_selnext) {
443 if ((kn->kn_status & KN_INFLUX) == KN_INFLUX)
444 continue;
445 kq = kn->kn_kq;
446 KQ_LOCK(kq);
447 if ((kn->kn_status & KN_INFLUX) == KN_INFLUX) {
448 KQ_UNLOCK(kq);
449 continue;
450 }
451
452 /*
453 * The same as knote(), activate the event.
454 */
455 if ((kn->kn_sfflags & NOTE_TRACK) == 0) {
456 kn->kn_status |= KN_HASKQLOCK;
457 if (kn->kn_fop->f_event(kn, NOTE_FORK | pid))
458 KNOTE_ACTIVATE(kn, 1);
459 kn->kn_status &= ~KN_HASKQLOCK;
460 KQ_UNLOCK(kq);
461 continue;
462 }
463
464 /*
465 * The NOTE_TRACK case. In addition to the activation
466 * of the event, we need to register new event to
467 * track the child. Drop the locks in preparation for
468 * the call to kqueue_register().
469 */
470 kn->kn_status |= KN_INFLUX;
471 KQ_UNLOCK(kq);
472 list->kl_unlock(list->kl_lockarg);
473
474 /*
475 * Activate existing knote and register a knote with
476 * new process.
477 */
478 kev.ident = pid;
479 kev.filter = kn->kn_filter;
480 kev.flags = kn->kn_flags | EV_ADD | EV_ENABLE | EV_FLAG1;
481 kev.fflags = kn->kn_sfflags;
482 kev.data = kn->kn_id; /* parent */
483 kev.udata = kn->kn_kevent.udata;/* preserve udata */
484 error = kqueue_register(kq, &kev, NULL, 0);
485 if (kn->kn_fop->f_event(kn, NOTE_FORK | pid))
486 KNOTE_ACTIVATE(kn, 0);
487 if (error)
488 kn->kn_fflags |= NOTE_TRACKERR;
489 KQ_LOCK(kq);
490 kn->kn_status &= ~KN_INFLUX;
491 KQ_UNLOCK_FLUX(kq);
492 list->kl_lock(list->kl_lockarg);
493 }
494 list->kl_unlock(list->kl_lockarg);
495 }
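/*
 * Illustrative userland sketch (not part of this file): the NOTE_TRACK
 * path serviced by knote_fork() above.  Each new child is reported as
 * an event with NOTE_CHILD set in fflags and the parent's pid in data;
 * if attaching to the child fails, NOTE_TRACKERR is reported instead.
 */
#if 0
	struct kevent kev;
	int kq = kqueue();

	EV_SET(&kev, getpid(), EVFILT_PROC, EV_ADD,
	    NOTE_TRACK | NOTE_FORK | NOTE_EXIT, 0, NULL);
	kevent(kq, &kev, 1, NULL, 0, NULL);
	/* after fork(), an event arrives for the child's pid */
#endif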
496
497 /*
498 * XXX: EVFILT_TIMER should perhaps live in kern_time.c beside the
499 * interval timer support code.
500 */
501 static int
502 timertoticks(intptr_t data)
503 {
504 struct timeval tv;
505 int tticks;
506
507 tv.tv_sec = data / 1000;
508 tv.tv_usec = (data % 1000) * 1000;
509 tticks = tvtohz(&tv);
510
511 return tticks;
512 }
513
514 static void
515 filt_timerexpire(void *knx)
516 {
517 struct knote *kn = knx;
518 struct callout *calloutp;
519
520 kn->kn_data++;
521 KNOTE_ACTIVATE(kn, 0); /* XXX - handle locking */
522
523 /*
524 * timertoticks() uses tvtohz() which always adds 1 to allow
525 * for the time until the next clock interrupt being strictly
526 * less than 1 clock tick. We don't want that here since we
527 * want to appear to be in sync with the clock interrupt even
528 * when we're delayed.
529 */
530 if ((kn->kn_flags & EV_ONESHOT) != EV_ONESHOT) {
531 calloutp = (struct callout *)kn->kn_hook;
532 callout_reset_curcpu(calloutp, timertoticks(kn->kn_sdata) - 1,
533 filt_timerexpire, kn);
534 }
535 }
536
537 /*
    538  * data contains the amount of time to sleep, in milliseconds
539 */
540 static int
541 filt_timerattach(struct knote *kn)
542 {
543 struct callout *calloutp;
544
545 atomic_add_int(&kq_ncallouts, 1);
546
547 if (kq_ncallouts >= kq_calloutmax) {
548 atomic_add_int(&kq_ncallouts, -1);
549 return (ENOMEM);
550 }
551
552 kn->kn_flags |= EV_CLEAR; /* automatically set */
553 kn->kn_status &= ~KN_DETACHED; /* knlist_add usually sets it */
554 calloutp = malloc(sizeof(*calloutp), M_KQUEUE, M_WAITOK);
555 callout_init(calloutp, CALLOUT_MPSAFE);
556 kn->kn_hook = calloutp;
557 callout_reset_curcpu(calloutp, timertoticks(kn->kn_sdata),
558 filt_timerexpire, kn);
559
560 return (0);
561 }
562
563 static void
564 filt_timerdetach(struct knote *kn)
565 {
566 struct callout *calloutp;
567
568 calloutp = (struct callout *)kn->kn_hook;
569 callout_drain(calloutp);
570 free(calloutp, M_KQUEUE);
571 atomic_add_int(&kq_ncallouts, -1);
572 kn->kn_status |= KN_DETACHED; /* knlist_remove usually clears it */
573 }
574
575 static int
576 filt_timer(struct knote *kn, long hint)
577 {
578
579 return (kn->kn_data != 0);
580 }
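/*
 * Illustrative userland sketch (not part of this file): a periodic
 * timer built on the callout plumbing above.  EV_CLEAR is set
 * automatically, so data accumulates the number of expirations since
 * the last retrieval.
 */
#if 0
	struct kevent kev, ev;
	int kq = kqueue();

	EV_SET(&kev, 1, EVFILT_TIMER, EV_ADD, 0, 500, NULL); /* 500 ms */
	kevent(kq, &kev, 1, NULL, 0, NULL);
	for (;;) {
		kevent(kq, NULL, 0, &ev, 1, NULL);
		printf("%jd expiration(s)\n", (intmax_t)ev.data);
	}
#endif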
581
582 static int
583 filt_userattach(struct knote *kn)
584 {
585
586 /*
587 * EVFILT_USER knotes are not attached to anything in the kernel.
588 */
589 kn->kn_hook = NULL;
590 if (kn->kn_fflags & NOTE_TRIGGER)
591 kn->kn_hookid = 1;
592 else
593 kn->kn_hookid = 0;
594 return (0);
595 }
596
597 static void
598 filt_userdetach(__unused struct knote *kn)
599 {
600
601 /*
602 * EVFILT_USER knotes are not attached to anything in the kernel.
603 */
604 }
605
606 static int
607 filt_user(struct knote *kn, __unused long hint)
608 {
609
610 return (kn->kn_hookid);
611 }
612
613 static void
614 filt_usertouch(struct knote *kn, struct kevent *kev, u_long type)
615 {
616 u_int ffctrl;
617
618 switch (type) {
619 case EVENT_REGISTER:
620 if (kev->fflags & NOTE_TRIGGER)
621 kn->kn_hookid = 1;
622
623 ffctrl = kev->fflags & NOTE_FFCTRLMASK;
624 kev->fflags &= NOTE_FFLAGSMASK;
625 switch (ffctrl) {
626 case NOTE_FFNOP:
627 break;
628
629 case NOTE_FFAND:
630 kn->kn_sfflags &= kev->fflags;
631 break;
632
633 case NOTE_FFOR:
634 kn->kn_sfflags |= kev->fflags;
635 break;
636
637 case NOTE_FFCOPY:
638 kn->kn_sfflags = kev->fflags;
639 break;
640
641 default:
642 /* XXX Return error? */
643 break;
644 }
645 kn->kn_sdata = kev->data;
646 if (kev->flags & EV_CLEAR) {
647 kn->kn_hookid = 0;
648 kn->kn_data = 0;
649 kn->kn_fflags = 0;
650 }
651 break;
652
653 case EVENT_PROCESS:
654 *kev = kn->kn_kevent;
655 kev->fflags = kn->kn_sfflags;
656 kev->data = kn->kn_sdata;
657 if (kn->kn_flags & EV_CLEAR) {
658 kn->kn_hookid = 0;
659 kn->kn_data = 0;
660 kn->kn_fflags = 0;
661 }
662 break;
663
664 default:
665 panic("filt_usertouch() - invalid type (%ld)", type);
666 break;
667 }
668 }
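/*
 * Illustrative userland sketch (not part of this file): EVFILT_USER as
 * implemented above.  One thread arms the event; another fires it with
 * NOTE_TRIGGER, waking any kevent() sleeper on the queue.
 */
#if 0
	struct kevent kev;
	int kq = kqueue();

	EV_SET(&kev, 1, EVFILT_USER, EV_ADD | EV_CLEAR, 0, 0, NULL);
	kevent(kq, &kev, 1, NULL, 0, NULL);		/* arm */
	/* ... later, possibly from another thread ... */
	EV_SET(&kev, 1, EVFILT_USER, 0, NOTE_TRIGGER, 0, NULL);
	kevent(kq, &kev, 1, NULL, 0, NULL);		/* fire */
#endif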
669
670 int
671 kqueue(struct thread *td, struct kqueue_args *uap)
672 {
673 struct filedesc *fdp;
674 struct kqueue *kq;
675 struct file *fp;
676 int fd, error;
677
678 fdp = td->td_proc->p_fd;
679 error = falloc(td, &fp, &fd);
680 if (error)
681 goto done2;
682
    683 	/* An extra reference on `fp' has been held for us by falloc(). */
684 kq = malloc(sizeof *kq, M_KQUEUE, M_WAITOK | M_ZERO);
685 mtx_init(&kq->kq_lock, "kqueue", NULL, MTX_DEF|MTX_DUPOK);
686 TAILQ_INIT(&kq->kq_head);
687 kq->kq_fdp = fdp;
688 knlist_init_mtx(&kq->kq_sel.si_note, &kq->kq_lock);
689 TASK_INIT(&kq->kq_task, 0, kqueue_task, kq);
690
691 FILEDESC_XLOCK(fdp);
692 SLIST_INSERT_HEAD(&fdp->fd_kqlist, kq, kq_list);
693 FILEDESC_XUNLOCK(fdp);
694
695 finit(fp, FREAD | FWRITE, DTYPE_KQUEUE, kq, &kqueueops);
696 fdrop(fp, td);
697
698 td->td_retval[0] = fd;
699 done2:
700 return (error);
701 }
702
703 #ifndef _SYS_SYSPROTO_H_
704 struct kevent_args {
705 int fd;
706 const struct kevent *changelist;
707 int nchanges;
708 struct kevent *eventlist;
709 int nevents;
710 const struct timespec *timeout;
711 };
712 #endif
713 int
714 kevent(struct thread *td, struct kevent_args *uap)
715 {
716 struct timespec ts, *tsp;
717 struct kevent_copyops k_ops = { uap,
718 kevent_copyout,
719 kevent_copyin};
720 int error;
721 #ifdef KTRACE
722 struct uio ktruio;
723 struct iovec ktriov;
724 struct uio *ktruioin = NULL;
725 struct uio *ktruioout = NULL;
726 #endif
727
728 if (uap->timeout != NULL) {
729 error = copyin(uap->timeout, &ts, sizeof(ts));
730 if (error)
731 return (error);
732 tsp = &ts;
733 } else
734 tsp = NULL;
735
736 #ifdef KTRACE
737 if (KTRPOINT(td, KTR_GENIO)) {
738 ktriov.iov_base = uap->changelist;
739 ktriov.iov_len = uap->nchanges * sizeof(struct kevent);
740 ktruio = (struct uio){ .uio_iov = &ktriov, .uio_iovcnt = 1,
741 .uio_segflg = UIO_USERSPACE, .uio_rw = UIO_READ,
742 .uio_td = td };
743 ktruioin = cloneuio(&ktruio);
744 ktriov.iov_base = uap->eventlist;
745 ktriov.iov_len = uap->nevents * sizeof(struct kevent);
746 ktruioout = cloneuio(&ktruio);
747 }
748 #endif
749
750 error = kern_kevent(td, uap->fd, uap->nchanges, uap->nevents,
751 &k_ops, tsp);
752
753 #ifdef KTRACE
754 if (ktruioin != NULL) {
755 ktruioin->uio_resid = uap->nchanges * sizeof(struct kevent);
756 ktrgenio(uap->fd, UIO_WRITE, ktruioin, 0);
757 ktruioout->uio_resid = td->td_retval[0] * sizeof(struct kevent);
758 ktrgenio(uap->fd, UIO_READ, ktruioout, error);
759 }
760 #endif
761
762 return (error);
763 }
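/*
 * Illustrative userland sketch (not part of this file): the two system
 * calls above as seen from a minimal consumer, built as a standalone
 * program.  It waits for stdin to become readable; a single kevent()
 * call both applies the change and collects the event.
 */
#if 0
#include <sys/types.h>
#include <sys/event.h>
#include <sys/time.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

int
main(void)
{
	struct kevent change, ev;
	int kq, n;

	if ((kq = kqueue()) == -1)
		return (1);
	EV_SET(&change, STDIN_FILENO, EVFILT_READ, EV_ADD, 0, 0, NULL);
	n = kevent(kq, &change, 1, &ev, 1, NULL);
	if (n > 0)
		printf("fd %ju readable, %jd bytes pending\n",
		    (uintmax_t)ev.ident, (intmax_t)ev.data);
	close(kq);
	return (0);
}
#endif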
764
765 /*
766 * Copy 'count' items into the destination list pointed to by uap->eventlist.
767 */
768 static int
769 kevent_copyout(void *arg, struct kevent *kevp, int count)
770 {
771 struct kevent_args *uap;
772 int error;
773
774 KASSERT(count <= KQ_NEVENTS, ("count (%d) > KQ_NEVENTS", count));
775 uap = (struct kevent_args *)arg;
776
777 error = copyout(kevp, uap->eventlist, count * sizeof *kevp);
778 if (error == 0)
779 uap->eventlist += count;
780 return (error);
781 }
782
783 /*
784 * Copy 'count' items from the list pointed to by uap->changelist.
785 */
786 static int
787 kevent_copyin(void *arg, struct kevent *kevp, int count)
788 {
789 struct kevent_args *uap;
790 int error;
791
792 KASSERT(count <= KQ_NEVENTS, ("count (%d) > KQ_NEVENTS", count));
793 uap = (struct kevent_args *)arg;
794
795 error = copyin(uap->changelist, kevp, count * sizeof *kevp);
796 if (error == 0)
797 uap->changelist += count;
798 return (error);
799 }
800
801 int
802 kern_kevent(struct thread *td, int fd, int nchanges, int nevents,
803 struct kevent_copyops *k_ops, const struct timespec *timeout)
804 {
805 struct kevent keva[KQ_NEVENTS];
806 struct kevent *kevp, *changes;
807 struct kqueue *kq;
808 struct file *fp;
809 int i, n, nerrors, error;
810
811 if ((error = fget(td, fd, &fp)) != 0)
812 return (error);
813 if ((error = kqueue_acquire(fp, &kq)) != 0)
814 goto done_norel;
815
816 nerrors = 0;
817
818 while (nchanges > 0) {
819 n = nchanges > KQ_NEVENTS ? KQ_NEVENTS : nchanges;
820 error = k_ops->k_copyin(k_ops->arg, keva, n);
821 if (error)
822 goto done;
823 changes = keva;
824 for (i = 0; i < n; i++) {
825 kevp = &changes[i];
826 if (!kevp->filter)
827 continue;
828 kevp->flags &= ~EV_SYSFLAGS;
829 error = kqueue_register(kq, kevp, td, 1);
830 if (error || (kevp->flags & EV_RECEIPT)) {
831 if (nevents != 0) {
832 kevp->flags = EV_ERROR;
833 kevp->data = error;
834 (void) k_ops->k_copyout(k_ops->arg,
835 kevp, 1);
836 nevents--;
837 nerrors++;
838 } else {
839 goto done;
840 }
841 }
842 }
843 nchanges -= n;
844 }
845 if (nerrors) {
846 td->td_retval[0] = nerrors;
847 error = 0;
848 goto done;
849 }
850
851 error = kqueue_scan(kq, nevents, k_ops, timeout, keva, td);
852 done:
853 kqueue_release(kq, 0);
854 done_norel:
855 fdrop(fp, td);
856 return (error);
857 }
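/*
 * Sketch of a hypothetical in-kernel caller: by supplying its own
 * kevent_copyops, kern_kevent() can be driven from kernel-resident
 * buffers instead of copyin()/copyout(); compatibility layers reuse
 * this path that way.  A real implementation would advance `arg' on
 * each call, as kevent_copyin()/kevent_copyout() do above.
 */
#if 0
static int
kcopy_in(void *arg, struct kevent *kevp, int count)
{
	bcopy(arg, kevp, count * sizeof(*kevp));
	return (0);
}

static int
kcopy_out(void *arg, struct kevent *kevp, int count)
{
	bcopy(kevp, arg, count * sizeof(*kevp));
	return (0);
}

	struct kevent_copyops k_ops = { kbuf, kcopy_out, kcopy_in };
	error = kern_kevent(td, fd, nchanges, nevents, &k_ops, NULL);
#endif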
858
859 int
860 kqueue_add_filteropts(int filt, struct filterops *filtops)
861 {
862 int error;
863
864 error = 0;
865 if (filt > 0 || filt + EVFILT_SYSCOUNT < 0) {
866 printf(
867 "trying to add a filterop that is out of range: %d is beyond %d\n",
868 ~filt, EVFILT_SYSCOUNT);
869 return EINVAL;
870 }
871 mtx_lock(&filterops_lock);
872 if (sysfilt_ops[~filt].for_fop != &null_filtops &&
873 sysfilt_ops[~filt].for_fop != NULL)
874 error = EEXIST;
875 else {
876 sysfilt_ops[~filt].for_fop = filtops;
877 sysfilt_ops[~filt].for_refcnt = 0;
878 }
879 mtx_unlock(&filterops_lock);
880
881 return (error);
882 }
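/*
 * Sketch (hypothetical, for illustration): how a module could claim a
 * free filter slot.  EVFILT_* values are negative, so ~filt indexes
 * sysfilt_ops, and slots still holding null_filtops are free.
 * EVFILT_EXAMPLE stands in for a real negative filter constant.
 */
#if 0
	static struct filterops example_filtops =
		{ 0, filt_nullattach, NULL, NULL };

	error = kqueue_add_filteropts(EVFILT_EXAMPLE, &example_filtops);
	/* ... filter in use ... */
	error = kqueue_del_filteropts(EVFILT_EXAMPLE);
#endif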
883
884 int
885 kqueue_del_filteropts(int filt)
886 {
887 int error;
888
889 error = 0;
890 if (filt > 0 || filt + EVFILT_SYSCOUNT < 0)
891 return EINVAL;
892
893 mtx_lock(&filterops_lock);
894 if (sysfilt_ops[~filt].for_fop == &null_filtops ||
895 sysfilt_ops[~filt].for_fop == NULL)
896 error = EINVAL;
897 else if (sysfilt_ops[~filt].for_refcnt != 0)
898 error = EBUSY;
899 else {
900 sysfilt_ops[~filt].for_fop = &null_filtops;
901 sysfilt_ops[~filt].for_refcnt = 0;
902 }
903 mtx_unlock(&filterops_lock);
904
905 return error;
906 }
907
908 static struct filterops *
909 kqueue_fo_find(int filt)
910 {
911
912 if (filt > 0 || filt + EVFILT_SYSCOUNT < 0)
913 return NULL;
914
915 mtx_lock(&filterops_lock);
916 sysfilt_ops[~filt].for_refcnt++;
917 if (sysfilt_ops[~filt].for_fop == NULL)
918 sysfilt_ops[~filt].for_fop = &null_filtops;
919 mtx_unlock(&filterops_lock);
920
921 return sysfilt_ops[~filt].for_fop;
922 }
923
924 static void
925 kqueue_fo_release(int filt)
926 {
927
928 if (filt > 0 || filt + EVFILT_SYSCOUNT < 0)
929 return;
930
931 mtx_lock(&filterops_lock);
932 KASSERT(sysfilt_ops[~filt].for_refcnt > 0,
933 ("filter object refcount not valid on release"));
934 sysfilt_ops[~filt].for_refcnt--;
935 mtx_unlock(&filterops_lock);
936 }
937
938 /*
    939  * A ref to kq (obtained via kqueue_acquire) must be held.  waitok
    940  * controls whether memory allocations may sleep; make sure it is 0 if
    941  * you hold any mutexes.
942 */
943 static int
944 kqueue_register(struct kqueue *kq, struct kevent *kev, struct thread *td, int waitok)
945 {
946 struct filterops *fops;
947 struct file *fp;
948 struct knote *kn, *tkn;
949 int error, filt, event;
950 int haskqglobal;
951
952 fp = NULL;
953 kn = NULL;
954 error = 0;
955 haskqglobal = 0;
956
957 filt = kev->filter;
958 fops = kqueue_fo_find(filt);
959 if (fops == NULL)
960 return EINVAL;
961
962 tkn = knote_alloc(waitok); /* prevent waiting with locks */
963
964 findkn:
965 if (fops->f_isfd) {
966 KASSERT(td != NULL, ("td is NULL"));
967 error = fget(td, kev->ident, &fp);
968 if (error)
969 goto done;
970
971 if ((kev->flags & EV_ADD) == EV_ADD && kqueue_expand(kq, fops,
972 kev->ident, 0) != 0) {
973 /* try again */
974 fdrop(fp, td);
975 fp = NULL;
976 error = kqueue_expand(kq, fops, kev->ident, waitok);
977 if (error)
978 goto done;
979 goto findkn;
980 }
981
982 if (fp->f_type == DTYPE_KQUEUE) {
983 /*
    984 			 * if we add some intelligence about what we are doing,
985 * we should be able to support events on ourselves.
986 * We need to know when we are doing this to prevent
987 * getting both the knlist lock and the kq lock since
988 * they are the same thing.
989 */
990 if (fp->f_data == kq) {
991 error = EINVAL;
992 goto done;
993 }
994
995 KQ_GLOBAL_LOCK(&kq_global, haskqglobal);
996 }
997
998 KQ_LOCK(kq);
999 if (kev->ident < kq->kq_knlistsize) {
1000 SLIST_FOREACH(kn, &kq->kq_knlist[kev->ident], kn_link)
1001 if (kev->filter == kn->kn_filter)
1002 break;
1003 }
1004 } else {
1005 if ((kev->flags & EV_ADD) == EV_ADD)
1006 kqueue_expand(kq, fops, kev->ident, waitok);
1007
1008 KQ_LOCK(kq);
1009 if (kq->kq_knhashmask != 0) {
1010 struct klist *list;
1011
1012 list = &kq->kq_knhash[
1013 KN_HASH((u_long)kev->ident, kq->kq_knhashmask)];
1014 SLIST_FOREACH(kn, list, kn_link)
1015 if (kev->ident == kn->kn_id &&
1016 kev->filter == kn->kn_filter)
1017 break;
1018 }
1019 }
1020
    1021 	/* The knote is in the process of changing; wait for it to stabilize. */
1022 if (kn != NULL && (kn->kn_status & KN_INFLUX) == KN_INFLUX) {
1023 KQ_GLOBAL_UNLOCK(&kq_global, haskqglobal);
1024 kq->kq_state |= KQ_FLUXWAIT;
1025 msleep(kq, &kq->kq_lock, PSOCK | PDROP, "kqflxwt", 0);
1026 if (fp != NULL) {
1027 fdrop(fp, td);
1028 fp = NULL;
1029 }
1030 goto findkn;
1031 }
1032
1033 /*
1034 * kn now contains the matching knote, or NULL if no match
1035 */
1036 if (kn == NULL) {
1037 if (kev->flags & EV_ADD) {
1038 kn = tkn;
1039 tkn = NULL;
1040 if (kn == NULL) {
1041 KQ_UNLOCK(kq);
1042 error = ENOMEM;
1043 goto done;
1044 }
1045 kn->kn_fp = fp;
1046 kn->kn_kq = kq;
1047 kn->kn_fop = fops;
1048 /*
1049 * apply reference counts to knote structure, and
1050 * do not release it at the end of this routine.
1051 */
1052 fops = NULL;
1053 fp = NULL;
1054
1055 kn->kn_sfflags = kev->fflags;
1056 kn->kn_sdata = kev->data;
1057 kev->fflags = 0;
1058 kev->data = 0;
1059 kn->kn_kevent = *kev;
1060 kn->kn_kevent.flags &= ~(EV_ADD | EV_DELETE |
1061 EV_ENABLE | EV_DISABLE);
1062 kn->kn_status = KN_INFLUX|KN_DETACHED;
1063
1064 error = knote_attach(kn, kq);
1065 KQ_UNLOCK(kq);
1066 if (error != 0) {
1067 tkn = kn;
1068 goto done;
1069 }
1070
1071 if ((error = kn->kn_fop->f_attach(kn)) != 0) {
1072 knote_drop(kn, td);
1073 goto done;
1074 }
1075 KN_LIST_LOCK(kn);
1076 goto done_ev_add;
1077 } else {
1078 /* No matching knote and the EV_ADD flag is not set. */
1079 KQ_UNLOCK(kq);
1080 error = ENOENT;
1081 goto done;
1082 }
1083 }
1084
1085 if (kev->flags & EV_DELETE) {
1086 kn->kn_status |= KN_INFLUX;
1087 KQ_UNLOCK(kq);
1088 if (!(kn->kn_status & KN_DETACHED))
1089 kn->kn_fop->f_detach(kn);
1090 knote_drop(kn, td);
1091 goto done;
1092 }
1093
1094 /*
1095 * The user may change some filter values after the initial EV_ADD,
1096 * but doing so will not reset any filter which has already been
1097 * triggered.
1098 */
1099 kn->kn_status |= KN_INFLUX;
1100 KQ_UNLOCK(kq);
1101 KN_LIST_LOCK(kn);
1102 kn->kn_kevent.udata = kev->udata;
1103 if (!fops->f_isfd && fops->f_touch != NULL) {
1104 fops->f_touch(kn, kev, EVENT_REGISTER);
1105 } else {
1106 kn->kn_sfflags = kev->fflags;
1107 kn->kn_sdata = kev->data;
1108 }
1109
1110 /*
1111 * We can get here with kn->kn_knlist == NULL. This can happen when
1112 * the initial attach event decides that the event is "completed"
    1113 	 * already, e.g. when filt_procattach is called on a zombie process.
    1114 	 * It will call filt_proc, which will remove the knote from the list
    1115 	 * and NULL out kn_knlist.
1116 */
1117 done_ev_add:
1118 event = kn->kn_fop->f_event(kn, 0);
1119 KQ_LOCK(kq);
1120 if (event)
1121 KNOTE_ACTIVATE(kn, 1);
1122 kn->kn_status &= ~KN_INFLUX;
1123 KN_LIST_UNLOCK(kn);
1124
1125 if ((kev->flags & EV_DISABLE) &&
1126 ((kn->kn_status & KN_DISABLED) == 0)) {
1127 kn->kn_status |= KN_DISABLED;
1128 }
1129
1130 if ((kev->flags & EV_ENABLE) && (kn->kn_status & KN_DISABLED)) {
1131 kn->kn_status &= ~KN_DISABLED;
1132 if ((kn->kn_status & KN_ACTIVE) &&
1133 ((kn->kn_status & KN_QUEUED) == 0))
1134 knote_enqueue(kn);
1135 }
1136 KQ_UNLOCK_FLUX(kq);
1137
1138 done:
1139 KQ_GLOBAL_UNLOCK(&kq_global, haskqglobal);
1140 if (fp != NULL)
1141 fdrop(fp, td);
1142 if (tkn != NULL)
1143 knote_free(tkn);
1144 if (fops != NULL)
1145 kqueue_fo_release(filt);
1146 return (error);
1147 }
1148
1149 static int
1150 kqueue_acquire(struct file *fp, struct kqueue **kqp)
1151 {
1152 int error;
1153 struct kqueue *kq;
1154
1155 error = 0;
1156
1157 kq = fp->f_data;
1158 if (fp->f_type != DTYPE_KQUEUE || kq == NULL)
1159 return (EBADF);
1160 *kqp = kq;
1161 KQ_LOCK(kq);
1162 if ((kq->kq_state & KQ_CLOSING) == KQ_CLOSING) {
1163 KQ_UNLOCK(kq);
1164 return (EBADF);
1165 }
1166 kq->kq_refcnt++;
1167 KQ_UNLOCK(kq);
1168
1169 return error;
1170 }
1171
1172 static void
1173 kqueue_release(struct kqueue *kq, int locked)
1174 {
1175 if (locked)
1176 KQ_OWNED(kq);
1177 else
1178 KQ_LOCK(kq);
1179 kq->kq_refcnt--;
1180 if (kq->kq_refcnt == 1)
1181 wakeup(&kq->kq_refcnt);
1182 if (!locked)
1183 KQ_UNLOCK(kq);
1184 }
1185
1186 static void
1187 kqueue_schedtask(struct kqueue *kq)
1188 {
1189
1190 KQ_OWNED(kq);
1191 KASSERT(((kq->kq_state & KQ_TASKDRAIN) != KQ_TASKDRAIN),
1192 ("scheduling kqueue task while draining"));
1193
1194 if ((kq->kq_state & KQ_TASKSCHED) != KQ_TASKSCHED) {
1195 taskqueue_enqueue(taskqueue_kqueue, &kq->kq_task);
1196 kq->kq_state |= KQ_TASKSCHED;
1197 }
1198 }
1199
1200 /*
    1201  * Expand the kq to make sure we have storage for the fops/ident pair.
1202 *
1203 * Return 0 on success (or no work necessary), return errno on failure.
1204 *
1205 * Not calling hashinit w/ waitok (proper malloc flag) should be safe.
    1206  * If kqueue_register is called from a non-fd context, there usually
    1207  * should be no locks held.
1208 */
1209 static int
1210 kqueue_expand(struct kqueue *kq, struct filterops *fops, uintptr_t ident,
1211 int waitok)
1212 {
1213 struct klist *list, *tmp_knhash, *to_free;
1214 u_long tmp_knhashmask;
1215 int size;
1216 int fd;
1217 int mflag = waitok ? M_WAITOK : M_NOWAIT;
1218
1219 KQ_NOTOWNED(kq);
1220
1221 to_free = NULL;
1222 if (fops->f_isfd) {
1223 fd = ident;
1224 if (kq->kq_knlistsize <= fd) {
1225 size = kq->kq_knlistsize;
1226 while (size <= fd)
1227 size += KQEXTENT;
1228 list = malloc(size * sizeof list, M_KQUEUE, mflag);
1229 if (list == NULL)
1230 return ENOMEM;
1231 KQ_LOCK(kq);
1232 if (kq->kq_knlistsize > fd) {
1233 to_free = list;
1234 list = NULL;
1235 } else {
1236 if (kq->kq_knlist != NULL) {
1237 bcopy(kq->kq_knlist, list,
1238 kq->kq_knlistsize * sizeof list);
1239 to_free = kq->kq_knlist;
1240 kq->kq_knlist = NULL;
1241 }
1242 bzero((caddr_t)list +
1243 kq->kq_knlistsize * sizeof list,
1244 (size - kq->kq_knlistsize) * sizeof list);
1245 kq->kq_knlistsize = size;
1246 kq->kq_knlist = list;
1247 }
1248 KQ_UNLOCK(kq);
1249 }
1250 } else {
1251 if (kq->kq_knhashmask == 0) {
1252 tmp_knhash = hashinit(KN_HASHSIZE, M_KQUEUE,
1253 &tmp_knhashmask);
1254 if (tmp_knhash == NULL)
1255 return ENOMEM;
1256 KQ_LOCK(kq);
1257 if (kq->kq_knhashmask == 0) {
1258 kq->kq_knhash = tmp_knhash;
1259 kq->kq_knhashmask = tmp_knhashmask;
1260 } else {
1261 to_free = tmp_knhash;
1262 }
1263 KQ_UNLOCK(kq);
1264 }
1265 }
1266 free(to_free, M_KQUEUE);
1267
1268 KQ_NOTOWNED(kq);
1269 return 0;
1270 }
1271
1272 static void
1273 kqueue_task(void *arg, int pending)
1274 {
1275 struct kqueue *kq;
1276 int haskqglobal;
1277
1278 haskqglobal = 0;
1279 kq = arg;
1280
1281 KQ_GLOBAL_LOCK(&kq_global, haskqglobal);
1282 KQ_LOCK(kq);
1283
1284 KNOTE_LOCKED(&kq->kq_sel.si_note, 0);
1285
1286 kq->kq_state &= ~KQ_TASKSCHED;
1287 if ((kq->kq_state & KQ_TASKDRAIN) == KQ_TASKDRAIN) {
1288 wakeup(&kq->kq_state);
1289 }
1290 KQ_UNLOCK(kq);
1291 KQ_GLOBAL_UNLOCK(&kq_global, haskqglobal);
1292 }
1293
1294 /*
1295 * Scan, update kn_data (if not ONESHOT), and copyout triggered events.
1296 * We treat KN_MARKER knotes as if they are INFLUX.
1297 */
1298 static int
1299 kqueue_scan(struct kqueue *kq, int maxevents, struct kevent_copyops *k_ops,
1300 const struct timespec *tsp, struct kevent *keva, struct thread *td)
1301 {
1302 struct kevent *kevp;
1303 struct timeval atv, rtv, ttv;
1304 struct knote *kn, *marker;
1305 int count, timeout, nkev, error, influx;
1306 int haskqglobal, touch;
1307
1308 count = maxevents;
1309 nkev = 0;
1310 error = 0;
1311 haskqglobal = 0;
1312
1313 if (maxevents == 0)
1314 goto done_nl;
1315
1316 if (tsp != NULL) {
1317 TIMESPEC_TO_TIMEVAL(&atv, tsp);
1318 if (itimerfix(&atv)) {
1319 error = EINVAL;
1320 goto done_nl;
1321 }
1322 if (tsp->tv_sec == 0 && tsp->tv_nsec == 0)
1323 timeout = -1;
1324 else
1325 timeout = atv.tv_sec > 24 * 60 * 60 ?
1326 24 * 60 * 60 * hz : tvtohz(&atv);
1327 getmicrouptime(&rtv);
1328 timevaladd(&atv, &rtv);
1329 } else {
1330 atv.tv_sec = 0;
1331 atv.tv_usec = 0;
1332 timeout = 0;
1333 }
1334 marker = knote_alloc(1);
1335 if (marker == NULL) {
1336 error = ENOMEM;
1337 goto done_nl;
1338 }
1339 marker->kn_status = KN_MARKER;
1340 KQ_LOCK(kq);
1341 goto start;
1342
1343 retry:
1344 if (atv.tv_sec || atv.tv_usec) {
1345 getmicrouptime(&rtv);
1346 if (timevalcmp(&rtv, &atv, >=))
1347 goto done;
1348 ttv = atv;
1349 timevalsub(&ttv, &rtv);
1350 timeout = ttv.tv_sec > 24 * 60 * 60 ?
1351 24 * 60 * 60 * hz : tvtohz(&ttv);
1352 }
1353
1354 start:
1355 kevp = keva;
1356 if (kq->kq_count == 0) {
1357 if (timeout < 0) {
1358 error = EWOULDBLOCK;
1359 } else {
1360 kq->kq_state |= KQ_SLEEP;
1361 error = msleep(kq, &kq->kq_lock, PSOCK | PCATCH,
1362 "kqread", timeout);
1363 }
1364 if (error == 0)
1365 goto retry;
1366 /* don't restart after signals... */
1367 if (error == ERESTART)
1368 error = EINTR;
1369 else if (error == EWOULDBLOCK)
1370 error = 0;
1371 goto done;
1372 }
1373
1374 TAILQ_INSERT_TAIL(&kq->kq_head, marker, kn_tqe);
1375 influx = 0;
1376 while (count) {
1377 KQ_OWNED(kq);
1378 kn = TAILQ_FIRST(&kq->kq_head);
1379
1380 if ((kn->kn_status == KN_MARKER && kn != marker) ||
1381 (kn->kn_status & KN_INFLUX) == KN_INFLUX) {
1382 if (influx) {
1383 influx = 0;
1384 KQ_FLUX_WAKEUP(kq);
1385 }
1386 kq->kq_state |= KQ_FLUXWAIT;
1387 error = msleep(kq, &kq->kq_lock, PSOCK,
1388 "kqflxwt", 0);
1389 continue;
1390 }
1391
1392 TAILQ_REMOVE(&kq->kq_head, kn, kn_tqe);
1393 if ((kn->kn_status & KN_DISABLED) == KN_DISABLED) {
1394 kn->kn_status &= ~KN_QUEUED;
1395 kq->kq_count--;
1396 continue;
1397 }
1398 if (kn == marker) {
1399 KQ_FLUX_WAKEUP(kq);
1400 if (count == maxevents)
1401 goto retry;
1402 goto done;
1403 }
1404 KASSERT((kn->kn_status & KN_INFLUX) == 0,
1405 ("KN_INFLUX set when not suppose to be"));
1406
1407 if ((kn->kn_flags & EV_ONESHOT) == EV_ONESHOT) {
1408 kn->kn_status &= ~KN_QUEUED;
1409 kn->kn_status |= KN_INFLUX;
1410 kq->kq_count--;
1411 KQ_UNLOCK(kq);
1412 /*
1413 * We don't need to lock the list since we've marked
1414 * it _INFLUX.
1415 */
1416 *kevp = kn->kn_kevent;
1417 if (!(kn->kn_status & KN_DETACHED))
1418 kn->kn_fop->f_detach(kn);
1419 knote_drop(kn, td);
1420 KQ_LOCK(kq);
1421 kn = NULL;
1422 } else {
1423 kn->kn_status |= KN_INFLUX;
1424 KQ_UNLOCK(kq);
1425 if ((kn->kn_status & KN_KQUEUE) == KN_KQUEUE)
1426 KQ_GLOBAL_LOCK(&kq_global, haskqglobal);
1427 KN_LIST_LOCK(kn);
1428 if (kn->kn_fop->f_event(kn, 0) == 0) {
1429 KQ_LOCK(kq);
1430 KQ_GLOBAL_UNLOCK(&kq_global, haskqglobal);
1431 kn->kn_status &=
1432 ~(KN_QUEUED | KN_ACTIVE | KN_INFLUX);
1433 kq->kq_count--;
1434 KN_LIST_UNLOCK(kn);
1435 influx = 1;
1436 continue;
1437 }
1438 touch = (!kn->kn_fop->f_isfd &&
1439 kn->kn_fop->f_touch != NULL);
1440 if (touch)
1441 kn->kn_fop->f_touch(kn, kevp, EVENT_PROCESS);
1442 else
1443 *kevp = kn->kn_kevent;
1444 KQ_LOCK(kq);
1445 KQ_GLOBAL_UNLOCK(&kq_global, haskqglobal);
1446 if (kn->kn_flags & (EV_CLEAR | EV_DISPATCH)) {
1447 /*
    1448 				 * Manually clear knotes that weren't
1449 * 'touch'ed.
1450 */
1451 if (touch == 0 && kn->kn_flags & EV_CLEAR) {
1452 kn->kn_data = 0;
1453 kn->kn_fflags = 0;
1454 }
1455 if (kn->kn_flags & EV_DISPATCH)
1456 kn->kn_status |= KN_DISABLED;
1457 kn->kn_status &= ~(KN_QUEUED | KN_ACTIVE);
1458 kq->kq_count--;
1459 } else
1460 TAILQ_INSERT_TAIL(&kq->kq_head, kn, kn_tqe);
1461
1462 kn->kn_status &= ~(KN_INFLUX);
1463 KN_LIST_UNLOCK(kn);
1464 influx = 1;
1465 }
1466
1467 /* we are returning a copy to the user */
1468 kevp++;
1469 nkev++;
1470 count--;
1471
1472 if (nkev == KQ_NEVENTS) {
1473 influx = 0;
1474 KQ_UNLOCK_FLUX(kq);
1475 error = k_ops->k_copyout(k_ops->arg, keva, nkev);
1476 nkev = 0;
1477 kevp = keva;
1478 KQ_LOCK(kq);
1479 if (error)
1480 break;
1481 }
1482 }
1483 TAILQ_REMOVE(&kq->kq_head, marker, kn_tqe);
1484 done:
1485 KQ_OWNED(kq);
1486 KQ_UNLOCK_FLUX(kq);
1487 knote_free(marker);
1488 done_nl:
1489 KQ_NOTOWNED(kq);
1490 if (nkev != 0)
1491 error = k_ops->k_copyout(k_ops->arg, keva, nkev);
1492 td->td_retval[0] = maxevents - count;
1493 return (error);
1494 }
1495
1496 /*
1497 * XXX
1498 * This could be expanded to call kqueue_scan, if desired.
1499 */
1500 /*ARGSUSED*/
1501 static int
1502 kqueue_read(struct file *fp, struct uio *uio, struct ucred *active_cred,
1503 int flags, struct thread *td)
1504 {
1505 return (ENXIO);
1506 }
1507
1508 /*ARGSUSED*/
1509 static int
1510 kqueue_write(struct file *fp, struct uio *uio, struct ucred *active_cred,
1511 int flags, struct thread *td)
1512 {
1513 return (ENXIO);
1514 }
1515
1516 /*ARGSUSED*/
1517 static int
1518 kqueue_truncate(struct file *fp, off_t length, struct ucred *active_cred,
1519 struct thread *td)
1520 {
1521
1522 return (EINVAL);
1523 }
1524
1525 /*ARGSUSED*/
1526 static int
1527 kqueue_ioctl(struct file *fp, u_long cmd, void *data,
1528 struct ucred *active_cred, struct thread *td)
1529 {
1530 /*
1531 * Enabling sigio causes two major problems:
1532 * 1) infinite recursion:
    1533 	 * Synopsis: kevent is being used to track signals and has FIOASYNC
1534 * set. On receipt of a signal this will cause a kqueue to recurse
1535 * into itself over and over. Sending the sigio causes the kqueue
1536 * to become ready, which in turn posts sigio again, forever.
1537 * Solution: this can be solved by setting a flag in the kqueue that
1538 * we have a SIGIO in progress.
1539 * 2) locking problems:
    1540 	 * Synopsis: Kqueue is a leaf subsystem, but adding signalling puts
1541 * us above the proc and pgrp locks.
1542 * Solution: Post a signal using an async mechanism, being sure to
1543 * record a generation count in the delivery so that we do not deliver
1544 * a signal to the wrong process.
1545 *
1546 * Note, these two mechanisms are somewhat mutually exclusive!
1547 */
1548 #if 0
1549 struct kqueue *kq;
1550
1551 kq = fp->f_data;
1552 switch (cmd) {
1553 case FIOASYNC:
1554 if (*(int *)data) {
1555 kq->kq_state |= KQ_ASYNC;
1556 } else {
1557 kq->kq_state &= ~KQ_ASYNC;
1558 }
1559 return (0);
1560
1561 case FIOSETOWN:
1562 return (fsetown(*(int *)data, &kq->kq_sigio));
1563
1564 case FIOGETOWN:
1565 *(int *)data = fgetown(&kq->kq_sigio);
1566 return (0);
1567 }
1568 #endif
1569
1570 return (ENOTTY);
1571 }
1572
1573 /*ARGSUSED*/
1574 static int
1575 kqueue_poll(struct file *fp, int events, struct ucred *active_cred,
1576 struct thread *td)
1577 {
1578 struct kqueue *kq;
1579 int revents = 0;
1580 int error;
1581
1582 if ((error = kqueue_acquire(fp, &kq)))
1583 return POLLERR;
1584
1585 KQ_LOCK(kq);
1586 if (events & (POLLIN | POLLRDNORM)) {
1587 if (kq->kq_count) {
1588 revents |= events & (POLLIN | POLLRDNORM);
1589 } else {
1590 selrecord(td, &kq->kq_sel);
1591 if (SEL_WAITING(&kq->kq_sel))
1592 kq->kq_state |= KQ_SEL;
1593 }
1594 }
1595 kqueue_release(kq, 1);
1596 KQ_UNLOCK(kq);
1597 return (revents);
1598 }
1599
1600 /*ARGSUSED*/
1601 static int
1602 kqueue_stat(struct file *fp, struct stat *st, struct ucred *active_cred,
1603 struct thread *td)
1604 {
1605
1606 bzero((void *)st, sizeof *st);
1607 /*
1608 * We no longer return kq_count because the unlocked value is useless.
1609 * If you spent all this time getting the count, why not spend your
1610 * syscall better by calling kevent?
1611 *
1612 * XXX - This is needed for libc_r.
1613 */
1614 st->st_mode = S_IFIFO;
1615 return (0);
1616 }
1617
1618 /*ARGSUSED*/
1619 static int
1620 kqueue_close(struct file *fp, struct thread *td)
1621 {
1622 struct kqueue *kq = fp->f_data;
1623 struct filedesc *fdp;
1624 struct knote *kn;
1625 int i;
1626 int error;
1627
1628 if ((error = kqueue_acquire(fp, &kq)))
1629 return error;
1630
1631 KQ_LOCK(kq);
1632
1633 KASSERT((kq->kq_state & KQ_CLOSING) != KQ_CLOSING,
1634 ("kqueue already closing"));
1635 kq->kq_state |= KQ_CLOSING;
1636 if (kq->kq_refcnt > 1)
1637 msleep(&kq->kq_refcnt, &kq->kq_lock, PSOCK, "kqclose", 0);
1638
1639 KASSERT(kq->kq_refcnt == 1, ("other refs are out there!"));
1640 fdp = kq->kq_fdp;
1641
1642 KASSERT(knlist_empty(&kq->kq_sel.si_note),
1643 ("kqueue's knlist not empty"));
1644
1645 for (i = 0; i < kq->kq_knlistsize; i++) {
1646 while ((kn = SLIST_FIRST(&kq->kq_knlist[i])) != NULL) {
1647 if ((kn->kn_status & KN_INFLUX) == KN_INFLUX) {
1648 kq->kq_state |= KQ_FLUXWAIT;
1649 msleep(kq, &kq->kq_lock, PSOCK, "kqclo1", 0);
1650 continue;
1651 }
1652 kn->kn_status |= KN_INFLUX;
1653 KQ_UNLOCK(kq);
1654 if (!(kn->kn_status & KN_DETACHED))
1655 kn->kn_fop->f_detach(kn);
1656 knote_drop(kn, td);
1657 KQ_LOCK(kq);
1658 }
1659 }
1660 if (kq->kq_knhashmask != 0) {
1661 for (i = 0; i <= kq->kq_knhashmask; i++) {
1662 while ((kn = SLIST_FIRST(&kq->kq_knhash[i])) != NULL) {
1663 if ((kn->kn_status & KN_INFLUX) == KN_INFLUX) {
1664 kq->kq_state |= KQ_FLUXWAIT;
1665 msleep(kq, &kq->kq_lock, PSOCK,
1666 "kqclo2", 0);
1667 continue;
1668 }
1669 kn->kn_status |= KN_INFLUX;
1670 KQ_UNLOCK(kq);
1671 if (!(kn->kn_status & KN_DETACHED))
1672 kn->kn_fop->f_detach(kn);
1673 knote_drop(kn, td);
1674 KQ_LOCK(kq);
1675 }
1676 }
1677 }
1678
1679 if ((kq->kq_state & KQ_TASKSCHED) == KQ_TASKSCHED) {
1680 kq->kq_state |= KQ_TASKDRAIN;
1681 msleep(&kq->kq_state, &kq->kq_lock, PSOCK, "kqtqdr", 0);
1682 }
1683
1684 if ((kq->kq_state & KQ_SEL) == KQ_SEL) {
1685 selwakeuppri(&kq->kq_sel, PSOCK);
1686 if (!SEL_WAITING(&kq->kq_sel))
1687 kq->kq_state &= ~KQ_SEL;
1688 }
1689
1690 KQ_UNLOCK(kq);
1691
1692 FILEDESC_XLOCK(fdp);
1693 SLIST_REMOVE(&fdp->fd_kqlist, kq, kqueue, kq_list);
1694 FILEDESC_XUNLOCK(fdp);
1695
1696 seldrain(&kq->kq_sel);
1697 knlist_destroy(&kq->kq_sel.si_note);
1698 mtx_destroy(&kq->kq_lock);
1699 kq->kq_fdp = NULL;
1700
1701 if (kq->kq_knhash != NULL)
1702 free(kq->kq_knhash, M_KQUEUE);
1703 if (kq->kq_knlist != NULL)
1704 free(kq->kq_knlist, M_KQUEUE);
1705
1706 funsetown(&kq->kq_sigio);
1707 free(kq, M_KQUEUE);
1708 fp->f_data = NULL;
1709
1710 return (0);
1711 }
1712
1713 static void
1714 kqueue_wakeup(struct kqueue *kq)
1715 {
1716 KQ_OWNED(kq);
1717
1718 if ((kq->kq_state & KQ_SLEEP) == KQ_SLEEP) {
1719 kq->kq_state &= ~KQ_SLEEP;
1720 wakeup(kq);
1721 }
1722 if ((kq->kq_state & KQ_SEL) == KQ_SEL) {
1723 selwakeuppri(&kq->kq_sel, PSOCK);
1724 if (!SEL_WAITING(&kq->kq_sel))
1725 kq->kq_state &= ~KQ_SEL;
1726 }
1727 if (!knlist_empty(&kq->kq_sel.si_note))
1728 kqueue_schedtask(kq);
1729 if ((kq->kq_state & KQ_ASYNC) == KQ_ASYNC) {
1730 pgsigio(&kq->kq_sigio, SIGIO, 0);
1731 }
1732 }
1733
1734 /*
1735 * Walk down a list of knotes, activating them if their event has triggered.
1736 *
1737 * There is a possibility to optimize in the case of one kq watching another.
1738 * Instead of scheduling a task to wake it up, you could pass enough state
1739 * down the chain to make up the parent kqueue. Make this code functional
1740 * first.
1741 */
1742 void
1743 knote(struct knlist *list, long hint, int lockflags)
1744 {
1745 struct kqueue *kq;
1746 struct knote *kn;
1747 int error;
1748
1749 if (list == NULL)
1750 return;
1751
1752 KNL_ASSERT_LOCK(list, lockflags & KNF_LISTLOCKED);
1753
1754 if ((lockflags & KNF_LISTLOCKED) == 0)
1755 list->kl_lock(list->kl_lockarg);
1756
1757 /*
1758 * If we unlock the list lock (and set KN_INFLUX), we can eliminate
1759 * the kqueue scheduling, but this will introduce four
    1760 	 * lock/unlock pairs for each knote to test.  If we do, continue to
    1761 	 * use SLIST_FOREACH; SLIST_FOREACH_SAFE is not safe in our case, as
    1762 	 * it is only safe if you want to remove the current item, which we
    1763 	 * are not doing.
1764 */
1765 SLIST_FOREACH(kn, &list->kl_list, kn_selnext) {
1766 kq = kn->kn_kq;
1767 if ((kn->kn_status & KN_INFLUX) != KN_INFLUX) {
1768 KQ_LOCK(kq);
1769 if ((kn->kn_status & KN_INFLUX) == KN_INFLUX) {
1770 KQ_UNLOCK(kq);
1771 } else if ((lockflags & KNF_NOKQLOCK) != 0) {
1772 kn->kn_status |= KN_INFLUX;
1773 KQ_UNLOCK(kq);
1774 error = kn->kn_fop->f_event(kn, hint);
1775 KQ_LOCK(kq);
1776 kn->kn_status &= ~KN_INFLUX;
1777 if (error)
1778 KNOTE_ACTIVATE(kn, 1);
1779 KQ_UNLOCK_FLUX(kq);
1780 } else {
1781 kn->kn_status |= KN_HASKQLOCK;
1782 if (kn->kn_fop->f_event(kn, hint))
1783 KNOTE_ACTIVATE(kn, 1);
1784 kn->kn_status &= ~KN_HASKQLOCK;
1785 KQ_UNLOCK(kq);
1786 }
1787 }
1788 kq = NULL;
1789 }
1790 if ((lockflags & KNF_LISTLOCKED) == 0)
1791 list->kl_unlock(list->kl_lockarg);
1792 }
1793
1794 /*
1795 * add a knote to a knlist
1796 */
1797 void
1798 knlist_add(struct knlist *knl, struct knote *kn, int islocked)
1799 {
1800 KNL_ASSERT_LOCK(knl, islocked);
1801 KQ_NOTOWNED(kn->kn_kq);
1802 KASSERT((kn->kn_status & (KN_INFLUX|KN_DETACHED)) ==
1803 (KN_INFLUX|KN_DETACHED), ("knote not KN_INFLUX and KN_DETACHED"));
1804 if (!islocked)
1805 knl->kl_lock(knl->kl_lockarg);
1806 SLIST_INSERT_HEAD(&knl->kl_list, kn, kn_selnext);
1807 if (!islocked)
1808 knl->kl_unlock(knl->kl_lockarg);
1809 KQ_LOCK(kn->kn_kq);
1810 kn->kn_knlist = knl;
1811 kn->kn_status &= ~KN_DETACHED;
1812 KQ_UNLOCK(kn->kn_kq);
1813 }
1814
1815 static void
1816 knlist_remove_kq(struct knlist *knl, struct knote *kn, int knlislocked, int kqislocked)
1817 {
1818 KASSERT(!(!!kqislocked && !knlislocked), ("kq locked w/o knl locked"));
1819 KNL_ASSERT_LOCK(knl, knlislocked);
1820 mtx_assert(&kn->kn_kq->kq_lock, kqislocked ? MA_OWNED : MA_NOTOWNED);
1821 if (!kqislocked)
1822 KASSERT((kn->kn_status & (KN_INFLUX|KN_DETACHED)) == KN_INFLUX,
1823 ("knlist_remove called w/o knote being KN_INFLUX or already removed"));
1824 if (!knlislocked)
1825 knl->kl_lock(knl->kl_lockarg);
1826 SLIST_REMOVE(&knl->kl_list, kn, knote, kn_selnext);
1827 kn->kn_knlist = NULL;
1828 if (!knlislocked)
1829 knl->kl_unlock(knl->kl_lockarg);
1830 if (!kqislocked)
1831 KQ_LOCK(kn->kn_kq);
1832 kn->kn_status |= KN_DETACHED;
1833 if (!kqislocked)
1834 KQ_UNLOCK(kn->kn_kq);
1835 }
1836
1837 /*
    1838  * remove a knote from a specified knlist
1839 */
1840 void
1841 knlist_remove(struct knlist *knl, struct knote *kn, int islocked)
1842 {
1843
1844 knlist_remove_kq(knl, kn, islocked, 0);
1845 }
1846
1847 /*
    1848  * remove a knote from a specified knlist while in an f_event handler.
1849 */
1850 void
1851 knlist_remove_inevent(struct knlist *knl, struct knote *kn)
1852 {
1853
1854 knlist_remove_kq(knl, kn, 1,
1855 (kn->kn_status & KN_HASKQLOCK) == KN_HASKQLOCK);
1856 }
1857
1858 int
1859 knlist_empty(struct knlist *knl)
1860 {
1861 KNL_ASSERT_LOCKED(knl);
1862 return SLIST_EMPTY(&knl->kl_list);
1863 }
1864
1865 static struct mtx knlist_lock;
1866 MTX_SYSINIT(knlist_lock, &knlist_lock, "knlist lock for lockless objects",
1867 MTX_DEF);
1868 static void knlist_mtx_lock(void *arg);
1869 static void knlist_mtx_unlock(void *arg);
1870
1871 static void
1872 knlist_mtx_lock(void *arg)
1873 {
1874 mtx_lock((struct mtx *)arg);
1875 }
1876
1877 static void
1878 knlist_mtx_unlock(void *arg)
1879 {
1880 mtx_unlock((struct mtx *)arg);
1881 }
1882
1883 static void
1884 knlist_mtx_assert_locked(void *arg)
1885 {
1886 mtx_assert((struct mtx *)arg, MA_OWNED);
1887 }
1888
1889 static void
1890 knlist_mtx_assert_unlocked(void *arg)
1891 {
1892 mtx_assert((struct mtx *)arg, MA_NOTOWNED);
1893 }
1894
1895 void
1896 knlist_init(struct knlist *knl, void *lock, void (*kl_lock)(void *),
1897 void (*kl_unlock)(void *),
1898 void (*kl_assert_locked)(void *), void (*kl_assert_unlocked)(void *))
1899 {
1900
1901 if (lock == NULL)
1902 knl->kl_lockarg = &knlist_lock;
1903 else
1904 knl->kl_lockarg = lock;
1905
1906 if (kl_lock == NULL)
1907 knl->kl_lock = knlist_mtx_lock;
1908 else
1909 knl->kl_lock = kl_lock;
1910 if (kl_unlock == NULL)
1911 knl->kl_unlock = knlist_mtx_unlock;
1912 else
1913 knl->kl_unlock = kl_unlock;
1914 if (kl_assert_locked == NULL)
1915 knl->kl_assert_locked = knlist_mtx_assert_locked;
1916 else
1917 knl->kl_assert_locked = kl_assert_locked;
1918 if (kl_assert_unlocked == NULL)
1919 knl->kl_assert_unlocked = knlist_mtx_assert_unlocked;
1920 else
1921 knl->kl_assert_unlocked = kl_assert_unlocked;
1922
1923 SLIST_INIT(&knl->kl_list);
1924 }
1925
1926 void
1927 knlist_init_mtx(struct knlist *knl, struct mtx *lock)
1928 {
1929
1930 knlist_init(knl, lock, NULL, NULL, NULL, NULL);
1931 }
1932
1933 void
1934 knlist_destroy(struct knlist *knl)
1935 {
1936
1937 #ifdef INVARIANTS
1938 /*
1939 * if we run across this error, we need to find the offending
1940 * driver and have it call knlist_clear.
1941 */
1942 if (!SLIST_EMPTY(&knl->kl_list))
1943 printf("WARNING: destroying knlist w/ knotes on it!\n");
1944 #endif
1945
1946 knl->kl_lockarg = knl->kl_lock = knl->kl_unlock = NULL;
1947 SLIST_INIT(&knl->kl_list);
1948 }
1949
1950 /*
1951 * Even if we are locked, we may need to drop the lock to allow any influx
1952 * knotes time to "settle".
1953 */
1954 void
1955 knlist_cleardel(struct knlist *knl, struct thread *td, int islocked, int killkn)
1956 {
1957 struct knote *kn, *kn2;
1958 struct kqueue *kq;
1959
1960 if (islocked)
1961 KNL_ASSERT_LOCKED(knl);
1962 else {
1963 KNL_ASSERT_UNLOCKED(knl);
1964 again: /* need to reacquire lock since we have dropped it */
1965 knl->kl_lock(knl->kl_lockarg);
1966 }
1967
1968 SLIST_FOREACH_SAFE(kn, &knl->kl_list, kn_selnext, kn2) {
1969 kq = kn->kn_kq;
1970 KQ_LOCK(kq);
1971 if ((kn->kn_status & KN_INFLUX)) {
1972 KQ_UNLOCK(kq);
1973 continue;
1974 }
1975 knlist_remove_kq(knl, kn, 1, 1);
1976 if (killkn) {
1977 kn->kn_status |= KN_INFLUX | KN_DETACHED;
1978 KQ_UNLOCK(kq);
1979 knote_drop(kn, td);
1980 } else {
1981 /* Make sure cleared knotes disappear soon */
1982 kn->kn_flags |= (EV_EOF | EV_ONESHOT);
1983 KQ_UNLOCK(kq);
1984 }
1985 kq = NULL;
1986 }
1987
1988 if (!SLIST_EMPTY(&knl->kl_list)) {
1989 /* there are still KN_INFLUX remaining */
1990 kn = SLIST_FIRST(&knl->kl_list);
1991 kq = kn->kn_kq;
1992 KQ_LOCK(kq);
1993 KASSERT(kn->kn_status & KN_INFLUX,
1994 ("knote removed w/o list lock"));
1995 knl->kl_unlock(knl->kl_lockarg);
1996 kq->kq_state |= KQ_FLUXWAIT;
1997 msleep(kq, &kq->kq_lock, PSOCK | PDROP, "kqkclr", 0);
1998 kq = NULL;
1999 goto again;
2000 }
2001
2002 if (islocked)
2003 KNL_ASSERT_LOCKED(knl);
2004 else {
2005 knl->kl_unlock(knl->kl_lockarg);
2006 KNL_ASSERT_UNLOCKED(knl);
2007 }
2008 }
2009
2010 /*
    2011  * Remove all knotes referencing a specified fd; must be called with the
    2012  * FILEDESC lock held.  This prevents a race where a new fd comes along,
    2013  * occupies the entry, and we attach a knote to it.
2014 */
2015 void
2016 knote_fdclose(struct thread *td, int fd)
2017 {
2018 struct filedesc *fdp = td->td_proc->p_fd;
2019 struct kqueue *kq;
2020 struct knote *kn;
2021 int influx;
2022
2023 FILEDESC_XLOCK_ASSERT(fdp);
2024
2025 /*
    2026 	 * We shouldn't have to worry about new kevents appearing on the fd
    2027 	 * since the filedesc is locked.
2028 */
2029 SLIST_FOREACH(kq, &fdp->fd_kqlist, kq_list) {
2030 KQ_LOCK(kq);
2031
2032 again:
2033 influx = 0;
2034 while (kq->kq_knlistsize > fd &&
2035 (kn = SLIST_FIRST(&kq->kq_knlist[fd])) != NULL) {
2036 if (kn->kn_status & KN_INFLUX) {
2037 /* someone else might be waiting on our knote */
2038 if (influx)
2039 wakeup(kq);
2040 kq->kq_state |= KQ_FLUXWAIT;
2041 msleep(kq, &kq->kq_lock, PSOCK, "kqflxwt", 0);
2042 goto again;
2043 }
2044 kn->kn_status |= KN_INFLUX;
2045 KQ_UNLOCK(kq);
2046 if (!(kn->kn_status & KN_DETACHED))
2047 kn->kn_fop->f_detach(kn);
2048 knote_drop(kn, td);
2049 influx = 1;
2050 KQ_LOCK(kq);
2051 }
2052 KQ_UNLOCK_FLUX(kq);
2053 }
2054 }
2055
2056 static int
2057 knote_attach(struct knote *kn, struct kqueue *kq)
2058 {
2059 struct klist *list;
2060
2061 KASSERT(kn->kn_status & KN_INFLUX, ("knote not marked INFLUX"));
2062 KQ_OWNED(kq);
2063
2064 if (kn->kn_fop->f_isfd) {
2065 if (kn->kn_id >= kq->kq_knlistsize)
2066 return ENOMEM;
2067 list = &kq->kq_knlist[kn->kn_id];
2068 } else {
2069 if (kq->kq_knhash == NULL)
2070 return ENOMEM;
2071 list = &kq->kq_knhash[KN_HASH(kn->kn_id, kq->kq_knhashmask)];
2072 }
2073
2074 SLIST_INSERT_HEAD(list, kn, kn_link);
2075
2076 return 0;
2077 }
2078
2079 /*
    2080  * The knote must already have been detached using the f_detach method.
    2081  * No lock needs to be held; it is assumed that the KN_INFLUX flag is
    2082  * set to prevent other removal.
2083 */
2084 static void
2085 knote_drop(struct knote *kn, struct thread *td)
2086 {
2087 struct kqueue *kq;
2088 struct klist *list;
2089
2090 kq = kn->kn_kq;
2091
2092 KQ_NOTOWNED(kq);
2093 KASSERT((kn->kn_status & KN_INFLUX) == KN_INFLUX,
2094 ("knote_drop called without KN_INFLUX set in kn_status"));
2095
2096 KQ_LOCK(kq);
2097 if (kn->kn_fop->f_isfd)
2098 list = &kq->kq_knlist[kn->kn_id];
2099 else
2100 list = &kq->kq_knhash[KN_HASH(kn->kn_id, kq->kq_knhashmask)];
2101
2102 if (!SLIST_EMPTY(list))
2103 SLIST_REMOVE(list, kn, knote, kn_link);
2104 if (kn->kn_status & KN_QUEUED)
2105 knote_dequeue(kn);
2106 KQ_UNLOCK_FLUX(kq);
2107
2108 if (kn->kn_fop->f_isfd) {
2109 fdrop(kn->kn_fp, td);
2110 kn->kn_fp = NULL;
2111 }
2112 kqueue_fo_release(kn->kn_kevent.filter);
2113 kn->kn_fop = NULL;
2114 knote_free(kn);
2115 }
2116
2117 static void
2118 knote_enqueue(struct knote *kn)
2119 {
2120 struct kqueue *kq = kn->kn_kq;
2121
2122 KQ_OWNED(kn->kn_kq);
2123 KASSERT((kn->kn_status & KN_QUEUED) == 0, ("knote already queued"));
2124
2125 TAILQ_INSERT_TAIL(&kq->kq_head, kn, kn_tqe);
2126 kn->kn_status |= KN_QUEUED;
2127 kq->kq_count++;
2128 kqueue_wakeup(kq);
2129 }
2130
2131 static void
2132 knote_dequeue(struct knote *kn)
2133 {
2134 struct kqueue *kq = kn->kn_kq;
2135
2136 KQ_OWNED(kn->kn_kq);
2137 KASSERT(kn->kn_status & KN_QUEUED, ("knote not queued"));
2138
2139 TAILQ_REMOVE(&kq->kq_head, kn, kn_tqe);
2140 kn->kn_status &= ~KN_QUEUED;
2141 kq->kq_count--;
2142 }
2143
2144 static void
2145 knote_init(void)
2146 {
2147
2148 knote_zone = uma_zcreate("KNOTE", sizeof(struct knote), NULL, NULL,
2149 NULL, NULL, UMA_ALIGN_PTR, 0);
2150 }
2151 SYSINIT(knote, SI_SUB_PSEUDO, SI_ORDER_ANY, knote_init, NULL);
2152
2153 static struct knote *
2154 knote_alloc(int waitok)
2155 {
2156 return ((struct knote *)uma_zalloc(knote_zone,
2157 (waitok ? M_WAITOK : M_NOWAIT)|M_ZERO));
2158 }
2159
2160 static void
2161 knote_free(struct knote *kn)
2162 {
2163 if (kn != NULL)
2164 uma_zfree(knote_zone, kn);
2165 }
2166
2167 /*
2168 * Register the kev w/ the kq specified by fd.
2169 */
2170 int
2171 kqfd_register(int fd, struct kevent *kev, struct thread *td, int waitok)
2172 {
2173 struct kqueue *kq;
2174 struct file *fp;
2175 int error;
2176
2177 if ((error = fget(td, fd, &fp)) != 0)
2178 return (error);
2179 if ((error = kqueue_acquire(fp, &kq)) != 0)
2180 goto noacquire;
2181
2182 error = kqueue_register(kq, kev, td, waitok);
2183
2184 kqueue_release(kq, 0);
2185
2186 noacquire:
2187 fdrop(fp, td);
2188
2189 return error;
2190 }