sys/kern/kern_event.c
1 /*-
2 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
3 *
4 * Copyright (c) 1999,2000,2001 Jonathan Lemon <jlemon@FreeBSD.org>
5 * Copyright 2004 John-Mark Gurney <jmg@FreeBSD.org>
6 * Copyright (c) 2009 Apple, Inc.
7 * All rights reserved.
8 *
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
11 * are met:
12 * 1. Redistributions of source code must retain the above copyright
13 * notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in the
16 * documentation and/or other materials provided with the distribution.
17 *
18 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
19 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
20 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
22 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
23 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
24 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
25 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
26 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
27 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
28 * SUCH DAMAGE.
29 */
30
31 #include <sys/cdefs.h>
32 __FBSDID("$FreeBSD$");
33
34 #include "opt_ktrace.h"
35 #include "opt_kqueue.h"
36
37 #ifdef COMPAT_FREEBSD11
38 #define _WANT_FREEBSD11_KEVENT
39 #endif
40
41 #include <sys/param.h>
42 #include <sys/systm.h>
43 #include <sys/capsicum.h>
44 #include <sys/kernel.h>
45 #include <sys/limits.h>
46 #include <sys/lock.h>
47 #include <sys/mutex.h>
48 #include <sys/rwlock.h>
49 #include <sys/proc.h>
50 #include <sys/malloc.h>
51 #include <sys/unistd.h>
52 #include <sys/file.h>
53 #include <sys/filedesc.h>
54 #include <sys/filio.h>
55 #include <sys/fcntl.h>
56 #include <sys/kthread.h>
57 #include <sys/selinfo.h>
58 #include <sys/queue.h>
59 #include <sys/event.h>
60 #include <sys/eventvar.h>
61 #include <sys/poll.h>
62 #include <sys/protosw.h>
63 #include <sys/resourcevar.h>
64 #include <sys/sigio.h>
65 #include <sys/signalvar.h>
66 #include <sys/socket.h>
67 #include <sys/socketvar.h>
68 #include <sys/stat.h>
69 #include <sys/sysctl.h>
70 #include <sys/sysproto.h>
71 #include <sys/syscallsubr.h>
72 #include <sys/taskqueue.h>
73 #include <sys/uio.h>
74 #include <sys/user.h>
75 #ifdef KTRACE
76 #include <sys/ktrace.h>
77 #endif
78 #include <machine/atomic.h>
79
80 #include <vm/uma.h>
81
82 static MALLOC_DEFINE(M_KQUEUE, "kqueue", "memory for kqueue system");
83
84 /*
85 * This lock is used if multiple kq locks are required. This possibly
86 * should be made into a per proc lock.
87 */
88 static struct mtx kq_global;
89 MTX_SYSINIT(kq_global, &kq_global, "kqueue order", MTX_DEF);
90 #define KQ_GLOBAL_LOCK(lck, haslck) do { \
91 if (!haslck) \
92 mtx_lock(lck); \
93 haslck = 1; \
94 } while (0)
95 #define KQ_GLOBAL_UNLOCK(lck, haslck) do { \
96 if (haslck) \
97 mtx_unlock(lck); \
98 haslck = 0; \
99 } while (0)
100
101 TASKQUEUE_DEFINE_THREAD(kqueue_ctx);
102
103 static int kevent_copyout(void *arg, struct kevent *kevp, int count);
104 static int kevent_copyin(void *arg, struct kevent *kevp, int count);
105 static int kqueue_register(struct kqueue *kq, struct kevent *kev,
106 struct thread *td, int mflag);
107 static int kqueue_acquire(struct file *fp, struct kqueue **kqp);
108 static void kqueue_release(struct kqueue *kq, int locked);
109 static void kqueue_destroy(struct kqueue *kq);
110 static void kqueue_drain(struct kqueue *kq, struct thread *td);
111 static int kqueue_expand(struct kqueue *kq, struct filterops *fops,
112 uintptr_t ident, int mflag);
113 static void kqueue_task(void *arg, int pending);
114 static int kqueue_scan(struct kqueue *kq, int maxevents,
115 struct kevent_copyops *k_ops,
116 const struct timespec *timeout,
117 struct kevent *keva, struct thread *td);
118 static void kqueue_wakeup(struct kqueue *kq);
119 static struct filterops *kqueue_fo_find(int filt);
120 static void kqueue_fo_release(int filt);
121 struct g_kevent_args;
122 static int kern_kevent_generic(struct thread *td,
123 struct g_kevent_args *uap,
124 struct kevent_copyops *k_ops, const char *struct_name);
125
126 static fo_ioctl_t kqueue_ioctl;
127 static fo_poll_t kqueue_poll;
128 static fo_kqfilter_t kqueue_kqfilter;
129 static fo_stat_t kqueue_stat;
130 static fo_close_t kqueue_close;
131 static fo_fill_kinfo_t kqueue_fill_kinfo;
132
133 static struct fileops kqueueops = {
134 .fo_read = invfo_rdwr,
135 .fo_write = invfo_rdwr,
136 .fo_truncate = invfo_truncate,
137 .fo_ioctl = kqueue_ioctl,
138 .fo_poll = kqueue_poll,
139 .fo_kqfilter = kqueue_kqfilter,
140 .fo_stat = kqueue_stat,
141 .fo_close = kqueue_close,
142 .fo_chmod = invfo_chmod,
143 .fo_chown = invfo_chown,
144 .fo_sendfile = invfo_sendfile,
145 .fo_fill_kinfo = kqueue_fill_kinfo,
146 };
147
148 static int knote_attach(struct knote *kn, struct kqueue *kq);
149 static void knote_drop(struct knote *kn, struct thread *td);
150 static void knote_drop_detached(struct knote *kn, struct thread *td);
151 static void knote_enqueue(struct knote *kn);
152 static void knote_dequeue(struct knote *kn);
153 static void knote_init(void);
154 static struct knote *knote_alloc(int mflag);
155 static void knote_free(struct knote *kn);
156
157 static void filt_kqdetach(struct knote *kn);
158 static int filt_kqueue(struct knote *kn, long hint);
159 static int filt_procattach(struct knote *kn);
160 static void filt_procdetach(struct knote *kn);
161 static int filt_proc(struct knote *kn, long hint);
162 static int filt_fileattach(struct knote *kn);
163 static void filt_timerexpire(void *knx);
164 static void filt_timerexpire_l(struct knote *kn, bool proc_locked);
165 static int filt_timerattach(struct knote *kn);
166 static void filt_timerdetach(struct knote *kn);
167 static void filt_timerstart(struct knote *kn, sbintime_t to);
168 static void filt_timertouch(struct knote *kn, struct kevent *kev,
169 u_long type);
170 static int filt_timervalidate(struct knote *kn, sbintime_t *to);
171 static int filt_timer(struct knote *kn, long hint);
172 static int filt_userattach(struct knote *kn);
173 static void filt_userdetach(struct knote *kn);
174 static int filt_user(struct knote *kn, long hint);
175 static void filt_usertouch(struct knote *kn, struct kevent *kev,
176 u_long type);
177
178 static struct filterops file_filtops = {
179 .f_isfd = 1,
180 .f_attach = filt_fileattach,
181 };
182 static struct filterops kqread_filtops = {
183 .f_isfd = 1,
184 .f_detach = filt_kqdetach,
185 .f_event = filt_kqueue,
186 };
187 /* XXX - move to kern_proc.c? */
188 static struct filterops proc_filtops = {
189 .f_isfd = 0,
190 .f_attach = filt_procattach,
191 .f_detach = filt_procdetach,
192 .f_event = filt_proc,
193 };
194 static struct filterops timer_filtops = {
195 .f_isfd = 0,
196 .f_attach = filt_timerattach,
197 .f_detach = filt_timerdetach,
198 .f_event = filt_timer,
199 .f_touch = filt_timertouch,
200 };
201 static struct filterops user_filtops = {
202 .f_attach = filt_userattach,
203 .f_detach = filt_userdetach,
204 .f_event = filt_user,
205 .f_touch = filt_usertouch,
206 };
207
208 static uma_zone_t knote_zone;
209 static unsigned int __exclusive_cache_line kq_ncallouts;
210 static unsigned int kq_calloutmax = 4 * 1024;
211 SYSCTL_UINT(_kern, OID_AUTO, kq_calloutmax, CTLFLAG_RW,
212 &kq_calloutmax, 0, "Maximum number of callouts allocated for kqueue");
213
214 /* XXX - ensure not in flux? */
215 #define KNOTE_ACTIVATE(kn, islock) do { \
216 if ((islock)) \
217 mtx_assert(&(kn)->kn_kq->kq_lock, MA_OWNED); \
218 else \
219 KQ_LOCK((kn)->kn_kq); \
220 (kn)->kn_status |= KN_ACTIVE; \
221 if (((kn)->kn_status & (KN_QUEUED | KN_DISABLED)) == 0) \
222 knote_enqueue((kn)); \
223 if (!(islock)) \
224 KQ_UNLOCK((kn)->kn_kq); \
225 } while (0)
226 #define KQ_LOCK(kq) do { \
227 mtx_lock(&(kq)->kq_lock); \
228 } while (0)
229 #define KQ_FLUX_WAKEUP(kq) do { \
230 if (((kq)->kq_state & KQ_FLUXWAIT) == KQ_FLUXWAIT) { \
231 (kq)->kq_state &= ~KQ_FLUXWAIT; \
232 wakeup((kq)); \
233 } \
234 } while (0)
235 #define KQ_UNLOCK_FLUX(kq) do { \
236 KQ_FLUX_WAKEUP(kq); \
237 mtx_unlock(&(kq)->kq_lock); \
238 } while (0)
239 #define KQ_UNLOCK(kq) do { \
240 mtx_unlock(&(kq)->kq_lock); \
241 } while (0)
242 #define KQ_OWNED(kq) do { \
243 mtx_assert(&(kq)->kq_lock, MA_OWNED); \
244 } while (0)
245 #define KQ_NOTOWNED(kq) do { \
246 mtx_assert(&(kq)->kq_lock, MA_NOTOWNED); \
247 } while (0)
248
249 static struct knlist *
250 kn_list_lock(struct knote *kn)
251 {
252 struct knlist *knl;
253
254 knl = kn->kn_knlist;
255 if (knl != NULL)
256 knl->kl_lock(knl->kl_lockarg);
257 return (knl);
258 }
259
260 static void
261 kn_list_unlock(struct knlist *knl)
262 {
263 bool do_free;
264
265 if (knl == NULL)
266 return;
267 do_free = knl->kl_autodestroy && knlist_empty(knl);
268 knl->kl_unlock(knl->kl_lockarg);
269 if (do_free) {
270 knlist_destroy(knl);
271 free(knl, M_KQUEUE);
272 }
273 }
274
275 static bool
276 kn_in_flux(struct knote *kn)
277 {
278
279 return (kn->kn_influx > 0);
280 }
281
282 static void
283 kn_enter_flux(struct knote *kn)
284 {
285
286 KQ_OWNED(kn->kn_kq);
287 MPASS(kn->kn_influx < INT_MAX);
288 kn->kn_influx++;
289 }
290
291 static bool
292 kn_leave_flux(struct knote *kn)
293 {
294
295 KQ_OWNED(kn->kn_kq);
296 MPASS(kn->kn_influx > 0);
297 kn->kn_influx--;
298 return (kn->kn_influx == 0);
299 }
300
301 #define KNL_ASSERT_LOCK(knl, islocked) do { \
302 if (islocked) \
303 KNL_ASSERT_LOCKED(knl); \
304 else \
305 KNL_ASSERT_UNLOCKED(knl); \
306 } while (0)
307 #ifdef INVARIANTS
308 #define KNL_ASSERT_LOCKED(knl) do { \
309 knl->kl_assert_lock((knl)->kl_lockarg, LA_LOCKED); \
310 } while (0)
311 #define KNL_ASSERT_UNLOCKED(knl) do { \
312 knl->kl_assert_lock((knl)->kl_lockarg, LA_UNLOCKED); \
313 } while (0)
314 #else /* !INVARIANTS */
315 #define KNL_ASSERT_LOCKED(knl) do {} while(0)
316 #define KNL_ASSERT_UNLOCKED(knl) do {} while (0)
317 #endif /* INVARIANTS */
318
319 #ifndef KN_HASHSIZE
320 #define KN_HASHSIZE 64 /* XXX should be tunable */
321 #endif
322
323 #define KN_HASH(val, mask) (((val) ^ (val >> 8)) & (mask))
324
325 static int
326 filt_nullattach(struct knote *kn)
327 {
328
329 return (ENXIO);
330 };
331
332 struct filterops null_filtops = {
333 .f_isfd = 0,
334 .f_attach = filt_nullattach,
335 };
336
337 /* XXX - make SYSINIT to add these, and move into respective modules. */
338 extern struct filterops sig_filtops;
339 extern struct filterops fs_filtops;
340
341 /*
342 * Table for all system-defined filters.
343 */
344 static struct mtx filterops_lock;
345 MTX_SYSINIT(kqueue_filterops, &filterops_lock, "protect sysfilt_ops",
346 MTX_DEF);
347 static struct {
348 struct filterops *for_fop;
349 int for_nolock;
350 int for_refcnt;
351 } sysfilt_ops[EVFILT_SYSCOUNT] = {
352 { &file_filtops, 1 }, /* EVFILT_READ */
353 { &file_filtops, 1 }, /* EVFILT_WRITE */
354 { &null_filtops }, /* EVFILT_AIO */
355 { &file_filtops, 1 }, /* EVFILT_VNODE */
356 { &proc_filtops, 1 }, /* EVFILT_PROC */
357 { &sig_filtops, 1 }, /* EVFILT_SIGNAL */
358 { &timer_filtops, 1 }, /* EVFILT_TIMER */
359 { &file_filtops, 1 }, /* EVFILT_PROCDESC */
360 { &fs_filtops, 1 }, /* EVFILT_FS */
361 { &null_filtops }, /* EVFILT_LIO */
362 { &user_filtops, 1 }, /* EVFILT_USER */
363 { &null_filtops }, /* EVFILT_SENDFILE */
364 { &file_filtops, 1 }, /* EVFILT_EMPTY */
365 };
366
367 /*
368 * Simple redirection for all cdevsw style objects to call their fo_kqfilter
369 * method.
370 */
371 static int
372 filt_fileattach(struct knote *kn)
373 {
374
375 return (fo_kqfilter(kn->kn_fp, kn));
376 }
377
378 /*ARGSUSED*/
379 static int
380 kqueue_kqfilter(struct file *fp, struct knote *kn)
381 {
382 struct kqueue *kq = kn->kn_fp->f_data;
383
384 if (kn->kn_filter != EVFILT_READ)
385 return (EINVAL);
386
387 kn->kn_status |= KN_KQUEUE;
388 kn->kn_fop = &kqread_filtops;
389 knlist_add(&kq->kq_sel.si_note, kn, 0);
390
391 return (0);
392 }
393
394 static void
395 filt_kqdetach(struct knote *kn)
396 {
397 struct kqueue *kq = kn->kn_fp->f_data;
398
399 knlist_remove(&kq->kq_sel.si_note, kn, 0);
400 }
401
402 /*ARGSUSED*/
403 static int
404 filt_kqueue(struct knote *kn, long hint)
405 {
406 struct kqueue *kq = kn->kn_fp->f_data;
407
408 kn->kn_data = kq->kq_count;
409 return (kn->kn_data > 0);
410 }
411
412 /* XXX - move to kern_proc.c? */
413 static int
414 filt_procattach(struct knote *kn)
415 {
416 struct proc *p;
417 int error;
418 bool exiting, immediate;
419
420 exiting = immediate = false;
421 if (kn->kn_sfflags & NOTE_EXIT)
422 p = pfind_any(kn->kn_id);
423 else
424 p = pfind(kn->kn_id);
425 if (p == NULL)
426 return (ESRCH);
427 if (p->p_flag & P_WEXIT)
428 exiting = true;
429
430 if ((error = p_cansee(curthread, p))) {
431 PROC_UNLOCK(p);
432 return (error);
433 }
434
435 kn->kn_ptr.p_proc = p;
436 kn->kn_flags |= EV_CLEAR; /* automatically set */
437
438 /*
439 * Internal flag indicating registration done by kernel for the
440 * purposes of getting a NOTE_CHILD notification.
441 */
442 if (kn->kn_flags & EV_FLAG2) {
443 kn->kn_flags &= ~EV_FLAG2;
444 kn->kn_data = kn->kn_sdata; /* ppid */
445 kn->kn_fflags = NOTE_CHILD;
446 kn->kn_sfflags &= ~(NOTE_EXIT | NOTE_EXEC | NOTE_FORK);
447 immediate = true; /* Force immediate activation of child note. */
448 }
449 /*
450 * Internal flag indicating registration done by kernel (for other than
451 * NOTE_CHILD).
452 */
453 if (kn->kn_flags & EV_FLAG1) {
454 kn->kn_flags &= ~EV_FLAG1;
455 }
456
457 knlist_add(p->p_klist, kn, 1);
458
459 /*
460 * Immediately activate any child notes or, in the case of a zombie
461 * target process, exit notes. The latter is necessary to handle the
462 * case where the target process, e.g. a child, dies before the kevent
463 * is registered.
464 */
465 if (immediate || (exiting && filt_proc(kn, NOTE_EXIT)))
466 KNOTE_ACTIVATE(kn, 0);
467
468 PROC_UNLOCK(p);
469
470 return (0);
471 }
472
473 /*
474 * The knote may be attached to a different process, which may exit,
475 * leaving nothing for the knote to be attached to. So when the process
476 * exits, the knote is marked as DETACHED and also flagged as ONESHOT so
477 * it will be deleted when read out. However, as part of the knote deletion,
478 * this routine is called, so a check is needed to avoid actually performing
479 * a detach, because the original process does not exist any more.
480 */
481 /* XXX - move to kern_proc.c? */
482 static void
483 filt_procdetach(struct knote *kn)
484 {
485
486 knlist_remove(kn->kn_knlist, kn, 0);
487 kn->kn_ptr.p_proc = NULL;
488 }
489
490 /* XXX - move to kern_proc.c? */
491 static int
492 filt_proc(struct knote *kn, long hint)
493 {
494 struct proc *p;
495 u_int event;
496
497 p = kn->kn_ptr.p_proc;
498 if (p == NULL) /* already activated, from attach filter */
499 return (0);
500
501 /* Mask off extra data. */
502 event = (u_int)hint & NOTE_PCTRLMASK;
503
504 /* If the user is interested in this event, record it. */
505 if (kn->kn_sfflags & event)
506 kn->kn_fflags |= event;
507
508 /* Process is gone, so flag the event as finished. */
509 if (event == NOTE_EXIT) {
510 kn->kn_flags |= EV_EOF | EV_ONESHOT;
511 kn->kn_ptr.p_proc = NULL;
512 if (kn->kn_fflags & NOTE_EXIT)
513 kn->kn_data = KW_EXITCODE(p->p_xexit, p->p_xsig);
514 if (kn->kn_fflags == 0)
515 kn->kn_flags |= EV_DROP;
516 return (1);
517 }
518
519 return (kn->kn_fflags != 0);
520 }
521
522 /*
523  * Called when a process forks. This mostly does the same as knote(),
524  * activating all knotes registered to be activated when the process
525  * forks. Additionally, for each knote attached to the parent, check
526  * whether the user wants to track the new process. If so, attach a
527  * new knote to the child and immediately report an event with the
528  * child's pid.
529 */
530 void
531 knote_fork(struct knlist *list, int pid)
532 {
533 struct kqueue *kq;
534 struct knote *kn;
535 struct kevent kev;
536 int error;
537
538 MPASS(list != NULL);
539 KNL_ASSERT_LOCKED(list);
540 if (SLIST_EMPTY(&list->kl_list))
541 return;
542
543 memset(&kev, 0, sizeof(kev));
544 SLIST_FOREACH(kn, &list->kl_list, kn_selnext) {
545 kq = kn->kn_kq;
546 KQ_LOCK(kq);
547 if (kn_in_flux(kn) && (kn->kn_status & KN_SCAN) == 0) {
548 KQ_UNLOCK(kq);
549 continue;
550 }
551
552 /*
553 * The same as knote(), activate the event.
554 */
555 if ((kn->kn_sfflags & NOTE_TRACK) == 0) {
556 if (kn->kn_fop->f_event(kn, NOTE_FORK))
557 KNOTE_ACTIVATE(kn, 1);
558 KQ_UNLOCK(kq);
559 continue;
560 }
561
562 /*
563 * The NOTE_TRACK case. In addition to the activation
564 * of the event, we need to register new events to
565 * track the child. Drop the locks in preparation for
566 * the call to kqueue_register().
567 */
568 kn_enter_flux(kn);
569 KQ_UNLOCK(kq);
570 list->kl_unlock(list->kl_lockarg);
571
572 /*
573 * Activate existing knote and register tracking knotes with
574 * new process.
575 *
576 * First register a knote to get just the child notice. This
577 * must be a separate note from a potential NOTE_EXIT
578 * notification since both NOTE_CHILD and NOTE_EXIT are defined
579 * to use the data field (in conflicting ways).
580 */
581 kev.ident = pid;
582 kev.filter = kn->kn_filter;
583 kev.flags = kn->kn_flags | EV_ADD | EV_ENABLE | EV_ONESHOT |
584 EV_FLAG2;
585 kev.fflags = kn->kn_sfflags;
586 kev.data = kn->kn_id; /* parent */
587 kev.udata = kn->kn_kevent.udata;/* preserve udata */
588 error = kqueue_register(kq, &kev, NULL, M_NOWAIT);
589 if (error)
590 kn->kn_fflags |= NOTE_TRACKERR;
591
592 /*
593 * Then register another knote to track other potential events
594 * from the new process.
595 */
596 kev.ident = pid;
597 kev.filter = kn->kn_filter;
598 kev.flags = kn->kn_flags | EV_ADD | EV_ENABLE | EV_FLAG1;
599 kev.fflags = kn->kn_sfflags;
600 kev.data = kn->kn_id; /* parent */
601 kev.udata = kn->kn_kevent.udata;/* preserve udata */
602 error = kqueue_register(kq, &kev, NULL, M_NOWAIT);
603 if (error)
604 kn->kn_fflags |= NOTE_TRACKERR;
605 if (kn->kn_fop->f_event(kn, NOTE_FORK))
606 KNOTE_ACTIVATE(kn, 0);
607 list->kl_lock(list->kl_lockarg);
608 KQ_LOCK(kq);
609 kn_leave_flux(kn);
610 KQ_UNLOCK_FLUX(kq);
611 }
612 }
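
/*
 * Illustrative userspace sketch (not part of the kernel): the NOTE_TRACK
 * machinery above is what makes the following work.  Watching a process
 * with NOTE_TRACK causes each fork to deliver NOTE_FORK on the parent and
 * a NOTE_CHILD event for the child, whose kevent data field carries the
 * parent's pid (via the EV_FLAG2 registration above).
 *
 *	struct kevent kev;
 *	int kq = kqueue();
 *
 *	EV_SET(&kev, getpid(), EVFILT_PROC, EV_ADD,
 *	    NOTE_TRACK | NOTE_FORK | NOTE_EXIT, 0, NULL);
 *	(void)kevent(kq, &kev, 1, NULL, 0, NULL);
 *
 * Subsequent kevent() calls then report NOTE_FORK, NOTE_CHILD and
 * NOTE_EXIT events for the watched process and its descendants.
 */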
613
614 /*
615 * XXX: EVFILT_TIMER should perhaps live in kern_time.c beside the
616 * interval timer support code.
617 */
618
619 #define NOTE_TIMER_PRECMASK \
620 (NOTE_SECONDS | NOTE_MSECONDS | NOTE_USECONDS | NOTE_NSECONDS)
621
622 static sbintime_t
623 timer2sbintime(int64_t data, int flags)
624 {
625 int64_t secs;
626
627 /*
628 * Macros for converting to the fractional second portion of an
629  * sbintime_t using 64-bit multiplication to improve precision.
630 */
631 #define NS_TO_SBT(ns) (((ns) * (((uint64_t)1 << 63) / 500000000)) >> 32)
632 #define US_TO_SBT(us) (((us) * (((uint64_t)1 << 63) / 500000)) >> 32)
633 #define MS_TO_SBT(ms) (((ms) * (((uint64_t)1 << 63) / 500)) >> 32)
634 switch (flags & NOTE_TIMER_PRECMASK) {
635 case NOTE_SECONDS:
636 #ifdef __LP64__
637 if (data > (SBT_MAX / SBT_1S))
638 return (SBT_MAX);
639 #endif
640 return ((sbintime_t)data << 32);
641 case NOTE_MSECONDS: /* FALLTHROUGH */
642 case 0:
643 if (data >= 1000) {
644 secs = data / 1000;
645 #ifdef __LP64__
646 if (secs > (SBT_MAX / SBT_1S))
647 return (SBT_MAX);
648 #endif
649 return (secs << 32 | MS_TO_SBT(data % 1000));
650 }
651 return (MS_TO_SBT(data));
652 case NOTE_USECONDS:
653 if (data >= 1000000) {
654 secs = data / 1000000;
655 #ifdef __LP64__
656 if (secs > (SBT_MAX / SBT_1S))
657 return (SBT_MAX);
658 #endif
659 return (secs << 32 | US_TO_SBT(data % 1000000));
660 }
661 return (US_TO_SBT(data));
662 case NOTE_NSECONDS:
663 if (data >= 1000000000) {
664 secs = data / 1000000000;
665 #ifdef __LP64__
666 if (secs > (SBT_MAX / SBT_1S))
667 return (SBT_MAX);
668 #endif
669 return (secs << 32 | NS_TO_SBT(data % 1000000000));
670 }
671 return (NS_TO_SBT(data));
672 default:
673 break;
674 }
675 return (-1);
676 }
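
/*
 * Worked example (illustrative) for the conversion above: an sbintime_t
 * is a 32.32 fixed-point count of seconds, so MS_TO_SBT(ms) is in effect
 * ms * 2^32 / 1000.  A kevent data value of 250 with NOTE_MSECONDS thus
 * yields MS_TO_SBT(250) == (250 * (2^63 / 500)) >> 32 == 2^30 ==
 * 0x40000000, i.e. a quarter second in the fractional part, while a
 * value of 1500 yields ((sbintime_t)1 << 32) | MS_TO_SBT(500).
 */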
677
678 struct kq_timer_cb_data {
679 struct callout c;
680 struct proc *p;
681 struct knote *kn;
682 int cpuid;
683 int flags;
684 TAILQ_ENTRY(kq_timer_cb_data) link;
685 sbintime_t next; /* next timer event fires at */
686 sbintime_t to; /* precalculated timer period, 0 for abs */
687 };
688
689 #define KQ_TIMER_CB_ENQUEUED 0x01
690
691 static void
692 kqtimer_sched_callout(struct kq_timer_cb_data *kc)
693 {
694 callout_reset_sbt_on(&kc->c, kc->next, 0, filt_timerexpire, kc->kn,
695 kc->cpuid, C_ABSOLUTE);
696 }
697
698 void
699 kqtimer_proc_continue(struct proc *p)
700 {
701 struct kq_timer_cb_data *kc, *kc1;
702 struct bintime bt;
703 sbintime_t now;
704
705 PROC_LOCK_ASSERT(p, MA_OWNED);
706
707 getboottimebin(&bt);
708 now = bttosbt(bt);
709
710 TAILQ_FOREACH_SAFE(kc, &p->p_kqtim_stop, link, kc1) {
711 TAILQ_REMOVE(&p->p_kqtim_stop, kc, link);
712 kc->flags &= ~KQ_TIMER_CB_ENQUEUED;
713 if (kc->next <= now)
714 filt_timerexpire_l(kc->kn, true);
715 else
716 kqtimer_sched_callout(kc);
717 }
718 }
719
720 static void
721 filt_timerexpire_l(struct knote *kn, bool proc_locked)
722 {
723 struct kq_timer_cb_data *kc;
724 struct proc *p;
725 uint64_t delta;
726 sbintime_t now;
727
728 kc = kn->kn_ptr.p_v;
729
730 if ((kn->kn_flags & EV_ONESHOT) != 0 || kc->to == 0) {
731 kn->kn_data++;
732 KNOTE_ACTIVATE(kn, 0);
733 return;
734 }
735
736 now = sbinuptime();
737 if (now >= kc->next) {
738 delta = (now - kc->next) / kc->to;
739 if (delta == 0)
740 delta = 1;
741 kn->kn_data += delta;
742 kc->next += delta * kc->to;
743 if (now >= kc->next) /* overflow */
744 kc->next = now + kc->to;
745 KNOTE_ACTIVATE(kn, 0); /* XXX - handle locking */
746 }
747
748 /*
749  * The initial check for a stopped kc->p is racy. It is fine to
750  * miss the setting of the stop flags; at worst we would schedule
751  * one more callout. On the other hand, it is not fine to fail to
752  * schedule when we missed the clearing of the flags, so we
753  * recheck them under the lock and observe a consistent state.
754 */
755 p = kc->p;
756 if (P_SHOULDSTOP(p) || P_KILLED(p)) {
757 if (!proc_locked)
758 PROC_LOCK(p);
759 if (P_SHOULDSTOP(p) || P_KILLED(p)) {
760 if ((kc->flags & KQ_TIMER_CB_ENQUEUED) == 0) {
761 kc->flags |= KQ_TIMER_CB_ENQUEUED;
762 TAILQ_INSERT_TAIL(&p->p_kqtim_stop, kc, link);
763 }
764 if (!proc_locked)
765 PROC_UNLOCK(p);
766 return;
767 }
768 if (!proc_locked)
769 PROC_UNLOCK(p);
770 }
771 kqtimer_sched_callout(kc);
772 }
773
774 static void
775 filt_timerexpire(void *knx)
776 {
777 filt_timerexpire_l(knx, false);
778 }
779
780 /*
781 * data contains amount of time to sleep
782 */
783 static int
784 filt_timervalidate(struct knote *kn, sbintime_t *to)
785 {
786 struct bintime bt;
787 sbintime_t sbt;
788
789 if (kn->kn_sdata < 0)
790 return (EINVAL);
791 if (kn->kn_sdata == 0 && (kn->kn_flags & EV_ONESHOT) == 0)
792 kn->kn_sdata = 1;
793 /*
794 * The only fflags values supported are the timer unit
795 * (precision) and the absolute time indicator.
796 */
797 if ((kn->kn_sfflags & ~(NOTE_TIMER_PRECMASK | NOTE_ABSTIME)) != 0)
798 return (EINVAL);
799
800 *to = timer2sbintime(kn->kn_sdata, kn->kn_sfflags);
801 if (*to < 0)
802 return (EINVAL);
803 if ((kn->kn_sfflags & NOTE_ABSTIME) != 0) {
804 getboottimebin(&bt);
805 sbt = bttosbt(bt);
806 *to = MAX(0, *to - sbt);
807 }
808 return (0);
809 }
810
811 static int
812 filt_timerattach(struct knote *kn)
813 {
814 struct kq_timer_cb_data *kc;
815 sbintime_t to;
816 int error;
817
818 to = -1;
819 error = filt_timervalidate(kn, &to);
820 if (error != 0)
821 return (error);
822 KASSERT(to > 0 || (kn->kn_flags & EV_ONESHOT) != 0 ||
823 (kn->kn_sfflags & NOTE_ABSTIME) != 0,
824 ("%s: periodic timer has a calculated zero timeout", __func__));
825 KASSERT(to >= 0,
826 ("%s: timer has a calculated negative timeout", __func__));
827
828 if (atomic_fetchadd_int(&kq_ncallouts, 1) + 1 > kq_calloutmax) {
829 atomic_subtract_int(&kq_ncallouts, 1);
830 return (ENOMEM);
831 }
832
833 if ((kn->kn_sfflags & NOTE_ABSTIME) == 0)
834 kn->kn_flags |= EV_CLEAR; /* automatically set */
835 kn->kn_status &= ~KN_DETACHED; /* knlist_add clears it */
836 kn->kn_ptr.p_v = kc = malloc(sizeof(*kc), M_KQUEUE, M_WAITOK);
837 kc->kn = kn;
838 kc->p = curproc;
839 kc->cpuid = PCPU_GET(cpuid);
840 kc->flags = 0;
841 callout_init(&kc->c, 1);
842 filt_timerstart(kn, to);
843
844 return (0);
845 }
846
847 static void
848 filt_timerstart(struct knote *kn, sbintime_t to)
849 {
850 struct kq_timer_cb_data *kc;
851
852 kc = kn->kn_ptr.p_v;
853 if ((kn->kn_sfflags & NOTE_ABSTIME) != 0) {
854 kc->next = to;
855 kc->to = 0;
856 } else {
857 kc->next = to + sbinuptime();
858 kc->to = to;
859 }
860 kqtimer_sched_callout(kc);
861 }
862
863 static void
864 filt_timerdetach(struct knote *kn)
865 {
866 struct kq_timer_cb_data *kc;
867 unsigned int old __unused;
868 bool pending;
869
870 kc = kn->kn_ptr.p_v;
871 do {
872 callout_drain(&kc->c);
873
874 /*
875 * kqtimer_proc_continue() might have rescheduled this callout.
876 * Double-check, using the process mutex as an interlock.
877 */
878 PROC_LOCK(kc->p);
879 if ((kc->flags & KQ_TIMER_CB_ENQUEUED) != 0) {
880 kc->flags &= ~KQ_TIMER_CB_ENQUEUED;
881 TAILQ_REMOVE(&kc->p->p_kqtim_stop, kc, link);
882 }
883 pending = callout_pending(&kc->c);
884 PROC_UNLOCK(kc->p);
885 } while (pending);
886 free(kc, M_KQUEUE);
887 old = atomic_fetchadd_int(&kq_ncallouts, -1);
888 KASSERT(old > 0, ("Number of callouts cannot become negative"));
889 kn->kn_status |= KN_DETACHED; /* knlist_remove sets it */
890 }
891
892 static void
893 filt_timertouch(struct knote *kn, struct kevent *kev, u_long type)
894 {
895 struct kq_timer_cb_data *kc;
896 struct kqueue *kq;
897 sbintime_t to;
898 int error;
899
900 switch (type) {
901 case EVENT_REGISTER:
902 /* Handle re-added timers that update data/fflags */
903 if (kev->flags & EV_ADD) {
904 kc = kn->kn_ptr.p_v;
905
906 /* Drain any existing callout. */
907 callout_drain(&kc->c);
908
909 /* Throw away any existing undelivered record
910 * of the timer expiration. This is done under
911 * the presumption that if a process is
912 * re-adding this timer with new parameters,
913 * it is no longer interested in what may have
914 * happened under the old parameters. If it is
915 * interested, it can wait for the expiration,
916 * delete the old timer definition, and then
917 * add the new one.
918 *
919 * This has to be done while the kq is locked:
920 * - if enqueued, dequeue
921 * - make it no longer active
922 * - clear the count of expiration events
923 */
924 kq = kn->kn_kq;
925 KQ_LOCK(kq);
926 if (kn->kn_status & KN_QUEUED)
927 knote_dequeue(kn);
928
929 kn->kn_status &= ~KN_ACTIVE;
930 kn->kn_data = 0;
931 KQ_UNLOCK(kq);
932
933 /* Reschedule timer based on new data/fflags */
934 kn->kn_sfflags = kev->fflags;
935 kn->kn_sdata = kev->data;
936 error = filt_timervalidate(kn, &to);
937 if (error != 0) {
938 kn->kn_flags |= EV_ERROR;
939 kn->kn_data = error;
940 } else
941 filt_timerstart(kn, to);
942 }
943 break;
944
945 case EVENT_PROCESS:
946 *kev = kn->kn_kevent;
947 if (kn->kn_flags & EV_CLEAR) {
948 kn->kn_data = 0;
949 kn->kn_fflags = 0;
950 }
951 break;
952
953 default:
954 panic("filt_timertouch() - invalid type (%ld)", type);
955 break;
956 }
957 }
958
959 static int
960 filt_timer(struct knote *kn, long hint)
961 {
962
963 return (kn->kn_data != 0);
964 }
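
/*
 * Illustrative userspace sketch (not part of the kernel): a periodic
 * 500ms EVFILT_TIMER.  EV_CLEAR is set automatically for non-absolute
 * timers, so kn_data accumulates the number of expirations between
 * kevent() calls and is cleared once the event has been read.
 *
 *	struct kevent kev, ev;
 *	int kq = kqueue();
 *
 *	EV_SET(&kev, 1, EVFILT_TIMER, EV_ADD, NOTE_MSECONDS, 500, NULL);
 *	(void)kevent(kq, &kev, 1, NULL, 0, NULL);
 *	(void)kevent(kq, NULL, 0, &ev, 1, NULL);
 *
 * After the second call, ev.data holds the count of expirations since the
 * timer was last read.
 */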
965
966 static int
967 filt_userattach(struct knote *kn)
968 {
969
970 /*
971 * EVFILT_USER knotes are not attached to anything in the kernel.
972 */
973 kn->kn_hook = NULL;
974 if (kn->kn_fflags & NOTE_TRIGGER)
975 kn->kn_hookid = 1;
976 else
977 kn->kn_hookid = 0;
978 return (0);
979 }
980
981 static void
982 filt_userdetach(__unused struct knote *kn)
983 {
984
985 /*
986 * EVFILT_USER knotes are not attached to anything in the kernel.
987 */
988 }
989
990 static int
991 filt_user(struct knote *kn, __unused long hint)
992 {
993
994 return (kn->kn_hookid);
995 }
996
997 static void
998 filt_usertouch(struct knote *kn, struct kevent *kev, u_long type)
999 {
1000 u_int ffctrl;
1001
1002 switch (type) {
1003 case EVENT_REGISTER:
1004 if (kev->fflags & NOTE_TRIGGER)
1005 kn->kn_hookid = 1;
1006
1007 ffctrl = kev->fflags & NOTE_FFCTRLMASK;
1008 kev->fflags &= NOTE_FFLAGSMASK;
1009 switch (ffctrl) {
1010 case NOTE_FFNOP:
1011 break;
1012
1013 case NOTE_FFAND:
1014 kn->kn_sfflags &= kev->fflags;
1015 break;
1016
1017 case NOTE_FFOR:
1018 kn->kn_sfflags |= kev->fflags;
1019 break;
1020
1021 case NOTE_FFCOPY:
1022 kn->kn_sfflags = kev->fflags;
1023 break;
1024
1025 default:
1026 /* XXX Return error? */
1027 break;
1028 }
1029 kn->kn_sdata = kev->data;
1030 if (kev->flags & EV_CLEAR) {
1031 kn->kn_hookid = 0;
1032 kn->kn_data = 0;
1033 kn->kn_fflags = 0;
1034 }
1035 break;
1036
1037 case EVENT_PROCESS:
1038 *kev = kn->kn_kevent;
1039 kev->fflags = kn->kn_sfflags;
1040 kev->data = kn->kn_sdata;
1041 if (kn->kn_flags & EV_CLEAR) {
1042 kn->kn_hookid = 0;
1043 kn->kn_data = 0;
1044 kn->kn_fflags = 0;
1045 }
1046 break;
1047
1048 default:
1049 panic("filt_usertouch() - invalid type (%ld)", type);
1050 break;
1051 }
1052 }
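
/*
 * Illustrative userspace sketch (not part of the kernel): an EVFILT_USER
 * event is armed with EV_ADD and later fired, e.g. from another thread,
 * by a change carrying NOTE_TRIGGER, which filt_usertouch() above turns
 * into kn_hookid = 1.
 *
 *	struct kevent kev;
 *	int kq = kqueue();
 *
 *	EV_SET(&kev, 1, EVFILT_USER, EV_ADD | EV_CLEAR, 0, 0, NULL);
 *	(void)kevent(kq, &kev, 1, NULL, 0, NULL);
 *	...
 *	EV_SET(&kev, 1, EVFILT_USER, 0, NOTE_TRIGGER, 0, NULL);
 *	(void)kevent(kq, &kev, 1, NULL, 0, NULL);
 */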
1053
1054 int
1055 sys_kqueue(struct thread *td, struct kqueue_args *uap)
1056 {
1057
1058 return (kern_kqueue(td, 0, NULL));
1059 }
1060
1061 static void
1062 kqueue_init(struct kqueue *kq)
1063 {
1064
1065 mtx_init(&kq->kq_lock, "kqueue", NULL, MTX_DEF | MTX_DUPOK);
1066 TAILQ_INIT(&kq->kq_head);
1067 knlist_init_mtx(&kq->kq_sel.si_note, &kq->kq_lock);
1068 TASK_INIT(&kq->kq_task, 0, kqueue_task, kq);
1069 }
1070
1071 int
1072 kern_kqueue(struct thread *td, int flags, struct filecaps *fcaps)
1073 {
1074 struct filedesc *fdp;
1075 struct kqueue *kq;
1076 struct file *fp;
1077 struct ucred *cred;
1078 int fd, error;
1079
1080 fdp = td->td_proc->p_fd;
1081 cred = td->td_ucred;
1082 if (!chgkqcnt(cred->cr_ruidinfo, 1, lim_cur(td, RLIMIT_KQUEUES)))
1083 return (ENOMEM);
1084
1085 error = falloc_caps(td, &fp, &fd, flags, fcaps);
1086 if (error != 0) {
1087 chgkqcnt(cred->cr_ruidinfo, -1, 0);
1088 return (error);
1089 }
1090
1091 /* An extra reference on `fp' has been held for us by falloc(). */
1092 kq = malloc(sizeof *kq, M_KQUEUE, M_WAITOK | M_ZERO);
1093 kqueue_init(kq);
1094 kq->kq_fdp = fdp;
1095 kq->kq_cred = crhold(cred);
1096
1097 FILEDESC_XLOCK(fdp);
1098 TAILQ_INSERT_HEAD(&fdp->fd_kqlist, kq, kq_list);
1099 FILEDESC_XUNLOCK(fdp);
1100
1101 finit(fp, FREAD | FWRITE, DTYPE_KQUEUE, kq, &kqueueops);
1102 fdrop(fp, td);
1103
1104 td->td_retval[0] = fd;
1105 return (0);
1106 }
1107
1108 struct g_kevent_args {
1109 int fd;
1110 void *changelist;
1111 int nchanges;
1112 void *eventlist;
1113 int nevents;
1114 const struct timespec *timeout;
1115 };
1116
1117 int
1118 sys_kevent(struct thread *td, struct kevent_args *uap)
1119 {
1120 struct kevent_copyops k_ops = {
1121 .arg = uap,
1122 .k_copyout = kevent_copyout,
1123 .k_copyin = kevent_copyin,
1124 .kevent_size = sizeof(struct kevent),
1125 };
1126 struct g_kevent_args gk_args = {
1127 .fd = uap->fd,
1128 .changelist = uap->changelist,
1129 .nchanges = uap->nchanges,
1130 .eventlist = uap->eventlist,
1131 .nevents = uap->nevents,
1132 .timeout = uap->timeout,
1133 };
1134
1135 return (kern_kevent_generic(td, &gk_args, &k_ops, "kevent"));
1136 }
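
/*
 * Illustrative userspace sketch (not part of the kernel): the kevent(2)
 * entry point above is typically driven as follows, here waiting up to
 * one second for a previously opened descriptor fd to become readable.
 *
 *	struct kevent change, event;
 *	struct timespec ts = { 1, 0 };
 *	int kq = kqueue();
 *	int n;
 *
 *	EV_SET(&change, fd, EVFILT_READ, EV_ADD, 0, 0, NULL);
 *	n = kevent(kq, &change, 1, &event, 1, &ts);
 *
 * n == 0 indicates a timeout; n == 1 indicates that the descriptor is
 * ready, with event.data reporting the amount available to read for most
 * descriptor types.
 */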
1137
1138 static int
1139 kern_kevent_generic(struct thread *td, struct g_kevent_args *uap,
1140 struct kevent_copyops *k_ops, const char *struct_name)
1141 {
1142 struct timespec ts, *tsp;
1143 #ifdef KTRACE
1144 struct kevent *eventlist = uap->eventlist;
1145 #endif
1146 int error;
1147
1148 if (uap->timeout != NULL) {
1149 error = copyin(uap->timeout, &ts, sizeof(ts));
1150 if (error)
1151 return (error);
1152 tsp = &ts;
1153 } else
1154 tsp = NULL;
1155
1156 #ifdef KTRACE
1157 if (KTRPOINT(td, KTR_STRUCT_ARRAY))
1158 ktrstructarray(struct_name, UIO_USERSPACE, uap->changelist,
1159 uap->nchanges, k_ops->kevent_size);
1160 #endif
1161
1162 error = kern_kevent(td, uap->fd, uap->nchanges, uap->nevents,
1163 k_ops, tsp);
1164
1165 #ifdef KTRACE
1166 if (error == 0 && KTRPOINT(td, KTR_STRUCT_ARRAY))
1167 ktrstructarray(struct_name, UIO_USERSPACE, eventlist,
1168 td->td_retval[0], k_ops->kevent_size);
1169 #endif
1170
1171 return (error);
1172 }
1173
1174 /*
1175 * Copy 'count' items into the destination list pointed to by uap->eventlist.
1176 */
1177 static int
1178 kevent_copyout(void *arg, struct kevent *kevp, int count)
1179 {
1180 struct kevent_args *uap;
1181 int error;
1182
1183 KASSERT(count <= KQ_NEVENTS, ("count (%d) > KQ_NEVENTS", count));
1184 uap = (struct kevent_args *)arg;
1185
1186 error = copyout(kevp, uap->eventlist, count * sizeof *kevp);
1187 if (error == 0)
1188 uap->eventlist += count;
1189 return (error);
1190 }
1191
1192 /*
1193 * Copy 'count' items from the list pointed to by uap->changelist.
1194 */
1195 static int
1196 kevent_copyin(void *arg, struct kevent *kevp, int count)
1197 {
1198 struct kevent_args *uap;
1199 int error;
1200
1201 KASSERT(count <= KQ_NEVENTS, ("count (%d) > KQ_NEVENTS", count));
1202 uap = (struct kevent_args *)arg;
1203
1204 error = copyin(uap->changelist, kevp, count * sizeof *kevp);
1205 if (error == 0)
1206 uap->changelist += count;
1207 return (error);
1208 }
1209
1210 #ifdef COMPAT_FREEBSD11
1211 static int
1212 kevent11_copyout(void *arg, struct kevent *kevp, int count)
1213 {
1214 struct freebsd11_kevent_args *uap;
1215 struct kevent_freebsd11 kev11;
1216 int error, i;
1217
1218 KASSERT(count <= KQ_NEVENTS, ("count (%d) > KQ_NEVENTS", count));
1219 uap = (struct freebsd11_kevent_args *)arg;
1220
1221 for (i = 0; i < count; i++) {
1222 kev11.ident = kevp->ident;
1223 kev11.filter = kevp->filter;
1224 kev11.flags = kevp->flags;
1225 kev11.fflags = kevp->fflags;
1226 kev11.data = kevp->data;
1227 kev11.udata = kevp->udata;
1228 error = copyout(&kev11, uap->eventlist, sizeof(kev11));
1229 if (error != 0)
1230 break;
1231 uap->eventlist++;
1232 kevp++;
1233 }
1234 return (error);
1235 }
1236
1237 /*
1238 * Copy 'count' items from the list pointed to by uap->changelist.
1239 */
1240 static int
1241 kevent11_copyin(void *arg, struct kevent *kevp, int count)
1242 {
1243 struct freebsd11_kevent_args *uap;
1244 struct kevent_freebsd11 kev11;
1245 int error, i;
1246
1247 KASSERT(count <= KQ_NEVENTS, ("count (%d) > KQ_NEVENTS", count));
1248 uap = (struct freebsd11_kevent_args *)arg;
1249
1250 for (i = 0; i < count; i++) {
1251 error = copyin(uap->changelist, &kev11, sizeof(kev11));
1252 if (error != 0)
1253 break;
1254 kevp->ident = kev11.ident;
1255 kevp->filter = kev11.filter;
1256 kevp->flags = kev11.flags;
1257 kevp->fflags = kev11.fflags;
1258 kevp->data = (uintptr_t)kev11.data;
1259 kevp->udata = kev11.udata;
1260 bzero(&kevp->ext, sizeof(kevp->ext));
1261 uap->changelist++;
1262 kevp++;
1263 }
1264 return (error);
1265 }
1266
1267 int
1268 freebsd11_kevent(struct thread *td, struct freebsd11_kevent_args *uap)
1269 {
1270 struct kevent_copyops k_ops = {
1271 .arg = uap,
1272 .k_copyout = kevent11_copyout,
1273 .k_copyin = kevent11_copyin,
1274 .kevent_size = sizeof(struct kevent_freebsd11),
1275 };
1276 struct g_kevent_args gk_args = {
1277 .fd = uap->fd,
1278 .changelist = uap->changelist,
1279 .nchanges = uap->nchanges,
1280 .eventlist = uap->eventlist,
1281 .nevents = uap->nevents,
1282 .timeout = uap->timeout,
1283 };
1284
1285 return (kern_kevent_generic(td, &gk_args, &k_ops, "kevent_freebsd11"));
1286 }
1287 #endif
1288
1289 int
1290 kern_kevent(struct thread *td, int fd, int nchanges, int nevents,
1291 struct kevent_copyops *k_ops, const struct timespec *timeout)
1292 {
1293 cap_rights_t rights;
1294 struct file *fp;
1295 int error;
1296
1297 cap_rights_init_zero(&rights);
1298 if (nchanges > 0)
1299 cap_rights_set_one(&rights, CAP_KQUEUE_CHANGE);
1300 if (nevents > 0)
1301 cap_rights_set_one(&rights, CAP_KQUEUE_EVENT);
1302 error = fget(td, fd, &rights, &fp);
1303 if (error != 0)
1304 return (error);
1305
1306 error = kern_kevent_fp(td, fp, nchanges, nevents, k_ops, timeout);
1307 fdrop(fp, td);
1308
1309 return (error);
1310 }
1311
1312 static int
1313 kqueue_kevent(struct kqueue *kq, struct thread *td, int nchanges, int nevents,
1314 struct kevent_copyops *k_ops, const struct timespec *timeout)
1315 {
1316 struct kevent keva[KQ_NEVENTS];
1317 struct kevent *kevp, *changes;
1318 int i, n, nerrors, error;
1319
1320 if (nchanges < 0)
1321 return (EINVAL);
1322
1323 nerrors = 0;
1324 while (nchanges > 0) {
1325 n = nchanges > KQ_NEVENTS ? KQ_NEVENTS : nchanges;
1326 error = k_ops->k_copyin(k_ops->arg, keva, n);
1327 if (error)
1328 return (error);
1329 changes = keva;
1330 for (i = 0; i < n; i++) {
1331 kevp = &changes[i];
1332 if (!kevp->filter)
1333 continue;
1334 kevp->flags &= ~EV_SYSFLAGS;
1335 error = kqueue_register(kq, kevp, td, M_WAITOK);
1336 if (error || (kevp->flags & EV_RECEIPT)) {
1337 if (nevents == 0)
1338 return (error);
1339 kevp->flags = EV_ERROR;
1340 kevp->data = error;
1341 (void)k_ops->k_copyout(k_ops->arg, kevp, 1);
1342 nevents--;
1343 nerrors++;
1344 }
1345 }
1346 nchanges -= n;
1347 }
1348 if (nerrors) {
1349 td->td_retval[0] = nerrors;
1350 return (0);
1351 }
1352
1353 return (kqueue_scan(kq, nevents, k_ops, timeout, keva, td));
1354 }
1355
1356 int
1357 kern_kevent_fp(struct thread *td, struct file *fp, int nchanges, int nevents,
1358 struct kevent_copyops *k_ops, const struct timespec *timeout)
1359 {
1360 struct kqueue *kq;
1361 int error;
1362
1363 error = kqueue_acquire(fp, &kq);
1364 if (error != 0)
1365 return (error);
1366 error = kqueue_kevent(kq, td, nchanges, nevents, k_ops, timeout);
1367 kqueue_release(kq, 0);
1368 return (error);
1369 }
1370
1371 /*
1372 * Performs a kevent() call on a temporarily created kqueue. This can be
1373 * used to perform one-shot polling, similar to poll() and select().
1374 */
1375 int
1376 kern_kevent_anonymous(struct thread *td, int nevents,
1377 struct kevent_copyops *k_ops)
1378 {
1379 struct kqueue kq = {};
1380 int error;
1381
1382 kqueue_init(&kq);
1383 kq.kq_refcnt = 1;
1384 error = kqueue_kevent(&kq, td, nevents, nevents, k_ops, NULL);
1385 kqueue_drain(&kq, td);
1386 kqueue_destroy(&kq);
1387 return (error);
1388 }
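
/*
 * Hypothetical in-kernel caller sketch (illustrative; the names below are
 * not part of this file): because no user addresses are involved, the
 * copyops supplied to kern_kevent_anonymous() typically just copy kevents
 * to and from a caller-provided kernel buffer.
 *
 *	static int
 *	example_copyin(void *arg, struct kevent *kevp, int count)
 *	{
 *		bcopy(arg, kevp, count * sizeof(*kevp));
 *		return (0);
 *	}
 *
 *	struct kevent_copyops k_ops = {
 *		.arg = &kev_buf,
 *		.k_copyin = example_copyin,
 *		.k_copyout = example_copyout,
 *		.kevent_size = sizeof(struct kevent),
 *	};
 *	error = kern_kevent_anonymous(curthread, 1, &k_ops);
 */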
1389
1390 int
1391 kqueue_add_filteropts(int filt, struct filterops *filtops)
1392 {
1393 int error;
1394
1395 error = 0;
1396 if (filt > 0 || filt + EVFILT_SYSCOUNT < 0) {
1397 printf(
1398 "trying to add a filterop that is out of range: %d is beyond %d\n",
1399 ~filt, EVFILT_SYSCOUNT);
1400 return EINVAL;
1401 }
1402 mtx_lock(&filterops_lock);
1403 if (sysfilt_ops[~filt].for_fop != &null_filtops &&
1404 sysfilt_ops[~filt].for_fop != NULL)
1405 error = EEXIST;
1406 else {
1407 sysfilt_ops[~filt].for_fop = filtops;
1408 sysfilt_ops[~filt].for_refcnt = 0;
1409 }
1410 mtx_unlock(&filterops_lock);
1411
1412 return (error);
1413 }
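
/*
 * Note on the indexing above and below (illustrative): EVFILT_* constants
 * are small negative integers, so ~filt maps them onto consecutive
 * sysfilt_ops[] slots; EVFILT_READ == -1 selects sysfilt_ops[~(-1)], i.e.
 * slot 0.  A filter-providing module would register itself with something
 * like:
 *
 *	error = kqueue_add_filteropts(EVFILT_LIO, &lio_filtops);
 */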
1414
1415 int
1416 kqueue_del_filteropts(int filt)
1417 {
1418 int error;
1419
1420 error = 0;
1421 if (filt > 0 || filt + EVFILT_SYSCOUNT < 0)
1422 return EINVAL;
1423
1424 mtx_lock(&filterops_lock);
1425 if (sysfilt_ops[~filt].for_fop == &null_filtops ||
1426 sysfilt_ops[~filt].for_fop == NULL)
1427 error = EINVAL;
1428 else if (sysfilt_ops[~filt].for_refcnt != 0)
1429 error = EBUSY;
1430 else {
1431 sysfilt_ops[~filt].for_fop = &null_filtops;
1432 sysfilt_ops[~filt].for_refcnt = 0;
1433 }
1434 mtx_unlock(&filterops_lock);
1435
1436 return error;
1437 }
1438
1439 static struct filterops *
1440 kqueue_fo_find(int filt)
1441 {
1442
1443 if (filt > 0 || filt + EVFILT_SYSCOUNT < 0)
1444 return NULL;
1445
1446 if (sysfilt_ops[~filt].for_nolock)
1447 return sysfilt_ops[~filt].for_fop;
1448
1449 mtx_lock(&filterops_lock);
1450 sysfilt_ops[~filt].for_refcnt++;
1451 if (sysfilt_ops[~filt].for_fop == NULL)
1452 sysfilt_ops[~filt].for_fop = &null_filtops;
1453 mtx_unlock(&filterops_lock);
1454
1455 return sysfilt_ops[~filt].for_fop;
1456 }
1457
1458 static void
1459 kqueue_fo_release(int filt)
1460 {
1461
1462 if (filt > 0 || filt + EVFILT_SYSCOUNT < 0)
1463 return;
1464
1465 if (sysfilt_ops[~filt].for_nolock)
1466 return;
1467
1468 mtx_lock(&filterops_lock);
1469 KASSERT(sysfilt_ops[~filt].for_refcnt > 0,
1470 ("filter object refcount not valid on release"));
1471 sysfilt_ops[~filt].for_refcnt--;
1472 mtx_unlock(&filterops_lock);
1473 }
1474
1475 /*
1476 * A ref to kq (obtained via kqueue_acquire) must be held.
1477 */
1478 static int
1479 kqueue_register(struct kqueue *kq, struct kevent *kev, struct thread *td,
1480 int mflag)
1481 {
1482 struct filterops *fops;
1483 struct file *fp;
1484 struct knote *kn, *tkn;
1485 struct knlist *knl;
1486 int error, filt, event;
1487 int haskqglobal, filedesc_unlock;
1488
1489 if ((kev->flags & (EV_ENABLE | EV_DISABLE)) == (EV_ENABLE | EV_DISABLE))
1490 return (EINVAL);
1491
1492 fp = NULL;
1493 kn = NULL;
1494 knl = NULL;
1495 error = 0;
1496 haskqglobal = 0;
1497 filedesc_unlock = 0;
1498
1499 filt = kev->filter;
1500 fops = kqueue_fo_find(filt);
1501 if (fops == NULL)
1502 return EINVAL;
1503
1504 if (kev->flags & EV_ADD) {
1505 /*
1506 * Prevent waiting with locks. Non-sleepable
1507 * allocation failures are handled in the loop, only
1508 * if the spare knote appears to be actually required.
1509 */
1510 tkn = knote_alloc(mflag);
1511 } else {
1512 tkn = NULL;
1513 }
1514
1515 findkn:
1516 if (fops->f_isfd) {
1517 KASSERT(td != NULL, ("td is NULL"));
1518 if (kev->ident > INT_MAX)
1519 error = EBADF;
1520 else
1521 error = fget(td, kev->ident, &cap_event_rights, &fp);
1522 if (error)
1523 goto done;
1524
1525 if ((kev->flags & EV_ADD) == EV_ADD && kqueue_expand(kq, fops,
1526 kev->ident, M_NOWAIT) != 0) {
1527 /* try again */
1528 fdrop(fp, td);
1529 fp = NULL;
1530 error = kqueue_expand(kq, fops, kev->ident, mflag);
1531 if (error)
1532 goto done;
1533 goto findkn;
1534 }
1535
1536 if (fp->f_type == DTYPE_KQUEUE) {
1537 /*
1538 * If we add some intelligence about what we are doing,
1539 * we should be able to support events on ourselves.
1540 * We need to know when we are doing this to prevent
1541 * getting both the knlist lock and the kq lock since
1542 * they are the same thing.
1543 */
1544 if (fp->f_data == kq) {
1545 error = EINVAL;
1546 goto done;
1547 }
1548
1549 /*
1550 * Pre-lock the filedesc before the global
1551 * lock mutex, see the comment in
1552 * kqueue_close().
1553 */
1554 FILEDESC_XLOCK(td->td_proc->p_fd);
1555 filedesc_unlock = 1;
1556 KQ_GLOBAL_LOCK(&kq_global, haskqglobal);
1557 }
1558
1559 KQ_LOCK(kq);
1560 if (kev->ident < kq->kq_knlistsize) {
1561 SLIST_FOREACH(kn, &kq->kq_knlist[kev->ident], kn_link)
1562 if (kev->filter == kn->kn_filter)
1563 break;
1564 }
1565 } else {
1566 if ((kev->flags & EV_ADD) == EV_ADD) {
1567 error = kqueue_expand(kq, fops, kev->ident, mflag);
1568 if (error != 0)
1569 goto done;
1570 }
1571
1572 KQ_LOCK(kq);
1573
1574 /*
1575 * If possible, find an existing knote to use for this kevent.
1576 */
1577 if (kev->filter == EVFILT_PROC &&
1578 (kev->flags & (EV_FLAG1 | EV_FLAG2)) != 0) {
1579 /* This is an internal creation of a process tracking
1580 * note. Don't attempt to coalesce this with an
1581 * existing note.
1582 */
1583 ;
1584 } else if (kq->kq_knhashmask != 0) {
1585 struct klist *list;
1586
1587 list = &kq->kq_knhash[
1588 KN_HASH((u_long)kev->ident, kq->kq_knhashmask)];
1589 SLIST_FOREACH(kn, list, kn_link)
1590 if (kev->ident == kn->kn_id &&
1591 kev->filter == kn->kn_filter)
1592 break;
1593 }
1594 }
1595
1596 /* knote is in the process of changing, wait for it to stabilize. */
1597 if (kn != NULL && kn_in_flux(kn)) {
1598 KQ_GLOBAL_UNLOCK(&kq_global, haskqglobal);
1599 if (filedesc_unlock) {
1600 FILEDESC_XUNLOCK(td->td_proc->p_fd);
1601 filedesc_unlock = 0;
1602 }
1603 kq->kq_state |= KQ_FLUXWAIT;
1604 msleep(kq, &kq->kq_lock, PSOCK | PDROP, "kqflxwt", 0);
1605 if (fp != NULL) {
1606 fdrop(fp, td);
1607 fp = NULL;
1608 }
1609 goto findkn;
1610 }
1611
1612 /*
1613 * kn now contains the matching knote, or NULL if no match
1614 */
1615 if (kn == NULL) {
1616 if (kev->flags & EV_ADD) {
1617 kn = tkn;
1618 tkn = NULL;
1619 if (kn == NULL) {
1620 KQ_UNLOCK(kq);
1621 error = ENOMEM;
1622 goto done;
1623 }
1624 kn->kn_fp = fp;
1625 kn->kn_kq = kq;
1626 kn->kn_fop = fops;
1627 /*
1628 * apply reference counts to knote structure, and
1629 * do not release it at the end of this routine.
1630 */
1631 fops = NULL;
1632 fp = NULL;
1633
1634 kn->kn_sfflags = kev->fflags;
1635 kn->kn_sdata = kev->data;
1636 kev->fflags = 0;
1637 kev->data = 0;
1638 kn->kn_kevent = *kev;
1639 kn->kn_kevent.flags &= ~(EV_ADD | EV_DELETE |
1640 EV_ENABLE | EV_DISABLE | EV_FORCEONESHOT);
1641 kn->kn_status = KN_DETACHED;
1642 if ((kev->flags & EV_DISABLE) != 0)
1643 kn->kn_status |= KN_DISABLED;
1644 kn_enter_flux(kn);
1645
1646 error = knote_attach(kn, kq);
1647 KQ_UNLOCK(kq);
1648 if (error != 0) {
1649 tkn = kn;
1650 goto done;
1651 }
1652
1653 if ((error = kn->kn_fop->f_attach(kn)) != 0) {
1654 knote_drop_detached(kn, td);
1655 goto done;
1656 }
1657 knl = kn_list_lock(kn);
1658 goto done_ev_add;
1659 } else {
1660 /* No matching knote and the EV_ADD flag is not set. */
1661 KQ_UNLOCK(kq);
1662 error = ENOENT;
1663 goto done;
1664 }
1665 }
1666
1667 if (kev->flags & EV_DELETE) {
1668 kn_enter_flux(kn);
1669 KQ_UNLOCK(kq);
1670 knote_drop(kn, td);
1671 goto done;
1672 }
1673
1674 if (kev->flags & EV_FORCEONESHOT) {
1675 kn->kn_flags |= EV_ONESHOT;
1676 KNOTE_ACTIVATE(kn, 1);
1677 }
1678
1679 if ((kev->flags & EV_ENABLE) != 0)
1680 kn->kn_status &= ~KN_DISABLED;
1681 else if ((kev->flags & EV_DISABLE) != 0)
1682 kn->kn_status |= KN_DISABLED;
1683
1684 /*
1685 * The user may change some filter values after the initial EV_ADD,
1686 * but doing so will not reset any filter which has already been
1687 * triggered.
1688 */
1689 kn->kn_status |= KN_SCAN;
1690 kn_enter_flux(kn);
1691 KQ_UNLOCK(kq);
1692 knl = kn_list_lock(kn);
1693 kn->kn_kevent.udata = kev->udata;
1694 if (!fops->f_isfd && fops->f_touch != NULL) {
1695 fops->f_touch(kn, kev, EVENT_REGISTER);
1696 } else {
1697 kn->kn_sfflags = kev->fflags;
1698 kn->kn_sdata = kev->data;
1699 }
1700
1701 done_ev_add:
1702 /*
1703 * We can get here with kn->kn_knlist == NULL. This can happen when
1704 * the initial attach event decides that the event is "completed"
1705 * already, e.g., filt_procattach() is called on a zombie process. It
1706 * will call filt_proc() which will remove it from the list, and NULL
1707 * kn_knlist.
1708 *
1709 * KN_DISABLED will be stable while the knote is in flux, so the
1710 * unlocked read will not race with an update.
1711 */
1712 if ((kn->kn_status & KN_DISABLED) == 0)
1713 event = kn->kn_fop->f_event(kn, 0);
1714 else
1715 event = 0;
1716
1717 KQ_LOCK(kq);
1718 if (event)
1719 kn->kn_status |= KN_ACTIVE;
1720 if ((kn->kn_status & (KN_ACTIVE | KN_DISABLED | KN_QUEUED)) ==
1721 KN_ACTIVE)
1722 knote_enqueue(kn);
1723 kn->kn_status &= ~KN_SCAN;
1724 kn_leave_flux(kn);
1725 kn_list_unlock(knl);
1726 KQ_UNLOCK_FLUX(kq);
1727
1728 done:
1729 KQ_GLOBAL_UNLOCK(&kq_global, haskqglobal);
1730 if (filedesc_unlock)
1731 FILEDESC_XUNLOCK(td->td_proc->p_fd);
1732 if (fp != NULL)
1733 fdrop(fp, td);
1734 knote_free(tkn);
1735 if (fops != NULL)
1736 kqueue_fo_release(filt);
1737 return (error);
1738 }
1739
1740 static int
1741 kqueue_acquire(struct file *fp, struct kqueue **kqp)
1742 {
1743 int error;
1744 struct kqueue *kq;
1745
1746 error = 0;
1747
1748 kq = fp->f_data;
1749 if (fp->f_type != DTYPE_KQUEUE || kq == NULL)
1750 return (EBADF);
1751 *kqp = kq;
1752 KQ_LOCK(kq);
1753 if ((kq->kq_state & KQ_CLOSING) == KQ_CLOSING) {
1754 KQ_UNLOCK(kq);
1755 return (EBADF);
1756 }
1757 kq->kq_refcnt++;
1758 KQ_UNLOCK(kq);
1759
1760 return error;
1761 }
1762
1763 static void
1764 kqueue_release(struct kqueue *kq, int locked)
1765 {
1766 if (locked)
1767 KQ_OWNED(kq);
1768 else
1769 KQ_LOCK(kq);
1770 kq->kq_refcnt--;
1771 if (kq->kq_refcnt == 1)
1772 wakeup(&kq->kq_refcnt);
1773 if (!locked)
1774 KQ_UNLOCK(kq);
1775 }
1776
1777 void
1778 kqueue_drain_schedtask(void)
1779 {
1780 taskqueue_quiesce(taskqueue_kqueue_ctx);
1781 }
1782
1783 static void
1784 kqueue_schedtask(struct kqueue *kq)
1785 {
1786 struct thread *td;
1787
1788 KQ_OWNED(kq);
1789 KASSERT(((kq->kq_state & KQ_TASKDRAIN) != KQ_TASKDRAIN),
1790 ("scheduling kqueue task while draining"));
1791
1792 if ((kq->kq_state & KQ_TASKSCHED) != KQ_TASKSCHED) {
1793 taskqueue_enqueue(taskqueue_kqueue_ctx, &kq->kq_task);
1794 kq->kq_state |= KQ_TASKSCHED;
1795 td = curthread;
1796 thread_lock(td);
1797 td->td_flags |= TDF_ASTPENDING | TDF_KQTICKLED;
1798 thread_unlock(td);
1799 }
1800 }
1801
1802 /*
1803 * Expand the kq to make sure we have storage for fops/ident pair.
1804 *
1805 * Return 0 on success (or no work necessary), return errno on failure.
1806 */
1807 static int
1808 kqueue_expand(struct kqueue *kq, struct filterops *fops, uintptr_t ident,
1809 int mflag)
1810 {
1811 struct klist *list, *tmp_knhash, *to_free;
1812 u_long tmp_knhashmask;
1813 int error, fd, size;
1814
1815 KQ_NOTOWNED(kq);
1816
1817 error = 0;
1818 to_free = NULL;
1819 if (fops->f_isfd) {
1820 fd = ident;
1821 if (kq->kq_knlistsize <= fd) {
1822 size = kq->kq_knlistsize;
1823 while (size <= fd)
1824 size += KQEXTENT;
1825 list = malloc(size * sizeof(*list), M_KQUEUE, mflag);
1826 if (list == NULL)
1827 return ENOMEM;
1828 KQ_LOCK(kq);
1829 if ((kq->kq_state & KQ_CLOSING) != 0) {
1830 to_free = list;
1831 error = EBADF;
1832 } else if (kq->kq_knlistsize > fd) {
1833 to_free = list;
1834 } else {
1835 if (kq->kq_knlist != NULL) {
1836 bcopy(kq->kq_knlist, list,
1837 kq->kq_knlistsize * sizeof(*list));
1838 to_free = kq->kq_knlist;
1839 kq->kq_knlist = NULL;
1840 }
1841 bzero((caddr_t)list +
1842 kq->kq_knlistsize * sizeof(*list),
1843 (size - kq->kq_knlistsize) * sizeof(*list));
1844 kq->kq_knlistsize = size;
1845 kq->kq_knlist = list;
1846 }
1847 KQ_UNLOCK(kq);
1848 }
1849 } else {
1850 if (kq->kq_knhashmask == 0) {
1851 tmp_knhash = hashinit_flags(KN_HASHSIZE, M_KQUEUE,
1852 &tmp_knhashmask, (mflag & M_WAITOK) != 0 ?
1853 HASH_WAITOK : HASH_NOWAIT);
1854 if (tmp_knhash == NULL)
1855 return (ENOMEM);
1856 KQ_LOCK(kq);
1857 if ((kq->kq_state & KQ_CLOSING) != 0) {
1858 to_free = tmp_knhash;
1859 error = EBADF;
1860 } else if (kq->kq_knhashmask == 0) {
1861 kq->kq_knhash = tmp_knhash;
1862 kq->kq_knhashmask = tmp_knhashmask;
1863 } else {
1864 to_free = tmp_knhash;
1865 }
1866 KQ_UNLOCK(kq);
1867 }
1868 }
1869 free(to_free, M_KQUEUE);
1870
1871 KQ_NOTOWNED(kq);
1872 return (error);
1873 }
1874
1875 static void
1876 kqueue_task(void *arg, int pending)
1877 {
1878 struct kqueue *kq;
1879 int haskqglobal;
1880
1881 haskqglobal = 0;
1882 kq = arg;
1883
1884 KQ_GLOBAL_LOCK(&kq_global, haskqglobal);
1885 KQ_LOCK(kq);
1886
1887 KNOTE_LOCKED(&kq->kq_sel.si_note, 0);
1888
1889 kq->kq_state &= ~KQ_TASKSCHED;
1890 if ((kq->kq_state & KQ_TASKDRAIN) == KQ_TASKDRAIN) {
1891 wakeup(&kq->kq_state);
1892 }
1893 KQ_UNLOCK(kq);
1894 KQ_GLOBAL_UNLOCK(&kq_global, haskqglobal);
1895 }
1896
1897 /*
1898 * Scan, update kn_data (if not ONESHOT), and copyout triggered events.
1899 * We treat KN_MARKER knotes as if they are in flux.
1900 */
1901 static int
1902 kqueue_scan(struct kqueue *kq, int maxevents, struct kevent_copyops *k_ops,
1903 const struct timespec *tsp, struct kevent *keva, struct thread *td)
1904 {
1905 struct kevent *kevp;
1906 struct knote *kn, *marker;
1907 struct knlist *knl;
1908 sbintime_t asbt, rsbt;
1909 int count, error, haskqglobal, influx, nkev, touch;
1910
1911 count = maxevents;
1912 nkev = 0;
1913 error = 0;
1914 haskqglobal = 0;
1915
1916 if (maxevents == 0)
1917 goto done_nl;
1918 if (maxevents < 0) {
1919 error = EINVAL;
1920 goto done_nl;
1921 }
1922
1923 rsbt = 0;
1924 if (tsp != NULL) {
1925 if (!timespecvalid_interval(tsp)) {
1926 error = EINVAL;
1927 goto done_nl;
1928 }
1929 if (timespecisset(tsp)) {
1930 if (tsp->tv_sec <= INT32_MAX) {
1931 rsbt = tstosbt(*tsp);
1932 if (TIMESEL(&asbt, rsbt))
1933 asbt += tc_tick_sbt;
1934 if (asbt <= SBT_MAX - rsbt)
1935 asbt += rsbt;
1936 else
1937 asbt = 0;
1938 rsbt >>= tc_precexp;
1939 } else
1940 asbt = 0;
1941 } else
1942 asbt = -1;
1943 } else
1944 asbt = 0;
1945 marker = knote_alloc(M_WAITOK);
1946 marker->kn_status = KN_MARKER;
1947 KQ_LOCK(kq);
1948
1949 retry:
1950 kevp = keva;
1951 if (kq->kq_count == 0) {
1952 if (asbt == -1) {
1953 error = EWOULDBLOCK;
1954 } else {
1955 kq->kq_state |= KQ_SLEEP;
1956 error = msleep_sbt(kq, &kq->kq_lock, PSOCK | PCATCH,
1957 "kqread", asbt, rsbt, C_ABSOLUTE);
1958 }
1959 if (error == 0)
1960 goto retry;
1961 /* don't restart after signals... */
1962 if (error == ERESTART)
1963 error = EINTR;
1964 else if (error == EWOULDBLOCK)
1965 error = 0;
1966 goto done;
1967 }
1968
1969 TAILQ_INSERT_TAIL(&kq->kq_head, marker, kn_tqe);
1970 influx = 0;
1971 while (count) {
1972 KQ_OWNED(kq);
1973 kn = TAILQ_FIRST(&kq->kq_head);
1974
1975 if ((kn->kn_status == KN_MARKER && kn != marker) ||
1976 kn_in_flux(kn)) {
1977 if (influx) {
1978 influx = 0;
1979 KQ_FLUX_WAKEUP(kq);
1980 }
1981 kq->kq_state |= KQ_FLUXWAIT;
1982 error = msleep(kq, &kq->kq_lock, PSOCK,
1983 "kqflxwt", 0);
1984 continue;
1985 }
1986
1987 TAILQ_REMOVE(&kq->kq_head, kn, kn_tqe);
1988 if ((kn->kn_status & KN_DISABLED) == KN_DISABLED) {
1989 kn->kn_status &= ~KN_QUEUED;
1990 kq->kq_count--;
1991 continue;
1992 }
1993 if (kn == marker) {
1994 KQ_FLUX_WAKEUP(kq);
1995 if (count == maxevents)
1996 goto retry;
1997 goto done;
1998 }
1999 KASSERT(!kn_in_flux(kn),
2000 ("knote %p is unexpectedly in flux", kn));
2001
2002 if ((kn->kn_flags & EV_DROP) == EV_DROP) {
2003 kn->kn_status &= ~KN_QUEUED;
2004 kn_enter_flux(kn);
2005 kq->kq_count--;
2006 KQ_UNLOCK(kq);
2007 /*
2008 * We don't need to lock the list since we've
2009 * marked it as in flux.
2010 */
2011 knote_drop(kn, td);
2012 KQ_LOCK(kq);
2013 continue;
2014 } else if ((kn->kn_flags & EV_ONESHOT) == EV_ONESHOT) {
2015 kn->kn_status &= ~KN_QUEUED;
2016 kn_enter_flux(kn);
2017 kq->kq_count--;
2018 KQ_UNLOCK(kq);
2019 /*
2020 * We don't need to lock the list since we've
2021 * marked the knote as being in flux.
2022 */
2023 *kevp = kn->kn_kevent;
2024 knote_drop(kn, td);
2025 KQ_LOCK(kq);
2026 kn = NULL;
2027 } else {
2028 kn->kn_status |= KN_SCAN;
2029 kn_enter_flux(kn);
2030 KQ_UNLOCK(kq);
2031 if ((kn->kn_status & KN_KQUEUE) == KN_KQUEUE)
2032 KQ_GLOBAL_LOCK(&kq_global, haskqglobal);
2033 knl = kn_list_lock(kn);
2034 if (kn->kn_fop->f_event(kn, 0) == 0) {
2035 KQ_LOCK(kq);
2036 KQ_GLOBAL_UNLOCK(&kq_global, haskqglobal);
2037 kn->kn_status &= ~(KN_QUEUED | KN_ACTIVE |
2038 KN_SCAN);
2039 kn_leave_flux(kn);
2040 kq->kq_count--;
2041 kn_list_unlock(knl);
2042 influx = 1;
2043 continue;
2044 }
2045 touch = (!kn->kn_fop->f_isfd &&
2046 kn->kn_fop->f_touch != NULL);
2047 if (touch)
2048 kn->kn_fop->f_touch(kn, kevp, EVENT_PROCESS);
2049 else
2050 *kevp = kn->kn_kevent;
2051 KQ_LOCK(kq);
2052 KQ_GLOBAL_UNLOCK(&kq_global, haskqglobal);
2053 if (kn->kn_flags & (EV_CLEAR | EV_DISPATCH)) {
2054 /*
2055  * Manually clear knotes that were not
2056  * touched via f_touch.
2057 */
2058 if (touch == 0 && kn->kn_flags & EV_CLEAR) {
2059 kn->kn_data = 0;
2060 kn->kn_fflags = 0;
2061 }
2062 if (kn->kn_flags & EV_DISPATCH)
2063 kn->kn_status |= KN_DISABLED;
2064 kn->kn_status &= ~(KN_QUEUED | KN_ACTIVE);
2065 kq->kq_count--;
2066 } else
2067 TAILQ_INSERT_TAIL(&kq->kq_head, kn, kn_tqe);
2068
2069 kn->kn_status &= ~KN_SCAN;
2070 kn_leave_flux(kn);
2071 kn_list_unlock(knl);
2072 influx = 1;
2073 }
2074
2075 /* we are returning a copy to the user */
2076 kevp++;
2077 nkev++;
2078 count--;
2079
2080 if (nkev == KQ_NEVENTS) {
2081 influx = 0;
2082 KQ_UNLOCK_FLUX(kq);
2083 error = k_ops->k_copyout(k_ops->arg, keva, nkev);
2084 nkev = 0;
2085 kevp = keva;
2086 KQ_LOCK(kq);
2087 if (error)
2088 break;
2089 }
2090 }
2091 TAILQ_REMOVE(&kq->kq_head, marker, kn_tqe);
2092 done:
2093 KQ_OWNED(kq);
2094 KQ_UNLOCK_FLUX(kq);
2095 knote_free(marker);
2096 done_nl:
2097 KQ_NOTOWNED(kq);
2098 if (nkev != 0)
2099 error = k_ops->k_copyout(k_ops->arg, keva, nkev);
2100 td->td_retval[0] = maxevents - count;
2101 return (error);
2102 }
2103
2104 /*ARGSUSED*/
2105 static int
2106 kqueue_ioctl(struct file *fp, u_long cmd, void *data,
2107 struct ucred *active_cred, struct thread *td)
2108 {
2109 /*
2110 * Enabling sigio causes two major problems:
2111 * 1) infinite recursion:
2112  * Synopsis: kevent is being used to track signals and has FIOASYNC
2113 * set. On receipt of a signal this will cause a kqueue to recurse
2114 * into itself over and over. Sending the sigio causes the kqueue
2115 * to become ready, which in turn posts sigio again, forever.
2116 * Solution: this can be solved by setting a flag in the kqueue that
2117 * we have a SIGIO in progress.
2118 * 2) locking problems:
2119 * Synopsis: Kqueue is a leaf subsystem, but adding signalling puts
2120 * us above the proc and pgrp locks.
2121 * Solution: Post a signal using an async mechanism, being sure to
2122 * record a generation count in the delivery so that we do not deliver
2123 * a signal to the wrong process.
2124 *
2125 * Note, these two mechanisms are somewhat mutually exclusive!
2126 */
2127 #if 0
2128 struct kqueue *kq;
2129
2130 kq = fp->f_data;
2131 switch (cmd) {
2132 case FIOASYNC:
2133 if (*(int *)data) {
2134 kq->kq_state |= KQ_ASYNC;
2135 } else {
2136 kq->kq_state &= ~KQ_ASYNC;
2137 }
2138 return (0);
2139
2140 case FIOSETOWN:
2141 return (fsetown(*(int *)data, &kq->kq_sigio));
2142
2143 case FIOGETOWN:
2144 *(int *)data = fgetown(&kq->kq_sigio);
2145 return (0);
2146 }
2147 #endif
2148
2149 return (ENOTTY);
2150 }
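/*
 * The recursion described above would be triggered by a setup such as the
 * following userspace sketch: the kqueue watches SIGIO through
 * EVFILT_SIGNAL while FIOASYNC is set on the kqueue descriptor itself, so
 * every delivery would re-arm the next one.  With the support compiled out
 * (the "#if 0" block above), the handler simply returns ENOTTY.
 * Illustrative only; error handling omitted:
 *
 *	#include <sys/types.h>
 *	#include <sys/event.h>
 *	#include <sys/ioctl.h>
 *	#include <signal.h>
 *
 *	int kq = kqueue();
 *	int on = 1;
 *	struct kevent kev;
 *
 *	EV_SET(&kev, SIGIO, EVFILT_SIGNAL, EV_ADD, 0, 0, NULL);
 *	kevent(kq, &kev, 1, NULL, 0, NULL);
 *	ioctl(kq, FIOASYNC, &on);	(see the #if 0 block above)
 */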
2151
2152 /*ARGSUSED*/
2153 static int
2154 kqueue_poll(struct file *fp, int events, struct ucred *active_cred,
2155 struct thread *td)
2156 {
2157 struct kqueue *kq;
2158 int revents = 0;
2159 int error;
2160
2161 if ((error = kqueue_acquire(fp, &kq)))
2162 return POLLERR;
2163
2164 KQ_LOCK(kq);
2165 if (events & (POLLIN | POLLRDNORM)) {
2166 if (kq->kq_count) {
2167 revents |= events & (POLLIN | POLLRDNORM);
2168 } else {
2169 selrecord(td, &kq->kq_sel);
2170 if (SEL_WAITING(&kq->kq_sel))
2171 kq->kq_state |= KQ_SEL;
2172 }
2173 }
2174 kqueue_release(kq, 1);
2175 KQ_UNLOCK(kq);
2176 return (revents);
2177 }
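/*
 * Since kqueue_poll() reports POLLIN/POLLRDNORM whenever kq_count is
 * non-zero, a kqueue descriptor can itself be monitored with poll(2) or
 * select(2).  A minimal userspace sketch (illustrative only, error
 * handling omitted):
 *
 *	#include <sys/types.h>
 *	#include <sys/event.h>
 *	#include <sys/time.h>
 *	#include <poll.h>
 *
 *	static int
 *	wait_then_fetch(int kq, struct kevent *ev)
 *	{
 *		struct pollfd pfd = { .fd = kq, .events = POLLIN };
 *		struct timespec ts = { 0, 0 };
 *
 *		if (poll(&pfd, 1, -1) <= 0)
 *			return (-1);
 *		return (kevent(kq, NULL, 0, ev, 1, &ts));
 *	}
 */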
2178
2179 /*ARGSUSED*/
2180 static int
2181 kqueue_stat(struct file *fp, struct stat *st, struct ucred *active_cred,
2182 struct thread *td)
2183 {
2184
2185 bzero((void *)st, sizeof *st);
2186 /*
2187 * We no longer return kq_count because the unlocked value is useless.
2188 * If you spent all this time getting the count, why not spend your
2189 * syscall better by calling kevent?
2190 *
2191 * XXX - This is needed for libc_r.
2192 */
2193 st->st_mode = S_IFIFO;
2194 return (0);
2195 }
2196
2197 static void
2198 kqueue_drain(struct kqueue *kq, struct thread *td)
2199 {
2200 struct knote *kn;
2201 int i;
2202
2203 KQ_LOCK(kq);
2204
2205 KASSERT((kq->kq_state & KQ_CLOSING) != KQ_CLOSING,
2206 ("kqueue already closing"));
2207 kq->kq_state |= KQ_CLOSING;
2208 if (kq->kq_refcnt > 1)
2209 msleep(&kq->kq_refcnt, &kq->kq_lock, PSOCK, "kqclose", 0);
2210
2211 KASSERT(kq->kq_refcnt == 1, ("other refs are out there!"));
2212
2213 KASSERT(knlist_empty(&kq->kq_sel.si_note),
2214 ("kqueue's knlist not empty"));
2215
2216 for (i = 0; i < kq->kq_knlistsize; i++) {
2217 while ((kn = SLIST_FIRST(&kq->kq_knlist[i])) != NULL) {
2218 if (kn_in_flux(kn)) {
2219 kq->kq_state |= KQ_FLUXWAIT;
2220 msleep(kq, &kq->kq_lock, PSOCK, "kqclo1", 0);
2221 continue;
2222 }
2223 kn_enter_flux(kn);
2224 KQ_UNLOCK(kq);
2225 knote_drop(kn, td);
2226 KQ_LOCK(kq);
2227 }
2228 }
2229 if (kq->kq_knhashmask != 0) {
2230 for (i = 0; i <= kq->kq_knhashmask; i++) {
2231 while ((kn = SLIST_FIRST(&kq->kq_knhash[i])) != NULL) {
2232 if (kn_in_flux(kn)) {
2233 kq->kq_state |= KQ_FLUXWAIT;
2234 msleep(kq, &kq->kq_lock, PSOCK,
2235 "kqclo2", 0);
2236 continue;
2237 }
2238 kn_enter_flux(kn);
2239 KQ_UNLOCK(kq);
2240 knote_drop(kn, td);
2241 KQ_LOCK(kq);
2242 }
2243 }
2244 }
2245
2246 if ((kq->kq_state & KQ_TASKSCHED) == KQ_TASKSCHED) {
2247 kq->kq_state |= KQ_TASKDRAIN;
2248 msleep(&kq->kq_state, &kq->kq_lock, PSOCK, "kqtqdr", 0);
2249 }
2250
2251 if ((kq->kq_state & KQ_SEL) == KQ_SEL) {
2252 selwakeuppri(&kq->kq_sel, PSOCK);
2253 if (!SEL_WAITING(&kq->kq_sel))
2254 kq->kq_state &= ~KQ_SEL;
2255 }
2256
2257 KQ_UNLOCK(kq);
2258 }
2259
2260 static void
2261 kqueue_destroy(struct kqueue *kq)
2262 {
2263
2264 KASSERT(kq->kq_fdp == NULL,
2265 ("kqueue still attached to a file descriptor"));
2266 seldrain(&kq->kq_sel);
2267 knlist_destroy(&kq->kq_sel.si_note);
2268 mtx_destroy(&kq->kq_lock);
2269
2270 if (kq->kq_knhash != NULL)
2271 free(kq->kq_knhash, M_KQUEUE);
2272 if (kq->kq_knlist != NULL)
2273 free(kq->kq_knlist, M_KQUEUE);
2274
2275 funsetown(&kq->kq_sigio);
2276 }
2277
2278 /*ARGSUSED*/
2279 static int
2280 kqueue_close(struct file *fp, struct thread *td)
2281 {
2282 struct kqueue *kq = fp->f_data;
2283 struct filedesc *fdp;
2284 int error;
2285 int filedesc_unlock;
2286
2287 if ((error = kqueue_acquire(fp, &kq)))
2288 return error;
2289 kqueue_drain(kq, td);
2290
2291 /*
2292 * We could be called due to the knote_drop() doing fdrop(),
2293 * called from kqueue_register(). In this case the global
2294 * lock is owned, and the filedesc sx is locked beforehand, so as not
2295 * to take the sleepable lock after the non-sleepable one.
2296 */
2297 fdp = kq->kq_fdp;
2298 kq->kq_fdp = NULL;
2299 if (!sx_xlocked(FILEDESC_LOCK(fdp))) {
2300 FILEDESC_XLOCK(fdp);
2301 filedesc_unlock = 1;
2302 } else
2303 filedesc_unlock = 0;
2304 TAILQ_REMOVE(&fdp->fd_kqlist, kq, kq_list);
2305 if (filedesc_unlock)
2306 FILEDESC_XUNLOCK(fdp);
2307
2308 kqueue_destroy(kq);
2309 chgkqcnt(kq->kq_cred->cr_ruidinfo, -1, 0);
2310 crfree(kq->kq_cred);
2311 free(kq, M_KQUEUE);
2312 fp->f_data = NULL;
2313
2314 return (0);
2315 }
2316
2317 static int
2318 kqueue_fill_kinfo(struct file *fp, struct kinfo_file *kif, struct filedesc *fdp)
2319 {
2320 struct kqueue *kq = fp->f_data;
2321
2322 kif->kf_type = KF_TYPE_KQUEUE;
2323 kif->kf_un.kf_kqueue.kf_kqueue_addr = (uintptr_t)kq;
2324 kif->kf_un.kf_kqueue.kf_kqueue_count = kq->kq_count;
2325 kif->kf_un.kf_kqueue.kf_kqueue_state = kq->kq_state;
2326 return (0);
2327 }
2328
2329 static void
2330 kqueue_wakeup(struct kqueue *kq)
2331 {
2332 KQ_OWNED(kq);
2333
2334 if ((kq->kq_state & KQ_SLEEP) == KQ_SLEEP) {
2335 kq->kq_state &= ~KQ_SLEEP;
2336 wakeup(kq);
2337 }
2338 if ((kq->kq_state & KQ_SEL) == KQ_SEL) {
2339 selwakeuppri(&kq->kq_sel, PSOCK);
2340 if (!SEL_WAITING(&kq->kq_sel))
2341 kq->kq_state &= ~KQ_SEL;
2342 }
2343 if (!knlist_empty(&kq->kq_sel.si_note))
2344 kqueue_schedtask(kq);
2345 if ((kq->kq_state & KQ_ASYNC) == KQ_ASYNC) {
2346 pgsigio(&kq->kq_sigio, SIGIO, 0);
2347 }
2348 }
2349
2350 /*
2351 * Walk down a list of knotes, activating them if their event has triggered.
2352 *
2353 * There is a possibility to optimize in the case of one kq watching another.
2354 * Instead of scheduling a task to wake it up, you could pass enough state
2355 * down the chain to wake up the parent kqueue. Make this code functional
2356 * first.
2357 */
2358 void
2359 knote(struct knlist *list, long hint, int lockflags)
2360 {
2361 struct kqueue *kq;
2362 struct knote *kn, *tkn;
2363 int error;
2364
2365 if (list == NULL)
2366 return;
2367
2368 KNL_ASSERT_LOCK(list, lockflags & KNF_LISTLOCKED);
2369
2370 if ((lockflags & KNF_LISTLOCKED) == 0)
2371 list->kl_lock(list->kl_lockarg);
2372
2373 /*
2374 * If we unlock the list lock (and enter influx), we can
2375 * eliminate the kqueue scheduling, but this will introduce
2376 * four lock/unlock's for each knote to test. Also, marker
2377 * would be needed to keep iteration position, since filters
2378 * or other threads could remove events.
2379 */
2380 SLIST_FOREACH_SAFE(kn, &list->kl_list, kn_selnext, tkn) {
2381 kq = kn->kn_kq;
2382 KQ_LOCK(kq);
2383 if (kn_in_flux(kn) && (kn->kn_status & KN_SCAN) == 0) {
2384 /*
2385 * Do not process the influx notes, except for
2386 * the influx coming from the kq unlock in the
2387 * kqueue_scan(). In the latter case, we do
2388 * not interfere with the scan, since the code
2389 * fragment in kqueue_scan() locks the knlist,
2390 * and cannot proceed until we have finished.
2391 */
2392 KQ_UNLOCK(kq);
2393 } else if ((lockflags & KNF_NOKQLOCK) != 0) {
2394 kn_enter_flux(kn);
2395 KQ_UNLOCK(kq);
2396 error = kn->kn_fop->f_event(kn, hint);
2397 KQ_LOCK(kq);
2398 kn_leave_flux(kn);
2399 if (error)
2400 KNOTE_ACTIVATE(kn, 1);
2401 KQ_UNLOCK_FLUX(kq);
2402 } else {
2403 if (kn->kn_fop->f_event(kn, hint))
2404 KNOTE_ACTIVATE(kn, 1);
2405 KQ_UNLOCK(kq);
2406 }
2407 }
2408 if ((lockflags & KNF_LISTLOCKED) == 0)
2409 list->kl_unlock(list->kl_lockarg);
2410 }
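/*
 * knote() is normally reached through the KNOTE_LOCKED()/KNOTE_UNLOCKED()
 * wrappers from <sys/event.h>.  A hedged sketch of how a driver might post
 * readability events; "foo_softc", its mutex and its selinfo are
 * hypothetical names, not from this file, and the knlist is assumed to
 * have been set up with knlist_init_mtx() on sc_mtx (see the sketch after
 * knlist_init_rw_reader() below):
 *
 *	struct foo_softc {
 *		struct mtx	sc_mtx;
 *		struct selinfo	sc_rsel;	(si_note holds the knlist)
 *	};
 *
 *	static void
 *	foo_data_ready(struct foo_softc *sc)
 *	{
 *		mtx_lock(&sc->sc_mtx);
 *		selwakeuppri(&sc->sc_rsel, PZERO + 1);
 *		KNOTE_LOCKED(&sc->sc_rsel.si_note, 0);
 *		mtx_unlock(&sc->sc_mtx);
 *	}
 *
 * KNOTE_LOCKED() is used because the list lock (sc_mtx here) is already
 * held; KNOTE_UNLOCKED() would take and drop it internally.
 */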
2411
2412 /*
2413 * add a knote to a knlist
2414 */
2415 void
2416 knlist_add(struct knlist *knl, struct knote *kn, int islocked)
2417 {
2418
2419 KNL_ASSERT_LOCK(knl, islocked);
2420 KQ_NOTOWNED(kn->kn_kq);
2421 KASSERT(kn_in_flux(kn), ("knote %p not in flux", kn));
2422 KASSERT((kn->kn_status & KN_DETACHED) != 0,
2423 ("knote %p was not detached", kn));
2424 if (!islocked)
2425 knl->kl_lock(knl->kl_lockarg);
2426 SLIST_INSERT_HEAD(&knl->kl_list, kn, kn_selnext);
2427 if (!islocked)
2428 knl->kl_unlock(knl->kl_lockarg);
2429 KQ_LOCK(kn->kn_kq);
2430 kn->kn_knlist = knl;
2431 kn->kn_status &= ~KN_DETACHED;
2432 KQ_UNLOCK(kn->kn_kq);
2433 }
2434
2435 static void
2436 knlist_remove_kq(struct knlist *knl, struct knote *kn, int knlislocked,
2437 int kqislocked)
2438 {
2439
2440 KASSERT(!kqislocked || knlislocked, ("kq locked w/o knl locked"));
2441 KNL_ASSERT_LOCK(knl, knlislocked);
2442 mtx_assert(&kn->kn_kq->kq_lock, kqislocked ? MA_OWNED : MA_NOTOWNED);
2443 KASSERT(kqislocked || kn_in_flux(kn), ("knote %p not in flux", kn));
2444 KASSERT((kn->kn_status & KN_DETACHED) == 0,
2445 ("knote %p was already detached", kn));
2446 if (!knlislocked)
2447 knl->kl_lock(knl->kl_lockarg);
2448 SLIST_REMOVE(&knl->kl_list, kn, knote, kn_selnext);
2449 kn->kn_knlist = NULL;
2450 if (!knlislocked)
2451 kn_list_unlock(knl);
2452 if (!kqislocked)
2453 KQ_LOCK(kn->kn_kq);
2454 kn->kn_status |= KN_DETACHED;
2455 if (!kqislocked)
2456 KQ_UNLOCK(kn->kn_kq);
2457 }
2458
2459 /*
2460 * remove knote from the specified knlist
2461 */
2462 void
2463 knlist_remove(struct knlist *knl, struct knote *kn, int islocked)
2464 {
2465
2466 knlist_remove_kq(knl, kn, islocked, 0);
2467 }
2468
2469 int
2470 knlist_empty(struct knlist *knl)
2471 {
2472
2473 KNL_ASSERT_LOCKED(knl);
2474 return (SLIST_EMPTY(&knl->kl_list));
2475 }
2476
2477 static struct mtx knlist_lock;
2478 MTX_SYSINIT(knlist_lock, &knlist_lock, "knlist lock for lockless objects",
2479 MTX_DEF);
2480 static void knlist_mtx_lock(void *arg);
2481 static void knlist_mtx_unlock(void *arg);
2482
2483 static void
2484 knlist_mtx_lock(void *arg)
2485 {
2486
2487 mtx_lock((struct mtx *)arg);
2488 }
2489
2490 static void
2491 knlist_mtx_unlock(void *arg)
2492 {
2493
2494 mtx_unlock((struct mtx *)arg);
2495 }
2496
2497 static void
2498 knlist_mtx_assert_lock(void *arg, int what)
2499 {
2500
2501 if (what == LA_LOCKED)
2502 mtx_assert((struct mtx *)arg, MA_OWNED);
2503 else
2504 mtx_assert((struct mtx *)arg, MA_NOTOWNED);
2505 }
2506
2507 static void
2508 knlist_rw_rlock(void *arg)
2509 {
2510
2511 rw_rlock((struct rwlock *)arg);
2512 }
2513
2514 static void
2515 knlist_rw_runlock(void *arg)
2516 {
2517
2518 rw_runlock((struct rwlock *)arg);
2519 }
2520
2521 static void
2522 knlist_rw_assert_lock(void *arg, int what)
2523 {
2524
2525 if (what == LA_LOCKED)
2526 rw_assert((struct rwlock *)arg, RA_LOCKED);
2527 else
2528 rw_assert((struct rwlock *)arg, RA_UNLOCKED);
2529 }
2530
2531 void
2532 knlist_init(struct knlist *knl, void *lock, void (*kl_lock)(void *),
2533 void (*kl_unlock)(void *),
2534 void (*kl_assert_lock)(void *, int))
2535 {
2536
2537 if (lock == NULL)
2538 knl->kl_lockarg = &knlist_lock;
2539 else
2540 knl->kl_lockarg = lock;
2541
2542 if (kl_lock == NULL)
2543 knl->kl_lock = knlist_mtx_lock;
2544 else
2545 knl->kl_lock = kl_lock;
2546 if (kl_unlock == NULL)
2547 knl->kl_unlock = knlist_mtx_unlock;
2548 else
2549 knl->kl_unlock = kl_unlock;
2550 if (kl_assert_lock == NULL)
2551 knl->kl_assert_lock = knlist_mtx_assert_lock;
2552 else
2553 knl->kl_assert_lock = kl_assert_lock;
2554
2555 knl->kl_autodestroy = 0;
2556 SLIST_INIT(&knl->kl_list);
2557 }
2558
2559 void
2560 knlist_init_mtx(struct knlist *knl, struct mtx *lock)
2561 {
2562
2563 knlist_init(knl, lock, NULL, NULL, NULL);
2564 }
2565
2566 struct knlist *
2567 knlist_alloc(struct mtx *lock)
2568 {
2569 struct knlist *knl;
2570
2571 knl = malloc(sizeof(struct knlist), M_KQUEUE, M_WAITOK);
2572 knlist_init_mtx(knl, lock);
2573 return (knl);
2574 }
2575
2576 void
2577 knlist_init_rw_reader(struct knlist *knl, struct rwlock *lock)
2578 {
2579
2580 knlist_init(knl, lock, knlist_rw_rlock, knlist_rw_runlock,
2581 knlist_rw_assert_lock);
2582 }
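/*
 * A typical consumer pairs knlist_init_mtx() at attach time with
 * knlist_clear() and knlist_destroy() at detach time.  A hedged sketch
 * using the same hypothetical foo_softc as above:
 *
 *	static void
 *	foo_attach_events(struct foo_softc *sc)
 *	{
 *		mtx_init(&sc->sc_mtx, "foo events", NULL, MTX_DEF);
 *		knlist_init_mtx(&sc->sc_rsel.si_note, &sc->sc_mtx);
 *	}
 *
 *	static void
 *	foo_detach_events(struct foo_softc *sc)
 *	{
 *		knlist_clear(&sc->sc_rsel.si_note, 0);
 *		seldrain(&sc->sc_rsel);
 *		knlist_destroy(&sc->sc_rsel.si_note);
 *		mtx_destroy(&sc->sc_mtx);
 *	}
 *
 * knlist_clear() is the knlist_cleardel() wrapper that marks remaining
 * knotes EV_EOF | EV_ONESHOT rather than dropping them outright.
 */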
2583
2584 void
2585 knlist_destroy(struct knlist *knl)
2586 {
2587
2588 KASSERT(KNLIST_EMPTY(knl),
2589 ("destroying knlist %p with knotes on it", knl));
2590 }
2591
2592 void
2593 knlist_detach(struct knlist *knl)
2594 {
2595
2596 KNL_ASSERT_LOCKED(knl);
2597 knl->kl_autodestroy = 1;
2598 if (knlist_empty(knl)) {
2599 knlist_destroy(knl);
2600 free(knl, M_KQUEUE);
2601 }
2602 }
2603
2604 /*
2605 * Even if we are locked, we may need to drop the lock to allow any influx
2606 * knotes time to "settle".
2607 */
2608 void
2609 knlist_cleardel(struct knlist *knl, struct thread *td, int islocked, int killkn)
2610 {
2611 struct knote *kn, *kn2;
2612 struct kqueue *kq;
2613
2614 KASSERT(!knl->kl_autodestroy, ("cleardel for autodestroy %p", knl));
2615 if (islocked)
2616 KNL_ASSERT_LOCKED(knl);
2617 else {
2618 KNL_ASSERT_UNLOCKED(knl);
2619 again: /* need to reacquire lock since we have dropped it */
2620 knl->kl_lock(knl->kl_lockarg);
2621 }
2622
2623 SLIST_FOREACH_SAFE(kn, &knl->kl_list, kn_selnext, kn2) {
2624 kq = kn->kn_kq;
2625 KQ_LOCK(kq);
2626 if (kn_in_flux(kn)) {
2627 KQ_UNLOCK(kq);
2628 continue;
2629 }
2630 knlist_remove_kq(knl, kn, 1, 1);
2631 if (killkn) {
2632 kn_enter_flux(kn);
2633 KQ_UNLOCK(kq);
2634 knote_drop_detached(kn, td);
2635 } else {
2636 /* Make sure cleared knotes disappear soon */
2637 kn->kn_flags |= EV_EOF | EV_ONESHOT;
2638 KQ_UNLOCK(kq);
2639 }
2640 kq = NULL;
2641 }
2642
2643 if (!SLIST_EMPTY(&knl->kl_list)) {
2644 /* there are still in-flux knotes remaining */
2645 kn = SLIST_FIRST(&knl->kl_list);
2646 kq = kn->kn_kq;
2647 KQ_LOCK(kq);
2648 KASSERT(kn_in_flux(kn), ("knote removed w/o list lock"));
2649 knl->kl_unlock(knl->kl_lockarg);
2650 kq->kq_state |= KQ_FLUXWAIT;
2651 msleep(kq, &kq->kq_lock, PSOCK | PDROP, "kqkclr", 0);
2652 kq = NULL;
2653 goto again;
2654 }
2655
2656 if (islocked)
2657 KNL_ASSERT_LOCKED(knl);
2658 else {
2659 knl->kl_unlock(knl->kl_lockarg);
2660 KNL_ASSERT_UNLOCKED(knl);
2661 }
2662 }
2663
2664 /*
2665 * Remove all knotes referencing a specified fd; this must be called with the
2666 * FILEDESC lock held. That prevents a race where a new fd comes along and
2667 * occupies the entry and we attach a knote to the fd.
2668 */
2669 void
2670 knote_fdclose(struct thread *td, int fd)
2671 {
2672 struct filedesc *fdp = td->td_proc->p_fd;
2673 struct kqueue *kq;
2674 struct knote *kn;
2675 int influx;
2676
2677 FILEDESC_XLOCK_ASSERT(fdp);
2678
2679 /*
2680 * We shouldn't have to worry about new kevents appearing on fd
2681 * since filedesc is locked.
2682 */
2683 TAILQ_FOREACH(kq, &fdp->fd_kqlist, kq_list) {
2684 KQ_LOCK(kq);
2685
2686 again:
2687 influx = 0;
2688 while (kq->kq_knlistsize > fd &&
2689 (kn = SLIST_FIRST(&kq->kq_knlist[fd])) != NULL) {
2690 if (kn_in_flux(kn)) {
2691 /* someone else might be waiting on our knote */
2692 if (influx)
2693 wakeup(kq);
2694 kq->kq_state |= KQ_FLUXWAIT;
2695 msleep(kq, &kq->kq_lock, PSOCK, "kqflxwt", 0);
2696 goto again;
2697 }
2698 kn_enter_flux(kn);
2699 KQ_UNLOCK(kq);
2700 influx = 1;
2701 knote_drop(kn, td);
2702 KQ_LOCK(kq);
2703 }
2704 KQ_UNLOCK_FLUX(kq);
2705 }
2706 }
2707
2708 static int
2709 knote_attach(struct knote *kn, struct kqueue *kq)
2710 {
2711 struct klist *list;
2712
2713 KASSERT(kn_in_flux(kn), ("knote %p not marked influx", kn));
2714 KQ_OWNED(kq);
2715
2716 if ((kq->kq_state & KQ_CLOSING) != 0)
2717 return (EBADF);
2718 if (kn->kn_fop->f_isfd) {
2719 if (kn->kn_id >= kq->kq_knlistsize)
2720 return (ENOMEM);
2721 list = &kq->kq_knlist[kn->kn_id];
2722 } else {
2723 if (kq->kq_knhash == NULL)
2724 return (ENOMEM);
2725 list = &kq->kq_knhash[KN_HASH(kn->kn_id, kq->kq_knhashmask)];
2726 }
2727 SLIST_INSERT_HEAD(list, kn, kn_link);
2728 return (0);
2729 }
2730
2731 static void
2732 knote_drop(struct knote *kn, struct thread *td)
2733 {
2734
2735 if ((kn->kn_status & KN_DETACHED) == 0)
2736 kn->kn_fop->f_detach(kn);
2737 knote_drop_detached(kn, td);
2738 }
2739
2740 static void
2741 knote_drop_detached(struct knote *kn, struct thread *td)
2742 {
2743 struct kqueue *kq;
2744 struct klist *list;
2745
2746 kq = kn->kn_kq;
2747
2748 KASSERT((kn->kn_status & KN_DETACHED) != 0,
2749 ("knote %p still attached", kn));
2750 KQ_NOTOWNED(kq);
2751
2752 KQ_LOCK(kq);
2753 KASSERT(kn->kn_influx == 1,
2754 ("knote_drop called on %p with influx %d", kn, kn->kn_influx));
2755
2756 if (kn->kn_fop->f_isfd)
2757 list = &kq->kq_knlist[kn->kn_id];
2758 else
2759 list = &kq->kq_knhash[KN_HASH(kn->kn_id, kq->kq_knhashmask)];
2760
2761 if (!SLIST_EMPTY(list))
2762 SLIST_REMOVE(list, kn, knote, kn_link);
2763 if (kn->kn_status & KN_QUEUED)
2764 knote_dequeue(kn);
2765 KQ_UNLOCK_FLUX(kq);
2766
2767 if (kn->kn_fop->f_isfd) {
2768 fdrop(kn->kn_fp, td);
2769 kn->kn_fp = NULL;
2770 }
2771 kqueue_fo_release(kn->kn_kevent.filter);
2772 kn->kn_fop = NULL;
2773 knote_free(kn);
2774 }
2775
2776 static void
2777 knote_enqueue(struct knote *kn)
2778 {
2779 struct kqueue *kq = kn->kn_kq;
2780
2781 KQ_OWNED(kn->kn_kq);
2782 KASSERT((kn->kn_status & KN_QUEUED) == 0, ("knote already queued"));
2783
2784 TAILQ_INSERT_TAIL(&kq->kq_head, kn, kn_tqe);
2785 kn->kn_status |= KN_QUEUED;
2786 kq->kq_count++;
2787 kqueue_wakeup(kq);
2788 }
2789
2790 static void
2791 knote_dequeue(struct knote *kn)
2792 {
2793 struct kqueue *kq = kn->kn_kq;
2794
2795 KQ_OWNED(kn->kn_kq);
2796 KASSERT(kn->kn_status & KN_QUEUED, ("knote not queued"));
2797
2798 TAILQ_REMOVE(&kq->kq_head, kn, kn_tqe);
2799 kn->kn_status &= ~KN_QUEUED;
2800 kq->kq_count--;
2801 }
2802
2803 static void
2804 knote_init(void)
2805 {
2806
2807 knote_zone = uma_zcreate("KNOTE", sizeof(struct knote), NULL, NULL,
2808 NULL, NULL, UMA_ALIGN_PTR, 0);
2809 }
2810 SYSINIT(knote, SI_SUB_PSEUDO, SI_ORDER_ANY, knote_init, NULL);
2811
2812 static struct knote *
2813 knote_alloc(int mflag)
2814 {
2815
2816 return (uma_zalloc(knote_zone, mflag | M_ZERO));
2817 }
2818
2819 static void
2820 knote_free(struct knote *kn)
2821 {
2822
2823 uma_zfree(knote_zone, kn);
2824 }
2825
2826 /*
2827 * Register the kev w/ the kq specified by fd.
2828 */
2829 int
2830 kqfd_register(int fd, struct kevent *kev, struct thread *td, int mflag)
2831 {
2832 struct kqueue *kq;
2833 struct file *fp;
2834 cap_rights_t rights;
2835 int error;
2836
2837 error = fget(td, fd, cap_rights_init_one(&rights, CAP_KQUEUE_CHANGE),
2838 &fp);
2839 if (error != 0)
2840 return (error);
2841 if ((error = kqueue_acquire(fp, &kq)) != 0)
2842 goto noacquire;
2843
2844 error = kqueue_register(kq, kev, td, mflag);
2845 kqueue_release(kq, 0);
2846
2847 noacquire:
2848 fdrop(fp, td);
2849 return (error);
2850 }