FreeBSD/Linux Kernel Cross Reference
sys/kern/vfs_aio.c
1 /*-
2 * Copyright (c) 1997 John S. Dyson. All rights reserved.
3 *
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions
6 * are met:
7 * 1. Redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer.
9 * 2. John S. Dyson's name may not be used to endorse or promote products
10 * derived from this software without specific prior written permission.
11 *
12 * DISCLAIMER: This code isn't warranted to do anything useful. Anything
13 * bad that happens because of using this software isn't the responsibility
14 * of the author. This software is distributed AS-IS.
15 */
16
17 /*
18 * This file contains support for the POSIX 1003.1B AIO/LIO facility.
19 */
20
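/*
 * A minimal userland usage sketch of this facility (illustrative only;
 * assumes fd is an open file descriptor, with error handling elided).
 * aio_read() queues the request, aio_error() polls its progress (using
 * aio_suspend() instead of spinning would be kinder), and aio_return()
 * reaps the final status and releases the kernel resources:
 *
 *	struct aiocb acb;
 *	char buf[512];
 *	ssize_t n;
 *
 *	memset(&acb, 0, sizeof(acb));
 *	acb.aio_fildes = fd;
 *	acb.aio_buf = buf;
 *	acb.aio_nbytes = sizeof(buf);
 *	acb.aio_offset = 0;
 *	aio_read(&acb);
 *	while (aio_error(&acb) == EINPROGRESS)
 *		;
 *	n = aio_return(&acb);
 */
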
21 #include <sys/cdefs.h>
22 __FBSDID("$FreeBSD: releng/9.1/sys/kern/vfs_aio.c 234438 2012-04-18 19:15:59Z ambrisko $");
23
24 #include "opt_compat.h"
25
26 #include <sys/param.h>
27 #include <sys/systm.h>
28 #include <sys/malloc.h>
29 #include <sys/bio.h>
30 #include <sys/buf.h>
31 #include <sys/capability.h>
32 #include <sys/eventhandler.h>
33 #include <sys/sysproto.h>
34 #include <sys/filedesc.h>
35 #include <sys/kernel.h>
36 #include <sys/module.h>
37 #include <sys/kthread.h>
38 #include <sys/fcntl.h>
39 #include <sys/file.h>
40 #include <sys/limits.h>
41 #include <sys/lock.h>
42 #include <sys/mutex.h>
43 #include <sys/unistd.h>
44 #include <sys/posix4.h>
45 #include <sys/proc.h>
46 #include <sys/resourcevar.h>
47 #include <sys/signalvar.h>
48 #include <sys/protosw.h>
49 #include <sys/sema.h>
50 #include <sys/socket.h>
51 #include <sys/socketvar.h>
52 #include <sys/syscall.h>
53 #include <sys/sysent.h>
54 #include <sys/sysctl.h>
55 #include <sys/sx.h>
56 #include <sys/taskqueue.h>
57 #include <sys/vnode.h>
58 #include <sys/conf.h>
59 #include <sys/event.h>
60 #include <sys/mount.h>
61
62 #include <machine/atomic.h>
63
64 #include <vm/vm.h>
65 #include <vm/vm_extern.h>
66 #include <vm/pmap.h>
67 #include <vm/vm_map.h>
68 #include <vm/vm_object.h>
69 #include <vm/uma.h>
70 #include <sys/aio.h>
71
72 #include "opt_vfs_aio.h"
73
74 /*
75 * Counter for allocating reference ids to new jobs. Wrapped to 1 on
76 * overflow. (XXX will be removed soon.)
77 */
78 static u_long jobrefid;
79
80 /*
81 * Counter for aio_fsync.
82 */
83 static uint64_t jobseqno;
84
85 #define JOBST_NULL 0
86 #define JOBST_JOBQSOCK 1
87 #define JOBST_JOBQGLOBAL 2
88 #define JOBST_JOBRUNNING 3
89 #define JOBST_JOBFINISHED 4
90 #define JOBST_JOBQBUF 5
91 #define JOBST_JOBQSYNC 6
92
93 #ifndef MAX_AIO_PER_PROC
94 #define MAX_AIO_PER_PROC 32
95 #endif
96
97 #ifndef MAX_AIO_QUEUE_PER_PROC
98 #define MAX_AIO_QUEUE_PER_PROC 256 /* Bigger than AIO_LISTIO_MAX */
99 #endif
100
101 #ifndef MAX_AIO_PROCS
102 #define MAX_AIO_PROCS 32
103 #endif
104
105 #ifndef MAX_AIO_QUEUE
106 #define MAX_AIO_QUEUE 1024 /* Bigger than AIO_LISTIO_MAX */
107 #endif
108
109 #ifndef TARGET_AIO_PROCS
110 #define TARGET_AIO_PROCS 4
111 #endif
112
113 #ifndef MAX_BUF_AIO
114 #define MAX_BUF_AIO 16
115 #endif
116
117 #ifndef AIOD_TIMEOUT_DEFAULT
118 #define AIOD_TIMEOUT_DEFAULT (10 * hz)
119 #endif
120
121 #ifndef AIOD_LIFETIME_DEFAULT
122 #define AIOD_LIFETIME_DEFAULT (30 * hz)
123 #endif
124
125 FEATURE(aio, "Asynchronous I/O");
126
127 static MALLOC_DEFINE(M_LIO, "lio", "listio aio control block list");
128
129 static SYSCTL_NODE(_vfs, OID_AUTO, aio, CTLFLAG_RW, 0, "Async IO management");
130
131 static int max_aio_procs = MAX_AIO_PROCS;
132 SYSCTL_INT(_vfs_aio, OID_AUTO, max_aio_procs,
133 CTLFLAG_RW, &max_aio_procs, 0,
134 "Maximum number of kernel threads to use for handling async IO ");
135
136 static int num_aio_procs = 0;
137 SYSCTL_INT(_vfs_aio, OID_AUTO, num_aio_procs,
138 CTLFLAG_RD, &num_aio_procs, 0,
139 "Number of presently active kernel threads for async IO");
140
141 /*
142 * The code will adjust the actual number of AIO processes towards this
143 * number when it gets a chance.
144 */
145 static int target_aio_procs = TARGET_AIO_PROCS;
146 SYSCTL_INT(_vfs_aio, OID_AUTO, target_aio_procs, CTLFLAG_RW, &target_aio_procs,
147 0, "Preferred number of ready kernel threads for async IO");
148
149 static int max_queue_count = MAX_AIO_QUEUE;
150 SYSCTL_INT(_vfs_aio, OID_AUTO, max_aio_queue, CTLFLAG_RW, &max_queue_count, 0,
151 "Maximum number of aio requests to queue, globally");
152
153 static int num_queue_count = 0;
154 SYSCTL_INT(_vfs_aio, OID_AUTO, num_queue_count, CTLFLAG_RD, &num_queue_count, 0,
155 "Number of queued aio requests");
156
157 static int num_buf_aio = 0;
158 SYSCTL_INT(_vfs_aio, OID_AUTO, num_buf_aio, CTLFLAG_RD, &num_buf_aio, 0,
159 "Number of aio requests presently handled by the buf subsystem");
160
161 /* Number of async I/O threads in the process of being started */
162 /* XXX This should be local to aio_aqueue() */
163 static int num_aio_resv_start = 0;
164
165 static int aiod_timeout;
166 SYSCTL_INT(_vfs_aio, OID_AUTO, aiod_timeout, CTLFLAG_RW, &aiod_timeout, 0,
167 "Timeout value for synchronous aio operations");
168
169 static int aiod_lifetime;
170 SYSCTL_INT(_vfs_aio, OID_AUTO, aiod_lifetime, CTLFLAG_RW, &aiod_lifetime, 0,
171 "Maximum lifetime for idle aiod");
172
173 static int unloadable = 0;
174 SYSCTL_INT(_vfs_aio, OID_AUTO, unloadable, CTLFLAG_RW, &unloadable, 0,
175 "Allow unload of aio (not recommended)");
176
177
178 static int max_aio_per_proc = MAX_AIO_PER_PROC;
179 SYSCTL_INT(_vfs_aio, OID_AUTO, max_aio_per_proc, CTLFLAG_RW, &max_aio_per_proc,
180 0, "Maximum active aio requests per process (stored in the process)");
181
182 static int max_aio_queue_per_proc = MAX_AIO_QUEUE_PER_PROC;
183 SYSCTL_INT(_vfs_aio, OID_AUTO, max_aio_queue_per_proc, CTLFLAG_RW,
184 &max_aio_queue_per_proc, 0,
185 "Maximum queued aio requests per process (stored in the process)");
186
187 static int max_buf_aio = MAX_BUF_AIO;
188 SYSCTL_INT(_vfs_aio, OID_AUTO, max_buf_aio, CTLFLAG_RW, &max_buf_aio, 0,
189 "Maximum buf aio requests per process (stored in the process)");
190
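/*
 * The per-process and global limits above are adjustable at run time
 * through the vfs.aio sysctl tree declared above; for example (values
 * illustrative only, not recommendations):
 *
 *	sysctl vfs.aio.max_aio_procs=64
 *	sysctl vfs.aio.max_aio_queue_per_proc=512
 */
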
191 typedef struct oaiocb {
192 int aio_fildes; /* File descriptor */
193 off_t aio_offset; /* File offset for I/O */
194 volatile void *aio_buf; /* I/O buffer in process space */
195 size_t aio_nbytes; /* Number of bytes for I/O */
196 struct osigevent aio_sigevent; /* Signal to deliver */
197 int aio_lio_opcode; /* LIO opcode */
198 int aio_reqprio; /* Request priority -- ignored */
199 struct __aiocb_private _aiocb_private;
200 } oaiocb_t;
201
202 /*
203 * Below is a key to the locks used to protect each member of struct
204 * aiocblist, aioliojob, and kaioinfo, and of any backends.
205 *
206 * * - need not be protected
207 * a - locked by the kaioinfo lock
208 * b - locked by the backend lock; the backend lock can be NULL in
209 * some cases, e.g. for BIO, in which case the proc lock is
210 * reused instead
211 * c - locked by aio_job_mtx, the lock for the generic file I/O backend.
212 */
213
214 /*
215 * Currently, there are only two backends: BIO and generic file I/O.
216 * Socket I/O is served by the generic file I/O backend, which is not a
217 * good idea: disk file I/O and any other type lacking the O_NONBLOCK
218 * flag can block the daemon threads, and if no thread is left to serve
219 * socket I/O, that I/O is delayed too long or starved. We should create
220 * threads dedicated to sockets for non-blocking I/O, and likewise for
221 * pipes and fifos; for these I/O systems we really need a non-blocking
222 * interface. Fiddling with O_NONBLOCK in the file structure is not safe
223 * because there is a race between userland and the aio daemons.
224 */
225
226 struct aiocblist {
227 TAILQ_ENTRY(aiocblist) list; /* (b) internal list for backend */
228 TAILQ_ENTRY(aiocblist) plist; /* (a) list of jobs for each backend */
229 TAILQ_ENTRY(aiocblist) allist; /* (a) list of all jobs in proc */
230 int jobflags; /* (a) job flags */
231 int jobstate; /* (b) job state */
232 int inputcharge; /* (*) input blocks */
233 int outputcharge; /* (*) output blocks */
234 struct buf *bp; /* (*) private to BIO backend,
235 * buffer pointer
236 */
237 struct proc *userproc; /* (*) user process */
238 struct ucred *cred; /* (*) active credential when created */
239 struct file *fd_file; /* (*) pointer to file structure */
240 struct aioliojob *lio; /* (*) optional lio job */
241 struct aiocb *uuaiocb; /* (*) pointer in userspace of aiocb */
242 struct knlist klist; /* (a) list of knotes */
243 struct aiocb uaiocb; /* (*) kernel I/O control block */
244 ksiginfo_t ksi; /* (a) realtime signal info */
245 struct task biotask; /* (*) private to BIO backend */
246 uint64_t seqno; /* (*) job number */
247 int pending; /* (a) number of pending I/O, aio_fsync only */
248 };
249
250 /* jobflags */
251 #define AIOCBLIST_DONE 0x01
252 #define AIOCBLIST_BUFDONE 0x02
253 #define AIOCBLIST_RUNDOWN 0x04
254 #define AIOCBLIST_CHECKSYNC 0x08
255
256 /*
257 * AIO process info
258 */
259 #define AIOP_FREE 0x1 /* proc on free queue */
260
261 struct aiothreadlist {
262 int aiothreadflags; /* (c) AIO proc flags */
263 TAILQ_ENTRY(aiothreadlist) list; /* (c) list of processes */
264 struct thread *aiothread; /* (*) the AIO thread */
265 };
266
267 /*
268 * data-structure for lio signal management
269 */
270 struct aioliojob {
271 int lioj_flags; /* (a) listio flags */
272 int lioj_count; /* (a) count of listio jobs */
273 int lioj_finished_count; /* (a) count of finished listio jobs */
274 struct sigevent lioj_signal; /* (a) signal on all I/O done */
275 TAILQ_ENTRY(aioliojob) lioj_list; /* (a) lio list */
276 struct knlist klist; /* (a) list of knotes */
277 ksiginfo_t lioj_ksi; /* (a) Realtime signal info */
278 };
279
280 #define LIOJ_SIGNAL 0x1 /* signal on all done (lio) */
281 #define LIOJ_SIGNAL_POSTED 0x2 /* signal has been posted */
282 #define LIOJ_KEVENT_POSTED 0x4 /* kevent triggered */
283
284 /*
285 * per process aio data structure
286 */
287 struct kaioinfo {
288 struct mtx kaio_mtx; /* the lock to protect this struct */
289 int kaio_flags; /* (a) per process kaio flags */
290 int kaio_maxactive_count; /* (*) maximum number of AIOs */
291 int kaio_active_count; /* (c) number of currently used AIOs */
292 int kaio_qallowed_count; /* (*) maximum size of AIO queue */
293 int kaio_count; /* (a) size of AIO queue */
294 int kaio_ballowed_count; /* (*) maximum number of buffers */
295 int kaio_buffer_count; /* (a) number of physio buffers */
296 TAILQ_HEAD(,aiocblist) kaio_all; /* (a) all AIOs in the process */
297 TAILQ_HEAD(,aiocblist) kaio_done; /* (a) done queue for process */
298 TAILQ_HEAD(,aioliojob) kaio_liojoblist; /* (a) list of lio jobs */
299 TAILQ_HEAD(,aiocblist) kaio_jobqueue; /* (a) job queue for process */
300 TAILQ_HEAD(,aiocblist) kaio_bufqueue; /* (a) buffer job queue for process */
301 TAILQ_HEAD(,aiocblist) kaio_sockqueue; /* (a) queue for aios waiting on sockets,
302 * NOT USED YET.
303 */
304 TAILQ_HEAD(,aiocblist) kaio_syncqueue; /* (a) queue for aio_fsync */
305 struct task kaio_task; /* (*) task to kick aio threads */
306 };
307
308 #define AIO_LOCK(ki) mtx_lock(&(ki)->kaio_mtx)
309 #define AIO_UNLOCK(ki) mtx_unlock(&(ki)->kaio_mtx)
310 #define AIO_LOCK_ASSERT(ki, f) mtx_assert(&(ki)->kaio_mtx, (f))
311 #define AIO_MTX(ki) (&(ki)->kaio_mtx)
312
313 #define KAIO_RUNDOWN 0x1 /* process is being run down */
314 #define KAIO_WAKEUP 0x2 /* wakeup process when there is a significant event */
315
316 /*
317 * Operations used to interact with userland aio control blocks.
318 * Different ABIs provide their own operations.
319 */
320 struct aiocb_ops {
321 int (*copyin)(struct aiocb *ujob, struct aiocb *kjob);
322 long (*fetch_status)(struct aiocb *ujob);
323 long (*fetch_error)(struct aiocb *ujob);
324 int (*store_status)(struct aiocb *ujob, long status);
325 int (*store_error)(struct aiocb *ujob, long error);
326 int (*store_kernelinfo)(struct aiocb *ujob, long jobref);
327 int (*store_aiocb)(struct aiocb **ujobp, struct aiocb *ujob);
328 };
329
330 static TAILQ_HEAD(,aiothreadlist) aio_freeproc; /* (c) Idle daemons */
331 static struct sema aio_newproc_sem;
332 static struct mtx aio_job_mtx;
333 static struct mtx aio_sock_mtx;
334 static TAILQ_HEAD(,aiocblist) aio_jobs; /* (c) Async job list */
335 static struct unrhdr *aiod_unr;
336
337 void aio_init_aioinfo(struct proc *p);
338 static int aio_onceonly(void);
339 static int aio_free_entry(struct aiocblist *aiocbe);
340 static void aio_process(struct aiocblist *aiocbe);
341 static int aio_newproc(int *);
342 int aio_aqueue(struct thread *td, struct aiocb *job,
343 struct aioliojob *lio, int type, struct aiocb_ops *ops);
344 static void aio_physwakeup(struct buf *bp);
345 static void aio_proc_rundown(void *arg, struct proc *p);
346 static void aio_proc_rundown_exec(void *arg, struct proc *p, struct image_params *imgp);
347 static int aio_qphysio(struct proc *p, struct aiocblist *iocb);
348 static void biohelper(void *, int);
349 static void aio_daemon(void *param);
350 static void aio_swake_cb(struct socket *, struct sockbuf *);
351 static int aio_unload(void);
352 static void aio_bio_done_notify(struct proc *userp, struct aiocblist *aiocbe, int type);
353 #define DONE_BUF 1
354 #define DONE_QUEUE 2
355 static int aio_kick(struct proc *userp);
356 static void aio_kick_nowait(struct proc *userp);
357 static void aio_kick_helper(void *context, int pending);
358 static int filt_aioattach(struct knote *kn);
359 static void filt_aiodetach(struct knote *kn);
360 static int filt_aio(struct knote *kn, long hint);
361 static int filt_lioattach(struct knote *kn);
362 static void filt_liodetach(struct knote *kn);
363 static int filt_lio(struct knote *kn, long hint);
364
365 /*
366 * Zones for:
367 * kaio Per process async io info
368 * aiop async io thread data
369 * aiocb async io jobs
370 * aiol list io job pointer - internal to aio_suspend XXX
371 * aiolio list io jobs
372 */
373 static uma_zone_t kaio_zone, aiop_zone, aiocb_zone, aiol_zone, aiolio_zone;
374
375 /* kqueue filters for aio */
376 static struct filterops aio_filtops = {
377 .f_isfd = 0,
378 .f_attach = filt_aioattach,
379 .f_detach = filt_aiodetach,
380 .f_event = filt_aio,
381 };
382 static struct filterops lio_filtops = {
383 .f_isfd = 0,
384 .f_attach = filt_lioattach,
385 .f_detach = filt_liodetach,
386 .f_event = filt_lio
387 };
388
389 static eventhandler_tag exit_tag, exec_tag;
390
391 TASKQUEUE_DEFINE_THREAD(aiod_bio);
392
393 /*
394 * Main operations function for use as a kernel module.
395 */
396 static int
397 aio_modload(struct module *module, int cmd, void *arg)
398 {
399 int error = 0;
400
401 switch (cmd) {
402 case MOD_LOAD:
403 aio_onceonly();
404 break;
405 case MOD_UNLOAD:
406 error = aio_unload();
407 break;
408 case MOD_SHUTDOWN:
409 break;
410 default:
411 error = EINVAL;
412 break;
413 }
414 return (error);
415 }
416
417 static moduledata_t aio_mod = {
418 "aio",
419 &aio_modload,
420 NULL
421 };
422
423 static struct syscall_helper_data aio_syscalls[] = {
424 SYSCALL_INIT_HELPER(aio_cancel),
425 SYSCALL_INIT_HELPER(aio_error),
426 SYSCALL_INIT_HELPER(aio_fsync),
427 SYSCALL_INIT_HELPER(aio_read),
428 SYSCALL_INIT_HELPER(aio_return),
429 SYSCALL_INIT_HELPER(aio_suspend),
430 SYSCALL_INIT_HELPER(aio_waitcomplete),
431 SYSCALL_INIT_HELPER(aio_write),
432 SYSCALL_INIT_HELPER(lio_listio),
433 SYSCALL_INIT_HELPER(oaio_read),
434 SYSCALL_INIT_HELPER(oaio_write),
435 SYSCALL_INIT_HELPER(olio_listio),
436 SYSCALL_INIT_LAST
437 };
438
439 #ifdef COMPAT_FREEBSD32
440 #include <sys/mount.h>
441 #include <sys/socket.h>
442 #include <compat/freebsd32/freebsd32.h>
443 #include <compat/freebsd32/freebsd32_proto.h>
444 #include <compat/freebsd32/freebsd32_signal.h>
445 #include <compat/freebsd32/freebsd32_syscall.h>
446 #include <compat/freebsd32/freebsd32_util.h>
447
448 static struct syscall_helper_data aio32_syscalls[] = {
449 SYSCALL32_INIT_HELPER(freebsd32_aio_return),
450 SYSCALL32_INIT_HELPER(freebsd32_aio_suspend),
451 SYSCALL32_INIT_HELPER(freebsd32_aio_cancel),
452 SYSCALL32_INIT_HELPER(freebsd32_aio_error),
453 SYSCALL32_INIT_HELPER(freebsd32_aio_fsync),
454 SYSCALL32_INIT_HELPER(freebsd32_aio_read),
455 SYSCALL32_INIT_HELPER(freebsd32_aio_write),
456 SYSCALL32_INIT_HELPER(freebsd32_aio_waitcomplete),
457 SYSCALL32_INIT_HELPER(freebsd32_lio_listio),
458 SYSCALL32_INIT_HELPER(freebsd32_oaio_read),
459 SYSCALL32_INIT_HELPER(freebsd32_oaio_write),
460 SYSCALL32_INIT_HELPER(freebsd32_olio_listio),
461 SYSCALL_INIT_LAST
462 };
463 #endif
464
465 DECLARE_MODULE(aio, aio_mod,
466 SI_SUB_VFS, SI_ORDER_ANY);
467 MODULE_VERSION(aio, 1);
468
469 /*
470 * Startup initialization
471 */
472 static int
473 aio_onceonly(void)
474 {
475 int error;
476
477 /* XXX: should probably just use so->callback */
478 aio_swake = &aio_swake_cb;
479 exit_tag = EVENTHANDLER_REGISTER(process_exit, aio_proc_rundown, NULL,
480 EVENTHANDLER_PRI_ANY);
481 exec_tag = EVENTHANDLER_REGISTER(process_exec, aio_proc_rundown_exec, NULL,
482 EVENTHANDLER_PRI_ANY);
483 kqueue_add_filteropts(EVFILT_AIO, &aio_filtops);
484 kqueue_add_filteropts(EVFILT_LIO, &lio_filtops);
485 TAILQ_INIT(&aio_freeproc);
486 sema_init(&aio_newproc_sem, 0, "aio_new_proc");
487 mtx_init(&aio_job_mtx, "aio_job", NULL, MTX_DEF);
488 mtx_init(&aio_sock_mtx, "aio_sock", NULL, MTX_DEF);
489 TAILQ_INIT(&aio_jobs);
490 aiod_unr = new_unrhdr(1, INT_MAX, NULL);
491 kaio_zone = uma_zcreate("AIO", sizeof(struct kaioinfo), NULL, NULL,
492 NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
493 aiop_zone = uma_zcreate("AIOP", sizeof(struct aiothreadlist), NULL,
494 NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
495 aiocb_zone = uma_zcreate("AIOCB", sizeof(struct aiocblist), NULL, NULL,
496 NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
497 aiol_zone = uma_zcreate("AIOL", AIO_LISTIO_MAX * sizeof(intptr_t), NULL,
498 NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
499 aiolio_zone = uma_zcreate("AIOLIO", sizeof(struct aioliojob), NULL,
500 NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
501 aiod_timeout = AIOD_TIMEOUT_DEFAULT;
502 aiod_lifetime = AIOD_LIFETIME_DEFAULT;
503 jobrefid = 1;
504 async_io_version = _POSIX_VERSION;
505 p31b_setcfg(CTL_P1003_1B_AIO_LISTIO_MAX, AIO_LISTIO_MAX);
506 p31b_setcfg(CTL_P1003_1B_AIO_MAX, MAX_AIO_QUEUE);
507 p31b_setcfg(CTL_P1003_1B_AIO_PRIO_DELTA_MAX, 0);
508
509 error = syscall_helper_register(aio_syscalls);
510 if (error)
511 return (error);
512 #ifdef COMPAT_FREEBSD32
513 error = syscall32_helper_register(aio32_syscalls);
514 if (error)
515 return (error);
516 #endif
517 return (0);
518 }
519
520 /*
521 * Callback for unload of AIO when used as a module.
522 */
523 static int
524 aio_unload(void)
525 {
526 int error;
527
528 /*
529 * XXX: no unloads by default; it's too dangerous.
530 * Perhaps we could do it if we locked out callers and then
531 * did an aio_proc_rundown() on each process.
532 *
533 * jhb: aio_proc_rundown() needs to run on curproc though,
534 * so I don't think that would fly.
535 */
536 if (!unloadable)
537 return (EOPNOTSUPP);
538
539 #ifdef COMPAT_FREEBSD32
540 syscall32_helper_unregister(aio32_syscalls);
541 #endif
542 syscall_helper_unregister(aio_syscalls);
543
544 error = kqueue_del_filteropts(EVFILT_AIO);
545 if (error)
546 return (error);
547 error = kqueue_del_filteropts(EVFILT_LIO);
548 if (error)
549 return (error);
550 async_io_version = 0;
551 aio_swake = NULL;
552 taskqueue_free(taskqueue_aiod_bio);
553 delete_unrhdr(aiod_unr);
554 uma_zdestroy(kaio_zone);
555 uma_zdestroy(aiop_zone);
556 uma_zdestroy(aiocb_zone);
557 uma_zdestroy(aiol_zone);
558 uma_zdestroy(aiolio_zone);
559 EVENTHANDLER_DEREGISTER(process_exit, exit_tag);
560 EVENTHANDLER_DEREGISTER(process_exec, exec_tag);
561 mtx_destroy(&aio_job_mtx);
562 mtx_destroy(&aio_sock_mtx);
563 sema_destroy(&aio_newproc_sem);
564 p31b_setcfg(CTL_P1003_1B_AIO_LISTIO_MAX, -1);
565 p31b_setcfg(CTL_P1003_1B_AIO_MAX, -1);
566 p31b_setcfg(CTL_P1003_1B_AIO_PRIO_DELTA_MAX, -1);
567 return (0);
568 }
569
570 /*
571 * Init the per-process aioinfo structure. The aioinfo limits are set
572 * per-process for user limit (resource) management.
573 */
574 void
575 aio_init_aioinfo(struct proc *p)
576 {
577 struct kaioinfo *ki;
578
579 ki = uma_zalloc(kaio_zone, M_WAITOK);
580 mtx_init(&ki->kaio_mtx, "aiomtx", NULL, MTX_DEF);
581 ki->kaio_flags = 0;
582 ki->kaio_maxactive_count = max_aio_per_proc;
583 ki->kaio_active_count = 0;
584 ki->kaio_qallowed_count = max_aio_queue_per_proc;
585 ki->kaio_count = 0;
586 ki->kaio_ballowed_count = max_buf_aio;
587 ki->kaio_buffer_count = 0;
588 TAILQ_INIT(&ki->kaio_all);
589 TAILQ_INIT(&ki->kaio_done);
590 TAILQ_INIT(&ki->kaio_jobqueue);
591 TAILQ_INIT(&ki->kaio_bufqueue);
592 TAILQ_INIT(&ki->kaio_liojoblist);
593 TAILQ_INIT(&ki->kaio_sockqueue);
594 TAILQ_INIT(&ki->kaio_syncqueue);
595 TASK_INIT(&ki->kaio_task, 0, aio_kick_helper, p);
596 PROC_LOCK(p);
597 if (p->p_aioinfo == NULL) {
598 p->p_aioinfo = ki;
599 PROC_UNLOCK(p);
600 } else {
601 PROC_UNLOCK(p);
602 mtx_destroy(&ki->kaio_mtx);
603 uma_zfree(kaio_zone, ki);
604 }
605
606 while (num_aio_procs < MIN(target_aio_procs, max_aio_procs))
607 aio_newproc(NULL);
608 }
609
610 static int
611 aio_sendsig(struct proc *p, struct sigevent *sigev, ksiginfo_t *ksi)
612 {
613 struct thread *td;
614 int error;
615
616 error = sigev_findtd(p, sigev, &td);
617 if (error)
618 return (error);
619 if (!KSI_ONQ(ksi)) {
620 ksiginfo_set_sigev(ksi, sigev);
621 ksi->ksi_code = SI_ASYNCIO;
622 ksi->ksi_flags |= KSI_EXT | KSI_INS;
623 tdsendsignal(p, td, ksi->ksi_signo, ksi);
624 }
625 PROC_UNLOCK(p);
626 return (error);
627 }
628
629 /*
630 * Free a job entry. Wait for completion if it is currently active, but don't
631 * delay forever. If we delay, we return a flag that says that we have to
632 * restart the queue scan.
633 */
634 static int
635 aio_free_entry(struct aiocblist *aiocbe)
636 {
637 struct kaioinfo *ki;
638 struct aioliojob *lj;
639 struct proc *p;
640
641 p = aiocbe->userproc;
642 MPASS(curproc == p);
643 ki = p->p_aioinfo;
644 MPASS(ki != NULL);
645
646 AIO_LOCK_ASSERT(ki, MA_OWNED);
647 MPASS(aiocbe->jobstate == JOBST_JOBFINISHED);
648
649 atomic_subtract_int(&num_queue_count, 1);
650
651 ki->kaio_count--;
652 MPASS(ki->kaio_count >= 0);
653
654 TAILQ_REMOVE(&ki->kaio_done, aiocbe, plist);
655 TAILQ_REMOVE(&ki->kaio_all, aiocbe, allist);
656
657 lj = aiocbe->lio;
658 if (lj) {
659 lj->lioj_count--;
660 lj->lioj_finished_count--;
661
662 if (lj->lioj_count == 0) {
663 TAILQ_REMOVE(&ki->kaio_liojoblist, lj, lioj_list);
664 /* lio is going away, we need to destroy any knotes */
665 knlist_delete(&lj->klist, curthread, 1);
666 PROC_LOCK(p);
667 sigqueue_take(&lj->lioj_ksi);
668 PROC_UNLOCK(p);
669 uma_zfree(aiolio_zone, lj);
670 }
671 }
672
673 /* aiocbe is going away, we need to destroy any knotes */
674 knlist_delete(&aiocbe->klist, curthread, 1);
675 PROC_LOCK(p);
676 sigqueue_take(&aiocbe->ksi);
677 PROC_UNLOCK(p);
678
679 MPASS(aiocbe->bp == NULL);
680 aiocbe->jobstate = JOBST_NULL;
681 AIO_UNLOCK(ki);
682
683 /*
684 * The thread argument here is used to find the owning process
685 * and is also passed to fo_close() which may pass it to various
686 * places such as devsw close() routines. Because of that, we
687 * need a thread pointer from the process owning the job that is
688 * persistent and won't disappear out from under us or move to
689 * another process.
690 *
691 * Currently, all the callers of this function call it to remove
692 * an aiocblist from the current process' job list either via a
693 * syscall or due to the current process calling exit() or
694 * execve(). Thus, we know that p == curproc. We also know that
695 * curthread can't exit since we are curthread.
696 *
697 * Therefore, we use curthread as the thread to pass to
698 * knlist_delete(). This does mean that it is possible for the
699 * thread pointer at close time to differ from the thread pointer
700 * at open time, but this is already true of file descriptors in
701 * a multithreaded process.
702 */
703 fdrop(aiocbe->fd_file, curthread);
704 crfree(aiocbe->cred);
705 uma_zfree(aiocb_zone, aiocbe);
706 AIO_LOCK(ki);
707
708 return (0);
709 }
710
711 static void
712 aio_proc_rundown_exec(void *arg, struct proc *p, struct image_params *imgp __unused)
713 {
714 aio_proc_rundown(arg, p);
715 }
716
717 /*
718 * Rundown the jobs for a given process.
719 */
720 static void
721 aio_proc_rundown(void *arg, struct proc *p)
722 {
723 struct kaioinfo *ki;
724 struct aioliojob *lj;
725 struct aiocblist *cbe, *cbn;
726 struct file *fp;
727 struct socket *so;
728 int remove;
729
730 KASSERT(curthread->td_proc == p,
731 ("%s: called on non-curproc", __func__));
732 ki = p->p_aioinfo;
733 if (ki == NULL)
734 return;
735
736 AIO_LOCK(ki);
737 ki->kaio_flags |= KAIO_RUNDOWN;
738
739 restart:
740
741 /*
742 * Try to cancel all pending requests. This code simulates
743 * aio_cancel on all pending I/O requests.
744 */
745 TAILQ_FOREACH_SAFE(cbe, &ki->kaio_jobqueue, plist, cbn) {
746 remove = 0;
747 mtx_lock(&aio_job_mtx);
748 if (cbe->jobstate == JOBST_JOBQGLOBAL) {
749 TAILQ_REMOVE(&aio_jobs, cbe, list);
750 remove = 1;
751 } else if (cbe->jobstate == JOBST_JOBQSOCK) {
752 fp = cbe->fd_file;
753 MPASS(fp->f_type == DTYPE_SOCKET);
754 so = fp->f_data;
755 TAILQ_REMOVE(&so->so_aiojobq, cbe, list);
756 remove = 1;
757 } else if (cbe->jobstate == JOBST_JOBQSYNC) {
758 TAILQ_REMOVE(&ki->kaio_syncqueue, cbe, list);
759 remove = 1;
760 }
761 mtx_unlock(&aio_job_mtx);
762
763 if (remove) {
764 cbe->jobstate = JOBST_JOBFINISHED;
765 cbe->uaiocb._aiocb_private.status = -1;
766 cbe->uaiocb._aiocb_private.error = ECANCELED;
767 TAILQ_REMOVE(&ki->kaio_jobqueue, cbe, plist);
768 aio_bio_done_notify(p, cbe, DONE_QUEUE);
769 }
770 }
771
772 /* Wait for all running I/O to be finished */
773 if (TAILQ_FIRST(&ki->kaio_bufqueue) ||
774 TAILQ_FIRST(&ki->kaio_jobqueue)) {
775 ki->kaio_flags |= KAIO_WAKEUP;
776 msleep(&p->p_aioinfo, AIO_MTX(ki), PRIBIO, "aioprn", hz);
777 goto restart;
778 }
779
780 /* Free all completed I/O requests. */
781 while ((cbe = TAILQ_FIRST(&ki->kaio_done)) != NULL)
782 aio_free_entry(cbe);
783
784 while ((lj = TAILQ_FIRST(&ki->kaio_liojoblist)) != NULL) {
785 if (lj->lioj_count == 0) {
786 TAILQ_REMOVE(&ki->kaio_liojoblist, lj, lioj_list);
787 knlist_delete(&lj->klist, curthread, 1);
788 PROC_LOCK(p);
789 sigqueue_take(&lj->lioj_ksi);
790 PROC_UNLOCK(p);
791 uma_zfree(aiolio_zone, lj);
792 } else {
793 panic("LIO job not cleaned up: C:%d, FC:%d\n",
794 lj->lioj_count, lj->lioj_finished_count);
795 }
796 }
797 AIO_UNLOCK(ki);
798 taskqueue_drain(taskqueue_aiod_bio, &ki->kaio_task);
799 mtx_destroy(&ki->kaio_mtx);
800 uma_zfree(kaio_zone, ki);
801 p->p_aioinfo = NULL;
802 }
803
804 /*
805 * Select a job to run (called by an AIO daemon).
806 */
807 static struct aiocblist *
808 aio_selectjob(struct aiothreadlist *aiop)
809 {
810 struct aiocblist *aiocbe;
811 struct kaioinfo *ki;
812 struct proc *userp;
813
814 mtx_assert(&aio_job_mtx, MA_OWNED);
815 TAILQ_FOREACH(aiocbe, &aio_jobs, list) {
816 userp = aiocbe->userproc;
817 ki = userp->p_aioinfo;
818
819 if (ki->kaio_active_count < ki->kaio_maxactive_count) {
820 TAILQ_REMOVE(&aio_jobs, aiocbe, list);
821 /* Account for currently active jobs. */
822 ki->kaio_active_count++;
823 aiocbe->jobstate = JOBST_JOBRUNNING;
824 break;
825 }
826 }
827 return (aiocbe);
828 }
829
830 /*
831 * Move all data to a permanent storage device; this code
832 * simulates the fsync syscall.
833 */
834 static int
835 aio_fsync_vnode(struct thread *td, struct vnode *vp)
836 {
837 struct mount *mp;
838 int vfslocked;
839 int error;
840
841 vfslocked = VFS_LOCK_GIANT(vp->v_mount);
842 if ((error = vn_start_write(vp, &mp, V_WAIT | PCATCH)) != 0)
843 goto drop;
844 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
845 if (vp->v_object != NULL) {
846 VM_OBJECT_LOCK(vp->v_object);
847 vm_object_page_clean(vp->v_object, 0, 0, 0);
848 VM_OBJECT_UNLOCK(vp->v_object);
849 }
850 error = VOP_FSYNC(vp, MNT_WAIT, td);
851
852 VOP_UNLOCK(vp, 0);
853 vn_finished_write(mp);
854 drop:
855 VFS_UNLOCK_GIANT(vfslocked);
856 return (error);
857 }
858
859 /*
860 * The AIO processing activity. This is the code that does the I/O request for
861 * the non-physio version of the operations. The normal vn operations are used,
862 * and this code should work in all instances for every type of file, including
863 * pipes, sockets, fifos, and regular files.
864 *
865 * XXX I don't think it works well for sockets, pipes, and fifos.
866 */
867 static void
868 aio_process(struct aiocblist *aiocbe)
869 {
870 struct ucred *td_savedcred;
871 struct thread *td;
872 struct aiocb *cb;
873 struct file *fp;
874 struct socket *so;
875 struct uio auio;
876 struct iovec aiov;
877 int cnt;
878 int error;
879 int oublock_st, oublock_end;
880 int inblock_st, inblock_end;
881
882 td = curthread;
883 td_savedcred = td->td_ucred;
884 td->td_ucred = aiocbe->cred;
885 cb = &aiocbe->uaiocb;
886 fp = aiocbe->fd_file;
887
888 if (cb->aio_lio_opcode == LIO_SYNC) {
889 error = 0;
890 cnt = 0;
891 if (fp->f_vnode != NULL)
892 error = aio_fsync_vnode(td, fp->f_vnode);
893 cb->_aiocb_private.error = error;
894 cb->_aiocb_private.status = 0;
895 td->td_ucred = td_savedcred;
896 return;
897 }
898
899 aiov.iov_base = (void *)(uintptr_t)cb->aio_buf;
900 aiov.iov_len = cb->aio_nbytes;
901
902 auio.uio_iov = &aiov;
903 auio.uio_iovcnt = 1;
904 auio.uio_offset = cb->aio_offset;
905 auio.uio_resid = cb->aio_nbytes;
906 cnt = cb->aio_nbytes;
907 auio.uio_segflg = UIO_USERSPACE;
908 auio.uio_td = td;
909
910 inblock_st = td->td_ru.ru_inblock;
911 oublock_st = td->td_ru.ru_oublock;
912 /*
913 * aio_aqueue() acquires a reference to the file that is
914 * released in aio_free_entry().
915 */
916 if (cb->aio_lio_opcode == LIO_READ) {
917 auio.uio_rw = UIO_READ;
918 if (auio.uio_resid == 0)
919 error = 0;
920 else
921 error = fo_read(fp, &auio, fp->f_cred, FOF_OFFSET, td);
922 } else {
923 if (fp->f_type == DTYPE_VNODE)
924 bwillwrite();
925 auio.uio_rw = UIO_WRITE;
926 error = fo_write(fp, &auio, fp->f_cred, FOF_OFFSET, td);
927 }
928 inblock_end = td->td_ru.ru_inblock;
929 oublock_end = td->td_ru.ru_oublock;
930
931 aiocbe->inputcharge = inblock_end - inblock_st;
932 aiocbe->outputcharge = oublock_end - oublock_st;
933
934 if ((error) && (auio.uio_resid != cnt)) {
935 if (error == ERESTART || error == EINTR || error == EWOULDBLOCK)
936 error = 0;
937 if ((error == EPIPE) && (cb->aio_lio_opcode == LIO_WRITE)) {
938 int sigpipe = 1;
939 if (fp->f_type == DTYPE_SOCKET) {
940 so = fp->f_data;
941 if (so->so_options & SO_NOSIGPIPE)
942 sigpipe = 0;
943 }
944 if (sigpipe) {
945 PROC_LOCK(aiocbe->userproc);
946 kern_psignal(aiocbe->userproc, SIGPIPE);
947 PROC_UNLOCK(aiocbe->userproc);
948 }
949 }
950 }
951
952 cnt -= auio.uio_resid;
953 cb->_aiocb_private.error = error;
954 cb->_aiocb_private.status = cnt;
955 td->td_ucred = td_savedcred;
956 }
957
958 static void
959 aio_bio_done_notify(struct proc *userp, struct aiocblist *aiocbe, int type)
960 {
961 struct aioliojob *lj;
962 struct kaioinfo *ki;
963 struct aiocblist *scb, *scbn;
964 int lj_done;
965
966 ki = userp->p_aioinfo;
967 AIO_LOCK_ASSERT(ki, MA_OWNED);
968 lj = aiocbe->lio;
969 lj_done = 0;
970 if (lj) {
971 lj->lioj_finished_count++;
972 if (lj->lioj_count == lj->lioj_finished_count)
973 lj_done = 1;
974 }
975 if (type == DONE_QUEUE) {
976 aiocbe->jobflags |= AIOCBLIST_DONE;
977 } else {
978 aiocbe->jobflags |= AIOCBLIST_BUFDONE;
979 }
980 TAILQ_INSERT_TAIL(&ki->kaio_done, aiocbe, plist);
981 aiocbe->jobstate = JOBST_JOBFINISHED;
982
983 if (ki->kaio_flags & KAIO_RUNDOWN)
984 goto notification_done;
985
986 if (aiocbe->uaiocb.aio_sigevent.sigev_notify == SIGEV_SIGNAL ||
987 aiocbe->uaiocb.aio_sigevent.sigev_notify == SIGEV_THREAD_ID)
988 aio_sendsig(userp, &aiocbe->uaiocb.aio_sigevent, &aiocbe->ksi);
989
990 KNOTE_LOCKED(&aiocbe->klist, 1);
991
992 if (lj_done) {
993 if (lj->lioj_signal.sigev_notify == SIGEV_KEVENT) {
994 lj->lioj_flags |= LIOJ_KEVENT_POSTED;
995 KNOTE_LOCKED(&lj->klist, 1);
996 }
997 if ((lj->lioj_flags & (LIOJ_SIGNAL|LIOJ_SIGNAL_POSTED))
998 == LIOJ_SIGNAL
999 && (lj->lioj_signal.sigev_notify == SIGEV_SIGNAL ||
1000 lj->lioj_signal.sigev_notify == SIGEV_THREAD_ID)) {
1001 aio_sendsig(userp, &lj->lioj_signal, &lj->lioj_ksi);
1002 lj->lioj_flags |= LIOJ_SIGNAL_POSTED;
1003 }
1004 }
1005
1006 notification_done:
1007 if (aiocbe->jobflags & AIOCBLIST_CHECKSYNC) {
1008 TAILQ_FOREACH_SAFE(scb, &ki->kaio_syncqueue, list, scbn) {
1009 if (aiocbe->fd_file == scb->fd_file &&
1010 aiocbe->seqno < scb->seqno) {
1011 if (--scb->pending == 0) {
1012 mtx_lock(&aio_job_mtx);
1013 scb->jobstate = JOBST_JOBQGLOBAL;
1014 TAILQ_REMOVE(&ki->kaio_syncqueue, scb, list);
1015 TAILQ_INSERT_TAIL(&aio_jobs, scb, list);
1016 aio_kick_nowait(userp);
1017 mtx_unlock(&aio_job_mtx);
1018 }
1019 }
1020 }
1021 }
1022 if (ki->kaio_flags & KAIO_WAKEUP) {
1023 ki->kaio_flags &= ~KAIO_WAKEUP;
1024 wakeup(&userp->p_aioinfo);
1025 }
1026 }
1027
1028 /*
1029 * The AIO daemon. Most of the actual work is done in aio_process(),
1030 * but the setup (and address space management) is done in this routine.
1031 */
1032 static void
1033 aio_daemon(void *_id)
1034 {
1035 struct aiocblist *aiocbe;
1036 struct aiothreadlist *aiop;
1037 struct kaioinfo *ki;
1038 struct proc *curcp, *mycp, *userp;
1039 struct vmspace *myvm, *tmpvm;
1040 struct thread *td = curthread;
1041 int id = (intptr_t)_id;
1042
1043 /*
1044 * Local copies of curproc (cp) and vmspace (myvm)
1045 */
1046 mycp = td->td_proc;
1047 myvm = mycp->p_vmspace;
1048
1049 KASSERT(mycp->p_textvp == NULL, ("kthread has a textvp"));
1050
1051 /*
1052 * Allocate and ready the aio control info. There is one aiop structure
1053 * per daemon.
1054 */
1055 aiop = uma_zalloc(aiop_zone, M_WAITOK);
1056 aiop->aiothread = td;
1057 aiop->aiothreadflags = 0;
1058
1059 /* The daemon resides in its own pgrp. */
1060 sys_setsid(td, NULL);
1061
1062 /*
1063 * Wakeup parent process. (Parent sleeps to keep from blasting away
1064 * and creating too many daemons.)
1065 */
1066 sema_post(&aio_newproc_sem);
1067
1068 mtx_lock(&aio_job_mtx);
1069 for (;;) {
1070 /*
1071 * curcp is the current daemon process context.
1072 * userp is the current user process context.
1073 */
1074 curcp = mycp;
1075
1076 /*
1077 * Take daemon off of free queue
1078 */
1079 if (aiop->aiothreadflags & AIOP_FREE) {
1080 TAILQ_REMOVE(&aio_freeproc, aiop, list);
1081 aiop->aiothreadflags &= ~AIOP_FREE;
1082 }
1083
1084 /*
1085 * Check for jobs.
1086 */
1087 while ((aiocbe = aio_selectjob(aiop)) != NULL) {
1088 mtx_unlock(&aio_job_mtx);
1089 userp = aiocbe->userproc;
1090
1091 /*
1092 * Connect to process address space for user program.
1093 */
1094 if (userp != curcp) {
1095 /*
1096 * Save the current address space that we are
1097 * connected to.
1098 */
1099 tmpvm = mycp->p_vmspace;
1100
1101 /*
1102 * Point to the new user address space, and
1103 * refer to it.
1104 */
1105 mycp->p_vmspace = userp->p_vmspace;
1106 atomic_add_int(&mycp->p_vmspace->vm_refcnt, 1);
1107
1108 /* Activate the new mapping. */
1109 pmap_activate(FIRST_THREAD_IN_PROC(mycp));
1110
1111 /*
1112 * If the old address space wasn't the daemon's
1113 * own address space, then we need to remove the
1114 * daemon's reference from the other process
1115 * that it was acting on behalf of.
1116 */
1117 if (tmpvm != myvm) {
1118 vmspace_free(tmpvm);
1119 }
1120 curcp = userp;
1121 }
1122
1123 ki = userp->p_aioinfo;
1124
1125 /* Do the I/O function. */
1126 aio_process(aiocbe);
1127
1128 mtx_lock(&aio_job_mtx);
1129 /* Decrement the active job count. */
1130 ki->kaio_active_count--;
1131 mtx_unlock(&aio_job_mtx);
1132
1133 AIO_LOCK(ki);
1134 TAILQ_REMOVE(&ki->kaio_jobqueue, aiocbe, plist);
1135 aio_bio_done_notify(userp, aiocbe, DONE_QUEUE);
1136 AIO_UNLOCK(ki);
1137
1138 mtx_lock(&aio_job_mtx);
1139 }
1140
1141 /*
1142 * Disconnect from user address space.
1143 */
1144 if (curcp != mycp) {
1145
1146 mtx_unlock(&aio_job_mtx);
1147
1148 /* Get the user address space to disconnect from. */
1149 tmpvm = mycp->p_vmspace;
1150
1151 /* Get original address space for daemon. */
1152 mycp->p_vmspace = myvm;
1153
1154 /* Activate the daemon's address space. */
1155 pmap_activate(FIRST_THREAD_IN_PROC(mycp));
1156 #ifdef DIAGNOSTIC
1157 if (tmpvm == myvm) {
1158 printf("AIOD: vmspace problem -- %d\n",
1159 mycp->p_pid);
1160 }
1161 #endif
1162 /* Remove our vmspace reference. */
1163 vmspace_free(tmpvm);
1164
1165 curcp = mycp;
1166
1167 mtx_lock(&aio_job_mtx);
1168 /*
1169 * We have to restart to avoid a race; we only sleep
1170 * if no job can be selected, which should mean
1171 * curcp == mycp.
1172 */
1173 continue;
1174 }
1175
1176 mtx_assert(&aio_job_mtx, MA_OWNED);
1177
1178 TAILQ_INSERT_HEAD(&aio_freeproc, aiop, list);
1179 aiop->aiothreadflags |= AIOP_FREE;
1180
1181 /*
1182 * If daemon is inactive for a long time, allow it to exit,
1183 * thereby freeing resources.
1184 */
1185 if (msleep(aiop->aiothread, &aio_job_mtx, PRIBIO, "aiordy",
1186 aiod_lifetime)) {
1187 if (TAILQ_EMPTY(&aio_jobs)) {
1188 if ((aiop->aiothreadflags & AIOP_FREE) &&
1189 (num_aio_procs > target_aio_procs)) {
1190 TAILQ_REMOVE(&aio_freeproc, aiop, list);
1191 num_aio_procs--;
1192 mtx_unlock(&aio_job_mtx);
1193 uma_zfree(aiop_zone, aiop);
1194 free_unr(aiod_unr, id);
1195 #ifdef DIAGNOSTIC
1196 if (mycp->p_vmspace->vm_refcnt <= 1) {
1197 printf("AIOD: bad vm refcnt for"
1198 " exiting daemon: %d\n",
1199 mycp->p_vmspace->vm_refcnt);
1200 }
1201 #endif
1202 kproc_exit(0);
1203 }
1204 }
1205 }
1206 }
1207 mtx_unlock(&aio_job_mtx);
1208 panic("shouldn't be here\n");
1209 }
1210
1211 /*
1212 * Create a new AIO daemon. This is mostly a kernel-thread fork routine. The
1213 * AIO daemon modifies its environment itself.
1214 */
1215 static int
1216 aio_newproc(int *start)
1217 {
1218 int error;
1219 struct proc *p;
1220 int id;
1221
1222 id = alloc_unr(aiod_unr);
1223 error = kproc_create(aio_daemon, (void *)(intptr_t)id, &p,
1224 RFNOWAIT, 0, "aiod%d", id);
1225 if (error == 0) {
1226 /*
1227 * Wait until daemon is started.
1228 */
1229 sema_wait(&aio_newproc_sem);
1230 mtx_lock(&aio_job_mtx);
1231 num_aio_procs++;
1232 if (start != NULL)
1233 (*start)--;
1234 mtx_unlock(&aio_job_mtx);
1235 } else {
1236 free_unr(aiod_unr, id);
1237 }
1238 return (error);
1239 }
1240
1241 /*
1242 * Try the high-performance, low-overhead physio method for eligible
1243 * VCHR devices. This method doesn't use an aio helper thread, and
1244 * thus has very low overhead.
1245 *
1246 * Assumes that the caller, aio_aqueue(), has incremented the file
1247 * structure's reference count, preventing its deallocation for the
1248 * duration of this call.
1249 */
1250 static int
1251 aio_qphysio(struct proc *p, struct aiocblist *aiocbe)
1252 {
1253 struct aiocb *cb;
1254 struct file *fp;
1255 struct buf *bp;
1256 struct vnode *vp;
1257 struct kaioinfo *ki;
1258 struct aioliojob *lj;
1259 int error;
1260
1261 cb = &aiocbe->uaiocb;
1262 fp = aiocbe->fd_file;
1263
1264 if (fp->f_type != DTYPE_VNODE)
1265 return (-1);
1266
1267 vp = fp->f_vnode;
1268
1269 /*
1270 * If it's not a disk, we don't want to return a positive error.
1271 * That would keep the aio code from falling through to try the
1272 * threaded path when talking to a regular file.
1273 */
1274 if (!vn_isdisk(vp, &error)) {
1275 if (error == ENOTBLK)
1276 return (-1);
1277 else
1278 return (error);
1279 }
1280
1281 if (vp->v_bufobj.bo_bsize == 0)
1282 return (-1);
1283
1284 if (cb->aio_nbytes % vp->v_bufobj.bo_bsize)
1285 return (-1);
1286
1287 if (cb->aio_nbytes > vp->v_rdev->si_iosize_max)
1288 return (-1);
1289
1290 if (cb->aio_nbytes >
1291 MAXPHYS - (((vm_offset_t) cb->aio_buf) & PAGE_MASK))
1292 return (-1);
1293
1294 ki = p->p_aioinfo;
1295 if (ki->kaio_buffer_count >= ki->kaio_ballowed_count)
1296 return (-1);
1297
1298 /* Create and build a buffer header for a transfer. */
1299 bp = (struct buf *)getpbuf(NULL);
1300 BUF_KERNPROC(bp);
1301
1302 AIO_LOCK(ki);
1303 ki->kaio_count++;
1304 ki->kaio_buffer_count++;
1305 lj = aiocbe->lio;
1306 if (lj)
1307 lj->lioj_count++;
1308 AIO_UNLOCK(ki);
1309
1310 /*
1311 * Get a copy of the kva from the physical buffer.
1312 */
1313 error = 0;
1314
1315 bp->b_bcount = cb->aio_nbytes;
1316 bp->b_bufsize = cb->aio_nbytes;
1317 bp->b_iodone = aio_physwakeup;
1318 bp->b_saveaddr = bp->b_data;
1319 bp->b_data = (void *)(uintptr_t)cb->aio_buf;
1320 bp->b_offset = cb->aio_offset;
1321 bp->b_iooffset = cb->aio_offset;
1322 bp->b_blkno = btodb(cb->aio_offset);
1323 bp->b_iocmd = cb->aio_lio_opcode == LIO_WRITE ? BIO_WRITE : BIO_READ;
1324
1325 /*
1326 * Bring buffer into kernel space.
1327 */
1328 if (vmapbuf(bp) < 0) {
1329 error = EFAULT;
1330 goto doerror;
1331 }
1332
1333 AIO_LOCK(ki);
1334 aiocbe->bp = bp;
1335 bp->b_caller1 = (void *)aiocbe;
1336 TAILQ_INSERT_TAIL(&ki->kaio_bufqueue, aiocbe, plist);
1337 TAILQ_INSERT_TAIL(&ki->kaio_all, aiocbe, allist);
1338 aiocbe->jobstate = JOBST_JOBQBUF;
1339 cb->_aiocb_private.status = cb->aio_nbytes;
1340 AIO_UNLOCK(ki);
1341
1342 atomic_add_int(&num_queue_count, 1);
1343 atomic_add_int(&num_buf_aio, 1);
1344
1345 bp->b_error = 0;
1346
1347 TASK_INIT(&aiocbe->biotask, 0, biohelper, aiocbe);
1348
1349 /* Perform transfer. */
1350 dev_strategy(vp->v_rdev, bp);
1351 return (0);
1352
1353 doerror:
1354 AIO_LOCK(ki);
1355 ki->kaio_count--;
1356 ki->kaio_buffer_count--;
1357 if (lj)
1358 lj->lioj_count--;
1359 aiocbe->bp = NULL;
1360 AIO_UNLOCK(ki);
1361 relpbuf(bp, NULL);
1362 return (error);
1363 }
1364
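/*
 * For example (illustrative), an aio_read() of a single 512-byte sector
 * from a raw disk device such as /dev/ada0 takes this physio path,
 * whereas the same request against a regular file fails vn_isdisk()
 * above with ENOTBLK, returns -1, and falls back to the daemon threads.
 */
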
1365 /*
1366 * Wake up aio requests that may be serviceable now.
1367 */
1368 static void
1369 aio_swake_cb(struct socket *so, struct sockbuf *sb)
1370 {
1371 struct aiocblist *cb, *cbn;
1372 int opcode;
1373
1374 SOCKBUF_LOCK_ASSERT(sb);
1375 if (sb == &so->so_snd)
1376 opcode = LIO_WRITE;
1377 else
1378 opcode = LIO_READ;
1379
1380 sb->sb_flags &= ~SB_AIO;
1381 mtx_lock(&aio_job_mtx);
1382 TAILQ_FOREACH_SAFE(cb, &so->so_aiojobq, list, cbn) {
1383 if (opcode == cb->uaiocb.aio_lio_opcode) {
1384 if (cb->jobstate != JOBST_JOBQSOCK)
1385 panic("invalid queue value");
1386 /* XXX
1387 * We don't have an actual socket backend yet,
1388 * so we simply move the requests to the generic
1389 * file I/O backend.
1390 */
1391 TAILQ_REMOVE(&so->so_aiojobq, cb, list);
1392 TAILQ_INSERT_TAIL(&aio_jobs, cb, list);
1393 aio_kick_nowait(cb->userproc);
1394 }
1395 }
1396 mtx_unlock(&aio_job_mtx);
1397 }
1398
1399 static int
1400 convert_old_sigevent(struct osigevent *osig, struct sigevent *nsig)
1401 {
1402
1403 /*
1404 * Only SIGEV_NONE, SIGEV_SIGNAL, and SIGEV_KEVENT are
1405 * supported by AIO with the old sigevent structure.
1406 */
1407 nsig->sigev_notify = osig->sigev_notify;
1408 switch (nsig->sigev_notify) {
1409 case SIGEV_NONE:
1410 break;
1411 case SIGEV_SIGNAL:
1412 nsig->sigev_signo = osig->__sigev_u.__sigev_signo;
1413 break;
1414 case SIGEV_KEVENT:
1415 nsig->sigev_notify_kqueue =
1416 osig->__sigev_u.__sigev_notify_kqueue;
1417 nsig->sigev_value.sival_ptr = osig->sigev_value.sival_ptr;
1418 break;
1419 default:
1420 return (EINVAL);
1421 }
1422 return (0);
1423 }
1424
1425 static int
1426 aiocb_copyin_old_sigevent(struct aiocb *ujob, struct aiocb *kjob)
1427 {
1428 struct oaiocb *ojob;
1429 int error;
1430
1431 bzero(kjob, sizeof(struct aiocb));
1432 error = copyin(ujob, kjob, sizeof(struct oaiocb));
1433 if (error)
1434 return (error);
1435 ojob = (struct oaiocb *)kjob;
1436 return (convert_old_sigevent(&ojob->aio_sigevent, &kjob->aio_sigevent));
1437 }
1438
1439 static int
1440 aiocb_copyin(struct aiocb *ujob, struct aiocb *kjob)
1441 {
1442
1443 return (copyin(ujob, kjob, sizeof(struct aiocb)));
1444 }
1445
1446 static long
1447 aiocb_fetch_status(struct aiocb *ujob)
1448 {
1449
1450 return (fuword(&ujob->_aiocb_private.status));
1451 }
1452
1453 static long
1454 aiocb_fetch_error(struct aiocb *ujob)
1455 {
1456
1457 return (fuword(&ujob->_aiocb_private.error));
1458 }
1459
1460 static int
1461 aiocb_store_status(struct aiocb *ujob, long status)
1462 {
1463
1464 return (suword(&ujob->_aiocb_private.status, status));
1465 }
1466
1467 static int
1468 aiocb_store_error(struct aiocb *ujob, long error)
1469 {
1470
1471 return (suword(&ujob->_aiocb_private.error, error));
1472 }
1473
1474 static int
1475 aiocb_store_kernelinfo(struct aiocb *ujob, long jobref)
1476 {
1477
1478 return (suword(&ujob->_aiocb_private.kernelinfo, jobref));
1479 }
1480
1481 static int
1482 aiocb_store_aiocb(struct aiocb **ujobp, struct aiocb *ujob)
1483 {
1484
1485 return (suword(ujobp, (long)ujob));
1486 }
1487
1488 static struct aiocb_ops aiocb_ops = {
1489 .copyin = aiocb_copyin,
1490 .fetch_status = aiocb_fetch_status,
1491 .fetch_error = aiocb_fetch_error,
1492 .store_status = aiocb_store_status,
1493 .store_error = aiocb_store_error,
1494 .store_kernelinfo = aiocb_store_kernelinfo,
1495 .store_aiocb = aiocb_store_aiocb,
1496 };
1497
1498 static struct aiocb_ops aiocb_ops_osigevent = {
1499 .copyin = aiocb_copyin_old_sigevent,
1500 .fetch_status = aiocb_fetch_status,
1501 .fetch_error = aiocb_fetch_error,
1502 .store_status = aiocb_store_status,
1503 .store_error = aiocb_store_error,
1504 .store_kernelinfo = aiocb_store_kernelinfo,
1505 .store_aiocb = aiocb_store_aiocb,
1506 };
1507
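/*
 * An alternate ABI plugs in its own table, converting its control-block
 * layout in the copyin operation. A hypothetical sketch, with aiocb_foo
 * and its helpers standing in for a real compat ABI:
 *
 *	static int
 *	aiocb_foo_copyin(struct aiocb *ujob, struct aiocb *kjob)
 *	{
 *		struct aiocb_foo job_foo;
 *		int error;
 *
 *		error = copyin(ujob, &job_foo, sizeof(job_foo));
 *		if (error)
 *			return (error);
 *		(convert each field of job_foo into *kjob here)
 *		return (0);
 *	}
 *
 *	static struct aiocb_ops aiocb_foo_ops = {
 *		.copyin = aiocb_foo_copyin,
 *		(remaining members filled as in aiocb_ops above)
 *	};
 */
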
1508 /*
1509 * Queue a new AIO request. Choosing either the threaded or direct physio VCHR
1510 * technique is done in this code.
1511 */
1512 int
1513 aio_aqueue(struct thread *td, struct aiocb *job, struct aioliojob *lj,
1514 int type, struct aiocb_ops *ops)
1515 {
1516 struct proc *p = td->td_proc;
1517 struct file *fp;
1518 struct socket *so;
1519 struct aiocblist *aiocbe, *cb;
1520 struct kaioinfo *ki;
1521 struct kevent kev;
1522 struct sockbuf *sb;
1523 int opcode;
1524 int error;
1525 int fd, kqfd;
1526 int jid;
1527 u_short evflags;
1528
1529 if (p->p_aioinfo == NULL)
1530 aio_init_aioinfo(p);
1531
1532 ki = p->p_aioinfo;
1533
1534 ops->store_status(job, -1);
1535 ops->store_error(job, 0);
1536 ops->store_kernelinfo(job, -1);
1537
1538 if (num_queue_count >= max_queue_count ||
1539 ki->kaio_count >= ki->kaio_qallowed_count) {
1540 ops->store_error(job, EAGAIN);
1541 return (EAGAIN);
1542 }
1543
1544 aiocbe = uma_zalloc(aiocb_zone, M_WAITOK | M_ZERO);
1545 aiocbe->inputcharge = 0;
1546 aiocbe->outputcharge = 0;
1547 knlist_init_mtx(&aiocbe->klist, AIO_MTX(ki));
1548
1549 error = ops->copyin(job, &aiocbe->uaiocb);
1550 if (error) {
1551 ops->store_error(job, error);
1552 uma_zfree(aiocb_zone, aiocbe);
1553 return (error);
1554 }
1555
1556 if (aiocbe->uaiocb.aio_sigevent.sigev_notify != SIGEV_KEVENT &&
1557 aiocbe->uaiocb.aio_sigevent.sigev_notify != SIGEV_SIGNAL &&
1558 aiocbe->uaiocb.aio_sigevent.sigev_notify != SIGEV_THREAD_ID &&
1559 aiocbe->uaiocb.aio_sigevent.sigev_notify != SIGEV_NONE) {
1560 ops->store_error(job, EINVAL);
1561 uma_zfree(aiocb_zone, aiocbe);
1562 return (EINVAL);
1563 }
1564
1565 if ((aiocbe->uaiocb.aio_sigevent.sigev_notify == SIGEV_SIGNAL ||
1566 aiocbe->uaiocb.aio_sigevent.sigev_notify == SIGEV_THREAD_ID) &&
1567 !_SIG_VALID(aiocbe->uaiocb.aio_sigevent.sigev_signo)) {
1568 uma_zfree(aiocb_zone, aiocbe);
1569 return (EINVAL);
1570 }
1571
1572 ksiginfo_init(&aiocbe->ksi);
1573
1574 /* Save userspace address of the job info. */
1575 aiocbe->uuaiocb = job;
1576
1577 /* Get the opcode. */
1578 if (type != LIO_NOP)
1579 aiocbe->uaiocb.aio_lio_opcode = type;
1580 opcode = aiocbe->uaiocb.aio_lio_opcode;
1581
1582 /*
1583 * Validate the opcode and fetch the file object for the specified
1584 * file descriptor.
1585 *
1586 * XXXRW: Moved the opcode validation up here so that we don't
1587 * retrieve a file descriptor without knowing what the capability
1588 * should be.
1589 */
1590 fd = aiocbe->uaiocb.aio_fildes;
1591 switch (opcode) {
1592 case LIO_WRITE:
1593 error = fget_write(td, fd, CAP_WRITE | CAP_SEEK, &fp);
1594 break;
1595 case LIO_READ:
1596 error = fget_read(td, fd, CAP_READ | CAP_SEEK, &fp);
1597 break;
1598 case LIO_SYNC:
1599 error = fget(td, fd, CAP_FSYNC, &fp);
1600 break;
1601 case LIO_NOP:
1602 error = fget(td, fd, 0, &fp);
1603 break;
1604 default:
1605 error = EINVAL;
1606 }
1607 if (error) {
1608 uma_zfree(aiocb_zone, aiocbe);
1609 ops->store_error(job, error);
1610 return (error);
1611 }
1612
1613 if (opcode == LIO_SYNC && fp->f_vnode == NULL) {
1614 error = EINVAL;
1615 goto aqueue_fail;
1616 }
1617
1618 if (opcode != LIO_SYNC && aiocbe->uaiocb.aio_offset == -1LL) {
1619 error = EINVAL;
1620 goto aqueue_fail;
1621 }
1622
1623 aiocbe->fd_file = fp;
1624
1625 mtx_lock(&aio_job_mtx);
1626 jid = jobrefid++;
1627 aiocbe->seqno = jobseqno++;
1628 mtx_unlock(&aio_job_mtx);
1629 error = ops->store_kernelinfo(job, jid);
1630 if (error) {
1631 error = EINVAL;
1632 goto aqueue_fail;
1633 }
1634 aiocbe->uaiocb._aiocb_private.kernelinfo = (void *)(intptr_t)jid;
1635
1636 if (opcode == LIO_NOP) {
1637 fdrop(fp, td);
1638 uma_zfree(aiocb_zone, aiocbe);
1639 return (0);
1640 }
1641
1642 if (aiocbe->uaiocb.aio_sigevent.sigev_notify != SIGEV_KEVENT)
1643 goto no_kqueue;
1644 evflags = aiocbe->uaiocb.aio_sigevent.sigev_notify_kevent_flags;
1645 if ((evflags & ~(EV_CLEAR | EV_DISPATCH | EV_ONESHOT)) != 0) {
1646 error = EINVAL;
1647 goto aqueue_fail;
1648 }
1649 kqfd = aiocbe->uaiocb.aio_sigevent.sigev_notify_kqueue;
1650 kev.ident = (uintptr_t)aiocbe->uuaiocb;
1651 kev.filter = EVFILT_AIO;
1652 kev.flags = EV_ADD | EV_ENABLE | EV_FLAG1 | evflags;
1653 kev.data = (intptr_t)aiocbe;
1654 kev.udata = aiocbe->uaiocb.aio_sigevent.sigev_value.sival_ptr;
1655 error = kqfd_register(kqfd, &kev, td, 1);
1656 aqueue_fail:
1657 if (error) {
1658 fdrop(fp, td);
1659 uma_zfree(aiocb_zone, aiocbe);
1660 ops->store_error(job, error);
1661 goto done;
1662 }
1663 no_kqueue:
1664
1665 ops->store_error(job, EINPROGRESS);
1666 aiocbe->uaiocb._aiocb_private.error = EINPROGRESS;
1667 aiocbe->userproc = p;
1668 aiocbe->cred = crhold(td->td_ucred);
1669 aiocbe->jobflags = 0;
1670 aiocbe->lio = lj;
1671
1672 if (opcode == LIO_SYNC)
1673 goto queueit;
1674
1675 if (fp->f_type == DTYPE_SOCKET) {
1676 /*
1677 * Alternate queueing for socket ops: Reach down into the
1678 * descriptor to get the socket data. Then check to see if the
1679 * socket is ready to be read or written (based on the requested
1680 * operation).
1681 *
1682 * If it is not ready for I/O, then queue the aiocbe on the
1683 * socket, and set the flags so we get a call when sbnotify()
1684 * happens.
1685 *
1686 * Note if opcode is neither LIO_WRITE nor LIO_READ we lock
1687 * and unlock the snd sockbuf for no reason.
1688 */
1689 so = fp->f_data;
1690 sb = (opcode == LIO_READ) ? &so->so_rcv : &so->so_snd;
1691 SOCKBUF_LOCK(sb);
1692 if (((opcode == LIO_READ) && (!soreadable(so))) || ((opcode ==
1693 LIO_WRITE) && (!sowriteable(so)))) {
1694 sb->sb_flags |= SB_AIO;
1695
1696 mtx_lock(&aio_job_mtx);
1697 TAILQ_INSERT_TAIL(&so->so_aiojobq, aiocbe, list);
1698 mtx_unlock(&aio_job_mtx);
1699
1700 AIO_LOCK(ki);
1701 TAILQ_INSERT_TAIL(&ki->kaio_all, aiocbe, allist);
1702 TAILQ_INSERT_TAIL(&ki->kaio_jobqueue, aiocbe, plist);
1703 aiocbe->jobstate = JOBST_JOBQSOCK;
1704 ki->kaio_count++;
1705 if (lj)
1706 lj->lioj_count++;
1707 AIO_UNLOCK(ki);
1708 SOCKBUF_UNLOCK(sb);
1709 atomic_add_int(&num_queue_count, 1);
1710 error = 0;
1711 goto done;
1712 }
1713 SOCKBUF_UNLOCK(sb);
1714 }
1715
1716 if ((error = aio_qphysio(p, aiocbe)) == 0)
1717 goto done;
1718 #if 0
1719 if (error > 0) {
1720 aiocbe->uaiocb._aiocb_private.error = error;
1721 ops->store_error(job, error);
1722 goto done;
1723 }
1724 #endif
1725 queueit:
1726 /* No buffer for daemon I/O. */
1727 aiocbe->bp = NULL;
1728 atomic_add_int(&num_queue_count, 1);
1729
1730 AIO_LOCK(ki);
1731 ki->kaio_count++;
1732 if (lj)
1733 lj->lioj_count++;
1734 TAILQ_INSERT_TAIL(&ki->kaio_jobqueue, aiocbe, plist);
1735 TAILQ_INSERT_TAIL(&ki->kaio_all, aiocbe, allist);
1736 if (opcode == LIO_SYNC) {
1737 TAILQ_FOREACH(cb, &ki->kaio_jobqueue, plist) {
1738 if (cb->fd_file == aiocbe->fd_file &&
1739 cb->uaiocb.aio_lio_opcode != LIO_SYNC &&
1740 cb->seqno < aiocbe->seqno) {
1741 cb->jobflags |= AIOCBLIST_CHECKSYNC;
1742 aiocbe->pending++;
1743 }
1744 }
1745 TAILQ_FOREACH(cb, &ki->kaio_bufqueue, plist) {
1746 if (cb->fd_file == aiocbe->fd_file &&
1747 cb->uaiocb.aio_lio_opcode != LIO_SYNC &&
1748 cb->seqno < aiocbe->seqno) {
1749 cb->jobflags |= AIOCBLIST_CHECKSYNC;
1750 aiocbe->pending++;
1751 }
1752 }
1753 if (aiocbe->pending != 0) {
1754 TAILQ_INSERT_TAIL(&ki->kaio_syncqueue, aiocbe, list);
1755 aiocbe->jobstate = JOBST_JOBQSYNC;
1756 AIO_UNLOCK(ki);
1757 goto done;
1758 }
1759 }
1760 mtx_lock(&aio_job_mtx);
1761 TAILQ_INSERT_TAIL(&aio_jobs, aiocbe, list);
1762 aiocbe->jobstate = JOBST_JOBQGLOBAL;
1763 aio_kick_nowait(p);
1764 mtx_unlock(&aio_job_mtx);
1765 AIO_UNLOCK(ki);
1766 error = 0;
1767 done:
1768 return (error);
1769 }
1770
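/*
 * Userland sketch of the SIGEV_KEVENT notification path set up above
 * (illustrative; assumes kq was obtained from kqueue() and acb is
 * initialized as for aio_read()). On completion, ev.ident holds the
 * userspace aiocb pointer and ev.udata the sigev_value pointer:
 *
 *	struct kevent ev;
 *
 *	acb.aio_sigevent.sigev_notify = SIGEV_KEVENT;
 *	acb.aio_sigevent.sigev_notify_kqueue = kq;
 *	acb.aio_sigevent.sigev_value.sival_ptr = &acb;
 *	aio_read(&acb);
 *	kevent(kq, NULL, 0, &ev, 1, NULL);
 */
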
1771 static void
1772 aio_kick_nowait(struct proc *userp)
1773 {
1774 struct kaioinfo *ki = userp->p_aioinfo;
1775 struct aiothreadlist *aiop;
1776
1777 mtx_assert(&aio_job_mtx, MA_OWNED);
1778 if ((aiop = TAILQ_FIRST(&aio_freeproc)) != NULL) {
1779 TAILQ_REMOVE(&aio_freeproc, aiop, list);
1780 aiop->aiothreadflags &= ~AIOP_FREE;
1781 wakeup(aiop->aiothread);
1782 } else if (((num_aio_resv_start + num_aio_procs) < max_aio_procs) &&
1783 ((ki->kaio_active_count + num_aio_resv_start) <
1784 ki->kaio_maxactive_count)) {
1785 taskqueue_enqueue(taskqueue_aiod_bio, &ki->kaio_task);
1786 }
1787 }
1788
1789 static int
1790 aio_kick(struct proc *userp)
1791 {
1792 struct kaioinfo *ki = userp->p_aioinfo;
1793 struct aiothreadlist *aiop;
1794 int error, ret = 0;
1795
1796 mtx_assert(&aio_job_mtx, MA_OWNED);
1797 retryproc:
1798 if ((aiop = TAILQ_FIRST(&aio_freeproc)) != NULL) {
1799 TAILQ_REMOVE(&aio_freeproc, aiop, list);
1800 aiop->aiothreadflags &= ~AIOP_FREE;
1801 wakeup(aiop->aiothread);
1802 } else if (((num_aio_resv_start + num_aio_procs) < max_aio_procs) &&
1803 ((ki->kaio_active_count + num_aio_resv_start) <
1804 ki->kaio_maxactive_count)) {
1805 num_aio_resv_start++;
1806 mtx_unlock(&aio_job_mtx);
1807 error = aio_newproc(&num_aio_resv_start);
1808 mtx_lock(&aio_job_mtx);
1809 if (error) {
1810 num_aio_resv_start--;
1811 goto retryproc;
1812 }
1813 } else {
1814 ret = -1;
1815 }
1816 return (ret);
1817 }
1818
1819 static void
1820 aio_kick_helper(void *context, int pending)
1821 {
1822 struct proc *userp = context;
1823
1824 mtx_lock(&aio_job_mtx);
1825 while (--pending >= 0) {
1826 if (aio_kick(userp))
1827 break;
1828 }
1829 mtx_unlock(&aio_job_mtx);
1830 }
1831
1832 /*
1833 * Support the aio_return system call; as a side effect, kernel resources
1834 * are released.
1835 */
1836 static int
1837 kern_aio_return(struct thread *td, struct aiocb *uaiocb, struct aiocb_ops *ops)
1838 {
1839 struct proc *p = td->td_proc;
1840 struct aiocblist *cb;
1841 struct kaioinfo *ki;
1842 int status, error;
1843
1844 ki = p->p_aioinfo;
1845 if (ki == NULL)
1846 return (EINVAL);
1847 AIO_LOCK(ki);
1848 TAILQ_FOREACH(cb, &ki->kaio_done, plist) {
1849 if (cb->uuaiocb == uaiocb)
1850 break;
1851 }
1852 if (cb != NULL) {
1853 MPASS(cb->jobstate == JOBST_JOBFINISHED);
1854 status = cb->uaiocb._aiocb_private.status;
1855 error = cb->uaiocb._aiocb_private.error;
1856 td->td_retval[0] = status;
1857 if (cb->uaiocb.aio_lio_opcode == LIO_WRITE) {
1858 td->td_ru.ru_oublock += cb->outputcharge;
1859 cb->outputcharge = 0;
1860 } else if (cb->uaiocb.aio_lio_opcode == LIO_READ) {
1861 td->td_ru.ru_inblock += cb->inputcharge;
1862 cb->inputcharge = 0;
1863 }
1864 aio_free_entry(cb);
1865 AIO_UNLOCK(ki);
1866 ops->store_error(uaiocb, error);
1867 ops->store_status(uaiocb, status);
1868 } else {
1869 error = EINVAL;
1870 AIO_UNLOCK(ki);
1871 }
1872 return (error);
1873 }
1874
1875 int
1876 sys_aio_return(struct thread *td, struct aio_return_args *uap)
1877 {
1878
1879 return (kern_aio_return(td, uap->aiocbp, &aiocb_ops));
1880 }
1881
1882 /*
1883 * Allow a process to wake up when any of its I/O requests complete.
1884 */
1885 static int
1886 kern_aio_suspend(struct thread *td, int njoblist, struct aiocb **ujoblist,
1887 struct timespec *ts)
1888 {
1889 struct proc *p = td->td_proc;
1890 struct timeval atv;
1891 struct kaioinfo *ki;
1892 struct aiocblist *cb, *cbfirst;
1893 int error, i, timo;
1894
1895 timo = 0;
1896 if (ts) {
1897 if (ts->tv_nsec < 0 || ts->tv_nsec >= 1000000000)
1898 return (EINVAL);
1899
1900 TIMESPEC_TO_TIMEVAL(&atv, ts);
1901 if (itimerfix(&atv))
1902 return (EINVAL);
1903 timo = tvtohz(&atv);
1904 }
1905
1906 ki = p->p_aioinfo;
1907 if (ki == NULL)
1908 return (EAGAIN);
1909
1910 if (njoblist == 0)
1911 return (0);
1912
1913 AIO_LOCK(ki);
1914 for (;;) {
1915 cbfirst = NULL;
1916 error = 0;
1917 TAILQ_FOREACH(cb, &ki->kaio_all, allist) {
1918 for (i = 0; i < njoblist; i++) {
1919 if (cb->uuaiocb == ujoblist[i]) {
1920 if (cbfirst == NULL)
1921 cbfirst = cb;
1922 if (cb->jobstate == JOBST_JOBFINISHED)
1923 goto RETURN;
1924 }
1925 }
1926 }
1927 /* All of the requested jobs have finished. */
1928 if (cbfirst == NULL)
1929 break;
1930
1931 ki->kaio_flags |= KAIO_WAKEUP;
1932 error = msleep(&p->p_aioinfo, AIO_MTX(ki), PRIBIO | PCATCH,
1933 "aiospn", timo);
1934 if (error == ERESTART)
1935 error = EINTR;
1936 if (error)
1937 break;
1938 }
1939 RETURN:
1940 AIO_UNLOCK(ki);
1941 return (error);
1942 }
1943
1944 int
1945 sys_aio_suspend(struct thread *td, struct aio_suspend_args *uap)
1946 {
1947 struct timespec ts, *tsp;
1948 struct aiocb **ujoblist;
1949 int error;
1950
1951 if (uap->nent < 0 || uap->nent > AIO_LISTIO_MAX)
1952 return (EINVAL);
1953
1954 if (uap->timeout) {
1955 /* Get timespec struct. */
1956 if ((error = copyin(uap->timeout, &ts, sizeof(ts))) != 0)
1957 return (error);
1958 tsp = &ts;
1959 } else
1960 tsp = NULL;
1961
1962 ujoblist = uma_zalloc(aiol_zone, M_WAITOK);
1963 error = copyin(uap->aiocbp, ujoblist, uap->nent * sizeof(ujoblist[0]));
1964 if (error == 0)
1965 error = kern_aio_suspend(td, uap->nent, ujoblist, tsp);
1966 uma_zfree(aiol_zone, ujoblist);
1967 return (error);
1968 }
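
/*
 * Editor's example (not part of vfs_aio.c): a sketch of the userland
 * side of kern_aio_suspend() above, waiting up to one second for
 * either of two outstanding jobs.  wait_for_either() is a hypothetical
 * helper name.
 */
#if 0
#include <aio.h>
#include <errno.h>
#include <time.h>

static int
wait_for_either(const struct aiocb *a, const struct aiocb *b)
{
	const struct aiocb *list[2] = { a, b };
	struct timespec ts = { .tv_sec = 1, .tv_nsec = 0 };

	if (aio_suspend(list, 2, &ts) == -1)
		return (errno);		/* EAGAIN on timeout, EINTR on signal */
	return (0);
}
#endif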
1969
1970 /*
1971 * aio_cancel cancels any non-physio aio operations not currently in
1972 * progress.
1973 */
1974 int
1975 sys_aio_cancel(struct thread *td, struct aio_cancel_args *uap)
1976 {
1977 struct proc *p = td->td_proc;
1978 struct kaioinfo *ki;
1979 struct aiocblist *cbe, *cbn;
1980 struct file *fp;
1981 struct socket *so;
1982 int error;
1983 int remove;
1984 int cancelled = 0;
1985 int notcancelled = 0;
1986 struct vnode *vp;
1987
1988 /* Lookup file object. */
1989 error = fget(td, uap->fd, 0, &fp);
1990 if (error)
1991 return (error);
1992
1993 ki = p->p_aioinfo;
1994 if (ki == NULL)
1995 goto done;
1996
1997 if (fp->f_type == DTYPE_VNODE) {
1998 vp = fp->f_vnode;
1999 if (vn_isdisk(vp, &error)) {
2000 fdrop(fp, td);
2001 td->td_retval[0] = AIO_NOTCANCELED;
2002 return (0);
2003 }
2004 }
2005
2006 AIO_LOCK(ki);
2007 TAILQ_FOREACH_SAFE(cbe, &ki->kaio_jobqueue, plist, cbn) {
2008 if ((uap->fd == cbe->uaiocb.aio_fildes) &&
2009 ((uap->aiocbp == NULL) ||
2010 (uap->aiocbp == cbe->uuaiocb))) {
2011 remove = 0;
2012
2013 mtx_lock(&aio_job_mtx);
2014 if (cbe->jobstate == JOBST_JOBQGLOBAL) {
2015 TAILQ_REMOVE(&aio_jobs, cbe, list);
2016 remove = 1;
2017 } else if (cbe->jobstate == JOBST_JOBQSOCK) {
2018 MPASS(fp->f_type == DTYPE_SOCKET);
2019 so = fp->f_data;
2020 TAILQ_REMOVE(&so->so_aiojobq, cbe, list);
2021 remove = 1;
2022 } else if (cbe->jobstate == JOBST_JOBQSYNC) {
2023 TAILQ_REMOVE(&ki->kaio_syncqueue, cbe, list);
2024 remove = 1;
2025 }
2026 mtx_unlock(&aio_job_mtx);
2027
2028 if (remove) {
2029 TAILQ_REMOVE(&ki->kaio_jobqueue, cbe, plist);
2030 cbe->uaiocb._aiocb_private.status = -1;
2031 cbe->uaiocb._aiocb_private.error = ECANCELED;
2032 aio_bio_done_notify(p, cbe, DONE_QUEUE);
2033 cancelled++;
2034 } else {
2035 notcancelled++;
2036 }
2037 if (uap->aiocbp != NULL)
2038 break;
2039 }
2040 }
2041 AIO_UNLOCK(ki);
2042
2043 done:
2044 fdrop(fp, td);
2045
2046 if (uap->aiocbp != NULL) {
2047 if (cancelled) {
2048 td->td_retval[0] = AIO_CANCELED;
2049 return (0);
2050 }
2051 }
2052
2053 if (notcancelled) {
2054 td->td_retval[0] = AIO_NOTCANCELED;
2055 return (0);
2056 }
2057
2058 if (cancelled) {
2059 td->td_retval[0] = AIO_CANCELED;
2060 return (0);
2061 }
2062
2063 td->td_retval[0] = AIO_ALLDONE;
2064
2065 return (0);
2066 }
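
/*
 * Editor's example (not part of vfs_aio.c): the userland view of the
 * three sys_aio_cancel() outcomes computed above.  cancel_all() is a
 * hypothetical helper name.
 */
#if 0
#include <aio.h>
#include <stdio.h>

static void
cancel_all(int fd)
{
	/* A NULL aiocbp means "every queued job on this descriptor". */
	switch (aio_cancel(fd, NULL)) {
	case AIO_CANCELED:
		printf("all queued jobs were cancelled\n");
		break;
	case AIO_NOTCANCELED:
		printf("at least one job was already in progress\n");
		break;
	case AIO_ALLDONE:
		printf("nothing was left to cancel\n");
		break;
	}
}
#endif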
2067
2068 /*
2069 * aio_error is implemented at the kernel level for compatibility
2070 * purposes only.  For a user-mode async implementation, it would
2071 * be best done in a userland subroutine.
2072 */
2073 static int
2074 kern_aio_error(struct thread *td, struct aiocb *aiocbp, struct aiocb_ops *ops)
2075 {
2076 struct proc *p = td->td_proc;
2077 struct aiocblist *cb;
2078 struct kaioinfo *ki;
2079 int status;
2080
2081 ki = p->p_aioinfo;
2082 if (ki == NULL) {
2083 td->td_retval[0] = EINVAL;
2084 return (0);
2085 }
2086
2087 AIO_LOCK(ki);
2088 TAILQ_FOREACH(cb, &ki->kaio_all, allist) {
2089 if (cb->uuaiocb == aiocbp) {
2090 if (cb->jobstate == JOBST_JOBFINISHED)
2091 td->td_retval[0] =
2092 cb->uaiocb._aiocb_private.error;
2093 else
2094 td->td_retval[0] = EINPROGRESS;
2095 AIO_UNLOCK(ki);
2096 return (0);
2097 }
2098 }
2099 AIO_UNLOCK(ki);
2100
2101 /*
2102 * Hack for failure of aio_aqueue.
2103 */
2104 status = ops->fetch_status(aiocbp);
2105 if (status == -1) {
2106 td->td_retval[0] = ops->fetch_error(aiocbp);
2107 return (0);
2108 }
2109
2110 td->td_retval[0] = EINVAL;
2111 return (0);
2112 }
2113
2114 int
2115 sys_aio_error(struct thread *td, struct aio_error_args *uap)
2116 {
2117
2118 return (kern_aio_error(td, uap->aiocbp, &aiocb_ops));
2119 }
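
/*
 * Editor's example (not part of vfs_aio.c): the results a caller can
 * see from kern_aio_error() above.  job_finished() is a hypothetical
 * helper name.
 */
#if 0
#include <aio.h>
#include <errno.h>

/* Returns 1 when finished; *errp then holds 0 or the failing errno. */
static int
job_finished(const struct aiocb *cb, int *errp)
{
	int err = aio_error(cb);

	if (err == EINPROGRESS)
		return (0);
	*errp = err;	/* 0 on success, otherwise the operation's errno */
	return (1);
}
#endif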
2120
2121 /* syscall - asynchronous read from a file (REALTIME) */
2122 int
2123 sys_oaio_read(struct thread *td, struct oaio_read_args *uap)
2124 {
2125
2126 return (aio_aqueue(td, (struct aiocb *)uap->aiocbp, NULL, LIO_READ,
2127 &aiocb_ops_osigevent));
2128 }
2129
2130 int
2131 sys_aio_read(struct thread *td, struct aio_read_args *uap)
2132 {
2133
2134 return (aio_aqueue(td, uap->aiocbp, NULL, LIO_READ, &aiocb_ops));
2135 }
2136
2137 /* syscall - asynchronous write to a file (REALTIME) */
2138 int
2139 sys_oaio_write(struct thread *td, struct oaio_write_args *uap)
2140 {
2141
2142 return (aio_aqueue(td, (struct aiocb *)uap->aiocbp, NULL, LIO_WRITE,
2143 &aiocb_ops_osigevent));
2144 }
2145
2146 int
2147 sys_aio_write(struct thread *td, struct aio_write_args *uap)
2148 {
2149
2150 return (aio_aqueue(td, uap->aiocbp, NULL, LIO_WRITE, &aiocb_ops));
2151 }
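
/*
 * Editor's example (not part of vfs_aio.c): queueing an asynchronous
 * write at an explicit offset, the userland entry into aio_aqueue()
 * above.  __DECONST is needed because aio_buf is "volatile void *";
 * write_async() is a hypothetical helper name.
 */
#if 0
#include <sys/cdefs.h>
#include <aio.h>
#include <string.h>

static int
write_async(int fd, const void *buf, size_t len, off_t off,
    struct aiocb *cb)
{
	memset(cb, 0, sizeof(*cb));
	cb->aio_fildes = fd;
	cb->aio_buf = __DECONST(void *, buf);
	cb->aio_nbytes = len;
	cb->aio_offset = off;
	return (aio_write(cb));	/* caller later reaps with aio_return() */
}
#endif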
2152
2153 static int
2154 kern_lio_listio(struct thread *td, int mode, struct aiocb * const *uacb_list,
2155 struct aiocb **acb_list, int nent, struct sigevent *sig,
2156 struct aiocb_ops *ops)
2157 {
2158 struct proc *p = td->td_proc;
2159 struct aiocb *iocb;
2160 struct kaioinfo *ki;
2161 struct aioliojob *lj;
2162 struct kevent kev;
2163 int error;
2164 int nerror;
2165 int i;
2166
2167 if ((mode != LIO_NOWAIT) && (mode != LIO_WAIT))
2168 return (EINVAL);
2169
2170 if (nent < 0 || nent > AIO_LISTIO_MAX)
2171 return (EINVAL);
2172
2173 if (p->p_aioinfo == NULL)
2174 aio_init_aioinfo(p);
2175
2176 ki = p->p_aioinfo;
2177
2178 lj = uma_zalloc(aiolio_zone, M_WAITOK);
2179 lj->lioj_flags = 0;
2180 lj->lioj_count = 0;
2181 lj->lioj_finished_count = 0;
2182 knlist_init_mtx(&lj->klist, AIO_MTX(ki));
2183 ksiginfo_init(&lj->lioj_ksi);
2184
2185 /*
2186 * Set up the completion signal.
2187 */
2188 if (sig && (mode == LIO_NOWAIT)) {
2189 bcopy(sig, &lj->lioj_signal, sizeof(lj->lioj_signal));
2190 if (lj->lioj_signal.sigev_notify == SIGEV_KEVENT) {
2191 /* Assume only new style KEVENT */
2192 kev.filter = EVFILT_LIO;
2193 kev.flags = EV_ADD | EV_ENABLE | EV_FLAG1;
2194 kev.ident = (uintptr_t)uacb_list; /* something unique */
2195 kev.data = (intptr_t)lj;
2196 /* pass user defined sigval data */
2197 kev.udata = lj->lioj_signal.sigev_value.sival_ptr;
2198 error = kqfd_register(
2199 lj->lioj_signal.sigev_notify_kqueue, &kev, td, 1);
2200 if (error) {
2201 uma_zfree(aiolio_zone, lj);
2202 return (error);
2203 }
2204 } else if (lj->lioj_signal.sigev_notify == SIGEV_NONE) {
2205 ;
2206 } else if (lj->lioj_signal.sigev_notify == SIGEV_SIGNAL ||
2207 lj->lioj_signal.sigev_notify == SIGEV_THREAD_ID) {
2208 if (!_SIG_VALID(lj->lioj_signal.sigev_signo)) {
2209 uma_zfree(aiolio_zone, lj);
2210 return (EINVAL);
2211 }
2212 lj->lioj_flags |= LIOJ_SIGNAL;
2213 } else {
2214 uma_zfree(aiolio_zone, lj);
2215 return (EINVAL);
2216 }
2217 }
2218
2219 AIO_LOCK(ki);
2220 TAILQ_INSERT_TAIL(&ki->kaio_liojoblist, lj, lioj_list);
2221 /*
2222 * Take an extra reference on the lio so that it cannot be freed
2223 * by other threads running aio_waitcomplete() or aio_return(),
2224 * and so that no completion event is sent until all jobs have
2225 * been queued.
2226 */
2227 lj->lioj_count = 1;
2228 AIO_UNLOCK(ki);
2229
2230 /*
2231 * Get pointers to the list of I/O requests.
2232 */
2233 nerror = 0;
2234 for (i = 0; i < nent; i++) {
2235 iocb = acb_list[i];
2236 if (iocb != NULL) {
2237 error = aio_aqueue(td, iocb, lj, LIO_NOP, ops);
2238 if (error != 0)
2239 nerror++;
2240 }
2241 }
2242
2243 error = 0;
2244 AIO_LOCK(ki);
2245 if (mode == LIO_WAIT) {
2246 while (lj->lioj_count - 1 != lj->lioj_finished_count) {
2247 ki->kaio_flags |= KAIO_WAKEUP;
2248 error = msleep(&p->p_aioinfo, AIO_MTX(ki),
2249 PRIBIO | PCATCH, "aiospn", 0);
2250 if (error == ERESTART)
2251 error = EINTR;
2252 if (error)
2253 break;
2254 }
2255 } else {
2256 if (lj->lioj_count - 1 == lj->lioj_finished_count) {
2257 if (lj->lioj_signal.sigev_notify == SIGEV_KEVENT) {
2258 lj->lioj_flags |= LIOJ_KEVENT_POSTED;
2259 KNOTE_LOCKED(&lj->klist, 1);
2260 }
2261 if ((lj->lioj_flags & (LIOJ_SIGNAL|LIOJ_SIGNAL_POSTED))
2262 == LIOJ_SIGNAL
2263 && (lj->lioj_signal.sigev_notify == SIGEV_SIGNAL ||
2264 lj->lioj_signal.sigev_notify == SIGEV_THREAD_ID)) {
2265 aio_sendsig(p, &lj->lioj_signal,
2266 &lj->lioj_ksi);
2267 lj->lioj_flags |= LIOJ_SIGNAL_POSTED;
2268 }
2269 }
2270 }
2271 lj->lioj_count--;
2272 if (lj->lioj_count == 0) {
2273 TAILQ_REMOVE(&ki->kaio_liojoblist, lj, lioj_list);
2274 knlist_delete(&lj->klist, curthread, 1);
2275 PROC_LOCK(p);
2276 sigqueue_take(&lj->lioj_ksi);
2277 PROC_UNLOCK(p);
2278 AIO_UNLOCK(ki);
2279 uma_zfree(aiolio_zone, lj);
2280 } else
2281 AIO_UNLOCK(ki);
2282
2283 if (nerror)
2284 return (EIO);
2285 return (error);
2286 }
2287
2288 /* syscall - list directed I/O (REALTIME) */
2289 int
2290 sys_olio_listio(struct thread *td, struct olio_listio_args *uap)
2291 {
2292 struct aiocb **acb_list;
2293 struct sigevent *sigp, sig;
2294 struct osigevent osig;
2295 int error, nent;
2296
2297 if ((uap->mode != LIO_NOWAIT) && (uap->mode != LIO_WAIT))
2298 return (EINVAL);
2299
2300 nent = uap->nent;
2301 if (nent < 0 || nent > AIO_LISTIO_MAX)
2302 return (EINVAL);
2303
2304 if (uap->sig && (uap->mode == LIO_NOWAIT)) {
2305 error = copyin(uap->sig, &osig, sizeof(osig));
2306 if (error)
2307 return (error);
2308 error = convert_old_sigevent(&osig, &sig);
2309 if (error)
2310 return (error);
2311 sigp = &sig;
2312 } else
2313 sigp = NULL;
2314
2315 acb_list = malloc(sizeof(struct aiocb *) * nent, M_LIO, M_WAITOK);
2316 error = copyin(uap->acb_list, acb_list, nent * sizeof(acb_list[0]));
2317 if (error == 0)
2318 error = kern_lio_listio(td, uap->mode,
2319 (struct aiocb * const *)uap->acb_list, acb_list, nent, sigp,
2320 &aiocb_ops_osigevent);
2321 free(acb_list, M_LIO);
2322 return (error);
2323 }
2324
2325 /* syscall - list directed I/O (REALTIME) */
2326 int
2327 sys_lio_listio(struct thread *td, struct lio_listio_args *uap)
2328 {
2329 struct aiocb **acb_list;
2330 struct sigevent *sigp, sig;
2331 int error, nent;
2332
2333 if ((uap->mode != LIO_NOWAIT) && (uap->mode != LIO_WAIT))
2334 return (EINVAL);
2335
2336 nent = uap->nent;
2337 if (nent < 0 || nent > AIO_LISTIO_MAX)
2338 return (EINVAL);
2339
2340 if (uap->sig && (uap->mode == LIO_NOWAIT)) {
2341 error = copyin(uap->sig, &sig, sizeof(sig));
2342 if (error)
2343 return (error);
2344 sigp = &sig;
2345 } else
2346 sigp = NULL;
2347
2348 acb_list = malloc(sizeof(struct aiocb *) * nent, M_LIO, M_WAITOK);
2349 error = copyin(uap->acb_list, acb_list, nent * sizeof(acb_list[0]));
2350 if (error == 0)
2351 error = kern_lio_listio(td, uap->mode, uap->acb_list, acb_list,
2352 nent, sigp, &aiocb_ops);
2353 free(acb_list, M_LIO);
2354 return (error);
2355 }
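
/*
 * Editor's example (not part of vfs_aio.c): a LIO_WAIT batch, which
 * blocks in the msleep() loop of kern_lio_listio() above until every
 * job in the list has finished.  batch_rw() is a hypothetical helper
 * name.
 */
#if 0
#include <aio.h>
#include <string.h>

static int
batch_rw(int fd, void *rbuf, void *wbuf, size_t len)
{
	struct aiocb rd, wr;
	struct aiocb *list[2] = { &rd, &wr };

	memset(&rd, 0, sizeof(rd));
	rd.aio_fildes = fd;
	rd.aio_buf = rbuf;
	rd.aio_nbytes = len;
	rd.aio_lio_opcode = LIO_READ;

	memset(&wr, 0, sizeof(wr));
	wr.aio_fildes = fd;
	wr.aio_buf = wbuf;
	wr.aio_nbytes = len;
	wr.aio_offset = (off_t)len;
	wr.aio_lio_opcode = LIO_WRITE;

	return (lio_listio(LIO_WAIT, list, 2, NULL));
}
#endif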
2356
2357 /*
2358 * Called from the interrupt thread for physio; we must return as
2359 * quickly as possible, so we schedule a biohelper task.
2360 */
2361 static void
2362 aio_physwakeup(struct buf *bp)
2363 {
2364 struct aiocblist *aiocbe;
2365
2366 aiocbe = (struct aiocblist *)bp->b_caller1;
2367 taskqueue_enqueue(taskqueue_aiod_bio, &aiocbe->biotask);
2368 }
2369
2370 /*
2371 * Task routine for heavier completion work: wakeups and signal delivery.
2372 */
2373 static void
2374 biohelper(void *context, int pending)
2375 {
2376 struct aiocblist *aiocbe = context;
2377 struct buf *bp;
2378 struct proc *userp;
2379 struct kaioinfo *ki;
2380 int nblks;
2381
2382 bp = aiocbe->bp;
2383 userp = aiocbe->userproc;
2384 ki = userp->p_aioinfo;
2385 AIO_LOCK(ki);
2386 aiocbe->uaiocb._aiocb_private.status -= bp->b_resid;
2387 aiocbe->uaiocb._aiocb_private.error = 0;
2388 if (bp->b_ioflags & BIO_ERROR)
2389 aiocbe->uaiocb._aiocb_private.error = bp->b_error;
2390 nblks = btodb(aiocbe->uaiocb.aio_nbytes);
2391 if (aiocbe->uaiocb.aio_lio_opcode == LIO_WRITE)
2392 aiocbe->outputcharge += nblks;
2393 else
2394 aiocbe->inputcharge += nblks;
2395 aiocbe->bp = NULL;
2396 TAILQ_REMOVE(&userp->p_aioinfo->kaio_bufqueue, aiocbe, plist);
2397 ki->kaio_buffer_count--;
2398 aio_bio_done_notify(userp, aiocbe, DONE_BUF);
2399 AIO_UNLOCK(ki);
2400
2401 /* Release mapping into kernel space. */
2402 vunmapbuf(bp);
2403 relpbuf(bp, NULL);
2404 atomic_subtract_int(&num_buf_aio, 1);
2405 }
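
/*
 * Editor's example (not part of vfs_aio.c): the defer-to-taskqueue
 * pattern used by aio_physwakeup()/biohelper() above, in isolation.
 * A sketch only; vfs_aio.c uses its private taskqueue_aiod_bio rather
 * than the stock taskqueue_thread used here, and the example_* names
 * are hypothetical.
 */
#if 0
#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/taskqueue.h>

static struct task example_task;

/* Runs later in a taskqueue thread, where sleeping is permitted. */
static void
example_handler(void *ctx, int pending)
{
	/* heavy completion work goes here */
}

static void
example_init(void *ctx)
{
	TASK_INIT(&example_task, 0, example_handler, ctx);
}

/* Called from interrupt context: do nothing but enqueue. */
static void
example_intr(void)
{
	taskqueue_enqueue(taskqueue_thread, &example_task);
}
#endif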
2406
2407 /* Wait for the next aio completion; backend for aio_waitcomplete(2). */
2408 static int
2409 kern_aio_waitcomplete(struct thread *td, struct aiocb **aiocbp,
2410 struct timespec *ts, struct aiocb_ops *ops)
2411 {
2412 struct proc *p = td->td_proc;
2413 struct timeval atv;
2414 struct kaioinfo *ki;
2415 struct aiocblist *cb;
2416 struct aiocb *uuaiocb;
2417 int error, status, timo;
2418
2419 ops->store_aiocb(aiocbp, NULL);
2420
2421 timo = 0;
2422 if (ts) {
2423 if ((ts->tv_nsec < 0) || (ts->tv_nsec >= 1000000000))
2424 return (EINVAL);
2425
2426 TIMESPEC_TO_TIMEVAL(&atv, ts);
2427 if (itimerfix(&atv))
2428 return (EINVAL);
2429 timo = tvtohz(&atv);
2430 }
2431
2432 if (p->p_aioinfo == NULL)
2433 aio_init_aioinfo(p);
2434 ki = p->p_aioinfo;
2435
2436 error = 0;
2437 cb = NULL;
2438 AIO_LOCK(ki);
2439 while ((cb = TAILQ_FIRST(&ki->kaio_done)) == NULL) {
2440 ki->kaio_flags |= KAIO_WAKEUP;
2441 error = msleep(&p->p_aioinfo, AIO_MTX(ki), PRIBIO | PCATCH,
2442 "aiowc", timo);
2443 if (timo && error == ERESTART)
2444 error = EINTR;
2445 if (error)
2446 break;
2447 }
2448
2449 if (cb != NULL) {
2450 MPASS(cb->jobstate == JOBST_JOBFINISHED);
2451 uuaiocb = cb->uuaiocb;
2452 status = cb->uaiocb._aiocb_private.status;
2453 error = cb->uaiocb._aiocb_private.error;
2454 td->td_retval[0] = status;
2455 if (cb->uaiocb.aio_lio_opcode == LIO_WRITE) {
2456 td->td_ru.ru_oublock += cb->outputcharge;
2457 cb->outputcharge = 0;
2458 } else if (cb->uaiocb.aio_lio_opcode == LIO_READ) {
2459 td->td_ru.ru_inblock += cb->inputcharge;
2460 cb->inputcharge = 0;
2461 }
2462 aio_free_entry(cb);
2463 AIO_UNLOCK(ki);
2464 ops->store_aiocb(aiocbp, uuaiocb);
2465 ops->store_error(uuaiocb, error);
2466 ops->store_status(uuaiocb, status);
2467 } else
2468 AIO_UNLOCK(ki);
2469
2470 return (error);
2471 }
2472
2473 int
2474 sys_aio_waitcomplete(struct thread *td, struct aio_waitcomplete_args *uap)
2475 {
2476 struct timespec ts, *tsp;
2477 int error;
2478
2479 if (uap->timeout) {
2480 /* Get timespec struct. */
2481 error = copyin(uap->timeout, &ts, sizeof(ts));
2482 if (error)
2483 return (error);
2484 tsp = &ts;
2485 } else
2486 tsp = NULL;
2487
2488 return (kern_aio_waitcomplete(td, uap->aiocbp, tsp, &aiocb_ops));
2489 }
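
/*
 * Editor's example (not part of vfs_aio.c): aio_waitcomplete(2) is the
 * FreeBSD-specific "reap whichever job finishes next" call backed by
 * kern_aio_waitcomplete() above.  reap_next() is a hypothetical helper
 * name.
 */
#if 0
#include <aio.h>

static ssize_t
reap_next(struct aiocb **cbp)
{
	/* NULL timeout: sleep in "aiowc" until some job completes. */
	return (aio_waitcomplete(cbp, NULL));
}
#endif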
2490
2491 static int
2492 kern_aio_fsync(struct thread *td, int op, struct aiocb *aiocbp,
2493 struct aiocb_ops *ops)
2494 {
2495 struct proc *p = td->td_proc;
2496 struct kaioinfo *ki;
2497
2498 if (op != O_SYNC) /* XXX lack of O_DSYNC */
2499 return (EINVAL);
2500 ki = p->p_aioinfo;
2501 if (ki == NULL)
2502 aio_init_aioinfo(p);
2503 return (aio_aqueue(td, aiocbp, NULL, LIO_SYNC, ops));
2504 }
2505
2506 int
2507 sys_aio_fsync(struct thread *td, struct aio_fsync_args *uap)
2508 {
2509
2510 return (kern_aio_fsync(td, uap->op, uap->aiocbp, &aiocb_ops));
2511 }
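
/*
 * Editor's example (not part of vfs_aio.c): queueing an asynchronous
 * fsync, which kern_aio_fsync() above turns into a LIO_SYNC job.  Only
 * O_SYNC is accepted; O_DSYNC would fail with EINVAL here.
 * fsync_async() is a hypothetical helper name.
 */
#if 0
#include <aio.h>
#include <fcntl.h>
#include <string.h>

static int
fsync_async(int fd, struct aiocb *cb)
{
	memset(cb, 0, sizeof(*cb));
	cb->aio_fildes = fd;
	return (aio_fsync(O_SYNC, cb));
}
#endif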
2512
2513 /* kqueue attach function */
2514 static int
2515 filt_aioattach(struct knote *kn)
2516 {
2517 struct aiocblist *aiocbe = (struct aiocblist *)kn->kn_sdata;
2518
2519 /*
2520 * The aiocbe pointer must be validated before using it, so
2521 * registration is restricted to the kernel; the user cannot
2522 * set EV_FLAG1.
2523 */
2524 if ((kn->kn_flags & EV_FLAG1) == 0)
2525 return (EPERM);
2526 kn->kn_ptr.p_aio = aiocbe;
2527 kn->kn_flags &= ~EV_FLAG1;
2528
2529 knlist_add(&aiocbe->klist, kn, 0);
2530
2531 return (0);
2532 }
2533
2534 /* kqueue detach function */
2535 static void
2536 filt_aiodetach(struct knote *kn)
2537 {
2538 struct knlist *knl;
2539
2540 knl = &kn->kn_ptr.p_aio->klist;
2541 knl->kl_lock(knl->kl_lockarg);
2542 if (!knlist_empty(knl))
2543 knlist_remove(knl, kn, 1);
2544 knl->kl_unlock(knl->kl_lockarg);
2545 }
2546
2547 /* kqueue filter function */
2548 /*ARGSUSED*/
2549 static int
2550 filt_aio(struct knote *kn, long hint)
2551 {
2552 struct aiocblist *aiocbe = kn->kn_ptr.p_aio;
2553
2554 kn->kn_data = aiocbe->uaiocb._aiocb_private.error;
2555 if (aiocbe->jobstate != JOBST_JOBFINISHED)
2556 return (0);
2557 kn->kn_flags |= EV_EOF;
2558 return (1);
2559 }
2560
2561 /* kqueue attach function */
2562 static int
2563 filt_lioattach(struct knote *kn)
2564 {
2565 struct aioliojob *lj = (struct aioliojob *)kn->kn_sdata;
2566
2567 /*
2568 * The aioliojob pointer must be validated before using it, so
2569 * registration is restricted to the kernel; the user cannot
2570 * set EV_FLAG1.
2571 */
2572 if ((kn->kn_flags & EV_FLAG1) == 0)
2573 return (EPERM);
2574 kn->kn_ptr.p_lio = lj;
2575 kn->kn_flags &= ~EV_FLAG1;
2576
2577 knlist_add(&lj->klist, kn, 0);
2578
2579 return (0);
2580 }
2581
2582 /* kqueue detach function */
2583 static void
2584 filt_liodetach(struct knote *kn)
2585 {
2586 struct knlist *knl;
2587
2588 knl = &kn->kn_ptr.p_lio->klist;
2589 knl->kl_lock(knl->kl_lockarg);
2590 if (!knlist_empty(knl))
2591 knlist_remove(knl, kn, 1);
2592 knl->kl_unlock(knl->kl_lockarg);
2593 }
2594
2595 /* kqueue filter function */
2596 /*ARGSUSED*/
2597 static int
2598 filt_lio(struct knote *kn, long hint)
2599 {
2600 struct aioliojob *lj = kn->kn_ptr.p_lio;
2601
2602 return (lj->lioj_flags & LIOJ_KEVENT_POSTED);
2603 }
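
/*
 * Editor's example (not part of vfs_aio.c): completion delivery via
 * kqueue, the consumer of the EVFILT_AIO filter implemented above.  A
 * sketch with minimal error handling; read_kq() is a hypothetical
 * helper name.
 */
#if 0
#include <sys/event.h>
#include <aio.h>
#include <string.h>

static ssize_t
read_kq(int kq, int fd, void *buf, size_t len)
{
	struct aiocb cb;
	struct kevent ev;

	memset(&cb, 0, sizeof(cb));
	cb.aio_fildes = fd;
	cb.aio_buf = buf;
	cb.aio_nbytes = len;
	cb.aio_sigevent.sigev_notify = SIGEV_KEVENT;
	cb.aio_sigevent.sigev_notify_kqueue = kq;
	cb.aio_sigevent.sigev_value.sival_ptr = &cb;
	if (aio_read(&cb) == -1)
		return (-1);
	/* filt_aio() fires once the job reaches JOBST_JOBFINISHED. */
	if (kevent(kq, NULL, 0, &ev, 1, NULL) == -1)
		return (-1);
	return (aio_return(ev.udata));	/* udata is the sival_ptr above */
}
#endif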
2604
2605 #ifdef COMPAT_FREEBSD32
2606
2607 struct __aiocb_private32 {
2608 int32_t status;
2609 int32_t error;
2610 uint32_t kernelinfo;
2611 };
2612
2613 typedef struct oaiocb32 {
2614 int aio_fildes; /* File descriptor */
2615 uint64_t aio_offset __packed; /* File offset for I/O */
2616 uint32_t aio_buf; /* I/O buffer in process space */
2617 uint32_t aio_nbytes; /* Number of bytes for I/O */
2618 struct osigevent32 aio_sigevent; /* Signal to deliver */
2619 int aio_lio_opcode; /* LIO opcode */
2620 int aio_reqprio; /* Request priority -- ignored */
2621 struct __aiocb_private32 _aiocb_private;
2622 } oaiocb32_t;
2623
2624 typedef struct aiocb32 {
2625 int32_t aio_fildes; /* File descriptor */
2626 uint64_t aio_offset __packed; /* File offset for I/O */
2627 uint32_t aio_buf; /* I/O buffer in process space */
2628 uint32_t aio_nbytes; /* Number of bytes for I/O */
2629 int __spare__[2];
2630 uint32_t __spare2__;
2631 int aio_lio_opcode; /* LIO opcode */
2632 int aio_reqprio; /* Request priority -- ignored */
2633 struct __aiocb_private32 _aiocb_private;
2634 struct sigevent32 aio_sigevent; /* Signal to deliver */
2635 } aiocb32_t;
2636
2637 static int
2638 convert_old_sigevent32(struct osigevent32 *osig, struct sigevent *nsig)
2639 {
2640
2641 /*
2642 * Only SIGEV_NONE, SIGEV_SIGNAL, and SIGEV_KEVENT are
2643 * supported by AIO with the old sigevent structure.
2644 */
2645 CP(*osig, *nsig, sigev_notify);
2646 switch (nsig->sigev_notify) {
2647 case SIGEV_NONE:
2648 break;
2649 case SIGEV_SIGNAL:
2650 nsig->sigev_signo = osig->__sigev_u.__sigev_signo;
2651 break;
2652 case SIGEV_KEVENT:
2653 nsig->sigev_notify_kqueue =
2654 osig->__sigev_u.__sigev_notify_kqueue;
2655 PTRIN_CP(*osig, *nsig, sigev_value.sival_ptr);
2656 break;
2657 default:
2658 return (EINVAL);
2659 }
2660 return (0);
2661 }
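
/*
 * Editor's note: CP(), PTRIN(), and PTRIN_CP() come from the freebsd32
 * compat headers.  To a first approximation (a sketch, not the literal
 * definitions) they behave as:
 */
#if 0
#define CP(src, dst, fld)	do { (dst).fld = (src).fld; } while (0)
#define PTRIN(v)		((void *)(uintptr_t)(v))
#define PTRIN_CP(src, dst, fld)	\
	do { (dst).fld = PTRIN((src).fld); } while (0)
#endif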
2662
2663 static int
2664 aiocb32_copyin_old_sigevent(struct aiocb *ujob, struct aiocb *kjob)
2665 {
2666 struct oaiocb32 job32;
2667 int error;
2668
2669 bzero(kjob, sizeof(struct aiocb));
2670 error = copyin(ujob, &job32, sizeof(job32));
2671 if (error)
2672 return (error);
2673
2674 CP(job32, *kjob, aio_fildes);
2675 CP(job32, *kjob, aio_offset);
2676 PTRIN_CP(job32, *kjob, aio_buf);
2677 CP(job32, *kjob, aio_nbytes);
2678 CP(job32, *kjob, aio_lio_opcode);
2679 CP(job32, *kjob, aio_reqprio);
2680 CP(job32, *kjob, _aiocb_private.status);
2681 CP(job32, *kjob, _aiocb_private.error);
2682 PTRIN_CP(job32, *kjob, _aiocb_private.kernelinfo);
2683 return (convert_old_sigevent32(&job32.aio_sigevent,
2684 &kjob->aio_sigevent));
2685 }
2686
2687 static int
2688 convert_sigevent32(struct sigevent32 *sig32, struct sigevent *sig)
2689 {
2690
2691 CP(*sig32, *sig, sigev_notify);
2692 switch (sig->sigev_notify) {
2693 case SIGEV_NONE:
2694 break;
2695 case SIGEV_THREAD_ID:
2696 CP(*sig32, *sig, sigev_notify_thread_id);
2697 /* FALLTHROUGH */
2698 case SIGEV_SIGNAL:
2699 CP(*sig32, *sig, sigev_signo);
2700 break;
2701 case SIGEV_KEVENT:
2702 CP(*sig32, *sig, sigev_notify_kqueue);
2703 CP(*sig32, *sig, sigev_notify_kevent_flags);
2704 PTRIN_CP(*sig32, *sig, sigev_value.sival_ptr);
2705 break;
2706 default:
2707 return (EINVAL);
2708 }
2709 return (0);
2710 }
2711
2712 static int
2713 aiocb32_copyin(struct aiocb *ujob, struct aiocb *kjob)
2714 {
2715 struct aiocb32 job32;
2716 int error;
2717
2718 error = copyin(ujob, &job32, sizeof(job32));
2719 if (error)
2720 return (error);
2721 CP(job32, *kjob, aio_fildes);
2722 CP(job32, *kjob, aio_offset);
2723 PTRIN_CP(job32, *kjob, aio_buf);
2724 CP(job32, *kjob, aio_nbytes);
2725 CP(job32, *kjob, aio_lio_opcode);
2726 CP(job32, *kjob, aio_reqprio);
2727 CP(job32, *kjob, _aiocb_private.status);
2728 CP(job32, *kjob, _aiocb_private.error);
2729 PTRIN_CP(job32, *kjob, _aiocb_private.kernelinfo);
2730 return (convert_sigevent32(&job32.aio_sigevent, &kjob->aio_sigevent));
2731 }
2732
2733 static long
2734 aiocb32_fetch_status(struct aiocb *ujob)
2735 {
2736 struct aiocb32 *ujob32;
2737
2738 ujob32 = (struct aiocb32 *)ujob;
2739 return (fuword32(&ujob32->_aiocb_private.status));
2740 }
2741
2742 static long
2743 aiocb32_fetch_error(struct aiocb *ujob)
2744 {
2745 struct aiocb32 *ujob32;
2746
2747 ujob32 = (struct aiocb32 *)ujob;
2748 return (fuword32(&ujob32->_aiocb_private.error));
2749 }
2750
2751 static int
2752 aiocb32_store_status(struct aiocb *ujob, long status)
2753 {
2754 struct aiocb32 *ujob32;
2755
2756 ujob32 = (struct aiocb32 *)ujob;
2757 return (suword32(&ujob32->_aiocb_private.status, status));
2758 }
2759
2760 static int
2761 aiocb32_store_error(struct aiocb *ujob, long error)
2762 {
2763 struct aiocb32 *ujob32;
2764
2765 ujob32 = (struct aiocb32 *)ujob;
2766 return (suword32(&ujob32->_aiocb_private.error, error));
2767 }
2768
2769 static int
2770 aiocb32_store_kernelinfo(struct aiocb *ujob, long jobref)
2771 {
2772 struct aiocb32 *ujob32;
2773
2774 ujob32 = (struct aiocb32 *)ujob;
2775 return (suword32(&ujob32->_aiocb_private.kernelinfo, jobref));
2776 }
2777
2778 static int
2779 aiocb32_store_aiocb(struct aiocb **ujobp, struct aiocb *ujob)
2780 {
2781
2782 return (suword32(ujobp, (long)ujob));
2783 }
2784
2785 static struct aiocb_ops aiocb32_ops = {
2786 .copyin = aiocb32_copyin,
2787 .fetch_status = aiocb32_fetch_status,
2788 .fetch_error = aiocb32_fetch_error,
2789 .store_status = aiocb32_store_status,
2790 .store_error = aiocb32_store_error,
2791 .store_kernelinfo = aiocb32_store_kernelinfo,
2792 .store_aiocb = aiocb32_store_aiocb,
2793 };
2794
2795 static struct aiocb_ops aiocb32_ops_osigevent = {
2796 .copyin = aiocb32_copyin_old_sigevent,
2797 .fetch_status = aiocb32_fetch_status,
2798 .fetch_error = aiocb32_fetch_error,
2799 .store_status = aiocb32_store_status,
2800 .store_error = aiocb32_store_error,
2801 .store_kernelinfo = aiocb32_store_kernelinfo,
2802 .store_aiocb = aiocb32_store_aiocb,
2803 };
2804
2805 int
2806 freebsd32_aio_return(struct thread *td, struct freebsd32_aio_return_args *uap)
2807 {
2808
2809 return (kern_aio_return(td, (struct aiocb *)uap->aiocbp, &aiocb32_ops));
2810 }
2811
2812 int
2813 freebsd32_aio_suspend(struct thread *td, struct freebsd32_aio_suspend_args *uap)
2814 {
2815 struct timespec32 ts32;
2816 struct timespec ts, *tsp;
2817 struct aiocb **ujoblist;
2818 uint32_t *ujoblist32;
2819 int error, i;
2820
2821 if (uap->nent < 0 || uap->nent > AIO_LISTIO_MAX)
2822 return (EINVAL);
2823
2824 if (uap->timeout) {
2825 /* Get timespec struct. */
2826 if ((error = copyin(uap->timeout, &ts32, sizeof(ts32))) != 0)
2827 return (error);
2828 CP(ts32, ts, tv_sec);
2829 CP(ts32, ts, tv_nsec);
2830 tsp = &ts;
2831 } else
2832 tsp = NULL;
2833
2834 ujoblist = uma_zalloc(aiol_zone, M_WAITOK);
2835 ujoblist32 = (uint32_t *)ujoblist;
2836 error = copyin(uap->aiocbp, ujoblist32, uap->nent *
2837 sizeof(ujoblist32[0]));
2838 if (error == 0) {
2839 for (i = uap->nent - 1; i >= 0; i--)
2840 ujoblist[i] = PTRIN(ujoblist32[i]);
2841
2842 error = kern_aio_suspend(td, uap->nent, ujoblist, tsp);
2843 }
2844 uma_zfree(aiol_zone, ujoblist);
2845 return (error);
2846 }
2847
2848 int
2849 freebsd32_aio_cancel(struct thread *td, struct freebsd32_aio_cancel_args *uap)
2850 {
2851
2852 return (sys_aio_cancel(td, (struct aio_cancel_args *)uap));
2853 }
2854
2855 int
2856 freebsd32_aio_error(struct thread *td, struct freebsd32_aio_error_args *uap)
2857 {
2858
2859 return (kern_aio_error(td, (struct aiocb *)uap->aiocbp, &aiocb32_ops));
2860 }
2861
2862 int
2863 freebsd32_oaio_read(struct thread *td, struct freebsd32_oaio_read_args *uap)
2864 {
2865
2866 return (aio_aqueue(td, (struct aiocb *)uap->aiocbp, NULL, LIO_READ,
2867 &aiocb32_ops_osigevent));
2868 }
2869
2870 int
2871 freebsd32_aio_read(struct thread *td, struct freebsd32_aio_read_args *uap)
2872 {
2873
2874 return (aio_aqueue(td, (struct aiocb *)uap->aiocbp, NULL, LIO_READ,
2875 &aiocb32_ops));
2876 }
2877
2878 int
2879 freebsd32_oaio_write(struct thread *td, struct freebsd32_oaio_write_args *uap)
2880 {
2881
2882 return (aio_aqueue(td, (struct aiocb *)uap->aiocbp, NULL, LIO_WRITE,
2883 &aiocb32_ops_osigevent));
2884 }
2885
2886 int
2887 freebsd32_aio_write(struct thread *td, struct freebsd32_aio_write_args *uap)
2888 {
2889
2890 return (aio_aqueue(td, (struct aiocb *)uap->aiocbp, NULL, LIO_WRITE,
2891 &aiocb32_ops));
2892 }
2893
2894 int
2895 freebsd32_aio_waitcomplete(struct thread *td,
2896 struct freebsd32_aio_waitcomplete_args *uap)
2897 {
2898 struct timespec32 ts32;
2899 struct timespec ts, *tsp;
2900 int error;
2901
2902 if (uap->timeout) {
2903 /* Get timespec struct. */
2904 error = copyin(uap->timeout, &ts32, sizeof(ts32));
2905 if (error)
2906 return (error);
2907 CP(ts32, ts, tv_sec);
2908 CP(ts32, ts, tv_nsec);
2909 tsp = &ts;
2910 } else
2911 tsp = NULL;
2912
2913 return (kern_aio_waitcomplete(td, (struct aiocb **)uap->aiocbp, tsp,
2914 &aiocb32_ops));
2915 }
2916
2917 int
2918 freebsd32_aio_fsync(struct thread *td, struct freebsd32_aio_fsync_args *uap)
2919 {
2920
2921 return (kern_aio_fsync(td, uap->op, (struct aiocb *)uap->aiocbp,
2922 &aiocb32_ops));
2923 }
2924
2925 int
2926 freebsd32_olio_listio(struct thread *td, struct freebsd32_olio_listio_args *uap)
2927 {
2928 struct aiocb **acb_list;
2929 struct sigevent *sigp, sig;
2930 struct osigevent32 osig;
2931 uint32_t *acb_list32;
2932 int error, i, nent;
2933
2934 if ((uap->mode != LIO_NOWAIT) && (uap->mode != LIO_WAIT))
2935 return (EINVAL);
2936
2937 nent = uap->nent;
2938 if (nent < 0 || nent > AIO_LISTIO_MAX)
2939 return (EINVAL);
2940
2941 if (uap->sig && (uap->mode == LIO_NOWAIT)) {
2942 error = copyin(uap->sig, &osig, sizeof(osig));
2943 if (error)
2944 return (error);
2945 error = convert_old_sigevent32(&osig, &sig);
2946 if (error)
2947 return (error);
2948 sigp = &sig;
2949 } else
2950 sigp = NULL;
2951
2952 acb_list32 = malloc(sizeof(uint32_t) * nent, M_LIO, M_WAITOK);
2953 error = copyin(uap->acb_list, acb_list32, nent * sizeof(uint32_t));
2954 if (error) {
2955 free(acb_list32, M_LIO);
2956 return (error);
2957 }
2958 acb_list = malloc(sizeof(struct aiocb *) * nent, M_LIO, M_WAITOK);
2959 for (i = 0; i < nent; i++)
2960 acb_list[i] = PTRIN(acb_list32[i]);
2961 free(acb_list32, M_LIO);
2962
2963 error = kern_lio_listio(td, uap->mode,
2964 (struct aiocb * const *)uap->acb_list, acb_list, nent, sigp,
2965 &aiocb32_ops_osigevent);
2966 free(acb_list, M_LIO);
2967 return (error);
2968 }
2969
2970 int
2971 freebsd32_lio_listio(struct thread *td, struct freebsd32_lio_listio_args *uap)
2972 {
2973 struct aiocb **acb_list;
2974 struct sigevent *sigp, sig;
2975 struct sigevent32 sig32;
2976 uint32_t *acb_list32;
2977 int error, i, nent;
2978
2979 if ((uap->mode != LIO_NOWAIT) && (uap->mode != LIO_WAIT))
2980 return (EINVAL);
2981
2982 nent = uap->nent;
2983 if (nent < 0 || nent > AIO_LISTIO_MAX)
2984 return (EINVAL);
2985
2986 if (uap->sig && (uap->mode == LIO_NOWAIT)) {
2987 error = copyin(uap->sig, &sig32, sizeof(sig32));
2988 if (error)
2989 return (error);
2990 error = convert_sigevent32(&sig32, &sig);
2991 if (error)
2992 return (error);
2993 sigp = &sig;
2994 } else
2995 sigp = NULL;
2996
2997 acb_list32 = malloc(sizeof(uint32_t) * nent, M_LIO, M_WAITOK);
2998 error = copyin(uap->acb_list, acb_list32, nent * sizeof(uint32_t));
2999 if (error) {
3000 free(acb_list32, M_LIO);
3001 return (error);
3002 }
3003 acb_list = malloc(sizeof(struct aiocb *) * nent, M_LIO, M_WAITOK);
3004 for (i = 0; i < nent; i++)
3005 acb_list[i] = PTRIN(acb_list32[i]);
3006 free(acb_list32, M_LIO);
3007
3008 error = kern_lio_listio(td, uap->mode,
3009 (struct aiocb * const *)uap->acb_list, acb_list, nent, sigp,
3010 &aiocb32_ops);
3011 free(acb_list, M_LIO);
3012 return (error);
3013 }
3014
3015 #endif