sys/kern/vfs_aio.c
1 /*-
2 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
3 *
4 * Copyright (c) 1997 John S. Dyson. All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. John S. Dyson's name may not be used to endorse or promote products
12 * derived from this software without specific prior written permission.
13 *
14 * DISCLAIMER: This code isn't warranted to do anything useful. Anything
15 * bad that happens because of using this software isn't the responsibility
16 * of the author. This software is distributed AS-IS.
17 */
18
19 /*
20 * This file contains support for the POSIX 1003.1B AIO/LIO facility.
21 */
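
/*
 * Illustrative userland usage of this facility (sketch only; "fd" is assumed
 * to be an open descriptor).  A caller fills in a struct aiocb, submits it
 * with aio_read(2), polls with aio_error(2), and reaps the result with
 * aio_return(2):
 *
 *	struct aiocb cb;
 *	char buf[512];
 *
 *	memset(&cb, 0, sizeof(cb));
 *	cb.aio_fildes = fd;
 *	cb.aio_buf = buf;
 *	cb.aio_nbytes = sizeof(buf);
 *	cb.aio_offset = 0;
 *	if (aio_read(&cb) == -1)
 *		err(1, "aio_read");
 *	while (aio_error(&cb) == EINPROGRESS)
 *		usleep(1000);
 *	ssize_t n = aio_return(&cb);
 *
 * The kernel entry points behind these calls (aio_aqueue(), kern_aio_error(),
 * kern_aio_return(), ...) are implemented below.
 */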
22
23 #include <sys/cdefs.h>
24 __FBSDID("$FreeBSD$");
25
26 #include <sys/param.h>
27 #include <sys/systm.h>
28 #include <sys/malloc.h>
29 #include <sys/bio.h>
30 #include <sys/buf.h>
31 #include <sys/capsicum.h>
32 #include <sys/eventhandler.h>
33 #include <sys/sysproto.h>
34 #include <sys/filedesc.h>
35 #include <sys/kernel.h>
36 #include <sys/module.h>
37 #include <sys/kthread.h>
38 #include <sys/fcntl.h>
39 #include <sys/file.h>
40 #include <sys/limits.h>
41 #include <sys/lock.h>
42 #include <sys/mutex.h>
43 #include <sys/unistd.h>
44 #include <sys/posix4.h>
45 #include <sys/proc.h>
46 #include <sys/resourcevar.h>
47 #include <sys/signalvar.h>
48 #include <sys/syscallsubr.h>
49 #include <sys/protosw.h>
50 #include <sys/rwlock.h>
51 #include <sys/sema.h>
52 #include <sys/socket.h>
53 #include <sys/socketvar.h>
54 #include <sys/syscall.h>
55 #include <sys/sysctl.h>
56 #include <sys/syslog.h>
57 #include <sys/sx.h>
58 #include <sys/taskqueue.h>
59 #include <sys/vnode.h>
60 #include <sys/conf.h>
61 #include <sys/event.h>
62 #include <sys/mount.h>
63 #include <geom/geom.h>
64
65 #include <machine/atomic.h>
66
67 #include <vm/vm.h>
68 #include <vm/vm_page.h>
69 #include <vm/vm_extern.h>
70 #include <vm/pmap.h>
71 #include <vm/vm_map.h>
72 #include <vm/vm_object.h>
73 #include <vm/uma.h>
74 #include <sys/aio.h>
75
76 /*
77 * Counter for allocating reference ids to new jobs. Wrapped to 1 on
78 * overflow. (XXX will be removed soon.)
79 */
80 static u_long jobrefid;
81
82 /*
83 * Counter for aio_fsync.
84 */
85 static uint64_t jobseqno;
86
87 #ifndef MAX_AIO_PER_PROC
88 #define MAX_AIO_PER_PROC 32
89 #endif
90
91 #ifndef MAX_AIO_QUEUE_PER_PROC
92 #define MAX_AIO_QUEUE_PER_PROC 256
93 #endif
94
95 #ifndef MAX_AIO_QUEUE
96 #define MAX_AIO_QUEUE 1024 /* Bigger than MAX_AIO_QUEUE_PER_PROC */
97 #endif
98
99 #ifndef MAX_BUF_AIO
100 #define MAX_BUF_AIO 16
101 #endif
102
103 FEATURE(aio, "Asynchronous I/O");
104 SYSCTL_DECL(_p1003_1b);
105
106 static MALLOC_DEFINE(M_LIO, "lio", "listio aio control block list");
107 static MALLOC_DEFINE(M_AIO, "aio", "structures for asynchronous I/O");
108
109 static SYSCTL_NODE(_vfs, OID_AUTO, aio, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
110 "Async IO management");
111
112 static int enable_aio_unsafe = 0;
113 SYSCTL_INT(_vfs_aio, OID_AUTO, enable_unsafe, CTLFLAG_RW, &enable_aio_unsafe, 0,
114 "Permit asynchronous IO on all file types, not just known-safe types");
115
116 static unsigned int unsafe_warningcnt = 1;
117 SYSCTL_UINT(_vfs_aio, OID_AUTO, unsafe_warningcnt, CTLFLAG_RW,
118 &unsafe_warningcnt, 0,
119 "Warnings that will be triggered upon failed IO requests on unsafe files");
120
121 static int max_aio_procs = MAX_AIO_PROCS;
122 SYSCTL_INT(_vfs_aio, OID_AUTO, max_aio_procs, CTLFLAG_RW, &max_aio_procs, 0,
123 "Maximum number of kernel processes to use for handling async IO ");
124
125 static int num_aio_procs = 0;
126 SYSCTL_INT(_vfs_aio, OID_AUTO, num_aio_procs, CTLFLAG_RD, &num_aio_procs, 0,
127 "Number of presently active kernel processes for async IO");
128
129 /*
130 * The code will adjust the actual number of AIO processes towards this
131 * number when it gets a chance.
132 */
133 static int target_aio_procs = TARGET_AIO_PROCS;
134 SYSCTL_INT(_vfs_aio, OID_AUTO, target_aio_procs, CTLFLAG_RW, &target_aio_procs,
135 0,
136 "Preferred number of ready kernel processes for async IO");
137
138 static int max_queue_count = MAX_AIO_QUEUE;
139 SYSCTL_INT(_vfs_aio, OID_AUTO, max_aio_queue, CTLFLAG_RW, &max_queue_count, 0,
140 "Maximum number of aio requests to queue, globally");
141
142 static int num_queue_count = 0;
143 SYSCTL_INT(_vfs_aio, OID_AUTO, num_queue_count, CTLFLAG_RD, &num_queue_count, 0,
144 "Number of queued aio requests");
145
146 static int num_buf_aio = 0;
147 SYSCTL_INT(_vfs_aio, OID_AUTO, num_buf_aio, CTLFLAG_RD, &num_buf_aio, 0,
148 "Number of aio requests presently handled by the buf subsystem");
149
150 static int num_unmapped_aio = 0;
151 SYSCTL_INT(_vfs_aio, OID_AUTO, num_unmapped_aio, CTLFLAG_RD, &num_unmapped_aio,
152 0,
153 "Number of aio requests presently handled by unmapped I/O buffers");
154
155 /* Number of async I/O processes in the process of being started */
156 /* XXX This should be local to aio_aqueue() */
157 static int num_aio_resv_start = 0;
158
159 static int aiod_lifetime;
160 SYSCTL_INT(_vfs_aio, OID_AUTO, aiod_lifetime, CTLFLAG_RW, &aiod_lifetime, 0,
161 "Maximum lifetime for idle aiod");
162
163 static int max_aio_per_proc = MAX_AIO_PER_PROC;
164 SYSCTL_INT(_vfs_aio, OID_AUTO, max_aio_per_proc, CTLFLAG_RW, &max_aio_per_proc,
165 0,
166 "Maximum active aio requests per process");
167
168 static int max_aio_queue_per_proc = MAX_AIO_QUEUE_PER_PROC;
169 SYSCTL_INT(_vfs_aio, OID_AUTO, max_aio_queue_per_proc, CTLFLAG_RW,
170 &max_aio_queue_per_proc, 0,
171 "Maximum queued aio requests per process");
172
173 static int max_buf_aio = MAX_BUF_AIO;
174 SYSCTL_INT(_vfs_aio, OID_AUTO, max_buf_aio, CTLFLAG_RW, &max_buf_aio, 0,
175 "Maximum buf aio requests per process");
176
177 /*
178 * Though redundant with vfs.aio.max_aio_queue_per_proc, POSIX requires
179 * sysconf(3) to support AIO_LISTIO_MAX, and we implement that with
180 * vfs.aio.aio_listio_max.
181 */
182 SYSCTL_INT(_p1003_1b, CTL_P1003_1B_AIO_LISTIO_MAX, aio_listio_max,
183 CTLFLAG_RD | CTLFLAG_CAPRD, &max_aio_queue_per_proc,
184 0, "Maximum aio requests for a single lio_listio call");
185
186 #ifdef COMPAT_FREEBSD6
187 typedef struct oaiocb {
188 int aio_fildes; /* File descriptor */
189 off_t aio_offset; /* File offset for I/O */
190 volatile void *aio_buf; /* I/O buffer in process space */
191 size_t aio_nbytes; /* Number of bytes for I/O */
192 struct osigevent aio_sigevent; /* Signal to deliver */
193 int aio_lio_opcode; /* LIO opcode */
194 int aio_reqprio; /* Request priority -- ignored */
195 struct __aiocb_private _aiocb_private;
196 } oaiocb_t;
197 #endif
198
199 /*
200 * Below is a key of the locks used to protect each member of struct kaiocb,
201 * aioliojob, and kaioinfo, and of any backends.
202 *
203 * * - need not be protected
204 * a - locked by kaioinfo lock
205 * b - locked by the backend lock; the backend lock can be NULL in some
206 * cases (for example, BIO-backed jobs), in which case the proc lock is
207 * reused.
208 * c - locked by aio_job_mtx, the lock for the generic file I/O backend.
209 */
210
211 /*
212 * If the routine that services an AIO request blocks while running in an
213 * AIO kernel process, it can starve other I/O requests. BIO requests
214 * queued via aio_qbio() complete asynchronously and do not use AIO kernel
215 * processes at all. Socket I/O requests use a separate pool of
216 * kprocs and also force non-blocking I/O. Other file I/O requests
217 * use the generic fo_read/fo_write operations which can block. The
218 * fsync and mlock operations can also block while executing. Ideally
219 * none of these requests would block while executing.
220 *
221 * Note that the service routines cannot toggle O_NONBLOCK in the file
222 * structure directly while handling a request due to races with
223 * userland threads.
224 */
225
226 /* jobflags */
227 #define KAIOCB_QUEUEING 0x01
228 #define KAIOCB_CANCELLED 0x02
229 #define KAIOCB_CANCELLING 0x04
230 #define KAIOCB_CHECKSYNC 0x08
231 #define KAIOCB_CLEARED 0x10
232 #define KAIOCB_FINISHED 0x20
233
234 /*
235 * AIO process info
236 */
237 #define AIOP_FREE 0x1 /* proc on free queue */
238
239 struct aioproc {
240 int aioprocflags; /* (c) AIO proc flags */
241 TAILQ_ENTRY(aioproc) list; /* (c) list of processes */
242 struct proc *aioproc; /* (*) the AIO proc */
243 };
244
245 /*
246 * data-structure for lio signal management
247 */
248 struct aioliojob {
249 int lioj_flags; /* (a) listio flags */
250 int lioj_count; /* (a) count of jobs */
251 int lioj_finished_count; /* (a) count of finished jobs */
252 struct sigevent lioj_signal; /* (a) signal on all I/O done */
253 TAILQ_ENTRY(aioliojob) lioj_list; /* (a) lio list */
254 struct knlist klist; /* (a) list of knotes */
255 ksiginfo_t lioj_ksi; /* (a) Realtime signal info */
256 };
257
258 #define LIOJ_SIGNAL 0x1 /* signal on all done (lio) */
259 #define LIOJ_SIGNAL_POSTED 0x2 /* signal has been posted */
260 #define LIOJ_KEVENT_POSTED 0x4 /* kevent triggered */
261
262 /*
263 * per process aio data structure
264 */
265 struct kaioinfo {
266 struct mtx kaio_mtx; /* the lock to protect this struct */
267 int kaio_flags; /* (a) per process kaio flags */
268 int kaio_active_count; /* (c) number of currently used AIOs */
269 int kaio_count; /* (a) size of AIO queue */
270 int kaio_buffer_count; /* (a) number of bio buffers */
271 TAILQ_HEAD(,kaiocb) kaio_all; /* (a) all AIOs in a process */
272 TAILQ_HEAD(,kaiocb) kaio_done; /* (a) done queue for process */
273 TAILQ_HEAD(,aioliojob) kaio_liojoblist; /* (a) list of lio jobs */
274 TAILQ_HEAD(,kaiocb) kaio_jobqueue; /* (a) job queue for process */
275 TAILQ_HEAD(,kaiocb) kaio_syncqueue; /* (a) queue for aio_fsync */
276 TAILQ_HEAD(,kaiocb) kaio_syncready; /* (a) second q for aio_fsync */
277 struct task kaio_task; /* (*) task to kick aio processes */
278 struct task kaio_sync_task; /* (*) task to schedule fsync jobs */
279 };
280
281 #define AIO_LOCK(ki) mtx_lock(&(ki)->kaio_mtx)
282 #define AIO_UNLOCK(ki) mtx_unlock(&(ki)->kaio_mtx)
283 #define AIO_LOCK_ASSERT(ki, f) mtx_assert(&(ki)->kaio_mtx, (f))
284 #define AIO_MTX(ki) (&(ki)->kaio_mtx)
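
/*
 * Sketch of the usual pattern for the macros above when touching
 * (a)-annotated members ("ki" and "job" stand for any kaioinfo/kaiocb pair):
 *
 *	AIO_LOCK(ki);
 *	TAILQ_INSERT_TAIL(&ki->kaio_all, job, allist);
 *	ki->kaio_count++;
 *	AIO_UNLOCK(ki);
 */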
285
286 #define KAIO_RUNDOWN 0x1 /* process is being run down */
287 #define KAIO_WAKEUP 0x2 /* wakeup process when AIO completes */
288
289 /*
290 * Operations used to interact with userland aio control blocks.
291 * Different ABIs provide their own operations.
292 */
293 struct aiocb_ops {
294 int (*aio_copyin)(struct aiocb *ujob, struct kaiocb *kjob, int ty);
295 long (*fetch_status)(struct aiocb *ujob);
296 long (*fetch_error)(struct aiocb *ujob);
297 int (*store_status)(struct aiocb *ujob, long status);
298 int (*store_error)(struct aiocb *ujob, long error);
299 int (*store_kernelinfo)(struct aiocb *ujob, long jobref);
300 int (*store_aiocb)(struct aiocb **ujobp, struct aiocb *ujob);
301 };
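
/*
 * Every access to a userland control block funnels through one of these
 * tables, e.g. "ops->store_error(ujob, EINPROGRESS)" rather than a direct
 * suword().  The native table (aiocb_ops) is defined further below; compat
 * ABIs such as the FreeBSD 6 one provide their own.
 */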
302
303 static TAILQ_HEAD(,aioproc) aio_freeproc; /* (c) Idle daemons */
304 static struct sema aio_newproc_sem;
305 static struct mtx aio_job_mtx;
306 static TAILQ_HEAD(,kaiocb) aio_jobs; /* (c) Async job list */
307 static struct unrhdr *aiod_unr;
308
309 static void aio_biocleanup(struct bio *bp);
310 void aio_init_aioinfo(struct proc *p);
311 static int aio_onceonly(void);
312 static int aio_free_entry(struct kaiocb *job);
313 static void aio_process_rw(struct kaiocb *job);
314 static void aio_process_sync(struct kaiocb *job);
315 static void aio_process_mlock(struct kaiocb *job);
316 static void aio_schedule_fsync(void *context, int pending);
317 static int aio_newproc(int *);
318 int aio_aqueue(struct thread *td, struct aiocb *ujob,
319 struct aioliojob *lio, int type, struct aiocb_ops *ops);
320 static int aio_queue_file(struct file *fp, struct kaiocb *job);
321 static void aio_biowakeup(struct bio *bp);
322 static void aio_proc_rundown(void *arg, struct proc *p);
323 static void aio_proc_rundown_exec(void *arg, struct proc *p,
324 struct image_params *imgp);
325 static int aio_qbio(struct proc *p, struct kaiocb *job);
326 static void aio_daemon(void *param);
327 static void aio_bio_done_notify(struct proc *userp, struct kaiocb *job);
328 static bool aio_clear_cancel_function_locked(struct kaiocb *job);
329 static int aio_kick(struct proc *userp);
330 static void aio_kick_nowait(struct proc *userp);
331 static void aio_kick_helper(void *context, int pending);
332 static int filt_aioattach(struct knote *kn);
333 static void filt_aiodetach(struct knote *kn);
334 static int filt_aio(struct knote *kn, long hint);
335 static int filt_lioattach(struct knote *kn);
336 static void filt_liodetach(struct knote *kn);
337 static int filt_lio(struct knote *kn, long hint);
338
339 /*
340 * Zones for:
341 * kaio Per process async io info
342 * aiocb async io jobs
343 * aiolio list io jobs
344 */
345 static uma_zone_t kaio_zone, aiocb_zone, aiolio_zone;
346
347 /* kqueue filters for aio */
348 static struct filterops aio_filtops = {
349 .f_isfd = 0,
350 .f_attach = filt_aioattach,
351 .f_detach = filt_aiodetach,
352 .f_event = filt_aio,
353 };
354 static struct filterops lio_filtops = {
355 .f_isfd = 0,
356 .f_attach = filt_lioattach,
357 .f_detach = filt_liodetach,
358 .f_event = filt_lio
359 };
360
361 static eventhandler_tag exit_tag, exec_tag;
362
363 TASKQUEUE_DEFINE_THREAD(aiod_kick);
364
365 /*
366 * Main operations function for use as a kernel module.
367 */
368 static int
369 aio_modload(struct module *module, int cmd, void *arg)
370 {
371 int error = 0;
372
373 switch (cmd) {
374 case MOD_LOAD:
375 aio_onceonly();
376 break;
377 case MOD_SHUTDOWN:
378 break;
379 default:
380 error = EOPNOTSUPP;
381 break;
382 }
383 return (error);
384 }
385
386 static moduledata_t aio_mod = {
387 "aio",
388 &aio_modload,
389 NULL
390 };
391
392 DECLARE_MODULE(aio, aio_mod, SI_SUB_VFS, SI_ORDER_ANY);
393 MODULE_VERSION(aio, 1);
394
395 /*
396 * Startup initialization
397 */
398 static int
399 aio_onceonly(void)
400 {
401
402 exit_tag = EVENTHANDLER_REGISTER(process_exit, aio_proc_rundown, NULL,
403 EVENTHANDLER_PRI_ANY);
404 exec_tag = EVENTHANDLER_REGISTER(process_exec, aio_proc_rundown_exec,
405 NULL, EVENTHANDLER_PRI_ANY);
406 kqueue_add_filteropts(EVFILT_AIO, &aio_filtops);
407 kqueue_add_filteropts(EVFILT_LIO, &lio_filtops);
408 TAILQ_INIT(&aio_freeproc);
409 sema_init(&aio_newproc_sem, 0, "aio_new_proc");
410 mtx_init(&aio_job_mtx, "aio_job", NULL, MTX_DEF);
411 TAILQ_INIT(&aio_jobs);
412 aiod_unr = new_unrhdr(1, INT_MAX, NULL);
413 kaio_zone = uma_zcreate("AIO", sizeof(struct kaioinfo), NULL, NULL,
414 NULL, NULL, UMA_ALIGN_PTR, 0);
415 aiocb_zone = uma_zcreate("AIOCB", sizeof(struct kaiocb), NULL, NULL,
416 NULL, NULL, UMA_ALIGN_PTR, 0);
417 aiolio_zone = uma_zcreate("AIOLIO", sizeof(struct aioliojob), NULL,
418 NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
419 aiod_lifetime = AIOD_LIFETIME_DEFAULT;
420 jobrefid = 1;
421 p31b_setcfg(CTL_P1003_1B_ASYNCHRONOUS_IO, _POSIX_ASYNCHRONOUS_IO);
422 p31b_setcfg(CTL_P1003_1B_AIO_MAX, MAX_AIO_QUEUE);
423 p31b_setcfg(CTL_P1003_1B_AIO_PRIO_DELTA_MAX, 0);
424
425 return (0);
426 }
427
428 /*
429 * Init the per-process aioinfo structure. The aioinfo limits are set
430 * per-process for user limit (resource) management.
431 */
432 void
433 aio_init_aioinfo(struct proc *p)
434 {
435 struct kaioinfo *ki;
436
437 ki = uma_zalloc(kaio_zone, M_WAITOK);
438 mtx_init(&ki->kaio_mtx, "aiomtx", NULL, MTX_DEF | MTX_NEW);
439 ki->kaio_flags = 0;
440 ki->kaio_active_count = 0;
441 ki->kaio_count = 0;
442 ki->kaio_buffer_count = 0;
443 TAILQ_INIT(&ki->kaio_all);
444 TAILQ_INIT(&ki->kaio_done);
445 TAILQ_INIT(&ki->kaio_jobqueue);
446 TAILQ_INIT(&ki->kaio_liojoblist);
447 TAILQ_INIT(&ki->kaio_syncqueue);
448 TAILQ_INIT(&ki->kaio_syncready);
449 TASK_INIT(&ki->kaio_task, 0, aio_kick_helper, p);
450 TASK_INIT(&ki->kaio_sync_task, 0, aio_schedule_fsync, ki);
451 PROC_LOCK(p);
452 if (p->p_aioinfo == NULL) {
453 p->p_aioinfo = ki;
454 PROC_UNLOCK(p);
455 } else {
456 PROC_UNLOCK(p);
457 mtx_destroy(&ki->kaio_mtx);
458 uma_zfree(kaio_zone, ki);
459 }
460
461 while (num_aio_procs < MIN(target_aio_procs, max_aio_procs))
462 aio_newproc(NULL);
463 }
464
465 static int
466 aio_sendsig(struct proc *p, struct sigevent *sigev, ksiginfo_t *ksi, bool ext)
467 {
468 struct thread *td;
469 int error;
470
471 error = sigev_findtd(p, sigev, &td);
472 if (error)
473 return (error);
474 if (!KSI_ONQ(ksi)) {
475 ksiginfo_set_sigev(ksi, sigev);
476 ksi->ksi_code = SI_ASYNCIO;
477 ksi->ksi_flags |= ext ? (KSI_EXT | KSI_INS) : 0;
478 tdsendsignal(p, td, ksi->ksi_signo, ksi);
479 }
480 PROC_UNLOCK(p);
481 return (error);
482 }
483
484 /*
485 * Free a job entry. Wait for completion if it is currently active, but don't
486 * delay forever. If we delay, we return a flag that says that we have to
487 * restart the queue scan.
488 */
489 static int
490 aio_free_entry(struct kaiocb *job)
491 {
492 struct kaioinfo *ki;
493 struct aioliojob *lj;
494 struct proc *p;
495
496 p = job->userproc;
497 MPASS(curproc == p);
498 ki = p->p_aioinfo;
499 MPASS(ki != NULL);
500
501 AIO_LOCK_ASSERT(ki, MA_OWNED);
502 MPASS(job->jobflags & KAIOCB_FINISHED);
503
504 atomic_subtract_int(&num_queue_count, 1);
505
506 ki->kaio_count--;
507 MPASS(ki->kaio_count >= 0);
508
509 TAILQ_REMOVE(&ki->kaio_done, job, plist);
510 TAILQ_REMOVE(&ki->kaio_all, job, allist);
511
512 lj = job->lio;
513 if (lj) {
514 lj->lioj_count--;
515 lj->lioj_finished_count--;
516
517 if (lj->lioj_count == 0) {
518 TAILQ_REMOVE(&ki->kaio_liojoblist, lj, lioj_list);
519 /* lio is going away, we need to destroy any knotes */
520 knlist_delete(&lj->klist, curthread, 1);
521 PROC_LOCK(p);
522 sigqueue_take(&lj->lioj_ksi);
523 PROC_UNLOCK(p);
524 uma_zfree(aiolio_zone, lj);
525 }
526 }
527
528 /* job is going away, we need to destroy any knotes */
529 knlist_delete(&job->klist, curthread, 1);
530 PROC_LOCK(p);
531 sigqueue_take(&job->ksi);
532 PROC_UNLOCK(p);
533
534 AIO_UNLOCK(ki);
535
536 /*
537 * The thread argument here is used to find the owning process
538 * and is also passed to fo_close() which may pass it to various
539 * places such as devsw close() routines. Because of that, we
540 * need a thread pointer from the process owning the job that is
541 * persistent and won't disappear out from under us or move to
542 * another process.
543 *
544 * Currently, all the callers of this function call it to remove
545 * a kaiocb from the current process' job list either via a
546 * syscall or due to the current process calling exit() or
547 * execve(). Thus, we know that p == curproc. We also know that
548 * curthread can't exit since we are curthread.
549 *
550 * Therefore, we use curthread as the thread to pass to
551 * knlist_delete(). This does mean that it is possible for the
552 * thread pointer at close time to differ from the thread pointer
553 * at open time, but this is already true of file descriptors in
554 * a multithreaded process.
555 */
556 if (job->fd_file)
557 fdrop(job->fd_file, curthread);
558 crfree(job->cred);
559 if (job->uiop != &job->uio)
560 free(job->uiop, M_IOV);
561 uma_zfree(aiocb_zone, job);
562 AIO_LOCK(ki);
563
564 return (0);
565 }
566
567 static void
568 aio_proc_rundown_exec(void *arg, struct proc *p,
569 struct image_params *imgp __unused)
570 {
571 aio_proc_rundown(arg, p);
572 }
573
574 static int
575 aio_cancel_job(struct proc *p, struct kaioinfo *ki, struct kaiocb *job)
576 {
577 aio_cancel_fn_t *func;
578 int cancelled;
579
580 AIO_LOCK_ASSERT(ki, MA_OWNED);
581 if (job->jobflags & (KAIOCB_CANCELLED | KAIOCB_FINISHED))
582 return (0);
583 MPASS((job->jobflags & KAIOCB_CANCELLING) == 0);
584 job->jobflags |= KAIOCB_CANCELLED;
585
586 func = job->cancel_fn;
587
588 /*
589 * If there is no cancel routine, just leave the job marked as
590 * cancelled. The job should be in active use by a caller, who will
591 * complete it normally, or will complete it when it fails to install a
592 * cancel routine.
593 */
594 if (func == NULL)
595 return (0);
596
597 /*
598 * Set the CANCELLING flag so that aio_complete() will defer
599 * completions of this job. This prevents the job from being
600 * freed out from under the cancel callback. After the
601 * callback any deferred completion (whether from the callback
602 * or any other source) will be completed.
603 */
604 job->jobflags |= KAIOCB_CANCELLING;
605 AIO_UNLOCK(ki);
606 func(job);
607 AIO_LOCK(ki);
608 job->jobflags &= ~KAIOCB_CANCELLING;
609 if (job->jobflags & KAIOCB_FINISHED) {
610 cancelled = job->uaiocb._aiocb_private.error == ECANCELED;
611 TAILQ_REMOVE(&ki->kaio_jobqueue, job, plist);
612 aio_bio_done_notify(p, job);
613 } else {
614 /*
615 * The cancel callback might have scheduled an
616 * operation to cancel this request, but it is
617 * only counted as cancelled if the request is
618 * cancelled when the callback returns.
619 */
620 cancelled = 0;
621 }
622 return (cancelled);
623 }
624
625 /*
626 * Rundown the jobs for a given process.
627 */
628 static void
629 aio_proc_rundown(void *arg, struct proc *p)
630 {
631 struct kaioinfo *ki;
632 struct aioliojob *lj;
633 struct kaiocb *job, *jobn;
634
635 KASSERT(curthread->td_proc == p,
636 ("%s: called on non-curproc", __func__));
637 ki = p->p_aioinfo;
638 if (ki == NULL)
639 return;
640
641 AIO_LOCK(ki);
642 ki->kaio_flags |= KAIO_RUNDOWN;
643
644 restart:
645
646 /*
647 * Try to cancel all pending requests. This code simulates
648 * aio_cancel on all pending I/O requests.
649 */
650 TAILQ_FOREACH_SAFE(job, &ki->kaio_jobqueue, plist, jobn) {
651 aio_cancel_job(p, ki, job);
652 }
653
654 /* Wait for all running I/O to be finished */
655 if (TAILQ_FIRST(&ki->kaio_jobqueue) || ki->kaio_active_count != 0) {
656 ki->kaio_flags |= KAIO_WAKEUP;
657 msleep(&p->p_aioinfo, AIO_MTX(ki), PRIBIO, "aioprn", hz);
658 goto restart;
659 }
660
661 /* Free all completed I/O requests. */
662 while ((job = TAILQ_FIRST(&ki->kaio_done)) != NULL)
663 aio_free_entry(job);
664
665 while ((lj = TAILQ_FIRST(&ki->kaio_liojoblist)) != NULL) {
666 if (lj->lioj_count == 0) {
667 TAILQ_REMOVE(&ki->kaio_liojoblist, lj, lioj_list);
668 knlist_delete(&lj->klist, curthread, 1);
669 PROC_LOCK(p);
670 sigqueue_take(&lj->lioj_ksi);
671 PROC_UNLOCK(p);
672 uma_zfree(aiolio_zone, lj);
673 } else {
674 panic("LIO job not cleaned up: C:%d, FC:%d\n",
675 lj->lioj_count, lj->lioj_finished_count);
676 }
677 }
678 AIO_UNLOCK(ki);
679 taskqueue_drain(taskqueue_aiod_kick, &ki->kaio_task);
680 taskqueue_drain(taskqueue_aiod_kick, &ki->kaio_sync_task);
681 mtx_destroy(&ki->kaio_mtx);
682 uma_zfree(kaio_zone, ki);
683 p->p_aioinfo = NULL;
684 }
685
686 /*
687 * Select a job to run (called by an AIO daemon).
688 */
689 static struct kaiocb *
690 aio_selectjob(struct aioproc *aiop)
691 {
692 struct kaiocb *job;
693 struct kaioinfo *ki;
694 struct proc *userp;
695
696 mtx_assert(&aio_job_mtx, MA_OWNED);
697 restart:
698 TAILQ_FOREACH(job, &aio_jobs, list) {
699 userp = job->userproc;
700 ki = userp->p_aioinfo;
701
702 if (ki->kaio_active_count < max_aio_per_proc) {
703 TAILQ_REMOVE(&aio_jobs, job, list);
704 if (!aio_clear_cancel_function(job))
705 goto restart;
706
707 /* Account for currently active jobs. */
708 ki->kaio_active_count++;
709 break;
710 }
711 }
712 return (job);
713 }
714
715 /*
716 * Move all data to a permanent storage device. This code
717 * simulates the fsync and fdatasync syscalls.
718 */
719 static int
720 aio_fsync_vnode(struct thread *td, struct vnode *vp, int op)
721 {
722 struct mount *mp;
723 vm_object_t obj;
724 int error;
725
726 for (;;) {
727 error = vn_start_write(vp, &mp, V_WAIT | PCATCH);
728 if (error != 0)
729 break;
730 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
731 obj = vp->v_object;
732 if (obj != NULL) {
733 VM_OBJECT_WLOCK(obj);
734 vm_object_page_clean(obj, 0, 0, 0);
735 VM_OBJECT_WUNLOCK(obj);
736 }
737 if (op == LIO_DSYNC)
738 error = VOP_FDATASYNC(vp, td);
739 else
740 error = VOP_FSYNC(vp, MNT_WAIT, td);
741
742 VOP_UNLOCK(vp);
743 vn_finished_write(mp);
744 if (error != ERELOOKUP)
745 break;
746 }
747 return (error);
748 }
749
750 /*
751 * The AIO processing activity for LIO_READ/LIO_WRITE. This is the code that
752 * does the I/O request for the non-bio version of the operations. The normal
753 * vn operations are used, and this code should work in all instances for every
754 * type of file, including pipes, sockets, fifos, and regular files.
755 *
756 * XXX I don't think it works well for sockets, pipes, and fifos.
757 */
758 static void
759 aio_process_rw(struct kaiocb *job)
760 {
761 struct ucred *td_savedcred;
762 struct thread *td;
763 struct aiocb *cb;
764 struct file *fp;
765 ssize_t cnt;
766 long msgsnd_st, msgsnd_end;
767 long msgrcv_st, msgrcv_end;
768 long oublock_st, oublock_end;
769 long inblock_st, inblock_end;
770 int error, opcode;
771
772 KASSERT(job->uaiocb.aio_lio_opcode == LIO_READ ||
773 job->uaiocb.aio_lio_opcode == LIO_READV ||
774 job->uaiocb.aio_lio_opcode == LIO_WRITE ||
775 job->uaiocb.aio_lio_opcode == LIO_WRITEV,
776 ("%s: opcode %d", __func__, job->uaiocb.aio_lio_opcode));
777
778 aio_switch_vmspace(job);
779 td = curthread;
780 td_savedcred = td->td_ucred;
781 td->td_ucred = job->cred;
782 job->uiop->uio_td = td;
783 cb = &job->uaiocb;
784 fp = job->fd_file;
785
786 opcode = job->uaiocb.aio_lio_opcode;
787 cnt = job->uiop->uio_resid;
788
789 msgrcv_st = td->td_ru.ru_msgrcv;
790 msgsnd_st = td->td_ru.ru_msgsnd;
791 inblock_st = td->td_ru.ru_inblock;
792 oublock_st = td->td_ru.ru_oublock;
793
794 /*
795 * aio_aqueue() acquires a reference to the file that is
796 * released in aio_free_entry().
797 */
798 if (opcode == LIO_READ || opcode == LIO_READV) {
799 if (job->uiop->uio_resid == 0)
800 error = 0;
801 else
802 error = fo_read(fp, job->uiop, fp->f_cred, FOF_OFFSET,
803 td);
804 } else {
805 if (fp->f_type == DTYPE_VNODE)
806 bwillwrite();
807 error = fo_write(fp, job->uiop, fp->f_cred, FOF_OFFSET, td);
808 }
809 msgrcv_end = td->td_ru.ru_msgrcv;
810 msgsnd_end = td->td_ru.ru_msgsnd;
811 inblock_end = td->td_ru.ru_inblock;
812 oublock_end = td->td_ru.ru_oublock;
813
814 job->msgrcv = msgrcv_end - msgrcv_st;
815 job->msgsnd = msgsnd_end - msgsnd_st;
816 job->inblock = inblock_end - inblock_st;
817 job->outblock = oublock_end - oublock_st;
818
819 if (error != 0 && job->uiop->uio_resid != cnt) {
820 if (error == ERESTART || error == EINTR || error == EWOULDBLOCK)
821 error = 0;
822 if (error == EPIPE && (opcode & LIO_WRITE)) {
823 PROC_LOCK(job->userproc);
824 kern_psignal(job->userproc, SIGPIPE);
825 PROC_UNLOCK(job->userproc);
826 }
827 }
828
829 cnt -= job->uiop->uio_resid;
830 td->td_ucred = td_savedcred;
831 if (error)
832 aio_complete(job, -1, error);
833 else
834 aio_complete(job, cnt, 0);
835 }
836
837 static void
838 aio_process_sync(struct kaiocb *job)
839 {
840 struct thread *td = curthread;
841 struct ucred *td_savedcred = td->td_ucred;
842 struct file *fp = job->fd_file;
843 int error = 0;
844
845 KASSERT(job->uaiocb.aio_lio_opcode & LIO_SYNC,
846 ("%s: opcode %d", __func__, job->uaiocb.aio_lio_opcode));
847
848 td->td_ucred = job->cred;
849 if (fp->f_vnode != NULL) {
850 error = aio_fsync_vnode(td, fp->f_vnode,
851 job->uaiocb.aio_lio_opcode);
852 }
853 td->td_ucred = td_savedcred;
854 if (error)
855 aio_complete(job, -1, error);
856 else
857 aio_complete(job, 0, 0);
858 }
859
860 static void
861 aio_process_mlock(struct kaiocb *job)
862 {
863 struct aiocb *cb = &job->uaiocb;
864 int error;
865
866 KASSERT(job->uaiocb.aio_lio_opcode == LIO_MLOCK,
867 ("%s: opcode %d", __func__, job->uaiocb.aio_lio_opcode));
868
869 aio_switch_vmspace(job);
870 error = kern_mlock(job->userproc, job->cred,
871 __DEVOLATILE(uintptr_t, cb->aio_buf), cb->aio_nbytes);
872 aio_complete(job, error != 0 ? -1 : 0, error);
873 }
874
875 static void
876 aio_bio_done_notify(struct proc *userp, struct kaiocb *job)
877 {
878 struct aioliojob *lj;
879 struct kaioinfo *ki;
880 struct kaiocb *sjob, *sjobn;
881 int lj_done;
882 bool schedule_fsync;
883
884 ki = userp->p_aioinfo;
885 AIO_LOCK_ASSERT(ki, MA_OWNED);
886 lj = job->lio;
887 lj_done = 0;
888 if (lj) {
889 lj->lioj_finished_count++;
890 if (lj->lioj_count == lj->lioj_finished_count)
891 lj_done = 1;
892 }
893 TAILQ_INSERT_TAIL(&ki->kaio_done, job, plist);
894 MPASS(job->jobflags & KAIOCB_FINISHED);
895
896 if (ki->kaio_flags & KAIO_RUNDOWN)
897 goto notification_done;
898
899 if (job->uaiocb.aio_sigevent.sigev_notify == SIGEV_SIGNAL ||
900 job->uaiocb.aio_sigevent.sigev_notify == SIGEV_THREAD_ID)
901 aio_sendsig(userp, &job->uaiocb.aio_sigevent, &job->ksi, true);
902
903 KNOTE_LOCKED(&job->klist, 1);
904
905 if (lj_done) {
906 if (lj->lioj_signal.sigev_notify == SIGEV_KEVENT) {
907 lj->lioj_flags |= LIOJ_KEVENT_POSTED;
908 KNOTE_LOCKED(&lj->klist, 1);
909 }
910 if ((lj->lioj_flags & (LIOJ_SIGNAL | LIOJ_SIGNAL_POSTED))
911 == LIOJ_SIGNAL &&
912 (lj->lioj_signal.sigev_notify == SIGEV_SIGNAL ||
913 lj->lioj_signal.sigev_notify == SIGEV_THREAD_ID)) {
914 aio_sendsig(userp, &lj->lioj_signal, &lj->lioj_ksi,
915 true);
916 lj->lioj_flags |= LIOJ_SIGNAL_POSTED;
917 }
918 }
919
920 notification_done:
921 if (job->jobflags & KAIOCB_CHECKSYNC) {
922 schedule_fsync = false;
923 TAILQ_FOREACH_SAFE(sjob, &ki->kaio_syncqueue, list, sjobn) {
924 if (job->fd_file != sjob->fd_file ||
925 job->seqno >= sjob->seqno)
926 continue;
927 if (--sjob->pending > 0)
928 continue;
929 TAILQ_REMOVE(&ki->kaio_syncqueue, sjob, list);
930 if (!aio_clear_cancel_function_locked(sjob))
931 continue;
932 TAILQ_INSERT_TAIL(&ki->kaio_syncready, sjob, list);
933 schedule_fsync = true;
934 }
935 if (schedule_fsync)
936 taskqueue_enqueue(taskqueue_aiod_kick,
937 &ki->kaio_sync_task);
938 }
939 if (ki->kaio_flags & KAIO_WAKEUP) {
940 ki->kaio_flags &= ~KAIO_WAKEUP;
941 wakeup(&userp->p_aioinfo);
942 }
943 }
944
945 static void
946 aio_schedule_fsync(void *context, int pending)
947 {
948 struct kaioinfo *ki;
949 struct kaiocb *job;
950
951 ki = context;
952 AIO_LOCK(ki);
953 while (!TAILQ_EMPTY(&ki->kaio_syncready)) {
954 job = TAILQ_FIRST(&ki->kaio_syncready);
955 TAILQ_REMOVE(&ki->kaio_syncready, job, list);
956 AIO_UNLOCK(ki);
957 aio_schedule(job, aio_process_sync);
958 AIO_LOCK(ki);
959 }
960 AIO_UNLOCK(ki);
961 }
962
963 bool
964 aio_cancel_cleared(struct kaiocb *job)
965 {
966
967 /*
968 * The caller should hold the same queue lock held when
969 * aio_clear_cancel_function() was called and set this flag
970 * ensuring this check sees an up-to-date value. However,
971 * there is no way to assert that.
972 */
973 return ((job->jobflags & KAIOCB_CLEARED) != 0);
974 }
975
976 static bool
977 aio_clear_cancel_function_locked(struct kaiocb *job)
978 {
979
980 AIO_LOCK_ASSERT(job->userproc->p_aioinfo, MA_OWNED);
981 MPASS(job->cancel_fn != NULL);
982 if (job->jobflags & KAIOCB_CANCELLING) {
983 job->jobflags |= KAIOCB_CLEARED;
984 return (false);
985 }
986 job->cancel_fn = NULL;
987 return (true);
988 }
989
990 bool
991 aio_clear_cancel_function(struct kaiocb *job)
992 {
993 struct kaioinfo *ki;
994 bool ret;
995
996 ki = job->userproc->p_aioinfo;
997 AIO_LOCK(ki);
998 ret = aio_clear_cancel_function_locked(job);
999 AIO_UNLOCK(ki);
1000 return (ret);
1001 }
1002
1003 static bool
1004 aio_set_cancel_function_locked(struct kaiocb *job, aio_cancel_fn_t *func)
1005 {
1006
1007 AIO_LOCK_ASSERT(job->userproc->p_aioinfo, MA_OWNED);
1008 if (job->jobflags & KAIOCB_CANCELLED)
1009 return (false);
1010 job->cancel_fn = func;
1011 return (true);
1012 }
1013
1014 bool
1015 aio_set_cancel_function(struct kaiocb *job, aio_cancel_fn_t *func)
1016 {
1017 struct kaioinfo *ki;
1018 bool ret;
1019
1020 ki = job->userproc->p_aioinfo;
1021 AIO_LOCK(ki);
1022 ret = aio_set_cancel_function_locked(job, func);
1023 AIO_UNLOCK(ki);
1024 return (ret);
1025 }
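
/*
 * Sketch of how the hooks above are used by the queueing paths in this file
 * ("backend_cancel" stands for a cancel callback such as
 * aio_cancel_daemon_job() or aio_cancel_sync() below):
 *
 *	if (!aio_set_cancel_function(job, backend_cancel)) {
 *		aio_cancel(job);	(already cancelled; finish it as such)
 *		return;
 *	}
 *	(park the job on a backend queue)
 *	...
 *	if (!aio_clear_cancel_function(job))
 *		(a cancel is in flight; backend_cancel finishes the job)
 *	else
 *		(the job is ours again; process it and call aio_complete())
 *
 * backend_cancel() itself dequeues the job, checking aio_cancel_cleared()
 * under the queue lock, and then calls aio_cancel().
 */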
1026
1027 void
1028 aio_complete(struct kaiocb *job, long status, int error)
1029 {
1030 struct kaioinfo *ki;
1031 struct proc *userp;
1032
1033 job->uaiocb._aiocb_private.error = error;
1034 job->uaiocb._aiocb_private.status = status;
1035
1036 userp = job->userproc;
1037 ki = userp->p_aioinfo;
1038
1039 AIO_LOCK(ki);
1040 KASSERT(!(job->jobflags & KAIOCB_FINISHED),
1041 ("duplicate aio_complete"));
1042 job->jobflags |= KAIOCB_FINISHED;
1043 if ((job->jobflags & (KAIOCB_QUEUEING | KAIOCB_CANCELLING)) == 0) {
1044 TAILQ_REMOVE(&ki->kaio_jobqueue, job, plist);
1045 aio_bio_done_notify(userp, job);
1046 }
1047 AIO_UNLOCK(ki);
1048 }
1049
1050 void
1051 aio_cancel(struct kaiocb *job)
1052 {
1053
1054 aio_complete(job, -1, ECANCELED);
1055 }
1056
1057 void
1058 aio_switch_vmspace(struct kaiocb *job)
1059 {
1060
1061 vmspace_switch_aio(job->userproc->p_vmspace);
1062 }
1063
1064 /*
1065 * The AIO daemon, most of the actual work is done in aio_process_*,
1066 * but the setup (and address space mgmt) is done in this routine.
1067 */
1068 static void
1069 aio_daemon(void *_id)
1070 {
1071 struct kaiocb *job;
1072 struct aioproc *aiop;
1073 struct kaioinfo *ki;
1074 struct proc *p;
1075 struct vmspace *myvm;
1076 struct thread *td = curthread;
1077 int id = (intptr_t)_id;
1078
1079 /*
1080 * Grab an extra reference on the daemon's vmspace so that it
1081 * doesn't get freed by jobs that switch to a different
1082 * vmspace.
1083 */
1084 p = td->td_proc;
1085 myvm = vmspace_acquire_ref(p);
1086
1087 KASSERT(p->p_textvp == NULL, ("kthread has a textvp"));
1088
1089 /*
1090 * Allocate and ready the aio control info. There is one aiop structure
1091 * per daemon.
1092 */
1093 aiop = malloc(sizeof(*aiop), M_AIO, M_WAITOK);
1094 aiop->aioproc = p;
1095 aiop->aioprocflags = 0;
1096
1097 /*
1098 * Wake up the parent process. (Parent sleeps to keep from blasting away
1099 * and creating too many daemons.)
1100 */
1101 sema_post(&aio_newproc_sem);
1102
1103 mtx_lock(&aio_job_mtx);
1104 for (;;) {
1105 /*
1106 * Take daemon off of free queue
1107 */
1108 if (aiop->aioprocflags & AIOP_FREE) {
1109 TAILQ_REMOVE(&aio_freeproc, aiop, list);
1110 aiop->aioprocflags &= ~AIOP_FREE;
1111 }
1112
1113 /*
1114 * Check for jobs.
1115 */
1116 while ((job = aio_selectjob(aiop)) != NULL) {
1117 mtx_unlock(&aio_job_mtx);
1118
1119 ki = job->userproc->p_aioinfo;
1120 job->handle_fn(job);
1121
1122 mtx_lock(&aio_job_mtx);
1123 /* Decrement the active job count. */
1124 ki->kaio_active_count--;
1125 }
1126
1127 /*
1128 * Disconnect from user address space.
1129 */
1130 if (p->p_vmspace != myvm) {
1131 mtx_unlock(&aio_job_mtx);
1132 vmspace_switch_aio(myvm);
1133 mtx_lock(&aio_job_mtx);
1134 /*
1135 * We have to restart to avoid a race; we only sleep if
1136 * no job can be selected.
1137 */
1138 continue;
1139 }
1140
1141 mtx_assert(&aio_job_mtx, MA_OWNED);
1142
1143 TAILQ_INSERT_HEAD(&aio_freeproc, aiop, list);
1144 aiop->aioprocflags |= AIOP_FREE;
1145
1146 /*
1147 * If daemon is inactive for a long time, allow it to exit,
1148 * thereby freeing resources.
1149 */
1150 if (msleep(p, &aio_job_mtx, PRIBIO, "aiordy",
1151 aiod_lifetime) == EWOULDBLOCK && TAILQ_EMPTY(&aio_jobs) &&
1152 (aiop->aioprocflags & AIOP_FREE) &&
1153 num_aio_procs > target_aio_procs)
1154 break;
1155 }
1156 TAILQ_REMOVE(&aio_freeproc, aiop, list);
1157 num_aio_procs--;
1158 mtx_unlock(&aio_job_mtx);
1159 free(aiop, M_AIO);
1160 free_unr(aiod_unr, id);
1161 vmspace_free(myvm);
1162
1163 KASSERT(p->p_vmspace == myvm,
1164 ("AIOD: bad vmspace for exiting daemon"));
1165 KASSERT(refcount_load(&myvm->vm_refcnt) > 1,
1166 ("AIOD: bad vm refcnt for exiting daemon: %d",
1167 refcount_load(&myvm->vm_refcnt)));
1168 kproc_exit(0);
1169 }
1170
1171 /*
1172 * Create a new AIO daemon. This is mostly a kernel-thread fork routine. The
1173 * AIO daemon modifies its environment itself.
1174 */
1175 static int
1176 aio_newproc(int *start)
1177 {
1178 int error;
1179 struct proc *p;
1180 int id;
1181
1182 id = alloc_unr(aiod_unr);
1183 error = kproc_create(aio_daemon, (void *)(intptr_t)id, &p,
1184 RFNOWAIT, 0, "aiod%d", id);
1185 if (error == 0) {
1186 /*
1187 * Wait until daemon is started.
1188 */
1189 sema_wait(&aio_newproc_sem);
1190 mtx_lock(&aio_job_mtx);
1191 num_aio_procs++;
1192 if (start != NULL)
1193 (*start)--;
1194 mtx_unlock(&aio_job_mtx);
1195 } else {
1196 free_unr(aiod_unr, id);
1197 }
1198 return (error);
1199 }
1200
1201 /*
1202 * Try the high-performance, low-overhead bio method for eligible
1203 * VCHR devices. This method doesn't use an aio helper thread at all;
1204 * the request completes from the bio done callback instead.
1205 *
1206 * Assumes that the caller, aio_aqueue(), has incremented the file
1207 * structure's reference count, preventing its deallocation for the
1208 * duration of this call.
1209 */
1210 static int
1211 aio_qbio(struct proc *p, struct kaiocb *job)
1212 {
1213 struct aiocb *cb;
1214 struct file *fp;
1215 struct buf *pbuf;
1216 struct vnode *vp;
1217 struct cdevsw *csw;
1218 struct cdev *dev;
1219 struct kaioinfo *ki;
1220 struct bio **bios = NULL;
1221 off_t offset;
1222 int bio_cmd, error, i, iovcnt, opcode, poff, ref;
1223 vm_prot_t prot;
1224 bool use_unmapped;
1225
1226 cb = &job->uaiocb;
1227 fp = job->fd_file;
1228 opcode = cb->aio_lio_opcode;
1229
1230 if (!(opcode == LIO_WRITE || opcode == LIO_WRITEV ||
1231 opcode == LIO_READ || opcode == LIO_READV))
1232 return (-1);
1233 if (fp == NULL || fp->f_type != DTYPE_VNODE)
1234 return (-1);
1235
1236 vp = fp->f_vnode;
1237 if (vp->v_type != VCHR)
1238 return (-1);
1239 if (vp->v_bufobj.bo_bsize == 0)
1240 return (-1);
1241
1242 bio_cmd = (opcode & LIO_WRITE) ? BIO_WRITE : BIO_READ;
1243 iovcnt = job->uiop->uio_iovcnt;
1244 if (iovcnt > max_buf_aio)
1245 return (-1);
1246 for (i = 0; i < iovcnt; i++) {
1247 if (job->uiop->uio_iov[i].iov_len % vp->v_bufobj.bo_bsize != 0)
1248 return (-1);
1249 if (job->uiop->uio_iov[i].iov_len > maxphys) {
1250 /* Too large for one bio; fall back to the generic path. */
1251 return (-1);
1252 }
1253 }
1254 offset = cb->aio_offset;
1255
1256 ref = 0;
1257 csw = devvn_refthread(vp, &dev, &ref);
1258 if (csw == NULL)
1259 return (ENXIO);
1260
1261 if ((csw->d_flags & D_DISK) == 0) {
1262 error = -1;
1263 goto unref;
1264 }
1265 if (job->uiop->uio_resid > dev->si_iosize_max) {
1266 error = -1;
1267 goto unref;
1268 }
1269
1270 ki = p->p_aioinfo;
1271 job->error = 0;
1272
1273 use_unmapped = (dev->si_flags & SI_UNMAPPED) && unmapped_buf_allowed;
1274 if (!use_unmapped) {
1275 AIO_LOCK(ki);
1276 if (ki->kaio_buffer_count + iovcnt > max_buf_aio) {
1277 AIO_UNLOCK(ki);
1278 error = EAGAIN;
1279 goto unref;
1280 }
1281 ki->kaio_buffer_count += iovcnt;
1282 AIO_UNLOCK(ki);
1283 }
1284
1285 bios = malloc(sizeof(struct bio *) * iovcnt, M_TEMP, M_WAITOK);
1286 atomic_store_int(&job->nbio, iovcnt);
1287 for (i = 0; i < iovcnt; i++) {
1288 struct vm_page** pages;
1289 struct bio *bp;
1290 void *buf;
1291 size_t nbytes;
1292 int npages;
1293
1294 buf = job->uiop->uio_iov[i].iov_base;
1295 nbytes = job->uiop->uio_iov[i].iov_len;
1296
1297 bios[i] = g_alloc_bio();
1298 bp = bios[i];
1299
1300 poff = (vm_offset_t)buf & PAGE_MASK;
1301 if (use_unmapped) {
1302 pbuf = NULL;
1303 pages = malloc(sizeof(vm_page_t) * (atop(round_page(
1304 nbytes)) + 1), M_TEMP, M_WAITOK | M_ZERO);
1305 } else {
1306 pbuf = uma_zalloc(pbuf_zone, M_WAITOK);
1307 BUF_KERNPROC(pbuf);
1308 pages = pbuf->b_pages;
1309 }
1310
1311 bp->bio_length = nbytes;
1312 bp->bio_bcount = nbytes;
1313 bp->bio_done = aio_biowakeup;
1314 bp->bio_offset = offset;
1315 bp->bio_cmd = bio_cmd;
1316 bp->bio_dev = dev;
1317 bp->bio_caller1 = job;
1318 bp->bio_caller2 = pbuf;
1319
1320 prot = VM_PROT_READ;
1321 if (opcode == LIO_READ || opcode == LIO_READV)
1322 prot |= VM_PROT_WRITE; /* Less backwards than it looks */
1323 npages = vm_fault_quick_hold_pages(&curproc->p_vmspace->vm_map,
1324 (vm_offset_t)buf, bp->bio_length, prot, pages,
1325 atop(maxphys) + 1);
1326 if (npages < 0) {
1327 if (pbuf != NULL)
1328 uma_zfree(pbuf_zone, pbuf);
1329 else
1330 free(pages, M_TEMP);
1331 error = EFAULT;
1332 g_destroy_bio(bp);
1333 i--;
1334 goto destroy_bios;
1335 }
1336 if (pbuf != NULL) {
1337 pmap_qenter((vm_offset_t)pbuf->b_data, pages, npages);
1338 bp->bio_data = pbuf->b_data + poff;
1339 pbuf->b_npages = npages;
1340 atomic_add_int(&num_buf_aio, 1);
1341 } else {
1342 bp->bio_ma = pages;
1343 bp->bio_ma_n = npages;
1344 bp->bio_ma_offset = poff;
1345 bp->bio_data = unmapped_buf;
1346 bp->bio_flags |= BIO_UNMAPPED;
1347 atomic_add_int(&num_unmapped_aio, 1);
1348 }
1349
1350 offset += nbytes;
1351 }
1352
1353 /* Perform transfer. */
1354 for (i = 0; i < iovcnt; i++)
1355 csw->d_strategy(bios[i]);
1356 free(bios, M_TEMP);
1357
1358 dev_relthread(dev, ref);
1359 return (0);
1360
1361 destroy_bios:
1362 for (; i >= 0; i--)
1363 aio_biocleanup(bios[i]);
1364 free(bios, M_TEMP);
1365 unref:
1366 dev_relthread(dev, ref);
1367 return (error);
1368 }
1369
1370 #ifdef COMPAT_FREEBSD6
1371 static int
1372 convert_old_sigevent(struct osigevent *osig, struct sigevent *nsig)
1373 {
1374
1375 /*
1376 * Only SIGEV_NONE, SIGEV_SIGNAL, and SIGEV_KEVENT are
1377 * supported by AIO with the old sigevent structure.
1378 */
1379 nsig->sigev_notify = osig->sigev_notify;
1380 switch (nsig->sigev_notify) {
1381 case SIGEV_NONE:
1382 break;
1383 case SIGEV_SIGNAL:
1384 nsig->sigev_signo = osig->__sigev_u.__sigev_signo;
1385 break;
1386 case SIGEV_KEVENT:
1387 nsig->sigev_notify_kqueue =
1388 osig->__sigev_u.__sigev_notify_kqueue;
1389 nsig->sigev_value.sival_ptr = osig->sigev_value.sival_ptr;
1390 break;
1391 default:
1392 return (EINVAL);
1393 }
1394 return (0);
1395 }
1396
1397 static int
1398 aiocb_copyin_old_sigevent(struct aiocb *ujob, struct kaiocb *kjob,
1399 int type __unused)
1400 {
1401 struct oaiocb *ojob;
1402 struct aiocb *kcb = &kjob->uaiocb;
1403 int error;
1404
1405 bzero(kcb, sizeof(struct aiocb));
1406 error = copyin(ujob, kcb, sizeof(struct oaiocb));
1407 if (error)
1408 return (error);
1409 /* No need to copyin aio_iov, because it did not exist in FreeBSD 6 */
1410 ojob = (struct oaiocb *)kcb;
1411 return (convert_old_sigevent(&ojob->aio_sigevent, &kcb->aio_sigevent));
1412 }
1413 #endif
1414
1415 static int
1416 aiocb_copyin(struct aiocb *ujob, struct kaiocb *kjob, int type)
1417 {
1418 struct aiocb *kcb = &kjob->uaiocb;
1419 int error;
1420
1421 error = copyin(ujob, kcb, sizeof(struct aiocb));
1422 if (error)
1423 return (error);
1424 if (type & LIO_VECTORED) {
1425 /* malloc a uio and copy in the iovec */
1426 error = copyinuio(__DEVOLATILE(struct iovec*, kcb->aio_iov),
1427 kcb->aio_iovcnt, &kjob->uiop);
1428 }
1429
1430 return (error);
1431 }
1432
1433 static long
1434 aiocb_fetch_status(struct aiocb *ujob)
1435 {
1436
1437 return (fuword(&ujob->_aiocb_private.status));
1438 }
1439
1440 static long
1441 aiocb_fetch_error(struct aiocb *ujob)
1442 {
1443
1444 return (fuword(&ujob->_aiocb_private.error));
1445 }
1446
1447 static int
1448 aiocb_store_status(struct aiocb *ujob, long status)
1449 {
1450
1451 return (suword(&ujob->_aiocb_private.status, status));
1452 }
1453
1454 static int
1455 aiocb_store_error(struct aiocb *ujob, long error)
1456 {
1457
1458 return (suword(&ujob->_aiocb_private.error, error));
1459 }
1460
1461 static int
1462 aiocb_store_kernelinfo(struct aiocb *ujob, long jobref)
1463 {
1464
1465 return (suword(&ujob->_aiocb_private.kernelinfo, jobref));
1466 }
1467
1468 static int
1469 aiocb_store_aiocb(struct aiocb **ujobp, struct aiocb *ujob)
1470 {
1471
1472 return (suword(ujobp, (long)ujob));
1473 }
1474
1475 static struct aiocb_ops aiocb_ops = {
1476 .aio_copyin = aiocb_copyin,
1477 .fetch_status = aiocb_fetch_status,
1478 .fetch_error = aiocb_fetch_error,
1479 .store_status = aiocb_store_status,
1480 .store_error = aiocb_store_error,
1481 .store_kernelinfo = aiocb_store_kernelinfo,
1482 .store_aiocb = aiocb_store_aiocb,
1483 };
1484
1485 #ifdef COMPAT_FREEBSD6
1486 static struct aiocb_ops aiocb_ops_osigevent = {
1487 .aio_copyin = aiocb_copyin_old_sigevent,
1488 .fetch_status = aiocb_fetch_status,
1489 .fetch_error = aiocb_fetch_error,
1490 .store_status = aiocb_store_status,
1491 .store_error = aiocb_store_error,
1492 .store_kernelinfo = aiocb_store_kernelinfo,
1493 .store_aiocb = aiocb_store_aiocb,
1494 };
1495 #endif
1496
1497 /*
1498 * Queue a new AIO request. This code chooses between the threaded and the
1499 * direct bio (VCHR) techniques.
1500 */
1501 int
1502 aio_aqueue(struct thread *td, struct aiocb *ujob, struct aioliojob *lj,
1503 int type, struct aiocb_ops *ops)
1504 {
1505 struct proc *p = td->td_proc;
1506 struct file *fp = NULL;
1507 struct kaiocb *job;
1508 struct kaioinfo *ki;
1509 struct kevent kev;
1510 int opcode;
1511 int error;
1512 int fd, kqfd;
1513 int jid;
1514 u_short evflags;
1515
1516 if (p->p_aioinfo == NULL)
1517 aio_init_aioinfo(p);
1518
1519 ki = p->p_aioinfo;
1520
1521 ops->store_status(ujob, -1);
1522 ops->store_error(ujob, 0);
1523 ops->store_kernelinfo(ujob, -1);
1524
1525 if (num_queue_count >= max_queue_count ||
1526 ki->kaio_count >= max_aio_queue_per_proc) {
1527 error = EAGAIN;
1528 goto err1;
1529 }
1530
1531 job = uma_zalloc(aiocb_zone, M_WAITOK | M_ZERO);
1532 knlist_init_mtx(&job->klist, AIO_MTX(ki));
1533
1534 error = ops->aio_copyin(ujob, job, type);
1535 if (error)
1536 goto err2;
1537
1538 if (job->uaiocb.aio_nbytes > IOSIZE_MAX) {
1539 error = EINVAL;
1540 goto err2;
1541 }
1542
1543 if (job->uaiocb.aio_sigevent.sigev_notify != SIGEV_KEVENT &&
1544 job->uaiocb.aio_sigevent.sigev_notify != SIGEV_SIGNAL &&
1545 job->uaiocb.aio_sigevent.sigev_notify != SIGEV_THREAD_ID &&
1546 job->uaiocb.aio_sigevent.sigev_notify != SIGEV_NONE) {
1547 error = EINVAL;
1548 goto err2;
1549 }
1550
1551 if ((job->uaiocb.aio_sigevent.sigev_notify == SIGEV_SIGNAL ||
1552 job->uaiocb.aio_sigevent.sigev_notify == SIGEV_THREAD_ID) &&
1553 !_SIG_VALID(job->uaiocb.aio_sigevent.sigev_signo)) {
1554 error = EINVAL;
1555 goto err2;
1556 }
1557
1558 /* Get the opcode. */
1559 if (type == LIO_NOP) {
1560 switch (job->uaiocb.aio_lio_opcode) {
1561 case LIO_WRITE:
1562 case LIO_NOP:
1563 case LIO_READ:
1564 opcode = job->uaiocb.aio_lio_opcode;
1565 break;
1566 default:
1567 error = EINVAL;
1568 goto err2;
1569 }
1570 } else
1571 opcode = job->uaiocb.aio_lio_opcode = type;
1572
1573 ksiginfo_init(&job->ksi);
1574
1575 /* Save userspace address of the job info. */
1576 job->ujob = ujob;
1577
1578 /*
1579 * Validate the opcode and fetch the file object for the specified
1580 * file descriptor.
1581 *
1582 * XXXRW: Moved the opcode validation up here so that we don't
1583 * retrieve a file descriptor without knowing what the capability
1584 * should be.
1585 */
1586 fd = job->uaiocb.aio_fildes;
1587 switch (opcode) {
1588 case LIO_WRITE:
1589 case LIO_WRITEV:
1590 error = fget_write(td, fd, &cap_pwrite_rights, &fp);
1591 break;
1592 case LIO_READ:
1593 case LIO_READV:
1594 error = fget_read(td, fd, &cap_pread_rights, &fp);
1595 break;
1596 case LIO_SYNC:
1597 case LIO_DSYNC:
1598 error = fget(td, fd, &cap_fsync_rights, &fp);
1599 break;
1600 case LIO_MLOCK:
1601 break;
1602 case LIO_NOP:
1603 error = fget(td, fd, &cap_no_rights, &fp);
1604 break;
1605 default:
1606 error = EINVAL;
1607 }
1608 if (error)
1609 goto err3;
1610
1611 if ((opcode & LIO_SYNC) && fp->f_vnode == NULL) {
1612 error = EINVAL;
1613 goto err3;
1614 }
1615
1616 if ((opcode == LIO_READ || opcode == LIO_READV ||
1617 opcode == LIO_WRITE || opcode == LIO_WRITEV) &&
1618 job->uaiocb.aio_offset < 0 &&
1619 (fp->f_vnode == NULL || fp->f_vnode->v_type != VCHR)) {
1620 error = EINVAL;
1621 goto err3;
1622 }
1623
1624 if (fp != NULL && fp->f_ops == &path_fileops) {
1625 error = EBADF;
1626 goto err3;
1627 }
1628
1629 job->fd_file = fp;
1630
1631 mtx_lock(&aio_job_mtx);
1632 jid = jobrefid++;
1633 job->seqno = jobseqno++;
1634 mtx_unlock(&aio_job_mtx);
1635 error = ops->store_kernelinfo(ujob, jid);
1636 if (error) {
1637 error = EINVAL;
1638 goto err3;
1639 }
1640 job->uaiocb._aiocb_private.kernelinfo = (void *)(intptr_t)jid;
1641
1642 if (opcode == LIO_NOP) {
1643 fdrop(fp, td);
1644 MPASS(job->uiop == &job->uio || job->uiop == NULL);
1645 uma_zfree(aiocb_zone, job);
1646 return (0);
1647 }
1648
1649 if (job->uaiocb.aio_sigevent.sigev_notify != SIGEV_KEVENT)
1650 goto no_kqueue;
1651 evflags = job->uaiocb.aio_sigevent.sigev_notify_kevent_flags;
1652 if ((evflags & ~(EV_CLEAR | EV_DISPATCH | EV_ONESHOT)) != 0) {
1653 error = EINVAL;
1654 goto err3;
1655 }
1656 kqfd = job->uaiocb.aio_sigevent.sigev_notify_kqueue;
1657 memset(&kev, 0, sizeof(kev));
1658 kev.ident = (uintptr_t)job->ujob;
1659 kev.filter = EVFILT_AIO;
1660 kev.flags = EV_ADD | EV_ENABLE | EV_FLAG1 | evflags;
1661 kev.data = (intptr_t)job;
1662 kev.udata = job->uaiocb.aio_sigevent.sigev_value.sival_ptr;
1663 error = kqfd_register(kqfd, &kev, td, M_WAITOK);
1664 if (error)
1665 goto err3;
1666
1667 no_kqueue:
1668
1669 ops->store_error(ujob, EINPROGRESS);
1670 job->uaiocb._aiocb_private.error = EINPROGRESS;
1671 job->userproc = p;
1672 job->cred = crhold(td->td_ucred);
1673 job->jobflags = KAIOCB_QUEUEING;
1674 job->lio = lj;
1675
1676 if (opcode & LIO_VECTORED) {
1677 /* Use the uio copied in by aio_copyin */
1678 MPASS(job->uiop != &job->uio && job->uiop != NULL);
1679 } else {
1680 /* Setup the inline uio */
1681 job->iov[0].iov_base = (void *)(uintptr_t)job->uaiocb.aio_buf;
1682 job->iov[0].iov_len = job->uaiocb.aio_nbytes;
1683 job->uio.uio_iov = job->iov;
1684 job->uio.uio_iovcnt = 1;
1685 job->uio.uio_resid = job->uaiocb.aio_nbytes;
1686 job->uio.uio_segflg = UIO_USERSPACE;
1687 job->uiop = &job->uio;
1688 }
1689 switch (opcode & (LIO_READ | LIO_WRITE)) {
1690 case LIO_READ:
1691 job->uiop->uio_rw = UIO_READ;
1692 break;
1693 case LIO_WRITE:
1694 job->uiop->uio_rw = UIO_WRITE;
1695 break;
1696 }
1697 job->uiop->uio_offset = job->uaiocb.aio_offset;
1698 job->uiop->uio_td = td;
1699
1700 if (opcode == LIO_MLOCK) {
1701 aio_schedule(job, aio_process_mlock);
1702 error = 0;
1703 } else if (fp->f_ops->fo_aio_queue == NULL)
1704 error = aio_queue_file(fp, job);
1705 else
1706 error = fo_aio_queue(fp, job);
1707 if (error)
1708 goto err4;
1709
1710 AIO_LOCK(ki);
1711 job->jobflags &= ~KAIOCB_QUEUEING;
1712 TAILQ_INSERT_TAIL(&ki->kaio_all, job, allist);
1713 ki->kaio_count++;
1714 if (lj)
1715 lj->lioj_count++;
1716 atomic_add_int(&num_queue_count, 1);
1717 if (job->jobflags & KAIOCB_FINISHED) {
1718 /*
1719 * The queue callback completed the request synchronously.
1720 * The bulk of the completion is deferred in that case
1721 * until this point.
1722 */
1723 aio_bio_done_notify(p, job);
1724 } else
1725 TAILQ_INSERT_TAIL(&ki->kaio_jobqueue, job, plist);
1726 AIO_UNLOCK(ki);
1727 return (0);
1728
1729 err4:
1730 crfree(job->cred);
1731 err3:
1732 if (fp)
1733 fdrop(fp, td);
1734 knlist_delete(&job->klist, curthread, 0);
1735 err2:
1736 if (job->uiop != &job->uio)
1737 free(job->uiop, M_IOV);
1738 uma_zfree(aiocb_zone, job);
1739 err1:
1740 ops->store_error(ujob, error);
1741 return (error);
1742 }
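
/*
 * Userland counterpart of the SIGEV_KEVENT path handled by aio_aqueue()
 * above (sketch only; "kq" and "fd" are assumed to be an existing kqueue and
 * an open descriptor):
 *
 *	struct aiocb cb;
 *	struct kevent ev;
 *
 *	memset(&cb, 0, sizeof(cb));
 *	cb.aio_fildes = fd;
 *	cb.aio_buf = buf;
 *	cb.aio_nbytes = sizeof(buf);
 *	cb.aio_sigevent.sigev_notify = SIGEV_KEVENT;
 *	cb.aio_sigevent.sigev_notify_kqueue = kq;
 *	cb.aio_sigevent.sigev_value.sival_ptr = &cb;
 *	aio_read(&cb);
 *	kevent(kq, NULL, 0, &ev, 1, NULL);
 *	(on return, ev.udata == &cb and aio_return(&cb) may be called)
 */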
1743
1744 static void
1745 aio_cancel_daemon_job(struct kaiocb *job)
1746 {
1747
1748 mtx_lock(&aio_job_mtx);
1749 if (!aio_cancel_cleared(job))
1750 TAILQ_REMOVE(&aio_jobs, job, list);
1751 mtx_unlock(&aio_job_mtx);
1752 aio_cancel(job);
1753 }
1754
1755 void
1756 aio_schedule(struct kaiocb *job, aio_handle_fn_t *func)
1757 {
1758
1759 mtx_lock(&aio_job_mtx);
1760 if (!aio_set_cancel_function(job, aio_cancel_daemon_job)) {
1761 mtx_unlock(&aio_job_mtx);
1762 aio_cancel(job);
1763 return;
1764 }
1765 job->handle_fn = func;
1766 TAILQ_INSERT_TAIL(&aio_jobs, job, list);
1767 aio_kick_nowait(job->userproc);
1768 mtx_unlock(&aio_job_mtx);
1769 }
1770
1771 static void
1772 aio_cancel_sync(struct kaiocb *job)
1773 {
1774 struct kaioinfo *ki;
1775
1776 ki = job->userproc->p_aioinfo;
1777 AIO_LOCK(ki);
1778 if (!aio_cancel_cleared(job))
1779 TAILQ_REMOVE(&ki->kaio_syncqueue, job, list);
1780 AIO_UNLOCK(ki);
1781 aio_cancel(job);
1782 }
1783
1784 int
1785 aio_queue_file(struct file *fp, struct kaiocb *job)
1786 {
1787 struct kaioinfo *ki;
1788 struct kaiocb *job2;
1789 struct vnode *vp;
1790 struct mount *mp;
1791 int error;
1792 bool safe;
1793
1794 ki = job->userproc->p_aioinfo;
1795 error = aio_qbio(job->userproc, job);
1796 if (error >= 0)
1797 return (error);
1798 safe = false;
1799 if (fp->f_type == DTYPE_VNODE) {
1800 vp = fp->f_vnode;
1801 if (vp->v_type == VREG || vp->v_type == VDIR) {
1802 mp = fp->f_vnode->v_mount;
1803 if (mp == NULL || (mp->mnt_flag & MNT_LOCAL) != 0)
1804 safe = true;
1805 }
1806 }
1807 if (!(safe || enable_aio_unsafe)) {
1808 counted_warning(&unsafe_warningcnt,
1809 "is attempting to use unsafe AIO requests");
1810 return (EOPNOTSUPP);
1811 }
1812
1813 if (job->uaiocb.aio_lio_opcode & (LIO_WRITE | LIO_READ)) {
1814 aio_schedule(job, aio_process_rw);
1815 error = 0;
1816 } else if (job->uaiocb.aio_lio_opcode & LIO_SYNC) {
1817 AIO_LOCK(ki);
1818 TAILQ_FOREACH(job2, &ki->kaio_jobqueue, plist) {
1819 if (job2->fd_file == job->fd_file &&
1820 ((job2->uaiocb.aio_lio_opcode & LIO_SYNC) == 0) &&
1821 job2->seqno < job->seqno) {
1822 job2->jobflags |= KAIOCB_CHECKSYNC;
1823 job->pending++;
1824 }
1825 }
1826 if (job->pending != 0) {
1827 if (!aio_set_cancel_function_locked(job,
1828 aio_cancel_sync)) {
1829 AIO_UNLOCK(ki);
1830 aio_cancel(job);
1831 return (0);
1832 }
1833 TAILQ_INSERT_TAIL(&ki->kaio_syncqueue, job, list);
1834 AIO_UNLOCK(ki);
1835 return (0);
1836 }
1837 AIO_UNLOCK(ki);
1838 aio_schedule(job, aio_process_sync);
1839 error = 0;
1840 } else {
1841 error = EINVAL;
1842 }
1843 return (error);
1844 }
1845
1846 static void
1847 aio_kick_nowait(struct proc *userp)
1848 {
1849 struct kaioinfo *ki = userp->p_aioinfo;
1850 struct aioproc *aiop;
1851
1852 mtx_assert(&aio_job_mtx, MA_OWNED);
1853 if ((aiop = TAILQ_FIRST(&aio_freeproc)) != NULL) {
1854 TAILQ_REMOVE(&aio_freeproc, aiop, list);
1855 aiop->aioprocflags &= ~AIOP_FREE;
1856 wakeup(aiop->aioproc);
1857 } else if (num_aio_resv_start + num_aio_procs < max_aio_procs &&
1858 ki->kaio_active_count + num_aio_resv_start < max_aio_per_proc) {
1859 taskqueue_enqueue(taskqueue_aiod_kick, &ki->kaio_task);
1860 }
1861 }
1862
1863 static int
1864 aio_kick(struct proc *userp)
1865 {
1866 struct kaioinfo *ki = userp->p_aioinfo;
1867 struct aioproc *aiop;
1868 int error, ret = 0;
1869
1870 mtx_assert(&aio_job_mtx, MA_OWNED);
1871 retryproc:
1872 if ((aiop = TAILQ_FIRST(&aio_freeproc)) != NULL) {
1873 TAILQ_REMOVE(&aio_freeproc, aiop, list);
1874 aiop->aioprocflags &= ~AIOP_FREE;
1875 wakeup(aiop->aioproc);
1876 } else if (num_aio_resv_start + num_aio_procs < max_aio_procs &&
1877 ki->kaio_active_count + num_aio_resv_start < max_aio_per_proc) {
1878 num_aio_resv_start++;
1879 mtx_unlock(&aio_job_mtx);
1880 error = aio_newproc(&num_aio_resv_start);
1881 mtx_lock(&aio_job_mtx);
1882 if (error) {
1883 num_aio_resv_start--;
1884 goto retryproc;
1885 }
1886 } else {
1887 ret = -1;
1888 }
1889 return (ret);
1890 }
1891
1892 static void
1893 aio_kick_helper(void *context, int pending)
1894 {
1895 struct proc *userp = context;
1896
1897 mtx_lock(&aio_job_mtx);
1898 while (--pending >= 0) {
1899 if (aio_kick(userp))
1900 break;
1901 }
1902 mtx_unlock(&aio_job_mtx);
1903 }
1904
1905 /*
1906 * Support the aio_return system call; as a side effect, kernel resources
1907 * are released.
1908 */
1909 static int
1910 kern_aio_return(struct thread *td, struct aiocb *ujob, struct aiocb_ops *ops)
1911 {
1912 struct proc *p = td->td_proc;
1913 struct kaiocb *job;
1914 struct kaioinfo *ki;
1915 long status, error;
1916
1917 ki = p->p_aioinfo;
1918 if (ki == NULL)
1919 return (EINVAL);
1920 AIO_LOCK(ki);
1921 TAILQ_FOREACH(job, &ki->kaio_done, plist) {
1922 if (job->ujob == ujob)
1923 break;
1924 }
1925 if (job != NULL) {
1926 MPASS(job->jobflags & KAIOCB_FINISHED);
1927 status = job->uaiocb._aiocb_private.status;
1928 error = job->uaiocb._aiocb_private.error;
1929 td->td_retval[0] = status;
1930 td->td_ru.ru_oublock += job->outblock;
1931 td->td_ru.ru_inblock += job->inblock;
1932 td->td_ru.ru_msgsnd += job->msgsnd;
1933 td->td_ru.ru_msgrcv += job->msgrcv;
1934 aio_free_entry(job);
1935 AIO_UNLOCK(ki);
1936 ops->store_error(ujob, error);
1937 ops->store_status(ujob, status);
1938 } else {
1939 error = EINVAL;
1940 AIO_UNLOCK(ki);
1941 }
1942 return (error);
1943 }
1944
1945 int
1946 sys_aio_return(struct thread *td, struct aio_return_args *uap)
1947 {
1948
1949 return (kern_aio_return(td, uap->aiocbp, &aiocb_ops));
1950 }
1951
1952 /*
1953 * Allow a process to wake up when any of the listed I/O requests completes.
1954 */
1955 static int
1956 kern_aio_suspend(struct thread *td, int njoblist, struct aiocb **ujoblist,
1957 struct timespec *ts)
1958 {
1959 struct proc *p = td->td_proc;
1960 struct timeval atv;
1961 struct kaioinfo *ki;
1962 struct kaiocb *firstjob, *job;
1963 int error, i, timo;
1964
1965 timo = 0;
1966 if (ts) {
1967 if (ts->tv_nsec < 0 || ts->tv_nsec >= 1000000000)
1968 return (EINVAL);
1969
1970 TIMESPEC_TO_TIMEVAL(&atv, ts);
1971 if (itimerfix(&atv))
1972 return (EINVAL);
1973 timo = tvtohz(&atv);
1974 }
1975
1976 ki = p->p_aioinfo;
1977 if (ki == NULL)
1978 return (EAGAIN);
1979
1980 if (njoblist == 0)
1981 return (0);
1982
1983 AIO_LOCK(ki);
1984 for (;;) {
1985 firstjob = NULL;
1986 error = 0;
1987 TAILQ_FOREACH(job, &ki->kaio_all, allist) {
1988 for (i = 0; i < njoblist; i++) {
1989 if (job->ujob == ujoblist[i]) {
1990 if (firstjob == NULL)
1991 firstjob = job;
1992 if (job->jobflags & KAIOCB_FINISHED)
1993 goto RETURN;
1994 }
1995 }
1996 }
1997 /* All tasks were finished. */
1998 if (firstjob == NULL)
1999 break;
2000
2001 ki->kaio_flags |= KAIO_WAKEUP;
2002 error = msleep(&p->p_aioinfo, AIO_MTX(ki), PRIBIO | PCATCH,
2003 "aiospn", timo);
2004 if (error == ERESTART)
2005 error = EINTR;
2006 if (error)
2007 break;
2008 }
2009 RETURN:
2010 AIO_UNLOCK(ki);
2011 return (error);
2012 }
2013
2014 int
2015 sys_aio_suspend(struct thread *td, struct aio_suspend_args *uap)
2016 {
2017 struct timespec ts, *tsp;
2018 struct aiocb **ujoblist;
2019 int error;
2020
2021 if (uap->nent < 0 || uap->nent > max_aio_queue_per_proc)
2022 return (EINVAL);
2023
2024 if (uap->timeout) {
2025 /* Get timespec struct. */
2026 if ((error = copyin(uap->timeout, &ts, sizeof(ts))) != 0)
2027 return (error);
2028 tsp = &ts;
2029 } else
2030 tsp = NULL;
2031
2032 ujoblist = malloc(uap->nent * sizeof(ujoblist[0]), M_AIO, M_WAITOK);
2033 error = copyin(uap->aiocbp, ujoblist, uap->nent * sizeof(ujoblist[0]));
2034 if (error == 0)
2035 error = kern_aio_suspend(td, uap->nent, ujoblist, tsp);
2036 free(ujoblist, M_AIO);
2037 return (error);
2038 }
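
/*
 * Illustrative userland sketch (not part of this file): blocking in
 * aio_suspend() until one of two previously queued requests finishes or a
 * one-second timeout expires.  "cb1" and "cb2" are assumed to have been
 * submitted already with aio_read()/aio_write().
 *
 *	#include <aio.h>
 *	#include <errno.h>
 *
 *	const struct aiocb *list[2] = { &cb1, &cb2 };
 *	struct timespec ts = { .tv_sec = 1, .tv_nsec = 0 };
 *
 *	if (aio_suspend(list, 2, &ts) == -1) {
 *		if (errno == EAGAIN)
 *			;	/* timed out, nothing finished yet */
 *		else if (errno == EINTR)
 *			;	/* interrupted by a signal */
 *	}
 */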
2039
2040 /*
2041 * aio_cancel cancels any non-bio aio operations not currently in progress.
2042 */
2043 int
2044 sys_aio_cancel(struct thread *td, struct aio_cancel_args *uap)
2045 {
2046 struct proc *p = td->td_proc;
2047 struct kaioinfo *ki;
2048 struct kaiocb *job, *jobn;
2049 struct file *fp;
2050 int error;
2051 int cancelled = 0;
2052 int notcancelled = 0;
2053 struct vnode *vp;
2054
2055 /* Lookup file object. */
2056 error = fget(td, uap->fd, &cap_no_rights, &fp);
2057 if (error)
2058 return (error);
2059
2060 ki = p->p_aioinfo;
2061 if (ki == NULL)
2062 goto done;
2063
2064 if (fp->f_type == DTYPE_VNODE) {
2065 vp = fp->f_vnode;
2066 if (vn_isdisk(vp)) {
2067 fdrop(fp, td);
2068 td->td_retval[0] = AIO_NOTCANCELED;
2069 return (0);
2070 }
2071 }
2072
2073 AIO_LOCK(ki);
2074 TAILQ_FOREACH_SAFE(job, &ki->kaio_jobqueue, plist, jobn) {
2075 if ((uap->fd == job->uaiocb.aio_fildes) &&
2076 ((uap->aiocbp == NULL) ||
2077 (uap->aiocbp == job->ujob))) {
2078 if (aio_cancel_job(p, ki, job)) {
2079 cancelled++;
2080 } else {
2081 notcancelled++;
2082 }
2083 if (uap->aiocbp != NULL)
2084 break;
2085 }
2086 }
2087 AIO_UNLOCK(ki);
2088
2089 done:
2090 fdrop(fp, td);
2091
2092 if (uap->aiocbp != NULL) {
2093 if (cancelled) {
2094 td->td_retval[0] = AIO_CANCELED;
2095 return (0);
2096 }
2097 }
2098
2099 if (notcancelled) {
2100 td->td_retval[0] = AIO_NOTCANCELED;
2101 return (0);
2102 }
2103
2104 if (cancelled) {
2105 td->td_retval[0] = AIO_CANCELED;
2106 return (0);
2107 }
2108
2109 td->td_retval[0] = AIO_ALLDONE;
2110
2111 return (0);
2112 }
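
/*
 * Illustrative userland sketch (not part of this file): the three return
 * values produced by the implementation above, as seen through aio_cancel().
 * "fd" and "cb" are assumptions; passing a NULL aiocb pointer instead asks
 * to cancel every outstanding request on the descriptor.
 *
 *	#include <aio.h>
 *
 *	switch (aio_cancel(fd, &cb)) {
 *	case AIO_CANCELED:	/* dequeued; aio_error() now reports ECANCELED */
 *		break;
 *	case AIO_NOTCANCELED:	/* in progress (e.g. on a raw disk), not cancelable */
 *		break;
 *	case AIO_ALLDONE:	/* it had already completed */
 *		break;
 *	}
 */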
2113
2114 /*
2115 * aio_error is implemented at the kernel level for compatibility purposes
2116 * only. For a user mode async implementation, it would be best to do it in
2117 * a userland subroutine.
2118 */
2119 static int
2120 kern_aio_error(struct thread *td, struct aiocb *ujob, struct aiocb_ops *ops)
2121 {
2122 struct proc *p = td->td_proc;
2123 struct kaiocb *job;
2124 struct kaioinfo *ki;
2125 int status;
2126
2127 ki = p->p_aioinfo;
2128 if (ki == NULL) {
2129 td->td_retval[0] = EINVAL;
2130 return (0);
2131 }
2132
2133 AIO_LOCK(ki);
2134 TAILQ_FOREACH(job, &ki->kaio_all, allist) {
2135 if (job->ujob == ujob) {
2136 if (job->jobflags & KAIOCB_FINISHED)
2137 td->td_retval[0] =
2138 job->uaiocb._aiocb_private.error;
2139 else
2140 td->td_retval[0] = EINPROGRESS;
2141 AIO_UNLOCK(ki);
2142 return (0);
2143 }
2144 }
2145 AIO_UNLOCK(ki);
2146
2147 /*
2148 * Hack for failure of aio_aqueue.
2149 */
2150 status = ops->fetch_status(ujob);
2151 if (status == -1) {
2152 td->td_retval[0] = ops->fetch_error(ujob);
2153 return (0);
2154 }
2155
2156 td->td_retval[0] = EINVAL;
2157 return (0);
2158 }
2159
2160 int
2161 sys_aio_error(struct thread *td, struct aio_error_args *uap)
2162 {
2163
2164 return (kern_aio_error(td, uap->aiocbp, &aiocb_ops));
2165 }
2166
2167 /* syscall - asynchronous read from a file (REALTIME) */
2168 #ifdef COMPAT_FREEBSD6
2169 int
2170 freebsd6_aio_read(struct thread *td, struct freebsd6_aio_read_args *uap)
2171 {
2172
2173 return (aio_aqueue(td, (struct aiocb *)uap->aiocbp, NULL, LIO_READ,
2174 &aiocb_ops_osigevent));
2175 }
2176 #endif
2177
2178 int
2179 sys_aio_read(struct thread *td, struct aio_read_args *uap)
2180 {
2181
2182 return (aio_aqueue(td, uap->aiocbp, NULL, LIO_READ, &aiocb_ops));
2183 }
2184
2185 int
2186 sys_aio_readv(struct thread *td, struct aio_readv_args *uap)
2187 {
2188
2189 return (aio_aqueue(td, uap->aiocbp, NULL, LIO_READV, &aiocb_ops));
2190 }
2191
2192 /* syscall - asynchronous write to a file (REALTIME) */
2193 #ifdef COMPAT_FREEBSD6
2194 int
2195 freebsd6_aio_write(struct thread *td, struct freebsd6_aio_write_args *uap)
2196 {
2197
2198 return (aio_aqueue(td, (struct aiocb *)uap->aiocbp, NULL, LIO_WRITE,
2199 &aiocb_ops_osigevent));
2200 }
2201 #endif
2202
2203 int
2204 sys_aio_write(struct thread *td, struct aio_write_args *uap)
2205 {
2206
2207 return (aio_aqueue(td, uap->aiocbp, NULL, LIO_WRITE, &aiocb_ops));
2208 }
2209
2210 int
2211 sys_aio_writev(struct thread *td, struct aio_writev_args *uap)
2212 {
2213
2214 return (aio_aqueue(td, uap->aiocbp, NULL, LIO_WRITEV, &aiocb_ops));
2215 }
2216
2217 int
2218 sys_aio_mlock(struct thread *td, struct aio_mlock_args *uap)
2219 {
2220
2221 return (aio_aqueue(td, uap->aiocbp, NULL, LIO_MLOCK, &aiocb_ops));
2222 }
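
/*
 * Illustrative userland sketch (not part of this file): aio_mlock() wires a
 * range of the caller's address space asynchronously via the LIO_MLOCK
 * opcode queued above.  The range is described with aio_buf/aio_nbytes;
 * "region" and "region_len" are assumptions.
 *
 *	#include <aio.h>
 *
 *	struct aiocb cb = { 0 };
 *
 *	cb.aio_buf = region;
 *	cb.aio_nbytes = region_len;
 *	if (aio_mlock(&cb) == -1)
 *		;	/* queueing failed */
 *	/* completion is observed with aio_error()/aio_return() as usual */
 */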
2223
2224 static int
2225 kern_lio_listio(struct thread *td, int mode, struct aiocb * const *uacb_list,
2226 struct aiocb **acb_list, int nent, struct sigevent *sig,
2227 struct aiocb_ops *ops)
2228 {
2229 struct proc *p = td->td_proc;
2230 struct aiocb *job;
2231 struct kaioinfo *ki;
2232 struct aioliojob *lj;
2233 struct kevent kev;
2234 int error;
2235 int nagain, nerror;
2236 int i;
2237
2238 if ((mode != LIO_NOWAIT) && (mode != LIO_WAIT))
2239 return (EINVAL);
2240
2241 if (nent < 0 || nent > max_aio_queue_per_proc)
2242 return (EINVAL);
2243
2244 if (p->p_aioinfo == NULL)
2245 aio_init_aioinfo(p);
2246
2247 ki = p->p_aioinfo;
2248
2249 lj = uma_zalloc(aiolio_zone, M_WAITOK);
2250 lj->lioj_flags = 0;
2251 lj->lioj_count = 0;
2252 lj->lioj_finished_count = 0;
2253 lj->lioj_signal.sigev_notify = SIGEV_NONE;
2254 knlist_init_mtx(&lj->klist, AIO_MTX(ki));
2255 ksiginfo_init(&lj->lioj_ksi);
2256
2257 /*
2258 * Set up the signal.
2259 */
2260 if (sig && (mode == LIO_NOWAIT)) {
2261 bcopy(sig, &lj->lioj_signal, sizeof(lj->lioj_signal));
2262 if (lj->lioj_signal.sigev_notify == SIGEV_KEVENT) {
2263 /* Assume only new style KEVENT */
2264 memset(&kev, 0, sizeof(kev));
2265 kev.filter = EVFILT_LIO;
2266 kev.flags = EV_ADD | EV_ENABLE | EV_FLAG1;
2267 kev.ident = (uintptr_t)uacb_list; /* something unique */
2268 kev.data = (intptr_t)lj;
2269 /* pass user defined sigval data */
2270 kev.udata = lj->lioj_signal.sigev_value.sival_ptr;
2271 error = kqfd_register(
2272 lj->lioj_signal.sigev_notify_kqueue, &kev, td,
2273 M_WAITOK);
2274 if (error) {
2275 uma_zfree(aiolio_zone, lj);
2276 return (error);
2277 }
2278 } else if (lj->lioj_signal.sigev_notify == SIGEV_NONE) {
2279 ;
2280 } else if (lj->lioj_signal.sigev_notify == SIGEV_SIGNAL ||
2281 lj->lioj_signal.sigev_notify == SIGEV_THREAD_ID) {
2282 if (!_SIG_VALID(lj->lioj_signal.sigev_signo)) {
2283 uma_zfree(aiolio_zone, lj);
2284 return (EINVAL);
2285 }
2286 lj->lioj_flags |= LIOJ_SIGNAL;
2287 } else {
2288 uma_zfree(aiolio_zone, lj);
2289 return (EINVAL);
2290 }
2291 }
2292
2293 AIO_LOCK(ki);
2294 TAILQ_INSERT_TAIL(&ki->kaio_liojoblist, lj, lioj_list);
2295 /*
2296 * Take an extra reference on the lio so that it cannot be freed by
2297 * other threads doing aio_waitcomplete() or aio_return(), and so
2298 * that no completion event is sent until we have queued all of the
2299 * tasks.
2300 */
2301 lj->lioj_count = 1;
2302 AIO_UNLOCK(ki);
2303
2304 /*
2305 * Get pointers to the list of I/O requests.
2306 */
2307 nagain = 0;
2308 nerror = 0;
2309 for (i = 0; i < nent; i++) {
2310 job = acb_list[i];
2311 if (job != NULL) {
2312 error = aio_aqueue(td, job, lj, LIO_NOP, ops);
2313 if (error == EAGAIN)
2314 nagain++;
2315 else if (error != 0)
2316 nerror++;
2317 }
2318 }
2319
2320 error = 0;
2321 AIO_LOCK(ki);
2322 if (mode == LIO_WAIT) {
2323 while (lj->lioj_count - 1 != lj->lioj_finished_count) {
2324 ki->kaio_flags |= KAIO_WAKEUP;
2325 error = msleep(&p->p_aioinfo, AIO_MTX(ki),
2326 PRIBIO | PCATCH, "aiospn", 0);
2327 if (error == ERESTART)
2328 error = EINTR;
2329 if (error)
2330 break;
2331 }
2332 } else {
2333 if (lj->lioj_count - 1 == lj->lioj_finished_count) {
2334 if (lj->lioj_signal.sigev_notify == SIGEV_KEVENT) {
2335 lj->lioj_flags |= LIOJ_KEVENT_POSTED;
2336 KNOTE_LOCKED(&lj->klist, 1);
2337 }
2338 if ((lj->lioj_flags & (LIOJ_SIGNAL |
2339 LIOJ_SIGNAL_POSTED)) == LIOJ_SIGNAL &&
2340 (lj->lioj_signal.sigev_notify == SIGEV_SIGNAL ||
2341 lj->lioj_signal.sigev_notify == SIGEV_THREAD_ID)) {
2342 aio_sendsig(p, &lj->lioj_signal, &lj->lioj_ksi,
2343 lj->lioj_count != 1);
2344 lj->lioj_flags |= LIOJ_SIGNAL_POSTED;
2345 }
2346 }
2347 }
2348 lj->lioj_count--;
2349 if (lj->lioj_count == 0) {
2350 TAILQ_REMOVE(&ki->kaio_liojoblist, lj, lioj_list);
2351 knlist_delete(&lj->klist, curthread, 1);
2352 PROC_LOCK(p);
2353 sigqueue_take(&lj->lioj_ksi);
2354 PROC_UNLOCK(p);
2355 AIO_UNLOCK(ki);
2356 uma_zfree(aiolio_zone, lj);
2357 } else
2358 AIO_UNLOCK(ki);
2359
2360 if (nerror)
2361 return (EIO);
2362 else if (nagain)
2363 return (EAGAIN);
2364 else
2365 return (error);
2366 }
2367
2368 /* syscall - list directed I/O (REALTIME) */
2369 #ifdef COMPAT_FREEBSD6
2370 int
2371 freebsd6_lio_listio(struct thread *td, struct freebsd6_lio_listio_args *uap)
2372 {
2373 struct aiocb **acb_list;
2374 struct sigevent *sigp, sig;
2375 struct osigevent osig;
2376 int error, nent;
2377
2378 if ((uap->mode != LIO_NOWAIT) && (uap->mode != LIO_WAIT))
2379 return (EINVAL);
2380
2381 nent = uap->nent;
2382 if (nent < 0 || nent > max_aio_queue_per_proc)
2383 return (EINVAL);
2384
2385 if (uap->sig && (uap->mode == LIO_NOWAIT)) {
2386 error = copyin(uap->sig, &osig, sizeof(osig));
2387 if (error)
2388 return (error);
2389 error = convert_old_sigevent(&osig, &sig);
2390 if (error)
2391 return (error);
2392 sigp = &sig;
2393 } else
2394 sigp = NULL;
2395
2396 acb_list = malloc(sizeof(struct aiocb *) * nent, M_LIO, M_WAITOK);
2397 error = copyin(uap->acb_list, acb_list, nent * sizeof(acb_list[0]));
2398 if (error == 0)
2399 error = kern_lio_listio(td, uap->mode,
2400 (struct aiocb * const *)uap->acb_list, acb_list, nent, sigp,
2401 &aiocb_ops_osigevent);
2402 free(acb_list, M_LIO);
2403 return (error);
2404 }
2405 #endif
2406
2407 /* syscall - list directed I/O (REALTIME) */
2408 int
2409 sys_lio_listio(struct thread *td, struct lio_listio_args *uap)
2410 {
2411 struct aiocb **acb_list;
2412 struct sigevent *sigp, sig;
2413 int error, nent;
2414
2415 if ((uap->mode != LIO_NOWAIT) && (uap->mode != LIO_WAIT))
2416 return (EINVAL);
2417
2418 nent = uap->nent;
2419 if (nent < 0 || nent > max_aio_queue_per_proc)
2420 return (EINVAL);
2421
2422 if (uap->sig && (uap->mode == LIO_NOWAIT)) {
2423 error = copyin(uap->sig, &sig, sizeof(sig));
2424 if (error)
2425 return (error);
2426 sigp = &sig;
2427 } else
2428 sigp = NULL;
2429
2430 acb_list = malloc(sizeof(struct aiocb *) * nent, M_LIO, M_WAITOK);
2431 error = copyin(uap->acb_list, acb_list, nent * sizeof(acb_list[0]));
2432 if (error == 0)
2433 error = kern_lio_listio(td, uap->mode, uap->acb_list, acb_list,
2434 nent, sigp, &aiocb_ops);
2435 free(acb_list, M_LIO);
2436 return (error);
2437 }
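
/*
 * Illustrative userland sketch (not part of this file): a two-request batch
 * submitted through lio_listio() in LIO_WAIT mode, so the call only returns
 * once kern_lio_listio() above has seen every element finish.  The
 * descriptor "fd" and the offsets are assumptions.
 *
 *	#include <aio.h>
 *
 *	char a[512], b[512];
 *	struct aiocb rd = { 0 }, wr = { 0 };
 *	struct aiocb *list[2] = { &rd, &wr };
 *
 *	rd.aio_fildes = fd;	rd.aio_buf = a;	rd.aio_nbytes = sizeof(a);
 *	rd.aio_offset = 0;	rd.aio_lio_opcode = LIO_READ;
 *	wr.aio_fildes = fd;	wr.aio_buf = b;	wr.aio_nbytes = sizeof(b);
 *	wr.aio_offset = 4096;	wr.aio_lio_opcode = LIO_WRITE;
 *
 *	if (lio_listio(LIO_WAIT, list, 2, NULL) == -1)
 *		;	/* EIO means some element failed; check each with aio_error() */
 */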
2438
2439 static void
2440 aio_biocleanup(struct bio *bp)
2441 {
2442 struct kaiocb *job = (struct kaiocb *)bp->bio_caller1;
2443 struct kaioinfo *ki;
2444 struct buf *pbuf = (struct buf *)bp->bio_caller2;
2445
2446 /* Release mapping into kernel space. */
2447 if (pbuf != NULL) {
2448 MPASS(pbuf->b_npages <= atop(maxphys) + 1);
2449 pmap_qremove((vm_offset_t)pbuf->b_data, pbuf->b_npages);
2450 vm_page_unhold_pages(pbuf->b_pages, pbuf->b_npages);
2451 uma_zfree(pbuf_zone, pbuf);
2452 atomic_subtract_int(&num_buf_aio, 1);
2453 ki = job->userproc->p_aioinfo;
2454 AIO_LOCK(ki);
2455 ki->kaio_buffer_count--;
2456 AIO_UNLOCK(ki);
2457 } else {
2458 MPASS(bp->bio_ma_n <= atop(maxphys) + 1);
2459 vm_page_unhold_pages(bp->bio_ma, bp->bio_ma_n);
2460 free(bp->bio_ma, M_TEMP);
2461 atomic_subtract_int(&num_unmapped_aio, 1);
2462 }
2463 g_destroy_bio(bp);
2464 }
2465
2466 static void
2467 aio_biowakeup(struct bio *bp)
2468 {
2469 struct kaiocb *job = (struct kaiocb *)bp->bio_caller1;
2470 size_t nbytes;
2471 long bcount = bp->bio_bcount;
2472 long resid = bp->bio_resid;
2473 int error, opcode, nblks;
2474 int bio_error = bp->bio_error;
2475 uint16_t flags = bp->bio_flags;
2476
2477 opcode = job->uaiocb.aio_lio_opcode;
2478
2479 aio_biocleanup(bp);
2480
2481 nbytes = bcount - resid;
2482 atomic_add_acq_long(&job->nbytes, nbytes);
2483 nblks = btodb(nbytes);
2484 error = 0;
2485 /*
2486 * If multiple bios experienced an error, the job will reflect the
2487 * error of whichever failed bio completed last.
2488 */
2489 if (flags & BIO_ERROR)
2490 atomic_set_int(&job->error, bio_error);
2491 if (opcode & LIO_WRITE)
2492 atomic_add_int(&job->outblock, nblks);
2493 else
2494 atomic_add_int(&job->inblock, nblks);
2495 atomic_subtract_int(&job->nbio, 1);
2496
2497
2498 if (atomic_load_int(&job->nbio) == 0) {
2499 if (atomic_load_int(&job->error))
2500 aio_complete(job, -1, job->error);
2501 else
2502 aio_complete(job, atomic_load_long(&job->nbytes), 0);
2503 }
2504 }
2505
2506 /* syscall - wait for the next completion of an aio request */
2507 static int
2508 kern_aio_waitcomplete(struct thread *td, struct aiocb **ujobp,
2509 struct timespec *ts, struct aiocb_ops *ops)
2510 {
2511 struct proc *p = td->td_proc;
2512 struct timeval atv;
2513 struct kaioinfo *ki;
2514 struct kaiocb *job;
2515 struct aiocb *ujob;
2516 long error, status;
2517 int timo;
2518
2519 ops->store_aiocb(ujobp, NULL);
2520
2521 if (ts == NULL) {
2522 timo = 0;
2523 } else if (ts->tv_sec == 0 && ts->tv_nsec == 0) {
2524 timo = -1;
2525 } else {
2526 if ((ts->tv_nsec < 0) || (ts->tv_nsec >= 1000000000))
2527 return (EINVAL);
2528
2529 TIMESPEC_TO_TIMEVAL(&atv, ts);
2530 if (itimerfix(&atv))
2531 return (EINVAL);
2532 timo = tvtohz(&atv);
2533 }
2534
2535 if (p->p_aioinfo == NULL)
2536 aio_init_aioinfo(p);
2537 ki = p->p_aioinfo;
2538
2539 error = 0;
2540 job = NULL;
2541 AIO_LOCK(ki);
2542 while ((job = TAILQ_FIRST(&ki->kaio_done)) == NULL) {
2543 if (timo == -1) {
2544 error = EWOULDBLOCK;
2545 break;
2546 }
2547 ki->kaio_flags |= KAIO_WAKEUP;
2548 error = msleep(&p->p_aioinfo, AIO_MTX(ki), PRIBIO | PCATCH,
2549 "aiowc", timo);
2550 if (timo && error == ERESTART)
2551 error = EINTR;
2552 if (error)
2553 break;
2554 }
2555
2556 if (job != NULL) {
2557 MPASS(job->jobflags & KAIOCB_FINISHED);
2558 ujob = job->ujob;
2559 status = job->uaiocb._aiocb_private.status;
2560 error = job->uaiocb._aiocb_private.error;
2561 td->td_retval[0] = status;
2562 td->td_ru.ru_oublock += job->outblock;
2563 td->td_ru.ru_inblock += job->inblock;
2564 td->td_ru.ru_msgsnd += job->msgsnd;
2565 td->td_ru.ru_msgrcv += job->msgrcv;
2566 aio_free_entry(job);
2567 AIO_UNLOCK(ki);
2568 ops->store_aiocb(ujobp, ujob);
2569 ops->store_error(ujob, error);
2570 ops->store_status(ujob, status);
2571 } else
2572 AIO_UNLOCK(ki);
2573
2574 return (error);
2575 }
2576
2577 int
2578 sys_aio_waitcomplete(struct thread *td, struct aio_waitcomplete_args *uap)
2579 {
2580 struct timespec ts, *tsp;
2581 int error;
2582
2583 if (uap->timeout) {
2584 /* Get timespec struct. */
2585 error = copyin(uap->timeout, &ts, sizeof(ts));
2586 if (error)
2587 return (error);
2588 tsp = &ts;
2589 } else
2590 tsp = NULL;
2591
2592 return (kern_aio_waitcomplete(td, uap->aiocbp, tsp, &aiocb_ops));
2593 }
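
/*
 * Illustrative userland sketch (not part of this file): draining completions
 * with the FreeBSD-specific aio_waitcomplete(), which blocks until any queued
 * request finishes and hands back a pointer to its control block.  Per the
 * msleep() path above, a timeout surfaces as EWOULDBLOCK; "done" here is an
 * assumption.
 *
 *	#include <aio.h>
 *
 *	struct aiocb *done;
 *	struct timespec ts = { .tv_sec = 5, .tv_nsec = 0 };
 *	ssize_t n;
 *
 *	n = aio_waitcomplete(&done, &ts);	/* ts == NULL waits forever */
 *	if (n >= 0)
 *		;	/* "done" points at the finished aiocb, n is its result */
 */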
2594
2595 static int
2596 kern_aio_fsync(struct thread *td, int op, struct aiocb *ujob,
2597 struct aiocb_ops *ops)
2598 {
2599 int listop;
2600
2601 switch (op) {
2602 case O_SYNC:
2603 listop = LIO_SYNC;
2604 break;
2605 case O_DSYNC:
2606 listop = LIO_DSYNC;
2607 break;
2608 default:
2609 return (EINVAL);
2610 }
2611
2612 return (aio_aqueue(td, ujob, NULL, listop, ops));
2613 }
2614
2615 int
2616 sys_aio_fsync(struct thread *td, struct aio_fsync_args *uap)
2617 {
2618
2619 return (kern_aio_fsync(td, uap->op, uap->aiocbp, &aiocb_ops));
2620 }
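
/*
 * Illustrative userland sketch (not part of this file): queueing an
 * asynchronous sync of everything previously written on "fd" (an assumption)
 * and reaping it like any other request.  O_SYNC maps to LIO_SYNC and
 * O_DSYNC to LIO_DSYNC in kern_aio_fsync() above.
 *
 *	#include <aio.h>
 *	#include <errno.h>
 *	#include <fcntl.h>
 *
 *	struct aiocb sc = { 0 };
 *
 *	sc.aio_fildes = fd;
 *	if (aio_fsync(O_SYNC, &sc) == -1)
 *		;	/* EINVAL for a bad op, EAGAIN if the queue is full */
 *	while (aio_error(&sc) == EINPROGRESS)
 *		;	/* or wait with aio_suspend()/aio_waitcomplete() */
 *	(void)aio_return(&sc);
 */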
2621
2622 /* kqueue attach function */
2623 static int
2624 filt_aioattach(struct knote *kn)
2625 {
2626 struct kaiocb *job;
2627
2628 job = (struct kaiocb *)(uintptr_t)kn->kn_sdata;
2629
2630 /*
2631 * The job pointer must be validated before using it, so
2632 * registration is restricted to the kernel; the user cannot
2633 * set EV_FLAG1.
2634 */
2635 if ((kn->kn_flags & EV_FLAG1) == 0)
2636 return (EPERM);
2637 kn->kn_ptr.p_aio = job;
2638 kn->kn_flags &= ~EV_FLAG1;
2639
2640 knlist_add(&job->klist, kn, 0);
2641
2642 return (0);
2643 }
2644
2645 /* kqueue detach function */
2646 static void
2647 filt_aiodetach(struct knote *kn)
2648 {
2649 struct knlist *knl;
2650
2651 knl = &kn->kn_ptr.p_aio->klist;
2652 knl->kl_lock(knl->kl_lockarg);
2653 if (!knlist_empty(knl))
2654 knlist_remove(knl, kn, 1);
2655 knl->kl_unlock(knl->kl_lockarg);
2656 }
2657
2658 /* kqueue filter function */
2659 /*ARGSUSED*/
2660 static int
2661 filt_aio(struct knote *kn, long hint)
2662 {
2663 struct kaiocb *job = kn->kn_ptr.p_aio;
2664
2665 kn->kn_data = job->uaiocb._aiocb_private.error;
2666 if (!(job->jobflags & KAIOCB_FINISHED))
2667 return (0);
2668 kn->kn_flags |= EV_EOF;
2669 return (1);
2670 }
2671
2672 /* kqueue attach function */
2673 static int
2674 filt_lioattach(struct knote *kn)
2675 {
2676 struct aioliojob *lj;
2677
2678 lj = (struct aioliojob *)(uintptr_t)kn->kn_sdata;
2679
2680 /*
2681 * The aioliojob pointer must be validated before using it, so
2682 * registration is restricted to the kernel; the user cannot
2683 * set EV_FLAG1.
2684 */
2685 if ((kn->kn_flags & EV_FLAG1) == 0)
2686 return (EPERM);
2687 kn->kn_ptr.p_lio = lj;
2688 kn->kn_flags &= ~EV_FLAG1;
2689
2690 knlist_add(&lj->klist, kn, 0);
2691
2692 return (0);
2693 }
2694
2695 /* kqueue detach function */
2696 static void
2697 filt_liodetach(struct knote *kn)
2698 {
2699 struct knlist *knl;
2700
2701 knl = &kn->kn_ptr.p_lio->klist;
2702 knl->kl_lock(knl->kl_lockarg);
2703 if (!knlist_empty(knl))
2704 knlist_remove(knl, kn, 1);
2705 knl->kl_unlock(knl->kl_lockarg);
2706 }
2707
2708 /* kqueue filter function */
2709 /*ARGSUSED*/
2710 static int
2711 filt_lio(struct knote *kn, long hint)
2712 {
2713 struct aioliojob *lj = kn->kn_ptr.p_lio;
2714
2715 return (lj->lioj_flags & LIOJ_KEVENT_POSTED);
2716 }
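
/*
 * Illustrative userland sketch (not part of this file): completion delivery
 * through kqueue, which is what EVFILT_AIO and the filt_aio*() functions
 * above serve.  The request is tagged with SIGEV_KEVENT; when it finishes,
 * kevent() returns an event whose udata is the sigev_value set here.  "fd",
 * the buffer, and the omitted error handling are assumptions.
 *
 *	#include <sys/event.h>
 *	#include <aio.h>
 *	#include <err.h>
 *
 *	char buf[4096];
 *	int kq = kqueue();
 *	struct aiocb cb = { 0 };
 *	struct kevent ev;
 *
 *	cb.aio_fildes = fd;
 *	cb.aio_buf = buf;
 *	cb.aio_nbytes = sizeof(buf);
 *	cb.aio_sigevent.sigev_notify = SIGEV_KEVENT;
 *	cb.aio_sigevent.sigev_notify_kqueue = kq;
 *	cb.aio_sigevent.sigev_value.sival_ptr = &cb;
 *	if (aio_read(&cb) == -1)
 *		err(1, "aio_read");
 *	if (kevent(kq, NULL, 0, &ev, 1, NULL) == 1) {
 *		struct aiocb *fin = ev.udata;	/* the sival_ptr set above */
 *		(void)aio_return(fin);
 *	}
 */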
2717
2718 #ifdef COMPAT_FREEBSD32
2719 #include <sys/mount.h>
2720 #include <sys/socket.h>
2721 #include <sys/sysent.h>
2722 #include <compat/freebsd32/freebsd32.h>
2723 #include <compat/freebsd32/freebsd32_proto.h>
2724 #include <compat/freebsd32/freebsd32_signal.h>
2725 #include <compat/freebsd32/freebsd32_syscall.h>
2726 #include <compat/freebsd32/freebsd32_util.h>
2727
2728 struct __aiocb_private32 {
2729 int32_t status;
2730 int32_t error;
2731 uint32_t kernelinfo;
2732 };
2733
2734 #ifdef COMPAT_FREEBSD6
2735 typedef struct oaiocb32 {
2736 int aio_fildes; /* File descriptor */
2737 uint64_t aio_offset __packed; /* File offset for I/O */
2738 uint32_t aio_buf; /* I/O buffer in process space */
2739 uint32_t aio_nbytes; /* Number of bytes for I/O */
2740 struct osigevent32 aio_sigevent; /* Signal to deliver */
2741 int aio_lio_opcode; /* LIO opcode */
2742 int aio_reqprio; /* Request priority -- ignored */
2743 struct __aiocb_private32 _aiocb_private;
2744 } oaiocb32_t;
2745 #endif
2746
2747 typedef struct aiocb32 {
2748 int32_t aio_fildes; /* File descriptor */
2749 uint64_t aio_offset __packed; /* File offset for I/O */
2750 uint32_t aio_buf; /* I/O buffer in process space */
2751 uint32_t aio_nbytes; /* Number of bytes for I/O */
2752 int __spare__[2];
2753 uint32_t __spare2__;
2754 int aio_lio_opcode; /* LIO opcode */
2755 int aio_reqprio; /* Request priority -- ignored */
2756 struct __aiocb_private32 _aiocb_private;
2757 struct sigevent32 aio_sigevent; /* Signal to deliver */
2758 } aiocb32_t;
2759
2760 #ifdef COMPAT_FREEBSD6
2761 static int
2762 convert_old_sigevent32(struct osigevent32 *osig, struct sigevent *nsig)
2763 {
2764
2765 /*
2766 * Only SIGEV_NONE, SIGEV_SIGNAL, and SIGEV_KEVENT are
2767 * supported by AIO with the old sigevent structure.
2768 */
2769 CP(*osig, *nsig, sigev_notify);
2770 switch (nsig->sigev_notify) {
2771 case SIGEV_NONE:
2772 break;
2773 case SIGEV_SIGNAL:
2774 nsig->sigev_signo = osig->__sigev_u.__sigev_signo;
2775 break;
2776 case SIGEV_KEVENT:
2777 nsig->sigev_notify_kqueue =
2778 osig->__sigev_u.__sigev_notify_kqueue;
2779 PTRIN_CP(*osig, *nsig, sigev_value.sival_ptr);
2780 break;
2781 default:
2782 return (EINVAL);
2783 }
2784 return (0);
2785 }
2786
2787 static int
2788 aiocb32_copyin_old_sigevent(struct aiocb *ujob, struct kaiocb *kjob,
2789 int type __unused)
2790 {
2791 struct oaiocb32 job32;
2792 struct aiocb *kcb = &kjob->uaiocb;
2793 int error;
2794
2795 bzero(kcb, sizeof(struct aiocb));
2796 error = copyin(ujob, &job32, sizeof(job32));
2797 if (error)
2798 return (error);
2799
2800 /* No need to copyin aio_iov, because it did not exist in FreeBSD 6 */
2801
2802 CP(job32, *kcb, aio_fildes);
2803 CP(job32, *kcb, aio_offset);
2804 PTRIN_CP(job32, *kcb, aio_buf);
2805 CP(job32, *kcb, aio_nbytes);
2806 CP(job32, *kcb, aio_lio_opcode);
2807 CP(job32, *kcb, aio_reqprio);
2808 CP(job32, *kcb, _aiocb_private.status);
2809 CP(job32, *kcb, _aiocb_private.error);
2810 PTRIN_CP(job32, *kcb, _aiocb_private.kernelinfo);
2811 return (convert_old_sigevent32(&job32.aio_sigevent,
2812 &kcb->aio_sigevent));
2813 }
2814 #endif
2815
2816 static int
2817 aiocb32_copyin(struct aiocb *ujob, struct kaiocb *kjob, int type)
2818 {
2819 struct aiocb32 job32;
2820 struct aiocb *kcb = &kjob->uaiocb;
2821 struct iovec32 *iov32;
2822 int error;
2823
2824 error = copyin(ujob, &job32, sizeof(job32));
2825 if (error)
2826 return (error);
2827 CP(job32, *kcb, aio_fildes);
2828 CP(job32, *kcb, aio_offset);
2829 CP(job32, *kcb, aio_lio_opcode);
2830 if (type & LIO_VECTORED) {
2831 iov32 = PTRIN(job32.aio_iov);
2832 CP(job32, *kcb, aio_iovcnt);
2833 /* malloc a uio and copy in the iovec */
2834 error = freebsd32_copyinuio(iov32,
2835 kcb->aio_iovcnt, &kjob->uiop);
2836 if (error)
2837 return (error);
2838 } else {
2839 PTRIN_CP(job32, *kcb, aio_buf);
2840 CP(job32, *kcb, aio_nbytes);
2841 }
2842 CP(job32, *kcb, aio_reqprio);
2843 CP(job32, *kcb, _aiocb_private.status);
2844 CP(job32, *kcb, _aiocb_private.error);
2845 PTRIN_CP(job32, *kcb, _aiocb_private.kernelinfo);
2846 error = convert_sigevent32(&job32.aio_sigevent, &kcb->aio_sigevent);
2847
2848 return (error);
2849 }
2850
2851 static long
2852 aiocb32_fetch_status(struct aiocb *ujob)
2853 {
2854 struct aiocb32 *ujob32;
2855
2856 ujob32 = (struct aiocb32 *)ujob;
2857 return (fuword32(&ujob32->_aiocb_private.status));
2858 }
2859
2860 static long
2861 aiocb32_fetch_error(struct aiocb *ujob)
2862 {
2863 struct aiocb32 *ujob32;
2864
2865 ujob32 = (struct aiocb32 *)ujob;
2866 return (fuword32(&ujob32->_aiocb_private.error));
2867 }
2868
2869 static int
2870 aiocb32_store_status(struct aiocb *ujob, long status)
2871 {
2872 struct aiocb32 *ujob32;
2873
2874 ujob32 = (struct aiocb32 *)ujob;
2875 return (suword32(&ujob32->_aiocb_private.status, status));
2876 }
2877
2878 static int
2879 aiocb32_store_error(struct aiocb *ujob, long error)
2880 {
2881 struct aiocb32 *ujob32;
2882
2883 ujob32 = (struct aiocb32 *)ujob;
2884 return (suword32(&ujob32->_aiocb_private.error, error));
2885 }
2886
2887 static int
2888 aiocb32_store_kernelinfo(struct aiocb *ujob, long jobref)
2889 {
2890 struct aiocb32 *ujob32;
2891
2892 ujob32 = (struct aiocb32 *)ujob;
2893 return (suword32(&ujob32->_aiocb_private.kernelinfo, jobref));
2894 }
2895
2896 static int
2897 aiocb32_store_aiocb(struct aiocb **ujobp, struct aiocb *ujob)
2898 {
2899
2900 return (suword32(ujobp, (long)ujob));
2901 }
2902
2903 static struct aiocb_ops aiocb32_ops = {
2904 .aio_copyin = aiocb32_copyin,
2905 .fetch_status = aiocb32_fetch_status,
2906 .fetch_error = aiocb32_fetch_error,
2907 .store_status = aiocb32_store_status,
2908 .store_error = aiocb32_store_error,
2909 .store_kernelinfo = aiocb32_store_kernelinfo,
2910 .store_aiocb = aiocb32_store_aiocb,
2911 };
2912
2913 #ifdef COMPAT_FREEBSD6
2914 static struct aiocb_ops aiocb32_ops_osigevent = {
2915 .aio_copyin = aiocb32_copyin_old_sigevent,
2916 .fetch_status = aiocb32_fetch_status,
2917 .fetch_error = aiocb32_fetch_error,
2918 .store_status = aiocb32_store_status,
2919 .store_error = aiocb32_store_error,
2920 .store_kernelinfo = aiocb32_store_kernelinfo,
2921 .store_aiocb = aiocb32_store_aiocb,
2922 };
2923 #endif
2924
2925 int
2926 freebsd32_aio_return(struct thread *td, struct freebsd32_aio_return_args *uap)
2927 {
2928
2929 return (kern_aio_return(td, (struct aiocb *)uap->aiocbp, &aiocb32_ops));
2930 }
2931
2932 int
2933 freebsd32_aio_suspend(struct thread *td, struct freebsd32_aio_suspend_args *uap)
2934 {
2935 struct timespec32 ts32;
2936 struct timespec ts, *tsp;
2937 struct aiocb **ujoblist;
2938 uint32_t *ujoblist32;
2939 int error, i;
2940
2941 if (uap->nent < 0 || uap->nent > max_aio_queue_per_proc)
2942 return (EINVAL);
2943
2944 if (uap->timeout) {
2945 /* Get timespec struct. */
2946 if ((error = copyin(uap->timeout, &ts32, sizeof(ts32))) != 0)
2947 return (error);
2948 CP(ts32, ts, tv_sec);
2949 CP(ts32, ts, tv_nsec);
2950 tsp = &ts;
2951 } else
2952 tsp = NULL;
2953
2954 ujoblist = malloc(uap->nent * sizeof(ujoblist[0]), M_AIO, M_WAITOK);
2955 ujoblist32 = (uint32_t *)ujoblist;
2956 error = copyin(uap->aiocbp, ujoblist32, uap->nent *
2957 sizeof(ujoblist32[0]));
2958 if (error == 0) {
2959 for (i = uap->nent - 1; i >= 0; i--)
2960 ujoblist[i] = PTRIN(ujoblist32[i]);
2961
2962 error = kern_aio_suspend(td, uap->nent, ujoblist, tsp);
2963 }
2964 free(ujoblist, M_AIO);
2965 return (error);
2966 }
2967
2968 int
2969 freebsd32_aio_error(struct thread *td, struct freebsd32_aio_error_args *uap)
2970 {
2971
2972 return (kern_aio_error(td, (struct aiocb *)uap->aiocbp, &aiocb32_ops));
2973 }
2974
2975 #ifdef COMPAT_FREEBSD6
2976 int
2977 freebsd6_freebsd32_aio_read(struct thread *td,
2978 struct freebsd6_freebsd32_aio_read_args *uap)
2979 {
2980
2981 return (aio_aqueue(td, (struct aiocb *)uap->aiocbp, NULL, LIO_READ,
2982 &aiocb32_ops_osigevent));
2983 }
2984 #endif
2985
2986 int
2987 freebsd32_aio_read(struct thread *td, struct freebsd32_aio_read_args *uap)
2988 {
2989
2990 return (aio_aqueue(td, (struct aiocb *)uap->aiocbp, NULL, LIO_READ,
2991 &aiocb32_ops));
2992 }
2993
2994 int
2995 freebsd32_aio_readv(struct thread *td, struct freebsd32_aio_readv_args *uap)
2996 {
2997
2998 return (aio_aqueue(td, (struct aiocb *)uap->aiocbp, NULL, LIO_READV,
2999 &aiocb32_ops));
3000 }
3001
3002 #ifdef COMPAT_FREEBSD6
3003 int
3004 freebsd6_freebsd32_aio_write(struct thread *td,
3005 struct freebsd6_freebsd32_aio_write_args *uap)
3006 {
3007
3008 return (aio_aqueue(td, (struct aiocb *)uap->aiocbp, NULL, LIO_WRITE,
3009 &aiocb32_ops_osigevent));
3010 }
3011 #endif
3012
3013 int
3014 freebsd32_aio_write(struct thread *td, struct freebsd32_aio_write_args *uap)
3015 {
3016
3017 return (aio_aqueue(td, (struct aiocb *)uap->aiocbp, NULL, LIO_WRITE,
3018 &aiocb32_ops));
3019 }
3020
3021 int
3022 freebsd32_aio_writev(struct thread *td, struct freebsd32_aio_writev_args *uap)
3023 {
3024
3025 return (aio_aqueue(td, (struct aiocb *)uap->aiocbp, NULL, LIO_WRITEV,
3026 &aiocb32_ops));
3027 }
3028
3029 int
3030 freebsd32_aio_mlock(struct thread *td, struct freebsd32_aio_mlock_args *uap)
3031 {
3032
3033 return (aio_aqueue(td, (struct aiocb *)uap->aiocbp, NULL, LIO_MLOCK,
3034 &aiocb32_ops));
3035 }
3036
3037 int
3038 freebsd32_aio_waitcomplete(struct thread *td,
3039 struct freebsd32_aio_waitcomplete_args *uap)
3040 {
3041 struct timespec32 ts32;
3042 struct timespec ts, *tsp;
3043 int error;
3044
3045 if (uap->timeout) {
3046 /* Get timespec struct. */
3047 error = copyin(uap->timeout, &ts32, sizeof(ts32));
3048 if (error)
3049 return (error);
3050 CP(ts32, ts, tv_sec);
3051 CP(ts32, ts, tv_nsec);
3052 tsp = &ts;
3053 } else
3054 tsp = NULL;
3055
3056 return (kern_aio_waitcomplete(td, (struct aiocb **)uap->aiocbp, tsp,
3057 &aiocb32_ops));
3058 }
3059
3060 int
3061 freebsd32_aio_fsync(struct thread *td, struct freebsd32_aio_fsync_args *uap)
3062 {
3063
3064 return (kern_aio_fsync(td, uap->op, (struct aiocb *)uap->aiocbp,
3065 &aiocb32_ops));
3066 }
3067
3068 #ifdef COMPAT_FREEBSD6
3069 int
3070 freebsd6_freebsd32_lio_listio(struct thread *td,
3071 struct freebsd6_freebsd32_lio_listio_args *uap)
3072 {
3073 struct aiocb **acb_list;
3074 struct sigevent *sigp, sig;
3075 struct osigevent32 osig;
3076 uint32_t *acb_list32;
3077 int error, i, nent;
3078
3079 if ((uap->mode != LIO_NOWAIT) && (uap->mode != LIO_WAIT))
3080 return (EINVAL);
3081
3082 nent = uap->nent;
3083 if (nent < 0 || nent > max_aio_queue_per_proc)
3084 return (EINVAL);
3085
3086 if (uap->sig && (uap->mode == LIO_NOWAIT)) {
3087 error = copyin(uap->sig, &osig, sizeof(osig));
3088 if (error)
3089 return (error);
3090 error = convert_old_sigevent32(&osig, &sig);
3091 if (error)
3092 return (error);
3093 sigp = &sig;
3094 } else
3095 sigp = NULL;
3096
3097 acb_list32 = malloc(sizeof(uint32_t) * nent, M_LIO, M_WAITOK);
3098 error = copyin(uap->acb_list, acb_list32, nent * sizeof(uint32_t));
3099 if (error) {
3100 free(acb_list32, M_LIO);
3101 return (error);
3102 }
3103 acb_list = malloc(sizeof(struct aiocb *) * nent, M_LIO, M_WAITOK);
3104 for (i = 0; i < nent; i++)
3105 acb_list[i] = PTRIN(acb_list32[i]);
3106 free(acb_list32, M_LIO);
3107
3108 error = kern_lio_listio(td, uap->mode,
3109 (struct aiocb * const *)uap->acb_list, acb_list, nent, sigp,
3110 &aiocb32_ops_osigevent);
3111 free(acb_list, M_LIO);
3112 return (error);
3113 }
3114 #endif
3115
3116 int
3117 freebsd32_lio_listio(struct thread *td, struct freebsd32_lio_listio_args *uap)
3118 {
3119 struct aiocb **acb_list;
3120 struct sigevent *sigp, sig;
3121 struct sigevent32 sig32;
3122 uint32_t *acb_list32;
3123 int error, i, nent;
3124
3125 if ((uap->mode != LIO_NOWAIT) && (uap->mode != LIO_WAIT))
3126 return (EINVAL);
3127
3128 nent = uap->nent;
3129 if (nent < 0 || nent > max_aio_queue_per_proc)
3130 return (EINVAL);
3131
3132 if (uap->sig && (uap->mode == LIO_NOWAIT)) {
3133 error = copyin(uap->sig, &sig32, sizeof(sig32));
3134 if (error)
3135 return (error);
3136 error = convert_sigevent32(&sig32, &sig);
3137 if (error)
3138 return (error);
3139 sigp = &sig;
3140 } else
3141 sigp = NULL;
3142
3143 acb_list32 = malloc(sizeof(uint32_t) * nent, M_LIO, M_WAITOK);
3144 error = copyin(uap->acb_list, acb_list32, nent * sizeof(uint32_t));
3145 if (error) {
3146 free(acb_list32, M_LIO);
3147 return (error);
3148 }
3149 acb_list = malloc(sizeof(struct aiocb *) * nent, M_LIO, M_WAITOK);
3150 for (i = 0; i < nent; i++)
3151 acb_list[i] = PTRIN(acb_list32[i]);
3152 free(acb_list32, M_LIO);
3153
3154 error = kern_lio_listio(td, uap->mode,
3155 (struct aiocb * const *)uap->acb_list, acb_list, nent, sigp,
3156 &aiocb32_ops);
3157 free(acb_list, M_LIO);
3158 return (error);
3159 }
3160
3161 #endif