sys/kern/vfs_aio.c
1 /*-
2 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
3 *
4 * Copyright (c) 1997 John S. Dyson. All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. John S. Dyson's name may not be used to endorse or promote products
12 * derived from this software without specific prior written permission.
13 *
14 * DISCLAIMER: This code isn't warranted to do anything useful. Anything
15 * bad that happens because of using this software isn't the responsibility
16 * of the author. This software is distributed AS-IS.
17 */
18
19 /*
20 * This file contains support for the POSIX 1003.1B AIO/LIO facility.
21 */
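/*
 * Editor's sketch (not part of the original source): the userland view of
 * one request's lifecycle through this facility, assuming an already-open
 * descriptor fd and with error handling elided.
 *
 *	#include <aio.h>
 *	#include <string.h>
 *
 *	char buf[512];
 *	struct aiocb cb;
 *	const struct aiocb *list[1];
 *
 *	memset(&cb, 0, sizeof(cb));
 *	cb.aio_fildes = fd;
 *	cb.aio_buf = buf;
 *	cb.aio_nbytes = sizeof(buf);
 *	cb.aio_offset = 0;
 *	aio_read(&cb);			enqueue; returns immediately
 *	list[0] = &cb;
 *	aio_suspend(list, 1, NULL);	sleep until the request completes
 *	ssize_t n = aio_return(&cb);	reap the result, freeing kernel state
 */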
22
23 #include <sys/cdefs.h>
24 __FBSDID("$FreeBSD$");
25
26 #include <sys/param.h>
27 #include <sys/systm.h>
28 #include <sys/malloc.h>
29 #include <sys/bio.h>
30 #include <sys/buf.h>
31 #include <sys/capsicum.h>
32 #include <sys/eventhandler.h>
33 #include <sys/sysproto.h>
34 #include <sys/filedesc.h>
35 #include <sys/kernel.h>
36 #include <sys/module.h>
37 #include <sys/kthread.h>
38 #include <sys/fcntl.h>
39 #include <sys/file.h>
40 #include <sys/limits.h>
41 #include <sys/lock.h>
42 #include <sys/mutex.h>
43 #include <sys/unistd.h>
44 #include <sys/posix4.h>
45 #include <sys/proc.h>
46 #include <sys/resourcevar.h>
47 #include <sys/signalvar.h>
48 #include <sys/syscallsubr.h>
49 #include <sys/protosw.h>
50 #include <sys/rwlock.h>
51 #include <sys/sema.h>
52 #include <sys/socket.h>
53 #include <sys/socketvar.h>
54 #include <sys/syscall.h>
55 #include <sys/sysctl.h>
56 #include <sys/syslog.h>
57 #include <sys/sx.h>
58 #include <sys/taskqueue.h>
59 #include <sys/vnode.h>
60 #include <sys/conf.h>
61 #include <sys/event.h>
62 #include <sys/mount.h>
63 #include <geom/geom.h>
64
65 #include <machine/atomic.h>
66
67 #include <vm/vm.h>
68 #include <vm/vm_page.h>
69 #include <vm/vm_extern.h>
70 #include <vm/pmap.h>
71 #include <vm/vm_map.h>
72 #include <vm/vm_object.h>
73 #include <vm/uma.h>
74 #include <sys/aio.h>
75
76 /*
77 * Counter for allocating reference ids to new jobs. Wrapped to 1 on
78 * overflow. (XXX will be removed soon.)
79 */
80 static u_long jobrefid;
81
82 /*
83 * Counter for aio_fsync.
84 */
85 static uint64_t jobseqno;
86
87 #ifndef MAX_AIO_PER_PROC
88 #define MAX_AIO_PER_PROC 32
89 #endif
90
91 #ifndef MAX_AIO_QUEUE_PER_PROC
92 #define MAX_AIO_QUEUE_PER_PROC 256
93 #endif
94
95 #ifndef MAX_AIO_QUEUE
96 #define MAX_AIO_QUEUE 1024 /* Bigger than MAX_AIO_QUEUE_PER_PROC */
97 #endif
98
99 #ifndef MAX_BUF_AIO
100 #define MAX_BUF_AIO 16
101 #endif
102
103 FEATURE(aio, "Asynchronous I/O");
104 SYSCTL_DECL(_p1003_1b);
105
106 static MALLOC_DEFINE(M_LIO, "lio", "listio aio control block list");
107 static MALLOC_DEFINE(M_AIO, "aio", "structures for asynchronous I/O");
108
109 static SYSCTL_NODE(_vfs, OID_AUTO, aio, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
110 "Async IO management");
111
112 static int enable_aio_unsafe = 0;
113 SYSCTL_INT(_vfs_aio, OID_AUTO, enable_unsafe, CTLFLAG_RW, &enable_aio_unsafe, 0,
114 "Permit asynchronous IO on all file types, not just known-safe types");
115
116 static unsigned int unsafe_warningcnt = 1;
117 SYSCTL_UINT(_vfs_aio, OID_AUTO, unsafe_warningcnt, CTLFLAG_RW,
118 &unsafe_warningcnt, 0,
119 "Warnings that will be triggered upon failed IO requests on unsafe files");
120
121 static int max_aio_procs = MAX_AIO_PROCS;
122 SYSCTL_INT(_vfs_aio, OID_AUTO, max_aio_procs, CTLFLAG_RW, &max_aio_procs, 0,
123 "Maximum number of kernel processes to use for handling async IO ");
124
125 static int num_aio_procs = 0;
126 SYSCTL_INT(_vfs_aio, OID_AUTO, num_aio_procs, CTLFLAG_RD, &num_aio_procs, 0,
127 "Number of presently active kernel processes for async IO");
128
129 /*
130 * The code will adjust the actual number of AIO processes towards this
131 * number when it gets a chance.
132 */
133 static int target_aio_procs = TARGET_AIO_PROCS;
134 SYSCTL_INT(_vfs_aio, OID_AUTO, target_aio_procs, CTLFLAG_RW, &target_aio_procs,
135 0,
136 "Preferred number of ready kernel processes for async IO");
137
138 static int max_queue_count = MAX_AIO_QUEUE;
139 SYSCTL_INT(_vfs_aio, OID_AUTO, max_aio_queue, CTLFLAG_RW, &max_queue_count, 0,
140 "Maximum number of aio requests to queue, globally");
141
142 static int num_queue_count = 0;
143 SYSCTL_INT(_vfs_aio, OID_AUTO, num_queue_count, CTLFLAG_RD, &num_queue_count, 0,
144 "Number of queued aio requests");
145
146 static int num_buf_aio = 0;
147 SYSCTL_INT(_vfs_aio, OID_AUTO, num_buf_aio, CTLFLAG_RD, &num_buf_aio, 0,
148 "Number of aio requests presently handled by the buf subsystem");
149
150 static int num_unmapped_aio = 0;
151 SYSCTL_INT(_vfs_aio, OID_AUTO, num_unmapped_aio, CTLFLAG_RD, &num_unmapped_aio,
152 0,
153 "Number of aio requests presently handled by unmapped I/O buffers");
154
155 /* Number of async I/O processes in the process of being started */
156 /* XXX This should be local to aio_aqueue() */
157 static int num_aio_resv_start = 0;
158
159 static int aiod_lifetime;
160 SYSCTL_INT(_vfs_aio, OID_AUTO, aiod_lifetime, CTLFLAG_RW, &aiod_lifetime, 0,
161 "Maximum lifetime for idle aiod");
162
163 static int max_aio_per_proc = MAX_AIO_PER_PROC;
164 SYSCTL_INT(_vfs_aio, OID_AUTO, max_aio_per_proc, CTLFLAG_RW, &max_aio_per_proc,
165 0,
166 "Maximum active aio requests per process");
167
168 static int max_aio_queue_per_proc = MAX_AIO_QUEUE_PER_PROC;
169 SYSCTL_INT(_vfs_aio, OID_AUTO, max_aio_queue_per_proc, CTLFLAG_RW,
170 &max_aio_queue_per_proc, 0,
171 "Maximum queued aio requests per process");
172
173 static int max_buf_aio = MAX_BUF_AIO;
174 SYSCTL_INT(_vfs_aio, OID_AUTO, max_buf_aio, CTLFLAG_RW, &max_buf_aio, 0,
175 "Maximum buf aio requests per process");
176
177 /*
178 * Though redundant with vfs.aio.max_aio_queue_per_proc, POSIX requires
179 * sysconf(3) to support AIO_LISTIO_MAX, and we implement that with
180  * p1003_1b.aio_listio_max.
181 */
182 SYSCTL_INT(_p1003_1b, CTL_P1003_1B_AIO_LISTIO_MAX, aio_listio_max,
183 CTLFLAG_RD | CTLFLAG_CAPRD, &max_aio_queue_per_proc,
184 0, "Maximum aio requests for a single lio_listio call");
185
186 #ifdef COMPAT_FREEBSD6
187 typedef struct oaiocb {
188 int aio_fildes; /* File descriptor */
189 off_t aio_offset; /* File offset for I/O */
190 volatile void *aio_buf; /* I/O buffer in process space */
191 size_t aio_nbytes; /* Number of bytes for I/O */
192 struct osigevent aio_sigevent; /* Signal to deliver */
193 int aio_lio_opcode; /* LIO opcode */
194 int aio_reqprio; /* Request priority -- ignored */
195 struct __aiocb_private _aiocb_private;
196 } oaiocb_t;
197 #endif
198
199 /*
200  * Below is a key of the locks used to protect each member of struct
201  * kaiocb, aioliojob, and kaioinfo, and any backends.
202  *
203  * * - need not be protected
204  * a - locked by the kaioinfo lock
205  * b - locked by the backend lock; the backend lock can be NULL in some
206  *     cases (for example, BIO-backed jobs), in which case the proc
207  *     lock is reused.
208 * c - locked by aio_job_mtx, the lock for the generic file I/O backend.
209 */
210
211 /*
212 * If the routine that services an AIO request blocks while running in an
213 * AIO kernel process it can starve other I/O requests. BIO requests
214 * queued via aio_qbio() complete asynchronously and do not use AIO kernel
215 * processes at all. Socket I/O requests use a separate pool of
216 * kprocs and also force non-blocking I/O. Other file I/O requests
217 * use the generic fo_read/fo_write operations which can block. The
218 * fsync and mlock operations can also block while executing. Ideally
219 * none of these requests would block while executing.
220 *
221 * Note that the service routines cannot toggle O_NONBLOCK in the file
222 * structure directly while handling a request due to races with
223 * userland threads.
224 */
225
226 /* jobflags */
227 #define KAIOCB_QUEUEING 0x01
228 #define KAIOCB_CANCELLED 0x02
229 #define KAIOCB_CANCELLING 0x04
230 #define KAIOCB_CHECKSYNC 0x08
231 #define KAIOCB_CLEARED 0x10
232 #define KAIOCB_FINISHED 0x20
233
234 /*
235 * AIO process info
236 */
237 #define AIOP_FREE 0x1 /* proc on free queue */
238
239 struct aioproc {
240 int aioprocflags; /* (c) AIO proc flags */
241 TAILQ_ENTRY(aioproc) list; /* (c) list of processes */
242 struct proc *aioproc; /* (*) the AIO proc */
243 };
244
245 /*
246 * data-structure for lio signal management
247 */
248 struct aioliojob {
249 int lioj_flags; /* (a) listio flags */
250 int lioj_count; /* (a) count of jobs */
251 int lioj_finished_count; /* (a) count of finished jobs */
252 struct sigevent lioj_signal; /* (a) signal on all I/O done */
253 TAILQ_ENTRY(aioliojob) lioj_list; /* (a) lio list */
254 struct knlist klist; /* (a) list of knotes */
255 ksiginfo_t lioj_ksi; /* (a) Realtime signal info */
256 };
257
258 #define LIOJ_SIGNAL 0x1 /* signal on all done (lio) */
259 #define LIOJ_SIGNAL_POSTED 0x2 /* signal has been posted */
260 #define LIOJ_KEVENT_POSTED 0x4 /* kevent triggered */
261
262 /*
263 * per process aio data structure
264 */
265 struct kaioinfo {
266 struct mtx kaio_mtx; /* the lock to protect this struct */
267 int kaio_flags; /* (a) per process kaio flags */
268 int kaio_active_count; /* (c) number of currently used AIOs */
269 int kaio_count; /* (a) size of AIO queue */
270 int kaio_buffer_count; /* (a) number of bio buffers */
271 TAILQ_HEAD(,kaiocb) kaio_all; /* (a) all AIOs in a process */
272 TAILQ_HEAD(,kaiocb) kaio_done; /* (a) done queue for process */
273 TAILQ_HEAD(,aioliojob) kaio_liojoblist; /* (a) list of lio jobs */
274 TAILQ_HEAD(,kaiocb) kaio_jobqueue; /* (a) job queue for process */
275 TAILQ_HEAD(,kaiocb) kaio_syncqueue; /* (a) queue for aio_fsync */
276 TAILQ_HEAD(,kaiocb) kaio_syncready; /* (a) second q for aio_fsync */
277 struct task kaio_task; /* (*) task to kick aio processes */
278 struct task kaio_sync_task; /* (*) task to schedule fsync jobs */
279 };
280
281 #define AIO_LOCK(ki) mtx_lock(&(ki)->kaio_mtx)
282 #define AIO_UNLOCK(ki) mtx_unlock(&(ki)->kaio_mtx)
283 #define AIO_LOCK_ASSERT(ki, f) mtx_assert(&(ki)->kaio_mtx, (f))
284 #define AIO_MTX(ki) (&(ki)->kaio_mtx)
285
286 #define KAIO_RUNDOWN 0x1 /* process is being run down */
287 #define KAIO_WAKEUP 0x2 /* wakeup process when AIO completes */
288
289 /*
290 * Operations used to interact with userland aio control blocks.
291 * Different ABIs provide their own operations.
292 */
293 struct aiocb_ops {
294 	int	(*aio_copyin)(struct aiocb *ujob, struct kaiocb *kjob, int type);
295 long (*fetch_status)(struct aiocb *ujob);
296 long (*fetch_error)(struct aiocb *ujob);
297 int (*store_status)(struct aiocb *ujob, long status);
298 int (*store_error)(struct aiocb *ujob, long error);
299 int (*store_kernelinfo)(struct aiocb *ujob, long jobref);
300 int (*store_aiocb)(struct aiocb **ujobp, struct aiocb *ujob);
301 };
302
303 static TAILQ_HEAD(,aioproc) aio_freeproc; /* (c) Idle daemons */
304 static struct sema aio_newproc_sem;
305 static struct mtx aio_job_mtx;
306 static TAILQ_HEAD(,kaiocb) aio_jobs; /* (c) Async job list */
307 static struct unrhdr *aiod_unr;
308
309 static void aio_biocleanup(struct bio *bp);
310 void aio_init_aioinfo(struct proc *p);
311 static int aio_onceonly(void);
312 static int aio_free_entry(struct kaiocb *job);
313 static void aio_process_rw(struct kaiocb *job);
314 static void aio_process_sync(struct kaiocb *job);
315 static void aio_process_mlock(struct kaiocb *job);
316 static void aio_schedule_fsync(void *context, int pending);
317 static int aio_newproc(int *);
318 int aio_aqueue(struct thread *td, struct aiocb *ujob,
319 struct aioliojob *lio, int type, struct aiocb_ops *ops);
320 static int aio_queue_file(struct file *fp, struct kaiocb *job);
321 static void aio_biowakeup(struct bio *bp);
322 static void aio_proc_rundown(void *arg, struct proc *p);
323 static void aio_proc_rundown_exec(void *arg, struct proc *p,
324 struct image_params *imgp);
325 static int aio_qbio(struct proc *p, struct kaiocb *job);
326 static void aio_daemon(void *param);
327 static void aio_bio_done_notify(struct proc *userp, struct kaiocb *job);
328 static bool aio_clear_cancel_function_locked(struct kaiocb *job);
329 static int aio_kick(struct proc *userp);
330 static void aio_kick_nowait(struct proc *userp);
331 static void aio_kick_helper(void *context, int pending);
332 static int filt_aioattach(struct knote *kn);
333 static void filt_aiodetach(struct knote *kn);
334 static int filt_aio(struct knote *kn, long hint);
335 static int filt_lioattach(struct knote *kn);
336 static void filt_liodetach(struct knote *kn);
337 static int filt_lio(struct knote *kn, long hint);
338
339 /*
340 * Zones for:
341 * kaio Per process async io info
342 * aiocb async io jobs
343 * aiolio list io jobs
344 */
345 static uma_zone_t kaio_zone, aiocb_zone, aiolio_zone;
346
347 /* kqueue filters for aio */
348 static struct filterops aio_filtops = {
349 .f_isfd = 0,
350 .f_attach = filt_aioattach,
351 .f_detach = filt_aiodetach,
352 .f_event = filt_aio,
353 };
354 static struct filterops lio_filtops = {
355 .f_isfd = 0,
356 .f_attach = filt_lioattach,
357 .f_detach = filt_liodetach,
358 .f_event = filt_lio
359 };
360
361 static eventhandler_tag exit_tag, exec_tag;
362
363 TASKQUEUE_DEFINE_THREAD(aiod_kick);
364
365 /*
366 * Main operations function for use as a kernel module.
367 */
368 static int
369 aio_modload(struct module *module, int cmd, void *arg)
370 {
371 int error = 0;
372
373 switch (cmd) {
374 case MOD_LOAD:
375 aio_onceonly();
376 break;
377 case MOD_SHUTDOWN:
378 break;
379 default:
380 error = EOPNOTSUPP;
381 break;
382 }
383 return (error);
384 }
385
386 static moduledata_t aio_mod = {
387 "aio",
388 &aio_modload,
389 NULL
390 };
391
392 DECLARE_MODULE(aio, aio_mod, SI_SUB_VFS, SI_ORDER_ANY);
393 MODULE_VERSION(aio, 1);
394
395 /*
396 * Startup initialization
397 */
398 static int
399 aio_onceonly(void)
400 {
401
402 exit_tag = EVENTHANDLER_REGISTER(process_exit, aio_proc_rundown, NULL,
403 EVENTHANDLER_PRI_ANY);
404 exec_tag = EVENTHANDLER_REGISTER(process_exec, aio_proc_rundown_exec,
405 NULL, EVENTHANDLER_PRI_ANY);
406 kqueue_add_filteropts(EVFILT_AIO, &aio_filtops);
407 kqueue_add_filteropts(EVFILT_LIO, &lio_filtops);
408 TAILQ_INIT(&aio_freeproc);
409 sema_init(&aio_newproc_sem, 0, "aio_new_proc");
410 mtx_init(&aio_job_mtx, "aio_job", NULL, MTX_DEF);
411 TAILQ_INIT(&aio_jobs);
412 aiod_unr = new_unrhdr(1, INT_MAX, NULL);
413 kaio_zone = uma_zcreate("AIO", sizeof(struct kaioinfo), NULL, NULL,
414 NULL, NULL, UMA_ALIGN_PTR, 0);
415 aiocb_zone = uma_zcreate("AIOCB", sizeof(struct kaiocb), NULL, NULL,
416 NULL, NULL, UMA_ALIGN_PTR, 0);
417 aiolio_zone = uma_zcreate("AIOLIO", sizeof(struct aioliojob), NULL,
418 NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
419 aiod_lifetime = AIOD_LIFETIME_DEFAULT;
420 jobrefid = 1;
421 p31b_setcfg(CTL_P1003_1B_ASYNCHRONOUS_IO, _POSIX_ASYNCHRONOUS_IO);
422 p31b_setcfg(CTL_P1003_1B_AIO_MAX, MAX_AIO_QUEUE);
423 p31b_setcfg(CTL_P1003_1B_AIO_PRIO_DELTA_MAX, 0);
424
425 return (0);
426 }
427
428 /*
429 * Init the per-process aioinfo structure. The aioinfo limits are set
430 * per-process for user limit (resource) management.
431 */
432 void
433 aio_init_aioinfo(struct proc *p)
434 {
435 struct kaioinfo *ki;
436
437 ki = uma_zalloc(kaio_zone, M_WAITOK);
438 mtx_init(&ki->kaio_mtx, "aiomtx", NULL, MTX_DEF | MTX_NEW);
439 ki->kaio_flags = 0;
440 ki->kaio_active_count = 0;
441 ki->kaio_count = 0;
442 ki->kaio_buffer_count = 0;
443 TAILQ_INIT(&ki->kaio_all);
444 TAILQ_INIT(&ki->kaio_done);
445 TAILQ_INIT(&ki->kaio_jobqueue);
446 TAILQ_INIT(&ki->kaio_liojoblist);
447 TAILQ_INIT(&ki->kaio_syncqueue);
448 TAILQ_INIT(&ki->kaio_syncready);
449 TASK_INIT(&ki->kaio_task, 0, aio_kick_helper, p);
450 TASK_INIT(&ki->kaio_sync_task, 0, aio_schedule_fsync, ki);
451 PROC_LOCK(p);
452 if (p->p_aioinfo == NULL) {
453 p->p_aioinfo = ki;
454 PROC_UNLOCK(p);
455 } else {
456 PROC_UNLOCK(p);
457 mtx_destroy(&ki->kaio_mtx);
458 uma_zfree(kaio_zone, ki);
459 }
460
461 while (num_aio_procs < MIN(target_aio_procs, max_aio_procs))
462 aio_newproc(NULL);
463 }
464
465 static int
466 aio_sendsig(struct proc *p, struct sigevent *sigev, ksiginfo_t *ksi, bool ext)
467 {
468 struct thread *td;
469 int error;
470
471 error = sigev_findtd(p, sigev, &td);
472 if (error)
473 return (error);
474 if (!KSI_ONQ(ksi)) {
475 ksiginfo_set_sigev(ksi, sigev);
476 ksi->ksi_code = SI_ASYNCIO;
477 ksi->ksi_flags |= ext ? (KSI_EXT | KSI_INS) : 0;
478 tdsendsignal(p, td, ksi->ksi_signo, ksi);
479 }
480 PROC_UNLOCK(p);
481 return (error);
482 }
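/*
 * Editor's sketch of the userland side of the signal delivery above;
 * illustrative only (SIGUSR1 is an arbitrary choice, error checks elided):
 *
 *	static void
 *	done(int sig, siginfo_t *si, void *ctx)
 *	{
 *		struct aiocb *cb = si->si_value.sival_ptr;	reap later
 *	}
 *
 *	struct sigaction sa = { .sa_sigaction = done, .sa_flags = SA_SIGINFO };
 *	sigaction(SIGUSR1, &sa, NULL);
 *	cb.aio_sigevent.sigev_notify = SIGEV_SIGNAL;
 *	cb.aio_sigevent.sigev_signo = SIGUSR1;
 *	cb.aio_sigevent.sigev_value.sival_ptr = &cb;
 *	aio_read(&cb);
 */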
483
484 /*
485  * Free a job entry. The job must already be finished and on the done
486  * queue. The kaioinfo lock is held on entry; it is dropped around the
487  * final teardown and reacquired before returning.
488 */
489 static int
490 aio_free_entry(struct kaiocb *job)
491 {
492 struct kaioinfo *ki;
493 struct aioliojob *lj;
494 struct proc *p;
495
496 p = job->userproc;
497 MPASS(curproc == p);
498 ki = p->p_aioinfo;
499 MPASS(ki != NULL);
500
501 AIO_LOCK_ASSERT(ki, MA_OWNED);
502 MPASS(job->jobflags & KAIOCB_FINISHED);
503
504 atomic_subtract_int(&num_queue_count, 1);
505
506 ki->kaio_count--;
507 MPASS(ki->kaio_count >= 0);
508
509 TAILQ_REMOVE(&ki->kaio_done, job, plist);
510 TAILQ_REMOVE(&ki->kaio_all, job, allist);
511
512 lj = job->lio;
513 if (lj) {
514 lj->lioj_count--;
515 lj->lioj_finished_count--;
516
517 if (lj->lioj_count == 0) {
518 TAILQ_REMOVE(&ki->kaio_liojoblist, lj, lioj_list);
519 /* lio is going away, we need to destroy any knotes */
520 knlist_delete(&lj->klist, curthread, 1);
521 PROC_LOCK(p);
522 sigqueue_take(&lj->lioj_ksi);
523 PROC_UNLOCK(p);
524 uma_zfree(aiolio_zone, lj);
525 }
526 }
527
528 /* job is going away, we need to destroy any knotes */
529 knlist_delete(&job->klist, curthread, 1);
530 PROC_LOCK(p);
531 sigqueue_take(&job->ksi);
532 PROC_UNLOCK(p);
533
534 AIO_UNLOCK(ki);
535
536 /*
537 * The thread argument here is used to find the owning process
538 * and is also passed to fo_close() which may pass it to various
539 * places such as devsw close() routines. Because of that, we
540 * need a thread pointer from the process owning the job that is
541 * persistent and won't disappear out from under us or move to
542 * another process.
543 *
544 * Currently, all the callers of this function call it to remove
545 * a kaiocb from the current process' job list either via a
546 * syscall or due to the current process calling exit() or
547 * execve(). Thus, we know that p == curproc. We also know that
548 * curthread can't exit since we are curthread.
549 *
550 * Therefore, we use curthread as the thread to pass to
551 * knlist_delete(). This does mean that it is possible for the
552 * thread pointer at close time to differ from the thread pointer
553 * at open time, but this is already true of file descriptors in
554 * a multithreaded process.
555 */
556 if (job->fd_file)
557 fdrop(job->fd_file, curthread);
558 crfree(job->cred);
559 if (job->uiop != &job->uio)
560 free(job->uiop, M_IOV);
561 uma_zfree(aiocb_zone, job);
562 AIO_LOCK(ki);
563
564 return (0);
565 }
566
567 static void
568 aio_proc_rundown_exec(void *arg, struct proc *p,
569 struct image_params *imgp __unused)
570 {
571 aio_proc_rundown(arg, p);
572 }
573
574 static int
575 aio_cancel_job(struct proc *p, struct kaioinfo *ki, struct kaiocb *job)
576 {
577 aio_cancel_fn_t *func;
578 int cancelled;
579
580 AIO_LOCK_ASSERT(ki, MA_OWNED);
581 if (job->jobflags & (KAIOCB_CANCELLED | KAIOCB_FINISHED))
582 return (0);
583 MPASS((job->jobflags & KAIOCB_CANCELLING) == 0);
584 job->jobflags |= KAIOCB_CANCELLED;
585
586 func = job->cancel_fn;
587
588 /*
589 * If there is no cancel routine, just leave the job marked as
590 * cancelled. The job should be in active use by a caller who
591 * should complete it normally or when it fails to install a
592 * cancel routine.
593 */
594 if (func == NULL)
595 return (0);
596
597 /*
598 * Set the CANCELLING flag so that aio_complete() will defer
599 * completions of this job. This prevents the job from being
600 * freed out from under the cancel callback. After the
601 * callback any deferred completion (whether from the callback
602 * or any other source) will be completed.
603 */
604 job->jobflags |= KAIOCB_CANCELLING;
605 AIO_UNLOCK(ki);
606 func(job);
607 AIO_LOCK(ki);
608 job->jobflags &= ~KAIOCB_CANCELLING;
609 if (job->jobflags & KAIOCB_FINISHED) {
610 cancelled = job->uaiocb._aiocb_private.error == ECANCELED;
611 TAILQ_REMOVE(&ki->kaio_jobqueue, job, plist);
612 aio_bio_done_notify(p, job);
613 } else {
614 /*
615 * The cancel callback might have scheduled an
616 * operation to cancel this request, but it is
617 * only counted as cancelled if the request is
618 * cancelled when the callback returns.
619 */
620 cancelled = 0;
621 }
622 return (cancelled);
623 }
624
625 /*
626 * Rundown the jobs for a given process.
627 */
628 static void
629 aio_proc_rundown(void *arg, struct proc *p)
630 {
631 struct kaioinfo *ki;
632 struct aioliojob *lj;
633 struct kaiocb *job, *jobn;
634
635 KASSERT(curthread->td_proc == p,
636 ("%s: called on non-curproc", __func__));
637 ki = p->p_aioinfo;
638 if (ki == NULL)
639 return;
640
641 AIO_LOCK(ki);
642 ki->kaio_flags |= KAIO_RUNDOWN;
643
644 restart:
645
646 /*
647 * Try to cancel all pending requests. This code simulates
648 * aio_cancel on all pending I/O requests.
649 */
650 TAILQ_FOREACH_SAFE(job, &ki->kaio_jobqueue, plist, jobn) {
651 aio_cancel_job(p, ki, job);
652 }
653
654 /* Wait for all running I/O to be finished */
655 if (TAILQ_FIRST(&ki->kaio_jobqueue) || ki->kaio_active_count != 0) {
656 ki->kaio_flags |= KAIO_WAKEUP;
657 msleep(&p->p_aioinfo, AIO_MTX(ki), PRIBIO, "aioprn", hz);
658 goto restart;
659 }
660
661 /* Free all completed I/O requests. */
662 while ((job = TAILQ_FIRST(&ki->kaio_done)) != NULL)
663 aio_free_entry(job);
664
665 while ((lj = TAILQ_FIRST(&ki->kaio_liojoblist)) != NULL) {
666 if (lj->lioj_count == 0) {
667 TAILQ_REMOVE(&ki->kaio_liojoblist, lj, lioj_list);
668 knlist_delete(&lj->klist, curthread, 1);
669 PROC_LOCK(p);
670 sigqueue_take(&lj->lioj_ksi);
671 PROC_UNLOCK(p);
672 uma_zfree(aiolio_zone, lj);
673 } else {
674 panic("LIO job not cleaned up: C:%d, FC:%d\n",
675 lj->lioj_count, lj->lioj_finished_count);
676 }
677 }
678 AIO_UNLOCK(ki);
679 taskqueue_drain(taskqueue_aiod_kick, &ki->kaio_task);
680 taskqueue_drain(taskqueue_aiod_kick, &ki->kaio_sync_task);
681 mtx_destroy(&ki->kaio_mtx);
682 uma_zfree(kaio_zone, ki);
683 p->p_aioinfo = NULL;
684 }
685
686 /*
687 * Select a job to run (called by an AIO daemon).
688 */
689 static struct kaiocb *
690 aio_selectjob(struct aioproc *aiop)
691 {
692 struct kaiocb *job;
693 struct kaioinfo *ki;
694 struct proc *userp;
695
696 mtx_assert(&aio_job_mtx, MA_OWNED);
697 restart:
698 TAILQ_FOREACH(job, &aio_jobs, list) {
699 userp = job->userproc;
700 ki = userp->p_aioinfo;
701
702 if (ki->kaio_active_count < max_aio_per_proc) {
703 TAILQ_REMOVE(&aio_jobs, job, list);
704 if (!aio_clear_cancel_function(job))
705 goto restart;
706
707 /* Account for currently active jobs. */
708 ki->kaio_active_count++;
709 break;
710 }
711 }
712 return (job);
713 }
714
715 /*
716 * Move all data to a permanent storage device. This code
717 * simulates the fsync and fdatasync syscalls.
718 */
719 static int
720 aio_fsync_vnode(struct thread *td, struct vnode *vp, int op)
721 {
722 struct mount *mp;
723 vm_object_t obj;
724 int error;
725
726 for (;;) {
727 error = vn_start_write(vp, &mp, V_WAIT | V_PCATCH);
728 if (error != 0)
729 break;
730 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
731 obj = vp->v_object;
732 if (obj != NULL) {
733 VM_OBJECT_WLOCK(obj);
734 vm_object_page_clean(obj, 0, 0, 0);
735 VM_OBJECT_WUNLOCK(obj);
736 }
737 if (op == LIO_DSYNC)
738 error = VOP_FDATASYNC(vp, td);
739 else
740 error = VOP_FSYNC(vp, MNT_WAIT, td);
741
742 VOP_UNLOCK(vp);
743 vn_finished_write(mp);
744 if (error != ERELOOKUP)
745 break;
746 }
747 return (error);
748 }
749
750 /*
751 * The AIO processing activity for LIO_READ/LIO_WRITE. This is the code that
752 * does the I/O request for the non-bio version of the operations. The normal
753 * vn operations are used, and this code should work in all instances for every
754 * type of file, including pipes, sockets, fifos, and regular files.
755 *
756  * XXX I don't think it works well for sockets, pipes, and fifos.
757 */
758 static void
759 aio_process_rw(struct kaiocb *job)
760 {
761 struct ucred *td_savedcred;
762 struct thread *td;
763 struct file *fp;
764 ssize_t cnt;
765 long msgsnd_st, msgsnd_end;
766 long msgrcv_st, msgrcv_end;
767 long oublock_st, oublock_end;
768 long inblock_st, inblock_end;
769 int error, opcode;
770
771 KASSERT(job->uaiocb.aio_lio_opcode == LIO_READ ||
772 job->uaiocb.aio_lio_opcode == LIO_READV ||
773 job->uaiocb.aio_lio_opcode == LIO_WRITE ||
774 job->uaiocb.aio_lio_opcode == LIO_WRITEV,
775 ("%s: opcode %d", __func__, job->uaiocb.aio_lio_opcode));
776
777 aio_switch_vmspace(job);
778 td = curthread;
779 td_savedcred = td->td_ucred;
780 td->td_ucred = job->cred;
781 job->uiop->uio_td = td;
782 fp = job->fd_file;
783
784 opcode = job->uaiocb.aio_lio_opcode;
785 cnt = job->uiop->uio_resid;
786
787 msgrcv_st = td->td_ru.ru_msgrcv;
788 msgsnd_st = td->td_ru.ru_msgsnd;
789 inblock_st = td->td_ru.ru_inblock;
790 oublock_st = td->td_ru.ru_oublock;
791
792 /*
793 * aio_aqueue() acquires a reference to the file that is
794 * released in aio_free_entry().
795 */
796 if (opcode == LIO_READ || opcode == LIO_READV) {
797 if (job->uiop->uio_resid == 0)
798 error = 0;
799 else
800 error = fo_read(fp, job->uiop, fp->f_cred, FOF_OFFSET,
801 td);
802 } else {
803 if (fp->f_type == DTYPE_VNODE)
804 bwillwrite();
805 error = fo_write(fp, job->uiop, fp->f_cred, FOF_OFFSET, td);
806 }
807 msgrcv_end = td->td_ru.ru_msgrcv;
808 msgsnd_end = td->td_ru.ru_msgsnd;
809 inblock_end = td->td_ru.ru_inblock;
810 oublock_end = td->td_ru.ru_oublock;
811
812 job->msgrcv = msgrcv_end - msgrcv_st;
813 job->msgsnd = msgsnd_end - msgsnd_st;
814 job->inblock = inblock_end - inblock_st;
815 job->outblock = oublock_end - oublock_st;
816
817 if (error != 0 && job->uiop->uio_resid != cnt) {
818 if (error == ERESTART || error == EINTR || error == EWOULDBLOCK)
819 error = 0;
820 if (error == EPIPE && (opcode & LIO_WRITE)) {
821 PROC_LOCK(job->userproc);
822 kern_psignal(job->userproc, SIGPIPE);
823 PROC_UNLOCK(job->userproc);
824 }
825 }
826
827 cnt -= job->uiop->uio_resid;
828 td->td_ucred = td_savedcred;
829 if (error)
830 aio_complete(job, -1, error);
831 else
832 aio_complete(job, cnt, 0);
833 }
834
835 static void
836 aio_process_sync(struct kaiocb *job)
837 {
838 struct thread *td = curthread;
839 struct ucred *td_savedcred = td->td_ucred;
840 struct file *fp = job->fd_file;
841 int error = 0;
842
843 KASSERT(job->uaiocb.aio_lio_opcode & LIO_SYNC,
844 ("%s: opcode %d", __func__, job->uaiocb.aio_lio_opcode));
845
846 td->td_ucred = job->cred;
847 if (fp->f_vnode != NULL) {
848 error = aio_fsync_vnode(td, fp->f_vnode,
849 job->uaiocb.aio_lio_opcode);
850 }
851 td->td_ucred = td_savedcred;
852 if (error)
853 aio_complete(job, -1, error);
854 else
855 aio_complete(job, 0, 0);
856 }
857
858 static void
859 aio_process_mlock(struct kaiocb *job)
860 {
861 struct aiocb *cb = &job->uaiocb;
862 int error;
863
864 KASSERT(job->uaiocb.aio_lio_opcode == LIO_MLOCK,
865 ("%s: opcode %d", __func__, job->uaiocb.aio_lio_opcode));
866
867 aio_switch_vmspace(job);
868 error = kern_mlock(job->userproc, job->cred,
869 __DEVOLATILE(uintptr_t, cb->aio_buf), cb->aio_nbytes);
870 aio_complete(job, error != 0 ? -1 : 0, error);
871 }
872
873 static void
874 aio_bio_done_notify(struct proc *userp, struct kaiocb *job)
875 {
876 struct aioliojob *lj;
877 struct kaioinfo *ki;
878 struct kaiocb *sjob, *sjobn;
879 int lj_done;
880 bool schedule_fsync;
881
882 ki = userp->p_aioinfo;
883 AIO_LOCK_ASSERT(ki, MA_OWNED);
884 lj = job->lio;
885 lj_done = 0;
886 if (lj) {
887 lj->lioj_finished_count++;
888 if (lj->lioj_count == lj->lioj_finished_count)
889 lj_done = 1;
890 }
891 TAILQ_INSERT_TAIL(&ki->kaio_done, job, plist);
892 MPASS(job->jobflags & KAIOCB_FINISHED);
893
894 if (ki->kaio_flags & KAIO_RUNDOWN)
895 goto notification_done;
896
897 if (job->uaiocb.aio_sigevent.sigev_notify == SIGEV_SIGNAL ||
898 job->uaiocb.aio_sigevent.sigev_notify == SIGEV_THREAD_ID)
899 aio_sendsig(userp, &job->uaiocb.aio_sigevent, &job->ksi, true);
900
901 KNOTE_LOCKED(&job->klist, 1);
902
903 if (lj_done) {
904 if (lj->lioj_signal.sigev_notify == SIGEV_KEVENT) {
905 lj->lioj_flags |= LIOJ_KEVENT_POSTED;
906 KNOTE_LOCKED(&lj->klist, 1);
907 }
908 if ((lj->lioj_flags & (LIOJ_SIGNAL | LIOJ_SIGNAL_POSTED))
909 == LIOJ_SIGNAL &&
910 (lj->lioj_signal.sigev_notify == SIGEV_SIGNAL ||
911 lj->lioj_signal.sigev_notify == SIGEV_THREAD_ID)) {
912 aio_sendsig(userp, &lj->lioj_signal, &lj->lioj_ksi,
913 true);
914 lj->lioj_flags |= LIOJ_SIGNAL_POSTED;
915 }
916 }
917
918 notification_done:
919 if (job->jobflags & KAIOCB_CHECKSYNC) {
920 schedule_fsync = false;
921 TAILQ_FOREACH_SAFE(sjob, &ki->kaio_syncqueue, list, sjobn) {
922 if (job->fd_file != sjob->fd_file ||
923 job->seqno >= sjob->seqno)
924 continue;
925 if (--sjob->pending > 0)
926 continue;
927 TAILQ_REMOVE(&ki->kaio_syncqueue, sjob, list);
928 if (!aio_clear_cancel_function_locked(sjob))
929 continue;
930 TAILQ_INSERT_TAIL(&ki->kaio_syncready, sjob, list);
931 schedule_fsync = true;
932 }
933 if (schedule_fsync)
934 taskqueue_enqueue(taskqueue_aiod_kick,
935 &ki->kaio_sync_task);
936 }
937 if (ki->kaio_flags & KAIO_WAKEUP) {
938 ki->kaio_flags &= ~KAIO_WAKEUP;
939 wakeup(&userp->p_aioinfo);
940 }
941 }
942
943 static void
944 aio_schedule_fsync(void *context, int pending)
945 {
946 struct kaioinfo *ki;
947 struct kaiocb *job;
948
949 ki = context;
950 AIO_LOCK(ki);
951 while (!TAILQ_EMPTY(&ki->kaio_syncready)) {
952 job = TAILQ_FIRST(&ki->kaio_syncready);
953 TAILQ_REMOVE(&ki->kaio_syncready, job, list);
954 AIO_UNLOCK(ki);
955 aio_schedule(job, aio_process_sync);
956 AIO_LOCK(ki);
957 }
958 AIO_UNLOCK(ki);
959 }
960
961 bool
962 aio_cancel_cleared(struct kaiocb *job)
963 {
964
965 /*
966 * The caller should hold the same queue lock held when
967 	 * aio_clear_cancel_function() was called and set this flag,
968 	 * ensuring that this check sees an up-to-date value. However,
969 * there is no way to assert that.
970 */
971 return ((job->jobflags & KAIOCB_CLEARED) != 0);
972 }
973
974 static bool
975 aio_clear_cancel_function_locked(struct kaiocb *job)
976 {
977
978 AIO_LOCK_ASSERT(job->userproc->p_aioinfo, MA_OWNED);
979 MPASS(job->cancel_fn != NULL);
980 if (job->jobflags & KAIOCB_CANCELLING) {
981 job->jobflags |= KAIOCB_CLEARED;
982 return (false);
983 }
984 job->cancel_fn = NULL;
985 return (true);
986 }
987
988 bool
989 aio_clear_cancel_function(struct kaiocb *job)
990 {
991 struct kaioinfo *ki;
992 bool ret;
993
994 ki = job->userproc->p_aioinfo;
995 AIO_LOCK(ki);
996 ret = aio_clear_cancel_function_locked(job);
997 AIO_UNLOCK(ki);
998 return (ret);
999 }
1000
1001 static bool
1002 aio_set_cancel_function_locked(struct kaiocb *job, aio_cancel_fn_t *func)
1003 {
1004
1005 AIO_LOCK_ASSERT(job->userproc->p_aioinfo, MA_OWNED);
1006 if (job->jobflags & KAIOCB_CANCELLED)
1007 return (false);
1008 job->cancel_fn = func;
1009 return (true);
1010 }
1011
1012 bool
1013 aio_set_cancel_function(struct kaiocb *job, aio_cancel_fn_t *func)
1014 {
1015 struct kaioinfo *ki;
1016 bool ret;
1017
1018 ki = job->userproc->p_aioinfo;
1019 AIO_LOCK(ki);
1020 ret = aio_set_cancel_function_locked(job, func);
1021 AIO_UNLOCK(ki);
1022 return (ret);
1023 }
1024
1025 void
1026 aio_complete(struct kaiocb *job, long status, int error)
1027 {
1028 struct kaioinfo *ki;
1029 struct proc *userp;
1030
1031 job->uaiocb._aiocb_private.error = error;
1032 job->uaiocb._aiocb_private.status = status;
1033
1034 userp = job->userproc;
1035 ki = userp->p_aioinfo;
1036
1037 AIO_LOCK(ki);
1038 KASSERT(!(job->jobflags & KAIOCB_FINISHED),
1039 ("duplicate aio_complete"));
1040 job->jobflags |= KAIOCB_FINISHED;
1041 if ((job->jobflags & (KAIOCB_QUEUEING | KAIOCB_CANCELLING)) == 0) {
1042 TAILQ_REMOVE(&ki->kaio_jobqueue, job, plist);
1043 aio_bio_done_notify(userp, job);
1044 }
1045 AIO_UNLOCK(ki);
1046 }
1047
1048 void
1049 aio_cancel(struct kaiocb *job)
1050 {
1051
1052 aio_complete(job, -1, ECANCELED);
1053 }
1054
1055 void
1056 aio_switch_vmspace(struct kaiocb *job)
1057 {
1058
1059 vmspace_switch_aio(job->userproc->p_vmspace);
1060 }
1061
1062 /*
1063  * The AIO daemon. Most of the actual work is done in aio_process_*,
1064  * but the setup (and address space management) is done in this routine.
1065 */
1066 static void
1067 aio_daemon(void *_id)
1068 {
1069 struct kaiocb *job;
1070 struct aioproc *aiop;
1071 struct kaioinfo *ki;
1072 struct proc *p;
1073 struct vmspace *myvm;
1074 struct thread *td = curthread;
1075 int id = (intptr_t)_id;
1076
1077 /*
1078 * Grab an extra reference on the daemon's vmspace so that it
1079 * doesn't get freed by jobs that switch to a different
1080 * vmspace.
1081 */
1082 p = td->td_proc;
1083 myvm = vmspace_acquire_ref(p);
1084
1085 KASSERT(p->p_textvp == NULL, ("kthread has a textvp"));
1086
1087 /*
1088 * Allocate and ready the aio control info. There is one aiop structure
1089 * per daemon.
1090 */
1091 aiop = malloc(sizeof(*aiop), M_AIO, M_WAITOK);
1092 aiop->aioproc = p;
1093 aiop->aioprocflags = 0;
1094
1095 /*
1096 	 * Wake up the parent process. (The parent sleeps to keep from
1097 	 * blasting away and creating too many daemons.)
1098 */
1099 sema_post(&aio_newproc_sem);
1100
1101 mtx_lock(&aio_job_mtx);
1102 for (;;) {
1103 /*
1104 * Take daemon off of free queue
1105 */
1106 if (aiop->aioprocflags & AIOP_FREE) {
1107 TAILQ_REMOVE(&aio_freeproc, aiop, list);
1108 aiop->aioprocflags &= ~AIOP_FREE;
1109 }
1110
1111 /*
1112 * Check for jobs.
1113 */
1114 while ((job = aio_selectjob(aiop)) != NULL) {
1115 mtx_unlock(&aio_job_mtx);
1116
1117 ki = job->userproc->p_aioinfo;
1118 job->handle_fn(job);
1119
1120 mtx_lock(&aio_job_mtx);
1121 /* Decrement the active job count. */
1122 ki->kaio_active_count--;
1123 }
1124
1125 /*
1126 * Disconnect from user address space.
1127 */
1128 if (p->p_vmspace != myvm) {
1129 mtx_unlock(&aio_job_mtx);
1130 vmspace_switch_aio(myvm);
1131 mtx_lock(&aio_job_mtx);
1132 /*
1133 			 * We have to restart to avoid a race; we only sleep
1134 			 * if no job can be selected.
1135 */
1136 continue;
1137 }
1138
1139 mtx_assert(&aio_job_mtx, MA_OWNED);
1140
1141 TAILQ_INSERT_HEAD(&aio_freeproc, aiop, list);
1142 aiop->aioprocflags |= AIOP_FREE;
1143
1144 /*
1145 * If daemon is inactive for a long time, allow it to exit,
1146 * thereby freeing resources.
1147 */
1148 if (msleep(p, &aio_job_mtx, PRIBIO, "aiordy",
1149 aiod_lifetime) == EWOULDBLOCK && TAILQ_EMPTY(&aio_jobs) &&
1150 (aiop->aioprocflags & AIOP_FREE) &&
1151 num_aio_procs > target_aio_procs)
1152 break;
1153 }
1154 TAILQ_REMOVE(&aio_freeproc, aiop, list);
1155 num_aio_procs--;
1156 mtx_unlock(&aio_job_mtx);
1157 free(aiop, M_AIO);
1158 free_unr(aiod_unr, id);
1159 vmspace_free(myvm);
1160
1161 KASSERT(p->p_vmspace == myvm,
1162 ("AIOD: bad vmspace for exiting daemon"));
1163 KASSERT(refcount_load(&myvm->vm_refcnt) > 1,
1164 ("AIOD: bad vm refcnt for exiting daemon: %d",
1165 refcount_load(&myvm->vm_refcnt)));
1166 kproc_exit(0);
1167 }
1168
1169 /*
1170 * Create a new AIO daemon. This is mostly a kernel-thread fork routine. The
1171 * AIO daemon modifies its environment itself.
1172 */
1173 static int
1174 aio_newproc(int *start)
1175 {
1176 int error;
1177 struct proc *p;
1178 int id;
1179
1180 id = alloc_unr(aiod_unr);
1181 error = kproc_create(aio_daemon, (void *)(intptr_t)id, &p,
1182 RFNOWAIT, 0, "aiod%d", id);
1183 if (error == 0) {
1184 /*
1185 * Wait until daemon is started.
1186 */
1187 sema_wait(&aio_newproc_sem);
1188 mtx_lock(&aio_job_mtx);
1189 num_aio_procs++;
1190 if (start != NULL)
1191 (*start)--;
1192 mtx_unlock(&aio_job_mtx);
1193 } else {
1194 free_unr(aiod_unr, id);
1195 }
1196 return (error);
1197 }
1198
1199 /*
1200 * Try the high-performance, low-overhead bio method for eligible
1201 * VCHR devices. This method doesn't use an aio helper thread, and
1202 * thus has very low overhead.
1203 *
1204 * Assumes that the caller, aio_aqueue(), has incremented the file
1205 * structure's reference count, preventing its deallocation for the
1206 * duration of this call.
1207 */
1208 static int
1209 aio_qbio(struct proc *p, struct kaiocb *job)
1210 {
1211 struct aiocb *cb;
1212 struct file *fp;
1213 struct buf *pbuf;
1214 struct vnode *vp;
1215 struct cdevsw *csw;
1216 struct cdev *dev;
1217 struct kaioinfo *ki;
1218 struct bio **bios = NULL;
1219 off_t offset;
1220 int bio_cmd, error, i, iovcnt, opcode, poff, ref;
1221 vm_prot_t prot;
1222 bool use_unmapped;
1223
1224 cb = &job->uaiocb;
1225 fp = job->fd_file;
1226 opcode = cb->aio_lio_opcode;
1227
1228 if (!(opcode == LIO_WRITE || opcode == LIO_WRITEV ||
1229 opcode == LIO_READ || opcode == LIO_READV))
1230 return (-1);
1231 if (fp == NULL || fp->f_type != DTYPE_VNODE)
1232 return (-1);
1233
1234 vp = fp->f_vnode;
1235 if (vp->v_type != VCHR)
1236 return (-1);
1237 if (vp->v_bufobj.bo_bsize == 0)
1238 return (-1);
1239
1240 bio_cmd = (opcode & LIO_WRITE) ? BIO_WRITE : BIO_READ;
1241 iovcnt = job->uiop->uio_iovcnt;
1242 if (iovcnt > max_buf_aio)
1243 return (-1);
1244 for (i = 0; i < iovcnt; i++) {
1245 if (job->uiop->uio_iov[i].iov_len % vp->v_bufobj.bo_bsize != 0)
1246 return (-1);
1247 		if (job->uiop->uio_iov[i].iov_len > maxphys)
1248 			return (-1);
1251 }
1252 offset = cb->aio_offset;
1253
1254 ref = 0;
1255 csw = devvn_refthread(vp, &dev, &ref);
1256 if (csw == NULL)
1257 return (ENXIO);
1258
1259 if ((csw->d_flags & D_DISK) == 0) {
1260 error = -1;
1261 goto unref;
1262 }
1263 if (job->uiop->uio_resid > dev->si_iosize_max) {
1264 error = -1;
1265 goto unref;
1266 }
1267
1268 ki = p->p_aioinfo;
1269 job->error = 0;
1270
1271 use_unmapped = (dev->si_flags & SI_UNMAPPED) && unmapped_buf_allowed;
1272 if (!use_unmapped) {
1273 AIO_LOCK(ki);
1274 if (ki->kaio_buffer_count + iovcnt > max_buf_aio) {
1275 AIO_UNLOCK(ki);
1276 error = EAGAIN;
1277 goto unref;
1278 }
1279 ki->kaio_buffer_count += iovcnt;
1280 AIO_UNLOCK(ki);
1281 }
1282
1283 bios = malloc(sizeof(struct bio *) * iovcnt, M_TEMP, M_WAITOK);
1284 atomic_store_int(&job->nbio, iovcnt);
1285 for (i = 0; i < iovcnt; i++) {
1286 		struct vm_page **pages;
1287 struct bio *bp;
1288 void *buf;
1289 size_t nbytes;
1290 int npages;
1291
1292 buf = job->uiop->uio_iov[i].iov_base;
1293 nbytes = job->uiop->uio_iov[i].iov_len;
1294
1295 bios[i] = g_alloc_bio();
1296 bp = bios[i];
1297
1298 poff = (vm_offset_t)buf & PAGE_MASK;
1299 if (use_unmapped) {
1300 pbuf = NULL;
1301 pages = malloc(sizeof(vm_page_t) * (atop(round_page(
1302 nbytes)) + 1), M_TEMP, M_WAITOK | M_ZERO);
1303 } else {
1304 pbuf = uma_zalloc(pbuf_zone, M_WAITOK);
1305 BUF_KERNPROC(pbuf);
1306 pages = pbuf->b_pages;
1307 }
1308
1309 bp->bio_length = nbytes;
1310 bp->bio_bcount = nbytes;
1311 bp->bio_done = aio_biowakeup;
1312 bp->bio_offset = offset;
1313 bp->bio_cmd = bio_cmd;
1314 bp->bio_dev = dev;
1315 bp->bio_caller1 = job;
1316 bp->bio_caller2 = pbuf;
1317
1318 prot = VM_PROT_READ;
1319 if (opcode == LIO_READ || opcode == LIO_READV)
1320 prot |= VM_PROT_WRITE; /* Less backwards than it looks */
1321 npages = vm_fault_quick_hold_pages(&curproc->p_vmspace->vm_map,
1322 (vm_offset_t)buf, bp->bio_length, prot, pages,
1323 atop(maxphys) + 1);
1324 if (npages < 0) {
1325 if (pbuf != NULL)
1326 uma_zfree(pbuf_zone, pbuf);
1327 else
1328 free(pages, M_TEMP);
1329 error = EFAULT;
1330 g_destroy_bio(bp);
1331 i--;
1332 goto destroy_bios;
1333 }
1334 if (pbuf != NULL) {
1335 pmap_qenter((vm_offset_t)pbuf->b_data, pages, npages);
1336 bp->bio_data = pbuf->b_data + poff;
1337 pbuf->b_npages = npages;
1338 atomic_add_int(&num_buf_aio, 1);
1339 } else {
1340 bp->bio_ma = pages;
1341 bp->bio_ma_n = npages;
1342 bp->bio_ma_offset = poff;
1343 bp->bio_data = unmapped_buf;
1344 bp->bio_flags |= BIO_UNMAPPED;
1345 atomic_add_int(&num_unmapped_aio, 1);
1346 }
1347
1348 offset += nbytes;
1349 }
1350
1351 /* Perform transfer. */
1352 for (i = 0; i < iovcnt; i++)
1353 csw->d_strategy(bios[i]);
1354 free(bios, M_TEMP);
1355
1356 dev_relthread(dev, ref);
1357 return (0);
1358
1359 destroy_bios:
1360 for (; i >= 0; i--)
1361 aio_biocleanup(bios[i]);
1362 free(bios, M_TEMP);
1363 unref:
1364 dev_relthread(dev, ref);
1365 return (error);
1366 }
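/*
 * Editor's note: a request only takes the bio fast path above when the
 * descriptor names a VCHR disk device and every iovec length is a multiple
 * of the device block size and no larger than maxphys. A hedged userland
 * sketch (the device path is hypothetical, error checks elided):
 *
 *	int fd = open("/dev/ada0", O_RDWR);	raw disk device, not a file
 *	cb.aio_fildes = fd;
 *	cb.aio_buf = buf;			buf holds 4096 bytes
 *	cb.aio_nbytes = 4096;			a multiple of the block size
 *	cb.aio_offset = 0;
 *	aio_write(&cb);				eligible for aio_qbio()
 */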
1367
1368 #ifdef COMPAT_FREEBSD6
1369 static int
1370 convert_old_sigevent(struct osigevent *osig, struct sigevent *nsig)
1371 {
1372
1373 /*
1374 * Only SIGEV_NONE, SIGEV_SIGNAL, and SIGEV_KEVENT are
1375 * supported by AIO with the old sigevent structure.
1376 */
1377 nsig->sigev_notify = osig->sigev_notify;
1378 switch (nsig->sigev_notify) {
1379 case SIGEV_NONE:
1380 break;
1381 case SIGEV_SIGNAL:
1382 nsig->sigev_signo = osig->__sigev_u.__sigev_signo;
1383 break;
1384 case SIGEV_KEVENT:
1385 nsig->sigev_notify_kqueue =
1386 osig->__sigev_u.__sigev_notify_kqueue;
1387 nsig->sigev_value.sival_ptr = osig->sigev_value.sival_ptr;
1388 break;
1389 default:
1390 return (EINVAL);
1391 }
1392 return (0);
1393 }
1394
1395 static int
1396 aiocb_copyin_old_sigevent(struct aiocb *ujob, struct kaiocb *kjob,
1397 int type __unused)
1398 {
1399 struct oaiocb *ojob;
1400 struct aiocb *kcb = &kjob->uaiocb;
1401 int error;
1402
1403 bzero(kcb, sizeof(struct aiocb));
1404 error = copyin(ujob, kcb, sizeof(struct oaiocb));
1405 if (error)
1406 return (error);
1407 /* No need to copyin aio_iov, because it did not exist in FreeBSD 6 */
1408 ojob = (struct oaiocb *)kcb;
1409 return (convert_old_sigevent(&ojob->aio_sigevent, &kcb->aio_sigevent));
1410 }
1411 #endif
1412
1413 static int
1414 aiocb_copyin(struct aiocb *ujob, struct kaiocb *kjob, int type)
1415 {
1416 struct aiocb *kcb = &kjob->uaiocb;
1417 int error;
1418
1419 error = copyin(ujob, kcb, sizeof(struct aiocb));
1420 if (error)
1421 return (error);
1422 if (type == LIO_NOP)
1423 type = kcb->aio_lio_opcode;
1424 if (type & LIO_VECTORED) {
1425 /* malloc a uio and copy in the iovec */
1426 error = copyinuio(__DEVOLATILE(struct iovec*, kcb->aio_iov),
1427 kcb->aio_iovcnt, &kjob->uiop);
1428 }
1429
1430 return (error);
1431 }
1432
1433 static long
1434 aiocb_fetch_status(struct aiocb *ujob)
1435 {
1436
1437 return (fuword(&ujob->_aiocb_private.status));
1438 }
1439
1440 static long
1441 aiocb_fetch_error(struct aiocb *ujob)
1442 {
1443
1444 return (fuword(&ujob->_aiocb_private.error));
1445 }
1446
1447 static int
1448 aiocb_store_status(struct aiocb *ujob, long status)
1449 {
1450
1451 return (suword(&ujob->_aiocb_private.status, status));
1452 }
1453
1454 static int
1455 aiocb_store_error(struct aiocb *ujob, long error)
1456 {
1457
1458 return (suword(&ujob->_aiocb_private.error, error));
1459 }
1460
1461 static int
1462 aiocb_store_kernelinfo(struct aiocb *ujob, long jobref)
1463 {
1464
1465 return (suword(&ujob->_aiocb_private.kernelinfo, jobref));
1466 }
1467
1468 static int
1469 aiocb_store_aiocb(struct aiocb **ujobp, struct aiocb *ujob)
1470 {
1471
1472 return (suword(ujobp, (long)ujob));
1473 }
1474
1475 static struct aiocb_ops aiocb_ops = {
1476 .aio_copyin = aiocb_copyin,
1477 .fetch_status = aiocb_fetch_status,
1478 .fetch_error = aiocb_fetch_error,
1479 .store_status = aiocb_store_status,
1480 .store_error = aiocb_store_error,
1481 .store_kernelinfo = aiocb_store_kernelinfo,
1482 .store_aiocb = aiocb_store_aiocb,
1483 };
1484
1485 #ifdef COMPAT_FREEBSD6
1486 static struct aiocb_ops aiocb_ops_osigevent = {
1487 .aio_copyin = aiocb_copyin_old_sigevent,
1488 .fetch_status = aiocb_fetch_status,
1489 .fetch_error = aiocb_fetch_error,
1490 .store_status = aiocb_store_status,
1491 .store_error = aiocb_store_error,
1492 .store_kernelinfo = aiocb_store_kernelinfo,
1493 .store_aiocb = aiocb_store_aiocb,
1494 };
1495 #endif
1496
1497 /*
1498  * Queue a new AIO request. The choice between the threaded technique and
1499  * the direct bio VCHR technique is made in this code.
1500 */
1501 int
1502 aio_aqueue(struct thread *td, struct aiocb *ujob, struct aioliojob *lj,
1503 int type, struct aiocb_ops *ops)
1504 {
1505 struct proc *p = td->td_proc;
1506 struct file *fp = NULL;
1507 struct kaiocb *job;
1508 struct kaioinfo *ki;
1509 struct kevent kev;
1510 int opcode;
1511 int error;
1512 int fd, kqfd;
1513 int jid;
1514 u_short evflags;
1515
1516 if (p->p_aioinfo == NULL)
1517 aio_init_aioinfo(p);
1518
1519 ki = p->p_aioinfo;
1520
1521 ops->store_status(ujob, -1);
1522 ops->store_error(ujob, 0);
1523 ops->store_kernelinfo(ujob, -1);
1524
1525 if (num_queue_count >= max_queue_count ||
1526 ki->kaio_count >= max_aio_queue_per_proc) {
1527 error = EAGAIN;
1528 goto err1;
1529 }
1530
1531 job = uma_zalloc(aiocb_zone, M_WAITOK | M_ZERO);
1532 knlist_init_mtx(&job->klist, AIO_MTX(ki));
1533
1534 error = ops->aio_copyin(ujob, job, type);
1535 if (error)
1536 goto err2;
1537
1538 if (job->uaiocb.aio_nbytes > IOSIZE_MAX) {
1539 error = EINVAL;
1540 goto err2;
1541 }
1542
1543 if (job->uaiocb.aio_sigevent.sigev_notify != SIGEV_KEVENT &&
1544 job->uaiocb.aio_sigevent.sigev_notify != SIGEV_SIGNAL &&
1545 job->uaiocb.aio_sigevent.sigev_notify != SIGEV_THREAD_ID &&
1546 job->uaiocb.aio_sigevent.sigev_notify != SIGEV_NONE) {
1547 error = EINVAL;
1548 goto err2;
1549 }
1550
1551 if ((job->uaiocb.aio_sigevent.sigev_notify == SIGEV_SIGNAL ||
1552 job->uaiocb.aio_sigevent.sigev_notify == SIGEV_THREAD_ID) &&
1553 !_SIG_VALID(job->uaiocb.aio_sigevent.sigev_signo)) {
1554 error = EINVAL;
1555 goto err2;
1556 }
1557
1558 /* Get the opcode. */
1559 if (type == LIO_NOP) {
1560 switch (job->uaiocb.aio_lio_opcode) {
1561 case LIO_WRITE:
1562 case LIO_WRITEV:
1563 case LIO_NOP:
1564 case LIO_READ:
1565 case LIO_READV:
1566 opcode = job->uaiocb.aio_lio_opcode;
1567 break;
1568 default:
1569 error = EINVAL;
1570 goto err2;
1571 }
1572 } else
1573 opcode = job->uaiocb.aio_lio_opcode = type;
1574
1575 ksiginfo_init(&job->ksi);
1576
1577 /* Save userspace address of the job info. */
1578 job->ujob = ujob;
1579
1580 /*
1581 * Validate the opcode and fetch the file object for the specified
1582 * file descriptor.
1583 *
1584 * XXXRW: Moved the opcode validation up here so that we don't
1585 	 * retrieve a file descriptor without knowing what the capability
1586 * should be.
1587 */
1588 fd = job->uaiocb.aio_fildes;
1589 switch (opcode) {
1590 case LIO_WRITE:
1591 case LIO_WRITEV:
1592 error = fget_write(td, fd, &cap_pwrite_rights, &fp);
1593 break;
1594 case LIO_READ:
1595 case LIO_READV:
1596 error = fget_read(td, fd, &cap_pread_rights, &fp);
1597 break;
1598 case LIO_SYNC:
1599 case LIO_DSYNC:
1600 error = fget(td, fd, &cap_fsync_rights, &fp);
1601 break;
1602 case LIO_MLOCK:
1603 break;
1604 case LIO_NOP:
1605 error = fget(td, fd, &cap_no_rights, &fp);
1606 break;
1607 default:
1608 error = EINVAL;
1609 }
1610 if (error)
1611 goto err3;
1612
1613 if ((opcode & LIO_SYNC) && fp->f_vnode == NULL) {
1614 error = EINVAL;
1615 goto err3;
1616 }
1617
1618 if ((opcode == LIO_READ || opcode == LIO_READV ||
1619 opcode == LIO_WRITE || opcode == LIO_WRITEV) &&
1620 job->uaiocb.aio_offset < 0 &&
1621 (fp->f_vnode == NULL || fp->f_vnode->v_type != VCHR)) {
1622 error = EINVAL;
1623 goto err3;
1624 }
1625
1626 if (fp != NULL && fp->f_ops == &path_fileops) {
1627 error = EBADF;
1628 goto err3;
1629 }
1630
1631 job->fd_file = fp;
1632
1633 mtx_lock(&aio_job_mtx);
1634 jid = jobrefid++;
1635 job->seqno = jobseqno++;
1636 mtx_unlock(&aio_job_mtx);
1637 error = ops->store_kernelinfo(ujob, jid);
1638 if (error) {
1639 error = EINVAL;
1640 goto err3;
1641 }
1642 job->uaiocb._aiocb_private.kernelinfo = (void *)(intptr_t)jid;
1643
1644 if (opcode == LIO_NOP) {
1645 fdrop(fp, td);
1646 MPASS(job->uiop == &job->uio || job->uiop == NULL);
1647 uma_zfree(aiocb_zone, job);
1648 return (0);
1649 }
1650
1651 if (job->uaiocb.aio_sigevent.sigev_notify != SIGEV_KEVENT)
1652 goto no_kqueue;
1653 evflags = job->uaiocb.aio_sigevent.sigev_notify_kevent_flags;
1654 if ((evflags & ~(EV_CLEAR | EV_DISPATCH | EV_ONESHOT)) != 0) {
1655 error = EINVAL;
1656 goto err3;
1657 }
1658 kqfd = job->uaiocb.aio_sigevent.sigev_notify_kqueue;
1659 memset(&kev, 0, sizeof(kev));
1660 kev.ident = (uintptr_t)job->ujob;
1661 kev.filter = EVFILT_AIO;
1662 kev.flags = EV_ADD | EV_ENABLE | EV_FLAG1 | evflags;
1663 kev.data = (intptr_t)job;
1664 kev.udata = job->uaiocb.aio_sigevent.sigev_value.sival_ptr;
1665 error = kqfd_register(kqfd, &kev, td, M_WAITOK);
1666 if (error)
1667 goto err3;
1668
1669 no_kqueue:
1670
1671 ops->store_error(ujob, EINPROGRESS);
1672 job->uaiocb._aiocb_private.error = EINPROGRESS;
1673 job->userproc = p;
1674 job->cred = crhold(td->td_ucred);
1675 job->jobflags = KAIOCB_QUEUEING;
1676 job->lio = lj;
1677
1678 if (opcode & LIO_VECTORED) {
1679 /* Use the uio copied in by aio_copyin */
1680 MPASS(job->uiop != &job->uio && job->uiop != NULL);
1681 } else {
1682 /* Setup the inline uio */
1683 job->iov[0].iov_base = (void *)(uintptr_t)job->uaiocb.aio_buf;
1684 job->iov[0].iov_len = job->uaiocb.aio_nbytes;
1685 job->uio.uio_iov = job->iov;
1686 job->uio.uio_iovcnt = 1;
1687 job->uio.uio_resid = job->uaiocb.aio_nbytes;
1688 job->uio.uio_segflg = UIO_USERSPACE;
1689 job->uiop = &job->uio;
1690 }
1691 switch (opcode & (LIO_READ | LIO_WRITE)) {
1692 case LIO_READ:
1693 job->uiop->uio_rw = UIO_READ;
1694 break;
1695 case LIO_WRITE:
1696 job->uiop->uio_rw = UIO_WRITE;
1697 break;
1698 }
1699 job->uiop->uio_offset = job->uaiocb.aio_offset;
1700 job->uiop->uio_td = td;
1701
1702 if (opcode == LIO_MLOCK) {
1703 aio_schedule(job, aio_process_mlock);
1704 error = 0;
1705 } else if (fp->f_ops->fo_aio_queue == NULL)
1706 error = aio_queue_file(fp, job);
1707 else
1708 error = fo_aio_queue(fp, job);
1709 if (error)
1710 goto err4;
1711
1712 AIO_LOCK(ki);
1713 job->jobflags &= ~KAIOCB_QUEUEING;
1714 TAILQ_INSERT_TAIL(&ki->kaio_all, job, allist);
1715 ki->kaio_count++;
1716 if (lj)
1717 lj->lioj_count++;
1718 atomic_add_int(&num_queue_count, 1);
1719 if (job->jobflags & KAIOCB_FINISHED) {
1720 /*
1721 * The queue callback completed the request synchronously.
1722 * The bulk of the completion is deferred in that case
1723 * until this point.
1724 */
1725 aio_bio_done_notify(p, job);
1726 } else
1727 TAILQ_INSERT_TAIL(&ki->kaio_jobqueue, job, plist);
1728 AIO_UNLOCK(ki);
1729 return (0);
1730
1731 err4:
1732 crfree(job->cred);
1733 err3:
1734 if (fp)
1735 fdrop(fp, td);
1736 knlist_delete(&job->klist, curthread, 0);
1737 err2:
1738 if (job->uiop != &job->uio)
1739 free(job->uiop, M_IOV);
1740 uma_zfree(aiocb_zone, job);
1741 err1:
1742 ops->store_error(ujob, error);
1743 return (error);
1744 }
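/*
 * Editor's sketch of the SIGEV_KEVENT notification path registered in
 * aio_aqueue() above; illustrative only, assuming an existing kqueue
 * descriptor kq and eliding error checks:
 *
 *	cb.aio_sigevent.sigev_notify = SIGEV_KEVENT;
 *	cb.aio_sigevent.sigev_notify_kqueue = kq;
 *	cb.aio_sigevent.sigev_value.sival_ptr = &cb;
 *	aio_read(&cb);
 *
 *	struct kevent ev;
 *	kevent(kq, NULL, 0, &ev, 1, NULL);	fires when the job finishes
 *	aio_return((struct aiocb *)ev.udata);	udata is sival_ptr, i.e. &cb
 */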
1745
1746 static void
1747 aio_cancel_daemon_job(struct kaiocb *job)
1748 {
1749
1750 mtx_lock(&aio_job_mtx);
1751 if (!aio_cancel_cleared(job))
1752 TAILQ_REMOVE(&aio_jobs, job, list);
1753 mtx_unlock(&aio_job_mtx);
1754 aio_cancel(job);
1755 }
1756
1757 void
1758 aio_schedule(struct kaiocb *job, aio_handle_fn_t *func)
1759 {
1760
1761 mtx_lock(&aio_job_mtx);
1762 if (!aio_set_cancel_function(job, aio_cancel_daemon_job)) {
1763 mtx_unlock(&aio_job_mtx);
1764 aio_cancel(job);
1765 return;
1766 }
1767 job->handle_fn = func;
1768 TAILQ_INSERT_TAIL(&aio_jobs, job, list);
1769 aio_kick_nowait(job->userproc);
1770 mtx_unlock(&aio_job_mtx);
1771 }
1772
1773 static void
1774 aio_cancel_sync(struct kaiocb *job)
1775 {
1776 struct kaioinfo *ki;
1777
1778 ki = job->userproc->p_aioinfo;
1779 AIO_LOCK(ki);
1780 if (!aio_cancel_cleared(job))
1781 TAILQ_REMOVE(&ki->kaio_syncqueue, job, list);
1782 AIO_UNLOCK(ki);
1783 aio_cancel(job);
1784 }
1785
1786 int
1787 aio_queue_file(struct file *fp, struct kaiocb *job)
1788 {
1789 struct kaioinfo *ki;
1790 struct kaiocb *job2;
1791 struct vnode *vp;
1792 struct mount *mp;
1793 int error;
1794 bool safe;
1795
1796 ki = job->userproc->p_aioinfo;
1797 error = aio_qbio(job->userproc, job);
1798 if (error >= 0)
1799 return (error);
1800 safe = false;
1801 if (fp->f_type == DTYPE_VNODE) {
1802 vp = fp->f_vnode;
1803 if (vp->v_type == VREG || vp->v_type == VDIR) {
1804 mp = fp->f_vnode->v_mount;
1805 if (mp == NULL || (mp->mnt_flag & MNT_LOCAL) != 0)
1806 safe = true;
1807 }
1808 }
1809 if (!(safe || enable_aio_unsafe)) {
1810 counted_warning(&unsafe_warningcnt,
1811 "is attempting to use unsafe AIO requests");
1812 return (EOPNOTSUPP);
1813 }
1814
1815 if (job->uaiocb.aio_lio_opcode & (LIO_WRITE | LIO_READ)) {
1816 aio_schedule(job, aio_process_rw);
1817 error = 0;
1818 } else if (job->uaiocb.aio_lio_opcode & LIO_SYNC) {
1819 AIO_LOCK(ki);
1820 TAILQ_FOREACH(job2, &ki->kaio_jobqueue, plist) {
1821 if (job2->fd_file == job->fd_file &&
1822 ((job2->uaiocb.aio_lio_opcode & LIO_SYNC) == 0) &&
1823 job2->seqno < job->seqno) {
1824 job2->jobflags |= KAIOCB_CHECKSYNC;
1825 job->pending++;
1826 }
1827 }
1828 if (job->pending != 0) {
1829 if (!aio_set_cancel_function_locked(job,
1830 aio_cancel_sync)) {
1831 AIO_UNLOCK(ki);
1832 aio_cancel(job);
1833 return (0);
1834 }
1835 TAILQ_INSERT_TAIL(&ki->kaio_syncqueue, job, list);
1836 AIO_UNLOCK(ki);
1837 return (0);
1838 }
1839 AIO_UNLOCK(ki);
1840 aio_schedule(job, aio_process_sync);
1841 error = 0;
1842 } else {
1843 error = EINVAL;
1844 }
1845 return (error);
1846 }
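/*
 * Editor's sketch of the aio_fsync(2) ordering implemented above: a sync
 * job only runs once every earlier I/O request (lower seqno) on the same
 * descriptor has finished. Illustrative only; assumes earlier aio_write()
 * calls on fd:
 *
 *	struct aiocb sc;
 *	memset(&sc, 0, sizeof(sc));
 *	sc.aio_fildes = fd;
 *	aio_fsync(O_SYNC, &sc);		completes after the earlier writes
 */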
1847
1848 static void
1849 aio_kick_nowait(struct proc *userp)
1850 {
1851 struct kaioinfo *ki = userp->p_aioinfo;
1852 struct aioproc *aiop;
1853
1854 mtx_assert(&aio_job_mtx, MA_OWNED);
1855 if ((aiop = TAILQ_FIRST(&aio_freeproc)) != NULL) {
1856 TAILQ_REMOVE(&aio_freeproc, aiop, list);
1857 aiop->aioprocflags &= ~AIOP_FREE;
1858 wakeup(aiop->aioproc);
1859 } else if (num_aio_resv_start + num_aio_procs < max_aio_procs &&
1860 ki->kaio_active_count + num_aio_resv_start < max_aio_per_proc) {
1861 taskqueue_enqueue(taskqueue_aiod_kick, &ki->kaio_task);
1862 }
1863 }
1864
1865 static int
1866 aio_kick(struct proc *userp)
1867 {
1868 struct kaioinfo *ki = userp->p_aioinfo;
1869 struct aioproc *aiop;
1870 int error, ret = 0;
1871
1872 mtx_assert(&aio_job_mtx, MA_OWNED);
1873 retryproc:
1874 if ((aiop = TAILQ_FIRST(&aio_freeproc)) != NULL) {
1875 TAILQ_REMOVE(&aio_freeproc, aiop, list);
1876 aiop->aioprocflags &= ~AIOP_FREE;
1877 wakeup(aiop->aioproc);
1878 } else if (num_aio_resv_start + num_aio_procs < max_aio_procs &&
1879 ki->kaio_active_count + num_aio_resv_start < max_aio_per_proc) {
1880 num_aio_resv_start++;
1881 mtx_unlock(&aio_job_mtx);
1882 error = aio_newproc(&num_aio_resv_start);
1883 mtx_lock(&aio_job_mtx);
1884 if (error) {
1885 num_aio_resv_start--;
1886 goto retryproc;
1887 }
1888 } else {
1889 ret = -1;
1890 }
1891 return (ret);
1892 }
1893
1894 static void
1895 aio_kick_helper(void *context, int pending)
1896 {
1897 struct proc *userp = context;
1898
1899 mtx_lock(&aio_job_mtx);
1900 while (--pending >= 0) {
1901 if (aio_kick(userp))
1902 break;
1903 }
1904 mtx_unlock(&aio_job_mtx);
1905 }
1906
1907 /*
1908  * Support the aio_return system call; as a side effect, kernel resources
1909  * are released.
1910 */
1911 static int
1912 kern_aio_return(struct thread *td, struct aiocb *ujob, struct aiocb_ops *ops)
1913 {
1914 struct proc *p = td->td_proc;
1915 struct kaiocb *job;
1916 struct kaioinfo *ki;
1917 long status, error;
1918
1919 ki = p->p_aioinfo;
1920 if (ki == NULL)
1921 return (EINVAL);
1922 AIO_LOCK(ki);
1923 TAILQ_FOREACH(job, &ki->kaio_done, plist) {
1924 if (job->ujob == ujob)
1925 break;
1926 }
1927 if (job != NULL) {
1928 MPASS(job->jobflags & KAIOCB_FINISHED);
1929 status = job->uaiocb._aiocb_private.status;
1930 error = job->uaiocb._aiocb_private.error;
1931 td->td_retval[0] = status;
1932 td->td_ru.ru_oublock += job->outblock;
1933 td->td_ru.ru_inblock += job->inblock;
1934 td->td_ru.ru_msgsnd += job->msgsnd;
1935 td->td_ru.ru_msgrcv += job->msgrcv;
1936 aio_free_entry(job);
1937 AIO_UNLOCK(ki);
1938 ops->store_error(ujob, error);
1939 ops->store_status(ujob, status);
1940 } else {
1941 error = EINVAL;
1942 AIO_UNLOCK(ki);
1943 }
1944 return (error);
1945 }
1946
1947 int
1948 sys_aio_return(struct thread *td, struct aio_return_args *uap)
1949 {
1950
1951 return (kern_aio_return(td, uap->aiocbp, &aiocb_ops));
1952 }
1953
1954 /*
1955  * Allow a process to wake up when any of the I/O requests is completed.
1956 */
1957 static int
1958 kern_aio_suspend(struct thread *td, int njoblist, struct aiocb **ujoblist,
1959 struct timespec *ts)
1960 {
1961 struct proc *p = td->td_proc;
1962 struct timeval atv;
1963 struct kaioinfo *ki;
1964 struct kaiocb *firstjob, *job;
1965 int error, i, timo;
1966
1967 timo = 0;
1968 if (ts) {
1969 if (ts->tv_nsec < 0 || ts->tv_nsec >= 1000000000)
1970 return (EINVAL);
1971
1972 TIMESPEC_TO_TIMEVAL(&atv, ts);
1973 if (itimerfix(&atv))
1974 return (EINVAL);
1975 timo = tvtohz(&atv);
1976 }
1977
1978 ki = p->p_aioinfo;
1979 if (ki == NULL)
1980 return (EAGAIN);
1981
1982 if (njoblist == 0)
1983 return (0);
1984
1985 AIO_LOCK(ki);
1986 for (;;) {
1987 firstjob = NULL;
1988 error = 0;
1989 TAILQ_FOREACH(job, &ki->kaio_all, allist) {
1990 for (i = 0; i < njoblist; i++) {
1991 if (job->ujob == ujoblist[i]) {
1992 if (firstjob == NULL)
1993 firstjob = job;
1994 if (job->jobflags & KAIOCB_FINISHED)
1995 goto RETURN;
1996 }
1997 }
1998 }
1999 		/* All of the listed requests have finished. */
2000 if (firstjob == NULL)
2001 break;
2002
2003 ki->kaio_flags |= KAIO_WAKEUP;
2004 error = msleep(&p->p_aioinfo, AIO_MTX(ki), PRIBIO | PCATCH,
2005 "aiospn", timo);
2006 if (error == ERESTART)
2007 error = EINTR;
2008 if (error)
2009 break;
2010 }
2011 RETURN:
2012 AIO_UNLOCK(ki);
2013 return (error);
2014 }
2015
2016 int
2017 sys_aio_suspend(struct thread *td, struct aio_suspend_args *uap)
2018 {
2019 struct timespec ts, *tsp;
2020 struct aiocb **ujoblist;
2021 int error;
2022
2023 if (uap->nent < 0 || uap->nent > max_aio_queue_per_proc)
2024 return (EINVAL);
2025
2026 if (uap->timeout) {
2027 /* Get timespec struct. */
2028 if ((error = copyin(uap->timeout, &ts, sizeof(ts))) != 0)
2029 return (error);
2030 tsp = &ts;
2031 } else
2032 tsp = NULL;
2033
2034 ujoblist = malloc(uap->nent * sizeof(ujoblist[0]), M_AIO, M_WAITOK);
2035 error = copyin(uap->aiocbp, ujoblist, uap->nent * sizeof(ujoblist[0]));
2036 if (error == 0)
2037 error = kern_aio_suspend(td, uap->nent, ujoblist, tsp);
2038 free(ujoblist, M_AIO);
2039 return (error);
2040 }
2041
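/*
 * Illustrative userland usage (not part of this file): block until one
 * of several previously queued requests completes or a timeout expires.
 * A minimal sketch; cb1 and cb2 are assumed to be outstanding aiocbs.
 */
#if 0
#include <aio.h>
#include <time.h>

static int
wait_for_either(const struct aiocb *cb1, const struct aiocb *cb2)
{
	const struct aiocb *const list[] = { cb1, cb2 };
	struct timespec ts = { .tv_sec = 1, .tv_nsec = 0 };

	/* Returns 0 once some listed request has finished. */
	return (aio_suspend(list, 2, &ts));
}
#endif
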
2042 /*
2043  * aio_cancel cancels any queued non-bio AIO operations not yet in progress.
2044 */
2045 int
2046 sys_aio_cancel(struct thread *td, struct aio_cancel_args *uap)
2047 {
2048 struct proc *p = td->td_proc;
2049 struct kaioinfo *ki;
2050 struct kaiocb *job, *jobn;
2051 struct file *fp;
2052 int error;
2053 int cancelled = 0;
2054 int notcancelled = 0;
2055 struct vnode *vp;
2056
2057 /* Lookup file object. */
2058 error = fget(td, uap->fd, &cap_no_rights, &fp);
2059 if (error)
2060 return (error);
2061
2062 ki = p->p_aioinfo;
2063 if (ki == NULL)
2064 goto done;
2065
2066 if (fp->f_type == DTYPE_VNODE) {
2067 vp = fp->f_vnode;
2068 if (vn_isdisk(vp)) {
2069 fdrop(fp, td);
2070 td->td_retval[0] = AIO_NOTCANCELED;
2071 return (0);
2072 }
2073 }
2074
2075 AIO_LOCK(ki);
2076 TAILQ_FOREACH_SAFE(job, &ki->kaio_jobqueue, plist, jobn) {
2077 if ((uap->fd == job->uaiocb.aio_fildes) &&
2078 ((uap->aiocbp == NULL) ||
2079 (uap->aiocbp == job->ujob))) {
2080 if (aio_cancel_job(p, ki, job)) {
2081 cancelled++;
2082 } else {
2083 notcancelled++;
2084 }
2085 if (uap->aiocbp != NULL)
2086 break;
2087 }
2088 }
2089 AIO_UNLOCK(ki);
2090
2091 done:
2092 fdrop(fp, td);
2093
2094 if (uap->aiocbp != NULL) {
2095 if (cancelled) {
2096 td->td_retval[0] = AIO_CANCELED;
2097 return (0);
2098 }
2099 }
2100
2101 if (notcancelled) {
2102 td->td_retval[0] = AIO_NOTCANCELED;
2103 return (0);
2104 }
2105
2106 if (cancelled) {
2107 td->td_retval[0] = AIO_CANCELED;
2108 return (0);
2109 }
2110
2111 td->td_retval[0] = AIO_ALLDONE;
2112
2113 return (0);
2114 }
2115
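/*
 * Illustrative userland usage (not part of this file): request
 * cancellation of a single queued aiocb and act on the verdict.
 * A minimal sketch.
 */
#if 0
#include <aio.h>

static void
cancel_one(int fd, struct aiocb *cb)
{
	switch (aio_cancel(fd, cb)) {
	case AIO_CANCELED:
		/* Cancelled; still reap it to free kernel resources. */
		(void)aio_return(cb);
		break;
	case AIO_NOTCANCELED:
		/* In progress; poll aio_error() until it finishes. */
		break;
	case AIO_ALLDONE:
		/* Already finished before the cancel request. */
		break;
	}
}
#endif
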
2116 /*
2117  * aio_error is implemented in the kernel for compatibility purposes only.
2118  * For a user-mode async implementation, it would be best done in a
2119  * userland subroutine.
2120 */
2121 static int
2122 kern_aio_error(struct thread *td, struct aiocb *ujob, struct aiocb_ops *ops)
2123 {
2124 struct proc *p = td->td_proc;
2125 struct kaiocb *job;
2126 struct kaioinfo *ki;
2127 int status;
2128
2129 ki = p->p_aioinfo;
2130 if (ki == NULL) {
2131 td->td_retval[0] = EINVAL;
2132 return (0);
2133 }
2134
2135 AIO_LOCK(ki);
2136 TAILQ_FOREACH(job, &ki->kaio_all, allist) {
2137 if (job->ujob == ujob) {
2138 if (job->jobflags & KAIOCB_FINISHED)
2139 td->td_retval[0] =
2140 job->uaiocb._aiocb_private.error;
2141 else
2142 td->td_retval[0] = EINPROGRESS;
2143 AIO_UNLOCK(ki);
2144 return (0);
2145 }
2146 }
2147 AIO_UNLOCK(ki);
2148
2149 /*
2150 	 * Hack for failure of aio_aqueue: report the submission error it stored.
2151 */
2152 status = ops->fetch_status(ujob);
2153 if (status == -1) {
2154 td->td_retval[0] = ops->fetch_error(ujob);
2155 return (0);
2156 }
2157
2158 td->td_retval[0] = EINVAL;
2159 return (0);
2160 }
2161
2162 int
2163 sys_aio_error(struct thread *td, struct aio_error_args *uap)
2164 {
2165
2166 return (kern_aio_error(td, uap->aiocbp, &aiocb_ops));
2167 }
2168
2169 /* syscall - asynchronous read from a file (REALTIME) */
2170 #ifdef COMPAT_FREEBSD6
2171 int
2172 freebsd6_aio_read(struct thread *td, struct freebsd6_aio_read_args *uap)
2173 {
2174
2175 return (aio_aqueue(td, (struct aiocb *)uap->aiocbp, NULL, LIO_READ,
2176 &aiocb_ops_osigevent));
2177 }
2178 #endif
2179
2180 int
2181 sys_aio_read(struct thread *td, struct aio_read_args *uap)
2182 {
2183
2184 return (aio_aqueue(td, uap->aiocbp, NULL, LIO_READ, &aiocb_ops));
2185 }
2186
2187 int
2188 sys_aio_readv(struct thread *td, struct aio_readv_args *uap)
2189 {
2190
2191 return (aio_aqueue(td, uap->aiocbp, NULL, LIO_READV, &aiocb_ops));
2192 }
2193
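/*
 * Illustrative userland usage (not part of this file): a vectored
 * asynchronous read with aio_readv(2).  A minimal sketch assuming the
 * vectored aiocb fields (aio_iov, aio_iovcnt) that accompany the
 * aio_readv()/aio_writev() interfaces.
 */
#if 0
#include <sys/types.h>
#include <sys/uio.h>
#include <aio.h>
#include <string.h>

static int
readv_async(int fd, struct iovec *iov, int iovcnt, off_t off,
    struct aiocb *cb)
{
	memset(cb, 0, sizeof(*cb));
	cb->aio_fildes = fd;
	cb->aio_iov = iov;
	cb->aio_iovcnt = iovcnt;
	cb->aio_offset = off;
	return (aio_readv(cb));	/* reap with aio_error()/aio_return() */
}
#endif
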
2194 /* syscall - asynchronous write to a file (REALTIME) */
2195 #ifdef COMPAT_FREEBSD6
2196 int
2197 freebsd6_aio_write(struct thread *td, struct freebsd6_aio_write_args *uap)
2198 {
2199
2200 return (aio_aqueue(td, (struct aiocb *)uap->aiocbp, NULL, LIO_WRITE,
2201 &aiocb_ops_osigevent));
2202 }
2203 #endif
2204
2205 int
2206 sys_aio_write(struct thread *td, struct aio_write_args *uap)
2207 {
2208
2209 return (aio_aqueue(td, uap->aiocbp, NULL, LIO_WRITE, &aiocb_ops));
2210 }
2211
2212 int
2213 sys_aio_writev(struct thread *td, struct aio_writev_args *uap)
2214 {
2215
2216 return (aio_aqueue(td, uap->aiocbp, NULL, LIO_WRITEV, &aiocb_ops));
2217 }
2218
2219 int
2220 sys_aio_mlock(struct thread *td, struct aio_mlock_args *uap)
2221 {
2222
2223 return (aio_aqueue(td, uap->aiocbp, NULL, LIO_MLOCK, &aiocb_ops));
2224 }
2225
2226 static int
2227 kern_lio_listio(struct thread *td, int mode, struct aiocb * const *uacb_list,
2228 struct aiocb **acb_list, int nent, struct sigevent *sig,
2229 struct aiocb_ops *ops)
2230 {
2231 struct proc *p = td->td_proc;
2232 struct aiocb *job;
2233 struct kaioinfo *ki;
2234 struct aioliojob *lj;
2235 struct kevent kev;
2236 int error;
2237 int nagain, nerror;
2238 int i;
2239
2240 if ((mode != LIO_NOWAIT) && (mode != LIO_WAIT))
2241 return (EINVAL);
2242
2243 if (nent < 0 || nent > max_aio_queue_per_proc)
2244 return (EINVAL);
2245
2246 if (p->p_aioinfo == NULL)
2247 aio_init_aioinfo(p);
2248
2249 ki = p->p_aioinfo;
2250
2251 lj = uma_zalloc(aiolio_zone, M_WAITOK);
2252 lj->lioj_flags = 0;
2253 lj->lioj_count = 0;
2254 lj->lioj_finished_count = 0;
2255 lj->lioj_signal.sigev_notify = SIGEV_NONE;
2256 knlist_init_mtx(&lj->klist, AIO_MTX(ki));
2257 ksiginfo_init(&lj->lioj_ksi);
2258
2259 /*
2260 * Setup signal.
2261 */
2262 if (sig && (mode == LIO_NOWAIT)) {
2263 bcopy(sig, &lj->lioj_signal, sizeof(lj->lioj_signal));
2264 if (lj->lioj_signal.sigev_notify == SIGEV_KEVENT) {
2265 /* Assume only new style KEVENT */
2266 memset(&kev, 0, sizeof(kev));
2267 kev.filter = EVFILT_LIO;
2268 kev.flags = EV_ADD | EV_ENABLE | EV_FLAG1;
2269 kev.ident = (uintptr_t)uacb_list; /* something unique */
2270 kev.data = (intptr_t)lj;
2271 /* pass user defined sigval data */
2272 kev.udata = lj->lioj_signal.sigev_value.sival_ptr;
2273 error = kqfd_register(
2274 lj->lioj_signal.sigev_notify_kqueue, &kev, td,
2275 M_WAITOK);
2276 if (error) {
2277 uma_zfree(aiolio_zone, lj);
2278 return (error);
2279 }
2280 } else if (lj->lioj_signal.sigev_notify == SIGEV_NONE) {
2281 ;
2282 } else if (lj->lioj_signal.sigev_notify == SIGEV_SIGNAL ||
2283 lj->lioj_signal.sigev_notify == SIGEV_THREAD_ID) {
2284 if (!_SIG_VALID(lj->lioj_signal.sigev_signo)) {
2285 uma_zfree(aiolio_zone, lj);
2286 				return (EINVAL);
2287 }
2288 lj->lioj_flags |= LIOJ_SIGNAL;
2289 } else {
2290 uma_zfree(aiolio_zone, lj);
2291 			return (EINVAL);
2292 }
2293 }
2294
2295 AIO_LOCK(ki);
2296 TAILQ_INSERT_TAIL(&ki->kaio_liojoblist, lj, lioj_list);
2297 /*
2298 	 * Add an extra aiocb count to keep the lio from being freed by
2299 	 * other threads doing aio_waitcomplete or aio_return, and to
2300 	 * prevent the event from being sent until we have queued all of
2301 	 * the requests.
2302 */
2303 lj->lioj_count = 1;
2304 AIO_UNLOCK(ki);
2305
2306 /*
2307 	 * Queue the I/O requests, counting transient and permanent failures.
2308 */
2309 nagain = 0;
2310 nerror = 0;
2311 for (i = 0; i < nent; i++) {
2312 job = acb_list[i];
2313 if (job != NULL) {
2314 error = aio_aqueue(td, job, lj, LIO_NOP, ops);
2315 if (error == EAGAIN)
2316 nagain++;
2317 else if (error != 0)
2318 nerror++;
2319 }
2320 }
2321
2322 error = 0;
2323 AIO_LOCK(ki);
2324 if (mode == LIO_WAIT) {
2325 while (lj->lioj_count - 1 != lj->lioj_finished_count) {
2326 ki->kaio_flags |= KAIO_WAKEUP;
2327 error = msleep(&p->p_aioinfo, AIO_MTX(ki),
2328 PRIBIO | PCATCH, "aiospn", 0);
2329 if (error == ERESTART)
2330 error = EINTR;
2331 if (error)
2332 break;
2333 }
2334 } else {
2335 if (lj->lioj_count - 1 == lj->lioj_finished_count) {
2336 if (lj->lioj_signal.sigev_notify == SIGEV_KEVENT) {
2337 lj->lioj_flags |= LIOJ_KEVENT_POSTED;
2338 KNOTE_LOCKED(&lj->klist, 1);
2339 }
2340 if ((lj->lioj_flags & (LIOJ_SIGNAL |
2341 LIOJ_SIGNAL_POSTED)) == LIOJ_SIGNAL &&
2342 (lj->lioj_signal.sigev_notify == SIGEV_SIGNAL ||
2343 lj->lioj_signal.sigev_notify == SIGEV_THREAD_ID)) {
2344 aio_sendsig(p, &lj->lioj_signal, &lj->lioj_ksi,
2345 lj->lioj_count != 1);
2346 lj->lioj_flags |= LIOJ_SIGNAL_POSTED;
2347 }
2348 }
2349 }
2350 lj->lioj_count--;
2351 if (lj->lioj_count == 0) {
2352 TAILQ_REMOVE(&ki->kaio_liojoblist, lj, lioj_list);
2353 knlist_delete(&lj->klist, curthread, 1);
2354 PROC_LOCK(p);
2355 sigqueue_take(&lj->lioj_ksi);
2356 PROC_UNLOCK(p);
2357 AIO_UNLOCK(ki);
2358 uma_zfree(aiolio_zone, lj);
2359 } else
2360 AIO_UNLOCK(ki);
2361
2362 if (nerror)
2363 return (EIO);
2364 else if (nagain)
2365 return (EAGAIN);
2366 else
2367 return (error);
2368 }
2369
2370 /* syscall - list directed I/O (REALTIME) */
2371 #ifdef COMPAT_FREEBSD6
2372 int
2373 freebsd6_lio_listio(struct thread *td, struct freebsd6_lio_listio_args *uap)
2374 {
2375 struct aiocb **acb_list;
2376 struct sigevent *sigp, sig;
2377 struct osigevent osig;
2378 int error, nent;
2379
2380 if ((uap->mode != LIO_NOWAIT) && (uap->mode != LIO_WAIT))
2381 return (EINVAL);
2382
2383 nent = uap->nent;
2384 if (nent < 0 || nent > max_aio_queue_per_proc)
2385 return (EINVAL);
2386
2387 if (uap->sig && (uap->mode == LIO_NOWAIT)) {
2388 error = copyin(uap->sig, &osig, sizeof(osig));
2389 if (error)
2390 return (error);
2391 error = convert_old_sigevent(&osig, &sig);
2392 if (error)
2393 return (error);
2394 sigp = &sig;
2395 } else
2396 sigp = NULL;
2397
2398 acb_list = malloc(sizeof(struct aiocb *) * nent, M_LIO, M_WAITOK);
2399 error = copyin(uap->acb_list, acb_list, nent * sizeof(acb_list[0]));
2400 if (error == 0)
2401 error = kern_lio_listio(td, uap->mode,
2402 (struct aiocb * const *)uap->acb_list, acb_list, nent, sigp,
2403 &aiocb_ops_osigevent);
2404 free(acb_list, M_LIO);
2405 return (error);
2406 }
2407 #endif
2408
2409 /* syscall - list directed I/O (REALTIME) */
2410 int
2411 sys_lio_listio(struct thread *td, struct lio_listio_args *uap)
2412 {
2413 struct aiocb **acb_list;
2414 struct sigevent *sigp, sig;
2415 int error, nent;
2416
2417 if ((uap->mode != LIO_NOWAIT) && (uap->mode != LIO_WAIT))
2418 return (EINVAL);
2419
2420 nent = uap->nent;
2421 if (nent < 0 || nent > max_aio_queue_per_proc)
2422 return (EINVAL);
2423
2424 if (uap->sig && (uap->mode == LIO_NOWAIT)) {
2425 error = copyin(uap->sig, &sig, sizeof(sig));
2426 if (error)
2427 return (error);
2428 sigp = &sig;
2429 } else
2430 sigp = NULL;
2431
2432 acb_list = malloc(sizeof(struct aiocb *) * nent, M_LIO, M_WAITOK);
2433 error = copyin(uap->acb_list, acb_list, nent * sizeof(acb_list[0]));
2434 if (error == 0)
2435 error = kern_lio_listio(td, uap->mode, uap->acb_list, acb_list,
2436 nent, sigp, &aiocb_ops);
2437 free(acb_list, M_LIO);
2438 return (error);
2439 }
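
/*
 * Illustrative userland usage (not part of this file): queue a batch
 * of writes and wait for all of them with LIO_WAIT.  A minimal sketch;
 * per-request status should still be checked with aio_error() and
 * aio_return().
 */
#if 0
#include <aio.h>

static int
write_pair(struct aiocb *a, struct aiocb *b)
{
	struct aiocb *const list[] = { a, b };

	a->aio_lio_opcode = LIO_WRITE;
	b->aio_lio_opcode = LIO_WRITE;
	return (lio_listio(LIO_WAIT, list, 2, NULL));
}
#endif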
2440
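/*
 * Release the resources attached to a completed bio.  For the pbuf
 * (bounce buffer) case, unmap the pages from the kernel address space,
 * drop the page holds, and return the pbuf to its zone; for the
 * unmapped case, just drop the holds and free the page array.  Both
 * paths update the outstanding-buffer accounting before the bio is
 * destroyed.
 */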
2441 static void
2442 aio_biocleanup(struct bio *bp)
2443 {
2444 struct kaiocb *job = (struct kaiocb *)bp->bio_caller1;
2445 struct kaioinfo *ki;
2446 struct buf *pbuf = (struct buf *)bp->bio_caller2;
2447
2448 /* Release mapping into kernel space. */
2449 if (pbuf != NULL) {
2450 MPASS(pbuf->b_npages <= atop(maxphys) + 1);
2451 pmap_qremove((vm_offset_t)pbuf->b_data, pbuf->b_npages);
2452 vm_page_unhold_pages(pbuf->b_pages, pbuf->b_npages);
2453 uma_zfree(pbuf_zone, pbuf);
2454 atomic_subtract_int(&num_buf_aio, 1);
2455 ki = job->userproc->p_aioinfo;
2456 AIO_LOCK(ki);
2457 ki->kaio_buffer_count--;
2458 AIO_UNLOCK(ki);
2459 } else {
2460 MPASS(bp->bio_ma_n <= atop(maxphys) + 1);
2461 vm_page_unhold_pages(bp->bio_ma, bp->bio_ma_n);
2462 free(bp->bio_ma, M_TEMP);
2463 atomic_subtract_int(&num_unmapped_aio, 1);
2464 }
2465 g_destroy_bio(bp);
2466 }
2467
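/*
 * Biodone callback for requests sent to disk devices: fold this bio's
 * transfer count and any error into the job, then complete the job
 * once its last outstanding bio has finished.
 */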
2468 static void
2469 aio_biowakeup(struct bio *bp)
2470 {
2471 struct kaiocb *job = (struct kaiocb *)bp->bio_caller1;
2472 size_t nbytes;
2473 long bcount = bp->bio_bcount;
2474 long resid = bp->bio_resid;
2475 int opcode, nblks;
2476 int bio_error = bp->bio_error;
2477 uint16_t flags = bp->bio_flags;
2478
2479 opcode = job->uaiocb.aio_lio_opcode;
2480
2481 aio_biocleanup(bp);
2482
2483 	nbytes = bcount - resid;
2484 atomic_add_acq_long(&job->nbytes, nbytes);
2485 nblks = btodb(nbytes);
2486 /*
2487 * If multiple bios experienced an error, the job will reflect the
2488 * error of whichever failed bio completed last.
2489 */
2490 if (flags & BIO_ERROR)
2491 atomic_set_int(&job->error, bio_error);
2492 if (opcode & LIO_WRITE)
2493 atomic_add_int(&job->outblock, nblks);
2494 else
2495 atomic_add_int(&job->inblock, nblks);
2496 atomic_subtract_int(&job->nbio, 1);
2497 
2499 if (atomic_load_int(&job->nbio) == 0) {
2500 if (atomic_load_int(&job->error))
2501 aio_complete(job, -1, job->error);
2502 else
2503 aio_complete(job, atomic_load_long(&job->nbytes), 0);
2504 }
2505 }
2506
2507 /* syscall - wait for the next completion of an aio request */
2508 static int
2509 kern_aio_waitcomplete(struct thread *td, struct aiocb **ujobp,
2510 struct timespec *ts, struct aiocb_ops *ops)
2511 {
2512 struct proc *p = td->td_proc;
2513 struct timeval atv;
2514 struct kaioinfo *ki;
2515 struct kaiocb *job;
2516 struct aiocb *ujob;
2517 long error, status;
2518 int timo;
2519
2520 ops->store_aiocb(ujobp, NULL);
2521
2522 if (ts == NULL) {
2523 timo = 0;
2524 } else if (ts->tv_sec == 0 && ts->tv_nsec == 0) {
2525 timo = -1;
2526 } else {
2527 if ((ts->tv_nsec < 0) || (ts->tv_nsec >= 1000000000))
2528 return (EINVAL);
2529
2530 TIMESPEC_TO_TIMEVAL(&atv, ts);
2531 if (itimerfix(&atv))
2532 return (EINVAL);
2533 timo = tvtohz(&atv);
2534 }
2535
2536 if (p->p_aioinfo == NULL)
2537 aio_init_aioinfo(p);
2538 ki = p->p_aioinfo;
2539
2540 error = 0;
2541 job = NULL;
2542 AIO_LOCK(ki);
2543 while ((job = TAILQ_FIRST(&ki->kaio_done)) == NULL) {
2544 if (timo == -1) {
2545 error = EWOULDBLOCK;
2546 break;
2547 }
2548 ki->kaio_flags |= KAIO_WAKEUP;
2549 error = msleep(&p->p_aioinfo, AIO_MTX(ki), PRIBIO | PCATCH,
2550 "aiowc", timo);
2551 if (timo && error == ERESTART)
2552 error = EINTR;
2553 if (error)
2554 break;
2555 }
2556
2557 if (job != NULL) {
2558 MPASS(job->jobflags & KAIOCB_FINISHED);
2559 ujob = job->ujob;
2560 status = job->uaiocb._aiocb_private.status;
2561 error = job->uaiocb._aiocb_private.error;
2562 td->td_retval[0] = status;
2563 td->td_ru.ru_oublock += job->outblock;
2564 td->td_ru.ru_inblock += job->inblock;
2565 td->td_ru.ru_msgsnd += job->msgsnd;
2566 td->td_ru.ru_msgrcv += job->msgrcv;
2567 aio_free_entry(job);
2568 AIO_UNLOCK(ki);
2569 ops->store_aiocb(ujobp, ujob);
2570 ops->store_error(ujob, error);
2571 ops->store_status(ujob, status);
2572 } else
2573 AIO_UNLOCK(ki);
2574
2575 return (error);
2576 }
2577
2578 int
2579 sys_aio_waitcomplete(struct thread *td, struct aio_waitcomplete_args *uap)
2580 {
2581 struct timespec ts, *tsp;
2582 int error;
2583
2584 if (uap->timeout) {
2585 /* Get timespec struct. */
2586 error = copyin(uap->timeout, &ts, sizeof(ts));
2587 if (error)
2588 return (error);
2589 tsp = &ts;
2590 } else
2591 tsp = NULL;
2592
2593 return (kern_aio_waitcomplete(td, uap->aiocbp, tsp, &aiocb_ops));
2594 }
2595
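/*
 * Illustrative userland usage (not part of this file): FreeBSD's
 * aio_waitcomplete(2) reaps whichever request finishes next,
 * combining the wait and the aio_return() step.  A minimal sketch.
 */
#if 0
#include <stddef.h>
#include <aio.h>

static ssize_t
reap_next(struct aiocb **cbp)
{
	/* Blocks until some request completes; *cbp names which one. */
	return (aio_waitcomplete(cbp, NULL));
}
#endif
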
2596 static int
2597 kern_aio_fsync(struct thread *td, int op, struct aiocb *ujob,
2598 struct aiocb_ops *ops)
2599 {
2600 int listop;
2601
2602 switch (op) {
2603 case O_SYNC:
2604 listop = LIO_SYNC;
2605 break;
2606 case O_DSYNC:
2607 listop = LIO_DSYNC;
2608 break;
2609 default:
2610 return (EINVAL);
2611 }
2612
2613 return (aio_aqueue(td, ujob, NULL, listop, ops));
2614 }
2615
2616 int
2617 sys_aio_fsync(struct thread *td, struct aio_fsync_args *uap)
2618 {
2619
2620 return (kern_aio_fsync(td, uap->op, uap->aiocbp, &aiocb_ops));
2621 }
2622
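/*
 * Illustrative userland usage (not part of this file): queue an
 * asynchronous fsync behind the writes already queued on a descriptor.
 * A minimal sketch.
 */
#if 0
#include <aio.h>
#include <fcntl.h>
#include <string.h>

static int
sync_async(int fd, struct aiocb *cb)
{
	memset(cb, 0, sizeof(*cb));
	cb->aio_fildes = fd;
	return (aio_fsync(O_SYNC, cb));	/* O_DSYNC syncs data only */
}
#endif
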
2623 /* kqueue attach function */
2624 static int
2625 filt_aioattach(struct knote *kn)
2626 {
2627 struct kaiocb *job;
2628
2629 job = (struct kaiocb *)(uintptr_t)kn->kn_sdata;
2630
2631 /*
2632 * The job pointer must be validated before using it, so
2633 * registration is restricted to the kernel; the user cannot
2634 * set EV_FLAG1.
2635 */
2636 if ((kn->kn_flags & EV_FLAG1) == 0)
2637 return (EPERM);
2638 kn->kn_ptr.p_aio = job;
2639 kn->kn_flags &= ~EV_FLAG1;
2640
2641 knlist_add(&job->klist, kn, 0);
2642
2643 return (0);
2644 }
2645
2646 /* kqueue detach function */
2647 static void
2648 filt_aiodetach(struct knote *kn)
2649 {
2650 struct knlist *knl;
2651
2652 knl = &kn->kn_ptr.p_aio->klist;
2653 knl->kl_lock(knl->kl_lockarg);
2654 if (!knlist_empty(knl))
2655 knlist_remove(knl, kn, 1);
2656 knl->kl_unlock(knl->kl_lockarg);
2657 }
2658
2659 /* kqueue filter function */
2660 /*ARGSUSED*/
2661 static int
2662 filt_aio(struct knote *kn, long hint)
2663 {
2664 struct kaiocb *job = kn->kn_ptr.p_aio;
2665
2666 kn->kn_data = job->uaiocb._aiocb_private.error;
2667 if (!(job->jobflags & KAIOCB_FINISHED))
2668 return (0);
2669 kn->kn_flags |= EV_EOF;
2670 return (1);
2671 }
2672
2673 /* kqueue attach function */
2674 static int
2675 filt_lioattach(struct knote *kn)
2676 {
2677 struct aioliojob *lj;
2678
2679 lj = (struct aioliojob *)(uintptr_t)kn->kn_sdata;
2680
2681 /*
2682 * The aioliojob pointer must be validated before using it, so
2683 * registration is restricted to the kernel; the user cannot
2684 * set EV_FLAG1.
2685 */
2686 if ((kn->kn_flags & EV_FLAG1) == 0)
2687 return (EPERM);
2688 kn->kn_ptr.p_lio = lj;
2689 kn->kn_flags &= ~EV_FLAG1;
2690
2691 knlist_add(&lj->klist, kn, 0);
2692
2693 return (0);
2694 }
2695
2696 /* kqueue detach function */
2697 static void
2698 filt_liodetach(struct knote *kn)
2699 {
2700 struct knlist *knl;
2701
2702 knl = &kn->kn_ptr.p_lio->klist;
2703 knl->kl_lock(knl->kl_lockarg);
2704 if (!knlist_empty(knl))
2705 knlist_remove(knl, kn, 1);
2706 knl->kl_unlock(knl->kl_lockarg);
2707 }
2708
2709 /* kqueue filter function */
2710 /*ARGSUSED*/
2711 static int
2712 filt_lio(struct knote *kn, long hint)
2713 {
2714 	struct aioliojob *lj = kn->kn_ptr.p_lio;
2715
2716 return (lj->lioj_flags & LIOJ_KEVENT_POSTED);
2717 }
2718
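/*
 * Illustrative userland usage (not part of this file): completion
 * notification through kqueue.  The aiocb requests SIGEV_KEVENT
 * delivery, and the resulting EVFILT_AIO event carries
 * sigev_value.sival_ptr back in its udata field.  A minimal sketch.
 */
#if 0
#include <sys/types.h>
#include <sys/event.h>
#include <aio.h>
#include <string.h>

static ssize_t
read_kq(int kq, int fd, void *buf, size_t len)
{
	struct aiocb cb;
	struct kevent ev;

	memset(&cb, 0, sizeof(cb));
	cb.aio_fildes = fd;
	cb.aio_buf = buf;
	cb.aio_nbytes = len;
	cb.aio_sigevent.sigev_notify = SIGEV_KEVENT;
	cb.aio_sigevent.sigev_notify_kqueue = kq;
	cb.aio_sigevent.sigev_value.sival_ptr = &cb;
	if (aio_read(&cb) != 0)
		return (-1);
	if (kevent(kq, NULL, 0, &ev, 1, NULL) != 1)
		return (-1);
	return (aio_return((struct aiocb *)ev.udata));	/* == &cb here */
}
#endif
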
2719 #ifdef COMPAT_FREEBSD32
2720 #include <sys/mount.h>
2721 #include <sys/socket.h>
2722 #include <sys/sysent.h>
2723 #include <compat/freebsd32/freebsd32.h>
2724 #include <compat/freebsd32/freebsd32_proto.h>
2725 #include <compat/freebsd32/freebsd32_signal.h>
2726 #include <compat/freebsd32/freebsd32_syscall.h>
2727 #include <compat/freebsd32/freebsd32_util.h>
2728
2729 struct __aiocb_private32 {
2730 int32_t status;
2731 int32_t error;
2732 uint32_t kernelinfo;
2733 };
2734
2735 #ifdef COMPAT_FREEBSD6
2736 typedef struct oaiocb32 {
2737 int aio_fildes; /* File descriptor */
2738 uint64_t aio_offset __packed; /* File offset for I/O */
2739 uint32_t aio_buf; /* I/O buffer in process space */
2740 uint32_t aio_nbytes; /* Number of bytes for I/O */
2741 struct osigevent32 aio_sigevent; /* Signal to deliver */
2742 int aio_lio_opcode; /* LIO opcode */
2743 int aio_reqprio; /* Request priority -- ignored */
2744 struct __aiocb_private32 _aiocb_private;
2745 } oaiocb32_t;
2746 #endif
2747
2748 typedef struct aiocb32 {
2749 int32_t aio_fildes; /* File descriptor */
2750 uint64_t aio_offset __packed; /* File offset for I/O */
2751 uint32_t aio_buf; /* I/O buffer in process space */
2752 uint32_t aio_nbytes; /* Number of bytes for I/O */
2753 int __spare__[2];
2754 uint32_t __spare2__;
2755 int aio_lio_opcode; /* LIO opcode */
2756 int aio_reqprio; /* Request priority -- ignored */
2757 struct __aiocb_private32 _aiocb_private;
2758 struct sigevent32 aio_sigevent; /* Signal to deliver */
2759 } aiocb32_t;
2760
2761 #ifdef COMPAT_FREEBSD6
2762 static int
2763 convert_old_sigevent32(struct osigevent32 *osig, struct sigevent *nsig)
2764 {
2765
2766 /*
2767 * Only SIGEV_NONE, SIGEV_SIGNAL, and SIGEV_KEVENT are
2768 * supported by AIO with the old sigevent structure.
2769 */
2770 CP(*osig, *nsig, sigev_notify);
2771 switch (nsig->sigev_notify) {
2772 case SIGEV_NONE:
2773 break;
2774 case SIGEV_SIGNAL:
2775 nsig->sigev_signo = osig->__sigev_u.__sigev_signo;
2776 break;
2777 case SIGEV_KEVENT:
2778 nsig->sigev_notify_kqueue =
2779 osig->__sigev_u.__sigev_notify_kqueue;
2780 PTRIN_CP(*osig, *nsig, sigev_value.sival_ptr);
2781 break;
2782 default:
2783 return (EINVAL);
2784 }
2785 return (0);
2786 }
2787
2788 static int
2789 aiocb32_copyin_old_sigevent(struct aiocb *ujob, struct kaiocb *kjob,
2790 int type __unused)
2791 {
2792 struct oaiocb32 job32;
2793 struct aiocb *kcb = &kjob->uaiocb;
2794 int error;
2795
2796 bzero(kcb, sizeof(struct aiocb));
2797 error = copyin(ujob, &job32, sizeof(job32));
2798 if (error)
2799 return (error);
2800
2801 /* No need to copyin aio_iov, because it did not exist in FreeBSD 6 */
2802
2803 CP(job32, *kcb, aio_fildes);
2804 CP(job32, *kcb, aio_offset);
2805 PTRIN_CP(job32, *kcb, aio_buf);
2806 CP(job32, *kcb, aio_nbytes);
2807 CP(job32, *kcb, aio_lio_opcode);
2808 CP(job32, *kcb, aio_reqprio);
2809 CP(job32, *kcb, _aiocb_private.status);
2810 CP(job32, *kcb, _aiocb_private.error);
2811 PTRIN_CP(job32, *kcb, _aiocb_private.kernelinfo);
2812 return (convert_old_sigevent32(&job32.aio_sigevent,
2813 &kcb->aio_sigevent));
2814 }
2815 #endif
2816
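/*
 * Copy in a 32-bit aiocb and convert it to the native layout: scalar
 * fields are copied with CP(), pointers are widened with PTRIN(), and
 * for vectored requests the 32-bit iovec array is converted into a
 * native uio.
 */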
2817 static int
2818 aiocb32_copyin(struct aiocb *ujob, struct kaiocb *kjob, int type)
2819 {
2820 struct aiocb32 job32;
2821 struct aiocb *kcb = &kjob->uaiocb;
2822 struct iovec32 *iov32;
2823 int error;
2824
2825 error = copyin(ujob, &job32, sizeof(job32));
2826 if (error)
2827 return (error);
2828 CP(job32, *kcb, aio_fildes);
2829 CP(job32, *kcb, aio_offset);
2830 CP(job32, *kcb, aio_lio_opcode);
2831 if (type == LIO_NOP)
2832 type = kcb->aio_lio_opcode;
2833 if (type & LIO_VECTORED) {
2834 iov32 = PTRIN(job32.aio_iov);
2835 CP(job32, *kcb, aio_iovcnt);
2836 /* malloc a uio and copy in the iovec */
2837 error = freebsd32_copyinuio(iov32,
2838 kcb->aio_iovcnt, &kjob->uiop);
2839 if (error)
2840 return (error);
2841 } else {
2842 PTRIN_CP(job32, *kcb, aio_buf);
2843 CP(job32, *kcb, aio_nbytes);
2844 }
2845 CP(job32, *kcb, aio_reqprio);
2846 CP(job32, *kcb, _aiocb_private.status);
2847 CP(job32, *kcb, _aiocb_private.error);
2848 PTRIN_CP(job32, *kcb, _aiocb_private.kernelinfo);
2849 error = convert_sigevent32(&job32.aio_sigevent, &kcb->aio_sigevent);
2850
2851 return (error);
2852 }
2853
2854 static long
2855 aiocb32_fetch_status(struct aiocb *ujob)
2856 {
2857 struct aiocb32 *ujob32;
2858
2859 ujob32 = (struct aiocb32 *)ujob;
2860 return (fuword32(&ujob32->_aiocb_private.status));
2861 }
2862
2863 static long
2864 aiocb32_fetch_error(struct aiocb *ujob)
2865 {
2866 struct aiocb32 *ujob32;
2867
2868 ujob32 = (struct aiocb32 *)ujob;
2869 return (fuword32(&ujob32->_aiocb_private.error));
2870 }
2871
2872 static int
2873 aiocb32_store_status(struct aiocb *ujob, long status)
2874 {
2875 struct aiocb32 *ujob32;
2876
2877 ujob32 = (struct aiocb32 *)ujob;
2878 return (suword32(&ujob32->_aiocb_private.status, status));
2879 }
2880
2881 static int
2882 aiocb32_store_error(struct aiocb *ujob, long error)
2883 {
2884 struct aiocb32 *ujob32;
2885
2886 ujob32 = (struct aiocb32 *)ujob;
2887 return (suword32(&ujob32->_aiocb_private.error, error));
2888 }
2889
2890 static int
2891 aiocb32_store_kernelinfo(struct aiocb *ujob, long jobref)
2892 {
2893 struct aiocb32 *ujob32;
2894
2895 ujob32 = (struct aiocb32 *)ujob;
2896 return (suword32(&ujob32->_aiocb_private.kernelinfo, jobref));
2897 }
2898
2899 static int
2900 aiocb32_store_aiocb(struct aiocb **ujobp, struct aiocb *ujob)
2901 {
2902
2903 return (suword32(ujobp, (long)ujob));
2904 }
2905
2906 static struct aiocb_ops aiocb32_ops = {
2907 .aio_copyin = aiocb32_copyin,
2908 .fetch_status = aiocb32_fetch_status,
2909 .fetch_error = aiocb32_fetch_error,
2910 .store_status = aiocb32_store_status,
2911 .store_error = aiocb32_store_error,
2912 .store_kernelinfo = aiocb32_store_kernelinfo,
2913 .store_aiocb = aiocb32_store_aiocb,
2914 };
2915
2916 #ifdef COMPAT_FREEBSD6
2917 static struct aiocb_ops aiocb32_ops_osigevent = {
2918 .aio_copyin = aiocb32_copyin_old_sigevent,
2919 .fetch_status = aiocb32_fetch_status,
2920 .fetch_error = aiocb32_fetch_error,
2921 .store_status = aiocb32_store_status,
2922 .store_error = aiocb32_store_error,
2923 .store_kernelinfo = aiocb32_store_kernelinfo,
2924 .store_aiocb = aiocb32_store_aiocb,
2925 };
2926 #endif
2927
2928 int
2929 freebsd32_aio_return(struct thread *td, struct freebsd32_aio_return_args *uap)
2930 {
2931
2932 return (kern_aio_return(td, (struct aiocb *)uap->aiocbp, &aiocb32_ops));
2933 }
2934
2935 int
2936 freebsd32_aio_suspend(struct thread *td, struct freebsd32_aio_suspend_args *uap)
2937 {
2938 struct timespec32 ts32;
2939 struct timespec ts, *tsp;
2940 struct aiocb **ujoblist;
2941 uint32_t *ujoblist32;
2942 int error, i;
2943
2944 if (uap->nent < 0 || uap->nent > max_aio_queue_per_proc)
2945 return (EINVAL);
2946
2947 if (uap->timeout) {
2948 /* Get timespec struct. */
2949 if ((error = copyin(uap->timeout, &ts32, sizeof(ts32))) != 0)
2950 return (error);
2951 CP(ts32, ts, tv_sec);
2952 CP(ts32, ts, tv_nsec);
2953 tsp = &ts;
2954 } else
2955 tsp = NULL;
2956
2957 ujoblist = malloc(uap->nent * sizeof(ujoblist[0]), M_AIO, M_WAITOK);
2958 ujoblist32 = (uint32_t *)ujoblist;
2959 error = copyin(uap->aiocbp, ujoblist32, uap->nent *
2960 sizeof(ujoblist32[0]));
2961 if (error == 0) {
2962 for (i = uap->nent - 1; i >= 0; i--)
2963 ujoblist[i] = PTRIN(ujoblist32[i]);
2964
2965 error = kern_aio_suspend(td, uap->nent, ujoblist, tsp);
2966 }
2967 free(ujoblist, M_AIO);
2968 return (error);
2969 }
2970
2971 int
2972 freebsd32_aio_error(struct thread *td, struct freebsd32_aio_error_args *uap)
2973 {
2974
2975 return (kern_aio_error(td, (struct aiocb *)uap->aiocbp, &aiocb32_ops));
2976 }
2977
2978 #ifdef COMPAT_FREEBSD6
2979 int
2980 freebsd6_freebsd32_aio_read(struct thread *td,
2981 struct freebsd6_freebsd32_aio_read_args *uap)
2982 {
2983
2984 return (aio_aqueue(td, (struct aiocb *)uap->aiocbp, NULL, LIO_READ,
2985 &aiocb32_ops_osigevent));
2986 }
2987 #endif
2988
2989 int
2990 freebsd32_aio_read(struct thread *td, struct freebsd32_aio_read_args *uap)
2991 {
2992
2993 return (aio_aqueue(td, (struct aiocb *)uap->aiocbp, NULL, LIO_READ,
2994 &aiocb32_ops));
2995 }
2996
2997 int
2998 freebsd32_aio_readv(struct thread *td, struct freebsd32_aio_readv_args *uap)
2999 {
3000
3001 return (aio_aqueue(td, (struct aiocb *)uap->aiocbp, NULL, LIO_READV,
3002 &aiocb32_ops));
3003 }
3004
3005 #ifdef COMPAT_FREEBSD6
3006 int
3007 freebsd6_freebsd32_aio_write(struct thread *td,
3008 struct freebsd6_freebsd32_aio_write_args *uap)
3009 {
3010
3011 return (aio_aqueue(td, (struct aiocb *)uap->aiocbp, NULL, LIO_WRITE,
3012 &aiocb32_ops_osigevent));
3013 }
3014 #endif
3015
3016 int
3017 freebsd32_aio_write(struct thread *td, struct freebsd32_aio_write_args *uap)
3018 {
3019
3020 return (aio_aqueue(td, (struct aiocb *)uap->aiocbp, NULL, LIO_WRITE,
3021 &aiocb32_ops));
3022 }
3023
3024 int
3025 freebsd32_aio_writev(struct thread *td, struct freebsd32_aio_writev_args *uap)
3026 {
3027
3028 return (aio_aqueue(td, (struct aiocb *)uap->aiocbp, NULL, LIO_WRITEV,
3029 &aiocb32_ops));
3030 }
3031
3032 int
3033 freebsd32_aio_mlock(struct thread *td, struct freebsd32_aio_mlock_args *uap)
3034 {
3035
3036 return (aio_aqueue(td, (struct aiocb *)uap->aiocbp, NULL, LIO_MLOCK,
3037 &aiocb32_ops));
3038 }
3039
3040 int
3041 freebsd32_aio_waitcomplete(struct thread *td,
3042 struct freebsd32_aio_waitcomplete_args *uap)
3043 {
3044 struct timespec32 ts32;
3045 struct timespec ts, *tsp;
3046 int error;
3047
3048 if (uap->timeout) {
3049 /* Get timespec struct. */
3050 error = copyin(uap->timeout, &ts32, sizeof(ts32));
3051 if (error)
3052 return (error);
3053 CP(ts32, ts, tv_sec);
3054 CP(ts32, ts, tv_nsec);
3055 tsp = &ts;
3056 } else
3057 tsp = NULL;
3058
3059 return (kern_aio_waitcomplete(td, (struct aiocb **)uap->aiocbp, tsp,
3060 &aiocb32_ops));
3061 }
3062
3063 int
3064 freebsd32_aio_fsync(struct thread *td, struct freebsd32_aio_fsync_args *uap)
3065 {
3066
3067 return (kern_aio_fsync(td, uap->op, (struct aiocb *)uap->aiocbp,
3068 &aiocb32_ops));
3069 }
3070
3071 #ifdef COMPAT_FREEBSD6
3072 int
3073 freebsd6_freebsd32_lio_listio(struct thread *td,
3074 struct freebsd6_freebsd32_lio_listio_args *uap)
3075 {
3076 struct aiocb **acb_list;
3077 struct sigevent *sigp, sig;
3078 struct osigevent32 osig;
3079 uint32_t *acb_list32;
3080 int error, i, nent;
3081
3082 if ((uap->mode != LIO_NOWAIT) && (uap->mode != LIO_WAIT))
3083 return (EINVAL);
3084
3085 nent = uap->nent;
3086 if (nent < 0 || nent > max_aio_queue_per_proc)
3087 return (EINVAL);
3088
3089 if (uap->sig && (uap->mode == LIO_NOWAIT)) {
3090 error = copyin(uap->sig, &osig, sizeof(osig));
3091 if (error)
3092 return (error);
3093 error = convert_old_sigevent32(&osig, &sig);
3094 if (error)
3095 return (error);
3096 sigp = &sig;
3097 } else
3098 sigp = NULL;
3099
3100 acb_list32 = malloc(sizeof(uint32_t) * nent, M_LIO, M_WAITOK);
3101 error = copyin(uap->acb_list, acb_list32, nent * sizeof(uint32_t));
3102 if (error) {
3103 free(acb_list32, M_LIO);
3104 return (error);
3105 }
3106 acb_list = malloc(sizeof(struct aiocb *) * nent, M_LIO, M_WAITOK);
3107 for (i = 0; i < nent; i++)
3108 acb_list[i] = PTRIN(acb_list32[i]);
3109 free(acb_list32, M_LIO);
3110
3111 error = kern_lio_listio(td, uap->mode,
3112 (struct aiocb * const *)uap->acb_list, acb_list, nent, sigp,
3113 &aiocb32_ops_osigevent);
3114 free(acb_list, M_LIO);
3115 return (error);
3116 }
3117 #endif
3118
3119 int
3120 freebsd32_lio_listio(struct thread *td, struct freebsd32_lio_listio_args *uap)
3121 {
3122 struct aiocb **acb_list;
3123 struct sigevent *sigp, sig;
3124 struct sigevent32 sig32;
3125 uint32_t *acb_list32;
3126 int error, i, nent;
3127
3128 if ((uap->mode != LIO_NOWAIT) && (uap->mode != LIO_WAIT))
3129 return (EINVAL);
3130
3131 nent = uap->nent;
3132 if (nent < 0 || nent > max_aio_queue_per_proc)
3133 return (EINVAL);
3134
3135 if (uap->sig && (uap->mode == LIO_NOWAIT)) {
3136 error = copyin(uap->sig, &sig32, sizeof(sig32));
3137 if (error)
3138 return (error);
3139 error = convert_sigevent32(&sig32, &sig);
3140 if (error)
3141 return (error);
3142 sigp = &sig;
3143 } else
3144 sigp = NULL;
3145
3146 acb_list32 = malloc(sizeof(uint32_t) * nent, M_LIO, M_WAITOK);
3147 error = copyin(uap->acb_list, acb_list32, nent * sizeof(uint32_t));
3148 if (error) {
3149 free(acb_list32, M_LIO);
3150 return (error);
3151 }
3152 acb_list = malloc(sizeof(struct aiocb *) * nent, M_LIO, M_WAITOK);
3153 for (i = 0; i < nent; i++)
3154 acb_list[i] = PTRIN(acb_list32[i]);
3155 free(acb_list32, M_LIO);
3156
3157 error = kern_lio_listio(td, uap->mode,
3158 (struct aiocb * const *)uap->acb_list, acb_list, nent, sigp,
3159 &aiocb32_ops);
3160 free(acb_list, M_LIO);
3161 return (error);
3162 }
3163
3164 #endif