/*-
 * SPDX-License-Identifier: BSD-4-Clause
 *
 * Copyright (c) 1994 John S. Dyson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice immediately at the beginning of the file, without modification,
 *    this list of conditions, and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Absolutely no warranty of function or purpose is made by the author
 *    John S. Dyson.
 * 4. Modifications may be freely made to this file if the above conditions
 *    are met.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: releng/12.0/sys/kern/kern_physio.c 333842 2018-05-19 04:09:58Z mmacy $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/conf.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/racct.h>
#include <sys/uio.h>
#include <geom/geom.h>

#include <vm/vm.h>
#include <vm/vm_page.h>
#include <vm/vm_extern.h>
#include <vm/vm_map.h>

41 int
42 physio(struct cdev *dev, struct uio *uio, int ioflag)
43 {
44 struct cdevsw *csw;
45 struct buf *pbuf;
46 struct bio *bp;
47 struct vm_page **pages;
48 caddr_t sa;
49 u_int iolen, poff;
50 int error, i, npages, maxpages;
51 vm_prot_t prot;
52
53 csw = dev->si_devsw;
54 npages = 0;
55 sa = NULL;
56 /* check if character device is being destroyed */
57 if (csw == NULL)
58 return (ENXIO);
59
60 /* XXX: sanity check */
61 if(dev->si_iosize_max < PAGE_SIZE) {
62 printf("WARNING: %s si_iosize_max=%d, using DFLTPHYS.\n",
63 devtoname(dev), dev->si_iosize_max);
64 dev->si_iosize_max = DFLTPHYS;
65 }
66
67 /*
68 * If the driver does not want I/O to be split, that means that we
69 * need to reject any requests that will not fit into one buffer.
70 */
71 if (dev->si_flags & SI_NOSPLIT &&
72 (uio->uio_resid > dev->si_iosize_max || uio->uio_resid > MAXPHYS ||
73 uio->uio_iovcnt > 1)) {
74 /*
75 * Tell the user why his I/O was rejected.
76 */
77 if (uio->uio_resid > dev->si_iosize_max)
78 uprintf("%s: request size=%zd > si_iosize_max=%d; "
79 "cannot split request\n", devtoname(dev),
80 uio->uio_resid, dev->si_iosize_max);
81 if (uio->uio_resid > MAXPHYS)
82 uprintf("%s: request size=%zd > MAXPHYS=%d; "
83 "cannot split request\n", devtoname(dev),
84 uio->uio_resid, MAXPHYS);
85 if (uio->uio_iovcnt > 1)
86 uprintf("%s: request vectors=%d > 1; "
87 "cannot split request\n", devtoname(dev),
88 uio->uio_iovcnt);
89 return (EFBIG);
90 }
91
92 /*
93 * Keep the process UPAGES from being swapped. Processes swapped
94 * out while holding pbufs, used by swapper, may lead to deadlock.
95 */
96 PHOLD(curproc);
97
98 bp = g_alloc_bio();
99 if (uio->uio_segflg != UIO_USERSPACE) {
100 pbuf = NULL;
101 pages = NULL;
102 } else if ((dev->si_flags & SI_UNMAPPED) && unmapped_buf_allowed) {
103 pbuf = NULL;
104 maxpages = btoc(MIN(uio->uio_resid, MAXPHYS)) + 1;
105 pages = malloc(sizeof(*pages) * maxpages, M_DEVBUF, M_WAITOK);
106 } else {
107 pbuf = getpbuf(NULL);
108 sa = pbuf->b_data;
109 maxpages = btoc(MAXPHYS);
110 pages = pbuf->b_pages;
111 }
112 prot = VM_PROT_READ;
113 if (uio->uio_rw == UIO_READ)
114 prot |= VM_PROT_WRITE; /* Less backwards than it looks */
115 error = 0;
116 for (i = 0; i < uio->uio_iovcnt; i++) {
117 #ifdef RACCT
118 if (racct_enable) {
119 PROC_LOCK(curproc);
120 if (uio->uio_rw == UIO_READ) {
121 racct_add_force(curproc, RACCT_READBPS,
122 uio->uio_iov[i].iov_len);
123 racct_add_force(curproc, RACCT_READIOPS, 1);
124 } else {
125 racct_add_force(curproc, RACCT_WRITEBPS,
126 uio->uio_iov[i].iov_len);
127 racct_add_force(curproc, RACCT_WRITEIOPS, 1);
128 }
129 PROC_UNLOCK(curproc);
130 }
131 #endif /* RACCT */
132
133 while (uio->uio_iov[i].iov_len) {
134 g_reset_bio(bp);
135 if (uio->uio_rw == UIO_READ) {
136 bp->bio_cmd = BIO_READ;
137 curthread->td_ru.ru_inblock++;
138 } else {
139 bp->bio_cmd = BIO_WRITE;
140 curthread->td_ru.ru_oublock++;
141 }
142 bp->bio_offset = uio->uio_offset;
143 bp->bio_data = uio->uio_iov[i].iov_base;
144 bp->bio_length = uio->uio_iov[i].iov_len;
145 if (bp->bio_length > dev->si_iosize_max)
146 bp->bio_length = dev->si_iosize_max;
147 if (bp->bio_length > MAXPHYS)
148 bp->bio_length = MAXPHYS;
149
150 /*
151 * Make sure the pbuf can map the request.
152 * The pbuf has kvasize = MAXPHYS, so a request
153 * larger than MAXPHYS - PAGE_SIZE must be
154 * page aligned or it will be fragmented.
155 */
156 poff = (vm_offset_t)bp->bio_data & PAGE_MASK;
157 if (pbuf && bp->bio_length + poff > pbuf->b_kvasize) {
158 if (dev->si_flags & SI_NOSPLIT) {
159 uprintf("%s: request ptr %p is not "
160 "on a page boundary; cannot split "
161 "request\n", devtoname(dev),
162 bp->bio_data);
163 error = EFBIG;
164 goto doerror;
165 }
166 bp->bio_length = pbuf->b_kvasize;
167 if (poff != 0)
168 bp->bio_length -= PAGE_SIZE;
169 }
170
171 bp->bio_bcount = bp->bio_length;
172 bp->bio_dev = dev;
173
174 if (pages) {
175 if ((npages = vm_fault_quick_hold_pages(
176 &curproc->p_vmspace->vm_map,
177 (vm_offset_t)bp->bio_data, bp->bio_length,
178 prot, pages, maxpages)) < 0) {
179 error = EFAULT;
180 goto doerror;
181 }
182 if (pbuf && sa) {
183 pmap_qenter((vm_offset_t)sa,
184 pages, npages);
185 bp->bio_data = sa + poff;
186 } else {
187 bp->bio_ma = pages;
188 bp->bio_ma_n = npages;
189 bp->bio_ma_offset = poff;
190 bp->bio_data = unmapped_buf;
191 bp->bio_flags |= BIO_UNMAPPED;
192 }
193 }
194
195 csw->d_strategy(bp);
196 if (uio->uio_rw == UIO_READ)
197 biowait(bp, "physrd");
198 else
199 biowait(bp, "physwr");
200
201 if (pages) {
202 if (pbuf)
203 pmap_qremove((vm_offset_t)sa, npages);
204 vm_page_unhold_pages(pages, npages);
205 }
206
207 iolen = bp->bio_length - bp->bio_resid;
208 if (iolen == 0 && !(bp->bio_flags & BIO_ERROR))
209 goto doerror; /* EOF */
210 uio->uio_iov[i].iov_len -= iolen;
211 uio->uio_iov[i].iov_base =
212 (char *)uio->uio_iov[i].iov_base + iolen;
213 uio->uio_resid -= iolen;
214 uio->uio_offset += iolen;
215 if (bp->bio_flags & BIO_ERROR) {
216 error = bp->bio_error;
217 goto doerror;
218 }
219 }
220 }
221 doerror:
222 if (pbuf)
223 relpbuf(pbuf, NULL);
224 else if (pages)
225 free(pages, M_DEVBUF);
226 g_destroy_bio(bp);
227 PRELE(curproc);
228 return (error);
229 }