FreeBSD/Linux Kernel Cross Reference
sys/kern/subr_uio.c
/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 1982, 1986, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Copyright (c) 2014 The FreeBSD Foundation
 *
 * Portions of this software were developed by Konstantin Belousov
 * under sponsorship from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_subr.c	8.3 (Berkeley) 1/21/94
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/mman.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/rwlock.h>
#include <sys/sched.h>
#include <sys/sysctl.h>
#include <sys/vnode.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_extern.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_map.h>

#include <machine/bus.h>

SYSCTL_INT(_kern, KERN_IOV_MAX, iov_max, CTLFLAG_RD, SYSCTL_NULL_INT_PTR, UIO_MAXIOV,
    "Maximum number of elements in an I/O vector; sysconf(_SC_IOV_MAX)");

static int	uiomove_faultflag(void *cp, int n, struct uio *uio, int nofault);

int
copyin_nofault(const void *udaddr, void *kaddr, size_t len)
{
        int error, save;

        save = vm_fault_disable_pagefaults();
        error = copyin(udaddr, kaddr, len);
        vm_fault_enable_pagefaults(save);
        return (error);
}

int
copyout_nofault(const void *kaddr, void *udaddr, size_t len)
{
        int error, save;

        save = vm_fault_disable_pagefaults();
        error = copyout(kaddr, udaddr, len);
        vm_fault_enable_pagefaults(save);
        return (error);
}
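
/*
 * Illustrative sketch, not part of this file: a caller that must not take a
 * page fault, e.g. while holding a non-sleepable mutex, can try the _nofault
 * copy first and fall back to an ordinary copyout() after dropping the lock.
 * The lock and buffer names below are hypothetical.
 */
#if 0
static int
export_stats_locked(struct mtx *lockp, const void *stats, void *uaddr,
    size_t len)
{
        int error;

        mtx_assert(lockp, MA_OWNED);
        error = copyout_nofault(stats, uaddr, len);
        if (error != 0) {
                /* Retry with faults allowed once the lock is dropped. */
                mtx_unlock(lockp);
                error = copyout(stats, uaddr, len);
                mtx_lock(lockp);
        }
        return (error);
}
#endif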

#define	PHYS_PAGE_COUNT(len)	(howmany(len, PAGE_SIZE) + 1)

int
physcopyin(void *src, vm_paddr_t dst, size_t len)
{
        vm_page_t m[PHYS_PAGE_COUNT(len)];
        struct iovec iov[1];
        struct uio uio;
        int i;

        iov[0].iov_base = src;
        iov[0].iov_len = len;
        uio.uio_iov = iov;
        uio.uio_iovcnt = 1;
        uio.uio_offset = 0;
        uio.uio_resid = len;
        uio.uio_segflg = UIO_SYSSPACE;
        uio.uio_rw = UIO_WRITE;
        for (i = 0; i < PHYS_PAGE_COUNT(len); i++, dst += PAGE_SIZE)
                m[i] = PHYS_TO_VM_PAGE(dst);
        return (uiomove_fromphys(m, dst & PAGE_MASK, len, &uio));
}

int
physcopyout(vm_paddr_t src, void *dst, size_t len)
{
        vm_page_t m[PHYS_PAGE_COUNT(len)];
        struct iovec iov[1];
        struct uio uio;
        int i;

        iov[0].iov_base = dst;
        iov[0].iov_len = len;
        uio.uio_iov = iov;
        uio.uio_iovcnt = 1;
        uio.uio_offset = 0;
        uio.uio_resid = len;
        uio.uio_segflg = UIO_SYSSPACE;
        uio.uio_rw = UIO_READ;
        for (i = 0; i < PHYS_PAGE_COUNT(len); i++, src += PAGE_SIZE)
                m[i] = PHYS_TO_VM_PAGE(src);
        return (uiomove_fromphys(m, src & PAGE_MASK, len, &uio));
}

#undef PHYS_PAGE_COUNT

int
physcopyin_vlist(bus_dma_segment_t *src, off_t offset, vm_paddr_t dst,
    size_t len)
{
        size_t seg_len;
        int error;

        error = 0;
        while (offset >= src->ds_len) {
                offset -= src->ds_len;
                src++;
        }

        while (len > 0 && error == 0) {
                seg_len = MIN(src->ds_len - offset, len);
                error = physcopyin((void *)(uintptr_t)(src->ds_addr + offset),
                    dst, seg_len);
                offset = 0;
                src++;
                len -= seg_len;
                dst += seg_len;
        }

        return (error);
}

int
physcopyout_vlist(vm_paddr_t src, bus_dma_segment_t *dst, off_t offset,
    size_t len)
{
        size_t seg_len;
        int error;

        error = 0;
        while (offset >= dst->ds_len) {
                offset -= dst->ds_len;
                dst++;
        }

        while (len > 0 && error == 0) {
                seg_len = MIN(dst->ds_len - offset, len);
                error = physcopyout(src, (void *)(uintptr_t)(dst->ds_addr +
                    offset), seg_len);
                offset = 0;
                dst++;
                len -= seg_len;
                src += seg_len;
        }

        return (error);
}
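
/*
 * Illustrative sketch, not part of this file: physcopyout() lets a driver
 * pull a small record out of a physically addressed buffer (for example one
 * described by a bus_dma segment) without mapping it first.  The ring layout
 * and structure name below are hypothetical.
 */
#if 0
struct compl_rec {
        uint64_t        id;
        uint32_t        status;
        uint32_t        len;
};

static int
read_completion(vm_paddr_t ring_pa, int slot, struct compl_rec *cr)
{

        /* The pages backing ring_pa must stay resident during the copy. */
        return (physcopyout(ring_pa + slot * sizeof(*cr), cr, sizeof(*cr)));
}
#endif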

int
uiomove(void *cp, int n, struct uio *uio)
{

        return (uiomove_faultflag(cp, n, uio, 0));
}

int
uiomove_nofault(void *cp, int n, struct uio *uio)
{

        return (uiomove_faultflag(cp, n, uio, 1));
}

static int
uiomove_faultflag(void *cp, int n, struct uio *uio, int nofault)
{
        struct iovec *iov;
        size_t cnt;
        int error, newflags, save;

        save = error = 0;

        KASSERT(uio->uio_rw == UIO_READ || uio->uio_rw == UIO_WRITE,
            ("uiomove: mode"));
        KASSERT(uio->uio_segflg != UIO_USERSPACE || uio->uio_td == curthread,
            ("uiomove proc"));

        if (uio->uio_segflg == UIO_USERSPACE) {
                newflags = TDP_DEADLKTREAT;
                if (nofault) {
                        /*
                         * Fail if a non-spurious page fault occurs.
                         */
                        newflags |= TDP_NOFAULTING | TDP_RESETSPUR;
                } else {
                        WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL,
                            "Calling uiomove()");
                }
                save = curthread_pflags_set(newflags);
        } else {
                KASSERT(nofault == 0, ("uiomove: nofault"));
        }

        while (n > 0 && uio->uio_resid) {
                iov = uio->uio_iov;
                cnt = iov->iov_len;
                if (cnt == 0) {
                        uio->uio_iov++;
                        uio->uio_iovcnt--;
                        continue;
                }
                if (cnt > n)
                        cnt = n;

                switch (uio->uio_segflg) {
                case UIO_USERSPACE:
                        maybe_yield();
                        if (uio->uio_rw == UIO_READ)
                                error = copyout(cp, iov->iov_base, cnt);
                        else
                                error = copyin(iov->iov_base, cp, cnt);
                        if (error)
                                goto out;
                        break;

                case UIO_SYSSPACE:
                        if (uio->uio_rw == UIO_READ)
                                bcopy(cp, iov->iov_base, cnt);
                        else
                                bcopy(iov->iov_base, cp, cnt);
                        break;
                case UIO_NOCOPY:
                        break;
                }
                iov->iov_base = (char *)iov->iov_base + cnt;
                iov->iov_len -= cnt;
                uio->uio_resid -= cnt;
                uio->uio_offset += cnt;
                cp = (char *)cp + cnt;
                n -= cnt;
        }
out:
        if (save)
                curthread_pflags_restore(save);
        return (error);
}

/*
 * Wrapper for uiomove() that validates the arguments against a known-good
 * kernel buffer.  Currently, uiomove accepts a signed (n) argument, which
 * is almost definitely a bad thing, so we catch that here as well.  We
 * return a runtime failure, but it might be desirable to generate a runtime
 * assertion failure instead.
 */
int
uiomove_frombuf(void *buf, int buflen, struct uio *uio)
{
        size_t offset, n;

        if (uio->uio_offset < 0 || uio->uio_resid < 0 ||
            (offset = uio->uio_offset) != uio->uio_offset)
                return (EINVAL);
        if (buflen <= 0 || offset >= buflen)
                return (0);
        if ((n = buflen - offset) > IOSIZE_MAX)
                return (EINVAL);
        return (uiomove((char *)buf + offset, n, uio));
}
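
/*
 * Illustrative sketch, not part of this file: the common consumer of
 * uiomove_frombuf() is a character-device read routine that exposes a
 * fixed-size kernel buffer; uio_offset and uio_resid select the slice to
 * copy and end-of-file falls out naturally.  The device and buffer below
 * are hypothetical.
 */
#if 0
static char status_text[128];           /* hypothetical kernel buffer */

static int
status_read(struct cdev *dev, struct uio *uio, int ioflag)
{

        return (uiomove_frombuf(status_text, strlen(status_text), uio));
}
#endif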

/*
 * Give next character to user as result of read.
 */
int
ureadc(int c, struct uio *uio)
{
        struct iovec *iov;
        char *iov_base;

        WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL,
            "Calling ureadc()");

again:
        if (uio->uio_iovcnt == 0 || uio->uio_resid == 0)
                panic("ureadc");
        iov = uio->uio_iov;
        if (iov->iov_len == 0) {
                uio->uio_iovcnt--;
                uio->uio_iov++;
                goto again;
        }
        switch (uio->uio_segflg) {
        case UIO_USERSPACE:
                if (subyte(iov->iov_base, c) < 0)
                        return (EFAULT);
                break;

        case UIO_SYSSPACE:
                iov_base = iov->iov_base;
                *iov_base = c;
                break;

        case UIO_NOCOPY:
                break;
        }
        iov->iov_base = (char *)iov->iov_base + 1;
        iov->iov_len--;
        uio->uio_resid--;
        uio->uio_offset++;
        return (0);
}
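
/*
 * Illustrative sketch, not part of this file: a read routine that produces
 * its output one byte at a time can hand each character to the caller with
 * ureadc() instead of staging an intermediate buffer.  The generator
 * function below is hypothetical.
 */
#if 0
static int
pattern_read(struct cdev *dev, struct uio *uio, int ioflag)
{
        int c, error;

        error = 0;
        while (uio->uio_resid > 0 && error == 0) {
                c = next_pattern_byte();        /* hypothetical generator */
                error = ureadc(c, uio);
        }
        return (error);
}
#endif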

int
copyiniov(const struct iovec *iovp, u_int iovcnt, struct iovec **iov, int error)
{
        u_int iovlen;

        *iov = NULL;
        if (iovcnt > UIO_MAXIOV)
                return (error);
        iovlen = iovcnt * sizeof (struct iovec);
        *iov = malloc(iovlen, M_IOV, M_WAITOK);
        error = copyin(iovp, *iov, iovlen);
        if (error) {
                free(*iov, M_IOV);
                *iov = NULL;
        }
        return (error);
}

int
copyinuio(const struct iovec *iovp, u_int iovcnt, struct uio **uiop)
{
        struct iovec *iov;
        struct uio *uio;
        u_int iovlen;
        int error, i;

        *uiop = NULL;
        if (iovcnt > UIO_MAXIOV)
                return (EINVAL);
        iovlen = iovcnt * sizeof (struct iovec);
        uio = malloc(iovlen + sizeof *uio, M_IOV, M_WAITOK);
        iov = (struct iovec *)(uio + 1);
        error = copyin(iovp, iov, iovlen);
        if (error) {
                free(uio, M_IOV);
                return (error);
        }
        uio->uio_iov = iov;
        uio->uio_iovcnt = iovcnt;
        uio->uio_segflg = UIO_USERSPACE;
        uio->uio_offset = -1;
        uio->uio_resid = 0;
        for (i = 0; i < iovcnt; i++) {
                if (iov->iov_len > IOSIZE_MAX - uio->uio_resid) {
                        free(uio, M_IOV);
                        return (EINVAL);
                }
                uio->uio_resid += iov->iov_len;
                iov++;
        }
        *uiop = uio;
        return (0);
}
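
/*
 * Illustrative sketch, not part of this file: scatter/gather system calls in
 * the style of readv(2) typically build their struct uio with copyinuio().
 * Because the iovec array lives in the same allocation as the uio, a single
 * free(..., M_IOV) releases both.  The handler below is a hypothetical,
 * abbreviated version of that pattern.
 */
#if 0
static int
gather_read(struct thread *td, int fd, const struct iovec *uiovp,
    u_int iovcnt)
{
        struct uio *auio;
        int error;

        error = copyinuio(uiovp, iovcnt, &auio);
        if (error != 0)
                return (error);
        error = kern_readv(td, fd, auio);
        free(auio, M_IOV);
        return (error);
}
#endif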

struct uio *
cloneuio(struct uio *uiop)
{
        struct uio *uio;
        int iovlen;

        iovlen = uiop->uio_iovcnt * sizeof (struct iovec);
        uio = malloc(iovlen + sizeof *uio, M_IOV, M_WAITOK);
        *uio = *uiop;
        uio->uio_iov = (struct iovec *)(uio + 1);
        bcopy(uiop->uio_iov, uio->uio_iov, iovlen);
        return (uio);
}
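
/*
 * Illustrative sketch, not part of this file: uiomove() advances the iovec
 * array as it copies, so a caller that may have to restart a transfer can
 * operate on a cloneuio() copy and keep the original request intact.  The
 * transfer function and EAGAIN retry policy below are hypothetical.
 */
#if 0
static int
transfer_with_retry(struct uio *uio, int retries)
{
        struct uio *tuio;
        int error;

        do {
                tuio = cloneuio(uio);
                error = attempt_transfer(tuio); /* hypothetical */
                free(tuio, M_IOV);
        } while (error == EAGAIN && retries-- > 0);
        return (error);
}
#endif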

/*
 * Map some anonymous memory in user space of size sz, rounded up to the page
 * boundary.
 */
int
copyout_map(struct thread *td, vm_offset_t *addr, size_t sz)
{
        struct vmspace *vms;
        int error;
        vm_size_t size;

        vms = td->td_proc->p_vmspace;

        /*
         * Map somewhere after heap in process memory.
         */
        *addr = round_page((vm_offset_t)vms->vm_daddr +
            lim_max(td, RLIMIT_DATA));

        /* round size up to page boundary */
        size = (vm_size_t)round_page(sz);
        if (size == 0)
                return (EINVAL);
        error = vm_mmap_object(&vms->vm_map, addr, size, VM_PROT_READ |
            VM_PROT_WRITE, VM_PROT_ALL, MAP_PRIVATE | MAP_ANON, NULL, 0,
            FALSE, td);
        return (error);
}

/*
 * Unmap memory in user space.
 */
int
copyout_unmap(struct thread *td, vm_offset_t addr, size_t sz)
{
        vm_map_t map;
        vm_size_t size;

        if (sz == 0)
                return (0);

        map = &td->td_proc->p_vmspace->vm_map;
        size = (vm_size_t)round_page(sz);

        if (vm_map_remove(map, addr, addr + size) != KERN_SUCCESS)
                return (EINVAL);

        return (0);
}
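
/*
 * Illustrative sketch, not part of this file: copyout_map() and
 * copyout_unmap() pair up when the kernel needs scratch space in the
 * process's own address space, e.g. to publish a variable-sized result.
 * The function and names below are hypothetical and error handling is
 * abbreviated.
 */
#if 0
static int
publish_result(struct thread *td, const void *buf, size_t len,
    vm_offset_t *uaddrp)
{
        vm_offset_t uaddr;
        int error;

        error = copyout_map(td, &uaddr, len);
        if (error != 0)
                return (error);
        error = copyout(buf, (void *)uaddr, len);
        if (error != 0)
                (void)copyout_unmap(td, uaddr, len);
        else
                *uaddrp = uaddr;
        return (error);
}
#endif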

int32_t
fuword32(volatile const void *addr)
{
        int rv;
        int32_t val;

        rv = fueword32(addr, &val);
        return (rv == -1 ? -1 : val);
}

#ifdef _LP64
int64_t
fuword64(volatile const void *addr)
{
        int rv;
        int64_t val;

        rv = fueword64(addr, &val);
        return (rv == -1 ? -1 : val);
}
#endif /* _LP64 */

long
fuword(volatile const void *addr)
{
        long val;
        int rv;

        rv = fueword(addr, &val);
        return (rv == -1 ? -1 : val);
}

uint32_t
casuword32(volatile uint32_t *addr, uint32_t old, uint32_t new)
{
        int rv;
        uint32_t val;

        rv = casueword32(addr, old, &val, new);
        return (rv == -1 ? -1 : val);
}

u_long
casuword(volatile u_long *addr, u_long old, u_long new)
{
        int rv;
        u_long val;

        rv = casueword(addr, old, &val, new);
        return (rv == -1 ? -1 : val);
}
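
/*
 * Illustrative sketch, not part of this file: the fueword/casueword forms
 * used above return the fetched word through a pointer, so a value that is
 * legitimately (u_long)-1 cannot be confused with a fault, unlike the older
 * fuword/casuword wrappers.  The user lock-word protocol below is
 * hypothetical.
 */
#if 0
static int
try_acquire_user_lock(volatile uint32_t *lockw, uint32_t owner)
{
        uint32_t old;
        int rv;

        rv = casueword32(lockw, 0, &old, owner);
        if (rv == -1)
                return (EFAULT);        /* user address was not accessible */
        if (rv != 0 || old != 0)
                return (EBUSY);         /* not stored, or already owned */
        return (0);
}
#endif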