FreeBSD/Linux Kernel Cross Reference
sys/kern/kern_subr.c
/*-
 * Copyright (c) 1982, 1986, 1991, 1993
 *      The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * @(#)kern_subr.c      8.3 (Berkeley) 1/21/94
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: releng/8.4/sys/kern/kern_subr.c 233649 2012-03-29 06:01:11Z alc $");

#include "opt_zero.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/mman.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/resourcevar.h>
#include <sys/sched.h>
#include <sys/sysctl.h>
#include <sys/vnode.h>

#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#ifdef ZERO_COPY_SOCKETS
#include <vm/vm_param.h>
#include <vm/vm_object.h>
#endif

SYSCTL_INT(_kern, KERN_IOV_MAX, iov_max, CTLFLAG_RD, NULL, UIO_MAXIOV,
    "Maximum number of elements in an I/O vector; sysconf(_SC_IOV_MAX)");

static int uiomove_faultflag(void *cp, int n, struct uio *uio, int nofault);

#ifdef ZERO_COPY_SOCKETS
/* Declared in uipc_socket.c */
extern int so_zero_copy_receive;

/*
 * Identify the physical page mapped at the given kernel virtual
 * address.  Insert this physical page into the given address space at
 * the given virtual address, replacing the physical page, if any,
 * that already exists there.
 */
static int
vm_pgmoveco(vm_map_t mapa, vm_offset_t kaddr, vm_offset_t uaddr)
{
        vm_map_t map = mapa;
        vm_page_t kern_pg, user_pg;
        vm_object_t uobject;
        vm_map_entry_t entry;
        vm_pindex_t upindex;
        vm_prot_t prot;
        boolean_t wired;

        KASSERT((uaddr & PAGE_MASK) == 0,
            ("vm_pgmoveco: uaddr is not page aligned"));

        /*
         * Herein the physical page is validated and dirtied.  It is
         * unwired in sf_buf_mext().
         */
        kern_pg = PHYS_TO_VM_PAGE(vtophys(kaddr));
        kern_pg->valid = VM_PAGE_BITS_ALL;
        KASSERT(kern_pg->queue == PQ_NONE && kern_pg->wire_count == 1,
            ("vm_pgmoveco: kern_pg is not correctly wired"));

        if ((vm_map_lookup(&map, uaddr,
            VM_PROT_WRITE, &entry, &uobject,
            &upindex, &prot, &wired)) != KERN_SUCCESS) {
                return (EFAULT);
        }
        VM_OBJECT_LOCK(uobject);
retry:
        if ((user_pg = vm_page_lookup(uobject, upindex)) != NULL) {
                if (vm_page_sleep_if_busy(user_pg, TRUE, "vm_pgmoveco"))
                        goto retry;
                vm_page_lock_queues();
                pmap_remove_all(user_pg);
                vm_page_free(user_pg);
        } else {
                /*
                 * Even if a physical page does not exist in the
                 * object chain's first object, a physical page from a
                 * backing object may be mapped read only.
                 */
                if (uobject->backing_object != NULL)
                        pmap_remove(map->pmap, uaddr, uaddr + PAGE_SIZE);
                vm_page_lock_queues();
        }
        vm_page_insert(kern_pg, uobject, upindex);
        vm_page_dirty(kern_pg);
        vm_page_unlock_queues();
        VM_OBJECT_UNLOCK(uobject);
        vm_map_lookup_done(map, entry);
        return (KERN_SUCCESS);
}
#endif /* ZERO_COPY_SOCKETS */

int
copyin_nofault(const void *udaddr, void *kaddr, size_t len)
{
        int error, save;

        save = vm_fault_disable_pagefaults();
        error = copyin(udaddr, kaddr, len);
        vm_fault_enable_pagefaults(save);
        return (error);
}

int
copyout_nofault(const void *kaddr, void *udaddr, size_t len)
{
        int error, save;

        save = vm_fault_disable_pagefaults();
        error = copyout(kaddr, udaddr, len);
        vm_fault_enable_pagefaults(save);
        return (error);
}

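#if 0
/*
 * Editor's illustrative sketch (not part of the original file): the
 * *_nofault copy routines suit callers that must not sleep, e.g. while
 * holding a non-sleepable mutex.  A plain copyout() may fault and
 * sleep; with page faults disabled the nofault variant fails with
 * EFAULT instead.  The function and softc layout below are assumed
 * names, not existing kernel API.
 */
struct foo_softc {
        struct mtx      sc_mtx;         /* non-sleepable lock */
        char            sc_buf[128];
};

static int
foo_peek(struct foo_softc *sc, void *udaddr)
{
        int error;

        mtx_lock(&sc->sc_mtx);
        error = copyout_nofault(sc->sc_buf, udaddr, sizeof(sc->sc_buf));
        mtx_unlock(&sc->sc_mtx);
        return (error);
}
#endif
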
int
uiomove(void *cp, int n, struct uio *uio)
{

        return (uiomove_faultflag(cp, n, uio, 0));
}

int
uiomove_nofault(void *cp, int n, struct uio *uio)
{

        return (uiomove_faultflag(cp, n, uio, 1));
}

static int
uiomove_faultflag(void *cp, int n, struct uio *uio, int nofault)
{
        struct thread *td;
        struct iovec *iov;
        u_int cnt;
        int error, newflags, save;

        td = curthread;
        error = 0;

        KASSERT(uio->uio_rw == UIO_READ || uio->uio_rw == UIO_WRITE,
            ("uiomove: mode"));
        KASSERT(uio->uio_segflg != UIO_USERSPACE || uio->uio_td == td,
            ("uiomove proc"));
        if (!nofault)
                WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL,
                    "Calling uiomove()");

        /* XXX does it make sense to set TDP_DEADLKTREAT for UIO_SYSSPACE? */
        newflags = TDP_DEADLKTREAT;
        if (uio->uio_segflg == UIO_USERSPACE && nofault) {
                /*
                 * Fail if a non-spurious page fault occurs.
                 */
                newflags |= TDP_NOFAULTING | TDP_RESETSPUR;
        }
        save = curthread_pflags_set(newflags);

        while (n > 0 && uio->uio_resid) {
                iov = uio->uio_iov;
                cnt = iov->iov_len;
                if (cnt == 0) {
                        uio->uio_iov++;
                        uio->uio_iovcnt--;
                        continue;
                }
                if (cnt > n)
                        cnt = n;

                switch (uio->uio_segflg) {

                case UIO_USERSPACE:
                        if (ticks - PCPU_GET(switchticks) >= hogticks)
                                uio_yield();
                        if (uio->uio_rw == UIO_READ)
                                error = copyout(cp, iov->iov_base, cnt);
                        else
                                error = copyin(iov->iov_base, cp, cnt);
                        if (error)
                                goto out;
                        break;

                case UIO_SYSSPACE:
                        if (uio->uio_rw == UIO_READ)
                                bcopy(cp, iov->iov_base, cnt);
                        else
                                bcopy(iov->iov_base, cp, cnt);
                        break;
                case UIO_NOCOPY:
                        break;
                }
                iov->iov_base = (char *)iov->iov_base + cnt;
                iov->iov_len -= cnt;
                uio->uio_resid -= cnt;
                uio->uio_offset += cnt;
                cp = (char *)cp + cnt;
                n -= cnt;
        }
out:
        curthread_pflags_restore(save);
        return (error);
}

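#if 0
/*
 * Editor's illustrative sketch (not part of the original file): the
 * canonical consumer of uiomove() is a character-device read routine.
 * uiomove() clamps the copy to uio_resid and advances the uio itself,
 * so the routine only bounds the request against its own buffer.
 * "echo_buf" and "echo_len" are assumed names; a real driver would
 * also include <sys/conf.h> for struct cdev.
 */
static char echo_buf[256];
static size_t echo_len;

static int
echo_read(struct cdev *dev, struct uio *uio, int ioflag)
{
        size_t amt;

        if (uio->uio_offset < 0 || (size_t)uio->uio_offset >= echo_len)
                return (0);             /* EOF */
        amt = MIN((size_t)uio->uio_resid, echo_len - (size_t)uio->uio_offset);
        return (uiomove(echo_buf + uio->uio_offset, amt, uio));
}
#endif
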
/*
 * Wrapper for uiomove() that validates the arguments against a known-good
 * kernel buffer.  Currently, uiomove accepts a signed (n) argument, which
 * is almost definitely a bad thing, so we catch that here as well.  We
 * return a runtime failure, but it might be desirable to generate a runtime
 * assertion failure instead.
 */
int
uiomove_frombuf(void *buf, int buflen, struct uio *uio)
{
        unsigned int offset, n;

        if (uio->uio_offset < 0 || uio->uio_resid < 0 ||
            (offset = uio->uio_offset) != uio->uio_offset)
                return (EINVAL);
        if (buflen <= 0 || offset >= buflen)
                return (0);
        if ((n = buflen - offset) > INT_MAX)
                return (EINVAL);
        return (uiomove((char *)buf + offset, n, uio));
}

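#if 0
/*
 * Editor's illustrative sketch (not part of the original file):
 * uiomove_frombuf() folds the offset/length clamping of the pattern
 * above into one call, which is convenient when exposing a complete,
 * fixed-size kernel buffer.  "status_read" is an assumed name.
 */
static int
status_read(struct cdev *dev, struct uio *uio, int ioflag)
{
        char status_text[64];
        int len;

        len = snprintf(status_text, sizeof(status_text), "up %d ticks\n",
            ticks);
        return (uiomove_frombuf(status_text, len, uio));
}
#endif
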
#ifdef ZERO_COPY_SOCKETS
/*
 * Experimental support for zero-copy I/O
 */
static int
userspaceco(void *cp, u_int cnt, struct uio *uio, int disposable)
{
        struct iovec *iov;
        int error;

        iov = uio->uio_iov;
        if (uio->uio_rw == UIO_READ) {
                if ((so_zero_copy_receive != 0)
                    && ((cnt & PAGE_MASK) == 0)
                    && ((((intptr_t) iov->iov_base) & PAGE_MASK) == 0)
                    && ((uio->uio_offset & PAGE_MASK) == 0)
                    && ((((intptr_t) cp) & PAGE_MASK) == 0)
                    && (disposable != 0)) {
                        /* SOCKET: use page-trading */
                        /*
                         * We only want to call vm_pgmoveco() on
                         * disposable pages, since it gives the
                         * kernel page to the userland process.
                         */
                        error = vm_pgmoveco(&curproc->p_vmspace->vm_map,
                            (vm_offset_t)cp, (vm_offset_t)iov->iov_base);

                        /*
                         * If we get an error back, attempt
                         * to use copyout() instead.  The
                         * disposable page should be freed
                         * automatically if we weren't able to move
                         * it into userland.
                         */
                        if (error != 0)
                                error = copyout(cp, iov->iov_base, cnt);
                } else {
                        error = copyout(cp, iov->iov_base, cnt);
                }
        } else {
                error = copyin(iov->iov_base, cp, cnt);
        }
        return (error);
}

int
uiomoveco(void *cp, int n, struct uio *uio, int disposable)
{
        struct iovec *iov;
        u_int cnt;
        int error;

        KASSERT(uio->uio_rw == UIO_READ || uio->uio_rw == UIO_WRITE,
            ("uiomoveco: mode"));
        KASSERT(uio->uio_segflg != UIO_USERSPACE || uio->uio_td == curthread,
            ("uiomoveco proc"));

        while (n > 0 && uio->uio_resid) {
                iov = uio->uio_iov;
                cnt = iov->iov_len;
                if (cnt == 0) {
                        uio->uio_iov++;
                        uio->uio_iovcnt--;
                        continue;
                }
                if (cnt > n)
                        cnt = n;

                switch (uio->uio_segflg) {

                case UIO_USERSPACE:
                        if (ticks - PCPU_GET(switchticks) >= hogticks)
                                uio_yield();

                        error = userspaceco(cp, cnt, uio, disposable);

                        if (error)
                                return (error);
                        break;

                case UIO_SYSSPACE:
                        if (uio->uio_rw == UIO_READ)
                                bcopy(cp, iov->iov_base, cnt);
                        else
                                bcopy(iov->iov_base, cp, cnt);
                        break;
                case UIO_NOCOPY:
                        break;
                }
                iov->iov_base = (char *)iov->iov_base + cnt;
                iov->iov_len -= cnt;
                uio->uio_resid -= cnt;
                uio->uio_offset += cnt;
                cp = (char *)cp + cnt;
                n -= cnt;
        }
        return (0);
}
#endif /* ZERO_COPY_SOCKETS */

/*
 * Give next character to user as result of read.
 */
int
ureadc(int c, struct uio *uio)
{
        struct iovec *iov;
        char *iov_base;

        WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL,
            "Calling ureadc()");

again:
        if (uio->uio_iovcnt == 0 || uio->uio_resid == 0)
                panic("ureadc");
        iov = uio->uio_iov;
        if (iov->iov_len == 0) {
                uio->uio_iovcnt--;
                uio->uio_iov++;
                goto again;
        }
        switch (uio->uio_segflg) {

        case UIO_USERSPACE:
                if (subyte(iov->iov_base, c) < 0)
                        return (EFAULT);
                break;

        case UIO_SYSSPACE:
                iov_base = iov->iov_base;
                *iov_base = c;
                iov->iov_base = iov_base;
                break;

        case UIO_NOCOPY:
                break;
        }
        iov->iov_base = (char *)iov->iov_base + 1;
        iov->iov_len--;
        uio->uio_resid--;
        uio->uio_offset++;
        return (0);
}

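#if 0
/*
 * Editor's illustrative sketch (not part of the original file):
 * ureadc() delivers one character per call, which suits producers
 * such as tty-style queues that yield a byte at a time.
 * "getc_from_queue" is an assumed helper returning the next byte,
 * or -1 when the queue is empty.
 */
static int
queue_read(struct uio *uio)
{
        int c, error;

        error = 0;
        while (uio->uio_resid > 0 && (c = getc_from_queue()) != -1) {
                error = ureadc(c, uio);
                if (error != 0)
                        break;
        }
        return (error);
}
#endif
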
/*
 * General routine to allocate a hash table with control of memory flags.
 */
void *
hashinit_flags(int elements, struct malloc_type *type, u_long *hashmask,
    int flags)
{
        long hashsize;
        LIST_HEAD(generic, generic) *hashtbl;
        int i;

        KASSERT(elements > 0, ("%s: bad elements", __func__));
        /* Exactly one of HASH_WAITOK and HASH_NOWAIT must be set. */
        KASSERT((flags & HASH_WAITOK) ^ (flags & HASH_NOWAIT),
            ("Bad flags (0x%x) passed to hashinit_flags", flags));

        for (hashsize = 1; hashsize <= elements; hashsize <<= 1)
                continue;
        hashsize >>= 1;

        if (flags & HASH_NOWAIT)
                hashtbl = malloc((u_long)hashsize * sizeof(*hashtbl),
                    type, M_NOWAIT);
        else
                hashtbl = malloc((u_long)hashsize * sizeof(*hashtbl),
                    type, M_WAITOK);

        if (hashtbl != NULL) {
                for (i = 0; i < hashsize; i++)
                        LIST_INIT(&hashtbl[i]);
                *hashmask = hashsize - 1;
        }
        return (hashtbl);
}

/*
 * Allocate and initialize a hash table with default flag: may sleep.
 */
void *
hashinit(int elements, struct malloc_type *type, u_long *hashmask)
{

        return (hashinit_flags(elements, type, hashmask, HASH_WAITOK));
}

void
hashdestroy(void *vhashtbl, struct malloc_type *type, u_long hashmask)
{
        LIST_HEAD(generic, generic) *hashtbl, *hp;

        hashtbl = vhashtbl;
        for (hp = hashtbl; hp <= &hashtbl[hashmask]; hp++)
                KASSERT(LIST_EMPTY(hp), ("%s: hash not empty", __func__));
        free(hashtbl, type);
}

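#if 0
/*
 * Editor's illustrative sketch (not part of the original file): a
 * typical hashinit()/hashdestroy() life cycle.  The table size is the
 * largest power of two not exceeding the element count, so the
 * returned mask can be ANDed onto any hash value.  "struct foo" and
 * "M_FOO" are assumed names; M_FOO would come from MALLOC_DEFINE().
 */
struct foo {
        LIST_ENTRY(foo) f_link;
        int             f_id;
};
static LIST_HEAD(foohead, foo) *foo_tbl;
static u_long foo_mask;

static void
foo_init(void)
{
        foo_tbl = hashinit(64, M_FOO, &foo_mask);       /* 64 buckets */
}

static struct foohead *
foo_bucket(int id)
{
        return (&foo_tbl[id & foo_mask]);
}

static void
foo_fini(void)
{
        /* Every bucket must be empty, or hashdestroy() asserts. */
        hashdestroy(foo_tbl, M_FOO, foo_mask);
}
#endif
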
static int primes[] = { 1, 13, 31, 61, 127, 251, 509, 761, 1021, 1531, 2039,
                        2557, 3067, 3583, 4093, 4603, 5119, 5623, 6143, 6653,
                        7159, 7673, 8191, 12281, 16381, 24571, 32749 };
#define NPRIMES (sizeof(primes) / sizeof(primes[0]))

/*
 * General routine to allocate a prime number sized hash table.
 */
void *
phashinit(int elements, struct malloc_type *type, u_long *nentries)
{
        long hashsize;
        LIST_HEAD(generic, generic) *hashtbl;
        int i;

        KASSERT(elements > 0, ("%s: bad elements", __func__));
        for (i = 1, hashsize = primes[1]; hashsize <= elements;) {
                i++;
                if (i == NPRIMES)
                        break;
                hashsize = primes[i];
        }
        hashsize = primes[i - 1];
        hashtbl = malloc((u_long)hashsize * sizeof(*hashtbl), type, M_WAITOK);
        for (i = 0; i < hashsize; i++)
                LIST_INIT(&hashtbl[i]);
        *nentries = hashsize;
        return (hashtbl);
}

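#if 0
/*
 * Editor's illustrative sketch (not part of the original file):
 * phashinit() reports a bucket count rather than a mask, so indices
 * are reduced modulo *nentries instead of by AND-ing.  With 100
 * elements the table gets 61 buckets, the largest listed prime that
 * does not exceed the element count.  "M_BAR" is an assumed name.
 */
static LIST_HEAD(barhead, bar) *bar_tbl;
static u_long bar_nbuckets;

static void
bar_init(void)
{
        bar_tbl = phashinit(100, M_BAR, &bar_nbuckets);
}

#define BAR_BUCKET(h)   (&bar_tbl[(h) % bar_nbuckets])
#endif
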
void
uio_yield(void)
{

        kern_yield(PRI_USER);
}

int
copyinfrom(const void * __restrict src, void * __restrict dst, size_t len,
    int seg)
{
        int error = 0;

        switch (seg) {
        case UIO_USERSPACE:
                error = copyin(src, dst, len);
                break;
        case UIO_SYSSPACE:
                bcopy(src, dst, len);
                break;
        default:
                panic("copyinfrom: bad seg %d\n", seg);
        }
        return (error);
}

int
copyinstrfrom(const void * __restrict src, void * __restrict dst, size_t len,
    size_t * __restrict copied, int seg)
{
        int error = 0;

        switch (seg) {
        case UIO_USERSPACE:
                error = copyinstr(src, dst, len, copied);
                break;
        case UIO_SYSSPACE:
                error = copystr(src, dst, len, copied);
                break;
        default:
                panic("copyinstrfrom: bad seg %d\n", seg);
        }
        return (error);
}

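#if 0
/*
 * Editor's illustrative sketch (not part of the original file):
 * copyinstrfrom() lets a single code path accept a string from either
 * address space, the way kern_*() syscall helpers take a uio_seg
 * argument.  "kern_foo" is an assumed function name.
 */
static int
kern_foo(struct thread *td, char *path, enum uio_seg pathseg)
{
        char *kpath;
        int error;

        kpath = malloc(MAXPATHLEN, M_TEMP, M_WAITOK);
        error = copyinstrfrom(path, kpath, MAXPATHLEN, NULL, pathseg);
        if (error == 0) {
                /* ... operate on the NUL-terminated kpath ... */
        }
        free(kpath, M_TEMP);
        return (error);
}
#endif
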
int
copyiniov(struct iovec *iovp, u_int iovcnt, struct iovec **iov, int error)
{
        u_int iovlen;

        *iov = NULL;
        if (iovcnt > UIO_MAXIOV)
                return (error);
        iovlen = iovcnt * sizeof (struct iovec);
        *iov = malloc(iovlen, M_IOV, M_WAITOK);
        error = copyin(iovp, *iov, iovlen);
        if (error) {
                free(*iov, M_IOV);
                *iov = NULL;
        }
        return (error);
}

int
copyinuio(struct iovec *iovp, u_int iovcnt, struct uio **uiop)
{
        struct iovec *iov;
        struct uio *uio;
        u_int iovlen;
        int error, i;

        *uiop = NULL;
        if (iovcnt > UIO_MAXIOV)
                return (EINVAL);
        iovlen = iovcnt * sizeof (struct iovec);
        uio = malloc(iovlen + sizeof *uio, M_IOV, M_WAITOK);
        iov = (struct iovec *)(uio + 1);
        error = copyin(iovp, iov, iovlen);
        if (error) {
                free(uio, M_IOV);
                return (error);
        }
        uio->uio_iov = iov;
        uio->uio_iovcnt = iovcnt;
        uio->uio_segflg = UIO_USERSPACE;
        uio->uio_offset = -1;
        uio->uio_resid = 0;
        for (i = 0; i < iovcnt; i++) {
                if (iov->iov_len > INT_MAX - uio->uio_resid) {
                        free(uio, M_IOV);
                        return (EINVAL);
                }
                uio->uio_resid += iov->iov_len;
                iov++;
        }
        *uiop = uio;
        return (0);
}

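#if 0
/*
 * Editor's illustrative sketch (not part of the original file): a
 * readv(2)-style handler builds its struct uio from the user's iovec
 * array with copyinuio(), fills in the fields the helper leaves unset
 * (uio_rw, uio_td, uio_offset), and frees the single allocation when
 * done.  "foo_readv" is an assumed name.
 */
static int
foo_readv(struct thread *td, struct iovec *uiovp, u_int iovcnt)
{
        struct uio *auio;
        int error;

        error = copyinuio(uiovp, iovcnt, &auio);
        if (error != 0)
                return (error);
        auio->uio_rw = UIO_READ;
        auio->uio_td = td;
        auio->uio_offset = 0;
        /* ... pass auio to the transfer routine, e.g. uiomove() ... */
        free(auio, M_IOV);
        return (error);
}
#endif
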
struct uio *
cloneuio(struct uio *uiop)
{
        struct uio *uio;
        int iovlen;

        iovlen = uiop->uio_iovcnt * sizeof (struct iovec);
        uio = malloc(iovlen + sizeof *uio, M_IOV, M_WAITOK);
        *uio = *uiop;
        uio->uio_iov = (struct iovec *)(uio + 1);
        bcopy(uiop->uio_iov, uio->uio_iov, iovlen);
        return (uio);
}

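#if 0
/*
 * Editor's illustrative sketch (not part of the original file): a
 * transfer consumes the uio (advancing iov_base and uio_resid), so a
 * caller that may need to retry can run the attempt on a clone and
 * keep the original intact.  "do_transfer" is an assumed function.
 */
static int
try_transfer(struct uio *uio)
{
        struct uio *tmp;
        int error;

        tmp = cloneuio(uio);
        error = do_transfer(tmp);
        free(tmp, M_IOV);
        /* On failure, the original uio is untouched and can be retried. */
        return (error);
}
#endif
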
/*
 * Map some anonymous memory in user space of size sz, rounded up to the page
 * boundary.
 */
int
copyout_map(struct thread *td, vm_offset_t *addr, size_t sz)
{
        struct vmspace *vms;
        int error;
        vm_size_t size;

        vms = td->td_proc->p_vmspace;

        /*
         * Map somewhere after heap in process memory.
         */
        PROC_LOCK(td->td_proc);
        *addr = round_page((vm_offset_t)vms->vm_daddr +
            lim_max(td->td_proc, RLIMIT_DATA));
        PROC_UNLOCK(td->td_proc);

        /* round size up to page boundary */
        size = (vm_size_t)round_page(sz);

        error = vm_mmap(&vms->vm_map, addr, size, PROT_READ | PROT_WRITE,
            VM_PROT_ALL, MAP_PRIVATE | MAP_ANON, OBJT_DEFAULT, NULL, 0);

        return (error);
}

/*
 * Unmap memory in user space.
 */
int
copyout_unmap(struct thread *td, vm_offset_t addr, size_t sz)
{
        vm_map_t map;
        vm_size_t size;

        if (sz == 0)
                return (0);

        map = &td->td_proc->p_vmspace->vm_map;
        size = (vm_size_t)round_page(sz);

        if (vm_map_remove(map, addr, addr + size) != KERN_SUCCESS)
                return (EINVAL);

        return (0);
}
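
#if 0
/*
 * Editor's illustrative sketch (not part of the original file):
 * staging a kernel buffer in a process's address space, the kind of
 * pattern compatibility layers use.  A fresh anonymous mapping is
 * created, filled with copyout(), and later removed.  "stage_in_user"
 * is an assumed name.
 */
static int
stage_in_user(struct thread *td, const void *kbuf, size_t len)
{
        vm_offset_t uaddr;
        int error;

        error = copyout_map(td, &uaddr, len);
        if (error != 0)
                return (error);
        error = copyout(kbuf, (void *)uaddr, len);
        /* ... let the process consume the data, then tear down ... */
        copyout_unmap(td, uaddr, len);
        return (error);
}
#endif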