FreeBSD/Linux Kernel Cross Reference
sys/kern/kern_subr.c
/*-
 * Copyright (c) 1982, 1986, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_subr.c	8.3 (Berkeley) 1/21/94
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: releng/8.3/sys/kern/kern_subr.c 231696 2012-02-14 17:35:44Z glebius $");

#include "opt_zero.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/mman.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/resourcevar.h>
#include <sys/sched.h>
#include <sys/sysctl.h>
#include <sys/vnode.h>

#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#ifdef ZERO_COPY_SOCKETS
#include <vm/vm_param.h>
#include <vm/vm_object.h>
#endif

SYSCTL_INT(_kern, KERN_IOV_MAX, iov_max, CTLFLAG_RD, NULL, UIO_MAXIOV,
    "Maximum number of elements in an I/O vector; sysconf(_SC_IOV_MAX)");

#ifdef ZERO_COPY_SOCKETS
/* Declared in uipc_socket.c */
extern int so_zero_copy_receive;

/*
 * Identify the physical page mapped at the given kernel virtual
 * address.  Insert this physical page into the given address space at
 * the given virtual address, replacing the physical page, if any,
 * that already exists there.
 */
static int
vm_pgmoveco(vm_map_t mapa, vm_offset_t kaddr, vm_offset_t uaddr)
{
        vm_map_t map = mapa;
        vm_page_t kern_pg, user_pg;
        vm_object_t uobject;
        vm_map_entry_t entry;
        vm_pindex_t upindex;
        vm_prot_t prot;
        boolean_t wired;

        KASSERT((uaddr & PAGE_MASK) == 0,
            ("vm_pgmoveco: uaddr is not page aligned"));

        /*
         * Herein the physical page is validated and dirtied.  It is
         * unwired in sf_buf_mext().
         */
        kern_pg = PHYS_TO_VM_PAGE(vtophys(kaddr));
        kern_pg->valid = VM_PAGE_BITS_ALL;
        KASSERT(kern_pg->queue == PQ_NONE && kern_pg->wire_count == 1,
            ("vm_pgmoveco: kern_pg is not correctly wired"));

        if ((vm_map_lookup(&map, uaddr,
            VM_PROT_WRITE, &entry, &uobject,
            &upindex, &prot, &wired)) != KERN_SUCCESS) {
                return (EFAULT);
        }
        VM_OBJECT_LOCK(uobject);
retry:
        if ((user_pg = vm_page_lookup(uobject, upindex)) != NULL) {
                if (vm_page_sleep_if_busy(user_pg, TRUE, "vm_pgmoveco"))
                        goto retry;
                vm_page_lock_queues();
                pmap_remove_all(user_pg);
                vm_page_free(user_pg);
        } else {
                /*
                 * Even if a physical page does not exist in the
                 * object chain's first object, a physical page from a
                 * backing object may be mapped read only.
                 */
                if (uobject->backing_object != NULL)
                        pmap_remove(map->pmap, uaddr, uaddr + PAGE_SIZE);
                vm_page_lock_queues();
        }
        vm_page_insert(kern_pg, uobject, upindex);
        vm_page_dirty(kern_pg);
        vm_page_unlock_queues();
        VM_OBJECT_UNLOCK(uobject);
        vm_map_lookup_done(map, entry);
        return (KERN_SUCCESS);
}
#endif /* ZERO_COPY_SOCKETS */

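/*
 * Copy up to n bytes between the kernel buffer cp and the user- or
 * kernel-space regions described by uio, in the direction given by
 * uio->uio_rw, advancing the iovec, resid, and offset fields as data
 * is transferred.
 */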
int
uiomove(void *cp, int n, struct uio *uio)
{
        struct thread *td = curthread;
        struct iovec *iov;
        u_int cnt;
        int error = 0;
        int save = 0;

        KASSERT(uio->uio_rw == UIO_READ || uio->uio_rw == UIO_WRITE,
            ("uiomove: mode"));
        KASSERT(uio->uio_segflg != UIO_USERSPACE || uio->uio_td == curthread,
            ("uiomove proc"));
        WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL,
            "Calling uiomove()");

        save = td->td_pflags & TDP_DEADLKTREAT;
        td->td_pflags |= TDP_DEADLKTREAT;

        while (n > 0 && uio->uio_resid) {
                iov = uio->uio_iov;
                cnt = iov->iov_len;
                if (cnt == 0) {
                        uio->uio_iov++;
                        uio->uio_iovcnt--;
                        continue;
                }
                if (cnt > n)
                        cnt = n;

                switch (uio->uio_segflg) {

                case UIO_USERSPACE:
                        if (ticks - PCPU_GET(switchticks) >= hogticks)
                                uio_yield();
                        if (uio->uio_rw == UIO_READ)
                                error = copyout(cp, iov->iov_base, cnt);
                        else
                                error = copyin(iov->iov_base, cp, cnt);
                        if (error)
                                goto out;
                        break;

                case UIO_SYSSPACE:
                        if (uio->uio_rw == UIO_READ)
                                bcopy(cp, iov->iov_base, cnt);
                        else
                                bcopy(iov->iov_base, cp, cnt);
                        break;
                case UIO_NOCOPY:
                        break;
                }
                iov->iov_base = (char *)iov->iov_base + cnt;
                iov->iov_len -= cnt;
                uio->uio_resid -= cnt;
                uio->uio_offset += cnt;
                cp = (char *)cp + cnt;
                n -= cnt;
        }
out:
        if (save == 0)
                td->td_pflags &= ~TDP_DEADLKTREAT;
        return (error);
}
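
/*
 * A minimal usage sketch (the device, softc, and names are hypothetical):
 * a cdev read routine exposing a driver-owned buffer.  uiomove() consumes
 * uio_resid and advances uio_offset, so short and repeated reads need no
 * extra bookkeeping here.
 *
 *	static int
 *	foo_read(struct cdev *dev, struct uio *uio, int ioflag)
 *	{
 *		struct foo_softc *sc = dev->si_drv1;
 *
 *		if (uio->uio_offset < 0 || uio->uio_offset >= sc->foo_len)
 *			return (0);
 *		return (uiomove(sc->foo_buf + uio->uio_offset,
 *		    MIN(sc->foo_len - uio->uio_offset, uio->uio_resid), uio));
 *	}
 */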

/*
 * Wrapper for uiomove() that validates the arguments against a known-good
 * kernel buffer.  Currently, uiomove accepts a signed (n) argument, which
 * is almost certainly a bad thing, so we catch that here as well.  We
 * return a runtime failure, but it might be desirable to generate a runtime
 * assertion failure instead.
 */
int
uiomove_frombuf(void *buf, int buflen, struct uio *uio)
{
        unsigned int offset, n;

        if (uio->uio_offset < 0 || uio->uio_resid < 0 ||
            (offset = uio->uio_offset) != uio->uio_offset)
                return (EINVAL);
        if (buflen <= 0 || offset >= buflen)
                return (0);
        if ((n = buflen - offset) > INT_MAX)
                return (EINVAL);
        return (uiomove((char *)buf + offset, n, uio));
}
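
/*
 * A minimal usage sketch (hypothetical device and names): because
 * uiomove_frombuf() does the offset and length clamping above, a read
 * routine for a complete in-memory snapshot reduces to one call.
 *
 *	static int
 *	bar_read(struct cdev *dev, struct uio *uio, int ioflag)
 *	{
 *		char snap[128];
 *		int len;
 *
 *		len = snprintf(snap, sizeof(snap), "state %d\n", bar_state);
 *		return (uiomove_frombuf(snap, len, uio));
 *	}
 */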

#ifdef ZERO_COPY_SOCKETS
/*
 * Experimental support for zero-copy I/O
 */
static int
userspaceco(void *cp, u_int cnt, struct uio *uio, int disposable)
{
        struct iovec *iov;
        int error;

        iov = uio->uio_iov;
        if (uio->uio_rw == UIO_READ) {
                if ((so_zero_copy_receive != 0)
                 && ((cnt & PAGE_MASK) == 0)
                 && ((((intptr_t) iov->iov_base) & PAGE_MASK) == 0)
                 && ((uio->uio_offset & PAGE_MASK) == 0)
                 && ((((intptr_t) cp) & PAGE_MASK) == 0)
                 && (disposable != 0)) {
                        /* SOCKET: use page-trading */
                        /*
                         * We only want to call vm_pgmoveco() on
                         * disposable pages, since it gives the
                         * kernel page to the userland process.
                         */
                        error = vm_pgmoveco(&curproc->p_vmspace->vm_map,
                            (vm_offset_t)cp, (vm_offset_t)iov->iov_base);

                        /*
                         * If we get an error back, attempt
                         * to use copyout() instead.  The
                         * disposable page should be freed
                         * automatically if we weren't able to move
                         * it into userland.
                         */
                        if (error != 0)
                                error = copyout(cp, iov->iov_base, cnt);
                } else {
                        error = copyout(cp, iov->iov_base, cnt);
                }
        } else {
                error = copyin(iov->iov_base, cp, cnt);
        }
        return (error);
}

int
uiomoveco(void *cp, int n, struct uio *uio, int disposable)
{
        struct iovec *iov;
        u_int cnt;
        int error;

        KASSERT(uio->uio_rw == UIO_READ || uio->uio_rw == UIO_WRITE,
            ("uiomoveco: mode"));
        KASSERT(uio->uio_segflg != UIO_USERSPACE || uio->uio_td == curthread,
            ("uiomoveco proc"));

        while (n > 0 && uio->uio_resid) {
                iov = uio->uio_iov;
                cnt = iov->iov_len;
                if (cnt == 0) {
                        uio->uio_iov++;
                        uio->uio_iovcnt--;
                        continue;
                }
                if (cnt > n)
                        cnt = n;

                switch (uio->uio_segflg) {

                case UIO_USERSPACE:
                        if (ticks - PCPU_GET(switchticks) >= hogticks)
                                uio_yield();

                        error = userspaceco(cp, cnt, uio, disposable);

                        if (error)
                                return (error);
                        break;

                case UIO_SYSSPACE:
                        if (uio->uio_rw == UIO_READ)
                                bcopy(cp, iov->iov_base, cnt);
                        else
                                bcopy(iov->iov_base, cp, cnt);
                        break;
                case UIO_NOCOPY:
                        break;
                }
                iov->iov_base = (char *)iov->iov_base + cnt;
                iov->iov_len -= cnt;
                uio->uio_resid -= cnt;
                uio->uio_offset += cnt;
                cp = (char *)cp + cnt;
                n -= cnt;
        }
        return (0);
}
#endif /* ZERO_COPY_SOCKETS */
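
/*
 * A hedged sketch of when the page-trading fast path above can apply
 * (the in-tree caller is the socket receive path): the kernel buffer,
 * the user iovec base, the length, and uio_offset must all be page
 * aligned and the kernel pages must be disposable, or userspaceco()
 * quietly falls back to copyout().
 *
 *	if (so_zero_copy_receive != 0)
 *		error = uiomoveco(kva, PAGE_SIZE, uio, 1);
 */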

/*
 * Give next character to user as result of read.
 */
int
ureadc(int c, struct uio *uio)
{
        struct iovec *iov;
        char *iov_base;

        WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL,
            "Calling ureadc()");

again:
        if (uio->uio_iovcnt == 0 || uio->uio_resid == 0)
                panic("ureadc");
        iov = uio->uio_iov;
        if (iov->iov_len == 0) {
                uio->uio_iovcnt--;
                uio->uio_iov++;
                goto again;
        }
        switch (uio->uio_segflg) {

        case UIO_USERSPACE:
                if (subyte(iov->iov_base, c) < 0)
                        return (EFAULT);
                break;

        case UIO_SYSSPACE:
                iov_base = iov->iov_base;
                *iov_base = c;
                iov->iov_base = iov_base;
                break;

        case UIO_NOCOPY:
                break;
        }
        iov->iov_base = (char *)iov->iov_base + 1;
        iov->iov_len--;
        uio->uio_resid--;
        uio->uio_offset++;
        return (0);
}
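
/*
 * A minimal usage sketch (the ring buffer helper is hypothetical):
 * handing buffered input to a reader one character at a time, which is
 * how tty-style drivers traditionally consumed ureadc().
 *
 *	while (uio->uio_resid > 0 && (c = ringbuf_getc(&sc->rb)) != -1) {
 *		error = ureadc(c, uio);
 *		if (error != 0)
 *			break;
 *	}
 */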

/*
 * General routine to allocate a hash table with control of memory flags.
 */
void *
hashinit_flags(int elements, struct malloc_type *type, u_long *hashmask,
    int flags)
{
        long hashsize;
        LIST_HEAD(generic, generic) *hashtbl;
        int i;

        KASSERT(elements > 0, ("%s: bad elements", __func__));
        /* Exactly one of HASH_WAITOK and HASH_NOWAIT must be set. */
        KASSERT((flags & HASH_WAITOK) ^ (flags & HASH_NOWAIT),
            ("Bad flags (0x%x) passed to hashinit_flags", flags));

        for (hashsize = 1; hashsize <= elements; hashsize <<= 1)
                continue;
        hashsize >>= 1;

        if (flags & HASH_NOWAIT)
                hashtbl = malloc((u_long)hashsize * sizeof(*hashtbl),
                    type, M_NOWAIT);
        else
                hashtbl = malloc((u_long)hashsize * sizeof(*hashtbl),
                    type, M_WAITOK);

        if (hashtbl != NULL) {
                for (i = 0; i < hashsize; i++)
                        LIST_INIT(&hashtbl[i]);
                *hashmask = hashsize - 1;
        }
        return (hashtbl);
}

/*
 * Allocate and initialize a hash table with default flag: may sleep.
 */
void *
hashinit(int elements, struct malloc_type *type, u_long *hashmask)
{

        return (hashinit_flags(elements, type, hashmask, HASH_WAITOK));
}

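/*
 * A minimal usage sketch (M_DEVBUF and the key are illustrative): the
 * table size is the largest power of two not exceeding elements, so a
 * bucket is chosen by masking a hash value with *hashmask.
 *
 *	u_long mask;
 *
 *	tbl = hashinit(desired, M_DEVBUF, &mask);
 *	LIST_INSERT_HEAD(&tbl[key & mask], obj, link);
 */

/*
 * Free a hash table allocated by hashinit() or hashinit_flags(),
 * asserting that every chain has already been emptied.
 */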
void
hashdestroy(void *vhashtbl, struct malloc_type *type, u_long hashmask)
{
        LIST_HEAD(generic, generic) *hashtbl, *hp;

        hashtbl = vhashtbl;
        for (hp = hashtbl; hp <= &hashtbl[hashmask]; hp++)
                KASSERT(LIST_EMPTY(hp), ("%s: hash not empty", __func__));
        free(hashtbl, type);
}

static int primes[] = { 1, 13, 31, 61, 127, 251, 509, 761, 1021, 1531, 2039,
                        2557, 3067, 3583, 4093, 4603, 5119, 5623, 6143, 6653,
                        7159, 7673, 8191, 12281, 16381, 24571, 32749 };
#define NPRIMES (sizeof(primes) / sizeof(primes[0]))

/*
 * General routine to allocate a prime number sized hash table.
 */
void *
phashinit(int elements, struct malloc_type *type, u_long *nentries)
{
        long hashsize;
        LIST_HEAD(generic, generic) *hashtbl;
        int i;

        KASSERT(elements > 0, ("%s: bad elements", __func__));
        for (i = 1, hashsize = primes[1]; hashsize <= elements;) {
                i++;
                if (i == NPRIMES)
                        break;
                hashsize = primes[i];
        }
        hashsize = primes[i - 1];
        hashtbl = malloc((u_long)hashsize * sizeof(*hashtbl), type, M_WAITOK);
        for (i = 0; i < hashsize; i++)
                LIST_INIT(&hashtbl[i]);
        *nentries = hashsize;
        return (hashtbl);
}
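
/*
 * A minimal usage sketch: unlike hashinit(), the returned size is a
 * prime rather than a power of two, so a bucket is chosen by modulus
 * with *nentries instead of by masking.
 *
 *	u_long nbuckets;
 *
 *	tbl = phashinit(desired, M_DEVBUF, &nbuckets);
 *	LIST_INSERT_HEAD(&tbl[key % nbuckets], obj, link);
 */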

void
uio_yield(void)
{

        kern_yield(PRI_USER);
}

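/*
 * Copy len bytes into the kernel buffer dst from src, which resides in
 * the address space named by seg (user or kernel space).
 */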
int
copyinfrom(const void * __restrict src, void * __restrict dst, size_t len,
    int seg)
{
        int error = 0;

        switch (seg) {
        case UIO_USERSPACE:
                error = copyin(src, dst, len);
                break;
        case UIO_SYSSPACE:
                bcopy(src, dst, len);
                break;
        default:
                panic("copyinfrom: bad seg %d\n", seg);
        }
        return (error);
}

int
copyinstrfrom(const void * __restrict src, void * __restrict dst, size_t len,
    size_t * __restrict copied, int seg)
{
        int error = 0;

        switch (seg) {
        case UIO_USERSPACE:
                error = copyinstr(src, dst, len, copied);
                break;
        case UIO_SYSSPACE:
                error = copystr(src, dst, len, copied);
                break;
        default:
                panic("copyinstrfrom: bad seg %d\n", seg);
        }
        return (error);
}

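/*
 * Copy an array of iovcnt iovec structures in from user space into a
 * freshly allocated kernel array; the caller-supplied error value is
 * returned when iovcnt exceeds UIO_MAXIOV, and *iov is left NULL on
 * any failure.
 */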
int
copyiniov(struct iovec *iovp, u_int iovcnt, struct iovec **iov, int error)
{
        u_int iovlen;

        *iov = NULL;
        if (iovcnt > UIO_MAXIOV)
                return (error);
        iovlen = iovcnt * sizeof (struct iovec);
        *iov = malloc(iovlen, M_IOV, M_WAITOK);
        error = copyin(iovp, *iov, iovlen);
        if (error) {
                free(*iov, M_IOV);
                *iov = NULL;
        }
        return (error);
}

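/*
 * Build a kernel struct uio, with its iovec array appended to the same
 * allocation, from a user-space iovec array, rejecting requests whose
 * total length would overflow the int-sized uio_resid.
 */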
int
copyinuio(struct iovec *iovp, u_int iovcnt, struct uio **uiop)
{
        struct iovec *iov;
        struct uio *uio;
        u_int iovlen;
        int error, i;

        *uiop = NULL;
        if (iovcnt > UIO_MAXIOV)
                return (EINVAL);
        iovlen = iovcnt * sizeof (struct iovec);
        uio = malloc(iovlen + sizeof *uio, M_IOV, M_WAITOK);
        iov = (struct iovec *)(uio + 1);
        error = copyin(iovp, iov, iovlen);
        if (error) {
                free(uio, M_IOV);
                return (error);
        }
        uio->uio_iov = iov;
        uio->uio_iovcnt = iovcnt;
        uio->uio_segflg = UIO_USERSPACE;
        uio->uio_offset = -1;
        uio->uio_resid = 0;
        for (i = 0; i < iovcnt; i++) {
                if (iov->iov_len > INT_MAX - uio->uio_resid) {
                        free(uio, M_IOV);
                        return (EINVAL);
                }
                uio->uio_resid += iov->iov_len;
                iov++;
        }
        *uiop = uio;
        return (0);
}

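/*
 * A minimal usage sketch (the syscall argument names are hypothetical):
 * the uio and its embedded iovec array come from one allocation and are
 * released with a single free().
 *
 *	error = copyinuio(uap->iovp, uap->iovcnt, &auio);
 *	if (error != 0)
 *		return (error);
 *	auio->uio_rw = UIO_READ;
 *	...
 *	free(auio, M_IOV);
 */

/*
 * Duplicate an existing uio, including its iovec array, into a single
 * M_IOV allocation; the caller releases it with free(uio, M_IOV).
 */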
struct uio *
cloneuio(struct uio *uiop)
{
        struct uio *uio;
        int iovlen;

        iovlen = uiop->uio_iovcnt * sizeof (struct iovec);
        uio = malloc(iovlen + sizeof *uio, M_IOV, M_WAITOK);
        *uio = *uiop;
        uio->uio_iov = (struct iovec *)(uio + 1);
        bcopy(uiop->uio_iov, uio->uio_iov, iovlen);
        return (uio);
}

/*
 * Map some anonymous memory in user space of size sz, rounded up to the page
 * boundary.
 */
int
copyout_map(struct thread *td, vm_offset_t *addr, size_t sz)
{
        struct vmspace *vms;
        int error;
        vm_size_t size;

        vms = td->td_proc->p_vmspace;

        /*
         * Map somewhere after heap in process memory.
         */
        PROC_LOCK(td->td_proc);
        *addr = round_page((vm_offset_t)vms->vm_daddr +
            lim_max(td->td_proc, RLIMIT_DATA));
        PROC_UNLOCK(td->td_proc);

        /* round size up to page boundary */
        size = (vm_size_t)round_page(sz);

        error = vm_mmap(&vms->vm_map, addr, size, PROT_READ | PROT_WRITE,
            VM_PROT_ALL, MAP_PRIVATE | MAP_ANON, OBJT_DEFAULT, NULL, 0);

        return (error);
}

/*
 * Unmap memory in user space.
 */
int
copyout_unmap(struct thread *td, vm_offset_t addr, size_t sz)
{
        vm_map_t map;
        vm_size_t size;

        if (sz == 0)
                return (0);

        map = &td->td_proc->p_vmspace->vm_map;
        size = (vm_size_t)round_page(sz);

        if (vm_map_remove(map, addr, addr + size) != KERN_SUCCESS)
                return (EINVAL);

        return (0);
}
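
/*
 * A minimal pairing sketch (hypothetical caller): stage kernel data
 * into the target process and tear the mapping down if the copy fails.
 *
 *	vm_offset_t uaddr;
 *
 *	error = copyout_map(td, &uaddr, len);
 *	if (error == 0) {
 *		error = copyout(kbuf, (void *)uaddr, len);
 *		if (error != 0)
 *			(void)copyout_unmap(td, uaddr, len);
 *	}
 */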