FreeBSD/Linux Kernel Cross Reference
sys/kern/kern_subr.c
/*-
 * Copyright (c) 1982, 1986, 1991, 1993
 *      The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * @(#)kern_subr.c      8.3 (Berkeley) 1/21/94
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_zero.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/resourcevar.h>
#include <sys/sched.h>
#include <sys/sysctl.h>
#include <sys/vnode.h>

#include <vm/vm.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#ifdef ZERO_COPY_SOCKETS
#include <vm/vm_param.h>
#include <vm/vm_object.h>
#endif
SYSCTL_INT(_kern, KERN_IOV_MAX, iov_max, CTLFLAG_RD, NULL, UIO_MAXIOV,
    "Maximum number of elements in an I/O vector; sysconf(_SC_IOV_MAX)");

#ifdef ZERO_COPY_SOCKETS
/* Declared in uipc_socket.c */
extern int so_zero_copy_receive;

/*
 * Identify the physical page mapped at the given kernel virtual
 * address.  Insert this physical page into the given address space at
 * the given virtual address, replacing the physical page, if any,
 * that already exists there.
 */
static int
vm_pgmoveco(vm_map_t mapa, vm_offset_t kaddr, vm_offset_t uaddr)
{
        vm_map_t map = mapa;
        vm_page_t kern_pg, user_pg;
        vm_object_t uobject;
        vm_map_entry_t entry;
        vm_pindex_t upindex;
        vm_prot_t prot;
        boolean_t wired;

        KASSERT((uaddr & PAGE_MASK) == 0,
            ("vm_pgmoveco: uaddr is not page aligned"));

        /*
         * Herein the physical page is validated and dirtied.  It is
         * unwired in sf_buf_mext().
         */
        kern_pg = PHYS_TO_VM_PAGE(vtophys(kaddr));
        kern_pg->valid = VM_PAGE_BITS_ALL;
        KASSERT(kern_pg->queue == PQ_NONE && kern_pg->wire_count == 1,
            ("vm_pgmoveco: kern_pg is not correctly wired"));

        if ((vm_map_lookup(&map, uaddr,
            VM_PROT_WRITE, &entry, &uobject,
            &upindex, &prot, &wired)) != KERN_SUCCESS) {
                return (EFAULT);
        }
        VM_OBJECT_LOCK(uobject);
retry:
        if ((user_pg = vm_page_lookup(uobject, upindex)) != NULL) {
                if (vm_page_sleep_if_busy(user_pg, TRUE, "vm_pgmoveco"))
                        goto retry;
                vm_page_lock_queues();
                pmap_remove_all(user_pg);
                vm_page_free(user_pg);
        } else {
                /*
                 * Even if a physical page does not exist in the
                 * object chain's first object, a physical page from a
                 * backing object may be mapped read only.
                 */
                if (uobject->backing_object != NULL)
                        pmap_remove(map->pmap, uaddr, uaddr + PAGE_SIZE);
                vm_page_lock_queues();
        }
        vm_page_insert(kern_pg, uobject, upindex);
        vm_page_dirty(kern_pg);
        vm_page_unlock_queues();
        VM_OBJECT_UNLOCK(uobject);
        vm_map_lookup_done(map, entry);
        return (KERN_SUCCESS);
}
#endif /* ZERO_COPY_SOCKETS */

int
uiomove(void *cp, int n, struct uio *uio)
{
        struct thread *td = curthread;
        struct iovec *iov;
        u_int cnt;
        int error = 0;
        int save = 0;

        KASSERT(uio->uio_rw == UIO_READ || uio->uio_rw == UIO_WRITE,
            ("uiomove: mode"));
        KASSERT(uio->uio_segflg != UIO_USERSPACE || uio->uio_td == curthread,
            ("uiomove proc"));
        WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL,
            "Calling uiomove()");

        save = td->td_pflags & TDP_DEADLKTREAT;
        td->td_pflags |= TDP_DEADLKTREAT;

        while (n > 0 && uio->uio_resid) {
                iov = uio->uio_iov;
                cnt = iov->iov_len;
                if (cnt == 0) {
                        uio->uio_iov++;
                        uio->uio_iovcnt--;
                        continue;
                }
                if (cnt > n)
                        cnt = n;

                switch (uio->uio_segflg) {

                case UIO_USERSPACE:
                        if (ticks - PCPU_GET(switchticks) >= hogticks)
                                uio_yield();
                        if (uio->uio_rw == UIO_READ)
                                error = copyout(cp, iov->iov_base, cnt);
                        else
                                error = copyin(iov->iov_base, cp, cnt);
                        if (error)
                                goto out;
                        break;

                case UIO_SYSSPACE:
                        if (uio->uio_rw == UIO_READ)
                                bcopy(cp, iov->iov_base, cnt);
                        else
                                bcopy(iov->iov_base, cp, cnt);
                        break;
                case UIO_NOCOPY:
                        break;
                }
                iov->iov_base = (char *)iov->iov_base + cnt;
                iov->iov_len -= cnt;
                uio->uio_resid -= cnt;
                uio->uio_offset += cnt;
                cp = (char *)cp + cnt;
                n -= cnt;
        }
out:
        if (save == 0)
                td->td_pflags &= ~TDP_DEADLKTREAT;
        return (error);
}
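
/*
 * Illustrative sketch, not part of the original file: a minimal
 * character-device read routine that drains a fixed kernel buffer
 * through uiomove().  The names "ex_msg" and "ex_read" are invented
 * for the example.  uiomove() itself advances uio_offset, uio_resid
 * and the iovec cursor, so the caller only bounds the copy.
 */
#if 0
static const char ex_msg[] = "hello from kern_subr\n";

static int
ex_read(struct cdev *dev, struct uio *uio, int ioflag)
{
        size_t todo;

        if (uio->uio_offset >= sizeof(ex_msg) - 1)
                return (0);             /* Past the end: EOF. */
        /* Copy no more than what remains of the buffer. */
        todo = sizeof(ex_msg) - 1 - uio->uio_offset;
        if (todo > (size_t)uio->uio_resid)
                todo = uio->uio_resid;
        return (uiomove(__DECONST(char *, ex_msg) + uio->uio_offset,
            todo, uio));
}
#endif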

/*
 * Wrapper for uiomove() that validates the arguments against a known-good
 * kernel buffer.  Currently, uiomove accepts a signed (n) argument, which
 * is almost definitely a bad thing, so we catch that here as well.  We
 * return a runtime failure, but it might be desirable to generate a runtime
 * assertion failure instead.
 */
int
uiomove_frombuf(void *buf, int buflen, struct uio *uio)
{
        unsigned int offset, n;

        if (uio->uio_offset < 0 || uio->uio_resid < 0 ||
            (offset = uio->uio_offset) != uio->uio_offset)
                return (EINVAL);
        if (buflen <= 0 || offset >= buflen)
                return (0);
        if ((n = buflen - offset) > INT_MAX)
                return (EINVAL);
        return (uiomove((char *)buf + offset, n, uio));
}
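
/*
 * Illustrative sketch (hypothetical): because uiomove_frombuf() clamps
 * the transfer against the buffer bounds itself, a read routine that
 * exposes a complete in-kernel object needs no offset arithmetic.
 */
#if 0
static int
ex_read_frombuf(struct cdev *dev, struct uio *uio, int ioflag)
{
        static char state[64] = "device state snapshot\n";

        /* An offset at or past the end simply returns 0 (EOF). */
        return (uiomove_frombuf(state, sizeof(state), uio));
}
#endif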

#ifdef ZERO_COPY_SOCKETS
/*
 * Experimental support for zero-copy I/O
 */
static int
userspaceco(void *cp, u_int cnt, struct uio *uio, int disposable)
{
        struct iovec *iov;
        int error;

        iov = uio->uio_iov;
        if (uio->uio_rw == UIO_READ) {
                if ((so_zero_copy_receive != 0)
                    && ((cnt & PAGE_MASK) == 0)
                    && ((((intptr_t)iov->iov_base) & PAGE_MASK) == 0)
                    && ((uio->uio_offset & PAGE_MASK) == 0)
                    && ((((intptr_t)cp) & PAGE_MASK) == 0)
                    && (disposable != 0)) {
                        /* SOCKET: use page-trading */
                        /*
                         * We only want to call vm_pgmoveco() on
                         * disposable pages, since it gives the
                         * kernel page to the userland process.
                         */
                        error = vm_pgmoveco(&curproc->p_vmspace->vm_map,
                            (vm_offset_t)cp, (vm_offset_t)iov->iov_base);

                        /*
                         * If we get an error back, attempt
                         * to use copyout() instead.  The
                         * disposable page should be freed
                         * automatically if we weren't able to move
                         * it into userland.
                         */
                        if (error != 0)
                                error = copyout(cp, iov->iov_base, cnt);
                } else {
                        error = copyout(cp, iov->iov_base, cnt);
                }
        } else {
                error = copyin(iov->iov_base, cp, cnt);
        }
        return (error);
}

int
uiomoveco(void *cp, int n, struct uio *uio, int disposable)
{
        struct iovec *iov;
        u_int cnt;
        int error;

        KASSERT(uio->uio_rw == UIO_READ || uio->uio_rw == UIO_WRITE,
            ("uiomoveco: mode"));
        KASSERT(uio->uio_segflg != UIO_USERSPACE || uio->uio_td == curthread,
            ("uiomoveco proc"));

        while (n > 0 && uio->uio_resid) {
                iov = uio->uio_iov;
                cnt = iov->iov_len;
                if (cnt == 0) {
                        uio->uio_iov++;
                        uio->uio_iovcnt--;
                        continue;
                }
                if (cnt > n)
                        cnt = n;

                switch (uio->uio_segflg) {

                case UIO_USERSPACE:
                        if (ticks - PCPU_GET(switchticks) >= hogticks)
                                uio_yield();

                        error = userspaceco(cp, cnt, uio, disposable);

                        if (error)
                                return (error);
                        break;

                case UIO_SYSSPACE:
                        if (uio->uio_rw == UIO_READ)
                                bcopy(cp, iov->iov_base, cnt);
                        else
                                bcopy(iov->iov_base, cp, cnt);
                        break;
                case UIO_NOCOPY:
                        break;
                }
                iov->iov_base = (char *)iov->iov_base + cnt;
                iov->iov_len -= cnt;
                uio->uio_resid -= cnt;
                uio->uio_offset += cnt;
                cp = (char *)cp + cnt;
                n -= cnt;
        }
        return (0);
}
#endif /* ZERO_COPY_SOCKETS */
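
/*
 * Illustrative sketch (hypothetical): the page-trading fast path in
 * userspaceco() only engages when every address, length and offset is
 * page aligned and the kernel page is disposable; otherwise it falls
 * back to an ordinary copyout() internally, so a caller can use
 * uiomoveco() unconditionally when the option is compiled in.
 */
#if 0
static int
ex_recv_copy(void *kbuf, int len, struct uio *uio, int disposable)
{
#ifdef ZERO_COPY_SOCKETS
        return (uiomoveco(kbuf, len, uio, disposable));
#else
        return (uiomove(kbuf, len, uio));
#endif
}
#endif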

/*
 * Give next character to user as result of read.
 */
int
ureadc(int c, struct uio *uio)
{
        struct iovec *iov;
        char *iov_base;

again:
        if (uio->uio_iovcnt == 0 || uio->uio_resid == 0)
                panic("ureadc");
        iov = uio->uio_iov;
        if (iov->iov_len == 0) {
                uio->uio_iovcnt--;
                uio->uio_iov++;
                goto again;
        }
        switch (uio->uio_segflg) {

        case UIO_USERSPACE:
                if (subyte(iov->iov_base, c) < 0)
                        return (EFAULT);
                break;

        case UIO_SYSSPACE:
                iov_base = iov->iov_base;
                *iov_base = c;
                iov->iov_base = iov_base;
                break;

        case UIO_NOCOPY:
                break;
        }
        iov->iov_base = (char *)iov->iov_base + 1;
        iov->iov_len--;
        uio->uio_resid--;
        uio->uio_offset++;
        return (0);
}
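
/*
 * Illustrative sketch (hypothetical): ureadc() suits producers that
 * generate output a byte at a time, e.g. formatting a counter into a
 * read request without staging it in a bounce buffer first.  Note the
 * uio_resid check: ureadc() panics on an exhausted uio.
 */
#if 0
static int
ex_emit_digits(u_int value, struct uio *uio)
{
        char digits[16];
        int error, i, n;

        /* Format in reverse, then feed the bytes to the uio in order. */
        n = 0;
        do {
                digits[n++] = '0' + value % 10;
                value /= 10;
        } while (value != 0 && n < (int)sizeof(digits));
        for (i = n - 1, error = 0; i >= 0 && error == 0; i--) {
                if (uio->uio_resid == 0)
                        break;          /* Reader asked for less. */
                error = ureadc(digits[i], uio);
        }
        return (error);
}
#endif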

/*
 * General routine to allocate a hash table with control of memory flags.
 */
void *
hashinit_flags(int elements, struct malloc_type *type, u_long *hashmask,
    int flags)
{
        long hashsize;
        LIST_HEAD(generic, generic) *hashtbl;
        int i;

        if (elements <= 0)
                panic("hashinit: bad elements");

        /* Exactly one of HASH_WAITOK and HASH_NOWAIT must be set. */
        KASSERT(((flags & HASH_WAITOK) != 0) ^ ((flags & HASH_NOWAIT) != 0),
            ("Bad flags (0x%x) passed to hashinit_flags", flags));

        for (hashsize = 1; hashsize <= elements; hashsize <<= 1)
                continue;
        hashsize >>= 1;

        if (flags & HASH_NOWAIT)
                hashtbl = malloc((u_long)hashsize * sizeof(*hashtbl),
                    type, M_NOWAIT);
        else
                hashtbl = malloc((u_long)hashsize * sizeof(*hashtbl),
                    type, M_WAITOK);

        if (hashtbl != NULL) {
                for (i = 0; i < hashsize; i++)
                        LIST_INIT(&hashtbl[i]);
                *hashmask = hashsize - 1;
        }
        return (hashtbl);
}

/*
 * Allocate and initialize a hash table with default flag: may sleep.
 */
void *
hashinit(int elements, struct malloc_type *type, u_long *hashmask)
{

        return (hashinit_flags(elements, type, hashmask, HASH_WAITOK));
}

void
hashdestroy(void *vhashtbl, struct malloc_type *type, u_long hashmask)
{
        LIST_HEAD(generic, generic) *hashtbl, *hp;

        hashtbl = vhashtbl;
        for (hp = hashtbl; hp <= &hashtbl[hashmask]; hp++)
                if (!LIST_EMPTY(hp))
                        panic("hashdestroy: hash not empty");
        free(hashtbl, type);
}
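
/*
 * Illustrative sketch (hypothetical): typical hashinit()/hashdestroy()
 * usage.  The element type, table and hash function are invented for
 * the example.  The table size is the largest power of two not above
 * "elements", so "key & mask" selects a bucket.
 */
#if 0
struct ex_node {
        LIST_ENTRY(ex_node) link;
        u_long              key;
};
static LIST_HEAD(ex_head, ex_node) *ex_tbl;
static u_long ex_mask;

static void
ex_tbl_setup(void)
{
        ex_tbl = hashinit(128, M_TEMP, &ex_mask);       /* may sleep */
}

static struct ex_head *
ex_bucket(u_long key)
{
        return (&ex_tbl[key & ex_mask]);
}

static void
ex_tbl_teardown(void)
{
        /* Panics if any bucket still has entries. */
        hashdestroy(ex_tbl, M_TEMP, ex_mask);
}
#endif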

static int primes[] = { 1, 13, 31, 61, 127, 251, 509, 761, 1021, 1531, 2039,
                        2557, 3067, 3583, 4093, 4603, 5119, 5623, 6143, 6653,
                        7159, 7673, 8191, 12281, 16381, 24571, 32749 };
#define NPRIMES (sizeof(primes) / sizeof(primes[0]))

/*
 * General routine to allocate a prime number sized hash table.
 */
void *
phashinit(int elements, struct malloc_type *type, u_long *nentries)
{
        long hashsize;
        LIST_HEAD(generic, generic) *hashtbl;
        int i;

        if (elements <= 0)
                panic("phashinit: bad elements");
        for (i = 1, hashsize = primes[1]; hashsize <= elements;) {
                i++;
                if (i == NPRIMES)
                        break;
                hashsize = primes[i];
        }
        hashsize = primes[i - 1];
        hashtbl = malloc((u_long)hashsize * sizeof(*hashtbl), type, M_WAITOK);
        for (i = 0; i < hashsize; i++)
                LIST_INIT(&hashtbl[i]);
        *nentries = hashsize;
        return (hashtbl);
}
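
/*
 * Illustrative sketch (hypothetical, reusing the ex_head type from the
 * sketch above): with a prime-sized table the bucket is chosen by
 * modulus rather than by masking, since the size is not a power of two.
 */
#if 0
static struct ex_head *
ex_pbucket(u_long key, struct ex_head *tbl, u_long nentries)
{
        return (&tbl[key % nentries]);
}
#endif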

void
uio_yield(void)
{
        struct thread *td;

        td = curthread;
        DROP_GIANT();
        thread_lock(td);
        sched_prio(td, td->td_user_pri);
        mi_switch(SW_INVOL, NULL);
        thread_unlock(td);
        PICKUP_GIANT();
}
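
/*
 * Illustrative sketch (hypothetical): long-running kernel loops that
 * are not otherwise preempted can call uio_yield() periodically, as
 * uiomove() itself does once the thread has run for "hogticks".
 */
#if 0
static void
ex_long_scan(char *buf, size_t len)
{
        size_t i;

        for (i = 0; i < len; i++) {
                buf[i] ^= 0xff;         /* Stand-in for real work. */
                if ((i & 0xffff) == 0 &&
                    ticks - PCPU_GET(switchticks) >= hogticks)
                        uio_yield();    /* Let other threads run. */
        }
}
#endif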

int
copyinfrom(const void * __restrict src, void * __restrict dst, size_t len,
    int seg)
{
        int error = 0;

        switch (seg) {
        case UIO_USERSPACE:
                error = copyin(src, dst, len);
                break;
        case UIO_SYSSPACE:
                bcopy(src, dst, len);
                break;
        default:
                panic("copyinfrom: bad seg %d\n", seg);
        }
        return (error);
}

int
copyinstrfrom(const void * __restrict src, void * __restrict dst, size_t len,
    size_t * __restrict copied, int seg)
{
        int error = 0;

        switch (seg) {
        case UIO_USERSPACE:
                error = copyinstr(src, dst, len, copied);
                break;
        case UIO_SYSSPACE:
                error = copystr(src, dst, len, copied);
                break;
        default:
                panic("copyinstrfrom: bad seg %d\n", seg);
        }
        return (error);
}
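
/*
 * Illustrative sketch (hypothetical): these helpers let one code path
 * serve both system calls (user addresses) and in-kernel callers
 * (kernel addresses) by threading a UIO_* segment flag through.
 */
#if 0
static int
ex_set_name(const char *name, int seg)
{
        char kname[64];
        size_t done;
        int error;

        /* The same call site works for UIO_USERSPACE and UIO_SYSSPACE. */
        error = copyinstrfrom(name, kname, sizeof(kname), &done, seg);
        if (error != 0)
                return (error);
        printf("new name (%zu bytes): %s\n", done, kname);
        return (0);
}
#endif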

int
copyiniov(struct iovec *iovp, u_int iovcnt, struct iovec **iov, int error)
{
        u_int iovlen;

        *iov = NULL;
        if (iovcnt > UIO_MAXIOV)
                return (error);
        iovlen = iovcnt * sizeof (struct iovec);
        *iov = malloc(iovlen, M_IOV, M_WAITOK);
        error = copyin(iovp, *iov, iovlen);
        if (error) {
                free(*iov, M_IOV);
                *iov = NULL;
        }
        return (error);
}

int
copyinuio(struct iovec *iovp, u_int iovcnt, struct uio **uiop)
{
        struct iovec *iov;
        struct uio *uio;
        u_int iovlen;
        int error, i;

        *uiop = NULL;
        if (iovcnt > UIO_MAXIOV)
                return (EINVAL);
        iovlen = iovcnt * sizeof (struct iovec);
        uio = malloc(iovlen + sizeof *uio, M_IOV, M_WAITOK);
        iov = (struct iovec *)(uio + 1);
        error = copyin(iovp, iov, iovlen);
        if (error) {
                free(uio, M_IOV);
                return (error);
        }
        uio->uio_iov = iov;
        uio->uio_iovcnt = iovcnt;
        uio->uio_segflg = UIO_USERSPACE;
        uio->uio_offset = -1;
        uio->uio_resid = 0;
        for (i = 0; i < iovcnt; i++) {
                if (iov->iov_len > INT_MAX - uio->uio_resid) {
                        free(uio, M_IOV);
                        return (EINVAL);
                }
                uio->uio_resid += iov->iov_len;
                iov++;
        }
        *uiop = uio;
        return (0);
}
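
/*
 * Illustrative sketch (hypothetical): the usual system-call pattern
 * around copyinuio() for a readv(2)-style entry point.  The uio and
 * the iovec array packed behind it are one allocation, freed as one.
 */
#if 0
static int
ex_sys_readv(struct thread *td, struct iovec *u_iovp, u_int iovcnt)
{
        struct uio *auio;
        int error;

        error = copyinuio(u_iovp, iovcnt, &auio);
        if (error != 0)
                return (error);
        auio->uio_rw = UIO_READ;
        auio->uio_td = td;
        /* ... hand auio to the file/vnode layer here ... */
        free(auio, M_IOV);
        return (error);
}
#endif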

struct uio *
cloneuio(struct uio *uiop)
{
        struct uio *uio;
        int iovlen;

        iovlen = uiop->uio_iovcnt * sizeof (struct iovec);
        uio = malloc(iovlen + sizeof *uio, M_IOV, M_WAITOK);
        *uio = *uiop;
        uio->uio_iov = (struct iovec *)(uio + 1);
        bcopy(uiop->uio_iov, uio->uio_iov, iovlen);
        return (uio);
}
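
/*
 * Illustrative sketch (hypothetical): cloneuio() deep-copies the iovec
 * array as well as the uio itself, so the copy can be consumed
 * independently of the original; the caller releases it with
 * free(uio, M_IOV).
 */
#if 0
static void
ex_tee(struct uio *uio)
{
        struct uio *clone;

        clone = cloneuio(uio);          /* Independent cursor state. */
        /* ... consume "clone" without disturbing "uio" ... */
        free(clone, M_IOV);
}
#endif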