FreeBSD/Linux Kernel Cross Reference
sys/kern/kern_subr.c
1 /*
2 * Copyright (c) 1982, 1986, 1991, 1993
3 * The Regents of the University of California. All rights reserved.
4 * (c) UNIX System Laboratories, Inc.
5 * All or some portions of this file are derived from material licensed
6 * to the University of California by American Telephone and Telegraph
7 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
8 * the permission of UNIX System Laboratories, Inc.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 * 3. All advertising materials mentioning features or use of this software
19 * must display the following acknowledgement:
20 * This product includes software developed by the University of
21 * California, Berkeley and its contributors.
22 * 4. Neither the name of the University nor the names of its contributors
23 * may be used to endorse or promote products derived from this software
24 * without specific prior written permission.
25 *
26 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
27 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
28 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
29 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
30 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
31 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
32 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
33 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
34 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
35 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
36 * SUCH DAMAGE.
37 *
38 * @(#)kern_subr.c 8.3 (Berkeley) 1/21/94
39 * $FreeBSD: releng/5.1/sys/kern/kern_subr.c 120688 2003-10-03 13:02:50Z nectar $
40 */
41
42 #include "opt_zero.h"
43
44 #include <sys/param.h>
45 #include <sys/systm.h>
46 #include <sys/kernel.h>
47 #include <sys/ktr.h>
48 #include <sys/limits.h>
49 #include <sys/lock.h>
50 #include <sys/mutex.h>
51 #include <sys/proc.h>
52 #include <sys/malloc.h>
53 #include <sys/resourcevar.h>
54 #include <sys/sched.h>
55 #include <sys/sysctl.h>
56 #include <sys/vnode.h>
57
58 #include <vm/vm.h>
59 #include <vm/vm_page.h>
60 #include <vm/vm_map.h>
61 #ifdef ZERO_COPY_SOCKETS
62 #include <vm/vm_param.h>
63 #include <vm/vm_object.h>
64 #endif
65
/*
 * Export the compile-time I/O vector limit (UIO_MAXIOV) read-only as
 * kern.iov_max so userland can query it via sysconf(_SC_IOV_MAX).
 */
SYSCTL_INT(_kern, KERN_IOV_MAX, iov_max, CTLFLAG_RD, NULL, UIO_MAXIOV,
	"Maximum number of elements in an I/O vector; sysconf(_SC_IOV_MAX)");
68
69 #ifdef ZERO_COPY_SOCKETS
70 /* Declared in uipc_socket.c */
71 extern int so_zero_copy_receive;
72
/*
 * Zero-copy "page trade": hand the kernel page backing kaddr directly to
 * the user address space at uaddr by renaming it into the VM object that
 * backs the user mapping, instead of copying the data.
 *
 * Returns KERN_SUCCESS (0) on success, or EFAULT if the user address
 * cannot be looked up for write.  NOTE(review): this mixes a vm KERN_*
 * code with an errno value; callers appear to only test for nonzero.
 * NOTE(review): the srcobj parameter is not used by this implementation.
 */
static int
vm_pgmoveco(vm_map_t mapa, vm_object_t srcobj, vm_offset_t kaddr,
	vm_offset_t uaddr)
{
	vm_map_t map = mapa;
	vm_page_t kern_pg, user_pg;
	vm_object_t uobject;
	vm_map_entry_t entry;
	vm_pindex_t upindex, kpindex;
	vm_prot_t prot;
	boolean_t wired;

	/*
	 * First lookup the kernel page.
	 */
	kern_pg = PHYS_TO_VM_PAGE(vtophys(kaddr));

	/* Resolve the user VA to its backing object and page index. */
	if ((vm_map_lookup(&map, uaddr,
	    VM_PROT_WRITE, &entry, &uobject,
	    &upindex, &prot, &wired)) != KERN_SUCCESS) {
		return(EFAULT);
	}
	/*
	 * If a page already exists at the user address, evict it: wait
	 * until it is no longer busy (re-taking the page queues lock each
	 * time the sleep drops it), then unmap it everywhere and free it
	 * so the kernel page can take its place.
	 */
	if ((user_pg = vm_page_lookup(uobject, upindex)) != NULL) {
		do
			vm_page_lock_queues();
		while (vm_page_sleep_if_busy(user_pg, 1, "vm_pgmoveco"));
		vm_page_busy(user_pg);
		pmap_remove_all(user_pg);
		vm_page_free(user_pg);
	} else
		vm_page_lock_queues();
	/*
	 * The kernel page must be idle before it can be renamed: not busy,
	 * not held, and not sitting on the free queue.  Anything else is a
	 * fatal inconsistency.
	 */
	if (kern_pg->busy || ((kern_pg->queue - kern_pg->pc) == PQ_FREE) ||
	    (kern_pg->hold_count != 0)|| (kern_pg->flags & PG_BUSY)) {
		printf("vm_pgmoveco: pindex(%lu), busy(%d), PG_BUSY(%d), "
		    "hold(%d) paddr(0x%lx)\n", (u_long)kern_pg->pindex,
		    kern_pg->busy, (kern_pg->flags & PG_BUSY) ? 1 : 0,
		    kern_pg->hold_count, (u_long)kern_pg->phys_addr);
		if ((kern_pg->queue - kern_pg->pc) == PQ_FREE)
			panic("vm_pgmoveco: renaming free page");
		else
			panic("vm_pgmoveco: renaming busy page");
	}
	kpindex = kern_pg->pindex;
	vm_page_busy(kern_pg);
	/* Move the kernel page into the user object at the user's index. */
	vm_page_rename(kern_pg, uobject, upindex);
	vm_page_flag_clear(kern_pg, PG_BUSY);
	kern_pg->valid = VM_PAGE_BITS_ALL;
	vm_page_unlock_queues();

	vm_map_lookup_done(map, entry);
	return(KERN_SUCCESS);
}
125 #endif /* ZERO_COPY_SOCKETS */
126
/*
 * Move n bytes between the kernel buffer cp and the (possibly scattered)
 * addresses described by uio, in the direction given by uio->uio_rw.
 * The uio state (iov base/len, resid, offset) is advanced as data moves,
 * so the transfer may be resumed with a subsequent call.
 *
 * Returns 0 on success or the errno from copyin()/copyout() when a
 * userspace transfer faults.
 */
int
uiomove(void *cp, int n, struct uio *uio)
{
	struct thread *td = curthread;
	struct iovec *iov;
	u_int cnt;
	int error = 0;
	int save = 0;

	KASSERT(uio->uio_rw == UIO_READ || uio->uio_rw == UIO_WRITE,
	    ("uiomove: mode"));
	KASSERT(uio->uio_segflg != UIO_USERSPACE || uio->uio_td == curthread,
	    ("uiomove proc"));

	/*
	 * Flag the thread for deadlock-avoidance treatment while it may
	 * fault on user pages; remember whether the flag was already set
	 * so the original state can be restored on the way out.
	 */
	if (td) {
		mtx_lock_spin(&sched_lock);
		save = td->td_flags & TDF_DEADLKTREAT;
		td->td_flags |= TDF_DEADLKTREAT;
		mtx_unlock_spin(&sched_lock);
	}

	while (n > 0 && uio->uio_resid) {
		iov = uio->uio_iov;
		cnt = iov->iov_len;
		if (cnt == 0) {
			/* Skip exhausted iovec entries. */
			uio->uio_iov++;
			uio->uio_iovcnt--;
			continue;
		}
		if (cnt > n)
			cnt = n;

		switch (uio->uio_segflg) {

		case UIO_USERSPACE:
			/* Yield the CPU if we have been running too long. */
			if (ticks - PCPU_GET(switchticks) >= hogticks)
				uio_yield();
			if (uio->uio_rw == UIO_READ)
				error = copyout(cp, iov->iov_base, cnt);
			else
				error = copyin(iov->iov_base, cp, cnt);
			if (error)
				goto out;
			break;

		case UIO_SYSSPACE:
			if (uio->uio_rw == UIO_READ)
				bcopy(cp, iov->iov_base, cnt);
			else
				bcopy(iov->iov_base, cp, cnt);
			break;
		case UIO_NOCOPY:
			/* Advance the uio without moving any data. */
			break;
		}
		iov->iov_base = (char *)iov->iov_base + cnt;
		iov->iov_len -= cnt;
		uio->uio_resid -= cnt;
		uio->uio_offset += cnt;
		cp = (char *)cp + cnt;
		n -= cnt;
	}
out:
	/* Only clear TDF_DEADLKTREAT if it was not already set on entry. */
	if (td && save == 0) {
		mtx_lock_spin(&sched_lock);
		td->td_flags &= ~TDF_DEADLKTREAT;
		mtx_unlock_spin(&sched_lock);
	}
	return (error);
}
196
197 /*
198 * Wrapper for uiomove() that validates the arguments against a known-good
199 * kernel buffer. Currently, uiomove accepts a signed (n) argument, which
200 * is almost definitely a bad thing, so we catch that here as well. We
201 * return a runtime failure, but it might be desirable to generate a runtime
202 * assertion failure instead.
203 */
204 int
205 uiomove_frombuf(void *buf, int buflen, struct uio *uio)
206 {
207 unsigned int offset, n;
208
209 if (uio->uio_offset < 0 || uio->uio_resid < 0 ||
210 (offset = uio->uio_offset) != uio->uio_offset)
211 return (EINVAL);
212 if (buflen <= 0 || offset >= buflen)
213 return (0);
214 if ((n = buflen - offset) > INT_MAX)
215 return (EINVAL);
216 return (uiomove((char *)buf + offset, n, uio));
217 }
218
219 #ifdef ZERO_COPY_SOCKETS
220 /*
221 * Experimental support for zero-copy I/O
222 */
/*
 * Move cnt bytes between the kernel buffer cp and the current user iovec.
 * For reads, attempt the zero-copy page-trading path (vm_pgmoveco())
 * when all preconditions hold; otherwise fall back to copyout()/copyin().
 * Returns 0 or an errno from the copy routines.
 */
static int
userspaceco(void *cp, u_int cnt, struct uio *uio, struct vm_object *obj,
	int disposable)
{
	struct iovec *iov;
	int error;

	iov = uio->uio_iov;
	if (uio->uio_rw == UIO_READ) {
		/*
		 * Page trading requires: zero-copy receive enabled, a
		 * backing object of type OBJT_DEFAULT, a disposable page,
		 * and page-aligned length, user address, offset, and
		 * kernel address.
		 */
		if ((so_zero_copy_receive != 0)
		 && (obj != NULL)
		 && ((cnt & PAGE_MASK) == 0)
		 && ((((intptr_t) iov->iov_base) & PAGE_MASK) == 0)
		 && ((uio->uio_offset & PAGE_MASK) == 0)
		 && ((((intptr_t) cp) & PAGE_MASK) == 0)
		 && (obj->type == OBJT_DEFAULT)
		 && (disposable != 0)) {
			/* SOCKET: use page-trading */
			/*
			 * We only want to call vm_pgmoveco() on
			 * disposeable pages, since it gives the
			 * kernel page to the userland process.
			 */
			error = vm_pgmoveco(&curproc->p_vmspace->vm_map,
			    obj, (vm_offset_t)cp,
			    (vm_offset_t)iov->iov_base);

			/*
			 * If we get an error back, attempt
			 * to use copyout() instead.  The
			 * disposable page should be freed
			 * automatically if we weren't able to move
			 * it into userland.
			 */
			if (error != 0)
				error = copyout(cp, iov->iov_base, cnt);
		} else {
			error = copyout(cp, iov->iov_base, cnt);
		}
	} else {
		error = copyin(iov->iov_base, cp, cnt);
	}
	return (error);
}
267
/*
 * Zero-copy-capable variant of uiomove(): move n bytes between the kernel
 * buffer cp and the addresses described by uio, delegating userspace
 * transfers to userspaceco() so page trading can be used when possible.
 * The uio state is advanced as data moves.  Returns 0 or an errno.
 */
int
uiomoveco(void *cp, int n, struct uio *uio, struct vm_object *obj,
	int disposable)
{
	struct iovec *iov;
	u_int cnt;
	int error;

	KASSERT(uio->uio_rw == UIO_READ || uio->uio_rw == UIO_WRITE,
	    ("uiomoveco: mode"));
	KASSERT(uio->uio_segflg != UIO_USERSPACE || uio->uio_td == curthread,
	    ("uiomoveco proc"));

	while (n > 0 && uio->uio_resid) {
		iov = uio->uio_iov;
		cnt = iov->iov_len;
		if (cnt == 0) {
			/* Skip exhausted iovec entries. */
			uio->uio_iov++;
			uio->uio_iovcnt--;
			continue;
		}
		if (cnt > n)
			cnt = n;

		switch (uio->uio_segflg) {

		case UIO_USERSPACE:
			/* Yield the CPU if we have been running too long. */
			if (ticks - PCPU_GET(switchticks) >= hogticks)
				uio_yield();

			error = userspaceco(cp, cnt, uio, obj, disposable);

			if (error)
				return (error);
			break;

		case UIO_SYSSPACE:
			if (uio->uio_rw == UIO_READ)
				bcopy(cp, iov->iov_base, cnt);
			else
				bcopy(iov->iov_base, cp, cnt);
			break;
		case UIO_NOCOPY:
			/* Advance the uio without moving any data. */
			break;
		}
		iov->iov_base = (char *)iov->iov_base + cnt;
		iov->iov_len -= cnt;
		uio->uio_resid -= cnt;
		uio->uio_offset += cnt;
		cp = (char *)cp + cnt;
		n -= cnt;
	}
	return (0);
}
322 #endif /* ZERO_COPY_SOCKETS */
323
324 /*
325 * Give next character to user as result of read.
326 */
327 int
328 ureadc(int c, struct uio *uio)
329 {
330 struct iovec *iov;
331 char *iov_base;
332
333 again:
334 if (uio->uio_iovcnt == 0 || uio->uio_resid == 0)
335 panic("ureadc");
336 iov = uio->uio_iov;
337 if (iov->iov_len == 0) {
338 uio->uio_iovcnt--;
339 uio->uio_iov++;
340 goto again;
341 }
342 switch (uio->uio_segflg) {
343
344 case UIO_USERSPACE:
345 if (subyte(iov->iov_base, c) < 0)
346 return (EFAULT);
347 break;
348
349 case UIO_SYSSPACE:
350 iov_base = iov->iov_base;
351 *iov_base = c;
352 iov->iov_base = iov_base;
353 break;
354
355 case UIO_NOCOPY:
356 break;
357 }
358 iov->iov_base = (char *)iov->iov_base + 1;
359 iov->iov_len--;
360 uio->uio_resid--;
361 uio->uio_offset++;
362 return (0);
363 }
364
365 /*
366 * General routine to allocate a hash table.
367 */
368 void *
369 hashinit(int elements, struct malloc_type *type, u_long *hashmask)
370 {
371 long hashsize;
372 LIST_HEAD(generic, generic) *hashtbl;
373 int i;
374
375 if (elements <= 0)
376 panic("hashinit: bad elements");
377 for (hashsize = 1; hashsize <= elements; hashsize <<= 1)
378 continue;
379 hashsize >>= 1;
380 hashtbl = malloc((u_long)hashsize * sizeof(*hashtbl), type, M_WAITOK);
381 for (i = 0; i < hashsize; i++)
382 LIST_INIT(&hashtbl[i]);
383 *hashmask = hashsize - 1;
384 return (hashtbl);
385 }
386
387 void
388 hashdestroy(void *vhashtbl, struct malloc_type *type, u_long hashmask)
389 {
390 LIST_HEAD(generic, generic) *hashtbl, *hp;
391
392 hashtbl = vhashtbl;
393 for (hp = hashtbl; hp <= &hashtbl[hashmask]; hp++)
394 if (!LIST_EMPTY(hp))
395 panic("hashdestroy: hash not empty");
396 free(hashtbl, type);
397 }
398
/*
 * Prime table used by phashinit() to size hash tables.  primes[0] is 1
 * (not actually prime): phashinit() falls back to it for very small
 * element counts and clamps to the last entry for very large ones.
 */
static int primes[] = { 1, 13, 31, 61, 127, 251, 509, 761, 1021, 1531, 2039,
			2557, 3067, 3583, 4093, 4603, 5119, 5623, 6143, 6653,
			7159, 7673, 8191, 12281, 16381, 24571, 32749 };
/* Number of entries in primes[]. */
#define NPRIMES (sizeof(primes) / sizeof(primes[0]))
403
404 /*
405 * General routine to allocate a prime number sized hash table.
406 */
407 void *
408 phashinit(int elements, struct malloc_type *type, u_long *nentries)
409 {
410 long hashsize;
411 LIST_HEAD(generic, generic) *hashtbl;
412 int i;
413
414 if (elements <= 0)
415 panic("phashinit: bad elements");
416 for (i = 1, hashsize = primes[1]; hashsize <= elements;) {
417 i++;
418 if (i == NPRIMES)
419 break;
420 hashsize = primes[i];
421 }
422 hashsize = primes[i - 1];
423 hashtbl = malloc((u_long)hashsize * sizeof(*hashtbl), type, M_WAITOK);
424 for (i = 0; i < hashsize; i++)
425 LIST_INIT(&hashtbl[i]);
426 *nentries = hashsize;
427 return (hashtbl);
428 }
429
/*
 * Voluntarily yield the CPU during a long uio transfer.  Giant is dropped
 * across the context switch and re-acquired afterwards; sched_lock is
 * taken before mi_switch() and released once this thread runs again.
 */
void
uio_yield(void)
{
	struct thread *td;

	td = curthread;
	mtx_lock_spin(&sched_lock);
	DROP_GIANT();
	/* Drop back to the thread's user priority before switching. */
	sched_prio(td, td->td_ksegrp->kg_user_pri);	/* XXXKSE */
	/* Charge the switch to the involuntary context-switch counter. */
	td->td_proc->p_stats->p_ru.ru_nivcsw++;
	mi_switch();
	mtx_unlock_spin(&sched_lock);
	PICKUP_GIANT();
}
444
445 int
446 copyinfrom(const void *src, void *dst, size_t len, int seg)
447 {
448 int error = 0;
449
450 switch (seg) {
451 case UIO_USERSPACE:
452 error = copyin(src, dst, len);
453 break;
454 case UIO_SYSSPACE:
455 bcopy(src, dst, len);
456 break;
457 default:
458 panic("copyinfrom: bad seg %d\n", seg);
459 }
460 return (error);
461 }
462
463 int
464 copyinstrfrom(const void *src, void *dst, size_t len, size_t *copied, int seg)
465 {
466 int error = 0;
467
468 switch (seg) {
469 case UIO_USERSPACE:
470 error = copyinstr(src, dst, len, copied);
471 break;
472 case UIO_SYSSPACE:
473 error = copystr(src, dst, len, copied);
474 break;
475 default:
476 panic("copyinstrfrom: bad seg %d\n", seg);
477 }
478 return (error);
479 }
Cache object: 82c9606a70599dc492a015849a2cf98a
|