1 /*-
2 * Copyright (c) 1982, 1986 The Regents of the University of California.
3 * Copyright (c) 1989, 1990 William Jolitz
4 * Copyright (c) 1994 John Dyson
5 * All rights reserved.
6 *
7 * This code is derived from software contributed to Berkeley by
8 * the Systems Programming Group of the University of Utah Computer
9 * Science Department, and William Jolitz.
10 *
11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions
13 * are met:
14 * 1. Redistributions of source code must retain the above copyright
15 * notice, this list of conditions and the following disclaimer.
16 * 2. Redistributions in binary form must reproduce the above copyright
17 * notice, this list of conditions and the following disclaimer in the
18 * documentation and/or other materials provided with the distribution.
19 * 3. All advertising materials mentioning features or use of this software
20 * must display the following acknowledgement:
21 * This product includes software developed by the University of
22 * California, Berkeley and its contributors.
23 * 4. Neither the name of the University nor the names of its contributors
24 * may be used to endorse or promote products derived from this software
25 * without specific prior written permission.
26 *
27 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
28 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
29 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
30 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
31 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
32 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
33 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
34 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
35 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
36 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
37 * SUCH DAMAGE.
38 *
39 * from: @(#)vm_machdep.c 7.3 (Berkeley) 5/13/91
40 * Utah $Hdr: vm_machdep.c 1.16.1.1 89/06/23$
41 * $FreeBSD: releng/5.2/sys/powerpc/powerpc/vm_machdep.c 122821 2003-11-16 23:40:06Z alc $
42 */
43 /*
44 * Copyright (c) 1994, 1995, 1996 Carnegie-Mellon University.
45 * All rights reserved.
46 *
47 * Author: Chris G. Demetriou
48 *
49 * Permission to use, copy, modify and distribute this software and
50 * its documentation is hereby granted, provided that both the copyright
51 * notice and this permission notice appear in all copies of the
52 * software, derivative works or modified versions, and any portions
53 * thereof, and that both notices appear in supporting documentation.
54 *
55 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
56 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
57 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
58 *
59 * Carnegie Mellon requests users of this software to return to
60 *
61 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
62 * School of Computer Science
63 * Carnegie Mellon University
64 * Pittsburgh PA 15213-3890
65 *
66 * any improvements or extensions that they make and grant Carnegie the
67 * rights to redistribute these changes.
68 */
69
70 #include "opt_kstack_pages.h"
71
72 #include <sys/param.h>
73 #include <sys/systm.h>
74 #include <sys/proc.h>
75 #include <sys/malloc.h>
76 #include <sys/bio.h>
77 #include <sys/buf.h>
78 #include <sys/ktr.h>
79 #include <sys/lock.h>
80 #include <sys/mutex.h>
81 #include <sys/vnode.h>
82 #include <sys/vmmeter.h>
83 #include <sys/kernel.h>
84 #include <sys/mbuf.h>
85 #include <sys/sf_buf.h>
86 #include <sys/sysctl.h>
87 #include <sys/unistd.h>
88
89 #include <machine/clock.h>
90 #include <machine/cpu.h>
91 #include <machine/fpu.h>
92 #include <machine/frame.h>
93 #include <machine/md_var.h>
94
95 #include <dev/ofw/openfirm.h>
96
97 #include <vm/vm.h>
98 #include <vm/vm_param.h>
99 #include <vm/vm_kern.h>
100 #include <vm/vm_page.h>
101 #include <vm/vm_map.h>
102 #include <vm/vm_extern.h>
103
104 #include <sys/user.h>
105
/* Boot-time initialization of the sendfile buffer pool, after mbufs. */
static void sf_buf_init(void *arg);
SYSINIT(sock_sf, SI_SUB_MBUF, SI_ORDER_ANY, sf_buf_init, NULL)
108
109 /*
110 * Expanded sf_freelist head. Really an SLIST_HEAD() in disguise, with the
111 * sf_freelist head with the sf_lock mutex.
112 */
113 static struct {
114 SLIST_HEAD(, sf_buf) sf_head;
115 struct mtx sf_lock;
116 } sf_freelist;
117
118 static u_int sf_buf_alloc_want;
119
120 /*
121 * Finish a fork operation, with process p2 nearly set up.
 * Copy and update the pcb, and set up the stack so that the child
 * is ready to run and return to user mode.
124 */
void
cpu_fork(struct thread *td1, struct proc *p2, struct thread *td2, int flags)
{
	struct proc *p1;
	struct trapframe *tf;
	struct callframe *cf;
	struct pcb *pcb;

	KASSERT(td1 == curthread || td1 == &thread0,
	    ("cpu_fork: p1 not curproc and not proc0"));
	CTR3(KTR_PROC, "cpu_fork: called td1=%08x p2=%08x flags=%x", (u_int)td1, (u_int)p2, flags);

	/* If no new process is actually being created, there is nothing to do. */
	if ((flags & RFPROC) == 0)
		return;

	p1 = td1->td_proc;

	/*
	 * Place the child's pcb just below the top of its kernel stack.
	 * NOTE(review): the mask ~0x2fU clears the low four bits AND bit 5,
	 * which is not a contiguous alignment mask -- confirm the intended
	 * alignment here.
	 */
	pcb = (struct pcb *)((td2->td_kstack + KSTACK_PAGES * PAGE_SIZE -
	    sizeof(struct pcb)) & ~0x2fU);
	td2->td_pcb = pcb;

	/* Copy the pcb */
	bcopy(td1->td_pcb, pcb, sizeof(struct pcb));

	/*
	 * Create a fresh stack for the new process.
	 * Copy the trap frame for the return to user mode as if from a
	 * syscall. This copies most of the user mode register values.
	 */
	tf = (struct trapframe *)pcb - 1;
	bcopy(td1->td_frame, tf, sizeof(*tf));

	/* Set up trap frame: the child observes a zero return value. */
	tf->fixreg[FIRSTARG] = 0;
	tf->fixreg[FIRSTARG + 1] = 0;
	/* Clear CR bit 0x10000000 -- presumably cr0[SO], the syscall-error
	 * indicator, so the child's fork() appears to succeed; confirm. */
	tf->cr &= ~0x10000000;

	td2->td_frame = tf;

	/*
	 * Build a callframe below the trapframe so that, when the child
	 * first runs, fork_trampoline invokes fork_return(td2, tf).
	 */
	cf = (struct callframe *)tf - 1;
	cf->cf_func = (register_t)fork_return;
	cf->cf_arg0 = (register_t)td2;
	cf->cf_arg1 = (register_t)tf;

	/* The child resumes at fork_trampoline with this stack pointer. */
	pcb->pcb_sp = (register_t)cf;
	pcb->pcb_lr = (register_t)fork_trampoline;
	/* Seed the user segment register from the kernel pmap. */
	pcb->pcb_usr = kernel_pmap->pm_sr[USER_SR];

	/*
	 * Now cpu_switch() can schedule the new process.
	 */
}
177
178 /*
179 * Intercept the return address from a freshly forked process that has NOT
180 * been scheduled yet.
181 *
182 * This is needed to make kernel threads stay in kernel mode.
183 */
184 void
185 cpu_set_fork_handler(td, func, arg)
186 struct thread *td;
187 void (*func)(void *);
188 void *arg;
189 {
190 struct callframe *cf;
191
192 CTR3(KTR_PROC, "cpu_set_fork_handler: called with td=%08x func=%08x arg=%08x",
193 (u_int)td, (u_int)func, (u_int)arg);
194
195 cf = (struct callframe *)td->td_pcb->pcb_sp;
196
197 cf->cf_func = (register_t)func;
198 cf->cf_arg0 = (register_t)arg;
199 }
200
201 /*
202 * cpu_exit is called as the last action during exit.
203 * We release the address space of the process, block interrupts,
204 * and call switch_exit. switch_exit switches to proc0's PCB and stack,
205 * then jumps into the middle of cpu_switch, as if it were switching
206 * from proc0.
207 */
void
cpu_exit(struct thread *td)
{

	/* No machine-dependent work is required at exit on this platform. */
}
213
void
cpu_sched_exit(struct thread *td)
{

	/* No machine-dependent scheduler cleanup on this platform. */
}
219
/* Temporary helper */
void
cpu_throw(struct thread *old, struct thread *new)
{

	/*
	 * Switch to the new thread; control is not expected to return
	 * here, so the panic catches a cpu_switch() that came back.
	 */
	cpu_switch(old, new);
	panic("cpu_throw() didn't");
}
228
229 /*
230 * Reset back to firmware.
231 */
232 void
233 cpu_reset()
234 {
235 OF_exit();
236 }
237
238 /*
239 * Allocate a pool of sf_bufs (sendfile(2) or "super-fast" if you prefer. :-))
240 */
241 static void
242 sf_buf_init(void *arg)
243 {
244 struct sf_buf *sf_bufs;
245 vm_offset_t sf_base;
246 int i;
247
248 mtx_init(&sf_freelist.sf_lock, "sf_bufs list lock", NULL, MTX_DEF);
249 SLIST_INIT(&sf_freelist.sf_head);
250 sf_base = kmem_alloc_nofault(kernel_map, nsfbufs * PAGE_SIZE);
251 sf_bufs = malloc(nsfbufs * sizeof(struct sf_buf), M_TEMP,
252 M_NOWAIT | M_ZERO);
253 for (i = 0; i < nsfbufs; i++) {
254 sf_bufs[i].kva = sf_base + i * PAGE_SIZE;
255 SLIST_INSERT_HEAD(&sf_freelist.sf_head, &sf_bufs[i], free_list);
256 }
257 sf_buf_alloc_want = 0;
258 }
259
260 /*
261 * Get an sf_buf from the freelist. Will block if none are available.
262 */
263 struct sf_buf *
264 sf_buf_alloc(struct vm_page *m)
265 {
266 struct sf_buf *sf;
267 int error;
268
269 mtx_lock(&sf_freelist.sf_lock);
270 while ((sf = SLIST_FIRST(&sf_freelist.sf_head)) == NULL) {
271 sf_buf_alloc_want++;
272 error = msleep(&sf_freelist, &sf_freelist.sf_lock, PVM|PCATCH,
273 "sfbufa", 0);
274 sf_buf_alloc_want--;
275
276 /*
277 * If we got a signal, don't risk going back to sleep.
278 */
279 if (error)
280 break;
281 }
282 if (sf != NULL) {
283 SLIST_REMOVE_HEAD(&sf_freelist.sf_head, free_list);
284 sf->m = m;
285 pmap_qenter(sf->kva, &sf->m, 1);
286 }
287 mtx_unlock(&sf_freelist.sf_lock);
288 return (sf);
289 }
290
291 /*
292 * Detatch mapped page and release resources back to the system.
293 */
294 void
295 sf_buf_free(void *addr, void *args)
296 {
297 struct sf_buf *sf;
298 struct vm_page *m;
299
300 sf = args;
301 pmap_qremove((vm_offset_t)addr, 1);
302 m = sf->m;
303 vm_page_lock_queues();
304 vm_page_unwire(m, 0);
305 /*
306 * Check for the object going away on us. This can
307 * happen since we don't hold a reference to it.
308 * If so, we're responsible for freeing the page.
309 */
310 if (m->wire_count == 0 && m->object == NULL)
311 vm_page_free(m);
312 vm_page_unlock_queues();
313 sf->m = NULL;
314 mtx_lock(&sf_freelist.sf_lock);
315 SLIST_INSERT_HEAD(&sf_freelist.sf_head, sf, free_list);
316 if (sf_buf_alloc_want > 0)
317 wakeup_one(&sf_freelist);
318 mtx_unlock(&sf_freelist.sf_lock);
319 }
320
321 /*
322 * Software interrupt handler for queued VM system processing.
323 */
324 void
325 swi_vm(void *dummy)
326 {
327 #if 0 /* XXX: Don't have busdma stuff yet */
328 if (busdma_swi_pending != 0)
329 busdma_swi();
330 #endif
331 }
332
333 /*
334 * Tell whether this address is in some physical memory region.
335 * Currently used by the kernel coredump code in order to avoid
336 * dumping the ``ISA memory hole'' which could cause indefinite hangs,
337 * or other unpredictable behaviour.
338 */
339
340
341 int
342 is_physical_memory(addr)
343 vm_offset_t addr;
344 {
345 /*
346 * stuff other tests for known memory-mapped devices (PCI?)
347 * here
348 */
349
350 return 1;
351 }
352
353 /*
354 * KSE functions
355 */
356 void
357 cpu_thread_exit(struct thread *td)
358 {
359 }
360
void
cpu_thread_clean(struct thread *td)
{
	/* KSE stub: no machine-dependent thread cleanup yet. */
}
365
void
cpu_thread_setup(struct thread *td)
{
	/* KSE stub: no machine-dependent thread setup yet. */
}
370
void
cpu_thread_swapin(struct thread *td)
{
	/* KSE stub: nothing to restore when a thread is swapped in. */
}
375
void
cpu_thread_swapout(struct thread *td)
{
	/* KSE stub: nothing to save when a thread is swapped out. */
}
380
void
cpu_set_upcall(struct thread *td, struct thread *td0)
{
	/* KSE stub: upcall context setup not implemented yet. */
}
385
void
cpu_set_upcall_kse(struct thread *td, struct kse_upcall *ku)
{
	/* KSE stub: upcall entry setup not implemented yet. */
}
Cache object: 71c9d03985c3c1cb12ca57e5ce9148c2
|