/*-
 * Copyright (c) 1982, 1986 The Regents of the University of California.
 * Copyright (c) 1989, 1990 William Jolitz
 * Copyright (c) 1994 John Dyson
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department, and William Jolitz.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_machdep.c	7.3 (Berkeley) 5/13/91
 *	Utah $Hdr: vm_machdep.c 1.16.1.1 89/06/23$
 *	from: src/sys/i386/i386/vm_machdep.c,v 1.132.2.2 2000/08/26 04:19:26 yokota
 *	JNPR: vm_machdep.c,v 1.8.2.2 2007/08/16 15:59:17 girish
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: releng/8.1/sys/mips/mips/vm_machdep.c 200443 2009-12-12 20:06:25Z kib $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/syscall.h>
#include <sys/buf.h>
#include <sys/vnode.h>
#include <sys/vmmeter.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/unistd.h>

#include <machine/clock.h>
#include <machine/cpu.h>
#include <machine/md_var.h>
#include <machine/pcb.h>
#include <machine/pltfm.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <sys/lock.h>
#include <vm/vm_kern.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_extern.h>

#include <sys/user.h>
#include <sys/mbuf.h>
#include <sys/sf_buf.h>

#ifndef NSFBUFS
#define NSFBUFS		(512 + maxusers * 16)
#endif

static void sf_buf_init(void *arg);
SYSINIT(sock_sf, SI_SUB_MBUF, SI_ORDER_ANY, sf_buf_init, NULL);

LIST_HEAD(sf_head, sf_buf);

/*
 * A hash table of active sendfile(2) buffers.
 */
static struct sf_head *sf_buf_active;
static u_long sf_buf_hashmask;
#define SF_BUF_HASH(m)	(((m) - vm_page_array) & sf_buf_hashmask)

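/*
 * Free list of sf_bufs that are not currently referenced.  Each keeps its
 * last page mapping, so a later request for the same page can revive it
 * from the hash table instead of remapping.
 */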
static TAILQ_HEAD(, sf_buf) sf_buf_freelist;
static u_int sf_buf_alloc_want;

/*
 * A lock used to synchronize access to the hash table and free list.
 */
static struct mtx sf_buf_lock;

/*
 * Finish a fork operation, with process p2 nearly set up.
 * Copy and update the pcb, and set up the stack so that the child is
 * ready to run and return to user mode.
 */
void
cpu_fork(register struct thread *td1, register struct proc *p2,
    struct thread *td2, int flags)
{
        register struct proc *p1;
        struct pcb *pcb2;

        p1 = td1->td_proc;
        if ((flags & RFPROC) == 0)
                return;
        /*
         * It is assumed that vm_thread_alloc() called cpu_thread_alloc()
         * before cpu_fork() is called.
         */

        /* Point the pcb to the top of the stack. */
        pcb2 = td2->td_pcb;

        /*
         * Copy p1's pcb.  Note that on MIPS the pcb includes td_frame, so
         * this single copy covers both; the older mips2 code did a separate
         * copy of the td_frame, which is no longer needed.
         */
        bcopy(td1->td_pcb, pcb2, sizeof(*pcb2));

        /*
         * Copy td1's machine-dependent flags; md_proc is empty for MIPS,
         * so there is nothing else to carry over.
         */
        td2->td_md.md_flags = td1->td_md.md_flags & MDTD_FPUSED;

        /*
         * Set up the return-value registers as the fork() libc stub expects:
         * v0 holds the return value, v1 is non-zero in the child so the stub
         * can tell the two returns apart, and a3 == 0 indicates success.
         */
        td2->td_frame->v0 = 0;
        td2->td_frame->v1 = 1;
        td2->td_frame->a3 = 0;

        if (td1 == PCPU_GET(fpcurthread))
                MipsSaveCurFPState(td1);

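        /*
         * Prime the saved kernel context that cpu_switch() restores the
         * first time the child runs: ra points at fork_trampoline, and
         * s0/s1/s2 carry the callout (fork_return), its argument (td2) and
         * the trap frame that the trampoline hands to it.
         */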
        pcb2->pcb_context.val[PCB_REG_RA] = (register_t)fork_trampoline;
        /* Make sp 64-bit aligned */
        pcb2->pcb_context.val[PCB_REG_SP] = (register_t)(((vm_offset_t)td2->td_pcb &
            ~(sizeof(__int64_t) - 1)) - STAND_FRAME_SIZE);
        pcb2->pcb_context.val[PCB_REG_S0] = (register_t)fork_return;
        pcb2->pcb_context.val[PCB_REG_S1] = (register_t)td2;
        pcb2->pcb_context.val[PCB_REG_S2] = (register_t)td2->td_frame;
        pcb2->pcb_context.val[PCB_REG_SR] = SR_INT_MASK;
        /*
         * FREEBSD_DEVELOPERS_FIXME:
         * Setup any other CPU-Specific registers (Not MIPS Standard)
         * and/or bits in other standard MIPS registers (if CPU-Specific)
         * that are needed.
         */

        td2->td_md.md_saved_intr = MIPS_SR_INT_IE;
        td2->td_md.md_spinlock_count = 1;
#ifdef TARGET_OCTEON
        pcb2->pcb_context.val[PCB_REG_SR] |= MIPS_SR_COP_2_BIT | MIPS32_SR_PX |
            MIPS_SR_UX | MIPS_SR_KX | MIPS_SR_SX;
#endif
}

/*
 * Intercept the return address from a freshly forked process that has NOT
 * been scheduled yet.
 *
 * This is needed to make kernel threads stay in kernel mode.
 */
void
cpu_set_fork_handler(struct thread *td, void (*func) __P((void *)), void *arg)
{
        /*
         * Note that the trap frame follows the args, so the function
         * is really called like this: func(arg, frame);
         */
        td->td_pcb->pcb_context.val[PCB_REG_S0] = (register_t) func;
        td->td_pcb->pcb_context.val[PCB_REG_S1] = (register_t) arg;
}

void
cpu_exit(struct thread *td)
{
}

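/*
 * Drop FPU ownership if the exiting thread holds it, so its stale FP state
 * is never restored on behalf of another thread.
 */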
void
cpu_thread_exit(struct thread *td)
{

        if (PCPU_GET(fpcurthread) == td)
                PCPU_SET(fpcurthread, (struct thread *)0);
}

void
cpu_thread_free(struct thread *td)
{
}

void
cpu_thread_clean(struct thread *td)
{
}

void
cpu_thread_swapin(struct thread *td)
{
        pt_entry_t *pte;
        int i;

        /*
         * The kstack may be at a different physical address now.
         * Cache the PTEs for the kernel stack in the machine dependent
         * part of the thread struct so cpu_switch() can quickly map in
         * the pcb struct and kernel stack.
         */
        if (!(pte = pmap_segmap(kernel_pmap, td->td_md.md_realstack)))
                panic("cpu_thread_swapin: invalid segmap");
        pte += ((vm_offset_t)td->td_md.md_realstack >> PGSHIFT) & (NPTEPG - 1);

        for (i = 0; i < KSTACK_PAGES - 1; i++) {
                td->td_md.md_upte[i] = *pte & ~(PTE_RO|PTE_WIRED);
                pte++;
        }
}

void
cpu_thread_swapout(struct thread *td)
{
}

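/*
 * Lay out the machine-dependent parts of a new kernel stack: md_realstack
 * is the stack rounded up to an even page boundary, the pcb sits at its top
 * (with the trap frame as its first member), and the stack PTEs are cached
 * in md_upte so cpu_switch() can enter them into the TLB cheaply.
 */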
void
cpu_thread_alloc(struct thread *td)
{
        pt_entry_t *pte;
        int i;

        if (td->td_kstack & (1 << PAGE_SHIFT))
                td->td_md.md_realstack = td->td_kstack + PAGE_SIZE;
        else
                td->td_md.md_realstack = td->td_kstack;

        td->td_pcb = (struct pcb *)(td->td_md.md_realstack +
            (td->td_kstack_pages - 1) * PAGE_SIZE) - 1;
        td->td_frame = &td->td_pcb->pcb_regs;

        if (!(pte = pmap_segmap(kernel_pmap, td->td_md.md_realstack)))
                panic("cpu_thread_alloc: invalid segmap");
        pte += ((vm_offset_t)td->td_md.md_realstack >> PGSHIFT) & (NPTEPG - 1);

        for (i = 0; i < KSTACK_PAGES - 1; i++) {
                td->td_md.md_upte[i] = *pte & ~(PTE_RO|PTE_WIRED);
                pte++;
        }
}

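/*
 * Fix up the trap frame to reflect a system call's result.  The MIPS ABI
 * returns the value in v0 (and v1 for 64-bit results), with a3 set to 0 on
 * success or 1 on error; ERESTART rewinds the pc so the syscall instruction
 * is re-executed, and EJUSTRETURN leaves the frame untouched.
 */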
void
cpu_set_syscall_retval(struct thread *td, int error)
{
        struct trapframe *locr0 = td->td_frame;
        unsigned int code;
        int quad_syscall;

        code = locr0->v0;
        quad_syscall = 0;
        if (code == SYS_syscall)
                code = locr0->a0;
        else if (code == SYS___syscall) {
                code = _QUAD_LOWWORD ? locr0->a1 : locr0->a0;
                quad_syscall = 1;
        }

        switch (error) {
        case 0:
                if (quad_syscall && code != SYS_lseek) {
                        /*
                         * System call invoked through the
                         * SYS___syscall interface but the
                         * return value is really just 32
                         * bits.
                         */
                        locr0->v0 = td->td_retval[0];
                        if (_QUAD_LOWWORD)
                                locr0->v1 = td->td_retval[0];
                        locr0->a3 = 0;
                } else {
                        locr0->v0 = td->td_retval[0];
                        locr0->v1 = td->td_retval[1];
                        locr0->a3 = 0;
                }
                break;

        case ERESTART:
                locr0->pc = td->td_pcb->pcb_tpc;
                break;

        case EJUSTRETURN:
                break;  /* nothing to do */

        default:
                if (quad_syscall && code != SYS_lseek) {
                        locr0->v0 = error;
                        if (_QUAD_LOWWORD)
                                locr0->v1 = error;
                        locr0->a3 = 1;
                } else {
                        locr0->v0 = error;
                        locr0->a3 = 1;
                }
        }
}

/*
 * Initialize machine state (pcb and trap frame) for a new thread about to
 * upcall.  Put enough state in the new thread's PCB to get it to go back to
 * userret(), where we can intercept it again to set the return (upcall)
 * address and stack, along with those from upcalls that come from other
 * sources such as those generated in thread_userret() itself.
 */
void
cpu_set_upcall(struct thread *td, struct thread *td0)
{
        struct pcb *pcb2;

        /* Point the pcb to the top of the stack. */
        pcb2 = td->td_pcb;

        /*
         * Copy the upcall pcb.  This loads kernel regs.
         * Those not loaded individually below get their default
         * values here.
         *
         * XXXKSE It might be a good idea to simply skip this as
         * the values of the other registers may be unimportant.
         * This would remove any requirement for knowing the KSE
         * at this time (see the matching comment below for
         * more analysis) (need a good safe default).
         * In MIPS, the trapframe is the first element of the PCB
         * and gets copied when we copy the PCB.  No separate copy
         * is needed.
         */
        bcopy(td0->td_pcb, pcb2, sizeof(*pcb2));

        /*
         * Set registers for trampoline to user mode.
         */

        pcb2->pcb_context.val[PCB_REG_RA] = (register_t)fork_trampoline;
        /* Make sp 64-bit aligned */
        pcb2->pcb_context.val[PCB_REG_SP] = (register_t)(((vm_offset_t)td->td_pcb &
            ~(sizeof(__int64_t) - 1)) - STAND_FRAME_SIZE);
        pcb2->pcb_context.val[PCB_REG_S0] = (register_t)fork_return;
        pcb2->pcb_context.val[PCB_REG_S1] = (register_t)td;
        pcb2->pcb_context.val[PCB_REG_S2] = (register_t)td->td_frame;

        /* Don't set the IE bit in SR; the sched lock release will take care of it. */
        /* idle_mask is jmips pcb2->pcb_context.val[11] = (ALL_INT_MASK & idle_mask); */
        pcb2->pcb_context.val[PCB_REG_SR] = SR_INT_MASK;
#ifdef TARGET_OCTEON
        pcb2->pcb_context.val[PCB_REG_SR] |= MIPS_SR_COP_2_BIT | MIPS_SR_COP_0_BIT |
            MIPS32_SR_PX | MIPS_SR_UX | MIPS_SR_KX | MIPS_SR_SX;
#endif

        /*
         * FREEBSD_DEVELOPERS_FIXME:
         * Setup any other CPU-Specific registers (Not MIPS Standard)
         * that are needed.
         */

        /* SMP Setup to release sched_lock in fork_exit(). */
        td->td_md.md_spinlock_count = 1;
        td->td_md.md_saved_intr = MIPS_SR_INT_IE;
#if 0
        /* Maybe we need to fix this? */
        td->td_md.md_saved_sr = ((MIPS_SR_COP_2_BIT | MIPS_SR_COP_0_BIT) |
            (MIPS32_SR_PX | MIPS_SR_UX | MIPS_SR_KX | MIPS_SR_SX) |
            (MIPS_SR_INT_IE | MIPS_HARD_INT_MASK));
#endif
}

/*
 * Set the machine state for performing an upcall that has to be done in
 * thread_userret() so that those upcalls generated in thread_userret()
 * itself can be done as well.
 */
void
cpu_set_upcall_kse(struct thread *td, void (*entry)(void *), void *arg,
    stack_t *stack)
{
        struct trapframe *tf;
        u_int32_t sp;

        /*
         * At the point where a function is called, sp must be 8-byte
         * aligned for compatibility with 64-bit CPUs (see ``See MIPS Run''
         * by D. Sweetman, p. 269).  Align the stack accordingly.
         */
        sp = ((uint32_t)(stack->ss_sp + stack->ss_size) & ~0x7) -
            STAND_FRAME_SIZE;

        /*
         * Set the trap frame to point at the beginning of the uts
         * function.
         */
        tf = td->td_frame;
        bzero(tf, sizeof(struct trapframe));
        tf->sp = (register_t)sp;
        tf->pc = (register_t)entry;
        tf->a0 = (register_t)arg;

        tf->sr = SR_KSU_USER | SR_EXL;
#ifdef TARGET_OCTEON
        tf->sr |= MIPS_SR_INT_IE | MIPS_SR_COP_0_BIT | MIPS_SR_UX |
            MIPS_SR_KX;
#endif
        /* tf->sr |= (ALL_INT_MASK & idle_mask) | SR_INT_ENAB; */
        /* XXX the above may now be wrong -- mips2 implements this as panic */
        /*
         * FREEBSD_DEVELOPERS_FIXME:
         * Setup any other CPU-Specific registers (Not MIPS Standard)
         * that are needed.
         */
}

/*
 * Convert a kernel virtual address to a physical address.
 */
u_long
kvtop(void *addr)
{
        vm_offset_t va;

        va = pmap_kextract((vm_offset_t)addr);
        if (va == 0)
                panic("kvtop: zero page frame");
        return (va);
}

/*
 * Implement the pre-zeroed page mechanism.
 * This routine is called from the idle loop.
 */

#define ZIDLE_LO(v)	((v) * 2 / 3)
#define ZIDLE_HI(v)	((v) * 4 / 5)

/*
 * Tell whether this address is in some physical memory region.
 * Currently used by the kernel coredump code in order to avoid
 * dumping non-memory physical address space.
 */
int
is_physical_memory(vm_offset_t addr)
{
        if (addr >= SDRAM_ADDR_START && addr <= SDRAM_ADDR_END)
                return 1;
        else
                return 0;
}

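/*
 * Return non-zero if the physical address may be mapped cacheable: SDRAM
 * always qualifies, and flash does as well on platforms that define a
 * flash address range.
 */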
int
is_cacheable_mem(vm_offset_t pa)
{
        if ((pa >= SDRAM_ADDR_START && pa <= SDRAM_ADDR_END) ||
#ifdef FLASH_ADDR_START
            (pa >= FLASH_ADDR_START && pa <= FLASH_ADDR_END))
#else
            0)
#endif
                return 1;
        else
                return 0;
}

/*
 * Allocate a pool of sf_bufs (sendfile(2) or "super-fast" if you prefer. :-))
 */
static void
sf_buf_init(void *arg)
{
        struct sf_buf *sf_bufs;
        vm_offset_t sf_base;
        int i;

        nsfbufs = NSFBUFS;
        TUNABLE_INT_FETCH("kern.ipc.nsfbufs", &nsfbufs);

        sf_buf_active = hashinit(nsfbufs, M_TEMP, &sf_buf_hashmask);
        TAILQ_INIT(&sf_buf_freelist);
        sf_base = kmem_alloc_nofault(kernel_map, nsfbufs * PAGE_SIZE);
        sf_bufs = malloc(nsfbufs * sizeof(struct sf_buf), M_TEMP,
            M_NOWAIT | M_ZERO);
        for (i = 0; i < nsfbufs; i++) {
                sf_bufs[i].kva = sf_base + i * PAGE_SIZE;
                TAILQ_INSERT_TAIL(&sf_buf_freelist, &sf_bufs[i], free_entry);
        }
        sf_buf_alloc_want = 0;
        mtx_init(&sf_buf_lock, "sf_buf", NULL, MTX_DEF);
}

/*
 * Allocate an sf_buf for the given vm_page.  If the page is already mapped
 * by an active sf_buf, reuse that mapping and bump its reference count;
 * otherwise take a buffer from the free list and map the page at its KVA.
 * May sleep waiting for a free buffer unless SFB_NOWAIT is given.
 */
struct sf_buf *
sf_buf_alloc(struct vm_page *m, int flags)
{
        struct sf_head *hash_list;
        struct sf_buf *sf;
        int error;

        hash_list = &sf_buf_active[SF_BUF_HASH(m)];
        mtx_lock(&sf_buf_lock);
        LIST_FOREACH(sf, hash_list, list_entry) {
                if (sf->m == m) {
                        sf->ref_count++;
                        if (sf->ref_count == 1) {
                                TAILQ_REMOVE(&sf_buf_freelist, sf, free_entry);
                                nsfbufsused++;
                                nsfbufspeak = imax(nsfbufspeak, nsfbufsused);
                        }
                        goto done;
                }
        }
        while ((sf = TAILQ_FIRST(&sf_buf_freelist)) == NULL) {
                if (flags & SFB_NOWAIT)
                        goto done;
                sf_buf_alloc_want++;
                mbstat.sf_allocwait++;
                error = msleep(&sf_buf_freelist, &sf_buf_lock,
                    (flags & SFB_CATCH) ? PCATCH | PVM : PVM, "sfbufa", 0);
                sf_buf_alloc_want--;

                /*
                 * If we got a signal, don't risk going back to sleep.
                 */
                if (error)
                        goto done;
        }
        TAILQ_REMOVE(&sf_buf_freelist, sf, free_entry);
        if (sf->m != NULL)
                LIST_REMOVE(sf, list_entry);
        LIST_INSERT_HEAD(hash_list, sf, list_entry);
        sf->ref_count = 1;
        sf->m = m;
        nsfbufsused++;
        nsfbufspeak = imax(nsfbufspeak, nsfbufsused);
        pmap_qenter(sf->kva, &sf->m, 1);
done:
        mtx_unlock(&sf_buf_lock);
        return (sf);
}

/*
 * Release a reference on an sf_buf.  When the last reference goes away the
 * buffer is returned to the free list (its mapping is kept so it can be
 * revived later), and anyone waiting in sf_buf_alloc() is woken up.
 */
void
sf_buf_free(struct sf_buf *sf)
{
        mtx_lock(&sf_buf_lock);
        sf->ref_count--;
        if (sf->ref_count == 0) {
                TAILQ_INSERT_TAIL(&sf_buf_freelist, sf, free_entry);
                nsfbufsused--;
                if (sf_buf_alloc_want > 0)
                        wakeup_one(&sf_buf_freelist);
        }
        mtx_unlock(&sf_buf_lock);
}

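/*
 * Typical usage sketch (hypothetical caller): map a page, access it through
 * the buffer's KVA via sf_buf_kva(), then drop the reference:
 *
 *	struct sf_buf *sf;
 *
 *	sf = sf_buf_alloc(m, 0);	(may sleep for a free buffer)
 *	bcopy(src, (void *)sf_buf_kva(sf), len);
 *	sf_buf_free(sf);		(buffer returns to the free list)
 */
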
/*
 * Software interrupt handler for queued VM system processing.
 */
void
swi_vm(void *dummy)
{
}

int
cpu_set_user_tls(struct thread *td, void *tls_base)
{

        /* TBD */
        return (0);
}

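/*
 * Switch to the new thread without saving the old thread's context; the
 * actual work is done in assembly by mips_cpu_throw(), which never returns.
 */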
void
cpu_throw(struct thread *old, struct thread *new)
{

        func_2args_asmmacro(&mips_cpu_throw, old, new);
        panic("mips_cpu_throw() returned");
}