/*-
 * Copyright (c) 1982, 1986 The Regents of the University of California.
 * Copyright (c) 1989, 1990 William Jolitz
 * Copyright (c) 1994 John Dyson
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department, and William Jolitz.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_machdep.c	7.3 (Berkeley) 5/13/91
 *	Utah $Hdr: vm_machdep.c 1.16.1.1 89/06/23$
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/proc.h>
#include <sys/socketvar.h>
#include <sys/sf_buf.h>
#include <sys/unistd.h>
#include <machine/cpu.h>
#include <machine/pcb.h>
#include <machine/sysarch.h>
#include <sys/lock.h>
#include <sys/mutex.h>

#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_extern.h>
#include <vm/vm_kern.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_param.h>
#include <vm/vm_pageout.h>
#include <vm/uma.h>
#include <vm/uma_int.h>

#include <machine/md_var.h>

#ifndef NSFBUFS
#define NSFBUFS		(512 + maxusers * 16)
#endif
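
/*
 * Worked example (added; the maxusers value is hypothetical): with
 * maxusers tuned to 32, the formula above yields 512 + 32 * 16 = 1024
 * sf_bufs.  The value can still be overridden at boot through the
 * kern.ipc.nsfbufs tunable fetched in sf_buf_init() below.
 */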

#ifndef ARM_USE_SMALL_ALLOC
static void	sf_buf_init(void *arg);
SYSINIT(sock_sf, SI_SUB_MBUF, SI_ORDER_ANY, sf_buf_init, NULL)

LIST_HEAD(sf_head, sf_buf);

/*
 * A hash table of active sendfile(2) buffers
 */
static struct sf_head *sf_buf_active;
static u_long sf_buf_hashmask;

#define	SF_BUF_HASH(m)	(((m) - vm_page_array) & sf_buf_hashmask)
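
/*
 * Illustrative note (added; numbers are hypothetical): SF_BUF_HASH()
 * keys on the page's index within vm_page_array, masked to the table
 * size.  hashinit() allocates a power-of-two number of buckets, so
 * sf_buf_hashmask is 2^n - 1; with 1024 buckets (mask 1023), the page
 * at index 5000 hashes to bucket 5000 & 1023 = 904.
 */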

static TAILQ_HEAD(, sf_buf) sf_buf_freelist;
static u_int sf_buf_alloc_want;

/*
 * A lock used to synchronize access to the hash table and free list
 */
static struct mtx sf_buf_lock;
#endif

/*
 * Finish a fork operation, with process p2 nearly set up.
 * Copy and update the pcb, and set up the stack so that the child is
 * ready to run and return to user mode.
 */
void
cpu_fork(register struct thread *td1, register struct proc *p2,
    struct thread *td2, int flags)
{
	struct pcb *pcb1, *pcb2;
	struct trapframe *tf;
	struct switchframe *sf;
	struct mdproc *mdp2;

	if ((flags & RFPROC) == 0)
		return;
	pcb1 = td1->td_pcb;
	pcb2 = (struct pcb *)(td2->td_kstack +
	    td2->td_kstack_pages * PAGE_SIZE) - 1;
#ifdef __XSCALE__
	pmap_use_minicache(td2->td_kstack, td2->td_kstack_pages * PAGE_SIZE);
	if (td2->td_altkstack)
		pmap_use_minicache(td2->td_altkstack,
		    td2->td_altkstack_pages * PAGE_SIZE);
#endif
	td2->td_pcb = pcb2;
	bcopy(td1->td_pcb, pcb2, sizeof(*pcb2));
	mdp2 = &p2->p_md;
	bcopy(&td1->td_proc->p_md, mdp2, sizeof(*mdp2));
	pcb2->un_32.pcb32_und_sp = td2->td_kstack + USPACE_UNDEF_STACK_TOP;
	pcb2->un_32.pcb32_sp = td2->td_kstack +
	    USPACE_SVC_STACK_TOP - sizeof(*pcb2);
	pmap_activate(td2);
	td2->td_frame = tf =
	    (struct trapframe *)pcb2->un_32.pcb32_sp - 1;
	*tf = *td1->td_frame;
	sf = (struct switchframe *)tf - 1;
	sf->sf_r4 = (u_int)fork_return;
	sf->sf_r5 = (u_int)td2;
	sf->sf_pc = (u_int)fork_trampoline;
	tf->tf_spsr &= ~PSR_C_bit;
	tf->tf_r0 = 0;
	tf->tf_r1 = 0;
	pcb2->un_32.pcb32_sp = (u_int)sf;

	/* Setup to release spin count in fork_exit(). */
	td2->td_md.md_spinlock_count = 1;
	td2->td_md.md_saved_cspr = 0;
	td2->td_md.md_tp = *(uint32_t **)ARM_TP_ADDRESS;
}
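
/*
 * Sketch (added for illustration) of the child's kernel stack as set up
 * by cpu_fork() above, from the top of the stack downward:
 *
 *	td_kstack + td_kstack_pages * PAGE_SIZE:
 *		struct pcb			(pcb2, td_pcb)
 *	td_kstack + USPACE_SVC_STACK_TOP - sizeof(struct pcb):
 *		struct trapframe		(tf, td_frame)
 *		struct switchframe		(sf, saved in pcb32_sp)
 *
 * When the child is first switched to, cpu_switch() loads the
 * switchframe, which sends it through fork_trampoline() into
 * fork_return() with the trapframe above it for the return to user mode.
 */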

void
cpu_thread_swapin(struct thread *td)
{
}

void
cpu_thread_swapout(struct thread *td)
{
}

/*
 * Detach the mapped page and release resources back to the system.
 */
void
sf_buf_free(struct sf_buf *sf)
{
#ifndef ARM_USE_SMALL_ALLOC
	mtx_lock(&sf_buf_lock);
	sf->ref_count--;
	if (sf->ref_count == 0) {
		TAILQ_INSERT_TAIL(&sf_buf_freelist, sf, free_entry);
		nsfbufsused--;
		if (sf_buf_alloc_want > 0)
			wakeup_one(&sf_buf_freelist);
	}
	mtx_unlock(&sf_buf_lock);
#endif
}

#ifndef ARM_USE_SMALL_ALLOC
/*
 * Allocate a pool of sf_bufs (sendfile(2) or "super-fast" if you prefer. :-))
 */
static void
sf_buf_init(void *arg)
{
	struct sf_buf *sf_bufs;
	vm_offset_t sf_base;
	int i;

	nsfbufs = NSFBUFS;
	TUNABLE_INT_FETCH("kern.ipc.nsfbufs", &nsfbufs);

	sf_buf_active = hashinit(nsfbufs, M_TEMP, &sf_buf_hashmask);
	TAILQ_INIT(&sf_buf_freelist);
	sf_base = kmem_alloc_nofault(kernel_map, nsfbufs * PAGE_SIZE);
	sf_bufs = malloc(nsfbufs * sizeof(struct sf_buf), M_TEMP,
	    M_NOWAIT | M_ZERO);
	for (i = 0; i < nsfbufs; i++) {
		sf_bufs[i].kva = sf_base + i * PAGE_SIZE;
		TAILQ_INSERT_TAIL(&sf_buf_freelist, &sf_bufs[i], free_entry);
	}
	sf_buf_alloc_want = 0;
	mtx_init(&sf_buf_lock, "sf_buf", NULL, MTX_DEF);
}
#endif

/*
 * Get an sf_buf from the freelist.  Will block if none are available.
 */
struct sf_buf *
sf_buf_alloc(struct vm_page *m, int flags)
{
#ifdef ARM_USE_SMALL_ALLOC
	return ((struct sf_buf *)m);
#else
	struct sf_head *hash_list;
	struct sf_buf *sf;
	int error;

	hash_list = &sf_buf_active[SF_BUF_HASH(m)];
	mtx_lock(&sf_buf_lock);
	LIST_FOREACH(sf, hash_list, list_entry) {
		if (sf->m == m) {
			sf->ref_count++;
			if (sf->ref_count == 1) {
				TAILQ_REMOVE(&sf_buf_freelist, sf, free_entry);
				nsfbufsused++;
				nsfbufspeak = imax(nsfbufspeak, nsfbufsused);
			}
			goto done;
		}
	}
	while ((sf = TAILQ_FIRST(&sf_buf_freelist)) == NULL) {
		if (flags & SFB_NOWAIT)
			goto done;
		sf_buf_alloc_want++;
		mbstat.sf_allocwait++;
		error = msleep(&sf_buf_freelist, &sf_buf_lock,
		    (flags & SFB_CATCH) ? PCATCH | PVM : PVM, "sfbufa", 0);
		sf_buf_alloc_want--;

		/*
		 * If we got a signal, don't risk going back to sleep.
		 */
		if (error)
			goto done;
	}
	TAILQ_REMOVE(&sf_buf_freelist, sf, free_entry);
	if (sf->m != NULL)
		LIST_REMOVE(sf, list_entry);
	LIST_INSERT_HEAD(hash_list, sf, list_entry);
	sf->ref_count = 1;
	sf->m = m;
	nsfbufsused++;
	nsfbufspeak = imax(nsfbufspeak, nsfbufsused);
	pmap_kenter(sf->kva, VM_PAGE_TO_PHYS(sf->m));
done:
	mtx_unlock(&sf_buf_lock);
	return (sf);
#endif
}
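
/*
 * Illustrative sketch (added, not part of the original file): the
 * typical consumer pattern for the allocator above, as in the
 * sendfile(2) path.  copy_page_example() is hypothetical; it assumes
 * the sf_buf_kva() accessor from <sys/sf_buf.h>.
 */
#if 0
static int
copy_page_example(vm_page_t m, const char *src, size_t len)
{
	struct sf_buf *sf;

	sf = sf_buf_alloc(m, SFB_NOWAIT);	/* NULL if the pool is dry */
	if (sf == NULL)
		return (ENOMEM);
	bcopy(src, (void *)sf_buf_kva(sf), len);	/* use the mapping */
	sf_buf_free(sf);				/* drop the reference */
	return (0);
}
#endif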

/*
 * Initialize machine state (pcb and trap frame) for a new thread about to
 * upcall.  Put enough state in the new thread's PCB to get it to go back
 * out to userret(), where we can intercept it again to set the return
 * (upcall) address and stack, along with those from upcalls that are from
 * other sources such as those generated in thread_userret() itself.
 */
void
cpu_set_upcall(struct thread *td, struct thread *td0)
{
	struct trapframe *tf;
	struct switchframe *sf;

	bcopy(td0->td_frame, td->td_frame, sizeof(struct trapframe));
	bcopy(td0->td_pcb, td->td_pcb, sizeof(struct pcb));
	tf = td->td_frame;
	sf = (struct switchframe *)tf - 1;
	sf->sf_r4 = (u_int)fork_return;
	sf->sf_r5 = (u_int)td;
	sf->sf_pc = (u_int)fork_trampoline;
	tf->tf_spsr &= ~PSR_C_bit;
	tf->tf_r0 = 0;
	td->td_pcb->un_32.pcb32_sp = (u_int)sf;
	td->td_pcb->un_32.pcb32_und_sp = td->td_kstack + USPACE_UNDEF_STACK_TOP;

	/* Setup to release spin count in fork_exit(). */
	td->td_md.md_spinlock_count = 1;
	td->td_md.md_saved_cspr = 0;
}

/*
 * Set the machine state for performing an upcall.  This has to be done
 * in thread_userret() so that those upcalls generated in
 * thread_userret() itself can be done as well.
 */
void
cpu_set_upcall_kse(struct thread *td, void (*entry)(void *), void *arg,
    stack_t *stack)
{
	struct trapframe *tf = td->td_frame;

	tf->tf_usr_sp = ((int)stack->ss_sp + stack->ss_size
	    - sizeof(struct trapframe)) & ~7;
	tf->tf_pc = (int)entry;
	tf->tf_r0 = (int)arg;
	tf->tf_spsr = PSR_USR32_MODE;
}

int
cpu_set_user_tls(struct thread *td, void *tls_base)
{

	if (td != curthread)
		td->td_md.md_tp = tls_base;
	else {
		critical_enter();
		*(void **)ARM_TP_ADDRESS = tls_base;
		critical_exit();
	}
	return (0);
}

void
cpu_thread_exit(struct thread *td)
{
}

void
cpu_thread_setup(struct thread *td)
{
	td->td_pcb = (struct pcb *)(td->td_kstack + td->td_kstack_pages *
	    PAGE_SIZE) - 1;
	td->td_frame = (struct trapframe *)
	    ((u_int)td->td_kstack + USPACE_SVC_STACK_TOP -
	    sizeof(struct pcb)) - 1;
#ifdef __XSCALE__
	pmap_use_minicache(td->td_kstack, td->td_kstack_pages * PAGE_SIZE);
#endif
}

void
cpu_thread_clean(struct thread *td)
{
}

/*
 * Intercept the return address from a freshly forked process that has NOT
 * been scheduled yet.
 *
 * This is needed to make kernel threads stay in kernel mode.
 */
void
cpu_set_fork_handler(struct thread *td, void (*func)(void *), void *arg)
{
	struct switchframe *sf;
	struct trapframe *tf;

	tf = td->td_frame;
	sf = (struct switchframe *)tf - 1;
	sf->sf_r4 = (u_int)func;
	sf->sf_r5 = (u_int)arg;
	td->td_pcb->un_32.pcb32_sp = (u_int)sf;
}

/*
 * Software interrupt handler for queued VM system processing.
 */
void
swi_vm(void *dummy)
{

	if (busdma_swi_pending)
		busdma_swi();
}

void
cpu_exit(struct thread *td)
{
}

#define BITS_PER_INT	(8 * sizeof(int))
vm_offset_t arm_nocache_startaddr;
static int arm_nocache_allocated[ARM_NOCACHE_KVA_SIZE / (PAGE_SIZE *
    BITS_PER_INT)];

/*
 * Functions to map and unmap memory non-cached into KVA that the kernel
 * won't otherwise try to allocate.  The goal is to provide uncached memory
 * to busdma, to honor BUS_DMA_COHERENT.
 * We can allocate at most ARM_NOCACHE_KVA_SIZE bytes.
 * The allocator is deliberately simple: each page is represented by a bit
 * in a bitfield, 0 meaning the page is not allocated, 1 meaning it is.
 * As soon as it finds enough contiguous pages to satisfy the request,
 * it returns the address.
 */
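
/*
 * Worked example (added): page i of the region is tracked by bit
 * i % BITS_PER_INT of arm_nocache_allocated[i / BITS_PER_INT]; with
 * 32-bit ints, page 70 is bit 6 of word 2.
 */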
void *
arm_remap_nocache(void *addr, vm_size_t size)
{
	int i, j;

	size = round_page(size);
	for (i = 0; i < ARM_NOCACHE_KVA_SIZE / PAGE_SIZE; i++) {
		if (!(arm_nocache_allocated[i / BITS_PER_INT] & (1 << (i %
		    BITS_PER_INT)))) {
			for (j = i; j < i + (size / (PAGE_SIZE)); j++)
				if (arm_nocache_allocated[j / BITS_PER_INT] &
				    (1 << (j % BITS_PER_INT)))
					break;
			if (j == i + (size / (PAGE_SIZE)))
				break;
		}
	}
	if (i < ARM_NOCACHE_KVA_SIZE / PAGE_SIZE) {
		vm_offset_t tomap = arm_nocache_startaddr + i * PAGE_SIZE;
		void *ret = (void *)tomap;
		vm_paddr_t physaddr = vtophys((vm_offset_t)addr);

		for (; tomap < (vm_offset_t)ret + size; tomap += PAGE_SIZE,
		    physaddr += PAGE_SIZE, i++) {
			pmap_kenter_nocache(tomap, physaddr);
			arm_nocache_allocated[i / BITS_PER_INT] |= 1 << (i %
			    BITS_PER_INT);
		}
		return (ret);
	}
	return (NULL);
}

void
arm_unmap_nocache(void *addr, vm_size_t size)
{
	vm_offset_t raddr = (vm_offset_t)addr;
	int i;

	size = round_page(size);
	i = (raddr - arm_nocache_startaddr) / (PAGE_SIZE);
	for (; size > 0; size -= PAGE_SIZE, i++)
		arm_nocache_allocated[i / BITS_PER_INT] &= ~(1 << (i %
		    BITS_PER_INT));
}
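
/*
 * Illustrative sketch (added, not part of the original file): a caller
 * wanting a BUS_DMA_COHERENT-style view of an existing buffer could
 * alias it uncached and tear the alias down when done.
 * nocache_example() is hypothetical.
 */
#if 0
static void
nocache_example(void *buf, vm_size_t len)
{
	void *nc;

	nc = arm_remap_nocache(buf, len);	/* NULL if KVA is exhausted */
	if (nc != NULL) {
		memset(nc, 0, len);		/* stores bypass the cache */
		arm_unmap_nocache(nc, len);
	}
}
#endif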

#ifdef ARM_USE_SMALL_ALLOC

static TAILQ_HEAD(,arm_small_page) pages_normal =
    TAILQ_HEAD_INITIALIZER(pages_normal);
static TAILQ_HEAD(,arm_small_page) pages_wt =
    TAILQ_HEAD_INITIALIZER(pages_wt);
static TAILQ_HEAD(,arm_small_page) free_pgdesc =
    TAILQ_HEAD_INITIALIZER(free_pgdesc);

extern uma_zone_t l2zone;

struct mtx smallalloc_mtx;

MALLOC_DEFINE(M_VMSMALLALLOC, "vm_small_alloc", "VM Small alloc data");

vm_offset_t alloc_firstaddr;

#ifdef ARM_HAVE_SUPERSECTIONS
#define S_FRAME	L1_SUP_FRAME
#define S_SIZE	L1_SUP_SIZE
#else
#define S_FRAME	L1_S_FRAME
#define S_SIZE	L1_S_SIZE
#endif

vm_offset_t
arm_ptovirt(vm_paddr_t pa)
{
	int i;
	vm_offset_t addr = alloc_firstaddr;

	KASSERT(alloc_firstaddr != 0, ("arm_ptovirt called too early?"));
	for (i = 0; dump_avail[i + 1]; i += 2) {
		if (pa >= dump_avail[i] && pa < dump_avail[i + 1])
			break;
		addr += (dump_avail[i + 1] & S_FRAME) + S_SIZE -
		    (dump_avail[i] & S_FRAME);
	}
	KASSERT(dump_avail[i + 1] != 0,
	    ("Trying to access invalid physical address"));
	return (addr + (pa - (dump_avail[i] & S_FRAME)));
}
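
/*
 * Note (added): for every dump_avail segment that precedes the one
 * holding pa, the loop above advances addr by that segment's
 * section-rounded size, (end & S_FRAME) + S_SIZE - (start & S_FRAME),
 * then adds pa's offset from its own segment's section base.  So,
 * assuming section-aligned segment starts, a pa sitting 1MB into the
 * second segment maps to alloc_firstaddr plus the rounded size of the
 * first segment plus 1MB.
 */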

void
arm_init_smallalloc(void)
{
	vm_offset_t to_map = 0, mapaddr;
	int i;

	/*
	 * We need to use dump_avail and not phys_avail, since we want to
	 * map all of physical memory, not just the memory available to
	 * the VM, so that we can do a pa => va translation for any
	 * address.
	 */

	for (i = 0; dump_avail[i + 1]; i += 2) {
		to_map += (dump_avail[i + 1] & S_FRAME) + S_SIZE -
		    (dump_avail[i] & S_FRAME);
	}
	alloc_firstaddr = mapaddr = KERNBASE - to_map;
	for (i = 0; dump_avail[i + 1]; i += 2) {
		vm_offset_t size = (dump_avail[i + 1] & S_FRAME) +
		    S_SIZE - (dump_avail[i] & S_FRAME);
		vm_offset_t did = 0;
		while (size > 0) {
#ifdef ARM_HAVE_SUPERSECTIONS
			pmap_kenter_supersection(mapaddr,
			    (dump_avail[i] & L1_SUP_FRAME) + did,
			    SECTION_CACHE);
#else
			pmap_kenter_section(mapaddr,
			    (dump_avail[i] & L1_S_FRAME) + did, SECTION_CACHE);
#endif
			mapaddr += S_SIZE;
			did += S_SIZE;
			size -= S_SIZE;
		}
	}
}

void
arm_add_smallalloc_pages(void *list, void *mem, int bytes, int pagetable)
{
	struct arm_small_page *pg;

	bytes &= ~PAGE_MASK;
	while (bytes > 0) {
		pg = (struct arm_small_page *)list;
		pg->addr = mem;
		if (pagetable)
			TAILQ_INSERT_HEAD(&pages_wt, pg, pg_list);
		else
			TAILQ_INSERT_HEAD(&pages_normal, pg, pg_list);
		list = (char *)list + sizeof(*pg);
		mem = (char *)mem + PAGE_SIZE;
		bytes -= PAGE_SIZE;
	}
}

void *
uma_small_alloc(uma_zone_t zone, int bytes, u_int8_t *flags, int wait)
{
	void *ret;
	struct arm_small_page *sp;
	TAILQ_HEAD(,arm_small_page) *head;
	static vm_pindex_t color;
	vm_page_t m;

	*flags = UMA_SLAB_PRIV;
	/*
	 * For CPUs where we set up page tables as write-back, there's no
	 * need to maintain two separate pools.
	 */
	if (zone == l2zone && pte_l1_s_cache_mode != pte_l1_s_cache_mode_pt)
		head = (void *)&pages_wt;
	else
		head = (void *)&pages_normal;

	mtx_lock(&smallalloc_mtx);
	sp = TAILQ_FIRST(head);

	if (!sp) {
		int pflags;

		mtx_unlock(&smallalloc_mtx);
		if (zone == l2zone &&
		    pte_l1_s_cache_mode != pte_l1_s_cache_mode_pt) {
			*flags = UMA_SLAB_KMEM;
			ret = ((void *)kmem_malloc(kmem_map, bytes, M_NOWAIT));
			return (ret);
		}
		if ((wait & (M_NOWAIT|M_USE_RESERVE)) == M_NOWAIT)
			pflags = VM_ALLOC_INTERRUPT | VM_ALLOC_WIRED;
		else
			pflags = VM_ALLOC_SYSTEM | VM_ALLOC_WIRED;
		if (wait & M_ZERO)
			pflags |= VM_ALLOC_ZERO;
		for (;;) {
			m = vm_page_alloc(NULL, color++,
			    pflags | VM_ALLOC_NOOBJ);
			if (m == NULL) {
				if (wait & M_NOWAIT)
					return (NULL);
				VM_WAIT;
			} else
				break;
		}
		ret = (void *)arm_ptovirt(VM_PAGE_TO_PHYS(m));
		if ((wait & M_ZERO) && (m->flags & PG_ZERO) == 0)
			bzero(ret, PAGE_SIZE);
		return (ret);
	}
	TAILQ_REMOVE(head, sp, pg_list);
	TAILQ_INSERT_HEAD(&free_pgdesc, sp, pg_list);
	ret = sp->addr;
	mtx_unlock(&smallalloc_mtx);
	if ((wait & M_ZERO))
		bzero(ret, bytes);
	return (ret);
}

void
uma_small_free(void *mem, int size, u_int8_t flags)
{
	pd_entry_t *pd;
	pt_entry_t *pt;

	if (flags & UMA_SLAB_KMEM)
		kmem_free(kmem_map, (vm_offset_t)mem, size);
	else {
		struct arm_small_page *sp;

		if ((vm_offset_t)mem >= KERNBASE) {
			mtx_lock(&smallalloc_mtx);
			sp = TAILQ_FIRST(&free_pgdesc);
			KASSERT(sp != NULL,
			    ("No more free page descriptors?"));
			TAILQ_REMOVE(&free_pgdesc, sp, pg_list);
			sp->addr = mem;
			pmap_get_pde_pte(kernel_pmap, (vm_offset_t)mem, &pd,
			    &pt);
			if ((*pd & pte_l1_s_cache_mask) ==
			    pte_l1_s_cache_mode_pt &&
			    pte_l1_s_cache_mode_pt != pte_l1_s_cache_mode)
				TAILQ_INSERT_HEAD(&pages_wt, sp, pg_list);
			else
				TAILQ_INSERT_HEAD(&pages_normal, sp, pg_list);
			mtx_unlock(&smallalloc_mtx);
		} else {
			vm_page_t m;
			vm_paddr_t pa = vtophys((vm_offset_t)mem);

			m = PHYS_TO_VM_PAGE(pa);
			m->wire_count--;
			vm_page_free(m);
			atomic_subtract_int(&cnt.v_wire_count, 1);
		}
	}
}

#endif