/*-
 * Copyright (c) 1982, 1986 The Regents of the University of California.
 * Copyright (c) 1989, 1990 William Jolitz
 * Copyright (c) 1994 John Dyson
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department, and William Jolitz.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_machdep.c	7.3 (Berkeley) 5/13/91
 *	Utah $Hdr: vm_machdep.c 1.16.1.1 89/06/23$
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/proc.h>
#include <sys/socketvar.h>
#include <sys/sf_buf.h>
#include <sys/unistd.h>
#include <machine/cpu.h>
#include <machine/pcb.h>
#include <machine/sysarch.h>
#include <sys/lock.h>
#include <sys/mutex.h>

#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_extern.h>
#include <vm/vm_kern.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_param.h>
#include <vm/vm_pageout.h>
#include <vm/uma.h>
#include <vm/uma_int.h>

#include <machine/md_var.h>

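/*
 * Default number of sendfile(2) buffers, scaled by maxusers; the value
 * can be overridden at boot time through the kern.ipc.nsfbufs tunable
 * fetched in sf_buf_init() below.
 */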
#ifndef NSFBUFS
#define	NSFBUFS		(512 + maxusers * 16)
#endif

#ifndef ARM_USE_SMALL_ALLOC
static void	sf_buf_init(void *arg);
SYSINIT(sock_sf, SI_SUB_MBUF, SI_ORDER_ANY, sf_buf_init, NULL);

LIST_HEAD(sf_head, sf_buf);

/*
 * A hash table of active sendfile(2) buffers
 */
static struct sf_head *sf_buf_active;
static u_long sf_buf_hashmask;

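/*
 * Hash a vm_page into a bucket by its index in vm_page_array, masked
 * with the power-of-two mask returned by hashinit(9).
 */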
#define	SF_BUF_HASH(m)	(((m) - vm_page_array) & sf_buf_hashmask)

static TAILQ_HEAD(, sf_buf) sf_buf_freelist;
static u_int sf_buf_alloc_want;

/*
 * A lock used to synchronize access to the hash table and free list
 */
static struct mtx sf_buf_lock;
#endif

/*
 * Finish a fork operation, with process p2 nearly set up.
 * Copy and update the pcb, and set up the stack so that the child is
 * ready to run and return to user mode.
 */
void
cpu_fork(register struct thread *td1, register struct proc *p2,
    struct thread *td2, int flags)
{
	struct pcb *pcb1, *pcb2;
	struct trapframe *tf;
	struct switchframe *sf;
	struct mdproc *mdp2;

	if ((flags & RFPROC) == 0)
		return;
	pcb1 = td1->td_pcb;
	pcb2 = (struct pcb *)(td2->td_kstack +
	    td2->td_kstack_pages * PAGE_SIZE) - 1;
#ifdef __XSCALE__
#ifndef CPU_XSCALE_CORE3
	pmap_use_minicache(td2->td_kstack, td2->td_kstack_pages * PAGE_SIZE);
	if (td2->td_altkstack)
		pmap_use_minicache(td2->td_altkstack, td2->td_altkstack_pages *
		    PAGE_SIZE);
#endif
#endif
	td2->td_pcb = pcb2;
	bcopy(td1->td_pcb, pcb2, sizeof(*pcb2));
	mdp2 = &p2->p_md;
	bcopy(&td1->td_proc->p_md, mdp2, sizeof(*mdp2));
	pcb2->un_32.pcb32_und_sp = td2->td_kstack + USPACE_UNDEF_STACK_TOP;
	pcb2->un_32.pcb32_sp = td2->td_kstack +
	    USPACE_SVC_STACK_TOP - sizeof(*pcb2);
	pmap_activate(td2);
	td2->td_frame = tf =
	    (struct trapframe *)pcb2->un_32.pcb32_sp - 1;
	*tf = *td1->td_frame;
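	/*
	 * Fake a switchframe underneath the trapframe, so that the first
	 * cpu_switch() into the child "returns" through fork_trampoline,
	 * which calls the function in r4 (fork_return) with the argument
	 * in r5 (the new thread).
	 */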
	sf = (struct switchframe *)tf - 1;
	sf->sf_r4 = (u_int)fork_return;
	sf->sf_r5 = (u_int)td2;
	sf->sf_pc = (u_int)fork_trampoline;
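	/*
	 * The child returns from fork() with r0 = 0, r1 = 0 and the
	 * condition-code carry bit clear, i.e. a successful syscall
	 * returning zero.
	 */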
	tf->tf_spsr &= ~PSR_C_bit;
	tf->tf_r0 = 0;
	tf->tf_r1 = 0;
	pcb2->un_32.pcb32_sp = (u_int)sf;

	/* Setup to release spin count in fork_exit(). */
	td2->td_md.md_spinlock_count = 1;
	td2->td_md.md_saved_cspr = 0;
	td2->td_md.md_tp = *(uint32_t **)ARM_TP_ADDRESS;
}

void
cpu_thread_swapin(struct thread *td)
{
}

void
cpu_thread_swapout(struct thread *td)
{
}

/*
 * Detach the mapped page and release resources back to the system.
 */
void
sf_buf_free(struct sf_buf *sf)
{
#ifndef ARM_USE_SMALL_ALLOC
	mtx_lock(&sf_buf_lock);
	sf->ref_count--;
	if (sf->ref_count == 0) {
		TAILQ_INSERT_TAIL(&sf_buf_freelist, sf, free_entry);
		nsfbufsused--;
		if (sf_buf_alloc_want > 0)
			wakeup_one(&sf_buf_freelist);
	}
	mtx_unlock(&sf_buf_lock);
#endif
}

#ifndef ARM_USE_SMALL_ALLOC
/*
 * Allocate a pool of sf_bufs (sendfile(2) or "super-fast" if you prefer. :-))
 */
static void
sf_buf_init(void *arg)
{
	struct sf_buf *sf_bufs;
	vm_offset_t sf_base;
	int i;

	nsfbufs = NSFBUFS;
	TUNABLE_INT_FETCH("kern.ipc.nsfbufs", &nsfbufs);

	sf_buf_active = hashinit(nsfbufs, M_TEMP, &sf_buf_hashmask);
	TAILQ_INIT(&sf_buf_freelist);
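	/*
	 * Reserve KVA for all of the buffers up front; a physical page
	 * is only entered into a buffer's mapping by pmap_kenter() when
	 * the buffer is handed out in sf_buf_alloc().
	 */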
	sf_base = kmem_alloc_nofault(kernel_map, nsfbufs * PAGE_SIZE);
	sf_bufs = malloc(nsfbufs * sizeof(struct sf_buf), M_TEMP,
	    M_NOWAIT | M_ZERO);
	for (i = 0; i < nsfbufs; i++) {
		sf_bufs[i].kva = sf_base + i * PAGE_SIZE;
		TAILQ_INSERT_TAIL(&sf_buf_freelist, &sf_bufs[i], free_entry);
	}
	sf_buf_alloc_want = 0;
	mtx_init(&sf_buf_lock, "sf_buf", NULL, MTX_DEF);
}
#endif

/*
 * Get an sf_buf from the freelist.  Will block if none are available.
 */
struct sf_buf *
sf_buf_alloc(struct vm_page *m, int flags)
{
#ifdef ARM_USE_SMALL_ALLOC
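	/*
	 * With the direct-mapped small-allocation KVA there is no need
	 * for a private mapping; the vm_page itself stands in for the
	 * sf_buf, and its KVA is recovered from the physical address.
	 */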
	return ((struct sf_buf *)m);
#else
	struct sf_head *hash_list;
	struct sf_buf *sf;
	int error;

	hash_list = &sf_buf_active[SF_BUF_HASH(m)];
	mtx_lock(&sf_buf_lock);
	LIST_FOREACH(sf, hash_list, list_entry) {
		if (sf->m == m) {
			sf->ref_count++;
			if (sf->ref_count == 1) {
				TAILQ_REMOVE(&sf_buf_freelist, sf, free_entry);
				nsfbufsused++;
				nsfbufspeak = imax(nsfbufspeak, nsfbufsused);
			}
			goto done;
		}
	}
	while ((sf = TAILQ_FIRST(&sf_buf_freelist)) == NULL) {
		if (flags & SFB_NOWAIT)
			goto done;
		sf_buf_alloc_want++;
		mbstat.sf_allocwait++;
		error = msleep(&sf_buf_freelist, &sf_buf_lock,
		    (flags & SFB_CATCH) ? PCATCH | PVM : PVM, "sfbufa", 0);
		sf_buf_alloc_want--;

		/*
		 * If we got a signal, don't risk going back to sleep.
		 */
		if (error)
			goto done;
	}
	TAILQ_REMOVE(&sf_buf_freelist, sf, free_entry);
	if (sf->m != NULL)
		LIST_REMOVE(sf, list_entry);
	LIST_INSERT_HEAD(hash_list, sf, list_entry);
	sf->ref_count = 1;
	sf->m = m;
	nsfbufsused++;
	nsfbufspeak = imax(nsfbufspeak, nsfbufsused);
	pmap_kenter(sf->kva, VM_PAGE_TO_PHYS(sf->m));
done:
	mtx_unlock(&sf_buf_lock);
	return (sf);
#endif
}
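
/*
 * Typical usage, sketched here purely for illustration (the source
 * buffer is hypothetical):
 *
 *	sf = sf_buf_alloc(m, SFB_CATCH);
 *	if (sf != NULL) {
 *		bcopy(src, (void *)sf_buf_kva(sf), PAGE_SIZE);
 *		sf_buf_free(sf);
 *	}
 */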

/*
 * Initialize machine state (pcb and trap frame) for a new thread about to
 * upcall.  Put enough state in the new thread's PCB to get it to go back
 * to userret(), where we can intercept it again to set the return (upcall)
 * address and stack, along with those from upcalls that are from other
 * sources such as those generated in thread_userret() itself.
 */
void
cpu_set_upcall(struct thread *td, struct thread *td0)
{
	struct trapframe *tf;
	struct switchframe *sf;

	bcopy(td0->td_frame, td->td_frame, sizeof(struct trapframe));
	bcopy(td0->td_pcb, td->td_pcb, sizeof(struct pcb));
	tf = td->td_frame;
	sf = (struct switchframe *)tf - 1;
	sf->sf_r4 = (u_int)fork_return;
	sf->sf_r5 = (u_int)td;
	sf->sf_pc = (u_int)fork_trampoline;
	tf->tf_spsr &= ~PSR_C_bit;
	tf->tf_r0 = 0;
	td->td_pcb->un_32.pcb32_sp = (u_int)sf;
	td->td_pcb->un_32.pcb32_und_sp = td->td_kstack + USPACE_UNDEF_STACK_TOP;

	/* Setup to release spin count in fork_exit(). */
	td->td_md.md_spinlock_count = 1;
	td->td_md.md_saved_cspr = 0;
}

/*
 * Set the machine state for performing an upcall.  This has to be done
 * in thread_userret() so that upcalls generated in thread_userret()
 * itself can be handled as well.
 */
void
cpu_set_upcall_kse(struct thread *td, void (*entry)(void *), void *arg,
    stack_t *stack)
{
	struct trapframe *tf = td->td_frame;

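	/*
	 * Point the user stack just below a trapframe-sized reservation
	 * at the top of the supplied stack, aligned down to an 8-byte
	 * boundary as the ARM procedure call standard requires.
	 */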
	tf->tf_usr_sp = ((int)stack->ss_sp + stack->ss_size
	    - sizeof(struct trapframe)) & ~7;
	tf->tf_pc = (int)entry;
	tf->tf_r0 = (int)arg;
	tf->tf_spsr = PSR_USR32_MODE;
}

int
cpu_set_user_tls(struct thread *td, void *tls_base)
{

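	/*
	 * These ARM cores have no dedicated TLS register, so the current
	 * thread's pointer is published at the fixed ARM_TP_ADDRESS
	 * location instead; update it in place when setting our own TLS.
	 */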
	if (td != curthread)
		td->td_md.md_tp = tls_base;
	else {
		critical_enter();
		*(void **)ARM_TP_ADDRESS = tls_base;
		critical_exit();
	}
	return (0);
}

void
cpu_thread_exit(struct thread *td)
{
}

void
cpu_thread_alloc(struct thread *td)
{
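	/*
	 * Carve the pcb off the top of the kernel stack and place the
	 * initial trapframe immediately below the SVC-mode stack top.
	 */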
	td->td_pcb = (struct pcb *)(td->td_kstack + td->td_kstack_pages *
	    PAGE_SIZE) - 1;
	td->td_frame = (struct trapframe *)
	    ((u_int)td->td_kstack + USPACE_SVC_STACK_TOP -
	    sizeof(struct pcb)) - 1;
#ifdef __XSCALE__
#ifndef CPU_XSCALE_CORE3
	pmap_use_minicache(td->td_kstack, td->td_kstack_pages * PAGE_SIZE);
#endif
#endif
}

void
cpu_thread_free(struct thread *td)
{
}

void
cpu_thread_clean(struct thread *td)
{
}

/*
 * Intercept the return address from a freshly forked process that has NOT
 * been scheduled yet.
 *
 * This is needed to make kernel threads stay in kernel mode.
 */
void
cpu_set_fork_handler(struct thread *td, void (*func)(void *), void *arg)
{
	struct switchframe *sf;
	struct trapframe *tf;

	tf = td->td_frame;
	sf = (struct switchframe *)tf - 1;
	sf->sf_r4 = (u_int)func;
	sf->sf_r5 = (u_int)arg;
	td->td_pcb->un_32.pcb32_sp = (u_int)sf;
}

/*
 * Software interrupt handler for queued VM system processing.
 */
void
swi_vm(void *dummy)
{

	if (busdma_swi_pending)
		busdma_swi();
}

void
cpu_exit(struct thread *td)
{
}

#define	BITS_PER_INT	(8 * sizeof(int))
vm_offset_t arm_nocache_startaddr;
static int arm_nocache_allocated[ARM_NOCACHE_KVA_SIZE / (PAGE_SIZE *
    BITS_PER_INT)];
/*
 * Functions to map and unmap non-cached memory into KVA that the kernel
 * will not otherwise try to allocate.  The goal is to provide uncached
 * memory to busdma, to honor BUS_DMA_COHERENT.
 * We can allocate at most ARM_NOCACHE_KVA_SIZE bytes.
 * The allocator is deliberately simple: each page is represented by a
 * bit in a bitfield, 0 meaning the page is not allocated, 1 meaning it
 * is.  As soon as it finds enough contiguous free pages to satisfy the
 * request, it returns the address.
 */
void *
arm_remap_nocache(void *addr, vm_size_t size)
{
	int i, j;

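	/*
	 * First-fit scan: look for size / PAGE_SIZE consecutive clear
	 * bits in the allocation bitmap.
	 */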
	size = round_page(size);
	for (i = 0; i < ARM_NOCACHE_KVA_SIZE / PAGE_SIZE; i++) {
		if (!(arm_nocache_allocated[i / BITS_PER_INT] & (1 << (i %
		    BITS_PER_INT)))) {
			for (j = i; j < i + (size / (PAGE_SIZE)); j++)
				if (arm_nocache_allocated[j / BITS_PER_INT] &
				    (1 << (j % BITS_PER_INT)))
					break;
			if (j == i + (size / (PAGE_SIZE)))
				break;
		}
	}
	if (i < ARM_NOCACHE_KVA_SIZE / PAGE_SIZE) {
		vm_offset_t tomap = arm_nocache_startaddr + i * PAGE_SIZE;
		void *ret = (void *)tomap;
		vm_paddr_t physaddr = vtophys((vm_offset_t)addr);

		for (; tomap < (vm_offset_t)ret + size; tomap += PAGE_SIZE,
		    physaddr += PAGE_SIZE, i++) {
			pmap_kenter_nocache(tomap, physaddr);
			arm_nocache_allocated[i / BITS_PER_INT] |= 1 << (i %
			    BITS_PER_INT);
		}
		return (ret);
	}

	return (NULL);
}

void
arm_unmap_nocache(void *addr, vm_size_t size)
{
	vm_offset_t raddr = (vm_offset_t)addr;
	int i;

	size = round_page(size);
	i = (raddr - arm_nocache_startaddr) / (PAGE_SIZE);
	for (; size > 0; size -= PAGE_SIZE, i++)
		arm_nocache_allocated[i / BITS_PER_INT] &= ~(1 << (i %
		    BITS_PER_INT));
}

#ifdef ARM_USE_SMALL_ALLOC

static TAILQ_HEAD(,arm_small_page) pages_normal =
	TAILQ_HEAD_INITIALIZER(pages_normal);
static TAILQ_HEAD(,arm_small_page) pages_wt =
	TAILQ_HEAD_INITIALIZER(pages_wt);
static TAILQ_HEAD(,arm_small_page) free_pgdesc =
	TAILQ_HEAD_INITIALIZER(free_pgdesc);

extern uma_zone_t l2zone;

struct mtx smallalloc_mtx;

MALLOC_DEFINE(M_VMSMALLALLOC, "vm_small_alloc", "VM Small alloc data");

vm_offset_t alloc_firstaddr;

#ifdef ARM_HAVE_SUPERSECTIONS
#define	S_FRAME	L1_SUP_FRAME
#define	S_SIZE	L1_SUP_SIZE
#else
#define	S_FRAME	L1_S_FRAME
#define	S_SIZE	L1_S_SIZE
#endif

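/*
 * Translate a physical address into its virtual address within the
 * contiguous direct map built by arm_init_smallalloc(): walk dump_avail
 * in section-sized chunks, accumulating the mapped size of each range
 * until the range containing pa is found.
 */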
vm_offset_t
arm_ptovirt(vm_paddr_t pa)
{
	int i;
	vm_offset_t addr = alloc_firstaddr;

	KASSERT(alloc_firstaddr != 0, ("arm_ptovirt called too early?"));
	for (i = 0; dump_avail[i + 1]; i += 2) {
		if (pa >= dump_avail[i] && pa < dump_avail[i + 1])
			break;
		addr += (dump_avail[i + 1] & S_FRAME) + S_SIZE -
		    (dump_avail[i] & S_FRAME);
	}
	KASSERT(dump_avail[i + 1] != 0,
	    ("Trying to access invalid physical address"));
	return (addr + (pa - (dump_avail[i] & S_FRAME)));
}

void
arm_init_smallalloc(void)
{
	vm_offset_t to_map = 0, mapaddr;
	int i;

	/*
	 * We need to use dump_avail and not phys_avail, since we want to
	 * map the whole memory and not just the memory available to the VM
	 * to be able to do a pa => va association for any address.
	 */

	for (i = 0; dump_avail[i + 1]; i += 2) {
		to_map += (dump_avail[i + 1] & S_FRAME) + S_SIZE -
		    (dump_avail[i] & S_FRAME);
	}
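
	/*
	 * Place the direct map immediately below KERNBASE, then cover
	 * every dump_avail range with section (or supersection) mappings.
	 */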
	alloc_firstaddr = mapaddr = KERNBASE - to_map;
	for (i = 0; dump_avail[i + 1]; i += 2) {
		vm_offset_t size = (dump_avail[i + 1] & S_FRAME) +
		    S_SIZE - (dump_avail[i] & S_FRAME);
		vm_offset_t did = 0;
		while (size > 0) {
#ifdef ARM_HAVE_SUPERSECTIONS
			pmap_kenter_supersection(mapaddr,
			    (dump_avail[i] & L1_SUP_FRAME) + did,
			    SECTION_CACHE);
#else
			pmap_kenter_section(mapaddr,
			    (dump_avail[i] & L1_S_FRAME) + did, SECTION_CACHE);
#endif
			mapaddr += S_SIZE;
			did += S_SIZE;
			size -= S_SIZE;
		}
	}
}

void
arm_add_smallalloc_pages(void *list, void *mem, int bytes, int pagetable)
{
	struct arm_small_page *pg;

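	/* Carve one page descriptor per page out of the supplied list. */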
	bytes &= ~PAGE_MASK;
	while (bytes > 0) {
		pg = (struct arm_small_page *)list;
		pg->addr = mem;
		if (pagetable)
			TAILQ_INSERT_HEAD(&pages_wt, pg, pg_list);
		else
			TAILQ_INSERT_HEAD(&pages_normal, pg, pg_list);
		list = (char *)list + sizeof(*pg);
		mem = (char *)mem + PAGE_SIZE;
		bytes -= PAGE_SIZE;
	}
}

void *
uma_small_alloc(uma_zone_t zone, int bytes, u_int8_t *flags, int wait)
{
	void *ret;
	struct arm_small_page *sp;
	TAILQ_HEAD(,arm_small_page) *head;
	static vm_pindex_t color;
	vm_page_t m;

	*flags = UMA_SLAB_PRIV;
	/*
	 * For CPUs where we set up page tables as write-back, there's no
	 * need to maintain two separate pools.
	 */
	if (zone == l2zone && pte_l1_s_cache_mode != pte_l1_s_cache_mode_pt)
		head = (void *)&pages_wt;
	else
		head = (void *)&pages_normal;

	mtx_lock(&smallalloc_mtx);
	sp = TAILQ_FIRST(head);

	if (!sp) {
		int pflags;

		mtx_unlock(&smallalloc_mtx);
		if (zone == l2zone &&
		    pte_l1_s_cache_mode != pte_l1_s_cache_mode_pt) {
			*flags = UMA_SLAB_KMEM;
			ret = ((void *)kmem_malloc(kmem_map, bytes, M_NOWAIT));
			return (ret);
		}
		if ((wait & (M_NOWAIT|M_USE_RESERVE)) == M_NOWAIT)
			pflags = VM_ALLOC_INTERRUPT | VM_ALLOC_WIRED;
		else
			pflags = VM_ALLOC_SYSTEM | VM_ALLOC_WIRED;
		if (wait & M_ZERO)
			pflags |= VM_ALLOC_ZERO;
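		/*
		 * Fall back to a raw wired page; retry via VM_WAIT
		 * unless the caller asked for M_NOWAIT.
		 */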
		for (;;) {
			m = vm_page_alloc(NULL, color++,
			    pflags | VM_ALLOC_NOOBJ);
			if (m == NULL) {
				if (wait & M_NOWAIT)
					return (NULL);
				VM_WAIT;
			} else
				break;
		}
		ret = (void *)arm_ptovirt(VM_PAGE_TO_PHYS(m));
		if ((wait & M_ZERO) && (m->flags & PG_ZERO) == 0)
			bzero(ret, PAGE_SIZE);
		return (ret);
	}
	TAILQ_REMOVE(head, sp, pg_list);
	TAILQ_INSERT_HEAD(&free_pgdesc, sp, pg_list);
	ret = sp->addr;
	mtx_unlock(&smallalloc_mtx);
	if ((wait & M_ZERO))
		bzero(ret, bytes);
	return (ret);
}

void
uma_small_free(void *mem, int size, u_int8_t flags)
{
	pd_entry_t *pd;
	pt_entry_t *pt;

	if (flags & UMA_SLAB_KMEM)
		kmem_free(kmem_map, (vm_offset_t)mem, size);
	else {
		struct arm_small_page *sp;

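		/*
		 * Addresses at or above KERNBASE come from the pre-mapped
		 * small-page pools; return the descriptor to the pool
		 * matching the mapping's cache mode.  Anything else is a
		 * raw page allocated in uma_small_alloc(), freed back to
		 * the VM system.
		 */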
		if ((vm_offset_t)mem >= KERNBASE) {
			mtx_lock(&smallalloc_mtx);
			sp = TAILQ_FIRST(&free_pgdesc);
			KASSERT(sp != NULL,
			    ("No more free page descriptors?"));
			TAILQ_REMOVE(&free_pgdesc, sp, pg_list);
			sp->addr = mem;
			pmap_get_pde_pte(kernel_pmap, (vm_offset_t)mem, &pd,
			    &pt);
			if ((*pd & pte_l1_s_cache_mask) ==
			    pte_l1_s_cache_mode_pt &&
			    pte_l1_s_cache_mode_pt != pte_l1_s_cache_mode)
				TAILQ_INSERT_HEAD(&pages_wt, sp, pg_list);
			else
				TAILQ_INSERT_HEAD(&pages_normal, sp, pg_list);
			mtx_unlock(&smallalloc_mtx);
		} else {
			vm_page_t m;
			vm_paddr_t pa = vtophys((vm_offset_t)mem);

			m = PHYS_TO_VM_PAGE(pa);
			m->wire_count--;
			vm_page_free(m);
			atomic_subtract_int(&cnt.v_wire_count, 1);
		}
	}
}

#endif