/*-
 * SPDX-License-Identifier: BSD-4-Clause
 *
 * Copyright (c) 1982, 1986 The Regents of the University of California.
 * Copyright (c) 1989, 1990 William Jolitz
 * Copyright (c) 1994 John Dyson
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department, and William Jolitz.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by the University of
 *      California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *      from: @(#)vm_machdep.c  7.3 (Berkeley) 5/13/91
 *      Utah $Hdr: vm_machdep.c 1.16.1.1 89/06/23$
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_isa.h"
#include "opt_cpu.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/mutex.h>
#include <sys/pioctl.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/procctl.h>
#include <sys/smp.h>
#include <sys/sysctl.h>
#include <sys/sysent.h>
#include <sys/unistd.h>
#include <sys/vnode.h>
#include <sys/vmmeter.h>
#include <sys/wait.h>

#include <machine/cpu.h>
#include <machine/md_var.h>
#include <machine/pcb.h>
#include <machine/smp.h>
#include <machine/specialreg.h>
#include <machine/tss.h>

#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/vm_kern.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_param.h>

_Static_assert(OFFSETOF_MONITORBUF == offsetof(struct pcpu, pc_monitorbuf),
    "OFFSETOF_MONITORBUF does not correspond with offset of pc_monitorbuf.");

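/*
 * Reserve the top of the kernel stack for the extended FPU save area and
 * record the resulting stack base, aligned for XSAVE, in the thread's
 * machine-dependent data.
 */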
void
set_top_of_stack_td(struct thread *td)
{
        td->td_md.md_stack_base = td->td_kstack +
            td->td_kstack_pages * PAGE_SIZE -
            roundup2(cpu_max_ext_state_size, XSAVE_AREA_ALIGN);
}

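/*
 * The user FPU save area lives at the top of the kernel stack, at the
 * stack base computed by set_top_of_stack_td().
 */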
struct savefpu *
get_pcb_user_save_td(struct thread *td)
{
        vm_offset_t p;

        p = td->td_md.md_stack_base;
        KASSERT((p % XSAVE_AREA_ALIGN) == 0,
            ("Unaligned pcb_user_save area ptr %#lx td %p", p, td));
        return ((struct savefpu *)p);
}

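/* The pcb is embedded in the machine-dependent part of struct thread. */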
struct pcb *
get_pcb_td(struct thread *td)
{

        return (&td->td_md.md_pcb);
}

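/*
 * Recover the owning thread from an embedded pcb and return that thread's
 * user FPU save area.
 */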
struct savefpu *
get_pcb_user_save_pcb(struct pcb *pcb)
{
        struct thread *td;

        td = __containerof(pcb, struct thread, td_md.md_pcb);
        return (get_pcb_user_save_td(td));
}

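/*
 * Allocate an FPU save area for kernel use.  When XSAVE is in use, the
 * extended-state header is initialized with the mask of enabled components.
 */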
void *
alloc_fpusave(int flags)
{
        void *res;
        struct savefpu_ymm *sf;

        res = malloc(cpu_max_ext_state_size, M_DEVBUF, flags);
        if (use_xsave) {
                sf = (struct savefpu_ymm *)res;
                bzero(&sf->sv_xstate.sx_hd, sizeof(sf->sv_xstate.sx_hd));
                sf->sv_xstate.sx_hd.xstate_bv = xsave_mask;
        }
        return (res);
}

/*
 * Finish a fork operation, with process p2 nearly set up.
 * Copy and update the pcb, set up the stack so that the child is
 * ready to run and return to user mode.
 */
void
cpu_fork(struct thread *td1, struct proc *p2, struct thread *td2, int flags)
{
        struct proc *p1;
        struct pcb *pcb2;
        struct mdproc *mdp1, *mdp2;
        struct proc_ldt *pldt;

        p1 = td1->td_proc;
        if ((flags & RFPROC) == 0) {
                if ((flags & RFMEM) == 0) {
                        /* unshare user LDT */
                        mdp1 = &p1->p_md;
                        mtx_lock(&dt_lock);
                        if ((pldt = mdp1->md_ldt) != NULL &&
                            pldt->ldt_refcnt > 1 &&
                            user_ldt_alloc(p1, 1) == NULL)
                                panic("could not copy LDT");
                        mtx_unlock(&dt_lock);
                }
                return;
        }

        /* Ensure that td1's pcb is up to date. */
        fpuexit(td1);
        update_pcb_bases(td1->td_pcb);

        /* Point the stack and pcb to the actual location */
        set_top_of_stack_td(td2);
        td2->td_pcb = pcb2 = get_pcb_td(td2);

        /* Copy td1's pcb */
        bcopy(td1->td_pcb, pcb2, sizeof(*pcb2));

        /* Properly initialize pcb_save */
        pcb2->pcb_save = get_pcb_user_save_pcb(pcb2);
        bcopy(get_pcb_user_save_td(td1), get_pcb_user_save_pcb(pcb2),
            cpu_max_ext_state_size);

        /* Reset debug registers in the new process */
        x86_clear_dbregs(pcb2);

        /* Point mdproc and then copy over td1's contents */
        mdp2 = &p2->p_md;
        bcopy(&p1->p_md, mdp2, sizeof(*mdp2));
        p2->p_amd64_md_flags = p1->p_amd64_md_flags;

        /*
         * Create a new fresh stack for the new process.
         * Copy the trap frame for the return to user mode as if from a
         * syscall.  This copies most of the user mode register values.
         */
        td2->td_frame = (struct trapframe *)td2->td_md.md_stack_base - 1;
        bcopy(td1->td_frame, td2->td_frame, sizeof(struct trapframe));

        td2->td_frame->tf_rax = 0;              /* Child returns zero */
        td2->td_frame->tf_rflags &= ~PSL_C;     /* success */
        td2->td_frame->tf_rdx = 1;

        /*
         * If the parent process has the trap bit set (i.e. a debugger had
         * single stepped the process to the system call), we need to clear
         * the trap flag from the new frame unless the debugger had set PF_FORK
         * on the parent.  Otherwise, the child will receive a (likely
         * unexpected) SIGTRAP when it executes the first instruction after
         * returning to userland.
         */
        if ((p1->p_pfsflags & PF_FORK) == 0)
                td2->td_frame->tf_rflags &= ~PSL_T;

        /*
         * Set registers for trampoline to user mode.  Leave space for the
         * return address on stack.  These are the kernel mode register values.
         */
        pcb2->pcb_r12 = (register_t)fork_return;  /* fork_trampoline argument */
        pcb2->pcb_rbp = 0;
        pcb2->pcb_rsp = (register_t)td2->td_frame - sizeof(void *);
        pcb2->pcb_rbx = (register_t)td2;          /* fork_trampoline argument */
        pcb2->pcb_rip = (register_t)fork_trampoline;
        /*-
         * pcb2->pcb_dr*:       cloned above.
         * pcb2->pcb_savefpu:   cloned above.
         * pcb2->pcb_flags:     cloned above.
         * pcb2->pcb_onfault:   cloned above (always NULL here?).
         * pcb2->pcb_[fg]sbase: cloned above
         */

        /* Setup to release spin count in fork_exit(). */
        td2->td_md.md_spinlock_count = 1;
        td2->td_md.md_saved_flags = PSL_KERNEL | PSL_I;
        pmap_thread_init_invl_gen(td2);

        /* As on i386, do not copy the io permission bitmap. */
        pcb2->pcb_tssp = NULL;

        /* New segment registers. */
        set_pcb_flags_raw(pcb2, PCB_FULL_IRET);

        /* Copy the LDT, if necessary. */
        mdp1 = &td1->td_proc->p_md;
        mdp2 = &p2->p_md;
        if (mdp1->md_ldt == NULL) {
                mdp2->md_ldt = NULL;
                return;
        }
        mtx_lock(&dt_lock);
        if (mdp1->md_ldt != NULL) {
                if (flags & RFMEM) {
                        mdp1->md_ldt->ldt_refcnt++;
                        mdp2->md_ldt = mdp1->md_ldt;
                        bcopy(&mdp1->md_ldt_sd, &mdp2->md_ldt_sd,
                            sizeof(struct system_segment_descriptor));
                } else {
                        mdp2->md_ldt = NULL;
                        mdp2->md_ldt = user_ldt_alloc(p2, 0);
                        if (mdp2->md_ldt == NULL)
                                panic("could not copy LDT");
                        amd64_set_ldt_data(td2, 0, max_ldt_segment,
                            (struct user_segment_descriptor *)
                            mdp1->md_ldt->ldt_base);
                }
        } else
                mdp2->md_ldt = NULL;
        mtx_unlock(&dt_lock);

        /*
         * Now, cpu_switch() can schedule the new process.
         * pcb_rsp is loaded pointing to the cpu_switch() stack frame
         * containing the return address when exiting cpu_switch.
         * This will normally be to fork_trampoline(), which will have
         * %rbx loaded with the new thread's pointer.  fork_trampoline()
         * will set up a stack to call fork_return(td, frame); to complete
         * the return to user-mode.
         */
}

/*
 * Intercept the return address from a freshly forked process that has NOT
 * been scheduled yet.
 *
 * This is needed to make kernel threads stay in kernel mode.
 */
void
cpu_fork_kthread_handler(struct thread *td, void (*func)(void *), void *arg)
{
        /*
         * Note that the trap frame follows the args, so the function
         * is really called like this: func(arg, frame);
         */
        td->td_pcb->pcb_r12 = (long) func;      /* function */
        td->td_pcb->pcb_rbx = (long) arg;       /* first arg */
}

void
cpu_exit(struct thread *td)
{

        /*
         * If this process has a custom LDT, release it.
         */
        if (td->td_proc->p_md.md_ldt != NULL)
                user_ldt_free(td);
}

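/*
 * Machine-dependent cleanup for an exiting thread: release FPU ownership
 * and disable any hardware breakpoints the thread had programmed.
 */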
void
cpu_thread_exit(struct thread *td)
{
        struct pcb *pcb;

        critical_enter();
        if (td == PCPU_GET(fpcurthread))
                fpudrop();
        critical_exit();

        pcb = td->td_pcb;

        /* Disable any hardware breakpoints. */
        if (pcb->pcb_flags & PCB_DBREGS) {
                reset_dbregs();
                clear_pcb_flags(pcb, PCB_DBREGS);
        }
}

void
cpu_thread_clean(struct thread *td)
{
        struct pcb *pcb;

        pcb = td->td_pcb;

        /*
         * Clean TSS/iomap
         */
        if (pcb->pcb_tssp != NULL) {
                pmap_pti_remove_kva((vm_offset_t)pcb->pcb_tssp,
                    (vm_offset_t)pcb->pcb_tssp + ctob(IOPAGES + 1));
                kmem_free((vm_offset_t)pcb->pcb_tssp, ctob(IOPAGES + 1));
                pcb->pcb_tssp = NULL;
        }
}

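/*
 * Machine-dependent hooks for swapping a thread's kernel stack in and out;
 * nothing to do on amd64.
 */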
void
cpu_thread_swapin(struct thread *td)
{
}

void
cpu_thread_swapout(struct thread *td)
{
}

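/*
 * Initialize the machine-dependent parts of a newly allocated thread:
 * locate the pcb and trap frame on the kernel stack and set up the user
 * FPU save area, including the xstate header when XSAVE is in use.
 */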
void
cpu_thread_alloc(struct thread *td)
{
        struct pcb *pcb;
        struct xstate_hdr *xhdr;

        set_top_of_stack_td(td);
        td->td_pcb = pcb = get_pcb_td(td);
        td->td_frame = (struct trapframe *)td->td_md.md_stack_base - 1;
        pcb->pcb_save = get_pcb_user_save_pcb(pcb);
        if (use_xsave) {
                xhdr = (struct xstate_hdr *)(pcb->pcb_save + 1);
                bzero(xhdr, sizeof(*xhdr));
                xhdr->xstate_bv = xsave_mask;
        }
}

void
cpu_thread_free(struct thread *td)
{

        cpu_thread_clean(td);
}

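/*
 * An existing vmspace can be reused across exec only if its pmap's KPTI
 * state (presence of a user page-table root) matches the KPTI mode
 * requested for the new image.
 */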
bool
cpu_exec_vmspace_reuse(struct proc *p, vm_map_t map)
{

        return (((curproc->p_amd64_md_flags & P_MD_KPTI) != 0) ==
            (vm_map_pmap(map)->pm_ucr3 != PMAP_NO_CR3));
}

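/*
 * Apply or report the per-process KPTI setting.  Enabling on exec is only
 * honored when KPTI is enabled globally; the status reply also reports
 * whether KPTI is currently active for the process's pmap.
 */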
static void
cpu_procctl_kpti(struct proc *p, int com, int *val)
{

        if (com == PROC_KPTI_CTL) {
                if (pti && *val == PROC_KPTI_CTL_ENABLE_ON_EXEC)
                        p->p_amd64_md_flags |= P_MD_KPTI;
                if (*val == PROC_KPTI_CTL_DISABLE_ON_EXEC)
                        p->p_amd64_md_flags &= ~P_MD_KPTI;
        } else /* PROC_KPTI_STATUS */ {
                *val = (p->p_amd64_md_flags & P_MD_KPTI) != 0 ?
                    PROC_KPTI_CTL_ENABLE_ON_EXEC :
                    PROC_KPTI_CTL_DISABLE_ON_EXEC;
                if (vmspace_pmap(p->p_vmspace)->pm_ucr3 != PMAP_NO_CR3)
                        *val |= PROC_KPTI_STATUS_ACTIVE;
        }
}

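/*
 * Machine-dependent procctl(2) handler.  Only the KPTI control and status
 * commands are implemented; both require a P_PID id, and the control
 * command additionally requires privilege.
 */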
int
cpu_procctl(struct thread *td, int idtype, id_t id, int com, void *data)
{
        struct proc *p;
        int error, val;

        switch (com) {
        case PROC_KPTI_CTL:
        case PROC_KPTI_STATUS:
                if (idtype != P_PID) {
                        error = EINVAL;
                        break;
                }
                if (com == PROC_KPTI_CTL) {
                        /* sad but true and not a joke */
                        error = priv_check(td, PRIV_IO);
                        if (error != 0)
                                break;
                        error = copyin(data, &val, sizeof(val));
                        if (error != 0)
                                break;
                        if (val != PROC_KPTI_CTL_ENABLE_ON_EXEC &&
                            val != PROC_KPTI_CTL_DISABLE_ON_EXEC) {
                                error = EINVAL;
                                break;
                        }
                }
                error = pget(id, PGET_CANSEE | PGET_NOTWEXIT | PGET_NOTID, &p);
                if (error == 0) {
                        cpu_procctl_kpti(p, com, &val);
                        PROC_UNLOCK(p);
                        if (com == PROC_KPTI_STATUS)
                                error = copyout(&val, data, sizeof(val));
                }
                break;
        default:
                error = EINVAL;
                break;
        }
        return (error);
}

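/*
 * Report a system call's result to user mode: on success the return values
 * go in %rax/%rdx with the carry flag cleared, on error the ABI-translated
 * errno goes in %rax with the carry flag set.  ERESTART rewinds %rip so the
 * system call instruction is re-executed on return to user mode.
 */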
void
cpu_set_syscall_retval(struct thread *td, int error)
{
        struct trapframe *frame;

        frame = td->td_frame;
        if (__predict_true(error == 0)) {
                frame->tf_rax = td->td_retval[0];
                frame->tf_rdx = td->td_retval[1];
                frame->tf_rflags &= ~PSL_C;
                return;
        }

        switch (error) {
        case ERESTART:
                /*
                 * Reconstruct pc, we know that 'syscall' is 2 bytes,
                 * lcall $X,y is 7 bytes, int 0x80 is 2 bytes.
                 * We saved this in tf_err.
                 * %r10 (which was holding the value of %rcx) is restored
                 * for the next iteration.
                 * %r10 restore is only required for freebsd/amd64 processes,
                 * but shall be innocent for any ia32 ABI.
                 *
                 * Require full context restore to get the arguments
                 * in the registers reloaded at return to usermode.
                 */
                frame->tf_rip -= frame->tf_err;
                frame->tf_r10 = frame->tf_rcx;
                set_pcb_flags(td->td_pcb, PCB_FULL_IRET);
                break;

        case EJUSTRETURN:
                break;

        default:
                frame->tf_rax = SV_ABI_ERRNO(td->td_proc, error);
                frame->tf_rflags |= PSL_C;
                break;
        }
}

/*
 * Initialize machine state, mostly pcb and trap frame for a new
 * thread, about to return to userspace.  Put enough state in the new
 * thread's PCB to get it to go back to the fork_return(), which
 * finalizes the thread state and handles peculiarities of the first
 * return to userspace for the new thread.
 */
void
cpu_copy_thread(struct thread *td, struct thread *td0)
{
        struct pcb *pcb2;

        pcb2 = td->td_pcb;

        /*
         * Copy the upcall pcb.  This loads kernel regs.
         * Those not loaded individually below get their default
         * values here.
         */
        update_pcb_bases(td0->td_pcb);
        bcopy(td0->td_pcb, pcb2, sizeof(*pcb2));
        clear_pcb_flags(pcb2, PCB_FPUINITDONE | PCB_USERFPUINITDONE |
            PCB_KERNFPU);
        pcb2->pcb_save = get_pcb_user_save_pcb(pcb2);
        bcopy(get_pcb_user_save_td(td0), pcb2->pcb_save,
            cpu_max_ext_state_size);
        set_pcb_flags_raw(pcb2, PCB_FULL_IRET);

        /*
         * Create a new fresh stack for the new thread.
         */
        bcopy(td0->td_frame, td->td_frame, sizeof(struct trapframe));

        /*
         * If the current thread has the trap bit set (i.e. a debugger had
         * single stepped the process to the system call), we need to clear
         * the trap flag from the new frame.  Otherwise, the new thread will
         * receive a (likely unexpected) SIGTRAP when it executes the first
         * instruction after returning to userland.
         */
        td->td_frame->tf_rflags &= ~PSL_T;

        /*
         * Set registers for trampoline to user mode.  Leave space for the
         * return address on stack.  These are the kernel mode register values.
         */
        pcb2->pcb_r12 = (register_t)fork_return;        /* trampoline arg */
        pcb2->pcb_rbp = 0;
        pcb2->pcb_rsp = (register_t)td->td_frame - sizeof(void *);  /* trampoline arg */
        pcb2->pcb_rbx = (register_t)td;                 /* trampoline arg */
        pcb2->pcb_rip = (register_t)fork_trampoline;
        /*
         * If we didn't copy the pcb, we'd need to do the following registers:
         * pcb2->pcb_dr*:       cloned above.
         * pcb2->pcb_savefpu:   cloned above.
         * pcb2->pcb_onfault:   cloned above (always NULL here?).
         * pcb2->pcb_[fg]sbase: cloned above
         */

        /* Setup to release spin count in fork_exit(). */
        td->td_md.md_spinlock_count = 1;
        td->td_md.md_saved_flags = PSL_KERNEL | PSL_I;
        pmap_thread_init_invl_gen(td);
}

/*
 * Set the machine state for performing an upcall that starts
 * the entry function with the given argument.
 */
void
cpu_set_upcall(struct thread *td, void (*entry)(void *), void *arg,
    stack_t *stack)
{

        /*
         * Do any extra cleaning that needs to be done.
         * The thread may have optional components
         * that are not present in a fresh thread.
         * This may be a recycled thread so make it look
         * as though it's newly allocated.
         */
        cpu_thread_clean(td);

#ifdef COMPAT_FREEBSD32
        if (SV_PROC_FLAG(td->td_proc, SV_ILP32)) {
                /*
                 * Set the trap frame to point at the beginning of the entry
                 * function.
                 */
                td->td_frame->tf_rbp = 0;
                td->td_frame->tf_rsp =
                    (((uintptr_t)stack->ss_sp + stack->ss_size - 4) & ~0x0f) - 4;
                td->td_frame->tf_rip = (uintptr_t)entry;

                /* Return address sentinel value to stop stack unwinding. */
                suword32((void *)td->td_frame->tf_rsp, 0);

                /* Pass the argument to the entry point. */
                suword32((void *)(td->td_frame->tf_rsp + sizeof(int32_t)),
                    (uint32_t)(uintptr_t)arg);

                return;
        }
#endif

        /*
         * Set the trap frame to point at the beginning of the entry
         * function.
         */
        td->td_frame->tf_rbp = 0;
        td->td_frame->tf_rsp =
            ((register_t)stack->ss_sp + stack->ss_size) & ~0x0f;
        td->td_frame->tf_rsp -= 8;
        td->td_frame->tf_rip = (register_t)entry;
        td->td_frame->tf_ds = _udatasel;
        td->td_frame->tf_es = _udatasel;
        td->td_frame->tf_fs = _ufssel;
        td->td_frame->tf_gs = _ugssel;
        td->td_frame->tf_flags = TF_HASSEGS;

        /* Return address sentinel value to stop stack unwinding. */
        suword((void *)td->td_frame->tf_rsp, 0);

        /* Pass the argument to the entry point. */
        td->td_frame->tf_rdi = (register_t)arg;
}

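/*
 * Set the user TLS base: the %gs base for 32-bit (ia32) processes and the
 * %fs base for native 64-bit processes.
 */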
int
cpu_set_user_tls(struct thread *td, void *tls_base)
{
        struct pcb *pcb;

        if ((u_int64_t)tls_base >= VM_MAXUSER_ADDRESS)
                return (EINVAL);

        pcb = td->td_pcb;
        set_pcb_flags(pcb, PCB_FULL_IRET);
#ifdef COMPAT_FREEBSD32
        if (SV_PROC_FLAG(td->td_proc, SV_ILP32)) {
                pcb->pcb_gsbase = (register_t)tls_base;
                return (0);
        }
#endif
        pcb->pcb_fsbase = (register_t)tls_base;
        return (0);
}

/*
 * Software interrupt handler for queued VM system processing.
 */
void
swi_vm(void *dummy)
{
        if (busdma_swi_pending != 0)
                busdma_swi();
}

/*
 * Tell whether this address is in some physical memory region.
 * Currently used by the kernel coredump code in order to avoid
 * dumping the ``ISA memory hole'' which could cause indefinite hangs,
 * or other unpredictable behaviour.
 */

int
is_physical_memory(vm_paddr_t addr)
{

#ifdef DEV_ISA
        /* The ISA ``memory hole''. */
        if (addr >= 0xa0000 && addr < 0x100000)
                return (0);
#endif

        /*
         * stuff other tests for known memory-mapped devices (PCI?)
         * here
         */

        return (1);
}