/*-
 * Copyright (c) 2003,2004 Marcel Moolenaar
 * Copyright (c) 2000,2001 Doug Rabson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_compat.h"
#include "opt_ddb.h"
#include "opt_kstack_pages.h"
#include "opt_msgbuf.h"

#include <sys/param.h>
#include <sys/proc.h>
#include <sys/systm.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/bus.h>
#include <sys/cons.h>
#include <sys/cpu.h>
#include <sys/eventhandler.h>
#include <sys/exec.h>
#include <sys/imgact.h>
#include <sys/kdb.h>
#include <sys/kernel.h>
#include <sys/linker.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/msgbuf.h>
#include <sys/pcpu.h>
#include <sys/ptrace.h>
#include <sys/random.h>
#include <sys/reboot.h>
#include <sys/sched.h>
#include <sys/signalvar.h>
#include <sys/syscall.h>
#include <sys/sysctl.h>
#include <sys/sysproto.h>
#include <sys/ucontext.h>
#include <sys/uio.h>
#include <sys/uuid.h>
#include <sys/vmmeter.h>
#include <sys/vnode.h>

#include <ddb/ddb.h>

#include <net/netisr.h>

#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/vm_kern.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_pager.h>

#include <machine/bootinfo.h>
#include <machine/clock.h>
#include <machine/cpu.h>
#include <machine/efi.h>
#include <machine/elf.h>
#include <machine/fpu.h>
#include <machine/mca.h>
#include <machine/md_var.h>
#include <machine/mutex.h>
#include <machine/pal.h>
#include <machine/pcb.h>
#include <machine/reg.h>
#include <machine/sal.h>
#include <machine/sigframe.h>
#ifdef SMP
#include <machine/smp.h>
#endif
#include <machine/unwind.h>
#include <machine/vmparam.h>

#include <i386/include/specialreg.h>

u_int64_t processor_frequency;
u_int64_t bus_frequency;
u_int64_t itc_frequency;
int cold = 1;

u_int64_t pa_bootinfo;
struct bootinfo bootinfo;

struct pcpu pcpu0;
extern char kstack[];
vm_offset_t proc0kstack;

extern u_int64_t kernel_text[], _end[];

extern u_int64_t ia64_gateway_page[];
extern u_int64_t break_sigtramp[];
extern u_int64_t epc_sigtramp[];

struct fpswa_iface *fpswa_iface;

u_int64_t ia64_pal_base;
u_int64_t ia64_port_base;

static int ia64_inval_icache_needed;

char machine[] = MACHINE;
SYSCTL_STRING(_hw, HW_MACHINE, machine, CTLFLAG_RD, machine, 0, "");

static char cpu_model[64];
SYSCTL_STRING(_hw, HW_MODEL, model, CTLFLAG_RD, cpu_model, 0,
    "The CPU model name");

static char cpu_family[64];
SYSCTL_STRING(_hw, OID_AUTO, family, CTLFLAG_RD, cpu_family, 0,
    "The CPU family name");

#ifdef DDB
extern vm_offset_t ksym_start, ksym_end;
#endif

static void cpu_startup(void *);
SYSINIT(cpu, SI_SUB_CPU, SI_ORDER_FIRST, cpu_startup, NULL);

struct msgbuf *msgbufp = 0;

long Maxmem = 0;
long realmem = 0;

#define	PHYSMAP_SIZE	(2 * VM_PHYSSEG_MAX)

vm_paddr_t phys_avail[PHYSMAP_SIZE + 2];

/* must be 2 less so 0 0 can signal end of chunks */
#define	PHYS_AVAIL_ARRAY_END	((sizeof(phys_avail) / sizeof(vm_offset_t)) - 2)

void mi_startup(void);		/* XXX should be in a MI header */

struct kva_md_info kmi;

#define	Mhz	1000000L
#define	Ghz	(1000L*Mhz)

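/*
 * setPQL2() is the hook the MI page-queue code uses to ask for L2
 * cache geometry (size and associativity, apparently for page
 * coloring).  This stub leaves both outputs untouched, so the VM
 * system keeps its defaults on ia64.
 */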
void setPQL2(int *const size, int *const ways);

void
setPQL2(int *const size, int *const ways)
{
	return;
}

static void
identifycpu(void)
{
	char vendor[17];
	char *family_name, *model_name;
	u_int64_t features, tmp;
	int number, revision, model, family, archrev;

	/*
	 * Assumes little-endian.
	 */
	*(u_int64_t *) &vendor[0] = ia64_get_cpuid(0);
	*(u_int64_t *) &vendor[8] = ia64_get_cpuid(1);
	vendor[16] = '\0';

	tmp = ia64_get_cpuid(3);
	number = (tmp >> 0) & 0xff;
	revision = (tmp >> 8) & 0xff;
	model = (tmp >> 16) & 0xff;
	family = (tmp >> 24) & 0xff;
	archrev = (tmp >> 32) & 0xff;

	family_name = model_name = "unknown";
	switch (family) {
	case 0x07:
		family_name = "Itanium";
		model_name = "Merced";
		break;
	case 0x1f:
		family_name = "Itanium 2";
		switch (model) {
		case 0x00:
			model_name = "McKinley";
			break;
		case 0x01:
			/*
			 * Deerfield is a low-voltage variant based on the
			 * Madison core. We need circumstantial evidence
			 * (i.e. the clock frequency) to identify those.
			 * Allow for roughly 1% error margin.
			 */
			tmp = processor_frequency >> 7;
			if ((processor_frequency - tmp) < 1*Ghz &&
			    (processor_frequency + tmp) >= 1*Ghz)
				model_name = "Deerfield";
			else
				model_name = "Madison";
			break;
		case 0x02:
			model_name = "Madison II";
			break;
		}
		break;
	case 0x20:
		ia64_inval_icache_needed = 1;

		family_name = "Itanium 2";
		switch (model) {
		case 0x00:
			model_name = "Montecito";
			break;
		}
		break;
	}
	snprintf(cpu_family, sizeof(cpu_family), "%s", family_name);
	snprintf(cpu_model, sizeof(cpu_model), "%s", model_name);

	features = ia64_get_cpuid(4);

	printf("CPU: %s (", model_name);
	if (processor_frequency) {
		printf("%ld.%02ld-MHz ",
		    (processor_frequency + 4999) / Mhz,
		    ((processor_frequency + 4999) / (Mhz/100)) % 100);
	}
	printf("%s)\n", family_name);
	printf(" Origin = \"%s\" Revision = %d\n", vendor, revision);
	printf(" Features = 0x%b\n", (u_int32_t) features,
	    "\020"
	    "\001LB"	/* long branch (brl) instruction. */
	    "\002SD"	/* Spontaneous deferral. */
	    "\003AO"	/* 16-byte atomic operations (ld, st, cmpxchg). */ );
}

static void
cpu_startup(void *dummy)
{

	/*
	 * Good {morning,afternoon,evening,night}.
	 */
	identifycpu();

	/* startrtclock(); */
#ifdef PERFMON
	perfmon_init();
#endif
	printf("real memory = %ld (%ld MB)\n", ia64_ptob(Maxmem),
	    ia64_ptob(Maxmem) / 1048576);
	realmem = Maxmem;

	/*
	 * Display any holes after the first chunk of extended memory.
	 */
	if (bootverbose) {
		int indx;

		printf("Physical memory chunk(s):\n");
		for (indx = 0; phys_avail[indx + 1] != 0; indx += 2) {
			long size1 = phys_avail[indx + 1] - phys_avail[indx];

			printf("0x%08lx - 0x%08lx, %ld bytes (%ld pages)\n",
			    phys_avail[indx], phys_avail[indx + 1] - 1, size1,
			    size1 >> PAGE_SHIFT);
		}
	}

	vm_ksubmap_init(&kmi);

	printf("avail memory = %ld (%ld MB)\n", ptoa(cnt.v_free_count),
	    ptoa(cnt.v_free_count) / 1048576);

	if (fpswa_iface == NULL)
		printf("Warning: no FPSWA package supplied\n");
	else
		printf("FPSWA Revision = 0x%lx, Entry = %p\n",
		    (long)fpswa_iface->if_rev, (void *)fpswa_iface->if_fpswa);

	/*
	 * Set up buffers, so they can be used to read disk labels.
	 */
	bufinit();
	vm_pager_bufferinit();

	/*
	 * Traverse the MADT to discover IOSAPIC and Local SAPIC
	 * information.
	 */
	ia64_probe_sapics();
	ia64_mca_init();
}

void
cpu_boot(int howto)
{

	efi_reset_system();
}

/* Get current clock frequency for the given cpu id. */
int
cpu_est_clockrate(int cpu_id, uint64_t *rate)
{

	if (pcpu_find(cpu_id) == NULL || rate == NULL)
		return (EINVAL);
	*rate = processor_frequency;
	return (0);
}
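
/*
 * Note that this is not an estimate at all: it simply reports the
 * nominal frequency computed from SAL/PAL in calculate_frequencies().
 * A sketch of a hypothetical caller:
 *
 *	uint64_t hz;
 *
 *	if (cpu_est_clockrate(PCPU_GET(cpuid), &hz) == 0)
 *		printf("CPU %d: %lu Hz\n", PCPU_GET(cpuid), hz);
 */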

void
cpu_halt(void)
{

	efi_reset_system();
}

static void
cpu_idle_default(void)
{
	struct ia64_pal_result res;

	res = ia64_call_pal_static(PAL_HALT_LIGHT, 0, 0, 0);
}

void
cpu_idle(void)
{
	(*cpu_idle_hook)();
}

/* Other subsystems (e.g., ACPI) can hook this later. */
void (*cpu_idle_hook)(void) = cpu_idle_default;
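
/*
 * A sketch of how another subsystem would take over idling (the
 * function name here is made up for illustration):
 *
 *	extern void acpi_cpu_idle(void);
 *	...
 *	cpu_idle_hook = acpi_cpu_idle;
 *
 * The default hook merely asks PAL for a light halt.
 */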

void
cpu_reset(void)
{

	cpu_boot(0);
}

void
cpu_switch(struct thread *old, struct thread *new, struct mtx *mtx)
{
	struct pcb *oldpcb, *newpcb;

	oldpcb = old->td_pcb;
#ifdef COMPAT_IA32
	ia32_savectx(oldpcb);
#endif
	if (PCPU_GET(fpcurthread) == old)
		old->td_frame->tf_special.psr |= IA64_PSR_DFH;
	if (!savectx(oldpcb)) {
		newpcb = new->td_pcb;
		oldpcb->pcb_current_pmap =
		    pmap_switch(newpcb->pcb_current_pmap);
		PCPU_SET(curthread, new);
#ifdef COMPAT_IA32
		ia32_restorectx(newpcb);
#endif
		if (PCPU_GET(fpcurthread) == new)
			new->td_frame->tf_special.psr &= ~IA64_PSR_DFH;
		restorectx(newpcb);
		/* We should not get here. */
		panic("cpu_switch: restorectx() returned");
		/* NOTREACHED */
	}
}

void
cpu_throw(struct thread *old __unused, struct thread *new)
{
	struct pcb *newpcb;

	newpcb = new->td_pcb;
	(void)pmap_switch(newpcb->pcb_current_pmap);
	PCPU_SET(curthread, new);
#ifdef COMPAT_IA32
	ia32_restorectx(newpcb);
#endif
	restorectx(newpcb);
	/* We should not get here. */
	panic("cpu_throw: restorectx() returned");
	/* NOTREACHED */
}

void
cpu_pcpu_init(struct pcpu *pcpu, int cpuid, size_t size)
{

	pcpu->pc_acpi_id = cpuid;
}

void
spinlock_enter(void)
{
	struct thread *td;

	td = curthread;
	if (td->td_md.md_spinlock_count == 0)
		td->td_md.md_saved_intr = intr_disable();
	td->td_md.md_spinlock_count++;
	critical_enter();
}

void
spinlock_exit(void)
{
	struct thread *td;

	td = curthread;
	critical_exit();
	td->td_md.md_spinlock_count--;
	if (td->td_md.md_spinlock_count == 0)
		intr_restore(td->td_md.md_saved_intr);
}
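
/*
 * Usage sketch: the pair nests, so MD code can bracket accesses to
 * per-CPU state with
 *
 *	spinlock_enter();
 *	...touch PCPU data...
 *	spinlock_exit();
 *
 * Interrupts are restored only when the outermost spinlock_exit()
 * brings md_spinlock_count back to zero.
 */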
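/*
 * The three map_*() functions below all use the same translation
 * register insertion recipe: purge any overlapping entry (ptr.d and,
 * where needed, ptr.i), turn off interruption collection and
 * interrupts (rsm psr.ic|psr.i), load IFA with the virtual address
 * and ITIR with log2(page size) << 2, insert the translation with
 * itr.d/itr.i, and restore PSR.  The srlz.d/srlz.i calls serialize
 * the processor so each step observes the previous one.
 */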
void
map_vhpt(uintptr_t vhpt)
{
	pt_entry_t pte;
	uint64_t psr;

	pte = PTE_PRESENT | PTE_MA_WB | PTE_ACCESSED | PTE_DIRTY |
	    PTE_PL_KERN | PTE_AR_RW;
	pte |= vhpt & PTE_PPN_MASK;

	__asm __volatile("ptr.d %0,%1" :: "r"(vhpt),
	    "r"(IA64_ID_PAGE_SHIFT<<2));

	__asm __volatile("mov %0=psr" : "=r"(psr));
	__asm __volatile("rsm psr.ic|psr.i");
	ia64_srlz_i();
	ia64_set_ifa(vhpt);
	ia64_set_itir(IA64_ID_PAGE_SHIFT << 2);
	ia64_srlz_d();
	__asm __volatile("itr.d dtr[%0]=%1" :: "r"(2), "r"(pte));
	__asm __volatile("mov psr.l=%0" :: "r" (psr));
	ia64_srlz_i();
}

void
map_pal_code(void)
{
	pt_entry_t pte;
	uint64_t psr;

	if (ia64_pal_base == 0)
		return;

	pte = PTE_PRESENT | PTE_MA_WB | PTE_ACCESSED | PTE_DIRTY |
	    PTE_PL_KERN | PTE_AR_RWX;
	pte |= ia64_pal_base & PTE_PPN_MASK;

	__asm __volatile("ptr.d %0,%1; ptr.i %0,%1" ::
	    "r"(IA64_PHYS_TO_RR7(ia64_pal_base)), "r"(IA64_ID_PAGE_SHIFT<<2));

	__asm __volatile("mov %0=psr" : "=r"(psr));
	__asm __volatile("rsm psr.ic|psr.i");
	ia64_srlz_i();
	ia64_set_ifa(IA64_PHYS_TO_RR7(ia64_pal_base));
	ia64_set_itir(IA64_ID_PAGE_SHIFT << 2);
	ia64_srlz_d();
	__asm __volatile("itr.d dtr[%0]=%1" :: "r"(1), "r"(pte));
	ia64_srlz_d();
	__asm __volatile("itr.i itr[%0]=%1" :: "r"(1), "r"(pte));
	__asm __volatile("mov psr.l=%0" :: "r" (psr));
	ia64_srlz_i();
}

void
map_gateway_page(void)
{
	pt_entry_t pte;
	uint64_t psr;

	pte = PTE_PRESENT | PTE_MA_WB | PTE_ACCESSED | PTE_DIRTY |
	    PTE_PL_KERN | PTE_AR_X_RX;
	pte |= (uint64_t)ia64_gateway_page & PTE_PPN_MASK;

	__asm __volatile("ptr.d %0,%1; ptr.i %0,%1" ::
	    "r"(VM_MAX_ADDRESS), "r"(PAGE_SHIFT << 2));

	__asm __volatile("mov %0=psr" : "=r"(psr));
	__asm __volatile("rsm psr.ic|psr.i");
	ia64_srlz_i();
	ia64_set_ifa(VM_MAX_ADDRESS);
	ia64_set_itir(PAGE_SHIFT << 2);
	ia64_srlz_d();
	__asm __volatile("itr.d dtr[%0]=%1" :: "r"(3), "r"(pte));
	ia64_srlz_d();
	__asm __volatile("itr.i itr[%0]=%1" :: "r"(3), "r"(pte));
	__asm __volatile("mov psr.l=%0" :: "r" (psr));
	ia64_srlz_i();

	/* Expose the mapping to userland in ar.k5 */
	ia64_set_k5(VM_MAX_ADDRESS);
}

static void
calculate_frequencies(void)
{
	struct ia64_sal_result sal;
	struct ia64_pal_result pal;

	sal = ia64_sal_entry(SAL_FREQ_BASE, 0, 0, 0, 0, 0, 0, 0);
	pal = ia64_call_pal_static(PAL_FREQ_RATIOS, 0, 0, 0);

	if (sal.sal_status == 0 && pal.pal_status == 0) {
		if (bootverbose) {
			printf("Platform clock frequency %ld Hz\n",
			    sal.sal_result[0]);
			printf("Processor ratio %ld/%ld, Bus ratio %ld/%ld, "
			    "ITC ratio %ld/%ld\n",
			    pal.pal_result[0] >> 32,
			    pal.pal_result[0] & ((1L << 32) - 1),
			    pal.pal_result[1] >> 32,
			    pal.pal_result[1] & ((1L << 32) - 1),
			    pal.pal_result[2] >> 32,
			    pal.pal_result[2] & ((1L << 32) - 1));
		}
		processor_frequency =
		    sal.sal_result[0] * (pal.pal_result[0] >> 32)
		    / (pal.pal_result[0] & ((1L << 32) - 1));
		bus_frequency =
		    sal.sal_result[0] * (pal.pal_result[1] >> 32)
		    / (pal.pal_result[1] & ((1L << 32) - 1));
		itc_frequency =
		    sal.sal_result[0] * (pal.pal_result[2] >> 32)
		    / (pal.pal_result[2] & ((1L << 32) - 1));
	}
}
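
/*
 * A worked example with made-up numbers: a 200000000 Hz platform
 * clock and a processor ratio of 13/2 yield
 *
 *	processor_frequency = 200000000 * 13 / 2 = 1300000000 Hz
 *
 * i.e. a 1.3 GHz CPU.  The bus and ITC frequencies follow the same
 * base * numerator / denominator pattern.
 */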

void
ia64_init(void)
{
	int phys_avail_cnt;
	vm_offset_t kernstart, kernend;
	vm_offset_t kernstartpfn, kernendpfn, pfn0, pfn1;
	char *p;
	struct efi_md *md;
	int metadata_missing;

	/* NO OUTPUT ALLOWED UNTIL FURTHER NOTICE */

	/*
	 * TODO: Disable interrupts, floating point etc.
	 * Maybe flush cache and tlb
	 */
	ia64_set_fpsr(IA64_FPSR_DEFAULT);

	/*
	 * TODO: Get critical system information (if possible, from the
	 * information provided by the boot program).
	 */

	/*
	 * pa_bootinfo is the physical address of the bootinfo block as
	 * passed to us by the loader and set in locore.s.
	 */
	bootinfo = *(struct bootinfo *)(IA64_PHYS_TO_RR7(pa_bootinfo));

	if (bootinfo.bi_magic != BOOTINFO_MAGIC || bootinfo.bi_version != 1) {
		bzero(&bootinfo, sizeof(bootinfo));
		bootinfo.bi_kernend = (vm_offset_t) round_page(_end);
	}

	/*
	 * Look for the I/O ports first - we need them for console
	 * probing.
	 */
	for (md = efi_md_first(); md != NULL; md = efi_md_next(md)) {
		switch (md->md_type) {
		case EFI_MD_TYPE_IOPORT:
			ia64_port_base = IA64_PHYS_TO_RR6(md->md_phys);
			break;
		case EFI_MD_TYPE_PALCODE:
			ia64_pal_base = md->md_phys;
			break;
		}
	}

	metadata_missing = 0;
	if (bootinfo.bi_modulep)
		preload_metadata = (caddr_t)bootinfo.bi_modulep;
	else
		metadata_missing = 1;

	if (envmode == 0 && bootinfo.bi_envp)
		kern_envp = (caddr_t)bootinfo.bi_envp;
	else
		kern_envp = static_env;

	/*
	 * Look at arguments passed to us and compute boothowto.
	 */
	boothowto = bootinfo.bi_boothowto;

	/*
	 * Catch case of boot_verbose set in environment.
	 */
	if ((p = getenv("boot_verbose")) != NULL) {
		if (strcmp(p, "yes") == 0 || strcmp(p, "YES") == 0) {
			boothowto |= RB_VERBOSE;
		}
		freeenv(p);
	}

	if (boothowto & RB_VERBOSE)
		bootverbose = 1;

	/*
	 * Setup the PCPU data for the bootstrap processor. It is needed
	 * by printf(). Also, since printf() has critical sections, we
	 * need to initialize at least pc_curthread.
	 */
	pcpup = &pcpu0;
	ia64_set_k4((u_int64_t)pcpup);
	pcpu_init(pcpup, 0, sizeof(pcpu0));
	PCPU_SET(curthread, &thread0);

	/*
	 * Initialize the console before we print anything out.
	 */
	cninit();

	/* OUTPUT NOW ALLOWED */

	if (ia64_pal_base != 0) {
		ia64_pal_base &= ~IA64_ID_PAGE_MASK;
		/*
		 * We use a TR to map the first 256M of memory - this might
		 * cover the palcode too.
		 */
		if (ia64_pal_base == 0)
			printf("PAL code mapped by the kernel's TR\n");
	} else
		printf("PAL code not found\n");

	/*
	 * Wire things up so we can call the firmware.
	 */
	map_pal_code();
	efi_boot_minimal(bootinfo.bi_systab);
	ia64_sal_init();
	calculate_frequencies();

	/*
	 * Find the beginning and end of the kernel.
	 */
	kernstart = trunc_page(kernel_text);
#ifdef DDB
	ksym_start = bootinfo.bi_symtab;
	ksym_end = bootinfo.bi_esymtab;
	kernend = (vm_offset_t)round_page(ksym_end);
#else
	kernend = (vm_offset_t)round_page(_end);
#endif

	/* But if the bootstrap tells us otherwise, believe it! */
	if (bootinfo.bi_kernend)
		kernend = round_page(bootinfo.bi_kernend);
	if (metadata_missing)
		printf("WARNING: loader(8) metadata is missing!\n");

	/* Get FPSWA interface */
	fpswa_iface = (bootinfo.bi_fpswa == 0) ? NULL :
	    (struct fpswa_iface *)IA64_PHYS_TO_RR7(bootinfo.bi_fpswa);

	/* Init basic tunables, including hz */
	init_param1();

	p = getenv("kernelname");
	if (p) {
		strncpy(kernelname, p, sizeof(kernelname) - 1);
		freeenv(p);
	}

	kernstartpfn = atop(IA64_RR_MASK(kernstart));
	kernendpfn = atop(IA64_RR_MASK(kernend));

	/*
	 * Size the memory regions and load phys_avail[] with the results.
	 */

	/*
	 * Find out how much memory is available, by looking at
	 * the memory descriptors.
	 */

	phys_avail_cnt = 0;
	for (md = efi_md_first(); md != NULL; md = efi_md_next(md)) {
#ifdef DEBUG_MD
		printf("MD %p: type %d pa 0x%lx cnt 0x%lx\n", md,
		    md->md_type, md->md_phys, md->md_pages);
#endif

		pfn0 = ia64_btop(round_page(md->md_phys));
		pfn1 = ia64_btop(trunc_page(md->md_phys + md->md_pages * 4096));
		if (pfn1 <= pfn0)
			continue;

		if (md->md_type != EFI_MD_TYPE_FREE)
			continue;

		/*
		 * We have a memory descriptor that describes conventional
		 * memory that is for general use. We must determine if the
		 * loader has put the kernel in this region.
		 */
		physmem += (pfn1 - pfn0);
		if (pfn0 <= kernendpfn && kernstartpfn <= pfn1) {
			/*
			 * Must compute the location of the kernel
			 * within the segment.
			 */
#ifdef DEBUG_MD
			printf("Descriptor %p contains kernel\n", md);
#endif
			if (pfn0 < kernstartpfn) {
				/*
				 * There is a chunk before the kernel.
				 */
#ifdef DEBUG_MD
				printf("Loading chunk before kernel: "
				    "0x%lx / 0x%lx\n", pfn0, kernstartpfn);
#endif
				phys_avail[phys_avail_cnt] = ia64_ptob(pfn0);
				phys_avail[phys_avail_cnt+1] = ia64_ptob(kernstartpfn);
				phys_avail_cnt += 2;
			}
			if (kernendpfn < pfn1) {
				/*
				 * There is a chunk after the kernel.
				 */
#ifdef DEBUG_MD
				printf("Loading chunk after kernel: "
				    "0x%lx / 0x%lx\n", kernendpfn, pfn1);
#endif
				phys_avail[phys_avail_cnt] = ia64_ptob(kernendpfn);
				phys_avail[phys_avail_cnt+1] = ia64_ptob(pfn1);
				phys_avail_cnt += 2;
			}
		} else {
			/*
			 * Just load this cluster as one chunk.
			 */
#ifdef DEBUG_MD
			printf("Loading descriptor %p: 0x%lx / 0x%lx\n", md,
			    pfn0, pfn1);
#endif
			phys_avail[phys_avail_cnt] = ia64_ptob(pfn0);
			phys_avail[phys_avail_cnt+1] = ia64_ptob(pfn1);
			phys_avail_cnt += 2;
		}
	}
	phys_avail[phys_avail_cnt] = 0;

	Maxmem = physmem;
	init_param2(physmem);

	/*
	 * Initialize error message buffer (at end of core).
	 */
	msgbufp = (struct msgbuf *)pmap_steal_memory(MSGBUF_SIZE);
	msgbufinit(msgbufp, MSGBUF_SIZE);

	proc_linkup0(&proc0, &thread0);
	/*
	 * Init mapping for kernel stack for proc 0
	 */
	proc0kstack = (vm_offset_t)kstack;
	thread0.td_kstack = proc0kstack;
	thread0.td_kstack_pages = KSTACK_PAGES;

	mutex_init();

	/*
	 * Initialize the rest of proc 0's PCB.
	 *
	 * Set the kernel sp, reserving space for an (empty) trapframe,
	 * and make proc0's trapframe pointer point to it for sanity.
	 * Initialise proc0's backing store to start after u area.
	 */
	cpu_thread_alloc(&thread0);
	thread0.td_frame->tf_flags = FRAME_SYSCALL;
	thread0.td_pcb->pcb_special.sp =
	    (u_int64_t)thread0.td_frame - 16;
	thread0.td_pcb->pcb_special.bspstore = thread0.td_kstack;

	/*
	 * Initialize the virtual memory system.
	 */
	pmap_bootstrap();

	/*
	 * Initialize debuggers, and break into them if appropriate.
	 */
	kdb_init();

#ifdef KDB
	if (boothowto & RB_KDB)
		kdb_enter(KDB_WHY_BOOTFLAGS,
		    "Boot flags requested debugger\n");
#endif

	ia64_set_tpr(0);
	ia64_srlz_d();

	/*
	 * Save our current context so that we have a known (maybe even
	 * sane) context as the initial context for new threads that are
	 * forked from us. If any of those threads (including thread0)
	 * does something wrong, we may be lucky and return here where
	 * we're ready for them with a nice panic.
	 */
	if (!savectx(thread0.td_pcb))
		mi_startup();

	/* We should not get here. */
	panic("ia64_init: Whooaa there!");
	/* NOTREACHED */
}

__volatile void *
ia64_ioport_address(u_int port)
{
	uint64_t addr;

	addr = (port > 0xffff) ? IA64_PHYS_TO_RR6((uint64_t)port) :
	    ia64_port_base | ((port & 0xfffc) << 10) | (port & 0xfff);
	return ((__volatile void *)addr);
}

uint64_t
ia64_get_hcdp(void)
{

	return (bootinfo.bi_hcdp);
}

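/*
 * Word-at-a-time bzero().  The first loop aligns the pointer, the
 * unrolled loop clears eight words per iteration (the length update
 * sits mid-loop, presumably to help instruction scheduling), and the
 * two tail loops mop up the remainder.
 */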
void
bzero(void *buf, size_t len)
{
	caddr_t p = buf;

	while (((vm_offset_t) p & (sizeof(u_long) - 1)) && len) {
		*p++ = 0;
		len--;
	}
	while (len >= sizeof(u_long) * 8) {
		*(u_long*) p = 0;
		*((u_long*) p + 1) = 0;
		*((u_long*) p + 2) = 0;
		*((u_long*) p + 3) = 0;
		len -= sizeof(u_long) * 8;
		*((u_long*) p + 4) = 0;
		*((u_long*) p + 5) = 0;
		*((u_long*) p + 6) = 0;
		*((u_long*) p + 7) = 0;
		p += sizeof(u_long) * 8;
	}
	while (len >= sizeof(u_long)) {
		*(u_long*) p = 0;
		len -= sizeof(u_long);
		p += sizeof(u_long);
	}
	while (len) {
		*p++ = 0;
		len--;
	}
}

void
DELAY(int n)
{
	u_int64_t start, end, now;

	start = ia64_get_itc();
	end = start + (itc_frequency * n) / 1000000;
	/* printf("DELAY from 0x%lx to 0x%lx\n", start, end); */
	do {
		now = ia64_get_itc();
	} while (now < end || (now > start && end < start));
}
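
/*
 * Usage sketch: DELAY(n) busy-waits roughly n microseconds on the ITC,
 * e.g. DELAY(1000) spins for about 1 ms.  It is only accurate once
 * calculate_frequencies() has set itc_frequency; the odd-looking loop
 * condition also copes with the ITC wrapping during the wait.
 */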

/*
 * Send an interrupt (signal) to a process.
 */
void
sendsig(sig_t catcher, ksiginfo_t *ksi, sigset_t *mask)
{
	struct proc *p;
	struct thread *td;
	struct trapframe *tf;
	struct sigacts *psp;
	struct sigframe sf, *sfp;
	u_int64_t sbs, sp;
	int oonstack;
	int sig;
	u_long code;

	td = curthread;
	p = td->td_proc;
	PROC_LOCK_ASSERT(p, MA_OWNED);
	sig = ksi->ksi_signo;
	code = ksi->ksi_code;
	psp = p->p_sigacts;
	mtx_assert(&psp->ps_mtx, MA_OWNED);
	tf = td->td_frame;
	sp = tf->tf_special.sp;
	oonstack = sigonstack(sp);
	sbs = 0;

	/* save user context */
	bzero(&sf, sizeof(struct sigframe));
	sf.sf_uc.uc_sigmask = *mask;
	sf.sf_uc.uc_stack = td->td_sigstk;
	sf.sf_uc.uc_stack.ss_flags = (td->td_pflags & TDP_ALTSTACK)
	    ? ((oonstack) ? SS_ONSTACK : 0) : SS_DISABLE;

	/*
	 * Allocate and validate space for the signal handler
	 * context. Note that if the stack is in P0 space, the
	 * call to grow() is a nop, and the useracc() check
	 * will fail if the process has not already allocated
	 * the space with a `brk'.
	 */
	if ((td->td_pflags & TDP_ALTSTACK) != 0 && !oonstack &&
	    SIGISMEMBER(psp->ps_sigonstack, sig)) {
		sbs = (u_int64_t)td->td_sigstk.ss_sp;
		sbs = (sbs + 15) & ~15;
		sfp = (struct sigframe *)(sbs + td->td_sigstk.ss_size);
#if defined(COMPAT_43)
		td->td_sigstk.ss_flags |= SS_ONSTACK;
#endif
	} else
		sfp = (struct sigframe *)sp;
	sfp = (struct sigframe *)((u_int64_t)(sfp - 1) & ~15);

	/* Fill in the siginfo structure for POSIX handlers. */
	if (SIGISMEMBER(psp->ps_siginfo, sig)) {
		sf.sf_si = ksi->ksi_info;
		sf.sf_si.si_signo = sig;
		/*
		 * XXX this shouldn't be here after code in trap.c
		 * is fixed
		 */
		sf.sf_si.si_addr = (void*)tf->tf_special.ifa;
		code = (u_int64_t)&sfp->sf_si;
	}

	mtx_unlock(&psp->ps_mtx);
	PROC_UNLOCK(p);

	get_mcontext(td, &sf.sf_uc.uc_mcontext, 0);

	/* Copy the frame out to userland. */
	if (copyout(&sf, sfp, sizeof(sf)) != 0) {
		/*
		 * Process has trashed its stack; give it an illegal
		 * instruction to halt it in its tracks.
		 */
		PROC_LOCK(p);
		sigexit(td, SIGILL);
		return;
	}

	if ((tf->tf_flags & FRAME_SYSCALL) == 0) {
		tf->tf_special.psr &= ~IA64_PSR_RI;
		tf->tf_special.iip = ia64_get_k5() +
		    ((uint64_t)break_sigtramp - (uint64_t)ia64_gateway_page);
	} else
		tf->tf_special.iip = ia64_get_k5() +
		    ((uint64_t)epc_sigtramp - (uint64_t)ia64_gateway_page);

	/*
	 * Setup the trapframe to return to the signal trampoline. We pass
	 * information to the trampoline in the following registers:
	 *
	 *	gp	new backing store or NULL
	 *	r8	signal number
	 *	r9	signal code or siginfo pointer
	 *	r10	signal handler (function descriptor)
	 */
	tf->tf_special.sp = (u_int64_t)sfp - 16;
	tf->tf_special.gp = sbs;
	tf->tf_special.bspstore = sf.sf_uc.uc_mcontext.mc_special.bspstore;
	tf->tf_special.ndirty = 0;
	tf->tf_special.rnat = sf.sf_uc.uc_mcontext.mc_special.rnat;
	tf->tf_scratch.gr8 = sig;
	tf->tf_scratch.gr9 = code;
	tf->tf_scratch.gr10 = (u_int64_t)catcher;

	PROC_LOCK(p);
	mtx_lock(&psp->ps_mtx);
}

/*
 * System call to cleanup state after a signal
 * has been taken. Reset signal mask and
 * stack state from context left by sendsig (above).
 * Return to previous pc and psl as specified by
 * context left by sendsig. Check carefully to
 * make sure that the user has not modified the
 * state to gain improper privileges.
 *
 * MPSAFE
 */
int
sigreturn(struct thread *td,
	struct sigreturn_args /* {
		ucontext_t *sigcntxp;
	} */ *uap)
{
	ucontext_t uc;
	struct trapframe *tf;
	struct proc *p;
	struct pcb *pcb;

	tf = td->td_frame;
	p = td->td_proc;
	pcb = td->td_pcb;

	/*
	 * Fetch the entire context structure at once for speed.
	 * We don't use a normal argument to simplify RSE handling.
	 */
	if (copyin(uap->sigcntxp, (caddr_t)&uc, sizeof(uc)))
		return (EFAULT);

	set_mcontext(td, &uc.uc_mcontext);

	PROC_LOCK(p);
#if defined(COMPAT_43)
	if (sigonstack(tf->tf_special.sp))
		td->td_sigstk.ss_flags |= SS_ONSTACK;
	else
		td->td_sigstk.ss_flags &= ~SS_ONSTACK;
#endif
	td->td_sigmask = uc.uc_sigmask;
	SIG_CANTMASK(td->td_sigmask);
	signotify(td);
	PROC_UNLOCK(p);

	return (EJUSTRETURN);
}

#ifdef COMPAT_FREEBSD4
int
freebsd4_sigreturn(struct thread *td, struct freebsd4_sigreturn_args *uap)
{

	return sigreturn(td, (struct sigreturn_args *)uap);
}
#endif

/*
 * Construct a PCB from a trapframe. This is called from kdb_trap() where
 * we want to start a backtrace from the function that caused us to enter
 * the debugger. We have the context in the trapframe, but base the trace
 * on the PCB. The PCB doesn't have to be perfect, as long as it contains
 * enough for a backtrace.
 */
void
makectx(struct trapframe *tf, struct pcb *pcb)
{

	pcb->pcb_special = tf->tf_special;
	pcb->pcb_special.__spare = ~0UL;	/* XXX see unwind.c */
	save_callee_saved(&pcb->pcb_preserved);
	save_callee_saved_fp(&pcb->pcb_preserved_fp);
}

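/*
 * ia64_flush_dirty() writes a thread's dirty stacked registers from
 * the kernel backing store back to the user backing store.  The 0x1ff
 * masks below come from the RSE layout: backing store addresses repeat
 * every 0x200 bytes (64 slots) and every slot whose address ends in
 * 0x1f8 holds RNaT collection bits instead of a register value.
 */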
int
ia64_flush_dirty(struct thread *td, struct _special *r)
{
	struct iovec iov;
	struct uio uio;
	uint64_t bspst, kstk, rnat;
	int error, locked;

	if (r->ndirty == 0)
		return (0);

	kstk = td->td_kstack + (r->bspstore & 0x1ffUL);
	if (td == curthread) {
		__asm __volatile("mov ar.rsc=0;;");
		__asm __volatile("mov %0=ar.bspstore" : "=r"(bspst));
		/* Make sure we have all the user registers written out. */
		if (bspst - kstk < r->ndirty) {
			__asm __volatile("flushrs;;");
			__asm __volatile("mov %0=ar.bspstore" : "=r"(bspst));
		}
		__asm __volatile("mov %0=ar.rnat;;" : "=r"(rnat));
		__asm __volatile("mov ar.rsc=3");
		error = copyout((void*)kstk, (void*)r->bspstore, r->ndirty);
		kstk += r->ndirty;
		r->rnat = (bspst > kstk && (bspst & 0x1ffL) < (kstk & 0x1ffL))
		    ? *(uint64_t*)(kstk | 0x1f8L) : rnat;
	} else {
		locked = PROC_LOCKED(td->td_proc);
		if (!locked)
			PHOLD(td->td_proc);
		iov.iov_base = (void*)(uintptr_t)kstk;
		iov.iov_len = r->ndirty;
		uio.uio_iov = &iov;
		uio.uio_iovcnt = 1;
		uio.uio_offset = r->bspstore;
		uio.uio_resid = r->ndirty;
		uio.uio_segflg = UIO_SYSSPACE;
		uio.uio_rw = UIO_WRITE;
		uio.uio_td = td;
		error = proc_rwmem(td->td_proc, &uio);
		/*
		 * XXX proc_rwmem() doesn't currently return ENOSPC,
		 * so I think it can bogusly return 0. Neither do
		 * we allow short writes.
		 */
		if (uio.uio_resid != 0 && error == 0)
			error = ENOSPC;
		if (!locked)
			PRELE(td->td_proc);
	}

	r->bspstore += r->ndirty;
	r->ndirty = 0;
	return (error);
}

int
get_mcontext(struct thread *td, mcontext_t *mc, int flags)
{
	struct trapframe *tf;
	int error;

	tf = td->td_frame;
	bzero(mc, sizeof(*mc));
	mc->mc_special = tf->tf_special;
	error = ia64_flush_dirty(td, &mc->mc_special);
	if (tf->tf_flags & FRAME_SYSCALL) {
		mc->mc_flags |= _MC_FLAGS_SYSCALL_CONTEXT;
		mc->mc_scratch = tf->tf_scratch;
		if (flags & GET_MC_CLEAR_RET) {
			mc->mc_scratch.gr8 = 0;
			mc->mc_scratch.gr9 = 0;
			mc->mc_scratch.gr10 = 0;
			mc->mc_scratch.gr11 = 0;
		}
	} else {
		mc->mc_flags |= _MC_FLAGS_ASYNC_CONTEXT;
		mc->mc_scratch = tf->tf_scratch;
		mc->mc_scratch_fp = tf->tf_scratch_fp;
		/*
		 * XXX If the thread never used the high FP registers, we
		 * probably shouldn't waste time saving them.
		 */
		ia64_highfp_save(td);
		mc->mc_flags |= _MC_FLAGS_HIGHFP_VALID;
		mc->mc_high_fp = td->td_pcb->pcb_high_fp;
	}
	save_callee_saved(&mc->mc_preserved);
	save_callee_saved_fp(&mc->mc_preserved_fp);
	return (error);
}

int
set_mcontext(struct thread *td, const mcontext_t *mc)
{
	struct _special s;
	struct trapframe *tf;
	uint64_t psrmask;

	tf = td->td_frame;

	KASSERT((tf->tf_special.ndirty & ~PAGE_MASK) == 0,
	    ("Whoa there! We have more than 8KB of dirty registers!"));

	s = mc->mc_special;
	/*
	 * Only copy the user mask and the restart instruction bit from
	 * the new context.
	 */
	psrmask = IA64_PSR_BE | IA64_PSR_UP | IA64_PSR_AC | IA64_PSR_MFL |
	    IA64_PSR_MFH | IA64_PSR_RI;
	s.psr = (tf->tf_special.psr & ~psrmask) | (s.psr & psrmask);
	/* We don't have any dirty registers of the new context. */
	s.ndirty = 0;
	if (mc->mc_flags & _MC_FLAGS_ASYNC_CONTEXT) {
		/*
		 * We can get an async context passed to us while we
		 * entered the kernel through a syscall: sigreturn(2)
		 * and kse_switchin(2) both take contexts that could
		 * previously be the result of a trap or interrupt.
		 * Hence, we cannot assert that the trapframe is not
		 * a syscall frame, but we can assert that it's at
		 * least an expected syscall.
		 */
		if (tf->tf_flags & FRAME_SYSCALL) {
			KASSERT(tf->tf_scratch.gr15 == SYS_sigreturn ||
			    tf->tf_scratch.gr15 == SYS_kse_switchin, ("foo"));
			tf->tf_flags &= ~FRAME_SYSCALL;
		}
		tf->tf_scratch = mc->mc_scratch;
		tf->tf_scratch_fp = mc->mc_scratch_fp;
		if (mc->mc_flags & _MC_FLAGS_HIGHFP_VALID)
			td->td_pcb->pcb_high_fp = mc->mc_high_fp;
	} else {
		KASSERT((tf->tf_flags & FRAME_SYSCALL) != 0, ("foo"));
		if ((mc->mc_flags & _MC_FLAGS_SYSCALL_CONTEXT) == 0) {
			s.cfm = tf->tf_special.cfm;
			s.iip = tf->tf_special.iip;
			tf->tf_scratch.gr15 = 0;	/* Clear syscall nr. */
		} else
			tf->tf_scratch = mc->mc_scratch;
	}
	tf->tf_special = s;
	restore_callee_saved(&mc->mc_preserved);
	restore_callee_saved_fp(&mc->mc_preserved_fp);

	if (mc->mc_flags & _MC_FLAGS_KSE_SET_MBOX)
		suword((caddr_t)mc->mc_special.ifa, mc->mc_special.isr);

	return (0);
}

/*
 * Clear registers on exec.
 */
void
exec_setregs(struct thread *td, u_long entry, u_long stack, u_long ps_strings)
{
	struct trapframe *tf;
	uint64_t *ksttop, *kst;

	tf = td->td_frame;
	ksttop = (uint64_t*)(td->td_kstack + tf->tf_special.ndirty +
	    (tf->tf_special.bspstore & 0x1ffUL));

	/*
	 * We can ignore up to 8KB of dirty registers by masking off the
	 * lower 13 bits in exception_restore() or epc_syscall(). This
	 * should be enough for a couple of years, but if there are more
	 * than 8KB of dirty registers, we lose track of the bottom of
	 * the kernel stack. The solution is to copy the active part of
	 * the kernel stack down 1 page (or 2, but not more than that)
	 * so that we always have less than 8KB of dirty registers.
	 */
	KASSERT((tf->tf_special.ndirty & ~PAGE_MASK) == 0,
	    ("Whoa there! We have more than 8KB of dirty registers!"));

	bzero(&tf->tf_special, sizeof(tf->tf_special));
	if ((tf->tf_flags & FRAME_SYSCALL) == 0) {	/* break syscalls. */
		bzero(&tf->tf_scratch, sizeof(tf->tf_scratch));
		bzero(&tf->tf_scratch_fp, sizeof(tf->tf_scratch_fp));
		tf->tf_special.cfm = (1UL<<63) | (3UL<<7) | 3UL;
		tf->tf_special.bspstore = IA64_BACKINGSTORE;
		/*
		 * Copy the arguments onto the kernel register stack so that
		 * they get loaded by the loadrs instruction. Skip over the
		 * NaT collection points.
		 */
		kst = ksttop - 1;
		if (((uintptr_t)kst & 0x1ff) == 0x1f8)
			*kst-- = 0;
		*kst-- = 0;
		if (((uintptr_t)kst & 0x1ff) == 0x1f8)
			*kst-- = 0;
		*kst-- = ps_strings;
		if (((uintptr_t)kst & 0x1ff) == 0x1f8)
			*kst-- = 0;
		*kst = stack;
		tf->tf_special.ndirty = (ksttop - kst) << 3;
	} else {				/* epc syscalls (default). */
		tf->tf_special.cfm = (3UL<<62) | (3UL<<7) | 3UL;
		tf->tf_special.bspstore = IA64_BACKINGSTORE + 24;
		/*
		 * Write values for out0, out1 and out2 to the user's backing
		 * store and arrange for them to be restored into the user's
		 * initial register frame.
		 * Assumes that (bspstore & 0x1f8) < 0x1e0.
		 */
		suword((caddr_t)tf->tf_special.bspstore - 24, stack);
		suword((caddr_t)tf->tf_special.bspstore - 16, ps_strings);
		suword((caddr_t)tf->tf_special.bspstore - 8, 0);
	}

	tf->tf_special.iip = entry;
	tf->tf_special.sp = (stack & ~15) - 16;
	tf->tf_special.rsc = 0xf;
	tf->tf_special.fpsr = IA64_FPSR_DEFAULT;
	tf->tf_special.psr = IA64_PSR_IC | IA64_PSR_I | IA64_PSR_IT |
	    IA64_PSR_DT | IA64_PSR_RT | IA64_PSR_DFH | IA64_PSR_BN |
	    IA64_PSR_CPL_USER;
}

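/*
 * An ia64 "pc" is a 16-byte aligned bundle address with the slot
 * number (0-2) encoded in the low bits; e.g. an address ending in
 * ...2 names slot 2 of the bundle at ...0.  ptrace_set_pc() splits
 * that back into iip and the psr.ri slot field.
 */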
int
ptrace_set_pc(struct thread *td, unsigned long addr)
{
	uint64_t slot;

	switch (addr & 0xFUL) {
	case 0:
		slot = IA64_PSR_RI_0;
		break;
	case 1:
		/* XXX we need to deal with MLX bundles here */
		slot = IA64_PSR_RI_1;
		break;
	case 2:
		slot = IA64_PSR_RI_2;
		break;
	default:
		return (EINVAL);
	}

	td->td_frame->tf_special.iip = addr & ~0x0FULL;
	td->td_frame->tf_special.psr =
	    (td->td_frame->tf_special.psr & ~IA64_PSR_RI) | slot;
	return (0);
}

int
ptrace_single_step(struct thread *td)
{
	struct trapframe *tf;

	/*
	 * There's no way to set single stepping when we're leaving the
	 * kernel through the EPC syscall path. The way we solve this is
	 * by enabling the lower-privilege trap so that we re-enter the
	 * kernel as soon as the privilege level changes. See trap.c for
	 * how we proceed from there.
	 */
	tf = td->td_frame;
	if (tf->tf_flags & FRAME_SYSCALL)
		tf->tf_special.psr |= IA64_PSR_LP;
	else
		tf->tf_special.psr |= IA64_PSR_SS;
	return (0);
}

int
ptrace_clear_single_step(struct thread *td)
{
	struct trapframe *tf;

	/*
	 * Clear any and all status bits we may use to implement single
	 * stepping.
	 */
	tf = td->td_frame;
	tf->tf_special.psr &= ~IA64_PSR_SS;
	tf->tf_special.psr &= ~IA64_PSR_LP;
	tf->tf_special.psr &= ~IA64_PSR_TB;
	return (0);
}

int
fill_regs(struct thread *td, struct reg *regs)
{
	struct trapframe *tf;

	tf = td->td_frame;
	regs->r_special = tf->tf_special;
	regs->r_scratch = tf->tf_scratch;
	save_callee_saved(&regs->r_preserved);
	return (0);
}

int
set_regs(struct thread *td, struct reg *regs)
{
	struct trapframe *tf;
	int error;

	tf = td->td_frame;
	error = ia64_flush_dirty(td, &tf->tf_special);
	if (!error) {
		tf->tf_special = regs->r_special;
		tf->tf_special.bspstore += tf->tf_special.ndirty;
		tf->tf_special.ndirty = 0;
		tf->tf_scratch = regs->r_scratch;
		restore_callee_saved(&regs->r_preserved);
	}
	return (error);
}

int
fill_dbregs(struct thread *td, struct dbreg *dbregs)
{

	return (ENOSYS);
}

int
set_dbregs(struct thread *td, struct dbreg *dbregs)
{

	return (ENOSYS);
}

int
fill_fpregs(struct thread *td, struct fpreg *fpregs)
{
	struct trapframe *frame = td->td_frame;
	struct pcb *pcb = td->td_pcb;

	/* Save the high FP registers. */
	ia64_highfp_save(td);

	fpregs->fpr_scratch = frame->tf_scratch_fp;
	save_callee_saved_fp(&fpregs->fpr_preserved);
	fpregs->fpr_high = pcb->pcb_high_fp;
	return (0);
}

int
set_fpregs(struct thread *td, struct fpreg *fpregs)
{
	struct trapframe *frame = td->td_frame;
	struct pcb *pcb = td->td_pcb;

	/* Throw away the high FP registers (should be redundant). */
	ia64_highfp_drop(td);

	frame->tf_scratch_fp = fpregs->fpr_scratch;
	restore_callee_saved_fp(&fpregs->fpr_preserved);
	pcb->pcb_high_fp = fpregs->fpr_high;
	return (0);
}

/*
 * High FP register functions.
 */

int
ia64_highfp_drop(struct thread *td)
{
	struct pcb *pcb;
	struct pcpu *cpu;
	struct thread *thr;

	mtx_lock_spin(&td->td_md.md_highfp_mtx);
	pcb = td->td_pcb;
	cpu = pcb->pcb_fpcpu;
	if (cpu == NULL) {
		mtx_unlock_spin(&td->td_md.md_highfp_mtx);
		return (0);
	}
	pcb->pcb_fpcpu = NULL;
	thr = cpu->pc_fpcurthread;
	cpu->pc_fpcurthread = NULL;
	mtx_unlock_spin(&td->td_md.md_highfp_mtx);

	/* Post-mortem sanity checking. */
	KASSERT(thr == td, ("Inconsistent high FP state"));
	return (1);
}

int
ia64_highfp_save(struct thread *td)
{
	struct pcb *pcb;
	struct pcpu *cpu;
	struct thread *thr;

	/* Don't save if the high FP registers weren't modified. */
	if ((td->td_frame->tf_special.psr & IA64_PSR_MFH) == 0)
		return (ia64_highfp_drop(td));

	mtx_lock_spin(&td->td_md.md_highfp_mtx);
	pcb = td->td_pcb;
	cpu = pcb->pcb_fpcpu;
	if (cpu == NULL) {
		mtx_unlock_spin(&td->td_md.md_highfp_mtx);
		return (0);
	}
#ifdef SMP
	if (td == curthread)
		sched_pin();
	if (cpu != pcpup) {
		mtx_unlock_spin(&td->td_md.md_highfp_mtx);
		ipi_send(cpu, IPI_HIGH_FP);
		if (td == curthread)
			sched_unpin();
		while (pcb->pcb_fpcpu == cpu)
			DELAY(100);
		return (1);
	} else {
		save_high_fp(&pcb->pcb_high_fp);
		if (td == curthread)
			sched_unpin();
	}
#else
	save_high_fp(&pcb->pcb_high_fp);
#endif
	pcb->pcb_fpcpu = NULL;
	thr = cpu->pc_fpcurthread;
	cpu->pc_fpcurthread = NULL;
	mtx_unlock_spin(&td->td_md.md_highfp_mtx);

	/* Post-mortem sanity checking. */
	KASSERT(thr == td, ("Inconsistent high FP state"));
	return (1);
}

void
ia64_invalidate_icache(vm_offset_t va, vm_offset_t sz)
{
	vm_offset_t lim;

	if (!ia64_inval_icache_needed)
		return;

	lim = va + sz;
	while (va < lim) {
		__asm __volatile("fc.i %0" :: "r"(va));
		va += 32;	/* XXX */
	}
}

int
sysbeep(int pitch, int period)
{
	return (ENODEV);
}