/*-
 * Copyright (c) 2003,2004 Marcel Moolenaar
 * Copyright (c) 2000,2001 Doug Rabson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: releng/8.0/sys/ia64/ia64/machdep.c 196270 2009-08-16 02:12:13Z marcel $");

#include "opt_compat.h"
#include "opt_ddb.h"
#include "opt_kstack_pages.h"
#include "opt_msgbuf.h"
#include "opt_sched.h"

#include <sys/param.h>
#include <sys/proc.h>
#include <sys/systm.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/bus.h>
#include <sys/cons.h>
#include <sys/cpu.h>
#include <sys/eventhandler.h>
#include <sys/exec.h>
#include <sys/imgact.h>
#include <sys/kdb.h>
#include <sys/kernel.h>
#include <sys/linker.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/msgbuf.h>
#include <sys/pcpu.h>
#include <sys/ptrace.h>
#include <sys/random.h>
#include <sys/reboot.h>
#include <sys/sched.h>
#include <sys/signalvar.h>
#include <sys/syscall.h>
#include <sys/sysctl.h>
#include <sys/sysproto.h>
#include <sys/ucontext.h>
#include <sys/uio.h>
#include <sys/uuid.h>
#include <sys/vmmeter.h>
#include <sys/vnode.h>

#include <ddb/ddb.h>

#include <net/netisr.h>

#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/vm_kern.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_pager.h>

#include <machine/bootinfo.h>
#include <machine/clock.h>
#include <machine/cpu.h>
#include <machine/efi.h>
#include <machine/elf.h>
#include <machine/fpu.h>
#include <machine/mca.h>
#include <machine/md_var.h>
#include <machine/mutex.h>
#include <machine/pal.h>
#include <machine/pcb.h>
#include <machine/reg.h>
#include <machine/sal.h>
#include <machine/sigframe.h>
#ifdef SMP
#include <machine/smp.h>
#endif
#include <machine/unwind.h>
#include <machine/vmparam.h>

#include <i386/include/specialreg.h>

u_int64_t processor_frequency;
u_int64_t bus_frequency;
u_int64_t itc_frequency;
int cold = 1;

u_int64_t pa_bootinfo;
struct bootinfo bootinfo;

struct pcpu pcpu0;

extern u_int64_t kernel_text[], _end[];

extern u_int64_t ia64_gateway_page[];
extern u_int64_t break_sigtramp[];
extern u_int64_t epc_sigtramp[];

struct fpswa_iface *fpswa_iface;

u_int64_t ia64_pal_base;
u_int64_t ia64_port_base;

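/*
 * Set in identifycpu() for processor families (e.g. Montecito) whose
 * instruction caches are not automatically coherent with respect to
 * stores; consumed by ia64_sync_icache() below.
 */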
static int ia64_sync_icache_needed;

char machine[] = MACHINE;
SYSCTL_STRING(_hw, HW_MACHINE, machine, CTLFLAG_RD, machine, 0, "");

static char cpu_model[64];
SYSCTL_STRING(_hw, HW_MODEL, model, CTLFLAG_RD, cpu_model, 0,
    "The CPU model name");

static char cpu_family[64];
SYSCTL_STRING(_hw, OID_AUTO, family, CTLFLAG_RD, cpu_family, 0,
    "The CPU family name");

#ifdef DDB
extern vm_offset_t ksym_start, ksym_end;
#endif

static void cpu_startup(void *);
SYSINIT(cpu, SI_SUB_CPU, SI_ORDER_FIRST, cpu_startup, NULL);

struct msgbuf *msgbufp = NULL;

/* Other subsystems (e.g., ACPI) can hook this later. */
void (*cpu_idle_hook)(void) = NULL;

long Maxmem = 0;
long realmem = 0;

#define	PHYSMAP_SIZE	(2 * VM_PHYSSEG_MAX)

vm_paddr_t phys_avail[PHYSMAP_SIZE + 2];

/* Leave two slots free so a 0/0 pair can mark the end of the chunks. */
#define	PHYS_AVAIL_ARRAY_END	((sizeof(phys_avail) / sizeof(vm_offset_t)) - 2)

struct kva_md_info kmi;

#define	Mhz	1000000L
#define	Ghz	(1000L * Mhz)

static void
identifycpu(void)
{
	char vendor[17];
	char *family_name, *model_name;
	u_int64_t features, tmp;
	int number, revision, model, family, archrev;

	/*
	 * Assumes little-endian.
	 */
	*(u_int64_t *) &vendor[0] = ia64_get_cpuid(0);
	*(u_int64_t *) &vendor[8] = ia64_get_cpuid(1);
	vendor[16] = '\0';

	tmp = ia64_get_cpuid(3);
	number = (tmp >> 0) & 0xff;
	revision = (tmp >> 8) & 0xff;
	model = (tmp >> 16) & 0xff;
	family = (tmp >> 24) & 0xff;
	archrev = (tmp >> 32) & 0xff;

	family_name = model_name = "unknown";
	switch (family) {
	case 0x07:
		family_name = "Itanium";
		model_name = "Merced";
		break;
	case 0x1f:
		family_name = "Itanium 2";
		switch (model) {
		case 0x00:
			model_name = "McKinley";
			break;
		case 0x01:
			/*
			 * Deerfield is a low-voltage variant based on the
			 * Madison core. We need circumstantial evidence
			 * (i.e. the clock frequency) to identify it.
			 * Allow for a roughly 1% error margin.
			 */
			tmp = processor_frequency >> 7;
			if ((processor_frequency - tmp) < 1*Ghz &&
			    (processor_frequency + tmp) >= 1*Ghz)
				model_name = "Deerfield";
			else
				model_name = "Madison";
			break;
		case 0x02:
			model_name = "Madison II";
			break;
		}
		break;
	case 0x20:
		ia64_sync_icache_needed = 1;

		family_name = "Itanium 2";
		switch (model) {
		case 0x00:
			model_name = "Montecito";
			break;
		}
		break;
	}
	snprintf(cpu_family, sizeof(cpu_family), "%s", family_name);
	snprintf(cpu_model, sizeof(cpu_model), "%s", model_name);

	features = ia64_get_cpuid(4);

	printf("CPU: %s (", model_name);
	if (processor_frequency) {
		printf("%ld.%02ld-MHz ",
		    (processor_frequency + 4999) / Mhz,
		    ((processor_frequency + 4999) / (Mhz/100)) % 100);
	}
	printf("%s)\n", family_name);
	printf("  Origin = \"%s\"  Revision = %d\n", vendor, revision);
	printf("  Features = 0x%b\n", (u_int32_t) features,
	    "\020"
	    "\001LB"	/* long branch (brl) instruction. */
	    "\002SD"	/* Spontaneous deferral. */
	    "\003AO"	/* 16-byte atomic operations (ld, st, cmpxchg). */ );
}

static void
cpu_startup(void *dummy)
{

	/*
	 * Good {morning,afternoon,evening,night}.
	 */
	identifycpu();

#ifdef PERFMON
	perfmon_init();
#endif
	printf("real memory  = %ld (%ld MB)\n", ia64_ptob(Maxmem),
	    ia64_ptob(Maxmem) / 1048576);
	realmem = Maxmem;

	/*
	 * Display any holes after the first chunk of extended memory.
	 */
	if (bootverbose) {
		int indx;

		printf("Physical memory chunk(s):\n");
		for (indx = 0; phys_avail[indx + 1] != 0; indx += 2) {
			long size1 = phys_avail[indx + 1] - phys_avail[indx];

			printf("0x%08lx - 0x%08lx, %ld bytes (%ld pages)\n",
			    phys_avail[indx], phys_avail[indx + 1] - 1, size1,
			    size1 >> PAGE_SHIFT);
		}
	}

	vm_ksubmap_init(&kmi);

	printf("avail memory = %ld (%ld MB)\n", ptoa(cnt.v_free_count),
	    ptoa(cnt.v_free_count) / 1048576);

	if (fpswa_iface == NULL)
		printf("Warning: no FPSWA package supplied\n");
	else
		printf("FPSWA Revision = 0x%lx, Entry = %p\n",
		    (long)fpswa_iface->if_rev, (void *)fpswa_iface->if_fpswa);

	/*
	 * Set up buffers, so they can be used to read disk labels.
	 */
	bufinit();
	vm_pager_bufferinit();

	/*
	 * Traverse the MADT to discover IOSAPIC and Local SAPIC
	 * information.
	 */
	ia64_probe_sapics();
	ia64_mca_init();
}

void
cpu_boot(int howto)
{

	efi_reset_system();
}

void
cpu_flush_dcache(void *ptr, size_t len)
{
	vm_offset_t lim, va;

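	/*
	 * Flush at a 32-byte stride; fc flushes the entire cache line
	 * containing the address, and 32 bytes is the smallest line
	 * size assumed here.
	 */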
	va = (uintptr_t)ptr & ~31;
	lim = (uintptr_t)ptr + len;
	while (va < lim) {
		ia64_fc(va);
		va += 32;
	}

	ia64_srlz_d();
}

/* Get current clock frequency for the given cpu id. */
int
cpu_est_clockrate(int cpu_id, uint64_t *rate)
{

	if (pcpu_find(cpu_id) == NULL || rate == NULL)
		return (EINVAL);
	*rate = processor_frequency;
	return (0);
}

void
cpu_halt(void)
{

	efi_reset_system();
}

void
cpu_idle(int busy)
{
	struct ia64_pal_result res;

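	/*
	 * Prefer the registered idle hook (e.g. ACPI) when present;
	 * otherwise enter a light halt state via PAL.
	 */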
	if (cpu_idle_hook != NULL)
		(*cpu_idle_hook)();
	else
		res = ia64_call_pal_static(PAL_HALT_LIGHT, 0, 0, 0);
}

int
cpu_idle_wakeup(int cpu)
{

	return (0);
}

void
cpu_reset(void)
{

	cpu_boot(0);
}

void
cpu_switch(struct thread *old, struct thread *new, struct mtx *mtx)
{
	struct pcb *oldpcb, *newpcb;

	oldpcb = old->td_pcb;
#ifdef COMPAT_IA32
	ia32_savectx(oldpcb);
#endif
	if (PCPU_GET(fpcurthread) == old)
		old->td_frame->tf_special.psr |= IA64_PSR_DFH;
	if (!savectx(oldpcb)) {
		old->td_lock = mtx;
#if defined(SCHED_ULE) && defined(SMP)
		/* td_lock is volatile */
		while (new->td_lock == &blocked_lock)
			;
#endif
		newpcb = new->td_pcb;
		oldpcb->pcb_current_pmap =
		    pmap_switch(newpcb->pcb_current_pmap);
		PCPU_SET(curthread, new);
#ifdef COMPAT_IA32
		ia32_restorectx(newpcb);
#endif
		if (PCPU_GET(fpcurthread) == new)
			new->td_frame->tf_special.psr &= ~IA64_PSR_DFH;
		restorectx(newpcb);
		/* We should not get here. */
		panic("cpu_switch: restorectx() returned");
		/* NOTREACHED */
	}
}

void
cpu_throw(struct thread *old __unused, struct thread *new)
{
	struct pcb *newpcb;

	newpcb = new->td_pcb;
	(void)pmap_switch(newpcb->pcb_current_pmap);
	PCPU_SET(curthread, new);
#ifdef COMPAT_IA32
	ia32_restorectx(newpcb);
#endif
	restorectx(newpcb);
	/* We should not get here. */
	panic("cpu_throw: restorectx() returned");
	/* NOTREACHED */
}

void
cpu_pcpu_init(struct pcpu *pcpu, int cpuid, size_t size)
{

	/*
	 * Set pc_acpi_id to "uninitialized".
	 * See sys/dev/acpica/acpi_cpu.c
	 */
	pcpu->pc_acpi_id = 0xffffffff;
}

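/*
 * Spinlock enter/exit disable interrupts on the first (outermost)
 * acquisition and restore the saved interrupt state when the nesting
 * count drops back to zero.
 */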
void
spinlock_enter(void)
{
	struct thread *td;

	td = curthread;
	if (td->td_md.md_spinlock_count == 0)
		td->td_md.md_saved_intr = intr_disable();
	td->td_md.md_spinlock_count++;
	critical_enter();
}

void
spinlock_exit(void)
{
	struct thread *td;

	td = curthread;
	critical_exit();
	td->td_md.md_spinlock_count--;
	if (td->td_md.md_spinlock_count == 0)
		intr_restore(td->td_md.md_saved_intr);
}

void
map_vhpt(uintptr_t vhpt)
{
	pt_entry_t pte;
	uint64_t psr;

	pte = PTE_PRESENT | PTE_MA_WB | PTE_ACCESSED | PTE_DIRTY |
	    PTE_PL_KERN | PTE_AR_RW;
	pte |= vhpt & PTE_PPN_MASK;

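	/*
	 * Purge any overlapping translation, then program IFA/ITIR and
	 * insert the VHPT mapping into data translation register 2 with
	 * interruption collection and interrupts disabled.
	 */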
	__asm __volatile("ptr.d %0,%1" :: "r"(vhpt),
	    "r"(IA64_ID_PAGE_SHIFT<<2));

	__asm __volatile("mov %0=psr" : "=r"(psr));
	__asm __volatile("rsm psr.ic|psr.i");
	ia64_srlz_i();
	ia64_set_ifa(vhpt);
	ia64_set_itir(IA64_ID_PAGE_SHIFT << 2);
	ia64_srlz_d();
	__asm __volatile("itr.d dtr[%0]=%1" :: "r"(2), "r"(pte));
	__asm __volatile("mov psr.l=%0" :: "r" (psr));
	ia64_srlz_i();
}

void
map_pal_code(void)
{
	pt_entry_t pte;
	uint64_t psr;

	if (ia64_pal_base == 0)
		return;

	pte = PTE_PRESENT | PTE_MA_WB | PTE_ACCESSED | PTE_DIRTY |
	    PTE_PL_KERN | PTE_AR_RWX;
	pte |= ia64_pal_base & PTE_PPN_MASK;

	__asm __volatile("ptr.d %0,%1; ptr.i %0,%1" ::
	    "r"(IA64_PHYS_TO_RR7(ia64_pal_base)), "r"(IA64_ID_PAGE_SHIFT<<2));

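	/*
	 * With interruption collection and interrupts disabled, insert
	 * matching data and instruction translations (register pair 1);
	 * the PAL code is both read and executed by the kernel.
	 */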
	__asm __volatile("mov %0=psr" : "=r"(psr));
	__asm __volatile("rsm psr.ic|psr.i");
	ia64_srlz_i();
	ia64_set_ifa(IA64_PHYS_TO_RR7(ia64_pal_base));
	ia64_set_itir(IA64_ID_PAGE_SHIFT << 2);
	ia64_srlz_d();
	__asm __volatile("itr.d dtr[%0]=%1" :: "r"(1), "r"(pte));
	ia64_srlz_d();
	__asm __volatile("itr.i itr[%0]=%1" :: "r"(1), "r"(pte));
	__asm __volatile("mov psr.l=%0" :: "r" (psr));
	ia64_srlz_i();
}

void
map_gateway_page(void)
{
	pt_entry_t pte;
	uint64_t psr;

	pte = PTE_PRESENT | PTE_MA_WB | PTE_ACCESSED | PTE_DIRTY |
	    PTE_PL_KERN | PTE_AR_X_RX;
	pte |= (uint64_t)ia64_gateway_page & PTE_PPN_MASK;

	__asm __volatile("ptr.d %0,%1; ptr.i %0,%1" ::
	    "r"(VM_MAX_ADDRESS), "r"(PAGE_SHIFT << 2));

	__asm __volatile("mov %0=psr" : "=r"(psr));
	__asm __volatile("rsm psr.ic|psr.i");
	ia64_srlz_i();
	ia64_set_ifa(VM_MAX_ADDRESS);
	ia64_set_itir(PAGE_SHIFT << 2);
	ia64_srlz_d();
	__asm __volatile("itr.d dtr[%0]=%1" :: "r"(3), "r"(pte));
	ia64_srlz_d();
	__asm __volatile("itr.i itr[%0]=%1" :: "r"(3), "r"(pte));
	__asm __volatile("mov psr.l=%0" :: "r" (psr));
	ia64_srlz_i();

	/* Expose the mapping to userland in ar.k5 */
	ia64_set_k5(VM_MAX_ADDRESS);
}

static void
calculate_frequencies(void)
{
	struct ia64_sal_result sal;
	struct ia64_pal_result pal;

	sal = ia64_sal_entry(SAL_FREQ_BASE, 0, 0, 0, 0, 0, 0, 0);
	pal = ia64_call_pal_static(PAL_FREQ_RATIOS, 0, 0, 0);

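	/*
	 * Derive the processor, bus and ITC frequencies from the SAL
	 * platform base frequency and the PAL-reported
	 * numerator/denominator ratios.
	 */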
	if (sal.sal_status == 0 && pal.pal_status == 0) {
		if (bootverbose) {
			printf("Platform clock frequency %ld Hz\n",
			    sal.sal_result[0]);
			printf("Processor ratio %ld/%ld, Bus ratio %ld/%ld, "
			    "ITC ratio %ld/%ld\n",
			    pal.pal_result[0] >> 32,
			    pal.pal_result[0] & ((1L << 32) - 1),
			    pal.pal_result[1] >> 32,
			    pal.pal_result[1] & ((1L << 32) - 1),
			    pal.pal_result[2] >> 32,
			    pal.pal_result[2] & ((1L << 32) - 1));
		}
		processor_frequency =
		    sal.sal_result[0] * (pal.pal_result[0] >> 32)
		    / (pal.pal_result[0] & ((1L << 32) - 1));
		bus_frequency =
		    sal.sal_result[0] * (pal.pal_result[1] >> 32)
		    / (pal.pal_result[1] & ((1L << 32) - 1));
		itc_frequency =
		    sal.sal_result[0] * (pal.pal_result[2] >> 32)
		    / (pal.pal_result[2] & ((1L << 32) - 1));
	}
}

struct ia64_init_return
ia64_init(void)
{
	struct ia64_init_return ret;
	int phys_avail_cnt;
	vm_offset_t kernstart, kernend;
	vm_offset_t kernstartpfn, kernendpfn, pfn0, pfn1;
	char *p;
	struct efi_md *md;
	int metadata_missing;

	/* NO OUTPUT ALLOWED UNTIL FURTHER NOTICE */

	/*
	 * TODO: Disable interrupts, floating point etc.
	 * Maybe flush cache and tlb
	 */
	ia64_set_fpsr(IA64_FPSR_DEFAULT);

	/*
	 * TODO: Get critical system information (if possible, from the
	 * information provided by the boot program).
	 */

	/*
	 * pa_bootinfo is the physical address of the bootinfo block as
	 * passed to us by the loader and set in locore.s.
	 */
	bootinfo = *(struct bootinfo *)(IA64_PHYS_TO_RR7(pa_bootinfo));

	if (bootinfo.bi_magic != BOOTINFO_MAGIC || bootinfo.bi_version != 1) {
		bzero(&bootinfo, sizeof(bootinfo));
		bootinfo.bi_kernend = (vm_offset_t) round_page(_end);
	}

	/*
	 * Look for the I/O ports first - we need them for console
	 * probing.
	 */
	for (md = efi_md_first(); md != NULL; md = efi_md_next(md)) {
		switch (md->md_type) {
		case EFI_MD_TYPE_IOPORT:
			ia64_port_base = IA64_PHYS_TO_RR6(md->md_phys);
			break;
		case EFI_MD_TYPE_PALCODE:
			ia64_pal_base = md->md_phys;
			break;
		}
	}

	metadata_missing = 0;
	if (bootinfo.bi_modulep)
		preload_metadata = (caddr_t)bootinfo.bi_modulep;
	else
		metadata_missing = 1;

	if (envmode == 0 && bootinfo.bi_envp)
		kern_envp = (caddr_t)bootinfo.bi_envp;
	else
		kern_envp = static_env;

	/*
	 * Look at arguments passed to us and compute boothowto.
	 */
	boothowto = bootinfo.bi_boothowto;

	/*
	 * Catch case of boot_verbose set in environment.
	 */
	if ((p = getenv("boot_verbose")) != NULL) {
		if (strcmp(p, "yes") == 0 || strcmp(p, "YES") == 0) {
			boothowto |= RB_VERBOSE;
		}
		freeenv(p);
	}

	if (boothowto & RB_VERBOSE)
		bootverbose = 1;

	/*
	 * Find the beginning and end of the kernel.
	 */
	kernstart = trunc_page(kernel_text);
#ifdef DDB
	ksym_start = bootinfo.bi_symtab;
	ksym_end = bootinfo.bi_esymtab;
	kernend = (vm_offset_t)round_page(ksym_end);
#else
	kernend = (vm_offset_t)round_page(_end);
#endif
	/* But if the bootstrap tells us otherwise, believe it! */
	if (bootinfo.bi_kernend)
		kernend = round_page(bootinfo.bi_kernend);

	/*
	 * Setup the PCPU data for the bootstrap processor. It is needed
	 * by printf(). Also, since printf() has critical sections, we
	 * need to initialize at least pc_curthread.
	 */
	pcpup = &pcpu0;
	ia64_set_k4((u_int64_t)pcpup);
	pcpu_init(pcpup, 0, sizeof(pcpu0));
	dpcpu_init((void *)kernend, 0);
	kernend += DPCPU_SIZE;
	PCPU_SET(curthread, &thread0);

	/*
	 * Initialize the console before we print anything out.
	 */
	cninit();

	/* OUTPUT NOW ALLOWED */

	if (ia64_pal_base != 0) {
		ia64_pal_base &= ~IA64_ID_PAGE_MASK;
		/*
		 * The kernel's TR already maps the first 256M of memory;
		 * if the PAL code lies inside that region it needs no
		 * mapping of its own.
		 */
		if (ia64_pal_base == 0)
			printf("PAL code mapped by the kernel's TR\n");
	} else
		printf("PAL code not found\n");

	/*
	 * Wire things up so we can call the firmware.
	 */
	map_pal_code();
	efi_boot_minimal(bootinfo.bi_systab);
	ia64_sal_init();
	calculate_frequencies();

	if (metadata_missing)
		printf("WARNING: loader(8) metadata is missing!\n");

	/* Get FPSWA interface */
	fpswa_iface = (bootinfo.bi_fpswa == 0) ? NULL :
	    (struct fpswa_iface *)IA64_PHYS_TO_RR7(bootinfo.bi_fpswa);

	/* Init basic tunables, including hz */
	init_param1();

	p = getenv("kernelname");
	if (p) {
		strncpy(kernelname, p, sizeof(kernelname) - 1);
		freeenv(p);
	}

	kernstartpfn = atop(IA64_RR_MASK(kernstart));
	kernendpfn = atop(IA64_RR_MASK(kernend));

	/*
	 * Size the memory regions and load phys_avail[] with the results.
	 */

	/*
	 * Find out how much memory is available, by looking at
	 * the memory descriptors.
	 */

#ifdef DEBUG_MD
	printf("Memory descriptor count: %d\n", mdcount);
#endif

	phys_avail_cnt = 0;
	for (md = efi_md_first(); md != NULL; md = efi_md_next(md)) {
#ifdef DEBUG_MD
		printf("MD %p: type %d pa 0x%lx cnt 0x%lx\n", md,
		    md->md_type, md->md_phys, md->md_pages);
#endif

		pfn0 = ia64_btop(round_page(md->md_phys));
		pfn1 = ia64_btop(trunc_page(md->md_phys + md->md_pages * 4096));
		if (pfn1 <= pfn0)
			continue;

		if (md->md_type != EFI_MD_TYPE_FREE)
			continue;

		/*
		 * We have a memory descriptor that describes conventional
		 * memory that is for general use. We must determine if the
		 * loader has put the kernel in this region.
		 */
		physmem += (pfn1 - pfn0);
		if (pfn0 <= kernendpfn && kernstartpfn <= pfn1) {
			/*
			 * Must compute the location of the kernel
			 * within the segment.
			 */
#ifdef DEBUG_MD
			printf("Descriptor %p contains kernel\n", mp);
#endif
			if (pfn0 < kernstartpfn) {
				/*
				 * There is a chunk before the kernel.
				 */
#ifdef DEBUG_MD
				printf("Loading chunk before kernel: "
				    "0x%lx / 0x%lx\n", pfn0, kernstartpfn);
#endif
				phys_avail[phys_avail_cnt] = ia64_ptob(pfn0);
				phys_avail[phys_avail_cnt+1] = ia64_ptob(kernstartpfn);
				phys_avail_cnt += 2;
			}
			if (kernendpfn < pfn1) {
				/*
				 * There is a chunk after the kernel.
				 */
#ifdef DEBUG_MD
				printf("Loading chunk after kernel: "
				    "0x%lx / 0x%lx\n", kernendpfn, pfn1);
#endif
				phys_avail[phys_avail_cnt] = ia64_ptob(kernendpfn);
				phys_avail[phys_avail_cnt+1] = ia64_ptob(pfn1);
				phys_avail_cnt += 2;
			}
		} else {
			/*
			 * Just load this cluster as one chunk.
			 */
#ifdef DEBUG_MD
			printf("Loading descriptor %d: 0x%lx / 0x%lx\n", i,
			    pfn0, pfn1);
#endif
			phys_avail[phys_avail_cnt] = ia64_ptob(pfn0);
			phys_avail[phys_avail_cnt+1] = ia64_ptob(pfn1);
			phys_avail_cnt += 2;

		}
	}
	phys_avail[phys_avail_cnt] = 0;

	Maxmem = physmem;
	init_param2(physmem);

	/*
	 * Initialize error message buffer (at end of core).
	 */
	msgbufp = (struct msgbuf *)pmap_steal_memory(MSGBUF_SIZE);
	msgbufinit(msgbufp, MSGBUF_SIZE);

	proc_linkup0(&proc0, &thread0);
	/*
	 * Init mapping for kernel stack for proc 0
	 */
	thread0.td_kstack = pmap_steal_memory(KSTACK_PAGES * PAGE_SIZE);
	thread0.td_kstack_pages = KSTACK_PAGES;

	mutex_init();

	/*
	 * Initialize the rest of proc 0's PCB.
	 *
	 * Set the kernel sp, reserving space for an (empty) trapframe,
	 * and make proc0's trapframe pointer point to it for sanity.
	 * Initialise proc0's backing store to start after u area.
	 */
	cpu_thread_alloc(&thread0);
	thread0.td_frame->tf_flags = FRAME_SYSCALL;
	thread0.td_pcb->pcb_special.sp =
	    (u_int64_t)thread0.td_frame - 16;
	thread0.td_pcb->pcb_special.bspstore = thread0.td_kstack;

	/*
	 * Initialize the virtual memory system.
	 */
	pmap_bootstrap();

	/*
	 * Initialize debuggers, and break into them if appropriate.
	 */
	kdb_init();

#ifdef KDB
	if (boothowto & RB_KDB)
		kdb_enter(KDB_WHY_BOOTFLAGS,
		    "Boot flags requested debugger\n");
#endif

	ia64_set_tpr(0);
	ia64_srlz_d();

	ret.bspstore = thread0.td_pcb->pcb_special.bspstore;
	ret.sp = thread0.td_pcb->pcb_special.sp;
	return (ret);
}

__volatile void *
ia64_ioport_address(u_int port)
{
	uint64_t addr;

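	/*
	 * Ports above 64K are direct-mapped. Legacy ports use the ia64
	 * sparse I/O port space encoding: each 4-byte port granule lives
	 * in its own 4KB page, i.e. (port >> 2) selects the page.
	 */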
	addr = (port > 0xffff) ? IA64_PHYS_TO_RR6((uint64_t)port) :
	    ia64_port_base | ((port & 0xfffc) << 10) | (port & 0xfff);
	return ((__volatile void *)addr);
}

uint64_t
ia64_get_hcdp(void)
{

	return (bootinfo.bi_hcdp);
}

void
bzero(void *buf, size_t len)
{
	caddr_t p = buf;

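	/*
	 * Zero byte-wise up to a word boundary, then in unrolled
	 * eight-word chunks, then word-wise, and byte-wise for the tail.
	 */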
	while (((vm_offset_t) p & (sizeof(u_long) - 1)) && len) {
		*p++ = 0;
		len--;
	}
	while (len >= sizeof(u_long) * 8) {
		*(u_long*) p = 0;
		*((u_long*) p + 1) = 0;
		*((u_long*) p + 2) = 0;
		*((u_long*) p + 3) = 0;
		len -= sizeof(u_long) * 8;
		*((u_long*) p + 4) = 0;
		*((u_long*) p + 5) = 0;
		*((u_long*) p + 6) = 0;
		*((u_long*) p + 7) = 0;
		p += sizeof(u_long) * 8;
	}
	while (len >= sizeof(u_long)) {
		*(u_long*) p = 0;
		len -= sizeof(u_long);
		p += sizeof(u_long);
	}
	while (len) {
		*p++ = 0;
		len--;
	}
}

void
DELAY(int n)
{
	u_int64_t start, end, now;

	sched_pin();

	start = ia64_get_itc();
	end = start + (itc_frequency * n) / 1000000;
	/* printf("DELAY from 0x%lx to 0x%lx\n", start, end); */
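	/* Spin on the ITC; the second clause handles counter wraparound. */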
	do {
		now = ia64_get_itc();
	} while (now < end || (now > start && end < start));

	sched_unpin();
}

/*
 * Send an interrupt (signal) to a process.
 */
void
sendsig(sig_t catcher, ksiginfo_t *ksi, sigset_t *mask)
{
	struct proc *p;
	struct thread *td;
	struct trapframe *tf;
	struct sigacts *psp;
	struct sigframe sf, *sfp;
	u_int64_t sbs, sp;
	int oonstack;
	int sig;
	u_long code;

	td = curthread;
	p = td->td_proc;
	PROC_LOCK_ASSERT(p, MA_OWNED);
	sig = ksi->ksi_signo;
	code = ksi->ksi_code;
	psp = p->p_sigacts;
	mtx_assert(&psp->ps_mtx, MA_OWNED);
	tf = td->td_frame;
	sp = tf->tf_special.sp;
	oonstack = sigonstack(sp);
	sbs = 0;

	/* save user context */
	bzero(&sf, sizeof(struct sigframe));
	sf.sf_uc.uc_sigmask = *mask;
	sf.sf_uc.uc_stack = td->td_sigstk;
	sf.sf_uc.uc_stack.ss_flags = (td->td_pflags & TDP_ALTSTACK)
	    ? ((oonstack) ? SS_ONSTACK : 0) : SS_DISABLE;

	/*
	 * Allocate and validate space for the signal handler
	 * context. Note that if the stack is in P0 space, the
	 * call to grow() is a nop, and the useracc() check
	 * will fail if the process has not already allocated
	 * the space with a `brk'.
	 */
	if ((td->td_pflags & TDP_ALTSTACK) != 0 && !oonstack &&
	    SIGISMEMBER(psp->ps_sigonstack, sig)) {
		sbs = (u_int64_t)td->td_sigstk.ss_sp;
		sbs = (sbs + 15) & ~15;
		sfp = (struct sigframe *)(sbs + td->td_sigstk.ss_size);
#if defined(COMPAT_43)
		td->td_sigstk.ss_flags |= SS_ONSTACK;
#endif
	} else
		sfp = (struct sigframe *)sp;
	sfp = (struct sigframe *)((u_int64_t)(sfp - 1) & ~15);

	/* Fill in the siginfo structure for POSIX handlers. */
	if (SIGISMEMBER(psp->ps_siginfo, sig)) {
		sf.sf_si = ksi->ksi_info;
		sf.sf_si.si_signo = sig;
		/*
		 * XXX this shouldn't be here after code in trap.c
		 * is fixed
		 */
		sf.sf_si.si_addr = (void*)tf->tf_special.ifa;
		code = (u_int64_t)&sfp->sf_si;
	}

	mtx_unlock(&psp->ps_mtx);
	PROC_UNLOCK(p);

	get_mcontext(td, &sf.sf_uc.uc_mcontext, 0);

	/* Copy the frame out to userland. */
	if (copyout(&sf, sfp, sizeof(sf)) != 0) {
		/*
		 * Process has trashed its stack; give it an illegal
		 * instruction to halt it in its tracks.
		 */
		PROC_LOCK(p);
		sigexit(td, SIGILL);
		return;
	}

	if ((tf->tf_flags & FRAME_SYSCALL) == 0) {
		tf->tf_special.psr &= ~IA64_PSR_RI;
		tf->tf_special.iip = ia64_get_k5() +
		    ((uint64_t)break_sigtramp - (uint64_t)ia64_gateway_page);
	} else
		tf->tf_special.iip = ia64_get_k5() +
		    ((uint64_t)epc_sigtramp - (uint64_t)ia64_gateway_page);

	/*
	 * Setup the trapframe to return to the signal trampoline. We pass
	 * information to the trampoline in the following registers:
	 *
	 *	gp	new backing store or NULL
	 *	r8	signal number
	 *	r9	signal code or siginfo pointer
	 *	r10	signal handler (function descriptor)
	 */
	tf->tf_special.sp = (u_int64_t)sfp - 16;
	tf->tf_special.gp = sbs;
	tf->tf_special.bspstore = sf.sf_uc.uc_mcontext.mc_special.bspstore;
	tf->tf_special.ndirty = 0;
	tf->tf_special.rnat = sf.sf_uc.uc_mcontext.mc_special.rnat;
	tf->tf_scratch.gr8 = sig;
	tf->tf_scratch.gr9 = code;
	tf->tf_scratch.gr10 = (u_int64_t)catcher;

	PROC_LOCK(p);
	mtx_lock(&psp->ps_mtx);
}

/*
 * System call to cleanup state after a signal
 * has been taken. Reset signal mask and
 * stack state from context left by sendsig (above).
 * Return to previous pc and psl as specified by
 * context left by sendsig. Check carefully to
 * make sure that the user has not modified the
 * state to gain improper privileges.
 *
 * MPSAFE
 */
int
sigreturn(struct thread *td,
	struct sigreturn_args /* {
		ucontext_t *sigcntxp;
	} */ *uap)
{
	ucontext_t uc;
	struct trapframe *tf;
	struct proc *p;
	struct pcb *pcb;

	tf = td->td_frame;
	p = td->td_proc;
	pcb = td->td_pcb;

	/*
	 * Fetch the entire context structure at once for speed.
	 * We don't use a normal argument to simplify RSE handling.
	 */
	if (copyin(uap->sigcntxp, (caddr_t)&uc, sizeof(uc)))
		return (EFAULT);

	set_mcontext(td, &uc.uc_mcontext);

	PROC_LOCK(p);
#if defined(COMPAT_43)
	if (sigonstack(tf->tf_special.sp))
		td->td_sigstk.ss_flags |= SS_ONSTACK;
	else
		td->td_sigstk.ss_flags &= ~SS_ONSTACK;
#endif
	td->td_sigmask = uc.uc_sigmask;
	SIG_CANTMASK(td->td_sigmask);
	signotify(td);
	PROC_UNLOCK(p);

	return (EJUSTRETURN);
}

#ifdef COMPAT_FREEBSD4
int
freebsd4_sigreturn(struct thread *td, struct freebsd4_sigreturn_args *uap)
{

	return (sigreturn(td, (struct sigreturn_args *)uap));
}
#endif

/*
 * Construct a PCB from a trapframe. This is called from kdb_trap() where
 * we want to start a backtrace from the function that caused us to enter
 * the debugger. We have the context in the trapframe, but base the trace
 * on the PCB. The PCB doesn't have to be perfect, as long as it contains
 * enough for a backtrace.
 */
void
makectx(struct trapframe *tf, struct pcb *pcb)
{

	pcb->pcb_special = tf->tf_special;
	pcb->pcb_special.__spare = ~0UL;	/* XXX see unwind.c */
	save_callee_saved(&pcb->pcb_preserved);
	save_callee_saved_fp(&pcb->pcb_preserved_fp);
}

int
ia64_flush_dirty(struct thread *td, struct _special *r)
{
	struct iovec iov;
	struct uio uio;
	uint64_t bspst, kstk, rnat;
	int error, locked;

	if (r->ndirty == 0)
		return (0);

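	/*
	 * The kernel stack copy of the user's dirty registers starts at
	 * the same offset within a 512-byte block as the user backing
	 * store, so that RSE NaT collection points line up.
	 */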
	kstk = td->td_kstack + (r->bspstore & 0x1ffUL);
	if (td == curthread) {
		__asm __volatile("mov ar.rsc=0;;");
		__asm __volatile("mov %0=ar.bspstore" : "=r"(bspst));
		/* Make sure we have all the user registers written out. */
		if (bspst - kstk < r->ndirty) {
			__asm __volatile("flushrs;;");
			__asm __volatile("mov %0=ar.bspstore" : "=r"(bspst));
		}
		__asm __volatile("mov %0=ar.rnat;;" : "=r"(rnat));
		__asm __volatile("mov ar.rsc=3");
		error = copyout((void*)kstk, (void*)r->bspstore, r->ndirty);
		kstk += r->ndirty;
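		/*
		 * If the RSE wrote past a NaT collection point beyond the
		 * copied area, the valid NaT bits are in the collection
		 * slot of kstk's block rather than in ar.rnat.
		 */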
		r->rnat = (bspst > kstk && (bspst & 0x1ffL) < (kstk & 0x1ffL))
		    ? *(uint64_t*)(kstk | 0x1f8L) : rnat;
	} else {
		locked = PROC_LOCKED(td->td_proc);
		if (!locked)
			PHOLD(td->td_proc);
		iov.iov_base = (void*)(uintptr_t)kstk;
		iov.iov_len = r->ndirty;
		uio.uio_iov = &iov;
		uio.uio_iovcnt = 1;
		uio.uio_offset = r->bspstore;
		uio.uio_resid = r->ndirty;
		uio.uio_segflg = UIO_SYSSPACE;
		uio.uio_rw = UIO_WRITE;
		uio.uio_td = td;
		error = proc_rwmem(td->td_proc, &uio);
		/*
		 * XXX proc_rwmem() doesn't currently return ENOSPC,
		 * so it can bogusly return 0 on a partial write. We
		 * don't allow short writes either.
		 */
		if (uio.uio_resid != 0 && error == 0)
			error = ENOSPC;
		if (!locked)
			PRELE(td->td_proc);
	}

	r->bspstore += r->ndirty;
	r->ndirty = 0;
	return (error);
}

int
get_mcontext(struct thread *td, mcontext_t *mc, int flags)
{
	struct trapframe *tf;
	int error;

	tf = td->td_frame;
	bzero(mc, sizeof(*mc));
	mc->mc_special = tf->tf_special;
	error = ia64_flush_dirty(td, &mc->mc_special);
	if (tf->tf_flags & FRAME_SYSCALL) {
		mc->mc_flags |= _MC_FLAGS_SYSCALL_CONTEXT;
		mc->mc_scratch = tf->tf_scratch;
		if (flags & GET_MC_CLEAR_RET) {
			mc->mc_scratch.gr8 = 0;
			mc->mc_scratch.gr9 = 0;
			mc->mc_scratch.gr10 = 0;
			mc->mc_scratch.gr11 = 0;
		}
	} else {
		mc->mc_flags |= _MC_FLAGS_ASYNC_CONTEXT;
		mc->mc_scratch = tf->tf_scratch;
		mc->mc_scratch_fp = tf->tf_scratch_fp;
		/*
		 * XXX If the thread never used the high FP registers, we
		 * probably shouldn't waste time saving them.
		 */
		ia64_highfp_save(td);
		mc->mc_flags |= _MC_FLAGS_HIGHFP_VALID;
		mc->mc_high_fp = td->td_pcb->pcb_high_fp;
	}
	save_callee_saved(&mc->mc_preserved);
	save_callee_saved_fp(&mc->mc_preserved_fp);
	return (error);
}

int
set_mcontext(struct thread *td, const mcontext_t *mc)
{
	struct _special s;
	struct trapframe *tf;
	uint64_t psrmask;

	tf = td->td_frame;

	KASSERT((tf->tf_special.ndirty & ~PAGE_MASK) == 0,
	    ("Whoa there! We have more than 8KB of dirty registers!"));

	s = mc->mc_special;
	/*
	 * Only copy the user mask and the restart instruction bit from
	 * the new context.
	 */
	psrmask = IA64_PSR_BE | IA64_PSR_UP | IA64_PSR_AC | IA64_PSR_MFL |
	    IA64_PSR_MFH | IA64_PSR_RI;
	s.psr = (tf->tf_special.psr & ~psrmask) | (s.psr & psrmask);
	/* We don't have any dirty registers of the new context. */
	s.ndirty = 0;
	if (mc->mc_flags & _MC_FLAGS_ASYNC_CONTEXT) {
		/*
		 * We can get an async context passed to us while we
		 * entered the kernel through a syscall: sigreturn(2)
		 * takes contexts that could previously be the result of
		 * a trap or interrupt.
		 * Hence, we cannot assert that the trapframe is not
		 * a syscall frame, but we can assert that it's at
		 * least an expected syscall.
		 */
		if (tf->tf_flags & FRAME_SYSCALL) {
			KASSERT(tf->tf_scratch.gr15 == SYS_sigreturn, ("foo"));
			tf->tf_flags &= ~FRAME_SYSCALL;
		}
		tf->tf_scratch = mc->mc_scratch;
		tf->tf_scratch_fp = mc->mc_scratch_fp;
		if (mc->mc_flags & _MC_FLAGS_HIGHFP_VALID)
			td->td_pcb->pcb_high_fp = mc->mc_high_fp;
	} else {
		KASSERT((tf->tf_flags & FRAME_SYSCALL) != 0, ("foo"));
		if ((mc->mc_flags & _MC_FLAGS_SYSCALL_CONTEXT) == 0) {
			s.cfm = tf->tf_special.cfm;
			s.iip = tf->tf_special.iip;
			tf->tf_scratch.gr15 = 0;	/* Clear syscall nr. */
		} else
			tf->tf_scratch = mc->mc_scratch;
	}
	tf->tf_special = s;
	restore_callee_saved(&mc->mc_preserved);
	restore_callee_saved_fp(&mc->mc_preserved_fp);

	return (0);
}

/*
 * Clear registers on exec.
 */
void
exec_setregs(struct thread *td, u_long entry, u_long stack, u_long ps_strings)
{
	struct trapframe *tf;
	uint64_t *ksttop, *kst;

	tf = td->td_frame;
	ksttop = (uint64_t*)(td->td_kstack + tf->tf_special.ndirty +
	    (tf->tf_special.bspstore & 0x1ffUL));

	/*
	 * We can ignore up to 8KB of dirty registers by masking off the
	 * lower 13 bits in exception_restore() or epc_syscall(). This
	 * should be enough for a couple of years, but if there are more
	 * than 8KB of dirty registers, we lose track of the bottom of
	 * the kernel stack. The solution is to copy the active part of
	 * the kernel stack down 1 page (or 2, but not more than that)
	 * so that we always have less than 8KB of dirty registers.
	 */
	KASSERT((tf->tf_special.ndirty & ~PAGE_MASK) == 0,
	    ("Whoa there! We have more than 8KB of dirty registers!"));

	bzero(&tf->tf_special, sizeof(tf->tf_special));
	if ((tf->tf_flags & FRAME_SYSCALL) == 0) {	/* break syscalls. */
		bzero(&tf->tf_scratch, sizeof(tf->tf_scratch));
		bzero(&tf->tf_scratch_fp, sizeof(tf->tf_scratch_fp));
		tf->tf_special.cfm = (1UL<<63) | (3UL<<7) | 3UL;
		tf->tf_special.bspstore = IA64_BACKINGSTORE;
		/*
		 * Copy the arguments onto the kernel register stack so that
		 * they get loaded by the loadrs instruction. Skip over the
		 * NaT collection points.
		 */
		kst = ksttop - 1;
		if (((uintptr_t)kst & 0x1ff) == 0x1f8)
			*kst-- = 0;
		*kst-- = 0;
		if (((uintptr_t)kst & 0x1ff) == 0x1f8)
			*kst-- = 0;
		*kst-- = ps_strings;
		if (((uintptr_t)kst & 0x1ff) == 0x1f8)
			*kst-- = 0;
		*kst = stack;
		tf->tf_special.ndirty = (ksttop - kst) << 3;
	} else {				/* epc syscalls (default). */
		tf->tf_special.cfm = (3UL<<62) | (3UL<<7) | 3UL;
		tf->tf_special.bspstore = IA64_BACKINGSTORE + 24;
		/*
		 * Write values for out0, out1 and out2 to the user's backing
		 * store and arrange for them to be restored into the user's
		 * initial register frame.
		 * Assumes that (bspstore & 0x1f8) < 0x1e0.
		 */
		suword((caddr_t)tf->tf_special.bspstore - 24, stack);
		suword((caddr_t)tf->tf_special.bspstore - 16, ps_strings);
		suword((caddr_t)tf->tf_special.bspstore - 8, 0);
	}

	tf->tf_special.iip = entry;
	tf->tf_special.sp = (stack & ~15) - 16;
	tf->tf_special.rsc = 0xf;
	tf->tf_special.fpsr = IA64_FPSR_DEFAULT;
	tf->tf_special.psr = IA64_PSR_IC | IA64_PSR_I | IA64_PSR_IT |
	    IA64_PSR_DT | IA64_PSR_RT | IA64_PSR_DFH | IA64_PSR_BN |
	    IA64_PSR_CPL_USER;
}

int
ptrace_set_pc(struct thread *td, unsigned long addr)
{
	uint64_t slot;

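	/* The low 4 bits of the address select the slot in the bundle. */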
	switch (addr & 0xFUL) {
	case 0:
		slot = IA64_PSR_RI_0;
		break;
	case 1:
		/* XXX we need to deal with MLX bundles here */
		slot = IA64_PSR_RI_1;
		break;
	case 2:
		slot = IA64_PSR_RI_2;
		break;
	default:
		return (EINVAL);
	}

	td->td_frame->tf_special.iip = addr & ~0x0FULL;
	td->td_frame->tf_special.psr =
	    (td->td_frame->tf_special.psr & ~IA64_PSR_RI) | slot;
	return (0);
}

int
ptrace_single_step(struct thread *td)
{
	struct trapframe *tf;

	/*
	 * There's no way to set single stepping when we're leaving the
	 * kernel through the EPC syscall path. The way we solve this is
	 * by enabling the lower-privilege trap so that we re-enter the
	 * kernel as soon as the privilege level changes. See trap.c for
	 * how we proceed from there.
	 */
	tf = td->td_frame;
	if (tf->tf_flags & FRAME_SYSCALL)
		tf->tf_special.psr |= IA64_PSR_LP;
	else
		tf->tf_special.psr |= IA64_PSR_SS;
	return (0);
}

int
ptrace_clear_single_step(struct thread *td)
{
	struct trapframe *tf;

	/*
	 * Clear any and all status bits we may use to implement single
	 * stepping.
	 */
	tf = td->td_frame;
	tf->tf_special.psr &= ~IA64_PSR_SS;
	tf->tf_special.psr &= ~IA64_PSR_LP;
	tf->tf_special.psr &= ~IA64_PSR_TB;
	return (0);
}

int
fill_regs(struct thread *td, struct reg *regs)
{
	struct trapframe *tf;

	tf = td->td_frame;
	regs->r_special = tf->tf_special;
	regs->r_scratch = tf->tf_scratch;
	save_callee_saved(&regs->r_preserved);
	return (0);
}

int
set_regs(struct thread *td, struct reg *regs)
{
	struct trapframe *tf;
	int error;

	tf = td->td_frame;
	error = ia64_flush_dirty(td, &tf->tf_special);
	if (!error) {
		tf->tf_special = regs->r_special;
		tf->tf_special.bspstore += tf->tf_special.ndirty;
		tf->tf_special.ndirty = 0;
		tf->tf_scratch = regs->r_scratch;
		restore_callee_saved(&regs->r_preserved);
	}
	return (error);
}

int
fill_dbregs(struct thread *td, struct dbreg *dbregs)
{

	return (ENOSYS);
}

int
set_dbregs(struct thread *td, struct dbreg *dbregs)
{

	return (ENOSYS);
}

int
fill_fpregs(struct thread *td, struct fpreg *fpregs)
{
	struct trapframe *frame = td->td_frame;
	struct pcb *pcb = td->td_pcb;

	/* Save the high FP registers. */
	ia64_highfp_save(td);

	fpregs->fpr_scratch = frame->tf_scratch_fp;
	save_callee_saved_fp(&fpregs->fpr_preserved);
	fpregs->fpr_high = pcb->pcb_high_fp;
	return (0);
}

int
set_fpregs(struct thread *td, struct fpreg *fpregs)
{
	struct trapframe *frame = td->td_frame;
	struct pcb *pcb = td->td_pcb;

	/* Throw away the high FP registers (should be redundant). */
	ia64_highfp_drop(td);

	frame->tf_scratch_fp = fpregs->fpr_scratch;
	restore_callee_saved_fp(&fpregs->fpr_preserved);
	pcb->pcb_high_fp = fpregs->fpr_high;
	return (0);
}

/*
 * High FP register functions.
 */

int
ia64_highfp_drop(struct thread *td)
{
	struct pcb *pcb;
	struct pcpu *cpu;
	struct thread *thr;

	mtx_lock_spin(&td->td_md.md_highfp_mtx);
	pcb = td->td_pcb;
	cpu = pcb->pcb_fpcpu;
	if (cpu == NULL) {
		mtx_unlock_spin(&td->td_md.md_highfp_mtx);
		return (0);
	}
	pcb->pcb_fpcpu = NULL;
	thr = cpu->pc_fpcurthread;
	cpu->pc_fpcurthread = NULL;
	mtx_unlock_spin(&td->td_md.md_highfp_mtx);

	/* Post-mortem sanity checking. */
	KASSERT(thr == td, ("Inconsistent high FP state"));
	return (1);
}

int
ia64_highfp_save(struct thread *td)
{
	struct pcb *pcb;
	struct pcpu *cpu;
	struct thread *thr;

	/* Don't save if the high FP registers weren't modified. */
	if ((td->td_frame->tf_special.psr & IA64_PSR_MFH) == 0)
		return (ia64_highfp_drop(td));

	mtx_lock_spin(&td->td_md.md_highfp_mtx);
	pcb = td->td_pcb;
	cpu = pcb->pcb_fpcpu;
	if (cpu == NULL) {
		mtx_unlock_spin(&td->td_md.md_highfp_mtx);
		return (0);
	}
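	/*
	 * If another CPU owns this thread's high FP state, make it save
	 * the registers via an IPI and wait for ownership to be dropped.
	 */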
#ifdef SMP
	if (td == curthread)
		sched_pin();
	if (cpu != pcpup) {
		mtx_unlock_spin(&td->td_md.md_highfp_mtx);
		ipi_send(cpu, IPI_HIGH_FP);
		if (td == curthread)
			sched_unpin();
		while (pcb->pcb_fpcpu == cpu)
			DELAY(100);
		return (1);
	} else {
		save_high_fp(&pcb->pcb_high_fp);
		if (td == curthread)
			sched_unpin();
	}
#else
	save_high_fp(&pcb->pcb_high_fp);
#endif
	pcb->pcb_fpcpu = NULL;
	thr = cpu->pc_fpcurthread;
	cpu->pc_fpcurthread = NULL;
	mtx_unlock_spin(&td->td_md.md_highfp_mtx);

	/* Post-mortem sanity checking. */
	KASSERT(thr == td, ("Inconsistent high FP state"));
	return (1);
}

void
ia64_sync_icache(vm_offset_t va, vm_offset_t sz)
{
	vm_offset_t lim;

	if (!ia64_sync_icache_needed)
		return;

	lim = va + sz;
	while (va < lim) {
		ia64_fc_i(va);
		va += 32;	/* XXX */
	}

	ia64_sync_i();
	ia64_srlz_i();
}