1 /*-
2 * Copyright (c) 2003,2004 Marcel Moolenaar
3 * Copyright (c) 2000,2001 Doug Rabson
4 * All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 *
15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
16 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
19 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
21 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
23 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
25 * SUCH DAMAGE.
26 */
27
28 #include <sys/cdefs.h>
29 __FBSDID("$FreeBSD: releng/7.3/sys/ia64/ia64/machdep.c 195193 2009-06-30 14:11:43Z avg $");
30
31 #include "opt_compat.h"
32 #include "opt_ddb.h"
33 #include "opt_kstack_pages.h"
34 #include "opt_msgbuf.h"
35
36 #include <sys/param.h>
37 #include <sys/proc.h>
38 #include <sys/systm.h>
39 #include <sys/bio.h>
40 #include <sys/buf.h>
41 #include <sys/bus.h>
42 #include <sys/cons.h>
43 #include <sys/cpu.h>
44 #include <sys/eventhandler.h>
45 #include <sys/exec.h>
46 #include <sys/imgact.h>
47 #include <sys/kdb.h>
48 #include <sys/kernel.h>
49 #include <sys/linker.h>
50 #include <sys/lock.h>
51 #include <sys/malloc.h>
52 #include <sys/mbuf.h>
53 #include <sys/msgbuf.h>
54 #include <sys/pcpu.h>
55 #include <sys/ptrace.h>
56 #include <sys/random.h>
57 #include <sys/reboot.h>
58 #include <sys/sched.h>
59 #include <sys/signalvar.h>
60 #include <sys/syscall.h>
61 #include <sys/sysctl.h>
62 #include <sys/sysproto.h>
63 #include <sys/ucontext.h>
64 #include <sys/uio.h>
65 #include <sys/uuid.h>
66 #include <sys/vmmeter.h>
67 #include <sys/vnode.h>
68
69 #include <ddb/ddb.h>
70
71 #include <net/netisr.h>
72
73 #include <vm/vm.h>
74 #include <vm/vm_extern.h>
75 #include <vm/vm_kern.h>
76 #include <vm/vm_page.h>
77 #include <vm/vm_map.h>
78 #include <vm/vm_object.h>
79 #include <vm/vm_pager.h>
80
81 #include <machine/bootinfo.h>
82 #include <machine/clock.h>
83 #include <machine/cpu.h>
84 #include <machine/efi.h>
85 #include <machine/elf.h>
86 #include <machine/fpu.h>
87 #include <machine/mca.h>
88 #include <machine/md_var.h>
89 #include <machine/mutex.h>
90 #include <machine/pal.h>
91 #include <machine/pcb.h>
92 #include <machine/reg.h>
93 #include <machine/sal.h>
94 #include <machine/sigframe.h>
95 #ifdef SMP
96 #include <machine/smp.h>
97 #endif
98 #include <machine/unwind.h>
99 #include <machine/vmparam.h>
100
101 #include <i386/include/specialreg.h>
102
103 u_int64_t processor_frequency;
104 u_int64_t bus_frequency;
105 u_int64_t itc_frequency;
106 int cold = 1;
107
108 u_int64_t pa_bootinfo;
109 struct bootinfo bootinfo;
110
111 struct pcpu pcpu0;
112 extern char kstack[];
113 vm_offset_t proc0kstack;
114
115 extern u_int64_t kernel_text[], _end[];
116
117 extern u_int64_t ia64_gateway_page[];
118 extern u_int64_t break_sigtramp[];
119 extern u_int64_t epc_sigtramp[];
120
121 struct fpswa_iface *fpswa_iface;
122
123 u_int64_t ia64_pal_base;
124 u_int64_t ia64_port_base;
125
126 static int ia64_inval_icache_needed;
127
128 char machine[] = MACHINE;
129 SYSCTL_STRING(_hw, HW_MACHINE, machine, CTLFLAG_RD, machine, 0, "");
130
131 static char cpu_model[64];
132 SYSCTL_STRING(_hw, HW_MODEL, model, CTLFLAG_RD, cpu_model, 0,
133 "The CPU model name");
134
135 static char cpu_family[64];
136 SYSCTL_STRING(_hw, OID_AUTO, family, CTLFLAG_RD, cpu_family, 0,
137 "The CPU family name");
138
139 #ifdef DDB
140 extern vm_offset_t ksym_start, ksym_end;
141 #endif
142
143 static void cpu_startup(void *);
144 SYSINIT(cpu, SI_SUB_CPU, SI_ORDER_FIRST, cpu_startup, NULL);
145
146 struct msgbuf *msgbufp=0;
147
148 long Maxmem = 0;
149 long realmem = 0;
150
151 #define PHYSMAP_SIZE (2 * VM_PHYSSEG_MAX)
152
153 vm_paddr_t phys_avail[PHYSMAP_SIZE + 2];
154
155 /* must be 2 less so 0 0 can signal end of chunks */
156 #define PHYS_AVAIL_ARRAY_END ((sizeof(phys_avail) / sizeof(vm_offset_t)) - 2)
157
158 void mi_startup(void); /* XXX should be in a MI header */
159
160 struct kva_md_info kmi;
161
162 #define Mhz 1000000L
163 #define Ghz (1000L*Mhz)
164
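/*
 * Decode the CPUID registers: the vendor string (cpuid[0-1]), the version
 * information (cpuid[3]) and the feature bits (cpuid[4]), and derive a
 * printable family/model name from the version fields.
 */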
165 static void
166 identifycpu(void)
167 {
168 char vendor[17];
169 char *family_name, *model_name;
170 u_int64_t features, tmp;
171 int number, revision, model, family, archrev;
172
173 /*
174 * Assumes little-endian.
175 */
176 *(u_int64_t *) &vendor[0] = ia64_get_cpuid(0);
177 *(u_int64_t *) &vendor[8] = ia64_get_cpuid(1);
178 vendor[16] = '\0';
179
180 tmp = ia64_get_cpuid(3);
181 number = (tmp >> 0) & 0xff;
182 revision = (tmp >> 8) & 0xff;
183 model = (tmp >> 16) & 0xff;
184 family = (tmp >> 24) & 0xff;
185 archrev = (tmp >> 32) & 0xff;
186
187 family_name = model_name = "unknown";
188 switch (family) {
189 case 0x07:
190 family_name = "Itanium";
191 model_name = "Merced";
192 break;
193 case 0x1f:
194 family_name = "Itanium 2";
195 switch (model) {
196 case 0x00:
197 model_name = "McKinley";
198 break;
199 case 0x01:
200 /*
201 * Deerfield is a low-voltage variant based on the
202 * Madison core. We need circumstantial evidence
203 * (i.e. the clock frequency) to identify those.
204 * Allow for roughly 1% error margin.
205 */
206 tmp = processor_frequency >> 7;
207 if ((processor_frequency - tmp) < 1*Ghz &&
208 (processor_frequency + tmp) >= 1*Ghz)
209 model_name = "Deerfield";
210 else
211 model_name = "Madison";
212 break;
213 case 0x02:
214 model_name = "Madison II";
215 break;
216 }
217 break;
218 case 0x20:
219 ia64_inval_icache_needed = 1;
220
221 family_name = "Itanium 2";
222 switch (model) {
223 case 0x00:
224 model_name = "Montecito";
225 break;
226 }
227 break;
228 }
229 snprintf(cpu_family, sizeof(cpu_family), "%s", family_name);
230 snprintf(cpu_model, sizeof(cpu_model), "%s", model_name);
231
232 features = ia64_get_cpuid(4);
233
234 printf("CPU: %s (", model_name);
235 if (processor_frequency) {
236 printf("%ld.%02ld-MHz ",
237 (processor_frequency + 4999) / Mhz,
238 ((processor_frequency + 4999) / (Mhz/100)) % 100);
239 }
240 printf("%s)\n", family_name);
241 printf(" Origin = \"%s\" Revision = %d\n", vendor, revision);
242 printf(" Features = 0x%b\n", (u_int32_t) features,
243 "\020"
244 "\001LB" /* long branch (brl) instruction. */
245 "\002SD" /* Spontaneous deferral. */
246 "\003AO" /* 16-byte atomic operations (ld, st, cmpxchg). */ );
247 }
248
249 static void
250 cpu_startup(dummy)
251 void *dummy;
252 {
253
254 /*
255 * Good {morning,afternoon,evening,night}.
256 */
257 identifycpu();
258
259 /* startrtclock(); */
260 #ifdef PERFMON
261 perfmon_init();
262 #endif
263 printf("real memory = %ld (%ld MB)\n", ia64_ptob(Maxmem),
264 ia64_ptob(Maxmem) / 1048576);
265 realmem = Maxmem;
266
267 /*
268 * Display any holes after the first chunk of extended memory.
269 */
270 if (bootverbose) {
271 int indx;
272
273 printf("Physical memory chunk(s):\n");
274 for (indx = 0; phys_avail[indx + 1] != 0; indx += 2) {
275 long size1 = phys_avail[indx + 1] - phys_avail[indx];
276
277 printf("0x%08lx - 0x%08lx, %ld bytes (%ld pages)\n",
278 phys_avail[indx], phys_avail[indx + 1] - 1, size1,
279 size1 >> PAGE_SHIFT);
280 }
281 }
282
283 vm_ksubmap_init(&kmi);
284
285 printf("avail memory = %ld (%ld MB)\n", ptoa(cnt.v_free_count),
286 ptoa(cnt.v_free_count) / 1048576);
287
288 if (fpswa_iface == NULL)
289 printf("Warning: no FPSWA package supplied\n");
290 else
291 printf("FPSWA Revision = 0x%lx, Entry = %p\n",
292 (long)fpswa_iface->if_rev, (void *)fpswa_iface->if_fpswa);
293
294 /*
295 * Set up buffers, so they can be used to read disk labels.
296 */
297 bufinit();
298 vm_pager_bufferinit();
299
300 /*
301 * Traverse the MADT to discover IOSAPIC and Local SAPIC
302 * information.
303 */
304 ia64_probe_sapics();
305 ia64_mca_init();
306 }
307
308 void
309 cpu_boot(int howto)
310 {
311
312 efi_reset_system();
313 }
314
315 /* Get current clock frequency for the given cpu id. */
316 int
317 cpu_est_clockrate(int cpu_id, uint64_t *rate)
318 {
319
320 if (pcpu_find(cpu_id) == NULL || rate == NULL)
321 return (EINVAL);
322 *rate = processor_frequency;
323 return (0);
324 }
325
326 void
327 cpu_halt()
328 {
329
330 efi_reset_system();
331 }
332
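/* Default idle method: drop into the PAL "light halt" low-power state. */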
333 static void
334 cpu_idle_default(void)
335 {
336 struct ia64_pal_result res;
337
338 res = ia64_call_pal_static(PAL_HALT_LIGHT, 0, 0, 0);
339 }
340
341 void
342 cpu_idle()
343 {
344 (*cpu_idle_hook)();
345 }
346
347 /* Other subsystems (e.g., ACPI) can hook this later. */
348 void (*cpu_idle_hook)(void) = cpu_idle_default;
349
350 void
351 cpu_reset()
352 {
353
354 cpu_boot(0);
355 }
356
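/*
 * Switch context from the old thread to the new one: save the old PCB,
 * switch address spaces, and restore the new PCB. restorectx() resumes
 * the new thread and does not return here.
 */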
357 void
358 cpu_switch(struct thread *old, struct thread *new, struct mtx *mtx)
359 {
360 struct pcb *oldpcb, *newpcb;
361
362 oldpcb = old->td_pcb;
363 #ifdef COMPAT_IA32
364 ia32_savectx(oldpcb);
365 #endif
366 if (PCPU_GET(fpcurthread) == old)
367 old->td_frame->tf_special.psr |= IA64_PSR_DFH;
368 if (!savectx(oldpcb)) {
369 newpcb = new->td_pcb;
370 oldpcb->pcb_current_pmap =
371 pmap_switch(newpcb->pcb_current_pmap);
372 PCPU_SET(curthread, new);
373 #ifdef COMPAT_IA32
374 ia32_restorectx(newpcb);
375 #endif
376 if (PCPU_GET(fpcurthread) == new)
377 new->td_frame->tf_special.psr &= ~IA64_PSR_DFH;
378 restorectx(newpcb);
379 /* We should not get here. */
380 panic("cpu_switch: restorectx() returned");
381 /* NOTREACHED */
382 }
383 }
384
385 void
386 cpu_throw(struct thread *old __unused, struct thread *new)
387 {
388 struct pcb *newpcb;
389
390 newpcb = new->td_pcb;
391 (void)pmap_switch(newpcb->pcb_current_pmap);
392 PCPU_SET(curthread, new);
393 #ifdef COMPAT_IA32
394 ia32_restorectx(newpcb);
395 #endif
396 restorectx(newpcb);
397 /* We should not get here. */
398 panic("cpu_throw: restorectx() returned");
399 /* NOTREACHED */
400 }
401
402 void
403 cpu_pcpu_init(struct pcpu *pcpu, int cpuid, size_t size)
404 {
405
406 pcpu->pc_acpi_id = cpuid;
407 }
408
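/*
 * Spin lock entry/exit hooks: the outermost spinlock_enter() disables
 * interrupts and remembers the previous interrupt state, and the matching
 * outermost spinlock_exit() restores it.
 */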
409 void
410 spinlock_enter(void)
411 {
412 struct thread *td;
413
414 td = curthread;
415 if (td->td_md.md_spinlock_count == 0)
416 td->td_md.md_saved_intr = intr_disable();
417 td->td_md.md_spinlock_count++;
418 critical_enter();
419 }
420
421 void
422 spinlock_exit(void)
423 {
424 struct thread *td;
425
426 td = curthread;
427 critical_exit();
428 td->td_md.md_spinlock_count--;
429 if (td->td_md.md_spinlock_count == 0)
430 intr_restore(td->td_md.md_saved_intr);
431 }
432
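/*
 * Pin the VHPT with data translation register dtr[2]. The old translation
 * is purged first and the PSR interrupt/interruption-collection bits are
 * cleared while the new entry is inserted.
 */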
433 void
434 map_vhpt(uintptr_t vhpt)
435 {
436 pt_entry_t pte;
437 uint64_t psr;
438
439 pte = PTE_PRESENT | PTE_MA_WB | PTE_ACCESSED | PTE_DIRTY |
440 PTE_PL_KERN | PTE_AR_RW;
441 pte |= vhpt & PTE_PPN_MASK;
442
443 __asm __volatile("ptr.d %0,%1" :: "r"(vhpt),
444 "r"(IA64_ID_PAGE_SHIFT<<2));
445
446 __asm __volatile("mov %0=psr" : "=r"(psr));
447 __asm __volatile("rsm psr.ic|psr.i");
448 ia64_srlz_i();
449 ia64_set_ifa(vhpt);
450 ia64_set_itir(IA64_ID_PAGE_SHIFT << 2);
451 ia64_srlz_d();
452 __asm __volatile("itr.d dtr[%0]=%1" :: "r"(2), "r"(pte));
453 __asm __volatile("mov psr.l=%0" :: "r" (psr));
454 ia64_srlz_i();
455 }
456
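/*
 * Pin the PAL code (if the EFI memory map told us where it is) with
 * matching instruction and data translation registers (itr[1]/dtr[1]),
 * so that PAL procedures can be called with translation enabled.
 */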
457 void
458 map_pal_code(void)
459 {
460 pt_entry_t pte;
461 uint64_t psr;
462
463 if (ia64_pal_base == 0)
464 return;
465
466 pte = PTE_PRESENT | PTE_MA_WB | PTE_ACCESSED | PTE_DIRTY |
467 PTE_PL_KERN | PTE_AR_RWX;
468 pte |= ia64_pal_base & PTE_PPN_MASK;
469
470 __asm __volatile("ptr.d %0,%1; ptr.i %0,%1" ::
471 "r"(IA64_PHYS_TO_RR7(ia64_pal_base)), "r"(IA64_ID_PAGE_SHIFT<<2));
472
473 __asm __volatile("mov %0=psr" : "=r"(psr));
474 __asm __volatile("rsm psr.ic|psr.i");
475 ia64_srlz_i();
476 ia64_set_ifa(IA64_PHYS_TO_RR7(ia64_pal_base));
477 ia64_set_itir(IA64_ID_PAGE_SHIFT << 2);
478 ia64_srlz_d();
479 __asm __volatile("itr.d dtr[%0]=%1" :: "r"(1), "r"(pte));
480 ia64_srlz_d();
481 __asm __volatile("itr.i itr[%0]=%1" :: "r"(1), "r"(pte));
482 __asm __volatile("mov psr.l=%0" :: "r" (psr));
483 ia64_srlz_i();
484 }
485
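/*
 * Pin the gateway page at VM_MAX_ADDRESS with itr[3]/dtr[3]. The page
 * holds the signal trampolines (break_sigtramp/epc_sigtramp); its address
 * is exported to userland through ar.k5 and used by sendsig() below.
 */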
486 void
487 map_gateway_page(void)
488 {
489 pt_entry_t pte;
490 uint64_t psr;
491
492 pte = PTE_PRESENT | PTE_MA_WB | PTE_ACCESSED | PTE_DIRTY |
493 PTE_PL_KERN | PTE_AR_X_RX;
494 pte |= (uint64_t)ia64_gateway_page & PTE_PPN_MASK;
495
496 __asm __volatile("ptr.d %0,%1; ptr.i %0,%1" ::
497 "r"(VM_MAX_ADDRESS), "r"(PAGE_SHIFT << 2));
498
499 __asm __volatile("mov %0=psr" : "=r"(psr));
500 __asm __volatile("rsm psr.ic|psr.i");
501 ia64_srlz_i();
502 ia64_set_ifa(VM_MAX_ADDRESS);
503 ia64_set_itir(PAGE_SHIFT << 2);
504 ia64_srlz_d();
505 __asm __volatile("itr.d dtr[%0]=%1" :: "r"(3), "r"(pte));
506 ia64_srlz_d();
507 __asm __volatile("itr.i itr[%0]=%1" :: "r"(3), "r"(pte));
508 __asm __volatile("mov psr.l=%0" :: "r" (psr));
509 ia64_srlz_i();
510
511 /* Expose the mapping to userland in ar.k5 */
512 ia64_set_k5(VM_MAX_ADDRESS);
513 }
514
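/*
 * Compute the processor, bus and ITC frequencies from the SAL base
 * frequency (SAL_FREQ_BASE) scaled by the PAL_FREQ_RATIOS ratios.
 */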
515 static void
516 calculate_frequencies(void)
517 {
518 struct ia64_sal_result sal;
519 struct ia64_pal_result pal;
520
521 sal = ia64_sal_entry(SAL_FREQ_BASE, 0, 0, 0, 0, 0, 0, 0);
522 pal = ia64_call_pal_static(PAL_FREQ_RATIOS, 0, 0, 0);
523
524 if (sal.sal_status == 0 && pal.pal_status == 0) {
525 if (bootverbose) {
526 printf("Platform clock frequency %ld Hz\n",
527 sal.sal_result[0]);
528 printf("Processor ratio %ld/%ld, Bus ratio %ld/%ld, "
529 "ITC ratio %ld/%ld\n",
530 pal.pal_result[0] >> 32,
531 pal.pal_result[0] & ((1L << 32) - 1),
532 pal.pal_result[1] >> 32,
533 pal.pal_result[1] & ((1L << 32) - 1),
534 pal.pal_result[2] >> 32,
535 pal.pal_result[2] & ((1L << 32) - 1));
536 }
537 processor_frequency =
538 sal.sal_result[0] * (pal.pal_result[0] >> 32)
539 / (pal.pal_result[0] & ((1L << 32) - 1));
540 bus_frequency =
541 sal.sal_result[0] * (pal.pal_result[1] >> 32)
542 / (pal.pal_result[1] & ((1L << 32) - 1));
543 itc_frequency =
544 sal.sal_result[0] * (pal.pal_result[2] >> 32)
545 / (pal.pal_result[2] & ((1L << 32) - 1));
546 }
547 }
548
549 void
550 ia64_init(void)
551 {
552 int phys_avail_cnt;
553 vm_offset_t kernstart, kernend;
554 vm_offset_t kernstartpfn, kernendpfn, pfn0, pfn1;
555 char *p;
556 struct efi_md *md;
557 int metadata_missing;
558
559 /* NO OUTPUT ALLOWED UNTIL FURTHER NOTICE */
560
561 /*
562 * TODO: Disable interrupts, floating point etc.
563 * Maybe flush cache and tlb
564 */
565 ia64_set_fpsr(IA64_FPSR_DEFAULT);
566
567 /*
568 * TODO: Get critical system information (if possible, from the
569 * information provided by the boot program).
570 */
571
572 /*
573 * pa_bootinfo is the physical address of the bootinfo block as
574 * passed to us by the loader and set in locore.s.
575 */
576 bootinfo = *(struct bootinfo *)(IA64_PHYS_TO_RR7(pa_bootinfo));
577
578 if (bootinfo.bi_magic != BOOTINFO_MAGIC || bootinfo.bi_version != 1) {
579 bzero(&bootinfo, sizeof(bootinfo));
580 bootinfo.bi_kernend = (vm_offset_t) round_page(_end);
581 }
582
583 /*
584 * Look for the I/O ports first - we need them for console
585 * probing.
586 */
587 for (md = efi_md_first(); md != NULL; md = efi_md_next(md)) {
588 switch (md->md_type) {
589 case EFI_MD_TYPE_IOPORT:
590 ia64_port_base = IA64_PHYS_TO_RR6(md->md_phys);
591 break;
592 case EFI_MD_TYPE_PALCODE:
593 ia64_pal_base = md->md_phys;
594 break;
595 }
596 }
597
598 metadata_missing = 0;
599 if (bootinfo.bi_modulep)
600 preload_metadata = (caddr_t)bootinfo.bi_modulep;
601 else
602 metadata_missing = 1;
603
604 if (envmode == 0 && bootinfo.bi_envp)
605 kern_envp = (caddr_t)bootinfo.bi_envp;
606 else
607 kern_envp = static_env;
608
609 /*
610 * Look at arguments passed to us and compute boothowto.
611 */
612 boothowto = bootinfo.bi_boothowto;
613
614 /*
615 * Catch case of boot_verbose set in environment.
616 */
617 if ((p = getenv("boot_verbose")) != NULL) {
618 if (strcmp(p, "yes") == 0 || strcmp(p, "YES") == 0) {
619 boothowto |= RB_VERBOSE;
620 }
621 freeenv(p);
622 }
623
624 if (boothowto & RB_VERBOSE)
625 bootverbose = 1;
626
627 /*
628 * Setup the PCPU data for the bootstrap processor. It is needed
629 * by printf(). Also, since printf() has critical sections, we
630 * need to initialize at least pc_curthread.
631 */
632 pcpup = &pcpu0;
633 ia64_set_k4((u_int64_t)pcpup);
634 pcpu_init(pcpup, 0, sizeof(pcpu0));
635 PCPU_SET(curthread, &thread0);
636
637 /*
638 * Initialize the console before we print anything out.
639 */
640 cninit();
641
642 /* OUTPUT NOW ALLOWED */
643
644 if (ia64_pal_base != 0) {
645 ia64_pal_base &= ~IA64_ID_PAGE_MASK;
646 /*
647 * We use a TR to map the first 256M of memory - this might
648 * cover the palcode too.
649 */
650 if (ia64_pal_base == 0)
651 printf("PAL code mapped by the kernel's TR\n");
652 } else
653 printf("PAL code not found\n");
654
655 /*
656 * Wire things up so we can call the firmware.
657 */
658 map_pal_code();
659 efi_boot_minimal(bootinfo.bi_systab);
660 ia64_sal_init();
661 calculate_frequencies();
662
663 /*
664 * Find the beginning and end of the kernel.
665 */
666 kernstart = trunc_page(kernel_text);
667 #ifdef DDB
668 ksym_start = bootinfo.bi_symtab;
669 ksym_end = bootinfo.bi_esymtab;
670 kernend = (vm_offset_t)round_page(ksym_end);
671 #else
672 kernend = (vm_offset_t)round_page(_end);
673 #endif
674
675 /* But if the bootstrap tells us otherwise, believe it! */
676 if (bootinfo.bi_kernend)
677 kernend = round_page(bootinfo.bi_kernend);
678 if (metadata_missing)
679 printf("WARNING: loader(8) metadata is missing!\n");
680
681 /* Get FPSWA interface */
682 fpswa_iface = (bootinfo.bi_fpswa == 0) ? NULL :
683 (struct fpswa_iface *)IA64_PHYS_TO_RR7(bootinfo.bi_fpswa);
684
685 /* Init basic tunables, including hz */
686 init_param1();
687
688 p = getenv("kernelname");
689 if (p) {
690 strncpy(kernelname, p, sizeof(kernelname) - 1);
691 freeenv(p);
692 }
693
694 kernstartpfn = atop(IA64_RR_MASK(kernstart));
695 kernendpfn = atop(IA64_RR_MASK(kernend));
696
697 /*
698 * Size the memory regions and load phys_avail[] with the results.
699 */
700
701 /*
702 * Find out how much memory is available, by looking at
703 * the memory descriptors.
704 */
705
706 #ifdef DEBUG_MD
707 printf("Memory descriptor count: %d\n", mdcount);
708 #endif
709
710 phys_avail_cnt = 0;
711 for (md = efi_md_first(); md != NULL; md = efi_md_next(md)) {
712 #ifdef DEBUG_MD
713 printf("MD %p: type %d pa 0x%lx cnt 0x%lx\n", md,
714 md->md_type, md->md_phys, md->md_pages);
715 #endif
716
717 pfn0 = ia64_btop(round_page(md->md_phys));
718 pfn1 = ia64_btop(trunc_page(md->md_phys + md->md_pages * 4096));
719 if (pfn1 <= pfn0)
720 continue;
721
722 if (md->md_type != EFI_MD_TYPE_FREE)
723 continue;
724
725 /*
726 * We have a memory descriptor that describes conventional
727 * memory that is for general use. We must determine if the
728 * loader has put the kernel in this region.
729 */
730 physmem += (pfn1 - pfn0);
731 if (pfn0 <= kernendpfn && kernstartpfn <= pfn1) {
732 /*
733 * Must compute the location of the kernel
734 * within the segment.
735 */
736 #ifdef DEBUG_MD
737 printf("Descriptor %p contains kernel\n", mp);
738 #endif
739 if (pfn0 < kernstartpfn) {
740 /*
741 * There is a chunk before the kernel.
742 */
743 #ifdef DEBUG_MD
744 printf("Loading chunk before kernel: "
745 "0x%lx / 0x%lx\n", pfn0, kernstartpfn);
746 #endif
747 phys_avail[phys_avail_cnt] = ia64_ptob(pfn0);
748 phys_avail[phys_avail_cnt+1] = ia64_ptob(kernstartpfn);
749 phys_avail_cnt += 2;
750 }
751 if (kernendpfn < pfn1) {
752 /*
753 * There is a chunk after the kernel.
754 */
755 #ifdef DEBUG_MD
756 printf("Loading chunk after kernel: "
757 "0x%lx / 0x%lx\n", kernendpfn, pfn1);
758 #endif
759 phys_avail[phys_avail_cnt] = ia64_ptob(kernendpfn);
760 phys_avail[phys_avail_cnt+1] = ia64_ptob(pfn1);
761 phys_avail_cnt += 2;
762 }
763 } else {
764 /*
765 * Just load this cluster as one chunk.
766 */
767 #ifdef DEBUG_MD
768 printf("Loading descriptor %d: 0x%lx / 0x%lx\n", i,
769 pfn0, pfn1);
770 #endif
771 phys_avail[phys_avail_cnt] = ia64_ptob(pfn0);
772 phys_avail[phys_avail_cnt+1] = ia64_ptob(pfn1);
773 phys_avail_cnt += 2;
774
775 }
776 }
777 phys_avail[phys_avail_cnt] = 0;
778
779 Maxmem = physmem;
780 init_param2(physmem);
781
782 /*
783 * Initialize error message buffer (at end of core).
784 */
785 msgbufp = (struct msgbuf *)pmap_steal_memory(MSGBUF_SIZE);
786 msgbufinit(msgbufp, MSGBUF_SIZE);
787
788 proc_linkup0(&proc0, &thread0);
789 /*
790 * Init mapping for kernel stack for proc 0
791 */
792 proc0kstack = (vm_offset_t)kstack;
793 thread0.td_kstack = proc0kstack;
794 thread0.td_kstack_pages = KSTACK_PAGES;
795
796 mutex_init();
797
798 /*
799 * Initialize the rest of proc 0's PCB.
800 *
801 * Set the kernel sp, reserving space for an (empty) trapframe,
802 * and make proc0's trapframe pointer point to it for sanity.
803 * Initialise proc0's backing store to start after u area.
804 */
805 cpu_thread_alloc(&thread0);
806 thread0.td_frame->tf_flags = FRAME_SYSCALL;
807 thread0.td_pcb->pcb_special.sp =
808 (u_int64_t)thread0.td_frame - 16;
809 thread0.td_pcb->pcb_special.bspstore = thread0.td_kstack;
810
811 /*
812 * Initialize the virtual memory system.
813 */
814 pmap_bootstrap();
815
816 /*
817 * Initialize debuggers, and break into them if appropriate.
818 */
819 kdb_init();
820
821 #ifdef KDB
822 if (boothowto & RB_KDB)
823 kdb_enter_why(KDB_WHY_BOOTFLAGS,
824 "Boot flags requested debugger\n");
825 #endif
826
827 ia64_set_tpr(0);
828 ia64_srlz_d();
829
830 /*
831 * Save our current context so that we have a known (maybe even
832 * sane) context as the initial context for new threads that are
833 * forked from us. If any of those threads (including thread0)
834 * does something wrong, we may be lucky and return here where
835 * we're ready for them with a nice panic.
836 */
837 if (!savectx(thread0.td_pcb))
838 mi_startup();
839
840 /* We should not get here. */
841 panic("ia64_init: Whooaa there!");
842 /* NOTREACHED */
843 }
844
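/*
 * Translate a legacy I/O port number into an uncacheable (region 6)
 * address in the memory-mapped I/O port space found in the EFI map.
 */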
845 __volatile void *
846 ia64_ioport_address(u_int port)
847 {
848 uint64_t addr;
849
850 addr = (port > 0xffff) ? IA64_PHYS_TO_RR6((uint64_t)port) :
851 ia64_port_base | ((port & 0xfffc) << 10) | (port & 0xFFF);
852 return ((__volatile void *)addr);
853 }
854
855 uint64_t
856 ia64_get_hcdp(void)
857 {
858
859 return (bootinfo.bi_hcdp);
860 }
861
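/*
 * Simple word-at-a-time bzero: align the pointer, clear eight words per
 * iteration, then finish the remainder a word and a byte at a time.
 */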
862 void
863 bzero(void *buf, size_t len)
864 {
865 caddr_t p = buf;
866
867 while (((vm_offset_t) p & (sizeof(u_long) - 1)) && len) {
868 *p++ = 0;
869 len--;
870 }
871 while (len >= sizeof(u_long) * 8) {
872 *(u_long*) p = 0;
873 *((u_long*) p + 1) = 0;
874 *((u_long*) p + 2) = 0;
875 *((u_long*) p + 3) = 0;
876 len -= sizeof(u_long) * 8;
877 *((u_long*) p + 4) = 0;
878 *((u_long*) p + 5) = 0;
879 *((u_long*) p + 6) = 0;
880 *((u_long*) p + 7) = 0;
881 p += sizeof(u_long) * 8;
882 }
883 while (len >= sizeof(u_long)) {
884 *(u_long*) p = 0;
885 len -= sizeof(u_long);
886 p += sizeof(u_long);
887 }
888 while (len) {
889 *p++ = 0;
890 len--;
891 }
892 }
893
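/* Busy-wait for roughly n microseconds by polling the ITC. */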
894 void
895 DELAY(int n)
896 {
897 u_int64_t start, end, now;
898
899 start = ia64_get_itc();
900 end = start + (itc_frequency * n) / 1000000;
901 /* printf("DELAY from 0x%lx to 0x%lx\n", start, end); */
902 do {
903 now = ia64_get_itc();
904 } while (now < end || (now > start && end < start));
905 }
906
907 /*
908 * Send an interrupt (signal) to a process.
909 */
910 void
911 sendsig(sig_t catcher, ksiginfo_t *ksi, sigset_t *mask)
912 {
913 struct proc *p;
914 struct thread *td;
915 struct trapframe *tf;
916 struct sigacts *psp;
917 struct sigframe sf, *sfp;
918 u_int64_t sbs, sp;
919 int oonstack;
920 int sig;
921 u_long code;
922
923 td = curthread;
924 p = td->td_proc;
925 PROC_LOCK_ASSERT(p, MA_OWNED);
926 sig = ksi->ksi_signo;
927 code = ksi->ksi_code;
928 psp = p->p_sigacts;
929 mtx_assert(&psp->ps_mtx, MA_OWNED);
930 tf = td->td_frame;
931 sp = tf->tf_special.sp;
932 oonstack = sigonstack(sp);
933 sbs = 0;
934
935 /* save user context */
936 bzero(&sf, sizeof(struct sigframe));
937 sf.sf_uc.uc_sigmask = *mask;
938 sf.sf_uc.uc_stack = td->td_sigstk;
939 sf.sf_uc.uc_stack.ss_flags = (td->td_pflags & TDP_ALTSTACK)
940 ? ((oonstack) ? SS_ONSTACK : 0) : SS_DISABLE;
941
942 /*
943 * Allocate and validate space for the signal handler
944 * context. Note that if the stack is in P0 space, the
945 * call to grow() is a nop, and the useracc() check
946 * will fail if the process has not already allocated
947 * the space with a `brk'.
948 */
949 if ((td->td_pflags & TDP_ALTSTACK) != 0 && !oonstack &&
950 SIGISMEMBER(psp->ps_sigonstack, sig)) {
951 sbs = (u_int64_t)td->td_sigstk.ss_sp;
952 sbs = (sbs + 15) & ~15;
953 sfp = (struct sigframe *)(sbs + td->td_sigstk.ss_size);
954 #if defined(COMPAT_43)
955 td->td_sigstk.ss_flags |= SS_ONSTACK;
956 #endif
957 } else
958 sfp = (struct sigframe *)sp;
959 sfp = (struct sigframe *)((u_int64_t)(sfp - 1) & ~15);
960
961 /* Fill in the siginfo structure for POSIX handlers. */
962 if (SIGISMEMBER(psp->ps_siginfo, sig)) {
963 sf.sf_si = ksi->ksi_info;
964 sf.sf_si.si_signo = sig;
965 /*
966 * XXX this shouldn't be here after code in trap.c
967 * is fixed
968 */
969 sf.sf_si.si_addr = (void*)tf->tf_special.ifa;
970 code = (u_int64_t)&sfp->sf_si;
971 }
972
973 mtx_unlock(&psp->ps_mtx);
974 PROC_UNLOCK(p);
975
976 get_mcontext(td, &sf.sf_uc.uc_mcontext, 0);
977
978 /* Copy the frame out to userland. */
979 if (copyout(&sf, sfp, sizeof(sf)) != 0) {
980 /*
981 * Process has trashed its stack; give it an illegal
982 * instruction to halt it in its tracks.
983 */
984 PROC_LOCK(p);
985 sigexit(td, SIGILL);
986 return;
987 }
988
989 if ((tf->tf_flags & FRAME_SYSCALL) == 0) {
990 tf->tf_special.psr &= ~IA64_PSR_RI;
991 tf->tf_special.iip = ia64_get_k5() +
992 ((uint64_t)break_sigtramp - (uint64_t)ia64_gateway_page);
993 } else
994 tf->tf_special.iip = ia64_get_k5() +
995 ((uint64_t)epc_sigtramp - (uint64_t)ia64_gateway_page);
996
997 /*
998 * Setup the trapframe to return to the signal trampoline. We pass
999 * information to the trampoline in the following registers:
1000 *
1001 * gp new backing store or NULL
1002 * r8 signal number
1003 * r9 signal code or siginfo pointer
1004 * r10 signal handler (function descriptor)
1005 */
1006 tf->tf_special.sp = (u_int64_t)sfp - 16;
1007 tf->tf_special.gp = sbs;
1008 tf->tf_special.bspstore = sf.sf_uc.uc_mcontext.mc_special.bspstore;
1009 tf->tf_special.ndirty = 0;
1010 tf->tf_special.rnat = sf.sf_uc.uc_mcontext.mc_special.rnat;
1011 tf->tf_scratch.gr8 = sig;
1012 tf->tf_scratch.gr9 = code;
1013 tf->tf_scratch.gr10 = (u_int64_t)catcher;
1014
1015 PROC_LOCK(p);
1016 mtx_lock(&psp->ps_mtx);
1017 }
1018
1019 /*
1020 * System call to cleanup state after a signal
1021 * has been taken. Reset signal mask and
1022 * stack state from context left by sendsig (above).
1023 * Return to previous pc and psl as specified by
1024 * context left by sendsig. Check carefully to
1025 * make sure that the user has not modified the
1026 * state to gain improper privileges.
1027 *
1028 * MPSAFE
1029 */
1030 int
1031 sigreturn(struct thread *td,
1032 struct sigreturn_args /* {
1033 ucontext_t *sigcntxp;
1034 } */ *uap)
1035 {
1036 ucontext_t uc;
1037 struct trapframe *tf;
1038 struct proc *p;
1039 struct pcb *pcb;
1040
1041 tf = td->td_frame;
1042 p = td->td_proc;
1043 pcb = td->td_pcb;
1044
1045 /*
1046 * Fetch the entire context structure at once for speed.
1047 * We don't use a normal argument to simplify RSE handling.
1048 */
1049 if (copyin(uap->sigcntxp, (caddr_t)&uc, sizeof(uc)))
1050 return (EFAULT);
1051
1052 set_mcontext(td, &uc.uc_mcontext);
1053
1054 PROC_LOCK(p);
1055 #if defined(COMPAT_43)
1056 if (sigonstack(tf->tf_special.sp))
1057 td->td_sigstk.ss_flags |= SS_ONSTACK;
1058 else
1059 td->td_sigstk.ss_flags &= ~SS_ONSTACK;
1060 #endif
1061 td->td_sigmask = uc.uc_sigmask;
1062 SIG_CANTMASK(td->td_sigmask);
1063 signotify(td);
1064 PROC_UNLOCK(p);
1065
1066 return (EJUSTRETURN);
1067 }
1068
1069 #ifdef COMPAT_FREEBSD4
1070 int
1071 freebsd4_sigreturn(struct thread *td, struct freebsd4_sigreturn_args *uap)
1072 {
1073
1074 return sigreturn(td, (struct sigreturn_args *)uap);
1075 }
1076 #endif
1077
1078 /*
1079 * Construct a PCB from a trapframe. This is called from kdb_trap() where
1080 * we want to start a backtrace from the function that caused us to enter
1081 * the debugger. We have the context in the trapframe, but base the trace
1082 * on the PCB. The PCB doesn't have to be perfect, as long as it contains
1083 * enough for a backtrace.
1084 */
1085 void
1086 makectx(struct trapframe *tf, struct pcb *pcb)
1087 {
1088
1089 pcb->pcb_special = tf->tf_special;
1090 pcb->pcb_special.__spare = ~0UL; /* XXX see unwind.c */
1091 save_callee_saved(&pcb->pcb_preserved);
1092 save_callee_saved_fp(&pcb->pcb_preserved_fp);
1093 }
1094
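/*
 * Write the dirty stacked registers held in the kernel RSE backing store
 * out to the thread's user backing store: directly with copyout() for
 * curthread, or through proc_rwmem() for another thread. On success the
 * context's bspstore is advanced and ndirty is cleared.
 */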
1095 int
1096 ia64_flush_dirty(struct thread *td, struct _special *r)
1097 {
1098 struct iovec iov;
1099 struct uio uio;
1100 uint64_t bspst, kstk, rnat;
1101 int error, locked;
1102
1103 if (r->ndirty == 0)
1104 return (0);
1105
1106 kstk = td->td_kstack + (r->bspstore & 0x1ffUL);
1107 if (td == curthread) {
1108 __asm __volatile("mov ar.rsc=0;;");
1109 __asm __volatile("mov %0=ar.bspstore" : "=r"(bspst));
1110 /* Make sure we have all the user registers written out. */
1111 if (bspst - kstk < r->ndirty) {
1112 __asm __volatile("flushrs;;");
1113 __asm __volatile("mov %0=ar.bspstore" : "=r"(bspst));
1114 }
1115 __asm __volatile("mov %0=ar.rnat;;" : "=r"(rnat));
1116 __asm __volatile("mov ar.rsc=3");
1117 error = copyout((void*)kstk, (void*)r->bspstore, r->ndirty);
1118 kstk += r->ndirty;
1119 r->rnat = (bspst > kstk && (bspst & 0x1ffL) < (kstk & 0x1ffL))
1120 ? *(uint64_t*)(kstk | 0x1f8L) : rnat;
1121 } else {
1122 locked = PROC_LOCKED(td->td_proc);
1123 if (!locked)
1124 PHOLD(td->td_proc);
1125 iov.iov_base = (void*)(uintptr_t)kstk;
1126 iov.iov_len = r->ndirty;
1127 uio.uio_iov = &iov;
1128 uio.uio_iovcnt = 1;
1129 uio.uio_offset = r->bspstore;
1130 uio.uio_resid = r->ndirty;
1131 uio.uio_segflg = UIO_SYSSPACE;
1132 uio.uio_rw = UIO_WRITE;
1133 uio.uio_td = td;
1134 error = proc_rwmem(td->td_proc, &uio);
1135 /*
1136 * XXX proc_rwmem() doesn't currently return ENOSPC,
1137 * so I think it can bogusly return 0. Neither do
1138 * we allow short writes.
1139 */
1140 if (uio.uio_resid != 0 && error == 0)
1141 error = ENOSPC;
1142 if (!locked)
1143 PRELE(td->td_proc);
1144 }
1145
1146 r->bspstore += r->ndirty;
1147 r->ndirty = 0;
1148 return (error);
1149 }
1150
1151 int
1152 get_mcontext(struct thread *td, mcontext_t *mc, int flags)
1153 {
1154 struct trapframe *tf;
1155 int error;
1156
1157 tf = td->td_frame;
1158 bzero(mc, sizeof(*mc));
1159 mc->mc_special = tf->tf_special;
1160 error = ia64_flush_dirty(td, &mc->mc_special);
1161 if (tf->tf_flags & FRAME_SYSCALL) {
1162 mc->mc_flags |= _MC_FLAGS_SYSCALL_CONTEXT;
1163 mc->mc_scratch = tf->tf_scratch;
1164 if (flags & GET_MC_CLEAR_RET) {
1165 mc->mc_scratch.gr8 = 0;
1166 mc->mc_scratch.gr9 = 0;
1167 mc->mc_scratch.gr10 = 0;
1168 mc->mc_scratch.gr11 = 0;
1169 }
1170 } else {
1171 mc->mc_flags |= _MC_FLAGS_ASYNC_CONTEXT;
1172 mc->mc_scratch = tf->tf_scratch;
1173 mc->mc_scratch_fp = tf->tf_scratch_fp;
1174 /*
1175 * XXX If the thread never used the high FP registers, we
1176 * probably shouldn't waste time saving them.
1177 */
1178 ia64_highfp_save(td);
1179 mc->mc_flags |= _MC_FLAGS_HIGHFP_VALID;
1180 mc->mc_high_fp = td->td_pcb->pcb_high_fp;
1181 }
1182 save_callee_saved(&mc->mc_preserved);
1183 save_callee_saved_fp(&mc->mc_preserved_fp);
1184 return (error);
1185 }
1186
1187 int
1188 set_mcontext(struct thread *td, const mcontext_t *mc)
1189 {
1190 struct _special s;
1191 struct trapframe *tf;
1192 uint64_t psrmask;
1193
1194 tf = td->td_frame;
1195
1196 KASSERT((tf->tf_special.ndirty & ~PAGE_MASK) == 0,
1197 ("Whoa there! We have more than 8KB of dirty registers!"));
1198
1199 s = mc->mc_special;
1200 /*
1201 * Only copy the user mask and the restart instruction bit from
1202 * the new context.
1203 */
1204 psrmask = IA64_PSR_BE | IA64_PSR_UP | IA64_PSR_AC | IA64_PSR_MFL |
1205 IA64_PSR_MFH | IA64_PSR_RI;
1206 s.psr = (tf->tf_special.psr & ~psrmask) | (s.psr & psrmask);
1207 /* We don't have any dirty registers of the new context. */
1208 s.ndirty = 0;
1209 if (mc->mc_flags & _MC_FLAGS_ASYNC_CONTEXT) {
1210 /*
1211 * We can get an async context passed to us while we
1212 * entered the kernel through a syscall: sigreturn(2)
1213 * and kse_switchin(2) both take contexts that could
1214 * previously be the result of a trap or interrupt.
1215 * Hence, we cannot assert that the trapframe is not
1216 * a syscall frame, but we can assert that it's at
1217 * least an expected syscall.
1218 */
1219 if (tf->tf_flags & FRAME_SYSCALL) {
1220 KASSERT(tf->tf_scratch.gr15 == SYS_sigreturn ||
1221 tf->tf_scratch.gr15 == SYS_kse_switchin, ("foo"));
1222 tf->tf_flags &= ~FRAME_SYSCALL;
1223 }
1224 tf->tf_scratch = mc->mc_scratch;
1225 tf->tf_scratch_fp = mc->mc_scratch_fp;
1226 if (mc->mc_flags & _MC_FLAGS_HIGHFP_VALID)
1227 td->td_pcb->pcb_high_fp = mc->mc_high_fp;
1228 } else {
1229 KASSERT((tf->tf_flags & FRAME_SYSCALL) != 0, ("foo"));
1230 if ((mc->mc_flags & _MC_FLAGS_SYSCALL_CONTEXT) == 0) {
1231 s.cfm = tf->tf_special.cfm;
1232 s.iip = tf->tf_special.iip;
1233 tf->tf_scratch.gr15 = 0; /* Clear syscall nr. */
1234 } else
1235 tf->tf_scratch = mc->mc_scratch;
1236 }
1237 tf->tf_special = s;
1238 restore_callee_saved(&mc->mc_preserved);
1239 restore_callee_saved_fp(&mc->mc_preserved_fp);
1240
1241 if (mc->mc_flags & _MC_FLAGS_KSE_SET_MBOX)
1242 suword((caddr_t)mc->mc_special.ifa, mc->mc_special.isr);
1243
1244 return (0);
1245 }
1246
1247 /*
1248 * Clear registers on exec.
1249 */
1250 void
1251 exec_setregs(struct thread *td, u_long entry, u_long stack, u_long ps_strings)
1252 {
1253 struct trapframe *tf;
1254 uint64_t *ksttop, *kst;
1255
1256 tf = td->td_frame;
1257 ksttop = (uint64_t*)(td->td_kstack + tf->tf_special.ndirty +
1258 (tf->tf_special.bspstore & 0x1ffUL));
1259
1260 /*
1261 * We can ignore up to 8KB of dirty registers by masking off the
1262 * lower 13 bits in exception_restore() or epc_syscall(). This
1263 * should be enough for a couple of years, but if there are more
1264 * than 8KB of dirty registers, we lose track of the bottom of
1265 * the kernel stack. The solution is to copy the active part of
1266 * the kernel stack down 1 page (or 2, but not more than that)
1267 * so that we always have less than 8KB of dirty registers.
1268 */
1269 KASSERT((tf->tf_special.ndirty & ~PAGE_MASK) == 0,
1270 ("Whoa there! We have more than 8KB of dirty registers!"));
1271
1272 bzero(&tf->tf_special, sizeof(tf->tf_special));
1273 if ((tf->tf_flags & FRAME_SYSCALL) == 0) { /* break syscalls. */
1274 bzero(&tf->tf_scratch, sizeof(tf->tf_scratch));
1275 bzero(&tf->tf_scratch_fp, sizeof(tf->tf_scratch_fp));
1276 tf->tf_special.cfm = (1UL<<63) | (3UL<<7) | 3UL;
1277 tf->tf_special.bspstore = IA64_BACKINGSTORE;
1278 /*
1279 * Copy the arguments onto the kernel register stack so that
1280 * they get loaded by the loadrs instruction. Skip over the
1281 * NaT collection points.
1282 */
1283 kst = ksttop - 1;
1284 if (((uintptr_t)kst & 0x1ff) == 0x1f8)
1285 *kst-- = 0;
1286 *kst-- = 0;
1287 if (((uintptr_t)kst & 0x1ff) == 0x1f8)
1288 *kst-- = 0;
1289 *kst-- = ps_strings;
1290 if (((uintptr_t)kst & 0x1ff) == 0x1f8)
1291 *kst-- = 0;
1292 *kst = stack;
1293 tf->tf_special.ndirty = (ksttop - kst) << 3;
1294 } else { /* epc syscalls (default). */
1295 tf->tf_special.cfm = (3UL<<62) | (3UL<<7) | 3UL;
1296 tf->tf_special.bspstore = IA64_BACKINGSTORE + 24;
1297 /*
1298 * Write values for out0, out1 and out2 to the user's backing
1299 * store and arrange for them to be restored into the user's
1300 * initial register frame.
1301 * Assumes that (bspstore & 0x1f8) < 0x1e0.
1302 */
1303 suword((caddr_t)tf->tf_special.bspstore - 24, stack);
1304 suword((caddr_t)tf->tf_special.bspstore - 16, ps_strings);
1305 suword((caddr_t)tf->tf_special.bspstore - 8, 0);
1306 }
1307
1308 tf->tf_special.iip = entry;
1309 tf->tf_special.sp = (stack & ~15) - 16;
1310 tf->tf_special.rsc = 0xf;
1311 tf->tf_special.fpsr = IA64_FPSR_DEFAULT;
1312 tf->tf_special.psr = IA64_PSR_IC | IA64_PSR_I | IA64_PSR_IT |
1313 IA64_PSR_DT | IA64_PSR_RT | IA64_PSR_DFH | IA64_PSR_BN |
1314 IA64_PSR_CPL_USER;
1315 }
1316
1317 int
1318 ptrace_set_pc(struct thread *td, unsigned long addr)
1319 {
1320 uint64_t slot;
1321
1322 switch (addr & 0xFUL) {
1323 case 0:
1324 slot = IA64_PSR_RI_0;
1325 break;
1326 case 1:
1327 /* XXX we need to deal with MLX bundles here */
1328 slot = IA64_PSR_RI_1;
1329 break;
1330 case 2:
1331 slot = IA64_PSR_RI_2;
1332 break;
1333 default:
1334 return (EINVAL);
1335 }
1336
1337 td->td_frame->tf_special.iip = addr & ~0x0FULL;
1338 td->td_frame->tf_special.psr =
1339 (td->td_frame->tf_special.psr & ~IA64_PSR_RI) | slot;
1340 return (0);
1341 }
1342
1343 int
1344 ptrace_single_step(struct thread *td)
1345 {
1346 struct trapframe *tf;
1347
1348 /*
1349 * There's no way to set single stepping when we're leaving the
1350 * kernel through the EPC syscall path. The way we solve this is
1351 * by enabling the lower-privilege trap so that we re-enter the
1352 * kernel as soon as the privilege level changes. See trap.c for
1353 * how we proceed from there.
1354 */
1355 tf = td->td_frame;
1356 if (tf->tf_flags & FRAME_SYSCALL)
1357 tf->tf_special.psr |= IA64_PSR_LP;
1358 else
1359 tf->tf_special.psr |= IA64_PSR_SS;
1360 return (0);
1361 }
1362
1363 int
1364 ptrace_clear_single_step(struct thread *td)
1365 {
1366 struct trapframe *tf;
1367
1368 /*
1369 * Clear any and all status bits we may use to implement single
1370 * stepping.
1371 */
1372 tf = td->td_frame;
1373 tf->tf_special.psr &= ~IA64_PSR_SS;
1374 tf->tf_special.psr &= ~IA64_PSR_LP;
1375 tf->tf_special.psr &= ~IA64_PSR_TB;
1376 return (0);
1377 }
1378
1379 int
1380 fill_regs(struct thread *td, struct reg *regs)
1381 {
1382 struct trapframe *tf;
1383
1384 tf = td->td_frame;
1385 regs->r_special = tf->tf_special;
1386 regs->r_scratch = tf->tf_scratch;
1387 save_callee_saved(&regs->r_preserved);
1388 return (0);
1389 }
1390
1391 int
1392 set_regs(struct thread *td, struct reg *regs)
1393 {
1394 struct trapframe *tf;
1395 int error;
1396
1397 tf = td->td_frame;
1398 error = ia64_flush_dirty(td, &tf->tf_special);
1399 if (!error) {
1400 tf->tf_special = regs->r_special;
1401 tf->tf_special.bspstore += tf->tf_special.ndirty;
1402 tf->tf_special.ndirty = 0;
1403 tf->tf_scratch = regs->r_scratch;
1404 restore_callee_saved(&regs->r_preserved);
1405 }
1406 return (error);
1407 }
1408
1409 int
1410 fill_dbregs(struct thread *td, struct dbreg *dbregs)
1411 {
1412
1413 return (ENOSYS);
1414 }
1415
1416 int
1417 set_dbregs(struct thread *td, struct dbreg *dbregs)
1418 {
1419
1420 return (ENOSYS);
1421 }
1422
1423 int
1424 fill_fpregs(struct thread *td, struct fpreg *fpregs)
1425 {
1426 struct trapframe *frame = td->td_frame;
1427 struct pcb *pcb = td->td_pcb;
1428
1429 /* Save the high FP registers. */
1430 ia64_highfp_save(td);
1431
1432 fpregs->fpr_scratch = frame->tf_scratch_fp;
1433 save_callee_saved_fp(&fpregs->fpr_preserved);
1434 fpregs->fpr_high = pcb->pcb_high_fp;
1435 return (0);
1436 }
1437
1438 int
1439 set_fpregs(struct thread *td, struct fpreg *fpregs)
1440 {
1441 struct trapframe *frame = td->td_frame;
1442 struct pcb *pcb = td->td_pcb;
1443
1444 /* Throw away the high FP registers (should be redundant). */
1445 ia64_highfp_drop(td);
1446
1447 frame->tf_scratch_fp = fpregs->fpr_scratch;
1448 restore_callee_saved_fp(&fpregs->fpr_preserved);
1449 pcb->pcb_high_fp = fpregs->fpr_high;
1450 return (0);
1451 }
1452
1453 /*
1454 * High FP register functions.
1455 */
1456
1457 int
1458 ia64_highfp_drop(struct thread *td)
1459 {
1460 struct pcb *pcb;
1461 struct pcpu *cpu;
1462 struct thread *thr;
1463
1464 mtx_lock_spin(&td->td_md.md_highfp_mtx);
1465 pcb = td->td_pcb;
1466 cpu = pcb->pcb_fpcpu;
1467 if (cpu == NULL) {
1468 mtx_unlock_spin(&td->td_md.md_highfp_mtx);
1469 return (0);
1470 }
1471 pcb->pcb_fpcpu = NULL;
1472 thr = cpu->pc_fpcurthread;
1473 cpu->pc_fpcurthread = NULL;
1474 mtx_unlock_spin(&td->td_md.md_highfp_mtx);
1475
1476 /* Post-mortem sanity checking. */
1477 KASSERT(thr == td, ("Inconsistent high FP state"));
1478 return (1);
1479 }
1480
1481 int
1482 ia64_highfp_save(struct thread *td)
1483 {
1484 struct pcb *pcb;
1485 struct pcpu *cpu;
1486 struct thread *thr;
1487
1488 /* Don't save if the high FP registers weren't modified. */
1489 if ((td->td_frame->tf_special.psr & IA64_PSR_MFH) == 0)
1490 return (ia64_highfp_drop(td));
1491
1492 mtx_lock_spin(&td->td_md.md_highfp_mtx);
1493 pcb = td->td_pcb;
1494 cpu = pcb->pcb_fpcpu;
1495 if (cpu == NULL) {
1496 mtx_unlock_spin(&td->td_md.md_highfp_mtx);
1497 return (0);
1498 }
1499 #ifdef SMP
1500 if (td == curthread)
1501 sched_pin();
1502 if (cpu != pcpup) {
1503 mtx_unlock_spin(&td->td_md.md_highfp_mtx);
1504 ipi_send(cpu, IPI_HIGH_FP);
1505 if (td == curthread)
1506 sched_unpin();
1507 while (pcb->pcb_fpcpu == cpu)
1508 DELAY(100);
1509 return (1);
1510 } else {
1511 save_high_fp(&pcb->pcb_high_fp);
1512 if (td == curthread)
1513 sched_unpin();
1514 }
1515 #else
1516 save_high_fp(&pcb->pcb_high_fp);
1517 #endif
1518 pcb->pcb_fpcpu = NULL;
1519 thr = cpu->pc_fpcurthread;
1520 cpu->pc_fpcurthread = NULL;
1521 mtx_unlock_spin(&td->td_md.md_highfp_mtx);
1522
1523 /* Post-mortem sanity checking. */
1524 KASSERT(thr == td, ("Inconsistent high FP state"));
1525 return (1);
1526 }
1527
1528 void
1529 ia64_invalidate_icache(vm_offset_t va, vm_offset_t sz)
1530 {
1531 vm_offset_t lim;
1532
1533 if (!ia64_inval_icache_needed)
1534 return;
1535
1536 lim = va + sz;
1537 while (va < lim) {
1538 __asm __volatile("fc.i %0" :: "r"(va));
1539 va += 32; /* XXX */
1540 }
1541 }
1542
1543 int
1544 sysbeep(int pitch, int period)
1545 {
1546 return (ENODEV);
1547 }