1 /*-
2 * Copyright (c) 2000,2001 Doug Rabson
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 *
14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
24 * SUCH DAMAGE.
25 *
26 * $FreeBSD: releng/5.1/sys/ia64/ia64/machdep.c 115378 2003-05-29 06:30:36Z marcel $
27 */
28
29 #include "opt_compat.h"
30 #include "opt_ddb.h"
31 #include "opt_msgbuf.h"
32 #include "opt_acpi.h"
33
34 #include <sys/param.h>
35 #include <sys/systm.h>
36 #include <sys/eventhandler.h>
37 #include <sys/sysproto.h>
38 #include <sys/signalvar.h>
39 #include <sys/imgact.h>
40 #include <sys/kernel.h>
41 #include <sys/proc.h>
42 #include <sys/lock.h>
43 #include <sys/pcpu.h>
44 #include <sys/malloc.h>
45 #include <sys/reboot.h>
46 #include <sys/bio.h>
47 #include <sys/buf.h>
48 #include <sys/mbuf.h>
49 #include <sys/vmmeter.h>
50 #include <sys/msgbuf.h>
51 #include <sys/exec.h>
52 #include <sys/sysctl.h>
53 #include <sys/uio.h>
54 #include <sys/linker.h>
55 #include <sys/random.h>
56 #include <sys/cons.h>
57 #include <sys/uuid.h>
58 #include <net/netisr.h>
59 #include <vm/vm.h>
60 #include <vm/vm_kern.h>
61 #include <vm/vm_page.h>
62 #include <vm/vm_map.h>
63 #include <vm/vm_extern.h>
64 #include <vm/vm_object.h>
65 #include <vm/vm_pager.h>
66 #include <sys/user.h>
67 #include <sys/ptrace.h>
68 #include <machine/clock.h>
69 #include <machine/cpu.h>
70 #include <machine/md_var.h>
71 #include <machine/reg.h>
72 #include <machine/fpu.h>
73 #include <machine/mca.h>
74 #include <machine/pal.h>
75 #include <machine/sal.h>
76 #ifdef SMP
77 #include <machine/smp.h>
78 #endif
79 #include <machine/bootinfo.h>
80 #include <machine/mutex.h>
81 #include <machine/vmparam.h>
82 #include <machine/elf.h>
83 #include <ddb/ddb.h>
84 #include <sys/vnode.h>
85 #include <sys/ucontext.h>
86 #include <machine/sigframe.h>
87 #include <machine/efi.h>
88 #include <machine/inst.h>
89 #include <machine/unwind.h>
90 #include <i386/include/specialreg.h>
91
/* Clock frequencies derived from SAL/PAL data; see calculate_frequencies(). */
u_int64_t processor_frequency;
u_int64_t bus_frequency;
u_int64_t itc_frequency;
int cold = 1;		/* Cold boot flag; MI code clears it when up. */

/*
 * pa_bootinfo is the physical address of the bootinfo block as passed
 * to us by the loader; bootinfo is our virtual copy (see ia64_init()).
 */
u_int64_t pa_bootinfo;
struct bootinfo bootinfo;

struct pcpu early_pcpu;
extern char kstack[];		/* Statically allocated stack for proc0. */
struct user *proc0uarea;	/* U-area for proc0; set up in ia64_init(). */
vm_offset_t proc0kstack;	/* Kernel stack for proc0 (points at kstack). */

/* Kernel image boundaries, provided by the linker. */
extern u_int64_t kernel_text[], _end[];

/* Gateway page and signal trampolines; see map_gateway_page()/sendsig(). */
extern u_int64_t ia64_gateway_page[];
extern u_int64_t break_sigtramp[];
extern u_int64_t epc_sigtramp[];

/* Firmware FP software-assist interface; may be NULL (see cpu_startup()). */
FPSWA_INTERFACE *fpswa_interface;

u_int64_t ia64_pal_base;	/* Physical base address of the PAL code. */
u_int64_t ia64_port_base;	/* Base of memory-mapped I/O port space. */

char machine[] = MACHINE;
SYSCTL_STRING(_hw, HW_MACHINE, machine, CTLFLAG_RD, machine, 0, "");

static char cpu_model[128];	/* Human-readable model; set by identifycpu(). */
SYSCTL_STRING(_hw, HW_MODEL, model, CTLFLAG_RD, cpu_model, 0, "");

#ifdef DDB
/* start and end of kernel symbol table */
void *ksym_start, *ksym_end;
#endif

static void cpu_startup(void *);
SYSINIT(cpu, SI_SUB_CPU, SI_ORDER_FIRST, cpu_startup, NULL)

struct msgbuf *msgbufp=0;	/* Kernel message buffer; set up in ia64_init(). */

long Maxmem = 0;		/* Page count of usable memory (set in ia64_init()). */

/* Pairs of (start, end) physical addresses of available memory, 0-terminated. */
vm_offset_t phys_avail[100];

/* must be 2 less so 0 0 can signal end of chunks */
#define PHYS_AVAIL_ARRAY_END ((sizeof(phys_avail) / sizeof(vm_offset_t)) - 2)

void mi_startup(void);		/* XXX should be in a MI header */

static void identifycpu(void);

struct kva_md_info kmi;		/* Kernel VA submap info; see vm_ksubmap_init(). */
144
145 static void
146 cpu_startup(dummy)
147 void *dummy;
148 {
149
150 /*
151 * Good {morning,afternoon,evening,night}.
152 */
153 identifycpu();
154
155 /* startrtclock(); */
156 #ifdef PERFMON
157 perfmon_init();
158 #endif
159 printf("real memory = %ld (%ld MB)\n", ia64_ptob(Maxmem),
160 ia64_ptob(Maxmem) / 1048576);
161
162 /*
163 * Display any holes after the first chunk of extended memory.
164 */
165 if (bootverbose) {
166 int indx;
167
168 printf("Physical memory chunk(s):\n");
169 for (indx = 0; phys_avail[indx + 1] != 0; indx += 2) {
170 int size1 = phys_avail[indx + 1] - phys_avail[indx];
171
172 printf("0x%08lx - 0x%08lx, %d bytes (%d pages)\n", phys_avail[indx],
173 phys_avail[indx + 1] - 1, size1, size1 / PAGE_SIZE);
174 }
175 }
176
177 vm_ksubmap_init(&kmi);
178
179 printf("avail memory = %ld (%ld MB)\n", ptoa(cnt.v_free_count),
180 ptoa(cnt.v_free_count) / 1048576);
181
182 if (fpswa_interface == NULL)
183 printf("Warning: no FPSWA package supplied\n");
184 else
185 printf("FPSWA Revision = 0x%lx, Entry = %p\n",
186 (long)fpswa_interface->Revision,
187 (void *)fpswa_interface->Fpswa);
188
189 /*
190 * Set up buffers, so they can be used to read disk labels.
191 */
192 bufinit();
193 vm_pager_bufferinit();
194
195 /*
196 * Traverse the MADT to discover IOSAPIC and Local SAPIC
197 * information.
198 */
199 ia64_probe_sapics();
200 ia64_mca_init();
201 }
202
/*
 * Switch the CPU from thread 'old' to thread 'new'.  The outgoing
 * thread's machine state is saved in its PCB; when that saved context
 * is later resumed, savectx() returns non-zero (setjmp-style) and we
 * simply fall out of the function in the rescheduled thread.
 */
void
cpu_switch(struct thread *old, struct thread *new)
{
	struct pcb *oldpcb, *newpcb;

	oldpcb = old->td_pcb;
#if IA32
	/* Also save the ia32 compatibility register state. */
	ia32_savectx(oldpcb);
#endif
	if (!savectx(oldpcb)) {
		newpcb = new->td_pcb;
		/* Activate the incoming thread's address space. */
		oldpcb->pcb_current_pmap =
		    pmap_switch(newpcb->pcb_current_pmap);
		PCPU_SET(curthread, new);
#if IA32
		ia32_restorectx(newpcb);
#endif
		/* Resume the new thread; restorectx() does not return. */
		restorectx(newpcb);
		/* We should not get here. */
		panic("cpu_switch: restorectx() returned");
		/* NOTREACHED */
	}
}
226
/*
 * Like cpu_switch(), but the old thread's context is discarded rather
 * than saved (used when the outgoing thread is exiting).
 */
void
cpu_throw(struct thread *old __unused, struct thread *new)
{
	struct pcb *newpcb;

	newpcb = new->td_pcb;
	/* Activate the incoming thread's address space. */
	(void)pmap_switch(newpcb->pcb_current_pmap);
	PCPU_SET(curthread, new);
#if IA32
	ia32_restorectx(newpcb);
#endif
	/* Resume the new thread; restorectx() does not return. */
	restorectx(newpcb);
	/* We should not get here. */
	panic("cpu_throw: restorectx() returned");
	/* NOTREACHED */
}
243
244 void
245 cpu_pcpu_init(struct pcpu *pcpu, int cpuid, size_t size)
246 {
247 KASSERT(size >= sizeof(struct pcpu) + sizeof(struct pcb),
248 ("%s: too small an allocation for pcpu", __func__));
249 pcpu->pc_pcb = (void*)(pcpu+1);
250 }
251
252 static void
253 identifycpu(void)
254 {
255 char vendor[17];
256 u_int64_t t;
257 int number, revision, model, family, archrev;
258 u_int64_t features;
259
260 /*
261 * Assumes little-endian.
262 */
263 *(u_int64_t *) &vendor[0] = ia64_get_cpuid(0);
264 *(u_int64_t *) &vendor[8] = ia64_get_cpuid(1);
265 vendor[16] = '\0';
266
267 t = ia64_get_cpuid(3);
268 number = (t >> 0) & 0xff;
269 revision = (t >> 8) & 0xff;
270 model = (t >> 16) & 0xff;
271 family = (t >> 24) & 0xff;
272 archrev = (t >> 32) & 0xff;
273
274 if (family == 0x7)
275 strcpy(cpu_model, "Itanium");
276 else if (family == 0x1f)
277 strcpy(cpu_model, "Itanium 2"); /* McKinley */
278 else
279 snprintf(cpu_model, sizeof(cpu_model), "Family=%d", family);
280
281 features = ia64_get_cpuid(4);
282
283 printf("CPU: %s", cpu_model);
284 if (processor_frequency)
285 printf(" (%ld.%02ld-Mhz)\n",
286 (processor_frequency + 4999) / 1000000,
287 ((processor_frequency + 4999) / 10000) % 100);
288 else
289 printf("\n");
290 printf(" Origin = \"%s\" Model = %d Revision = %d\n",
291 vendor, model, revision);
292 printf(" Features = 0x%b\n", (u_int32_t) features,
293 "\020"
294 "\001LB" /* long branch (brl) instruction. */
295 "\002SD" /* Spontaneous deferral. */
296 "\003AO" /* 16-byte atomic operations (ld, st, cmpxchg). */ );
297 }
298
/*
 * Wire a translation for the PAL code into the data and instruction
 * translation registers so the firmware can be called with the MMU on.
 * The mapping is a 2^28 byte (256M) page in region 7 (the itir value
 * 28 << 2 encodes the page size).
 */
void
map_pal_code(void)
{
	struct ia64_pte pte;
	u_int64_t psr;

	if (ia64_pal_base == 0)
		return;

	/* Kernel-only, write-back cacheable, read/write/execute PTE. */
	bzero(&pte, sizeof(pte));
	pte.pte_p = 1;
	pte.pte_ma = PTE_MA_WB;
	pte.pte_a = 1;
	pte.pte_d = 1;
	pte.pte_pl = PTE_PL_KERN;
	pte.pte_ar = PTE_AR_RWX;
	pte.pte_ppn = ia64_pal_base >> 12;

	/* Purge any overlapping data/instruction translations first. */
	__asm __volatile("ptr.d %0,%1; ptr.i %0,%1" ::
	    "r"(IA64_PHYS_TO_RR7(ia64_pal_base)), "r"(28 << 2));

	/* Insert the TRs with interrupts and translation faults disabled. */
	__asm __volatile("mov %0=psr" : "=r"(psr));
	__asm __volatile("rsm psr.ic|psr.i");
	__asm __volatile("srlz.i");
	__asm __volatile("mov cr.ifa=%0" ::
	    "r"(IA64_PHYS_TO_RR7(ia64_pal_base)));
	__asm __volatile("mov cr.itir=%0" :: "r"(28 << 2));
	__asm __volatile("itr.d dtr[%0]=%1" :: "r"(1), "r"(*(u_int64_t*)&pte));
	__asm __volatile("srlz.d");		/* XXX not needed. */
	__asm __volatile("itr.i itr[%0]=%1" :: "r"(1), "r"(*(u_int64_t*)&pte));
	__asm __volatile("mov psr.l=%0" :: "r" (psr));
	__asm __volatile("srlz.i");
}
332
/*
 * Wire a data translation register for the memory-mapped I/O port
 * space so inb/outb-style accesses work.  The mapping is uncacheable
 * and 2^24 bytes (16M) in size (itir value 24 << 2).
 */
void
map_port_space(void)
{
	struct ia64_pte pte;
	u_int64_t psr;

	/* XXX we should fail hard if there's no I/O port space. */
	if (ia64_port_base == 0)
		return;

	/* Kernel-only, uncacheable, read/write PTE. */
	bzero(&pte, sizeof(pte));
	pte.pte_p = 1;
	pte.pte_ma = PTE_MA_UC;
	pte.pte_a = 1;
	pte.pte_d = 1;
	pte.pte_pl = PTE_PL_KERN;
	pte.pte_ar = PTE_AR_RW;
	pte.pte_ppn = ia64_port_base >> 12;

	/* Purge any overlapping data translation first. */
	__asm __volatile("ptr.d %0,%1" :: "r"(ia64_port_base), "r"(24 << 2));

	/* Insert the TR with interrupts and translation faults disabled. */
	__asm __volatile("mov %0=psr" : "=r" (psr));
	__asm __volatile("rsm psr.ic|psr.i");
	__asm __volatile("srlz.d");
	__asm __volatile("mov cr.ifa=%0" :: "r"(ia64_port_base));
	/* XXX We should use the size from the memory descriptor. */
	__asm __volatile("mov cr.itir=%0" :: "r"(24 << 2));
	__asm __volatile("itr.d dtr[%0]=%1" :: "r"(2), "r"(*(u_int64_t*)&pte));
	__asm __volatile("mov psr.l=%0" :: "r" (psr));
	__asm __volatile("srlz.d");
}
364
/*
 * Wire the gateway page (containing the signal trampolines and the epc
 * syscall entry) at VM_MAX_ADDRESS in both the data and instruction
 * translation registers, and publish that address in ar.k5 so userland
 * can find it.
 */
void
map_gateway_page(void)
{
	struct ia64_pte pte;
	u_int64_t psr;

	/* Kernel-owned, write-back, execute-only-for-user PTE. */
	bzero(&pte, sizeof(pte));
	pte.pte_p = 1;
	pte.pte_ma = PTE_MA_WB;
	pte.pte_a = 1;
	pte.pte_d = 1;
	pte.pte_pl = PTE_PL_KERN;
	pte.pte_ar = PTE_AR_X_RX;
	pte.pte_ppn = IA64_RR_MASK((u_int64_t)ia64_gateway_page) >> 12;

	/* Purge any overlapping translations first. */
	__asm __volatile("ptr.d %0,%1; ptr.i %0,%1" ::
	    "r"(VM_MAX_ADDRESS), "r"(PAGE_SHIFT << 2));

	/* Insert the TRs with interrupts and translation faults disabled. */
	__asm __volatile("mov %0=psr" : "=r"(psr));
	__asm __volatile("rsm psr.ic|psr.i");
	__asm __volatile("srlz.i");
	__asm __volatile("mov cr.ifa=%0" :: "r"(VM_MAX_ADDRESS));
	__asm __volatile("mov cr.itir=%0" :: "r"(PAGE_SHIFT << 2));
	__asm __volatile("itr.d dtr[%0]=%1" :: "r"(3), "r"(*(u_int64_t*)&pte));
	__asm __volatile("srlz.d");		/* XXX not needed. */
	__asm __volatile("itr.i itr[%0]=%1" :: "r"(3), "r"(*(u_int64_t*)&pte));
	__asm __volatile("mov psr.l=%0" :: "r" (psr));
	__asm __volatile("srlz.i");

	/* Expose the mapping to userland in ar.k5 */
	ia64_set_k5(VM_MAX_ADDRESS);
}
397
398 static void
399 calculate_frequencies(void)
400 {
401 struct ia64_sal_result sal;
402 struct ia64_pal_result pal;
403
404 sal = ia64_sal_entry(SAL_FREQ_BASE, 0, 0, 0, 0, 0, 0, 0);
405 pal = ia64_call_pal_static(PAL_FREQ_RATIOS, 0, 0, 0);
406
407 if (sal.sal_status == 0 && pal.pal_status == 0) {
408 if (bootverbose) {
409 printf("Platform clock frequency %ld Hz\n",
410 sal.sal_result[0]);
411 printf("Processor ratio %ld/%ld, Bus ratio %ld/%ld, "
412 "ITC ratio %ld/%ld\n",
413 pal.pal_result[0] >> 32,
414 pal.pal_result[0] & ((1L << 32) - 1),
415 pal.pal_result[1] >> 32,
416 pal.pal_result[1] & ((1L << 32) - 1),
417 pal.pal_result[2] >> 32,
418 pal.pal_result[2] & ((1L << 32) - 1));
419 }
420 processor_frequency =
421 sal.sal_result[0] * (pal.pal_result[0] >> 32)
422 / (pal.pal_result[0] & ((1L << 32) - 1));
423 bus_frequency =
424 sal.sal_result[0] * (pal.pal_result[1] >> 32)
425 / (pal.pal_result[1] & ((1L << 32) - 1));
426 itc_frequency =
427 sal.sal_result[0] * (pal.pal_result[2] >> 32)
428 / (pal.pal_result[2] & ((1L << 32) - 1));
429 }
430 }
431
/*
 * Machine-dependent early boot: copy the bootinfo block, locate I/O
 * port space and PAL code in the EFI memory map, bring up the console,
 * build phys_avail[], set up proc0/thread0 and the per-CPU data, then
 * save a context and jump into the MI startup.  Order here matters;
 * nothing may be printed before cninit().
 */
void
ia64_init(void)
{
	int phys_avail_cnt;
	vm_offset_t kernstart, kernend;
	vm_offset_t kernstartpfn, kernendpfn, pfn0, pfn1;
	char *p;
	EFI_MEMORY_DESCRIPTOR *md, *mdp;
	int mdcount, i, metadata_missing;

	/* NO OUTPUT ALLOWED UNTIL FURTHER NOTICE */

	/*
	 * TODO: Disable interrupts, floating point etc.
	 * Maybe flush cache and tlb
	 */
	ia64_set_fpsr(IA64_FPSR_DEFAULT);

	/*
	 * TODO: Get critical system information (if possible, from the
	 * information provided by the boot program).
	 */

	/*
	 * pa_bootinfo is the physical address of the bootinfo block as
	 * passed to us by the loader and set in locore.s.
	 */
	bootinfo = *(struct bootinfo *)(IA64_PHYS_TO_RR7(pa_bootinfo));

	/* Fall back to an empty bootinfo if the loader gave us garbage. */
	if (bootinfo.bi_magic != BOOTINFO_MAGIC || bootinfo.bi_version != 1) {
		bzero(&bootinfo, sizeof(bootinfo));
		bootinfo.bi_kernend = (vm_offset_t) round_page(_end);
	}

	/*
	 * Look for the I/O ports first - we need them for console
	 * probing.
	 */
	mdcount = bootinfo.bi_memmap_size / bootinfo.bi_memdesc_size;
	md = (EFI_MEMORY_DESCRIPTOR *) IA64_PHYS_TO_RR7(bootinfo.bi_memmap);

	for (i = 0, mdp = md; i < mdcount; i++,
	    mdp = NextMemoryDescriptor(mdp, bootinfo.bi_memdesc_size)) {
		if (mdp->Type == EfiMemoryMappedIOPortSpace)
			ia64_port_base = IA64_PHYS_TO_RR6(mdp->PhysicalStart);
		else if (mdp->Type == EfiPalCode)
			ia64_pal_base = mdp->PhysicalStart;
	}

	map_port_space();

	/* Pick up the module metadata and environment from the loader. */
	metadata_missing = 0;
	if (bootinfo.bi_modulep)
		preload_metadata = (caddr_t)bootinfo.bi_modulep;
	else
		metadata_missing = 1;
	if (envmode == 1)
		kern_envp = static_env;
	else
		kern_envp = (caddr_t)bootinfo.bi_envp;

	/*
	 * Look at arguments passed to us and compute boothowto.
	 */
	boothowto = bootinfo.bi_boothowto;

	/*
	 * Catch case of boot_verbose set in environment.
	 */
	if ((p = getenv("boot_verbose")) != NULL) {
		if (strcmp(p, "yes") == 0 || strcmp(p, "YES") == 0) {
			boothowto |= RB_VERBOSE;
		}
		freeenv(p);
	}

	if (boothowto & RB_VERBOSE)
		bootverbose = 1;

	/*
	 * Initialize the console before we print anything out.
	 */
	cninit();

	/* OUTPUT NOW ALLOWED */

	if (ia64_pal_base != 0) {
		/* Round down to the nearest 256M boundary. */
		ia64_pal_base &= ~((1 << 28) - 1);
		/*
		 * We use a TR to map the first 256M of memory - this might
		 * cover the palcode too.
		 */
		if (ia64_pal_base == 0)
			printf("PAL code mapped by the kernel's TR\n");
	} else
		printf("PAL code not found\n");

	/*
	 * Wire things up so we can call the firmware.
	 */
	map_pal_code();
	ia64_efi_init();
	calculate_frequencies();

	/*
	 * Find the beginning and end of the kernel.
	 */
	kernstart = trunc_page(kernel_text);
#ifdef DDB
	/* The symbol table (loaded after the image) extends the kernel. */
	ksym_start = (void *)bootinfo.bi_symtab;
	ksym_end = (void *)bootinfo.bi_esymtab;
	kernend = (vm_offset_t)round_page(ksym_end);
#else
	kernend = (vm_offset_t)round_page(_end);
#endif

	/* But if the bootstrap tells us otherwise, believe it! */
	if (bootinfo.bi_kernend)
		kernend = round_page(bootinfo.bi_kernend);
	if (metadata_missing)
		printf("WARNING: loader(8) metadata is missing!\n");

	/* Get FPSWA interface */
	fpswa_interface = (FPSWA_INTERFACE*)IA64_PHYS_TO_RR7(bootinfo.bi_fpswa);

	/* Init basic tunables, including hz */
	init_param1();

	p = getenv("kernelname");
	if (p) {
		strncpy(kernelname, p, sizeof(kernelname) - 1);
		freeenv(p);
	}

	kernstartpfn = atop(IA64_RR_MASK(kernstart));
	kernendpfn = atop(IA64_RR_MASK(kernend));

	/*
	 * Size the memory regions and load phys_avail[] with the results.
	 */

	/*
	 * Find out how much memory is available, by looking at
	 * the memory descriptors.
	 */

#ifdef DEBUG_MD
	printf("Memory descriptor count: %d\n", mdcount);
#endif

	phys_avail_cnt = 0;
	for (i = 0, mdp = md; i < mdcount; i++,
	    mdp = NextMemoryDescriptor(mdp, bootinfo.bi_memdesc_size)) {
#ifdef DEBUG_MD
		printf("MD %d: type %d pa 0x%lx cnt 0x%lx\n", i,
		    mdp->Type,
		    mdp->PhysicalStart,
		    mdp->NumberOfPages);
#endif

		/* EFI pages are 4K; round inward to whole kernel pages. */
		pfn0 = ia64_btop(round_page(mdp->PhysicalStart));
		pfn1 = ia64_btop(trunc_page(mdp->PhysicalStart
		    + mdp->NumberOfPages * 4096));
		if (pfn1 <= pfn0)
			continue;

		if (mdp->Type != EfiConventionalMemory)
			continue;

		/*
		 * Wimp out for now since we do not DTRT here with
		 * pci bus mastering (no bounce buffering, for example).
		 */
		if (pfn0 >= ia64_btop(0x100000000UL)) {
			printf("Skipping memory chunk start 0x%lx\n",
			    mdp->PhysicalStart);
			continue;
		}
		if (pfn1 >= ia64_btop(0x100000000UL)) {
			printf("Skipping memory chunk end 0x%lx\n",
			    mdp->PhysicalStart + mdp->NumberOfPages * 4096);
			continue;
		}

		/*
		 * We have a memory descriptor that describes conventional
		 * memory that is for general use. We must determine if the
		 * loader has put the kernel in this region.
		 */
		physmem += (pfn1 - pfn0);
		if (pfn0 <= kernendpfn && kernstartpfn <= pfn1) {
			/*
			 * Must compute the location of the kernel
			 * within the segment.
			 */
#ifdef DEBUG_MD
			printf("Descriptor %d contains kernel\n", i);
#endif
			if (pfn0 < kernstartpfn) {
				/*
				 * There is a chunk before the kernel.
				 */
#ifdef DEBUG_MD
				printf("Loading chunk before kernel: "
				    "0x%lx / 0x%lx\n", pfn0, kernstartpfn);
#endif
				phys_avail[phys_avail_cnt] = ia64_ptob(pfn0);
				phys_avail[phys_avail_cnt+1] = ia64_ptob(kernstartpfn);
				phys_avail_cnt += 2;
			}
			if (kernendpfn < pfn1) {
				/*
				 * There is a chunk after the kernel.
				 */
#ifdef DEBUG_MD
				printf("Loading chunk after kernel: "
				    "0x%lx / 0x%lx\n", kernendpfn, pfn1);
#endif
				phys_avail[phys_avail_cnt] = ia64_ptob(kernendpfn);
				phys_avail[phys_avail_cnt+1] = ia64_ptob(pfn1);
				phys_avail_cnt += 2;
			}
		} else {
			/*
			 * Just load this cluster as one chunk.
			 */
#ifdef DEBUG_MD
			printf("Loading descriptor %d: 0x%lx / 0x%lx\n", i,
			    pfn0, pfn1);
#endif
			phys_avail[phys_avail_cnt] = ia64_ptob(pfn0);
			phys_avail[phys_avail_cnt+1] = ia64_ptob(pfn1);
			phys_avail_cnt += 2;

		}
	}
	/* Terminate phys_avail[] with a zero entry. */
	phys_avail[phys_avail_cnt] = 0;

	Maxmem = physmem;
	init_param2(physmem);

	/*
	 * Initialize error message buffer (at end of core).
	 */
	{
		size_t sz = round_page(MSGBUF_SIZE);
		int i = phys_avail_cnt - 2;

		/* shrink so that it'll fit in the last segment */
		if (phys_avail[i+1] - phys_avail[i] < sz)
			sz = phys_avail[i+1] - phys_avail[i];

		/* Steal the space from the top of the last segment. */
		phys_avail[i+1] -= sz;
		msgbufp = (struct msgbuf*) IA64_PHYS_TO_RR7(phys_avail[i+1]);

		msgbufinit(msgbufp, sz);

		/* Remove the last segment if it now has no pages. */
		if (phys_avail[i] == phys_avail[i+1]) {
			phys_avail[i] = 0;
			phys_avail[i+1] = 0;
		}

		/* warn if the message buffer had to be shrunk */
		if (sz != round_page(MSGBUF_SIZE))
			printf("WARNING: %ld bytes not available for msgbuf in last cluster (%ld used)\n",
			    round_page(MSGBUF_SIZE), sz);

	}

	proc_linkup(&proc0, &ksegrp0, &kse0, &thread0);
	/*
	 * Init mapping for u page(s) for proc 0
	 */
	proc0uarea = (struct user *)pmap_steal_memory(UAREA_PAGES * PAGE_SIZE);
	proc0kstack = (vm_offset_t)kstack;
	proc0.p_uarea = proc0uarea;
	thread0.td_kstack = proc0kstack;
	/* The PCB lives at the top of proc0's kernel stack. */
	thread0.td_pcb = (struct pcb *)
	    (thread0.td_kstack + KSTACK_PAGES * PAGE_SIZE) - 1;
	/*
	 * Setup the global data for the bootstrap cpu.
	 */
	pcpup = (struct pcpu *)pmap_steal_memory(PAGE_SIZE);
	ia64_set_k4((u_int64_t)pcpup);
	pcpu_init(pcpup, 0, PAGE_SIZE);
	PCPU_SET(curthread, &thread0);

	/*
	 * Initialize the rest of proc 0's PCB.
	 *
	 * Set the kernel sp, reserving space for an (empty) trapframe,
	 * and make proc0's trapframe pointer point to it for sanity.
	 * Initialise proc0's backing store to start after u area.
	 *
	 * XXX what is all this +/- 16 stuff?
	 */
	thread0.td_frame = (struct trapframe *)thread0.td_pcb - 1;
	thread0.td_frame->tf_length = sizeof(struct trapframe);
	thread0.td_frame->tf_flags = FRAME_SYSCALL;
	thread0.td_pcb->pcb_special.sp =
	    (u_int64_t)thread0.td_frame - 16;
	thread0.td_pcb->pcb_special.bspstore = (u_int64_t)proc0kstack;

	mutex_init();

	/*
	 * Initialize the virtual memory system.
	 */
	pmap_bootstrap();

	/*
	 * Initialize debuggers, and break into them if appropriate.
	 */
#ifdef DDB
	kdb_init();
	if (boothowto & RB_KDB) {
		printf("Boot flags requested debugger\n");
		breakpoint();
	}
#endif
	ia64_set_tpr(0);

	/*
	 * Save our current context so that we have a known (maybe even
	 * sane) context as the initial context for new threads that are
	 * forked from us. If any of those threads (including thread0)
	 * does something wrong, we may be lucky and return here where
	 * we're ready for them with a nice panic.
	 */
	if (!savectx(thread0.td_pcb))
		mi_startup();

	/* We should not get here. */
	panic("ia64_init: Whooaa there!");
	/* NOTREACHED */
}
769
770 void
771 bzero(void *buf, size_t len)
772 {
773 caddr_t p = buf;
774
775 while (((vm_offset_t) p & (sizeof(u_long) - 1)) && len) {
776 *p++ = 0;
777 len--;
778 }
779 while (len >= sizeof(u_long) * 8) {
780 *(u_long*) p = 0;
781 *((u_long*) p + 1) = 0;
782 *((u_long*) p + 2) = 0;
783 *((u_long*) p + 3) = 0;
784 len -= sizeof(u_long) * 8;
785 *((u_long*) p + 4) = 0;
786 *((u_long*) p + 5) = 0;
787 *((u_long*) p + 6) = 0;
788 *((u_long*) p + 7) = 0;
789 p += sizeof(u_long) * 8;
790 }
791 while (len >= sizeof(u_long)) {
792 *(u_long*) p = 0;
793 len -= sizeof(u_long);
794 p += sizeof(u_long);
795 }
796 while (len) {
797 *p++ = 0;
798 len--;
799 }
800 }
801
/*
 * Busy-wait for at least 'n' microseconds using the interval time
 * counter (ITC).
 */
void
DELAY(int n)
{
	u_int64_t start, end, now;

	start = ia64_get_itc();
	end = start + (itc_frequency * n) / 1000000;
	/* printf("DELAY from 0x%lx to 0x%lx\n", start, end); */
	do {
		now = ia64_get_itc();
		/*
		 * Spin until the ITC reaches 'end'.  The second clause
		 * handles 'end' having wrapped past zero: keep waiting
		 * while 'now' is still on the pre-wrap (high) side.
		 */
	} while (now < end || (now > start && end < start));
}
814
815 /*
816 * Send an interrupt (signal) to a process.
817 */
/*
 * Deliver signal 'sig' to the current thread: build a sigframe with the
 * saved user context on the user (or alternate) stack, and redirect the
 * trapframe to the appropriate signal trampoline in the gateway page.
 * Called with the proc lock and psp->ps_mtx held; both are dropped
 * around the copyout and reacquired before returning.
 */
void
sendsig(sig_t catcher, int sig, sigset_t *mask, u_long code)
{
	struct proc *p;
	struct thread *td;
	struct trapframe *tf;
	struct sigacts *psp;
	struct sigframe sf, *sfp;
	mcontext_t *mc;
	u_int64_t sbs, sp;
	int oonstack;

	td = curthread;
	p = td->td_proc;
	PROC_LOCK_ASSERT(p, MA_OWNED);
	psp = p->p_sigacts;
	mtx_assert(&psp->ps_mtx, MA_OWNED);
	tf = td->td_frame;
	sp = tf->tf_special.sp;
	oonstack = sigonstack(sp);
	sbs = 0;

	/* save user context */
	bzero(&sf, sizeof(struct sigframe));
	sf.sf_uc.uc_sigmask = *mask;
	sf.sf_uc.uc_stack = p->p_sigstk;
	sf.sf_uc.uc_stack.ss_flags = (p->p_flag & P_ALTSTACK)
	    ? ((oonstack) ? SS_ONSTACK : 0) : SS_DISABLE;

	/*
	 * Allocate and validate space for the signal handler
	 * context. Note that if the stack is in P0 space, the
	 * call to grow() is a nop, and the useracc() check
	 * will fail if the process has not already allocated
	 * the space with a `brk'.
	 */
	if ((p->p_flag & P_ALTSTACK) != 0 && !oonstack &&
	    SIGISMEMBER(psp->ps_sigonstack, sig)) {
		/* Use the alternate stack, 16-byte aligned. */
		sbs = (u_int64_t)p->p_sigstk.ss_sp;
		sbs = (sbs + 15) & ~15;
		sfp = (struct sigframe *)(sbs + p->p_sigstk.ss_size);
#if defined(COMPAT_43) || defined(COMPAT_SUNOS)
		p->p_sigstk.ss_flags |= SS_ONSTACK;
#endif
	} else
		sfp = (struct sigframe *)sp;
	/* Reserve the frame below sfp, 16-byte aligned. */
	sfp = (struct sigframe *)((u_int64_t)(sfp - 1) & ~15);

	/* Fill in the siginfo structure for POSIX handlers. */
	if (SIGISMEMBER(psp->ps_siginfo, sig)) {
		sf.sf_si.si_signo = sig;
		sf.sf_si.si_code = code;
		sf.sf_si.si_addr = (void*)tf->tf_special.ifa;
		/* SA_SIGINFO handlers get a pointer to the siginfo. */
		code = (u_int64_t)&sfp->sf_si;
	}

	/* Drop the locks for the copyout below. */
	mtx_unlock(&psp->ps_mtx);
	PROC_UNLOCK(p);

	/* Capture the machine context into the frame. */
	mc = &sf.sf_uc.uc_mcontext;
	mc->mc_special = tf->tf_special;
	mc->mc_scratch = tf->tf_scratch;
	if ((tf->tf_flags & FRAME_SYSCALL) == 0) {
		/* Interrupt frames carry valid scratch FP state. */
		mc->mc_flags |= IA64_MC_FLAGS_SCRATCH_VALID;
		mc->mc_scratch_fp = tf->tf_scratch_fp;
		/*
		 * XXX High FP. If the process has never used the high FP,
		 * mark the high FP as valid (zero defaults). If the process
		 * did use the high FP, then store them in the PCB if not
		 * already there (ie get them from the CPU that has them)
		 * and write them in the context.
		 */
	}
	save_callee_saved(&mc->mc_preserved);
	save_callee_saved_fp(&mc->mc_preserved_fp);

	/* Copy the frame out to userland. */
	if (copyout(&sf, sfp, sizeof(sf)) != 0) {
		/*
		 * Process has trashed its stack; give it an illegal
		 * instruction to halt it in its tracks.
		 */
		PROC_LOCK(p);
		sigexit(td, SIGILL);
		return;
	}

	/*
	 * Pick the trampoline: break-based frames restart at the break
	 * trampoline's iip, epc-based (syscall) frames return through rp.
	 * The trampolines live in the gateway page located via ar.k5.
	 */
	if ((tf->tf_flags & FRAME_SYSCALL) == 0) {
		tf->tf_special.psr &= ~IA64_PSR_RI;
		tf->tf_special.iip = ia64_get_k5() +
		    ((uint64_t)break_sigtramp - (uint64_t)ia64_gateway_page);
	} else
		tf->tf_special.rp = ia64_get_k5() +
		    ((uint64_t)epc_sigtramp - (uint64_t)ia64_gateway_page);

	/*
	 * Setup the trapframe to return to the signal trampoline. We pass
	 * information to the trampoline in the following registers:
	 *
	 * gp	new backing store or NULL
	 * r8	signal number
	 * r9	signal code or siginfo pointer
	 * r10	signal handler (function descriptor)
	 */
	tf->tf_special.sp = (u_int64_t)sfp - 16;
	tf->tf_special.gp = sbs;
	tf->tf_scratch.gr8 = sig;
	tf->tf_scratch.gr9 = code;
	tf->tf_scratch.gr10 = (u_int64_t)catcher;

	PROC_LOCK(p);
	mtx_lock(&psp->ps_mtx);
}
931
932 /*
933 * System call to cleanup state after a signal
934 * has been taken. Reset signal mask and
935 * stack state from context left by sendsig (above).
936 * Return to previous pc and psl as specified by
937 * context left by sendsig. Check carefully to
938 * make sure that the user has not modified the
939 * state to gain improper privileges.
940 *
941 * MPSAFE
942 */
int
sigreturn(struct thread *td,
	struct sigreturn_args /* {
		ucontext_t *sigcntxp;
	} */ *uap)
{
	ucontext_t uc;
	struct trapframe *tf;
	struct __mcontext *mc;
	struct proc *p;
	struct pcb *pcb;

	tf = td->td_frame;
	p = td->td_proc;
	pcb = td->td_pcb;

	/*
	 * Fetch the entire context structure at once for speed.
	 * We don't use a normal argument to simplify RSE handling.
	 */
	if (copyin(uap->sigcntxp, (caddr_t)&uc, sizeof(uc)))
		return (EFAULT);

	/*
	 * XXX make sure ndirty in the current trapframe is less than
	 * 0x1f8 so that if we throw away the current register stack,
	 * we have reached the bottom of the kernel register stack.
	 * See also exec_setregs.
	 */

	/*
	 * Restore the user-supplied information
	 */
	mc = &uc.uc_mcontext;
	tf->tf_special = mc->mc_special;
	tf->tf_scratch = mc->mc_scratch;
	/* Scratch FP state is only present for non-syscall frames. */
	if ((mc->mc_flags & IA64_MC_FLAGS_SCRATCH_VALID) != 0) {
		tf->tf_scratch_fp = mc->mc_scratch_fp;
		/* XXX high FP. */
	}
	restore_callee_saved(&mc->mc_preserved);
	restore_callee_saved_fp(&mc->mc_preserved_fp);

	PROC_LOCK(p);
#if defined(COMPAT_43) || defined(COMPAT_SUNOS)
	/* Keep the compat SS_ONSTACK flag in sync with the restored sp. */
	if (sigonstack(tf->tf_special.sp))
		p->p_sigstk.ss_flags |= SS_ONSTACK;
	else
		p->p_sigstk.ss_flags &= ~SS_ONSTACK;
#endif
	/* Restore the signal mask; SIG_CANTMASK strips unblockable signals. */
	td->td_sigmask = uc.uc_sigmask;
	SIG_CANTMASK(td->td_sigmask);
	signotify(td);
	PROC_UNLOCK(p);

	return (EJUSTRETURN);
}
1000
#ifdef COMPAT_FREEBSD4
int
freebsd4_sigreturn(struct thread *td, struct freebsd4_sigreturn_args *uap)
{

	/* Hand off to the native sigreturn(); the argument is reinterpreted. */
	return (sigreturn(td, (struct sigreturn_args *)uap));
}
#endif
1009
int
get_mcontext(struct thread *td, mcontext_t *mcp, int clear_ret)
{

	/* Not implemented on ia64. */
	return (ENOSYS);
}
1016
int
set_mcontext(struct thread *td, const mcontext_t *mcp)
{

	/* Not implemented on ia64. */
	return (ENOSYS);
}
1023
1024 /*
1025 * Machine dependent boot() routine
1026 */
void
cpu_boot(int howto)
{

	/* 'howto' is ignored: always warm-reset via the EFI runtime services. */
	ia64_efi_runtime->ResetSystem(EfiResetWarm, EFI_SUCCESS, 0, 0);
}
1033
1034 /*
1035 * Shutdown the CPU as much as possible
1036 */
void
cpu_halt(void)
{

	/* Same as cpu_boot(): warm-reset via the EFI runtime services. */
	ia64_efi_runtime->ResetSystem(EfiResetWarm, EFI_SUCCESS, 0, 0);
}
1043
1044 /*
1045 * Clear registers on exec.
1046 */
void
exec_setregs(struct thread *td, u_long entry, u_long stack, u_long ps_strings)
{
	struct trapframe *tf;
	uint64_t bspst, kstack, ndirty;
	size_t rssz;

	tf = td->td_frame;
	kstack = td->td_kstack;

	/*
	 * RSE magic: We have ndirty registers of the process on the kernel
	 * stack which don't belong to the new image. Discard them. Note
	 * that for the "legacy" syscall support we need to keep 3 registers
	 * worth of dirty bytes. These 3 registers are the initial arguments
	 * to the newly executing program.
	 * However, we cannot discard all the ndirty registers by simply
	 * moving the kernel related registers to the bottom of the kernel
	 * stack and lowering the current bspstore, because we get into
	 * trouble with the NaT collections. We need to keep that in sync
	 * with the registers. Hence, we can only copy a multiple of 512
	 * bytes. Consequently, we may end up with some registers of the
	 * previous image on the kernel stack. This we ignore by making
	 * sure we mask-off the lower 9 bits of the bspstore value just
	 * prior to saving it in ar.k6.
	 */
	ndirty = tf->tf_special.ndirty & ~0x1ff;
	if (ndirty > 0) {
		/* Stop RSE activity while rearranging the backing store. */
		__asm __volatile("mov ar.rsc=0;;");
		__asm __volatile("mov %0=ar.bspstore" : "=r"(bspst));
		rssz = bspst - kstack - ndirty;
		bcopy((void*)(kstack + ndirty), (void*)kstack, rssz);
		bspst -= ndirty;
		__asm __volatile("mov ar.bspstore=%0;;" :: "r"(bspst));
		__asm __volatile("mov ar.rsc=3");
		tf->tf_special.ndirty -= ndirty;
	}
	ndirty = tf->tf_special.ndirty;

	/* Start the new image from a clean special-register state. */
	bzero(&tf->tf_special, sizeof(tf->tf_special));

	if ((tf->tf_flags & FRAME_SYSCALL) == 0) {	/* break syscalls. */
		bzero(&tf->tf_scratch, sizeof(tf->tf_scratch));
		bzero(&tf->tf_scratch_fp, sizeof(tf->tf_scratch_fp));
		tf->tf_special.iip = entry;
		tf->tf_special.cfm = (1UL<<63) | (3UL<<7) | 3UL;
		tf->tf_special.bspstore = td->td_md.md_bspstore;
		/* 3 dirty registers (24 bytes) carry the initial arguments. */
		tf->tf_special.ndirty = 24;
		/*
		 * Copy the arguments onto the kernel register stack so that
		 * they get loaded by the loadrs instruction.
		 */
		*(uint64_t*)(kstack + ndirty - 24) = stack;
		*(uint64_t*)(kstack + ndirty - 16) = ps_strings;
		*(uint64_t*)(kstack + ndirty - 8) = 0;
	} else {				/* epc syscalls (default). */
		tf->tf_special.rp = entry;
		tf->tf_special.pfs = (3UL<<62) | (3UL<<7) | 3UL;
		tf->tf_special.bspstore = td->td_md.md_bspstore + 24;
		/*
		 * Write values for out0, out1 and out2 to the user's backing
		 * store and arrange for them to be restored into the user's
		 * initial register frame.
		 * Assumes that (bspstore & 0x1f8) < 0x1e0.
		 */
		suword((caddr_t)tf->tf_special.bspstore - 24, stack);
		suword((caddr_t)tf->tf_special.bspstore - 16, ps_strings);
		suword((caddr_t)tf->tf_special.bspstore - 8, 0);
	}

	/* 16-byte align the stack pointer and back off 16 bytes. */
	tf->tf_special.sp = (stack & ~15) - 16;
	tf->tf_special.rsc = 0xf;
	tf->tf_special.fpsr = IA64_FPSR_DEFAULT;
	tf->tf_special.psr = IA64_PSR_IC | IA64_PSR_I | IA64_PSR_IT |
	    IA64_PSR_DT | IA64_PSR_RT | IA64_PSR_DFH | IA64_PSR_BN |
	    IA64_PSR_CPL_USER;
}
1124
1125 int
1126 ptrace_set_pc(struct thread *td, unsigned long addr)
1127 {
1128 uint64_t slot;
1129
1130 switch (addr & 0xFUL) {
1131 case 0:
1132 slot = IA64_PSR_RI_0;
1133 break;
1134 case 1:
1135 /* XXX we need to deal with MLX bundles here */
1136 slot = IA64_PSR_RI_1;
1137 break;
1138 case 2:
1139 slot = IA64_PSR_RI_2;
1140 break;
1141 default:
1142 return (EINVAL);
1143 }
1144
1145 td->td_frame->tf_special.iip = addr & ~0x0FULL;
1146 td->td_frame->tf_special.psr =
1147 (td->td_frame->tf_special.psr & ~IA64_PSR_RI) | slot;
1148 return (0);
1149 }
1150
1151 int
1152 ptrace_single_step(struct thread *td)
1153 {
1154
1155 td->td_frame->tf_special.psr |= IA64_PSR_SS;
1156 return (0);
1157 }
1158
1159 int
1160 ia64_pa_access(vm_offset_t pa)
1161 {
1162 return VM_PROT_READ|VM_PROT_WRITE;
1163 }
1164
1165 int
1166 fill_regs(struct thread *td, struct reg *regs)
1167 {
1168 struct trapframe *tf;
1169
1170 tf = td->td_frame;
1171 regs->r_special = tf->tf_special;
1172 regs->r_scratch = tf->tf_scratch;
1173 /* XXX preserved */
1174 return (0);
1175 }
1176
1177 int
1178 set_regs(struct thread *td, struct reg *regs)
1179 {
1180 struct trapframe *tf;
1181
1182 tf = td->td_frame;
1183 tf->tf_special = regs->r_special;
1184 tf->tf_scratch = regs->r_scratch;
1185 /* XXX preserved */
1186 return (0);
1187 }
1188
int
fill_dbregs(struct thread *td, struct dbreg *dbregs)
{

	/* Debug register access is not implemented on this platform. */
	return (ENOSYS);
}
1195
int
set_dbregs(struct thread *td, struct dbreg *dbregs)
{

	/* Debug register access is not implemented on this platform. */
	return (ENOSYS);
}
1202
1203 int
1204 fill_fpregs(struct thread *td, struct fpreg *fpregs)
1205 {
1206 struct trapframe *frame = td->td_frame;
1207 struct pcb *pcb = td->td_pcb;
1208
1209 /* Save the high FP registers. */
1210 ia64_highfp_save(td);
1211
1212 fpregs->fpr_scratch = frame->tf_scratch_fp;
1213 /* XXX preserved_fp */
1214 fpregs->fpr_high = pcb->pcb_high_fp;
1215 return (0);
1216 }
1217
1218 int
1219 set_fpregs(struct thread *td, struct fpreg *fpregs)
1220 {
1221 struct trapframe *frame = td->td_frame;
1222 struct pcb *pcb = td->td_pcb;
1223
1224 /* Throw away the high FP registers (should be redundant). */
1225 ia64_highfp_drop(td);
1226
1227 frame->tf_scratch_fp = fpregs->fpr_scratch;
1228 /* XXX preserved_fp */
1229 pcb->pcb_high_fp = fpregs->fpr_high;
1230 return (0);
1231 }
1232
1233 /*
1234 * High FP register functions.
1235 * XXX no synchronization yet.
1236 */
1237
1238 int
1239 ia64_highfp_drop(struct thread *td)
1240 {
1241 struct pcb *pcb;
1242 struct pcpu *cpu;
1243 struct thread *thr;
1244
1245 pcb = td->td_pcb;
1246 cpu = pcb->pcb_fpcpu;
1247 if (cpu == NULL)
1248 return (0);
1249 pcb->pcb_fpcpu = NULL;
1250 thr = cpu->pc_fpcurthread;
1251 cpu->pc_fpcurthread = NULL;
1252
1253 /* Post-mortem sanity checking. */
1254 KASSERT(thr == td, ("Inconsistent high FP state"));
1255 return (1);
1256 }
1257
1258 int
1259 ia64_highfp_load(struct thread *td)
1260 {
1261 struct pcb *pcb;
1262
1263 pcb = td->td_pcb;
1264 KASSERT(pcb->pcb_fpcpu == NULL, ("FP race on thread"));
1265 KASSERT(PCPU_GET(fpcurthread) == NULL, ("FP race on pcpu"));
1266 restore_high_fp(&pcb->pcb_high_fp);
1267 PCPU_SET(fpcurthread, td);
1268 pcb->pcb_fpcpu = pcpup;
1269 return (1);
1270 }
1271
int
ia64_highfp_save(struct thread *td)
{
	struct pcb *pcb;
	struct pcpu *cpu;
	struct thread *thr;

	/* Don't save if the high FP registers weren't modified. */
	if ((td->td_frame->tf_special.psr & IA64_PSR_MFH) == 0)
		return (ia64_highfp_drop(td));

	pcb = td->td_pcb;
	cpu = pcb->pcb_fpcpu;
	/* No CPU holds this thread's high FP state; nothing to save. */
	if (cpu == NULL)
		return (0);
#ifdef SMP
	if (cpu != pcpup) {
		/*
		 * The state lives on another CPU: ask it to save via IPI
		 * and spin until pcb_fpcpu shows the save completed.
		 * NOTE(review): the loop waits for pcb_fpcpu != cpu;
		 * presumably the IPI handler clears/retargets it — the
		 * handler is not visible here.
		 */
		ipi_send(cpu->pc_lid, IPI_HIGH_FP);
		while (pcb->pcb_fpcpu != cpu)
			DELAY(100);
		return (1);
	}
#endif
	/* Local CPU owns the state: save it and release ownership. */
	save_high_fp(&pcb->pcb_high_fp);
	pcb->pcb_fpcpu = NULL;
	thr = cpu->pc_fpcurthread;
	cpu->pc_fpcurthread = NULL;

	/* Post-mortem sanity checking. */
	KASSERT(thr == td, ("Inconsistent high FP state"));
	return (1);
}
1304
1305 #ifndef DDB
void
Debugger(const char *msg)
{

	/* Without DDB compiled in, merely report that we were invoked. */
	printf("Debugger(\"%s\") called.\n", msg);
}
1311 #endif /* no DDB */
1312
1313 static int
1314 sysctl_machdep_adjkerntz(SYSCTL_HANDLER_ARGS)
1315 {
1316 int error;
1317 error = sysctl_handle_int(oidp, oidp->oid_arg1, oidp->oid_arg2,
1318 req);
1319 if (!error && req->newptr)
1320 resettodr();
1321 return (error);
1322 }
1323
/* machdep.adjkerntz: writable; writing triggers an RTC update. */
SYSCTL_PROC(_machdep, CPU_ADJKERNTZ, adjkerntz, CTLTYPE_INT|CTLFLAG_RW,
    &adjkerntz, 0, sysctl_machdep_adjkerntz, "I", "");

/* machdep.disable_rtc_set: when set, suppress writes to the RTC. */
SYSCTL_INT(_machdep, CPU_DISRTCSET, disable_rtc_set,
    CTLFLAG_RW, &disable_rtc_set, 0, "");

/* machdep.wall_cmos_clock: RTC keeps wall time rather than UTC. */
SYSCTL_INT(_machdep, CPU_WALLCLOCK, wall_cmos_clock,
    CTLFLAG_RW, &wall_cmos_clock, 0, "");
1332
1333 /*
1334 * Utility functions for manipulating instruction bundles.
1335 */
1336 void
1337 ia64_unpack_bundle(u_int64_t low, u_int64_t high, struct ia64_bundle *bp)
1338 {
1339 bp->template = low & 0x1f;
1340 bp->slot[0] = (low >> 5) & ((1L<<41) - 1);
1341 bp->slot[1] = (low >> 46) | ((high & ((1L<<23) - 1)) << 18);
1342 bp->slot[2] = (high >> 23);
1343 }
1344
1345 void
1346 ia64_pack_bundle(u_int64_t *lowp, u_int64_t *highp,
1347 const struct ia64_bundle *bp)
1348 {
1349 u_int64_t low, high;
1350
1351 low = bp->template | (bp->slot[0] << 5) | (bp->slot[1] << 46);
1352 high = (bp->slot[1] >> 18) | (bp->slot[2] << 23);
1353 *lowp = low;
1354 *highp = high;
1355 }
/* Cache object: 6238af15eadcf4e988467dd124d940d2 */