FreeBSD/Linux Kernel Cross Reference
sys/i386/pcb.c
1 /*
2 * Mach Operating System
3 * Copyright (c) 1993,1992,1991,1990 Carnegie Mellon University
4 * All Rights Reserved.
5 *
6 * Permission to use, copy, modify and distribute this software and its
7 * documentation is hereby granted, provided that both the copyright
8 * notice and this permission notice appear in all copies of the
9 * software, derivative works or modified versions, and any portions
10 * thereof, and that both notices appear in supporting documentation.
11 *
12 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
13 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
14 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
15 *
16 * Carnegie Mellon requests users of this software to return to
17 *
18 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
19 * School of Computer Science
20 * Carnegie Mellon University
21 * Pittsburgh PA 15213-3890
22 *
23 * any improvements or extensions that they make and grant Carnegie Mellon
24 * the rights to redistribute these changes.
25 */
26 /*
27 * HISTORY
28 * $Log: pcb.c,v $
29 * Revision 2.15 93/11/17 16:37:27 dbg
30 * Added ANSI function prototypes.
31 * [93/05/04 dbg]
32 *
33 * Revision 2.14 93/01/14 17:29:21 danner
34 * Added include of mach/std_types.h
35 * [92/12/10 17:41:42 af]
36 *
37 * Revision 2.13 92/01/03 20:08:34 dbg
38 * Disable thread_set_state of ISA_PORT_MAP, but have it still
39 * return KERN_SUCCESS (for DOS emulator compatibility).
40 * [91/12/06 dbg]
41 *
42 * Add user ldt management. Move floating-point state
43 * manipulation to i386/fpu.{c,h}.
44 *
45 * Add user_stack_low and set_user_regs for passing control to
46 * bootstrap in user space.
47 * [91/10/30 dbg]
48 *
49 * Revision 2.12 91/10/09 16:07:08 af
50 * Set value of kernel_stack field in stack_handoff().
51 * [91/08/29 tak]
52 *
53 * Revision 2.11 91/07/31 17:39:56 dbg
54 * Add thread_set_syscall_return.
55 *
56 * Save user regs directly in PCB on trap, and switch to separate
57 * kernel stack.
58 *
59 * Add v8086 mode interrupt support.
60 * [91/07/30 16:56:09 dbg]
61 *
62 * Revision 2.10 91/05/14 16:13:06 mrt
63 * Correcting copyright
64 *
65 * Revision 2.9 91/05/08 12:40:34 dbg
66 * Use iopb_tss_t for IO permission bitmap.
67 * [91/03/21 dbg]
68 *
69 * Revision 2.8 91/03/16 14:44:51 rpd
70 * Pulled i386_fpsave_state out of i386_machine_state.
71 * Added pcb_module_init.
72 * [91/02/18 rpd]
73 *
74 * Replaced stack_switch with stack_handoff and
75 * switch_task_context with switch_context.
76 * [91/02/18 rpd]
77 * Added active_stacks.
78 * [91/01/29 rpd]
79 *
80 * Revision 2.7 91/02/05 17:13:19 mrt
81 * Changed to new Mach copyright
82 * [91/02/01 17:36:24 mrt]
83 *
84 * Revision 2.6 91/01/09 22:41:41 rpd
85 * Revised the pcb yet again.
86 * Picked up i386_ISA_PORT_MAP_STATE flavors.
87 * Added load_context, switch_task_context cover functions.
88 * [91/01/09 rpd]
89 *
90 * Revision 2.5 91/01/08 15:10:58 rpd
91 * Removed pcb_synch. Added pcb_collect.
92 * [91/01/03 rpd]
93 *
94 * Split i386_machine_state off of i386_kernel_state.
95 * Set k_stack_top correctly for V8086 threads.
96 * [90/12/31 rpd]
97 * Added stack_switch. Moved stack_alloc_try, stack_alloc,
98 * stack_free, stack_statistics to kern/thread.c.
99 * [90/12/14 rpd]
100 *
101 * Reorganized the pcb.
102 * Added stack_attach, stack_alloc, stack_alloc_try,
103 * stack_free, stack_statistics.
104 * [90/12/11 rpd]
105 *
106 * Revision 2.4 90/08/27 21:57:34 dbg
107 * Return correct count from thread_getstatus.
108 * [90/08/22 dbg]
109 *
110 * Revision 2.3 90/08/07 14:24:47 rpd
111 * Include seg.h for segment names.
112 * [90/07/17 dbg]
113 *
114 * Revision 2.2 90/05/03 15:35:51 dbg
115 * Created.
116 * [90/02/08 dbg]
117 *
118 */
119
120 #include <cpus.h>
121 #include <mach_debug.h>
122
123 #include <mach/std_types.h>
124 #include <mach/kern_return.h>
125 #include <mach/thread_status.h>
126 #include <mach/vm_param.h>
127
128 #include <kern/counters.h>
129 #include <kern/mach_param.h>
130 #include <kern/memory.h>
131 #include <kern/thread.h>
132 #include <kern/sched_prim.h>
133 #include <vm/vm_kern.h>
134 #include <vm/pmap.h>
135
136 #include <i386/thread.h>
137 #include <i386/eflags.h>
138 #include <i386/proc_reg.h>
139 #include <i386/seg.h>
140 #include <i386/tss.h>
141 #include <i386/user_ldt.h>
142 #include <i386/fpu.h>
143
144 #if NCPUS > 1
145 #include <i386/mp_desc.h>
146 #endif
147
148 extern no_return Load_context(thread_t);
149 extern thread_t Switch_context(
150 thread_t old_thread,
151 continuation_t continuation,
152 thread_t new_thread);
153 extern no_return Thread_continue(void);
154
155 extern struct i386_tss ktss;
156 extern struct fake_descriptor gdt[];
157
158 extern iopb_tss_t iopb_create();
159 extern void iopb_destroy();
160 extern void user_ldt_free();
161
162 zone_t pcb_zone;
163
164 vm_offset_t kernel_stack[NCPUS]; /* top of active_stack */
165
166 /*
167 * stack_attach:
168 *
169 * Attach a kernel stack to a thread.
 *
 * Seeds the saved kernel state at the top of the stack so that the
 * first Load_context/Switch_context onto this stack resumes in
 * Thread_continue, which in turn invokes `continuation' with the
 * switch's return value as its argument.
170 */
171
172 void stack_attach(
173 register thread_t thread,
174 register vm_offset_t stack,
175 no_return (*continuation)(thread_t))
176 {
 /* bookkeeping only when counters are configured in */
177 counter(if (++c_stacks_current > c_stacks_max)
178 c_stacks_max = c_stacks_current);
179
180 thread->kernel_stack = stack;
181
182 /*
183 * We want to run continuation, giving it as an argument
184 * the return value from Load_context/Switch_context.
185 * Thread_continue takes care of the mismatch between
186 * the argument-passing/return-value conventions.
187 * This function will not return normally,
188 * so we don`t have to worry about a return address.
 *
 * NOTE(review): the k_eip/k_ebx/k_esp layout is shared with the
 * assembly context-switch code; keep these three stores together.
189 */
190 STACK_IKS(stack)->k_eip = (int) Thread_continue;
191 STACK_IKS(stack)->k_ebx = (int) continuation;
192 STACK_IKS(stack)->k_esp = (int) STACK_IEL(stack);
193
194 /*
195 * Point top of kernel stack to user`s registers.
 * Traps follow this exception link to find where the
 * user-mode register state is saved (the PCB).
196 */
197 STACK_IEL(stack)->saved_state = &thread->pcb->iss;
198 }
199
200 /*
201 * stack_detach:
202 *
203 * Detaches a kernel stack from a thread, returning the old stack.
204 */
205
206 vm_offset_t stack_detach(
207 register thread_t thread)
208 {
209 register vm_offset_t stack;
210
211 counter(if (--c_stacks_current < c_stacks_min)
212 c_stacks_min = c_stacks_current);
213
214 stack = thread->kernel_stack;
215 thread->kernel_stack = 0;
216
217 return stack;
218 }
219
220 #if NCPUS > 1
221 #define curr_gdt(mycpu) (mp_gdt[mycpu])
222 #define curr_ktss(mycpu) (mp_ktss[mycpu])
223 #else
224 #define curr_gdt(mycpu) (gdt)
225 #define curr_ktss(mycpu) (&ktss)
226 #endif
227
228 #define gdt_desc_p(mycpu,sel) \
229 ((struct real_descriptor *)&curr_gdt(mycpu)[sel_idx(sel)])
230
/*
 * switch_ktss:
 *
 * Point the hardware task state at the new thread's PCB:
 * set the ring-0 stack pointer for traps, select the proper
 * TSS (kernel or per-thread IO-bitmap TSS), select the proper
 * LDT (system or per-task), and load the FPU context if needed.
 */
231 void switch_ktss(
232 register pcb_t pcb)
233 {
234 #if NCPUS > 1
235 int mycpu = cpu_number();
236 #endif
 /*
 * NOTE(review): when NCPUS == 1, `mycpu' is never declared; the
 * curr_gdt/curr_ktss macros discard their argument in that case,
 * so the references below vanish at preprocessing time — confirm
 * gdt_desc_p expands the same way in the single-CPU build.
 */
237
238 {
239 register iopb_tss_t tss = pcb->ims.io_tss;
240 vm_offset_t pcb_stack_top;
241
242 /*
243 * Save a pointer to the top of the "kernel" stack -
244 * actually the place in the PCB where a trap into
245 * kernel mode will push the registers.
246 * The location depends on V8086 mode. If we are
247 * not in V8086 mode, then a trap into the kernel
248 * won`t save the v86 segments, so we leave room.
249 */
250
251 pcb_stack_top = (pcb->iss.efl & EFL_VM)
252 ? (int) (&pcb->iss + 1)
253 : (int) (&pcb->iss.v86_segs);
254
255 if (tss == 0) {
256 /*
257 * No per-thread IO permissions.
258 * Use standard kernel TSS.
 * Only reload TR if the kernel TSS is not already
 * the active (busy) one.
259 */
260 if (!(gdt_desc_p(mycpu,KERNEL_TSS)->access & ACC_TSS_BUSY))
261 set_tr(KERNEL_TSS);
262 curr_ktss(mycpu)->esp0 = pcb_stack_top;
263 }
264 else {
265 /*
266 * Set the IO permissions. Use this thread`s TSS.
 * Install the thread's TSS descriptor, load it, then
 * clear the kernel TSS busy bit so the kernel TSS can
 * be reloaded later without faulting.
267 */
268 *gdt_desc_p(mycpu,USER_TSS)
269 = *(struct real_descriptor *)tss->iopb_desc;
270 tss->tss.esp0 = pcb_stack_top;
271 set_tr(USER_TSS);
272 gdt_desc_p(mycpu,KERNEL_TSS)->access &= ~ ACC_TSS_BUSY;
273 }
274 }
275
276 {
277 register user_ldt_t ldt = pcb->ims.ldt;
278 /*
279 * Set the thread`s LDT.
280 */
281 if (ldt == 0) {
282 /*
283 * Use system LDT.
284 */
285 set_ldt(KERNEL_LDT);
286 }
287 else {
288 /*
289 * Thread has its own LDT.
 * Install its descriptor in the GDT before loading.
290 */
291 *gdt_desc_p(mycpu,USER_LDT) = ldt->desc;
292 set_ldt(USER_LDT);
293 }
294 }
295 /*
296 * Load the floating-point context, if necessary.
297 */
298 fpu_load_context(pcb);
299
300 }
301
302 /*
303 * stack_handoff:
304 *
305 * Move the current thread's kernel stack to the new thread.
 *
 * Saves `old's FPU state, switches address maps if the task
 * changes, loads `new's hardware state, and transfers the
 * running kernel stack to `new'.  Statement order matters:
 * the FPU must be saved before the user map is deactivated.
306 */
307
308 void stack_handoff(
309 register thread_t old,
310 register thread_t new)
311 {
312 register int mycpu = cpu_number();
313 register vm_offset_t stack;
314
315 /*
316 * Save FP registers if in use.
317 */
318 fpu_save_context(old);
319
320 /*
321 * Switch address maps if switching tasks.
322 */
323 {
324 task_t old_task, new_task;
325
326 if ((old_task = old->task) != (new_task = new->task)) {
327 PMAP_DEACTIVATE_USER(vm_map_pmap(old_task->map),
328 old, mycpu);
329 PMAP_ACTIVATE_USER(vm_map_pmap(new_task->map),
330 new, mycpu);
331 }
332 }
333
334 /*
335 * Load the rest of the user state for the new thread
 * (TSS ring-0 stack pointer, LDT, FPU context).
336 */
337 switch_ktss(new->pcb);
338
339 /*
340 * Switch to new thread
 * by handing the current stack over and marking `new'
 * as the active thread on this CPU.
341 */
342 stack = current_stack();
343 old->kernel_stack = 0;
344 new->kernel_stack = stack;
345 active_threads[mycpu] = new;
346
347 /*
348 * Switch exception link to point to new
349 * user registers.
350 */
351
352 STACK_IEL(stack)->saved_state = &new->pcb->iss;
353
354 }
355
356 /*
357 * Switch to the first thread on a CPU.
 *
 * Loads the thread's hardware state (TSS/LDT/FPU) and then
 * jumps into it via the assembly Load_context routine, which
 * is declared no_return — this function never comes back.
358 */
359 void load_context(
360 register thread_t new)
361 {
362 switch_ktss(new->pcb);
363 Load_context(new);
364 }
365
366 /*
367 * Switch to a new thread.
368 * Save the old thread`s kernel state or continuation,
369 * and return it.
 *
 * Returns the value produced by the assembly Switch_context
 * routine (the previously-running thread, as seen when this
 * thread is eventually resumed).
370 */
371 thread_t switch_context(
372 register thread_t old,
373 continuation_t continuation,
374 register thread_t new)
375 {
376 /*
377 * Save FP registers if in use.
 * Must happen before the old task's map is deactivated.
378 */
379 fpu_save_context(old);
380
381 /*
382 * Switch address maps if switching tasks.
383 */
384 {
385 task_t old_task, new_task;
386 #if NCPUS > 1
387 int mycpu = cpu_number();
388 #endif
 /*
 * NOTE(review): with NCPUS == 1, `mycpu' is not declared;
 * presumably the PMAP_*_USER macros discard it in that
 * configuration — confirm against the pmap headers.
 */
389
390 if ((old_task = old->task) != (new_task = new->task)) {
391 PMAP_DEACTIVATE_USER(vm_map_pmap(old_task->map),
392 old, mycpu);
393 PMAP_ACTIVATE_USER(vm_map_pmap(new_task->map),
394 new, mycpu);
395 }
396 }
397
398 /*
399 * Load the rest of the user state for the new thread
 * (TSS ring-0 stack pointer, LDT, FPU context).
400 */
401 switch_ktss(new->pcb);
402
403 return Switch_context(old, continuation, new);
404 }
405
/*
 * pcb_module_init:
 *
 * One-time setup for this module: create the zone that PCBs
 * are allocated from (element size, maximum total size, growth
 * chunk, non-pageable, name), then initialize the FPU and
 * IO-permission-bitmap submodules.
 */
406 void pcb_module_init(void)
407 {
408 pcb_zone = zinit(sizeof(struct pcb),
409 THREAD_MAX * sizeof(struct pcb),
410 THREAD_CHUNK * sizeof(struct pcb),
411 FALSE, "i386 pcb state");
412
413 fpu_module_init();
414 iopb_init();
415 }
416
417 void pcb_init(
418 register thread_t thread)
419 {
420 register pcb_t pcb;
421
422 pcb = (pcb_t) zalloc(pcb_zone);
423 if (pcb == 0)
424 panic("pcb_init");
425
426 counter(if (++c_threads_current > c_threads_max)
427 c_threads_max = c_threads_current);
428
429 /*
430 * We can't let random values leak out to the user.
431 */
432 bzero(pcb, sizeof *pcb);
433 simple_lock_init(&pcb->lock);
434
435 /*
436 * Guarantee that the bootstrapped thread will be in user
437 * mode.
438 */
439 pcb->iss.cs = USER_CS;
440 pcb->iss.ss = USER_DS;
441 pcb->iss.ds = USER_DS;
442 pcb->iss.es = USER_DS;
443 pcb->iss.fs = USER_DS;
444 pcb->iss.gs = USER_DS;
445 pcb->iss.efl = EFL_USER_SET;
446
447 thread->pcb = pcb;
448 }
449
450 void pcb_terminate(
451 register thread_t thread)
452 {
453 register pcb_t pcb = thread->pcb;
454
455 counter(if (--c_threads_current < c_threads_min)
456 c_threads_min = c_threads_current);
457
458 if (pcb->ims.io_tss != 0)
459 iopb_destroy(pcb->ims.io_tss);
460 if (pcb->ims.ifps != 0)
461 fp_free(pcb->ims.ifps);
462 if (pcb->ims.ldt != 0)
463 user_ldt_free(pcb->ims.ldt);
464 zfree(pcb_zone, (vm_offset_t) pcb);
465 thread->pcb = 0;
466 }
467
468 /*
469 * pcb_collect:
470 *
471 * Attempt to free excess pcb memory.
 *
 * Deliberately a no-op on i386: the PCB holds no cached
 * state that can be reclaimed while the thread exists.
472 */
473
474 void pcb_collect(
475 thread_t thread)
476 {
477 }
478
479
480 /*
481 * thread_setstatus:
482 *
483 * Set the status of the specified thread.
 *
 * flavor selects which piece of state `tstate' carries;
 * count is the caller-supplied size of tstate in natural_t
 * units and must cover the flavor's *_STATE_COUNT.
 * Returns KERN_SUCCESS or KERN_INVALID_ARGUMENT.
484 */
485
486 kern_return_t thread_setstatus(
487 thread_t thread,
488 int flavor,
489 thread_state_t tstate,
490 natural_t count)
491 {
492 switch (flavor) {
493 case i386_THREAD_STATE:
494 case i386_REGS_SEGS_STATE:
495 {
496 register struct i386_thread_state *state;
497 register struct i386_saved_state *saved_state;
498
499 if (count < i386_THREAD_STATE_COUNT)
500 return KERN_INVALID_ARGUMENT;
501
502 state = (struct i386_thread_state *) tstate;
503
504 if (flavor == i386_REGS_SEGS_STATE) {
505 /*
506 * Code and stack selectors must not be null,
507 * and must have user protection levels.
508 * Only the low 16 bits are valid.
509 */
510 state->cs &= 0xffff;
511 state->ss &= 0xffff;
512 state->ds &= 0xffff;
513 state->es &= 0xffff;
514 state->fs &= 0xffff;
515 state->gs &= 0xffff;
516
517 if (state->cs == 0 || (state->cs & SEL_PL) != SEL_PL_U
518 || state->ss == 0 || (state->ss & SEL_PL) != SEL_PL_U)
519 return KERN_INVALID_ARGUMENT;
520 }
521
522 saved_state = USER_REGS(thread);
523
524 /*
525 * General registers
526 */
527 saved_state->edi = state->edi;
528 saved_state->esi = state->esi;
529 saved_state->ebp = state->ebp;
530 saved_state->uesp = state->uesp;
531 saved_state->ebx = state->ebx;
532 saved_state->edx = state->edx;
533 saved_state->ecx = state->ecx;
534 saved_state->eax = state->eax;
535 saved_state->eip = state->eip;
 /*
 * eflags is sanitized: user-clearable bits are masked out
 * and the mandatory user-mode bits are forced on, so the
 * caller cannot smuggle in privileged flag settings.
 */
536 saved_state->efl = (state->efl & ~EFL_USER_CLEAR)
537 | EFL_USER_SET;
538
539 /*
540 * Segment registers. Set differently in V8086 mode.
541 */
542 if (state->efl & EFL_VM) {
543 /*
544 * Set V8086 mode segment registers.
545 */
546 saved_state->cs = state->cs & 0xffff;
547 saved_state->ss = state->ss & 0xffff;
548 saved_state->v86_segs.v86_ds = state->ds & 0xffff;
549 saved_state->v86_segs.v86_es = state->es & 0xffff;
550 saved_state->v86_segs.v86_fs = state->fs & 0xffff;
551 saved_state->v86_segs.v86_gs = state->gs & 0xffff;
552
553 /*
554 * Zero protected mode segment registers.
555 */
556 saved_state->ds = 0;
557 saved_state->es = 0;
558 saved_state->fs = 0;
559 saved_state->gs = 0;
560
561 if (thread->pcb->ims.v86s.int_table) {
562 /*
563 * Hardware assist on.
 * Track the virtual TF/IF flags separately.
564 */
565 thread->pcb->ims.v86s.flags =
566 state->efl & (EFL_TF | EFL_IF);
567 }
568 }
569 else if (flavor == i386_THREAD_STATE) {
570 /*
571 * 386 mode. Set segment registers for flat
572 * 32-bit address space.
573 */
574 saved_state->cs = USER_CS;
575 saved_state->ss = USER_DS;
576 saved_state->ds = USER_DS;
577 saved_state->es = USER_DS;
578 saved_state->fs = USER_DS;
579 saved_state->gs = USER_DS;
580 }
581 else {
582 /*
583 * User setting segment registers.
584 * Code and stack selectors have already been
585 * checked. Others will be reset by 'iret'
586 * if they are not valid.
587 */
588 saved_state->cs = state->cs;
589 saved_state->ss = state->ss;
590 saved_state->ds = state->ds;
591 saved_state->es = state->es;
592 saved_state->fs = state->fs;
593 saved_state->gs = state->gs;
594 }
595 break;
596 }
597
598 case i386_FLOAT_STATE: {
599
600 if (count < i386_FLOAT_STATE_COUNT)
601 return KERN_INVALID_ARGUMENT;
602
603 return fpu_set_state(thread,
604 (struct i386_float_state *) tstate);
605 }
606
607 /*
608 * Temporary - replace by i386_io_map
 *
 * Setting the IO permission map is disabled (#if 0) but the
 * call still validates count and returns KERN_SUCCESS, for
 * DOS-emulator compatibility (see the 92/01/03 history entry).
609 */
610 case i386_ISA_PORT_MAP_STATE: {
611 #if 0
612 register struct i386_isa_port_map_state *state;
613 register iopb_tss_t tss;
614 #endif
615
616 if (count < i386_ISA_PORT_MAP_STATE_COUNT)
617 return KERN_INVALID_ARGUMENT;
618
619 #if 0
620 /*
621 * If the thread has no ktss yet,
622 * we must allocate one.
623 */
624
625 state = (struct i386_isa_port_map_state *) tstate;
626 tss = thread->pcb->ims.io_tss;
627 if (tss == 0) {
628 tss = iopb_create();
629 thread->pcb->ims.io_tss = tss;
630 }
631
632 bcopy(state->pm,
633 tss->bitmap,
634 sizeof state->pm);
635 #endif
636 break;
637 }
638
639 case i386_V86_ASSIST_STATE:
640 {
641 register struct i386_v86_assist_state *state;
642 vm_offset_t int_table;
643 int int_count;
644
645 if (count < i386_V86_ASSIST_STATE_COUNT)
646 return KERN_INVALID_ARGUMENT;
647
648 state = (struct i386_v86_assist_state *) tstate;
649 int_table = state->int_table;
650 int_count = state->int_count;
651
 /*
 * The interrupt table must lie entirely within the
 * user-addressable range.
 */
652 if (int_table >= VM_MAX_ADDRESS ||
653 int_table +
654 int_count * sizeof(struct v86_interrupt_table)
655 > VM_MAX_ADDRESS)
656 return KERN_INVALID_ARGUMENT;
657
658 thread->pcb->ims.v86s.int_table = int_table;
659 thread->pcb->ims.v86s.int_count = int_count;
660
 /* seed the virtual TF/IF flags from the current eflags */
661 thread->pcb->ims.v86s.flags =
662 USER_REGS(thread)->efl & (EFL_TF | EFL_IF);
663 break;
664 }
665
666 default:
667 return KERN_INVALID_ARGUMENT;
668 }
669
670 return KERN_SUCCESS;
671 }
672
673 /*
674 * thread_getstatus:
675 *
676 * Get the status of the specified thread.
 *
 * flavor selects which piece of state to copy out into
 * `tstate'; *count is the size of tstate on entry (in
 * natural_t units) and is set to the amount filled in.
677 */
678
679 kern_return_t thread_getstatus(
680 register thread_t thread,
681 int flavor,
682 thread_state_t tstate, /* pointer to OUT array */
683 natural_t *count) /* IN/OUT */
684 {
685 switch (flavor) {
686 case THREAD_STATE_FLAVOR_LIST:
 /* enumerate the flavors this machine supports */
687 if (*count < 4)
688 return KERN_INVALID_ARGUMENT;
689 tstate[0] = i386_THREAD_STATE;
690 tstate[1] = i386_FLOAT_STATE;
691 tstate[2] = i386_ISA_PORT_MAP_STATE;
692 tstate[3] = i386_V86_ASSIST_STATE;
693 *count = 4;
694 break;
695
696 case i386_THREAD_STATE:
697 case i386_REGS_SEGS_STATE:
698 {
699 register struct i386_thread_state *state;
700 register struct i386_saved_state *saved_state;
701
702 if (*count < i386_THREAD_STATE_COUNT)
703 return KERN_INVALID_ARGUMENT;
704
705 state = (struct i386_thread_state *) tstate;
706 saved_state = USER_REGS(thread);
707
708 /*
709 * General registers.
710 */
711 state->edi = saved_state->edi;
712 state->esi = saved_state->esi;
713 state->ebp = saved_state->ebp;
714 state->ebx = saved_state->ebx;
715 state->edx = saved_state->edx;
716 state->ecx = saved_state->ecx;
717 state->eax = saved_state->eax;
718 state->eip = saved_state->eip;
719 state->efl = saved_state->efl;
720 state->uesp = saved_state->uesp;
721
722 state->cs = saved_state->cs;
723 state->ss = saved_state->ss;
724 if (saved_state->efl & EFL_VM) {
725 /*
726 * V8086 mode.
 * Segment values live in the v86 overlay.
727 */
728 state->ds = saved_state->v86_segs.v86_ds & 0xffff;
729 state->es = saved_state->v86_segs.v86_es & 0xffff;
730 state->fs = saved_state->v86_segs.v86_fs & 0xffff;
731 state->gs = saved_state->v86_segs.v86_gs & 0xffff;
732
733 if (thread->pcb->ims.v86s.int_table) {
734 /*
735 * Hardware assist on
 * Report the virtual IF: clear EFL_IF in the
 * result unless virtual interrupts are enabled
 * or one is pending.
736 */
737 if ((thread->pcb->ims.v86s.flags &
738 (EFL_IF|V86_IF_PENDING))
739 == 0)
740 state->efl &= ~EFL_IF;
741 }
742 }
743 else {
744 /*
745 * 386 mode.
746 */
747 state->ds = saved_state->ds & 0xffff;
748 state->es = saved_state->es & 0xffff;
749 state->fs = saved_state->fs & 0xffff;
750 state->gs = saved_state->gs & 0xffff;
751 }
752 *count = i386_THREAD_STATE_COUNT;
753 break;
754 }
755
756 case i386_FLOAT_STATE: {
757
758 if (*count < i386_FLOAT_STATE_COUNT)
759 return KERN_INVALID_ARGUMENT;
760
761 *count = i386_FLOAT_STATE_COUNT;
762 return fpu_get_state(thread,
763 (struct i386_float_state *)tstate);
764 }
765
766 /*
767 * Temporary - replace by i386_io_map
768 */
769 case i386_ISA_PORT_MAP_STATE: {
770 register struct i386_isa_port_map_state *state;
771 register iopb_tss_t tss;
772
773 if (*count < i386_ISA_PORT_MAP_STATE_COUNT)
774 return KERN_INVALID_ARGUMENT;
775
776 state = (struct i386_isa_port_map_state *) tstate;
777 tss = thread->pcb->ims.io_tss;
778
779 if (tss == 0) {
780 int i;
781
782 /*
783 * The thread has no ktss, so no IO permissions.
 * All-ones means every port is denied.
784 */
785
786 for (i = 0; i < sizeof state->pm; i++)
787 state->pm[i] = 0xff;
788 } else {
789 /*
790 * The thread has its own ktss.
791 */
792
793 bcopy(tss->bitmap,
794 state->pm,
795 sizeof state->pm);
796 }
797
798 *count = i386_ISA_PORT_MAP_STATE_COUNT;
799 break;
800 }
801
802 case i386_V86_ASSIST_STATE:
803 {
804 register struct i386_v86_assist_state *state;
805
806 if (*count < i386_V86_ASSIST_STATE_COUNT)
807 return KERN_INVALID_ARGUMENT;
808
809 state = (struct i386_v86_assist_state *) tstate;
810 state->int_table = thread->pcb->ims.v86s.int_table;
811 state->int_count = thread->pcb->ims.v86s.int_count;
812
813 *count = i386_V86_ASSIST_STATE_COUNT;
814 break;
815 }
816
817 default:
818 return KERN_INVALID_ARGUMENT;
819 }
820
821 return KERN_SUCCESS;
822 }
823
824 /*
825 * Alter the thread`s state so that a following thread_exception_return
826 * will make the thread return 'retval' from a syscall.
 *
 * On i386 the syscall return value is delivered in eax, so it is
 * enough to patch the saved eax in the PCB.
827 */
828 void
829 thread_set_syscall_return(
830 thread_t thread,
831 kern_return_t retval)
832 {
833 thread->pcb->iss.eax = retval;
834 }
835
836
837 /*
838 * Return preferred address of user stack.
839 * Always returns low address. If stack grows up,
840 * the stack grows away from this address;
841 * if stack grows down, the stack grows towards this
842 * address.
 *
 * On i386 the stack grows down from VM_MAX_ADDRESS, so the
 * low end of a stack of `stack_size' bytes is simply
 * VM_MAX_ADDRESS - stack_size.
843 */
844 vm_offset_t
845 user_stack_low(
846 vm_size_t stack_size)
847 {
848 return VM_MAX_ADDRESS - stack_size;
849 }
850
851 /*
852 * Allocate argument area and set registers for first user thread.
853 */
854 vm_offset_t
855 set_user_regs(
856 vm_offset_t stack_base, /* low address */
857 vm_offset_t stack_size,
858 natural_t *entry,
859 vm_size_t arg_size)
860 {
861 vm_offset_t arg_addr;
862 register struct i386_saved_state *saved_state;
863
864 arg_size = (arg_size + sizeof(int) - 1) & ~(sizeof(int)-1);
865 arg_addr = stack_base + stack_size - arg_size;
866
867 saved_state = USER_REGS(current_thread());
868 saved_state->uesp = (int)arg_addr;
869 saved_state->eip = entry[0];
870
871 return arg_addr;
872 }
Cache object: 83cf9aaf01c6c3cc7c0ded800a1c1eab
|