FreeBSD/Linux Kernel Cross Reference
sys/sys/savar.h
1 /* $NetBSD: savar.h,v 1.28 2008/10/17 08:16:57 cegger Exp $ */
2
3 /*-
4 * Copyright (c) 2001 The NetBSD Foundation, Inc.
5 * All rights reserved.
6 *
7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Nathan J. Williams.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 * 3. All advertising materials mentioning features or use of this software
19 * must display the following acknowledgement:
20 * This product includes software developed by the NetBSD
21 * Foundation, Inc. and its contributors.
22 * 4. Neither the name of The NetBSD Foundation nor the names of its
23 * contributors may be used to endorse or promote products derived
24 * from this software without specific prior written permission.
25 *
26 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
27 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
28 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
29 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
30 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
31 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
32 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
33 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
34 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
35 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
36 * POSSIBILITY OF SUCH DAMAGE.
37 */
38
39 /*
40  * Internal data used by the scheduler activation implementation
41 */
42
43 #ifndef _SYS_SAVAR_H_
44 #define _SYS_SAVAR_H_
45
46 #include <sys/lock.h>
47 #include <sys/tree.h>
48 #include <sys/queue.h>
49 #include <sys/sleepq.h>
50 #include <sys/sa.h> /* needed for struct sa_t */
51
52 struct lwp;
53
54 union sau_state {
55 struct {
56 ucontext_t ss_ctx;
57 struct sa_t ss_sa;
58 } ss_captured;
59 struct {
60 struct lwp *ss_lwp;
61 } ss_deferred;
62 };
63
64 struct sa_emul {
65 size_t sae_ucsize; /* Size of ucontext_t */
66 size_t sae_sasize; /* Size of sa_t */
67 size_t sae_sapsize; /* Size of (sa_t *) */
68 int (*sae_sacopyout)(int, const void *, void *);
69 int (*sae_upcallconv)(struct lwp *, int, size_t *, void **,
70 void (**)(void *));
71 void (*sae_upcall)(struct lwp *, int, int, int, void *,
72 void *, void *, sa_upcall_t);
73 void (*sae_getucontext)(struct lwp *, void *);
74 void *(*sae_ucsp)(void *); /* Stack ptr from an ucontext_t */
75 };
76
77 struct sadata_upcall {
78 SIMPLEQ_ENTRY(sadata_upcall) sau_next;
79 int sau_flags;
80 int sau_type;
81 size_t sau_argsize;
82 void *sau_arg;
83 void (*sau_argfreefunc)(void *);
84 stack_t sau_stack;
85 union sau_state sau_event;
86 union sau_state sau_interrupted;
87 };
88
89 #define SAU_FLAG_DEFERRED_EVENT 0x1
90 #define SAU_FLAG_DEFERRED_INTERRUPTED 0x2
91
92 #define SA_UPCALL_TYPE_MASK 0x00FF
93
94 #define SA_UPCALL_DEFER_EVENT 0x1000
95 #define SA_UPCALL_DEFER_INTERRUPTED 0x2000
96 #define SA_UPCALL_DEFER (SA_UPCALL_DEFER_EVENT | \
97 SA_UPCALL_DEFER_INTERRUPTED)
98 #define SA_UPCALL_LOCKED_EVENT 0x4000
99 #define SA_UPCALL_LOCKED_INTERRUPTED 0x8000
100
101 struct sastack {
102 stack_t sast_stack;
103 RB_ENTRY(sastack) sast_node;
104 unsigned int sast_gen;
105 };
106
107 /*
108 * Locking:
109 *
110 * m: sadata::sa_mutex
111 * p: proc::p_lock
112 * v: sadata_vp::savp_mutex
113 * (: unlocked, stable
114 * !: unlocked, may only be reliably accessed by the blessed LWP itself
115 */
116 struct sadata_vp {
117 kmutex_t savp_mutex; /* (: mutex */
118 int savp_id; /* (: "virtual processor" identifier */
119 SLIST_ENTRY(sadata_vp) savp_next; /* m: link to next sadata_vp */
120 struct lwp *savp_lwp; /* !: lwp on "virtual processor" */
121 struct lwp *savp_blocker; /* !: recently blocked lwp */
122 sleepq_t savp_woken; /* m: list of unblocked lwps */
123 sleepq_t savp_lwpcache; /* m: list of cached lwps */
124 vaddr_t savp_faultaddr; /* !: page fault address */
125 vaddr_t savp_ofaultaddr; /* !: old page fault address */
126 struct sadata_upcall *savp_sleeper_upcall;
127 /* !: cached upcall data */
128 SIMPLEQ_HEAD(, sadata_upcall) savp_upcalls; /* ?: pending upcalls */
129 int savp_woken_count; /* m: count of woken lwps */
130 int savp_lwpcache_count; /* m: count of cached lwps */
131 int savp_pflags; /* !: blessed-private flags */
132 };
133
134 #define SAVP_FLAG_NOUPCALLS 0x0001 /* Already did upcalls, don't redo */
135 #define SAVP_FLAG_DELIVERING 0x0002 /* Delivering an upcall, no block */
136
137 /*
138 * Locking:
139 *
140 * m: sadata::sa_mutex
141 * p: proc::p_lock
142 * (: unlocked, stable
143 *
144 * Locking sadata::sa_vps is special. The list of vps is examined
145 * in two locations, signal handling and timer processing, in which
146 * proc::p_lock either is the best lock to use (signal handling) or an
147 * unacceptable lock to use (timer processing, as we hold spinlocks when
148 * locking the list, and p_lock can sleep; spinlocks while sleeping == BAD).
149 * Actually changing the list of vps is exceptionally rare; it only happens
150 * with concurrency > 1 and at app startup. So use both locks to write &
151 * have the code to add vps grab both of them to actually change the list.
152 */
153 struct sadata {
154 kmutex_t sa_mutex; /* (: lock on these fields */
155 int sa_flag; /* m: SA_* flags */
156 sa_upcall_t sa_upcall; /* m: upcall entry point */
157 int sa_concurrency; /* m: current concurrency */
158 int sa_maxconcurrency; /* m: requested concurrency */
159 int sa_stackchg; /* m: stacks change indicator */
160 RB_HEAD(sasttree, sastack) sa_stackstree; /* s, m: tree of upcall stacks */
161 struct sastack *sa_stacknext; /* m: next free stack */
162 ssize_t sa_stackinfo_offset; /* m: offset from ss_sp to stackinfo data */
163 int sa_nstacks; /* m: number of upcall stacks */
164 sigset_t sa_sigmask; /* p: process-wide masked sigs*/
165 SLIST_HEAD(, sadata_vp) sa_vps; /* m,p: virtual processors */
166 kcondvar_t sa_cv; /* m: condvar for sa_yield */
167 };
168
169 #define SA_FLAG_ALL SA_FLAG_PREEMPT
170
171 #define SA_MAXNUMSTACKS 16 /* Maximum number of upcall stacks per VP. */
172
173 struct sadata_upcall *sadata_upcall_alloc(int);
174 void sadata_upcall_free(struct sadata_upcall *);
175 void sadata_upcall_drain(void);
176
177 void sa_awaken(struct lwp *);
178 void sa_release(struct proc *);
179 void sa_switch(struct lwp *);
180 void sa_preempt(struct lwp *);
181 void sa_yield(struct lwp *);
182 int sa_upcall(struct lwp *, int, struct lwp *, struct lwp *, size_t, void *,
183 void (*)(void *));
184
185 void sa_putcachelwp(struct proc *, struct lwp *);
186 struct lwp *sa_getcachelwp(struct proc *, struct sadata_vp *);
187
188 /*
189 * API permitting other parts of the kernel to indicate that they
190 * are entering code paths in which blocking events should NOT generate
191 * upcalls to an SA process. These routines should ONLY be used by code
192 * involved in scheduling or process/thread initialization (such as
193 * stack copying). These calls must be balanced. They may be nested, but
194 * MUST be released in a LIFO order. These calls assume that the lwp is
195 * locked.
196 */
197 typedef int sa_critpath_t;
198 void sa_critpath_enter(struct lwp *, sa_critpath_t *);
199 void sa_critpath_exit(struct lwp *, sa_critpath_t *);
200
201
202 void sa_unblock_userret(struct lwp *);
203 void sa_upcall_userret(struct lwp *);
204 void cpu_upcall(struct lwp *, int, int, int, void *, void *, void *, sa_upcall_t);
205
206 typedef int (*sa_copyin_stack_t)(stack_t *, int, stack_t *);
207 int sa_stacks1(struct lwp *, register_t *, int, stack_t *,
208 sa_copyin_stack_t);
209 int dosa_register(struct lwp *, sa_upcall_t, sa_upcall_t *, int, ssize_t);
210
211 void *sa_ucsp(void *);
212
213 #define SAOUT_UCONTEXT 0
214 #define SAOUT_SA_T 1
215 #define SAOUT_SAP_T 2
216
217 #endif /* !_SYS_SAVAR_H_ */
Cache object: d2e88244444e548e3259e6366c1937e1
|