1 /*-
2 * Copyright (c) 2000 Michael Smith
3 * Copyright (c) 2000 BSDi
4 * All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 *
15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
16 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
19 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
21 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
23 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
25 * SUCH DAMAGE.
26 *
27 * $FreeBSD: releng/5.4/sys/dev/acpica/Osd/OsdSchedule.c 145430 2005-04-22 23:31:31Z njl $
28 */
29
30 /*
31 * 6.3 : Scheduling services
32 */
33
#include "opt_acpi.h"
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/interrupt.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/limits.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/taskqueue.h>
#include <machine/clock.h>
45
46 #include "acpi.h"
47 #include <dev/acpica/acpivar.h>
48
/* ACPICA debug-layer tag for messages originating in this file. */
#define _COMPONENT ACPI_OS_SERVICES
ACPI_MODULE_NAME("SCHEDULE")

/*
 * Allow the user to tune the number of task threads we start. It seems
 * some systems have problems with increased parallelism.
 * Settable as a loader tunable: debug.acpi.max_threads.
 */
static int acpi_max_threads = ACPI_MAX_THREADS;
TUNABLE_INT("debug.acpi.max_threads", &acpi_max_threads);
58
59 /*
60 * This is a little complicated due to the fact that we need to build and then
61 * free a 'struct task' for each task we enqueue.
62 */
63
64 MALLOC_DEFINE(M_ACPITASK, "acpitask", "ACPI deferred task");
65
66 static void AcpiOsExecuteQueue(void *arg, int pending);
67
68 struct acpi_task {
69 struct task at_task;
70 ACPI_OSD_EXEC_CALLBACK at_function;
71 void *at_context;
72 };
73
74 struct acpi_task_queue {
75 STAILQ_ENTRY(acpi_task_queue) at_q;
76 struct acpi_task *at;
77 };
78
79 /*
80 * Private task queue definition for ACPI
81 */
82 TASKQUEUE_DECLARE(acpi);
83 static void *taskqueue_acpi_ih;
84
/*
 * Enqueue hook for the acpi taskqueue: schedule the SWI handler that
 * drains the queue.  Invoked whenever taskqueue_enqueue() adds work.
 */
static void
taskqueue_acpi_enqueue(void *context)
{
    swi_sched(taskqueue_acpi_ih, 0);
}
90
/*
 * SWI handler body: drain all pending tasks on the acpi taskqueue.
 */
static void
taskqueue_acpi_run(void *dummy)
{
    taskqueue_run(taskqueue_acpi);
}
96
/*
 * Instantiate the queue; the init expression registers the SWI handler
 * and stores its cookie in taskqueue_acpi_ih for the enqueue hook.
 */
TASKQUEUE_DEFINE(acpi, taskqueue_acpi_enqueue, 0,
    swi_add(NULL, "acpitaskq", taskqueue_acpi_run, NULL,
	SWI_TQ, 0, &taskqueue_acpi_ih));

/* Work list handed from SWI context to the worker kthreads, and its lock. */
static STAILQ_HEAD(, acpi_task_queue) acpi_task_queue;
ACPI_LOCK_DECL(taskq, "ACPI task queue");
103
/*
 * Worker-thread main loop.  Each thread sleeps until AcpiOsExecuteQueue()
 * posts work, dequeues one entry, runs the ACPI callback with the queue
 * lock dropped (the callback may sleep), then frees both the task and
 * its queue entry.  Never returns.
 */
static void
acpi_task_thread(void *arg)
{
    struct acpi_task_queue *atq;
    ACPI_OSD_EXEC_CALLBACK Function;
    void *Context;

    ACPI_LOCK(taskq);
    for (;;) {
	/* Sleep until an entry appears; re-check after every wakeup. */
	while ((atq = STAILQ_FIRST(&acpi_task_queue)) == NULL)
	    msleep(&acpi_task_queue, &taskq_mutex, PCATCH, "actask", 0);
	STAILQ_REMOVE_HEAD(&acpi_task_queue, at_q);
	/* Drop the lock before calling out: the callback may sleep. */
	ACPI_UNLOCK(taskq);

	Function = (ACPI_OSD_EXEC_CALLBACK)atq->at->at_function;
	Context = atq->at->at_context;

	Function(Context);

	free(atq->at, M_ACPITASK);
	free(atq, M_ACPITASK);
	/* Re-take the lock for the next iteration's queue check. */
	ACPI_LOCK(taskq);
    }

    /* NOTREACHED: the loop above never exits. */
    kthread_exit(0);
}
130
131 int
132 acpi_task_thread_init(void)
133 {
134 int i, err;
135 struct proc *acpi_kthread_proc;
136
137 err = 0;
138 STAILQ_INIT(&acpi_task_queue);
139
140 for (i = 0; i < acpi_max_threads; i++) {
141 err = kthread_create(acpi_task_thread, NULL, &acpi_kthread_proc,
142 0, 0, "acpi_task%d", i);
143 if (err != 0) {
144 printf("%s: kthread_create failed(%d)\n", __func__, err);
145 break;
146 }
147 }
148 return (err);
149 }
150
151 /* This function is called in interrupt context. */
152 ACPI_STATUS
153 AcpiOsQueueForExecution(UINT32 Priority, ACPI_OSD_EXEC_CALLBACK Function,
154 void *Context)
155 {
156 struct acpi_task *at;
157 int pri;
158
159 ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);
160
161 if (Function == NULL)
162 return_ACPI_STATUS (AE_BAD_PARAMETER);
163
164 at = malloc(sizeof(*at), M_ACPITASK, M_NOWAIT | M_ZERO);
165 if (at == NULL)
166 return_ACPI_STATUS (AE_NO_MEMORY);
167
168 at->at_function = Function;
169 at->at_context = Context;
170 switch (Priority) {
171 case OSD_PRIORITY_GPE:
172 pri = 4;
173 break;
174 case OSD_PRIORITY_HIGH:
175 pri = 3;
176 break;
177 case OSD_PRIORITY_MED:
178 pri = 2;
179 break;
180 case OSD_PRIORITY_LO:
181 pri = 1;
182 break;
183 default:
184 free(at, M_ACPITASK);
185 return_ACPI_STATUS (AE_BAD_PARAMETER);
186 }
187 TASK_INIT(&at->at_task, pri, AcpiOsExecuteQueue, at);
188
189 taskqueue_enqueue(taskqueue_acpi, (struct task *)at);
190
191 return_ACPI_STATUS (AE_OK);
192 }
193
194 static void
195 AcpiOsExecuteQueue(void *arg, int pending)
196 {
197 struct acpi_task_queue *atq;
198 ACPI_OSD_EXEC_CALLBACK Function;
199 void *Context;
200
201 ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);
202
203 atq = NULL;
204 Function = NULL;
205 Context = NULL;
206
207 atq = malloc(sizeof(*atq), M_ACPITASK, M_NOWAIT);
208 if (atq == NULL) {
209 printf("%s: no memory\n", __func__);
210 return;
211 }
212 atq->at = (struct acpi_task *)arg;
213
214 ACPI_LOCK(taskq);
215 STAILQ_INSERT_TAIL(&acpi_task_queue, atq, at_q);
216 wakeup_one(&acpi_task_queue);
217 ACPI_UNLOCK(taskq);
218
219 return_VOID;
220 }
221
222 void
223 AcpiOsSleep(ACPI_INTEGER Milliseconds)
224 {
225 int timo;
226 static int dummy;
227
228 ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);
229
230 timo = Milliseconds * hz / 1000;
231
232 /*
233 * If requested sleep time is less than our hz resolution, use
234 * DELAY instead for better granularity.
235 */
236 if (timo > 0)
237 tsleep(&dummy, 0, "acpislp", timo);
238 else
239 DELAY(Milliseconds * 1000);
240
241 return_VOID;
242 }
243
244 /*
245 * Return the current time in 100 nanosecond units
246 */
/*
 * Return the current time in 100 nanosecond units
 */
UINT64
AcpiOsGetTimer(void)
{
    struct bintime bt;
    UINT64 t;

    /* XXX During early boot there is no (decent) timer available yet. */
    if (cold)
	panic("acpi: timer op not yet supported during boot");

    binuptime(&bt);
    /*
     * Convert the binary fraction of a second (bt.frac, units of 2^-64 s)
     * to 100 ns units: take the top 32 bits, scale by 10^7 (100 ns units
     * per second), and shift back down.  The discarded low-order fraction
     * bits contribute less than one 100 ns unit.
     */
    t = ((UINT64)10000000 * (uint32_t)(bt.frac >> 32)) >> 32;
    t += bt.sec * 10000000;

    return (t);
}
263
/*
 * Busy-wait for the requested number of microseconds.  Used by ACPICA
 * for short delays where sleeping is not permitted.
 */
void
AcpiOsStall(UINT32 Microseconds)
{
    ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);

    DELAY(Microseconds);
    return_VOID;
}
272
273 UINT32
274 AcpiOsGetThreadId(void)
275 {
276 struct proc *p;
277
278 /* XXX do not add ACPI_FUNCTION_TRACE here, results in recursive call. */
279
280 p = curproc;
281 KASSERT(p != NULL, ("%s: curproc is NULL!", __func__));
282
283 /* Returning 0 is not allowed. */
284 return (p->p_pid + 1);
285 }