1 /*-
2 * Copyright (c) 2017 Hans Petter Selasky
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 * notice unmodified, this list of conditions, and the following
10 * disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 *
15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
16 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
17 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
18 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
19 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
20 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
21 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
22 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
23 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
24 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
25 */
26
27 #include <sys/cdefs.h>
28 __FBSDID("$FreeBSD$");
29
30 #include <linux/compat.h>
31 #include <linux/kthread.h>
32 #include <linux/sched.h>
33 #include <linux/wait.h>
34
35 #include <sys/bus.h>
36 #include <sys/interrupt.h>
37 #include <sys/priority.h>
38
/*
 * Bits kept in task_struct::kthread_flags, manipulated with the
 * LinuxKPI atomic_*() operations below.
 */
enum {
	KTHREAD_SHOULD_STOP_MASK = (1 << 0),	/* kthread_stop() was called */
	KTHREAD_SHOULD_PARK_MASK = (1 << 1),	/* kthread_park() was called */
	KTHREAD_IS_PARKED_MASK = (1 << 2),	/* thread has acknowledged parking */
};
44
45 bool
46 linux_kthread_should_stop_task(struct task_struct *task)
47 {
48
49 return (atomic_read(&task->kthread_flags) & KTHREAD_SHOULD_STOP_MASK);
50 }
51
52 bool
53 linux_kthread_should_stop(void)
54 {
55
56 return (atomic_read(¤t->kthread_flags) & KTHREAD_SHOULD_STOP_MASK);
57 }
58
59 int
60 linux_kthread_stop(struct task_struct *task)
61 {
62 int retval;
63
64 /*
65 * Assume task is still alive else caller should not call
66 * kthread_stop():
67 */
68 atomic_or(KTHREAD_SHOULD_STOP_MASK, &task->kthread_flags);
69 kthread_unpark(task);
70 wake_up_process(task);
71 wait_for_completion(&task->exited);
72
73 /*
74 * Get return code and free task structure:
75 */
76 retval = task->task_ret;
77 put_task_struct(task);
78
79 return (retval);
80 }
81
/*
 * Ask the given kthread to park and wait until it has done so.  The
 * target thread must call kthread_parkme() for the wait below to
 * complete; the "parked" completion is signalled from
 * linux_kthread_parkme().  Always returns 0.
 */
int
linux_kthread_park(struct task_struct *task)
{

	atomic_or(KTHREAD_SHOULD_PARK_MASK, &task->kthread_flags);
	/* wake the thread so it observes the park request */
	wake_up_process(task);
	wait_for_completion(&task->parked);
	return (0);
}
91
/*
 * Called by a kthread on itself to honour a park request.  Blocks in
 * TASK_PARKED state until kthread_unpark() clears the should-park
 * flag, then returns with the task runnable again.
 */
void
linux_kthread_parkme(void)
{
	struct task_struct *task;

	task = current;
	/*
	 * Set the sleep state *before* testing the flag so a concurrent
	 * unpark/wakeup between the test and schedule() is not lost.
	 */
	set_task_state(task, TASK_PARKED | TASK_UNINTERRUPTIBLE);
	while (linux_kthread_should_park()) {
		/*
		 * Signal the "parked" completion exactly once per park
		 * cycle: atomic_fetch_or() returns the old flags, so
		 * complete() runs only on the 0 -> 1 transition of
		 * KTHREAD_IS_PARKED_MASK.
		 */
		while ((atomic_fetch_or(KTHREAD_IS_PARKED_MASK,
		    &task->kthread_flags) & KTHREAD_IS_PARKED_MASK) == 0)
			complete(&task->parked);
		schedule();
		/* re-arm the sleep state before re-checking the flag */
		set_task_state(task, TASK_PARKED | TASK_UNINTERRUPTIBLE);
	}
	/* no longer parked; clear the flag and become runnable */
	atomic_andnot(KTHREAD_IS_PARKED_MASK, &task->kthread_flags);
	set_task_state(task, TASK_RUNNING);
}
109
110 bool
111 linux_kthread_should_park(void)
112 {
113 struct task_struct *task;
114
115 task = current;
116 return (atomic_read(&task->kthread_flags) & KTHREAD_SHOULD_PARK_MASK);
117 }
118
/*
 * Cancel a park request for the given kthread and, if the thread has
 * already parked, wake it out of the TASK_PARKED sleep.
 */
void
linux_kthread_unpark(struct task_struct *task)
{

	/*
	 * Clear the request first so the target's re-check in
	 * kthread_parkme() sees it withdrawn, then atomically clear
	 * the is-parked bit; the old value tells us whether a wakeup
	 * is actually needed.
	 */
	atomic_andnot(KTHREAD_SHOULD_PARK_MASK, &task->kthread_flags);
	if ((atomic_fetch_andnot(KTHREAD_IS_PARKED_MASK, &task->kthread_flags) &
	    KTHREAD_IS_PARKED_MASK) != 0)
		wake_up_state(task, TASK_PARKED);
}
128
/*
 * Finish setting up a newly created kthread and make it runnable.
 * Stores the thread function and its argument in the task structure
 * associated with "td", bumps the thread's scheduler priority and puts
 * it on a run queue.  Returns the task structure for the new thread.
 */
struct task_struct *
linux_kthread_setup_and_run(struct thread *td, linux_task_fn_t *task_fn, void *arg)
{
	struct task_struct *task;

	linux_set_current(td);

	task = td->td_lkpi_task;
	task->task_fn = task_fn;
	task->task_data = arg;

	thread_lock(td);
	/* make sure the scheduler priority is raised */
	sched_prio(td, PI_SWI(SWI_NET));
	/* put thread into run-queue */
	sched_add(td, SRQ_BORING);
	/*
	 * NOTE(review): no thread_unlock() here — this relies on
	 * sched_add() releasing the thread lock; confirm against the
	 * scheduler implementation in use.
	 */

	return (task);
}
148
/*
 * Entry point for every LinuxKPI kthread.  Runs the stored thread
 * function unless a stop was requested before the thread ever ran,
 * then performs stop hand-shaking with kthread_stop(), if any, and
 * exits.
 */
void
linux_kthread_fn(void *arg __unused)
{
	struct task_struct *task = current;

	/* skip the thread function if we were stopped before starting */
	if (linux_kthread_should_stop_task(task) == 0)
		task->task_ret = task->task_fn(task->task_data);

	if (linux_kthread_should_stop_task(task) != 0) {
		struct thread *td = curthread;

		/*
		 * Detach the task from this thread *before* signalling
		 * "exited": once complete() runs, kthread_stop() may
		 * free the task structure at any time.
		 */
		/* let kthread_stop() free data */
		td->td_lkpi_task = NULL;

		/* wakeup kthread_stop() */
		complete(&task->exited);
	}
	kthread_exit();
}
168
169 void
170 lkpi_kthread_work_fn(void *context, int pending __unused)
171 {
172 struct kthread_work *work = context;
173
174 work->func(work);
175 }
176
177 void
178 lkpi_kthread_worker_init_fn(void *context, int pending __unused)
179 {
180 struct kthread_worker *worker = context;
181
182 worker->task = current;
183 }