1 /*-
2 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
3 *
4 * Copyright (c) 2018, Matthew Macy <mmacy@freebsd.org>
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 *
15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
16 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
19 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
21 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
23 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
25 * SUCH DAMAGE.
26 *
27 * $FreeBSD: releng/12.0/sys/sys/epoch_private.h 338127 2018-08-21 03:33:54Z mmacy $
28 */
29
30 #ifndef _SYS_EPOCH_PRIVATE_H_
31 #define _SYS_EPOCH_PRIVATE_H_
32 #ifndef _KERNEL
33 #error "no user serviceable parts"
34 #else
35 #include <ck_epoch.h>
36 #include <sys/kpilite.h>
37
38 #include <sys/mutex.h>
39
40 extern void epoch_adjust_prio(struct thread *td, u_char prio);
41 #ifndef _SYS_SYSTM_H_
42 extern void critical_exit_preempt(void);
43 #endif
44
/*
 * Alignment for per-CPU epoch state.  On amd64 use two cache lines so
 * the adjacent-cache-line hardware prefetcher does not induce false
 * sharing between neighboring per-CPU records; elsewhere a single
 * cache line suffices.  The expansion is parenthesized so the macro
 * remains safe inside larger expressions (e.g. EPOCH_ALIGN - 1).
 */
#ifdef __amd64__
#define	EPOCH_ALIGN	(CACHE_LINE_SIZE * 2)
#else
#define	EPOCH_ALIGN	CACHE_LINE_SIZE
#endif
50
51 /*
52 * Standalone (_sa) routines for thread state manipulation
53 */
/*
 * Standalone critical-section entry operating on a struct thread_lite.
 * Bumps the thread's critical nesting count; the compiler barrier keeps
 * the increment ordered before any code executed inside the section.
 * Takes void * so callers can pass either thread representation.
 */
static __inline void
critical_enter_sa(void *tdarg)
{
	struct thread_lite *td;

	td = tdarg;
	td->td_critnest++;
	/* Order the td_critnest store before the protected region. */
	__compiler_membar();
}
63
/*
 * Standalone critical-section exit operating on a struct thread_lite.
 * Decrements the critical nesting count and, if a preemption was
 * deferred while the thread was in the critical section
 * (td_owepreempt set), takes it now via critical_exit_preempt().
 */
static __inline void
critical_exit_sa(void *tdarg)
{
	struct thread_lite *td;

	td = tdarg;
	MPASS(td->td_critnest > 0);
	/* Order the protected region before the td_critnest decrement. */
	__compiler_membar();
	td->td_critnest--;
	/* Order the decrement before the td_owepreempt check below. */
	__compiler_membar();
	if (__predict_false(td->td_owepreempt != 0))
		critical_exit_preempt();
}
77
/*
 * Per-section tracker for a preemptible epoch section.  The caller's
 * epoch_tracker storage is aliased to this layout (see the (void *)et
 * casts in epoch_enter_preempt/epoch_exit_preempt) and linked onto the
 * owning CPU's er_tdlist while the thread is inside the section, so
 * that threads still in the epoch can be found.
 */
typedef struct epoch_thread {
#ifdef EPOCH_TRACKER_DEBUG
	uint64_t et_magic_pre;		/* canary; EPOCH_MAGIC0 while in section */
#endif
	TAILQ_ENTRY(epoch_thread) et_link;	/* Epoch queue. */
	struct thread *et_td;		/* pointer to thread in section */
	ck_epoch_section_t et_section;	/* epoch section object */
#ifdef EPOCH_TRACKER_DEBUG
	uint64_t et_magic_post;		/* canary; EPOCH_MAGIC1 while in section */
#endif
} *epoch_thread_t;
TAILQ_HEAD (epoch_tdlist, epoch_thread);
90
/*
 * Per-CPU epoch state, aligned to EPOCH_ALIGN to keep records of
 * different CPUs from sharing cache lines.
 */
typedef struct epoch_record {
	ck_epoch_record_t er_record;	/* ck_epoch record for this CPU */
	volatile struct epoch_tdlist er_tdlist;	/* threads in preemptible sections */
	volatile uint32_t er_gen;	/* incremented on each preemptible exit */
	uint32_t er_cpuid;		/* owning CPU id */
} __aligned(EPOCH_ALIGN) *epoch_record_t;
97
/*
 * An epoch instance: global ck_epoch state plus a zpcpu-allocated array
 * of per-CPU records (indexed via epoch_currecord()).
 */
struct epoch {
	struct ck_epoch e_epoch __aligned(EPOCH_ALIGN);	/* global ck state */
	epoch_record_t e_pcpu_record;	/* per-CPU records (zpcpu base) */
	int e_idx;	/* presumably an index into a global epoch table — confirm */
	int e_flags;	/* e.g. EPOCH_PREEMPT (checked in the enter/exit paths) */
};
104
105 static epoch_record_t
106 epoch_currecord(epoch_t epoch)
107 {
108 return zpcpu_get_cpu(epoch->e_pcpu_record, curcpu);
109 }
110
/*
 * Return from the calling (void) function if the epoch pointer is NULL
 * — presumably tolerating calls made before the epoch subsystem has
 * been initialized (cf. the "cold ||" assertions in the enter paths).
 */
#define INIT_CHECK(epoch)					\
	do {							\
		if (__predict_false((epoch) == NULL))		\
			return;					\
	} while (0)
116
/*
 * Enter a preemptible epoch section.  The caller-supplied tracker (et)
 * is recorded on this CPU's thread list and used to begin a ck_epoch
 * section; the thread is pinned to the CPU (but remains preemptible)
 * for the duration of the section.  Paired with epoch_exit_preempt().
 */
static __inline void
epoch_enter_preempt(epoch_t epoch, epoch_tracker_t et)
{
	struct epoch_record *er;
	struct epoch_thread *etd;
	struct thread_lite *td;

	MPASS(cold || epoch != NULL);
	INIT_CHECK(epoch);
	/* The tracker storage is aliased to the epoch_thread layout. */
	etd = (void *)et;
	MPASS(epoch->e_flags & EPOCH_PREEMPT);
#ifdef EPOCH_TRACKER_DEBUG
	etd->et_magic_pre = EPOCH_MAGIC0;
	etd->et_magic_post = EPOCH_MAGIC1;
#endif
	td = (struct thread_lite *)curthread;
	etd->et_td = (void*)td;
	td->td_epochnest++;
	/* Critical section keeps us on this CPU while we touch its record. */
	critical_enter_sa(td);
	/* Pin so the section stays associated with this CPU's record. */
	sched_pin_lite(td);

	/* Remember entry priority so epoch_exit_preempt() can restore it. */
	td->td_pre_epoch_prio = td->td_priority;
	er = epoch_currecord(epoch);
	TAILQ_INSERT_TAIL(&er->er_tdlist, etd, et_link);
	ck_epoch_begin(&er->er_record, (ck_epoch_section_t *)&etd->et_section);
	/* Preemption is allowed again inside the section. */
	critical_exit_sa(td);
}
144
/*
 * Enter a non-preemptible epoch section.  Note the critical section
 * opened here is deliberately NOT closed: the thread stays in a
 * critical section (preemption disabled) until the matching
 * epoch_exit() releases it.
 */
static __inline void
epoch_enter(epoch_t epoch)
{
	struct thread_lite *td;
	epoch_record_t er;

	MPASS(cold || epoch != NULL);
	INIT_CHECK(epoch);
	td = (struct thread_lite *)curthread;

	td->td_epochnest++;
	critical_enter_sa(td);
	er = epoch_currecord(epoch);
	/* No tracker needed: the critical section itself holds the epoch. */
	ck_epoch_begin(&er->er_record, NULL);
}
160
/*
 * Leave a preemptible epoch section begun with epoch_enter_preempt().
 * Unpins the thread, ends the ck_epoch section, unlinks the tracker
 * from the CPU's thread list, and restores any priority boost the
 * thread received while inside the section.  All record manipulation
 * happens inside a short critical section so the CPU cannot change.
 */
static __inline void
epoch_exit_preempt(epoch_t epoch, epoch_tracker_t et)
{
	struct epoch_record *er;
	struct epoch_thread *etd;
	struct thread_lite *td;

	INIT_CHECK(epoch);
	td = (struct thread_lite *)curthread;
	critical_enter_sa(td);
	sched_unpin_lite(td);
	MPASS(td->td_epochnest);
	td->td_epochnest--;
	er = epoch_currecord(epoch);
	MPASS(epoch->e_flags & EPOCH_PREEMPT);
	etd = (void *)et;
	MPASS(etd != NULL);
	MPASS(etd->et_td == (struct thread *)td);
#ifdef EPOCH_TRACKER_DEBUG
	MPASS(etd->et_magic_pre == EPOCH_MAGIC0);
	MPASS(etd->et_magic_post == EPOCH_MAGIC1);
	etd->et_magic_pre = 0;
	etd->et_magic_post = 0;
#endif
	/* Poison so use of a stale tracker is caught quickly. */
	etd->et_td = (void*)0xDEADBEEF;
	ck_epoch_end(&er->er_record,
	    (ck_epoch_section_t *)&etd->et_section);
	TAILQ_REMOVE(&er->er_tdlist, etd, et_link);
	/* Generation bump lets waiters detect that this list changed. */
	er->er_gen++;
	/* Undo any priority propagation received while in the section. */
	if (__predict_false(td->td_pre_epoch_prio != td->td_priority))
		epoch_adjust_prio((struct thread *)td, td->td_pre_epoch_prio);
	critical_exit_sa(td);
}
194
/*
 * Leave a non-preemptible epoch section begun with epoch_enter().
 * Ends the ck_epoch section and releases the critical section that
 * epoch_enter() left open.
 */
static __inline void
epoch_exit(epoch_t epoch)
{
	struct thread_lite *td;
	epoch_record_t er;

	INIT_CHECK(epoch);
	td = (struct thread_lite *)curthread;
	MPASS(td->td_epochnest);
	td->td_epochnest--;
	er = epoch_currecord(epoch);
	ck_epoch_end(&er->er_record, NULL);
	/* Matches the critical_enter_sa() in epoch_enter(). */
	critical_exit_sa(td);
}
209 #endif /* _KERNEL */
210 #endif /* _SYS_EPOCH_PRIVATE_H_ */
Cache object: b94695a50e256f5a481a62c48a46ef11
|