/*	$NetBSD: subr_percpu.c,v 1.8.10.1 2009/02/02 03:34:29 snj Exp $	*/

/*-
 * Copyright (c)2007,2008 YAMAMOTO Takashi,
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * per-cpu storage.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: subr_percpu.c,v 1.8.10.1 2009/02/02 03:34:29 snj Exp $");

#include <sys/param.h>
#include <sys/cpu.h>
#include <sys/kmem.h>
#include <sys/kernel.h>
#include <sys/mutex.h>
#include <sys/percpu.h>
#include <sys/rwlock.h>
#include <sys/vmem.h>
#include <sys/xcall.h>

#include <uvm/uvm_extern.h>
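/*
 * Arena tuning knobs (descriptive note, not in the original comments):
 *
 * PERCPU_QUANTUM_SIZE: allocation unit (and alignment) of the offset
 * arena; one machine alignment boundary.
 * PERCPU_QCACHE_MAX: quantum cache limit for the arena; 0 disables
 * vmem's quantum caching.
 * PERCPU_IMPORT_SIZE: granularity at which percpu_backend_alloc grows
 * the offset space, and thus each cpu's storage block.
 */
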
#define	PERCPU_QUANTUM_SIZE	(ALIGNBYTES + 1)
#define	PERCPU_QCACHE_MAX	0
#define	PERCPU_IMPORT_SIZE	2048

#if defined(DIAGNOSTIC)
#define	MAGIC	0x50435055	/* "PCPU" */
#define	percpu_encrypt(pc)	((pc) ^ MAGIC)
#define	percpu_decrypt(pc)	((pc) ^ MAGIC)
#else	/* defined(DIAGNOSTIC) */
#define	percpu_encrypt(pc)	(pc)
#define	percpu_decrypt(pc)	(pc)
#endif	/* defined(DIAGNOSTIC) */
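
/*
 * Note (added for clarity): a percpu_t handle is really the arena offset
 * of the storage, XORed with MAGIC on DIAGNOSTIC kernels.  The
 * obfuscation keeps callers from accidentally dereferencing the handle
 * as a pointer, and lets percpu_offset() sanity-check handles with its
 * KASSERT.
 */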

static krwlock_t percpu_swap_lock;
static kmutex_t percpu_allocation_lock;
static vmem_t *percpu_offset_arena;
static unsigned int percpu_nextoff = PERCPU_QUANTUM_SIZE;

static percpu_cpu_t *
cpu_percpu(struct cpu_info *ci)
{

	return &ci->ci_data.cpu_percpu;
}

static unsigned int
percpu_offset(percpu_t *pc)
{
	const unsigned int off = percpu_decrypt((uintptr_t)pc);

	KASSERT(off < percpu_nextoff);
	return off;
}

/*
 * percpu_cpu_swap: crosscall handler for percpu_cpu_enlarge
 */

static void
percpu_cpu_swap(void *p1, void *p2)
{
	struct cpu_info * const ci = p1;
	percpu_cpu_t * const newpcc = p2;
	percpu_cpu_t * const pcc = cpu_percpu(ci);

	/*
	 * swap *pcc and *newpcc unless someone else has beaten us to it.
	 */

	rw_enter(&percpu_swap_lock, RW_WRITER);
	if (newpcc->pcc_size > pcc->pcc_size) {
		percpu_cpu_t tmp;
		int s;

		tmp = *pcc;

		/*
		 * block interrupts so that we don't lose modifications
		 * made by interrupt handlers.
		 */

		s = splhigh();

		/*
		 * copy data to new storage.
		 */

		memcpy(newpcc->pcc_data, pcc->pcc_data, pcc->pcc_size);

		/*
		 * this assignment needs to be atomic for percpu_getptr_remote.
		 */

		pcc->pcc_data = newpcc->pcc_data;

		splx(s);

		pcc->pcc_size = newpcc->pcc_size;
		*newpcc = tmp;
	}
	rw_exit(&percpu_swap_lock);
}

/*
 * percpu_cpu_enlarge: ensure that the percpu_cpu_t of each cpu has
 * enough space
 */

static void
percpu_cpu_enlarge(size_t size)
{
	CPU_INFO_ITERATOR cii;
	struct cpu_info *ci;

	for (CPU_INFO_FOREACH(cii, ci)) {
		percpu_cpu_t pcc;

		pcc.pcc_data = kmem_alloc(size, KM_SLEEP); /* XXX cacheline */
		pcc.pcc_size = size;
		if (!mp_online) {
			percpu_cpu_swap(ci, &pcc);
		} else {
			uint64_t where;

			uvm_lwp_hold(curlwp); /* don't swap out pcc */
			where = xc_unicast(0, percpu_cpu_swap, ci, &pcc, ci);
			xc_wait(where);
			uvm_lwp_rele(curlwp);
		}
		KASSERT(pcc.pcc_size <= size);
		if (pcc.pcc_data != NULL) {
			kmem_free(pcc.pcc_data, pcc.pcc_size);
		}
	}
}

/*
 * percpu_backend_alloc: vmem import callback for percpu_offset_arena
 */

static vmem_addr_t
percpu_backend_alloc(vmem_t *dummy, vmem_size_t size, vmem_size_t *resultsize,
    vm_flag_t vmflags)
{
	unsigned int offset;
	unsigned int nextoff;

	ASSERT_SLEEPABLE();
	KASSERT(dummy == NULL);

	if ((vmflags & VM_NOSLEEP) != 0)
		return VMEM_ADDR_NULL;

	size = roundup(size, PERCPU_IMPORT_SIZE);
	mutex_enter(&percpu_allocation_lock);
	offset = percpu_nextoff;
	percpu_nextoff = nextoff = percpu_nextoff + size;
	mutex_exit(&percpu_allocation_lock);

	percpu_cpu_enlarge(nextoff);

	*resultsize = size;
	return (vmem_addr_t)offset;
}
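
/*
 * Note (added for clarity): offsets handed out by the arena never move;
 * freeing a percpu_t merely returns its offset range to the arena for
 * reuse, and the per-cpu storage blocks themselves only ever grow.
 */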

static void
percpu_zero_cb(void *vp, void *vp2, struct cpu_info *ci)
{
	size_t sz = (uintptr_t)vp2;

	memset(vp, 0, sz);
}

/*
 * percpu_zero: initialize percpu storage to zero.
 */

static void
percpu_zero(percpu_t *pc, size_t sz)
{

	percpu_foreach(pc, percpu_zero_cb, (void *)(uintptr_t)sz);
}

/*
 * percpu_init: subsystem initialization
 */

void
percpu_init(void)
{

	ASSERT_SLEEPABLE();
	rw_init(&percpu_swap_lock);
	mutex_init(&percpu_allocation_lock, MUTEX_DEFAULT, IPL_NONE);

	percpu_offset_arena = vmem_create("percpu", 0, 0, PERCPU_QUANTUM_SIZE,
	    percpu_backend_alloc, NULL, NULL, PERCPU_QCACHE_MAX, VM_SLEEP,
	    IPL_NONE);
}

/*
 * percpu_init_cpu: cpu initialization
 *
 * => should be called before the cpu appears on the list for CPU_INFO_FOREACH.
 */

void
percpu_init_cpu(struct cpu_info *ci)
{
	percpu_cpu_t * const pcc = cpu_percpu(ci);
	size_t size = percpu_nextoff; /* XXX racy */

	ASSERT_SLEEPABLE();
	pcc->pcc_size = size;
	if (size) {
		pcc->pcc_data = kmem_zalloc(pcc->pcc_size, KM_SLEEP);
	}
}

/*
 * percpu_alloc: allocate percpu storage
 *
 * => called in thread context.
 * => considered an expensive and rare operation.
 * => allocated storage is initialized with zeros.
 */

percpu_t *
percpu_alloc(size_t size)
{
	unsigned int offset;
	percpu_t *pc;

	ASSERT_SLEEPABLE();
	offset = vmem_alloc(percpu_offset_arena, size, VM_SLEEP | VM_BESTFIT);
	pc = (percpu_t *)percpu_encrypt((uintptr_t)offset);
	percpu_zero(pc, size);
	return pc;
}

/*
 * percpu_free: free percpu storage
 *
 * => called in thread context.
 * => considered an expensive and rare operation.
 */

void
percpu_free(percpu_t *pc, size_t size)
{

	ASSERT_SLEEPABLE();
	vmem_free(percpu_offset_arena, (vmem_addr_t)percpu_offset(pc), size);
}
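
/*
 * Example usage (an illustrative sketch; "sc" and "sc_percpu" are
 * hypothetical names, not part of this API):
 *
 *	sc->sc_percpu = percpu_alloc(sizeof(uint64_t));
 *	...
 *	percpu_free(sc->sc_percpu, sizeof(uint64_t));
 *
 * the size passed to percpu_free must match the size passed to
 * percpu_alloc.
 */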

/*
 * percpu_getref:
 *
 * => safe to be used in either thread or interrupt context
 * => disables preemption; must be bracketed with a percpu_putref()
 */

void *
percpu_getref(percpu_t *pc)
{

	KPREEMPT_DISABLE(curlwp);
	return percpu_getptr_remote(pc, curcpu());
}

/*
 * percpu_putref:
 *
 * => drops the preemption-disabled count after the caller is done with
 *    per-cpu data
 */

void
percpu_putref(percpu_t *pc)
{

	KPREEMPT_ENABLE(curlwp);
}
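
/*
 * Example usage (an illustrative sketch; "sc_percpu" is the hypothetical
 * handle from the example above):
 *
 *	uint64_t *p;
 *
 *	p = percpu_getref(sc->sc_percpu);
 *	(*p)++;
 *	percpu_putref(sc->sc_percpu);
 *
 * the pointer is valid only until percpu_putref; it must not be cached
 * across it, since the lwp may then be preempted and migrate to another
 * cpu.
 */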

/*
 * percpu_traverse_enter, percpu_traverse_exit, percpu_getptr_remote:
 * helpers to access a remote cpu's percpu data.
 *
 * => called in thread context.
 * => percpu_traverse_enter can block low-priority xcalls.
 * => typical usage would be:
 *
 *	sum = 0;
 *	percpu_traverse_enter();
 *	for (CPU_INFO_FOREACH(cii, ci)) {
 *		unsigned int *p = percpu_getptr_remote(pc, ci);
 *		sum += *p;
 *	}
 *	percpu_traverse_exit();
 */

void
percpu_traverse_enter(void)
{

	ASSERT_SLEEPABLE();
	rw_enter(&percpu_swap_lock, RW_READER);
}

void
percpu_traverse_exit(void)
{

	rw_exit(&percpu_swap_lock);
}

void *
percpu_getptr_remote(percpu_t *pc, struct cpu_info *ci)
{

	return &((char *)cpu_percpu(ci)->pcc_data)[percpu_offset(pc)];
}

/*
 * percpu_foreach: call the specified callback function for each cpu.
 *
 * => called in thread context.
 * => caller should not rely on the cpu iteration order.
 * => the callback function should be minimal because it is executed while
 *    a global lock is held, which can block low-priority xcalls.
 *    e.g. it's illegal for a callback function to sleep waiting for
 *    memory allocation.
 */
void
percpu_foreach(percpu_t *pc, percpu_callback_t cb, void *arg)
{
	CPU_INFO_ITERATOR cii;
	struct cpu_info *ci;

	percpu_traverse_enter();
	for (CPU_INFO_FOREACH(cii, ci)) {
		(*cb)(percpu_getptr_remote(pc, ci), arg, ci);
	}
	percpu_traverse_exit();
}
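
/*
 * Example usage (an illustrative sketch; "sum_cb" is a hypothetical
 * callback summing the per-cpu counters from the examples above):
 *
 *	static void
 *	sum_cb(void *p, void *arg, struct cpu_info *ci)
 *	{
 *		uint64_t *sump = arg;
 *
 *		*sump += *(uint64_t *)p;
 *	}
 *
 *	uint64_t sum = 0;
 *	percpu_foreach(sc->sc_percpu, sum_cb, &sum);
 */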