/*-
 * Copyright (c) 2007 Stephan Uphoff <ups@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * Machine independent bits of reader/writer lock implementation.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: releng/9.0/sys/kern/kern_rmlock.c 223758 2011-07-04 12:04:52Z attilio $");

#include "opt_ddb.h"
#include "opt_kdtrace.h"

#include <sys/param.h>
#include <sys/systm.h>

#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/rmlock.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/turnstile.h>
#include <sys/lock_profile.h>
#include <machine/cpu.h>

#ifdef DDB
#include <ddb/ddb.h>
#endif

#define	RMPF_ONQUEUE	1
#define	RMPF_SIGNAL	2
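
/*
 * Added commentary (a sketch of the flags' roles, inferred from the
 * code below): RMPF_ONQUEUE means a writer's IPI has put the tracker
 * on the rm_activeReaders list, forcing the matching read unlock onto
 * the slow path; RMPF_SIGNAL additionally asks that unlock to wake the
 * writer sleeping on the lock's turnstile.
 */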

/*
 * To support the use of rmlock in condition variables and msleep(), yet
 * another list for the priority tracker would be needed.  Using this
 * lock with cv and msleep also does not seem very useful.
 */

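/*
 * Added commentary (an assumption based on the fast paths below): a
 * compiler-only barrier suffices because the per-cpu tracker list is
 * only ever read on the same CPU (from interrupts and from the
 * writer's rendezvous IPI); the barrier merely keeps the tracker
 * stores from being reordered across the td_critnest updates.
 */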
static __inline void compiler_memory_barrier(void) {
	__asm __volatile("":::"memory");
}

static void	assert_rm(struct lock_object *lock, int what);
static void	lock_rm(struct lock_object *lock, int how);
#ifdef KDTRACE_HOOKS
static int	owner_rm(struct lock_object *lock, struct thread **owner);
#endif
static int	unlock_rm(struct lock_object *lock);

struct lock_class lock_class_rm = {
	.lc_name = "rm",
	.lc_flags = LC_SLEEPLOCK | LC_RECURSABLE,
	.lc_assert = assert_rm,
#if 0
#ifdef DDB
	.lc_ddb_show = db_show_rwlock,
#endif
#endif
	.lc_lock = lock_rm,
	.lc_unlock = unlock_rm,
#ifdef KDTRACE_HOOKS
	.lc_owner = owner_rm,
#endif
};

static void
assert_rm(struct lock_object *lock, int what)
{

	panic("assert_rm called");
}

static void
lock_rm(struct lock_object *lock, int how)
{

	panic("lock_rm called");
}

static int
unlock_rm(struct lock_object *lock)
{

	panic("unlock_rm called");
}

#ifdef KDTRACE_HOOKS
static int
owner_rm(struct lock_object *lock, struct thread **owner)
{

	panic("owner_rm called");
}
#endif

static struct mtx rm_spinlock;

MTX_SYSINIT(rm_spinlock, &rm_spinlock, "rm_spinlock", MTX_SPIN);

/*
 * Add or remove tracker from per-cpu list.
 *
 * The per-cpu list can be traversed at any time in forward direction from an
 * interrupt on the *local* cpu.
 */
static void inline
rm_tracker_add(struct pcpu *pc, struct rm_priotracker *tracker)
{
	struct rm_queue *next;

	/* Initialize all tracker pointers */
	tracker->rmp_cpuQueue.rmq_prev = &pc->pc_rm_queue;
	next = pc->pc_rm_queue.rmq_next;
	tracker->rmp_cpuQueue.rmq_next = next;

	/* rmq_prev is not used during forward traversal. */
	next->rmq_prev = &tracker->rmp_cpuQueue;

	/* Update pointer to first element. */
	pc->pc_rm_queue.rmq_next = &tracker->rmp_cpuQueue;
}

static void inline
rm_tracker_remove(struct pcpu *pc, struct rm_priotracker *tracker)
{
	struct rm_queue *next, *prev;

	next = tracker->rmp_cpuQueue.rmq_next;
	prev = tracker->rmp_cpuQueue.rmq_prev;

	/* Not used during forward traversal. */
	next->rmq_prev = prev;

	/* Remove from list. */
	prev->rmq_next = next;
}

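/*
 * Added commentary (inferred from this file): this runs on each CPU
 * named in the rendezvous issued by _rm_wlock() when a writer revokes
 * the read tokens.  Any active tracker for the contested lock is
 * flagged RMPF_ONQUEUE and put on rm_activeReaders, so that the
 * corresponding read unlock takes the slow path and the writer can
 * wait for it on the turnstile.
 */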
static void
rm_cleanIPI(void *arg)
{
	struct pcpu *pc;
	struct rmlock *rm = arg;
	struct rm_priotracker *tracker;
	struct rm_queue *queue;

	pc = pcpu_find(curcpu);

	for (queue = pc->pc_rm_queue.rmq_next; queue != &pc->pc_rm_queue;
	    queue = queue->rmq_next) {
		tracker = (struct rm_priotracker *)queue;
		if (tracker->rmp_rmlock == rm && tracker->rmp_flags == 0) {
			tracker->rmp_flags = RMPF_ONQUEUE;
			mtx_lock_spin(&rm_spinlock);
			LIST_INSERT_HEAD(&rm->rm_activeReaders, tracker,
			    rmp_qentry);
			mtx_unlock_spin(&rm_spinlock);
		}
	}
}

CTASSERT((RM_SLEEPABLE & LO_CLASSFLAGS) == RM_SLEEPABLE);

void
rm_init_flags(struct rmlock *rm, const char *name, int opts)
{
	int liflags;

	liflags = 0;
	if (!(opts & RM_NOWITNESS))
		liflags |= LO_WITNESS;
	if (opts & RM_RECURSE)
		liflags |= LO_RECURSABLE;
	rm->rm_writecpus = all_cpus;
	LIST_INIT(&rm->rm_activeReaders);
	if (opts & RM_SLEEPABLE) {
		liflags |= RM_SLEEPABLE;
		sx_init_flags(&rm->rm_lock_sx, "rmlock_sx", SX_RECURSE);
	} else
		mtx_init(&rm->rm_lock_mtx, name, "rmlock_mtx", MTX_NOWITNESS);
	lock_init(&rm->lock_object, &lock_class_rm, name, NULL, liflags);
}

void
rm_init(struct rmlock *rm, const char *name)
{

	rm_init_flags(rm, name, 0);
}
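
/*
 * Minimal usage sketch (added commentary; "example_rm" and the data it
 * protects are hypothetical, see rmlock(9) for the full KPI):
 *
 *	static struct rmlock example_rm;
 *	rm_init(&example_rm, "example");
 *
 *	struct rm_priotracker tracker;
 *	rm_rlock(&example_rm, &tracker);
 *	... read data protected by example_rm ...
 *	rm_runlock(&example_rm, &tracker);
 *
 *	rm_wlock(&example_rm);
 *	... modify the data ...
 *	rm_wunlock(&example_rm);
 */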

void
rm_destroy(struct rmlock *rm)
{

	if (rm->lock_object.lo_flags & RM_SLEEPABLE)
		sx_destroy(&rm->rm_lock_sx);
	else
		mtx_destroy(&rm->rm_lock_mtx);
	lock_destroy(&rm->lock_object);
}

int
rm_wowned(struct rmlock *rm)
{

	if (rm->lock_object.lo_flags & RM_SLEEPABLE)
		return (sx_xlocked(&rm->rm_lock_sx));
	else
		return (mtx_owned(&rm->rm_lock_mtx));
}

void
rm_sysinit(void *arg)
{
	struct rm_args *args = arg;

	rm_init(args->ra_rm, args->ra_desc);
}

void
rm_sysinit_flags(void *arg)
{
	struct rm_args_flags *args = arg;

	rm_init_flags(args->ra_rm, args->ra_desc, args->ra_opts);
}

static int
_rm_rlock_hard(struct rmlock *rm, struct rm_priotracker *tracker, int trylock)
{
	struct pcpu *pc;
	struct rm_queue *queue;
	struct rm_priotracker *atracker;

	critical_enter();
	pc = pcpu_find(curcpu);

	/* Check if we just need to do a proper critical_exit. */
	if (!CPU_ISSET(pc->pc_cpuid, &rm->rm_writecpus)) {
		critical_exit();
		return (1);
	}

	/* Remove our tracker from the per-cpu list. */
	rm_tracker_remove(pc, tracker);

	/* Check to see if the IPI granted us the lock after all. */
	if (tracker->rmp_flags) {
		/* Just add back tracker - we hold the lock. */
		rm_tracker_add(pc, tracker);
		critical_exit();
		return (1);
	}

	/*
	 * We allow readers to acquire a lock even if a writer is blocked,
	 * provided the lock is recursive and the reader already holds it.
	 */
	if ((rm->lock_object.lo_flags & LO_RECURSABLE) != 0) {
		/*
		 * Just grant the lock if this thread already has a tracker
		 * for this lock on the per-cpu queue.
		 */
		for (queue = pc->pc_rm_queue.rmq_next;
		    queue != &pc->pc_rm_queue; queue = queue->rmq_next) {
			atracker = (struct rm_priotracker *)queue;
			if ((atracker->rmp_rmlock == rm) &&
			    (atracker->rmp_thread == tracker->rmp_thread)) {
				mtx_lock_spin(&rm_spinlock);
				LIST_INSERT_HEAD(&rm->rm_activeReaders,
				    tracker, rmp_qentry);
				tracker->rmp_flags = RMPF_ONQUEUE;
				mtx_unlock_spin(&rm_spinlock);
				rm_tracker_add(pc, tracker);
				critical_exit();
				return (1);
			}
		}
	}

	sched_unpin();
	critical_exit();

	if (trylock) {
		if (rm->lock_object.lo_flags & RM_SLEEPABLE) {
			if (!sx_try_xlock(&rm->rm_lock_sx))
				return (0);
		} else {
			if (!mtx_trylock(&rm->rm_lock_mtx))
				return (0);
		}
	} else {
		if (rm->lock_object.lo_flags & RM_SLEEPABLE)
			sx_xlock(&rm->rm_lock_sx);
		else
			mtx_lock(&rm->rm_lock_mtx);
	}

	critical_enter();
	pc = pcpu_find(curcpu);
	CPU_CLR(pc->pc_cpuid, &rm->rm_writecpus);
	rm_tracker_add(pc, tracker);
	sched_pin();
	critical_exit();

	if (rm->lock_object.lo_flags & RM_SLEEPABLE)
		sx_xunlock(&rm->rm_lock_sx);
	else
		mtx_unlock(&rm->rm_lock_mtx);

	return (1);
}

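/*
 * Added commentary (inferred from the function body): the read fast
 * path open-codes critical_enter()/critical_exit() as bare td_critnest
 * updates, with compiler barriers keeping the tracker-list stores
 * inside the critical section.  The slow path is taken only when this
 * CPU has lost its read token (its bit is set in rm_writecpus) or a
 * preemption became pending while td_critnest was raised.
 */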
int
_rm_rlock(struct rmlock *rm, struct rm_priotracker *tracker, int trylock)
{
	struct thread *td = curthread;
	struct pcpu *pc;

	tracker->rmp_flags = 0;
	tracker->rmp_thread = td;
	tracker->rmp_rmlock = rm;

	td->td_critnest++;	/* critical_enter(); */

	compiler_memory_barrier();

	pc = cpuid_to_pcpu[td->td_oncpu];	/* pcpu_find(td->td_oncpu); */

	rm_tracker_add(pc, tracker);

	sched_pin();

	compiler_memory_barrier();

	td->td_critnest--;

	/*
	 * Fast path to combine two common conditions into a single
	 * conditional jump.
	 */
	if (0 == (td->td_owepreempt |
	    CPU_ISSET(pc->pc_cpuid, &rm->rm_writecpus)))
		return (1);

	/* We do not have a read token and need to acquire one. */
	return _rm_rlock_hard(rm, tracker, trylock);
}

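/*
 * Added commentary: the slow path for read unlock.  First let any
 * preemption that was deferred while td_critnest was raised take
 * place; then, if a writer flagged this tracker via rm_cleanIPI(),
 * take it off rm_activeReaders and, when RMPF_SIGNAL is set, wake the
 * writer blocked on the lock's turnstile.
 */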
static void
_rm_unlock_hard(struct thread *td, struct rm_priotracker *tracker)
{

	if (td->td_owepreempt) {
		td->td_critnest++;
		critical_exit();
	}

	if (!tracker->rmp_flags)
		return;

	mtx_lock_spin(&rm_spinlock);
	LIST_REMOVE(tracker, rmp_qentry);

	if (tracker->rmp_flags & RMPF_SIGNAL) {
		struct rmlock *rm;
		struct turnstile *ts;

		rm = tracker->rmp_rmlock;

		turnstile_chain_lock(&rm->lock_object);
		mtx_unlock_spin(&rm_spinlock);

		ts = turnstile_lookup(&rm->lock_object);

		turnstile_signal(ts, TS_EXCLUSIVE_QUEUE);
		turnstile_unpend(ts, TS_EXCLUSIVE_LOCK);
		turnstile_chain_unlock(&rm->lock_object);
	} else
		mtx_unlock_spin(&rm_spinlock);
}

void
_rm_runlock(struct rmlock *rm, struct rm_priotracker *tracker)
{
	struct pcpu *pc;
	struct thread *td = tracker->rmp_thread;

	td->td_critnest++;	/* critical_enter(); */
	pc = cpuid_to_pcpu[td->td_oncpu];	/* pcpu_find(td->td_oncpu); */
	rm_tracker_remove(pc, tracker);
	td->td_critnest--;
	sched_unpin();

	if (0 == (td->td_owepreempt | tracker->rmp_flags))
		return;

	_rm_unlock_hard(td, tracker);
}

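/*
 * Added commentary: the write path.  After taking the internal mutex
 * (or sx lock for RM_SLEEPABLE locks), reclaim the read token from
 * every CPU that holds one via an IPI rendezvous, then sleep on the
 * turnstile until each reader flagged by rm_cleanIPI() has unlocked.
 */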
void
_rm_wlock(struct rmlock *rm)
{
	struct rm_priotracker *prio;
	struct turnstile *ts;
	cpuset_t readcpus;

	if (rm->lock_object.lo_flags & RM_SLEEPABLE)
		sx_xlock(&rm->rm_lock_sx);
	else
		mtx_lock(&rm->rm_lock_mtx);

	if (CPU_CMP(&rm->rm_writecpus, &all_cpus)) {
		/* Get all read tokens back */
		readcpus = all_cpus;
		CPU_NAND(&readcpus, &rm->rm_writecpus);
		rm->rm_writecpus = all_cpus;

		/*
		 * Assumes rm->rm_writecpus update is visible on other CPUs
		 * before rm_cleanIPI is called.
		 */
#ifdef SMP
		smp_rendezvous_cpus(readcpus,
		    smp_no_rendevous_barrier,
		    rm_cleanIPI,
		    smp_no_rendevous_barrier,
		    rm);

#else
		rm_cleanIPI(rm);
#endif

		mtx_lock_spin(&rm_spinlock);
		while ((prio = LIST_FIRST(&rm->rm_activeReaders)) != NULL) {
			ts = turnstile_trywait(&rm->lock_object);
			prio->rmp_flags = RMPF_ONQUEUE | RMPF_SIGNAL;
			mtx_unlock_spin(&rm_spinlock);
			turnstile_wait(ts, prio->rmp_thread,
			    TS_EXCLUSIVE_QUEUE);
			mtx_lock_spin(&rm_spinlock);
		}
		mtx_unlock_spin(&rm_spinlock);
	}
}

void
_rm_wunlock(struct rmlock *rm)
{

	if (rm->lock_object.lo_flags & RM_SLEEPABLE)
		sx_xunlock(&rm->rm_lock_sx);
	else
		mtx_unlock(&rm->rm_lock_mtx);
}

#ifdef LOCK_DEBUG

void
_rm_wlock_debug(struct rmlock *rm, const char *file, int line)
{

	WITNESS_CHECKORDER(&rm->lock_object, LOP_NEWORDER | LOP_EXCLUSIVE,
	    file, line, NULL);

	_rm_wlock(rm);

	LOCK_LOG_LOCK("RMWLOCK", &rm->lock_object, 0, 0, file, line);

	if (rm->lock_object.lo_flags & RM_SLEEPABLE)
		WITNESS_LOCK(&rm->rm_lock_sx.lock_object, LOP_EXCLUSIVE,
		    file, line);
	else
		WITNESS_LOCK(&rm->lock_object, LOP_EXCLUSIVE, file, line);

	curthread->td_locks++;
}

void
_rm_wunlock_debug(struct rmlock *rm, const char *file, int line)
{

	curthread->td_locks--;
	if (rm->lock_object.lo_flags & RM_SLEEPABLE)
		WITNESS_UNLOCK(&rm->rm_lock_sx.lock_object, LOP_EXCLUSIVE,
		    file, line);
	else
		WITNESS_UNLOCK(&rm->lock_object, LOP_EXCLUSIVE, file, line);
	LOCK_LOG_LOCK("RMWUNLOCK", &rm->lock_object, 0, 0, file, line);
	_rm_wunlock(rm);
}

int
_rm_rlock_debug(struct rmlock *rm, struct rm_priotracker *tracker,
    int trylock, const char *file, int line)
{

	if (!trylock && (rm->lock_object.lo_flags & RM_SLEEPABLE))
		WITNESS_CHECKORDER(&rm->rm_lock_sx.lock_object, LOP_NEWORDER,
		    file, line, NULL);
	WITNESS_CHECKORDER(&rm->lock_object, LOP_NEWORDER, file, line, NULL);

	if (_rm_rlock(rm, tracker, trylock)) {
		LOCK_LOG_LOCK("RMRLOCK", &rm->lock_object, 0, 0, file, line);

		WITNESS_LOCK(&rm->lock_object, 0, file, line);

		curthread->td_locks++;

		return (1);
	}

	return (0);
}

void
_rm_runlock_debug(struct rmlock *rm, struct rm_priotracker *tracker,
    const char *file, int line)
{

	curthread->td_locks--;
	WITNESS_UNLOCK(&rm->lock_object, 0, file, line);
	LOCK_LOG_LOCK("RMRUNLOCK", &rm->lock_object, 0, 0, file, line);
	_rm_runlock(rm, tracker);
}

#else

/*
 * Just strip out the file and line arguments if no lock debugging is
 * enabled in the kernel; we may be called from a kernel module.
 */
void
_rm_wlock_debug(struct rmlock *rm, const char *file, int line)
{

	_rm_wlock(rm);
}

void
_rm_wunlock_debug(struct rmlock *rm, const char *file, int line)
{

	_rm_wunlock(rm);
}

int
_rm_rlock_debug(struct rmlock *rm, struct rm_priotracker *tracker,
    int trylock, const char *file, int line)
{

	return _rm_rlock(rm, tracker, trylock);
}

void
_rm_runlock_debug(struct rmlock *rm, struct rm_priotracker *tracker,
    const char *file, int line)
{

	_rm_runlock(rm, tracker);
}

#endif