/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2012, 2013 Konstantin Belousov <kib@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#ifndef __MACHINE_COUNTER_H__
#define	__MACHINE_COUNTER_H__

#include <sys/pcpu.h>
#ifdef INVARIANTS
#include <sys/proc.h>
#endif

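/*
 * Note: EARLY_COUNTER points every not-yet-allocated counter at a dummy
 * slot in CPU 0's pcpu region, so that counters incremented before the
 * per-CPU counter zone is set up have valid backing storage; such early
 * increments are simply discarded.
 */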
#define	EARLY_COUNTER	&__pcpu[0].pc_early_dummy_counter

#ifdef __powerpc64__

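/*
 * counter_enter()/counter_exit() are no-ops here: counter_u64_add()
 * below updates this CPU's slot with a single ldarx/stdcx. atomic
 * sequence, so no critical section is needed around counter updates.
 */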
#define	counter_enter()	do {} while (0)
#define	counter_exit()	do {} while (0)

#ifdef IN_SUBR_COUNTER_C
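/*
 * Per-CPU counter slots are laid out UMA_PCPU_ALLOC_SIZE bytes apart,
 * so CPU "cpu"'s copy of the counter lives at that fixed byte offset
 * from the base pointer.
 */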
static inline uint64_t
counter_u64_read_one(uint64_t *p, int cpu)
{

	return (*(uint64_t *)((char *)p + UMA_PCPU_ALLOC_SIZE * cpu));
}

static inline uint64_t
counter_u64_fetch_inline(uint64_t *p)
{
	uint64_t r;
	int i;

	r = 0;
	CPU_FOREACH(i)
		r += counter_u64_read_one((uint64_t *)p, i);

	return (r);
}

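/*
 * Zeroing runs counter_u64_zero_one_cpu() on every CPU via
 * smp_rendezvous(), so each CPU clears only its own slot and the store
 * happens on the same CPU that performs the atomic update loop.
 */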
static void
counter_u64_zero_one_cpu(void *arg)
{

	*((uint64_t *)((char *)arg + UMA_PCPU_ALLOC_SIZE *
	    PCPU_GET(cpuid))) = 0;
}

static inline void
counter_u64_zero_inline(counter_u64_t c)
{

	smp_rendezvous(smp_no_rendezvous_barrier, counter_u64_zero_one_cpu,
	    smp_no_rendezvous_barrier, c);
}
#endif

#define	counter_u64_add_protected(c, i)	counter_u64_add(c, i)

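/*
 * Lock-free per-CPU increment: mfsprg fetches this CPU's pcpu base from
 * SPRG0, and %2 carries the counter's byte offset from &__pcpu[0], so
 * base + offset addresses this CPU's slot.  ldarx/stdcx. form a
 * load-reserve/store-conditional update, and bne- retries if the
 * reservation was lost (e.g. to an intervening store or context switch).
 */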
static inline void
counter_u64_add(counter_u64_t c, int64_t inc)
{
	uint64_t ccpu, old;

	__asm __volatile("\n"
	    "1:\n\t"
	    "mfsprg	%0, 0\n\t"
	    "ldarx	%1, %0, %2\n\t"
	    "add	%1, %1, %3\n\t"
	    "stdcx.	%1, %0, %2\n\t"
	    "bne-	1b"
	    : "=&b" (ccpu), "=&r" (old)
	    : "r" ((char *)c - (char *)&__pcpu[0]), "r" (inc)
	    : "cr0", "memory");
}

#else /* !64bit */

#define	counter_enter()	critical_enter()
#define	counter_exit()	critical_exit()

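/*
 * 32-bit powerpc has no 64-bit atomic operations, so updates are made
 * safe against preemption by wrapping them in a critical section
 * instead; reads and zeroing of other CPUs' slots remain racy, as the
 * XXXKIB notes below record.
 */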
#ifdef IN_SUBR_COUNTER_C
/* XXXKIB non-atomic 64bit read */
static inline uint64_t
counter_u64_read_one(uint64_t *p, int cpu)
{

	return (*(uint64_t *)((char *)p + UMA_PCPU_ALLOC_SIZE * cpu));
}

static inline uint64_t
counter_u64_fetch_inline(uint64_t *p)
{
	uint64_t r;
	int i;

	r = 0;
	CPU_FOREACH(i)
		r += counter_u64_read_one((uint64_t *)p, i);

	return (r);
}

/* XXXKIB non-atomic 64bit store, might interrupt increment */
static void
counter_u64_zero_one_cpu(void *arg)
{

	*((uint64_t *)((char *)arg + UMA_PCPU_ALLOC_SIZE *
	    PCPU_GET(cpuid))) = 0;
}

static inline void
counter_u64_zero_inline(counter_u64_t c)
{

	smp_rendezvous(smp_no_rendezvous_barrier, counter_u64_zero_one_cpu,
	    smp_no_rendezvous_barrier, c);
}
#endif

#define	counter_u64_add_protected(c, inc)	do {	\
	CRITICAL_ASSERT(curthread);			\
	*(uint64_t *)zpcpu_get(c) += (inc);		\
} while (0)

static inline void
counter_u64_add(counter_u64_t c, int64_t inc)
{

	counter_enter();
	counter_u64_add_protected(c, inc);
	counter_exit();
}

#endif /* 64bit */
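
/*
 * Typical counter(9) usage sketch (consumer code, not part of this
 * header): allocate with counter_u64_alloc(M_WAITOK), bump with
 * counter_u64_add(c, 1), read an approximate total with
 * counter_u64_fetch(c), and release with counter_u64_free(c).
 */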

#endif /* ! __MACHINE_COUNTER_H__ */