1 /*-
2 * Copyright (c) 2017 Emmanuel Vadot <manu@freebsd.org>
3 *
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions
6 * are met:
7 * 1. Redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer.
9 * 2. Redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution.
12 *
13 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
14 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
15 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
16 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
17 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
18 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
19 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
20 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
21 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
22 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
23 * SUCH DAMAGE.
24 *
25 * $FreeBSD$
26 */
27
28 #include <sys/cdefs.h>
29 __FBSDID("$FreeBSD$");
30
31 #include <sys/param.h>
32 #include <sys/systm.h>
33 #include <sys/bus.h>
34
35 #include <dev/extres/clk/clk.h>
36
37 #include <arm/allwinner/clkng/aw_clk.h>
38 #include <arm/allwinner/clkng/aw_clk_nkmp.h>
39
40 #include "clkdev_if.h"
41
42 /*
43 * clknode for clocks matching the formula :
44 *
45 * clk = (clkin * n * k) / (m * p)
46 *
47 */
48
/*
 * Per-instance software context for an NKMP clock node.  All fields are
 * copied from the static struct aw_clk_nkmp_def at registration time.
 */
struct aw_clk_nkmp_sc {
	uint32_t	offset;		/* Offset of the clock control register */

	/* Bitfield descriptions of the four factors of the formula. */
	struct aw_clk_factor	n;
	struct aw_clk_factor	k;
	struct aw_clk_factor	m;
	struct aw_clk_factor	p;

	uint32_t	mux_shift;	/* First bit of the parent mux field */
	uint32_t	mux_mask;	/* Pre-shifted mask of the mux field */
	uint32_t	gate_shift;	/* Bit controlling the clock gate */
	uint32_t	lock_shift;	/* Bit that reads 1 when the PLL is locked */
	uint32_t	lock_retries;	/* Max number of lock-bit polls */
	uint32_t	update_shift;	/* Bit latching new factors (AW_CLK_HAS_UPDATE) */

	uint32_t	flags;		/* AW_CLK_HAS_* / AW_CLK_SCALE_CHANGE flags */
};
66
/*
 * Convenience wrappers: all register accesses and locking go through the
 * clkdev interface of the device owning this clock node.
 */
#define	WRITE4(_clk, off, val)						\
	CLKDEV_WRITE_4(clknode_get_device(_clk), off, val)
#define	READ4(_clk, off, val)						\
	CLKDEV_READ_4(clknode_get_device(_clk), off, val)
#define	MODIFY4(_clk, off, clr, set )					\
	CLKDEV_MODIFY_4(clknode_get_device(_clk), off, clr, set)
#define	DEVICE_LOCK(_clk)						\
	CLKDEV_DEVICE_LOCK(clknode_get_device(_clk))
#define	DEVICE_UNLOCK(_clk)						\
	CLKDEV_DEVICE_UNLOCK(clknode_get_device(_clk))
77
78 static int
79 aw_clk_nkmp_init(struct clknode *clk, device_t dev)
80 {
81 struct aw_clk_nkmp_sc *sc;
82 uint32_t val, idx;
83
84 sc = clknode_get_softc(clk);
85
86 idx = 0;
87 if ((sc->flags & AW_CLK_HAS_MUX) != 0) {
88 DEVICE_LOCK(clk);
89 READ4(clk, sc->offset, &val);
90 DEVICE_UNLOCK(clk);
91
92 idx = (val & sc->mux_mask) >> sc->mux_shift;
93 }
94
95 clknode_init_parent_idx(clk, idx);
96 return (0);
97 }
98
99 static int
100 aw_clk_nkmp_set_gate(struct clknode *clk, bool enable)
101 {
102 struct aw_clk_nkmp_sc *sc;
103 uint32_t val;
104
105 sc = clknode_get_softc(clk);
106
107 if ((sc->flags & AW_CLK_HAS_GATE) == 0)
108 return (0);
109
110 DEVICE_LOCK(clk);
111 READ4(clk, sc->offset, &val);
112 if (enable)
113 val |= (1 << sc->gate_shift);
114 else
115 val &= ~(1 << sc->gate_shift);
116 WRITE4(clk, sc->offset, val);
117 DEVICE_UNLOCK(clk);
118
119 return (0);
120 }
121
122 static int
123 aw_clk_nkmp_set_mux(struct clknode *clk, int index)
124 {
125 struct aw_clk_nkmp_sc *sc;
126 uint32_t val;
127
128 sc = clknode_get_softc(clk);
129
130 if ((sc->flags & AW_CLK_HAS_MUX) == 0)
131 return (0);
132
133 DEVICE_LOCK(clk);
134 READ4(clk, sc->offset, &val);
135 val &= ~sc->mux_mask;
136 val |= index << sc->mux_shift;
137 WRITE4(clk, sc->offset, val);
138 DEVICE_UNLOCK(clk);
139
140 return (0);
141 }
142
143 static uint64_t
144 aw_clk_nkmp_find_best(struct aw_clk_nkmp_sc *sc, uint64_t fparent, uint64_t *fout,
145 uint32_t *factor_n, uint32_t *factor_k, uint32_t *factor_m, uint32_t *factor_p)
146 {
147 uint64_t cur, best;
148 uint32_t n, k, m, p;
149
150 best = 0;
151 *factor_n = 0;
152 *factor_k = 0;
153 *factor_m = 0;
154 *factor_p = 0;
155
156 for (n = aw_clk_factor_get_min(&sc->n); n <= aw_clk_factor_get_max(&sc->n); ) {
157 for (k = aw_clk_factor_get_min(&sc->k); k <= aw_clk_factor_get_max(&sc->k); ) {
158 for (m = aw_clk_factor_get_min(&sc->m); m <= aw_clk_factor_get_max(&sc->m); ) {
159 for (p = aw_clk_factor_get_min(&sc->p); p <= aw_clk_factor_get_max(&sc->p); ) {
160 cur = (fparent * n * k) / (m * p);
161 if ((*fout - cur) < (*fout - best)) {
162 best = cur;
163 *factor_n = n;
164 *factor_k = k;
165 *factor_m = m;
166 *factor_p = p;
167 }
168 if (best == *fout)
169 return (best);
170 if ((sc->p.flags & AW_CLK_FACTOR_POWER_OF_TWO) != 0)
171 p <<= 1;
172 else
173 p++;
174 }
175 if ((sc->m.flags & AW_CLK_FACTOR_POWER_OF_TWO) != 0)
176 m <<= 1;
177 else
178 m++;
179 }
180 if ((sc->k.flags & AW_CLK_FACTOR_POWER_OF_TWO) != 0)
181 k <<= 1;
182 else
183 k++;
184 }
185 if ((sc->n.flags & AW_CLK_FACTOR_POWER_OF_TWO) != 0)
186 n <<= 1;
187 else
188 n++;
189 }
190
191 return best;
192 }
193
/*
 * Program new factors for a clock flagged AW_CLK_SCALE_CHANGE.
 *
 * The write order is deliberate: the dividers m and p are *increased*
 * before the multipliers n and k are written, and *decreased* only
 * afterwards, so the output frequency never transiently overshoots the
 * target while the PLL retunes.  Do not reorder these writes.
 *
 * NOTE(review): the 2000us settle delay after each write and the
 * 1000us lock-poll interval look vendor-derived — confirm against the
 * SoC user manual.  The whole sequence, including the lock poll, runs
 * with the device lock held.
 */
static void
aw_clk_nkmp_set_freq_scale(struct clknode *clk, struct aw_clk_nkmp_sc *sc,
    uint32_t factor_n, uint32_t factor_k, uint32_t factor_m, uint32_t factor_p)
{
	uint32_t val, m, p;
	int retry;

	DEVICE_LOCK(clk);
	READ4(clk, sc->offset, &val);

	/* Current divider settings, for deciding raise-vs-lower order. */
	m = aw_clk_get_factor(val, &sc->m);
	p = aw_clk_get_factor(val, &sc->p);

	/* Raise p first if it is growing. */
	if (p < factor_p) {
		val &= ~sc->p.mask;
		val |= aw_clk_factor_get_value(&sc->p, factor_p) << sc->p.shift;
		WRITE4(clk, sc->offset, val);
		DELAY(2000);
	}

	/* Raise m first if it is growing. */
	if (m < factor_m) {
		val &= ~sc->m.mask;
		val |= aw_clk_factor_get_value(&sc->m, factor_m) << sc->m.shift;
		WRITE4(clk, sc->offset, val);
		DELAY(2000);
	}

	/* Write the multipliers n and k in a single register update. */
	val &= ~sc->n.mask;
	val &= ~sc->k.mask;
	val |= aw_clk_factor_get_value(&sc->n, factor_n) << sc->n.shift;
	val |= aw_clk_factor_get_value(&sc->k, factor_k) << sc->k.shift;
	WRITE4(clk, sc->offset, val);
	DELAY(2000);

	/* Lower m afterwards if it is shrinking. */
	if (m > factor_m) {
		val &= ~sc->m.mask;
		val |= aw_clk_factor_get_value(&sc->m, factor_m) << sc->m.shift;
		WRITE4(clk, sc->offset, val);
		DELAY(2000);
	}

	/* Lower p afterwards if it is shrinking. */
	if (p > factor_p) {
		val &= ~sc->p.mask;
		val |= aw_clk_factor_get_value(&sc->p, factor_p) << sc->p.shift;
		WRITE4(clk, sc->offset, val);
		DELAY(2000);
	}

	/* Wait (bounded by lock_retries) for the PLL to report lock. */
	if ((sc->flags & AW_CLK_HAS_LOCK) != 0) {
		for (retry = 0; retry < sc->lock_retries; retry++) {
			READ4(clk, sc->offset, &val);
			if ((val & (1 << sc->lock_shift)) != 0)
				break;
			DELAY(1000);
		}
	}

	DEVICE_UNLOCK(clk);
}
253
254 static int
255 aw_clk_nkmp_set_freq(struct clknode *clk, uint64_t fparent, uint64_t *fout,
256 int flags, int *stop)
257 {
258 struct aw_clk_nkmp_sc *sc;
259 uint64_t best;
260 uint32_t val, best_n, best_k, best_m, best_p;
261 int retry;
262
263 sc = clknode_get_softc(clk);
264
265 best = aw_clk_nkmp_find_best(sc, fparent, fout,
266 &best_n, &best_k, &best_m, &best_p);
267 if ((flags & CLK_SET_DRYRUN) != 0) {
268 *fout = best;
269 *stop = 1;
270 return (0);
271 }
272
273 if ((best < *fout) &&
274 ((flags & CLK_SET_ROUND_DOWN) != 0)) {
275 *stop = 1;
276 return (ERANGE);
277 }
278 if ((best > *fout) &&
279 ((flags & CLK_SET_ROUND_UP) != 0)) {
280 *stop = 1;
281 return (ERANGE);
282 }
283
284 if ((sc->flags & AW_CLK_SCALE_CHANGE) != 0)
285 aw_clk_nkmp_set_freq_scale(clk, sc,
286 best_n, best_k, best_m, best_p);
287 else {
288 DEVICE_LOCK(clk);
289 READ4(clk, sc->offset, &val);
290 val &= ~sc->n.mask;
291 val &= ~sc->k.mask;
292 val &= ~sc->m.mask;
293 val &= ~sc->p.mask;
294 val |= aw_clk_factor_get_value(&sc->n, best_n) << sc->n.shift;
295 val |= aw_clk_factor_get_value(&sc->k, best_k) << sc->k.shift;
296 val |= aw_clk_factor_get_value(&sc->m, best_m) << sc->m.shift;
297 val |= aw_clk_factor_get_value(&sc->p, best_p) << sc->p.shift;
298 WRITE4(clk, sc->offset, val);
299 DELAY(2000);
300 DEVICE_UNLOCK(clk);
301
302 if ((sc->flags & AW_CLK_HAS_UPDATE) != 0) {
303 DEVICE_LOCK(clk);
304 READ4(clk, sc->offset, &val);
305 val |= 1 << sc->update_shift;
306 WRITE4(clk, sc->offset, val);
307 DELAY(2000);
308 DEVICE_UNLOCK(clk);
309 }
310
311 if ((sc->flags & AW_CLK_HAS_LOCK) != 0) {
312 for (retry = 0; retry < sc->lock_retries; retry++) {
313 READ4(clk, sc->offset, &val);
314 if ((val & (1 << sc->lock_shift)) != 0)
315 break;
316 DELAY(1000);
317 }
318 }
319 }
320
321 *fout = best;
322 *stop = 1;
323
324 return (0);
325 }
326
327 static int
328 aw_clk_nkmp_recalc(struct clknode *clk, uint64_t *freq)
329 {
330 struct aw_clk_nkmp_sc *sc;
331 uint32_t val, m, n, k, p;
332
333 sc = clknode_get_softc(clk);
334
335 DEVICE_LOCK(clk);
336 READ4(clk, sc->offset, &val);
337 DEVICE_UNLOCK(clk);
338
339 n = aw_clk_get_factor(val, &sc->n);
340 k = aw_clk_get_factor(val, &sc->k);
341 m = aw_clk_get_factor(val, &sc->m);
342 p = aw_clk_get_factor(val, &sc->p);
343
344 *freq = (*freq * n * k) / (m * p);
345
346 return (0);
347 }
348
/* Method table binding this driver's implementations to the clknode class. */
static clknode_method_t aw_nkmp_clknode_methods[] = {
	/* clknode interface */
	CLKNODEMETHOD(clknode_init,		aw_clk_nkmp_init),
	CLKNODEMETHOD(clknode_set_gate,		aw_clk_nkmp_set_gate),
	CLKNODEMETHOD(clknode_set_mux,		aw_clk_nkmp_set_mux),
	CLKNODEMETHOD(clknode_recalc_freq,	aw_clk_nkmp_recalc),
	CLKNODEMETHOD(clknode_set_freq,		aw_clk_nkmp_set_freq),
	CLKNODEMETHOD_END
};

/* Subclass of the generic clknode class, with our softc attached. */
DEFINE_CLASS_1(aw_nkmp_clknode, aw_nkmp_clknode_class, aw_nkmp_clknode_methods,
    sizeof(struct aw_clk_nkmp_sc), clknode_class);
361
362 int
363 aw_clk_nkmp_register(struct clkdom *clkdom, struct aw_clk_nkmp_def *clkdef)
364 {
365 struct clknode *clk;
366 struct aw_clk_nkmp_sc *sc;
367
368 clk = clknode_create(clkdom, &aw_nkmp_clknode_class, &clkdef->clkdef);
369 if (clk == NULL)
370 return (1);
371
372 sc = clknode_get_softc(clk);
373
374 sc->offset = clkdef->offset;
375
376 sc->n.shift = clkdef->n.shift;
377 sc->n.width = clkdef->n.width;
378 sc->n.mask = ((1 << clkdef->n.width) - 1) << sc->n.shift;
379 sc->n.value = clkdef->n.value;
380 sc->n.flags = clkdef->n.flags;
381
382 sc->k.shift = clkdef->k.shift;
383 sc->k.width = clkdef->k.width;
384 sc->k.mask = ((1 << clkdef->k.width) - 1) << sc->k.shift;
385 sc->k.value = clkdef->k.value;
386 sc->k.flags = clkdef->k.flags;
387
388 sc->m.shift = clkdef->m.shift;
389 sc->m.width = clkdef->m.width;
390 sc->m.mask = ((1 << clkdef->m.width) - 1) << sc->m.shift;
391 sc->m.value = clkdef->m.value;
392 sc->m.flags = clkdef->m.flags;
393
394 sc->p.shift = clkdef->p.shift;
395 sc->p.width = clkdef->p.width;
396 sc->p.mask = ((1 << clkdef->p.width) - 1) << sc->p.shift;
397 sc->p.value = clkdef->p.value;
398 sc->p.flags = clkdef->p.flags;
399
400 sc->mux_shift = clkdef->mux_shift;
401 sc->mux_mask = ((1 << clkdef->mux_width) - 1) << sc->mux_shift;
402
403 sc->gate_shift = clkdef->gate_shift;
404 sc->lock_shift = clkdef->lock_shift;
405 sc->lock_retries = clkdef->lock_retries;
406 sc->update_shift = clkdef->update_shift;
407 sc->flags = clkdef->flags;
408
409 clknode_register(clkdom, clk);
410
411 return (0);
412 }
Cache object: 280b9f1b930cded865181b8cf19c1ddc
|