1 /*-
2 * Copyright (c) 2006 Sam Leffler, Errno Consulting
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer,
10 * without modification.
11 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
12 * similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any
13 * redistribution must be conditioned upon including a substantially
14 * similar Disclaimer requirement for further binary redistribution.
15 *
16 * NO WARRANTY
17 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
18 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
19 * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY
20 * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
21 * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY,
22 * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
23 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
24 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
25 * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
26 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
27 * THE POSSIBILITY OF SUCH DAMAGES.
28 */
29
30 /*-
31 * Copyright (c) 2001-2005, Intel Corporation.
32 * All rights reserved.
33 *
34 * Redistribution and use in source and binary forms, with or without
35 * modification, are permitted provided that the following conditions
36 * are met:
37 * 1. Redistributions of source code must retain the above copyright
38 * notice, this list of conditions and the following disclaimer.
39 * 2. Redistributions in binary form must reproduce the above copyright
40 * notice, this list of conditions and the following disclaimer in the
41 * documentation and/or other materials provided with the distribution.
42 * 3. Neither the name of the Intel Corporation nor the names of its contributors
43 * may be used to endorse or promote products derived from this software
44 * without specific prior written permission.
45 *
46 *
47 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ``AS IS''
48 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
49 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
50 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
51 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
52 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
53 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
54 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
55 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
56 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
57 * SUCH DAMAGE.
58 */
59 #include <sys/cdefs.h>
60 __FBSDID("$FreeBSD: releng/11.0/sys/arm/xscale/ixp425/ixp425_qmgr.c 299069 2016-05-04 15:48:59Z pfg $");
61
62 /*
63 * Intel XScale Queue Manager support.
64 *
65 * Each IXP4XXX device has a hardware block that implements a priority
66 * queue manager that is shared between the XScale cpu and the backend
67 * devices (such as the NPE). Queues are accessed by reading/writing
68 * special memory locations. The queue contents are mapped into a shared
69 * SRAM region with entries managed in a circular buffer. The XScale
70 * processor can receive interrupts based on queue contents (a condition
71 * code determines when interrupts should be delivered).
72 *
73 * The code here basically replaces the qmgr class in the Intel Access
74 * Library (IAL).
75 */
76 #include <sys/param.h>
77 #include <sys/systm.h>
78 #include <sys/kernel.h>
79 #include <sys/module.h>
80 #include <sys/time.h>
81 #include <sys/bus.h>
82 #include <sys/resource.h>
83 #include <sys/rman.h>
84 #include <sys/sysctl.h>
85
86 #include <machine/bus.h>
87 #include <machine/cpu.h>
88 #include <machine/cpufunc.h>
89 #include <machine/resource.h>
90 #include <machine/intr.h>
91 #include <arm/xscale/ixp425/ixp425reg.h>
92 #include <arm/xscale/ixp425/ixp425var.h>
93
94 #include <arm/xscale/ixp425/ixp425_qmgr.h>
95
/*
 * State per AQM hw queue.
 * This structure holds q configuration and dispatch state.
 */
struct qmgrInfo {
	int qSizeInWords;		/* queue size in words */

	uint32_t qOflowStatBitMask;	/* overflow status mask */
	int qWriteCount;		/* queue write count */

	bus_size_t qAccRegAddr;		/* access register */
	bus_size_t qUOStatRegAddr;	/* underflow/overflow status register */
	bus_size_t qConfigRegAddr;	/* config register */
	int qSizeInEntries;		/* queue size in entries */

	uint32_t qUflowStatBitMask;	/* underflow status mask */
	int qReadCount;			/* queue read count */

	/*
	 * XXX union: qStatRegAddr/qStatBitsOffset are filled in only
	 * for the lower queues (0-31) and qStat0BitMask/qStat1BitMask
	 * only for the upper queues (32-63); see ixpqmgr_attach.
	 */
	uint32_t qStatRegAddr;		/* status reg address (lower q's) */
	uint32_t qStatBitsOffset;	/* status bit offset (lower q's) */
	uint32_t qStat0BitMask;		/* nearly-empty status mask (upper q's) */
	uint32_t qStat1BitMask;		/* full status mask (upper q's) */

	uint32_t intRegCheckMask;	/* interrupt reg check mask */
	void (*cb)(int, void *);	/* callback function */
	void *cbarg;			/* callback argument */
	int priority;			/* dispatch priority */
#if 0
	/* NB: needed only for A0 parts */
	u_int statusWordOffset;		/* status word offset */
	uint32_t statusMask;		/* status mask */
	uint32_t statusCheckValue;	/* status check value */
#endif
};
131
/* Per-device software state; a single instance exists (see ixpqmgr_sc). */
struct ixpqmgr_softc {
	device_t sc_dev;		/* backpointer to the device */
	bus_space_tag_t sc_iot;		/* bus tag for register access */
	bus_space_handle_t sc_ioh;	/* mapped AQM register window */

	struct resource *sc_irq1;	/* IRQ resource, queues 1-32 */
	void *sc_ih1;			/* interrupt handler */
	int sc_rid1;			/* resource id for irq */

	struct resource *sc_irq2;	/* IRQ resource, queues 33-64 */
	void *sc_ih2;
	int sc_rid2;

	struct qmgrInfo qinfo[IX_QMGR_MAX_NUM_QUEUES];
	/*
	 * This array contains a list of queue identifiers ordered by
	 * priority. The table is split logically between queue
	 * identifiers 0-31 and 32-63. To optimize lookups bit masks
	 * are kept for the first-32 and last-32 q's. When the
	 * table needs to be rebuilt mark rebuildTable and it'll
	 * happen after the next interrupt.
	 */
	int priorityTable[IX_QMGR_MAX_NUM_QUEUES];
	uint32_t lowPriorityTableFirstHalfMask;
	uint32_t uppPriorityTableFirstHalfMask;
	int rebuildTable;		/* rebuild priorityTable */

	uint32_t aqmFreeSramAddress;	/* SRAM free space */
};
161
/* Debug verbosity; settable/tunable through sysctl debug.qmgr. */
static int qmgr_debug;
SYSCTL_INT(_debug, OID_AUTO, qmgr, CTLFLAG_RWTUN, &qmgr_debug,
	   0, "IXP4XX Q-Manager debug msgs");
/* NB: the dev argument is currently unused; output goes to the console. */
#define	DPRINTF(dev, fmt, ...) do {					\
	if (qmgr_debug) printf(fmt, __VA_ARGS__);			\
} while (0)
/* As DPRINTF but only when the debug level is at least n. */
#define	DPRINTFn(n, dev, fmt, ...) do {					\
	if (qmgr_debug >= n) printf(fmt, __VA_ARGS__);			\
} while (0)

/* Softc of the single qmgr instance; the public ixpqmgr_* API uses it. */
static struct ixpqmgr_softc *ixpqmgr_sc = NULL;

static void ixpqmgr_rebuild(struct ixpqmgr_softc *);
static void ixpqmgr_intr(void *);

static void aqm_int_enable(struct ixpqmgr_softc *sc, int qId);
static void aqm_int_disable(struct ixpqmgr_softc *sc, int qId);
static void aqm_qcfg(struct ixpqmgr_softc *sc, int qId, u_int ne, u_int nf);
static void aqm_srcsel_write(struct ixpqmgr_softc *sc, int qId, int sourceId);
static void aqm_reset(struct ixpqmgr_softc *sc);
182
/*
 * Default callback installed for queues that have no client
 * handler; notifications are silently dropped.
 */
static void
dummyCallback(int qId, void *arg)
{
	/* XXX complain */
}
188
/* Read a 32-bit AQM register at byte offset off. */
static uint32_t
aqm_reg_read(struct ixpqmgr_softc *sc, bus_size_t off)
{
	DPRINTFn(9, sc->sc_dev, "%s(0x%x)\n", __func__, (int)off);
	return bus_space_read_4(sc->sc_iot, sc->sc_ioh, off);
}
195
/* Write a 32-bit AQM register at byte offset off. */
static void
aqm_reg_write(struct ixpqmgr_softc *sc, bus_size_t off, uint32_t val)
{
	DPRINTFn(9, sc->sc_dev, "%s(0x%x, 0x%x)\n", __func__, (int)off, val);
	bus_space_write_4(sc->sc_iot, sc->sc_ioh, off, val);
}
202
203 static int
204 ixpqmgr_probe(device_t dev)
205 {
206 device_set_desc(dev, "IXP4XX Q-Manager");
207 return 0;
208 }
209
210 static int
211 ixpqmgr_attach(device_t dev)
212 {
213 struct ixpqmgr_softc *sc = device_get_softc(dev);
214 struct ixp425_softc *sa = device_get_softc(device_get_parent(dev));
215 int i, err;
216
217 ixpqmgr_sc = sc;
218
219 sc->sc_dev = dev;
220 sc->sc_iot = sa->sc_iot;
221 if (bus_space_map(sc->sc_iot, IXP425_QMGR_HWBASE, IXP425_QMGR_SIZE,
222 0, &sc->sc_ioh))
223 panic("%s: Cannot map registers", device_get_name(dev));
224
225 /* NB: we only use the lower 32 q's */
226
227 /* Set up QMGR interrupts */
228 sc->sc_rid1 = 0;
229 sc->sc_irq1 = bus_alloc_resource(dev, SYS_RES_IRQ, &sc->sc_rid1,
230 IXP425_INT_QUE1_32, IXP425_INT_QUE1_32, 1, RF_ACTIVE);
231 sc->sc_rid2 = 1;
232 sc->sc_irq2 = bus_alloc_resource(dev, SYS_RES_IRQ, &sc->sc_rid2,
233 IXP425_INT_QUE33_64, IXP425_INT_QUE33_64, 1, RF_ACTIVE);
234
235 if (sc->sc_irq1 == NULL || sc->sc_irq2 == NULL)
236 panic("Unable to allocate the qmgr irqs.\n");
237
238 err = bus_setup_intr(dev, sc->sc_irq1, INTR_TYPE_NET | INTR_MPSAFE,
239 NULL, ixpqmgr_intr, NULL, &sc->sc_ih1);
240 if (err) {
241 device_printf(dev, "failed to set up qmgr irq=%d\n",
242 IXP425_INT_QUE1_32);
243 return (ENXIO);
244 }
245 err = bus_setup_intr(dev, sc->sc_irq2, INTR_TYPE_NET | INTR_MPSAFE,
246 NULL, ixpqmgr_intr, NULL, &sc->sc_ih2);
247 if (err) {
248 device_printf(dev, "failed to set up qmgr irq=%d\n",
249 IXP425_INT_QUE33_64);
250 return (ENXIO);
251 }
252
253 /* NB: softc is pre-zero'd */
254 for (i = 0; i < IX_QMGR_MAX_NUM_QUEUES; i++) {
255 struct qmgrInfo *qi = &sc->qinfo[i];
256
257 qi->cb = dummyCallback;
258 qi->priority = IX_QMGR_Q_PRIORITY_0; /* default priority */
259 /*
260 * There are two interrupt registers, 32 bits each. One
261 * for the lower queues(0-31) and one for the upper
262 * queues(32-63). Therefore need to mod by 32 i.e the
263 * min upper queue identifier.
264 */
265 qi->intRegCheckMask = (1<<(i%(IX_QMGR_MIN_QUEUPP_QID)));
266
267 /*
268 * Register addresses and bit masks are calculated and
269 * stored here to optimize QRead, QWrite and QStatusGet
270 * functions.
271 */
272
273 /* AQM Queue access reg addresses, per queue */
274 qi->qAccRegAddr = IX_QMGR_Q_ACCESS_ADDR_GET(i);
275 qi->qAccRegAddr = IX_QMGR_Q_ACCESS_ADDR_GET(i);
276 qi->qConfigRegAddr = IX_QMGR_Q_CONFIG_ADDR_GET(i);
277
278 /* AQM Queue lower-group (0-31), only */
279 if (i < IX_QMGR_MIN_QUEUPP_QID) {
280 /* AQM Q underflow/overflow status reg address, per queue */
281 qi->qUOStatRegAddr = IX_QMGR_QUEUOSTAT0_OFFSET +
282 ((i / IX_QMGR_QUEUOSTAT_NUM_QUE_PER_WORD) *
283 sizeof(uint32_t));
284
285 /* AQM Q underflow status bit masks for status reg per queue */
286 qi->qUflowStatBitMask =
287 (IX_QMGR_UNDERFLOW_BIT_OFFSET + 1) <<
288 ((i & (IX_QMGR_QUEUOSTAT_NUM_QUE_PER_WORD - 1)) *
289 (32 / IX_QMGR_QUEUOSTAT_NUM_QUE_PER_WORD));
290
291 /* AQM Q overflow status bit masks for status reg, per queue */
292 qi->qOflowStatBitMask =
293 (IX_QMGR_OVERFLOW_BIT_OFFSET + 1) <<
294 ((i & (IX_QMGR_QUEUOSTAT_NUM_QUE_PER_WORD - 1)) *
295 (32 / IX_QMGR_QUEUOSTAT_NUM_QUE_PER_WORD));
296
297 /* AQM Q lower-group (0-31) status reg addresses, per queue */
298 qi->qStatRegAddr = IX_QMGR_QUELOWSTAT0_OFFSET +
299 ((i / IX_QMGR_QUELOWSTAT_NUM_QUE_PER_WORD) *
300 sizeof(uint32_t));
301
302 /* AQM Q lower-group (0-31) status register bit offset */
303 qi->qStatBitsOffset =
304 (i & (IX_QMGR_QUELOWSTAT_NUM_QUE_PER_WORD - 1)) *
305 (32 / IX_QMGR_QUELOWSTAT_NUM_QUE_PER_WORD);
306 } else { /* AQM Q upper-group (32-63), only */
307 qi->qUOStatRegAddr = 0; /* XXX */
308
309 /* AQM Q upper-group (32-63) Nearly Empty status reg bitmasks */
310 qi->qStat0BitMask = (1 << (i - IX_QMGR_MIN_QUEUPP_QID));
311
312 /* AQM Q upper-group (32-63) Full status register bitmasks */
313 qi->qStat1BitMask = (1 << (i - IX_QMGR_MIN_QUEUPP_QID));
314 }
315 }
316
317 sc->aqmFreeSramAddress = 0x100; /* Q buffer space starts at 0x2100 */
318
319 ixpqmgr_rebuild(sc); /* build initial priority table */
320 aqm_reset(sc); /* reset h/w */
321 return (0);
322 }
323
/*
 * Detach: quiesce the hardware via aqm_reset, then tear down
 * the interrupt handlers and release the IRQ resources and the
 * register mapping acquired in attach.
 */
static int
ixpqmgr_detach(device_t dev)
{
	struct ixpqmgr_softc *sc = device_get_softc(dev);

	aqm_reset(sc);		/* disable interrupts */
	bus_teardown_intr(dev, sc->sc_irq1, sc->sc_ih1);
	bus_teardown_intr(dev, sc->sc_irq2, sc->sc_ih2);
	bus_release_resource(dev, SYS_RES_IRQ, sc->sc_rid1, sc->sc_irq1);
	bus_release_resource(dev, SYS_RES_IRQ, sc->sc_rid2, sc->sc_irq2);
	bus_space_unmap(sc->sc_iot, sc->sc_ioh, IXP425_QMGR_SIZE);
	return (0);
}
337
/*
 * Configure queue qId: qEntries entries (entry size is fixed at
 * one 32-bit word), nearly-empty watermark ne, nearly-full
 * watermark nf, interrupt source select srcSel, and dispatch
 * callback cb/cbarg (NULL cb reverts to the dummy callback and
 * leaves the interrupt disabled).  SRAM for the queue is carved
 * from the free pool; NB: there is no bounds/exhaustion check.
 */
int
ixpqmgr_qconfig(int qId, int qEntries, int ne, int nf, int srcSel,
    qconfig_hand_t *cb, void *cbarg)
{
	struct ixpqmgr_softc *sc = ixpqmgr_sc;
	struct qmgrInfo *qi = &sc->qinfo[qId];

	DPRINTF(sc->sc_dev, "%s(%u, %u, %u, %u, %u, %p, %p)\n",
	    __func__, qId, qEntries, ne, nf, srcSel, cb, cbarg);

	/* NB: entry size is always 1 */
	qi->qSizeInWords = qEntries;

	qi->qReadCount = 0;
	qi->qWriteCount = 0;
	qi->qSizeInEntries = qEntries;	/* XXX kept for code clarity */

	if (cb == NULL) {
		/* Reset to dummy callback */
		qi->cb = dummyCallback;
		qi->cbarg = NULL;
	} else {
		qi->cb = cb;
		qi->cbarg = cbarg;
	}

	/* Write the config register; NB must be AFTER qinfo setup */
	aqm_qcfg(sc, qId, ne, nf);
	/*
	 * Account for space just allocated to queue.
	 */
	sc->aqmFreeSramAddress += (qi->qSizeInWords * sizeof(uint32_t));

	/* Set the interrupt source if this queue is in the range 0-31 */
	if (qId < IX_QMGR_MIN_QUEUPP_QID)
		aqm_srcsel_write(sc, qId, srcSel);

	if (cb != NULL)			/* Enable the interrupt */
		aqm_int_enable(sc, qId);

	sc->rebuildTable = TRUE;

	return 0;		/* XXX */
}
382
/*
 * Append an entry to queue qId.  Returns 0 on success or ENOSPC
 * when the queue overflowed (the hw overflow status bit is then
 * cleared).  Overflow detection applies only to the lower 32
 * queues; upper-queue writes are unchecked.
 */
int
ixpqmgr_qwrite(int qId, uint32_t entry)
{
	struct ixpqmgr_softc *sc = ixpqmgr_sc;
	struct qmgrInfo *qi = &sc->qinfo[qId];

	DPRINTFn(3, sc->sc_dev, "%s(%u, 0x%x) writeCount %u size %u\n",
	    __func__, qId, entry, qi->qWriteCount, qi->qSizeInEntries);

	/* write the entry */
	aqm_reg_write(sc, qi->qAccRegAddr, entry);

	/* NB: overflow is available for lower queues only */
	if (qId < IX_QMGR_MIN_QUEUPP_QID) {
		int qSize = qi->qSizeInEntries;
		/*
		 * Increment the current number of entries in the queue
		 * and check for overflow .
		 */
		if (qi->qWriteCount++ == qSize) {	/* check for overflow */
			uint32_t status = aqm_reg_read(sc, qi->qUOStatRegAddr);
			int qPtrs;

			/*
			 * Read the status twice because the status may
			 * not be immediately ready after the write operation
			 */
			if ((status & qi->qOflowStatBitMask) ||
			    ((status = aqm_reg_read(sc, qi->qUOStatRegAddr)) & qi->qOflowStatBitMask)) {
				/*
				 * The queue is full, clear the overflow status bit if set.
				 */
				aqm_reg_write(sc, qi->qUOStatRegAddr,
				    status & ~qi->qOflowStatBitMask);
				qi->qWriteCount = qSize;
				DPRINTFn(5, sc->sc_dev,
				    "%s(%u, 0x%x) Q full, overflow status cleared\n",
				    __func__, qId, entry);
				return ENOSPC;
			}
			/*
			 * No overflow occurred : someone is draining the queue
			 * and the current counter needs to be
			 * updated from the current number of entries in the queue
			 */

			/* calculate number of words in q */
			qPtrs = aqm_reg_read(sc, qi->qConfigRegAddr);
			DPRINTFn(2, sc->sc_dev,
			    "%s(%u, 0x%x) Q full, no overflow status, qConfig 0x%x\n",
			    __func__, qId, entry, qPtrs);
			/* NB: occupancy derived from the write - read pointers
			 * packed in the low bits of the config register */
			qPtrs = (qPtrs - (qPtrs >> 7)) & 0x7f;

			if (qPtrs == 0) {
				/*
				 * The queue may be full at the time of the
				 * snapshot. Next access will check
				 * the overflow status again.
				 */
				qi->qWriteCount = qSize;
			} else {
				/* convert the number of words to a number of entries */
				qi->qWriteCount = qPtrs & (qSize - 1);
			}
		}
	}
	return 0;
}
451
/*
 * Pop one entry from queue qId into *entry.  A zero entry from a
 * lower queue triggers an underflow-status check; if underflow is
 * indicated the status bit is cleared and ENOSPC is returned.
 * NB: a legitimately-queued zero entry is indistinguishable from
 * underflow without the status check.
 */
int
ixpqmgr_qread(int qId, uint32_t *entry)
{
	struct ixpqmgr_softc *sc = ixpqmgr_sc;
	struct qmgrInfo *qi = &sc->qinfo[qId];
	bus_size_t off = qi->qAccRegAddr;

	*entry = aqm_reg_read(sc, off);

	/*
	 * Reset the current read count : next access to the read function
	 * will force a underflow status check.
	 */
	qi->qReadCount = 0;

	/* Check if underflow occurred on the read */
	if (*entry == 0 && qId < IX_QMGR_MIN_QUEUPP_QID) {
		/* get the queue status */
		uint32_t status = aqm_reg_read(sc, qi->qUOStatRegAddr);

		if (status & qi->qUflowStatBitMask) { /* clear underflow status */
			aqm_reg_write(sc, qi->qUOStatRegAddr,
			    status &~ qi->qUflowStatBitMask);
			return ENOSPC;
		}
	}
	return 0;
}
480
481 int
482 ixpqmgr_qreadm(int qId, uint32_t n, uint32_t *p)
483 {
484 struct ixpqmgr_softc *sc = ixpqmgr_sc;
485 struct qmgrInfo *qi = &sc->qinfo[qId];
486 uint32_t entry;
487 bus_size_t off = qi->qAccRegAddr;
488
489 entry = aqm_reg_read(sc, off);
490 while (--n) {
491 if (entry == 0) {
492 /* if we read a NULL entry, stop. We have underflowed */
493 break;
494 }
495 *p++ = entry; /* store */
496 entry = aqm_reg_read(sc, off);
497 }
498 *p = entry;
499
500 /*
501 * Reset the current read count : next access to the read function
502 * will force a underflow status check.
503 */
504 qi->qReadCount = 0;
505
506 /* Check if underflow occurred on the read */
507 if (entry == 0 && qId < IX_QMGR_MIN_QUEUPP_QID) {
508 /* get the queue status */
509 uint32_t status = aqm_reg_read(sc, qi->qUOStatRegAddr);
510
511 if (status & qi->qUflowStatBitMask) { /* clear underflow status */
512 aqm_reg_write(sc, qi->qUOStatRegAddr,
513 status &~ qi->qUflowStatBitMask);
514 return ENOSPC;
515 }
516 }
517 return 0;
518 }
519
/*
 * Return the hw status nibble/bits for queue qId.  Lower queues
 * (0-31) yield their per-queue field from the QUELOWSTAT word;
 * upper queues (32-63) yield a synthesized combination of the
 * nearly-empty and full bits from QUEUPPSTAT0/1.
 */
uint32_t
ixpqmgr_getqstatus(int qId)
{
/* Mask for one queue's field in a QUELOWSTAT word. */
#define	QLOWSTATMASK \
	((1 << (32 / IX_QMGR_QUELOWSTAT_NUM_QUE_PER_WORD)) - 1)
	struct ixpqmgr_softc *sc = ixpqmgr_sc;
	const struct qmgrInfo *qi = &sc->qinfo[qId];
	uint32_t status;

	if (qId < IX_QMGR_MIN_QUEUPP_QID) {
		/* read the status of a queue in the range 0-31 */
		status = aqm_reg_read(sc, qi->qStatRegAddr);

		/* mask out the status bits relevant only to this queue */
		status = (status >> qi->qStatBitsOffset) & QLOWSTATMASK;
	} else { /* read status of a queue in the range 32-63 */
		status = 0;
		if (aqm_reg_read(sc, IX_QMGR_QUEUPPSTAT0_OFFSET)&qi->qStat0BitMask)
			status |= IX_QMGR_Q_STATUS_NE_BIT_MASK;	/* nearly empty */
		if (aqm_reg_read(sc, IX_QMGR_QUEUPPSTAT1_OFFSET)&qi->qStat1BitMask)
			status |= IX_QMGR_Q_STATUS_F_BIT_MASK;	/* full */
	}
	return status;
#undef QLOWSTATMASK
}
545
/* Return the raw QUECONFIG register contents for queue qId. */
uint32_t
ixpqmgr_getqconfig(int qId)
{
	struct ixpqmgr_softc *sc = ixpqmgr_sc;

	return aqm_reg_read(sc, IX_QMGR_Q_CONFIG_ADDR_GET(qId));
}
553
554 void
555 ixpqmgr_dump(void)
556 {
557 struct ixpqmgr_softc *sc = ixpqmgr_sc;
558 int i, a;
559
560 /* status registers */
561 printf("0x%04x: %08x %08x %08x %08x\n"
562 , 0x400
563 , aqm_reg_read(sc, 0x400)
564 , aqm_reg_read(sc, 0x400+4)
565 , aqm_reg_read(sc, 0x400+8)
566 , aqm_reg_read(sc, 0x400+12)
567 );
568 printf("0x%04x: %08x %08x %08x %08x\n"
569 , 0x410
570 , aqm_reg_read(sc, 0x410)
571 , aqm_reg_read(sc, 0x410+4)
572 , aqm_reg_read(sc, 0x410+8)
573 , aqm_reg_read(sc, 0x410+12)
574 );
575 printf("0x%04x: %08x %08x %08x %08x\n"
576 , 0x420
577 , aqm_reg_read(sc, 0x420)
578 , aqm_reg_read(sc, 0x420+4)
579 , aqm_reg_read(sc, 0x420+8)
580 , aqm_reg_read(sc, 0x420+12)
581 );
582 printf("0x%04x: %08x %08x %08x %08x\n"
583 , 0x430
584 , aqm_reg_read(sc, 0x430)
585 , aqm_reg_read(sc, 0x430+4)
586 , aqm_reg_read(sc, 0x430+8)
587 , aqm_reg_read(sc, 0x430+12)
588 );
589 /* q configuration registers */
590 for (a = 0x2000; a < 0x20ff; a += 32)
591 printf("0x%04x: %08x %08x %08x %08x %08x %08x %08x %08x\n"
592 , a
593 , aqm_reg_read(sc, a)
594 , aqm_reg_read(sc, a+4)
595 , aqm_reg_read(sc, a+8)
596 , aqm_reg_read(sc, a+12)
597 , aqm_reg_read(sc, a+16)
598 , aqm_reg_read(sc, a+20)
599 , aqm_reg_read(sc, a+24)
600 , aqm_reg_read(sc, a+28)
601 );
602 /* allocated SRAM */
603 for (i = 0x100; i < sc->aqmFreeSramAddress; i += 32) {
604 a = 0x2000 + i;
605 printf("0x%04x: %08x %08x %08x %08x %08x %08x %08x %08x\n"
606 , a
607 , aqm_reg_read(sc, a)
608 , aqm_reg_read(sc, a+4)
609 , aqm_reg_read(sc, a+8)
610 , aqm_reg_read(sc, a+12)
611 , aqm_reg_read(sc, a+16)
612 , aqm_reg_read(sc, a+20)
613 , aqm_reg_read(sc, a+24)
614 , aqm_reg_read(sc, a+28)
615 );
616 }
617 for (i = 0; i < 16; i++) {
618 printf("Q[%2d] config 0x%08x status 0x%02x "
619 "Q[%2d] config 0x%08x status 0x%02x\n"
620 , i, ixpqmgr_getqconfig(i), ixpqmgr_getqstatus(i)
621 , i+16, ixpqmgr_getqconfig(i+16), ixpqmgr_getqstatus(i+16)
622 );
623 }
624 }
625
/*
 * Enable interrupt notification for queue qId using interrupt
 * source srcSel (source selection applies only to the lower 32
 * queues; the upper queues have no srcsel register here).
 */
void
ixpqmgr_notify_enable(int qId, int srcSel)
{
	struct ixpqmgr_softc *sc = ixpqmgr_sc;
#if 0
	/* Calculate the checkMask and checkValue for this q */
	aqm_calc_statuscheck(sc, qId, srcSel);
#endif
	/* Set the interrupt source if this queue is in the range 0-31 */
	if (qId < IX_QMGR_MIN_QUEUPP_QID)
		aqm_srcsel_write(sc, qId, srcSel);

	/* Enable the interrupt */
	aqm_int_enable(sc, qId);
}
641
/* Disable interrupt notification for queue qId. */
void
ixpqmgr_notify_disable(int qId)
{
	struct ixpqmgr_softc *sc = ixpqmgr_sc;

	aqm_int_disable(sc, qId);
}
649
/*
 * Rebuild the priority table used by the dispatcher.
 * Queues are listed in ascending priority order; lower queues
 * (0-31) occupy table slots 0-31 and upper queues (32-63)
 * occupy slots 32-63.  The first-half bit masks record which
 * interrupt bits belong to the first 16 slots of each half so
 * the dispatcher can pick its starting point cheaply.
 */
static void
ixpqmgr_rebuild(struct ixpqmgr_softc *sc)
{
	int q, pri;
	int lowQuePriorityTableIndex, uppQuePriorityTableIndex;
	struct qmgrInfo *qi;

	sc->lowPriorityTableFirstHalfMask = 0;
	sc->uppPriorityTableFirstHalfMask = 0;

	lowQuePriorityTableIndex = 0;
	uppQuePriorityTableIndex = 32;
	for (pri = 0; pri < IX_QMGR_NUM_PRIORITY_LEVELS; pri++) {
		/* low priority q's */
		for (q = 0; q < IX_QMGR_MIN_QUEUPP_QID; q++) {
			qi = &sc->qinfo[q];
			if (qi->priority == pri) {
				/*
				 * Build the priority table bitmask which match the
				 * queues of the first half of the priority table.
				 */
				if (lowQuePriorityTableIndex < 16) {
					sc->lowPriorityTableFirstHalfMask |=
					    qi->intRegCheckMask;
				}
				sc->priorityTable[lowQuePriorityTableIndex++] = q;
			}
		}
		/* high priority q's */
		for (; q < IX_QMGR_MAX_NUM_QUEUES; q++) {
			qi = &sc->qinfo[q];
			if (qi->priority == pri) {
				/*
				 * Build the priority table bitmask which match the
				 * queues of the first half of the priority table .
				 */
				if (uppQuePriorityTableIndex < 48) {
					sc->uppPriorityTableFirstHalfMask |=
					    qi->intRegCheckMask;
				}
				sc->priorityTable[uppQuePriorityTableIndex++] = q;
			}
		}
	}
	sc->rebuildTable = FALSE;
}
699
/*
 * Count the number of leading zero bits in a word, returning
 * the same value as the CLZ instruction:
 *
 *	0x80000000 -> 0, 0x40000000 -> 1, ... ,
 *	0x00000002 -> 30, 0x00000001 -> 31, 0x00000000 -> 32.
 *
 * Portable C replacement for systems lacking a CLZ equivalent;
 * uses a binary chop over progressively narrower half-words
 * instead of shifting one bit per iteration.
 */
static unsigned int
_lzcount(uint32_t word)
{
	unsigned int count = 0;

	if (word == 0)
		return 32;
	if ((word & 0xffff0000u) == 0) {
		count += 16;
		word <<= 16;
	}
	if ((word & 0xff000000u) == 0) {
		count += 8;
		word <<= 8;
	}
	if ((word & 0xf0000000u) == 0) {
		count += 4;
		word <<= 4;
	}
	if ((word & 0xc0000000u) == 0) {
		count += 2;
		word <<= 2;
	}
	if ((word & 0x80000000u) == 0)
		count += 1;
	return count;
}
733
/*
 * Interrupt dispatcher: read-and-clear QINTREG0 (lower 32
 * queues only; see the "we only use the lower 32 q's" note in
 * attach), then invoke the callback of each queue whose bit is
 * set, in priorityTable order.  The arg parameter is unused;
 * state comes from the global ixpqmgr_sc.
 */
static void
ixpqmgr_intr(void *arg)
{
	struct ixpqmgr_softc *sc = ixpqmgr_sc;
	uint32_t intRegVal;		/* Interrupt reg val */
	struct qmgrInfo *qi;
	int priorityTableIndex;		/* Priority table index */
	int qIndex;			/* Current queue being processed */

	/* Read the interrupt register */
	intRegVal = aqm_reg_read(sc, IX_QMGR_QINTREG0_OFFSET);
	/* Write back to clear interrupt */
	aqm_reg_write(sc, IX_QMGR_QINTREG0_OFFSET, intRegVal);

	DPRINTFn(5, sc->sc_dev, "%s: ISR0 0x%x ISR1 0x%x\n",
	    __func__, intRegVal, aqm_reg_read(sc, IX_QMGR_QINTREG1_OFFSET));

	/* Dispatch only if at least one queue raised an interrupt */
	if (intRegVal != 0) {
		/* get the first queue Id from the interrupt register value */
		qIndex = (32 - 1) - _lzcount(intRegVal);

		DPRINTFn(2, sc->sc_dev, "%s: ISR0 0x%x qIndex %u\n",
		    __func__, intRegVal, qIndex);

		/*
		 * Optimize for single callback case.
		 */
		qi = &sc->qinfo[qIndex];
		if (intRegVal == qi->intRegCheckMask) {
			/*
			 * Only 1 queue event triggered a notification.
			 * Call the callback function for this queue
			 */
			qi->cb(qIndex, qi->cbarg);
		} else {
			/*
			 * The event is triggered by more than 1 queue,
			 * the queue search will start from the beginning
			 * or the middle of the priority table.
			 *
			 * The search will end when all the bits of the interrupt
			 * register are cleared. There is no need to maintain
			 * a separate value and test it at each iteration.
			 */
			if (intRegVal & sc->lowPriorityTableFirstHalfMask) {
				priorityTableIndex = 0;
			} else {
				priorityTableIndex = 16;
			}
			/*
			 * Iterate over the priority table until all the bits
			 * of the interrupt register are cleared.
			 */
			do {
				qIndex = sc->priorityTable[priorityTableIndex++];
				qi = &sc->qinfo[qIndex];

				/* If this queue caused this interrupt to be raised */
				if (intRegVal & qi->intRegCheckMask) {
					/* Call the callback function for this queue */
					qi->cb(qIndex, qi->cbarg);
					/* Clear the interrupt register bit */
					intRegVal &= ~qi->intRegCheckMask;
				}
			} while (intRegVal);
		}
	}

	/* Rebuild the priority table if needed */
	if (sc->rebuildTable)
		ixpqmgr_rebuild(sc);
}
807
#if 0
/*
 * Generate the parameters used to check if a Q's status matches
 * the specified source select. We calculate which status word
 * to check (statusWordOffset), the value to check the status
 * against (statusCheckValue) and the mask (statusMask) to mask
 * out all but the bits to check in the status word.
 *
 * NOTE: disabled together with the statusWordOffset/statusMask/
 * statusCheckValue fields in struct qmgrInfo ("needed only for
 * A0 parts") and the disabled call site in ixpqmgr_notify_enable.
 */
static void
aqm_calc_statuscheck(int qId, IxQMgrSourceId srcSel)
{
	struct qmgrInfo *qi = &qinfo[qId];
	uint32_t shiftVal;

	if (qId < IX_QMGR_MIN_QUEUPP_QID) {
		switch (srcSel) {
		case IX_QMGR_Q_SOURCE_ID_E:
			qi->statusCheckValue = IX_QMGR_Q_STATUS_E_BIT_MASK;
			qi->statusMask = IX_QMGR_Q_STATUS_E_BIT_MASK;
			break;
		case IX_QMGR_Q_SOURCE_ID_NE:
			qi->statusCheckValue = IX_QMGR_Q_STATUS_NE_BIT_MASK;
			qi->statusMask = IX_QMGR_Q_STATUS_NE_BIT_MASK;
			break;
		case IX_QMGR_Q_SOURCE_ID_NF:
			qi->statusCheckValue = IX_QMGR_Q_STATUS_NF_BIT_MASK;
			qi->statusMask = IX_QMGR_Q_STATUS_NF_BIT_MASK;
			break;
		case IX_QMGR_Q_SOURCE_ID_F:
			qi->statusCheckValue = IX_QMGR_Q_STATUS_F_BIT_MASK;
			qi->statusMask = IX_QMGR_Q_STATUS_F_BIT_MASK;
			break;
		case IX_QMGR_Q_SOURCE_ID_NOT_E:
			qi->statusCheckValue = 0;
			qi->statusMask = IX_QMGR_Q_STATUS_E_BIT_MASK;
			break;
		case IX_QMGR_Q_SOURCE_ID_NOT_NE:
			qi->statusCheckValue = 0;
			qi->statusMask = IX_QMGR_Q_STATUS_NE_BIT_MASK;
			break;
		case IX_QMGR_Q_SOURCE_ID_NOT_NF:
			qi->statusCheckValue = 0;
			qi->statusMask = IX_QMGR_Q_STATUS_NF_BIT_MASK;
			break;
		case IX_QMGR_Q_SOURCE_ID_NOT_F:
			qi->statusCheckValue = 0;
			qi->statusMask = IX_QMGR_Q_STATUS_F_BIT_MASK;
			break;
		default:
			/* Should never hit */
			IX_OSAL_ASSERT(0);
			break;
		}

		/* One nibble of status per queue so need to shift the
		 * check value and mask out to the correct position.
		 */
		shiftVal = (qId % IX_QMGR_QUELOWSTAT_NUM_QUE_PER_WORD) *
		    IX_QMGR_QUELOWSTAT_BITS_PER_Q;

		/* Calculate the which status word to check from the qId,
		 * 8 Qs status per word
		 */
		qi->statusWordOffset = qId / IX_QMGR_QUELOWSTAT_NUM_QUE_PER_WORD;

		qi->statusCheckValue <<= shiftVal;
		qi->statusMask <<= shiftVal;
	} else {
		/* One status word */
		qi->statusWordOffset = 0;
		/* Single bits per queue and int source bit hardwired NE,
		 * Qs start at 32.
		 */
		qi->statusMask = 1 << (qId - IX_QMGR_MIN_QUEUPP_QID);
		qi->statusCheckValue = qi->statusMask;
	}
}
#endif
886
/*
 * Set queue qId's bit in the appropriate interrupt enable
 * register (QUEIEREG0 for queues 0-31, QUEIEREG1 for 32-63).
 */
static void
aqm_int_enable(struct ixpqmgr_softc *sc, int qId)
{
	bus_size_t reg;
	uint32_t v;

	if (qId < IX_QMGR_MIN_QUEUPP_QID)
		reg = IX_QMGR_QUEIEREG0_OFFSET;
	else
		reg = IX_QMGR_QUEIEREG1_OFFSET;
	v = aqm_reg_read(sc, reg);
	aqm_reg_write(sc, reg, v | (1 << (qId % IX_QMGR_MIN_QUEUPP_QID)));

	DPRINTF(sc->sc_dev, "%s(%u) 0x%lx: 0x%x => 0x%x\n",
	    __func__, qId, reg, v, aqm_reg_read(sc, reg));
}
903
/*
 * Clear queue qId's bit in the appropriate interrupt enable
 * register; inverse of aqm_int_enable.
 */
static void
aqm_int_disable(struct ixpqmgr_softc *sc, int qId)
{
	bus_size_t reg;
	uint32_t v;

	if (qId < IX_QMGR_MIN_QUEUPP_QID)
		reg = IX_QMGR_QUEIEREG0_OFFSET;
	else
		reg = IX_QMGR_QUEIEREG1_OFFSET;
	v = aqm_reg_read(sc, reg);
	aqm_reg_write(sc, reg, v &~ (1 << (qId % IX_QMGR_MIN_QUEUPP_QID)));

	DPRINTF(sc->sc_dev, "%s(%u) 0x%lx: 0x%x => 0x%x\n",
	    __func__, qId, reg, v, aqm_reg_read(sc, reg));
}
920
/*
 * Integer floor(log2(n)).
 * N.B. returns 0 when supplied 0, matching the historical
 * behavior relied on by the toAqm* converters below.
 */
static unsigned
log2(unsigned n)
{
	unsigned bits = 0;

	while (n >>= 1)
		bits++;
	return bits;
}
932
/* Encode an entry size for the QUECONFIG register. */
static __inline unsigned
toAqmEntrySize(int entrySize)
{
	/* entrySize 1("00"),2("01"),4("10") */
	return log2(entrySize);
}
939
/* Encode a buffer size (in words) for the QUECONFIG register. */
static __inline unsigned
toAqmBufferSize(unsigned bufferSizeInWords)
{
	/* bufferSize 16("00"),32("01),64("10"),128("11") */
	return log2(bufferSizeInWords / IX_QMGR_MIN_BUFFER_SIZE);
}
946
/* Encode a watermark level for the QUECONFIG register. */
static __inline unsigned
toAqmWatermark(int watermark)
{
	/*
	 * Watermarks 0("000"),1("001"),2("010"),4("011"),
	 * 8("100"),16("101"),32("110"),64("111")
	 */
	return log2(2 * watermark);
}
956
/*
 * Build and write the QUECONFIG word for queue qId: entry size,
 * buffer size, SRAM base address (taken from the free pool), and
 * the nearly-empty (ne) / nearly-full (nf) watermarks.
 */
static void
aqm_qcfg(struct ixpqmgr_softc *sc, int qId, u_int ne, u_int nf)
{
	const struct qmgrInfo *qi = &sc->qinfo[qId];
	uint32_t qCfg;
	uint32_t baseAddress;

	/* Build config register */
	qCfg = ((toAqmEntrySize(1) & IX_QMGR_ENTRY_SIZE_MASK) <<
		    IX_QMGR_Q_CONFIG_ESIZE_OFFSET)
	     | ((toAqmBufferSize(qi->qSizeInWords) & IX_QMGR_SIZE_MASK) <<
		    IX_QMGR_Q_CONFIG_BSIZE_OFFSET);

	/* baseAddress, calculated relative to start address */
	baseAddress = sc->aqmFreeSramAddress;

	/* base address must be word-aligned */
	KASSERT((baseAddress % IX_QMGR_BASE_ADDR_16_WORD_ALIGN) == 0,
	    ("address not word-aligned"));

	/* Now convert to a 16 word pointer as required by QUECONFIG register */
	baseAddress >>= IX_QMGR_BASE_ADDR_16_WORD_SHIFT;
	qCfg |= baseAddress << IX_QMGR_Q_CONFIG_BADDR_OFFSET;

	/* set watermarks */
	qCfg |= (toAqmWatermark(ne) << IX_QMGR_Q_CONFIG_NE_OFFSET)
	     |  (toAqmWatermark(nf) << IX_QMGR_Q_CONFIG_NF_OFFSET);

	DPRINTF(sc->sc_dev, "%s(%u, %u, %u) 0x%x => 0x%x @ 0x%x\n",
	    __func__, qId, ne, nf,
	    aqm_reg_read(sc, IX_QMGR_Q_CONFIG_ADDR_GET(qId)),
	    qCfg, IX_QMGR_Q_CONFIG_ADDR_GET(qId));

	aqm_reg_write(sc, IX_QMGR_Q_CONFIG_ADDR_GET(qId), qCfg);
}
992
/*
 * Program the interrupt source select for queue qId (lower 32
 * queues only; the caller checks the range).  Each INT0SRCSELREG
 * word packs the selects for several queues.
 */
static void
aqm_srcsel_write(struct ixpqmgr_softc *sc, int qId, int sourceId)
{
	bus_size_t off;
	uint32_t v;

	/*
	 * Calculate the register offset; multiple queues split across registers
	 */
	off = IX_QMGR_INT0SRCSELREG0_OFFSET +
	    ((qId / IX_QMGR_INTSRC_NUM_QUE_PER_WORD) * sizeof(uint32_t));

	v = aqm_reg_read(sc, off);
	if (off == IX_QMGR_INT0SRCSELREG0_OFFSET && qId == 0) {
		/* Queue 0 at INT0SRCSELREG should not corrupt the value bit-3 */
		v |= 0x7;
	} else {
		const uint32_t bpq = 32 / IX_QMGR_INTSRC_NUM_QUE_PER_WORD;
		uint32_t mask;
		int qshift;

		qshift = (qId & (IX_QMGR_INTSRC_NUM_QUE_PER_WORD-1)) * bpq;
		mask = ((1 << bpq) - 1) << qshift;	/* q's status mask */

		/* merge sourceId */
		v = (v &~ mask) | ((sourceId << qshift) & mask);
	}

	DPRINTF(sc->sc_dev, "%s(%u, %u) 0x%x => 0x%x @ 0x%lx\n",
	    __func__, qId, sourceId, aqm_reg_read(sc, off), v, off);
	aqm_reg_write(sc, off, v);
}
1025
/*
 * Reset AQM registers to default values.
 * Writing the QUEIEREG reset values also leaves queue
 * interrupts disabled, which is why detach calls this.
 */
static void
aqm_reset(struct ixpqmgr_softc *sc)
{
	int i;

	/* Reset queues 0..31 status registers 0..3 */
	aqm_reg_write(sc, IX_QMGR_QUELOWSTAT0_OFFSET,
		IX_QMGR_QUELOWSTAT_RESET_VALUE);
	aqm_reg_write(sc, IX_QMGR_QUELOWSTAT1_OFFSET,
		IX_QMGR_QUELOWSTAT_RESET_VALUE);
	aqm_reg_write(sc, IX_QMGR_QUELOWSTAT2_OFFSET,
		IX_QMGR_QUELOWSTAT_RESET_VALUE);
	aqm_reg_write(sc, IX_QMGR_QUELOWSTAT3_OFFSET,
		IX_QMGR_QUELOWSTAT_RESET_VALUE);

	/* Reset underflow/overflow status registers 0..1 */
	aqm_reg_write(sc, IX_QMGR_QUEUOSTAT0_OFFSET,
		IX_QMGR_QUEUOSTAT_RESET_VALUE);
	aqm_reg_write(sc, IX_QMGR_QUEUOSTAT1_OFFSET,
		IX_QMGR_QUEUOSTAT_RESET_VALUE);

	/* Reset queues 32..63 nearly empty status registers */
	aqm_reg_write(sc, IX_QMGR_QUEUPPSTAT0_OFFSET,
		IX_QMGR_QUEUPPSTAT0_RESET_VALUE);

	/* Reset queues 32..63 full status registers */
	aqm_reg_write(sc, IX_QMGR_QUEUPPSTAT1_OFFSET,
		IX_QMGR_QUEUPPSTAT1_RESET_VALUE);

	/* Reset int0 status flag source select registers 0..3 */
	aqm_reg_write(sc, IX_QMGR_INT0SRCSELREG0_OFFSET,
			     IX_QMGR_INT0SRCSELREG_RESET_VALUE);
	aqm_reg_write(sc, IX_QMGR_INT0SRCSELREG1_OFFSET,
			     IX_QMGR_INT0SRCSELREG_RESET_VALUE);
	aqm_reg_write(sc, IX_QMGR_INT0SRCSELREG2_OFFSET,
			     IX_QMGR_INT0SRCSELREG_RESET_VALUE);
	aqm_reg_write(sc, IX_QMGR_INT0SRCSELREG3_OFFSET,
			     IX_QMGR_INT0SRCSELREG_RESET_VALUE);

	/* Reset queue interrupt enable register 0..1 */
	aqm_reg_write(sc, IX_QMGR_QUEIEREG0_OFFSET,
		IX_QMGR_QUEIEREG_RESET_VALUE);
	aqm_reg_write(sc, IX_QMGR_QUEIEREG1_OFFSET,
		IX_QMGR_QUEIEREG_RESET_VALUE);

	/* Reset queue interrupt register 0..1 */
	aqm_reg_write(sc, IX_QMGR_QINTREG0_OFFSET, IX_QMGR_QINTREG_RESET_VALUE);
	aqm_reg_write(sc, IX_QMGR_QINTREG1_OFFSET, IX_QMGR_QINTREG_RESET_VALUE);

	/* Reset queue configuration words 0..63 */
	for (i = 0; i < IX_QMGR_MAX_NUM_QUEUES; i++)
		aqm_reg_write(sc, sc->qinfo[i].qConfigRegAddr,
		    IX_QMGR_QUECONFIG_RESET_VALUE);

	/* XXX zero SRAM to simplify debugging */
	for (i = IX_QMGR_QUEBUFFER_SPACE_OFFSET;
	     i < IX_QMGR_AQM_SRAM_SIZE_IN_BYTES; i += sizeof(uint32_t))
		aqm_reg_write(sc, i, 0);
}
1088
/* newbus device interface dispatch table. */
static device_method_t ixpqmgr_methods[] = {
	DEVMETHOD(device_probe,		ixpqmgr_probe),
	DEVMETHOD(device_attach,	ixpqmgr_attach),
	DEVMETHOD(device_detach,	ixpqmgr_detach),

	{ 0, 0 }
};
1096
static driver_t ixpqmgr_driver = {
	"ixpqmgr",
	ixpqmgr_methods,
	sizeof(struct ixpqmgr_softc),
};
static devclass_t ixpqmgr_devclass;

/* Attach the driver to the ixp bus; no module event handler needed. */
DRIVER_MODULE(ixpqmgr, ixp, ixpqmgr_driver, ixpqmgr_devclass, 0, 0);
Cache object: 05860ef32f67aa637191564f3151a4e7
|