1 /*-
2 * Copyright (c) 2006 Sam Leffler, Errno Consulting
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer,
10 * without modification.
11 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
12 * similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any
13 * redistribution must be conditioned upon including a substantially
14 * similar Disclaimer requirement for further binary redistribution.
15 *
16 * NO WARRANTY
17 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
18 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
19 * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY
20 * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
21 * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY,
22 * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
23 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
24 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
25 * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
26 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
27 * THE POSSIBILITY OF SUCH DAMAGES.
28 */
29
30 /*-
31 * Copyright (c) 2001-2005, Intel Corporation.
32 * All rights reserved.
33 *
34 * Redistribution and use in source and binary forms, with or without
35 * modification, are permitted provided that the following conditions
36 * are met:
37 * 1. Redistributions of source code must retain the above copyright
38 * notice, this list of conditions and the following disclaimer.
39 * 2. Redistributions in binary form must reproduce the above copyright
40 * notice, this list of conditions and the following disclaimer in the
41 * documentation and/or other materials provided with the distribution.
42 * 3. Neither the name of the Intel Corporation nor the names of its contributors
43 * may be used to endorse or promote products derived from this software
44 * without specific prior written permission.
45 *
46 *
47 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ``AS IS''
48 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
49 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
50 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
51 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
52 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
53 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
54 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
55 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
56 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
57 * SUCH DAMAGE.
58 */
59 #include <sys/cdefs.h>
60 __FBSDID("$FreeBSD$");
61
62 /*
63 * Intel XScale Queue Manager support.
64 *
65 * Each IXP4XXX device has a hardware block that implements a priority
66 * queue manager that is shared between the XScale cpu and the backend
67 * devices (such as the NPE). Queues are accessed by reading/writing
68 * special memory locations. The queue contents are mapped into a shared
69 * SRAM region with entries managed in a circular buffer. The XScale
70 * processor can receive interrupts based on queue contents (a condition
71 * code determines when interrupts should be delivered).
72 *
73 * The code here basically replaces the qmgr class in the Intel Access
74 * Library (IAL).
75 */
76 #include <sys/param.h>
77 #include <sys/systm.h>
78 #include <sys/kernel.h>
79 #include <sys/module.h>
80 #include <sys/time.h>
81 #include <sys/bus.h>
82 #include <sys/resource.h>
83 #include <sys/rman.h>
84 #include <sys/sysctl.h>
85
86 #include <machine/bus.h>
87 #include <machine/resource.h>
88 #include <machine/intr.h>
89 #include <arm/xscale/ixp425/ixp425reg.h>
90 #include <arm/xscale/ixp425/ixp425var.h>
91
92 #include <arm/xscale/ixp425/ixp425_qmgr.h>
93
94 /*
95 * State per AQM hw queue.
96 * This structure holds q configuration and dispatch state.
97 */
98 struct qmgrInfo {
99 int qSizeInWords; /* queue size in words */
100
101 uint32_t qOflowStatBitMask; /* overflow status mask */
102 int qWriteCount; /* queue write count */
103
104 bus_size_t qAccRegAddr; /* access register */
105 bus_size_t qUOStatRegAddr; /* status register */
106 bus_size_t qConfigRegAddr; /* config register */
107 int qSizeInEntries; /* queue size in entries */
108
109 uint32_t qUflowStatBitMask; /* underflow status mask */
110 int qReadCount; /* queue read count */
111
112 /* XXX union */
113 uint32_t qStatRegAddr;
114 uint32_t qStatBitsOffset;
115 uint32_t qStat0BitMask;
116 uint32_t qStat1BitMask;
117
118 uint32_t intRegCheckMask; /* interrupt reg check mask */
119 void (*cb)(int, void *); /* callback function */
120 void *cbarg; /* callback argument */
121 int priority; /* dispatch priority */
122 #if 0
123 /* NB: needed only for A0 parts */
124 u_int statusWordOffset; /* status word offset */
125 uint32_t statusMask; /* status mask */
126 uint32_t statusCheckValue; /* status check value */
127 #endif
128 };
129
/*
 * Per-device (there is exactly one AQM block) driver state:
 * bus-space handles for the register window, the two interrupt
 * resources, per-queue state and the dispatch priority table.
 */
struct ixpqmgr_softc {
	device_t sc_dev;
	bus_space_tag_t sc_iot;
	bus_space_handle_t sc_ioh;

	struct resource *sc_irq1;	/* IRQ resource */
	void *sc_ih1;			/* interrupt handler */
	int sc_rid1;			/* resource id for irq */

	struct resource *sc_irq2;	/* second IRQ (queues 33-64) */
	void *sc_ih2;
	int sc_rid2;

	struct qmgrInfo qinfo[IX_QMGR_MAX_NUM_QUEUES];
	/*
	 * This array contains a list of queue identifiers ordered by
	 * priority. The table is split logically between queue
	 * identifiers 0-31 and 32-63. To optimize lookups bit masks
	 * are kept for the first-32 and last-32 q's. When the
	 * table needs to be rebuilt mark rebuildTable and it'll
	 * happen after the next interrupt.
	 */
	int priorityTable[IX_QMGR_MAX_NUM_QUEUES];
	uint32_t lowPriorityTableFirstHalfMask;
	uint32_t uppPriorityTableFirstHalfMask;
	int rebuildTable;		/* rebuild priorityTable */

	uint32_t aqmFreeSramAddress;	/* SRAM free space */
};
159
/* Debug verbosity, settable via the debug.qmgr sysctl/tunable. */
static int qmgr_debug;
SYSCTL_INT(_debug, OID_AUTO, qmgr, CTLFLAG_RWTUN, &qmgr_debug,
	   0, "IXP4XX Q-Manager debug msgs");
/* NB: the dev argument is accepted for call-site symmetry but unused */
#define	DPRINTF(dev, fmt, ...) do {					\
	if (qmgr_debug) printf(fmt, __VA_ARGS__);			\
} while (0)
#define	DPRINTFn(n, dev, fmt, ...) do {					\
	if (qmgr_debug >= n) printf(fmt, __VA_ARGS__);			\
} while (0)
169
170 static struct ixpqmgr_softc *ixpqmgr_sc = NULL;
171
172 static void ixpqmgr_rebuild(struct ixpqmgr_softc *);
173 static void ixpqmgr_intr(void *);
174
175 static void aqm_int_enable(struct ixpqmgr_softc *sc, int qId);
176 static void aqm_int_disable(struct ixpqmgr_softc *sc, int qId);
177 static void aqm_qcfg(struct ixpqmgr_softc *sc, int qId, u_int ne, u_int nf);
178 static void aqm_srcsel_write(struct ixpqmgr_softc *sc, int qId, int sourceId);
179 static void aqm_reset(struct ixpqmgr_softc *sc);
180
/*
 * Default callback installed for queues with no handler configured
 * (see ixpqmgr_qconfig).  Reaching here means the h/w raised a
 * notification for a queue nobody claimed; log it so the
 * misconfiguration is visible instead of silently dropped.
 */
static void
dummyCallback(int qId, void *arg)
{
	printf("ixpqmgr: spurious notification for qId %d\n", qId);
}
186
/*
 * Read the 32-bit AQM register at byte offset 'off' in the mapped
 * queue manager register window.
 */
static uint32_t
aqm_reg_read(struct ixpqmgr_softc *sc, bus_size_t off)
{
	DPRINTFn(9, sc->sc_dev, "%s(0x%x)\n", __func__, (int)off);
	return bus_space_read_4(sc->sc_iot, sc->sc_ioh, off);
}
193
/*
 * Write 'val' to the 32-bit AQM register at byte offset 'off' in
 * the mapped queue manager register window.
 */
static void
aqm_reg_write(struct ixpqmgr_softc *sc, bus_size_t off, uint32_t val)
{
	DPRINTFn(9, sc->sc_dev, "%s(0x%x, 0x%x)\n", __func__, (int)off, val);
	bus_space_write_4(sc->sc_iot, sc->sc_ioh, off, val);
}
200
201 static int
202 ixpqmgr_probe(device_t dev)
203 {
204 device_set_desc(dev, "IXP4XX Q-Manager");
205 return 0;
206 }
207
208 static int
209 ixpqmgr_attach(device_t dev)
210 {
211 struct ixpqmgr_softc *sc = device_get_softc(dev);
212 struct ixp425_softc *sa = device_get_softc(device_get_parent(dev));
213 int i, err;
214
215 ixpqmgr_sc = sc;
216
217 sc->sc_dev = dev;
218 sc->sc_iot = sa->sc_iot;
219 if (bus_space_map(sc->sc_iot, IXP425_QMGR_HWBASE, IXP425_QMGR_SIZE,
220 0, &sc->sc_ioh))
221 panic("%s: Cannot map registers", device_get_name(dev));
222
223 /* NB: we only use the lower 32 q's */
224
225 /* Set up QMGR interrupts */
226 sc->sc_rid1 = 0;
227 sc->sc_irq1 = bus_alloc_resource(dev, SYS_RES_IRQ, &sc->sc_rid1,
228 IXP425_INT_QUE1_32, IXP425_INT_QUE1_32, 1, RF_ACTIVE);
229 sc->sc_rid2 = 1;
230 sc->sc_irq2 = bus_alloc_resource(dev, SYS_RES_IRQ, &sc->sc_rid2,
231 IXP425_INT_QUE33_64, IXP425_INT_QUE33_64, 1, RF_ACTIVE);
232
233 if (sc->sc_irq1 == NULL || sc->sc_irq2 == NULL)
234 panic("Unable to allocate the qmgr irqs.\n");
235
236 err = bus_setup_intr(dev, sc->sc_irq1, INTR_TYPE_NET | INTR_MPSAFE,
237 NULL, ixpqmgr_intr, NULL, &sc->sc_ih1);
238 if (err) {
239 device_printf(dev, "failed to set up qmgr irq=%d\n",
240 IXP425_INT_QUE1_32);
241 return (ENXIO);
242 }
243 err = bus_setup_intr(dev, sc->sc_irq2, INTR_TYPE_NET | INTR_MPSAFE,
244 NULL, ixpqmgr_intr, NULL, &sc->sc_ih2);
245 if (err) {
246 device_printf(dev, "failed to set up qmgr irq=%d\n",
247 IXP425_INT_QUE33_64);
248 return (ENXIO);
249 }
250
251 /* NB: softc is pre-zero'd */
252 for (i = 0; i < IX_QMGR_MAX_NUM_QUEUES; i++) {
253 struct qmgrInfo *qi = &sc->qinfo[i];
254
255 qi->cb = dummyCallback;
256 qi->priority = IX_QMGR_Q_PRIORITY_0; /* default priority */
257 /*
258 * There are two interrupt registers, 32 bits each. One
259 * for the lower queues(0-31) and one for the upper
260 * queues(32-63). Therefore need to mod by 32 i.e the
261 * min upper queue identifier.
262 */
263 qi->intRegCheckMask = (1<<(i%(IX_QMGR_MIN_QUEUPP_QID)));
264
265 /*
266 * Register addresses and bit masks are calculated and
267 * stored here to optimize QRead, QWrite and QStatusGet
268 * functions.
269 */
270
271 /* AQM Queue access reg addresses, per queue */
272 qi->qAccRegAddr = IX_QMGR_Q_ACCESS_ADDR_GET(i);
273 qi->qAccRegAddr = IX_QMGR_Q_ACCESS_ADDR_GET(i);
274 qi->qConfigRegAddr = IX_QMGR_Q_CONFIG_ADDR_GET(i);
275
276 /* AQM Queue lower-group (0-31), only */
277 if (i < IX_QMGR_MIN_QUEUPP_QID) {
278 /* AQM Q underflow/overflow status reg address, per queue */
279 qi->qUOStatRegAddr = IX_QMGR_QUEUOSTAT0_OFFSET +
280 ((i / IX_QMGR_QUEUOSTAT_NUM_QUE_PER_WORD) *
281 sizeof(uint32_t));
282
283 /* AQM Q underflow status bit masks for status reg per queue */
284 qi->qUflowStatBitMask =
285 (IX_QMGR_UNDERFLOW_BIT_OFFSET + 1) <<
286 ((i & (IX_QMGR_QUEUOSTAT_NUM_QUE_PER_WORD - 1)) *
287 (32 / IX_QMGR_QUEUOSTAT_NUM_QUE_PER_WORD));
288
289 /* AQM Q overflow status bit masks for status reg, per queue */
290 qi->qOflowStatBitMask =
291 (IX_QMGR_OVERFLOW_BIT_OFFSET + 1) <<
292 ((i & (IX_QMGR_QUEUOSTAT_NUM_QUE_PER_WORD - 1)) *
293 (32 / IX_QMGR_QUEUOSTAT_NUM_QUE_PER_WORD));
294
295 /* AQM Q lower-group (0-31) status reg addresses, per queue */
296 qi->qStatRegAddr = IX_QMGR_QUELOWSTAT0_OFFSET +
297 ((i / IX_QMGR_QUELOWSTAT_NUM_QUE_PER_WORD) *
298 sizeof(uint32_t));
299
300 /* AQM Q lower-group (0-31) status register bit offset */
301 qi->qStatBitsOffset =
302 (i & (IX_QMGR_QUELOWSTAT_NUM_QUE_PER_WORD - 1)) *
303 (32 / IX_QMGR_QUELOWSTAT_NUM_QUE_PER_WORD);
304 } else { /* AQM Q upper-group (32-63), only */
305 qi->qUOStatRegAddr = 0; /* XXX */
306
307 /* AQM Q upper-group (32-63) Nearly Empty status reg bitmasks */
308 qi->qStat0BitMask = (1 << (i - IX_QMGR_MIN_QUEUPP_QID));
309
310 /* AQM Q upper-group (32-63) Full status register bitmasks */
311 qi->qStat1BitMask = (1 << (i - IX_QMGR_MIN_QUEUPP_QID));
312 }
313 }
314
315 sc->aqmFreeSramAddress = 0x100; /* Q buffer space starts at 0x2100 */
316
317 ixpqmgr_rebuild(sc); /* build initial priority table */
318 aqm_reset(sc); /* reset h/w */
319 return (0);
320 }
321
/*
 * Detach: quiesce the h/w (aqm_reset also clears the interrupt
 * enables), tear down the interrupt handlers and release the irq
 * resources and register mapping acquired at attach time.
 */
static int
ixpqmgr_detach(device_t dev)
{
	struct ixpqmgr_softc *sc = device_get_softc(dev);

	aqm_reset(sc);		/* disable interrupts */
	bus_teardown_intr(dev, sc->sc_irq1, sc->sc_ih1);
	bus_teardown_intr(dev, sc->sc_irq2, sc->sc_ih2);
	bus_release_resource(dev, SYS_RES_IRQ, sc->sc_rid1, sc->sc_irq1);
	bus_release_resource(dev, SYS_RES_IRQ, sc->sc_rid2, sc->sc_irq2);
	bus_space_unmap(sc->sc_iot, sc->sc_ioh, IXP425_QMGR_SIZE);
	return (0);
}
335
/*
 * Configure queue qId: record its size and callback, program the
 * h/w config register (entry size, buffer size, SRAM base and the
 * ne/nf watermarks via aqm_qcfg), set the interrupt source for
 * lower queues and enable the interrupt when a real callback is
 * supplied.  Always returns 0.
 *
 * NOTE(review): qId and qEntries are not validated here and the
 * SRAM allocator (aqmFreeSramAddress) never checks for exhaustion;
 * callers are trusted -- confirm before hardening.
 */
int
ixpqmgr_qconfig(int qId, int qEntries, int ne, int nf, int srcSel,
    qconfig_hand_t *cb, void *cbarg)
{
	struct ixpqmgr_softc *sc = ixpqmgr_sc;
	struct qmgrInfo *qi = &sc->qinfo[qId];

	DPRINTF(sc->sc_dev, "%s(%u, %u, %u, %u, %u, %p, %p)\n",
	    __func__, qId, qEntries, ne, nf, srcSel, cb, cbarg);

	/* NB: entry size is always 1, so words == entries */
	qi->qSizeInWords = qEntries;

	qi->qReadCount = 0;
	qi->qWriteCount = 0;
	qi->qSizeInEntries = qEntries;	/* XXX kept for code clarity */

	if (cb == NULL) {
		/* Reset to dummy callback */
		qi->cb = dummyCallback;
		qi->cbarg = NULL;
	} else {
		qi->cb = cb;
		qi->cbarg = cbarg;
	}

	/* Write the config register; NB must be AFTER qinfo setup */
	aqm_qcfg(sc, qId, ne, nf);
	/*
	 * Account for space just allocated to queue.
	 */
	sc->aqmFreeSramAddress += (qi->qSizeInWords * sizeof(uint32_t));

	/* Set the interrupt source if this queue is in the range 0-31 */
	if (qId < IX_QMGR_MIN_QUEUPP_QID)
		aqm_srcsel_write(sc, qId, srcSel);

	if (cb != NULL)			/* Enable the interrupt */
		aqm_int_enable(sc, qId);

	/* Dispatcher table is refreshed lazily after the next interrupt */
	sc->rebuildTable = TRUE;

	return 0;		/* XXX */
}
380
/*
 * Append 'entry' to queue qId.  Returns 0 on success or ENOSPC
 * when the h/w reports overflow.  Overflow detection is only
 * possible for the lower queues (0-31); upper-queue writes are
 * unconditionally assumed to succeed.
 */
int
ixpqmgr_qwrite(int qId, uint32_t entry)
{
	struct ixpqmgr_softc *sc = ixpqmgr_sc;
	struct qmgrInfo *qi = &sc->qinfo[qId];

	DPRINTFn(3, sc->sc_dev, "%s(%u, 0x%x) writeCount %u size %u\n",
	    __func__, qId, entry, qi->qWriteCount, qi->qSizeInEntries);

	/* write the entry */
	aqm_reg_write(sc, qi->qAccRegAddr, entry);

	/* NB: overflow is available for lower queues only */
	if (qId < IX_QMGR_MIN_QUEUPP_QID) {
		int qSize = qi->qSizeInEntries;
		/*
		 * Increment the current number of entries in the queue
		 * and check for overflow .
		 */
		if (qi->qWriteCount++ == qSize) {	/* check for overflow */
			uint32_t status = aqm_reg_read(sc, qi->qUOStatRegAddr);
			int qPtrs;

			/*
			 * Read the status twice because the status may
			 * not be immediately ready after the write operation
			 */
			if ((status & qi->qOflowStatBitMask) ||
			    ((status = aqm_reg_read(sc, qi->qUOStatRegAddr)) & qi->qOflowStatBitMask)) {
				/*
				 * The queue is full, clear the overflow status bit if set.
				 */
				aqm_reg_write(sc, qi->qUOStatRegAddr,
				    status & ~qi->qOflowStatBitMask);
				qi->qWriteCount = qSize;
				DPRINTFn(5, sc->sc_dev,
				    "%s(%u, 0x%x) Q full, overflow status cleared\n",
				    __func__, qId, entry);
				return ENOSPC;
			}
			/*
			 * No overflow occurred : someone is draining the queue
			 * and the current counter needs to be
			 * updated from the current number of entries in the queue
			 */

			/* calculate number of words in q */
			qPtrs = aqm_reg_read(sc, qi->qConfigRegAddr);
			DPRINTFn(2, sc->sc_dev,
			    "%s(%u, 0x%x) Q full, no overflow status, qConfig 0x%x\n",
			    __func__, qId, entry, qPtrs);
			/*
			 * NB: presumably the config word packs the write and
			 * read pointers in bits 0-6 and 7-13; the difference
			 * mod 128 is the occupancy -- confirm against the
			 * IXP4XX developer's manual.
			 */
			qPtrs = (qPtrs - (qPtrs >> 7)) & 0x7f;

			if (qPtrs == 0) {
				/*
				 * The queue may be full at the time of the
				 * snapshot. Next access will check
				 * the overflow status again.
				 */
				qi->qWriteCount = qSize;
			} else {
				/* convert the number of words to a number of entries */
				qi->qWriteCount = qPtrs & (qSize - 1);
			}
		}
	}
	return 0;
}
449
/*
 * Pop one entry from queue qId into *entry.  Returns 0 on success
 * or ENOSPC when the h/w underflow status shows the queue was
 * empty.  A zero entry is the in-band "maybe empty" sentinel: only
 * then (and only for lower queues, 0-31) is the underflow status
 * consulted and cleared.
 */
int
ixpqmgr_qread(int qId, uint32_t *entry)
{
	struct ixpqmgr_softc *sc = ixpqmgr_sc;
	struct qmgrInfo *qi = &sc->qinfo[qId];
	bus_size_t off = qi->qAccRegAddr;

	*entry = aqm_reg_read(sc, off);

	/*
	 * Reset the current read count : next access to the read function
	 * will force a underflow status check.
	 */
	qi->qReadCount = 0;

	/* Check if underflow occurred on the read */
	if (*entry == 0 && qId < IX_QMGR_MIN_QUEUPP_QID) {
		/* get the queue status */
		uint32_t status = aqm_reg_read(sc, qi->qUOStatRegAddr);

		if (status & qi->qUflowStatBitMask) { /* clear underflow status */
			aqm_reg_write(sc, qi->qUOStatRegAddr,
			    status &~ qi->qUflowStatBitMask);
			return ENOSPC;
		}
	}
	return 0;
}
478
/*
 * Pop up to n entries from queue qId into the array p, stopping
 * early if a zero entry (the in-band empty sentinel) is read.
 * NB: the final entry read is always stored, so even on early
 * termination *p receives the terminating zero.  Returns 0 on
 * success or ENOSPC when the h/w underflow status confirms the
 * queue ran dry (lower queues, 0-31, only).
 */
int
ixpqmgr_qreadm(int qId, uint32_t n, uint32_t *p)
{
	struct ixpqmgr_softc *sc = ixpqmgr_sc;
	struct qmgrInfo *qi = &sc->qinfo[qId];
	uint32_t entry;
	bus_size_t off = qi->qAccRegAddr;

	entry = aqm_reg_read(sc, off);
	while (--n) {
		if (entry == 0) {
			/* if we read a NULL entry, stop. We have underflowed */
			break;
		}
		*p++ = entry;	/* store */
		entry = aqm_reg_read(sc, off);
	}
	*p = entry;

	/*
	 * Reset the current read count : next access to the read function
	 * will force a underflow status check.
	 */
	qi->qReadCount = 0;

	/* Check if underflow occurred on the read */
	if (entry == 0 && qId < IX_QMGR_MIN_QUEUPP_QID) {
		/* get the queue status */
		uint32_t status = aqm_reg_read(sc, qi->qUOStatRegAddr);

		if (status & qi->qUflowStatBitMask) { /* clear underflow status */
			aqm_reg_write(sc, qi->qUOStatRegAddr,
			    status &~ qi->qUflowStatBitMask);
			return ENOSPC;
		}
	}
	return 0;
}
517
/*
 * Return the status bits for queue qId.  Lower queues (0-31) have
 * a multi-bit status nibble packed several-per-word in the
 * QUELOWSTAT registers; upper queues (32-63) have only single
 * nearly-empty and full bits in QUEUPPSTAT0/1, which are folded
 * into the same NE/F bit-mask encoding.
 */
uint32_t
ixpqmgr_getqstatus(int qId)
{
#define	QLOWSTATMASK \
	((1 << (32 / IX_QMGR_QUELOWSTAT_NUM_QUE_PER_WORD)) - 1)
	struct ixpqmgr_softc *sc = ixpqmgr_sc;
	const struct qmgrInfo *qi = &sc->qinfo[qId];
	uint32_t status;

	if (qId < IX_QMGR_MIN_QUEUPP_QID) {
		/* read the status of a queue in the range 0-31 */
		status = aqm_reg_read(sc, qi->qStatRegAddr);

		/* mask out the status bits relevant only to this queue */
		status = (status >> qi->qStatBitsOffset) & QLOWSTATMASK;
	} else { /* read status of a queue in the range 32-63 */
		status = 0;
		if (aqm_reg_read(sc, IX_QMGR_QUEUPPSTAT0_OFFSET)&qi->qStat0BitMask)
			status |= IX_QMGR_Q_STATUS_NE_BIT_MASK;	/* nearly empty */
		if (aqm_reg_read(sc, IX_QMGR_QUEUPPSTAT1_OFFSET)&qi->qStat1BitMask)
			status |= IX_QMGR_Q_STATUS_F_BIT_MASK;	/* full */
	}
	return status;
#undef QLOWSTATMASK
}
543
544 uint32_t
545 ixpqmgr_getqconfig(int qId)
546 {
547 struct ixpqmgr_softc *sc = ixpqmgr_sc;
548
549 return aqm_reg_read(sc, IX_QMGR_Q_CONFIG_ADDR_GET(qId));
550 }
551
552 void
553 ixpqmgr_dump(void)
554 {
555 struct ixpqmgr_softc *sc = ixpqmgr_sc;
556 int i, a;
557
558 /* status registers */
559 printf("0x%04x: %08x %08x %08x %08x\n"
560 , 0x400
561 , aqm_reg_read(sc, 0x400)
562 , aqm_reg_read(sc, 0x400+4)
563 , aqm_reg_read(sc, 0x400+8)
564 , aqm_reg_read(sc, 0x400+12)
565 );
566 printf("0x%04x: %08x %08x %08x %08x\n"
567 , 0x410
568 , aqm_reg_read(sc, 0x410)
569 , aqm_reg_read(sc, 0x410+4)
570 , aqm_reg_read(sc, 0x410+8)
571 , aqm_reg_read(sc, 0x410+12)
572 );
573 printf("0x%04x: %08x %08x %08x %08x\n"
574 , 0x420
575 , aqm_reg_read(sc, 0x420)
576 , aqm_reg_read(sc, 0x420+4)
577 , aqm_reg_read(sc, 0x420+8)
578 , aqm_reg_read(sc, 0x420+12)
579 );
580 printf("0x%04x: %08x %08x %08x %08x\n"
581 , 0x430
582 , aqm_reg_read(sc, 0x430)
583 , aqm_reg_read(sc, 0x430+4)
584 , aqm_reg_read(sc, 0x430+8)
585 , aqm_reg_read(sc, 0x430+12)
586 );
587 /* q configuration registers */
588 for (a = 0x2000; a < 0x20ff; a += 32)
589 printf("0x%04x: %08x %08x %08x %08x %08x %08x %08x %08x\n"
590 , a
591 , aqm_reg_read(sc, a)
592 , aqm_reg_read(sc, a+4)
593 , aqm_reg_read(sc, a+8)
594 , aqm_reg_read(sc, a+12)
595 , aqm_reg_read(sc, a+16)
596 , aqm_reg_read(sc, a+20)
597 , aqm_reg_read(sc, a+24)
598 , aqm_reg_read(sc, a+28)
599 );
600 /* allocated SRAM */
601 for (i = 0x100; i < sc->aqmFreeSramAddress; i += 32) {
602 a = 0x2000 + i;
603 printf("0x%04x: %08x %08x %08x %08x %08x %08x %08x %08x\n"
604 , a
605 , aqm_reg_read(sc, a)
606 , aqm_reg_read(sc, a+4)
607 , aqm_reg_read(sc, a+8)
608 , aqm_reg_read(sc, a+12)
609 , aqm_reg_read(sc, a+16)
610 , aqm_reg_read(sc, a+20)
611 , aqm_reg_read(sc, a+24)
612 , aqm_reg_read(sc, a+28)
613 );
614 }
615 for (i = 0; i < 16; i++) {
616 printf("Q[%2d] config 0x%08x status 0x%02x "
617 "Q[%2d] config 0x%08x status 0x%02x\n"
618 , i, ixpqmgr_getqconfig(i), ixpqmgr_getqstatus(i)
619 , i+16, ixpqmgr_getqconfig(i+16), ixpqmgr_getqstatus(i+16)
620 );
621 }
622 }
623
/*
 * Enable interrupt notifications for queue qId, programming the
 * interrupt source selector first for lower queues (0-31); upper
 * queues have no source selector (their int source is fixed).
 */
void
ixpqmgr_notify_enable(int qId, int srcSel)
{
	struct ixpqmgr_softc *sc = ixpqmgr_sc;
#if 0
	/* Calculate the checkMask and checkValue for this q */
	aqm_calc_statuscheck(sc, qId, srcSel);
#endif
	/* Set the interrupt source if this queue is in the range 0-31 */
	if (qId < IX_QMGR_MIN_QUEUPP_QID)
		aqm_srcsel_write(sc, qId, srcSel);

	/* Enable the interrupt */
	aqm_int_enable(sc, qId);
}
639
640 void
641 ixpqmgr_notify_disable(int qId)
642 {
643 struct ixpqmgr_softc *sc = ixpqmgr_sc;
644
645 aqm_int_disable(sc, qId);
646 }
647
648 /*
649 * Rebuild the priority table used by the dispatcher.
650 */
651 static void
652 ixpqmgr_rebuild(struct ixpqmgr_softc *sc)
653 {
654 int q, pri;
655 int lowQuePriorityTableIndex, uppQuePriorityTableIndex;
656 struct qmgrInfo *qi;
657
658 sc->lowPriorityTableFirstHalfMask = 0;
659 sc->uppPriorityTableFirstHalfMask = 0;
660
661 lowQuePriorityTableIndex = 0;
662 uppQuePriorityTableIndex = 32;
663 for (pri = 0; pri < IX_QMGR_NUM_PRIORITY_LEVELS; pri++) {
664 /* low priority q's */
665 for (q = 0; q < IX_QMGR_MIN_QUEUPP_QID; q++) {
666 qi = &sc->qinfo[q];
667 if (qi->priority == pri) {
668 /*
669 * Build the priority table bitmask which match the
670 * queues of the first half of the priority table.
671 */
672 if (lowQuePriorityTableIndex < 16) {
673 sc->lowPriorityTableFirstHalfMask |=
674 qi->intRegCheckMask;
675 }
676 sc->priorityTable[lowQuePriorityTableIndex++] = q;
677 }
678 }
679 /* high priority q's */
680 for (; q < IX_QMGR_MAX_NUM_QUEUES; q++) {
681 qi = &sc->qinfo[q];
682 if (qi->priority == pri) {
683 /*
684 * Build the priority table bitmask which match the
685 * queues of the first half of the priority table .
686 */
687 if (uppQuePriorityTableIndex < 48) {
688 sc->uppPriorityTableFirstHalfMask |=
689 qi->intRegCheckMask;
690 }
691 sc->priorityTable[uppQuePriorityTableIndex++] = q;
692 }
693 }
694 }
695 sc->rebuildTable = FALSE;
696 }
697
698 /*
699 * Count the number of leading zero bits in a word,
700 * and return the same value than the CLZ instruction.
701 * Note this is similar to the standard ffs function but
702 * it counts zero's from the MSB instead of the LSB.
703 *
704 * word (in) return value (out)
705 * 0x80000000 0
706 * 0x40000000 1
707 * ,,, ,,,
708 * 0x00000002 30
709 * 0x00000001 31
710 * 0x00000000 32
711 *
712 * The C version of this function is used as a replacement
713 * for system not providing the equivalent of the CLZ
714 * assembly language instruction.
715 *
716 * Note that this version is big-endian
717 */
718 static unsigned int
719 _lzcount(uint32_t word)
720 {
721 unsigned int lzcount = 0;
722
723 if (word == 0)
724 return 32;
725 while ((word & 0x80000000) == 0) {
726 word <<= 1;
727 lzcount++;
728 }
729 return lzcount;
730 }
731
/*
 * Queue-manager interrupt handler.  Reads and acknowledges the
 * lower-group interrupt register, then dispatches the callback of
 * every queue whose bit is set, visiting queues in priority-table
 * order.  NB: only QINTREG0 (queues 0-31) is dispatched here;
 * QINTREG1 is read solely for the debug printout.
 */
static void
ixpqmgr_intr(void *arg)
{
	struct ixpqmgr_softc *sc = ixpqmgr_sc;
	uint32_t intRegVal;		/* Interrupt reg val */
	struct qmgrInfo *qi;
	int priorityTableIndex;		/* Priority table index */
	int qIndex;			/* Current queue being processed */

	/* Read the interrupt register */
	intRegVal = aqm_reg_read(sc, IX_QMGR_QINTREG0_OFFSET);
	/* Write back to clear interrupt */
	aqm_reg_write(sc, IX_QMGR_QINTREG0_OFFSET, intRegVal);

	DPRINTFn(5, sc->sc_dev, "%s: ISR0 0x%x ISR1 0x%x\n",
	    __func__, intRegVal, aqm_reg_read(sc, IX_QMGR_QINTREG1_OFFSET));

	/* Dispatch only when at least one queue bit is set */
	if (intRegVal != 0) {
		/* get the first queue Id from the interrupt register value */
		qIndex = (32 - 1) - _lzcount(intRegVal);

		DPRINTFn(2, sc->sc_dev, "%s: ISR0 0x%x qIndex %u\n",
		    __func__, intRegVal, qIndex);

		/*
		 * Optimize for single callback case.
		 */
		qi = &sc->qinfo[qIndex];
		if (intRegVal == qi->intRegCheckMask) {
			/*
			 * Only 1 queue event triggered a notification.
			 * Call the callback function for this queue
			 */
			qi->cb(qIndex, qi->cbarg);
		} else {
			/*
			 * The event is triggered by more than 1 queue,
			 * the queue search will start from the beginning
			 * or the middle of the priority table.
			 *
			 * The search will end when all the bits of the interrupt
			 * register are cleared. There is no need to maintain
			 * a separate value and test it at each iteration.
			 */
			if (intRegVal & sc->lowPriorityTableFirstHalfMask) {
				priorityTableIndex = 0;
			} else {
				priorityTableIndex = 16;
			}
			/*
			 * Iterate over the priority table until all the bits
			 * of the interrupt register are cleared.
			 */
			do {
				qIndex = sc->priorityTable[priorityTableIndex++];
				qi = &sc->qinfo[qIndex];

				/* If this queue caused this interrupt to be raised */
				if (intRegVal & qi->intRegCheckMask) {
					/* Call the callback function for this queue */
					qi->cb(qIndex, qi->cbarg);
					/* Clear the interrupt register bit */
					intRegVal &= ~qi->intRegCheckMask;
				}
			} while (intRegVal);
		}
	}

	/* Rebuild the priority table if needed */
	if (sc->rebuildTable)
		ixpqmgr_rebuild(sc);
}
805
#if 0
/*
 * Generate the parameters used to check if a Q's status matches
 * the specified source select. We calculate which status word
 * to check (statusWordOffset), the value to check the status
 * against (statusCheckValue) and the mask (statusMask) to mask
 * out all but the bits to check in the status word.
 *
 * NB: compiled out -- needed only for A0 parts (see the matching
 * #if 0 fields in struct qmgrInfo); retained for reference.
 */
static void
aqm_calc_statuscheck(int qId, IxQMgrSourceId srcSel)
{
	struct qmgrInfo *qi = &qinfo[qId];
	uint32_t shiftVal;

	if (qId < IX_QMGR_MIN_QUEUPP_QID) {
		switch (srcSel) {
		case IX_QMGR_Q_SOURCE_ID_E:
			qi->statusCheckValue = IX_QMGR_Q_STATUS_E_BIT_MASK;
			qi->statusMask = IX_QMGR_Q_STATUS_E_BIT_MASK;
			break;
		case IX_QMGR_Q_SOURCE_ID_NE:
			qi->statusCheckValue = IX_QMGR_Q_STATUS_NE_BIT_MASK;
			qi->statusMask = IX_QMGR_Q_STATUS_NE_BIT_MASK;
			break;
		case IX_QMGR_Q_SOURCE_ID_NF:
			qi->statusCheckValue = IX_QMGR_Q_STATUS_NF_BIT_MASK;
			qi->statusMask = IX_QMGR_Q_STATUS_NF_BIT_MASK;
			break;
		case IX_QMGR_Q_SOURCE_ID_F:
			qi->statusCheckValue = IX_QMGR_Q_STATUS_F_BIT_MASK;
			qi->statusMask = IX_QMGR_Q_STATUS_F_BIT_MASK;
			break;
		case IX_QMGR_Q_SOURCE_ID_NOT_E:
			qi->statusCheckValue = 0;
			qi->statusMask = IX_QMGR_Q_STATUS_E_BIT_MASK;
			break;
		case IX_QMGR_Q_SOURCE_ID_NOT_NE:
			qi->statusCheckValue = 0;
			qi->statusMask = IX_QMGR_Q_STATUS_NE_BIT_MASK;
			break;
		case IX_QMGR_Q_SOURCE_ID_NOT_NF:
			qi->statusCheckValue = 0;
			qi->statusMask = IX_QMGR_Q_STATUS_NF_BIT_MASK;
			break;
		case IX_QMGR_Q_SOURCE_ID_NOT_F:
			qi->statusCheckValue = 0;
			qi->statusMask = IX_QMGR_Q_STATUS_F_BIT_MASK;
			break;
		default:
			/* Should never hit */
			IX_OSAL_ASSERT(0);
			break;
		}

		/* One nibble of status per queue so need to shift the
		 * check value and mask out to the correct position.
		 */
		shiftVal = (qId % IX_QMGR_QUELOWSTAT_NUM_QUE_PER_WORD) *
		    IX_QMGR_QUELOWSTAT_BITS_PER_Q;

		/* Calculate the which status word to check from the qId,
		 * 8 Qs status per word
		 */
		qi->statusWordOffset = qId / IX_QMGR_QUELOWSTAT_NUM_QUE_PER_WORD;

		qi->statusCheckValue <<= shiftVal;
		qi->statusMask <<= shiftVal;
	} else {
		/* One status word */
		qi->statusWordOffset = 0;
		/* Single bits per queue and int source bit hardwired NE,
		 * Qs start at 32.
		 */
		qi->statusMask = 1 << (qId - IX_QMGR_MIN_QUEUPP_QID);
		qi->statusCheckValue = qi->statusMask;
	}
}
#endif
884
/*
 * Set the interrupt-enable bit for queue qId in the appropriate
 * register: QUEIEREG0 for queues 0-31, QUEIEREG1 for 32-63; qId
 * mod 32 selects the bit within the register.
 */
static void
aqm_int_enable(struct ixpqmgr_softc *sc, int qId)
{
	bus_size_t reg;
	uint32_t v;

	if (qId < IX_QMGR_MIN_QUEUPP_QID)
		reg = IX_QMGR_QUEIEREG0_OFFSET;
	else
		reg = IX_QMGR_QUEIEREG1_OFFSET;
	v = aqm_reg_read(sc, reg);
	aqm_reg_write(sc, reg, v | (1 << (qId % IX_QMGR_MIN_QUEUPP_QID)));

	/* NOTE(review): %lx assumes bus_size_t is long-sized -- confirm */
	DPRINTF(sc->sc_dev, "%s(%u) 0x%lx: 0x%x => 0x%x\n",
	    __func__, qId, reg, v, aqm_reg_read(sc, reg));
}
901
/*
 * Clear the interrupt-enable bit for queue qId; mirror image of
 * aqm_int_enable above.
 */
static void
aqm_int_disable(struct ixpqmgr_softc *sc, int qId)
{
	bus_size_t reg;
	uint32_t v;

	if (qId < IX_QMGR_MIN_QUEUPP_QID)
		reg = IX_QMGR_QUEIEREG0_OFFSET;
	else
		reg = IX_QMGR_QUEIEREG1_OFFSET;
	v = aqm_reg_read(sc, reg);
	aqm_reg_write(sc, reg, v &~ (1 << (qId % IX_QMGR_MIN_QUEUPP_QID)));

	/* NOTE(review): %lx assumes bus_size_t is long-sized -- confirm */
	DPRINTF(sc->sc_dev, "%s(%u) 0x%lx: 0x%x => 0x%x\n",
	    __func__, qId, reg, v, aqm_reg_read(sc, reg));
}
918
/*
 * Integer floor(log2(n)).
 * N.B. this function will return 0 if supplied 0 (and also for 1).
 */
static unsigned
log2(unsigned n)
{
	unsigned bits;

	for (bits = 0; (n >>= 1) != 0; bits++)
		;
	return bits;
}
930
/*
 * Map an entry size in words -- 1, 2 or 4 -- to the 2-bit AQM
 * config-register encoding "00", "01" or "10" respectively.
 */
static __inline unsigned
toAqmEntrySize(int entrySize)
{
	return log2((unsigned)entrySize);
}
937
938 static __inline unsigned
939 toAqmBufferSize(unsigned bufferSizeInWords)
940 {
941 /* bufferSize 16("00"),32("01),64("10"),128("11") */
942 return log2(bufferSizeInWords / IX_QMGR_MIN_BUFFER_SIZE);
943 }
944
/*
 * Map a watermark entry count to the 3-bit AQM config-register
 * encoding: 0 -> "000", 1 -> "001", 2 -> "010", 4 -> "011",
 * 8 -> "100", 16 -> "101", 32 -> "110", 64 -> "111".
 */
static __inline unsigned
toAqmWatermark(int watermark)
{
	/* NB: log2(0) is 0, so a watermark of 0 encodes as "000" */
	return log2((unsigned)(2 * watermark));
}
954
/*
 * Program the QUECONFIG register for queue qId: entry size (always
 * 1 word), buffer size, the queue's SRAM base address (taken from
 * the free-space cursor, expressed in 16-word units) and the
 * nearly-empty/nearly-full watermarks.  NB: the caller advances
 * aqmFreeSramAddress afterwards (see ixpqmgr_qconfig).
 */
static void
aqm_qcfg(struct ixpqmgr_softc *sc, int qId, u_int ne, u_int nf)
{
	const struct qmgrInfo *qi = &sc->qinfo[qId];
	uint32_t qCfg;
	uint32_t baseAddress;

	/* Build config register */
	qCfg = ((toAqmEntrySize(1) & IX_QMGR_ENTRY_SIZE_MASK) <<
		    IX_QMGR_Q_CONFIG_ESIZE_OFFSET)
	     | ((toAqmBufferSize(qi->qSizeInWords) & IX_QMGR_SIZE_MASK) <<
		    IX_QMGR_Q_CONFIG_BSIZE_OFFSET);

	/* baseAddress, calculated relative to start address */
	baseAddress = sc->aqmFreeSramAddress;

	/* base address must be word-aligned */
	KASSERT((baseAddress % IX_QMGR_BASE_ADDR_16_WORD_ALIGN) == 0,
	    ("address not word-aligned"));

	/* Now convert to a 16 word pointer as required by QUECONFIG register */
	baseAddress >>= IX_QMGR_BASE_ADDR_16_WORD_SHIFT;
	qCfg |= baseAddress << IX_QMGR_Q_CONFIG_BADDR_OFFSET;

	/* set watermarks */
	qCfg |= (toAqmWatermark(ne) << IX_QMGR_Q_CONFIG_NE_OFFSET)
	     |  (toAqmWatermark(nf) << IX_QMGR_Q_CONFIG_NF_OFFSET);

	DPRINTF(sc->sc_dev, "%s(%u, %u, %u) 0x%x => 0x%x @ 0x%x\n",
	    __func__, qId, ne, nf,
	    aqm_reg_read(sc, IX_QMGR_Q_CONFIG_ADDR_GET(qId)),
	    qCfg, IX_QMGR_Q_CONFIG_ADDR_GET(qId));

	aqm_reg_write(sc, IX_QMGR_Q_CONFIG_ADDR_GET(qId), qCfg);
}
990
/*
 * Program the interrupt source selector for queue qId: merge the
 * per-queue sourceId field into the INT0SRCSELREG word that holds
 * this queue's selector (several queues share each register).
 */
static void
aqm_srcsel_write(struct ixpqmgr_softc *sc, int qId, int sourceId)
{
	bus_size_t off;
	uint32_t v;

	/*
	 * Calculate the register offset; multiple queues split across registers
	 */
	off = IX_QMGR_INT0SRCSELREG0_OFFSET +
	    ((qId / IX_QMGR_INTSRC_NUM_QUE_PER_WORD) * sizeof(uint32_t));

	v = aqm_reg_read(sc, off);
	if (off == IX_QMGR_INT0SRCSELREG0_OFFSET && qId == 0) {
		/* Queue 0 at INT0SRCSELREG should not corrupt the value bit-3 */
		v |= 0x7;
	} else {
		const uint32_t bpq = 32 / IX_QMGR_INTSRC_NUM_QUE_PER_WORD;
		uint32_t mask;
		int qshift;

		qshift = (qId & (IX_QMGR_INTSRC_NUM_QUE_PER_WORD-1)) * bpq;
		mask = ((1 << bpq) - 1) << qshift;	/* q's status mask */

		/* merge sourceId */
		v = (v &~ mask) | ((sourceId << qshift) & mask);
	}

	/* NOTE(review): %lx assumes bus_size_t is long-sized -- confirm */
	DPRINTF(sc->sc_dev, "%s(%u, %u) 0x%x => 0x%x @ 0x%lx\n",
	    __func__, qId, sourceId, aqm_reg_read(sc, off), v, off);
	aqm_reg_write(sc, off, v);
}
1023
1024 /*
1025 * Reset AQM registers to default values.
1026 */
1027 static void
1028 aqm_reset(struct ixpqmgr_softc *sc)
1029 {
1030 int i;
1031
1032 /* Reset queues 0..31 status registers 0..3 */
1033 aqm_reg_write(sc, IX_QMGR_QUELOWSTAT0_OFFSET,
1034 IX_QMGR_QUELOWSTAT_RESET_VALUE);
1035 aqm_reg_write(sc, IX_QMGR_QUELOWSTAT1_OFFSET,
1036 IX_QMGR_QUELOWSTAT_RESET_VALUE);
1037 aqm_reg_write(sc, IX_QMGR_QUELOWSTAT2_OFFSET,
1038 IX_QMGR_QUELOWSTAT_RESET_VALUE);
1039 aqm_reg_write(sc, IX_QMGR_QUELOWSTAT3_OFFSET,
1040 IX_QMGR_QUELOWSTAT_RESET_VALUE);
1041
1042 /* Reset underflow/overflow status registers 0..1 */
1043 aqm_reg_write(sc, IX_QMGR_QUEUOSTAT0_OFFSET,
1044 IX_QMGR_QUEUOSTAT_RESET_VALUE);
1045 aqm_reg_write(sc, IX_QMGR_QUEUOSTAT1_OFFSET,
1046 IX_QMGR_QUEUOSTAT_RESET_VALUE);
1047
1048 /* Reset queues 32..63 nearly empty status registers */
1049 aqm_reg_write(sc, IX_QMGR_QUEUPPSTAT0_OFFSET,
1050 IX_QMGR_QUEUPPSTAT0_RESET_VALUE);
1051
1052 /* Reset queues 32..63 full status registers */
1053 aqm_reg_write(sc, IX_QMGR_QUEUPPSTAT1_OFFSET,
1054 IX_QMGR_QUEUPPSTAT1_RESET_VALUE);
1055
1056 /* Reset int0 status flag source select registers 0..3 */
1057 aqm_reg_write(sc, IX_QMGR_INT0SRCSELREG0_OFFSET,
1058 IX_QMGR_INT0SRCSELREG_RESET_VALUE);
1059 aqm_reg_write(sc, IX_QMGR_INT0SRCSELREG1_OFFSET,
1060 IX_QMGR_INT0SRCSELREG_RESET_VALUE);
1061 aqm_reg_write(sc, IX_QMGR_INT0SRCSELREG2_OFFSET,
1062 IX_QMGR_INT0SRCSELREG_RESET_VALUE);
1063 aqm_reg_write(sc, IX_QMGR_INT0SRCSELREG3_OFFSET,
1064 IX_QMGR_INT0SRCSELREG_RESET_VALUE);
1065
1066 /* Reset queue interrupt enable register 0..1 */
1067 aqm_reg_write(sc, IX_QMGR_QUEIEREG0_OFFSET,
1068 IX_QMGR_QUEIEREG_RESET_VALUE);
1069 aqm_reg_write(sc, IX_QMGR_QUEIEREG1_OFFSET,
1070 IX_QMGR_QUEIEREG_RESET_VALUE);
1071
1072 /* Reset queue interrupt register 0..1 */
1073 aqm_reg_write(sc, IX_QMGR_QINTREG0_OFFSET, IX_QMGR_QINTREG_RESET_VALUE);
1074 aqm_reg_write(sc, IX_QMGR_QINTREG1_OFFSET, IX_QMGR_QINTREG_RESET_VALUE);
1075
1076 /* Reset queue configuration words 0..63 */
1077 for (i = 0; i < IX_QMGR_MAX_NUM_QUEUES; i++)
1078 aqm_reg_write(sc, sc->qinfo[i].qConfigRegAddr,
1079 IX_QMGR_QUECONFIG_RESET_VALUE);
1080
1081 /* XXX zero SRAM to simplify debugging */
1082 for (i = IX_QMGR_QUEBUFFER_SPACE_OFFSET;
1083 i < IX_QMGR_AQM_SRAM_SIZE_IN_BYTES; i += sizeof(uint32_t))
1084 aqm_reg_write(sc, i, 0);
1085 }
1086
/* newbus glue: device interface entry points */
static device_method_t ixpqmgr_methods[] = {
	DEVMETHOD(device_probe, ixpqmgr_probe),
	DEVMETHOD(device_attach, ixpqmgr_attach),
	DEVMETHOD(device_detach, ixpqmgr_detach),

	{ 0, 0 }	/* table terminator */
};

static driver_t ixpqmgr_driver = {
	"ixpqmgr",
	ixpqmgr_methods,
	sizeof(struct ixpqmgr_softc),
};
static devclass_t ixpqmgr_devclass;

/* attach under the ixp bus; no module event handler or extra arg */
DRIVER_MODULE(ixpqmgr, ixp, ixpqmgr_driver, ixpqmgr_devclass, 0, 0);
Cache object: cbcaa5969319638da1151e4554d777ff
|