1 /*-
2 * Copyright (c) 2006 Sam Leffler, Errno Consulting
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer,
10 * without modification.
11 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
12 * similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any
13 * redistribution must be conditioned upon including a substantially
14 * similar Disclaimer requirement for further binary redistribution.
15 *
16 * NO WARRANTY
17 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
18 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
19 * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY
20 * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
21 * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY,
22 * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
23 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
24 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
25 * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
26 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
27 * THE POSSIBILITY OF SUCH DAMAGES.
28 */
29
30 /*-
31 * Copyright (c) 2001-2005, Intel Corporation.
32 * All rights reserved.
33 *
34 * Redistribution and use in source and binary forms, with or without
35 * modification, are permitted provided that the following conditions
36 * are met:
37 * 1. Redistributions of source code must retain the above copyright
38 * notice, this list of conditions and the following disclaimer.
39 * 2. Redistributions in binary form must reproduce the above copyright
40 * notice, this list of conditions and the following disclaimer in the
41 * documentation and/or other materials provided with the distribution.
42 * 3. Neither the name of the Intel Corporation nor the names of its contributors
43 * may be used to endorse or promote products derived from this software
44 * without specific prior written permission.
45 *
46 *
47 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ``AS IS''
48 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
49 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
50 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
51 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
52 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
53 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
54 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
55 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
56 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
57 * SUCH DAMAGE.
58 */
59 #include <sys/cdefs.h>
60 __FBSDID("$FreeBSD: releng/7.4/sys/arm/xscale/ixp425/ixp425_qmgr.c 180878 2008-07-28 15:49:26Z thompsa $");
61
62 /*
63 * Intel XScale Queue Manager support.
64 *
65 * Each IXP4XXX device has a hardware block that implements a priority
66 * queue manager that is shared between the XScale cpu and the backend
67 * devices (such as the NPE). Queues are accessed by reading/writing
68 * special memory locations. The queue contents are mapped into a shared
69 * SRAM region with entries managed in a circular buffer. The XScale
70 * processor can receive interrupts based on queue contents (a condition
71 * code determines when interrupts should be delivered).
72 *
73 * The code here basically replaces the qmgr class in the Intel Access
74 * Library (IAL).
75 */
76 #include <sys/param.h>
77 #include <sys/systm.h>
78 #include <sys/kernel.h>
79 #include <sys/module.h>
80 #include <sys/time.h>
81 #include <sys/bus.h>
82 #include <sys/resource.h>
83 #include <sys/rman.h>
84 #include <sys/sysctl.h>
85
86 #include <machine/bus.h>
87 #include <machine/cpu.h>
88 #include <machine/cpufunc.h>
89 #include <machine/resource.h>
90 #include <machine/intr.h>
91 #include <arm/xscale/ixp425/ixp425reg.h>
92 #include <arm/xscale/ixp425/ixp425var.h>
93
94 #include <arm/xscale/ixp425/ixp425_qmgr.h>
95
96 /*
97 * State per AQM hw queue.
98 * This structure holds q configuration and dispatch state.
99 */
/*
 * State per AQM hw queue.
 * This structure holds q configuration and dispatch state.
 * One entry exists per hardware queue (64 total); precomputed
 * register offsets and bit masks avoid per-access arithmetic in
 * the read/write/status fast paths.
 */
struct qmgrInfo {
	int qSizeInWords;		/* queue size in words */

	uint32_t qOflowStatBitMask;	/* overflow status mask */
	int qWriteCount;		/* queue write count */

	bus_size_t qAccRegAddr;		/* access register */
	bus_size_t qUOStatRegAddr;	/* status register */
	bus_size_t qConfigRegAddr;	/* config register */
	int qSizeInEntries;		/* queue size in entries */

	uint32_t qUflowStatBitMask;	/* underflow status mask */
	int qReadCount;			/* queue read count */

	/* XXX union: lower queues (0-31) use qStatRegAddr/qStatBitsOffset,
	 * upper queues (32-63) use qStat0BitMask/qStat1BitMask */
	uint32_t qStatRegAddr;		/* status reg (lower queues) */
	uint32_t qStatBitsOffset;	/* bit offset within status reg */
	uint32_t qStat0BitMask;		/* NE bit (upper queues) */
	uint32_t qStat1BitMask;		/* F bit (upper queues) */

	uint32_t intRegCheckMask;	/* interrupt reg check mask */
	void (*cb)(int, void *);	/* callback function */
	void *cbarg;			/* callback argument */
	int priority;			/* dispatch priority */
#if 0
	/* NB: needed only for A0 parts */
	u_int statusWordOffset;		/* status word offset */
	uint32_t statusMask;            /* status mask */
	uint32_t statusCheckValue;	/* status check value */
#endif
};
131
/*
 * Driver softc: bus handles, interrupt wiring, per-queue state and
 * the priority-ordered dispatch table used by the interrupt handler.
 */
struct ixpqmgr_softc {
	device_t		sc_dev;
	bus_space_tag_t		sc_iot;
	bus_space_handle_t	sc_ioh;
	struct resource		*sc_irq;	/* IRQ resource */
	void			*sc_ih;		/* interrupt handler */
	int			sc_rid;		/* resource id for irq */

	struct qmgrInfo		qinfo[IX_QMGR_MAX_NUM_QUEUES];
	/*
	 * This array contains a list of queue identifiers ordered by
	 * priority. The table is split logically between queue
	 * identifiers 0-31 and 32-63.  To optimize lookups bit masks
	 * are kept for the first-32 and last-32 q's.  When the
	 * table needs to be rebuilt mark rebuildTable and it'll
	 * happen after the next interrupt.
	 */
	int			priorityTable[IX_QMGR_MAX_NUM_QUEUES];
	uint32_t		lowPriorityTableFirstHalfMask;
	uint32_t		uppPriorityTableFirstHalfMask;
	int			rebuildTable;	/* rebuild priorityTable */

	uint32_t		aqmFreeSramAddress;	/* SRAM free space */
};
156
/* Debug verbosity, runtime-settable via sysctl debug.qmgr or tunable. */
static int qmgr_debug = 0;
SYSCTL_INT(_debug, OID_AUTO, qmgr, CTLFLAG_RW, &qmgr_debug,
	   0, "IXP425 Q-Manager debug msgs");
TUNABLE_INT("debug.qmgr", &qmgr_debug);
/* Debug printf gated on qmgr_debug; the dev argument is currently unused. */
#define	DPRINTF(dev, fmt, ...) do {					\
	if (qmgr_debug) printf(fmt, __VA_ARGS__);			\
} while (0)
/* As DPRINTF, but only emitted at verbosity level >= n. */
#define	DPRINTFn(n, dev, fmt, ...) do {					\
	if (qmgr_debug >= n) printf(fmt, __VA_ARGS__);			\
} while (0)

/* Singleton softc: the exported ixpqmgr_* API is not device-scoped. */
static struct ixpqmgr_softc *ixpqmgr_sc = NULL;

static void ixpqmgr_rebuild(struct ixpqmgr_softc *);
static void ixpqmgr_intr(void *);

static void aqm_int_enable(struct ixpqmgr_softc *sc, int qId);
static void aqm_int_disable(struct ixpqmgr_softc *sc, int qId);
static void aqm_qcfg(struct ixpqmgr_softc *sc, int qId, u_int ne, u_int nf);
static void aqm_srcsel_write(struct ixpqmgr_softc *sc, int qId, int sourceId);
static void aqm_reset(struct ixpqmgr_softc *sc);
178
/*
 * Default per-queue callback.  Installed for every queue without a
 * client handler so the dispatcher may invoke qi->cb unconditionally.
 */
static void
dummyCallback(int qId, void *arg)
{
	/* XXX complain */
}
184
/* Read a 32-bit AQM register at byte offset 'off' from the mapped base. */
static uint32_t
aqm_reg_read(struct ixpqmgr_softc *sc, bus_size_t off)
{
	DPRINTFn(9, sc->sc_dev, "%s(0x%x)\n", __func__, (int)off);
	return bus_space_read_4(sc->sc_iot, sc->sc_ioh, off);
}
191
/* Write a 32-bit AQM register at byte offset 'off' from the mapped base. */
static void
aqm_reg_write(struct ixpqmgr_softc *sc, bus_size_t off, uint32_t val)
{
	DPRINTFn(9, sc->sc_dev, "%s(0x%x, 0x%x)\n", __func__, (int)off, val);
	bus_space_write_4(sc->sc_iot, sc->sc_ioh, off, val);
}
198
/*
 * Device probe: the qmgr is a fixed function block on all IXP4xx
 * parts, so just set the description and accept the device.
 */
static int
ixpqmgr_probe(device_t dev)
{
	device_set_desc(dev, "IXP425 Q-Manager");
	return 0;
}
205
206 static void
207 ixpqmgr_attach(device_t dev)
208 {
209 struct ixpqmgr_softc *sc = device_get_softc(dev);
210 struct ixp425_softc *sa = device_get_softc(device_get_parent(dev));
211 int i;
212
213 ixpqmgr_sc = sc;
214
215 sc->sc_dev = dev;
216 sc->sc_iot = sa->sc_iot;
217 if (bus_space_map(sc->sc_iot, IXP425_QMGR_HWBASE, IXP425_QMGR_SIZE,
218 0, &sc->sc_ioh))
219 panic("%s: Cannot map registers", device_get_name(dev));
220
221 /* NB: we only use the lower 32 q's */
222 sc->sc_irq = bus_alloc_resource(dev, SYS_RES_IRQ, &sc->sc_rid,
223 IXP425_INT_QUE1_32, IXP425_INT_QUE33_64, 2, RF_ACTIVE);
224 if (!sc->sc_irq)
225 panic("Unable to allocate the qmgr irqs.\n");
226 /* XXX could be a source of entropy */
227 bus_setup_intr(dev, sc->sc_irq, INTR_TYPE_NET | INTR_MPSAFE,
228 NULL, ixpqmgr_intr, NULL, &sc->sc_ih);
229
230 /* NB: softc is pre-zero'd */
231 for (i = 0; i < IX_QMGR_MAX_NUM_QUEUES; i++) {
232 struct qmgrInfo *qi = &sc->qinfo[i];
233
234 qi->cb = dummyCallback;
235 qi->priority = IX_QMGR_Q_PRIORITY_0; /* default priority */
236 /*
237 * There are two interrupt registers, 32 bits each. One
238 * for the lower queues(0-31) and one for the upper
239 * queues(32-63). Therefore need to mod by 32 i.e the
240 * min upper queue identifier.
241 */
242 qi->intRegCheckMask = (1<<(i%(IX_QMGR_MIN_QUEUPP_QID)));
243
244 /*
245 * Register addresses and bit masks are calculated and
246 * stored here to optimize QRead, QWrite and QStatusGet
247 * functions.
248 */
249
250 /* AQM Queue access reg addresses, per queue */
251 qi->qAccRegAddr = IX_QMGR_Q_ACCESS_ADDR_GET(i);
252 qi->qAccRegAddr = IX_QMGR_Q_ACCESS_ADDR_GET(i);
253 qi->qConfigRegAddr = IX_QMGR_Q_CONFIG_ADDR_GET(i);
254
255 /* AQM Queue lower-group (0-31), only */
256 if (i < IX_QMGR_MIN_QUEUPP_QID) {
257 /* AQM Q underflow/overflow status reg address, per queue */
258 qi->qUOStatRegAddr = IX_QMGR_QUEUOSTAT0_OFFSET +
259 ((i / IX_QMGR_QUEUOSTAT_NUM_QUE_PER_WORD) *
260 sizeof(uint32_t));
261
262 /* AQM Q underflow status bit masks for status reg per queue */
263 qi->qUflowStatBitMask =
264 (IX_QMGR_UNDERFLOW_BIT_OFFSET + 1) <<
265 ((i & (IX_QMGR_QUEUOSTAT_NUM_QUE_PER_WORD - 1)) *
266 (32 / IX_QMGR_QUEUOSTAT_NUM_QUE_PER_WORD));
267
268 /* AQM Q overflow status bit masks for status reg, per queue */
269 qi->qOflowStatBitMask =
270 (IX_QMGR_OVERFLOW_BIT_OFFSET + 1) <<
271 ((i & (IX_QMGR_QUEUOSTAT_NUM_QUE_PER_WORD - 1)) *
272 (32 / IX_QMGR_QUEUOSTAT_NUM_QUE_PER_WORD));
273
274 /* AQM Q lower-group (0-31) status reg addresses, per queue */
275 qi->qStatRegAddr = IX_QMGR_QUELOWSTAT0_OFFSET +
276 ((i / IX_QMGR_QUELOWSTAT_NUM_QUE_PER_WORD) *
277 sizeof(uint32_t));
278
279 /* AQM Q lower-group (0-31) status register bit offset */
280 qi->qStatBitsOffset =
281 (i & (IX_QMGR_QUELOWSTAT_NUM_QUE_PER_WORD - 1)) *
282 (32 / IX_QMGR_QUELOWSTAT_NUM_QUE_PER_WORD);
283 } else { /* AQM Q upper-group (32-63), only */
284 qi->qUOStatRegAddr = 0; /* XXX */
285
286 /* AQM Q upper-group (32-63) Nearly Empty status reg bitmasks */
287 qi->qStat0BitMask = (1 << (i - IX_QMGR_MIN_QUEUPP_QID));
288
289 /* AQM Q upper-group (32-63) Full status register bitmasks */
290 qi->qStat1BitMask = (1 << (i - IX_QMGR_MIN_QUEUPP_QID));
291 }
292 }
293
294 sc->aqmFreeSramAddress = 0x100; /* Q buffer space starts at 0x2100 */
295
296 ixpqmgr_rebuild(sc); /* build inital priority table */
297 aqm_reset(sc); /* reset h/w */
298 }
299
/*
 * Device detach: quiesce the hardware (aqm_reset also disables
 * interrupts), then tear down the interrupt hook and release the
 * IRQ resource and register mapping — in that order, so no
 * interrupt can arrive after the handler is gone.
 *
 * NOTE(review): returns void while device_detach methods
 * conventionally return int — presumably relies on the kobj
 * method cast; confirm against device(9).
 */
static void
ixpqmgr_detach(device_t dev)
{
	struct ixpqmgr_softc *sc = device_get_softc(dev);

	aqm_reset(sc);		/* disable interrupts */
	bus_teardown_intr(dev, sc->sc_irq, sc->sc_ih);
	bus_release_resource(dev, SYS_RES_IRQ, sc->sc_rid, sc->sc_irq);
	bus_space_unmap(sc->sc_iot, sc->sc_ioh, IXP425_QMGR_SIZE);
}
310
/*
 * Configure hardware queue qId: size (in entries == words, entry
 * size is fixed at 1), nearly-empty/nearly-full watermarks, the
 * interrupt source condition, and an optional notification callback.
 * Passing cb == NULL installs the dummy callback and leaves the
 * queue's interrupt disabled.  Always returns 0.
 *
 * NB: SRAM for the queue buffer is carved out of aqmFreeSramAddress;
 * aqm_qcfg() consumes the current free address, so the accounting
 * increment below must come after that call.
 */
int
ixpqmgr_qconfig(int qId, int qEntries, int ne, int nf, int srcSel,
    void (*cb)(int, void *), void *cbarg)
{
	struct ixpqmgr_softc *sc = ixpqmgr_sc;
	struct qmgrInfo *qi = &sc->qinfo[qId];

	DPRINTF(sc->sc_dev, "%s(%u, %u, %u, %u, %u, %p, %p)\n",
	    __func__, qId, qEntries, ne, nf, srcSel, cb, cbarg);

	/* NB: entry size is always 1 */
	qi->qSizeInWords = qEntries;

	qi->qReadCount = 0;
	qi->qWriteCount = 0;
	qi->qSizeInEntries = qEntries;	/* XXX kept for code clarity */

	if (cb == NULL) {
		/* Reset to dummy callback */
		qi->cb = dummyCallback;
		qi->cbarg = 0;
	} else {
		qi->cb = cb;
		qi->cbarg = cbarg;
	}

	/* Write the config register; NB must be AFTER qinfo setup */
	aqm_qcfg(sc, qId, ne, nf);
	/*
	 * Account for space just allocated to queue.
	 */
	sc->aqmFreeSramAddress += (qi->qSizeInWords * sizeof(uint32_t));

	/* Set the interrupt source if this queue is in the range 0-31 */
	if (qId < IX_QMGR_MIN_QUEUPP_QID)
		aqm_srcsel_write(sc, qId, srcSel);

	if (cb != NULL)				/* Enable the interrupt */
		aqm_int_enable(sc, qId);

	sc->rebuildTable = TRUE;		/* dispatcher rebuilds after next intr */

	return 0;		/* XXX always succeeds; no validation of qId/qEntries */
}
355
/*
 * Push one word onto queue qId.  Returns 0 on success or ENOSPC if
 * the hardware reports the queue overflowed (the entry was lost and
 * the overflow status bit is cleared before returning).
 *
 * Overflow detection is only available for the lower queues (0-31);
 * writes to upper queues are fire-and-forget.
 */
int
ixpqmgr_qwrite(int qId, uint32_t entry)
{
	struct ixpqmgr_softc *sc = ixpqmgr_sc;
	struct qmgrInfo *qi = &sc->qinfo[qId];

	DPRINTFn(3, sc->sc_dev, "%s(%u, 0x%x) writeCount %u size %u\n",
	    __func__, qId, entry, qi->qWriteCount, qi->qSizeInEntries);

	/* write the entry */
	aqm_reg_write(sc, qi->qAccRegAddr, entry);

	/* NB: overflow is available for lower queues only */
	if (qId < IX_QMGR_MIN_QUEUPP_QID) {
		int qSize = qi->qSizeInEntries;
		/*
		 * Increment the current number of entries in the queue
		 * and check for overflow .
		 *
		 * NOTE(review): the post-increment means the slow path
		 * is taken when the count *reaches* qSize, i.e. the
		 * hardware status is consulted once the software
		 * counter says the queue should be full — confirm
		 * against the IAL reference implementation.
		 */
		if (qi->qWriteCount++ == qSize) {	/* check for overflow */
			uint32_t status = aqm_reg_read(sc, qi->qUOStatRegAddr);
			int qPtrs;

			/*
			 * Read the status twice because the status may
			 * not be immediately ready after the write operation
			 */
			if ((status & qi->qOflowStatBitMask) ||
			    ((status = aqm_reg_read(sc, qi->qUOStatRegAddr)) & qi->qOflowStatBitMask)) {
				/*
				 * The queue is full, clear the overflow status bit if set.
				 */
				aqm_reg_write(sc, qi->qUOStatRegAddr,
				    status & ~qi->qOflowStatBitMask);
				qi->qWriteCount = qSize;
				DPRINTFn(5, sc->sc_dev,
				    "%s(%u, 0x%x) Q full, overflow status cleared\n",
				    __func__, qId, entry);
				return ENOSPC;
			}
			/*
			 * No overflow occured : someone is draining the queue
			 * and the current counter needs to be
			 * updated from the current number of entries in the queue
			 */

			/* calculate number of words in q */
			qPtrs = aqm_reg_read(sc, qi->qConfigRegAddr);
			DPRINTFn(2, sc->sc_dev,
			    "%s(%u, 0x%x) Q full, no overflow status, qConfig 0x%x\n",
			    __func__, qId, entry, qPtrs);
			/*
			 * Difference of the 7-bit read/write pointers held in
			 * the low bits of the QUECONFIG word — presumably
			 * read ptr in bits 0-6 and write ptr in bits 7-13;
			 * verify against the QUECONFIG register layout.
			 */
			qPtrs = (qPtrs - (qPtrs >> 7)) & 0x7f;

			if (qPtrs == 0) {
				/*
				 * The queue may be full at the time of the
				 * snapshot. Next access will check
				 * the overflow status again.
				 */
				qi->qWriteCount = qSize;
			} else {
				/* convert the number of words to a number of entries */
				qi->qWriteCount = qPtrs & (qSize - 1);
			}
		}
	}
	return 0;
}
424
/*
 * Pop one word from queue qId into *entry.  Returns 0 on success or
 * ENOSPC if the read underflowed (a zero entry combined with the
 * hardware underflow status bit, which is cleared before returning).
 * Underflow detection only exists for the lower queues (0-31); a
 * legitimate zero entry on those queues costs one extra status read.
 */
int
ixpqmgr_qread(int qId, uint32_t *entry)
{
	struct ixpqmgr_softc *sc = ixpqmgr_sc;
	struct qmgrInfo *qi = &sc->qinfo[qId];
	bus_size_t off = qi->qAccRegAddr;

	*entry = aqm_reg_read(sc, off);

	/*
	 * Reset the current read count : next access to the read function
	 * will force a underflow status check.
	 */
	qi->qReadCount = 0;

	/* Check if underflow occurred on the read */
	if (*entry == 0 && qId < IX_QMGR_MIN_QUEUPP_QID) {
		/* get the queue status */
		uint32_t status = aqm_reg_read(sc, qi->qUOStatRegAddr);

		if (status & qi->qUflowStatBitMask) { /* clear underflow status */
			aqm_reg_write(sc, qi->qUOStatRegAddr,
			    status &~ qi->qUflowStatBitMask);
			return ENOSPC;
		}
	}
	return 0;
}
453
/*
 * Pop up to n words from queue qId into p (caller supplies room for
 * n words).  A zero entry terminates the burst early since it may
 * indicate underflow; the final (possibly zero) entry is always
 * stored.  Returns 0 on success or ENOSPC if hardware underflow
 * status is set for a lower-group queue.
 */
int
ixpqmgr_qreadm(int qId, uint32_t n, uint32_t *p)
{
	struct ixpqmgr_softc *sc = ixpqmgr_sc;
	struct qmgrInfo *qi = &sc->qinfo[qId];
	uint32_t entry;
	bus_size_t off = qi->qAccRegAddr;

	entry = aqm_reg_read(sc, off);
	while (--n) {
		if (entry == 0) {
			/* if we read a NULL entry, stop. We have underflowed */
			break;
		}
		*p++ = entry;	/* store */
		entry = aqm_reg_read(sc, off);
	}
	*p = entry;		/* last word read (or the 0 that stopped us) */

	/*
	 * Reset the current read count : next access to the read function
	 * will force a underflow status check.
	 */
	qi->qReadCount = 0;

	/* Check if underflow occurred on the read */
	if (entry == 0 && qId < IX_QMGR_MIN_QUEUPP_QID) {
		/* get the queue status */
		uint32_t status = aqm_reg_read(sc, qi->qUOStatRegAddr);

		if (status & qi->qUflowStatBitMask) { /* clear underflow status */
			aqm_reg_write(sc, qi->qUOStatRegAddr,
			    status &~ qi->qUflowStatBitMask);
			return ENOSPC;
		}
	}
	return 0;
}
492
493 uint32_t
494 ixpqmgr_getqstatus(int qId)
495 {
496 #define QLOWSTATMASK \
497 ((1 << (32 / IX_QMGR_QUELOWSTAT_NUM_QUE_PER_WORD)) - 1)
498 struct ixpqmgr_softc *sc = ixpqmgr_sc;
499 const struct qmgrInfo *qi = &sc->qinfo[qId];
500 uint32_t status;
501
502 if (qId < IX_QMGR_MIN_QUEUPP_QID) {
503 /* read the status of a queue in the range 0-31 */
504 status = aqm_reg_read(sc, qi->qStatRegAddr);
505
506 /* mask out the status bits relevant only to this queue */
507 status = (status >> qi->qStatBitsOffset) & QLOWSTATMASK;
508 } else { /* read status of a queue in the range 32-63 */
509 status = 0;
510 if (aqm_reg_read(sc, IX_QMGR_QUEUPPSTAT0_OFFSET)&qi->qStat0BitMask)
511 status |= IX_QMGR_Q_STATUS_NE_BIT_MASK; /* nearly empty */
512 if (aqm_reg_read(sc, IX_QMGR_QUEUPPSTAT1_OFFSET)&qi->qStat1BitMask)
513 status |= IX_QMGR_Q_STATUS_F_BIT_MASK; /* full */
514 }
515 return status;
516 #undef QLOWSTATMASK
517 }
518
519 uint32_t
520 ixpqmgr_getqconfig(int qId)
521 {
522 struct ixpqmgr_softc *sc = ixpqmgr_sc;
523
524 return aqm_reg_read(sc, IX_QMGR_Q_CONFIG_ADDR_GET(qId));
525 }
526
527 void
528 ixpqmgr_dump(void)
529 {
530 struct ixpqmgr_softc *sc = ixpqmgr_sc;
531 int i, a;
532
533 /* status registers */
534 printf("0x%04x: %08x %08x %08x %08x\n"
535 , 0x400
536 , aqm_reg_read(sc, 0x400)
537 , aqm_reg_read(sc, 0x400+4)
538 , aqm_reg_read(sc, 0x400+8)
539 , aqm_reg_read(sc, 0x400+12)
540 );
541 printf("0x%04x: %08x %08x %08x %08x\n"
542 , 0x410
543 , aqm_reg_read(sc, 0x410)
544 , aqm_reg_read(sc, 0x410+4)
545 , aqm_reg_read(sc, 0x410+8)
546 , aqm_reg_read(sc, 0x410+12)
547 );
548 printf("0x%04x: %08x %08x %08x %08x\n"
549 , 0x420
550 , aqm_reg_read(sc, 0x420)
551 , aqm_reg_read(sc, 0x420+4)
552 , aqm_reg_read(sc, 0x420+8)
553 , aqm_reg_read(sc, 0x420+12)
554 );
555 printf("0x%04x: %08x %08x %08x %08x\n"
556 , 0x430
557 , aqm_reg_read(sc, 0x430)
558 , aqm_reg_read(sc, 0x430+4)
559 , aqm_reg_read(sc, 0x430+8)
560 , aqm_reg_read(sc, 0x430+12)
561 );
562 /* q configuration registers */
563 for (a = 0x2000; a < 0x20ff; a += 32)
564 printf("0x%04x: %08x %08x %08x %08x %08x %08x %08x %08x\n"
565 , a
566 , aqm_reg_read(sc, a)
567 , aqm_reg_read(sc, a+4)
568 , aqm_reg_read(sc, a+8)
569 , aqm_reg_read(sc, a+12)
570 , aqm_reg_read(sc, a+16)
571 , aqm_reg_read(sc, a+20)
572 , aqm_reg_read(sc, a+24)
573 , aqm_reg_read(sc, a+28)
574 );
575 /* allocated SRAM */
576 for (i = 0x100; i < sc->aqmFreeSramAddress; i += 32) {
577 a = 0x2000 + i;
578 printf("0x%04x: %08x %08x %08x %08x %08x %08x %08x %08x\n"
579 , a
580 , aqm_reg_read(sc, a)
581 , aqm_reg_read(sc, a+4)
582 , aqm_reg_read(sc, a+8)
583 , aqm_reg_read(sc, a+12)
584 , aqm_reg_read(sc, a+16)
585 , aqm_reg_read(sc, a+20)
586 , aqm_reg_read(sc, a+24)
587 , aqm_reg_read(sc, a+28)
588 );
589 }
590 for (i = 0; i < 16; i++) {
591 printf("Q[%2d] config 0x%08x status 0x%02x "
592 "Q[%2d] config 0x%08x status 0x%02x\n"
593 , i, ixpqmgr_getqconfig(i), ixpqmgr_getqstatus(i)
594 , i+16, ixpqmgr_getqconfig(i+16), ixpqmgr_getqstatus(i+16)
595 );
596 }
597 }
598
/*
 * Enable interrupt notification for queue qId.  srcSel selects the
 * queue condition (empty/nearly-empty/full/...) that raises the
 * interrupt; it only applies to lower-group queues — upper-group
 * queue interrupt sources are fixed in hardware.
 */
void
ixpqmgr_notify_enable(int qId, int srcSel)
{
	struct ixpqmgr_softc *sc = ixpqmgr_sc;
#if 0
	/* Calculate the checkMask and checkValue for this q */
	aqm_calc_statuscheck(sc, qId, srcSel);
#endif
	/* Set the interrupt source if this queue is in the range 0-31 */
	if (qId < IX_QMGR_MIN_QUEUPP_QID)
		aqm_srcsel_write(sc, qId, srcSel);

	/* Enable the interrupt */
	aqm_int_enable(sc, qId);
}
614
/* Disable interrupt notification for queue qId. */
void
ixpqmgr_notify_disable(int qId)
{
	struct ixpqmgr_softc *sc = ixpqmgr_sc;

	aqm_int_disable(sc, qId);
}
622
/*
 * Rebuild the priority table used by the dispatcher.
 *
 * Queues are listed highest-priority-first, lower group (0-31) in
 * slots 0-31 and upper group (32-63) in slots 32-63.  The two
 * "FirstHalfMask" bitmasks record which interrupt bits belong to
 * queues landing in the first 16 slots of each half, letting the
 * interrupt handler decide where to start scanning.
 */
static void
ixpqmgr_rebuild(struct ixpqmgr_softc *sc)
{
	int q, pri;
	int lowQuePriorityTableIndex, uppQuePriorityTableIndex;
	struct qmgrInfo *qi;

	sc->lowPriorityTableFirstHalfMask = 0;
	sc->uppPriorityTableFirstHalfMask = 0;

	lowQuePriorityTableIndex = 0;
	uppQuePriorityTableIndex = 32;
	for (pri = 0; pri < IX_QMGR_NUM_PRIORITY_LEVELS; pri++) {
		/* low priority q's */
		for (q = 0; q < IX_QMGR_MIN_QUEUPP_QID; q++) {
			qi = &sc->qinfo[q];
			if (qi->priority == pri) {
				/*
				 * Build the priority table bitmask which match the
				 * queues of the first half of the priority table.
				 */
				if (lowQuePriorityTableIndex < 16) {
					sc->lowPriorityTableFirstHalfMask |=
					    qi->intRegCheckMask;
				}
				sc->priorityTable[lowQuePriorityTableIndex++] = q;
			}
		}
		/* high priority q's */
		for (; q < IX_QMGR_MAX_NUM_QUEUES; q++) {
			qi = &sc->qinfo[q];
			if (qi->priority == pri) {
				/*
				 * Build the priority table bitmask which match the
				 * queues of the first half of the priority table .
				 */
				if (uppQuePriorityTableIndex < 48) {
					sc->uppPriorityTableFirstHalfMask |=
					    qi->intRegCheckMask;
				}
				sc->priorityTable[uppQuePriorityTableIndex++] = q;
			}
		}
	}
	sc->rebuildTable = FALSE;
}
672
673 /*
674 * Count the number of leading zero bits in a word,
675 * and return the same value than the CLZ instruction.
676 * Note this is similar to the standard ffs function but
677 * it counts zero's from the MSB instead of the LSB.
678 *
679 * word (in) return value (out)
680 * 0x80000000 0
681 * 0x40000000 1
682 * ,,, ,,,
683 * 0x00000002 30
684 * 0x00000001 31
685 * 0x00000000 32
686 *
687 * The C version of this function is used as a replacement
688 * for system not providing the equivalent of the CLZ
689 * assembly language instruction.
690 *
691 * Note that this version is big-endian
692 */
static unsigned int
_lzcount(uint32_t word)
{
	/*
	 * Count down from 32: each one-bit shifted out from the
	 * bottom reduces the leading-zero count by one, so after
	 * shifting the word to zero the remainder is exactly the
	 * number of leading zeros (32 when word == 0).
	 */
	unsigned int count = 32;

	while (word != 0) {
		word >>= 1;
		count--;
	}
	return count;
}
706
/*
 * Interrupt handler for the lower-group (0-31) queue interrupt.
 * Reads and acknowledges QINTREG0, then dispatches per-queue
 * callbacks.  A single pending queue is dispatched directly; with
 * multiple pending queues the priority table is walked (starting at
 * its first or second half depending on where the pending bits lie)
 * until every pending bit has been serviced.
 */
static void
ixpqmgr_intr(void *arg)
{
	struct ixpqmgr_softc *sc = ixpqmgr_sc;
	uint32_t intRegVal;		/* Interrupt reg val */
	struct qmgrInfo *qi;
	int priorityTableIndex;		/* Priority table index */
	int qIndex;			/* Current queue being processed */

	/* Read the interrupt register */
	intRegVal = aqm_reg_read(sc, IX_QMGR_QINTREG0_OFFSET);
	/* Write back to clear interrupt */
	aqm_reg_write(sc, IX_QMGR_QINTREG0_OFFSET, intRegVal);

	DPRINTFn(5, sc->sc_dev, "%s: ISR0 0x%x ISR1 0x%x\n",
	    __func__, intRegVal, aqm_reg_read(sc, IX_QMGR_QINTREG1_OFFSET));

	/* Dispatch only when at least one queue raised an interrupt */
	if (intRegVal != 0) {
		/* get the first queue Id from the interrupt register value */
		qIndex = (32 - 1) - _lzcount(intRegVal);

		DPRINTFn(2, sc->sc_dev, "%s: ISR0 0x%x qIndex %u\n",
		    __func__, intRegVal, qIndex);

		/*
		 * Optimize for single callback case.
		 */
		qi = &sc->qinfo[qIndex];
		if (intRegVal == qi->intRegCheckMask) {
			/*
			 * Only 1 queue event triggered a notification.
			 * Call the callback function for this queue
			 */
			qi->cb(qIndex, qi->cbarg);
		} else {
			/*
			 * The event is triggered by more than 1 queue,
			 * the queue search will start from the beginning
			 * or the middle of the priority table.
			 *
			 * The search will end when all the bits of the interrupt
			 * register are cleared. There is no need to maintain
			 * a seperate value and test it at each iteration.
			 */
			if (intRegVal & sc->lowPriorityTableFirstHalfMask) {
				priorityTableIndex = 0;
			} else {
				priorityTableIndex = 16;
			}
			/*
			 * Iterate over the priority table until all the bits
			 * of the interrupt register are cleared.
			 */
			do {
				qIndex = sc->priorityTable[priorityTableIndex++];
				qi = &sc->qinfo[qIndex];

				/* If this queue caused this interrupt to be raised */
				if (intRegVal & qi->intRegCheckMask) {
					/* Call the callback function for this queue */
					qi->cb(qIndex, qi->cbarg);
					/* Clear the interrupt register bit */
					intRegVal &= ~qi->intRegCheckMask;
				}
			} while (intRegVal);
		}
	}

	/* Rebuild the priority table if needed */
	if (sc->rebuildTable)
		ixpqmgr_rebuild(sc);
}
780
#if 0
/*
 * Generate the parameters used to check if a Q's status matches
 * the specified source select. We calculate which status word
 * to check (statusWordOffset), the value to check the status
 * against (statusCheckValue) and the mask (statusMask) to mask
 * out all but the bits to check in the status word.
 *
 * NOTE(review): dead code (#if 0), kept from the IAL port.  It
 * still references the pre-softc global 'qinfo' and takes no softc
 * argument, so it must be reworked before being re-enabled (the
 * caller in ixpqmgr_notify_enable passes sc as a first argument).
 */
static void
aqm_calc_statuscheck(int qId, IxQMgrSourceId srcSel)
{
	struct qmgrInfo *qi = &qinfo[qId];
	uint32_t shiftVal;

	if (qId < IX_QMGR_MIN_QUEUPP_QID) {
		switch (srcSel) {
		case IX_QMGR_Q_SOURCE_ID_E:
			qi->statusCheckValue = IX_QMGR_Q_STATUS_E_BIT_MASK;
			qi->statusMask = IX_QMGR_Q_STATUS_E_BIT_MASK;
			break;
		case IX_QMGR_Q_SOURCE_ID_NE:
			qi->statusCheckValue = IX_QMGR_Q_STATUS_NE_BIT_MASK;
			qi->statusMask = IX_QMGR_Q_STATUS_NE_BIT_MASK;
			break;
		case IX_QMGR_Q_SOURCE_ID_NF:
			qi->statusCheckValue = IX_QMGR_Q_STATUS_NF_BIT_MASK;
			qi->statusMask = IX_QMGR_Q_STATUS_NF_BIT_MASK;
			break;
		case IX_QMGR_Q_SOURCE_ID_F:
			qi->statusCheckValue = IX_QMGR_Q_STATUS_F_BIT_MASK;
			qi->statusMask = IX_QMGR_Q_STATUS_F_BIT_MASK;
			break;
		case IX_QMGR_Q_SOURCE_ID_NOT_E:
			qi->statusCheckValue = 0;
			qi->statusMask = IX_QMGR_Q_STATUS_E_BIT_MASK;
			break;
		case IX_QMGR_Q_SOURCE_ID_NOT_NE:
			qi->statusCheckValue = 0;
			qi->statusMask = IX_QMGR_Q_STATUS_NE_BIT_MASK;
			break;
		case IX_QMGR_Q_SOURCE_ID_NOT_NF:
			qi->statusCheckValue = 0;
			qi->statusMask = IX_QMGR_Q_STATUS_NF_BIT_MASK;
			break;
		case IX_QMGR_Q_SOURCE_ID_NOT_F:
			qi->statusCheckValue = 0;
			qi->statusMask = IX_QMGR_Q_STATUS_F_BIT_MASK;
			break;
		default:
			/* Should never hit */
			IX_OSAL_ASSERT(0);
			break;
		}

		/* One nibble of status per queue so need to shift the
		 * check value and mask out to the correct position.
		 */
		shiftVal = (qId % IX_QMGR_QUELOWSTAT_NUM_QUE_PER_WORD) *
		    IX_QMGR_QUELOWSTAT_BITS_PER_Q;

		/* Calculate the which status word to check from the qId,
		 * 8 Qs status per word
		 */
		qi->statusWordOffset = qId / IX_QMGR_QUELOWSTAT_NUM_QUE_PER_WORD;

		qi->statusCheckValue <<= shiftVal;
		qi->statusMask <<= shiftVal;
	} else {
		/* One status word */
		qi->statusWordOffset = 0;
		/* Single bits per queue and int source bit hardwired NE,
		 * Qs start at 32.
		 */
		qi->statusMask = 1 << (qId - IX_QMGR_MIN_QUEUPP_QID);
		qi->statusCheckValue = qi->statusMask;
	}
}
#endif
859
/*
 * Set queue qId's bit in the appropriate interrupt-enable register
 * (QUEIEREG0 for queues 0-31, QUEIEREG1 for 32-63).
 *
 * NOTE(review): the DPRINTF uses %lx for 'reg' (bus_size_t) —
 * correct only if bus_size_t is unsigned long on this platform;
 * verify before enabling debug output elsewhere.
 */
static void
aqm_int_enable(struct ixpqmgr_softc *sc, int qId)
{
	bus_size_t reg;
	uint32_t v;

	if (qId < IX_QMGR_MIN_QUEUPP_QID)
		reg = IX_QMGR_QUEIEREG0_OFFSET;
	else
		reg = IX_QMGR_QUEIEREG1_OFFSET;
	v = aqm_reg_read(sc, reg);
	aqm_reg_write(sc, reg, v | (1 << (qId % IX_QMGR_MIN_QUEUPP_QID)));

	DPRINTF(sc->sc_dev, "%s(%u) 0x%lx: 0x%x => 0x%x\n",
	    __func__, qId, reg, v, aqm_reg_read(sc, reg));
}
876
877 static void
878 aqm_int_disable(struct ixpqmgr_softc *sc, int qId)
879 {
880 bus_size_t reg;
881 uint32_t v;
882
883 if (qId < IX_QMGR_MIN_QUEUPP_QID)
884 reg = IX_QMGR_QUEIEREG0_OFFSET;
885 else
886 reg = IX_QMGR_QUEIEREG1_OFFSET;
887 v = aqm_reg_read(sc, reg);
888 aqm_reg_write(sc, reg, v &~ (1 << (qId % IX_QMGR_MIN_QUEUPP_QID)));
889
890 DPRINTF(sc->sc_dev, "%s(%u) 0x%lx: 0x%x => 0x%x\n",
891 __func__, qId, reg, v, aqm_reg_read(sc, reg));
892 }
893
/*
 * Integer floor(log2(n)).
 * N.B. this function will return 0 if supplied 0 (same as the
 * previous division-based implementation).
 */
static unsigned
log2(unsigned n)
{
	unsigned result = 0;

	while (n >= 2) {
		n >>= 1;
		result++;
	}
	return result;
}
905
/*
 * Encode an entry size for the QUECONFIG ESIZE field:
 * entrySize 1("00"), 2("01"), 4("10").
 */
static __inline unsigned
toAqmEntrySize(int entrySize)
{
	/* entrySize  1("00"),2("01"),4("10") */
	return log2(entrySize);
}
912
/*
 * Encode a buffer size (in words) for the QUECONFIG BSIZE field:
 * 16("00"), 32("01"), 64("10"), 128("11").
 */
static __inline unsigned
toAqmBufferSize(unsigned bufferSizeInWords)
{
	/* bufferSize 16("00"),32("01),64("10"),128("11") */
	return log2(bufferSizeInWords / IX_QMGR_MIN_BUFFER_SIZE);
}
919
/*
 * Encode a nearly-empty/nearly-full watermark for QUECONFIG:
 * 0("000"), 1("001"), 2("010"), 4("011"), 8("100"), 16("101"),
 * 32("110"), 64("111").  The 2x scaling maps watermark 0 to code 0
 * (log2(0) returns 0) and watermark 1 to code 1.
 */
static __inline unsigned
toAqmWatermark(int watermark)
{
	/*
	 * Watermarks 0("000"),1("001"),2("010"),4("011"),
	 * 8("100"),16("101"),32("110"),64("111")
	 */
	return log2(2 * watermark);
}
929
/*
 * Build and write the QUECONFIG word for queue qId: entry size,
 * buffer size, SRAM base address (taken from the current free SRAM
 * pointer; the caller accounts for the space afterwards) and the
 * ne/nf watermarks.
 */
static void
aqm_qcfg(struct ixpqmgr_softc *sc, int qId, u_int ne, u_int nf)
{
	const struct qmgrInfo *qi = &sc->qinfo[qId];
	uint32_t qCfg;
	uint32_t baseAddress;

	/* Build config register */
	qCfg = ((toAqmEntrySize(1) & IX_QMGR_ENTRY_SIZE_MASK) <<
		    IX_QMGR_Q_CONFIG_ESIZE_OFFSET)
	     | ((toAqmBufferSize(qi->qSizeInWords) & IX_QMGR_SIZE_MASK) <<
		    IX_QMGR_Q_CONFIG_BSIZE_OFFSET);

	/* baseAddress, calculated relative to start address */
	baseAddress = sc->aqmFreeSramAddress;

	/* base address must be word-aligned */
	KASSERT((baseAddress % IX_QMGR_BASE_ADDR_16_WORD_ALIGN) == 0,
	    ("address not word-aligned"));

	/* Now convert to a 16 word pointer as required by QUECONFIG register */
	baseAddress >>= IX_QMGR_BASE_ADDR_16_WORD_SHIFT;
	qCfg |= baseAddress << IX_QMGR_Q_CONFIG_BADDR_OFFSET;

	/* set watermarks */
	qCfg |= (toAqmWatermark(ne) << IX_QMGR_Q_CONFIG_NE_OFFSET)
	     |  (toAqmWatermark(nf) << IX_QMGR_Q_CONFIG_NF_OFFSET);

	DPRINTF(sc->sc_dev, "%s(%u, %u, %u) 0x%x => 0x%x @ 0x%x\n",
	    __func__, qId, ne, nf,
	    aqm_reg_read(sc, IX_QMGR_Q_CONFIG_ADDR_GET(qId)),
	    qCfg, IX_QMGR_Q_CONFIG_ADDR_GET(qId));

	aqm_reg_write(sc, IX_QMGR_Q_CONFIG_ADDR_GET(qId), qCfg);
}
965
/*
 * Program the interrupt source-select field for lower-group queue
 * qId: choose which queue condition (sourceId) raises the INT0
 * interrupt.  Several queues share each 32-bit INT0SRCSELREG, so
 * read-modify-write only this queue's field.
 */
static void
aqm_srcsel_write(struct ixpqmgr_softc *sc, int qId, int sourceId)
{
	bus_size_t off;
	uint32_t v;

	/*
	 * Calculate the register offset; multiple queues split across registers
	 */
	off = IX_QMGR_INT0SRCSELREG0_OFFSET +
	    ((qId / IX_QMGR_INTSRC_NUM_QUE_PER_WORD) * sizeof(uint32_t));

	v = aqm_reg_read(sc, off);
	if (off == IX_QMGR_INT0SRCSELREG0_OFFSET && qId == 0) {
		/* Queue 0 at INT0SRCSELREG should not corrupt the value bit-3 */
		v |= 0x7;
	} else {
		const uint32_t bpq = 32 / IX_QMGR_INTSRC_NUM_QUE_PER_WORD;
		uint32_t mask;
		int qshift;

		qshift = (qId & (IX_QMGR_INTSRC_NUM_QUE_PER_WORD-1)) * bpq;
		mask = ((1 << bpq) - 1) << qshift;	/* q's status mask */

		/* merge sourceId */
		v = (v &~ mask) | ((sourceId << qshift) & mask);
	}

	DPRINTF(sc->sc_dev, "%s(%u, %u) 0x%x => 0x%x @ 0x%lx\n",
	    __func__, qId, sourceId, aqm_reg_read(sc, off), v, off);
	aqm_reg_write(sc, off, v);
}
998
/*
 * Reset AQM registers to default values.
 * Clears all status/interrupt state, disables every queue interrupt,
 * resets all queue configuration words and zeroes the queue-buffer
 * SRAM (the zeroing is purely a debugging aid).
 */
static void
aqm_reset(struct ixpqmgr_softc *sc)
{
	int i;

	/* Reset queues 0..31 status registers 0..3 */
	aqm_reg_write(sc, IX_QMGR_QUELOWSTAT0_OFFSET,
		IX_QMGR_QUELOWSTAT_RESET_VALUE);
	aqm_reg_write(sc, IX_QMGR_QUELOWSTAT1_OFFSET,
		IX_QMGR_QUELOWSTAT_RESET_VALUE);
	aqm_reg_write(sc, IX_QMGR_QUELOWSTAT2_OFFSET,
		IX_QMGR_QUELOWSTAT_RESET_VALUE);
	aqm_reg_write(sc, IX_QMGR_QUELOWSTAT3_OFFSET,
		IX_QMGR_QUELOWSTAT_RESET_VALUE);

	/* Reset underflow/overflow status registers 0..1 */
	aqm_reg_write(sc, IX_QMGR_QUEUOSTAT0_OFFSET,
		IX_QMGR_QUEUOSTAT_RESET_VALUE);
	aqm_reg_write(sc, IX_QMGR_QUEUOSTAT1_OFFSET,
		IX_QMGR_QUEUOSTAT_RESET_VALUE);

	/* Reset queues 32..63 nearly empty status registers */
	aqm_reg_write(sc, IX_QMGR_QUEUPPSTAT0_OFFSET,
		IX_QMGR_QUEUPPSTAT0_RESET_VALUE);

	/* Reset queues 32..63 full status registers */
	aqm_reg_write(sc, IX_QMGR_QUEUPPSTAT1_OFFSET,
		IX_QMGR_QUEUPPSTAT1_RESET_VALUE);

	/* Reset int0 status flag source select registers 0..3 */
	aqm_reg_write(sc, IX_QMGR_INT0SRCSELREG0_OFFSET,
			     IX_QMGR_INT0SRCSELREG_RESET_VALUE);
	aqm_reg_write(sc, IX_QMGR_INT0SRCSELREG1_OFFSET,
			     IX_QMGR_INT0SRCSELREG_RESET_VALUE);
	aqm_reg_write(sc, IX_QMGR_INT0SRCSELREG2_OFFSET,
			     IX_QMGR_INT0SRCSELREG_RESET_VALUE);
	aqm_reg_write(sc, IX_QMGR_INT0SRCSELREG3_OFFSET,
			     IX_QMGR_INT0SRCSELREG_RESET_VALUE);

	/* Reset queue interrupt enable register 0..1 */
	aqm_reg_write(sc, IX_QMGR_QUEIEREG0_OFFSET,
		IX_QMGR_QUEIEREG_RESET_VALUE);
	aqm_reg_write(sc, IX_QMGR_QUEIEREG1_OFFSET,
		IX_QMGR_QUEIEREG_RESET_VALUE);

	/* Reset queue interrupt register 0..1 */
	aqm_reg_write(sc, IX_QMGR_QINTREG0_OFFSET, IX_QMGR_QINTREG_RESET_VALUE);
	aqm_reg_write(sc, IX_QMGR_QINTREG1_OFFSET, IX_QMGR_QINTREG_RESET_VALUE);

	/* Reset queue configuration words 0..63 */
	for (i = 0; i < IX_QMGR_MAX_NUM_QUEUES; i++)
		aqm_reg_write(sc, sc->qinfo[i].qConfigRegAddr,
		    IX_QMGR_QUECONFIG_RESET_VALUE);

	/* XXX zero SRAM to simplify debugging */
	for (i = IX_QMGR_QUEBUFFER_SPACE_OFFSET;
	     i < IX_QMGR_AQM_SRAM_SIZE_IN_BYTES; i += sizeof(uint32_t))
		aqm_reg_write(sc, i, 0);
}
1061
/* newbus glue: method table, driver declaration and registration
 * on the ixp parent bus. */
static device_method_t ixpqmgr_methods[] = {
	DEVMETHOD(device_probe,		ixpqmgr_probe),
	DEVMETHOD(device_attach,	ixpqmgr_attach),
	DEVMETHOD(device_detach,	ixpqmgr_detach),

	{ 0, 0 }
};

static driver_t ixpqmgr_driver = {
	"ixpqmgr",
	ixpqmgr_methods,
	sizeof(struct ixpqmgr_softc),
};
static devclass_t ixpqmgr_devclass;

DRIVER_MODULE(ixpqmgr, ixp, ixpqmgr_driver, ixpqmgr_devclass, 0, 0);
Cache object: 365e97282de8c92faf469d0859259566
|