FreeBSD/Linux Kernel Cross Reference
sys/dev/qbus/if_qe.c
1 /* $NetBSD: if_qe.c,v 1.57 2003/01/17 15:45:59 bouyer Exp $ */
2 /*
3 * Copyright (c) 1999 Ludd, University of Lule}, Sweden. All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 * 3. All advertising materials mentioning features or use of this software
14 * must display the following acknowledgement:
15 * This product includes software developed at Ludd, University of
16 * Lule}, Sweden and its contributors.
17 * 4. The name of the author may not be used to endorse or promote products
18 * derived from this software without specific prior written permission
19 *
20 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
21 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
22 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
23 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
24 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
25 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
26 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
27 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
28 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
29 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
30 */
31
32 /*
33 * Driver for DEQNA/DELQA ethernet cards.
34 * Things that are still to do:
35 * Handle ubaresets. Does not work at all right now.
36 * Fix ALLMULTI reception. But someone must tell me how...
37 * Collect statistics.
38 */
39
40 #include <sys/cdefs.h>
41 __KERNEL_RCSID(0, "$NetBSD: if_qe.c,v 1.57 2003/01/17 15:45:59 bouyer Exp $");
42
43 #include "opt_inet.h"
44 #include "bpfilter.h"
45
46 #include <sys/param.h>
47 #include <sys/mbuf.h>
48 #include <sys/socket.h>
49 #include <sys/device.h>
50 #include <sys/systm.h>
51 #include <sys/sockio.h>
52
53 #include <net/if.h>
54 #include <net/if_ether.h>
55 #include <net/if_dl.h>
56
57 #include <netinet/in.h>
58 #include <netinet/if_inarp.h>
59
60 #if NBPFILTER > 0
61 #include <net/bpf.h>
62 #include <net/bpfdesc.h>
63 #endif
64
65 #include <machine/bus.h>
66
67 #include <dev/qbus/ubavar.h>
68 #include <dev/qbus/if_qereg.h>
69
70 #include "ioconf.h"
71
#define RXDESCS	30		/* # of receive descriptors */
#define TXDESCS	60		/* # transmit descs */

/*
 * Structure containing the elements that must be in DMA-safe memory.
 */
struct qe_cdata {
	struct qe_ring	qc_recv[RXDESCS+1];	/* Receive descriptors */
	struct qe_ring	qc_xmit[TXDESCS+1];	/* Transmit descriptors */
	u_int8_t	qc_setup[128];		/* Setup packet layout */
};

/* Per-device driver state. */
struct qe_softc {
	struct device	sc_dev;		/* Configuration common part */
	struct evcnt	sc_intrcnt;	/* Interrupt counting */
	struct ethercom	sc_ec;		/* Ethernet common part */
#define sc_if	sc_ec.ec_if		/* network-visible interface */
	bus_space_tag_t	sc_iot;		/* bus space tag for CSR access */
	bus_addr_t	sc_ioh;		/* bus space handle for CSR access */
	bus_dma_tag_t	sc_dmat;	/* DMA tag for all maps below */
	struct qe_cdata	*sc_qedata;	/* Descriptor struct */
	struct qe_cdata	*sc_pqedata;	/* Unibus address of above */
	struct mbuf	*sc_txmbuf[TXDESCS];	/* mbufs pending transmit */
	struct mbuf	*sc_rxmbuf[RXDESCS];	/* mbufs in the receive ring */
	bus_dmamap_t	sc_xmtmap[TXDESCS];	/* transmit DMA maps */
	bus_dmamap_t	sc_rcvmap[RXDESCS];	/* receive DMA maps */
	bus_dmamap_t	sc_nulldmamap;	/* ethernet padding buffer */
	struct ubinfo	sc_ui;		/* Unibus memory resource handle */
	int		sc_intvec;	/* Interrupt vector */
	int		sc_nexttx;	/* next free transmit descriptor */
	int		sc_inq;		/* # of descriptors handed to chip */
	int		sc_lastack;	/* last transmit descriptor acked */
	int		sc_nextrx;	/* next receive descriptor to check */
	int		sc_setup;	/* Setup packet in queue */
};

static	int	qematch(struct device *, struct cfdata *, void *);
static	void	qeattach(struct device *, struct device *, void *);
static	void	qeinit(struct qe_softc *);
static	void	qestart(struct ifnet *);
static	void	qeintr(void *);
static	int	qeioctl(struct ifnet *, u_long, caddr_t);
static	int	qe_add_rxbuf(struct qe_softc *, int);
static	void	qe_setup(struct qe_softc *);
static	void	qetimeout(struct ifnet *);

CFATTACH_DECL(qe, sizeof(struct qe_softc),
    qematch, qeattach, NULL, NULL);

/* CSR access helpers; a variable named 'sc' must be in scope at use site. */
#define	QE_WCSR(csr, val) \
	bus_space_write_2(sc->sc_iot, sc->sc_ioh, csr, val)
#define	QE_RCSR(csr) \
	bus_space_read_2(sc->sc_iot, sc->sc_ioh, csr)

/*
 * Split a DMA bus address into the low 16 bits and the remaining high
 * bits for the two-word descriptor address fields.
 * NOTE(review): the 0x3f mask keeps six high bits, i.e. a 22-bit
 * Qbus address — confirm against the Qbus DMA documentation.
 */
#define	LOWORD(x)	((int)(x) & 0xffff)
#define	HIWORD(x)	(((int)(x) >> 16) & 0x3f)

/* Minimum frame length excluding CRC; shorter frames are padded to this. */
#define	ETHER_PAD_LEN	(ETHER_MIN_LEN - ETHER_CRC_LEN)
130
131 /*
132 * Check for present DEQNA. Done by sending a fake setup packet
133 * and wait for interrupt.
134 */
135 int
136 qematch(struct device *parent, struct cfdata *cf, void *aux)
137 {
138 struct qe_softc ssc;
139 struct qe_softc *sc = &ssc;
140 struct uba_attach_args *ua = aux;
141 struct uba_softc *ubasc = (struct uba_softc *)parent;
142 struct ubinfo ui;
143
144 #define PROBESIZE 4096
145 struct qe_ring *ring;
146 struct qe_ring *rp;
147 int error;
148
149 ring = malloc(PROBESIZE, M_TEMP, M_WAITOK);
150 bzero(sc, sizeof(struct qe_softc));
151 bzero(ring, PROBESIZE);
152 sc->sc_iot = ua->ua_iot;
153 sc->sc_ioh = ua->ua_ioh;
154 sc->sc_dmat = ua->ua_dmat;
155
156 ubasc->uh_lastiv -= 4;
157 QE_WCSR(QE_CSR_CSR, QE_RESET);
158 QE_WCSR(QE_CSR_VECTOR, ubasc->uh_lastiv);
159
160 /*
161 * Map the ring area. Actually this is done only to be able to
162 * send and receive a internal packet; some junk is loopbacked
163 * so that the DEQNA has a reason to interrupt.
164 */
165 ui.ui_size = PROBESIZE;
166 ui.ui_vaddr = (caddr_t)&ring[0];
167 if ((error = uballoc((void *)parent, &ui, UBA_CANTWAIT)))
168 return 0;
169
170 /*
171 * Init a simple "fake" receive and transmit descriptor that
172 * points to some unused area. Send a fake setup packet.
173 */
174 rp = (void *)ui.ui_baddr;
175 ring[0].qe_flag = ring[0].qe_status1 = QE_NOTYET;
176 ring[0].qe_addr_lo = LOWORD(&rp[4]);
177 ring[0].qe_addr_hi = HIWORD(&rp[4]) | QE_VALID | QE_EOMSG | QE_SETUP;
178 ring[0].qe_buf_len = -64;
179
180 ring[2].qe_flag = ring[2].qe_status1 = QE_NOTYET;
181 ring[2].qe_addr_lo = LOWORD(&rp[4]);
182 ring[2].qe_addr_hi = HIWORD(&rp[4]) | QE_VALID;
183 ring[2].qe_buf_len = -(1500/2);
184
185 QE_WCSR(QE_CSR_CSR, QE_RCSR(QE_CSR_CSR) & ~QE_RESET);
186 DELAY(1000);
187
188 /*
189 * Start the interface and wait for the packet.
190 */
191 QE_WCSR(QE_CSR_CSR, QE_INT_ENABLE|QE_XMIT_INT|QE_RCV_INT);
192 QE_WCSR(QE_CSR_RCLL, LOWORD(&rp[2]));
193 QE_WCSR(QE_CSR_RCLH, HIWORD(&rp[2]));
194 QE_WCSR(QE_CSR_XMTL, LOWORD(rp));
195 QE_WCSR(QE_CSR_XMTH, HIWORD(rp));
196 DELAY(10000);
197
198 /*
199 * All done with the bus resources.
200 */
201 ubfree((void *)parent, &ui);
202 free(ring, M_TEMP);
203 return 1;
204 }
205
206 /*
207 * Interface exists: make available by filling in network interface
208 * record. System will initialize the interface when it is ready
209 * to accept packets.
210 */
211 void
212 qeattach(struct device *parent, struct device *self, void *aux)
213 {
214 struct uba_attach_args *ua = aux;
215 struct uba_softc *ubasc = (struct uba_softc *)parent;
216 struct qe_softc *sc = (struct qe_softc *)self;
217 struct ifnet *ifp = (struct ifnet *)&sc->sc_if;
218 struct qe_ring *rp;
219 u_int8_t enaddr[ETHER_ADDR_LEN];
220 int i, error;
221 char *nullbuf;
222
223 sc->sc_iot = ua->ua_iot;
224 sc->sc_ioh = ua->ua_ioh;
225 sc->sc_dmat = ua->ua_dmat;
226
227 /*
228 * Allocate DMA safe memory for descriptors and setup memory.
229 */
230
231 sc->sc_ui.ui_size = sizeof(struct qe_cdata) + ETHER_PAD_LEN;
232 if ((error = ubmemalloc((struct uba_softc *)parent, &sc->sc_ui, 0))) {
233 printf(": unable to ubmemalloc(), error = %d\n", error);
234 return;
235 }
236 sc->sc_pqedata = (struct qe_cdata *)sc->sc_ui.ui_baddr;
237 sc->sc_qedata = (struct qe_cdata *)sc->sc_ui.ui_vaddr;
238
239 /*
240 * Zero the newly allocated memory.
241 */
242 bzero(sc->sc_qedata, sizeof(struct qe_cdata) + ETHER_PAD_LEN);
243 nullbuf = ((char*)sc->sc_qedata) + sizeof(struct qe_cdata);
244 /*
245 * Create the transmit descriptor DMA maps. We take advantage
246 * of the fact that the Qbus address space is big, and therefore
247 * allocate map registers for all transmit descriptors also,
248 * so that we can avoid this each time we send a packet.
249 */
250 for (i = 0; i < TXDESCS; i++) {
251 if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES,
252 1, MCLBYTES, 0, BUS_DMA_NOWAIT|BUS_DMA_ALLOCNOW,
253 &sc->sc_xmtmap[i]))) {
254 printf(": unable to create tx DMA map %d, error = %d\n",
255 i, error);
256 goto fail_4;
257 }
258 }
259
260 /*
261 * Create receive buffer DMA maps.
262 */
263 for (i = 0; i < RXDESCS; i++) {
264 if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
265 MCLBYTES, 0, BUS_DMA_NOWAIT,
266 &sc->sc_rcvmap[i]))) {
267 printf(": unable to create rx DMA map %d, error = %d\n",
268 i, error);
269 goto fail_5;
270 }
271 }
272 /*
273 * Pre-allocate the receive buffers.
274 */
275 for (i = 0; i < RXDESCS; i++) {
276 if ((error = qe_add_rxbuf(sc, i)) != 0) {
277 printf(": unable to allocate or map rx buffer %d\n,"
278 " error = %d\n", i, error);
279 goto fail_6;
280 }
281 }
282
283 if ((error = bus_dmamap_create(sc->sc_dmat, ETHER_PAD_LEN, 1,
284 ETHER_PAD_LEN, 0, BUS_DMA_NOWAIT,&sc->sc_nulldmamap)) != 0) {
285 printf("%s: unable to create pad buffer DMA map, "
286 "error = %d\n", sc->sc_dev.dv_xname, error);
287 goto fail_6;
288 }
289 if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_nulldmamap,
290 nullbuf, ETHER_PAD_LEN, NULL, BUS_DMA_NOWAIT)) != 0) {
291 printf("%s: unable to load pad buffer DMA map, "
292 "error = %d\n", sc->sc_dev.dv_xname, error);
293 goto fail_7;
294 }
295 bus_dmamap_sync(sc->sc_dmat, sc->sc_nulldmamap, 0, ETHER_PAD_LEN,
296 BUS_DMASYNC_PREWRITE);
297
298 /*
299 * Create ring loops of the buffer chains.
300 * This is only done once.
301 */
302
303 rp = sc->sc_qedata->qc_recv;
304 rp[RXDESCS].qe_addr_lo = LOWORD(&sc->sc_pqedata->qc_recv[0]);
305 rp[RXDESCS].qe_addr_hi = HIWORD(&sc->sc_pqedata->qc_recv[0]) |
306 QE_VALID | QE_CHAIN;
307 rp[RXDESCS].qe_flag = rp[RXDESCS].qe_status1 = QE_NOTYET;
308
309 rp = sc->sc_qedata->qc_xmit;
310 rp[TXDESCS].qe_addr_lo = LOWORD(&sc->sc_pqedata->qc_xmit[0]);
311 rp[TXDESCS].qe_addr_hi = HIWORD(&sc->sc_pqedata->qc_xmit[0]) |
312 QE_VALID | QE_CHAIN;
313 rp[TXDESCS].qe_flag = rp[TXDESCS].qe_status1 = QE_NOTYET;
314
315 /*
316 * Get the vector that were set at match time, and remember it.
317 */
318 sc->sc_intvec = ubasc->uh_lastiv;
319 QE_WCSR(QE_CSR_CSR, QE_RESET);
320 DELAY(1000);
321 QE_WCSR(QE_CSR_CSR, QE_RCSR(QE_CSR_CSR) & ~QE_RESET);
322
323 /*
324 * Read out ethernet address and tell which type this card is.
325 */
326 for (i = 0; i < 6; i++)
327 enaddr[i] = QE_RCSR(i * 2) & 0xff;
328
329 QE_WCSR(QE_CSR_VECTOR, sc->sc_intvec | 1);
330 printf("\n%s: %s, hardware address %s\n", sc->sc_dev.dv_xname,
331 QE_RCSR(QE_CSR_VECTOR) & 1 ? "delqa":"deqna",
332 ether_sprintf(enaddr));
333
334 QE_WCSR(QE_CSR_VECTOR, QE_RCSR(QE_CSR_VECTOR) & ~1); /* ??? */
335
336 uba_intr_establish(ua->ua_icookie, ua->ua_cvec, qeintr,
337 sc, &sc->sc_intrcnt);
338 evcnt_attach_dynamic(&sc->sc_intrcnt, EVCNT_TYPE_INTR, ua->ua_evcnt,
339 sc->sc_dev.dv_xname, "intr");
340
341 strcpy(ifp->if_xname, sc->sc_dev.dv_xname);
342 ifp->if_softc = sc;
343 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
344 ifp->if_start = qestart;
345 ifp->if_ioctl = qeioctl;
346 ifp->if_watchdog = qetimeout;
347 IFQ_SET_READY(&ifp->if_snd);
348
349 /*
350 * Attach the interface.
351 */
352 if_attach(ifp);
353 ether_ifattach(ifp, enaddr);
354
355 return;
356
357 /*
358 * Free any resources we've allocated during the failed attach
359 * attempt. Do this in reverse order and fall through.
360 */
361 fail_7:
362 bus_dmamap_destroy(sc->sc_dmat, sc->sc_nulldmamap);
363 fail_6:
364 for (i = 0; i < RXDESCS; i++) {
365 if (sc->sc_rxmbuf[i] != NULL) {
366 bus_dmamap_unload(sc->sc_dmat, sc->sc_rcvmap[i]);
367 m_freem(sc->sc_rxmbuf[i]);
368 }
369 }
370 fail_5:
371 for (i = 0; i < RXDESCS; i++) {
372 if (sc->sc_xmtmap[i] != NULL)
373 bus_dmamap_destroy(sc->sc_dmat, sc->sc_xmtmap[i]);
374 }
375 fail_4:
376 for (i = 0; i < TXDESCS; i++) {
377 if (sc->sc_rcvmap[i] != NULL)
378 bus_dmamap_destroy(sc->sc_dmat, sc->sc_rcvmap[i]);
379 }
380 }
381
/*
 * Initialization of interface.
 *
 * Resets the chip, drops any pending transmits, rearms the receive
 * ring and queues a setup frame (which also starts the transmitter).
 */
void
qeinit(struct qe_softc *sc)
{
	struct ifnet *ifp = (struct ifnet *)&sc->sc_if;
	struct qe_cdata *qc = sc->sc_qedata;
	int i;


	/*
	 * Reset the interface.
	 */
	QE_WCSR(QE_CSR_CSR, QE_RESET);
	DELAY(1000);
	QE_WCSR(QE_CSR_CSR, QE_RCSR(QE_CSR_CSR) & ~QE_RESET);
	/* Restore the interrupt vector saved away at attach time. */
	QE_WCSR(QE_CSR_VECTOR, sc->sc_intvec);

	sc->sc_nexttx = sc->sc_inq = sc->sc_lastack = 0;
	/*
	 * Release and init transmit descriptors.
	 * Any mbufs still on the ring are unmapped and freed.
	 */
	for (i = 0; i < TXDESCS; i++) {
		if (sc->sc_txmbuf[i]) {
			bus_dmamap_unload(sc->sc_dmat, sc->sc_xmtmap[i]);
			m_freem(sc->sc_txmbuf[i]);
			sc->sc_txmbuf[i] = 0;
		}
		qc->qc_xmit[i].qe_addr_hi = 0; /* Clear valid bit */
		qc->qc_xmit[i].qe_status1 = qc->qc_xmit[i].qe_flag = QE_NOTYET;
	}


	/*
	 * Init receive descriptors.
	 */
	for (i = 0; i < RXDESCS; i++)
		qc->qc_recv[i].qe_status1 = qc->qc_recv[i].qe_flag = QE_NOTYET;
	sc->sc_nextrx = 0;

	/*
	 * Write the descriptor addresses to the device.
	 * Receiving packets will be enabled in the interrupt routine.
	 */
	QE_WCSR(QE_CSR_CSR, QE_INT_ENABLE|QE_XMIT_INT|QE_RCV_INT);
	QE_WCSR(QE_CSR_RCLL, LOWORD(sc->sc_pqedata->qc_recv));
	QE_WCSR(QE_CSR_RCLH, HIWORD(sc->sc_pqedata->qc_recv));

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	/*
	 * Send a setup frame.
	 * This will start the transmit machinery as well.
	 */
	qe_setup(sc);

}
441
/*
 * Start output on interface.
 *
 * Drains the send queue into the transmit descriptor ring, DMA-mapping
 * each mbuf segment directly and padding short frames from the zeroed
 * null buffer.
 */
void
qestart(struct ifnet *ifp)
{
	struct qe_softc *sc = ifp->if_softc;
	struct qe_cdata *qc = sc->sc_qedata;
	paddr_t buffer;
	struct mbuf *m, *m0;
	int idx, len, s, i, totlen, buflen, error;
	short orword, csr;

	/* Receiver not enabled means the interface is not running yet. */
	if ((QE_RCSR(QE_CSR_CSR) & QE_RCV_ENABLE) == 0)
		return;

	s = splnet();
	while (sc->sc_inq < (TXDESCS - 1)) {

		/* A deferred setup frame takes priority over data. */
		if (sc->sc_setup) {
			qe_setup(sc);
			continue;
		}
		idx = sc->sc_nexttx;
		IFQ_POLL(&ifp->if_snd, m);
		if (m == 0)
			goto out;
		/*
		 * Count number of mbufs in chain.
		 * Always do DMA directly from mbufs, therefore the transmit
		 * ring is really big.
		 */
		for (m0 = m, i = 0; m0; m0 = m0->m_next)
			if (m0->m_len)
				i++;
		/* Short frames need one extra descriptor for padding. */
		if (m->m_pkthdr.len < ETHER_PAD_LEN) {
			buflen = ETHER_PAD_LEN;
			i++;
		} else
			buflen = m->m_pkthdr.len;
		if (i >= TXDESCS)
			panic("qestart");

		/* Not enough free descriptors: stall until tx interrupt. */
		if ((i + sc->sc_inq) >= (TXDESCS - 1)) {
			ifp->if_flags |= IFF_OACTIVE;
			goto out;
		}

		IFQ_DEQUEUE(&ifp->if_snd, m);

#if NBPFILTER > 0
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m);
#endif
		/*
		 * m now points to a mbuf chain that can be loaded.
		 * Loop around and set it.
		 */
		totlen = 0;
		for (m0 = m; ; m0 = m0->m_next) {
			if (m0) {
				if (m0->m_len == 0)
					continue;
				/*
				 * NOTE(review): the return value of
				 * bus_dmamap_load() is never checked; on
				 * failure the stale map contents would be
				 * handed to the chip. TODO: verify/handle.
				 */
				error = bus_dmamap_load(sc->sc_dmat,
				    sc->sc_xmtmap[idx], mtod(m0, void *),
				    m0->m_len, 0, 0);
				buffer = sc->sc_xmtmap[idx]->dm_segs[0].ds_addr;
				len = m0->m_len;
			} else if (totlen < ETHER_PAD_LEN) {
				/* Pad the frame from the zeroed null buffer. */
				buffer = sc->sc_nulldmamap->dm_segs[0].ds_addr;
				len = ETHER_PAD_LEN - totlen;
			} else {
				break;
			}

			totlen += len;
			/* Word alignment calc */
			orword = 0;
			if (totlen == buflen) {
				/* Last segment: mark end of message and
				 * remember the mbuf for later freeing. */
				orword |= QE_EOMSG;
				sc->sc_txmbuf[idx] = m;
			}
			if ((buffer & 1) || (len & 1))
				len += 2;
			if (buffer & 1)
				orword |= QE_ODDBEGIN;
			if ((buffer + len) & 1)
				orword |= QE_ODDEND;
			/* Descriptor length field is a negative word count. */
			qc->qc_xmit[idx].qe_buf_len = -(len/2);
			qc->qc_xmit[idx].qe_addr_lo = LOWORD(buffer);
			qc->qc_xmit[idx].qe_addr_hi = HIWORD(buffer);
			qc->qc_xmit[idx].qe_flag =
			    qc->qc_xmit[idx].qe_status1 = QE_NOTYET;
			/* Setting QE_VALID last hands the slot to the chip. */
			qc->qc_xmit[idx].qe_addr_hi |= (QE_VALID | orword);
			if (++idx == TXDESCS)
				idx = 0;
			sc->sc_inq++;
			if (m0 == NULL)
				break;
		}
#ifdef DIAGNOSTIC
		if (totlen != buflen)
			panic("qestart: len fault");
#endif

		/*
		 * Kick off the transmit logic, if it is stopped.
		 */
		csr = QE_RCSR(QE_CSR_CSR);
		if (csr & QE_XL_INVALID) {
			QE_WCSR(QE_CSR_XMTL,
			    LOWORD(&sc->sc_pqedata->qc_xmit[sc->sc_nexttx]));
			QE_WCSR(QE_CSR_XMTH,
			    HIWORD(&sc->sc_pqedata->qc_xmit[sc->sc_nexttx]));
		}
		sc->sc_nexttx = idx;
	}
	if (sc->sc_inq == (TXDESCS - 1))
		ifp->if_flags |= IFF_OACTIVE;

out:	if (sc->sc_inq)
		ifp->if_timer = 5; /* If transmit logic dies */
	splx(s);
}
566
/*
 * Interrupt service routine: reap received frames, acknowledge
 * completed transmits, and restart a receive list the chip thinks
 * is invalid.
 */
static void
qeintr(void *arg)
{
	struct qe_softc *sc = arg;
	struct qe_cdata *qc = sc->sc_qedata;
	struct ifnet *ifp = &sc->sc_if;
	struct mbuf *m;
	int csr, status1, status2, len;

	csr = QE_RCSR(QE_CSR_CSR);

	/* Re-enable receiver and all interrupt sources. */
	QE_WCSR(QE_CSR_CSR, QE_RCV_ENABLE | QE_INT_ENABLE | QE_XMIT_INT |
	    QE_RCV_INT | QE_ILOOP);

	if (csr & QE_RCV_INT)
		while (qc->qc_recv[sc->sc_nextrx].qe_status1 != QE_NOTYET) {
			status1 = qc->qc_recv[sc->sc_nextrx].qe_status1;
			status2 = qc->qc_recv[sc->sc_nextrx].qe_status2;

			m = sc->sc_rxmbuf[sc->sc_nextrx];
			/*
			 * Frame length is split across the two status words.
			 * NOTE(review): the +60 bias presumably reflects the
			 * hardware reporting length minus the minimum frame
			 * size — confirm against the DEQNA register spec.
			 */
			len = ((status1 & QE_RBL_HI) |
			    (status2 & QE_RBL_LO)) + 60;
			/* Replace the ring buffer before handing m upward. */
			qe_add_rxbuf(sc, sc->sc_nextrx);
			m->m_pkthdr.rcvif = ifp;
			m->m_pkthdr.len = m->m_len = len;
			if (++sc->sc_nextrx == RXDESCS)
				sc->sc_nextrx = 0;
#if NBPFILTER > 0
			if (ifp->if_bpf)
				bpf_mtap(ifp->if_bpf, m);
#endif
			/* Looped-back setup frames are not real input. */
			if ((status1 & QE_ESETUP) == 0)
				(*ifp->if_input)(ifp, m);
			else
				m_freem(m);
		}

	if (csr & (QE_XMIT_INT|QE_XL_INVALID)) {
		while (qc->qc_xmit[sc->sc_lastack].qe_status1 != QE_NOTYET) {
			int idx = sc->sc_lastack;

			sc->sc_inq--;
			if (++sc->sc_lastack == TXDESCS)
				sc->sc_lastack = 0;

			/* XXX collect statistics */
			qc->qc_xmit[idx].qe_addr_hi &= ~QE_VALID;
			qc->qc_xmit[idx].qe_status1 =
			    qc->qc_xmit[idx].qe_flag = QE_NOTYET;

			/* Setup frames carry no mbuf; nothing to free. */
			if (qc->qc_xmit[idx].qe_addr_hi & QE_SETUP)
				continue;
			/*
			 * NOTE(review): this unload condition (mbuf absent,
			 * or short/padded frame) should be verified against
			 * how qestart() loads sc_xmtmap per descriptor.
			 */
			if (sc->sc_txmbuf[idx] == NULL ||
			    sc->sc_txmbuf[idx]->m_pkthdr.len < ETHER_PAD_LEN)
				bus_dmamap_unload(sc->sc_dmat,
				    sc->sc_xmtmap[idx]);
			if (sc->sc_txmbuf[idx]) {
				m_freem(sc->sc_txmbuf[idx]);
				sc->sc_txmbuf[idx] = NULL;
			}
		}
		/* Transmitter made progress: clear watchdog, refill ring. */
		ifp->if_timer = 0;
		ifp->if_flags &= ~IFF_OACTIVE;
		qestart(ifp); /* Put in more in queue */
	}
	/*
	 * How can the receive list get invalid???
	 * Verified that it happens anyway.
	 */
	if ((qc->qc_recv[sc->sc_nextrx].qe_status1 == QE_NOTYET) &&
	    (QE_RCSR(QE_CSR_CSR) & QE_RL_INVALID)) {
		QE_WCSR(QE_CSR_RCLL,
		    LOWORD(&sc->sc_pqedata->qc_recv[sc->sc_nextrx]));
		QE_WCSR(QE_CSR_RCLH,
		    HIWORD(&sc->sc_pqedata->qc_recv[sc->sc_nextrx]));
	}
}
644
645 /*
646 * Process an ioctl request.
647 */
648 int
649 qeioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
650 {
651 struct qe_softc *sc = ifp->if_softc;
652 struct ifreq *ifr = (struct ifreq *)data;
653 struct ifaddr *ifa = (struct ifaddr *)data;
654 int s = splnet(), error = 0;
655
656 switch (cmd) {
657
658 case SIOCSIFADDR:
659 ifp->if_flags |= IFF_UP;
660 switch(ifa->ifa_addr->sa_family) {
661 #ifdef INET
662 case AF_INET:
663 qeinit(sc);
664 arp_ifinit(ifp, ifa);
665 break;
666 #endif
667 }
668 break;
669
670 case SIOCSIFFLAGS:
671 if ((ifp->if_flags & IFF_UP) == 0 &&
672 (ifp->if_flags & IFF_RUNNING) != 0) {
673 /*
674 * If interface is marked down and it is running,
675 * stop it. (by disabling receive mechanism).
676 */
677 QE_WCSR(QE_CSR_CSR,
678 QE_RCSR(QE_CSR_CSR) & ~QE_RCV_ENABLE);
679 ifp->if_flags &= ~IFF_RUNNING;
680 } else if ((ifp->if_flags & IFF_UP) != 0 &&
681 (ifp->if_flags & IFF_RUNNING) == 0) {
682 /*
683 * If interface it marked up and it is stopped, then
684 * start it.
685 */
686 qeinit(sc);
687 } else if ((ifp->if_flags & IFF_UP) != 0) {
688 /*
689 * Send a new setup packet to match any new changes.
690 * (Like IFF_PROMISC etc)
691 */
692 qe_setup(sc);
693 }
694 break;
695
696 case SIOCADDMULTI:
697 case SIOCDELMULTI:
698 /*
699 * Update our multicast list.
700 */
701 error = (cmd == SIOCADDMULTI) ?
702 ether_addmulti(ifr, &sc->sc_ec):
703 ether_delmulti(ifr, &sc->sc_ec);
704
705 if (error == ENETRESET) {
706 /*
707 * Multicast list has changed; set the hardware filter
708 * accordingly.
709 */
710 qe_setup(sc);
711 error = 0;
712 }
713 break;
714
715 default:
716 error = EINVAL;
717
718 }
719 splx(s);
720 return (error);
721 }
722
/*
 * Add a receive buffer to the indicated descriptor.
 *
 * Allocates a fresh mbuf cluster, maps it for DMA and (re)initializes
 * receive descriptor i to point at it. Returns 0 or ENOBUFS.
 */
int
qe_add_rxbuf(struct qe_softc *sc, int i)
{
	struct mbuf *m;
	struct qe_ring *rp;
	vaddr_t addr;
	int error;

	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m == NULL)
		return (ENOBUFS);

	/* A full cluster is required; back out if we can't get one. */
	MCLGET(m, M_DONTWAIT);
	if ((m->m_flags & M_EXT) == 0) {
		m_freem(m);
		return (ENOBUFS);
	}

	/* Unload any previous mapping before reusing this map. */
	if (sc->sc_rxmbuf[i] != NULL)
		bus_dmamap_unload(sc->sc_dmat, sc->sc_rcvmap[i]);

	error = bus_dmamap_load(sc->sc_dmat, sc->sc_rcvmap[i],
	    m->m_ext.ext_buf, m->m_ext.ext_size, NULL, BUS_DMA_NOWAIT);
	if (error)
		panic("%s: can't load rx DMA map %d, error = %d",
		    sc->sc_dev.dv_xname, i, error);
	sc->sc_rxmbuf[i] = m;

	bus_dmamap_sync(sc->sc_dmat, sc->sc_rcvmap[i], 0,
	    sc->sc_rcvmap[i]->dm_mapsize, BUS_DMASYNC_PREREAD);

	/*
	 * We know that the mbuf cluster is page aligned. Also, be sure
	 * that the IP header will be longword aligned.
	 */
	m->m_data += 2;
	addr = sc->sc_rcvmap[i]->dm_segs[0].ds_addr + 2;
	rp = &sc->sc_qedata->qc_recv[i];
	rp->qe_flag = rp->qe_status1 = QE_NOTYET;
	rp->qe_addr_lo = LOWORD(addr);
	rp->qe_addr_hi = HIWORD(addr) | QE_VALID;
	/* Descriptor length field is a negative word count. */
	rp->qe_buf_len = -(m->m_ext.ext_size - 2)/2;

	return (0);
}
771
/*
 * Create a setup packet and put in queue for sending.
 *
 * The setup frame programs the chip's address filter (own address,
 * broadcast, and up to 12 multicast addresses). If no transmit
 * descriptor is free, remember the request and let qestart() retry.
 */
void
qe_setup(struct qe_softc *sc)
{
	struct ether_multi *enm;
	struct ether_multistep step;
	struct qe_cdata *qc = sc->sc_qedata;
	struct ifnet *ifp = &sc->sc_if;
	u_int8_t *enaddr = LLADDR(ifp->if_sadl);
	int i, j, k, idx, s;

	s = splnet();
	/* Transmit ring full: defer; qestart() checks sc_setup. */
	if (sc->sc_inq == (TXDESCS - 1)) {
		sc->sc_setup = 1;
		splx(s);
		return;
	}
	sc->sc_setup = 0;
	/*
	 * Init the setup packet with valid info.
	 */
	memset(qc->qc_setup, 0xff, sizeof(qc->qc_setup)); /* Broadcast */
	/*
	 * NOTE(review): addresses appear to be laid out column-wise,
	 * one byte per 8-byte row — confirm against the DEQNA setup
	 * packet format description.
	 */
	for (i = 0; i < ETHER_ADDR_LEN; i++)
		qc->qc_setup[i * 8 + 1] = enaddr[i]; /* Own address */

	/*
	 * Multicast handling. The DEQNA can handle up to 12 direct
	 * ethernet addresses.
	 */
	j = 3; k = 0;
	ifp->if_flags &= ~IFF_ALLMULTI;
	ETHER_FIRST_MULTI(step, &sc->sc_ec, enm);
	while (enm != NULL) {
		/* Address ranges can't be filtered; go allmulti. */
		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, 6)) {
			ifp->if_flags |= IFF_ALLMULTI;
			break;
		}
		for (i = 0; i < ETHER_ADDR_LEN; i++)
			qc->qc_setup[i * 8 + j + k] = enm->enm_addrlo[i];
		j++;
		if (j == 8) {
			j = 1; k += 64;
		}
		/* More than 12 slots used: fall back to allmulti. */
		if (k > 64) {
			ifp->if_flags |= IFF_ALLMULTI;
			break;
		}
		ETHER_NEXT_MULTI(step, enm);
	}
	idx = sc->sc_nexttx;
	qc->qc_xmit[idx].qe_buf_len = -64;

	/*
	 * How is the DEQNA turned in ALLMULTI mode???
	 * Until someone tells me, fall back to PROMISC when more than
	 * 12 ethernet addresses.
	 */
	if (ifp->if_flags & IFF_ALLMULTI)
		ifp->if_flags |= IFF_PROMISC;
	else if (ifp->if_pcount == 0)
		ifp->if_flags &= ~IFF_PROMISC;
	/*
	 * NOTE(review): length -65 vs -64 presumably selects
	 * promiscuous mode in the setup frame encoding — confirm
	 * against the hardware documentation.
	 */
	if (ifp->if_flags & IFF_PROMISC)
		qc->qc_xmit[idx].qe_buf_len = -65;

	qc->qc_xmit[idx].qe_addr_lo = LOWORD(sc->sc_pqedata->qc_setup);
	qc->qc_xmit[idx].qe_addr_hi =
	    HIWORD(sc->sc_pqedata->qc_setup) | QE_SETUP | QE_EOMSG;
	qc->qc_xmit[idx].qe_status1 = qc->qc_xmit[idx].qe_flag = QE_NOTYET;
	qc->qc_xmit[idx].qe_addr_hi |= QE_VALID;

	/* Restart the transmit list if the chip has run off its end. */
	if (QE_RCSR(QE_CSR_CSR) & QE_XL_INVALID) {
		QE_WCSR(QE_CSR_XMTL,
		    LOWORD(&sc->sc_pqedata->qc_xmit[idx]));
		QE_WCSR(QE_CSR_XMTH,
		    HIWORD(&sc->sc_pqedata->qc_xmit[idx]));
	}

	sc->sc_inq++;
	if (++sc->sc_nexttx == TXDESCS)
		sc->sc_nexttx = 0;
	splx(s);
}
856
857 /*
858 * Check for dead transmit logic. Not uncommon.
859 */
860 void
861 qetimeout(struct ifnet *ifp)
862 {
863 struct qe_softc *sc = ifp->if_softc;
864
865 if (sc->sc_inq == 0)
866 return;
867
868 printf("%s: xmit logic died, resetting...\n", sc->sc_dev.dv_xname);
869 /*
870 * Do a reset of interface, to get it going again.
871 * Will it work by just restart the transmit logic?
872 */
873 qeinit(sc);
874 }
Cache object: dbdc211e698b41464ccca198e7b68371
|