FreeBSD/Linux Kernel Cross Reference
sys/dev/lnc/if_lnc.c
1 /*-
2 * Copyright (c) 1994-2000
3 * Paul Richards. All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer,
10 * verbatim and that no modifications are made prior to this
11 * point in the file.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 * 3. The name Paul Richards may not be used to endorse or promote products
16 * derived from this software without specific prior written permission.
17 *
18 * THIS SOFTWARE IS PROVIDED BY PAUL RICHARDS ``AS IS'' AND
19 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
20 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21 * ARE DISCLAIMED. IN NO EVENT SHALL PAUL RICHARDS BE LIABLE
22 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
23 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
24 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
25 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
26 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
27 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
28 * SUCH DAMAGE.
29 *
30 * $FreeBSD: releng/5.0/sys/dev/lnc/if_lnc.c 106937 2002-11-14 23:54:55Z sam $
31 */
32
33 /*
34 #define DIAGNOSTIC
35 #define DEBUG
36 *
37 * TODO ----
38 *
39 * Check all the XXX comments -- some of them are just things I've left
40 * unfinished rather than "difficult" problems that were hacked around.
41 *
42 * Check log settings.
43 *
44 * Check how all the arpcom flags get set and used.
45 *
46 * Re-inline and re-static all routines after debugging.
47 *
48 * Remember to assign iobase in SHMEM probe routines.
49 *
 * Replace all occurrences of LANCE-controller-card etc in prints by the name
51 * strings of the appropriate type -- nifty window dressing
52 *
53 * Add DEPCA support -- mostly done.
54 *
55 */
56
57 #include "opt_inet.h"
58
59 /* Some defines that should really be in generic locations */
60 #define FCS_LEN 4
61 #define MULTICAST_FILTER_LEN 8
62
63 #include <sys/param.h>
64 #include <sys/systm.h>
65 #include <sys/bus.h>
66 #include <sys/kernel.h>
67 #include <sys/malloc.h>
68 #include <sys/mbuf.h>
69 #include <sys/module.h>
70 #include <sys/socket.h>
71 #include <sys/sockio.h>
72 #include <sys/syslog.h>
73
74 #include <machine/bus.h>
75 #include <machine/resource.h>
76 #include <sys/rman.h>
77
78 #include <net/ethernet.h>
79 #include <net/if.h>
80 #include <net/if_dl.h>
81 #include <net/if_types.h>
82
83 #include <netinet/in.h>
84 #include <netinet/if_ether.h>
85
86 #include <net/bpf.h>
87
88 #include <machine/md_var.h>
89
90 #include <dev/lnc/if_lncvar.h>
91 #include <dev/lnc/if_lncreg.h>
92
/* Devclass shared by all lnc bus front-ends (ISA/PCI attachments). */
devclass_t lnc_devclass;

/*
 * Printable names for the supported card types, indexed by the
 * sc->nic.ident value (UNKNOWN/BICC/NE2100/DEPCA/CNET98S).
 */
static char const * const nic_ident[] = {
	"Unknown",
	"BICC",
	"NE2100",
	"DEPCA",
	"CNET98S",	/* PC-98 */
};

/*
 * Printable names for the supported controller chips, indexed by
 * the sc->nic.ic value (LANCE, C_LANCE, PCnet variants).
 */
static char const * const ic_ident[] = {
	"Unknown",
	"LANCE",
	"C-LANCE",
	"PCnet-ISA",
	"PCnet-ISA+",
	"PCnet-ISA II",
	"PCnet-32 VL-Bus",
	"PCnet-PCI",
	"PCnet-PCI II",
	"PCnet-FAST",
	"PCnet-FAST+",
	"PCnet-Home",
};
117
118 static void lnc_setladrf(struct lnc_softc *sc);
119 static void lnc_reset(struct lnc_softc *sc);
120 static void lnc_free_mbufs(struct lnc_softc *sc);
121 static __inline int alloc_mbuf_cluster(struct lnc_softc *sc,
122 struct host_ring_entry *desc);
123 static __inline struct mbuf *chain_mbufs(struct lnc_softc *sc,
124 int start_of_packet,
125 int pkt_len);
126 static __inline struct mbuf *mbuf_packet(struct lnc_softc *sc,
127 int start_of_packet,
128 int pkt_len);
129 static __inline void lnc_rint(struct lnc_softc *sc);
130 static __inline void lnc_tint(struct lnc_softc *sc);
131
132 static void lnc_init(void *);
133 static __inline int mbuf_to_buffer(struct mbuf *m, char *buffer);
134 static __inline struct mbuf *chain_to_cluster(struct mbuf *m);
135 static void lnc_start(struct ifnet *ifp);
136 static int lnc_ioctl(struct ifnet *ifp, u_long command, caddr_t data);
137 static void lnc_watchdog(struct ifnet *ifp);
138 #ifdef DEBUG
139 void lnc_dump_state(struct lnc_softc *sc);
140 void mbuf_dump_chain(struct mbuf *m);
141 #endif
142
143 u_short
144 read_csr(struct lnc_softc *sc, u_short port)
145 {
146 lnc_outw(sc->rap, port);
147 return (lnc_inw(sc->rdp));
148 }
149
/*
 * Write a Control and Status Register: select it through the Register
 * Address Port (RAP), then write the value through the Register Data
 * Port (RDP).  Note RAP is left pointing at `port' afterwards.
 */
void
write_csr(struct lnc_softc *sc, u_short port, u_short val)
{
	lnc_outw(sc->rap, port);
	lnc_outw(sc->rdp, val);
}
156
/*
 * Write a Bus Configuration Register: select it through RAP, then
 * write the value through the BCR data port (BDP).
 */
static __inline void
write_bcr(struct lnc_softc *sc, u_short port, u_short val)
{
	lnc_outw(sc->rap, port);
	lnc_outw(sc->bdp, val);
}
163
164 static __inline u_short
165 read_bcr(struct lnc_softc *sc, u_short port)
166 {
167 lnc_outw(sc->rap, port);
168 return (lnc_inw(sc->bdp));
169 }
170
171 int
172 lance_probe(struct lnc_softc *sc)
173 {
174 write_csr(sc, CSR0, STOP);
175
176 if ((lnc_inw(sc->rdp) & STOP) && ! (read_csr(sc, CSR3))) {
177 /*
178 * Check to see if it's a C-LANCE. For the LANCE the INEA bit
179 * cannot be set while the STOP bit is. This restriction is
180 * removed for the C-LANCE.
181 */
182 write_csr(sc, CSR0, INEA);
183 if (read_csr(sc, CSR0) & INEA)
184 return (C_LANCE);
185 else
186 return (LANCE);
187 } else
188 return (UNKNOWN);
189 }
190
191 static __inline u_long
192 ether_crc(const u_char *ether_addr)
193 {
194 #define POLYNOMIAL 0xEDB88320UL
195 u_char i, j, addr;
196 u_int crc = 0xFFFFFFFFUL;
197
198 for (i = 0; i < ETHER_ADDR_LEN; i++) {
199 addr = *ether_addr++;
200 for (j = 0; j < MULTICAST_FILTER_LEN; j++) {
201 crc = (crc >> 1) ^ (((crc ^ addr) & 1) ? POLYNOMIAL : 0);
202 addr >>= 1;
203 }
204 }
205 return crc;
206 #undef POLYNOMIAL
207 }
208
/*
 * Release every bus resource acquired at attach time: the interrupt
 * handler and IRQ, the I/O port range, the ISA DRQ, and the busdma
 * memory backing the descriptor rings.  Each handle is checked before
 * use, so this is safe to call with only a subset allocated (e.g. on
 * a partial attach failure).
 */
void
lnc_release_resources(device_t dev)
{
	lnc_softc_t *sc = device_get_softc(dev);

	if (sc->irqres) {
		/* Tear down the handler before releasing the IRQ itself. */
		bus_teardown_intr(dev, sc->irqres, sc->intrhand);
		bus_release_resource(dev, SYS_RES_IRQ, sc->irqrid, sc->irqres);
	}

	if (sc->portres)
		bus_release_resource(dev, SYS_RES_IOPORT,
		    sc->portrid, sc->portres);
	if (sc->drqres)
		bus_release_resource(dev, SYS_RES_DRQ, sc->drqrid, sc->drqres);

	if (sc->dmat) {
		if (sc->dmamap) {
			/*
			 * Unload the DMA map before freeing the memory
			 * (sc->recv_ring is the start of the dmamem
			 * allocation), then destroy the tag.
			 */
			bus_dmamap_unload(sc->dmat, sc->dmamap);
			bus_dmamem_free(sc->dmat, sc->recv_ring, sc->dmamap);
		}
		bus_dma_tag_destroy(sc->dmat);
	}
}
233
234 /*
235 * Set up the logical address filter for multicast packets
236 */
237 static __inline void
238 lnc_setladrf(struct lnc_softc *sc)
239 {
240 struct ifnet *ifp = &sc->arpcom.ac_if;
241 struct ifmultiaddr *ifma;
242 u_long index;
243 int i;
244
245 if (sc->flags & IFF_ALLMULTI) {
246 for (i=0; i < MULTICAST_FILTER_LEN; i++)
247 sc->init_block->ladrf[i] = 0xFF;
248 return;
249 }
250
251 /*
252 * For each multicast address, calculate a crc for that address and
253 * then use the high order 6 bits of the crc as a hash code where
254 * bits 3-5 select the byte of the address filter and bits 0-2 select
255 * the bit within that byte.
256 */
257
258 bzero(sc->init_block->ladrf, MULTICAST_FILTER_LEN);
259 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
260 if (ifma->ifma_addr->sa_family != AF_LINK)
261 continue;
262
263 index = ether_crc(LLADDR((struct sockaddr_dl *)ifma->ifma_addr))
264 >> 26;
265 sc->init_block->ladrf[index >> 3] |= 1 << (index & 7);
266 }
267 }
268
/*
 * Stop the controller by writing the STOP bit to CSR0.
 */
void
lnc_stop(struct lnc_softc *sc)
{
	write_csr(sc, CSR0, STOP);
}
274
/*
 * Reset the driver by re-running the full initialisation sequence;
 * lnc_init() itself stops the chip before reprogramming it.
 */
static void
lnc_reset(struct lnc_softc *sc)
{
	lnc_init(sc);
}
280
281 static void
282 lnc_free_mbufs(struct lnc_softc *sc)
283 {
284 int i;
285
286 /*
287 * We rely on other routines to keep the buff.mbuf field valid. If
288 * it's not NULL then we assume it points to an allocated mbuf.
289 */
290
291 for (i = 0; i < NDESC(sc->nrdre); i++)
292 if ((sc->recv_ring + i)->buff.mbuf)
293 m_free((sc->recv_ring + i)->buff.mbuf);
294
295 for (i = 0; i < NDESC(sc->ntdre); i++)
296 if ((sc->trans_ring + i)->buff.mbuf)
297 m_free((sc->trans_ring + i)->buff.mbuf);
298
299 if (sc->mbuf_count)
300 m_freem(sc->mbufs);
301 }
302
/*
 * Attach a fresh mbuf cluster to the given receive descriptor and hand
 * the descriptor back to the controller (OWN set).  Clusters are taken
 * from the driver's local cache when available, otherwise allocated.
 * Returns 0 on success, 1 if no mbuf/cluster could be obtained.
 */
static __inline int
alloc_mbuf_cluster(struct lnc_softc *sc, struct host_ring_entry *desc)
{
	register struct mds *md = desc->md;
	struct mbuf *m=0;
	int addr;

	/* Try and get cluster off local cache */
	if (sc->mbuf_count) {
		sc->mbuf_count--;
		m = sc->mbufs;
		sc->mbufs = m->m_next;
		/* XXX m->m_data = m->m_ext.ext_buf;*/
	} else {
		MGET(m, M_DONTWAIT, MT_DATA);
		if (!m)
			return(1);
		MCLGET(m, M_DONTWAIT);
		if (!m->m_ext.ext_buf) {
			/* Got an mbuf but no cluster; give it back. */
			m_free(m);
			return(1);
		}
	}

	/*
	 * Fill in the descriptor: low 16 bits of the buffer's physical
	 * address in md0, high bits plus OWN in md1, and the (negated,
	 * two's complement) buffer length in md2.
	 */
	desc->buff.mbuf = m;
	addr = kvtop(m->m_data);
	md->md0 = addr;
	md->md1= ((addr >> 16) & 0xff) | OWN;
	md->md2 = -(short)(MCLBYTES - sizeof(struct pkthdr));
	md->md3 = 0;
	return(0);
}
335
/*
 * Build a packet from the receive-ring descriptors [start_of_packet,
 * sc->recv_next) by chaining the mbuf clusters already attached to
 * them, replacing each descriptor's cluster with a fresh one as we go
 * (DMA_MBUF mode zero-copy receive).  Returns the head of the chain,
 * or NULL if a replacement cluster could not be allocated.
 *
 * NOTE(review): on allocation failure this returns NULL without
 * freeing the partially built chain — possible mbuf leak; confirm
 * against the caller's error handling.
 */
static __inline struct mbuf *
chain_mbufs(struct lnc_softc *sc, int start_of_packet, int pkt_len)
{
	struct mbuf *head, *m;
	struct host_ring_entry *desc;

	/*
	 * Turn head into a pkthdr mbuf --
	 * assumes a pkthdr type mbuf was
	 * allocated to the descriptor
	 * originally.
	 */

	desc = sc->recv_ring + start_of_packet;

	head = desc->buff.mbuf;
	head->m_flags |= M_PKTHDR;
	bzero(&head->m_pkthdr, sizeof(head->m_pkthdr));

	m = head;
	do {
		m = desc->buff.mbuf;
		/* Each buffer holds at most one cluster's worth of data. */
		m->m_len = min((MCLBYTES - sizeof(struct pkthdr)), pkt_len);
		pkt_len -= m->m_len;
		if (alloc_mbuf_cluster(sc, desc))
			return((struct mbuf *)NULL);
		INC_MD_PTR(start_of_packet, sc->nrdre)
		desc = sc->recv_ring + start_of_packet;
		m->m_next = desc->buff.mbuf;
	} while (start_of_packet != sc->recv_next);

	/* Terminate the chain (the last link pointed at a fresh cluster). */
	m->m_next = 0;
	return(head);
}
370
/*
 * Build a packet by copying data out of the receive-ring buffers
 * [start_of_packet, sc->recv_next) into a freshly allocated mbuf
 * chain (shared-memory / DMA_FIXED modes).  Each drained ring buffer
 * is handed back to the controller (OWN set).  Returns the head mbuf,
 * or NULL (0) if allocation failed.
 */
static __inline struct mbuf *
mbuf_packet(struct lnc_softc *sc, int start_of_packet, int pkt_len)
{

	struct host_ring_entry *start;
	struct mbuf *head,*m,*m_prev;
	char *data,*mbuf_data;
	short blen;
	int amount;

	/* Get a pkthdr mbuf for the start of packet */
	MGETHDR(head, M_DONTWAIT, MT_DATA);
	if (!head) {
		LNCSTATS(drop_packet)
		return(0);
	}

	m = head;
	m->m_len = 0;
	start = sc->recv_ring + start_of_packet;
	/*blen = -(start->md->md2);*/
	blen = RECVBUFSIZE; /* XXX More PCnet-32 crap */
	data = start->buff.data;
	mbuf_data = m->m_data;

	while (start_of_packet != sc->recv_next) {
		/*
		 * If the data left fits in a single buffer then set
		 * blen to the size of the data left.
		 */
		if (pkt_len < blen)
			blen = pkt_len;

		/*
		 * amount is least of data in current ring buffer and
		 * amount of space left in current mbuf.
		 */
		amount = min(blen, M_TRAILINGSPACE(m));
		if (amount == 0) {
			/* mbuf must be empty */
			m_prev = m;
			MGET(m, M_DONTWAIT, MT_DATA);
			if (!m) {
				/* Out of mbufs; drop the whole packet. */
				m_freem(head);
				return(0);
			}
			/* Worth a cluster only if enough data remains. */
			if (pkt_len >= MINCLSIZE)
				MCLGET(m, M_DONTWAIT);
			m->m_len = 0;
			m_prev->m_next = m;
			amount = min(blen, M_TRAILINGSPACE(m));
			mbuf_data = m->m_data;
		}
		bcopy(data, mbuf_data, amount);
		blen -= amount;
		pkt_len -= amount;
		m->m_len += amount;
		data += amount;
		mbuf_data += amount;

		if (blen == 0) {
			/* Ring buffer drained: return it to the chip. */
			start->md->md1 &= HADR;
			start->md->md1 |= OWN;
			start->md->md2 = -RECVBUFSIZE; /* XXX - shouldn't be necessary */
			INC_MD_PTR(start_of_packet, sc->nrdre)
			start = sc->recv_ring + start_of_packet;
			data = start->buff.data;
			/*blen = -(start->md->md2);*/
			blen = RECVBUFSIZE; /* XXX More PCnet-32 crap */
		}
	}
	return(head);
}
444
445
/*
 * Receive-interrupt service: walk the receive ring from sc->recv_next,
 * assemble each completed packet (zero-copy chain_mbufs() in DMA_MBUF
 * mode, copying mbuf_packet() otherwise) and pass it up through
 * ifp->if_input, or drop it and recycle its buffers on error.
 * Finally acknowledges RINT in CSR0.
 */
static __inline void
lnc_rint(struct lnc_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct host_ring_entry *next, *start;
	int start_of_packet;
	struct mbuf *head;
	struct ether_header *eh;
	int lookahead;
	int flags;
	int pkt_len;

	/*
	 * The LANCE will issue a RINT interrupt when the ownership of the
	 * last buffer of a receive packet has been relinquished by the LANCE.
	 * Therefore, it can be assumed that a complete packet can be found
	 * before hitting buffers that are still owned by the LANCE, if not
	 * then there is a bug in the driver that is causing the descriptors
	 * to get out of sync.
	 */

#ifdef DIAGNOSTIC
	if ((sc->recv_ring + sc->recv_next)->md->md1 & OWN) {
		int unit = ifp->if_unit;
		log(LOG_ERR, "lnc%d: Receive interrupt with buffer still owned by controller -- Resetting\n", unit);
		lnc_reset(sc);
		return;
	}
	if (!((sc->recv_ring + sc->recv_next)->md->md1 & STP)) {
		int unit = ifp->if_unit;
		log(LOG_ERR, "lnc%d: Receive interrupt but not start of packet -- Resetting\n", unit);
		lnc_reset(sc);
		return;
	}
#endif

	lookahead = 0;
	next = sc->recv_ring + sc->recv_next;
	while ((flags = next->md->md1) & STP) {

		/* Make a note of the start of the packet */
		start_of_packet = sc->recv_next;

		/*
		 * Find the end of the packet. Even if not data chaining,
		 * jabber packets can overrun into a second descriptor.
		 * If there is no error, then the ENP flag is set in the last
		 * descriptor of the packet. If there is an error then the ERR
		 * flag will be set in the descriptor where the error occurred.
		 * Therefore, to find the last buffer of a packet we search for
		 * either ERR or ENP.
		 */

		if (!(flags & (ENP | MDERR))) {
			do {
				INC_MD_PTR(sc->recv_next, sc->nrdre)
				next = sc->recv_ring + sc->recv_next;
				flags = next->md->md1;
			} while (!(flags & (STP | OWN | ENP | MDERR)));

			if (flags & STP) {
				/* New packet began before the old one ended. */
				int unit = ifp->if_unit;
				log(LOG_ERR, "lnc%d: Start of packet found before end of previous in receive ring -- Resetting\n", unit);
				lnc_reset(sc);
				return;
			}
			if (flags & OWN) {
				if (lookahead) {
					/*
					 * Looked ahead into a packet still
					 * being received
					 */
					sc->recv_next = start_of_packet;
					break;
				} else {
					int unit = ifp->if_unit;
					log(LOG_ERR, "lnc%d: End of received packet not found-- Resetting\n", unit);
					lnc_reset(sc);
					return;
				}
			}
		}

		/* Byte count in md3 includes the FCS; strip it. */
		pkt_len = (next->md->md3 & MCNT) - FCS_LEN;

		/* Move pointer onto start of next packet */
		INC_MD_PTR(sc->recv_next, sc->nrdre)
		next = sc->recv_ring + sc->recv_next;

		if (flags & MDERR) {
			int unit = ifp->if_unit;
			if (flags & RBUFF) {
				LNCSTATS(rbuff)
				log(LOG_ERR, "lnc%d: Receive buffer error\n", unit);
			}
			if (flags & OFLO) {
				/* OFLO only valid if ENP is not set */
				if (!(flags & ENP)) {
					LNCSTATS(oflo)
					log(LOG_ERR, "lnc%d: Receive overflow error \n", unit);
				}
			} else if (flags & ENP) {
				if ((ifp->if_flags & IFF_PROMISC)==0) {
					/*
					 * FRAM and CRC are valid only if ENP
					 * is set and OFLO is not.
					 */
					if (flags & FRAM) {
						LNCSTATS(fram)
						log(LOG_ERR, "lnc%d: Framing error\n", unit);
						/*
						 * FRAM is only set if there's a CRC
						 * error so avoid multiple messages
						 */
					} else if (flags & CRC) {
						LNCSTATS(crc)
						log(LOG_ERR, "lnc%d: Receive CRC error\n", unit);
					}
				}
			}

			/* Drop packet: hand every descriptor back to the chip. */
			LNCSTATS(rerr)
			ifp->if_ierrors++;
			while (start_of_packet != sc->recv_next) {
				start = sc->recv_ring + start_of_packet;
				start->md->md2 = -RECVBUFSIZE; /* XXX - shouldn't be necessary */
				start->md->md1 &= HADR;
				start->md->md1 |= OWN;
				INC_MD_PTR(start_of_packet, sc->nrdre)
			}
		} else { /* Valid packet */

			ifp->if_ipackets++;


			if (sc->nic.mem_mode == DMA_MBUF)
				head = chain_mbufs(sc, start_of_packet, pkt_len);
			else
				head = mbuf_packet(sc, start_of_packet, pkt_len);

			if (head) {
				/*
				 * First mbuf in packet holds the
				 * ethernet and packet headers
				 */
				head->m_pkthdr.rcvif = ifp;
				head->m_pkthdr.len = pkt_len ;
				eh = (struct ether_header *) head->m_data;

				/*
				 * vmware ethernet hardware emulation loops
				 * packets back to itself, violates IFF_SIMPLEX.
				 * drop it if it is from myself.
				 */
				if (bcmp(eh->ether_shost,
				    sc->arpcom.ac_enaddr, ETHER_ADDR_LEN) == 0) {
					m_freem(head);
				} else {
					(*ifp->if_input)(ifp, head);
				}
			} else {
				int unit = ifp->if_unit;
				log(LOG_ERR,"lnc%d: Packet dropped, no mbufs\n",unit);
				LNCSTATS(drop_packet)
			}
		}

		lookahead++;
	}

	/*
	 * At this point all completely received packets have been processed
	 * so clear RINT since any packets that have arrived while we were in
	 * here have been dealt with.
	 */

	lnc_outw(sc->rdp, RINT | INEA);
}
625
/*
 * Transmit-interrupt service: walk the transmit ring from
 * sc->trans_next, account for each completed (or failed) packet,
 * reclaim its descriptors and mbufs, clear IFF_OACTIVE, and finally
 * acknowledge TINT in CSR0.  Resets the driver on fatal errors
 * (underflow, ring desynchronisation).
 */
static __inline void
lnc_tint(struct lnc_softc *sc)
{
	struct host_ring_entry *next, *start;
	int start_of_packet;
	int lookahead;

	/*
	 * If the driver is reset in this routine then we return immediately to
	 * the interrupt driver routine. Any interrupts that have occurred
	 * since the reset will be dealt with there. sc->trans_next
	 * should point to the start of the first packet that was awaiting
	 * transmission after the last transmit interrupt was dealt with. The
	 * LANCE should have relinquished ownership of that descriptor before
	 * the interrupt. Therefore, sc->trans_next should point to a
	 * descriptor with STP set and OWN cleared. If not then the driver's
	 * pointers are out of sync with the LANCE, which signifies a bug in
	 * the driver. Therefore, the following two checks are really
	 * diagnostic, since if the driver is working correctly they should
	 * never happen.
	 */

#ifdef DIAGNOSTIC
	if ((sc->trans_ring + sc->trans_next)->md->md1 & OWN) {
		int unit = sc->arpcom.ac_if.if_unit;
		log(LOG_ERR, "lnc%d: Transmit interrupt with buffer still owned by controller -- Resetting\n", unit);
		lnc_reset(sc);
		return;
	}
#endif


	/*
	 * The LANCE will write the status information for the packet it just
	 * tried to transmit in one of two places. If the packet was
	 * transmitted successfully then the status will be written into the
	 * last descriptor of the packet. If the transmit failed then the
	 * status will be written into the descriptor that was being accessed
	 * when the error occurred and all subsequent descriptors in that
	 * packet will have been relinquished by the LANCE.
	 *
	 * At this point we know that sc->trans_next points to the start
	 * of a packet that the LANCE has just finished trying to transmit.
	 * We now search for a buffer with either ENP or ERR set.
	 */

	lookahead = 0;

	do {
		start_of_packet = sc->trans_next;
		next = sc->trans_ring + sc->trans_next;

#ifdef DIAGNOSTIC
		if (!(next->md->md1 & STP)) {
			int unit = sc->arpcom.ac_if.if_unit;
			log(LOG_ERR, "lnc%d: Transmit interrupt but not start of packet -- Resetting\n", unit);
			lnc_reset(sc);
			return;
		}
#endif

		/*
		 * Find end of packet.
		 */

		if (!(next->md->md1 & (ENP | MDERR))) {
			do {
				INC_MD_PTR(sc->trans_next, sc->ntdre)
				next = sc->trans_ring + sc->trans_next;
			} while (!(next->md->md1 & (STP | OWN | ENP | MDERR)));

			if (next->md->md1 & STP) {
				int unit = sc->arpcom.ac_if.if_unit;
				log(LOG_ERR, "lnc%d: Start of packet found before end of previous in transmit ring -- Resetting\n", unit);
				lnc_reset(sc);
				return;
			}
			if (next->md->md1 & OWN) {
				if (lookahead) {
					/*
					 * Looked ahead into a packet still
					 * being transmitted
					 */
					sc->trans_next = start_of_packet;
					break;
				} else {
					int unit = sc->arpcom.ac_if.if_unit;
					log(LOG_ERR, "lnc%d: End of transmitted packet not found -- Resetting\n", unit);
					lnc_reset(sc);
					return;
				}
			}
		}
		/*
		 * Check for ERR first since other flags are irrelevant if an
		 * error occurred.
		 */
		if (next->md->md1 & MDERR) {

			int unit = sc->arpcom.ac_if.if_unit;

			LNCSTATS(terr)
			sc->arpcom.ac_if.if_oerrors++;

			if (next->md->md3 & LCOL) {
				LNCSTATS(lcol)
				log(LOG_ERR, "lnc%d: Transmit late collision -- Net error?\n", unit);
				sc->arpcom.ac_if.if_collisions++;
				/*
				 * Clear TBUFF since it's not valid when LCOL
				 * set
				 */
				next->md->md3 &= ~TBUFF;
			}
			if (next->md->md3 & LCAR) {
				LNCSTATS(lcar)
				log(LOG_ERR, "lnc%d: Loss of carrier during transmit -- Net error?\n", unit);
			}
			if (next->md->md3 & RTRY) {
				LNCSTATS(rtry)
				log(LOG_ERR, "lnc%d: Transmit of packet failed after 16 attempts -- TDR = %d\n", unit, ((sc->trans_ring + sc->trans_next)->md->md3 & TDR));
				sc->arpcom.ac_if.if_collisions += 16;
				/*
				 * Clear TBUFF since it's not valid when RTRY
				 * set
				 */
				next->md->md3 &= ~TBUFF;
			}
			/*
			 * TBUFF is only valid if neither LCOL nor RTRY are set.
			 * We need to check UFLO after LCOL and RTRY so that we
			 * know whether or not TBUFF is valid. If either are
			 * set then TBUFF will have been cleared above. A
			 * UFLO error will turn off the transmitter so we
			 * have to reset.
			 *
			 */

			if (next->md->md3 & UFLO) {
				LNCSTATS(uflo)
				/*
				 * If an UFLO has occurred it's possibly due
				 * to a TBUFF error
				 */
				if (next->md->md3 & TBUFF) {
					LNCSTATS(tbuff)
					log(LOG_ERR, "lnc%d: Transmit buffer error -- Resetting\n", unit);
				} else
					log(LOG_ERR, "lnc%d: Transmit underflow error -- Resetting\n", unit);
				lnc_reset(sc);
				return;
			}
			/* Skip the remaining descriptors of the failed packet. */
			do {
				INC_MD_PTR(sc->trans_next, sc->ntdre)
				next = sc->trans_ring + sc->trans_next;
			} while (!(next->md->md1 & STP) && (sc->trans_next != sc->next_to_send));

		} else {
			/*
			 * Since we check for ERR first then if we get here
			 * the packet was transmitted correctly. There may
			 * still have been non-fatal errors though.
			 * Don't bother checking for DEF, waste of time.
			 */

			sc->arpcom.ac_if.if_opackets++;

			if (next->md->md1 & MORE) {
				LNCSTATS(more)
				sc->arpcom.ac_if.if_collisions += 2;
			}

			/*
			 * ONE is invalid if LCOL is set. If LCOL was set then
			 * ERR would have also been set and we would have
			 * returned from lnc_tint above. Therefore we can
			 * assume if we arrive here that ONE is valid.
			 *
			 */

			if (next->md->md1 & ONE) {
				LNCSTATS(one)
				sc->arpcom.ac_if.if_collisions++;
			}
			INC_MD_PTR(sc->trans_next, sc->ntdre)
			next = sc->trans_ring + sc->trans_next;
		}

		/*
		 * Clear descriptors and free any mbufs.
		 */

		do {
			start = sc->trans_ring + start_of_packet;
			start->md->md1 &= HADR;
			if (sc->nic.mem_mode == DMA_MBUF) {
				/* Cache clusters on a local queue */
				if ((start->buff.mbuf->m_flags & M_EXT) && (sc->mbuf_count < MBUF_CACHE_LIMIT)) {
					if (sc->mbuf_count) {
						start->buff.mbuf->m_next = sc->mbufs;
						sc->mbufs = start->buff.mbuf;
					} else
						sc->mbufs = start->buff.mbuf;
					sc->mbuf_count++;
					start->buff.mbuf = 0;
				} else {
					/*
					 * XXX should this be m_freem()?
					 */
					m_free(start->buff.mbuf);
					start->buff.mbuf = NULL;
				}
			}
			sc->pending_transmits--;
			INC_MD_PTR(start_of_packet, sc->ntdre)
		}while (start_of_packet != sc->trans_next);

		/*
		 * There's now at least one free descriptor
		 * in the ring so indicate that we can accept
		 * more packets again.
		 */

		sc->arpcom.ac_if.if_flags &= ~IFF_OACTIVE;

		lookahead++;

	} while (sc->pending_transmits && !(next->md->md1 & OWN));

	/*
	 * Clear TINT since we've dealt with all
	 * the completed transmissions.
	 */

	lnc_outw(sc->rdp, TINT | INEA);
}
862
/*
 * Bus-independent attach: initialise the ifnet/arpcom structure, read
 * the MAC address out of the card's address PROM, register the
 * interface with the network stack and print an identification line.
 * Returns 1 (this driver reports success unconditionally here).
 */
int
lnc_attach_common(device_t dev)
{
	int unit = device_get_unit(dev);
	lnc_softc_t *sc = device_get_softc(dev);
	int i;
	int skip;

	/*
	 * PROM byte spacing differs per card: BICC and CNET98S boards
	 * present the address on every other I/O location.
	 */
	switch (sc->nic.ident) {
	case BICC:
	case CNET98S:
		skip = 2;
		break;
	default:
		skip = 1;
		break;
	}

	/* Set default mode */
	sc->nic.mode = NORMAL;

	/* Fill in arpcom structure entries */

	sc->arpcom.ac_if.if_softc = sc;
	sc->arpcom.ac_if.if_name = "lnc";
	sc->arpcom.ac_if.if_unit = unit;
	sc->arpcom.ac_if.if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	sc->arpcom.ac_if.if_timer = 0;
	sc->arpcom.ac_if.if_output = ether_output;
	sc->arpcom.ac_if.if_start = lnc_start;
	sc->arpcom.ac_if.if_ioctl = lnc_ioctl;
	sc->arpcom.ac_if.if_watchdog = lnc_watchdog;
	sc->arpcom.ac_if.if_init = lnc_init;
	sc->arpcom.ac_if.if_snd.ifq_maxlen = IFQ_MAXLEN;

	/* Extract MAC address from PROM */
	for (i = 0; i < ETHER_ADDR_LEN; i++)
		sc->arpcom.ac_enaddr[i] = lnc_inb(i * skip);

	/*
	 * XXX -- should check return status of if_attach
	 */

	ether_ifattach(&sc->arpcom.ac_if, sc->arpcom.ac_enaddr);

	printf("lnc%d: ", unit);
	if (sc->nic.ic == LANCE || sc->nic.ic == C_LANCE)
		printf("%s (%s)",
		    nic_ident[sc->nic.ident], ic_ident[sc->nic.ic]);
	else
		printf("%s", ic_ident[sc->nic.ic]);
	printf(" address %6D\n", sc->arpcom.ac_enaddr, ":");

	return (1);
}
918
/*
 * (Re)initialise the controller: stop the chip, lay out the
 * initialisation block, descriptor rings and buffers in the DMA or
 * shared memory area, program the chip with the init block's physical
 * address, wait for IDON and then start it.  Runs at splimp(); also
 * used by lnc_reset().
 */
static void
lnc_init(xsc)
	void *xsc;
{
	struct lnc_softc *sc = xsc;
	int s, i;
	char *lnc_mem;

	/* Check that interface has valid address */

	if (TAILQ_EMPTY(&sc->arpcom.ac_if.if_addrhead)) {	/* XXX unlikely */
		printf("XXX no address?\n");
		return;
	}

	/* Shut down interface */

	s = splimp();
	lnc_stop(sc);
	sc->arpcom.ac_if.if_flags |= IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; /* XXX??? */

	/*
	 * This sets up the memory area for the controller. Memory is set up for
	 * the initialisation block (12 words of contiguous memory starting
	 * on a word boundary),the transmit and receive ring structures (each
	 * entry is 4 words long and must start on a quadword boundary) and
	 * the data buffers.
	 *
	 * The alignment tests are particularly paranoid.
	 */

	sc->recv_next = 0;
	sc->trans_ring = sc->recv_ring + NDESC(sc->nrdre);
	sc->trans_next = 0;

	/* Shared-memory cards use the mapped window; otherwise carve the
	 * area that follows the host ring entries. */
	if (sc->nic.mem_mode == SHMEM)
		lnc_mem = (char *) sc->nic.iobase;
	else
		lnc_mem = (char *) (sc->trans_ring + NDESC(sc->ntdre));

	/* Word-align for the init block, then quadword-align what follows. */
	lnc_mem = (char *)(((int)lnc_mem + 1) & ~1);
	sc->init_block = (struct init_block *) ((int) lnc_mem & ~1);
	lnc_mem = (char *) (sc->init_block + 1);
	lnc_mem = (char *)(((int)lnc_mem + 7) & ~7);

	/* Initialise pointers to descriptor entries */
	for (i = 0; i < NDESC(sc->nrdre); i++) {
		(sc->recv_ring + i)->md = (struct mds *) lnc_mem;
		lnc_mem += sizeof(struct mds);
	}
	for (i = 0; i < NDESC(sc->ntdre); i++) {
		(sc->trans_ring + i)->md = (struct mds *) lnc_mem;
		lnc_mem += sizeof(struct mds);
	}

	/* Initialise the remaining ring entries */

	if (sc->nic.mem_mode == DMA_MBUF) {

		sc->mbufs = 0;
		sc->mbuf_count = 0;

		/* Free previously allocated mbufs */
		if (sc->flags & LNC_INITIALISED)
			lnc_free_mbufs(sc);


		for (i = 0; i < NDESC(sc->nrdre); i++) {
			if (alloc_mbuf_cluster(sc, sc->recv_ring+i)) {
				log(LOG_ERR, "Initialisation failed -- no mbufs\n");
				splx(s);
				return;
			}
		}

		/* Transmit descriptors start empty; buffers come from
		 * the mbufs handed to lnc_start(). */
		for (i = 0; i < NDESC(sc->ntdre); i++) {
			(sc->trans_ring + i)->buff.mbuf = 0;
			(sc->trans_ring + i)->md->md0 = 0;
			(sc->trans_ring + i)->md->md1 = 0;
			(sc->trans_ring + i)->md->md2 = 0;
			(sc->trans_ring + i)->md->md3 = 0;
		}
	} else {
		/* Fixed-buffer modes: point each descriptor at a slice of
		 * the contiguous memory area; receive buffers go to the
		 * chip immediately (OWN set). */
		for (i = 0; i < NDESC(sc->nrdre); i++) {
			(sc->recv_ring + i)->md->md0 = kvtop(lnc_mem);
			(sc->recv_ring + i)->md->md1 = ((kvtop(lnc_mem) >> 16) & 0xff) | OWN;
			(sc->recv_ring + i)->md->md2 = -RECVBUFSIZE;
			(sc->recv_ring + i)->md->md3 = 0;
			(sc->recv_ring + i)->buff.data = lnc_mem;
			lnc_mem += RECVBUFSIZE;
		}
		for (i = 0; i < NDESC(sc->ntdre); i++) {
			(sc->trans_ring + i)->md->md0 = kvtop(lnc_mem);
			(sc->trans_ring + i)->md->md1 = ((kvtop(lnc_mem) >> 16) & 0xff);
			(sc->trans_ring + i)->md->md2 = 0;
			(sc->trans_ring + i)->md->md3 = 0;
			(sc->trans_ring + i)->buff.data = lnc_mem;
			lnc_mem += TRANSBUFSIZE;
		}
	}

	sc->next_to_send = 0;

	/* Set up initialisation block */

	sc->init_block->mode = sc->nic.mode;

	for (i = 0; i < ETHER_ADDR_LEN; i++)
		sc->init_block->padr[i] = sc->arpcom.ac_enaddr[i];

	lnc_setladrf(sc);

	/* Ring base addresses plus encoded ring lengths (log2, bits 13-15). */
	sc->init_block->rdra = kvtop(sc->recv_ring->md);
	sc->init_block->rlen = ((kvtop(sc->recv_ring->md) >> 16) & 0xff) | (sc->nrdre << 13);
	sc->init_block->tdra = kvtop(sc->trans_ring->md);
	sc->init_block->tlen = ((kvtop(sc->trans_ring->md) >> 16) & 0xff) | (sc->ntdre << 13);


	/* Set flags to show that the memory area is valid */
	sc->flags |= LNC_INITIALISED;

	sc->pending_transmits = 0;

	/* Give the LANCE the physical address of the initialisation block */

	if (sc->nic.ic == PCnet_Home) {
		u_short media;
		/* Set PHY_SEL to HomeRun */
		media = read_bcr(sc, BCR49);
		media &= ~3;
		media |= 1;
		write_bcr(sc, BCR49, media);
	}

	write_csr(sc, CSR1, kvtop(sc->init_block));
	write_csr(sc, CSR2, (kvtop(sc->init_block) >> 16) & 0xff);

	/*
	 * Depending on which controller this is, CSR3 has different meanings.
	 * For the Am7990 it controls DMA operations, for the Am79C960 it
	 * controls interrupt masks and transmitter algorithms. In either
	 * case, none of the flags are set.
	 *
	 */

	write_csr(sc, CSR3, 0);

	/* Let's see if it starts */
	/*
	printf("Enabling lnc interrupts\n");
	sc->arpcom.ac_if.if_timer = 10;
	write_csr(sc, CSR0, INIT|INEA);
	*/

	/*
	 * Now that the initialisation is complete there's no reason to
	 * access anything except CSR0, so we leave RAP pointing there
	 * so we can just access RDP from now on, saving an outw each
	 * time.
	 */

	/* Kick off initialisation and poll (bounded) for IDON. */
	write_csr(sc, CSR0, INIT);
	for(i=0; i < 1000; i++)
		if (read_csr(sc, CSR0) & IDON)
			break;

	if (read_csr(sc, CSR0) & IDON) {
		/*
		 * Enable interrupts, start the LANCE, mark the interface as
		 * running and transmit any pending packets.
		 */
		write_csr(sc, CSR0, STRT | INEA);
		sc->arpcom.ac_if.if_flags |= IFF_RUNNING;
		sc->arpcom.ac_if.if_flags &= ~IFF_OACTIVE;
		lnc_start(&sc->arpcom.ac_if);
	} else
		log(LOG_ERR, "lnc%d: Initialisation failed\n",
		    sc->arpcom.ac_if.if_unit);

	splx(s);
}
1100
1101 /*
1102 * The interrupt flag (INTR) will be set and provided that the interrupt enable
1103 * flag (INEA) is also set, the interrupt pin will be driven low when any of
1104 * the following occur:
1105 *
1106 * 1) Completion of the initialisation routine (IDON). 2) The reception of a
1107 * packet (RINT). 3) The transmission of a packet (TINT). 4) A transmitter
1108 * timeout error (BABL). 5) A missed packet (MISS). 6) A memory error (MERR).
1109 *
1110 * The interrupt flag is cleared when all of the above conditions are cleared.
1111 *
1112 * If the driver is reset from this routine then it first checks to see if any
 * interrupts have occurred since the reset and handles them before returning.
1114 * This is because the NIC may signify a pending interrupt in CSR0 using the
1115 * INTR flag even if a hardware interrupt is currently inhibited (at least I
1116 * think it does from reading the data sheets). We may as well deal with
1117 * these pending interrupts now rather than get the overhead of another
1118 * hardware interrupt immediately upon returning from the interrupt handler.
1119 *
1120 */
1121
/*
 * Interrupt service routine.  Re-reads CSR0 and dispatches on its status
 * bits until the INTR flag clears, so several events raised in quick
 * succession are handled in one pass through the handler.
 */
void
lncintr(void *arg)
{
	lnc_softc_t *sc = arg;
	int unit = sc->arpcom.ac_if.if_unit;
	u_short csr0;

	/*
	 * INEA is the only bit that can be cleared by writing a 0 to it so
	 * we have to include it in any writes that clear other flags.
	 */

	while ((csr0 = lnc_inw(sc->rdp)) & INTR) {

		/*
		 * Clear interrupt flags early to avoid race conditions. The
		 * controller can still set these flags even while we're in
		 * this interrupt routine. If the flag is still set from the
		 * event that caused this interrupt any new events will
		 * be missed.
		 */

		/*
		 * Writing the read value back acknowledges exactly the
		 * events we are about to handle; INEA is preserved because
		 * it was set in the value we read.
		 */
		lnc_outw(sc->rdp, csr0);
		/*lnc_outw(sc->rdp, IDON | CERR | BABL | MISS | MERR | RINT | TINT | INEA);*/

#ifdef notyet
		/* Deferred start-up path: finish init from interrupt context. */
		if (csr0 & IDON) {
			printf("IDON\n");
			sc->arpcom.ac_if.if_timer = 0;
			write_csr(sc, CSR0, STRT | INEA);
			sc->arpcom.ac_if.if_flags |= IFF_RUNNING;
			sc->arpcom.ac_if.if_flags &= ~IFF_OACTIVE;
			lnc_start(&sc->arpcom.ac_if);
			continue;
		}
#endif

		/* Error conditions first; MERR forces a full reset. */
		if (csr0 & ERR) {
			if (csr0 & CERR) {
				log(LOG_ERR, "lnc%d: Heartbeat error -- SQE test failed\n", unit);
				LNCSTATS(cerr)
			}
			if (csr0 & BABL) {
				log(LOG_ERR, "lnc%d: Babble error - more than 1519 bytes transmitted\n", unit);
				LNCSTATS(babl)
				sc->arpcom.ac_if.if_oerrors++;
			}
			if (csr0 & MISS) {
				log(LOG_ERR, "lnc%d: Missed packet -- no receive buffer\n", unit);
				LNCSTATS(miss)
				sc->arpcom.ac_if.if_ierrors++;
			}
			if (csr0 & MERR) {
				/*
				 * A memory error leaves the ring state
				 * untrustworthy; reset and re-read CSR0.
				 */
				log(LOG_ERR, "lnc%d: Memory error -- Resetting\n", unit);
				LNCSTATS(merr)
				lnc_reset(sc);
				continue;
			}
		}
		if (csr0 & RINT) {
			LNCSTATS(rint)
			lnc_rint(sc);
		}
		if (csr0 & TINT) {
			LNCSTATS(tint)
			/* A transmit completed, so disarm the watchdog timer. */
			sc->arpcom.ac_if.if_timer = 0;
			lnc_tint(sc);
		}

		/*
		 * If there's room in the transmit descriptor ring then queue
		 * some more transmit packets.
		 */

		if (!(sc->arpcom.ac_if.if_flags & IFF_OACTIVE))
			lnc_start(&sc->arpcom.ac_if);
	}
}
1200
1201 static __inline int
1202 mbuf_to_buffer(struct mbuf *m, char *buffer)
1203 {
1204
1205 int len=0;
1206
1207 for( ; m; m = m->m_next) {
1208 bcopy(mtod(m, caddr_t), buffer, m->m_len);
1209 buffer += m->m_len;
1210 len += m->m_len;
1211 }
1212
1213 return(len);
1214 }
1215
1216 static __inline struct mbuf *
1217 chain_to_cluster(struct mbuf *m)
1218 {
1219 struct mbuf *new;
1220
1221 MGET(new, M_DONTWAIT, MT_DATA);
1222 if (new) {
1223 MCLGET(new, M_DONTWAIT);
1224 if (new->m_ext.ext_buf) {
1225 new->m_len = mbuf_to_buffer(m, new->m_data);
1226 m_freem(m);
1227 return(new);
1228 } else
1229 m_free(new);
1230 }
1231 return(0);
1232 }
1233
1234 /*
1235 * IFF_OACTIVE and IFF_RUNNING are checked in ether_output so it's redundant
1236 * to check them again since we wouldn't have got here if they were not
1237 * appropriately set. This is also called from lnc_init and lncintr but the
1238 * flags should be ok at those points too.
1239 */
1240
/*
 * Dequeue packets from the interface send queue and load them into the
 * LANCE transmit descriptor ring, looping until the queue empties (we
 * return early) or the ring fills (we set IFF_OACTIVE and return).
 */
static void
lnc_start(struct ifnet *ifp)
{

	struct lnc_softc *sc = ifp->if_softc;
	struct host_ring_entry *desc;
	int tmp;
	int end_of_packet;
	struct mbuf *head, *m;
	int len, chunk;
	int addr;
	int no_entries_needed;

	do {

		IF_DEQUEUE(&sc->arpcom.ac_if.if_snd, head);
		if (!head)
			return;

		if (sc->nic.mem_mode == DMA_MBUF) {

			/* One ring descriptor is needed per mbuf in the chain. */
			no_entries_needed = 0;
			for (m=head; m; m = m->m_next)
				no_entries_needed++;

			/*
			 * We try and avoid bcopy as much as possible
			 * but there are two cases when we use it.
			 *
			 * 1) If there are not enough free entries in the ring
			 * to hold each mbuf in the chain then compact the
			 * chain into a single cluster.
			 *
			 * 2) The Am7990 and Am79C90 must not have less than
			 * 100 bytes in the first descriptor of a chained
			 * packet so it's necessary to shuffle the mbuf
			 * contents to ensure this.
			 */


			if (no_entries_needed > (NDESC(sc->ntdre) - sc->pending_transmits)) {
				if (!(head = chain_to_cluster(head))) {
					log(LOG_ERR, "lnc%d: Couldn't get mbuf for transmit packet -- Resetting \n ",ifp->if_unit);
					lnc_reset(sc);
					return;
				}
			} else if ((sc->nic.ic == LANCE) || (sc->nic.ic == C_LANCE)) {
				if ((head->m_len < 100) && (head->m_next)) {
					/* Pull bytes forward from later mbufs until the
					 * first one holds at least 100 bytes. */
					len = 100 - head->m_len;
					if (M_TRAILINGSPACE(head) < len) {
						/*
						 * Move data to start of data
						 * area. We assume the first
						 * mbuf has a packet header
						 * and is not a cluster.
						 */
						bcopy((caddr_t)head->m_data, (caddr_t)head->m_pktdat, head->m_len);
						head->m_data = head->m_pktdat;
					}
					m = head->m_next;
					while (m && (len > 0)) {
						chunk = min(len, m->m_len);
						bcopy(mtod(m, caddr_t), mtod(head, caddr_t) + head->m_len, chunk);
						len -= chunk;
						head->m_len += chunk;
						m->m_len -= chunk;
						m->m_data += chunk;
						if (m->m_len <= 0) {
							/* mbuf fully drained; unlink and free it. */
							m = m_free(m);
							head->m_next = m;
						}
					}
				}
			}

			tmp = sc->next_to_send;

			/*
			 * On entering this loop we know that tmp points to a
			 * descriptor with a clear OWN bit.
			 */

			desc = sc->trans_ring + tmp;
			/*
			 * len tracks how far short of the minimum frame size
			 * the packet is; any remainder is added to the last
			 * buffer's byte count below to pad the frame.
			 * NOTE(review): this pads to ETHER_MIN_LEN while the
			 * non-DMA path below pads to ETHER_MIN_LEN -
			 * ETHER_CRC_LEN -- confirm which is intended.
			 */
			len = ETHER_MIN_LEN;
			for (m = head; m; m = m->m_next) {
				desc->buff.mbuf = m;
				addr = kvtop(m->m_data);
				desc->md->md0 = addr;
				desc->md->md1 = ((addr >> 16) & 0xff);
				desc->md->md3 = 0;
				/* Byte count is stored as a negative (two's complement) value. */
				desc->md->md2 = -m->m_len;
				sc->pending_transmits++;
				len -= m->m_len;

				INC_MD_PTR(tmp, sc->ntdre)
				desc = sc->trans_ring + tmp;
			}

			/* Mark the last descriptor as end-of-packet. */
			end_of_packet = tmp;
			DEC_MD_PTR(tmp, sc->ntdre)
			desc = sc->trans_ring + tmp;
			desc->md->md1 |= ENP;

			if (len > 0)
				desc->md->md2 -= len;

			/*
			 * Set OWN bits in reverse order, otherwise the Lance
			 * could start sending the packet before all the
			 * buffers have been relinquished by the host.
			 */

			while (tmp != sc->next_to_send) {
				desc->md->md1 |= OWN;
				DEC_MD_PTR(tmp, sc->ntdre)
				desc = sc->trans_ring + tmp;
			}
			sc->next_to_send = end_of_packet;
			/* First descriptor: start-of-packet plus OWN last of all. */
			desc->md->md1 |= STP | OWN;
		} else {
			/* Non-DMA_MBUF modes: copy the chain into the
			 * descriptor's pre-allocated buffer. */
			sc->pending_transmits++;
			desc = sc->trans_ring + sc->next_to_send;
			len = mbuf_to_buffer(head, desc->buff.data);
			desc->md->md3 = 0;
			desc->md->md2 = -max(len, ETHER_MIN_LEN - ETHER_CRC_LEN);
			desc->md->md1 |= OWN | STP | ENP;
			INC_MD_PTR(sc->next_to_send, sc->ntdre)
		}

		/* Force an immediate poll of the transmit ring */
		lnc_outw(sc->rdp, TDMD | INEA);

		/*
		 * Set a timer so if the buggy Am7990 shuts
		 * down we can wake it up.
		 */

		ifp->if_timer = 2;

		BPF_MTAP(&sc->arpcom.ac_if, head);

		/* In DMA_MBUF mode the mbufs are still referenced by the
		 * ring and are freed later (presumably in lnc_tint). */
		if (sc->nic.mem_mode != DMA_MBUF)
			m_freem(head);

	} while (sc->pending_transmits < NDESC(sc->ntdre));

	/*
	 * Transmit ring is full so set IFF_OACTIVE
	 * since we can't buffer any more packets.
	 */

	sc->arpcom.ac_if.if_flags |= IFF_OACTIVE;
	LNCSTATS(trans_ring_full)
}
1395
1396 static int
1397 lnc_ioctl(struct ifnet * ifp, u_long command, caddr_t data)
1398 {
1399
1400 struct lnc_softc *sc = ifp->if_softc;
1401 int s, error = 0;
1402
1403 s = splimp();
1404
1405 switch (command) {
1406 case SIOCSIFFLAGS:
1407 #ifdef DEBUG
1408 if (ifp->if_flags & IFF_DEBUG)
1409 sc->lnc_debug = 1;
1410 else
1411 sc->lnc_debug = 0;
1412 #endif
1413 if (ifp->if_flags & IFF_PROMISC) {
1414 if (!(sc->nic.mode & PROM)) {
1415 sc->nic.mode |= PROM;
1416 lnc_init(sc);
1417 }
1418 } else if (sc->nic.mode & PROM) {
1419 sc->nic.mode &= ~PROM;
1420 lnc_init(sc);
1421 }
1422
1423 if ((ifp->if_flags & IFF_ALLMULTI) &&
1424 !(sc->flags & LNC_ALLMULTI)) {
1425 sc->flags |= LNC_ALLMULTI;
1426 lnc_init(sc);
1427 } else if (!(ifp->if_flags & IFF_ALLMULTI) &&
1428 (sc->flags & LNC_ALLMULTI)) {
1429 sc->flags &= ~LNC_ALLMULTI;
1430 lnc_init(sc);
1431 }
1432
1433 if ((ifp->if_flags & IFF_UP) == 0 &&
1434 (ifp->if_flags & IFF_RUNNING) != 0) {
1435 /*
1436 * If interface is marked down and it is running,
1437 * then stop it.
1438 */
1439 lnc_stop(sc);
1440 ifp->if_flags &= ~IFF_RUNNING;
1441 } else if ((ifp->if_flags & IFF_UP) != 0 &&
1442 (ifp->if_flags & IFF_RUNNING) == 0) {
1443 /*
1444 * If interface is marked up and it is stopped, then
1445 * start it.
1446 */
1447 lnc_init(sc);
1448 }
1449 break;
1450 case SIOCADDMULTI:
1451 case SIOCDELMULTI:
1452 lnc_init(sc);
1453 error = 0;
1454 break;
1455 default:
1456 error = ether_ioctl(ifp, command, data);
1457 break;
1458 }
1459 (void) splx(s);
1460 return error;
1461 }
1462
1463 static void
1464 lnc_watchdog(struct ifnet *ifp)
1465 {
1466 log(LOG_ERR, "lnc%d: Device timeout -- Resetting\n", ifp->if_unit);
1467 ifp->if_oerrors++;
1468 lnc_reset(ifp->if_softc);
1469 }
1470
1471 #ifdef DEBUG
/*
 * Dump the complete driver and NIC state to the console: the host-side
 * receive and transmit ring bookkeeping, the init block, both descriptor
 * rings as the chip sees them, and CSR0-CSR3.  DEBUG kernels only.
 * Reading CSR1-CSR3 moves RAP, so it is restored to CSR0 at the end.
 */
void
lnc_dump_state(struct lnc_softc *sc)
{
	int i;

	printf("\nDriver/NIC [%d] state dump\n", sc->arpcom.ac_if.if_unit);
	printf("Memory access mode: %b\n", sc->nic.mem_mode, MEM_MODES);
	printf("Host memory\n");
	printf("-----------\n");

	printf("Receive ring: base = %p, next = %p\n",
	    (void *)sc->recv_ring, (void *)(sc->recv_ring + sc->recv_next));
	for (i = 0; i < NDESC(sc->nrdre); i++)
		printf("\t%d:%p md = %p buff = %p\n",
		    i, (void *)(sc->recv_ring + i),
		    (void *)(sc->recv_ring + i)->md,
		    (void *)(sc->recv_ring + i)->buff.data);

	printf("Transmit ring: base = %p, next = %p\n",
	    (void *)sc->trans_ring, (void *)(sc->trans_ring + sc->trans_next));
	for (i = 0; i < NDESC(sc->ntdre); i++)
		printf("\t%d:%p md = %p buff = %p\n",
		    i, (void *)(sc->trans_ring + i),
		    (void *)(sc->trans_ring + i)->md,
		    (void *)(sc->trans_ring + i)->buff.data);
	printf("Lance memory (may be on host(DMA) or card(SHMEM))\n");
	printf("Init block = %p\n", (void *)sc->init_block);
	printf("\tmode = %b rlen:rdra = %x:%x tlen:tdra = %x:%x\n",
	    sc->init_block->mode, INIT_MODE, sc->init_block->rlen,
	    sc->init_block->rdra, sc->init_block->tlen, sc->init_block->tdra);
	printf("Receive descriptor ring\n");
	/* md2 holds the (negative) buffer byte count, hence the negation. */
	for (i = 0; i < NDESC(sc->nrdre); i++)
		printf("\t%d buffer = 0x%x%x, BCNT = %d,\tMCNT = %u,\tflags = %b\n",
		    i, ((sc->recv_ring + i)->md->md1 & HADR),
		    (sc->recv_ring + i)->md->md0,
		    -(short) (sc->recv_ring + i)->md->md2,
		    (sc->recv_ring + i)->md->md3,
		    (((sc->recv_ring + i)->md->md1 & ~HADR) >> 8), RECV_MD1);
	printf("Transmit descriptor ring\n");
	for (i = 0; i < NDESC(sc->ntdre); i++)
		printf("\t%d buffer = 0x%x%x, BCNT = %d,\tflags = %b %b\n",
		    i, ((sc->trans_ring + i)->md->md1 & HADR),
		    (sc->trans_ring + i)->md->md0,
		    -(short) (sc->trans_ring + i)->md->md2,
		    ((sc->trans_ring + i)->md->md1 >> 8), TRANS_MD1,
		    ((sc->trans_ring + i)->md->md3 >> 10), TRANS_MD3);
	printf("\nnext_to_send = %x\n", sc->next_to_send);
	printf("\n CSR0 = %b CSR1 = %x CSR2 = %x CSR3 = %x\n\n",
	    read_csr(sc, CSR0), CSR0_FLAGS, read_csr(sc, CSR1),
	    read_csr(sc, CSR2), read_csr(sc, CSR3));

	/* Set RAP back to CSR0 */
	lnc_outw(sc->rap, CSR0);
}
1526
1527 void
1528 mbuf_dump_chain(struct mbuf * m)
1529 {
1530
1531 #define MBUF_FLAGS \
1532 "\2\1M_EXT\2M_PKTHDR\3M_EOR\4UNKNOWN\5M_BCAST\6M_MCAST"
1533
1534 if (!m)
1535 log(LOG_DEBUG, "m == NULL\n");
1536 do {
1537 log(LOG_DEBUG, "m = %p\n", (void *)m);
1538 log(LOG_DEBUG, "m_hdr.mh_next = %p\n",
1539 (void *)m->m_hdr.mh_next);
1540 log(LOG_DEBUG, "m_hdr.mh_nextpkt = %p\n",
1541 (void *)m->m_hdr.mh_nextpkt);
1542 log(LOG_DEBUG, "m_hdr.mh_len = %d\n", m->m_hdr.mh_len);
1543 log(LOG_DEBUG, "m_hdr.mh_data = %p\n",
1544 (void *)m->m_hdr.mh_data);
1545 log(LOG_DEBUG, "m_hdr.mh_type = %d\n", m->m_hdr.mh_type);
1546 log(LOG_DEBUG, "m_hdr.mh_flags = %b\n", m->m_hdr.mh_flags,
1547 MBUF_FLAGS);
1548 if (!(m->m_hdr.mh_flags & (M_PKTHDR | M_EXT)))
1549 log(LOG_DEBUG, "M_dat.M_databuf = %p\n",
1550 (void *)m->M_dat.M_databuf);
1551 else {
1552 if (m->m_hdr.mh_flags & M_PKTHDR) {
1553 log(LOG_DEBUG, "M_dat.MH.MH_pkthdr.len = %d\n",
1554 m->M_dat.MH.MH_pkthdr.len);
1555 log(LOG_DEBUG,
1556 "M_dat.MH.MH_pkthdr.rcvif = %p\n",
1557 (void *)m->M_dat.MH.MH_pkthdr.rcvif);
1558 if (!(m->m_hdr.mh_flags & M_EXT))
1559 log(LOG_DEBUG,
1560 "M_dat.MH.MH_dat.MH_databuf = %p\n",
1561 (void *)m->M_dat.MH.MH_dat.MH_databuf);
1562 }
1563 if (m->m_hdr.mh_flags & M_EXT) {
1564 log(LOG_DEBUG,
1565 "M_dat.MH.MH_dat.MH_ext.ext_buff %p\n",
1566 (void *)m->M_dat.MH.MH_dat.MH_ext.ext_buf);
1567 log(LOG_DEBUG,
1568 "M_dat.MH.MH_dat.MH_ext.ext_free %p\n",
1569 (void *)m->M_dat.MH.MH_dat.MH_ext.ext_free);
1570 log(LOG_DEBUG,
1571 "M_dat.MH.MH_dat.MH_ext.ext_size %d\n",
1572 m->M_dat.MH.MH_dat.MH_ext.ext_size);
1573 }
1574 }
1575 } while ((m = m->m_next) != NULL);
1576 }
1577 #endif
Cache object: b570c43666d2b9de9b7c3a27ceb29f14
|