FreeBSD/Linux Kernel Cross Reference
sys/dev/lnc/if_lnc.c
1 /*-
2 * Copyright (c) 1994-2000
3 * Paul Richards. All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer,
10 * verbatim and that no modifications are made prior to this
11 * point in the file.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 * 3. The name Paul Richards may not be used to endorse or promote products
16 * derived from this software without specific prior written permission.
17 *
18 * THIS SOFTWARE IS PROVIDED BY PAUL RICHARDS ``AS IS'' AND
19 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
20 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21 * ARE DISCLAIMED. IN NO EVENT SHALL PAUL RICHARDS BE LIABLE
22 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
23 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
24 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
25 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
26 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
27 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
28 * SUCH DAMAGE.
29 */
30
31 #include <sys/cdefs.h>
32 __FBSDID("$FreeBSD$");
33
34 /*
35 #define DIAGNOSTIC
36 #define DEBUG
37 *
38 * TODO ----
39 *
40 * Check all the XXX comments -- some of them are just things I've left
41 * unfinished rather than "difficult" problems that were hacked around.
42 *
43 * Check log settings.
44 *
45 * Check how all the arpcom flags get set and used.
46 *
47 * Re-inline and re-static all routines after debugging.
48 *
49 * Remember to assign iobase in SHMEM probe routines.
50 *
51 * Replace all occurences of LANCE-controller-card etc in prints by the name
52 * strings of the appropriate type -- nifty window dressing
53 *
54 * Add DEPCA support -- mostly done.
55 *
56 */
57
58 #include "opt_inet.h"
59
60 /* Some defines that should really be in generic locations */
61 #define FCS_LEN 4
62 #define MULTICAST_FILTER_LEN 8
63
64 #include <sys/param.h>
65 #include <sys/systm.h>
66 #include <sys/bus.h>
67 #include <sys/kernel.h>
68 #include <sys/malloc.h>
69 #include <sys/mbuf.h>
70 #include <sys/module.h>
71 #include <sys/socket.h>
72 #include <sys/sockio.h>
73 #include <sys/syslog.h>
74
75 #include <machine/bus.h>
76 #include <machine/resource.h>
77 #include <sys/rman.h>
78
79 #include <net/ethernet.h>
80 #include <net/if.h>
81 #include <net/if_dl.h>
82 #include <net/if_types.h>
83
84 #include <netinet/in.h>
85 #include <netinet/if_ether.h>
86
87 #include <net/bpf.h>
88
89 #include <machine/md_var.h>
90
91 #include <dev/lnc/if_lncvar.h>
92 #include <dev/lnc/if_lncreg.h>
93
devclass_t lnc_devclass;

/* Printable card-type names, indexed by the sc->nic.ident enumeration. */
static char const * const nic_ident[] = {
	"Unknown",
	"BICC",
	"NE2100",
	"DEPCA",
	"CNET98S",	/* PC-98 */
};

/* Printable controller-chip names, indexed by the sc->nic.ic enumeration. */
static char const * const ic_ident[] = {
	"Unknown",
	"LANCE",
	"C-LANCE",
	"PCnet-ISA",
	"PCnet-ISA+",
	"PCnet-ISA II",
	"PCnet-32 VL-Bus",
	"PCnet-PCI",
	"PCnet-PCI II",
	"PCnet-FAST",
	"PCnet-FAST+",
	"PCnet-Home",
};

/* Forward declarations for the file-local routines below. */
static void lnc_setladrf(struct lnc_softc *sc);
static void lnc_reset(struct lnc_softc *sc);
static void lnc_free_mbufs(struct lnc_softc *sc);
static __inline int alloc_mbuf_cluster(struct lnc_softc *sc,
				       struct host_ring_entry *desc);
static __inline struct mbuf *chain_mbufs(struct lnc_softc *sc,
					 int start_of_packet,
					 int pkt_len);
static __inline struct mbuf *mbuf_packet(struct lnc_softc *sc,
					 int start_of_packet,
					 int pkt_len);
static void lnc_rint(struct lnc_softc *sc);
static void lnc_tint(struct lnc_softc *sc);

static void lnc_init(void *);
static __inline int mbuf_to_buffer(struct mbuf *m, char *buffer);
static __inline struct mbuf *chain_to_cluster(struct mbuf *m);
static void lnc_start(struct ifnet *ifp);
static int lnc_ioctl(struct ifnet *ifp, u_long command, caddr_t data);
static void lnc_watchdog(struct ifnet *ifp);
#ifdef DEBUG
void lnc_dump_state(struct lnc_softc *sc);
void mbuf_dump_chain(struct mbuf *m);
#endif
143
144 u_short
145 read_csr(struct lnc_softc *sc, u_short port)
146 {
147 lnc_outw(sc->rap, port);
148 return (lnc_inw(sc->rdp));
149 }
150
/*
 * Write a control/status register: select it through the register
 * address port (RAP), then write the value through the data port (RDP).
 * The RAP write must precede the RDP access.
 */
void
write_csr(struct lnc_softc *sc, u_short port, u_short val)
{
	lnc_outw(sc->rap, port);
	lnc_outw(sc->rdp, val);
}
157
/*
 * Write a bus configuration register: select it through RAP, then write
 * the value through the BCR data port (BDP).
 */
static __inline void
write_bcr(struct lnc_softc *sc, u_short port, u_short val)
{
	lnc_outw(sc->rap, port);
	lnc_outw(sc->bdp, val);
}
164
/*
 * Read a bus configuration register: select it through RAP, then fetch
 * the value through the BCR data port (BDP).
 */
static __inline u_short
read_bcr(struct lnc_softc *sc, u_short port)
{
	lnc_outw(sc->rap, port);
	return (lnc_inw(sc->bdp));
}
171
/*
 * Probe for a LANCE-family controller and distinguish the original
 * LANCE from the C-LANCE.  Returns C_LANCE, LANCE or UNKNOWN.
 *
 * A STOP is written first; a LANCE-family part should then read back the
 * STOP bit and (presumably, after the reset) a cleared CSR3 — NOTE(review):
 * the CSR3==0 expectation should be confirmed against the data sheet.
 */
int
lance_probe(struct lnc_softc *sc)
{
	write_csr(sc, CSR0, STOP);

	if ((lnc_inw(sc->rdp) & STOP) && ! (read_csr(sc, CSR3))) {
		/*
		 * Check to see if it's a C-LANCE. For the LANCE the INEA bit
		 * cannot be set while the STOP bit is. This restriction is
		 * removed for the C-LANCE.
		 */
		write_csr(sc, CSR0, INEA);
		if (read_csr(sc, CSR0) & INEA)
			return (C_LANCE);
		else
			return (LANCE);
	} else
		return (UNKNOWN);
}
191
/*
 * Release every bus resource acquired during attach: interrupt handler
 * and IRQ, I/O port range, DRQ line and DMA memory/tag.  Each resource
 * is checked before release, so this is safe to call after a partially
 * failed attach.
 */
void
lnc_release_resources(device_t dev)
{
	lnc_softc_t *sc = device_get_softc(dev);

	if (sc->irqres) {
		bus_teardown_intr(dev, sc->irqres, sc->intrhand);
		bus_release_resource(dev, SYS_RES_IRQ, sc->irqrid, sc->irqres);
	}

	if (sc->portres)
		bus_release_resource(dev, SYS_RES_IOPORT,
		    sc->portrid, sc->portres);
	if (sc->drqres)
		bus_release_resource(dev, SYS_RES_DRQ, sc->drqrid, sc->drqres);

	if (sc->dmat) {
		if (sc->dmamap) {
			/* Unload the map before freeing its backing memory. */
			bus_dmamap_unload(sc->dmat, sc->dmamap);
			/* recv_ring is the base of the bus_dmamem allocation. */
			bus_dmamem_free(sc->dmat, sc->recv_ring, sc->dmamap);
		}
		bus_dma_tag_destroy(sc->dmat);
	}
}
216
217 /*
218 * Set up the logical address filter for multicast packets
219 */
220 static __inline void
221 lnc_setladrf(struct lnc_softc *sc)
222 {
223 struct ifnet *ifp = &sc->arpcom.ac_if;
224 struct ifmultiaddr *ifma;
225 u_long index;
226 int i;
227
228 if (sc->flags & IFF_ALLMULTI) {
229 for (i=0; i < MULTICAST_FILTER_LEN; i++)
230 sc->init_block->ladrf[i] = 0xFF;
231 return;
232 }
233
234 /*
235 * For each multicast address, calculate a crc for that address and
236 * then use the high order 6 bits of the crc as a hash code where
237 * bits 3-5 select the byte of the address filter and bits 0-2 select
238 * the bit within that byte.
239 */
240
241 bzero(sc->init_block->ladrf, MULTICAST_FILTER_LEN);
242 IF_ADDR_LOCK(ifp);
243 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
244 if (ifma->ifma_addr->sa_family != AF_LINK)
245 continue;
246
247 index = ether_crc32_le(LLADDR((struct sockaddr_dl *)
248 ifma->ifma_addr), ETHER_ADDR_LEN) >> 26;
249 sc->init_block->ladrf[index >> 3] |= 1 << (index & 7);
250 }
251 IF_ADDR_UNLOCK(ifp);
252 }
253
/*
 * Halt the controller by setting the STOP bit in CSR0.  This aborts any
 * DMA in progress; a full re-initialisation (lnc_init) is needed to
 * restart the chip.
 */
void
lnc_stop(struct lnc_softc *sc)
{
	write_csr(sc, CSR0, STOP);
}
259
/*
 * Recover from a fatal error by re-running the full initialisation
 * sequence (which stops the chip, rebuilds the rings and restarts it).
 */
static void
lnc_reset(struct lnc_softc *sc)
{
	lnc_init(sc);
}
265
266 static void
267 lnc_free_mbufs(struct lnc_softc *sc)
268 {
269 int i;
270
271 /*
272 * We rely on other routines to keep the buff.mbuf field valid. If
273 * it's not NULL then we assume it points to an allocated mbuf.
274 */
275
276 for (i = 0; i < NDESC(sc->nrdre); i++)
277 if ((sc->recv_ring + i)->buff.mbuf)
278 m_free((sc->recv_ring + i)->buff.mbuf);
279
280 for (i = 0; i < NDESC(sc->ntdre); i++)
281 if ((sc->trans_ring + i)->buff.mbuf)
282 m_free((sc->trans_ring + i)->buff.mbuf);
283
284 if (sc->mbuf_count)
285 m_freem(sc->mbufs);
286 }
287
/*
 * Attach a fresh mbuf cluster to a receive descriptor and hand the
 * descriptor back to the controller by setting OWN.  Clusters are taken
 * from the driver's local cache when available, otherwise allocated.
 * Returns 0 on success, 1 if no mbuf or cluster could be obtained.
 */
static __inline int
alloc_mbuf_cluster(struct lnc_softc *sc, struct host_ring_entry *desc)
{
	register struct mds *md = desc->md;
	struct mbuf *m=0;
	int addr;

	/* Try and get cluster off local cache */
	if (sc->mbuf_count) {
		sc->mbuf_count--;
		m = sc->mbufs;
		sc->mbufs = m->m_next;
		/* XXX m->m_data = m->m_ext.ext_buf;*/
	} else {
		MGET(m, M_DONTWAIT, MT_DATA);
		if (!m)
			return(1);
		MCLGET(m, M_DONTWAIT);
		if (!m->m_ext.ext_buf) {
			/* Cluster allocation failed; release the bare mbuf. */
			m_free(m);
			return(1);
		}
	}

	desc->buff.mbuf = m;
	/*
	 * Fill the message descriptor: md0/md1 hold the 24-bit physical
	 * buffer address (high byte in md1 along with the OWN bit), md2
	 * the buffer length as a negative two's-complement count.
	 */
	addr = kvtop(m->m_data);
	md->md0 = addr;
	md->md1= ((addr >> 16) & 0xff) | OWN;
	md->md2 = -(short)(MCLBYTES - sizeof(struct pkthdr));
	md->md3 = 0;
	return(0);
}
320
/*
 * Build an mbuf chain for a received packet that occupies the ring
 * descriptors from start_of_packet up to (but not including)
 * sc->recv_next.  Each cluster handed up the stack is replaced in the
 * ring with a fresh one via alloc_mbuf_cluster().  Returns the head of
 * the chain, or NULL if a replacement cluster could not be allocated.
 */
static __inline struct mbuf *
chain_mbufs(struct lnc_softc *sc, int start_of_packet, int pkt_len)
{
	struct mbuf *head, *m;
	struct host_ring_entry *desc;

	/*
	 * Turn head into a pkthdr mbuf --
	 * assumes a pkthdr type mbuf was
	 * allocated to the descriptor
	 * originally.
	 */

	desc = sc->recv_ring + start_of_packet;

	head = desc->buff.mbuf;
	head->m_flags |= M_PKTHDR;
	bzero(&head->m_pkthdr, sizeof(head->m_pkthdr));

	m = head;
	do {
		/* Take the cluster belonging to this descriptor. */
		m = desc->buff.mbuf;
		m->m_len = min((MCLBYTES - sizeof(struct pkthdr)), pkt_len);
		pkt_len -= m->m_len;
		/* Re-arm the descriptor with a fresh cluster. */
		if (alloc_mbuf_cluster(sc, desc))
			return((struct mbuf *)NULL);
		INC_MD_PTR(start_of_packet, sc->nrdre)
		desc = sc->recv_ring + start_of_packet;
		/* Tentatively link to the next descriptor's mbuf. */
		m->m_next = desc->buff.mbuf;
	} while (start_of_packet != sc->recv_next);

	/* The last link pointed past the packet; terminate the chain. */
	m->m_next = 0;
	return(head);
}
355
/*
 * Copy a received packet out of the (shared/DMA) ring buffers into a
 * freshly allocated mbuf chain.  Used when the receive buffers are not
 * themselves mbufs (mem_mode != DMA_MBUF).  Walks the descriptors from
 * start_of_packet to sc->recv_next, copying into mbufs/clusters and
 * handing each drained descriptor back to the chip (OWN set).  Returns
 * the head of the chain, or NULL if an mbuf could not be allocated.
 */
static __inline struct mbuf *
mbuf_packet(struct lnc_softc *sc, int start_of_packet, int pkt_len)
{

	struct host_ring_entry *start;
	struct mbuf *head,*m,*m_prev;
	char *data,*mbuf_data;
	short blen;
	int amount;

	/* Get a pkthdr mbuf for the start of packet */
	MGETHDR(head, M_DONTWAIT, MT_DATA);
	if (!head) {
		LNCSTATS(drop_packet)
		return(0);
	}

	m = head;
	m->m_len = 0;
	start = sc->recv_ring + start_of_packet;
	/*blen = -(start->md->md2);*/
	blen = RECVBUFSIZE; /* XXX More PCnet-32 crap */
	data = start->buff.data;
	mbuf_data = m->m_data;

	while (start_of_packet != sc->recv_next) {
		/*
		 * If the data left fits in a single buffer then set
		 * blen to the size of the data left.
		 */
		if (pkt_len < blen)
			blen = pkt_len;

		/*
		 * amount is least of data in current ring buffer and
		 * amount of space left in current mbuf.
		 */
		amount = min(blen, M_TRAILINGSPACE(m));
		if (amount == 0) {
			/* mbuf must be empty */
			m_prev = m;
			MGET(m, M_DONTWAIT, MT_DATA);
			if (!m) {
				/* Out of mbufs: drop the partial chain. */
				m_freem(head);
				return(0);
			}
			/* Use a cluster only if enough data remains. */
			if (pkt_len >= MINCLSIZE)
				MCLGET(m, M_DONTWAIT);
			m->m_len = 0;
			m_prev->m_next = m;
			amount = min(blen, M_TRAILINGSPACE(m));
			mbuf_data = m->m_data;
		}
		bcopy(data, mbuf_data, amount);
		blen -= amount;
		pkt_len -= amount;
		m->m_len += amount;
		data += amount;
		mbuf_data += amount;

		if (blen == 0) {
			/* Ring buffer drained: give it back to the chip. */
			start->md->md1 &= HADR;
			start->md->md1 |= OWN;
			start->md->md2 = -RECVBUFSIZE; /* XXX - shouldn't be necessary */
			INC_MD_PTR(start_of_packet, sc->nrdre)
			start = sc->recv_ring + start_of_packet;
			data = start->buff.data;
			/*blen = -(start->md->md2);*/
			blen = RECVBUFSIZE; /* XXX More PCnet-32 crap */
		}
	}
	return(head);
}
429
430
/*
 * Receive interrupt handler.  Harvests every complete packet in the
 * receive ring, hands good packets up via ifp->if_input, recycles the
 * descriptors of bad packets, and finally acknowledges RINT.
 */
static void
lnc_rint(struct lnc_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct host_ring_entry *next, *start;
	int start_of_packet;
	struct mbuf *head;
	struct ether_header *eh;
	int lookahead;
	int flags;
	int pkt_len;

	/*
	 * The LANCE will issue a RINT interrupt when the ownership of the
	 * last buffer of a receive packet has been relinquished by the LANCE.
	 * Therefore, it can be assumed that a complete packet can be found
	 * before hitting buffers that are still owned by the LANCE, if not
	 * then there is a bug in the driver that is causing the descriptors
	 * to get out of sync.
	 */

#ifdef DIAGNOSTIC
	if ((sc->recv_ring + sc->recv_next)->md->md1 & OWN) {
		log(LOG_ERR, "%s: Receive interrupt with buffer still owned by controller -- Resetting\n", ifp->if_xname);
		lnc_reset(sc);
		return;
	}
	if (!((sc->recv_ring + sc->recv_next)->md->md1 & STP)) {
		log(LOG_ERR, "%s: Receive interrupt but not start of packet -- Resetting\n", ifp->if_xname);
		lnc_reset(sc);
		return;
	}
#endif

	lookahead = 0;
	next = sc->recv_ring + sc->recv_next;
	while ((flags = next->md->md1) & STP) {

		/* Make a note of the start of the packet */
		start_of_packet = sc->recv_next;

		/*
		 * Find the end of the packet. Even if not data chaining,
		 * jabber packets can overrun into a second descriptor.
		 * If there is no error, then the ENP flag is set in the last
		 * descriptor of the packet. If there is an error then the ERR
		 * flag will be set in the descriptor where the error occured.
		 * Therefore, to find the last buffer of a packet we search for
		 * either ERR or ENP.
		 */

		if (!(flags & (ENP | MDERR))) {
			do {
				INC_MD_PTR(sc->recv_next, sc->nrdre)
				next = sc->recv_ring + sc->recv_next;
				flags = next->md->md1;
			} while (!(flags & (STP | OWN | ENP | MDERR)));

			if (flags & STP) {
				log(LOG_ERR, "%s: Start of packet found before end of previous in receive ring -- Resetting\n", ifp->if_xname);
				lnc_reset(sc);
				return;
			}
			if (flags & OWN) {
				if (lookahead) {
					/*
					 * Looked ahead into a packet still
					 * being received
					 */
					sc->recv_next = start_of_packet;
					break;
				} else {
					log(LOG_ERR, "%s: End of received packet not found-- Resetting\n", ifp->if_xname);
					lnc_reset(sc);
					return;
				}
			}
		}

		/* md3 holds the byte count; strip the trailing FCS. */
		pkt_len = (next->md->md3 & MCNT) - FCS_LEN;

		/* Move pointer onto start of next packet */
		INC_MD_PTR(sc->recv_next, sc->nrdre)
		next = sc->recv_ring + sc->recv_next;

		if (flags & MDERR) {
			const char *if_xname = ifp->if_xname;
			if (flags & RBUFF) {
				LNCSTATS(rbuff)
				log(LOG_ERR, "%s: Receive buffer error\n", if_xname);
			}
			if (flags & OFLO) {
				/* OFLO only valid if ENP is not set */
				if (!(flags & ENP)) {
					LNCSTATS(oflo)
					log(LOG_ERR, "%s: Receive overflow error \n", if_xname);
				}
			} else if (flags & ENP) {
				if ((ifp->if_flags & IFF_PROMISC)==0) {
					/*
					 * FRAM and CRC are valid only if ENP
					 * is set and OFLO is not.
					 */
					if (flags & FRAM) {
						LNCSTATS(fram)
						log(LOG_ERR, "%s: Framing error\n", if_xname);
						/*
						 * FRAM is only set if there's a CRC
						 * error so avoid multiple messages
						 */
					} else if (flags & CRC) {
						LNCSTATS(crc)
						log(LOG_ERR, "%s: Receive CRC error\n", if_xname);
					}
				}
			}

			/* Drop packet: recycle every descriptor it used. */
			LNCSTATS(rerr)
			ifp->if_ierrors++;
			while (start_of_packet != sc->recv_next) {
				start = sc->recv_ring + start_of_packet;
				start->md->md2 = -RECVBUFSIZE; /* XXX - shouldn't be necessary */
				start->md->md1 &= HADR;
				start->md->md1 |= OWN;
				INC_MD_PTR(start_of_packet, sc->nrdre)
			}
		} else { /* Valid packet */

			ifp->if_ipackets++;


			if (sc->nic.mem_mode == DMA_MBUF)
				head = chain_mbufs(sc, start_of_packet, pkt_len);
			else
				head = mbuf_packet(sc, start_of_packet, pkt_len);

			if (head) {
				/*
				 * First mbuf in packet holds the
				 * ethernet and packet headers
				 */
				head->m_pkthdr.rcvif = ifp;
				head->m_pkthdr.len = pkt_len ;
				eh = (struct ether_header *) head->m_data;

				/*
				 * vmware ethernet hardware emulation loops
				 * packets back to itself, violates IFF_SIMPLEX.
				 * drop it if it is from myself.
				 */
				if (bcmp(eh->ether_shost,
				    sc->arpcom.ac_enaddr, ETHER_ADDR_LEN) == 0) {
					m_freem(head);
				} else {
					(*ifp->if_input)(ifp, head);
				}
			} else {
				log(LOG_ERR,"%s: Packet dropped, no mbufs\n",ifp->if_xname);
				LNCSTATS(drop_packet)
			}
		}

		lookahead++;
	}

	/*
	 * At this point all completely received packets have been processed
	 * so clear RINT since any packets that have arrived while we were in
	 * here have been dealt with.
	 */

	lnc_outw(sc->rdp, RINT | INEA);
}
605
/*
 * Transmit interrupt handler.  Walks the transmit ring from
 * sc->trans_next, accounting for completed or failed transmissions,
 * freeing/recycling transmit mbufs, and finally acknowledges TINT.
 * Fatal conditions (UFLO, out-of-sync pointers) reset the driver.
 */
static void
lnc_tint(struct lnc_softc *sc)
{
	struct host_ring_entry *next, *start;
	int start_of_packet;
	int lookahead;

	/*
	 * If the driver is reset in this routine then we return immediately to
	 * the interrupt driver routine. Any interrupts that have occured
	 * since the reset will be dealt with there. sc->trans_next
	 * should point to the start of the first packet that was awaiting
	 * transmission after the last transmit interrupt was dealt with. The
	 * LANCE should have relinquished ownership of that descriptor before
	 * the interrupt. Therefore, sc->trans_next should point to a
	 * descriptor with STP set and OWN cleared. If not then the driver's
	 * pointers are out of sync with the LANCE, which signifies a bug in
	 * the driver. Therefore, the following two checks are really
	 * diagnostic, since if the driver is working correctly they should
	 * never happen.
	 */

#ifdef DIAGNOSTIC
	if ((sc->trans_ring + sc->trans_next)->md->md1 & OWN) {
		log(LOG_ERR, "%s: Transmit interrupt with buffer still owned by controller -- Resetting\n", sc->arpcom.ac_if.if_xname);
		lnc_reset(sc);
		return;
	}
#endif


	/*
	 * The LANCE will write the status information for the packet it just
	 * tried to transmit in one of two places. If the packet was
	 * transmitted successfully then the status will be written into the
	 * last descriptor of the packet. If the transmit failed then the
	 * status will be written into the descriptor that was being accessed
	 * when the error occured and all subsequent descriptors in that
	 * packet will have been relinquished by the LANCE.
	 *
	 * At this point we know that sc->trans_next points to the start
	 * of a packet that the LANCE has just finished trying to transmit.
	 * We now search for a buffer with either ENP or ERR set.
	 */

	lookahead = 0;

	do {
		start_of_packet = sc->trans_next;
		next = sc->trans_ring + sc->trans_next;

#ifdef DIAGNOSTIC
		if (!(next->md->md1 & STP)) {
			log(LOG_ERR, "%s: Transmit interrupt but not start of packet -- Resetting\n", sc->arpcom.ac_if.if_xname);
			lnc_reset(sc);
			return;
		}
#endif

		/*
		 * Find end of packet.
		 */

		if (!(next->md->md1 & (ENP | MDERR))) {
			do {
				INC_MD_PTR(sc->trans_next, sc->ntdre)
				next = sc->trans_ring + sc->trans_next;
			} while (!(next->md->md1 & (STP | OWN | ENP | MDERR)));

			if (next->md->md1 & STP) {
				log(LOG_ERR, "%s: Start of packet found before end of previous in transmit ring -- Resetting\n", sc->arpcom.ac_if.if_xname);
				lnc_reset(sc);
				return;
			}
			if (next->md->md1 & OWN) {
				if (lookahead) {
					/*
					 * Looked ahead into a packet still
					 * being transmitted
					 */
					sc->trans_next = start_of_packet;
					break;
				} else {
					log(LOG_ERR, "%s: End of transmitted packet not found -- Resetting\n", sc->arpcom.ac_if.if_xname);
					lnc_reset(sc);
					return;
				}
			}
		}
		/*
		 * Check for ERR first since other flags are irrelevant if an
		 * error occurred.
		 */
		if (next->md->md1 & MDERR) {

			LNCSTATS(terr)
			sc->arpcom.ac_if.if_oerrors++;

			if (next->md->md3 & LCOL) {
				LNCSTATS(lcol)
				log(LOG_ERR, "%s: Transmit late collision -- Net error?\n", sc->arpcom.ac_if.if_xname);
				sc->arpcom.ac_if.if_collisions++;
				/*
				 * Clear TBUFF since it's not valid when LCOL
				 * set
				 */
				next->md->md3 &= ~TBUFF;
			}
			if (next->md->md3 & LCAR) {
				LNCSTATS(lcar)
				log(LOG_ERR, "%s: Loss of carrier during transmit -- Net error?\n", sc->arpcom.ac_if.if_xname);
			}
			if (next->md->md3 & RTRY) {
				LNCSTATS(rtry)
				log(LOG_ERR, "%s: Transmit of packet failed after 16 attempts -- TDR = %d\n", sc->arpcom.ac_if.if_xname, ((sc->trans_ring + sc->trans_next)->md->md3 & TDR));
				sc->arpcom.ac_if.if_collisions += 16;
				/*
				 * Clear TBUFF since it's not valid when RTRY
				 * set
				 */
				next->md->md3 &= ~TBUFF;
			}
			/*
			 * TBUFF is only valid if neither LCOL nor RTRY are set.
			 * We need to check UFLO after LCOL and RTRY so that we
			 * know whether or not TBUFF is valid. If either are
			 * set then TBUFF will have been cleared above. A
			 * UFLO error will turn off the transmitter so we
			 * have to reset.
			 *
			 */

			if (next->md->md3 & UFLO) {
				LNCSTATS(uflo)
				/*
				 * If an UFLO has occured it's possibly due
				 * to a TBUFF error
				 */
				if (next->md->md3 & TBUFF) {
					LNCSTATS(tbuff)
					log(LOG_ERR, "%s: Transmit buffer error -- Resetting\n", sc->arpcom.ac_if.if_xname);
				} else
					log(LOG_ERR, "%s: Transmit underflow error -- Resetting\n", sc->arpcom.ac_if.if_xname);
				lnc_reset(sc);
				return;
			}
			/* Skip to the start of the next packet, if any. */
			do {
				INC_MD_PTR(sc->trans_next, sc->ntdre)
				next = sc->trans_ring + sc->trans_next;
			} while (!(next->md->md1 & STP) && (sc->trans_next != sc->next_to_send));

		} else {
			/*
			 * Since we check for ERR first then if we get here
			 * the packet was transmitted correctly. There may
			 * still have been non-fatal errors though.
			 * Don't bother checking for DEF, waste of time.
			 */

			sc->arpcom.ac_if.if_opackets++;

			if (next->md->md1 & MORE) {
				LNCSTATS(more)
				sc->arpcom.ac_if.if_collisions += 2;
			}

			/*
			 * ONE is invalid if LCOL is set. If LCOL was set then
			 * ERR would have also been set and we would have
			 * returned from lnc_tint above. Therefore we can
			 * assume if we arrive here that ONE is valid.
			 *
			 */

			if (next->md->md1 & ONE) {
				LNCSTATS(one)
				sc->arpcom.ac_if.if_collisions++;
			}
			INC_MD_PTR(sc->trans_next, sc->ntdre)
			next = sc->trans_ring + sc->trans_next;
		}

		/*
		 * Clear descriptors and free any mbufs.
		 */

		do {
			start = sc->trans_ring + start_of_packet;
			start->md->md1 &= HADR;
			if (sc->nic.mem_mode == DMA_MBUF) {
				/* Cache clusters on a local queue */
				if ((start->buff.mbuf->m_flags & M_EXT) && (sc->mbuf_count < MBUF_CACHE_LIMIT)) {
					if (sc->mbuf_count) {
						start->buff.mbuf->m_next = sc->mbufs;
						sc->mbufs = start->buff.mbuf;
					} else
						sc->mbufs = start->buff.mbuf;
					sc->mbuf_count++;
					start->buff.mbuf = 0;
				} else {
					/*
					 * XXX should this be m_freem()?
					 */
					m_free(start->buff.mbuf);
					start->buff.mbuf = NULL;
				}
			}
			sc->pending_transmits--;
			INC_MD_PTR(start_of_packet, sc->ntdre)
		}while (start_of_packet != sc->trans_next);

		/*
		 * There's now at least one free descriptor
		 * in the ring so indicate that we can accept
		 * more packets again.
		 */

		sc->arpcom.ac_if.if_flags &= ~IFF_OACTIVE;

		lookahead++;

	} while (sc->pending_transmits && !(next->md->md1 & OWN));

	/*
	 * Clear TINT since we've dealt with all
	 * the completed transmissions.
	 */

	lnc_outw(sc->rdp, TINT | INEA);
}
836
/*
 * Common attach code shared by the bus front ends: fill in the
 * arpcom/ifnet structure, read the MAC address out of the station
 * address PROM and attach the ethernet interface.  Always returns 1
 * (XXX -- should check return status of if_attach).
 */
int
lnc_attach_common(device_t dev)
{
	lnc_softc_t *sc = device_get_softc(dev);
	int i;
	int skip;

	/*
	 * On BICC and CNET98S boards the PROM bytes are on every other
	 * I/O address, so read with a stride of 2.
	 */
	switch (sc->nic.ident) {
	case BICC:
	case CNET98S:
		skip = 2;
		break;
	default:
		skip = 1;
		break;
	}

	/* Set default mode */
	sc->nic.mode = NORMAL;

	/* Fill in arpcom structure entries */

	sc->arpcom.ac_if.if_softc = sc;
	if_initname(&sc->arpcom.ac_if, device_get_name(dev),
	    device_get_unit(dev));
	sc->arpcom.ac_if.if_flags = IFF_BROADCAST | IFF_SIMPLEX |
	    IFF_MULTICAST | IFF_NEEDSGIANT;
	sc->arpcom.ac_if.if_timer = 0;
	sc->arpcom.ac_if.if_start = lnc_start;
	sc->arpcom.ac_if.if_ioctl = lnc_ioctl;
	sc->arpcom.ac_if.if_watchdog = lnc_watchdog;
	sc->arpcom.ac_if.if_init = lnc_init;
	IFQ_SET_MAXLEN(&sc->arpcom.ac_if.if_snd, IFQ_MAXLEN);
	sc->arpcom.ac_if.if_snd.ifq_drv_maxlen = IFQ_MAXLEN;
	IFQ_SET_READY(&sc->arpcom.ac_if.if_snd);

	/* Extract MAC address from PROM */
	for (i = 0; i < ETHER_ADDR_LEN; i++)
		sc->arpcom.ac_enaddr[i] = lnc_inb(i * skip);

	/*
	 * XXX -- should check return status of if_attach
	 */

	ether_ifattach(&sc->arpcom.ac_if, sc->arpcom.ac_enaddr);

	/* Report the board type, plus the chip type for LANCE parts. */
	if (sc->nic.ic == LANCE || sc->nic.ic == C_LANCE)
		if_printf(&sc->arpcom.ac_if, "%s (%s)\n",
		    nic_ident[sc->nic.ident], ic_ident[sc->nic.ic]);
	else
		if_printf(&sc->arpcom.ac_if, "%s\n", ic_ident[sc->nic.ic]);

	return (1);
}
891
/*
 * (Re)initialise the controller: stop it, lay out the initialisation
 * block, descriptor rings and buffers in the driver's memory area, load
 * the init block address into CSR1/CSR2, then start the chip and mark
 * the interface running.  Also serves as the reset path (lnc_reset).
 */
static void
lnc_init(xsc)
	void *xsc;
{
	struct lnc_softc *sc = xsc;
	int s, i;
	char *lnc_mem;

	/* Shut down interface */

	s = splimp();
	lnc_stop(sc);
	sc->arpcom.ac_if.if_flags |= IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; /* XXX??? */

	/*
	 * This sets up the memory area for the controller. Memory is set up for
	 * the initialisation block (12 words of contiguous memory starting
	 * on a word boundary),the transmit and receive ring structures (each
	 * entry is 4 words long and must start on a quadword boundary) and
	 * the data buffers.
	 *
	 * The alignment tests are particularly paranoid.
	 */

	sc->recv_next = 0;
	/* The transmit ring entries follow the receive ring entries. */
	sc->trans_ring = sc->recv_ring + NDESC(sc->nrdre);
	sc->trans_next = 0;

	if (sc->nic.mem_mode == SHMEM)
		lnc_mem = (char *)(uintptr_t)sc->nic.iobase;
	else
		lnc_mem = (char *) (sc->trans_ring + NDESC(sc->ntdre));

	/* Word-align for the init block, then 8-byte-align for the mds. */
	lnc_mem = (char *)(((long)lnc_mem + 1) & ~1);
	sc->init_block = (struct init_block *) ((long) lnc_mem & ~1);
	lnc_mem = (char *) (sc->init_block + 1);
	lnc_mem = (char *)(((long)lnc_mem + 7) & ~7);

	/* Initialise pointers to descriptor entries */
	for (i = 0; i < NDESC(sc->nrdre); i++) {
		(sc->recv_ring + i)->md = (struct mds *) lnc_mem;
		lnc_mem += sizeof(struct mds);
	}
	for (i = 0; i < NDESC(sc->ntdre); i++) {
		(sc->trans_ring + i)->md = (struct mds *) lnc_mem;
		lnc_mem += sizeof(struct mds);
	}

	/* Initialise the remaining ring entries */

	if (sc->nic.mem_mode == DMA_MBUF) {

		sc->mbufs = 0;
		sc->mbuf_count = 0;

		/* Free previously allocated mbufs */
		if (sc->flags & LNC_INITIALISED)
			lnc_free_mbufs(sc);


		/* Arm every receive descriptor with an mbuf cluster. */
		for (i = 0; i < NDESC(sc->nrdre); i++) {
			if (alloc_mbuf_cluster(sc, sc->recv_ring+i)) {
				log(LOG_ERR, "Initialisation failed -- no mbufs\n");
				splx(s);
				return;
			}
		}

		for (i = 0; i < NDESC(sc->ntdre); i++) {
			(sc->trans_ring + i)->buff.mbuf = 0;
			(sc->trans_ring + i)->md->md0 = 0;
			(sc->trans_ring + i)->md->md1 = 0;
			(sc->trans_ring + i)->md->md2 = 0;
			(sc->trans_ring + i)->md->md3 = 0;
		}
	} else {
		/* Fixed buffers: carve them out of the memory area. */
		for (i = 0; i < NDESC(sc->nrdre); i++) {
			(sc->recv_ring + i)->md->md0 = kvtop(lnc_mem);
			(sc->recv_ring + i)->md->md1 = ((kvtop(lnc_mem) >> 16) & 0xff) | OWN;
			(sc->recv_ring + i)->md->md2 = -RECVBUFSIZE;
			(sc->recv_ring + i)->md->md3 = 0;
			(sc->recv_ring + i)->buff.data = lnc_mem;
			lnc_mem += RECVBUFSIZE;
		}
		for (i = 0; i < NDESC(sc->ntdre); i++) {
			(sc->trans_ring + i)->md->md0 = kvtop(lnc_mem);
			(sc->trans_ring + i)->md->md1 = ((kvtop(lnc_mem) >> 16) & 0xff);
			(sc->trans_ring + i)->md->md2 = 0;
			(sc->trans_ring + i)->md->md3 = 0;
			(sc->trans_ring + i)->buff.data = lnc_mem;
			lnc_mem += TRANSBUFSIZE;
		}
	}

	sc->next_to_send = 0;

	/* Set up initialisation block */

	sc->init_block->mode = sc->nic.mode;

	for (i = 0; i < ETHER_ADDR_LEN; i++)
		sc->init_block->padr[i] = sc->arpcom.ac_enaddr[i];

	lnc_setladrf(sc);

	/* Ring addresses with ring length encoded in the top 3 bits. */
	sc->init_block->rdra = kvtop(sc->recv_ring->md);
	sc->init_block->rlen = ((kvtop(sc->recv_ring->md) >> 16) & 0xff) | (sc->nrdre << 13);
	sc->init_block->tdra = kvtop(sc->trans_ring->md);
	sc->init_block->tlen = ((kvtop(sc->trans_ring->md) >> 16) & 0xff) | (sc->ntdre << 13);


	/* Set flags to show that the memory area is valid */
	sc->flags |= LNC_INITIALISED;

	sc->pending_transmits = 0;

	/* Give the LANCE the physical address of the initialisation block */

	if (sc->nic.ic == PCnet_Home) {
		u_short media;
		/* Set PHY_SEL to HomeRun */
		media = read_bcr(sc, BCR49);
		media &= ~3;
		media |= 1;
		write_bcr(sc, BCR49, media);
	}

	write_csr(sc, CSR1, kvtop(sc->init_block));
	write_csr(sc, CSR2, (kvtop(sc->init_block) >> 16) & 0xff);

	/*
	 * Depending on which controller this is, CSR3 has different meanings.
	 * For the Am7990 it controls DMA operations, for the Am79C960 it
	 * controls interrupt masks and transmitter algorithms. In either
	 * case, none of the flags are set.
	 *
	 */

	write_csr(sc, CSR3, 0);

	/* Let's see if it starts */
	/*
	printf("Enabling lnc interrupts\n");
	sc->arpcom.ac_if.if_timer = 10;
	write_csr(sc, CSR0, INIT|INEA);
	*/

	/*
	 * Now that the initialisation is complete there's no reason to
	 * access anything except CSR0, so we leave RAP pointing there
	 * so we can just access RDP from now on, saving an outw each
	 * time.
	 */

	/* Busy-wait (bounded) for the chip to finish initialisation. */
	write_csr(sc, CSR0, INIT);
	for(i=0; i < 1000; i++)
		if (read_csr(sc, CSR0) & IDON)
			break;

	if (read_csr(sc, CSR0) & IDON) {
		/*
		 * Enable interrupts, start the LANCE, mark the interface as
		 * running and transmit any pending packets.
		 */
		write_csr(sc, CSR0, STRT | INEA);
		sc->arpcom.ac_if.if_flags |= IFF_RUNNING;
		sc->arpcom.ac_if.if_flags &= ~IFF_OACTIVE;
		lnc_start(&sc->arpcom.ac_if);
	} else
		log(LOG_ERR, "%s: Initialisation failed\n",
		    sc->arpcom.ac_if.if_xname);

	splx(s);
}
1066
1067 /*
1068 * The interrupt flag (INTR) will be set and provided that the interrupt enable
1069 * flag (INEA) is also set, the interrupt pin will be driven low when any of
1070 * the following occur:
1071 *
1072 * 1) Completion of the initialisation routine (IDON). 2) The reception of a
1073 * packet (RINT). 3) The transmission of a packet (TINT). 4) A transmitter
1074 * timeout error (BABL). 5) A missed packet (MISS). 6) A memory error (MERR).
1075 *
1076 * The interrupt flag is cleared when all of the above conditions are cleared.
1077 *
1078 * If the driver is reset from this routine then it first checks to see if any
1079 * interrupts have ocurred since the reset and handles them before returning.
1080 * This is because the NIC may signify a pending interrupt in CSR0 using the
1081 * INTR flag even if a hardware interrupt is currently inhibited (at least I
1082 * think it does from reading the data sheets). We may as well deal with
1083 * these pending interrupts now rather than get the overhead of another
1084 * hardware interrupt immediately upon returning from the interrupt handler.
1085 *
1086 */
1087
/*
 * Interrupt service routine.  Loops while CSR0 reports a pending
 * interrupt, acknowledging the cause bits up front and then dispatching
 * to the error, receive (lnc_rint) and transmit (lnc_tint) handlers.
 */
void
lncintr(void *arg)
{
	lnc_softc_t *sc = arg;
	u_short csr0;

	/*
	 * INEA is the only bit that can be cleared by writing a 0 to it so
	 * we have to include it in any writes that clear other flags.
	 */

	while ((csr0 = lnc_inw(sc->rdp)) & INTR) {

		/*
		 * Clear interrupt flags early to avoid race conditions. The
		 * controller can still set these flags even while we're in
		 * this interrupt routine. If the flag is still set from the
		 * event that caused this interrupt any new events will
		 * be missed.
		 */

		lnc_outw(sc->rdp, csr0);
		/*lnc_outw(sc->rdp, IDON | CERR | BABL | MISS | MERR | RINT | TINT | INEA);*/

#ifdef notyet
		if (csr0 & IDON) {
			printf("IDON\n");
			sc->arpcom.ac_if.if_timer = 0;
			write_csr(sc, CSR0, STRT | INEA);
			sc->arpcom.ac_if.if_flags |= IFF_RUNNING;
			sc->arpcom.ac_if.if_flags &= ~IFF_OACTIVE;
			lnc_start(&sc->arpcom.ac_if);
			continue;
		}
#endif

		if (csr0 & ERR) {
			if (csr0 & CERR) {
				log(LOG_ERR, "%s: Heartbeat error -- SQE test failed\n", sc->arpcom.ac_if.if_xname);
				LNCSTATS(cerr)
			}
			if (csr0 & BABL) {
				log(LOG_ERR, "%s: Babble error - more than 1519 bytes transmitted\n", sc->arpcom.ac_if.if_xname);
				LNCSTATS(babl)
				sc->arpcom.ac_if.if_oerrors++;
			}
			if (csr0 & MISS) {
				log(LOG_ERR, "%s: Missed packet -- no receive buffer\n", sc->arpcom.ac_if.if_xname);
				LNCSTATS(miss)
				sc->arpcom.ac_if.if_ierrors++;
			}
			if (csr0 & MERR) {
				/* Memory errors are fatal: reset the chip. */
				log(LOG_ERR, "%s: Memory error -- Resetting\n", sc->arpcom.ac_if.if_xname);
				LNCSTATS(merr)
				lnc_reset(sc);
				continue;
			}
		}
		if (csr0 & RINT) {
			LNCSTATS(rint)
			lnc_rint(sc);
		}
		if (csr0 & TINT) {
			LNCSTATS(tint)
			sc->arpcom.ac_if.if_timer = 0;
			lnc_tint(sc);
		}

		/*
		 * If there's room in the transmit descriptor ring then queue
		 * some more transmit packets.
		 */

		if (!(sc->arpcom.ac_if.if_flags & IFF_OACTIVE))
			lnc_start(&sc->arpcom.ac_if);
	}
}
1165
1166 static __inline int
1167 mbuf_to_buffer(struct mbuf *m, char *buffer)
1168 {
1169
1170 int len=0;
1171
1172 for( ; m; m = m->m_next) {
1173 bcopy(mtod(m, caddr_t), buffer, m->m_len);
1174 buffer += m->m_len;
1175 len += m->m_len;
1176 }
1177
1178 return(len);
1179 }
1180
1181 static __inline struct mbuf *
1182 chain_to_cluster(struct mbuf *m)
1183 {
1184 struct mbuf *new;
1185
1186 MGET(new, M_DONTWAIT, MT_DATA);
1187 if (new) {
1188 MCLGET(new, M_DONTWAIT);
1189 if (new->m_ext.ext_buf) {
1190 new->m_len = mbuf_to_buffer(m, new->m_data);
1191 m_freem(m);
1192 return(new);
1193 } else
1194 m_free(new);
1195 }
1196 return(0);
1197 }
1198
1199 /*
1200 * IFF_OACTIVE and IFF_RUNNING are checked in ether_output so it's redundant
1201 * to check them again since we wouldn't have got here if they were not
1202 * appropriately set. This is also called from lnc_init and lncintr but the
1203 * flags should be ok at those points too.
1204 */
1205
/*
 * Dequeue packets from the interface send queue and hand them to the
 * controller's transmit descriptor ring until either the queue is
 * empty or the ring is full (in which case IFF_OACTIVE is set).
 */
static void
lnc_start(struct ifnet *ifp)
{

	struct lnc_softc *sc = ifp->if_softc;
	struct host_ring_entry *desc;
	int tmp;
	int end_of_packet;
	struct mbuf *head, *m;
	int len, chunk;
	int addr;
	int no_entries_needed;

	do {

		IFQ_DRV_DEQUEUE(&sc->arpcom.ac_if.if_snd, head);
		if (!head)
			return;

		if (sc->nic.mem_mode == DMA_MBUF) {

			/* One descriptor is consumed per mbuf in the chain. */
			no_entries_needed = 0;
			for (m=head; m; m = m->m_next)
				no_entries_needed++;

			/*
			 * We try and avoid bcopy as much as possible
			 * but there are two cases when we use it.
			 *
			 * 1) If there are not enough free entries in the ring
			 * to hold each mbuf in the chain then compact the
			 * chain into a single cluster.
			 *
			 * 2) The Am7990 and Am79C90 must not have less than
			 * 100 bytes in the first descriptor of a chained
			 * packet so it's necessary to shuffle the mbuf
			 * contents to ensure this.
			 */


			if (no_entries_needed > (NDESC(sc->ntdre) - sc->pending_transmits)) {
				if (!(head = chain_to_cluster(head))) {
					log(LOG_ERR, "%s: Couldn't get mbuf for transmit packet -- Resetting \n ",ifp->if_xname);
					lnc_reset(sc);
					return;
				}
			} else if ((sc->nic.ic == LANCE) || (sc->nic.ic == C_LANCE)) {
				if ((head->m_len < 100) && (head->m_next)) {
					/* Pull bytes forward until the head mbuf holds >= 100. */
					len = 100 - head->m_len;
					if (M_TRAILINGSPACE(head) < len) {
						/*
						 * Move data to start of data
						 * area. We assume the first
						 * mbuf has a packet header
						 * and is not a cluster.
						 */
						bcopy((caddr_t)head->m_data, (caddr_t)head->m_pktdat, head->m_len);
						head->m_data = head->m_pktdat;
					}
					m = head->m_next;
					while (m && (len > 0)) {
						chunk = min(len, m->m_len);
						bcopy(mtod(m, caddr_t), mtod(head, caddr_t) + head->m_len, chunk);
						len -= chunk;
						head->m_len += chunk;
						m->m_len -= chunk;
						m->m_data += chunk;
						if (m->m_len <= 0) {
							/* Drained this mbuf completely; unlink it. */
							m = m_free(m);
							head->m_next = m;
						}
					}
				}
			}

			tmp = sc->next_to_send;

			/*
			 * On entering this loop we know that tmp points to a
			 * descriptor with a clear OWN bit.
			 */

			/* Fill one descriptor per mbuf; md2 holds the negated length. */
			desc = sc->trans_ring + tmp;
			len = ETHER_MIN_LEN;
			for (m = head; m; m = m->m_next) {
				desc->buff.mbuf = m;
				addr = kvtop(m->m_data);
				desc->md->md0 = addr;
				desc->md->md1 = ((addr >> 16) & 0xff);
				desc->md->md3 = 0;
				desc->md->md2 = -m->m_len;
				sc->pending_transmits++;
				len -= m->m_len;

				INC_MD_PTR(tmp, sc->ntdre)
				desc = sc->trans_ring + tmp;
			}

			/* Mark the last buffer of the packet (ENP). */
			end_of_packet = tmp;
			DEC_MD_PTR(tmp, sc->ntdre)
			desc = sc->trans_ring + tmp;
			desc->md->md1 |= ENP;

			/* Pad short packets up to the ethernet minimum length. */
			if (len > 0)
				desc->md->md2 -= len;

			/*
			 * Set OWN bits in reverse order, otherwise the Lance
			 * could start sending the packet before all the
			 * buffers have been relinquished by the host.
			 */

			while (tmp != sc->next_to_send) {
				desc->md->md1 |= OWN;
				DEC_MD_PTR(tmp, sc->ntdre)
				desc = sc->trans_ring + tmp;
			}
			/* First descriptor goes last: STP | OWN releases the packet. */
			sc->next_to_send = end_of_packet;
			desc->md->md1 |= STP | OWN;
		} else {
			/* Shared-memory mode: copy the whole chain into the
			 * descriptor's fixed buffer and use one entry. */
			sc->pending_transmits++;
			desc = sc->trans_ring + sc->next_to_send;
			len = mbuf_to_buffer(head, desc->buff.data);
			desc->md->md3 = 0;
			desc->md->md2 = -max(len, ETHER_MIN_LEN - ETHER_CRC_LEN);
			desc->md->md1 |= OWN | STP | ENP;
			INC_MD_PTR(sc->next_to_send, sc->ntdre)
		}

		/* Force an immediate poll of the transmit ring */
		lnc_outw(sc->rdp, TDMD | INEA);

		/*
		 * Set a timer so if the buggy Am7990 shuts
		 * down we can wake it up.
		 */

		ifp->if_timer = 2;

		BPF_MTAP(&sc->arpcom.ac_if, head);

		/* In DMA_MBUF mode the mbufs are freed later by lnc_tint(). */
		if (sc->nic.mem_mode != DMA_MBUF)
			m_freem(head);

	} while (sc->pending_transmits < NDESC(sc->ntdre));

	/*
	 * Transmit ring is full so set IFF_OACTIVE
	 * since we can't buffer any more packets.
	 */

	sc->arpcom.ac_if.if_flags |= IFF_OACTIVE;
	LNCSTATS(trans_ring_full)
}
1360
1361 static int
1362 lnc_ioctl(struct ifnet * ifp, u_long command, caddr_t data)
1363 {
1364
1365 struct lnc_softc *sc = ifp->if_softc;
1366 int s, error = 0;
1367
1368 s = splimp();
1369
1370 switch (command) {
1371 case SIOCSIFFLAGS:
1372 #ifdef DEBUG
1373 if (ifp->if_flags & IFF_DEBUG)
1374 sc->lnc_debug = 1;
1375 else
1376 sc->lnc_debug = 0;
1377 #endif
1378 if (ifp->if_flags & IFF_PROMISC) {
1379 if (!(sc->nic.mode & PROM)) {
1380 sc->nic.mode |= PROM;
1381 lnc_init(sc);
1382 }
1383 } else if (sc->nic.mode & PROM) {
1384 sc->nic.mode &= ~PROM;
1385 lnc_init(sc);
1386 }
1387
1388 if ((ifp->if_flags & IFF_ALLMULTI) &&
1389 !(sc->flags & LNC_ALLMULTI)) {
1390 sc->flags |= LNC_ALLMULTI;
1391 lnc_init(sc);
1392 } else if (!(ifp->if_flags & IFF_ALLMULTI) &&
1393 (sc->flags & LNC_ALLMULTI)) {
1394 sc->flags &= ~LNC_ALLMULTI;
1395 lnc_init(sc);
1396 }
1397
1398 if ((ifp->if_flags & IFF_UP) == 0 &&
1399 (ifp->if_flags & IFF_RUNNING) != 0) {
1400 /*
1401 * If interface is marked down and it is running,
1402 * then stop it.
1403 */
1404 lnc_stop(sc);
1405 ifp->if_flags &= ~IFF_RUNNING;
1406 } else if ((ifp->if_flags & IFF_UP) != 0 &&
1407 (ifp->if_flags & IFF_RUNNING) == 0) {
1408 /*
1409 * If interface is marked up and it is stopped, then
1410 * start it.
1411 */
1412 lnc_init(sc);
1413 }
1414 break;
1415 case SIOCADDMULTI:
1416 case SIOCDELMULTI:
1417 lnc_init(sc);
1418 error = 0;
1419 break;
1420 default:
1421 error = ether_ioctl(ifp, command, data);
1422 break;
1423 }
1424 (void) splx(s);
1425 return error;
1426 }
1427
1428 static void
1429 lnc_watchdog(struct ifnet *ifp)
1430 {
1431 log(LOG_ERR, "%s: Device timeout -- Resetting\n", ifp->if_xname);
1432 ifp->if_oerrors++;
1433 lnc_reset(ifp->if_softc);
1434 }
1435
1436 #ifdef DEBUG
/*
 * DEBUG helper: dump the complete driver and controller state to the
 * console -- host ring bookkeeping, the shared init block, both
 * descriptor rings, and CSR0-CSR3.  Uses the kernel printf %b format
 * for bit-field decoding.
 */
void
lnc_dump_state(struct lnc_softc *sc)
{
	int i;

	printf("\nDriver/NIC [%s] state dump\n", sc->arpcom.ac_if.if_xname);
	printf("Memory access mode: %b\n", sc->nic.mem_mode, MEM_MODES);
	printf("Host memory\n");
	printf("-----------\n");

	printf("Receive ring: base = %p, next = %p\n",
	    (void *)sc->recv_ring, (void *)(sc->recv_ring + sc->recv_next));
	for (i = 0; i < NDESC(sc->nrdre); i++)
		printf("\t%d:%p md = %p buff = %p\n",
		    i, (void *)(sc->recv_ring + i),
		    (void *)(sc->recv_ring + i)->md,
		    (void *)(sc->recv_ring + i)->buff.data);

	printf("Transmit ring: base = %p, next = %p\n",
	    (void *)sc->trans_ring, (void *)(sc->trans_ring + sc->trans_next));
	for (i = 0; i < NDESC(sc->ntdre); i++)
		printf("\t%d:%p md = %p buff = %p\n",
		    i, (void *)(sc->trans_ring + i),
		    (void *)(sc->trans_ring + i)->md,
		    (void *)(sc->trans_ring + i)->buff.data);
	printf("Lance memory (may be on host(DMA) or card(SHMEM))\n");
	printf("Init block = %p\n", (void *)sc->init_block);
	printf("\tmode = %b rlen:rdra = %x:%x tlen:tdra = %x:%x\n",
	    sc->init_block->mode, INIT_MODE, sc->init_block->rlen,
	    sc->init_block->rdra, sc->init_block->tlen, sc->init_block->tdra);
	printf("Receive descriptor ring\n");
	/* md2 is stored negated (two's complement byte count) -- see lnc_start(). */
	for (i = 0; i < NDESC(sc->nrdre); i++)
		printf("\t%d buffer = 0x%x%x, BCNT = %d,\tMCNT = %u,\tflags = %b\n",
		    i, ((sc->recv_ring + i)->md->md1 & HADR),
		    (sc->recv_ring + i)->md->md0,
		    -(short) (sc->recv_ring + i)->md->md2,
		    (sc->recv_ring + i)->md->md3,
		    (((sc->recv_ring + i)->md->md1 & ~HADR) >> 8), RECV_MD1);
	printf("Transmit descriptor ring\n");
	for (i = 0; i < NDESC(sc->ntdre); i++)
		printf("\t%d buffer = 0x%x%x, BCNT = %d,\tflags = %b %b\n",
		    i, ((sc->trans_ring + i)->md->md1 & HADR),
		    (sc->trans_ring + i)->md->md0,
		    -(short) (sc->trans_ring + i)->md->md2,
		    ((sc->trans_ring + i)->md->md1 >> 8), TRANS_MD1,
		    ((sc->trans_ring + i)->md->md3 >> 10), TRANS_MD3);
	printf("\nnext_to_send = %x\n", sc->next_to_send);
	/* NOTE(review): reading CSR1-CSR3 changes RAP, hence the restore below. */
	printf("\n CSR0 = %b CSR1 = %x CSR2 = %x CSR3 = %x\n\n",
	    read_csr(sc, CSR0), CSR0_FLAGS, read_csr(sc, CSR1),
	    read_csr(sc, CSR2), read_csr(sc, CSR3));

	/* Set RAP back to CSR0 */
	lnc_outw(sc->rap, CSR0);
}
1491
1492 void
1493 mbuf_dump_chain(struct mbuf * m)
1494 {
1495
1496 #define MBUF_FLAGS \
1497 "\2\1M_EXT\2M_PKTHDR\3M_EOR\4UNKNOWN\5M_BCAST\6M_MCAST"
1498
1499 if (!m)
1500 log(LOG_DEBUG, "m == NULL\n");
1501 do {
1502 log(LOG_DEBUG, "m = %p\n", (void *)m);
1503 log(LOG_DEBUG, "m_hdr.mh_next = %p\n",
1504 (void *)m->m_hdr.mh_next);
1505 log(LOG_DEBUG, "m_hdr.mh_nextpkt = %p\n",
1506 (void *)m->m_hdr.mh_nextpkt);
1507 log(LOG_DEBUG, "m_hdr.mh_len = %d\n", m->m_hdr.mh_len);
1508 log(LOG_DEBUG, "m_hdr.mh_data = %p\n",
1509 (void *)m->m_hdr.mh_data);
1510 log(LOG_DEBUG, "m_hdr.mh_type = %d\n", m->m_hdr.mh_type);
1511 log(LOG_DEBUG, "m_hdr.mh_flags = %b\n", m->m_hdr.mh_flags,
1512 MBUF_FLAGS);
1513 if (!(m->m_hdr.mh_flags & (M_PKTHDR | M_EXT)))
1514 log(LOG_DEBUG, "M_dat.M_databuf = %p\n",
1515 (void *)m->M_dat.M_databuf);
1516 else {
1517 if (m->m_hdr.mh_flags & M_PKTHDR) {
1518 log(LOG_DEBUG, "M_dat.MH.MH_pkthdr.len = %d\n",
1519 m->M_dat.MH.MH_pkthdr.len);
1520 log(LOG_DEBUG,
1521 "M_dat.MH.MH_pkthdr.rcvif = %p\n",
1522 (void *)m->M_dat.MH.MH_pkthdr.rcvif);
1523 if (!(m->m_hdr.mh_flags & M_EXT))
1524 log(LOG_DEBUG,
1525 "M_dat.MH.MH_dat.MH_databuf = %p\n",
1526 (void *)m->M_dat.MH.MH_dat.MH_databuf);
1527 }
1528 if (m->m_hdr.mh_flags & M_EXT) {
1529 log(LOG_DEBUG,
1530 "M_dat.MH.MH_dat.MH_ext.ext_buff %p\n",
1531 (void *)m->M_dat.MH.MH_dat.MH_ext.ext_buf);
1532 log(LOG_DEBUG,
1533 "M_dat.MH.MH_dat.MH_ext.ext_free %p\n",
1534 (void *)m->M_dat.MH.MH_dat.MH_ext.ext_free);
1535 log(LOG_DEBUG,
1536 "M_dat.MH.MH_dat.MH_ext.ext_size %d\n",
1537 m->M_dat.MH.MH_dat.MH_ext.ext_size);
1538 }
1539 }
1540 } while ((m = m->m_next) != NULL);
1541 }
1542 #endif
Cache object: 8b4be752b35809b7cf96301246243260
|