FreeBSD/Linux Kernel Cross Reference
sys/dev/lnc/if_lnc.c
1 /*-
2 * Copyright (c) 1994-2000
3 * Paul Richards. All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer,
10 * verbatim and that no modifications are made prior to this
11 * point in the file.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 * 3. The name Paul Richards may not be used to endorse or promote products
16 * derived from this software without specific prior written permission.
17 *
18 * THIS SOFTWARE IS PROVIDED BY PAUL RICHARDS ``AS IS'' AND
19 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
20 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21 * ARE DISCLAIMED. IN NO EVENT SHALL PAUL RICHARDS BE LIABLE
22 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
23 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
24 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
25 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
26 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
27 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
28 * SUCH DAMAGE.
29 */
30
31 #include <sys/cdefs.h>
32 __FBSDID("$FreeBSD: releng/6.0/sys/dev/lnc/if_lnc.c 151143 2005-10-09 04:18:18Z delphij $");
33
34 /*
35 #define DIAGNOSTIC
36 #define DEBUG
37 *
38 * TODO ----
39 *
40 * Check all the XXX comments -- some of them are just things I've left
41 * unfinished rather than "difficult" problems that were hacked around.
42 *
43 * Check log settings.
44 *
45 * Check how all the arpcom flags get set and used.
46 *
47 * Re-inline and re-static all routines after debugging.
48 *
49 * Remember to assign iobase in SHMEM probe routines.
50 *
 * Replace all occurrences of LANCE-controller-card etc in prints by the name
52 * strings of the appropriate type -- nifty window dressing
53 *
54 * Add DEPCA support -- mostly done.
55 *
56 */
57
58 #include "opt_inet.h"
59
60 /* Some defines that should really be in generic locations */
61 #define FCS_LEN 4
62 #define MULTICAST_FILTER_LEN 8
63
64 #include <sys/param.h>
65 #include <sys/systm.h>
66 #include <sys/bus.h>
67 #include <sys/kernel.h>
68 #include <sys/malloc.h>
69 #include <sys/mbuf.h>
70 #include <sys/module.h>
71 #include <sys/socket.h>
72 #include <sys/sockio.h>
73 #include <sys/syslog.h>
74
75 #include <machine/bus.h>
76 #include <machine/resource.h>
77 #include <sys/rman.h>
78
79 #include <net/ethernet.h>
80 #include <net/if.h>
81 #include <net/if_dl.h>
82 #include <net/if_types.h>
83
84 #include <netinet/in.h>
85 #include <netinet/if_ether.h>
86
87 #include <net/bpf.h>
88
89 #include <machine/md_var.h>
90
91 #include <dev/lnc/if_lncvar.h>
92 #include <dev/lnc/if_lncreg.h>
93
devclass_t lnc_devclass;

/* Printable names for the supported board types, indexed by sc->nic.ident. */
static char const * const nic_ident[] = {
	"Unknown",
	"BICC",
	"NE2100",
	"DEPCA",
	"CNET98S",	/* PC-98 */
};

/* Printable names for the supported LANCE/PCnet chips, indexed by sc->nic.ic. */
static char const * const ic_ident[] = {
	"Unknown",
	"LANCE",
	"C-LANCE",
	"PCnet-ISA",
	"PCnet-ISA+",
	"PCnet-ISA II",
	"PCnet-32 VL-Bus",
	"PCnet-PCI",
	"PCnet-PCI II",
	"PCnet-FAST",
	"PCnet-FAST+",
	"PCnet-Home",
};

/* Forward declarations for the static driver routines defined below. */
static void lnc_setladrf(struct lnc_softc *sc);
static void lnc_reset(struct lnc_softc *sc);
static void lnc_free_mbufs(struct lnc_softc *sc);
static __inline int alloc_mbuf_cluster(struct lnc_softc *sc,
					    struct host_ring_entry *desc);
static __inline struct mbuf *chain_mbufs(struct lnc_softc *sc,
					      int start_of_packet,
					      int pkt_len);
static __inline struct mbuf *mbuf_packet(struct lnc_softc *sc,
					      int start_of_packet,
					      int pkt_len);
static void lnc_rint(struct lnc_softc *sc);
static void lnc_tint(struct lnc_softc *sc);

static void lnc_init(void *);
static __inline int mbuf_to_buffer(struct mbuf *m, char *buffer);
static __inline struct mbuf *chain_to_cluster(struct mbuf *m);
static void lnc_start(struct ifnet *ifp);
static int lnc_ioctl(struct ifnet *ifp, u_long command, caddr_t data);
static void lnc_watchdog(struct ifnet *ifp);
#ifdef DEBUG
void lnc_dump_state(struct lnc_softc *sc);
void mbuf_dump_chain(struct mbuf *m);
#endif
143
/*
 * Read a Control/Status Register: select the register number through the
 * Register Address Port (RAP), then read the value from the Register
 * Data Port (RDP).  The RAP write must precede the RDP access.
 */
u_short
read_csr(struct lnc_softc *sc, u_short port)
{
	lnc_outw(sc->rap, port);
	return (lnc_inw(sc->rdp));
}
150
/*
 * Write a Control/Status Register: select the register number through
 * RAP, then write the value through RDP.  The RAP write must precede
 * the RDP access.
 */
void
write_csr(struct lnc_softc *sc, u_short port, u_short val)
{
	lnc_outw(sc->rap, port);
	lnc_outw(sc->rdp, val);
}
157
/*
 * Write a Bus Configuration Register: select the register number through
 * RAP, then write the value through the BCR Data Port (BDP).
 */
static __inline void
write_bcr(struct lnc_softc *sc, u_short port, u_short val)
{
	lnc_outw(sc->rap, port);
	lnc_outw(sc->bdp, val);
}
164
/*
 * Read a Bus Configuration Register: select the register number through
 * RAP, then read the value from the BCR Data Port (BDP).
 */
static __inline u_short
read_bcr(struct lnc_softc *sc, u_short port)
{
	lnc_outw(sc->rap, port);
	return (lnc_inw(sc->bdp));
}
171
/*
 * Identify the controller type.  Returns C_LANCE, LANCE or UNKNOWN.
 * After a STOP, a real LANCE-family chip reads STOP back as set and has
 * CSR3 cleared; anything else is treated as not present.
 */
int
lance_probe(struct lnc_softc *sc)
{
	write_csr(sc, CSR0, STOP);

	if ((lnc_inw(sc->rdp) & STOP) && ! (read_csr(sc, CSR3))) {
		/*
		 * Check to see if it's a C-LANCE. For the LANCE the INEA bit
		 * cannot be set while the STOP bit is. This restriction is
		 * removed for the C-LANCE.
		 */
		write_csr(sc, CSR0, INEA);
		if (read_csr(sc, CSR0) & INEA)
			return (C_LANCE);
		else
			return (LANCE);
	} else
		return (UNKNOWN);
}
191
/*
 * Release every bus resource the device may hold: the interrupt handler
 * and IRQ, the I/O port range, the ISA DRQ, the busdma ring memory and
 * tag, and finally the ifnet.  Each resource is released only if its
 * pointer is non-NULL, so this is safe to call from a failed attach
 * with any subset of the resources allocated.
 */
void
lnc_release_resources(device_t dev)
{
	lnc_softc_t *sc = device_get_softc(dev);

	if (sc->irqres) {
		bus_teardown_intr(dev, sc->irqres, sc->intrhand);
		bus_release_resource(dev, SYS_RES_IRQ, sc->irqrid, sc->irqres);
	}

	if (sc->portres)
		bus_release_resource(dev, SYS_RES_IOPORT,
		    sc->portrid, sc->portres);
	if (sc->drqres)
		bus_release_resource(dev, SYS_RES_DRQ, sc->drqrid, sc->drqres);

	if (sc->dmat) {
		if (sc->dmamap) {
			/* Unload before freeing the DMA memory it maps. */
			bus_dmamap_unload(sc->dmat, sc->dmamap);
			bus_dmamem_free(sc->dmat, sc->recv_ring, sc->dmamap);
		}
		bus_dma_tag_destroy(sc->dmat);
	}

	if (sc->ifp)
		if_free(sc->ifp);
}
219
220 /*
221 * Set up the logical address filter for multicast packets
222 */
223 static __inline void
224 lnc_setladrf(struct lnc_softc *sc)
225 {
226 struct ifnet *ifp = sc->ifp;
227 struct ifmultiaddr *ifma;
228 u_long index;
229 int i;
230
231 if (sc->flags & IFF_ALLMULTI) {
232 for (i=0; i < MULTICAST_FILTER_LEN; i++)
233 sc->init_block->ladrf[i] = 0xFF;
234 return;
235 }
236
237 /*
238 * For each multicast address, calculate a crc for that address and
239 * then use the high order 6 bits of the crc as a hash code where
240 * bits 3-5 select the byte of the address filter and bits 0-2 select
241 * the bit within that byte.
242 */
243
244 bzero(sc->init_block->ladrf, MULTICAST_FILTER_LEN);
245 IF_ADDR_LOCK(ifp);
246 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
247 if (ifma->ifma_addr->sa_family != AF_LINK)
248 continue;
249
250 index = ether_crc32_le(LLADDR((struct sockaddr_dl *)
251 ifma->ifma_addr), ETHER_ADDR_LEN) >> 26;
252 sc->init_block->ladrf[index >> 3] |= 1 << (index & 7);
253 }
254 IF_ADDR_UNLOCK(ifp);
255 }
256
/*
 * Halt the controller by setting the STOP bit in CSR0; no further
 * receive or transmit activity occurs until the chip is re-initialised.
 */
void
lnc_stop(struct lnc_softc *sc)
{
	write_csr(sc, CSR0, STOP);
}
262
/*
 * Recover from a fatal ring/controller error by re-running the full
 * initialisation sequence.
 */
static void
lnc_reset(struct lnc_softc *sc)
{
	lnc_init(sc);
}
268
269 static void
270 lnc_free_mbufs(struct lnc_softc *sc)
271 {
272 int i;
273
274 /*
275 * We rely on other routines to keep the buff.mbuf field valid. If
276 * it's not NULL then we assume it points to an allocated mbuf.
277 */
278
279 for (i = 0; i < NDESC(sc->nrdre); i++)
280 if ((sc->recv_ring + i)->buff.mbuf)
281 m_free((sc->recv_ring + i)->buff.mbuf);
282
283 for (i = 0; i < NDESC(sc->ntdre); i++)
284 if ((sc->trans_ring + i)->buff.mbuf)
285 m_free((sc->trans_ring + i)->buff.mbuf);
286
287 if (sc->mbuf_count)
288 m_freem(sc->mbufs);
289 }
290
/*
 * Attach a fresh mbuf cluster to a receive descriptor and hand the
 * descriptor back to the controller.  Clusters come from the driver's
 * local cache when available, otherwise from the mbuf allocator.
 * Returns 0 on success, 1 if no mbuf or cluster could be obtained
 * (the descriptor is left with a stale buff.mbuf in that case).
 */
static __inline int
alloc_mbuf_cluster(struct lnc_softc *sc, struct host_ring_entry *desc)
{
	register struct mds *md = desc->md;
	struct mbuf *m=0;
	int addr;

	/* Try and get cluster off local cache */
	if (sc->mbuf_count) {
		sc->mbuf_count--;
		m = sc->mbufs;
		sc->mbufs = m->m_next;
		/* XXX m->m_data = m->m_ext.ext_buf;*/
	} else {
		MGET(m, M_DONTWAIT, MT_DATA);
		if (!m)
			return(1);
		MCLGET(m, M_DONTWAIT);
		if (!m->m_ext.ext_buf) {
			/* Got an mbuf but no cluster to go with it. */
			m_free(m);
			return(1);
		}
	}

	desc->buff.mbuf = m;
	/*
	 * Fill in the ring descriptor: md0/md1 carry the 24-bit physical
	 * buffer address, OWN gives the buffer to the controller, and md2
	 * holds the buffer length as a negative two's-complement count.
	 * The pkthdr-sized slack matches chain_mbufs(), which later turns
	 * the first cluster of a packet into a pkthdr mbuf in place.
	 */
	addr = kvtop(m->m_data);
	md->md0 = addr;
	md->md1= ((addr >> 16) & 0xff) | OWN;
	md->md2 = -(short)(MCLBYTES - sizeof(struct pkthdr));
	md->md3 = 0;
	return(0);
}
323
/*
 * DMA_MBUF mode receive path: build a packet by linking together the
 * cluster mbufs already attached to the descriptors from
 * start_of_packet up to sc->recv_next, replacing each descriptor's
 * cluster with a freshly allocated one as we go.  Returns the head of
 * the chain, or NULL if a replacement cluster could not be allocated
 * (the caller then drops the packet).
 */
static __inline struct mbuf *
chain_mbufs(struct lnc_softc *sc, int start_of_packet, int pkt_len)
{
	struct mbuf *head, *m;
	struct host_ring_entry *desc;

	/*
	 * Turn head into a pkthdr mbuf --
	 * assumes a pkthdr type mbuf was
	 * allocated to the descriptor
	 * originally.
	 */

	desc = sc->recv_ring + start_of_packet;

	head = desc->buff.mbuf;
	head->m_flags |= M_PKTHDR;
	bzero(&head->m_pkthdr, sizeof(head->m_pkthdr));

	m = head;
	do {
		m = desc->buff.mbuf;
		/* Per-buffer length; matches the slack left by alloc_mbuf_cluster(). */
		m->m_len = min((MCLBYTES - sizeof(struct pkthdr)), pkt_len);
		pkt_len -= m->m_len;
		/* Re-arm this descriptor with a new cluster before moving on. */
		if (alloc_mbuf_cluster(sc, desc))
			return((struct mbuf *)NULL);
		INC_MD_PTR(start_of_packet, sc->nrdre)
		desc = sc->recv_ring + start_of_packet;
		m->m_next = desc->buff.mbuf;
	} while (start_of_packet != sc->recv_next);

	/* The loop links one descriptor too far; terminate the chain here. */
	m->m_next = 0;
	return(head);
}
358
/*
 * Shared-memory / fixed-buffer receive path: copy a received packet out
 * of the ring buffers (from start_of_packet up to sc->recv_next) into a
 * freshly allocated mbuf chain, returning each ring buffer to the
 * controller (OWN set) as it is drained.  Returns the chain head, or
 * NULL if mbuf allocation failed (packet dropped).
 */
static __inline struct mbuf *
mbuf_packet(struct lnc_softc *sc, int start_of_packet, int pkt_len)
{

	struct host_ring_entry *start;
	struct mbuf *head,*m,*m_prev;
	char *data,*mbuf_data;
	short blen;
	int amount;

	/* Get a pkthdr mbuf for the start of packet */
	MGETHDR(head, M_DONTWAIT, MT_DATA);
	if (!head) {
		LNCSTATS(drop_packet)
		return(0);
	}

	m = head;
	m->m_len = 0;
	start = sc->recv_ring + start_of_packet;
	/*blen = -(start->md->md2);*/
	blen = RECVBUFSIZE; /* XXX More PCnet-32 crap */
	data = start->buff.data;
	mbuf_data = m->m_data;

	while (start_of_packet != sc->recv_next) {
		/*
		 * If the data left fits in a single buffer then set
		 * blen to the size of the data left.
		 */
		if (pkt_len < blen)
			blen = pkt_len;

		/*
		 * amount is least of data in current ring buffer and
		 * amount of space left in current mbuf.
		 */
		amount = min(blen, M_TRAILINGSPACE(m));
		if (amount == 0) {
			/* mbuf must be empty */
			m_prev = m;
			MGET(m, M_DONTWAIT, MT_DATA);
			if (!m) {
				m_freem(head);
				return(0);
			}
			/* Only worth a cluster if enough data remains. */
			if (pkt_len >= MINCLSIZE)
				MCLGET(m, M_DONTWAIT);
			m->m_len = 0;
			m_prev->m_next = m;
			amount = min(blen, M_TRAILINGSPACE(m));
			mbuf_data = m->m_data;
		}
		bcopy(data, mbuf_data, amount);
		blen -= amount;
		pkt_len -= amount;
		m->m_len += amount;
		data += amount;
		mbuf_data += amount;

		if (blen == 0) {
			/* Ring buffer drained: give it back to the controller. */
			start->md->md1 &= HADR;
			start->md->md1 |= OWN;
			start->md->md2 = -RECVBUFSIZE; /* XXX - shouldn't be necessary */
			INC_MD_PTR(start_of_packet, sc->nrdre)
			start = sc->recv_ring + start_of_packet;
			data = start->buff.data;
			/*blen = -(start->md->md2);*/
			blen = RECVBUFSIZE; /* XXX More PCnet-32 crap */
		}
	}
	return(head);
}
432
433
/*
 * Receive interrupt service.  Walks the receive descriptor ring from
 * sc->recv_next, assembling each completed packet (via chain_mbufs or
 * mbuf_packet depending on memory mode) and passing it up through
 * ifp->if_input, or dropping it and recycling its buffers on error.
 * Finally acknowledges RINT in CSR0.  Any ring inconsistency causes a
 * full driver reset via lnc_reset().
 */
static void
lnc_rint(struct lnc_softc *sc)
{
	struct ifnet *ifp = sc->ifp;
	struct host_ring_entry *next, *start;
	int start_of_packet;
	struct mbuf *head;
	struct ether_header *eh;
	int lookahead;
	int flags;
	int pkt_len;

	/*
	 * The LANCE will issue a RINT interrupt when the ownership of the
	 * last buffer of a receive packet has been relinquished by the LANCE.
	 * Therefore, it can be assumed that a complete packet can be found
	 * before hitting buffers that are still owned by the LANCE, if not
	 * then there is a bug in the driver that is causing the descriptors
	 * to get out of sync.
	 */

#ifdef DIAGNOSTIC
	if ((sc->recv_ring + sc->recv_next)->md->md1 & OWN) {
		log(LOG_ERR, "%s: Receive interrupt with buffer still owned by controller -- Resetting\n", ifp->if_xname);
		lnc_reset(sc);
		return;
	}
	if (!((sc->recv_ring + sc->recv_next)->md->md1 & STP)) {
		log(LOG_ERR, "%s: Receive interrupt but not start of packet -- Resetting\n", ifp->if_xname);
		lnc_reset(sc);
		return;
	}
#endif

	lookahead = 0;
	next = sc->recv_ring + sc->recv_next;
	while ((flags = next->md->md1) & STP) {

		/* Make a note of the start of the packet */
		start_of_packet = sc->recv_next;

		/*
		 * Find the end of the packet. Even if not data chaining,
		 * jabber packets can overrun into a second descriptor.
		 * If there is no error, then the ENP flag is set in the last
		 * descriptor of the packet. If there is an error then the ERR
		 * flag will be set in the descriptor where the error occurred.
		 * Therefore, to find the last buffer of a packet we search for
		 * either ERR or ENP.
		 */

		if (!(flags & (ENP | MDERR))) {
			do {
				INC_MD_PTR(sc->recv_next, sc->nrdre)
				next = sc->recv_ring + sc->recv_next;
				flags = next->md->md1;
			} while (!(flags & (STP | OWN | ENP | MDERR)));

			if (flags & STP) {
				log(LOG_ERR, "%s: Start of packet found before end of previous in receive ring -- Resetting\n", ifp->if_xname);
				lnc_reset(sc);
				return;
			}
			if (flags & OWN) {
				if (lookahead) {
					/*
					 * Looked ahead into a packet still
					 * being received
					 */
					sc->recv_next = start_of_packet;
					break;
				} else {
					log(LOG_ERR, "%s: End of received packet not found-- Resetting\n", ifp->if_xname);
					lnc_reset(sc);
					return;
				}
			}
		}

		/* md3 holds the message byte count; strip the trailing FCS. */
		pkt_len = (next->md->md3 & MCNT) - FCS_LEN;

		/* Move pointer onto start of next packet */
		INC_MD_PTR(sc->recv_next, sc->nrdre)
		next = sc->recv_ring + sc->recv_next;

		if (flags & MDERR) {
			const char *if_xname = ifp->if_xname;
			if (flags & RBUFF) {
				LNCSTATS(rbuff)
				log(LOG_ERR, "%s: Receive buffer error\n", if_xname);
			}
			if (flags & OFLO) {
				/* OFLO only valid if ENP is not set */
				if (!(flags & ENP)) {
					LNCSTATS(oflo)
					log(LOG_ERR, "%s: Receive overflow error \n", if_xname);
				}
			} else if (flags & ENP) {
				if ((ifp->if_flags & IFF_PROMISC)==0) {
					/*
					 * FRAM and CRC are valid only if ENP
					 * is set and OFLO is not.
					 */
					if (flags & FRAM) {
						LNCSTATS(fram)
						log(LOG_ERR, "%s: Framing error\n", if_xname);
						/*
						 * FRAM is only set if there's a CRC
						 * error so avoid multiple messages
						 */
					} else if (flags & CRC) {
						LNCSTATS(crc)
						log(LOG_ERR, "%s: Receive CRC error\n", if_xname);
					}
				}
			}

			/* Drop packet: give all its buffers back to the chip. */
			LNCSTATS(rerr)
			ifp->if_ierrors++;
			while (start_of_packet != sc->recv_next) {
				start = sc->recv_ring + start_of_packet;
				start->md->md2 = -RECVBUFSIZE; /* XXX - shouldn't be necessary */
				start->md->md1 &= HADR;
				start->md->md1 |= OWN;
				INC_MD_PTR(start_of_packet, sc->nrdre)
			}
		} else { /* Valid packet */

			ifp->if_ipackets++;


			if (sc->nic.mem_mode == DMA_MBUF)
				head = chain_mbufs(sc, start_of_packet, pkt_len);
			else
				head = mbuf_packet(sc, start_of_packet, pkt_len);

			if (head) {
				/*
				 * First mbuf in packet holds the
				 * ethernet and packet headers
				 */
				head->m_pkthdr.rcvif = ifp;
				head->m_pkthdr.len = pkt_len ;
				eh = (struct ether_header *) head->m_data;

				/*
				 * vmware ethernet hardware emulation loops
				 * packets back to itself, violates IFF_SIMPLEX.
				 * drop it if it is from myself.
				 */
				if (bcmp(eh->ether_shost,
				      IFP2ENADDR(sc->ifp), ETHER_ADDR_LEN) == 0) {
					m_freem(head);
				} else {
					(*ifp->if_input)(ifp, head);
				}
			} else {
				log(LOG_ERR,"%s: Packet dropped, no mbufs\n",ifp->if_xname);
				LNCSTATS(drop_packet)
			}
		}

		lookahead++;
	}

	/*
	 * At this point all completely received packets have been processed
	 * so clear RINT since any packets that have arrived while we were in
	 * here have been dealt with.
	 */

	lnc_outw(sc->rdp, RINT | INEA);
}
608
/*
 * Transmit interrupt service.  Walks the transmit descriptor ring from
 * sc->trans_next over every packet the controller has finished with,
 * logging/accounting any transmit errors, recycling or freeing the
 * transmit mbufs, and clearing IFF_DRV_OACTIVE so more packets can be
 * queued.  Finally acknowledges TINT in CSR0.  Fatal errors (UFLO, ring
 * inconsistencies) trigger a full driver reset via lnc_reset().
 */
static void
lnc_tint(struct lnc_softc *sc)
{
	struct host_ring_entry *next, *start;
	int start_of_packet;
	int lookahead;

	/*
	 * If the driver is reset in this routine then we return immediately to
	 * the interrupt driver routine. Any interrupts that have occurred
	 * since the reset will be dealt with there. sc->trans_next
	 * should point to the start of the first packet that was awaiting
	 * transmission after the last transmit interrupt was dealt with. The
	 * LANCE should have relinquished ownership of that descriptor before
	 * the interrupt. Therefore, sc->trans_next should point to a
	 * descriptor with STP set and OWN cleared. If not then the driver's
	 * pointers are out of sync with the LANCE, which signifies a bug in
	 * the driver. Therefore, the following two checks are really
	 * diagnostic, since if the driver is working correctly they should
	 * never happen.
	 */

#ifdef DIAGNOSTIC
	if ((sc->trans_ring + sc->trans_next)->md->md1 & OWN) {
		log(LOG_ERR, "%s: Transmit interrupt with buffer still owned by controller -- Resetting\n", sc->ifp->if_xname);
		lnc_reset(sc);
		return;
	}
#endif


	/*
	 * The LANCE will write the status information for the packet it just
	 * tried to transmit in one of two places. If the packet was
	 * transmitted successfully then the status will be written into the
	 * last descriptor of the packet. If the transmit failed then the
	 * status will be written into the descriptor that was being accessed
	 * when the error occurred and all subsequent descriptors in that
	 * packet will have been relinquished by the LANCE.
	 *
	 * At this point we know that sc->trans_next points to the start
	 * of a packet that the LANCE has just finished trying to transmit.
	 * We now search for a buffer with either ENP or ERR set.
	 */

	lookahead = 0;

	do {
		start_of_packet = sc->trans_next;
		next = sc->trans_ring + sc->trans_next;

#ifdef DIAGNOSTIC
	if (!(next->md->md1 & STP)) {
		log(LOG_ERR, "%s: Transmit interrupt but not start of packet -- Resetting\n", sc->ifp->if_xname);
		lnc_reset(sc);
		return;
	}
#endif

		/*
		 * Find end of packet.
		 */

		if (!(next->md->md1 & (ENP | MDERR))) {
			do {
				INC_MD_PTR(sc->trans_next, sc->ntdre)
				next = sc->trans_ring + sc->trans_next;
			} while (!(next->md->md1 & (STP | OWN | ENP | MDERR)));

			if (next->md->md1 & STP) {
				log(LOG_ERR, "%s: Start of packet found before end of previous in transmit ring -- Resetting\n", sc->ifp->if_xname);
				lnc_reset(sc);
				return;
			}
			if (next->md->md1 & OWN) {
				if (lookahead) {
					/*
					 * Looked ahead into a packet still
					 * being transmitted
					 */
					sc->trans_next = start_of_packet;
					break;
				} else {
					log(LOG_ERR, "%s: End of transmitted packet not found -- Resetting\n", sc->ifp->if_xname);
					lnc_reset(sc);
					return;
				}
			}
		}
		/*
		 * Check for ERR first since other flags are irrelevant if an
		 * error occurred.
		 */
		if (next->md->md1 & MDERR) {

			LNCSTATS(terr)
			sc->ifp->if_oerrors++;

			if (next->md->md3 & LCOL) {
				LNCSTATS(lcol)
				log(LOG_ERR, "%s: Transmit late collision -- Net error?\n", sc->ifp->if_xname);
				sc->ifp->if_collisions++;
				/*
				 * Clear TBUFF since it's not valid when LCOL
				 * set
				 */
				next->md->md3 &= ~TBUFF;
			}
			if (next->md->md3 & LCAR) {
				LNCSTATS(lcar)
				log(LOG_ERR, "%s: Loss of carrier during transmit -- Net error?\n", sc->ifp->if_xname);
			}
			if (next->md->md3 & RTRY) {
				LNCSTATS(rtry)
				log(LOG_ERR, "%s: Transmit of packet failed after 16 attempts -- TDR = %d\n", sc->ifp->if_xname, ((sc->trans_ring + sc->trans_next)->md->md3 & TDR));
				sc->ifp->if_collisions += 16;
				/*
				 * Clear TBUFF since it's not valid when RTRY
				 * set
				 */
				next->md->md3 &= ~TBUFF;
			}
			/*
			 * TBUFF is only valid if neither LCOL nor RTRY are set.
			 * We need to check UFLO after LCOL and RTRY so that we
			 * know whether or not TBUFF is valid. If either are
			 * set then TBUFF will have been cleared above. A
			 * UFLO error will turn off the transmitter so we
			 * have to reset.
			 *
			 */

			if (next->md->md3 & UFLO) {
				LNCSTATS(uflo)
				/*
				 * If an UFLO has occurred it's possibly due
				 * to a TBUFF error
				 */
				if (next->md->md3 & TBUFF) {
					LNCSTATS(tbuff)
					log(LOG_ERR, "%s: Transmit buffer error -- Resetting\n", sc->ifp->if_xname);
				} else
					log(LOG_ERR, "%s: Transmit underflow error -- Resetting\n", sc->ifp->if_xname);
				lnc_reset(sc);
				return;
			}
			/*
			 * Skip the remaining relinquished descriptors of the
			 * failed packet to resync with the next STP.
			 */
			do {
				INC_MD_PTR(sc->trans_next, sc->ntdre)
				next = sc->trans_ring + sc->trans_next;
			} while (!(next->md->md1 & STP) && (sc->trans_next != sc->next_to_send));

		} else {
			/*
			 * Since we check for ERR first then if we get here
			 * the packet was transmitted correctly. There may
			 * still have been non-fatal errors though.
			 * Don't bother checking for DEF, waste of time.
			 */

			sc->ifp->if_opackets++;

			if (next->md->md1 & MORE) {
				LNCSTATS(more)
				sc->ifp->if_collisions += 2;
			}

			/*
			 * ONE is invalid if LCOL is set. If LCOL was set then
			 * ERR would have also been set and we would have
			 * returned from lnc_tint above. Therefore we can
			 * assume if we arrive here that ONE is valid.
			 *
			 */

			if (next->md->md1 & ONE) {
				LNCSTATS(one)
				sc->ifp->if_collisions++;
			}
			INC_MD_PTR(sc->trans_next, sc->ntdre)
			next = sc->trans_ring + sc->trans_next;
		}

		/*
		 * Clear descriptors and free any mbufs.
		 */

		do {
			start = sc->trans_ring + start_of_packet;
			start->md->md1 &= HADR;
			if (sc->nic.mem_mode == DMA_MBUF) {
				/* Cache clusters on a local queue */
				if ((start->buff.mbuf->m_flags & M_EXT) && (sc->mbuf_count < MBUF_CACHE_LIMIT)) {
					if (sc->mbuf_count) {
						start->buff.mbuf->m_next = sc->mbufs;
						sc->mbufs = start->buff.mbuf;
					} else
						sc->mbufs = start->buff.mbuf;
					sc->mbuf_count++;
					start->buff.mbuf = 0;
				} else {
					/*
					 * XXX should this be m_freem()?
					 */
					m_free(start->buff.mbuf);
					start->buff.mbuf = NULL;
				}
			}
			sc->pending_transmits--;
			INC_MD_PTR(start_of_packet, sc->ntdre)
		} while (start_of_packet != sc->trans_next);

		/*
		 * There's now at least one free descriptor
		 * in the ring so indicate that we can accept
		 * more packets again.
		 */

		sc->ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;

		lookahead++;

	} while (sc->pending_transmits && !(next->md->md1 & OWN));

	/*
	 * Clear TINT since we've dealt with all
	 * the completed transmissions.
	 */

	lnc_outw(sc->rdp, TINT | INEA);
}
839
/*
 * Bus-independent part of device attach: allocate and initialise the
 * ifnet, read the station address out of the address PROM and attach
 * the ethernet interface.  Returns 1 on success, 0 on failure.
 */
int
lnc_attach_common(device_t dev)
{
	lnc_softc_t *sc = device_get_softc(dev);
	int i;
	int skip;
	u_char eaddr[6];

	/*
	 * `skip' is the byte stride of the station-address PROM in I/O
	 * space: BICC and CNET98S boards expose it on every other byte.
	 */
	switch (sc->nic.ident) {
	case BICC:
	case CNET98S:
		skip = 2;
		break;
	default:
		skip = 1;
		break;
	}

	/* Set default mode */
	sc->nic.mode = NORMAL;

	/* Fill in arpcom structure entries */

	sc->ifp = if_alloc(IFT_ETHER);
	if (sc->ifp == NULL) {
		device_printf(dev, "can not if_alloc()\n");
		return (0);
	}
	sc->ifp->if_softc = sc;
	if_initname(sc->ifp, device_get_name(dev),
	    device_get_unit(dev));
	sc->ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX |
	    IFF_MULTICAST | IFF_NEEDSGIANT;
	sc->ifp->if_timer = 0;
	sc->ifp->if_start = lnc_start;
	sc->ifp->if_ioctl = lnc_ioctl;
	sc->ifp->if_watchdog = lnc_watchdog;
	sc->ifp->if_init = lnc_init;
	IFQ_SET_MAXLEN(&sc->ifp->if_snd, IFQ_MAXLEN);
	sc->ifp->if_snd.ifq_drv_maxlen = IFQ_MAXLEN;
	IFQ_SET_READY(&sc->ifp->if_snd);

	/* Extract MAC address from PROM */
	for (i = 0; i < ETHER_ADDR_LEN; i++)
		eaddr[i] = lnc_inb(i * skip);

	ether_ifattach(sc->ifp, eaddr);

	/* Identify the hardware: board name only matters for discrete LANCEs. */
	if (sc->nic.ic == LANCE || sc->nic.ic == C_LANCE)
		if_printf(sc->ifp, "%s (%s)\n",
		    nic_ident[sc->nic.ident], ic_ident[sc->nic.ic]);
	else
		if_printf(sc->ifp, "%s\n", ic_ident[sc->nic.ic]);

	return (1);
}
896
/*
 * Bus-independent part of device detach: detach the ethernet interface,
 * stop the controller and release all bus resources, all at splimp to
 * keep the interrupt handler out.  Always returns 0.
 */
int
lnc_detach_common(device_t dev) {
	lnc_softc_t *sc = device_get_softc(dev);
	int s = splimp();

	ether_ifdetach(sc->ifp);
	lnc_stop(sc);
	lnc_release_resources(dev);

	splx(s);
	return (0);
}
909
/*
 * (Re)initialise the controller: stop the chip, lay out the
 * initialisation block, descriptor rings and data buffers in the
 * driver's memory area, program the init block address into CSR1/CSR2,
 * then start initialisation and wait for IDON before enabling
 * interrupts and marking the interface running.  Runs at splimp.
 * Also used by lnc_reset() for error recovery, and as ifp->if_init.
 */
static void
lnc_init(xsc)
	void *xsc;
{
	struct lnc_softc *sc = xsc;
	int s, i;
	char *lnc_mem;

	/* Shut down interface */

	s = splimp();
	lnc_stop(sc);
	sc->ifp->if_flags |= IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; /* XXX??? */

	/*
	 * This sets up the memory area for the controller. Memory is set up for
	 * the initialisation block (12 words of contiguous memory starting
	 * on a word boundary),the transmit and receive ring structures (each
	 * entry is 4 words long and must start on a quadword boundary) and
	 * the data buffers.
	 *
	 * The alignment tests are particularly paranoid.
	 */

	sc->recv_next = 0;
	sc->trans_ring = sc->recv_ring + NDESC(sc->nrdre);
	sc->trans_next = 0;

	if (sc->nic.mem_mode == SHMEM)
		lnc_mem = (char *)(uintptr_t)sc->nic.iobase;
	else
		lnc_mem = (char *) (sc->trans_ring + NDESC(sc->ntdre));

	/* Word-align, place the init block, then quadword-align for the rings. */
	lnc_mem = (char *)(((long)lnc_mem + 1) & ~1);
	sc->init_block = (struct init_block *) ((long) lnc_mem & ~1);
	lnc_mem = (char *) (sc->init_block + 1);
	lnc_mem = (char *)(((long)lnc_mem + 7) & ~7);

	/* Initialise pointers to descriptor entries */
	for (i = 0; i < NDESC(sc->nrdre); i++) {
		(sc->recv_ring + i)->md = (struct mds *) lnc_mem;
		lnc_mem += sizeof(struct mds);
	}
	for (i = 0; i < NDESC(sc->ntdre); i++) {
		(sc->trans_ring + i)->md = (struct mds *) lnc_mem;
		lnc_mem += sizeof(struct mds);
	}

	/* Initialise the remaining ring entries */

	if (sc->nic.mem_mode == DMA_MBUF) {

		sc->mbufs = 0;
		sc->mbuf_count = 0;

		/* Free previously allocated mbufs */
		if (sc->flags & LNC_INITIALISED)
			lnc_free_mbufs(sc);


		/* Arm every receive descriptor with a cluster (OWN set). */
		for (i = 0; i < NDESC(sc->nrdre); i++) {
			if (alloc_mbuf_cluster(sc, sc->recv_ring+i)) {
				log(LOG_ERR, "Initialisation failed -- no mbufs\n");
				splx(s);
				return;
			}
		}

		for (i = 0; i < NDESC(sc->ntdre); i++) {
			(sc->trans_ring + i)->buff.mbuf = 0;
			(sc->trans_ring + i)->md->md0 = 0;
			(sc->trans_ring + i)->md->md1 = 0;
			(sc->trans_ring + i)->md->md2 = 0;
			(sc->trans_ring + i)->md->md3 = 0;
		}
	} else {
		/* Fixed buffers: carve them out of the memory area directly. */
		for (i = 0; i < NDESC(sc->nrdre); i++) {
			(sc->recv_ring + i)->md->md0 = kvtop(lnc_mem);
			(sc->recv_ring + i)->md->md1 = ((kvtop(lnc_mem) >> 16) & 0xff) | OWN;
			(sc->recv_ring + i)->md->md2 = -RECVBUFSIZE;
			(sc->recv_ring + i)->md->md3 = 0;
			(sc->recv_ring + i)->buff.data = lnc_mem;
			lnc_mem += RECVBUFSIZE;
		}
		for (i = 0; i < NDESC(sc->ntdre); i++) {
			(sc->trans_ring + i)->md->md0 = kvtop(lnc_mem);
			(sc->trans_ring + i)->md->md1 = ((kvtop(lnc_mem) >> 16) & 0xff);
			(sc->trans_ring + i)->md->md2 = 0;
			(sc->trans_ring + i)->md->md3 = 0;
			(sc->trans_ring + i)->buff.data = lnc_mem;
			lnc_mem += TRANSBUFSIZE;
		}
	}

	sc->next_to_send = 0;

	/* Set up initialisation block */

	sc->init_block->mode = sc->nic.mode;

	for (i = 0; i < ETHER_ADDR_LEN; i++)
		sc->init_block->padr[i] = IFP2ENADDR(sc->ifp)[i];

	lnc_setladrf(sc);

	/* Ring base addresses and encoded ring lengths for the init block. */
	sc->init_block->rdra = kvtop(sc->recv_ring->md);
	sc->init_block->rlen = ((kvtop(sc->recv_ring->md) >> 16) & 0xff) | (sc->nrdre << 13);
	sc->init_block->tdra = kvtop(sc->trans_ring->md);
	sc->init_block->tlen = ((kvtop(sc->trans_ring->md) >> 16) & 0xff) | (sc->ntdre << 13);


	/* Set flags to show that the memory area is valid */
	sc->flags |= LNC_INITIALISED;

	sc->pending_transmits = 0;

	/* Give the LANCE the physical address of the initialisation block */

	if (sc->nic.ic == PCnet_Home) {
		u_short media;
		/* Set PHY_SEL to HomeRun */
		media = read_bcr(sc, BCR49);
		media &= ~3;
		media |= 1;
		write_bcr(sc, BCR49, media);
	}

	write_csr(sc, CSR1, kvtop(sc->init_block));
	write_csr(sc, CSR2, (kvtop(sc->init_block) >> 16) & 0xff);

	/*
	 * Depending on which controller this is, CSR3 has different meanings.
	 * For the Am7990 it controls DMA operations, for the Am79C960 it
	 * controls interrupt masks and transmitter algorithms. In either
	 * case, none of the flags are set.
	 *
	 */

	write_csr(sc, CSR3, 0);

	/* Let's see if it starts */
	/*
	printf("Enabling lnc interrupts\n");
	sc->ifp->if_timer = 10;
	write_csr(sc, CSR0, INIT|INEA);
	*/

	/*
	 * Now that the initialisation is complete there's no reason to
	 * access anything except CSR0, so we leave RAP pointing there
	 * so we can just access RDP from now on, saving an outw each
	 * time.
	 */

	write_csr(sc, CSR0, INIT);
	for(i=0; i < 1000; i++)
		if (read_csr(sc, CSR0) & IDON)
			break;

	if (read_csr(sc, CSR0) & IDON) {
		/*
		 * Enable interrupts, start the LANCE, mark the interface as
		 * running and transmit any pending packets.
		 */
		write_csr(sc, CSR0, STRT | INEA);
		sc->ifp->if_drv_flags |= IFF_DRV_RUNNING;
		sc->ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
		lnc_start(sc->ifp);
	} else
		log(LOG_ERR, "%s: Initialisation failed\n",
		    sc->ifp->if_xname);

	splx(s);
}
1084
/*
 * The interrupt flag (INTR) will be set and provided that the interrupt enable
 * flag (INEA) is also set, the interrupt pin will be driven low when any of
 * the following occur:
 *
 * 1) Completion of the initialisation routine (IDON). 2) The reception of a
 * packet (RINT). 3) The transmission of a packet (TINT). 4) A transmitter
 * timeout error (BABL). 5) A missed packet (MISS). 6) A memory error (MERR).
 *
 * The interrupt flag is cleared when all of the above conditions are cleared.
 *
 * If the driver is reset from this routine then it first checks to see if any
 * interrupts have occurred since the reset and handles them before returning.
 * This is because the NIC may signify a pending interrupt in CSR0 using the
 * INTR flag even if a hardware interrupt is currently inhibited (at least I
 * think it does from reading the data sheets). We may as well deal with
 * these pending interrupts now rather than get the overhead of another
 * hardware interrupt immediately upon returning from the interrupt handler.
 *
 */

/*
 * Interrupt handler: loop while CSR0 shows INTR, acknowledging the
 * cause bits, logging/accounting error conditions, and dispatching to
 * lnc_rint()/lnc_tint() for receive/transmit completions.
 */
void
lncintr(void *arg)
{
	lnc_softc_t *sc = arg;
	u_short csr0;

	/*
	 * INEA is the only bit that can be cleared by writing a 0 to it so
	 * we have to include it in any writes that clear other flags.
	 */

	while ((csr0 = lnc_inw(sc->rdp)) & INTR) {

		/*
		 * Clear interrupt flags early to avoid race conditions. The
		 * controller can still set these flags even while we're in
		 * this interrupt routine. If the flag is still set from the
		 * event that caused this interrupt any new events will
		 * be missed.
		 */

		lnc_outw(sc->rdp, csr0);
		/*lnc_outw(sc->rdp, IDON | CERR | BABL | MISS | MERR | RINT | TINT | INEA);*/

#ifdef notyet
		if (csr0 & IDON) {
			printf("IDON\n");
			sc->ifp->if_timer = 0;
			write_csr(sc, CSR0, STRT | INEA);
			sc->ifp->if_drv_flags |= IFF_DRV_RUNNING;
			sc->ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
			lnc_start(sc->ifp);
			continue;
		}
#endif

		if (csr0 & ERR) {
			if (csr0 & CERR) {
				log(LOG_ERR, "%s: Heartbeat error -- SQE test failed\n", sc->ifp->if_xname);
				LNCSTATS(cerr)
			}
			if (csr0 & BABL) {
				log(LOG_ERR, "%s: Babble error - more than 1519 bytes transmitted\n", sc->ifp->if_xname);
				LNCSTATS(babl)
				sc->ifp->if_oerrors++;
			}
			if (csr0 & MISS) {
				log(LOG_ERR, "%s: Missed packet -- no receive buffer\n", sc->ifp->if_xname);
				LNCSTATS(miss)
				sc->ifp->if_ierrors++;
			}
			if (csr0 & MERR) {
				log(LOG_ERR, "%s: Memory error -- Resetting\n", sc->ifp->if_xname);
				LNCSTATS(merr)
				lnc_reset(sc);
				continue;
			}
		}
		if (csr0 & RINT) {
			LNCSTATS(rint)
			lnc_rint(sc);
		}
		if (csr0 & TINT) {
			LNCSTATS(tint)
			sc->ifp->if_timer = 0;
			lnc_tint(sc);
		}

		/*
		 * If there's room in the transmit descriptor ring then queue
		 * some more transmit packets.
		 */

		if (!(sc->ifp->if_drv_flags & IFF_DRV_OACTIVE))
			lnc_start(sc->ifp);
	}
}
1183
1184 static __inline int
1185 mbuf_to_buffer(struct mbuf *m, char *buffer)
1186 {
1187
1188 int len=0;
1189
1190 for( ; m; m = m->m_next) {
1191 bcopy(mtod(m, caddr_t), buffer, m->m_len);
1192 buffer += m->m_len;
1193 len += m->m_len;
1194 }
1195
1196 return(len);
1197 }
1198
1199 static __inline struct mbuf *
1200 chain_to_cluster(struct mbuf *m)
1201 {
1202 struct mbuf *new;
1203
1204 MGET(new, M_DONTWAIT, MT_DATA);
1205 if (new) {
1206 MCLGET(new, M_DONTWAIT);
1207 if (new->m_ext.ext_buf) {
1208 new->m_len = mbuf_to_buffer(m, new->m_data);
1209 m_freem(m);
1210 return(new);
1211 } else
1212 m_free(new);
1213 }
1214 return(0);
1215 }
1216
1217 /*
1218 * IFF_DRV_OACTIVE and IFF_DRV_RUNNING are checked in ether_output so it's
1219 * redundant to check them again since we wouldn't have got here if they were
1220 * not appropriately set. This is also called from lnc_init and lncintr but the
1221 * flags should be ok at those points too.
1222 */
1223
/*
 * Dequeue packets from the interface send queue and hand them to the
 * chip's transmit descriptor ring until the queue empties or the ring
 * fills. In DMA_MBUF mode the mbufs themselves are mapped into the
 * ring; otherwise each packet is copied into a pre-allocated ring
 * buffer. Sets IFF_DRV_OACTIVE when the ring is full.
 */
static void
lnc_start(struct ifnet *ifp)
{

	struct lnc_softc *sc = ifp->if_softc;
	struct host_ring_entry *desc;
	int tmp;
	int end_of_packet;
	struct mbuf *head, *m;
	int len, chunk;
	int addr;
	int no_entries_needed;

	do {

		IFQ_DRV_DEQUEUE(&sc->ifp->if_snd, head);
		if (!head)
			return;

		if (sc->nic.mem_mode == DMA_MBUF) {

			/* One ring descriptor is needed per mbuf in the chain. */
			no_entries_needed = 0;
			for (m=head; m; m = m->m_next)
				no_entries_needed++;

			/*
			 * We try and avoid bcopy as much as possible
			 * but there are two cases when we use it.
			 *
			 * 1) If there are not enough free entries in the ring
			 * to hold each mbuf in the chain then compact the
			 * chain into a single cluster.
			 *
			 * 2) The Am7990 and Am79C90 must not have less than
			 * 100 bytes in the first descriptor of a chained
			 * packet so it's necessary to shuffle the mbuf
			 * contents to ensure this.
			 */


			if (no_entries_needed > (NDESC(sc->ntdre) - sc->pending_transmits)) {
				if (!(head = chain_to_cluster(head))) {
					log(LOG_ERR, "%s: Couldn't get mbuf for transmit packet -- Resetting \n ",ifp->if_xname);
					lnc_reset(sc);
					return;
				}
			} else if ((sc->nic.ic == LANCE) || (sc->nic.ic == C_LANCE)) {
				if ((head->m_len < 100) && (head->m_next)) {
					/* Pull up to 100 bytes into the first mbuf. */
					len = 100 - head->m_len;
					if (M_TRAILINGSPACE(head) < len) {
						/*
						 * Move data to start of data
						 * area. We assume the first
						 * mbuf has a packet header
						 * and is not a cluster.
						 */
						bcopy((caddr_t)head->m_data, (caddr_t)head->m_pktdat, head->m_len);
						head->m_data = head->m_pktdat;
					}
					/* Drain bytes from successor mbufs, freeing any emptied. */
					m = head->m_next;
					while (m && (len > 0)) {
						chunk = min(len, m->m_len);
						bcopy(mtod(m, caddr_t), mtod(head, caddr_t) + head->m_len, chunk);
						len -= chunk;
						head->m_len += chunk;
						m->m_len -= chunk;
						m->m_data += chunk;
						if (m->m_len <= 0) {
							m = m_free(m);
							head->m_next = m;
						}
					}
				}
			}

			tmp = sc->next_to_send;

			/*
			 * On entering this loop we know that tmp points to a
			 * descriptor with a clear OWN bit.
			 */

			/* Map each mbuf of the chain into one descriptor. */
			desc = sc->trans_ring + tmp;
			len = ETHER_MIN_LEN;
			for (m = head; m; m = m->m_next) {
				desc->buff.mbuf = m;
				addr = kvtop(m->m_data);
				desc->md->md0 = addr;
				desc->md->md1 = ((addr >> 16) & 0xff);
				desc->md->md3 = 0;
				/* Byte count is stored as a negative value. */
				desc->md->md2 = -m->m_len;
				sc->pending_transmits++;
				len -= m->m_len;

				INC_MD_PTR(tmp, sc->ntdre)
				desc = sc->trans_ring + tmp;
			}

			/* Mark the last descriptor as end-of-packet. */
			end_of_packet = tmp;
			DEC_MD_PTR(tmp, sc->ntdre)
			desc = sc->trans_ring + tmp;
			desc->md->md1 |= ENP;

			/* Pad short frames out to the Ethernet minimum. */
			if (len > 0)
				desc->md->md2 -= len;

			/*
			 * Set OWN bits in reverse order, otherwise the Lance
			 * could start sending the packet before all the
			 * buffers have been relinquished by the host.
			 */

			while (tmp != sc->next_to_send) {
				desc->md->md1 |= OWN;
				DEC_MD_PTR(tmp, sc->ntdre)
				desc = sc->trans_ring + tmp;
			}
			sc->next_to_send = end_of_packet;
			/* Start-of-packet OWN is set last; chip may now send. */
			desc->md->md1 |= STP | OWN;
		} else {
			/* Copy mode: flatten the chain into the ring buffer. */
			sc->pending_transmits++;
			desc = sc->trans_ring + sc->next_to_send;
			len = mbuf_to_buffer(head, desc->buff.data);
			desc->md->md3 = 0;
			desc->md->md2 = -max(len, ETHER_MIN_LEN - ETHER_CRC_LEN);
			desc->md->md1 |= OWN | STP | ENP;
			INC_MD_PTR(sc->next_to_send, sc->ntdre)
		}

		/* Force an immediate poll of the transmit ring */
		lnc_outw(sc->rdp, TDMD | INEA);

		/*
		 * Set a timer so if the buggy Am7990 shuts
		 * down we can wake it up.
		 */

		ifp->if_timer = 2;

		/* Tap the outgoing packet for bpf listeners. */
		BPF_MTAP(sc->ifp, head);

		/* In copy mode the chain was copied out, so free it now. */
		if (sc->nic.mem_mode != DMA_MBUF)
			m_freem(head);

	} while (sc->pending_transmits < NDESC(sc->ntdre));

	/*
	 * Transmit ring is full so set IFF_DRV_OACTIVE
	 * since we can't buffer any more packets.
	 */

	sc->ifp->if_drv_flags |= IFF_DRV_OACTIVE;
	LNCSTATS(trans_ring_full)
}
1378
1379 static int
1380 lnc_ioctl(struct ifnet * ifp, u_long command, caddr_t data)
1381 {
1382
1383 struct lnc_softc *sc = ifp->if_softc;
1384 int s, error = 0;
1385
1386 s = splimp();
1387
1388 switch (command) {
1389 case SIOCSIFFLAGS:
1390 #ifdef DEBUG
1391 if (ifp->if_flags & IFF_DEBUG)
1392 sc->lnc_debug = 1;
1393 else
1394 sc->lnc_debug = 0;
1395 #endif
1396 if (ifp->if_flags & IFF_PROMISC) {
1397 if (!(sc->nic.mode & PROM)) {
1398 sc->nic.mode |= PROM;
1399 lnc_init(sc);
1400 }
1401 } else if (sc->nic.mode & PROM) {
1402 sc->nic.mode &= ~PROM;
1403 lnc_init(sc);
1404 }
1405
1406 if ((ifp->if_flags & IFF_ALLMULTI) &&
1407 !(sc->flags & LNC_ALLMULTI)) {
1408 sc->flags |= LNC_ALLMULTI;
1409 lnc_init(sc);
1410 } else if (!(ifp->if_flags & IFF_ALLMULTI) &&
1411 (sc->flags & LNC_ALLMULTI)) {
1412 sc->flags &= ~LNC_ALLMULTI;
1413 lnc_init(sc);
1414 }
1415
1416 if ((ifp->if_flags & IFF_UP) == 0 &&
1417 (ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
1418 /*
1419 * If interface is marked down and it is running,
1420 * then stop it.
1421 */
1422 lnc_stop(sc);
1423 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
1424 } else if ((ifp->if_flags & IFF_UP) != 0 &&
1425 (ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
1426 /*
1427 * If interface is marked up and it is stopped, then
1428 * start it.
1429 */
1430 lnc_init(sc);
1431 }
1432 break;
1433 case SIOCADDMULTI:
1434 case SIOCDELMULTI:
1435 lnc_init(sc);
1436 error = 0;
1437 break;
1438 default:
1439 error = ether_ioctl(ifp, command, data);
1440 break;
1441 }
1442 (void) splx(s);
1443 return error;
1444 }
1445
1446 static void
1447 lnc_watchdog(struct ifnet *ifp)
1448 {
1449 log(LOG_ERR, "%s: Device timeout -- Resetting\n", ifp->if_xname);
1450 ifp->if_oerrors++;
1451 lnc_reset(ifp->if_softc);
1452 }
1453
1454 #ifdef DEBUG
/*
 * Debug aid: dump the host-side ring bookkeeping, the chip's init
 * block, and every receive/transmit descriptor to the console.
 * Uses the kernel printf %b format for bit-field decoding.
 */
void
lnc_dump_state(struct lnc_softc *sc)
{
	int i;

	printf("\nDriver/NIC [%s] state dump\n", sc->ifp->if_xname);
	printf("Memory access mode: %b\n", sc->nic.mem_mode, MEM_MODES);
	printf("Host memory\n");
	printf("-----------\n");

	printf("Receive ring: base = %p, next = %p\n",
	    (void *)sc->recv_ring, (void *)(sc->recv_ring + sc->recv_next));
	for (i = 0; i < NDESC(sc->nrdre); i++)
		printf("\t%d:%p md = %p buff = %p\n",
		    i, (void *)(sc->recv_ring + i),
		    (void *)(sc->recv_ring + i)->md,
		    (void *)(sc->recv_ring + i)->buff.data);

	printf("Transmit ring: base = %p, next = %p\n",
	    (void *)sc->trans_ring, (void *)(sc->trans_ring + sc->trans_next));
	for (i = 0; i < NDESC(sc->ntdre); i++)
		printf("\t%d:%p md = %p buff = %p\n",
		    i, (void *)(sc->trans_ring + i),
		    (void *)(sc->trans_ring + i)->md,
		    (void *)(sc->trans_ring + i)->buff.data);
	printf("Lance memory (may be on host(DMA) or card(SHMEM))\n");
	printf("Init block = %p\n", (void *)sc->init_block);
	printf("\tmode = %b rlen:rdra = %x:%x tlen:tdra = %x:%x\n",
	    sc->init_block->mode, INIT_MODE, sc->init_block->rlen,
	    sc->init_block->rdra, sc->init_block->tlen, sc->init_block->tdra);
	printf("Receive descriptor ring\n");
	/* md2 holds a negative byte count, hence the negation below. */
	for (i = 0; i < NDESC(sc->nrdre); i++)
		printf("\t%d buffer = 0x%x%x, BCNT = %d,\tMCNT = %u,\tflags = %b\n",
		    i, ((sc->recv_ring + i)->md->md1 & HADR),
		    (sc->recv_ring + i)->md->md0,
		    -(short) (sc->recv_ring + i)->md->md2,
		    (sc->recv_ring + i)->md->md3,
		    (((sc->recv_ring + i)->md->md1 & ~HADR) >> 8), RECV_MD1);
	printf("Transmit descriptor ring\n");
	for (i = 0; i < NDESC(sc->ntdre); i++)
		printf("\t%d buffer = 0x%x%x, BCNT = %d,\tflags = %b %b\n",
		    i, ((sc->trans_ring + i)->md->md1 & HADR),
		    (sc->trans_ring + i)->md->md0,
		    -(short) (sc->trans_ring + i)->md->md2,
		    ((sc->trans_ring + i)->md->md1 >> 8), TRANS_MD1,
		    ((sc->trans_ring + i)->md->md3 >> 10), TRANS_MD3);
	printf("\nnext_to_send = %x\n", sc->next_to_send);
	printf("\n CSR0 = %b CSR1 = %x CSR2 = %x CSR3 = %x\n\n",
	    read_csr(sc, CSR0), CSR0_FLAGS, read_csr(sc, CSR1),
	    read_csr(sc, CSR2), read_csr(sc, CSR3));

	/* Set RAP back to CSR0 */
	lnc_outw(sc->rap, CSR0);
}
1509
1510 void
1511 mbuf_dump_chain(struct mbuf * m)
1512 {
1513
1514 #define MBUF_FLAGS \
1515 "\2\1M_EXT\2M_PKTHDR\3M_EOR\4UNKNOWN\5M_BCAST\6M_MCAST"
1516
1517 if (!m)
1518 log(LOG_DEBUG, "m == NULL\n");
1519 do {
1520 log(LOG_DEBUG, "m = %p\n", (void *)m);
1521 log(LOG_DEBUG, "m_hdr.mh_next = %p\n",
1522 (void *)m->m_hdr.mh_next);
1523 log(LOG_DEBUG, "m_hdr.mh_nextpkt = %p\n",
1524 (void *)m->m_hdr.mh_nextpkt);
1525 log(LOG_DEBUG, "m_hdr.mh_len = %d\n", m->m_hdr.mh_len);
1526 log(LOG_DEBUG, "m_hdr.mh_data = %p\n",
1527 (void *)m->m_hdr.mh_data);
1528 log(LOG_DEBUG, "m_hdr.mh_type = %d\n", m->m_hdr.mh_type);
1529 log(LOG_DEBUG, "m_hdr.mh_flags = %b\n", m->m_hdr.mh_flags,
1530 MBUF_FLAGS);
1531 if (!(m->m_hdr.mh_flags & (M_PKTHDR | M_EXT)))
1532 log(LOG_DEBUG, "M_dat.M_databuf = %p\n",
1533 (void *)m->M_dat.M_databuf);
1534 else {
1535 if (m->m_hdr.mh_flags & M_PKTHDR) {
1536 log(LOG_DEBUG, "M_dat.MH.MH_pkthdr.len = %d\n",
1537 m->M_dat.MH.MH_pkthdr.len);
1538 log(LOG_DEBUG,
1539 "M_dat.MH.MH_pkthdr.rcvif = %p\n",
1540 (void *)m->M_dat.MH.MH_pkthdr.rcvif);
1541 if (!(m->m_hdr.mh_flags & M_EXT))
1542 log(LOG_DEBUG,
1543 "M_dat.MH.MH_dat.MH_databuf = %p\n",
1544 (void *)m->M_dat.MH.MH_dat.MH_databuf);
1545 }
1546 if (m->m_hdr.mh_flags & M_EXT) {
1547 log(LOG_DEBUG,
1548 "M_dat.MH.MH_dat.MH_ext.ext_buff %p\n",
1549 (void *)m->M_dat.MH.MH_dat.MH_ext.ext_buf);
1550 log(LOG_DEBUG,
1551 "M_dat.MH.MH_dat.MH_ext.ext_free %p\n",
1552 (void *)m->M_dat.MH.MH_dat.MH_ext.ext_free);
1553 log(LOG_DEBUG,
1554 "M_dat.MH.MH_dat.MH_ext.ext_size %d\n",
1555 m->M_dat.MH.MH_dat.MH_ext.ext_size);
1556 }
1557 }
1558 } while ((m = m->m_next) != NULL);
1559 }
1560 #endif
Cache object: 2e3d82d169b3c1b7fea5f95afcc21417
|