sys/dev/ic/hme.c
1 /* $NetBSD: hme.c,v 1.109 2022/05/29 10:43:46 rin Exp $ */
2
3 /*-
4 * Copyright (c) 1999 The NetBSD Foundation, Inc.
5 * All rights reserved.
6 *
7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Paul Kranenburg.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 *
19 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
20 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
21 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
23 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29 * POSSIBILITY OF SUCH DAMAGE.
30 */
31
32 /*
33 * HME Ethernet module driver.
34 */
35
36 #include <sys/cdefs.h>
37 __KERNEL_RCSID(0, "$NetBSD: hme.c,v 1.109 2022/05/29 10:43:46 rin Exp $");
38
39 /* #define HMEDEBUG */
40
41 #include "opt_inet.h"
42
43 #include <sys/param.h>
44 #include <sys/systm.h>
45 #include <sys/kernel.h>
46 #include <sys/mbuf.h>
47 #include <sys/syslog.h>
48 #include <sys/socket.h>
49 #include <sys/device.h>
50 #include <sys/malloc.h>
51 #include <sys/ioctl.h>
52 #include <sys/errno.h>
53 #include <sys/rndsource.h>
54
55 #include <net/if.h>
56 #include <net/if_dl.h>
57 #include <net/if_ether.h>
58 #include <net/if_media.h>
59 #include <net/bpf.h>
60
61 #ifdef INET
62 #include <net/if_vlanvar.h>
63 #include <netinet/in.h>
64 #include <netinet/if_inarp.h>
65 #include <netinet/in_systm.h>
66 #include <netinet/in_var.h>
67 #include <netinet/ip.h>
68 #include <netinet/tcp.h>
69 #include <netinet/udp.h>
70 #endif
71
72 #include <dev/mii/mii.h>
73 #include <dev/mii/miivar.h>
74
75 #include <sys/bus.h>
76
77 #include <dev/ic/hmereg.h>
78 #include <dev/ic/hmevar.h>
79
80 static void hme_start(struct ifnet *);
81 static void hme_stop(struct ifnet *, int);
82 static int hme_ioctl(struct ifnet *, u_long, void *);
83 static void hme_tick(void *);
84 static void hme_watchdog(struct ifnet *);
85 static bool hme_shutdown(device_t, int);
86 static int hme_init(struct ifnet *);
87 static void hme_meminit(struct hme_softc *);
88 static void hme_mifinit(struct hme_softc *);
89 static void hme_reset(struct hme_softc *);
90 static void hme_chipreset(struct hme_softc *);
91 static void hme_setladrf(struct hme_softc *);
92
93 /* MII methods & callbacks */
94 static int hme_mii_readreg(device_t, int, int, uint16_t *);
95 static int hme_mii_writereg(device_t, int, int, uint16_t);
96 static void hme_mii_statchg(struct ifnet *);
97
98 static int hme_mediachange(struct ifnet *);
99
100 static struct mbuf *hme_get(struct hme_softc *, int, uint32_t);
101 static int hme_put(struct hme_softc *, int, struct mbuf *);
102 static void hme_read(struct hme_softc *, int, uint32_t);
103 static int hme_eint(struct hme_softc *, u_int);
104 static int hme_rint(struct hme_softc *);
105 static int hme_tint(struct hme_softc *);
106
107 #if 0
108 /* Default buffer copy routines */
109 static void hme_copytobuf_contig(struct hme_softc *, void *, int, int);
110 static void hme_copyfrombuf_contig(struct hme_softc *, void *, int, int);
111 #endif
112
113 void
114 hme_config(struct hme_softc *sc)
115 {
116 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
117 struct mii_data *mii = &sc->sc_mii;
118 struct mii_softc *child;
119 bus_dma_tag_t dmatag = sc->sc_dmatag;
120 bus_dma_segment_t seg;
121 bus_size_t size;
122 int rseg, error;
123
124 /*
125 * HME common initialization.
126 *
127 * hme_softc fields that must be initialized by the front-end:
128 *
129 * the bus tag:
130 * sc_bustag
131 *
132 * the DMA bus tag:
133 * sc_dmatag
134 *
135 * the bus handles:
136 * sc_seb (Shared Ethernet Block registers)
137 * sc_erx (Receiver Unit registers)
138 * sc_etx (Transmitter Unit registers)
139 * sc_mac (MAC registers)
140 * sc_mif (Management Interface registers)
141 *
142 * the maximum bus burst size:
143 * sc_burst
144 *
145 * (notyet:DMA capable memory for the ring descriptors & packet buffers:
146 * rb_membase, rb_dmabase)
147 *
148 * the local Ethernet address:
149 * sc_enaddr
150 *
151 */
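/*
 * Illustrative sketch only: a typical bus front-end fills these in
 * before calling hme_config().  The softc field names are real; the
 * `ha' attach-args structure here is hypothetical.
 *
 *	sc->sc_bustag = ha->ha_bustag;
 *	sc->sc_dmatag = ha->ha_dmatag;
 *	sc->sc_burst = 16;		// maximum DMA burst size
 *	memcpy(sc->sc_enaddr, ha->ha_enaddr, ETHER_ADDR_LEN);
 *	hme_config(sc);
 */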
152
153 /* Make sure the chip is stopped. */
154 hme_chipreset(sc);
155
156 /*
157 * Allocate descriptors and buffers
158 * XXX - do all this differently, and more configurably;
159 * e.g. use something like `dma_load_mbuf()' on transmit,
160 * and a pool of `EXTMEM' mbufs (with buffers DMA-mapped
161 * all the time) on the receiver side.
162 *
163 * Note: receive buffers must be 64-byte aligned.
164 * Also, apparently, the buffers must extend to a DMA burst
165 * boundary beyond the maximum packet size.
166 */
167 #define _HME_NDESC 128
168 #define _HME_BUFSZ 1600
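/*
 * 1600 is a multiple of 64 (so consecutively packed buffers keep the
 * required 64-byte alignment) and leaves slack past ETHER_MAX_LEN
 * (1518) up to a DMA burst boundary, per the note above.
 */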
169
170 /* Note: the # of descriptors must be a multiple of 16 */
171 sc->sc_rb.rb_ntbuf = _HME_NDESC;
172 sc->sc_rb.rb_nrbuf = _HME_NDESC;
173
174 /*
175 * Allocate DMA capable memory
176 * Buffer descriptors must be aligned on a 2048 byte boundary;
177 * take this into account when calculating the size. Note that
178 * the maximum number of descriptors (256) occupies 2048 bytes,
179 * so we allocate that much regardless of _HME_NDESC.
180 */
181 size = 2048 + /* TX descriptors */
182 2048 + /* RX descriptors */
183 sc->sc_rb.rb_ntbuf * _HME_BUFSZ + /* TX buffers */
184 sc->sc_rb.rb_nrbuf * _HME_BUFSZ; /* RX buffers */
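	/*
	 * With the defaults above: 2048 + 2048 + 128 * 1600 + 128 * 1600
	 * = 413696 bytes of DMA-safe memory for the entire ring structure.
	 */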
185
186 /* Allocate DMA buffer */
187 if ((error = bus_dmamem_alloc(dmatag, size,
188 2048, 0,
189 &seg, 1, &rseg, BUS_DMA_NOWAIT)) != 0) {
190 aprint_error_dev(sc->sc_dev, "DMA buffer alloc error %d\n",
191 error);
192 return;
193 }
194
195 /* Map DMA memory in CPU addressable space */
196 if ((error = bus_dmamem_map(dmatag, &seg, rseg, size,
197 &sc->sc_rb.rb_membase,
198 BUS_DMA_NOWAIT | BUS_DMA_COHERENT)) != 0) {
199 aprint_error_dev(sc->sc_dev, "DMA buffer map error %d\n",
200 error);
201 goto bad_free;
202 }
203
204 if ((error = bus_dmamap_create(dmatag, size, 1, size, 0,
205 BUS_DMA_NOWAIT, &sc->sc_dmamap)) != 0) {
206 aprint_error_dev(sc->sc_dev, "DMA map create error %d\n",
207 error);
208 goto bad_unmap;
209 }
210
211 /* Load the buffer */
212 if ((error = bus_dmamap_load(dmatag, sc->sc_dmamap,
213 sc->sc_rb.rb_membase, size, NULL,
214 BUS_DMA_NOWAIT | BUS_DMA_COHERENT)) != 0) {
215 aprint_error_dev(sc->sc_dev, "DMA buffer map load error %d\n",
216 error);
217 goto bad_destroy;
218 }
219 sc->sc_rb.rb_dmabase = sc->sc_dmamap->dm_segs[0].ds_addr;
220
221 aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
222 ether_sprintf(sc->sc_enaddr));
223
224 /* Initialize ifnet structure. */
225 strlcpy(ifp->if_xname, device_xname(sc->sc_dev), IFNAMSIZ);
226 ifp->if_softc = sc;
227 ifp->if_start = hme_start;
228 ifp->if_stop = hme_stop;
229 ifp->if_ioctl = hme_ioctl;
230 ifp->if_init = hme_init;
231 ifp->if_watchdog = hme_watchdog;
232 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
233 sc->sc_if_flags = ifp->if_flags;
234 ifp->if_capabilities |=
235 IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
236 IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx;
237 IFQ_SET_READY(&ifp->if_snd);
238
239 /* Initialize ifmedia structures and MII info */
240 mii->mii_ifp = ifp;
241 mii->mii_readreg = hme_mii_readreg;
242 mii->mii_writereg = hme_mii_writereg;
243 mii->mii_statchg = hme_mii_statchg;
244
245 sc->sc_ethercom.ec_mii = mii;
246 ifmedia_init(&mii->mii_media, 0, hme_mediachange, ether_mediastatus);
247
248 hme_mifinit(sc);
249
250 mii_attach(sc->sc_dev, mii, 0xffffffff,
251 MII_PHY_ANY, MII_OFFSET_ANY, MIIF_FORCEANEG);
252
253 child = LIST_FIRST(&mii->mii_phys);
254 if (child == NULL) {
255 /* No PHY attached */
256 ifmedia_add(&mii->mii_media, IFM_ETHER | IFM_MANUAL, 0, NULL);
257 ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_MANUAL);
258 } else {
259 /*
260 * Walk along the list of attached MII devices and
261 * establish an `MII instance' to `phy number'
262 * mapping. We'll use this mapping in media change
263 * requests to determine which phy to use to program
264 * the MIF configuration register.
265 */
266 for (; child != NULL; child = LIST_NEXT(child, mii_list)) {
267 /*
268 * Note: we support just two PHYs: the built-in
269 * internal device and an external on the MII
270 * connector.
271 */
272 if (child->mii_phy > 1 || child->mii_inst > 1) {
273 aprint_error_dev(sc->sc_dev,
274 "cannot accommodate MII device %s"
275 " at phy %d, instance %d\n",
276 device_xname(child->mii_dev),
277 child->mii_phy, child->mii_inst);
278 continue;
279 }
280
281 sc->sc_phys[child->mii_inst] = child->mii_phy;
282 }
283
284 /*
285 * Set the default media to auto negotiation if the phy has
286 * the auto negotiation capability.
287 * XXX; What to do otherwise?
288 */
289 if (ifmedia_match(&mii->mii_media, IFM_ETHER | IFM_AUTO, 0))
290 ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO);
291 /*
292 else
293 ifmedia_set(&sc->sc_mii.mii_media, sc->sc_defaultmedia);
294 */
295 }
296
297 /* claim 802.1q capability */
298 sc->sc_ethercom.ec_capabilities |= ETHERCAP_VLAN_MTU;
299
300 /* Attach the interface. */
301 if_attach(ifp);
302 if_deferred_start_init(ifp, NULL);
303 ether_ifattach(ifp, sc->sc_enaddr);
304
305 if (pmf_device_register1(sc->sc_dev, NULL, NULL, hme_shutdown))
306 pmf_class_network_register(sc->sc_dev, ifp);
307 else
308 aprint_error_dev(sc->sc_dev,
309 "couldn't establish power handler\n");
310
311 rnd_attach_source(&sc->rnd_source, device_xname(sc->sc_dev),
312 RND_TYPE_NET, RND_FLAG_DEFAULT);
313
314 callout_init(&sc->sc_tick_ch, 0);
315 callout_setfunc(&sc->sc_tick_ch, hme_tick, sc);
316
317 return;
318
319 bad_destroy:
320 bus_dmamap_destroy(dmatag, sc->sc_dmamap);
321 bad_unmap:
322 bus_dmamem_unmap(dmatag, sc->sc_rb.rb_membase, size);
323 bad_free:
324 bus_dmamem_free(dmatag, &seg, rseg);
325 }
326
327 void
328 hme_tick(void *arg)
329 {
330 struct hme_softc *sc = arg;
331 int s;
332
333 s = splnet();
334 mii_tick(&sc->sc_mii);
335 splx(s);
336
337 callout_schedule(&sc->sc_tick_ch, hz);
338 }
339
340 void
341 hme_reset(struct hme_softc *sc)
342 {
343 int s;
344
345 s = splnet();
346 (void)hme_init(&sc->sc_ethercom.ec_if);
347 splx(s);
348 }
349
350 void
351 hme_chipreset(struct hme_softc *sc)
352 {
353 bus_space_tag_t t = sc->sc_bustag;
354 bus_space_handle_t seb = sc->sc_seb;
355 int n;
356
357 /* Mask all interrupts */
358 bus_space_write_4(t, seb, HME_SEBI_IMASK, 0xffffffff);
359
360 /* Reset transmitter and receiver */
361 bus_space_write_4(t, seb, HME_SEBI_RESET,
362 (HME_SEB_RESET_ETX | HME_SEB_RESET_ERX));
363
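	/* Poll for the reset bits to self-clear: up to 20 tries at
	 * 20 usec apiece, i.e. roughly 400 usec worst case. */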
364 for (n = 0; n < 20; n++) {
365 uint32_t v = bus_space_read_4(t, seb, HME_SEBI_RESET);
366 if ((v & (HME_SEB_RESET_ETX | HME_SEB_RESET_ERX)) == 0)
367 return;
368 DELAY(20);
369 }
370
371 printf("%s: %s: reset failed\n", device_xname(sc->sc_dev), __func__);
372 }
373
374 void
375 hme_stop(struct ifnet *ifp, int disable)
376 {
377 struct hme_softc *sc;
378
379 sc = ifp->if_softc;
380
381 ifp->if_timer = 0;
382 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
383
384 callout_stop(&sc->sc_tick_ch);
385 mii_down(&sc->sc_mii);
386
387 hme_chipreset(sc);
388 }
389
390 void
391 hme_meminit(struct hme_softc *sc)
392 {
393 bus_addr_t txbufdma, rxbufdma;
394 bus_addr_t dma;
395 char *p;
396 unsigned int ntbuf, nrbuf, i;
397 struct hme_ring *hr = &sc->sc_rb;
398
399 p = hr->rb_membase;
400 dma = hr->rb_dmabase;
401
402 ntbuf = hr->rb_ntbuf;
403 nrbuf = hr->rb_nrbuf;
404
405 /*
406 * Allocate transmit descriptors
407 */
408 hr->rb_txd = p;
409 hr->rb_txddma = dma;
410 p += ntbuf * HME_XD_SIZE;
411 dma += ntbuf * HME_XD_SIZE;
412 /* We have reserved descriptor space until the next 2048 byte boundary. */
413 dma = (bus_addr_t)roundup((u_long)dma, 2048);
414 p = (void *)roundup((u_long)p, 2048);
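	/*
	 * Example: with _HME_NDESC == 128 and 8-byte descriptors the TX
	 * ring occupies 1024 bytes, so the roundup above skips the unused
	 * half of the reserved 2048-byte region.
	 */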
415
416 /*
417 * Allocate receive descriptors
418 */
419 hr->rb_rxd = p;
420 hr->rb_rxddma = dma;
421 p += nrbuf * HME_XD_SIZE;
422 dma += nrbuf * HME_XD_SIZE;
423 /* Again move forward to the next 2048 byte boundary. */
424 dma = (bus_addr_t)roundup((u_long)dma, 2048);
425 p = (void *)roundup((u_long)p, 2048);
426
427
428 /*
429 * Allocate transmit buffers
430 */
431 hr->rb_txbuf = p;
432 txbufdma = dma;
433 p += ntbuf * _HME_BUFSZ;
434 dma += ntbuf * _HME_BUFSZ;
435
436 /*
437 * Allocate receive buffers
438 */
439 hr->rb_rxbuf = p;
440 rxbufdma = dma;
441 p += nrbuf * _HME_BUFSZ;
442 dma += nrbuf * _HME_BUFSZ;
443
444 /*
445 * Initialize transmit buffer descriptors
446 */
447 for (i = 0; i < ntbuf; i++) {
448 HME_XD_SETADDR(sc->sc_pci, hr->rb_txd, i, txbufdma + i * _HME_BUFSZ);
449 HME_XD_SETFLAGS(sc->sc_pci, hr->rb_txd, i, 0);
450 }
451
452 /*
453 * Initialize receive buffer descriptors
454 */
455 for (i = 0; i < nrbuf; i++) {
456 HME_XD_SETADDR(sc->sc_pci, hr->rb_rxd, i, rxbufdma + i * _HME_BUFSZ);
457 HME_XD_SETFLAGS(sc->sc_pci, hr->rb_rxd, i,
458 HME_XD_OWN | HME_XD_ENCODE_RSIZE(_HME_BUFSZ));
459 }
460
461 hr->rb_tdhead = hr->rb_tdtail = 0;
462 hr->rb_td_nbusy = 0;
463 hr->rb_rdtail = 0;
464 }
465
466 /*
467 * Initialization of interface; set up initialization block
468 * and transmit/receive descriptor rings.
469 */
470 int
471 hme_init(struct ifnet *ifp)
472 {
473 struct hme_softc *sc = ifp->if_softc;
474 bus_space_tag_t t = sc->sc_bustag;
475 bus_space_handle_t seb = sc->sc_seb;
476 bus_space_handle_t etx = sc->sc_etx;
477 bus_space_handle_t erx = sc->sc_erx;
478 bus_space_handle_t mac = sc->sc_mac;
479 uint8_t *ea;
480 uint32_t v;
481 int rc;
482
483 /*
484 * Initialization sequence. The numbered steps below correspond
485 * to the sequence outlined in section 6.3.5.1 in the Ethernet
486 * Channel Engine manual (part of the PCIO manual).
487 * See also the STP2002-STQ document from Sun Microsystems.
488 */
489
490 /* step 1 & 2. Reset the Ethernet Channel */
491 hme_stop(ifp, 0);
492
493 /* Re-initialize the MIF */
494 hme_mifinit(sc);
495
496 /* Call MI reset function if any */
497 if (sc->sc_hwreset)
498 (*sc->sc_hwreset)(sc);
499
500 #if 0
501 /* Mask all MIF interrupts, just in case */
502 bus_space_write_4(t, mif, HME_MIFI_IMASK, 0xffff);
503 #endif
504
505 /* step 3. Setup data structures in host memory */
506 hme_meminit(sc);
507
508 /* step 4. TX MAC registers & counters */
509 bus_space_write_4(t, mac, HME_MACI_NCCNT, 0);
510 bus_space_write_4(t, mac, HME_MACI_FCCNT, 0);
511 bus_space_write_4(t, mac, HME_MACI_EXCNT, 0);
512 bus_space_write_4(t, mac, HME_MACI_LTCNT, 0);
513 bus_space_write_4(t, mac, HME_MACI_TXSIZE,
514 (sc->sc_ethercom.ec_capenable & ETHERCAP_VLAN_MTU) ?
515 ETHER_VLAN_ENCAP_LEN + ETHER_MAX_LEN : ETHER_MAX_LEN);
516 sc->sc_ec_capenable = sc->sc_ethercom.ec_capenable;
517
518 /* Load station MAC address */
519 ea = sc->sc_enaddr;
520 bus_space_write_4(t, mac, HME_MACI_MACADDR0, (ea[0] << 8) | ea[1]);
521 bus_space_write_4(t, mac, HME_MACI_MACADDR1, (ea[2] << 8) | ea[3]);
522 bus_space_write_4(t, mac, HME_MACI_MACADDR2, (ea[4] << 8) | ea[5]);
523
524 /*
525 * Init seed for backoff
526 * (source suggested by manual: low 10 bits of MAC address)
527 */
528 v = ((ea[4] << 8) | ea[5]) & 0x3fff;
529 bus_space_write_4(t, mac, HME_MACI_RANDSEED, v);
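	/* (The 0x3fff mask actually retains 14 bits; presumably the
	 * hardware uses only the low 10 of them.) */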
530
531
532 /* Note: Accepting power-on default for other MAC registers here.. */
533
534
535 /* step 5. RX MAC registers & counters */
536 hme_setladrf(sc);
537
538 /* step 6 & 7. Program Descriptor Ring Base Addresses */
539 bus_space_write_4(t, etx, HME_ETXI_RING, sc->sc_rb.rb_txddma);
540 bus_space_write_4(t, etx, HME_ETXI_RSIZE, sc->sc_rb.rb_ntbuf);
541
542 bus_space_write_4(t, erx, HME_ERXI_RING, sc->sc_rb.rb_rxddma);
543 bus_space_write_4(t, mac, HME_MACI_RXSIZE,
544 (sc->sc_ethercom.ec_capenable & ETHERCAP_VLAN_MTU) ?
545 ETHER_VLAN_ENCAP_LEN + ETHER_MAX_LEN : ETHER_MAX_LEN);
546
547 /* step 8. Global Configuration & Interrupt Mask */
548 bus_space_write_4(t, seb, HME_SEBI_IMASK,
549 ~(
550 /*HME_SEB_STAT_GOTFRAME | HME_SEB_STAT_SENTFRAME |*/
551 HME_SEB_STAT_HOSTTOTX |
552 HME_SEB_STAT_RXTOHOST |
553 HME_SEB_STAT_TXALL |
554 HME_SEB_STAT_TXPERR |
555 HME_SEB_STAT_RCNTEXP |
556 HME_SEB_STAT_MIFIRQ |
557 HME_SEB_STAT_ALL_ERRORS ));
558
559 switch (sc->sc_burst) {
560 default:
561 v = 0;
562 break;
563 case 16:
564 v = HME_SEB_CFG_BURST16;
565 break;
566 case 32:
567 v = HME_SEB_CFG_BURST32;
568 break;
569 case 64:
570 v = HME_SEB_CFG_BURST64;
571 break;
572 }
573 bus_space_write_4(t, seb, HME_SEBI_CFG, v);
574
575 /* step 9. ETX Configuration: use mostly default values */
576
577 /* Enable DMA */
578 v = bus_space_read_4(t, etx, HME_ETXI_CFG);
579 v |= HME_ETX_CFG_DMAENABLE;
580 bus_space_write_4(t, etx, HME_ETXI_CFG, v);
581
582 /* Transmit Descriptor ring size: in increments of 16 */
583 bus_space_write_4(t, etx, HME_ETXI_RSIZE, _HME_NDESC / 16 - 1);
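	/* E.g. _HME_NDESC == 128 is written as 128 / 16 - 1 == 7. */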
584
585
586 /* step 10. ERX Configuration */
587 v = bus_space_read_4(t, erx, HME_ERXI_CFG);
588
589 /* Encode Receive Descriptor ring size: four possible values */
590 switch (_HME_NDESC /*XXX*/) {
591 case 32:
592 v |= HME_ERX_CFG_RINGSIZE32;
593 break;
594 case 64:
595 v |= HME_ERX_CFG_RINGSIZE64;
596 break;
597 case 128:
598 v |= HME_ERX_CFG_RINGSIZE128;
599 break;
600 case 256:
601 v |= HME_ERX_CFG_RINGSIZE256;
602 break;
603 default:
604 printf("hme: invalid Receive Descriptor ring size\n");
605 break;
606 }
607
608 /* Enable DMA */
609 v |= HME_ERX_CFG_DMAENABLE;
610
611 /* set h/w rx checksum start offset (# of half-words) */
612 #ifdef INET
613 v |= (((ETHER_HDR_LEN + sizeof(struct ip)) / sizeof(uint16_t))
614 << HME_ERX_CFG_CSUMSHIFT) &
615 HME_ERX_CFG_CSUMSTART;
616 #endif
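	/*
	 * E.g. ETHER_HDR_LEN (14) plus a minimal IPv4 header (20) is
	 * 34 bytes == 17 half-words, so hardware checksumming starts
	 * right after a plain IPv4 header.
	 */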
617 bus_space_write_4(t, erx, HME_ERXI_CFG, v);
618
619 /* step 11. XIF Configuration */
620 v = bus_space_read_4(t, mac, HME_MACI_XIF);
621 v |= HME_MAC_XIF_OE;
622 bus_space_write_4(t, mac, HME_MACI_XIF, v);
623
624 /* step 12. RX_MAC Configuration Register */
625 v = bus_space_read_4(t, mac, HME_MACI_RXCFG);
626 v |= HME_MAC_RXCFG_ENABLE | HME_MAC_RXCFG_PSTRIP;
627 bus_space_write_4(t, mac, HME_MACI_RXCFG, v);
628
629 /* step 13. TX_MAC Configuration Register */
630 v = bus_space_read_4(t, mac, HME_MACI_TXCFG);
631 v |= (HME_MAC_TXCFG_ENABLE | HME_MAC_TXCFG_DGIVEUP);
632 bus_space_write_4(t, mac, HME_MACI_TXCFG, v);
633
634 /* step 14. Issue Transmit Pending command */
635
636 /* Call MI initialization function if any */
637 if (sc->sc_hwinit)
638 (*sc->sc_hwinit)(sc);
639
640 /* Set the current media. */
641 if ((rc = hme_mediachange(ifp)) != 0)
642 return rc;
643
644 /* Start the one second timer. */
645 callout_schedule(&sc->sc_tick_ch, hz);
646
647 ifp->if_flags |= IFF_RUNNING;
648 ifp->if_flags &= ~IFF_OACTIVE;
649 sc->sc_if_flags = ifp->if_flags;
650 ifp->if_timer = 0;
651 hme_start(ifp);
652 return 0;
653 }
654
655 /*
656 * Routine to copy from mbuf chain to transmit buffer in
657 * network buffer memory.
658 * Returns the amount of data copied.
659 */
660 int
661 hme_put(struct hme_softc *sc, int ri, struct mbuf *m)
662 /* ri: Ring index */
663 {
664 struct mbuf *n;
665 int len, tlen = 0;
666 char *bp;
667
668 bp = (char *)sc->sc_rb.rb_txbuf + (ri % sc->sc_rb.rb_ntbuf) * _HME_BUFSZ;
669 for (; m; m = n) {
670 len = m->m_len;
671 if (len == 0) {
672 n = m_free(m);
673 continue;
674 }
675 memcpy(bp, mtod(m, void *), len);
676 bp += len;
677 tlen += len;
678 n = m_free(m);
679 }
680 return (tlen);
681 }
682
683 /*
684 * Pull data off an interface.
685 * Len is length of data, with local net header stripped.
686 * We copy the data into mbufs. When full cluster sized units are present
687 * we copy into clusters.
688 */
689 struct mbuf *
690 hme_get(struct hme_softc *sc, int ri, uint32_t flags)
691 {
692 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
693 struct mbuf *m, *m0, *newm;
694 char *bp;
695 int len, totlen;
696 #ifdef INET
697 int csum_flags;
698 #endif
699
700 totlen = HME_XD_DECODE_RSIZE(flags);
701 MGETHDR(m0, M_DONTWAIT, MT_DATA);
702 if (m0 == NULL)
703 return (NULL);
704 MCLAIM(m0, &sc->sc_ethercom.ec_rx_mowner);
705 m_set_rcvif(m0, ifp);
706 m0->m_pkthdr.len = totlen;
707 len = MHLEN;
708 m = m0;
709
710 bp = (char *)sc->sc_rb.rb_rxbuf + (ri % sc->sc_rb.rb_nrbuf) * _HME_BUFSZ;
711
712 while (totlen > 0) {
713 if (totlen >= MINCLSIZE) {
714 MCLGET(m, M_DONTWAIT);
715 if ((m->m_flags & M_EXT) == 0)
716 goto bad;
717 len = MCLBYTES;
718 }
719
720 if (m == m0) {
721 char *newdata = (char *)
722 ALIGN(m->m_data + sizeof(struct ether_header)) -
723 sizeof(struct ether_header);
724 len -= newdata - m->m_data;
725 m->m_data = newdata;
726 }
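		/*
		 * The adjustment above places the data so that the payload
		 * following the 14-byte Ethernet header falls on an ALIGN()ed
		 * boundary, keeping the IP header naturally aligned.
		 */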
727
728 m->m_len = len = uimin(totlen, len);
729 memcpy(mtod(m, void *), bp, len);
730 bp += len;
731
732 totlen -= len;
733 if (totlen > 0) {
734 MGET(newm, M_DONTWAIT, MT_DATA);
735 if (newm == NULL)
736 goto bad;
737 len = MLEN;
738 m = m->m_next = newm;
739 }
740 }
741
742 #ifdef INET
743 /* hardware checksum */
744 csum_flags = 0;
745 if (ifp->if_csum_flags_rx & (M_CSUM_TCPv4 | M_CSUM_UDPv4)) {
746 struct ether_header *eh;
747 struct ether_vlan_header *evh;
748 struct ip *ip;
749 struct udphdr *uh;
750 uint16_t *opts;
751 int32_t hlen, pktlen;
752 uint32_t csum_data;
753
754 eh = mtod(m0, struct ether_header *);
755 if (ntohs(eh->ether_type) == ETHERTYPE_IP) {
756 ip = (struct ip *)((char *)eh + ETHER_HDR_LEN);
757 pktlen = m0->m_pkthdr.len - ETHER_HDR_LEN;
758 } else if (ntohs(eh->ether_type) == ETHERTYPE_VLAN) {
759 evh = (struct ether_vlan_header *)eh;
760 if (ntohs(evh->evl_proto) != ETHERTYPE_IP)
761 goto swcsum;
762 ip = (struct ip *)((char *)eh + ETHER_HDR_LEN +
763 ETHER_VLAN_ENCAP_LEN);
764 pktlen = m0->m_pkthdr.len -
765 ETHER_HDR_LEN - ETHER_VLAN_ENCAP_LEN;
766 } else
767 goto swcsum;
768
769 /* IPv4 only */
770 if (ip->ip_v != IPVERSION)
771 goto swcsum;
772
773 hlen = ip->ip_hl << 2;
774 if (hlen < sizeof(struct ip))
775 goto swcsum;
776
777 /*
778 * bail if too short, has random trailing garbage, truncated,
779 * fragment, or has ethernet pad.
780 */
781 if (ntohs(ip->ip_len) < hlen ||
782 ntohs(ip->ip_len) != pktlen ||
783 (ntohs(ip->ip_off) & (IP_MF | IP_OFFMASK)) != 0)
784 goto swcsum;
785
786 switch (ip->ip_p) {
787 case IPPROTO_TCP:
788 if ((ifp->if_csum_flags_rx & M_CSUM_TCPv4) == 0)
789 goto swcsum;
790 if (pktlen < (hlen + sizeof(struct tcphdr)))
791 goto swcsum;
792 csum_flags =
793 M_CSUM_TCPv4 | M_CSUM_DATA | M_CSUM_NO_PSEUDOHDR;
794 break;
795 case IPPROTO_UDP:
796 if ((ifp->if_csum_flags_rx & M_CSUM_UDPv4) == 0)
797 goto swcsum;
798 if (pktlen < (hlen + sizeof(struct udphdr)))
799 goto swcsum;
800 uh = (struct udphdr *)((char *)ip + hlen);
801 /* no checksum */
802 if (uh->uh_sum == 0)
803 goto swcsum;
804 csum_flags =
805 M_CSUM_UDPv4 | M_CSUM_DATA | M_CSUM_NO_PSEUDOHDR;
806 break;
807 default:
808 goto swcsum;
809 }
810
811 /* w/ M_CSUM_NO_PSEUDOHDR, the uncomplemented sum is expected */
812 csum_data = ~flags & HME_XD_RXCKSUM;
813
814 /*
815 * If data offset is different from RX cksum start offset,
816 * we have to deduct them.
817 */
818 hlen = ((char *)ip + hlen) -
819 ((char *)eh + ETHER_HDR_LEN + sizeof(struct ip));
820 if (hlen > 1) {
821 uint32_t optsum;
822
823 optsum = 0;
824 opts = (uint16_t *)((char *)eh +
825 ETHER_HDR_LEN + sizeof(struct ip));
826
827 while (hlen > 1) {
828 optsum += ntohs(*opts++);
829 hlen -= 2;
830 }
831 while (optsum >> 16)
832 optsum = (optsum >> 16) + (optsum & 0xffff);
833
834 /* Deduct the ip opts sum from the hwsum. */
835 csum_data += (uint16_t)~optsum;
836
837 while (csum_data >> 16)
838 csum_data =
839 (csum_data >> 16) + (csum_data & 0xffff);
840 }
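		/*
		 * One's-complement note: adding ~optsum (with the carry
		 * folds above) subtracts the IP option words, which the
		 * hardware summed because it started right after the
		 * minimal 20-byte header.  E.g. an option sum of 0x1234
		 * is removed by adding 0xedcb and folding the carries.
		 */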
841 m0->m_pkthdr.csum_data = csum_data;
842 }
843 swcsum:
844 m0->m_pkthdr.csum_flags = csum_flags;
845 #endif
846
847 return (m0);
848
849 bad:
850 m_freem(m0);
851 return (NULL);
852 }
853
854 /*
855 * Pass a packet to the higher levels.
856 */
857 void
858 hme_read(struct hme_softc *sc, int ix, uint32_t flags)
859 {
860 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
861 struct mbuf *m;
862 int len;
863
864 len = HME_XD_DECODE_RSIZE(flags);
865 if (len <= sizeof(struct ether_header) ||
866 len > ((sc->sc_ethercom.ec_capenable & ETHERCAP_VLAN_MTU) ?
867 ETHER_VLAN_ENCAP_LEN + ETHERMTU + sizeof(struct ether_header) :
868 ETHERMTU + sizeof(struct ether_header))) {
869 #ifdef HMEDEBUG
870 printf("%s: invalid packet size %d; dropping\n",
871 device_xname(sc->sc_dev), len);
872 #endif
873 if_statinc(ifp, if_ierrors);
874 return;
875 }
876
877 /* Pull packet off interface. */
878 m = hme_get(sc, ix, flags);
879 if (m == NULL) {
880 if_statinc(ifp, if_ierrors);
881 return;
882 }
883
884 /* Pass the packet up. */
885 if_percpuq_enqueue(ifp->if_percpuq, m);
886 }
887
888 void
889 hme_start(struct ifnet *ifp)
890 {
891 struct hme_softc *sc = ifp->if_softc;
892 void *txd = sc->sc_rb.rb_txd;
893 struct mbuf *m;
894 unsigned int txflags;
895 unsigned int ri, len, obusy;
896 unsigned int ntbuf = sc->sc_rb.rb_ntbuf;
897
898 if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
899 return;
900
901 ri = sc->sc_rb.rb_tdhead;
902 obusy = sc->sc_rb.rb_td_nbusy;
903
904 for (;;) {
905 IFQ_DEQUEUE(&ifp->if_snd, m);
906 if (m == NULL)
907 break;
908
909 /*
910 * If BPF is listening on this interface, let it see the
911 * packet before we commit it to the wire.
912 */
913 bpf_mtap(ifp, m, BPF_D_OUT);
914
915 #ifdef INET
916 /* collect bits for h/w csum, before hme_put frees the mbuf */
917 if (ifp->if_csum_flags_tx & (M_CSUM_TCPv4 | M_CSUM_UDPv4) &&
918 m->m_pkthdr.csum_flags & (M_CSUM_TCPv4 | M_CSUM_UDPv4)) {
919 struct ether_header *eh;
920 uint16_t offset, start;
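			/*
			 * `start' becomes the byte offset of the TCP/UDP
			 * header, where hardware checksumming begins;
			 * `offset' is where the computed sum is stuffed.
			 */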
921
922 eh = mtod(m, struct ether_header *);
923 switch (ntohs(eh->ether_type)) {
924 case ETHERTYPE_IP:
925 start = ETHER_HDR_LEN;
926 break;
927 case ETHERTYPE_VLAN:
928 start = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
929 break;
930 default:
931 /* unsupported, drop it */
932 m_free(m);
933 continue;
934 }
935 start += M_CSUM_DATA_IPv4_IPHL(m->m_pkthdr.csum_data);
936 offset = M_CSUM_DATA_IPv4_OFFSET(m->m_pkthdr.csum_data)
937 + start;
938 txflags = HME_XD_TXCKSUM |
939 (offset << HME_XD_TXCSSTUFFSHIFT) |
940 (start << HME_XD_TXCSSTARTSHIFT);
941 } else
942 #endif
943 txflags = 0;
944
945 /*
946 * Copy the mbuf chain into the transmit buffer.
947 */
948 len = hme_put(sc, ri, m);
949
950 /*
951 * Initialize transmit registers and start transmission
952 */
953 HME_XD_SETFLAGS(sc->sc_pci, txd, ri,
954 HME_XD_OWN | HME_XD_SOP | HME_XD_EOP |
955 HME_XD_ENCODE_TSIZE(len) | txflags);
956
957 /*if (sc->sc_rb.rb_td_nbusy <= 0)*/
958 bus_space_write_4(sc->sc_bustag, sc->sc_etx, HME_ETXI_PENDING,
959 HME_ETX_TP_DMAWAKEUP);
960
961 if (++ri == ntbuf)
962 ri = 0;
963
964 if (++sc->sc_rb.rb_td_nbusy == ntbuf) {
965 ifp->if_flags |= IFF_OACTIVE;
966 break;
967 }
968 }
969
970 if (obusy != sc->sc_rb.rb_td_nbusy) {
971 sc->sc_rb.rb_tdhead = ri;
972 ifp->if_timer = 5;
973 }
974 }
975
976 /*
977 * Transmit interrupt.
978 */
979 int
980 hme_tint(struct hme_softc *sc)
981 {
982 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
983 bus_space_tag_t t = sc->sc_bustag;
984 bus_space_handle_t mac = sc->sc_mac;
985 unsigned int ri, txflags;
986
987 net_stat_ref_t nsr = IF_STAT_GETREF(ifp);
988
989 /*
990 * Unload collision counters
991 */
992 if_statadd_ref(nsr, if_collisions,
993 bus_space_read_4(t, mac, HME_MACI_NCCNT) +
994 bus_space_read_4(t, mac, HME_MACI_FCCNT));
995 if_statadd_ref(nsr, if_oerrors,
996 bus_space_read_4(t, mac, HME_MACI_EXCNT) +
997 bus_space_read_4(t, mac, HME_MACI_LTCNT));
998
999 /*
1000 * then clear the hardware counters.
1001 */
1002 bus_space_write_4(t, mac, HME_MACI_NCCNT, 0);
1003 bus_space_write_4(t, mac, HME_MACI_FCCNT, 0);
1004 bus_space_write_4(t, mac, HME_MACI_EXCNT, 0);
1005 bus_space_write_4(t, mac, HME_MACI_LTCNT, 0);
1006
1007 /* Fetch current position in the transmit ring */
1008 ri = sc->sc_rb.rb_tdtail;
1009
1010 for (;;) {
1011 if (sc->sc_rb.rb_td_nbusy <= 0)
1012 break;
1013
1014 txflags = HME_XD_GETFLAGS(sc->sc_pci, sc->sc_rb.rb_txd, ri);
1015
1016 if (txflags & HME_XD_OWN)
1017 break;
1018
1019 ifp->if_flags &= ~IFF_OACTIVE;
1020 if_statinc_ref(nsr, if_opackets);
1021
1022 if (++ri == sc->sc_rb.rb_ntbuf)
1023 ri = 0;
1024
1025 --sc->sc_rb.rb_td_nbusy;
1026 }
1027
1028 IF_STAT_PUTREF(ifp);
1029
1030 /* Update ring */
1031 sc->sc_rb.rb_tdtail = ri;
1032
1033 if_schedule_deferred_start(ifp);
1034
1035 if (sc->sc_rb.rb_td_nbusy == 0)
1036 ifp->if_timer = 0;
1037
1038 return (1);
1039 }
1040
1041 /*
1042 * Receive interrupt.
1043 */
1044 int
1045 hme_rint(struct hme_softc *sc)
1046 {
1047 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
1048 bus_space_tag_t t = sc->sc_bustag;
1049 bus_space_handle_t mac = sc->sc_mac;
1050 void *xdr = sc->sc_rb.rb_rxd;
1051 unsigned int nrbuf = sc->sc_rb.rb_nrbuf;
1052 unsigned int ri;
1053 uint32_t flags;
1054
1055 ri = sc->sc_rb.rb_rdtail;
1056
1057 /*
1058 * Process all buffers with valid data.
1059 */
1060 for (;;) {
1061 flags = HME_XD_GETFLAGS(sc->sc_pci, xdr, ri);
1062 if (flags & HME_XD_OWN)
1063 break;
1064
1065 if (flags & HME_XD_OFL) {
1066 printf("%s: buffer overflow, ri=%d; flags=0x%x\n",
1067 device_xname(sc->sc_dev), ri, flags);
1068 } else
1069 hme_read(sc, ri, flags);
1070
1071 /* This buffer can be used by the hardware again */
1072 HME_XD_SETFLAGS(sc->sc_pci, xdr, ri,
1073 HME_XD_OWN | HME_XD_ENCODE_RSIZE(_HME_BUFSZ));
1074
1075 if (++ri == nrbuf)
1076 ri = 0;
1077 }
1078
1079 sc->sc_rb.rb_rdtail = ri;
1080
1081 /* Read error counters ... */
1082 if_statadd(ifp, if_ierrors,
1083 bus_space_read_4(t, mac, HME_MACI_STAT_LCNT) +
1084 bus_space_read_4(t, mac, HME_MACI_STAT_ACNT) +
1085 bus_space_read_4(t, mac, HME_MACI_STAT_CCNT) +
1086 bus_space_read_4(t, mac, HME_MACI_STAT_CVCNT));
1087
1088 /* ... then clear the hardware counters. */
1089 bus_space_write_4(t, mac, HME_MACI_STAT_LCNT, 0);
1090 bus_space_write_4(t, mac, HME_MACI_STAT_ACNT, 0);
1091 bus_space_write_4(t, mac, HME_MACI_STAT_CCNT, 0);
1092 bus_space_write_4(t, mac, HME_MACI_STAT_CVCNT, 0);
1093 return (1);
1094 }
1095
1096 int
1097 hme_eint(struct hme_softc *sc, u_int status)
1098 {
1099 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
1100 char bits[128];
1101
1102 if ((status & HME_SEB_STAT_MIFIRQ) != 0) {
1103 bus_space_tag_t t = sc->sc_bustag;
1104 bus_space_handle_t mif = sc->sc_mif;
1105 uint32_t cf, st, sm;
1106 cf = bus_space_read_4(t, mif, HME_MIFI_CFG);
1107 st = bus_space_read_4(t, mif, HME_MIFI_STAT);
1108 sm = bus_space_read_4(t, mif, HME_MIFI_SM);
1109 printf("%s: XXXlink status changed: cfg=%x, stat %x, sm %x\n",
1110 device_xname(sc->sc_dev), cf, st, sm);
1111 return (1);
1112 }
1113
1114 /* Receive error counters rolled over */
1115 net_stat_ref_t nsr = IF_STAT_GETREF(ifp);
1116 if (status & HME_SEB_STAT_ACNTEXP)
1117 if_statadd_ref(nsr, if_ierrors, 0xff);
1118 if (status & HME_SEB_STAT_CCNTEXP)
1119 if_statadd_ref(nsr, if_ierrors, 0xff);
1120 if (status & HME_SEB_STAT_LCNTEXP)
1121 if_statadd_ref(nsr, if_ierrors, 0xff);
1122 if (status & HME_SEB_STAT_CVCNTEXP)
1123 if_statadd_ref(nsr, if_ierrors, 0xff);
1124 IF_STAT_PUTREF(ifp);
1125
1126 /* RXTERR locks up the interface, so do a reset */
1127 if (status & HME_SEB_STAT_RXTERR)
1128 hme_reset(sc);
1129
1130 snprintb(bits, sizeof(bits), HME_SEB_STAT_BITS, status);
1131 printf("%s: status=%s\n", device_xname(sc->sc_dev), bits);
1132
1133 return (1);
1134 }
1135
1136 int
1137 hme_intr(void *v)
1138 {
1139 struct hme_softc *sc = v;
1140 bus_space_tag_t t = sc->sc_bustag;
1141 bus_space_handle_t seb = sc->sc_seb;
1142 uint32_t status;
1143 int r = 0;
1144
1145 status = bus_space_read_4(t, seb, HME_SEBI_STAT);
1146
1147 if ((status & HME_SEB_STAT_ALL_ERRORS) != 0)
1148 r |= hme_eint(sc, status);
1149
1150 if ((status & (HME_SEB_STAT_TXALL | HME_SEB_STAT_HOSTTOTX)) != 0)
1151 r |= hme_tint(sc);
1152
1153 if ((status & HME_SEB_STAT_RXTOHOST) != 0)
1154 r |= hme_rint(sc);
1155
1156 rnd_add_uint32(&sc->rnd_source, status);
1157
1158 return (r);
1159 }
1160
1161
1162 void
1163 hme_watchdog(struct ifnet *ifp)
1164 {
1165 struct hme_softc *sc = ifp->if_softc;
1166
1167 log(LOG_ERR, "%s: device timeout\n", device_xname(sc->sc_dev));
1168 if_statinc(ifp, if_oerrors);
1169
1170 hme_reset(sc);
1171 }
1172
1173 /*
1174 * Initialize the MII Management Interface
1175 */
1176 void
1177 hme_mifinit(struct hme_softc *sc)
1178 {
1179 bus_space_tag_t t = sc->sc_bustag;
1180 bus_space_handle_t mif = sc->sc_mif;
1181 bus_space_handle_t mac = sc->sc_mac;
1182 int instance, phy;
1183 uint32_t v;
1184
1185 if (sc->sc_mii.mii_media.ifm_cur != NULL) {
1186 instance = IFM_INST(sc->sc_mii.mii_media.ifm_cur->ifm_media);
1187 phy = sc->sc_phys[instance];
1188 } else
1189 /* No media set yet, pick phy arbitrarily.. */
1190 phy = HME_PHYAD_EXTERNAL;
1191
1192 /* Configure the MIF in frame mode, no poll, current phy select */
1193 v = 0;
1194 if (phy == HME_PHYAD_EXTERNAL)
1195 v |= HME_MIF_CFG_PHY;
1196 bus_space_write_4(t, mif, HME_MIFI_CFG, v);
1197
1198 /* If an external transceiver is selected, enable its MII drivers */
1199 v = bus_space_read_4(t, mac, HME_MACI_XIF);
1200 v &= ~HME_MAC_XIF_MIIENABLE;
1201 if (phy == HME_PHYAD_EXTERNAL)
1202 v |= HME_MAC_XIF_MIIENABLE;
1203 bus_space_write_4(t, mac, HME_MACI_XIF, v);
1204 }
1205
1206 /*
1207 * MII interface
1208 */
1209 static int
1210 hme_mii_readreg(device_t self, int phy, int reg, uint16_t *val)
1211 {
1212 struct hme_softc *sc = device_private(self);
1213 bus_space_tag_t t = sc->sc_bustag;
1214 bus_space_handle_t mif = sc->sc_mif;
1215 bus_space_handle_t mac = sc->sc_mac;
1216 uint32_t v, xif_cfg, mifi_cfg;
1217 int n, rv;
1218
1219 /* We can at most have two PHYs */
1220 if (phy != HME_PHYAD_EXTERNAL && phy != HME_PHYAD_INTERNAL)
1221 return -1;
1222
1223 /* Select the desired PHY in the MIF configuration register */
1224 v = mifi_cfg = bus_space_read_4(t, mif, HME_MIFI_CFG);
1225 v &= ~HME_MIF_CFG_PHY;
1226 if (phy == HME_PHYAD_EXTERNAL)
1227 v |= HME_MIF_CFG_PHY;
1228 bus_space_write_4(t, mif, HME_MIFI_CFG, v);
1229
1230 /* Enable MII drivers on external transceiver */
1231 v = xif_cfg = bus_space_read_4(t, mac, HME_MACI_XIF);
1232 if (phy == HME_PHYAD_EXTERNAL)
1233 v |= HME_MAC_XIF_MIIENABLE;
1234 else
1235 v &= ~HME_MAC_XIF_MIIENABLE;
1236 bus_space_write_4(t, mac, HME_MACI_XIF, v);
1237
1238 #if 0
1239 /* This doesn't work reliably; the MDIO_1 bit is off most of the time */
1240 /*
1241 * Check whether a transceiver is connected by testing
1242 * the MIF configuration register's MDI_X bits. Note that
1243 * MDI_0 (int) == 0x100 and MDI_1 (ext) == 0x200; see hmereg.h
1244 */
1245 mif_mdi_bit = 1 << (8 + (1 - phy));
1246 delay(100);
1247 v = bus_space_read_4(t, mif, HME_MIFI_CFG);
1248 if ((v & mif_mdi_bit) == 0) {
1249 rv = -1;
1250 goto out;
1251 }
1252 #endif
1253
1254 /* Construct the frame command */
1255 v = (MII_COMMAND_START << HME_MIF_FO_ST_SHIFT) |
1256 HME_MIF_FO_TAMSB |
1257 (MII_COMMAND_READ << HME_MIF_FO_OPC_SHIFT) |
1258 (phy << HME_MIF_FO_PHYAD_SHIFT) |
1259 (reg << HME_MIF_FO_REGAD_SHIFT);
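	/*
	 * This follows the IEEE 802.3 clause 22 MII frame layout: start
	 * sequence, opcode, 5-bit PHY address, 5-bit register address,
	 * then a turnaround before 16 bits of data in HME_MIF_FO_DATA.
	 */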
1260
1261 bus_space_write_4(t, mif, HME_MIFI_FO, v);
1262 for (n = 0; n < 100; n++) {
1263 DELAY(1);
1264 v = bus_space_read_4(t, mif, HME_MIFI_FO);
1265 if (v & HME_MIF_FO_TALSB) {
1266 *val = v & HME_MIF_FO_DATA;
1267 rv = 0;
1268 goto out;
1269 }
1270 }
1271
1272 rv = ETIMEDOUT;
1273 printf("%s: mii_read timeout\n", device_xname(sc->sc_dev));
1274
1275 out:
1276 /* Restore MIFI_CFG register */
1277 bus_space_write_4(t, mif, HME_MIFI_CFG, mifi_cfg);
1278 /* Restore XIF register */
1279 bus_space_write_4(t, mac, HME_MACI_XIF, xif_cfg);
1280 return rv;
1281 }
1282
1283 static int
1284 hme_mii_writereg(device_t self, int phy, int reg, uint16_t val)
1285 {
1286 struct hme_softc *sc = device_private(self);
1287 bus_space_tag_t t = sc->sc_bustag;
1288 bus_space_handle_t mif = sc->sc_mif;
1289 bus_space_handle_t mac = sc->sc_mac;
1290 uint32_t v, xif_cfg, mifi_cfg;
1291 int n, rv;
1292
1293 /* We can at most have two PHYs */
1294 if (phy != HME_PHYAD_EXTERNAL && phy != HME_PHYAD_INTERNAL)
1295 return -1;
1296
1297 /* Select the desired PHY in the MIF configuration register */
1298 v = mifi_cfg = bus_space_read_4(t, mif, HME_MIFI_CFG);
1299 v &= ~HME_MIF_CFG_PHY;
1300 if (phy == HME_PHYAD_EXTERNAL)
1301 v |= HME_MIF_CFG_PHY;
1302 bus_space_write_4(t, mif, HME_MIFI_CFG, v);
1303
1304 /* Enable MII drivers on external transceiver */
1305 v = xif_cfg = bus_space_read_4(t, mac, HME_MACI_XIF);
1306 if (phy == HME_PHYAD_EXTERNAL)
1307 v |= HME_MAC_XIF_MIIENABLE;
1308 else
1309 v &= ~HME_MAC_XIF_MIIENABLE;
1310 bus_space_write_4(t, mac, HME_MACI_XIF, v);
1311
1312 #if 0
1313 /* This doesn't work reliably; the MDIO_1 bit is off most of the time */
1314 /*
1315 * Check whether a transceiver is connected by testing
1316 * the MIF configuration register's MDI_X bits. Note that
1317 * MDI_0 (int) == 0x100 and MDI_1 (ext) == 0x200; see hmereg.h
1318 */
1319 mif_mdi_bit = 1 << (8 + (1 - phy));
1320 delay(100);
1321 v = bus_space_read_4(t, mif, HME_MIFI_CFG);
1322 if ((v & mif_mdi_bit) == 0) {
1323 rv = -1;
1324 goto out;
1325 }
1326 #endif
1327
1328 /* Construct the frame command */
1329 v = (MII_COMMAND_START << HME_MIF_FO_ST_SHIFT) |
1330 HME_MIF_FO_TAMSB |
1331 (MII_COMMAND_WRITE << HME_MIF_FO_OPC_SHIFT) |
1332 (phy << HME_MIF_FO_PHYAD_SHIFT) |
1333 (reg << HME_MIF_FO_REGAD_SHIFT) |
1334 (val & HME_MIF_FO_DATA);
1335
1336 bus_space_write_4(t, mif, HME_MIFI_FO, v);
1337 for (n = 0; n < 100; n++) {
1338 DELAY(1);
1339 v = bus_space_read_4(t, mif, HME_MIFI_FO);
1340 if (v & HME_MIF_FO_TALSB) {
1341 rv = 0;
1342 goto out;
1343 }
1344 }
1345
1346 rv = ETIMEDOUT;
1347 printf("%s: mii_write timeout\n", device_xname(sc->sc_dev));
1348 out:
1349 /* Restore MIFI_CFG register */
1350 bus_space_write_4(t, mif, HME_MIFI_CFG, mifi_cfg);
1351 /* Restore XIF register */
1352 bus_space_write_4(t, mac, HME_MACI_XIF, xif_cfg);
1353
1354 return rv;
1355 }
1356
1357 static void
1358 hme_mii_statchg(struct ifnet *ifp)
1359 {
1360 struct hme_softc *sc = ifp->if_softc;
1361 bus_space_tag_t t = sc->sc_bustag;
1362 bus_space_handle_t mac = sc->sc_mac;
1363 uint32_t v;
1364
1365 #ifdef HMEDEBUG
1366 if (sc->sc_debug)
1367 printf("hme_mii_statchg: status change\n");
1368 #endif
1369
1370 /* Set the MAC Full Duplex bit appropriately */
1371 /* Apparently the hme chip is SIMPLEX if working in full duplex mode,
1372 but not otherwise. */
1373 v = bus_space_read_4(t, mac, HME_MACI_TXCFG);
1374 if ((IFM_OPTIONS(sc->sc_mii.mii_media_active) & IFM_FDX) != 0) {
1375 v |= HME_MAC_TXCFG_FULLDPLX;
1376 sc->sc_ethercom.ec_if.if_flags |= IFF_SIMPLEX;
1377 } else {
1378 v &= ~HME_MAC_TXCFG_FULLDPLX;
1379 sc->sc_ethercom.ec_if.if_flags &= ~IFF_SIMPLEX;
1380 }
1381 sc->sc_if_flags = sc->sc_ethercom.ec_if.if_flags;
1382 bus_space_write_4(t, mac, HME_MACI_TXCFG, v);
1383 }
1384
1385 int
1386 hme_mediachange(struct ifnet *ifp)
1387 {
1388 struct hme_softc *sc = ifp->if_softc;
1389 bus_space_tag_t t = sc->sc_bustag;
1390 bus_space_handle_t mif = sc->sc_mif;
1391 bus_space_handle_t mac = sc->sc_mac;
1392 int instance = IFM_INST(sc->sc_mii.mii_media.ifm_cur->ifm_media);
1393 int phy = sc->sc_phys[instance];
1394 int rc;
1395 uint32_t v;
1396
1397 #ifdef HMEDEBUG
1398 if (sc->sc_debug)
1399 printf("hme_mediachange: phy = %d\n", phy);
1400 #endif
1401
1402 /* Select the current PHY in the MIF configuration register */
1403 v = bus_space_read_4(t, mif, HME_MIFI_CFG);
1404 v &= ~HME_MIF_CFG_PHY;
1405 if (phy == HME_PHYAD_EXTERNAL)
1406 v |= HME_MIF_CFG_PHY;
1407 bus_space_write_4(t, mif, HME_MIFI_CFG, v);
1408
1409 /* If an external transceiver is selected, enable its MII drivers */
1410 v = bus_space_read_4(t, mac, HME_MACI_XIF);
1411 v &= ~HME_MAC_XIF_MIIENABLE;
1412 if (phy == HME_PHYAD_EXTERNAL)
1413 v |= HME_MAC_XIF_MIIENABLE;
1414 bus_space_write_4(t, mac, HME_MACI_XIF, v);
1415
1416 if ((rc = mii_mediachg(&sc->sc_mii)) == ENXIO)
1417 return 0;
1418 return rc;
1419 }
1420
1421 /*
1422 * Process an ioctl request.
1423 */
1424 int
1425 hme_ioctl(struct ifnet *ifp, unsigned long cmd, void *data)
1426 {
1427 struct hme_softc *sc = ifp->if_softc;
1428 struct ifaddr *ifa = (struct ifaddr *)data;
1429 int s, error = 0;
1430
1431 s = splnet();
1432
1433 switch (cmd) {
1434
1435 case SIOCINITIFADDR:
1436 switch (ifa->ifa_addr->sa_family) {
1437 #ifdef INET
1438 case AF_INET:
1439 if (ifp->if_flags & IFF_UP)
1440 hme_setladrf(sc);
1441 else {
1442 ifp->if_flags |= IFF_UP;
1443 error = hme_init(ifp);
1444 }
1445 arp_ifinit(ifp, ifa);
1446 break;
1447 #endif
1448 default:
1449 ifp->if_flags |= IFF_UP;
1450 error = hme_init(ifp);
1451 break;
1452 }
1453 break;
1454
1455 case SIOCSIFFLAGS:
1456 #ifdef HMEDEBUG
1457 {
1458 struct ifreq *ifr = data;
1459 sc->sc_debug =
1460 (ifr->ifr_flags & IFF_DEBUG) != 0 ? 1 : 0;
1461 }
1462 #endif
1463 if ((error = ifioctl_common(ifp, cmd, data)) != 0)
1464 break;
1465
1466 switch (ifp->if_flags & (IFF_UP | IFF_RUNNING)) {
1467 case IFF_RUNNING:
1468 /*
1469 * If interface is marked down and it is running, then
1470 * stop it.
1471 */
1472 hme_stop(ifp, 0);
1473 ifp->if_flags &= ~IFF_RUNNING;
1474 break;
1475 case IFF_UP:
1476 /*
1477 * If interface is marked up and it is stopped, then
1478 * start it.
1479 */
1480 error = hme_init(ifp);
1481 break;
1482 case IFF_UP | IFF_RUNNING:
1483 /*
1484 * If setting debug or promiscuous mode, do not reset
1485 * the chip; for everything else, call hme_init()
1486 * which will trigger a reset.
1487 */
1488 #define RESETIGN (IFF_CANTCHANGE | IFF_DEBUG)
1489 if (ifp->if_flags != sc->sc_if_flags) {
1490 if ((ifp->if_flags & (~RESETIGN))
1491 == (sc->sc_if_flags & (~RESETIGN)))
1492 hme_setladrf(sc);
1493 else
1494 error = hme_init(ifp);
1495 }
1496 #undef RESETIGN
1497 break;
1498 case 0:
1499 break;
1500 }
1501
1502 if (sc->sc_ec_capenable != sc->sc_ethercom.ec_capenable)
1503 error = hme_init(ifp);
1504
1505 break;
1506
1507 default:
1508 if ((error = ether_ioctl(ifp, cmd, data)) != ENETRESET)
1509 break;
1510
1511 error = 0;
1512
1513 if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI)
1514 ;
1515 else if (ifp->if_flags & IFF_RUNNING) {
1516 /*
1517 * Multicast list has changed; set the hardware filter
1518 * accordingly.
1519 */
1520 hme_setladrf(sc);
1521 }
1522 break;
1523 }
1524
1525 sc->sc_if_flags = ifp->if_flags;
1526 splx(s);
1527 return (error);
1528 }
1529
1530 bool
1531 hme_shutdown(device_t self, int howto)
1532 {
1533 struct hme_softc *sc;
1534 struct ifnet *ifp;
1535
1536 sc = device_private(self);
1537 ifp = &sc->sc_ethercom.ec_if;
1538 hme_stop(ifp, 1);
1539
1540 return true;
1541 }
1542
1543 /*
1544 * Set up the logical address filter.
1545 */
1546 void
1547 hme_setladrf(struct hme_softc *sc)
1548 {
1549 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
1550 struct ether_multi *enm;
1551 struct ether_multistep step;
1552 struct ethercom *ec = &sc->sc_ethercom;
1553 bus_space_tag_t t = sc->sc_bustag;
1554 bus_space_handle_t mac = sc->sc_mac;
1555 uint32_t v;
1556 uint32_t crc;
1557 uint32_t hash[4];
1558
1559 /* Clear hash table */
1560 hash[3] = hash[2] = hash[1] = hash[0] = 0;
1561
1562 /* Get current RX configuration */
1563 v = bus_space_read_4(t, mac, HME_MACI_RXCFG);
1564
1565 if ((ifp->if_flags & IFF_PROMISC) != 0) {
1566 /* Turn on promiscuous mode; turn off the hash filter */
1567 v |= HME_MAC_RXCFG_PMISC;
1568 v &= ~HME_MAC_RXCFG_HENABLE;
1569 ifp->if_flags |= IFF_ALLMULTI;
1570 goto chipit;
1571 }
1572
1573 /* Turn off promiscuous mode; turn on the hash filter */
1574 v &= ~HME_MAC_RXCFG_PMISC;
1575 v |= HME_MAC_RXCFG_HENABLE;
1576
1577 /*
1578 * Set up multicast address filter by passing all multicast addresses
1579 * through a crc generator, and then using the high order 6 bits as an
1580 * index into the 64 bit logical address filter. The high order bit
1581 * selects the word, while the rest of the bits select the bit within
1582 * the word.
1583 */
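	/*
	 * Worked example: if the top six CRC bits are 101010 (0x2a == 42),
	 * then crc >> 26 == 42, so hash[42 >> 4] == hash[2] gets bit
	 * (42 & 0xf) == 10 set.
	 */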
1584
1585 ETHER_LOCK(ec);
1586 ETHER_FIRST_MULTI(step, ec, enm);
1587 while (enm != NULL) {
1588 if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
1589 /*
1590 * We must listen to a range of multicast addresses.
1591 * For now, just accept all multicasts, rather than
1592 * trying to set only those filter bits needed to match
1593 * the range. (At this time, the only use of address
1594 * ranges is for IP multicast routing, for which the
1595 * range is big enough to require all bits set.)
1596 */
1597 hash[3] = hash[2] = hash[1] = hash[0] = 0xffff;
1598 ifp->if_flags |= IFF_ALLMULTI;
1599 ETHER_UNLOCK(ec);
1600 goto chipit;
1601 }
1602
1603 crc = ether_crc32_le(enm->enm_addrlo, ETHER_ADDR_LEN);
1604
1605 /* Just want the 6 most significant bits. */
1606 crc >>= 26;
1607
1608 /* Set the corresponding bit in the filter. */
1609 hash[crc >> 4] |= 1 << (crc & 0xf);
1610
1611 ETHER_NEXT_MULTI(step, enm);
1612 }
1613 ETHER_UNLOCK(ec);
1614
1615 ifp->if_flags &= ~IFF_ALLMULTI;
1616
1617 chipit:
1618 /* Now load the hash table into the chip */
1619 bus_space_write_4(t, mac, HME_MACI_HASHTAB0, hash[0]);
1620 bus_space_write_4(t, mac, HME_MACI_HASHTAB1, hash[1]);
1621 bus_space_write_4(t, mac, HME_MACI_HASHTAB2, hash[2]);
1622 bus_space_write_4(t, mac, HME_MACI_HASHTAB3, hash[3]);
1623 bus_space_write_4(t, mac, HME_MACI_RXCFG, v);
1624 }
1625
1626 /*
1627 * Routines for accessing the transmit and receive buffers.
1628 * The various CPU and adapter configurations supported by this
1629 * driver require three different access methods for buffers
1630 * and descriptors:
1631 * (1) contig (contiguous data; no padding),
1632 * (2) gap2 (two bytes of data followed by two bytes of padding),
1633 * (3) gap16 (16 bytes of data followed by 16 bytes of padding).
1634 */
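/*
 * Only the `contig' flavor is sketched below; it stays under #if 0
 * because the driver currently does its copies in hme_put()/hme_get()
 * above.
 */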
1635
1636 #if 0
1637 /*
1638 * contig: contiguous data with no padding.
1639 *
1640 * Buffers may have any alignment.
1641 */
1642
1643 void
1644 hme_copytobuf_contig(struct hme_softc *sc, void *from, int ri, int len)
1645 {
1646 volatile void *buf = sc->sc_rb.rb_txbuf + (ri * _HME_BUFSZ);
1647
1648 /*
1649 * Just call memcpy() to do the work.
1650 */
1651 memcpy(buf, from, len);
1652 }
1653
1654 void
1655 hme_copyfrombuf_contig(struct hme_softc *sc, void *to, int ri, int len)
1656 {
1657 volatile void *buf = sc->sc_rb.rb_rxbuf + (ri * _HME_BUFSZ);
1658
1659 /*
1660 * Just call memcpy() to do the work.
1661 */
1662 memcpy(to, buf, len);
1663 }
1664 #endif