FreeBSD/Linux Kernel Cross Reference
sys/dev/hme/if_hme.c
/*-
 * Copyright (c) 1999 The NetBSD Foundation, Inc.
 * Copyright (c) 2001-2003 Thomas Moestl <tmm@FreeBSD.org>.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Paul Kranenburg.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 * from: NetBSD: hme.c,v 1.29 2002/05/05 03:02:38 thorpej Exp
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: releng/6.0/sys/dev/hme/if_hme.c 150683 2005-09-28 14:10:00Z kensmith $");

/*
 * HME Ethernet module driver.
 *
 * The HME is e.g. part of the PCIO PCI multi-function device.
 * It supports TX gathering and TX and RX checksum offloading.
 * RX buffers must be aligned at a programmable offset modulo 16. We choose 2
 * for this offset: mbuf clusters are usually aligned to 2^11-byte boundaries,
 * so skipping 2 bytes makes the header that follows the Ethernet header start
 * on a natural boundary. This ensures minimal wastage in the most common case.
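 * (For example: a cluster starting on a 2048-byte boundary plus the 2-byte
 * offset places the 14-byte Ethernet header at offset 2, so the IP header
 * that follows it begins 16 bytes into the cluster and is naturally aligned.)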
 *
 * Also, apparently, the buffers must extend to a DMA burst boundary beyond the
 * maximum packet size (this is not verified). Buffers starting on odd
 * boundaries must be mapped so that the burst can start on a natural boundary.
 *
 * STP2002QFP-UG says that the Ethernet hardware supports TCP checksum
 * offloading. In reality, the same technique can be used for UDP datagrams
 * too. However, the hardware doesn't compensate the checksum for UDP
 * datagrams, which can yield 0x0 (the "no checksum" value). As a safeguard,
 * UDP checksum offload is disabled by default. It can be re-enabled by
 * setting the special link option link0 with ifconfig(8).
 */
#define HME_CSUM_FEATURES	(CSUM_TCP)
#define HMEDEBUG
#define KTR_HME		KTR_CT2		/* XXX */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/endian.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <sys/ktr.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/socket.h>
#include <sys/sockio.h>

#include <net/bpf.h>
#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/if_vlan_var.h>

#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/ip.h>
#include <netinet/tcp.h>
#include <netinet/udp.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#include <machine/bus.h>

#include <dev/hme/if_hmereg.h>
#include <dev/hme/if_hmevar.h>

static void	hme_start(struct ifnet *);
static void	hme_start_locked(struct ifnet *);
static void	hme_stop(struct hme_softc *);
static int	hme_ioctl(struct ifnet *, u_long, caddr_t);
static void	hme_tick(void *);
static void	hme_watchdog(struct ifnet *);
static void	hme_init(void *);
static void	hme_init_locked(struct hme_softc *);
static int	hme_add_rxbuf(struct hme_softc *, unsigned int, int);
static int	hme_meminit(struct hme_softc *);
static int	hme_mac_bitflip(struct hme_softc *, u_int32_t, u_int32_t,
    u_int32_t, u_int32_t);
static void	hme_mifinit(struct hme_softc *);
static void	hme_setladrf(struct hme_softc *, int);

static int	hme_mediachange(struct ifnet *);
static void	hme_mediastatus(struct ifnet *, struct ifmediareq *);

static int	hme_load_txmbuf(struct hme_softc *, struct mbuf *);
static void	hme_read(struct hme_softc *, int, int, u_int32_t);
static void	hme_eint(struct hme_softc *, u_int);
static void	hme_rint(struct hme_softc *);
static void	hme_tint(struct hme_softc *);
static void	hme_txcksum(struct mbuf *, u_int32_t *);
static void	hme_rxcksum(struct mbuf *, u_int32_t);

static void	hme_cdma_callback(void *, bus_dma_segment_t *, int, int);
static void	hme_txdma_callback(void *, bus_dma_segment_t *, int,
    bus_size_t, int);

devclass_t hme_devclass;

static int hme_nerr;

DRIVER_MODULE(miibus, hme, miibus_driver, miibus_devclass, 0, 0);
MODULE_DEPEND(hme, miibus, 1, 1, 1);

#define HME_SPC_READ_4(spc, sc, offs) \
	bus_space_read_4((sc)->sc_ ## spc ## t, (sc)->sc_ ## spc ## h, \
	    (offs))
#define HME_SPC_WRITE_4(spc, sc, offs, v) \
	bus_space_write_4((sc)->sc_ ## spc ## t, (sc)->sc_ ## spc ## h, \
	    (offs), (v))

#define HME_SEB_READ_4(sc, offs)	HME_SPC_READ_4(seb, (sc), (offs))
#define HME_SEB_WRITE_4(sc, offs, v)	HME_SPC_WRITE_4(seb, (sc), (offs), (v))
#define HME_ERX_READ_4(sc, offs)	HME_SPC_READ_4(erx, (sc), (offs))
#define HME_ERX_WRITE_4(sc, offs, v)	HME_SPC_WRITE_4(erx, (sc), (offs), (v))
#define HME_ETX_READ_4(sc, offs)	HME_SPC_READ_4(etx, (sc), (offs))
#define HME_ETX_WRITE_4(sc, offs, v)	HME_SPC_WRITE_4(etx, (sc), (offs), (v))
#define HME_MAC_READ_4(sc, offs)	HME_SPC_READ_4(mac, (sc), (offs))
#define HME_MAC_WRITE_4(sc, offs, v)	HME_SPC_WRITE_4(mac, (sc), (offs), (v))
#define HME_MIF_READ_4(sc, offs)	HME_SPC_READ_4(mif, (sc), (offs))
#define HME_MIF_WRITE_4(sc, offs, v)	HME_SPC_WRITE_4(mif, (sc), (offs), (v))

#define HME_MAXERR	5
#define HME_WHINE(dev, ...) do {					\
	if (hme_nerr++ < HME_MAXERR)					\
		device_printf(dev, __VA_ARGS__);			\
	if (hme_nerr == HME_MAXERR) {					\
		device_printf(dev, "too many errors; not reporting "	\
		    "any more\n");					\
	}								\
} while (0)

/* Support oversized VLAN frames. */
#define HME_MAX_FRAMESIZE (ETHER_MAX_LEN + ETHER_VLAN_ENCAP_LEN)
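/* With ETHER_MAX_LEN at 1518 and a 4-byte 802.1Q tag, this is 1522 bytes. */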

int
hme_config(struct hme_softc *sc)
{
	struct ifnet *ifp;
	struct mii_softc *child;
	bus_size_t size;
	int error, rdesc, tdesc, i;

	ifp = sc->sc_ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL)
		return (ENOSPC);

	/*
	 * HME common initialization.
	 *
	 * hme_softc fields that must be initialized by the front-end:
	 *
	 * the DMA bus tag:
	 *	sc_dmatag
	 *
	 * the bus handles, tags and offsets (split up for SBus compatibility):
	 *	sc_seb{t,h,o}	(Shared Ethernet Block registers)
	 *	sc_erx{t,h,o}	(Receiver Unit registers)
	 *	sc_etx{t,h,o}	(Transmitter Unit registers)
	 *	sc_mac{t,h,o}	(MAC registers)
	 *	sc_mif{t,h,o}	(Management Interface registers)
	 *
	 * the maximum bus burst size:
	 *	sc_burst
	 *
	 */

	callout_init_mtx(&sc->sc_tick_ch, &sc->sc_lock, 0);

	/* Make sure the chip is stopped. */
	HME_LOCK(sc);
	hme_stop(sc);
	HME_UNLOCK(sc);

	/*
	 * Allocate DMA capable memory.
	 * Buffer descriptors must be aligned on a 2048 byte boundary;
	 * take this into account when calculating the size. Note that
	 * the maximum number of descriptors (256) occupies 2048 bytes,
	 * so we allocate that much regardless of HME_N*DESC.
	 */
	size = 4096;

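	/*
	 * DMA tag hierarchy, as set up below: sc_pdmatag is the parent tag
	 * restricting everything to 32-bit bus addresses; sc_cdmatag covers
	 * the single 2048-byte-aligned control/descriptor buffer; sc_rdmatag
	 * and sc_tdmatag map the RX and TX mbufs and request burst-size (at
	 * least 16-byte) alignment, matching the manual RX buffer alignment
	 * performed in hme_add_rxbuf().
	 */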
	error = bus_dma_tag_create(NULL, 1, 0, BUS_SPACE_MAXADDR_32BIT,
	    BUS_SPACE_MAXADDR, NULL, NULL, size, HME_NTXDESC + HME_NRXDESC + 1,
	    BUS_SPACE_MAXSIZE_32BIT, 0, NULL, NULL, &sc->sc_pdmatag);
	if (error)
		goto fail_ifnet;

	error = bus_dma_tag_create(sc->sc_pdmatag, 2048, 0,
	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, size,
	    1, BUS_SPACE_MAXSIZE_32BIT, BUS_DMA_ALLOCNOW, busdma_lock_mutex,
	    &sc->sc_lock, &sc->sc_cdmatag);
	if (error)
		goto fail_ptag;

	error = bus_dma_tag_create(sc->sc_pdmatag, max(0x10, sc->sc_burst), 0,
	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES,
	    HME_NRXDESC, BUS_SPACE_MAXSIZE_32BIT, BUS_DMA_ALLOCNOW,
	    NULL, NULL, &sc->sc_rdmatag);
	if (error)
		goto fail_ctag;

	error = bus_dma_tag_create(sc->sc_pdmatag, max(0x10, sc->sc_burst), 0,
	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES,
	    HME_NTXDESC, BUS_SPACE_MAXSIZE_32BIT, BUS_DMA_ALLOCNOW,
	    NULL, NULL, &sc->sc_tdmatag);
	if (error)
		goto fail_rtag;

	/* Allocate control/TX DMA buffer */
	error = bus_dmamem_alloc(sc->sc_cdmatag, (void **)&sc->sc_rb.rb_membase,
	    0, &sc->sc_cdmamap);
	if (error != 0) {
		device_printf(sc->sc_dev, "DMA buffer alloc error %d\n", error);
		goto fail_ttag;
	}

	/* Load the buffer */
	sc->sc_rb.rb_dmabase = 0;
	if ((error = bus_dmamap_load(sc->sc_cdmatag, sc->sc_cdmamap,
	    sc->sc_rb.rb_membase, size, hme_cdma_callback, sc, 0)) != 0 ||
	    sc->sc_rb.rb_dmabase == 0) {
		device_printf(sc->sc_dev, "DMA buffer map load error %d\n",
		    error);
		goto fail_free;
	}
	CTR2(KTR_HME, "hme_config: dma va %p, pa %#lx", sc->sc_rb.rb_membase,
	    sc->sc_rb.rb_dmabase);

	/*
	 * Prepare the RX descriptors. rdesc serves as marker for the last
	 * processed descriptor and may be used later on.
	 */
	for (rdesc = 0; rdesc < HME_NRXDESC; rdesc++) {
		sc->sc_rb.rb_rxdesc[rdesc].hrx_m = NULL;
		error = bus_dmamap_create(sc->sc_rdmatag, 0,
		    &sc->sc_rb.rb_rxdesc[rdesc].hrx_dmamap);
		if (error != 0)
			goto fail_rxdesc;
	}
	error = bus_dmamap_create(sc->sc_rdmatag, 0,
	    &sc->sc_rb.rb_spare_dmamap);
	if (error != 0)
		goto fail_rxdesc;
	/* Same for the TX descs. */
	for (tdesc = 0; tdesc < HME_NTXQ; tdesc++) {
		sc->sc_rb.rb_txdesc[tdesc].htx_m = NULL;
		error = bus_dmamap_create(sc->sc_tdmatag, 0,
		    &sc->sc_rb.rb_txdesc[tdesc].htx_dmamap);
		if (error != 0)
			goto fail_txdesc;
	}

	sc->sc_csum_features = HME_CSUM_FEATURES;
	/* Initialize ifnet structure. */
	ifp->if_softc = sc;
	if_initname(ifp, device_get_name(sc->sc_dev),
	    device_get_unit(sc->sc_dev));
	ifp->if_mtu = ETHERMTU;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_start = hme_start;
	ifp->if_ioctl = hme_ioctl;
	ifp->if_init = hme_init;
	ifp->if_watchdog = hme_watchdog;
	IFQ_SET_MAXLEN(&ifp->if_snd, HME_NTXQ);
	ifp->if_snd.ifq_drv_maxlen = HME_NTXQ;
	IFQ_SET_READY(&ifp->if_snd);

	hme_mifinit(sc);

	if ((error = mii_phy_probe(sc->sc_dev, &sc->sc_miibus, hme_mediachange,
	    hme_mediastatus)) != 0) {
		device_printf(sc->sc_dev, "phy probe failed: %d\n", error);
		goto fail_rxdesc;
	}
	sc->sc_mii = device_get_softc(sc->sc_miibus);

	/*
	 * Walk along the list of attached MII devices and
	 * establish an `MII instance' to `phy number'
	 * mapping. We'll use this mapping in media change
	 * requests to determine which phy to use to program
	 * the MIF configuration register.
	 */
	for (child = LIST_FIRST(&sc->sc_mii->mii_phys); child != NULL;
	    child = LIST_NEXT(child, mii_list)) {
		/*
		 * Note: we support just two PHYs: the built-in
		 * internal device and an external on the MII
		 * connector.
		 */
		if (child->mii_phy > 1 || child->mii_inst > 1) {
			device_printf(sc->sc_dev, "cannot accommodate "
			    "MII device %s at phy %d, instance %d\n",
			    device_get_name(child->mii_dev),
			    child->mii_phy, child->mii_inst);
			continue;
		}

		sc->sc_phys[child->mii_inst] = child->mii_phy;
	}

	/* Attach the interface. */
	ether_ifattach(ifp, sc->sc_enaddr);

	/*
	 * Tell the upper layer(s) we support long frames/checksum offloads.
	 */
	ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
	ifp->if_capabilities |= IFCAP_VLAN_MTU | IFCAP_HWCSUM;
	ifp->if_hwassist |= sc->sc_csum_features;
	ifp->if_capenable |= IFCAP_VLAN_MTU | IFCAP_HWCSUM;
	return (0);

fail_txdesc:
	for (i = 0; i < tdesc; i++) {
		bus_dmamap_destroy(sc->sc_tdmatag,
		    sc->sc_rb.rb_txdesc[i].htx_dmamap);
	}
	bus_dmamap_destroy(sc->sc_rdmatag, sc->sc_rb.rb_spare_dmamap);
fail_rxdesc:
	for (i = 0; i < rdesc; i++) {
		bus_dmamap_destroy(sc->sc_rdmatag,
		    sc->sc_rb.rb_rxdesc[i].hrx_dmamap);
	}
	bus_dmamap_unload(sc->sc_cdmatag, sc->sc_cdmamap);
fail_free:
	bus_dmamem_free(sc->sc_cdmatag, sc->sc_rb.rb_membase, sc->sc_cdmamap);
fail_ttag:
	bus_dma_tag_destroy(sc->sc_tdmatag);
fail_rtag:
	bus_dma_tag_destroy(sc->sc_rdmatag);
fail_ctag:
	bus_dma_tag_destroy(sc->sc_cdmatag);
fail_ptag:
	bus_dma_tag_destroy(sc->sc_pdmatag);
fail_ifnet:
	if_free(ifp);
	return (error);
}

void
hme_detach(struct hme_softc *sc)
{
	struct ifnet *ifp = sc->sc_ifp;
	int i;

	HME_LOCK(sc);
	hme_stop(sc);
	HME_UNLOCK(sc);
	callout_drain(&sc->sc_tick_ch);
	ether_ifdetach(ifp);
	if_free(ifp);
	device_delete_child(sc->sc_dev, sc->sc_miibus);

	for (i = 0; i < HME_NTXQ; i++) {
		bus_dmamap_destroy(sc->sc_tdmatag,
		    sc->sc_rb.rb_txdesc[i].htx_dmamap);
	}
	bus_dmamap_destroy(sc->sc_rdmatag, sc->sc_rb.rb_spare_dmamap);
	for (i = 0; i < HME_NRXDESC; i++) {
		bus_dmamap_destroy(sc->sc_rdmatag,
		    sc->sc_rb.rb_rxdesc[i].hrx_dmamap);
	}
	bus_dmamap_sync(sc->sc_cdmatag, sc->sc_cdmamap, BUS_DMASYNC_POSTREAD);
	bus_dmamap_sync(sc->sc_cdmatag, sc->sc_cdmamap, BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(sc->sc_cdmatag, sc->sc_cdmamap);
	bus_dmamem_free(sc->sc_cdmatag, sc->sc_rb.rb_membase, sc->sc_cdmamap);
	bus_dma_tag_destroy(sc->sc_tdmatag);
	bus_dma_tag_destroy(sc->sc_rdmatag);
	bus_dma_tag_destroy(sc->sc_cdmatag);
	bus_dma_tag_destroy(sc->sc_pdmatag);
}

void
hme_suspend(struct hme_softc *sc)
{

	HME_LOCK(sc);
	hme_stop(sc);
	HME_UNLOCK(sc);
}

void
hme_resume(struct hme_softc *sc)
{
	struct ifnet *ifp = sc->sc_ifp;

	HME_LOCK(sc);
	if ((ifp->if_flags & IFF_UP) != 0)
		hme_init_locked(sc);
	HME_UNLOCK(sc);
}

static void
hme_cdma_callback(void *xsc, bus_dma_segment_t *segs, int nsegs, int error)
{
	struct hme_softc *sc = (struct hme_softc *)xsc;

	if (error != 0)
		return;
	KASSERT(nsegs == 1, ("hme_cdma_callback: bad dma segment count"));
	sc->sc_rb.rb_dmabase = segs[0].ds_addr;
}

static void
hme_tick(void *arg)
{
	struct hme_softc *sc = arg;

	HME_LOCK_ASSERT(sc, MA_OWNED);
	mii_tick(sc->sc_mii);

	callout_reset(&sc->sc_tick_ch, hz, hme_tick, sc);
}

static void
hme_stop(struct hme_softc *sc)
{
	u_int32_t v;
	int n;

	callout_stop(&sc->sc_tick_ch);
	sc->sc_ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);

	/* Reset transmitter and receiver */
	HME_SEB_WRITE_4(sc, HME_SEBI_RESET, HME_SEB_RESET_ETX |
	    HME_SEB_RESET_ERX);

	for (n = 0; n < 20; n++) {
		v = HME_SEB_READ_4(sc, HME_SEBI_RESET);
		if ((v & (HME_SEB_RESET_ETX | HME_SEB_RESET_ERX)) == 0)
			return;
		DELAY(20);
	}

	device_printf(sc->sc_dev, "hme_stop: reset failed\n");
}

/*
 * Discard the contents of an mbuf in the RX ring, freeing the buffer in the
 * ring for subsequent use.
 */
static __inline void
hme_discard_rxbuf(struct hme_softc *sc, int ix)
{

	/*
	 * Dropped a packet, reinitialize the descriptor and turn the
	 * ownership back to the hardware.
	 */
	HME_XD_SETFLAGS(sc->sc_pci, sc->sc_rb.rb_rxd, ix, HME_XD_OWN |
	    HME_XD_ENCODE_RSIZE(HME_DESC_RXLEN(sc, &sc->sc_rb.rb_rxdesc[ix])));
}

static int
hme_add_rxbuf(struct hme_softc *sc, unsigned int ri, int keepold)
{
	struct hme_rxdesc *rd;
	struct mbuf *m;
	bus_dma_segment_t segs[1];
	bus_dmamap_t map;
	uintptr_t b;
	int a, unmap, nsegs;

	rd = &sc->sc_rb.rb_rxdesc[ri];
	unmap = rd->hrx_m != NULL;
	if (unmap && keepold) {
		/*
		 * Reinitialize the descriptor flags, as they may have been
		 * altered by the hardware.
		 */
		hme_discard_rxbuf(sc, ri);
		return (0);
	}
	if ((m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR)) == NULL)
		return (ENOBUFS);
	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
	b = mtod(m, uintptr_t);
	/*
	 * Required alignment boundary. At least 16 is needed, but since
	 * the mapping must be done in a way that a burst can start on a
	 * natural boundary we might need to extend this.
	 */
	a = max(HME_MINRXALIGN, sc->sc_burst);
	/*
	 * Make sure the buffer is suitably aligned. The 2 byte offset is
	 * removed when the mbuf is handed up. XXX: this ensures at least
	 * 16 byte alignment of the header adjacent to the ethernet header,
	 * which should be sufficient in all cases. Nevertheless, this
	 * second-guesses ALIGN().
	 */
	m_adj(m, roundup2(b, a) - b);
	if (bus_dmamap_load_mbuf_sg(sc->sc_rdmatag, sc->sc_rb.rb_spare_dmamap,
	    m, segs, &nsegs, 0) != 0) {
		m_freem(m);
		return (ENOBUFS);
	}
	/* If nsegs is wrong then the stack is corrupt. */
	KASSERT(nsegs == 1, ("Too many segments returned!"));
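	/*
	 * The new mbuf was loaded into the spare map above; now that the
	 * load has succeeded, swap the spare map with the descriptor's map.
	 * This way a failed load leaves the old buffer mapped and the ring
	 * entry intact.
	 */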
	if (unmap) {
		bus_dmamap_sync(sc->sc_rdmatag, rd->hrx_dmamap,
		    BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_rdmatag, rd->hrx_dmamap);
	}
	map = rd->hrx_dmamap;
	rd->hrx_dmamap = sc->sc_rb.rb_spare_dmamap;
	sc->sc_rb.rb_spare_dmamap = map;
	bus_dmamap_sync(sc->sc_rdmatag, rd->hrx_dmamap, BUS_DMASYNC_PREREAD);
	HME_XD_SETADDR(sc->sc_pci, sc->sc_rb.rb_rxd, ri, segs[0].ds_addr);
	rd->hrx_m = m;
	HME_XD_SETFLAGS(sc->sc_pci, sc->sc_rb.rb_rxd, ri, HME_XD_OWN |
	    HME_XD_ENCODE_RSIZE(HME_DESC_RXLEN(sc, rd)));
	return (0);
}

static int
hme_meminit(struct hme_softc *sc)
{
	struct hme_ring *hr = &sc->sc_rb;
	struct hme_txdesc *td;
	bus_addr_t dma;
	caddr_t p;
	unsigned int i;
	int error;

	p = hr->rb_membase;
	dma = hr->rb_dmabase;

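	/*
	 * Layout of the 4096-byte control buffer (a sketch; with the maximum
	 * of 256 descriptors each ring fills its 2048-byte slot exactly,
	 * smaller rings leave the rest of the slot unused):
	 *
	 *   offset    0 .. 2047: TX descriptor ring (HME_NTXDESC * HME_XD_SIZE)
	 *   offset 2048 .. 4095: RX descriptor ring (HME_NRXDESC * HME_XD_SIZE)
	 */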
	/*
	 * Allocate transmit descriptors
	 */
	hr->rb_txd = p;
	hr->rb_txddma = dma;
	p += HME_NTXDESC * HME_XD_SIZE;
	dma += HME_NTXDESC * HME_XD_SIZE;
	/* We have reserved descriptor space until the next 2048 byte boundary. */
	dma = (bus_addr_t)roundup((u_long)dma, 2048);
	p = (caddr_t)roundup((u_long)p, 2048);

	/*
	 * Allocate receive descriptors
	 */
	hr->rb_rxd = p;
	hr->rb_rxddma = dma;
	p += HME_NRXDESC * HME_XD_SIZE;
	dma += HME_NRXDESC * HME_XD_SIZE;
	/* Again move forward to the next 2048 byte boundary. */
	dma = (bus_addr_t)roundup((u_long)dma, 2048);
	p = (caddr_t)roundup((u_long)p, 2048);

	/*
	 * Initialize transmit buffer descriptors
	 */
	for (i = 0; i < HME_NTXDESC; i++) {
		HME_XD_SETADDR(sc->sc_pci, hr->rb_txd, i, 0);
		HME_XD_SETFLAGS(sc->sc_pci, hr->rb_txd, i, 0);
	}

	STAILQ_INIT(&sc->sc_rb.rb_txfreeq);
	STAILQ_INIT(&sc->sc_rb.rb_txbusyq);
	for (i = 0; i < HME_NTXQ; i++) {
		td = &sc->sc_rb.rb_txdesc[i];
		if (td->htx_m != NULL) {
			m_freem(td->htx_m);
			bus_dmamap_sync(sc->sc_tdmatag, td->htx_dmamap,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_tdmatag, td->htx_dmamap);
			td->htx_m = NULL;
		}
		STAILQ_INSERT_TAIL(&sc->sc_rb.rb_txfreeq, td, htx_q);
	}

	/*
	 * Initialize receive buffer descriptors
	 */
	for (i = 0; i < HME_NRXDESC; i++) {
		error = hme_add_rxbuf(sc, i, 1);
		if (error != 0)
			return (error);
	}

	bus_dmamap_sync(sc->sc_cdmatag, sc->sc_cdmamap, BUS_DMASYNC_PREREAD);
	bus_dmamap_sync(sc->sc_cdmatag, sc->sc_cdmamap, BUS_DMASYNC_PREWRITE);

	hr->rb_tdhead = hr->rb_tdtail = 0;
	hr->rb_td_nbusy = 0;
	hr->rb_rdtail = 0;
	CTR2(KTR_HME, "hme_meminit: tx ring va %p, pa %#lx", hr->rb_txd,
	    hr->rb_txddma);
	CTR2(KTR_HME, "hme_meminit: rx ring va %p, pa %#lx", hr->rb_rxd,
	    hr->rb_rxddma);
	CTR2(KTR_HME, "rx entry 1: flags %x, address %x",
	    *(u_int32_t *)hr->rb_rxd, *(u_int32_t *)(hr->rb_rxd + 4));
	CTR2(KTR_HME, "tx entry 1: flags %x, address %x",
	    *(u_int32_t *)hr->rb_txd, *(u_int32_t *)(hr->rb_txd + 4));
	return (0);
}

static int
hme_mac_bitflip(struct hme_softc *sc, u_int32_t reg, u_int32_t val,
    u_int32_t clr, u_int32_t set)
{
	int i = 0;

	val &= ~clr;
	val |= set;
	HME_MAC_WRITE_4(sc, reg, val);
	if (clr == 0 && set == 0)
		return (1);	/* just write, no bits to wait for */
	do {
		DELAY(100);
		i++;
		val = HME_MAC_READ_4(sc, reg);
		if (i > 40) {
			/* After 3.5ms, we should have been done. */
			device_printf(sc->sc_dev, "timeout while writing to "
			    "MAC configuration register\n");
			return (0);
		}
	} while ((val & clr) != 0 && (val & set) != set);
	return (1);
}

/*
 * Initialization of interface; set up initialization block
 * and transmit/receive descriptor rings.
 */
static void
hme_init(void *xsc)
{
	struct hme_softc *sc = (struct hme_softc *)xsc;

	HME_LOCK(sc);
	hme_init_locked(sc);
	HME_UNLOCK(sc);
}

static void
hme_init_locked(struct hme_softc *sc)
{
	struct ifnet *ifp = sc->sc_ifp;
	u_int8_t *ea;
	u_int32_t n, v;

	HME_LOCK_ASSERT(sc, MA_OWNED);
	/*
	 * Initialization sequence. The numbered steps below correspond
	 * to the sequence outlined in section 6.3.5.1 in the Ethernet
	 * Channel Engine manual (part of the PCIO manual).
	 * See also the STP2002-STQ document from Sun Microsystems.
	 */

	/* step 1 & 2. Reset the Ethernet Channel */
	hme_stop(sc);

	/* Re-initialize the MIF */
	hme_mifinit(sc);

#if 0
	/* Mask all MIF interrupts, just in case */
	HME_MIF_WRITE_4(sc, HME_MIFI_IMASK, 0xffff);
#endif

	/* step 3. Setup data structures in host memory */
	if (hme_meminit(sc) != 0) {
		device_printf(sc->sc_dev, "out of buffers; init aborted.");
		return;
	}

	/* step 4. TX MAC registers & counters */
	HME_MAC_WRITE_4(sc, HME_MACI_NCCNT, 0);
	HME_MAC_WRITE_4(sc, HME_MACI_FCCNT, 0);
	HME_MAC_WRITE_4(sc, HME_MACI_EXCNT, 0);
	HME_MAC_WRITE_4(sc, HME_MACI_LTCNT, 0);
	HME_MAC_WRITE_4(sc, HME_MACI_TXSIZE, HME_MAX_FRAMESIZE);

	/* Load station MAC address */
	ea = IFP2ENADDR(sc->sc_ifp);
	HME_MAC_WRITE_4(sc, HME_MACI_MACADDR0, (ea[0] << 8) | ea[1]);
	HME_MAC_WRITE_4(sc, HME_MACI_MACADDR1, (ea[2] << 8) | ea[3]);
	HME_MAC_WRITE_4(sc, HME_MACI_MACADDR2, (ea[4] << 8) | ea[5]);

	/*
	 * Init seed for backoff
	 * (source suggested by manual: low 10 bits of MAC address)
	 */
	v = ((ea[4] << 8) | ea[5]) & 0x3fff;
	HME_MAC_WRITE_4(sc, HME_MACI_RANDSEED, v);

	/* Note: Accepting power-on default for other MAC registers here.. */

	/* step 5. RX MAC registers & counters */
	hme_setladrf(sc, 0);

	/* step 6 & 7. Program Descriptor Ring Base Addresses */
	HME_ETX_WRITE_4(sc, HME_ETXI_RING, sc->sc_rb.rb_txddma);
	/* Transmit Descriptor ring size: in increments of 16 */
	HME_ETX_WRITE_4(sc, HME_ETXI_RSIZE, HME_NTXDESC / 16 - 1);

	HME_ERX_WRITE_4(sc, HME_ERXI_RING, sc->sc_rb.rb_rxddma);
	HME_MAC_WRITE_4(sc, HME_MACI_RXSIZE, HME_MAX_FRAMESIZE);

	/* step 8. Global Configuration & Interrupt Mask */
	HME_SEB_WRITE_4(sc, HME_SEBI_IMASK,
	    ~(/*HME_SEB_STAT_GOTFRAME | HME_SEB_STAT_SENTFRAME |*/
	    HME_SEB_STAT_HOSTTOTX |
	    HME_SEB_STAT_RXTOHOST |
	    HME_SEB_STAT_TXALL |
	    HME_SEB_STAT_TXPERR |
	    HME_SEB_STAT_RCNTEXP |
	    HME_SEB_STAT_ALL_ERRORS));

	switch (sc->sc_burst) {
	default:
		v = 0;
		break;
	case 16:
		v = HME_SEB_CFG_BURST16;
		break;
	case 32:
		v = HME_SEB_CFG_BURST32;
		break;
	case 64:
		v = HME_SEB_CFG_BURST64;
		break;
	}
	/*
	 * Blindly setting 64-bit transfers may hang PCI cards (Cheerio?).
	 * Allowing 64-bit transfers also breaks TX checksum offload. It is
	 * not known whether this comes from a hardware bug or from the
	 * driver's DMA scheme.
	 *
	 * if (sc->sc_pci == 0)
	 *	v |= HME_SEB_CFG_64BIT;
	 */
	HME_SEB_WRITE_4(sc, HME_SEBI_CFG, v);

	/* step 9. ETX Configuration: use mostly default values */

	/* Enable DMA */
	v = HME_ETX_READ_4(sc, HME_ETXI_CFG);
	v |= HME_ETX_CFG_DMAENABLE;
	HME_ETX_WRITE_4(sc, HME_ETXI_CFG, v);

	/* step 10. ERX Configuration */
	v = HME_ERX_READ_4(sc, HME_ERXI_CFG);

	/* Encode Receive Descriptor ring size: four possible values */
	v &= ~HME_ERX_CFG_RINGSIZEMSK;
	switch (HME_NRXDESC) {
	case 32:
		v |= HME_ERX_CFG_RINGSIZE32;
		break;
	case 64:
		v |= HME_ERX_CFG_RINGSIZE64;
		break;
	case 128:
		v |= HME_ERX_CFG_RINGSIZE128;
		break;
	case 256:
		v |= HME_ERX_CFG_RINGSIZE256;
		break;
	default:
		printf("hme: invalid Receive Descriptor ring size\n");
		break;
	}

	/* Enable DMA, fix RX first byte offset. */
	v &= ~HME_ERX_CFG_FBO_MASK;
	v |= HME_ERX_CFG_DMAENABLE | (HME_RXOFFS << HME_ERX_CFG_FBO_SHIFT);
	/* RX TCP/UDP checksum offset */
	n = (ETHER_HDR_LEN + sizeof(struct ip)) / 2;
	n = (n << HME_ERX_CFG_CSUMSTART_SHIFT) & HME_ERX_CFG_CSUMSTART_MASK;
	v |= n;
	CTR1(KTR_HME, "hme_init: programming ERX_CFG to %x", (u_int)v);
	HME_ERX_WRITE_4(sc, HME_ERXI_CFG, v);

	/* step 11. XIF Configuration */
	v = HME_MAC_READ_4(sc, HME_MACI_XIF);
	v |= HME_MAC_XIF_OE;
	/* If an external transceiver is connected, enable its MII drivers */
	if ((HME_MIF_READ_4(sc, HME_MIFI_CFG) & HME_MIF_CFG_MDI1) != 0)
		v |= HME_MAC_XIF_MIIENABLE;
	CTR1(KTR_HME, "hme_init: programming XIF to %x", (u_int)v);
	HME_MAC_WRITE_4(sc, HME_MACI_XIF, v);

	/* step 12. RX_MAC Configuration Register */
	v = HME_MAC_READ_4(sc, HME_MACI_RXCFG);
	v |= HME_MAC_RXCFG_ENABLE;
	v &= ~(HME_MAC_RXCFG_DCRCS);
	CTR1(KTR_HME, "hme_init: programming RX_MAC to %x", (u_int)v);
	HME_MAC_WRITE_4(sc, HME_MACI_RXCFG, v);

	/* step 13. TX_MAC Configuration Register */
	v = HME_MAC_READ_4(sc, HME_MACI_TXCFG);
	v |= (HME_MAC_TXCFG_ENABLE | HME_MAC_TXCFG_DGIVEUP);
	CTR1(KTR_HME, "hme_init: programming TX_MAC to %x", (u_int)v);
	HME_MAC_WRITE_4(sc, HME_MACI_TXCFG, v);

	/* step 14. Issue Transmit Pending command */

#ifdef HMEDEBUG
	/* Debug: double-check. */
	CTR4(KTR_HME, "hme_init: tx ring %#x, rsz %#x, rx ring %#x, "
	    "rxsize %#x", HME_ETX_READ_4(sc, HME_ETXI_RING),
	    HME_ETX_READ_4(sc, HME_ETXI_RSIZE),
	    HME_ERX_READ_4(sc, HME_ERXI_RING),
	    HME_MAC_READ_4(sc, HME_MACI_RXSIZE));
	CTR3(KTR_HME, "hme_init: intr mask %#x, erx cfg %#x, etx cfg %#x",
	    HME_SEB_READ_4(sc, HME_SEBI_IMASK),
	    HME_ERX_READ_4(sc, HME_ERXI_CFG),
	    HME_ETX_READ_4(sc, HME_ETXI_CFG));
	CTR2(KTR_HME, "hme_init: mac rxcfg %#x, maci txcfg %#x",
	    HME_MAC_READ_4(sc, HME_MACI_RXCFG),
	    HME_MAC_READ_4(sc, HME_MACI_TXCFG));
#endif

	/* Set the current media. */
	/*
	 * mii_mediachg(sc->sc_mii);
	 */

	/* Start the one second timer. */
	callout_reset(&sc->sc_tick_ch, hz, hme_tick, sc);

	ifp->if_drv_flags |= IFF_DRV_RUNNING;
	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
	ifp->if_timer = 0;
	hme_start_locked(ifp);
}

struct hme_txdma_arg {
	struct hme_softc *hta_sc;
	struct hme_txdesc *hta_htx;
	int hta_ndescs;
};
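
/*
 * Contract of the callback below: on success hta_ndescs is set to the number
 * of segments consumed; if the ring has too few free descriptors it is set
 * to -1 instead, which hme_load_txmbuf() translates into a "try again later"
 * return value of -1.
 */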

/*
 * XXX: this relies on the fact that segments returned by
 * bus_dmamap_load_mbuf() are readable from the nearest burst boundary on
 * (i.e. potentially before ds_addr) to the first boundary beyond the end.
 * This is usually a safe assumption to make, but is not documented.
 */
static void
hme_txdma_callback(void *xsc, bus_dma_segment_t *segs, int nsegs,
    bus_size_t totsz, int error)
{
	struct hme_txdma_arg *ta = xsc;
	struct hme_txdesc *htx;
	bus_size_t len = 0;
	caddr_t txd;
	u_int32_t flags = 0;
	int i, tdhead, pci;

	if (error != 0)
		return;

	tdhead = ta->hta_sc->sc_rb.rb_tdhead;
	pci = ta->hta_sc->sc_pci;
	txd = ta->hta_sc->sc_rb.rb_txd;
	htx = ta->hta_htx;

	if (ta->hta_sc->sc_rb.rb_td_nbusy + nsegs >= HME_NTXDESC) {
		ta->hta_ndescs = -1;
		return;
	}
	ta->hta_ndescs = nsegs;

	for (i = 0; i < nsegs; i++) {
		if (segs[i].ds_len == 0)
			continue;

		/* Fill the ring entry. */
		flags = HME_XD_ENCODE_TSIZE(segs[i].ds_len);
		if (len == 0)
			flags |= HME_XD_SOP;
		if (len + segs[i].ds_len == totsz)
			flags |= HME_XD_EOP;
		CTR5(KTR_HME, "hme_txdma_callback: seg %d/%d, ri %d, "
		    "flags %#x, addr %#x", i + 1, nsegs, tdhead, (u_int)flags,
		    (u_int)segs[i].ds_addr);
		HME_XD_SETFLAGS(pci, txd, tdhead, flags);
		HME_XD_SETADDR(pci, txd, tdhead, segs[i].ds_addr);

		ta->hta_sc->sc_rb.rb_td_nbusy++;
		htx->htx_lastdesc = tdhead;
		tdhead = (tdhead + 1) % HME_NTXDESC;
		len += segs[i].ds_len;
	}
	ta->hta_sc->sc_rb.rb_tdhead = tdhead;
	KASSERT((flags & HME_XD_EOP) != 0,
	    ("hme_txdma_callback: missed end of packet!"));
}

/* TX TCP/UDP checksum */
static void
hme_txcksum(struct mbuf *m, u_int32_t *cflags)
{
	struct ip *ip;
	u_int32_t offset, offset2;
	caddr_t p;

	for (; m && m->m_len == 0; m = m->m_next)
		;
	if (m == NULL || m->m_len < ETHER_HDR_LEN) {
		printf("hme_txcksum: m_len < ETHER_HDR_LEN\n");
		return; /* checksum will be corrupted */
	}
	if (m->m_len < ETHER_HDR_LEN + sizeof(u_int32_t)) {
		if (m->m_len != ETHER_HDR_LEN) {
			printf("hme_txcksum: m_len != ETHER_HDR_LEN\n");
			return; /* checksum will be corrupted */
		}
		/* XXX */
		for (m = m->m_next; m && m->m_len == 0; m = m->m_next)
			;
		if (m == NULL)
			return; /* checksum will be corrupted */
		ip = mtod(m, struct ip *);
	} else {
		p = mtod(m, caddr_t);
		p += ETHER_HDR_LEN;
		ip = (struct ip *)p;
	}
	offset2 = m->m_pkthdr.csum_data;
	offset = (ip->ip_hl << 2) + ETHER_HDR_LEN;
	*cflags = offset << HME_XD_TXCKSUM_SSHIFT;
	*cflags |= ((offset + offset2) << HME_XD_TXCKSUM_OSHIFT);
	*cflags |= HME_XD_TXCKSUM;
}
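
/*
 * Worked example for hme_txcksum() above, for a plain TCP/IPv4 frame without
 * IP options: the checksum start offset is 14 + 20 = 34 (Ethernet plus IP
 * header), and since csum_data holds the checksum field's offset within the
 * transport header (16 for TCP), the result is stuffed at offset 34 + 16 = 50.
 */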

/*
 * Routine to DMA-map an mbuf chain, set up the descriptor rings accordingly
 * and start the transmission.
 * Returns 0 on success, -1 if there were not enough free descriptors to map
 * the packet, or an errno otherwise.
 */
static int
hme_load_txmbuf(struct hme_softc *sc, struct mbuf *m0)
{
	struct hme_txdma_arg cba;
	struct hme_txdesc *td;
	int error, si, ri;
	u_int32_t flags, cflags = 0;

	si = sc->sc_rb.rb_tdhead;
	if ((td = STAILQ_FIRST(&sc->sc_rb.rb_txfreeq)) == NULL)
		return (-1);
	if ((m0->m_pkthdr.csum_flags & sc->sc_csum_features) != 0)
		hme_txcksum(m0, &cflags);
	cba.hta_sc = sc;
	cba.hta_htx = td;
	if ((error = bus_dmamap_load_mbuf(sc->sc_tdmatag, td->htx_dmamap,
	    m0, hme_txdma_callback, &cba, 0)) != 0)
		goto fail;
	if (cba.hta_ndescs == -1) {
		error = -1;
		goto fail;
	}
	bus_dmamap_sync(sc->sc_tdmatag, td->htx_dmamap,
	    BUS_DMASYNC_PREWRITE);

	STAILQ_REMOVE_HEAD(&sc->sc_rb.rb_txfreeq, htx_q);
	STAILQ_INSERT_TAIL(&sc->sc_rb.rb_txbusyq, td, htx_q);
	td->htx_m = m0;

	/* Hand descriptor ownership to the HME, back to front. */
	ri = sc->sc_rb.rb_tdhead;
	CTR2(KTR_HME, "hme_load_mbuf: next desc is %d (%#x)",
	    ri, HME_XD_GETFLAGS(sc->sc_pci, sc->sc_rb.rb_txd, ri));
	do {
		ri = (ri + HME_NTXDESC - 1) % HME_NTXDESC;
		flags = HME_XD_GETFLAGS(sc->sc_pci, sc->sc_rb.rb_txd, ri) |
		    HME_XD_OWN | cflags;
		CTR3(KTR_HME, "hme_load_mbuf: activating ri %d, si %d (%#x)",
		    ri, si, flags);
		HME_XD_SETFLAGS(sc->sc_pci, sc->sc_rb.rb_txd, ri, flags);
	} while (ri != si);
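	/*
	 * The loop above walks the chain backwards from the last descriptor
	 * to the first, so the OWN bit of the first (SOP) descriptor is set
	 * last and the chip can never see a partially constructed chain.
	 */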

	/* Start the transmission. */
	HME_ETX_WRITE_4(sc, HME_ETXI_PENDING, HME_ETX_TP_DMAWAKEUP);
	return (0);
fail:
	bus_dmamap_unload(sc->sc_tdmatag, td->htx_dmamap);
	return (error);
}

/*
 * Pass a packet to the higher levels.
 */
static void
hme_read(struct hme_softc *sc, int ix, int len, u_int32_t flags)
{
	struct ifnet *ifp = sc->sc_ifp;
	struct mbuf *m;

	if (len <= sizeof(struct ether_header) ||
	    len > HME_MAX_FRAMESIZE) {
#ifdef HMEDEBUG
		HME_WHINE(sc->sc_dev, "invalid packet size %d; dropping\n",
		    len);
#endif
		ifp->if_ierrors++;
		hme_discard_rxbuf(sc, ix);
		return;
	}

	m = sc->sc_rb.rb_rxdesc[ix].hrx_m;
	CTR1(KTR_HME, "hme_read: len %d", len);

	if (hme_add_rxbuf(sc, ix, 0) != 0) {
		/*
		 * hme_add_rxbuf will leave the old buffer in the ring until
		 * it is sure that a new buffer can be mapped. If it can not,
		 * drop the packet, but leave the interface up.
		 */
		ifp->if_iqdrops++;
		hme_discard_rxbuf(sc, ix);
		return;
	}

	ifp->if_ipackets++;

	m->m_pkthdr.rcvif = ifp;
	m->m_pkthdr.len = m->m_len = len + HME_RXOFFS;
	m_adj(m, HME_RXOFFS);
	/* RX TCP/UDP checksum */
	if (ifp->if_capenable & IFCAP_RXCSUM)
		hme_rxcksum(m, flags);
	/* Pass the packet up. */
	HME_UNLOCK(sc);
	(*ifp->if_input)(ifp, m);
	HME_LOCK(sc);
}

static void
hme_start(struct ifnet *ifp)
{
	struct hme_softc *sc = ifp->if_softc;

	HME_LOCK(sc);
	hme_start_locked(ifp);
	HME_UNLOCK(sc);
}

static void
hme_start_locked(struct ifnet *ifp)
{
	struct hme_softc *sc = (struct hme_softc *)ifp->if_softc;
	struct mbuf *m;
	int error, enq = 0;

	if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
	    IFF_DRV_RUNNING)
		return;

	error = 0;
	for (;;) {
		IFQ_DRV_DEQUEUE(&ifp->if_snd, m);
		if (m == NULL)
			break;

		error = hme_load_txmbuf(sc, m);
		if (error == -1) {
			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
			IFQ_DRV_PREPEND(&ifp->if_snd, m);
			break;
		} else if (error > 0) {
			printf("hme_start: error %d while loading mbuf\n",
			    error);
		} else {
			enq = 1;
			BPF_MTAP(ifp, m);
		}
	}

	if (sc->sc_rb.rb_td_nbusy == HME_NTXDESC || error == -1)
		ifp->if_drv_flags |= IFF_DRV_OACTIVE;
	/* Set watchdog timer if a packet was queued */
	if (enq) {
		bus_dmamap_sync(sc->sc_cdmatag, sc->sc_cdmamap,
		    BUS_DMASYNC_PREWRITE);
		ifp->if_timer = 5;
	}
}

/*
 * Transmit interrupt.
 */
static void
hme_tint(struct hme_softc *sc)
{
	struct ifnet *ifp = sc->sc_ifp;
	struct hme_txdesc *htx;
	unsigned int ri, txflags;

	/*
	 * Unload collision counters
	 */
	ifp->if_collisions +=
	    HME_MAC_READ_4(sc, HME_MACI_NCCNT) +
	    HME_MAC_READ_4(sc, HME_MACI_FCCNT) +
	    HME_MAC_READ_4(sc, HME_MACI_EXCNT) +
	    HME_MAC_READ_4(sc, HME_MACI_LTCNT);

	/*
	 * Then clear the hardware counters.
	 */
	HME_MAC_WRITE_4(sc, HME_MACI_NCCNT, 0);
	HME_MAC_WRITE_4(sc, HME_MACI_FCCNT, 0);
	HME_MAC_WRITE_4(sc, HME_MACI_EXCNT, 0);
	HME_MAC_WRITE_4(sc, HME_MACI_LTCNT, 0);

	htx = STAILQ_FIRST(&sc->sc_rb.rb_txbusyq);
	bus_dmamap_sync(sc->sc_cdmatag, sc->sc_cdmamap, BUS_DMASYNC_POSTREAD);
	/* Fetch current position in the transmit ring */
	for (ri = sc->sc_rb.rb_tdtail;; ri = (ri + 1) % HME_NTXDESC) {
		if (sc->sc_rb.rb_td_nbusy <= 0) {
			CTR0(KTR_HME, "hme_tint: not busy!");
			break;
		}

		txflags = HME_XD_GETFLAGS(sc->sc_pci, sc->sc_rb.rb_txd, ri);
		CTR2(KTR_HME, "hme_tint: index %d, flags %#x", ri, txflags);

		if ((txflags & HME_XD_OWN) != 0)
			break;

		CTR0(KTR_HME, "hme_tint: not owned");
		--sc->sc_rb.rb_td_nbusy;
		ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;

		/* Complete packet transmitted? */
		if ((txflags & HME_XD_EOP) == 0)
			continue;

		KASSERT(htx->htx_lastdesc == ri,
		    ("hme_tint: ring indices skewed: %d != %d!",
		    htx->htx_lastdesc, ri));
		bus_dmamap_sync(sc->sc_tdmatag, htx->htx_dmamap,
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_tdmatag, htx->htx_dmamap);

		ifp->if_opackets++;
		m_freem(htx->htx_m);
		htx->htx_m = NULL;
		STAILQ_REMOVE_HEAD(&sc->sc_rb.rb_txbusyq, htx_q);
		STAILQ_INSERT_TAIL(&sc->sc_rb.rb_txfreeq, htx, htx_q);
		htx = STAILQ_FIRST(&sc->sc_rb.rb_txbusyq);
	}
	/* Turn off watchdog */
	if (sc->sc_rb.rb_td_nbusy == 0)
		ifp->if_timer = 0;

	/* Update ring */
	sc->sc_rb.rb_tdtail = ri;

	hme_start_locked(ifp);

	if (sc->sc_rb.rb_td_nbusy == 0)
		ifp->if_timer = 0;
}

/*
 * RX TCP/UDP checksum
 */
static void
hme_rxcksum(struct mbuf *m, u_int32_t flags)
{
	struct ether_header *eh;
	struct ip *ip;
	struct udphdr *uh;
	int32_t hlen, len, pktlen;
	u_int16_t cksum, *opts;
	u_int32_t temp32;

	pktlen = m->m_pkthdr.len;
	if (pktlen < sizeof(struct ether_header) + sizeof(struct ip))
		return;
	eh = mtod(m, struct ether_header *);
	if (eh->ether_type != htons(ETHERTYPE_IP))
		return;
	ip = (struct ip *)(eh + 1);
	if (ip->ip_v != IPVERSION)
		return;

	hlen = ip->ip_hl << 2;
	pktlen -= sizeof(struct ether_header);
	if (hlen < sizeof(struct ip))
		return;
	if (ntohs(ip->ip_len) < hlen)
		return;
	if (ntohs(ip->ip_len) != pktlen)
		return;
	if (ip->ip_off & htons(IP_MF | IP_OFFMASK))
		return;	/* can't handle fragmented packet */

	switch (ip->ip_p) {
	case IPPROTO_TCP:
		if (pktlen < (hlen + sizeof(struct tcphdr)))
			return;
		break;
	case IPPROTO_UDP:
		if (pktlen < (hlen + sizeof(struct udphdr)))
			return;
		uh = (struct udphdr *)((caddr_t)ip + hlen);
		if (uh->uh_sum == 0)
			return;	/* no checksum */
		break;
	default:
		return;
	}

	cksum = ~(flags & HME_XD_RXCKSUM);
	/* checksum fixup for IP options */
	len = hlen - sizeof(struct ip);
	if (len > 0) {
		opts = (u_int16_t *)(ip + 1);
		for (; len > 0; len -= sizeof(u_int16_t), opts++) {
			temp32 = cksum - *opts;
			temp32 = (temp32 >> 16) + (temp32 & 65535);
			cksum = temp32 & 65535;
		}
	}
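	/*
	 * The fixup above is needed because the hardware starts summing at
	 * the fixed offset programmed into HME_ERXI_CFG by hme_init_locked()
	 * (Ethernet header plus 20 bytes), so any IP option words were
	 * included in the sum and are backed out here in one's complement.
	 */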
	m->m_pkthdr.csum_flags |= CSUM_DATA_VALID;
	m->m_pkthdr.csum_data = cksum;
}

/*
 * Receive interrupt.
 */
static void
hme_rint(struct hme_softc *sc)
{
	caddr_t xdr = sc->sc_rb.rb_rxd;
	struct ifnet *ifp = sc->sc_ifp;
	unsigned int ri, len;
	int progress = 0;
	u_int32_t flags;

	/*
	 * Process all buffers with valid data.
	 */
	bus_dmamap_sync(sc->sc_cdmatag, sc->sc_cdmamap, BUS_DMASYNC_POSTREAD);
	for (ri = sc->sc_rb.rb_rdtail;; ri = (ri + 1) % HME_NRXDESC) {
		flags = HME_XD_GETFLAGS(sc->sc_pci, xdr, ri);
		CTR2(KTR_HME, "hme_rint: index %d, flags %#x", ri, flags);
		if ((flags & HME_XD_OWN) != 0)
			break;

		progress++;
		if ((flags & HME_XD_OFL) != 0) {
			device_printf(sc->sc_dev, "buffer overflow, ri=%d; "
			    "flags=0x%x\n", ri, flags);
			ifp->if_ierrors++;
			hme_discard_rxbuf(sc, ri);
		} else {
			len = HME_XD_DECODE_RSIZE(flags);
			hme_read(sc, ri, len, flags);
		}
	}
	if (progress) {
		bus_dmamap_sync(sc->sc_cdmatag, sc->sc_cdmamap,
		    BUS_DMASYNC_PREWRITE);
	}
	sc->sc_rb.rb_rdtail = ri;
}

static void
hme_eint(struct hme_softc *sc, u_int status)
{

	if ((status & HME_SEB_STAT_MIFIRQ) != 0) {
		device_printf(sc->sc_dev, "XXXlink status changed\n");
		return;
	}

	HME_WHINE(sc->sc_dev, "error signaled, status=%#x\n", status);
}

void
hme_intr(void *v)
{
	struct hme_softc *sc = (struct hme_softc *)v;
	u_int32_t status;

	HME_LOCK(sc);
	status = HME_SEB_READ_4(sc, HME_SEBI_STAT);
	CTR1(KTR_HME, "hme_intr: status %#x", (u_int)status);

	if ((status & HME_SEB_STAT_ALL_ERRORS) != 0)
		hme_eint(sc, status);

	if ((status & (HME_SEB_STAT_TXALL | HME_SEB_STAT_HOSTTOTX)) != 0)
		hme_tint(sc);

	if ((status & HME_SEB_STAT_RXTOHOST) != 0)
		hme_rint(sc);
	HME_UNLOCK(sc);
}

static void
hme_watchdog(struct ifnet *ifp)
{
	struct hme_softc *sc = ifp->if_softc;
#ifdef HMEDEBUG
	u_int32_t status;
#endif

	HME_LOCK(sc);
#ifdef HMEDEBUG
	status = HME_SEB_READ_4(sc, HME_SEBI_STAT);
	CTR1(KTR_HME, "hme_watchdog: status %x", (u_int)status);
#endif
	device_printf(sc->sc_dev, "device timeout\n");
	++ifp->if_oerrors;

	hme_init_locked(sc);
	HME_UNLOCK(sc);
}

/*
 * Initialize the MII Management Interface
 */
static void
hme_mifinit(struct hme_softc *sc)
{
	u_int32_t v;

	/* Configure the MIF in frame mode */
	v = HME_MIF_READ_4(sc, HME_MIFI_CFG);
	v &= ~HME_MIF_CFG_BBMODE;
	HME_MIF_WRITE_4(sc, HME_MIFI_CFG, v);
}

/*
 * MII interface
 */
int
hme_mii_readreg(device_t dev, int phy, int reg)
{
	struct hme_softc *sc = device_get_softc(dev);
	int n;
	u_int32_t v;

	/* Select the desired PHY in the MIF configuration register */
	v = HME_MIF_READ_4(sc, HME_MIFI_CFG);
	/* Clear PHY select bit */
	v &= ~HME_MIF_CFG_PHY;
	if (phy == HME_PHYAD_EXTERNAL)
		/* Set PHY select bit to get at external device */
		v |= HME_MIF_CFG_PHY;
	HME_MIF_WRITE_4(sc, HME_MIFI_CFG, v);

	/* Construct the frame command */
	v = (MII_COMMAND_START << HME_MIF_FO_ST_SHIFT) |
	    HME_MIF_FO_TAMSB |
	    (MII_COMMAND_READ << HME_MIF_FO_OPC_SHIFT) |
	    (phy << HME_MIF_FO_PHYAD_SHIFT) |
	    (reg << HME_MIF_FO_REGAD_SHIFT);
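	/*
	 * The fields above appear to follow the IEEE 802.3 clause 22
	 * management frame format: start bits, opcode, PHY address,
	 * register address and a turnaround before the 16 data bits.
	 */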

	HME_MIF_WRITE_4(sc, HME_MIFI_FO, v);
	for (n = 0; n < 100; n++) {
		DELAY(1);
		v = HME_MIF_READ_4(sc, HME_MIFI_FO);
		if (v & HME_MIF_FO_TALSB) {
			return (v & HME_MIF_FO_DATA);
		}
	}

	device_printf(sc->sc_dev, "mii_read timeout\n");
	return (0);
}

int
hme_mii_writereg(device_t dev, int phy, int reg, int val)
{
	struct hme_softc *sc = device_get_softc(dev);
	int n;
	u_int32_t v;

	/* Select the desired PHY in the MIF configuration register */
	v = HME_MIF_READ_4(sc, HME_MIFI_CFG);
	/* Clear PHY select bit */
	v &= ~HME_MIF_CFG_PHY;
	if (phy == HME_PHYAD_EXTERNAL)
		/* Set PHY select bit to get at external device */
		v |= HME_MIF_CFG_PHY;
	HME_MIF_WRITE_4(sc, HME_MIFI_CFG, v);

	/* Construct the frame command */
	v = (MII_COMMAND_START << HME_MIF_FO_ST_SHIFT) |
	    HME_MIF_FO_TAMSB |
	    (MII_COMMAND_WRITE << HME_MIF_FO_OPC_SHIFT) |
	    (phy << HME_MIF_FO_PHYAD_SHIFT) |
	    (reg << HME_MIF_FO_REGAD_SHIFT) |
	    (val & HME_MIF_FO_DATA);

	HME_MIF_WRITE_4(sc, HME_MIFI_FO, v);
	for (n = 0; n < 100; n++) {
		DELAY(1);
		v = HME_MIF_READ_4(sc, HME_MIFI_FO);
		if (v & HME_MIF_FO_TALSB)
			return (1);
	}

	device_printf(sc->sc_dev, "mii_write timeout\n");
	return (0);
}

void
hme_mii_statchg(device_t dev)
{
	struct hme_softc *sc = device_get_softc(dev);
	int instance;
	int phy;
	u_int32_t v;

	instance = IFM_INST(sc->sc_mii->mii_media.ifm_cur->ifm_media);
	phy = sc->sc_phys[instance];
#ifdef HMEDEBUG
	if (sc->sc_debug)
		printf("hme_mii_statchg: status change: phy = %d\n", phy);
#endif

	/* Select the current PHY in the MIF configuration register */
	v = HME_MIF_READ_4(sc, HME_MIFI_CFG);
	v &= ~HME_MIF_CFG_PHY;
	if (phy == HME_PHYAD_EXTERNAL)
		v |= HME_MIF_CFG_PHY;
	HME_MIF_WRITE_4(sc, HME_MIFI_CFG, v);

	/* Set the MAC Full Duplex bit appropriately */
	v = HME_MAC_READ_4(sc, HME_MACI_TXCFG);
	if (!hme_mac_bitflip(sc, HME_MACI_TXCFG, v, HME_MAC_TXCFG_ENABLE, 0))
		return;
	if ((IFM_OPTIONS(sc->sc_mii->mii_media_active) & IFM_FDX) != 0)
		v |= HME_MAC_TXCFG_FULLDPLX;
	else
		v &= ~HME_MAC_TXCFG_FULLDPLX;
	HME_MAC_WRITE_4(sc, HME_MACI_TXCFG, v);
	if (!hme_mac_bitflip(sc, HME_MACI_TXCFG, v, 0, HME_MAC_TXCFG_ENABLE))
		return;
}

static int
hme_mediachange(struct ifnet *ifp)
{
	struct hme_softc *sc = ifp->if_softc;
	int error;

	HME_LOCK(sc);
	error = mii_mediachg(sc->sc_mii);
	HME_UNLOCK(sc);
	return (error);
}

static void
hme_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct hme_softc *sc = ifp->if_softc;

	HME_LOCK(sc);
	if ((ifp->if_flags & IFF_UP) == 0) {
		HME_UNLOCK(sc);
		return;
	}

	mii_pollstat(sc->sc_mii);
	ifmr->ifm_active = sc->sc_mii->mii_media_active;
	ifmr->ifm_status = sc->sc_mii->mii_media_status;
	HME_UNLOCK(sc);
}

/*
 * Process an ioctl request.
 */
static int
hme_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct hme_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	int error = 0;

	switch (cmd) {
	case SIOCSIFFLAGS:
		HME_LOCK(sc);
		if ((ifp->if_flags & IFF_UP) == 0 &&
		    (ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
			/*
			 * If interface is marked down and it is running, then
			 * stop it.
			 */
			hme_stop(sc);
		} else if ((ifp->if_flags & IFF_UP) != 0 &&
		    (ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
			/*
			 * If interface is marked up and it is stopped, then
			 * start it.
			 */
			hme_init_locked(sc);
		} else if ((ifp->if_flags & IFF_UP) != 0) {
			/*
			 * Reset the interface to pick up changes in any other
			 * flags that affect hardware registers.
			 */
			hme_init_locked(sc);
		}
		if ((ifp->if_flags & IFF_LINK0) != 0)
			sc->sc_csum_features |= CSUM_UDP;
		else
			sc->sc_csum_features &= ~CSUM_UDP;
		if ((ifp->if_capenable & IFCAP_TXCSUM) != 0)
			ifp->if_hwassist = sc->sc_csum_features;
#ifdef HMEDEBUG
		sc->sc_debug = (ifp->if_flags & IFF_DEBUG) != 0 ? 1 : 0;
#endif
		HME_UNLOCK(sc);
		break;

	case SIOCADDMULTI:
	case SIOCDELMULTI:
		HME_LOCK(sc);
		hme_setladrf(sc, 1);
		HME_UNLOCK(sc);
		error = 0;
		break;
	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii->mii_media, cmd);
		break;
	case SIOCSIFCAP:
		HME_LOCK(sc);
		ifp->if_capenable = ifr->ifr_reqcap;
		if ((ifp->if_capenable & IFCAP_TXCSUM) != 0)
			ifp->if_hwassist = sc->sc_csum_features;
		else
			ifp->if_hwassist = 0;
		HME_UNLOCK(sc);
		break;
	default:
		error = ether_ioctl(ifp, cmd, data);
		break;
	}

	return (error);
}

/*
 * Set up the logical address filter.
 */
static void
hme_setladrf(struct hme_softc *sc, int reenable)
{
	struct ifnet *ifp = sc->sc_ifp;
	struct ifmultiaddr *inm;
	u_int32_t crc;
	u_int32_t hash[4];
	u_int32_t macc;

	HME_LOCK_ASSERT(sc, MA_OWNED);
	/* Clear hash table */
	hash[3] = hash[2] = hash[1] = hash[0] = 0;

	/* Get current RX configuration */
	macc = HME_MAC_READ_4(sc, HME_MACI_RXCFG);

	/*
	 * Disable the receiver while changing its state, as the
	 * documentation mandates.
	 * We then must wait until the bit clears in the register. This should
	 * take at most 3.5ms.
	 */
	if (!hme_mac_bitflip(sc, HME_MACI_RXCFG, macc, HME_MAC_RXCFG_ENABLE, 0))
		return;
	/* Disable the hash filter before writing to the filter registers. */
	if (!hme_mac_bitflip(sc, HME_MACI_RXCFG, macc,
	    HME_MAC_RXCFG_HENABLE, 0))
		return;

	/* Make RXMAC really SIMPLEX. */
	macc |= HME_MAC_RXCFG_ME;
	if (reenable)
		macc |= HME_MAC_RXCFG_ENABLE;
	else
		macc &= ~HME_MAC_RXCFG_ENABLE;

	if ((ifp->if_flags & IFF_PROMISC) != 0) {
		/* Turn on promiscuous mode; turn off the hash filter */
		macc |= HME_MAC_RXCFG_PMISC;
		macc &= ~HME_MAC_RXCFG_HENABLE;
		ifp->if_flags |= IFF_ALLMULTI;
		goto chipit;
	}

	/* Turn off promiscuous mode; turn on the hash filter */
	macc &= ~HME_MAC_RXCFG_PMISC;
	macc |= HME_MAC_RXCFG_HENABLE;

	/*
	 * Set up the multicast address filter by passing all multicast
	 * addresses through a CRC generator, and then using the high order
	 * 6 bits as an index into the 64 bit logical address filter. The
	 * high order bits select the word, while the rest of the bits select
	 * the bit within the word.
	 */
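	/*
	 * For example, a CRC whose top 6 bits are 101010 (0x2a = 42) sets
	 * bit 10 (42 & 0xf) of hash[2] (42 >> 4).
	 */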

	IF_ADDR_LOCK(sc->sc_ifp);
	TAILQ_FOREACH(inm, &sc->sc_ifp->if_multiaddrs, ifma_link) {
		if (inm->ifma_addr->sa_family != AF_LINK)
			continue;
		crc = ether_crc32_le(LLADDR((struct sockaddr_dl *)
		    inm->ifma_addr), ETHER_ADDR_LEN);

		/* Just want the 6 most significant bits. */
		crc >>= 26;

		/* Set the corresponding bit in the filter. */
		hash[crc >> 4] |= 1 << (crc & 0xf);
	}
	IF_ADDR_UNLOCK(sc->sc_ifp);

	ifp->if_flags &= ~IFF_ALLMULTI;

chipit:
	/* Now load the hash table into the chip */
	HME_MAC_WRITE_4(sc, HME_MACI_HASHTAB0, hash[0]);
	HME_MAC_WRITE_4(sc, HME_MACI_HASHTAB1, hash[1]);
	HME_MAC_WRITE_4(sc, HME_MACI_HASHTAB2, hash[2]);
	HME_MAC_WRITE_4(sc, HME_MACI_HASHTAB3, hash[3]);
	hme_mac_bitflip(sc, HME_MACI_RXCFG, macc, 0,
	    macc & (HME_MAC_RXCFG_ENABLE | HME_MAC_RXCFG_HENABLE |
	    HME_MAC_RXCFG_ME));
}