sys/dev/sf/if_sf.c
1 /*-
2 * Copyright (c) 1997, 1998, 1999
3 * Bill Paul <wpaul@ctr.columbia.edu>. All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 * 3. All advertising materials mentioning features or use of this software
14 * must display the following acknowledgement:
15 * This product includes software developed by Bill Paul.
16 * 4. Neither the name of the author nor the names of any co-contributors
17 * may be used to endorse or promote products derived from this software
18 * without specific prior written permission.
19 *
20 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
21 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
24 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
30 * THE POSSIBILITY OF SUCH DAMAGE.
31 */
32
33 #include <sys/cdefs.h>
34 __FBSDID("$FreeBSD$");
35
36 /*
37 * Adaptec AIC-6915 "Starfire" PCI fast ethernet driver for FreeBSD.
38 * Programming manual is available from:
39 * http://download.adaptec.com/pdfs/user_guides/aic6915_pg.pdf.
40 *
41 * Written by Bill Paul <wpaul@ctr.columbia.edu>
42 * Department of Electrical Engineering
43 * Columbia University, New York City
44 */
45 /*
46 * The Adaptec AIC-6915 "Starfire" is a 64-bit 10/100 PCI ethernet
47 * controller designed with flexibility and low CPU load in mind.
48 * The Starfire offers high and low priority buffer queues, a
49 * producer/consumer index mechanism and several different buffer
50 * queue and completion queue descriptor types. Any one of a number
51 * of different driver designs can be used, depending on system and
52 * OS requirements. This driver makes use of type2 transmit frame
53 * descriptors to take full advantage of fragmented packet buffers
54 * and two RX buffer queues prioritized on size (one queue for small
55 * frames that will fit into a single mbuf, another with full size
56 * mbuf clusters for everything else). The producer/consumer indexes
57 * and completion queues are also used.
58 *
59 * One downside to the Starfire has to do with alignment: buffer
60 * queues must be aligned on 256-byte boundaries, and receive buffers
61 * must be aligned on longword boundaries. The receive buffer alignment
62 * causes problems on strict alignment architectures, where the
63 * packet payload should be longword aligned. There is no simple way
64 * around this.
65 *
66 * For receive filtering, the Starfire offers 16 perfect filter slots
67 * and a 512-bit hash table.
68 *
69 * The Starfire has no internal transceiver, relying instead on an
70 * external MII-based transceiver. Accessing registers on external
71 * PHYs is done through a special register map rather than with the
72 * usual bitbang MDIO method.
73 *
74 * Accessing the registers on the Starfire is a little tricky. The
75 * Starfire has a 512K internal register space. When programmed for
76 * PCI memory mapped mode, the entire register space can be accessed
77 * directly. However in I/O space mode, only 256 bytes are directly
78 * mapped into PCI I/O space. The other registers can be accessed
79 * indirectly using the SF_INDIRECTIO_ADDR and SF_INDIRECTIO_DATA
80 * registers inside the 256-byte I/O window.
81 */
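/*
 * A minimal sketch of the two access styles (illustrative only; the
 * csr_read_4()/csr_write_4() helpers below are the real implementation).
 * In memory mapped mode a register is read directly:
 *
 *	val = CSR_READ_4(sc, reg + SF_RMAP_INTREG_BASE);
 *
 * In I/O space mode the same register is reached through the indirect
 * address/data pair inside the 256-byte window:
 *
 *	CSR_WRITE_4(sc, SF_INDIRECTIO_ADDR, reg + SF_RMAP_INTREG_BASE);
 *	val = CSR_READ_4(sc, SF_INDIRECTIO_DATA);
 */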
82
83 #ifdef HAVE_KERNEL_OPTION_HEADERS
84 #include "opt_device_polling.h"
85 #endif
86
87 #include <sys/param.h>
88 #include <sys/systm.h>
89 #include <sys/bus.h>
90 #include <sys/endian.h>
91 #include <sys/kernel.h>
92 #include <sys/malloc.h>
93 #include <sys/mbuf.h>
94 #include <sys/rman.h>
95 #include <sys/module.h>
96 #include <sys/socket.h>
97 #include <sys/sockio.h>
98 #include <sys/sysctl.h>
99
100 #include <net/bpf.h>
101 #include <net/if.h>
102 #include <net/if_arp.h>
103 #include <net/ethernet.h>
104 #include <net/if_dl.h>
105 #include <net/if_media.h>
106 #include <net/if_types.h>
107 #include <net/if_vlan_var.h>
108
109 #include <dev/mii/mii.h>
110 #include <dev/mii/miivar.h>
111
112 #include <dev/pci/pcireg.h>
113 #include <dev/pci/pcivar.h>
114
115 #include <machine/bus.h>
116
117 #include <dev/sf/if_sfreg.h>
118 #include <dev/sf/starfire_rx.h>
119 #include <dev/sf/starfire_tx.h>
120
121 /* "device miibus" required. See GENERIC if you get errors here. */
122 #include "miibus_if.h"
123
124 MODULE_DEPEND(sf, pci, 1, 1, 1);
125 MODULE_DEPEND(sf, ether, 1, 1, 1);
126 MODULE_DEPEND(sf, miibus, 1, 1, 1);
127
128 #undef SF_GFP_DEBUG
129 #define SF_CSUM_FEATURES (CSUM_TCP | CSUM_UDP)
130 /* Define this to activate partial TCP/UDP checksum offload. */
131 #undef SF_PARTIAL_CSUM_SUPPORT
132
133 static struct sf_type sf_devs[] = {
134 { AD_VENDORID, AD_DEVICEID_STARFIRE, "Adaptec AIC-6915 10/100BaseTX",
135 AD_SUBSYSID_62011_REV0, "Adaptec ANA-62011 (rev 0) 10/100BaseTX" },
136 { AD_VENDORID, AD_DEVICEID_STARFIRE, "Adaptec AIC-6915 10/100BaseTX",
137 AD_SUBSYSID_62011_REV1, "Adaptec ANA-62011 (rev 1) 10/100BaseTX" },
138 { AD_VENDORID, AD_DEVICEID_STARFIRE, "Adaptec AIC-6915 10/100BaseTX",
139 AD_SUBSYSID_62022, "Adaptec ANA-62022 10/100BaseTX" },
140 { AD_VENDORID, AD_DEVICEID_STARFIRE, "Adaptec AIC-6915 10/100BaseTX",
141 AD_SUBSYSID_62044_REV0, "Adaptec ANA-62044 (rev 0) 10/100BaseTX" },
142 { AD_VENDORID, AD_DEVICEID_STARFIRE, "Adaptec AIC-6915 10/100BaseTX",
143 AD_SUBSYSID_62044_REV1, "Adaptec ANA-62044 (rev 1) 10/100BaseTX" },
144 { AD_VENDORID, AD_DEVICEID_STARFIRE, "Adaptec AIC-6915 10/100BaseTX",
145 AD_SUBSYSID_62020, "Adaptec ANA-62020 10/100BaseFX" },
146 { AD_VENDORID, AD_DEVICEID_STARFIRE, "Adaptec AIC-6915 10/100BaseTX",
147 AD_SUBSYSID_69011, "Adaptec ANA-69011 10/100BaseTX" },
148 };
149
150 static int sf_probe(device_t);
151 static int sf_attach(device_t);
152 static int sf_detach(device_t);
153 static int sf_shutdown(device_t);
154 static int sf_suspend(device_t);
155 static int sf_resume(device_t);
156 static void sf_intr(void *);
157 static void sf_tick(void *);
158 static void sf_stats_update(struct sf_softc *);
159 #ifndef __NO_STRICT_ALIGNMENT
160 static __inline void sf_fixup_rx(struct mbuf *);
161 #endif
162 static int sf_rxeof(struct sf_softc *);
163 static void sf_txeof(struct sf_softc *);
164 static int sf_encap(struct sf_softc *, struct mbuf **);
165 static void sf_start(struct ifnet *);
166 static void sf_start_locked(struct ifnet *);
167 static int sf_ioctl(struct ifnet *, u_long, caddr_t);
168 static void sf_download_fw(struct sf_softc *);
169 static void sf_init(void *);
170 static void sf_init_locked(struct sf_softc *);
171 static void sf_stop(struct sf_softc *);
172 static void sf_watchdog(struct sf_softc *);
173 static int sf_ifmedia_upd(struct ifnet *);
174 static int sf_ifmedia_upd_locked(struct ifnet *);
175 static void sf_ifmedia_sts(struct ifnet *, struct ifmediareq *);
176 static void sf_reset(struct sf_softc *);
177 static int sf_dma_alloc(struct sf_softc *);
178 static void sf_dma_free(struct sf_softc *);
179 static int sf_init_rx_ring(struct sf_softc *);
180 static void sf_init_tx_ring(struct sf_softc *);
181 static int sf_newbuf(struct sf_softc *, int);
182 static void sf_rxfilter(struct sf_softc *);
183 static int sf_setperf(struct sf_softc *, int, uint8_t *);
184 static int sf_sethash(struct sf_softc *, caddr_t, int);
185 #ifdef notdef
186 static int sf_setvlan(struct sf_softc *, int, uint32_t);
187 #endif
188
189 static uint8_t sf_read_eeprom(struct sf_softc *, int);
190
191 static int sf_miibus_readreg(device_t, int, int);
192 static int sf_miibus_writereg(device_t, int, int, int);
193 static void sf_miibus_statchg(device_t);
194 #ifdef DEVICE_POLLING
195 static int sf_poll(struct ifnet *ifp, enum poll_cmd cmd, int count);
196 #endif
197
198 static uint32_t csr_read_4(struct sf_softc *, int);
199 static void csr_write_4(struct sf_softc *, int, uint32_t);
200 static void sf_txthresh_adjust(struct sf_softc *);
201 static int sf_sysctl_stats(SYSCTL_HANDLER_ARGS);
202 static int sysctl_int_range(SYSCTL_HANDLER_ARGS, int, int);
203 static int sysctl_hw_sf_int_mod(SYSCTL_HANDLER_ARGS);
204
205 static device_method_t sf_methods[] = {
206 /* Device interface */
207 DEVMETHOD(device_probe, sf_probe),
208 DEVMETHOD(device_attach, sf_attach),
209 DEVMETHOD(device_detach, sf_detach),
210 DEVMETHOD(device_shutdown, sf_shutdown),
211 DEVMETHOD(device_suspend, sf_suspend),
212 DEVMETHOD(device_resume, sf_resume),
213
214 /* MII interface */
215 DEVMETHOD(miibus_readreg, sf_miibus_readreg),
216 DEVMETHOD(miibus_writereg, sf_miibus_writereg),
217 DEVMETHOD(miibus_statchg, sf_miibus_statchg),
218
219 DEVMETHOD_END
220 };
221
222 static driver_t sf_driver = {
223 "sf",
224 sf_methods,
225 sizeof(struct sf_softc),
226 };
227
228 static devclass_t sf_devclass;
229
230 DRIVER_MODULE(sf, pci, sf_driver, sf_devclass, 0, 0);
231 DRIVER_MODULE(miibus, sf, miibus_driver, miibus_devclass, 0, 0);
232
233 #define SF_SETBIT(sc, reg, x) \
234 csr_write_4(sc, reg, csr_read_4(sc, reg) | (x))
235
236 #define SF_CLRBIT(sc, reg, x) \
237 csr_write_4(sc, reg, csr_read_4(sc, reg) & ~(x))
238
239 static uint32_t
240 csr_read_4(struct sf_softc *sc, int reg)
241 {
242 uint32_t val;
243
244 if (sc->sf_restype == SYS_RES_MEMORY)
245 val = CSR_READ_4(sc, (reg + SF_RMAP_INTREG_BASE));
246 else {
247 CSR_WRITE_4(sc, SF_INDIRECTIO_ADDR, reg + SF_RMAP_INTREG_BASE);
248 val = CSR_READ_4(sc, SF_INDIRECTIO_DATA);
249 }
250
251 return (val);
252 }
253
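/*
 * Read one byte from the EEPROM.  The EEPROM is presented as 32-bit
 * words starting at SF_EEADDR_BASE, so fetch the aligned word that
 * contains 'reg' and shift the requested byte down.
 */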
254 static uint8_t
255 sf_read_eeprom(struct sf_softc *sc, int reg)
256 {
257 uint8_t val;
258
259 val = (csr_read_4(sc, SF_EEADDR_BASE +
260 (reg & 0xFFFFFFFC)) >> (8 * (reg & 3))) & 0xFF;
261
262 return (val);
263 }
264
265 static void
266 csr_write_4(struct sf_softc *sc, int reg, uint32_t val)
267 {
268
269 if (sc->sf_restype == SYS_RES_MEMORY)
270 CSR_WRITE_4(sc, (reg + SF_RMAP_INTREG_BASE), val);
271 else {
272 CSR_WRITE_4(sc, SF_INDIRECTIO_ADDR, reg + SF_RMAP_INTREG_BASE);
273 CSR_WRITE_4(sc, SF_INDIRECTIO_DATA, val);
274 }
275 }
276
277 /*
278 * Copy the address 'mac' into the perfect RX filter entry at
279 * offset 'idx.' The perfect filter only has 16 entries so do
280 * some sanity tests.
281 */
282 static int
283 sf_setperf(struct sf_softc *sc, int idx, uint8_t *mac)
284 {
285
286 	if (idx < 0 || idx >= SF_RXFILT_PERFECT_CNT)
287 return (EINVAL);
288
289 if (mac == NULL)
290 return (EINVAL);
291
292 csr_write_4(sc, SF_RXFILT_PERFECT_BASE +
293 (idx * SF_RXFILT_PERFECT_SKIP) + 0, mac[5] | (mac[4] << 8));
294 csr_write_4(sc, SF_RXFILT_PERFECT_BASE +
295 (idx * SF_RXFILT_PERFECT_SKIP) + 4, mac[3] | (mac[2] << 8));
296 csr_write_4(sc, SF_RXFILT_PERFECT_BASE +
297 (idx * SF_RXFILT_PERFECT_SKIP) + 8, mac[1] | (mac[0] << 8));
298
299 return (0);
300 }
301
302 /*
303 * Set the bit in the 512-bit hash table that corresponds to the
304 * specified mac address 'mac.' If 'prio' is nonzero, update the
305 * priority hash table instead of the filter hash table.
306 */
307 static int
308 sf_sethash(struct sf_softc *sc, caddr_t mac, int prio)
309 {
310 uint32_t h;
311
312 if (mac == NULL)
313 return (EINVAL);
314
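	/*
	 * Use the top 9 bits of the big-endian CRC as the bit index:
	 * bits 8-4 select one of 32 filter registers and bits 3-0
	 * select a bit within that register's low 16 bits, giving
	 * 512 hash bits in total.
	 */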
315 h = ether_crc32_be(mac, ETHER_ADDR_LEN) >> 23;
316
317 if (prio) {
318 SF_SETBIT(sc, SF_RXFILT_HASH_BASE + SF_RXFILT_HASH_PRIOOFF +
319 (SF_RXFILT_HASH_SKIP * (h >> 4)), (1 << (h & 0xF)));
320 } else {
321 SF_SETBIT(sc, SF_RXFILT_HASH_BASE + SF_RXFILT_HASH_ADDROFF +
322 (SF_RXFILT_HASH_SKIP * (h >> 4)), (1 << (h & 0xF)));
323 }
324
325 return (0);
326 }
327
328 #ifdef notdef
329 /*
330 * Set a VLAN tag in the receive filter.
331 */
332 static int
333 sf_setvlan(struct sf_softc *sc, int idx, uint32_t vlan)
334 {
335
336 	if (idx < 0 || idx >= SF_RXFILT_HASH_CNT)
337 return (EINVAL);
338
339 csr_write_4(sc, SF_RXFILT_HASH_BASE +
340 (idx * SF_RXFILT_HASH_SKIP) + SF_RXFILT_HASH_VLANOFF, vlan);
341
342 return (0);
343 }
344 #endif
345
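/*
 * Read a PHY register.  The Starfire maps each PHY register into its
 * own slot in the register space (SF_PHY_REG(phy, reg)), so this is
 * just a CSR read polled until the data-valid bit is set.
 */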
346 static int
347 sf_miibus_readreg(device_t dev, int phy, int reg)
348 {
349 struct sf_softc *sc;
350 int i;
351 uint32_t val = 0;
352
353 sc = device_get_softc(dev);
354
355 for (i = 0; i < SF_TIMEOUT; i++) {
356 val = csr_read_4(sc, SF_PHY_REG(phy, reg));
357 if ((val & SF_MII_DATAVALID) != 0)
358 break;
359 }
360
361 if (i == SF_TIMEOUT)
362 return (0);
363
364 val &= SF_MII_DATAPORT;
365 if (val == 0xffff)
366 return (0);
367
368 return (val);
369 }
370
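/*
 * Write a PHY register through the same register map and poll until
 * the busy bit clears.
 */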
371 static int
372 sf_miibus_writereg(device_t dev, int phy, int reg, int val)
373 {
374 struct sf_softc *sc;
375 int i;
376 int busy;
377
378 sc = device_get_softc(dev);
379
380 csr_write_4(sc, SF_PHY_REG(phy, reg), val);
381
382 for (i = 0; i < SF_TIMEOUT; i++) {
383 busy = csr_read_4(sc, SF_PHY_REG(phy, reg));
384 if ((busy & SF_MII_BUSY) == 0)
385 break;
386 }
387
388 return (0);
389 }
390
391 static void
392 sf_miibus_statchg(device_t dev)
393 {
394 struct sf_softc *sc;
395 struct mii_data *mii;
396 struct ifnet *ifp;
397 uint32_t val;
398
399 sc = device_get_softc(dev);
400 mii = device_get_softc(sc->sf_miibus);
401 ifp = sc->sf_ifp;
402 if (mii == NULL || ifp == NULL ||
403 (ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
404 return;
405
406 if (mii->mii_media_status & IFM_ACTIVE) {
407 if (IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE)
408 sc->sf_link = 1;
409 } else
410 sc->sf_link = 0;
411
412 val = csr_read_4(sc, SF_MACCFG_1);
413 val &= ~SF_MACCFG1_FULLDUPLEX;
414 val &= ~(SF_MACCFG1_RX_FLOWENB | SF_MACCFG1_TX_FLOWENB);
415 if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0) {
416 val |= SF_MACCFG1_FULLDUPLEX;
417 csr_write_4(sc, SF_BKTOBKIPG, SF_IPGT_FDX);
418 #ifdef notyet
419 /* Configure flow-control bits. */
420 if ((IFM_OPTIONS(sc->sc_mii->mii_media_active) &
421 IFM_ETH_RXPAUSE) != 0)
422 val |= SF_MACCFG1_RX_FLOWENB;
423 if ((IFM_OPTIONS(sc->sc_mii->mii_media_active) &
424 IFM_ETH_TXPAUSE) != 0)
425 val |= SF_MACCFG1_TX_FLOWENB;
426 #endif
427 } else
428 csr_write_4(sc, SF_BKTOBKIPG, SF_IPGT_HDX);
429
430 	/* Reset the MAC so that the changes take effect. */
431 csr_write_4(sc, SF_MACCFG_1, val | SF_MACCFG1_SOFTRESET);
432 DELAY(1000);
433 csr_write_4(sc, SF_MACCFG_1, val);
434
435 val = csr_read_4(sc, SF_TIMER_CTL);
436 if (IFM_SUBTYPE(mii->mii_media_active) == IFM_100_TX)
437 val |= SF_TIMER_TIMES_TEN;
438 else
439 val &= ~SF_TIMER_TIMES_TEN;
440 csr_write_4(sc, SF_TIMER_CTL, val);
441 }
442
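/*
 * Program the receive filter.  Perfect filter entry 0 is left alone
 * (the clearing loop below starts at 1); the first 15 multicast
 * groups go into the remaining perfect entries and any others into
 * the 512-bit hash table.
 */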
443 static void
444 sf_rxfilter(struct sf_softc *sc)
445 {
446 struct ifnet *ifp;
447 int i;
448 struct ifmultiaddr *ifma;
449 uint8_t dummy[ETHER_ADDR_LEN] = { 0, 0, 0, 0, 0, 0 };
450 uint32_t rxfilt;
451
452 ifp = sc->sf_ifp;
453
454 /* First zot all the existing filters. */
455 for (i = 1; i < SF_RXFILT_PERFECT_CNT; i++)
456 sf_setperf(sc, i, dummy);
457 for (i = SF_RXFILT_HASH_BASE; i < (SF_RXFILT_HASH_MAX + 1);
458 i += sizeof(uint32_t))
459 csr_write_4(sc, i, 0);
460
461 rxfilt = csr_read_4(sc, SF_RXFILT);
462 rxfilt &= ~(SF_RXFILT_PROMISC | SF_RXFILT_ALLMULTI | SF_RXFILT_BROAD);
463 if ((ifp->if_flags & IFF_BROADCAST) != 0)
464 rxfilt |= SF_RXFILT_BROAD;
465 if ((ifp->if_flags & IFF_ALLMULTI) != 0 ||
466 (ifp->if_flags & IFF_PROMISC) != 0) {
467 if ((ifp->if_flags & IFF_PROMISC) != 0)
468 rxfilt |= SF_RXFILT_PROMISC;
469 if ((ifp->if_flags & IFF_ALLMULTI) != 0)
470 rxfilt |= SF_RXFILT_ALLMULTI;
471 goto done;
472 }
473
474 /* Now program new ones. */
475 i = 1;
476 if_maddr_rlock(ifp);
477 TAILQ_FOREACH_REVERSE(ifma, &ifp->if_multiaddrs, ifmultihead,
478 ifma_link) {
479 if (ifma->ifma_addr->sa_family != AF_LINK)
480 continue;
481 /*
482 * Program the first 15 multicast groups
483 * into the perfect filter. For all others,
484 * use the hash table.
485 */
486 if (i < SF_RXFILT_PERFECT_CNT) {
487 sf_setperf(sc, i,
488 LLADDR((struct sockaddr_dl *)ifma->ifma_addr));
489 i++;
490 continue;
491 }
492
493 sf_sethash(sc,
494 LLADDR((struct sockaddr_dl *)ifma->ifma_addr), 0);
495 }
496 if_maddr_runlock(ifp);
497
498 done:
499 csr_write_4(sc, SF_RXFILT, rxfilt);
500 }
501
502 /*
503 * Set media options.
504 */
505 static int
506 sf_ifmedia_upd(struct ifnet *ifp)
507 {
508 struct sf_softc *sc;
509 int error;
510
511 sc = ifp->if_softc;
512 SF_LOCK(sc);
513 error = sf_ifmedia_upd_locked(ifp);
514 SF_UNLOCK(sc);
515 return (error);
516 }
517
518 static int
519 sf_ifmedia_upd_locked(struct ifnet *ifp)
520 {
521 struct sf_softc *sc;
522 struct mii_data *mii;
523 struct mii_softc *miisc;
524
525 sc = ifp->if_softc;
526 mii = device_get_softc(sc->sf_miibus);
527 LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
528 mii_phy_reset(miisc);
529 return (mii_mediachg(mii));
530 }
531
532 /*
533 * Report current media status.
534 */
535 static void
536 sf_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
537 {
538 struct sf_softc *sc;
539 struct mii_data *mii;
540
541 sc = ifp->if_softc;
542 SF_LOCK(sc);
543 if ((ifp->if_flags & IFF_UP) == 0) {
544 SF_UNLOCK(sc);
545 return;
546 }
547
548 mii = device_get_softc(sc->sf_miibus);
549 mii_pollstat(mii);
550 ifmr->ifm_active = mii->mii_media_active;
551 ifmr->ifm_status = mii->mii_media_status;
552 SF_UNLOCK(sc);
553 }
554
555 static int
556 sf_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
557 {
558 struct sf_softc *sc;
559 struct ifreq *ifr;
560 struct mii_data *mii;
561 int error, mask;
562
563 sc = ifp->if_softc;
564 ifr = (struct ifreq *)data;
565 error = 0;
566
567 switch (command) {
568 case SIOCSIFFLAGS:
569 SF_LOCK(sc);
570 if (ifp->if_flags & IFF_UP) {
571 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
572 if ((ifp->if_flags ^ sc->sf_if_flags) &
573 (IFF_PROMISC | IFF_ALLMULTI))
574 sf_rxfilter(sc);
575 } else {
576 if (sc->sf_detach == 0)
577 sf_init_locked(sc);
578 }
579 } else {
580 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
581 sf_stop(sc);
582 }
583 sc->sf_if_flags = ifp->if_flags;
584 SF_UNLOCK(sc);
585 break;
586 case SIOCADDMULTI:
587 case SIOCDELMULTI:
588 SF_LOCK(sc);
589 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
590 sf_rxfilter(sc);
591 SF_UNLOCK(sc);
592 break;
593 case SIOCGIFMEDIA:
594 case SIOCSIFMEDIA:
595 mii = device_get_softc(sc->sf_miibus);
596 error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
597 break;
598 case SIOCSIFCAP:
599 mask = ifr->ifr_reqcap ^ ifp->if_capenable;
600 #ifdef DEVICE_POLLING
601 if ((mask & IFCAP_POLLING) != 0) {
602 if ((ifr->ifr_reqcap & IFCAP_POLLING) != 0) {
603 error = ether_poll_register(sf_poll, ifp);
604 if (error != 0)
605 break;
606 SF_LOCK(sc);
607 /* Disable interrupts. */
608 csr_write_4(sc, SF_IMR, 0);
609 ifp->if_capenable |= IFCAP_POLLING;
610 SF_UNLOCK(sc);
611 } else {
612 error = ether_poll_deregister(ifp);
613 /* Enable interrupts. */
614 SF_LOCK(sc);
615 csr_write_4(sc, SF_IMR, SF_INTRS);
616 ifp->if_capenable &= ~IFCAP_POLLING;
617 SF_UNLOCK(sc);
618 }
619 }
620 #endif /* DEVICE_POLLING */
621 if ((mask & IFCAP_TXCSUM) != 0) {
622 if ((IFCAP_TXCSUM & ifp->if_capabilities) != 0) {
623 SF_LOCK(sc);
624 ifp->if_capenable ^= IFCAP_TXCSUM;
625 if ((IFCAP_TXCSUM & ifp->if_capenable) != 0) {
626 ifp->if_hwassist |= SF_CSUM_FEATURES;
627 SF_SETBIT(sc, SF_GEN_ETH_CTL,
628 SF_ETHCTL_TXGFP_ENB);
629 } else {
630 ifp->if_hwassist &= ~SF_CSUM_FEATURES;
631 SF_CLRBIT(sc, SF_GEN_ETH_CTL,
632 SF_ETHCTL_TXGFP_ENB);
633 }
634 SF_UNLOCK(sc);
635 }
636 }
637 if ((mask & IFCAP_RXCSUM) != 0) {
638 if ((IFCAP_RXCSUM & ifp->if_capabilities) != 0) {
639 SF_LOCK(sc);
640 ifp->if_capenable ^= IFCAP_RXCSUM;
641 if ((IFCAP_RXCSUM & ifp->if_capenable) != 0)
642 SF_SETBIT(sc, SF_GEN_ETH_CTL,
643 SF_ETHCTL_RXGFP_ENB);
644 else
645 SF_CLRBIT(sc, SF_GEN_ETH_CTL,
646 SF_ETHCTL_RXGFP_ENB);
647 SF_UNLOCK(sc);
648 }
649 }
650 break;
651 default:
652 error = ether_ioctl(ifp, command, data);
653 break;
654 }
655
656 return (error);
657 }
658
659 static void
660 sf_reset(struct sf_softc *sc)
661 {
662 int i;
663
664 csr_write_4(sc, SF_GEN_ETH_CTL, 0);
665 SF_SETBIT(sc, SF_MACCFG_1, SF_MACCFG1_SOFTRESET);
666 DELAY(1000);
667 SF_CLRBIT(sc, SF_MACCFG_1, SF_MACCFG1_SOFTRESET);
668
669 SF_SETBIT(sc, SF_PCI_DEVCFG, SF_PCIDEVCFG_RESET);
670
671 for (i = 0; i < SF_TIMEOUT; i++) {
672 DELAY(10);
673 if (!(csr_read_4(sc, SF_PCI_DEVCFG) & SF_PCIDEVCFG_RESET))
674 break;
675 }
676
677 if (i == SF_TIMEOUT)
678 device_printf(sc->sf_dev, "reset never completed!\n");
679
680 /* Wait a little while for the chip to get its brains in order. */
681 DELAY(1000);
682 }
683
684 /*
685 * Probe for an Adaptec AIC-6915 chip. Check the PCI vendor and device
686 * IDs against our list and return a device name if we find a match.
687 * We also check the subsystem ID so that we can identify exactly which
688 * NIC has been found, if possible.
689 */
690 static int
691 sf_probe(device_t dev)
692 {
693 struct sf_type *t;
694 uint16_t vid;
695 uint16_t did;
696 uint16_t sdid;
697 int i;
698
699 vid = pci_get_vendor(dev);
700 did = pci_get_device(dev);
701 sdid = pci_get_subdevice(dev);
702
703 t = sf_devs;
704 for (i = 0; i < sizeof(sf_devs) / sizeof(sf_devs[0]); i++, t++) {
705 if (vid == t->sf_vid && did == t->sf_did) {
706 if (sdid == t->sf_sdid) {
707 device_set_desc(dev, t->sf_sname);
708 return (BUS_PROBE_DEFAULT);
709 }
710 }
711 }
712
713 if (vid == AD_VENDORID && did == AD_DEVICEID_STARFIRE) {
714 		/* unknown subdevice */
715 device_set_desc(dev, sf_devs[0].sf_name);
716 return (BUS_PROBE_DEFAULT);
717 }
718
719 return (ENXIO);
720 }
721
722 /*
723 * Attach the interface. Allocate softc structures, do ifmedia
724 * setup and ethernet/BPF attach.
725 */
726 static int
727 sf_attach(device_t dev)
728 {
729 int i;
730 struct sf_softc *sc;
731 struct ifnet *ifp;
732 uint32_t reg;
733 int rid, error = 0;
734 uint8_t eaddr[ETHER_ADDR_LEN];
735
736 sc = device_get_softc(dev);
737 sc->sf_dev = dev;
738
739 mtx_init(&sc->sf_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
740 MTX_DEF);
741 callout_init_mtx(&sc->sf_co, &sc->sf_mtx, 0);
742
743 /*
744 * Map control/status registers.
745 */
746 pci_enable_busmaster(dev);
747
748 /*
749 * Prefer memory space register mapping over I/O space as the
750 * hardware requires lots of register access to get various
751 	 * producer/consumer indexes during Tx/Rx operation. However, this
752 	 * requires a large (512K) mapping to cover the entire register
753 	 * space.
754 */
755 sc->sf_rid = PCIR_BAR(0);
756 sc->sf_restype = SYS_RES_MEMORY;
757 sc->sf_res = bus_alloc_resource_any(dev, sc->sf_restype, &sc->sf_rid,
758 RF_ACTIVE);
759 if (sc->sf_res == NULL) {
760 reg = pci_read_config(dev, PCIR_BAR(0), 4);
761 if ((reg & PCIM_BAR_MEM_64) == PCIM_BAR_MEM_64)
762 sc->sf_rid = PCIR_BAR(2);
763 else
764 sc->sf_rid = PCIR_BAR(1);
765 sc->sf_restype = SYS_RES_IOPORT;
766 sc->sf_res = bus_alloc_resource_any(dev, sc->sf_restype,
767 &sc->sf_rid, RF_ACTIVE);
768 if (sc->sf_res == NULL) {
769 device_printf(dev, "couldn't allocate resources\n");
770 mtx_destroy(&sc->sf_mtx);
771 return (ENXIO);
772 }
773 }
774 if (bootverbose)
775 device_printf(dev, "using %s space register mapping\n",
776 sc->sf_restype == SYS_RES_MEMORY ? "memory" : "I/O");
777
778 reg = pci_read_config(dev, PCIR_CACHELNSZ, 1);
779 if (reg == 0) {
780 /*
781 * If cache line size is 0, MWI is not used at all, so set
782 * reasonable default. AIC-6915 supports 0, 4, 8, 16, 32
783 * and 64.
784 */
785 reg = 16;
786 device_printf(dev, "setting PCI cache line size to %u\n", reg);
787 pci_write_config(dev, PCIR_CACHELNSZ, reg, 1);
788 } else {
789 if (bootverbose)
790 device_printf(dev, "PCI cache line size : %u\n", reg);
791 }
792 /* Enable MWI. */
793 reg = pci_read_config(dev, PCIR_COMMAND, 2);
794 reg |= PCIM_CMD_MWRICEN;
795 pci_write_config(dev, PCIR_COMMAND, reg, 2);
796
797 /* Allocate interrupt. */
798 rid = 0;
799 sc->sf_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
800 RF_SHAREABLE | RF_ACTIVE);
801
802 if (sc->sf_irq == NULL) {
803 device_printf(dev, "couldn't map interrupt\n");
804 error = ENXIO;
805 goto fail;
806 }
807
808 SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
809 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
810 OID_AUTO, "stats", CTLTYPE_INT | CTLFLAG_RW, sc, 0,
811 sf_sysctl_stats, "I", "Statistics");
812
813 SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
814 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
815 OID_AUTO, "int_mod", CTLTYPE_INT | CTLFLAG_RW,
816 &sc->sf_int_mod, 0, sysctl_hw_sf_int_mod, "I",
817 "sf interrupt moderation");
818 /* Pull in device tunables. */
819 sc->sf_int_mod = SF_IM_DEFAULT;
820 error = resource_int_value(device_get_name(dev), device_get_unit(dev),
821 "int_mod", &sc->sf_int_mod);
822 if (error == 0) {
823 if (sc->sf_int_mod < SF_IM_MIN ||
824 sc->sf_int_mod > SF_IM_MAX) {
825 device_printf(dev, "int_mod value out of range; "
826 "using default: %d\n", SF_IM_DEFAULT);
827 sc->sf_int_mod = SF_IM_DEFAULT;
828 }
829 }
830
831 /* Reset the adapter. */
832 sf_reset(sc);
833
834 /*
835 * Get station address from the EEPROM.
836 */
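	/* Note: the address bytes are fetched in reverse order. */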
837 for (i = 0; i < ETHER_ADDR_LEN; i++)
838 eaddr[i] =
839 sf_read_eeprom(sc, SF_EE_NODEADDR + ETHER_ADDR_LEN - i);
840
841 /* Allocate DMA resources. */
842 if (sf_dma_alloc(sc) != 0) {
843 error = ENOSPC;
844 goto fail;
845 }
846
847 sc->sf_txthresh = SF_MIN_TX_THRESHOLD;
848
849 ifp = sc->sf_ifp = if_alloc(IFT_ETHER);
850 if (ifp == NULL) {
851 device_printf(dev, "can not allocate ifnet structure\n");
852 error = ENOSPC;
853 goto fail;
854 }
855
856 /* Do MII setup. */
857 error = mii_attach(dev, &sc->sf_miibus, ifp, sf_ifmedia_upd,
858 sf_ifmedia_sts, BMSR_DEFCAPMASK, MII_PHY_ANY, MII_OFFSET_ANY, 0);
859 if (error != 0) {
860 device_printf(dev, "attaching PHYs failed\n");
861 goto fail;
862 }
863
864 ifp->if_softc = sc;
865 if_initname(ifp, device_get_name(dev), device_get_unit(dev));
866 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
867 ifp->if_ioctl = sf_ioctl;
868 ifp->if_start = sf_start;
869 ifp->if_init = sf_init;
870 IFQ_SET_MAXLEN(&ifp->if_snd, SF_TX_DLIST_CNT - 1);
871 ifp->if_snd.ifq_drv_maxlen = SF_TX_DLIST_CNT - 1;
872 IFQ_SET_READY(&ifp->if_snd);
873 /*
874 * With the help of firmware, AIC-6915 supports
875 * Tx/Rx TCP/UDP checksum offload.
876 */
877 ifp->if_hwassist = SF_CSUM_FEATURES;
878 ifp->if_capabilities = IFCAP_HWCSUM;
879
880 /*
881 * Call MI attach routine.
882 */
883 ether_ifattach(ifp, eaddr);
884
885 /* VLAN capability setup. */
886 ifp->if_capabilities |= IFCAP_VLAN_MTU;
887 ifp->if_capenable = ifp->if_capabilities;
888 #ifdef DEVICE_POLLING
889 ifp->if_capabilities |= IFCAP_POLLING;
890 #endif
891 /*
892 * Tell the upper layer(s) we support long frames.
893 * Must appear after the call to ether_ifattach() because
894 * ether_ifattach() sets ifi_hdrlen to the default value.
895 */
896 ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
897
898 /* Hook interrupt last to avoid having to lock softc */
899 error = bus_setup_intr(dev, sc->sf_irq, INTR_TYPE_NET | INTR_MPSAFE,
900 NULL, sf_intr, sc, &sc->sf_intrhand);
901
902 if (error) {
903 device_printf(dev, "couldn't set up irq\n");
904 ether_ifdetach(ifp);
905 goto fail;
906 }
907
908 fail:
909 if (error)
910 sf_detach(dev);
911
912 return (error);
913 }
914
915 /*
916 * Shutdown hardware and free up resources. This can be called any
917 * time after the mutex has been initialized. It is called in both
918 * the error case in attach and the normal detach case so it needs
919 * to be careful about only freeing resources that have actually been
920 * allocated.
921 */
922 static int
923 sf_detach(device_t dev)
924 {
925 struct sf_softc *sc;
926 struct ifnet *ifp;
927
928 sc = device_get_softc(dev);
929 ifp = sc->sf_ifp;
930
931 #ifdef DEVICE_POLLING
932 if (ifp != NULL && ifp->if_capenable & IFCAP_POLLING)
933 ether_poll_deregister(ifp);
934 #endif
935
936 /* These should only be active if attach succeeded */
937 if (device_is_attached(dev)) {
938 SF_LOCK(sc);
939 sc->sf_detach = 1;
940 sf_stop(sc);
941 SF_UNLOCK(sc);
942 callout_drain(&sc->sf_co);
943 if (ifp != NULL)
944 ether_ifdetach(ifp);
945 }
946 if (sc->sf_miibus) {
947 device_delete_child(dev, sc->sf_miibus);
948 sc->sf_miibus = NULL;
949 }
950 bus_generic_detach(dev);
951
952 if (sc->sf_intrhand != NULL)
953 bus_teardown_intr(dev, sc->sf_irq, sc->sf_intrhand);
954 if (sc->sf_irq != NULL)
955 bus_release_resource(dev, SYS_RES_IRQ, 0, sc->sf_irq);
956 if (sc->sf_res != NULL)
957 bus_release_resource(dev, sc->sf_restype, sc->sf_rid,
958 sc->sf_res);
959
960 sf_dma_free(sc);
961 if (ifp != NULL)
962 if_free(ifp);
963
964 mtx_destroy(&sc->sf_mtx);
965
966 return (0);
967 }
968
969 struct sf_dmamap_arg {
970 bus_addr_t sf_busaddr;
971 };
972
973 static void
974 sf_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
975 {
976 struct sf_dmamap_arg *ctx;
977
978 if (error != 0)
979 return;
980 ctx = arg;
981 ctx->sf_busaddr = segs[0].ds_addr;
982 }
983
984 static int
985 sf_dma_alloc(struct sf_softc *sc)
986 {
987 struct sf_dmamap_arg ctx;
988 struct sf_txdesc *txd;
989 struct sf_rxdesc *rxd;
990 bus_addr_t lowaddr;
991 bus_addr_t rx_ring_end, rx_cring_end;
992 bus_addr_t tx_ring_end, tx_cring_end;
993 int error, i;
994
995 lowaddr = BUS_SPACE_MAXADDR;
996
997 again:
998 /* Create parent DMA tag. */
999 error = bus_dma_tag_create(
1000 bus_get_dma_tag(sc->sf_dev), /* parent */
1001 1, 0, /* alignment, boundary */
1002 lowaddr, /* lowaddr */
1003 BUS_SPACE_MAXADDR, /* highaddr */
1004 NULL, NULL, /* filter, filterarg */
1005 BUS_SPACE_MAXSIZE_32BIT, /* maxsize */
1006 0, /* nsegments */
1007 BUS_SPACE_MAXSIZE_32BIT, /* maxsegsize */
1008 0, /* flags */
1009 NULL, NULL, /* lockfunc, lockarg */
1010 &sc->sf_cdata.sf_parent_tag);
1011 if (error != 0) {
1012 device_printf(sc->sf_dev, "failed to create parent DMA tag\n");
1013 goto fail;
1014 }
1015 /* Create tag for Tx ring. */
1016 error = bus_dma_tag_create(sc->sf_cdata.sf_parent_tag,/* parent */
1017 SF_RING_ALIGN, 0, /* alignment, boundary */
1018 BUS_SPACE_MAXADDR, /* lowaddr */
1019 BUS_SPACE_MAXADDR, /* highaddr */
1020 NULL, NULL, /* filter, filterarg */
1021 SF_TX_DLIST_SIZE, /* maxsize */
1022 1, /* nsegments */
1023 SF_TX_DLIST_SIZE, /* maxsegsize */
1024 0, /* flags */
1025 NULL, NULL, /* lockfunc, lockarg */
1026 &sc->sf_cdata.sf_tx_ring_tag);
1027 if (error != 0) {
1028 device_printf(sc->sf_dev, "failed to create Tx ring DMA tag\n");
1029 goto fail;
1030 }
1031
1032 /* Create tag for Tx completion ring. */
1033 error = bus_dma_tag_create(sc->sf_cdata.sf_parent_tag,/* parent */
1034 SF_RING_ALIGN, 0, /* alignment, boundary */
1035 BUS_SPACE_MAXADDR, /* lowaddr */
1036 BUS_SPACE_MAXADDR, /* highaddr */
1037 NULL, NULL, /* filter, filterarg */
1038 SF_TX_CLIST_SIZE, /* maxsize */
1039 1, /* nsegments */
1040 SF_TX_CLIST_SIZE, /* maxsegsize */
1041 0, /* flags */
1042 NULL, NULL, /* lockfunc, lockarg */
1043 &sc->sf_cdata.sf_tx_cring_tag);
1044 if (error != 0) {
1045 device_printf(sc->sf_dev,
1046 "failed to create Tx completion ring DMA tag\n");
1047 goto fail;
1048 }
1049
1050 /* Create tag for Rx ring. */
1051 error = bus_dma_tag_create(sc->sf_cdata.sf_parent_tag,/* parent */
1052 SF_RING_ALIGN, 0, /* alignment, boundary */
1053 BUS_SPACE_MAXADDR, /* lowaddr */
1054 BUS_SPACE_MAXADDR, /* highaddr */
1055 NULL, NULL, /* filter, filterarg */
1056 SF_RX_DLIST_SIZE, /* maxsize */
1057 1, /* nsegments */
1058 SF_RX_DLIST_SIZE, /* maxsegsize */
1059 0, /* flags */
1060 NULL, NULL, /* lockfunc, lockarg */
1061 &sc->sf_cdata.sf_rx_ring_tag);
1062 if (error != 0) {
1063 device_printf(sc->sf_dev,
1064 "failed to create Rx ring DMA tag\n");
1065 goto fail;
1066 }
1067
1068 /* Create tag for Rx completion ring. */
1069 error = bus_dma_tag_create(sc->sf_cdata.sf_parent_tag,/* parent */
1070 SF_RING_ALIGN, 0, /* alignment, boundary */
1071 BUS_SPACE_MAXADDR, /* lowaddr */
1072 BUS_SPACE_MAXADDR, /* highaddr */
1073 NULL, NULL, /* filter, filterarg */
1074 SF_RX_CLIST_SIZE, /* maxsize */
1075 1, /* nsegments */
1076 SF_RX_CLIST_SIZE, /* maxsegsize */
1077 0, /* flags */
1078 NULL, NULL, /* lockfunc, lockarg */
1079 &sc->sf_cdata.sf_rx_cring_tag);
1080 if (error != 0) {
1081 device_printf(sc->sf_dev,
1082 "failed to create Rx completion ring DMA tag\n");
1083 goto fail;
1084 }
1085
1086 /* Create tag for Tx buffers. */
1087 error = bus_dma_tag_create(sc->sf_cdata.sf_parent_tag,/* parent */
1088 1, 0, /* alignment, boundary */
1089 BUS_SPACE_MAXADDR, /* lowaddr */
1090 BUS_SPACE_MAXADDR, /* highaddr */
1091 NULL, NULL, /* filter, filterarg */
1092 MCLBYTES * SF_MAXTXSEGS, /* maxsize */
1093 SF_MAXTXSEGS, /* nsegments */
1094 MCLBYTES, /* maxsegsize */
1095 0, /* flags */
1096 NULL, NULL, /* lockfunc, lockarg */
1097 &sc->sf_cdata.sf_tx_tag);
1098 if (error != 0) {
1099 device_printf(sc->sf_dev, "failed to create Tx DMA tag\n");
1100 goto fail;
1101 }
1102
1103 /* Create tag for Rx buffers. */
1104 error = bus_dma_tag_create(sc->sf_cdata.sf_parent_tag,/* parent */
1105 SF_RX_ALIGN, 0, /* alignment, boundary */
1106 BUS_SPACE_MAXADDR, /* lowaddr */
1107 BUS_SPACE_MAXADDR, /* highaddr */
1108 NULL, NULL, /* filter, filterarg */
1109 MCLBYTES, /* maxsize */
1110 1, /* nsegments */
1111 MCLBYTES, /* maxsegsize */
1112 0, /* flags */
1113 NULL, NULL, /* lockfunc, lockarg */
1114 &sc->sf_cdata.sf_rx_tag);
1115 if (error != 0) {
1116 device_printf(sc->sf_dev, "failed to create Rx DMA tag\n");
1117 goto fail;
1118 }
1119
1120 /* Allocate DMA'able memory and load the DMA map for Tx ring. */
1121 error = bus_dmamem_alloc(sc->sf_cdata.sf_tx_ring_tag,
1122 (void **)&sc->sf_rdata.sf_tx_ring, BUS_DMA_WAITOK |
1123 BUS_DMA_COHERENT | BUS_DMA_ZERO, &sc->sf_cdata.sf_tx_ring_map);
1124 if (error != 0) {
1125 device_printf(sc->sf_dev,
1126 "failed to allocate DMA'able memory for Tx ring\n");
1127 goto fail;
1128 }
1129
1130 ctx.sf_busaddr = 0;
1131 error = bus_dmamap_load(sc->sf_cdata.sf_tx_ring_tag,
1132 sc->sf_cdata.sf_tx_ring_map, sc->sf_rdata.sf_tx_ring,
1133 SF_TX_DLIST_SIZE, sf_dmamap_cb, &ctx, 0);
1134 if (error != 0 || ctx.sf_busaddr == 0) {
1135 device_printf(sc->sf_dev,
1136 "failed to load DMA'able memory for Tx ring\n");
1137 goto fail;
1138 }
1139 sc->sf_rdata.sf_tx_ring_paddr = ctx.sf_busaddr;
1140
1141 /*
1142 * Allocate DMA'able memory and load the DMA map for Tx completion ring.
1143 */
1144 error = bus_dmamem_alloc(sc->sf_cdata.sf_tx_cring_tag,
1145 (void **)&sc->sf_rdata.sf_tx_cring, BUS_DMA_WAITOK |
1146 BUS_DMA_COHERENT | BUS_DMA_ZERO, &sc->sf_cdata.sf_tx_cring_map);
1147 if (error != 0) {
1148 device_printf(sc->sf_dev,
1149 "failed to allocate DMA'able memory for "
1150 "Tx completion ring\n");
1151 goto fail;
1152 }
1153
1154 ctx.sf_busaddr = 0;
1155 error = bus_dmamap_load(sc->sf_cdata.sf_tx_cring_tag,
1156 sc->sf_cdata.sf_tx_cring_map, sc->sf_rdata.sf_tx_cring,
1157 SF_TX_CLIST_SIZE, sf_dmamap_cb, &ctx, 0);
1158 if (error != 0 || ctx.sf_busaddr == 0) {
1159 device_printf(sc->sf_dev,
1160 "failed to load DMA'able memory for Tx completion ring\n");
1161 goto fail;
1162 }
1163 sc->sf_rdata.sf_tx_cring_paddr = ctx.sf_busaddr;
1164
1165 /* Allocate DMA'able memory and load the DMA map for Rx ring. */
1166 error = bus_dmamem_alloc(sc->sf_cdata.sf_rx_ring_tag,
1167 (void **)&sc->sf_rdata.sf_rx_ring, BUS_DMA_WAITOK |
1168 BUS_DMA_COHERENT | BUS_DMA_ZERO, &sc->sf_cdata.sf_rx_ring_map);
1169 if (error != 0) {
1170 device_printf(sc->sf_dev,
1171 "failed to allocate DMA'able memory for Rx ring\n");
1172 goto fail;
1173 }
1174
1175 ctx.sf_busaddr = 0;
1176 error = bus_dmamap_load(sc->sf_cdata.sf_rx_ring_tag,
1177 sc->sf_cdata.sf_rx_ring_map, sc->sf_rdata.sf_rx_ring,
1178 SF_RX_DLIST_SIZE, sf_dmamap_cb, &ctx, 0);
1179 if (error != 0 || ctx.sf_busaddr == 0) {
1180 device_printf(sc->sf_dev,
1181 "failed to load DMA'able memory for Rx ring\n");
1182 goto fail;
1183 }
1184 sc->sf_rdata.sf_rx_ring_paddr = ctx.sf_busaddr;
1185
1186 /*
1187 * Allocate DMA'able memory and load the DMA map for Rx completion ring.
1188 */
1189 error = bus_dmamem_alloc(sc->sf_cdata.sf_rx_cring_tag,
1190 (void **)&sc->sf_rdata.sf_rx_cring, BUS_DMA_WAITOK |
1191 BUS_DMA_COHERENT | BUS_DMA_ZERO, &sc->sf_cdata.sf_rx_cring_map);
1192 if (error != 0) {
1193 device_printf(sc->sf_dev,
1194 "failed to allocate DMA'able memory for "
1195 "Rx completion ring\n");
1196 goto fail;
1197 }
1198
1199 ctx.sf_busaddr = 0;
1200 error = bus_dmamap_load(sc->sf_cdata.sf_rx_cring_tag,
1201 sc->sf_cdata.sf_rx_cring_map, sc->sf_rdata.sf_rx_cring,
1202 SF_RX_CLIST_SIZE, sf_dmamap_cb, &ctx, 0);
1203 if (error != 0 || ctx.sf_busaddr == 0) {
1204 device_printf(sc->sf_dev,
1205 "failed to load DMA'able memory for Rx completion ring\n");
1206 goto fail;
1207 }
1208 sc->sf_rdata.sf_rx_cring_paddr = ctx.sf_busaddr;
1209
1210 /*
1211 	 * The Tx descriptor ring and Tx completion ring should be addressed
1212 	 * in the same 4GB space. The same rule applies to the Rx ring and Rx
1213 	 * completion ring. Unfortunately there is no way to specify this
1214 	 * boundary restriction with bus_dma(9), so just try to allocate
1215 	 * without the restriction and check whether it was satisfied.
1216 	 * If not, fall back to 32bit DMA addressing mode, which always
1217 	 * guarantees the restriction.
1218 */
1219 tx_ring_end = sc->sf_rdata.sf_tx_ring_paddr + SF_TX_DLIST_SIZE;
1220 tx_cring_end = sc->sf_rdata.sf_tx_cring_paddr + SF_TX_CLIST_SIZE;
1221 rx_ring_end = sc->sf_rdata.sf_rx_ring_paddr + SF_RX_DLIST_SIZE;
1222 rx_cring_end = sc->sf_rdata.sf_rx_cring_paddr + SF_RX_CLIST_SIZE;
1223 if ((SF_ADDR_HI(sc->sf_rdata.sf_tx_ring_paddr) !=
1224 SF_ADDR_HI(tx_cring_end)) ||
1225 (SF_ADDR_HI(sc->sf_rdata.sf_tx_cring_paddr) !=
1226 SF_ADDR_HI(tx_ring_end)) ||
1227 (SF_ADDR_HI(sc->sf_rdata.sf_rx_ring_paddr) !=
1228 SF_ADDR_HI(rx_cring_end)) ||
1229 (SF_ADDR_HI(sc->sf_rdata.sf_rx_cring_paddr) !=
1230 SF_ADDR_HI(rx_ring_end))) {
1231 device_printf(sc->sf_dev,
1232 "switching to 32bit DMA mode\n");
1233 sf_dma_free(sc);
1234 /* Limit DMA address space to 32bit and try again. */
1235 lowaddr = BUS_SPACE_MAXADDR_32BIT;
1236 goto again;
1237 }
1238
1239 /* Create DMA maps for Tx buffers. */
1240 for (i = 0; i < SF_TX_DLIST_CNT; i++) {
1241 txd = &sc->sf_cdata.sf_txdesc[i];
1242 txd->tx_m = NULL;
1243 txd->ndesc = 0;
1244 txd->tx_dmamap = NULL;
1245 error = bus_dmamap_create(sc->sf_cdata.sf_tx_tag, 0,
1246 &txd->tx_dmamap);
1247 if (error != 0) {
1248 device_printf(sc->sf_dev,
1249 "failed to create Tx dmamap\n");
1250 goto fail;
1251 }
1252 }
1253 /* Create DMA maps for Rx buffers. */
1254 if ((error = bus_dmamap_create(sc->sf_cdata.sf_rx_tag, 0,
1255 &sc->sf_cdata.sf_rx_sparemap)) != 0) {
1256 device_printf(sc->sf_dev,
1257 "failed to create spare Rx dmamap\n");
1258 goto fail;
1259 }
1260 for (i = 0; i < SF_RX_DLIST_CNT; i++) {
1261 rxd = &sc->sf_cdata.sf_rxdesc[i];
1262 rxd->rx_m = NULL;
1263 rxd->rx_dmamap = NULL;
1264 error = bus_dmamap_create(sc->sf_cdata.sf_rx_tag, 0,
1265 &rxd->rx_dmamap);
1266 if (error != 0) {
1267 device_printf(sc->sf_dev,
1268 "failed to create Rx dmamap\n");
1269 goto fail;
1270 }
1271 }
1272
1273 fail:
1274 return (error);
1275 }
1276
1277 static void
1278 sf_dma_free(struct sf_softc *sc)
1279 {
1280 struct sf_txdesc *txd;
1281 struct sf_rxdesc *rxd;
1282 int i;
1283
1284 /* Tx ring. */
1285 if (sc->sf_cdata.sf_tx_ring_tag) {
1286 if (sc->sf_cdata.sf_tx_ring_map)
1287 bus_dmamap_unload(sc->sf_cdata.sf_tx_ring_tag,
1288 sc->sf_cdata.sf_tx_ring_map);
1289 if (sc->sf_cdata.sf_tx_ring_map &&
1290 sc->sf_rdata.sf_tx_ring)
1291 bus_dmamem_free(sc->sf_cdata.sf_tx_ring_tag,
1292 sc->sf_rdata.sf_tx_ring,
1293 sc->sf_cdata.sf_tx_ring_map);
1294 sc->sf_rdata.sf_tx_ring = NULL;
1295 sc->sf_cdata.sf_tx_ring_map = NULL;
1296 bus_dma_tag_destroy(sc->sf_cdata.sf_tx_ring_tag);
1297 sc->sf_cdata.sf_tx_ring_tag = NULL;
1298 }
1299 /* Tx completion ring. */
1300 if (sc->sf_cdata.sf_tx_cring_tag) {
1301 if (sc->sf_cdata.sf_tx_cring_map)
1302 bus_dmamap_unload(sc->sf_cdata.sf_tx_cring_tag,
1303 sc->sf_cdata.sf_tx_cring_map);
1304 if (sc->sf_cdata.sf_tx_cring_map &&
1305 sc->sf_rdata.sf_tx_cring)
1306 bus_dmamem_free(sc->sf_cdata.sf_tx_cring_tag,
1307 sc->sf_rdata.sf_tx_cring,
1308 sc->sf_cdata.sf_tx_cring_map);
1309 sc->sf_rdata.sf_tx_cring = NULL;
1310 sc->sf_cdata.sf_tx_cring_map = NULL;
1311 bus_dma_tag_destroy(sc->sf_cdata.sf_tx_cring_tag);
1312 sc->sf_cdata.sf_tx_cring_tag = NULL;
1313 }
1314 /* Rx ring. */
1315 if (sc->sf_cdata.sf_rx_ring_tag) {
1316 if (sc->sf_cdata.sf_rx_ring_map)
1317 bus_dmamap_unload(sc->sf_cdata.sf_rx_ring_tag,
1318 sc->sf_cdata.sf_rx_ring_map);
1319 if (sc->sf_cdata.sf_rx_ring_map &&
1320 sc->sf_rdata.sf_rx_ring)
1321 bus_dmamem_free(sc->sf_cdata.sf_rx_ring_tag,
1322 sc->sf_rdata.sf_rx_ring,
1323 sc->sf_cdata.sf_rx_ring_map);
1324 sc->sf_rdata.sf_rx_ring = NULL;
1325 sc->sf_cdata.sf_rx_ring_map = NULL;
1326 bus_dma_tag_destroy(sc->sf_cdata.sf_rx_ring_tag);
1327 sc->sf_cdata.sf_rx_ring_tag = NULL;
1328 }
1329 /* Rx completion ring. */
1330 if (sc->sf_cdata.sf_rx_cring_tag) {
1331 if (sc->sf_cdata.sf_rx_cring_map)
1332 bus_dmamap_unload(sc->sf_cdata.sf_rx_cring_tag,
1333 sc->sf_cdata.sf_rx_cring_map);
1334 if (sc->sf_cdata.sf_rx_cring_map &&
1335 sc->sf_rdata.sf_rx_cring)
1336 bus_dmamem_free(sc->sf_cdata.sf_rx_cring_tag,
1337 sc->sf_rdata.sf_rx_cring,
1338 sc->sf_cdata.sf_rx_cring_map);
1339 sc->sf_rdata.sf_rx_cring = NULL;
1340 sc->sf_cdata.sf_rx_cring_map = NULL;
1341 bus_dma_tag_destroy(sc->sf_cdata.sf_rx_cring_tag);
1342 sc->sf_cdata.sf_rx_cring_tag = NULL;
1343 }
1344 /* Tx buffers. */
1345 if (sc->sf_cdata.sf_tx_tag) {
1346 for (i = 0; i < SF_TX_DLIST_CNT; i++) {
1347 txd = &sc->sf_cdata.sf_txdesc[i];
1348 if (txd->tx_dmamap) {
1349 bus_dmamap_destroy(sc->sf_cdata.sf_tx_tag,
1350 txd->tx_dmamap);
1351 txd->tx_dmamap = NULL;
1352 }
1353 }
1354 bus_dma_tag_destroy(sc->sf_cdata.sf_tx_tag);
1355 sc->sf_cdata.sf_tx_tag = NULL;
1356 }
1357 /* Rx buffers. */
1358 if (sc->sf_cdata.sf_rx_tag) {
1359 for (i = 0; i < SF_RX_DLIST_CNT; i++) {
1360 rxd = &sc->sf_cdata.sf_rxdesc[i];
1361 if (rxd->rx_dmamap) {
1362 bus_dmamap_destroy(sc->sf_cdata.sf_rx_tag,
1363 rxd->rx_dmamap);
1364 rxd->rx_dmamap = NULL;
1365 }
1366 }
1367 if (sc->sf_cdata.sf_rx_sparemap) {
1368 bus_dmamap_destroy(sc->sf_cdata.sf_rx_tag,
1369 sc->sf_cdata.sf_rx_sparemap);
1370 			sc->sf_cdata.sf_rx_sparemap = NULL;
1371 }
1372 bus_dma_tag_destroy(sc->sf_cdata.sf_rx_tag);
1373 sc->sf_cdata.sf_rx_tag = NULL;
1374 }
1375
1376 if (sc->sf_cdata.sf_parent_tag) {
1377 bus_dma_tag_destroy(sc->sf_cdata.sf_parent_tag);
1378 sc->sf_cdata.sf_parent_tag = NULL;
1379 }
1380 }
1381
1382 static int
1383 sf_init_rx_ring(struct sf_softc *sc)
1384 {
1385 struct sf_ring_data *rd;
1386 int i;
1387
1388 sc->sf_cdata.sf_rxc_cons = 0;
1389
1390 rd = &sc->sf_rdata;
1391 bzero(rd->sf_rx_ring, SF_RX_DLIST_SIZE);
1392 bzero(rd->sf_rx_cring, SF_RX_CLIST_SIZE);
1393
1394 for (i = 0; i < SF_RX_DLIST_CNT; i++) {
1395 if (sf_newbuf(sc, i) != 0)
1396 return (ENOBUFS);
1397 }
1398
1399 bus_dmamap_sync(sc->sf_cdata.sf_rx_cring_tag,
1400 sc->sf_cdata.sf_rx_cring_map,
1401 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1402 bus_dmamap_sync(sc->sf_cdata.sf_rx_ring_tag,
1403 sc->sf_cdata.sf_rx_ring_map,
1404 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1405
1406 return (0);
1407 }
1408
1409 static void
1410 sf_init_tx_ring(struct sf_softc *sc)
1411 {
1412 struct sf_ring_data *rd;
1413 int i;
1414
1415 sc->sf_cdata.sf_tx_prod = 0;
1416 sc->sf_cdata.sf_tx_cnt = 0;
1417 sc->sf_cdata.sf_txc_cons = 0;
1418
1419 rd = &sc->sf_rdata;
1420 bzero(rd->sf_tx_ring, SF_TX_DLIST_SIZE);
1421 bzero(rd->sf_tx_cring, SF_TX_CLIST_SIZE);
1422 for (i = 0; i < SF_TX_DLIST_CNT; i++) {
1423 rd->sf_tx_ring[i].sf_tx_ctrl = htole32(SF_TX_DESC_ID);
1424 sc->sf_cdata.sf_txdesc[i].tx_m = NULL;
1425 sc->sf_cdata.sf_txdesc[i].ndesc = 0;
1426 }
1427 	rd->sf_tx_ring[i - 1].sf_tx_ctrl |= htole32(SF_TX_DESC_END);
1428
1429 bus_dmamap_sync(sc->sf_cdata.sf_tx_ring_tag,
1430 sc->sf_cdata.sf_tx_ring_map,
1431 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1432 bus_dmamap_sync(sc->sf_cdata.sf_tx_cring_tag,
1433 sc->sf_cdata.sf_tx_cring_map,
1434 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1435 }
1436
1437 /*
1438 * Initialize an RX descriptor and attach an MBUF cluster.
1439 */
1440 static int
1441 sf_newbuf(struct sf_softc *sc, int idx)
1442 {
1443 struct sf_rx_rdesc *desc;
1444 struct sf_rxdesc *rxd;
1445 struct mbuf *m;
1446 bus_dma_segment_t segs[1];
1447 bus_dmamap_t map;
1448 int nsegs;
1449
1450 m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
1451 if (m == NULL)
1452 return (ENOBUFS);
1453 m->m_len = m->m_pkthdr.len = MCLBYTES;
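	/*
	 * Reserve a longword of headroom.  The cluster start is already
	 * longword aligned as the chip requires, and the headroom gives
	 * sf_fixup_rx() room to slide the frame back by ETHER_ALIGN
	 * bytes on strict alignment machines.
	 */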
1454 m_adj(m, sizeof(uint32_t));
1455
1456 if (bus_dmamap_load_mbuf_sg(sc->sf_cdata.sf_rx_tag,
1457 sc->sf_cdata.sf_rx_sparemap, m, segs, &nsegs, 0) != 0) {
1458 m_freem(m);
1459 return (ENOBUFS);
1460 }
1461 KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));
1462
1463 rxd = &sc->sf_cdata.sf_rxdesc[idx];
1464 if (rxd->rx_m != NULL) {
1465 bus_dmamap_sync(sc->sf_cdata.sf_rx_tag, rxd->rx_dmamap,
1466 BUS_DMASYNC_POSTREAD);
1467 bus_dmamap_unload(sc->sf_cdata.sf_rx_tag, rxd->rx_dmamap);
1468 }
1469 map = rxd->rx_dmamap;
1470 rxd->rx_dmamap = sc->sf_cdata.sf_rx_sparemap;
1471 sc->sf_cdata.sf_rx_sparemap = map;
1472 bus_dmamap_sync(sc->sf_cdata.sf_rx_tag, rxd->rx_dmamap,
1473 BUS_DMASYNC_PREREAD);
1474 rxd->rx_m = m;
1475 desc = &sc->sf_rdata.sf_rx_ring[idx];
1476 desc->sf_addr = htole64(segs[0].ds_addr);
1477
1478 return (0);
1479 }
1480
1481 #ifndef __NO_STRICT_ALIGNMENT
1482 static __inline void
1483 sf_fixup_rx(struct mbuf *m)
1484 {
1485 int i;
1486 uint16_t *src, *dst;
1487
1488 src = mtod(m, uint16_t *);
1489 dst = src - 1;
1490
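	/*
	 * Copy the frame backwards by ETHER_ALIGN (2) bytes, one
	 * 16-bit word at a time, so that the IP header behind the
	 * 14-byte Ethernet header ends up longword aligned.
	 */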
1491 for (i = 0; i < (m->m_len / sizeof(uint16_t) + 1); i++)
1492 *dst++ = *src++;
1493
1494 m->m_data -= ETHER_ALIGN;
1495 }
1496 #endif
1497
1498 /*
1499 * The starfire is programmed to use 'normal' mode for packet reception,
1500 * which means we use the consumer/producer model for both the buffer
1501 * descriptor queue and the completion descriptor queue. The only problem
1502 * with this is that it involves a lot of register accesses: we have to
1503 * read the RX completion consumer and producer indexes and the RX buffer
1504 * producer index, plus the RX completion consumer and RX buffer producer
1505 * indexes have to be updated. It would have been easier if Adaptec had
1506 * put each index in a separate register, especially given that the damn
1507 * NIC has a 512K register space.
1508 *
1509 * In spite of all the lovely features that Adaptec crammed into the 6915,
1510 * it is marred by one truly stupid design flaw, which is that receive
1511 * buffer addresses must be aligned on a longword boundary. This forces
1512 * the packet payload to be unaligned, which is suboptimal on the x86 and
1513 * completely unusable on the Alpha. Our only recourse is to copy received
1514 * packets into properly aligned buffers before handing them off.
1515 */
1516 static int
1517 sf_rxeof(struct sf_softc *sc)
1518 {
1519 struct mbuf *m;
1520 struct ifnet *ifp;
1521 struct sf_rxdesc *rxd;
1522 struct sf_rx_rcdesc *cur_cmp;
1523 int cons, eidx, prog, rx_npkts;
1524 uint32_t status, status2;
1525
1526 SF_LOCK_ASSERT(sc);
1527
1528 ifp = sc->sf_ifp;
1529 rx_npkts = 0;
1530
1531 bus_dmamap_sync(sc->sf_cdata.sf_rx_ring_tag,
1532 sc->sf_cdata.sf_rx_ring_map,
1533 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1534 bus_dmamap_sync(sc->sf_cdata.sf_rx_cring_tag,
1535 sc->sf_cdata.sf_rx_cring_map,
1536 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1537
1538 /*
1539 	 * To reduce register accesses, read the Rx completion queue
1540 	 * entries directly.
1541 */
1542 eidx = 0;
1543 prog = 0;
1544 for (cons = sc->sf_cdata.sf_rxc_cons;
1545 (ifp->if_drv_flags & IFF_DRV_RUNNING) != 0;
1546 SF_INC(cons, SF_RX_CLIST_CNT)) {
1547 cur_cmp = &sc->sf_rdata.sf_rx_cring[cons];
1548 status = le32toh(cur_cmp->sf_rx_status1);
1549 if (status == 0)
1550 break;
1551 #ifdef DEVICE_POLLING
1552 if ((ifp->if_capenable & IFCAP_POLLING) != 0) {
1553 if (sc->rxcycles <= 0)
1554 break;
1555 sc->rxcycles--;
1556 }
1557 #endif
1558 prog++;
1559 eidx = (status & SF_RX_CMPDESC_EIDX) >> 16;
1560 rxd = &sc->sf_cdata.sf_rxdesc[eidx];
1561 m = rxd->rx_m;
1562
1563 /*
1564 * Note, if_ipackets and if_ierrors counters
1565 * are handled in sf_stats_update().
1566 */
1567 if ((status & SF_RXSTAT1_OK) == 0) {
1568 cur_cmp->sf_rx_status1 = 0;
1569 continue;
1570 }
1571
1572 if (sf_newbuf(sc, eidx) != 0) {
1573 ifp->if_iqdrops++;
1574 cur_cmp->sf_rx_status1 = 0;
1575 continue;
1576 }
1577
1578 /* AIC-6915 supports TCP/UDP checksum offload. */
1579 if ((ifp->if_capenable & IFCAP_RXCSUM) != 0) {
1580 status2 = le32toh(cur_cmp->sf_rx_status2);
1581 /*
1582 * Sometimes AIC-6915 generates an interrupt to
1583 			 * warn of an RxGFP stall with the bad checksum bit set
1584 			 * in the status word. I'm not sure what condition
1585 			 * triggers it, but the received packet's checksum
1586 			 * was correct even though AIC-6915 does not
1587 			 * agree on this. This may be an indication of a
1588 			 * firmware bug. To fix the issue, do not rely
1589 * on bad checksum bit in status word and let
1590 * upper layer verify integrity of received
1591 * frame.
1592 			 * Another nice feature of the AIC-6915 is its
1593 			 * hardware assistance for checksum calculation:
1594 			 * it provides a partial checksum value for each
1595 			 * received frame. The partial checksum value can
1596 			 * be used to accelerate checksum computation for
1597 			 * fragmented TCP/UDP packets. The upper network
1598 			 * stack already takes advantage of the partial
1599 			 * checksum value in the IP reassembly stage. But
1600 			 * I'm not sure about the correctness of the partial
1601 			 * hardware checksum assistance, as frequent
1602 			 * RxGFP stalls are seen on non-fragmented
1603 			 * frames. Given the complexity of the checksum
1604 			 * computation code in the firmware, it's possible
1605 			 * there is another bug in RxGFP, so ignore the
1606 			 * checksum assistance for fragmented frames.
1607 			 * This can be changed in the future.
1608 			 */
1609 if ((status2 & SF_RXSTAT2_FRAG) == 0) {
1610 if ((status2 & (SF_RXSTAT2_TCP |
1611 SF_RXSTAT2_UDP)) != 0) {
1612 if ((status2 & SF_RXSTAT2_CSUM_OK)) {
1613 m->m_pkthdr.csum_flags =
1614 CSUM_DATA_VALID |
1615 CSUM_PSEUDO_HDR;
1616 m->m_pkthdr.csum_data = 0xffff;
1617 }
1618 }
1619 }
1620 #ifdef SF_PARTIAL_CSUM_SUPPORT
1621 else if ((status2 & SF_RXSTAT2_FRAG) != 0) {
1622 if ((status2 & (SF_RXSTAT2_TCP |
1623 SF_RXSTAT2_UDP)) != 0) {
1624 if ((status2 & SF_RXSTAT2_PCSUM_OK)) {
1625 m->m_pkthdr.csum_flags =
1626 CSUM_DATA_VALID;
1627 m->m_pkthdr.csum_data =
1628 (status &
1629 SF_RX_CMPDESC_CSUM2);
1630 }
1631 }
1632 }
1633 #endif
1634 }
1635
1636 m->m_pkthdr.len = m->m_len = status & SF_RX_CMPDESC_LEN;
1637 #ifndef __NO_STRICT_ALIGNMENT
1638 sf_fixup_rx(m);
1639 #endif
1640 m->m_pkthdr.rcvif = ifp;
1641
1642 SF_UNLOCK(sc);
1643 (*ifp->if_input)(ifp, m);
1644 SF_LOCK(sc);
1645 rx_npkts++;
1646
1647 /* Clear completion status. */
1648 cur_cmp->sf_rx_status1 = 0;
1649 }
1650
1651 if (prog > 0) {
1652 sc->sf_cdata.sf_rxc_cons = cons;
1653 bus_dmamap_sync(sc->sf_cdata.sf_rx_ring_tag,
1654 sc->sf_cdata.sf_rx_ring_map,
1655 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1656 bus_dmamap_sync(sc->sf_cdata.sf_rx_cring_tag,
1657 sc->sf_cdata.sf_rx_cring_map,
1658 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1659
1660 /* Update Rx completion Q1 consumer index. */
1661 csr_write_4(sc, SF_CQ_CONSIDX,
1662 (csr_read_4(sc, SF_CQ_CONSIDX) & ~SF_CQ_CONSIDX_RXQ1) |
1663 (cons & SF_CQ_CONSIDX_RXQ1));
1664 /* Update Rx descriptor Q1 ptr. */
1665 csr_write_4(sc, SF_RXDQ_PTR_Q1,
1666 (csr_read_4(sc, SF_RXDQ_PTR_Q1) & ~SF_RXDQ_PRODIDX) |
1667 (eidx & SF_RXDQ_PRODIDX));
1668 }
1669 return (rx_npkts);
1670 }
1671
1672 /*
1673 * Read the transmit status from the completion queue and release
1674 * mbufs. Note that the buffer descriptor index in the completion
1675 * descriptor is an offset from the start of the transmit buffer
1676 * descriptor list in bytes. This is important because the manual
1677 * gives the impression that it should match the producer/consumer
1678 * index, which is the offset in 8 byte blocks.
1679 */
1680 static void
1681 sf_txeof(struct sf_softc *sc)
1682 {
1683 struct sf_txdesc *txd;
1684 struct sf_tx_rcdesc *cur_cmp;
1685 struct ifnet *ifp;
1686 uint32_t status;
1687 int cons, idx, prod;
1688
1689 SF_LOCK_ASSERT(sc);
1690
1691 ifp = sc->sf_ifp;
1692
1693 bus_dmamap_sync(sc->sf_cdata.sf_tx_cring_tag,
1694 sc->sf_cdata.sf_tx_cring_map,
1695 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1696
1697 cons = sc->sf_cdata.sf_txc_cons;
1698 prod = (csr_read_4(sc, SF_CQ_PRODIDX) & SF_TXDQ_PRODIDX_HIPRIO) >> 16;
1699 if (prod == cons)
1700 return;
1701
1702 for (; cons != prod; SF_INC(cons, SF_TX_CLIST_CNT)) {
1703 cur_cmp = &sc->sf_rdata.sf_tx_cring[cons];
1704 status = le32toh(cur_cmp->sf_tx_status1);
1705 if (status == 0)
1706 break;
1707 switch (status & SF_TX_CMPDESC_TYPE) {
1708 case SF_TXCMPTYPE_TX:
1709 /* Tx complete entry. */
1710 break;
1711 case SF_TXCMPTYPE_DMA:
1712 /* DMA complete entry. */
1713 idx = status & SF_TX_CMPDESC_IDX;
1714 idx = idx / sizeof(struct sf_tx_rdesc);
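			/*
			 * The hardware reports a byte offset into the
			 * Tx descriptor ring, not a descriptor index,
			 * hence the division above.
			 */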
1715 /*
1716 * We don't need to check Tx status here.
1717 * SF_ISR_TX_LOFIFO intr would handle this.
1718 * Note, if_opackets, if_collisions and if_oerrors
1719 * counters are handled in sf_stats_update().
1720 */
1721 txd = &sc->sf_cdata.sf_txdesc[idx];
1722 if (txd->tx_m != NULL) {
1723 bus_dmamap_sync(sc->sf_cdata.sf_tx_tag,
1724 txd->tx_dmamap,
1725 BUS_DMASYNC_POSTWRITE);
1726 bus_dmamap_unload(sc->sf_cdata.sf_tx_tag,
1727 txd->tx_dmamap);
1728 m_freem(txd->tx_m);
1729 txd->tx_m = NULL;
1730 }
1731 sc->sf_cdata.sf_tx_cnt -= txd->ndesc;
1732 KASSERT(sc->sf_cdata.sf_tx_cnt >= 0,
1733 ("%s: Active Tx desc counter was garbled\n",
1734 __func__));
1735 txd->ndesc = 0;
1736 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
1737 break;
1738 default:
1739 /* It should not happen. */
1740 device_printf(sc->sf_dev,
1741 "unknown Tx completion type : 0x%08x : %d : %d\n",
1742 status, cons, prod);
1743 break;
1744 }
1745 cur_cmp->sf_tx_status1 = 0;
1746 }
1747
1748 sc->sf_cdata.sf_txc_cons = cons;
1749 bus_dmamap_sync(sc->sf_cdata.sf_tx_cring_tag,
1750 sc->sf_cdata.sf_tx_cring_map,
1751 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1752
1753 if (sc->sf_cdata.sf_tx_cnt == 0)
1754 sc->sf_watchdog_timer = 0;
1755
1756 /* Update Tx completion consumer index. */
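	/*
	 * The Tx completion consumer index lives in the upper 16 bits
	 * of SF_CQ_CONSIDX; preserve the Rx index in the low half.
	 */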
1757 csr_write_4(sc, SF_CQ_CONSIDX,
1758 (csr_read_4(sc, SF_CQ_CONSIDX) & 0xffff) |
1759 ((cons << 16) & 0xffff0000));
1760 }
1761
1762 static void
1763 sf_txthresh_adjust(struct sf_softc *sc)
1764 {
1765 uint32_t txfctl;
1766
1767 device_printf(sc->sf_dev, "Tx underrun -- ");
1768 if (sc->sf_txthresh < SF_MAX_TX_THRESHOLD) {
1769 txfctl = csr_read_4(sc, SF_TX_FRAMCTL);
1770 		/* Increase the Tx threshold by 16 units (256 bytes). */
1771 sc->sf_txthresh += 16;
1772 if (sc->sf_txthresh > SF_MAX_TX_THRESHOLD)
1773 sc->sf_txthresh = SF_MAX_TX_THRESHOLD;
1774 txfctl &= ~SF_TXFRMCTL_TXTHRESH;
1775 txfctl |= sc->sf_txthresh;
1776 printf("increasing Tx threshold to %d bytes\n",
1777 sc->sf_txthresh * SF_TX_THRESHOLD_UNIT);
1778 csr_write_4(sc, SF_TX_FRAMCTL, txfctl);
1779 } else
1780 printf("\n");
1781 }
1782
1783 #ifdef DEVICE_POLLING
1784 static int
1785 sf_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
1786 {
1787 struct sf_softc *sc;
1788 uint32_t status;
1789 int rx_npkts;
1790
1791 sc = ifp->if_softc;
1792 rx_npkts = 0;
1793 SF_LOCK(sc);
1794
1795 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
1796 SF_UNLOCK(sc);
1797 return (rx_npkts);
1798 }
1799
1800 sc->rxcycles = count;
1801 rx_npkts = sf_rxeof(sc);
1802 sf_txeof(sc);
1803 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
1804 sf_start_locked(ifp);
1805
1806 if (cmd == POLL_AND_CHECK_STATUS) {
1807 		/* Reading the ISR register clears all interrupts. */
1808 status = csr_read_4(sc, SF_ISR);
1809
1810 if ((status & SF_ISR_ABNORMALINTR) != 0) {
1811 if ((status & SF_ISR_STATSOFLOW) != 0)
1812 sf_stats_update(sc);
1813 else if ((status & SF_ISR_TX_LOFIFO) != 0)
1814 sf_txthresh_adjust(sc);
1815 else if ((status & SF_ISR_DMAERR) != 0) {
1816 device_printf(sc->sf_dev,
1817 "DMA error, resetting\n");
1818 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
1819 sf_init_locked(sc);
1820 SF_UNLOCK(sc);
1821 return (rx_npkts);
1822 } else if ((status & SF_ISR_NO_TX_CSUM) != 0) {
1823 sc->sf_statistics.sf_tx_gfp_stall++;
1824 #ifdef SF_GFP_DEBUG
1825 device_printf(sc->sf_dev,
1826 "TxGFP is not responding!\n");
1827 #endif
1828 } else if ((status & SF_ISR_RXGFP_NORESP) != 0) {
1829 sc->sf_statistics.sf_rx_gfp_stall++;
1830 #ifdef SF_GFP_DEBUG
1831 device_printf(sc->sf_dev,
1832 "RxGFP is not responding!\n");
1833 #endif
1834 }
1835 }
1836 }
1837
1838 SF_UNLOCK(sc);
1839 return (rx_npkts);
1840 }
1841 #endif /* DEVICE_POLLING */
1842
1843 static void
1844 sf_intr(void *arg)
1845 {
1846 struct sf_softc *sc;
1847 struct ifnet *ifp;
1848 uint32_t status;
1849 int cnt;
1850
1851 sc = (struct sf_softc *)arg;
1852 SF_LOCK(sc);
1853
1854 if (sc->sf_suspended != 0)
1855 goto done_locked;
1856
1857 /* Reading the ISR register clears all interrupts. */
1858 status = csr_read_4(sc, SF_ISR);
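/*
 * A reading of 0xffffffff usually means the hardware is no
 * longer present; otherwise ignore the event unless the chip
 * asserted a PCI interrupt for us, since the interrupt line
 * may be shared.
 */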
1859 if (status == 0 || status == 0xffffffff ||
1860 (status & SF_ISR_PCIINT_ASSERTED) == 0)
1861 goto done_locked;
1862
1863 ifp = sc->sf_ifp;
1864 #ifdef DEVICE_POLLING
1865 if ((ifp->if_capenable & IFCAP_POLLING) != 0)
1866 goto done_locked;
1867 #endif
1868
1869 /* Disable interrupts. */
1870 csr_write_4(sc, SF_IMR, 0x00000000);
1871
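/*
 * Service events until all interrupt sources are quiescent,
 * but bound the loop to 32 passes so a storm of events cannot
 * monopolize the CPU.
 */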
1872 for (cnt = 32; (status & SF_INTRS) != 0;) {
1873 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
1874 break;
1875 if ((status & SF_ISR_RXDQ1_DMADONE) != 0)
1876 sf_rxeof(sc);
1877
1878 if ((status & (SF_ISR_TX_TXDONE | SF_ISR_TX_DMADONE |
1879 SF_ISR_TX_QUEUEDONE)) != 0)
1880 sf_txeof(sc);
1881
1882 if ((status & SF_ISR_ABNORMALINTR) != 0) {
1883 if ((status & SF_ISR_STATSOFLOW) != 0)
1884 sf_stats_update(sc);
1885 else if ((status & SF_ISR_TX_LOFIFO) != 0)
1886 sf_txthresh_adjust(sc);
1887 else if ((status & SF_ISR_DMAERR) != 0) {
1888 device_printf(sc->sf_dev,
1889 "DMA error, resetting\n");
1890 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
1891 sf_init_locked(sc);
1892 SF_UNLOCK(sc);
1893 return;
1894 } else if ((status & SF_ISR_NO_TX_CSUM) != 0) {
1895 sc->sf_statistics.sf_tx_gfp_stall++;
1896 #ifdef SF_GFP_DEBUG
1897 device_printf(sc->sf_dev,
1898 "TxGFP is not responding!\n");
1899 #endif
1900 }
1901 else if ((status & SF_ISR_RXGFP_NORESP) != 0) {
1902 sc->sf_statistics.sf_rx_gfp_stall++;
1903 #ifdef SF_GFP_DEBUG
1904 device_printf(sc->sf_dev,
1905 "RxGFP is not responding!\n");
1906 #endif
1907 }
1908 }
1909 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
1910 sf_start_locked(ifp);
1911 if (--cnt <= 0)
1912 break;
1913 /* Reading the ISR register clears all interrupts. */
1914 status = csr_read_4(sc, SF_ISR);
1915 }
1916
1917 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
1918 /* Re-enable interrupts. */
1919 csr_write_4(sc, SF_IMR, SF_INTRS);
1920 }
1921
1922 done_locked:
1923 SF_UNLOCK(sc);
1924 }
1925
1926 static void
1927 sf_download_fw(struct sf_softc *sc)
1928 {
1929 uint32_t gfpinst;
1930 int i, ndx;
1931 uint8_t *p;
1932
1933 /*
1934 * An FP instruction is 48 bits wide, so each one has to be
1935 * written to the GFP memory in two parts.
1936 */
1937 p = txfwdata;
1938 ndx = 0;
1939 for (i = 0; i < sizeof(txfwdata) / SF_GFP_INST_BYTES; i++) {
1940 gfpinst = p[2] << 24 | p[3] << 16 | p[4] << 8 | p[5];
1941 csr_write_4(sc, SF_TXGFP_MEM_BASE + ndx * 4, gfpinst);
1942 gfpinst = p[0] << 8 | p[1];
1943 csr_write_4(sc, SF_TXGFP_MEM_BASE + (ndx + 1) * 4, gfpinst);
1944 p += SF_GFP_INST_BYTES;
1945 ndx += 2;
1946 }
1947 if (bootverbose)
1948 device_printf(sc->sf_dev, "%d Tx instructions downloaded\n", i);
1949
1950 p = rxfwdata;
1951 ndx = 0;
1952 for (i = 0; i < sizeof(rxfwdata) / SF_GFP_INST_BYTES; i++) {
1953 gfpinst = p[2] << 24 | p[3] << 16 | p[4] << 8 | p[5];
1954 csr_write_4(sc, SF_RXGFP_MEM_BASE + (ndx * 4), gfpinst);
1955 gfpinst = p[0] << 8 | p[1];
1956 csr_write_4(sc, SF_RXGFP_MEM_BASE + (ndx + 1) * 4, gfpinst);
1957 p += SF_GFP_INST_BYTES;
1958 ndx += 2;
1959 }
1960 if (bootverbose)
1961 device_printf(sc->sf_dev, "%d Rx instructions downloaded\n", i);
1962 }
1963
1964 static void
1965 sf_init(void *xsc)
1966 {
1967 struct sf_softc *sc;
1968
1969 sc = (struct sf_softc *)xsc;
1970 SF_LOCK(sc);
1971 sf_init_locked(sc);
1972 SF_UNLOCK(sc);
1973 }
1974
1975 static void
1976 sf_init_locked(struct sf_softc *sc)
1977 {
1978 struct ifnet *ifp;
1979 struct mii_data *mii;
1980 uint8_t eaddr[ETHER_ADDR_LEN];
1981 bus_addr_t addr;
1982 int i;
1983
1984 SF_LOCK_ASSERT(sc);
1985 ifp = sc->sf_ifp;
1986 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
1987 return;
1988 mii = device_get_softc(sc->sf_miibus);
1989
1990 sf_stop(sc);
1991 /* Reset the hardware to a known state. */
1992 sf_reset(sc);
1993
1994 /* Init all the receive filter registers */
1995 for (i = SF_RXFILT_PERFECT_BASE;
1996 i < (SF_RXFILT_HASH_MAX + 1); i += sizeof(uint32_t))
1997 csr_write_4(sc, i, 0);
1998
1999 /* Empty stats counter registers. */
2000 for (i = SF_STATS_BASE; i < (SF_STATS_END + 1); i += sizeof(uint32_t))
2001 csr_write_4(sc, i, 0);
2002
2003 /* Init our MAC address. */
2004 bcopy(IF_LLADDR(sc->sf_ifp), eaddr, sizeof(eaddr));
2005 csr_write_4(sc, SF_PAR0,
2006 eaddr[2] << 24 | eaddr[3] << 16 | eaddr[4] << 8 | eaddr[5]);
2007 csr_write_4(sc, SF_PAR1, eaddr[0] << 8 | eaddr[1]);
2008 sf_setperf(sc, 0, eaddr);
2009
2010 if (sf_init_rx_ring(sc) == ENOBUFS) {
2011 device_printf(sc->sf_dev,
2012 "initialization failed: no memory for rx buffers\n");
2013 sf_stop(sc);
2014 return;
2015 }
2016
2017 sf_init_tx_ring(sc);
2018
2019 /*
2020 * Use 16-entry perfect address filtering.
2021 * Hash only on the multicast destination address and accept
2022 * matching frames regardless of VLAN ID.
2023 */
2024 csr_write_4(sc, SF_RXFILT, SF_PERFMODE_NORMAL | SF_HASHMODE_ANYVLAN);
2025
2026 /*
2027 * Set Rx filter.
2028 */
2029 sf_rxfilter(sc);
2030
2031 /* Init the completion queue indexes. */
2032 csr_write_4(sc, SF_CQ_CONSIDX, 0);
2033 csr_write_4(sc, SF_CQ_PRODIDX, 0);
2034
2035 /* Init the RX completion queue. */
2036 addr = sc->sf_rdata.sf_rx_cring_paddr;
2037 csr_write_4(sc, SF_CQ_ADDR_HI, SF_ADDR_HI(addr));
2038 csr_write_4(sc, SF_RXCQ_CTL_1, SF_ADDR_LO(addr) & SF_RXCQ_ADDR);
2039 if (SF_ADDR_HI(addr) != 0)
2040 SF_SETBIT(sc, SF_RXCQ_CTL_1, SF_RXCQ_USE_64BIT);
2041 /* Set RX completion queue type 2. */
2042 SF_SETBIT(sc, SF_RXCQ_CTL_1, SF_RXCQTYPE_2);
2043 csr_write_4(sc, SF_RXCQ_CTL_2, 0);
2044
2045 /*
2046 * Init RX DMA control.
2047 * Use the default RxHighPriority threshold and the default
2048 * RxBurstSize of 128 bytes.
2049 */
2050 SF_SETBIT(sc, SF_RXDMA_CTL,
2051 SF_RXDMA_REPORTBADPKTS |
2052 (SF_RXDMA_HIGHPRIO_THRESH << 8) |
2053 SF_RXDMA_BURST);
2054
2055 /* Init the RX buffer descriptor queue. */
2056 addr = sc->sf_rdata.sf_rx_ring_paddr;
2057 csr_write_4(sc, SF_RXDQ_ADDR_HI, SF_ADDR_HI(addr));
2058 csr_write_4(sc, SF_RXDQ_ADDR_Q1, SF_ADDR_LO(addr));
2059
2060 /* Set RX queue buffer length. */
2061 csr_write_4(sc, SF_RXDQ_CTL_1,
2062 ((MCLBYTES - sizeof(uint32_t)) << 16) |
2063 SF_RXDQCTL_64BITBADDR | SF_RXDQCTL_VARIABLE);
2064
2065 if (SF_ADDR_HI(addr) != 0)
2066 SF_SETBIT(sc, SF_RXDQ_CTL_1, SF_RXDQCTL_64BITDADDR);
2067 csr_write_4(sc, SF_RXDQ_PTR_Q1, SF_RX_DLIST_CNT - 1);
2068 csr_write_4(sc, SF_RXDQ_CTL_2, 0);
2069
2070 /* Init the TX completion queue */
2071 addr = sc->sf_rdata.sf_tx_cring_paddr;
2072 csr_write_4(sc, SF_TXCQ_CTL, SF_ADDR_LO(addr) & SF_TXCQ_ADDR);
2073 if (SF_ADDR_HI(addr) != 0)
2074 SF_SETBIT(sc, SF_TXCQ_CTL, SF_TXCQ_USE_64BIT);
2075
2076 /* Init the TX buffer descriptor queue. */
2077 addr = sc->sf_rdata.sf_tx_ring_paddr;
2078 csr_write_4(sc, SF_TXDQ_ADDR_HI, SF_ADDR_HI(addr));
2079 csr_write_4(sc, SF_TXDQ_ADDR_HIPRIO, 0);
2080 csr_write_4(sc, SF_TXDQ_ADDR_LOPRIO, SF_ADDR_LO(addr));
2081 csr_write_4(sc, SF_TX_FRAMCTL,
2082 SF_TXFRMCTL_CPLAFTERTX | sc->sf_txthresh);
2083 csr_write_4(sc, SF_TXDQ_CTL,
2084 SF_TXDMA_HIPRIO_THRESH << 24 |
2085 SF_TXSKIPLEN_0BYTES << 16 |
2086 SF_TXDDMA_BURST << 8 |
2087 SF_TXBUFDESC_TYPE2 | SF_TXMINSPACE_UNLIMIT);
2088 if (SF_ADDR_HI(addr) != 0)
2089 SF_SETBIT(sc, SF_TXDQ_CTL, SF_TXDQCTL_64BITADDR);
2090
2091 /* Set VLAN Type register. */
2092 csr_write_4(sc, SF_VLANTYPE, ETHERTYPE_VLAN);
2093
2094 /* Set TxPause Timer. */
2095 csr_write_4(sc, SF_TXPAUSETIMER, 0xffff);
2096
2097 /* Enable autopadding of short TX frames. */
2098 SF_SETBIT(sc, SF_MACCFG_1, SF_MACCFG1_AUTOPAD);
2099 SF_SETBIT(sc, SF_MACCFG_2, SF_MACCFG2_AUTOVLANPAD);
2100 /* Reset the MAC for the changes to take effect. */
2101 SF_SETBIT(sc, SF_MACCFG_1, SF_MACCFG1_SOFTRESET);
2102 DELAY(1000);
2103 SF_CLRBIT(sc, SF_MACCFG_1, SF_MACCFG1_SOFTRESET);
2104
2105 /* Enable PCI bus master. */
2106 SF_SETBIT(sc, SF_PCI_DEVCFG, SF_PCIDEVCFG_PCIMEN);
2107
2108 /* Load StarFire firmware. */
2109 sf_download_fw(sc);
2110
2111 /* Initialize interrupt moderation. */
2112 csr_write_4(sc, SF_TIMER_CTL, SF_TIMER_IMASK_MODE | SF_TIMER_TIMES_TEN |
2113 (sc->sf_int_mod & SF_TIMER_IMASK_INTERVAL));
2114
2115 #ifdef DEVICE_POLLING
2116 /* Disable interrupts if we are polling. */
2117 if ((ifp->if_capenable & IFCAP_POLLING) != 0)
2118 csr_write_4(sc, SF_IMR, 0x00000000);
2119 else
2120 #endif
2121 /* Enable interrupts. */
2122 csr_write_4(sc, SF_IMR, SF_INTRS);
2123 SF_SETBIT(sc, SF_PCI_DEVCFG, SF_PCIDEVCFG_INTR_ENB);
2124
2125 /* Enable the RX and TX engines. */
2126 csr_write_4(sc, SF_GEN_ETH_CTL,
2127 SF_ETHCTL_RX_ENB | SF_ETHCTL_RXDMA_ENB |
2128 SF_ETHCTL_TX_ENB | SF_ETHCTL_TXDMA_ENB);
2129
2130 if ((ifp->if_capenable & IFCAP_TXCSUM) != 0)
2131 SF_SETBIT(sc, SF_GEN_ETH_CTL, SF_ETHCTL_TXGFP_ENB);
2132 else
2133 SF_CLRBIT(sc, SF_GEN_ETH_CTL, SF_ETHCTL_TXGFP_ENB);
2134 if ((ifp->if_capenable & IFCAP_RXCSUM) != 0)
2135 SF_SETBIT(sc, SF_GEN_ETH_CTL, SF_ETHCTL_RXGFP_ENB);
2136 else
2137 SF_CLRBIT(sc, SF_GEN_ETH_CTL, SF_ETHCTL_RXGFP_ENB);
2138
2139 ifp->if_drv_flags |= IFF_DRV_RUNNING;
2140 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
2141
2142 sc->sf_link = 0;
2143 sf_ifmedia_upd_locked(ifp);
2144
2145 callout_reset(&sc->sf_co, hz, sf_tick, sc);
2146 }
2147
2148 static int
2149 sf_encap(struct sf_softc *sc, struct mbuf **m_head)
2150 {
2151 struct sf_txdesc *txd;
2152 struct sf_tx_rdesc *desc;
2153 struct mbuf *m;
2154 bus_dmamap_t map;
2155 bus_dma_segment_t txsegs[SF_MAXTXSEGS];
2156 int error, i, nsegs, prod, si;
2157 int avail, nskip;
2158
2159 SF_LOCK_ASSERT(sc);
2160
2161 m = *m_head;
2162 prod = sc->sf_cdata.sf_tx_prod;
2163 txd = &sc->sf_cdata.sf_txdesc[prod];
2164 map = txd->tx_dmamap;
2165 error = bus_dmamap_load_mbuf_sg(sc->sf_cdata.sf_tx_tag, map,
2166 *m_head, txsegs, &nsegs, BUS_DMA_NOWAIT);
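/*
 * EFBIG means the mbuf chain has more fragments than the DMA
 * map allows; collapse it to at most SF_MAXTXSEGS segments
 * and retry the load once.
 */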
2167 if (error == EFBIG) {
2168 m = m_collapse(*m_head, M_DONTWAIT, SF_MAXTXSEGS);
2169 if (m == NULL) {
2170 m_freem(*m_head);
2171 *m_head = NULL;
2172 return (ENOBUFS);
2173 }
2174 *m_head = m;
2175 error = bus_dmamap_load_mbuf_sg(sc->sf_cdata.sf_tx_tag,
2176 map, *m_head, txsegs, &nsegs, BUS_DMA_NOWAIT);
2177 if (error != 0) {
2178 m_freem(*m_head);
2179 *m_head = NULL;
2180 return (error);
2181 }
2182 } else if (error != 0)
2183 return (error);
2184 if (nsegs == 0) {
2185 m_freem(*m_head);
2186 *m_head = NULL;
2187 return (EIO);
2188 }
2189
2190 /* Check number of available descriptors. */
2191 avail = (SF_TX_DLIST_CNT - 1) - sc->sf_cdata.sf_tx_cnt;
2192 if (avail < nsegs) {
2193 bus_dmamap_unload(sc->sf_cdata.sf_tx_tag, map);
2194 return (ENOBUFS);
2195 }
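/*
 * If the frame's fragments would run past the end of the
 * ring, the first descriptor is tagged with SF_TX_DESC_END so
 * the hardware wraps to slot 0 after it; the unused slots at
 * the tail of the ring must then be counted as consumed too.
 */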
2196 nskip = 0;
2197 if (prod + nsegs >= SF_TX_DLIST_CNT) {
2198 nskip = SF_TX_DLIST_CNT - prod - 1;
2199 if (avail < nsegs + nskip) {
2200 bus_dmamap_unload(sc->sf_cdata.sf_tx_tag, map);
2201 return (ENOBUFS);
2202 }
2203 }
2204
2205 bus_dmamap_sync(sc->sf_cdata.sf_tx_tag, map, BUS_DMASYNC_PREWRITE);
2206
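/*
 * Remember the first slot; the frame's control bits are set
 * on it after all fragments have been queued.
 */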
2207 si = prod;
2208 for (i = 0; i < nsegs; i++) {
2209 desc = &sc->sf_rdata.sf_tx_ring[prod];
2210 desc->sf_tx_ctrl = htole32(SF_TX_DESC_ID |
2211 (txsegs[i].ds_len & SF_TX_DESC_FRAGLEN));
2212 desc->sf_tx_reserved = 0;
2213 desc->sf_addr = htole64(txsegs[i].ds_addr);
2214 if (i == 0 && prod + nsegs >= SF_TX_DLIST_CNT) {
2215 /* Queue wraps! */
2216 desc->sf_tx_ctrl |= htole32(SF_TX_DESC_END);
2217 prod = 0;
2218 } else
2219 SF_INC(prod, SF_TX_DLIST_CNT);
2220 }
2221 /* Update producer index. */
2222 sc->sf_cdata.sf_tx_prod = prod;
2223 sc->sf_cdata.sf_tx_cnt += nsegs + nskip;
2224
2225 desc = &sc->sf_rdata.sf_tx_ring[si];
2226 /* Check for a TCP/UDP checksum offload request. */
2227 if ((m->m_pkthdr.csum_flags & SF_CSUM_FEATURES) != 0)
2228 desc->sf_tx_ctrl |= htole32(SF_TX_DESC_CALTCP);
2229 desc->sf_tx_ctrl |=
2230 htole32(SF_TX_DESC_CRCEN | SF_TX_DESC_INTR | (nsegs << 16));
2231
2232 txd->tx_dmamap = map;
2233 txd->tx_m = m;
2234 txd->ndesc = nsegs + nskip;
2235
2236 return (0);
2237 }
2238
2239 static void
2240 sf_start(struct ifnet *ifp)
2241 {
2242 struct sf_softc *sc;
2243
2244 sc = ifp->if_softc;
2245 SF_LOCK(sc);
2246 sf_start_locked(ifp);
2247 SF_UNLOCK(sc);
2248 }
2249
2250 static void
2251 sf_start_locked(struct ifnet *ifp)
2252 {
2253 struct sf_softc *sc;
2254 struct mbuf *m_head;
2255 int enq;
2256
2257 sc = ifp->if_softc;
2258 SF_LOCK_ASSERT(sc);
2259
2260 if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
2261 IFF_DRV_RUNNING || sc->sf_link == 0)
2262 return;
2263
2264 /*
2265 * Since we don't know in advance when a descriptor wrap will
2266 * occur, keep the number of active Tx descriptors low enough
2267 * that a maximally fragmented frame always fits in the ring.
2268 */
2269 for (enq = 0; !IFQ_DRV_IS_EMPTY(&ifp->if_snd) &&
2270 sc->sf_cdata.sf_tx_cnt < SF_TX_DLIST_CNT - SF_MAXTXSEGS; ) {
2271 IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
2272 if (m_head == NULL)
2273 break;
2274 /*
2275 * Pack the data into the transmit ring. If we
2276 * don't have room, set the OACTIVE flag and wait
2277 * for the NIC to drain the ring.
2278 */
2279 if (sf_encap(sc, &m_head)) {
2280 if (m_head == NULL)
2281 break;
2282 IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
2283 ifp->if_drv_flags |= IFF_DRV_OACTIVE;
2284 break;
2285 }
2286
2287 enq++;
2288 /*
2289 * If there's a BPF listener, bounce a copy of this frame
2290 * to it.
2291 */
2292 ETHER_BPF_MTAP(ifp, m_head);
2293 }
2294
2295 if (enq > 0) {
2296 bus_dmamap_sync(sc->sf_cdata.sf_tx_ring_tag,
2297 sc->sf_cdata.sf_tx_ring_map,
2298 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2299 /* Kick transmit; the producer index register counts in 8-byte units. */
2300 csr_write_4(sc, SF_TXDQ_PRODIDX,
2301 sc->sf_cdata.sf_tx_prod * (sizeof(struct sf_tx_rdesc) / 8));
2302
2303 /* Set a timeout in case the chip goes out to lunch. */
2304 sc->sf_watchdog_timer = 5;
2305 }
2306 }
2307
2308 static void
2309 sf_stop(struct sf_softc *sc)
2310 {
2311 struct sf_txdesc *txd;
2312 struct sf_rxdesc *rxd;
2313 struct ifnet *ifp;
2314 int i;
2315
2316 SF_LOCK_ASSERT(sc);
2317
2318 ifp = sc->sf_ifp;
2319
2320 ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
2321 sc->sf_link = 0;
2322 callout_stop(&sc->sf_co);
2323 sc->sf_watchdog_timer = 0;
2324
2325 /* Reading the ISR register clears all interrupts. */
2326 csr_read_4(sc, SF_ISR);
2327 /* Disable further interrupts. */
2328 csr_write_4(sc, SF_IMR, 0);
2329
2330 /* Disable the Tx/Rx engines. */
2331 csr_write_4(sc, SF_GEN_ETH_CTL, 0);
2332
2333 /* Give the hardware a chance to drain active DMA cycles. */
2334 DELAY(1000);
2335
2336 csr_write_4(sc, SF_CQ_CONSIDX, 0);
2337 csr_write_4(sc, SF_CQ_PRODIDX, 0);
2338 csr_write_4(sc, SF_RXDQ_ADDR_Q1, 0);
2339 csr_write_4(sc, SF_RXDQ_CTL_1, 0);
2340 csr_write_4(sc, SF_RXDQ_PTR_Q1, 0);
2341 csr_write_4(sc, SF_TXCQ_CTL, 0);
2342 csr_write_4(sc, SF_TXDQ_ADDR_HIPRIO, 0);
2343 csr_write_4(sc, SF_TXDQ_CTL, 0);
2344
2345 /*
2346 * Free RX and TX mbufs still in the queues.
2347 */
2348 for (i = 0; i < SF_RX_DLIST_CNT; i++) {
2349 rxd = &sc->sf_cdata.sf_rxdesc[i];
2350 if (rxd->rx_m != NULL) {
2351 bus_dmamap_sync(sc->sf_cdata.sf_rx_tag,
2352 rxd->rx_dmamap, BUS_DMASYNC_POSTREAD);
2353 bus_dmamap_unload(sc->sf_cdata.sf_rx_tag,
2354 rxd->rx_dmamap);
2355 m_freem(rxd->rx_m);
2356 rxd->rx_m = NULL;
2357 }
2358 }
2359 for (i = 0; i < SF_TX_DLIST_CNT; i++) {
2360 txd = &sc->sf_cdata.sf_txdesc[i];
2361 if (txd->tx_m != NULL) {
2362 bus_dmamap_sync(sc->sf_cdata.sf_tx_tag,
2363 txd->tx_dmamap, BUS_DMASYNC_POSTWRITE);
2364 bus_dmamap_unload(sc->sf_cdata.sf_tx_tag,
2365 txd->tx_dmamap);
2366 m_freem(txd->tx_m);
2367 txd->tx_m = NULL;
2368 txd->ndesc = 0;
2369 }
2370 }
2371 }
2372
2373 static void
2374 sf_tick(void *xsc)
2375 {
2376 struct sf_softc *sc;
2377 struct mii_data *mii;
2378
2379 sc = xsc;
2380 SF_LOCK_ASSERT(sc);
2381 mii = device_get_softc(sc->sf_miibus);
2382 mii_tick(mii);
2383 sf_stats_update(sc);
2384 sf_watchdog(sc);
2385 callout_reset(&sc->sf_co, hz, sf_tick, sc);
2386 }
2387
2388 /*
2389 * Note: it is important that this function not be interrupted. We
2390 * use a two-stage register access scheme: if we are interrupted in
2391 * between setting the indirect address register and reading from the
2392 * indirect data register, the contents of the address register could
2393 * be changed out from under us.
2394 */
2395 static void
2396 sf_stats_update(struct sf_softc *sc)
2397 {
2398 struct ifnet *ifp;
2399 struct sf_stats now, *stats, *nstats;
2400 int i;
2401
2402 SF_LOCK_ASSERT(sc);
2403
2404 ifp = sc->sf_ifp;
2405 stats = &now;
2406
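/*
 * Snapshot the hardware counters into a local structure, then
 * fold them into the interface counters and the accumulated
 * driver statistics below.
 */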
2407 stats->sf_tx_frames =
2408 csr_read_4(sc, SF_STATS_BASE + SF_STATS_TX_FRAMES);
2409 stats->sf_tx_single_colls =
2410 csr_read_4(sc, SF_STATS_BASE + SF_STATS_TX_SINGLE_COL);
2411 stats->sf_tx_multi_colls =
2412 csr_read_4(sc, SF_STATS_BASE + SF_STATS_TX_MULTI_COL);
2413 stats->sf_tx_crcerrs =
2414 csr_read_4(sc, SF_STATS_BASE + SF_STATS_TX_CRC_ERRS);
2415 stats->sf_tx_bytes =
2416 csr_read_4(sc, SF_STATS_BASE + SF_STATS_TX_BYTES);
2417 stats->sf_tx_deferred =
2418 csr_read_4(sc, SF_STATS_BASE + SF_STATS_TX_DEFERRED);
2419 stats->sf_tx_late_colls =
2420 csr_read_4(sc, SF_STATS_BASE + SF_STATS_TX_LATE_COL);
2421 stats->sf_tx_pause_frames =
2422 csr_read_4(sc, SF_STATS_BASE + SF_STATS_TX_PAUSE);
2423 stats->sf_tx_control_frames =
2424 csr_read_4(sc, SF_STATS_BASE + SF_STATS_TX_CTL_FRAME);
2425 stats->sf_tx_excess_colls =
2426 csr_read_4(sc, SF_STATS_BASE + SF_STATS_TX_EXCESS_COL);
2427 stats->sf_tx_excess_defer =
2428 csr_read_4(sc, SF_STATS_BASE + SF_STATS_TX_EXCESS_DEF);
2429 stats->sf_tx_mcast_frames =
2430 csr_read_4(sc, SF_STATS_BASE + SF_STATS_TX_MULTI);
2431 stats->sf_tx_bcast_frames =
2432 csr_read_4(sc, SF_STATS_BASE + SF_STATS_TX_BCAST);
2433 stats->sf_tx_frames_lost =
2434 csr_read_4(sc, SF_STATS_BASE + SF_STATS_TX_FRAME_LOST);
2435 stats->sf_rx_frames =
2436 csr_read_4(sc, SF_STATS_BASE + SF_STATS_RX_FRAMES);
2437 stats->sf_rx_crcerrs =
2438 csr_read_4(sc, SF_STATS_BASE + SF_STATS_RX_CRC_ERRS);
2439 stats->sf_rx_alignerrs =
2440 csr_read_4(sc, SF_STATS_BASE + SF_STATS_RX_ALIGN_ERRS);
2441 stats->sf_rx_bytes =
2442 csr_read_4(sc, SF_STATS_BASE + SF_STATS_RX_BYTES);
2443 stats->sf_rx_pause_frames =
2444 csr_read_4(sc, SF_STATS_BASE + SF_STATS_RX_PAUSE);
2445 stats->sf_rx_control_frames =
2446 csr_read_4(sc, SF_STATS_BASE + SF_STATS_RX_CTL_FRAME);
2447 stats->sf_rx_unsup_control_frames =
2448 csr_read_4(sc, SF_STATS_BASE + SF_STATS_RX_UNSUP_FRAME);
2449 stats->sf_rx_giants =
2450 csr_read_4(sc, SF_STATS_BASE + SF_STATS_RX_GIANTS);
2451 stats->sf_rx_runts =
2452 csr_read_4(sc, SF_STATS_BASE + SF_STATS_RX_RUNTS);
2453 stats->sf_rx_jabbererrs =
2454 csr_read_4(sc, SF_STATS_BASE + SF_STATS_RX_JABBER);
2455 stats->sf_rx_fragments =
2456 csr_read_4(sc, SF_STATS_BASE + SF_STATS_RX_FRAGMENTS);
2457 stats->sf_rx_pkts_64 =
2458 csr_read_4(sc, SF_STATS_BASE + SF_STATS_RX_64);
2459 stats->sf_rx_pkts_65_127 =
2460 csr_read_4(sc, SF_STATS_BASE + SF_STATS_RX_65_127);
2461 stats->sf_rx_pkts_128_255 =
2462 csr_read_4(sc, SF_STATS_BASE + SF_STATS_RX_128_255);
2463 stats->sf_rx_pkts_256_511 =
2464 csr_read_4(sc, SF_STATS_BASE + SF_STATS_RX_256_511);
2465 stats->sf_rx_pkts_512_1023 =
2466 csr_read_4(sc, SF_STATS_BASE + SF_STATS_RX_512_1023);
2467 stats->sf_rx_pkts_1024_1518 =
2468 csr_read_4(sc, SF_STATS_BASE + SF_STATS_RX_1024_1518);
2469 stats->sf_rx_frames_lost =
2470 csr_read_4(sc, SF_STATS_BASE + SF_STATS_RX_FRAME_LOST);
2471 /* Only the lower 16 bits are valid. */
2472 stats->sf_tx_underruns =
2473 (csr_read_4(sc, SF_STATS_BASE + SF_STATS_TX_UNDERRUN) & 0xffff);
2474
2475 /* Empty stats counter registers. */
2476 for (i = SF_STATS_BASE; i < (SF_STATS_END + 1); i += sizeof(uint32_t))
2477 csr_write_4(sc, i, 0);
2478
2479 ifp->if_opackets += (u_long)stats->sf_tx_frames;
2480
2481 ifp->if_collisions += (u_long)stats->sf_tx_single_colls +
2482 (u_long)stats->sf_tx_multi_colls;
2483
2484 ifp->if_oerrors += (u_long)stats->sf_tx_excess_colls +
2485 (u_long)stats->sf_tx_excess_defer +
2486 (u_long)stats->sf_tx_frames_lost;
2487
2488 ifp->if_ipackets += (u_long)stats->sf_rx_frames;
2489
2490 ifp->if_ierrors += (u_long)stats->sf_rx_crcerrs +
2491 (u_long)stats->sf_rx_alignerrs +
2492 (u_long)stats->sf_rx_giants +
2493 (u_long)stats->sf_rx_runts +
2494 (u_long)stats->sf_rx_jabbererrs +
2495 (u_long)stats->sf_rx_frames_lost;
2496
2497 nstats = &sc->sf_statistics;
2498
2499 nstats->sf_tx_frames += stats->sf_tx_frames;
2500 nstats->sf_tx_single_colls += stats->sf_tx_single_colls;
2501 nstats->sf_tx_multi_colls += stats->sf_tx_multi_colls;
2502 nstats->sf_tx_crcerrs += stats->sf_tx_crcerrs;
2503 nstats->sf_tx_bytes += stats->sf_tx_bytes;
2504 nstats->sf_tx_deferred += stats->sf_tx_deferred;
2505 nstats->sf_tx_late_colls += stats->sf_tx_late_colls;
2506 nstats->sf_tx_pause_frames += stats->sf_tx_pause_frames;
2507 nstats->sf_tx_control_frames += stats->sf_tx_control_frames;
2508 nstats->sf_tx_excess_colls += stats->sf_tx_excess_colls;
2509 nstats->sf_tx_excess_defer += stats->sf_tx_excess_defer;
2510 nstats->sf_tx_mcast_frames += stats->sf_tx_mcast_frames;
2511 nstats->sf_tx_bcast_frames += stats->sf_tx_bcast_frames;
2512 nstats->sf_tx_frames_lost += stats->sf_tx_frames_lost;
2513 nstats->sf_rx_frames += stats->sf_rx_frames;
2514 nstats->sf_rx_crcerrs += stats->sf_rx_crcerrs;
2515 nstats->sf_rx_alignerrs += stats->sf_rx_alignerrs;
2516 nstats->sf_rx_bytes += stats->sf_rx_bytes;
2517 nstats->sf_rx_pause_frames += stats->sf_rx_pause_frames;
2518 nstats->sf_rx_control_frames += stats->sf_rx_control_frames;
2519 nstats->sf_rx_unsup_control_frames += stats->sf_rx_unsup_control_frames;
2520 nstats->sf_rx_giants += stats->sf_rx_giants;
2521 nstats->sf_rx_runts += stats->sf_rx_runts;
2522 nstats->sf_rx_jabbererrs += stats->sf_rx_jabbererrs;
2523 nstats->sf_rx_fragments += stats->sf_rx_fragments;
2524 nstats->sf_rx_pkts_64 += stats->sf_rx_pkts_64;
2525 nstats->sf_rx_pkts_65_127 += stats->sf_rx_pkts_65_127;
2526 nstats->sf_rx_pkts_128_255 += stats->sf_rx_pkts_128_255;
2527 nstats->sf_rx_pkts_256_511 += stats->sf_rx_pkts_256_511;
2528 nstats->sf_rx_pkts_512_1023 += stats->sf_rx_pkts_512_1023;
2529 nstats->sf_rx_pkts_1024_1518 += stats->sf_rx_pkts_1024_1518;
2530 nstats->sf_rx_frames_lost += stats->sf_rx_frames_lost;
2531 nstats->sf_tx_underruns += stats->sf_tx_underruns;
2532 }
2533
2534 static void
2535 sf_watchdog(struct sf_softc *sc)
2536 {
2537 struct ifnet *ifp;
2538
2539 SF_LOCK_ASSERT(sc);
2540
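/* Fire only on the 1 -> 0 transition; a timer value of 0 means idle. */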
2541 if (sc->sf_watchdog_timer == 0 || --sc->sf_watchdog_timer)
2542 return;
2543
2544 ifp = sc->sf_ifp;
2545
2546 ifp->if_oerrors++;
2547 if (sc->sf_link == 0) {
2548 if (bootverbose)
2549 if_printf(sc->sf_ifp, "watchdog timeout "
2550 "(missed link)\n");
2551 } else
2552 if_printf(ifp, "watchdog timeout, %d Tx descs are active\n",
2553 sc->sf_cdata.sf_tx_cnt);
2554
2555 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
2556 sf_init_locked(sc);
2557
2558 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
2559 sf_start_locked(ifp);
2560 }
2561
2562 static int
2563 sf_shutdown(device_t dev)
2564 {
2565 struct sf_softc *sc;
2566
2567 sc = device_get_softc(dev);
2568
2569 SF_LOCK(sc);
2570 sf_stop(sc);
2571 SF_UNLOCK(sc);
2572
2573 return (0);
2574 }
2575
2576 static int
2577 sf_suspend(device_t dev)
2578 {
2579 struct sf_softc *sc;
2580
2581 sc = device_get_softc(dev);
2582
2583 SF_LOCK(sc);
2584 sf_stop(sc);
2585 sc->sf_suspended = 1;
2586 bus_generic_suspend(dev);
2587 SF_UNLOCK(sc);
2588
2589 return (0);
2590 }
2591
2592 static int
2593 sf_resume(device_t dev)
2594 {
2595 struct sf_softc *sc;
2596 struct ifnet *ifp;
2597
2598 sc = device_get_softc(dev);
2599
2600 SF_LOCK(sc);
2601 bus_generic_resume(dev);
2602 ifp = sc->sf_ifp;
2603 if ((ifp->if_flags & IFF_UP) != 0)
2604 sf_init_locked(sc);
2605
2606 sc->sf_suspended = 0;
2607 SF_UNLOCK(sc);
2608
2609 return (0);
2610 }
2611
2612 static int
2613 sf_sysctl_stats(SYSCTL_HANDLER_ARGS)
2614 {
2615 struct sf_softc *sc;
2616 struct sf_stats *stats;
2617 int error;
2618 int result;
2619
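/* Dump statistics to the console only when 1 is written to the node. */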
2620 result = -1;
2621 error = sysctl_handle_int(oidp, &result, 0, req);
2622
2623 if (error != 0 || req->newptr == NULL)
2624 return (error);
2625
2626 if (result != 1)
2627 return (error);
2628
2629 sc = (struct sf_softc *)arg1;
2630 stats = &sc->sf_statistics;
2631
2632 printf("%s statistics:\n", device_get_nameunit(sc->sf_dev));
2633 printf("Transmit good frames : %ju\n",
2634 (uintmax_t)stats->sf_tx_frames);
2635 printf("Transmit good octets : %ju\n",
2636 (uintmax_t)stats->sf_tx_bytes);
2637 printf("Transmit single collisions : %u\n",
2638 stats->sf_tx_single_colls);
2639 printf("Transmit multiple collisions : %u\n",
2640 stats->sf_tx_multi_colls);
2641 printf("Transmit late collisions : %u\n",
2642 stats->sf_tx_late_colls);
2643 printf("Transmit abort due to excessive collisions : %u\n",
2644 stats->sf_tx_excess_colls);
2645 printf("Transmit CRC errors : %u\n",
2646 stats->sf_tx_crcerrs);
2647 printf("Transmit deferrals : %u\n",
2648 stats->sf_tx_deferred);
2649 printf("Transmit abort due to excessive deferrals : %u\n",
2650 stats->sf_tx_excess_defer);
2651 printf("Transmit pause control frames : %u\n",
2652 stats->sf_tx_pause_frames);
2653 printf("Transmit control frames : %u\n",
2654 stats->sf_tx_control_frames);
2655 printf("Transmit good multicast frames : %u\n",
2656 stats->sf_tx_mcast_frames);
2657 printf("Transmit good broadcast frames : %u\n",
2658 stats->sf_tx_bcast_frames);
2659 printf("Transmit frames lost due to internal transmit errors : %u\n",
2660 stats->sf_tx_frames_lost);
2661 printf("Transmit FIFO underflows : %u\n",
2662 stats->sf_tx_underruns);
2663 printf("Transmit GFP stalls : %u\n", stats->sf_tx_gfp_stall);
2664 printf("Receive good frames : %ju\n",
2665 (uint64_t)stats->sf_rx_frames);
2666 printf("Receive good octets : %ju\n",
2667 (uint64_t)stats->sf_rx_bytes);
2668 printf("Receive CRC errors : %u\n",
2669 stats->sf_rx_crcerrs);
2670 printf("Receive alignment errors : %u\n",
2671 stats->sf_rx_alignerrs);
2672 printf("Receive pause frames : %u\n",
2673 stats->sf_rx_pause_frames);
2674 printf("Receive control frames : %u\n",
2675 stats->sf_rx_control_frames);
2676 printf("Receive control frames with unsupported opcode : %u\n",
2677 stats->sf_rx_unsup_control_frames);
2678 printf("Receive frames too long : %u\n",
2679 stats->sf_rx_giants);
2680 printf("Receive frames too short : %u\n",
2681 stats->sf_rx_runts);
2682 printf("Receive frames jabber errors : %u\n",
2683 stats->sf_rx_jabbererrs);
2684 printf("Receive frames fragments : %u\n",
2685 stats->sf_rx_fragments);
2686 printf("Receive packets 64 bytes : %ju\n",
2687 (uint64_t)stats->sf_rx_pkts_64);
2688 printf("Receive packets 65 to 127 bytes : %ju\n",
2689 (uint64_t)stats->sf_rx_pkts_65_127);
2690 printf("Receive packets 128 to 255 bytes : %ju\n",
2691 (uint64_t)stats->sf_rx_pkts_128_255);
2692 printf("Receive packets 256 to 511 bytes : %ju\n",
2693 (uint64_t)stats->sf_rx_pkts_256_511);
2694 printf("Receive packets 512 to 1023 bytes : %ju\n",
2695 (uint64_t)stats->sf_rx_pkts_512_1023);
2696 printf("Receive packets 1024 to 1518 bytes : %ju\n",
2697 (uint64_t)stats->sf_rx_pkts_1024_1518);
2698 printf("Receive frames lost due to internal receive errors : %u\n",
2699 stats->sf_rx_frames_lost);
2700 printf("Receive GFP stalls : %u\n", stats->sf_rx_gfp_stall);
2701
2702 return (error);
2703 }
2704
2705 static int
2706 sysctl_int_range(SYSCTL_HANDLER_ARGS, int low, int high)
2707 {
2708 int error, value;
2709
2710 if (!arg1)
2711 return (EINVAL);
2712 value = *(int *)arg1;
2713 error = sysctl_handle_int(oidp, &value, 0, req);
2714 if (error || !req->newptr)
2715 return (error);
2716 if (value < low || value > high)
2717 return (EINVAL);
2718 *(int *)arg1 = value;
2719
2720 return (0);
2721 }
2722
2723 static int
2724 sysctl_hw_sf_int_mod(SYSCTL_HANDLER_ARGS)
2725 {
2726
2727 return (sysctl_int_range(oidp, arg1, arg2, req, SF_IM_MIN, SF_IM_MAX));
2728 }