sys/dev/sf/if_sf.c
1 /*-
2 * Copyright (c) 1997, 1998, 1999
3 * Bill Paul <wpaul@ctr.columbia.edu>. All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 * 3. All advertising materials mentioning features or use of this software
14 * must display the following acknowledgement:
15 * This product includes software developed by Bill Paul.
16 * 4. Neither the name of the author nor the names of any co-contributors
17 * may be used to endorse or promote products derived from this software
18 * without specific prior written permission.
19 *
20 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
21 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
24 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
30 * THE POSSIBILITY OF SUCH DAMAGE.
31 */
32
33 #include <sys/cdefs.h>
34 __FBSDID("$FreeBSD$");
35
36 /*
37 * Adaptec AIC-6915 "Starfire" PCI fast ethernet driver for FreeBSD.
38 * Programming manual is available from:
39 * http://download.adaptec.com/pdfs/user_guides/aic6915_pg.pdf.
40 *
41 * Written by Bill Paul <wpaul@ctr.columbia.edu>
42 * Department of Electrical Engineering
43 * Columbia University, New York City
44 */
45 /*
46 * The Adaptec AIC-6915 "Starfire" is a 64-bit 10/100 PCI ethernet
47 * controller designed with flexibility and reduced CPU load in mind.
48 * The Starfire offers high and low priority buffer queues, a
49 * producer/consumer index mechanism and several different buffer
50 * queue and completion queue descriptor types. Any one of a number
51 * of different driver designs can be used, depending on system and
52 * OS requirements. This driver makes use of type2 transmit frame
53 * descriptors to take full advantage of fragmented packet buffers
54 * and two RX buffer queues prioritized on size (one queue for small
55 * frames that will fit into a single mbuf, another with full size
56 * mbuf clusters for everything else). The producer/consumer indexes
57 * and completion queues are also used.
58 *
59 * One downside to the Starfire has to do with alignment: buffer
60 * queues must be aligned on 256-byte boundaries, and receive buffers
61 * must be aligned on longword boundaries. The receive buffer alignment
62 * causes problems on strict alignment architectures, where the
63 * packet payload should be longword aligned. There is no simple way
64 * around this.
65 *
66 * For receive filtering, the Starfire offers 16 perfect filter slots
67 * and a 512-bit hash table.
68 *
69 * The Starfire has no internal transceiver, relying instead on an
70 * external MII-based transceiver. Accessing registers on external
71 * PHYs is done through a special register map rather than with the
72 * usual bitbang MDIO method.
73 *
74 * Accessing the registers on the Starfire is a little tricky. The
75 * Starfire has a 512K internal register space. When programmed for
76 * PCI memory mapped mode, the entire register space can be accessed
77 * directly. However in I/O space mode, only 256 bytes are directly
78 * mapped into PCI I/O space. The other registers can be accessed
79 * indirectly using the SF_INDIRECTIO_ADDR and SF_INDIRECTIO_DATA
80 * registers inside the 256-byte I/O window.
81 */
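/*
 * Illustrative sketch of the indirect access pattern described above;
 * the driver's real accessors are csr_read_4()/csr_write_4() below:
 *
 *	CSR_WRITE_4(sc, SF_INDIRECTIO_ADDR, reg + SF_RMAP_INTREG_BASE);
 *	val = CSR_READ_4(sc, SF_INDIRECTIO_DATA);
 */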
82
83 #ifdef HAVE_KERNEL_OPTION_HEADERS
84 #include "opt_device_polling.h"
85 #endif
86
87 #include <sys/param.h>
88 #include <sys/systm.h>
89 #include <sys/bus.h>
90 #include <sys/endian.h>
91 #include <sys/kernel.h>
92 #include <sys/malloc.h>
93 #include <sys/mbuf.h>
94 #include <sys/rman.h>
95 #include <sys/module.h>
96 #include <sys/socket.h>
97 #include <sys/sockio.h>
98 #include <sys/sysctl.h>
99
100 #include <net/bpf.h>
101 #include <net/if.h>
102 #include <net/if_arp.h>
103 #include <net/ethernet.h>
104 #include <net/if_dl.h>
105 #include <net/if_media.h>
106 #include <net/if_types.h>
107 #include <net/if_vlan_var.h>
108
109 #include <dev/mii/mii.h>
110 #include <dev/mii/miivar.h>
111
112 #include <dev/pci/pcireg.h>
113 #include <dev/pci/pcivar.h>
114
115 #include <machine/bus.h>
116
117 #include <dev/sf/if_sfreg.h>
118 #include <dev/sf/starfire_rx.h>
119 #include <dev/sf/starfire_tx.h>
120
121 /* "device miibus" required. See GENERIC if you get errors here. */
122 #include "miibus_if.h"
123
124 MODULE_DEPEND(sf, pci, 1, 1, 1);
125 MODULE_DEPEND(sf, ether, 1, 1, 1);
126 MODULE_DEPEND(sf, miibus, 1, 1, 1);
127
128 #undef SF_GFP_DEBUG
129 #define SF_CSUM_FEATURES (CSUM_TCP | CSUM_UDP)
130 /* Define this to activate partial TCP/UDP checksum offload. */
131 #undef SF_PARTIAL_CSUM_SUPPORT
132
133 static struct sf_type sf_devs[] = {
134 { AD_VENDORID, AD_DEVICEID_STARFIRE, "Adaptec AIC-6915 10/100BaseTX",
135 AD_SUBSYSID_62011_REV0, "Adaptec ANA-62011 (rev 0) 10/100BaseTX" },
136 { AD_VENDORID, AD_DEVICEID_STARFIRE, "Adaptec AIC-6915 10/100BaseTX",
137 AD_SUBSYSID_62011_REV1, "Adaptec ANA-62011 (rev 1) 10/100BaseTX" },
138 { AD_VENDORID, AD_DEVICEID_STARFIRE, "Adaptec AIC-6915 10/100BaseTX",
139 AD_SUBSYSID_62022, "Adaptec ANA-62022 10/100BaseTX" },
140 { AD_VENDORID, AD_DEVICEID_STARFIRE, "Adaptec AIC-6915 10/100BaseTX",
141 AD_SUBSYSID_62044_REV0, "Adaptec ANA-62044 (rev 0) 10/100BaseTX" },
142 { AD_VENDORID, AD_DEVICEID_STARFIRE, "Adaptec AIC-6915 10/100BaseTX",
143 AD_SUBSYSID_62044_REV1, "Adaptec ANA-62044 (rev 1) 10/100BaseTX" },
144 { AD_VENDORID, AD_DEVICEID_STARFIRE, "Adaptec AIC-6915 10/100BaseTX",
145 AD_SUBSYSID_62020, "Adaptec ANA-62020 10/100BaseFX" },
146 { AD_VENDORID, AD_DEVICEID_STARFIRE, "Adaptec AIC-6915 10/100BaseTX",
147 AD_SUBSYSID_69011, "Adaptec ANA-69011 10/100BaseTX" },
148 };
149
150 static int sf_probe(device_t);
151 static int sf_attach(device_t);
152 static int sf_detach(device_t);
153 static int sf_shutdown(device_t);
154 static int sf_suspend(device_t);
155 static int sf_resume(device_t);
156 static void sf_intr(void *);
157 static void sf_tick(void *);
158 static void sf_stats_update(struct sf_softc *);
159 #ifndef __NO_STRICT_ALIGNMENT
160 static __inline void sf_fixup_rx(struct mbuf *);
161 #endif
162 static void sf_rxeof(struct sf_softc *);
163 static void sf_txeof(struct sf_softc *);
164 static int sf_encap(struct sf_softc *, struct mbuf **);
165 static void sf_start(struct ifnet *);
166 static void sf_start_locked(struct ifnet *);
167 static int sf_ioctl(struct ifnet *, u_long, caddr_t);
168 static void sf_download_fw(struct sf_softc *);
169 static void sf_init(void *);
170 static void sf_init_locked(struct sf_softc *);
171 static void sf_stop(struct sf_softc *);
172 static void sf_watchdog(struct sf_softc *);
173 static int sf_ifmedia_upd(struct ifnet *);
174 static int sf_ifmedia_upd_locked(struct ifnet *);
175 static void sf_ifmedia_sts(struct ifnet *, struct ifmediareq *);
176 static void sf_reset(struct sf_softc *);
177 static int sf_dma_alloc(struct sf_softc *);
178 static void sf_dma_free(struct sf_softc *);
179 static int sf_init_rx_ring(struct sf_softc *);
180 static void sf_init_tx_ring(struct sf_softc *);
181 static int sf_newbuf(struct sf_softc *, int);
182 static void sf_rxfilter(struct sf_softc *);
183 static int sf_setperf(struct sf_softc *, int, uint8_t *);
184 static int sf_sethash(struct sf_softc *, caddr_t, int);
185 #ifdef notdef
186 static int sf_setvlan(struct sf_softc *, int, uint32_t);
187 #endif
188
189 static uint8_t sf_read_eeprom(struct sf_softc *, int);
190
191 static int sf_miibus_readreg(device_t, int, int);
192 static int sf_miibus_writereg(device_t, int, int, int);
193 static void sf_miibus_statchg(device_t);
194 #ifdef DEVICE_POLLING
195 static void sf_poll(struct ifnet *ifp, enum poll_cmd cmd, int count);
196 #endif
197
198 static uint32_t csr_read_4(struct sf_softc *, int);
199 static void csr_write_4(struct sf_softc *, int, uint32_t);
200 static void sf_txthresh_adjust(struct sf_softc *);
201 static int sf_sysctl_stats(SYSCTL_HANDLER_ARGS);
202 static int sysctl_int_range(SYSCTL_HANDLER_ARGS, int, int);
203 static int sysctl_hw_sf_int_mod(SYSCTL_HANDLER_ARGS);
204
205 static device_method_t sf_methods[] = {
206 /* Device interface */
207 DEVMETHOD(device_probe, sf_probe),
208 DEVMETHOD(device_attach, sf_attach),
209 DEVMETHOD(device_detach, sf_detach),
210 DEVMETHOD(device_shutdown, sf_shutdown),
211 DEVMETHOD(device_suspend, sf_suspend),
212 DEVMETHOD(device_resume, sf_resume),
213
214 /* bus interface */
215 DEVMETHOD(bus_print_child, bus_generic_print_child),
216 DEVMETHOD(bus_driver_added, bus_generic_driver_added),
217
218 /* MII interface */
219 DEVMETHOD(miibus_readreg, sf_miibus_readreg),
220 DEVMETHOD(miibus_writereg, sf_miibus_writereg),
221 DEVMETHOD(miibus_statchg, sf_miibus_statchg),
222
223 { NULL, NULL }
224 };
225
226 static driver_t sf_driver = {
227 "sf",
228 sf_methods,
229 sizeof(struct sf_softc),
230 };
231
232 static devclass_t sf_devclass;
233
234 DRIVER_MODULE(sf, pci, sf_driver, sf_devclass, 0, 0);
235 DRIVER_MODULE(miibus, sf, miibus_driver, miibus_devclass, 0, 0);
236
237 #define SF_SETBIT(sc, reg, x) \
238 csr_write_4(sc, reg, csr_read_4(sc, reg) | (x))
239
240 #define SF_CLRBIT(sc, reg, x) \
241 csr_write_4(sc, reg, csr_read_4(sc, reg) & ~(x))
242
243 static uint32_t
244 csr_read_4(struct sf_softc *sc, int reg)
245 {
246 uint32_t val;
247
248 if (sc->sf_restype == SYS_RES_MEMORY)
249 val = CSR_READ_4(sc, (reg + SF_RMAP_INTREG_BASE));
250 else {
251 CSR_WRITE_4(sc, SF_INDIRECTIO_ADDR, reg + SF_RMAP_INTREG_BASE);
252 val = CSR_READ_4(sc, SF_INDIRECTIO_DATA);
253 }
254
255 return (val);
256 }
257
258 static uint8_t
259 sf_read_eeprom(struct sf_softc *sc, int reg)
260 {
261 uint8_t val;
262
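/*
 * The EEPROM is exposed through 32-bit registers: fetch the
 * aligned longword and extract the addressed byte from it.
 */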
263 val = (csr_read_4(sc, SF_EEADDR_BASE +
264 (reg & 0xFFFFFFFC)) >> (8 * (reg & 3))) & 0xFF;
265
266 return (val);
267 }
268
269 static void
270 csr_write_4(struct sf_softc *sc, int reg, uint32_t val)
271 {
272
273 if (sc->sf_restype == SYS_RES_MEMORY)
274 CSR_WRITE_4(sc, (reg + SF_RMAP_INTREG_BASE), val);
275 else {
276 CSR_WRITE_4(sc, SF_INDIRECTIO_ADDR, reg + SF_RMAP_INTREG_BASE);
277 CSR_WRITE_4(sc, SF_INDIRECTIO_DATA, val);
278 }
279 }
280
281 /*
282 * Copy the address 'mac' into the perfect RX filter entry at
283 * offset 'idx.' The perfect filter only has 16 entries so do
284 * some sanity tests.
285 */
286 static int
287 sf_setperf(struct sf_softc *sc, int idx, uint8_t *mac)
288 {
289
290 if (idx < 0 || idx >= SF_RXFILT_PERFECT_CNT)
291 return (EINVAL);
292
293 if (mac == NULL)
294 return (EINVAL);
295
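/*
 * The filter takes the address 16 bits per register, in
 * reverse byte order (mac[5] first, mac[0] last).
 */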
296 csr_write_4(sc, SF_RXFILT_PERFECT_BASE +
297 (idx * SF_RXFILT_PERFECT_SKIP) + 0, mac[5] | (mac[4] << 8));
298 csr_write_4(sc, SF_RXFILT_PERFECT_BASE +
299 (idx * SF_RXFILT_PERFECT_SKIP) + 4, mac[3] | (mac[2] << 8));
300 csr_write_4(sc, SF_RXFILT_PERFECT_BASE +
301 (idx * SF_RXFILT_PERFECT_SKIP) + 8, mac[1] | (mac[0] << 8));
302
303 return (0);
304 }
305
306 /*
307 * Set the bit in the 512-bit hash table that corresponds to the
308 * specified mac address 'mac.' If 'prio' is nonzero, update the
309 * priority hash table instead of the filter hash table.
310 */
311 static int
312 sf_sethash(struct sf_softc *sc, caddr_t mac, int prio)
313 {
314 uint32_t h;
315
316 if (mac == NULL)
317 return (EINVAL);
318
319 h = ether_crc32_be(mac, ETHER_ADDR_LEN) >> 23;
320
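/*
 * The top 9 bits of the CRC index a 512-bit table organized
 * as 32 16-bit words: h >> 4 selects the word and h & 0xF
 * the bit within it.
 */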
321 if (prio) {
322 SF_SETBIT(sc, SF_RXFILT_HASH_BASE + SF_RXFILT_HASH_PRIOOFF +
323 (SF_RXFILT_HASH_SKIP * (h >> 4)), (1 << (h & 0xF)));
324 } else {
325 SF_SETBIT(sc, SF_RXFILT_HASH_BASE + SF_RXFILT_HASH_ADDROFF +
326 (SF_RXFILT_HASH_SKIP * (h >> 4)), (1 << (h & 0xF)));
327 }
328
329 return (0);
330 }
331
332 #ifdef notdef
333 /*
334 * Set a VLAN tag in the receive filter.
335 */
336 static int
337 sf_setvlan(struct sf_softc *sc, int idx, uint32_t vlan)
338 {
339
340 if (idx < 0 || idx >= SF_RXFILT_HASH_CNT)
341 return (EINVAL);
342
343 csr_write_4(sc, SF_RXFILT_HASH_BASE +
344 (idx * SF_RXFILT_HASH_SKIP) + SF_RXFILT_HASH_VLANOFF, vlan);
345
346 return (0);
347 }
348 #endif
349
350 static int
351 sf_miibus_readreg(device_t dev, int phy, int reg)
352 {
353 struct sf_softc *sc;
354 int i;
355 uint32_t val = 0;
356
357 sc = device_get_softc(dev);
358
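/*
 * The PHY registers are mapped into the Starfire's own register
 * space; poll until the chip flags the read data as valid.
 */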
359 for (i = 0; i < SF_TIMEOUT; i++) {
360 val = csr_read_4(sc, SF_PHY_REG(phy, reg));
361 if ((val & SF_MII_DATAVALID) != 0)
362 break;
363 }
364
365 if (i == SF_TIMEOUT)
366 return (0);
367
368 val &= SF_MII_DATAPORT;
369 if (val == 0xffff)
370 return (0);
371
372 return (val);
373 }
374
375 static int
376 sf_miibus_writereg(device_t dev, int phy, int reg, int val)
377 {
378 struct sf_softc *sc;
379 int i;
380 int busy;
381
382 sc = device_get_softc(dev);
383
384 csr_write_4(sc, SF_PHY_REG(phy, reg), val);
385
386 for (i = 0; i < SF_TIMEOUT; i++) {
387 busy = csr_read_4(sc, SF_PHY_REG(phy, reg));
388 if ((busy & SF_MII_BUSY) == 0)
389 break;
390 }
391
392 return (0);
393 }
394
395 static void
396 sf_miibus_statchg(device_t dev)
397 {
398 struct sf_softc *sc;
399 struct mii_data *mii;
400 struct ifnet *ifp;
401 uint32_t val;
402
403 sc = device_get_softc(dev);
404 mii = device_get_softc(sc->sf_miibus);
405 ifp = sc->sf_ifp;
406 if (mii == NULL || ifp == NULL ||
407 (ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
408 return;
409
410 if (mii->mii_media_status & IFM_ACTIVE) {
411 if (IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE)
412 sc->sf_link = 1;
413 } else
414 sc->sf_link = 0;
415
416 val = csr_read_4(sc, SF_MACCFG_1);
417 val &= ~SF_MACCFG1_FULLDUPLEX;
418 val &= ~(SF_MACCFG1_RX_FLOWENB | SF_MACCFG1_TX_FLOWENB);
419 if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0) {
420 val |= SF_MACCFG1_FULLDUPLEX;
421 csr_write_4(sc, SF_BKTOBKIPG, SF_IPGT_FDX);
422 #ifdef notyet
423 /* Configure flow-control bits. */
424 if ((IFM_OPTIONS(sc->sc_mii->mii_media_active) &
425 IFM_ETH_RXPAUSE) != 0)
426 val |= SF_MACCFG1_RX_FLOWENB;
427 if ((IFM_OPTIONS(sc->sc_mii->mii_media_active) &
428 IFM_ETH_TXPAUSE) != 0)
429 val |= SF_MACCFG1_TX_FLOWENB;
430 #endif
431 } else
432 csr_write_4(sc, SF_BKTOBKIPG, SF_IPGT_HDX);
433
434 /* Reset the MAC so that the changes take effect. */
435 csr_write_4(sc, SF_MACCFG_1, val | SF_MACCFG1_SOFTRESET);
436 DELAY(1000);
437 csr_write_4(sc, SF_MACCFG_1, val);
438
439 val = csr_read_4(sc, SF_TIMER_CTL);
440 if (IFM_SUBTYPE(mii->mii_media_active) == IFM_100_TX)
441 val |= SF_TIMER_TIMES_TEN;
442 else
443 val &= ~SF_TIMER_TIMES_TEN;
444 csr_write_4(sc, SF_TIMER_CTL, val);
445 }
446
447 static void
448 sf_rxfilter(struct sf_softc *sc)
449 {
450 struct ifnet *ifp;
451 int i;
452 struct ifmultiaddr *ifma;
453 uint8_t dummy[ETHER_ADDR_LEN] = { 0, 0, 0, 0, 0, 0 };
454 uint32_t rxfilt;
455
456 ifp = sc->sf_ifp;
457
458 /* First zot all the existing filters. */
459 for (i = 1; i < SF_RXFILT_PERFECT_CNT; i++)
460 sf_setperf(sc, i, dummy);
461 for (i = SF_RXFILT_HASH_BASE; i < (SF_RXFILT_HASH_MAX + 1);
462 i += sizeof(uint32_t))
463 csr_write_4(sc, i, 0);
464
465 rxfilt = csr_read_4(sc, SF_RXFILT);
466 rxfilt &= ~(SF_RXFILT_PROMISC | SF_RXFILT_ALLMULTI | SF_RXFILT_BROAD);
467 if ((ifp->if_flags & IFF_BROADCAST) != 0)
468 rxfilt |= SF_RXFILT_BROAD;
469 if ((ifp->if_flags & IFF_ALLMULTI) != 0 ||
470 (ifp->if_flags & IFF_PROMISC) != 0) {
471 if ((ifp->if_flags & IFF_PROMISC) != 0)
472 rxfilt |= SF_RXFILT_PROMISC;
473 if ((ifp->if_flags & IFF_ALLMULTI) != 0)
474 rxfilt |= SF_RXFILT_ALLMULTI;
475 goto done;
476 }
477
478 /* Now program new ones. */
479 i = 1;
480 IF_ADDR_LOCK(ifp);
481 TAILQ_FOREACH_REVERSE(ifma, &ifp->if_multiaddrs, ifmultihead,
482 ifma_link) {
483 if (ifma->ifma_addr->sa_family != AF_LINK)
484 continue;
485 /*
486 * Program the first 15 multicast groups
487 * into the perfect filter. For all others,
488 * use the hash table.
489 */
490 if (i < SF_RXFILT_PERFECT_CNT) {
491 sf_setperf(sc, i,
492 LLADDR((struct sockaddr_dl *)ifma->ifma_addr));
493 i++;
494 continue;
495 }
496
497 sf_sethash(sc,
498 LLADDR((struct sockaddr_dl *)ifma->ifma_addr), 0);
499 }
500 IF_ADDR_UNLOCK(ifp);
501
502 done:
503 csr_write_4(sc, SF_RXFILT, rxfilt);
504 }
505
506 /*
507 * Set media options.
508 */
509 static int
510 sf_ifmedia_upd(struct ifnet *ifp)
511 {
512 struct sf_softc *sc;
513 int error;
514
515 sc = ifp->if_softc;
516 SF_LOCK(sc);
517 error = sf_ifmedia_upd_locked(ifp);
518 SF_UNLOCK(sc);
519 return (error);
520 }
521
522 static int
523 sf_ifmedia_upd_locked(struct ifnet *ifp)
524 {
525 struct sf_softc *sc;
526 struct mii_data *mii;
527 struct mii_softc *miisc;
528
529 sc = ifp->if_softc;
530 mii = device_get_softc(sc->sf_miibus);
531 LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
532 mii_phy_reset(miisc);
533 return (mii_mediachg(mii));
534 }
535
536 /*
537 * Report current media status.
538 */
539 static void
540 sf_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
541 {
542 struct sf_softc *sc;
543 struct mii_data *mii;
544
545 sc = ifp->if_softc;
546 SF_LOCK(sc);
547 if ((ifp->if_flags & IFF_UP) == 0) {
548 SF_UNLOCK(sc);
549 return;
550 }
551
552 mii = device_get_softc(sc->sf_miibus);
553 mii_pollstat(mii);
554 ifmr->ifm_active = mii->mii_media_active;
555 ifmr->ifm_status = mii->mii_media_status;
556 SF_UNLOCK(sc);
557 }
558
559 static int
560 sf_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
561 {
562 struct sf_softc *sc;
563 struct ifreq *ifr;
564 struct mii_data *mii;
565 int error, mask;
566
567 sc = ifp->if_softc;
568 ifr = (struct ifreq *)data;
569 error = 0;
570
571 switch (command) {
572 case SIOCSIFFLAGS:
573 SF_LOCK(sc);
574 if (ifp->if_flags & IFF_UP) {
575 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
576 if ((ifp->if_flags ^ sc->sf_if_flags) &
577 (IFF_PROMISC | IFF_ALLMULTI))
578 sf_rxfilter(sc);
579 } else {
580 if (sc->sf_detach == 0)
581 sf_init_locked(sc);
582 }
583 } else {
584 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
585 sf_stop(sc);
586 }
587 sc->sf_if_flags = ifp->if_flags;
588 SF_UNLOCK(sc);
589 break;
590 case SIOCADDMULTI:
591 case SIOCDELMULTI:
592 SF_LOCK(sc);
593 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
594 sf_rxfilter(sc);
595 SF_UNLOCK(sc);
596 break;
597 case SIOCGIFMEDIA:
598 case SIOCSIFMEDIA:
599 mii = device_get_softc(sc->sf_miibus);
600 error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
601 break;
602 case SIOCSIFCAP:
603 mask = ifr->ifr_reqcap ^ ifp->if_capenable;
604 #ifdef DEVICE_POLLING
605 if ((mask & IFCAP_POLLING) != 0) {
606 if ((ifr->ifr_reqcap & IFCAP_POLLING) != 0) {
607 error = ether_poll_register(sf_poll, ifp);
608 if (error != 0)
609 break;
610 SF_LOCK(sc);
611 /* Disable interrupts. */
612 csr_write_4(sc, SF_IMR, 0);
613 ifp->if_capenable |= IFCAP_POLLING;
614 SF_UNLOCK(sc);
615 } else {
616 error = ether_poll_deregister(ifp);
617 /* Enable interrupts. */
618 SF_LOCK(sc);
619 csr_write_4(sc, SF_IMR, SF_INTRS);
620 ifp->if_capenable &= ~IFCAP_POLLING;
621 SF_UNLOCK(sc);
622 }
623 }
624 #endif /* DEVICE_POLLING */
625 if ((mask & IFCAP_TXCSUM) != 0) {
626 if ((IFCAP_TXCSUM & ifp->if_capabilities) != 0) {
627 SF_LOCK(sc);
628 ifp->if_capenable ^= IFCAP_TXCSUM;
629 if ((IFCAP_TXCSUM & ifp->if_capenable) != 0) {
630 ifp->if_hwassist |= SF_CSUM_FEATURES;
631 SF_SETBIT(sc, SF_GEN_ETH_CTL,
632 SF_ETHCTL_TXGFP_ENB);
633 } else {
634 ifp->if_hwassist &= ~SF_CSUM_FEATURES;
635 SF_CLRBIT(sc, SF_GEN_ETH_CTL,
636 SF_ETHCTL_TXGFP_ENB);
637 }
638 SF_UNLOCK(sc);
639 }
640 }
641 if ((mask & IFCAP_RXCSUM) != 0) {
642 if ((IFCAP_RXCSUM & ifp->if_capabilities) != 0) {
643 SF_LOCK(sc);
644 ifp->if_capenable ^= IFCAP_RXCSUM;
645 if ((IFCAP_RXCSUM & ifp->if_capenable) != 0)
646 SF_SETBIT(sc, SF_GEN_ETH_CTL,
647 SF_ETHCTL_RXGFP_ENB);
648 else
649 SF_CLRBIT(sc, SF_GEN_ETH_CTL,
650 SF_ETHCTL_RXGFP_ENB);
651 SF_UNLOCK(sc);
652 }
653 }
654 break;
655 default:
656 error = ether_ioctl(ifp, command, data);
657 break;
658 }
659
660 return (error);
661 }
662
663 static void
664 sf_reset(struct sf_softc *sc)
665 {
666 int i;
667
668 csr_write_4(sc, SF_GEN_ETH_CTL, 0);
669 SF_SETBIT(sc, SF_MACCFG_1, SF_MACCFG1_SOFTRESET);
670 DELAY(1000);
671 SF_CLRBIT(sc, SF_MACCFG_1, SF_MACCFG1_SOFTRESET);
672
673 SF_SETBIT(sc, SF_PCI_DEVCFG, SF_PCIDEVCFG_RESET);
674
675 for (i = 0; i < SF_TIMEOUT; i++) {
676 DELAY(10);
677 if (!(csr_read_4(sc, SF_PCI_DEVCFG) & SF_PCIDEVCFG_RESET))
678 break;
679 }
680
681 if (i == SF_TIMEOUT)
682 device_printf(sc->sf_dev, "reset never completed!\n");
683
684 /* Wait a little while for the chip to get its brains in order. */
685 DELAY(1000);
686 }
687
688 /*
689 * Probe for an Adaptec AIC-6915 chip. Check the PCI vendor and device
690 * IDs against our list and return a device name if we find a match.
691 * We also check the subsystem ID so that we can identify exactly which
692 * NIC has been found, if possible.
693 */
694 static int
695 sf_probe(device_t dev)
696 {
697 struct sf_type *t;
698 uint16_t vid;
699 uint16_t did;
700 uint16_t sdid;
701 int i;
702
703 vid = pci_get_vendor(dev);
704 did = pci_get_device(dev);
705 sdid = pci_get_subdevice(dev);
706
707 t = sf_devs;
708 for (i = 0; i < sizeof(sf_devs) / sizeof(sf_devs[0]); i++, t++) {
709 if (vid == t->sf_vid && did == t->sf_did) {
710 if (sdid == t->sf_sdid) {
711 device_set_desc(dev, t->sf_sname);
712 return (BUS_PROBE_DEFAULT);
713 }
714 }
715 }
716
717 if (vid == AD_VENDORID && did == AD_DEVICEID_STARFIRE) {
718 /* unknown subdevice */
719 device_set_desc(dev, sf_devs[0].sf_name);
720 return (BUS_PROBE_DEFAULT);
721 }
722
723 return (ENXIO);
724 }
725
726 /*
727 * Attach the interface. Allocate softc structures, do ifmedia
728 * setup and ethernet/BPF attach.
729 */
730 static int
731 sf_attach(device_t dev)
732 {
733 int i;
734 struct sf_softc *sc;
735 struct ifnet *ifp;
736 uint32_t reg;
737 int rid, error = 0;
738 uint8_t eaddr[ETHER_ADDR_LEN];
739
740 sc = device_get_softc(dev);
741 sc->sf_dev = dev;
742
743 mtx_init(&sc->sf_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
744 MTX_DEF);
745 callout_init_mtx(&sc->sf_co, &sc->sf_mtx, 0);
746
747 /*
748 * Map control/status registers.
749 */
750 pci_enable_busmaster(dev);
751
752 /*
753 * Prefer memory space register mapping over I/O space as the
754 * hardware requires lots of register accesses to fetch the various
755 * producer/consumer indices during Tx/Rx operation. However, this
756 * requires a large memory space (512K) to map the entire register
757 * space.
758 */
759 sc->sf_rid = PCIR_BAR(0);
760 sc->sf_restype = SYS_RES_MEMORY;
761 sc->sf_res = bus_alloc_resource_any(dev, sc->sf_restype, &sc->sf_rid,
762 RF_ACTIVE);
763 if (sc->sf_res == NULL) {
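/*
 * A 64-bit memory BAR at BAR(0) consumes two BAR registers,
 * so the I/O BAR follows at BAR(2); otherwise it is BAR(1).
 */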
764 reg = pci_read_config(dev, PCIR_BAR(0), 4);
765 if ((reg & PCIM_BAR_MEM_64) == PCIM_BAR_MEM_64)
766 sc->sf_rid = PCIR_BAR(2);
767 else
768 sc->sf_rid = PCIR_BAR(1);
769 sc->sf_restype = SYS_RES_IOPORT;
770 sc->sf_res = bus_alloc_resource_any(dev, sc->sf_restype,
771 &sc->sf_rid, RF_ACTIVE);
772 if (sc->sf_res == NULL) {
773 device_printf(dev, "couldn't allocate resources\n");
774 mtx_destroy(&sc->sf_mtx);
775 return (ENXIO);
776 }
777 }
778 if (bootverbose)
779 device_printf(dev, "using %s space register mapping\n",
780 sc->sf_restype == SYS_RES_MEMORY ? "memory" : "I/O");
781
782 reg = pci_read_config(dev, PCIR_CACHELNSZ, 1);
783 if (reg == 0) {
784 /*
785 * If the cache line size is 0, MWI is not used at all, so set
786 * a reasonable default. The AIC-6915 supports 0, 4, 8, 16, 32
787 * and 64.
788 */
789 reg = 16;
790 device_printf(dev, "setting PCI cache line size to %u\n", reg);
791 pci_write_config(dev, PCIR_CACHELNSZ, reg, 1);
792 } else {
793 if (bootverbose)
794 device_printf(dev, "PCI cache line size : %u\n", reg);
795 }
796 /* Enable MWI. */
797 reg = pci_read_config(dev, PCIR_COMMAND, 2);
798 reg |= PCIM_CMD_MWRICEN;
799 pci_write_config(dev, PCIR_COMMAND, reg, 2);
800
801 /* Allocate interrupt. */
802 rid = 0;
803 sc->sf_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
804 RF_SHAREABLE | RF_ACTIVE);
805
806 if (sc->sf_irq == NULL) {
807 device_printf(dev, "couldn't map interrupt\n");
808 error = ENXIO;
809 goto fail;
810 }
811
812 SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
813 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
814 OID_AUTO, "stats", CTLTYPE_INT | CTLFLAG_RW, sc, 0,
815 sf_sysctl_stats, "I", "Statistics");
816
817 SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
818 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
819 OID_AUTO, "int_mod", CTLTYPE_INT | CTLFLAG_RW,
820 &sc->sf_int_mod, 0, sysctl_hw_sf_int_mod, "I",
821 "sf interrupt moderation");
822 /* Pull in device tunables. */
823 sc->sf_int_mod = SF_IM_DEFAULT;
824 error = resource_int_value(device_get_name(dev), device_get_unit(dev),
825 "int_mod", &sc->sf_int_mod);
826 if (error == 0) {
827 if (sc->sf_int_mod < SF_IM_MIN ||
828 sc->sf_int_mod > SF_IM_MAX) {
829 device_printf(dev, "int_mod value out of range; "
830 "using default: %d\n", SF_IM_DEFAULT);
831 sc->sf_int_mod = SF_IM_DEFAULT;
832 }
833 }
834
835 /* Reset the adapter. */
836 sf_reset(sc);
837
838 /*
839 * Get station address from the EEPROM.
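* The address bytes are stored in reverse order.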
840 */
841 for (i = 0; i < ETHER_ADDR_LEN; i++)
842 eaddr[i] =
843 sf_read_eeprom(sc, SF_EE_NODEADDR + ETHER_ADDR_LEN - i);
844
845 /* Allocate DMA resources. */
846 if (sf_dma_alloc(sc) != 0) {
847 error = ENOSPC;
848 goto fail;
849 }
850
851 sc->sf_txthresh = SF_MIN_TX_THRESHOLD;
852
853 ifp = sc->sf_ifp = if_alloc(IFT_ETHER);
854 if (ifp == NULL) {
855 device_printf(dev, "can not allocate ifnet structure\n");
856 error = ENOSPC;
857 goto fail;
858 }
859
860 /* Do MII setup. */
861 error = mii_attach(dev, &sc->sf_miibus, ifp, sf_ifmedia_upd,
862 sf_ifmedia_sts, BMSR_DEFCAPMASK, MII_PHY_ANY, MII_OFFSET_ANY, 0);
863 if (error != 0) {
864 device_printf(dev, "attaching PHYs failed\n");
865 goto fail;
866 }
867
868 ifp->if_softc = sc;
869 if_initname(ifp, device_get_name(dev), device_get_unit(dev));
870 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
871 ifp->if_ioctl = sf_ioctl;
872 ifp->if_start = sf_start;
873 ifp->if_init = sf_init;
874 IFQ_SET_MAXLEN(&ifp->if_snd, SF_TX_DLIST_CNT - 1);
875 ifp->if_snd.ifq_drv_maxlen = SF_TX_DLIST_CNT - 1;
876 IFQ_SET_READY(&ifp->if_snd);
877 /*
878 * With the help of firmware, AIC-6915 supports
879 * Tx/Rx TCP/UDP checksum offload.
880 */
881 ifp->if_hwassist = SF_CSUM_FEATURES;
882 ifp->if_capabilities = IFCAP_HWCSUM;
883
884 /*
885 * Call MI attach routine.
886 */
887 ether_ifattach(ifp, eaddr);
888
889 /* VLAN capability setup. */
890 ifp->if_capabilities |= IFCAP_VLAN_MTU;
891 ifp->if_capenable = ifp->if_capabilities;
892 #ifdef DEVICE_POLLING
893 ifp->if_capabilities |= IFCAP_POLLING;
894 #endif
895 /*
896 * Tell the upper layer(s) we support long frames.
897 * Must appear after the call to ether_ifattach() because
898 * ether_ifattach() sets ifi_hdrlen to the default value.
899 */
900 ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
901
902 /* Hook interrupt last to avoid having to lock softc */
903 error = bus_setup_intr(dev, sc->sf_irq, INTR_TYPE_NET | INTR_MPSAFE,
904 NULL, sf_intr, sc, &sc->sf_intrhand);
905
906 if (error) {
907 device_printf(dev, "couldn't set up irq\n");
908 ether_ifdetach(ifp);
909 goto fail;
910 }
911
912 fail:
913 if (error)
914 sf_detach(dev);
915
916 return (error);
917 }
918
919 /*
920 * Shutdown hardware and free up resources. This can be called any
921 * time after the mutex has been initialized. It is called in both
922 * the error case in attach and the normal detach case so it needs
923 * to be careful about only freeing resources that have actually been
924 * allocated.
925 */
926 static int
927 sf_detach(device_t dev)
928 {
929 struct sf_softc *sc;
930 struct ifnet *ifp;
931
932 sc = device_get_softc(dev);
933 ifp = sc->sf_ifp;
934
935 #ifdef DEVICE_POLLING
936 if (ifp != NULL && ifp->if_capenable & IFCAP_POLLING)
937 ether_poll_deregister(ifp);
938 #endif
939
940 /* These should only be active if attach succeeded */
941 if (device_is_attached(dev)) {
942 SF_LOCK(sc);
943 sc->sf_detach = 1;
944 sf_stop(sc);
945 SF_UNLOCK(sc);
946 callout_drain(&sc->sf_co);
947 if (ifp != NULL)
948 ether_ifdetach(ifp);
949 }
950 if (sc->sf_miibus) {
951 device_delete_child(dev, sc->sf_miibus);
952 sc->sf_miibus = NULL;
953 }
954 bus_generic_detach(dev);
955
956 if (sc->sf_intrhand != NULL)
957 bus_teardown_intr(dev, sc->sf_irq, sc->sf_intrhand);
958 if (sc->sf_irq != NULL)
959 bus_release_resource(dev, SYS_RES_IRQ, 0, sc->sf_irq);
960 if (sc->sf_res != NULL)
961 bus_release_resource(dev, sc->sf_restype, sc->sf_rid,
962 sc->sf_res);
963
964 sf_dma_free(sc);
965 if (ifp != NULL)
966 if_free(ifp);
967
968 mtx_destroy(&sc->sf_mtx);
969
970 return (0);
971 }
972
973 struct sf_dmamap_arg {
974 bus_addr_t sf_busaddr;
975 };
976
977 static void
978 sf_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
979 {
980 struct sf_dmamap_arg *ctx;
981
982 if (error != 0)
983 return;
984 ctx = arg;
985 ctx->sf_busaddr = segs[0].ds_addr;
986 }
987
988 static int
989 sf_dma_alloc(struct sf_softc *sc)
990 {
991 struct sf_dmamap_arg ctx;
992 struct sf_txdesc *txd;
993 struct sf_rxdesc *rxd;
994 bus_addr_t lowaddr;
995 bus_addr_t rx_ring_end, rx_cring_end;
996 bus_addr_t tx_ring_end, tx_cring_end;
997 int error, i;
998
999 lowaddr = BUS_SPACE_MAXADDR;
1000
1001 again:
1002 /* Create parent DMA tag. */
1003 error = bus_dma_tag_create(
1004 bus_get_dma_tag(sc->sf_dev), /* parent */
1005 1, 0, /* alignment, boundary */
1006 lowaddr, /* lowaddr */
1007 BUS_SPACE_MAXADDR, /* highaddr */
1008 NULL, NULL, /* filter, filterarg */
1009 BUS_SPACE_MAXSIZE_32BIT, /* maxsize */
1010 0, /* nsegments */
1011 BUS_SPACE_MAXSIZE_32BIT, /* maxsegsize */
1012 0, /* flags */
1013 NULL, NULL, /* lockfunc, lockarg */
1014 &sc->sf_cdata.sf_parent_tag);
1015 if (error != 0) {
1016 device_printf(sc->sf_dev, "failed to create parent DMA tag\n");
1017 goto fail;
1018 }
1019 /* Create tag for Tx ring. */
1020 error = bus_dma_tag_create(sc->sf_cdata.sf_parent_tag,/* parent */
1021 SF_RING_ALIGN, 0, /* alignment, boundary */
1022 BUS_SPACE_MAXADDR, /* lowaddr */
1023 BUS_SPACE_MAXADDR, /* highaddr */
1024 NULL, NULL, /* filter, filterarg */
1025 SF_TX_DLIST_SIZE, /* maxsize */
1026 1, /* nsegments */
1027 SF_TX_DLIST_SIZE, /* maxsegsize */
1028 0, /* flags */
1029 NULL, NULL, /* lockfunc, lockarg */
1030 &sc->sf_cdata.sf_tx_ring_tag);
1031 if (error != 0) {
1032 device_printf(sc->sf_dev, "failed to create Tx ring DMA tag\n");
1033 goto fail;
1034 }
1035
1036 /* Create tag for Tx completion ring. */
1037 error = bus_dma_tag_create(sc->sf_cdata.sf_parent_tag,/* parent */
1038 SF_RING_ALIGN, 0, /* alignment, boundary */
1039 BUS_SPACE_MAXADDR, /* lowaddr */
1040 BUS_SPACE_MAXADDR, /* highaddr */
1041 NULL, NULL, /* filter, filterarg */
1042 SF_TX_CLIST_SIZE, /* maxsize */
1043 1, /* nsegments */
1044 SF_TX_CLIST_SIZE, /* maxsegsize */
1045 0, /* flags */
1046 NULL, NULL, /* lockfunc, lockarg */
1047 &sc->sf_cdata.sf_tx_cring_tag);
1048 if (error != 0) {
1049 device_printf(sc->sf_dev,
1050 "failed to create Tx completion ring DMA tag\n");
1051 goto fail;
1052 }
1053
1054 /* Create tag for Rx ring. */
1055 error = bus_dma_tag_create(sc->sf_cdata.sf_parent_tag,/* parent */
1056 SF_RING_ALIGN, 0, /* alignment, boundary */
1057 BUS_SPACE_MAXADDR, /* lowaddr */
1058 BUS_SPACE_MAXADDR, /* highaddr */
1059 NULL, NULL, /* filter, filterarg */
1060 SF_RX_DLIST_SIZE, /* maxsize */
1061 1, /* nsegments */
1062 SF_RX_DLIST_SIZE, /* maxsegsize */
1063 0, /* flags */
1064 NULL, NULL, /* lockfunc, lockarg */
1065 &sc->sf_cdata.sf_rx_ring_tag);
1066 if (error != 0) {
1067 device_printf(sc->sf_dev,
1068 "failed to create Rx ring DMA tag\n");
1069 goto fail;
1070 }
1071
1072 /* Create tag for Rx completion ring. */
1073 error = bus_dma_tag_create(sc->sf_cdata.sf_parent_tag,/* parent */
1074 SF_RING_ALIGN, 0, /* alignment, boundary */
1075 BUS_SPACE_MAXADDR, /* lowaddr */
1076 BUS_SPACE_MAXADDR, /* highaddr */
1077 NULL, NULL, /* filter, filterarg */
1078 SF_RX_CLIST_SIZE, /* maxsize */
1079 1, /* nsegments */
1080 SF_RX_CLIST_SIZE, /* maxsegsize */
1081 0, /* flags */
1082 NULL, NULL, /* lockfunc, lockarg */
1083 &sc->sf_cdata.sf_rx_cring_tag);
1084 if (error != 0) {
1085 device_printf(sc->sf_dev,
1086 "failed to create Rx completion ring DMA tag\n");
1087 goto fail;
1088 }
1089
1090 /* Create tag for Tx buffers. */
1091 error = bus_dma_tag_create(sc->sf_cdata.sf_parent_tag,/* parent */
1092 1, 0, /* alignment, boundary */
1093 BUS_SPACE_MAXADDR, /* lowaddr */
1094 BUS_SPACE_MAXADDR, /* highaddr */
1095 NULL, NULL, /* filter, filterarg */
1096 MCLBYTES * SF_MAXTXSEGS, /* maxsize */
1097 SF_MAXTXSEGS, /* nsegments */
1098 MCLBYTES, /* maxsegsize */
1099 0, /* flags */
1100 NULL, NULL, /* lockfunc, lockarg */
1101 &sc->sf_cdata.sf_tx_tag);
1102 if (error != 0) {
1103 device_printf(sc->sf_dev, "failed to create Tx DMA tag\n");
1104 goto fail;
1105 }
1106
1107 /* Create tag for Rx buffers. */
1108 error = bus_dma_tag_create(sc->sf_cdata.sf_parent_tag,/* parent */
1109 SF_RX_ALIGN, 0, /* alignment, boundary */
1110 BUS_SPACE_MAXADDR, /* lowaddr */
1111 BUS_SPACE_MAXADDR, /* highaddr */
1112 NULL, NULL, /* filter, filterarg */
1113 MCLBYTES, /* maxsize */
1114 1, /* nsegments */
1115 MCLBYTES, /* maxsegsize */
1116 0, /* flags */
1117 NULL, NULL, /* lockfunc, lockarg */
1118 &sc->sf_cdata.sf_rx_tag);
1119 if (error != 0) {
1120 device_printf(sc->sf_dev, "failed to create Rx DMA tag\n");
1121 goto fail;
1122 }
1123
1124 /* Allocate DMA'able memory and load the DMA map for Tx ring. */
1125 error = bus_dmamem_alloc(sc->sf_cdata.sf_tx_ring_tag,
1126 (void **)&sc->sf_rdata.sf_tx_ring, BUS_DMA_WAITOK |
1127 BUS_DMA_COHERENT | BUS_DMA_ZERO, &sc->sf_cdata.sf_tx_ring_map);
1128 if (error != 0) {
1129 device_printf(sc->sf_dev,
1130 "failed to allocate DMA'able memory for Tx ring\n");
1131 goto fail;
1132 }
1133
1134 ctx.sf_busaddr = 0;
1135 error = bus_dmamap_load(sc->sf_cdata.sf_tx_ring_tag,
1136 sc->sf_cdata.sf_tx_ring_map, sc->sf_rdata.sf_tx_ring,
1137 SF_TX_DLIST_SIZE, sf_dmamap_cb, &ctx, 0);
1138 if (error != 0 || ctx.sf_busaddr == 0) {
1139 device_printf(sc->sf_dev,
1140 "failed to load DMA'able memory for Tx ring\n");
1141 goto fail;
1142 }
1143 sc->sf_rdata.sf_tx_ring_paddr = ctx.sf_busaddr;
1144
1145 /*
1146 * Allocate DMA'able memory and load the DMA map for Tx completion ring.
1147 */
1148 error = bus_dmamem_alloc(sc->sf_cdata.sf_tx_cring_tag,
1149 (void **)&sc->sf_rdata.sf_tx_cring, BUS_DMA_WAITOK |
1150 BUS_DMA_COHERENT | BUS_DMA_ZERO, &sc->sf_cdata.sf_tx_cring_map);
1151 if (error != 0) {
1152 device_printf(sc->sf_dev,
1153 "failed to allocate DMA'able memory for "
1154 "Tx completion ring\n");
1155 goto fail;
1156 }
1157
1158 ctx.sf_busaddr = 0;
1159 error = bus_dmamap_load(sc->sf_cdata.sf_tx_cring_tag,
1160 sc->sf_cdata.sf_tx_cring_map, sc->sf_rdata.sf_tx_cring,
1161 SF_TX_CLIST_SIZE, sf_dmamap_cb, &ctx, 0);
1162 if (error != 0 || ctx.sf_busaddr == 0) {
1163 device_printf(sc->sf_dev,
1164 "failed to load DMA'able memory for Tx completion ring\n");
1165 goto fail;
1166 }
1167 sc->sf_rdata.sf_tx_cring_paddr = ctx.sf_busaddr;
1168
1169 /* Allocate DMA'able memory and load the DMA map for Rx ring. */
1170 error = bus_dmamem_alloc(sc->sf_cdata.sf_rx_ring_tag,
1171 (void **)&sc->sf_rdata.sf_rx_ring, BUS_DMA_WAITOK |
1172 BUS_DMA_COHERENT | BUS_DMA_ZERO, &sc->sf_cdata.sf_rx_ring_map);
1173 if (error != 0) {
1174 device_printf(sc->sf_dev,
1175 "failed to allocate DMA'able memory for Rx ring\n");
1176 goto fail;
1177 }
1178
1179 ctx.sf_busaddr = 0;
1180 error = bus_dmamap_load(sc->sf_cdata.sf_rx_ring_tag,
1181 sc->sf_cdata.sf_rx_ring_map, sc->sf_rdata.sf_rx_ring,
1182 SF_RX_DLIST_SIZE, sf_dmamap_cb, &ctx, 0);
1183 if (error != 0 || ctx.sf_busaddr == 0) {
1184 device_printf(sc->sf_dev,
1185 "failed to load DMA'able memory for Rx ring\n");
1186 goto fail;
1187 }
1188 sc->sf_rdata.sf_rx_ring_paddr = ctx.sf_busaddr;
1189
1190 /*
1191 * Allocate DMA'able memory and load the DMA map for Rx completion ring.
1192 */
1193 error = bus_dmamem_alloc(sc->sf_cdata.sf_rx_cring_tag,
1194 (void **)&sc->sf_rdata.sf_rx_cring, BUS_DMA_WAITOK |
1195 BUS_DMA_COHERENT | BUS_DMA_ZERO, &sc->sf_cdata.sf_rx_cring_map);
1196 if (error != 0) {
1197 device_printf(sc->sf_dev,
1198 "failed to allocate DMA'able memory for "
1199 "Rx completion ring\n");
1200 goto fail;
1201 }
1202
1203 ctx.sf_busaddr = 0;
1204 error = bus_dmamap_load(sc->sf_cdata.sf_rx_cring_tag,
1205 sc->sf_cdata.sf_rx_cring_map, sc->sf_rdata.sf_rx_cring,
1206 SF_RX_CLIST_SIZE, sf_dmamap_cb, &ctx, 0);
1207 if (error != 0 || ctx.sf_busaddr == 0) {
1208 device_printf(sc->sf_dev,
1209 "failed to load DMA'able memory for Rx completion ring\n");
1210 goto fail;
1211 }
1212 sc->sf_rdata.sf_rx_cring_paddr = ctx.sf_busaddr;
1213
1214 /*
1215 * The Tx descriptor ring and Tx completion ring should be addressed
1216 * in the same 4GB space. The same rule applies to the Rx ring and
1217 * Rx completion ring. Unfortunately there is no way to specify this
1218 * boundary restriction with bus_dma(9), so just try to allocate
1219 * without the restriction and check whether the restriction was
1220 * satisfied. If not, fall back to 32-bit DMA addressing mode, which
1221 * always guarantees the restriction.
1222 */
1223 tx_ring_end = sc->sf_rdata.sf_tx_ring_paddr + SF_TX_DLIST_SIZE;
1224 tx_cring_end = sc->sf_rdata.sf_tx_cring_paddr + SF_TX_CLIST_SIZE;
1225 rx_ring_end = sc->sf_rdata.sf_rx_ring_paddr + SF_RX_DLIST_SIZE;
1226 rx_cring_end = sc->sf_rdata.sf_rx_cring_paddr + SF_RX_CLIST_SIZE;
1227 if ((SF_ADDR_HI(sc->sf_rdata.sf_tx_ring_paddr) !=
1228 SF_ADDR_HI(tx_cring_end)) ||
1229 (SF_ADDR_HI(sc->sf_rdata.sf_tx_cring_paddr) !=
1230 SF_ADDR_HI(tx_ring_end)) ||
1231 (SF_ADDR_HI(sc->sf_rdata.sf_rx_ring_paddr) !=
1232 SF_ADDR_HI(rx_cring_end)) ||
1233 (SF_ADDR_HI(sc->sf_rdata.sf_rx_cring_paddr) !=
1234 SF_ADDR_HI(rx_ring_end))) {
1235 device_printf(sc->sf_dev,
1236 "switching to 32bit DMA mode\n");
1237 sf_dma_free(sc);
1238 /* Limit DMA address space to 32bit and try again. */
1239 lowaddr = BUS_SPACE_MAXADDR_32BIT;
1240 goto again;
1241 }
1242
1243 /* Create DMA maps for Tx buffers. */
1244 for (i = 0; i < SF_TX_DLIST_CNT; i++) {
1245 txd = &sc->sf_cdata.sf_txdesc[i];
1246 txd->tx_m = NULL;
1247 txd->ndesc = 0;
1248 txd->tx_dmamap = NULL;
1249 error = bus_dmamap_create(sc->sf_cdata.sf_tx_tag, 0,
1250 &txd->tx_dmamap);
1251 if (error != 0) {
1252 device_printf(sc->sf_dev,
1253 "failed to create Tx dmamap\n");
1254 goto fail;
1255 }
1256 }
1257 /* Create DMA maps for Rx buffers. */
1258 if ((error = bus_dmamap_create(sc->sf_cdata.sf_rx_tag, 0,
1259 &sc->sf_cdata.sf_rx_sparemap)) != 0) {
1260 device_printf(sc->sf_dev,
1261 "failed to create spare Rx dmamap\n");
1262 goto fail;
1263 }
1264 for (i = 0; i < SF_RX_DLIST_CNT; i++) {
1265 rxd = &sc->sf_cdata.sf_rxdesc[i];
1266 rxd->rx_m = NULL;
1267 rxd->rx_dmamap = NULL;
1268 error = bus_dmamap_create(sc->sf_cdata.sf_rx_tag, 0,
1269 &rxd->rx_dmamap);
1270 if (error != 0) {
1271 device_printf(sc->sf_dev,
1272 "failed to create Rx dmamap\n");
1273 goto fail;
1274 }
1275 }
1276
1277 fail:
1278 return (error);
1279 }
1280
1281 static void
1282 sf_dma_free(struct sf_softc *sc)
1283 {
1284 struct sf_txdesc *txd;
1285 struct sf_rxdesc *rxd;
1286 int i;
1287
1288 /* Tx ring. */
1289 if (sc->sf_cdata.sf_tx_ring_tag) {
1290 if (sc->sf_cdata.sf_tx_ring_map)
1291 bus_dmamap_unload(sc->sf_cdata.sf_tx_ring_tag,
1292 sc->sf_cdata.sf_tx_ring_map);
1293 if (sc->sf_cdata.sf_tx_ring_map &&
1294 sc->sf_rdata.sf_tx_ring)
1295 bus_dmamem_free(sc->sf_cdata.sf_tx_ring_tag,
1296 sc->sf_rdata.sf_tx_ring,
1297 sc->sf_cdata.sf_tx_ring_map);
1298 sc->sf_rdata.sf_tx_ring = NULL;
1299 sc->sf_cdata.sf_tx_ring_map = NULL;
1300 bus_dma_tag_destroy(sc->sf_cdata.sf_tx_ring_tag);
1301 sc->sf_cdata.sf_tx_ring_tag = NULL;
1302 }
1303 /* Tx completion ring. */
1304 if (sc->sf_cdata.sf_tx_cring_tag) {
1305 if (sc->sf_cdata.sf_tx_cring_map)
1306 bus_dmamap_unload(sc->sf_cdata.sf_tx_cring_tag,
1307 sc->sf_cdata.sf_tx_cring_map);
1308 if (sc->sf_cdata.sf_tx_cring_map &&
1309 sc->sf_rdata.sf_tx_cring)
1310 bus_dmamem_free(sc->sf_cdata.sf_tx_cring_tag,
1311 sc->sf_rdata.sf_tx_cring,
1312 sc->sf_cdata.sf_tx_cring_map);
1313 sc->sf_rdata.sf_tx_cring = NULL;
1314 sc->sf_cdata.sf_tx_cring_map = NULL;
1315 bus_dma_tag_destroy(sc->sf_cdata.sf_tx_cring_tag);
1316 sc->sf_cdata.sf_tx_cring_tag = NULL;
1317 }
1318 /* Rx ring. */
1319 if (sc->sf_cdata.sf_rx_ring_tag) {
1320 if (sc->sf_cdata.sf_rx_ring_map)
1321 bus_dmamap_unload(sc->sf_cdata.sf_rx_ring_tag,
1322 sc->sf_cdata.sf_rx_ring_map);
1323 if (sc->sf_cdata.sf_rx_ring_map &&
1324 sc->sf_rdata.sf_rx_ring)
1325 bus_dmamem_free(sc->sf_cdata.sf_rx_ring_tag,
1326 sc->sf_rdata.sf_rx_ring,
1327 sc->sf_cdata.sf_rx_ring_map);
1328 sc->sf_rdata.sf_rx_ring = NULL;
1329 sc->sf_cdata.sf_rx_ring_map = NULL;
1330 bus_dma_tag_destroy(sc->sf_cdata.sf_rx_ring_tag);
1331 sc->sf_cdata.sf_rx_ring_tag = NULL;
1332 }
1333 /* Rx completion ring. */
1334 if (sc->sf_cdata.sf_rx_cring_tag) {
1335 if (sc->sf_cdata.sf_rx_cring_map)
1336 bus_dmamap_unload(sc->sf_cdata.sf_rx_cring_tag,
1337 sc->sf_cdata.sf_rx_cring_map);
1338 if (sc->sf_cdata.sf_rx_cring_map &&
1339 sc->sf_rdata.sf_rx_cring)
1340 bus_dmamem_free(sc->sf_cdata.sf_rx_cring_tag,
1341 sc->sf_rdata.sf_rx_cring,
1342 sc->sf_cdata.sf_rx_cring_map);
1343 sc->sf_rdata.sf_rx_cring = NULL;
1344 sc->sf_cdata.sf_rx_cring_map = NULL;
1345 bus_dma_tag_destroy(sc->sf_cdata.sf_rx_cring_tag);
1346 sc->sf_cdata.sf_rx_cring_tag = NULL;
1347 }
1348 /* Tx buffers. */
1349 if (sc->sf_cdata.sf_tx_tag) {
1350 for (i = 0; i < SF_TX_DLIST_CNT; i++) {
1351 txd = &sc->sf_cdata.sf_txdesc[i];
1352 if (txd->tx_dmamap) {
1353 bus_dmamap_destroy(sc->sf_cdata.sf_tx_tag,
1354 txd->tx_dmamap);
1355 txd->tx_dmamap = NULL;
1356 }
1357 }
1358 bus_dma_tag_destroy(sc->sf_cdata.sf_tx_tag);
1359 sc->sf_cdata.sf_tx_tag = NULL;
1360 }
1361 /* Rx buffers. */
1362 if (sc->sf_cdata.sf_rx_tag) {
1363 for (i = 0; i < SF_RX_DLIST_CNT; i++) {
1364 rxd = &sc->sf_cdata.sf_rxdesc[i];
1365 if (rxd->rx_dmamap) {
1366 bus_dmamap_destroy(sc->sf_cdata.sf_rx_tag,
1367 rxd->rx_dmamap);
1368 rxd->rx_dmamap = NULL;
1369 }
1370 }
1371 if (sc->sf_cdata.sf_rx_sparemap) {
1372 bus_dmamap_destroy(sc->sf_cdata.sf_rx_tag,
1373 sc->sf_cdata.sf_rx_sparemap);
1374 sc->sf_cdata.sf_rx_sparemap = NULL;
1375 }
1376 bus_dma_tag_destroy(sc->sf_cdata.sf_rx_tag);
1377 sc->sf_cdata.sf_rx_tag = NULL;
1378 }
1379
1380 if (sc->sf_cdata.sf_parent_tag) {
1381 bus_dma_tag_destroy(sc->sf_cdata.sf_parent_tag);
1382 sc->sf_cdata.sf_parent_tag = NULL;
1383 }
1384 }
1385
1386 static int
1387 sf_init_rx_ring(struct sf_softc *sc)
1388 {
1389 struct sf_ring_data *rd;
1390 int i;
1391
1392 sc->sf_cdata.sf_rxc_cons = 0;
1393
1394 rd = &sc->sf_rdata;
1395 bzero(rd->sf_rx_ring, SF_RX_DLIST_SIZE);
1396 bzero(rd->sf_rx_cring, SF_RX_CLIST_SIZE);
1397
1398 for (i = 0; i < SF_RX_DLIST_CNT; i++) {
1399 if (sf_newbuf(sc, i) != 0)
1400 return (ENOBUFS);
1401 }
1402
1403 bus_dmamap_sync(sc->sf_cdata.sf_rx_cring_tag,
1404 sc->sf_cdata.sf_rx_cring_map,
1405 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1406 bus_dmamap_sync(sc->sf_cdata.sf_rx_ring_tag,
1407 sc->sf_cdata.sf_rx_ring_map,
1408 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1409
1410 return (0);
1411 }
1412
1413 static void
1414 sf_init_tx_ring(struct sf_softc *sc)
1415 {
1416 struct sf_ring_data *rd;
1417 int i;
1418
1419 sc->sf_cdata.sf_tx_prod = 0;
1420 sc->sf_cdata.sf_tx_cnt = 0;
1421 sc->sf_cdata.sf_txc_cons = 0;
1422
1423 rd = &sc->sf_rdata;
1424 bzero(rd->sf_tx_ring, SF_TX_DLIST_SIZE);
1425 bzero(rd->sf_tx_cring, SF_TX_CLIST_SIZE);
1426 for (i = 0; i < SF_TX_DLIST_CNT; i++) {
1427 rd->sf_tx_ring[i].sf_tx_ctrl = htole32(SF_TX_DESC_ID);
1428 sc->sf_cdata.sf_txdesc[i].tx_m = NULL;
1429 sc->sf_cdata.sf_txdesc[i].ndesc = 0;
1430 }
1431 rd->sf_tx_ring[i - 1].sf_tx_ctrl |= htole32(SF_TX_DESC_END);
1432
1433 bus_dmamap_sync(sc->sf_cdata.sf_tx_ring_tag,
1434 sc->sf_cdata.sf_tx_ring_map,
1435 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1436 bus_dmamap_sync(sc->sf_cdata.sf_tx_cring_tag,
1437 sc->sf_cdata.sf_tx_cring_map,
1438 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1439 }
1440
1441 /*
1442 * Initialize an RX descriptor and attach an MBUF cluster.
1443 */
1444 static int
1445 sf_newbuf(struct sf_softc *sc, int idx)
1446 {
1447 struct sf_rx_rdesc *desc;
1448 struct sf_rxdesc *rxd;
1449 struct mbuf *m;
1450 bus_dma_segment_t segs[1];
1451 bus_dmamap_t map;
1452 int nsegs;
1453
1454 m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
1455 if (m == NULL)
1456 return (ENOBUFS);
1457 m->m_len = m->m_pkthdr.len = MCLBYTES;
1458 m_adj(m, sizeof(uint32_t));
1459
1460 if (bus_dmamap_load_mbuf_sg(sc->sf_cdata.sf_rx_tag,
1461 sc->sf_cdata.sf_rx_sparemap, m, segs, &nsegs, 0) != 0) {
1462 m_freem(m);
1463 return (ENOBUFS);
1464 }
1465 KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));
1466
1467 rxd = &sc->sf_cdata.sf_rxdesc[idx];
1468 if (rxd->rx_m != NULL) {
1469 bus_dmamap_sync(sc->sf_cdata.sf_rx_tag, rxd->rx_dmamap,
1470 BUS_DMASYNC_POSTREAD);
1471 bus_dmamap_unload(sc->sf_cdata.sf_rx_tag, rxd->rx_dmamap);
1472 }
1473 map = rxd->rx_dmamap;
1474 rxd->rx_dmamap = sc->sf_cdata.sf_rx_sparemap;
1475 sc->sf_cdata.sf_rx_sparemap = map;
1476 bus_dmamap_sync(sc->sf_cdata.sf_rx_tag, rxd->rx_dmamap,
1477 BUS_DMASYNC_PREREAD);
1478 rxd->rx_m = m;
1479 desc = &sc->sf_rdata.sf_rx_ring[idx];
1480 desc->sf_addr = htole64(segs[0].ds_addr);
1481
1482 return (0);
1483 }
1484
1485 #ifndef __NO_STRICT_ALIGNMENT
1486 static __inline void
1487 sf_fixup_rx(struct mbuf *m)
1488 {
1489 int i;
1490 uint16_t *src, *dst;
1491
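/*
 * Slide the frame back by ETHER_ALIGN (2) bytes, one 16-bit
 * word at a time, so the IP header ends up longword aligned.
 */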
1492 src = mtod(m, uint16_t *);
1493 dst = src - 1;
1494
1495 for (i = 0; i < (m->m_len / sizeof(uint16_t) + 1); i++)
1496 *dst++ = *src++;
1497
1498 m->m_data -= ETHER_ALIGN;
1499 }
1500 #endif
1501
1502 /*
1503 * The Starfire is programmed to use 'normal' mode for packet reception,
1504 * which means we use the consumer/producer model for both the buffer
1505 * descriptor queue and the completion descriptor queue. The only problem
1506 * with this is that it involves a lot of register accesses: we have to
1507 * read the RX completion consumer and producer indexes and the RX buffer
1508 * producer index, plus the RX completion consumer and RX buffer producer
1509 * indexes have to be updated. It would have been easier if Adaptec had
1510 * put each index in a separate register, especially given that the damn
1511 * NIC has a 512K register space.
1512 *
1513 * In spite of all the lovely features that Adaptec crammed into the 6915,
1514 * it is marred by one truly stupid design flaw, which is that receive
1515 * buffer addresses must be aligned on a longword boundary. This forces
1516 * the packet payload to be unaligned, which is suboptimal on the x86 and
1517 * completely unusable on the Alpha. Our only recourse is to copy received
1518 * packets into properly aligned buffers before handing them off.
1519 */
1520 static void
1521 sf_rxeof(struct sf_softc *sc)
1522 {
1523 struct mbuf *m;
1524 struct ifnet *ifp;
1525 struct sf_rxdesc *rxd;
1526 struct sf_rx_rcdesc *cur_cmp;
1527 int cons, eidx, prog;
1528 uint32_t status, status2;
1529
1530 SF_LOCK_ASSERT(sc);
1531
1532 ifp = sc->sf_ifp;
1533
1534 bus_dmamap_sync(sc->sf_cdata.sf_rx_ring_tag,
1535 sc->sf_cdata.sf_rx_ring_map,
1536 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1537 bus_dmamap_sync(sc->sf_cdata.sf_rx_cring_tag,
1538 sc->sf_cdata.sf_rx_cring_map,
1539 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1540
1541 /*
1542 * To reduce register accesses, read the Rx completion queue
1543 * entries directly.
1544 */
1545 eidx = 0;
1546 prog = 0;
1547 for (cons = sc->sf_cdata.sf_rxc_cons;
1548 (ifp->if_drv_flags & IFF_DRV_RUNNING) != 0;
1549 SF_INC(cons, SF_RX_CLIST_CNT)) {
1550 cur_cmp = &sc->sf_rdata.sf_rx_cring[cons];
1551 status = le32toh(cur_cmp->sf_rx_status1);
1552 if (status == 0)
1553 break;
1554 #ifdef DEVICE_POLLING
1555 if ((ifp->if_capenable & IFCAP_POLLING) != 0) {
1556 if (sc->rxcycles <= 0)
1557 break;
1558 sc->rxcycles--;
1559 }
1560 #endif
1561 prog++;
1562 eidx = (status & SF_RX_CMPDESC_EIDX) >> 16;
1563 rxd = &sc->sf_cdata.sf_rxdesc[eidx];
1564 m = rxd->rx_m;
1565
1566 /*
1567 * Note, if_ipackets and if_ierrors counters
1568 * are handled in sf_stats_update().
1569 */
1570 if ((status & SF_RXSTAT1_OK) == 0) {
1571 cur_cmp->sf_rx_status1 = 0;
1572 continue;
1573 }
1574
1575 if (sf_newbuf(sc, eidx) != 0) {
1576 ifp->if_iqdrops++;
1577 cur_cmp->sf_rx_status1 = 0;
1578 continue;
1579 }
1580
1581 /* AIC-6915 supports TCP/UDP checksum offload. */
1582 if ((ifp->if_capenable & IFCAP_RXCSUM) != 0) {
1583 status2 = le32toh(cur_cmp->sf_rx_status2);
1584 /*
1585 * Sometimes the AIC-6915 generates an interrupt to
1586 * warn of an RxGFP stall with the bad checksum bit
1587 * set in the status word. I'm not sure what
1588 * condition triggers it, but the received packet's
1589 * checksum was correct even though the AIC-6915
1590 * did not agree. This may be an indication of a
1591 * firmware bug. To work around the issue, do not
1592 * rely on the bad checksum bit in the status word
1593 * and let the upper layer verify the integrity of
1594 * the received frame.
1595 * Another nice feature of the AIC-6915 is hardware
1596 * assistance for checksum calculation: it provides
1597 * a partial checksum value for each received frame.
1598 * The partial checksum value can be used to
1599 * accelerate checksum computation for fragmented
1600 * TCP/UDP packets, and the upper network stack
1601 * already takes advantage of it in the IP
1602 * reassembly stage. But I'm not sure about the
1603 * correctness of the partial hardware checksum
1604 * assistance, as frequent RxGFP stalls are seen
1605 * on non-fragmented frames. Given the complexity
1606 * of the checksum computation code in the
1607 * firmware, it's possible that there is another
1608 * bug in RxGFP, so ignore checksum assistance for
1609 * fragmented frames. This can be changed in the
1610 * future.
1611 */
1612 if ((status2 & SF_RXSTAT2_FRAG) == 0) {
1613 if ((status2 & (SF_RXSTAT2_TCP |
1614 SF_RXSTAT2_UDP)) != 0) {
1615 if ((status2 & SF_RXSTAT2_CSUM_OK)) {
1616 m->m_pkthdr.csum_flags =
1617 CSUM_DATA_VALID |
1618 CSUM_PSEUDO_HDR;
1619 m->m_pkthdr.csum_data = 0xffff;
1620 }
1621 }
1622 }
1623 #ifdef SF_PARTIAL_CSUM_SUPPORT
1624 else if ((status2 & SF_RXSTAT2_FRAG) != 0) {
1625 if ((status2 & (SF_RXSTAT2_TCP |
1626 SF_RXSTAT2_UDP)) != 0) {
1627 if ((status2 & SF_RXSTAT2_PCSUM_OK)) {
1628 m->m_pkthdr.csum_flags =
1629 CSUM_DATA_VALID;
1630 m->m_pkthdr.csum_data =
1631 (status &
1632 SF_RX_CMPDESC_CSUM2);
1633 }
1634 }
1635 }
1636 #endif
1637 }
1638
1639 m->m_pkthdr.len = m->m_len = status & SF_RX_CMPDESC_LEN;
1640 #ifndef __NO_STRICT_ALIGNMENT
1641 sf_fixup_rx(m);
1642 #endif
1643 m->m_pkthdr.rcvif = ifp;
1644
1645 SF_UNLOCK(sc);
1646 (*ifp->if_input)(ifp, m);
1647 SF_LOCK(sc);
1648
1649 /* Clear completion status. */
1650 cur_cmp->sf_rx_status1 = 0;
1651 }
1652
1653 if (prog > 0) {
1654 sc->sf_cdata.sf_rxc_cons = cons;
1655 bus_dmamap_sync(sc->sf_cdata.sf_rx_ring_tag,
1656 sc->sf_cdata.sf_rx_ring_map,
1657 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1658 bus_dmamap_sync(sc->sf_cdata.sf_rx_cring_tag,
1659 sc->sf_cdata.sf_rx_cring_map,
1660 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1661
1662 /* Update Rx completion Q1 consumer index. */
1663 csr_write_4(sc, SF_CQ_CONSIDX,
1664 (csr_read_4(sc, SF_CQ_CONSIDX) & ~SF_CQ_CONSIDX_RXQ1) |
1665 (cons & SF_CQ_CONSIDX_RXQ1));
1666 /* Update Rx descriptor Q1 ptr. */
1667 csr_write_4(sc, SF_RXDQ_PTR_Q1,
1668 (csr_read_4(sc, SF_RXDQ_PTR_Q1) & ~SF_RXDQ_PRODIDX) |
1669 (eidx & SF_RXDQ_PRODIDX));
1670 }
1671 }
1672
1673 /*
1674 * Read the transmit status from the completion queue and release
1675 * mbufs. Note that the buffer descriptor index in the completion
1676 * descriptor is an offset from the start of the transmit buffer
1677 * descriptor list in bytes. This is important because the manual
1678 * gives the impression that it should match the producer/consumer
1679 * index, which is the offset in 8 byte blocks.
1680 */
1681 static void
1682 sf_txeof(struct sf_softc *sc)
1683 {
1684 struct sf_txdesc *txd;
1685 struct sf_tx_rcdesc *cur_cmp;
1686 struct ifnet *ifp;
1687 uint32_t status;
1688 int cons, idx, prod;
1689
1690 SF_LOCK_ASSERT(sc);
1691
1692 ifp = sc->sf_ifp;
1693
1694 bus_dmamap_sync(sc->sf_cdata.sf_tx_cring_tag,
1695 sc->sf_cdata.sf_tx_cring_map,
1696 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1697
1698 cons = sc->sf_cdata.sf_txc_cons;
1699 prod = (csr_read_4(sc, SF_CQ_PRODIDX) & SF_TXDQ_PRODIDX_HIPRIO) >> 16;
1700 if (prod == cons)
1701 return;
1702
1703 for (; cons != prod; SF_INC(cons, SF_TX_CLIST_CNT)) {
1704 cur_cmp = &sc->sf_rdata.sf_tx_cring[cons];
1705 status = le32toh(cur_cmp->sf_tx_status1);
1706 if (status == 0)
1707 break;
1708 switch (status & SF_TX_CMPDESC_TYPE) {
1709 case SF_TXCMPTYPE_TX:
1710 /* Tx complete entry. */
1711 break;
1712 case SF_TXCMPTYPE_DMA:
1713 /* DMA complete entry. */
1714 idx = status & SF_TX_CMPDESC_IDX;
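/* Convert the byte offset to a descriptor index. */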
1715 idx = idx / sizeof(struct sf_tx_rdesc);
1716 /*
1717 * We don't need to check Tx status here.
1718 * SF_ISR_TX_LOFIFO intr would handle this.
1719 * Note, if_opackets, if_collisions and if_oerrors
1720 * counters are handled in sf_stats_update().
1721 */
1722 txd = &sc->sf_cdata.sf_txdesc[idx];
1723 if (txd->tx_m != NULL) {
1724 bus_dmamap_sync(sc->sf_cdata.sf_tx_tag,
1725 txd->tx_dmamap,
1726 BUS_DMASYNC_POSTWRITE);
1727 bus_dmamap_unload(sc->sf_cdata.sf_tx_tag,
1728 txd->tx_dmamap);
1729 m_freem(txd->tx_m);
1730 txd->tx_m = NULL;
1731 }
1732 sc->sf_cdata.sf_tx_cnt -= txd->ndesc;
1733 KASSERT(sc->sf_cdata.sf_tx_cnt >= 0,
1734 ("%s: Active Tx desc counter was garbled\n",
1735 __func__));
1736 txd->ndesc = 0;
1737 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
1738 break;
1739 default:
1740 /* This should never happen. */
1741 device_printf(sc->sf_dev,
1742 "unknown Tx completion type : 0x%08x : %d : %d\n",
1743 status, cons, prod);
1744 break;
1745 }
1746 cur_cmp->sf_tx_status1 = 0;
1747 }
1748
1749 sc->sf_cdata.sf_txc_cons = cons;
1750 bus_dmamap_sync(sc->sf_cdata.sf_tx_cring_tag,
1751 sc->sf_cdata.sf_tx_cring_map,
1752 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1753
1754 if (sc->sf_cdata.sf_tx_cnt == 0)
1755 sc->sf_watchdog_timer = 0;
1756
1757 /* Update Tx completion consumer index. */
1758 csr_write_4(sc, SF_CQ_CONSIDX,
1759 (csr_read_4(sc, SF_CQ_CONSIDX) & 0xffff) |
1760 ((cons << 16) & 0xffff0000));
1761 }
1762
1763 static void
1764 sf_txthresh_adjust(struct sf_softc *sc)
1765 {
1766 uint32_t txfctl;
1767
1768 device_printf(sc->sf_dev, "Tx underrun -- ");
1769 if (sc->sf_txthresh < SF_MAX_TX_THRESHOLD) {
1770 txfctl = csr_read_4(sc, SF_TX_FRAMCTL);
1771 /* Increase the Tx threshold by 256 bytes. */
1772 sc->sf_txthresh += 16;
1773 if (sc->sf_txthresh > SF_MAX_TX_THRESHOLD)
1774 sc->sf_txthresh = SF_MAX_TX_THRESHOLD;
1775 txfctl &= ~SF_TXFRMCTL_TXTHRESH;
1776 txfctl |= sc->sf_txthresh;
1777 printf("increasing Tx threshold to %d bytes\n",
1778 sc->sf_txthresh * SF_TX_THRESHOLD_UNIT);
1779 csr_write_4(sc, SF_TX_FRAMCTL, txfctl);
1780 } else
1781 printf("\n");
1782 }
1783
1784 #ifdef DEVICE_POLLING
1785 static void
1786 sf_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
1787 {
1788 struct sf_softc *sc;
1789 uint32_t status;
1790
1791 sc = ifp->if_softc;
1792 SF_LOCK(sc);
1793
1794 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
1795 SF_UNLOCK(sc);
1796 return;
1797 }
1798
1799 sc->rxcycles = count;
1800 sf_rxeof(sc);
1801 sf_txeof(sc);
1802 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
1803 sf_start_locked(ifp);
1804
1805 if (cmd == POLL_AND_CHECK_STATUS) {
1806 /* Reading the ISR register clears all interrupts. */
1807 status = csr_read_4(sc, SF_ISR);
1808
1809 if ((status & SF_ISR_ABNORMALINTR) != 0) {
1810 if ((status & SF_ISR_STATSOFLOW) != 0)
1811 sf_stats_update(sc);
1812 else if ((status & SF_ISR_TX_LOFIFO) != 0)
1813 sf_txthresh_adjust(sc);
1814 else if ((status & SF_ISR_DMAERR) != 0) {
1815 device_printf(sc->sf_dev,
1816 "DMA error, resetting\n");
1817 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
1818 sf_init_locked(sc);
1819 SF_UNLOCK(sc);
1820 return;
1821 } else if ((status & SF_ISR_NO_TX_CSUM) != 0) {
1822 sc->sf_statistics.sf_tx_gfp_stall++;
1823 #ifdef SF_GFP_DEBUG
1824 device_printf(sc->sf_dev,
1825 "TxGFP is not responding!\n");
1826 #endif
1827 } else if ((status & SF_ISR_RXGFP_NORESP) != 0) {
1828 sc->sf_statistics.sf_rx_gfp_stall++;
1829 #ifdef SF_GFP_DEBUG
1830 device_printf(sc->sf_dev,
1831 "RxGFP is not responding!\n");
1832 #endif
1833 }
1834 }
1835 }
1836
1837 SF_UNLOCK(sc);
1838 }
1839 #endif /* DEVICE_POLLING */
1840
1841 static void
1842 sf_intr(void *arg)
1843 {
1844 struct sf_softc *sc;
1845 struct ifnet *ifp;
1846 uint32_t status;
1847 int cnt;
1848
1849 sc = (struct sf_softc *)arg;
1850 SF_LOCK(sc);
1851
1852 if (sc->sf_suspended != 0)
1853 goto done_locked;
1854
1855 /* Reading the ISR register clears all interrupts. */
1856 status = csr_read_4(sc, SF_ISR);
1857 if (status == 0 || status == 0xffffffff ||
1858 (status & SF_ISR_PCIINT_ASSERTED) == 0)
1859 goto done_locked;
1860
1861 ifp = sc->sf_ifp;
1862 #ifdef DEVICE_POLLING
1863 if ((ifp->if_capenable & IFCAP_POLLING) != 0)
1864 goto done_locked;
1865 #endif
1866
1867 /* Disable interrupts. */
1868 csr_write_4(sc, SF_IMR, 0x00000000);
1869
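/*
 * Bound the service loop to 32 passes so that a continuously
 * asserted interrupt source cannot keep the handler spinning
 * forever; cnt is decremented at the bottom of the loop.
 */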
1870 for (cnt = 32; (status & SF_INTRS) != 0;) {
1871 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
1872 break;
1873 if ((status & SF_ISR_RXDQ1_DMADONE) != 0)
1874 sf_rxeof(sc);
1875
1876 if ((status & (SF_ISR_TX_TXDONE | SF_ISR_TX_DMADONE |
1877 SF_ISR_TX_QUEUEDONE)) != 0)
1878 sf_txeof(sc);
1879
1880 if ((status & SF_ISR_ABNORMALINTR) != 0) {
1881 if ((status & SF_ISR_STATSOFLOW) != 0)
1882 sf_stats_update(sc);
1883 else if ((status & SF_ISR_TX_LOFIFO) != 0)
1884 sf_txthresh_adjust(sc);
1885 else if ((status & SF_ISR_DMAERR) != 0) {
1886 device_printf(sc->sf_dev,
1887 "DMA error, resetting\n");
1888 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
1889 sf_init_locked(sc);
1890 SF_UNLOCK(sc);
1891 return;
1892 } else if ((status & SF_ISR_NO_TX_CSUM) != 0) {
1893 sc->sf_statistics.sf_tx_gfp_stall++;
1894 #ifdef SF_GFP_DEBUG
1895 device_printf(sc->sf_dev,
1896 "TxGFP is not responding!\n");
1897 #endif
1898 }
1899 else if ((status & SF_ISR_RXGFP_NORESP) != 0) {
1900 sc->sf_statistics.sf_rx_gfp_stall++;
1901 #ifdef SF_GFP_DEBUG
1902 device_printf(sc->sf_dev,
1903 "RxGFP is not responding!\n");
1904 #endif
1905 }
1906 }
1907 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
1908 sf_start_locked(ifp);
1909 if (--cnt <= 0)
1910 break;
1911 /* Reading the ISR register clears all interrupts. */
1912 status = csr_read_4(sc, SF_ISR);
1913 }
1914
1915 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
1916 /* Re-enable interrupts. */
1917 csr_write_4(sc, SF_IMR, SF_INTRS);
1918 }
1919
1920 done_locked:
1921 SF_UNLOCK(sc);
1922 }
1923
1924 static void
1925 sf_download_fw(struct sf_softc *sc)
1926 {
1927 uint32_t gfpinst;
1928 int i, ndx;
1929 uint8_t *p;
1930
1931 /*
1932 * An FP instruction is 48 bits wide, so it has to be
1933 * written as two 32-bit words.
1934 */
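/*
 * Illustration of the split performed by the loops below: the six
 * instruction bytes p[0]..p[5] are pushed with two register writes,
 *
 *   csr_write_4(..., p[2] << 24 | p[3] << 16 | p[4] << 8 | p[5]);
 *   csr_write_4(..., p[0] << 8 | p[1]);
 *
 * i.e. the low 32 bits of the instruction first, then the upper
 * 16 bits.
 */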
1935 p = txfwdata;
1936 ndx = 0;
1937 for (i = 0; i < sizeof(txfwdata) / SF_GFP_INST_BYTES; i++) {
1938 gfpinst = p[2] << 24 | p[3] << 16 | p[4] << 8 | p[5];
1939 csr_write_4(sc, SF_TXGFP_MEM_BASE + ndx * 4, gfpinst);
1940 gfpinst = p[0] << 8 | p[1];
1941 csr_write_4(sc, SF_TXGFP_MEM_BASE + (ndx + 1) * 4, gfpinst);
1942 p += SF_GFP_INST_BYTES;
1943 ndx += 2;
1944 }
1945 if (bootverbose)
1946 device_printf(sc->sf_dev, "%d Tx instructions downloaded\n", i);
1947
1948 p = rxfwdata;
1949 ndx = 0;
1950 for (i = 0; i < sizeof(rxfwdata) / SF_GFP_INST_BYTES; i++) {
1951 gfpinst = p[2] << 24 | p[3] << 16 | p[4] << 8 | p[5];
1952 csr_write_4(sc, SF_RXGFP_MEM_BASE + (ndx * 4), gfpinst);
1953 gfpinst = p[0] << 8 | p[1];
1954 csr_write_4(sc, SF_RXGFP_MEM_BASE + (ndx + 1) * 4, gfpinst);
1955 p += SF_GFP_INST_BYTES;
1956 ndx += 2;
1957 }
1958 if (bootverbose)
1959 device_printf(sc->sf_dev, "%d Rx instructions downloaded\n", i);
1960 }
1961
1962 static void
1963 sf_init(void *xsc)
1964 {
1965 struct sf_softc *sc;
1966
1967 sc = (struct sf_softc *)xsc;
1968 SF_LOCK(sc);
1969 sf_init_locked(sc);
1970 SF_UNLOCK(sc);
1971 }
1972
1973 static void
1974 sf_init_locked(struct sf_softc *sc)
1975 {
1976 struct ifnet *ifp;
1977 struct mii_data *mii;
1978 uint8_t eaddr[ETHER_ADDR_LEN];
1979 bus_addr_t addr;
1980 int i;
1981
1982 SF_LOCK_ASSERT(sc);
1983 ifp = sc->sf_ifp;
1984 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
1985 return;
1986 mii = device_get_softc(sc->sf_miibus);
1987
1988 sf_stop(sc);
1989 /* Reset the hardware to a known state. */
1990 sf_reset(sc);
1991
1992 /* Init all the receive filter registers */
1993 for (i = SF_RXFILT_PERFECT_BASE;
1994 i < (SF_RXFILT_HASH_MAX + 1); i += sizeof(uint32_t))
1995 csr_write_4(sc, i, 0);
1996
1997 /* Empty stats counter registers. */
1998 for (i = SF_STATS_BASE; i < (SF_STATS_END + 1); i += sizeof(uint32_t))
1999 csr_write_4(sc, i, 0);
2000
2001 /* Init our MAC address. */
2002 bcopy(IF_LLADDR(sc->sf_ifp), eaddr, sizeof(eaddr));
2003 csr_write_4(sc, SF_PAR0,
2004 eaddr[2] << 24 | eaddr[3] << 16 | eaddr[4] << 8 | eaddr[5]);
2005 csr_write_4(sc, SF_PAR1, eaddr[0] << 8 | eaddr[1]);
2006 sf_setperf(sc, 0, eaddr);
2007
2008 if (sf_init_rx_ring(sc) == ENOBUFS) {
2009 device_printf(sc->sf_dev,
2010 "initialization failed: no memory for rx buffers\n");
2011 sf_stop(sc);
2012 return;
2013 }
2014
2015 sf_init_tx_ring(sc);
2016
2017 /*
2018 * Use the 16 perfect filter slots for address filtering.
2019 * Hash only on the multicast destination address and accept
2020 * matching frames regardless of VLAN ID.
2021 */
2022 csr_write_4(sc, SF_RXFILT, SF_PERFMODE_NORMAL | SF_HASHMODE_ANYVLAN);
2023
2024 /*
2025 * Set Rx filter.
2026 */
2027 sf_rxfilter(sc);
2028
2029 /* Init the completion queue indexes. */
2030 csr_write_4(sc, SF_CQ_CONSIDX, 0);
2031 csr_write_4(sc, SF_CQ_PRODIDX, 0);
2032
2033 /* Init the RX completion queue. */
2034 addr = sc->sf_rdata.sf_rx_cring_paddr;
2035 csr_write_4(sc, SF_CQ_ADDR_HI, SF_ADDR_HI(addr));
2036 csr_write_4(sc, SF_RXCQ_CTL_1, SF_ADDR_LO(addr) & SF_RXCQ_ADDR);
2037 if (SF_ADDR_HI(addr) != 0)
2038 SF_SETBIT(sc, SF_RXCQ_CTL_1, SF_RXCQ_USE_64BIT);
2039 /* Set RX completion queue type 2. */
2040 SF_SETBIT(sc, SF_RXCQ_CTL_1, SF_RXCQTYPE_2);
2041 csr_write_4(sc, SF_RXCQ_CTL_2, 0);
2042
2043 /*
2044 * Init RX DMA control:
2045 * default RxHighPriority threshold,
2046 * default RxBurstSize of 128 bytes.
2047 */
2048 SF_SETBIT(sc, SF_RXDMA_CTL,
2049 SF_RXDMA_REPORTBADPKTS |
2050 (SF_RXDMA_HIGHPRIO_THRESH << 8) |
2051 SF_RXDMA_BURST);
2052
2053 /* Init the RX buffer descriptor queue. */
2054 addr = sc->sf_rdata.sf_rx_ring_paddr;
2055 csr_write_4(sc, SF_RXDQ_ADDR_HI, SF_ADDR_HI(addr));
2056 csr_write_4(sc, SF_RXDQ_ADDR_Q1, SF_ADDR_LO(addr));
2057
2058 /* Set RX queue buffer length. */
2059 csr_write_4(sc, SF_RXDQ_CTL_1,
2060 ((MCLBYTES - sizeof(uint32_t)) << 16) |
2061 SF_RXDQCTL_64BITBADDR | SF_RXDQCTL_VARIABLE);
2062
2063 if (SF_ADDR_HI(addr) != 0)
2064 SF_SETBIT(sc, SF_RXDQ_CTL_1, SF_RXDQCTL_64BITDADDR);
2065 csr_write_4(sc, SF_RXDQ_PTR_Q1, SF_RX_DLIST_CNT - 1);
2066 csr_write_4(sc, SF_RXDQ_CTL_2, 0);
2067
2068 /* Init the TX completion queue */
2069 addr = sc->sf_rdata.sf_tx_cring_paddr;
2070 csr_write_4(sc, SF_TXCQ_CTL, SF_ADDR_LO(addr) & SF_TXCQ_ADDR);
2071 if (SF_ADDR_HI(addr) != 0)
2072 SF_SETBIT(sc, SF_TXCQ_CTL, SF_TXCQ_USE_64BIT);
2073
2074 /* Init the TX buffer descriptor queue. */
2075 addr = sc->sf_rdata.sf_tx_ring_paddr;
2076 csr_write_4(sc, SF_TXDQ_ADDR_HI, SF_ADDR_HI(addr));
2077 csr_write_4(sc, SF_TXDQ_ADDR_HIPRIO, 0);
2078 csr_write_4(sc, SF_TXDQ_ADDR_LOPRIO, SF_ADDR_LO(addr));
2079 csr_write_4(sc, SF_TX_FRAMCTL,
2080 SF_TXFRMCTL_CPLAFTERTX | sc->sf_txthresh);
2081 csr_write_4(sc, SF_TXDQ_CTL,
2082 SF_TXDMA_HIPRIO_THRESH << 24 |
2083 SF_TXSKIPLEN_0BYTES << 16 |
2084 SF_TXDDMA_BURST << 8 |
2085 SF_TXBUFDESC_TYPE2 | SF_TXMINSPACE_UNLIMIT);
2086 if (SF_ADDR_HI(addr) != 0)
2087 SF_SETBIT(sc, SF_TXDQ_CTL, SF_TXDQCTL_64BITADDR);
2088
2089 /* Set VLAN Type register. */
2090 csr_write_4(sc, SF_VLANTYPE, ETHERTYPE_VLAN);
2091
2092 /* Set TxPause Timer. */
2093 csr_write_4(sc, SF_TXPAUSETIMER, 0xffff);
2094
2095 /* Enable autopadding of short TX frames. */
2096 SF_SETBIT(sc, SF_MACCFG_1, SF_MACCFG1_AUTOPAD);
2097 SF_SETBIT(sc, SF_MACCFG_2, SF_MACCFG2_AUTOVLANPAD);
2098 /* Reset the MAC for the changes to take effect. */
2099 SF_SETBIT(sc, SF_MACCFG_1, SF_MACCFG1_SOFTRESET);
2100 DELAY(1000);
2101 SF_CLRBIT(sc, SF_MACCFG_1, SF_MACCFG1_SOFTRESET);
2102
2103 /* Enable PCI bus master. */
2104 SF_SETBIT(sc, SF_PCI_DEVCFG, SF_PCIDEVCFG_PCIMEN);
2105
2106 /* Load StarFire firmware. */
2107 sf_download_fw(sc);
2108
2109 /* Initialize interrupt moderation. */
2110 csr_write_4(sc, SF_TIMER_CTL, SF_TIMER_IMASK_MODE | SF_TIMER_TIMES_TEN |
2111 (sc->sf_int_mod & SF_TIMER_IMASK_INTERVAL));
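/*
 * sf_int_mod is masked to the SF_TIMER_IMASK_INTERVAL field of
 * SF_TIMER_CTL; sysctl_hw_sf_int_mod() below keeps the tunable
 * within [SF_IM_MIN, SF_IM_MAX].
 */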
2112
2113 #ifdef DEVICE_POLLING
2114 /* Disable interrupts if we are polling. */
2115 if ((ifp->if_capenable & IFCAP_POLLING) != 0)
2116 csr_write_4(sc, SF_IMR, 0x00000000);
2117 else
2118 #endif
2119 /* Enable interrupts. */
2120 csr_write_4(sc, SF_IMR, SF_INTRS);
2121 SF_SETBIT(sc, SF_PCI_DEVCFG, SF_PCIDEVCFG_INTR_ENB);
2122
2123 /* Enable the RX and TX engines. */
2124 csr_write_4(sc, SF_GEN_ETH_CTL,
2125 SF_ETHCTL_RX_ENB | SF_ETHCTL_RXDMA_ENB |
2126 SF_ETHCTL_TX_ENB | SF_ETHCTL_TXDMA_ENB);
2127
2128 if ((ifp->if_capenable & IFCAP_TXCSUM) != 0)
2129 SF_SETBIT(sc, SF_GEN_ETH_CTL, SF_ETHCTL_TXGFP_ENB);
2130 else
2131 SF_CLRBIT(sc, SF_GEN_ETH_CTL, SF_ETHCTL_TXGFP_ENB);
2132 if ((ifp->if_capenable & IFCAP_RXCSUM) != 0)
2133 SF_SETBIT(sc, SF_GEN_ETH_CTL, SF_ETHCTL_RXGFP_ENB);
2134 else
2135 SF_CLRBIT(sc, SF_GEN_ETH_CTL, SF_ETHCTL_RXGFP_ENB);
2136
2137 ifp->if_drv_flags |= IFF_DRV_RUNNING;
2138 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
2139
2140 sc->sf_link = 0;
2141 sf_ifmedia_upd_locked(ifp);
2142
2143 callout_reset(&sc->sf_co, hz, sf_tick, sc);
2144 }
2145
2146 static int
2147 sf_encap(struct sf_softc *sc, struct mbuf **m_head)
2148 {
2149 struct sf_txdesc *txd;
2150 struct sf_tx_rdesc *desc;
2151 struct mbuf *m;
2152 bus_dmamap_t map;
2153 bus_dma_segment_t txsegs[SF_MAXTXSEGS];
2154 int error, i, nsegs, prod, si;
2155 int avail, nskip;
2156
2157 SF_LOCK_ASSERT(sc);
2158
2159 m = *m_head;
2160 prod = sc->sf_cdata.sf_tx_prod;
2161 txd = &sc->sf_cdata.sf_txdesc[prod];
2162 map = txd->tx_dmamap;
2163 error = bus_dmamap_load_mbuf_sg(sc->sf_cdata.sf_tx_tag, map,
2164 *m_head, txsegs, &nsegs, BUS_DMA_NOWAIT);
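/*
 * EFBIG means the mbuf chain has more fragments than SF_MAXTXSEGS;
 * collapse it into a shorter chain and retry the load once.
 */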
2165 if (error == EFBIG) {
2166 m = m_collapse(*m_head, M_DONTWAIT, SF_MAXTXSEGS);
2167 if (m == NULL) {
2168 m_freem(*m_head);
2169 *m_head = NULL;
2170 return (ENOBUFS);
2171 }
2172 *m_head = m;
2173 error = bus_dmamap_load_mbuf_sg(sc->sf_cdata.sf_tx_tag,
2174 map, *m_head, txsegs, &nsegs, BUS_DMA_NOWAIT);
2175 if (error != 0) {
2176 m_freem(*m_head);
2177 *m_head = NULL;
2178 return (error);
2179 }
2180 } else if (error != 0)
2181 return (error);
2182 if (nsegs == 0) {
2183 m_freem(*m_head);
2184 *m_head = NULL;
2185 return (EIO);
2186 }
2187
2188 /* Check number of available descriptors. */
2189 avail = (SF_TX_DLIST_CNT - 1) - sc->sf_cdata.sf_tx_cnt;
2190 if (avail < nsegs) {
2191 bus_dmamap_unload(sc->sf_cdata.sf_tx_tag, map);
2192 return (ENOBUFS);
2193 }
2194 nskip = 0;
2195 if (prod + nsegs >= SF_TX_DLIST_CNT) {
2196 nskip = SF_TX_DLIST_CNT - prod - 1;
2197 if (avail < nsegs + nskip) {
2198 bus_dmamap_unload(sc->sf_cdata.sf_tx_tag, map);
2199 return (ENOBUFS);
2200 }
2201 }
2202
2203 bus_dmamap_sync(sc->sf_cdata.sf_tx_tag, map, BUS_DMASYNC_PREWRITE);
2204
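/*
 * Descriptor fill loop. If the frame would cross the end of the
 * ring, the first descriptor is tagged with SF_TX_DESC_END and the
 * remaining segments restart at slot 0; the nskip slots between
 * prod and the end of the ring go unused but are still counted in
 * sf_tx_cnt and reclaimed by sf_txeof().
 */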
2205 si = prod;
2206 for (i = 0; i < nsegs; i++) {
2207 desc = &sc->sf_rdata.sf_tx_ring[prod];
2208 desc->sf_tx_ctrl = htole32(SF_TX_DESC_ID |
2209 (txsegs[i].ds_len & SF_TX_DESC_FRAGLEN));
2210 desc->sf_tx_reserved = 0;
2211 desc->sf_addr = htole64(txsegs[i].ds_addr);
2212 if (i == 0 && prod + nsegs >= SF_TX_DLIST_CNT) {
2213 /* Queue wraps! */
2214 desc->sf_tx_ctrl |= htole32(SF_TX_DESC_END);
2215 prod = 0;
2216 } else
2217 SF_INC(prod, SF_TX_DLIST_CNT);
2218 }
2219 /* Update producer index. */
2220 sc->sf_cdata.sf_tx_prod = prod;
2221 sc->sf_cdata.sf_tx_cnt += nsegs + nskip;
2222
2223 desc = &sc->sf_rdata.sf_tx_ring[si];
2224 /* Check for a TCP/UDP checksum offload request. */
2225 if ((m->m_pkthdr.csum_flags & SF_CSUM_FEATURES) != 0)
2226 desc->sf_tx_ctrl |= htole32(SF_TX_DESC_CALTCP);
2227 desc->sf_tx_ctrl |=
2228 htole32(SF_TX_DESC_CRCEN | SF_TX_DESC_INTR | (nsegs << 16));
2229
2230 txd->tx_dmamap = map;
2231 txd->tx_m = m;
2232 txd->ndesc = nsegs + nskip;
2233
2234 return (0);
2235 }
2236
2237 static void
2238 sf_start(struct ifnet *ifp)
2239 {
2240 struct sf_softc *sc;
2241
2242 sc = ifp->if_softc;
2243 SF_LOCK(sc);
2244 sf_start_locked(ifp);
2245 SF_UNLOCK(sc);
2246 }
2247
2248 static void
2249 sf_start_locked(struct ifnet *ifp)
2250 {
2251 struct sf_softc *sc;
2252 struct mbuf *m_head;
2253 int enq;
2254
2255 sc = ifp->if_softc;
2256 SF_LOCK_ASSERT(sc);
2257
2258 if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
2259 IFF_DRV_RUNNING || sc->sf_link == 0)
2260 return;
2261
2262 /*
2263 * Since we don't know in advance where a descriptor wrap will
2264 * occur, only queue a frame while the number of active Tx
2265 * descriptors stays at least SF_MAXTXSEGS below the ring size.
2266 */
2267 for (enq = 0; !IFQ_DRV_IS_EMPTY(&ifp->if_snd) &&
2268 sc->sf_cdata.sf_tx_cnt < SF_TX_DLIST_CNT - SF_MAXTXSEGS; ) {
2269 IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
2270 if (m_head == NULL)
2271 break;
2272 /*
2273 * Pack the data into the transmit ring. If we
2274 * don't have room, set the OACTIVE flag and wait
2275 * for the NIC to drain the ring.
2276 */
2277 if (sf_encap(sc, &m_head)) {
2278 if (m_head == NULL)
2279 break;
2280 IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
2281 ifp->if_drv_flags |= IFF_DRV_OACTIVE;
2282 break;
2283 }
2284
2285 enq++;
2286 /*
2287 * If there's a BPF listener, bounce a copy of this frame
2288 * to him.
2289 */
2290 ETHER_BPF_MTAP(ifp, m_head);
2291 }
2292
2293 if (enq > 0) {
2294 bus_dmamap_sync(sc->sf_cdata.sf_tx_ring_tag,
2295 sc->sf_cdata.sf_tx_ring_map,
2296 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2297 /* Kick transmit. */
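/*
 * The hardware producer index is evidently kept in 8-byte units,
 * hence the sizeof(struct sf_tx_rdesc) / 8 scaling of the
 * descriptor index.
 */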
2298 csr_write_4(sc, SF_TXDQ_PRODIDX,
2299 sc->sf_cdata.sf_tx_prod * (sizeof(struct sf_tx_rdesc) / 8));
2300
2301 /* Set a timeout in case the chip goes out to lunch. */
2302 sc->sf_watchdog_timer = 5;
2303 }
2304 }
2305
2306 static void
2307 sf_stop(struct sf_softc *sc)
2308 {
2309 struct sf_txdesc *txd;
2310 struct sf_rxdesc *rxd;
2311 struct ifnet *ifp;
2312 int i;
2313
2314 SF_LOCK_ASSERT(sc);
2315
2316 ifp = sc->sf_ifp;
2317
2318 ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
2319 sc->sf_link = 0;
2320 callout_stop(&sc->sf_co);
2321 sc->sf_watchdog_timer = 0;
2322
2323 /* Reading the ISR register clears all interrrupts. */
2324 csr_read_4(sc, SF_ISR);
2325 /* Disable further interrupts. */
2326 csr_write_4(sc, SF_IMR, 0);
2327
2328 /* Disable the Tx/Rx engines. */
2329 csr_write_4(sc, SF_GEN_ETH_CTL, 0);
2330
2331 /* Give the hardware a chance to drain active DMA cycles. */
2332 DELAY(1000);
2333
2334 csr_write_4(sc, SF_CQ_CONSIDX, 0);
2335 csr_write_4(sc, SF_CQ_PRODIDX, 0);
2336 csr_write_4(sc, SF_RXDQ_ADDR_Q1, 0);
2337 csr_write_4(sc, SF_RXDQ_CTL_1, 0);
2338 csr_write_4(sc, SF_RXDQ_PTR_Q1, 0);
2339 csr_write_4(sc, SF_TXCQ_CTL, 0);
2340 csr_write_4(sc, SF_TXDQ_ADDR_HIPRIO, 0);
2341 csr_write_4(sc, SF_TXDQ_CTL, 0);
2342
2343 /*
2344 * Free RX and TX mbufs still in the queues.
2345 */
2346 for (i = 0; i < SF_RX_DLIST_CNT; i++) {
2347 rxd = &sc->sf_cdata.sf_rxdesc[i];
2348 if (rxd->rx_m != NULL) {
2349 bus_dmamap_sync(sc->sf_cdata.sf_rx_tag,
2350 rxd->rx_dmamap, BUS_DMASYNC_POSTREAD);
2351 bus_dmamap_unload(sc->sf_cdata.sf_rx_tag,
2352 rxd->rx_dmamap);
2353 m_freem(rxd->rx_m);
2354 rxd->rx_m = NULL;
2355 }
2356 }
2357 for (i = 0; i < SF_TX_DLIST_CNT; i++) {
2358 txd = &sc->sf_cdata.sf_txdesc[i];
2359 if (txd->tx_m != NULL) {
2360 bus_dmamap_sync(sc->sf_cdata.sf_tx_tag,
2361 txd->tx_dmamap, BUS_DMASYNC_POSTWRITE);
2362 bus_dmamap_unload(sc->sf_cdata.sf_tx_tag,
2363 txd->tx_dmamap);
2364 m_freem(txd->tx_m);
2365 txd->tx_m = NULL;
2366 txd->ndesc = 0;
2367 }
2368 }
2369 }
2370
2371 static void
2372 sf_tick(void *xsc)
2373 {
2374 struct sf_softc *sc;
2375 struct mii_data *mii;
2376
2377 sc = xsc;
2378 SF_LOCK_ASSERT(sc);
2379 mii = device_get_softc(sc->sf_miibus);
2380 mii_tick(mii);
2381 sf_stats_update(sc);
2382 sf_watchdog(sc);
2383 callout_reset(&sc->sf_co, hz, sf_tick, sc);
2384 }
2385
2386 /*
2387 * Note: it is important that this function not be interrupted. We
2388 * use a two-stage register access scheme: if we are interrupted in
2389 * between setting the indirect address register and reading from the
2390 * indirect data register, the contents of the address register could
2391 * be changed out from under us.
2392 */
2393 static void
2394 sf_stats_update(struct sf_softc *sc)
2395 {
2396 struct ifnet *ifp;
2397 struct sf_stats now, *stats, *nstats;
2398 int i;
2399
2400 SF_LOCK_ASSERT(sc);
2401
2402 ifp = sc->sf_ifp;
2403 stats = &now;
2404
2405 stats->sf_tx_frames =
2406 csr_read_4(sc, SF_STATS_BASE + SF_STATS_TX_FRAMES);
2407 stats->sf_tx_single_colls =
2408 csr_read_4(sc, SF_STATS_BASE + SF_STATS_TX_SINGLE_COL);
2409 stats->sf_tx_multi_colls =
2410 csr_read_4(sc, SF_STATS_BASE + SF_STATS_TX_MULTI_COL);
2411 stats->sf_tx_crcerrs =
2412 csr_read_4(sc, SF_STATS_BASE + SF_STATS_TX_CRC_ERRS);
2413 stats->sf_tx_bytes =
2414 csr_read_4(sc, SF_STATS_BASE + SF_STATS_TX_BYTES);
2415 stats->sf_tx_deferred =
2416 csr_read_4(sc, SF_STATS_BASE + SF_STATS_TX_DEFERRED);
2417 stats->sf_tx_late_colls =
2418 csr_read_4(sc, SF_STATS_BASE + SF_STATS_TX_LATE_COL);
2419 stats->sf_tx_pause_frames =
2420 csr_read_4(sc, SF_STATS_BASE + SF_STATS_TX_PAUSE);
2421 stats->sf_tx_control_frames =
2422 csr_read_4(sc, SF_STATS_BASE + SF_STATS_TX_CTL_FRAME);
2423 stats->sf_tx_excess_colls =
2424 csr_read_4(sc, SF_STATS_BASE + SF_STATS_TX_EXCESS_COL);
2425 stats->sf_tx_excess_defer =
2426 csr_read_4(sc, SF_STATS_BASE + SF_STATS_TX_EXCESS_DEF);
2427 stats->sf_tx_mcast_frames =
2428 csr_read_4(sc, SF_STATS_BASE + SF_STATS_TX_MULTI);
2429 stats->sf_tx_bcast_frames =
2430 csr_read_4(sc, SF_STATS_BASE + SF_STATS_TX_BCAST);
2431 stats->sf_tx_frames_lost =
2432 csr_read_4(sc, SF_STATS_BASE + SF_STATS_TX_FRAME_LOST);
2433 stats->sf_rx_frames =
2434 csr_read_4(sc, SF_STATS_BASE + SF_STATS_RX_FRAMES);
2435 stats->sf_rx_crcerrs =
2436 csr_read_4(sc, SF_STATS_BASE + SF_STATS_RX_CRC_ERRS);
2437 stats->sf_rx_alignerrs =
2438 csr_read_4(sc, SF_STATS_BASE + SF_STATS_RX_ALIGN_ERRS);
2439 stats->sf_rx_bytes =
2440 csr_read_4(sc, SF_STATS_BASE + SF_STATS_RX_BYTES);
2441 stats->sf_rx_pause_frames =
2442 csr_read_4(sc, SF_STATS_BASE + SF_STATS_RX_PAUSE);
2443 stats->sf_rx_control_frames =
2444 csr_read_4(sc, SF_STATS_BASE + SF_STATS_RX_CTL_FRAME);
2445 stats->sf_rx_unsup_control_frames =
2446 csr_read_4(sc, SF_STATS_BASE + SF_STATS_RX_UNSUP_FRAME);
2447 stats->sf_rx_giants =
2448 csr_read_4(sc, SF_STATS_BASE + SF_STATS_RX_GIANTS);
2449 stats->sf_rx_runts =
2450 csr_read_4(sc, SF_STATS_BASE + SF_STATS_RX_RUNTS);
2451 stats->sf_rx_jabbererrs =
2452 csr_read_4(sc, SF_STATS_BASE + SF_STATS_RX_JABBER);
2453 stats->sf_rx_fragments =
2454 csr_read_4(sc, SF_STATS_BASE + SF_STATS_RX_FRAGMENTS);
2455 stats->sf_rx_pkts_64 =
2456 csr_read_4(sc, SF_STATS_BASE + SF_STATS_RX_64);
2457 stats->sf_rx_pkts_65_127 =
2458 csr_read_4(sc, SF_STATS_BASE + SF_STATS_RX_65_127);
2459 stats->sf_rx_pkts_128_255 =
2460 csr_read_4(sc, SF_STATS_BASE + SF_STATS_RX_128_255);
2461 stats->sf_rx_pkts_256_511 =
2462 csr_read_4(sc, SF_STATS_BASE + SF_STATS_RX_256_511);
2463 stats->sf_rx_pkts_512_1023 =
2464 csr_read_4(sc, SF_STATS_BASE + SF_STATS_RX_512_1023);
2465 stats->sf_rx_pkts_1024_1518 =
2466 csr_read_4(sc, SF_STATS_BASE + SF_STATS_RX_1024_1518);
2467 stats->sf_rx_frames_lost =
2468 csr_read_4(sc, SF_STATS_BASE + SF_STATS_RX_FRAME_LOST);
2469 /* Only the lower 16 bits are valid. */
2470 stats->sf_tx_underruns =
2471 (csr_read_4(sc, SF_STATS_BASE + SF_STATS_TX_UNDERRUN) & 0xffff);
2472
2473 /* Empty stats counter registers. */
2474 for (i = SF_STATS_BASE; i < (SF_STATS_END + 1); i += sizeof(uint32_t))
2475 csr_write_4(sc, i, 0);
2476
2477 ifp->if_opackets += (u_long)stats->sf_tx_frames;
2478
2479 ifp->if_collisions += (u_long)stats->sf_tx_single_colls +
2480 (u_long)stats->sf_tx_multi_colls;
2481
2482 ifp->if_oerrors += (u_long)stats->sf_tx_excess_colls +
2483 (u_long)stats->sf_tx_excess_defer +
2484 (u_long)stats->sf_tx_frames_lost;
2485
2486 ifp->if_ipackets += (u_long)stats->sf_rx_frames;
2487
2488 ifp->if_ierrors += (u_long)stats->sf_rx_crcerrs +
2489 (u_long)stats->sf_rx_alignerrs +
2490 (u_long)stats->sf_rx_giants +
2491 (u_long)stats->sf_rx_runts +
2492 (u_long)stats->sf_rx_jabbererrs +
2493 (u_long)stats->sf_rx_frames_lost;
2494
2495 nstats = &sc->sf_statistics;
2496
2497 nstats->sf_tx_frames += stats->sf_tx_frames;
2498 nstats->sf_tx_single_colls += stats->sf_tx_single_colls;
2499 nstats->sf_tx_multi_colls += stats->sf_tx_multi_colls;
2500 nstats->sf_tx_crcerrs += stats->sf_tx_crcerrs;
2501 nstats->sf_tx_bytes += stats->sf_tx_bytes;
2502 nstats->sf_tx_deferred += stats->sf_tx_deferred;
2503 nstats->sf_tx_late_colls += stats->sf_tx_late_colls;
2504 nstats->sf_tx_pause_frames += stats->sf_tx_pause_frames;
2505 nstats->sf_tx_control_frames += stats->sf_tx_control_frames;
2506 nstats->sf_tx_excess_colls += stats->sf_tx_excess_colls;
2507 nstats->sf_tx_excess_defer += stats->sf_tx_excess_defer;
2508 nstats->sf_tx_mcast_frames += stats->sf_tx_mcast_frames;
2509 nstats->sf_tx_bcast_frames += stats->sf_tx_bcast_frames;
2510 nstats->sf_tx_frames_lost += stats->sf_tx_frames_lost;
2511 nstats->sf_rx_frames += stats->sf_rx_frames;
2512 nstats->sf_rx_crcerrs += stats->sf_rx_crcerrs;
2513 nstats->sf_rx_alignerrs += stats->sf_rx_alignerrs;
2514 nstats->sf_rx_bytes += stats->sf_rx_bytes;
2515 nstats->sf_rx_pause_frames += stats->sf_rx_pause_frames;
2516 nstats->sf_rx_control_frames += stats->sf_rx_control_frames;
2517 nstats->sf_rx_unsup_control_frames += stats->sf_rx_unsup_control_frames;
2518 nstats->sf_rx_giants += stats->sf_rx_giants;
2519 nstats->sf_rx_runts += stats->sf_rx_runts;
2520 nstats->sf_rx_jabbererrs += stats->sf_rx_jabbererrs;
2521 nstats->sf_rx_fragments += stats->sf_rx_fragments;
2522 nstats->sf_rx_pkts_64 += stats->sf_rx_pkts_64;
2523 nstats->sf_rx_pkts_65_127 += stats->sf_rx_pkts_65_127;
2524 nstats->sf_rx_pkts_128_255 += stats->sf_rx_pkts_128_255;
2525 nstats->sf_rx_pkts_256_511 += stats->sf_rx_pkts_256_511;
2526 nstats->sf_rx_pkts_512_1023 += stats->sf_rx_pkts_512_1023;
2527 nstats->sf_rx_pkts_1024_1518 += stats->sf_rx_pkts_1024_1518;
2528 nstats->sf_rx_frames_lost += stats->sf_rx_frames_lost;
2529 nstats->sf_tx_underruns += stats->sf_tx_underruns;
2530 }
2531
2532 static void
2533 sf_watchdog(struct sf_softc *sc)
2534 {
2535 struct ifnet *ifp;
2536
2537 SF_LOCK_ASSERT(sc);
2538
2539 if (sc->sf_watchdog_timer == 0 || --sc->sf_watchdog_timer)
2540 return;
2541
2542 ifp = sc->sf_ifp;
2543
2544 ifp->if_oerrors++;
2545 if (sc->sf_link == 0) {
2546 if (bootverbose)
2547 if_printf(sc->sf_ifp, "watchdog timeout "
2548 "(missed link)\n");
2549 } else
2550 if_printf(ifp, "watchdog timeout, %d Tx descs are active\n",
2551 sc->sf_cdata.sf_tx_cnt);
2552
2553 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
2554 sf_init_locked(sc);
2555
2556 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
2557 sf_start_locked(ifp);
2558 }
2559
2560 static int
2561 sf_shutdown(device_t dev)
2562 {
2563 struct sf_softc *sc;
2564
2565 sc = device_get_softc(dev);
2566
2567 SF_LOCK(sc);
2568 sf_stop(sc);
2569 SF_UNLOCK(sc);
2570
2571 return (0);
2572 }
2573
2574 static int
2575 sf_suspend(device_t dev)
2576 {
2577 struct sf_softc *sc;
2578
2579 sc = device_get_softc(dev);
2580
2581 SF_LOCK(sc);
2582 sf_stop(sc);
2583 sc->sf_suspended = 1;
2584 bus_generic_suspend(dev);
2585 SF_UNLOCK(sc);
2586
2587 return (0);
2588 }
2589
2590 static int
2591 sf_resume(device_t dev)
2592 {
2593 struct sf_softc *sc;
2594 struct ifnet *ifp;
2595
2596 sc = device_get_softc(dev);
2597
2598 SF_LOCK(sc);
2599 bus_generic_resume(dev);
2600 ifp = sc->sf_ifp;
2601 if ((ifp->if_flags & IFF_UP) != 0)
2602 sf_init_locked(sc);
2603
2604 sc->sf_suspended = 0;
2605 SF_UNLOCK(sc);
2606
2607 return (0);
2608 }
2609
2610 static int
2611 sf_sysctl_stats(SYSCTL_HANDLER_ARGS)
2612 {
2613 struct sf_softc *sc;
2614 struct sf_stats *stats;
2615 int error;
2616 int result;
2617
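/*
 * Writing 1 to this node dumps the accumulated statistics to the
 * console; any other value is ignored. The node is typically
 * reached as something like "dev.sf.0.stats" (the exact name is
 * established at attach time, which is not shown here).
 */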
2618 result = -1;
2619 error = sysctl_handle_int(oidp, &result, 0, req);
2620
2621 if (error != 0 || req->newptr == NULL)
2622 return (error);
2623
2624 if (result != 1)
2625 return (error);
2626
2627 sc = (struct sf_softc *)arg1;
2628 stats = &sc->sf_statistics;
2629
2630 printf("%s statistics:\n", device_get_nameunit(sc->sf_dev));
2631 printf("Transmit good frames : %ju\n",
2632 (uintmax_t)stats->sf_tx_frames);
2633 printf("Transmit good octets : %ju\n",
2634 (uintmax_t)stats->sf_tx_bytes);
2635 printf("Transmit single collisions : %u\n",
2636 stats->sf_tx_single_colls);
2637 printf("Transmit multiple collisions : %u\n",
2638 stats->sf_tx_multi_colls);
2639 printf("Transmit late collisions : %u\n",
2640 stats->sf_tx_late_colls);
2641 printf("Transmit abort due to excessive collisions : %u\n",
2642 stats->sf_tx_excess_colls);
2643 printf("Transmit CRC errors : %u\n",
2644 stats->sf_tx_crcerrs);
2645 printf("Transmit deferrals : %u\n",
2646 stats->sf_tx_deferred);
2647 printf("Transmit abort due to excessive deferrals : %u\n",
2648 stats->sf_tx_excess_defer);
2649 printf("Transmit pause control frames : %u\n",
2650 stats->sf_tx_pause_frames);
2651 printf("Transmit control frames : %u\n",
2652 stats->sf_tx_control_frames);
2653 printf("Transmit good multicast frames : %u\n",
2654 stats->sf_tx_mcast_frames);
2655 printf("Transmit good broadcast frames : %u\n",
2656 stats->sf_tx_bcast_frames);
2657 printf("Transmit frames lost due to internal transmit errors : %u\n",
2658 stats->sf_tx_frames_lost);
2659 printf("Transmit FIFO underflows : %u\n",
2660 stats->sf_tx_underruns);
2661 printf("Transmit GFP stalls : %u\n", stats->sf_tx_gfp_stall);
2662 printf("Receive good frames : %ju\n",
2663 (uint64_t)stats->sf_rx_frames);
2664 printf("Receive good octets : %ju\n",
2665 (uint64_t)stats->sf_rx_bytes);
2666 printf("Receive CRC errors : %u\n",
2667 stats->sf_rx_crcerrs);
2668 printf("Receive alignment errors : %u\n",
2669 stats->sf_rx_alignerrs);
2670 printf("Receive pause frames : %u\n",
2671 stats->sf_rx_pause_frames);
2672 printf("Receive control frames : %u\n",
2673 stats->sf_rx_control_frames);
2674 printf("Receive control frames with unsupported opcode : %u\n",
2675 stats->sf_rx_unsup_control_frames);
2676 printf("Receive frames too long : %u\n",
2677 stats->sf_rx_giants);
2678 printf("Receive frames too short : %u\n",
2679 stats->sf_rx_runts);
2680 printf("Receive frames jabber errors : %u\n",
2681 stats->sf_rx_jabbererrs);
2682 printf("Receive frames fragments : %u\n",
2683 stats->sf_rx_fragments);
2684 printf("Receive packets 64 bytes : %ju\n",
2685 (uint64_t)stats->sf_rx_pkts_64);
2686 printf("Receive packets 65 to 127 bytes : %ju\n",
2687 (uint64_t)stats->sf_rx_pkts_65_127);
2688 printf("Receive packets 128 to 255 bytes : %ju\n",
2689 (uint64_t)stats->sf_rx_pkts_128_255);
2690 printf("Receive packets 256 to 511 bytes : %ju\n",
2691 (uint64_t)stats->sf_rx_pkts_256_511);
2692 printf("Receive packets 512 to 1023 bytes : %ju\n",
2693 (uint64_t)stats->sf_rx_pkts_512_1023);
2694 printf("Receive packets 1024 to 1518 bytes : %ju\n",
2695 (uint64_t)stats->sf_rx_pkts_1024_1518);
2696 printf("Receive frames lost due to internal receive errors : %u\n",
2697 stats->sf_rx_frames_lost);
2698 printf("Receive GFP stalls : %u\n", stats->sf_rx_gfp_stall);
2699
2700 return (error);
2701 }
2702
2703 static int
2704 sysctl_int_range(SYSCTL_HANDLER_ARGS, int low, int high)
2705 {
2706 int error, value;
2707
2708 if (!arg1)
2709 return (EINVAL);
2710 value = *(int *)arg1;
2711 error = sysctl_handle_int(oidp, &value, 0, req);
2712 if (error || !req->newptr)
2713 return (error);
2714 if (value < low || value > high)
2715 return (EINVAL);
2716 *(int *)arg1 = value;
2717
2718 return (0);
2719 }
2720
2721 static int
2722 sysctl_hw_sf_int_mod(SYSCTL_HANDLER_ARGS)
2723 {
2724
2725 return (sysctl_int_range(oidp, arg1, arg2, req, SF_IM_MIN, SF_IM_MAX));
2726 }