FreeBSD/Linux Kernel Cross Reference
sys/dev/msk/if_msk.c
1 /******************************************************************************
2 *
3 * Name : sky2.c
4 * Project: Gigabit Ethernet Driver for FreeBSD 5.x/6.x
5 * Version: $Revision: 1.11 $
6 * Date : $Date: 2005/12/22 09:04:11 $
7 * Purpose: Main driver source file
8 *
9 *****************************************************************************/
10
11 /******************************************************************************
12 *
13 * LICENSE:
14 * Copyright (C) Marvell International Ltd. and/or its affiliates
15 *
16 * The computer program files contained in this folder ("Files")
17 * are provided to you under the BSD-type license terms provided
18 * below, and any use of such Files and any derivative works
19 * thereof created by you shall be governed by the following terms
20 * and conditions:
21 *
22 * - Redistributions of source code must retain the above copyright
23 * notice, this list of conditions and the following disclaimer.
24 * - Redistributions in binary form must reproduce the above
25 * copyright notice, this list of conditions and the following
26 * disclaimer in the documentation and/or other materials provided
27 * with the distribution.
28 * - Neither the name of Marvell nor the names of its contributors
29 * may be used to endorse or promote products derived from this
30 * software without specific prior written permission.
31 *
32 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
33 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
34 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
35 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
36 * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
37 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
38 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
39 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
40 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
41 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
42 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
43 * OF THE POSSIBILITY OF SUCH DAMAGE.
44 * /LICENSE
45 *
46 *****************************************************************************/
47
48 /*-
49 * Copyright (c) 1997, 1998, 1999, 2000
50 * Bill Paul <wpaul@ctr.columbia.edu>. All rights reserved.
51 *
52 * Redistribution and use in source and binary forms, with or without
53 * modification, are permitted provided that the following conditions
54 * are met:
55 * 1. Redistributions of source code must retain the above copyright
56 * notice, this list of conditions and the following disclaimer.
57 * 2. Redistributions in binary form must reproduce the above copyright
58 * notice, this list of conditions and the following disclaimer in the
59 * documentation and/or other materials provided with the distribution.
60 * 3. All advertising materials mentioning features or use of this software
61 * must display the following acknowledgement:
62 * This product includes software developed by Bill Paul.
63 * 4. Neither the name of the author nor the names of any co-contributors
64 * may be used to endorse or promote products derived from this software
65 * without specific prior written permission.
66 *
67 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
68 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
69 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
70 * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
71 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
72 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
73 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
74 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
75 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
76 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
77 * THE POSSIBILITY OF SUCH DAMAGE.
78 */
79 /*-
80 * Copyright (c) 2003 Nathan L. Binkert <binkertn@umich.edu>
81 *
82 * Permission to use, copy, modify, and distribute this software for any
83 * purpose with or without fee is hereby granted, provided that the above
84 * copyright notice and this permission notice appear in all copies.
85 *
86 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
87 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
88 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
89 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
90 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
91 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
92 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
93 */
94
95 /*
96 * Device driver for the Marvell Yukon II Ethernet controller.
97 * Due to lack of documentation, this driver is based on the code from
98 * sk(4) and Marvell's myk(4) driver for FreeBSD 5.x.
99 */
100
101 #include <sys/cdefs.h>
102 __FBSDID("$FreeBSD$");
103
104 #include <sys/param.h>
105 #include <sys/systm.h>
106 #include <sys/bus.h>
107 #include <sys/endian.h>
108 #include <sys/mbuf.h>
109 #include <sys/malloc.h>
110 #include <sys/kernel.h>
111 #include <sys/module.h>
112 #include <sys/socket.h>
113 #include <sys/sockio.h>
114 #include <sys/queue.h>
115 #include <sys/sysctl.h>
116 #include <sys/taskqueue.h>
117
118 #include <net/bpf.h>
119 #include <net/ethernet.h>
120 #include <net/if.h>
121 #include <net/if_arp.h>
122 #include <net/if_dl.h>
123 #include <net/if_media.h>
124 #include <net/if_types.h>
125 #include <net/if_vlan_var.h>
126
127 #include <netinet/in.h>
128 #include <netinet/in_systm.h>
129 #include <netinet/ip.h>
130 #include <netinet/tcp.h>
131 #include <netinet/udp.h>
132
133 #include <machine/bus.h>
134 #include <machine/in_cksum.h>
135 #include <machine/resource.h>
136 #include <sys/rman.h>
137
138 #include <dev/mii/mii.h>
139 #include <dev/mii/miivar.h>
140 #include <dev/mii/brgphyreg.h>
141
142 #include <dev/pci/pcireg.h>
143 #include <dev/pci/pcivar.h>
144
145 #include <dev/msk/if_mskreg.h>
146
147 MODULE_DEPEND(msk, pci, 1, 1, 1);
148 MODULE_DEPEND(msk, ether, 1, 1, 1);
149 MODULE_DEPEND(msk, miibus, 1, 1, 1);
150
151 /* "device miibus" required. See GENERIC if you get errors here. */
152 #include "miibus_if.h"
153
154 /* Tunables. */
155 static int msi_disable = 0;
156 TUNABLE_INT("hw.msk.msi_disable", &msi_disable);
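/*
 * The tunable is read from the kernel environment at module load, so it
 * can be set from /boot/loader.conf, e.g. (to force legacy INTx):
 *
 *	hw.msk.msi_disable="1"
 */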
157
158 #define MSK_CSUM_FEATURES (CSUM_TCP | CSUM_UDP)
159
160 /* RELENG_6 support code */
161 #ifndef IFCAP_TSO4
162 #define IFCAP_TSO4 0
163 #define CSUM_TSO 0
164 #endif
165 #ifndef VLAN_CAPABILITIES
166 #define VLAN_CAPABILITIES(x)
167 #endif
168 #ifndef IFCAP_VLAN_HWCSUM
169 #define IFCAP_VLAN_HWCSUM 0
170 #endif
171 #define MSI_SUPPORT
172 #undef TSO_SUPPORT
173
174 /*
175 * Devices supported by this driver.
176 */
177 static struct msk_product {
178 uint16_t msk_vendorid;
179 uint16_t msk_deviceid;
180 const char *msk_name;
181 } msk_products[] = {
182 { VENDORID_SK, DEVICEID_SK_YUKON2,
183 "SK-9Sxx Gigabit Ethernet" },
184 { VENDORID_SK, DEVICEID_SK_YUKON2_EXPR,
185 "SK-9Exx Gigabit Ethernet"},
186 { VENDORID_MARVELL, DEVICEID_MRVL_8021CU,
187 "Marvell Yukon 88E8021CU Gigabit Ethernet" },
188 { VENDORID_MARVELL, DEVICEID_MRVL_8021X,
189 "Marvell Yukon 88E8021 SX/LX Gigabit Ethernet" },
190 { VENDORID_MARVELL, DEVICEID_MRVL_8022CU,
191 "Marvell Yukon 88E8022CU Gigabit Ethernet" },
192 { VENDORID_MARVELL, DEVICEID_MRVL_8022X,
193 "Marvell Yukon 88E8022 SX/LX Gigabit Ethernet" },
194 { VENDORID_MARVELL, DEVICEID_MRVL_8061CU,
195 "Marvell Yukon 88E8061CU Gigabit Ethernet" },
196 { VENDORID_MARVELL, DEVICEID_MRVL_8061X,
197 "Marvell Yukon 88E8061 SX/LX Gigabit Ethernet" },
198 { VENDORID_MARVELL, DEVICEID_MRVL_8062CU,
199 "Marvell Yukon 88E8062CU Gigabit Ethernet" },
200 { VENDORID_MARVELL, DEVICEID_MRVL_8062X,
201 "Marvell Yukon 88E8062 SX/LX Gigabit Ethernet" },
202 { VENDORID_MARVELL, DEVICEID_MRVL_8035,
203 "Marvell Yukon 88E8035 Gigabit Ethernet" },
204 { VENDORID_MARVELL, DEVICEID_MRVL_8036,
205 "Marvell Yukon 88E8036 Gigabit Ethernet" },
206 { VENDORID_MARVELL, DEVICEID_MRVL_8038,
207 "Marvell Yukon 88E8038 Gigabit Ethernet" },
208 { VENDORID_MARVELL, DEVICEID_MRVL_8039,
209 "Marvell Yukon 88E8039 Gigabit Ethernet" },
210 { VENDORID_MARVELL, DEVICEID_MRVL_4361,
211 "Marvell Yukon 88E8050 Gigabit Ethernet" },
212 { VENDORID_MARVELL, DEVICEID_MRVL_4360,
213 "Marvell Yukon 88E8052 Gigabit Ethernet" },
214 { VENDORID_MARVELL, DEVICEID_MRVL_4362,
215 "Marvell Yukon 88E8053 Gigabit Ethernet" },
216 { VENDORID_MARVELL, DEVICEID_MRVL_4363,
217 "Marvell Yukon 88E8055 Gigabit Ethernet" },
218 { VENDORID_MARVELL, DEVICEID_MRVL_4364,
219 "Marvell Yukon 88E8056 Gigabit Ethernet" },
220 { VENDORID_MARVELL, DEVICEID_MRVL_436A,
221 "Marvell Yukon 88E8058 Gigabit Ethernet" },
222 { VENDORID_DLINK, DEVICEID_DLINK_DGE550SX,
223 "D-Link 550SX Gigabit Ethernet" },
224 { VENDORID_DLINK, DEVICEID_DLINK_DGE560T,
225 "D-Link 560T Gigabit Ethernet" }
226 };
227
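/* Chip names, indexed by msk_hw_id - CHIP_ID_YUKON_XL (see msk_probe()). */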
228 static const char *model_name[] = {
229 "Yukon XL",
230 "Yukon EC Ultra",
231 "Yukon Unknown",
232 "Yukon EC",
233 "Yukon FE"
234 };
235
236 static int mskc_probe(device_t);
237 static int mskc_attach(device_t);
238 static int mskc_detach(device_t);
239 static void mskc_shutdown(device_t);
240 static int mskc_setup_rambuffer(struct msk_softc *);
241 static int mskc_suspend(device_t);
242 static int mskc_resume(device_t);
243 static void mskc_reset(struct msk_softc *);
244
245 static int msk_probe(device_t);
246 static int msk_attach(device_t);
247 static int msk_detach(device_t);
248
249 static void msk_tick(void *);
250 static void msk_intr(void *);
251 static void msk_int_task(void *, int);
252 static void msk_intr_phy(struct msk_if_softc *);
253 static void msk_intr_gmac(struct msk_if_softc *);
254 static __inline void msk_rxput(struct msk_if_softc *);
255 static int msk_handle_events(struct msk_softc *);
256 static void msk_handle_hwerr(struct msk_if_softc *, uint32_t);
257 static void msk_intr_hwerr(struct msk_softc *);
258 static void msk_rxeof(struct msk_if_softc *, uint32_t, int);
259 static void msk_jumbo_rxeof(struct msk_if_softc *, uint32_t, int);
260 static void msk_txeof(struct msk_if_softc *, int);
261 static struct mbuf *msk_defrag(struct mbuf *, int, int);
262 static int msk_encap(struct msk_if_softc *, struct mbuf **);
263 static void msk_tx_task(void *, int);
264 static void msk_start(struct ifnet *);
265 static int msk_ioctl(struct ifnet *, u_long, caddr_t);
266 static void msk_set_prefetch(struct msk_softc *, int, bus_addr_t, uint32_t);
267 static void msk_set_rambuffer(struct msk_if_softc *);
268 static void msk_init(void *);
269 static void msk_init_locked(struct msk_if_softc *);
270 static void msk_stop(struct msk_if_softc *);
271 static void msk_watchdog(struct msk_if_softc *);
272 static int msk_mediachange(struct ifnet *);
273 static void msk_mediastatus(struct ifnet *, struct ifmediareq *);
274 static void msk_phy_power(struct msk_softc *, int);
275 static void msk_dmamap_cb(void *, bus_dma_segment_t *, int, int);
276 static int msk_status_dma_alloc(struct msk_softc *);
277 static void msk_status_dma_free(struct msk_softc *);
278 static int msk_txrx_dma_alloc(struct msk_if_softc *);
279 static void msk_txrx_dma_free(struct msk_if_softc *);
280 static void *msk_jalloc(struct msk_if_softc *);
281 static void msk_jfree(void *, void *);
282 static int msk_init_rx_ring(struct msk_if_softc *);
283 static int msk_init_jumbo_rx_ring(struct msk_if_softc *);
284 static void msk_init_tx_ring(struct msk_if_softc *);
285 static __inline void msk_discard_rxbuf(struct msk_if_softc *, int);
286 static __inline void msk_discard_jumbo_rxbuf(struct msk_if_softc *, int);
287 static int msk_newbuf(struct msk_if_softc *, int);
288 static int msk_jumbo_newbuf(struct msk_if_softc *, int);
289
290 static int msk_phy_readreg(struct msk_if_softc *, int, int);
291 static int msk_phy_writereg(struct msk_if_softc *, int, int, int);
292 static int msk_miibus_readreg(device_t, int, int);
293 static int msk_miibus_writereg(device_t, int, int, int);
294 static void msk_miibus_statchg(device_t);
295 static void msk_link_task(void *, int);
296
297 static void msk_setmulti(struct msk_if_softc *);
298 static void msk_setvlan(struct msk_if_softc *, struct ifnet *);
299 static void msk_setpromisc(struct msk_if_softc *);
300
301 static int sysctl_int_range(SYSCTL_HANDLER_ARGS, int, int);
302 static int sysctl_hw_msk_proc_limit(SYSCTL_HANDLER_ARGS);
303
304 static device_method_t mskc_methods[] = {
305 /* Device interface */
306 DEVMETHOD(device_probe, mskc_probe),
307 DEVMETHOD(device_attach, mskc_attach),
308 DEVMETHOD(device_detach, mskc_detach),
309 DEVMETHOD(device_suspend, mskc_suspend),
310 DEVMETHOD(device_resume, mskc_resume),
311 DEVMETHOD(device_shutdown, mskc_shutdown),
312
313 /* bus interface */
314 DEVMETHOD(bus_print_child, bus_generic_print_child),
315 DEVMETHOD(bus_driver_added, bus_generic_driver_added),
316
317 { NULL, NULL }
318 };
319
320 static driver_t mskc_driver = {
321 "mskc",
322 mskc_methods,
323 sizeof(struct msk_softc)
324 };
325
326 static devclass_t mskc_devclass;
327
328 static device_method_t msk_methods[] = {
329 /* Device interface */
330 DEVMETHOD(device_probe, msk_probe),
331 DEVMETHOD(device_attach, msk_attach),
332 DEVMETHOD(device_detach, msk_detach),
333 DEVMETHOD(device_shutdown, bus_generic_shutdown),
334
335 /* bus interface */
336 DEVMETHOD(bus_print_child, bus_generic_print_child),
337 DEVMETHOD(bus_driver_added, bus_generic_driver_added),
338
339 /* MII interface */
340 DEVMETHOD(miibus_readreg, msk_miibus_readreg),
341 DEVMETHOD(miibus_writereg, msk_miibus_writereg),
342 DEVMETHOD(miibus_statchg, msk_miibus_statchg),
343
344 { NULL, NULL }
345 };
346
347 static driver_t msk_driver = {
348 "msk",
349 msk_methods,
350 sizeof(struct msk_if_softc)
351 };
352
353 static devclass_t msk_devclass;
354
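/*
 * Attach hierarchy: pci -> mskc (controller) -> msk (one child per port)
 * -> miibus (PHY).
 */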
355 DRIVER_MODULE(mskc, pci, mskc_driver, mskc_devclass, 0, 0);
356 DRIVER_MODULE(msk, mskc, msk_driver, msk_devclass, 0, 0);
357 DRIVER_MODULE(miibus, msk, miibus_driver, miibus_devclass, 0, 0);
358
359 static int
360 msk_miibus_readreg(device_t dev, int phy, int reg)
361 {
362 struct msk_if_softc *sc_if;
363
364 if (phy != PHY_ADDR_MARV)
365 return (0);
366
367 sc_if = device_get_softc(dev);
368
369 return (msk_phy_readreg(sc_if, phy, reg));
370 }
371
372 static int
373 msk_phy_readreg(struct msk_if_softc *sc_if, int phy, int reg)
374 {
375 struct msk_softc *sc;
376 int i, val;
377
378 sc = sc_if->msk_softc;
379
380 GMAC_WRITE_2(sc, sc_if->msk_port, GM_SMI_CTRL,
381 GM_SMI_CT_PHY_AD(phy) | GM_SMI_CT_REG_AD(reg) | GM_SMI_CT_OP_RD);
382
383 for (i = 0; i < MSK_TIMEOUT; i++) {
384 DELAY(1);
385 val = GMAC_READ_2(sc, sc_if->msk_port, GM_SMI_CTRL);
386 if ((val & GM_SMI_CT_RD_VAL) != 0) {
387 val = GMAC_READ_2(sc, sc_if->msk_port, GM_SMI_DATA);
388 break;
389 }
390 }
391
392 if (i == MSK_TIMEOUT) {
393 if_printf(sc_if->msk_ifp, "phy failed to come ready\n");
394 val = 0;
395 }
396
397 return (val);
398 }
399
400 static int
401 msk_miibus_writereg(device_t dev, int phy, int reg, int val)
402 {
403 struct msk_if_softc *sc_if;
404
405 if (phy != PHY_ADDR_MARV)
406 return (0);
407
408 sc_if = device_get_softc(dev);
409
410 return (msk_phy_writereg(sc_if, phy, reg, val));
411 }
412
413 static int
414 msk_phy_writereg(struct msk_if_softc *sc_if, int phy, int reg, int val)
415 {
416 struct msk_softc *sc;
417 int i;
418
419 sc = sc_if->msk_softc;
420
421 GMAC_WRITE_2(sc, sc_if->msk_port, GM_SMI_DATA, val);
422 GMAC_WRITE_2(sc, sc_if->msk_port, GM_SMI_CTRL,
423 GM_SMI_CT_PHY_AD(phy) | GM_SMI_CT_REG_AD(reg));
424 for (i = 0; i < MSK_TIMEOUT; i++) {
425 DELAY(1);
426 if ((GMAC_READ_2(sc, sc_if->msk_port, GM_SMI_CTRL) &
427 GM_SMI_CT_BUSY) == 0)
428 break;
429 }
430 if (i == MSK_TIMEOUT)
431 if_printf(sc_if->msk_ifp, "phy write timeout\n");
432
433 return (0);
434 }
435
436 static void
437 msk_miibus_statchg(device_t dev)
438 {
439 struct msk_if_softc *sc_if;
440
441 sc_if = device_get_softc(dev);
442 taskqueue_enqueue(taskqueue_swi, &sc_if->msk_link_task);
443 }
444
445 static void
446 msk_link_task(void *arg, int pending)
447 {
448 struct msk_softc *sc;
449 struct msk_if_softc *sc_if;
450 struct mii_data *mii;
451 struct ifnet *ifp;
452 uint32_t gmac;
453
454 sc_if = (struct msk_if_softc *)arg;
455 sc = sc_if->msk_softc;
456
457 MSK_IF_LOCK(sc_if);
458
459 mii = device_get_softc(sc_if->msk_miibus);
460 ifp = sc_if->msk_ifp;
461 if (mii == NULL || ifp == NULL) {
462 MSK_IF_UNLOCK(sc_if);
463 return;
464 }
465
466 if (mii->mii_media_status & IFM_ACTIVE) {
467 if (IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE)
468 sc_if->msk_link = 1;
469 } else
470 sc_if->msk_link = 0;
471
472 if (sc_if->msk_link != 0) {
473 /* Enable Tx FIFO Underrun. */
474 CSR_WRITE_1(sc, MR_ADDR(sc_if->msk_port, GMAC_IRQ_MSK),
475 GM_IS_TX_FF_UR | GM_IS_RX_FF_OR);
476 /*
477		 * Because mii(4) notifies msk(4) when it detects a link
478		 * status change, there is no need to enable automatic
479		 * speed/flow-control/duplex updates.
480 */
481 gmac = GM_GPCR_AU_ALL_DIS;
482 switch (IFM_SUBTYPE(mii->mii_media_active)) {
483 case IFM_1000_SX:
484 case IFM_1000_T:
485 gmac |= GM_GPCR_SPEED_1000;
486 break;
487 case IFM_100_TX:
488 gmac |= GM_GPCR_SPEED_100;
489 break;
490 case IFM_10_T:
491 break;
492 }
493
494 if (((mii->mii_media_active & IFM_GMASK) & IFM_FDX) != 0)
495 gmac |= GM_GPCR_DUP_FULL;
496 /* Disable Rx flow control. */
497 if (((mii->mii_media_active & IFM_GMASK) & IFM_FLAG0) == 0)
498 gmac |= GM_GPCR_FC_RX_DIS;
499 /* Disable Tx flow control. */
500 if (((mii->mii_media_active & IFM_GMASK) & IFM_FLAG1) == 0)
501 gmac |= GM_GPCR_FC_TX_DIS;
502 gmac |= GM_GPCR_RX_ENA | GM_GPCR_TX_ENA;
503 GMAC_WRITE_2(sc, sc_if->msk_port, GM_GP_CTRL, gmac);
504		/* Read back to flush the write. */
505 GMAC_READ_2(sc, sc_if->msk_port, GM_GP_CTRL);
506
507 gmac = GMC_PAUSE_ON;
508 if (((mii->mii_media_active & IFM_GMASK) &
509 (IFM_FLAG0 | IFM_FLAG1)) == 0)
510 gmac = GMC_PAUSE_OFF;
511		/* Disable pause for 10/100 Mbps in half-duplex mode. */
512 if ((((mii->mii_media_active & IFM_GMASK) & IFM_FDX) == 0) &&
513 (IFM_SUBTYPE(mii->mii_media_active) == IFM_100_TX ||
514 IFM_SUBTYPE(mii->mii_media_active) == IFM_10_T))
515 gmac = GMC_PAUSE_OFF;
516 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, GMAC_CTRL), gmac);
517
518 /* Enable PHY interrupt for FIFO underrun/overflow. */
519 msk_phy_writereg(sc_if, PHY_ADDR_MARV,
520 PHY_MARV_INT_MASK, PHY_M_IS_FIFO_ERROR);
521 } else {
522 /*
523 * Link state changed to down.
524 * Disable PHY interrupts.
525 */
526 msk_phy_writereg(sc_if, PHY_ADDR_MARV, PHY_MARV_INT_MASK, 0);
527 /* Disable Rx/Tx MAC. */
528 gmac = GMAC_READ_2(sc, sc_if->msk_port, GM_GP_CTRL);
529 gmac &= ~(GM_GPCR_RX_ENA | GM_GPCR_TX_ENA);
530 GMAC_WRITE_2(sc, sc_if->msk_port, GM_GP_CTRL, gmac);
531		/* Read back to flush the write. */
532 GMAC_READ_2(sc, sc_if->msk_port, GM_GP_CTRL);
533 }
534
535 MSK_IF_UNLOCK(sc_if);
536 }
537
538 static void
539 msk_setmulti(struct msk_if_softc *sc_if)
540 {
541 struct msk_softc *sc;
542 struct ifnet *ifp;
543 struct ifmultiaddr *ifma;
544 uint32_t mchash[2];
545 uint32_t crc;
546 uint16_t mode;
547
548 sc = sc_if->msk_softc;
549
550 MSK_IF_LOCK_ASSERT(sc_if);
551
552 ifp = sc_if->msk_ifp;
553
554 bzero(mchash, sizeof(mchash));
555 mode = GMAC_READ_2(sc, sc_if->msk_port, GM_RX_CTRL);
556 mode |= GM_RXCR_UCF_ENA;
557 if ((ifp->if_flags & (IFF_PROMISC | IFF_ALLMULTI)) != 0) {
558 if ((ifp->if_flags & IFF_PROMISC) != 0)
559 mode &= ~(GM_RXCR_UCF_ENA | GM_RXCR_MCF_ENA);
560 else if ((ifp->if_flags & IFF_ALLMULTI) != 0) {
561			mchash[0] = 0xffffffff;	/* accept all 64 hash bits */
562			mchash[1] = 0xffffffff;
563 }
564 } else {
565 IF_ADDR_LOCK(ifp);
566 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
567 if (ifma->ifma_addr->sa_family != AF_LINK)
568 continue;
569 crc = ether_crc32_be(LLADDR((struct sockaddr_dl *)
570 ifma->ifma_addr), ETHER_ADDR_LEN);
571 /* Just want the 6 least significant bits. */
572 crc &= 0x3f;
573 /* Set the corresponding bit in the hash table. */
574 mchash[crc >> 5] |= 1 << (crc & 0x1f);
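			/*
			 * Example: crc = 0x2b (43) selects mchash[1]
			 * (43 >> 5) and bit 11 (43 & 0x1f) within it.
			 */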
575 }
576 IF_ADDR_UNLOCK(ifp);
577 mode |= GM_RXCR_MCF_ENA;
578 }
579
580 GMAC_WRITE_2(sc, sc_if->msk_port, GM_MC_ADDR_H1,
581 mchash[0] & 0xffff);
582 GMAC_WRITE_2(sc, sc_if->msk_port, GM_MC_ADDR_H2,
583 (mchash[0] >> 16) & 0xffff);
584 GMAC_WRITE_2(sc, sc_if->msk_port, GM_MC_ADDR_H3,
585 mchash[1] & 0xffff);
586 GMAC_WRITE_2(sc, sc_if->msk_port, GM_MC_ADDR_H4,
587 (mchash[1] >> 16) & 0xffff);
588 GMAC_WRITE_2(sc, sc_if->msk_port, GM_RX_CTRL, mode);
589 }
590
591 static void
592 msk_setvlan(struct msk_if_softc *sc_if, struct ifnet *ifp)
593 {
594 struct msk_softc *sc;
595
596 sc = sc_if->msk_softc;
597 if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0) {
598 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_CTRL_T),
599 RX_VLAN_STRIP_ON);
600 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T),
601 TX_VLAN_TAG_ON);
602 } else {
603 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_CTRL_T),
604 RX_VLAN_STRIP_OFF);
605 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T),
606 TX_VLAN_TAG_OFF);
607 }
608 }
609
610 static void
611 msk_setpromisc(struct msk_if_softc *sc_if)
612 {
613 struct msk_softc *sc;
614 struct ifnet *ifp;
615 uint16_t mode;
616
617 MSK_IF_LOCK_ASSERT(sc_if);
618
619 sc = sc_if->msk_softc;
620 ifp = sc_if->msk_ifp;
621
622 mode = GMAC_READ_2(sc, sc_if->msk_port, GM_RX_CTRL);
623 if (ifp->if_flags & IFF_PROMISC)
624 mode &= ~(GM_RXCR_UCF_ENA | GM_RXCR_MCF_ENA);
625 else
626 mode |= (GM_RXCR_UCF_ENA | GM_RXCR_MCF_ENA);
627 GMAC_WRITE_2(sc, sc_if->msk_port, GM_RX_CTRL, mode);
628 }
629
630 static int
631 msk_init_rx_ring(struct msk_if_softc *sc_if)
632 {
633 struct msk_ring_data *rd;
634 struct msk_rxdesc *rxd;
635 int i, prod;
636
637 MSK_IF_LOCK_ASSERT(sc_if);
638
639 sc_if->msk_cdata.msk_rx_cons = 0;
640 sc_if->msk_cdata.msk_rx_prod = 0;
641 sc_if->msk_cdata.msk_rx_putwm = MSK_PUT_WM;
642
643 rd = &sc_if->msk_rdata;
644 bzero(rd->msk_rx_ring, sizeof(struct msk_rx_desc) * MSK_RX_RING_CNT);
645 prod = sc_if->msk_cdata.msk_rx_prod;
646 for (i = 0; i < MSK_RX_RING_CNT; i++) {
647 rxd = &sc_if->msk_cdata.msk_rxdesc[prod];
648 rxd->rx_m = NULL;
649 rxd->rx_le = &rd->msk_rx_ring[prod];
650 if (msk_newbuf(sc_if, prod) != 0)
651 return (ENOBUFS);
652 MSK_INC(prod, MSK_RX_RING_CNT);
653 }
654
655 bus_dmamap_sync(sc_if->msk_cdata.msk_rx_ring_tag,
656 sc_if->msk_cdata.msk_rx_ring_map,
657 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
658
659 /* Update prefetch unit. */
660 sc_if->msk_cdata.msk_rx_prod = MSK_RX_RING_CNT - 1;
661 CSR_WRITE_2(sc_if->msk_softc,
662 Y2_PREF_Q_ADDR(sc_if->msk_rxq, PREF_UNIT_PUT_IDX_REG),
663 sc_if->msk_cdata.msk_rx_prod);
664
665 return (0);
666 }
667
668 static int
669 msk_init_jumbo_rx_ring(struct msk_if_softc *sc_if)
670 {
671 struct msk_ring_data *rd;
672 struct msk_rxdesc *rxd;
673 int i, prod;
674
675 MSK_IF_LOCK_ASSERT(sc_if);
676
677 sc_if->msk_cdata.msk_rx_cons = 0;
678 sc_if->msk_cdata.msk_rx_prod = 0;
679 sc_if->msk_cdata.msk_rx_putwm = MSK_PUT_WM;
680
681 rd = &sc_if->msk_rdata;
682 bzero(rd->msk_jumbo_rx_ring,
683 sizeof(struct msk_rx_desc) * MSK_JUMBO_RX_RING_CNT);
684 prod = sc_if->msk_cdata.msk_rx_prod;
685 for (i = 0; i < MSK_JUMBO_RX_RING_CNT; i++) {
686 rxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[prod];
687 rxd->rx_m = NULL;
688 rxd->rx_le = &rd->msk_jumbo_rx_ring[prod];
689 if (msk_jumbo_newbuf(sc_if, prod) != 0)
690 return (ENOBUFS);
691 MSK_INC(prod, MSK_JUMBO_RX_RING_CNT);
692 }
693
694 bus_dmamap_sync(sc_if->msk_cdata.msk_jumbo_rx_ring_tag,
695 sc_if->msk_cdata.msk_jumbo_rx_ring_map,
696 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
697
698 sc_if->msk_cdata.msk_rx_prod = MSK_JUMBO_RX_RING_CNT - 1;
699 CSR_WRITE_2(sc_if->msk_softc,
700 Y2_PREF_Q_ADDR(sc_if->msk_rxq, PREF_UNIT_PUT_IDX_REG),
701 sc_if->msk_cdata.msk_rx_prod);
702
703 return (0);
704 }
705
706 static void
707 msk_init_tx_ring(struct msk_if_softc *sc_if)
708 {
709 struct msk_ring_data *rd;
710 struct msk_txdesc *txd;
711 int i;
712
713 sc_if->msk_cdata.msk_tso_mtu = 0;
714 sc_if->msk_cdata.msk_tx_prod = 0;
715 sc_if->msk_cdata.msk_tx_cons = 0;
716 sc_if->msk_cdata.msk_tx_cnt = 0;
717
718 rd = &sc_if->msk_rdata;
719 bzero(rd->msk_tx_ring, sizeof(struct msk_tx_desc) * MSK_TX_RING_CNT);
720 for (i = 0; i < MSK_TX_RING_CNT; i++) {
721 txd = &sc_if->msk_cdata.msk_txdesc[i];
722 txd->tx_m = NULL;
723 txd->tx_le = &rd->msk_tx_ring[i];
724 }
725
726 bus_dmamap_sync(sc_if->msk_cdata.msk_tx_ring_tag,
727 sc_if->msk_cdata.msk_tx_ring_map,
728 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
729 }
730
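/*
 * Requeue the mbuf that is already in the ring slot by handing its
 * descriptor back to the hardware; used when a replacement buffer
 * cannot be allocated, so the frame is dropped but the ring stays full.
 */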
731 static __inline void
732 msk_discard_rxbuf(struct msk_if_softc *sc_if, int idx)
733 {
734 struct msk_rx_desc *rx_le;
735 struct msk_rxdesc *rxd;
736 struct mbuf *m;
737
738 rxd = &sc_if->msk_cdata.msk_rxdesc[idx];
739 m = rxd->rx_m;
740 rx_le = rxd->rx_le;
741 rx_le->msk_control = htole32(m->m_len | OP_PACKET | HW_OWNER);
742 }
743
744 static __inline void
745 msk_discard_jumbo_rxbuf(struct msk_if_softc *sc_if, int idx)
746 {
747 struct msk_rx_desc *rx_le;
748 struct msk_rxdesc *rxd;
749 struct mbuf *m;
750
751 rxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[idx];
752 m = rxd->rx_m;
753 rx_le = rxd->rx_le;
754 rx_le->msk_control = htole32(m->m_len | OP_PACKET | HW_OWNER);
755 }
756
757 static int
758 msk_newbuf(struct msk_if_softc *sc_if, int idx)
759 {
760 struct msk_rx_desc *rx_le;
761 struct msk_rxdesc *rxd;
762 struct mbuf *m;
763 bus_dma_segment_t segs[1];
764 bus_dmamap_t map;
765 int nsegs;
766
767 m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
768 if (m == NULL)
769 return (ENOBUFS);
770
771 m->m_len = m->m_pkthdr.len = MCLBYTES;
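	/* Shift the payload by 2 bytes so the IP header lands 32-bit aligned. */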
772 m_adj(m, ETHER_ALIGN);
773
774 if (bus_dmamap_load_mbuf_sg(sc_if->msk_cdata.msk_rx_tag,
775 sc_if->msk_cdata.msk_rx_sparemap, m, segs, &nsegs,
776 BUS_DMA_NOWAIT) != 0) {
777 m_freem(m);
778 return (ENOBUFS);
779 }
780 KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));
781
782 rxd = &sc_if->msk_cdata.msk_rxdesc[idx];
783 if (rxd->rx_m != NULL) {
784 bus_dmamap_sync(sc_if->msk_cdata.msk_rx_tag, rxd->rx_dmamap,
785 BUS_DMASYNC_POSTREAD);
786 bus_dmamap_unload(sc_if->msk_cdata.msk_rx_tag, rxd->rx_dmamap);
787 }
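	/*
	 * Swap the just-loaded spare map into the ring slot; the slot's
	 * old map becomes the new spare. Loading into the spare map first
	 * means a failed load above leaves the ring untouched.
	 */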
788 map = rxd->rx_dmamap;
789 rxd->rx_dmamap = sc_if->msk_cdata.msk_rx_sparemap;
790 sc_if->msk_cdata.msk_rx_sparemap = map;
791 bus_dmamap_sync(sc_if->msk_cdata.msk_rx_tag, rxd->rx_dmamap,
792 BUS_DMASYNC_PREREAD);
793 rxd->rx_m = m;
794 rx_le = rxd->rx_le;
795 rx_le->msk_addr = htole32(MSK_ADDR_LO(segs[0].ds_addr));
796 rx_le->msk_control =
797 htole32(segs[0].ds_len | OP_PACKET | HW_OWNER);
798
799 return (0);
800 }
801
802 static int
803 msk_jumbo_newbuf(struct msk_if_softc *sc_if, int idx)
804 {
805 struct msk_rx_desc *rx_le;
806 struct msk_rxdesc *rxd;
807 struct mbuf *m;
808 bus_dma_segment_t segs[1];
809 bus_dmamap_t map;
810 int nsegs;
811 void *buf;
812
813 MGETHDR(m, M_DONTWAIT, MT_DATA);
814 if (m == NULL)
815 return (ENOBUFS);
816 buf = msk_jalloc(sc_if);
817 if (buf == NULL) {
818 m_freem(m);
819 return (ENOBUFS);
820 }
821 /* Attach the buffer to the mbuf. */
822 MEXTADD(m, buf, MSK_JLEN, msk_jfree, (struct msk_if_softc *)sc_if, 0,
823 EXT_NET_DRV);
824 if ((m->m_flags & M_EXT) == 0) {
825 m_freem(m);
826 return (ENOBUFS);
827 }
828 m->m_pkthdr.len = m->m_len = MSK_JLEN;
829 m_adj(m, ETHER_ALIGN);
830
831 if (bus_dmamap_load_mbuf_sg(sc_if->msk_cdata.msk_jumbo_rx_tag,
832 sc_if->msk_cdata.msk_jumbo_rx_sparemap, m, segs, &nsegs,
833 BUS_DMA_NOWAIT) != 0) {
834 m_freem(m);
835 return (ENOBUFS);
836 }
837 KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));
838
839 rxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[idx];
840 if (rxd->rx_m != NULL) {
841 bus_dmamap_sync(sc_if->msk_cdata.msk_jumbo_rx_tag,
842 rxd->rx_dmamap, BUS_DMASYNC_POSTREAD);
843 bus_dmamap_unload(sc_if->msk_cdata.msk_jumbo_rx_tag,
844 rxd->rx_dmamap);
845 }
846 map = rxd->rx_dmamap;
847 rxd->rx_dmamap = sc_if->msk_cdata.msk_jumbo_rx_sparemap;
848 sc_if->msk_cdata.msk_jumbo_rx_sparemap = map;
849 bus_dmamap_sync(sc_if->msk_cdata.msk_jumbo_rx_tag, rxd->rx_dmamap,
850 BUS_DMASYNC_PREREAD);
851 rxd->rx_m = m;
852 rx_le = rxd->rx_le;
853 rx_le->msk_addr = htole32(MSK_ADDR_LO(segs[0].ds_addr));
854 rx_le->msk_control =
855 htole32(segs[0].ds_len | OP_PACKET | HW_OWNER);
856
857 return (0);
858 }
859
860 /*
861 * Set media options.
862 */
863 static int
864 msk_mediachange(struct ifnet *ifp)
865 {
866 struct msk_if_softc *sc_if;
867 struct mii_data *mii;
868
869 sc_if = ifp->if_softc;
870
871 MSK_IF_LOCK(sc_if);
872 mii = device_get_softc(sc_if->msk_miibus);
873 mii_mediachg(mii);
874 MSK_IF_UNLOCK(sc_if);
875
876 return (0);
877 }
878
879 /*
880 * Report current media status.
881 */
882 static void
883 msk_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
884 {
885 struct msk_if_softc *sc_if;
886 struct mii_data *mii;
887
888 sc_if = ifp->if_softc;
889 MSK_IF_LOCK(sc_if);
890 mii = device_get_softc(sc_if->msk_miibus);
891
892 mii_pollstat(mii);
893 MSK_IF_UNLOCK(sc_if);
894 ifmr->ifm_active = mii->mii_media_active;
895 ifmr->ifm_status = mii->mii_media_status;
896 }
897
898 static int
899 msk_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
900 {
901 struct msk_if_softc *sc_if;
902 struct ifreq *ifr;
903 struct mii_data *mii;
904 int error, mask;
905
906 sc_if = ifp->if_softc;
907 ifr = (struct ifreq *)data;
908 error = 0;
909
910 switch(command) {
911 case SIOCSIFMTU:
912 if (ifr->ifr_mtu > MSK_JUMBO_MTU || ifr->ifr_mtu < ETHERMIN) {
913 error = EINVAL;
914 break;
915 }
916 if (sc_if->msk_softc->msk_hw_id == CHIP_ID_YUKON_FE &&
917 ifr->ifr_mtu > MSK_MAX_FRAMELEN) {
918 error = EINVAL;
919 break;
920 }
921 MSK_IF_LOCK(sc_if);
922 ifp->if_mtu = ifr->ifr_mtu;
923 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
924 msk_init_locked(sc_if);
925 MSK_IF_UNLOCK(sc_if);
926 break;
927 case SIOCSIFFLAGS:
928 MSK_IF_LOCK(sc_if);
929 if ((ifp->if_flags & IFF_UP) != 0) {
930 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
931 if (((ifp->if_flags ^ sc_if->msk_if_flags)
932 & IFF_PROMISC) != 0) {
933 msk_setpromisc(sc_if);
934 msk_setmulti(sc_if);
935 }
936 } else {
937 if (sc_if->msk_detach == 0)
938 msk_init_locked(sc_if);
939 }
940 } else {
941 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
942 msk_stop(sc_if);
943 }
944 sc_if->msk_if_flags = ifp->if_flags;
945 MSK_IF_UNLOCK(sc_if);
946 break;
947 case SIOCADDMULTI:
948 case SIOCDELMULTI:
949 MSK_IF_LOCK(sc_if);
950 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
951 msk_setmulti(sc_if);
952 MSK_IF_UNLOCK(sc_if);
953 break;
954 case SIOCGIFMEDIA:
955 case SIOCSIFMEDIA:
956 mii = device_get_softc(sc_if->msk_miibus);
957 error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
958 break;
959 case SIOCSIFCAP:
960 MSK_IF_LOCK(sc_if);
961 mask = ifr->ifr_reqcap ^ ifp->if_capenable;
962 if ((mask & IFCAP_TXCSUM) != 0) {
963 ifp->if_capenable ^= IFCAP_TXCSUM;
964 if ((IFCAP_TXCSUM & ifp->if_capenable) != 0 &&
965 (IFCAP_TXCSUM & ifp->if_capabilities) != 0)
966 ifp->if_hwassist |= MSK_CSUM_FEATURES;
967 else
968 ifp->if_hwassist &= ~MSK_CSUM_FEATURES;
969 }
970 if ((mask & IFCAP_VLAN_HWTAGGING) != 0) {
971 ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
972 msk_setvlan(sc_if, ifp);
973 }
974
975 if ((mask & IFCAP_TSO4) != 0) {
976 ifp->if_capenable ^= IFCAP_TSO4;
977 if ((IFCAP_TSO4 & ifp->if_capenable) != 0 &&
978 (IFCAP_TSO4 & ifp->if_capabilities) != 0)
979 ifp->if_hwassist |= CSUM_TSO;
980 else
981 ifp->if_hwassist &= ~CSUM_TSO;
982 }
983 if (sc_if->msk_framesize > MSK_MAX_FRAMELEN &&
984 sc_if->msk_softc->msk_hw_id == CHIP_ID_YUKON_EC_U) {
985 /*
986 * In Yukon EC Ultra, TSO & checksum offload is not
987 * supported for jumbo frame.
988 */
989 ifp->if_hwassist &= ~(MSK_CSUM_FEATURES | CSUM_TSO);
990 ifp->if_capenable &= ~(IFCAP_TSO4 | IFCAP_TXCSUM);
991 }
992
993 VLAN_CAPABILITIES(ifp);
994 MSK_IF_UNLOCK(sc_if);
995 break;
996 default:
997 error = ether_ioctl(ifp, command, data);
998 break;
999 }
1000
1001 return (error);
1002 }
1003
1004 static int
1005 mskc_probe(device_t dev)
1006 {
1007 struct msk_product *mp;
1008 uint16_t vendor, devid;
1009 int i;
1010
1011 vendor = pci_get_vendor(dev);
1012 devid = pci_get_device(dev);
1013 mp = msk_products;
1014 for (i = 0; i < sizeof(msk_products)/sizeof(msk_products[0]);
1015 i++, mp++) {
1016 if (vendor == mp->msk_vendorid && devid == mp->msk_deviceid) {
1017 device_set_desc(dev, mp->msk_name);
1018 return (BUS_PROBE_DEFAULT);
1019 }
1020 }
1021
1022 return (ENXIO);
1023 }
1024
1025 static int
1026 mskc_setup_rambuffer(struct msk_softc *sc)
1027 {
1028 int next;
1029 int i;
1030 uint8_t val;
1031
1032 /* Get adapter SRAM size. */
1033 val = CSR_READ_1(sc, B2_E_0);
1034 sc->msk_ramsize = (val == 0) ? 128 : val * 4;
1035 if (bootverbose)
1036 device_printf(sc->msk_dev,
1037 "RAM buffer size : %dKB\n", sc->msk_ramsize);
1038 /*
1039	 * Give the receiver 2/3 of the memory and round down to a
1040	 * multiple of 1024. The Tx/Rx RAM buffer sizes of the Yukon II
1041	 * should be multiples of 1024.
1042 */
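	/*
	 * Example: with a 48KB SRAM bank, msk_rxqsize =
	 * rounddown(48 * 1024 * 2 / 3, 1024) = 32768 bytes and
	 * msk_txqsize = 49152 - 32768 = 16384 bytes.
	 */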
1043 sc->msk_rxqsize = rounddown((sc->msk_ramsize * 1024 * 2) / 3, 1024);
1044 sc->msk_txqsize = (sc->msk_ramsize * 1024) - sc->msk_rxqsize;
1045 for (i = 0, next = 0; i < sc->msk_num_port; i++) {
1046 sc->msk_rxqstart[i] = next;
1047 sc->msk_rxqend[i] = next + sc->msk_rxqsize - 1;
1048 next = sc->msk_rxqend[i] + 1;
1049 sc->msk_txqstart[i] = next;
1050 sc->msk_txqend[i] = next + sc->msk_txqsize - 1;
1051 next = sc->msk_txqend[i] + 1;
1052 if (bootverbose) {
1053 device_printf(sc->msk_dev,
1054 "Port %d : Rx Queue %dKB(0x%08x:0x%08x)\n", i,
1055 sc->msk_rxqsize / 1024, sc->msk_rxqstart[i],
1056 sc->msk_rxqend[i]);
1057 device_printf(sc->msk_dev,
1058 "Port %d : Tx Queue %dKB(0x%08x:0x%08x)\n", i,
1059 sc->msk_txqsize / 1024, sc->msk_txqstart[i],
1060 sc->msk_txqend[i]);
1061 }
1062 }
1063
1064 return (0);
1065 }
1066
1067 static void
1068 msk_phy_power(struct msk_softc *sc, int mode)
1069 {
1070 uint32_t val;
1071 int i;
1072
1073 switch (mode) {
1074 case MSK_PHY_POWERUP:
1075 /* Switch power to VCC (WA for VAUX problem). */
1076 CSR_WRITE_1(sc, B0_POWER_CTRL,
1077 PC_VAUX_ENA | PC_VCC_ENA | PC_VAUX_OFF | PC_VCC_ON);
1078 /* Disable Core Clock Division, set Clock Select to 0. */
1079 CSR_WRITE_4(sc, B2_Y2_CLK_CTRL, Y2_CLK_DIV_DIS);
1080
1081 val = 0;
1082 if (sc->msk_hw_id == CHIP_ID_YUKON_XL &&
1083 sc->msk_hw_rev > CHIP_REV_YU_XL_A1) {
1084 /* Enable bits are inverted. */
1085 val = Y2_PCI_CLK_LNK1_DIS | Y2_COR_CLK_LNK1_DIS |
1086 Y2_CLK_GAT_LNK1_DIS | Y2_PCI_CLK_LNK2_DIS |
1087 Y2_COR_CLK_LNK2_DIS | Y2_CLK_GAT_LNK2_DIS;
1088 }
1089 /*
1090 * Enable PCI & Core Clock, enable clock gating for both Links.
1091 */
1092 CSR_WRITE_1(sc, B2_Y2_CLK_GATE, val);
1093
1094 val = pci_read_config(sc->msk_dev, PCI_OUR_REG_1, 4);
1095 val &= ~(PCI_Y2_PHY1_POWD | PCI_Y2_PHY2_POWD);
1096 if (sc->msk_hw_id == CHIP_ID_YUKON_XL &&
1097 sc->msk_hw_rev > CHIP_REV_YU_XL_A1) {
1098 /* Deassert Low Power for 1st PHY. */
1099 val |= PCI_Y2_PHY1_COMA;
1100 if (sc->msk_num_port > 1)
1101 val |= PCI_Y2_PHY2_COMA;
1102 } else if (sc->msk_hw_id == CHIP_ID_YUKON_EC_U) {
1103 uint32_t our;
1104
1105 CSR_WRITE_2(sc, B0_CTST, Y2_HW_WOL_ON);
1106
1107 /* Enable all clocks. */
1108 pci_write_config(sc->msk_dev, PCI_OUR_REG_3, 0, 4);
1109 our = pci_read_config(sc->msk_dev, PCI_OUR_REG_4, 4);
1110 our &= (PCI_FORCE_ASPM_REQUEST|PCI_ASPM_GPHY_LINK_DOWN|
1111 PCI_ASPM_INT_FIFO_EMPTY|PCI_ASPM_CLKRUN_REQUEST);
1112 /* Set all bits to 0 except bits 15..12. */
1113 pci_write_config(sc->msk_dev, PCI_OUR_REG_4, our, 4);
1114 /* Set to default value. */
1115 pci_write_config(sc->msk_dev, PCI_OUR_REG_5, 0, 4);
1116 }
1117 /* Release PHY from PowerDown/COMA mode. */
1118 pci_write_config(sc->msk_dev, PCI_OUR_REG_1, val, 4);
1119 for (i = 0; i < sc->msk_num_port; i++) {
1120 CSR_WRITE_2(sc, MR_ADDR(i, GMAC_LINK_CTRL),
1121 GMLC_RST_SET);
1122 CSR_WRITE_2(sc, MR_ADDR(i, GMAC_LINK_CTRL),
1123 GMLC_RST_CLR);
1124 }
1125 break;
1126 case MSK_PHY_POWERDOWN:
1127 val = pci_read_config(sc->msk_dev, PCI_OUR_REG_1, 4);
1128 val |= PCI_Y2_PHY1_POWD | PCI_Y2_PHY2_POWD;
1129 if (sc->msk_hw_id == CHIP_ID_YUKON_XL &&
1130 sc->msk_hw_rev > CHIP_REV_YU_XL_A1) {
1131 val &= ~PCI_Y2_PHY1_COMA;
1132 if (sc->msk_num_port > 1)
1133 val &= ~PCI_Y2_PHY2_COMA;
1134 }
1135 pci_write_config(sc->msk_dev, PCI_OUR_REG_1, val, 4);
1136
1137 val = Y2_PCI_CLK_LNK1_DIS | Y2_COR_CLK_LNK1_DIS |
1138 Y2_CLK_GAT_LNK1_DIS | Y2_PCI_CLK_LNK2_DIS |
1139 Y2_COR_CLK_LNK2_DIS | Y2_CLK_GAT_LNK2_DIS;
1140 if (sc->msk_hw_id == CHIP_ID_YUKON_XL &&
1141 sc->msk_hw_rev > CHIP_REV_YU_XL_A1) {
1142 /* Enable bits are inverted. */
1143 val = 0;
1144 }
1145 /*
1146 * Disable PCI & Core Clock, disable clock gating for
1147 * both Links.
1148 */
1149 CSR_WRITE_1(sc, B2_Y2_CLK_GATE, val);
1150 CSR_WRITE_1(sc, B0_POWER_CTRL,
1151 PC_VAUX_ENA | PC_VCC_ENA | PC_VAUX_ON | PC_VCC_OFF);
1152 break;
1153 default:
1154 break;
1155 }
1156 }
1157
1158 static void
1159 mskc_reset(struct msk_softc *sc)
1160 {
1161 bus_addr_t addr;
1162 uint16_t status;
1163 uint32_t val;
1164 int i;
1165
1166 CSR_WRITE_2(sc, B0_CTST, CS_RST_CLR);
1167
1168 /* Disable ASF. */
1169 if (sc->msk_hw_id < CHIP_ID_YUKON_XL) {
1170 CSR_WRITE_4(sc, B28_Y2_ASF_STAT_CMD, Y2_ASF_RESET);
1171 CSR_WRITE_2(sc, B0_CTST, Y2_ASF_DISABLE);
1172 }
1173 /*
1174 * Since we disabled ASF, S/W reset is required for Power Management.
1175 */
1176 CSR_WRITE_2(sc, B0_CTST, CS_RST_SET);
1177 CSR_WRITE_2(sc, B0_CTST, CS_RST_CLR);
1178
1179 /* Clear all error bits in the PCI status register. */
1180 status = pci_read_config(sc->msk_dev, PCIR_STATUS, 2);
1181 CSR_WRITE_1(sc, B2_TST_CTRL1, TST_CFG_WRITE_ON);
1182
1183 pci_write_config(sc->msk_dev, PCIR_STATUS, status |
1184 PCIM_STATUS_PERR | PCIM_STATUS_SERR | PCIM_STATUS_RMABORT |
1185 PCIM_STATUS_RTABORT | PCIM_STATUS_PERRREPORT, 2);
1186 CSR_WRITE_2(sc, B0_CTST, CS_MRST_CLR);
1187
1188 switch (sc->msk_bustype) {
1189 case MSK_PEX_BUS:
1190 /* Clear all PEX errors. */
1191 CSR_PCI_WRITE_4(sc, PEX_UNC_ERR_STAT, 0xffffffff);
1192 val = CSR_PCI_READ_4(sc, PEX_UNC_ERR_STAT);
1193 if ((val & PEX_RX_OV) != 0) {
1194 sc->msk_intrmask &= ~Y2_IS_HW_ERR;
1195 sc->msk_intrhwemask &= ~Y2_IS_PCI_EXP;
1196 }
1197 break;
1198 case MSK_PCI_BUS:
1199 case MSK_PCIX_BUS:
1200		/* Set Cache Line Size to 2 (8 bytes) if configured to 0. */
1201 val = pci_read_config(sc->msk_dev, PCIR_CACHELNSZ, 1);
1202 if (val == 0)
1203 pci_write_config(sc->msk_dev, PCIR_CACHELNSZ, 2, 1);
1204 if (sc->msk_bustype == MSK_PCIX_BUS) {
1205 /* Set Cache Line Size opt. */
1206 val = pci_read_config(sc->msk_dev, PCI_OUR_REG_1, 4);
1207 val |= PCI_CLS_OPT;
1208 pci_write_config(sc->msk_dev, PCI_OUR_REG_1, val, 4);
1209 }
1210 break;
1211 }
1212 /* Set PHY power state. */
1213 msk_phy_power(sc, MSK_PHY_POWERUP);
1214
1215 /* Reset GPHY/GMAC Control */
1216 for (i = 0; i < sc->msk_num_port; i++) {
1217 /* GPHY Control reset. */
1218 CSR_WRITE_4(sc, MR_ADDR(i, GPHY_CTRL), GPC_RST_SET);
1219 CSR_WRITE_4(sc, MR_ADDR(i, GPHY_CTRL), GPC_RST_CLR);
1220 /* GMAC Control reset. */
1221 CSR_WRITE_4(sc, MR_ADDR(i, GMAC_CTRL), GMC_RST_SET);
1222 CSR_WRITE_4(sc, MR_ADDR(i, GMAC_CTRL), GMC_RST_CLR);
1223 CSR_WRITE_4(sc, MR_ADDR(i, GMAC_CTRL), GMC_F_LOOPB_OFF);
1224 }
1225 CSR_WRITE_1(sc, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
1226
1227 /* LED On. */
1228 CSR_WRITE_2(sc, B0_CTST, Y2_LED_STAT_ON);
1229
1230 /* Clear TWSI IRQ. */
1231 CSR_WRITE_4(sc, B2_I2C_IRQ, I2C_CLR_IRQ);
1232
1233 /* Turn off hardware timer. */
1234 CSR_WRITE_1(sc, B2_TI_CTRL, TIM_STOP);
1235 CSR_WRITE_1(sc, B2_TI_CTRL, TIM_CLR_IRQ);
1236
1237 /* Turn off descriptor polling. */
1238 CSR_WRITE_1(sc, B28_DPT_CTRL, DPT_STOP);
1239
1240 /* Turn off time stamps. */
1241 CSR_WRITE_1(sc, GMAC_TI_ST_CTRL, GMT_ST_STOP);
1242 CSR_WRITE_1(sc, GMAC_TI_ST_CTRL, GMT_ST_CLR_IRQ);
1243
1244 /* Configure timeout values. */
1245 for (i = 0; i < sc->msk_num_port; i++) {
1246 CSR_WRITE_2(sc, SELECT_RAM_BUFFER(i, B3_RI_CTRL), RI_RST_SET);
1247 CSR_WRITE_2(sc, SELECT_RAM_BUFFER(i, B3_RI_CTRL), RI_RST_CLR);
1248 CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_WTO_R1),
1249 MSK_RI_TO_53);
1250 CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_WTO_XA1),
1251 MSK_RI_TO_53);
1252 CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_WTO_XS1),
1253 MSK_RI_TO_53);
1254 CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_RTO_R1),
1255 MSK_RI_TO_53);
1256 CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_RTO_XA1),
1257 MSK_RI_TO_53);
1258 CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_RTO_XS1),
1259 MSK_RI_TO_53);
1260 CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_WTO_R2),
1261 MSK_RI_TO_53);
1262 CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_WTO_XA2),
1263 MSK_RI_TO_53);
1264 CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_WTO_XS2),
1265 MSK_RI_TO_53);
1266 CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_RTO_R2),
1267 MSK_RI_TO_53);
1268 CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_RTO_XA2),
1269 MSK_RI_TO_53);
1270 CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_RTO_XS2),
1271 MSK_RI_TO_53);
1272 }
1273
1274 /* Disable all interrupts. */
1275 CSR_WRITE_4(sc, B0_HWE_IMSK, 0);
1276 CSR_READ_4(sc, B0_HWE_IMSK);
1277 CSR_WRITE_4(sc, B0_IMSK, 0);
1278 CSR_READ_4(sc, B0_IMSK);
1279
1280 /*
1281	 * On dual port PCI-X cards, there is a problem where status
1282	 * updates can be received out of order due to split transactions.
1283 */
1284 if (sc->msk_bustype == MSK_PCIX_BUS && sc->msk_num_port > 1) {
1285 int pcix;
1286 uint16_t pcix_cmd;
1287
1288 if (pci_find_extcap(sc->msk_dev, PCIY_PCIX, &pcix) == 0) {
1289 pcix_cmd = pci_read_config(sc->msk_dev, pcix + 2, 2);
1290 /* Clear Max Outstanding Split Transactions. */
1291 pcix_cmd &= ~0x70;
1292 CSR_WRITE_1(sc, B2_TST_CTRL1, TST_CFG_WRITE_ON);
1293 pci_write_config(sc->msk_dev, pcix + 2, pcix_cmd, 2);
1294 CSR_WRITE_1(sc, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
1295 }
1296 }
1297 if (sc->msk_bustype == MSK_PEX_BUS) {
1298 uint16_t v, width;
1299
1300 v = pci_read_config(sc->msk_dev, PEX_DEV_CTRL, 2);
1301 /* Change Max. Read Request Size to 4096 bytes. */
1302 v &= ~PEX_DC_MAX_RRS_MSK;
1303 v |= PEX_DC_MAX_RD_RQ_SIZE(5);
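		/* PCIe encodes MRRS as 128 << n; n = 5 selects 4096 bytes. */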
1304 pci_write_config(sc->msk_dev, PEX_DEV_CTRL, v, 2);
1305 width = pci_read_config(sc->msk_dev, PEX_LNK_STAT, 2);
1306 width = (width & PEX_LS_LINK_WI_MSK) >> 4;
1307 v = pci_read_config(sc->msk_dev, PEX_LNK_CAP, 2);
1308 v = (v & PEX_LS_LINK_WI_MSK) >> 4;
1309 if (v != width)
1310 device_printf(sc->msk_dev,
1311 "negotiated width of link(x%d) != "
1312 "max. width of link(x%d)\n", width, v);
1313 }
1314
1315 /* Clear status list. */
1316 bzero(sc->msk_stat_ring,
1317 sizeof(struct msk_stat_desc) * MSK_STAT_RING_CNT);
1318 sc->msk_stat_cons = 0;
1319 bus_dmamap_sync(sc->msk_stat_tag, sc->msk_stat_map,
1320 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1321 CSR_WRITE_4(sc, STAT_CTRL, SC_STAT_RST_SET);
1322 CSR_WRITE_4(sc, STAT_CTRL, SC_STAT_RST_CLR);
1323 /* Set the status list base address. */
1324 addr = sc->msk_stat_ring_paddr;
1325 CSR_WRITE_4(sc, STAT_LIST_ADDR_LO, MSK_ADDR_LO(addr));
1326 CSR_WRITE_4(sc, STAT_LIST_ADDR_HI, MSK_ADDR_HI(addr));
1327 /* Set the status list last index. */
1328 CSR_WRITE_2(sc, STAT_LAST_IDX, MSK_STAT_RING_CNT - 1);
1329 if (sc->msk_hw_id == CHIP_ID_YUKON_EC &&
1330 sc->msk_hw_rev == CHIP_REV_YU_EC_A1) {
1331 /* WA for dev. #4.3 */
1332 CSR_WRITE_2(sc, STAT_TX_IDX_TH, ST_TXTH_IDX_MASK);
1333 /* WA for dev. #4.18 */
1334 CSR_WRITE_1(sc, STAT_FIFO_WM, 0x21);
1335 CSR_WRITE_1(sc, STAT_FIFO_ISR_WM, 0x07);
1336 } else {
1337 CSR_WRITE_2(sc, STAT_TX_IDX_TH, 0x0a);
1338 CSR_WRITE_1(sc, STAT_FIFO_WM, 0x10);
1339 if (sc->msk_hw_id == CHIP_ID_YUKON_XL &&
1340 sc->msk_hw_rev == CHIP_REV_YU_XL_A0)
1341 CSR_WRITE_1(sc, STAT_FIFO_ISR_WM, 0x04);
1342 else
1343 CSR_WRITE_1(sc, STAT_FIFO_ISR_WM, 0x10);
1344 CSR_WRITE_4(sc, STAT_ISR_TIMER_INI, 0x0190);
1345 }
1346 /*
1347 * Use default value for STAT_ISR_TIMER_INI, STAT_LEV_TIMER_INI.
1348 */
1349 CSR_WRITE_4(sc, STAT_TX_TIMER_INI, MSK_USECS(sc, 1000));
1350
1351 /* Enable status unit. */
1352 CSR_WRITE_4(sc, STAT_CTRL, SC_STAT_OP_ON);
1353
1354 CSR_WRITE_1(sc, STAT_TX_TIMER_CTRL, TIM_START);
1355 CSR_WRITE_1(sc, STAT_LEV_TIMER_CTRL, TIM_START);
1356 CSR_WRITE_1(sc, STAT_ISR_TIMER_CTRL, TIM_START);
1357 }
1358
1359 static int
1360 msk_probe(device_t dev)
1361 {
1362 struct msk_softc *sc;
1363 char desc[100];
1364
1365 sc = device_get_softc(device_get_parent(dev));
1366 /*
1367 * Not much to do here. We always know there will be
1368 * at least one GMAC present, and if there are two,
1369 * mskc_attach() will create a second device instance
1370 * for us.
1371 */
1372 snprintf(desc, sizeof(desc),
1373 "Marvell Technology Group Ltd. %s Id 0x%02x Rev 0x%02x",
1374 model_name[sc->msk_hw_id - CHIP_ID_YUKON_XL], sc->msk_hw_id,
1375 sc->msk_hw_rev);
1376 device_set_desc_copy(dev, desc);
1377
1378 return (BUS_PROBE_DEFAULT);
1379 }
1380
1381 static int
1382 msk_attach(device_t dev)
1383 {
1384 struct msk_softc *sc;
1385 struct msk_if_softc *sc_if;
1386 struct ifnet *ifp;
1387 int i, port, error;
1388 uint8_t eaddr[6];
1389
1390 if (dev == NULL)
1391 return (EINVAL);
1392
1393 error = 0;
1394 sc_if = device_get_softc(dev);
1395 sc = device_get_softc(device_get_parent(dev));
1396 port = *(int *)device_get_ivars(dev);
1397
1398 sc_if->msk_if_dev = dev;
1399 sc_if->msk_port = port;
1400 sc_if->msk_softc = sc;
1401 sc->msk_if[port] = sc_if;
1402 /* Setup Tx/Rx queue register offsets. */
1403 if (port == MSK_PORT_A) {
1404 sc_if->msk_txq = Q_XA1;
1405 sc_if->msk_txsq = Q_XS1;
1406 sc_if->msk_rxq = Q_R1;
1407 } else {
1408 sc_if->msk_txq = Q_XA2;
1409 sc_if->msk_txsq = Q_XS2;
1410 sc_if->msk_rxq = Q_R2;
1411 }
1412
1413 callout_init_mtx(&sc_if->msk_tick_ch, &sc_if->msk_softc->msk_mtx, 0);
1414 TASK_INIT(&sc_if->msk_link_task, 0, msk_link_task, sc_if);
1415
1416	if ((error = msk_txrx_dma_alloc(sc_if)) != 0)
1417 goto fail;
1418
1419 ifp = sc_if->msk_ifp = if_alloc(IFT_ETHER);
1420 if (ifp == NULL) {
1421 device_printf(sc_if->msk_if_dev, "can not if_alloc()\n");
1422 error = ENOSPC;
1423 goto fail;
1424 }
1425 ifp->if_softc = sc_if;
1426 if_initname(ifp, device_get_name(dev), device_get_unit(dev));
1427 ifp->if_mtu = ETHERMTU;
1428 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1429 /*
1430	 * The IFCAP_RXCSUM capability is intentionally disabled as the
1431	 * hardware has a serious bug in Rx checksum offload for the whole
1432	 * Yukon II family. It seems there is a workaround that makes it work
1433	 * sometimes, but the workaround also has to check OP code sequences
1434	 * to verify whether the OP code is correct, and sometimes it must
1435	 * compute the IP/TCP/UDP checksum in the driver to verify the
1436	 * checksum computed by the hardware. If you have to compute the
1437	 * checksum in software to verify the hardware's checksum, why have
1438	 * the hardware compute the checksum at all? There is no reason to
1439	 * spend time making Rx checksum offload work on Yukon II hardware.
1440 */
1441 ifp->if_capabilities = IFCAP_TXCSUM | IFCAP_TSO4;
1442 ifp->if_hwassist = MSK_CSUM_FEATURES | CSUM_TSO;
1443 ifp->if_capenable = ifp->if_capabilities;
1444 ifp->if_ioctl = msk_ioctl;
1445 ifp->if_start = msk_start;
1446 ifp->if_timer = 0;
1447 ifp->if_watchdog = NULL;
1448 ifp->if_init = msk_init;
1449 IFQ_SET_MAXLEN(&ifp->if_snd, MSK_TX_RING_CNT - 1);
1450 ifp->if_snd.ifq_drv_maxlen = MSK_TX_RING_CNT - 1;
1451 IFQ_SET_READY(&ifp->if_snd);
1452
1453 TASK_INIT(&sc_if->msk_tx_task, 1, msk_tx_task, ifp);
1454
1455 /*
1456 * Get station address for this interface. Note that
1457 * dual port cards actually come with three station
1458 * addresses: one for each port, plus an extra. The
1459 * extra one is used by the SysKonnect driver software
1460 * as a 'virtual' station address for when both ports
1461 * are operating in failover mode. Currently we don't
1462 * use this extra address.
1463 */
1464 MSK_IF_LOCK(sc_if);
1465 for (i = 0; i < ETHER_ADDR_LEN; i++)
1466 eaddr[i] = CSR_READ_1(sc, B2_MAC_1 + (port * 8) + i);
1467
1468 /*
1469 * Call MI attach routine. Can't hold locks when calling into ether_*.
1470 */
1471 MSK_IF_UNLOCK(sc_if);
1472 ether_ifattach(ifp, eaddr);
1473 MSK_IF_LOCK(sc_if);
1474
1475 /*
1476 * VLAN capability setup
1477 * Due to Tx checksum offload hardware bugs, msk(4) manually
1478 * computes checksum for short frames. For VLAN tagged frames
1479	 * this workaround does not work, so checksum offload is disabled
1480	 * for VLAN interfaces.
1481 */
1482 ifp->if_capabilities |= IFCAP_VLAN_MTU | IFCAP_VLAN_HWTAGGING;
1483 ifp->if_capenable = ifp->if_capabilities;
1484
1485 /*
1486 * Tell the upper layer(s) we support long frames.
1487 * Must appear after the call to ether_ifattach() because
1488 * ether_ifattach() sets ifi_hdrlen to the default value.
1489 */
1490 ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
1491
1492 sc_if->msk_framesize = ifp->if_mtu + ETHER_HDR_LEN +
1493 ETHER_VLAN_ENCAP_LEN;
1494
1495 /*
1496 * Do miibus setup.
1497 */
1498 MSK_IF_UNLOCK(sc_if);
1499 error = mii_phy_probe(dev, &sc_if->msk_miibus, msk_mediachange,
1500 msk_mediastatus);
1501 if (error != 0) {
1502 device_printf(sc_if->msk_if_dev, "no PHY found!\n");
1503 ether_ifdetach(ifp);
1504 error = ENXIO;
1505 goto fail;
1506 }
1507
1508 fail:
1509 if (error != 0) {
1510 /* Access should be ok even though lock has been dropped */
1511 sc->msk_if[port] = NULL;
1512 msk_detach(dev);
1513 }
1514
1515 return (error);
1516 }
1517
1518 /*
1519 * Attach the interface. Allocate softc structures, do ifmedia
1520 * setup and ethernet/BPF attach.
1521 */
1522 static int
1523 mskc_attach(device_t dev)
1524 {
1525 struct msk_softc *sc;
1526 int error, *port, reg, rid;
1527 #ifdef MSI_SUPPORT
1528 int i, msic, msir;
1529 #endif
1530
1531 sc = device_get_softc(dev);
1532 sc->msk_dev = dev;
1533 mtx_init(&sc->msk_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
1534 MTX_DEF);
1535
1536 /*
1537 * Map control/status registers.
1538 */
1539 pci_enable_busmaster(dev);
1540
1541 /* Allocate I/O resource */
1542 #ifdef MSK_USEIOSPACE
1543 sc->msk_res_type = SYS_RES_IOPORT;
1544 sc->msk_res_id = PCIR_BAR(1);
1545 #else
1546 sc->msk_res_type = SYS_RES_MEMORY;
1547 sc->msk_res_id = PCIR_BAR(0);
1548 #endif
1549 sc->msk_res[0] = bus_alloc_resource_any(dev, sc->msk_res_type,
1550 &sc->msk_res_id, RF_ACTIVE);
1551 if (sc->msk_res[0] == NULL) {
1552 if (sc->msk_res_type == SYS_RES_MEMORY) {
1553 sc->msk_res_type = SYS_RES_IOPORT;
1554 sc->msk_res_id = PCIR_BAR(1);
1555 } else {
1556 sc->msk_res_type = SYS_RES_MEMORY;
1557 sc->msk_res_id = PCIR_BAR(0);
1558 }
1559 sc->msk_res[0] = bus_alloc_resource_any(dev, sc->msk_res_type,
1560 &sc->msk_res_id, RF_ACTIVE);
1561 if (sc->msk_res[0] == NULL) {
1562 device_printf(dev, "couldn't allocate %s resources\n",
1563 sc->msk_res_type == SYS_RES_MEMORY ? "memory" :
1564 "I/O");
1565 mtx_destroy(&sc->msk_mtx);
1566 return (ENXIO);
1567 }
1568 }
1569
1570 CSR_WRITE_2(sc, B0_CTST, CS_RST_CLR);
1571 sc->msk_hw_id = CSR_READ_1(sc, B2_CHIP_ID);
1572 sc->msk_hw_rev = (CSR_READ_1(sc, B2_MAC_CFG) >> 4) & 0x0f;
1573 /* Bail out if chip is not recognized. */
1574 if (sc->msk_hw_id < CHIP_ID_YUKON_XL ||
1575 sc->msk_hw_id > CHIP_ID_YUKON_FE) {
1576 device_printf(dev, "unknown device: id=0x%02x, rev=0x%02x\n",
1577 sc->msk_hw_id, sc->msk_hw_rev);
1578 mtx_destroy(&sc->msk_mtx);
1579 return (ENXIO);
1580 }
1581
1582 SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
1583 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
1584 OID_AUTO, "process_limit", CTLTYPE_INT | CTLFLAG_RW,
1585 &sc->msk_process_limit, 0, sysctl_hw_msk_proc_limit, "I",
1586 "max number of Rx events to process");
1587
1588 sc->msk_process_limit = MSK_PROC_DEFAULT;
1589 error = resource_int_value(device_get_name(dev), device_get_unit(dev),
1590 "process_limit", &sc->msk_process_limit);
1591 if (error == 0) {
1592 if (sc->msk_process_limit < MSK_PROC_MIN ||
1593 sc->msk_process_limit > MSK_PROC_MAX) {
1594 device_printf(dev, "process_limit value out of range; "
1595 "using default: %d\n", MSK_PROC_DEFAULT);
1596 sc->msk_process_limit = MSK_PROC_DEFAULT;
1597 }
1598 }
1599
1600 /* Soft reset. */
1601 CSR_WRITE_2(sc, B0_CTST, CS_RST_SET);
1602 CSR_WRITE_2(sc, B0_CTST, CS_RST_CLR);
1603 sc->msk_pmd = CSR_READ_1(sc, B2_PMD_TYP);
1604 if (sc->msk_pmd == 'L' || sc->msk_pmd == 'S')
1605 sc->msk_coppertype = 0;
1606 else
1607 sc->msk_coppertype = 1;
1608 /* Check number of MACs. */
1609 sc->msk_num_port = 1;
1610 if ((CSR_READ_1(sc, B2_Y2_HW_RES) & CFG_DUAL_MAC_MSK) ==
1611 CFG_DUAL_MAC_MSK) {
1612 if (!(CSR_READ_1(sc, B2_Y2_CLK_GATE) & Y2_STATUS_LNK2_INAC))
1613 sc->msk_num_port++;
1614 }
1615
1616 /* Check bus type. */
1617	if (pci_find_extcap(sc->msk_dev, PCIY_EXPRESS, &reg) == 0)
1618		sc->msk_bustype = MSK_PEX_BUS;
1619	else if (pci_find_extcap(sc->msk_dev, PCIY_PCIX, &reg) == 0)
1620 sc->msk_bustype = MSK_PCIX_BUS;
1621 else
1622 sc->msk_bustype = MSK_PCI_BUS;
1623
1624 switch (sc->msk_hw_id) {
1625 case CHIP_ID_YUKON_EC:
1626 case CHIP_ID_YUKON_EC_U:
1627		sc->msk_clock = 125;	/* 125 MHz */
1628		break;
1629	case CHIP_ID_YUKON_FE:
1630		sc->msk_clock = 100;	/* 100 MHz */
1631		break;
1632	case CHIP_ID_YUKON_XL:
1633		sc->msk_clock = 156;	/* 156 MHz */
1634		break;
1635	default:
1636		sc->msk_clock = 156;	/* 156 MHz */
1637 break;
1638 }
1639
1640 #ifdef MSI_SUPPORT
1641 /* Allocate IRQ resources. */
1642 msic = pci_msi_count(dev);
1643 if (bootverbose)
1644 device_printf(dev, "MSI count : %d\n", msic);
1645 /*
1646 * The Yukon II reports it can handle two messages, one for each
1647 * possible port. We go ahead and allocate two messages and only
1648 * setup a handler for both if we have a dual port card.
1649 *
1650 * XXX: I haven't untangled the interrupt handler to handle dual
1651 * port cards with separate MSI messages, so for now I disable MSI
1652 * on dual port cards.
1653 */
1654 if (msi_disable == 0) {
1655 switch (msic) {
1656 case 2:
1657 case 1: /* 88E8058 reports 1 MSI message */
1658 msir = msic;
1659 if (sc->msk_num_port == 1 &&
1660 pci_alloc_msi(dev, &msir) == 0) {
1661 if (msic == msir)
1662 sc->msk_msi = msic;
1663 else
1664 pci_release_msi(dev);
1665 }
1666 break;
1667 default:
1668 device_printf(dev,
1669 "Unexpected number of MSI messages : %d\n", msic);
1670 break;
1671 }
1672 }
1673
1674 if (sc->msk_msi == 0) {
1675 rid = 0;
1676 sc->msk_irq[0] = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
1677 RF_SHAREABLE | RF_ACTIVE);
1678 if (sc->msk_irq[0] == NULL) {
1679 device_printf(dev, "couldn't allocate IRQ resources\n");
1680 error = ENXIO;
1681 goto fail;
1682 }
1683 } else {
1684 for (i = 0, rid = 1; i < sc->msk_msi; i++, rid++) {
1685 sc->msk_irq[i] = bus_alloc_resource_any(dev, SYS_RES_IRQ,
1686 &rid, RF_ACTIVE);
1687 if (sc->msk_irq[i] == NULL) {
1688 device_printf(dev,
1689 "couldn't allocate IRQ resources\n");
1690 error = ENXIO;
1691 goto fail;
1692 }
1693 }
1694 }
1695 #else
1696 rid = 0;
1697 sc->msk_irq[0] = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
1698 RF_SHAREABLE | RF_ACTIVE);
1699 if (sc->msk_irq[0] == NULL) {
1700 device_printf(dev, "couldn't allocate IRQ resources\n");
1701 error = ENXIO;
1702 goto fail;
1703 }
1704 #endif
1705
1706 if ((error = msk_status_dma_alloc(sc)) != 0)
1707 goto fail;
1708
1709 /* Set base interrupt mask. */
1710 sc->msk_intrmask = Y2_IS_HW_ERR | Y2_IS_STAT_BMU;
1711 sc->msk_intrhwemask = Y2_IS_TIST_OV | Y2_IS_MST_ERR |
1712 Y2_IS_IRQ_STAT | Y2_IS_PCI_EXP | Y2_IS_PCI_NEXP;
1713
1714 /* Reset the adapter. */
1715 mskc_reset(sc);
1716
1717 if ((error = mskc_setup_rambuffer(sc)) != 0)
1718 goto fail;
1719
1720 sc->msk_devs[MSK_PORT_A] = device_add_child(dev, "msk", -1);
1721 if (sc->msk_devs[MSK_PORT_A] == NULL) {
1722 device_printf(dev, "failed to add child for PORT_A\n");
1723 error = ENXIO;
1724 goto fail;
1725 }
1726 port = malloc(sizeof(int), M_DEVBUF, M_WAITOK);
1727 if (port == NULL) {
1728 device_printf(dev, "failed to allocate memory for "
1729 "ivars of PORT_A\n");
1730 error = ENXIO;
1731 goto fail;
1732 }
1733 *port = MSK_PORT_A;
1734 device_set_ivars(sc->msk_devs[MSK_PORT_A], port);
1735
1736 if (sc->msk_num_port > 1) {
1737 sc->msk_devs[MSK_PORT_B] = device_add_child(dev, "msk", -1);
1738 if (sc->msk_devs[MSK_PORT_B] == NULL) {
1739 device_printf(dev, "failed to add child for PORT_B\n");
1740 error = ENXIO;
1741 goto fail;
1742 }
1743 port = malloc(sizeof(int), M_DEVBUF, M_WAITOK);
1744 if (port == NULL) {
1745 device_printf(dev, "failed to allocate memory for "
1746 "ivars of PORT_B\n");
1747 error = ENXIO;
1748 goto fail;
1749 }
1750 *port = MSK_PORT_B;
1751 device_set_ivars(sc->msk_devs[MSK_PORT_B], port);
1752 }
1753
1754 error = bus_generic_attach(dev);
1755 if (error) {
1756 device_printf(dev, "failed to attach port(s)\n");
1757 goto fail;
1758 }
1759
1760 TASK_INIT(&sc->msk_int_task, 0, msk_int_task, sc);
1761 sc->msk_tq = taskqueue_create_fast("msk_taskq", M_WAITOK,
1762 taskqueue_thread_enqueue, &sc->msk_tq);
1763 taskqueue_start_threads(&sc->msk_tq, 1, PI_NET, "%s taskq",
1764 device_get_nameunit(sc->msk_dev));
1765 /* Hook interrupt last to avoid having to lock softc. */
1766 error = bus_setup_intr(dev, sc->msk_irq[0], INTR_TYPE_NET |
1767 INTR_MPSAFE | INTR_FAST, msk_intr, sc, &sc->msk_intrhand[0]);
1768
1769 if (error != 0) {
1770 device_printf(dev, "couldn't set up interrupt handler\n");
1771 taskqueue_free(sc->msk_tq);
1772 sc->msk_tq = NULL;
1773 goto fail;
1774 }
1775 fail:
1776 if (error != 0)
1777 mskc_detach(dev);
1778
1779 return (error);
1780 }
1781
1782 /*
1783 * Shutdown hardware and free up resources. This can be called any
1784 * time after the mutex has been initialized. It is called in both
1785 * the error case in attach and the normal detach case so it needs
1786 * to be careful about only freeing resources that have actually been
1787 * allocated.
1788 */
1789 static int
1790 msk_detach(device_t dev)
1791 {
1792 struct msk_softc *sc;
1793 struct msk_if_softc *sc_if;
1794 struct ifnet *ifp;
1795
1796 sc_if = device_get_softc(dev);
1797 KASSERT(mtx_initialized(&sc_if->msk_softc->msk_mtx),
1798 ("msk mutex not initialized in msk_detach"));
1799 MSK_IF_LOCK(sc_if);
1800
1801 ifp = sc_if->msk_ifp;
1802 if (device_is_attached(dev)) {
1803 /* XXX */
1804 sc_if->msk_detach = 1;
1805 msk_stop(sc_if);
1806 /* Can't hold locks while calling detach. */
1807 MSK_IF_UNLOCK(sc_if);
1808 callout_drain(&sc_if->msk_tick_ch);
1809 taskqueue_drain(taskqueue_fast, &sc_if->msk_tx_task);
1810 taskqueue_drain(taskqueue_swi, &sc_if->msk_link_task);
1811 ether_ifdetach(ifp);
1812 MSK_IF_LOCK(sc_if);
1813 }
1814
1815 /*
1816 * We're generally called from mskc_detach() which is using
1817 * device_delete_child() to get to here. It's already trashed
1818 * miibus for us, so don't do it here or we'll panic.
1819 *
1820 * if (sc_if->msk_miibus != NULL) {
1821 * device_delete_child(dev, sc_if->msk_miibus);
1822 * sc_if->msk_miibus = NULL;
1823 * }
1824 */
1825
1826 msk_txrx_dma_free(sc_if);
1827 bus_generic_detach(dev);
1828
1829 if (ifp)
1830 if_free(ifp);
1831 sc = sc_if->msk_softc;
1832 sc->msk_if[sc_if->msk_port] = NULL;
1833 MSK_IF_UNLOCK(sc_if);
1834
1835 return (0);
1836 }
1837
1838 static int
1839 mskc_detach(device_t dev)
1840 {
1841 struct msk_softc *sc;
1842
1843 sc = device_get_softc(dev);
1844 KASSERT(mtx_initialized(&sc->msk_mtx), ("msk mutex not initialized"));
1845
1846 if (device_is_alive(dev)) {
1847 if (sc->msk_devs[MSK_PORT_A] != NULL) {
1848 free(device_get_ivars(sc->msk_devs[MSK_PORT_A]),
1849 M_DEVBUF);
1850 device_delete_child(dev, sc->msk_devs[MSK_PORT_A]);
1851 }
1852 if (sc->msk_devs[MSK_PORT_B] != NULL) {
1853 free(device_get_ivars(sc->msk_devs[MSK_PORT_B]),
1854 M_DEVBUF);
1855 device_delete_child(dev, sc->msk_devs[MSK_PORT_B]);
1856 }
1857 bus_generic_detach(dev);
1858 }
1859
1860 /* Disable all interrupts. */
1861 CSR_WRITE_4(sc, B0_IMSK, 0);
1862 CSR_READ_4(sc, B0_IMSK);
1863 CSR_WRITE_4(sc, B0_HWE_IMSK, 0);
1864 CSR_READ_4(sc, B0_HWE_IMSK);
1865
1866 /* LED Off. */
1867 CSR_WRITE_2(sc, B0_CTST, Y2_LED_STAT_OFF);
1868
1869 /* Put hardware reset. */
1870 CSR_WRITE_2(sc, B0_CTST, CS_RST_SET);
1871
1872 msk_status_dma_free(sc);
1873
1874 if (sc->msk_tq != NULL) {
1875 taskqueue_drain(sc->msk_tq, &sc->msk_int_task);
1876 taskqueue_free(sc->msk_tq);
1877 sc->msk_tq = NULL;
1878 }
1879 if (sc->msk_intrhand[0]) {
1880 bus_teardown_intr(dev, sc->msk_irq[0], sc->msk_intrhand[0]);
1881 sc->msk_intrhand[0] = NULL;
1882 }
1883 if (sc->msk_intrhand[1]) {
1884 		bus_teardown_intr(dev, sc->msk_irq[1], sc->msk_intrhand[1]);
1885 sc->msk_intrhand[1] = NULL;
1886 }
1887 #ifdef MSI_SUPPORT
1888 if (sc->msk_msi) {
1889 int i, rid;
1890 for (i = 0, rid = 1; i < sc->msk_msi; i++, rid++) {
1891 if (sc->msk_irq[i] != NULL) {
1892 bus_release_resource(dev, SYS_RES_IRQ, rid,
1893 sc->msk_irq[i]);
1894 sc->msk_irq[i] = NULL;
1895 }
1896 }
1897 pci_release_msi(dev);
1898 } else {
1899 if (sc->msk_irq[0] != NULL) {
1900 bus_release_resource(dev, SYS_RES_IRQ, 0,
1901 sc->msk_irq[0]);
1902 sc->msk_irq[0] = NULL;
1903 }
1904 }
1905 #else
1906 if (sc->msk_irq[0] != NULL) {
1907 bus_release_resource(dev, SYS_RES_IRQ, 0, sc->msk_irq[0]);
1908 sc->msk_irq[0] = NULL;
1909 }
1910 #endif
1911 if (sc->msk_res[0] != NULL)
1912 bus_release_resource(dev, sc->msk_res_type, sc->msk_res_id,
1913 sc->msk_res[0]);
1914 mtx_destroy(&sc->msk_mtx);
1915
1916 return (0);
1917 }
1918
1919 struct msk_dmamap_arg {
1920 bus_addr_t msk_busaddr;
1921 };
1922
1923 static void
1924 msk_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
1925 {
1926 struct msk_dmamap_arg *ctx;
1927
1928 if (error != 0)
1929 return;
1930 ctx = arg;
1931 ctx->msk_busaddr = segs[0].ds_addr;
1932 }
1933
1934 /* Create status DMA region. */
1935 static int
1936 msk_status_dma_alloc(struct msk_softc *sc)
1937 {
1938 struct msk_dmamap_arg ctx;
1939 int error;
1940
1941 error = bus_dma_tag_create(
1942 bus_get_dma_tag(sc->msk_dev), /* parent */
1943 MSK_STAT_ALIGN, 0, /* alignment, boundary */
1944 BUS_SPACE_MAXADDR, /* lowaddr */
1945 BUS_SPACE_MAXADDR, /* highaddr */
1946 NULL, NULL, /* filter, filterarg */
1947 MSK_STAT_RING_SZ, /* maxsize */
1948 1, /* nsegments */
1949 MSK_STAT_RING_SZ, /* maxsegsize */
1950 0, /* flags */
1951 NULL, NULL, /* lockfunc, lockarg */
1952 &sc->msk_stat_tag);
1953 if (error != 0) {
1954 device_printf(sc->msk_dev,
1955 "failed to create status DMA tag\n");
1956 return (error);
1957 }
1958
1959 /* Allocate DMA'able memory and load the DMA map for status ring. */
1960 error = bus_dmamem_alloc(sc->msk_stat_tag,
1961 (void **)&sc->msk_stat_ring, BUS_DMA_WAITOK | BUS_DMA_COHERENT |
1962 BUS_DMA_ZERO, &sc->msk_stat_map);
1963 if (error != 0) {
1964 device_printf(sc->msk_dev,
1965 "failed to allocate DMA'able memory for status ring\n");
1966 return (error);
1967 }
1968
1969 ctx.msk_busaddr = 0;
1970 error = bus_dmamap_load(sc->msk_stat_tag,
1971 sc->msk_stat_map, sc->msk_stat_ring, MSK_STAT_RING_SZ,
1972 msk_dmamap_cb, &ctx, 0);
1973 if (error != 0) {
1974 device_printf(sc->msk_dev,
1975 "failed to load DMA'able memory for status ring\n");
1976 return (error);
1977 }
1978 sc->msk_stat_ring_paddr = ctx.msk_busaddr;
1979
1980 return (0);
1981 }
1982
1983 static void
1984 msk_status_dma_free(struct msk_softc *sc)
1985 {
1986
1987 /* Destroy status block. */
1988 if (sc->msk_stat_tag) {
1989 if (sc->msk_stat_map) {
1990 bus_dmamap_unload(sc->msk_stat_tag, sc->msk_stat_map);
1991 if (sc->msk_stat_ring) {
1992 bus_dmamem_free(sc->msk_stat_tag,
1993 sc->msk_stat_ring, sc->msk_stat_map);
1994 sc->msk_stat_ring = NULL;
1995 }
1996 sc->msk_stat_map = NULL;
1997 }
1998 bus_dma_tag_destroy(sc->msk_stat_tag);
1999 sc->msk_stat_tag = NULL;
2000 }
2001 }
2002
2003 static int
2004 msk_txrx_dma_alloc(struct msk_if_softc *sc_if)
2005 {
2006 struct msk_dmamap_arg ctx;
2007 struct msk_txdesc *txd;
2008 struct msk_rxdesc *rxd;
2009 struct msk_rxdesc *jrxd;
2010 struct msk_jpool_entry *entry;
2011 uint8_t *ptr;
2012 int error, i;
2013
2014 mtx_init(&sc_if->msk_jlist_mtx, "msk_jlist_mtx", NULL, MTX_DEF);
2015 SLIST_INIT(&sc_if->msk_jfree_listhead);
2016 SLIST_INIT(&sc_if->msk_jinuse_listhead);
2017
2018 /* Create parent DMA tag. */
2019 	/*
2020 	 * XXX
2021 	 * It seems that Yukon II supports full 64-bit DMA operations, but
2022 	 * it needs two descriptors (list elements) per 64-bit transfer.
2023 	 * Since we don't know in advance which DMA address mapping (32-bit
2024 	 * or 64-bit) would be used for each mbuf, we limit the DMA space
2025 	 * to the 32-bit address range.  Otherwise, we would have to check
2026 	 * which DMA address is used and chain another descriptor for the
2027 	 * 64-bit operation, which also makes the descriptor ring size
2028 	 * variable.  Limiting DMA addresses to the 32-bit address space
2029 	 * greatly simplifies descriptor handling and possibly increases
2030 	 * performance a bit due to efficient handling of descriptors.
2031 	 * Apart from harassing checksum offloading mechanisms, it seems
2032 	 * a really bad idea to use a separate descriptor for 64-bit DMA
2033 	 * just to save a little descriptor memory; I've never seen this
2034 	 * exotic scheme on Ethernet interface hardware.  (Sketch below.)
2035 	 */
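#if 0
	/*
	 * Illustrative sketch, not part of the driver: if the 32-bit
	 * lowaddr restriction below were lifted, a Tx segment with a
	 * high DMA address would need an extra list element carrying
	 * the upper 32 bits ahead of its data element in the Tx
	 * encapsulation path.  The OP_ADDR64 opcode and MSK_ADDR_HI()
	 * macro are assumptions borrowed from the Linux sky2 driver;
	 * verify against if_mskreg.h before using anything like this.
	 */
	if (MSK_ADDR_HI(txsegs[i].ds_addr) != 0) {
		tx_le = &sc_if->msk_rdata.msk_tx_ring[prod];
		tx_le->msk_addr = htole32(MSK_ADDR_HI(txsegs[i].ds_addr));
		tx_le->msk_control = htole32(OP_ADDR64 | HW_OWNER);
		MSK_INC(prod, MSK_TX_RING_CNT);
	}
#endif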
2036 error = bus_dma_tag_create(
2037 bus_get_dma_tag(sc_if->msk_if_dev), /* parent */
2038 1, 0, /* alignment, boundary */
2039 BUS_SPACE_MAXADDR_32BIT, /* lowaddr */
2040 BUS_SPACE_MAXADDR, /* highaddr */
2041 NULL, NULL, /* filter, filterarg */
2042 BUS_SPACE_MAXSIZE_32BIT, /* maxsize */
2043 0, /* nsegments */
2044 BUS_SPACE_MAXSIZE_32BIT, /* maxsegsize */
2045 0, /* flags */
2046 NULL, NULL, /* lockfunc, lockarg */
2047 &sc_if->msk_cdata.msk_parent_tag);
2048 if (error != 0) {
2049 device_printf(sc_if->msk_if_dev,
2050 "failed to create parent DMA tag\n");
2051 goto fail;
2052 }
2053 /* Create tag for Tx ring. */
2054 error = bus_dma_tag_create(sc_if->msk_cdata.msk_parent_tag,/* parent */
2055 MSK_RING_ALIGN, 0, /* alignment, boundary */
2056 BUS_SPACE_MAXADDR, /* lowaddr */
2057 BUS_SPACE_MAXADDR, /* highaddr */
2058 NULL, NULL, /* filter, filterarg */
2059 MSK_TX_RING_SZ, /* maxsize */
2060 1, /* nsegments */
2061 MSK_TX_RING_SZ, /* maxsegsize */
2062 0, /* flags */
2063 NULL, NULL, /* lockfunc, lockarg */
2064 &sc_if->msk_cdata.msk_tx_ring_tag);
2065 if (error != 0) {
2066 device_printf(sc_if->msk_if_dev,
2067 "failed to create Tx ring DMA tag\n");
2068 goto fail;
2069 }
2070
2071 /* Create tag for Rx ring. */
2072 error = bus_dma_tag_create(sc_if->msk_cdata.msk_parent_tag,/* parent */
2073 MSK_RING_ALIGN, 0, /* alignment, boundary */
2074 BUS_SPACE_MAXADDR, /* lowaddr */
2075 BUS_SPACE_MAXADDR, /* highaddr */
2076 NULL, NULL, /* filter, filterarg */
2077 MSK_RX_RING_SZ, /* maxsize */
2078 1, /* nsegments */
2079 MSK_RX_RING_SZ, /* maxsegsize */
2080 0, /* flags */
2081 NULL, NULL, /* lockfunc, lockarg */
2082 &sc_if->msk_cdata.msk_rx_ring_tag);
2083 if (error != 0) {
2084 device_printf(sc_if->msk_if_dev,
2085 "failed to create Rx ring DMA tag\n");
2086 goto fail;
2087 }
2088
2089 /* Create tag for jumbo Rx ring. */
2090 error = bus_dma_tag_create(sc_if->msk_cdata.msk_parent_tag,/* parent */
2091 MSK_RING_ALIGN, 0, /* alignment, boundary */
2092 BUS_SPACE_MAXADDR, /* lowaddr */
2093 BUS_SPACE_MAXADDR, /* highaddr */
2094 NULL, NULL, /* filter, filterarg */
2095 MSK_JUMBO_RX_RING_SZ, /* maxsize */
2096 1, /* nsegments */
2097 MSK_JUMBO_RX_RING_SZ, /* maxsegsize */
2098 0, /* flags */
2099 NULL, NULL, /* lockfunc, lockarg */
2100 &sc_if->msk_cdata.msk_jumbo_rx_ring_tag);
2101 if (error != 0) {
2102 device_printf(sc_if->msk_if_dev,
2103 "failed to create jumbo Rx ring DMA tag\n");
2104 goto fail;
2105 }
2106
2107 /* Create tag for jumbo buffer blocks. */
2108 error = bus_dma_tag_create(sc_if->msk_cdata.msk_parent_tag,/* parent */
2109 PAGE_SIZE, 0, /* alignment, boundary */
2110 BUS_SPACE_MAXADDR, /* lowaddr */
2111 BUS_SPACE_MAXADDR, /* highaddr */
2112 NULL, NULL, /* filter, filterarg */
2113 MSK_JMEM, /* maxsize */
2114 1, /* nsegments */
2115 MSK_JMEM, /* maxsegsize */
2116 0, /* flags */
2117 NULL, NULL, /* lockfunc, lockarg */
2118 &sc_if->msk_cdata.msk_jumbo_tag);
2119 if (error != 0) {
2120 device_printf(sc_if->msk_if_dev,
2121 "failed to create jumbo Rx buffer block DMA tag\n");
2122 goto fail;
2123 }
2124
2125 /* Create tag for Tx buffers. */
2126 error = bus_dma_tag_create(sc_if->msk_cdata.msk_parent_tag,/* parent */
2127 1, 0, /* alignment, boundary */
2128 BUS_SPACE_MAXADDR, /* lowaddr */
2129 BUS_SPACE_MAXADDR, /* highaddr */
2130 NULL, NULL, /* filter, filterarg */
2131 MSK_TSO_MAXSIZE, /* maxsize */
2132 MSK_MAXTXSEGS, /* nsegments */
2133 MSK_TSO_MAXSGSIZE, /* maxsegsize */
2134 0, /* flags */
2135 NULL, NULL, /* lockfunc, lockarg */
2136 &sc_if->msk_cdata.msk_tx_tag);
2137 if (error != 0) {
2138 device_printf(sc_if->msk_if_dev,
2139 "failed to create Tx DMA tag\n");
2140 goto fail;
2141 }
2142
2143 /* Create tag for Rx buffers. */
2144 error = bus_dma_tag_create(sc_if->msk_cdata.msk_parent_tag,/* parent */
2145 1, 0, /* alignment, boundary */
2146 BUS_SPACE_MAXADDR, /* lowaddr */
2147 BUS_SPACE_MAXADDR, /* highaddr */
2148 NULL, NULL, /* filter, filterarg */
2149 MCLBYTES, /* maxsize */
2150 1, /* nsegments */
2151 MCLBYTES, /* maxsegsize */
2152 0, /* flags */
2153 NULL, NULL, /* lockfunc, lockarg */
2154 &sc_if->msk_cdata.msk_rx_tag);
2155 if (error != 0) {
2156 device_printf(sc_if->msk_if_dev,
2157 "failed to create Rx DMA tag\n");
2158 goto fail;
2159 }
2160
2161 /* Create tag for jumbo Rx buffers. */
2162 error = bus_dma_tag_create(sc_if->msk_cdata.msk_parent_tag,/* parent */
2163 PAGE_SIZE, 0, /* alignment, boundary */
2164 BUS_SPACE_MAXADDR, /* lowaddr */
2165 BUS_SPACE_MAXADDR, /* highaddr */
2166 NULL, NULL, /* filter, filterarg */
2167 MCLBYTES * MSK_MAXRXSEGS, /* maxsize */
2168 MSK_MAXRXSEGS, /* nsegments */
2169 MSK_JLEN, /* maxsegsize */
2170 0, /* flags */
2171 NULL, NULL, /* lockfunc, lockarg */
2172 &sc_if->msk_cdata.msk_jumbo_rx_tag);
2173 if (error != 0) {
2174 device_printf(sc_if->msk_if_dev,
2175 "failed to create jumbo Rx DMA tag\n");
2176 goto fail;
2177 }
2178
2179 /* Allocate DMA'able memory and load the DMA map for Tx ring. */
2180 error = bus_dmamem_alloc(sc_if->msk_cdata.msk_tx_ring_tag,
2181 (void **)&sc_if->msk_rdata.msk_tx_ring, BUS_DMA_WAITOK |
2182 BUS_DMA_COHERENT | BUS_DMA_ZERO, &sc_if->msk_cdata.msk_tx_ring_map);
2183 if (error != 0) {
2184 device_printf(sc_if->msk_if_dev,
2185 "failed to allocate DMA'able memory for Tx ring\n");
2186 goto fail;
2187 }
2188
2189 ctx.msk_busaddr = 0;
2190 error = bus_dmamap_load(sc_if->msk_cdata.msk_tx_ring_tag,
2191 sc_if->msk_cdata.msk_tx_ring_map, sc_if->msk_rdata.msk_tx_ring,
2192 MSK_TX_RING_SZ, msk_dmamap_cb, &ctx, 0);
2193 if (error != 0) {
2194 device_printf(sc_if->msk_if_dev,
2195 "failed to load DMA'able memory for Tx ring\n");
2196 goto fail;
2197 }
2198 sc_if->msk_rdata.msk_tx_ring_paddr = ctx.msk_busaddr;
2199
2200 /* Allocate DMA'able memory and load the DMA map for Rx ring. */
2201 error = bus_dmamem_alloc(sc_if->msk_cdata.msk_rx_ring_tag,
2202 (void **)&sc_if->msk_rdata.msk_rx_ring, BUS_DMA_WAITOK |
2203 BUS_DMA_COHERENT | BUS_DMA_ZERO, &sc_if->msk_cdata.msk_rx_ring_map);
2204 if (error != 0) {
2205 device_printf(sc_if->msk_if_dev,
2206 "failed to allocate DMA'able memory for Rx ring\n");
2207 goto fail;
2208 }
2209
2210 ctx.msk_busaddr = 0;
2211 error = bus_dmamap_load(sc_if->msk_cdata.msk_rx_ring_tag,
2212 sc_if->msk_cdata.msk_rx_ring_map, sc_if->msk_rdata.msk_rx_ring,
2213 MSK_RX_RING_SZ, msk_dmamap_cb, &ctx, 0);
2214 if (error != 0) {
2215 device_printf(sc_if->msk_if_dev,
2216 "failed to load DMA'able memory for Rx ring\n");
2217 goto fail;
2218 }
2219 sc_if->msk_rdata.msk_rx_ring_paddr = ctx.msk_busaddr;
2220
2221 /* Allocate DMA'able memory and load the DMA map for jumbo Rx ring. */
2222 error = bus_dmamem_alloc(sc_if->msk_cdata.msk_jumbo_rx_ring_tag,
2223 (void **)&sc_if->msk_rdata.msk_jumbo_rx_ring,
2224 BUS_DMA_WAITOK | BUS_DMA_COHERENT | BUS_DMA_ZERO,
2225 &sc_if->msk_cdata.msk_jumbo_rx_ring_map);
2226 if (error != 0) {
2227 device_printf(sc_if->msk_if_dev,
2228 "failed to allocate DMA'able memory for jumbo Rx ring\n");
2229 goto fail;
2230 }
2231
2232 ctx.msk_busaddr = 0;
2233 error = bus_dmamap_load(sc_if->msk_cdata.msk_jumbo_rx_ring_tag,
2234 sc_if->msk_cdata.msk_jumbo_rx_ring_map,
2235 sc_if->msk_rdata.msk_jumbo_rx_ring, MSK_JUMBO_RX_RING_SZ,
2236 msk_dmamap_cb, &ctx, 0);
2237 if (error != 0) {
2238 device_printf(sc_if->msk_if_dev,
2239 "failed to load DMA'able memory for jumbo Rx ring\n");
2240 goto fail;
2241 }
2242 sc_if->msk_rdata.msk_jumbo_rx_ring_paddr = ctx.msk_busaddr;
2243
2244 /* Create DMA maps for Tx buffers. */
2245 for (i = 0; i < MSK_TX_RING_CNT; i++) {
2246 txd = &sc_if->msk_cdata.msk_txdesc[i];
2247 txd->tx_m = NULL;
2248 txd->tx_dmamap = NULL;
2249 error = bus_dmamap_create(sc_if->msk_cdata.msk_tx_tag, 0,
2250 &txd->tx_dmamap);
2251 if (error != 0) {
2252 device_printf(sc_if->msk_if_dev,
2253 "failed to create Tx dmamap\n");
2254 goto fail;
2255 }
2256 }
2257 /* Create DMA maps for Rx buffers. */
2258 if ((error = bus_dmamap_create(sc_if->msk_cdata.msk_rx_tag, 0,
2259 &sc_if->msk_cdata.msk_rx_sparemap)) != 0) {
2260 device_printf(sc_if->msk_if_dev,
2261 "failed to create spare Rx dmamap\n");
2262 goto fail;
2263 }
2264 for (i = 0; i < MSK_RX_RING_CNT; i++) {
2265 rxd = &sc_if->msk_cdata.msk_rxdesc[i];
2266 rxd->rx_m = NULL;
2267 rxd->rx_dmamap = NULL;
2268 error = bus_dmamap_create(sc_if->msk_cdata.msk_rx_tag, 0,
2269 &rxd->rx_dmamap);
2270 if (error != 0) {
2271 device_printf(sc_if->msk_if_dev,
2272 "failed to create Rx dmamap\n");
2273 goto fail;
2274 }
2275 }
2276 /* Create DMA maps for jumbo Rx buffers. */
2277 if ((error = bus_dmamap_create(sc_if->msk_cdata.msk_jumbo_rx_tag, 0,
2278 &sc_if->msk_cdata.msk_jumbo_rx_sparemap)) != 0) {
2279 device_printf(sc_if->msk_if_dev,
2280 "failed to create spare jumbo Rx dmamap\n");
2281 goto fail;
2282 }
2283 for (i = 0; i < MSK_JUMBO_RX_RING_CNT; i++) {
2284 jrxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[i];
2285 jrxd->rx_m = NULL;
2286 jrxd->rx_dmamap = NULL;
2287 error = bus_dmamap_create(sc_if->msk_cdata.msk_jumbo_rx_tag, 0,
2288 &jrxd->rx_dmamap);
2289 if (error != 0) {
2290 device_printf(sc_if->msk_if_dev,
2291 "failed to create jumbo Rx dmamap\n");
2292 goto fail;
2293 }
2294 }
2295
2296 /* Allocate DMA'able memory and load the DMA map for jumbo buf. */
2297 error = bus_dmamem_alloc(sc_if->msk_cdata.msk_jumbo_tag,
2298 (void **)&sc_if->msk_rdata.msk_jumbo_buf,
2299 BUS_DMA_WAITOK | BUS_DMA_COHERENT | BUS_DMA_ZERO,
2300 &sc_if->msk_cdata.msk_jumbo_map);
2301 if (error != 0) {
2302 device_printf(sc_if->msk_if_dev,
2303 "failed to allocate DMA'able memory for jumbo buf\n");
2304 goto fail;
2305 }
2306
2307 ctx.msk_busaddr = 0;
2308 error = bus_dmamap_load(sc_if->msk_cdata.msk_jumbo_tag,
2309 sc_if->msk_cdata.msk_jumbo_map, sc_if->msk_rdata.msk_jumbo_buf,
2310 MSK_JMEM, msk_dmamap_cb, &ctx, 0);
2311 if (error != 0) {
2312 device_printf(sc_if->msk_if_dev,
2313 "failed to load DMA'able memory for jumbobuf\n");
2314 goto fail;
2315 }
2316 sc_if->msk_rdata.msk_jumbo_buf_paddr = ctx.msk_busaddr;
2317
2318 	/*
2319 	 * Now divide it up into MSK_JLEN-sized (9K) pieces and save the
2320 	 * addresses in an array.  (Slot arithmetic is noted below.)
2321 	 */
2322 ptr = sc_if->msk_rdata.msk_jumbo_buf;
2323 for (i = 0; i < MSK_JSLOTS; i++) {
2324 sc_if->msk_cdata.msk_jslots[i] = ptr;
2325 ptr += MSK_JLEN;
2326 entry = malloc(sizeof(struct msk_jpool_entry),
2327 M_DEVBUF, M_WAITOK);
2328 if (entry == NULL) {
2329 device_printf(sc_if->msk_if_dev,
2330 "no memory for jumbo buffers!\n");
2331 error = ENOMEM;
2332 goto fail;
2333 }
2334 entry->slot = i;
2335 SLIST_INSERT_HEAD(&sc_if->msk_jfree_listhead, entry,
2336 jpool_entries);
2337 }
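	/*
	 * Worked example of the carving above: with MSK_JLEN bytes per
	 * slot, msk_jslots[2] points at msk_jumbo_buf + 2 * MSK_JLEN,
	 * and msk_jfree() recovers the slot index from a buffer pointer
	 * as (buf - msk_jumbo_buf) / MSK_JLEN.
	 */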
2338
2339 fail:
2340 return (error);
2341 }
2342
2343 static void
2344 msk_txrx_dma_free(struct msk_if_softc *sc_if)
2345 {
2346 struct msk_txdesc *txd;
2347 struct msk_rxdesc *rxd;
2348 struct msk_rxdesc *jrxd;
2349 struct msk_jpool_entry *entry;
2350 int i;
2351
2352 MSK_JLIST_LOCK(sc_if);
2353 while ((entry = SLIST_FIRST(&sc_if->msk_jinuse_listhead))) {
2354 device_printf(sc_if->msk_if_dev,
2355 "asked to free buffer that is in use!\n");
2356 SLIST_REMOVE_HEAD(&sc_if->msk_jinuse_listhead, jpool_entries);
2357 SLIST_INSERT_HEAD(&sc_if->msk_jfree_listhead, entry,
2358 jpool_entries);
2359 }
2360
2361 while (!SLIST_EMPTY(&sc_if->msk_jfree_listhead)) {
2362 entry = SLIST_FIRST(&sc_if->msk_jfree_listhead);
2363 SLIST_REMOVE_HEAD(&sc_if->msk_jfree_listhead, jpool_entries);
2364 free(entry, M_DEVBUF);
2365 }
2366 MSK_JLIST_UNLOCK(sc_if);
2367
2368 /* Destroy jumbo buffer block. */
2369 if (sc_if->msk_cdata.msk_jumbo_map)
2370 bus_dmamap_unload(sc_if->msk_cdata.msk_jumbo_tag,
2371 sc_if->msk_cdata.msk_jumbo_map);
2372
2373 if (sc_if->msk_rdata.msk_jumbo_buf) {
2374 bus_dmamem_free(sc_if->msk_cdata.msk_jumbo_tag,
2375 sc_if->msk_rdata.msk_jumbo_buf,
2376 sc_if->msk_cdata.msk_jumbo_map);
2377 sc_if->msk_rdata.msk_jumbo_buf = NULL;
2378 sc_if->msk_cdata.msk_jumbo_map = NULL;
2379 }
2380
2381 /* Tx ring. */
2382 if (sc_if->msk_cdata.msk_tx_ring_tag) {
2383 if (sc_if->msk_cdata.msk_tx_ring_map)
2384 bus_dmamap_unload(sc_if->msk_cdata.msk_tx_ring_tag,
2385 sc_if->msk_cdata.msk_tx_ring_map);
2386 if (sc_if->msk_cdata.msk_tx_ring_map &&
2387 sc_if->msk_rdata.msk_tx_ring)
2388 bus_dmamem_free(sc_if->msk_cdata.msk_tx_ring_tag,
2389 sc_if->msk_rdata.msk_tx_ring,
2390 sc_if->msk_cdata.msk_tx_ring_map);
2391 sc_if->msk_rdata.msk_tx_ring = NULL;
2392 sc_if->msk_cdata.msk_tx_ring_map = NULL;
2393 bus_dma_tag_destroy(sc_if->msk_cdata.msk_tx_ring_tag);
2394 sc_if->msk_cdata.msk_tx_ring_tag = NULL;
2395 }
2396 /* Rx ring. */
2397 if (sc_if->msk_cdata.msk_rx_ring_tag) {
2398 if (sc_if->msk_cdata.msk_rx_ring_map)
2399 bus_dmamap_unload(sc_if->msk_cdata.msk_rx_ring_tag,
2400 sc_if->msk_cdata.msk_rx_ring_map);
2401 if (sc_if->msk_cdata.msk_rx_ring_map &&
2402 sc_if->msk_rdata.msk_rx_ring)
2403 bus_dmamem_free(sc_if->msk_cdata.msk_rx_ring_tag,
2404 sc_if->msk_rdata.msk_rx_ring,
2405 sc_if->msk_cdata.msk_rx_ring_map);
2406 sc_if->msk_rdata.msk_rx_ring = NULL;
2407 sc_if->msk_cdata.msk_rx_ring_map = NULL;
2408 bus_dma_tag_destroy(sc_if->msk_cdata.msk_rx_ring_tag);
2409 sc_if->msk_cdata.msk_rx_ring_tag = NULL;
2410 }
2411 /* Jumbo Rx ring. */
2412 if (sc_if->msk_cdata.msk_jumbo_rx_ring_tag) {
2413 if (sc_if->msk_cdata.msk_jumbo_rx_ring_map)
2414 bus_dmamap_unload(sc_if->msk_cdata.msk_jumbo_rx_ring_tag,
2415 sc_if->msk_cdata.msk_jumbo_rx_ring_map);
2416 if (sc_if->msk_cdata.msk_jumbo_rx_ring_map &&
2417 sc_if->msk_rdata.msk_jumbo_rx_ring)
2418 bus_dmamem_free(sc_if->msk_cdata.msk_jumbo_rx_ring_tag,
2419 sc_if->msk_rdata.msk_jumbo_rx_ring,
2420 sc_if->msk_cdata.msk_jumbo_rx_ring_map);
2421 sc_if->msk_rdata.msk_jumbo_rx_ring = NULL;
2422 sc_if->msk_cdata.msk_jumbo_rx_ring_map = NULL;
2423 bus_dma_tag_destroy(sc_if->msk_cdata.msk_jumbo_rx_ring_tag);
2424 sc_if->msk_cdata.msk_jumbo_rx_ring_tag = NULL;
2425 }
2426 /* Tx buffers. */
2427 if (sc_if->msk_cdata.msk_tx_tag) {
2428 for (i = 0; i < MSK_TX_RING_CNT; i++) {
2429 txd = &sc_if->msk_cdata.msk_txdesc[i];
2430 if (txd->tx_dmamap) {
2431 bus_dmamap_destroy(sc_if->msk_cdata.msk_tx_tag,
2432 txd->tx_dmamap);
2433 txd->tx_dmamap = NULL;
2434 }
2435 }
2436 bus_dma_tag_destroy(sc_if->msk_cdata.msk_tx_tag);
2437 sc_if->msk_cdata.msk_tx_tag = NULL;
2438 }
2439 /* Rx buffers. */
2440 if (sc_if->msk_cdata.msk_rx_tag) {
2441 for (i = 0; i < MSK_RX_RING_CNT; i++) {
2442 rxd = &sc_if->msk_cdata.msk_rxdesc[i];
2443 if (rxd->rx_dmamap) {
2444 bus_dmamap_destroy(sc_if->msk_cdata.msk_rx_tag,
2445 rxd->rx_dmamap);
2446 rxd->rx_dmamap = NULL;
2447 }
2448 }
2449 if (sc_if->msk_cdata.msk_rx_sparemap) {
2450 bus_dmamap_destroy(sc_if->msk_cdata.msk_rx_tag,
2451 sc_if->msk_cdata.msk_rx_sparemap);
2452 			sc_if->msk_cdata.msk_rx_sparemap = NULL;
2453 }
2454 bus_dma_tag_destroy(sc_if->msk_cdata.msk_rx_tag);
2455 sc_if->msk_cdata.msk_rx_tag = NULL;
2456 }
2457 /* Jumbo Rx buffers. */
2458 if (sc_if->msk_cdata.msk_jumbo_rx_tag) {
2459 for (i = 0; i < MSK_JUMBO_RX_RING_CNT; i++) {
2460 jrxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[i];
2461 if (jrxd->rx_dmamap) {
2462 bus_dmamap_destroy(
2463 sc_if->msk_cdata.msk_jumbo_rx_tag,
2464 jrxd->rx_dmamap);
2465 jrxd->rx_dmamap = NULL;
2466 }
2467 }
2468 if (sc_if->msk_cdata.msk_jumbo_rx_sparemap) {
2469 bus_dmamap_destroy(sc_if->msk_cdata.msk_jumbo_rx_tag,
2470 sc_if->msk_cdata.msk_jumbo_rx_sparemap);
2471 			sc_if->msk_cdata.msk_jumbo_rx_sparemap = NULL;
2472 }
2473 bus_dma_tag_destroy(sc_if->msk_cdata.msk_jumbo_rx_tag);
2474 sc_if->msk_cdata.msk_jumbo_rx_tag = NULL;
2475 }
2476
2477 if (sc_if->msk_cdata.msk_parent_tag) {
2478 bus_dma_tag_destroy(sc_if->msk_cdata.msk_parent_tag);
2479 sc_if->msk_cdata.msk_parent_tag = NULL;
2480 }
2481 mtx_destroy(&sc_if->msk_jlist_mtx);
2482 }
2483
2484 /*
2485 * Allocate a jumbo buffer.
2486 */
2487 static void *
2488 msk_jalloc(struct msk_if_softc *sc_if)
2489 {
2490 struct msk_jpool_entry *entry;
2491
2492 MSK_JLIST_LOCK(sc_if);
2493
2494 entry = SLIST_FIRST(&sc_if->msk_jfree_listhead);
2495
2496 if (entry == NULL) {
2497 MSK_JLIST_UNLOCK(sc_if);
2498 return (NULL);
2499 }
2500
2501 SLIST_REMOVE_HEAD(&sc_if->msk_jfree_listhead, jpool_entries);
2502 SLIST_INSERT_HEAD(&sc_if->msk_jinuse_listhead, entry, jpool_entries);
2503
2504 MSK_JLIST_UNLOCK(sc_if);
2505
2506 return (sc_if->msk_cdata.msk_jslots[entry->slot]);
2507 }
2508
2509 /*
2510 * Release a jumbo buffer.
2511 */
2512 static void
2513 msk_jfree(void *buf, void *args)
2514 {
2515 struct msk_if_softc *sc_if;
2516 struct msk_jpool_entry *entry;
2517 int i;
2518
2519 /* Extract the softc struct pointer. */
2520 sc_if = (struct msk_if_softc *)args;
2521 KASSERT(sc_if != NULL, ("%s: can't find softc pointer!", __func__));
2522
2523 MSK_JLIST_LOCK(sc_if);
2524 /* Calculate the slot this buffer belongs to. */
2525 i = ((vm_offset_t)buf
2526 - (vm_offset_t)sc_if->msk_rdata.msk_jumbo_buf) / MSK_JLEN;
2527 KASSERT(i >= 0 && i < MSK_JSLOTS,
2528 ("%s: asked to free buffer that we don't manage!", __func__));
2529
2530 entry = SLIST_FIRST(&sc_if->msk_jinuse_listhead);
2531 KASSERT(entry != NULL, ("%s: buffer not in use!", __func__));
2532 entry->slot = i;
2533 SLIST_REMOVE_HEAD(&sc_if->msk_jinuse_listhead, jpool_entries);
2534 SLIST_INSERT_HEAD(&sc_if->msk_jfree_listhead, entry, jpool_entries);
2535 if (SLIST_EMPTY(&sc_if->msk_jinuse_listhead))
2536 wakeup(sc_if);
2537
2538 MSK_JLIST_UNLOCK(sc_if);
2539 }
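#if 0
	/*
	 * Illustrative usage sketch, not part of the driver: how the
	 * msk_jalloc()/msk_jfree() pair plugs into the external-storage
	 * mbuf machinery.  The actual attach code lives in
	 * msk_jumbo_newbuf() elsewhere in this file; the MEXTADD(9)
	 * argument order shown here is an assumption based on the
	 * mbuf(9) macros of this era.
	 */
	m = m_gethdr(M_DONTWAIT, MT_DATA);
	buf = msk_jalloc(sc_if);
	if (m == NULL || buf == NULL)
		goto drop;
	/* msk_jfree() runs when the last reference to the mbuf is freed. */
	MEXTADD(m, buf, MSK_JLEN, msk_jfree, sc_if, 0, EXT_NET_DRV);
#endif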
2540
2541 /*
2542  * This is a copy of ath_defrag() from ath(4).
2543  *
2544  * Defragment an mbuf chain, returning at most maxfrags separate
2545  * mbufs+clusters.  If this is not possible NULL is returned and
2546  * the original mbuf chain is left in its present (potentially
2547  * modified) state.  We use two techniques: collapsing consecutive
2548  * mbufs and replacing consecutive mbufs by a cluster.
2549  */
2550 static struct mbuf *
2551 msk_defrag(struct mbuf *m0, int how, int maxfrags)
2552 {
2553 struct mbuf *m, *n, *n2, **prev;
2554 u_int curfrags;
2555
2556 /*
2557 * Calculate the current number of frags.
2558 */
2559 curfrags = 0;
2560 for (m = m0; m != NULL; m = m->m_next)
2561 curfrags++;
2562 /*
2563 * First, try to collapse mbufs. Note that we always collapse
2564 * towards the front so we don't need to deal with moving the
2565 * pkthdr. This may be suboptimal if the first mbuf has much
2566 * less data than the following.
2567 */
2568 m = m0;
2569 again:
2570 for (;;) {
2571 n = m->m_next;
2572 if (n == NULL)
2573 break;
2574 if ((m->m_flags & M_RDONLY) == 0 &&
2575 n->m_len < M_TRAILINGSPACE(m)) {
2576 bcopy(mtod(n, void *), mtod(m, char *) + m->m_len,
2577 n->m_len);
2578 m->m_len += n->m_len;
2579 m->m_next = n->m_next;
2580 m_free(n);
2581 if (--curfrags <= maxfrags)
2582 return (m0);
2583 } else
2584 m = n;
2585 }
2586 KASSERT(maxfrags > 1,
2587 ("maxfrags %u, but normal collapse failed", maxfrags));
2588 /*
2589 * Collapse consecutive mbufs to a cluster.
2590 */
2591 prev = &m0->m_next; /* NB: not the first mbuf */
2592 while ((n = *prev) != NULL) {
2593 if ((n2 = n->m_next) != NULL &&
2594 n->m_len + n2->m_len < MCLBYTES) {
2595 m = m_getcl(how, MT_DATA, 0);
2596 if (m == NULL)
2597 goto bad;
2598 bcopy(mtod(n, void *), mtod(m, void *), n->m_len);
2599 bcopy(mtod(n2, void *), mtod(m, char *) + n->m_len,
2600 n2->m_len);
2601 m->m_len = n->m_len + n2->m_len;
2602 m->m_next = n2->m_next;
2603 *prev = m;
2604 m_free(n);
2605 m_free(n2);
2606 if (--curfrags <= maxfrags) /* +1 cl -2 mbufs */
2607 				return (m0);
2608 /*
2609 * Still not there, try the normal collapse
2610 * again before we allocate another cluster.
2611 */
2612 goto again;
2613 }
2614 prev = &n->m_next;
2615 }
2616 /*
2617 * No place where we can collapse to a cluster; punt.
2618 * This can occur if, for example, you request 2 frags
2619 * but the packet requires that both be clusters (we
2620 * never reallocate the first mbuf to avoid moving the
2621 * packet header).
2622 */
2623 bad:
2624 return (NULL);
2625 }
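/*
 * Usage note: msk_encap() below calls msk_defrag() only on the EFBIG
 * path of bus_dmamap_load_mbuf_sg(), i.e. when a frame maps to more
 * than MSK_MAXTXSEGS segments; on success the compacted chain is
 * loaded again.
 */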
2626
2627 static int
2628 msk_encap(struct msk_if_softc *sc_if, struct mbuf **m_head)
2629 {
2630 struct msk_txdesc *txd, *txd_last;
2631 struct msk_tx_desc *tx_le;
2632 struct mbuf *m;
2633 struct m_tag *mtag;
2634 bus_dmamap_t map;
2635 bus_dma_segment_t txsegs[MSK_MAXTXSEGS];
2636 uint32_t control, prod, si;
2637 uint16_t offset, tcp_offset;
2638 #ifdef TSO_SUPPORT
2639 uint16_t tso_mtu;
2640 #endif
2641 int error, i, nseg, tso;
2642
2643 MSK_IF_LOCK_ASSERT(sc_if);
2644
2645 tcp_offset = offset = 0;
2646 m = *m_head;
2647 if ((m->m_pkthdr.csum_flags & (MSK_CSUM_FEATURES | CSUM_TSO)) != 0) {
2648 		/*
2649 		 * Since the mbuf has no protocol specific structure
2650 		 * information in it, we have to inspect the protocol here
2651 		 * to set up TSO and checksum offload.  I don't know why
2652 		 * Marvell made such a decision in the chip design, because
2653 		 * other GigE hardware normally takes care of all these
2654 		 * chores in hardware.  However, the TSO performance of
2655 		 * Yukon II is good enough that it's worth implementing.
2656 		 */
2657 struct ether_header *eh;
2658 struct ip *ip;
2659 struct tcphdr *tcp;
2660
2661 /* TODO check for M_WRITABLE(m) */
2662
2663 offset = sizeof(struct ether_header);
2664 m = m_pullup(m, offset);
2665 if (m == NULL) {
2666 *m_head = NULL;
2667 return (ENOBUFS);
2668 }
2669 eh = mtod(m, struct ether_header *);
2670 /* Check if hardware VLAN insertion is off. */
2671 if (eh->ether_type == htons(ETHERTYPE_VLAN)) {
2672 offset = sizeof(struct ether_vlan_header);
2673 m = m_pullup(m, offset);
2674 if (m == NULL) {
2675 *m_head = NULL;
2676 return (ENOBUFS);
2677 }
2678 }
2679 m = m_pullup(m, offset + sizeof(struct ip));
2680 if (m == NULL) {
2681 *m_head = NULL;
2682 return (ENOBUFS);
2683 }
2684 ip = (struct ip *)(mtod(m, char *) + offset);
2685 offset += (ip->ip_hl << 2);
2686 tcp_offset = offset;
2687 		/*
2688 		 * It seems that Yukon II has a Tx checksum offload bug for
2689 		 * small TCP packets that are less than 60 bytes in size
2690 		 * (e.g. a TCP window probe packet or pure ACK packet).
2691 		 * The common workaround of padding with zeros up to the
2692 		 * minimum Ethernet frame size didn't work at all.
2693 		 * Instead of disabling checksum offload completely, we
2694 		 * resort to a S/W checksum routine when we encounter short
2695 		 * TCP frames.
2696 		 * Short UDP packets appear to be handled correctly by
2697 		 * Yukon II.
2698 		 */
2699 if (m->m_pkthdr.len < MSK_MIN_FRAMELEN &&
2700 (m->m_pkthdr.csum_flags & CSUM_TCP) != 0) {
2701 uint16_t csum;
2702
2703 csum = in_cksum_skip(m, ntohs(ip->ip_len) + offset -
2704 (ip->ip_hl << 2), offset);
2705 *(uint16_t *)(m->m_data + offset +
2706 m->m_pkthdr.csum_data) = csum;
2707 m->m_pkthdr.csum_flags &= ~CSUM_TCP;
2708 }
2709 if ((m->m_pkthdr.csum_flags & CSUM_TSO) != 0) {
2710 m = m_pullup(m, offset + sizeof(struct tcphdr));
2711 if (m == NULL) {
2712 *m_head = NULL;
2713 return (ENOBUFS);
2714 }
2715 tcp = (struct tcphdr *)(mtod(m, char *) + offset);
2716 offset += (tcp->th_off << 2);
2717 }
2718 *m_head = m;
2719 }
2720
2721 prod = sc_if->msk_cdata.msk_tx_prod;
2722 txd = &sc_if->msk_cdata.msk_txdesc[prod];
2723 txd_last = txd;
2724 map = txd->tx_dmamap;
2725 error = bus_dmamap_load_mbuf_sg(sc_if->msk_cdata.msk_tx_tag, map,
2726 *m_head, txsegs, &nseg, BUS_DMA_NOWAIT);
2727 if (error == EFBIG) {
2728 m = msk_defrag(*m_head, M_DONTWAIT, MSK_MAXTXSEGS);
2729 if (m == NULL) {
2730 m_freem(*m_head);
2731 *m_head = NULL;
2732 return (ENOBUFS);
2733 }
2734 *m_head = m;
2735 error = bus_dmamap_load_mbuf_sg(sc_if->msk_cdata.msk_tx_tag,
2736 map, *m_head, txsegs, &nseg, BUS_DMA_NOWAIT);
2737 if (error != 0) {
2738 m_freem(*m_head);
2739 *m_head = NULL;
2740 return (error);
2741 }
2742 } else if (error != 0)
2743 return (error);
2744 if (nseg == 0) {
2745 m_freem(*m_head);
2746 *m_head = NULL;
2747 return (EIO);
2748 }
2749
2750 /* Check number of available descriptors. */
2751 if (sc_if->msk_cdata.msk_tx_cnt + nseg >=
2752 (MSK_TX_RING_CNT - MSK_RESERVED_TX_DESC_CNT)) {
2753 bus_dmamap_unload(sc_if->msk_cdata.msk_tx_tag, map);
2754 return (ENOBUFS);
2755 }
2756
2757 control = 0;
2758 tso = 0;
2759 tx_le = NULL;
2760
2761 /* Check TSO support. */
2762 #ifdef TSO_SUPPORT
2763 if ((m->m_pkthdr.csum_flags & CSUM_TSO) != 0) {
2764 tso_mtu = offset + m->m_pkthdr.tso_segsz;
2765 if (tso_mtu != sc_if->msk_cdata.msk_tso_mtu) {
2766 tx_le = &sc_if->msk_rdata.msk_tx_ring[prod];
2767 tx_le->msk_addr = htole32(tso_mtu);
2768 tx_le->msk_control = htole32(OP_LRGLEN | HW_OWNER);
2769 sc_if->msk_cdata.msk_tx_cnt++;
2770 MSK_INC(prod, MSK_TX_RING_CNT);
2771 sc_if->msk_cdata.msk_tso_mtu = tso_mtu;
2772 }
2773 tso++;
2774 }
2775 #endif
2776 /* Check if we have a VLAN tag to insert. */
2777 mtag = VLAN_OUTPUT_TAG(sc_if->msk_ifp, m);
2778 if (mtag != NULL) {
2779 if (tso == 0) {
2780 tx_le = &sc_if->msk_rdata.msk_tx_ring[prod];
2781 tx_le->msk_addr = htole32(0);
2782 tx_le->msk_control = htole32(OP_VLAN | HW_OWNER |
2783 htons(VLAN_TAG_VALUE(mtag)));
2784 sc_if->msk_cdata.msk_tx_cnt++;
2785 MSK_INC(prod, MSK_TX_RING_CNT);
2786 } else {
2787 tx_le->msk_control |= htole32(OP_VLAN |
2788 htons(VLAN_TAG_VALUE(mtag)));
2789 }
2790 control |= INS_VLAN;
2791 }
2792 /* Check if we have to handle checksum offload. */
2793 if (tso == 0 && (m->m_pkthdr.csum_flags & MSK_CSUM_FEATURES) != 0) {
2794 tx_le = &sc_if->msk_rdata.msk_tx_ring[prod];
2795 tx_le->msk_addr = htole32(((tcp_offset + m->m_pkthdr.csum_data)
2796 & 0xffff) | ((uint32_t)tcp_offset << 16));
2797 tx_le->msk_control = htole32(1 << 16 | (OP_TCPLISW | HW_OWNER));
2798 control = CALSUM | WR_SUM | INIT_SUM | LOCK_SUM;
2799 if ((m->m_pkthdr.csum_flags & CSUM_UDP) != 0)
2800 control |= UDPTCP;
2801 sc_if->msk_cdata.msk_tx_cnt++;
2802 MSK_INC(prod, MSK_TX_RING_CNT);
2803 }
2804
2805 si = prod;
2806 tx_le = &sc_if->msk_rdata.msk_tx_ring[prod];
2807 tx_le->msk_addr = htole32(MSK_ADDR_LO(txsegs[0].ds_addr));
2808 if (tso == 0)
2809 tx_le->msk_control = htole32(txsegs[0].ds_len | control |
2810 OP_PACKET);
2811 else
2812 tx_le->msk_control = htole32(txsegs[0].ds_len | control |
2813 OP_LARGESEND);
2814 sc_if->msk_cdata.msk_tx_cnt++;
2815 MSK_INC(prod, MSK_TX_RING_CNT);
2816
2817 for (i = 1; i < nseg; i++) {
2818 tx_le = &sc_if->msk_rdata.msk_tx_ring[prod];
2819 tx_le->msk_addr = htole32(MSK_ADDR_LO(txsegs[i].ds_addr));
2820 tx_le->msk_control = htole32(txsegs[i].ds_len | control |
2821 OP_BUFFER | HW_OWNER);
2822 sc_if->msk_cdata.msk_tx_cnt++;
2823 MSK_INC(prod, MSK_TX_RING_CNT);
2824 }
2825 /* Update producer index. */
2826 sc_if->msk_cdata.msk_tx_prod = prod;
2827
2828 	/* Set EOP on the last descriptor. */
2829 prod = (prod + MSK_TX_RING_CNT - 1) % MSK_TX_RING_CNT;
2830 tx_le = &sc_if->msk_rdata.msk_tx_ring[prod];
2831 tx_le->msk_control |= htole32(EOP);
2832
2833 /* Turn the first descriptor ownership to hardware. */
2834 tx_le = &sc_if->msk_rdata.msk_tx_ring[si];
2835 tx_le->msk_control |= htole32(HW_OWNER);
2836
2837 txd = &sc_if->msk_cdata.msk_txdesc[prod];
2838 map = txd_last->tx_dmamap;
2839 txd_last->tx_dmamap = txd->tx_dmamap;
2840 txd->tx_dmamap = map;
2841 txd->tx_m = m;
2842
2843 /* Sync descriptors. */
2844 bus_dmamap_sync(sc_if->msk_cdata.msk_tx_tag, map, BUS_DMASYNC_PREWRITE);
2845 bus_dmamap_sync(sc_if->msk_cdata.msk_tx_ring_tag,
2846 sc_if->msk_cdata.msk_tx_ring_map,
2847 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2848
2849 return (0);
2850 }
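/*
 * Summary of the list element sequence msk_encap() builds for one
 * frame (optional elements in brackets):
 *
 *   [OP_LRGLEN (TSO MTU)] [OP_VLAN (tag)] [OP_TCPLISW (csum setup)]
 *   OP_PACKET or OP_LARGESEND   (first data segment)
 *   OP_BUFFER ... OP_BUFFER     (remaining segments; EOP on the last)
 *
 * HW_OWNER on the first data element is set last, after the rest of
 * the chain is filled in, so the chip never sees a half-built frame.
 */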
2851
2852 static void
2853 msk_tx_task(void *arg, int pending)
2854 {
2855 struct ifnet *ifp;
2856
2857 ifp = arg;
2858 msk_start(ifp);
2859 }
2860
2861 static void
2862 msk_start(struct ifnet *ifp)
2863 {
2864 struct msk_if_softc *sc_if;
2865 struct mbuf *m_head;
2866 int enq;
2867
2868 sc_if = ifp->if_softc;
2869
2870 MSK_IF_LOCK(sc_if);
2871
2872 if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
2873 IFF_DRV_RUNNING || sc_if->msk_link == 0) {
2874 MSK_IF_UNLOCK(sc_if);
2875 return;
2876 }
2877
2878 for (enq = 0; !IFQ_DRV_IS_EMPTY(&ifp->if_snd) &&
2879 sc_if->msk_cdata.msk_tx_cnt <
2880 (MSK_TX_RING_CNT - MSK_RESERVED_TX_DESC_CNT); ) {
2881 IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
2882 if (m_head == NULL)
2883 break;
2884 /*
2885 * Pack the data into the transmit ring. If we
2886 * don't have room, set the OACTIVE flag and wait
2887 * for the NIC to drain the ring.
2888 */
2889 if (msk_encap(sc_if, &m_head) != 0) {
2890 if (m_head == NULL)
2891 break;
2892 IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
2893 ifp->if_drv_flags |= IFF_DRV_OACTIVE;
2894 break;
2895 }
2896
2897 enq++;
2898 /*
2899 * If there's a BPF listener, bounce a copy of this frame
2900 * to him.
2901 */
2902 ETHER_BPF_MTAP(ifp, m_head);
2903 }
2904
2905 if (enq > 0) {
2906 /* Transmit */
2907 CSR_WRITE_2(sc_if->msk_softc,
2908 Y2_PREF_Q_ADDR(sc_if->msk_txq, PREF_UNIT_PUT_IDX_REG),
2909 sc_if->msk_cdata.msk_tx_prod);
2910
2911 /* Set a timeout in case the chip goes out to lunch. */
2912 sc_if->msk_watchdog_timer = MSK_TX_TIMEOUT;
2913 }
2914
2915 MSK_IF_UNLOCK(sc_if);
2916 }
2917
2918 static void
2919 msk_watchdog(struct msk_if_softc *sc_if)
2920 {
2921 struct ifnet *ifp;
2922 uint32_t ridx;
2923 int idx;
2924
2925 MSK_IF_LOCK_ASSERT(sc_if);
2926
2927 if (sc_if->msk_watchdog_timer == 0 || --sc_if->msk_watchdog_timer)
2928 return;
2929 ifp = sc_if->msk_ifp;
2930 if (sc_if->msk_link == 0) {
2931 if (bootverbose)
2932 if_printf(sc_if->msk_ifp, "watchdog timeout "
2933 "(missed link)\n");
2934 ifp->if_oerrors++;
2935 msk_init_locked(sc_if);
2936 return;
2937 }
2938
2939 /*
2940 * Reclaim first as there is a possibility of losing Tx completion
2941 * interrupts.
2942 */
2943 ridx = sc_if->msk_port == MSK_PORT_A ? STAT_TXA1_RIDX : STAT_TXA2_RIDX;
2944 idx = CSR_READ_2(sc_if->msk_softc, ridx);
2945 if (sc_if->msk_cdata.msk_tx_cons != idx) {
2946 msk_txeof(sc_if, idx);
2947 if (sc_if->msk_cdata.msk_tx_cnt == 0) {
2948 if_printf(ifp, "watchdog timeout (missed Tx interrupts) "
2949 "-- recovering\n");
2950 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
2951 taskqueue_enqueue(taskqueue_fast,
2952 &sc_if->msk_tx_task);
2953 return;
2954 }
2955 }
2956
2957 if_printf(ifp, "watchdog timeout\n");
2958 ifp->if_oerrors++;
2959 msk_init_locked(sc_if);
2960 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
2961 taskqueue_enqueue(taskqueue_fast, &sc_if->msk_tx_task);
2962 }
2963
2964 static void
2965 mskc_shutdown(device_t dev)
2966 {
2967 struct msk_softc *sc;
2968 int i;
2969
2970 sc = device_get_softc(dev);
2971 MSK_LOCK(sc);
2972 for (i = 0; i < sc->msk_num_port; i++) {
2973 if (sc->msk_if[i] != NULL)
2974 msk_stop(sc->msk_if[i]);
2975 }
2976
2977 /* Disable all interrupts. */
2978 CSR_WRITE_4(sc, B0_IMSK, 0);
2979 CSR_READ_4(sc, B0_IMSK);
2980 CSR_WRITE_4(sc, B0_HWE_IMSK, 0);
2981 CSR_READ_4(sc, B0_HWE_IMSK);
2982
2983 /* Put hardware reset. */
2984 CSR_WRITE_2(sc, B0_CTST, CS_RST_SET);
2985
2986 MSK_UNLOCK(sc);
2987 }
2988
2989 static int
2990 mskc_suspend(device_t dev)
2991 {
2992 struct msk_softc *sc;
2993 int i;
2994
2995 sc = device_get_softc(dev);
2996
2997 MSK_LOCK(sc);
2998
2999 for (i = 0; i < sc->msk_num_port; i++) {
3000 if (sc->msk_if[i] != NULL && sc->msk_if[i]->msk_ifp != NULL &&
3001 ((sc->msk_if[i]->msk_ifp->if_drv_flags &
3002 IFF_DRV_RUNNING) != 0))
3003 msk_stop(sc->msk_if[i]);
3004 }
3005
3006 /* Disable all interrupts. */
3007 CSR_WRITE_4(sc, B0_IMSK, 0);
3008 CSR_READ_4(sc, B0_IMSK);
3009 CSR_WRITE_4(sc, B0_HWE_IMSK, 0);
3010 CSR_READ_4(sc, B0_HWE_IMSK);
3011
3012 msk_phy_power(sc, MSK_PHY_POWERDOWN);
3013
3014 /* Put hardware reset. */
3015 CSR_WRITE_2(sc, B0_CTST, CS_RST_SET);
3016 sc->msk_suspended = 1;
3017
3018 MSK_UNLOCK(sc);
3019
3020 return (0);
3021 }
3022
3023 static int
3024 mskc_resume(device_t dev)
3025 {
3026 struct msk_softc *sc;
3027 int i;
3028
3029 sc = device_get_softc(dev);
3030
3031 MSK_LOCK(sc);
3032
3033 mskc_reset(sc);
3034 for (i = 0; i < sc->msk_num_port; i++) {
3035 if (sc->msk_if[i] != NULL && sc->msk_if[i]->msk_ifp != NULL &&
3036 ((sc->msk_if[i]->msk_ifp->if_flags & IFF_UP) != 0))
3037 msk_init_locked(sc->msk_if[i]);
3038 }
3039 sc->msk_suspended = 0;
3040
3041 MSK_UNLOCK(sc);
3042
3043 return (0);
3044 }
3045
3046 static void
3047 msk_rxeof(struct msk_if_softc *sc_if, uint32_t status, int len)
3048 {
3049 struct mbuf *m;
3050 struct ifnet *ifp;
3051 struct msk_rxdesc *rxd;
3052 int cons, rxlen;
3053
3054 ifp = sc_if->msk_ifp;
3055
3056 MSK_IF_LOCK_ASSERT(sc_if);
3057
3058 cons = sc_if->msk_cdata.msk_rx_cons;
3059 do {
3060 rxlen = status >> 16;
3061 if ((status & GMR_FS_VLAN) != 0 &&
3062 (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0)
3063 rxlen -= ETHER_VLAN_ENCAP_LEN;
3064 if (len > sc_if->msk_framesize ||
3065 ((status & GMR_FS_ANY_ERR) != 0) ||
3066 ((status & GMR_FS_RX_OK) == 0) || (rxlen != len)) {
3067 			/* Don't count flow-control packets as errors. */
3068 if ((status & GMR_FS_GOOD_FC) == 0)
3069 ifp->if_ierrors++;
3070 msk_discard_rxbuf(sc_if, cons);
3071 break;
3072 }
3073 rxd = &sc_if->msk_cdata.msk_rxdesc[cons];
3074 m = rxd->rx_m;
3075 if (msk_newbuf(sc_if, cons) != 0) {
3076 ifp->if_iqdrops++;
3077 /* Reuse old buffer. */
3078 msk_discard_rxbuf(sc_if, cons);
3079 break;
3080 }
3081 m->m_pkthdr.rcvif = ifp;
3082 m->m_pkthdr.len = m->m_len = len;
3083 ifp->if_ipackets++;
3084 /* Check for VLAN tagged packets. */
3085 if ((status & GMR_FS_VLAN) != 0 &&
3086 (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0) {
3087 VLAN_INPUT_TAG_NEW(ifp, m, sc_if->msk_vtag);
3088 }
3089 MSK_IF_UNLOCK(sc_if);
3090 (*ifp->if_input)(ifp, m);
3091 MSK_IF_LOCK(sc_if);
3092 } while (0);
3093
3094 MSK_INC(sc_if->msk_cdata.msk_rx_cons, MSK_RX_RING_CNT);
3095 MSK_INC(sc_if->msk_cdata.msk_rx_prod, MSK_RX_RING_CNT);
3096 }
3097
3098 static void
3099 msk_jumbo_rxeof(struct msk_if_softc *sc_if, uint32_t status, int len)
3100 {
3101 struct mbuf *m;
3102 struct ifnet *ifp;
3103 struct msk_rxdesc *jrxd;
3104 int cons, rxlen;
3105
3106 ifp = sc_if->msk_ifp;
3107
3108 MSK_IF_LOCK_ASSERT(sc_if);
3109
3110 cons = sc_if->msk_cdata.msk_rx_cons;
3111 do {
3112 rxlen = status >> 16;
3113 if ((status & GMR_FS_VLAN) != 0 &&
3114 (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0)
3115 rxlen -= ETHER_VLAN_ENCAP_LEN;
3116 if (len > sc_if->msk_framesize ||
3117 ((status & GMR_FS_ANY_ERR) != 0) ||
3118 ((status & GMR_FS_RX_OK) == 0) || (rxlen != len)) {
3119 			/* Don't count flow-control packets as errors. */
3120 if ((status & GMR_FS_GOOD_FC) == 0)
3121 ifp->if_ierrors++;
3122 msk_discard_jumbo_rxbuf(sc_if, cons);
3123 break;
3124 }
3125 jrxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[cons];
3126 m = jrxd->rx_m;
3127 if (msk_jumbo_newbuf(sc_if, cons) != 0) {
3128 ifp->if_iqdrops++;
3129 /* Reuse old buffer. */
3130 msk_discard_jumbo_rxbuf(sc_if, cons);
3131 break;
3132 }
3133 m->m_pkthdr.rcvif = ifp;
3134 m->m_pkthdr.len = m->m_len = len;
3135 ifp->if_ipackets++;
3136 /* Check for VLAN tagged packets. */
3137 if ((status & GMR_FS_VLAN) != 0 &&
3138 (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0) {
3139 VLAN_INPUT_TAG_NEW(ifp, m, sc_if->msk_vtag);
3140 }
3141 MSK_IF_UNLOCK(sc_if);
3142 (*ifp->if_input)(ifp, m);
3143 MSK_IF_LOCK(sc_if);
3144 } while (0);
3145
3146 MSK_INC(sc_if->msk_cdata.msk_rx_cons, MSK_JUMBO_RX_RING_CNT);
3147 MSK_INC(sc_if->msk_cdata.msk_rx_prod, MSK_JUMBO_RX_RING_CNT);
3148 }
3149
3150 static void
3151 msk_txeof(struct msk_if_softc *sc_if, int idx)
3152 {
3153 struct msk_txdesc *txd;
3154 struct msk_tx_desc *cur_tx;
3155 struct ifnet *ifp;
3156 uint32_t control;
3157 int cons, prog;
3158
3159 MSK_IF_LOCK_ASSERT(sc_if);
3160
3161 ifp = sc_if->msk_ifp;
3162
3163 bus_dmamap_sync(sc_if->msk_cdata.msk_tx_ring_tag,
3164 sc_if->msk_cdata.msk_tx_ring_map,
3165 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
3166 /*
3167 * Go through our tx ring and free mbufs for those
3168 * frames that have been sent.
3169 */
3170 cons = sc_if->msk_cdata.msk_tx_cons;
3171 prog = 0;
3172 for (; cons != idx; MSK_INC(cons, MSK_TX_RING_CNT)) {
3173 if (sc_if->msk_cdata.msk_tx_cnt <= 0)
3174 break;
3175 prog++;
3176 cur_tx = &sc_if->msk_rdata.msk_tx_ring[cons];
3177 control = le32toh(cur_tx->msk_control);
3178 sc_if->msk_cdata.msk_tx_cnt--;
3179 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
3180 if ((control & EOP) == 0)
3181 continue;
3182 txd = &sc_if->msk_cdata.msk_txdesc[cons];
3183 bus_dmamap_sync(sc_if->msk_cdata.msk_tx_tag, txd->tx_dmamap,
3184 BUS_DMASYNC_POSTWRITE);
3185 bus_dmamap_unload(sc_if->msk_cdata.msk_tx_tag, txd->tx_dmamap);
3186
3187 ifp->if_opackets++;
3188 KASSERT(txd->tx_m != NULL, ("%s: freeing NULL mbuf!",
3189 __func__));
3190 m_freem(txd->tx_m);
3191 txd->tx_m = NULL;
3192 }
3193
3194 if (prog > 0) {
3195 sc_if->msk_cdata.msk_tx_cons = cons;
3196 if (sc_if->msk_cdata.msk_tx_cnt == 0)
3197 sc_if->msk_watchdog_timer = 0;
3198 /* No need to sync LEs as we didn't update LEs. */
3199 }
3200 }
3201
3202 static void
3203 msk_tick(void *xsc_if)
3204 {
3205 struct msk_if_softc *sc_if;
3206 struct mii_data *mii;
3207
3208 sc_if = xsc_if;
3209
3210 MSK_IF_LOCK_ASSERT(sc_if);
3211
3212 mii = device_get_softc(sc_if->msk_miibus);
3213
3214 mii_tick(mii);
3215 msk_watchdog(sc_if);
3216 callout_reset(&sc_if->msk_tick_ch, hz, msk_tick, sc_if);
3217 }
3218
3219 static void
3220 msk_intr_phy(struct msk_if_softc *sc_if)
3221 {
3222 uint16_t status;
3223
3224 msk_phy_readreg(sc_if, PHY_ADDR_MARV, PHY_MARV_INT_STAT);
3225 status = msk_phy_readreg(sc_if, PHY_ADDR_MARV, PHY_MARV_INT_STAT);
3226 /* Handle FIFO Underrun/Overflow? */
3227 	if ((status & PHY_M_IS_FIFO_ERROR) != 0)
3228 device_printf(sc_if->msk_if_dev,
3229 "PHY FIFO underrun/overflow.\n");
3230 }
3231
3232 static void
3233 msk_intr_gmac(struct msk_if_softc *sc_if)
3234 {
3235 struct msk_softc *sc;
3236 uint8_t status;
3237
3238 sc = sc_if->msk_softc;
3239 status = CSR_READ_1(sc, MR_ADDR(sc_if->msk_port, GMAC_IRQ_SRC));
3240
3241 /* GMAC Rx FIFO overrun. */
3242 if ((status & GM_IS_RX_FF_OR) != 0) {
3243 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_CTRL_T),
3244 GMF_CLI_RX_FO);
3245 device_printf(sc_if->msk_if_dev, "Rx FIFO overrun!\n");
3246 }
3247 /* GMAC Tx FIFO underrun. */
3248 if ((status & GM_IS_TX_FF_UR) != 0) {
3249 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T),
3250 GMF_CLI_TX_FU);
3251 device_printf(sc_if->msk_if_dev, "Tx FIFO underrun!\n");
3252 		/*
3253 		 * XXX
3254 		 * In case of a Tx underrun, we may need to flush/reset
3255 		 * the Tx MAC, but that would also require resynchronization
3256 		 * with the status LEs.  Reinitializing status LEs would
3257 		 * affect the other port in a dual MAC configuration, so
3258 		 * it should be avoided as much as possible.
3259 		 * Due to the lack of documentation this is all vague
3260 		 * guesswork and needs more investigation.
3261 		 */
3262 }
3263 }
3264
3265 static void
3266 msk_handle_hwerr(struct msk_if_softc *sc_if, uint32_t status)
3267 {
3268 struct msk_softc *sc;
3269
3270 sc = sc_if->msk_softc;
3271 if ((status & Y2_IS_PAR_RD1) != 0) {
3272 device_printf(sc_if->msk_if_dev,
3273 "RAM buffer read parity error\n");
3274 /* Clear IRQ. */
3275 CSR_WRITE_2(sc, SELECT_RAM_BUFFER(sc_if->msk_port, B3_RI_CTRL),
3276 RI_CLR_RD_PERR);
3277 }
3278 if ((status & Y2_IS_PAR_WR1) != 0) {
3279 device_printf(sc_if->msk_if_dev,
3280 "RAM buffer write parity error\n");
3281 /* Clear IRQ. */
3282 CSR_WRITE_2(sc, SELECT_RAM_BUFFER(sc_if->msk_port, B3_RI_CTRL),
3283 RI_CLR_WR_PERR);
3284 }
3285 if ((status & Y2_IS_PAR_MAC1) != 0) {
3286 device_printf(sc_if->msk_if_dev, "Tx MAC parity error\n");
3287 /* Clear IRQ. */
3288 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T),
3289 GMF_CLI_TX_PE);
3290 }
3291 if ((status & Y2_IS_PAR_RX1) != 0) {
3292 device_printf(sc_if->msk_if_dev, "Rx parity error\n");
3293 /* Clear IRQ. */
3294 CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_rxq, Q_CSR), BMU_CLR_IRQ_PAR);
3295 }
3296 if ((status & (Y2_IS_TCP_TXS1 | Y2_IS_TCP_TXA1)) != 0) {
3297 device_printf(sc_if->msk_if_dev, "TCP segmentation error\n");
3298 /* Clear IRQ. */
3299 CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR), BMU_CLR_IRQ_TCP);
3300 }
3301 }
3302
3303 static void
3304 msk_intr_hwerr(struct msk_softc *sc)
3305 {
3306 uint32_t status;
3307 uint32_t tlphead[4];
3308
3309 status = CSR_READ_4(sc, B0_HWE_ISRC);
3310 /* Time Stamp timer overflow. */
3311 if ((status & Y2_IS_TIST_OV) != 0)
3312 CSR_WRITE_1(sc, GMAC_TI_ST_CTRL, GMT_ST_CLR_IRQ);
3313 if ((status & Y2_IS_PCI_NEXP) != 0) {
3314 		/*
3315 		 * A PCI Express error occurred which is not described in
3316 		 * the PEX spec.
3317 		 * This error is also mapped to either the Master Abort
3318 		 * (Y2_IS_MST_ERR) or Target Abort (Y2_IS_IRQ_STAT) bit and
3319 		 * can only be cleared there.
3320 		 */
3321 device_printf(sc->msk_dev,
3322 "PCI Express protocol violation error\n");
3323 }
3324
3325 if ((status & (Y2_IS_MST_ERR | Y2_IS_IRQ_STAT)) != 0) {
3326 uint16_t v16;
3327
3328 		if ((status & Y2_IS_MST_ERR) != 0)
3329 			device_printf(sc->msk_dev,
3330 			    "unexpected IRQ Master error\n");
3331 		else
3332 			device_printf(sc->msk_dev,
3333 			    "unexpected IRQ Status error\n");
3334 /* Reset all bits in the PCI status register. */
3335 v16 = pci_read_config(sc->msk_dev, PCIR_STATUS, 2);
3336 CSR_WRITE_1(sc, B2_TST_CTRL1, TST_CFG_WRITE_ON);
3337 pci_write_config(sc->msk_dev, PCIR_STATUS, v16 |
3338 PCIM_STATUS_PERR | PCIM_STATUS_SERR | PCIM_STATUS_RMABORT |
3339 PCIM_STATUS_RTABORT | PCIM_STATUS_PERRREPORT, 2);
3340 CSR_WRITE_1(sc, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
3341 }
3342
3343 /* Check for PCI Express Uncorrectable Error. */
3344 if ((status & Y2_IS_PCI_EXP) != 0) {
3345 uint32_t v32;
3346
3347 		/*
3348 		 * On PCI Express, bus bridges are called root complexes (RC).
3349 		 * PCI Express errors are recognized by the root complex too,
3350 		 * which requests the system to handle the problem.  After
3351 		 * the error occurs, it may be that no further access to the
3352 		 * adapter can be performed.
3353 		 */
3354
3355 v32 = CSR_PCI_READ_4(sc, PEX_UNC_ERR_STAT);
3356 if ((v32 & PEX_UNSUP_REQ) != 0) {
3357 /* Ignore unsupported request error. */
3358 device_printf(sc->msk_dev,
3359 "Uncorrectable PCI Express error\n");
3360 }
3361 if ((v32 & (PEX_FATAL_ERRORS | PEX_POIS_TLP)) != 0) {
3362 int i;
3363
3364 			/* Get TLP header from the Log Registers. */
3365 for (i = 0; i < 4; i++)
3366 tlphead[i] = CSR_PCI_READ_4(sc,
3367 PEX_HEADER_LOG + i * 4);
3368 /* Check for vendor defined broadcast message. */
3369 if (!(tlphead[0] == 0x73004001 && tlphead[1] == 0x7f)) {
3370 sc->msk_intrhwemask &= ~Y2_IS_PCI_EXP;
3371 CSR_WRITE_4(sc, B0_HWE_IMSK,
3372 sc->msk_intrhwemask);
3373 CSR_READ_4(sc, B0_HWE_IMSK);
3374 }
3375 }
3376 /* Clear the interrupt. */
3377 CSR_WRITE_1(sc, B2_TST_CTRL1, TST_CFG_WRITE_ON);
3378 CSR_PCI_WRITE_4(sc, PEX_UNC_ERR_STAT, 0xffffffff);
3379 CSR_WRITE_1(sc, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
3380 }
3381
3382 if ((status & Y2_HWE_L1_MASK) != 0 && sc->msk_if[MSK_PORT_A] != NULL)
3383 msk_handle_hwerr(sc->msk_if[MSK_PORT_A], status);
3384 if ((status & Y2_HWE_L2_MASK) != 0 && sc->msk_if[MSK_PORT_B] != NULL)
3385 msk_handle_hwerr(sc->msk_if[MSK_PORT_B], status >> 8);
3386 }
3387
3388 static __inline void
3389 msk_rxput(struct msk_if_softc *sc_if)
3390 {
3391 struct msk_softc *sc;
3392
3393 sc = sc_if->msk_softc;
3394 	if (sc_if->msk_framesize > (MCLBYTES - ETHER_HDR_LEN))
3395 bus_dmamap_sync(
3396 sc_if->msk_cdata.msk_jumbo_rx_ring_tag,
3397 sc_if->msk_cdata.msk_jumbo_rx_ring_map,
3398 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3399 else
3400 bus_dmamap_sync(
3401 sc_if->msk_cdata.msk_rx_ring_tag,
3402 sc_if->msk_cdata.msk_rx_ring_map,
3403 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3404 CSR_WRITE_2(sc, Y2_PREF_Q_ADDR(sc_if->msk_rxq,
3405 PREF_UNIT_PUT_IDX_REG), sc_if->msk_cdata.msk_rx_prod);
3406 }
3407
3408 static int
3409 msk_handle_events(struct msk_softc *sc)
3410 {
3411 struct msk_if_softc *sc_if;
3412 int rxput[2];
3413 struct msk_stat_desc *sd;
3414 uint32_t control, status;
3415 int cons, idx, len, port, rxprog;
3416
3417 idx = CSR_READ_2(sc, STAT_PUT_IDX);
3418 if (idx == sc->msk_stat_cons)
3419 return (0);
3420
3421 /* Sync status LEs. */
3422 bus_dmamap_sync(sc->msk_stat_tag, sc->msk_stat_map,
3423 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
3424 /* XXX Sync Rx LEs here. */
3425
3426 rxput[MSK_PORT_A] = rxput[MSK_PORT_B] = 0;
3427
3428 rxprog = 0;
3429 for (cons = sc->msk_stat_cons; cons != idx;) {
3430 sd = &sc->msk_stat_ring[cons];
3431 control = le32toh(sd->msk_control);
3432 if ((control & HW_OWNER) == 0)
3433 break;
3434 		/*
3435 		 * Marvell's FreeBSD driver updates the status LE after
3436 		 * clearing HW_OWNER.  However, we don't have a way to sync
3437 		 * a single LE with the bus_dma(9) API; it only provides a
3438 		 * way to sync an entire DMA map.  So don't sync the LE
3439 		 * until we have a better way to sync individual LEs.
3440 		 */
3441 control &= ~HW_OWNER;
3442 sd->msk_control = htole32(control);
3443 status = le32toh(sd->msk_status);
3444 len = control & STLE_LEN_MASK;
3445 port = (control >> 16) & 0x01;
3446 sc_if = sc->msk_if[port];
3447 if (sc_if == NULL) {
3448 device_printf(sc->msk_dev, "invalid port opcode "
3449 "0x%08x\n", control & STLE_OP_MASK);
3450 continue;
3451 }
3452
3453 switch (control & STLE_OP_MASK) {
3454 case OP_RXVLAN:
3455 sc_if->msk_vtag = ntohs(len);
3456 break;
3457 case OP_RXCHKSVLAN:
3458 sc_if->msk_vtag = ntohs(len);
3459 break;
3460 case OP_RXSTAT:
3461 if (sc_if->msk_framesize > (MCLBYTES - ETHER_HDR_LEN))
3462 msk_jumbo_rxeof(sc_if, status, len);
3463 else
3464 msk_rxeof(sc_if, status, len);
3465 rxprog++;
3466 			/*
3467 			 * Because there is no way to sync a single Rx LE,
3468 			 * put off the DMA sync operation until the end of
3469 			 * event processing.
3470 			 */
3471 rxput[port]++;
3472 			/* Update prefetch unit if we've passed the watermark. */
3473 if (rxput[port] >= sc_if->msk_cdata.msk_rx_putwm) {
3474 msk_rxput(sc_if);
3475 rxput[port] = 0;
3476 }
3477 break;
3478 case OP_TXINDEXLE:
3479 if (sc->msk_if[MSK_PORT_A] != NULL)
3480 msk_txeof(sc->msk_if[MSK_PORT_A],
3481 status & STLE_TXA1_MSKL);
3482 if (sc->msk_if[MSK_PORT_B] != NULL)
3483 msk_txeof(sc->msk_if[MSK_PORT_B],
3484 ((status & STLE_TXA2_MSKL) >>
3485 STLE_TXA2_SHIFTL) |
3486 ((len & STLE_TXA2_MSKH) <<
3487 STLE_TXA2_SHIFTH));
3488 break;
3489 default:
3490 device_printf(sc->msk_dev, "unhandled opcode 0x%08x\n",
3491 control & STLE_OP_MASK);
3492 break;
3493 }
3494 MSK_INC(cons, MSK_STAT_RING_CNT);
3495 if (rxprog > sc->msk_process_limit)
3496 break;
3497 }
3498
3499 sc->msk_stat_cons = cons;
3500 /* XXX We should sync status LEs here. See above notes. */
3501
3502 if (rxput[MSK_PORT_A] > 0)
3503 msk_rxput(sc->msk_if[MSK_PORT_A]);
3504 if (rxput[MSK_PORT_B] > 0)
3505 msk_rxput(sc->msk_if[MSK_PORT_B]);
3506
3507 return (sc->msk_stat_cons != CSR_READ_2(sc, STAT_PUT_IDX));
3508 }
3509
3510 static void
3511 msk_intr(void *xsc)
3512 {
3513 struct msk_softc *sc;
3514 uint32_t status;
3515
3516 sc = xsc;
3517 status = CSR_READ_4(sc, B0_Y2_SP_ISRC2);
3518 /* Reading B0_Y2_SP_ISRC2 masks further interrupts. */
3519 if (status == 0 || status == 0xffffffff) {
3520 CSR_WRITE_4(sc, B0_Y2_SP_ICR, 2);
3521 return;
3522 }
3523
3524 taskqueue_enqueue(sc->msk_tq, &sc->msk_int_task);
3525 }
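/*
 * Interrupt flow, for reference: msk_intr() runs as an INTR_FAST
 * handler; reading B0_Y2_SP_ISRC2 masks further interrupts and the
 * real work is deferred to msk_int_task() on the private taskqueue.
 * Interrupts stay masked until msk_int_task() writes B0_Y2_SP_ICR at
 * the end, so the fast handler never has to take the softc lock.
 */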
3526
3527 static void
3528 msk_int_task(void *arg, int pending)
3529 {
3530 struct msk_softc *sc;
3531 struct msk_if_softc *sc_if0, *sc_if1;
3532 struct ifnet *ifp0, *ifp1;
3533 uint32_t status;
3534 int domore;
3535
3536 sc = arg;
3537 MSK_LOCK(sc);
3538
3539 /* Get interrupt source. */
3540 status = CSR_READ_4(sc, B0_ISRC);
3541 if (status == 0 || status == 0xffffffff || sc->msk_suspended != 0 ||
3542 (status & sc->msk_intrmask) == 0)
3543 goto done;
3544
3545 sc_if0 = sc->msk_if[MSK_PORT_A];
3546 sc_if1 = sc->msk_if[MSK_PORT_B];
3547 ifp0 = ifp1 = NULL;
3548 if (sc_if0 != NULL)
3549 ifp0 = sc_if0->msk_ifp;
3550 if (sc_if1 != NULL)
3551 ifp1 = sc_if1->msk_ifp;
3552
3553 if ((status & Y2_IS_IRQ_PHY1) != 0 && sc_if0 != NULL)
3554 msk_intr_phy(sc_if0);
3555 if ((status & Y2_IS_IRQ_PHY2) != 0 && sc_if1 != NULL)
3556 msk_intr_phy(sc_if1);
3557 if ((status & Y2_IS_IRQ_MAC1) != 0 && sc_if0 != NULL)
3558 msk_intr_gmac(sc_if0);
3559 if ((status & Y2_IS_IRQ_MAC2) != 0 && sc_if1 != NULL)
3560 msk_intr_gmac(sc_if1);
3561 if ((status & (Y2_IS_CHK_RX1 | Y2_IS_CHK_RX2)) != 0) {
3562 device_printf(sc->msk_dev, "Rx descriptor error\n");
3563 sc->msk_intrmask &= ~(Y2_IS_CHK_RX1 | Y2_IS_CHK_RX2);
3564 CSR_WRITE_4(sc, B0_IMSK, sc->msk_intrmask);
3565 CSR_READ_4(sc, B0_IMSK);
3566 }
3567 if ((status & (Y2_IS_CHK_TXA1 | Y2_IS_CHK_TXA2)) != 0) {
3568 device_printf(sc->msk_dev, "Tx descriptor error\n");
3569 sc->msk_intrmask &= ~(Y2_IS_CHK_TXA1 | Y2_IS_CHK_TXA2);
3570 CSR_WRITE_4(sc, B0_IMSK, sc->msk_intrmask);
3571 CSR_READ_4(sc, B0_IMSK);
3572 }
3573 if ((status & Y2_IS_HW_ERR) != 0)
3574 msk_intr_hwerr(sc);
3575
3576 domore = msk_handle_events(sc);
3577 if ((status & Y2_IS_STAT_BMU) != 0)
3578 CSR_WRITE_4(sc, STAT_CTRL, SC_STAT_CLR_IRQ);
3579
3580 if (ifp0 != NULL && (ifp0->if_drv_flags & IFF_DRV_RUNNING) != 0 &&
3581 !IFQ_DRV_IS_EMPTY(&ifp0->if_snd))
3582 taskqueue_enqueue(taskqueue_fast, &sc_if0->msk_tx_task);
3583 if (ifp1 != NULL && (ifp1->if_drv_flags & IFF_DRV_RUNNING) != 0 &&
3584 !IFQ_DRV_IS_EMPTY(&ifp1->if_snd))
3585 taskqueue_enqueue(taskqueue_fast, &sc_if1->msk_tx_task);
3586
3587 if (domore > 0) {
3588 taskqueue_enqueue(sc->msk_tq, &sc->msk_int_task);
3589 MSK_UNLOCK(sc);
3590 return;
3591 }
3592 done:
3593 MSK_UNLOCK(sc);
3594
3595 /* Reenable interrupts. */
3596 CSR_WRITE_4(sc, B0_Y2_SP_ICR, 2);
3597 }
3598
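/* Locked wrapper around msk_init_locked(). */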
3599 static void
3600 msk_init(void *xsc)
3601 {
3602 struct msk_if_softc *sc_if = xsc;
3603
3604 MSK_IF_LOCK(sc_if);
3605 msk_init_locked(sc_if);
3606 MSK_IF_UNLOCK(sc_if);
3607 }
3608
3609 static void
3610 msk_init_locked(struct msk_if_softc *sc_if)
3611 {
3612 struct msk_softc *sc;
3613 struct ifnet *ifp;
3614 struct mii_data *mii;
3615 uint16_t eaddr[ETHER_ADDR_LEN / 2];
3616 uint16_t gmac;
3617 int error, i;
3618
3619 MSK_IF_LOCK_ASSERT(sc_if);
3620
3621 ifp = sc_if->msk_ifp;
3622 sc = sc_if->msk_softc;
3623 mii = device_get_softc(sc_if->msk_miibus);
3624
3625 error = 0;
3626 /* Cancel pending I/O and free all Rx/Tx buffers. */
3627 msk_stop(sc_if);
3628
3629 sc_if->msk_framesize = ifp->if_mtu + ETHER_HDR_LEN +
3630 ETHER_VLAN_ENCAP_LEN;
3631 if (sc_if->msk_framesize > MSK_MAX_FRAMELEN &&
3632 sc_if->msk_softc->msk_hw_id == CHIP_ID_YUKON_EC_U) {
3633 /*
3634 * On Yukon EC Ultra, TSO and checksum offload are not
3635 * supported for jumbo frames.
3636 */
3637 ifp->if_hwassist &= ~(MSK_CSUM_FEATURES | CSUM_TSO);
3638 ifp->if_capenable &= ~(IFCAP_TSO4 | IFCAP_TXCSUM);
3639 }
3640
3641 /*
3642 * Initialize the GMAC first.
3643 * Without this initialization, the Rx MAC did not work as
3644 * expected: it garbled status LEs, resulting in out-of-order or
3645 * duplicated frame delivery and, in turn, very poor Rx
3646 * performance. (I had to write packet analysis code that could
3647 * be embedded in the driver to diagnose this issue.)
3648 * I spent almost two months fixing this issue. If I had had a
3649 * datasheet for the Yukon II, I wouldn't have encountered it. :-(
3650 */
3651 gmac = GM_GPCR_SPEED_100 | GM_GPCR_SPEED_1000 | GM_GPCR_DUP_FULL;
3652 GMAC_WRITE_2(sc, sc_if->msk_port, GM_GP_CTRL, gmac);
3653
3654 /* Do a dummy read of the Interrupt Source Register. */
3655 CSR_READ_1(sc, MR_ADDR(sc_if->msk_port, GMAC_IRQ_SRC));
3656
3657 /* Set MIB Clear Counter Mode. */
3658 gmac = GMAC_READ_2(sc, sc_if->msk_port, GM_PHY_ADDR);
3659 GMAC_WRITE_2(sc, sc_if->msk_port, GM_PHY_ADDR, gmac | GM_PAR_MIB_CLR);
3660 /* Read all MIB Counters with Clear Mode set. */
3661 for (i = 0; i < GM_MIB_CNT_SIZE; i++)
3662 GMAC_READ_2(sc, sc_if->msk_port, GM_MIB_CNT_BASE + 8 * i);
3663 /* Clear MIB Clear Counter Mode. */
3664 gmac &= ~GM_PAR_MIB_CLR;
3665 GMAC_WRITE_2(sc, sc_if->msk_port, GM_PHY_ADDR, gmac);
3666
3667 /* Disable FCS. */
3668 GMAC_WRITE_2(sc, sc_if->msk_port, GM_RX_CTRL, GM_RXCR_CRC_DIS);
3669
3670 /* Setup Transmit Control Register. */
3671 GMAC_WRITE_2(sc, sc_if->msk_port, GM_TX_CTRL, TX_COL_THR(TX_COL_DEF));
3672
3673 /* Setup Transmit Flow Control Register. */
3674 GMAC_WRITE_2(sc, sc_if->msk_port, GM_TX_FLOW_CTRL, 0xffff);
3675
3676 /* Setup Transmit Parameter Register. */
3677 GMAC_WRITE_2(sc, sc_if->msk_port, GM_TX_PARAM,
3678 TX_JAM_LEN_VAL(TX_JAM_LEN_DEF) | TX_JAM_IPG_VAL(TX_JAM_IPG_DEF) |
3679 TX_IPG_JAM_DATA(TX_IPG_JAM_DEF) | TX_BACK_OFF_LIM(TX_BOF_LIM_DEF));
3680
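	/* Setup Serial Mode Register. */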
3681 gmac = DATA_BLIND_VAL(DATA_BLIND_DEF) |
3682 GM_SMOD_VLAN_ENA | IPG_DATA_VAL(IPG_DATA_DEF);
3683
3684 if (sc_if->msk_framesize > MSK_MAX_FRAMELEN)
3685 gmac |= GM_SMOD_JUMBO_ENA;
3686 GMAC_WRITE_2(sc, sc_if->msk_port, GM_SERIAL_MODE, gmac);
3687
3688 /* Set station address. */
3689 bcopy(IF_LLADDR(ifp), eaddr, ETHER_ADDR_LEN);
3690 for (i = 0; i < ETHER_ADDR_LEN / 2; i++)
3691 GMAC_WRITE_2(sc, sc_if->msk_port, GM_SRC_ADDR_1L + i * 4,
3692 eaddr[i]);
3693 for (i = 0; i < ETHER_ADDR_LEN / 2; i++)
3694 GMAC_WRITE_2(sc, sc_if->msk_port, GM_SRC_ADDR_2L + i * 4,
3695 eaddr[i]);
3696
3697 /* Disable interrupts for counter overflows. */
3698 GMAC_WRITE_2(sc, sc_if->msk_port, GM_TX_IRQ_MSK, 0);
3699 GMAC_WRITE_2(sc, sc_if->msk_port, GM_RX_IRQ_MSK, 0);
3700 GMAC_WRITE_2(sc, sc_if->msk_port, GM_TR_IRQ_MSK, 0);
3701
3702 /* Configure Rx MAC FIFO. */
3703 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_CTRL_T), GMF_RST_SET);
3704 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_CTRL_T), GMF_RST_CLR);
3705 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_CTRL_T),
3706 GMF_OPER_ON | GMF_RX_F_FL_ON);
3707
3708 /* Set promiscuous mode. */
3709 msk_setpromisc(sc_if);
3710
3711 /* Set multicast filter. */
3712 msk_setmulti(sc_if);
3713
3714 /* Flush Rx MAC FIFO on any flow control or error. */
3715 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_FL_MSK),
3716 GMR_FS_ANY_ERR);
3717
3718 /*
3719 * Set Rx FIFO flush threshold to 64 bytes + 1 FIFO word
3720 * to work around a hardware hang on receipt of pause frames.
3721 */
3722 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_FL_THR),
3723 RX_GMF_FL_THR_DEF + 1);
3724
3725 /* Configure Tx MAC FIFO. */
3726 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T), GMF_RST_SET);
3727 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T), GMF_RST_CLR);
3728 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T), GMF_OPER_ON);
3729
3730 /* Configure hardware VLAN tag insertion/stripping. */
3731 msk_setvlan(sc_if, ifp);
3732
3733 if (sc->msk_hw_id == CHIP_ID_YUKON_EC_U) {
3734 /* Set Rx pause thresholds. */
3735 CSR_WRITE_1(sc, MR_ADDR(sc_if->msk_port, RX_GMF_LP_THR),
3736 MSK_ECU_LLPP);
3737 CSR_WRITE_1(sc, MR_ADDR(sc_if->msk_port, RX_GMF_UP_THR),
3738 MSK_ECU_ULPP);
3739 if (sc_if->msk_framesize > MSK_MAX_FRAMELEN) {
3740 /*
3741 * Set Tx GMAC FIFO Almost Empty Threshold.
3742 */
3743 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_AE_THR),
3744 MSK_ECU_JUMBO_WM << 16 | MSK_ECU_AE_THR);
3745 /* Disable Store & Forward mode for Tx. */
3746 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T),
3747 TX_JUMBO_ENA | TX_STFW_DIS);
3748 } else {
3749 /* Enable Store & Forward mode for Tx. */
3750 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T),
3751 TX_JUMBO_DIS | TX_STFW_ENA);
3752 }
3753 }
3754
3755 /*
3756 * Disable the Force Sync and Alloc bits in the Tx RAM interface
3757 * arbiter, as we don't use the Sync Tx queue.
3758 */
3759 CSR_WRITE_1(sc, MR_ADDR(sc_if->msk_port, TXA_CTRL),
3760 TXA_DIS_FSYNC | TXA_DIS_ALLOC | TXA_STOP_RC);
3761 /* Enable the RAM Interface Arbiter. */
3762 CSR_WRITE_1(sc, MR_ADDR(sc_if->msk_port, TXA_CTRL), TXA_ENA_ARB);
3763
3764 /* Setup RAM buffer. */
3765 msk_set_rambuffer(sc_if);
3766
3767 /* Disable the Tx Sync Queue. */
3768 CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_txsq, RB_CTRL), RB_RST_SET);
3769
3770 /* Setup Tx Queue Bus Memory Interface. */
3771 CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR), BMU_CLR_RESET);
3772 CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR), BMU_OPER_INIT);
3773 CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR), BMU_FIFO_OP_ON);
3774 CSR_WRITE_2(sc, Q_ADDR(sc_if->msk_txq, Q_WM), MSK_BMU_TX_WM);
3775 if (sc->msk_hw_id == CHIP_ID_YUKON_EC_U &&
3776 sc->msk_hw_rev == CHIP_REV_YU_EC_U_A0) {
3777 /* Fix for Yukon-EC Ultra: set BMU FIFO level */
3778 CSR_WRITE_2(sc, Q_ADDR(sc_if->msk_txq, Q_AL), MSK_ECU_TXFF_LEV);
3779 }
3780
3781 /* Setup Rx Queue Bus Memory Interface. */
3782 CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_rxq, Q_CSR), BMU_CLR_RESET);
3783 CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_rxq, Q_CSR), BMU_OPER_INIT);
3784 CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_rxq, Q_CSR), BMU_FIFO_OP_ON);
3785 CSR_WRITE_2(sc, Q_ADDR(sc_if->msk_rxq, Q_WM), MSK_BMU_RX_WM);
3786 if (sc->msk_hw_id == CHIP_ID_YUKON_EC_U &&
3787 sc->msk_hw_rev >= CHIP_REV_YU_EC_U_A1) {
3788 /* MAC Rx RAM Read is controlled by hardware. */
3789 CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_rxq, Q_F), F_M_RX_RAM_DIS);
3790 }
3791
3792 msk_set_prefetch(sc, sc_if->msk_txq,
3793 sc_if->msk_rdata.msk_tx_ring_paddr, MSK_TX_RING_CNT - 1);
3794 msk_init_tx_ring(sc_if);
3795
3796 /* Disable Rx checksum offload and RSS hash. */
3797 CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_rxq, Q_CSR),
3798 BMU_DIS_RX_CHKSUM | BMU_DIS_RX_RSS_HASH);
3799 if (sc_if->msk_framesize > (MCLBYTES - ETHER_HDR_LEN)) {
3800 msk_set_prefetch(sc, sc_if->msk_rxq,
3801 sc_if->msk_rdata.msk_jumbo_rx_ring_paddr,
3802 MSK_JUMBO_RX_RING_CNT - 1);
3803 error = msk_init_jumbo_rx_ring(sc_if);
3804 } else {
3805 msk_set_prefetch(sc, sc_if->msk_rxq,
3806 sc_if->msk_rdata.msk_rx_ring_paddr,
3807 MSK_RX_RING_CNT - 1);
3808 error = msk_init_rx_ring(sc_if);
3809 }
3810 if (error != 0) {
3811 device_printf(sc_if->msk_if_dev,
3812 "initialization failed: no memory for Rx buffers\n");
3813 msk_stop(sc_if);
3814 return;
3815 }
3816
3817 /* Configure interrupt handling. */
3818 if (sc_if->msk_port == MSK_PORT_A) {
3819 sc->msk_intrmask |= Y2_IS_PORT_A;
3820 sc->msk_intrhwemask |= Y2_HWE_L1_MASK;
3821 } else {
3822 sc->msk_intrmask |= Y2_IS_PORT_B;
3823 sc->msk_intrhwemask |= Y2_HWE_L2_MASK;
3824 }
3825 CSR_WRITE_4(sc, B0_HWE_IMSK, sc->msk_intrhwemask);
3826 CSR_READ_4(sc, B0_HWE_IMSK);
3827 CSR_WRITE_4(sc, B0_IMSK, sc->msk_intrmask);
3828 CSR_READ_4(sc, B0_IMSK);
3829
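	/* Clear the link state and kick the PHY to renegotiate. */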
3830 sc_if->msk_link = 0;
3831 mii_mediachg(mii);
3832
3833 ifp->if_drv_flags |= IFF_DRV_RUNNING;
3834 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
3835
3836 callout_reset(&sc_if->msk_tick_ch, hz, msk_tick, sc_if);
3837 }
3838
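/*
 * Partition the on-chip RAM buffer between this port's Rx and Tx
 * queues.  The start/end/read/write pointer registers take byte
 * offsets divided by 8 (i.e. 64-bit word units), and the Rx
 * upper/lower pause thresholds (UTPP/LTPP) are placed relative to
 * the end of the Rx queue.
 */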
3839 static void
3840 msk_set_rambuffer(struct msk_if_softc *sc_if)
3841 {
3842 struct msk_softc *sc;
3843 int ltpp, utpp;
3844
3845 sc = sc_if->msk_softc;
3846
3847 /* Setup Rx Queue. */
3848 CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_rxq, RB_CTRL), RB_RST_CLR);
3849 CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_rxq, RB_START),
3850 sc->msk_rxqstart[sc_if->msk_port] / 8);
3851 CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_rxq, RB_END),
3852 sc->msk_rxqend[sc_if->msk_port] / 8);
3853 CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_rxq, RB_WP),
3854 sc->msk_rxqstart[sc_if->msk_port] / 8);
3855 CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_rxq, RB_RP),
3856 sc->msk_rxqstart[sc_if->msk_port] / 8);
3857
3858 utpp = (sc->msk_rxqend[sc_if->msk_port] + 1 -
3859 sc->msk_rxqstart[sc_if->msk_port] - MSK_RB_ULPP) / 8;
3860 ltpp = (sc->msk_rxqend[sc_if->msk_port] + 1 -
3861 sc->msk_rxqstart[sc_if->msk_port] - MSK_RB_LLPP_B) / 8;
3862 if (sc->msk_rxqsize < MSK_MIN_RXQ_SIZE)
3863 ltpp += (MSK_RB_LLPP_B - MSK_RB_LLPP_S) / 8;
3864 CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_rxq, RB_RX_UTPP), utpp);
3865 CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_rxq, RB_RX_LTPP), ltpp);
3866 /* Set Rx priority (RB_RX_UTHP/RB_RX_LTHP) thresholds? */
3867
3868 CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_rxq, RB_CTRL), RB_ENA_OP_MD);
3869 CSR_READ_1(sc, RB_ADDR(sc_if->msk_rxq, RB_CTRL));
3870
3871 /* Setup Tx Queue. */
3872 CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_txq, RB_CTRL), RB_RST_CLR);
3873 CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_txq, RB_START),
3874 sc->msk_txqstart[sc_if->msk_port] / 8);
3875 CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_txq, RB_END),
3876 sc->msk_txqend[sc_if->msk_port] / 8);
3877 CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_txq, RB_WP),
3878 sc->msk_txqstart[sc_if->msk_port] / 8);
3879 CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_txq, RB_RP),
3880 sc->msk_txqstart[sc_if->msk_port] / 8);
3881 /* Enable Store & Forward for Tx side. */
3882 CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_txq, RB_CTRL), RB_ENA_STFWD);
3883 CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_txq, RB_CTRL), RB_ENA_OP_MD);
3884 CSR_READ_1(sc, RB_ADDR(sc_if->msk_txq, RB_CTRL));
3885 }
3886
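/*
 * Program a queue's list element prefetch unit: reset it, point it
 * at the DMA ring, set the index of the last valid LE and turn the
 * unit back on.
 */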
3887 static void
3888 msk_set_prefetch(struct msk_softc *sc, int qaddr, bus_addr_t addr,
3889 uint32_t count)
3890 {
3891
3892 /* Reset the prefetch unit. */
3893 CSR_WRITE_4(sc, Y2_PREF_Q_ADDR(qaddr, PREF_UNIT_CTRL_REG),
3894 PREF_UNIT_RST_SET);
3895 CSR_WRITE_4(sc, Y2_PREF_Q_ADDR(qaddr, PREF_UNIT_CTRL_REG),
3896 PREF_UNIT_RST_CLR);
3897 /* Set LE base address. */
3898 CSR_WRITE_4(sc, Y2_PREF_Q_ADDR(qaddr, PREF_UNIT_ADDR_LOW_REG),
3899 MSK_ADDR_LO(addr));
3900 CSR_WRITE_4(sc, Y2_PREF_Q_ADDR(qaddr, PREF_UNIT_ADDR_HI_REG),
3901 MSK_ADDR_HI(addr));
3902 /* Set the last index of the list. */
3903 CSR_WRITE_2(sc, Y2_PREF_Q_ADDR(qaddr, PREF_UNIT_LAST_IDX_REG),
3904 count);
3905 /* Turn on prefetch unit. */
3906 CSR_WRITE_4(sc, Y2_PREF_Q_ADDR(qaddr, PREF_UNIT_CTRL_REG),
3907 PREF_UNIT_OP_ON);
3908 /* Dummy read to ensure write. */
3909 CSR_READ_4(sc, Y2_PREF_Q_ADDR(qaddr, PREF_UNIT_CTRL_REG));
3910 }
3911
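/*
 * Stop the port: disable its interrupt sources, quiesce the MAC,
 * Tx/Rx BMUs, prefetch units and RAM buffers, then free any mbufs
 * still posted to the rings.  Called with the per-port lock held.
 */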
3912 static void
3913 msk_stop(struct msk_if_softc *sc_if)
3914 {
3915 struct msk_softc *sc;
3916 struct msk_txdesc *txd;
3917 struct msk_rxdesc *rxd;
3918 struct msk_rxdesc *jrxd;
3919 struct ifnet *ifp;
3920 uint32_t val;
3921 int i;
3922
3923 MSK_IF_LOCK_ASSERT(sc_if);
3924 sc = sc_if->msk_softc;
3925 ifp = sc_if->msk_ifp;
3926
3927 callout_stop(&sc_if->msk_tick_ch);
3928 sc_if->msk_watchdog_timer = 0;
3929
3930 /* Disable interrupts. */
3931 if (sc_if->msk_port == MSK_PORT_A) {
3932 sc->msk_intrmask &= ~Y2_IS_PORT_A;
3933 sc->msk_intrhwemask &= ~Y2_HWE_L1_MASK;
3934 } else {
3935 sc->msk_intrmask &= ~Y2_IS_PORT_B;
3936 sc->msk_intrhwemask &= ~Y2_HWE_L2_MASK;
3937 }
3938 CSR_WRITE_4(sc, B0_HWE_IMSK, sc->msk_intrhwemask);
3939 CSR_READ_4(sc, B0_HWE_IMSK);
3940 CSR_WRITE_4(sc, B0_IMSK, sc->msk_intrmask);
3941 CSR_READ_4(sc, B0_IMSK);
3942
3943 /* Disable Tx/Rx MAC. */
3944 val = GMAC_READ_2(sc, sc_if->msk_port, GM_GP_CTRL);
3945 val &= ~(GM_GPCR_RX_ENA | GM_GPCR_TX_ENA);
3946 GMAC_WRITE_2(sc, sc_if->msk_port, GM_GP_CTRL, val);
3947 /* Read back to ensure the write completed. */
3948 GMAC_READ_2(sc, sc_if->msk_port, GM_GP_CTRL);
3949
3950 /* Stop Tx BMU. */
3951 CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR), BMU_STOP);
3952 val = CSR_READ_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR));
3953 for (i = 0; i < MSK_TIMEOUT; i++) {
3954 if ((val & (BMU_STOP | BMU_IDLE)) == 0) {
3955 CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR),
3956 BMU_STOP);
3957 CSR_READ_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR));
3958 } else
3959 break;
3960 DELAY(1);
3961 }
3962 if (i == MSK_TIMEOUT)
3963 device_printf(sc_if->msk_if_dev, "Tx BMU stop failed\n");
3964 CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_txq, RB_CTRL),
3965 RB_RST_SET | RB_DIS_OP_MD);
3966
3967 /* Disable all GMAC interrupts. */
3968 CSR_WRITE_1(sc, MR_ADDR(sc_if->msk_port, GMAC_IRQ_MSK), 0);
3969 /* Disable PHY interrupts. */
3970 msk_phy_writereg(sc_if, PHY_ADDR_MARV, PHY_MARV_INT_MASK, 0);
3971
3972 /* Disable the RAM Interface Arbiter. */
3973 CSR_WRITE_1(sc, MR_ADDR(sc_if->msk_port, TXA_CTRL), TXA_DIS_ARB);
3974
3975 /* Reset the PCI FIFO of the async Tx queue. */
3976 CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR),
3977 BMU_RST_SET | BMU_FIFO_RST);
3978
3979 /* Reset the Tx prefetch units. */
3980 CSR_WRITE_4(sc, Y2_PREF_Q_ADDR(sc_if->msk_txq, PREF_UNIT_CTRL_REG),
3981 PREF_UNIT_RST_SET);
3982
3983 /* Reset the RAM Buffer async Tx queue. */
3984 CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_txq, RB_CTRL), RB_RST_SET);
3985
3986 /* Reset Tx MAC FIFO. */
3987 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T), GMF_RST_SET);
3988 /* Set Pause Off. */
3989 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, GMAC_CTRL), GMC_PAUSE_OFF);
3990
3991 /*
3992 * The Rx Stop command will not work on Yukon-2 if the BMU has not
3993 * reached the end of a packet, and since we cannot be sure that
3994 * data is not incoming, we must reset the BMU while it is not in
3995 * the middle of a DMA transfer. Since the Rx path may still be
3996 * active, the Rx RAM buffer is stopped first, so any possible
3997 * incoming data will not trigger a DMA. After the RAM buffer is
3998 * stopped, the BMU is polled until any DMA in progress has ended,
3999 * and only then is it reset.
4000 */
4001
4002 /* Disable the RAM Buffer receive queue. */
4003 CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_rxq, RB_CTRL), RB_DIS_OP_MD);
4004 for (i = 0; i < MSK_TIMEOUT; i++) {
4005 if (CSR_READ_1(sc, RB_ADDR(sc_if->msk_rxq, Q_RSL)) ==
4006 CSR_READ_1(sc, RB_ADDR(sc_if->msk_rxq, Q_RL)))
4007 break;
4008 DELAY(1);
4009 }
4010 if (i == MSK_TIMEOUT)
4011 device_printf(sc_if->msk_if_dev, "Rx BMU stop failed\n");
4012 CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_rxq, Q_CSR),
4013 BMU_RST_SET | BMU_FIFO_RST);
4014 /* Reset the Rx prefetch unit. */
4015 CSR_WRITE_4(sc, Y2_PREF_Q_ADDR(sc_if->msk_rxq, PREF_UNIT_CTRL_REG),
4016 PREF_UNIT_RST_SET);
4017 /* Reset the RAM Buffer receive queue. */
4018 CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_rxq, RB_CTRL), RB_RST_SET);
4019 /* Reset Rx MAC FIFO. */
4020 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_CTRL_T), GMF_RST_SET);
4021
4022 /* Free Rx and Tx mbufs still in the queues. */
4023 for (i = 0; i < MSK_RX_RING_CNT; i++) {
4024 rxd = &sc_if->msk_cdata.msk_rxdesc[i];
4025 if (rxd->rx_m != NULL) {
4026 bus_dmamap_sync(sc_if->msk_cdata.msk_rx_tag,
4027 rxd->rx_dmamap, BUS_DMASYNC_POSTREAD);
4028 bus_dmamap_unload(sc_if->msk_cdata.msk_rx_tag,
4029 rxd->rx_dmamap);
4030 m_freem(rxd->rx_m);
4031 rxd->rx_m = NULL;
4032 }
4033 }
4034 for (i = 0; i < MSK_JUMBO_RX_RING_CNT; i++) {
4035 jrxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[i];
4036 if (jrxd->rx_m != NULL) {
4037 bus_dmamap_sync(sc_if->msk_cdata.msk_jumbo_rx_tag,
4038 jrxd->rx_dmamap, BUS_DMASYNC_POSTREAD);
4039 bus_dmamap_unload(sc_if->msk_cdata.msk_jumbo_rx_tag,
4040 jrxd->rx_dmamap);
4041 m_freem(jrxd->rx_m);
4042 jrxd->rx_m = NULL;
4043 }
4044 }
4045 for (i = 0; i < MSK_TX_RING_CNT; i++) {
4046 txd = &sc_if->msk_cdata.msk_txdesc[i];
4047 if (txd->tx_m != NULL) {
4048 bus_dmamap_sync(sc_if->msk_cdata.msk_tx_tag,
4049 txd->tx_dmamap, BUS_DMASYNC_POSTWRITE);
4050 bus_dmamap_unload(sc_if->msk_cdata.msk_tx_tag,
4051 txd->tx_dmamap);
4052 m_freem(txd->tx_m);
4053 txd->tx_m = NULL;
4054 }
4055 }
4056
4057 /*
4058 * Mark the interface down.
4059 */
4060 ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
4061 sc_if->msk_link = 0;
4062 }
4063
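/*
 * Bounds-checked integer sysctl handler; new values outside
 * [low, high] are rejected with EINVAL.  Wrappers such as
 * sysctl_hw_msk_proc_limit() below supply the permitted range.
 */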
4064 static int
4065 sysctl_int_range(SYSCTL_HANDLER_ARGS, int low, int high)
4066 {
4067 int error, value;
4068
4069 if (!arg1)
4070 return (EINVAL);
4071 value = *(int *)arg1;
4072 error = sysctl_handle_int(oidp, &value, 0, req);
4073 if (error || !req->newptr)
4074 return (error);
4075 if (value < low || value > high)
4076 return (EINVAL);
4077 *(int *)arg1 = value;
4078
4079 return (0);
4080 }
4081
4082 static int
4083 sysctl_hw_msk_proc_limit(SYSCTL_HANDLER_ARGS)
4084 {
4085
4086 return (sysctl_int_range(oidp, arg1, arg2, req, MSK_PROC_MIN,
4087 MSK_PROC_MAX));
4088 }