FreeBSD/Linux Kernel Cross Reference
sys/dev/jme/if_jme.c
1 /*-
2 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
3 *
4 * Copyright (c) 2008, Pyun YongHyeon <yongari@FreeBSD.org>
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice unmodified, this list of conditions, and the following
12 * disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
18 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
21 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27 * SUCH DAMAGE.
28 */
29
30 #include <sys/cdefs.h>
31 __FBSDID("$FreeBSD$");
32
33 #include <sys/param.h>
34 #include <sys/systm.h>
35 #include <sys/bus.h>
36 #include <sys/endian.h>
37 #include <sys/kernel.h>
38 #include <sys/malloc.h>
39 #include <sys/mbuf.h>
40 #include <sys/rman.h>
41 #include <sys/module.h>
42 #include <sys/proc.h>
43 #include <sys/queue.h>
44 #include <sys/socket.h>
45 #include <sys/sockio.h>
46 #include <sys/sysctl.h>
47 #include <sys/taskqueue.h>
48
49 #include <net/bpf.h>
50 #include <net/if.h>
51 #include <net/if_var.h>
52 #include <net/if_arp.h>
53 #include <net/ethernet.h>
54 #include <net/if_dl.h>
55 #include <net/if_media.h>
56 #include <net/if_types.h>
57 #include <net/if_vlan_var.h>
58
59 #include <netinet/in.h>
60 #include <netinet/in_systm.h>
61 #include <netinet/ip.h>
62 #include <netinet/tcp.h>
63
64 #include <dev/mii/mii.h>
65 #include <dev/mii/miivar.h>
66
67 #include <dev/pci/pcireg.h>
68 #include <dev/pci/pcivar.h>
69
70 #include <machine/bus.h>
71 #include <machine/in_cksum.h>
72
73 #include <dev/jme/if_jmereg.h>
74 #include <dev/jme/if_jmevar.h>
75
76 /* "device miibus" required. See GENERIC if you get errors here. */
77 #include "miibus_if.h"
78
79 /* Define the following to enable printing of Rx errors. */
80 #undef JME_SHOW_ERRORS
81
82 #define JME_CSUM_FEATURES (CSUM_IP | CSUM_TCP | CSUM_UDP)
83
84 MODULE_DEPEND(jme, pci, 1, 1, 1);
85 MODULE_DEPEND(jme, ether, 1, 1, 1);
86 MODULE_DEPEND(jme, miibus, 1, 1, 1);
87
88 /* Tunables. */
89 static int msi_disable = 0;
90 static int msix_disable = 0;
91 TUNABLE_INT("hw.jme.msi_disable", &msi_disable);
92 TUNABLE_INT("hw.jme.msix_disable", &msix_disable);
93
94 /*
95 * Devices supported by this driver.
96 */
97 static struct jme_dev {
98 uint16_t jme_vendorid;
99 uint16_t jme_deviceid;
100 const char *jme_name;
101 } jme_devs[] = {
102 { VENDORID_JMICRON, DEVICEID_JMC250,
103 "JMicron Inc, JMC25x Gigabit Ethernet" },
104 { VENDORID_JMICRON, DEVICEID_JMC260,
105 "JMicron Inc, JMC26x Fast Ethernet" },
106 };
107
108 static int jme_miibus_readreg(device_t, int, int);
109 static int jme_miibus_writereg(device_t, int, int, int);
110 static void jme_miibus_statchg(device_t);
111 static void jme_mediastatus(struct ifnet *, struct ifmediareq *);
112 static int jme_mediachange(struct ifnet *);
113 static int jme_probe(device_t);
114 static int jme_eeprom_read_byte(struct jme_softc *, uint8_t, uint8_t *);
115 static int jme_eeprom_macaddr(struct jme_softc *);
116 static int jme_efuse_macaddr(struct jme_softc *);
117 static void jme_reg_macaddr(struct jme_softc *);
118 static void jme_set_macaddr(struct jme_softc *, uint8_t *);
119 static void jme_map_intr_vector(struct jme_softc *);
120 static int jme_attach(device_t);
121 static int jme_detach(device_t);
122 static void jme_sysctl_node(struct jme_softc *);
123 static void jme_dmamap_cb(void *, bus_dma_segment_t *, int, int);
124 static int jme_dma_alloc(struct jme_softc *);
125 static void jme_dma_free(struct jme_softc *);
126 static int jme_shutdown(device_t);
127 static void jme_setlinkspeed(struct jme_softc *);
128 static void jme_setwol(struct jme_softc *);
129 static int jme_suspend(device_t);
130 static int jme_resume(device_t);
131 static int jme_encap(struct jme_softc *, struct mbuf **);
132 static void jme_start(struct ifnet *);
133 static void jme_start_locked(struct ifnet *);
134 static void jme_watchdog(struct jme_softc *);
135 static int jme_ioctl(struct ifnet *, u_long, caddr_t);
136 static void jme_mac_config(struct jme_softc *);
137 static void jme_link_task(void *, int);
138 static int jme_intr(void *);
139 static void jme_int_task(void *, int);
140 static void jme_txeof(struct jme_softc *);
141 static __inline void jme_discard_rxbuf(struct jme_softc *, int);
142 static void jme_rxeof(struct jme_softc *);
143 static int jme_rxintr(struct jme_softc *, int);
144 static void jme_tick(void *);
145 static void jme_reset(struct jme_softc *);
146 static void jme_init(void *);
147 static void jme_init_locked(struct jme_softc *);
148 static void jme_stop(struct jme_softc *);
149 static void jme_stop_tx(struct jme_softc *);
150 static void jme_stop_rx(struct jme_softc *);
151 static int jme_init_rx_ring(struct jme_softc *);
152 static void jme_init_tx_ring(struct jme_softc *);
153 static void jme_init_ssb(struct jme_softc *);
154 static int jme_newbuf(struct jme_softc *, struct jme_rxdesc *);
155 static void jme_set_vlan(struct jme_softc *);
156 static void jme_set_filter(struct jme_softc *);
157 static void jme_stats_clear(struct jme_softc *);
158 static void jme_stats_save(struct jme_softc *);
159 static void jme_stats_update(struct jme_softc *);
160 static void jme_phy_down(struct jme_softc *);
161 static void jme_phy_up(struct jme_softc *);
162 static int sysctl_int_range(SYSCTL_HANDLER_ARGS, int, int);
163 static int sysctl_hw_jme_tx_coal_to(SYSCTL_HANDLER_ARGS);
164 static int sysctl_hw_jme_tx_coal_pkt(SYSCTL_HANDLER_ARGS);
165 static int sysctl_hw_jme_rx_coal_to(SYSCTL_HANDLER_ARGS);
166 static int sysctl_hw_jme_rx_coal_pkt(SYSCTL_HANDLER_ARGS);
167 static int sysctl_hw_jme_proc_limit(SYSCTL_HANDLER_ARGS);
168
169
170 static device_method_t jme_methods[] = {
171 /* Device interface. */
172 DEVMETHOD(device_probe, jme_probe),
173 DEVMETHOD(device_attach, jme_attach),
174 DEVMETHOD(device_detach, jme_detach),
175 DEVMETHOD(device_shutdown, jme_shutdown),
176 DEVMETHOD(device_suspend, jme_suspend),
177 DEVMETHOD(device_resume, jme_resume),
178
179 /* MII interface. */
180 DEVMETHOD(miibus_readreg, jme_miibus_readreg),
181 DEVMETHOD(miibus_writereg, jme_miibus_writereg),
182 DEVMETHOD(miibus_statchg, jme_miibus_statchg),
183
184 { NULL, NULL }
185 };
186
187 static driver_t jme_driver = {
188 "jme",
189 jme_methods,
190 sizeof(struct jme_softc)
191 };
192
193 DRIVER_MODULE(jme, pci, jme_driver, 0, 0);
194 DRIVER_MODULE(miibus, jme, miibus_driver, 0, 0);
195
196 static struct resource_spec jme_res_spec_mem[] = {
197 { SYS_RES_MEMORY, PCIR_BAR(0), RF_ACTIVE },
198 { -1, 0, 0 }
199 };
200
201 static struct resource_spec jme_irq_spec_legacy[] = {
202 { SYS_RES_IRQ, 0, RF_ACTIVE | RF_SHAREABLE },
203 { -1, 0, 0 }
204 };
205
206 static struct resource_spec jme_irq_spec_msi[] = {
207 { SYS_RES_IRQ, 1, RF_ACTIVE },
208 { -1, 0, 0 }
209 };
210
211 /*
212 * Read a PHY register on the MII of the JMC250.
213 */
214 static int
215 jme_miibus_readreg(device_t dev, int phy, int reg)
216 {
217 struct jme_softc *sc;
218 uint32_t val;
219 int i;
220
221 sc = device_get_softc(dev);
222
223 /* For FPGA version, PHY address 0 should be ignored. */
224 if ((sc->jme_flags & JME_FLAG_FPGA) != 0 && phy == 0)
225 return (0);
226
227 CSR_WRITE_4(sc, JME_SMI, SMI_OP_READ | SMI_OP_EXECUTE |
228 SMI_PHY_ADDR(phy) | SMI_REG_ADDR(reg));
229 for (i = JME_PHY_TIMEOUT; i > 0; i--) {
230 DELAY(1);
231 if (((val = CSR_READ_4(sc, JME_SMI)) & SMI_OP_EXECUTE) == 0)
232 break;
233 }
234
235 if (i == 0) {
236 device_printf(sc->jme_dev, "phy read timeout : %d\n", reg);
237 return (0);
238 }
239
240 return ((val & SMI_DATA_MASK) >> SMI_DATA_SHIFT);
241 }
242
243 /*
244 * Write a PHY register on the MII of the JMC250.
245 */
246 static int
247 jme_miibus_writereg(device_t dev, int phy, int reg, int val)
248 {
249 struct jme_softc *sc;
250 int i;
251
252 sc = device_get_softc(dev);
253
254 /* For FPGA version, PHY address 0 should be ignored. */
255 if ((sc->jme_flags & JME_FLAG_FPGA) != 0 && phy == 0)
256 return (0);
257
258 CSR_WRITE_4(sc, JME_SMI, SMI_OP_WRITE | SMI_OP_EXECUTE |
259 ((val << SMI_DATA_SHIFT) & SMI_DATA_MASK) |
260 SMI_PHY_ADDR(phy) | SMI_REG_ADDR(reg));
261 for (i = JME_PHY_TIMEOUT; i > 0; i--) {
262 DELAY(1);
263 if (((val = CSR_READ_4(sc, JME_SMI)) & SMI_OP_EXECUTE) == 0)
264 break;
265 }
266
267 if (i == 0)
268 device_printf(sc->jme_dev, "phy write timeout : %d\n", reg);
269
270 return (0);
271 }
272
273 /*
274 * Callback from MII layer when media changes.
275 */
276 static void
277 jme_miibus_statchg(device_t dev)
278 {
279 struct jme_softc *sc;
280
281 sc = device_get_softc(dev);
282 taskqueue_enqueue(taskqueue_swi, &sc->jme_link_task);
283 }
284
285 /*
286 * Get the current interface media status.
287 */
288 static void
289 jme_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
290 {
291 struct jme_softc *sc;
292 struct mii_data *mii;
293
294 sc = ifp->if_softc;
295 JME_LOCK(sc);
296 if ((ifp->if_flags & IFF_UP) == 0) {
297 JME_UNLOCK(sc);
298 return;
299 }
300 mii = device_get_softc(sc->jme_miibus);
301
302 mii_pollstat(mii);
303 ifmr->ifm_status = mii->mii_media_status;
304 ifmr->ifm_active = mii->mii_media_active;
305 JME_UNLOCK(sc);
306 }
307
308 /*
309 * Set hardware to newly-selected media.
310 */
311 static int
312 jme_mediachange(struct ifnet *ifp)
313 {
314 struct jme_softc *sc;
315 struct mii_data *mii;
316 struct mii_softc *miisc;
317 int error;
318
319 sc = ifp->if_softc;
320 JME_LOCK(sc);
321 mii = device_get_softc(sc->jme_miibus);
322 LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
323 PHY_RESET(miisc);
324 error = mii_mediachg(mii);
325 JME_UNLOCK(sc);
326
327 return (error);
328 }
329
330 static int
331 jme_probe(device_t dev)
332 {
333 struct jme_dev *sp;
334 int i;
335 uint16_t vendor, devid;
336
337 vendor = pci_get_vendor(dev);
338 devid = pci_get_device(dev);
339 sp = jme_devs;
340 for (i = 0; i < nitems(jme_devs); i++, sp++) {
341 if (vendor == sp->jme_vendorid &&
342 devid == sp->jme_deviceid) {
343 device_set_desc(dev, sp->jme_name);
344 return (BUS_PROBE_DEFAULT);
345 }
346 }
347
348 return (ENXIO);
349 }
350
351 static int
352 jme_eeprom_read_byte(struct jme_softc *sc, uint8_t addr, uint8_t *val)
353 {
354 uint32_t reg;
355 int i;
356
357 *val = 0;
358 for (i = JME_TIMEOUT; i > 0; i--) {
359 reg = CSR_READ_4(sc, JME_SMBCSR);
360 if ((reg & SMBCSR_HW_BUSY_MASK) == SMBCSR_HW_IDLE)
361 break;
362 DELAY(1);
363 }
364
365 if (i == 0) {
366 device_printf(sc->jme_dev, "EEPROM idle timeout!\n");
367 return (ETIMEDOUT);
368 }
369
370 reg = ((uint32_t)addr << SMBINTF_ADDR_SHIFT) & SMBINTF_ADDR_MASK;
371 CSR_WRITE_4(sc, JME_SMBINTF, reg | SMBINTF_RD | SMBINTF_CMD_TRIGGER);
372 for (i = JME_TIMEOUT; i > 0; i--) {
373 DELAY(1);
374 reg = CSR_READ_4(sc, JME_SMBINTF);
375 if ((reg & SMBINTF_CMD_TRIGGER) == 0)
376 break;
377 }
378
379 if (i == 0) {
380 device_printf(sc->jme_dev, "EEPROM read timeout!\n");
381 return (ETIMEDOUT);
382 }
383
384 reg = CSR_READ_4(sc, JME_SMBINTF);
385 *val = (reg & SMBINTF_RD_DATA_MASK) >> SMBINTF_RD_DATA_SHIFT;
386
387 return (0);
388 }
389
390 static int
391 jme_eeprom_macaddr(struct jme_softc *sc)
392 {
393 uint8_t eaddr[ETHER_ADDR_LEN];
394 uint8_t fup, reg, val;
395 uint32_t offset;
396 int match;
397
398 offset = 0;
399 if (jme_eeprom_read_byte(sc, offset++, &fup) != 0 ||
400 fup != JME_EEPROM_SIG0)
401 return (ENOENT);
402 if (jme_eeprom_read_byte(sc, offset++, &fup) != 0 ||
403 fup != JME_EEPROM_SIG1)
404 return (ENOENT);
405 match = 0;
406 do {
407 if (jme_eeprom_read_byte(sc, offset, &fup) != 0)
408 break;
409 if (JME_EEPROM_MKDESC(JME_EEPROM_FUNC0, JME_EEPROM_PAGE_BAR1) ==
410 (fup & (JME_EEPROM_FUNC_MASK | JME_EEPROM_PAGE_MASK))) {
411 if (jme_eeprom_read_byte(sc, offset + 1, &reg) != 0)
412 break;
413 if (reg >= JME_PAR0 &&
414 reg < JME_PAR0 + ETHER_ADDR_LEN) {
415 if (jme_eeprom_read_byte(sc, offset + 2,
416 &val) != 0)
417 break;
418 eaddr[reg - JME_PAR0] = val;
419 match++;
420 }
421 }
422 /* Check for the end-of-EEPROM-descriptors marker. */
423 if ((fup & JME_EEPROM_DESC_END) == JME_EEPROM_DESC_END)
424 break;
425 /* Try the next EEPROM descriptor. */
426 offset += JME_EEPROM_DESC_BYTES;
427 } while (match != ETHER_ADDR_LEN && offset < JME_EEPROM_END);
428
429 if (match == ETHER_ADDR_LEN) {
430 bcopy(eaddr, sc->jme_eaddr, ETHER_ADDR_LEN);
431 return (0);
432 }
433
434 return (ENOENT);
435 }
436
437 static int
438 jme_efuse_macaddr(struct jme_softc *sc)
439 {
440 uint32_t reg;
441 int i;
442
443 reg = pci_read_config(sc->jme_dev, JME_EFUSE_CTL1, 4);
444 if ((reg & (EFUSE_CTL1_AUTOLOAD_ERR | EFUSE_CTL1_AUTOLAOD_DONE)) !=
445 EFUSE_CTL1_AUTOLAOD_DONE)
446 return (ENOENT);
447 /* Reset eFuse controller. */
448 reg = pci_read_config(sc->jme_dev, JME_EFUSE_CTL2, 4);
449 reg |= EFUSE_CTL2_RESET;
450 pci_write_config(sc->jme_dev, JME_EFUSE_CTL2, reg, 4);
451 reg = pci_read_config(sc->jme_dev, JME_EFUSE_CTL2, 4);
452 reg &= ~EFUSE_CTL2_RESET;
453 pci_write_config(sc->jme_dev, JME_EFUSE_CTL2, reg, 4);
454
455 /* Have eFuse reload station address to MAC controller. */
456 reg = pci_read_config(sc->jme_dev, JME_EFUSE_CTL1, 4);
457 reg &= ~EFUSE_CTL1_CMD_MASK;
458 reg |= EFUSE_CTL1_CMD_AUTOLOAD | EFUSE_CTL1_EXECUTE;
459 pci_write_config(sc->jme_dev, JME_EFUSE_CTL1, reg, 4);
460
461 /*
462 * Verify completion of the eFuse autoload command. It should
463 * complete within 108us.
464 */
465 DELAY(110);
466 for (i = 10; i > 0; i--) {
467 reg = pci_read_config(sc->jme_dev, JME_EFUSE_CTL1, 4);
468 if ((reg & (EFUSE_CTL1_AUTOLOAD_ERR |
469 EFUSE_CTL1_AUTOLAOD_DONE)) != EFUSE_CTL1_AUTOLAOD_DONE) {
470 DELAY(20);
471 continue;
472 }
473 if ((reg & EFUSE_CTL1_EXECUTE) == 0)
474 break;
475 /* Station address loading is still in progress. */
476 DELAY(20);
477 }
478 if (i == 0) {
479 device_printf(sc->jme_dev, "eFuse autoload timed out.\n");
480 return (ETIMEDOUT);
481 }
482
483 return (0);
484 }
485
486 static void
487 jme_reg_macaddr(struct jme_softc *sc)
488 {
489 uint32_t par0, par1;
490
491 /* Read station address. */
492 par0 = CSR_READ_4(sc, JME_PAR0);
493 par1 = CSR_READ_4(sc, JME_PAR1);
494 par1 &= 0xFFFF;
495 if ((par0 == 0 && par1 == 0) ||
496 (par0 == 0xFFFFFFFF && par1 == 0xFFFF)) {
497 device_printf(sc->jme_dev,
498 "Failed to retrieve Ethernet address.\n");
499 } else {
500 /*
501 * For controllers that use eFuse, the station address
502 * could also be extracted from JME_PCI_PAR0 and
503 * JME_PCI_PAR1 registers in PCI configuration space.
504 * Each register holds exactly half of the station address
505 * (24 bits), so use the JME_PAR0/JME_PAR1 registers instead.
506 */
507 sc->jme_eaddr[0] = (par0 >> 0) & 0xFF;
508 sc->jme_eaddr[1] = (par0 >> 8) & 0xFF;
509 sc->jme_eaddr[2] = (par0 >> 16) & 0xFF;
510 sc->jme_eaddr[3] = (par0 >> 24) & 0xFF;
511 sc->jme_eaddr[4] = (par1 >> 0) & 0xFF;
512 sc->jme_eaddr[5] = (par1 >> 8) & 0xFF;
513 }
514 }
515
516 static void
517 jme_set_macaddr(struct jme_softc *sc, uint8_t *eaddr)
518 {
519 uint32_t val;
520 int i;
521
522 if ((sc->jme_flags & JME_FLAG_EFUSE) != 0) {
523 /*
524 * Avoid reprogramming the station address if it is the same
525 * as the previous one. Note that a reprogrammed station
526 * address is permanent, as if it had been written to EEPROM.
527 * So if the station address was changed by the administrator,
528 * it's possible to lose the factory-configured address when
529 * the driver fails to restore it (e.g. after a reboot or
530 * system crash).
531 */
532 if (bcmp(eaddr, sc->jme_eaddr, ETHER_ADDR_LEN) != 0) {
533 for (i = 0; i < ETHER_ADDR_LEN; i++) {
534 val = JME_EFUSE_EEPROM_FUNC0 <<
535 JME_EFUSE_EEPROM_FUNC_SHIFT;
536 val |= JME_EFUSE_EEPROM_PAGE_BAR1 <<
537 JME_EFUSE_EEPROM_PAGE_SHIFT;
538 val |= (JME_PAR0 + i) <<
539 JME_EFUSE_EEPROM_ADDR_SHIFT;
540 val |= eaddr[i] << JME_EFUSE_EEPROM_DATA_SHIFT;
541 pci_write_config(sc->jme_dev, JME_EFUSE_EEPROM,
542 val | JME_EFUSE_EEPROM_WRITE, 4);
543 }
544 }
545 } else {
546 CSR_WRITE_4(sc, JME_PAR0,
547 eaddr[3] << 24 | eaddr[2] << 16 | eaddr[1] << 8 | eaddr[0]);
548 CSR_WRITE_4(sc, JME_PAR1, eaddr[5] << 8 | eaddr[4]);
549 }
550 }
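/*
 * Worked example of the register packing above: for a hypothetical
 * station address 00:1f:c6:aa:bb:cc, the non-eFuse path would write
 * PAR0 = 0xaac61f00 and PAR1 = 0x0000ccbb, i.e. the address bytes are
 * stored in little-endian order across the two registers.
 */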
551
552 static void
553 jme_map_intr_vector(struct jme_softc *sc)
554 {
555 uint32_t map[MSINUM_NUM_INTR_SOURCE / JME_MSI_MESSAGES];
556
557 bzero(map, sizeof(map));
558
559 /* Map Tx interrupt sources to MSI/MSIX vector 2. */
560 map[MSINUM_REG_INDEX(N_INTR_TXQ0_COMP)] =
561 MSINUM_INTR_SOURCE(2, N_INTR_TXQ0_COMP);
562 map[MSINUM_REG_INDEX(N_INTR_TXQ1_COMP)] |=
563 MSINUM_INTR_SOURCE(2, N_INTR_TXQ1_COMP);
564 map[MSINUM_REG_INDEX(N_INTR_TXQ2_COMP)] |=
565 MSINUM_INTR_SOURCE(2, N_INTR_TXQ2_COMP);
566 map[MSINUM_REG_INDEX(N_INTR_TXQ3_COMP)] |=
567 MSINUM_INTR_SOURCE(2, N_INTR_TXQ3_COMP);
568 map[MSINUM_REG_INDEX(N_INTR_TXQ4_COMP)] |=
569 MSINUM_INTR_SOURCE(2, N_INTR_TXQ4_COMP);
570 map[MSINUM_REG_INDEX(N_INTR_TXQ5_COMP)] |=
571 MSINUM_INTR_SOURCE(2, N_INTR_TXQ5_COMP);
572 map[MSINUM_REG_INDEX(N_INTR_TXQ6_COMP)] |=
573 MSINUM_INTR_SOURCE(2, N_INTR_TXQ6_COMP);
574 map[MSINUM_REG_INDEX(N_INTR_TXQ7_COMP)] |=
575 MSINUM_INTR_SOURCE(2, N_INTR_TXQ7_COMP);
576 map[MSINUM_REG_INDEX(N_INTR_TXQ_COAL)] |=
577 MSINUM_INTR_SOURCE(2, N_INTR_TXQ_COAL);
578 map[MSINUM_REG_INDEX(N_INTR_TXQ_COAL_TO)] |=
579 MSINUM_INTR_SOURCE(2, N_INTR_TXQ_COAL_TO);
580
581 /* Map Rx interrupt sources to MSI/MSIX vector 1. */
582 map[MSINUM_REG_INDEX(N_INTR_RXQ0_COMP)] =
583 MSINUM_INTR_SOURCE(1, N_INTR_RXQ0_COMP);
584 map[MSINUM_REG_INDEX(N_INTR_RXQ1_COMP)] =
585 MSINUM_INTR_SOURCE(1, N_INTR_RXQ1_COMP);
586 map[MSINUM_REG_INDEX(N_INTR_RXQ2_COMP)] =
587 MSINUM_INTR_SOURCE(1, N_INTR_RXQ2_COMP);
588 map[MSINUM_REG_INDEX(N_INTR_RXQ3_COMP)] =
589 MSINUM_INTR_SOURCE(1, N_INTR_RXQ3_COMP);
590 map[MSINUM_REG_INDEX(N_INTR_RXQ0_DESC_EMPTY)] =
591 MSINUM_INTR_SOURCE(1, N_INTR_RXQ0_DESC_EMPTY);
592 map[MSINUM_REG_INDEX(N_INTR_RXQ1_DESC_EMPTY)] =
593 MSINUM_INTR_SOURCE(1, N_INTR_RXQ1_DESC_EMPTY);
594 map[MSINUM_REG_INDEX(N_INTR_RXQ2_DESC_EMPTY)] =
595 MSINUM_INTR_SOURCE(1, N_INTR_RXQ2_DESC_EMPTY);
596 map[MSINUM_REG_INDEX(N_INTR_RXQ3_DESC_EMPTY)] =
597 MSINUM_INTR_SOURCE(1, N_INTR_RXQ3_DESC_EMPTY);
598 map[MSINUM_REG_INDEX(N_INTR_RXQ0_COAL)] =
599 MSINUM_INTR_SOURCE(1, N_INTR_RXQ0_COAL);
600 map[MSINUM_REG_INDEX(N_INTR_RXQ1_COAL)] =
601 MSINUM_INTR_SOURCE(1, N_INTR_RXQ1_COAL);
602 map[MSINUM_REG_INDEX(N_INTR_RXQ2_COAL)] =
603 MSINUM_INTR_SOURCE(1, N_INTR_RXQ2_COAL);
604 map[MSINUM_REG_INDEX(N_INTR_RXQ3_COAL)] =
605 MSINUM_INTR_SOURCE(1, N_INTR_RXQ3_COAL);
606 map[MSINUM_REG_INDEX(N_INTR_RXQ0_COAL_TO)] =
607 MSINUM_INTR_SOURCE(1, N_INTR_RXQ0_COAL_TO);
608 map[MSINUM_REG_INDEX(N_INTR_RXQ1_COAL_TO)] =
609 MSINUM_INTR_SOURCE(1, N_INTR_RXQ1_COAL_TO);
610 map[MSINUM_REG_INDEX(N_INTR_RXQ2_COAL_TO)] =
611 MSINUM_INTR_SOURCE(1, N_INTR_RXQ2_COAL_TO);
612 map[MSINUM_REG_INDEX(N_INTR_RXQ3_COAL_TO)] =
613 MSINUM_INTR_SOURCE(1, N_INTR_RXQ3_COAL_TO);
614
615 /* Map all other interrupt sources to MSI/MSIX vector 0. */
616 CSR_WRITE_4(sc, JME_MSINUM_BASE + sizeof(uint32_t) * 0, map[0]);
617 CSR_WRITE_4(sc, JME_MSINUM_BASE + sizeof(uint32_t) * 1, map[1]);
618 CSR_WRITE_4(sc, JME_MSINUM_BASE + sizeof(uint32_t) * 2, map[2]);
619 CSR_WRITE_4(sc, JME_MSINUM_BASE + sizeof(uint32_t) * 3, map[3]);
620 }
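/*
 * A worked example of the mapping above, assuming the usual layout of
 * the MSINUM macros in if_jmereg.h (each 32-bit MSINUM register packs
 * eight 4-bit vector numbers, one nibble per interrupt source):
 *
 *   MSINUM_REG_INDEX(src)      = src / 8          (which map[] word)
 *   MSINUM_INTR_SOURCE(v, src) = v << ((src % 8) * 4)
 *
 * e.g. assigning vector 1 to interrupt source 9 would set bits 7:4
 * of map[1].
 */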
621
622 static int
623 jme_attach(device_t dev)
624 {
625 struct jme_softc *sc;
626 struct ifnet *ifp;
627 struct mii_softc *miisc;
628 struct mii_data *mii;
629 uint32_t reg;
630 uint16_t burst;
631 int error, i, mii_flags, msic, msixc, pmc;
632
633 error = 0;
634 sc = device_get_softc(dev);
635 sc->jme_dev = dev;
636
637 mtx_init(&sc->jme_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
638 MTX_DEF);
639 callout_init_mtx(&sc->jme_tick_ch, &sc->jme_mtx, 0);
640 TASK_INIT(&sc->jme_int_task, 0, jme_int_task, sc);
641 TASK_INIT(&sc->jme_link_task, 0, jme_link_task, sc);
642
643 /*
644 * Map the device. JMC250 supports both memory-mapped and I/O
645 * register space access. Because I/O register access would
646 * require different BARs to reach all registers, it's a waste
647 * of time to use I/O register space access. JMC250 uses 16K to
648 * map the entire register space.
649 */
650 pci_enable_busmaster(dev);
651 sc->jme_res_spec = jme_res_spec_mem;
652 sc->jme_irq_spec = jme_irq_spec_legacy;
653 error = bus_alloc_resources(dev, sc->jme_res_spec, sc->jme_res);
654 if (error != 0) {
655 device_printf(dev, "cannot allocate memory resources.\n");
656 goto fail;
657 }
658
659 /* Allocate IRQ resources. */
660 msixc = pci_msix_count(dev);
661 msic = pci_msi_count(dev);
662 if (bootverbose) {
663 device_printf(dev, "MSIX count : %d\n", msixc);
664 device_printf(dev, "MSI count : %d\n", msic);
665 }
666
667 /* Use 1 MSI/MSI-X. */
668 if (msixc > 1)
669 msixc = 1;
670 if (msic > 1)
671 msic = 1;
672 /* Prefer MSIX over MSI. */
673 if (msix_disable == 0 || msi_disable == 0) {
674 if (msix_disable == 0 && msixc > 0 &&
675 pci_alloc_msix(dev, &msixc) == 0) {
676 if (msixc == 1) {
677 device_printf(dev, "Using %d MSIX messages.\n",
678 msixc);
679 sc->jme_flags |= JME_FLAG_MSIX;
680 sc->jme_irq_spec = jme_irq_spec_msi;
681 } else
682 pci_release_msi(dev);
683 }
684 if (msi_disable == 0 && (sc->jme_flags & JME_FLAG_MSIX) == 0 &&
685 msic > 0 && pci_alloc_msi(dev, &msic) == 0) {
686 if (msic == 1) {
687 device_printf(dev, "Using %d MSI messages.\n",
688 msic);
689 sc->jme_flags |= JME_FLAG_MSI;
690 sc->jme_irq_spec = jme_irq_spec_msi;
691 } else
692 pci_release_msi(dev);
693 }
694 /* Map interrupt vectors 0, 1 and 2. */
695 if ((sc->jme_flags & JME_FLAG_MSI) != 0 ||
696 (sc->jme_flags & JME_FLAG_MSIX) != 0)
697 jme_map_intr_vector(sc);
698 }
699
700 error = bus_alloc_resources(dev, sc->jme_irq_spec, sc->jme_irq);
701 if (error != 0) {
702 device_printf(dev, "cannot allocate IRQ resources.\n");
703 goto fail;
704 }
705
706 sc->jme_rev = pci_get_device(dev);
707 if ((sc->jme_rev & DEVICEID_JMC2XX_MASK) == DEVICEID_JMC260) {
708 sc->jme_flags |= JME_FLAG_FASTETH;
709 sc->jme_flags |= JME_FLAG_NOJUMBO;
710 }
711 reg = CSR_READ_4(sc, JME_CHIPMODE);
712 sc->jme_chip_rev = (reg & CHIPMODE_REV_MASK) >> CHIPMODE_REV_SHIFT;
713 if (((reg & CHIPMODE_FPGA_REV_MASK) >> CHIPMODE_FPGA_REV_SHIFT) !=
714 CHIPMODE_NOT_FPGA)
715 sc->jme_flags |= JME_FLAG_FPGA;
716 if (bootverbose) {
717 device_printf(dev, "PCI device revision : 0x%04x\n",
718 sc->jme_rev);
719 device_printf(dev, "Chip revision : 0x%02x\n",
720 sc->jme_chip_rev);
721 if ((sc->jme_flags & JME_FLAG_FPGA) != 0)
722 device_printf(dev, "FPGA revision : 0x%04x\n",
723 (reg & CHIPMODE_FPGA_REV_MASK) >>
724 CHIPMODE_FPGA_REV_SHIFT);
725 }
726 if (sc->jme_chip_rev == 0xFF) {
727 device_printf(dev, "Unknown chip revision : 0x%02x\n",
728 sc->jme_rev);
729 error = ENXIO;
730 goto fail;
731 }
732
733 /* Identify controller features and bugs. */
734 if (CHIPMODE_REVFM(sc->jme_chip_rev) >= 2) {
735 if ((sc->jme_rev & DEVICEID_JMC2XX_MASK) == DEVICEID_JMC260 &&
736 CHIPMODE_REVFM(sc->jme_chip_rev) == 2)
737 sc->jme_flags |= JME_FLAG_DMA32BIT;
738 if (CHIPMODE_REVFM(sc->jme_chip_rev) >= 5)
739 sc->jme_flags |= JME_FLAG_EFUSE | JME_FLAG_PCCPCD;
740 sc->jme_flags |= JME_FLAG_TXCLK | JME_FLAG_RXCLK;
741 sc->jme_flags |= JME_FLAG_HWMIB;
742 }
743
744 /* Reset the ethernet controller. */
745 jme_reset(sc);
746
747 /* Get station address. */
748 if ((sc->jme_flags & JME_FLAG_EFUSE) != 0) {
749 error = jme_efuse_macaddr(sc);
750 if (error == 0)
751 jme_reg_macaddr(sc);
752 } else {
753 error = ENOENT;
754 reg = CSR_READ_4(sc, JME_SMBCSR);
755 if ((reg & SMBCSR_EEPROM_PRESENT) != 0)
756 error = jme_eeprom_macaddr(sc);
757 if (error != 0 && bootverbose)
758 device_printf(sc->jme_dev,
759 "ethernet hardware address not found in EEPROM.\n");
760 if (error != 0)
761 jme_reg_macaddr(sc);
762 }
763
764 /*
765 * Save PHY address.
766 * The integrated JR0211 has a fixed PHY address, whereas the
767 * FPGA version requires PHY probing to get the correct address.
768 */
769 if ((sc->jme_flags & JME_FLAG_FPGA) == 0) {
770 sc->jme_phyaddr = CSR_READ_4(sc, JME_GPREG0) &
771 GPREG0_PHY_ADDR_MASK;
772 if (bootverbose)
773 device_printf(dev, "PHY is at address %d.\n",
774 sc->jme_phyaddr);
775 } else
776 sc->jme_phyaddr = 0;
777
778 /* Set max allowable DMA size. */
779 if (pci_find_cap(dev, PCIY_EXPRESS, &i) == 0) {
780 sc->jme_flags |= JME_FLAG_PCIE;
781 burst = pci_read_config(dev, i + PCIER_DEVICE_CTL, 2);
782 if (bootverbose) {
783 device_printf(dev, "Read request size : %d bytes.\n",
784 128 << ((burst >> 12) & 0x07));
785 device_printf(dev, "TLP payload size : %d bytes.\n",
786 128 << ((burst >> 5) & 0x07));
787 }
788 switch ((burst >> 12) & 0x07) {
789 case 0:
790 sc->jme_tx_dma_size = TXCSR_DMA_SIZE_128;
791 break;
792 case 1:
793 sc->jme_tx_dma_size = TXCSR_DMA_SIZE_256;
794 break;
795 default:
796 sc->jme_tx_dma_size = TXCSR_DMA_SIZE_512;
797 break;
798 }
799 sc->jme_rx_dma_size = RXCSR_DMA_SIZE_128;
800 } else {
801 sc->jme_tx_dma_size = TXCSR_DMA_SIZE_512;
802 sc->jme_rx_dma_size = RXCSR_DMA_SIZE_128;
803 }
804 /* Create coalescing sysctl node. */
805 jme_sysctl_node(sc);
806 if ((error = jme_dma_alloc(sc)) != 0)
807 goto fail;
808
809 ifp = sc->jme_ifp = if_alloc(IFT_ETHER);
810 if (ifp == NULL) {
811 device_printf(dev, "cannot allocate ifnet structure.\n");
812 error = ENXIO;
813 goto fail;
814 }
815
816 ifp->if_softc = sc;
817 if_initname(ifp, device_get_name(dev), device_get_unit(dev));
818 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
819 ifp->if_ioctl = jme_ioctl;
820 ifp->if_start = jme_start;
821 ifp->if_init = jme_init;
822 ifp->if_snd.ifq_drv_maxlen = JME_TX_RING_CNT - 1;
823 IFQ_SET_MAXLEN(&ifp->if_snd, ifp->if_snd.ifq_drv_maxlen);
824 IFQ_SET_READY(&ifp->if_snd);
825 /* JMC250 supports Tx/Rx checksum offload as well as TSO. */
826 ifp->if_capabilities = IFCAP_HWCSUM | IFCAP_TSO4;
827 ifp->if_hwassist = JME_CSUM_FEATURES | CSUM_TSO;
828 if (pci_find_cap(dev, PCIY_PMG, &pmc) == 0) {
829 sc->jme_flags |= JME_FLAG_PMCAP;
830 ifp->if_capabilities |= IFCAP_WOL_MAGIC;
831 }
832 ifp->if_capenable = ifp->if_capabilities;
833
834 /* Wakeup PHY. */
835 jme_phy_up(sc);
836 mii_flags = MIIF_DOPAUSE;
837 /* Ask the PHY driver to perform PHY calibration. */
838 if (CHIPMODE_REVFM(sc->jme_chip_rev) >= 5)
839 mii_flags |= MIIF_MACPRIV0;
840 /* Set up MII bus. */
841 error = mii_attach(dev, &sc->jme_miibus, ifp, jme_mediachange,
842 jme_mediastatus, BMSR_DEFCAPMASK,
843 sc->jme_flags & JME_FLAG_FPGA ? MII_PHY_ANY : sc->jme_phyaddr,
844 MII_OFFSET_ANY, mii_flags);
845 if (error != 0) {
846 device_printf(dev, "attaching PHYs failed\n");
847 goto fail;
848 }
849
850 /*
851 * Force PHY to FPGA mode.
852 */
853 if ((sc->jme_flags & JME_FLAG_FPGA) != 0) {
854 mii = device_get_softc(sc->jme_miibus);
855 if (mii->mii_instance != 0) {
856 LIST_FOREACH(miisc, &mii->mii_phys, mii_list) {
857 if (miisc->mii_phy != 0) {
858 sc->jme_phyaddr = miisc->mii_phy;
859 break;
860 }
861 }
862 if (sc->jme_phyaddr != 0) {
863 device_printf(sc->jme_dev,
864 "FPGA PHY is at %d\n", sc->jme_phyaddr);
865 /* vendor magic. */
866 jme_miibus_writereg(dev, sc->jme_phyaddr, 27,
867 0x0004);
868 }
869 }
870 }
871
872 ether_ifattach(ifp, sc->jme_eaddr);
873
874 /* VLAN capability setup */
875 ifp->if_capabilities |= IFCAP_VLAN_MTU | IFCAP_VLAN_HWTAGGING |
876 IFCAP_VLAN_HWCSUM | IFCAP_VLAN_HWTSO;
877 ifp->if_capenable = ifp->if_capabilities;
878
879 /* Tell the upper layer(s) we support long frames. */
880 ifp->if_hdrlen = sizeof(struct ether_vlan_header);
881
882 /* Create local taskq. */
883 sc->jme_tq = taskqueue_create_fast("jme_taskq", M_WAITOK,
884 taskqueue_thread_enqueue, &sc->jme_tq);
885 if (sc->jme_tq == NULL) {
886 device_printf(dev, "could not create taskqueue.\n");
887 ether_ifdetach(ifp);
888 error = ENXIO;
889 goto fail;
890 }
891 taskqueue_start_threads(&sc->jme_tq, 1, PI_NET, "%s taskq",
892 device_get_nameunit(sc->jme_dev));
893
894 for (i = 0; i < 1; i++) {
895 error = bus_setup_intr(dev, sc->jme_irq[i],
896 INTR_TYPE_NET | INTR_MPSAFE, jme_intr, NULL, sc,
897 &sc->jme_intrhand[i]);
898 if (error != 0)
899 break;
900 }
901
902 if (error != 0) {
903 device_printf(dev, "could not set up interrupt handler.\n");
904 taskqueue_free(sc->jme_tq);
905 sc->jme_tq = NULL;
906 ether_ifdetach(ifp);
907 goto fail;
908 }
909
910 fail:
911 if (error != 0)
912 jme_detach(dev);
913
914 return (error);
915 }
916
917 static int
918 jme_detach(device_t dev)
919 {
920 struct jme_softc *sc;
921 struct ifnet *ifp;
922 int i;
923
924 sc = device_get_softc(dev);
925
926 ifp = sc->jme_ifp;
927 if (device_is_attached(dev)) {
928 JME_LOCK(sc);
929 sc->jme_flags |= JME_FLAG_DETACH;
930 jme_stop(sc);
931 JME_UNLOCK(sc);
932 callout_drain(&sc->jme_tick_ch);
933 taskqueue_drain(sc->jme_tq, &sc->jme_int_task);
934 taskqueue_drain(taskqueue_swi, &sc->jme_link_task);
935 /* Restore possibly modified station address. */
936 if ((sc->jme_flags & JME_FLAG_EFUSE) != 0)
937 jme_set_macaddr(sc, sc->jme_eaddr);
938 ether_ifdetach(ifp);
939 }
940
941 if (sc->jme_tq != NULL) {
942 taskqueue_drain(sc->jme_tq, &sc->jme_int_task);
943 taskqueue_free(sc->jme_tq);
944 sc->jme_tq = NULL;
945 }
946
947 if (sc->jme_miibus != NULL) {
948 device_delete_child(dev, sc->jme_miibus);
949 sc->jme_miibus = NULL;
950 }
951 bus_generic_detach(dev);
952 jme_dma_free(sc);
953
954 if (ifp != NULL) {
955 if_free(ifp);
956 sc->jme_ifp = NULL;
957 }
958
959 for (i = 0; i < 1; i++) {
960 if (sc->jme_intrhand[i] != NULL) {
961 bus_teardown_intr(dev, sc->jme_irq[i],
962 sc->jme_intrhand[i]);
963 sc->jme_intrhand[i] = NULL;
964 }
965 }
966
967 if (sc->jme_irq[0] != NULL)
968 bus_release_resources(dev, sc->jme_irq_spec, sc->jme_irq);
969 if ((sc->jme_flags & (JME_FLAG_MSIX | JME_FLAG_MSI)) != 0)
970 pci_release_msi(dev);
971 if (sc->jme_res[0] != NULL)
972 bus_release_resources(dev, sc->jme_res_spec, sc->jme_res);
973 mtx_destroy(&sc->jme_mtx);
974
975 return (0);
976 }
977
978 #define JME_SYSCTL_STAT_ADD32(c, h, n, p, d) \
979 SYSCTL_ADD_UINT(c, h, OID_AUTO, n, CTLFLAG_RD, p, 0, d)
980
981 static void
982 jme_sysctl_node(struct jme_softc *sc)
983 {
984 struct sysctl_ctx_list *ctx;
985 struct sysctl_oid_list *child, *parent;
986 struct sysctl_oid *tree;
987 struct jme_hw_stats *stats;
988 int error;
989
990 stats = &sc->jme_stats;
991 ctx = device_get_sysctl_ctx(sc->jme_dev);
992 child = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->jme_dev));
993
994 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "tx_coal_to",
995 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, &sc->jme_tx_coal_to,
996 0, sysctl_hw_jme_tx_coal_to, "I", "jme tx coalescing timeout");
997
998 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "tx_coal_pkt",
999 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, &sc->jme_tx_coal_pkt,
1000 0, sysctl_hw_jme_tx_coal_pkt, "I", "jme tx coalescing packet");
1001
1002 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "rx_coal_to",
1003 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, &sc->jme_rx_coal_to,
1004 0, sysctl_hw_jme_rx_coal_to, "I", "jme rx coalescing timeout");
1005
1006 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "rx_coal_pkt",
1007 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, &sc->jme_rx_coal_pkt,
1008 0, sysctl_hw_jme_rx_coal_pkt, "I", "jme rx coalescing packet");
1009
1010 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "process_limit",
1011 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
1012 &sc->jme_process_limit, 0, sysctl_hw_jme_proc_limit, "I",
1013 "max number of Rx events to process");
1014
1015 /* Pull in device tunables. */
1016 sc->jme_process_limit = JME_PROC_DEFAULT;
1017 error = resource_int_value(device_get_name(sc->jme_dev),
1018 device_get_unit(sc->jme_dev), "process_limit",
1019 &sc->jme_process_limit);
1020 if (error == 0) {
1021 if (sc->jme_process_limit < JME_PROC_MIN ||
1022 sc->jme_process_limit > JME_PROC_MAX) {
1023 device_printf(sc->jme_dev,
1024 "process_limit value out of range; "
1025 "using default: %d\n", JME_PROC_DEFAULT);
1026 sc->jme_process_limit = JME_PROC_DEFAULT;
1027 }
1028 }
1029
1030 sc->jme_tx_coal_to = PCCTX_COAL_TO_DEFAULT;
1031 error = resource_int_value(device_get_name(sc->jme_dev),
1032 device_get_unit(sc->jme_dev), "tx_coal_to", &sc->jme_tx_coal_to);
1033 if (error == 0) {
1034 if (sc->jme_tx_coal_to < PCCTX_COAL_TO_MIN ||
1035 sc->jme_tx_coal_to > PCCTX_COAL_TO_MAX) {
1036 device_printf(sc->jme_dev,
1037 "tx_coal_to value out of range; "
1038 "using default: %d\n", PCCTX_COAL_TO_DEFAULT);
1039 sc->jme_tx_coal_to = PCCTX_COAL_TO_DEFAULT;
1040 }
1041 }
1042
1043 sc->jme_tx_coal_pkt = PCCTX_COAL_PKT_DEFAULT;
1044 error = resource_int_value(device_get_name(sc->jme_dev),
1045 device_get_unit(sc->jme_dev), "tx_coal_pkt", &sc->jme_tx_coal_to);
1046 if (error == 0) {
1047 if (sc->jme_tx_coal_pkt < PCCTX_COAL_PKT_MIN ||
1048 sc->jme_tx_coal_pkt > PCCTX_COAL_PKT_MAX) {
1049 device_printf(sc->jme_dev,
1050 "tx_coal_pkt value out of range; "
1051 "using default: %d\n", PCCTX_COAL_PKT_DEFAULT);
1052 sc->jme_tx_coal_pkt = PCCTX_COAL_PKT_DEFAULT;
1053 }
1054 }
1055
1056 sc->jme_rx_coal_to = PCCRX_COAL_TO_DEFAULT;
1057 error = resource_int_value(device_get_name(sc->jme_dev),
1058 device_get_unit(sc->jme_dev), "rx_coal_to", &sc->jme_rx_coal_to);
1059 if (error == 0) {
1060 if (sc->jme_rx_coal_to < PCCRX_COAL_TO_MIN ||
1061 sc->jme_rx_coal_to > PCCRX_COAL_TO_MAX) {
1062 device_printf(sc->jme_dev,
1063 "rx_coal_to value out of range; "
1064 "using default: %d\n", PCCRX_COAL_TO_DEFAULT);
1065 sc->jme_rx_coal_to = PCCRX_COAL_TO_DEFAULT;
1066 }
1067 }
1068
1069 sc->jme_rx_coal_pkt = PCCRX_COAL_PKT_DEFAULT;
1070 error = resource_int_value(device_get_name(sc->jme_dev),
1071 device_get_unit(sc->jme_dev), "rx_coal_pkt", &sc->jme_rx_coal_to);
1072 if (error == 0) {
1073 if (sc->jme_rx_coal_pkt < PCCRX_COAL_PKT_MIN ||
1074 sc->jme_rx_coal_pkt > PCCRX_COAL_PKT_MAX) {
1075 device_printf(sc->jme_dev,
1076 "tx_coal_pkt value out of range; "
1077 "using default: %d\n", PCCRX_COAL_PKT_DEFAULT);
1078 sc->jme_rx_coal_pkt = PCCRX_COAL_PKT_DEFAULT;
1079 }
1080 }
1081
1082 if ((sc->jme_flags & JME_FLAG_HWMIB) == 0)
1083 return;
1084
1085 tree = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "stats",
1086 CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "JME statistics");
1087 parent = SYSCTL_CHILDREN(tree);
1088
1089 /* Rx statistics. */
1090 tree = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "rx",
1091 CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "Rx MAC statistics");
1092 child = SYSCTL_CHILDREN(tree);
1093 JME_SYSCTL_STAT_ADD32(ctx, child, "good_frames",
1094 &stats->rx_good_frames, "Good frames");
1095 JME_SYSCTL_STAT_ADD32(ctx, child, "crc_errs",
1096 &stats->rx_crc_errs, "CRC errors");
1097 JME_SYSCTL_STAT_ADD32(ctx, child, "mii_errs",
1098 &stats->rx_mii_errs, "MII errors");
1099 JME_SYSCTL_STAT_ADD32(ctx, child, "fifo_oflows",
1100 &stats->rx_fifo_oflows, "FIFO overflows");
1101 JME_SYSCTL_STAT_ADD32(ctx, child, "desc_empty",
1102 &stats->rx_desc_empty, "Descriptor empty");
1103 JME_SYSCTL_STAT_ADD32(ctx, child, "bad_frames",
1104 &stats->rx_bad_frames, "Bad frames");
1105
1106 /* Tx statistics. */
1107 tree = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "tx",
1108 CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "Tx MAC statistics");
1109 child = SYSCTL_CHILDREN(tree);
1110 JME_SYSCTL_STAT_ADD32(ctx, child, "good_frames",
1111 &stats->tx_good_frames, "Good frames");
1112 JME_SYSCTL_STAT_ADD32(ctx, child, "bad_frames",
1113 &stats->tx_bad_frames, "Bad frames");
1114 }
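/*
 * The handlers above hang off the per-device sysctl tree, so (for a
 * hypothetical unit 0) the coalescing knobs can be read and tuned at
 * runtime:
 *
 *   sysctl dev.jme.0.tx_coal_to
 *   sysctl dev.jme.0.process_limit=128
 *
 * The resource_int_value() lookups let the same values be seeded from
 * device.hints(5), e.g. hint.jme.0.process_limit="128".
 */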
1115
1116 #undef JME_SYSCTL_STAT_ADD32
1117
1118 struct jme_dmamap_arg {
1119 bus_addr_t jme_busaddr;
1120 };
1121
1122 static void
1123 jme_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
1124 {
1125 struct jme_dmamap_arg *ctx;
1126
1127 if (error != 0)
1128 return;
1129
1130 KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));
1131
1132 ctx = (struct jme_dmamap_arg *)arg;
1133 ctx->jme_busaddr = segs[0].ds_addr;
1134 }
1135
1136 static int
1137 jme_dma_alloc(struct jme_softc *sc)
1138 {
1139 struct jme_dmamap_arg ctx;
1140 struct jme_txdesc *txd;
1141 struct jme_rxdesc *rxd;
1142 bus_addr_t lowaddr, rx_ring_end, tx_ring_end;
1143 int error, i;
1144
1145 lowaddr = BUS_SPACE_MAXADDR;
1146 if ((sc->jme_flags & JME_FLAG_DMA32BIT) != 0)
1147 lowaddr = BUS_SPACE_MAXADDR_32BIT;
1148
1149 again:
1150 /* Create parent ring tag. */
1151 error = bus_dma_tag_create(bus_get_dma_tag(sc->jme_dev),/* parent */
1152 1, 0, /* algnmnt, boundary */
1153 lowaddr, /* lowaddr */
1154 BUS_SPACE_MAXADDR, /* highaddr */
1155 NULL, NULL, /* filter, filterarg */
1156 BUS_SPACE_MAXSIZE_32BIT, /* maxsize */
1157 0, /* nsegments */
1158 BUS_SPACE_MAXSIZE_32BIT, /* maxsegsize */
1159 0, /* flags */
1160 NULL, NULL, /* lockfunc, lockarg */
1161 &sc->jme_cdata.jme_ring_tag);
1162 if (error != 0) {
1163 device_printf(sc->jme_dev,
1164 "could not create parent ring DMA tag.\n");
1165 goto fail;
1166 }
1167 /* Create tag for Tx ring. */
1168 error = bus_dma_tag_create(sc->jme_cdata.jme_ring_tag,/* parent */
1169 JME_TX_RING_ALIGN, 0, /* algnmnt, boundary */
1170 BUS_SPACE_MAXADDR, /* lowaddr */
1171 BUS_SPACE_MAXADDR, /* highaddr */
1172 NULL, NULL, /* filter, filterarg */
1173 JME_TX_RING_SIZE, /* maxsize */
1174 1, /* nsegments */
1175 JME_TX_RING_SIZE, /* maxsegsize */
1176 0, /* flags */
1177 NULL, NULL, /* lockfunc, lockarg */
1178 &sc->jme_cdata.jme_tx_ring_tag);
1179 if (error != 0) {
1180 device_printf(sc->jme_dev,
1181 "could not allocate Tx ring DMA tag.\n");
1182 goto fail;
1183 }
1184
1185 /* Create tag for Rx ring. */
1186 error = bus_dma_tag_create(sc->jme_cdata.jme_ring_tag,/* parent */
1187 JME_RX_RING_ALIGN, 0, /* algnmnt, boundary */
1188 lowaddr, /* lowaddr */
1189 BUS_SPACE_MAXADDR, /* highaddr */
1190 NULL, NULL, /* filter, filterarg */
1191 JME_RX_RING_SIZE, /* maxsize */
1192 1, /* nsegments */
1193 JME_RX_RING_SIZE, /* maxsegsize */
1194 0, /* flags */
1195 NULL, NULL, /* lockfunc, lockarg */
1196 &sc->jme_cdata.jme_rx_ring_tag);
1197 if (error != 0) {
1198 device_printf(sc->jme_dev,
1199 "could not allocate Rx ring DMA tag.\n");
1200 goto fail;
1201 }
1202
1203 /* Allocate DMA'able memory and load the DMA map for Tx ring. */
1204 error = bus_dmamem_alloc(sc->jme_cdata.jme_tx_ring_tag,
1205 (void **)&sc->jme_rdata.jme_tx_ring,
1206 BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT,
1207 &sc->jme_cdata.jme_tx_ring_map);
1208 if (error != 0) {
1209 device_printf(sc->jme_dev,
1210 "could not allocate DMA'able memory for Tx ring.\n");
1211 goto fail;
1212 }
1213
1214 ctx.jme_busaddr = 0;
1215 error = bus_dmamap_load(sc->jme_cdata.jme_tx_ring_tag,
1216 sc->jme_cdata.jme_tx_ring_map, sc->jme_rdata.jme_tx_ring,
1217 JME_TX_RING_SIZE, jme_dmamap_cb, &ctx, BUS_DMA_NOWAIT);
1218 if (error != 0 || ctx.jme_busaddr == 0) {
1219 device_printf(sc->jme_dev,
1220 "could not load DMA'able memory for Tx ring.\n");
1221 goto fail;
1222 }
1223 sc->jme_rdata.jme_tx_ring_paddr = ctx.jme_busaddr;
1224
1225 /* Allocate DMA'able memory and load the DMA map for Rx ring. */
1226 error = bus_dmamem_alloc(sc->jme_cdata.jme_rx_ring_tag,
1227 (void **)&sc->jme_rdata.jme_rx_ring,
1228 BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT,
1229 &sc->jme_cdata.jme_rx_ring_map);
1230 if (error != 0) {
1231 device_printf(sc->jme_dev,
1232 "could not allocate DMA'able memory for Rx ring.\n");
1233 goto fail;
1234 }
1235
1236 ctx.jme_busaddr = 0;
1237 error = bus_dmamap_load(sc->jme_cdata.jme_rx_ring_tag,
1238 sc->jme_cdata.jme_rx_ring_map, sc->jme_rdata.jme_rx_ring,
1239 JME_RX_RING_SIZE, jme_dmamap_cb, &ctx, BUS_DMA_NOWAIT);
1240 if (error != 0 || ctx.jme_busaddr == 0) {
1241 device_printf(sc->jme_dev,
1242 "could not load DMA'able memory for Rx ring.\n");
1243 goto fail;
1244 }
1245 sc->jme_rdata.jme_rx_ring_paddr = ctx.jme_busaddr;
1246
1247 if (lowaddr != BUS_SPACE_MAXADDR_32BIT) {
1248 /* The Tx/Rx descriptor rings must not cross a 4GB boundary. */
1249 tx_ring_end = sc->jme_rdata.jme_tx_ring_paddr +
1250 JME_TX_RING_SIZE;
1251 rx_ring_end = sc->jme_rdata.jme_rx_ring_paddr +
1252 JME_RX_RING_SIZE;
1253 if ((JME_ADDR_HI(tx_ring_end) !=
1254 JME_ADDR_HI(sc->jme_rdata.jme_tx_ring_paddr)) ||
1255 (JME_ADDR_HI(rx_ring_end) !=
1256 JME_ADDR_HI(sc->jme_rdata.jme_rx_ring_paddr))) {
1257 device_printf(sc->jme_dev, "4GB boundary crossed, "
1258 "switching to 32bit DMA address mode.\n");
1259 jme_dma_free(sc);
1260 /* Limit DMA address space to 32bit and try again. */
1261 lowaddr = BUS_SPACE_MAXADDR_32BIT;
1262 goto again;
1263 }
1264 }
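/*
 * A concrete example of the boundary check above, assuming JME_ADDR_HI()
 * returns the upper 32 bits of a bus address: a ring loaded at
 * 0xffffe000 with a (hypothetical) 16KB size would end at 0x100002000,
 * so the HI words differ (0 vs. 1), the ring straddles the 4GB boundary,
 * and the rings are reallocated with a 32-bit lowaddr.
 */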
1265
1266 lowaddr = BUS_SPACE_MAXADDR;
1267 if ((sc->jme_flags & JME_FLAG_DMA32BIT) != 0)
1268 lowaddr = BUS_SPACE_MAXADDR_32BIT;
1269 /* Create parent buffer tag. */
1270 error = bus_dma_tag_create(bus_get_dma_tag(sc->jme_dev),/* parent */
1271 1, 0, /* algnmnt, boundary */
1272 lowaddr, /* lowaddr */
1273 BUS_SPACE_MAXADDR, /* highaddr */
1274 NULL, NULL, /* filter, filterarg */
1275 BUS_SPACE_MAXSIZE_32BIT, /* maxsize */
1276 0, /* nsegments */
1277 BUS_SPACE_MAXSIZE_32BIT, /* maxsegsize */
1278 0, /* flags */
1279 NULL, NULL, /* lockfunc, lockarg */
1280 &sc->jme_cdata.jme_buffer_tag);
1281 if (error != 0) {
1282 device_printf(sc->jme_dev,
1283 "could not create parent buffer DMA tag.\n");
1284 goto fail;
1285 }
1286
1287 /* Create shadow status block tag. */
1288 error = bus_dma_tag_create(sc->jme_cdata.jme_buffer_tag,/* parent */
1289 JME_SSB_ALIGN, 0, /* algnmnt, boundary */
1290 BUS_SPACE_MAXADDR, /* lowaddr */
1291 BUS_SPACE_MAXADDR, /* highaddr */
1292 NULL, NULL, /* filter, filterarg */
1293 JME_SSB_SIZE, /* maxsize */
1294 1, /* nsegments */
1295 JME_SSB_SIZE, /* maxsegsize */
1296 0, /* flags */
1297 NULL, NULL, /* lockfunc, lockarg */
1298 &sc->jme_cdata.jme_ssb_tag);
1299 if (error != 0) {
1300 device_printf(sc->jme_dev,
1301 "could not create shared status block DMA tag.\n");
1302 goto fail;
1303 }
1304
1305 /* Create tag for Tx buffers. */
1306 error = bus_dma_tag_create(sc->jme_cdata.jme_buffer_tag,/* parent */
1307 1, 0, /* algnmnt, boundary */
1308 BUS_SPACE_MAXADDR, /* lowaddr */
1309 BUS_SPACE_MAXADDR, /* highaddr */
1310 NULL, NULL, /* filter, filterarg */
1311 JME_TSO_MAXSIZE, /* maxsize */
1312 JME_MAXTXSEGS, /* nsegments */
1313 JME_TSO_MAXSEGSIZE, /* maxsegsize */
1314 0, /* flags */
1315 NULL, NULL, /* lockfunc, lockarg */
1316 &sc->jme_cdata.jme_tx_tag);
1317 if (error != 0) {
1318 device_printf(sc->jme_dev, "could not create Tx DMA tag.\n");
1319 goto fail;
1320 }
1321
1322 /* Create tag for Rx buffers. */
1323 error = bus_dma_tag_create(sc->jme_cdata.jme_buffer_tag,/* parent */
1324 JME_RX_BUF_ALIGN, 0, /* algnmnt, boundary */
1325 BUS_SPACE_MAXADDR, /* lowaddr */
1326 BUS_SPACE_MAXADDR, /* highaddr */
1327 NULL, NULL, /* filter, filterarg */
1328 MCLBYTES, /* maxsize */
1329 1, /* nsegments */
1330 MCLBYTES, /* maxsegsize */
1331 0, /* flags */
1332 NULL, NULL, /* lockfunc, lockarg */
1333 &sc->jme_cdata.jme_rx_tag);
1334 if (error != 0) {
1335 device_printf(sc->jme_dev, "could not create Rx DMA tag.\n");
1336 goto fail;
1337 }
1338
1339 /*
1340 * Allocate DMA'able memory and load the DMA map for shared
1341 * status block.
1342 */
1343 error = bus_dmamem_alloc(sc->jme_cdata.jme_ssb_tag,
1344 (void **)&sc->jme_rdata.jme_ssb_block,
1345 BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT,
1346 &sc->jme_cdata.jme_ssb_map);
1347 if (error != 0) {
1348 device_printf(sc->jme_dev, "could not allocate DMA'able "
1349 "memory for shared status block.\n");
1350 goto fail;
1351 }
1352
1353 ctx.jme_busaddr = 0;
1354 error = bus_dmamap_load(sc->jme_cdata.jme_ssb_tag,
1355 sc->jme_cdata.jme_ssb_map, sc->jme_rdata.jme_ssb_block,
1356 JME_SSB_SIZE, jme_dmamap_cb, &ctx, BUS_DMA_NOWAIT);
1357 if (error != 0 || ctx.jme_busaddr == 0) {
1358 device_printf(sc->jme_dev, "could not load DMA'able memory "
1359 "for shared status block.\n");
1360 goto fail;
1361 }
1362 sc->jme_rdata.jme_ssb_block_paddr = ctx.jme_busaddr;
1363
1364 /* Create DMA maps for Tx buffers. */
1365 for (i = 0; i < JME_TX_RING_CNT; i++) {
1366 txd = &sc->jme_cdata.jme_txdesc[i];
1367 txd->tx_m = NULL;
1368 txd->tx_dmamap = NULL;
1369 error = bus_dmamap_create(sc->jme_cdata.jme_tx_tag, 0,
1370 &txd->tx_dmamap);
1371 if (error != 0) {
1372 device_printf(sc->jme_dev,
1373 "could not create Tx dmamap.\n");
1374 goto fail;
1375 }
1376 }
1377 /* Create DMA maps for Rx buffers. */
1378 if ((error = bus_dmamap_create(sc->jme_cdata.jme_rx_tag, 0,
1379 &sc->jme_cdata.jme_rx_sparemap)) != 0) {
1380 device_printf(sc->jme_dev,
1381 "could not create spare Rx dmamap.\n");
1382 goto fail;
1383 }
1384 for (i = 0; i < JME_RX_RING_CNT; i++) {
1385 rxd = &sc->jme_cdata.jme_rxdesc[i];
1386 rxd->rx_m = NULL;
1387 rxd->rx_dmamap = NULL;
1388 error = bus_dmamap_create(sc->jme_cdata.jme_rx_tag, 0,
1389 &rxd->rx_dmamap);
1390 if (error != 0) {
1391 device_printf(sc->jme_dev,
1392 "could not create Rx dmamap.\n");
1393 goto fail;
1394 }
1395 }
1396
1397 fail:
1398 return (error);
1399 }
1400
1401 static void
1402 jme_dma_free(struct jme_softc *sc)
1403 {
1404 struct jme_txdesc *txd;
1405 struct jme_rxdesc *rxd;
1406 int i;
1407
1408 /* Tx ring */
1409 if (sc->jme_cdata.jme_tx_ring_tag != NULL) {
1410 if (sc->jme_rdata.jme_tx_ring_paddr)
1411 bus_dmamap_unload(sc->jme_cdata.jme_tx_ring_tag,
1412 sc->jme_cdata.jme_tx_ring_map);
1413 if (sc->jme_rdata.jme_tx_ring)
1414 bus_dmamem_free(sc->jme_cdata.jme_tx_ring_tag,
1415 sc->jme_rdata.jme_tx_ring,
1416 sc->jme_cdata.jme_tx_ring_map);
1417 sc->jme_rdata.jme_tx_ring = NULL;
1418 sc->jme_rdata.jme_tx_ring_paddr = 0;
1419 bus_dma_tag_destroy(sc->jme_cdata.jme_tx_ring_tag);
1420 sc->jme_cdata.jme_tx_ring_tag = NULL;
1421 }
1422 /* Rx ring */
1423 if (sc->jme_cdata.jme_rx_ring_tag != NULL) {
1424 if (sc->jme_rdata.jme_rx_ring_paddr)
1425 bus_dmamap_unload(sc->jme_cdata.jme_rx_ring_tag,
1426 sc->jme_cdata.jme_rx_ring_map);
1427 if (sc->jme_rdata.jme_rx_ring)
1428 bus_dmamem_free(sc->jme_cdata.jme_rx_ring_tag,
1429 sc->jme_rdata.jme_rx_ring,
1430 sc->jme_cdata.jme_rx_ring_map);
1431 sc->jme_rdata.jme_rx_ring = NULL;
1432 sc->jme_rdata.jme_rx_ring_paddr = 0;
1433 bus_dma_tag_destroy(sc->jme_cdata.jme_rx_ring_tag);
1434 sc->jme_cdata.jme_rx_ring_tag = NULL;
1435 }
1436 /* Tx buffers */
1437 if (sc->jme_cdata.jme_tx_tag != NULL) {
1438 for (i = 0; i < JME_TX_RING_CNT; i++) {
1439 txd = &sc->jme_cdata.jme_txdesc[i];
1440 if (txd->tx_dmamap != NULL) {
1441 bus_dmamap_destroy(sc->jme_cdata.jme_tx_tag,
1442 txd->tx_dmamap);
1443 txd->tx_dmamap = NULL;
1444 }
1445 }
1446 bus_dma_tag_destroy(sc->jme_cdata.jme_tx_tag);
1447 sc->jme_cdata.jme_tx_tag = NULL;
1448 }
1449 /* Rx buffers */
1450 if (sc->jme_cdata.jme_rx_tag != NULL) {
1451 for (i = 0; i < JME_RX_RING_CNT; i++) {
1452 rxd = &sc->jme_cdata.jme_rxdesc[i];
1453 if (rxd->rx_dmamap != NULL) {
1454 bus_dmamap_destroy(sc->jme_cdata.jme_rx_tag,
1455 rxd->rx_dmamap);
1456 rxd->rx_dmamap = NULL;
1457 }
1458 }
1459 if (sc->jme_cdata.jme_rx_sparemap != NULL) {
1460 bus_dmamap_destroy(sc->jme_cdata.jme_rx_tag,
1461 sc->jme_cdata.jme_rx_sparemap);
1462 sc->jme_cdata.jme_rx_sparemap = NULL;
1463 }
1464 bus_dma_tag_destroy(sc->jme_cdata.jme_rx_tag);
1465 sc->jme_cdata.jme_rx_tag = NULL;
1466 }
1467
1468 /* Shared status block. */
1469 if (sc->jme_cdata.jme_ssb_tag != NULL) {
1470 if (sc->jme_rdata.jme_ssb_block_paddr)
1471 bus_dmamap_unload(sc->jme_cdata.jme_ssb_tag,
1472 sc->jme_cdata.jme_ssb_map);
1473 if (sc->jme_rdata.jme_ssb_block)
1474 bus_dmamem_free(sc->jme_cdata.jme_ssb_tag,
1475 sc->jme_rdata.jme_ssb_block,
1476 sc->jme_cdata.jme_ssb_map);
1477 sc->jme_rdata.jme_ssb_block = NULL;
1478 sc->jme_rdata.jme_ssb_block_paddr = 0;
1479 bus_dma_tag_destroy(sc->jme_cdata.jme_ssb_tag);
1480 sc->jme_cdata.jme_ssb_tag = NULL;
1481 }
1482
1483 if (sc->jme_cdata.jme_buffer_tag != NULL) {
1484 bus_dma_tag_destroy(sc->jme_cdata.jme_buffer_tag);
1485 sc->jme_cdata.jme_buffer_tag = NULL;
1486 }
1487 if (sc->jme_cdata.jme_ring_tag != NULL) {
1488 bus_dma_tag_destroy(sc->jme_cdata.jme_ring_tag);
1489 sc->jme_cdata.jme_ring_tag = NULL;
1490 }
1491 }
1492
1493 /*
1494 * Make sure the interface is stopped at reboot time.
1495 */
1496 static int
1497 jme_shutdown(device_t dev)
1498 {
1499
1500 return (jme_suspend(dev));
1501 }
1502
1503 /*
1504 * Unlike other ethernet controllers, the JMC250 requires the
1505 * link speed to be explicitly reset to 10/100Mbps, as a gigabit
1506 * link would consume more than 375mA.
1507 * Note that we reset the link speed to 10/100Mbps with
1508 * auto-negotiation, but we don't know whether that operation
1509 * will succeed, as we have no control after powering off. If
1510 * the renegotiation fails, WOL may not work. Running at 1Gbps
1511 * draws more than the 375mA at 3.3V allowed by the PCI
1512 * specification, and that could result in power to the
1513 * ethernet controller being shut down completely.
1514 *
1515 * TODO
1516 * Save current negotiated media speed/duplex/flow-control
1517 * to softc and restore the same link again after resuming.
1518 * PHY handling such as power down/resetting to 100Mbps
1519 * may be better handled in suspend method in phy driver.
1520 */
1521 static void
1522 jme_setlinkspeed(struct jme_softc *sc)
1523 {
1524 struct mii_data *mii;
1525 int aneg, i;
1526
1527 JME_LOCK_ASSERT(sc);
1528
1529 mii = device_get_softc(sc->jme_miibus);
1530 mii_pollstat(mii);
1531 aneg = 0;
1532 if ((mii->mii_media_status & IFM_AVALID) != 0) {
1533 switch (IFM_SUBTYPE(mii->mii_media_active)) {
1534 case IFM_10_T:
1535 case IFM_100_TX:
1536 return;
1537 case IFM_1000_T:
1538 aneg++;
1539 default:
1540 break;
1541 }
1542 }
1543 jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr, MII_100T2CR, 0);
1544 jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr, MII_ANAR,
1545 ANAR_TX_FD | ANAR_TX | ANAR_10_FD | ANAR_10 | ANAR_CSMA);
1546 jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr, MII_BMCR,
1547 BMCR_AUTOEN | BMCR_STARTNEG);
1548 DELAY(1000);
1549 if (aneg != 0) {
1550 /* Poll the link state until jme(4) gets a 10/100 link. */
1551 for (i = 0; i < MII_ANEGTICKS_GIGE; i++) {
1552 mii_pollstat(mii);
1553 if ((mii->mii_media_status & IFM_AVALID) != 0) {
1554 switch (IFM_SUBTYPE(mii->mii_media_active)) {
1555 case IFM_10_T:
1556 case IFM_100_TX:
1557 jme_mac_config(sc);
1558 return;
1559 default:
1560 break;
1561 }
1562 }
1563 JME_UNLOCK(sc);
1564 pause("jmelnk", hz);
1565 JME_LOCK(sc);
1566 }
1567 if (i == MII_ANEGTICKS_GIGE)
1568 device_printf(sc->jme_dev, "establishing link failed, "
1569 "WOL may not work!");
1570 }
1571 /*
1572 * No link, force MAC to have 100Mbps, full-duplex link.
1573 * This is the last resort and may/may not work.
1574 */
1575 mii->mii_media_status = IFM_AVALID | IFM_ACTIVE;
1576 mii->mii_media_active = IFM_ETHER | IFM_100_TX | IFM_FDX;
1577 jme_mac_config(sc);
1578 }
1579
1580 static void
1581 jme_setwol(struct jme_softc *sc)
1582 {
1583 struct ifnet *ifp;
1584 uint32_t gpr, pmcs;
1585 uint16_t pmstat;
1586 int pmc;
1587
1588 JME_LOCK_ASSERT(sc);
1589
1590 if (pci_find_cap(sc->jme_dev, PCIY_PMG, &pmc) != 0) {
1591 /* Remove Tx MAC/offload clock to save more power. */
1592 if ((sc->jme_flags & JME_FLAG_TXCLK) != 0)
1593 CSR_WRITE_4(sc, JME_GHC, CSR_READ_4(sc, JME_GHC) &
1594 ~(GHC_TX_OFFLD_CLK_100 | GHC_TX_MAC_CLK_100 |
1595 GHC_TX_OFFLD_CLK_1000 | GHC_TX_MAC_CLK_1000));
1596 if ((sc->jme_flags & JME_FLAG_RXCLK) != 0)
1597 CSR_WRITE_4(sc, JME_GPREG1,
1598 CSR_READ_4(sc, JME_GPREG1) | GPREG1_RX_MAC_CLK_DIS);
1599 /* No PME capability, PHY power down. */
1600 jme_phy_down(sc);
1601 return;
1602 }
1603
1604 ifp = sc->jme_ifp;
1605 gpr = CSR_READ_4(sc, JME_GPREG0) & ~GPREG0_PME_ENB;
1606 pmcs = CSR_READ_4(sc, JME_PMCS);
1607 pmcs &= ~PMCS_WOL_ENB_MASK;
1608 if ((ifp->if_capenable & IFCAP_WOL_MAGIC) != 0) {
1609 pmcs |= PMCS_MAGIC_FRAME | PMCS_MAGIC_FRAME_ENB;
1610 /* Enable PME message. */
1611 gpr |= GPREG0_PME_ENB;
1612 /* For gigabit controllers, reset link speed to 10/100. */
1613 if ((sc->jme_flags & JME_FLAG_FASTETH) == 0)
1614 jme_setlinkspeed(sc);
1615 }
1616
1617 CSR_WRITE_4(sc, JME_PMCS, pmcs);
1618 CSR_WRITE_4(sc, JME_GPREG0, gpr);
1619 /* Remove Tx MAC/offload clock to save more power. */
1620 if ((sc->jme_flags & JME_FLAG_TXCLK) != 0)
1621 CSR_WRITE_4(sc, JME_GHC, CSR_READ_4(sc, JME_GHC) &
1622 ~(GHC_TX_OFFLD_CLK_100 | GHC_TX_MAC_CLK_100 |
1623 GHC_TX_OFFLD_CLK_1000 | GHC_TX_MAC_CLK_1000));
1624 /* Request PME. */
1625 pmstat = pci_read_config(sc->jme_dev, pmc + PCIR_POWER_STATUS, 2);
1626 pmstat &= ~(PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE);
1627 if ((ifp->if_capenable & IFCAP_WOL) != 0)
1628 pmstat |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE;
1629 pci_write_config(sc->jme_dev, pmc + PCIR_POWER_STATUS, pmstat, 2);
1630 if ((ifp->if_capenable & IFCAP_WOL) == 0) {
1631 /* No WOL, PHY power down. */
1632 jme_phy_down(sc);
1633 }
1634 }
1635
1636 static int
1637 jme_suspend(device_t dev)
1638 {
1639 struct jme_softc *sc;
1640
1641 sc = device_get_softc(dev);
1642
1643 JME_LOCK(sc);
1644 jme_stop(sc);
1645 jme_setwol(sc);
1646 JME_UNLOCK(sc);
1647
1648 return (0);
1649 }
1650
1651 static int
1652 jme_resume(device_t dev)
1653 {
1654 struct jme_softc *sc;
1655 struct ifnet *ifp;
1656 uint16_t pmstat;
1657 int pmc;
1658
1659 sc = device_get_softc(dev);
1660
1661 JME_LOCK(sc);
1662 if (pci_find_cap(sc->jme_dev, PCIY_PMG, &pmc) == 0) {
1663 pmstat = pci_read_config(sc->jme_dev,
1664 pmc + PCIR_POWER_STATUS, 2);
1665 /* Disable PME and clear PME status. */
1666 pmstat &= ~PCIM_PSTAT_PMEENABLE;
1667 pci_write_config(sc->jme_dev,
1668 pmc + PCIR_POWER_STATUS, pmstat, 2);
1669 }
1670 /* Wakeup PHY. */
1671 jme_phy_up(sc);
1672 ifp = sc->jme_ifp;
1673 if ((ifp->if_flags & IFF_UP) != 0) {
1674 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
1675 jme_init_locked(sc);
1676 }
1677
1678 JME_UNLOCK(sc);
1679
1680 return (0);
1681 }
1682
1683 static int
1684 jme_encap(struct jme_softc *sc, struct mbuf **m_head)
1685 {
1686 struct jme_txdesc *txd;
1687 struct jme_desc *desc;
1688 struct mbuf *m;
1689 bus_dma_segment_t txsegs[JME_MAXTXSEGS];
1690 int error, i, nsegs, prod;
1691 uint32_t cflags, tsosegsz;
1692
1693 JME_LOCK_ASSERT(sc);
1694
1695 M_ASSERTPKTHDR((*m_head));
1696
1697 if (((*m_head)->m_pkthdr.csum_flags & CSUM_TSO) != 0) {
1698 /*
1699 * Due to its adherence to the NDIS specification, the JMC250
1700 * assumes the upper stack computed the TCP pseudo checksum
1701 * without including the payload length. This breaks checksum
1702 * offload for the TSO case, so recompute the TCP pseudo
1703 * checksum for the JMC250. Hopefully this isn't much of a
1704 * burden on modern CPUs.
1705 */
1706 struct ether_header *eh;
1707 struct ip *ip;
1708 struct tcphdr *tcp;
1709 uint32_t ip_off, poff;
1710
1711 if (M_WRITABLE(*m_head) == 0) {
1712 /* Get a writable copy. */
1713 m = m_dup(*m_head, M_NOWAIT);
1714 m_freem(*m_head);
1715 if (m == NULL) {
1716 *m_head = NULL;
1717 return (ENOBUFS);
1718 }
1719 *m_head = m;
1720 }
1721 ip_off = sizeof(struct ether_header);
1722 m = m_pullup(*m_head, ip_off);
1723 if (m == NULL) {
1724 *m_head = NULL;
1725 return (ENOBUFS);
1726 }
1727 eh = mtod(m, struct ether_header *);
1728 /* Check the existence of VLAN tag. */
1729 if (eh->ether_type == htons(ETHERTYPE_VLAN)) {
1730 ip_off = sizeof(struct ether_vlan_header);
1731 m = m_pullup(m, ip_off);
1732 if (m == NULL) {
1733 *m_head = NULL;
1734 return (ENOBUFS);
1735 }
1736 }
1737 m = m_pullup(m, ip_off + sizeof(struct ip));
1738 if (m == NULL) {
1739 *m_head = NULL;
1740 return (ENOBUFS);
1741 }
1742 ip = (struct ip *)(mtod(m, char *) + ip_off);
1743 poff = ip_off + (ip->ip_hl << 2);
1744 m = m_pullup(m, poff + sizeof(struct tcphdr));
1745 if (m == NULL) {
1746 *m_head = NULL;
1747 return (ENOBUFS);
1748 }
1749 /*
1750 * Reset IP checksum and recompute TCP pseudo
1751 * checksum as the NDIS specification requires.
1752 */
1753 ip = (struct ip *)(mtod(m, char *) + ip_off);
1754 tcp = (struct tcphdr *)(mtod(m, char *) + poff);
1755 ip->ip_sum = 0;
1756 if (poff + (tcp->th_off << 2) == m->m_pkthdr.len) {
1757 tcp->th_sum = in_pseudo(ip->ip_src.s_addr,
1758 ip->ip_dst.s_addr,
1759 htons((tcp->th_off << 2) + IPPROTO_TCP));
1760 /* No need to TSO, force IP checksum offload. */
1761 (*m_head)->m_pkthdr.csum_flags &= ~CSUM_TSO;
1762 (*m_head)->m_pkthdr.csum_flags |= CSUM_IP;
1763 } else
1764 tcp->th_sum = in_pseudo(ip->ip_src.s_addr,
1765 ip->ip_dst.s_addr, htons(IPPROTO_TCP));
1766 *m_head = m;
1767 }
1768
1769 prod = sc->jme_cdata.jme_tx_prod;
1770 txd = &sc->jme_cdata.jme_txdesc[prod];
1771
1772 error = bus_dmamap_load_mbuf_sg(sc->jme_cdata.jme_tx_tag,
1773 txd->tx_dmamap, *m_head, txsegs, &nsegs, 0);
1774 if (error == EFBIG) {
1775 m = m_collapse(*m_head, M_NOWAIT, JME_MAXTXSEGS);
1776 if (m == NULL) {
1777 m_freem(*m_head);
1778 *m_head = NULL;
1779 return (ENOMEM);
1780 }
1781 *m_head = m;
1782 error = bus_dmamap_load_mbuf_sg(sc->jme_cdata.jme_tx_tag,
1783 txd->tx_dmamap, *m_head, txsegs, &nsegs, 0);
1784 if (error != 0) {
1785 m_freem(*m_head);
1786 *m_head = NULL;
1787 return (error);
1788 }
1789 } else if (error != 0)
1790 return (error);
1791 if (nsegs == 0) {
1792 m_freem(*m_head);
1793 *m_head = NULL;
1794 return (EIO);
1795 }
1796
1797 /*
1798 * Check for descriptor overrun; leave one descriptor free.
1799 * Since we always use 64bit address mode for transmitting,
1800 * each Tx request requires one extra dummy descriptor.
1801 */
1802 if (sc->jme_cdata.jme_tx_cnt + nsegs + 1 > JME_TX_RING_CNT - 1) {
1803 bus_dmamap_unload(sc->jme_cdata.jme_tx_tag, txd->tx_dmamap);
1804 return (ENOBUFS);
1805 }
1806
1807 m = *m_head;
1808 cflags = 0;
1809 tsosegsz = 0;
1810 /* Configure checksum offload and TSO. */
1811 if ((m->m_pkthdr.csum_flags & CSUM_TSO) != 0) {
1812 tsosegsz = (uint32_t)m->m_pkthdr.tso_segsz <<
1813 JME_TD_MSS_SHIFT;
1814 cflags |= JME_TD_TSO;
1815 } else {
1816 if ((m->m_pkthdr.csum_flags & CSUM_IP) != 0)
1817 cflags |= JME_TD_IPCSUM;
1818 if ((m->m_pkthdr.csum_flags & CSUM_TCP) != 0)
1819 cflags |= JME_TD_TCPCSUM;
1820 if ((m->m_pkthdr.csum_flags & CSUM_UDP) != 0)
1821 cflags |= JME_TD_UDPCSUM;
1822 }
1823 /* Configure VLAN. */
1824 if ((m->m_flags & M_VLANTAG) != 0) {
1825 cflags |= (m->m_pkthdr.ether_vtag & JME_TD_VLAN_MASK);
1826 cflags |= JME_TD_VLAN_TAG;
1827 }
1828
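/*
 * Note (annotation): the first descriptor of a transmission carries
 * no data buffer. Its buflen field holds the TSO MSS, and its
 * addr_hi field is reused to hold the total packet length; the data
 * segments follow in the subsequent descriptors.
 */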
1829 desc = &sc->jme_rdata.jme_tx_ring[prod];
1830 desc->flags = htole32(cflags);
1831 desc->buflen = htole32(tsosegsz);
1832 desc->addr_hi = htole32(m->m_pkthdr.len);
1833 desc->addr_lo = 0;
1834 sc->jme_cdata.jme_tx_cnt++;
1835 JME_DESC_INC(prod, JME_TX_RING_CNT);
1836 for (i = 0; i < nsegs; i++) {
1837 desc = &sc->jme_rdata.jme_tx_ring[prod];
1838 desc->flags = htole32(JME_TD_OWN | JME_TD_64BIT);
1839 desc->buflen = htole32(txsegs[i].ds_len);
1840 desc->addr_hi = htole32(JME_ADDR_HI(txsegs[i].ds_addr));
1841 desc->addr_lo = htole32(JME_ADDR_LO(txsegs[i].ds_addr));
1842 sc->jme_cdata.jme_tx_cnt++;
1843 JME_DESC_INC(prod, JME_TX_RING_CNT);
1844 }
1845
1846 /* Update producer index. */
1847 sc->jme_cdata.jme_tx_prod = prod;
1848 /*
1849 * Finally, request an interrupt and give ownership of the
1850 * first descriptor to the hardware.
1851 */
1852 desc = txd->tx_desc;
1853 desc->flags |= htole32(JME_TD_OWN | JME_TD_INTR);
1854
1855 txd->tx_m = m;
1856 txd->tx_ndesc = nsegs + 1;
1857
1858 /* Sync descriptors. */
1859 bus_dmamap_sync(sc->jme_cdata.jme_tx_tag, txd->tx_dmamap,
1860 BUS_DMASYNC_PREWRITE);
1861 bus_dmamap_sync(sc->jme_cdata.jme_tx_ring_tag,
1862 sc->jme_cdata.jme_tx_ring_map,
1863 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1864
1865 return (0);
1866 }
1867
1868 static void
1869 jme_start(struct ifnet *ifp)
1870 {
1871 struct jme_softc *sc;
1872
1873 sc = ifp->if_softc;
1874 JME_LOCK(sc);
1875 jme_start_locked(ifp);
1876 JME_UNLOCK(sc);
1877 }
1878
1879 static void
1880 jme_start_locked(struct ifnet *ifp)
1881 {
1882 struct jme_softc *sc;
1883 struct mbuf *m_head;
1884 int enq;
1885
1886 sc = ifp->if_softc;
1887
1888 JME_LOCK_ASSERT(sc);
1889
1890 if (sc->jme_cdata.jme_tx_cnt >= JME_TX_DESC_HIWAT)
1891 jme_txeof(sc);
1892
1893 if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
1894 IFF_DRV_RUNNING || (sc->jme_flags & JME_FLAG_LINK) == 0)
1895 return;
1896
1897 for (enq = 0; !IFQ_DRV_IS_EMPTY(&ifp->if_snd); ) {
1898 IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
1899 if (m_head == NULL)
1900 break;
1901 /*
1902 * Pack the data into the transmit ring. If we
1903 * don't have room, set the OACTIVE flag and wait
1904 * for the NIC to drain the ring.
1905 */
1906 if (jme_encap(sc, &m_head)) {
1907 if (m_head == NULL)
1908 break;
1909 IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
1910 ifp->if_drv_flags |= IFF_DRV_OACTIVE;
1911 break;
1912 }
1913
1914 enq++;
1915 /*
1916 * If there's a BPF listener, bounce a copy of this frame
1917 * to him.
1918 */
1919 ETHER_BPF_MTAP(ifp, m_head);
1920 }
1921
1922 if (enq > 0) {
1923 /*
1924 * Reading TXCSR takes a very long time under heavy load,
1925 * so cache the TXCSR value and write it ORed with the
1926 * kick command to the TXCSR. This saves one register
1927 * access cycle.
1928 */
1929 CSR_WRITE_4(sc, JME_TXCSR, sc->jme_txcsr | TXCSR_TX_ENB |
1930 TXCSR_TXQ_N_START(TXCSR_TXQ0));
1931 /* Set a timeout in case the chip goes out to lunch. */
1932 sc->jme_watchdog_timer = JME_TX_TIMEOUT;
1933 }
1934 }
1935
1936 static void
1937 jme_watchdog(struct jme_softc *sc)
1938 {
1939 struct ifnet *ifp;
1940
1941 JME_LOCK_ASSERT(sc);
1942
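/*
 * Note (annotation): a timer value of 0 means the watchdog is
 * disarmed; otherwise decrement it and fall through to the
 * recovery path only once it reaches 0.
 */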
1943 if (sc->jme_watchdog_timer == 0 || --sc->jme_watchdog_timer)
1944 return;
1945
1946 ifp = sc->jme_ifp;
1947 if ((sc->jme_flags & JME_FLAG_LINK) == 0) {
1948 if_printf(sc->jme_ifp, "watchdog timeout (missed link)\n");
1949 if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
1950 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
1951 jme_init_locked(sc);
1952 return;
1953 }
1954 jme_txeof(sc);
1955 if (sc->jme_cdata.jme_tx_cnt == 0) {
1956 if_printf(sc->jme_ifp,
1957 "watchdog timeout (missed Tx interrupts) -- recovering\n");
1958 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
1959 jme_start_locked(ifp);
1960 return;
1961 }
1962
1963 if_printf(sc->jme_ifp, "watchdog timeout\n");
1964 if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
1965 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
1966 jme_init_locked(sc);
1967 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
1968 jme_start_locked(ifp);
1969 }
1970
1971 static int
1972 jme_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
1973 {
1974 struct jme_softc *sc;
1975 struct ifreq *ifr;
1976 struct mii_data *mii;
1977 uint32_t reg;
1978 int error, mask;
1979
1980 sc = ifp->if_softc;
1981 ifr = (struct ifreq *)data;
1982 error = 0;
1983 switch (cmd) {
1984 case SIOCSIFMTU:
1985 if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > JME_JUMBO_MTU ||
1986 ((sc->jme_flags & JME_FLAG_NOJUMBO) != 0 &&
1987 ifr->ifr_mtu > JME_MAX_MTU)) {
1988 error = EINVAL;
1989 break;
1990 }
1991
1992 if (ifp->if_mtu != ifr->ifr_mtu) {
1993 /*
1994 * No special configuration is required when the interface
1995 * MTU is changed, but the availability of TSO/Tx checksum
1996 * offload should be checked against the new MTU size as
1997 * the Tx FIFO size is just 2K.
1998 */
1999 JME_LOCK(sc);
2000 if (ifr->ifr_mtu >= JME_TX_FIFO_SIZE) {
2001 ifp->if_capenable &=
2002 ~(IFCAP_TXCSUM | IFCAP_TSO4);
2003 ifp->if_hwassist &=
2004 ~(JME_CSUM_FEATURES | CSUM_TSO);
2005 VLAN_CAPABILITIES(ifp);
2006 }
2007 ifp->if_mtu = ifr->ifr_mtu;
2008 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
2009 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
2010 jme_init_locked(sc);
2011 }
2012 JME_UNLOCK(sc);
2013 }
2014 break;
2015 case SIOCSIFFLAGS:
2016 JME_LOCK(sc);
2017 if ((ifp->if_flags & IFF_UP) != 0) {
2018 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
2019 if (((ifp->if_flags ^ sc->jme_if_flags)
2020 & (IFF_PROMISC | IFF_ALLMULTI)) != 0)
2021 jme_set_filter(sc);
2022 } else {
2023 if ((sc->jme_flags & JME_FLAG_DETACH) == 0)
2024 jme_init_locked(sc);
2025 }
2026 } else {
2027 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
2028 jme_stop(sc);
2029 }
2030 sc->jme_if_flags = ifp->if_flags;
2031 JME_UNLOCK(sc);
2032 break;
2033 case SIOCADDMULTI:
2034 case SIOCDELMULTI:
2035 JME_LOCK(sc);
2036 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
2037 jme_set_filter(sc);
2038 JME_UNLOCK(sc);
2039 break;
2040 case SIOCSIFMEDIA:
2041 case SIOCGIFMEDIA:
2042 mii = device_get_softc(sc->jme_miibus);
2043 error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
2044 break;
2045 case SIOCSIFCAP:
2046 JME_LOCK(sc);
2047 mask = ifr->ifr_reqcap ^ ifp->if_capenable;
2048 if ((mask & IFCAP_TXCSUM) != 0 &&
2049 ifp->if_mtu < JME_TX_FIFO_SIZE) {
2050 if ((IFCAP_TXCSUM & ifp->if_capabilities) != 0) {
2051 ifp->if_capenable ^= IFCAP_TXCSUM;
2052 if ((IFCAP_TXCSUM & ifp->if_capenable) != 0)
2053 ifp->if_hwassist |= JME_CSUM_FEATURES;
2054 else
2055 ifp->if_hwassist &= ~JME_CSUM_FEATURES;
2056 }
2057 }
2058 if ((mask & IFCAP_RXCSUM) != 0 &&
2059 (IFCAP_RXCSUM & ifp->if_capabilities) != 0) {
2060 ifp->if_capenable ^= IFCAP_RXCSUM;
2061 reg = CSR_READ_4(sc, JME_RXMAC);
2062 reg &= ~RXMAC_CSUM_ENB;
2063 if ((ifp->if_capenable & IFCAP_RXCSUM) != 0)
2064 reg |= RXMAC_CSUM_ENB;
2065 CSR_WRITE_4(sc, JME_RXMAC, reg);
2066 }
2067 if ((mask & IFCAP_TSO4) != 0 &&
2068 ifp->if_mtu < JME_TX_FIFO_SIZE) {
2069 if ((IFCAP_TSO4 & ifp->if_capabilities) != 0) {
2070 ifp->if_capenable ^= IFCAP_TSO4;
2071 if ((IFCAP_TSO4 & ifp->if_capenable) != 0)
2072 ifp->if_hwassist |= CSUM_TSO;
2073 else
2074 ifp->if_hwassist &= ~CSUM_TSO;
2075 }
2076 }
2077 if ((mask & IFCAP_WOL_MAGIC) != 0 &&
2078 (IFCAP_WOL_MAGIC & ifp->if_capabilities) != 0)
2079 ifp->if_capenable ^= IFCAP_WOL_MAGIC;
2080 if ((mask & IFCAP_VLAN_HWCSUM) != 0 &&
2081 (ifp->if_capabilities & IFCAP_VLAN_HWCSUM) != 0)
2082 ifp->if_capenable ^= IFCAP_VLAN_HWCSUM;
2083 if ((mask & IFCAP_VLAN_HWTSO) != 0 &&
2084 (ifp->if_capabilities & IFCAP_VLAN_HWTSO) != 0)
2085 ifp->if_capenable ^= IFCAP_VLAN_HWTSO;
2086 if ((mask & IFCAP_VLAN_HWTAGGING) != 0 &&
2087 (IFCAP_VLAN_HWTAGGING & ifp->if_capabilities) != 0) {
2088 ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
2089 jme_set_vlan(sc);
2090 }
2091 JME_UNLOCK(sc);
2092 VLAN_CAPABILITIES(ifp);
2093 break;
2094 default:
2095 error = ether_ioctl(ifp, cmd, data);
2096 break;
2097 }
2098
2099 return (error);
2100 }
2101
2102 static void
2103 jme_mac_config(struct jme_softc *sc)
2104 {
2105 struct mii_data *mii;
2106 uint32_t ghc, gpreg, rxmac, txmac, txpause;
2107 uint32_t txclk;
2108
2109 JME_LOCK_ASSERT(sc);
2110
2111 mii = device_get_softc(sc->jme_miibus);
2112
2113 CSR_WRITE_4(sc, JME_GHC, GHC_RESET);
2114 DELAY(10);
2115 CSR_WRITE_4(sc, JME_GHC, 0);
2116 ghc = 0;
2117 txclk = 0;
2118 rxmac = CSR_READ_4(sc, JME_RXMAC);
2119 rxmac &= ~RXMAC_FC_ENB;
2120 txmac = CSR_READ_4(sc, JME_TXMAC);
2121 txmac &= ~(TXMAC_CARRIER_EXT | TXMAC_FRAME_BURST);
2122 txpause = CSR_READ_4(sc, JME_TXPFC);
2123 txpause &= ~TXPFC_PAUSE_ENB;
2124 if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0) {
2125 ghc |= GHC_FULL_DUPLEX;
2126 rxmac &= ~RXMAC_COLL_DET_ENB;
2127 txmac &= ~(TXMAC_COLL_ENB | TXMAC_CARRIER_SENSE |
2128 TXMAC_BACKOFF | TXMAC_CARRIER_EXT |
2129 TXMAC_FRAME_BURST);
2130 if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_TXPAUSE) != 0)
2131 txpause |= TXPFC_PAUSE_ENB;
2132 if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_RXPAUSE) != 0)
2133 rxmac |= RXMAC_FC_ENB;
2134 /* Disable retry transmit timer/retry limit. */
2135 CSR_WRITE_4(sc, JME_TXTRHD, CSR_READ_4(sc, JME_TXTRHD) &
2136 ~(TXTRHD_RT_PERIOD_ENB | TXTRHD_RT_LIMIT_ENB));
2137 } else {
2138 rxmac |= RXMAC_COLL_DET_ENB;
2139 txmac |= TXMAC_COLL_ENB | TXMAC_CARRIER_SENSE | TXMAC_BACKOFF;
2140 /* Enable retry transmit timer/retry limit. */
2141 CSR_WRITE_4(sc, JME_TXTRHD, CSR_READ_4(sc, JME_TXTRHD) |
2142 TXTRHD_RT_PERIOD_ENB | TXTRHD_RT_LIMIT_ENB);
2143 }
2144 /* Reprogram Tx/Rx MACs with resolved speed/duplex. */
2145 switch (IFM_SUBTYPE(mii->mii_media_active)) {
2146 case IFM_10_T:
2147 ghc |= GHC_SPEED_10;
2148 txclk |= GHC_TX_OFFLD_CLK_100 | GHC_TX_MAC_CLK_100;
2149 break;
2150 case IFM_100_TX:
2151 ghc |= GHC_SPEED_100;
2152 txclk |= GHC_TX_OFFLD_CLK_100 | GHC_TX_MAC_CLK_100;
2153 break;
2154 case IFM_1000_T:
2155 if ((sc->jme_flags & JME_FLAG_FASTETH) != 0)
2156 break;
2157 ghc |= GHC_SPEED_1000;
2158 txclk |= GHC_TX_OFFLD_CLK_1000 | GHC_TX_MAC_CLK_1000;
2159 if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) == 0)
2160 txmac |= TXMAC_CARRIER_EXT | TXMAC_FRAME_BURST;
2161 break;
2162 default:
2163 break;
2164 }
2165 if (sc->jme_rev == DEVICEID_JMC250 &&
2166 sc->jme_chip_rev == DEVICEREVID_JMC250_A2) {
2167 /*
2168 * Work around an occasional packet loss issue of the
2169 * JMC250 A2 when it runs on half-duplex media.
2170 */
2171 gpreg = CSR_READ_4(sc, JME_GPREG1);
2172 if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0)
2173 gpreg &= ~GPREG1_HDPX_FIX;
2174 else
2175 gpreg |= GPREG1_HDPX_FIX;
2176 CSR_WRITE_4(sc, JME_GPREG1, gpreg);
2177 /* Work around CRC errors at 100Mbps on the JMC250 A2. */
2178 if (IFM_SUBTYPE(mii->mii_media_active) == IFM_100_TX) {
2179 /* Extend interface FIFO depth. */
2180 jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr,
2181 0x1B, 0x0000);
2182 } else {
2183 /* Select default interface FIFO depth. */
2184 jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr,
2185 0x1B, 0x0004);
2186 }
2187 }
2188 if ((sc->jme_flags & JME_FLAG_TXCLK) != 0)
2189 ghc |= txclk;
2190 CSR_WRITE_4(sc, JME_GHC, ghc);
2191 CSR_WRITE_4(sc, JME_RXMAC, rxmac);
2192 CSR_WRITE_4(sc, JME_TXMAC, txmac);
2193 CSR_WRITE_4(sc, JME_TXPFC, txpause);
2194 }
2195
2196 static void
2197 jme_link_task(void *arg, int pending)
2198 {
2199 struct jme_softc *sc;
2200 struct mii_data *mii;
2201 struct ifnet *ifp;
2202 struct jme_txdesc *txd;
2203 bus_addr_t paddr;
2204 int i;
2205
2206 sc = (struct jme_softc *)arg;
2207
2208 JME_LOCK(sc);
2209 mii = device_get_softc(sc->jme_miibus);
2210 ifp = sc->jme_ifp;
2211 if (mii == NULL || ifp == NULL ||
2212 (ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
2213 JME_UNLOCK(sc);
2214 return;
2215 }
2216
2217 sc->jme_flags &= ~JME_FLAG_LINK;
2218 if ((mii->mii_media_status & IFM_AVALID) != 0) {
2219 switch (IFM_SUBTYPE(mii->mii_media_active)) {
2220 case IFM_10_T:
2221 case IFM_100_TX:
2222 sc->jme_flags |= JME_FLAG_LINK;
2223 break;
2224 case IFM_1000_T:
2225 if ((sc->jme_flags & JME_FLAG_FASTETH) != 0)
2226 break;
2227 sc->jme_flags |= JME_FLAG_LINK;
2228 break;
2229 default:
2230 break;
2231 }
2232 }
2233
2234 /*
2235 * Disabling the Rx/Tx MACs has the side-effect of resetting
2236 * the JME_TXNDA/JME_RXNDA registers to the first Tx/Rx
2237 * descriptor address. So the driver should reset its
2238 * internal producer/consumer pointers and reclaim any
2239 * allocated resources. Note that just saving the values of
2240 * the JME_TXNDA and JME_RXNDA registers before stopping the
2241 * MACs and restoring them afterwards is not sufficient to
2242 * guarantee correct MAC state, because stopping MAC
2243 * operation can take a while and the hardware might have
2244 * updated the JME_TXNDA/JME_RXNDA registers during the
2245 * stop operation.
2246 */
2247 /* Block execution of task. */
2248 taskqueue_block(sc->jme_tq);
2249 /* Disable interrupts and stop driver. */
2250 CSR_WRITE_4(sc, JME_INTR_MASK_CLR, JME_INTRS);
2251 ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
2252 callout_stop(&sc->jme_tick_ch);
2253 sc->jme_watchdog_timer = 0;
2254
2255 /* Stop receiver/transmitter. */
2256 jme_stop_rx(sc);
2257 jme_stop_tx(sc);
2258
2259 /* XXX Drain all queued tasks. */
2260 JME_UNLOCK(sc);
2261 taskqueue_drain(sc->jme_tq, &sc->jme_int_task);
2262 JME_LOCK(sc);
2263
2264 if (sc->jme_cdata.jme_rxhead != NULL)
2265 m_freem(sc->jme_cdata.jme_rxhead);
2266 JME_RXCHAIN_RESET(sc);
2267 jme_txeof(sc);
2268 if (sc->jme_cdata.jme_tx_cnt != 0) {
2269 /* Remove queued packets for transmit. */
2270 for (i = 0; i < JME_TX_RING_CNT; i++) {
2271 txd = &sc->jme_cdata.jme_txdesc[i];
2272 if (txd->tx_m != NULL) {
2273 bus_dmamap_sync(
2274 sc->jme_cdata.jme_tx_tag,
2275 txd->tx_dmamap,
2276 BUS_DMASYNC_POSTWRITE);
2277 bus_dmamap_unload(
2278 sc->jme_cdata.jme_tx_tag,
2279 txd->tx_dmamap);
2280 m_freem(txd->tx_m);
2281 txd->tx_m = NULL;
2282 txd->tx_ndesc = 0;
2283 if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
2284 }
2285 }
2286 }
2287
2288 /*
2289 * Reuse configured Rx descriptors and reset
2290 * producer/consumer index.
2291 */
2292 sc->jme_cdata.jme_rx_cons = 0;
2293 sc->jme_morework = 0;
2294 jme_init_tx_ring(sc);
2295 /* Initialize shadow status block. */
2296 jme_init_ssb(sc);
2297
2298 /* Program MAC with resolved speed/duplex/flow-control. */
2299 if ((sc->jme_flags & JME_FLAG_LINK) != 0) {
2300 jme_mac_config(sc);
2301 jme_stats_clear(sc);
2302
2303 CSR_WRITE_4(sc, JME_RXCSR, sc->jme_rxcsr);
2304 CSR_WRITE_4(sc, JME_TXCSR, sc->jme_txcsr);
2305
2306 /* Set Tx ring address to the hardware. */
2307 paddr = JME_TX_RING_ADDR(sc, 0);
2308 CSR_WRITE_4(sc, JME_TXDBA_HI, JME_ADDR_HI(paddr));
2309 CSR_WRITE_4(sc, JME_TXDBA_LO, JME_ADDR_LO(paddr));
2310
2311 /* Set Rx ring address to the hardware. */
2312 paddr = JME_RX_RING_ADDR(sc, 0);
2313 CSR_WRITE_4(sc, JME_RXDBA_HI, JME_ADDR_HI(paddr));
2314 CSR_WRITE_4(sc, JME_RXDBA_LO, JME_ADDR_LO(paddr));
2315
2316 /* Restart receiver/transmitter. */
2317 CSR_WRITE_4(sc, JME_RXCSR, sc->jme_rxcsr | RXCSR_RX_ENB |
2318 RXCSR_RXQ_START);
2319 CSR_WRITE_4(sc, JME_TXCSR, sc->jme_txcsr | TXCSR_TX_ENB);
2320 /* Lastly enable TX/RX clock. */
2321 if ((sc->jme_flags & JME_FLAG_TXCLK) != 0)
2322 CSR_WRITE_4(sc, JME_GHC,
2323 CSR_READ_4(sc, JME_GHC) & ~GHC_TX_MAC_CLK_DIS);
2324 if ((sc->jme_flags & JME_FLAG_RXCLK) != 0)
2325 CSR_WRITE_4(sc, JME_GPREG1,
2326 CSR_READ_4(sc, JME_GPREG1) & ~GPREG1_RX_MAC_CLK_DIS);
2327 }
2328
2329 ifp->if_drv_flags |= IFF_DRV_RUNNING;
2330 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
2331 callout_reset(&sc->jme_tick_ch, hz, jme_tick, sc);
2332 /* Unblock execution of task. */
2333 taskqueue_unblock(sc->jme_tq);
2334 /* Reenable interrupts. */
2335 CSR_WRITE_4(sc, JME_INTR_MASK_SET, JME_INTRS);
2336
2337 JME_UNLOCK(sc);
2338 }
2339
2340 static int
2341 jme_intr(void *arg)
2342 {
2343 struct jme_softc *sc;
2344 uint32_t status;
2345
2346 sc = (struct jme_softc *)arg;
2347
2348 status = CSR_READ_4(sc, JME_INTR_REQ_STATUS);
2349 if (status == 0 || status == 0xFFFFFFFF)
2350 return (FILTER_STRAY);
2351 /* Disable interrupts. */
2352 CSR_WRITE_4(sc, JME_INTR_MASK_CLR, JME_INTRS);
2353 taskqueue_enqueue(sc->jme_tq, &sc->jme_int_task);
2354
2355 return (FILTER_HANDLED);
2356 }
2357
2358 static void
2359 jme_int_task(void *arg, int pending)
2360 {
2361 struct jme_softc *sc;
2362 struct ifnet *ifp;
2363 uint32_t status;
2364 int more;
2365
2366 sc = (struct jme_softc *)arg;
2367 ifp = sc->jme_ifp;
2368
2369 JME_LOCK(sc);
2370 status = CSR_READ_4(sc, JME_INTR_STATUS);
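/*
 * Note (annotation): if the previous pass hit the Rx processing
 * limit, pretend the Rx coalescing interrupts fired again so the
 * leftover frames are drained below.
 */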
2371 if (sc->jme_morework != 0) {
2372 sc->jme_morework = 0;
2373 status |= INTR_RXQ_COAL | INTR_RXQ_COAL_TO;
2374 }
2375 if ((status & JME_INTRS) == 0 || status == 0xFFFFFFFF)
2376 goto done;
2377 /* Reset PCC counter/timer and Ack interrupts. */
2378 status &= ~(INTR_TXQ_COMP | INTR_RXQ_COMP);
2379 if ((status & (INTR_TXQ_COAL | INTR_TXQ_COAL_TO)) != 0)
2380 status |= INTR_TXQ_COAL | INTR_TXQ_COAL_TO | INTR_TXQ_COMP;
2381 if ((status & (INTR_RXQ_COAL | INTR_RXQ_COAL_TO)) != 0)
2382 status |= INTR_RXQ_COAL | INTR_RXQ_COAL_TO | INTR_RXQ_COMP;
2383 CSR_WRITE_4(sc, JME_INTR_STATUS, status);
2384 more = 0;
2385 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
2386 if ((status & (INTR_RXQ_COAL | INTR_RXQ_COAL_TO)) != 0) {
2387 more = jme_rxintr(sc, sc->jme_process_limit);
2388 if (more != 0)
2389 sc->jme_morework = 1;
2390 }
2391 if ((status & INTR_RXQ_DESC_EMPTY) != 0) {
2392 /*
2393 * Notify the hardware of the availability of
2394 * new Rx buffers.
2395 * Reading RXCSR takes a very long time under
2396 * heavy load, so cache the RXCSR value and
2397 * write it ORed with the kick command to
2398 * the RXCSR. This saves one register access
2399 * cycle.
2400 */
2401 CSR_WRITE_4(sc, JME_RXCSR, sc->jme_rxcsr |
2402 RXCSR_RX_ENB | RXCSR_RXQ_START);
2403 }
2404 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
2405 jme_start_locked(ifp);
2406 }
2407
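/*
 * Note (annotation): if work is still pending (the Rx limit was
 * hit or new events were latched), reschedule the task and leave
 * interrupts disabled; they are re-enabled only once the pending
 * work is fully drained.
 */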
2408 if (more != 0 || (CSR_READ_4(sc, JME_INTR_STATUS) & JME_INTRS) != 0) {
2409 taskqueue_enqueue(sc->jme_tq, &sc->jme_int_task);
2410 JME_UNLOCK(sc);
2411 return;
2412 }
2413 done:
2414 JME_UNLOCK(sc);
2415
2416 /* Reenable interrupts. */
2417 CSR_WRITE_4(sc, JME_INTR_MASK_SET, JME_INTRS);
2418 }
2419
2420 static void
2421 jme_txeof(struct jme_softc *sc)
2422 {
2423 struct ifnet *ifp;
2424 struct jme_txdesc *txd;
2425 uint32_t status;
2426 int cons, nsegs;
2427
2428 JME_LOCK_ASSERT(sc);
2429
2430 ifp = sc->jme_ifp;
2431
2432 cons = sc->jme_cdata.jme_tx_cons;
2433 if (cons == sc->jme_cdata.jme_tx_prod)
2434 return;
2435
2436 bus_dmamap_sync(sc->jme_cdata.jme_tx_ring_tag,
2437 sc->jme_cdata.jme_tx_ring_map,
2438 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
2439
2440 /*
2441 * Go through our Tx list and free mbufs for those
2442 * frames which have been transmitted.
2443 */
2444 for (; cons != sc->jme_cdata.jme_tx_prod;) {
2445 txd = &sc->jme_cdata.jme_txdesc[cons];
2446 status = le32toh(txd->tx_desc->flags);
2447 if ((status & JME_TD_OWN) == JME_TD_OWN)
2448 break;
2449
2450 if ((status & (JME_TD_TMOUT | JME_TD_RETRY_EXP)) != 0)
2451 if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
2452 else {
2453 if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);
2454 if ((status & JME_TD_COLLISION) != 0)
2455 if_inc_counter(ifp, IFCOUNTER_COLLISIONS,
2456 le32toh(txd->tx_desc->buflen) &
2457 JME_TD_BUF_LEN_MASK);
2458 }
2459 /*
2460 * Only the first descriptor of a multi-descriptor
2461 * transmission is updated, so the driver has to skip the
2462 * entire chain of buffers for the transmitted frame. In
2463 * other words, the JME_TD_OWN bit is valid only in the
2464 * first descriptor of a multi-descriptor transmission.
2465 */
2466 for (nsegs = 0; nsegs < txd->tx_ndesc; nsegs++) {
2467 sc->jme_rdata.jme_tx_ring[cons].flags = 0;
2468 JME_DESC_INC(cons, JME_TX_RING_CNT);
2469 }
2470
2471 /* Reclaim transferred mbufs. */
2472 bus_dmamap_sync(sc->jme_cdata.jme_tx_tag, txd->tx_dmamap,
2473 BUS_DMASYNC_POSTWRITE);
2474 bus_dmamap_unload(sc->jme_cdata.jme_tx_tag, txd->tx_dmamap);
2475
2476 KASSERT(txd->tx_m != NULL,
2477 ("%s: freeing NULL mbuf!\n", __func__));
2478 m_freem(txd->tx_m);
2479 txd->tx_m = NULL;
2480 sc->jme_cdata.jme_tx_cnt -= txd->tx_ndesc;
2481 KASSERT(sc->jme_cdata.jme_tx_cnt >= 0,
2482 ("%s: Active Tx desc counter was garbled\n", __func__));
2483 txd->tx_ndesc = 0;
2484 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
2485 }
2486 sc->jme_cdata.jme_tx_cons = cons;
2487 /* Unarm the watchdog timer when there are no pending descriptors in the queue. */
2488 if (sc->jme_cdata.jme_tx_cnt == 0)
2489 sc->jme_watchdog_timer = 0;
2490
2491 bus_dmamap_sync(sc->jme_cdata.jme_tx_ring_tag,
2492 sc->jme_cdata.jme_tx_ring_map,
2493 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2494 }
2495
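/*
 * Note (annotation): this gives a descriptor back to the hardware
 * so its already-attached Rx buffer is reused, e.g. after an Rx
 * error or an mbuf allocation failure.
 */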
2496 static __inline void
2497 jme_discard_rxbuf(struct jme_softc *sc, int cons)
2498 {
2499 struct jme_desc *desc;
2500
2501 desc = &sc->jme_rdata.jme_rx_ring[cons];
2502 desc->flags = htole32(JME_RD_OWN | JME_RD_INTR | JME_RD_64BIT);
2503 desc->buflen = htole32(MCLBYTES);
2504 }
2505
2506 /* Receive a frame. */
2507 static void
2508 jme_rxeof(struct jme_softc *sc)
2509 {
2510 struct ifnet *ifp;
2511 struct jme_desc *desc;
2512 struct jme_rxdesc *rxd;
2513 struct mbuf *mp, *m;
2514 uint32_t flags, status;
2515 int cons, count, nsegs;
2516
2517 JME_LOCK_ASSERT(sc);
2518
2519 ifp = sc->jme_ifp;
2520
2521 cons = sc->jme_cdata.jme_rx_cons;
2522 desc = &sc->jme_rdata.jme_rx_ring[cons];
2523 flags = le32toh(desc->flags);
2524 status = le32toh(desc->buflen);
2525 nsegs = JME_RX_NSEGS(status);
2526 sc->jme_cdata.jme_rxlen = JME_RX_BYTES(status) - JME_RX_PAD_BYTES;
2527 if ((status & JME_RX_ERR_STAT) != 0) {
2528 if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
2529 jme_discard_rxbuf(sc, sc->jme_cdata.jme_rx_cons);
2530 #ifdef JME_SHOW_ERRORS
2531 device_printf(sc->jme_dev, "%s : receive error = 0x%b\n",
2532 __func__, JME_RX_ERR(status), JME_RX_ERR_BITS);
2533 #endif
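/*
 * Note (annotation): skip past all segments of the errored frame
 * and wrap the consumer index around the ring before bailing out.
 */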
2534 sc->jme_cdata.jme_rx_cons += nsegs;
2535 sc->jme_cdata.jme_rx_cons %= JME_RX_RING_CNT;
2536 return;
2537 }
2538
2539 for (count = 0; count < nsegs; count++,
2540 JME_DESC_INC(cons, JME_RX_RING_CNT)) {
2541 rxd = &sc->jme_cdata.jme_rxdesc[cons];
2542 mp = rxd->rx_m;
2543 /* Add a new receive buffer to the ring. */
2544 if (jme_newbuf(sc, rxd) != 0) {
2545 if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1);
2546 /* Reuse buffer. */
2547 for (; count < nsegs; count++) {
2548 jme_discard_rxbuf(sc, cons);
2549 JME_DESC_INC(cons, JME_RX_RING_CNT);
2550 }
2551 if (sc->jme_cdata.jme_rxhead != NULL) {
2552 m_freem(sc->jme_cdata.jme_rxhead);
2553 JME_RXCHAIN_RESET(sc);
2554 }
2555 break;
2556 }
2557
2558 /*
2559 * Assume we've received a full-sized frame.
2560 * The actual size is fixed up when we encounter the
2561 * end of a multi-segmented frame.
2562 */
2563 mp->m_len = MCLBYTES;
2564
2565 /* Chain received mbufs. */
2566 if (sc->jme_cdata.jme_rxhead == NULL) {
2567 sc->jme_cdata.jme_rxhead = mp;
2568 sc->jme_cdata.jme_rxtail = mp;
2569 } else {
2570 /*
2571 * The receive processor can handle a maximum frame
2572 * size of 65535 bytes.
2573 */
2574 mp->m_flags &= ~M_PKTHDR;
2575 sc->jme_cdata.jme_rxtail->m_next = mp;
2576 sc->jme_cdata.jme_rxtail = mp;
2577 }
2578
2579 if (count == nsegs - 1) {
2580 /* Last desc. for this frame. */
2581 m = sc->jme_cdata.jme_rxhead;
2582 m->m_flags |= M_PKTHDR;
2583 m->m_pkthdr.len = sc->jme_cdata.jme_rxlen;
2584 if (nsegs > 1) {
2585 /* Set first mbuf size. */
2586 m->m_len = MCLBYTES - JME_RX_PAD_BYTES;
2587 /* Set last mbuf size. */
2588 mp->m_len = sc->jme_cdata.jme_rxlen -
2589 ((MCLBYTES - JME_RX_PAD_BYTES) +
2590 (MCLBYTES * (nsegs - 2)));
2591 } else
2592 m->m_len = sc->jme_cdata.jme_rxlen;
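/*
 * Worked example (illustrative, assuming 2048 byte clusters): for
 * a 3000 byte frame on the wire (2990 bytes of data after the
 * 10 byte pad), nsegs is 2; the first mbuf gets 2048 - 10 = 2038
 * bytes and the last one the remaining 2990 - 2038 = 952 bytes.
 */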
2593 m->m_pkthdr.rcvif = ifp;
2594
2595 /*
2596 * Account for the 10 bytes of auto padding which are
2597 * used to align the IP header on a 32bit boundary.
2598 * Also note that the CRC bytes are automatically
2599 * removed by the hardware.
2600 */
2601 m->m_data += JME_RX_PAD_BYTES;
2602
2603 /* Set checksum information. */
2604 if ((ifp->if_capenable & IFCAP_RXCSUM) != 0 &&
2605 (flags & JME_RD_IPV4) != 0) {
2606 m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
2607 if ((flags & JME_RD_IPCSUM) != 0)
2608 m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
2609 if (((flags & JME_RD_MORE_FRAG) == 0) &&
2610 ((flags & (JME_RD_TCP | JME_RD_TCPCSUM)) ==
2611 (JME_RD_TCP | JME_RD_TCPCSUM) ||
2612 (flags & (JME_RD_UDP | JME_RD_UDPCSUM)) ==
2613 (JME_RD_UDP | JME_RD_UDPCSUM))) {
2614 m->m_pkthdr.csum_flags |=
2615 CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
2616 m->m_pkthdr.csum_data = 0xffff;
2617 }
2618 }
2619
2620 /* Check for VLAN tagged packets. */
2621 if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0 &&
2622 (flags & JME_RD_VLAN_TAG) != 0) {
2623 m->m_pkthdr.ether_vtag =
2624 flags & JME_RD_VLAN_MASK;
2625 m->m_flags |= M_VLANTAG;
2626 }
2627
2628 if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);
2629 /* Pass it on. */
2630 JME_UNLOCK(sc);
2631 (*ifp->if_input)(ifp, m);
2632 JME_LOCK(sc);
2633
2634 /* Reset mbuf chains. */
2635 JME_RXCHAIN_RESET(sc);
2636 }
2637 }
2638
2639 sc->jme_cdata.jme_rx_cons += nsegs;
2640 sc->jme_cdata.jme_rx_cons %= JME_RX_RING_CNT;
2641 }
2642
2643 static int
2644 jme_rxintr(struct jme_softc *sc, int count)
2645 {
2646 struct jme_desc *desc;
2647 int nsegs, prog, pktlen;
2648
2649 bus_dmamap_sync(sc->jme_cdata.jme_rx_ring_tag,
2650 sc->jme_cdata.jme_rx_ring_map,
2651 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
2652
2653 for (prog = 0; count > 0; prog++) {
2654 desc = &sc->jme_rdata.jme_rx_ring[sc->jme_cdata.jme_rx_cons];
2655 if ((le32toh(desc->flags) & JME_RD_OWN) == JME_RD_OWN)
2656 break;
2657 if ((le32toh(desc->buflen) & JME_RD_VALID) == 0)
2658 break;
2659 nsegs = JME_RX_NSEGS(le32toh(desc->buflen));
2660 /*
2661 * Check the number of segments against the received
2662 * bytes. A non-matching value would indicate that the
2663 * hardware is still updating the Rx descriptors. I'm
2664 * not sure whether this check is needed.
2665 */
2666 pktlen = JME_RX_BYTES(le32toh(desc->buflen));
2667 if (nsegs != howmany(pktlen, MCLBYTES))
2668 break;
2669 prog++;
2670 /* Received a frame. */
2671 jme_rxeof(sc);
2672 count -= nsegs;
2673 }
2674
2675 if (prog > 0)
2676 bus_dmamap_sync(sc->jme_cdata.jme_rx_ring_tag,
2677 sc->jme_cdata.jme_rx_ring_map,
2678 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2679
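/*
 * Note (annotation): a non-zero return (EAGAIN) tells the caller
 * that the processing limit was exhausted and more frames may
 * still be pending.
 */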
2680 return (count > 0 ? 0 : EAGAIN);
2681 }
2682
2683 static void
2684 jme_tick(void *arg)
2685 {
2686 struct jme_softc *sc;
2687 struct mii_data *mii;
2688
2689 sc = (struct jme_softc *)arg;
2690
2691 JME_LOCK_ASSERT(sc);
2692
2693 mii = device_get_softc(sc->jme_miibus);
2694 mii_tick(mii);
2695 /*
2696 * Reclaim Tx buffers that have been completed. It's not
2697 * needed here, but it releases allocated mbuf chains
2698 * faster and bounds the maximum delay to one hz tick.
2699 */
2700 jme_txeof(sc);
2701 jme_stats_update(sc);
2702 jme_watchdog(sc);
2703 callout_reset(&sc->jme_tick_ch, hz, jme_tick, sc);
2704 }
2705
2706 static void
2707 jme_reset(struct jme_softc *sc)
2708 {
2709 uint32_t ghc, gpreg;
2710
2711 /* Stop receiver, transmitter. */
2712 jme_stop_rx(sc);
2713 jme_stop_tx(sc);
2714
2715 /* Reset controller. */
2716 CSR_WRITE_4(sc, JME_GHC, GHC_RESET);
2717 CSR_READ_4(sc, JME_GHC);
2718 DELAY(10);
2719 /*
2720 * Work around Rx FIFO overruns seen under certain conditions.
2721 * Explicitly synchronize the TX/RX clocks. The TX/RX clocks
2722 * should be enabled only after enabling the TX/RX MACs.
2723 */
2724 if ((sc->jme_flags & (JME_FLAG_TXCLK | JME_FLAG_RXCLK)) != 0) {
2725 /* Disable TX clock. */
2726 CSR_WRITE_4(sc, JME_GHC, GHC_RESET | GHC_TX_MAC_CLK_DIS);
2727 /* Disable RX clock. */
2728 gpreg = CSR_READ_4(sc, JME_GPREG1);
2729 CSR_WRITE_4(sc, JME_GPREG1, gpreg | GPREG1_RX_MAC_CLK_DIS);
2730 gpreg = CSR_READ_4(sc, JME_GPREG1);
2731 /* De-assert RESET but still disable TX clock. */
2732 CSR_WRITE_4(sc, JME_GHC, GHC_TX_MAC_CLK_DIS);
2733 ghc = CSR_READ_4(sc, JME_GHC);
2734
2735 /* Enable TX clock. */
2736 CSR_WRITE_4(sc, JME_GHC, ghc & ~GHC_TX_MAC_CLK_DIS);
2737 /* Enable RX clock. */
2738 CSR_WRITE_4(sc, JME_GPREG1, gpreg & ~GPREG1_RX_MAC_CLK_DIS);
2739 CSR_READ_4(sc, JME_GPREG1);
2740
2741 /* Disable TX/RX clock again. */
2742 CSR_WRITE_4(sc, JME_GHC, GHC_TX_MAC_CLK_DIS);
2743 CSR_WRITE_4(sc, JME_GPREG1, gpreg | GPREG1_RX_MAC_CLK_DIS);
2744 } else
2745 CSR_WRITE_4(sc, JME_GHC, 0);
2746 CSR_READ_4(sc, JME_GHC);
2747 DELAY(10);
2748 }
2749
2750 static void
2751 jme_init(void *xsc)
2752 {
2753 struct jme_softc *sc;
2754
2755 sc = (struct jme_softc *)xsc;
2756 JME_LOCK(sc);
2757 jme_init_locked(sc);
2758 JME_UNLOCK(sc);
2759 }
2760
2761 static void
2762 jme_init_locked(struct jme_softc *sc)
2763 {
2764 struct ifnet *ifp;
2765 struct mii_data *mii;
2766 bus_addr_t paddr;
2767 uint32_t reg;
2768 int error;
2769
2770 JME_LOCK_ASSERT(sc);
2771
2772 ifp = sc->jme_ifp;
2773 mii = device_get_softc(sc->jme_miibus);
2774
2775 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
2776 return;
2777 /*
2778 * Cancel any pending I/O.
2779 */
2780 jme_stop(sc);
2781
2782 /*
2783 * Reset the chip to a known state.
2784 */
2785 jme_reset(sc);
2786
2787 /* Init descriptors. */
2788 error = jme_init_rx_ring(sc);
2789 if (error != 0) {
2790 device_printf(sc->jme_dev,
2791 "%s: initialization failed: no memory for Rx buffers.\n",
2792 __func__);
2793 jme_stop(sc);
2794 return;
2795 }
2796 jme_init_tx_ring(sc);
2797 /* Initialize shadow status block. */
2798 jme_init_ssb(sc);
2799
2800 /* Reprogram the station address. */
2801 jme_set_macaddr(sc, IF_LLADDR(sc->jme_ifp));
2802
2803 /*
2804 * Configure Tx queue.
2805 * Tx priority queue weight value : 0
2806 * Tx FIFO threshold for processing next packet : 16QW
2807 * Maximum Tx DMA length : 512
2808 * Allow Tx DMA burst.
2809 */
2810 sc->jme_txcsr = TXCSR_TXQ_N_SEL(TXCSR_TXQ0);
2811 sc->jme_txcsr |= TXCSR_TXQ_WEIGHT(TXCSR_TXQ_WEIGHT_MIN);
2812 sc->jme_txcsr |= TXCSR_FIFO_THRESH_16QW;
2813 sc->jme_txcsr |= sc->jme_tx_dma_size;
2814 sc->jme_txcsr |= TXCSR_DMA_BURST;
2815 CSR_WRITE_4(sc, JME_TXCSR, sc->jme_txcsr);
2816
2817 /* Set Tx descriptor counter. */
2818 CSR_WRITE_4(sc, JME_TXQDC, JME_TX_RING_CNT);
2819
2820 /* Set Tx ring address to the hardware. */
2821 paddr = JME_TX_RING_ADDR(sc, 0);
2822 CSR_WRITE_4(sc, JME_TXDBA_HI, JME_ADDR_HI(paddr));
2823 CSR_WRITE_4(sc, JME_TXDBA_LO, JME_ADDR_LO(paddr));
2824
2825 /* Configure TxMAC parameters. */
2826 reg = TXMAC_IFG1_DEFAULT | TXMAC_IFG2_DEFAULT | TXMAC_IFG_ENB;
2827 reg |= TXMAC_THRESH_1_PKT;
2828 reg |= TXMAC_CRC_ENB | TXMAC_PAD_ENB;
2829 CSR_WRITE_4(sc, JME_TXMAC, reg);
2830
2831 /*
2832 * Configure Rx queue.
2833 * FIFO full threshold for transmitting Tx pause packet : 128T
2834 * FIFO threshold for processing next packet : 128QW
2835 * Rx queue 0 select
2836 * Max Rx DMA length : 128
2837 * Rx descriptor retry : 32
2838 * Rx descriptor retry time gap : 256ns
2839 * Don't receive runt/bad frame.
2840 */
2841 sc->jme_rxcsr = RXCSR_FIFO_FTHRESH_128T;
2842 /*
2843 * Since Rx FIFO size is 4K bytes, receiving frames larger
2844 * than 4K bytes will suffer from Rx FIFO overruns. So
2845 * decrease FIFO threshold to reduce the FIFO overruns for
2846 * frames larger than 4000 bytes.
2847 * For best performance of standard MTU sized frames use
2848 * maximum allowable FIFO threshold, 128QW. Note these do
2849 * not hold on chip full mask version >=2. For these
2850 * controllers 64QW and 128QW are not valid value.
2851 */
2852 if (CHIPMODE_REVFM(sc->jme_chip_rev) >= 2)
2853 sc->jme_rxcsr |= RXCSR_FIFO_THRESH_16QW;
2854 else {
2855 if ((ifp->if_mtu + ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN +
2856 ETHER_CRC_LEN) > JME_RX_FIFO_SIZE)
2857 sc->jme_rxcsr |= RXCSR_FIFO_THRESH_16QW;
2858 else
2859 sc->jme_rxcsr |= RXCSR_FIFO_THRESH_128QW;
2860 }
2861 sc->jme_rxcsr |= sc->jme_rx_dma_size | RXCSR_RXQ_N_SEL(RXCSR_RXQ0);
2862 sc->jme_rxcsr |= RXCSR_DESC_RT_CNT(RXCSR_DESC_RT_CNT_DEFAULT);
2863 sc->jme_rxcsr |= RXCSR_DESC_RT_GAP_256 & RXCSR_DESC_RT_GAP_MASK;
2864 CSR_WRITE_4(sc, JME_RXCSR, sc->jme_rxcsr);
2865
2866 /* Set Rx descriptor counter. */
2867 CSR_WRITE_4(sc, JME_RXQDC, JME_RX_RING_CNT);
2868
2869 /* Set Rx ring address to the hardware. */
2870 paddr = JME_RX_RING_ADDR(sc, 0);
2871 CSR_WRITE_4(sc, JME_RXDBA_HI, JME_ADDR_HI(paddr));
2872 CSR_WRITE_4(sc, JME_RXDBA_LO, JME_ADDR_LO(paddr));
2873
2874 /* Clear receive filter. */
2875 CSR_WRITE_4(sc, JME_RXMAC, 0);
2876 /* Set up the receive filter. */
2877 jme_set_filter(sc);
2878 jme_set_vlan(sc);
2879
2880 /*
2881 * Disable all WOL bits as WOL can interfere with normal Rx
2882 * operation. Also clear the WOL detection status bits.
2883 */
2884 reg = CSR_READ_4(sc, JME_PMCS);
2885 reg &= ~PMCS_WOL_ENB_MASK;
2886 CSR_WRITE_4(sc, JME_PMCS, reg);
2887
2888 reg = CSR_READ_4(sc, JME_RXMAC);
2889 /*
2890 * Pad 10 bytes right before the received frame. This greatly
2891 * helps Rx performance on strict-alignment architectures as
2892 * the driver does not need to copy the frame to align the payload.
2893 */
2894 reg |= RXMAC_PAD_10BYTES;
2895 if ((ifp->if_capenable & IFCAP_RXCSUM) != 0)
2896 reg |= RXMAC_CSUM_ENB;
2897 CSR_WRITE_4(sc, JME_RXMAC, reg);
2898
2899 /* Configure general purpose reg0 */
2900 reg = CSR_READ_4(sc, JME_GPREG0);
2901 reg &= ~GPREG0_PCC_UNIT_MASK;
2902 /* Set PCC timer resolution to micro-seconds unit. */
2903 reg |= GPREG0_PCC_UNIT_US;
2904 /*
2905 * Disable all shadow register posting as we have to read the
2906 * JME_INTR_STATUS register in jme_int_task. Also, it seems
2907 * hard to synchronize the interrupt status between hardware
2908 * and software with shadow posting due to the requirements
2909 * of bus_dmamap_sync(9).
2910 */
2911 reg |= GPREG0_SH_POST_DW7_DIS | GPREG0_SH_POST_DW6_DIS |
2912 GPREG0_SH_POST_DW5_DIS | GPREG0_SH_POST_DW4_DIS |
2913 GPREG0_SH_POST_DW3_DIS | GPREG0_SH_POST_DW2_DIS |
2914 GPREG0_SH_POST_DW1_DIS | GPREG0_SH_POST_DW0_DIS;
2915 /* Disable posting of DW0. */
2916 reg &= ~GPREG0_POST_DW0_ENB;
2917 /* Clear PME message. */
2918 reg &= ~GPREG0_PME_ENB;
2919 /* Set PHY address. */
2920 reg &= ~GPREG0_PHY_ADDR_MASK;
2921 reg |= sc->jme_phyaddr;
2922 CSR_WRITE_4(sc, JME_GPREG0, reg);
2923
2924 /* Configure Tx queue 0 packet completion coalescing. */
2925 reg = (sc->jme_tx_coal_to << PCCTX_COAL_TO_SHIFT) &
2926 PCCTX_COAL_TO_MASK;
2927 reg |= (sc->jme_tx_coal_pkt << PCCTX_COAL_PKT_SHIFT) &
2928 PCCTX_COAL_PKT_MASK;
2929 reg |= PCCTX_COAL_TXQ0;
2930 CSR_WRITE_4(sc, JME_PCCTX, reg);
2931
2932 /* Configure Rx queue 0 packet completion coalescing. */
2933 reg = (sc->jme_rx_coal_to << PCCRX_COAL_TO_SHIFT) &
2934 PCCRX_COAL_TO_MASK;
2935 reg |= (sc->jme_rx_coal_pkt << PCCRX_COAL_PKT_SHIFT) &
2936 PCCRX_COAL_PKT_MASK;
2937 CSR_WRITE_4(sc, JME_PCCRX0, reg);
2938
2939 /*
2940 * Configure PCD (Packet Completion Deferring). It seems PCD
2941 * generates an interrupt when the time interval between two
2942 * back-to-back incoming/outgoing packets is long enough for
2943 * its timer to count down to 0. The arrival of new packets
2944 * after the timer has started causes the PCD timer to restart.
2945 * Unfortunately, it's not clear how PCD is useful at the
2946 * moment, so just reuse the PCC parameters.
2947 */
2948 if ((sc->jme_flags & JME_FLAG_PCCPCD) != 0) {
2949 sc->jme_rx_pcd_to = sc->jme_rx_coal_to;
2950 if (sc->jme_rx_coal_to > PCDRX_TO_MAX)
2951 sc->jme_rx_pcd_to = PCDRX_TO_MAX;
2952 sc->jme_tx_pcd_to = sc->jme_tx_coal_to;
2953 if (sc->jme_tx_coal_to > PCDTX_TO_MAX)
2954 sc->jme_tx_pcd_to = PCDTX_TO_MAX;
2955 reg = sc->jme_rx_pcd_to << PCDRX0_TO_THROTTLE_SHIFT;
2956 reg |= sc->jme_rx_pcd_to << PCDRX0_TO_SHIFT;
2957 CSR_WRITE_4(sc, PCDRX_REG(0), reg);
2958 reg = sc->jme_tx_pcd_to << PCDTX_TO_THROTTLE_SHIFT;
2959 reg |= sc->jme_tx_pcd_to << PCDTX_TO_SHIFT;
2960 CSR_WRITE_4(sc, JME_PCDTX, reg);
2961 }
2962
2963 /* Configure shadow status block but don't enable posting. */
2964 paddr = sc->jme_rdata.jme_ssb_block_paddr;
2965 CSR_WRITE_4(sc, JME_SHBASE_ADDR_HI, JME_ADDR_HI(paddr));
2966 CSR_WRITE_4(sc, JME_SHBASE_ADDR_LO, JME_ADDR_LO(paddr));
2967
2968 /* Disable Timer 1 and Timer 2. */
2969 CSR_WRITE_4(sc, JME_TIMER1, 0);
2970 CSR_WRITE_4(sc, JME_TIMER2, 0);
2971
2972 /* Configure retry transmit period, retry limit value. */
2973 CSR_WRITE_4(sc, JME_TXTRHD,
2974 ((TXTRHD_RT_PERIOD_DEFAULT << TXTRHD_RT_PERIOD_SHIFT) &
2975 TXTRHD_RT_PERIOD_MASK) |
2976 ((TXTRHD_RT_LIMIT_DEFAULT << TXTRHD_RT_LIMIT_SHIFT) &
2977 TXTRHD_RT_LIMIT_SHIFT));
2978
2979 /* Disable RSS. */
2980 CSR_WRITE_4(sc, JME_RSSC, RSSC_DIS_RSS);
2981
2982 /* Initialize the interrupt mask. */
2983 CSR_WRITE_4(sc, JME_INTR_MASK_SET, JME_INTRS);
2984 CSR_WRITE_4(sc, JME_INTR_STATUS, 0xFFFFFFFF);
2985
2986 /*
2987 * Enabling the Tx/Rx DMA engines and Rx queue processing is
2988 * done after detection of a valid link in jme_link_task.
2989 */
2990
2991 sc->jme_flags &= ~JME_FLAG_LINK;
2992 /* Set the current media. */
2993 mii_mediachg(mii);
2994
2995 callout_reset(&sc->jme_tick_ch, hz, jme_tick, sc);
2996
2997 ifp->if_drv_flags |= IFF_DRV_RUNNING;
2998 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
2999 }
3000
3001 static void
3002 jme_stop(struct jme_softc *sc)
3003 {
3004 struct ifnet *ifp;
3005 struct jme_txdesc *txd;
3006 struct jme_rxdesc *rxd;
3007 int i;
3008
3009 JME_LOCK_ASSERT(sc);
3010 /*
3011 * Mark the interface down and cancel the watchdog timer.
3012 */
3013 ifp = sc->jme_ifp;
3014 ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
3015 sc->jme_flags &= ~JME_FLAG_LINK;
3016 callout_stop(&sc->jme_tick_ch);
3017 sc->jme_watchdog_timer = 0;
3018
3019 /*
3020 * Disable interrupts.
3021 */
3022 CSR_WRITE_4(sc, JME_INTR_MASK_CLR, JME_INTRS);
3023 CSR_WRITE_4(sc, JME_INTR_STATUS, 0xFFFFFFFF);
3024
3025 /* Disable updating shadow status block. */
3026 CSR_WRITE_4(sc, JME_SHBASE_ADDR_LO,
3027 CSR_READ_4(sc, JME_SHBASE_ADDR_LO) & ~SHBASE_POST_ENB);
3028
3029 /* Stop receiver, transmitter. */
3030 jme_stop_rx(sc);
3031 jme_stop_tx(sc);
3032
3033 /* Reclaim Rx/Tx buffers that have been completed. */
3034 jme_rxintr(sc, JME_RX_RING_CNT);
3035 if (sc->jme_cdata.jme_rxhead != NULL)
3036 m_freem(sc->jme_cdata.jme_rxhead);
3037 JME_RXCHAIN_RESET(sc);
3038 jme_txeof(sc);
3039 /*
3040 * Free RX and TX mbufs still in the queues.
3041 */
3042 for (i = 0; i < JME_RX_RING_CNT; i++) {
3043 rxd = &sc->jme_cdata.jme_rxdesc[i];
3044 if (rxd->rx_m != NULL) {
3045 bus_dmamap_sync(sc->jme_cdata.jme_rx_tag,
3046 rxd->rx_dmamap, BUS_DMASYNC_POSTREAD);
3047 bus_dmamap_unload(sc->jme_cdata.jme_rx_tag,
3048 rxd->rx_dmamap);
3049 m_freem(rxd->rx_m);
3050 rxd->rx_m = NULL;
3051 }
3052 }
3053 for (i = 0; i < JME_TX_RING_CNT; i++) {
3054 txd = &sc->jme_cdata.jme_txdesc[i];
3055 if (txd->tx_m != NULL) {
3056 bus_dmamap_sync(sc->jme_cdata.jme_tx_tag,
3057 txd->tx_dmamap, BUS_DMASYNC_POSTWRITE);
3058 bus_dmamap_unload(sc->jme_cdata.jme_tx_tag,
3059 txd->tx_dmamap);
3060 m_freem(txd->tx_m);
3061 txd->tx_m = NULL;
3062 txd->tx_ndesc = 0;
3063 }
3064 }
3065 jme_stats_update(sc);
3066 jme_stats_save(sc);
3067 }
3068
3069 static void
3070 jme_stop_tx(struct jme_softc *sc)
3071 {
3072 uint32_t reg;
3073 int i;
3074
3075 reg = CSR_READ_4(sc, JME_TXCSR);
3076 if ((reg & TXCSR_TX_ENB) == 0)
3077 return;
3078 reg &= ~TXCSR_TX_ENB;
3079 CSR_WRITE_4(sc, JME_TXCSR, reg);
3080 for (i = JME_TIMEOUT; i > 0; i--) {
3081 DELAY(1);
3082 if ((CSR_READ_4(sc, JME_TXCSR) & TXCSR_TX_ENB) == 0)
3083 break;
3084 }
3085 if (i == 0)
3086 device_printf(sc->jme_dev, "stopping transmitter timeout!\n");
3087 }
3088
3089 static void
3090 jme_stop_rx(struct jme_softc *sc)
3091 {
3092 uint32_t reg;
3093 int i;
3094
3095 reg = CSR_READ_4(sc, JME_RXCSR);
3096 if ((reg & RXCSR_RX_ENB) == 0)
3097 return;
3098 reg &= ~RXCSR_RX_ENB;
3099 CSR_WRITE_4(sc, JME_RXCSR, reg);
3100 for (i = JME_TIMEOUT; i > 0; i--) {
3101 DELAY(1);
3102 if ((CSR_READ_4(sc, JME_RXCSR) & RXCSR_RX_ENB) == 0)
3103 break;
3104 }
3105 if (i == 0)
3106 device_printf(sc->jme_dev, "stopping receiver timeout!\n");
3107 }
3108
3109 static void
3110 jme_init_tx_ring(struct jme_softc *sc)
3111 {
3112 struct jme_ring_data *rd;
3113 struct jme_txdesc *txd;
3114 int i;
3115
3116 sc->jme_cdata.jme_tx_prod = 0;
3117 sc->jme_cdata.jme_tx_cons = 0;
3118 sc->jme_cdata.jme_tx_cnt = 0;
3119
3120 rd = &sc->jme_rdata;
3121 bzero(rd->jme_tx_ring, JME_TX_RING_SIZE);
3122 for (i = 0; i < JME_TX_RING_CNT; i++) {
3123 txd = &sc->jme_cdata.jme_txdesc[i];
3124 txd->tx_m = NULL;
3125 txd->tx_desc = &rd->jme_tx_ring[i];
3126 txd->tx_ndesc = 0;
3127 }
3128
3129 bus_dmamap_sync(sc->jme_cdata.jme_tx_ring_tag,
3130 sc->jme_cdata.jme_tx_ring_map,
3131 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3132 }
3133
3134 static void
3135 jme_init_ssb(struct jme_softc *sc)
3136 {
3137 struct jme_ring_data *rd;
3138
3139 rd = &sc->jme_rdata;
3140 bzero(rd->jme_ssb_block, JME_SSB_SIZE);
3141 bus_dmamap_sync(sc->jme_cdata.jme_ssb_tag, sc->jme_cdata.jme_ssb_map,
3142 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3143 }
3144
3145 static int
3146 jme_init_rx_ring(struct jme_softc *sc)
3147 {
3148 struct jme_ring_data *rd;
3149 struct jme_rxdesc *rxd;
3150 int i;
3151
3152 sc->jme_cdata.jme_rx_cons = 0;
3153 JME_RXCHAIN_RESET(sc);
3154 sc->jme_morework = 0;
3155
3156 rd = &sc->jme_rdata;
3157 bzero(rd->jme_rx_ring, JME_RX_RING_SIZE);
3158 for (i = 0; i < JME_RX_RING_CNT; i++) {
3159 rxd = &sc->jme_cdata.jme_rxdesc[i];
3160 rxd->rx_m = NULL;
3161 rxd->rx_desc = &rd->jme_rx_ring[i];
3162 if (jme_newbuf(sc, rxd) != 0)
3163 return (ENOBUFS);
3164 }
3165
3166 bus_dmamap_sync(sc->jme_cdata.jme_rx_ring_tag,
3167 sc->jme_cdata.jme_rx_ring_map,
3168 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3169
3170 return (0);
3171 }
3172
3173 static int
3174 jme_newbuf(struct jme_softc *sc, struct jme_rxdesc *rxd)
3175 {
3176 struct jme_desc *desc;
3177 struct mbuf *m;
3178 bus_dma_segment_t segs[1];
3179 bus_dmamap_t map;
3180 int nsegs;
3181
3182 m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
3183 if (m == NULL)
3184 return (ENOBUFS);
3185 /*
3186 * The JMC250 has a 64bit boundary alignment limitation, so
3187 * jme(4) takes advantage of the hardware's 10 byte padding
3188 * feature in order not to copy the entire frame just to
3189 * align the IP header on a 32bit boundary.
3190 */
3191 m->m_len = m->m_pkthdr.len = MCLBYTES;
3192
3193 if (bus_dmamap_load_mbuf_sg(sc->jme_cdata.jme_rx_tag,
3194 sc->jme_cdata.jme_rx_sparemap, m, segs, &nsegs, 0) != 0) {
3195 m_freem(m);
3196 return (ENOBUFS);
3197 }
3198 KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));
3199
3200 if (rxd->rx_m != NULL) {
3201 bus_dmamap_sync(sc->jme_cdata.jme_rx_tag, rxd->rx_dmamap,
3202 BUS_DMASYNC_POSTREAD);
3203 bus_dmamap_unload(sc->jme_cdata.jme_rx_tag, rxd->rx_dmamap);
3204 }
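/*
 * Note (annotation): swap the just-loaded spare map into the
 * descriptor and keep the old map as the new spare, so a
 * pre-allocated map is always available for the next buffer load.
 */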
3205 map = rxd->rx_dmamap;
3206 rxd->rx_dmamap = sc->jme_cdata.jme_rx_sparemap;
3207 sc->jme_cdata.jme_rx_sparemap = map;
3208 bus_dmamap_sync(sc->jme_cdata.jme_rx_tag, rxd->rx_dmamap,
3209 BUS_DMASYNC_PREREAD);
3210 rxd->rx_m = m;
3211
3212 desc = rxd->rx_desc;
3213 desc->buflen = htole32(segs[0].ds_len);
3214 desc->addr_lo = htole32(JME_ADDR_LO(segs[0].ds_addr));
3215 desc->addr_hi = htole32(JME_ADDR_HI(segs[0].ds_addr));
3216 desc->flags = htole32(JME_RD_OWN | JME_RD_INTR | JME_RD_64BIT);
3217
3218 return (0);
3219 }
3220
3221 static void
3222 jme_set_vlan(struct jme_softc *sc)
3223 {
3224 struct ifnet *ifp;
3225 uint32_t reg;
3226
3227 JME_LOCK_ASSERT(sc);
3228
3229 ifp = sc->jme_ifp;
3230 reg = CSR_READ_4(sc, JME_RXMAC);
3231 reg &= ~RXMAC_VLAN_ENB;
3232 if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0)
3233 reg |= RXMAC_VLAN_ENB;
3234 CSR_WRITE_4(sc, JME_RXMAC, reg);
3235 }
3236
3237 static u_int
3238 jme_hash_maddr(void *arg, struct sockaddr_dl *sdl, u_int cnt)
3239 {
3240 uint32_t crc, *mchash = arg;
3241
3242 crc = ether_crc32_be(LLADDR(sdl), ETHER_ADDR_LEN);
3243
3244 /* Just want the 6 least significant bits. */
3245 crc &= 0x3f;
3246
3247 /* Set the corresponding bit in the hash table. */
3248 mchash[crc >> 5] |= 1 << (crc & 0x1f);
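/*
 * Illustration: a CRC of 42 (0x2a) selects bit 10 of mchash[1];
 * the high bit of the 6-bit index picks the register and the low
 * 5 bits pick the bit within it.
 */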
3249
3250 return (1);
3251 }
3252
3253 static void
3254 jme_set_filter(struct jme_softc *sc)
3255 {
3256 struct ifnet *ifp;
3257 uint32_t mchash[2];
3258 uint32_t rxcfg;
3259
3260 JME_LOCK_ASSERT(sc);
3261
3262 ifp = sc->jme_ifp;
3263
3264 rxcfg = CSR_READ_4(sc, JME_RXMAC);
3265 rxcfg &= ~ (RXMAC_BROADCAST | RXMAC_PROMISC | RXMAC_MULTICAST |
3266 RXMAC_ALLMULTI);
3267 /* Always accept frames destined to our station address. */
3268 rxcfg |= RXMAC_UNICAST;
3269 if ((ifp->if_flags & IFF_BROADCAST) != 0)
3270 rxcfg |= RXMAC_BROADCAST;
3271 if ((ifp->if_flags & (IFF_PROMISC | IFF_ALLMULTI)) != 0) {
3272 if ((ifp->if_flags & IFF_PROMISC) != 0)
3273 rxcfg |= RXMAC_PROMISC;
3274 if ((ifp->if_flags & IFF_ALLMULTI) != 0)
3275 rxcfg |= RXMAC_ALLMULTI;
3276 CSR_WRITE_4(sc, JME_MAR0, 0xFFFFFFFF);
3277 CSR_WRITE_4(sc, JME_MAR1, 0xFFFFFFFF);
3278 CSR_WRITE_4(sc, JME_RXMAC, rxcfg);
3279 return;
3280 }
3281
3282 /*
3283 * Set up the multicast address filter by passing all multicast
3284 * addresses through a CRC generator, and then using the low-order
3285 * 6 bits as an index into the 64 bit multicast hash table. The
3286 * high order bits select the register, while the rest of the bits
3287 * select the bit within the register.
3288 */
3289 rxcfg |= RXMAC_MULTICAST;
3290 bzero(mchash, sizeof(mchash));
3291 if_foreach_llmaddr(ifp, jme_hash_maddr, &mchash);
3292
3293 CSR_WRITE_4(sc, JME_MAR0, mchash[0]);
3294 CSR_WRITE_4(sc, JME_MAR1, mchash[1]);
3295 CSR_WRITE_4(sc, JME_RXMAC, rxcfg);
3296 }
3297
3298 static void
3299 jme_stats_clear(struct jme_softc *sc)
3300 {
3301
3302 JME_LOCK_ASSERT(sc);
3303
3304 if ((sc->jme_flags & JME_FLAG_HWMIB) == 0)
3305 return;
3306
3307 /* Disable and clear counters. */
3308 CSR_WRITE_4(sc, JME_STATCSR, 0xFFFFFFFF);
3309 /* Activate hw counters. */
3310 CSR_WRITE_4(sc, JME_STATCSR, 0);
3311 CSR_READ_4(sc, JME_STATCSR);
3312 bzero(&sc->jme_stats, sizeof(struct jme_hw_stats));
3313 }
3314
3315 static void
3316 jme_stats_save(struct jme_softc *sc)
3317 {
3318
3319 JME_LOCK_ASSERT(sc);
3320
3321 if ((sc->jme_flags & JME_FLAG_HWMIB) == 0)
3322 return;
3323 /* Save current counters. */
3324 bcopy(&sc->jme_stats, &sc->jme_ostats, sizeof(struct jme_hw_stats));
3325 /* Disable and clear counters. */
3326 CSR_WRITE_4(sc, JME_STATCSR, 0xFFFFFFFF);
3327 }
3328
3329 static void
3330 jme_stats_update(struct jme_softc *sc)
3331 {
3332 struct jme_hw_stats *stat, *ostat;
3333 uint32_t reg;
3334
3335 JME_LOCK_ASSERT(sc);
3336
3337 if ((sc->jme_flags & JME_FLAG_HWMIB) == 0)
3338 return;
3339 stat = &sc->jme_stats;
3340 ostat = &sc->jme_ostats;
3341 stat->tx_good_frames = CSR_READ_4(sc, JME_STAT_TXGOOD);
3342 stat->rx_good_frames = CSR_READ_4(sc, JME_STAT_RXGOOD);
3343 reg = CSR_READ_4(sc, JME_STAT_CRCMII);
3344 stat->rx_crc_errs = (reg & STAT_RX_CRC_ERR_MASK) >>
3345 STAT_RX_CRC_ERR_SHIFT;
3346 stat->rx_mii_errs = (reg & STAT_RX_MII_ERR_MASK) >>
3347 STAT_RX_MII_ERR_SHIFT;
3348 reg = CSR_READ_4(sc, JME_STAT_RXERR);
3349 stat->rx_fifo_oflows = (reg & STAT_RXERR_OFLOW_MASK) >>
3350 STAT_RXERR_OFLOW_SHIFT;
3351 stat->rx_desc_empty = (reg & STAT_RXERR_MPTY_MASK) >>
3352 STAT_RXERR_MPTY_SHIFT;
3353 reg = CSR_READ_4(sc, JME_STAT_FAIL);
3354 stat->rx_bad_frames = (reg & STAT_FAIL_RX_MASK) >> STAT_FAIL_RX_SHIFT;
3355 stat->tx_bad_frames = (reg & STAT_FAIL_TX_MASK) >> STAT_FAIL_TX_SHIFT;
3356
3357 /* Account for previous counters. */
3358 stat->rx_good_frames += ostat->rx_good_frames;
3359 stat->rx_crc_errs += ostat->rx_crc_errs;
3360 stat->rx_mii_errs += ostat->rx_mii_errs;
3361 stat->rx_fifo_oflows += ostat->rx_fifo_oflows;
3362 stat->rx_desc_empty += ostat->rx_desc_empty;
3363 stat->rx_bad_frames += ostat->rx_bad_frames;
3364 stat->tx_good_frames += ostat->tx_good_frames;
3365 stat->tx_bad_frames += ostat->tx_bad_frames;
3366 }
3367
3368 static void
3369 jme_phy_down(struct jme_softc *sc)
3370 {
3371 uint32_t reg;
3372
3373 jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr, MII_BMCR, BMCR_PDOWN);
3374 if (CHIPMODE_REVFM(sc->jme_chip_rev) >= 5) {
3375 reg = CSR_READ_4(sc, JME_PHYPOWDN);
3376 reg |= 0x0000000F;
3377 CSR_WRITE_4(sc, JME_PHYPOWDN, reg);
3378 reg = pci_read_config(sc->jme_dev, JME_PCI_PE1, 4);
3379 reg &= ~PE1_GIGA_PDOWN_MASK;
3380 reg |= PE1_GIGA_PDOWN_D3;
3381 pci_write_config(sc->jme_dev, JME_PCI_PE1, reg, 4);
3382 }
3383 }
3384
3385 static void
3386 jme_phy_up(struct jme_softc *sc)
3387 {
3388 uint32_t reg;
3389 uint16_t bmcr;
3390
3391 bmcr = jme_miibus_readreg(sc->jme_dev, sc->jme_phyaddr, MII_BMCR);
3392 bmcr &= ~BMCR_PDOWN;
3393 jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr, MII_BMCR, bmcr);
3394 if (CHIPMODE_REVFM(sc->jme_chip_rev) >= 5) {
3395 reg = CSR_READ_4(sc, JME_PHYPOWDN);
3396 reg &= ~0x0000000F;
3397 CSR_WRITE_4(sc, JME_PHYPOWDN, reg);
3398 reg = pci_read_config(sc->jme_dev, JME_PCI_PE1, 4);
3399 reg &= ~PE1_GIGA_PDOWN_MASK;
3400 reg |= PE1_GIGA_PDOWN_DIS;
3401 pci_write_config(sc->jme_dev, JME_PCI_PE1, reg, 4);
3402 }
3403 }
3404
3405 static int
3406 sysctl_int_range(SYSCTL_HANDLER_ARGS, int low, int high)
3407 {
3408 int error, value;
3409
3410 if (arg1 == NULL)
3411 return (EINVAL);
3412 value = *(int *)arg1;
3413 error = sysctl_handle_int(oidp, &value, 0, req);
3414 if (error || req->newptr == NULL)
3415 return (error);
3416 if (value < low || value > high)
3417 return (EINVAL);
3418 *(int *)arg1 = value;
3419
3420 return (0);
3421 }
3422
3423 static int
3424 sysctl_hw_jme_tx_coal_to(SYSCTL_HANDLER_ARGS)
3425 {
3426 return (sysctl_int_range(oidp, arg1, arg2, req,
3427 PCCTX_COAL_TO_MIN, PCCTX_COAL_TO_MAX));
3428 }
3429
3430 static int
3431 sysctl_hw_jme_tx_coal_pkt(SYSCTL_HANDLER_ARGS)
3432 {
3433 return (sysctl_int_range(oidp, arg1, arg2, req,
3434 PCCTX_COAL_PKT_MIN, PCCTX_COAL_PKT_MAX));
3435 }
3436
3437 static int
3438 sysctl_hw_jme_rx_coal_to(SYSCTL_HANDLER_ARGS)
3439 {
3440 return (sysctl_int_range(oidp, arg1, arg2, req,
3441 PCCRX_COAL_TO_MIN, PCCRX_COAL_TO_MAX));
3442 }
3443
3444 static int
3445 sysctl_hw_jme_rx_coal_pkt(SYSCTL_HANDLER_ARGS)
3446 {
3447 return (sysctl_int_range(oidp, arg1, arg2, req,
3448 PCCRX_COAL_PKT_MIN, PCCRX_COAL_PKT_MAX));
3449 }
3450
3451 static int
3452 sysctl_hw_jme_proc_limit(SYSCTL_HANDLER_ARGS)
3453 {
3454 return (sysctl_int_range(oidp, arg1, arg2, req,
3455 JME_PROC_MIN, JME_PROC_MAX));
3456 }