FreeBSD/Linux Kernel Cross Reference
sys/dev/em/if_em.c
1 /**************************************************************************
2
3 Copyright (c) 2001-2005, Intel Corporation
4 All rights reserved.
5
6 Redistribution and use in source and binary forms, with or without
7 modification, are permitted provided that the following conditions are met:
8
9 1. Redistributions of source code must retain the above copyright notice,
10 this list of conditions and the following disclaimer.
11
12 2. Redistributions in binary form must reproduce the above copyright
13 notice, this list of conditions and the following disclaimer in the
14 documentation and/or other materials provided with the distribution.
15
16 3. Neither the name of the Intel Corporation nor the names of its
17 contributors may be used to endorse or promote products derived from
18 this software without specific prior written permission.
19
20 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 POSSIBILITY OF SUCH DAMAGE.
31
32 ***************************************************************************/
33
34 /*$FreeBSD: releng/6.1/sys/dev/em/if_em.c 156855 2006-03-18 21:48:08Z glebius $*/
35
36 #ifdef HAVE_KERNEL_OPTION_HEADERS
37 #include "opt_device_polling.h"
38 #endif
39
40 #include <dev/em/if_em.h>
41
42 /*********************************************************************
43 * Set this to one to display debug statistics
44 *********************************************************************/
45 int em_display_debug_stats = 0;
46
47 /*********************************************************************
48 * Driver version
49 *********************************************************************/
50
51 char em_driver_version[] = "Version - 3.2.18";
52
53
54 /*********************************************************************
55 * PCI Device ID Table
56 *
57 * Used by probe to select devices to load on
58 * Last field stores an index into em_strings
59 * Last entry must be all 0s
60 *
61 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
62 *********************************************************************/
63
/*
 * PCI match table consumed by em_probe().  Every row wildcards the
 * subvendor/subdevice fields (PCI_ANY_ID), so matching is effectively on
 * vendor + device id only, and every row uses string index 0 — all parts
 * share the single branding string in em_strings[].
 */
64 static em_vendor_info_t em_vendor_info_array[] =
65 {
66 /* Intel(R) PRO/1000 Network Connection */
67 { 0x8086, E1000_DEV_ID_82540EM, PCI_ANY_ID, PCI_ANY_ID, 0},
68 { 0x8086, E1000_DEV_ID_82540EM_LOM, PCI_ANY_ID, PCI_ANY_ID, 0},
69 { 0x8086, E1000_DEV_ID_82540EP, PCI_ANY_ID, PCI_ANY_ID, 0},
70 { 0x8086, E1000_DEV_ID_82540EP_LOM, PCI_ANY_ID, PCI_ANY_ID, 0},
71 { 0x8086, E1000_DEV_ID_82540EP_LP, PCI_ANY_ID, PCI_ANY_ID, 0},
72
73 { 0x8086, E1000_DEV_ID_82541EI, PCI_ANY_ID, PCI_ANY_ID, 0},
74 { 0x8086, E1000_DEV_ID_82541ER, PCI_ANY_ID, PCI_ANY_ID, 0},
75 { 0x8086, E1000_DEV_ID_82541ER_LOM, PCI_ANY_ID, PCI_ANY_ID, 0},
76 { 0x8086, E1000_DEV_ID_82541EI_MOBILE, PCI_ANY_ID, PCI_ANY_ID, 0},
77 { 0x8086, E1000_DEV_ID_82541GI, PCI_ANY_ID, PCI_ANY_ID, 0},
78 { 0x8086, E1000_DEV_ID_82541GI_LF, PCI_ANY_ID, PCI_ANY_ID, 0},
79 { 0x8086, E1000_DEV_ID_82541GI_MOBILE, PCI_ANY_ID, PCI_ANY_ID, 0},
80
81 { 0x8086, E1000_DEV_ID_82542, PCI_ANY_ID, PCI_ANY_ID, 0},
82
83 { 0x8086, E1000_DEV_ID_82543GC_FIBER, PCI_ANY_ID, PCI_ANY_ID, 0},
84 { 0x8086, E1000_DEV_ID_82543GC_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0},
85
86 { 0x8086, E1000_DEV_ID_82544EI_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0},
87 { 0x8086, E1000_DEV_ID_82544EI_FIBER, PCI_ANY_ID, PCI_ANY_ID, 0},
88 { 0x8086, E1000_DEV_ID_82544GC_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0},
89 { 0x8086, E1000_DEV_ID_82544GC_LOM, PCI_ANY_ID, PCI_ANY_ID, 0},
90
91 { 0x8086, E1000_DEV_ID_82545EM_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0},
92 { 0x8086, E1000_DEV_ID_82545EM_FIBER, PCI_ANY_ID, PCI_ANY_ID, 0},
93 { 0x8086, E1000_DEV_ID_82545GM_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0},
94 { 0x8086, E1000_DEV_ID_82545GM_FIBER, PCI_ANY_ID, PCI_ANY_ID, 0},
95 { 0x8086, E1000_DEV_ID_82545GM_SERDES, PCI_ANY_ID, PCI_ANY_ID, 0},
96
97 { 0x8086, E1000_DEV_ID_82546EB_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0},
98 { 0x8086, E1000_DEV_ID_82546EB_FIBER, PCI_ANY_ID, PCI_ANY_ID, 0},
99 { 0x8086, E1000_DEV_ID_82546EB_QUAD_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0},
100 { 0x8086, E1000_DEV_ID_82546GB_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0},
101 { 0x8086, E1000_DEV_ID_82546GB_FIBER, PCI_ANY_ID, PCI_ANY_ID, 0},
102 { 0x8086, E1000_DEV_ID_82546GB_SERDES, PCI_ANY_ID, PCI_ANY_ID, 0},
103 { 0x8086, E1000_DEV_ID_82546GB_PCIE, PCI_ANY_ID, PCI_ANY_ID, 0},
104 { 0x8086, E1000_DEV_ID_82546GB_QUAD_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0},
105
106 { 0x8086, E1000_DEV_ID_82547EI, PCI_ANY_ID, PCI_ANY_ID, 0},
107 { 0x8086, E1000_DEV_ID_82547EI_MOBILE, PCI_ANY_ID, PCI_ANY_ID, 0},
108 { 0x8086, E1000_DEV_ID_82547GI, PCI_ANY_ID, PCI_ANY_ID, 0},
109
/* PCI Express generation parts (82571/82572/82573) follow. */
110 { 0x8086, E1000_DEV_ID_82571EB_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0},
111 { 0x8086, E1000_DEV_ID_82571EB_FIBER, PCI_ANY_ID, PCI_ANY_ID, 0},
112 { 0x8086, E1000_DEV_ID_82571EB_SERDES, PCI_ANY_ID, PCI_ANY_ID, 0},
113
114 { 0x8086, E1000_DEV_ID_82572EI_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0},
115 { 0x8086, E1000_DEV_ID_82572EI_FIBER, PCI_ANY_ID, PCI_ANY_ID, 0},
116 { 0x8086, E1000_DEV_ID_82572EI_SERDES, PCI_ANY_ID, PCI_ANY_ID, 0},
117
118 { 0x8086, E1000_DEV_ID_82573E, PCI_ANY_ID, PCI_ANY_ID, 0},
119 { 0x8086, E1000_DEV_ID_82573E_IAMT, PCI_ANY_ID, PCI_ANY_ID, 0},
120 { 0x8086, E1000_DEV_ID_82573L, PCI_ANY_ID, PCI_ANY_ID, 0},
121
122 /* required last entry */
123 { 0, 0, 0, 0, 0}
124 };
125
126 /*********************************************************************
127 * Table of branding strings for all supported NICs.
128 *********************************************************************/
129
/*
 * Branding strings; the last field of each em_vendor_info_array row is an
 * index into this table (currently every row uses entry 0).
 */
130 static char *em_strings[] = {
131 "Intel(R) PRO/1000 Network Connection"
132 };
133
134 /*********************************************************************
135 * Function prototypes
136 *********************************************************************/
137 static int em_probe(device_t);
138 static int em_attach(device_t);
139 static int em_detach(device_t);
140 static int em_shutdown(device_t);
141 static int em_suspend(device_t);
142 static int em_resume(device_t);
143 static void em_intr(void *);
144 static void em_start(struct ifnet *);
145 static void em_start_locked(struct ifnet *ifp);
146 static int em_ioctl(struct ifnet *, u_long, caddr_t);
147 static void em_watchdog(struct ifnet *);
148 static void em_init(void *);
149 static void em_init_locked(struct adapter *);
150 static void em_stop(void *);
151 static void em_media_status(struct ifnet *, struct ifmediareq *);
152 static int em_media_change(struct ifnet *);
153 static void em_identify_hardware(struct adapter *);
154 static int em_allocate_pci_resources(struct adapter *);
155 static void em_free_pci_resources(struct adapter *);
156 static void em_local_timer(void *);
157 static int em_hardware_init(struct adapter *);
158 static void em_setup_interface(device_t, struct adapter *);
159 static int em_setup_transmit_structures(struct adapter *);
160 static void em_initialize_transmit_unit(struct adapter *);
161 static int em_setup_receive_structures(struct adapter *);
162 static void em_initialize_receive_unit(struct adapter *);
163 static void em_enable_intr(struct adapter *);
164 static void em_disable_intr(struct adapter *);
165 static void em_free_transmit_structures(struct adapter *);
166 static void em_free_receive_structures(struct adapter *);
167 static void em_update_stats_counters(struct adapter *);
168 static void em_clean_transmit_interrupts(struct adapter *);
169 static int em_allocate_receive_structures(struct adapter *);
170 static int em_allocate_transmit_structures(struct adapter *);
171 static void em_process_receive_interrupts(struct adapter *, int);
172 static void em_receive_checksum(struct adapter *,
173 struct em_rx_desc *,
174 struct mbuf *);
175 static void em_transmit_checksum_setup(struct adapter *,
176 struct mbuf *,
177 u_int32_t *,
178 u_int32_t *);
179 static void em_set_promisc(struct adapter *);
180 static void em_disable_promisc(struct adapter *);
181 static void em_set_multi(struct adapter *);
182 static void em_print_hw_stats(struct adapter *);
183 static void em_print_link_status(struct adapter *);
184 static int em_get_buf(int i, struct adapter *,
185 struct mbuf *);
186 static void em_enable_vlans(struct adapter *);
187 static void em_disable_vlans(struct adapter *);
188 static int em_encap(struct adapter *, struct mbuf **);
189 static void em_smartspeed(struct adapter *);
190 static int em_82547_fifo_workaround(struct adapter *, int);
191 static void em_82547_update_fifo_head(struct adapter *, int);
192 static int em_82547_tx_fifo_reset(struct adapter *);
193 static void em_82547_move_tail(void *arg);
194 static void em_82547_move_tail_locked(struct adapter *);
195 static int em_dma_malloc(struct adapter *, bus_size_t,
196 struct em_dma_alloc *, int);
197 static void em_dma_free(struct adapter *, struct em_dma_alloc *);
198 static void em_print_debug_info(struct adapter *);
199 static int em_is_valid_ether_addr(u_int8_t *);
200 static int em_sysctl_stats(SYSCTL_HANDLER_ARGS);
201 static int em_sysctl_debug_info(SYSCTL_HANDLER_ARGS);
202 static u_int32_t em_fill_descriptors (bus_addr_t address,
203 u_int32_t length,
204 PDESC_ARRAY desc_array);
205 static int em_sysctl_int_delay(SYSCTL_HANDLER_ARGS);
206 static void em_add_int_delay_sysctl(struct adapter *, const char *,
207 const char *, struct em_int_delay_info *,
208 int, int);
209 #ifdef DEVICE_POLLING
210 static poll_handler_t em_poll;
211 #endif
212
213 /*********************************************************************
214 * FreeBSD Device Interface Entry Points
215 *********************************************************************/
216
/* newbus method dispatch table; terminated by the mandatory {0, 0} entry. */
217 static device_method_t em_methods[] = {
218 /* Device interface */
219 DEVMETHOD(device_probe, em_probe),
220 DEVMETHOD(device_attach, em_attach),
221 DEVMETHOD(device_detach, em_detach),
222 DEVMETHOD(device_shutdown, em_shutdown),
223 DEVMETHOD(device_suspend, em_suspend),
224 DEVMETHOD(device_resume, em_resume),
225 {0, 0}
226 };
227
/*
 * The softc size tells newbus how much per-instance storage to allocate;
 * device_get_softc() in em_attach() hands back this structure.
 */
228 static driver_t em_driver = {
229 "em", em_methods, sizeof(struct adapter ),
230 };
231
232 static devclass_t em_devclass;
/* Register on the PCI bus and record module dependencies for the loader. */
233 DRIVER_MODULE(em, pci, em_driver, em_devclass, 0, 0);
234 MODULE_DEPEND(em, pci, 1, 1, 1);
235 MODULE_DEPEND(em, ether, 1, 1, 1);
236
237 /*********************************************************************
238 * Tunable default values.
239 *********************************************************************/
240
/*
 * The interrupt-delay registers count in hardware ticks of 1.024 us; these
 * macros convert between ticks and microseconds, rounding to nearest.
 */
241 #define E1000_TICKS_TO_USECS(ticks) ((1024 * (ticks) + 500) / 1000)
242 #define E1000_USECS_TO_TICKS(usecs) ((1000 * (usecs) + 512) / 1024)
243
/* Defaults below may be overridden at boot via the hw.em.* loader tunables. */
244 static int em_tx_int_delay_dflt = E1000_TICKS_TO_USECS(EM_TIDV);
245 static int em_rx_int_delay_dflt = E1000_TICKS_TO_USECS(EM_RDTR);
246 static int em_tx_abs_int_delay_dflt = E1000_TICKS_TO_USECS(EM_TADV);
247 static int em_rx_abs_int_delay_dflt = E1000_TICKS_TO_USECS(EM_RADV);
248 static int em_rxd = EM_DEFAULT_RXD;
249 static int em_txd = EM_DEFAULT_TXD;
250
251 TUNABLE_INT("hw.em.tx_int_delay", &em_tx_int_delay_dflt);
252 TUNABLE_INT("hw.em.rx_int_delay", &em_rx_int_delay_dflt);
253 TUNABLE_INT("hw.em.tx_abs_int_delay", &em_tx_abs_int_delay_dflt);
254 TUNABLE_INT("hw.em.rx_abs_int_delay", &em_rx_abs_int_delay_dflt);
255 TUNABLE_INT("hw.em.rxd", &em_rxd);
256 TUNABLE_INT("hw.em.txd", &em_txd);
257
258 /*********************************************************************
259 * Device identification routine
260 *
261 * em_probe determines if the driver should be loaded on
262 * adapter based on PCI vendor/device id of the adapter.
263 *
264 * return BUS_PROBE_DEFAULT on success, positive on failure
265 *********************************************************************/
266
267 static int
268 em_probe(device_t dev)
269 {
270 em_vendor_info_t *ent;
271
272 u_int16_t pci_vendor_id = 0;
273 u_int16_t pci_device_id = 0;
274 u_int16_t pci_subvendor_id = 0;
275 u_int16_t pci_subdevice_id = 0;
276 char adapter_name[60];
277
278 INIT_DEBUGOUT("em_probe: begin");
279
280 pci_vendor_id = pci_get_vendor(dev);
281 if (pci_vendor_id != EM_VENDOR_ID)
282 return(ENXIO);
283
284 pci_device_id = pci_get_device(dev);
285 pci_subvendor_id = pci_get_subvendor(dev);
286 pci_subdevice_id = pci_get_subdevice(dev);
287
288 ent = em_vendor_info_array;
289 while (ent->vendor_id != 0) {
290 if ((pci_vendor_id == ent->vendor_id) &&
291 (pci_device_id == ent->device_id) &&
292
293 ((pci_subvendor_id == ent->subvendor_id) ||
294 (ent->subvendor_id == PCI_ANY_ID)) &&
295
296 ((pci_subdevice_id == ent->subdevice_id) ||
297 (ent->subdevice_id == PCI_ANY_ID))) {
298 sprintf(adapter_name, "%s %s",
299 em_strings[ent->index],
300 em_driver_version);
301 device_set_desc_copy(dev, adapter_name);
302 return(BUS_PROBE_DEFAULT);
303 }
304 ent++;
305 }
306
307 return(ENXIO);
308 }
309
310 /*********************************************************************
311 * Device initialization routine
312 *
313 * The attach entry point is called when the driver is being loaded.
314 * This routine identifies the type of hardware, allocates all resources
315 * and initializes the hardware.
316 *
317 * return 0 on success, positive on failure
318 *********************************************************************/
319
320 static int
321 em_attach(device_t dev)
322 {
323 struct adapter * adapter;
324 int tsize, rsize;
325 int error = 0;
326
327 INIT_DEBUGOUT("em_attach: begin");
328
/*
 * NOTE(review): device_get_softc() presumably cannot return NULL for a
 * device being attached; the check and bzero are defensive — confirm
 * against the newbus contract for this FreeBSD branch.
 */
329 /* Allocate, clear, and link in our adapter structure */
330 if (!(adapter = device_get_softc(dev))) {
331 printf("em: adapter structure allocation failed\n");
332 return(ENOMEM);
333 }
334 bzero(adapter, sizeof(struct adapter ));
335 adapter->dev = dev;
336 adapter->osdep.dev = dev;
337 adapter->unit = device_get_unit(dev);
338 EM_LOCK_INIT(adapter, device_get_nameunit(dev));
339
/* Attach per-device sysctl nodes handled by the two handlers below. */
340 /* SYSCTL stuff */
341 SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
342 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
343 OID_AUTO, "debug_info", CTLTYPE_INT|CTLFLAG_RW,
344 (void *)adapter, 0,
345 em_sysctl_debug_info, "I", "Debug Information");
346
347 SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
348 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
349 OID_AUTO, "stats", CTLTYPE_INT|CTLFLAG_RW,
350 (void *)adapter, 0,
351 em_sysctl_stats, "I", "Statistics");
352
353 callout_init(&adapter->timer, CALLOUT_MPSAFE);
354 callout_init(&adapter->tx_fifo_timer, CALLOUT_MPSAFE);
355
/* Must run before the mac_type checks below: it sets hw.mac_type. */
356 /* Determine hardware revision */
357 em_identify_hardware(adapter);
358
359 /* Set up some sysctls for the tunable interrupt delays */
360 em_add_int_delay_sysctl(adapter, "rx_int_delay",
361 "receive interrupt delay in usecs", &adapter->rx_int_delay,
362 E1000_REG_OFFSET(&adapter->hw, RDTR), em_rx_int_delay_dflt);
363 em_add_int_delay_sysctl(adapter, "tx_int_delay",
364 "transmit interrupt delay in usecs", &adapter->tx_int_delay,
365 E1000_REG_OFFSET(&adapter->hw, TIDV), em_tx_int_delay_dflt);
/* RADV/TADV (absolute delay limit) registers exist only on 82540+. */
366 if (adapter->hw.mac_type >= em_82540) {
367 em_add_int_delay_sysctl(adapter, "rx_abs_int_delay",
368 "receive interrupt delay limit in usecs",
369 &adapter->rx_abs_int_delay,
370 E1000_REG_OFFSET(&adapter->hw, RADV),
371 em_rx_abs_int_delay_dflt);
372 em_add_int_delay_sysctl(adapter, "tx_abs_int_delay",
373 "transmit interrupt delay limit in usecs",
374 &adapter->tx_abs_int_delay,
375 E1000_REG_OFFSET(&adapter->hw, TADV),
376 em_tx_abs_int_delay_dflt);
377 }
378
379 /*
380 * Validate number of transmit and receive descriptors. It
381 * must not exceed hardware maximum, and must be multiple
382 * of E1000_DBA_ALIGN.
383 */
/* Bad hw.em.txd/rxd tunables are reported and silently replaced with defaults. */
384 if (((em_txd * sizeof(struct em_tx_desc)) % E1000_DBA_ALIGN) != 0 ||
385 (adapter->hw.mac_type >= em_82544 && em_txd > EM_MAX_TXD) ||
386 (adapter->hw.mac_type < em_82544 && em_txd > EM_MAX_TXD_82543) ||
387 (em_txd < EM_MIN_TXD)) {
388 printf("em%d: Using %d TX descriptors instead of %d!\n",
389 adapter->unit, EM_DEFAULT_TXD, em_txd);
390 adapter->num_tx_desc = EM_DEFAULT_TXD;
391 } else
392 adapter->num_tx_desc = em_txd;
393 if (((em_rxd * sizeof(struct em_rx_desc)) % E1000_DBA_ALIGN) != 0 ||
394 (adapter->hw.mac_type >= em_82544 && em_rxd > EM_MAX_RXD) ||
395 (adapter->hw.mac_type < em_82544 && em_rxd > EM_MAX_RXD_82543) ||
396 (em_rxd < EM_MIN_RXD)) {
397 printf("em%d: Using %d RX descriptors instead of %d!\n",
398 adapter->unit, EM_DEFAULT_RXD, em_rxd);
399 adapter->num_rx_desc = EM_DEFAULT_RXD;
400 } else
401 adapter->num_rx_desc = em_rxd;
402
/* Default link/PHY policy consumed later by em_hardware_init(). */
403 adapter->hw.autoneg = DO_AUTO_NEG;
404 adapter->hw.wait_autoneg_complete = WAIT_FOR_AUTO_NEG_DEFAULT;
405 adapter->hw.autoneg_advertised = AUTONEG_ADV_DEFAULT;
406 adapter->hw.tbi_compatibility_en = TRUE;
407 adapter->rx_buffer_len = EM_RXBUFFER_2048;
408
409 adapter->hw.phy_init_script = 1;
410 adapter->hw.phy_reset_disable = FALSE;
411
412 #ifndef EM_MASTER_SLAVE
413 adapter->hw.master_slave = em_ms_hw_default;
414 #else
415 adapter->hw.master_slave = EM_MASTER_SLAVE;
416 #endif
417 /*
418 * Set the max frame size assuming standard ethernet
419 * sized frames
420 */
421 adapter->hw.max_frame_size =
422 ETHERMTU + ETHER_HDR_LEN + ETHER_CRC_LEN;
423
424 adapter->hw.min_frame_size =
425 MINIMUM_ETHERNET_PACKET_SIZE + ETHER_CRC_LEN;
426
427 /*
428 * This controls when hardware reports transmit completion
429 * status.
430 */
431 adapter->hw.report_tx_early = 1;
432
433
434 if (em_allocate_pci_resources(adapter)) {
435 printf("em%d: Allocation of PCI resources failed\n",
436 adapter->unit);
437 error = ENXIO;
438 goto err_pci;
439 }
440
441
442 /* Initialize eeprom parameters */
443 em_init_eeprom_params(&adapter->hw);
444
/* Descriptor rings must be E1000_DBA_ALIGN-aligned DMA memory. */
445 tsize = roundup2(adapter->num_tx_desc * sizeof(struct em_tx_desc),
446 E1000_DBA_ALIGN);
447
448 /* Allocate Transmit Descriptor ring */
449 if (em_dma_malloc(adapter, tsize, &adapter->txdma, BUS_DMA_NOWAIT)) {
450 printf("em%d: Unable to allocate tx_desc memory\n",
451 adapter->unit);
452 error = ENOMEM;
453 goto err_tx_desc;
454 }
455 adapter->tx_desc_base = (struct em_tx_desc *) adapter->txdma.dma_vaddr;
456
457 rsize = roundup2(adapter->num_rx_desc * sizeof(struct em_rx_desc),
458 E1000_DBA_ALIGN);
459
460 /* Allocate Receive Descriptor ring */
461 if (em_dma_malloc(adapter, rsize, &adapter->rxdma, BUS_DMA_NOWAIT)) {
462 printf("em%d: Unable to allocate rx_desc memory\n",
463 adapter->unit);
464 error = ENOMEM;
465 goto err_rx_desc;
466 }
467 adapter->rx_desc_base = (struct em_rx_desc *) adapter->rxdma.dma_vaddr;
468
469 /* Initialize the hardware */
470 if (em_hardware_init(adapter)) {
471 printf("em%d: Unable to initialize the hardware\n",
472 adapter->unit);
473 error = EIO;
474 goto err_hw_init;
475 }
476
477 /* Copy the permanent MAC address out of the EEPROM */
478 if (em_read_mac_addr(&adapter->hw) < 0) {
479 printf("em%d: EEPROM read error while reading mac address\n",
480 adapter->unit);
481 error = EIO;
482 goto err_mac_addr;
483 }
484
485 if (!em_is_valid_ether_addr(adapter->hw.mac_addr)) {
486 printf("em%d: Invalid mac address\n", adapter->unit);
487 error = EIO;
488 goto err_mac_addr;
489 }
490
491 /* Setup OS specific network interface */
492 em_setup_interface(dev, adapter);
493
494 /* Initialize statistics */
495 em_clear_hw_cntrs(&adapter->hw);
496 em_update_stats_counters(adapter);
497 adapter->hw.get_link_status = 1;
498 em_check_for_link(&adapter->hw);
499
500 if (bootverbose) {
501 /* Print the link status */
502 if (adapter->link_active == 1) {
503 em_get_speed_and_duplex(&adapter->hw,
504 &adapter->link_speed, &adapter->link_duplex);
505 printf("em%d: Speed:%d Mbps Duplex:%s\n",
506 adapter->unit,
507 adapter->link_speed,
508 adapter->link_duplex == FULL_DUPLEX ? "Full" :
509 "Half");
510 } else
511 printf("em%d: Speed:N/A Duplex:N/A\n",
512 adapter->unit);
513 }
514
/* 82544 on PCI-X needs a TX DMA workaround; record it for em_encap(). */
515 /* Identify 82544 on PCIX */
516 em_get_bus_info(&adapter->hw);
517 if(adapter->hw.bus_type == em_bus_type_pcix &&
518 adapter->hw.mac_type == em_82544) {
519 adapter->pcix_82544 = TRUE;
520 }
521 else {
522 adapter->pcix_82544 = FALSE;
523 }
524 INIT_DEBUGOUT("em_attach: end");
525 return(0);
526
/*
 * Error labels deliberately fall through: each label unwinds everything
 * allocated after the point that label is reached from, ending with the
 * PCI resources and the mutex, which always exist by the first goto.
 */
527 err_mac_addr:
528 err_hw_init:
529 em_dma_free(adapter, &adapter->rxdma);
530 err_rx_desc:
531 em_dma_free(adapter, &adapter->txdma);
532 err_tx_desc:
533 err_pci:
534 em_free_pci_resources(adapter);
535 EM_LOCK_DESTROY(adapter);
536 return(error);
537
538 }
539
540 /*********************************************************************
541 * Device removal routine
542 *
543 * The detach entry point is called when the driver is being removed.
544 * This routine stops the adapter and deallocates all the resources
545 * that were allocated for driver operation.
546 *
547 * return 0 on success, positive on failure
548 *********************************************************************/
549
550 static int
551 em_detach(device_t dev)
552 {
553 struct adapter * adapter = device_get_softc(dev);
554 struct ifnet *ifp = adapter->ifp;
555
556 INIT_DEBUGOUT("em_detach: begin");
557
558 #ifdef DEVICE_POLLING
559 if (ifp->if_capenable & IFCAP_POLLING)
560 ether_poll_deregister(ifp);
561 #endif
562
/*
 * Setting in_detach makes em_ioctl() a no-op for the remainder of the
 * teardown, so the stack cannot reconfigure a half-destroyed device.
 */
563 EM_LOCK(adapter);
564 adapter->in_detach = 1;
565 em_stop(adapter);
566 em_phy_hw_reset(&adapter->hw);
567 EM_UNLOCK(adapter);
/* Unhook from the network stack before releasing bus resources. */
568 ether_ifdetach(adapter->ifp);
569
570 em_free_pci_resources(adapter);
571 bus_generic_detach(dev);
572 if_free(ifp);
573
574 /* Free Transmit Descriptor ring */
575 if (adapter->tx_desc_base) {
576 em_dma_free(adapter, &adapter->txdma);
577 adapter->tx_desc_base = NULL;
578 }
579
580 /* Free Receive Descriptor ring */
581 if (adapter->rx_desc_base) {
582 em_dma_free(adapter, &adapter->rxdma);
583 adapter->rx_desc_base = NULL;
584 }
585
586 EM_LOCK_DESTROY(adapter);
587
588 return(0);
589 }
590
591 /*********************************************************************
592 *
593 * Shutdown entry point
594 *
595 **********************************************************************/
596
597 static int
598 em_shutdown(device_t dev)
599 {
600 struct adapter *adapter = device_get_softc(dev);
601 EM_LOCK(adapter);
602 em_stop(adapter);
603 EM_UNLOCK(adapter);
604 return(0);
605 }
606
607 /*
608 * Suspend/resume device methods.
609 */
610 static int
611 em_suspend(device_t dev)
612 {
613 struct adapter *adapter = device_get_softc(dev);
614
615 EM_LOCK(adapter);
616 em_stop(adapter);
617 EM_UNLOCK(adapter);
618
619 return bus_generic_suspend(dev);
620 }
621
622 static int
623 em_resume(device_t dev)
624 {
625 struct adapter *adapter = device_get_softc(dev);
626 struct ifnet *ifp = adapter->ifp;
627
628 EM_LOCK(adapter);
629 em_init_locked(adapter);
630 if ((ifp->if_flags & IFF_UP) &&
631 (ifp->if_drv_flags & IFF_DRV_RUNNING))
632 em_start_locked(ifp);
633 EM_UNLOCK(adapter);
634
635 return bus_generic_resume(dev);
636 }
637
638
639 /*********************************************************************
640 * Transmit entry point
641 *
642 * em_start is called by the stack to initiate a transmit.
643 * The driver will remain in this routine as long as there are
644 * packets to transmit and transmit resources are available.
645 * In case resources are not available stack is notified and
646 * the packet is requeued.
647 **********************************************************************/
648
649 static void
650 em_start_locked(struct ifnet *ifp)
651 {
652 struct mbuf *m_head;
653 struct adapter *adapter = ifp->if_softc;
654
655 mtx_assert(&adapter->mtx, MA_OWNED);
656
657 if (!adapter->link_active)
658 return;
659
660 while (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) {
661
662 IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
663
664 if (m_head == NULL) break;
665
666 /*
667 * em_encap() can modify our pointer, and or make it NULL on
668 * failure. In that event, we can't requeue.
669 */
670 if (em_encap(adapter, &m_head)) {
671 if (m_head == NULL)
672 break;
673 ifp->if_drv_flags |= IFF_DRV_OACTIVE;
674 IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
675 break;
676 }
677
678 /* Send a copy of the frame to the BPF listener */
679 BPF_MTAP(ifp, m_head);
680
681 /* Set timeout in case hardware has problems transmitting */
682 ifp->if_timer = EM_TX_TIMEOUT;
683
684 }
685 return;
686 }
687
688 static void
689 em_start(struct ifnet *ifp)
690 {
691 struct adapter *adapter = ifp->if_softc;
692
693 EM_LOCK(adapter);
694 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
695 em_start_locked(ifp);
696 EM_UNLOCK(adapter);
697 return;
698 }
699
700 /*********************************************************************
701 * Ioctl entry point
702 *
703 * em_ioctl is called when the user wants to configure the
704 * interface.
705 *
706 * return 0 on success, positive on failure
707 **********************************************************************/
708
709 static int
710 em_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
711 {
712 struct ifreq *ifr = (struct ifreq *) data;
713 struct adapter * adapter = ifp->if_softc;
714 int error = 0;
715
/* Refuse all configuration once em_detach() has begun tearing down. */
716 if (adapter->in_detach) return(error);
717
718 switch (command) {
719 case SIOCSIFADDR:
720 case SIOCGIFADDR:
721 IOCTL_DEBUGOUT("ioctl rcv'd: SIOCxIFADDR (Get/Set Interface Addr)");
722 ether_ioctl(ifp, command, data);
723 break;
724 case SIOCSIFMTU:
725 {
/*
 * On strict-alignment architectures (__NO_STRICT_ALIGNMENT undefined)
 * jumbo frames are rejected outright; see the XXX comment below.
 */
726 #ifndef __NO_STRICT_ALIGNMENT
727 if (ifr->ifr_mtu > ETHERMTU)
728 /*
729 * XXX
730 * Due to the limitation of DMA engine, it needs fix-up
731 * code for strict alignment architectures. Disable
732 * jumbo frame until we have better solutions.
733 */
734 error = EINVAL;
735 #else
736 int max_frame_size;
737
738 IOCTL_DEBUGOUT("ioctl rcv'd: SIOCSIFMTU (Set Interface MTU)");
739
/* Jumbo-frame ceiling depends on the MAC generation. */
740 switch (adapter->hw.mac_type) {
741 case em_82571:
742 case em_82572:
743 max_frame_size = 10500;
744 break;
745 case em_82573:
746 /* 82573 does not support jumbo frames. */
747 max_frame_size = ETHER_MAX_LEN;
748 break;
749 default:
750 max_frame_size = MAX_JUMBO_FRAME_SIZE;
751 }
752 if (ifr->ifr_mtu > max_frame_size - ETHER_HDR_LEN -
753 ETHER_CRC_LEN) {
754 error = EINVAL;
755 break;
756 }
757
/* A changed MTU requires a full reinitialization. */
758 EM_LOCK(adapter);
759 ifp->if_mtu = ifr->ifr_mtu;
760 adapter->hw.max_frame_size =
761 ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
762 em_init_locked(adapter);
763 EM_UNLOCK(adapter);
764 #endif
765 break;
766 }
767 case SIOCSIFFLAGS:
768 IOCTL_DEBUGOUT("ioctl rcv'd: SIOCSIFFLAGS (Set Interface Flags)");
769 EM_LOCK(adapter);
770 if (ifp->if_flags & IFF_UP) {
771 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
772 em_init_locked(adapter);
773 }
774
/* Re-sync promiscuous/allmulti filters with the current if_flags. */
775 em_disable_promisc(adapter);
776 em_set_promisc(adapter);
777 } else {
778 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
779 em_stop(adapter);
780 }
781 }
782 EM_UNLOCK(adapter);
783 break;
784 case SIOCADDMULTI:
785 case SIOCDELMULTI:
786 IOCTL_DEBUGOUT("ioctl rcv'd: SIOC(ADD|DEL)MULTI");
787 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
788 EM_LOCK(adapter);
789 em_disable_intr(adapter);
790 em_set_multi(adapter);
/* 82542 rev 2.0 requires reprogramming the whole receive unit. */
791 if (adapter->hw.mac_type == em_82542_rev2_0) {
792 em_initialize_receive_unit(adapter);
793 }
/* Leave interrupts masked while polling is active. */
794 #ifdef DEVICE_POLLING
795 if (!(ifp->if_capenable & IFCAP_POLLING))
796 #endif
797 em_enable_intr(adapter);
798 EM_UNLOCK(adapter);
799 }
800 break;
801 case SIOCSIFMEDIA:
802 case SIOCGIFMEDIA:
803 IOCTL_DEBUGOUT("ioctl rcv'd: SIOCxIFMEDIA (Get/Set Interface Media)");
804 error = ifmedia_ioctl(ifp, ifr, &adapter->media, command);
805 break;
806 case SIOCSIFCAP:
807 {
808 int mask, reinit;
809
810 IOCTL_DEBUGOUT("ioctl rcv'd: SIOCSIFCAP (Set Capabilities)");
811 reinit = 0;
/* mask holds only the capability bits the caller wants toggled. */
812 mask = ifr->ifr_reqcap ^ ifp->if_capenable;
813 #ifdef DEVICE_POLLING
814 if (mask & IFCAP_POLLING) {
815 if (ifr->ifr_reqcap & IFCAP_POLLING) {
816 error = ether_poll_register(em_poll, ifp);
817 if (error)
818 return(error);
819 EM_LOCK(adapter);
820 em_disable_intr(adapter);
821 ifp->if_capenable |= IFCAP_POLLING;
822 EM_UNLOCK(adapter);
823 } else {
824 error = ether_poll_deregister(ifp);
825 /* Enable interrupt even in error case */
826 EM_LOCK(adapter);
827 em_enable_intr(adapter);
828 ifp->if_capenable &= ~IFCAP_POLLING;
829 EM_UNLOCK(adapter);
830 }
831 }
832 #endif
833 if (mask & IFCAP_HWCSUM) {
834 ifp->if_capenable ^= IFCAP_HWCSUM;
835 reinit = 1;
836 }
837 if (mask & IFCAP_VLAN_HWTAGGING) {
838 ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
839 reinit = 1;
840 }
841 if (reinit && (ifp->if_drv_flags & IFF_DRV_RUNNING))
842 em_init(adapter);
843 break;
844 }
845 default:
846 IOCTL_DEBUGOUT1("ioctl received: UNKNOWN (0x%x)", (int)command);
847 error = EINVAL;
848 }
849
850 return(error);
851 }
852
853 /*********************************************************************
854 * Watchdog entry point
855 *
856 * This routine is called whenever hardware quits transmitting.
857 *
858 **********************************************************************/
859
860 static void
861 em_watchdog(struct ifnet *ifp)
862 {
863 struct adapter * adapter;
864 adapter = ifp->if_softc;
865
866 EM_LOCK(adapter);
867 /* If we are in this routine because of pause frames, then
868 * don't reset the hardware.
869 */
870 if (E1000_READ_REG(&adapter->hw, STATUS) & E1000_STATUS_TXOFF) {
871 ifp->if_timer = EM_TX_TIMEOUT;
872 EM_UNLOCK(adapter);
873 return;
874 }
875
876 if (!em_check_for_link(&adapter->hw))
877 printf("em%d: watchdog timeout -- resetting\n", adapter->unit);
878
879 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
880 adapter->watchdog_events++;
881
882 em_init_locked(adapter);
883 EM_UNLOCK(adapter);
884 }
885
886 /*********************************************************************
887 * Init entry point
888 *
889 * This routine is used in two ways. It is used by the stack as
890 * init entry point in network interface structure. It is also used
891 * by the driver as a hw/sw initialization routine to get to a
892 * consistent state.
893 *
894 * return 0 on success, positive on failure
895 **********************************************************************/
896
897 static void
898 em_init_locked(struct adapter * adapter)
899 {
900 struct ifnet *ifp;
901
902 uint32_t pba;
903 ifp = adapter->ifp;
904
905 INIT_DEBUGOUT("em_init: begin");
906
907 mtx_assert(&adapter->mtx, MA_OWNED);
908
/* Quiesce the hardware before reprogramming it from scratch. */
909 em_stop(adapter);
910
911 /*
912 * Packet Buffer Allocation (PBA)
913 * Writing PBA sets the receive portion of the buffer
914 * the remainder is used for the transmit buffer.
915 */
916 switch (adapter->hw.mac_type) {
917 case em_82547:
918 case em_82547_rev_2: /* 82547: Total Packet Buffer is 40K */
919 if (adapter->hw.max_frame_size > EM_RXBUFFER_8192)
920 pba = E1000_PBA_22K; /* 22K for Rx, 18K for Tx */
921 else
922 pba = E1000_PBA_30K; /* 30K for Rx, 10K for Tx */
/* 82547 TX FIFO bookkeeping used by the FIFO-hang workaround. */
923 adapter->tx_fifo_head = 0;
924 adapter->tx_head_addr = pba << EM_TX_HEAD_ADDR_SHIFT;
925 adapter->tx_fifo_size = (E1000_PBA_40K - pba) << EM_PBA_BYTES_SHIFT;
926 break;
927 case em_82571: /* 82571: Total Packet Buffer is 48K */
928 case em_82572: /* 82572: Total Packet Buffer is 48K */
929 pba = E1000_PBA_32K; /* 32K for Rx, 16K for Tx */
930 break;
931 case em_82573: /* 82573: Total Packet Buffer is 32K */
932 /* Jumbo frames not supported */
933 pba = E1000_PBA_12K; /* 12K for Rx, 20K for Tx */
934 break;
935 default:
936 /* Devices before 82547 had a Packet Buffer of 64K. */
937 if(adapter->hw.max_frame_size > EM_RXBUFFER_8192)
938 pba = E1000_PBA_40K; /* 40K for Rx, 24K for Tx */
939 else
940 pba = E1000_PBA_48K; /* 48K for Rx, 16K for Tx */
941 }
942
943 INIT_DEBUGOUT1("em_init: pba=%dK",pba);
944 E1000_WRITE_REG(&adapter->hw, PBA, pba);
945
946 /* Get the latest mac address, User can use a LAA */
947 bcopy(IF_LLADDR(adapter->ifp), adapter->hw.mac_addr,
948 ETHER_ADDR_LEN);
949
950 /* Initialize the hardware */
951 if (em_hardware_init(adapter)) {
952 printf("em%d: Unable to initialize the hardware\n",
953 adapter->unit);
954 return;
955 }
956
957 if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING)
958 em_enable_vlans(adapter);
959
960 /* Prepare transmit descriptors and buffers */
961 if (em_setup_transmit_structures(adapter)) {
962 printf("em%d: Could not setup transmit structures\n",
963 adapter->unit);
964 em_stop(adapter);
965 return;
966 }
967 em_initialize_transmit_unit(adapter);
968
969 /* Setup Multicast table */
970 em_set_multi(adapter);
971
972 /* Prepare receive descriptors and buffers */
973 if (em_setup_receive_structures(adapter)) {
974 printf("em%d: Could not setup receive structures\n",
975 adapter->unit);
976 em_stop(adapter);
977 return;
978 }
979 em_initialize_receive_unit(adapter);
980
981 /* Don't lose promiscuous settings */
982 em_set_promisc(adapter);
983
984 ifp->if_drv_flags |= IFF_DRV_RUNNING;
985 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
986
/* Hardware checksum offload is only available on 82543 and newer. */
987 if (adapter->hw.mac_type >= em_82543) {
988 if (ifp->if_capenable & IFCAP_TXCSUM)
989 ifp->if_hwassist = EM_CHECKSUM_FEATURES;
990 else
991 ifp->if_hwassist = 0;
992 }
993
994 callout_reset(&adapter->timer, hz, em_local_timer, adapter);
995 em_clear_hw_cntrs(&adapter->hw);
996 #ifdef DEVICE_POLLING
997 /*
998 * Only enable interrupts if we are not polling, make sure
999 * they are off otherwise.
1000 */
1001 if (ifp->if_capenable & IFCAP_POLLING)
1002 em_disable_intr(adapter);
1003 else
1004 #endif /* DEVICE_POLLING */
1005 em_enable_intr(adapter);
1006
1007 /* Don't reset the phy next time init gets called */
1008 adapter->hw.phy_reset_disable = TRUE;
1009
1010 return;
1011 }
1012
/*
 * Locked wrapper around em_init_locked(); registered as the ifnet
 * if_init entry point.
 */
static void
em_init(void *arg)
{
	struct adapter *sc = arg;

	EM_LOCK(sc);
	em_init_locked(sc);
	EM_UNLOCK(sc);
}
1023
1024
1025 #ifdef DEVICE_POLLING
/*
 * Core of the DEVICE_POLLING cycle.  On POLL_AND_CHECK_STATUS the ICR
 * register is also read (which acknowledges pending causes) to detect
 * link changes; then up to `count' received frames are processed,
 * completed transmit descriptors are reclaimed, and transmission is
 * restarted if packets are queued.  Caller holds the adapter lock.
 */
static void
em_poll_locked(struct ifnet *ifp, enum poll_cmd cmd, int count)
{
	struct adapter *adapter = ifp->if_softc;
	u_int32_t reg_icr;

	mtx_assert(&adapter->mtx, MA_OWNED);

	if (cmd == POLL_AND_CHECK_STATUS) {
		reg_icr = E1000_READ_REG(&adapter->hw, ICR);
		if (reg_icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
			/* Hold the stats timer off while re-checking link. */
			callout_stop(&adapter->timer);
			adapter->hw.get_link_status = 1;
			em_check_for_link(&adapter->hw);
			em_print_link_status(adapter);
			callout_reset(&adapter->timer, hz, em_local_timer, adapter);
		}
	}
	em_process_receive_interrupts(adapter, count);
	em_clean_transmit_interrupts(adapter);

	if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
		em_start_locked(ifp);
}
1050
1051 static void
1052 em_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
1053 {
1054 struct adapter *adapter = ifp->if_softc;
1055
1056 EM_LOCK(adapter);
1057 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
1058 em_poll_locked(ifp, cmd, count);
1059 EM_UNLOCK(adapter);
1060 }
1061 #endif /* DEVICE_POLLING */
1062
1063 /*********************************************************************
1064 *
1065 * Interrupt Service routine
1066 *
1067 **********************************************************************/
/*
 * Legacy interrupt handler.  Reads ICR in a loop (the read also
 * acknowledges the reported causes), servicing RX/TX work and link
 * status changes until the chip reports nothing further pending.
 */
static void
em_intr(void *arg)
{
	struct adapter	*adapter = arg;
	struct ifnet	*ifp;
	uint32_t	reg_icr;
	int		wantinit = 0;

	EM_LOCK(adapter);

	ifp = adapter->ifp;

#ifdef DEVICE_POLLING
	/* While polling is enabled the interrupt path does nothing. */
	if (ifp->if_capenable & IFCAP_POLLING) {
		EM_UNLOCK(adapter);
		return;
	}
#endif /* DEVICE_POLLING */

	for (;;) {
		reg_icr = E1000_READ_REG(&adapter->hw, ICR);
		/*
		 * 82571 and newer set INT_ASSERTED when the interrupt is
		 * really ours; older parts simply report 0 when nothing
		 * is pending (shared-IRQ case).
		 */
		if (adapter->hw.mac_type >= em_82571 &&
		    (reg_icr & E1000_ICR_INT_ASSERTED) == 0)
			break;
		else if (reg_icr == 0)
			break;

		/*
		 * XXX: some laptops trigger several spurious interrupts
		 * on em(4) when in the resume cycle. The ICR register
		 * reports all-ones value in this case. Processing such
		 * interrupts would lead to a freeze. I don't know why.
		 */
		if (reg_icr == 0xffffffff)
			break;

		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
			em_process_receive_interrupts(adapter, -1);
			em_clean_transmit_interrupts(adapter);
		}

		/* Link status change */
		if (reg_icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
			callout_stop(&adapter->timer);
			adapter->hw.get_link_status = 1;
			em_check_for_link(&adapter->hw);
			em_print_link_status(adapter);
			callout_reset(&adapter->timer, hz, em_local_timer,
			    adapter);
		}

		if (reg_icr & E1000_ICR_RXO) {
			adapter->rx_overruns++;
			wantinit = 1;
		}
	}
#if 0
	/* Reinit on RX overrun is deliberately disabled; wantinit is
	 * still tracked above but currently unused. */
	if (wantinit)
		em_init_locked(adapter);
#endif
	/* Restart transmission if work queued up while we were busy. */
	if (ifp->if_drv_flags & IFF_DRV_RUNNING &&
	    !IFQ_DRV_IS_EMPTY(&ifp->if_snd))
		em_start_locked(ifp);

	EM_UNLOCK(adapter);
	return;
}
1135
1136
1137
1138 /*********************************************************************
1139 *
1140 * Media Ioctl callback
1141 *
1142 * This routine is called whenever the user queries the status of
1143 * the interface using ifconfig.
1144 *
1145 **********************************************************************/
1146 static void
1147 em_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
1148 {
1149 struct adapter * adapter = ifp->if_softc;
1150
1151 INIT_DEBUGOUT("em_media_status: begin");
1152
1153 em_check_for_link(&adapter->hw);
1154 if (E1000_READ_REG(&adapter->hw, STATUS) & E1000_STATUS_LU) {
1155 if (adapter->link_active == 0) {
1156 em_get_speed_and_duplex(&adapter->hw,
1157 &adapter->link_speed,
1158 &adapter->link_duplex);
1159 adapter->link_active = 1;
1160 }
1161 } else {
1162 if (adapter->link_active == 1) {
1163 adapter->link_speed = 0;
1164 adapter->link_duplex = 0;
1165 adapter->link_active = 0;
1166 }
1167 }
1168
1169 ifmr->ifm_status = IFM_AVALID;
1170 ifmr->ifm_active = IFM_ETHER;
1171
1172 if (!adapter->link_active)
1173 return;
1174
1175 ifmr->ifm_status |= IFM_ACTIVE;
1176
1177 if (adapter->hw.media_type == em_media_type_fiber) {
1178 ifmr->ifm_active |= IFM_1000_SX | IFM_FDX;
1179 } else {
1180 switch (adapter->link_speed) {
1181 case 10:
1182 ifmr->ifm_active |= IFM_10_T;
1183 break;
1184 case 100:
1185 ifmr->ifm_active |= IFM_100_TX;
1186 break;
1187 case 1000:
1188 ifmr->ifm_active |= IFM_1000_T;
1189 break;
1190 }
1191 if (adapter->link_duplex == FULL_DUPLEX)
1192 ifmr->ifm_active |= IFM_FDX;
1193 else
1194 ifmr->ifm_active |= IFM_HDX;
1195 }
1196 return;
1197 }
1198
1199 /*********************************************************************
1200 *
1201 * Media Ioctl callback
1202 *
1203 * This routine is called when the user changes speed/duplex using
1204 * media/mediopt option with ifconfig.
1205 *
1206 **********************************************************************/
1207 static int
1208 em_media_change(struct ifnet *ifp)
1209 {
1210 struct adapter * adapter = ifp->if_softc;
1211 struct ifmedia *ifm = &adapter->media;
1212
1213 INIT_DEBUGOUT("em_media_change: begin");
1214
1215 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
1216 return(EINVAL);
1217
1218 switch (IFM_SUBTYPE(ifm->ifm_media)) {
1219 case IFM_AUTO:
1220 adapter->hw.autoneg = DO_AUTO_NEG;
1221 adapter->hw.autoneg_advertised = AUTONEG_ADV_DEFAULT;
1222 break;
1223 case IFM_1000_SX:
1224 case IFM_1000_T:
1225 adapter->hw.autoneg = DO_AUTO_NEG;
1226 adapter->hw.autoneg_advertised = ADVERTISE_1000_FULL;
1227 break;
1228 case IFM_100_TX:
1229 adapter->hw.autoneg = FALSE;
1230 adapter->hw.autoneg_advertised = 0;
1231 if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX)
1232 adapter->hw.forced_speed_duplex = em_100_full;
1233 else
1234 adapter->hw.forced_speed_duplex = em_100_half;
1235 break;
1236 case IFM_10_T:
1237 adapter->hw.autoneg = FALSE;
1238 adapter->hw.autoneg_advertised = 0;
1239 if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX)
1240 adapter->hw.forced_speed_duplex = em_10_full;
1241 else
1242 adapter->hw.forced_speed_duplex = em_10_half;
1243 break;
1244 default:
1245 printf("em%d: Unsupported media type\n", adapter->unit);
1246 }
1247
1248 /* As the speed/duplex settings my have changed we need to
1249 * reset the PHY.
1250 */
1251 adapter->hw.phy_reset_disable = FALSE;
1252
1253 em_init(adapter);
1254
1255 return(0);
1256 }
1257
1258 /*********************************************************************
1259 *
1260 * This routine maps the mbufs to tx descriptors.
1261 *
1262 * return 0 on success, positive on failure
1263 **********************************************************************/
/*
 * Map one outgoing mbuf chain onto transmit descriptors.
 *
 * On success the descriptor ring and next_avail_tx_desc are advanced,
 * the mbuf ownership moves to the last tx_buffer used, and the hardware
 * tail pointer is updated (via the 82547 workaround path when needed).
 * On failure the DMA map is unloaded and the mbuf stays with the caller
 * (except in the VLAN-prepend path, where *m_headp is set NULL after
 * the chain has been freed by m_pullup/M_PREPEND).
 *
 * return 0 on success, positive errno on failure
 */
static int
em_encap(struct adapter *adapter, struct mbuf **m_headp)
{
	u_int32_t	txd_upper;
	u_int32_t	txd_lower, txd_used = 0, txd_saved = 0;
	int		i, j, error = 0;
	bus_dmamap_t	map;

	struct mbuf	*m_head;

	/* For 82544 Workaround */
	DESC_ARRAY	desc_array;
	u_int32_t	array_elements;
	u_int32_t	counter;
	struct m_tag	*mtag;
	bus_dma_segment_t	segs[EM_MAX_SCATTER];
	int		nsegs;
	struct em_buffer	*tx_buffer;
	struct em_tx_desc	*current_tx_desc = NULL;
	struct ifnet	*ifp = adapter->ifp;

	m_head = *m_headp;

	/*
	 * Force a cleanup if number of TX descriptors
	 * available hits the threshold
	 */
	if (adapter->num_tx_desc_avail <= EM_TX_CLEANUP_THRESHOLD) {
		em_clean_transmit_interrupts(adapter);
		if (adapter->num_tx_desc_avail <= EM_TX_CLEANUP_THRESHOLD) {
			adapter->no_tx_desc_avail1++;
			return(ENOBUFS);
		}
	}

	/*
	 * Map the packet for DMA.  The map of the first buffer slot is
	 * used for the whole chain.
	 */
	tx_buffer = &adapter->tx_buffer_area[adapter->next_avail_tx_desc];
	error = bus_dmamap_load_mbuf_sg(adapter->txtag, tx_buffer->map, m_head,
	    segs, &nsegs, BUS_DMA_NOWAIT);
	map = tx_buffer->map;
	if (error != 0) {
		adapter->no_tx_dma_setup++;
		return (error);
	}
	KASSERT(nsegs != 0, ("em_encap: empty packet"));

	if (nsegs > adapter->num_tx_desc_avail) {
		adapter->no_tx_desc_avail2++;
		error = ENOBUFS;
		goto encap_fail;
	}


	/* Set up a checksum-offload context descriptor if requested. */
	if (ifp->if_hwassist > 0) {
		em_transmit_checksum_setup(adapter,  m_head,
					   &txd_upper, &txd_lower);
	} else
		txd_upper = txd_lower = 0;


	/* Find out if we are in vlan mode */
	mtag = VLAN_OUTPUT_TAG(ifp, m_head);

	/*
	 * When operating in promiscuous mode, hardware encapsulation for
	 * packets is disabled. This means we have to add the vlan
	 * encapsulation in the driver, since it will have come down from the
	 * VLAN layer with a tag instead of a VLAN header.
	 */
	if (mtag != NULL && adapter->em_insert_vlan_header) {
		struct ether_vlan_header *evl;
		struct ether_header eh;

		m_head = m_pullup(m_head, sizeof(eh));
		if (m_head == NULL) {
			*m_headp = NULL;
			error = ENOBUFS;
			goto encap_fail;
		}
		/* Save the original Ethernet header, then make room
		 * for a full VLAN header in front of the payload. */
		eh = *mtod(m_head, struct ether_header *);
		M_PREPEND(m_head, sizeof(*evl), M_DONTWAIT);
		if (m_head == NULL) {
			*m_headp = NULL;
			error = ENOBUFS;
			goto encap_fail;
		}
		m_head = m_pullup(m_head, sizeof(*evl));
		if (m_head == NULL) {
			*m_headp = NULL;
			error = ENOBUFS;
			goto encap_fail;
		}
		evl = mtod(m_head, struct ether_vlan_header *);
		bcopy(&eh, evl, sizeof(*evl));
		evl->evl_proto = evl->evl_encap_proto;
		evl->evl_encap_proto = htons(ETHERTYPE_VLAN);
		evl->evl_tag = htons(VLAN_TAG_VALUE(mtag));
		m_tag_delete(m_head, mtag);
		/* Tag is now in the frame itself; no hardware insert. */
		mtag = NULL;
		*m_headp = m_head;
	}

	i = adapter->next_avail_tx_desc;
	if (adapter->pcix_82544) {
		/* Remember the start so a mid-packet failure can rewind. */
		txd_saved = i;
		txd_used = 0;
	}
	for (j = 0; j < nsegs; j++) {
		/* If adapter is 82544 and on PCIX bus */
		if(adapter->pcix_82544) {
			/*
			 * Check the Address and Length combination and
			 * split the data accordingly
			 */
			array_elements = em_fill_descriptors(segs[j].ds_addr,
			    segs[j].ds_len, &desc_array);
			for (counter = 0; counter < array_elements; counter++) {
				if (txd_used == adapter->num_tx_desc_avail) {
					adapter->next_avail_tx_desc = txd_saved;
					adapter->no_tx_desc_avail2++;
					error = ENOBUFS;
					goto encap_fail;
				}
				tx_buffer = &adapter->tx_buffer_area[i];
				current_tx_desc = &adapter->tx_desc_base[i];
				current_tx_desc->buffer_addr = htole64(
					desc_array.descriptor[counter].address);
				current_tx_desc->lower.data = htole32(
					(adapter->txd_cmd | txd_lower |
					 (u_int16_t)desc_array.descriptor[counter].length));
				current_tx_desc->upper.data = htole32((txd_upper));
				if (++i == adapter->num_tx_desc)
					i = 0;

				tx_buffer->m_head = NULL;
				txd_used++;
			}
		} else {
			tx_buffer = &adapter->tx_buffer_area[i];
			current_tx_desc = &adapter->tx_desc_base[i];

			current_tx_desc->buffer_addr = htole64(segs[j].ds_addr);
			current_tx_desc->lower.data = htole32(
				adapter->txd_cmd | txd_lower | segs[j].ds_len);
			current_tx_desc->upper.data = htole32(txd_upper);

			if (++i == adapter->num_tx_desc)
				i = 0;

			tx_buffer->m_head = NULL;
		}
	}

	adapter->next_avail_tx_desc = i;
	if (adapter->pcix_82544) {
		adapter->num_tx_desc_avail -= txd_used;
	}
	else {
		adapter->num_tx_desc_avail -= nsegs;
	}

	/* mtag still set here means hardware inserts the VLAN tag. */
	if (mtag != NULL) {
		/* Set the vlan id */
		current_tx_desc->upper.fields.special = htole16(VLAN_TAG_VALUE(mtag));

		/* Tell hardware to add tag */
		current_tx_desc->lower.data |= htole32(E1000_TXD_CMD_VLE);
	}

	/* The last tx_buffer used owns the mbuf chain for later free. */
	tx_buffer->m_head = m_head;
	bus_dmamap_sync(adapter->txtag, map, BUS_DMASYNC_PREWRITE);

	/*
	 * Last Descriptor of Packet needs End Of Packet (EOP)
	 */
	current_tx_desc->lower.data |= htole32(E1000_TXD_CMD_EOP);

	/*
	 * Advance the Transmit Descriptor Tail (Tdt), this tells the E1000
	 * that this frame is available to transmit.
	 */
	bus_dmamap_sync(adapter->txdma.dma_tag, adapter->txdma.dma_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	if (adapter->hw.mac_type == em_82547 &&
	    adapter->link_duplex == HALF_DUPLEX) {
		/* 82547 half-duplex: defer the tail write through the
		 * FIFO hang workaround. */
		em_82547_move_tail_locked(adapter);
	} else {
		E1000_WRITE_REG(&adapter->hw, TDT, i);
		if (adapter->hw.mac_type == em_82547) {
			em_82547_update_fifo_head(adapter, m_head->m_pkthdr.len);
		}
	}

	return(0);

encap_fail:
	bus_dmamap_unload(adapter->txtag, tx_buffer->map);
	return (error);
}
1465
1466 /*********************************************************************
1467 *
1468 * 82547 workaround to avoid controller hang in half-duplex environment.
1469 * The workaround is to avoid queuing a large packet that would span
1470 * the internal Tx FIFO ring boundary. We need to reset the FIFO pointers
1471 * in this case. We do that only when FIFO is quiescent.
1472 *
1473 **********************************************************************/
/*
 * Walk descriptors from the hardware tail (TDT) toward the software
 * tail, exposing them to the chip one whole packet at a time.  If a
 * packet would trigger the 82547 FIFO hang condition, stop and retry
 * from the tx_fifo_timer callout.  Caller holds the adapter lock.
 */
static void
em_82547_move_tail_locked(struct adapter *adapter)
{
	uint16_t hw_tdt;
	uint16_t sw_tdt;
	struct em_tx_desc *tx_desc;
	uint16_t length = 0;
	boolean_t eop = 0;

	EM_LOCK_ASSERT(adapter);

	hw_tdt = E1000_READ_REG(&adapter->hw, TDT);
	sw_tdt = adapter->next_avail_tx_desc;

	while (hw_tdt != sw_tdt) {
		/* Accumulate the packet length until we hit EOP. */
		tx_desc = &adapter->tx_desc_base[hw_tdt];
		length += tx_desc->lower.flags.length;
		eop = tx_desc->lower.data & E1000_TXD_CMD_EOP;
		if(++hw_tdt == adapter->num_tx_desc)
			hw_tdt = 0;

		if(eop) {
			if (em_82547_fifo_workaround(adapter, length)) {
				/* Unsafe to queue now; retry in 1 tick. */
				adapter->tx_fifo_wrk_cnt++;
				callout_reset(&adapter->tx_fifo_timer, 1,
					em_82547_move_tail, adapter);
				break;
			}
			E1000_WRITE_REG(&adapter->hw, TDT, hw_tdt);
			em_82547_update_fifo_head(adapter, length);
			length = 0;
		}
	}
	return;
}
1509
/*
 * Callout entry for the 82547 Tx FIFO workaround: take the adapter
 * lock and retry advancing the hardware tail pointer.
 */
static void
em_82547_move_tail(void *arg)
{
	struct adapter *sc = arg;

	EM_LOCK(sc);
	em_82547_move_tail_locked(sc);
	EM_UNLOCK(sc);
}
1519
1520 static int
1521 em_82547_fifo_workaround(struct adapter *adapter, int len)
1522 {
1523 int fifo_space, fifo_pkt_len;
1524
1525 fifo_pkt_len = roundup2(len + EM_FIFO_HDR, EM_FIFO_HDR);
1526
1527 if (adapter->link_duplex == HALF_DUPLEX) {
1528 fifo_space = adapter->tx_fifo_size - adapter->tx_fifo_head;
1529
1530 if (fifo_pkt_len >= (EM_82547_PKT_THRESH + fifo_space)) {
1531 if (em_82547_tx_fifo_reset(adapter)) {
1532 return(0);
1533 }
1534 else {
1535 return(1);
1536 }
1537 }
1538 }
1539
1540 return(0);
1541 }
1542
1543 static void
1544 em_82547_update_fifo_head(struct adapter *adapter, int len)
1545 {
1546 int fifo_pkt_len = roundup2(len + EM_FIFO_HDR, EM_FIFO_HDR);
1547
1548 /* tx_fifo_head is always 16 byte aligned */
1549 adapter->tx_fifo_head += fifo_pkt_len;
1550 if (adapter->tx_fifo_head >= adapter->tx_fifo_size) {
1551 adapter->tx_fifo_head -= adapter->tx_fifo_size;
1552 }
1553
1554 return;
1555 }
1556
1557
/*
 * Reset the 82547 internal Tx FIFO pointers, but only when the FIFO
 * is completely quiescent: descriptor ring drained (TDT==TDH), FIFO
 * head/tail register pairs equal, and the FIFO packet count zero.
 * Returns TRUE if the reset was performed, FALSE if the FIFO was busy.
 */
static int
em_82547_tx_fifo_reset(struct adapter *adapter)
{
	uint32_t tctl;

	if ( (E1000_READ_REG(&adapter->hw, TDT) ==
	      E1000_READ_REG(&adapter->hw, TDH)) &&
	     (E1000_READ_REG(&adapter->hw, TDFT) ==
	      E1000_READ_REG(&adapter->hw, TDFH)) &&
	     (E1000_READ_REG(&adapter->hw, TDFTS) ==
	      E1000_READ_REG(&adapter->hw, TDFHS)) &&
	     (E1000_READ_REG(&adapter->hw, TDFPC) == 0)) {

		/* Disable TX unit */
		tctl = E1000_READ_REG(&adapter->hw, TCTL);
		E1000_WRITE_REG(&adapter->hw, TCTL, tctl & ~E1000_TCTL_EN);

		/* Reset FIFO pointers */
		E1000_WRITE_REG(&adapter->hw, TDFT, adapter->tx_head_addr);
		E1000_WRITE_REG(&adapter->hw, TDFH, adapter->tx_head_addr);
		E1000_WRITE_REG(&adapter->hw, TDFTS, adapter->tx_head_addr);
		E1000_WRITE_REG(&adapter->hw, TDFHS, adapter->tx_head_addr);

		/* Re-enable TX unit */
		E1000_WRITE_REG(&adapter->hw, TCTL, tctl);
		E1000_WRITE_FLUSH(&adapter->hw);

		/* Keep the software FIFO model in sync with the reset. */
		adapter->tx_fifo_head = 0;
		adapter->tx_fifo_reset_cnt++;

		return(TRUE);
	}
	else {
		return(FALSE);
	}
}
1594
1595 static void
1596 em_set_promisc(struct adapter * adapter)
1597 {
1598
1599 u_int32_t reg_rctl;
1600 struct ifnet *ifp = adapter->ifp;
1601
1602 reg_rctl = E1000_READ_REG(&adapter->hw, RCTL);
1603
1604 if (ifp->if_flags & IFF_PROMISC) {
1605 reg_rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
1606 E1000_WRITE_REG(&adapter->hw, RCTL, reg_rctl);
1607 /* Disable VLAN stripping in promiscous mode
1608 * This enables bridging of vlan tagged frames to occur
1609 * and also allows vlan tags to be seen in tcpdump
1610 */
1611 if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING)
1612 em_disable_vlans(adapter);
1613 adapter->em_insert_vlan_header = 1;
1614 } else if (ifp->if_flags & IFF_ALLMULTI) {
1615 reg_rctl |= E1000_RCTL_MPE;
1616 reg_rctl &= ~E1000_RCTL_UPE;
1617 E1000_WRITE_REG(&adapter->hw, RCTL, reg_rctl);
1618 adapter->em_insert_vlan_header = 0;
1619 } else
1620 adapter->em_insert_vlan_header = 0;
1621
1622 return;
1623 }
1624
1625 static void
1626 em_disable_promisc(struct adapter * adapter)
1627 {
1628 u_int32_t reg_rctl;
1629 struct ifnet *ifp = adapter->ifp;
1630
1631 reg_rctl = E1000_READ_REG(&adapter->hw, RCTL);
1632
1633 reg_rctl &= (~E1000_RCTL_UPE);
1634 reg_rctl &= (~E1000_RCTL_MPE);
1635 E1000_WRITE_REG(&adapter->hw, RCTL, reg_rctl);
1636
1637 if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING)
1638 em_enable_vlans(adapter);
1639 adapter->em_insert_vlan_header = 0;
1640
1641 return;
1642 }
1643
1644
1645 /*********************************************************************
1646 * Multicast Update
1647 *
1648 * This routine is called whenever multicast address list is updated.
1649 *
1650 **********************************************************************/
1651
static void
em_set_multi(struct adapter * adapter)
{
	u_int32_t reg_rctl = 0;
	u_int8_t  mta[MAX_NUM_MULTICAST_ADDRESSES * ETH_LENGTH_OF_ADDRESS];
	struct ifmultiaddr  *ifma;
	int mcnt = 0;
	struct ifnet   *ifp = adapter->ifp;

	IOCTL_DEBUGOUT("em_set_multi: begin");

	/*
	 * 82542 rev 2.0 only: hold the receiver in reset (and disable
	 * MWI) while the multicast table is rewritten.
	 */
	if (adapter->hw.mac_type == em_82542_rev2_0) {
		reg_rctl = E1000_READ_REG(&adapter->hw, RCTL);
		if (adapter->hw.pci_cmd_word & CMD_MEM_WRT_INVALIDATE) {
			em_pci_clear_mwi(&adapter->hw);
		}
		reg_rctl |= E1000_RCTL_RST;
		E1000_WRITE_REG(&adapter->hw, RCTL, reg_rctl);
		msec_delay(5);
	}

	/* Collect up to MAX_NUM_MULTICAST_ADDRESSES link-level addresses. */
	IF_ADDR_LOCK(ifp);
	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;

		if (mcnt == MAX_NUM_MULTICAST_ADDRESSES) break;

		bcopy(LLADDR((struct sockaddr_dl *)ifma->ifma_addr),
		      &mta[mcnt*ETH_LENGTH_OF_ADDRESS], ETH_LENGTH_OF_ADDRESS);
		mcnt++;
	}
	IF_ADDR_UNLOCK(ifp);

	/* Too many groups for the table: accept all multicast instead. */
	if (mcnt >= MAX_NUM_MULTICAST_ADDRESSES) {
		reg_rctl = E1000_READ_REG(&adapter->hw, RCTL);
		reg_rctl |= E1000_RCTL_MPE;
		E1000_WRITE_REG(&adapter->hw, RCTL, reg_rctl);
	} else
		em_mc_addr_list_update(&adapter->hw, mta, mcnt, 0, 1);

	/* Take the 82542 rev 2.0 receiver back out of reset. */
	if (adapter->hw.mac_type == em_82542_rev2_0) {
		reg_rctl = E1000_READ_REG(&adapter->hw, RCTL);
		reg_rctl &= ~E1000_RCTL_RST;
		E1000_WRITE_REG(&adapter->hw, RCTL, reg_rctl);
		msec_delay(5);
		if (adapter->hw.pci_cmd_word & CMD_MEM_WRT_INVALIDATE) {
			em_pci_set_mwi(&adapter->hw);
		}
	}

	return;
}
1705
1706
1707 /*********************************************************************
1708 * Timer routine
1709 *
1710 * This routine checks for link status and updates statistics.
1711 *
1712 **********************************************************************/
1713
1714 static void
1715 em_local_timer(void *arg)
1716 {
1717 struct ifnet *ifp;
1718 struct adapter * adapter = arg;
1719 ifp = adapter->ifp;
1720
1721 EM_LOCK(adapter);
1722
1723 em_check_for_link(&adapter->hw);
1724 em_print_link_status(adapter);
1725 em_update_stats_counters(adapter);
1726 if (em_display_debug_stats && ifp->if_drv_flags & IFF_DRV_RUNNING) {
1727 em_print_hw_stats(adapter);
1728 }
1729 em_smartspeed(adapter);
1730
1731 callout_reset(&adapter->timer, hz, em_local_timer, adapter);
1732
1733 EM_UNLOCK(adapter);
1734 return;
1735 }
1736
1737 static void
1738 em_print_link_status(struct adapter * adapter)
1739 {
1740 struct ifnet *ifp = adapter->ifp;
1741
1742 if (E1000_READ_REG(&adapter->hw, STATUS) & E1000_STATUS_LU) {
1743 if (adapter->link_active == 0) {
1744 em_get_speed_and_duplex(&adapter->hw,
1745 &adapter->link_speed,
1746 &adapter->link_duplex);
1747 if (bootverbose)
1748 printf("em%d: Link is up %d Mbps %s\n",
1749 adapter->unit,
1750 adapter->link_speed,
1751 ((adapter->link_duplex == FULL_DUPLEX) ?
1752 "Full Duplex" : "Half Duplex"));
1753 adapter->link_active = 1;
1754 adapter->smartspeed = 0;
1755 ifp->if_baudrate = adapter->link_speed * 1000000;
1756 if_link_state_change(ifp, LINK_STATE_UP);
1757 }
1758 } else {
1759 if (adapter->link_active == 1) {
1760 ifp->if_baudrate = adapter->link_speed = 0;
1761 adapter->link_duplex = 0;
1762 if (bootverbose)
1763 printf("em%d: Link is Down\n", adapter->unit);
1764 adapter->link_active = 0;
1765 if_link_state_change(ifp, LINK_STATE_DOWN);
1766 }
1767 }
1768
1769 return;
1770 }
1771
1772 /*********************************************************************
1773 *
1774 * This routine disables all traffic on the adapter by issuing a
1775 * global reset on the MAC and deallocates TX/RX buffers.
1776 *
1777 **********************************************************************/
1778
static void
em_stop(void *arg)
{
	struct ifnet   *ifp;
	struct adapter * adapter = arg;
	ifp = adapter->ifp;

	/* Caller must hold the adapter lock. */
	mtx_assert(&adapter->mtx, MA_OWNED);

	INIT_DEBUGOUT("em_stop: begin");

	/* Silence the chip, then globally reset it. */
	em_disable_intr(adapter);
	em_reset_hw(&adapter->hw);
	callout_stop(&adapter->timer);
	callout_stop(&adapter->tx_fifo_timer);
	/* Release the mbufs and DMA maps held by both rings. */
	em_free_transmit_structures(adapter);
	em_free_receive_structures(adapter);


	/* Tell the stack that the interface is no longer active */
	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);

	return;
}
1803
1804
1805 /*********************************************************************
1806 *
1807 * Determine hardware revision.
1808 *
1809 **********************************************************************/
/*
 * Read PCI config space to identify the controller.  Forces the Bus
 * Master and Memory Access enable bits on if firmware left them
 * clear, records vendor/device/subsystem IDs, and derives the MAC
 * type for the rest of the driver.
 */
static void
em_identify_hardware(struct adapter * adapter)
{
	device_t dev = adapter->dev;

	/* Make sure our PCI config space has the necessary stuff set */
	adapter->hw.pci_cmd_word = pci_read_config(dev, PCIR_COMMAND, 2);
	if (!((adapter->hw.pci_cmd_word & PCIM_CMD_BUSMASTEREN) &&
	      (adapter->hw.pci_cmd_word & PCIM_CMD_MEMEN))) {
		printf("em%d: Memory Access and/or Bus Master bits were not set!\n",
		       adapter->unit);
		adapter->hw.pci_cmd_word |=
		(PCIM_CMD_BUSMASTEREN | PCIM_CMD_MEMEN);
		pci_write_config(dev, PCIR_COMMAND, adapter->hw.pci_cmd_word, 2);
	}

	/* Save off the information about this board */
	adapter->hw.vendor_id = pci_get_vendor(dev);
	adapter->hw.device_id = pci_get_device(dev);
	adapter->hw.revision_id = pci_read_config(dev, PCIR_REVID, 1);
	adapter->hw.subsystem_vendor_id = pci_read_config(dev, PCIR_SUBVEND_0, 2);
	adapter->hw.subsystem_id = pci_read_config(dev, PCIR_SUBDEV_0, 2);

	/* Identify the MAC */
	if (em_set_mac_type(&adapter->hw))
		printf("em%d: Unknown MAC Type\n", adapter->unit);

	/* These parts need the extra PHY init script run after reset. */
	if(adapter->hw.mac_type == em_82541 ||
	   adapter->hw.mac_type == em_82541_rev_2 ||
	   adapter->hw.mac_type == em_82547 ||
	   adapter->hw.mac_type == em_82547_rev_2)
		adapter->hw.phy_init_script = TRUE;

	return;
}
1845
/*
 * Allocate and map the register memory BAR, locate and map the I/O
 * BAR (post-82543 parts), and hook up the shared interrupt handler.
 * Returns 0 on success or ENXIO on any failure; resources acquired
 * before a failure are released by em_free_pci_resources().
 */
static int
em_allocate_pci_resources(struct adapter * adapter)
{
	int             val, rid;
	device_t        dev = adapter->dev;

	/* Register space is always memory BAR 0. */
	rid = PCIR_BAR(0);
	adapter->res_memory = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
						     &rid, RF_ACTIVE);
	if (!(adapter->res_memory)) {
		printf("em%d: Unable to allocate bus resource: memory\n",
		       adapter->unit);
		return(ENXIO);
	}
	adapter->osdep.mem_bus_space_tag =
	rman_get_bustag(adapter->res_memory);
	adapter->osdep.mem_bus_space_handle =
	rman_get_bushandle(adapter->res_memory);
	adapter->hw.hw_addr = (uint8_t *)&adapter->osdep.mem_bus_space_handle;


	if (adapter->hw.mac_type > em_82543) {
		/* Figure our where our IO BAR is ? */
		for (rid = PCIR_BAR(0); rid < PCIR_CIS;) {
			val = pci_read_config(dev, rid, 4);
			if (E1000_BAR_TYPE(val) == E1000_BAR_TYPE_IO) {
				adapter->io_rid = rid;
				break;
			}
			rid += 4;
			/* check for 64bit BAR */
			if (E1000_BAR_MEM_TYPE(val) == E1000_BAR_MEM_TYPE_64BIT)
				rid += 4;
		}
		if (rid >= PCIR_CIS) {
			printf("em%d: Unable to locate IO BAR\n", adapter->unit);
			return (ENXIO);
		}
		adapter->res_ioport = bus_alloc_resource_any(dev,
							     SYS_RES_IOPORT,
							     &adapter->io_rid,
							     RF_ACTIVE);
		if (!(adapter->res_ioport)) {
			printf("em%d: Unable to allocate bus resource: ioport\n",
			       adapter->unit);
			return(ENXIO);
		}
		adapter->hw.io_base = 0;
		adapter->osdep.io_bus_space_tag =
		    rman_get_bustag(adapter->res_ioport);
		adapter->osdep.io_bus_space_handle =
		    rman_get_bushandle(adapter->res_ioport);
	}

	/* Shareable IRQ; em_intr is MPSAFE (takes the adapter lock). */
	rid = 0x0;
	adapter->res_interrupt = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
							RF_SHAREABLE |
							RF_ACTIVE);
	if (!(adapter->res_interrupt)) {
		printf("em%d: Unable to allocate bus resource: interrupt\n",
		       adapter->unit);
		return(ENXIO);
	}
	if (bus_setup_intr(dev, adapter->res_interrupt,
			   INTR_TYPE_NET | INTR_MPSAFE,
			   (void (*)(void *)) em_intr, adapter,
			   &adapter->int_handler_tag)) {
		printf("em%d: Error registering interrupt handler!\n",
		       adapter->unit);
		return(ENXIO);
	}

	adapter->hw.back = &adapter->osdep;

	return(0);
}
1922
1923 static void
1924 em_free_pci_resources(struct adapter * adapter)
1925 {
1926 device_t dev = adapter->dev;
1927
1928 if (adapter->res_interrupt != NULL) {
1929 bus_teardown_intr(dev, adapter->res_interrupt,
1930 adapter->int_handler_tag);
1931 bus_release_resource(dev, SYS_RES_IRQ, 0,
1932 adapter->res_interrupt);
1933 }
1934 if (adapter->res_memory != NULL) {
1935 bus_release_resource(dev, SYS_RES_MEMORY, PCIR_BAR(0),
1936 adapter->res_memory);
1937 }
1938
1939 if (adapter->res_ioport != NULL) {
1940 bus_release_resource(dev, SYS_RES_IOPORT, adapter->io_rid,
1941 adapter->res_ioport);
1942 }
1943 return;
1944 }
1945
1946 /*********************************************************************
1947 *
1948 * Initialize the hardware to a configuration as specified by the
1949 * adapter structure. The controller is reset, the EEPROM is
1950 * verified, the MAC address is set, then the shared initialization
1951 * routines are called.
1952 *
1953 **********************************************************************/
static int
em_hardware_init(struct adapter * adapter)
{
	uint16_t rx_buffer_size;

	INIT_DEBUGOUT("em_hardware_init: begin");
	/* Issue a global reset */
	em_reset_hw(&adapter->hw);

	/* When hardware is reset, fifo_head is also reset */
	adapter->tx_fifo_head = 0;

	/* Make sure we have a good EEPROM before we read from it */
	if (em_validate_eeprom_checksum(&adapter->hw) < 0) {
		printf("em%d: The EEPROM Checksum Is Not Valid\n",
		       adapter->unit);
		return(EIO);
	}

	if (em_read_part_num(&adapter->hw, &(adapter->part_num)) < 0) {
		printf("em%d: EEPROM read error while reading part number\n",
		       adapter->unit);
		return(EIO);
	}

	/*
	 * These parameters control the automatic generation (Tx) and
	 * response (Rx) to Ethernet PAUSE frames.
	 * - High water mark should allow for at least two frames to be
	 *   received after sending an XOFF.
	 * - Low water mark works best when it is very near the high water mark.
	 *   This allows the receiver to restart by sending XON when it has drained
	 *   a bit. Here we use an arbitary value of 1500 which will restart after
	 *   one full frame is pulled from the buffer. There could be several smaller
	 *   frames in the buffer and if so they will not trigger the XON until their
	 *   total number reduces the buffer by 1500.
	 * - The pause time is fairly large at 1000 x 512ns = 512 usec.
	 */
	rx_buffer_size = ((E1000_READ_REG(&adapter->hw, PBA) & 0xffff) << 10 );

	adapter->hw.fc_high_water = rx_buffer_size -
	    roundup2(adapter->hw.max_frame_size, 1024);
	adapter->hw.fc_low_water = adapter->hw.fc_high_water - 1500;
	adapter->hw.fc_pause_time = 0x1000;
	adapter->hw.fc_send_xon = TRUE;
	adapter->hw.fc = em_fc_full;

	if (em_init_hw(&adapter->hw) < 0) {
		printf("em%d: Hardware Initialization Failed",
		       adapter->unit);
		return(EIO);
	}

	/* Snapshot the initial link state into the softc. */
	em_check_for_link(&adapter->hw);
	if (E1000_READ_REG(&adapter->hw, STATUS) & E1000_STATUS_LU)
		adapter->link_active = 1;
	else
		adapter->link_active = 0;

	if (adapter->link_active) {
		em_get_speed_and_duplex(&adapter->hw,
					&adapter->link_speed,
					&adapter->link_duplex);
	} else {
		adapter->link_speed = 0;
		adapter->link_duplex = 0;
	}

	return(0);
}
2024
2025 /*********************************************************************
2026 *
2027 * Setup networking device structure and register an interface.
2028 *
2029 **********************************************************************/
static void
em_setup_interface(device_t dev, struct adapter * adapter)
{
	struct ifnet   *ifp;
	INIT_DEBUGOUT("em_setup_interface: begin");

	/* Allocate the ifnet; failure here is unrecoverable at attach time. */
	ifp = adapter->ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL)
		panic("%s: can not if_alloc()", device_get_nameunit(dev));
	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	ifp->if_mtu = ETHERMTU;
	ifp->if_init =  em_init;
	ifp->if_softc = adapter;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = em_ioctl;
	ifp->if_start = em_start;
	ifp->if_watchdog = em_watchdog;
	/* Size the send queue to the TX ring, leaving one slot of slack. */
	IFQ_SET_MAXLEN(&ifp->if_snd, adapter->num_tx_desc - 1);
	ifp->if_snd.ifq_drv_maxlen = adapter->num_tx_desc - 1;
	IFQ_SET_READY(&ifp->if_snd);

	ether_ifattach(ifp, adapter->hw.mac_addr);

	ifp->if_capabilities = ifp->if_capenable = 0;

	/* Hardware checksum offload is only present on 82543 and newer. */
	if (adapter->hw.mac_type >= em_82543) {
		ifp->if_capabilities |= IFCAP_HWCSUM;
		ifp->if_capenable |= IFCAP_HWCSUM;
	}

	/*
	 * Tell the upper layer(s) we support long frames.
	 * Note: VLAN hardware tagging is advertised but not enabled by
	 * default; only IFCAP_VLAN_MTU is switched on here.
	 */
	ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
	ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU;
	ifp->if_capenable |= IFCAP_VLAN_MTU;

#ifdef DEVICE_POLLING
	/* Polling capability is advertised; enabling is up to the admin. */
	ifp->if_capabilities |= IFCAP_POLLING;
#endif

	/*
	 * Specify the media types supported by this adapter and register
	 * callbacks to update media and link information
	 */
	ifmedia_init(&adapter->media, IFM_IMASK, em_media_change,
		     em_media_status);
	if (adapter->hw.media_type == em_media_type_fiber) {
		/* Fiber parts only do gigabit. */
		ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_SX | IFM_FDX,
			    0, NULL);
		ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_SX,
			    0, NULL);
	} else {
		/* Copper parts support the full 10/100/1000 matrix. */
		ifmedia_add(&adapter->media, IFM_ETHER | IFM_10_T, 0, NULL);
		ifmedia_add(&adapter->media, IFM_ETHER | IFM_10_T | IFM_FDX,
			    0, NULL);
		ifmedia_add(&adapter->media, IFM_ETHER | IFM_100_TX,
			    0, NULL);
		ifmedia_add(&adapter->media, IFM_ETHER | IFM_100_TX | IFM_FDX,
			    0, NULL);
		ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_T | IFM_FDX,
			    0, NULL);
		ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_T, 0, NULL);
	}
	/* Default to autoselect. */
	ifmedia_add(&adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL);
	ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);

	return;
}
2099
2100
2101 /*********************************************************************
2102 *
2103 * Workaround for SmartSpeed on 82541 and 82547 controllers
2104 *
2105 **********************************************************************/
static void
em_smartspeed(struct adapter *adapter)
{
	uint16_t phy_tmp;

	/*
	 * The workaround only applies while the link is down on an IGP
	 * PHY that is autonegotiating and advertising 1000BASE-T full
	 * duplex; in every other case there is nothing to do.
	 */
	if(adapter->link_active || (adapter->hw.phy_type != em_phy_igp) ||
	   !adapter->hw.autoneg || !(adapter->hw.autoneg_advertised & ADVERTISE_1000_FULL))
		return;

	if(adapter->smartspeed == 0) {
		/* If Master/Slave config fault is asserted twice,
		 * we assume back-to-back */
		em_read_phy_reg(&adapter->hw, PHY_1000T_STATUS, &phy_tmp);
		if(!(phy_tmp & SR_1000T_MS_CONFIG_FAULT)) return;
		em_read_phy_reg(&adapter->hw, PHY_1000T_STATUS, &phy_tmp);
		if(phy_tmp & SR_1000T_MS_CONFIG_FAULT) {
			em_read_phy_reg(&adapter->hw, PHY_1000T_CTRL,
					&phy_tmp);
			if(phy_tmp & CR_1000T_MS_ENABLE) {
				/* Clear manual Master/Slave selection and
				 * restart autonegotiation to recover. */
				phy_tmp &= ~CR_1000T_MS_ENABLE;
				em_write_phy_reg(&adapter->hw,
						    PHY_1000T_CTRL, phy_tmp);
				adapter->smartspeed++;
				if(adapter->hw.autoneg &&
				   !em_phy_setup_autoneg(&adapter->hw) &&
				   !em_read_phy_reg(&adapter->hw, PHY_CTRL,
						       &phy_tmp)) {
					phy_tmp |= (MII_CR_AUTO_NEG_EN |
						    MII_CR_RESTART_AUTO_NEG);
					em_write_phy_reg(&adapter->hw,
							 PHY_CTRL, phy_tmp);
				}
			}
		}
		return;
	} else if(adapter->smartspeed == EM_SMARTSPEED_DOWNSHIFT) {
		/* If still no link, perhaps using 2/3 pair cable */
		em_read_phy_reg(&adapter->hw, PHY_1000T_CTRL, &phy_tmp);
		phy_tmp |= CR_1000T_MS_ENABLE;
		em_write_phy_reg(&adapter->hw, PHY_1000T_CTRL, phy_tmp);
		/* Kick off another autonegotiation round with the
		 * Master/Slave override re-enabled. */
		if(adapter->hw.autoneg &&
		   !em_phy_setup_autoneg(&adapter->hw) &&
		   !em_read_phy_reg(&adapter->hw, PHY_CTRL, &phy_tmp)) {
			phy_tmp |= (MII_CR_AUTO_NEG_EN |
				    MII_CR_RESTART_AUTO_NEG);
			em_write_phy_reg(&adapter->hw, PHY_CTRL, phy_tmp);
		}
	}
	/* Restart process after EM_SMARTSPEED_MAX iterations */
	if(adapter->smartspeed++ == EM_SMARTSPEED_MAX)
		adapter->smartspeed = 0;

	return;
}
2160
2161
2162 /*
2163 * Manage DMA'able memory.
2164 */
2165 static void
2166 em_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
2167 {
2168 if (error)
2169 return;
2170 *(bus_addr_t *) arg = segs[0].ds_addr;
2171 }
2172
2173 static int
2174 em_dma_malloc(struct adapter *adapter, bus_size_t size,
2175 struct em_dma_alloc *dma, int mapflags)
2176 {
2177 int r;
2178
2179 r = bus_dma_tag_create(NULL, /* parent */
2180 E1000_DBA_ALIGN, 0, /* alignment, bounds */
2181 BUS_SPACE_MAXADDR, /* lowaddr */
2182 BUS_SPACE_MAXADDR, /* highaddr */
2183 NULL, NULL, /* filter, filterarg */
2184 size, /* maxsize */
2185 1, /* nsegments */
2186 size, /* maxsegsize */
2187 BUS_DMA_ALLOCNOW, /* flags */
2188 NULL, /* lockfunc */
2189 NULL, /* lockarg */
2190 &dma->dma_tag);
2191 if (r != 0) {
2192 printf("em%d: em_dma_malloc: bus_dma_tag_create failed; "
2193 "error %u\n", adapter->unit, r);
2194 goto fail_0;
2195 }
2196
2197 r = bus_dmamem_alloc(dma->dma_tag, (void**) &dma->dma_vaddr,
2198 BUS_DMA_NOWAIT, &dma->dma_map);
2199 if (r != 0) {
2200 printf("em%d: em_dma_malloc: bus_dmammem_alloc failed; "
2201 "size %ju, error %d\n", adapter->unit,
2202 (uintmax_t)size, r);
2203 goto fail_2;
2204 }
2205
2206 dma->dma_paddr = 0;
2207 r = bus_dmamap_load(dma->dma_tag, dma->dma_map, dma->dma_vaddr,
2208 size,
2209 em_dmamap_cb,
2210 &dma->dma_paddr,
2211 mapflags | BUS_DMA_NOWAIT);
2212 if (r != 0 || dma->dma_paddr == 0) {
2213 printf("em%d: em_dma_malloc: bus_dmamap_load failed; "
2214 "error %u\n", adapter->unit, r);
2215 goto fail_3;
2216 }
2217
2218 return (0);
2219
2220 fail_3:
2221 bus_dmamap_unload(dma->dma_tag, dma->dma_map);
2222 fail_2:
2223 bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map);
2224 bus_dma_tag_destroy(dma->dma_tag);
2225 fail_0:
2226 dma->dma_map = NULL;
2227 dma->dma_tag = NULL;
2228 return (r);
2229 }
2230
/*
 * Release a DMA area set up by em_dma_malloc().  Teardown order matters:
 * sync, unload the map, free the memory, then destroy the tag.  Safe to
 * call on an em_dma_alloc whose setup failed part-way.
 */
static void
em_dma_free(struct adapter *adapter, struct em_dma_alloc *dma)
{
	if (dma->dma_tag == NULL)
		return;		/* nothing was ever allocated */
	if (dma->dma_map != NULL) {
		bus_dmamap_sync(dma->dma_tag, dma->dma_map,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(dma->dma_tag, dma->dma_map);
		bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map);
		dma->dma_map = NULL;
	}
	bus_dma_tag_destroy(dma->dma_tag);
	dma->dma_tag = NULL;
}
2246
2247
2248 /*********************************************************************
2249 *
2250 * Allocate memory for tx_buffer structures. The tx_buffer stores all
2251 * the information needed to transmit a packet on the wire.
2252 *
2253 **********************************************************************/
2254 static int
2255 em_allocate_transmit_structures(struct adapter * adapter)
2256 {
2257 if (!(adapter->tx_buffer_area =
2258 (struct em_buffer *) malloc(sizeof(struct em_buffer) *
2259 adapter->num_tx_desc, M_DEVBUF,
2260 M_NOWAIT))) {
2261 printf("em%d: Unable to allocate tx_buffer memory\n",
2262 adapter->unit);
2263 return ENOMEM;
2264 }
2265
2266 bzero(adapter->tx_buffer_area,
2267 sizeof(struct em_buffer) * adapter->num_tx_desc);
2268
2269 return 0;
2270 }
2271
2272 /*********************************************************************
2273 *
2274 * Allocate and initialize transmit structures.
2275 *
2276 **********************************************************************/
static int
em_setup_transmit_structures(struct adapter * adapter)
{
	struct em_buffer *tx_buffer;
	bus_size_t size;
	int error, i;

	/*
	 * Setup DMA descriptor areas.
	 * The tag sized here is for mapping outgoing packet data, so it
	 * must accommodate a full-sized frame split across up to
	 * EM_MAX_SCATTER segments.
	 */
	size = roundup2(adapter->hw.max_frame_size, MCLBYTES);
	if ((error = bus_dma_tag_create(NULL,		/* parent */
				1, 0,			/* alignment, bounds */
				BUS_SPACE_MAXADDR,	/* lowaddr */
				BUS_SPACE_MAXADDR,	/* highaddr */
				NULL, NULL,		/* filter, filterarg */
				size,			/* maxsize */
				EM_MAX_SCATTER,		/* nsegments */
				size,			/* maxsegsize */
				0,			/* flags */
				NULL,			/* lockfunc */
				NULL,			/* lockarg */
				&adapter->txtag)) != 0) {
		printf("em%d: Unable to allocate TX DMA tag\n", adapter->unit);
		goto fail;
	}

	if ((error = em_allocate_transmit_structures(adapter)) != 0)
		goto fail;

	/* Clear the descriptor ring, then create one DMA map per buffer. */
	bzero((void *) adapter->tx_desc_base,
	      (sizeof(struct em_tx_desc)) * adapter->num_tx_desc);
	tx_buffer = adapter->tx_buffer_area;
	for (i = 0; i < adapter->num_tx_desc; i++) {
		error = bus_dmamap_create(adapter->txtag, 0, &tx_buffer->map);
		if (error != 0) {
			printf("em%d: Unable to create TX DMA map\n",
			    adapter->unit);
			goto fail;
		}
		tx_buffer++;
	}

	/* Ring starts empty: head and oldest-used both at slot 0. */
	adapter->next_avail_tx_desc = 0;
	adapter->oldest_used_tx_desc = 0;

	/* Set number of descriptors available */
	adapter->num_tx_desc_avail = adapter->num_tx_desc;

	/* Set checksum context */
	adapter->active_checksum_context = OFFLOAD_NONE;
	bus_dmamap_sync(adapter->txdma.dma_tag, adapter->txdma.dma_map,
	   BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	return (0);

fail:
	/* em_free_transmit_structures() tolerates partially-built state. */
	em_free_transmit_structures(adapter);
	return (error);
}
2337
2338 /*********************************************************************
2339 *
2340 * Enable transmit unit.
2341 *
2342 **********************************************************************/
static void
em_initialize_transmit_unit(struct adapter * adapter)
{
	u_int32_t       reg_tctl;
	u_int32_t       reg_tipg = 0;
	u_int64_t	bus_addr;

	 INIT_DEBUGOUT("em_initialize_transmit_unit: begin");
	/* Setup the Base and Length of the Tx Descriptor Ring */
	bus_addr = adapter->txdma.dma_paddr;
	E1000_WRITE_REG(&adapter->hw, TDBAL, (u_int32_t)bus_addr);
	E1000_WRITE_REG(&adapter->hw, TDBAH, (u_int32_t)(bus_addr >> 32));
	E1000_WRITE_REG(&adapter->hw, TDLEN,
			adapter->num_tx_desc *
			sizeof(struct em_tx_desc));

	/* Setup the HW Tx Head and Tail descriptor pointers */
	E1000_WRITE_REG(&adapter->hw, TDH, 0);
	E1000_WRITE_REG(&adapter->hw, TDT, 0);


	HW_DEBUGOUT2("Base = %x, Length = %x\n",
		     E1000_READ_REG(&adapter->hw, TDBAL),
		     E1000_READ_REG(&adapter->hw, TDLEN));

	/* Set the default values for the Tx Inter Packet Gap timer */
	switch (adapter->hw.mac_type) {
	case em_82542_rev2_0:
	case em_82542_rev2_1:
		/* The oldest parts use their own IPG defaults. */
		reg_tipg = DEFAULT_82542_TIPG_IPGT;
		reg_tipg |= DEFAULT_82542_TIPG_IPGR1 << E1000_TIPG_IPGR1_SHIFT;
		reg_tipg |= DEFAULT_82542_TIPG_IPGR2 << E1000_TIPG_IPGR2_SHIFT;
		break;
	default:
		/* 82543 and newer: the IPGT value depends on the PHY medium. */
		if (adapter->hw.media_type == em_media_type_fiber)
			reg_tipg = DEFAULT_82543_TIPG_IPGT_FIBER;
		else
			reg_tipg = DEFAULT_82543_TIPG_IPGT_COPPER;
		reg_tipg |= DEFAULT_82543_TIPG_IPGR1 << E1000_TIPG_IPGR1_SHIFT;
		reg_tipg |= DEFAULT_82543_TIPG_IPGR2 << E1000_TIPG_IPGR2_SHIFT;
	}

	E1000_WRITE_REG(&adapter->hw, TIPG, reg_tipg);
	E1000_WRITE_REG(&adapter->hw, TIDV, adapter->tx_int_delay.value);
	/* The absolute TX interrupt delay (TADV) only exists on 82540+. */
	if(adapter->hw.mac_type >= em_82540)
		E1000_WRITE_REG(&adapter->hw, TADV,
		    adapter->tx_abs_int_delay.value);

	/* Program the Transmit Control Register */
	reg_tctl = E1000_TCTL_PSP | E1000_TCTL_EN |
		   (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT);
	/* 82571+ support multiple concurrent TX requests (MULR). */
	if (adapter->hw.mac_type >= em_82571)
		reg_tctl |= E1000_TCTL_MULR;
	if (adapter->link_duplex == 1) {
		reg_tctl |= E1000_FDX_COLLISION_DISTANCE << E1000_COLD_SHIFT;
	} else {
		reg_tctl |= E1000_HDX_COLLISION_DISTANCE << E1000_COLD_SHIFT;
	}
	E1000_WRITE_REG(&adapter->hw, TCTL, reg_tctl);

	/* Setup Transmit Descriptor Settings for this adapter */
	adapter->txd_cmd = E1000_TXD_CMD_IFCS | E1000_TXD_CMD_RS;

	/* Only request delayed interrupts (IDE) when a delay is configured. */
	if (adapter->tx_int_delay.value > 0)
		adapter->txd_cmd |= E1000_TXD_CMD_IDE;

	return;
}
2411
2412 /*********************************************************************
2413 *
2414 * Free all transmit related data structures.
2415 *
2416 **********************************************************************/
static void
em_free_transmit_structures(struct adapter * adapter)
{
	struct em_buffer   *tx_buffer;
	int             i;

	INIT_DEBUGOUT("free_transmit_structures: begin");

	if (adapter->tx_buffer_area != NULL) {
		tx_buffer = adapter->tx_buffer_area;
		for (i = 0; i < adapter->num_tx_desc; i++, tx_buffer++) {
			if (tx_buffer->m_head != NULL) {
				/* In-flight packet: sync and unload before
				 * freeing the mbuf chain. */
				bus_dmamap_sync(adapter->txtag, tx_buffer->map,
				    BUS_DMASYNC_POSTWRITE);
				bus_dmamap_unload(adapter->txtag,
				    tx_buffer->map);
				m_freem(tx_buffer->m_head);
				tx_buffer->m_head = NULL;
			} else if (tx_buffer->map != NULL)
				/* No mbuf, but the map may still be loaded. */
				bus_dmamap_unload(adapter->txtag,
				    tx_buffer->map);
			if (tx_buffer->map != NULL) {
				bus_dmamap_destroy(adapter->txtag,
				    tx_buffer->map);
				tx_buffer->map = NULL;
			}
		}
	}
	if (adapter->tx_buffer_area != NULL) {
		free(adapter->tx_buffer_area, M_DEVBUF);
		adapter->tx_buffer_area = NULL;
	}
	/* Destroy the tag last, after every map created from it is gone. */
	if (adapter->txtag != NULL) {
		bus_dma_tag_destroy(adapter->txtag);
		adapter->txtag = NULL;
	}
	return;
}
2455
2456 /*********************************************************************
2457 *
2458 * The offload context needs to be set when we transfer the first
2459 * packet of a particular protocol (TCP/UDP). We change the
2460 * context only if the protocol type changes.
2461 *
2462 **********************************************************************/
static void
em_transmit_checksum_setup(struct adapter * adapter,
			   struct mbuf *mp,
			   u_int32_t *txd_upper,
			   u_int32_t *txd_lower)
{
	struct em_context_desc *TXD;
	struct em_buffer *tx_buffer;
	int curr_txd;

	if (mp->m_pkthdr.csum_flags) {

		if (mp->m_pkthdr.csum_flags & CSUM_TCP) {
			/* Request TCP checksum insertion (TXSM). */
			*txd_upper = E1000_TXD_POPTS_TXSM << 8;
			*txd_lower = E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D;
			/* Context already programmed for TCP: nothing to do. */
			if (adapter->active_checksum_context == OFFLOAD_TCP_IP)
				return;
			else
				adapter->active_checksum_context = OFFLOAD_TCP_IP;

		} else if (mp->m_pkthdr.csum_flags & CSUM_UDP) {
			*txd_upper = E1000_TXD_POPTS_TXSM << 8;
			*txd_lower = E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D;
			if (adapter->active_checksum_context == OFFLOAD_UDP_IP)
				return;
			else
				adapter->active_checksum_context = OFFLOAD_UDP_IP;
		} else {
			/* Neither TCP nor UDP: no offload requested. */
			*txd_upper = 0;
			*txd_lower = 0;
			return;
		}
	} else {
		*txd_upper = 0;
		*txd_lower = 0;
		return;
	}

	/* If we reach this point, the checksum offload context
	 * needs to be reset.
	 * Consume one TX descriptor to hold the context descriptor;
	 * the field offsets below assume an untagged IPv4 header
	 * immediately after the Ethernet header.
	 */
	curr_txd = adapter->next_avail_tx_desc;
	tx_buffer = &adapter->tx_buffer_area[curr_txd];
	TXD = (struct em_context_desc *) &adapter->tx_desc_base[curr_txd];

	TXD->lower_setup.ip_fields.ipcss = ETHER_HDR_LEN;
	TXD->lower_setup.ip_fields.ipcso =
		ETHER_HDR_LEN + offsetof(struct ip, ip_sum);
	TXD->lower_setup.ip_fields.ipcse =
		htole16(ETHER_HDR_LEN + sizeof(struct ip) - 1);

	TXD->upper_setup.tcp_fields.tucss =
		ETHER_HDR_LEN + sizeof(struct ip);
	TXD->upper_setup.tcp_fields.tucse = htole16(0);

	if (adapter->active_checksum_context == OFFLOAD_TCP_IP) {
		TXD->upper_setup.tcp_fields.tucso =
			ETHER_HDR_LEN + sizeof(struct ip) +
			offsetof(struct tcphdr, th_sum);
	} else if (adapter->active_checksum_context == OFFLOAD_UDP_IP) {
		TXD->upper_setup.tcp_fields.tucso =
			ETHER_HDR_LEN + sizeof(struct ip) +
			offsetof(struct udphdr, uh_sum);
	}

	TXD->tcp_seg_setup.data = htole32(0);
	TXD->cmd_and_length = htole32(adapter->txd_cmd | E1000_TXD_CMD_DEXT);

	/* Context descriptors carry no packet data. */
	tx_buffer->m_head = NULL;

	if (++curr_txd == adapter->num_tx_desc)
		curr_txd = 0;

	adapter->num_tx_desc_avail--;
	adapter->next_avail_tx_desc = curr_txd;

	return;
}
2541
2542 /**********************************************************************
2543 *
2544 * Examine each tx_buffer in the used queue. If the hardware is done
2545 * processing the packet then free associated resources. The
2546 * tx_buffer is put back on the free queue.
2547 *
2548 **********************************************************************/
static void
em_clean_transmit_interrupts(struct adapter * adapter)
{
	int i, num_avail;
	struct em_buffer *tx_buffer;
	struct em_tx_desc   *tx_desc;
	struct ifnet   *ifp = adapter->ifp;

	mtx_assert(&adapter->mtx, MA_OWNED);

	/* Ring is completely free: nothing to reclaim. */
	if (adapter->num_tx_desc_avail == adapter->num_tx_desc)
		return;

	num_avail = adapter->num_tx_desc_avail;
	i = adapter->oldest_used_tx_desc;

	tx_buffer = &adapter->tx_buffer_area[i];
	tx_desc = &adapter->tx_desc_base[i];

	/* Pull descriptor writebacks from the device before reading status. */
	bus_dmamap_sync(adapter->txdma.dma_tag, adapter->txdma.dma_map,
	    BUS_DMASYNC_POSTREAD);
	while (tx_desc->upper.fields.status & E1000_TXD_STAT_DD) {

		/* Clear the status so this slot isn't reclaimed twice. */
		tx_desc->upper.data = 0;
		num_avail++;

		/* Only the descriptor holding an mbuf owns DMA resources;
		 * context descriptors have m_head == NULL. */
		if (tx_buffer->m_head) {
			ifp->if_opackets++;
			bus_dmamap_sync(adapter->txtag, tx_buffer->map,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(adapter->txtag, tx_buffer->map);

			m_freem(tx_buffer->m_head);
			tx_buffer->m_head = NULL;
		}

		if (++i == adapter->num_tx_desc)
			i = 0;

		tx_buffer = &adapter->tx_buffer_area[i];
		tx_desc = &adapter->tx_desc_base[i];
	}
	bus_dmamap_sync(adapter->txdma.dma_tag, adapter->txdma.dma_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	adapter->oldest_used_tx_desc = i;

	/*
	 * If we have enough room, clear IFF_DRV_OACTIVE to tell the stack
	 * that it is OK to send packets.
	 * If there are no pending descriptors, clear the timeout. Otherwise,
	 * if some descriptors have been freed, restart the timeout.
	 */
	if (num_avail > EM_TX_CLEANUP_THRESHOLD) {
		ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
		if (num_avail == adapter->num_tx_desc)
			ifp->if_timer = 0;
		else if (num_avail != adapter->num_tx_desc_avail)
			ifp->if_timer = EM_TX_TIMEOUT;
	}
	adapter->num_tx_desc_avail = num_avail;
	return;
}
2612
2613 /*********************************************************************
2614 *
2615 * Get a buffer from system mbuf buffer pool.
2616 *
2617 **********************************************************************/
static int
em_get_buf(int i, struct adapter *adapter,
	   struct mbuf *nmp)
{
	struct mbuf    *mp = nmp;
	struct em_buffer *rx_buffer;
	struct ifnet   *ifp;
	bus_dma_segment_t segs[1];
	int error, nsegs;

	ifp = adapter->ifp;

	if (mp == NULL) {
		/* Allocate a fresh mbuf with a cluster attached. */
		mp = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
		if (mp == NULL) {
			adapter->mbuf_cluster_failed++;
			return(ENOBUFS);
		}
		mp->m_len = mp->m_pkthdr.len = MCLBYTES;
	} else {
		/* Recycle the caller's mbuf: reset length and data
		 * pointer to the start of its cluster. */
		mp->m_len = mp->m_pkthdr.len = MCLBYTES;
		mp->m_data = mp->m_ext.ext_buf;
		mp->m_next = NULL;
	}

	/* For standard MTUs, shift the payload so the IP header ends up
	 * 32-bit aligned after the 14-byte Ethernet header. */
	if (ifp->if_mtu <= ETHERMTU) {
		m_adj(mp, ETHER_ALIGN);
	}

	rx_buffer = &adapter->rx_buffer_area[i];

	/*
	 * Using memory from the mbuf cluster pool, invoke the
	 * bus_dma machinery to arrange the memory mapping.
	 */
	error = bus_dmamap_load_mbuf_sg(adapter->rxtag, rx_buffer->map,
	    mp, segs, &nsegs, 0);
	if (error != 0) {
		m_free(mp);
		return(error);
	}
	/* If nsegs is wrong then the stack is corrupt */
	KASSERT(nsegs == 1, ("Too many segments returned!"));
	rx_buffer->m_head = mp;
	/* Hand the buffer's physical address to the RX descriptor. */
	adapter->rx_desc_base[i].buffer_addr = htole64(segs[0].ds_addr);
	bus_dmamap_sync(adapter->rxtag, rx_buffer->map, BUS_DMASYNC_PREREAD);

	return(0);
}
2667
2668 /*********************************************************************
2669 *
2670 * Allocate memory for rx_buffer structures. Since we use one
2671 * rx_buffer per received packet, the maximum number of rx_buffer's
2672 * that we'll need is equal to the number of receive descriptors
2673 * that we've allocated.
2674 *
2675 **********************************************************************/
2676 static int
2677 em_allocate_receive_structures(struct adapter * adapter)
2678 {
2679 int i, error;
2680 struct em_buffer *rx_buffer;
2681
2682 if (!(adapter->rx_buffer_area =
2683 (struct em_buffer *) malloc(sizeof(struct em_buffer) *
2684 adapter->num_rx_desc, M_DEVBUF,
2685 M_NOWAIT))) {
2686 printf("em%d: Unable to allocate rx_buffer memory\n",
2687 adapter->unit);
2688 return(ENOMEM);
2689 }
2690
2691 bzero(adapter->rx_buffer_area,
2692 sizeof(struct em_buffer) * adapter->num_rx_desc);
2693
2694 error = bus_dma_tag_create(NULL, /* parent */
2695 1, 0, /* alignment, bounds */
2696 BUS_SPACE_MAXADDR, /* lowaddr */
2697 BUS_SPACE_MAXADDR, /* highaddr */
2698 NULL, NULL, /* filter, filterarg */
2699 MCLBYTES, /* maxsize */
2700 1, /* nsegments */
2701 MCLBYTES, /* maxsegsize */
2702 BUS_DMA_ALLOCNOW, /* flags */
2703 NULL, /* lockfunc */
2704 NULL, /* lockarg */
2705 &adapter->rxtag);
2706 if (error != 0) {
2707 printf("em%d: em_allocate_receive_structures: "
2708 "bus_dma_tag_create failed; error %u\n",
2709 adapter->unit, error);
2710 goto fail;
2711 }
2712
2713 rx_buffer = adapter->rx_buffer_area;
2714 for (i = 0; i < adapter->num_rx_desc; i++, rx_buffer++) {
2715 error = bus_dmamap_create(adapter->rxtag, BUS_DMA_NOWAIT,
2716 &rx_buffer->map);
2717 if (error != 0) {
2718 printf("em%d: em_allocate_receive_structures: "
2719 "bus_dmamap_create failed; error %u\n",
2720 adapter->unit, error);
2721 goto fail;
2722 }
2723 }
2724
2725 for (i = 0; i < adapter->num_rx_desc; i++) {
2726 error = em_get_buf(i, adapter, NULL);
2727 if (error != 0)
2728 goto fail;
2729 }
2730 bus_dmamap_sync(adapter->rxdma.dma_tag, adapter->rxdma.dma_map,
2731 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2732
2733 return(0);
2734
2735 fail:
2736 em_free_receive_structures(adapter);
2737 return (error);
2738 }
2739
2740 /*********************************************************************
2741 *
2742 * Allocate and initialize receive structures.
2743 *
2744 **********************************************************************/
2745 static int
2746 em_setup_receive_structures(struct adapter * adapter)
2747 {
2748 bzero((void *) adapter->rx_desc_base,
2749 (sizeof(struct em_rx_desc)) * adapter->num_rx_desc);
2750
2751 if (em_allocate_receive_structures(adapter))
2752 return ENOMEM;
2753
2754 /* Setup our descriptor pointers */
2755 adapter->next_rx_desc_to_check = 0;
2756 return(0);
2757 }
2758
2759 /*********************************************************************
2760 *
2761 * Enable receive unit.
2762 *
2763 **********************************************************************/
static void
em_initialize_receive_unit(struct adapter * adapter)
{
	u_int32_t       reg_rctl;
	u_int32_t       reg_rxcsum;
	struct ifnet    *ifp;
	u_int64_t	bus_addr;

	INIT_DEBUGOUT("em_initialize_receive_unit: begin");
	ifp = adapter->ifp;

	/* Make sure receives are disabled while setting up the descriptor ring */
	E1000_WRITE_REG(&adapter->hw, RCTL, 0);

	/* Set the Receive Delay Timer Register */
	E1000_WRITE_REG(&adapter->hw, RDTR,
			adapter->rx_int_delay.value | E1000_RDT_FPDB);

	/* Absolute delay (RADV) and throttling (ITR) exist on 82540+. */
	if(adapter->hw.mac_type >= em_82540) {
		E1000_WRITE_REG(&adapter->hw, RADV,
		    adapter->rx_abs_int_delay.value);

		/* Set the interrupt throttling rate.  Value is calculated
		 * as DEFAULT_ITR = 1/(MAX_INTS_PER_SEC * 256ns) */
#define MAX_INTS_PER_SEC	8000
#define DEFAULT_ITR	     1000000000/(MAX_INTS_PER_SEC * 256)
		E1000_WRITE_REG(&adapter->hw, ITR, DEFAULT_ITR);
	}

	/* Setup the Base and Length of the Rx Descriptor Ring */
	bus_addr = adapter->rxdma.dma_paddr;
	E1000_WRITE_REG(&adapter->hw, RDBAL, (u_int32_t)bus_addr);
	E1000_WRITE_REG(&adapter->hw, RDBAH, (u_int32_t)(bus_addr >> 32));
	E1000_WRITE_REG(&adapter->hw, RDLEN, adapter->num_rx_desc *
			sizeof(struct em_rx_desc));

	/* Setup the HW Rx Head and Tail Descriptor Pointers.
	 * Tail starts at the last descriptor: every slot already holds a
	 * buffer, so the whole ring is available to the hardware. */
	E1000_WRITE_REG(&adapter->hw, RDH, 0);
	E1000_WRITE_REG(&adapter->hw, RDT, adapter->num_rx_desc - 1);

	/* Setup the Receive Control Register */
	reg_rctl = E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_LBM_NO |
		   E1000_RCTL_RDMTS_HALF |
		   (adapter->hw.mc_filter_type << E1000_RCTL_MO_SHIFT);

	/* TBI compatibility requires accepting "bad" packets (SBP) so the
	 * driver can apply the carrier-extension workaround itself. */
	if (adapter->hw.tbi_compatibility_on == TRUE)
		reg_rctl |= E1000_RCTL_SBP;


	switch (adapter->rx_buffer_len) {
	default:
	case EM_RXBUFFER_2048:
		reg_rctl |= E1000_RCTL_SZ_2048;
		break;
	case EM_RXBUFFER_4096:
		reg_rctl |= E1000_RCTL_SZ_4096 | E1000_RCTL_BSEX | E1000_RCTL_LPE;
		break;
	case EM_RXBUFFER_8192:
		reg_rctl |= E1000_RCTL_SZ_8192 | E1000_RCTL_BSEX | E1000_RCTL_LPE;
		break;
	case EM_RXBUFFER_16384:
		reg_rctl |= E1000_RCTL_SZ_16384 | E1000_RCTL_BSEX | E1000_RCTL_LPE;
		break;
	}

	/* Long Packet Enable for jumbo-frame MTUs. */
	if (ifp->if_mtu > ETHERMTU)
		reg_rctl |= E1000_RCTL_LPE;

	/* Enable 82543 Receive Checksum Offload for TCP and UDP */
	if ((adapter->hw.mac_type >= em_82543) &&
	    (ifp->if_capenable & IFCAP_RXCSUM)) {
		reg_rxcsum = E1000_READ_REG(&adapter->hw, RXCSUM);
		reg_rxcsum |= (E1000_RXCSUM_IPOFL | E1000_RXCSUM_TUOFL);
		E1000_WRITE_REG(&adapter->hw, RXCSUM, reg_rxcsum);
	}

	/* Enable Receives -- RCTL is written last, after the ring and
	 * offload configuration is complete. */
	E1000_WRITE_REG(&adapter->hw, RCTL, reg_rctl);

	return;
}
2845
2846 /*********************************************************************
2847 *
2848 * Free receive related data structures.
2849 *
2850 **********************************************************************/
static void
em_free_receive_structures(struct adapter *adapter)
{
	struct em_buffer   *rx_buffer;
	int             i;

	INIT_DEBUGOUT("free_receive_structures: begin");

	if (adapter->rx_buffer_area != NULL) {
		rx_buffer = adapter->rx_buffer_area;
		for (i = 0; i < adapter->num_rx_desc; i++, rx_buffer++) {
			if (rx_buffer->m_head != NULL) {
				/* Buffer still loaded: sync and unload
				 * before freeing the mbuf. */
				bus_dmamap_sync(adapter->rxtag, rx_buffer->map,
				    BUS_DMASYNC_POSTREAD);
				bus_dmamap_unload(adapter->rxtag,
				    rx_buffer->map);
				m_freem(rx_buffer->m_head);
				rx_buffer->m_head = NULL;
			} else if (rx_buffer->map != NULL)
				/* No mbuf, but the map may still be loaded. */
				bus_dmamap_unload(adapter->rxtag,
				    rx_buffer->map);
			if (rx_buffer->map != NULL) {
				bus_dmamap_destroy(adapter->rxtag,
				    rx_buffer->map);
				rx_buffer->map = NULL;
			}
		}
	}
	if (adapter->rx_buffer_area != NULL) {
		free(adapter->rx_buffer_area, M_DEVBUF);
		adapter->rx_buffer_area = NULL;
	}
	/* Destroy the tag last, after every map created from it is gone. */
	if (adapter->rxtag != NULL) {
		bus_dma_tag_destroy(adapter->rxtag);
		adapter->rxtag = NULL;
	}
	return;
}
2889
2890 /*********************************************************************
2891 *
2892 * This routine executes in interrupt context. It replenishes
2893 * the mbufs in the descriptor and sends data which has been
2894 * dma'ed into host memory to upper layer.
2895 *
2896 * We loop at most count times if count is > 0, or until done if
2897 * count < 0.
2898 *
2899 *********************************************************************/
static void
em_process_receive_interrupts(struct adapter * adapter, int count)
{
	struct ifnet        *ifp;
	struct mbuf         *mp;
	u_int8_t            accept_frame = 0;
	u_int8_t            eop = 0;
	u_int16_t           len, desc_len, prev_len_adj;
	int                 i;

	/* Pointer to the receive descriptor being examined. */
	struct em_rx_desc   *current_desc;

	mtx_assert(&adapter->mtx, MA_OWNED);

	ifp = adapter->ifp;
	i = adapter->next_rx_desc_to_check;
	current_desc = &adapter->rx_desc_base[i];
	/* Pull descriptor writebacks from the device before inspecting. */
	bus_dmamap_sync(adapter->rxdma.dma_tag, adapter->rxdma.dma_map,
	    BUS_DMASYNC_POSTREAD);

	if (!((current_desc->status) & E1000_RXD_STAT_DD)) {
		return;
	}

	/* Process completed descriptors.  'count' bounds the number of
	 * full packets handled per call (a negative count never hits
	 * zero, so it means "until done"). */
	while ((current_desc->status & E1000_RXD_STAT_DD) &&
		    (count != 0) &&
		    (ifp->if_drv_flags & IFF_DRV_RUNNING)) {
		struct mbuf *m = NULL;

		mp = adapter->rx_buffer_area[i].m_head;
		bus_dmamap_sync(adapter->rxtag, adapter->rx_buffer_area[i].map,
		    BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(adapter->rxtag,
		    adapter->rx_buffer_area[i].map);

		accept_frame = 1;
		prev_len_adj = 0;
		desc_len = le16toh(current_desc->length);
		if (current_desc->status & E1000_RXD_STAT_EOP) {
			/* Last fragment of a packet: strip the trailing
			 * CRC.  If the fragment is shorter than the CRC,
			 * the remainder must come off the previous mbuf
			 * in the chain (prev_len_adj). */
			count--;
			eop = 1;
			if (desc_len < ETHER_CRC_LEN) {
				len = 0;
				prev_len_adj = ETHER_CRC_LEN - desc_len;
			}
			else {
				len = desc_len - ETHER_CRC_LEN;
			}
		} else {
			eop = 0;
			len = desc_len;
		}

		if (current_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK) {
			/* TBI (fiber) workaround: frames ending in a
			 * carrier-extension byte are flagged as errored
			 * but may still be acceptable. */
			u_int8_t            last_byte;
			u_int32_t           pkt_len = desc_len;

			if (adapter->fmp != NULL)
				pkt_len += adapter->fmp->m_pkthdr.len;

			last_byte = *(mtod(mp, caddr_t) + desc_len - 1);

			if (TBI_ACCEPT(&adapter->hw, current_desc->status,
				       current_desc->errors,
				       pkt_len, last_byte)) {
				em_tbi_adjust_stats(&adapter->hw,
						    &adapter->stats,
						    pkt_len,
						    adapter->hw.mac_addr);
				/* Drop the carrier-extension byte. */
				if (len > 0) len--;
			}
			else {
				accept_frame = 0;
			}
		}

		if (accept_frame) {

			/* Refill the ring slot before handing 'mp' up.  If
			 * no replacement buffer is available, recycle 'mp'
			 * into the slot and drop the packet instead. */
			if (em_get_buf(i, adapter, NULL) == ENOBUFS) {
				adapter->dropped_pkts++;
				em_get_buf(i, adapter, mp);
				if (adapter->fmp != NULL)
					m_freem(adapter->fmp);
				adapter->fmp = NULL;
				adapter->lmp = NULL;
				break;
			}

			/* Assign correct length to the current fragment */
			mp->m_len = len;

			if (adapter->fmp == NULL) {
				mp->m_pkthdr.len = len;
				adapter->fmp = mp;	 /* Store the first mbuf */
				adapter->lmp = mp;
			} else {
				/* Chain mbuf's together */
				mp->m_flags &= ~M_PKTHDR;
				/*
				 * Adjust length of previous mbuf in chain if we
				 * received less than 4 bytes in the last descriptor.
				 */
				if (prev_len_adj > 0) {
					adapter->lmp->m_len -= prev_len_adj;
					adapter->fmp->m_pkthdr.len -= prev_len_adj;
				}
				adapter->lmp->m_next = mp;
				adapter->lmp = adapter->lmp->m_next;
				adapter->fmp->m_pkthdr.len += len;
			}

			if (eop) {
				/* Whole packet assembled: fill in receive
				 * metadata and queue it for if_input. */
				adapter->fmp->m_pkthdr.rcvif = ifp;
				ifp->if_ipackets++;
				em_receive_checksum(adapter, current_desc,
				    adapter->fmp);
				if (current_desc->status & E1000_RXD_STAT_VP)
					VLAN_INPUT_TAG_NEW(ifp, adapter->fmp,
					    (le16toh(current_desc->special) &
					    E1000_RXD_SPC_VLAN_MASK));
				m = adapter->fmp;
				adapter->fmp = NULL;
				adapter->lmp = NULL;
			}
		} else {
			/* Errored frame: recycle the buffer into the ring
			 * and discard any partially assembled chain. */
			adapter->dropped_pkts++;
			em_get_buf(i, adapter, mp);
			if (adapter->fmp != NULL)
				m_freem(adapter->fmp);
			adapter->fmp = NULL;
			adapter->lmp = NULL;
		}

		/* Zero out the receive descriptors status  */
		current_desc->status = 0;
		bus_dmamap_sync(adapter->rxdma.dma_tag, adapter->rxdma.dma_map,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

		/* Advance the E1000's Receive Queue #0  "Tail Pointer". */
		E1000_WRITE_REG(&adapter->hw, RDT, i);

		/* Advance our pointers to the next descriptor */
		if (++i == adapter->num_rx_desc)
			i = 0;
		if (m != NULL) {
			/* The adapter lock is dropped across if_input();
			 * publish/reload the ring index around the call in
			 * case the stack re-enters the driver. */
			adapter->next_rx_desc_to_check = i;
			EM_UNLOCK(adapter);
			(*ifp->if_input)(ifp, m);
			EM_LOCK(adapter);
			i = adapter->next_rx_desc_to_check;
		}
		current_desc = &adapter->rx_desc_base[i];
	}
	adapter->next_rx_desc_to_check = i;
	return;
}
3057
3058 /*********************************************************************
3059 *
3060 * Verify that the hardware indicated that the checksum is valid.
3061 * Inform the stack about the status of checksum so that stack
3062 * doesn't spend time verifying the checksum.
3063 *
3064 *********************************************************************/
3065 static void
3066 em_receive_checksum(struct adapter *adapter,
3067 struct em_rx_desc *rx_desc,
3068 struct mbuf *mp)
3069 {
3070 /* 82543 or newer only */
3071 if ((adapter->hw.mac_type < em_82543) ||
3072 /* Ignore Checksum bit is set */
3073 (rx_desc->status & E1000_RXD_STAT_IXSM)) {
3074 mp->m_pkthdr.csum_flags = 0;
3075 return;
3076 }
3077
3078 if (rx_desc->status & E1000_RXD_STAT_IPCS) {
3079 /* Did it pass? */
3080 if (!(rx_desc->errors & E1000_RXD_ERR_IPE)) {
3081 /* IP Checksum Good */
3082 mp->m_pkthdr.csum_flags = CSUM_IP_CHECKED;
3083 mp->m_pkthdr.csum_flags |= CSUM_IP_VALID;
3084
3085 } else {
3086 mp->m_pkthdr.csum_flags = 0;
3087 }
3088 }
3089
3090 if (rx_desc->status & E1000_RXD_STAT_TCPCS) {
3091 /* Did it pass? */
3092 if (!(rx_desc->errors & E1000_RXD_ERR_TCPE)) {
3093 mp->m_pkthdr.csum_flags |=
3094 (CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
3095 mp->m_pkthdr.csum_data = htons(0xffff);
3096 }
3097 }
3098
3099 return;
3100 }
3101
3102
3103 static void
3104 em_enable_vlans(struct adapter *adapter)
3105 {
3106 uint32_t ctrl;
3107
3108 E1000_WRITE_REG(&adapter->hw, VET, ETHERTYPE_VLAN);
3109
3110 ctrl = E1000_READ_REG(&adapter->hw, CTRL);
3111 ctrl |= E1000_CTRL_VME;
3112 E1000_WRITE_REG(&adapter->hw, CTRL, ctrl);
3113
3114 return;
3115 }
3116
3117 static void
3118 em_disable_vlans(struct adapter *adapter)
3119 {
3120 uint32_t ctrl;
3121
3122 ctrl = E1000_READ_REG(&adapter->hw, CTRL);
3123 ctrl &= ~E1000_CTRL_VME;
3124 E1000_WRITE_REG(&adapter->hw, CTRL, ctrl);
3125
3126 return;
3127 }
3128
3129 static void
3130 em_enable_intr(struct adapter * adapter)
3131 {
3132 E1000_WRITE_REG(&adapter->hw, IMS, (IMS_ENABLE_MASK));
3133 return;
3134 }
3135
3136 static void
3137 em_disable_intr(struct adapter *adapter)
3138 {
3139 /*
3140 * The first version of 82542 had an errata where when link was forced it
3141 * would stay up even up even if the cable was disconnected. Sequence errors
3142 * were used to detect the disconnect and then the driver would unforce the link.
3143 * This code in the in the ISR. For this to work correctly the Sequence error
3144 * interrupt had to be enabled all the time.
3145 */
3146
3147 if (adapter->hw.mac_type == em_82542_rev2_0)
3148 E1000_WRITE_REG(&adapter->hw, IMC,
3149 (0xffffffff & ~E1000_IMC_RXSEQ));
3150 else
3151 E1000_WRITE_REG(&adapter->hw, IMC,
3152 0xffffffff);
3153 return;
3154 }
3155
3156 static int
3157 em_is_valid_ether_addr(u_int8_t *addr)
3158 {
3159 char zero_addr[6] = { 0, 0, 0, 0, 0, 0 };
3160
3161 if ((addr[0] & 1) || (!bcmp(addr, zero_addr, ETHER_ADDR_LEN))) {
3162 return (FALSE);
3163 }
3164
3165 return(TRUE);
3166 }
3167
3168 void
3169 em_write_pci_cfg(struct em_hw *hw,
3170 uint32_t reg,
3171 uint16_t *value)
3172 {
3173 pci_write_config(((struct em_osdep *)hw->back)->dev, reg,
3174 *value, 2);
3175 }
3176
3177 void
3178 em_read_pci_cfg(struct em_hw *hw, uint32_t reg,
3179 uint16_t *value)
3180 {
3181 *value = pci_read_config(((struct em_osdep *)hw->back)->dev,
3182 reg, 2);
3183 return;
3184 }
3185
3186 void
3187 em_pci_set_mwi(struct em_hw *hw)
3188 {
3189 pci_write_config(((struct em_osdep *)hw->back)->dev,
3190 PCIR_COMMAND,
3191 (hw->pci_cmd_word | CMD_MEM_WRT_INVALIDATE), 2);
3192 return;
3193 }
3194
3195 void
3196 em_pci_clear_mwi(struct em_hw *hw)
3197 {
3198 pci_write_config(((struct em_osdep *)hw->back)->dev,
3199 PCIR_COMMAND,
3200 (hw->pci_cmd_word & ~CMD_MEM_WRT_INVALIDATE), 2);
3201 return;
3202 }
3203
/*********************************************************************
 * 82544 coexistence issue workaround.
 * There are two issues:
 * 1. Transmit hang issue.
 *    Detected with the equation SIZE[3:0] + ADDR[2:0] = SUM[3:0];
 *    if SUM[3:0] is between 1 and 4, this issue is present.
 *
 * 2. DAC issue.
 *    Detected with the same equation SIZE[3:0] + ADDR[2:0] = SUM[3:0];
 *    if SUM[3:0] is between 9 and 0xC, this issue is present.
 *
 * WORKAROUND:
 * Make sure a buffer's ending address is never 1, 2, 3, 4 (hang)
 * or 9, a, b, c (DAC).
 *********************************************************************/
3222 static u_int32_t
3223 em_fill_descriptors (bus_addr_t address,
3224 u_int32_t length,
3225 PDESC_ARRAY desc_array)
3226 {
3227 /* Since issue is sensitive to length and address.*/
3228 /* Let us first check the address...*/
3229 u_int32_t safe_terminator;
3230 if (length <= 4) {
3231 desc_array->descriptor[0].address = address;
3232 desc_array->descriptor[0].length = length;
3233 desc_array->elements = 1;
3234 return desc_array->elements;
3235 }
3236 safe_terminator = (u_int32_t)((((u_int32_t)address & 0x7) + (length & 0xF)) & 0xF);
3237 /* if it does not fall between 0x1 to 0x4 and 0x9 to 0xC then return */
3238 if (safe_terminator == 0 ||
3239 (safe_terminator > 4 &&
3240 safe_terminator < 9) ||
3241 (safe_terminator > 0xC &&
3242 safe_terminator <= 0xF)) {
3243 desc_array->descriptor[0].address = address;
3244 desc_array->descriptor[0].length = length;
3245 desc_array->elements = 1;
3246 return desc_array->elements;
3247 }
3248
3249 desc_array->descriptor[0].address = address;
3250 desc_array->descriptor[0].length = length - 4;
3251 desc_array->descriptor[1].address = address + (length - 4);
3252 desc_array->descriptor[1].length = 4;
3253 desc_array->elements = 2;
3254 return desc_array->elements;
3255 }
3256
3257 /**********************************************************************
3258 *
3259 * Update the board statistics counters.
3260 *
3261 **********************************************************************/
3262 static void
3263 em_update_stats_counters(struct adapter *adapter)
3264 {
3265 struct ifnet *ifp;
3266
3267 if(adapter->hw.media_type == em_media_type_copper ||
3268 (E1000_READ_REG(&adapter->hw, STATUS) & E1000_STATUS_LU)) {
3269 adapter->stats.symerrs += E1000_READ_REG(&adapter->hw, SYMERRS);
3270 adapter->stats.sec += E1000_READ_REG(&adapter->hw, SEC);
3271 }
3272 adapter->stats.crcerrs += E1000_READ_REG(&adapter->hw, CRCERRS);
3273 adapter->stats.mpc += E1000_READ_REG(&adapter->hw, MPC);
3274 adapter->stats.scc += E1000_READ_REG(&adapter->hw, SCC);
3275 adapter->stats.ecol += E1000_READ_REG(&adapter->hw, ECOL);
3276
3277 adapter->stats.mcc += E1000_READ_REG(&adapter->hw, MCC);
3278 adapter->stats.latecol += E1000_READ_REG(&adapter->hw, LATECOL);
3279 adapter->stats.colc += E1000_READ_REG(&adapter->hw, COLC);
3280 adapter->stats.dc += E1000_READ_REG(&adapter->hw, DC);
3281 adapter->stats.rlec += E1000_READ_REG(&adapter->hw, RLEC);
3282 adapter->stats.xonrxc += E1000_READ_REG(&adapter->hw, XONRXC);
3283 adapter->stats.xontxc += E1000_READ_REG(&adapter->hw, XONTXC);
3284 adapter->stats.xoffrxc += E1000_READ_REG(&adapter->hw, XOFFRXC);
3285 adapter->stats.xofftxc += E1000_READ_REG(&adapter->hw, XOFFTXC);
3286 adapter->stats.fcruc += E1000_READ_REG(&adapter->hw, FCRUC);
3287 adapter->stats.prc64 += E1000_READ_REG(&adapter->hw, PRC64);
3288 adapter->stats.prc127 += E1000_READ_REG(&adapter->hw, PRC127);
3289 adapter->stats.prc255 += E1000_READ_REG(&adapter->hw, PRC255);
3290 adapter->stats.prc511 += E1000_READ_REG(&adapter->hw, PRC511);
3291 adapter->stats.prc1023 += E1000_READ_REG(&adapter->hw, PRC1023);
3292 adapter->stats.prc1522 += E1000_READ_REG(&adapter->hw, PRC1522);
3293 adapter->stats.gprc += E1000_READ_REG(&adapter->hw, GPRC);
3294 adapter->stats.bprc += E1000_READ_REG(&adapter->hw, BPRC);
3295 adapter->stats.mprc += E1000_READ_REG(&adapter->hw, MPRC);
3296 adapter->stats.gptc += E1000_READ_REG(&adapter->hw, GPTC);
3297
3298 /* For the 64-bit byte counters the low dword must be read first. */
3299 /* Both registers clear on the read of the high dword */
3300
3301 adapter->stats.gorcl += E1000_READ_REG(&adapter->hw, GORCL);
3302 adapter->stats.gorch += E1000_READ_REG(&adapter->hw, GORCH);
3303 adapter->stats.gotcl += E1000_READ_REG(&adapter->hw, GOTCL);
3304 adapter->stats.gotch += E1000_READ_REG(&adapter->hw, GOTCH);
3305
3306 adapter->stats.rnbc += E1000_READ_REG(&adapter->hw, RNBC);
3307 adapter->stats.ruc += E1000_READ_REG(&adapter->hw, RUC);
3308 adapter->stats.rfc += E1000_READ_REG(&adapter->hw, RFC);
3309 adapter->stats.roc += E1000_READ_REG(&adapter->hw, ROC);
3310 adapter->stats.rjc += E1000_READ_REG(&adapter->hw, RJC);
3311
3312 adapter->stats.torl += E1000_READ_REG(&adapter->hw, TORL);
3313 adapter->stats.torh += E1000_READ_REG(&adapter->hw, TORH);
3314 adapter->stats.totl += E1000_READ_REG(&adapter->hw, TOTL);
3315 adapter->stats.toth += E1000_READ_REG(&adapter->hw, TOTH);
3316
3317 adapter->stats.tpr += E1000_READ_REG(&adapter->hw, TPR);
3318 adapter->stats.tpt += E1000_READ_REG(&adapter->hw, TPT);
3319 adapter->stats.ptc64 += E1000_READ_REG(&adapter->hw, PTC64);
3320 adapter->stats.ptc127 += E1000_READ_REG(&adapter->hw, PTC127);
3321 adapter->stats.ptc255 += E1000_READ_REG(&adapter->hw, PTC255);
3322 adapter->stats.ptc511 += E1000_READ_REG(&adapter->hw, PTC511);
3323 adapter->stats.ptc1023 += E1000_READ_REG(&adapter->hw, PTC1023);
3324 adapter->stats.ptc1522 += E1000_READ_REG(&adapter->hw, PTC1522);
3325 adapter->stats.mptc += E1000_READ_REG(&adapter->hw, MPTC);
3326 adapter->stats.bptc += E1000_READ_REG(&adapter->hw, BPTC);
3327
3328 if (adapter->hw.mac_type >= em_82543) {
3329 adapter->stats.algnerrc +=
3330 E1000_READ_REG(&adapter->hw, ALGNERRC);
3331 adapter->stats.rxerrc +=
3332 E1000_READ_REG(&adapter->hw, RXERRC);
3333 adapter->stats.tncrs +=
3334 E1000_READ_REG(&adapter->hw, TNCRS);
3335 adapter->stats.cexterr +=
3336 E1000_READ_REG(&adapter->hw, CEXTERR);
3337 adapter->stats.tsctc +=
3338 E1000_READ_REG(&adapter->hw, TSCTC);
3339 adapter->stats.tsctfc +=
3340 E1000_READ_REG(&adapter->hw, TSCTFC);
3341 }
3342 ifp = adapter->ifp;
3343
3344 ifp->if_collisions = adapter->stats.colc;
3345
3346 /* Rx Errors */
3347 ifp->if_ierrors =
3348 adapter->dropped_pkts +
3349 adapter->stats.rxerrc +
3350 adapter->stats.crcerrs +
3351 adapter->stats.algnerrc +
3352 adapter->stats.rlec +
3353 adapter->stats.mpc + adapter->stats.cexterr;
3354
3355 /* Tx Errors */
3356 ifp->if_oerrors = adapter->stats.ecol + adapter->stats.latecol +
3357 adapter->watchdog_events;
3358
3359 }
3360
3361
3362 /**********************************************************************
3363 *
3364 * This routine is called only when em_display_debug_stats is enabled.
3365 * This routine provides a way to take a look at important statistics
3366 * maintained by the driver and hardware.
3367 *
3368 **********************************************************************/
3369 static void
3370 em_print_debug_info(struct adapter *adapter)
3371 {
3372 int unit = adapter->unit;
3373 uint8_t *hw_addr = adapter->hw.hw_addr;
3374
3375 printf("em%d: Adapter hardware address = %p \n", unit, hw_addr);
3376 printf("em%d: CTRL = 0x%x RCTL = 0x%x \n", unit,
3377 E1000_READ_REG(&adapter->hw, CTRL),
3378 E1000_READ_REG(&adapter->hw, RCTL));
3379 printf("em%d: Packet buffer = Tx=%dk Rx=%dk \n", unit,
3380 ((E1000_READ_REG(&adapter->hw, PBA) & 0xffff0000) >> 16),\
3381 (E1000_READ_REG(&adapter->hw, PBA) & 0xffff) );
3382 printf("em%d: Flow control watermarks high = %d low = %d\n", unit,
3383 adapter->hw.fc_high_water,
3384 adapter->hw.fc_low_water);
3385 printf("em%d: tx_int_delay = %d, tx_abs_int_delay = %d\n", unit,
3386 E1000_READ_REG(&adapter->hw, TIDV),
3387 E1000_READ_REG(&adapter->hw, TADV));
3388 printf("em%d: rx_int_delay = %d, rx_abs_int_delay = %d\n", unit,
3389 E1000_READ_REG(&adapter->hw, RDTR),
3390 E1000_READ_REG(&adapter->hw, RADV));
3391 printf("em%d: fifo workaround = %lld, fifo_reset_count = %lld\n",
3392 unit, (long long)adapter->tx_fifo_wrk_cnt,
3393 (long long)adapter->tx_fifo_reset_cnt);
3394 printf("em%d: hw tdh = %d, hw tdt = %d\n", unit,
3395 E1000_READ_REG(&adapter->hw, TDH),
3396 E1000_READ_REG(&adapter->hw, TDT));
3397 printf("em%d: Num Tx descriptors avail = %d\n", unit,
3398 adapter->num_tx_desc_avail);
3399 printf("em%d: Tx Descriptors not avail1 = %ld\n", unit,
3400 adapter->no_tx_desc_avail1);
3401 printf("em%d: Tx Descriptors not avail2 = %ld\n", unit,
3402 adapter->no_tx_desc_avail2);
3403 printf("em%d: Std mbuf failed = %ld\n", unit,
3404 adapter->mbuf_alloc_failed);
3405 printf("em%d: Std mbuf cluster failed = %ld\n", unit,
3406 adapter->mbuf_cluster_failed);
3407 printf("em%d: Driver dropped packets = %ld\n", unit,
3408 adapter->dropped_pkts);
3409
3410 return;
3411 }
3412
3413 static void
3414 em_print_hw_stats(struct adapter *adapter)
3415 {
3416 int unit = adapter->unit;
3417
3418 printf("em%d: Excessive collisions = %lld\n", unit,
3419 (long long)adapter->stats.ecol);
3420 printf("em%d: Symbol errors = %lld\n", unit,
3421 (long long)adapter->stats.symerrs);
3422 printf("em%d: Sequence errors = %lld\n", unit,
3423 (long long)adapter->stats.sec);
3424 printf("em%d: Defer count = %lld\n", unit,
3425 (long long)adapter->stats.dc);
3426
3427 printf("em%d: Missed Packets = %lld\n", unit,
3428 (long long)adapter->stats.mpc);
3429 printf("em%d: Receive No Buffers = %lld\n", unit,
3430 (long long)adapter->stats.rnbc);
3431 printf("em%d: Receive length errors = %lld\n", unit,
3432 (long long)adapter->stats.rlec);
3433 printf("em%d: Receive errors = %lld\n", unit,
3434 (long long)adapter->stats.rxerrc);
3435 printf("em%d: Crc errors = %lld\n", unit,
3436 (long long)adapter->stats.crcerrs);
3437 printf("em%d: Alignment errors = %lld\n", unit,
3438 (long long)adapter->stats.algnerrc);
3439 printf("em%d: Carrier extension errors = %lld\n", unit,
3440 (long long)adapter->stats.cexterr);
3441 printf("em%d: RX overruns = %ld\n", unit, adapter->rx_overruns);
3442 printf("em%d: watchdog timeouts = %ld\n", unit,
3443 adapter->watchdog_events);
3444
3445 printf("em%d: XON Rcvd = %lld\n", unit,
3446 (long long)adapter->stats.xonrxc);
3447 printf("em%d: XON Xmtd = %lld\n", unit,
3448 (long long)adapter->stats.xontxc);
3449 printf("em%d: XOFF Rcvd = %lld\n", unit,
3450 (long long)adapter->stats.xoffrxc);
3451 printf("em%d: XOFF Xmtd = %lld\n", unit,
3452 (long long)adapter->stats.xofftxc);
3453
3454 printf("em%d: Good Packets Rcvd = %lld\n", unit,
3455 (long long)adapter->stats.gprc);
3456 printf("em%d: Good Packets Xmtd = %lld\n", unit,
3457 (long long)adapter->stats.gptc);
3458
3459 return;
3460 }
3461
3462 static int
3463 em_sysctl_debug_info(SYSCTL_HANDLER_ARGS)
3464 {
3465 int error;
3466 int result;
3467 struct adapter *adapter;
3468
3469 result = -1;
3470 error = sysctl_handle_int(oidp, &result, 0, req);
3471
3472 if (error || !req->newptr)
3473 return (error);
3474
3475 if (result == 1) {
3476 adapter = (struct adapter *)arg1;
3477 em_print_debug_info(adapter);
3478 }
3479
3480 return error;
3481 }
3482
3483
3484 static int
3485 em_sysctl_stats(SYSCTL_HANDLER_ARGS)
3486 {
3487 int error;
3488 int result;
3489 struct adapter *adapter;
3490
3491 result = -1;
3492 error = sysctl_handle_int(oidp, &result, 0, req);
3493
3494 if (error || !req->newptr)
3495 return (error);
3496
3497 if (result == 1) {
3498 adapter = (struct adapter *)arg1;
3499 em_print_hw_stats(adapter);
3500 }
3501
3502 return error;
3503 }
3504
3505 static int
3506 em_sysctl_int_delay(SYSCTL_HANDLER_ARGS)
3507 {
3508 struct em_int_delay_info *info;
3509 struct adapter *adapter;
3510 u_int32_t regval;
3511 int error;
3512 int usecs;
3513 int ticks;
3514
3515 info = (struct em_int_delay_info *)arg1;
3516 usecs = info->value;
3517 error = sysctl_handle_int(oidp, &usecs, 0, req);
3518 if (error != 0 || req->newptr == NULL)
3519 return error;
3520 if (usecs < 0 || usecs > E1000_TICKS_TO_USECS(65535))
3521 return EINVAL;
3522 info->value = usecs;
3523 ticks = E1000_USECS_TO_TICKS(usecs);
3524
3525 adapter = info->adapter;
3526
3527 EM_LOCK(adapter);
3528 regval = E1000_READ_OFFSET(&adapter->hw, info->offset);
3529 regval = (regval & ~0xffff) | (ticks & 0xffff);
3530 /* Handle a few special cases. */
3531 switch (info->offset) {
3532 case E1000_RDTR:
3533 case E1000_82542_RDTR:
3534 regval |= E1000_RDT_FPDB;
3535 break;
3536 case E1000_TIDV:
3537 case E1000_82542_TIDV:
3538 if (ticks == 0) {
3539 adapter->txd_cmd &= ~E1000_TXD_CMD_IDE;
3540 /* Don't write 0 into the TIDV register. */
3541 regval++;
3542 } else
3543 adapter->txd_cmd |= E1000_TXD_CMD_IDE;
3544 break;
3545 }
3546 E1000_WRITE_OFFSET(&adapter->hw, info->offset, regval);
3547 EM_UNLOCK(adapter);
3548 return 0;
3549 }
3550
3551 static void
3552 em_add_int_delay_sysctl(struct adapter *adapter, const char *name,
3553 const char *description, struct em_int_delay_info *info,
3554 int offset, int value)
3555 {
3556 info->adapter = adapter;
3557 info->offset = offset;
3558 info->value = value;
3559 SYSCTL_ADD_PROC(device_get_sysctl_ctx(adapter->dev),
3560 SYSCTL_CHILDREN(device_get_sysctl_tree(adapter->dev)),
3561 OID_AUTO, name, CTLTYPE_INT|CTLFLAG_RW,
3562 info, 0, em_sysctl_int_delay, "I", description);
3563 }
Cache object: 905e32b8a0385dfe53a711c2210ed711
|