FreeBSD/Linux Kernel Cross Reference
sys/dev/em/if_em.c
1 /**************************************************************************
2
3 Copyright (c) 2001-2005, Intel Corporation
4 All rights reserved.
5
6 Redistribution and use in source and binary forms, with or without
7 modification, are permitted provided that the following conditions are met:
8
9 1. Redistributions of source code must retain the above copyright notice,
10 this list of conditions and the following disclaimer.
11
12 2. Redistributions in binary form must reproduce the above copyright
13 notice, this list of conditions and the following disclaimer in the
14 documentation and/or other materials provided with the distribution.
15
16 3. Neither the name of the Intel Corporation nor the names of its
17 contributors may be used to endorse or promote products derived from
18 this software without specific prior written permission.
19
20 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 POSSIBILITY OF SUCH DAMAGE.
31
32 ***************************************************************************/
33
34 /*$FreeBSD: releng/6.0/sys/dev/em/if_em.c 151562 2005-10-22 22:07:20Z glebius $*/
35
36 #ifdef HAVE_KERNEL_OPTION_HEADERS
37 #include "opt_device_polling.h"
38 #endif
39
40 #include <dev/em/if_em.h>
41
/*********************************************************************
 *  Set this to one to display debug statistics
 *********************************************************************/
/* Read at runtime by the stats sysctl path; not a compile-time option. */
int em_display_debug_stats = 0;

/*********************************************************************
 *  Driver version
 *********************************************************************/
/* Appended to the device description string built in em_probe(). */
char em_driver_version[] = "2.1.7";
52
53
/*********************************************************************
 *  PCI Device ID Table
 *
 *  Used by probe to select devices to load on
 *  Last field stores an index into em_strings
 *  Last entry must be all 0s
 *
 *  { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
 *********************************************************************/

/*
 * Every entry currently uses string index 0 ("Intel(R) PRO/1000
 * Network Connection") and matches any subvendor/subdevice.
 */
static em_vendor_info_t em_vendor_info_array[] =
{
	/* Intel(R) PRO/1000 Network Connection */
	{ 0x8086, E1000_DEV_ID_82540EM,		PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82540EM_LOM,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82540EP,		PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82540EP_LOM,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82540EP_LP,	PCI_ANY_ID, PCI_ANY_ID, 0},

	{ 0x8086, E1000_DEV_ID_82541EI,		PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82541ER,		PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82541ER_LOM,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82541EI_MOBILE,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82541GI,		PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82541GI_LF,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82541GI_MOBILE,	PCI_ANY_ID, PCI_ANY_ID, 0},

	{ 0x8086, E1000_DEV_ID_82542,		PCI_ANY_ID, PCI_ANY_ID, 0},

	{ 0x8086, E1000_DEV_ID_82543GC_FIBER,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82543GC_COPPER,	PCI_ANY_ID, PCI_ANY_ID, 0},

	{ 0x8086, E1000_DEV_ID_82544EI_COPPER,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82544EI_FIBER,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82544GC_COPPER,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82544GC_LOM,	PCI_ANY_ID, PCI_ANY_ID, 0},

	{ 0x8086, E1000_DEV_ID_82545EM_COPPER,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82545EM_FIBER,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82545GM_COPPER,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82545GM_FIBER,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82545GM_SERDES,	PCI_ANY_ID, PCI_ANY_ID, 0},

	{ 0x8086, E1000_DEV_ID_82546EB_COPPER,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82546EB_FIBER,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82546EB_QUAD_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82546GB_COPPER,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82546GB_FIBER,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82546GB_SERDES,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82546GB_PCIE,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82546GB_QUAD_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0},

	{ 0x8086, E1000_DEV_ID_82547EI,		PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82547EI_MOBILE,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82547GI,		PCI_ANY_ID, PCI_ANY_ID, 0},

	{ 0x8086, E1000_DEV_ID_82573E,		PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82573E_IAMT,	PCI_ANY_ID, PCI_ANY_ID, 0},

	/* required last entry */
	{ 0, 0, 0, 0, 0}
};
116
/*********************************************************************
 *  Table of branding strings for all supported NICs.
 *********************************************************************/

/* Indexed by the last field of em_vendor_info_array entries. */
static char *em_strings[] = {
	"Intel(R) PRO/1000 Network Connection"
};
124
/*********************************************************************
 *  Function prototypes
 *
 *  All driver entry points and helpers are file-local (static);
 *  only the newbus method table below exposes them.
 *********************************************************************/
static int  em_probe(device_t);
static int  em_attach(device_t);
static int  em_detach(device_t);
static int  em_shutdown(device_t);
static void em_intr(void *);
static void em_start(struct ifnet *);
static int  em_ioctl(struct ifnet *, u_long, caddr_t);
static void em_watchdog(struct ifnet *);
static void em_init(void *);
static void em_init_locked(struct adapter *);
static void em_stop(void *);
static void em_media_status(struct ifnet *, struct ifmediareq *);
static int  em_media_change(struct ifnet *);
static void em_identify_hardware(struct adapter *);
static int  em_allocate_pci_resources(struct adapter *);
static void em_free_pci_resources(struct adapter *);
static void em_local_timer(void *);
static int  em_hardware_init(struct adapter *);
static void em_setup_interface(device_t, struct adapter *);
static int  em_setup_transmit_structures(struct adapter *);
static void em_initialize_transmit_unit(struct adapter *);
static int  em_setup_receive_structures(struct adapter *);
static void em_initialize_receive_unit(struct adapter *);
static void em_enable_intr(struct adapter *);
static void em_disable_intr(struct adapter *);
static void em_free_transmit_structures(struct adapter *);
static void em_free_receive_structures(struct adapter *);
static void em_update_stats_counters(struct adapter *);
static void em_clean_transmit_interrupts(struct adapter *);
static int  em_allocate_receive_structures(struct adapter *);
static int  em_allocate_transmit_structures(struct adapter *);
static void em_process_receive_interrupts(struct adapter *, int);
static void em_receive_checksum(struct adapter *,
				struct em_rx_desc *,
				struct mbuf *);
static void em_transmit_checksum_setup(struct adapter *,
				       struct mbuf *,
				       u_int32_t *,
				       u_int32_t *);
static void em_set_promisc(struct adapter *);
static void em_disable_promisc(struct adapter *);
static void em_set_multi(struct adapter *);
static void em_print_hw_stats(struct adapter *);
static void em_print_link_status(struct adapter *);
static int  em_get_buf(int i, struct adapter *,
		       struct mbuf *);
static void em_enable_vlans(struct adapter *);
static void em_disable_vlans(struct adapter *);
static int  em_encap(struct adapter *, struct mbuf **);
static void em_smartspeed(struct adapter *);
/* 82547 Tx-FIFO hang workaround helpers. */
static int  em_82547_fifo_workaround(struct adapter *, int);
static void em_82547_update_fifo_head(struct adapter *, int);
static int  em_82547_tx_fifo_reset(struct adapter *);
static void em_82547_move_tail(void *arg);
static void em_82547_move_tail_locked(struct adapter *);
static int  em_dma_malloc(struct adapter *, bus_size_t,
			  struct em_dma_alloc *, int);
static void em_dma_free(struct adapter *, struct em_dma_alloc *);
static void em_print_debug_info(struct adapter *);
static int  em_is_valid_ether_addr(u_int8_t *);
static int  em_sysctl_stats(SYSCTL_HANDLER_ARGS);
static int  em_sysctl_debug_info(SYSCTL_HANDLER_ARGS);
static u_int32_t em_fill_descriptors (u_int64_t address,
				      u_int32_t length,
				      PDESC_ARRAY desc_array);
static int  em_sysctl_int_delay(SYSCTL_HANDLER_ARGS);
static void em_add_int_delay_sysctl(struct adapter *, const char *,
				    const char *, struct em_int_delay_info *,
				    int, int);
#ifdef DEVICE_POLLING
static poll_handler_t em_poll;
#endif
200
/*********************************************************************
 *  FreeBSD Device Interface Entry Points
 *********************************************************************/

/* newbus method table: only probe/attach/detach/shutdown are provided. */
static device_method_t em_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe, em_probe),
	DEVMETHOD(device_attach, em_attach),
	DEVMETHOD(device_detach, em_detach),
	DEVMETHOD(device_shutdown, em_shutdown),
	{0, 0}			/* terminator */
};

/* softc is struct adapter; allocated/zeroed by the bus framework. */
static driver_t em_driver = {
	"em", em_methods, sizeof(struct adapter ),
};

static devclass_t em_devclass;
DRIVER_MODULE(em, pci, em_driver, em_devclass, 0, 0);
MODULE_DEPEND(em, pci, 1, 1, 1);
MODULE_DEPEND(em, ether, 1, 1, 1);
222
/*********************************************************************
 *  Tunable default values.
 *********************************************************************/

/*
 * The interrupt-delay registers count in 1.024 usec (1024 ns) ticks;
 * these macros convert between register ticks and microseconds,
 * rounding to nearest (hence the +500 / +512 bias terms).
 */
#define E1000_TICKS_TO_USECS(ticks)	((1024 * (ticks) + 500) / 1000)
#define E1000_USECS_TO_TICKS(usecs)	((1000 * (usecs) + 512) / 1024)

/* Defaults in usecs, derived from the EM_* register defaults; each is
 * overridable at boot via the matching hw.em.* loader tunable. */
static int em_tx_int_delay_dflt = E1000_TICKS_TO_USECS(EM_TIDV);
static int em_rx_int_delay_dflt = E1000_TICKS_TO_USECS(EM_RDTR);
static int em_tx_abs_int_delay_dflt = E1000_TICKS_TO_USECS(EM_TADV);
static int em_rx_abs_int_delay_dflt = E1000_TICKS_TO_USECS(EM_RADV);

TUNABLE_INT("hw.em.tx_int_delay", &em_tx_int_delay_dflt);
TUNABLE_INT("hw.em.rx_int_delay", &em_rx_int_delay_dflt);
TUNABLE_INT("hw.em.tx_abs_int_delay", &em_tx_abs_int_delay_dflt);
TUNABLE_INT("hw.em.rx_abs_int_delay", &em_rx_abs_int_delay_dflt);
239
240 /*********************************************************************
241 * Device identification routine
242 *
243 * em_probe determines if the driver should be loaded on
244 * adapter based on PCI vendor/device id of the adapter.
245 *
246 * return BUS_PROBE_DEFAULT on success, positive on failure
247 *********************************************************************/
248
249 static int
250 em_probe(device_t dev)
251 {
252 em_vendor_info_t *ent;
253
254 u_int16_t pci_vendor_id = 0;
255 u_int16_t pci_device_id = 0;
256 u_int16_t pci_subvendor_id = 0;
257 u_int16_t pci_subdevice_id = 0;
258 char adapter_name[60];
259
260 INIT_DEBUGOUT("em_probe: begin");
261
262 pci_vendor_id = pci_get_vendor(dev);
263 if (pci_vendor_id != EM_VENDOR_ID)
264 return(ENXIO);
265
266 pci_device_id = pci_get_device(dev);
267 pci_subvendor_id = pci_get_subvendor(dev);
268 pci_subdevice_id = pci_get_subdevice(dev);
269
270 ent = em_vendor_info_array;
271 while (ent->vendor_id != 0) {
272 if ((pci_vendor_id == ent->vendor_id) &&
273 (pci_device_id == ent->device_id) &&
274
275 ((pci_subvendor_id == ent->subvendor_id) ||
276 (ent->subvendor_id == PCI_ANY_ID)) &&
277
278 ((pci_subdevice_id == ent->subdevice_id) ||
279 (ent->subdevice_id == PCI_ANY_ID))) {
280 sprintf(adapter_name, "%s, Version - %s",
281 em_strings[ent->index],
282 em_driver_version);
283 device_set_desc_copy(dev, adapter_name);
284 return(BUS_PROBE_DEFAULT);
285 }
286 ent++;
287 }
288
289 return(ENXIO);
290 }
291
292 /*********************************************************************
293 * Device initialization routine
294 *
295 * The attach entry point is called when the driver is being loaded.
296 * This routine identifies the type of hardware, allocates all resources
297 * and initializes the hardware.
298 *
299 * return 0 on success, positive on failure
300 *********************************************************************/
301
static int
em_attach(device_t dev)
{
	struct adapter * adapter;
	int		tsize, rsize;
	int		error = 0;

	INIT_DEBUGOUT("em_attach: begin");

	/* Allocate, clear, and link in our adapter structure */
	if (!(adapter = device_get_softc(dev))) {
		printf("em: adapter structure allocation failed\n");
		return(ENOMEM);
	}
	bzero(adapter, sizeof(struct adapter ));
	adapter->dev = dev;
	adapter->osdep.dev = dev;
	adapter->unit = device_get_unit(dev);
	EM_LOCK_INIT(adapter, device_get_nameunit(dev));

	/* SYSCTL stuff: debug-info and stats dump nodes under this
	 * device's sysctl tree. */
	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
	    OID_AUTO, "debug_info", CTLTYPE_INT|CTLFLAG_RW,
	    (void *)adapter, 0,
	    em_sysctl_debug_info, "I", "Debug Information");

	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
	    OID_AUTO, "stats", CTLTYPE_INT|CTLFLAG_RW,
	    (void *)adapter, 0,
	    em_sysctl_stats, "I", "Statistics");

	callout_init(&adapter->timer, CALLOUT_MPSAFE);
	callout_init(&adapter->tx_fifo_timer, CALLOUT_MPSAFE);

	/* Determine hardware revision */
	em_identify_hardware(adapter);

	/* Set up some sysctls for the tunable interrupt delays */
	em_add_int_delay_sysctl(adapter, "rx_int_delay",
	    "receive interrupt delay in usecs", &adapter->rx_int_delay,
	    E1000_REG_OFFSET(&adapter->hw, RDTR), em_rx_int_delay_dflt);
	em_add_int_delay_sysctl(adapter, "tx_int_delay",
	    "transmit interrupt delay in usecs", &adapter->tx_int_delay,
	    E1000_REG_OFFSET(&adapter->hw, TIDV), em_tx_int_delay_dflt);
	/* The absolute-delay registers (RADV/TADV) exist on 82540+ only. */
	if (adapter->hw.mac_type >= em_82540) {
		em_add_int_delay_sysctl(adapter, "rx_abs_int_delay",
		    "receive interrupt delay limit in usecs",
		    &adapter->rx_abs_int_delay,
		    E1000_REG_OFFSET(&adapter->hw, RADV),
		    em_rx_abs_int_delay_dflt);
		em_add_int_delay_sysctl(adapter, "tx_abs_int_delay",
		    "transmit interrupt delay limit in usecs",
		    &adapter->tx_abs_int_delay,
		    E1000_REG_OFFSET(&adapter->hw, TADV),
		    em_tx_abs_int_delay_dflt);
	}

	/* Parameters (to be read from user) */
	adapter->num_tx_desc = EM_MAX_TXD;
	adapter->num_rx_desc = EM_MAX_RXD;
	adapter->hw.autoneg = DO_AUTO_NEG;
	adapter->hw.wait_autoneg_complete = WAIT_FOR_AUTO_NEG_DEFAULT;
	adapter->hw.autoneg_advertised = AUTONEG_ADV_DEFAULT;
	adapter->hw.tbi_compatibility_en = TRUE;
	adapter->rx_buffer_len = EM_RXBUFFER_2048;

	/*
	 * These parameters control the automatic generation(Tx) and
	 * response(Rx) to Ethernet PAUSE frames.
	 */
	adapter->hw.fc_high_water = FC_DEFAULT_HI_THRESH;
	adapter->hw.fc_low_water  = FC_DEFAULT_LO_THRESH;
	adapter->hw.fc_pause_time = FC_DEFAULT_TX_TIMER;
	adapter->hw.fc_send_xon   = TRUE;
	adapter->hw.fc = em_fc_full;

	adapter->hw.phy_init_script = 1;
	adapter->hw.phy_reset_disable = FALSE;

#ifndef EM_MASTER_SLAVE
	adapter->hw.master_slave = em_ms_hw_default;
#else
	adapter->hw.master_slave = EM_MASTER_SLAVE;
#endif
	/*
	 * Set the max frame size assuming standard ethernet
	 * sized frames
	 */
	adapter->hw.max_frame_size =
	    ETHERMTU + ETHER_HDR_LEN + ETHER_CRC_LEN;

	adapter->hw.min_frame_size =
	    MINIMUM_ETHERNET_PACKET_SIZE + ETHER_CRC_LEN;

	/*
	 * This controls when hardware reports transmit completion
	 * status.
	 */
	adapter->hw.report_tx_early = 1;

	if (em_allocate_pci_resources(adapter)) {
		printf("em%d: Allocation of PCI resources failed\n",
		       adapter->unit);
		error = ENXIO;
		goto err_pci;
	}

	/* Initialize eeprom parameters */
	em_init_eeprom_params(&adapter->hw);

	/* Descriptor rings are rounded up to a 4K boundary. */
	tsize = EM_ROUNDUP(adapter->num_tx_desc *
	    sizeof(struct em_tx_desc), 4096);

	/* Allocate Transmit Descriptor ring */
	if (em_dma_malloc(adapter, tsize, &adapter->txdma, BUS_DMA_NOWAIT)) {
		printf("em%d: Unable to allocate tx_desc memory\n",
		       adapter->unit);
		error = ENOMEM;
		goto err_tx_desc;
	}
	adapter->tx_desc_base = (struct em_tx_desc *) adapter->txdma.dma_vaddr;

	rsize = EM_ROUNDUP(adapter->num_rx_desc *
	    sizeof(struct em_rx_desc), 4096);

	/* Allocate Receive Descriptor ring */
	if (em_dma_malloc(adapter, rsize, &adapter->rxdma, BUS_DMA_NOWAIT)) {
		printf("em%d: Unable to allocate rx_desc memory\n",
		       adapter->unit);
		error = ENOMEM;
		goto err_rx_desc;
	}
	adapter->rx_desc_base = (struct em_rx_desc *) adapter->rxdma.dma_vaddr;

	/* Initialize the hardware */
	if (em_hardware_init(adapter)) {
		printf("em%d: Unable to initialize the hardware\n",
		       adapter->unit);
		error = EIO;
		goto err_hw_init;
	}

	/* Copy the permanent MAC address out of the EEPROM */
	if (em_read_mac_addr(&adapter->hw) < 0) {
		printf("em%d: EEPROM read error while reading mac address\n",
		       adapter->unit);
		error = EIO;
		goto err_mac_addr;
	}

	if (!em_is_valid_ether_addr(adapter->hw.mac_addr)) {
		printf("em%d: Invalid mac address\n", adapter->unit);
		error = EIO;
		goto err_mac_addr;
	}

	/* Setup OS specific network interface */
	em_setup_interface(dev, adapter);

	/* Initialize statistics */
	em_clear_hw_cntrs(&adapter->hw);
	em_update_stats_counters(adapter);
	adapter->hw.get_link_status = 1;
	em_check_for_link(&adapter->hw);

	/* Print the link status */
	if (adapter->link_active == 1) {
		em_get_speed_and_duplex(&adapter->hw, &adapter->link_speed,
		    &adapter->link_duplex);
		printf("em%d: Speed:%d Mbps Duplex:%s\n",
		       adapter->unit,
		       adapter->link_speed,
		       adapter->link_duplex == FULL_DUPLEX ? "Full" : "Half");
	} else
		printf("em%d: Speed:N/A Duplex:N/A\n", adapter->unit);

	/* Identify 82544 on PCIX */
	em_get_bus_info(&adapter->hw);
	if (adapter->hw.bus_type == em_bus_type_pcix &&
	    adapter->hw.mac_type == em_82544) {
		adapter->pcix_82544 = TRUE;
	}
	else {
		adapter->pcix_82544 = FALSE;
	}
	INIT_DEBUGOUT("em_attach: end");
	return(0);

	/*
	 * Error unwind: labels release resources in reverse order of
	 * acquisition and deliberately fall through to the next label.
	 */
err_mac_addr:
err_hw_init:
	em_dma_free(adapter, &adapter->rxdma);
err_rx_desc:
	em_dma_free(adapter, &adapter->txdma);
err_tx_desc:
err_pci:
	em_free_pci_resources(adapter);
	EM_LOCK_DESTROY(adapter);
	return(error);

}
506
507 /*********************************************************************
508 * Device removal routine
509 *
510 * The detach entry point is called when the driver is being removed.
511 * This routine stops the adapter and deallocates all the resources
512 * that were allocated for driver operation.
513 *
514 * return 0 on success, positive on failure
515 *********************************************************************/
516
static int
em_detach(device_t dev)
{
	struct adapter * adapter = device_get_softc(dev);
	struct ifnet   *ifp = adapter->ifp;

	INIT_DEBUGOUT("em_detach: begin");

#ifdef DEVICE_POLLING
	/* Leave polling mode before tearing the interface down. */
	if (ifp->if_capenable & IFCAP_POLLING)
		ether_poll_deregister(ifp);
#endif

	/*
	 * Flag the detach (the ioctl path checks in_detach and bails),
	 * then quiesce the hardware under the softc lock before
	 * detaching the ifnet.
	 */
	EM_LOCK(adapter);
	adapter->in_detach = 1;
	em_stop(adapter);
	em_phy_hw_reset(&adapter->hw);
	EM_UNLOCK(adapter);
	ether_ifdetach(adapter->ifp);

	em_free_pci_resources(adapter);
	bus_generic_detach(dev);
	if_free(ifp);

	/* Free Transmit Descriptor ring */
	if (adapter->tx_desc_base) {
		em_dma_free(adapter, &adapter->txdma);
		adapter->tx_desc_base = NULL;
	}

	/* Free Receive Descriptor ring */
	if (adapter->rx_desc_base) {
		em_dma_free(adapter, &adapter->rxdma);
		adapter->rx_desc_base = NULL;
	}

	EM_LOCK_DESTROY(adapter);

	return(0);
}
557
558 /*********************************************************************
559 *
560 * Shutdown entry point
561 *
562 **********************************************************************/
563
564 static int
565 em_shutdown(device_t dev)
566 {
567 struct adapter *adapter = device_get_softc(dev);
568 EM_LOCK(adapter);
569 em_stop(adapter);
570 EM_UNLOCK(adapter);
571 return(0);
572 }
573
574
575 /*********************************************************************
576 * Transmit entry point
577 *
578 * em_start is called by the stack to initiate a transmit.
579 * The driver will remain in this routine as long as there are
580 * packets to transmit and transmit resources are available.
581 * In case resources are not available stack is notified and
582 * the packet is requeued.
583 **********************************************************************/
584
585 static void
586 em_start_locked(struct ifnet *ifp)
587 {
588 struct mbuf *m_head;
589 struct adapter *adapter = ifp->if_softc;
590
591 mtx_assert(&adapter->mtx, MA_OWNED);
592
593 if (!adapter->link_active)
594 return;
595
596 while (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) {
597
598 IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
599
600 if (m_head == NULL) break;
601
602 /*
603 * em_encap() can modify our pointer, and or make it NULL on
604 * failure. In that event, we can't requeue.
605 */
606 if (em_encap(adapter, &m_head)) {
607 if (m_head == NULL)
608 break;
609 ifp->if_drv_flags |= IFF_DRV_OACTIVE;
610 IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
611 break;
612 }
613
614 /* Send a copy of the frame to the BPF listener */
615 BPF_MTAP(ifp, m_head);
616
617 /* Set timeout in case hardware has problems transmitting */
618 ifp->if_timer = EM_TX_TIMEOUT;
619
620 }
621 return;
622 }
623
624 static void
625 em_start(struct ifnet *ifp)
626 {
627 struct adapter *adapter = ifp->if_softc;
628
629 EM_LOCK(adapter);
630 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
631 em_start_locked(ifp);
632 EM_UNLOCK(adapter);
633 return;
634 }
635
636 /*********************************************************************
637 * Ioctl entry point
638 *
639 * em_ioctl is called when the user wants to configure the
640 * interface.
641 *
642 * return 0 on success, positive on failure
643 **********************************************************************/
644
static int
em_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
{
	int		mask, reinit, error = 0;
	struct ifreq   *ifr = (struct ifreq *) data;
	struct adapter * adapter = ifp->if_softc;

	/* Refuse new work once detach is underway. */
	if (adapter->in_detach) return(error);

	switch (command) {
	case SIOCSIFADDR:
	case SIOCGIFADDR:
		IOCTL_DEBUGOUT("ioctl rcv'd: SIOCxIFADDR (Get/Set Interface Addr)");
		ether_ioctl(ifp, command, data);
		break;
	case SIOCSIFMTU:
		IOCTL_DEBUGOUT("ioctl rcv'd: SIOCSIFMTU (Set Interface MTU)");
		if (ifr->ifr_mtu > MAX_JUMBO_FRAME_SIZE - ETHER_HDR_LEN || \
			/* 82573 does not support jumbo frames */
		    (adapter->hw.mac_type == em_82573 && ifr->ifr_mtu > ETHERMTU) ) {
			error = EINVAL;
		} else {
			EM_LOCK(adapter);
			ifp->if_mtu = ifr->ifr_mtu;
			adapter->hw.max_frame_size =
			    ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
			/* Re-init so the new frame size reaches the hardware. */
			em_init_locked(adapter);
			EM_UNLOCK(adapter);
		}
		break;
	case SIOCSIFFLAGS:
		IOCTL_DEBUGOUT("ioctl rcv'd: SIOCSIFFLAGS (Set Interface Flags)");
		EM_LOCK(adapter);
		if (ifp->if_flags & IFF_UP) {
			if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
				em_init_locked(adapter);
			}

			/* Re-sync promiscuous state with the new flags. */
			em_disable_promisc(adapter);
			em_set_promisc(adapter);
		} else {
			if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
				em_stop(adapter);
			}
		}
		EM_UNLOCK(adapter);
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		IOCTL_DEBUGOUT("ioctl rcv'd: SIOC(ADD|DEL)MULTI");
		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
			/* Reprogram the multicast filter with intrs masked. */
			EM_LOCK(adapter);
			em_disable_intr(adapter);
			em_set_multi(adapter);
			if (adapter->hw.mac_type == em_82542_rev2_0) {
				em_initialize_receive_unit(adapter);
			}
#ifdef DEVICE_POLLING
			/* Leave interrupts masked while polling is active. */
			if (!(ifp->if_capenable & IFCAP_POLLING))
#endif
				em_enable_intr(adapter);
			EM_UNLOCK(adapter);
		}
		break;
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		IOCTL_DEBUGOUT("ioctl rcv'd: SIOCxIFMEDIA (Get/Set Interface Media)");
		error = ifmedia_ioctl(ifp, ifr, &adapter->media, command);
		break;
	case SIOCSIFCAP:
		IOCTL_DEBUGOUT("ioctl rcv'd: SIOCSIFCAP (Set Capabilities)");
		reinit = 0;
		/* mask holds only the capability bits being toggled. */
		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
#ifdef DEVICE_POLLING
		if (mask & IFCAP_POLLING) {
			if (ifr->ifr_reqcap & IFCAP_POLLING) {
				error = ether_poll_register(em_poll, ifp);
				if (error)
					return(error);
				EM_LOCK(adapter);
				em_disable_intr(adapter);
				ifp->if_capenable |= IFCAP_POLLING;
				EM_UNLOCK(adapter);
			} else {
				error = ether_poll_deregister(ifp);
				/* Enable interrupt even in error case */
				EM_LOCK(adapter);
				em_enable_intr(adapter);
				ifp->if_capenable &= ~IFCAP_POLLING;
				EM_UNLOCK(adapter);
			}
		}
#endif
		if (mask & IFCAP_HWCSUM) {
			ifp->if_capenable ^= IFCAP_HWCSUM;
			reinit = 1;
		}
		if (mask & IFCAP_VLAN_HWTAGGING) {
			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
			reinit = 1;
		}
		/* Checksum/VLAN changes need a re-init to reach the hw. */
		if (reinit && (ifp->if_drv_flags & IFF_DRV_RUNNING))
			em_init(adapter);
		break;
	default:
		IOCTL_DEBUGOUT1("ioctl received: UNKNOWN (0x%x)", (int)command);
		error = EINVAL;
	}

	return(error);
}
756
757 /*********************************************************************
758 * Watchdog entry point
759 *
760 * This routine is called whenever hardware quits transmitting.
761 *
762 **********************************************************************/
763
764 static void
765 em_watchdog(struct ifnet *ifp)
766 {
767 struct adapter * adapter;
768 adapter = ifp->if_softc;
769
770 EM_LOCK(adapter);
771 /* If we are in this routine because of pause frames, then
772 * don't reset the hardware.
773 */
774 if (E1000_READ_REG(&adapter->hw, STATUS) & E1000_STATUS_TXOFF) {
775 ifp->if_timer = EM_TX_TIMEOUT;
776 EM_UNLOCK(adapter);
777 return;
778 }
779
780 if (em_check_for_link(&adapter->hw))
781 printf("em%d: watchdog timeout -- resetting\n", adapter->unit);
782
783 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
784 ifp->if_oerrors++;
785
786 em_init_locked(adapter);
787 EM_UNLOCK(adapter);
788 }
789
790 /*********************************************************************
791 * Init entry point
792 *
793 * This routine is used in two ways. It is used by the stack as
794 * init entry point in network interface structure. It is also used
795 * by the driver as a hw/sw initialization routine to get to a
796 * consistent state.
797 *
798 * return 0 on success, positive on failure
799 **********************************************************************/
800
static void
em_init_locked(struct adapter * adapter)
{
	struct ifnet   *ifp;

	uint32_t	pba;
	ifp = adapter->ifp;

	INIT_DEBUGOUT("em_init: begin");

	mtx_assert(&adapter->mtx, MA_OWNED);

	em_stop(adapter);

	/* Packet Buffer Allocation (PBA)
	 * Writing PBA sets the receive portion of the buffer
	 * the remainder is used for the transmit buffer.
	 *
	 * Devices before the 82547 had a Packet Buffer of 64K.
	 *   Default allocation: PBA=48K for Rx, leaving 16K for Tx.
	 * After the 82547 the buffer was reduced to 40K.
	 *   Default allocation: PBA=30K for Rx, leaving 10K for Tx.
	 *   Note: default does not leave enough room for Jumbo Frame >10k.
	 */
	if (adapter->hw.mac_type < em_82547) {
		/* Total FIFO is 64K */
		if (adapter->rx_buffer_len > EM_RXBUFFER_8192)
			pba = E1000_PBA_40K; /* 40K for Rx, 24K for Tx */
		else
			pba = E1000_PBA_48K; /* 48K for Rx, 16K for Tx */
	} else {
		/* Total FIFO is 40K */
		if (adapter->hw.max_frame_size > EM_RXBUFFER_8192) {
			pba = E1000_PBA_22K; /* 22K for Rx, 18K for Tx */
		} else {
			pba = E1000_PBA_30K; /* 30K for Rx, 10K for Tx */
		}
		/* 82547+: track Tx FIFO geometry for the fifo workaround. */
		adapter->tx_fifo_head = 0;
		adapter->tx_head_addr = pba << EM_TX_HEAD_ADDR_SHIFT;
		adapter->tx_fifo_size = (E1000_PBA_40K - pba) << EM_PBA_BYTES_SHIFT;
	}
	INIT_DEBUGOUT1("em_init: pba=%dK",pba);
	E1000_WRITE_REG(&adapter->hw, PBA, pba);

	/* Get the latest mac address, User can use a LAA */
	bcopy(IFP2ENADDR(adapter->ifp), adapter->hw.mac_addr,
	      ETHER_ADDR_LEN);

	/* Initialize the hardware */
	if (em_hardware_init(adapter)) {
		printf("em%d: Unable to initialize the hardware\n",
		       adapter->unit);
		return;
	}

	if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING)
		em_enable_vlans(adapter);

	/* Prepare transmit descriptors and buffers */
	if (em_setup_transmit_structures(adapter)) {
		printf("em%d: Could not setup transmit structures\n",
		       adapter->unit);
		em_stop(adapter);
		return;
	}
	em_initialize_transmit_unit(adapter);

	/* Setup Multicast table */
	em_set_multi(adapter);

	/* Prepare receive descriptors and buffers */
	if (em_setup_receive_structures(adapter)) {
		printf("em%d: Could not setup receive structures\n",
		       adapter->unit);
		em_stop(adapter);
		return;
	}
	em_initialize_receive_unit(adapter);

	/* Don't loose promiscuous settings */
	em_set_promisc(adapter);

	ifp->if_drv_flags |= IFF_DRV_RUNNING;
	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;

	/* Tx checksum offload is available on 82543 and later. */
	if (adapter->hw.mac_type >= em_82543) {
		if (ifp->if_capenable & IFCAP_TXCSUM)
			ifp->if_hwassist = EM_CHECKSUM_FEATURES;
		else
			ifp->if_hwassist = 0;
	}

	callout_reset(&adapter->timer, hz, em_local_timer, adapter);
	em_clear_hw_cntrs(&adapter->hw);
#ifdef DEVICE_POLLING
	/*
	 * Only enable interrupts if we are not polling, make sure
	 * they are off otherwise.
	 */
	if (ifp->if_capenable & IFCAP_POLLING)
		em_disable_intr(adapter);
	else
#endif /* DEVICE_POLLING */
		em_enable_intr(adapter);

	/* Don't reset the phy next time init gets called */
	adapter->hw.phy_reset_disable = TRUE;

	return;
}
911
/*
 * ifnet if_init entry point: wrap em_init_locked() with the
 * softc lock.
 */
static void
em_init(void *arg)
{
	struct adapter *sc = arg;

	EM_LOCK(sc);
	em_init_locked(sc);
	EM_UNLOCK(sc);
}
922
923
924 #ifdef DEVICE_POLLING
static void
em_poll_locked(struct ifnet *ifp, enum poll_cmd cmd, int count)
{
	struct adapter *adapter = ifp->if_softc;
	u_int32_t reg_icr;

	mtx_assert(&adapter->mtx, MA_OWNED);

	/* On a status-check pass, also look for link-state changes. */
	if (cmd == POLL_AND_CHECK_STATUS) {
		reg_icr = E1000_READ_REG(&adapter->hw, ICR);
		if (reg_icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
			callout_stop(&adapter->timer);
			adapter->hw.get_link_status = 1;
			em_check_for_link(&adapter->hw);
			em_print_link_status(adapter);
			callout_reset(&adapter->timer, hz, em_local_timer, adapter);
		}
	}
	/* Harvest up to 'count' received frames, then reap Tx. */
	em_process_receive_interrupts(adapter, count);
	em_clean_transmit_interrupts(adapter);

	/* Kick the transmitter if anything is still queued. */
	if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
		em_start_locked(ifp);
}
949
950 static void
951 em_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
952 {
953 struct adapter *adapter = ifp->if_softc;
954
955 EM_LOCK(adapter);
956 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
957 em_poll_locked(ifp, cmd, count);
958 EM_UNLOCK(adapter);
959 }
960 #endif /* DEVICE_POLLING */
961
962 /*********************************************************************
963 *
964 * Interrupt Service routine
965 *
966 **********************************************************************/
static void
em_intr(void *arg)
{
	struct adapter *adapter = arg;
	struct ifnet *ifp;
	uint32_t reg_icr;
	int wantinit = 0;

	EM_LOCK(adapter);

	ifp = adapter->ifp;

#ifdef DEVICE_POLLING
	/* While polling is enabled, the interrupt path is a no-op. */
	if (ifp->if_capenable & IFCAP_POLLING) {
		EM_UNLOCK(adapter);
		return;
	}
#endif /* DEVICE_POLLING */

	/*
	 * Loop until ICR reads back zero; reading ICR acknowledges the
	 * pending interrupt causes.
	 */
	for (;;) {
		reg_icr = E1000_READ_REG(&adapter->hw, ICR);
		if (reg_icr == 0)
			break;

		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
			/* -1: no limit on frames processed per call */
			em_process_receive_interrupts(adapter, -1);
			em_clean_transmit_interrupts(adapter);
		}

		/* Link status change */
		if (reg_icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
			callout_stop(&adapter->timer);
			adapter->hw.get_link_status = 1;
			em_check_for_link(&adapter->hw);
			em_print_link_status(adapter);
			callout_reset(&adapter->timer, hz, em_local_timer,
			    adapter);
		}

		/*
		 * RX overrun: logged, and wantinit is set, but the
		 * reinitialization below is currently compiled out.
		 */
		if (reg_icr & E1000_ICR_RXO) {
			log(LOG_WARNING, "%s: RX overrun\n", ifp->if_xname);
			wantinit = 1;
		}
	}
#if 0
	if (wantinit)
		em_init_locked(adapter);
#endif
	/* Restart transmission if packets are queued. */
	if (ifp->if_drv_flags & IFF_DRV_RUNNING &&
	    !IFQ_DRV_IS_EMPTY(&ifp->if_snd))
		em_start_locked(ifp);

	EM_UNLOCK(adapter);
	return;
}
1022
1023
1024
1025 /*********************************************************************
1026 *
1027 * Media Ioctl callback
1028 *
1029 * This routine is called whenever the user queries the status of
1030 * the interface using ifconfig.
1031 *
1032 **********************************************************************/
1033 static void
1034 em_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
1035 {
1036 struct adapter * adapter = ifp->if_softc;
1037
1038 INIT_DEBUGOUT("em_media_status: begin");
1039
1040 em_check_for_link(&adapter->hw);
1041 if (E1000_READ_REG(&adapter->hw, STATUS) & E1000_STATUS_LU) {
1042 if (adapter->link_active == 0) {
1043 em_get_speed_and_duplex(&adapter->hw,
1044 &adapter->link_speed,
1045 &adapter->link_duplex);
1046 adapter->link_active = 1;
1047 }
1048 } else {
1049 if (adapter->link_active == 1) {
1050 adapter->link_speed = 0;
1051 adapter->link_duplex = 0;
1052 adapter->link_active = 0;
1053 }
1054 }
1055
1056 ifmr->ifm_status = IFM_AVALID;
1057 ifmr->ifm_active = IFM_ETHER;
1058
1059 if (!adapter->link_active)
1060 return;
1061
1062 ifmr->ifm_status |= IFM_ACTIVE;
1063
1064 if (adapter->hw.media_type == em_media_type_fiber) {
1065 ifmr->ifm_active |= IFM_1000_SX | IFM_FDX;
1066 } else {
1067 switch (adapter->link_speed) {
1068 case 10:
1069 ifmr->ifm_active |= IFM_10_T;
1070 break;
1071 case 100:
1072 ifmr->ifm_active |= IFM_100_TX;
1073 break;
1074 case 1000:
1075 ifmr->ifm_active |= IFM_1000_T;
1076 break;
1077 }
1078 if (adapter->link_duplex == FULL_DUPLEX)
1079 ifmr->ifm_active |= IFM_FDX;
1080 else
1081 ifmr->ifm_active |= IFM_HDX;
1082 }
1083 return;
1084 }
1085
1086 /*********************************************************************
1087 *
1088 * Media Ioctl callback
1089 *
1090 * This routine is called when the user changes speed/duplex using
 *  media/mediaopt option with ifconfig.
1092 *
1093 **********************************************************************/
1094 static int
1095 em_media_change(struct ifnet *ifp)
1096 {
1097 struct adapter * adapter = ifp->if_softc;
1098 struct ifmedia *ifm = &adapter->media;
1099
1100 INIT_DEBUGOUT("em_media_change: begin");
1101
1102 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
1103 return(EINVAL);
1104
1105 switch (IFM_SUBTYPE(ifm->ifm_media)) {
1106 case IFM_AUTO:
1107 adapter->hw.autoneg = DO_AUTO_NEG;
1108 adapter->hw.autoneg_advertised = AUTONEG_ADV_DEFAULT;
1109 break;
1110 case IFM_1000_SX:
1111 case IFM_1000_T:
1112 adapter->hw.autoneg = DO_AUTO_NEG;
1113 adapter->hw.autoneg_advertised = ADVERTISE_1000_FULL;
1114 break;
1115 case IFM_100_TX:
1116 adapter->hw.autoneg = FALSE;
1117 adapter->hw.autoneg_advertised = 0;
1118 if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX)
1119 adapter->hw.forced_speed_duplex = em_100_full;
1120 else
1121 adapter->hw.forced_speed_duplex = em_100_half;
1122 break;
1123 case IFM_10_T:
1124 adapter->hw.autoneg = FALSE;
1125 adapter->hw.autoneg_advertised = 0;
1126 if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX)
1127 adapter->hw.forced_speed_duplex = em_10_full;
1128 else
1129 adapter->hw.forced_speed_duplex = em_10_half;
1130 break;
1131 default:
1132 printf("em%d: Unsupported media type\n", adapter->unit);
1133 }
1134
1135 /* As the speed/duplex settings my have changed we need to
1136 * reset the PHY.
1137 */
1138 adapter->hw.phy_reset_disable = FALSE;
1139
1140 em_init(adapter);
1141
1142 return(0);
1143 }
1144
1145 /*********************************************************************
1146 *
1147 * This routine maps the mbufs to tx descriptors.
1148 *
1149 * return 0 on success, positive on failure
1150 **********************************************************************/
static int
em_encap(struct adapter *adapter, struct mbuf **m_headp)
{
	u_int32_t txd_upper;
	u_int32_t txd_lower, txd_used = 0, txd_saved = 0;
	int i, j, error;
	u_int64_t address;

	struct mbuf *m_head;

	/* For 82544 Workaround */
	DESC_ARRAY desc_array;
	u_int32_t array_elements;
	u_int32_t counter;
	struct m_tag *mtag;
	bus_dma_segment_t segs[EM_MAX_SCATTER];
	bus_dmamap_t map;
	int nsegs;
	struct em_buffer *tx_buffer = NULL;
	struct em_tx_desc *current_tx_desc = NULL;
	struct ifnet *ifp = adapter->ifp;

	m_head = *m_headp;

	/*
	 * Force a cleanup if number of TX descriptors
	 * available hits the threshold
	 */
	if (adapter->num_tx_desc_avail <= EM_TX_CLEANUP_THRESHOLD) {
		em_clean_transmit_interrupts(adapter);
		if (adapter->num_tx_desc_avail <= EM_TX_CLEANUP_THRESHOLD) {
			adapter->no_tx_desc_avail1++;
			return(ENOBUFS);
		}
	}

	/*
	 * Map the packet for DMA.
	 */
	if (bus_dmamap_create(adapter->txtag, BUS_DMA_NOWAIT, &map)) {
		adapter->no_tx_map_avail++;
		return (ENOMEM);
	}
	error = bus_dmamap_load_mbuf_sg(adapter->txtag, map, m_head, segs,
	    &nsegs, BUS_DMA_NOWAIT);
	if (error != 0) {
		adapter->no_tx_dma_setup++;
		bus_dmamap_destroy(adapter->txtag, map);
		return (error);
	}
	KASSERT(nsegs != 0, ("em_encap: empty packet"));

	/* Not enough descriptors for all segments of this frame. */
	if (nsegs > adapter->num_tx_desc_avail) {
		adapter->no_tx_desc_avail2++;
		bus_dmamap_destroy(adapter->txtag, map);
		return (ENOBUFS);
	}


	/* Set up checksum-offload fields for the descriptors if needed. */
	if (ifp->if_hwassist > 0) {
		em_transmit_checksum_setup(adapter, m_head,
		    &txd_upper, &txd_lower);
	} else
		txd_upper = txd_lower = 0;


	/* Find out if we are in vlan mode */
	mtag = VLAN_OUTPUT_TAG(ifp, m_head);

	/*
	 * When operating in promiscuous mode, hardware encapsulation for
	 * packets is disabled. This means we have to add the vlan
	 * encapsulation in the driver, since it will have come down from the
	 * VLAN layer with a tag instead of a VLAN header.
	 */
	if (mtag != NULL && adapter->em_insert_vlan_header) {
		struct ether_vlan_header *evl;
		struct ether_header eh;

		/* Save the original Ethernet header... */
		m_head = m_pullup(m_head, sizeof(eh));
		if (m_head == NULL) {
			*m_headp = NULL;
			bus_dmamap_destroy(adapter->txtag, map);
			return (ENOBUFS);
		}
		eh = *mtod(m_head, struct ether_header *);
		/* ...make room for the 802.1Q encapsulation... */
		M_PREPEND(m_head, sizeof(*evl), M_DONTWAIT);
		if (m_head == NULL) {
			*m_headp = NULL;
			bus_dmamap_destroy(adapter->txtag, map);
			return (ENOBUFS);
		}
		m_head = m_pullup(m_head, sizeof(*evl));
		if (m_head == NULL) {
			*m_headp = NULL;
			bus_dmamap_destroy(adapter->txtag, map);
			return (ENOBUFS);
		}
		/* ...and splice the tag into the rebuilt header. */
		evl = mtod(m_head, struct ether_vlan_header *);
		bcopy(&eh, evl, sizeof(*evl));
		evl->evl_proto = evl->evl_encap_proto;
		evl->evl_encap_proto = htons(ETHERTYPE_VLAN);
		evl->evl_tag = htons(VLAN_TAG_VALUE(mtag));
		m_tag_delete(m_head, mtag);
		mtag = NULL;
		*m_headp = m_head;
	}

	i = adapter->next_avail_tx_desc;
	if (adapter->pcix_82544) {
		/* Remember the starting slot so we can roll back on failure. */
		txd_saved = i;
		txd_used = 0;
	}
	for (j = 0; j < nsegs; j++) {
		/* If adapter is 82544 and on PCIX bus */
		if(adapter->pcix_82544) {
			array_elements = 0;
			address = htole64(segs[j].ds_addr);
			/*
			 * Check the Address and Length combination and
			 * split the data accordingly
			 */
			array_elements = em_fill_descriptors(address,
			    htole32(segs[j].ds_len),
			    &desc_array);
			for (counter = 0; counter < array_elements; counter++) {
				if (txd_used == adapter->num_tx_desc_avail) {
					/* Out of descriptors mid-frame: undo. */
					adapter->next_avail_tx_desc = txd_saved;
					adapter->no_tx_desc_avail2++;
					bus_dmamap_destroy(adapter->txtag, map);
					return (ENOBUFS);
				}
				tx_buffer = &adapter->tx_buffer_area[i];
				current_tx_desc = &adapter->tx_desc_base[i];
				current_tx_desc->buffer_addr = htole64(
					desc_array.descriptor[counter].address);
				current_tx_desc->lower.data = htole32(
					(adapter->txd_cmd | txd_lower |
					 (u_int16_t)desc_array.descriptor[counter].length));
				current_tx_desc->upper.data = htole32((txd_upper));
				if (++i == adapter->num_tx_desc)
					i = 0;

				tx_buffer->m_head = NULL;
				txd_used++;
			}
		} else {
			/* Common case: one descriptor per DMA segment. */
			tx_buffer = &adapter->tx_buffer_area[i];
			current_tx_desc = &adapter->tx_desc_base[i];

			current_tx_desc->buffer_addr = htole64(segs[j].ds_addr);
			current_tx_desc->lower.data = htole32(
				adapter->txd_cmd | txd_lower | segs[j].ds_len);
			current_tx_desc->upper.data = htole32(txd_upper);

			if (++i == adapter->num_tx_desc)
				i = 0;

			tx_buffer->m_head = NULL;
		}
	}

	adapter->next_avail_tx_desc = i;
	if (adapter->pcix_82544) {
		adapter->num_tx_desc_avail -= txd_used;
	}
	else {
		adapter->num_tx_desc_avail -= nsegs;
	}

	if (mtag != NULL) {
		/* Set the vlan id */
		current_tx_desc->upper.fields.special = htole16(VLAN_TAG_VALUE(mtag));

		/* Tell hardware to add tag */
		current_tx_desc->lower.data |= htole32(E1000_TXD_CMD_VLE);
	}

	/* The mbuf chain and map are owned by the frame's last descriptor. */
	tx_buffer->m_head = m_head;
	tx_buffer->map = map;
	bus_dmamap_sync(adapter->txtag, map, BUS_DMASYNC_PREWRITE);

	/*
	 * Last Descriptor of Packet needs End Of Packet (EOP)
	 */
	current_tx_desc->lower.data |= htole32(E1000_TXD_CMD_EOP);

	/*
	 * Advance the Transmit Descriptor Tail (Tdt), this tells the E1000
	 * that this frame is available to transmit.
	 */
	bus_dmamap_sync(adapter->txdma.dma_tag, adapter->txdma.dma_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	if (adapter->hw.mac_type == em_82547 &&
	    adapter->link_duplex == HALF_DUPLEX) {
		/* 82547 half duplex: route through the TX FIFO workaround. */
		em_82547_move_tail_locked(adapter);
	} else {
		E1000_WRITE_REG(&adapter->hw, TDT, i);
		if (adapter->hw.mac_type == em_82547) {
			em_82547_update_fifo_head(adapter, m_head->m_pkthdr.len);
		}
	}

	return(0);
}
1356
1357 /*********************************************************************
1358 *
1359 * 82547 workaround to avoid controller hang in half-duplex environment.
1360 * The workaround is to avoid queuing a large packet that would span
1361 * the internal Tx FIFO ring boundary. We need to reset the FIFO pointers
1362 * in this case. We do that only when FIFO is quiescent.
1363 *
1364 **********************************************************************/
static void
em_82547_move_tail_locked(struct adapter *adapter)
{
	uint16_t hw_tdt;
	uint16_t sw_tdt;
	struct em_tx_desc *tx_desc;
	uint16_t length = 0;
	boolean_t eop = 0;

	EM_LOCK_ASSERT(adapter);

	/*
	 * Walk the descriptors between the tail the hardware has seen
	 * (TDT register) and the software tail, accumulating each
	 * frame's length until its EOP descriptor.
	 */
	hw_tdt = E1000_READ_REG(&adapter->hw, TDT);
	sw_tdt = adapter->next_avail_tx_desc;

	while (hw_tdt != sw_tdt) {
		tx_desc = &adapter->tx_desc_base[hw_tdt];
		length += tx_desc->lower.flags.length;
		eop = tx_desc->lower.data & E1000_TXD_CMD_EOP;
		if(++hw_tdt == adapter->num_tx_desc)
			hw_tdt = 0;

		if(eop) {
			/*
			 * Frame complete. If it would trip the FIFO
			 * workaround, retry later from the tx_fifo_timer
			 * callout instead of advancing the tail now.
			 */
			if (em_82547_fifo_workaround(adapter, length)) {
				adapter->tx_fifo_wrk_cnt++;
				callout_reset(&adapter->tx_fifo_timer, 1,
				    em_82547_move_tail, adapter);
				break;
			}
			/* Hand the frame to hardware and account for it. */
			E1000_WRITE_REG(&adapter->hw, TDT, hw_tdt);
			em_82547_update_fifo_head(adapter, length);
			length = 0;
		}
	}
	return;
}
1400
/*
 * Callout wrapper: acquire the adapter lock and retry moving the
 * 82547 transmit tail.
 */
static void
em_82547_move_tail(void *arg)
{
	struct adapter *adapter = (struct adapter *)arg;

	EM_LOCK(adapter);
	em_82547_move_tail_locked(adapter);
	EM_UNLOCK(adapter);
}
1410
1411 static int
1412 em_82547_fifo_workaround(struct adapter *adapter, int len)
1413 {
1414 int fifo_space, fifo_pkt_len;
1415
1416 fifo_pkt_len = EM_ROUNDUP(len + EM_FIFO_HDR, EM_FIFO_HDR);
1417
1418 if (adapter->link_duplex == HALF_DUPLEX) {
1419 fifo_space = adapter->tx_fifo_size - adapter->tx_fifo_head;
1420
1421 if (fifo_pkt_len >= (EM_82547_PKT_THRESH + fifo_space)) {
1422 if (em_82547_tx_fifo_reset(adapter)) {
1423 return(0);
1424 }
1425 else {
1426 return(1);
1427 }
1428 }
1429 }
1430
1431 return(0);
1432 }
1433
1434 static void
1435 em_82547_update_fifo_head(struct adapter *adapter, int len)
1436 {
1437 int fifo_pkt_len = EM_ROUNDUP(len + EM_FIFO_HDR, EM_FIFO_HDR);
1438
1439 /* tx_fifo_head is always 16 byte aligned */
1440 adapter->tx_fifo_head += fifo_pkt_len;
1441 if (adapter->tx_fifo_head >= adapter->tx_fifo_size) {
1442 adapter->tx_fifo_head -= adapter->tx_fifo_size;
1443 }
1444
1445 return;
1446 }
1447
1448
1449 static int
1450 em_82547_tx_fifo_reset(struct adapter *adapter)
1451 {
1452 uint32_t tctl;
1453
1454 if ( (E1000_READ_REG(&adapter->hw, TDT) ==
1455 E1000_READ_REG(&adapter->hw, TDH)) &&
1456 (E1000_READ_REG(&adapter->hw, TDFT) ==
1457 E1000_READ_REG(&adapter->hw, TDFH)) &&
1458 (E1000_READ_REG(&adapter->hw, TDFTS) ==
1459 E1000_READ_REG(&adapter->hw, TDFHS)) &&
1460 (E1000_READ_REG(&adapter->hw, TDFPC) == 0)) {
1461
1462 /* Disable TX unit */
1463 tctl = E1000_READ_REG(&adapter->hw, TCTL);
1464 E1000_WRITE_REG(&adapter->hw, TCTL, tctl & ~E1000_TCTL_EN);
1465
1466 /* Reset FIFO pointers */
1467 E1000_WRITE_REG(&adapter->hw, TDFT, adapter->tx_head_addr);
1468 E1000_WRITE_REG(&adapter->hw, TDFH, adapter->tx_head_addr);
1469 E1000_WRITE_REG(&adapter->hw, TDFTS, adapter->tx_head_addr);
1470 E1000_WRITE_REG(&adapter->hw, TDFHS, adapter->tx_head_addr);
1471
1472 /* Re-enable TX unit */
1473 E1000_WRITE_REG(&adapter->hw, TCTL, tctl);
1474 E1000_WRITE_FLUSH(&adapter->hw);
1475
1476 adapter->tx_fifo_head = 0;
1477 adapter->tx_fifo_reset_cnt++;
1478
1479 return(TRUE);
1480 }
1481 else {
1482 return(FALSE);
1483 }
1484 }
1485
1486 static void
1487 em_set_promisc(struct adapter * adapter)
1488 {
1489
1490 u_int32_t reg_rctl;
1491 struct ifnet *ifp = adapter->ifp;
1492
1493 reg_rctl = E1000_READ_REG(&adapter->hw, RCTL);
1494
1495 if (ifp->if_flags & IFF_PROMISC) {
1496 reg_rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
1497 E1000_WRITE_REG(&adapter->hw, RCTL, reg_rctl);
1498 /* Disable VLAN stripping in promiscous mode
1499 * This enables bridging of vlan tagged frames to occur
1500 * and also allows vlan tags to be seen in tcpdump
1501 */
1502 if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING)
1503 em_disable_vlans(adapter);
1504 adapter->em_insert_vlan_header = 1;
1505 } else if (ifp->if_flags & IFF_ALLMULTI) {
1506 reg_rctl |= E1000_RCTL_MPE;
1507 reg_rctl &= ~E1000_RCTL_UPE;
1508 E1000_WRITE_REG(&adapter->hw, RCTL, reg_rctl);
1509 adapter->em_insert_vlan_header = 0;
1510 } else
1511 adapter->em_insert_vlan_header = 0;
1512
1513 return;
1514 }
1515
1516 static void
1517 em_disable_promisc(struct adapter * adapter)
1518 {
1519 u_int32_t reg_rctl;
1520 struct ifnet *ifp = adapter->ifp;
1521
1522 reg_rctl = E1000_READ_REG(&adapter->hw, RCTL);
1523
1524 reg_rctl &= (~E1000_RCTL_UPE);
1525 reg_rctl &= (~E1000_RCTL_MPE);
1526 E1000_WRITE_REG(&adapter->hw, RCTL, reg_rctl);
1527
1528 if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING)
1529 em_enable_vlans(adapter);
1530 adapter->em_insert_vlan_header = 0;
1531
1532 return;
1533 }
1534
1535
1536 /*********************************************************************
1537 * Multicast Update
1538 *
1539 * This routine is called whenever multicast address list is updated.
1540 *
1541 **********************************************************************/
1542
static void
em_set_multi(struct adapter * adapter)
{
	u_int32_t reg_rctl = 0;
	u_int8_t mta[MAX_NUM_MULTICAST_ADDRESSES * ETH_LENGTH_OF_ADDRESS];
	struct ifmultiaddr *ifma;
	int mcnt = 0;
	struct ifnet *ifp = adapter->ifp;

	IOCTL_DEBUGOUT("em_set_multi: begin");

	/*
	 * On 82542 rev2.0 the receiver is held in reset (with MWI
	 * cleared) while the filter is rewritten — presumably a hardware
	 * requirement of that MAC; confirm against the 82542 errata.
	 */
	if (adapter->hw.mac_type == em_82542_rev2_0) {
		reg_rctl = E1000_READ_REG(&adapter->hw, RCTL);
		if (adapter->hw.pci_cmd_word & CMD_MEM_WRT_INVALIDATE) {
			em_pci_clear_mwi(&adapter->hw);
		}
		reg_rctl |= E1000_RCTL_RST;
		E1000_WRITE_REG(&adapter->hw, RCTL, reg_rctl);
		msec_delay(5);
	}

	/* Gather up to MAX_NUM_MULTICAST_ADDRESSES link-level addresses. */
	IF_ADDR_LOCK(ifp);
	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;

		if (mcnt == MAX_NUM_MULTICAST_ADDRESSES) break;

		bcopy(LLADDR((struct sockaddr_dl *)ifma->ifma_addr),
		    &mta[mcnt*ETH_LENGTH_OF_ADDRESS], ETH_LENGTH_OF_ADDRESS);
		mcnt++;
	}
	IF_ADDR_UNLOCK(ifp);

	/* Too many groups for the filter table: accept all multicast. */
	if (mcnt >= MAX_NUM_MULTICAST_ADDRESSES) {
		reg_rctl = E1000_READ_REG(&adapter->hw, RCTL);
		reg_rctl |= E1000_RCTL_MPE;
		E1000_WRITE_REG(&adapter->hw, RCTL, reg_rctl);
	} else
		em_mc_addr_list_update(&adapter->hw, mta, mcnt, 0, 1);

	/* Take the 82542 receiver back out of reset and restore MWI. */
	if (adapter->hw.mac_type == em_82542_rev2_0) {
		reg_rctl = E1000_READ_REG(&adapter->hw, RCTL);
		reg_rctl &= ~E1000_RCTL_RST;
		E1000_WRITE_REG(&adapter->hw, RCTL, reg_rctl);
		msec_delay(5);
		if (adapter->hw.pci_cmd_word & CMD_MEM_WRT_INVALIDATE) {
			em_pci_set_mwi(&adapter->hw);
		}
	}

	return;
}
1596
1597
1598 /*********************************************************************
1599 * Timer routine
1600 *
1601 * This routine checks for link status and updates statistics.
1602 *
1603 **********************************************************************/
1604
1605 static void
1606 em_local_timer(void *arg)
1607 {
1608 struct ifnet *ifp;
1609 struct adapter * adapter = arg;
1610 ifp = adapter->ifp;
1611
1612 EM_LOCK(adapter);
1613
1614 em_check_for_link(&adapter->hw);
1615 em_print_link_status(adapter);
1616 em_update_stats_counters(adapter);
1617 if (em_display_debug_stats && ifp->if_drv_flags & IFF_DRV_RUNNING) {
1618 em_print_hw_stats(adapter);
1619 }
1620 em_smartspeed(adapter);
1621
1622 callout_reset(&adapter->timer, hz, em_local_timer, adapter);
1623
1624 EM_UNLOCK(adapter);
1625 return;
1626 }
1627
1628 static void
1629 em_print_link_status(struct adapter * adapter)
1630 {
1631 struct ifnet *ifp = adapter->ifp;
1632
1633 if (E1000_READ_REG(&adapter->hw, STATUS) & E1000_STATUS_LU) {
1634 if (adapter->link_active == 0) {
1635 em_get_speed_and_duplex(&adapter->hw,
1636 &adapter->link_speed,
1637 &adapter->link_duplex);
1638 if (bootverbose)
1639 printf("em%d: Link is up %d Mbps %s\n",
1640 adapter->unit,
1641 adapter->link_speed,
1642 ((adapter->link_duplex == FULL_DUPLEX) ?
1643 "Full Duplex" : "Half Duplex"));
1644 adapter->link_active = 1;
1645 adapter->smartspeed = 0;
1646 if_link_state_change(ifp, LINK_STATE_UP);
1647 }
1648 } else {
1649 if (adapter->link_active == 1) {
1650 adapter->link_speed = 0;
1651 adapter->link_duplex = 0;
1652 if (bootverbose)
1653 printf("em%d: Link is Down\n", adapter->unit);
1654 adapter->link_active = 0;
1655 if_link_state_change(ifp, LINK_STATE_DOWN);
1656 }
1657 }
1658
1659 return;
1660 }
1661
1662 /*********************************************************************
1663 *
1664 * This routine disables all traffic on the adapter by issuing a
1665 * global reset on the MAC and deallocates TX/RX buffers.
1666 *
1667 **********************************************************************/
1668
1669 static void
1670 em_stop(void *arg)
1671 {
1672 struct ifnet *ifp;
1673 struct adapter * adapter = arg;
1674 ifp = adapter->ifp;
1675
1676 mtx_assert(&adapter->mtx, MA_OWNED);
1677
1678 INIT_DEBUGOUT("em_stop: begin");
1679
1680 em_disable_intr(adapter);
1681 em_reset_hw(&adapter->hw);
1682 callout_stop(&adapter->timer);
1683 callout_stop(&adapter->tx_fifo_timer);
1684 em_free_transmit_structures(adapter);
1685 em_free_receive_structures(adapter);
1686
1687
1688 /* Tell the stack that the interface is no longer active */
1689 ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
1690
1691 return;
1692 }
1693
1694
1695 /*********************************************************************
1696 *
1697 * Determine hardware revision.
1698 *
1699 **********************************************************************/
1700 static void
1701 em_identify_hardware(struct adapter * adapter)
1702 {
1703 device_t dev = adapter->dev;
1704
1705 /* Make sure our PCI config space has the necessary stuff set */
1706 adapter->hw.pci_cmd_word = pci_read_config(dev, PCIR_COMMAND, 2);
1707 if (!((adapter->hw.pci_cmd_word & PCIM_CMD_BUSMASTEREN) &&
1708 (adapter->hw.pci_cmd_word & PCIM_CMD_MEMEN))) {
1709 printf("em%d: Memory Access and/or Bus Master bits were not set!\n",
1710 adapter->unit);
1711 adapter->hw.pci_cmd_word |=
1712 (PCIM_CMD_BUSMASTEREN | PCIM_CMD_MEMEN);
1713 pci_write_config(dev, PCIR_COMMAND, adapter->hw.pci_cmd_word, 2);
1714 }
1715
1716 /* Save off the information about this board */
1717 adapter->hw.vendor_id = pci_get_vendor(dev);
1718 adapter->hw.device_id = pci_get_device(dev);
1719 adapter->hw.revision_id = pci_read_config(dev, PCIR_REVID, 1);
1720 adapter->hw.subsystem_vendor_id = pci_read_config(dev, PCIR_SUBVEND_0, 2);
1721 adapter->hw.subsystem_id = pci_read_config(dev, PCIR_SUBDEV_0, 2);
1722
1723 /* Identify the MAC */
1724 if (em_set_mac_type(&adapter->hw))
1725 printf("em%d: Unknown MAC Type\n", adapter->unit);
1726
1727 if(adapter->hw.mac_type == em_82541 ||
1728 adapter->hw.mac_type == em_82541_rev_2 ||
1729 adapter->hw.mac_type == em_82547 ||
1730 adapter->hw.mac_type == em_82547_rev_2)
1731 adapter->hw.phy_init_script = TRUE;
1732
1733 return;
1734 }
1735
static int
em_allocate_pci_resources(struct adapter * adapter)
{
	int i, val, rid;
	device_t dev = adapter->dev;

	/*
	 * Map the memory BAR holding the device registers.
	 * NOTE(review): error returns below leave earlier allocations in
	 * place; presumably the caller runs em_free_pci_resources() on
	 * failure — confirm against the attach path.
	 */
	rid = EM_MMBA;
	adapter->res_memory = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
	    &rid, RF_ACTIVE);
	if (!(adapter->res_memory)) {
		printf("em%d: Unable to allocate bus resource: memory\n",
		    adapter->unit);
		return(ENXIO);
	}
	adapter->osdep.mem_bus_space_tag =
	    rman_get_bustag(adapter->res_memory);
	adapter->osdep.mem_bus_space_handle =
	    rman_get_bushandle(adapter->res_memory);
	adapter->hw.hw_addr = (uint8_t *)&adapter->osdep.mem_bus_space_handle;


	if (adapter->hw.mac_type > em_82543) {
		/* Figure out where our IO BAR is */
		rid = EM_MMBA;
		/*
		 * Scan up to five BARs for one flagged as I/O space
		 * (bit 0 of the BAR set).
		 * NOTE(review): if none matches, io_rid keeps its prior
		 * value (presumably zero from softc allocation) and the
		 * allocation below will fail — confirm.
		 */
		for (i = 0; i < 5; i++) {
			val = pci_read_config(dev, rid, 4);
			if (val & 0x00000001) {
				adapter->io_rid = rid;
				break;
			}
			rid += 4;
		}

		adapter->res_ioport = bus_alloc_resource_any(dev,
		    SYS_RES_IOPORT,
		    &adapter->io_rid,
		    RF_ACTIVE);
		if (!(adapter->res_ioport)) {
			printf("em%d: Unable to allocate bus resource: ioport\n",
			    adapter->unit);
			return(ENXIO);
		}

		adapter->hw.io_base =
		    rman_get_start(adapter->res_ioport);
	}

	/* Allocate a shareable IRQ and hook up the interrupt handler. */
	rid = 0x0;
	adapter->res_interrupt = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
	    RF_SHAREABLE |
	    RF_ACTIVE);
	if (!(adapter->res_interrupt)) {
		printf("em%d: Unable to allocate bus resource: interrupt\n",
		    adapter->unit);
		return(ENXIO);
	}
	if (bus_setup_intr(dev, adapter->res_interrupt,
	    INTR_TYPE_NET | INTR_MPSAFE,
	    (void (*)(void *)) em_intr, adapter,
	    &adapter->int_handler_tag)) {
		printf("em%d: Error registering interrupt handler!\n",
		    adapter->unit);
		return(ENXIO);
	}

	adapter->hw.back = &adapter->osdep;

	return(0);
}
1805
1806 static void
1807 em_free_pci_resources(struct adapter * adapter)
1808 {
1809 device_t dev = adapter->dev;
1810
1811 if (adapter->res_interrupt != NULL) {
1812 bus_teardown_intr(dev, adapter->res_interrupt,
1813 adapter->int_handler_tag);
1814 bus_release_resource(dev, SYS_RES_IRQ, 0,
1815 adapter->res_interrupt);
1816 }
1817 if (adapter->res_memory != NULL) {
1818 bus_release_resource(dev, SYS_RES_MEMORY, EM_MMBA,
1819 adapter->res_memory);
1820 }
1821
1822 if (adapter->res_ioport != NULL) {
1823 bus_release_resource(dev, SYS_RES_IOPORT, adapter->io_rid,
1824 adapter->res_ioport);
1825 }
1826 return;
1827 }
1828
1829 /*********************************************************************
1830 *
1831 * Initialize the hardware to a configuration as specified by the
1832 * adapter structure. The controller is reset, the EEPROM is
1833 * verified, the MAC address is set, then the shared initialization
1834 * routines are called.
1835 *
1836 **********************************************************************/
1837 static int
1838 em_hardware_init(struct adapter * adapter)
1839 {
1840 INIT_DEBUGOUT("em_hardware_init: begin");
1841 /* Issue a global reset */
1842 em_reset_hw(&adapter->hw);
1843
1844 /* When hardware is reset, fifo_head is also reset */
1845 adapter->tx_fifo_head = 0;
1846
1847 /* Make sure we have a good EEPROM before we read from it */
1848 if (em_validate_eeprom_checksum(&adapter->hw) < 0) {
1849 printf("em%d: The EEPROM Checksum Is Not Valid\n",
1850 adapter->unit);
1851 return(EIO);
1852 }
1853
1854 if (em_read_part_num(&adapter->hw, &(adapter->part_num)) < 0) {
1855 printf("em%d: EEPROM read error while reading part number\n",
1856 adapter->unit);
1857 return(EIO);
1858 }
1859
1860 if (em_init_hw(&adapter->hw) < 0) {
1861 printf("em%d: Hardware Initialization Failed",
1862 adapter->unit);
1863 return(EIO);
1864 }
1865
1866 em_check_for_link(&adapter->hw);
1867 if (E1000_READ_REG(&adapter->hw, STATUS) & E1000_STATUS_LU)
1868 adapter->link_active = 1;
1869 else
1870 adapter->link_active = 0;
1871
1872 if (adapter->link_active) {
1873 em_get_speed_and_duplex(&adapter->hw,
1874 &adapter->link_speed,
1875 &adapter->link_duplex);
1876 } else {
1877 adapter->link_speed = 0;
1878 adapter->link_duplex = 0;
1879 }
1880
1881 return(0);
1882 }
1883
1884 /*********************************************************************
1885 *
1886 * Setup networking device structure and register an interface.
1887 *
1888 **********************************************************************/
static void
em_setup_interface(device_t dev, struct adapter * adapter)
{
	struct ifnet *ifp;
	INIT_DEBUGOUT("em_setup_interface: begin");

	/* Allocate the ifnet and wire up the driver entry points. */
	ifp = adapter->ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL)
		panic("%s: can not if_alloc()", device_get_nameunit(dev));
	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	ifp->if_mtu = ETHERMTU;
	ifp->if_baudrate = 1000000000;
	ifp->if_init = em_init;
	ifp->if_softc = adapter;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = em_ioctl;
	ifp->if_start = em_start;
	ifp->if_watchdog = em_watchdog;
	/* Size the send queue to match the TX descriptor ring. */
	IFQ_SET_MAXLEN(&ifp->if_snd, adapter->num_tx_desc - 1);
	ifp->if_snd.ifq_drv_maxlen = adapter->num_tx_desc - 1;
	IFQ_SET_READY(&ifp->if_snd);

	ether_ifattach(ifp, adapter->hw.mac_addr);

	ifp->if_capabilities = ifp->if_capenable = 0;

	/* Checksum offload is advertised from the 82543 onwards. */
	if (adapter->hw.mac_type >= em_82543) {
		ifp->if_capabilities |= IFCAP_HWCSUM;
		ifp->if_capenable |= IFCAP_HWCSUM;
	}

	/*
	 * Tell the upper layer(s) we support long frames.
	 */
	ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
	ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU;
	ifp->if_capenable |= IFCAP_VLAN_MTU;

#ifdef DEVICE_POLLING
	ifp->if_capabilities |= IFCAP_POLLING;
#endif

	/*
	 * Specify the media types supported by this adapter and register
	 * callbacks to update media and link information
	 */
	ifmedia_init(&adapter->media, IFM_IMASK, em_media_change,
	    em_media_status);
	if (adapter->hw.media_type == em_media_type_fiber) {
		/* Fiber boards only offer 1000BASE-SX. */
		ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_SX | IFM_FDX,
		    0, NULL);
		ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_SX,
		    0, NULL);
	} else {
		/* Copper boards offer the full 10/100/1000 set. */
		ifmedia_add(&adapter->media, IFM_ETHER | IFM_10_T, 0, NULL);
		ifmedia_add(&adapter->media, IFM_ETHER | IFM_10_T | IFM_FDX,
		    0, NULL);
		ifmedia_add(&adapter->media, IFM_ETHER | IFM_100_TX,
		    0, NULL);
		ifmedia_add(&adapter->media, IFM_ETHER | IFM_100_TX | IFM_FDX,
		    0, NULL);
		ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_T | IFM_FDX,
		    0, NULL);
		ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_T, 0, NULL);
	}
	/* Autoselect is always available and is the default. */
	ifmedia_add(&adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL);
	ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);

	return;
}
1959
1960
1961 /*********************************************************************
1962 *
1963 * Workaround for SmartSpeed on 82541 and 82547 controllers
1964 *
1965 **********************************************************************/
static void
em_smartspeed(struct adapter *adapter)
{
	uint16_t phy_tmp;

	/*
	 * Only applies while the link is down on an IGP PHY that is
	 * autonegotiating with 1000BASE-T full duplex advertised.
	 */
	if(adapter->link_active || (adapter->hw.phy_type != em_phy_igp) ||
	   !adapter->hw.autoneg || !(adapter->hw.autoneg_advertised & ADVERTISE_1000_FULL))
		return;

	if(adapter->smartspeed == 0) {
		/* If Master/Slave config fault is asserted twice,
		 * we assume back-to-back */
		em_read_phy_reg(&adapter->hw, PHY_1000T_STATUS, &phy_tmp);
		if(!(phy_tmp & SR_1000T_MS_CONFIG_FAULT)) return;
		em_read_phy_reg(&adapter->hw, PHY_1000T_STATUS, &phy_tmp);
		if(phy_tmp & SR_1000T_MS_CONFIG_FAULT) {
			em_read_phy_reg(&adapter->hw, PHY_1000T_CTRL,
			    &phy_tmp);
			if(phy_tmp & CR_1000T_MS_ENABLE) {
				/*
				 * Stop forcing master/slave resolution and
				 * restart autonegotiation.
				 */
				phy_tmp &= ~CR_1000T_MS_ENABLE;
				em_write_phy_reg(&adapter->hw,
				    PHY_1000T_CTRL, phy_tmp);
				adapter->smartspeed++;
				if(adapter->hw.autoneg &&
				   !em_phy_setup_autoneg(&adapter->hw) &&
				   !em_read_phy_reg(&adapter->hw, PHY_CTRL,
				    &phy_tmp)) {
					phy_tmp |= (MII_CR_AUTO_NEG_EN |
						    MII_CR_RESTART_AUTO_NEG);
					em_write_phy_reg(&adapter->hw,
					    PHY_CTRL, phy_tmp);
				}
			}
		}
		return;
	} else if(adapter->smartspeed == EM_SMARTSPEED_DOWNSHIFT) {
		/* If still no link, perhaps using 2/3 pair cable */
		em_read_phy_reg(&adapter->hw, PHY_1000T_CTRL, &phy_tmp);
		phy_tmp |= CR_1000T_MS_ENABLE;
		em_write_phy_reg(&adapter->hw, PHY_1000T_CTRL, phy_tmp);
		if(adapter->hw.autoneg &&
		   !em_phy_setup_autoneg(&adapter->hw) &&
		   !em_read_phy_reg(&adapter->hw, PHY_CTRL, &phy_tmp)) {
			phy_tmp |= (MII_CR_AUTO_NEG_EN |
				    MII_CR_RESTART_AUTO_NEG);
			em_write_phy_reg(&adapter->hw, PHY_CTRL, phy_tmp);
		}
	}
	/* Restart process after EM_SMARTSPEED_MAX iterations */
	if(adapter->smartspeed++ == EM_SMARTSPEED_MAX)
		adapter->smartspeed = 0;

	return;
}
2020
2021
2022 /*
2023 * Manage DMA'able memory.
2024 */
2025 static void
2026 em_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
2027 {
2028 if (error)
2029 return;
2030 *(bus_addr_t*) arg = segs->ds_addr;
2031 return;
2032 }
2033
2034 static int
2035 em_dma_malloc(struct adapter *adapter, bus_size_t size,
2036 struct em_dma_alloc *dma, int mapflags)
2037 {
2038 int r;
2039
2040 r = bus_dma_tag_create(NULL, /* parent */
2041 PAGE_SIZE, 0, /* alignment, bounds */
2042 BUS_SPACE_MAXADDR, /* lowaddr */
2043 BUS_SPACE_MAXADDR, /* highaddr */
2044 NULL, NULL, /* filter, filterarg */
2045 size, /* maxsize */
2046 1, /* nsegments */
2047 size, /* maxsegsize */
2048 BUS_DMA_ALLOCNOW, /* flags */
2049 NULL, /* lockfunc */
2050 NULL, /* lockarg */
2051 &dma->dma_tag);
2052 if (r != 0) {
2053 printf("em%d: em_dma_malloc: bus_dma_tag_create failed; "
2054 "error %u\n", adapter->unit, r);
2055 goto fail_0;
2056 }
2057
2058 r = bus_dmamem_alloc(dma->dma_tag, (void**) &dma->dma_vaddr,
2059 BUS_DMA_NOWAIT, &dma->dma_map);
2060 if (r != 0) {
2061 printf("em%d: em_dma_malloc: bus_dmammem_alloc failed; "
2062 "size %ju, error %d\n", adapter->unit,
2063 (uintmax_t)size, r);
2064 goto fail_2;
2065 }
2066
2067 r = bus_dmamap_load(dma->dma_tag, dma->dma_map, dma->dma_vaddr,
2068 size,
2069 em_dmamap_cb,
2070 &dma->dma_paddr,
2071 mapflags | BUS_DMA_NOWAIT);
2072 if (r != 0) {
2073 printf("em%d: em_dma_malloc: bus_dmamap_load failed; "
2074 "error %u\n", adapter->unit, r);
2075 goto fail_3;
2076 }
2077
2078 return (0);
2079
2080 fail_3:
2081 bus_dmamap_unload(dma->dma_tag, dma->dma_map);
2082 fail_2:
2083 bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map);
2084 bus_dma_tag_destroy(dma->dma_tag);
2085 fail_0:
2086 dma->dma_map = NULL;
2087 dma->dma_tag = NULL;
2088 return (r);
2089 }
2090
/*
 * Release the DMA map, memory and tag previously set up by
 * em_dma_malloc().  NOTE(review): assumes *dma was fully allocated;
 * em_dma_malloc() NULLs dma_tag/dma_map on failure, so callers must
 * only invoke this after a successful allocation -- confirm.
 */
static void
em_dma_free(struct adapter *adapter, struct em_dma_alloc *dma)
{
	bus_dmamap_unload(dma->dma_tag, dma->dma_map);
	bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map);
	bus_dma_tag_destroy(dma->dma_tag);
}
2098
2099
2100 /*********************************************************************
2101 *
2102 * Allocate memory for tx_buffer structures. The tx_buffer stores all
2103 * the information needed to transmit a packet on the wire.
2104 *
2105 **********************************************************************/
2106 static int
2107 em_allocate_transmit_structures(struct adapter * adapter)
2108 {
2109 if (!(adapter->tx_buffer_area =
2110 (struct em_buffer *) malloc(sizeof(struct em_buffer) *
2111 adapter->num_tx_desc, M_DEVBUF,
2112 M_NOWAIT))) {
2113 printf("em%d: Unable to allocate tx_buffer memory\n",
2114 adapter->unit);
2115 return ENOMEM;
2116 }
2117
2118 bzero(adapter->tx_buffer_area,
2119 sizeof(struct em_buffer) * adapter->num_tx_desc);
2120
2121 return 0;
2122 }
2123
2124 /*********************************************************************
2125 *
2126 * Allocate and initialize transmit structures.
2127 *
2128 **********************************************************************/
/*
 * Allocate and initialize all software state for the transmit path:
 * the mbuf-mapping DMA tag, the tx_buffer array, a cleared descriptor
 * ring, and the ring bookkeeping indices.  Returns 0 or ENOMEM.
 */
static int
em_setup_transmit_structures(struct adapter * adapter)
{
	/*
	 * Setup DMA descriptor areas.
	 */
	if (bus_dma_tag_create(NULL,			/* parent */
			       1, 0,			/* alignment, bounds */
			       BUS_SPACE_MAXADDR,	/* lowaddr */
			       BUS_SPACE_MAXADDR,	/* highaddr */
			       NULL, NULL,		/* filter, filterarg */
			       MCLBYTES * 8,		/* maxsize */
			       EM_MAX_SCATTER,		/* nsegments */
			       MCLBYTES * 8,		/* maxsegsize */
			       BUS_DMA_ALLOCNOW,	/* flags */
			       NULL,			/* lockfunc */
			       NULL,			/* lockarg */
			       &adapter->txtag)) {
		printf("em%d: Unable to allocate TX DMA tag\n", adapter->unit);
		return (ENOMEM);
	}

	/* NOTE(review): on this failure the txtag created above is not
	 * destroyed here -- presumably the caller unwinds through
	 * em_free_transmit_structures(); confirm. */
	if (em_allocate_transmit_structures(adapter))
		return (ENOMEM);

	/* Start with a clean descriptor ring. */
	bzero((void *) adapter->tx_desc_base,
	      (sizeof(struct em_tx_desc)) * adapter->num_tx_desc);

	adapter->next_avail_tx_desc = 0;
	adapter->oldest_used_tx_desc = 0;

	/* Set number of descriptors available */
	adapter->num_tx_desc_avail = adapter->num_tx_desc;

	/* Set checksum context */
	adapter->active_checksum_context = OFFLOAD_NONE;

	return (0);
}
2168
2169 /*********************************************************************
2170 *
2171 * Enable transmit unit.
2172 *
2173 **********************************************************************/
/*
 * Program the hardware transmit unit: descriptor ring base/length,
 * head/tail pointers, inter-packet gap, interrupt delay timers and
 * the transmit control register.  Also selects the default command
 * bits (txd_cmd) applied to every transmit descriptor.
 */
static void
em_initialize_transmit_unit(struct adapter * adapter)
{
	u_int32_t reg_tctl;
	u_int32_t reg_tipg = 0;
	u_int64_t bus_addr;

	INIT_DEBUGOUT("em_initialize_transmit_unit: begin");
	/* Setup the Base and Length of the Tx Descriptor Ring */
	bus_addr = adapter->txdma.dma_paddr;
	E1000_WRITE_REG(&adapter->hw, TDBAL, (u_int32_t)bus_addr);
	E1000_WRITE_REG(&adapter->hw, TDBAH, (u_int32_t)(bus_addr >> 32));
	E1000_WRITE_REG(&adapter->hw, TDLEN,
			adapter->num_tx_desc *
			sizeof(struct em_tx_desc));

	/* Setup the HW Tx Head and Tail descriptor pointers */
	E1000_WRITE_REG(&adapter->hw, TDH, 0);
	E1000_WRITE_REG(&adapter->hw, TDT, 0);


	HW_DEBUGOUT2("Base = %x, Length = %x\n",
		     E1000_READ_REG(&adapter->hw, TDBAL),
		     E1000_READ_REG(&adapter->hw, TDLEN));

	/* Set the default values for the Tx Inter Packet Gap timer.
	 * The 82542 revisions use their own constants; later MACs
	 * differ only in the IPGT value for fiber vs. copper media. */
	switch (adapter->hw.mac_type) {
	case em_82542_rev2_0:
	case em_82542_rev2_1:
		reg_tipg = DEFAULT_82542_TIPG_IPGT;
		reg_tipg |= DEFAULT_82542_TIPG_IPGR1 << E1000_TIPG_IPGR1_SHIFT;
		reg_tipg |= DEFAULT_82542_TIPG_IPGR2 << E1000_TIPG_IPGR2_SHIFT;
		break;
	default:
		if (adapter->hw.media_type == em_media_type_fiber)
			reg_tipg = DEFAULT_82543_TIPG_IPGT_FIBER;
		else
			reg_tipg = DEFAULT_82543_TIPG_IPGT_COPPER;
		reg_tipg |= DEFAULT_82543_TIPG_IPGR1 << E1000_TIPG_IPGR1_SHIFT;
		reg_tipg |= DEFAULT_82543_TIPG_IPGR2 << E1000_TIPG_IPGR2_SHIFT;
	}

	E1000_WRITE_REG(&adapter->hw, TIPG, reg_tipg);
	E1000_WRITE_REG(&adapter->hw, TIDV, adapter->tx_int_delay.value);
	/* The absolute TX interrupt delay register (TADV) exists only
	 * on 82540 and newer MACs. */
	if(adapter->hw.mac_type >= em_82540)
		E1000_WRITE_REG(&adapter->hw, TADV,
				adapter->tx_abs_int_delay.value);

	/* Program the Transmit Control Register */
	reg_tctl = E1000_TCTL_PSP | E1000_TCTL_EN |
		   (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT);
	/* MULR (multiple request) bit is set for 82573 and newer. */
	if (adapter->hw.mac_type >= em_82573)
		reg_tctl |= E1000_TCTL_MULR;
	/* Collision distance depends on the negotiated duplex. */
	if (adapter->link_duplex == 1) {
		reg_tctl |= E1000_FDX_COLLISION_DISTANCE << E1000_COLD_SHIFT;
	} else {
		reg_tctl |= E1000_HDX_COLLISION_DISTANCE << E1000_COLD_SHIFT;
	}
	E1000_WRITE_REG(&adapter->hw, TCTL, reg_tctl);

	/* Setup Transmit Descriptor Settings for this adapter */
	adapter->txd_cmd = E1000_TXD_CMD_IFCS | E1000_TXD_CMD_RS;

	/* Only request delayed interrupts if a delay is configured. */
	if (adapter->tx_int_delay.value > 0)
		adapter->txd_cmd |= E1000_TXD_CMD_IDE;

	return;
}
2242
2243 /*********************************************************************
2244 *
2245 * Free all transmit related data structures.
2246 *
2247 **********************************************************************/
2248 static void
2249 em_free_transmit_structures(struct adapter * adapter)
2250 {
2251 struct em_buffer *tx_buffer;
2252 int i;
2253
2254 INIT_DEBUGOUT("free_transmit_structures: begin");
2255
2256 if (adapter->tx_buffer_area != NULL) {
2257 tx_buffer = adapter->tx_buffer_area;
2258 for (i = 0; i < adapter->num_tx_desc; i++, tx_buffer++) {
2259 if (tx_buffer->m_head != NULL) {
2260 bus_dmamap_unload(adapter->txtag, tx_buffer->map);
2261 bus_dmamap_destroy(adapter->txtag, tx_buffer->map);
2262 m_freem(tx_buffer->m_head);
2263 }
2264 tx_buffer->m_head = NULL;
2265 }
2266 }
2267 if (adapter->tx_buffer_area != NULL) {
2268 free(adapter->tx_buffer_area, M_DEVBUF);
2269 adapter->tx_buffer_area = NULL;
2270 }
2271 if (adapter->txtag != NULL) {
2272 bus_dma_tag_destroy(adapter->txtag);
2273 adapter->txtag = NULL;
2274 }
2275 return;
2276 }
2277
2278 /*********************************************************************
2279 *
2280 * The offload context needs to be set when we transfer the first
2281 * packet of a particular protocol (TCP/UDP). We change the
2282 * context only if the protocol type changes.
2283 *
2284 **********************************************************************/
/*
 * Configure hardware TX checksum offload for the protocol (TCP or
 * UDP) of the given mbuf.  Writes *txd_upper and *txd_lower with the
 * option bits the caller must place in the data descriptors.  A new
 * context descriptor is queued on the TX ring only when the protocol
 * differs from the currently active offload context.
 */
static void
em_transmit_checksum_setup(struct adapter * adapter,
			   struct mbuf *mp,
			   u_int32_t *txd_upper,
			   u_int32_t *txd_lower)
{
	struct em_context_desc *TXD;
	struct em_buffer *tx_buffer;
	int curr_txd;

	if (mp->m_pkthdr.csum_flags) {

		if (mp->m_pkthdr.csum_flags & CSUM_TCP) {
			*txd_upper = E1000_TXD_POPTS_TXSM << 8;
			*txd_lower = E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D;
			/* Context already programmed for TCP: done. */
			if (adapter->active_checksum_context == OFFLOAD_TCP_IP)
				return;
			else
				adapter->active_checksum_context = OFFLOAD_TCP_IP;

		} else if (mp->m_pkthdr.csum_flags & CSUM_UDP) {
			*txd_upper = E1000_TXD_POPTS_TXSM << 8;
			*txd_lower = E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D;
			if (adapter->active_checksum_context == OFFLOAD_UDP_IP)
				return;
			else
				adapter->active_checksum_context = OFFLOAD_UDP_IP;
		} else {
			/* Some other offload flag we do not handle here. */
			*txd_upper = 0;
			*txd_lower = 0;
			return;
		}
	} else {
		/* No checksum offload requested for this packet. */
		*txd_upper = 0;
		*txd_lower = 0;
		return;
	}

	/* If we reach this point, the checksum offload context
	 * needs to be reset.
	 */
	curr_txd = adapter->next_avail_tx_desc;
	tx_buffer = &adapter->tx_buffer_area[curr_txd];
	TXD = (struct em_context_desc *) &adapter->tx_desc_base[curr_txd];

	/* IP header checksum: start, offset of ip_sum, end.
	 * NOTE(review): these offsets assume a plain Ethernet + IPv4
	 * header with no VLAN tag and no IP options -- confirm callers
	 * guarantee this layout. */
	TXD->lower_setup.ip_fields.ipcss = ETHER_HDR_LEN;
	TXD->lower_setup.ip_fields.ipcso =
	    ETHER_HDR_LEN + offsetof(struct ip, ip_sum);
	TXD->lower_setup.ip_fields.ipcse =
	    htole16(ETHER_HDR_LEN + sizeof(struct ip) - 1);

	/* TCP/UDP checksum coverage start; end field written as 0. */
	TXD->upper_setup.tcp_fields.tucss =
	    ETHER_HDR_LEN + sizeof(struct ip);
	TXD->upper_setup.tcp_fields.tucse = htole16(0);

	/* Checksum result offset differs between TCP and UDP. */
	if (adapter->active_checksum_context == OFFLOAD_TCP_IP) {
		TXD->upper_setup.tcp_fields.tucso =
		    ETHER_HDR_LEN + sizeof(struct ip) +
		    offsetof(struct tcphdr, th_sum);
	} else if (adapter->active_checksum_context == OFFLOAD_UDP_IP) {
		TXD->upper_setup.tcp_fields.tucso =
		    ETHER_HDR_LEN + sizeof(struct ip) +
		    offsetof(struct udphdr, uh_sum);
	}

	TXD->tcp_seg_setup.data = htole32(0);
	TXD->cmd_and_length = htole32(adapter->txd_cmd | E1000_TXD_CMD_DEXT);

	/* Context descriptors carry no mbuf. */
	tx_buffer->m_head = NULL;

	/* The context descriptor consumes one ring slot. */
	if (++curr_txd == adapter->num_tx_desc)
		curr_txd = 0;

	adapter->num_tx_desc_avail--;
	adapter->next_avail_tx_desc = curr_txd;

	return;
}
2363
2364 /**********************************************************************
2365 *
2366 * Examine each tx_buffer in the used queue. If the hardware is done
2367 * processing the packet then free associated resources. The
2368 * tx_buffer is put back on the free queue.
2369 *
2370 **********************************************************************/
/*
 * Reclaim transmit descriptors the hardware has completed (DD bit
 * set): free the associated mbufs and DMA maps, advance the
 * oldest-used index, and update the available-descriptor count.
 * Clears IFF_DRV_OACTIVE and manages the watchdog timer once enough
 * room has been freed.  Must be called with the adapter lock held.
 */
static void
em_clean_transmit_interrupts(struct adapter * adapter)
{
	int i, num_avail;
	struct em_buffer *tx_buffer;
	struct em_tx_desc *tx_desc;
	struct ifnet *ifp = adapter->ifp;

	mtx_assert(&adapter->mtx, MA_OWNED);

	/* Fast path: nothing is outstanding. */
	if (adapter->num_tx_desc_avail == adapter->num_tx_desc)
		return;

	num_avail = adapter->num_tx_desc_avail;
	i = adapter->oldest_used_tx_desc;

	tx_buffer = &adapter->tx_buffer_area[i];
	tx_desc = &adapter->tx_desc_base[i];

	/* Make the device's status write-backs visible to the CPU. */
	bus_dmamap_sync(adapter->txdma.dma_tag, adapter->txdma.dma_map,
			BUS_DMASYNC_POSTREAD);
	while (tx_desc->upper.fields.status & E1000_TXD_STAT_DD) {

		tx_desc->upper.data = 0;
		num_avail++;

		/* Slots used for checksum contexts carry no mbuf. */
		if (tx_buffer->m_head) {
			ifp->if_opackets++;
			bus_dmamap_unload(adapter->txtag, tx_buffer->map);
			bus_dmamap_destroy(adapter->txtag, tx_buffer->map);

			m_freem(tx_buffer->m_head);
			tx_buffer->m_head = NULL;
		}

		/* Ring wrap. */
		if (++i == adapter->num_tx_desc)
			i = 0;

		tx_buffer = &adapter->tx_buffer_area[i];
		tx_desc = &adapter->tx_desc_base[i];
	}
	bus_dmamap_sync(adapter->txdma.dma_tag, adapter->txdma.dma_map,
			BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	adapter->oldest_used_tx_desc = i;

	/*
	 * If we have enough room, clear IFF_DRV_OACTIVE to tell the stack
	 * that it is OK to send packets.
	 * If there are no pending descriptors, clear the timeout. Otherwise,
	 * if some descriptors have been freed, restart the timeout.
	 */
	if (num_avail > EM_TX_CLEANUP_THRESHOLD) {
		ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
		if (num_avail == adapter->num_tx_desc)
			ifp->if_timer = 0;
		else if (num_avail == adapter->num_tx_desc_avail)
			ifp->if_timer = EM_TX_TIMEOUT;
	}
	adapter->num_tx_desc_avail = num_avail;
	return;
}
2433
2434 /*********************************************************************
2435 *
2436 * Get a buffer from system mbuf buffer pool.
2437 *
2438 **********************************************************************/
/*
 * Attach an mbuf cluster to receive descriptor slot i.  If nmp is
 * NULL a fresh cluster is allocated; otherwise the caller's mbuf is
 * recycled.  The cluster is DMA-loaded into the slot's map and its
 * physical address written into the receive descriptor.
 * Returns 0 on success, ENOBUFS, or a bus_dmamap_load() error.
 */
static int
em_get_buf(int i, struct adapter *adapter,
	   struct mbuf *nmp)
{
	register struct mbuf *mp = nmp;
	struct em_buffer *rx_buffer;
	struct ifnet *ifp;
	bus_addr_t paddr;
	int error;

	ifp = adapter->ifp;

	if (mp == NULL) {
		/* Allocate a new mbuf with an attached cluster. */
		mp = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
		if (mp == NULL) {
			adapter->mbuf_cluster_failed++;
			return(ENOBUFS);
		}
		mp->m_len = mp->m_pkthdr.len = MCLBYTES;
	} else {
		/* Recycle: reset the mbuf to cover the whole cluster. */
		mp->m_len = mp->m_pkthdr.len = MCLBYTES;
		mp->m_data = mp->m_ext.ext_buf;
		mp->m_next = NULL;
	}

	/* For standard MTU, skip ETHER_ALIGN bytes at the front so the
	 * IP header following the Ethernet header lands aligned. */
	if (ifp->if_mtu <= ETHERMTU) {
		m_adj(mp, ETHER_ALIGN);
	}

	rx_buffer = &adapter->rx_buffer_area[i];

	/*
	 * Using memory from the mbuf cluster pool, invoke the
	 * bus_dma machinery to arrange the memory mapping.
	 */
	error = bus_dmamap_load(adapter->rxtag, rx_buffer->map,
				mtod(mp, void *), mp->m_len,
				em_dmamap_cb, &paddr, 0);
	if (error) {
		m_free(mp);
		return(error);
	}
	rx_buffer->m_head = mp;
	/* Hand the cluster's physical address to the hardware. */
	adapter->rx_desc_base[i].buffer_addr = htole64(paddr);
	bus_dmamap_sync(adapter->rxtag, rx_buffer->map, BUS_DMASYNC_PREREAD |
	    BUS_DMASYNC_PREWRITE);

	return(0);
}
2488
2489 /*********************************************************************
2490 *
2491 * Allocate memory for rx_buffer structures. Since we use one
2492 * rx_buffer per received packet, the maximum number of rx_buffer's
2493 * that we'll need is equal to the number of receive descriptors
2494 * that we've allocated.
2495 *
2496 **********************************************************************/
/*
 * Allocate all receive-side software state: the rx_buffer array, the
 * DMA tag for mbuf clusters, one DMA map per descriptor, and an
 * initial cluster for every slot.  Returns 0 or an errno value.
 */
static int
em_allocate_receive_structures(struct adapter * adapter)
{
	int i, error;
	struct em_buffer *rx_buffer;

	if (!(adapter->rx_buffer_area =
	      (struct em_buffer *) malloc(sizeof(struct em_buffer) *
					  adapter->num_rx_desc, M_DEVBUF,
					  M_NOWAIT))) {
		printf("em%d: Unable to allocate rx_buffer memory\n",
		       adapter->unit);
		return(ENOMEM);
	}

	bzero(adapter->rx_buffer_area,
	      sizeof(struct em_buffer) * adapter->num_rx_desc);

	error = bus_dma_tag_create(NULL,		/* parent */
				   1, 0,		/* alignment, bounds */
				   BUS_SPACE_MAXADDR,	/* lowaddr */
				   BUS_SPACE_MAXADDR,	/* highaddr */
				   NULL, NULL,		/* filter, filterarg */
				   MCLBYTES,		/* maxsize */
				   1,			/* nsegments */
				   MCLBYTES,		/* maxsegsize */
				   BUS_DMA_ALLOCNOW,	/* flags */
				   NULL,		/* lockfunc */
				   NULL,		/* lockarg */
				   &adapter->rxtag);
	if (error != 0) {
		printf("em%d: em_allocate_receive_structures: "
		       "bus_dma_tag_create failed; error %u\n",
		       adapter->unit, error);
		goto fail_0;
	}

	rx_buffer = adapter->rx_buffer_area;
	for (i = 0; i < adapter->num_rx_desc; i++, rx_buffer++) {
		error = bus_dmamap_create(adapter->rxtag, BUS_DMA_NOWAIT,
					  &rx_buffer->map);
		if (error != 0) {
			/* NOTE(review): maps created in earlier iterations
			 * are not destroyed before the tag is -- presumably
			 * the caller's cleanup path handles this; verify. */
			printf("em%d: em_allocate_receive_structures: "
			       "bus_dmamap_create failed; error %u\n",
			       adapter->unit, error);
			goto fail_1;
		}
	}

	for (i = 0; i < adapter->num_rx_desc; i++) {
		error = em_get_buf(i, adapter, NULL);
		if (error != 0) {
			/* NOTE(review): returns without releasing the tag,
			 * maps or clusters already allocated; relies on the
			 * caller unwinding via
			 * em_free_receive_structures() -- confirm. */
			adapter->rx_buffer_area[i].m_head = NULL;
			adapter->rx_desc_base[i].buffer_addr = 0;
			return(error);
		}
	}
	/* Push the freshly written descriptor ring out to the device. */
	bus_dmamap_sync(adapter->rxdma.dma_tag, adapter->rxdma.dma_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	return(0);

fail_1:
	bus_dma_tag_destroy(adapter->rxtag);
fail_0:
	adapter->rxtag = NULL;
	free(adapter->rx_buffer_area, M_DEVBUF);
	adapter->rx_buffer_area = NULL;
	return (error);
}
2567
2568 /*********************************************************************
2569 *
2570 * Allocate and initialize receive structures.
2571 *
2572 **********************************************************************/
2573 static int
2574 em_setup_receive_structures(struct adapter * adapter)
2575 {
2576 bzero((void *) adapter->rx_desc_base,
2577 (sizeof(struct em_rx_desc)) * adapter->num_rx_desc);
2578
2579 if (em_allocate_receive_structures(adapter))
2580 return ENOMEM;
2581
2582 /* Setup our descriptor pointers */
2583 adapter->next_rx_desc_to_check = 0;
2584 return(0);
2585 }
2586
2587 /*********************************************************************
2588 *
2589 * Enable receive unit.
2590 *
2591 **********************************************************************/
/*
 * Program the hardware receive unit: interrupt delay/throttle timers,
 * descriptor ring base/length, head/tail pointers, the receive
 * control register (buffer size, broadcast accept, long-packet
 * enable) and, on 82543+, receive checksum offload.
 */
static void
em_initialize_receive_unit(struct adapter * adapter)
{
	u_int32_t reg_rctl;
	u_int32_t reg_rxcsum;
	struct ifnet *ifp;
	u_int64_t bus_addr;

	INIT_DEBUGOUT("em_initialize_receive_unit: begin");
	ifp = adapter->ifp;

	/* Make sure receives are disabled while setting up the descriptor ring */
	E1000_WRITE_REG(&adapter->hw, RCTL, 0);

	/* Set the Receive Delay Timer Register */
	E1000_WRITE_REG(&adapter->hw, RDTR,
			adapter->rx_int_delay.value | E1000_RDT_FPDB);

	if(adapter->hw.mac_type >= em_82540) {
		/* Absolute RX interrupt delay: 82540 and newer only. */
		E1000_WRITE_REG(&adapter->hw, RADV,
				adapter->rx_abs_int_delay.value);

		/* Set the interrupt throttling rate.  Value is calculated
		 * as DEFAULT_ITR = 1/(MAX_INTS_PER_SEC * 256ns) */
#define MAX_INTS_PER_SEC	8000
#define DEFAULT_ITR	1000000000/(MAX_INTS_PER_SEC * 256)
		E1000_WRITE_REG(&adapter->hw, ITR, DEFAULT_ITR);
	}

	/* Setup the Base and Length of the Rx Descriptor Ring */
	bus_addr = adapter->rxdma.dma_paddr;
	E1000_WRITE_REG(&adapter->hw, RDBAL, (u_int32_t)bus_addr);
	E1000_WRITE_REG(&adapter->hw, RDBAH, (u_int32_t)(bus_addr >> 32));
	E1000_WRITE_REG(&adapter->hw, RDLEN, adapter->num_rx_desc *
			sizeof(struct em_rx_desc));

	/* Setup the HW Rx Head and Tail Descriptor Pointers */
	E1000_WRITE_REG(&adapter->hw, RDH, 0);
	E1000_WRITE_REG(&adapter->hw, RDT, adapter->num_rx_desc - 1);

	/* Setup the Receive Control Register */
	reg_rctl = E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_LBM_NO |
		   E1000_RCTL_RDMTS_HALF |
		   (adapter->hw.mc_filter_type << E1000_RCTL_MO_SHIFT);

	/* Store bad packets when the TBI compatibility workaround is
	 * active; em_process_receive_interrupts() fixes them up. */
	if (adapter->hw.tbi_compatibility_on == TRUE)
		reg_rctl |= E1000_RCTL_SBP;


	switch (adapter->rx_buffer_len) {
	default:
	case EM_RXBUFFER_2048:
		reg_rctl |= E1000_RCTL_SZ_2048;
		break;
	case EM_RXBUFFER_4096:
		reg_rctl |= E1000_RCTL_SZ_4096 | E1000_RCTL_BSEX | E1000_RCTL_LPE;
		break;
	case EM_RXBUFFER_8192:
		reg_rctl |= E1000_RCTL_SZ_8192 | E1000_RCTL_BSEX | E1000_RCTL_LPE;
		break;
	case EM_RXBUFFER_16384:
		reg_rctl |= E1000_RCTL_SZ_16384 | E1000_RCTL_BSEX | E1000_RCTL_LPE;
		break;
	}

	/* Jumbo frames need the long-packet-enable bit. */
	if (ifp->if_mtu > ETHERMTU)
		reg_rctl |= E1000_RCTL_LPE;

	/* Enable 82543 Receive Checksum Offload for TCP and UDP */
	if ((adapter->hw.mac_type >= em_82543) &&
	    (ifp->if_capenable & IFCAP_RXCSUM)) {
		reg_rxcsum = E1000_READ_REG(&adapter->hw, RXCSUM);
		reg_rxcsum |= (E1000_RXCSUM_IPOFL | E1000_RXCSUM_TUOFL);
		E1000_WRITE_REG(&adapter->hw, RXCSUM, reg_rxcsum);
	}

	/* Enable Receives */
	E1000_WRITE_REG(&adapter->hw, RCTL, reg_rctl);

	return;
}
2673
2674 /*********************************************************************
2675 *
2676 * Free receive related data structures.
2677 *
2678 **********************************************************************/
2679 static void
2680 em_free_receive_structures(struct adapter *adapter)
2681 {
2682 struct em_buffer *rx_buffer;
2683 int i;
2684
2685 INIT_DEBUGOUT("free_receive_structures: begin");
2686
2687 if (adapter->rx_buffer_area != NULL) {
2688 rx_buffer = adapter->rx_buffer_area;
2689 for (i = 0; i < adapter->num_rx_desc; i++, rx_buffer++) {
2690 if (rx_buffer->map != NULL) {
2691 bus_dmamap_unload(adapter->rxtag, rx_buffer->map);
2692 bus_dmamap_destroy(adapter->rxtag, rx_buffer->map);
2693 }
2694 if (rx_buffer->m_head != NULL)
2695 m_freem(rx_buffer->m_head);
2696 rx_buffer->m_head = NULL;
2697 }
2698 }
2699 if (adapter->rx_buffer_area != NULL) {
2700 free(adapter->rx_buffer_area, M_DEVBUF);
2701 adapter->rx_buffer_area = NULL;
2702 }
2703 if (adapter->rxtag != NULL) {
2704 bus_dma_tag_destroy(adapter->rxtag);
2705 adapter->rxtag = NULL;
2706 }
2707 return;
2708 }
2709
2710 /*********************************************************************
2711 *
2712 * This routine executes in interrupt context. It replenishes
2713 * the mbufs in the descriptor and sends data which has been
2714 * dma'ed into host memory to upper layer.
2715 *
2716 * We loop at most count times if count is > 0, or until done if
2717 * count < 0.
2718 *
2719 *********************************************************************/
/*
 * Receive-path worker, called with the adapter lock held.  Walks the
 * RX ring starting at next_rx_desc_to_check, reassembles multi-
 * descriptor frames via the fmp/lmp chain pointers, replenishes each
 * slot with a fresh cluster, and hands completed packets to the
 * stack (dropping the lock around if_input).  Processes at most
 * `count` complete frames when count > 0, or until the ring is dry
 * when count < 0.
 */
static void
em_process_receive_interrupts(struct adapter * adapter, int count)
{
	struct ifnet *ifp;
	struct mbuf *mp;
	u_int8_t accept_frame = 0;
	u_int8_t eop = 0;
	u_int16_t len, desc_len, prev_len_adj;
	int i;

	/* Pointer to the receive descriptor being examined. */
	struct em_rx_desc *current_desc;

	mtx_assert(&adapter->mtx, MA_OWNED);

	ifp = adapter->ifp;
	i = adapter->next_rx_desc_to_check;
	current_desc = &adapter->rx_desc_base[i];
	/* Pull the device's descriptor write-backs into view. */
	bus_dmamap_sync(adapter->rxdma.dma_tag, adapter->rxdma.dma_map,
			BUS_DMASYNC_POSTREAD);

	/* Nothing completed: bail early. */
	if (!((current_desc->status) & E1000_RXD_STAT_DD)) {
		return;
	}

	while ((current_desc->status & E1000_RXD_STAT_DD) &&
	       (count != 0) &&
	       (ifp->if_drv_flags & IFF_DRV_RUNNING)) {
		struct mbuf *m = NULL;

		mp = adapter->rx_buffer_area[i].m_head;
		bus_dmamap_sync(adapter->rxtag, adapter->rx_buffer_area[i].map,
				BUS_DMASYNC_POSTREAD);

		accept_frame = 1;
		prev_len_adj = 0;
		desc_len = le16toh(current_desc->length);
		if (current_desc->status & E1000_RXD_STAT_EOP) {
			/* Last descriptor of a frame: strip the CRC.  If
			 * the CRC straddled descriptors, remember how much
			 * to trim from the previous fragment. */
			count--;
			eop = 1;
			if (desc_len < ETHER_CRC_LEN) {
				len = 0;
				prev_len_adj = ETHER_CRC_LEN - desc_len;
			}
			else {
				len = desc_len - ETHER_CRC_LEN;
			}
		} else {
			eop = 0;
			len = desc_len;
		}

		if (current_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK) {
			/* TBI (fiber) workaround: some frames flagged as
			 * errored are actually acceptable; adjust stats and
			 * trim the spurious trailing byte. */
			u_int8_t last_byte;
			u_int32_t pkt_len = desc_len;

			if (adapter->fmp != NULL)
				pkt_len += adapter->fmp->m_pkthdr.len;

			last_byte = *(mtod(mp, caddr_t) + desc_len - 1);

			if (TBI_ACCEPT(&adapter->hw, current_desc->status,
				       current_desc->errors,
				       pkt_len, last_byte)) {
				em_tbi_adjust_stats(&adapter->hw,
						    &adapter->stats,
						    pkt_len,
						    adapter->hw.mac_addr);
				if (len > 0) len--;
			}
			else {
				accept_frame = 0;
			}
		}

		if (accept_frame) {

			/* Refill the slot; on failure, recycle the current
			 * cluster and drop any partial chain. */
			if (em_get_buf(i, adapter, NULL) == ENOBUFS) {
				adapter->dropped_pkts++;
				em_get_buf(i, adapter, mp);
				if (adapter->fmp != NULL)
					m_freem(adapter->fmp);
				adapter->fmp = NULL;
				adapter->lmp = NULL;
				break;
			}

			/* Assign correct length to the current fragment */
			mp->m_len = len;

			if (adapter->fmp == NULL) {
				mp->m_pkthdr.len = len;
				adapter->fmp = mp; /* Store the first mbuf */
				adapter->lmp = mp;
			} else {
				/* Chain mbuf's together */
				mp->m_flags &= ~M_PKTHDR;
				/*
				 * Adjust length of previous mbuf in chain if we
				 * received less than 4 bytes in the last descriptor.
				 */
				if (prev_len_adj > 0) {
					adapter->lmp->m_len -= prev_len_adj;
					adapter->fmp->m_pkthdr.len -= prev_len_adj;
				}
				adapter->lmp->m_next = mp;
				adapter->lmp = adapter->lmp->m_next;
				adapter->fmp->m_pkthdr.len += len;
			}

			if (eop) {
				/* Complete frame: set checksum flags, handle
				 * a hardware-stripped VLAN tag, and stage it
				 * for delivery below. */
				adapter->fmp->m_pkthdr.rcvif = ifp;
				ifp->if_ipackets++;
				em_receive_checksum(adapter, current_desc,
						    adapter->fmp);
				if (current_desc->status & E1000_RXD_STAT_VP)
					VLAN_INPUT_TAG(ifp, adapter->fmp,
						       (current_desc->special &
							E1000_RXD_SPC_VLAN_MASK),
						       adapter->fmp = NULL);

				m = adapter->fmp;
				adapter->fmp = NULL;
				adapter->lmp = NULL;
			}
		} else {
			/* Errored frame: recycle the cluster and drop any
			 * accumulated chain. */
			adapter->dropped_pkts++;
			em_get_buf(i, adapter, mp);
			if (adapter->fmp != NULL)
				m_freem(adapter->fmp);
			adapter->fmp = NULL;
			adapter->lmp = NULL;
		}

		/* Zero out the receive descriptors status */
		current_desc->status = 0;
		bus_dmamap_sync(adapter->rxdma.dma_tag, adapter->rxdma.dma_map,
				BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

		/* Advance the E1000's Receive Queue #0 "Tail Pointer". */
		E1000_WRITE_REG(&adapter->hw, RDT, i);

		/* Advance our pointers to the next descriptor */
		if (++i == adapter->num_rx_desc)
			i = 0;
		if (m != NULL) {
			/* Deliver the packet with the lock dropped; the
			 * scan index is saved/reloaded because the ring may
			 * be processed reentrantly while unlocked. */
			adapter->next_rx_desc_to_check = i;
			EM_UNLOCK(adapter);
			(*ifp->if_input)(ifp, m);
			EM_LOCK(adapter);
			i = adapter->next_rx_desc_to_check;
		}
		current_desc = &adapter->rx_desc_base[i];
	}
	adapter->next_rx_desc_to_check = i;
	return;
}
2877
2878 /*********************************************************************
2879 *
2880 * Verify that the hardware indicated that the checksum is valid.
2881 * Inform the stack about the status of checksum so that stack
2882 * doesn't spend time verifying the checksum.
2883 *
2884 *********************************************************************/
2885 static void
2886 em_receive_checksum(struct adapter *adapter,
2887 struct em_rx_desc *rx_desc,
2888 struct mbuf *mp)
2889 {
2890 /* 82543 or newer only */
2891 if ((adapter->hw.mac_type < em_82543) ||
2892 /* Ignore Checksum bit is set */
2893 (rx_desc->status & E1000_RXD_STAT_IXSM)) {
2894 mp->m_pkthdr.csum_flags = 0;
2895 return;
2896 }
2897
2898 if (rx_desc->status & E1000_RXD_STAT_IPCS) {
2899 /* Did it pass? */
2900 if (!(rx_desc->errors & E1000_RXD_ERR_IPE)) {
2901 /* IP Checksum Good */
2902 mp->m_pkthdr.csum_flags = CSUM_IP_CHECKED;
2903 mp->m_pkthdr.csum_flags |= CSUM_IP_VALID;
2904
2905 } else {
2906 mp->m_pkthdr.csum_flags = 0;
2907 }
2908 }
2909
2910 if (rx_desc->status & E1000_RXD_STAT_TCPCS) {
2911 /* Did it pass? */
2912 if (!(rx_desc->errors & E1000_RXD_ERR_TCPE)) {
2913 mp->m_pkthdr.csum_flags |=
2914 (CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
2915 mp->m_pkthdr.csum_data = htons(0xffff);
2916 }
2917 }
2918
2919 return;
2920 }
2921
2922
/*
 * Enable hardware VLAN support: program the VLAN ethertype register
 * and set the VME bit in the device control register.
 */
static void
em_enable_vlans(struct adapter *adapter)
{
	uint32_t ctrl;

	E1000_WRITE_REG(&adapter->hw, VET, ETHERTYPE_VLAN);

	ctrl = E1000_READ_REG(&adapter->hw, CTRL);
	ctrl |= E1000_CTRL_VME;
	E1000_WRITE_REG(&adapter->hw, CTRL, ctrl);

	return;
}
2936
/* Disable hardware VLAN support by clearing the VME control bit. */
static void
em_disable_vlans(struct adapter *adapter)
{
	uint32_t ctrl;

	ctrl = E1000_READ_REG(&adapter->hw, CTRL);
	ctrl &= ~E1000_CTRL_VME;
	E1000_WRITE_REG(&adapter->hw, CTRL, ctrl);

	return;
}
2948
/* Unmask the interrupt causes the driver handles. */
static void
em_enable_intr(struct adapter * adapter)
{
	E1000_WRITE_REG(&adapter->hw, IMS, (IMS_ENABLE_MASK));
	return;
}
2955
/* Mask off device interrupts. */
static void
em_disable_intr(struct adapter *adapter)
{
	/*
	 * The first version of 82542 had an errata where, when link was
	 * forced, it would stay up even if the cable was disconnected.
	 * Sequence errors were used to detect the disconnect and then the
	 * driver would unforce the link.  That code is in the ISR, so for
	 * it to work correctly the sequence-error interrupt has to stay
	 * enabled at all times.
	 */

	if (adapter->hw.mac_type == em_82542_rev2_0)
		E1000_WRITE_REG(&adapter->hw, IMC,
				(0xffffffff & ~E1000_IMC_RXSEQ));
	else
		E1000_WRITE_REG(&adapter->hw, IMC,
				0xffffffff);
	return;
}
2975
2976 static int
2977 em_is_valid_ether_addr(u_int8_t *addr)
2978 {
2979 char zero_addr[6] = { 0, 0, 0, 0, 0, 0 };
2980
2981 if ((addr[0] & 1) || (!bcmp(addr, zero_addr, ETHER_ADDR_LEN))) {
2982 return (FALSE);
2983 }
2984
2985 return(TRUE);
2986 }
2987
2988 void
2989 em_write_pci_cfg(struct em_hw *hw,
2990 uint32_t reg,
2991 uint16_t *value)
2992 {
2993 pci_write_config(((struct em_osdep *)hw->back)->dev, reg,
2994 *value, 2);
2995 }
2996
2997 void
2998 em_read_pci_cfg(struct em_hw *hw, uint32_t reg,
2999 uint16_t *value)
3000 {
3001 *value = pci_read_config(((struct em_osdep *)hw->back)->dev,
3002 reg, 2);
3003 return;
3004 }
3005
3006 void
3007 em_pci_set_mwi(struct em_hw *hw)
3008 {
3009 pci_write_config(((struct em_osdep *)hw->back)->dev,
3010 PCIR_COMMAND,
3011 (hw->pci_cmd_word | CMD_MEM_WRT_INVALIDATE), 2);
3012 return;
3013 }
3014
3015 void
3016 em_pci_clear_mwi(struct em_hw *hw)
3017 {
3018 pci_write_config(((struct em_osdep *)hw->back)->dev,
3019 PCIR_COMMAND,
3020 (hw->pci_cmd_word & ~CMD_MEM_WRT_INVALIDATE), 2);
3021 return;
3022 }
3023
/* 32-bit programmed-I/O read wrapper for the shared hardware code. */
uint32_t
em_io_read(struct em_hw *hw, unsigned long port)
{
	return(inl(port));
}
3029
/* 32-bit programmed-I/O write wrapper for the shared hardware code. */
void
em_io_write(struct em_hw *hw, unsigned long port, uint32_t value)
{
	outl(port, value);
	return;
}
3036
3037 /*********************************************************************
3038 * 82544 Coexistence issue workaround.
3039 * There are 2 issues.
3040 * 1. Transmit Hang issue.
3041 * To detect this issue, following equation can be used...
3042 * SIZE[3:0] + ADDR[2:0] = SUM[3:0].
3043 * If SUM[3:0] is in between 1 to 4, we will have this issue.
3044 *
3045 * 2. DAC issue.
3046 * To detect this issue, following equation can be used...
3047 * SIZE[3:0] + ADDR[2:0] = SUM[3:0].
3048 * If SUM[3:0] is in between 9 to c, we will have this issue.
3049 *
3050 *
3051 * WORKAROUND:
3052 * Make sure we do not have ending address as 1,2,3,4(Hang) or 9,a,b,c (DAC)
3053 *
3054 *** *********************************************************************/
3055 static u_int32_t
3056 em_fill_descriptors (u_int64_t address,
3057 u_int32_t length,
3058 PDESC_ARRAY desc_array)
3059 {
3060 /* Since issue is sensitive to length and address.*/
3061 /* Let us first check the address...*/
3062 u_int32_t safe_terminator;
3063 if (length <= 4) {
3064 desc_array->descriptor[0].address = address;
3065 desc_array->descriptor[0].length = length;
3066 desc_array->elements = 1;
3067 return desc_array->elements;
3068 }
3069 safe_terminator = (u_int32_t)((((u_int32_t)address & 0x7) + (length & 0xF)) & 0xF);
3070 /* if it does not fall between 0x1 to 0x4 and 0x9 to 0xC then return */
3071 if (safe_terminator == 0 ||
3072 (safe_terminator > 4 &&
3073 safe_terminator < 9) ||
3074 (safe_terminator > 0xC &&
3075 safe_terminator <= 0xF)) {
3076 desc_array->descriptor[0].address = address;
3077 desc_array->descriptor[0].length = length;
3078 desc_array->elements = 1;
3079 return desc_array->elements;
3080 }
3081
3082 desc_array->descriptor[0].address = address;
3083 desc_array->descriptor[0].length = length - 4;
3084 desc_array->descriptor[1].address = address + (length - 4);
3085 desc_array->descriptor[1].length = 4;
3086 desc_array->elements = 2;
3087 return desc_array->elements;
3088 }
3089
3090 /**********************************************************************
3091 *
3092 * Update the board statistics counters.
3093 *
3094 **********************************************************************/
3095 static void
3096 em_update_stats_counters(struct adapter *adapter)
3097 {
3098 struct ifnet *ifp;
3099
3100 if(adapter->hw.media_type == em_media_type_copper ||
3101 (E1000_READ_REG(&adapter->hw, STATUS) & E1000_STATUS_LU)) {
3102 adapter->stats.symerrs += E1000_READ_REG(&adapter->hw, SYMERRS);
3103 adapter->stats.sec += E1000_READ_REG(&adapter->hw, SEC);
3104 }
3105 adapter->stats.crcerrs += E1000_READ_REG(&adapter->hw, CRCERRS);
3106 adapter->stats.mpc += E1000_READ_REG(&adapter->hw, MPC);
3107 adapter->stats.scc += E1000_READ_REG(&adapter->hw, SCC);
3108 adapter->stats.ecol += E1000_READ_REG(&adapter->hw, ECOL);
3109
3110 adapter->stats.mcc += E1000_READ_REG(&adapter->hw, MCC);
3111 adapter->stats.latecol += E1000_READ_REG(&adapter->hw, LATECOL);
3112 adapter->stats.colc += E1000_READ_REG(&adapter->hw, COLC);
3113 adapter->stats.dc += E1000_READ_REG(&adapter->hw, DC);
3114 adapter->stats.rlec += E1000_READ_REG(&adapter->hw, RLEC);
3115 adapter->stats.xonrxc += E1000_READ_REG(&adapter->hw, XONRXC);
3116 adapter->stats.xontxc += E1000_READ_REG(&adapter->hw, XONTXC);
3117 adapter->stats.xoffrxc += E1000_READ_REG(&adapter->hw, XOFFRXC);
3118 adapter->stats.xofftxc += E1000_READ_REG(&adapter->hw, XOFFTXC);
3119 adapter->stats.fcruc += E1000_READ_REG(&adapter->hw, FCRUC);
3120 adapter->stats.prc64 += E1000_READ_REG(&adapter->hw, PRC64);
3121 adapter->stats.prc127 += E1000_READ_REG(&adapter->hw, PRC127);
3122 adapter->stats.prc255 += E1000_READ_REG(&adapter->hw, PRC255);
3123 adapter->stats.prc511 += E1000_READ_REG(&adapter->hw, PRC511);
3124 adapter->stats.prc1023 += E1000_READ_REG(&adapter->hw, PRC1023);
3125 adapter->stats.prc1522 += E1000_READ_REG(&adapter->hw, PRC1522);
3126 adapter->stats.gprc += E1000_READ_REG(&adapter->hw, GPRC);
3127 adapter->stats.bprc += E1000_READ_REG(&adapter->hw, BPRC);
3128 adapter->stats.mprc += E1000_READ_REG(&adapter->hw, MPRC);
3129 adapter->stats.gptc += E1000_READ_REG(&adapter->hw, GPTC);
3130
3131 /* For the 64-bit byte counters the low dword must be read first. */
3132 /* Both registers clear on the read of the high dword */
3133
3134 adapter->stats.gorcl += E1000_READ_REG(&adapter->hw, GORCL);
3135 adapter->stats.gorch += E1000_READ_REG(&adapter->hw, GORCH);
3136 adapter->stats.gotcl += E1000_READ_REG(&adapter->hw, GOTCL);
3137 adapter->stats.gotch += E1000_READ_REG(&adapter->hw, GOTCH);
3138
3139 adapter->stats.rnbc += E1000_READ_REG(&adapter->hw, RNBC);
3140 adapter->stats.ruc += E1000_READ_REG(&adapter->hw, RUC);
3141 adapter->stats.rfc += E1000_READ_REG(&adapter->hw, RFC);
3142 adapter->stats.roc += E1000_READ_REG(&adapter->hw, ROC);
3143 adapter->stats.rjc += E1000_READ_REG(&adapter->hw, RJC);
3144
3145 adapter->stats.torl += E1000_READ_REG(&adapter->hw, TORL);
3146 adapter->stats.torh += E1000_READ_REG(&adapter->hw, TORH);
3147 adapter->stats.totl += E1000_READ_REG(&adapter->hw, TOTL);
3148 adapter->stats.toth += E1000_READ_REG(&adapter->hw, TOTH);
3149
3150 adapter->stats.tpr += E1000_READ_REG(&adapter->hw, TPR);
3151 adapter->stats.tpt += E1000_READ_REG(&adapter->hw, TPT);
3152 adapter->stats.ptc64 += E1000_READ_REG(&adapter->hw, PTC64);
3153 adapter->stats.ptc127 += E1000_READ_REG(&adapter->hw, PTC127);
3154 adapter->stats.ptc255 += E1000_READ_REG(&adapter->hw, PTC255);
3155 adapter->stats.ptc511 += E1000_READ_REG(&adapter->hw, PTC511);
3156 adapter->stats.ptc1023 += E1000_READ_REG(&adapter->hw, PTC1023);
3157 adapter->stats.ptc1522 += E1000_READ_REG(&adapter->hw, PTC1522);
3158 adapter->stats.mptc += E1000_READ_REG(&adapter->hw, MPTC);
3159 adapter->stats.bptc += E1000_READ_REG(&adapter->hw, BPTC);
3160
3161 if (adapter->hw.mac_type >= em_82543) {
3162 adapter->stats.algnerrc +=
3163 E1000_READ_REG(&adapter->hw, ALGNERRC);
3164 adapter->stats.rxerrc +=
3165 E1000_READ_REG(&adapter->hw, RXERRC);
3166 adapter->stats.tncrs +=
3167 E1000_READ_REG(&adapter->hw, TNCRS);
3168 adapter->stats.cexterr +=
3169 E1000_READ_REG(&adapter->hw, CEXTERR);
3170 adapter->stats.tsctc +=
3171 E1000_READ_REG(&adapter->hw, TSCTC);
3172 adapter->stats.tsctfc +=
3173 E1000_READ_REG(&adapter->hw, TSCTFC);
3174 }
3175 ifp = adapter->ifp;
3176
3177 ifp->if_collisions = adapter->stats.colc;
3178
3179 /* Rx Errors */
3180 ifp->if_ierrors =
3181 adapter->dropped_pkts +
3182 adapter->stats.rxerrc +
3183 adapter->stats.crcerrs +
3184 adapter->stats.algnerrc +
3185 adapter->stats.rlec +
3186 adapter->stats.mpc + adapter->stats.cexterr;
3187
3188 /* Tx Errors */
3189 ifp->if_oerrors = adapter->stats.ecol + adapter->stats.latecol;
3190
3191 }
3192
3193
3194 /**********************************************************************
3195 *
3196 * This routine is called only when em_display_debug_stats is enabled.
3197 * This routine provides a way to take a look at important statistics
3198 * maintained by the driver and hardware.
3199 *
3200 **********************************************************************/
3201 static void
3202 em_print_debug_info(struct adapter *adapter)
3203 {
3204 int unit = adapter->unit;
3205 uint8_t *hw_addr = adapter->hw.hw_addr;
3206
3207 printf("em%d: Adapter hardware address = %p \n", unit, hw_addr);
3208 printf("em%d:CTRL = 0x%x\n", unit,
3209 E1000_READ_REG(&adapter->hw, CTRL));
3210 printf("em%d:RCTL = 0x%x PS=(0x8402)\n", unit,
3211 E1000_READ_REG(&adapter->hw, RCTL));
3212 printf("em%d:tx_int_delay = %d, tx_abs_int_delay = %d\n", unit,
3213 E1000_READ_REG(&adapter->hw, TIDV),
3214 E1000_READ_REG(&adapter->hw, TADV));
3215 printf("em%d:rx_int_delay = %d, rx_abs_int_delay = %d\n", unit,
3216 E1000_READ_REG(&adapter->hw, RDTR),
3217 E1000_READ_REG(&adapter->hw, RADV));
3218 printf("em%d: fifo workaround = %lld, fifo_reset = %lld\n", unit,
3219 (long long)adapter->tx_fifo_wrk_cnt,
3220 (long long)adapter->tx_fifo_reset_cnt);
3221 printf("em%d: hw tdh = %d, hw tdt = %d\n", unit,
3222 E1000_READ_REG(&adapter->hw, TDH),
3223 E1000_READ_REG(&adapter->hw, TDT));
3224 printf("em%d: Num Tx descriptors avail = %d\n", unit,
3225 adapter->num_tx_desc_avail);
3226 printf("em%d: Tx Descriptors not avail1 = %ld\n", unit,
3227 adapter->no_tx_desc_avail1);
3228 printf("em%d: Tx Descriptors not avail2 = %ld\n", unit,
3229 adapter->no_tx_desc_avail2);
3230 printf("em%d: Std mbuf failed = %ld\n", unit,
3231 adapter->mbuf_alloc_failed);
3232 printf("em%d: Std mbuf cluster failed = %ld\n", unit,
3233 adapter->mbuf_cluster_failed);
3234 printf("em%d: Driver dropped packets = %ld\n", unit,
3235 adapter->dropped_pkts);
3236
3237 return;
3238 }
3239
3240 static void
3241 em_print_hw_stats(struct adapter *adapter)
3242 {
3243 int unit = adapter->unit;
3244
3245 printf("em%d: Excessive collisions = %lld\n", unit,
3246 (long long)adapter->stats.ecol);
3247 printf("em%d: Symbol errors = %lld\n", unit,
3248 (long long)adapter->stats.symerrs);
3249 printf("em%d: Sequence errors = %lld\n", unit,
3250 (long long)adapter->stats.sec);
3251 printf("em%d: Defer count = %lld\n", unit,
3252 (long long)adapter->stats.dc);
3253
3254 printf("em%d: Missed Packets = %lld\n", unit,
3255 (long long)adapter->stats.mpc);
3256 printf("em%d: Receive No Buffers = %lld\n", unit,
3257 (long long)adapter->stats.rnbc);
3258 printf("em%d: Receive length errors = %lld\n", unit,
3259 (long long)adapter->stats.rlec);
3260 printf("em%d: Receive errors = %lld\n", unit,
3261 (long long)adapter->stats.rxerrc);
3262 printf("em%d: Crc errors = %lld\n", unit,
3263 (long long)adapter->stats.crcerrs);
3264 printf("em%d: Alignment errors = %lld\n", unit,
3265 (long long)adapter->stats.algnerrc);
3266 printf("em%d: Carrier extension errors = %lld\n", unit,
3267 (long long)adapter->stats.cexterr);
3268
3269 printf("em%d: XON Rcvd = %lld\n", unit,
3270 (long long)adapter->stats.xonrxc);
3271 printf("em%d: XON Xmtd = %lld\n", unit,
3272 (long long)adapter->stats.xontxc);
3273 printf("em%d: XOFF Rcvd = %lld\n", unit,
3274 (long long)adapter->stats.xoffrxc);
3275 printf("em%d: XOFF Xmtd = %lld\n", unit,
3276 (long long)adapter->stats.xofftxc);
3277
3278 printf("em%d: Good Packets Rcvd = %lld\n", unit,
3279 (long long)adapter->stats.gprc);
3280 printf("em%d: Good Packets Xmtd = %lld\n", unit,
3281 (long long)adapter->stats.gptc);
3282
3283 return;
3284 }
3285
3286 static int
3287 em_sysctl_debug_info(SYSCTL_HANDLER_ARGS)
3288 {
3289 int error;
3290 int result;
3291 struct adapter *adapter;
3292
3293 result = -1;
3294 error = sysctl_handle_int(oidp, &result, 0, req);
3295
3296 if (error || !req->newptr)
3297 return (error);
3298
3299 if (result == 1) {
3300 adapter = (struct adapter *)arg1;
3301 em_print_debug_info(adapter);
3302 }
3303
3304 return error;
3305 }
3306
3307
3308 static int
3309 em_sysctl_stats(SYSCTL_HANDLER_ARGS)
3310 {
3311 int error;
3312 int result;
3313 struct adapter *adapter;
3314
3315 result = -1;
3316 error = sysctl_handle_int(oidp, &result, 0, req);
3317
3318 if (error || !req->newptr)
3319 return (error);
3320
3321 if (result == 1) {
3322 adapter = (struct adapter *)arg1;
3323 em_print_hw_stats(adapter);
3324 }
3325
3326 return error;
3327 }
3328
3329 static int
3330 em_sysctl_int_delay(SYSCTL_HANDLER_ARGS)
3331 {
3332 struct em_int_delay_info *info;
3333 struct adapter *adapter;
3334 u_int32_t regval;
3335 int error;
3336 int usecs;
3337 int ticks;
3338
3339 info = (struct em_int_delay_info *)arg1;
3340 usecs = info->value;
3341 error = sysctl_handle_int(oidp, &usecs, 0, req);
3342 if (error != 0 || req->newptr == NULL)
3343 return error;
3344 if (usecs < 0 || usecs > E1000_TICKS_TO_USECS(65535))
3345 return EINVAL;
3346 info->value = usecs;
3347 ticks = E1000_USECS_TO_TICKS(usecs);
3348
3349 adapter = info->adapter;
3350
3351 EM_LOCK(adapter);
3352 regval = E1000_READ_OFFSET(&adapter->hw, info->offset);
3353 regval = (regval & ~0xffff) | (ticks & 0xffff);
3354 /* Handle a few special cases. */
3355 switch (info->offset) {
3356 case E1000_RDTR:
3357 case E1000_82542_RDTR:
3358 regval |= E1000_RDT_FPDB;
3359 break;
3360 case E1000_TIDV:
3361 case E1000_82542_TIDV:
3362 if (ticks == 0) {
3363 adapter->txd_cmd &= ~E1000_TXD_CMD_IDE;
3364 /* Don't write 0 into the TIDV register. */
3365 regval++;
3366 } else
3367 adapter->txd_cmd |= E1000_TXD_CMD_IDE;
3368 break;
3369 }
3370 E1000_WRITE_OFFSET(&adapter->hw, info->offset, regval);
3371 EM_UNLOCK(adapter);
3372 return 0;
3373 }
3374
3375 static void
3376 em_add_int_delay_sysctl(struct adapter *adapter, const char *name,
3377 const char *description, struct em_int_delay_info *info,
3378 int offset, int value)
3379 {
3380 info->adapter = adapter;
3381 info->offset = offset;
3382 info->value = value;
3383 SYSCTL_ADD_PROC(device_get_sysctl_ctx(adapter->dev),
3384 SYSCTL_CHILDREN(device_get_sysctl_tree(adapter->dev)),
3385 OID_AUTO, name, CTLTYPE_INT|CTLFLAG_RW,
3386 info, 0, em_sysctl_int_delay, "I", description);
3387 }
Cache object: 2cd40caf4714671567f0b327c9b3e41b
|