FreeBSD/Linux Kernel Cross Reference
sys/dev/em/if_em.c
1 /**************************************************************************
2
3 Copyright (c) 2001-2003, Intel Corporation
4 All rights reserved.
5
6 Redistribution and use in source and binary forms, with or without
7 modification, are permitted provided that the following conditions are met:
8
9 1. Redistributions of source code must retain the above copyright notice,
10 this list of conditions and the following disclaimer.
11
12 2. Redistributions in binary form must reproduce the above copyright
13 notice, this list of conditions and the following disclaimer in the
14 documentation and/or other materials provided with the distribution.
15
16 3. Neither the name of the Intel Corporation nor the names of its
17 contributors may be used to endorse or promote products derived from
18 this software without specific prior written permission.
19
20 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 POSSIBILITY OF SUCH DAMAGE.
31
32 ***************************************************************************/
33
34 /*$FreeBSD$*/
35
36 #include <dev/em/if_em.h>
37
/*********************************************************************
 *  Set this to one to display debug statistics
 *********************************************************************/
/* Global knob read by the debug sysctl handlers; 0 = quiet (default). */
int em_display_debug_stats = 0;
42
/*********************************************************************
 *  Linked list of board private structures for all NICs found
 *********************************************************************/
/* Head of the doubly-linked softc list; maintained in attach/detach. */
struct adapter *em_adapter_list = NULL;
48
49
/*********************************************************************
 *  Driver version
 *********************************************************************/
/* Appended to the device description string built in em_probe(). */
char em_driver_version[] = "1.7.35";
55
56
/*********************************************************************
 *  PCI Device ID Table
 *
 *  Used by probe to select devices to load on
 *  Last field stores an index into em_strings
 *  Last entry must be all 0s
 *
 *  { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
 *********************************************************************/
/* All entries share vendor 0x8086 (Intel) and branding string 0. */
static em_vendor_info_t em_vendor_info_array[] =
{
	/* Intel(R) PRO/1000 Network Connection */
	{ 0x8086, 0x1000, PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, 0x1001, PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, 0x1004, PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, 0x1008, PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, 0x1009, PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, 0x100C, PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, 0x100D, PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, 0x100E, PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, 0x100F, PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, 0x1010, PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, 0x1011, PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, 0x1012, PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, 0x1013, PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, 0x1014, PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, 0x1015, PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, 0x1016, PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, 0x1017, PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, 0x1018, PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, 0x1019, PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, 0x101A, PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, 0x101D, PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, 0x101E, PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, 0x1026, PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, 0x1027, PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, 0x1028, PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, 0x1075, PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, 0x1076, PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, 0x1077, PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, 0x1078, PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, 0x1079, PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, 0x107A, PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, 0x107B, PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, 0x107C, PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, 0x108A, PCI_ANY_ID, PCI_ANY_ID, 0},
	/* required last entry */
	{ 0, 0, 0, 0, 0}
};
107
/*********************************************************************
 *  Table of branding strings for all supported NICs.
 *********************************************************************/
/* Indexed by the last field of em_vendor_info_array entries. */
static char *em_strings[] = {
	"Intel(R) PRO/1000 Network Connection"
};
115
/*********************************************************************
 *  Function prototypes
 *********************************************************************/
/* Newbus device interface entry points. */
static int  em_probe(device_t);
static int  em_attach(device_t);
static int  em_detach(device_t);
static int  em_shutdown(device_t);
/* Interrupt handler and ifnet entry points. */
static void em_intr(void *);
static void em_start(struct ifnet *);
static int  em_ioctl(struct ifnet *, u_long, caddr_t);
static void em_watchdog(struct ifnet *);
static void em_init(void *);
static void em_init_locked(struct adapter *);
static void em_stop(void *);
static void em_media_status(struct ifnet *, struct ifmediareq *);
static int  em_media_change(struct ifnet *);
/* Hardware/resource setup and teardown. */
static void em_identify_hardware(struct adapter *);
static int  em_allocate_pci_resources(struct adapter *);
static void em_free_pci_resources(struct adapter *);
static void em_local_timer(void *);
static int  em_hardware_init(struct adapter *);
static void em_setup_interface(device_t, struct adapter *);
/* Transmit/receive ring management. */
static int  em_setup_transmit_structures(struct adapter *);
static void em_initialize_transmit_unit(struct adapter *);
static int  em_setup_receive_structures(struct adapter *);
static void em_initialize_receive_unit(struct adapter *);
static void em_enable_intr(struct adapter *);
static void em_disable_intr(struct adapter *);
static void em_free_transmit_structures(struct adapter *);
static void em_free_receive_structures(struct adapter *);
static void em_update_stats_counters(struct adapter *);
static void em_clean_transmit_interrupts(struct adapter *);
static int  em_allocate_receive_structures(struct adapter *);
static int  em_allocate_transmit_structures(struct adapter *);
static void em_process_receive_interrupts(struct adapter *, int);
#ifndef __NO_STRICT_ALIGNMENT
static int  em_fixup_rx(struct adapter *);
#endif
/* Checksum offload helpers. */
static void em_receive_checksum(struct adapter *,
				struct em_rx_desc *,
				struct mbuf *);
static void em_transmit_checksum_setup(struct adapter *,
				       struct mbuf *,
				       u_int32_t *,
				       u_int32_t *);
/* Filtering, VLAN, and link-status helpers. */
static void em_set_promisc(struct adapter *);
static void em_disable_promisc(struct adapter *);
static void em_set_multi(struct adapter *);
static void em_print_hw_stats(struct adapter *);
static void em_print_link_status(struct adapter *);
static int  em_get_buf(int i, struct adapter *,
		       struct mbuf *);
static void em_enable_vlans(struct adapter *);
static void em_disable_vlans(struct adapter *);
static int  em_encap(struct adapter *, struct mbuf **);
static void em_smartspeed(struct adapter *);
/* 82547 Tx FIFO workaround machinery. */
static int  em_82547_fifo_workaround(struct adapter *, int);
static void em_82547_update_fifo_head(struct adapter *, int);
static int  em_82547_tx_fifo_reset(struct adapter *);
static void em_82547_move_tail(void *arg);
static void em_82547_move_tail_locked(struct adapter *);
/* DMA memory, debug, and sysctl helpers. */
static int  em_dma_malloc(struct adapter *, bus_size_t,
			  struct em_dma_alloc *, int);
static void em_dma_free(struct adapter *, struct em_dma_alloc *);
static void em_print_debug_info(struct adapter *);
static int  em_is_valid_ether_addr(u_int8_t *);
static int  em_sysctl_stats(SYSCTL_HANDLER_ARGS);
static int  em_sysctl_debug_info(SYSCTL_HANDLER_ARGS);
static u_int32_t em_fill_descriptors (u_int64_t address,
				      u_int32_t length,
				      PDESC_ARRAY desc_array);
static int  em_sysctl_int_delay(SYSCTL_HANDLER_ARGS);
static void em_add_int_delay_sysctl(struct adapter *, const char *,
				    const char *, struct em_int_delay_info *,
				    int, int);
191
/*********************************************************************
 *  FreeBSD Device Interface Entry Points
 *********************************************************************/

/* Newbus method table mapping generic device ops to em_* handlers. */
static device_method_t em_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe, em_probe),
	DEVMETHOD(device_attach, em_attach),
	DEVMETHOD(device_detach, em_detach),
	DEVMETHOD(device_shutdown, em_shutdown),
	{0, 0}
};

/* softc size tells the bus how much per-device state to allocate. */
static driver_t em_driver = {
	"em", em_methods, sizeof(struct adapter ),
};

static devclass_t em_devclass;
/* Register on the pci bus; depend on pci and ether infrastructure. */
DRIVER_MODULE(em, pci, em_driver, em_devclass, 0, 0);
MODULE_DEPEND(em, pci, 1, 1, 1);
MODULE_DEPEND(em, ether, 1, 1, 1);
213
/*********************************************************************
 *  Tunable default values.
 *********************************************************************/

/*
 * Interrupt-delay registers count in units of 1.024 usecs; these
 * macros convert between register ticks and microseconds, rounding
 * to nearest.
 */
#define E1000_TICKS_TO_USECS(ticks)	((1024 * (ticks) + 500) / 1000)
#define E1000_USECS_TO_TICKS(usecs)	((1000 * (usecs) + 512) / 1024)

static int em_tx_int_delay_dflt = E1000_TICKS_TO_USECS(EM_TIDV);
static int em_rx_int_delay_dflt = E1000_TICKS_TO_USECS(EM_RDTR);
static int em_tx_abs_int_delay_dflt = E1000_TICKS_TO_USECS(EM_TADV);
static int em_rx_abs_int_delay_dflt = E1000_TICKS_TO_USECS(EM_RADV);

/* Loader tunables; picked up again as sysctl defaults in em_attach(). */
TUNABLE_INT("hw.em.tx_int_delay", &em_tx_int_delay_dflt);
TUNABLE_INT("hw.em.rx_int_delay", &em_rx_int_delay_dflt);
TUNABLE_INT("hw.em.tx_abs_int_delay", &em_tx_abs_int_delay_dflt);
TUNABLE_INT("hw.em.rx_abs_int_delay", &em_rx_abs_int_delay_dflt);
230
231 /*********************************************************************
232 * Device identification routine
233 *
234 * em_probe determines if the driver should be loaded on
235 * adapter based on PCI vendor/device id of the adapter.
236 *
237 * return 0 on success, positive on failure
238 *********************************************************************/
239
240 static int
241 em_probe(device_t dev)
242 {
243 em_vendor_info_t *ent;
244
245 u_int16_t pci_vendor_id = 0;
246 u_int16_t pci_device_id = 0;
247 u_int16_t pci_subvendor_id = 0;
248 u_int16_t pci_subdevice_id = 0;
249 char adapter_name[60];
250
251 INIT_DEBUGOUT("em_probe: begin");
252
253 pci_vendor_id = pci_get_vendor(dev);
254 if (pci_vendor_id != EM_VENDOR_ID)
255 return(ENXIO);
256
257 pci_device_id = pci_get_device(dev);
258 pci_subvendor_id = pci_get_subvendor(dev);
259 pci_subdevice_id = pci_get_subdevice(dev);
260
261 ent = em_vendor_info_array;
262 while (ent->vendor_id != 0) {
263 if ((pci_vendor_id == ent->vendor_id) &&
264 (pci_device_id == ent->device_id) &&
265
266 ((pci_subvendor_id == ent->subvendor_id) ||
267 (ent->subvendor_id == PCI_ANY_ID)) &&
268
269 ((pci_subdevice_id == ent->subdevice_id) ||
270 (ent->subdevice_id == PCI_ANY_ID))) {
271 sprintf(adapter_name, "%s, Version - %s",
272 em_strings[ent->index],
273 em_driver_version);
274 device_set_desc_copy(dev, adapter_name);
275 return(0);
276 }
277 ent++;
278 }
279
280 return(ENXIO);
281 }
282
283 /*********************************************************************
284 * Device initialization routine
285 *
286 * The attach entry point is called when the driver is being loaded.
287 * This routine identifies the type of hardware, allocates all resources
288 * and initializes the hardware.
289 *
290 * return 0 on success, positive on failure
291 *********************************************************************/
292
static int
em_attach(device_t dev)
{
	struct adapter * adapter;
	int tsize, rsize;	/* byte sizes of the Tx/Rx descriptor rings */
	int error = 0;

	INIT_DEBUGOUT("em_attach: begin");

	/*
	 * Allocate, clear, and link in our adapter structure.  The softc
	 * is allocated by the bus framework; guard against a NULL return
	 * anyway.
	 */
	if (!(adapter = device_get_softc(dev))) {
		printf("em: adapter structure allocation failed\n");
		return(ENOMEM);
	}
	bzero(adapter, sizeof(struct adapter ));
	adapter->dev = dev;
	adapter->osdep.dev = dev;
	adapter->unit = device_get_unit(dev);
	EM_LOCK_INIT(adapter, device_get_nameunit(dev));

	/* Insert at the head of the global adapter list. */
	if (em_adapter_list != NULL)
		em_adapter_list->prev = adapter;
	adapter->next = em_adapter_list;
	em_adapter_list = adapter;

	/* SYSCTL stuff: create the hw.emN tree plus debug/stats nodes. */
	sysctl_ctx_init(&adapter->sysctl_ctx);
	adapter->sysctl_tree = SYSCTL_ADD_NODE(&adapter->sysctl_ctx,
					       SYSCTL_STATIC_CHILDREN(_hw),
					       OID_AUTO,
					       device_get_nameunit(dev),
					       CTLFLAG_RD,
					       0, "");
	if (adapter->sysctl_tree == NULL) {
		error = EIO;
		goto err_sysctl;
	}

	SYSCTL_ADD_PROC(&adapter->sysctl_ctx,
			SYSCTL_CHILDREN(adapter->sysctl_tree),
			OID_AUTO, "debug_info", CTLTYPE_INT|CTLFLAG_RW,
			(void *)adapter, 0,
			em_sysctl_debug_info, "I", "Debug Information");

	SYSCTL_ADD_PROC(&adapter->sysctl_ctx,
			SYSCTL_CHILDREN(adapter->sysctl_tree),
			OID_AUTO, "stats", CTLTYPE_INT|CTLFLAG_RW,
			(void *)adapter, 0,
			em_sysctl_stats, "I", "Statistics");

	callout_init(&adapter->timer, CALLOUT_MPSAFE);
	callout_init(&adapter->tx_fifo_timer, CALLOUT_MPSAFE);

	/* Determine hardware revision */
	em_identify_hardware(adapter);

	/* Set up some sysctls for the tunable interrupt delays */
	em_add_int_delay_sysctl(adapter, "rx_int_delay",
	    "receive interrupt delay in usecs", &adapter->rx_int_delay,
	    E1000_REG_OFFSET(&adapter->hw, RDTR), em_rx_int_delay_dflt);
	em_add_int_delay_sysctl(adapter, "tx_int_delay",
	    "transmit interrupt delay in usecs", &adapter->tx_int_delay,
	    E1000_REG_OFFSET(&adapter->hw, TIDV), em_tx_int_delay_dflt);
	/* Absolute delay limits (RADV/TADV) exist on 82540 and newer only. */
	if (adapter->hw.mac_type >= em_82540) {
		em_add_int_delay_sysctl(adapter, "rx_abs_int_delay",
		    "receive interrupt delay limit in usecs",
		    &adapter->rx_abs_int_delay,
		    E1000_REG_OFFSET(&adapter->hw, RADV),
		    em_rx_abs_int_delay_dflt);
		em_add_int_delay_sysctl(adapter, "tx_abs_int_delay",
		    "transmit interrupt delay limit in usecs",
		    &adapter->tx_abs_int_delay,
		    E1000_REG_OFFSET(&adapter->hw, TADV),
		    em_tx_abs_int_delay_dflt);
	}

	/* Parameters (to be read from user) */
	adapter->num_tx_desc = EM_MAX_TXD;
	adapter->num_rx_desc = EM_MAX_RXD;
	adapter->hw.autoneg = DO_AUTO_NEG;
	adapter->hw.wait_autoneg_complete = WAIT_FOR_AUTO_NEG_DEFAULT;
	adapter->hw.autoneg_advertised = AUTONEG_ADV_DEFAULT;
	adapter->hw.tbi_compatibility_en = TRUE;
	adapter->rx_buffer_len = EM_RXBUFFER_2048;

	/*
	 * These parameters control the automatic generation(Tx) and
	 * response(Rx) to Ethernet PAUSE frames.
	 */
	adapter->hw.fc_high_water = FC_DEFAULT_HI_THRESH;
	adapter->hw.fc_low_water = FC_DEFAULT_LO_THRESH;
	adapter->hw.fc_pause_time = FC_DEFAULT_TX_TIMER;
	adapter->hw.fc_send_xon = TRUE;
	adapter->hw.fc = em_fc_full;

	adapter->hw.phy_init_script = 1;
	adapter->hw.phy_reset_disable = FALSE;

#ifndef EM_MASTER_SLAVE
	adapter->hw.master_slave = em_ms_hw_default;
#else
	adapter->hw.master_slave = EM_MASTER_SLAVE;
#endif
	/*
	 * Set the max frame size assuming standard ethernet
	 * sized frames
	 */
	adapter->hw.max_frame_size =
	    ETHERMTU + ETHER_HDR_LEN + ETHER_CRC_LEN;

	adapter->hw.min_frame_size =
	    MINIMUM_ETHERNET_PACKET_SIZE + ETHER_CRC_LEN;

	/*
	 * This controls when hardware reports transmit completion
	 * status.
	 */
	adapter->hw.report_tx_early = 1;


	if (em_allocate_pci_resources(adapter)) {
		printf("em%d: Allocation of PCI resources failed\n",
		       adapter->unit);
		error = ENXIO;
		goto err_pci;
	}


	/* Initialize eeprom parameters */
	em_init_eeprom_params(&adapter->hw);

	/* Descriptor rings are rounded up to a 4K multiple. */
	tsize = EM_ROUNDUP(adapter->num_tx_desc *
			   sizeof(struct em_tx_desc), 4096);

	/* Allocate Transmit Descriptor ring */
	if (em_dma_malloc(adapter, tsize, &adapter->txdma, BUS_DMA_NOWAIT)) {
		printf("em%d: Unable to allocate tx_desc memory\n",
		       adapter->unit);
		error = ENOMEM;
		goto err_tx_desc;
	}
	adapter->tx_desc_base = (struct em_tx_desc *) adapter->txdma.dma_vaddr;

	rsize = EM_ROUNDUP(adapter->num_rx_desc *
			   sizeof(struct em_rx_desc), 4096);

	/* Allocate Receive Descriptor ring */
	if (em_dma_malloc(adapter, rsize, &adapter->rxdma, BUS_DMA_NOWAIT)) {
		printf("em%d: Unable to allocate rx_desc memory\n",
		       adapter->unit);
		error = ENOMEM;
		goto err_rx_desc;
	}
	adapter->rx_desc_base = (struct em_rx_desc *) adapter->rxdma.dma_vaddr;

	/* Initialize the hardware */
	if (em_hardware_init(adapter)) {
		printf("em%d: Unable to initialize the hardware\n",
		       adapter->unit);
		error = EIO;
		goto err_hw_init;
	}

	/* Copy the permanent MAC address out of the EEPROM */
	if (em_read_mac_addr(&adapter->hw) < 0) {
		printf("em%d: EEPROM read error while reading mac address\n",
		       adapter->unit);
		error = EIO;
		goto err_mac_addr;
	}

	if (!em_is_valid_ether_addr(adapter->hw.mac_addr)) {
		printf("em%d: Invalid mac address\n", adapter->unit);
		error = EIO;
		goto err_mac_addr;
	}

	bcopy(adapter->hw.mac_addr, adapter->interface_data.ac_enaddr,
	      ETHER_ADDR_LEN);

	/* Setup OS specific network interface */
	em_setup_interface(dev, adapter);

	/* Initialize statistics */
	em_clear_hw_cntrs(&adapter->hw);
	em_update_stats_counters(adapter);
	adapter->hw.get_link_status = 1;
	em_check_for_link(&adapter->hw);

	if (bootverbose) {
		/* Print the link status */
		if (adapter->link_active == 1) {
			em_get_speed_and_duplex(&adapter->hw,
			    &adapter->link_speed, &adapter->link_duplex);
			printf("em%d: Speed:%d Mbps Duplex:%s\n",
			       adapter->unit,
			       adapter->link_speed,
			       adapter->link_duplex == FULL_DUPLEX ? "Full" :
				"Half");
		} else
			printf("em%d: Speed:N/A Duplex:N/A\n",
			       adapter->unit);
	}

	/* Identify 82544 on PCIX */
	em_get_bus_info(&adapter->hw);
	if(adapter->hw.bus_type == em_bus_type_pcix &&
	   adapter->hw.mac_type == em_82544) {
		adapter->pcix_82544 = TRUE;
	}
	else {
		adapter->pcix_82544 = FALSE;
	}
	INIT_DEBUGOUT("em_attach: end");
	return(0);

	/*
	 * Error unwind: labels fall through intentionally so each entry
	 * point frees everything allocated before its failure point.
	 */
err_mac_addr:
err_hw_init:
	em_dma_free(adapter, &adapter->rxdma);
err_rx_desc:
	em_dma_free(adapter, &adapter->txdma);
err_tx_desc:
err_pci:
	em_free_pci_resources(adapter);
	sysctl_ctx_free(&adapter->sysctl_ctx);
err_sysctl:
	return(error);

}
522
523 /*********************************************************************
524 * Device removal routine
525 *
526 * The detach entry point is called when the driver is being removed.
527 * This routine stops the adapter and deallocates all the resources
528 * that were allocated for driver operation.
529 *
530 * return 0 on success, positive on failure
531 *********************************************************************/
532
static int
em_detach(device_t dev)
{
	struct adapter * adapter = device_get_softc(dev);
	struct ifnet *ifp = &adapter->interface_data.ac_if;

	INIT_DEBUGOUT("em_detach: begin");

	/*
	 * Mark in_detach so em_ioctl() becomes a no-op, then quiesce the
	 * hardware and reset the PHY while holding the adapter lock.
	 */
	EM_LOCK(adapter);
	adapter->in_detach = 1;
	em_stop(adapter);
	em_phy_hw_reset(&adapter->hw);
	EM_UNLOCK(adapter);
	ether_ifdetach(&adapter->interface_data.ac_if);
	em_free_pci_resources(adapter);
	bus_generic_detach(dev);

	/* Free Transmit Descriptor ring */
	if (adapter->tx_desc_base) {
		em_dma_free(adapter, &adapter->txdma);
		adapter->tx_desc_base = NULL;
	}

	/* Free Receive Descriptor ring */
	if (adapter->rx_desc_base) {
		em_dma_free(adapter, &adapter->rxdma);
		adapter->rx_desc_base = NULL;
	}

	/* Free the sysctl tree */
	sysctl_ctx_free(&adapter->sysctl_ctx);

	/* Remove from the doubly-linked global adapter list */
	if (em_adapter_list == adapter)
		em_adapter_list = adapter->next;
	if (adapter->next != NULL)
		adapter->next->prev = adapter->prev;
	if (adapter->prev != NULL)
		adapter->prev->next = adapter->next;

	EM_LOCK_DESTROY(adapter);

	/* Clear running state and cancel any pending watchdog. */
	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
	ifp->if_timer = 0;

	return(0);
}
580
581 /*********************************************************************
582 *
583 * Shutdown entry point
584 *
585 **********************************************************************/
586
587 static int
588 em_shutdown(device_t dev)
589 {
590 struct adapter *adapter = device_get_softc(dev);
591 EM_LOCK(adapter);
592 em_stop(adapter);
593 EM_UNLOCK(adapter);
594 return(0);
595 }
596
597
598 /*********************************************************************
599 * Transmit entry point
600 *
601 * em_start is called by the stack to initiate a transmit.
602 * The driver will remain in this routine as long as there are
603 * packets to transmit and transmit resources are available.
604 * In case resources are not available stack is notified and
605 * the packet is requeued.
606 **********************************************************************/
607
608 static void
609 em_start_locked(struct ifnet *ifp)
610 {
611 struct mbuf *m_head;
612 struct adapter *adapter = ifp->if_softc;
613
614 mtx_assert(&adapter->mtx, MA_OWNED);
615
616 if (!adapter->link_active)
617 return;
618
619 while (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) {
620
621 IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
622
623 if (m_head == NULL) break;
624
625 /*
626 * em_encap() can modify our pointer, and or make it NULL on
627 * failure. In that event, we can't requeue.
628 */
629 if (em_encap(adapter, &m_head)) {
630 if (m_head == NULL)
631 break;
632 ifp->if_flags |= IFF_OACTIVE;
633 IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
634 break;
635 }
636
637 /* Send a copy of the frame to the BPF listener */
638 BPF_MTAP(ifp, m_head);
639
640 /* Set timeout in case hardware has problems transmitting */
641 ifp->if_timer = EM_TX_TIMEOUT;
642
643 }
644 return;
645 }
646
647 static void
648 em_start(struct ifnet *ifp)
649 {
650 struct adapter *adapter = ifp->if_softc;
651
652 EM_LOCK(adapter);
653 em_start_locked(ifp);
654 EM_UNLOCK(adapter);
655 return;
656 }
657
658 /*********************************************************************
659 * Ioctl entry point
660 *
661 * em_ioctl is called when the user wants to configure the
662 * interface.
663 *
664 * return 0 on success, positive on failure
665 **********************************************************************/
666
static int
em_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
{
	int mask, reinit, error = 0;
	struct ifreq *ifr = (struct ifreq *) data;
	struct adapter * adapter = ifp->if_softc;

	/* Refuse everything once detach has begun. */
	if (adapter->in_detach) return(error);

	switch (command) {
	case SIOCSIFADDR:
	case SIOCGIFADDR:
		IOCTL_DEBUGOUT("ioctl rcv'd: SIOCxIFADDR (Get/Set Interface Addr)");
		ether_ioctl(ifp, command, data);
		break;
	case SIOCSIFMTU:
		IOCTL_DEBUGOUT("ioctl rcv'd: SIOCSIFMTU (Set Interface MTU)");
		if (ifr->ifr_mtu > MAX_JUMBO_FRAME_SIZE - ETHER_HDR_LEN) {
			error = EINVAL;
		} else {
			/* Reprogram the MAC for the new frame size. */
			EM_LOCK(adapter);
			ifp->if_mtu = ifr->ifr_mtu;
			adapter->hw.max_frame_size =
			    ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
			em_init_locked(adapter);
			EM_UNLOCK(adapter);
		}
		break;
	case SIOCSIFFLAGS:
		IOCTL_DEBUGOUT("ioctl rcv'd: SIOCSIFFLAGS (Set Interface Flags)");
		EM_LOCK(adapter);
		if (ifp->if_flags & IFF_UP) {
			if (!(ifp->if_flags & IFF_RUNNING)) {
				em_init_locked(adapter);
			}

			/* Resync promiscuous mode with the new flags. */
			em_disable_promisc(adapter);
			em_set_promisc(adapter);
		} else {
			if (ifp->if_flags & IFF_RUNNING) {
				em_stop(adapter);
			}
		}
		EM_UNLOCK(adapter);
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		IOCTL_DEBUGOUT("ioctl rcv'd: SIOC(ADD|DEL)MULTI");
		if (ifp->if_flags & IFF_RUNNING) {
			/* Update the filter with interrupts masked. */
			EM_LOCK(adapter);
			em_disable_intr(adapter);
			em_set_multi(adapter);
			/* 82542 rev2.0 needs the Rx unit reprogrammed. */
			if (adapter->hw.mac_type == em_82542_rev2_0) {
				em_initialize_receive_unit(adapter);
			}
#ifdef DEVICE_POLLING
			/* Leave interrupts masked while polling is active. */
			if (!(ifp->if_flags & IFF_POLLING))
#endif
				em_enable_intr(adapter);
			EM_UNLOCK(adapter);
		}
		break;
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		IOCTL_DEBUGOUT("ioctl rcv'd: SIOCxIFMEDIA (Get/Set Interface Media)");
		error = ifmedia_ioctl(ifp, ifr, &adapter->media, command);
		break;
	case SIOCSIFCAP:
		IOCTL_DEBUGOUT("ioctl rcv'd: SIOCSIFCAP (Set Capabilities)");
		reinit = 0;
		/* Toggle only the capability bits the caller changed. */
		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
		if (mask & IFCAP_POLLING)
			ifp->if_capenable ^= IFCAP_POLLING;
		if (mask & IFCAP_HWCSUM) {
			ifp->if_capenable ^= IFCAP_HWCSUM;
			reinit = 1;
		}
		if (mask & IFCAP_VLAN_HWTAGGING) {
			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
			reinit = 1;
		}
		/* Checksum/VLAN offload changes require a reinit. */
		if (reinit && (ifp->if_flags & IFF_RUNNING))
			em_init(adapter);
		break;
	default:
		IOCTL_DEBUGOUT1("ioctl received: UNKNOWN (0x%x)", (int)command);
		error = EINVAL;
	}

	return(error);
}
758
759 /*********************************************************************
760 * Watchdog entry point
761 *
762 * This routine is called whenever hardware quits transmitting.
763 *
764 **********************************************************************/
765
766 static void
767 em_watchdog(struct ifnet *ifp)
768 {
769 struct adapter * adapter;
770 adapter = ifp->if_softc;
771
772 /* If we are in this routine because of pause frames, then
773 * don't reset the hardware.
774 */
775 if (E1000_READ_REG(&adapter->hw, STATUS) & E1000_STATUS_TXOFF) {
776 ifp->if_timer = EM_TX_TIMEOUT;
777 return;
778 }
779
780 if (!em_check_for_link(&adapter->hw))
781 printf("em%d: watchdog timeout -- resetting\n", adapter->unit);
782
783 ifp->if_flags &= ~IFF_RUNNING;
784
785 em_init(adapter);
786
787 ifp->if_oerrors++;
788 return;
789 }
790
791 /*********************************************************************
792 * Init entry point
793 *
794 * This routine is used in two ways. It is used by the stack as
795 * init entry point in network interface structure. It is also used
796 * by the driver as a hw/sw initialization routine to get to a
797 * consistent state.
798 *
799 * return 0 on success, positive on failure
800 **********************************************************************/
801
static void
em_init_locked(struct adapter * adapter)
{
	struct ifnet *ifp;

	uint32_t pba;
	ifp = &adapter->interface_data.ac_if;

	INIT_DEBUGOUT("em_init: begin");

	mtx_assert(&adapter->mtx, MA_OWNED);

	/* Quiesce before reprogramming the hardware. */
	em_stop(adapter);

	/* Packet Buffer Allocation (PBA)
	 * Writing PBA sets the receive portion of the buffer
	 * the remainder is used for the transmit buffer.
	 *
	 * Devices before the 82547 had a Packet Buffer of 64K.
	 *   Default allocation: PBA=48K for Rx, leaving 16K for Tx.
	 * After the 82547 the buffer was reduced to 40K.
	 *   Default allocation: PBA=30K for Rx, leaving 10K for Tx.
	 *   Note: default does not leave enough room for Jumbo Frame >10k.
	 */
	if(adapter->hw.mac_type < em_82547) {
		/* Total FIFO is 64K */
		if(adapter->rx_buffer_len > EM_RXBUFFER_8192)
			pba = E1000_PBA_40K; /* 40K for Rx, 24K for Tx */
		else
			pba = E1000_PBA_48K; /* 48K for Rx, 16K for Tx */
	} else {
		/* Total FIFO is 40K */
		if(adapter->hw.max_frame_size > EM_RXBUFFER_8192) {
			pba = E1000_PBA_22K; /* 22K for Rx, 18K for Tx */
		} else {
			pba = E1000_PBA_30K; /* 30K for Rx, 10K for Tx */
		}
		/* 82547+: record Tx FIFO geometry for the FIFO workaround. */
		adapter->tx_fifo_head = 0;
		adapter->tx_head_addr = pba << EM_TX_HEAD_ADDR_SHIFT;
		adapter->tx_fifo_size = (E1000_PBA_40K - pba) << EM_PBA_BYTES_SHIFT;
	}
	INIT_DEBUGOUT1("em_init: pba=%dK",pba);
	E1000_WRITE_REG(&adapter->hw, PBA, pba);

	/* Get the latest mac address, User can use a LAA */
	bcopy(adapter->interface_data.ac_enaddr, adapter->hw.mac_addr,
	      ETHER_ADDR_LEN);

	/* Initialize the hardware */
	if (em_hardware_init(adapter)) {
		printf("em%d: Unable to initialize the hardware\n",
		       adapter->unit);
		return;
	}

	if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING)
		em_enable_vlans(adapter);

	/* Prepare transmit descriptors and buffers */
	if (em_setup_transmit_structures(adapter)) {
		printf("em%d: Could not setup transmit structures\n",
		       adapter->unit);
		em_stop(adapter);
		return;
	}
	em_initialize_transmit_unit(adapter);

	/* Setup Multicast table */
	em_set_multi(adapter);

	/* Prepare receive descriptors and buffers */
	if (em_setup_receive_structures(adapter)) {
		printf("em%d: Could not setup receive structures\n",
		       adapter->unit);
		em_stop(adapter);
		return;
	}
	em_initialize_receive_unit(adapter);

	/* Don't lose promiscuous settings */
	em_set_promisc(adapter);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	/* Hardware checksum offload exists on 82543 and later only. */
	if (adapter->hw.mac_type >= em_82543) {
		if (ifp->if_capenable & IFCAP_TXCSUM)
			ifp->if_hwassist = EM_CHECKSUM_FEATURES;
		else
			ifp->if_hwassist = 0;
	}

	/* Kick off the one-second link/stats timer. */
	callout_reset(&adapter->timer, hz, em_local_timer, adapter);
	em_clear_hw_cntrs(&adapter->hw);
#ifdef DEVICE_POLLING
	/*
	 * Only enable interrupts if we are not polling, make sure
	 * they are off otherwise.
	 */
	if (ifp->if_flags & IFF_POLLING)
		em_disable_intr(adapter);
	else
#endif /* DEVICE_POLLING */
		em_enable_intr(adapter);

	/* Don't reset the phy next time init gets called */
	adapter->hw.phy_reset_disable = TRUE;

	return;
}
912
/* if_init entry point: locked wrapper around em_init_locked(). */
static void
em_init(void *arg)
{
	struct adapter *sc = arg;

	EM_LOCK(sc);
	em_init_locked(sc);
	EM_UNLOCK(sc);
}
923
924
#ifdef DEVICE_POLLING
static poll_handler_t em_poll;

/*
 * Core polling routine; the adapter lock must be held.  Handles
 * deregistration, optional link-status checks, and bounded Rx/Tx
 * processing driven by the polling framework's count budget.
 */
static void
em_poll_locked(struct ifnet *ifp, enum poll_cmd cmd, int count)
{
	struct adapter *adapter = ifp->if_softc;
	u_int32_t reg_icr;

	mtx_assert(&adapter->mtx, MA_OWNED);

	/* User turned off IFCAP_POLLING: fall back to interrupts. */
	if (!(ifp->if_capenable & IFCAP_POLLING)) {
		ether_poll_deregister(ifp);
		cmd = POLL_DEREGISTER;
	}
	if (cmd == POLL_DEREGISTER) {	/* final call, enable interrupts */
		em_enable_intr(adapter);
		return;
	}
	if (cmd == POLL_AND_CHECK_STATUS) {
		/* Check for link state changes while polling. */
		reg_icr = E1000_READ_REG(&adapter->hw, ICR);
		if (reg_icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
			callout_stop(&adapter->timer);
			adapter->hw.get_link_status = 1;
			em_check_for_link(&adapter->hw);
			em_print_link_status(adapter);
			callout_reset(&adapter->timer, hz, em_local_timer, adapter);
		}
	}
	if (ifp->if_flags & IFF_RUNNING) {
		em_process_receive_interrupts(adapter, count);
		em_clean_transmit_interrupts(adapter);
	}

	/* Restart transmission if packets are waiting. */
	if (ifp->if_flags & IFF_RUNNING && !IFQ_DRV_IS_EMPTY(&ifp->if_snd))
		em_start_locked(ifp);
}

/* Handler registered with the polling framework: locked wrapper. */
static void
em_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
{
	struct adapter *adapter = ifp->if_softc;

	EM_LOCK(adapter);
	em_poll_locked(ifp, cmd, count);
	EM_UNLOCK(adapter);
}
#endif /* DEVICE_POLLING */
973
974 /*********************************************************************
975 *
976 * Interrupt Service routine
977 *
978 **********************************************************************/
static void
em_intr(void *arg)
{
	u_int32_t loop_cnt = EM_MAX_INTR;
	u_int32_t reg_icr;
	struct ifnet *ifp;
	struct adapter *adapter = arg;

	EM_LOCK(adapter);

	ifp = &adapter->interface_data.ac_if;

#ifdef DEVICE_POLLING
	/* If polling already owns the device, this interrupt is stray. */
	if (ifp->if_flags & IFF_POLLING) {
		EM_UNLOCK(adapter);
		return;
	}

	/*
	 * First interrupt while polling is enabled: hand the device over
	 * to the polling subsystem and mask further interrupts.
	 */
	if ((ifp->if_capenable & IFCAP_POLLING) &&
	    ether_poll_register(em_poll, ifp)) {
		em_disable_intr(adapter);
		em_poll_locked(ifp, 0, 1);
		EM_UNLOCK(adapter);
		return;
	}
#endif /* DEVICE_POLLING */

	/* Reading ICR acknowledges (clears) the pending causes. */
	reg_icr = E1000_READ_REG(&adapter->hw, ICR);
	if (!reg_icr) {
		/* Not our interrupt (shared IRQ line). */
		EM_UNLOCK(adapter);
		return;
	}

	/* Link status change */
	if (reg_icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
		callout_stop(&adapter->timer);
		adapter->hw.get_link_status = 1;
		em_check_for_link(&adapter->hw);
		em_print_link_status(adapter);
		callout_reset(&adapter->timer, hz, em_local_timer, adapter);
	}

	/* Service RX/TX for a bounded number of passes (EM_MAX_INTR). */
	while (loop_cnt > 0) {
		if (ifp->if_flags & IFF_RUNNING) {
			em_process_receive_interrupts(adapter, -1);
			em_clean_transmit_interrupts(adapter);
		}
		loop_cnt--;
	}

	/* Restart transmission if the send queue has work. */
	if (ifp->if_flags & IFF_RUNNING && !IFQ_DRV_IS_EMPTY(&ifp->if_snd))
		em_start_locked(ifp);

	EM_UNLOCK(adapter);
	return;
}
1035
1036
1037
1038 /*********************************************************************
1039 *
1040 * Media Ioctl callback
1041 *
1042 * This routine is called whenever the user queries the status of
1043 * the interface using ifconfig.
1044 *
1045 **********************************************************************/
1046 static void
1047 em_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
1048 {
1049 struct adapter * adapter = ifp->if_softc;
1050
1051 INIT_DEBUGOUT("em_media_status: begin");
1052
1053 em_check_for_link(&adapter->hw);
1054 if (E1000_READ_REG(&adapter->hw, STATUS) & E1000_STATUS_LU) {
1055 if (adapter->link_active == 0) {
1056 em_get_speed_and_duplex(&adapter->hw,
1057 &adapter->link_speed,
1058 &adapter->link_duplex);
1059 adapter->link_active = 1;
1060 }
1061 } else {
1062 if (adapter->link_active == 1) {
1063 adapter->link_speed = 0;
1064 adapter->link_duplex = 0;
1065 adapter->link_active = 0;
1066 }
1067 }
1068
1069 ifmr->ifm_status = IFM_AVALID;
1070 ifmr->ifm_active = IFM_ETHER;
1071
1072 if (!adapter->link_active)
1073 return;
1074
1075 ifmr->ifm_status |= IFM_ACTIVE;
1076
1077 if (adapter->hw.media_type == em_media_type_fiber) {
1078 ifmr->ifm_active |= IFM_1000_SX | IFM_FDX;
1079 } else {
1080 switch (adapter->link_speed) {
1081 case 10:
1082 ifmr->ifm_active |= IFM_10_T;
1083 break;
1084 case 100:
1085 ifmr->ifm_active |= IFM_100_TX;
1086 break;
1087 case 1000:
1088 ifmr->ifm_active |= IFM_1000_T;
1089 break;
1090 }
1091 if (adapter->link_duplex == FULL_DUPLEX)
1092 ifmr->ifm_active |= IFM_FDX;
1093 else
1094 ifmr->ifm_active |= IFM_HDX;
1095 }
1096 return;
1097 }
1098
1099 /*********************************************************************
1100 *
1101 * Media Ioctl callback
1102 *
1103 * This routine is called when the user changes speed/duplex using
1104 * media/mediopt option with ifconfig.
1105 *
1106 **********************************************************************/
1107 static int
1108 em_media_change(struct ifnet *ifp)
1109 {
1110 struct adapter * adapter = ifp->if_softc;
1111 struct ifmedia *ifm = &adapter->media;
1112
1113 INIT_DEBUGOUT("em_media_change: begin");
1114
1115 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
1116 return(EINVAL);
1117
1118 switch (IFM_SUBTYPE(ifm->ifm_media)) {
1119 case IFM_AUTO:
1120 adapter->hw.autoneg = DO_AUTO_NEG;
1121 adapter->hw.autoneg_advertised = AUTONEG_ADV_DEFAULT;
1122 break;
1123 case IFM_1000_SX:
1124 case IFM_1000_T:
1125 adapter->hw.autoneg = DO_AUTO_NEG;
1126 adapter->hw.autoneg_advertised = ADVERTISE_1000_FULL;
1127 break;
1128 case IFM_100_TX:
1129 adapter->hw.autoneg = FALSE;
1130 adapter->hw.autoneg_advertised = 0;
1131 if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX)
1132 adapter->hw.forced_speed_duplex = em_100_full;
1133 else
1134 adapter->hw.forced_speed_duplex = em_100_half;
1135 break;
1136 case IFM_10_T:
1137 adapter->hw.autoneg = FALSE;
1138 adapter->hw.autoneg_advertised = 0;
1139 if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX)
1140 adapter->hw.forced_speed_duplex = em_10_full;
1141 else
1142 adapter->hw.forced_speed_duplex = em_10_half;
1143 break;
1144 default:
1145 printf("em%d: Unsupported media type\n", adapter->unit);
1146 }
1147
1148 /* As the speed/duplex settings my have changed we need to
1149 * reset the PHY.
1150 */
1151 adapter->hw.phy_reset_disable = FALSE;
1152
1153 em_init(adapter);
1154
1155 return(0);
1156 }
1157
1158 /*********************************************************************
1159 *
1160 * This routine maps the mbufs to tx descriptors.
1161 *
1162 * return 0 on success, positive on failure
1163 **********************************************************************/
1164 static int
1165 em_encap(struct adapter *adapter, struct mbuf **m_headp)
1166 {
1167 u_int32_t txd_upper;
1168 u_int32_t txd_lower, txd_used = 0, txd_saved = 0;
1169 int i, j, error;
1170 u_int64_t address;
1171
1172 struct mbuf *m_head;
1173
1174 /* For 82544 Workaround */
1175 DESC_ARRAY desc_array;
1176 u_int32_t array_elements;
1177 u_int32_t counter;
1178 struct m_tag *mtag;
1179 bus_dma_segment_t segs[EM_MAX_SCATTER];
1180 bus_dmamap_t map;
1181 int nsegs;
1182 struct em_buffer *tx_buffer = NULL;
1183 struct em_tx_desc *current_tx_desc = NULL;
1184 struct ifnet *ifp = &adapter->interface_data.ac_if;
1185
1186 m_head = *m_headp;
1187
1188 /*
1189 * Force a cleanup if number of TX descriptors
1190 * available hits the threshold
1191 */
1192 if (adapter->num_tx_desc_avail <= EM_TX_CLEANUP_THRESHOLD) {
1193 em_clean_transmit_interrupts(adapter);
1194 if (adapter->num_tx_desc_avail <= EM_TX_CLEANUP_THRESHOLD) {
1195 adapter->no_tx_desc_avail1++;
1196 return(ENOBUFS);
1197 }
1198 }
1199
1200 /*
1201 * Map the packet for DMA.
1202 */
1203 if (bus_dmamap_create(adapter->txtag, BUS_DMA_NOWAIT, &map)) {
1204 adapter->no_tx_map_avail++;
1205 return (ENOMEM);
1206 }
1207 error = bus_dmamap_load_mbuf_sg(adapter->txtag, map, m_head, segs,
1208 &nsegs, BUS_DMA_NOWAIT);
1209 if (error != 0) {
1210 adapter->no_tx_dma_setup++;
1211 bus_dmamap_destroy(adapter->txtag, map);
1212 return (error);
1213 }
1214 KASSERT(nsegs != 0, ("em_encap: empty packet"));
1215
1216 if (nsegs > adapter->num_tx_desc_avail) {
1217 adapter->no_tx_desc_avail2++;
1218 bus_dmamap_destroy(adapter->txtag, map);
1219 return (ENOBUFS);
1220 }
1221
1222
1223 if (ifp->if_hwassist > 0) {
1224 em_transmit_checksum_setup(adapter, m_head,
1225 &txd_upper, &txd_lower);
1226 } else
1227 txd_upper = txd_lower = 0;
1228
1229
1230 /* Find out if we are in vlan mode */
1231 mtag = VLAN_OUTPUT_TAG(ifp, m_head);
1232
1233 /*
1234 * When operating in promiscuous mode, hardware encapsulation for
1235 * packets is disabled. This means we have to add the vlan
1236 * encapsulation in the driver, since it will have come down from the
1237 * VLAN layer with a tag instead of a VLAN header.
1238 */
1239 if (mtag != NULL && adapter->em_insert_vlan_header) {
1240 struct ether_vlan_header *evl;
1241 struct ether_header eh;
1242
1243 m_head = m_pullup(m_head, sizeof(eh));
1244 if (m_head == NULL) {
1245 *m_headp = NULL;
1246 bus_dmamap_destroy(adapter->txtag, map);
1247 return (ENOBUFS);
1248 }
1249 eh = *mtod(m_head, struct ether_header *);
1250 M_PREPEND(m_head, sizeof(*evl), M_DONTWAIT);
1251 if (m_head == NULL) {
1252 *m_headp = NULL;
1253 bus_dmamap_destroy(adapter->txtag, map);
1254 return (ENOBUFS);
1255 }
1256 m_head = m_pullup(m_head, sizeof(*evl));
1257 if (m_head == NULL) {
1258 *m_headp = NULL;
1259 bus_dmamap_destroy(adapter->txtag, map);
1260 return (ENOBUFS);
1261 }
1262 evl = mtod(m_head, struct ether_vlan_header *);
1263 bcopy(&eh, evl, sizeof(*evl));
1264 evl->evl_proto = evl->evl_encap_proto;
1265 evl->evl_encap_proto = htons(ETHERTYPE_VLAN);
1266 evl->evl_tag = htons(VLAN_TAG_VALUE(mtag));
1267 m_tag_delete(m_head, mtag);
1268 mtag = NULL;
1269 *m_headp = m_head;
1270 }
1271
1272 i = adapter->next_avail_tx_desc;
1273 if (adapter->pcix_82544) {
1274 txd_saved = i;
1275 txd_used = 0;
1276 }
1277 for (j = 0; j < nsegs; j++) {
1278 /* If adapter is 82544 and on PCIX bus */
1279 if(adapter->pcix_82544) {
1280 array_elements = 0;
1281 address = htole64(segs[j].ds_addr);
1282 /*
1283 * Check the Address and Length combination and
1284 * split the data accordingly
1285 */
1286 array_elements = em_fill_descriptors(address,
1287 htole32(segs[j].ds_len),
1288 &desc_array);
1289 for (counter = 0; counter < array_elements; counter++) {
1290 if (txd_used == adapter->num_tx_desc_avail) {
1291 adapter->next_avail_tx_desc = txd_saved;
1292 adapter->no_tx_desc_avail2++;
1293 bus_dmamap_destroy(adapter->txtag, map);
1294 return (ENOBUFS);
1295 }
1296 tx_buffer = &adapter->tx_buffer_area[i];
1297 current_tx_desc = &adapter->tx_desc_base[i];
1298 current_tx_desc->buffer_addr = htole64(
1299 desc_array.descriptor[counter].address);
1300 current_tx_desc->lower.data = htole32(
1301 (adapter->txd_cmd | txd_lower |
1302 (u_int16_t)desc_array.descriptor[counter].length));
1303 current_tx_desc->upper.data = htole32((txd_upper));
1304 if (++i == adapter->num_tx_desc)
1305 i = 0;
1306
1307 tx_buffer->m_head = NULL;
1308 txd_used++;
1309 }
1310 } else {
1311 tx_buffer = &adapter->tx_buffer_area[i];
1312 current_tx_desc = &adapter->tx_desc_base[i];
1313
1314 current_tx_desc->buffer_addr = htole64(segs[j].ds_addr);
1315 current_tx_desc->lower.data = htole32(
1316 adapter->txd_cmd | txd_lower | segs[j].ds_len);
1317 current_tx_desc->upper.data = htole32(txd_upper);
1318
1319 if (++i == adapter->num_tx_desc)
1320 i = 0;
1321
1322 tx_buffer->m_head = NULL;
1323 }
1324 }
1325
1326 adapter->next_avail_tx_desc = i;
1327 if (adapter->pcix_82544) {
1328 adapter->num_tx_desc_avail -= txd_used;
1329 }
1330 else {
1331 adapter->num_tx_desc_avail -= nsegs;
1332 }
1333
1334 if (mtag != NULL) {
1335 /* Set the vlan id */
1336 current_tx_desc->upper.fields.special = htole16(VLAN_TAG_VALUE(mtag));
1337
1338 /* Tell hardware to add tag */
1339 current_tx_desc->lower.data |= htole32(E1000_TXD_CMD_VLE);
1340 }
1341
1342 tx_buffer->m_head = m_head;
1343 tx_buffer->map = map;
1344 bus_dmamap_sync(adapter->txtag, map, BUS_DMASYNC_PREWRITE);
1345
1346 /*
1347 * Last Descriptor of Packet needs End Of Packet (EOP)
1348 */
1349 current_tx_desc->lower.data |= htole32(E1000_TXD_CMD_EOP);
1350
1351 /*
1352 * Advance the Transmit Descriptor Tail (Tdt), this tells the E1000
1353 * that this frame is available to transmit.
1354 */
1355 if (adapter->hw.mac_type == em_82547 &&
1356 adapter->link_duplex == HALF_DUPLEX) {
1357 em_82547_move_tail_locked(adapter);
1358 } else {
1359 E1000_WRITE_REG(&adapter->hw, TDT, i);
1360 if (adapter->hw.mac_type == em_82547) {
1361 em_82547_update_fifo_head(adapter, m_head->m_pkthdr.len);
1362 }
1363 }
1364
1365 return(0);
1366 }
1367
1368 /*********************************************************************
1369 *
1370 * 82547 workaround to avoid controller hang in half-duplex environment.
1371 * The workaround is to avoid queuing a large packet that would span
1372 * the internal Tx FIFO ring boundary. We need to reset the FIFO pointers
1373 * in this case. We do that only when FIFO is quiescent.
1374 *
1375 **********************************************************************/
static void
em_82547_move_tail_locked(struct adapter *adapter)
{
	uint16_t hw_tdt;
	uint16_t sw_tdt;
	struct em_tx_desc *tx_desc;
	uint16_t length = 0;
	boolean_t eop = 0;

	EM_LOCK_ASSERT(adapter);

	/*
	 * Walk the descriptors from the hardware tail to the software
	 * tail, accumulating per-frame length, and publish each complete
	 * frame (EOP seen) to the hardware only if it will not wrap the
	 * internal TX FIFO (82547 half-duplex errata).
	 */
	hw_tdt = E1000_READ_REG(&adapter->hw, TDT);
	sw_tdt = adapter->next_avail_tx_desc;

	while (hw_tdt != sw_tdt) {
		tx_desc = &adapter->tx_desc_base[hw_tdt];
		length += tx_desc->lower.flags.length;
		eop = tx_desc->lower.data & E1000_TXD_CMD_EOP;
		if(++hw_tdt == adapter->num_tx_desc)
			hw_tdt = 0;

		if(eop) {
			/*
			 * Frame would span the FIFO wrap point: defer it
			 * by retrying from a 1-tick callout, else advance
			 * the tail and account the FIFO consumption.
			 */
			if (em_82547_fifo_workaround(adapter, length)) {
				adapter->tx_fifo_wrk_cnt++;
				callout_reset(&adapter->tx_fifo_timer, 1,
					em_82547_move_tail, adapter);
				break;
			}
			E1000_WRITE_REG(&adapter->hw, TDT, hw_tdt);
			em_82547_update_fifo_head(adapter, length);
			length = 0;
		}
	}
	return;
}
1411
static void
em_82547_move_tail(void *arg)
{
	struct adapter *adapter = arg;

	/* Callout entry point: take the lock and retry the tail move. */
	EM_LOCK(adapter);
	em_82547_move_tail_locked(adapter);
	EM_UNLOCK(adapter);
}
1421
1422 static int
1423 em_82547_fifo_workaround(struct adapter *adapter, int len)
1424 {
1425 int fifo_space, fifo_pkt_len;
1426
1427 fifo_pkt_len = EM_ROUNDUP(len + EM_FIFO_HDR, EM_FIFO_HDR);
1428
1429 if (adapter->link_duplex == HALF_DUPLEX) {
1430 fifo_space = adapter->tx_fifo_size - adapter->tx_fifo_head;
1431
1432 if (fifo_pkt_len >= (EM_82547_PKT_THRESH + fifo_space)) {
1433 if (em_82547_tx_fifo_reset(adapter)) {
1434 return(0);
1435 }
1436 else {
1437 return(1);
1438 }
1439 }
1440 }
1441
1442 return(0);
1443 }
1444
1445 static void
1446 em_82547_update_fifo_head(struct adapter *adapter, int len)
1447 {
1448 int fifo_pkt_len = EM_ROUNDUP(len + EM_FIFO_HDR, EM_FIFO_HDR);
1449
1450 /* tx_fifo_head is always 16 byte aligned */
1451 adapter->tx_fifo_head += fifo_pkt_len;
1452 if (adapter->tx_fifo_head >= adapter->tx_fifo_size) {
1453 adapter->tx_fifo_head -= adapter->tx_fifo_size;
1454 }
1455
1456 return;
1457 }
1458
1459
1460 static int
1461 em_82547_tx_fifo_reset(struct adapter *adapter)
1462 {
1463 uint32_t tctl;
1464
1465 if ( (E1000_READ_REG(&adapter->hw, TDT) ==
1466 E1000_READ_REG(&adapter->hw, TDH)) &&
1467 (E1000_READ_REG(&adapter->hw, TDFT) ==
1468 E1000_READ_REG(&adapter->hw, TDFH)) &&
1469 (E1000_READ_REG(&adapter->hw, TDFTS) ==
1470 E1000_READ_REG(&adapter->hw, TDFHS)) &&
1471 (E1000_READ_REG(&adapter->hw, TDFPC) == 0)) {
1472
1473 /* Disable TX unit */
1474 tctl = E1000_READ_REG(&adapter->hw, TCTL);
1475 E1000_WRITE_REG(&adapter->hw, TCTL, tctl & ~E1000_TCTL_EN);
1476
1477 /* Reset FIFO pointers */
1478 E1000_WRITE_REG(&adapter->hw, TDFT, adapter->tx_head_addr);
1479 E1000_WRITE_REG(&adapter->hw, TDFH, adapter->tx_head_addr);
1480 E1000_WRITE_REG(&adapter->hw, TDFTS, adapter->tx_head_addr);
1481 E1000_WRITE_REG(&adapter->hw, TDFHS, adapter->tx_head_addr);
1482
1483 /* Re-enable TX unit */
1484 E1000_WRITE_REG(&adapter->hw, TCTL, tctl);
1485 E1000_WRITE_FLUSH(&adapter->hw);
1486
1487 adapter->tx_fifo_head = 0;
1488 adapter->tx_fifo_reset_cnt++;
1489
1490 return(TRUE);
1491 }
1492 else {
1493 return(FALSE);
1494 }
1495 }
1496
1497 static void
1498 em_set_promisc(struct adapter * adapter)
1499 {
1500
1501 u_int32_t reg_rctl;
1502 struct ifnet *ifp = &adapter->interface_data.ac_if;
1503
1504 reg_rctl = E1000_READ_REG(&adapter->hw, RCTL);
1505
1506 if (ifp->if_flags & IFF_PROMISC) {
1507 reg_rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
1508 E1000_WRITE_REG(&adapter->hw, RCTL, reg_rctl);
1509 /* Disable VLAN stripping in promiscous mode
1510 * This enables bridging of vlan tagged frames to occur
1511 * and also allows vlan tags to be seen in tcpdump
1512 */
1513 if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING)
1514 em_disable_vlans(adapter);
1515 adapter->em_insert_vlan_header = 1;
1516 } else if (ifp->if_flags & IFF_ALLMULTI) {
1517 reg_rctl |= E1000_RCTL_MPE;
1518 reg_rctl &= ~E1000_RCTL_UPE;
1519 E1000_WRITE_REG(&adapter->hw, RCTL, reg_rctl);
1520 adapter->em_insert_vlan_header = 0;
1521 } else
1522 adapter->em_insert_vlan_header = 0;
1523
1524 return;
1525 }
1526
1527 static void
1528 em_disable_promisc(struct adapter * adapter)
1529 {
1530 u_int32_t reg_rctl;
1531 struct ifnet *ifp = &adapter->interface_data.ac_if;
1532
1533 reg_rctl = E1000_READ_REG(&adapter->hw, RCTL);
1534
1535 reg_rctl &= (~E1000_RCTL_UPE);
1536 reg_rctl &= (~E1000_RCTL_MPE);
1537 E1000_WRITE_REG(&adapter->hw, RCTL, reg_rctl);
1538
1539 if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING)
1540 em_enable_vlans(adapter);
1541 adapter->em_insert_vlan_header = 0;
1542
1543 return;
1544 }
1545
1546
1547 /*********************************************************************
1548 * Multicast Update
1549 *
1550 * This routine is called whenever multicast address list is updated.
1551 *
1552 **********************************************************************/
1553
1554 static void
1555 em_set_multi(struct adapter * adapter)
1556 {
1557 u_int32_t reg_rctl = 0;
1558 u_int8_t mta[MAX_NUM_MULTICAST_ADDRESSES * ETH_LENGTH_OF_ADDRESS];
1559 struct ifmultiaddr *ifma;
1560 int mcnt = 0;
1561 struct ifnet *ifp = &adapter->interface_data.ac_if;
1562
1563 IOCTL_DEBUGOUT("em_set_multi: begin");
1564
1565 if (adapter->hw.mac_type == em_82542_rev2_0) {
1566 reg_rctl = E1000_READ_REG(&adapter->hw, RCTL);
1567 if (adapter->hw.pci_cmd_word & CMD_MEM_WRT_INVALIDATE) {
1568 em_pci_clear_mwi(&adapter->hw);
1569 }
1570 reg_rctl |= E1000_RCTL_RST;
1571 E1000_WRITE_REG(&adapter->hw, RCTL, reg_rctl);
1572 msec_delay(5);
1573 }
1574
1575 IF_ADDR_LOCK(ifp);
1576 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
1577 if (ifma->ifma_addr->sa_family != AF_LINK)
1578 continue;
1579
1580 if (mcnt == MAX_NUM_MULTICAST_ADDRESSES) break;
1581
1582 bcopy(LLADDR((struct sockaddr_dl *)ifma->ifma_addr),
1583 &mta[mcnt*ETH_LENGTH_OF_ADDRESS], ETH_LENGTH_OF_ADDRESS);
1584 mcnt++;
1585 }
1586 IF_ADDR_UNLOCK(ifp);
1587
1588 if (mcnt >= MAX_NUM_MULTICAST_ADDRESSES) {
1589 reg_rctl = E1000_READ_REG(&adapter->hw, RCTL);
1590 reg_rctl |= E1000_RCTL_MPE;
1591 E1000_WRITE_REG(&adapter->hw, RCTL, reg_rctl);
1592 } else
1593 em_mc_addr_list_update(&adapter->hw, mta, mcnt, 0, 1);
1594
1595 if (adapter->hw.mac_type == em_82542_rev2_0) {
1596 reg_rctl = E1000_READ_REG(&adapter->hw, RCTL);
1597 reg_rctl &= ~E1000_RCTL_RST;
1598 E1000_WRITE_REG(&adapter->hw, RCTL, reg_rctl);
1599 msec_delay(5);
1600 if (adapter->hw.pci_cmd_word & CMD_MEM_WRT_INVALIDATE) {
1601 em_pci_set_mwi(&adapter->hw);
1602 }
1603 }
1604
1605 return;
1606 }
1607
1608
1609 /*********************************************************************
1610 * Timer routine
1611 *
1612 * This routine checks for link status and updates statistics.
1613 *
1614 **********************************************************************/
1615
1616 static void
1617 em_local_timer(void *arg)
1618 {
1619 struct ifnet *ifp;
1620 struct adapter * adapter = arg;
1621 ifp = &adapter->interface_data.ac_if;
1622
1623 EM_LOCK(adapter);
1624
1625 em_check_for_link(&adapter->hw);
1626 em_print_link_status(adapter);
1627 em_update_stats_counters(adapter);
1628 if (em_display_debug_stats && ifp->if_flags & IFF_RUNNING) {
1629 em_print_hw_stats(adapter);
1630 }
1631 em_smartspeed(adapter);
1632
1633 callout_reset(&adapter->timer, hz, em_local_timer, adapter);
1634
1635 EM_UNLOCK(adapter);
1636 return;
1637 }
1638
1639 static void
1640 em_print_link_status(struct adapter * adapter)
1641 {
1642 struct ifnet *ifp = &adapter->interface_data.ac_if;
1643
1644 if (E1000_READ_REG(&adapter->hw, STATUS) & E1000_STATUS_LU) {
1645 if (adapter->link_active == 0) {
1646 em_get_speed_and_duplex(&adapter->hw,
1647 &adapter->link_speed,
1648 &adapter->link_duplex);
1649 printf("em%d: Link is up %d Mbps %s\n",
1650 adapter->unit,
1651 adapter->link_speed,
1652 ((adapter->link_duplex == FULL_DUPLEX) ?
1653 "Full Duplex" : "Half Duplex"));
1654 adapter->link_active = 1;
1655 adapter->smartspeed = 0;
1656 ifp->if_link_state = LINK_STATE_UP;
1657 #ifdef DEV_CARP
1658 if (ifp->if_carp)
1659 carp_carpdev_state(ifp->if_carp);
1660 #endif
1661 }
1662 } else {
1663 if (adapter->link_active == 1) {
1664 adapter->link_speed = 0;
1665 adapter->link_duplex = 0;
1666 printf("em%d: Link is Down\n", adapter->unit);
1667 adapter->link_active = 0;
1668 ifp->if_link_state = LINK_STATE_DOWN;
1669 #ifdef DEV_CARP
1670 if (ifp->if_carp)
1671 carp_carpdev_state(ifp->if_carp);
1672 #endif
1673 }
1674 }
1675
1676 return;
1677 }
1678
1679 /*********************************************************************
1680 *
1681 * This routine disables all traffic on the adapter by issuing a
1682 * global reset on the MAC and deallocates TX/RX buffers.
1683 *
1684 **********************************************************************/
1685
1686 static void
1687 em_stop(void *arg)
1688 {
1689 struct ifnet *ifp;
1690 struct adapter * adapter = arg;
1691 ifp = &adapter->interface_data.ac_if;
1692
1693 mtx_assert(&adapter->mtx, MA_OWNED);
1694
1695 INIT_DEBUGOUT("em_stop: begin");
1696 em_disable_intr(adapter);
1697 em_reset_hw(&adapter->hw);
1698 callout_stop(&adapter->timer);
1699 callout_stop(&adapter->tx_fifo_timer);
1700 em_free_transmit_structures(adapter);
1701 em_free_receive_structures(adapter);
1702
1703
1704 /* Tell the stack that the interface is no longer active */
1705 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
1706
1707 return;
1708 }
1709
1710
1711 /*********************************************************************
1712 *
1713 * Determine hardware revision.
1714 *
1715 **********************************************************************/
1716 static void
1717 em_identify_hardware(struct adapter * adapter)
1718 {
1719 device_t dev = adapter->dev;
1720
1721 /* Make sure our PCI config space has the necessary stuff set */
1722 adapter->hw.pci_cmd_word = pci_read_config(dev, PCIR_COMMAND, 2);
1723 if (!((adapter->hw.pci_cmd_word & PCIM_CMD_BUSMASTEREN) &&
1724 (adapter->hw.pci_cmd_word & PCIM_CMD_MEMEN))) {
1725 printf("em%d: Memory Access and/or Bus Master bits were not set!\n",
1726 adapter->unit);
1727 adapter->hw.pci_cmd_word |=
1728 (PCIM_CMD_BUSMASTEREN | PCIM_CMD_MEMEN);
1729 pci_write_config(dev, PCIR_COMMAND, adapter->hw.pci_cmd_word, 2);
1730 }
1731
1732 /* Save off the information about this board */
1733 adapter->hw.vendor_id = pci_get_vendor(dev);
1734 adapter->hw.device_id = pci_get_device(dev);
1735 adapter->hw.revision_id = pci_read_config(dev, PCIR_REVID, 1);
1736 adapter->hw.subsystem_vendor_id = pci_read_config(dev, PCIR_SUBVEND_0, 2);
1737 adapter->hw.subsystem_id = pci_read_config(dev, PCIR_SUBDEV_0, 2);
1738
1739 /* Identify the MAC */
1740 if (em_set_mac_type(&adapter->hw))
1741 printf("em%d: Unknown MAC Type\n", adapter->unit);
1742
1743 if(adapter->hw.mac_type == em_82541 ||
1744 adapter->hw.mac_type == em_82541_rev_2 ||
1745 adapter->hw.mac_type == em_82547 ||
1746 adapter->hw.mac_type == em_82547_rev_2)
1747 adapter->hw.phy_init_script = TRUE;
1748
1749 return;
1750 }
1751
static int
em_allocate_pci_resources(struct adapter * adapter)
{
	int i, val, rid;
	device_t dev = adapter->dev;

	/* Map the memory BAR that holds the device registers. */
	rid = EM_MMBA;
	adapter->res_memory = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
						     &rid, RF_ACTIVE);
	if (!(adapter->res_memory)) {
		printf("em%d: Unable to allocate bus resource: memory\n",
		       adapter->unit);
		return(ENXIO);
	}
	adapter->osdep.mem_bus_space_tag =
	    rman_get_bustag(adapter->res_memory);
	adapter->osdep.mem_bus_space_handle =
	    rman_get_bushandle(adapter->res_memory);
	adapter->hw.hw_addr = (uint8_t *)&adapter->osdep.mem_bus_space_handle;


	if (adapter->hw.mac_type > em_82543) {
		/* Figure out where our I/O BAR is: scan the BARs for the
		 * first one flagged as I/O space in config space. */
		rid = EM_MMBA;
		for (i = 0; i < 5; i++) {
			val = pci_read_config(dev, rid, 4);
			if (val & 0x00000001) {
				adapter->io_rid = rid;
				break;
			}
			rid += 4;
		}

		adapter->res_ioport = bus_alloc_resource_any(dev,
							     SYS_RES_IOPORT,
							     &adapter->io_rid,
							     RF_ACTIVE);
		if (!(adapter->res_ioport)) {
			printf("em%d: Unable to allocate bus resource: ioport\n",
			       adapter->unit);
			return(ENXIO);
		}

		adapter->hw.io_base =
		    rman_get_start(adapter->res_ioport);
	}

	/*
	 * Allocate a shareable IRQ and hook up the MPSAFE handler.
	 * NOTE(review): on failure the earlier resources are not released
	 * here; presumably the caller invokes em_free_pci_resources() on
	 * the error path — verify against the attach routine.
	 */
	rid = 0x0;
	adapter->res_interrupt = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
							RF_SHAREABLE |
							RF_ACTIVE);
	if (!(adapter->res_interrupt)) {
		printf("em%d: Unable to allocate bus resource: interrupt\n",
		       adapter->unit);
		return(ENXIO);
	}
	if (bus_setup_intr(dev, adapter->res_interrupt,
			   INTR_TYPE_NET | INTR_MPSAFE,
			   (void (*)(void *)) em_intr, adapter,
			   &adapter->int_handler_tag)) {
		printf("em%d: Error registering interrupt handler!\n",
		       adapter->unit);
		return(ENXIO);
	}

	adapter->hw.back = &adapter->osdep;

	return(0);
}
1821
1822 static void
1823 em_free_pci_resources(struct adapter * adapter)
1824 {
1825 device_t dev = adapter->dev;
1826
1827 if (adapter->res_interrupt != NULL) {
1828 bus_teardown_intr(dev, adapter->res_interrupt,
1829 adapter->int_handler_tag);
1830 bus_release_resource(dev, SYS_RES_IRQ, 0,
1831 adapter->res_interrupt);
1832 }
1833 if (adapter->res_memory != NULL) {
1834 bus_release_resource(dev, SYS_RES_MEMORY, EM_MMBA,
1835 adapter->res_memory);
1836 }
1837
1838 if (adapter->res_ioport != NULL) {
1839 bus_release_resource(dev, SYS_RES_IOPORT, adapter->io_rid,
1840 adapter->res_ioport);
1841 }
1842 return;
1843 }
1844
1845 /*********************************************************************
1846 *
1847 * Initialize the hardware to a configuration as specified by the
1848 * adapter structure. The controller is reset, the EEPROM is
1849 * verified, the MAC address is set, then the shared initialization
1850 * routines are called.
1851 *
1852 **********************************************************************/
1853 static int
1854 em_hardware_init(struct adapter * adapter)
1855 {
1856 INIT_DEBUGOUT("em_hardware_init: begin");
1857 /* Issue a global reset */
1858 em_reset_hw(&adapter->hw);
1859
1860 /* When hardware is reset, fifo_head is also reset */
1861 adapter->tx_fifo_head = 0;
1862
1863 /* Make sure we have a good EEPROM before we read from it */
1864 if (em_validate_eeprom_checksum(&adapter->hw) < 0) {
1865 printf("em%d: The EEPROM Checksum Is Not Valid\n",
1866 adapter->unit);
1867 return(EIO);
1868 }
1869
1870 if (em_read_part_num(&adapter->hw, &(adapter->part_num)) < 0) {
1871 printf("em%d: EEPROM read error while reading part number\n",
1872 adapter->unit);
1873 return(EIO);
1874 }
1875
1876 if (em_init_hw(&adapter->hw) < 0) {
1877 printf("em%d: Hardware Initialization Failed",
1878 adapter->unit);
1879 return(EIO);
1880 }
1881
1882 em_check_for_link(&adapter->hw);
1883 if (E1000_READ_REG(&adapter->hw, STATUS) & E1000_STATUS_LU)
1884 adapter->link_active = 1;
1885 else
1886 adapter->link_active = 0;
1887
1888 if (adapter->link_active) {
1889 em_get_speed_and_duplex(&adapter->hw,
1890 &adapter->link_speed,
1891 &adapter->link_duplex);
1892 } else {
1893 adapter->link_speed = 0;
1894 adapter->link_duplex = 0;
1895 }
1896
1897 return(0);
1898 }
1899
1900 /*********************************************************************
1901 *
1902 * Setup networking device structure and register an interface.
1903 *
1904 **********************************************************************/
static void
em_setup_interface(device_t dev, struct adapter * adapter)
{
	struct ifnet *ifp;
	INIT_DEBUGOUT("em_setup_interface: begin");

	/* Fill in the ifnet and attach to the ethernet layer. */
	ifp = &adapter->interface_data.ac_if;
	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	ifp->if_mtu = ETHERMTU;
	ifp->if_baudrate = 1000000000;
	ifp->if_init =  em_init;
	ifp->if_softc = adapter;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = em_ioctl;
	ifp->if_start = em_start;
	ifp->if_watchdog = em_watchdog;
	/* Size the send queue to the TX descriptor ring. */
	IFQ_SET_MAXLEN(&ifp->if_snd, adapter->num_tx_desc - 1);
	ifp->if_snd.ifq_drv_maxlen = adapter->num_tx_desc - 1;
	IFQ_SET_READY(&ifp->if_snd);
	ether_ifattach(ifp, adapter->interface_data.ac_enaddr);

	/* Hardware checksum offload is available from 82543 onwards. */
	ifp->if_capabilities = ifp->if_capenable = 0;

	if (adapter->hw.mac_type >= em_82543) {
		ifp->if_capabilities |= IFCAP_HWCSUM;
		ifp->if_capenable |= IFCAP_HWCSUM;
	}

	/*
	 * Tell the upper layer(s) we support long frames.
	 */
	ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
	ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU;
	ifp->if_capenable |= IFCAP_VLAN_MTU;

#ifdef DEVICE_POLLING
	ifp->if_capabilities |= IFCAP_POLLING;
	ifp->if_capenable |= IFCAP_POLLING;
#endif

	/*
	 * Specify the media types supported by this adapter and register
	 * callbacks to update media and link information
	 */
	ifmedia_init(&adapter->media, IFM_IMASK, em_media_change,
		     em_media_status);
	if (adapter->hw.media_type == em_media_type_fiber) {
		/* Fiber: 1000BASE-SX only. */
		ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_SX | IFM_FDX,
			    0, NULL);
		ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_SX,
			    0, NULL);
	} else {
		/* Copper: the full 10/100/1000BASE-T menu. */
		ifmedia_add(&adapter->media, IFM_ETHER | IFM_10_T, 0, NULL);
		ifmedia_add(&adapter->media, IFM_ETHER | IFM_10_T | IFM_FDX,
			    0, NULL);
		ifmedia_add(&adapter->media, IFM_ETHER | IFM_100_TX,
			    0, NULL);
		ifmedia_add(&adapter->media, IFM_ETHER | IFM_100_TX | IFM_FDX,
			    0, NULL);
		ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_T | IFM_FDX,
			    0, NULL);
		ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_T, 0, NULL);
	}
	ifmedia_add(&adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL);
	ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);

	return;
}
1973
1974
1975 /*********************************************************************
1976 *
1977 * Workaround for SmartSpeed on 82541 and 82547 controllers
1978 *
1979 **********************************************************************/
static void
em_smartspeed(struct adapter *adapter)
{
	uint16_t phy_tmp;

	/*
	 * Only relevant while the link is down, on an IGP PHY that is
	 * autonegotiating with 1000BASE-T full duplex advertised.
	 */
	if(adapter->link_active || (adapter->hw.phy_type != em_phy_igp) ||
	   !adapter->hw.autoneg || !(adapter->hw.autoneg_advertised & ADVERTISE_1000_FULL))
		return;

	if(adapter->smartspeed == 0) {
		/* If Master/Slave config fault is asserted twice,
		 * we assume back-to-back */
		em_read_phy_reg(&adapter->hw, PHY_1000T_STATUS, &phy_tmp);
		if(!(phy_tmp & SR_1000T_MS_CONFIG_FAULT)) return;
		em_read_phy_reg(&adapter->hw, PHY_1000T_STATUS, &phy_tmp);
		if(phy_tmp & SR_1000T_MS_CONFIG_FAULT) {
			em_read_phy_reg(&adapter->hw, PHY_1000T_CTRL,
					&phy_tmp);
			if(phy_tmp & CR_1000T_MS_ENABLE) {
				/*
				 * Drop the manual master/slave setting and
				 * restart autonegotiation to retry the link.
				 */
				phy_tmp &= ~CR_1000T_MS_ENABLE;
				em_write_phy_reg(&adapter->hw,
						 PHY_1000T_CTRL, phy_tmp);
				adapter->smartspeed++;
				if(adapter->hw.autoneg &&
				   !em_phy_setup_autoneg(&adapter->hw) &&
				   !em_read_phy_reg(&adapter->hw, PHY_CTRL,
						    &phy_tmp)) {
					phy_tmp |= (MII_CR_AUTO_NEG_EN |
						    MII_CR_RESTART_AUTO_NEG);
					em_write_phy_reg(&adapter->hw,
							 PHY_CTRL, phy_tmp);
				}
			}
		}
		return;
	} else if(adapter->smartspeed == EM_SMARTSPEED_DOWNSHIFT) {
		/* If still no link, perhaps using 2/3 pair cable */
		em_read_phy_reg(&adapter->hw, PHY_1000T_CTRL, &phy_tmp);
		phy_tmp |= CR_1000T_MS_ENABLE;
		em_write_phy_reg(&adapter->hw, PHY_1000T_CTRL, phy_tmp);
		if(adapter->hw.autoneg &&
		   !em_phy_setup_autoneg(&adapter->hw) &&
		   !em_read_phy_reg(&adapter->hw, PHY_CTRL, &phy_tmp)) {
			phy_tmp |= (MII_CR_AUTO_NEG_EN |
				    MII_CR_RESTART_AUTO_NEG);
			em_write_phy_reg(&adapter->hw, PHY_CTRL, phy_tmp);
		}
	}
	/* Restart process after EM_SMARTSPEED_MAX iterations */
	if(adapter->smartspeed++ == EM_SMARTSPEED_MAX)
		adapter->smartspeed = 0;

	return;
}
2034
2035
2036 /*
2037 * Manage DMA'able memory.
2038 */
2039 static void
2040 em_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
2041 {
2042 if (error)
2043 return;
2044 *(bus_addr_t*) arg = segs->ds_addr;
2045 return;
2046 }
2047
2048 static int
2049 em_dma_malloc(struct adapter *adapter, bus_size_t size,
2050 struct em_dma_alloc *dma, int mapflags)
2051 {
2052 int r;
2053
2054 r = bus_dma_tag_create(NULL, /* parent */
2055 PAGE_SIZE, 0, /* alignment, bounds */
2056 BUS_SPACE_MAXADDR, /* lowaddr */
2057 BUS_SPACE_MAXADDR, /* highaddr */
2058 NULL, NULL, /* filter, filterarg */
2059 size, /* maxsize */
2060 1, /* nsegments */
2061 size, /* maxsegsize */
2062 BUS_DMA_ALLOCNOW, /* flags */
2063 NULL, /* lockfunc */
2064 NULL, /* lockarg */
2065 &dma->dma_tag);
2066 if (r != 0) {
2067 printf("em%d: em_dma_malloc: bus_dma_tag_create failed; "
2068 "error %u\n", adapter->unit, r);
2069 goto fail_0;
2070 }
2071
2072 r = bus_dmamem_alloc(dma->dma_tag, (void**) &dma->dma_vaddr,
2073 BUS_DMA_NOWAIT, &dma->dma_map);
2074 if (r != 0) {
2075 printf("em%d: em_dma_malloc: bus_dmammem_alloc failed; "
2076 "size %ju, error %d\n", adapter->unit,
2077 (uintmax_t)size, r);
2078 goto fail_2;
2079 }
2080
2081 r = bus_dmamap_load(dma->dma_tag, dma->dma_map, dma->dma_vaddr,
2082 size,
2083 em_dmamap_cb,
2084 &dma->dma_paddr,
2085 mapflags | BUS_DMA_NOWAIT);
2086 if (r != 0) {
2087 printf("em%d: em_dma_malloc: bus_dmamap_load failed; "
2088 "error %u\n", adapter->unit, r);
2089 goto fail_3;
2090 }
2091
2092 dma->dma_size = size;
2093 return (0);
2094
2095 fail_3:
2096 bus_dmamap_unload(dma->dma_tag, dma->dma_map);
2097 fail_2:
2098 bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map);
2099 bus_dma_tag_destroy(dma->dma_tag);
2100 fail_0:
2101 dma->dma_map = NULL;
2102 dma->dma_tag = NULL;
2103 return (r);
2104 }
2105
2106 static void
2107 em_dma_free(struct adapter *adapter, struct em_dma_alloc *dma)
2108 {
2109 bus_dmamap_unload(dma->dma_tag, dma->dma_map);
2110 bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map);
2111 bus_dma_tag_destroy(dma->dma_tag);
2112 }
2113
2114
2115 /*********************************************************************
2116 *
2117 * Allocate memory for tx_buffer structures. The tx_buffer stores all
2118 * the information needed to transmit a packet on the wire.
2119 *
2120 **********************************************************************/
2121 static int
2122 em_allocate_transmit_structures(struct adapter * adapter)
2123 {
2124 if (!(adapter->tx_buffer_area =
2125 (struct em_buffer *) malloc(sizeof(struct em_buffer) *
2126 adapter->num_tx_desc, M_DEVBUF,
2127 M_NOWAIT))) {
2128 printf("em%d: Unable to allocate tx_buffer memory\n",
2129 adapter->unit);
2130 return ENOMEM;
2131 }
2132
2133 bzero(adapter->tx_buffer_area,
2134 sizeof(struct em_buffer) * adapter->num_tx_desc);
2135
2136 return 0;
2137 }
2138
2139 /*********************************************************************
2140 *
2141 * Allocate and initialize transmit structures.
2142 *
2143 **********************************************************************/
2144 static int
2145 em_setup_transmit_structures(struct adapter * adapter)
2146 {
2147 /*
2148 * Setup DMA descriptor areas.
2149 */
2150 if (bus_dma_tag_create(NULL, /* parent */
2151 1, 0, /* alignment, bounds */
2152 BUS_SPACE_MAXADDR, /* lowaddr */
2153 BUS_SPACE_MAXADDR, /* highaddr */
2154 NULL, NULL, /* filter, filterarg */
2155 MCLBYTES * 8, /* maxsize */
2156 EM_MAX_SCATTER, /* nsegments */
2157 MCLBYTES * 8, /* maxsegsize */
2158 BUS_DMA_ALLOCNOW, /* flags */
2159 NULL, /* lockfunc */
2160 NULL, /* lockarg */
2161 &adapter->txtag)) {
2162 printf("em%d: Unable to allocate TX DMA tag\n", adapter->unit);
2163 return (ENOMEM);
2164 }
2165
2166 if (em_allocate_transmit_structures(adapter))
2167 return (ENOMEM);
2168
2169 bzero((void *) adapter->tx_desc_base,
2170 (sizeof(struct em_tx_desc)) * adapter->num_tx_desc);
2171
2172 adapter->next_avail_tx_desc = 0;
2173 adapter->oldest_used_tx_desc = 0;
2174
2175 /* Set number of descriptors available */
2176 adapter->num_tx_desc_avail = adapter->num_tx_desc;
2177
2178 /* Set checksum context */
2179 adapter->active_checksum_context = OFFLOAD_NONE;
2180
2181 return (0);
2182 }
2183
2184 /*********************************************************************
2185 *
2186 * Enable transmit unit.
2187 *
2188 **********************************************************************/
/*
 * Program the hardware transmit unit: descriptor ring base/length,
 * head/tail pointers, inter-packet gap, interrupt-delay timers and
 * the transmit control register.  Also records the default command
 * bits (txd_cmd) applied to every transmit descriptor.  Assumes the
 * ring memory in adapter->txdma has already been allocated.
 */
static void
em_initialize_transmit_unit(struct adapter * adapter)
{
        u_int32_t       reg_tctl;
        u_int32_t       reg_tipg = 0;
        u_int64_t       bus_addr;

        INIT_DEBUGOUT("em_initialize_transmit_unit: begin");
        /* Setup the Base and Length of the Tx Descriptor Ring */
        bus_addr = adapter->txdma.dma_paddr;
        E1000_WRITE_REG(&adapter->hw, TDBAL, (u_int32_t)bus_addr);
        E1000_WRITE_REG(&adapter->hw, TDBAH, (u_int32_t)(bus_addr >> 32));
        E1000_WRITE_REG(&adapter->hw, TDLEN,
                        adapter->num_tx_desc *
                        sizeof(struct em_tx_desc));

        /* Setup the HW Tx Head and Tail descriptor pointers */
        E1000_WRITE_REG(&adapter->hw, TDH, 0);
        E1000_WRITE_REG(&adapter->hw, TDT, 0);


        HW_DEBUGOUT2("Base = %x, Length = %x\n",
                     E1000_READ_REG(&adapter->hw, TDBAL),
                     E1000_READ_REG(&adapter->hw, TDLEN));

        /* Set the default values for the Tx Inter Packet Gap timer */
        switch (adapter->hw.mac_type) {
        case em_82542_rev2_0:
        case em_82542_rev2_1:
                reg_tipg = DEFAULT_82542_TIPG_IPGT;
                reg_tipg |= DEFAULT_82542_TIPG_IPGR1 << E1000_TIPG_IPGR1_SHIFT;
                reg_tipg |= DEFAULT_82542_TIPG_IPGR2 << E1000_TIPG_IPGR2_SHIFT;
                break;
        default:
                /* Fiber and copper need different base IPG values. */
                if (adapter->hw.media_type == em_media_type_fiber)
                        reg_tipg = DEFAULT_82543_TIPG_IPGT_FIBER;
                else
                        reg_tipg = DEFAULT_82543_TIPG_IPGT_COPPER;
                reg_tipg |= DEFAULT_82543_TIPG_IPGR1 << E1000_TIPG_IPGR1_SHIFT;
                reg_tipg |= DEFAULT_82543_TIPG_IPGR2 << E1000_TIPG_IPGR2_SHIFT;
        }

        E1000_WRITE_REG(&adapter->hw, TIPG, reg_tipg);
        E1000_WRITE_REG(&adapter->hw, TIDV, adapter->tx_int_delay.value);
        /* Absolute Tx interrupt delay exists only on 82540 and newer. */
        if(adapter->hw.mac_type >= em_82540)
                E1000_WRITE_REG(&adapter->hw, TADV,
                    adapter->tx_abs_int_delay.value);

        /* Program the Transmit Control Register */
        reg_tctl = E1000_TCTL_PSP | E1000_TCTL_EN |
                   (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT);
        /* Collision distance depends on the negotiated duplex. */
        if (adapter->link_duplex == 1) {
                reg_tctl |= E1000_FDX_COLLISION_DISTANCE << E1000_COLD_SHIFT;
        } else {
                reg_tctl |= E1000_HDX_COLLISION_DISTANCE << E1000_COLD_SHIFT;
        }
        E1000_WRITE_REG(&adapter->hw, TCTL, reg_tctl);

        /* Setup Transmit Descriptor Settings for this adapter */
        adapter->txd_cmd = E1000_TXD_CMD_IFCS | E1000_TXD_CMD_RS;

        /* Only request delayed interrupts when a delay is configured. */
        if (adapter->tx_int_delay.value > 0)
                adapter->txd_cmd |= E1000_TXD_CMD_IDE;

        return;
}
2255
2256 /*********************************************************************
2257 *
2258 * Free all transmit related data structures.
2259 *
2260 **********************************************************************/
2261 static void
2262 em_free_transmit_structures(struct adapter * adapter)
2263 {
2264 struct em_buffer *tx_buffer;
2265 int i;
2266
2267 INIT_DEBUGOUT("free_transmit_structures: begin");
2268
2269 if (adapter->tx_buffer_area != NULL) {
2270 tx_buffer = adapter->tx_buffer_area;
2271 for (i = 0; i < adapter->num_tx_desc; i++, tx_buffer++) {
2272 if (tx_buffer->m_head != NULL) {
2273 bus_dmamap_unload(adapter->txtag, tx_buffer->map);
2274 bus_dmamap_destroy(adapter->txtag, tx_buffer->map);
2275 m_freem(tx_buffer->m_head);
2276 }
2277 tx_buffer->m_head = NULL;
2278 }
2279 }
2280 if (adapter->tx_buffer_area != NULL) {
2281 free(adapter->tx_buffer_area, M_DEVBUF);
2282 adapter->tx_buffer_area = NULL;
2283 }
2284 if (adapter->txtag != NULL) {
2285 bus_dma_tag_destroy(adapter->txtag);
2286 adapter->txtag = NULL;
2287 }
2288 return;
2289 }
2290
2291 /*********************************************************************
2292 *
2293 * The offload context needs to be set when we transfer the first
2294 * packet of a particular protocol (TCP/UDP). We change the
2295 * context only if the protocol type changes.
2296 *
2297 **********************************************************************/
/*
 * Decide the checksum-offload bits (*txd_upper / *txd_lower) for the
 * packet in mp and, when the offload protocol (TCP vs UDP) differs
 * from the currently programmed context, write a new context
 * descriptor into the ring.  Consumes one transmit descriptor in
 * that case (num_tx_desc_avail is decremented).
 */
static void
em_transmit_checksum_setup(struct adapter * adapter,
                           struct mbuf *mp,
                           u_int32_t *txd_upper,
                           u_int32_t *txd_lower)
{
        struct em_context_desc *TXD;
        struct em_buffer *tx_buffer;
        int curr_txd;

        if (mp->m_pkthdr.csum_flags) {

                if (mp->m_pkthdr.csum_flags & CSUM_TCP) {
                        /* Request TCP checksum insertion. */
                        *txd_upper = E1000_TXD_POPTS_TXSM << 8;
                        *txd_lower = E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D;
                        /* Context already programmed for TCP: nothing to do. */
                        if (adapter->active_checksum_context == OFFLOAD_TCP_IP)
                                return;
                        else
                                adapter->active_checksum_context = OFFLOAD_TCP_IP;

                } else if (mp->m_pkthdr.csum_flags & CSUM_UDP) {
                        /* Request UDP checksum insertion. */
                        *txd_upper = E1000_TXD_POPTS_TXSM << 8;
                        *txd_lower = E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D;
                        /* Context already programmed for UDP: nothing to do. */
                        if (adapter->active_checksum_context == OFFLOAD_UDP_IP)
                                return;
                        else
                                adapter->active_checksum_context = OFFLOAD_UDP_IP;
                } else {
                        /* Unsupported csum request: no offload bits. */
                        *txd_upper = 0;
                        *txd_lower = 0;
                        return;
                }
        } else {
                /* No checksum offload requested for this packet. */
                *txd_upper = 0;
                *txd_lower = 0;
                return;
        }

        /* If we reach this point, the checksum offload context
         * needs to be reset.
         */
        curr_txd = adapter->next_avail_tx_desc;
        tx_buffer = &adapter->tx_buffer_area[curr_txd];
        TXD = (struct em_context_desc *) &adapter->tx_desc_base[curr_txd];

        /* IP header starts right after the Ethernet header; point the
         * hardware at the ip_sum field and the header's end. */
        TXD->lower_setup.ip_fields.ipcss = ETHER_HDR_LEN;
        TXD->lower_setup.ip_fields.ipcso =
                ETHER_HDR_LEN + offsetof(struct ip, ip_sum);
        TXD->lower_setup.ip_fields.ipcse =
                htole16(ETHER_HDR_LEN + sizeof(struct ip) - 1);

        /* TCP/UDP payload starts after the (option-less) IP header;
         * tucse == 0 means "checksum to end of packet". */
        TXD->upper_setup.tcp_fields.tucss =
                ETHER_HDR_LEN + sizeof(struct ip);
        TXD->upper_setup.tcp_fields.tucse = htole16(0);

        if (adapter->active_checksum_context == OFFLOAD_TCP_IP) {
                TXD->upper_setup.tcp_fields.tucso =
                        ETHER_HDR_LEN + sizeof(struct ip) +
                        offsetof(struct tcphdr, th_sum);
        } else if (adapter->active_checksum_context == OFFLOAD_UDP_IP) {
                TXD->upper_setup.tcp_fields.tucso =
                        ETHER_HDR_LEN + sizeof(struct ip) +
                        offsetof(struct udphdr, uh_sum);
        }

        TXD->tcp_seg_setup.data = htole32(0);
        TXD->cmd_and_length = htole32(adapter->txd_cmd | E1000_TXD_CMD_DEXT);

        /* Context descriptors carry no mbuf. */
        tx_buffer->m_head = NULL;

        if (++curr_txd == adapter->num_tx_desc)
                curr_txd = 0;

        adapter->num_tx_desc_avail--;
        adapter->next_avail_tx_desc = curr_txd;

        return;
}
2376
2377 /**********************************************************************
2378 *
2379 * Examine each tx_buffer in the used queue. If the hardware is done
2380 * processing the packet then free associated resources. The
2381 * tx_buffer is put back on the free queue.
2382 *
2383 **********************************************************************/
/*
 * Reclaim transmit descriptors the hardware has completed (DD bit
 * set), freeing the associated mbufs and DMA maps.  Runs with the
 * adapter lock held.  Updates num_tx_desc_avail and, when enough
 * room is recovered, clears IFF_OACTIVE and adjusts the watchdog.
 */
static void
em_clean_transmit_interrupts(struct adapter * adapter)
{
        int i, num_avail;
        struct em_buffer *tx_buffer;
        struct em_tx_desc   *tx_desc;
        struct ifnet   *ifp = &adapter->interface_data.ac_if;

        mtx_assert(&adapter->mtx, MA_OWNED);

        /* Whole ring already free: nothing to reclaim. */
        if (adapter->num_tx_desc_avail == adapter->num_tx_desc)
                return;

#ifdef DBG_STATS
        adapter->clean_tx_interrupts++;
#endif
        num_avail = adapter->num_tx_desc_avail;
        i = adapter->oldest_used_tx_desc;

        tx_buffer = &adapter->tx_buffer_area[i];
        tx_desc = &adapter->tx_desc_base[i];

        /* Walk forward from the oldest used slot while the hardware
         * reports Descriptor Done. */
        while (tx_desc->upper.fields.status & E1000_TXD_STAT_DD) {

                tx_desc->upper.data = 0;
                num_avail++;

                /* Only descriptors that headed a packet carry an mbuf. */
                if (tx_buffer->m_head) {
                        ifp->if_opackets++;
                        bus_dmamap_sync(adapter->txtag, tx_buffer->map,
                                        BUS_DMASYNC_POSTWRITE);
                        bus_dmamap_unload(adapter->txtag, tx_buffer->map);
                        bus_dmamap_destroy(adapter->txtag, tx_buffer->map);

                        m_freem(tx_buffer->m_head);
                        tx_buffer->m_head = NULL;
                }

                if (++i == adapter->num_tx_desc)
                        i = 0;

                tx_buffer = &adapter->tx_buffer_area[i];
                tx_desc = &adapter->tx_desc_base[i];
        }

        adapter->oldest_used_tx_desc = i;

        /*
         * If we have enough room, clear IFF_OACTIVE to tell the stack
         * that it is OK to send packets.
         * If there are no pending descriptors, clear the timeout. Otherwise,
         * if some descriptors have been freed, restart the timeout.
         */
        if (num_avail > EM_TX_CLEANUP_THRESHOLD) {
                ifp->if_flags &= ~IFF_OACTIVE;
                if (num_avail == adapter->num_tx_desc)
                        ifp->if_timer = 0;
                else if (num_avail != adapter->num_tx_desc_avail)
                        ifp->if_timer = EM_TX_TIMEOUT;
        }
        adapter->num_tx_desc_avail = num_avail;
        return;
}
2447
2448 /*********************************************************************
2449 *
2450 * Get a buffer from system mbuf buffer pool.
2451 *
2452 **********************************************************************/
2453 static int
2454 em_get_buf(int i, struct adapter *adapter,
2455 struct mbuf *nmp)
2456 {
2457 register struct mbuf *mp = nmp;
2458 struct em_buffer *rx_buffer;
2459 struct ifnet *ifp;
2460 bus_addr_t paddr;
2461 int error;
2462
2463 ifp = &adapter->interface_data.ac_if;
2464
2465 if (mp == NULL) {
2466 mp = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
2467 if (mp == NULL) {
2468 adapter->mbuf_cluster_failed++;
2469 return(ENOBUFS);
2470 }
2471 mp->m_len = mp->m_pkthdr.len = MCLBYTES;
2472 } else {
2473 mp->m_len = mp->m_pkthdr.len = MCLBYTES;
2474 mp->m_data = mp->m_ext.ext_buf;
2475 mp->m_next = NULL;
2476 }
2477
2478 if (ifp->if_mtu <= ETHERMTU) {
2479 m_adj(mp, ETHER_ALIGN);
2480 }
2481
2482 rx_buffer = &adapter->rx_buffer_area[i];
2483
2484 /*
2485 * Using memory from the mbuf cluster pool, invoke the
2486 * bus_dma machinery to arrange the memory mapping.
2487 */
2488 error = bus_dmamap_load(adapter->rxtag, rx_buffer->map,
2489 mtod(mp, void *), mp->m_len,
2490 em_dmamap_cb, &paddr, 0);
2491 if (error) {
2492 m_free(mp);
2493 return(error);
2494 }
2495 rx_buffer->m_head = mp;
2496 adapter->rx_desc_base[i].buffer_addr = htole64(paddr);
2497 bus_dmamap_sync(adapter->rxtag, rx_buffer->map, BUS_DMASYNC_PREREAD);
2498
2499 return(0);
2500 }
2501
2502 /*********************************************************************
2503 *
2504 * Allocate memory for rx_buffer structures. Since we use one
2505 * rx_buffer per received packet, the maximum number of rx_buffer's
2506 * that we'll need is equal to the number of receive descriptors
2507 * that we've allocated.
2508 *
2509 **********************************************************************/
2510 static int
2511 em_allocate_receive_structures(struct adapter * adapter)
2512 {
2513 int i, error;
2514 struct em_buffer *rx_buffer;
2515
2516 if (!(adapter->rx_buffer_area =
2517 (struct em_buffer *) malloc(sizeof(struct em_buffer) *
2518 adapter->num_rx_desc, M_DEVBUF,
2519 M_NOWAIT))) {
2520 printf("em%d: Unable to allocate rx_buffer memory\n",
2521 adapter->unit);
2522 return(ENOMEM);
2523 }
2524
2525 bzero(adapter->rx_buffer_area,
2526 sizeof(struct em_buffer) * adapter->num_rx_desc);
2527
2528 error = bus_dma_tag_create(NULL, /* parent */
2529 1, 0, /* alignment, bounds */
2530 BUS_SPACE_MAXADDR, /* lowaddr */
2531 BUS_SPACE_MAXADDR, /* highaddr */
2532 NULL, NULL, /* filter, filterarg */
2533 MCLBYTES, /* maxsize */
2534 1, /* nsegments */
2535 MCLBYTES, /* maxsegsize */
2536 BUS_DMA_ALLOCNOW, /* flags */
2537 NULL, /* lockfunc */
2538 NULL, /* lockarg */
2539 &adapter->rxtag);
2540 if (error != 0) {
2541 printf("em%d: em_allocate_receive_structures: "
2542 "bus_dma_tag_create failed; error %u\n",
2543 adapter->unit, error);
2544 goto fail_0;
2545 }
2546
2547 rx_buffer = adapter->rx_buffer_area;
2548 for (i = 0; i < adapter->num_rx_desc; i++, rx_buffer++) {
2549 error = bus_dmamap_create(adapter->rxtag, BUS_DMA_NOWAIT,
2550 &rx_buffer->map);
2551 if (error != 0) {
2552 printf("em%d: em_allocate_receive_structures: "
2553 "bus_dmamap_create failed; error %u\n",
2554 adapter->unit, error);
2555 goto fail_1;
2556 }
2557 }
2558
2559 for (i = 0; i < adapter->num_rx_desc; i++) {
2560 error = em_get_buf(i, adapter, NULL);
2561 if (error != 0) {
2562 adapter->rx_buffer_area[i].m_head = NULL;
2563 adapter->rx_desc_base[i].buffer_addr = 0;
2564 return(error);
2565 }
2566 }
2567
2568 return(0);
2569
2570 fail_1:
2571 bus_dma_tag_destroy(adapter->rxtag);
2572 fail_0:
2573 adapter->rxtag = NULL;
2574 free(adapter->rx_buffer_area, M_DEVBUF);
2575 adapter->rx_buffer_area = NULL;
2576 return (error);
2577 }
2578
2579 /*********************************************************************
2580 *
2581 * Allocate and initialize receive structures.
2582 *
2583 **********************************************************************/
2584 static int
2585 em_setup_receive_structures(struct adapter * adapter)
2586 {
2587 bzero((void *) adapter->rx_desc_base,
2588 (sizeof(struct em_rx_desc)) * adapter->num_rx_desc);
2589
2590 if (em_allocate_receive_structures(adapter))
2591 return ENOMEM;
2592
2593 /* Setup our descriptor pointers */
2594 adapter->next_rx_desc_to_check = 0;
2595 return(0);
2596 }
2597
2598 /*********************************************************************
2599 *
2600 * Enable receive unit.
2601 *
2602 **********************************************************************/
/*
 * Program the hardware receive unit: interrupt delay/throttle timers,
 * descriptor ring base/length, head/tail pointers, buffer sizing,
 * receive checksum offload, and finally the receive control register
 * (which enables reception).  Assumes adapter->rxdma and the receive
 * buffers have already been allocated.
 */
static void
em_initialize_receive_unit(struct adapter * adapter)
{
        u_int32_t       reg_rctl;
        u_int32_t       reg_rxcsum;
        struct ifnet    *ifp;
        u_int64_t       bus_addr;

        INIT_DEBUGOUT("em_initialize_receive_unit: begin");
        ifp = &adapter->interface_data.ac_if;

        /* Make sure receives are disabled while setting up the descriptor ring */
        E1000_WRITE_REG(&adapter->hw, RCTL, 0);

        /* Set the Receive Delay Timer Register */
        E1000_WRITE_REG(&adapter->hw, RDTR,
                        adapter->rx_int_delay.value | E1000_RDT_FPDB);

        /* Absolute delay and interrupt throttling exist on 82540+. */
        if(adapter->hw.mac_type >= em_82540) {
                E1000_WRITE_REG(&adapter->hw, RADV,
                    adapter->rx_abs_int_delay.value);

                /* Set the interrupt throttling rate.  Value is calculated
                 * as DEFAULT_ITR = 1/(MAX_INTS_PER_SEC * 256ns) */
#define MAX_INTS_PER_SEC        8000
#define DEFAULT_ITR             1000000000/(MAX_INTS_PER_SEC * 256)
                E1000_WRITE_REG(&adapter->hw, ITR, DEFAULT_ITR);
        }

        /* Setup the Base and Length of the Rx Descriptor Ring */
        bus_addr = adapter->rxdma.dma_paddr;
        E1000_WRITE_REG(&adapter->hw, RDBAL, (u_int32_t)bus_addr);
        E1000_WRITE_REG(&adapter->hw, RDBAH, (u_int32_t)(bus_addr >> 32));
        E1000_WRITE_REG(&adapter->hw, RDLEN, adapter->num_rx_desc *
                        sizeof(struct em_rx_desc));

        /* Setup the HW Rx Head and Tail Descriptor Pointers */
        E1000_WRITE_REG(&adapter->hw, RDH, 0);
        E1000_WRITE_REG(&adapter->hw, RDT, adapter->num_rx_desc - 1);

        /* Setup the Receive Control Register */
        reg_rctl = E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_LBM_NO |
                   E1000_RCTL_RDMTS_HALF |
                   (adapter->hw.mc_filter_type << E1000_RCTL_MO_SHIFT);

        /* TBI workaround requires accepting bad packets for inspection. */
        if (adapter->hw.tbi_compatibility_on == TRUE)
                reg_rctl |= E1000_RCTL_SBP;


        /* Buffer-size bits; sizes above 2048 also need BSEX and imply
         * long-packet support. */
        switch (adapter->rx_buffer_len) {
        default:
        case EM_RXBUFFER_2048:
                reg_rctl |= E1000_RCTL_SZ_2048;
                break;
        case EM_RXBUFFER_4096:
                reg_rctl |= E1000_RCTL_SZ_4096 | E1000_RCTL_BSEX | E1000_RCTL_LPE;
                break;
        case EM_RXBUFFER_8192:
                reg_rctl |= E1000_RCTL_SZ_8192 | E1000_RCTL_BSEX | E1000_RCTL_LPE;
                break;
        case EM_RXBUFFER_16384:
                reg_rctl |= E1000_RCTL_SZ_16384 | E1000_RCTL_BSEX | E1000_RCTL_LPE;
                break;
        }

        /* Jumbo MTU requires long-packet reception. */
        if (ifp->if_mtu > ETHERMTU)
                reg_rctl |= E1000_RCTL_LPE;

        /* Enable 82543 Receive Checksum Offload for TCP and UDP */
        if ((adapter->hw.mac_type >= em_82543) &&
            (ifp->if_capenable & IFCAP_RXCSUM)) {
                reg_rxcsum = E1000_READ_REG(&adapter->hw, RXCSUM);
                reg_rxcsum |= (E1000_RXCSUM_IPOFL | E1000_RXCSUM_TUOFL);
                E1000_WRITE_REG(&adapter->hw, RXCSUM, reg_rxcsum);
        }

        /* Enable Receives */
        E1000_WRITE_REG(&adapter->hw, RCTL, reg_rctl);

        return;
}
2684
2685 /*********************************************************************
2686 *
2687 * Free receive related data structures.
2688 *
2689 **********************************************************************/
2690 static void
2691 em_free_receive_structures(struct adapter *adapter)
2692 {
2693 struct em_buffer *rx_buffer;
2694 int i;
2695
2696 INIT_DEBUGOUT("free_receive_structures: begin");
2697
2698 if (adapter->rx_buffer_area != NULL) {
2699 rx_buffer = adapter->rx_buffer_area;
2700 for (i = 0; i < adapter->num_rx_desc; i++, rx_buffer++) {
2701 if (rx_buffer->map != NULL) {
2702 bus_dmamap_unload(adapter->rxtag, rx_buffer->map);
2703 bus_dmamap_destroy(adapter->rxtag, rx_buffer->map);
2704 }
2705 if (rx_buffer->m_head != NULL)
2706 m_freem(rx_buffer->m_head);
2707 rx_buffer->m_head = NULL;
2708 }
2709 }
2710 if (adapter->rx_buffer_area != NULL) {
2711 free(adapter->rx_buffer_area, M_DEVBUF);
2712 adapter->rx_buffer_area = NULL;
2713 }
2714 if (adapter->rxtag != NULL) {
2715 bus_dma_tag_destroy(adapter->rxtag);
2716 adapter->rxtag = NULL;
2717 }
2718 return;
2719 }
2720
2721 /*********************************************************************
2722 *
2723 * This routine executes in interrupt context. It replenishes
2724 * the mbufs in the descriptor and sends data which has been
2725 * dma'ed into host memory to upper layer.
2726 *
2727 * We loop at most count times if count is > 0, or until done if
2728 * count < 0.
2729 *
2730 *********************************************************************/
/*
 * Interrupt-context receive path.  For each completed descriptor
 * (DD bit set): replenish the slot with a fresh cluster, chain the
 * received fragment onto the frame being assembled (fmp/lmp), and on
 * end-of-packet hand the chain to the stack.  Processes at most
 * 'count' complete frames when count > 0, or drains the ring when
 * count < 0.  Called with the adapter lock held; the lock is dropped
 * around if_input().
 */
static void
em_process_receive_interrupts(struct adapter * adapter, int count)
{
        struct ifnet        *ifp;
        struct mbuf         *mp;
        u_int8_t            accept_frame = 0;
        u_int8_t            eop = 0;
        u_int16_t           len, desc_len, prev_len_adj;
        int                 i;

        /* Pointer to the receive descriptor being examined. */
        struct em_rx_desc   *current_desc;

        mtx_assert(&adapter->mtx, MA_OWNED);

        ifp = &adapter->interface_data.ac_if;
        i = adapter->next_rx_desc_to_check;
        current_desc = &adapter->rx_desc_base[i];

        /* Nothing ready at the scan position. */
        if (!((current_desc->status) & E1000_RXD_STAT_DD)) {
#ifdef DBG_STATS
                adapter->no_pkts_avail++;
#endif
                return;
        }

        while ((current_desc->status & E1000_RXD_STAT_DD) && (count != 0)) {
                struct mbuf *m = NULL;

                mp = adapter->rx_buffer_area[i].m_head;
                bus_dmamap_sync(adapter->rxtag, adapter->rx_buffer_area[i].map,
                                BUS_DMASYNC_POSTREAD);

                accept_frame = 1;
                prev_len_adj = 0;
                desc_len = le16toh(current_desc->length);
                if (current_desc->status & E1000_RXD_STAT_EOP) {
                        count--;
                        eop = 1;
                        /* Strip the CRC; if this final fragment is shorter
                         * than the CRC, trim the remainder from the
                         * previous mbuf in the chain (prev_len_adj). */
                        if (desc_len < ETHER_CRC_LEN) {
                                len = 0;
                                prev_len_adj = ETHER_CRC_LEN - desc_len;
                        }
                        else {
                                len = desc_len - ETHER_CRC_LEN;
                        }
                } else {
                        eop = 0;
                        len = desc_len;
                }

                if (current_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK) {
                        u_int8_t            last_byte;
                        u_int32_t           pkt_len = desc_len;

                        if (adapter->fmp != NULL)
                                pkt_len += adapter->fmp->m_pkthdr.len;

                        last_byte = *(mtod(mp, caddr_t) + desc_len - 1);

                        /* TBI workaround: some "error" frames are
                         * acceptable carrier-extension artifacts. */
                        if (TBI_ACCEPT(&adapter->hw, current_desc->status,
                                       current_desc->errors,
                                       pkt_len, last_byte)) {
                                em_tbi_adjust_stats(&adapter->hw,
                                                    &adapter->stats,
                                                    pkt_len,
                                                    adapter->hw.mac_addr);
                                if (len > 0) len--;
                        }
                        else {
                                accept_frame = 0;
                        }
                }

                if (accept_frame) {

                        /* Replenish the slot; on failure recycle the old
                         * mbuf into the ring and drop the partial frame. */
                        if (em_get_buf(i, adapter, NULL) == ENOBUFS) {
                                adapter->dropped_pkts++;
                                em_get_buf(i, adapter, mp);
                                if (adapter->fmp != NULL)
                                        m_freem(adapter->fmp);
                                adapter->fmp = NULL;
                                adapter->lmp = NULL;
                                break;
                        }

                        /* Assign correct length to the current fragment */
                        mp->m_len = len;

                        if (adapter->fmp == NULL) {
                                mp->m_pkthdr.len = len;
                                adapter->fmp = mp;       /* Store the first mbuf */
                                adapter->lmp = mp;
                        } else {
                                /* Chain mbuf's together */
                                mp->m_flags &= ~M_PKTHDR;
                                /*
                                 * Adjust length of previous mbuf in chain if we
                                 * received less than 4 bytes in the last descriptor.
                                 */
                                if (prev_len_adj > 0) {
                                        adapter->lmp->m_len -= prev_len_adj;
                                        adapter->fmp->m_pkthdr.len -= prev_len_adj;
                                }
                                adapter->lmp->m_next = mp;
                                adapter->lmp = adapter->lmp->m_next;
                                adapter->fmp->m_pkthdr.len += len;
                        }

                        if (eop) {
                                adapter->fmp->m_pkthdr.rcvif = ifp;
                                ifp->if_ipackets++;
                                em_receive_checksum(adapter, current_desc,
                                                    adapter->fmp);
#ifndef __NO_STRICT_ALIGNMENT
                                /* Jumbo frames need re-alignment before
                                 * going up the stack. */
                                if (ifp->if_mtu > ETHERMTU &&
                                    em_fixup_rx(adapter) != 0)
                                        goto skip;

#endif
                                if (current_desc->status & E1000_RXD_STAT_VP)
                                        VLAN_INPUT_TAG(ifp, adapter->fmp,
                                                       (current_desc->special &
                                                        E1000_RXD_SPC_VLAN_MASK),
                                                       adapter->fmp = NULL);
#ifndef __NO_STRICT_ALIGNMENT
skip:
#endif
                                m = adapter->fmp;
                                adapter->fmp = NULL;
                                adapter->lmp = NULL;
                        }
                } else {
                        adapter->dropped_pkts++;
                        em_get_buf(i, adapter, mp);
                        if (adapter->fmp != NULL)
                                m_freem(adapter->fmp);
                        adapter->fmp = NULL;
                        adapter->lmp = NULL;
                }

                /* Zero out the receive descriptors status */
                current_desc->status = 0;

                /* Advance the E1000's Receive Queue #0 "Tail Pointer". */
                E1000_WRITE_REG(&adapter->hw, RDT, i);

                /* Advance our pointers to the next descriptor */
                if (++i == adapter->num_rx_desc)
                        i = 0;
                if (m != NULL) {
                        adapter->next_rx_desc_to_check = i;
                        /* Drop the lock across the stack call; the scan
                         * index may move while unlocked, so re-read it. */
                        EM_UNLOCK(adapter);
                        (*ifp->if_input)(ifp, m);
                        EM_LOCK(adapter);
                        i = adapter->next_rx_desc_to_check;
                }
                current_desc = &adapter->rx_desc_base[i];
        }
        adapter->next_rx_desc_to_check = i;
        return;
}
2893
2894 #ifndef __NO_STRICT_ALIGNMENT
2895 /*
2896 * When jumbo frames are enabled we should realign entire payload on
2897 * architecures with strict alignment. This is serious design mistake of 8254x
2898 * as it nullifies DMA operations. 8254x just allows RX buffer size to be
2899 * 2048/4096/8192/16384. What we really want is 2048 - ETHER_ALIGN to align its
2900 * payload. On architecures without strict alignment restrictions 8254x still
2901 * performs unaligned memory access which would reduce the performance too.
2902 * To avoid copying over an entire frame to align, we allocate a new mbuf and
2903 * copy ethernet header to the new mbuf. The new mbuf is prepended into the
2904 * existing mbuf chain.
2905 *
2906 * Be aware, best performance of the 8254x is achived only when jumbo frame is
2907 * not used at all on architectures with strict alignment.
2908 */
/*
 * Re-align a received jumbo frame on strict-alignment architectures
 * (see the comment block above).  Either shifts the data within the
 * existing cluster, or prepends a new header mbuf when the cluster
 * has no spare room.  Returns 0 on success, or ENOMEM if a header
 * mbuf could not be allocated (the frame is dropped and adapter->fmp
 * cleared).
 */
static int
em_fixup_rx(struct adapter *adapter)
{
        struct mbuf *m, *n;
        int error;

        error = 0;
        m = adapter->fmp;
        if (m->m_len <= (MCLBYTES - ETHER_HDR_LEN)) {
                /* Room in the cluster: slide the whole frame forward by
                 * the Ethernet header length so the payload is aligned.
                 * NOTE(review): this relies on bcopy handling overlapping
                 * regions (memmove semantics) — true on FreeBSD. */
                bcopy(m->m_data, m->m_data + ETHER_HDR_LEN, m->m_len);
                m->m_data += ETHER_HDR_LEN;
        } else {
                /* Cluster full: move the header into a fresh mbuf and
                 * prepend it to the chain. */
                MGETHDR(n, M_DONTWAIT, MT_DATA);
                if (n != NULL) {
                        bcopy(m->m_data, n->m_data, ETHER_HDR_LEN);
                        m->m_data += ETHER_HDR_LEN;
                        m->m_len -= ETHER_HDR_LEN;
                        n->m_len = ETHER_HDR_LEN;
                        /* Transfer the packet header to the new lead mbuf. */
                        M_MOVE_PKTHDR(n, m);
                        n->m_next = m;
                        adapter->fmp = n;
                } else {
                        adapter->dropped_pkts++;
                        m_freem(adapter->fmp);
                        adapter->fmp = NULL;
                        error = ENOMEM;
                }
        }

        return (error);
}
2940 #endif
2941
2942 /*********************************************************************
2943 *
2944 * Verify that the hardware indicated that the checksum is valid.
2945 * Inform the stack about the status of checksum so that stack
2946 * doesn't spend time verifying the checksum.
2947 *
2948 *********************************************************************/
2949 static void
2950 em_receive_checksum(struct adapter *adapter,
2951 struct em_rx_desc *rx_desc,
2952 struct mbuf *mp)
2953 {
2954 /* 82543 or newer only */
2955 if ((adapter->hw.mac_type < em_82543) ||
2956 /* Ignore Checksum bit is set */
2957 (rx_desc->status & E1000_RXD_STAT_IXSM)) {
2958 mp->m_pkthdr.csum_flags = 0;
2959 return;
2960 }
2961
2962 if (rx_desc->status & E1000_RXD_STAT_IPCS) {
2963 /* Did it pass? */
2964 if (!(rx_desc->errors & E1000_RXD_ERR_IPE)) {
2965 /* IP Checksum Good */
2966 mp->m_pkthdr.csum_flags = CSUM_IP_CHECKED;
2967 mp->m_pkthdr.csum_flags |= CSUM_IP_VALID;
2968
2969 } else {
2970 mp->m_pkthdr.csum_flags = 0;
2971 }
2972 }
2973
2974 if (rx_desc->status & E1000_RXD_STAT_TCPCS) {
2975 /* Did it pass? */
2976 if (!(rx_desc->errors & E1000_RXD_ERR_TCPE)) {
2977 mp->m_pkthdr.csum_flags |=
2978 (CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
2979 mp->m_pkthdr.csum_data = htons(0xffff);
2980 }
2981 }
2982
2983 return;
2984 }
2985
2986
2987 static void
2988 em_enable_vlans(struct adapter *adapter)
2989 {
2990 uint32_t ctrl;
2991
2992 E1000_WRITE_REG(&adapter->hw, VET, ETHERTYPE_VLAN);
2993
2994 ctrl = E1000_READ_REG(&adapter->hw, CTRL);
2995 ctrl |= E1000_CTRL_VME;
2996 E1000_WRITE_REG(&adapter->hw, CTRL, ctrl);
2997
2998 return;
2999 }
3000
3001 static void
3002 em_disable_vlans(struct adapter *adapter)
3003 {
3004 uint32_t ctrl;
3005
3006 ctrl = E1000_READ_REG(&adapter->hw, CTRL);
3007 ctrl &= ~E1000_CTRL_VME;
3008 E1000_WRITE_REG(&adapter->hw, CTRL, ctrl);
3009
3010 return;
3011 }
3012
3013 static void
3014 em_enable_intr(struct adapter * adapter)
3015 {
3016 E1000_WRITE_REG(&adapter->hw, IMS, (IMS_ENABLE_MASK));
3017 return;
3018 }
3019
3020 static void
3021 em_disable_intr(struct adapter *adapter)
3022 {
3023 /*
3024 * The first version of 82542 had an errata where when link was forced it
3025 * would stay up even up even if the cable was disconnected. Sequence errors
3026 * were used to detect the disconnect and then the driver would unforce the link.
3027 * This code in the in the ISR. For this to work correctly the Sequence error
3028 * interrupt had to be enabled all the time.
3029 */
3030
3031 if (adapter->hw.mac_type == em_82542_rev2_0)
3032 E1000_WRITE_REG(&adapter->hw, IMC,
3033 (0xffffffff & ~E1000_IMC_RXSEQ));
3034 else
3035 E1000_WRITE_REG(&adapter->hw, IMC,
3036 0xffffffff);
3037 return;
3038 }
3039
3040 static int
3041 em_is_valid_ether_addr(u_int8_t *addr)
3042 {
3043 char zero_addr[6] = { 0, 0, 0, 0, 0, 0 };
3044
3045 if ((addr[0] & 1) || (!bcmp(addr, zero_addr, ETHER_ADDR_LEN))) {
3046 return (FALSE);
3047 }
3048
3049 return(TRUE);
3050 }
3051
3052 void
3053 em_write_pci_cfg(struct em_hw *hw,
3054 uint32_t reg,
3055 uint16_t *value)
3056 {
3057 pci_write_config(((struct em_osdep *)hw->back)->dev, reg,
3058 *value, 2);
3059 }
3060
3061 void
3062 em_read_pci_cfg(struct em_hw *hw, uint32_t reg,
3063 uint16_t *value)
3064 {
3065 *value = pci_read_config(((struct em_osdep *)hw->back)->dev,
3066 reg, 2);
3067 return;
3068 }
3069
3070 void
3071 em_pci_set_mwi(struct em_hw *hw)
3072 {
3073 pci_write_config(((struct em_osdep *)hw->back)->dev,
3074 PCIR_COMMAND,
3075 (hw->pci_cmd_word | CMD_MEM_WRT_INVALIDATE), 2);
3076 return;
3077 }
3078
3079 void
3080 em_pci_clear_mwi(struct em_hw *hw)
3081 {
3082 pci_write_config(((struct em_osdep *)hw->back)->dev,
3083 PCIR_COMMAND,
3084 (hw->pci_cmd_word & ~CMD_MEM_WRT_INVALIDATE), 2);
3085 return;
3086 }
3087
/*
 * OS glue: read a 32-bit value from an x86 I/O port (used by the
 * shared code's I/O-mapped register access path).
 */
uint32_t
em_io_read(struct em_hw *hw, unsigned long port)
{
        return(inl(port));
}
3093
/*
 * OS glue: write a 32-bit value to an x86 I/O port (used by the
 * shared code's I/O-mapped register access path).
 */
void
em_io_write(struct em_hw *hw, unsigned long port, uint32_t value)
{
        outl(port, value);
        return;
}
3100
3101 /*********************************************************************
3102 * 82544 Coexistence issue workaround.
3103 * There are 2 issues.
3104 * 1. Transmit Hang issue.
3105 * To detect this issue, following equation can be used...
3106 * SIZE[3:0] + ADDR[2:0] = SUM[3:0].
3107 * If SUM[3:0] is in between 1 to 4, we will have this issue.
3108 *
3109 * 2. DAC issue.
3110 * To detect this issue, following equation can be used...
3111 * SIZE[3:0] + ADDR[2:0] = SUM[3:0].
3112 * If SUM[3:0] is in between 9 to c, we will have this issue.
3113 *
3114 *
3115 * WORKAROUND:
3116 * Make sure we do not have ending address as 1,2,3,4(Hang) or 9,a,b,c (DAC)
3117 *
3118 *** *********************************************************************/
3119 static u_int32_t
3120 em_fill_descriptors (u_int64_t address,
3121 u_int32_t length,
3122 PDESC_ARRAY desc_array)
3123 {
3124 /* Since issue is sensitive to length and address.*/
3125 /* Let us first check the address...*/
3126 u_int32_t safe_terminator;
3127 if (length <= 4) {
3128 desc_array->descriptor[0].address = address;
3129 desc_array->descriptor[0].length = length;
3130 desc_array->elements = 1;
3131 return desc_array->elements;
3132 }
3133 safe_terminator = (u_int32_t)((((u_int32_t)address & 0x7) + (length & 0xF)) & 0xF);
3134 /* if it does not fall between 0x1 to 0x4 and 0x9 to 0xC then return */
3135 if (safe_terminator == 0 ||
3136 (safe_terminator > 4 &&
3137 safe_terminator < 9) ||
3138 (safe_terminator > 0xC &&
3139 safe_terminator <= 0xF)) {
3140 desc_array->descriptor[0].address = address;
3141 desc_array->descriptor[0].length = length;
3142 desc_array->elements = 1;
3143 return desc_array->elements;
3144 }
3145
3146 desc_array->descriptor[0].address = address;
3147 desc_array->descriptor[0].length = length - 4;
3148 desc_array->descriptor[1].address = address + (length - 4);
3149 desc_array->descriptor[1].length = 4;
3150 desc_array->elements = 2;
3151 return desc_array->elements;
3152 }
3153
/**********************************************************************
 *
 * Update the board statistics counters.
 *
 * Reads every hardware statistics register (all are clear-on-read, so
 * values are accumulated into the software copies in adapter->stats)
 * and then refreshes the ifnet counters the OS exposes.  Statement
 * order matters for the 64-bit counters: the low dword must be read
 * before the high dword, and both clear on the high-dword read.
 *
 **********************************************************************/
static void
em_update_stats_counters(struct adapter *adapter)
{
	struct ifnet *ifp;

	/*
	 * Symbol/sequence error counters are only meaningful on copper
	 * media, or on fiber/serdes when the link is actually up.
	 */
	if(adapter->hw.media_type == em_media_type_copper ||
	   (E1000_READ_REG(&adapter->hw, STATUS) & E1000_STATUS_LU)) {
		adapter->stats.symerrs += E1000_READ_REG(&adapter->hw, SYMERRS);
		adapter->stats.sec += E1000_READ_REG(&adapter->hw, SEC);
	}
	adapter->stats.crcerrs += E1000_READ_REG(&adapter->hw, CRCERRS);
	adapter->stats.mpc += E1000_READ_REG(&adapter->hw, MPC);
	adapter->stats.scc += E1000_READ_REG(&adapter->hw, SCC);
	adapter->stats.ecol += E1000_READ_REG(&adapter->hw, ECOL);

	adapter->stats.mcc += E1000_READ_REG(&adapter->hw, MCC);
	adapter->stats.latecol += E1000_READ_REG(&adapter->hw, LATECOL);
	adapter->stats.colc += E1000_READ_REG(&adapter->hw, COLC);
	adapter->stats.dc += E1000_READ_REG(&adapter->hw, DC);
	adapter->stats.rlec += E1000_READ_REG(&adapter->hw, RLEC);
	adapter->stats.xonrxc += E1000_READ_REG(&adapter->hw, XONRXC);
	adapter->stats.xontxc += E1000_READ_REG(&adapter->hw, XONTXC);
	adapter->stats.xoffrxc += E1000_READ_REG(&adapter->hw, XOFFRXC);
	adapter->stats.xofftxc += E1000_READ_REG(&adapter->hw, XOFFTXC);
	adapter->stats.fcruc += E1000_READ_REG(&adapter->hw, FCRUC);
	adapter->stats.prc64 += E1000_READ_REG(&adapter->hw, PRC64);
	adapter->stats.prc127 += E1000_READ_REG(&adapter->hw, PRC127);
	adapter->stats.prc255 += E1000_READ_REG(&adapter->hw, PRC255);
	adapter->stats.prc511 += E1000_READ_REG(&adapter->hw, PRC511);
	adapter->stats.prc1023 += E1000_READ_REG(&adapter->hw, PRC1023);
	adapter->stats.prc1522 += E1000_READ_REG(&adapter->hw, PRC1522);
	adapter->stats.gprc += E1000_READ_REG(&adapter->hw, GPRC);
	adapter->stats.bprc += E1000_READ_REG(&adapter->hw, BPRC);
	adapter->stats.mprc += E1000_READ_REG(&adapter->hw, MPRC);
	adapter->stats.gptc += E1000_READ_REG(&adapter->hw, GPTC);

	/* For the 64-bit byte counters the low dword must be read first. */
	/* Both registers clear on the read of the high dword */

	adapter->stats.gorcl += E1000_READ_REG(&adapter->hw, GORCL);
	adapter->stats.gorch += E1000_READ_REG(&adapter->hw, GORCH);
	adapter->stats.gotcl += E1000_READ_REG(&adapter->hw, GOTCL);
	adapter->stats.gotch += E1000_READ_REG(&adapter->hw, GOTCH);

	adapter->stats.rnbc += E1000_READ_REG(&adapter->hw, RNBC);
	adapter->stats.ruc += E1000_READ_REG(&adapter->hw, RUC);
	adapter->stats.rfc += E1000_READ_REG(&adapter->hw, RFC);
	adapter->stats.roc += E1000_READ_REG(&adapter->hw, ROC);
	adapter->stats.rjc += E1000_READ_REG(&adapter->hw, RJC);

	/* 64-bit total-octet counters: same low-then-high read order. */
	adapter->stats.torl += E1000_READ_REG(&adapter->hw, TORL);
	adapter->stats.torh += E1000_READ_REG(&adapter->hw, TORH);
	adapter->stats.totl += E1000_READ_REG(&adapter->hw, TOTL);
	adapter->stats.toth += E1000_READ_REG(&adapter->hw, TOTH);

	adapter->stats.tpr += E1000_READ_REG(&adapter->hw, TPR);
	adapter->stats.tpt += E1000_READ_REG(&adapter->hw, TPT);
	adapter->stats.ptc64 += E1000_READ_REG(&adapter->hw, PTC64);
	adapter->stats.ptc127 += E1000_READ_REG(&adapter->hw, PTC127);
	adapter->stats.ptc255 += E1000_READ_REG(&adapter->hw, PTC255);
	adapter->stats.ptc511 += E1000_READ_REG(&adapter->hw, PTC511);
	adapter->stats.ptc1023 += E1000_READ_REG(&adapter->hw, PTC1023);
	adapter->stats.ptc1522 += E1000_READ_REG(&adapter->hw, PTC1522);
	adapter->stats.mptc += E1000_READ_REG(&adapter->hw, MPTC);
	adapter->stats.bptc += E1000_READ_REG(&adapter->hw, BPTC);

	/* These counters only exist on 82543 and newer MACs. */
	if (adapter->hw.mac_type >= em_82543) {
		adapter->stats.algnerrc +=
		E1000_READ_REG(&adapter->hw, ALGNERRC);
		adapter->stats.rxerrc +=
		E1000_READ_REG(&adapter->hw, RXERRC);
		adapter->stats.tncrs +=
		E1000_READ_REG(&adapter->hw, TNCRS);
		adapter->stats.cexterr +=
		E1000_READ_REG(&adapter->hw, CEXTERR);
		adapter->stats.tsctc +=
		E1000_READ_REG(&adapter->hw, TSCTC);
		adapter->stats.tsctfc +=
		E1000_READ_REG(&adapter->hw, TSCTFC);
	}
	ifp = &adapter->interface_data.ac_if;

	/* Fill out the OS statistics structure */
	ifp->if_ibytes = adapter->stats.gorcl;
	ifp->if_obytes = adapter->stats.gotcl;
	ifp->if_imcasts = adapter->stats.mprc;
	ifp->if_collisions = adapter->stats.colc;

	/* Rx Errors */
	ifp->if_ierrors =
	adapter->dropped_pkts +
	adapter->stats.rxerrc +
	adapter->stats.crcerrs +
	adapter->stats.algnerrc +
	adapter->stats.rlec +
	adapter->stats.mpc + adapter->stats.cexterr;

	/* Tx Errors */
	ifp->if_oerrors = adapter->stats.ecol + adapter->stats.latecol;

}
3260
3261
3262 /**********************************************************************
3263 *
3264 * This routine is called only when em_display_debug_stats is enabled.
3265 * This routine provides a way to take a look at important statistics
3266 * maintained by the driver and hardware.
3267 *
3268 **********************************************************************/
3269 static void
3270 em_print_debug_info(struct adapter *adapter)
3271 {
3272 int unit = adapter->unit;
3273 uint8_t *hw_addr = adapter->hw.hw_addr;
3274
3275 printf("em%d: Adapter hardware address = %p \n", unit, hw_addr);
3276 printf("em%d:CTRL = 0x%x\n", unit,
3277 E1000_READ_REG(&adapter->hw, CTRL));
3278 printf("em%d:RCTL = 0x%x PS=(0x8402)\n", unit,
3279 E1000_READ_REG(&adapter->hw, RCTL));
3280 printf("em%d:tx_int_delay = %d, tx_abs_int_delay = %d\n", unit,
3281 E1000_READ_REG(&adapter->hw, TIDV),
3282 E1000_READ_REG(&adapter->hw, TADV));
3283 printf("em%d:rx_int_delay = %d, rx_abs_int_delay = %d\n", unit,
3284 E1000_READ_REG(&adapter->hw, RDTR),
3285 E1000_READ_REG(&adapter->hw, RADV));
3286
3287 #ifdef DBG_STATS
3288 printf("em%d: Packets not Avail = %ld\n", unit,
3289 adapter->no_pkts_avail);
3290 printf("em%d: CleanTxInterrupts = %ld\n", unit,
3291 adapter->clean_tx_interrupts);
3292 #endif
3293 printf("em%d: fifo workaround = %lld, fifo_reset = %lld\n", unit,
3294 (long long)adapter->tx_fifo_wrk_cnt,
3295 (long long)adapter->tx_fifo_reset_cnt);
3296 printf("em%d: hw tdh = %d, hw tdt = %d\n", unit,
3297 E1000_READ_REG(&adapter->hw, TDH),
3298 E1000_READ_REG(&adapter->hw, TDT));
3299 printf("em%d: Num Tx descriptors avail = %d\n", unit,
3300 adapter->num_tx_desc_avail);
3301 printf("em%d: Tx Descriptors not avail1 = %ld\n", unit,
3302 adapter->no_tx_desc_avail1);
3303 printf("em%d: Tx Descriptors not avail2 = %ld\n", unit,
3304 adapter->no_tx_desc_avail2);
3305 printf("em%d: Std mbuf failed = %ld\n", unit,
3306 adapter->mbuf_alloc_failed);
3307 printf("em%d: Std mbuf cluster failed = %ld\n", unit,
3308 adapter->mbuf_cluster_failed);
3309 printf("em%d: Driver dropped packets = %ld\n", unit,
3310 adapter->dropped_pkts);
3311
3312 return;
3313 }
3314
3315 static void
3316 em_print_hw_stats(struct adapter *adapter)
3317 {
3318 int unit = adapter->unit;
3319
3320 printf("em%d: Excessive collisions = %lld\n", unit,
3321 (long long)adapter->stats.ecol);
3322 printf("em%d: Symbol errors = %lld\n", unit,
3323 (long long)adapter->stats.symerrs);
3324 printf("em%d: Sequence errors = %lld\n", unit,
3325 (long long)adapter->stats.sec);
3326 printf("em%d: Defer count = %lld\n", unit,
3327 (long long)adapter->stats.dc);
3328
3329 printf("em%d: Missed Packets = %lld\n", unit,
3330 (long long)adapter->stats.mpc);
3331 printf("em%d: Receive No Buffers = %lld\n", unit,
3332 (long long)adapter->stats.rnbc);
3333 printf("em%d: Receive length errors = %lld\n", unit,
3334 (long long)adapter->stats.rlec);
3335 printf("em%d: Receive errors = %lld\n", unit,
3336 (long long)adapter->stats.rxerrc);
3337 printf("em%d: Crc errors = %lld\n", unit,
3338 (long long)adapter->stats.crcerrs);
3339 printf("em%d: Alignment errors = %lld\n", unit,
3340 (long long)adapter->stats.algnerrc);
3341 printf("em%d: Carrier extension errors = %lld\n", unit,
3342 (long long)adapter->stats.cexterr);
3343
3344 printf("em%d: XON Rcvd = %lld\n", unit,
3345 (long long)adapter->stats.xonrxc);
3346 printf("em%d: XON Xmtd = %lld\n", unit,
3347 (long long)adapter->stats.xontxc);
3348 printf("em%d: XOFF Rcvd = %lld\n", unit,
3349 (long long)adapter->stats.xoffrxc);
3350 printf("em%d: XOFF Xmtd = %lld\n", unit,
3351 (long long)adapter->stats.xofftxc);
3352
3353 printf("em%d: Good Packets Rcvd = %lld\n", unit,
3354 (long long)adapter->stats.gprc);
3355 printf("em%d: Good Packets Xmtd = %lld\n", unit,
3356 (long long)adapter->stats.gptc);
3357
3358 return;
3359 }
3360
3361 static int
3362 em_sysctl_debug_info(SYSCTL_HANDLER_ARGS)
3363 {
3364 int error;
3365 int result;
3366 struct adapter *adapter;
3367
3368 result = -1;
3369 error = sysctl_handle_int(oidp, &result, 0, req);
3370
3371 if (error || !req->newptr)
3372 return (error);
3373
3374 if (result == 1) {
3375 adapter = (struct adapter *)arg1;
3376 em_print_debug_info(adapter);
3377 }
3378
3379 return error;
3380 }
3381
3382
3383 static int
3384 em_sysctl_stats(SYSCTL_HANDLER_ARGS)
3385 {
3386 int error;
3387 int result;
3388 struct adapter *adapter;
3389
3390 result = -1;
3391 error = sysctl_handle_int(oidp, &result, 0, req);
3392
3393 if (error || !req->newptr)
3394 return (error);
3395
3396 if (result == 1) {
3397 adapter = (struct adapter *)arg1;
3398 em_print_hw_stats(adapter);
3399 }
3400
3401 return error;
3402 }
3403
3404 static int
3405 em_sysctl_int_delay(SYSCTL_HANDLER_ARGS)
3406 {
3407 struct em_int_delay_info *info;
3408 struct adapter *adapter;
3409 u_int32_t regval;
3410 int error;
3411 int usecs;
3412 int ticks;
3413 int s;
3414
3415 info = (struct em_int_delay_info *)arg1;
3416 adapter = info->adapter;
3417 usecs = info->value;
3418 error = sysctl_handle_int(oidp, &usecs, 0, req);
3419 if (error != 0 || req->newptr == NULL)
3420 return error;
3421 if (usecs < 0 || usecs > E1000_TICKS_TO_USECS(65535))
3422 return EINVAL;
3423 info->value = usecs;
3424 ticks = E1000_USECS_TO_TICKS(usecs);
3425
3426 s = splimp();
3427 regval = E1000_READ_OFFSET(&adapter->hw, info->offset);
3428 regval = (regval & ~0xffff) | (ticks & 0xffff);
3429 /* Handle a few special cases. */
3430 switch (info->offset) {
3431 case E1000_RDTR:
3432 case E1000_82542_RDTR:
3433 regval |= E1000_RDT_FPDB;
3434 break;
3435 case E1000_TIDV:
3436 case E1000_82542_TIDV:
3437 if (ticks == 0) {
3438 adapter->txd_cmd &= ~E1000_TXD_CMD_IDE;
3439 /* Don't write 0 into the TIDV register. */
3440 regval++;
3441 } else
3442 adapter->txd_cmd |= E1000_TXD_CMD_IDE;
3443 break;
3444 }
3445 E1000_WRITE_OFFSET(&adapter->hw, info->offset, regval);
3446 splx(s);
3447 return 0;
3448 }
3449
3450 static void
3451 em_add_int_delay_sysctl(struct adapter *adapter, const char *name,
3452 const char *description, struct em_int_delay_info *info,
3453 int offset, int value)
3454 {
3455 info->adapter = adapter;
3456 info->offset = offset;
3457 info->value = value;
3458 SYSCTL_ADD_PROC(&adapter->sysctl_ctx,
3459 SYSCTL_CHILDREN(adapter->sysctl_tree),
3460 OID_AUTO, name, CTLTYPE_INT|CTLFLAG_RW,
3461 info, 0, em_sysctl_int_delay, "I", description);
3462 }
Cache object: b1c4571c135117c891de8a808a0c1036
|