FreeBSD/Linux Kernel Cross Reference
sys/dev/em/if_em.c
1 /**************************************************************************
2
3 Copyright (c) 2001-2006, Intel Corporation
4 All rights reserved.
5
6 Redistribution and use in source and binary forms, with or without
7 modification, are permitted provided that the following conditions are met:
8
9 1. Redistributions of source code must retain the above copyright notice,
10 this list of conditions and the following disclaimer.
11
12 2. Redistributions in binary form must reproduce the above copyright
13 notice, this list of conditions and the following disclaimer in the
14 documentation and/or other materials provided with the distribution.
15
16 3. Neither the name of the Intel Corporation nor the names of its
17 contributors may be used to endorse or promote products derived from
18 this software without specific prior written permission.
19
20 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 POSSIBILITY OF SUCH DAMAGE.
31
32 ***************************************************************************/
33
34 /*$FreeBSD$*/
35
36 #include <sys/param.h>
37 #include <sys/systm.h>
38 #include <sys/bus.h>
39 #include <sys/endian.h>
40 #include <sys/kernel.h>
41 #include <sys/malloc.h>
42 #include <sys/mbuf.h>
43 #include <sys/module.h>
44 #include <sys/socket.h>
45 #include <sys/sockio.h>
46 #include <sys/sysctl.h>
47
48 #include <machine/bus.h>
49 #include <sys/rman.h>
50 #include <machine/resource.h>
51
52 #include <net/bpf.h>
53 #include <net/ethernet.h>
54 #include <net/if.h>
55 #include <net/if_arp.h>
56 #include <net/if_dl.h>
57 #include <net/if_media.h>
58
59 #include <net/if_types.h>
60 #include <net/if_vlan_var.h>
61
62 #include <netinet/in_systm.h>
63 #include <netinet/in.h>
64 #include <netinet/ip.h>
65 #include <netinet/tcp.h>
66 #include <netinet/udp.h>
67
68 #include <machine/in_cksum.h>
69
70 #include <pci/pcivar.h>
71 #include <pci/pcireg.h>
72 #include <dev/em/if_em_hw.h>
73 #include <dev/em/if_em.h>
74
75 /*********************************************************************
76 * Set this to one to display debug statistics
77 *********************************************************************/
78 int em_display_debug_stats = 0; /* non-zero enables debug statistics display (see header comment above) */
79
80 /*********************************************************************
81 * Driver version
82 *********************************************************************/
83 char em_driver_version[] = "Version - 6.2.9"; /* appended to the device description string by em_probe() */
84
85
86 /*********************************************************************
87 * PCI Device ID Table
88 *
89 * Used by probe to select devices to load on
90 * Last field stores an index into em_strings
91 * Last entry must be all 0s
92 *
93 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
94 *********************************************************************/
95
/*
 * NOTE(review): em_probe() scans this table linearly until it reaches the
 * all-zero sentinel entry; the final field is an index into em_strings[].
 */
96 static em_vendor_info_t em_vendor_info_array[] =
97 {
98 /* Intel(R) PRO/1000 Network Connection */
99 { 0x8086, E1000_DEV_ID_82540EM, PCI_ANY_ID, PCI_ANY_ID, 0},
100 { 0x8086, E1000_DEV_ID_82540EM_LOM, PCI_ANY_ID, PCI_ANY_ID, 0},
101 { 0x8086, E1000_DEV_ID_82540EP, PCI_ANY_ID, PCI_ANY_ID, 0},
102 { 0x8086, E1000_DEV_ID_82540EP_LOM, PCI_ANY_ID, PCI_ANY_ID, 0},
103 { 0x8086, E1000_DEV_ID_82540EP_LP, PCI_ANY_ID, PCI_ANY_ID, 0},
104
105 { 0x8086, E1000_DEV_ID_82541EI, PCI_ANY_ID, PCI_ANY_ID, 0},
106 { 0x8086, E1000_DEV_ID_82541ER, PCI_ANY_ID, PCI_ANY_ID, 0},
107 { 0x8086, E1000_DEV_ID_82541ER_LOM, PCI_ANY_ID, PCI_ANY_ID, 0},
108 { 0x8086, E1000_DEV_ID_82541EI_MOBILE, PCI_ANY_ID, PCI_ANY_ID, 0},
109 { 0x8086, E1000_DEV_ID_82541GI, PCI_ANY_ID, PCI_ANY_ID, 0},
110 { 0x8086, E1000_DEV_ID_82541GI_LF, PCI_ANY_ID, PCI_ANY_ID, 0},
111 { 0x8086, E1000_DEV_ID_82541GI_MOBILE, PCI_ANY_ID, PCI_ANY_ID, 0},
112
113 { 0x8086, E1000_DEV_ID_82542, PCI_ANY_ID, PCI_ANY_ID, 0},
114
115 { 0x8086, E1000_DEV_ID_82543GC_FIBER, PCI_ANY_ID, PCI_ANY_ID, 0},
116 { 0x8086, E1000_DEV_ID_82543GC_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0},
117
118 { 0x8086, E1000_DEV_ID_82544EI_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0},
119 { 0x8086, E1000_DEV_ID_82544EI_FIBER, PCI_ANY_ID, PCI_ANY_ID, 0},
120 { 0x8086, E1000_DEV_ID_82544GC_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0},
121 { 0x8086, E1000_DEV_ID_82544GC_LOM, PCI_ANY_ID, PCI_ANY_ID, 0},
122
123 { 0x8086, E1000_DEV_ID_82545EM_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0},
124 { 0x8086, E1000_DEV_ID_82545EM_FIBER, PCI_ANY_ID, PCI_ANY_ID, 0},
125 { 0x8086, E1000_DEV_ID_82545GM_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0},
126 { 0x8086, E1000_DEV_ID_82545GM_FIBER, PCI_ANY_ID, PCI_ANY_ID, 0},
127 { 0x8086, E1000_DEV_ID_82545GM_SERDES, PCI_ANY_ID, PCI_ANY_ID, 0},
128
129 { 0x8086, E1000_DEV_ID_82546EB_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0},
130 { 0x8086, E1000_DEV_ID_82546EB_FIBER, PCI_ANY_ID, PCI_ANY_ID, 0},
131 { 0x8086, E1000_DEV_ID_82546EB_QUAD_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0},
132 { 0x8086, E1000_DEV_ID_82546GB_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0},
133 { 0x8086, E1000_DEV_ID_82546GB_FIBER, PCI_ANY_ID, PCI_ANY_ID, 0},
134 { 0x8086, E1000_DEV_ID_82546GB_SERDES, PCI_ANY_ID, PCI_ANY_ID, 0},
135 { 0x8086, E1000_DEV_ID_82546GB_PCIE, PCI_ANY_ID, PCI_ANY_ID, 0},
136 { 0x8086, E1000_DEV_ID_82546GB_QUAD_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0},
137 { 0x8086, E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3,
138 PCI_ANY_ID, PCI_ANY_ID, 0},
139
140 { 0x8086, E1000_DEV_ID_82547EI, PCI_ANY_ID, PCI_ANY_ID, 0},
141 { 0x8086, E1000_DEV_ID_82547EI_MOBILE, PCI_ANY_ID, PCI_ANY_ID, 0},
142 { 0x8086, E1000_DEV_ID_82547GI, PCI_ANY_ID, PCI_ANY_ID, 0},
143
144 { 0x8086, E1000_DEV_ID_82571EB_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0},
145 { 0x8086, E1000_DEV_ID_82571EB_FIBER, PCI_ANY_ID, PCI_ANY_ID, 0},
146 { 0x8086, E1000_DEV_ID_82571EB_SERDES, PCI_ANY_ID, PCI_ANY_ID, 0},
147 { 0x8086, E1000_DEV_ID_82571EB_QUAD_COPPER,
148 PCI_ANY_ID, PCI_ANY_ID, 0},
149 { 0x8086, E1000_DEV_ID_82571EB_QUAD_COPPER_LOWPROFILE,
150 PCI_ANY_ID, PCI_ANY_ID, 0},
151
152 { 0x8086, E1000_DEV_ID_82572EI_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0},
153 { 0x8086, E1000_DEV_ID_82572EI_FIBER, PCI_ANY_ID, PCI_ANY_ID, 0},
154 { 0x8086, E1000_DEV_ID_82572EI_SERDES, PCI_ANY_ID, PCI_ANY_ID, 0},
155 { 0x8086, E1000_DEV_ID_82572EI, PCI_ANY_ID, PCI_ANY_ID, 0},
156
157 { 0x8086, E1000_DEV_ID_82573E, PCI_ANY_ID, PCI_ANY_ID, 0},
158 { 0x8086, E1000_DEV_ID_82573E_IAMT, PCI_ANY_ID, PCI_ANY_ID, 0},
159 { 0x8086, E1000_DEV_ID_82573L, PCI_ANY_ID, PCI_ANY_ID, 0},
160 { 0x8086, E1000_DEV_ID_80003ES2LAN_COPPER_SPT,
161 PCI_ANY_ID, PCI_ANY_ID, 0},
162 { 0x8086, E1000_DEV_ID_80003ES2LAN_SERDES_SPT,
163 PCI_ANY_ID, PCI_ANY_ID, 0},
164 { 0x8086, E1000_DEV_ID_80003ES2LAN_COPPER_DPT,
165 PCI_ANY_ID, PCI_ANY_ID, 0},
166 { 0x8086, E1000_DEV_ID_80003ES2LAN_SERDES_DPT,
167 PCI_ANY_ID, PCI_ANY_ID, 0},
168 { 0x8086, E1000_DEV_ID_ICH8_IGP_M_AMT, PCI_ANY_ID, PCI_ANY_ID, 0},
169 { 0x8086, E1000_DEV_ID_ICH8_IGP_AMT, PCI_ANY_ID, PCI_ANY_ID, 0},
170 { 0x8086, E1000_DEV_ID_ICH8_IGP_C, PCI_ANY_ID, PCI_ANY_ID, 0},
171 { 0x8086, E1000_DEV_ID_ICH8_IFE, PCI_ANY_ID, PCI_ANY_ID, 0},
172 { 0x8086, E1000_DEV_ID_ICH8_IFE_GT, PCI_ANY_ID, PCI_ANY_ID, 0},
173 { 0x8086, E1000_DEV_ID_ICH8_IFE_G, PCI_ANY_ID, PCI_ANY_ID, 0},
174 { 0x8086, E1000_DEV_ID_ICH8_IGP_M, PCI_ANY_ID, PCI_ANY_ID, 0},
175
176 /* required last entry */
177 { 0, 0, 0, 0, 0}
178 };
179
180 /*********************************************************************
181 * Table of branding strings for all supported NICs.
182 *********************************************************************/
183
/* Branding strings; selected by the `index' field of em_vendor_info_array. */
184 static char *em_strings[] = {
185 "Intel(R) PRO/1000 Network Connection"
186 };
187
188 /*********************************************************************
189 * Function prototypes
190 *********************************************************************/
191 static int em_probe(device_t);
192 static int em_attach(device_t);
193 static int em_detach(device_t);
194 static int em_shutdown(device_t);
195 static int em_suspend(device_t);
196 static int em_resume(device_t);
197 static void em_start(struct ifnet *);
198 static int em_ioctl(struct ifnet *, u_long, caddr_t);
199 static void em_watchdog(struct ifnet *);
200 static void em_init(void *);
201 static void em_stop(void *);
202 static void em_media_status(struct ifnet *, struct ifmediareq *);
203 static int em_media_change(struct ifnet *);
204 static void em_identify_hardware(struct adapter *);
205 static int em_allocate_pci_resources(struct adapter *);
206 static int em_allocate_intr(struct adapter *);
207 static void em_free_intr(struct adapter *);
208 static void em_free_pci_resources(struct adapter *);
209 static void em_local_timer(void *);
210 static int em_hardware_init(struct adapter *);
211 static void em_setup_interface(device_t, struct adapter *);
212 static void em_setup_transmit_structures(struct adapter *);
213 static void em_initialize_transmit_unit(struct adapter *);
214 static int em_setup_receive_structures(struct adapter *);
215 static void em_initialize_receive_unit(struct adapter *);
216 static void em_enable_intr(struct adapter *);
217 static void em_disable_intr(struct adapter *);
218 static void em_free_transmit_structures(struct adapter *);
219 static void em_free_receive_structures(struct adapter *);
220 static void em_update_stats_counters(struct adapter *);
221 static void em_txeof(struct adapter *);
222 static int em_allocate_receive_structures(struct adapter *);
223 static int em_allocate_transmit_structures(struct adapter *);
224 static void em_rxeof(struct adapter *, int);
225 static void em_receive_checksum(struct adapter *, struct em_rx_desc *,
226 struct mbuf *);
227 static void em_transmit_checksum_setup(struct adapter *, struct mbuf *,
228 uint32_t *, uint32_t *);
229 static void em_set_promisc(struct adapter *);
230 static void em_disable_promisc(struct adapter *);
231 static void em_set_multi(struct adapter *);
232 static void em_print_hw_stats(struct adapter *);
233 static void em_update_link_status(struct adapter *);
234 static int em_get_buf(int i, struct adapter *, struct mbuf *);
235 static void em_enable_vlans(struct adapter *);
236 static void em_disable_vlans(struct adapter *);
237 static int em_encap(struct adapter *, struct mbuf **);
238 static void em_smartspeed(struct adapter *);
239 static int em_82547_fifo_workaround(struct adapter *, int);
240 static void em_82547_update_fifo_head(struct adapter *, int);
241 static int em_82547_tx_fifo_reset(struct adapter *);
242 static void em_82547_move_tail(void *);
243 static int em_dma_malloc(struct adapter *, bus_size_t,
244 struct em_dma_alloc *, int);
245 static void em_dma_free(struct adapter *, struct em_dma_alloc *);
246 static void em_print_debug_info(struct adapter *);
247 static int em_is_valid_ether_addr(uint8_t *);
248 static int em_sysctl_stats(SYSCTL_HANDLER_ARGS);
249 static int em_sysctl_debug_info(SYSCTL_HANDLER_ARGS);
250 static uint32_t em_fill_descriptors (bus_addr_t address, uint32_t length,
251 PDESC_ARRAY desc_array);
252 static int em_sysctl_int_delay(SYSCTL_HANDLER_ARGS);
253 static void em_add_int_delay_sysctl(struct adapter *, const char *,
254 const char *, struct em_int_delay_info *, int, int);
255 static void em_add_rx_process_limit(struct adapter *, const char *,
256 const char *, int *, int);
257 static void em_intr(void *);
258 #ifdef DEVICE_POLLING
259 static poll_handler_t em_poll;
260 #endif /* DEVICE_POLLING */
261
262 /*********************************************************************
263 * FreeBSD Device Interface Entry Points
264 *********************************************************************/
265
/* Newbus device-interface dispatch table for the em(4) driver. */
266 static device_method_t em_methods[] = {
267 /* Device interface */
268 DEVMETHOD(device_probe, em_probe),
269 DEVMETHOD(device_attach, em_attach),
270 DEVMETHOD(device_detach, em_detach),
271 DEVMETHOD(device_shutdown, em_shutdown),
272 DEVMETHOD(device_suspend, em_suspend),
273 DEVMETHOD(device_resume, em_resume),
274 {0, 0}
275 };
276
/* Driver descriptor: per-instance softc is a struct adapter. */
277 static driver_t em_driver = {
278 "em", em_methods, sizeof(struct adapter),
279 };
280
281 static devclass_t em_devclass;
/* Register the driver on the pci bus (module name "if_em"). */
282 DRIVER_MODULE(if_em, pci, em_driver, em_devclass, 0, 0);
283
284 /*********************************************************************
285 * Tunable default values.
286 *********************************************************************/
287
/*
 * Conversions between interrupt-delay register ticks and microseconds,
 * rounded to nearest.  NOTE(review): the 1024/1000 scaling implies one
 * register tick is ~1.024 us -- confirm against the e1000 datasheet.
 */
288 #define E1000_TICKS_TO_USECS(ticks) ((1024 * (ticks) + 500) / 1000)
289 #define E1000_USECS_TO_TICKS(usecs) ((1000 * (usecs) + 512) / 1024)
290
/* Loader-tunable defaults (usecs); pushed into registers via the sysctls set up in em_attach(). */
291 static int em_tx_int_delay_dflt = E1000_TICKS_TO_USECS(EM_TIDV);
292 static int em_rx_int_delay_dflt = E1000_TICKS_TO_USECS(EM_RDTR);
293 static int em_tx_abs_int_delay_dflt = E1000_TICKS_TO_USECS(EM_TADV);
294 static int em_rx_abs_int_delay_dflt = E1000_TICKS_TO_USECS(EM_RADV);
/* Descriptor-ring sizes; validated (alignment, min/max) in em_attach(). */
295 static int em_rxd = EM_DEFAULT_RXD;
296 static int em_txd = EM_DEFAULT_TXD;
297 static int em_smart_pwr_down = FALSE;
298
299 TUNABLE_INT("hw.em.tx_int_delay", &em_tx_int_delay_dflt);
300 TUNABLE_INT("hw.em.rx_int_delay", &em_rx_int_delay_dflt);
301 TUNABLE_INT("hw.em.tx_abs_int_delay", &em_tx_abs_int_delay_dflt);
302 TUNABLE_INT("hw.em.rx_abs_int_delay", &em_rx_abs_int_delay_dflt);
303 TUNABLE_INT("hw.em.rxd", &em_rxd);
304 TUNABLE_INT("hw.em.txd", &em_txd);
305 TUNABLE_INT("hw.em.smart_pwr_down", &em_smart_pwr_down);
306
307 /* How many packets rxeof tries to clean at a time */
308 static int em_rx_process_limit = 100;
309 TUNABLE_INT("hw.em.rx_process_limit", &em_rx_process_limit);
310
311 /*********************************************************************
312 * Device identification routine
313 *
314 * em_probe determines if the driver should be loaded on
315 * adapter based on PCI vendor/device id of the adapter.
316 *
317 * return 0 on success, positive on failure
318 *********************************************************************/
319
320 static int
321 em_probe(device_t dev)
322 {
323 char adapter_name[60];
324 uint16_t pci_vendor_id = 0;
325 uint16_t pci_device_id = 0;
326 uint16_t pci_subvendor_id = 0;
327 uint16_t pci_subdevice_id = 0;
328 em_vendor_info_t *ent;
329
330 INIT_DEBUGOUT("em_probe: begin");
331
332 pci_vendor_id = pci_get_vendor(dev);
333 if (pci_vendor_id != EM_VENDOR_ID)
334 return (ENXIO);
335
336 pci_device_id = pci_get_device(dev);
337 pci_subvendor_id = pci_get_subvendor(dev);
338 pci_subdevice_id = pci_get_subdevice(dev);
339
340 ent = em_vendor_info_array;
341 while (ent->vendor_id != 0) {
342 if ((pci_vendor_id == ent->vendor_id) &&
343 (pci_device_id == ent->device_id) &&
344
345 ((pci_subvendor_id == ent->subvendor_id) ||
346 (ent->subvendor_id == PCI_ANY_ID)) &&
347
348 ((pci_subdevice_id == ent->subdevice_id) ||
349 (ent->subdevice_id == PCI_ANY_ID))) {
350 sprintf(adapter_name, "%s %s",
351 em_strings[ent->index],
352 em_driver_version);
353 device_set_desc_copy(dev, adapter_name);
354 return (0);
355 }
356 ent++;
357 }
358
359 return (ENXIO);
360 }
361
362 /*********************************************************************
363 * Device initialization routine
364 *
365 * The attach entry point is called when the driver is being loaded.
366 * This routine identifies the type of hardware, allocates all resources
367 * and initializes the hardware.
368 *
369 * return 0 on success, positive on failure
370 *********************************************************************/
371
372 static int
373 em_attach(device_t dev)
374 {
375 struct adapter *adapter;
376 int tsize, rsize;
377 int error = 0;
378 int s;
379
380 INIT_DEBUGOUT("em_attach: begin");
/* spl(9)-era locking: network interrupts are masked for the whole attach. */
381 s = splimp();
382
383 adapter = device_get_softc(dev);
384 adapter->dev = adapter->osdep.dev = dev;
385
386 /* SYSCTL stuff */
387 sysctl_ctx_init(&adapter->sysctl_ctx);
388 adapter->sysctl_tree = SYSCTL_ADD_NODE(&adapter->sysctl_ctx,
389 SYSCTL_STATIC_CHILDREN(_hw), OID_AUTO, device_get_nameunit(dev),
390 CTLFLAG_RD, 0, "");
391 if (adapter->sysctl_tree == NULL) {
392 error = EIO;
393 goto err_sysctl;
394 }
395
396 SYSCTL_ADD_PROC(&adapter->sysctl_ctx,
397 SYSCTL_CHILDREN(adapter->sysctl_tree),
398 OID_AUTO, "debug_info", CTLTYPE_INT|CTLFLAG_RW, adapter, 0,
399 em_sysctl_debug_info, "I", "Debug Information");
400
401 SYSCTL_ADD_PROC(&adapter->sysctl_ctx,
402 SYSCTL_CHILDREN(adapter->sysctl_tree),
403 OID_AUTO, "stats", CTLTYPE_INT|CTLFLAG_RW, adapter, 0,
404 em_sysctl_stats, "I", "Statistics");
405
406 callout_init(&adapter->timer);
407 callout_init(&adapter->tx_fifo_timer);
408
409 /* Determine hardware revision */
410 em_identify_hardware(adapter);
411
/* These sysctls write straight through to device registers via the given register offsets. */
412 /* Set up some sysctls for the tunable interrupt delays */
413 em_add_int_delay_sysctl(adapter, "rx_int_delay",
414 "receive interrupt delay in usecs", &adapter->rx_int_delay,
415 E1000_REG_OFFSET(&adapter->hw, RDTR), em_rx_int_delay_dflt);
416 em_add_int_delay_sysctl(adapter, "tx_int_delay",
417 "transmit interrupt delay in usecs", &adapter->tx_int_delay,
418 E1000_REG_OFFSET(&adapter->hw, TIDV), em_tx_int_delay_dflt);
/* Absolute-delay registers (RADV/TADV) only exist on 82540 and later. */
419 if (adapter->hw.mac_type >= em_82540) {
420 em_add_int_delay_sysctl(adapter, "rx_abs_int_delay",
421 "receive interrupt delay limit in usecs",
422 &adapter->rx_abs_int_delay,
423 E1000_REG_OFFSET(&adapter->hw, RADV),
424 em_rx_abs_int_delay_dflt);
425 em_add_int_delay_sysctl(adapter, "tx_abs_int_delay",
426 "transmit interrupt delay limit in usecs",
427 &adapter->tx_abs_int_delay,
428 E1000_REG_OFFSET(&adapter->hw, TADV),
429 em_tx_abs_int_delay_dflt);
430 }
431
432 /* Sysctls for limiting the amount of work done in the taskqueue */
433 em_add_rx_process_limit(adapter, "rx_processing_limit",
434 "max number of rx packets to process", &adapter->rx_process_limit,
435 em_rx_process_limit);
436
437 /*
438 * Validate number of transmit and receive descriptors. It
439 * must not exceed hardware maximum, and must be multiple
440 * of EM_DBA_ALIGN.
441 */
/* Invalid tunables are reported and silently replaced by the defaults, not treated as fatal. */
442 if (((em_txd * sizeof(struct em_tx_desc)) % EM_DBA_ALIGN) != 0 ||
443 (adapter->hw.mac_type >= em_82544 && em_txd > EM_MAX_TXD) ||
444 (adapter->hw.mac_type < em_82544 && em_txd > EM_MAX_TXD_82543) ||
445 (em_txd < EM_MIN_TXD)) {
446 device_printf(dev, "Using %d TX descriptors instead of %d!\n",
447 EM_DEFAULT_TXD, em_txd);
448 adapter->num_tx_desc = EM_DEFAULT_TXD;
449 } else
450 adapter->num_tx_desc = em_txd;
451 if (((em_rxd * sizeof(struct em_rx_desc)) % EM_DBA_ALIGN) != 0 ||
452 (adapter->hw.mac_type >= em_82544 && em_rxd > EM_MAX_RXD) ||
453 (adapter->hw.mac_type < em_82544 && em_rxd > EM_MAX_RXD_82543) ||
454 (em_rxd < EM_MIN_RXD)) {
455 device_printf(dev, "Using %d RX descriptors instead of %d!\n",
456 EM_DEFAULT_RXD, em_rxd);
457 adapter->num_rx_desc = EM_DEFAULT_RXD;
458 } else
459 adapter->num_rx_desc = em_rxd;
460
/* Shared-code (hw) defaults; consumed later by em_hardware_init(). */
461 adapter->hw.autoneg = DO_AUTO_NEG;
462 adapter->hw.wait_autoneg_complete = WAIT_FOR_AUTO_NEG_DEFAULT;
463 adapter->hw.autoneg_advertised = AUTONEG_ADV_DEFAULT;
464 adapter->hw.tbi_compatibility_en = TRUE;
465 adapter->rx_buffer_len = EM_RXBUFFER_2048;
466
467 adapter->hw.phy_init_script = 1;
468 adapter->hw.phy_reset_disable = FALSE;
469
470 #ifndef EM_MASTER_SLAVE
471 adapter->hw.master_slave = em_ms_hw_default;
472 #else
473 adapter->hw.master_slave = EM_MASTER_SLAVE;
474 #endif
475 /*
476 * Set the max frame size assuming standard ethernet
477 * sized frames.
478 */
479 adapter->hw.max_frame_size =
480 ETHERMTU + ETHER_HDR_LEN + ETHER_CRC_LEN;
481
482 adapter->hw.min_frame_size =
483 MINIMUM_ETHERNET_PACKET_SIZE + ETHER_CRC_LEN;
484
485 /*
486 * This controls when hardware reports transmit completion
487 * status.
488 */
489 adapter->hw.report_tx_early = 1;
490 if (em_allocate_pci_resources(adapter)) {
491 device_printf(dev, "Allocation of PCI resources failed\n");
492 error = ENXIO;
493 goto err_pci;
494 }
495
496 /* Initialize eeprom parameters */
497 em_init_eeprom_params(&adapter->hw);
498
/* Ring byte sizes rounded up so the DMA base stays EM_DBA_ALIGN-aligned. */
499 tsize = roundup2(adapter->num_tx_desc * sizeof(struct em_tx_desc),
500 EM_DBA_ALIGN);
501
502 /* Allocate Transmit Descriptor ring */
503 if (em_dma_malloc(adapter, tsize, &adapter->txdma, BUS_DMA_NOWAIT)) {
504 device_printf(dev, "Unable to allocate tx_desc memory\n");
505 error = ENOMEM;
506 goto err_tx_desc;
507 }
508 adapter->tx_desc_base = (struct em_tx_desc *)adapter->txdma.dma_vaddr;
509
510 rsize = roundup2(adapter->num_rx_desc * sizeof(struct em_rx_desc),
511 EM_DBA_ALIGN);
512
513 /* Allocate Receive Descriptor ring */
514 if (em_dma_malloc(adapter, rsize, &adapter->rxdma, BUS_DMA_NOWAIT)) {
515 device_printf(dev, "Unable to allocate rx_desc memory\n");
516 error = ENOMEM;
517 goto err_rx_desc;
518 }
519 adapter->rx_desc_base = (struct em_rx_desc *)adapter->rxdma.dma_vaddr;
520
521 /* Initialize the hardware */
522 if (em_hardware_init(adapter)) {
523 device_printf(dev, "Unable to initialize the hardware\n");
524 error = EIO;
525 goto err_hw_init;
526 }
527
528 /* Copy the permanent MAC address out of the EEPROM */
529 if (em_read_mac_addr(&adapter->hw) < 0) {
530 device_printf(dev, "EEPROM read error while reading MAC"
531 " address\n");
532 error = EIO;
533 goto err_hw_init;
534 }
535
536 if (!em_is_valid_ether_addr(adapter->hw.mac_addr)) {
537 device_printf(dev, "Invalid MAC address\n");
538 error = EIO;
539 goto err_hw_init;
540 }
541 bcopy(adapter->hw.mac_addr, adapter->interface_data.ac_enaddr,
542 ETHER_ADDR_LEN);
543
544 /* Allocate transmit descriptors and buffers */
545 if (em_allocate_transmit_structures(adapter)) {
546 device_printf(dev, "Could not setup transmit structures\n");
547 error = ENOMEM;
548 goto err_tx_struct;
549 }
550
551 /* Allocate receive descriptors and buffers */
552 if (em_allocate_receive_structures(adapter)) {
553 device_printf(dev, "Could not setup receive structures\n");
554 error = ENOMEM;
555 goto err_rx_struct;
556 }
557
558 /* Setup OS specific network interface */
559 em_setup_interface(dev, adapter);
560
/* NOTE(review): em_allocate_intr()'s return value is ignored here; a failed interrupt hookup is not treated as an attach failure -- verify this is intentional. */
561 em_allocate_intr(adapter);
562
563 /* Initialize statistics */
564 em_clear_hw_cntrs(&adapter->hw);
565 em_update_stats_counters(adapter);
566 adapter->hw.get_link_status = 1;
567 em_update_link_status(adapter);
568
569 /* Indicate SOL/IDER usage */
570 if (em_check_phy_reset_block(&adapter->hw))
571 device_printf(dev,
572 "PHY reset is blocked due to SOL/IDER session.\n");
573
574 /* Identify 82544 on PCIX */
575 em_get_bus_info(&adapter->hw);
576 if (adapter->hw.bus_type == em_bus_type_pcix &&
577 adapter->hw.mac_type == em_82544)
578 adapter->pcix_82544 = TRUE;
579 else
580 adapter->pcix_82544 = FALSE;
581
582 splx(s);
583 INIT_DEBUGOUT("em_attach: end");
584
585 return (0);
586
/*
 * Error unwinding: each label releases progressively less, in reverse
 * order of acquisition.  err_hw_init deliberately falls through to
 * err_tx_struct since both DMA rings were already allocated at that
 * point.  NOTE(review): the err_pci path also runs em_free_intr() even
 * though no interrupt has been allocated yet -- presumably safe on a
 * zero-initialized softc; confirm.
 */
587 err_rx_struct:
588 em_free_transmit_structures(adapter);
589 err_hw_init:
590 err_tx_struct:
591 em_dma_free(adapter, &adapter->rxdma);
592 err_rx_desc:
593 em_dma_free(adapter, &adapter->txdma);
594 err_tx_desc:
595 err_pci:
596 em_free_intr(adapter);
597 em_free_pci_resources(adapter);
598 sysctl_ctx_free(&adapter->sysctl_ctx);
599 err_sysctl:
600 splx(s);
601
602 return (error);
603 }
604
605 /*********************************************************************
606 * Device removal routine
607 *
608 * The detach entry point is called when the driver is being removed.
609 * This routine stops the adapter and deallocates all the resources
610 * that were allocated for driver operation.
611 *
612 * return 0 on success, positive on failure
613 *********************************************************************/
614
615 static int
616 em_detach(device_t dev)
617 {
618 struct adapter *adapter = device_get_softc(dev);
619 struct ifnet *ifp = &adapter->interface_data.ac_if;
620 int s;
621
622 INIT_DEBUGOUT("em_detach: begin");
623 s = splimp();
624
/* Tear down the interrupt first so no handler runs during teardown, then flag detach so em_ioctl() rejects further configuration. */
625 em_free_intr(adapter);
626 adapter->in_detach = 1;
627 em_stop(adapter);
628 em_phy_hw_reset(&adapter->hw);
629 ether_ifdetach(&adapter->interface_data.ac_if, ETHER_BPF_SUPPORTED);
630
631 em_free_pci_resources(adapter);
632 bus_generic_detach(dev);
633
/* Buffers are freed only after the interface is detached and the hardware stopped/reset above. */
634 em_free_transmit_structures(adapter);
635 em_free_receive_structures(adapter);
636
637 /* Free Transmit Descriptor ring */
638 if (adapter->tx_desc_base) {
639 em_dma_free(adapter, &adapter->txdma);
640 adapter->tx_desc_base = NULL;
641 }
642
643 /* Free Receive Descriptor ring */
644 if (adapter->rx_desc_base) {
645 em_dma_free(adapter, &adapter->rxdma);
646 adapter->rx_desc_base = NULL;
647 }
648
649 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
650 ifp->if_timer = 0;
651
/* Freeing the sysctl context removes every OID registered in em_attach(). */
652 adapter->sysctl_tree = NULL;
653 sysctl_ctx_free(&adapter->sysctl_ctx);
654
655 splx(s);
656 return (0);
657 }
658
659 /*********************************************************************
660 *
661 * Shutdown entry point
662 *
663 **********************************************************************/
664
665 static int
666 em_shutdown(device_t dev)
667 {
668 struct adapter *adapter = device_get_softc(dev);
669 em_stop(adapter);
670 return (0);
671 }
672
673 /*
674 * Suspend/resume device methods.
675 */
676 static int
677 em_suspend(device_t dev)
678 {
679 struct adapter *adapter = device_get_softc(dev);
680 int s;
681
682 s = splimp();
683 em_stop(adapter);
684 splx(s);
685
686 return bus_generic_suspend(dev);
687 }
688
689 static int
690 em_resume(device_t dev)
691 {
692 struct adapter *adapter = device_get_softc(dev);
693 struct ifnet *ifp = &adapter->interface_data.ac_if;
694 int s;
695
696 s = splimp();
697 em_init(adapter);
698 if ((ifp->if_flags & IFF_UP) &&
699 (ifp->if_flags & IFF_RUNNING))
700 em_start(ifp);
701 splx(s);
702
703 return bus_generic_resume(dev);
704 }
705
706
707 /*********************************************************************
708 * Transmit entry point
709 *
710 * em_start is called by the stack to initiate a transmit.
711 * The driver will remain in this routine as long as there are
712 * packets to transmit and transmit resources are available.
713 * In case resources are not available stack is notified and
714 * the packet is requeued.
715 **********************************************************************/
716
717 static void
718 em_start(struct ifnet *ifp)
719 {
720 struct adapter *adapter = ifp->if_softc;
721 struct mbuf *m_head;
722 int s;
723
/* Bail unless RUNNING is set and OACTIVE is clear (single masked test). */
724 if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) !=
725 IFF_RUNNING)
726 return;
/* No point queueing to hardware while the link is down. */
727 if (!adapter->link_active)
728 return;
729
730 s = splimp();
731 while (ifp->if_snd.ifq_head != NULL) {
732
733 IF_DEQUEUE(&ifp->if_snd, m_head);
734 if (m_head == NULL)
735 break;
736 /*
737 * em_encap() can modify our pointer, and or make it NULL on
738 * failure. In that event, we can't requeue.
739 */
/* On encap failure with the mbuf intact: mark OACTIVE, push the packet back to the head of the queue, and stop -- a later txeof will restart us. */
740 if (em_encap(adapter, &m_head)) {
741 if (m_head == NULL)
742 break;
743 ifp->if_flags |= IFF_OACTIVE;
744 IF_PREPEND(&ifp->if_snd, m_head);
745 break;
746 }
747
748 /* Send a copy of the frame to the BPF listener */
749 if (ifp->if_bpf)
750 bpf_mtap(ifp, m_head);
751
752 /* Set timeout in case hardware has problems transmitting. */
753 ifp->if_timer = EM_TX_TIMEOUT;
754 }
755 splx(s);
756 }
757
758 /*********************************************************************
759 * Ioctl entry point
760 *
761 * em_ioctl is called when the user wants to configure the
762 * interface.
763 *
764 * return 0 on success, positive on failure
765 **********************************************************************/
766
767 static int
768 em_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
769 {
770 struct adapter *adapter = ifp->if_softc;
771 struct ifreq *ifr = (struct ifreq *)data;
772 int error = 0;
773 int s;
774
775 s = splimp();
776
777 if (adapter->in_detach)
778 goto out;
779
780 switch (command) {
781 case SIOCSIFADDR:
782 case SIOCGIFADDR:
783 IOCTL_DEBUGOUT("ioctl rcv'd: SIOCxIFADDR (Get/Set Interface Addr)");
784 ether_ioctl(ifp, command, data);
785 break;
786 case SIOCSIFMTU:
787 {
788 int max_frame_size;
789 uint16_t eeprom_data = 0;
790
791 IOCTL_DEBUGOUT("ioctl rcv'd: SIOCSIFMTU (Set Interface MTU)");
792
793 switch (adapter->hw.mac_type) {
794 case em_82573:
795 /*
796 * 82573 only supports jumbo frames
797 * if ASPM is disabled.
798 */
799 em_read_eeprom(&adapter->hw,
800 EEPROM_INIT_3GIO_3, 1, &eeprom_data);
801 if (eeprom_data & EEPROM_WORD1A_ASPM_MASK) {
802 max_frame_size = ETHER_MAX_LEN;
803 break;
804 }
805 /* Allow Jumbo frames - fall thru */
806 case em_82571:
807 case em_82572:
808 case em_80003es2lan: /* Limit Jumbo Frame size */
809 max_frame_size = 9234;
810 break;
811 case em_ich8lan:
812 /* ICH8 does not support jumbo frames */
813 max_frame_size = ETHER_MAX_LEN;
814 break;
815 default:
816 max_frame_size = MAX_JUMBO_FRAME_SIZE;
817 }
818 if (ifr->ifr_mtu > max_frame_size - ETHER_HDR_LEN -
819 ETHER_CRC_LEN) {
820 error = EINVAL;
821 break;
822 }
823
824 ifp->if_mtu = ifr->ifr_mtu;
825 adapter->hw.max_frame_size =
826 ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
827 em_init(adapter);
828 break;
829 }
830 case SIOCSIFFLAGS:
831 IOCTL_DEBUGOUT("ioctl rcv'd:\
832 SIOCSIFFLAGS (Set Interface Flags)");
833 if (ifp->if_flags & IFF_UP) {
834 if (!(ifp->if_flags & IFF_RUNNING)) {
835 em_init(adapter);
836 }
837
838 em_disable_promisc(adapter);
839 em_set_promisc(adapter);
840 } else {
841 if (ifp->if_flags & IFF_RUNNING) {
842 em_stop(adapter);
843 }
844 }
845 break;
846 case SIOCADDMULTI:
847 case SIOCDELMULTI:
848 IOCTL_DEBUGOUT("ioctl rcv'd: SIOC(ADD|DEL)MULTI");
849 if (ifp->if_flags & IFF_RUNNING) {
850 em_disable_intr(adapter);
851 em_set_multi(adapter);
852 if (adapter->hw.mac_type == em_82542_rev2_0) {
853 em_initialize_receive_unit(adapter);
854 }
855 #ifdef DEVICE_POLLING
856 if (!(ifp->if_ipending & IFF_POLLING))
857 #endif
858 em_enable_intr(adapter);
859 }
860 break;
861 case SIOCSIFMEDIA:
862 /* Check SOL/IDER usage */
863 if (em_check_phy_reset_block(&adapter->hw)) {
864 device_printf(adapter->dev, "Media change is"
865 "blocked due to SOL/IDER session.\n");
866 break;
867 }
868 case SIOCGIFMEDIA:
869 IOCTL_DEBUGOUT("ioctl rcv'd: \
870 SIOCxIFMEDIA (Get/Set Interface Media)");
871 error = ifmedia_ioctl(ifp, ifr, &adapter->media, command);
872 break;
873 case SIOCSIFCAP:
874 {
875 int mask, reinit;
876
877 IOCTL_DEBUGOUT("ioctl rcv'd: SIOCSIFCAP (Set Capabilities)");
878 reinit = 0;
879 mask = ifr->ifr_reqcap ^ ifp->if_capenable;
880 if (mask & IFCAP_HWCSUM) {
881 ifp->if_capenable ^= IFCAP_HWCSUM;
882 reinit = 1;
883 }
884 if (reinit && (ifp->if_flags & IFF_RUNNING))
885 em_init(adapter);
886 break;
887 }
888 default:
889 IOCTL_DEBUGOUT1("ioctl received: UNKNOWN (0x%x)", (int)command);
890 error = EINVAL;
891 }
892
893 out:
894 splx(s);
895 return (error);
896 }
897
898 /*********************************************************************
899 * Watchdog entry point
900 *
901 * This routine is called whenever hardware quits transmitting.
902 *
903 **********************************************************************/
904
905 static void
906 em_watchdog(struct ifnet *ifp)
907 {
908 struct adapter *adapter = ifp->if_softc;
909
910 /* If we are in this routine because of pause frames, then
911 * don't reset the hardware.
912 */
/* TXOFF set means transmission is paused by flow control, not hung: re-arm the timer and leave the hardware alone. */
913 if (E1000_READ_REG(&adapter->hw, STATUS) & E1000_STATUS_TXOFF) {
914 ifp->if_timer = EM_TX_TIMEOUT;
915 return;
916 }
917
/* NOTE(review): the message is printed only when em_check_for_link() returns 0 (i.e. the link check succeeded) -- matches the historical driver, but confirm the intended polarity. */
918 if (em_check_for_link(&adapter->hw) == 0)
919 device_printf(adapter->dev, "watchdog timeout -- resetting\n");
920
921 ifp->if_flags &= ~IFF_RUNNING;
922 adapter->watchdog_events++;
923
/* Full re-init is the recovery path for a genuine transmit hang. */
924 em_init(adapter);
925 }
926
927 /*********************************************************************
928 * Init entry point
929 *
930 * This routine is used in two ways. It is used by the stack as
931 * init entry point in network interface structure. It is also used
932 * by the driver as a hw/sw initialization routine to get to a
933 * consistent state.
934 *
935 * return 0 on success, positive on failure
936 **********************************************************************/
937
static void
em_init(void *arg)
{
	struct adapter *adapter = arg;
	struct ifnet *ifp = &adapter->interface_data.ac_if;
	device_t dev = adapter->dev;
	uint32_t pba;
	int s;

	INIT_DEBUGOUT("em_init: begin");

	/* Block network interrupts for the whole (re)initialization. */
	s = splimp();

	/* Quiesce the adapter before reprogramming it. */
	em_stop(adapter);

	/*
	 * Packet Buffer Allocation (PBA)
	 * Writing PBA sets the receive portion of the buffer
	 * the remainder is used for the transmit buffer.
	 *
	 * Devices before the 82547 had a Packet Buffer of 64K.
	 * Default allocation: PBA=48K for Rx, leaving 16K for Tx.
	 * After the 82547 the buffer was reduced to 40K.
	 * Default allocation: PBA=30K for Rx, leaving 10K for Tx.
	 * Note: default does not leave enough room for Jumbo Frame >10k.
	 */
	switch (adapter->hw.mac_type) {
	case em_82547:
	case em_82547_rev_2: /* 82547: Total Packet Buffer is 40K */
		if (adapter->hw.max_frame_size > EM_RXBUFFER_8192)
			pba = E1000_PBA_22K; /* 22K for Rx, 18K for Tx */
		else
			pba = E1000_PBA_30K; /* 30K for Rx, 10K for Tx */
		/* Software state for the 82547 Tx FIFO hang workaround. */
		adapter->tx_fifo_head = 0;
		adapter->tx_head_addr = pba << EM_TX_HEAD_ADDR_SHIFT;
		adapter->tx_fifo_size =
		    (E1000_PBA_40K - pba) << EM_PBA_BYTES_SHIFT;
		break;
	/* Total Packet Buffer on these is 48K */
	case em_82571:
	case em_82572:
	case em_80003es2lan:
		pba = E1000_PBA_32K; /* 32K for Rx, 16K for Tx */
		break;
	case em_82573: /* 82573: Total Packet Buffer is 32K */
		pba = E1000_PBA_12K; /* 12K for Rx, 20K for Tx */
		break;
	case em_ich8lan:
		pba = E1000_PBA_8K;
		break;
	default:
		/* Devices before 82547 had a Packet Buffer of 64K. */
		if(adapter->hw.max_frame_size > EM_RXBUFFER_8192)
			pba = E1000_PBA_40K; /* 40K for Rx, 24K for Tx */
		else
			pba = E1000_PBA_48K; /* 48K for Rx, 16K for Tx */
	}

	INIT_DEBUGOUT1("em_init: pba=%dK",pba);
	E1000_WRITE_REG(&adapter->hw, PBA, pba);

	/* Get the latest mac address, User can use a LAA */
	bcopy(adapter->interface_data.ac_enaddr, adapter->hw.mac_addr,
	    ETHER_ADDR_LEN);

	/* Initialize the hardware */
	if (em_hardware_init(adapter)) {
		device_printf(dev, "Unable to initialize the hardware\n");
		splx(s);
		return;
	}
	em_update_link_status(adapter);

	em_enable_vlans(adapter);

	/* Set hardware offload abilities */
	ifp->if_hwassist = 0;
	if (adapter->hw.mac_type >= em_82543) {
		/* Tx checksum offload is only available on 82543+. */
		if (ifp->if_capenable & IFCAP_TXCSUM)
			ifp->if_hwassist |= EM_CHECKSUM_FEATURES;
	}

	/* Prepare transmit descriptors and buffers */
	em_setup_transmit_structures(adapter);
	em_initialize_transmit_unit(adapter);

	/* Setup Multicast table */
	em_set_multi(adapter);

	/* Prepare receive descriptors and buffers */
	if (em_setup_receive_structures(adapter)) {
		device_printf(dev, "Could not setup receive structures\n");
		em_stop(adapter);
		splx(s);
		return;
	}
	em_initialize_receive_unit(adapter);

	/* Don't lose promiscuous settings */
	em_set_promisc(adapter);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	/* Start the periodic link/statistics timer. */
	callout_reset(&adapter->timer, hz, em_local_timer, adapter);
	em_clear_hw_cntrs(&adapter->hw);

#ifdef DEVICE_POLLING
	/*
	 * Only enable interrupts if we are not polling, make sure
	 * they are off otherwise.
	 */
	if (ifp->if_ipending & IFF_POLLING)
		em_disable_intr(adapter);
	else
#endif /* DEVICE_POLLING */
	em_enable_intr(adapter);

	/* Don't reset the phy next time init gets called */
	adapter->hw.phy_reset_disable = TRUE;

	splx(s);
}
1061
1062
1063 #ifdef DEVICE_POLLING
1064 /*********************************************************************
1065 *
1066 * Polling routine
1067 *
1068 *********************************************************************/
static void
em_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
{
	struct adapter *adapter = ifp->if_softc;
	uint32_t reg_icr;

	if (cmd == POLL_DEREGISTER) {	/* final call, enable interrupts */
		em_enable_intr(adapter);
		return;
	}

	if (cmd == POLL_AND_CHECK_STATUS) {
		/* Pick up link-state changes that an interrupt would
		 * normally have reported. */
		reg_icr = E1000_READ_REG(&adapter->hw, ICR);
		if (reg_icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
			callout_stop(&adapter->timer);
			adapter->hw.get_link_status = 1;
			em_check_for_link(&adapter->hw);
			em_update_link_status(adapter);
			callout_reset(&adapter->timer, hz,
			    em_local_timer, adapter);
		}
	}
	if (!(ifp->if_flags & IFF_RUNNING))
		return;

	/* Service up to 'count' received frames and reap Tx completions. */
	em_rxeof(adapter, count);
	em_txeof(adapter);

	/* Kick the transmitter if packets are queued. */
	if (ifp->if_snd.ifq_head != NULL)
		em_start(ifp);
}
1100 #endif /* DEVICE_POLLING */
1101
1102 /*********************************************************************
1103 *
1104 * Interrupt Service routine
1105 *
1106 *********************************************************************/
1107 #define EM_MAX_INTR 10
1108
static void
em_intr(void *arg)
{
	struct adapter *adapter = arg;
	struct ifnet *ifp;
	uint32_t reg_icr;
	int i;

	ifp = &adapter->interface_data.ac_if;

#ifdef DEVICE_POLLING
	/* While polling is active the poll routine does all the work. */
	if (ifp->if_ipending & IFF_POLLING)
		return;

	/* First interrupt after polling was enabled: switch over. */
	if (ether_poll_register(em_poll, ifp)) {
		em_disable_intr(adapter);
		em_poll(ifp, 0, 1);
		return;
	}
#endif /* DEVICE_POLLING */

	/* Reading ICR acknowledges (clears) the pending causes. */
	reg_icr = E1000_READ_REG(&adapter->hw, ICR);

	if ((reg_icr == 0) || (adapter->hw.mac_type >= em_82571 &&
	    (reg_icr & E1000_ICR_INT_ASSERTED) == 0) ||
	    /*
	     * XXX: some laptops trigger several spurious interrupts
	     * on em(4) when in the resume cycle. The ICR register
	     * reports all-ones value in this case. Processing such
	     * interrupts would lead to a freeze. I don't know why.
	     */
	    (reg_icr == 0xffffffff))
		goto leaving;

	/* Bound the work done per interrupt to EM_MAX_INTR passes. */
	for (i = 0;i < EM_MAX_INTR; ++i) {
		if (ifp->if_flags & IFF_RUNNING) {
			em_rxeof(adapter, adapter->rx_process_limit);
			em_txeof(adapter);
		}
		/* Link status change */
		if (reg_icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
			callout_stop(&adapter->timer);
			adapter->hw.get_link_status = 1;
			em_check_for_link(&adapter->hw);
			em_update_link_status(adapter);
			callout_reset(&adapter->timer, hz,
			    em_local_timer, adapter);
		}

		/* Count receiver overruns for statistics. */
		if (reg_icr & E1000_ICR_RXO)
			adapter->rx_overruns++;
	}

leaving:
	/* Restart transmission if frames queued up during processing. */
	if (ifp->if_flags & IFF_RUNNING && ifp->if_snd.ifq_head != NULL)
		em_start(ifp);
}
1166
1167 /*********************************************************************
1168 *
1169 * Media Ioctl callback
1170 *
1171 * This routine is called whenever the user queries the status of
1172 * the interface using ifconfig.
1173 *
1174 **********************************************************************/
1175 static void
1176 em_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
1177 {
1178 struct adapter *adapter = ifp->if_softc;
1179 u_char fiber_type = IFM_1000_SX;
1180
1181 INIT_DEBUGOUT("em_media_status: begin");
1182
1183 em_check_for_link(&adapter->hw);
1184 em_update_link_status(adapter);
1185
1186 ifmr->ifm_status = IFM_AVALID;
1187 ifmr->ifm_active = IFM_ETHER;
1188
1189 if (!adapter->link_active)
1190 return;
1191
1192 ifmr->ifm_status |= IFM_ACTIVE;
1193
1194 if ((adapter->hw.media_type == em_media_type_fiber) ||
1195 (adapter->hw.media_type == em_media_type_internal_serdes)) {
1196 if (adapter->hw.mac_type == em_82545)
1197 fiber_type = IFM_1000_LX;
1198 ifmr->ifm_active |= fiber_type | IFM_FDX;
1199 } else {
1200 switch (adapter->link_speed) {
1201 case 10:
1202 ifmr->ifm_active |= IFM_10_T;
1203 break;
1204 case 100:
1205 ifmr->ifm_active |= IFM_100_TX;
1206 break;
1207 case 1000:
1208 ifmr->ifm_active |= IFM_1000_TX;
1209 break;
1210 }
1211 if (adapter->link_duplex == FULL_DUPLEX)
1212 ifmr->ifm_active |= IFM_FDX;
1213 else
1214 ifmr->ifm_active |= IFM_HDX;
1215 }
1216 }
1217
1218 /*********************************************************************
1219 *
1220 * Media Ioctl callback
1221 *
1222 * This routine is called when the user changes speed/duplex using
1223 * media/mediopt option with ifconfig.
1224 *
1225 **********************************************************************/
1226 static int
1227 em_media_change(struct ifnet *ifp)
1228 {
1229 struct adapter *adapter = ifp->if_softc;
1230 struct ifmedia *ifm = &adapter->media;
1231
1232 INIT_DEBUGOUT("em_media_change: begin");
1233
1234 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
1235 return (EINVAL);
1236
1237 switch (IFM_SUBTYPE(ifm->ifm_media)) {
1238 case IFM_AUTO:
1239 adapter->hw.autoneg = DO_AUTO_NEG;
1240 adapter->hw.autoneg_advertised = AUTONEG_ADV_DEFAULT;
1241 break;
1242 case IFM_1000_LX:
1243 case IFM_1000_SX:
1244 case IFM_1000_TX:
1245 adapter->hw.autoneg = DO_AUTO_NEG;
1246 adapter->hw.autoneg_advertised = ADVERTISE_1000_FULL;
1247 break;
1248 case IFM_100_TX:
1249 adapter->hw.autoneg = FALSE;
1250 adapter->hw.autoneg_advertised = 0;
1251 if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX)
1252 adapter->hw.forced_speed_duplex = em_100_full;
1253 else
1254 adapter->hw.forced_speed_duplex = em_100_half;
1255 break;
1256 case IFM_10_T:
1257 adapter->hw.autoneg = FALSE;
1258 adapter->hw.autoneg_advertised = 0;
1259 if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX)
1260 adapter->hw.forced_speed_duplex = em_10_full;
1261 else
1262 adapter->hw.forced_speed_duplex = em_10_half;
1263 break;
1264 default:
1265 device_printf(adapter->dev, "Unsupported media type\n");
1266 }
1267
1268 /* As the speed/duplex settings my have changed we need to
1269 * reset the PHY.
1270 */
1271 adapter->hw.phy_reset_disable = FALSE;
1272
1273 em_init(adapter);
1274
1275 return (0);
1276 }
1277
1278 /*********************************************************************
1279 *
1280 * This routine maps the mbufs to tx descriptors.
1281 *
1282 * return 0 on success, positive on failure
1283 **********************************************************************/
1284 static int
1285 em_encap(struct adapter *adapter, struct mbuf **m_headp)
1286 {
1287 struct ifnet *ifp = &adapter->interface_data.ac_if;
1288 bus_dma_segment_t segs[EM_MAX_SCATTER];
1289 bus_dmamap_t map;
1290 struct em_buffer *tx_buffer, *tx_buffer_mapped;
1291 struct em_tx_desc *current_tx_desc;
1292 struct mbuf *m_head;
1293 struct ifvlan *ifv = NULL;
1294 uint32_t txd_upper, txd_lower, txd_used, txd_saved;
1295 int nsegs, i, j, first, last = 0;
1296 int error;
1297
1298 m_head = *m_headp;
1299 current_tx_desc = NULL;
1300 txd_upper = txd_lower = txd_used = txd_saved = 0;
1301
1302 /*
1303 * Force a cleanup if number of TX descriptors
1304 * available hits the threshold
1305 */
1306 if (adapter->num_tx_desc_avail <= EM_TX_CLEANUP_THRESHOLD) {
1307 em_txeof(adapter);
1308 /* Now do we at least have a minimal? */
1309 if (adapter->num_tx_desc_avail <= EM_TX_OP_THRESHOLD) {
1310 adapter->no_tx_desc_avail1++;
1311 return (ENOBUFS);
1312 }
1313 }
1314
1315 /* Find out if we are in vlan mode. */
1316 if ((m_head->m_flags & (M_PROTO1|M_PKTHDR)) == (M_PROTO1|M_PKTHDR) &&
1317 m_head->m_pkthdr.rcvif != NULL &&
1318 m_head->m_pkthdr.rcvif->if_type == IFT_L2VLAN)
1319 ifv = m_head->m_pkthdr.rcvif->if_softc;
1320
1321 /*
1322 * When operating in promiscuous mode, hardware encapsulation for
1323 * packets is disabled. This means we have to add the vlan
1324 * encapsulation in the driver, since it will have come down from the
1325 * VLAN layer with a tag instead of a VLAN header.
1326 */
1327 if (ifv != NULL && adapter->em_insert_vlan_header) {
1328 struct ether_vlan_header *evl;
1329 struct ether_header eh;
1330
1331 m_head = m_pullup(m_head, sizeof(eh));
1332 if (m_head == NULL) {
1333 *m_headp = NULL;
1334 return (ENOBUFS);
1335 }
1336 eh = *mtod(m_head, struct ether_header *);
1337
1338 /*
1339 * If it's already tagged don't add a new one. In 6.x and
1340 * later this case is handled by removing the VLAN mtag
1341 * once the vlan header is prepended.
1342 */
1343 if (eh.ether_type == htons(ETHERTYPE_VLAN))
1344 goto tagged;
1345 M_PREPEND(m_head, sizeof(*evl), M_DONTWAIT);
1346 if (m_head == NULL) {
1347 *m_headp = NULL;
1348 return (ENOBUFS);
1349 }
1350 m_head = m_pullup(m_head, sizeof(*evl));
1351 if (m_head == NULL) {
1352 *m_headp = NULL;
1353 return (ENOBUFS);
1354 }
1355 evl = mtod(m_head, struct ether_vlan_header *);
1356 bcopy(&eh, evl, sizeof(*evl));
1357 evl->evl_proto = evl->evl_encap_proto;
1358 evl->evl_encap_proto = htons(ETHERTYPE_VLAN);
1359 evl->evl_tag = htons(ifv->ifv_tag);
1360 tagged:
1361 ifv = NULL;
1362 *m_headp = m_head;
1363 }
1364
1365
1366 /*
1367 * Capture the first descriptor index,
1368 * this descriptor will have the index
1369 * of the EOP which is the only one that
1370 * now gets a DONE bit writeback.
1371 */
1372 first = adapter->next_avail_tx_desc;
1373
1374 /*
1375 * Map the packet for DMA.
1376 */
1377 tx_buffer = &adapter->tx_buffer_area[first];
1378 tx_buffer_mapped = tx_buffer;
1379 map = tx_buffer->map;
1380 error = bus_dmamap_load_mbuf_sg(adapter->txtag, tx_buffer->map,
1381 *m_headp, segs, &nsegs, BUS_DMA_NOWAIT);
1382 if (error == EFBIG) {
1383 struct mbuf *m;
1384
1385 m = m_defrag(*m_headp, M_DONTWAIT);
1386 if (m == NULL) {
1387 adapter->mbuf_alloc_failed++;
1388 m_freem(*m_headp);
1389 *m_headp = NULL;
1390 return (ENOBUFS);
1391 }
1392 *m_headp = m;
1393 m_head = *m_headp;
1394
1395 /* Try it again */
1396 error = bus_dmamap_load_mbuf_sg(adapter->txtag, tx_buffer->map,
1397 *m_headp, segs, &nsegs, BUS_DMA_NOWAIT);
1398
1399 if (error == ENOMEM) {
1400 adapter->no_tx_dma_setup++;
1401 return (error);
1402 } else if (error != 0) {
1403 adapter->no_tx_dma_setup++;
1404 m_freem(*m_headp);
1405 *m_headp = NULL;
1406 return (error);
1407 }
1408 } else if (error == ENOMEM) {
1409 adapter->no_tx_dma_setup++;
1410 return (error);
1411 } else if (error != 0) {
1412 adapter->no_tx_dma_setup++;
1413 m_freem(*m_headp);
1414 *m_headp = NULL;
1415 return (error);
1416 }
1417
1418 if (nsegs > (adapter->num_tx_desc_avail - 2)) {
1419 adapter->no_tx_desc_avail2++;
1420 error = ENOBUFS;
1421 goto encap_fail;
1422 }
1423
1424 /* Do hardware assists */
1425 if (ifp->if_hwassist > 0) {
1426 em_transmit_checksum_setup(adapter, m_head,
1427 &txd_upper, &txd_lower);
1428 }
1429
1430 i = adapter->next_avail_tx_desc;
1431 if (adapter->pcix_82544)
1432 txd_saved = i;
1433
1434 /* Set up our transmit descriptors */
1435 for (j = 0; j < nsegs; j++) {
1436 bus_size_t seg_len;
1437 bus_addr_t seg_addr;
1438 /* If adapter is 82544 and on PCIX bus */
1439 if(adapter->pcix_82544) {
1440 DESC_ARRAY desc_array;
1441 uint32_t array_elements, counter;
1442 /*
1443 * Check the Address and Length combination and
1444 * split the data accordingly
1445 */
1446 array_elements = em_fill_descriptors(segs[j].ds_addr,
1447 segs[j].ds_len, &desc_array);
1448 for (counter = 0; counter < array_elements; counter++) {
1449 if (txd_used == adapter->num_tx_desc_avail) {
1450 adapter->next_avail_tx_desc = txd_saved;
1451 adapter->no_tx_desc_avail2++;
1452 error = ENOBUFS;
1453 goto encap_fail;
1454 }
1455 tx_buffer = &adapter->tx_buffer_area[i];
1456 current_tx_desc = &adapter->tx_desc_base[i];
1457 current_tx_desc->buffer_addr = htole64(
1458 desc_array.descriptor[counter].address);
1459 current_tx_desc->lower.data = htole32(
1460 (adapter->txd_cmd | txd_lower | (uint16_t)
1461 desc_array.descriptor[counter].length));
1462 current_tx_desc->upper.data =
1463 htole32((txd_upper));
1464 last = i;
1465 if (++i == adapter->num_tx_desc)
1466 i = 0;
1467 tx_buffer->m_head = NULL;
1468 tx_buffer->next_eop = -1;
1469 txd_used++;
1470 }
1471 } else {
1472 tx_buffer = &adapter->tx_buffer_area[i];
1473 current_tx_desc = &adapter->tx_desc_base[i];
1474 seg_addr = htole64(segs[j].ds_addr);
1475 seg_len = segs[j].ds_len;
1476 current_tx_desc->buffer_addr = seg_addr;
1477 current_tx_desc->lower.data = htole32(
1478 adapter->txd_cmd | txd_lower | seg_len);
1479 current_tx_desc->upper.data =
1480 htole32(txd_upper);
1481 last = i;
1482 if (++i == adapter->num_tx_desc)
1483 i = 0;
1484 tx_buffer->m_head = NULL;
1485 tx_buffer->next_eop = -1;
1486 }
1487 }
1488
1489 adapter->next_avail_tx_desc = i;
1490 if (adapter->pcix_82544)
1491 adapter->num_tx_desc_avail -= txd_used;
1492 else
1493 adapter->num_tx_desc_avail -= nsegs;
1494
1495 if (ifv != NULL) {
1496 /* Set the vlan id. */
1497 current_tx_desc->upper.fields.special =
1498 htole16(ifv->ifv_tag);
1499 /* Tell hardware to add tag */
1500 current_tx_desc->lower.data |=
1501 htole32(E1000_TXD_CMD_VLE);
1502 }
1503
1504 tx_buffer->m_head = m_head;
1505 tx_buffer_mapped->map = tx_buffer->map;
1506 tx_buffer->map = map;
1507 bus_dmamap_sync(adapter->txtag, map, BUS_DMASYNC_PREWRITE);
1508
1509 /*
1510 * Last Descriptor of Packet
1511 * needs End Of Packet (EOP)
1512 * and Report Status (RS)
1513 */
1514 current_tx_desc->lower.data |=
1515 htole32(E1000_TXD_CMD_EOP | E1000_TXD_CMD_RS);
1516 /*
1517 * Keep track in the first buffer which
1518 * descriptor will be written back
1519 */
1520 tx_buffer = &adapter->tx_buffer_area[first];
1521 tx_buffer->next_eop = last;
1522
1523 /*
1524 * Advance the Transmit Descriptor Tail (Tdt), this tells the E1000
1525 * that this frame is available to transmit.
1526 */
1527 bus_dmamap_sync(adapter->txdma.dma_tag, adapter->txdma.dma_map,
1528 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1529 if (adapter->hw.mac_type == em_82547 &&
1530 adapter->link_duplex == HALF_DUPLEX)
1531 em_82547_move_tail(adapter);
1532 else {
1533 E1000_WRITE_REG(&adapter->hw, TDT, i);
1534 if (adapter->hw.mac_type == em_82547)
1535 em_82547_update_fifo_head(adapter,
1536 m_head->m_pkthdr.len);
1537 }
1538
1539 return (0);
1540
1541 encap_fail:
1542 bus_dmamap_unload(adapter->txtag, tx_buffer->map);
1543 return (error);
1544 }
1545
1546 /*********************************************************************
1547 *
1548 * 82547 workaround to avoid controller hang in half-duplex environment.
1549 * The workaround is to avoid queuing a large packet that would span
1550 * the internal Tx FIFO ring boundary. We need to reset the FIFO pointers
1551 * in this case. We do that only when FIFO is quiescent.
1552 *
1553 **********************************************************************/
static void
em_82547_move_tail(void *arg)
{
	struct adapter *adapter = arg;
	uint16_t hw_tdt;
	uint16_t sw_tdt;
	struct em_tx_desc *tx_desc;
	uint16_t length = 0;
	boolean_t eop = 0;
	int s;

	s = splimp();

	/* Walk the descriptors that software has queued but the
	 * hardware tail has not yet been advanced over. */
	hw_tdt = E1000_READ_REG(&adapter->hw, TDT);
	sw_tdt = adapter->next_avail_tx_desc;

	while (hw_tdt != sw_tdt) {
		tx_desc = &adapter->tx_desc_base[hw_tdt];
		length += tx_desc->lower.flags.length;
		eop = tx_desc->lower.data & E1000_TXD_CMD_EOP;
		if (++hw_tdt == adapter->num_tx_desc)
			hw_tdt = 0;

		if (eop) {
			/* Complete frame accumulated: publish the tail
			 * only if the frame won't span the internal FIFO
			 * wrap point; otherwise retry from the 1-tick
			 * callout once the FIFO has drained. */
			if (em_82547_fifo_workaround(adapter, length)) {
				adapter->tx_fifo_wrk_cnt++;
				callout_reset(&adapter->tx_fifo_timer, 1,
				    em_82547_move_tail, adapter);
				break;
			}
			E1000_WRITE_REG(&adapter->hw, TDT, hw_tdt);
			em_82547_update_fifo_head(adapter, length);
			length = 0;
		}
	}

	splx(s);
}
1592
1593 static int
1594 em_82547_fifo_workaround(struct adapter *adapter, int len)
1595 {
1596 int fifo_space, fifo_pkt_len;
1597
1598 fifo_pkt_len = roundup2(len + EM_FIFO_HDR, EM_FIFO_HDR);
1599
1600 if (adapter->link_duplex == HALF_DUPLEX) {
1601 fifo_space = adapter->tx_fifo_size - adapter->tx_fifo_head;
1602
1603 if (fifo_pkt_len >= (EM_82547_PKT_THRESH + fifo_space)) {
1604 if (em_82547_tx_fifo_reset(adapter))
1605 return (0);
1606 else
1607 return (1);
1608 }
1609 }
1610
1611 return (0);
1612 }
1613
1614 static void
1615 em_82547_update_fifo_head(struct adapter *adapter, int len)
1616 {
1617 int fifo_pkt_len = roundup2(len + EM_FIFO_HDR, EM_FIFO_HDR);
1618
1619 /* tx_fifo_head is always 16 byte aligned */
1620 adapter->tx_fifo_head += fifo_pkt_len;
1621 if (adapter->tx_fifo_head >= adapter->tx_fifo_size) {
1622 adapter->tx_fifo_head -= adapter->tx_fifo_size;
1623 }
1624 }
1625
1626
static int
em_82547_tx_fifo_reset(struct adapter *adapter)
{
	uint32_t tctl;

	/* The FIFO pointers may only be rewound when the transmitter is
	 * completely idle: descriptor ring drained (TDT==TDH), FIFO
	 * head/tail and their shadow registers equal, and no packets
	 * pending in the FIFO (TDFPC==0). */
	if ((E1000_READ_REG(&adapter->hw, TDT) ==
	    E1000_READ_REG(&adapter->hw, TDH)) &&
	    (E1000_READ_REG(&adapter->hw, TDFT) ==
	    E1000_READ_REG(&adapter->hw, TDFH)) &&
	    (E1000_READ_REG(&adapter->hw, TDFTS) ==
	    E1000_READ_REG(&adapter->hw, TDFHS)) &&
	    (E1000_READ_REG(&adapter->hw, TDFPC) == 0)) {
		/* Disable TX unit */
		tctl = E1000_READ_REG(&adapter->hw, TCTL);
		E1000_WRITE_REG(&adapter->hw, TCTL, tctl & ~E1000_TCTL_EN);

		/* Reset FIFO pointers */
		E1000_WRITE_REG(&adapter->hw, TDFT, adapter->tx_head_addr);
		E1000_WRITE_REG(&adapter->hw, TDFH, adapter->tx_head_addr);
		E1000_WRITE_REG(&adapter->hw, TDFTS, adapter->tx_head_addr);
		E1000_WRITE_REG(&adapter->hw, TDFHS, adapter->tx_head_addr);

		/* Re-enable TX unit */
		E1000_WRITE_REG(&adapter->hw, TCTL, tctl);
		E1000_WRITE_FLUSH(&adapter->hw);

		/* Software FIFO tracking restarts from the beginning. */
		adapter->tx_fifo_head = 0;
		adapter->tx_fifo_reset_cnt++;

		return (TRUE);
	}
	else {
		return (FALSE);
	}
}
1662
1663 static void
1664 em_set_promisc(struct adapter *adapter)
1665 {
1666 struct ifnet *ifp = &adapter->interface_data.ac_if;
1667 uint32_t reg_rctl;
1668
1669 reg_rctl = E1000_READ_REG(&adapter->hw, RCTL);
1670
1671 if (ifp->if_flags & IFF_PROMISC) {
1672 reg_rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
1673 E1000_WRITE_REG(&adapter->hw, RCTL, reg_rctl);
1674 /* Disable VLAN stripping in promiscous mode
1675 * This enables bridging of vlan tagged frames to occur
1676 * and also allows vlan tags to be seen in tcpdump
1677 */
1678 em_disable_vlans(adapter);
1679 adapter->em_insert_vlan_header = 1;
1680 } else if (ifp->if_flags & IFF_ALLMULTI) {
1681 reg_rctl |= E1000_RCTL_MPE;
1682 reg_rctl &= ~E1000_RCTL_UPE;
1683 E1000_WRITE_REG(&adapter->hw, RCTL, reg_rctl);
1684 adapter->em_insert_vlan_header = 0;
1685 } else
1686 adapter->em_insert_vlan_header = 0;
1687 }
1688
1689 static void
1690 em_disable_promisc(struct adapter *adapter)
1691 {
1692 uint32_t reg_rctl;
1693
1694 reg_rctl = E1000_READ_REG(&adapter->hw, RCTL);
1695
1696 reg_rctl &= (~E1000_RCTL_UPE);
1697 reg_rctl &= (~E1000_RCTL_MPE);
1698 E1000_WRITE_REG(&adapter->hw, RCTL, reg_rctl);
1699
1700 em_enable_vlans(adapter);
1701 adapter->em_insert_vlan_header = 0;
1702 }
1703
1704
1705 /*********************************************************************
1706 * Multicast Update
1707 *
1708 * This routine is called whenever multicast address list is updated.
1709 *
1710 **********************************************************************/
1711
static void
em_set_multi(struct adapter *adapter)
{
	struct ifnet *ifp = &adapter->interface_data.ac_if;
	struct ifmultiaddr *ifma;
	uint32_t reg_rctl = 0;
	uint8_t  mta[MAX_NUM_MULTICAST_ADDRESSES * ETH_LENGTH_OF_ADDRESS];
	int mcnt = 0;

	IOCTL_DEBUGOUT("em_set_multi: begin");

	/* 82542 rev 2.0 errata: the receiver must be held in reset
	 * (and MWI disabled) while the multicast table is rewritten. */
	if (adapter->hw.mac_type == em_82542_rev2_0) {
		reg_rctl = E1000_READ_REG(&adapter->hw, RCTL);
		if (adapter->hw.pci_cmd_word & CMD_MEM_WRT_INVALIDATE)
			em_pci_clear_mwi(&adapter->hw);
		reg_rctl |= E1000_RCTL_RST;
		E1000_WRITE_REG(&adapter->hw, RCTL, reg_rctl);
		msec_delay(5);
	}

	/* Gather the interface's link-layer multicast addresses. */
	LIST_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;

		if (mcnt == MAX_NUM_MULTICAST_ADDRESSES)
			break;

		bcopy(LLADDR((struct sockaddr_dl *)ifma->ifma_addr),
		    &mta[mcnt*ETH_LENGTH_OF_ADDRESS], ETH_LENGTH_OF_ADDRESS);
		mcnt++;
	}

	/* Too many groups for the hardware table: fall back to
	 * accepting all multicast frames. */
	if (mcnt >= MAX_NUM_MULTICAST_ADDRESSES) {
		reg_rctl = E1000_READ_REG(&adapter->hw, RCTL);
		reg_rctl |= E1000_RCTL_MPE;
		E1000_WRITE_REG(&adapter->hw, RCTL, reg_rctl);
	} else
		em_mc_addr_list_update(&adapter->hw, mta, mcnt, 0, 1);

	/* Take the 82542 rev 2.0 receiver back out of reset. */
	if (adapter->hw.mac_type == em_82542_rev2_0) {
		reg_rctl = E1000_READ_REG(&adapter->hw, RCTL);
		reg_rctl &= ~E1000_RCTL_RST;
		E1000_WRITE_REG(&adapter->hw, RCTL, reg_rctl);
		msec_delay(5);
		if (adapter->hw.pci_cmd_word & CMD_MEM_WRT_INVALIDATE)
			em_pci_set_mwi(&adapter->hw);
	}
}
1760
1761
1762 /*********************************************************************
1763 * Timer routine
1764 *
1765 * This routine checks for link status and updates statistics.
1766 *
1767 **********************************************************************/
1768
static void
em_local_timer(void *arg)
{
	struct adapter *adapter = arg;
	struct ifnet *ifp = &adapter->interface_data.ac_if;
	int s;

	s = splimp();

	/* Refresh link state and statistics once a second. */
	em_check_for_link(&adapter->hw);
	em_update_link_status(adapter);
	em_update_stats_counters(adapter);
	if (em_display_debug_stats && ifp->if_flags & IFF_RUNNING)
		em_print_hw_stats(adapter);
	/* Work around PHYs that negotiate 10/100 when gigabit is
	 * achievable (smartspeed). */
	em_smartspeed(adapter);

	/* Reschedule ourselves for one second from now. */
	callout_reset(&adapter->timer, hz, em_local_timer, adapter);

	splx(s);
}
1789
static void
em_update_link_status(struct adapter *adapter)
{
	struct ifnet *ifp = &adapter->interface_data.ac_if;
	device_t dev = adapter->dev;

	/* Link Up (LU) bit in the status register reflects current state. */
	if (E1000_READ_REG(&adapter->hw, STATUS) & E1000_STATUS_LU) {
		if (adapter->link_active == 0) {
			/* Link just came up: latch speed and duplex. */
			em_get_speed_and_duplex(&adapter->hw,
			    &adapter->link_speed, &adapter->link_duplex);
			/* Check if we may set SPEED_MODE bit on PCI-E */
			if ((adapter->link_speed == SPEED_1000) &&
			    ((adapter->hw.mac_type == em_82571) ||
			    (adapter->hw.mac_type == em_82572))) {
				int tarc0;

				tarc0 = E1000_READ_REG(&adapter->hw, TARC0);
				tarc0 |= SPEED_MODE_BIT;
				E1000_WRITE_REG(&adapter->hw, TARC0, tarc0);
			}
			if (bootverbose)
				device_printf(dev, "Link is up %d Mbps %s\n",
				    adapter->link_speed,
				    ((adapter->link_duplex == FULL_DUPLEX) ?
				    "Full Duplex" : "Half Duplex"));
			adapter->link_active = 1;
			adapter->smartspeed = 0;
			ifp->if_baudrate = adapter->link_speed * 1000000;
		}
	} else {
		if (adapter->link_active == 1) {
			/* Link just went down: clear cached link state. */
			ifp->if_baudrate = adapter->link_speed = 0;
			adapter->link_duplex = 0;
			if (bootverbose)
				device_printf(dev, "Link is Down\n");
			adapter->link_active = 0;
		}
	}
}
1829
1830 /*********************************************************************
1831 *
1832 * This routine disables all traffic on the adapter by issuing a
1833 * global reset on the MAC and deallocates TX/RX buffers.
1834 *
1835 **********************************************************************/
1836
static void
em_stop(void *arg)
{
	struct adapter *adapter = arg;
	struct ifnet *ifp = &adapter->interface_data.ac_if;

	INIT_DEBUGOUT("em_stop: begin");

	/* Silence the hardware and cancel pending timers before reset. */
	em_disable_intr(adapter);
	callout_stop(&adapter->timer);
	callout_stop(&adapter->tx_fifo_timer);

	/* Tell the stack that the interface is no longer active */
	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
	em_reset_hw(&adapter->hw);
}
1853
1854
1855 /*********************************************************************
1856 *
1857 * Determine hardware revision.
1858 *
1859 **********************************************************************/
1860 static void
1861 em_identify_hardware(struct adapter *adapter)
1862 {
1863 device_t dev = adapter->dev;
1864
1865 /* Make sure our PCI config space has the necessary stuff set */
1866 adapter->hw.pci_cmd_word = pci_read_config(dev, PCIR_COMMAND, 2);
1867 if (!((adapter->hw.pci_cmd_word & PCIM_CMD_BUSMASTEREN) &&
1868 (adapter->hw.pci_cmd_word & PCIM_CMD_MEMEN))) {
1869 device_printf(dev, "Memory Access and/or Bus Master bits "
1870 "were not set!\n");
1871 adapter->hw.pci_cmd_word |=
1872 (PCIM_CMD_BUSMASTEREN | PCIM_CMD_MEMEN);
1873 pci_write_config(dev, PCIR_COMMAND,
1874 adapter->hw.pci_cmd_word, 2);
1875 }
1876
1877 /* Save off the information about this board */
1878 adapter->hw.vendor_id = pci_get_vendor(dev);
1879 adapter->hw.device_id = pci_get_device(dev);
1880 adapter->hw.revision_id = pci_read_config(dev, PCIR_REVID, 1);
1881 adapter->hw.subsystem_vendor_id =
1882 pci_read_config(dev, PCIR_SUBVEND_0, 2);
1883 adapter->hw.subsystem_id = pci_read_config(dev, PCIR_SUBDEV_0, 2);
1884
1885 /* Identify the MAC */
1886 if (em_set_mac_type(&adapter->hw))
1887 device_printf(dev, "Unknown MAC Type\n");
1888
1889 if (adapter->hw.mac_type == em_82541 ||
1890 adapter->hw.mac_type == em_82541_rev_2 ||
1891 adapter->hw.mac_type == em_82547 ||
1892 adapter->hw.mac_type == em_82547_rev_2)
1893 adapter->hw.phy_init_script = TRUE;
1894 }
1895
static int
em_allocate_pci_resources(struct adapter *adapter)
{
	device_t dev = adapter->dev;
	int val, rid;

	/* BAR 0 is the memory-mapped register window. */
	rid = PCIR_BAR(0);
	adapter->res_memory = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
	    &rid, RF_ACTIVE);
	if (adapter->res_memory == NULL) {
		device_printf(dev, "Unable to allocate bus resource: memory\n");
		return (ENXIO);
	}
	adapter->osdep.mem_bus_space_tag =
	    rman_get_bustag(adapter->res_memory);
	adapter->osdep.mem_bus_space_handle =
	    rman_get_bushandle(adapter->res_memory);
	adapter->hw.hw_addr = (uint8_t *)&adapter->osdep.mem_bus_space_handle;

	if (adapter->hw.mac_type > em_82543) {
		/* Figure our where our IO BAR is ?  Scan the BARs until
		 * one decodes as I/O space, skipping the high dword of
		 * any 64-bit memory BARs. */
		for (rid = PCIR_BAR(0); rid < PCIR_CIS;) {
			val = pci_read_config(dev, rid, 4);
			if (EM_BAR_TYPE(val) == EM_BAR_TYPE_IO) {
				adapter->io_rid = rid;
				break;
			}
			rid += 4;
			/* check for 64bit BAR */
			if (EM_BAR_MEM_TYPE(val) == EM_BAR_MEM_TYPE_64BIT)
				rid += 4;
		}
		if (rid >= PCIR_CIS) {
			device_printf(dev, "Unable to locate IO BAR\n");
			return (ENXIO);
		}
		adapter->res_ioport = bus_alloc_resource_any(dev,
		    SYS_RES_IOPORT, &adapter->io_rid, RF_ACTIVE);
		if (adapter->res_ioport == NULL) {
			device_printf(dev, "Unable to allocate bus resource: "
			    "ioport\n");
			return (ENXIO);
		}
		adapter->hw.io_base = 0;
		adapter->osdep.io_bus_space_tag =
		    rman_get_bustag(adapter->res_ioport);
		adapter->osdep.io_bus_space_handle =
		    rman_get_bushandle(adapter->res_ioport);
	}

	/* For ICH8 we need to find the flash memory. */
	if (adapter->hw.mac_type == em_ich8lan) {
		rid = EM_FLASH;
		adapter->flash_mem = bus_alloc_resource_any(dev,
		    SYS_RES_MEMORY, &rid, RF_ACTIVE);
		adapter->osdep.flash_bus_space_tag =
		    rman_get_bustag(adapter->flash_mem);
		adapter->osdep.flash_bus_space_handle =
		    rman_get_bushandle(adapter->flash_mem);
	}

	/* Shared (possibly) interrupt line; the handler is installed
	 * later by em_allocate_intr(). */
	rid = 0x0;
	adapter->res_interrupt = bus_alloc_resource_any(dev,
	    SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE);
	if (adapter->res_interrupt == NULL) {
		device_printf(dev, "Unable to allocate bus resource: "
		    "interrupt\n");
		return (ENXIO);
	}

	/* Back-pointer used by the shared register access macros. */
	adapter->hw.back = &adapter->osdep;

	return (0);
}
1970
1971 /*********************************************************************
1972 *
1973 * Setup the appropriate Interrupt handlers.
1974 *
1975 **********************************************************************/
1976 int
1977 em_allocate_intr(struct adapter *adapter)
1978 {
1979 device_t dev = adapter->dev;
1980 int error;
1981
1982 /* Manually turn off all interrupts */
1983 E1000_WRITE_REG(&adapter->hw, IMC, 0xffffffff);
1984
1985 if (adapter->int_handler_tag == NULL &&
1986 (error = bus_setup_intr(dev, adapter->res_interrupt,
1987 INTR_TYPE_NET, em_intr, adapter,
1988 &adapter->int_handler_tag)) != 0) {
1989 device_printf(dev, "Failed to register interrupt handler");
1990 return (error);
1991 }
1992
1993 em_enable_intr(adapter);
1994 return (0);
1995 }
1996
1997 static void
1998 em_free_intr(struct adapter *adapter)
1999 {
2000 device_t dev = adapter->dev;
2001
2002 if (adapter->int_handler_tag != NULL) {
2003 bus_teardown_intr(dev, adapter->res_interrupt,
2004 adapter->int_handler_tag);
2005 adapter->int_handler_tag = NULL;
2006 }
2007 }
2008
2009 static void
2010 em_free_pci_resources(struct adapter *adapter)
2011 {
2012 device_t dev = adapter->dev;
2013
2014 if (adapter->res_interrupt != NULL)
2015 bus_release_resource(dev, SYS_RES_IRQ,
2016 0, adapter->res_interrupt);
2017
2018 if (adapter->res_memory != NULL)
2019 bus_release_resource(dev, SYS_RES_MEMORY,
2020 PCIR_BAR(0), adapter->res_memory);
2021
2022 if (adapter->flash_mem != NULL)
2023 bus_release_resource(dev, SYS_RES_MEMORY,
2024 EM_FLASH, adapter->flash_mem);
2025
2026 if (adapter->res_ioport != NULL)
2027 bus_release_resource(dev, SYS_RES_IOPORT,
2028 adapter->io_rid, adapter->res_ioport);
2029 }
2030
/*********************************************************************
 *
 *  Initialize the hardware to a configuration as specified by the
 *  adapter structure. The controller is reset, the EEPROM is
 *  verified, the MAC address is set, then the shared initialization
 *  routines are called.
 *
 *  Returns 0 on success, EIO if the EEPROM is bad or the shared
 *  em_init_hw() routine fails.
 *
 **********************************************************************/
static int
em_hardware_init(struct adapter *adapter)
{
	device_t dev = adapter->dev;
	uint16_t rx_buffer_size;

	INIT_DEBUGOUT("em_hardware_init: begin");
	/* Issue a global reset */
	em_reset_hw(&adapter->hw);

	/* When hardware is reset, fifo_head is also reset */
	adapter->tx_fifo_head = 0;

	/* Make sure we have a good EEPROM before we read from it */
	if (em_validate_eeprom_checksum(&adapter->hw) < 0) {
		device_printf(dev, "The EEPROM Checksum Is Not Valid\n");
		return (EIO);
	}

	if (em_read_part_num(&adapter->hw, &(adapter->part_num)) < 0) {
		device_printf(dev, "EEPROM read error "
		    "reading part number\n");
		return (EIO);
	}

	/* Set up smart power down as default off on newer adapters. */
	if (!em_smart_pwr_down && (adapter->hw.mac_type == em_82571 ||
	    adapter->hw.mac_type == em_82572)) {
		uint16_t phy_tmp = 0;

		/* Speed up time to link by disabling smart power down. */
		em_read_phy_reg(&adapter->hw,
		    IGP02E1000_PHY_POWER_MGMT, &phy_tmp);
		phy_tmp &= ~IGP02E1000_PM_SPD;
		em_write_phy_reg(&adapter->hw,
		    IGP02E1000_PHY_POWER_MGMT, phy_tmp);
	}

	/*
	 * These parameters control the automatic generation (Tx) and
	 * response (Rx) to Ethernet PAUSE frames.
	 * - High water mark should allow for at least two frames to be
	 *   received after sending an XOFF.
	 * - Low water mark works best when it is very near the high water mark.
	 *   This allows the receiver to restart by sending XON when it has
	 *   drained a bit. Here we use an arbitary value of 1500 which will
	 *   restart after one full frame is pulled from the buffer. There
	 *   could be several smaller frames in the buffer and if so they will
	 *   not trigger the XON until their total number reduces the buffer
	 *   by 1500.
	 * - The pause time is fairly large at 1000 x 512ns = 512 usec.
	 */
	/* PBA low 16 bits hold the RX packet-buffer size in KB; shift to bytes. */
	rx_buffer_size = ((E1000_READ_REG(&adapter->hw, PBA) & 0xffff) << 10 );

	/*
	 * NOTE(review): rx_buffer_size is uint16_t, so the subtraction below
	 * assumes the RX buffer is larger than one rounded-up max frame —
	 * presumably guaranteed by the PBA values this hardware reports;
	 * verify for jumbo-frame configurations.
	 */
	adapter->hw.fc_high_water = rx_buffer_size -
	    roundup2(adapter->hw.max_frame_size, 1024);
	adapter->hw.fc_low_water = adapter->hw.fc_high_water - 1500;
	if (adapter->hw.mac_type == em_80003es2lan)
		adapter->hw.fc_pause_time = 0xFFFF;
	else
		adapter->hw.fc_pause_time = 0x1000;
	adapter->hw.fc_send_xon = TRUE;
	adapter->hw.fc = E1000_FC_FULL;

	/* Shared-code initialization: MAC, PHY, link setup. */
	if (em_init_hw(&adapter->hw) < 0) {
		device_printf(dev, "Hardware Initialization Failed\n");
		return (EIO);
	}

	em_check_for_link(&adapter->hw);

	return (0);
}
2112
/*********************************************************************
 *
 *  Setup networking device structure and register an interface.
 *
 *  Fills in the old-style struct ifnet embedded in the adapter,
 *  attaches it to the network stack, advertises hardware checksum
 *  capability, and registers the supported ifmedia types.
 *
 **********************************************************************/
static void
em_setup_interface(device_t dev, struct adapter *adapter)
{
	struct ifnet *ifp;
	u_char fiber_type = IFM_1000_SX;	/* default type */

	INIT_DEBUGOUT("em_setup_interface: begin");

	/* Fill in the ifnet before attaching it to the stack. */
	ifp = &adapter->interface_data.ac_if;
	ifp->if_unit = device_get_unit(dev);
	ifp->if_name = "em";
	ifp->if_mtu = ETHERMTU;
	ifp->if_output = ether_output;
	ifp->if_init =  em_init;
	ifp->if_softc = adapter;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = em_ioctl;
	ifp->if_start = em_start;
	ifp->if_watchdog = em_watchdog;
	/* Never queue more packets than there are TX descriptors. */
	ifp->if_snd.ifq_maxlen = adapter->num_tx_desc - 1;

	ether_ifattach(ifp, ETHER_BPF_SUPPORTED);

	ifp->if_capabilities = ifp->if_capenable = 0;

	/* 82543 and later support TX/RX checksum offload; enable by default. */
	if (adapter->hw.mac_type >= em_82543) {
		ifp->if_capabilities |= IFCAP_HWCSUM;
		ifp->if_capenable |= IFCAP_HWCSUM;
	}

	/*
	 * Tell the upper layer(s) we support long frames.
	 */
	ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);

	/*
	 * Specify the media types supported by this adapter and register
	 * callbacks to update media and link information
	 */
	ifmedia_init(&adapter->media, IFM_IMASK,
	    em_media_change, em_media_status);
	if ((adapter->hw.media_type == em_media_type_fiber) ||
	    (adapter->hw.media_type == em_media_type_internal_serdes)) {
		/* 82545 fiber parts use LX optics instead of the SX default. */
		if (adapter->hw.mac_type == em_82545)
			fiber_type = IFM_1000_LX;
		ifmedia_add(&adapter->media, IFM_ETHER | fiber_type | IFM_FDX,
		    0, NULL);
		ifmedia_add(&adapter->media, IFM_ETHER | fiber_type, 0, NULL);
	} else {
		/* Copper: advertise 10/100 always, 1000 unless the PHY is IFE. */
		ifmedia_add(&adapter->media, IFM_ETHER | IFM_10_T, 0, NULL);
		ifmedia_add(&adapter->media, IFM_ETHER | IFM_10_T | IFM_FDX,
		    0, NULL);
		ifmedia_add(&adapter->media, IFM_ETHER | IFM_100_TX,
		    0, NULL);
		ifmedia_add(&adapter->media, IFM_ETHER | IFM_100_TX | IFM_FDX,
		    0, NULL);
		if (adapter->hw.phy_type != em_phy_ife) {
			ifmedia_add(&adapter->media,
			    IFM_ETHER | IFM_1000_TX | IFM_FDX, 0, NULL);
			ifmedia_add(&adapter->media,
			    IFM_ETHER | IFM_1000_TX, 0, NULL);
		}
	}
	/* Autoselect is always offered and is the default media. */
	ifmedia_add(&adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL);
	ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);
}
2184
2185
/*********************************************************************
 *
 *  Workaround for SmartSpeed on 82541 and 82547 controllers
 *
 *  Called periodically while the link is down.  If the PHY keeps
 *  reporting a 1000T master/slave configuration fault, toggle the
 *  manual master/slave setting and restart autonegotiation, cycling
 *  back to automatic after EM_SMARTSPEED_MAX attempts.
 *
 **********************************************************************/
static void
em_smartspeed(struct adapter *adapter)
{
	uint16_t phy_tmp;

	/*
	 * Nothing to do when the link is up, the PHY is not an IGP,
	 * autonegotiation is off, or 1000FDX is not being advertised.
	 */
	if (adapter->link_active || (adapter->hw.phy_type != em_phy_igp) ||
	    adapter->hw.autoneg == 0 ||
	    (adapter->hw.autoneg_advertised & ADVERTISE_1000_FULL) == 0)
		return;

	if (adapter->smartspeed == 0) {
		/* If Master/Slave config fault is asserted twice,
		 * we assume back-to-back */
		em_read_phy_reg(&adapter->hw, PHY_1000T_STATUS, &phy_tmp);
		if (!(phy_tmp & SR_1000T_MS_CONFIG_FAULT))
			return;
		em_read_phy_reg(&adapter->hw, PHY_1000T_STATUS, &phy_tmp);
		if (phy_tmp & SR_1000T_MS_CONFIG_FAULT) {
			em_read_phy_reg(&adapter->hw, PHY_1000T_CTRL, &phy_tmp);
			if(phy_tmp & CR_1000T_MS_ENABLE) {
				/* Drop manual master/slave and renegotiate. */
				phy_tmp &= ~CR_1000T_MS_ENABLE;
				em_write_phy_reg(&adapter->hw, PHY_1000T_CTRL,
				    phy_tmp);
				adapter->smartspeed++;
				if(adapter->hw.autoneg &&
				   !em_phy_setup_autoneg(&adapter->hw) &&
				   !em_read_phy_reg(&adapter->hw, PHY_CTRL,
				    &phy_tmp)) {
					phy_tmp |= (MII_CR_AUTO_NEG_EN |
						    MII_CR_RESTART_AUTO_NEG);
					em_write_phy_reg(&adapter->hw, PHY_CTRL,
					    phy_tmp);
				}
			}
		}
		return;
	} else if(adapter->smartspeed == EM_SMARTSPEED_DOWNSHIFT) {
		/* If still no link, perhaps using 2/3 pair cable */
		em_read_phy_reg(&adapter->hw, PHY_1000T_CTRL, &phy_tmp);
		/* Re-enable manual master/slave and renegotiate. */
		phy_tmp |= CR_1000T_MS_ENABLE;
		em_write_phy_reg(&adapter->hw, PHY_1000T_CTRL, phy_tmp);
		if(adapter->hw.autoneg &&
		   !em_phy_setup_autoneg(&adapter->hw) &&
		   !em_read_phy_reg(&adapter->hw, PHY_CTRL, &phy_tmp)) {
			phy_tmp |= (MII_CR_AUTO_NEG_EN |
				    MII_CR_RESTART_AUTO_NEG);
			em_write_phy_reg(&adapter->hw, PHY_CTRL, phy_tmp);
		}
	}
	/* Restart process after EM_SMARTSPEED_MAX iterations */
	if(adapter->smartspeed++ == EM_SMARTSPEED_MAX)
		adapter->smartspeed = 0;
}
2244
2245
2246 /*
2247 * Manage DMA'able memory.
2248 */
2249 static void
2250 em_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
2251 {
2252 if (error)
2253 return;
2254 *(bus_addr_t *) arg = segs[0].ds_addr;
2255 }
2256
2257 static int
2258 em_dma_malloc(struct adapter *adapter, bus_size_t size,
2259 struct em_dma_alloc *dma, int mapflags)
2260 {
2261 int error;
2262
2263 error = bus_dma_tag_create(NULL, /* parent */
2264 EM_DBA_ALIGN, 0, /* alignment, bounds */
2265 BUS_SPACE_MAXADDR, /* lowaddr */
2266 BUS_SPACE_MAXADDR, /* highaddr */
2267 NULL, NULL, /* filter, filterarg */
2268 size, /* maxsize */
2269 1, /* nsegments */
2270 size, /* maxsegsize */
2271 0, /* flags */
2272 &dma->dma_tag);
2273 if (error) {
2274 device_printf(adapter->dev,
2275 "%s: bus_dma_tag_create failed: %d\n",
2276 __func__, error);
2277 goto fail_0;
2278 }
2279
2280 error = bus_dmamem_alloc(dma->dma_tag, (void**) &dma->dma_vaddr,
2281 BUS_DMA_NOWAIT, &dma->dma_map);
2282 if (error) {
2283 device_printf(adapter->dev,
2284 "%s: bus_dmamem_alloc(%llu) failed: %d\n",
2285 __func__, (unsigned long long)size, error);
2286 goto fail_2;
2287 }
2288
2289 dma->dma_paddr = 0;
2290 error = bus_dmamap_load(dma->dma_tag, dma->dma_map, dma->dma_vaddr,
2291 size, em_dmamap_cb, &dma->dma_paddr, mapflags | BUS_DMA_NOWAIT);
2292 if (error || dma->dma_paddr == 0) {
2293 device_printf(adapter->dev,
2294 "%s: bus_dmamap_load failed: %d\n",
2295 __func__, error);
2296 goto fail_3;
2297 }
2298
2299 return (0);
2300
2301 fail_3:
2302 bus_dmamap_unload(dma->dma_tag, dma->dma_map);
2303 fail_2:
2304 bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map);
2305 bus_dma_tag_destroy(dma->dma_tag);
2306 fail_0:
2307 dma->dma_map = NULL;
2308 dma->dma_tag = NULL;
2309
2310 return (error);
2311 }
2312
/*
 * Release a DMA allocation made by em_dma_malloc().  Safe to call on
 * a zeroed or already-freed descriptor (dma_tag == NULL).  The strict
 * sync -> unload -> free -> destroy order must be preserved.
 */
static void
em_dma_free(struct adapter *adapter, struct em_dma_alloc *dma)
{
	if (dma->dma_tag == NULL)
		return;
	if (dma->dma_map != NULL) {
		/* Flush any outstanding DMA before tearing down the map. */
		bus_dmamap_sync(dma->dma_tag, dma->dma_map,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(dma->dma_tag, dma->dma_map);
		bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map);
		dma->dma_map = NULL;
	}
	bus_dma_tag_destroy(dma->dma_tag);
	dma->dma_tag = NULL;
}
2328
2329
2330 /*********************************************************************
2331 *
2332 * Allocate memory for tx_buffer structures. The tx_buffer stores all
2333 * the information needed to transmit a packet on the wire.
2334 *
2335 **********************************************************************/
2336 static int
2337 em_allocate_transmit_structures(struct adapter *adapter)
2338 {
2339 device_t dev = adapter->dev;
2340 struct em_buffer *tx_buffer;
2341 int error, i;
2342
2343 /*
2344 * Create DMA tags for tx descriptors
2345 */
2346 if ((error = bus_dma_tag_create(NULL, /* parent */
2347 1, 0, /* alignment, bounds */
2348 BUS_SPACE_MAXADDR, /* lowaddr */
2349 BUS_SPACE_MAXADDR, /* highaddr */
2350 NULL, NULL, /* filter, filterarg */
2351 EM_TSO_SIZE, /* maxsize */
2352 EM_MAX_SCATTER, /* nsegments */
2353 PAGE_SIZE, /* maxsegsize */
2354 0, /* flags */
2355 &adapter->txtag)) != 0) {
2356 device_printf(dev, "Unable to allocate TX DMA tag\n");
2357 goto fail;
2358 }
2359
2360 adapter->tx_buffer_area = malloc(sizeof(struct em_buffer) *
2361 adapter->num_tx_desc, M_DEVBUF, M_NOWAIT | M_ZERO);
2362 if (adapter->tx_buffer_area == NULL) {
2363 device_printf(dev, "Unable to allocate tx_buffer memory\n");
2364 error = ENOMEM;
2365 goto fail;
2366 }
2367
2368 /* Create the descriptor buffer dma maps */
2369 tx_buffer = adapter->tx_buffer_area;
2370 for (i = 0; i < adapter->num_tx_desc; i++) {
2371 error = bus_dmamap_create(adapter->txtag, 0, &tx_buffer->map);
2372 if (error != 0) {
2373 device_printf(dev, "Unable to create TX DMA map\n");
2374 goto fail;
2375 }
2376 tx_buffer++;
2377 }
2378
2379 return (0);
2380
2381 fail:
2382 em_free_transmit_structures(adapter);
2383 return (error);
2384 }
2385
2386 /*********************************************************************
2387 *
2388 * Initialize transmit structures.
2389 *
2390 **********************************************************************/
2391 static void
2392 em_setup_transmit_structures(struct adapter *adapter)
2393 {
2394 struct em_buffer *tx_buffer;
2395 int i;
2396
2397 /* Clear the old ring contents */
2398 bzero(adapter->tx_desc_base,
2399 (sizeof(struct em_tx_desc)) * adapter->num_tx_desc);
2400
2401 adapter->next_avail_tx_desc = 0;
2402 adapter->next_tx_to_clean = 0;
2403
2404 /* Free any existing tx buffers. */
2405 tx_buffer = adapter->tx_buffer_area;
2406 for (i = 0; i < adapter->num_tx_desc; i++, tx_buffer++) {
2407 if (tx_buffer->m_head != NULL) {
2408 bus_dmamap_sync(adapter->txtag, tx_buffer->map,
2409 BUS_DMASYNC_POSTWRITE);
2410 bus_dmamap_unload(adapter->txtag, tx_buffer->map);
2411 m_freem(tx_buffer->m_head);
2412 tx_buffer->m_head = NULL;
2413 }
2414 }
2415
2416 /* Set number of descriptors available */
2417 adapter->num_tx_desc_avail = adapter->num_tx_desc;
2418
2419 /* Set checksum context */
2420 adapter->active_checksum_context = OFFLOAD_NONE;
2421 bus_dmamap_sync(adapter->txdma.dma_tag, adapter->txdma.dma_map,
2422 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2423 }
2424
/*********************************************************************
 *
 *  Enable transmit unit.
 *
 *  Programs the TX descriptor ring registers, inter-packet gap,
 *  interrupt delay timers, and finally TCTL (which turns the
 *  transmitter on).  Register write order follows the original code.
 *
 **********************************************************************/
static void
em_initialize_transmit_unit(struct adapter *adapter)
{
	uint32_t reg_tctl, reg_tipg = 0;
	uint64_t bus_addr;

	INIT_DEBUGOUT("em_initialize_transmit_unit: begin");
	/* Setup the Base and Length of the Tx Descriptor Ring */
	bus_addr = adapter->txdma.dma_paddr;
	E1000_WRITE_REG(&adapter->hw, TDLEN,
	    adapter->num_tx_desc * sizeof(struct em_tx_desc));
	E1000_WRITE_REG(&adapter->hw, TDBAH, (uint32_t)(bus_addr >> 32));
	E1000_WRITE_REG(&adapter->hw, TDBAL, (uint32_t)bus_addr);

	/* Setup the HW Tx Head and Tail descriptor pointers */
	E1000_WRITE_REG(&adapter->hw, TDT, 0);
	E1000_WRITE_REG(&adapter->hw, TDH, 0);

	HW_DEBUGOUT2("Base = %x, Length = %x\n",
	    E1000_READ_REG(&adapter->hw, TDBAL),
	    E1000_READ_REG(&adapter->hw, TDLEN));

	/* Set the default values for the Tx Inter Packet Gap timer */
	switch (adapter->hw.mac_type) {
	case em_82542_rev2_0:
	case em_82542_rev2_1:
		reg_tipg = DEFAULT_82542_TIPG_IPGT;
		reg_tipg |= DEFAULT_82542_TIPG_IPGR1 << E1000_TIPG_IPGR1_SHIFT;
		reg_tipg |= DEFAULT_82542_TIPG_IPGR2 << E1000_TIPG_IPGR2_SHIFT;
		break;
	case em_80003es2lan:
		reg_tipg = DEFAULT_82543_TIPG_IPGR1;
		reg_tipg |= DEFAULT_80003ES2LAN_TIPG_IPGR2 <<
		    E1000_TIPG_IPGR2_SHIFT;
		break;
	default:
		/* IPGT differs between fiber/serdes and copper media. */
		if ((adapter->hw.media_type == em_media_type_fiber) ||
		    (adapter->hw.media_type == em_media_type_internal_serdes))
			reg_tipg = DEFAULT_82543_TIPG_IPGT_FIBER;
		else
			reg_tipg = DEFAULT_82543_TIPG_IPGT_COPPER;
		reg_tipg |= DEFAULT_82543_TIPG_IPGR1 << E1000_TIPG_IPGR1_SHIFT;
		reg_tipg |= DEFAULT_82543_TIPG_IPGR2 << E1000_TIPG_IPGR2_SHIFT;
	}

	E1000_WRITE_REG(&adapter->hw, TIPG, reg_tipg);
	/* TX interrupt delay; TADV (absolute delay) exists on 82540+. */
	E1000_WRITE_REG(&adapter->hw, TIDV, adapter->tx_int_delay.value);
	if(adapter->hw.mac_type >= em_82540)
		E1000_WRITE_REG(&adapter->hw, TADV,
		    adapter->tx_abs_int_delay.value);

	/* Program the Transmit Control Register */
	reg_tctl = E1000_TCTL_PSP | E1000_TCTL_EN |
		   (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT);
	/* Multiple-request support on 82571 and later. */
	if (adapter->hw.mac_type >= em_82571)
		reg_tctl |= E1000_TCTL_MULR;
	if (adapter->link_duplex == FULL_DUPLEX) {
		reg_tctl |= E1000_FDX_COLLISION_DISTANCE << E1000_COLD_SHIFT;
	} else {
		reg_tctl |= E1000_HDX_COLLISION_DISTANCE << E1000_COLD_SHIFT;
	}
	/* This write will effectively turn on the transmit unit. */
	E1000_WRITE_REG(&adapter->hw, TCTL, reg_tctl);

	/* Setup Transmit Descriptor Base Settings */
	adapter->txd_cmd = E1000_TXD_CMD_IFCS;

	/* Delay TX completion interrupts only if a delay is configured. */
	if (adapter->tx_int_delay.value > 0)
		adapter->txd_cmd |= E1000_TXD_CMD_IDE;
}
2500
2501 /*********************************************************************
2502 *
2503 * Free all transmit related data structures.
2504 *
2505 **********************************************************************/
2506 static void
2507 em_free_transmit_structures(struct adapter *adapter)
2508 {
2509 struct em_buffer *tx_buffer;
2510 int i;
2511
2512 INIT_DEBUGOUT("free_transmit_structures: begin");
2513
2514 if (adapter->tx_buffer_area != NULL) {
2515 tx_buffer = adapter->tx_buffer_area;
2516 for (i = 0; i < adapter->num_tx_desc; i++, tx_buffer++) {
2517 if (tx_buffer->m_head != NULL) {
2518 bus_dmamap_sync(adapter->txtag, tx_buffer->map,
2519 BUS_DMASYNC_POSTWRITE);
2520 bus_dmamap_unload(adapter->txtag,
2521 tx_buffer->map);
2522 m_freem(tx_buffer->m_head);
2523 tx_buffer->m_head = NULL;
2524 } else if (tx_buffer->map != NULL)
2525 bus_dmamap_unload(adapter->txtag,
2526 tx_buffer->map);
2527 if (tx_buffer->map != NULL) {
2528 bus_dmamap_destroy(adapter->txtag,
2529 tx_buffer->map);
2530 tx_buffer->map = NULL;
2531 }
2532 }
2533 }
2534 if (adapter->tx_buffer_area != NULL) {
2535 free(adapter->tx_buffer_area, M_DEVBUF);
2536 adapter->tx_buffer_area = NULL;
2537 }
2538 if (adapter->txtag != NULL) {
2539 bus_dma_tag_destroy(adapter->txtag);
2540 adapter->txtag = NULL;
2541 }
2542 }
2543
2544 /*********************************************************************
2545 *
2546 * The offload context needs to be set when we transfer the first
2547 * packet of a particular protocol (TCP/UDP). We change the
2548 * context only if the protocol type changes.
2549 *
2550 **********************************************************************/
2551 static void
2552 em_transmit_checksum_setup(struct adapter *adapter, struct mbuf *mp,
2553 uint32_t *txd_upper, uint32_t *txd_lower)
2554 {
2555 struct em_context_desc *TXD;
2556 struct em_buffer *tx_buffer;
2557 int curr_txd;
2558
2559 if (mp->m_pkthdr.csum_flags) {
2560
2561 if (mp->m_pkthdr.csum_flags & CSUM_TCP) {
2562 *txd_upper = E1000_TXD_POPTS_TXSM << 8;
2563 *txd_lower = E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D;
2564 if (adapter->active_checksum_context == OFFLOAD_TCP_IP)
2565 return;
2566 else
2567 adapter->active_checksum_context =
2568 OFFLOAD_TCP_IP;
2569 } else if (mp->m_pkthdr.csum_flags & CSUM_UDP) {
2570 *txd_upper = E1000_TXD_POPTS_TXSM << 8;
2571 *txd_lower = E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D;
2572 if (adapter->active_checksum_context == OFFLOAD_UDP_IP)
2573 return;
2574 else
2575 adapter->active_checksum_context =
2576 OFFLOAD_UDP_IP;
2577 } else {
2578 *txd_upper = 0;
2579 *txd_lower = 0;
2580 return;
2581 }
2582 } else {
2583 *txd_upper = 0;
2584 *txd_lower = 0;
2585 return;
2586 }
2587
2588 /* If we reach this point, the checksum offload context
2589 * needs to be reset.
2590 */
2591 curr_txd = adapter->next_avail_tx_desc;
2592 tx_buffer = &adapter->tx_buffer_area[curr_txd];
2593 TXD = (struct em_context_desc *) &adapter->tx_desc_base[curr_txd];
2594
2595 TXD->lower_setup.ip_fields.ipcss = ETHER_HDR_LEN;
2596 TXD->lower_setup.ip_fields.ipcso =
2597 ETHER_HDR_LEN + offsetof(struct ip, ip_sum);
2598 TXD->lower_setup.ip_fields.ipcse =
2599 htole16(ETHER_HDR_LEN + sizeof(struct ip) - 1);
2600
2601 TXD->upper_setup.tcp_fields.tucss =
2602 ETHER_HDR_LEN + sizeof(struct ip);
2603 TXD->upper_setup.tcp_fields.tucse = htole16(0);
2604
2605 if (adapter->active_checksum_context == OFFLOAD_TCP_IP) {
2606 TXD->upper_setup.tcp_fields.tucso =
2607 ETHER_HDR_LEN + sizeof(struct ip) +
2608 offsetof(struct tcphdr, th_sum);
2609 } else if (adapter->active_checksum_context == OFFLOAD_UDP_IP) {
2610 TXD->upper_setup.tcp_fields.tucso =
2611 ETHER_HDR_LEN + sizeof(struct ip) +
2612 offsetof(struct udphdr, uh_sum);
2613 }
2614
2615 TXD->tcp_seg_setup.data = htole32(0);
2616 TXD->cmd_and_length = htole32(adapter->txd_cmd | E1000_TXD_CMD_DEXT);
2617
2618 tx_buffer->m_head = NULL;
2619 tx_buffer->next_eop = -1;
2620
2621 if (++curr_txd == adapter->num_tx_desc)
2622 curr_txd = 0;
2623
2624 adapter->num_tx_desc_avail--;
2625 adapter->next_avail_tx_desc = curr_txd;
2626 }
2627
/**********************************************************************
 *
 *  Examine each tx_buffer in the used queue. If the hardware is done
 *  processing the packet then free associated resources. The
 *  tx_buffer is put back on the free queue.
 *
 *  Walks packets (not single descriptors): the first buffer of each
 *  packet records the index of its EOP descriptor in next_eop, and a
 *  packet is reclaimed only once that EOP descriptor has DD set.
 *
 **********************************************************************/
static void
em_txeof(struct adapter *adapter)
{
	int s;
	int first, last, done, num_avail;
	struct em_buffer *tx_buffer;
	struct em_tx_desc *tx_desc, *eop_desc;
	struct ifnet *ifp = &adapter->interface_data.ac_if;

	/* Old-style spl protection for the TX ring state. */
	s = splimp();

	/* Entire ring free: nothing to reclaim. */
	if (adapter->num_tx_desc_avail == adapter->num_tx_desc)
		goto out;

	num_avail = adapter->num_tx_desc_avail;
	first = adapter->next_tx_to_clean;
	tx_desc = &adapter->tx_desc_base[first];
	tx_buffer = &adapter->tx_buffer_area[first];
	/* next_eop of the packet's first slot locates its EOP descriptor. */
	last = tx_buffer->next_eop;
	eop_desc = &adapter->tx_desc_base[last];

	/*
	 * What this does is get the index of the
	 * first descriptor AFTER the EOP of the
	 * first packet, that way we can do the
	 * simple comparison on the inner while loop.
	 */
	if (++last == adapter->num_tx_desc) last = 0;
	done = last;

	/* Pick up descriptor status words written back by the hardware. */
	bus_dmamap_sync(adapter->txdma.dma_tag, adapter->txdma.dma_map,
	    BUS_DMASYNC_POSTREAD);

	while (eop_desc->upper.fields.status & E1000_TXD_STAT_DD) {
		/* We clean the range of the packet */
		while (first != done) {
			tx_desc->upper.data = 0;
			tx_desc->lower.data = 0;
			num_avail++;

			if (tx_buffer->m_head) {
				ifp->if_opackets++;
				bus_dmamap_sync(adapter->txtag,
				    tx_buffer->map,
				    BUS_DMASYNC_POSTWRITE);
				bus_dmamap_unload(adapter->txtag,
				    tx_buffer->map);

				m_freem(tx_buffer->m_head);
				tx_buffer->m_head = NULL;
			}
			tx_buffer->next_eop = -1;

			if (++first == adapter->num_tx_desc)
				first = 0;

			tx_buffer = &adapter->tx_buffer_area[first];
			tx_desc = &adapter->tx_desc_base[first];
		}
		/* See if we can continue to the next packet */
		last = tx_buffer->next_eop;
		if (last != -1) {
			eop_desc = &adapter->tx_desc_base[last];
			/* Get new done point */
			if (++last == adapter->num_tx_desc) last = 0;
			done = last;
		} else
			break;
	}
	/* Hand the (now zeroed) descriptors back to the hardware's view. */
	bus_dmamap_sync(adapter->txdma.dma_tag, adapter->txdma.dma_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	adapter->next_tx_to_clean = first;

	/*
	 * If we have enough room, clear IFF_OACTIVE to tell the stack
	 * that it is OK to send packets.
	 * If there are no pending descriptors, clear the timeout. Otherwise,
	 * if some descriptors have been freed, restart the timeout.
	 */
	if (num_avail > EM_TX_CLEANUP_THRESHOLD) {
		ifp->if_flags &= ~IFF_OACTIVE;
		/* All clean, turn off the timer */
		if (num_avail == adapter->num_tx_desc)
			ifp->if_timer = 0;
		/* Some cleaned, reset the timer */
		else if (num_avail != adapter->num_tx_desc_avail)
			ifp->if_timer = EM_TX_TIMEOUT;
	}
	adapter->num_tx_desc_avail = num_avail;
out:
	splx(s);
}
2728
/*********************************************************************
 *
 *  Get a buffer from system mbuf buffer pool.
 *
 *  Fills RX descriptor slot i with a cluster mbuf: either a freshly
 *  allocated one (mp == NULL) or a recycled caller-supplied cluster.
 *  Loads the mbuf for DMA and writes its bus address into the
 *  descriptor.  Returns 0, ENOBUFS, or a bus_dma error.
 *
 **********************************************************************/
static int
em_get_buf(int i, struct adapter *adapter, struct mbuf *mp)
{
	struct ifnet *ifp = &adapter->interface_data.ac_if;
	bus_dma_segment_t segs[1];
	struct em_buffer *rx_buffer;
	int error, nsegs;

	if (mp == NULL) {
		/* Allocate a new packet-header mbuf with a cluster. */
		mp = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
		if (mp == NULL) {
			adapter->mbuf_cluster_failed++;
			return (ENOBUFS);
		}
		mp->m_len = mp->m_pkthdr.len = MCLBYTES;
	} else {
		/* Recycle the caller's cluster: reset length and data ptr. */
		mp->m_len = mp->m_pkthdr.len = MCLBYTES;
		mp->m_data = mp->m_ext.ext_buf;
		mp->m_next = NULL;
	}

	/*
	 * For standard MTU, offset the payload so the IP header lands
	 * on a natural alignment boundary (ETHER_ALIGN = 2 bytes).
	 */
	if (ifp->if_mtu <= ETHERMTU)
		m_adj(mp, ETHER_ALIGN);

	rx_buffer = &adapter->rx_buffer_area[i];

	/*
	 * Using memory from the mbuf cluster pool, invoke the
	 * bus_dma machinery to arrange the memory mapping.
	 */
	error = bus_dmamap_load_mbuf_sg(adapter->rxtag, rx_buffer->map,
	    mp, segs, &nsegs, 0);
	if (error != 0) {
		/* Single mbuf (m_next was cleared above), so m_free suffices. */
		m_free(mp);
		return (error);
	}
	/* If nsegs is wrong then the stack is corrupt. */
	KASSERT(nsegs == 1, ("Too many segments returned!"));
	rx_buffer->m_head = mp;
	adapter->rx_desc_base[i].buffer_addr = htole64(segs[0].ds_addr);
	bus_dmamap_sync(adapter->rxtag, rx_buffer->map, BUS_DMASYNC_PREREAD);

	return (0);
}
2778
2779 /*********************************************************************
2780 *
2781 * Allocate memory for rx_buffer structures. Since we use one
2782 * rx_buffer per received packet, the maximum number of rx_buffer's
2783 * that we'll need is equal to the number of receive descriptors
2784 * that we've allocated.
2785 *
2786 **********************************************************************/
2787 static int
2788 em_allocate_receive_structures(struct adapter *adapter)
2789 {
2790 device_t dev = adapter->dev;
2791 struct em_buffer *rx_buffer;
2792 int i, error;
2793
2794 adapter->rx_buffer_area = malloc(sizeof(struct em_buffer) *
2795 adapter->num_rx_desc, M_DEVBUF, M_NOWAIT);
2796 if (adapter->rx_buffer_area == NULL) {
2797 device_printf(dev, "Unable to allocate rx_buffer memory\n");
2798 return (ENOMEM);
2799 }
2800
2801 bzero(adapter->rx_buffer_area,
2802 sizeof(struct em_buffer) * adapter->num_rx_desc);
2803
2804 error = bus_dma_tag_create(NULL, /* parent */
2805 1, 0, /* alignment, bounds */
2806 BUS_SPACE_MAXADDR, /* lowaddr */
2807 BUS_SPACE_MAXADDR, /* highaddr */
2808 NULL, NULL, /* filter, filterarg */
2809 MCLBYTES, /* maxsize */
2810 1, /* nsegments */
2811 MCLBYTES, /* maxsegsize */
2812 0, /* flags */
2813 &adapter->rxtag);
2814 if (error) {
2815 device_printf(dev, "%s: bus_dma_tag_create failed %d\n",
2816 __func__, error);
2817 goto fail;
2818 }
2819
2820 rx_buffer = adapter->rx_buffer_area;
2821 for (i = 0; i < adapter->num_rx_desc; i++, rx_buffer++) {
2822 error = bus_dmamap_create(adapter->rxtag, BUS_DMA_NOWAIT,
2823 &rx_buffer->map);
2824 if (error) {
2825 device_printf(dev, "%s: bus_dmamap_create failed: %d\n",
2826 __func__, error);
2827 goto fail;
2828 }
2829 }
2830
2831 return (0);
2832
2833 fail:
2834 em_free_receive_structures(adapter);
2835 return (error);
2836 }
2837
2838 /*********************************************************************
2839 *
2840 * Allocate and initialize receive structures.
2841 *
2842 **********************************************************************/
2843 static int
2844 em_setup_receive_structures(struct adapter *adapter)
2845 {
2846 struct em_buffer *rx_buffer;
2847 int i, error;
2848
2849 bzero(adapter->rx_desc_base,
2850 (sizeof(struct em_rx_desc)) * adapter->num_rx_desc);
2851
2852 /* Free current RX buffers. */
2853 rx_buffer = adapter->rx_buffer_area;
2854 for (i = 0; i < adapter->num_rx_desc; i++, rx_buffer++) {
2855 if (rx_buffer->m_head != NULL) {
2856 bus_dmamap_sync(adapter->rxtag, rx_buffer->map,
2857 BUS_DMASYNC_POSTREAD);
2858 bus_dmamap_unload(adapter->rxtag, rx_buffer->map);
2859 m_freem(rx_buffer->m_head);
2860 rx_buffer->m_head = NULL;
2861 }
2862 }
2863
2864 /* Allocate new ones. */
2865 for (i = 0; i < adapter->num_rx_desc; i++) {
2866 error = em_get_buf(i, adapter, NULL);
2867 if (error)
2868 return (error);
2869 }
2870
2871 /* Setup our descriptor pointers */
2872 adapter->next_rx_desc_to_check = 0;
2873 bus_dmamap_sync(adapter->rxdma.dma_tag, adapter->rxdma.dma_map,
2874 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2875
2876 return (0);
2877 }
2878
/*********************************************************************
 *
 *  Enable receive unit.
 *
 *  Programs interrupt throttling, the RX descriptor ring registers,
 *  the receive control register (buffer size, long-frame enable),
 *  checksum offload, and finally the head/tail pointers, which makes
 *  the descriptors available to the hardware.
 *
 **********************************************************************/
static void
em_initialize_receive_unit(struct adapter *adapter)
{
	struct ifnet *ifp = &adapter->interface_data.ac_if;
	uint64_t	bus_addr;
	uint32_t	reg_rctl;
	uint32_t	reg_rxcsum;

	INIT_DEBUGOUT("em_initialize_receive_unit: begin");

	/*
	 * Make sure receives are disabled while setting
	 * up the descriptor ring
	 */
	E1000_WRITE_REG(&adapter->hw, RCTL, 0);

	/* RADV/ITR only exist on 82540 and later MACs. */
	if(adapter->hw.mac_type >= em_82540) {
		E1000_WRITE_REG(&adapter->hw, RADV,
		    adapter->rx_abs_int_delay.value);
		/*
		 * Set the interrupt throttling rate. Value is calculated
		 * as DEFAULT_ITR = 1/(MAX_INTS_PER_SEC * 256ns)
		 */
#define MAX_INTS_PER_SEC	8000
#define DEFAULT_ITR	     1000000000/(MAX_INTS_PER_SEC * 256)
		E1000_WRITE_REG(&adapter->hw, ITR, DEFAULT_ITR);
	}

	/* Setup the Base and Length of the Rx Descriptor Ring */
	bus_addr = adapter->rxdma.dma_paddr;
	E1000_WRITE_REG(&adapter->hw, RDLEN, adapter->num_rx_desc *
			sizeof(struct em_rx_desc));
	E1000_WRITE_REG(&adapter->hw, RDBAH, (uint32_t)(bus_addr >> 32));
	E1000_WRITE_REG(&adapter->hw, RDBAL, (uint32_t)bus_addr);

	/* Setup the Receive Control Register */
	reg_rctl = E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_LBM_NO |
		   E1000_RCTL_RDMTS_HALF |
		   (adapter->hw.mc_filter_type << E1000_RCTL_MO_SHIFT);

	/* TBI workaround requires accepting bad packets (SBP). */
	if (adapter->hw.tbi_compatibility_on == TRUE)
		reg_rctl |= E1000_RCTL_SBP;


	/* Receive buffer size; >2K sizes need BSEX and long-packet enable. */
	switch (adapter->rx_buffer_len) {
	default:
	case EM_RXBUFFER_2048:
		reg_rctl |= E1000_RCTL_SZ_2048;
		break;
	case EM_RXBUFFER_4096:
		reg_rctl |= E1000_RCTL_SZ_4096 |
		    E1000_RCTL_BSEX | E1000_RCTL_LPE;
		break;
	case EM_RXBUFFER_8192:
		reg_rctl |= E1000_RCTL_SZ_8192 |
		    E1000_RCTL_BSEX | E1000_RCTL_LPE;
		break;
	case EM_RXBUFFER_16384:
		reg_rctl |= E1000_RCTL_SZ_16384 |
		    E1000_RCTL_BSEX | E1000_RCTL_LPE;
		break;
	}

	/* Jumbo-frame MTUs also need long-packet enable. */
	if (ifp->if_mtu > ETHERMTU)
		reg_rctl |= E1000_RCTL_LPE;

	/* Enable 82543 Receive Checksum Offload for TCP and UDP */
	if ((adapter->hw.mac_type >= em_82543) &&
	    (ifp->if_capenable & IFCAP_RXCSUM)) {
		reg_rxcsum = E1000_READ_REG(&adapter->hw, RXCSUM);
		reg_rxcsum |= (E1000_RXCSUM_IPOFL | E1000_RXCSUM_TUOFL);
		E1000_WRITE_REG(&adapter->hw, RXCSUM, reg_rxcsum);
	}

	/* Enable Receives */
	E1000_WRITE_REG(&adapter->hw, RCTL, reg_rctl);
	/*
	 * Setup the HW Rx Head and
	 * Tail Descriptor Pointers
	 */
	E1000_WRITE_REG(&adapter->hw, RDH, 0);
	E1000_WRITE_REG(&adapter->hw, RDT, adapter->num_rx_desc - 1);

	return;
}
2969
2970 /*********************************************************************
2971 *
2972 * Free receive related data structures.
2973 *
2974 **********************************************************************/
2975 static void
2976 em_free_receive_structures(struct adapter *adapter)
2977 {
2978 struct em_buffer *rx_buffer;
2979 int i;
2980
2981 INIT_DEBUGOUT("free_receive_structures: begin");
2982
2983 if (adapter->rx_buffer_area != NULL) {
2984 rx_buffer = adapter->rx_buffer_area;
2985 for (i = 0; i < adapter->num_rx_desc; i++, rx_buffer++) {
2986 if (rx_buffer->m_head != NULL) {
2987 bus_dmamap_sync(adapter->rxtag, rx_buffer->map,
2988 BUS_DMASYNC_POSTREAD);
2989 bus_dmamap_unload(adapter->rxtag,
2990 rx_buffer->map);
2991 m_freem(rx_buffer->m_head);
2992 rx_buffer->m_head = NULL;
2993 } else if (rx_buffer->map != NULL)
2994 bus_dmamap_unload(adapter->rxtag,
2995 rx_buffer->map);
2996 if (rx_buffer->map != NULL) {
2997 bus_dmamap_destroy(adapter->rxtag,
2998 rx_buffer->map);
2999 rx_buffer->map = NULL;
3000 }
3001 }
3002 }
3003 if (adapter->rx_buffer_area != NULL) {
3004 free(adapter->rx_buffer_area, M_DEVBUF);
3005 adapter->rx_buffer_area = NULL;
3006 }
3007 if (adapter->rxtag != NULL) {
3008 bus_dma_tag_destroy(adapter->rxtag);
3009 adapter->rxtag = NULL;
3010 }
3011 }
3012
/*********************************************************************
 *
 *  This routine executes in interrupt context. It replenishes
 *  the mbufs in the descriptor and sends data which has been
 *  dma'ed into host memory to upper layer.
 *
 *  We loop at most count times if count is > 0, or until done if
 *  count < 0.
 *
 *********************************************************************/
static void
em_rxeof(struct adapter *adapter, int count)
{
	struct ifnet *ifp;
	struct mbuf *mp;
	struct ether_header *eh;
	uint8_t accept_frame = 0;
	uint8_t eop = 0;
	uint16_t len, desc_len, prev_len_adj;
	int i;

	/* Pointer to the receive descriptor being examined. */
	struct em_rx_desc *current_desc;
	uint8_t status;

	ifp = &adapter->interface_data.ac_if;
	i = adapter->next_rx_desc_to_check;
	current_desc = &adapter->rx_desc_base[i];
	/* Make the hardware's descriptor writes visible to the CPU. */
	bus_dmamap_sync(adapter->rxdma.dma_tag, adapter->rxdma.dma_map,
	    BUS_DMASYNC_POSTREAD);

	/* Fast exit if no descriptor has been completed (DD clear). */
	if (!((current_desc->status) & E1000_RXD_STAT_DD))
		return;

	/*
	 * Process completed descriptors while the interface is running.
	 * 'count' is decremented only on end-of-packet descriptors, so a
	 * negative count means "drain everything available".
	 */
	while ((current_desc->status & E1000_RXD_STAT_DD) &&
	    (count != 0) &&
	    (ifp->if_flags & IFF_RUNNING)) {

		mp = adapter->rx_buffer_area[i].m_head;
		/*
		 * Can't defer bus_dmamap_sync(9) because TBI_ACCEPT
		 * needs to access the last received byte in the mbuf.
		 */
		bus_dmamap_sync(adapter->rxtag, adapter->rx_buffer_area[i].map,
		    BUS_DMASYNC_POSTREAD);

		accept_frame = 1;
		prev_len_adj = 0;
		desc_len = le16toh(current_desc->length);
		status = current_desc->status;
		if (status & E1000_RXD_STAT_EOP) {
			count--;
			eop = 1;
			/*
			 * Strip the trailing CRC.  If this last fragment is
			 * shorter than the CRC itself, the remainder must be
			 * trimmed from the previous mbuf in the chain
			 * (recorded in prev_len_adj, applied below).
			 */
			if (desc_len < ETHER_CRC_LEN) {
				len = 0;
				prev_len_adj = ETHER_CRC_LEN - desc_len;
			} else
				len = desc_len - ETHER_CRC_LEN;
		} else {
			eop = 0;
			len = desc_len;
		}

		if (current_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK) {
			uint8_t last_byte;
			uint32_t pkt_len = desc_len;

			if (adapter->fmp != NULL)
				pkt_len += adapter->fmp->m_pkthdr.len;

			/*
			 * TBI (fiber) workaround: a frame flagged in error
			 * may still be acceptable if it merely ends in a
			 * carrier-extension byte; if so, fix up the stats
			 * and drop that final byte.
			 */
			last_byte = *(mtod(mp, caddr_t) + desc_len - 1);
			if (TBI_ACCEPT(&adapter->hw, status,
			    current_desc->errors, pkt_len, last_byte)) {
				em_tbi_adjust_stats(&adapter->hw,
				    &adapter->stats, pkt_len,
				    adapter->hw.mac_addr);
				if (len > 0)
					len--;
			} else
				accept_frame = 0;
		}

		if (accept_frame) {
			/*
			 * Replenish the ring slot with a fresh mbuf.  On
			 * failure, recycle the just-received mbuf back into
			 * the slot and drop the partial chain.
			 */
			if (em_get_buf(i, adapter, NULL) == ENOBUFS) {
				adapter->dropped_pkts++;
				em_get_buf(i, adapter, mp);
				if (adapter->fmp != NULL)
					m_freem(adapter->fmp);
				adapter->fmp = NULL;
				adapter->lmp = NULL;
				break;
			}

			/* Assign correct length to the current fragment */
			mp->m_len = len;

			if (adapter->fmp == NULL) {
				mp->m_pkthdr.len = len;
				adapter->fmp = mp; /* Store the first mbuf */
				adapter->lmp = mp;
			} else {
				/* Chain mbuf's together */
				mp->m_flags &= ~M_PKTHDR;
				/*
				 * Adjust length of previous mbuf in chain if
				 * we received less than 4 bytes in the last
				 * descriptor.
				 */
				if (prev_len_adj > 0) {
					adapter->lmp->m_len -= prev_len_adj;
					adapter->fmp->m_pkthdr.len -=
					    prev_len_adj;
				}
				adapter->lmp->m_next = mp;
				adapter->lmp = adapter->lmp->m_next;
				adapter->fmp->m_pkthdr.len += len;
			}

			if (eop) {
				/* Complete frame: pass it up the stack. */
				adapter->fmp->m_pkthdr.rcvif = ifp;
				ifp->if_ipackets++;

				eh = mtod(adapter->fmp, struct ether_header *);
				/* Remove ethernet header from mbuf */
				m_adj(adapter->fmp, sizeof(struct ether_header));
				em_receive_checksum(adapter, current_desc,
				    adapter->fmp);
				if (status & E1000_RXD_STAT_VP)
					VLAN_INPUT_TAG(eh, adapter->fmp,
					    (le16toh(current_desc->special) &
					    E1000_RXD_SPC_VLAN_MASK));
				else
					ether_input(ifp, eh, adapter->fmp);

				adapter->fmp = NULL;
				adapter->lmp = NULL;
			}
		} else {
			/* Rejected frame: recycle mbuf, drop the chain. */
			adapter->dropped_pkts++;
			em_get_buf(i, adapter, mp);
			if (adapter->fmp != NULL)
				m_freem(adapter->fmp);
			adapter->fmp = NULL;
			adapter->lmp = NULL;
		}

		/* Zero out the receive descriptors status. */
		current_desc->status = 0;
		bus_dmamap_sync(adapter->rxdma.dma_tag, adapter->rxdma.dma_map,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

		/* Advance our pointers to the next descriptor. */
		if (++i == adapter->num_rx_desc)
			i = 0;
		current_desc = &adapter->rx_desc_base[i];
	}
	adapter->next_rx_desc_to_check = i;

	/* Advance the E1000's Receive Queue #0 "Tail Pointer". */
	if (--i < 0)
		i = adapter->num_rx_desc - 1;
	E1000_WRITE_REG(&adapter->hw, RDT, i);
}
3176
3177 /*********************************************************************
3178 *
3179 * Verify that the hardware indicated that the checksum is valid.
3180 * Inform the stack about the status of checksum so that stack
3181 * doesn't spend time verifying the checksum.
3182 *
3183 *********************************************************************/
3184 static void
3185 em_receive_checksum(struct adapter *adapter,
3186 struct em_rx_desc *rx_desc, struct mbuf *mp)
3187 {
3188 /* 82543 or newer only */
3189 if ((adapter->hw.mac_type < em_82543) ||
3190 /* Ignore Checksum bit is set */
3191 (rx_desc->status & E1000_RXD_STAT_IXSM)) {
3192 mp->m_pkthdr.csum_flags = 0;
3193 return;
3194 }
3195
3196 if (rx_desc->status & E1000_RXD_STAT_IPCS) {
3197 /* Did it pass? */
3198 if (!(rx_desc->errors & E1000_RXD_ERR_IPE)) {
3199 /* IP Checksum Good */
3200 mp->m_pkthdr.csum_flags = CSUM_IP_CHECKED;
3201 mp->m_pkthdr.csum_flags |= CSUM_IP_VALID;
3202
3203 } else {
3204 mp->m_pkthdr.csum_flags = 0;
3205 }
3206 }
3207
3208 if (rx_desc->status & E1000_RXD_STAT_TCPCS) {
3209 /* Did it pass? */
3210 if (!(rx_desc->errors & E1000_RXD_ERR_TCPE)) {
3211 mp->m_pkthdr.csum_flags |=
3212 (CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
3213 mp->m_pkthdr.csum_data = htons(0xffff);
3214 }
3215 }
3216 }
3217
3218
3219 static void
3220 em_enable_vlans(struct adapter *adapter)
3221 {
3222 uint32_t ctrl;
3223
3224 E1000_WRITE_REG(&adapter->hw, VET, ETHERTYPE_VLAN);
3225
3226 ctrl = E1000_READ_REG(&adapter->hw, CTRL);
3227 ctrl |= E1000_CTRL_VME;
3228 E1000_WRITE_REG(&adapter->hw, CTRL, ctrl);
3229 }
3230
3231 static void
3232 em_disable_vlans(struct adapter *adapter)
3233 {
3234 uint32_t ctrl;
3235
3236 ctrl = E1000_READ_REG(&adapter->hw, CTRL);
3237 ctrl &= ~E1000_CTRL_VME;
3238 E1000_WRITE_REG(&adapter->hw, CTRL, ctrl);
3239 }
3240
/* Unmask the driver's standard interrupt causes (IMS_ENABLE_MASK). */
static void
em_enable_intr(struct adapter *adapter)
{
	E1000_WRITE_REG(&adapter->hw, IMS, (IMS_ENABLE_MASK));
}
3246
/*
 * Mask interrupts, except RXSEQ on early 82542 parts (see below).
 */
static void
em_disable_intr(struct adapter *adapter)
{
	/*
	 * The first version of 82542 had an errata where, when link was
	 * forced, it would stay up even if the cable was disconnected.
	 * Sequence errors were used to detect the disconnect and then
	 * the driver would unforce the link.  That code is in the ISR.
	 * For this to work correctly the Sequence error interrupt had
	 * to be enabled all the time.
	 */

	if (adapter->hw.mac_type == em_82542_rev2_0)
		E1000_WRITE_REG(&adapter->hw, IMC,
		    (0xffffffff & ~E1000_IMC_RXSEQ));
	else
		E1000_WRITE_REG(&adapter->hw, IMC, 0xffffffff);
}
3265
3266 static int
3267 em_is_valid_ether_addr(uint8_t *addr)
3268 {
3269 char zero_addr[6] = { 0, 0, 0, 0, 0, 0 };
3270
3271 if ((addr[0] & 1) || (!bcmp(addr, zero_addr, ETHER_ADDR_LEN))) {
3272 return (FALSE);
3273 }
3274
3275 return (TRUE);
3276 }
3277
/* Write the 16-bit value '*value' to PCI config space offset 'reg'. */
void
em_write_pci_cfg(struct em_hw *hw, uint32_t reg, uint16_t *value)
{
	pci_write_config(((struct em_osdep *)hw->back)->dev, reg, *value, 2);
}
3283
/* Read a 16-bit value from PCI config space offset 'reg' into '*value'. */
void
em_read_pci_cfg(struct em_hw *hw, uint32_t reg, uint16_t *value)
{
	*value = pci_read_config(((struct em_osdep *)hw->back)->dev, reg, 2);
}
3289
/*
 * Enable Memory Write Invalidate: write the cached PCI command word
 * (hw->pci_cmd_word) with the MWI bit set.
 */
void
em_pci_set_mwi(struct em_hw *hw)
{
	pci_write_config(((struct em_osdep *)hw->back)->dev, PCIR_COMMAND,
	    (hw->pci_cmd_word | CMD_MEM_WRT_INVALIDATE), 2);
}
3296
/*
 * Disable Memory Write Invalidate: write the cached PCI command word
 * (hw->pci_cmd_word) with the MWI bit cleared.
 */
void
em_pci_clear_mwi(struct em_hw *hw)
{
	pci_write_config(((struct em_osdep *)hw->back)->dev, PCIR_COMMAND,
	    (hw->pci_cmd_word & ~CMD_MEM_WRT_INVALIDATE), 2);
}
3303
3304 uint32_t
3305 em_io_read(struct em_hw *hw, unsigned long port)
3306 {
3307 struct em_osdep *io = hw->back;
3308 uint32_t ret;
3309
3310 ret = bus_space_read_4(io->io_bus_space_tag,
3311 io->io_bus_space_handle, port);
3312 return (ret);
3313 }
3314
3315 void
3316 em_io_write(struct em_hw *hw, unsigned long port, uint32_t value)
3317 {
3318 struct em_osdep *io = hw->back;
3319
3320 bus_space_write_4(io->io_bus_space_tag,
3321 io->io_bus_space_handle, port, value);
3322 return;
3323 }
3324
/*
 * We may eventually really do this, but its unnecessary
 * for now so we just return unsupported.
 */
int32_t
em_read_pcie_cap_reg(struct em_hw *hw, uint32_t reg, uint16_t *value)
{
	/*
	 * Stub: '*value' is left untouched.  NOTE(review): returns 0,
	 * which the comment above calls "unsupported" but callers could
	 * read as success -- confirm how the shared code interprets it.
	 */
	return (0);
}
3334
3335 /*********************************************************************
3336 * 82544 Coexistence issue workaround.
3337 * There are 2 issues.
3338 * 1. Transmit Hang issue.
3339 * To detect this issue, following equation can be used...
3340 * SIZE[3:0] + ADDR[2:0] = SUM[3:0].
3341 * If SUM[3:0] is in between 1 to 4, we will have this issue.
3342 *
3343 * 2. DAC issue.
3344 * To detect this issue, following equation can be used...
3345 * SIZE[3:0] + ADDR[2:0] = SUM[3:0].
3346 * If SUM[3:0] is in between 9 to c, we will have this issue.
3347 *
3348 *
3349 * WORKAROUND:
3350 * Make sure we do not have ending address
3351 * as 1,2,3,4(Hang) or 9,a,b,c (DAC)
3352 *
3353 *************************************************************************/
3354 static uint32_t
3355 em_fill_descriptors (bus_addr_t address, uint32_t length,
3356 PDESC_ARRAY desc_array)
3357 {
3358 /* Since issue is sensitive to length and address.*/
3359 /* Let us first check the address...*/
3360 uint32_t safe_terminator;
3361 if (length <= 4) {
3362 desc_array->descriptor[0].address = address;
3363 desc_array->descriptor[0].length = length;
3364 desc_array->elements = 1;
3365 return (desc_array->elements);
3366 }
3367 safe_terminator = (uint32_t)((((uint32_t)address & 0x7) +
3368 (length & 0xF)) & 0xF);
3369 /* if it does not fall between 0x1 to 0x4 and 0x9 to 0xC then return */
3370 if (safe_terminator == 0 ||
3371 (safe_terminator > 4 &&
3372 safe_terminator < 9) ||
3373 (safe_terminator > 0xC &&
3374 safe_terminator <= 0xF)) {
3375 desc_array->descriptor[0].address = address;
3376 desc_array->descriptor[0].length = length;
3377 desc_array->elements = 1;
3378 return (desc_array->elements);
3379 }
3380
3381 desc_array->descriptor[0].address = address;
3382 desc_array->descriptor[0].length = length - 4;
3383 desc_array->descriptor[1].address = address + (length - 4);
3384 desc_array->descriptor[1].length = 4;
3385 desc_array->elements = 2;
3386 return (desc_array->elements);
3387 }
3388
/**********************************************************************
 *
 *  Update the board statistics counters.
 *
 *  Accumulates the hardware's statistics registers into
 *  adapter->stats and derives the ifnet error/collision totals.
 *
 **********************************************************************/
static void
em_update_stats_counters(struct adapter *adapter)
{
	struct ifnet *ifp;

	/* Symbol/sequence errors: read on copper, or when link is up. */
	if(adapter->hw.media_type == em_media_type_copper ||
	    (E1000_READ_REG(&adapter->hw, STATUS) & E1000_STATUS_LU)) {
		adapter->stats.symerrs += E1000_READ_REG(&adapter->hw, SYMERRS);
		adapter->stats.sec += E1000_READ_REG(&adapter->hw, SEC);
	}
	adapter->stats.crcerrs += E1000_READ_REG(&adapter->hw, CRCERRS);
	adapter->stats.mpc += E1000_READ_REG(&adapter->hw, MPC);
	adapter->stats.scc += E1000_READ_REG(&adapter->hw, SCC);
	adapter->stats.ecol += E1000_READ_REG(&adapter->hw, ECOL);

	adapter->stats.mcc += E1000_READ_REG(&adapter->hw, MCC);
	adapter->stats.latecol += E1000_READ_REG(&adapter->hw, LATECOL);
	adapter->stats.colc += E1000_READ_REG(&adapter->hw, COLC);
	adapter->stats.dc += E1000_READ_REG(&adapter->hw, DC);
	adapter->stats.rlec += E1000_READ_REG(&adapter->hw, RLEC);
	adapter->stats.xonrxc += E1000_READ_REG(&adapter->hw, XONRXC);
	adapter->stats.xontxc += E1000_READ_REG(&adapter->hw, XONTXC);
	adapter->stats.xoffrxc += E1000_READ_REG(&adapter->hw, XOFFRXC);
	adapter->stats.xofftxc += E1000_READ_REG(&adapter->hw, XOFFTXC);
	adapter->stats.fcruc += E1000_READ_REG(&adapter->hw, FCRUC);
	adapter->stats.prc64 += E1000_READ_REG(&adapter->hw, PRC64);
	adapter->stats.prc127 += E1000_READ_REG(&adapter->hw, PRC127);
	adapter->stats.prc255 += E1000_READ_REG(&adapter->hw, PRC255);
	adapter->stats.prc511 += E1000_READ_REG(&adapter->hw, PRC511);
	adapter->stats.prc1023 += E1000_READ_REG(&adapter->hw, PRC1023);
	adapter->stats.prc1522 += E1000_READ_REG(&adapter->hw, PRC1522);
	adapter->stats.gprc += E1000_READ_REG(&adapter->hw, GPRC);
	adapter->stats.bprc += E1000_READ_REG(&adapter->hw, BPRC);
	adapter->stats.mprc += E1000_READ_REG(&adapter->hw, MPRC);
	adapter->stats.gptc += E1000_READ_REG(&adapter->hw, GPTC);

	/* For the 64-bit byte counters the low dword must be read first. */
	/* Both registers clear on the read of the high dword */

	adapter->stats.gorcl += E1000_READ_REG(&adapter->hw, GORCL);
	adapter->stats.gorch += E1000_READ_REG(&adapter->hw, GORCH);
	adapter->stats.gotcl += E1000_READ_REG(&adapter->hw, GOTCL);
	adapter->stats.gotch += E1000_READ_REG(&adapter->hw, GOTCH);

	adapter->stats.rnbc += E1000_READ_REG(&adapter->hw, RNBC);
	adapter->stats.ruc += E1000_READ_REG(&adapter->hw, RUC);
	adapter->stats.rfc += E1000_READ_REG(&adapter->hw, RFC);
	adapter->stats.roc += E1000_READ_REG(&adapter->hw, ROC);
	adapter->stats.rjc += E1000_READ_REG(&adapter->hw, RJC);

	adapter->stats.torl += E1000_READ_REG(&adapter->hw, TORL);
	adapter->stats.torh += E1000_READ_REG(&adapter->hw, TORH);
	adapter->stats.totl += E1000_READ_REG(&adapter->hw, TOTL);
	adapter->stats.toth += E1000_READ_REG(&adapter->hw, TOTH);

	adapter->stats.tpr += E1000_READ_REG(&adapter->hw, TPR);
	adapter->stats.tpt += E1000_READ_REG(&adapter->hw, TPT);
	adapter->stats.ptc64 += E1000_READ_REG(&adapter->hw, PTC64);
	adapter->stats.ptc127 += E1000_READ_REG(&adapter->hw, PTC127);
	adapter->stats.ptc255 += E1000_READ_REG(&adapter->hw, PTC255);
	adapter->stats.ptc511 += E1000_READ_REG(&adapter->hw, PTC511);
	adapter->stats.ptc1023 += E1000_READ_REG(&adapter->hw, PTC1023);
	adapter->stats.ptc1522 += E1000_READ_REG(&adapter->hw, PTC1522);
	adapter->stats.mptc += E1000_READ_REG(&adapter->hw, MPTC);
	adapter->stats.bptc += E1000_READ_REG(&adapter->hw, BPTC);

	/* These counters exist on 82543 and newer parts only. */
	if (adapter->hw.mac_type >= em_82543) {
		adapter->stats.algnerrc +=
		    E1000_READ_REG(&adapter->hw, ALGNERRC);
		adapter->stats.rxerrc +=
		    E1000_READ_REG(&adapter->hw, RXERRC);
		adapter->stats.tncrs +=
		    E1000_READ_REG(&adapter->hw, TNCRS);
		adapter->stats.cexterr +=
		    E1000_READ_REG(&adapter->hw, CEXTERR);
		adapter->stats.tsctc +=
		    E1000_READ_REG(&adapter->hw, TSCTC);
		adapter->stats.tsctfc +=
		    E1000_READ_REG(&adapter->hw, TSCTFC);
	}
	ifp = &adapter->interface_data.ac_if;

	/* Publish the derived totals to the ifnet. */
	ifp->if_collisions = adapter->stats.colc;

	/* Rx Errors */
	ifp->if_ierrors = adapter->dropped_pkts + adapter->stats.rxerrc +
	    adapter->stats.crcerrs + adapter->stats.algnerrc +
	    adapter->stats.ruc + adapter->stats.roc +
	    adapter->stats.mpc + adapter->stats.cexterr;

	/* Tx Errors */
	ifp->if_oerrors = adapter->stats.ecol +
	    adapter->stats.latecol + adapter->watchdog_events;
}
3488
3489
3490 /**********************************************************************
3491 *
3492 * This routine is called only when em_display_debug_stats is enabled.
3493 * This routine provides a way to take a look at important statistics
3494 * maintained by the driver and hardware.
3495 *
3496 **********************************************************************/
3497 static void
3498 em_print_debug_info(struct adapter *adapter)
3499 {
3500 device_t dev = adapter->dev;
3501 uint8_t *hw_addr = adapter->hw.hw_addr;
3502
3503 device_printf(dev, "Adapter hardware address = %p \n", hw_addr);
3504 device_printf(dev, "CTRL = 0x%x RCTL = 0x%x \n",
3505 E1000_READ_REG(&adapter->hw, CTRL),
3506 E1000_READ_REG(&adapter->hw, RCTL));
3507 device_printf(dev, "Packet buffer = Tx=%dk Rx=%dk \n",
3508 ((E1000_READ_REG(&adapter->hw, PBA) & 0xffff0000) >> 16),\
3509 (E1000_READ_REG(&adapter->hw, PBA) & 0xffff) );
3510 device_printf(dev, "Flow control watermarks high = %d low = %d\n",
3511 adapter->hw.fc_high_water,
3512 adapter->hw.fc_low_water);
3513 device_printf(dev, "tx_int_delay = %d, tx_abs_int_delay = %d\n",
3514 E1000_READ_REG(&adapter->hw, TIDV),
3515 E1000_READ_REG(&adapter->hw, TADV));
3516 device_printf(dev, "rx_int_delay = %d, rx_abs_int_delay = %d\n",
3517 E1000_READ_REG(&adapter->hw, RDTR),
3518 E1000_READ_REG(&adapter->hw, RADV));
3519 device_printf(dev, "fifo workaround = %lld, fifo_reset_count = %lld\n",
3520 (long long)adapter->tx_fifo_wrk_cnt,
3521 (long long)adapter->tx_fifo_reset_cnt);
3522 device_printf(dev, "hw tdh = %d, hw tdt = %d\n",
3523 E1000_READ_REG(&adapter->hw, TDH),
3524 E1000_READ_REG(&adapter->hw, TDT));
3525 device_printf(dev, "Num Tx descriptors avail = %d\n",
3526 adapter->num_tx_desc_avail);
3527 device_printf(dev, "Tx Descriptors not avail1 = %ld\n",
3528 adapter->no_tx_desc_avail1);
3529 device_printf(dev, "Tx Descriptors not avail2 = %ld\n",
3530 adapter->no_tx_desc_avail2);
3531 device_printf(dev, "Std mbuf failed = %ld\n",
3532 adapter->mbuf_alloc_failed);
3533 device_printf(dev, "Std mbuf cluster failed = %ld\n",
3534 adapter->mbuf_cluster_failed);
3535 device_printf(dev, "Driver dropped packets = %ld\n",
3536 adapter->dropped_pkts);
3537 device_printf(dev, "Driver tx dma failure in encap = %ld\n",
3538 adapter->no_tx_dma_setup);
3539 }
3540
/*
 * Dump the accumulated MAC statistics (see em_update_stats_counters())
 * to the console.
 */
static void
em_print_hw_stats(struct adapter *adapter)
{
	device_t dev = adapter->dev;

	device_printf(dev, "Excessive collisions = %lld\n",
	    (long long)adapter->stats.ecol);
#if (DEBUG_HW > 0)  /* Don't output these errors normally */
	device_printf(dev, "Symbol errors = %lld\n",
	    (long long)adapter->stats.symerrs);
#endif
	device_printf(dev, "Sequence errors = %lld\n",
	    (long long)adapter->stats.sec);
	device_printf(dev, "Defer count = %lld\n",
	    (long long)adapter->stats.dc);
	device_printf(dev, "Missed Packets = %lld\n",
	    (long long)adapter->stats.mpc);
	device_printf(dev, "Receive No Buffers = %lld\n",
	    (long long)adapter->stats.rnbc);
	/* RLEC is inaccurate on some hardware, calculate our own. */
	device_printf(dev, "Receive Length Errors = %lld\n",
	    ((long long)adapter->stats.roc + (long long)adapter->stats.ruc));
	device_printf(dev, "Receive errors = %lld\n",
	    (long long)adapter->stats.rxerrc);
	device_printf(dev, "Crc errors = %lld\n",
	    (long long)adapter->stats.crcerrs);
	device_printf(dev, "Alignment errors = %lld\n",
	    (long long)adapter->stats.algnerrc);
	device_printf(dev, "Carrier extension errors = %lld\n",
	    (long long)adapter->stats.cexterr);
	device_printf(dev, "RX overruns = %ld\n", adapter->rx_overruns);
	device_printf(dev, "watchdog timeouts = %ld\n",
	    adapter->watchdog_events);
	device_printf(dev, "XON Rcvd = %lld\n",
	    (long long)adapter->stats.xonrxc);
	device_printf(dev, "XON Xmtd = %lld\n",
	    (long long)adapter->stats.xontxc);
	device_printf(dev, "XOFF Rcvd = %lld\n",
	    (long long)adapter->stats.xoffrxc);
	device_printf(dev, "XOFF Xmtd = %lld\n",
	    (long long)adapter->stats.xofftxc);
	device_printf(dev, "Good Packets Rcvd = %lld\n",
	    (long long)adapter->stats.gprc);
	device_printf(dev, "Good Packets Xmtd = %lld\n",
	    (long long)adapter->stats.gptc);
}
3587
3588 static int
3589 em_sysctl_debug_info(SYSCTL_HANDLER_ARGS)
3590 {
3591 struct adapter *adapter;
3592 int error;
3593 int result;
3594
3595 result = -1;
3596 error = sysctl_handle_int(oidp, &result, 0, req);
3597
3598 if (error || !req->newptr)
3599 return (error);
3600
3601 if (result == 1) {
3602 adapter = (struct adapter *)arg1;
3603 em_print_debug_info(adapter);
3604 }
3605
3606 return (error);
3607 }
3608
3609
3610 static int
3611 em_sysctl_stats(SYSCTL_HANDLER_ARGS)
3612 {
3613 struct adapter *adapter;
3614 int error;
3615 int result;
3616
3617 result = -1;
3618 error = sysctl_handle_int(oidp, &result, 0, req);
3619
3620 if (error || !req->newptr)
3621 return (error);
3622
3623 if (result == 1) {
3624 adapter = (struct adapter *)arg1;
3625 em_print_hw_stats(adapter);
3626 }
3627
3628 return (error);
3629 }
3630
/*
 * Sysctl handler for the interrupt-delay knobs.  Reads report the
 * cached value (microseconds); writes validate the new value, convert
 * it to hardware ticks, and update the low 16 bits of the register
 * selected by info->offset.
 */
static int
em_sysctl_int_delay(SYSCTL_HANDLER_ARGS)
{
	struct em_int_delay_info *info;
	struct adapter *adapter;
	uint32_t regval;
	int error;
	int usecs;
	int ticks;
	int s;

	info = (struct em_int_delay_info *)arg1;
	usecs = info->value;
	error = sysctl_handle_int(oidp, &usecs, 0, req);
	/* Read-only access or error: nothing more to do. */
	if (error != 0 || req->newptr == NULL)
		return (error);
	/* Reject values that don't fit the 16-bit delay field. */
	if (usecs < 0 || usecs > E1000_TICKS_TO_USECS(65535))
		return (EINVAL);
	info->value = usecs;
	ticks = E1000_USECS_TO_TICKS(usecs);

	adapter = info->adapter;

	/* Legacy spl(9) protection around the read-modify-write. */
	s = splimp();
	regval = E1000_READ_OFFSET(&adapter->hw, info->offset);
	regval = (regval & ~0xffff) | (ticks & 0xffff);
	/* Handle a few special cases. */
	switch (info->offset) {
	case E1000_RDTR:
	case E1000_82542_RDTR:
		regval |= E1000_RDT_FPDB;
		break;
	case E1000_TIDV:
	case E1000_82542_TIDV:
		if (ticks == 0) {
			/* Delay disabled: stop setting IDE on Tx descs. */
			adapter->txd_cmd &= ~E1000_TXD_CMD_IDE;
			/* Don't write 0 into the TIDV register. */
			regval++;
		} else
			adapter->txd_cmd |= E1000_TXD_CMD_IDE;
		break;
	}
	E1000_WRITE_OFFSET(&adapter->hw, info->offset, regval);
	splx(s);
	return (0);
}
3677
/*
 * Register one interrupt-delay sysctl node.  'offset' selects the
 * device register and 'value' is the initial setting stored in
 * info->value (interpreted as microseconds by em_sysctl_int_delay()).
 */
static void
em_add_int_delay_sysctl(struct adapter *adapter, const char *name,
    const char *description, struct em_int_delay_info *info,
    int offset, int value)
{
	info->adapter = adapter;
	info->offset = offset;
	info->value = value;
	SYSCTL_ADD_PROC(&adapter->sysctl_ctx,
	    SYSCTL_CHILDREN(adapter->sysctl_tree),
	    OID_AUTO, name, CTLTYPE_INT|CTLFLAG_RW,
	    info, 0, em_sysctl_int_delay, "I", description);
}
3691
/*
 * Register a plain integer sysctl backed by '*limit' (the per-call
 * packet budget consumed by em_rxeof()) and set its initial value.
 * NOTE(review): CTLTYPE_INT in the flags looks redundant for
 * SYSCTL_ADD_INT, which supplies the type itself -- confirm against
 * sysctl(9) for this FreeBSD branch.
 */
static void
em_add_rx_process_limit(struct adapter *adapter, const char *name,
    const char *description, int *limit, int value)
{
	*limit = value;
	SYSCTL_ADD_INT(&adapter->sysctl_ctx,
	    SYSCTL_CHILDREN(adapter->sysctl_tree),
	    OID_AUTO, name, CTLTYPE_INT|CTLFLAG_RW, limit, value, description);
}
Cache object: fbb0c34a150fca3f772dd004aa4c008a
|