FreeBSD/Linux Kernel Cross Reference
sys/dev/em/if_em.c
1 /**************************************************************************
2
3 Copyright (c) 2001-2007, Intel Corporation
4 All rights reserved.
5
6 Redistribution and use in source and binary forms, with or without
7 modification, are permitted provided that the following conditions are met:
8
9 1. Redistributions of source code must retain the above copyright notice,
10 this list of conditions and the following disclaimer.
11
12 2. Redistributions in binary form must reproduce the above copyright
13 notice, this list of conditions and the following disclaimer in the
14 documentation and/or other materials provided with the distribution.
15
16 3. Neither the name of the Intel Corporation nor the names of its
17 contributors may be used to endorse or promote products derived from
18 this software without specific prior written permission.
19
20 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 POSSIBILITY OF SUCH DAMAGE.
31
32 ***************************************************************************/
33
34 /* $FreeBSD: releng/6.3/sys/dev/em/if_em.c 173402 2007-11-07 01:33:28Z jfv $*/
35
36 #ifdef HAVE_KERNEL_OPTION_HEADERS
37 #include "opt_device_polling.h"
38 #endif
39
40 #include <sys/param.h>
41 #include <sys/systm.h>
42 #include <sys/bus.h>
43 #include <sys/endian.h>
44 #include <sys/kernel.h>
45 #include <sys/kthread.h>
46 #include <sys/malloc.h>
47 #include <sys/mbuf.h>
48 #include <sys/module.h>
49 #include <sys/rman.h>
50 #include <sys/socket.h>
51 #include <sys/sockio.h>
52 #include <sys/sysctl.h>
53 #include <sys/taskqueue.h>
54
55 #include <machine/bus.h>
56 #include <machine/resource.h>
57
58 #include <net/bpf.h>
59 #include <net/ethernet.h>
60 #include <net/if.h>
61 #include <net/if_arp.h>
62 #include <net/if_dl.h>
63 #include <net/if_media.h>
64
65 #include <net/if_types.h>
66 #include <net/if_vlan_var.h>
67
68 #include <netinet/in_systm.h>
69 #include <netinet/in.h>
70 #include <netinet/if_ether.h>
71 #include <netinet/ip.h>
72 #include <netinet/ip6.h>
73 #include <netinet/tcp.h>
74 #include <netinet/udp.h>
75
76 #include <machine/in_cksum.h>
77 #include <dev/pci/pcivar.h>
78 #include <dev/pci/pcireg.h>
79
80 #include "e1000_api.h"
81 #include "e1000_82575.h"
82 #include "if_em.h"
83
/*********************************************************************
 *  Set this to one to display debug statistics
 *********************************************************************/
/* Runtime-togglable flag; checked by the stats sysctl handlers. */
int	em_display_debug_stats = 0;

/*********************************************************************
 *  Driver version:
 *********************************************************************/
/* Appended to the device description string in em_probe(). */
char em_driver_version[] = "Version - 6.7.2";
93
94
95 /*********************************************************************
96 * PCI Device ID Table
97 *
98 * Used by probe to select devices to load on
99 * Last field stores an index into e1000_strings
100 * Last entry must be all 0s
101 *
102 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
103 *********************************************************************/
104
/*
 * PCI match table walked by em_probe().  Every entry accepts any
 * subvendor/subdevice (PCI_ANY_ID); the final all-zero entry is the
 * sentinel that terminates the scan.
 */
static em_vendor_info_t em_vendor_info_array[] =
{
	/* Intel(R) PRO/1000 Network Connection */
	{ 0x8086, E1000_DEV_ID_82540EM,		PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82540EM_LOM,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82540EP,		PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82540EP_LOM,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82540EP_LP,	PCI_ANY_ID, PCI_ANY_ID, 0},

	{ 0x8086, E1000_DEV_ID_82541EI,		PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82541ER,		PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82541ER_LOM,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82541EI_MOBILE,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82541GI,		PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82541GI_LF,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82541GI_MOBILE,	PCI_ANY_ID, PCI_ANY_ID, 0},

	{ 0x8086, E1000_DEV_ID_82542,		PCI_ANY_ID, PCI_ANY_ID, 0},

	{ 0x8086, E1000_DEV_ID_82543GC_FIBER,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82543GC_COPPER,	PCI_ANY_ID, PCI_ANY_ID, 0},

	{ 0x8086, E1000_DEV_ID_82544EI_COPPER,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82544EI_FIBER,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82544GC_COPPER,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82544GC_LOM,	PCI_ANY_ID, PCI_ANY_ID, 0},

	{ 0x8086, E1000_DEV_ID_82545EM_COPPER,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82545EM_FIBER,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82545GM_COPPER,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82545GM_FIBER,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82545GM_SERDES,	PCI_ANY_ID, PCI_ANY_ID, 0},

	{ 0x8086, E1000_DEV_ID_82546EB_COPPER,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82546EB_FIBER,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82546EB_QUAD_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82546GB_COPPER,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82546GB_FIBER,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82546GB_SERDES,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82546GB_PCIE,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82546GB_QUAD_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3,
						PCI_ANY_ID, PCI_ANY_ID, 0},

	{ 0x8086, E1000_DEV_ID_82547EI,		PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82547EI_MOBILE,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82547GI,		PCI_ANY_ID, PCI_ANY_ID, 0},

	{ 0x8086, E1000_DEV_ID_82571EB_COPPER,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82571EB_FIBER,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82571EB_SERDES,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82571EB_QUAD_COPPER,
						PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82571EB_QUAD_COPPER_LP,
						PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82571EB_QUAD_FIBER,
						PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82571PT_QUAD_COPPER,
						PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82572EI_COPPER,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82572EI_FIBER,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82572EI_SERDES,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82572EI,		PCI_ANY_ID, PCI_ANY_ID, 0},

	{ 0x8086, E1000_DEV_ID_82573E,		PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82573E_IAMT,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82573L,		PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_80003ES2LAN_COPPER_SPT,
						PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_80003ES2LAN_SERDES_SPT,
						PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_80003ES2LAN_COPPER_DPT,
						PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_80003ES2LAN_SERDES_DPT,
						PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_ICH8_IGP_M_AMT,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_ICH8_IGP_AMT,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_ICH8_IGP_C,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_ICH8_IFE,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_ICH8_IFE_GT,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_ICH8_IFE_G,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_ICH8_IGP_M,	PCI_ANY_ID, PCI_ANY_ID, 0},

	{ 0x8086, E1000_DEV_ID_ICH9_IGP_AMT,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_ICH9_IGP_C,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_ICH9_IFE,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_ICH9_IFE_GT,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_ICH9_IFE_G,	PCI_ANY_ID, PCI_ANY_ID, 0},

	{ 0x8086, E1000_DEV_ID_82575EB_COPPER,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82575EB_FIBER_SERDES,
						PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82575GB_QUAD_COPPER,
						PCI_ANY_ID, PCI_ANY_ID, 0},
	/* required last entry */
	{ 0, 0, 0, 0, 0}
};
202
203 /*********************************************************************
204 * Table of branding strings for all supported NICs.
205 *********************************************************************/
206
/*
 * Branding strings, indexed by the last field of em_vendor_info_array
 * entries (all current entries use index 0).
 */
static char *em_strings[] = {
	"Intel(R) PRO/1000 Network Connection"
};
210
211 /*********************************************************************
212 * Function prototypes
213 *********************************************************************/
/* Device interface (newbus) entry points. */
static int	em_probe(device_t);
static int	em_attach(device_t);
static int	em_detach(device_t);
static int	em_shutdown(device_t);
static int	em_suspend(device_t);
static int	em_resume(device_t);
/* ifnet entry points and basic control path. */
static void	em_start(struct ifnet *);
static void	em_start_locked(struct ifnet *ifp);
static int	em_ioctl(struct ifnet *, u_long, caddr_t);
static void	em_watchdog(struct adapter *);
static void	em_init(void *);
static void	em_init_locked(struct adapter *);
static void	em_stop(void *);
static void	em_media_status(struct ifnet *, struct ifmediareq *);
static int	em_media_change(struct ifnet *);
/* Resource allocation / hardware bring-up helpers. */
static void	em_identify_hardware(struct adapter *);
static int	em_allocate_pci_resources(struct adapter *);
static int	em_allocate_intr(struct adapter *);
static bool	em_setup_msix(struct adapter *);
static void	em_free_intr(struct adapter *);
static void	em_free_pci_resources(struct adapter *);
static void	em_local_timer(void *);
static int	em_hardware_init(struct adapter *);
static void	em_setup_interface(device_t, struct adapter *);
/* Transmit/receive ring setup and teardown. */
static void	em_setup_transmit_structures(struct adapter *);
static void	em_initialize_transmit_unit(struct adapter *);
static int	em_setup_receive_structures(struct adapter *);
static void	em_initialize_receive_unit(struct adapter *);
static void	em_enable_intr(struct adapter *);
static void	em_disable_intr(struct adapter *);
static void	em_free_transmit_structures(struct adapter *);
static void	em_free_receive_structures(struct adapter *);
static void	em_update_stats_counters(struct adapter *);
/* Datapath: tx completion, tx purge, rx processing. */
static void	em_txeof(struct adapter *);
static void	em_tx_purge(struct adapter *);
static int	em_allocate_receive_structures(struct adapter *);
static int	em_allocate_transmit_structures(struct adapter *);
static int	em_rxeof(struct adapter *, int);
#ifndef __NO_STRICT_ALIGNMENT
static int	em_fixup_rx(struct adapter *);
#endif
/* Checksum offload / TSO context descriptor setup. */
static void	em_receive_checksum(struct adapter *, struct e1000_rx_desc *,
		    struct mbuf *);
static void	em_transmit_checksum_setup(struct adapter *, struct mbuf *,
		    uint32_t *, uint32_t *);
static boolean_t em_tx_adv_ctx_setup(struct adapter *, struct mbuf *);
#if __FreeBSD_version >= 700000
static boolean_t em_tso_setup(struct adapter *, struct mbuf *, uint32_t *,
		    uint32_t *);
static boolean_t em_tso_adv_setup(struct adapter *, struct mbuf *, uint32_t *);
#endif /* FreeBSD_version >= 700000 */
/* Filters, link state, encapsulation, chip workarounds. */
static void	em_set_promisc(struct adapter *);
static void	em_disable_promisc(struct adapter *);
static void	em_set_multi(struct adapter *);
static void	em_print_hw_stats(struct adapter *);
static void	em_update_link_status(struct adapter *);
static int	em_get_buf(struct adapter *, int);
static void	em_enable_hw_vlans(struct adapter *);
static int	em_encap(struct adapter *, struct mbuf **);
static int	em_adv_encap(struct adapter *, struct mbuf **);
static void	em_smartspeed(struct adapter *);
static int	em_82547_fifo_workaround(struct adapter *, int);
static void	em_82547_update_fifo_head(struct adapter *, int);
static int	em_82547_tx_fifo_reset(struct adapter *);
static void	em_82547_move_tail(void *);
/* DMA memory, diagnostics, and sysctl plumbing. */
static int	em_dma_malloc(struct adapter *, bus_size_t,
		    struct em_dma_alloc *, int);
static void	em_dma_free(struct adapter *, struct em_dma_alloc *);
static void	em_print_debug_info(struct adapter *);
static void	em_print_nvm_info(struct adapter *);
static int	em_is_valid_ether_addr(uint8_t *);
static int	em_sysctl_stats(SYSCTL_HANDLER_ARGS);
static int	em_sysctl_debug_info(SYSCTL_HANDLER_ARGS);
static uint32_t	em_fill_descriptors (bus_addr_t address, uint32_t length,
		    PDESC_ARRAY desc_array);
static int	em_sysctl_int_delay(SYSCTL_HANDLER_ARGS);
static void	em_add_int_delay_sysctl(struct adapter *, const char *,
		    const char *, struct em_int_delay_info *, int, int);
/* Management and WOL Support */
static void	em_init_manageability(struct adapter *);
static void	em_release_manageability(struct adapter *);
static void	em_get_hw_control(struct adapter *);
static void	em_release_hw_control(struct adapter *);
static void	em_enable_wakeup(device_t);

/* Interrupt handling: legacy (optionally polled) vs. fast/filter IRQ. */
#ifndef EM_FAST_IRQ
static void	em_intr(void *);
#ifdef DEVICE_POLLING
static poll_handler_t em_poll;
#endif /* POLLING */
#else /* FAST IRQ */
#if __FreeBSD_version < 700000
static void	em_intr_fast(void *);
#else
static int	em_intr_fast(void *);
#endif
static void	em_add_rx_process_limit(struct adapter *, const char *,
		    const char *, int *, int);
static void	em_handle_rxtx(void *context, int pending);
static void	em_handle_link(void *context, int pending);
#endif /* EM_FAST_IRQ */
315
316 /*********************************************************************
317 * FreeBSD Device Interface Entry Points
318 *********************************************************************/
319
/* Newbus method table: maps generic device operations to em handlers. */
static device_method_t em_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe, em_probe),
	DEVMETHOD(device_attach, em_attach),
	DEVMETHOD(device_detach, em_detach),
	DEVMETHOD(device_shutdown, em_shutdown),
	DEVMETHOD(device_suspend, em_suspend),
	DEVMETHOD(device_resume, em_resume),
	{0, 0}
};

/* Driver description; softc size tells the bus how much to allocate. */
static driver_t em_driver = {
	"em", em_methods, sizeof(struct adapter),
};

static devclass_t em_devclass;
/* Register on the pci bus and declare module dependencies. */
DRIVER_MODULE(em, pci, em_driver, em_devclass, 0, 0);
MODULE_DEPEND(em, pci, 1, 1, 1);
MODULE_DEPEND(em, ether, 1, 1, 1);
339
340 /*********************************************************************
341 * Tunable default values.
342 *********************************************************************/
343
/*
 * Interrupt-delay registers count in units of 1.024 us; these macros
 * convert between register ticks and microseconds with rounding.
 */
#define EM_TICKS_TO_USECS(ticks)	((1024 * (ticks) + 500) / 1000)
#define EM_USECS_TO_TICKS(usecs)	((1000 * (usecs) + 512) / 1024)
#define M_TSO_LEN			66

/* Allow common code without TSO */
#ifndef CSUM_TSO
#define CSUM_TSO	0
#endif

/* Loader-tunable defaults, overridable via hw.em.* below. */
static int em_tx_int_delay_dflt = EM_TICKS_TO_USECS(EM_TIDV);
static int em_rx_int_delay_dflt = EM_TICKS_TO_USECS(EM_RDTR);
static int em_tx_abs_int_delay_dflt = EM_TICKS_TO_USECS(EM_TADV);
static int em_rx_abs_int_delay_dflt = EM_TICKS_TO_USECS(EM_RADV);
static int em_rxd = EM_DEFAULT_RXD;
static int em_txd = EM_DEFAULT_TXD;
static int em_smart_pwr_down = FALSE;

TUNABLE_INT("hw.em.tx_int_delay", &em_tx_int_delay_dflt);
TUNABLE_INT("hw.em.rx_int_delay", &em_rx_int_delay_dflt);
TUNABLE_INT("hw.em.tx_abs_int_delay", &em_tx_abs_int_delay_dflt);
TUNABLE_INT("hw.em.rx_abs_int_delay", &em_rx_abs_int_delay_dflt);
TUNABLE_INT("hw.em.rxd", &em_rxd);
TUNABLE_INT("hw.em.txd", &em_txd);
TUNABLE_INT("hw.em.smart_pwr_down", &em_smart_pwr_down);
#ifdef EM_FAST_IRQ
/* How many packets rxeof tries to clean at a time */
static int em_rx_process_limit = 100;
TUNABLE_INT("hw.em.rx_process_limit", &em_rx_process_limit);
#endif
/* Global used in WOL setup with multiport cards */
static int global_quad_port_a = 0;
375
376 /*********************************************************************
377 * Device identification routine
378 *
379 * em_probe determines if the driver should be loaded on
380 * adapter based on PCI vendor/device id of the adapter.
381 *
382 * return BUS_PROBE_DEFAULT on success, positive on failure
383 *********************************************************************/
384
385 static int
386 em_probe(device_t dev)
387 {
388 char adapter_name[60];
389 uint16_t pci_vendor_id = 0;
390 uint16_t pci_device_id = 0;
391 uint16_t pci_subvendor_id = 0;
392 uint16_t pci_subdevice_id = 0;
393 em_vendor_info_t *ent;
394
395 INIT_DEBUGOUT("em_probe: begin");
396
397 pci_vendor_id = pci_get_vendor(dev);
398 if (pci_vendor_id != EM_VENDOR_ID)
399 return (ENXIO);
400
401 pci_device_id = pci_get_device(dev);
402 pci_subvendor_id = pci_get_subvendor(dev);
403 pci_subdevice_id = pci_get_subdevice(dev);
404
405 ent = em_vendor_info_array;
406 while (ent->vendor_id != 0) {
407 if ((pci_vendor_id == ent->vendor_id) &&
408 (pci_device_id == ent->device_id) &&
409
410 ((pci_subvendor_id == ent->subvendor_id) ||
411 (ent->subvendor_id == PCI_ANY_ID)) &&
412
413 ((pci_subdevice_id == ent->subdevice_id) ||
414 (ent->subdevice_id == PCI_ANY_ID))) {
415 sprintf(adapter_name, "%s %s",
416 em_strings[ent->index],
417 em_driver_version);
418 device_set_desc_copy(dev, adapter_name);
419 return (BUS_PROBE_DEFAULT);
420 }
421 ent++;
422 }
423
424 return (ENXIO);
425 }
426
427 /*********************************************************************
428 * Device initialization routine
429 *
430 * The attach entry point is called when the driver is being loaded.
431 * This routine identifies the type of hardware, allocates all resources
432 * and initializes the hardware.
433 *
434 * return 0 on success, positive on failure
435 *********************************************************************/
436
437 static int
438 em_attach(device_t dev)
439 {
440 struct adapter *adapter;
441 int tsize, rsize;
442 int error = 0;
443 u16 eeprom_data, device_id;
444
445 INIT_DEBUGOUT("em_attach: begin");
446
447 adapter = device_get_softc(dev);
448 adapter->dev = adapter->osdep.dev = dev;
449 EM_CORE_LOCK_INIT(adapter, device_get_nameunit(dev));
450 EM_TX_LOCK_INIT(adapter, device_get_nameunit(dev));
451
452 /* SYSCTL stuff */
453 SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
454 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
455 OID_AUTO, "debug", CTLTYPE_INT|CTLFLAG_RW, adapter, 0,
456 em_sysctl_debug_info, "I", "Debug Information");
457
458 SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
459 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
460 OID_AUTO, "stats", CTLTYPE_INT|CTLFLAG_RW, adapter, 0,
461 em_sysctl_stats, "I", "Statistics");
462
463 callout_init_mtx(&adapter->timer, &adapter->core_mtx, 0);
464 callout_init_mtx(&adapter->tx_fifo_timer, &adapter->tx_mtx, 0);
465
466 /* Determine hardware and mac info */
467 em_identify_hardware(adapter);
468
469 /* Setup PCI resources */
470 if (em_allocate_pci_resources(adapter)) {
471 device_printf(dev, "Allocation of PCI resources failed\n");
472 error = ENXIO;
473 goto err_pci;
474 }
475
476 /*
477 ** For ICH8 and family we need to
478 ** map the flash memory, and this
479 ** must happen after the MAC is
480 ** identified
481 */
482 if ((adapter->hw.mac.type == e1000_ich8lan) ||
483 (adapter->hw.mac.type == e1000_ich9lan)) {
484 int rid = EM_BAR_TYPE_FLASH;
485 adapter->flash_mem = bus_alloc_resource_any(dev,
486 SYS_RES_MEMORY, &rid, RF_ACTIVE);
487 /* This is used in the shared code */
488 adapter->hw.flash_address = (u8 *)adapter->flash_mem;
489 adapter->osdep.flash_bus_space_tag =
490 rman_get_bustag(adapter->flash_mem);
491 adapter->osdep.flash_bus_space_handle =
492 rman_get_bushandle(adapter->flash_mem);
493 }
494
495 /* Do Shared Code initialization */
496 if (e1000_setup_init_funcs(&adapter->hw, TRUE)) {
497 device_printf(dev, "Setup of Shared code failed\n");
498 error = ENXIO;
499 goto err_pci;
500 }
501
502 e1000_get_bus_info(&adapter->hw);
503
504 /* Set up some sysctls for the tunable interrupt delays */
505 em_add_int_delay_sysctl(adapter, "rx_int_delay",
506 "receive interrupt delay in usecs", &adapter->rx_int_delay,
507 E1000_REGISTER(&adapter->hw, E1000_RDTR), em_rx_int_delay_dflt);
508 em_add_int_delay_sysctl(adapter, "tx_int_delay",
509 "transmit interrupt delay in usecs", &adapter->tx_int_delay,
510 E1000_REGISTER(&adapter->hw, E1000_TIDV), em_tx_int_delay_dflt);
511 if (adapter->hw.mac.type >= e1000_82540) {
512 em_add_int_delay_sysctl(adapter, "rx_abs_int_delay",
513 "receive interrupt delay limit in usecs",
514 &adapter->rx_abs_int_delay,
515 E1000_REGISTER(&adapter->hw, E1000_RADV),
516 em_rx_abs_int_delay_dflt);
517 em_add_int_delay_sysctl(adapter, "tx_abs_int_delay",
518 "transmit interrupt delay limit in usecs",
519 &adapter->tx_abs_int_delay,
520 E1000_REGISTER(&adapter->hw, E1000_TADV),
521 em_tx_abs_int_delay_dflt);
522 }
523
524 #ifdef EM_FAST_IRQ
525 /* Sysctls for limiting the amount of work done in the taskqueue */
526 em_add_rx_process_limit(adapter, "rx_processing_limit",
527 "max number of rx packets to process", &adapter->rx_process_limit,
528 em_rx_process_limit);
529 #endif
530
531 /*
532 * Validate number of transmit and receive descriptors. It
533 * must not exceed hardware maximum, and must be multiple
534 * of E1000_DBA_ALIGN.
535 */
536 if (((em_txd * sizeof(struct e1000_tx_desc)) % EM_DBA_ALIGN) != 0 ||
537 (adapter->hw.mac.type >= e1000_82544 && em_txd > EM_MAX_TXD) ||
538 (adapter->hw.mac.type < e1000_82544 && em_txd > EM_MAX_TXD_82543) ||
539 (em_txd < EM_MIN_TXD)) {
540 device_printf(dev, "Using %d TX descriptors instead of %d!\n",
541 EM_DEFAULT_TXD, em_txd);
542 adapter->num_tx_desc = EM_DEFAULT_TXD;
543 } else
544 adapter->num_tx_desc = em_txd;
545 if (((em_rxd * sizeof(struct e1000_rx_desc)) % EM_DBA_ALIGN) != 0 ||
546 (adapter->hw.mac.type >= e1000_82544 && em_rxd > EM_MAX_RXD) ||
547 (adapter->hw.mac.type < e1000_82544 && em_rxd > EM_MAX_RXD_82543) ||
548 (em_rxd < EM_MIN_RXD)) {
549 device_printf(dev, "Using %d RX descriptors instead of %d!\n",
550 EM_DEFAULT_RXD, em_rxd);
551 adapter->num_rx_desc = EM_DEFAULT_RXD;
552 } else
553 adapter->num_rx_desc = em_rxd;
554
555 adapter->hw.mac.autoneg = DO_AUTO_NEG;
556 adapter->hw.phy.autoneg_wait_to_complete = FALSE;
557 adapter->hw.phy.autoneg_advertised = AUTONEG_ADV_DEFAULT;
558 adapter->rx_buffer_len = 2048;
559
560 e1000_init_script_state_82541(&adapter->hw, TRUE);
561 e1000_set_tbi_compatibility_82543(&adapter->hw, TRUE);
562
563 /* Copper options */
564 if (adapter->hw.phy.media_type == e1000_media_type_copper) {
565 adapter->hw.phy.mdix = AUTO_ALL_MODES;
566 adapter->hw.phy.disable_polarity_correction = FALSE;
567 adapter->hw.phy.ms_type = EM_MASTER_SLAVE;
568 }
569
570 /*
571 * Set the frame limits assuming
572 * standard ethernet sized frames.
573 */
574 adapter->max_frame_size = ETHERMTU + ETHER_HDR_LEN + ETHERNET_FCS_SIZE;
575 adapter->min_frame_size = ETH_ZLEN + ETHERNET_FCS_SIZE;
576
577 /*
578 * This controls when hardware reports transmit completion
579 * status.
580 */
581 adapter->hw.mac.report_tx_early = 1;
582
583 tsize = roundup2(adapter->num_tx_desc * sizeof(struct e1000_tx_desc),
584 EM_DBA_ALIGN);
585
586 /* Allocate Transmit Descriptor ring */
587 if (em_dma_malloc(adapter, tsize, &adapter->txdma, BUS_DMA_NOWAIT)) {
588 device_printf(dev, "Unable to allocate tx_desc memory\n");
589 error = ENOMEM;
590 goto err_tx_desc;
591 }
592 adapter->tx_desc_base =
593 (struct e1000_tx_desc *)adapter->txdma.dma_vaddr;
594
595 rsize = roundup2(adapter->num_rx_desc * sizeof(struct e1000_rx_desc),
596 EM_DBA_ALIGN);
597
598 /* Allocate Receive Descriptor ring */
599 if (em_dma_malloc(adapter, rsize, &adapter->rxdma, BUS_DMA_NOWAIT)) {
600 device_printf(dev, "Unable to allocate rx_desc memory\n");
601 error = ENOMEM;
602 goto err_rx_desc;
603 }
604 adapter->rx_desc_base =
605 (struct e1000_rx_desc *)adapter->rxdma.dma_vaddr;
606
607 /* Make sure we have a good EEPROM before we read from it */
608 if (e1000_validate_nvm_checksum(&adapter->hw) < 0) {
609 /*
610 ** Some PCI-E parts fail the first check due to
611 ** the link being in sleep state, call it again,
612 ** if it fails a second time its a real issue.
613 */
614 if (e1000_validate_nvm_checksum(&adapter->hw) < 0) {
615 device_printf(dev,
616 "The EEPROM Checksum Is Not Valid\n");
617 error = EIO;
618 goto err_hw_init;
619 }
620 }
621
622 /* Initialize the hardware */
623 if (em_hardware_init(adapter)) {
624 device_printf(dev, "Unable to initialize the hardware\n");
625 error = EIO;
626 goto err_hw_init;
627 }
628
629 /* Copy the permanent MAC address out of the EEPROM */
630 if (e1000_read_mac_addr(&adapter->hw) < 0) {
631 device_printf(dev, "EEPROM read error while reading MAC"
632 " address\n");
633 error = EIO;
634 goto err_hw_init;
635 }
636
637 if (!em_is_valid_ether_addr(adapter->hw.mac.addr)) {
638 device_printf(dev, "Invalid MAC address\n");
639 error = EIO;
640 goto err_hw_init;
641 }
642
643 /* Allocate transmit descriptors and buffers */
644 if (em_allocate_transmit_structures(adapter)) {
645 device_printf(dev, "Could not setup transmit structures\n");
646 error = ENOMEM;
647 goto err_tx_struct;
648 }
649
650 /* Allocate receive descriptors and buffers */
651 if (em_allocate_receive_structures(adapter)) {
652 device_printf(dev, "Could not setup receive structures\n");
653 error = ENOMEM;
654 goto err_rx_struct;
655 }
656
657 /* Setup OS specific network interface */
658 em_setup_interface(dev, adapter);
659
660 em_allocate_intr(adapter);
661
662 /* Initialize statistics */
663 em_update_stats_counters(adapter);
664
665 adapter->hw.mac.get_link_status = 1;
666 em_update_link_status(adapter);
667
668 /* Indicate SOL/IDER usage */
669 if (e1000_check_reset_block(&adapter->hw))
670 device_printf(dev,
671 "PHY reset is blocked due to SOL/IDER session.\n");
672
673 /* Determine if we have to control management hardware */
674 adapter->has_manage = e1000_enable_mng_pass_thru(&adapter->hw);
675
676 /*
677 * Setup Wake-on-Lan
678 */
679 switch (adapter->hw.mac.type) {
680
681 case e1000_82542:
682 case e1000_82543:
683 break;
684 case e1000_82546:
685 case e1000_82546_rev_3:
686 case e1000_82571:
687 case e1000_80003es2lan:
688 if (adapter->hw.bus.func == 1)
689 e1000_read_nvm(&adapter->hw,
690 NVM_INIT_CONTROL3_PORT_B, 1, &eeprom_data);
691 else
692 e1000_read_nvm(&adapter->hw,
693 NVM_INIT_CONTROL3_PORT_A, 1, &eeprom_data);
694 eeprom_data &= EM_EEPROM_APME;
695 break;
696 default:
697 /* APME bit in EEPROM is mapped to WUC.APME */
698 eeprom_data = E1000_READ_REG(&adapter->hw, E1000_WUC) &
699 E1000_WUC_APME;
700 break;
701 }
702 if (eeprom_data)
703 adapter->wol = E1000_WUFC_MAG;
704 /*
705 * We have the eeprom settings, now apply the special cases
706 * where the eeprom may be wrong or the board won't support
707 * wake on lan on a particular port
708 */
709 device_id = pci_get_device(dev);
710 switch (device_id) {
711 case E1000_DEV_ID_82546GB_PCIE:
712 adapter->wol = 0;
713 break;
714 case E1000_DEV_ID_82546EB_FIBER:
715 case E1000_DEV_ID_82546GB_FIBER:
716 case E1000_DEV_ID_82571EB_FIBER:
717 /* Wake events only supported on port A for dual fiber
718 * regardless of eeprom setting */
719 if (E1000_READ_REG(&adapter->hw, E1000_STATUS) &
720 E1000_STATUS_FUNC_1)
721 adapter->wol = 0;
722 break;
723 case E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3:
724 case E1000_DEV_ID_82571EB_QUAD_COPPER:
725 case E1000_DEV_ID_82571EB_QUAD_FIBER:
726 case E1000_DEV_ID_82571EB_QUAD_COPPER_LP:
727 /* if quad port adapter, disable WoL on all but port A */
728 if (global_quad_port_a != 0)
729 adapter->wol = 0;
730 /* Reset for multiple quad port adapters */
731 if (++global_quad_port_a == 4)
732 global_quad_port_a = 0;
733 break;
734 }
735
736 /* Do we need workaround for 82544 PCI-X adapter? */
737 if (adapter->hw.bus.type == e1000_bus_type_pcix &&
738 adapter->hw.mac.type == e1000_82544)
739 adapter->pcix_82544 = TRUE;
740 else
741 adapter->pcix_82544 = FALSE;
742
743 /* Tell the stack that the interface is not active */
744 adapter->ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
745
746 INIT_DEBUGOUT("em_attach: end");
747
748 return (0);
749
750 err_rx_struct:
751 em_free_transmit_structures(adapter);
752 err_tx_struct:
753 err_hw_init:
754 em_release_hw_control(adapter);
755 e1000_remove_device(&adapter->hw);
756 em_dma_free(adapter, &adapter->rxdma);
757 err_rx_desc:
758 em_dma_free(adapter, &adapter->txdma);
759 err_tx_desc:
760 err_pci:
761 em_free_intr(adapter);
762 em_free_pci_resources(adapter);
763 EM_TX_LOCK_DESTROY(adapter);
764 EM_CORE_LOCK_DESTROY(adapter);
765
766 return (error);
767 }
768
769 /*********************************************************************
770 * Device removal routine
771 *
772 * The detach entry point is called when the driver is being removed.
773 * This routine stops the adapter and deallocates all the resources
774 * that were allocated for driver operation.
775 *
776 * return 0 on success, positive on failure
777 *********************************************************************/
778
/*
 * Detach entry point: stop the adapter and release everything attach
 * acquired.  The teardown order is deliberate — interrupts first, then
 * hardware stop under the core lock, then ifnet detach (which may
 * sleep), then memory.  Do not reorder without care.
 *
 * Returns 0 on success, EBUSY if VLANs still reference the interface.
 */
static int
em_detach(device_t dev)
{
	struct adapter	*adapter = device_get_softc(dev);
	struct ifnet	*ifp = adapter->ifp;

	INIT_DEBUGOUT("em_detach: begin");

	/* Make sure VLANS are not using driver */
#if __FreeBSD_version >= 700000
	if (adapter->ifp->if_vlantrunk != NULL) {
#else
	if (adapter->ifp->if_nvlans != 0) {
#endif
		device_printf(dev,"Vlan in use, detach first\n");
		return (EBUSY);
	}

#ifdef DEVICE_POLLING
	if (ifp->if_capenable & IFCAP_POLLING)
		ether_poll_deregister(ifp);
#endif

	/* Quiesce interrupts before touching shared state. */
	em_disable_intr(adapter);
	em_free_intr(adapter);
	EM_CORE_LOCK(adapter);
	/* in_detach tells concurrent paths (ioctl, timer) to bail out. */
	adapter->in_detach = 1;
	em_stop(adapter);
	e1000_phy_hw_reset(&adapter->hw);

	em_release_manageability(adapter);

	/* Hand control back to manageability firmware on parts that share
	 * the NIC with a management engine. */
	if (((adapter->hw.mac.type == e1000_82573) ||
	    (adapter->hw.mac.type == e1000_ich8lan) ||
	    (adapter->hw.mac.type == e1000_ich9lan)) &&
	    e1000_check_mng_mode(&adapter->hw))
		em_release_hw_control(adapter);

	/* Arm Wake-on-LAN if it was configured for this port. */
	if (adapter->wol) {
		E1000_WRITE_REG(&adapter->hw, E1000_WUC, E1000_WUC_PME_EN);
		E1000_WRITE_REG(&adapter->hw, E1000_WUFC, adapter->wol);
		em_enable_wakeup(dev);
	}

	EM_CORE_UNLOCK(adapter);
	ether_ifdetach(adapter->ifp);

	/* callout_drain waits for any in-flight timer to finish. */
	callout_drain(&adapter->timer);
	callout_drain(&adapter->tx_fifo_timer);

	em_free_pci_resources(adapter);
	bus_generic_detach(dev);
	if_free(ifp);

	e1000_remove_device(&adapter->hw);
	em_free_transmit_structures(adapter);
	em_free_receive_structures(adapter);

	/* Free Transmit Descriptor ring */
	if (adapter->tx_desc_base) {
		em_dma_free(adapter, &adapter->txdma);
		adapter->tx_desc_base = NULL;
	}

	/* Free Receive Descriptor ring */
	if (adapter->rx_desc_base) {
		em_dma_free(adapter, &adapter->rxdma);
		adapter->rx_desc_base = NULL;
	}

	EM_TX_LOCK_DESTROY(adapter);
	EM_CORE_LOCK_DESTROY(adapter);

	return (0);
}
854
855 /*********************************************************************
856 *
857 * Shutdown entry point
858 *
859 **********************************************************************/
860
861 static int
862 em_shutdown(device_t dev)
863 {
864 return em_suspend(dev);
865 }
866
867 /*
868 * Suspend/resume device methods.
869 */
/*
 * Device suspend routine.
 *
 * Stops the interface, releases manageability state, and -- on
 * 82573/ICH8/ICH9 parts where e1000_check_mng_mode() indicates
 * management firmware is active -- hands hardware control back via
 * em_release_hw_control().  If Wake-on-LAN is configured
 * (adapter->wol non-zero) the Wakeup Control/Filter registers are
 * programmed and wakeup is enabled.  Returns the result of
 * bus_generic_suspend() so child devices are suspended too.
 */
static int
em_suspend(device_t dev)
{
	struct adapter *adapter = device_get_softc(dev);

	EM_CORE_LOCK(adapter);
	em_stop(adapter);

	em_release_manageability(adapter);

	/* Give hardware control back to management firmware if present. */
	if (((adapter->hw.mac.type == e1000_82573) ||
	    (adapter->hw.mac.type == e1000_ich8lan) ||
	    (adapter->hw.mac.type == e1000_ich9lan)) &&
	    e1000_check_mng_mode(&adapter->hw))
		em_release_hw_control(adapter);

	/* Arm Wake-on-LAN: enable PME and program the wakeup filters. */
	if (adapter->wol) {
		E1000_WRITE_REG(&adapter->hw, E1000_WUC, E1000_WUC_PME_EN);
		E1000_WRITE_REG(&adapter->hw, E1000_WUFC, adapter->wol);
		em_enable_wakeup(dev);
	}

	EM_CORE_UNLOCK(adapter);

	return bus_generic_suspend(dev);
}
896
897 static int
898 em_resume(device_t dev)
899 {
900 struct adapter *adapter = device_get_softc(dev);
901 struct ifnet *ifp = adapter->ifp;
902
903 EM_CORE_LOCK(adapter);
904 em_init_locked(adapter);
905 em_init_manageability(adapter);
906
907 if ((ifp->if_flags & IFF_UP) &&
908 (ifp->if_drv_flags & IFF_DRV_RUNNING))
909 em_start_locked(ifp);
910
911 EM_CORE_UNLOCK(adapter);
912
913 return bus_generic_resume(dev);
914 }
915
916
917 /*********************************************************************
918 * Transmit entry point
919 *
920 * em_start is called by the stack to initiate a transmit.
921 * The driver will remain in this routine as long as there are
922 * packets to transmit and transmit resources are available.
923 * In case resources are not available stack is notified and
924 * the packet is requeued.
925 **********************************************************************/
926
/*
 * Transmit dispatch loop; caller must hold the TX lock.
 *
 * Drains the interface send queue, passing each mbuf chain to the
 * adapter's transmit function (adapter->em_xmit).  On a transmit
 * failure where the mbuf survived, the packet is prepended back on
 * the queue and IFF_DRV_OACTIVE is set so the stack stops handing
 * us packets until descriptors free up.
 */
static void
em_start_locked(struct ifnet *ifp)
{
	struct adapter *adapter = ifp->if_softc;
	struct mbuf *m_head;

	EM_TX_LOCK_ASSERT(adapter);

	/* Nothing to do unless running, not output-blocked, link up. */
	if ((ifp->if_drv_flags & (IFF_DRV_RUNNING|IFF_DRV_OACTIVE)) !=
	    IFF_DRV_RUNNING)
		return;
	if (!adapter->link_active)
		return;

	while (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) {

		IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
		if (m_head == NULL)
			break;
		/*
		 * Encapsulation can modify our pointer, and or make it
		 * NULL on failure.  In that event, we can't requeue.
		 *
		 * We now use a pointer to accommodate legacy and
		 * advanced transmit functions.
		 */
		if (adapter->em_xmit(adapter, &m_head)) {
			/* Mbuf was consumed by the failed encap: give up. */
			if (m_head == NULL)
				break;
			/* Out of resources: requeue and back off. */
			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
			IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
			break;
		}

		/* Send a copy of the frame to the BPF listener */
		ETHER_BPF_MTAP(ifp, m_head);

		/* Set timeout in case hardware has problems transmitting. */
		adapter->watchdog_timer = EM_TX_TIMEOUT;
	}
}
968
969 static void
970 em_start(struct ifnet *ifp)
971 {
972 struct adapter *adapter = ifp->if_softc;
973
974 EM_TX_LOCK(adapter);
975 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
976 em_start_locked(ifp);
977 EM_TX_UNLOCK(adapter);
978 }
979
980 /*********************************************************************
981 * Ioctl entry point
982 *
983 * em_ioctl is called when the user wants to configure the
984 * interface.
985 *
986 * return 0 on success, positive on failure
987 **********************************************************************/
988
/*
 * Ioctl entry point: handles interface configuration requests
 * (address, MTU, flags, multicast, media and capability changes).
 *
 * Returns 0 on success, an errno value on failure.  All requests
 * are silently accepted (returning 0) while a detach is in
 * progress.
 */
static int
em_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
{
	struct adapter *adapter = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	struct ifaddr *ifa = (struct ifaddr *)data;
	int error = 0;

	if (adapter->in_detach)
		return (error);

	switch (command) {
	case SIOCSIFADDR:
		if (ifa->ifa_addr->sa_family == AF_INET) {
			/*
			 * XXX
			 * Since resetting hardware takes a very long time
			 * and results in link renegotiation we only
			 * initialize the hardware when it is absolutely
			 * required.
			 */
			ifp->if_flags |= IFF_UP;
			if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
				EM_CORE_LOCK(adapter);
				em_init_locked(adapter);
				EM_CORE_UNLOCK(adapter);
			}
			arp_ifinit(ifp, ifa);
		} else
			error = ether_ioctl(ifp, command, data);
		break;
	case SIOCSIFMTU:
	    {
		int max_frame_size;
		uint16_t eeprom_data = 0;

		IOCTL_DEBUGOUT("ioctl rcv'd: SIOCSIFMTU (Set Interface MTU)");

		EM_CORE_LOCK(adapter);
		/* Determine the largest frame this MAC variant supports. */
		switch (adapter->hw.mac.type) {
		case e1000_82573:
			/*
			 * 82573 only supports jumbo frames
			 * if ASPM is disabled.
			 */
			e1000_read_nvm(&adapter->hw,
			    NVM_INIT_3GIO_3, 1, &eeprom_data);
			if (eeprom_data & NVM_WORD1A_ASPM_MASK) {
				max_frame_size = ETHER_MAX_LEN;
				break;
			}
			/* Allow Jumbo frames - fall thru */
		case e1000_82571:
		case e1000_82572:
		case e1000_ich9lan:
		case e1000_82575:
		case e1000_80003es2lan:	/* Limit Jumbo Frame size */
			max_frame_size = 9234;
			break;
			/* Adapters that do not support jumbo frames */
		case e1000_82542:
		case e1000_ich8lan:
			max_frame_size = ETHER_MAX_LEN;
			break;
		default:
			max_frame_size = MAX_JUMBO_FRAME_SIZE;
		}
		if (ifr->ifr_mtu > max_frame_size - ETHER_HDR_LEN -
		    ETHER_CRC_LEN) {
			EM_CORE_UNLOCK(adapter);
			error = EINVAL;
			break;
		}

		ifp->if_mtu = ifr->ifr_mtu;
		adapter->max_frame_size =
		    ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
		em_init_locked(adapter);
		EM_CORE_UNLOCK(adapter);
		break;
	    }
	case SIOCSIFFLAGS:
		IOCTL_DEBUGOUT("ioctl rcv'd:\
		    SIOCSIFFLAGS (Set Interface Flags)");
		EM_CORE_LOCK(adapter);
		if (ifp->if_flags & IFF_UP) {
			if ((ifp->if_drv_flags & IFF_DRV_RUNNING)) {
				/*
				 * Only the promiscuity bit changed:
				 * update it without a full re-init.
				 */
				if ((ifp->if_flags ^ adapter->if_flags) &
				    IFF_PROMISC) {
					em_disable_promisc(adapter);
					em_set_promisc(adapter);
				}
			} else
				em_init_locked(adapter);
		} else
			if (ifp->if_drv_flags & IFF_DRV_RUNNING)
				em_stop(adapter);
		adapter->if_flags = ifp->if_flags;
		EM_CORE_UNLOCK(adapter);
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		IOCTL_DEBUGOUT("ioctl rcv'd: SIOC(ADD|DEL)MULTI");
		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
			EM_CORE_LOCK(adapter);
			em_disable_intr(adapter);
			em_set_multi(adapter);
			/* 82542 rev 2 needs the RX unit re-initialized. */
			if (adapter->hw.mac.type == e1000_82542 &&
			    adapter->hw.revision_id == E1000_REVISION_2) {
				em_initialize_receive_unit(adapter);
			}
#ifdef DEVICE_POLLING
			if (!(ifp->if_capenable & IFCAP_POLLING))
#endif
				em_enable_intr(adapter);
			EM_CORE_UNLOCK(adapter);
		}
		break;
	case SIOCSIFMEDIA:
		/* Check SOL/IDER usage */
		EM_CORE_LOCK(adapter);
		if (e1000_check_reset_block(&adapter->hw)) {
			EM_CORE_UNLOCK(adapter);
			device_printf(adapter->dev, "Media change is"
			    " blocked due to SOL/IDER session.\n");
			break;
		}
		EM_CORE_UNLOCK(adapter);
		/* FALLTHROUGH */
	case SIOCGIFMEDIA:
		IOCTL_DEBUGOUT("ioctl rcv'd: \
		    SIOCxIFMEDIA (Get/Set Interface Media)");
		error = ifmedia_ioctl(ifp, ifr, &adapter->media, command);
		break;
	case SIOCSIFCAP:
	    {
		int mask, reinit;

		IOCTL_DEBUGOUT("ioctl rcv'd: SIOCSIFCAP (Set Capabilities)");
		reinit = 0;
		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
#ifdef DEVICE_POLLING
		if (mask & IFCAP_POLLING) {
			if (ifr->ifr_reqcap & IFCAP_POLLING) {
				error = ether_poll_register(em_poll, ifp);
				if (error)
					return (error);
				EM_CORE_LOCK(adapter);
				em_disable_intr(adapter);
				ifp->if_capenable |= IFCAP_POLLING;
				EM_CORE_UNLOCK(adapter);
			} else {
				error = ether_poll_deregister(ifp);
				/* Enable interrupt even in error case */
				EM_CORE_LOCK(adapter);
				em_enable_intr(adapter);
				ifp->if_capenable &= ~IFCAP_POLLING;
				EM_CORE_UNLOCK(adapter);
			}
		}
#endif
		if (mask & IFCAP_HWCSUM) {
			ifp->if_capenable ^= IFCAP_HWCSUM;
			reinit = 1;
		}
#if __FreeBSD_version >= 700000
		if (mask & IFCAP_TSO4) {
			ifp->if_capenable ^= IFCAP_TSO4;
			reinit = 1;
		}
#endif
		if (mask & IFCAP_VLAN_HWTAGGING) {
			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
			reinit = 1;
		}
		if (reinit && (ifp->if_drv_flags & IFF_DRV_RUNNING))
			em_init(adapter);
#if __FreeBSD_version >= 700000
		VLAN_CAPABILITIES(ifp);
#endif
		break;
	    }
	default:
		error = ether_ioctl(ifp, command, data);
		break;
	}

	return (error);
}
1177
1178 /*********************************************************************
1179 * Watchdog timer:
1180 *
1181 * This routine is called from the local timer every second.
1182 * As long as transmit descriptors are being cleaned the value
1183 * is non-zero and we do nothing. Reaching 0 indicates a tx hang
1184 * and we then reset the device.
1185 *
1186 **********************************************************************/
1187
/*
 * Watchdog timer, called from the driver's local timer with the
 * core lock held.  If the watchdog counter expires while transmit
 * work is outstanding, the transmitter is assumed hung and the
 * interface is re-initialized -- unless the cause is flow-control
 * pause frames (TXOFF set in the status register), in which case
 * the timer is simply re-armed.
 */
static void
em_watchdog(struct adapter *adapter)
{
#ifdef EM_FAST_IRQ
	struct task *t = &adapter->rxtx_task;
#endif

	EM_CORE_LOCK_ASSERT(adapter);

	/*
	** The timer is set to 10 every time start queues a packet.
	** Then txeof keeps resetting to 10 as long as it cleans at
	** least one descriptor.
	** Finally, anytime all descriptors are clean the timer is
	** set to 0.
	*/
#ifndef EM_FAST_IRQ
	/* Idle, or counted down but still non-zero: no timeout yet. */
	if ((adapter->watchdog_timer == 0) || (--adapter->watchdog_timer))
#else /* FAST_IRQ */
	if (adapter->watchdog_timer == 0)
#endif
		return;

#ifdef EM_FAST_IRQ
	/*
	 * Force a clean if things seem sluggish, this
	 * is a 6.3 scheduler workaround.
	 */
	if ((--adapter->watchdog_timer != 0) && (t->ta_pending == 0)) {
		taskqueue_enqueue(adapter->tq, &adapter->rxtx_task);
		return;
	}
#endif

	/* If we are in this routine because of pause frames, then
	 * don't reset the hardware.
	 */
	if (E1000_READ_REG(&adapter->hw, E1000_STATUS) &
	    E1000_STATUS_TXOFF) {
		adapter->watchdog_timer = EM_TX_TIMEOUT;
		return;
	}

	/*
	 * NOTE: only the message is conditional on the
	 * e1000_check_for_link() result; the reset below is
	 * intentionally unconditional.
	 */
	if (e1000_check_for_link(&adapter->hw) == 0)
		device_printf(adapter->dev, "watchdog timeout -- resetting\n");
	adapter->ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
	adapter->watchdog_events++;

	em_init_locked(adapter);
}
1238
1239 /*********************************************************************
1240 * Init entry point
1241 *
1242 * This routine is used in two ways. It is used by the stack as
1243 * init entry point in network interface structure. It is also used
1244 * by the driver as a hw/sw initialization routine to get to a
1245 * consistent state.
1246 *
1247 * return 0 on success, positive on failure
1248 **********************************************************************/
1249
/*
 * Core hardware/software initialization; caller must hold the
 * core lock.  Stops the adapter, sizes the on-chip packet buffer,
 * programs the MAC address, initializes the hardware and both the
 * transmit and receive units, then re-enables interrupts (unless
 * polling is active).  Used by the ifnet init entry point (via
 * em_init) and internally after resets.
 */
static void
em_init_locked(struct adapter *adapter)
{
	struct ifnet *ifp = adapter->ifp;
	device_t dev = adapter->dev;
	uint32_t pba;

	INIT_DEBUGOUT("em_init: begin");

	EM_CORE_LOCK_ASSERT(adapter);

	em_stop(adapter);

	/*
	 * Packet Buffer Allocation (PBA)
	 * Writing PBA sets the receive portion of the buffer
	 * the remainder is used for the transmit buffer.
	 *
	 * Devices before the 82547 had a Packet Buffer of 64K.
	 * Default allocation: PBA=48K for Rx, leaving 16K for Tx.
	 * After the 82547 the buffer was reduced to 40K.
	 * Default allocation: PBA=30K for Rx, leaving 10K for Tx.
	 * Note: default does not leave enough room for Jumbo Frame >10k.
	 */
	switch (adapter->hw.mac.type) {
	case e1000_82547:
	case e1000_82547_rev_2: /* 82547: Total Packet Buffer is 40K */
		if (adapter->max_frame_size > 8192)
			pba = E1000_PBA_22K; /* 22K for Rx, 18K for Tx */
		else
			pba = E1000_PBA_30K; /* 30K for Rx, 10K for Tx */
		adapter->tx_fifo_head = 0;
		adapter->tx_head_addr = pba << EM_TX_HEAD_ADDR_SHIFT;
		adapter->tx_fifo_size =
		    (E1000_PBA_40K - pba) << EM_PBA_BYTES_SHIFT;
		break;
	/* Total Packet Buffer on these is 48K */
	case e1000_82571:
	case e1000_82572:
	case e1000_82575:
	case e1000_80003es2lan:
		pba = E1000_PBA_32K; /* 32K for Rx, 16K for Tx */
		break;
	case e1000_82573: /* 82573: Total Packet Buffer is 32K */
		pba = E1000_PBA_12K; /* 12K for Rx, 20K for Tx */
		break;
	case e1000_ich9lan:
#define E1000_PBA_10K	0x000A
		pba = E1000_PBA_10K;
		break;
	case e1000_ich8lan:
		pba = E1000_PBA_8K;
		break;
	default:
		/* Devices before 82547 had a Packet Buffer of 64K. */
		if (adapter->max_frame_size > 8192)
			pba = E1000_PBA_40K; /* 40K for Rx, 24K for Tx */
		else
			pba = E1000_PBA_48K; /* 48K for Rx, 16K for Tx */
	}

	INIT_DEBUGOUT1("em_init: pba=%dK",pba);
	E1000_WRITE_REG(&adapter->hw, E1000_PBA, pba);

	/* Get the latest mac address, User can use a LAA */
	bcopy(IF_LLADDR(adapter->ifp), adapter->hw.mac.addr,
	    ETHER_ADDR_LEN);

	/* Put the address into the Receive Address Array */
	e1000_rar_set(&adapter->hw, adapter->hw.mac.addr, 0);

	/*
	 * With the 82571 adapter, RAR[0] may be overwritten
	 * when the other port is reset, we make a duplicate
	 * in RAR[14] for that eventuality, this assures
	 * the interface continues to function.
	 */
	if (adapter->hw.mac.type == e1000_82571) {
		e1000_set_laa_state_82571(&adapter->hw, TRUE);
		e1000_rar_set(&adapter->hw, adapter->hw.mac.addr,
		    E1000_RAR_ENTRIES - 1);
	}

	/* Initialize the hardware */
	if (em_hardware_init(adapter)) {
		device_printf(dev, "Unable to initialize the hardware\n");
		return;
	}
	em_update_link_status(adapter);

	/* Setup VLAN support, basic and offload if available */
	E1000_WRITE_REG(&adapter->hw, E1000_VET, ETHERTYPE_VLAN);
	if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING)
		em_enable_hw_vlans(adapter);

	/* Set hardware offload abilities */
	ifp->if_hwassist = 0;
	if (adapter->hw.mac.type >= e1000_82543) {
		if (ifp->if_capenable & IFCAP_TXCSUM)
			ifp->if_hwassist |= (CSUM_TCP | CSUM_UDP);
#if __FreeBSD_version >= 700000
		if (ifp->if_capenable & IFCAP_TSO4)
			ifp->if_hwassist |= CSUM_TSO;
#endif
	}

	/* Configure for OS presence */
	em_init_manageability(adapter);

	/* Prepare transmit descriptors and buffers */
	em_setup_transmit_structures(adapter);
	em_initialize_transmit_unit(adapter);

	/* Setup Multicast table */
	em_set_multi(adapter);

	/* Prepare receive descriptors and buffers */
	if (em_setup_receive_structures(adapter)) {
		device_printf(dev, "Could not setup receive structures\n");
		em_stop(adapter);
		return;
	}
	em_initialize_receive_unit(adapter);

	/* Don't lose promiscuous settings */
	em_set_promisc(adapter);

	ifp->if_drv_flags |= IFF_DRV_RUNNING;
	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;

	callout_reset(&adapter->timer, hz, em_local_timer, adapter);
	e1000_clear_hw_cntrs_base_generic(&adapter->hw);

#ifdef DEVICE_POLLING
	/*
	 * Only enable interrupts if we are not polling, make sure
	 * they are off otherwise.
	 */
	if (ifp->if_capenable & IFCAP_POLLING)
		em_disable_intr(adapter);
	else
#endif /* DEVICE_POLLING */
		em_enable_intr(adapter);

	/* Don't reset the phy next time init gets called */
	adapter->hw.phy.reset_disable = TRUE;
}
1397
/*
 * ifnet if_init entry point: acquire the core lock and run the
 * real initialization routine.
 */
static void
em_init(void *arg)
{
	struct adapter *adapter;

	adapter = arg;
	EM_CORE_LOCK(adapter);
	em_init_locked(adapter);
	EM_CORE_UNLOCK(adapter);
}
1407
1408
1409 #ifdef DEVICE_POLLING
1410 /*********************************************************************
1411 *
1412 * Legacy polling routine
1413 *
1414 *********************************************************************/
/*
 * Legacy polling routine, registered through ether_poll_register().
 * Services the receive ring with a budget of "count" and reaps the
 * transmit ring; with POLL_AND_CHECK_STATUS it also reads ICR to
 * catch link-state changes.  Note the core lock is dropped before
 * the TX lock is taken -- they are never held together here.
 */
static void
em_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
{
	struct adapter *adapter = ifp->if_softc;
	uint32_t reg_icr;

	EM_CORE_LOCK(adapter);
	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
		EM_CORE_UNLOCK(adapter);
		return;
	}

	if (cmd == POLL_AND_CHECK_STATUS) {
		reg_icr = E1000_READ_REG(&adapter->hw, E1000_ICR);
		/* Refresh link state on RX sequence error or LSC. */
		if (reg_icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
			callout_stop(&adapter->timer);
			adapter->hw.mac.get_link_status = 1;
			e1000_check_for_link(&adapter->hw);
			em_update_link_status(adapter);
			callout_reset(&adapter->timer, hz,
			    em_local_timer, adapter);
		}
	}
	em_rxeof(adapter, count);
	EM_CORE_UNLOCK(adapter);

	EM_TX_LOCK(adapter);
	em_txeof(adapter);

	if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
		em_start_locked(ifp);
	EM_TX_UNLOCK(adapter);
}
1448 #endif /* DEVICE_POLLING */
1449
1450 #ifndef EM_FAST_IRQ
1451 /*********************************************************************
1452 *
1453 * Legacy Interrupt Service routine
1454 *
1455 *********************************************************************/
1456
/*
 * Legacy (non-fast) interrupt service routine.  Loops reading ICR
 * until no relevant cause bits remain, servicing the rx/tx rings
 * and link-state changes along the way.  Output is restarted after
 * the core lock is released, since em_start() takes the TX lock.
 */
static void
em_intr(void *arg)
{
	struct adapter *adapter = arg;
	struct ifnet *ifp;
	uint32_t reg_icr;

	EM_CORE_LOCK(adapter);
	ifp = adapter->ifp;

	/* When polling is enabled, em_poll() does all the work. */
	if (ifp->if_capenable & IFCAP_POLLING) {
		EM_CORE_UNLOCK(adapter);
		return;
	}

	for (;;) {
		reg_icr = E1000_READ_REG(&adapter->hw, E1000_ICR);

		/* On 82571+ the INT_ASSERTED bit marks our interrupts. */
		if (adapter->hw.mac.type >= e1000_82571 &&
		    (reg_icr & E1000_ICR_INT_ASSERTED) == 0)
			break;
		else if (reg_icr == 0)
			break;

		/*
		 * XXX: some laptops trigger several spurious interrupts
		 * on em(4) when in the resume cycle. The ICR register
		 * reports all-ones value in this case. Processing such
		 * interrupts would lead to a freeze. I don't know why.
		 */
		if (reg_icr == 0xffffffff)
			break;

		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
			em_rxeof(adapter, -1);
			EM_TX_LOCK(adapter);
			em_txeof(adapter);
			EM_TX_UNLOCK(adapter);
		}

		/* Link status change */
		if (reg_icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
			callout_stop(&adapter->timer);
			adapter->hw.mac.get_link_status = 1;
			e1000_check_for_link(&adapter->hw);
			em_update_link_status(adapter);
			/* Deal with TX cruft when link lost */
			em_tx_purge(adapter);
			callout_reset(&adapter->timer, hz,
			    em_local_timer, adapter);
		}

		if (reg_icr & E1000_ICR_RXO)
			adapter->rx_overruns++;
	}
	EM_CORE_UNLOCK(adapter);

	/* Kick the transmit path outside the core lock. */
	if (ifp->if_drv_flags & IFF_DRV_RUNNING &&
	    !IFQ_DRV_IS_EMPTY(&ifp->if_snd))
		em_start(ifp);
}
1518
1519 #else /* EM_FAST_IRQ, then fast interrupt routines only */
1520
/*
 * Deferred link-state task for the fast interrupt path: refreshes
 * link status, purges stale TX state, and restarts the local
 * timer.  Does nothing if the interface is no longer running.
 */
static void
em_handle_link(void *context, int pending)
{
	struct adapter *adapter = context;
	struct ifnet *ifp;

	ifp = adapter->ifp;

	EM_CORE_LOCK(adapter);
	if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
		EM_CORE_UNLOCK(adapter);
		return;
	}

	/* Stop the timer while we poke link state, then re-arm it. */
	callout_stop(&adapter->timer);
	adapter->hw.mac.get_link_status = 1;
	e1000_check_for_link(&adapter->hw);
	em_update_link_status(adapter);
	/* Deal with TX cruft when link lost */
	em_tx_purge(adapter);
	callout_reset(&adapter->timer, hz, em_local_timer, adapter);
	EM_CORE_UNLOCK(adapter);
}
1544
1545 #if __FreeBSD_version >= 700000
1546 #if !defined(NET_LOCK_GIANT)
1547 #define NET_LOCK_GIANT()
1548 #define NET_UNLOCK_GIANT()
1549 #endif
1550 #endif
1551
/*
 * Deferred rx/tx work for the fast interrupt path.  Cleans the
 * receive ring (re-enqueueing itself when em_rxeof() returns
 * nonzero, presumably meaning more RX work remains -- confirm
 * against em_rxeof), reaps transmit descriptors, restarts output,
 * then unmasks interrupts that em_intr_fast() disabled.
 */
static void
em_handle_rxtx(void *context, int pending)
{
	struct adapter *adapter = context;
	struct ifnet *ifp;

	NET_LOCK_GIANT();
	ifp = adapter->ifp;

	/*
	 * TODO:
	 * It should be possible to run the tx clean loop without the lock.
	 */
	if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
		if (em_rxeof(adapter, adapter->rx_process_limit) != 0)
			taskqueue_enqueue(adapter->tq, &adapter->rxtx_task);
		EM_TX_LOCK(adapter);
		em_txeof(adapter);

		if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
			em_start_locked(ifp);
		EM_TX_UNLOCK(adapter);
	}

	em_enable_intr(adapter);
	NET_UNLOCK_GIANT();
}
1579
1580 /*********************************************************************
1581 *
1582 * Fast Interrupt Service routine
1583 *
1584 *********************************************************************/
/*
 * Fast interrupt (filter) routine: classifies the interrupt, masks
 * further interrupts, and defers the real work to taskqueues.  On
 * pre-7.0 kernels fast handlers return void, hence the empty
 * FILTER_* shims below.
 */
#if __FreeBSD_version < 700000
#define FILTER_STRAY
#define FILTER_HANDLED
static void
#else
static int
#endif
em_intr_fast(void *arg)
{
	struct adapter *adapter = arg;
	struct ifnet *ifp;
	uint32_t reg_icr;

	ifp = adapter->ifp;

	reg_icr = E1000_READ_REG(&adapter->hw, E1000_ICR);

	/* Hot eject? */
	if (reg_icr == 0xffffffff)
		return FILTER_STRAY;

	/* Definitely not our interrupt. */
	if (reg_icr == 0x0)
		return FILTER_STRAY;

	/*
	 * Starting with the 82571 chip, bit 31 should be used to
	 * determine whether the interrupt belongs to us.
	 */
	if (adapter->hw.mac.type >= e1000_82571 &&
	    (reg_icr & E1000_ICR_INT_ASSERTED) == 0)
		return FILTER_STRAY;

	/*
	 * Mask interrupts until the taskqueue is finished running.  This is
	 * cheap, just assume that it is needed.  This also works around the
	 * MSI message reordering errata on certain systems.
	 */
	em_disable_intr(adapter);
	taskqueue_enqueue(adapter->tq, &adapter->rxtx_task);

	/* Link status change */
	if (reg_icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC))
		taskqueue_enqueue(taskqueue_fast, &adapter->link_task);

	if (reg_icr & E1000_ICR_RXO)
		adapter->rx_overruns++;
	return FILTER_HANDLED;
}
1634 #endif /* EM_FAST_IRQ */
1635
1636 /*********************************************************************
1637 *
1638 * Media Ioctl callback
1639 *
1640 * This routine is called whenever the user queries the status of
1641 * the interface using ifconfig.
1642 *
1643 **********************************************************************/
1644 static void
1645 em_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
1646 {
1647 struct adapter *adapter = ifp->if_softc;
1648 u_char fiber_type = IFM_1000_SX;
1649
1650 INIT_DEBUGOUT("em_media_status: begin");
1651
1652 EM_CORE_LOCK(adapter);
1653 e1000_check_for_link(&adapter->hw);
1654 em_update_link_status(adapter);
1655
1656 ifmr->ifm_status = IFM_AVALID;
1657 ifmr->ifm_active = IFM_ETHER;
1658
1659 if (!adapter->link_active) {
1660 EM_CORE_UNLOCK(adapter);
1661 return;
1662 }
1663
1664 ifmr->ifm_status |= IFM_ACTIVE;
1665
1666 if ((adapter->hw.phy.media_type == e1000_media_type_fiber) ||
1667 (adapter->hw.phy.media_type == e1000_media_type_internal_serdes)) {
1668 if (adapter->hw.mac.type == e1000_82545)
1669 fiber_type = IFM_1000_LX;
1670 ifmr->ifm_active |= fiber_type | IFM_FDX;
1671 } else {
1672 switch (adapter->link_speed) {
1673 case 10:
1674 ifmr->ifm_active |= IFM_10_T;
1675 break;
1676 case 100:
1677 ifmr->ifm_active |= IFM_100_TX;
1678 break;
1679 case 1000:
1680 ifmr->ifm_active |= IFM_1000_T;
1681 break;
1682 }
1683 if (adapter->link_duplex == FULL_DUPLEX)
1684 ifmr->ifm_active |= IFM_FDX;
1685 else
1686 ifmr->ifm_active |= IFM_HDX;
1687 }
1688 EM_CORE_UNLOCK(adapter);
1689 }
1690
1691 /*********************************************************************
1692 *
1693 * Media Ioctl callback
1694 *
1695 * This routine is called when the user changes speed/duplex using
 * media/mediaopt option with ifconfig.
1697 *
1698 **********************************************************************/
/*
 * Media change callback: apply the speed/duplex selection made via
 * ifconfig's media/mediaopt options, then re-initialize the
 * adapter so the PHY is reset with the new settings.
 * Returns 0 on success, EINVAL if the media word is not Ethernet.
 */
static int
em_media_change(struct ifnet *ifp)
{
	struct adapter *adapter = ifp->if_softc;
	struct ifmedia *ifm = &adapter->media;

	INIT_DEBUGOUT("em_media_change: begin");

	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
		return (EINVAL);

	EM_CORE_LOCK(adapter);
	switch (IFM_SUBTYPE(ifm->ifm_media)) {
	case IFM_AUTO:
		adapter->hw.mac.autoneg = DO_AUTO_NEG;
		adapter->hw.phy.autoneg_advertised = AUTONEG_ADV_DEFAULT;
		break;
	case IFM_1000_LX:
	case IFM_1000_SX:
	case IFM_1000_T:
		/* Gigabit: autonegotiate, advertising 1000 full only. */
		adapter->hw.mac.autoneg = DO_AUTO_NEG;
		adapter->hw.phy.autoneg_advertised = ADVERTISE_1000_FULL;
		break;
	case IFM_100_TX:
		/* Forced 100 Mb/s; duplex from the media options. */
		adapter->hw.mac.autoneg = FALSE;
		adapter->hw.phy.autoneg_advertised = 0;
		if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX)
			adapter->hw.mac.forced_speed_duplex = ADVERTISE_100_FULL;
		else
			adapter->hw.mac.forced_speed_duplex = ADVERTISE_100_HALF;
		break;
	case IFM_10_T:
		/* Forced 10 Mb/s; duplex from the media options. */
		adapter->hw.mac.autoneg = FALSE;
		adapter->hw.phy.autoneg_advertised = 0;
		if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX)
			adapter->hw.mac.forced_speed_duplex = ADVERTISE_10_FULL;
		else
			adapter->hw.mac.forced_speed_duplex = ADVERTISE_10_HALF;
		break;
	default:
		device_printf(adapter->dev, "Unsupported media type\n");
	}

	/* As the speed/duplex settings may have changed we need to
	 * reset the PHY.
	 */
	adapter->hw.phy.reset_disable = FALSE;

	em_init_locked(adapter);
	EM_CORE_UNLOCK(adapter);

	return (0);
}
1752
1753 /*********************************************************************
1754 *
1755 * This routine maps the mbufs to tx descriptors.
1756 *
1757 * return 0 on success, positive on failure
1758 **********************************************************************/
1759
1760 static int
1761 em_encap(struct adapter *adapter, struct mbuf **m_headp)
1762 {
1763 bus_dma_segment_t segs[EM_MAX_SCATTER];
1764 bus_dmamap_t map;
1765 struct em_buffer *tx_buffer, *tx_buffer_mapped;
1766 struct e1000_tx_desc *ctxd = NULL;
1767 struct mbuf *m_head;
1768 uint32_t txd_upper, txd_lower, txd_used, txd_saved;
1769 int nsegs, i, j, first, last = 0;
1770 int error, do_tso, tso_desc = 0;
1771 #if __FreeBSD_version < 700000
1772 struct m_tag *mtag;
1773 #endif
1774 m_head = *m_headp;
1775 txd_upper = txd_lower = txd_used = txd_saved = 0;
1776
1777 #if __FreeBSD_version >= 700000
1778 do_tso = ((m_head->m_pkthdr.csum_flags & CSUM_TSO) != 0);
1779 #else
1780 do_tso = 0;
1781 #endif
1782
1783 /*
1784 * Force a cleanup if number of TX descriptors
1785 * available hits the threshold
1786 */
1787 if (adapter->num_tx_desc_avail <= EM_TX_CLEANUP_THRESHOLD) {
1788 em_txeof(adapter);
1789 /* Now do we at least have a minimal? */
1790 if (adapter->num_tx_desc_avail <= EM_TX_OP_THRESHOLD) {
1791 adapter->no_tx_desc_avail1++;
1792 return (ENOBUFS);
1793 }
1794 }
1795
1796
1797 /*
1798 * TSO workaround:
1799 * If an mbuf is only header we need
1800 * to pull 4 bytes of data into it.
1801 */
1802 if (do_tso && (m_head->m_len <= M_TSO_LEN)) {
1803 m_head = m_pullup(m_head, M_TSO_LEN + 4);
1804 *m_headp = m_head;
1805 if (m_head == NULL)
1806 return (ENOBUFS);
1807 }
1808
1809 /*
1810 * Map the packet for DMA
1811 *
1812 * Capture the first descriptor index,
1813 * this descriptor will have the index
1814 * of the EOP which is the only one that
1815 * now gets a DONE bit writeback.
1816 */
1817 first = adapter->next_avail_tx_desc;
1818 tx_buffer = &adapter->tx_buffer_area[first];
1819 tx_buffer_mapped = tx_buffer;
1820 map = tx_buffer->map;
1821
1822 error = bus_dmamap_load_mbuf_sg(adapter->txtag, map,
1823 *m_headp, segs, &nsegs, BUS_DMA_NOWAIT);
1824
1825 /*
1826 * There are two types of errors we can (try) to handle:
1827 * - EFBIG means the mbuf chain was too long and bus_dma ran
1828 * out of segments. Defragment the mbuf chain and try again.
1829 * - ENOMEM means bus_dma could not obtain enough bounce buffers
1830 * at this point in time. Defer sending and try again later.
1831 * All other errors, in particular EINVAL, are fatal and prevent the
1832 * mbuf chain from ever going through. Drop it and report error.
1833 */
1834 if (error == EFBIG) {
1835 struct mbuf *m;
1836
1837 m = m_defrag(*m_headp, M_DONTWAIT);
1838 if (m == NULL) {
1839 adapter->mbuf_alloc_failed++;
1840 m_freem(*m_headp);
1841 *m_headp = NULL;
1842 return (ENOBUFS);
1843 }
1844 *m_headp = m;
1845
1846 /* Try it again */
1847 error = bus_dmamap_load_mbuf_sg(adapter->txtag, map,
1848 *m_headp, segs, &nsegs, BUS_DMA_NOWAIT);
1849
1850 if (error == ENOMEM) {
1851 adapter->no_tx_dma_setup++;
1852 return (error);
1853 } else if (error != 0) {
1854 adapter->no_tx_dma_setup++;
1855 m_freem(*m_headp);
1856 *m_headp = NULL;
1857 return (error);
1858 }
1859 } else if (error == ENOMEM) {
1860 adapter->no_tx_dma_setup++;
1861 return (error);
1862 } else if (error != 0) {
1863 adapter->no_tx_dma_setup++;
1864 m_freem(*m_headp);
1865 *m_headp = NULL;
1866 return (error);
1867 }
1868
1869 /*
1870 * TSO Hardware workaround, if this packet is not
1871 * TSO, and is only a single descriptor long, and
1872 * it follows a TSO burst, then we need to add a
1873 * sentinel descriptor to prevent premature writeback.
1874 */
1875 if ((do_tso == 0) && (adapter->tx_tso == TRUE)) {
1876 if (nsegs == 1)
1877 tso_desc = TRUE;
1878 adapter->tx_tso = FALSE;
1879 }
1880
1881 if (nsegs > (adapter->num_tx_desc_avail - 2)) {
1882 adapter->no_tx_desc_avail2++;
1883 bus_dmamap_unload(adapter->txtag, map);
1884 return (ENOBUFS);
1885 }
1886 m_head = *m_headp;
1887
1888 /* Do hardware assists */
1889 #if __FreeBSD_version >= 700000
1890 if (em_tso_setup(adapter, m_head, &txd_upper, &txd_lower))
1891 /* we need to make a final sentinel transmit desc */
1892 tso_desc = TRUE;
1893 else
1894 #endif
1895 if (m_head->m_pkthdr.csum_flags & CSUM_OFFLOAD)
1896 em_transmit_checksum_setup(adapter, m_head,
1897 &txd_upper, &txd_lower);
1898
1899 i = adapter->next_avail_tx_desc;
1900 if (adapter->pcix_82544)
1901 txd_saved = i;
1902
1903 /* Set up our transmit descriptors */
1904 for (j = 0; j < nsegs; j++) {
1905 bus_size_t seg_len;
1906 bus_addr_t seg_addr;
1907 /* If adapter is 82544 and on PCIX bus */
1908 if(adapter->pcix_82544) {
1909 DESC_ARRAY desc_array;
1910 uint32_t array_elements, counter;
1911 /*
1912 * Check the Address and Length combination and
1913 * split the data accordingly
1914 */
1915 array_elements = em_fill_descriptors(segs[j].ds_addr,
1916 segs[j].ds_len, &desc_array);
1917 for (counter = 0; counter < array_elements; counter++) {
1918 if (txd_used == adapter->num_tx_desc_avail) {
1919 adapter->next_avail_tx_desc = txd_saved;
1920 adapter->no_tx_desc_avail2++;
1921 bus_dmamap_unload(adapter->txtag, map);
1922 return (ENOBUFS);
1923 }
1924 tx_buffer = &adapter->tx_buffer_area[i];
1925 ctxd = &adapter->tx_desc_base[i];
1926 ctxd->buffer_addr = htole64(
1927 desc_array.descriptor[counter].address);
1928 ctxd->lower.data = htole32(
1929 (adapter->txd_cmd | txd_lower | (uint16_t)
1930 desc_array.descriptor[counter].length));
1931 ctxd->upper.data =
1932 htole32((txd_upper));
1933 last = i;
1934 if (++i == adapter->num_tx_desc)
1935 i = 0;
1936 tx_buffer->m_head = NULL;
1937 tx_buffer->next_eop = -1;
1938 txd_used++;
1939 }
1940 } else {
1941 tx_buffer = &adapter->tx_buffer_area[i];
1942 ctxd = &adapter->tx_desc_base[i];
1943 seg_addr = segs[j].ds_addr;
1944 seg_len = segs[j].ds_len;
1945 /*
1946 ** TSO Workaround:
1947 ** If this is the last descriptor, we want to
1948 ** split it so we have a small final sentinel
1949 */
1950 if (tso_desc && (j == (nsegs -1)) && (seg_len > 8)) {
1951 seg_len -= 4;
1952 ctxd->buffer_addr = htole64(seg_addr);
1953 ctxd->lower.data = htole32(
1954 adapter->txd_cmd | txd_lower | seg_len);
1955 ctxd->upper.data =
1956 htole32(txd_upper);
1957 if (++i == adapter->num_tx_desc)
1958 i = 0;
1959 /* Now make the sentinel */
1960 ++txd_used; /* using an extra txd */
1961 ctxd = &adapter->tx_desc_base[i];
1962 tx_buffer = &adapter->tx_buffer_area[i];
1963 ctxd->buffer_addr =
1964 htole64(seg_addr + seg_len);
1965 ctxd->lower.data = htole32(
1966 adapter->txd_cmd | txd_lower | 4);
1967 ctxd->upper.data =
1968 htole32(txd_upper);
1969 last = i;
1970 if (++i == adapter->num_tx_desc)
1971 i = 0;
1972 } else {
1973 ctxd->buffer_addr = htole64(seg_addr);
1974 ctxd->lower.data = htole32(
1975 adapter->txd_cmd | txd_lower | seg_len);
1976 ctxd->upper.data =
1977 htole32(txd_upper);
1978 last = i;
1979 if (++i == adapter->num_tx_desc)
1980 i = 0;
1981 }
1982 tx_buffer->m_head = NULL;
1983 tx_buffer->next_eop = -1;
1984 }
1985 }
1986
1987 adapter->next_avail_tx_desc = i;
1988 if (adapter->pcix_82544)
1989 adapter->num_tx_desc_avail -= txd_used;
1990 else {
1991 adapter->num_tx_desc_avail -= nsegs;
1992 if (tso_desc) /* TSO used an extra for sentinel */
1993 adapter->num_tx_desc_avail -= txd_used;
1994 }
1995
1996 /*
1997 ** Handle VLAN tag, this is the
1998 ** biggest difference between
1999 ** 6.x and 7
2000 */
2001 #if __FreeBSD_version < 700000
2002 /* Find out if we are in vlan mode. */
2003 mtag = VLAN_OUTPUT_TAG(ifp, m_head);
2004 if (mtag != NULL) {
2005 ctxd->upper.fields.special =
2006 htole16(VLAN_TAG_VALUE(mtag));
2007 #else /* FreeBSD 7 */
2008 if (m_head->m_flags & M_VLANTAG) {
2009 /* Set the vlan id. */
2010 ctxd->upper.fields.special =
2011 htole16(m_head->m_pkthdr.ether_vtag);
2012 #endif
2013 /* Tell hardware to add tag */
2014 ctxd->lower.data |= htole32(E1000_TXD_CMD_VLE);
2015 }
2016
2017 tx_buffer->m_head = m_head;
2018 tx_buffer_mapped->map = tx_buffer->map;
2019 tx_buffer->map = map;
2020 bus_dmamap_sync(adapter->txtag, map, BUS_DMASYNC_PREWRITE);
2021
2022 /*
2023 * Last Descriptor of Packet
2024 * needs End Of Packet (EOP)
2025 * and Report Status (RS)
2026 */
2027 ctxd->lower.data |=
2028 htole32(E1000_TXD_CMD_EOP | E1000_TXD_CMD_RS);
2029 /*
2030 * Keep track in the first buffer which
2031 * descriptor will be written back
2032 */
2033 tx_buffer = &adapter->tx_buffer_area[first];
2034 tx_buffer->next_eop = last;
2035
2036 /*
2037 * Advance the Transmit Descriptor Tail (TDT), this tells the E1000
2038 * that this frame is available to transmit.
2039 */
2040 bus_dmamap_sync(adapter->txdma.dma_tag, adapter->txdma.dma_map,
2041 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2042 if (adapter->hw.mac.type == e1000_82547 &&
2043 adapter->link_duplex == HALF_DUPLEX)
2044 em_82547_move_tail(adapter);
2045 else {
2046 E1000_WRITE_REG(&adapter->hw, E1000_TDT(0), i);
2047 if (adapter->hw.mac.type == e1000_82547)
2048 em_82547_update_fifo_head(adapter,
2049 m_head->m_pkthdr.len);
2050 }
2051
2052 return (0);
2053 }
2054
2055 /*********************************************************************
2056 *
2057 * This routine maps the mbufs to Advanced TX descriptors.
2058 * used by the 82575 adapter. It also needs no workarounds.
2059 *
2060 **********************************************************************/
2061
/*
 * Map the mbuf chain in *m_headp into Advanced (82575) transmit
 * descriptors and post the frame to the hardware.
 *
 * Returns 0 on success, ENOBUFS when descriptors are exhausted, or a
 * bus_dma error code.  On EFBIG the chain is defragmented and retried
 * once; on unrecoverable errors the mbuf is freed and *m_headp set to
 * NULL, except for ENOMEM where the chain is kept so the caller can
 * requeue it.
 */
static int
em_adv_encap(struct adapter *adapter, struct mbuf **m_headp)
{
	bus_dma_segment_t segs[EM_MAX_SCATTER];
	bus_dmamap_t map;
	struct em_buffer *tx_buffer, *tx_buffer_mapped;
	union e1000_adv_tx_desc *txd = NULL;
	struct mbuf *m_head;
	u32 olinfo_status = 0, cmd_type_len = 0;
	int nsegs, i, j, error, first, last = 0;
#if __FreeBSD_version < 700000
	/*
	 * NOTE(review): this pre-7.0 path uses `ifp` below, but no local
	 * `ifp` is declared in this function -- verify this branch
	 * actually compiles on 6.x.
	 */
	struct m_tag *mtag;
#else
	u32 paylen = 0;
#endif

	m_head = *m_headp;


	/* Set basic descriptor constants */
	cmd_type_len |= E1000_ADVTXD_DTYP_DATA;
	cmd_type_len |= E1000_ADVTXD_DCMD_IFCS | E1000_ADVTXD_DCMD_DEXT;
	/* Request VLAN tag insertion when the frame carries a tag */
#if __FreeBSD_version < 700000
	mtag = VLAN_OUTPUT_TAG(ifp, m_head);
	if (mtag != NULL)
#else
	if (m_head->m_flags & M_VLANTAG)
#endif
		cmd_type_len |= E1000_ADVTXD_DCMD_VLE;

	/*
	 * Force a cleanup if number of TX descriptors
	 * available hits the threshold
	 */
	if (adapter->num_tx_desc_avail <= EM_TX_CLEANUP_THRESHOLD) {
		em_txeof(adapter);
		/* Now do we at least have a minimal? */
		if (adapter->num_tx_desc_avail <= EM_TX_OP_THRESHOLD) {
			adapter->no_tx_desc_avail1++;
			return (ENOBUFS);
		}
	}

	/*
	 * Map the packet for DMA.
	 *
	 * Capture the first descriptor index,
	 * this descriptor will have the index
	 * of the EOP which is the only one that
	 * now gets a DONE bit writeback.
	 */
	first = adapter->next_avail_tx_desc;
	tx_buffer = &adapter->tx_buffer_area[first];
	tx_buffer_mapped = tx_buffer;
	map = tx_buffer->map;

	error = bus_dmamap_load_mbuf_sg(adapter->txtag, map,
	    *m_headp, segs, &nsegs, BUS_DMA_NOWAIT);

	/* Too many segments: compact the chain and try once more */
	if (error == EFBIG) {
		struct mbuf *m;

		m = m_defrag(*m_headp, M_DONTWAIT);
		if (m == NULL) {
			adapter->mbuf_alloc_failed++;
			m_freem(*m_headp);
			*m_headp = NULL;
			return (ENOBUFS);
		}
		*m_headp = m;

		/* Try it again */
		error = bus_dmamap_load_mbuf_sg(adapter->txtag, map,
		    *m_headp, segs, &nsegs, BUS_DMA_NOWAIT);

		if (error == ENOMEM) {
			/* Transient: keep the mbuf for a later retry */
			adapter->no_tx_dma_setup++;
			return (error);
		} else if (error != 0) {
			adapter->no_tx_dma_setup++;
			m_freem(*m_headp);
			*m_headp = NULL;
			return (error);
		}
	} else if (error == ENOMEM) {
		/* Transient: keep the mbuf for a later retry */
		adapter->no_tx_dma_setup++;
		return (error);
	} else if (error != 0) {
		adapter->no_tx_dma_setup++;
		m_freem(*m_headp);
		*m_headp = NULL;
		return (error);
	}

	/* Check again to be sure we have enough descriptors */
	if (nsegs > (adapter->num_tx_desc_avail - 2)) {
		adapter->no_tx_desc_avail2++;
		bus_dmamap_unload(adapter->txtag, map);
		return (ENOBUFS);
	}
	m_head = *m_headp;

	/*
	 * Set up the context descriptor:
	 * used when any hardware offload is done.
	 * This includes CSUM, VLAN, and TSO. It
	 * will use the first descriptor.
	 */
#if __FreeBSD_version >= 700000
	/* First try TSO */
	if (em_tso_adv_setup(adapter, m_head, &paylen)) {
		cmd_type_len |= E1000_ADVTXD_DCMD_TSE;
		olinfo_status |= E1000_TXD_POPTS_IXSM << 8;
		olinfo_status |= E1000_TXD_POPTS_TXSM << 8;
		olinfo_status |= paylen << E1000_ADVTXD_PAYLEN_SHIFT;
	} else
#endif
	/* Do all other context descriptor setup */
	if (em_tx_adv_ctx_setup(adapter, m_head))
		olinfo_status |= E1000_TXD_POPTS_TXSM << 8;

	/* Set up our transmit descriptors: one per DMA segment */
	i = adapter->next_avail_tx_desc;
	for (j = 0; j < nsegs; j++) {
		bus_size_t seg_len;
		bus_addr_t seg_addr;

		tx_buffer = &adapter->tx_buffer_area[i];
		txd = (union e1000_adv_tx_desc *)&adapter->tx_desc_base[i];
		seg_addr = segs[j].ds_addr;
		seg_len = segs[j].ds_len;

		txd->read.buffer_addr = htole64(seg_addr);
		txd->read.cmd_type_len = htole32(
		    adapter->txd_cmd | cmd_type_len | seg_len);
		txd->read.olinfo_status = htole32(olinfo_status);
		last = i;
		/* Ring index wraps at num_tx_desc */
		if (++i == adapter->num_tx_desc)
			i = 0;
		tx_buffer->m_head = NULL;
		tx_buffer->next_eop = -1;
	}

	adapter->next_avail_tx_desc = i;
	adapter->num_tx_desc_avail -= nsegs;

	/*
	 * The mbuf is owned by the LAST descriptor's buffer; swap the
	 * loaded map into that slot so the first slot keeps a free map.
	 */
	tx_buffer->m_head = m_head;
	tx_buffer_mapped->map = tx_buffer->map;
	tx_buffer->map = map;
	bus_dmamap_sync(adapter->txtag, map, BUS_DMASYNC_PREWRITE);

	/*
	 * Last Descriptor of Packet
	 * needs End Of Packet (EOP)
	 * and Report Status (RS)
	 */
	txd->read.cmd_type_len |=
	    htole32(E1000_TXD_CMD_EOP | E1000_TXD_CMD_RS);
	/*
	 * Keep track in the first buffer which
	 * descriptor will be written back
	 */
	tx_buffer = &adapter->tx_buffer_area[first];
	tx_buffer->next_eop = last;

	/*
	 * Advance the Transmit Descriptor Tail (TDT), this tells the E1000
	 * that this frame is available to transmit.
	 */
	bus_dmamap_sync(adapter->txdma.dma_tag, adapter->txdma.dma_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	E1000_WRITE_REG(&adapter->hw, E1000_TDT(0), i);

	return (0);

}
2238
2239 /*********************************************************************
2240 *
2241 * 82547 workaround to avoid controller hang in half-duplex environment.
2242 * The workaround is to avoid queuing a large packet that would span
2243 * the internal Tx FIFO ring boundary. We need to reset the FIFO pointers
2244 * in this case. We do that only when FIFO is quiescent.
2245 *
2246 **********************************************************************/
/*
 * Walk the descriptors between the hardware tail (TDT) and the software
 * tail, advancing TDT only frame-by-frame (at EOP boundaries) so each
 * whole packet can first be checked against the 82547 TX FIFO
 * workaround.  If a packet would span the FIFO boundary and the FIFO
 * cannot yet be reset, re-arm the tx_fifo_timer callout to retry on the
 * next tick.  Also serves as the callout handler for that timer.
 */
static void
em_82547_move_tail(void *arg)
{
	struct adapter *adapter = arg;
	uint16_t hw_tdt;
	uint16_t sw_tdt;
	struct e1000_tx_desc *tx_desc;
	uint16_t length = 0;	/* accumulated bytes of the current frame */
	boolean_t eop = 0;

	EM_TX_LOCK_ASSERT(adapter);

	hw_tdt = E1000_READ_REG(&adapter->hw, E1000_TDT(0));
	sw_tdt = adapter->next_avail_tx_desc;

	while (hw_tdt != sw_tdt) {
		tx_desc = &adapter->tx_desc_base[hw_tdt];
		length += tx_desc->lower.flags.length;
		eop = tx_desc->lower.data & E1000_TXD_CMD_EOP;
		if (++hw_tdt == adapter->num_tx_desc)
			hw_tdt = 0;

		if (eop) {
			/* Whole frame accumulated: safe to test the FIFO */
			if (em_82547_fifo_workaround(adapter, length)) {
				/* FIFO busy: defer and retry next tick */
				adapter->tx_fifo_wrk_cnt++;
				callout_reset(&adapter->tx_fifo_timer, 1,
				    em_82547_move_tail, adapter);
				break;
			}
			E1000_WRITE_REG(&adapter->hw, E1000_TDT(0), hw_tdt);
			em_82547_update_fifo_head(adapter, length);
			length = 0;
		}
	}
}
2282
2283 static int
2284 em_82547_fifo_workaround(struct adapter *adapter, int len)
2285 {
2286 int fifo_space, fifo_pkt_len;
2287
2288 fifo_pkt_len = roundup2(len + EM_FIFO_HDR, EM_FIFO_HDR);
2289
2290 if (adapter->link_duplex == HALF_DUPLEX) {
2291 fifo_space = adapter->tx_fifo_size - adapter->tx_fifo_head;
2292
2293 if (fifo_pkt_len >= (EM_82547_PKT_THRESH + fifo_space)) {
2294 if (em_82547_tx_fifo_reset(adapter))
2295 return (0);
2296 else
2297 return (1);
2298 }
2299 }
2300
2301 return (0);
2302 }
2303
2304 static void
2305 em_82547_update_fifo_head(struct adapter *adapter, int len)
2306 {
2307 int fifo_pkt_len = roundup2(len + EM_FIFO_HDR, EM_FIFO_HDR);
2308
2309 /* tx_fifo_head is always 16 byte aligned */
2310 adapter->tx_fifo_head += fifo_pkt_len;
2311 if (adapter->tx_fifo_head >= adapter->tx_fifo_size) {
2312 adapter->tx_fifo_head -= adapter->tx_fifo_size;
2313 }
2314 }
2315
2316
/*
 * Attempt to reset the 82547 internal TX FIFO pointers.  This is only
 * legal while the FIFO is completely quiescent: descriptor head equals
 * tail, FIFO head equals tail (and their saved copies), and the FIFO
 * packet count is zero.  Returns TRUE if the reset was performed,
 * FALSE if the FIFO was still busy.
 */
static int
em_82547_tx_fifo_reset(struct adapter *adapter)
{
	uint32_t tctl;

	if ((E1000_READ_REG(&adapter->hw, E1000_TDT(0)) ==
	    E1000_READ_REG(&adapter->hw, E1000_TDH(0))) &&
	    (E1000_READ_REG(&adapter->hw, E1000_TDFT) ==
	    E1000_READ_REG(&adapter->hw, E1000_TDFH)) &&
	    (E1000_READ_REG(&adapter->hw, E1000_TDFTS) ==
	    E1000_READ_REG(&adapter->hw, E1000_TDFHS)) &&
	    (E1000_READ_REG(&adapter->hw, E1000_TDFPC) == 0)) {
		/* Disable TX unit while the pointers are rewritten */
		tctl = E1000_READ_REG(&adapter->hw, E1000_TCTL);
		E1000_WRITE_REG(&adapter->hw, E1000_TCTL,
		    tctl & ~E1000_TCTL_EN);

		/* Reset FIFO pointers back to the start of the TX region */
		E1000_WRITE_REG(&adapter->hw, E1000_TDFT,
		    adapter->tx_head_addr);
		E1000_WRITE_REG(&adapter->hw, E1000_TDFH,
		    adapter->tx_head_addr);
		E1000_WRITE_REG(&adapter->hw, E1000_TDFTS,
		    adapter->tx_head_addr);
		E1000_WRITE_REG(&adapter->hw, E1000_TDFHS,
		    adapter->tx_head_addr);

		/* Re-enable TX unit */
		E1000_WRITE_REG(&adapter->hw, E1000_TCTL, tctl);
		E1000_WRITE_FLUSH(&adapter->hw);

		/* Keep the software shadow and statistics in sync */
		adapter->tx_fifo_head = 0;
		adapter->tx_fifo_reset_cnt++;

		return (TRUE);
	}
	else {
		return (FALSE);
	}
}
2357
2358 static void
2359 em_set_promisc(struct adapter *adapter)
2360 {
2361 struct ifnet *ifp = adapter->ifp;
2362 uint32_t reg_rctl;
2363
2364 reg_rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL);
2365
2366 if (ifp->if_flags & IFF_PROMISC) {
2367 reg_rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
2368 E1000_WRITE_REG(&adapter->hw, E1000_RCTL, reg_rctl);
2369 } else if (ifp->if_flags & IFF_ALLMULTI) {
2370 reg_rctl |= E1000_RCTL_MPE;
2371 reg_rctl &= ~E1000_RCTL_UPE;
2372 E1000_WRITE_REG(&adapter->hw, E1000_RCTL, reg_rctl);
2373 }
2374 }
2375
2376 static void
2377 em_disable_promisc(struct adapter *adapter)
2378 {
2379 uint32_t reg_rctl;
2380
2381 reg_rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL);
2382
2383 reg_rctl &= (~E1000_RCTL_UPE);
2384 reg_rctl &= (~E1000_RCTL_MPE);
2385 E1000_WRITE_REG(&adapter->hw, E1000_RCTL, reg_rctl);
2386 }
2387
2388
2389 /*********************************************************************
2390 * Multicast Update
2391 *
2392 * This routine is called whenever multicast address list is updated.
2393 *
2394 **********************************************************************/
2395
2396 static void
2397 em_set_multi(struct adapter *adapter)
2398 {
2399 struct ifnet *ifp = adapter->ifp;
2400 struct ifmultiaddr *ifma;
2401 uint32_t reg_rctl = 0;
2402 uint8_t mta[512]; /* Largest MTS is 4096 bits */
2403 int mcnt = 0;
2404
2405 IOCTL_DEBUGOUT("em_set_multi: begin");
2406
2407 if (adapter->hw.mac.type == e1000_82542 &&
2408 adapter->hw.revision_id == E1000_REVISION_2) {
2409 reg_rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL);
2410 if (adapter->hw.bus.pci_cmd_word & CMD_MEM_WRT_INVALIDATE)
2411 e1000_pci_clear_mwi(&adapter->hw);
2412 reg_rctl |= E1000_RCTL_RST;
2413 E1000_WRITE_REG(&adapter->hw, E1000_RCTL, reg_rctl);
2414 msec_delay(5);
2415 }
2416
2417 IF_ADDR_LOCK(ifp);
2418 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
2419 if (ifma->ifma_addr->sa_family != AF_LINK)
2420 continue;
2421
2422 if (mcnt == MAX_NUM_MULTICAST_ADDRESSES)
2423 break;
2424
2425 bcopy(LLADDR((struct sockaddr_dl *)ifma->ifma_addr),
2426 &mta[mcnt * ETH_ADDR_LEN], ETH_ADDR_LEN);
2427 mcnt++;
2428 }
2429 IF_ADDR_UNLOCK(ifp);
2430
2431 if (mcnt >= MAX_NUM_MULTICAST_ADDRESSES) {
2432 reg_rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL);
2433 reg_rctl |= E1000_RCTL_MPE;
2434 E1000_WRITE_REG(&adapter->hw, E1000_RCTL, reg_rctl);
2435 } else
2436 e1000_update_mc_addr_list(&adapter->hw, mta,
2437 mcnt, 1, adapter->hw.mac.rar_entry_count);
2438
2439 if (adapter->hw.mac.type == e1000_82542 &&
2440 adapter->hw.revision_id == E1000_REVISION_2) {
2441 reg_rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL);
2442 reg_rctl &= ~E1000_RCTL_RST;
2443 E1000_WRITE_REG(&adapter->hw, E1000_RCTL, reg_rctl);
2444 msec_delay(5);
2445 if (adapter->hw.bus.pci_cmd_word & CMD_MEM_WRT_INVALIDATE)
2446 e1000_pci_set_mwi(&adapter->hw);
2447 }
2448 }
2449
2450
2451 /*********************************************************************
2452 * Timer routine
2453 *
2454 * This routine checks for link status and updates statistics.
2455 *
2456 **********************************************************************/
2457
2458 static void
2459 em_local_timer(void *arg)
2460 {
2461 struct adapter *adapter = arg;
2462 struct ifnet *ifp = adapter->ifp;
2463
2464 EM_CORE_LOCK_ASSERT(adapter);
2465
2466 e1000_check_for_link(&adapter->hw);
2467 em_update_link_status(adapter);
2468 em_update_stats_counters(adapter);
2469
2470 /* Reset LAA into RAR[0] on 82571 */
2471 if (e1000_get_laa_state_82571(&adapter->hw) == TRUE)
2472 e1000_rar_set(&adapter->hw, adapter->hw.mac.addr, 0);
2473
2474 if (em_display_debug_stats && ifp->if_drv_flags & IFF_DRV_RUNNING)
2475 em_print_hw_stats(adapter);
2476
2477 em_smartspeed(adapter);
2478
2479 /*
2480 * Each second we check the watchdog to
2481 * protect against hardware hangs.
2482 */
2483 em_watchdog(adapter);
2484
2485 callout_reset(&adapter->timer, hz, em_local_timer, adapter);
2486
2487 }
2488
2489 static void
2490 em_update_link_status(struct adapter *adapter)
2491 {
2492 struct ifnet *ifp = adapter->ifp;
2493 device_t dev = adapter->dev;
2494
2495 if (E1000_READ_REG(&adapter->hw, E1000_STATUS) &
2496 E1000_STATUS_LU) {
2497 if (adapter->link_active == 0) {
2498 e1000_get_speed_and_duplex(&adapter->hw,
2499 &adapter->link_speed, &adapter->link_duplex);
2500 /* Check if we must disable SPEED_MODE bit on PCI-E */
2501 if ((adapter->link_speed != SPEED_1000) &&
2502 ((adapter->hw.mac.type == e1000_82571) ||
2503 (adapter->hw.mac.type == e1000_82572))) {
2504 int tarc0;
2505
2506 tarc0 = E1000_READ_REG(&adapter->hw,
2507 E1000_TARC(0));
2508 tarc0 &= ~SPEED_MODE_BIT;
2509 E1000_WRITE_REG(&adapter->hw,
2510 E1000_TARC(0), tarc0);
2511 }
2512 if (bootverbose)
2513 device_printf(dev, "Link is up %d Mbps %s\n",
2514 adapter->link_speed,
2515 ((adapter->link_duplex == FULL_DUPLEX) ?
2516 "Full Duplex" : "Half Duplex"));
2517 adapter->link_active = 1;
2518 adapter->smartspeed = 0;
2519 ifp->if_baudrate = adapter->link_speed * 1000000;
2520 if_link_state_change(ifp, LINK_STATE_UP);
2521 }
2522 } else {
2523 if (adapter->link_active == 1) {
2524 ifp->if_baudrate = adapter->link_speed = 0;
2525 adapter->link_duplex = 0;
2526 if (bootverbose)
2527 device_printf(dev, "Link is Down\n");
2528 adapter->link_active = 0;
2529 if_link_state_change(ifp, LINK_STATE_DOWN);
2530 }
2531 }
2532 }
2533
2534 /*********************************************************************
2535 *
2536 * This routine disables all traffic on the adapter by issuing a
2537 * global reset on the MAC and deallocates TX/RX buffers.
2538 *
2539 **********************************************************************/
2540
2541 static void
2542 em_stop(void *arg)
2543 {
2544 struct adapter *adapter = arg;
2545 struct ifnet *ifp = adapter->ifp;
2546
2547 EM_CORE_LOCK_ASSERT(adapter);
2548
2549 INIT_DEBUGOUT("em_stop: begin");
2550
2551 em_disable_intr(adapter);
2552 callout_stop(&adapter->timer);
2553 callout_stop(&adapter->tx_fifo_timer);
2554
2555 /* Tell the stack that the interface is no longer active */
2556 ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
2557
2558 e1000_reset_hw(&adapter->hw);
2559 if (adapter->hw.mac.type >= e1000_82544)
2560 E1000_WRITE_REG(&adapter->hw, E1000_WUC, 0);
2561 }
2562
2563
2564 /*********************************************************************
2565 *
2566 * Determine hardware revision.
2567 *
2568 **********************************************************************/
2569 static void
2570 em_identify_hardware(struct adapter *adapter)
2571 {
2572 device_t dev = adapter->dev;
2573
2574 /* Make sure our PCI config space has the necessary stuff set */
2575 adapter->hw.bus.pci_cmd_word = pci_read_config(dev, PCIR_COMMAND, 2);
2576 if (!((adapter->hw.bus.pci_cmd_word & PCIM_CMD_BUSMASTEREN) &&
2577 (adapter->hw.bus.pci_cmd_word & PCIM_CMD_MEMEN))) {
2578 device_printf(dev, "Memory Access and/or Bus Master bits "
2579 "were not set!\n");
2580 adapter->hw.bus.pci_cmd_word |=
2581 (PCIM_CMD_BUSMASTEREN | PCIM_CMD_MEMEN);
2582 pci_write_config(dev, PCIR_COMMAND,
2583 adapter->hw.bus.pci_cmd_word, 2);
2584 }
2585
2586 /* Save off the information about this board */
2587 adapter->hw.vendor_id = pci_get_vendor(dev);
2588 adapter->hw.device_id = pci_get_device(dev);
2589 adapter->hw.revision_id = pci_read_config(dev, PCIR_REVID, 1);
2590 adapter->hw.subsystem_vendor_id =
2591 pci_read_config(dev, PCIR_SUBVEND_0, 2);
2592 adapter->hw.subsystem_device_id =
2593 pci_read_config(dev, PCIR_SUBDEV_0, 2);
2594
2595 /* Do Shared Code Init and Setup */
2596 if (e1000_set_mac_type(&adapter->hw)) {
2597 device_printf(dev, "Setup init failure\n");
2598 return;
2599 }
2600 }
2601
/*
 * Allocate the PCI resources the device needs: the memory-mapped
 * register BAR, an I/O BAR on older parts, MSI/X or MSI vectors where
 * supported, and the interrupt resource.  Returns 0 on success or
 * ENXIO.  NOTE(review): on a partial failure this returns without
 * releasing what was already allocated -- presumably the attach path
 * calls em_free_pci_resources() on error; verify against the caller.
 */
static int
em_allocate_pci_resources(struct adapter *adapter)
{
	device_t dev = adapter->dev;
	int val, rid;

	/* Map the register BAR */
	rid = PCIR_BAR(0);
	adapter->res_memory = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
	    &rid, RF_ACTIVE);
	if (adapter->res_memory == NULL) {
		device_printf(dev, "Unable to allocate bus resource: memory\n");
		return (ENXIO);
	}
	adapter->osdep.mem_bus_space_tag =
	    rman_get_bustag(adapter->res_memory);
	adapter->osdep.mem_bus_space_handle =
	    rman_get_bushandle(adapter->res_memory);
	adapter->hw.hw_addr = (uint8_t *)&adapter->osdep.mem_bus_space_handle;

	/* Only older adapters use IO mapping */
	if ((adapter->hw.mac.type > e1000_82543) &&
	    (adapter->hw.mac.type < e1000_82571)) {
		/* Figure out where our IO BAR is */
		for (rid = PCIR_BAR(0); rid < PCIR_CIS;) {
			val = pci_read_config(dev, rid, 4);
			if (EM_BAR_TYPE(val) == EM_BAR_TYPE_IO) {
				adapter->io_rid = rid;
				break;
			}
			rid += 4;
			/* check for 64bit BAR - those occupy two slots */
			if (EM_BAR_MEM_TYPE(val) == EM_BAR_MEM_TYPE_64BIT)
				rid += 4;
		}
		if (rid >= PCIR_CIS) {
			device_printf(dev, "Unable to locate IO BAR\n");
			return (ENXIO);
		}
		adapter->res_ioport = bus_alloc_resource_any(dev,
		    SYS_RES_IOPORT, &adapter->io_rid, RF_ACTIVE);
		if (adapter->res_ioport == NULL) {
			device_printf(dev, "Unable to allocate bus resource: "
			    "ioport\n");
			return (ENXIO);
		}
		adapter->hw.io_base = 0;
		adapter->osdep.io_bus_space_tag =
		    rman_get_bustag(adapter->res_ioport);
		adapter->osdep.io_bus_space_handle =
		    rman_get_bushandle(adapter->res_ioport);
	}

	/*
	 * Setup MSI/X or MSI if PCI Express
	 * only the latest can use MSI/X and
	 * real support for it is forthcoming
	 */
	adapter->msi = 0;	/* Set defaults */
	rid = 0x0;		/* legacy interrupt uses rid 0 */

#if __FreeBSD_version > 602111	/* MSI support is present */
	/* This will setup either MSI/X or MSI; MSI vectors use rid 1 */
	if (em_setup_msix(adapter))
		rid = 1;
#endif	/* FreeBSD_version */

	adapter->res_interrupt = bus_alloc_resource_any(dev,
	    SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE);
	if (adapter->res_interrupt == NULL) {
		device_printf(dev, "Unable to allocate bus resource: "
		    "interrupt\n");
		return (ENXIO);
	}

	/* Give the shared code a back-pointer to the OS glue */
	adapter->hw.back = &adapter->osdep;

	return (0);
}
2680
2681 /*********************************************************************
2682 *
2683 * Setup the appropriate Interrupt handlers.
2684 *
2685 **********************************************************************/
/*
 * Register the interrupt handler.  Without EM_FAST_IRQ a conventional
 * (ithread) handler is installed; with EM_FAST_IRQ a fast/filter
 * handler is installed together with a taskqueue that performs the
 * deferred rx/tx and link processing.  Interrupts are masked on entry
 * and re-enabled on success.  Returns 0 or a bus_setup_intr error.
 */
int
em_allocate_intr(struct adapter *adapter)
{
	device_t dev = adapter->dev;
	int error;

	/* Manually turn off all interrupts */
	E1000_WRITE_REG(&adapter->hw, E1000_IMC, 0xffffffff);

#ifndef EM_FAST_IRQ
	/* We do Legacy setup */
	if (adapter->int_handler_tag == NULL &&
	    (error = bus_setup_intr(dev, adapter->res_interrupt,
#if __FreeBSD_version > 700000
	    /* 7.x signature: (filter, handler) pair */
	    INTR_TYPE_NET | INTR_MPSAFE, NULL, em_intr, adapter,
#else /* 6.X */
	    INTR_TYPE_NET | INTR_MPSAFE, em_intr, adapter,
#endif
	    &adapter->int_handler_tag)) != 0) {
		device_printf(dev, "Failed to register interrupt handler");
		return (error);
	}

#else /* FAST_IRQ */
	/*
	 * Try allocating a fast interrupt and the associated deferred
	 * processing contexts.
	 */
	TASK_INIT(&adapter->rxtx_task, 0, em_handle_rxtx, adapter);
	TASK_INIT(&adapter->link_task, 0, em_handle_link, adapter);
	adapter->tq = taskqueue_create_fast("em_taskq", M_NOWAIT,
	    taskqueue_thread_enqueue, &adapter->tq);
	taskqueue_start_threads(&adapter->tq, 1, PI_NET, "%s taskq",
	    device_get_nameunit(adapter->dev));
#if __FreeBSD_version < 700000
	if ((error = bus_setup_intr(dev, adapter->res_interrupt,
	    INTR_TYPE_NET | INTR_FAST, em_intr_fast, adapter,
#else
	/* On 7.x em_intr_fast is installed as the filter */
	if ((error = bus_setup_intr(dev, adapter->res_interrupt,
	    INTR_TYPE_NET, em_intr_fast, NULL, adapter,
#endif
	    &adapter->int_handler_tag)) != 0) {
		device_printf(dev, "Failed to register fast interrupt "
		    "handler: %d\n", error);
		taskqueue_free(adapter->tq);
		adapter->tq = NULL;
		return (error);
	}
#endif /* EM_FAST_IRQ */

	em_enable_intr(adapter);
	return (0);
}
2739
2740 static void
2741 em_free_intr(struct adapter *adapter)
2742 {
2743 device_t dev = adapter->dev;
2744
2745 if (adapter->res_interrupt != NULL) {
2746 bus_teardown_intr(dev, adapter->res_interrupt,
2747 adapter->int_handler_tag);
2748 adapter->int_handler_tag = NULL;
2749 }
2750 if (adapter->tq != NULL) {
2751 taskqueue_drain(adapter->tq, &adapter->rxtx_task);
2752 taskqueue_drain(taskqueue_fast, &adapter->link_task);
2753 taskqueue_free(adapter->tq);
2754 adapter->tq = NULL;
2755 }
2756 }
2757
/*
 * Release everything em_allocate_pci_resources() (and em_setup_msix)
 * may have acquired.  Each resource is released only if present, so
 * this is safe to call from any partially-failed attach path.
 */
static void
em_free_pci_resources(struct adapter *adapter)
{
	device_t dev = adapter->dev;

	/* MSI vectors were allocated with rid 1, legacy with rid 0 */
	if (adapter->res_interrupt != NULL)
		bus_release_resource(dev, SYS_RES_IRQ,
		    adapter->msi ? 1 : 0, adapter->res_interrupt);

#if __FreeBSD_version > 602111	/* MSI support is present */
	if (adapter->msix_mem != NULL)
		bus_release_resource(dev, SYS_RES_MEMORY,
		    PCIR_BAR(EM_MSIX_BAR), adapter->msix_mem);

	if (adapter->msi)
		pci_release_msi(dev);
#endif	/* FreeBSD_version */

	if (adapter->res_memory != NULL)
		bus_release_resource(dev, SYS_RES_MEMORY,
		    PCIR_BAR(0), adapter->res_memory);

	if (adapter->flash_mem != NULL)
		bus_release_resource(dev, SYS_RES_MEMORY,
		    EM_FLASH, adapter->flash_mem);

	if (adapter->res_ioport != NULL)
		bus_release_resource(dev, SYS_RES_IOPORT,
		    adapter->io_rid, adapter->res_ioport);
}
2788
2789 #if __FreeBSD_version > 602111 /* MSI support is present */
2790 /*
2791 * Setup MSI/X
2792 */
/*
 * Try to enable message-signalled interrupts: MSI/X first on 82575 and
 * newer, falling back to plain MSI on 82571+.  Sets adapter->msi and
 * returns TRUE when either was enabled, FALSE to use a legacy INTx.
 * The MSIX BAR mapping, if made, is released later by
 * em_free_pci_resources().
 */
static bool
em_setup_msix(struct adapter *adapter)
{
	device_t dev = adapter->dev;
	int rid, val;

	/* Pre-82571 parts have no MSI support at all */
	if (adapter->hw.mac.type < e1000_82571)
		return (FALSE);

	/* First try MSI/X if possible */
	if (adapter->hw.mac.type >= e1000_82575) {
		rid = PCIR_BAR(EM_MSIX_BAR);
		adapter->msix_mem = bus_alloc_resource_any(dev,
		    SYS_RES_MEMORY, &rid, RF_ACTIVE);
		if (!adapter->msix_mem) {
			/* May not be enabled */
			device_printf(adapter->dev,
			    "Unable to map MSIX table \n");
			goto msi;
		}
		val = pci_msix_count(dev);
		if ((val) && pci_alloc_msix(dev, &val) == 0) {
			adapter->msi = 1;
			device_printf(adapter->dev,"Using MSIX interrupts\n");
			return (TRUE);
		}
	}
msi:
	/* Fall back to a single MSI vector */
	val = pci_msi_count(dev);
	if (val == 1 && pci_alloc_msi(dev, &val) == 0) {
		adapter->msi = 1;
		device_printf(adapter->dev,"Using MSI interrupts\n");
		return (TRUE);
	}
	return (FALSE);
}
2829 #endif /* FreeBSD_version */
2830
2831 /*********************************************************************
2832 *
2833 * Initialize the hardware to a configuration
2834 * as specified by the adapter structure.
2835 *
2836 **********************************************************************/
/*
 * Bring the hardware to the configuration described by the adapter
 * structure: global reset, management-mode hand-off, smart-power-down
 * and flow-control setup, then shared-code init.  Returns 0 on
 * success, EIO if e1000_init_hw() fails.
 */
static int
em_hardware_init(struct adapter *adapter)
{
	device_t dev = adapter->dev;
	uint16_t rx_buffer_size;

	INIT_DEBUGOUT("em_hardware_init: begin");

	/* Issue a global reset */
	e1000_reset_hw(&adapter->hw);

	/* Get control from any management/hw control */
	if (((adapter->hw.mac.type == e1000_82573) ||
	    (adapter->hw.mac.type == e1000_ich8lan) ||
	    (adapter->hw.mac.type == e1000_ich9lan)) &&
	    e1000_check_mng_mode(&adapter->hw))
		em_get_hw_control(adapter);

	/* When hardware is reset, fifo_head is also reset */
	adapter->tx_fifo_head = 0;

	/* Set up smart power down as default off on newer adapters. */
	if (!em_smart_pwr_down && (adapter->hw.mac.type == e1000_82571 ||
	    adapter->hw.mac.type == e1000_82572)) {
		uint16_t phy_tmp = 0;

		/* Speed up time to link by disabling smart power down. */
		e1000_read_phy_reg(&adapter->hw,
		    IGP02E1000_PHY_POWER_MGMT, &phy_tmp);
		phy_tmp &= ~IGP02E1000_PM_SPD;
		e1000_write_phy_reg(&adapter->hw,
		    IGP02E1000_PHY_POWER_MGMT, phy_tmp);
	}

	/*
	 * These parameters control the automatic generation (Tx) and
	 * response (Rx) to Ethernet PAUSE frames.
	 * - High water mark should allow for at least two frames to be
	 *   received after sending an XOFF.
	 * - Low water mark works best when it is very near the high water mark.
	 *   This allows the receiver to restart by sending XON when it has
	 *   drained a bit. Here we use an arbitary value of 1500 which will
	 *   restart after one full frame is pulled from the buffer. There
	 *   could be several smaller frames in the buffer and if so they will
	 *   not trigger the XON until their total number reduces the buffer
	 *   by 1500.
	 * - The pause time is fairly large at 1000 x 512ns = 512 usec.
	 */
	/* PBA holds the RX packet-buffer size in KB; convert to bytes */
	rx_buffer_size = ((E1000_READ_REG(&adapter->hw, E1000_PBA) &
	    0xffff) << 10 );

	adapter->hw.fc.high_water = rx_buffer_size -
	    roundup2(adapter->max_frame_size, 1024);
	adapter->hw.fc.low_water = adapter->hw.fc.high_water - 1500;

	if (adapter->hw.mac.type == e1000_80003es2lan)
		adapter->hw.fc.pause_time = 0xFFFF;
	else
		adapter->hw.fc.pause_time = EM_FC_PAUSE_TIME;
	adapter->hw.fc.send_xon = TRUE;
	adapter->hw.fc.type = e1000_fc_full;

	if (e1000_init_hw(&adapter->hw) < 0) {
		device_printf(dev, "Hardware Initialization Failed\n");
		return (EIO);
	}

	e1000_check_for_link(&adapter->hw);

	return (0);
}
2908
2909 /*********************************************************************
2910 *
2911 * Setup networking device structure and register an interface.
2912 *
2913 **********************************************************************/
/*
 * Create and register the network interface: allocate the ifnet,
 * install the driver entry points, advertise capabilities (checksum
 * offload, TSO, VLAN, polling) appropriate to the MAC type, and
 * populate the supported-media list.
 */
static void
em_setup_interface(device_t dev, struct adapter *adapter)
{
	struct ifnet *ifp;

	INIT_DEBUGOUT("em_setup_interface: begin");

	ifp = adapter->ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL)
		panic("%s: can not if_alloc()", device_get_nameunit(dev));
	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	ifp->if_mtu = ETHERMTU;
	ifp->if_init =  em_init;
	ifp->if_softc = adapter;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = em_ioctl;
	ifp->if_start = em_start;
	/* Cap the send queue to the descriptor ring size */
	IFQ_SET_MAXLEN(&ifp->if_snd, adapter->num_tx_desc - 1);
	ifp->if_snd.ifq_drv_maxlen = adapter->num_tx_desc - 1;
	IFQ_SET_READY(&ifp->if_snd);

	ether_ifattach(ifp, adapter->hw.mac.addr);

	ifp->if_capabilities = ifp->if_capenable = 0;

	/* Hardware checksum offload exists from 82543 onward */
	if (adapter->hw.mac.type >= e1000_82543) {
		int version_cap;
#if __FreeBSD_version < 700000
		version_cap = IFCAP_HWCSUM;
#else
		version_cap = IFCAP_HWCSUM | IFCAP_VLAN_HWCSUM;
#endif
		ifp->if_capabilities |= version_cap;
		ifp->if_capenable |= version_cap;
	}

#if __FreeBSD_version >= 700000
	/* Identify TSO capable adapters */
	if ((adapter->hw.mac.type > e1000_82544) &&
	    (adapter->hw.mac.type != e1000_82547))
		ifp->if_capabilities |= IFCAP_TSO4;
	/*
	 * By default only enable on PCI-E, this
	 * can be overridden by ifconfig.
	 */
	if (adapter->hw.mac.type >= e1000_82571)
		ifp->if_capenable |= IFCAP_TSO4;
#endif

	/*
	 * Tell the upper layer(s) we support long frames.
	 */
	ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
	ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU;
	ifp->if_capenable |= IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU;

#ifdef DEVICE_POLLING
	ifp->if_capabilities |= IFCAP_POLLING;
#endif

	/*
	 * Specify the media types supported by this adapter and register
	 * callbacks to update media and link information
	 */
	ifmedia_init(&adapter->media, IFM_IMASK,
	    em_media_change, em_media_status);
	if ((adapter->hw.phy.media_type == e1000_media_type_fiber) ||
	    (adapter->hw.phy.media_type == e1000_media_type_internal_serdes)) {
		u_char fiber_type = IFM_1000_SX;	/* default type */

		/* 82545 fiber parts are LX rather than SX */
		if (adapter->hw.mac.type == e1000_82545)
			fiber_type = IFM_1000_LX;
		ifmedia_add(&adapter->media, IFM_ETHER | fiber_type | IFM_FDX,
		    0, NULL);
		ifmedia_add(&adapter->media, IFM_ETHER | fiber_type, 0, NULL);
	} else {
		/* Copper: advertise 10/100 in both duplexes ... */
		ifmedia_add(&adapter->media, IFM_ETHER | IFM_10_T, 0, NULL);
		ifmedia_add(&adapter->media, IFM_ETHER | IFM_10_T | IFM_FDX,
		    0, NULL);
		ifmedia_add(&adapter->media, IFM_ETHER | IFM_100_TX,
		    0, NULL);
		ifmedia_add(&adapter->media, IFM_ETHER | IFM_100_TX | IFM_FDX,
		    0, NULL);
		/* ... and gigabit except on the IFE (10/100-only) PHY */
		if (adapter->hw.phy.type != e1000_phy_ife) {
			ifmedia_add(&adapter->media,
			    IFM_ETHER | IFM_1000_T | IFM_FDX, 0, NULL);
			ifmedia_add(&adapter->media,
			    IFM_ETHER | IFM_1000_T, 0, NULL);
		}
	}
	ifmedia_add(&adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL);
	ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);
}
3007
3008
/*********************************************************************
 *
 *  Workaround for SmartSpeed on 82541 and 82547 controllers
 *
 *  Called periodically while the link is down.  If 1000T Master/Slave
 *  resolution keeps faulting, temporarily clear the manual
 *  Master/Slave enable bit and restart autonegotiation; after
 *  EM_SMARTSPEED_DOWNSHIFT more calls without link, re-enable the bit
 *  and renegotiate again.  The cycle restarts after EM_SMARTSPEED_MAX
 *  iterations.
 *
 **********************************************************************/
static void
em_smartspeed(struct adapter *adapter)
{
	uint16_t phy_tmp;

	/*
	 * Only act when the link is down, the PHY is an IGP type, and we
	 * are autonegotiating with 1000BASE-T full duplex advertised.
	 */
	if (adapter->link_active || (adapter->hw.phy.type != e1000_phy_igp) ||
	    adapter->hw.mac.autoneg == 0 ||
	    (adapter->hw.phy.autoneg_advertised & ADVERTISE_1000_FULL) == 0)
		return;

	if (adapter->smartspeed == 0) {
		/* If Master/Slave config fault is asserted twice,
		 * we assume back-to-back */
		e1000_read_phy_reg(&adapter->hw, PHY_1000T_STATUS, &phy_tmp);
		if (!(phy_tmp & SR_1000T_MS_CONFIG_FAULT))
			return;
		e1000_read_phy_reg(&adapter->hw, PHY_1000T_STATUS, &phy_tmp);
		if (phy_tmp & SR_1000T_MS_CONFIG_FAULT) {
			e1000_read_phy_reg(&adapter->hw, PHY_1000T_CTRL, &phy_tmp);
			if(phy_tmp & CR_1000T_MS_ENABLE) {
				/* Drop the manual Master/Slave setting and
				 * restart autonegotiation */
				phy_tmp &= ~CR_1000T_MS_ENABLE;
				e1000_write_phy_reg(&adapter->hw, PHY_1000T_CTRL,
				    phy_tmp);
				adapter->smartspeed++;
				if(adapter->hw.mac.autoneg &&
				   !e1000_phy_setup_autoneg(&adapter->hw) &&
				   !e1000_read_phy_reg(&adapter->hw, PHY_CONTROL,
				    &phy_tmp)) {
					phy_tmp |= (MII_CR_AUTO_NEG_EN |
						    MII_CR_RESTART_AUTO_NEG);
					e1000_write_phy_reg(&adapter->hw, PHY_CONTROL,
					    phy_tmp);
				}
			}
		}
		return;
	} else if(adapter->smartspeed == EM_SMARTSPEED_DOWNSHIFT) {
		/* If still no link, perhaps using 2/3 pair cable */
		e1000_read_phy_reg(&adapter->hw, PHY_1000T_CTRL, &phy_tmp);
		/* Restore the manual Master/Slave setting and renegotiate */
		phy_tmp |= CR_1000T_MS_ENABLE;
		e1000_write_phy_reg(&adapter->hw, PHY_1000T_CTRL, phy_tmp);
		if(adapter->hw.mac.autoneg &&
		   !e1000_phy_setup_autoneg(&adapter->hw) &&
		   !e1000_read_phy_reg(&adapter->hw, PHY_CONTROL, &phy_tmp)) {
			phy_tmp |= (MII_CR_AUTO_NEG_EN |
				    MII_CR_RESTART_AUTO_NEG);
			e1000_write_phy_reg(&adapter->hw, PHY_CONTROL, phy_tmp);
		}
	}
	/* Restart process after EM_SMARTSPEED_MAX iterations */
	if(adapter->smartspeed++ == EM_SMARTSPEED_MAX)
		adapter->smartspeed = 0;
}
3067
3068
3069 /*
3070 * Manage DMA'able memory.
3071 */
3072 static void
3073 em_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
3074 {
3075 if (error)
3076 return;
3077 *(bus_addr_t *) arg = segs[0].ds_addr;
3078 }
3079
3080 static int
3081 em_dma_malloc(struct adapter *adapter, bus_size_t size,
3082 struct em_dma_alloc *dma, int mapflags)
3083 {
3084 int error;
3085
3086 #if __FreeBSD_version >= 700000
3087 error = bus_dma_tag_create(bus_get_dma_tag(adapter->dev), /* parent */
3088 #else
3089 error = bus_dma_tag_create(NULL, /* parent */
3090 #endif
3091 EM_DBA_ALIGN, 0, /* alignment, bounds */
3092 BUS_SPACE_MAXADDR, /* lowaddr */
3093 BUS_SPACE_MAXADDR, /* highaddr */
3094 NULL, NULL, /* filter, filterarg */
3095 size, /* maxsize */
3096 1, /* nsegments */
3097 size, /* maxsegsize */
3098 0, /* flags */
3099 NULL, /* lockfunc */
3100 NULL, /* lockarg */
3101 &dma->dma_tag);
3102 if (error) {
3103 device_printf(adapter->dev,
3104 "%s: bus_dma_tag_create failed: %d\n",
3105 __func__, error);
3106 goto fail_0;
3107 }
3108
3109 error = bus_dmamem_alloc(dma->dma_tag, (void**) &dma->dma_vaddr,
3110 BUS_DMA_NOWAIT, &dma->dma_map);
3111 if (error) {
3112 device_printf(adapter->dev,
3113 "%s: bus_dmamem_alloc(%ju) failed: %d\n",
3114 __func__, (uintmax_t)size, error);
3115 goto fail_2;
3116 }
3117
3118 dma->dma_paddr = 0;
3119 error = bus_dmamap_load(dma->dma_tag, dma->dma_map, dma->dma_vaddr,
3120 size, em_dmamap_cb, &dma->dma_paddr, mapflags | BUS_DMA_NOWAIT);
3121 if (error || dma->dma_paddr == 0) {
3122 device_printf(adapter->dev,
3123 "%s: bus_dmamap_load failed: %d\n",
3124 __func__, error);
3125 goto fail_3;
3126 }
3127
3128 return (0);
3129
3130 fail_3:
3131 bus_dmamap_unload(dma->dma_tag, dma->dma_map);
3132 fail_2:
3133 bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map);
3134 bus_dma_tag_destroy(dma->dma_tag);
3135 fail_0:
3136 dma->dma_map = NULL;
3137 dma->dma_tag = NULL;
3138
3139 return (error);
3140 }
3141
3142 static void
3143 em_dma_free(struct adapter *adapter, struct em_dma_alloc *dma)
3144 {
3145 if (dma->dma_tag == NULL)
3146 return;
3147 if (dma->dma_map != NULL) {
3148 bus_dmamap_sync(dma->dma_tag, dma->dma_map,
3149 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
3150 bus_dmamap_unload(dma->dma_tag, dma->dma_map);
3151 bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map);
3152 dma->dma_map = NULL;
3153 }
3154 bus_dma_tag_destroy(dma->dma_tag);
3155 dma->dma_tag = NULL;
3156 }
3157
3158
3159 /*********************************************************************
3160 *
3161 * Allocate memory for tx_buffer structures. The tx_buffer stores all
3162 * the information needed to transmit a packet on the wire.
3163 *
3164 **********************************************************************/
3165 static int
3166 em_allocate_transmit_structures(struct adapter *adapter)
3167 {
3168 device_t dev = adapter->dev;
3169 struct em_buffer *tx_buffer;
3170 int error;
3171
3172 /*
3173 * Create DMA tags for tx descriptors
3174 */
3175 #if __FreeBSD_version >= 700000
3176 if ((error = bus_dma_tag_create(bus_get_dma_tag(dev), /* parent */
3177 #else
3178 if ((error = bus_dma_tag_create(NULL, /* parent */
3179 #endif
3180 1, 0, /* alignment, bounds */
3181 BUS_SPACE_MAXADDR, /* lowaddr */
3182 BUS_SPACE_MAXADDR, /* highaddr */
3183 NULL, NULL, /* filter, filterarg */
3184 EM_TSO_SIZE, /* maxsize */
3185 EM_MAX_SCATTER, /* nsegments */
3186 EM_TSO_SEG_SIZE, /* maxsegsize */
3187 0, /* flags */
3188 NULL, /* lockfunc */
3189 NULL, /* lockarg */
3190 &adapter->txtag)) != 0) {
3191 device_printf(dev, "Unable to allocate TX DMA tag\n");
3192 goto fail;
3193 }
3194
3195 adapter->tx_buffer_area = malloc(sizeof(struct em_buffer) *
3196 adapter->num_tx_desc, M_DEVBUF, M_NOWAIT | M_ZERO);
3197 if (adapter->tx_buffer_area == NULL) {
3198 device_printf(dev, "Unable to allocate tx_buffer memory\n");
3199 error = ENOMEM;
3200 goto fail;
3201 }
3202
3203 /* Create the descriptor buffer dma maps */
3204 for (int i = 0; i < adapter->num_tx_desc; i++) {
3205 tx_buffer = &adapter->tx_buffer_area[i];
3206 error = bus_dmamap_create(adapter->txtag, 0, &tx_buffer->map);
3207 if (error != 0) {
3208 device_printf(dev, "Unable to create TX DMA map\n");
3209 goto fail;
3210 }
3211 tx_buffer->next_eop = -1;
3212 }
3213
3214 return (0);
3215 fail:
3216 em_free_transmit_structures(adapter);
3217 return (error);
3218 }
3219
3220 /*********************************************************************
3221 *
3222 * (Re)Initialize transmit structures.
3223 *
3224 **********************************************************************/
3225 static void
3226 em_setup_transmit_structures(struct adapter *adapter)
3227 {
3228 struct em_buffer *tx_buffer;
3229
3230 /* Clear the old ring contents */
3231 bzero(adapter->tx_desc_base,
3232 (sizeof(struct e1000_tx_desc)) * adapter->num_tx_desc);
3233
3234 /* Free any existing TX buffers */
3235 for (int i = 0; i < adapter->num_tx_desc; i++, tx_buffer++) {
3236 tx_buffer = &adapter->tx_buffer_area[i];
3237 bus_dmamap_sync(adapter->txtag, tx_buffer->map,
3238 BUS_DMASYNC_POSTWRITE);
3239 bus_dmamap_unload(adapter->txtag, tx_buffer->map);
3240 m_freem(tx_buffer->m_head);
3241 tx_buffer->m_head = NULL;
3242 tx_buffer->next_eop = -1;
3243 }
3244
3245 /* Reset state */
3246 adapter->next_avail_tx_desc = 0;
3247 adapter->next_tx_to_clean = 0;
3248 adapter->num_tx_desc_avail = adapter->num_tx_desc;
3249
3250 bus_dmamap_sync(adapter->txdma.dma_tag, adapter->txdma.dma_map,
3251 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3252
3253 return;
3254 }
3255
/*********************************************************************
 *
 *  Enable transmit unit.
 *
 *  Programs the TX ring base/length registers, head/tail pointers,
 *  inter-packet-gap and interrupt delay timers, per-MAC TARC tweaks
 *  and the transmit control register, then selects which encap
 *  routine (legacy or advanced-descriptor) the driver will use.
 *
 **********************************************************************/
static void
em_initialize_transmit_unit(struct adapter *adapter)
{
	uint32_t	tctl, tarc, tipg = 0;
	uint64_t	bus_addr;

	 INIT_DEBUGOUT("em_initialize_transmit_unit: begin");
	/* Setup the Base and Length of the Tx Descriptor Ring */
	bus_addr = adapter->txdma.dma_paddr;
	E1000_WRITE_REG(&adapter->hw, E1000_TDLEN(0),
	    adapter->num_tx_desc * sizeof(struct e1000_tx_desc));
	E1000_WRITE_REG(&adapter->hw, E1000_TDBAH(0),
	    (uint32_t)(bus_addr >> 32));
	E1000_WRITE_REG(&adapter->hw, E1000_TDBAL(0),
	    (uint32_t)bus_addr);
	/* Setup the HW Tx Head and Tail descriptor pointers */
	E1000_WRITE_REG(&adapter->hw, E1000_TDT(0), 0);
	E1000_WRITE_REG(&adapter->hw, E1000_TDH(0), 0);

	HW_DEBUGOUT2("Base = %x, Length = %x\n",
	    E1000_READ_REG(&adapter->hw, E1000_TDBAL(0)),
	    E1000_READ_REG(&adapter->hw, E1000_TDLEN(0)));

	/* Set the default values for the Tx Inter Packet Gap timer */
	switch (adapter->hw.mac.type) {
	case e1000_82542:
		tipg = DEFAULT_82542_TIPG_IPGT;
		tipg |= DEFAULT_82542_TIPG_IPGR1 << E1000_TIPG_IPGR1_SHIFT;
		tipg |= DEFAULT_82542_TIPG_IPGR2 << E1000_TIPG_IPGR2_SHIFT;
		break;
	case e1000_80003es2lan:
		tipg = DEFAULT_82543_TIPG_IPGR1;
		tipg |= DEFAULT_80003ES2LAN_TIPG_IPGR2 <<
		    E1000_TIPG_IPGR2_SHIFT;
		break;
	default:
		/* Fiber/serdes media uses a different IPGT than copper */
		if ((adapter->hw.phy.media_type == e1000_media_type_fiber) ||
		    (adapter->hw.phy.media_type ==
		    e1000_media_type_internal_serdes))
			tipg = DEFAULT_82543_TIPG_IPGT_FIBER;
		else
			tipg = DEFAULT_82543_TIPG_IPGT_COPPER;
		tipg |= DEFAULT_82543_TIPG_IPGR1 << E1000_TIPG_IPGR1_SHIFT;
		tipg |= DEFAULT_82543_TIPG_IPGR2 << E1000_TIPG_IPGR2_SHIFT;
	}

	E1000_WRITE_REG(&adapter->hw, E1000_TIPG, tipg);
	E1000_WRITE_REG(&adapter->hw, E1000_TIDV, adapter->tx_int_delay.value);
	/* Absolute TX interrupt delay only exists on 82540 and later */
	if(adapter->hw.mac.type >= e1000_82540)
		E1000_WRITE_REG(&adapter->hw, E1000_TADV,
		    adapter->tx_abs_int_delay.value);

	/* Per-MAC TARC register adjustments */
	if ((adapter->hw.mac.type == e1000_82571) ||
	    (adapter->hw.mac.type == e1000_82572)) {
		tarc = E1000_READ_REG(&adapter->hw, E1000_TARC(0));
		tarc |= SPEED_MODE_BIT;
		E1000_WRITE_REG(&adapter->hw, E1000_TARC(0), tarc);
	} else if (adapter->hw.mac.type == e1000_80003es2lan) {
		tarc = E1000_READ_REG(&adapter->hw, E1000_TARC(0));
		tarc |= 1;
		E1000_WRITE_REG(&adapter->hw, E1000_TARC(0), tarc);
		tarc = E1000_READ_REG(&adapter->hw, E1000_TARC(1));
		tarc |= 1;
		E1000_WRITE_REG(&adapter->hw, E1000_TARC(1), tarc);
	}

	/* Program the Transmit Control Register */
	tctl = E1000_READ_REG(&adapter->hw, E1000_TCTL);
	tctl &= ~E1000_TCTL_CT;
	tctl |= (E1000_TCTL_PSP | E1000_TCTL_RTLC | E1000_TCTL_EN |
		   (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT));

	if (adapter->hw.mac.type >= e1000_82571)
		tctl |= E1000_TCTL_MULR;

	/* This write will effectively turn on the transmit unit. */
	E1000_WRITE_REG(&adapter->hw, E1000_TCTL, tctl);

	/* Setup Transmit Descriptor Base Settings */
	adapter->txd_cmd = E1000_TXD_CMD_IFCS;

	/* Request delayed TX interrupts only when a delay is configured */
	if ((adapter->tx_int_delay.value > 0) &&
	    (adapter->hw.mac.type != e1000_82575))
		adapter->txd_cmd |= E1000_TXD_CMD_IDE;

	/* Set the function pointer for the transmit routine */
	if (adapter->hw.mac.type >= e1000_82575)
		adapter->em_xmit = em_adv_encap;
	else
		adapter->em_xmit = em_encap;
}
3352
3353 /*********************************************************************
3354 *
3355 * Free all transmit related data structures.
3356 *
3357 **********************************************************************/
3358 static void
3359 em_free_transmit_structures(struct adapter *adapter)
3360 {
3361 struct em_buffer *tx_buffer;
3362
3363 INIT_DEBUGOUT("free_transmit_structures: begin");
3364
3365 if (adapter->tx_buffer_area != NULL) {
3366 for (int i = 0; i < adapter->num_tx_desc; i++) {
3367 tx_buffer = &adapter->tx_buffer_area[i];
3368 if (tx_buffer->m_head != NULL) {
3369 bus_dmamap_sync(adapter->txtag, tx_buffer->map,
3370 BUS_DMASYNC_POSTWRITE);
3371 bus_dmamap_unload(adapter->txtag,
3372 tx_buffer->map);
3373 m_freem(tx_buffer->m_head);
3374 tx_buffer->m_head = NULL;
3375 } else if (tx_buffer->map != NULL)
3376 bus_dmamap_unload(adapter->txtag,
3377 tx_buffer->map);
3378 if (tx_buffer->map != NULL) {
3379 bus_dmamap_destroy(adapter->txtag,
3380 tx_buffer->map);
3381 tx_buffer->map = NULL;
3382 }
3383 }
3384 }
3385 if (adapter->tx_buffer_area != NULL) {
3386 free(adapter->tx_buffer_area, M_DEVBUF);
3387 adapter->tx_buffer_area = NULL;
3388 }
3389 if (adapter->txtag != NULL) {
3390 bus_dma_tag_destroy(adapter->txtag);
3391 adapter->txtag = NULL;
3392 }
3393 }
3394
/*********************************************************************
 *
 *  The offload context needs to be set when we transfer the first
 *  packet of a particular protocol (TCP/UDP). This routine has been
 *  enhanced to deal with inserted VLAN headers, and IPV6 (not complete)
 *
 *  Writes one context descriptor into the TX ring (consuming one
 *  descriptor slot) and ORs into *txd_upper/*txd_lower the option
 *  bits the caller must place in the following data descriptors.
 *  On unsupported ethertypes both outputs are zeroed and no
 *  descriptor is consumed.
 *
 **********************************************************************/
static void
em_transmit_checksum_setup(struct adapter *adapter, struct mbuf *mp,
    uint32_t *txd_upper, uint32_t *txd_lower)
{
	struct e1000_context_desc *TXD;
	struct em_buffer *tx_buffer;
	struct ether_vlan_header *eh;
	struct ip *ip;
	struct ip6_hdr *ip6;
	struct tcp_hdr *th;
	int curr_txd, ehdrlen, hdr_len, ip_hlen;
	uint32_t cmd = 0;
	uint16_t etype;
	uint8_t ipproto;

	/* Setup checksum offload context. */
	curr_txd = adapter->next_avail_tx_desc;
	tx_buffer = &adapter->tx_buffer_area[curr_txd];
	TXD = (struct e1000_context_desc *) &adapter->tx_desc_base[curr_txd];

	*txd_lower = E1000_TXD_CMD_DEXT |	/* Extended descr type */
		     E1000_TXD_DTYP_D;		/* Data descr */

	/*
	 * Determine where frame payload starts.
	 * Jump over vlan headers if already present,
	 * helpful for QinQ too.
	 */
	eh = mtod(mp, struct ether_vlan_header *);
	if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
		etype = ntohs(eh->evl_proto);
		ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
	} else {
		etype = ntohs(eh->evl_encap_proto);
		ehdrlen = ETHER_HDR_LEN;
	}

	/*
	 * We only support TCP/UDP for IPv4 and IPv6 for the moment.
	 * TODO: Support SCTP too when it hits the tree.
	 */
	switch (etype) {
	case ETHERTYPE_IP:
		ip = (struct ip *)(mp->m_data + ehdrlen);
		ip_hlen = ip->ip_hl << 2;

		/* Setup of IP header checksum. */
		if (mp->m_pkthdr.csum_flags & CSUM_IP) {
			/*
			 * Start offset for header checksum calculation.
			 * End offset for header checksum calculation.
			 * Offset of place to put the checksum.
			 */
			TXD->lower_setup.ip_fields.ipcss = ehdrlen;
			TXD->lower_setup.ip_fields.ipcse =
			    htole16(ehdrlen + ip_hlen);
			TXD->lower_setup.ip_fields.ipcso =
			    ehdrlen + offsetof(struct ip, ip_sum);
			cmd |= E1000_TXD_CMD_IP;
			*txd_upper |= E1000_TXD_POPTS_IXSM << 8;
		}

		/* Requires ethernet + IP headers in the first mbuf */
		if (mp->m_len < ehdrlen + ip_hlen)
			return;	/* failure */

		hdr_len = ehdrlen + ip_hlen;
		ipproto = ip->ip_p;

		break;
	case ETHERTYPE_IPV6:
		ip6 = (struct ip6_hdr *)(mp->m_data + ehdrlen);
		ip_hlen = sizeof(struct ip6_hdr); /* XXX: No header stacking. */

		if (mp->m_len < ehdrlen + ip_hlen)
			return;	/* failure */

		/* IPv6 doesn't have a header checksum. */

		hdr_len = ehdrlen + ip_hlen;
		ipproto = ip6->ip6_nxt;

		break;
	default:
		/* Unknown ethertype: disable all offload bits */
		*txd_upper = 0;
		*txd_lower = 0;
		return;
	}

	switch (ipproto) {
	case IPPROTO_TCP:
		if (mp->m_pkthdr.csum_flags & CSUM_TCP) {
			/*
			 * Start offset for payload checksum calculation.
			 * End offset for payload checksum calculation.
			 * Offset of place to put the checksum.
			 */
			/* NOTE(review): `th' is assigned but never used */
			th = (struct tcp_hdr *)(mp->m_data + hdr_len);
			TXD->upper_setup.tcp_fields.tucss = hdr_len;
			TXD->upper_setup.tcp_fields.tucse = htole16(0);
			TXD->upper_setup.tcp_fields.tucso =
			    hdr_len + offsetof(struct tcphdr, th_sum);
			cmd |= E1000_TXD_CMD_TCP;
			*txd_upper |= E1000_TXD_POPTS_TXSM << 8;
		}
		break;
	case IPPROTO_UDP:
		if (mp->m_pkthdr.csum_flags & CSUM_UDP) {
			/*
			 * Start offset for header checksum calculation.
			 * End offset for header checksum calculation.
			 * Offset of place to put the checksum.
			 */
			TXD->upper_setup.tcp_fields.tucss = hdr_len;
			TXD->upper_setup.tcp_fields.tucse = htole16(0);
			TXD->upper_setup.tcp_fields.tucso =
			    hdr_len + offsetof(struct udphdr, uh_sum);
			*txd_upper |= E1000_TXD_POPTS_TXSM << 8;
		}
		break;
	default:
		break;
	}

	TXD->tcp_seg_setup.data = htole32(0);
	TXD->cmd_and_length =
	    htole32(adapter->txd_cmd | E1000_TXD_CMD_DEXT | cmd);
	/* Context descriptors carry no mbuf and are not an EOP */
	tx_buffer->m_head = NULL;
	tx_buffer->next_eop = -1;

	/* Advance the ring: one descriptor slot consumed */
	if (++curr_txd == adapter->num_tx_desc)
		curr_txd = 0;

	adapter->num_tx_desc_avail--;
	adapter->next_avail_tx_desc = curr_txd;
}
3537
3538
3539 #if __FreeBSD_version >= 700000
/**********************************************************************
 *
 *  Setup work for hardware segmentation offload (TSO)
 *
 *  Writes a TSO context descriptor for an IPv4/TCP frame (the IPv6
 *  path below is stubbed out) and fills *txd_upper/*txd_lower with
 *  the option bits for the subsequent data descriptors.  Returns TRUE
 *  when a context descriptor was consumed, FALSE when TSO does not
 *  apply or the headers are not contained in the first mbuf.
 *
 **********************************************************************/
static boolean_t
em_tso_setup(struct adapter *adapter, struct mbuf *mp, uint32_t *txd_upper,
   uint32_t *txd_lower)
{
	struct e1000_context_desc *TXD;
	struct em_buffer *tx_buffer;
	struct ether_vlan_header *eh;
	struct ip *ip;
	struct ip6_hdr *ip6;
	struct tcphdr *th;
	int curr_txd, ehdrlen, hdr_len, ip_hlen, isip6;
	uint16_t etype;

	/*
	 * XXX: This is not really correct as the stack would not have
	 * set up all checksums.
	 * XXX: Return FALSE is not sufficient as we may have to return
	 * in true failure cases as well. Should do -1 (failure), 0 (no)
	 * and 1 (success).
	 */
	if (((mp->m_pkthdr.csum_flags & CSUM_TSO) == 0) ||
	    (mp->m_pkthdr.len <= EM_TX_BUFFER_SIZE))
		return FALSE;

	/*
	 * This function could/should be extended to support IP/IPv6
	 * fragmentation as well. But as they say, one step at a time.
	 */

	/*
	 * Determine where frame payload starts.
	 * Jump over vlan headers if already present,
	 * helpful for QinQ too.
	 */
	eh = mtod(mp, struct ether_vlan_header *);
	if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
		etype = ntohs(eh->evl_proto);
		ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
	} else {
		etype = ntohs(eh->evl_encap_proto);
		ehdrlen = ETHER_HDR_LEN;
	}

	/* Ensure we have at least the IP+TCP header in the first mbuf. */
	if (mp->m_len < ehdrlen + sizeof(struct ip) + sizeof(struct tcphdr))
		return FALSE;	/* -1 */

	/*
	 * We only support TCP for IPv4 and IPv6 (notyet) for the moment.
	 * TODO: Support SCTP too when it hits the tree.
	 */
	switch (etype) {
	case ETHERTYPE_IP:
		isip6 = 0;
		ip = (struct ip *)(mp->m_data + ehdrlen);
		if (ip->ip_p != IPPROTO_TCP)
			return FALSE;	/* 0 */
		/* Hardware recomputes these per segment; zero them now */
		ip->ip_len = 0;
		ip->ip_sum = 0;
		ip_hlen = ip->ip_hl << 2;
		if (mp->m_len < ehdrlen + ip_hlen + sizeof(struct tcphdr))
			return FALSE;	/* -1 */
		th = (struct tcphdr *)((caddr_t)ip + ip_hlen);
#if 1
		/* Seed the TCP checksum with the pseudo-header sum */
		th->th_sum = in_pseudo(ip->ip_src.s_addr,
		    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
#else
		th->th_sum = mp->m_pkthdr.csum_data;
#endif
		break;
	case ETHERTYPE_IPV6:
		isip6 = 1;
		/* NOTE(review): code after this return is unreachable
		 * by design until IPv6 TSO is finished */
		return FALSE;			/* Not supported yet. */
		ip6 = (struct ip6_hdr *)(mp->m_data + ehdrlen);
		if (ip6->ip6_nxt != IPPROTO_TCP)
			return FALSE;	/* 0 */
		ip6->ip6_plen = 0;
		ip_hlen = sizeof(struct ip6_hdr); /* XXX: no header stacking. */
		if (mp->m_len < ehdrlen + ip_hlen + sizeof(struct tcphdr))
			return FALSE;	/* -1 */
		th = (struct tcphdr *)((caddr_t)ip6 + ip_hlen);
#if 0
		th->th_sum = in6_pseudo(ip6->ip6_src, ip->ip6_dst,
		    htons(IPPROTO_TCP));	/* XXX: function notyet. */
#else
		th->th_sum = mp->m_pkthdr.csum_data;
#endif
		break;
	default:
		return FALSE;
	}
	hdr_len = ehdrlen + ip_hlen + (th->th_off << 2);

	*txd_lower = (E1000_TXD_CMD_DEXT |	/* Extended descr type */
		      E1000_TXD_DTYP_D |	/* Data descr type */
		      E1000_TXD_CMD_TSE);	/* Do TSE on this packet */

	/* IP and/or TCP header checksum calculation and insertion. */
	*txd_upper = ((isip6 ? 0 : E1000_TXD_POPTS_IXSM) |
		      E1000_TXD_POPTS_TXSM) << 8;

	curr_txd = adapter->next_avail_tx_desc;
	tx_buffer = &adapter->tx_buffer_area[curr_txd];
	TXD = (struct e1000_context_desc *) &adapter->tx_desc_base[curr_txd];

	/* IPv6 doesn't have a header checksum. */
	if (!isip6) {
		/*
		 * Start offset for header checksum calculation.
		 * End offset for header checksum calculation.
		 * Offset of place put the checksum.
		 */
		TXD->lower_setup.ip_fields.ipcss = ehdrlen;
		TXD->lower_setup.ip_fields.ipcse =
		    htole16(ehdrlen + ip_hlen - 1);
		TXD->lower_setup.ip_fields.ipcso =
		    ehdrlen + offsetof(struct ip, ip_sum);
	}
	/*
	 * Start offset for payload checksum calculation.
	 * End offset for payload checksum calculation.
	 * Offset of place to put the checksum.
	 */
	TXD->upper_setup.tcp_fields.tucss =
	    ehdrlen + ip_hlen;
	TXD->upper_setup.tcp_fields.tucse = 0;
	TXD->upper_setup.tcp_fields.tucso =
	    ehdrlen + ip_hlen + offsetof(struct tcphdr, th_sum);
	/*
	 * Payload size per packet w/o any headers.
	 * Length of all headers up to payload.
	 */
	TXD->tcp_seg_setup.fields.mss = htole16(mp->m_pkthdr.tso_segsz);
	TXD->tcp_seg_setup.fields.hdr_len = hdr_len;

	TXD->cmd_and_length = htole32(adapter->txd_cmd |
				E1000_TXD_CMD_DEXT |	/* Extended descr */
				E1000_TXD_CMD_TSE |	/* TSE context */
				(isip6 ? 0 : E1000_TXD_CMD_IP) | /* Do IP csum */
				E1000_TXD_CMD_TCP |	/* Do TCP checksum */
				(mp->m_pkthdr.len - (hdr_len))); /* Total len */

	/* Context descriptors carry no mbuf and are not an EOP */
	tx_buffer->m_head = NULL;
	tx_buffer->next_eop = -1;

	if (++curr_txd == adapter->num_tx_desc)
		curr_txd = 0;

	adapter->num_tx_desc_avail--;
	adapter->next_avail_tx_desc = curr_txd;
	adapter->tx_tso = TRUE;

	return TRUE;
}
3699
3700
/**********************************************************************
 *
 *  Setup work for hardware segmentation offload (TSO) on
 *  adapters using advanced tx descriptors (82575)
 *
 *  IPv4/TCP only.  Writes one advanced context descriptor (consuming
 *  one ring slot) and stores the payload length (packet length minus
 *  all headers) through *paylen for use by the caller's transmit
 *  descriptor.  Returns TRUE if a context descriptor was written.
 *
 **********************************************************************/
static boolean_t
em_tso_adv_setup(struct adapter *adapter, struct mbuf *mp, u32 *paylen)
{
	struct e1000_adv_tx_context_desc *TXD;
	struct em_buffer        *tx_buffer;
	u32 vlan_macip_lens = 0, type_tucmd_mlhl = 0;
	u32 mss_l4len_idx = 0;
	u16 vtag = 0;
	int ctxd, ehdrlen, hdrlen, ip_hlen, tcp_hlen;
	struct ether_vlan_header *eh;
	struct ip *ip;
	struct tcphdr *th;

	/* Only act on TSO-marked packets too large for a single buffer */
	if (((mp->m_pkthdr.csum_flags & CSUM_TSO) == 0) ||
	    (mp->m_pkthdr.len <= EM_TX_BUFFER_SIZE))
		return FALSE;

	/*
	 * Determine where frame payload starts.
	 * Jump over vlan headers if already present
	 */
	eh = mtod(mp, struct ether_vlan_header *);
	if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN))
		ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
	else
		ehdrlen = ETHER_HDR_LEN;

	/* Ensure we have at least the IP+TCP header in the first mbuf. */
	if (mp->m_len < ehdrlen + sizeof(struct ip) + sizeof(struct tcphdr))
		return FALSE;

	/* Only supports IPV4 for now */
	ctxd = adapter->next_avail_tx_desc;
	tx_buffer = &adapter->tx_buffer_area[ctxd];
	TXD = (struct e1000_adv_tx_context_desc *) &adapter->tx_desc_base[ctxd];

	ip = (struct ip *)(mp->m_data + ehdrlen);
	if (ip->ip_p != IPPROTO_TCP)
		return FALSE;   /* 0 */
	/* Hardware recomputes these per segment; seed the TCP checksum
	 * with the pseudo-header sum */
	ip->ip_len = 0;
	ip->ip_sum = 0;
	ip_hlen = ip->ip_hl << 2;
	th = (struct tcphdr *)((caddr_t)ip + ip_hlen);
	th->th_sum = in_pseudo(ip->ip_src.s_addr,
	    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
	tcp_hlen = th->th_off << 2;
	hdrlen = ehdrlen + ip_hlen + tcp_hlen;
	/* Calculate payload, this is used in the transmit desc in encap */
	*paylen = mp->m_pkthdr.len - hdrlen;

	/* VLAN MACLEN IPLEN */
	if (mp->m_flags & M_VLANTAG) {
		vtag = htole16(mp->m_pkthdr.ether_vtag);
		vlan_macip_lens |= (vtag << E1000_ADVTXD_VLAN_SHIFT);
	}

	vlan_macip_lens |= (ehdrlen << E1000_ADVTXD_MACLEN_SHIFT);
	vlan_macip_lens |= ip_hlen;
	TXD->vlan_macip_lens |= htole32(vlan_macip_lens);

	/* ADV DTYPE TUCMD */
	type_tucmd_mlhl |= E1000_ADVTXD_DCMD_DEXT | E1000_ADVTXD_DTYP_CTXT;
	type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_TCP;
	type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_IPV4;
	TXD->type_tucmd_mlhl |= htole32(type_tucmd_mlhl);

	/* MSS L4LEN IDX */
	mss_l4len_idx |= (mp->m_pkthdr.tso_segsz << E1000_ADVTXD_MSS_SHIFT);
	mss_l4len_idx |= (tcp_hlen << E1000_ADVTXD_L4LEN_SHIFT);
	TXD->mss_l4len_idx = htole32(mss_l4len_idx);

	TXD->seqnum_seed = htole32(0);
	/* Context descriptors carry no mbuf and are not an EOP */
	tx_buffer->m_head = NULL;
	tx_buffer->next_eop = -1;

	/* Advance the ring: one descriptor slot consumed */
	if (++ctxd == adapter->num_tx_desc)
		ctxd = 0;

	adapter->num_tx_desc_avail--;
	adapter->next_avail_tx_desc = ctxd;
	return TRUE;
}
3789
3790 #endif /* FreeBSD_version >= 700000 */
3791
/*********************************************************************
 *
 *  Advanced Context Descriptor setup for VLAN or CSUM
 *
 *  Builds an advanced context descriptor carrying the VLAN tag and/or
 *  checksum offload parameters for the packet.  Returns TRUE when the
 *  caller should set the checksum option bits on its data
 *  descriptors, FALSE otherwise.  A descriptor slot is consumed in
 *  most paths (see NOTE below).
 *
 **********************************************************************/

static boolean_t
em_tx_adv_ctx_setup(struct adapter *adapter, struct mbuf *mp)
{
	struct e1000_adv_tx_context_desc *TXD;
	struct em_buffer        *tx_buffer;
	uint32_t vlan_macip_lens = 0, type_tucmd_mlhl = 0;
	struct ether_vlan_header *eh;
	struct ip *ip;
	struct ip6_hdr *ip6;
	int  ehdrlen, ip_hlen = 0;
	u16	etype;
	u8	ipproto = 0;
	bool	offload = TRUE;
#if __FreeBSD_version < 700000
	struct m_tag	*mtag;
#else
	u16 vtag = 0;
#endif

	int ctxd = adapter->next_avail_tx_desc;
	tx_buffer = &adapter->tx_buffer_area[ctxd];
	TXD = (struct e1000_adv_tx_context_desc *) &adapter->tx_desc_base[ctxd];

	if ((mp->m_pkthdr.csum_flags & CSUM_OFFLOAD) == 0)
		offload = FALSE; /* Only here to handle VLANs */
	/*
	** In advanced descriptors the vlan tag must
	** be placed into the descriptor itself.
	*/
#if __FreeBSD_version < 700000
	mtag = VLAN_OUTPUT_TAG(ifp, mp);
	if (mtag != NULL) {
		vlan_macip_lens |=
		    htole16(VLAN_TAG_VALUE(mtag)) << E1000_ADVTXD_VLAN_SHIFT;
	} else if (offload == FALSE)
		return FALSE;	/* No CTX needed */
#else
	if (mp->m_flags & M_VLANTAG) {
		vtag = htole16(mp->m_pkthdr.ether_vtag);
		vlan_macip_lens |= (vtag << E1000_ADVTXD_VLAN_SHIFT);
	} else if (offload == FALSE)
		return FALSE;	/* No VLAN and no checksum: no CTX needed */
#endif
	/*
	 * Determine where frame payload starts.
	 * Jump over vlan headers if already present,
	 * helpful for QinQ too.
	 */
	eh = mtod(mp, struct ether_vlan_header *);
	if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
		etype = ntohs(eh->evl_proto);
		ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
	} else {
		etype = ntohs(eh->evl_encap_proto);
		ehdrlen = ETHER_HDR_LEN;
	}

	/* Set the ether header length */
	vlan_macip_lens |= ehdrlen << E1000_ADVTXD_MACLEN_SHIFT;

	switch (etype) {
	case ETHERTYPE_IP:
		ip = (struct ip *)(mp->m_data + ehdrlen);
		ip_hlen = ip->ip_hl << 2;
		if (mp->m_len < ehdrlen + ip_hlen) {
			offload = FALSE;
			break;
		}
		ipproto = ip->ip_p;
		type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_IPV4;
		break;
	case ETHERTYPE_IPV6:
		ip6 = (struct ip6_hdr *)(mp->m_data + ehdrlen);
		ip_hlen = sizeof(struct ip6_hdr);
		/*
		 * NOTE(review): unlike the IPv4 path above, this exits
		 * without writing/consuming a descriptor -- inconsistent
		 * failure handling; confirm before changing.
		 */
		if (mp->m_len < ehdrlen + ip_hlen)
			return FALSE; /* failure */
		ipproto = ip6->ip6_nxt;
		type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_IPV6;
		break;
	default:
		offload = FALSE;
		break;
	}

	vlan_macip_lens |= ip_hlen;
	type_tucmd_mlhl |= E1000_ADVTXD_DCMD_DEXT | E1000_ADVTXD_DTYP_CTXT;

	switch (ipproto) {
	case IPPROTO_TCP:
		if (mp->m_pkthdr.csum_flags & CSUM_TCP)
			type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_TCP;
		break;
	case IPPROTO_UDP:
		if (mp->m_pkthdr.csum_flags & CSUM_UDP)
			type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_UDP;
		break;
	default:
		offload = FALSE;
		break;
	}

	/* Now copy bits into descriptor */
	TXD->vlan_macip_lens |= htole32(vlan_macip_lens);
	TXD->type_tucmd_mlhl |= htole32(type_tucmd_mlhl);
	TXD->seqnum_seed = htole32(0);
	TXD->mss_l4len_idx = htole32(0);

	/* Context descriptors carry no mbuf and are not an EOP */
	tx_buffer->m_head = NULL;
	tx_buffer->next_eop = -1;

	/* We've consumed the first desc, adjust counters */
	if (++ctxd == adapter->num_tx_desc)
		ctxd = 0;
	adapter->next_avail_tx_desc = ctxd;
	--adapter->num_tx_desc_avail;

	return (offload);
}
3916
3917
/**********************************************************************
 *
 *  Examine each tx_buffer in the used queue. If the hardware is done
 *  processing the packet then free associated resources. The
 *  tx_buffer is put back on the free queue.
 *
 *  Walks the ring from next_tx_to_clean, packet by packet: each
 *  packet's span ends at the descriptor recorded in the first
 *  tx_buffer's next_eop.  Cleaning stops at the first EOP descriptor
 *  whose DD (descriptor done) status bit is not yet set.  Updates
 *  num_tx_desc_avail, next_tx_to_clean, if_opackets, the OACTIVE flag
 *  and the watchdog timer.  Caller must hold the TX lock.
 *
 **********************************************************************/
static void
em_txeof(struct adapter *adapter)
{
	int first, last, done, num_avail;
	struct em_buffer *tx_buffer;
	struct e1000_tx_desc   *tx_desc, *eop_desc;
	struct ifnet   *ifp = adapter->ifp;

	EM_TX_LOCK_ASSERT(adapter);

	/* Nothing outstanding: every descriptor is already free */
	if (adapter->num_tx_desc_avail == adapter->num_tx_desc)
		return;

	num_avail = adapter->num_tx_desc_avail;
	first = adapter->next_tx_to_clean;
	tx_desc = &adapter->tx_desc_base[first];
	tx_buffer = &adapter->tx_buffer_area[first];
	last = tx_buffer->next_eop;
	eop_desc = &adapter->tx_desc_base[last];

	/*
	 * What this does is get the index of the
	 * first descriptor AFTER the EOP of the
	 * first packet, that way we can do the
	 * simple comparison on the inner while loop.
	 */
	if (++last == adapter->num_tx_desc)
		last = 0;
	done = last;

	bus_dmamap_sync(adapter->txdma.dma_tag, adapter->txdma.dma_map,
	    BUS_DMASYNC_POSTREAD);

	while (eop_desc->upper.fields.status & E1000_TXD_STAT_DD) {
		/* We clean the range of the packet */
		while (first != done) {
			tx_desc->upper.data = 0;
			tx_desc->lower.data = 0;
			tx_desc->buffer_addr = 0;
			num_avail++;

			/* Only the slot holding the mbuf has work to do */
			if (tx_buffer->m_head) {
				ifp->if_opackets++;
				bus_dmamap_sync(adapter->txtag,
				    tx_buffer->map,
				    BUS_DMASYNC_POSTWRITE);
				bus_dmamap_unload(adapter->txtag,
				    tx_buffer->map);

				m_freem(tx_buffer->m_head);
				tx_buffer->m_head = NULL;
			}
			tx_buffer->next_eop = -1;

			if (++first == adapter->num_tx_desc)
				first = 0;

			tx_buffer = &adapter->tx_buffer_area[first];
			tx_desc = &adapter->tx_desc_base[first];
		}
		/* See if we can continue to the next packet */
		last = tx_buffer->next_eop;
		if (last != -1) {
			eop_desc = &adapter->tx_desc_base[last];
			/* Get new done point */
			if (++last == adapter->num_tx_desc) last = 0;
			done = last;
		} else
			break;
	}
	bus_dmamap_sync(adapter->txdma.dma_tag, adapter->txdma.dma_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	adapter->next_tx_to_clean = first;

	/*
	 * If we have enough room, clear IFF_DRV_OACTIVE to tell the stack
	 * that it is OK to send packets.
	 * If there are no pending descriptors, clear the timeout. Otherwise,
	 * if some descriptors have been freed, restart the timeout.
	 */
	if (num_avail > EM_TX_CLEANUP_THRESHOLD) {
		ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
		/* All clean, turn off the timer */
		if (num_avail == adapter->num_tx_desc)
			adapter->watchdog_timer = 0;
		/* Some cleaned, reset the timer */
		else if (num_avail != adapter->num_tx_desc_avail)
			adapter->watchdog_timer = EM_TX_TIMEOUT;
	}
	adapter->num_tx_desc_avail = num_avail;
	return;
}
4018
/*********************************************************************
 *
 *  When the link is lost there is sometimes work still pending in
 *  the TX ring, which would trigger a watchdog reset.  Rather than
 *  allow that, attempt a cleanup here and, failing that, reinit the
 *  adapter.  Note that this has been seen mostly with fiber adapters.
 *
 **********************************************************************/
4027 static void
4028 em_tx_purge(struct adapter *adapter)
4029 {
4030 if ((!adapter->link_active) && (adapter->watchdog_timer)) {
4031 EM_TX_LOCK(adapter);
4032 em_txeof(adapter);
4033 EM_TX_UNLOCK(adapter);
4034 if (adapter->watchdog_timer) { /* Still not clean? */
4035 adapter->watchdog_timer = 0;
4036 em_init_locked(adapter);
4037 }
4038 }
4039 }
4040
4041 /*********************************************************************
4042 *
4043 * Get a buffer from system mbuf buffer pool.
4044 *
4045 **********************************************************************/
static int
em_get_buf(struct adapter *adapter, int i)
{
	struct mbuf *m;
	bus_dma_segment_t segs[1];
	bus_dmamap_t map;
	struct em_buffer *rx_buffer;
	int error, nsegs;

	/* Allocate a fresh cluster mbuf for RX ring slot 'i' */
	m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
	if (m == NULL) {
		adapter->mbuf_cluster_failed++;
		return (ENOBUFS);
	}
	m->m_len = m->m_pkthdr.len = MCLBYTES;

	/*
	 * Shift the payload by ETHER_ALIGN when the maximum frame still
	 * fits in the cluster after the adjustment.
	 */
	if (adapter->max_frame_size <= (MCLBYTES - ETHER_ALIGN))
		m_adj(m, ETHER_ALIGN);

	/*
	 * Using memory from the mbuf cluster pool, invoke the
	 * bus_dma machinery to arrange the memory mapping.
	 * Load into the spare map first so the slot's current
	 * mapping stays valid if the load fails.
	 */
	error = bus_dmamap_load_mbuf_sg(adapter->rxtag,
	    adapter->rx_sparemap, m, segs, &nsegs, BUS_DMA_NOWAIT);
	if (error != 0) {
		m_free(m);
		return (error);
	}

	/* If nsegs is wrong then the stack is corrupt. */
	KASSERT(nsegs == 1, ("Too many segments returned!"));

	rx_buffer = &adapter->rx_buffer_area[i];
	if (rx_buffer->m_head != NULL)
		bus_dmamap_unload(adapter->rxtag, rx_buffer->map);

	/* Swap the freshly loaded spare map into the slot */
	map = rx_buffer->map;
	rx_buffer->map = adapter->rx_sparemap;
	adapter->rx_sparemap = map;
	bus_dmamap_sync(adapter->rxtag, rx_buffer->map, BUS_DMASYNC_PREREAD);
	rx_buffer->m_head = m;

	/* Point the RX descriptor at the new cluster */
	adapter->rx_desc_base[i].buffer_addr = htole64(segs[0].ds_addr);
	return (0);
}
4092
4093 /*********************************************************************
4094 *
4095 * Allocate memory for rx_buffer structures. Since we use one
4096 * rx_buffer per received packet, the maximum number of rx_buffer's
4097 * that we'll need is equal to the number of receive descriptors
4098 * that we've allocated.
4099 *
4100 **********************************************************************/
static int
em_allocate_receive_structures(struct adapter *adapter)
{
	device_t dev = adapter->dev;
	struct em_buffer *rx_buffer;
	int i, error;

	/* Bookkeeping array: one em_buffer per RX descriptor */
	adapter->rx_buffer_area = malloc(sizeof(struct em_buffer) *
	    adapter->num_rx_desc, M_DEVBUF, M_NOWAIT | M_ZERO);
	if (adapter->rx_buffer_area == NULL) {
		device_printf(dev, "Unable to allocate rx_buffer memory\n");
		return (ENOMEM);
	}

	/* One shared DMA tag for all RX buffers: single cluster-sized segment */
#if __FreeBSD_version >= 700000
	error = bus_dma_tag_create(bus_get_dma_tag(dev), /* parent */
#else
	error = bus_dma_tag_create(NULL,		 /* parent */
#endif
				1, 0,			/* alignment, bounds */
				BUS_SPACE_MAXADDR,	/* lowaddr */
				BUS_SPACE_MAXADDR,	/* highaddr */
				NULL, NULL,		/* filter, filterarg */
				MCLBYTES,		/* maxsize */
				1,			/* nsegments */
				MCLBYTES,		/* maxsegsize */
				0,			/* flags */
				NULL,			/* lockfunc */
				NULL,			/* lockarg */
				&adapter->rxtag);
	if (error) {
		device_printf(dev, "%s: bus_dma_tag_create failed %d\n",
		    __func__, error);
		goto fail;
	}

	/* Create the spare map (used by getbuf) */
	error = bus_dmamap_create(adapter->rxtag, BUS_DMA_NOWAIT,
	    &adapter->rx_sparemap);
	if (error) {
		device_printf(dev, "%s: bus_dmamap_create failed: %d\n",
		    __func__, error);
		goto fail;
	}

	/* One DMA map per RX slot */
	rx_buffer = adapter->rx_buffer_area;
	for (i = 0; i < adapter->num_rx_desc; i++, rx_buffer++) {
		error = bus_dmamap_create(adapter->rxtag, BUS_DMA_NOWAIT,
		    &rx_buffer->map);
		if (error) {
			device_printf(dev, "%s: bus_dmamap_create failed: %d\n",
			    __func__, error);
			goto fail;
		}
	}

	return (0);

fail:
	/* Unified cleanup: releases tag, spare map, per-slot maps, array */
	em_free_receive_structures(adapter);
	return (error);
}
4163
4164 /*********************************************************************
4165 *
4166 * (Re)initialize receive structures.
4167 *
4168 **********************************************************************/
static int
em_setup_receive_structures(struct adapter *adapter)
{
	struct em_buffer *rx_buffer;
	int i, error;

	/* Reset descriptor ring */
	bzero(adapter->rx_desc_base,
	    (sizeof(struct e1000_rx_desc)) * adapter->num_rx_desc);

	/* Free current RX buffers. */
	rx_buffer = adapter->rx_buffer_area;
	for (i = 0; i < adapter->num_rx_desc; i++, rx_buffer++) {
		if (rx_buffer->m_head != NULL) {
			bus_dmamap_sync(adapter->rxtag, rx_buffer->map,
			    BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(adapter->rxtag, rx_buffer->map);
			m_freem(rx_buffer->m_head);
			rx_buffer->m_head = NULL;
		}
	}

	/* Allocate new ones. */
	for (i = 0; i < adapter->num_rx_desc; i++) {
		error = em_get_buf(adapter, i);
		if (error)
			/*
			 * NOTE(review): buffers allocated so far are left in
			 * place on failure; presumably the caller's error path
			 * reclaims them via em_free_receive_structures() --
			 * confirm against callers.
			 */
			return (error);
	}

	/* Setup our descriptor pointers */
	adapter->next_rx_desc_to_check = 0;
	bus_dmamap_sync(adapter->rxdma.dma_tag, adapter->rxdma.dma_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	return (0);
}
4205
4206 /*********************************************************************
4207 *
4208 * Enable receive unit.
4209 *
4210 **********************************************************************/
static void
em_initialize_receive_unit(struct adapter *adapter)
{
	struct ifnet *ifp = adapter->ifp;
	uint64_t bus_addr;
	uint32_t reg_rctl;
	uint32_t reg_rxcsum;

	INIT_DEBUGOUT("em_initialize_receive_unit: begin");

	/*
	 * Make sure receives are disabled while setting
	 * up the descriptor ring
	 */
	reg_rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL);
	E1000_WRITE_REG(&adapter->hw, E1000_RCTL, reg_rctl & ~E1000_RCTL_EN);

	if(adapter->hw.mac.type >= e1000_82540) {
		/* Absolute RX interrupt delay */
		E1000_WRITE_REG(&adapter->hw, E1000_RADV,
		    adapter->rx_abs_int_delay.value);
		/*
		 * Set the interrupt throttling rate. Value is calculated
		 * as DEFAULT_ITR = 1/(MAX_INTS_PER_SEC * 256ns)
		 */
#define MAX_INTS_PER_SEC	8000
#define DEFAULT_ITR	     1000000000/(MAX_INTS_PER_SEC * 256)
		E1000_WRITE_REG(&adapter->hw, E1000_ITR, DEFAULT_ITR);
	}

	/* Setup the Base and Length of the Rx Descriptor Ring */
	bus_addr = adapter->rxdma.dma_paddr;
	E1000_WRITE_REG(&adapter->hw, E1000_RDLEN(0),
	    adapter->num_rx_desc * sizeof(struct e1000_rx_desc));
	E1000_WRITE_REG(&adapter->hw, E1000_RDBAH(0),
	    (uint32_t)(bus_addr >> 32));
	E1000_WRITE_REG(&adapter->hw, E1000_RDBAL(0),
	    (uint32_t)bus_addr);

	/* Setup the Receive Control Register */
	reg_rctl &= ~(3 << E1000_RCTL_MO_SHIFT);
	reg_rctl |= E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_LBM_NO |
	    E1000_RCTL_RDMTS_HALF |
	    (adapter->hw.mac.mc_filter_type << E1000_RCTL_MO_SHIFT);

	/* Make sure VLAN Filters are off */
	reg_rctl &= ~E1000_RCTL_VFE;

	/* Store-bad-packets only when the TBI workaround requires it */
	if (e1000_tbi_sbp_enabled_82543(&adapter->hw))
		reg_rctl |= E1000_RCTL_SBP;
	else
		reg_rctl &= ~E1000_RCTL_SBP;

	/* Program the hardware buffer size to match our clusters */
	switch (adapter->rx_buffer_len) {
	default:
	case 2048:
		reg_rctl |= E1000_RCTL_SZ_2048;
		break;
	case 4096:
		reg_rctl |= E1000_RCTL_SZ_4096 |
		    E1000_RCTL_BSEX | E1000_RCTL_LPE;
		break;
	case 8192:
		reg_rctl |= E1000_RCTL_SZ_8192 |
		    E1000_RCTL_BSEX | E1000_RCTL_LPE;
		break;
	case 16384:
		reg_rctl |= E1000_RCTL_SZ_16384 |
		    E1000_RCTL_BSEX | E1000_RCTL_LPE;
		break;
	}

	/* Long Packet Enable tracks the configured MTU */
	if (ifp->if_mtu > ETHERMTU)
		reg_rctl |= E1000_RCTL_LPE;
	else
		reg_rctl &= ~E1000_RCTL_LPE;

	/* Enable 82543 Receive Checksum Offload for TCP and UDP */
	if ((adapter->hw.mac.type >= e1000_82543) &&
	    (ifp->if_capenable & IFCAP_RXCSUM)) {
		reg_rxcsum = E1000_READ_REG(&adapter->hw, E1000_RXCSUM);
		reg_rxcsum |= (E1000_RXCSUM_IPOFL | E1000_RXCSUM_TUOFL);
		E1000_WRITE_REG(&adapter->hw, E1000_RXCSUM, reg_rxcsum);
	}

	/*
	** XXX TEMPORARY WORKAROUND: on some systems with 82573
	** long latencies are observed, like Lenovo X60. This
	** change eliminates the problem, but since having positive
	** values in RDTR is a known source of problems on other
	** platforms another solution is being sought.
	*/
	if (adapter->hw.mac.type == e1000_82573)
		E1000_WRITE_REG(&adapter->hw, E1000_RDTR, 0x20);

	/* Enable Receives */
	E1000_WRITE_REG(&adapter->hw, E1000_RCTL, reg_rctl);

	/*
	 * Setup the HW Rx Head and
	 * Tail Descriptor Pointers
	 */
	E1000_WRITE_REG(&adapter->hw, E1000_RDH(0), 0);
	E1000_WRITE_REG(&adapter->hw, E1000_RDT(0), adapter->num_rx_desc - 1);

	return;
}
4317
4318 /*********************************************************************
4319 *
4320 * Free receive related data structures.
4321 *
4322 **********************************************************************/
static void
em_free_receive_structures(struct adapter *adapter)
{
	struct em_buffer *rx_buffer;
	int i;

	INIT_DEBUGOUT("free_receive_structures: begin");

	/* Release the spare map used by em_get_buf() */
	if (adapter->rx_sparemap) {
		bus_dmamap_destroy(adapter->rxtag, adapter->rx_sparemap);
		adapter->rx_sparemap = NULL;
	}

	/* Cleanup any existing buffers */
	if (adapter->rx_buffer_area != NULL) {
		rx_buffer = adapter->rx_buffer_area;
		for (i = 0; i < adapter->num_rx_desc; i++, rx_buffer++) {
			/* Unload the map, freeing the mbuf if one is attached */
			if (rx_buffer->m_head != NULL) {
				bus_dmamap_sync(adapter->rxtag, rx_buffer->map,
				    BUS_DMASYNC_POSTREAD);
				bus_dmamap_unload(adapter->rxtag,
				    rx_buffer->map);
				m_freem(rx_buffer->m_head);
				rx_buffer->m_head = NULL;
			} else if (rx_buffer->map != NULL)
				bus_dmamap_unload(adapter->rxtag,
				    rx_buffer->map);
			/* Then destroy the (now unloaded) map */
			if (rx_buffer->map != NULL) {
				bus_dmamap_destroy(adapter->rxtag,
				    rx_buffer->map);
				rx_buffer->map = NULL;
			}
		}
	}

	/* Free the bookkeeping array itself */
	if (adapter->rx_buffer_area != NULL) {
		free(adapter->rx_buffer_area, M_DEVBUF);
		adapter->rx_buffer_area = NULL;
	}

	/* Finally tear down the RX DMA tag */
	if (adapter->rxtag != NULL) {
		bus_dma_tag_destroy(adapter->rxtag);
		adapter->rxtag = NULL;
	}
}
4368
4369 /*********************************************************************
4370 *
4371 * This routine executes in interrupt context. It replenishes
4372 * the mbufs in the descriptor and sends data which has been
4373 * dma'ed into host memory to upper layer.
4374 *
4375 * We loop at most count times if count is > 0, or until done if
4376 * count < 0.
4377 *
4378 *********************************************************************/
static int
em_rxeof(struct adapter *adapter, int count)
{
	struct ifnet *ifp;
	struct mbuf *mp;
	uint8_t accept_frame = 0;
	uint8_t eop = 0;
	uint16_t len, desc_len, prev_len_adj;
	int i;

	/* Pointer to the receive descriptor being examined. */
	struct e1000_rx_desc *current_desc;
	uint8_t status;

	ifp = adapter->ifp;
	i = adapter->next_rx_desc_to_check;
	current_desc = &adapter->rx_desc_base[i];
	bus_dmamap_sync(adapter->rxdma.dma_tag, adapter->rxdma.dma_map,
	    BUS_DMASYNC_POSTREAD);

	/* Fast exit when the next descriptor has not been written back yet */
	if (!((current_desc->status) & E1000_RXD_STAT_DD))
		return (0);

	while ((current_desc->status & E1000_RXD_STAT_DD) &&
	    (count != 0) &&
	    (ifp->if_drv_flags & IFF_DRV_RUNNING)) {
		struct mbuf *m = NULL;

		mp = adapter->rx_buffer_area[i].m_head;
		/*
		 * Can't defer bus_dmamap_sync(9) because TBI_ACCEPT
		 * needs to access the last received byte in the mbuf.
		 */
		bus_dmamap_sync(adapter->rxtag, adapter->rx_buffer_area[i].map,
		    BUS_DMASYNC_POSTREAD);

		accept_frame = 1;
		prev_len_adj = 0;
		desc_len = le16toh(current_desc->length);
		status = current_desc->status;
		if (status & E1000_RXD_STAT_EOP) {
			/* 'count' limits complete packets, not descriptors */
			count--;
			eop = 1;
			/* Strip the trailing CRC from the reported length */
			if (desc_len < ETHER_CRC_LEN) {
				len = 0;
				prev_len_adj = ETHER_CRC_LEN - desc_len;
			} else
				len = desc_len - ETHER_CRC_LEN;
		} else {
			eop = 0;
			len = desc_len;
		}

		if (current_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK) {
			uint8_t last_byte;
			uint32_t pkt_len = desc_len;

			if (adapter->fmp != NULL)
				pkt_len += adapter->fmp->m_pkthdr.len;

			/* TBI workaround: some "errored" frames are acceptable */
			last_byte = *(mtod(mp, caddr_t) + desc_len - 1);
			if (TBI_ACCEPT(&adapter->hw, status,
			    current_desc->errors, pkt_len, last_byte,
			    adapter->min_frame_size, adapter->max_frame_size)) {
				e1000_tbi_adjust_stats_82543(&adapter->hw,
				    &adapter->stats, pkt_len,
				    adapter->hw.mac.addr,
				    adapter->max_frame_size);
				if (len > 0)
					len--;
			} else
				accept_frame = 0;
		}

		if (accept_frame) {
			/* Refill the slot; on failure recycle the current mbuf */
			if (em_get_buf(adapter, i) != 0) {
				ifp->if_iqdrops++;
				goto discard;
			}

			/* Assign correct length to the current fragment */
			mp->m_len = len;

			if (adapter->fmp == NULL) {
				mp->m_pkthdr.len = len;
				adapter->fmp = mp; /* Store the first mbuf */
				adapter->lmp = mp;
			} else {
				/* Chain mbuf's together */
				mp->m_flags &= ~M_PKTHDR;
				/*
				 * Adjust length of previous mbuf in chain if
				 * we received less than 4 bytes in the last
				 * descriptor.
				 */
				if (prev_len_adj > 0) {
					adapter->lmp->m_len -= prev_len_adj;
					adapter->fmp->m_pkthdr.len -=
					    prev_len_adj;
				}
				adapter->lmp->m_next = mp;
				adapter->lmp = adapter->lmp->m_next;
				adapter->fmp->m_pkthdr.len += len;
			}

			if (eop) {
				/* Complete packet: finalize and hand up */
				adapter->fmp->m_pkthdr.rcvif = ifp;
				ifp->if_ipackets++;
				em_receive_checksum(adapter, current_desc,
				    adapter->fmp);
#ifndef __NO_STRICT_ALIGNMENT
				if (adapter->max_frame_size >
				    (MCLBYTES - ETHER_ALIGN) &&
				    em_fixup_rx(adapter) != 0)
					goto skip;
#endif
				if (status & E1000_RXD_STAT_VP) {
#if __FreeBSD_version < 700000
					VLAN_INPUT_TAG_NEW(ifp, adapter->fmp,
					    (le16toh(current_desc->special) &
					    E1000_RXD_SPC_VLAN_MASK));
#else
					adapter->fmp->m_pkthdr.ether_vtag =
					    (le16toh(current_desc->special) &
					    E1000_RXD_SPC_VLAN_MASK);
					adapter->fmp->m_flags |= M_VLANTAG;
#endif
				}
#ifndef __NO_STRICT_ALIGNMENT
skip:
#endif
				m = adapter->fmp;
				adapter->fmp = NULL;
				adapter->lmp = NULL;
			}
		} else {
			ifp->if_ierrors++;
discard:
			/* Reuse loaded DMA map and just update mbuf chain */
			mp = adapter->rx_buffer_area[i].m_head;
			mp->m_len = mp->m_pkthdr.len = MCLBYTES;
			mp->m_data = mp->m_ext.ext_buf;
			mp->m_next = NULL;
			if (adapter->max_frame_size <=
			    (MCLBYTES - ETHER_ALIGN))
				m_adj(mp, ETHER_ALIGN);
			/* Drop any partially assembled chain as well */
			if (adapter->fmp != NULL) {
				m_freem(adapter->fmp);
				adapter->fmp = NULL;
				adapter->lmp = NULL;
			}
			m = NULL;
		}

		/* Zero out the receive descriptors status. */
		current_desc->status = 0;
		bus_dmamap_sync(adapter->rxdma.dma_tag, adapter->rxdma.dma_map,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

		/* Advance our pointers to the next descriptor. */
		if (++i == adapter->num_rx_desc)
			i = 0;
		if (m != NULL) {
			adapter->next_rx_desc_to_check = i;
#ifndef EM_FAST_IRQ
			/* Drop the core lock across the stack input call */
			EM_CORE_UNLOCK(adapter);
			(*ifp->if_input)(ifp, m);
			EM_CORE_LOCK(adapter);
#else
			/* Already running unlocked */
			(*ifp->if_input)(ifp, m);
#endif
			/* Reload; the position may have moved while unlocked */
			i = adapter->next_rx_desc_to_check;
		}
		current_desc = &adapter->rx_desc_base[i];
	}
	adapter->next_rx_desc_to_check = i;

	/* Advance the E1000's Receive Queue #0 "Tail Pointer". */
	if (--i < 0)
		i = adapter->num_rx_desc - 1;
	E1000_WRITE_REG(&adapter->hw, E1000_RDT(0), i);
	if (!((current_desc->status) & E1000_RXD_STAT_DD))
		return (0);

	return (1);
}
4566
4567 #ifndef __NO_STRICT_ALIGNMENT
/*
 * When jumbo frames are enabled we should realign the entire payload on
 * architectures with strict alignment. This is a serious design mistake
 * of the 8254x as it nullifies DMA operations. The 8254x only allows RX
 * buffer sizes of 2048/4096/8192/16384. What we really want is
 * 2048 - ETHER_ALIGN to align its payload. On architectures without
 * strict alignment restrictions the 8254x still performs unaligned memory
 * accesses, which reduce performance too.
 * To avoid copying over an entire frame to align, we allocate a new mbuf
 * and copy the ethernet header to the new mbuf. The new mbuf is prepended
 * onto the existing mbuf chain.
 *
 * Be aware, best performance of the 8254x is achieved only when jumbo
 * frames are not used at all on architectures with strict alignment.
 */
static int
em_fixup_rx(struct adapter *adapter)
{
	struct mbuf *m, *n;
	int error;

	error = 0;
	m = adapter->fmp;
	if (m->m_len <= (MCLBYTES - ETHER_HDR_LEN)) {
		/*
		 * Enough tail room: slide the first mbuf's data forward
		 * by ETHER_HDR_LEN and advance m_data to match, which
		 * shifts the payload onto an aligned boundary.
		 */
		bcopy(m->m_data, m->m_data + ETHER_HDR_LEN, m->m_len);
		m->m_data += ETHER_HDR_LEN;
	} else {
		/*
		 * No room to slide: prepend a new header mbuf holding
		 * just the ethernet header and link the original behind it.
		 */
		MGETHDR(n, M_DONTWAIT, MT_DATA);
		if (n != NULL) {
			bcopy(m->m_data, n->m_data, ETHER_HDR_LEN);
			m->m_data += ETHER_HDR_LEN;
			m->m_len -= ETHER_HDR_LEN;
			n->m_len = ETHER_HDR_LEN;
			/* Move the packet header to the new chain head */
			M_MOVE_PKTHDR(n, m);
			n->m_next = m;
			adapter->fmp = n;
		} else {
			/* Allocation failed: drop the whole frame */
			adapter->dropped_pkts++;
			m_freem(adapter->fmp);
			adapter->fmp = NULL;
			error = ENOMEM;
		}
	}

	return (error);
}
4613 #endif
4614
4615 /*********************************************************************
4616 *
4617 * Verify that the hardware indicated that the checksum is valid.
4618 * Inform the stack about the status of checksum so that stack
4619 * doesn't spend time verifying the checksum.
4620 *
4621 *********************************************************************/
4622 static void
4623 em_receive_checksum(struct adapter *adapter,
4624 struct e1000_rx_desc *rx_desc, struct mbuf *mp)
4625 {
4626 /* 82543 or newer only */
4627 if ((adapter->hw.mac.type < e1000_82543) ||
4628 /* Ignore Checksum bit is set */
4629 (rx_desc->status & E1000_RXD_STAT_IXSM)) {
4630 mp->m_pkthdr.csum_flags = 0;
4631 return;
4632 }
4633
4634 if (rx_desc->status & E1000_RXD_STAT_IPCS) {
4635 /* Did it pass? */
4636 if (!(rx_desc->errors & E1000_RXD_ERR_IPE)) {
4637 /* IP Checksum Good */
4638 mp->m_pkthdr.csum_flags = CSUM_IP_CHECKED;
4639 mp->m_pkthdr.csum_flags |= CSUM_IP_VALID;
4640
4641 } else {
4642 mp->m_pkthdr.csum_flags = 0;
4643 }
4644 }
4645
4646 if (rx_desc->status & E1000_RXD_STAT_TCPCS) {
4647 /* Did it pass? */
4648 if (!(rx_desc->errors & E1000_RXD_ERR_TCPE)) {
4649 mp->m_pkthdr.csum_flags |=
4650 (CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
4651 mp->m_pkthdr.csum_data = htons(0xffff);
4652 }
4653 }
4654 }
4655
4656 /*
4657 * This turns on the hardware offload of the VLAN
4658 * tag insertion and strip
4659 */
static void
em_enable_hw_vlans(struct adapter *adapter)
{
	uint32_t ctrl;

	/* Set VME (VLAN Mode Enable) in the device control register */
	ctrl = E1000_READ_REG(&adapter->hw, E1000_CTRL);
	ctrl |= E1000_CTRL_VME;
	E1000_WRITE_REG(&adapter->hw, E1000_CTRL, ctrl);
}
4669
/* Unmask the adapter's standard interrupt causes */
static void
em_enable_intr(struct adapter *adapter)
{
	E1000_WRITE_REG(&adapter->hw, E1000_IMS,
	    (IMS_ENABLE_MASK));
}
4676
/* Mask off every interrupt cause */
static void
em_disable_intr(struct adapter *adapter)
{
	E1000_WRITE_REG(&adapter->hw, E1000_IMC, 0xffffffff);
}
4682
4683 /*
4684 * Bit of a misnomer, what this really means is
4685 * to enable OS management of the system... aka
4686 * to disable special hardware management features
4687 */
static void
em_init_manageability(struct adapter *adapter)
{
	/* A shared code workaround */
#define E1000_82542_MANC2H E1000_MANC2H
	if (adapter->has_manage) {
		int manc2h = E1000_READ_REG(&adapter->hw, E1000_MANC2H);
		int manc = E1000_READ_REG(&adapter->hw, E1000_MANC);

		/* disable hardware interception of ARP */
		manc &= ~(E1000_MANC_ARP_EN);

		/* enable receiving management packets to the host */
		if (adapter->hw.mac.type >= e1000_82571) {
			manc |= E1000_MANC_EN_MNG2HOST;
			/* Route management ports 623 and 664 up to the host */
#define E1000_MNG2HOST_PORT_623 (1 << 5)
#define E1000_MNG2HOST_PORT_664 (1 << 6)
			manc2h |= E1000_MNG2HOST_PORT_623;
			manc2h |= E1000_MNG2HOST_PORT_664;
			E1000_WRITE_REG(&adapter->hw, E1000_MANC2H, manc2h);
		}

		E1000_WRITE_REG(&adapter->hw, E1000_MANC, manc);
	}
}
4713
4714 /*
4715 * Give control back to hardware management
4716 * controller if there is one.
4717 */
static void
em_release_manageability(struct adapter *adapter)
{
	if (adapter->has_manage) {
		int manc = E1000_READ_REG(&adapter->hw, E1000_MANC);

		/* re-enable hardware interception of ARP */
		manc |= E1000_MANC_ARP_EN;

		/* stop routing management packets up to the host */
		if (adapter->hw.mac.type >= e1000_82571)
			manc &= ~E1000_MANC_EN_MNG2HOST;

		E1000_WRITE_REG(&adapter->hw, E1000_MANC, manc);
	}
}
4733
4734 /*
4735 * em_get_hw_control sets {CTRL_EXT|FWSM}:DRV_LOAD bit.
4736 * For ASF and Pass Through versions of f/w this means that
4737 * the driver is loaded. For AMT version (only with 82573)
4738 * of the f/w this means that the network i/f is open.
4739 *
4740 */
static void
em_get_hw_control(struct adapter *adapter)
{
	u32 ctrl_ext, swsm;

	/* Let firmware know the driver has taken over */
	switch (adapter->hw.mac.type) {
	case e1000_82573:
		/* 82573 signals DRV_LOAD through the SWSM register */
		swsm = E1000_READ_REG(&adapter->hw, E1000_SWSM);
		E1000_WRITE_REG(&adapter->hw, E1000_SWSM,
		    swsm | E1000_SWSM_DRV_LOAD);
		break;
	case e1000_82571:
	case e1000_82572:
	case e1000_80003es2lan:
	case e1000_ich8lan:
	case e1000_ich9lan:
		/* These MACs signal DRV_LOAD through CTRL_EXT instead */
		ctrl_ext = E1000_READ_REG(&adapter->hw, E1000_CTRL_EXT);
		E1000_WRITE_REG(&adapter->hw, E1000_CTRL_EXT,
		    ctrl_ext | E1000_CTRL_EXT_DRV_LOAD);
		break;
	default:
		/* No firmware handshake on other MAC types */
		break;
	}
}
4766
/*
 * em_release_hw_control resets {CTRL_EXT|FWSM}:DRV_LOAD bit.
 * For ASF and Pass Through versions of f/w this means that the
 * driver is no longer loaded. For AMT versions (only with 82573)
 * of the f/w this means that the network i/f is closed.
 *
 */
static void
em_release_hw_control(struct adapter *adapter)
{
	u32 ctrl_ext, swsm;

	/* Let firmware take over control of h/w */
	switch (adapter->hw.mac.type) {
	case e1000_82573:
		/* 82573 signals DRV_LOAD through the SWSM register */
		swsm = E1000_READ_REG(&adapter->hw, E1000_SWSM);
		E1000_WRITE_REG(&adapter->hw, E1000_SWSM,
		    swsm & ~E1000_SWSM_DRV_LOAD);
		break;
	case e1000_82571:
	case e1000_82572:
	case e1000_80003es2lan:
	case e1000_ich8lan:
	case e1000_ich9lan:
		/* These MACs signal DRV_LOAD through CTRL_EXT instead */
		ctrl_ext = E1000_READ_REG(&adapter->hw, E1000_CTRL_EXT);
		E1000_WRITE_REG(&adapter->hw, E1000_CTRL_EXT,
		    ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD);
		break;
	default:
		/* No firmware handshake on other MAC types */
		break;

	}
}
4800
4801 static int
4802 em_is_valid_ether_addr(uint8_t *addr)
4803 {
4804 char zero_addr[6] = { 0, 0, 0, 0, 0, 0 };
4805
4806 if ((addr[0] & 1) || (!bcmp(addr, zero_addr, ETHER_ADDR_LEN))) {
4807 return (FALSE);
4808 }
4809
4810 return (TRUE);
4811 }
4812
4813 /*
4814 * NOTE: the following routines using the e1000
4815 * naming style are provided to the shared
4816 * code which expects that rather than 'em'
4817 */
4818
/* Shared-code hook: write a 16-bit value into PCI config space */
void
e1000_write_pci_cfg(struct e1000_hw *hw, uint32_t reg, uint16_t *value)
{
	pci_write_config(((struct e1000_osdep *)hw->back)->dev, reg, *value, 2);
}
4824
/* Shared-code hook: read a 16-bit value from PCI config space */
void
e1000_read_pci_cfg(struct e1000_hw *hw, uint32_t reg, uint16_t *value)
{
	*value = pci_read_config(((struct e1000_osdep *)hw->back)->dev, reg, 2);
}
4830
/* Set Memory-Write-and-Invalidate in the PCI command word */
void
e1000_pci_set_mwi(struct e1000_hw *hw)
{
	pci_write_config(((struct e1000_osdep *)hw->back)->dev, PCIR_COMMAND,
	    (hw->bus.pci_cmd_word | CMD_MEM_WRT_INVALIDATE), 2);
}
4837
/* Clear Memory-Write-and-Invalidate in the PCI command word */
void
e1000_pci_clear_mwi(struct e1000_hw *hw)
{
	pci_write_config(((struct e1000_osdep *)hw->back)->dev, PCIR_COMMAND,
	    (hw->bus.pci_cmd_word & ~CMD_MEM_WRT_INVALIDATE), 2);
}
4844
4845 /*
4846 * Read the PCI Express capabilities
4847 */
4848 int32_t
4849 e1000_read_pcie_cap_reg(struct e1000_hw *hw, uint32_t reg, uint16_t *value)
4850 {
4851 int32_t error = E1000_SUCCESS;
4852 uint16_t cap_off;
4853
4854 switch (hw->mac.type) {
4855
4856 case e1000_82571:
4857 case e1000_82572:
4858 case e1000_82573:
4859 case e1000_80003es2lan:
4860 cap_off = 0xE0;
4861 e1000_read_pci_cfg(hw, cap_off + reg, value);
4862 break;
4863 default:
4864 error = ~E1000_NOT_IMPLEMENTED;
4865 break;
4866 }
4867
4868 return (error);
4869 }
4870
4871 int32_t
4872 e1000_alloc_zeroed_dev_spec_struct(struct e1000_hw *hw, uint32_t size)
4873 {
4874 int32_t error = 0;
4875
4876 hw->dev_spec = malloc(size, M_DEVBUF, M_NOWAIT | M_ZERO);
4877 if (hw->dev_spec == NULL)
4878 error = ENOMEM;
4879
4880 return (error);
4881 }
4882
4883 void
4884 e1000_free_dev_spec_struct(struct e1000_hw *hw)
4885 {
4886 if (hw->dev_spec != NULL)
4887 free(hw->dev_spec, M_DEVBUF);
4888 return;
4889 }
4890
4891 /*
4892 * Enable PCI Wake On Lan capability
4893 */
void
em_enable_wakeup(device_t dev)
{
	u16 cap, status;
	u8 id;

	/* First find the capabilities pointer*/
	cap = pci_read_config(dev, PCIR_CAP_PTR, 2);
	/* Read the PM Capabilities */
	id = pci_read_config(dev, cap, 1);
	/*
	 * NOTE(review): only the first entry in the capability list is
	 * examined; if power management is not the first capability this
	 * silently does nothing -- confirm whether walking the list via
	 * the next-pointer is needed on affected devices.
	 */
	if (id != PCIY_PMG)     /* Something wrong */
		return;
	/* OK, we have the power capabilities, so
	   now get the status register */
	cap += PCIR_POWER_STATUS;
	status = pci_read_config(dev, cap, 2);
	status |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE;
	pci_write_config(dev, cap, status, 2);
	return;
}
4914
4915
4916 /*********************************************************************
4917 * 82544 Coexistence issue workaround.
4918 * There are 2 issues.
4919 * 1. Transmit Hang issue.
4920 * To detect this issue, following equation can be used...
4921 * SIZE[3:0] + ADDR[2:0] = SUM[3:0].
4922 * If SUM[3:0] is in between 1 to 4, we will have this issue.
4923 *
4924 * 2. DAC issue.
4925 * To detect this issue, following equation can be used...
4926 * SIZE[3:0] + ADDR[2:0] = SUM[3:0].
4927 * If SUM[3:0] is in between 9 to c, we will have this issue.
4928 *
4929 *
4930 * WORKAROUND:
4931 * Make sure we do not have ending address
4932 * as 1,2,3,4(Hang) or 9,a,b,c (DAC)
4933 *
4934 *************************************************************************/
4935 static uint32_t
4936 em_fill_descriptors (bus_addr_t address, uint32_t length,
4937 PDESC_ARRAY desc_array)
4938 {
4939 /* Since issue is sensitive to length and address.*/
4940 /* Let us first check the address...*/
4941 uint32_t safe_terminator;
4942 if (length <= 4) {
4943 desc_array->descriptor[0].address = address;
4944 desc_array->descriptor[0].length = length;
4945 desc_array->elements = 1;
4946 return (desc_array->elements);
4947 }
4948 safe_terminator = (uint32_t)((((uint32_t)address & 0x7) +
4949 (length & 0xF)) & 0xF);
4950 /* if it does not fall between 0x1 to 0x4 and 0x9 to 0xC then return */
4951 if (safe_terminator == 0 ||
4952 (safe_terminator > 4 &&
4953 safe_terminator < 9) ||
4954 (safe_terminator > 0xC &&
4955 safe_terminator <= 0xF)) {
4956 desc_array->descriptor[0].address = address;
4957 desc_array->descriptor[0].length = length;
4958 desc_array->elements = 1;
4959 return (desc_array->elements);
4960 }
4961
4962 desc_array->descriptor[0].address = address;
4963 desc_array->descriptor[0].length = length - 4;
4964 desc_array->descriptor[1].address = address + (length - 4);
4965 desc_array->descriptor[1].length = 4;
4966 desc_array->elements = 2;
4967 return (desc_array->elements);
4968 }
4969
/**********************************************************************
 *
 *  Update the board statistics counters.
 *
 *  Reads the hardware statistics registers into the softc's stats
 *  block, then mirrors the aggregate error/collision totals into the
 *  ifnet counters.  These registers are clear-on-read, so each read
 *  yields the delta since the previous call and the += accumulation
 *  keeps a running total.
 *
 **********************************************************************/
static void
em_update_stats_counters(struct adapter *adapter)
{
	struct ifnet *ifp;

	/*
	 * Symbol/sequence error counters are only meaningful on copper
	 * media, or on fiber/serdes while link is up; skip them
	 * otherwise so we don't accumulate garbage.
	 */
	if(adapter->hw.phy.media_type == e1000_media_type_copper ||
	    (E1000_READ_REG(&adapter->hw, E1000_STATUS) & E1000_STATUS_LU)) {
		adapter->stats.symerrs += E1000_READ_REG(&adapter->hw, E1000_SYMERRS);
		adapter->stats.sec += E1000_READ_REG(&adapter->hw, E1000_SEC);
	}
	adapter->stats.crcerrs += E1000_READ_REG(&adapter->hw, E1000_CRCERRS);
	adapter->stats.mpc += E1000_READ_REG(&adapter->hw, E1000_MPC);
	adapter->stats.scc += E1000_READ_REG(&adapter->hw, E1000_SCC);
	adapter->stats.ecol += E1000_READ_REG(&adapter->hw, E1000_ECOL);

	adapter->stats.mcc += E1000_READ_REG(&adapter->hw, E1000_MCC);
	adapter->stats.latecol += E1000_READ_REG(&adapter->hw, E1000_LATECOL);
	adapter->stats.colc += E1000_READ_REG(&adapter->hw, E1000_COLC);
	adapter->stats.dc += E1000_READ_REG(&adapter->hw, E1000_DC);
	adapter->stats.rlec += E1000_READ_REG(&adapter->hw, E1000_RLEC);
	adapter->stats.xonrxc += E1000_READ_REG(&adapter->hw, E1000_XONRXC);
	adapter->stats.xontxc += E1000_READ_REG(&adapter->hw, E1000_XONTXC);
	adapter->stats.xoffrxc += E1000_READ_REG(&adapter->hw, E1000_XOFFRXC);
	adapter->stats.xofftxc += E1000_READ_REG(&adapter->hw, E1000_XOFFTXC);
	adapter->stats.fcruc += E1000_READ_REG(&adapter->hw, E1000_FCRUC);
	adapter->stats.prc64 += E1000_READ_REG(&adapter->hw, E1000_PRC64);
	adapter->stats.prc127 += E1000_READ_REG(&adapter->hw, E1000_PRC127);
	adapter->stats.prc255 += E1000_READ_REG(&adapter->hw, E1000_PRC255);
	adapter->stats.prc511 += E1000_READ_REG(&adapter->hw, E1000_PRC511);
	adapter->stats.prc1023 += E1000_READ_REG(&adapter->hw, E1000_PRC1023);
	adapter->stats.prc1522 += E1000_READ_REG(&adapter->hw, E1000_PRC1522);
	adapter->stats.gprc += E1000_READ_REG(&adapter->hw, E1000_GPRC);
	adapter->stats.bprc += E1000_READ_REG(&adapter->hw, E1000_BPRC);
	adapter->stats.mprc += E1000_READ_REG(&adapter->hw, E1000_MPRC);
	adapter->stats.gptc += E1000_READ_REG(&adapter->hw, E1000_GPTC);

	/* For the 64-bit byte counters the low dword must be read first. */
	/* Both registers clear on the read of the high dword */
	/*
	 * NOTE(review): only the high dwords (GORCH/GOTCH and, below,
	 * TORH/TOTH) are read here, which contradicts the comment above
	 * and discards the low 32 bits of each 64-bit counter -- confirm
	 * against the hardware spec / later driver revisions whether the
	 * low dword should be read and accumulated as well.
	 */

	adapter->stats.gorc += E1000_READ_REG(&adapter->hw, E1000_GORCH);
	adapter->stats.gotc += E1000_READ_REG(&adapter->hw, E1000_GOTCH);

	adapter->stats.rnbc += E1000_READ_REG(&adapter->hw, E1000_RNBC);
	adapter->stats.ruc += E1000_READ_REG(&adapter->hw, E1000_RUC);
	adapter->stats.rfc += E1000_READ_REG(&adapter->hw, E1000_RFC);
	adapter->stats.roc += E1000_READ_REG(&adapter->hw, E1000_ROC);
	adapter->stats.rjc += E1000_READ_REG(&adapter->hw, E1000_RJC);

	adapter->stats.tor += E1000_READ_REG(&adapter->hw, E1000_TORH);
	adapter->stats.tot += E1000_READ_REG(&adapter->hw, E1000_TOTH);

	adapter->stats.tpr += E1000_READ_REG(&adapter->hw, E1000_TPR);
	adapter->stats.tpt += E1000_READ_REG(&adapter->hw, E1000_TPT);
	adapter->stats.ptc64 += E1000_READ_REG(&adapter->hw, E1000_PTC64);
	adapter->stats.ptc127 += E1000_READ_REG(&adapter->hw, E1000_PTC127);
	adapter->stats.ptc255 += E1000_READ_REG(&adapter->hw, E1000_PTC255);
	adapter->stats.ptc511 += E1000_READ_REG(&adapter->hw, E1000_PTC511);
	adapter->stats.ptc1023 += E1000_READ_REG(&adapter->hw, E1000_PTC1023);
	adapter->stats.ptc1522 += E1000_READ_REG(&adapter->hw, E1000_PTC1522);
	adapter->stats.mptc += E1000_READ_REG(&adapter->hw, E1000_MPTC);
	adapter->stats.bptc += E1000_READ_REG(&adapter->hw, E1000_BPTC);

	/* These counters only exist on 82543 and newer hardware. */
	if (adapter->hw.mac.type >= e1000_82543) {
		adapter->stats.algnerrc += 
		E1000_READ_REG(&adapter->hw, E1000_ALGNERRC);
		adapter->stats.rxerrc += 
		E1000_READ_REG(&adapter->hw, E1000_RXERRC);
		adapter->stats.tncrs += 
		E1000_READ_REG(&adapter->hw, E1000_TNCRS);
		adapter->stats.cexterr += 
		E1000_READ_REG(&adapter->hw, E1000_CEXTERR);
		adapter->stats.tsctc += 
		E1000_READ_REG(&adapter->hw, E1000_TSCTC);
		adapter->stats.tsctfc += 
		E1000_READ_REG(&adapter->hw, E1000_TSCTFC);
	}
	ifp = adapter->ifp;

	/* Publish the totals the network stack reports (netstat -i). */
	ifp->if_collisions = adapter->stats.colc;

	/* Rx Errors */
	ifp->if_ierrors = adapter->dropped_pkts + adapter->stats.rxerrc +
	    adapter->stats.crcerrs + adapter->stats.algnerrc +
	    adapter->stats.ruc + adapter->stats.roc +
	    adapter->stats.mpc + adapter->stats.cexterr;

	/* Tx Errors */
	ifp->if_oerrors = adapter->stats.ecol +
	    adapter->stats.latecol + adapter->watchdog_events;
}
5065
5066
5067 /**********************************************************************
5068 *
5069 * This routine is called only when em_display_debug_stats is enabled.
5070 * This routine provides a way to take a look at important statistics
5071 * maintained by the driver and hardware.
5072 *
5073 **********************************************************************/
5074 static void
5075 em_print_debug_info(struct adapter *adapter)
5076 {
5077 device_t dev = adapter->dev;
5078 uint8_t *hw_addr = adapter->hw.hw_addr;
5079
5080 device_printf(dev, "Adapter hardware address = %p \n", hw_addr);
5081 device_printf(dev, "CTRL = 0x%x RCTL = 0x%x \n",
5082 E1000_READ_REG(&adapter->hw, E1000_CTRL),
5083 E1000_READ_REG(&adapter->hw, E1000_RCTL));
5084 device_printf(dev, "Packet buffer = Tx=%dk Rx=%dk \n",
5085 ((E1000_READ_REG(&adapter->hw, E1000_PBA) & 0xffff0000) >> 16),\
5086 (E1000_READ_REG(&adapter->hw, E1000_PBA) & 0xffff) );
5087 device_printf(dev, "Flow control watermarks high = %d low = %d\n",
5088 adapter->hw.fc.high_water,
5089 adapter->hw.fc.low_water);
5090 device_printf(dev, "tx_int_delay = %d, tx_abs_int_delay = %d\n",
5091 E1000_READ_REG(&adapter->hw, E1000_TIDV),
5092 E1000_READ_REG(&adapter->hw, E1000_TADV));
5093 device_printf(dev, "rx_int_delay = %d, rx_abs_int_delay = %d\n",
5094 E1000_READ_REG(&adapter->hw, E1000_RDTR),
5095 E1000_READ_REG(&adapter->hw, E1000_RADV));
5096 device_printf(dev, "fifo workaround = %lld, fifo_reset_count = %lld\n",
5097 (long long)adapter->tx_fifo_wrk_cnt,
5098 (long long)adapter->tx_fifo_reset_cnt);
5099 device_printf(dev, "hw tdh = %d, hw tdt = %d\n",
5100 E1000_READ_REG(&adapter->hw, E1000_TDH(0)),
5101 E1000_READ_REG(&adapter->hw, E1000_TDT(0)));
5102 device_printf(dev, "hw rdh = %d, hw rdt = %d\n",
5103 E1000_READ_REG(&adapter->hw, E1000_RDH(0)),
5104 E1000_READ_REG(&adapter->hw, E1000_RDT(0)));
5105 device_printf(dev, "Num Tx descriptors avail = %d\n",
5106 adapter->num_tx_desc_avail);
5107 device_printf(dev, "Tx Descriptors not avail1 = %ld\n",
5108 adapter->no_tx_desc_avail1);
5109 device_printf(dev, "Tx Descriptors not avail2 = %ld\n",
5110 adapter->no_tx_desc_avail2);
5111 device_printf(dev, "Std mbuf failed = %ld\n",
5112 adapter->mbuf_alloc_failed);
5113 device_printf(dev, "Std mbuf cluster failed = %ld\n",
5114 adapter->mbuf_cluster_failed);
5115 device_printf(dev, "Driver dropped packets = %ld\n",
5116 adapter->dropped_pkts);
5117 device_printf(dev, "Driver tx dma failure in encap = %ld\n",
5118 adapter->no_tx_dma_setup);
5119 }
5120
/*
 * Dump the accumulated hardware statistics counters (as gathered by
 * em_update_stats_counters()) to the console.  Invoked via the stats
 * sysctl handler.  Counters are cast to long long because the stats
 * fields are 64-bit accumulators.
 */
static void
em_print_hw_stats(struct adapter *adapter)
{
	device_t dev = adapter->dev;

	device_printf(dev, "Excessive collisions = %lld\n",
	    (long long)adapter->stats.ecol);
#if	(DEBUG_HW > 0)  /* Dont output these errors normally */
	device_printf(dev, "Symbol errors = %lld\n",
	    (long long)adapter->stats.symerrs);
#endif
	device_printf(dev, "Sequence errors = %lld\n",
	    (long long)adapter->stats.sec);
	device_printf(dev, "Defer count = %lld\n",
	    (long long)adapter->stats.dc);
	device_printf(dev, "Missed Packets = %lld\n",
	    (long long)adapter->stats.mpc);
	device_printf(dev, "Receive No Buffers = %lld\n",
	    (long long)adapter->stats.rnbc);
	/* RLEC is inaccurate on some hardware, calculate our own. */
	device_printf(dev, "Receive Length Errors = %lld\n",
	    ((long long)adapter->stats.roc + (long long)adapter->stats.ruc));
	device_printf(dev, "Receive errors = %lld\n",
	    (long long)adapter->stats.rxerrc);
	device_printf(dev, "Crc errors = %lld\n",
	    (long long)adapter->stats.crcerrs);
	device_printf(dev, "Alignment errors = %lld\n",
	    (long long)adapter->stats.algnerrc);
	/* On 82575 these are collision counts */
	device_printf(dev, "Collision/Carrier extension errors = %lld\n",
	    (long long)adapter->stats.cexterr);
	device_printf(dev, "RX overruns = %ld\n", adapter->rx_overruns);
	device_printf(dev, "watchdog timeouts = %ld\n",
	    adapter->watchdog_events);
	/* Flow-control pause frames, both directions. */
	device_printf(dev, "XON Rcvd = %lld\n",
	    (long long)adapter->stats.xonrxc);
	device_printf(dev, "XON Xmtd = %lld\n",
	    (long long)adapter->stats.xontxc);
	device_printf(dev, "XOFF Rcvd = %lld\n",
	    (long long)adapter->stats.xoffrxc);
	device_printf(dev, "XOFF Xmtd = %lld\n",
	    (long long)adapter->stats.xofftxc);
	device_printf(dev, "Good Packets Rcvd = %lld\n",
	    (long long)adapter->stats.gprc);
	device_printf(dev, "Good Packets Xmtd = %lld\n",
	    (long long)adapter->stats.gptc);
	device_printf(dev, "TSO Contexts Xmtd = %lld\n",
	    (long long)adapter->stats.tsctc);
	device_printf(dev, "TSO Contexts Failed = %lld\n",
	    (long long)adapter->stats.tsctfc);
}
5172
5173 /**********************************************************************
5174 *
5175 * This routine provides a way to dump out the adapter eeprom,
5176 * often a useful debug/service tool. This only dumps the first
5177 * 32 words, stuff that matters is in that extent.
5178 *
5179 **********************************************************************/
5180 static void
5181 em_print_nvm_info(struct adapter *adapter)
5182 {
5183 u16 eeprom_data;
5184 int i, j, row = 0;
5185
5186 /* Its a bit crude, but it gets the job done */
5187 printf("\nInterface EEPROM Dump:\n");
5188 printf("Offset\n0x0000 ");
5189 for (i = 0, j = 0; i < 32; i++, j++) {
5190 if (j == 8) { /* Make the offset block */
5191 j = 0; ++row;
5192 printf("\n0x00%x0 ",row);
5193 }
5194 e1000_read_nvm(&adapter->hw, i, 1, &eeprom_data);
5195 printf("%04x ", eeprom_data);
5196 }
5197 printf("\n");
5198 }
5199
5200 static int
5201 em_sysctl_debug_info(SYSCTL_HANDLER_ARGS)
5202 {
5203 struct adapter *adapter;
5204 int error;
5205 int result;
5206
5207 result = -1;
5208 error = sysctl_handle_int(oidp, &result, 0, req);
5209
5210 if (error || !req->newptr)
5211 return (error);
5212
5213 if (result == 1) {
5214 adapter = (struct adapter *)arg1;
5215 em_print_debug_info(adapter);
5216 }
5217 /*
5218 * This value will cause a hex dump of the
5219 * first 32 16-bit words of the EEPROM to
5220 * the screen.
5221 */
5222 if (result == 2) {
5223 adapter = (struct adapter *)arg1;
5224 em_print_nvm_info(adapter);
5225 }
5226
5227 return (error);
5228 }
5229
5230
5231 static int
5232 em_sysctl_stats(SYSCTL_HANDLER_ARGS)
5233 {
5234 struct adapter *adapter;
5235 int error;
5236 int result;
5237
5238 result = -1;
5239 error = sysctl_handle_int(oidp, &result, 0, req);
5240
5241 if (error || !req->newptr)
5242 return (error);
5243
5244 if (result == 1) {
5245 adapter = (struct adapter *)arg1;
5246 em_print_hw_stats(adapter);
5247 }
5248
5249 return (error);
5250 }
5251
/*
 * Sysctl handler for the interrupt-delay tunables.  The new value is
 * taken in microseconds, validated, converted to hardware ticks and
 * written into the low 16 bits of the register named by info->offset.
 * Returns 0 on success, or EINVAL for an out-of-range value.
 */
static int
em_sysctl_int_delay(SYSCTL_HANDLER_ARGS)
{
	struct em_int_delay_info *info;
	struct adapter *adapter;
	uint32_t regval;
	int error;
	int usecs;
	int ticks;

	info = (struct em_int_delay_info *)arg1;
	usecs = info->value;
	error = sysctl_handle_int(oidp, &usecs, 0, req);
	if (error != 0 || req->newptr == NULL)
		return (error);
	/* The hardware field is 16 bits of ticks; reject anything wider. */
	if (usecs < 0 || usecs > EM_TICKS_TO_USECS(65535))
		return (EINVAL);
	info->value = usecs;
	ticks = EM_USECS_TO_TICKS(usecs);

	adapter = info->adapter;
	
	/* Read-modify-write of the register under the core lock. */
	EM_CORE_LOCK(adapter);
	regval = E1000_READ_OFFSET(&adapter->hw, info->offset);
	regval = (regval & ~0xffff) | (ticks & 0xffff);
	/* Handle a few special cases. */
	switch (info->offset) {
	case E1000_RDTR:
		break;
	case E1000_TIDV:
		if (ticks == 0) {
			/* Delay disabled: stop requesting IDE on Tx descriptors. */
			adapter->txd_cmd &= ~E1000_TXD_CMD_IDE;
			/* Don't write 0 into the TIDV register. */
			regval++;
		} else
			if (adapter->hw.mac.type != e1000_82575)
				adapter->txd_cmd |= E1000_TXD_CMD_IDE;
		break;
	}
	E1000_WRITE_OFFSET(&adapter->hw, info->offset, regval);
	EM_CORE_UNLOCK(adapter);
	return (0);
}
5295
5296 static void
5297 em_add_int_delay_sysctl(struct adapter *adapter, const char *name,
5298 const char *description, struct em_int_delay_info *info,
5299 int offset, int value)
5300 {
5301 info->adapter = adapter;
5302 info->offset = offset;
5303 info->value = value;
5304 SYSCTL_ADD_PROC(device_get_sysctl_ctx(adapter->dev),
5305 SYSCTL_CHILDREN(device_get_sysctl_tree(adapter->dev)),
5306 OID_AUTO, name, CTLTYPE_INT|CTLFLAG_RW,
5307 info, 0, em_sysctl_int_delay, "I", description);
5308 }
5309
#ifdef EM_FAST_IRQ
/*
 * Expose the RX processing limit as a read/write sysctl, seeding both
 * the sysctl and the driver's copy with the supplied initial value.
 */
static void
em_add_rx_process_limit(struct adapter *adapter, const char *name,
	const char *description, int *limit, int value)
{
	device_t dev = adapter->dev;

	*limit = value;
	SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
	    OID_AUTO, name, CTLTYPE_INT|CTLFLAG_RW, limit, value, description);
}
#endif
Cache object: df41ef0a8d92214dd3f1c29c19394963
|