FreeBSD/Linux Kernel Cross Reference
sys/dev/ixgb/if_ixgb.c
1 /*******************************************************************************
2
3 Copyright (c) 2001-2004, Intel Corporation
4 All rights reserved.
5
6 Redistribution and use in source and binary forms, with or without
7 modification, are permitted provided that the following conditions are met:
8
9 1. Redistributions of source code must retain the above copyright notice,
10 this list of conditions and the following disclaimer.
11
12 2. Redistributions in binary form must reproduce the above copyright
13 notice, this list of conditions and the following disclaimer in the
14 documentation and/or other materials provided with the distribution.
15
16 3. Neither the name of the Intel Corporation nor the names of its
17 contributors may be used to endorse or promote products derived from
18 this software without specific prior written permission.
19
20 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 POSSIBILITY OF SUCH DAMAGE.
31
32 ***************************************************************************/
33
34 /*$FreeBSD: releng/6.0/sys/dev/ixgb/if_ixgb.c 151061 2005-10-07 14:00:06Z glebius $*/
35
36 #ifdef HAVE_KERNEL_OPTION_HEADERS
37 #include "opt_device_polling.h"
38 #endif
39
40 #include <dev/ixgb/if_ixgb.h>
41
42 /*********************************************************************
43 * Set this to one to display debug statistics
44 *********************************************************************/
45 int ixgb_display_debug_stats = 0;
46
47 /*********************************************************************
48 * Linked list of board private structures for all NICs found
49 *********************************************************************/
50
51 struct adapter *ixgb_adapter_list = NULL;
52
53
54
55 /*********************************************************************
56 * Driver version
57 *********************************************************************/
58
59 char ixgb_driver_version[] = "1.0.6";
60 char ixgb_copyright[] = "Copyright (c) 2001-2004 Intel Corporation.";
61
62 /*********************************************************************
63 * PCI Device ID Table
64 *
65 * Used by probe to select devices to load on
66 * Last field stores an index into ixgb_strings
67 * Last entry must be all 0s
68 *
69 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
70 *********************************************************************/
71
/*
 * PCI IDs this driver claims.  Scanned linearly by ixgb_probe();
 * terminated by the all-zero sentinel entry.
 */
static ixgb_vendor_info_t ixgb_vendor_info_array[] =
{
	/* Intel(R) PRO/10000 Network Connection */
	{INTEL_VENDOR_ID, IXGB_DEVICE_ID_82597EX, PCI_ANY_ID, PCI_ANY_ID, 0},
	{INTEL_VENDOR_ID, IXGB_DEVICE_ID_82597EX_SR, PCI_ANY_ID, PCI_ANY_ID, 0},
	/* required last entry */
	{0, 0, 0, 0, 0}
};
80
81 /*********************************************************************
82 * Table of branding strings for all supported NICs.
83 *********************************************************************/
84
85 static char *ixgb_strings[] = {
86 "Intel(R) PRO/10GbE Network Driver"
87 };
88
89 /*********************************************************************
90 * Function prototypes
91 *********************************************************************/
92 static int ixgb_probe(device_t);
93 static int ixgb_attach(device_t);
94 static int ixgb_detach(device_t);
95 static int ixgb_shutdown(device_t);
96 static void ixgb_intr(void *);
97 static void ixgb_start(struct ifnet *);
98 static void ixgb_start_locked(struct ifnet *);
99 static int ixgb_ioctl(struct ifnet *, IOCTL_CMD_TYPE, caddr_t);
100 static void ixgb_watchdog(struct ifnet *);
101 static void ixgb_init(void *);
102 static void ixgb_init_locked(struct adapter *);
103 static void ixgb_stop(void *);
104 static void ixgb_media_status(struct ifnet *, struct ifmediareq *);
105 static int ixgb_media_change(struct ifnet *);
106 static void ixgb_identify_hardware(struct adapter *);
107 static int ixgb_allocate_pci_resources(struct adapter *);
108 static void ixgb_free_pci_resources(struct adapter *);
109 static void ixgb_local_timer(void *);
110 static int ixgb_hardware_init(struct adapter *);
111 static void ixgb_setup_interface(device_t, struct adapter *);
112 static int ixgb_setup_transmit_structures(struct adapter *);
113 static void ixgb_initialize_transmit_unit(struct adapter *);
114 static int ixgb_setup_receive_structures(struct adapter *);
115 static void ixgb_initialize_receive_unit(struct adapter *);
116 static void ixgb_enable_intr(struct adapter *);
117 static void ixgb_disable_intr(struct adapter *);
118 static void ixgb_free_transmit_structures(struct adapter *);
119 static void ixgb_free_receive_structures(struct adapter *);
120 static void ixgb_update_stats_counters(struct adapter *);
121 static void ixgb_clean_transmit_interrupts(struct adapter *);
122 static int ixgb_allocate_receive_structures(struct adapter *);
123 static int ixgb_allocate_transmit_structures(struct adapter *);
124 static void ixgb_process_receive_interrupts(struct adapter *, int);
125 static void
126 ixgb_receive_checksum(struct adapter *,
127 struct ixgb_rx_desc * rx_desc,
128 struct mbuf *);
129 static void
130 ixgb_transmit_checksum_setup(struct adapter *,
131 struct mbuf *,
132 u_int8_t *);
133 static void ixgb_set_promisc(struct adapter *);
134 static void ixgb_disable_promisc(struct adapter *);
135 static void ixgb_set_multi(struct adapter *);
136 static void ixgb_print_hw_stats(struct adapter *);
137 static void ixgb_print_link_status(struct adapter *);
138 static int
139 ixgb_get_buf(int i, struct adapter *,
140 struct mbuf *);
141 static void ixgb_enable_vlans(struct adapter * adapter);
142 static int ixgb_encap(struct adapter * adapter, struct mbuf * m_head);
143 static int ixgb_sysctl_stats(SYSCTL_HANDLER_ARGS);
144 static int
145 ixgb_dma_malloc(struct adapter *, bus_size_t,
146 struct ixgb_dma_alloc *, int);
147 static void ixgb_dma_free(struct adapter *, struct ixgb_dma_alloc *);
148 #ifdef DEVICE_POLLING
149 static poll_handler_t ixgb_poll;
150 #endif
151
152 /*********************************************************************
153 * FreeBSD Device Interface Entry Points
154 *********************************************************************/
155
/* Newbus method table: this driver implements probe/attach/detach/shutdown. */
static device_method_t ixgb_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe, ixgb_probe),
	DEVMETHOD(device_attach, ixgb_attach),
	DEVMETHOD(device_detach, ixgb_detach),
	DEVMETHOD(device_shutdown, ixgb_shutdown),
	{0, 0}
};

/* Softc is struct adapter; allocated/zeroed by newbus per device. */
static driver_t ixgb_driver = {
	"ixgb", ixgb_methods, sizeof(struct adapter),
};

static devclass_t ixgb_devclass;
/* Attach the driver to the PCI bus. */
DRIVER_MODULE(if_ixgb, pci, ixgb_driver, ixgb_devclass, 0, 0);

MODULE_DEPEND(if_ixgb, pci, 1, 1, 1);
MODULE_DEPEND(if_ixgb, ether, 1, 1, 1);
174
175 /* some defines for controlling descriptor fetches in h/w */
176 #define RXDCTL_PTHRESH_DEFAULT 128 /* chip considers prefech below this */
177 #define RXDCTL_HTHRESH_DEFAULT 16 /* chip will only prefetch if tail is
178 * pushed this many descriptors from
179 * head */
180 #define RXDCTL_WTHRESH_DEFAULT 0 /* chip writes back at this many or RXT0 */
181
182
183 /*********************************************************************
184 * Device identification routine
185 *
186 * ixgb_probe determines if the driver should be loaded on
187 * adapter based on PCI vendor/device id of the adapter.
188 *
189 * return 0 on success, positive on failure
190 *********************************************************************/
191
192 static int
193 ixgb_probe(device_t dev)
194 {
195 ixgb_vendor_info_t *ent;
196
197 u_int16_t pci_vendor_id = 0;
198 u_int16_t pci_device_id = 0;
199 u_int16_t pci_subvendor_id = 0;
200 u_int16_t pci_subdevice_id = 0;
201 char adapter_name[60];
202
203 INIT_DEBUGOUT("ixgb_probe: begin");
204
205 pci_vendor_id = pci_get_vendor(dev);
206 if (pci_vendor_id != IXGB_VENDOR_ID)
207 return (ENXIO);
208
209 pci_device_id = pci_get_device(dev);
210 pci_subvendor_id = pci_get_subvendor(dev);
211 pci_subdevice_id = pci_get_subdevice(dev);
212
213 ent = ixgb_vendor_info_array;
214 while (ent->vendor_id != 0) {
215 if ((pci_vendor_id == ent->vendor_id) &&
216 (pci_device_id == ent->device_id) &&
217
218 ((pci_subvendor_id == ent->subvendor_id) ||
219 (ent->subvendor_id == PCI_ANY_ID)) &&
220
221 ((pci_subdevice_id == ent->subdevice_id) ||
222 (ent->subdevice_id == PCI_ANY_ID))) {
223 sprintf(adapter_name, "%s, Version - %s",
224 ixgb_strings[ent->index],
225 ixgb_driver_version);
226 device_set_desc_copy(dev, adapter_name);
227 return (BUS_PROBE_DEFAULT);
228 }
229 ent++;
230 }
231
232 return (ENXIO);
233 }
234
235 /*********************************************************************
236 * Device initialization routine
237 *
238 * The attach entry point is called when the driver is being loaded.
239 * This routine identifies the type of hardware, allocates all resources
240 * and initializes the hardware.
241 *
242 * return 0 on success, positive on failure
243 *********************************************************************/
244
245 static int
246 ixgb_attach(device_t dev)
247 {
248 struct adapter *adapter;
249 int tsize, rsize;
250 int error = 0;
251
252 printf("ixgb%d: %s\n", device_get_unit(dev), ixgb_copyright);
253 INIT_DEBUGOUT("ixgb_attach: begin");
254
255 /* Allocate, clear, and link in our adapter structure */
256 if (!(adapter = device_get_softc(dev))) {
257 printf("ixgb: adapter structure allocation failed\n");
258 return (ENOMEM);
259 }
260 bzero(adapter, sizeof(struct adapter));
261 adapter->dev = dev;
262 adapter->osdep.dev = dev;
263 adapter->unit = device_get_unit(dev);
264 IXGB_LOCK_INIT(adapter, device_get_nameunit(dev));
265
266 if (ixgb_adapter_list != NULL)
267 ixgb_adapter_list->prev = adapter;
268 adapter->next = ixgb_adapter_list;
269 ixgb_adapter_list = adapter;
270
271 /* SYSCTL APIs */
272 SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
273 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
274 OID_AUTO, "stats", CTLTYPE_INT | CTLFLAG_RW,
275 (void *)adapter, 0,
276 ixgb_sysctl_stats, "I", "Statistics");
277
278 callout_init(&adapter->timer, CALLOUT_MPSAFE);
279
280 /* Determine hardware revision */
281 ixgb_identify_hardware(adapter);
282
283 /* Parameters (to be read from user) */
284 adapter->num_tx_desc = IXGB_MAX_TXD;
285 adapter->num_rx_desc = IXGB_MAX_RXD;
286 adapter->tx_int_delay = TIDV;
287 adapter->rx_int_delay = RDTR;
288 adapter->rx_buffer_len = IXGB_RXBUFFER_2048;
289
290 adapter->hw.fc.high_water = FCRTH;
291 adapter->hw.fc.low_water = FCRTL;
292 adapter->hw.fc.pause_time = FCPAUSE;
293 adapter->hw.fc.send_xon = TRUE;
294 adapter->hw.fc.type = FLOW_CONTROL;
295
296
297 /* Set the max frame size assuming standard ethernet sized frames */
298 adapter->hw.max_frame_size =
299 ETHERMTU + ETHER_HDR_LEN + ETHER_CRC_LEN;
300
301 if (ixgb_allocate_pci_resources(adapter)) {
302 printf("ixgb%d: Allocation of PCI resources failed\n",
303 adapter->unit);
304 error = ENXIO;
305 goto err_pci;
306 }
307 tsize = IXGB_ROUNDUP(adapter->num_tx_desc *
308 sizeof(struct ixgb_tx_desc), 4096);
309
310 /* Allocate Transmit Descriptor ring */
311 if (ixgb_dma_malloc(adapter, tsize, &adapter->txdma, BUS_DMA_NOWAIT)) {
312 printf("ixgb%d: Unable to allocate TxDescriptor memory\n",
313 adapter->unit);
314 error = ENOMEM;
315 goto err_tx_desc;
316 }
317 adapter->tx_desc_base = (struct ixgb_tx_desc *) adapter->txdma.dma_vaddr;
318
319 rsize = IXGB_ROUNDUP(adapter->num_rx_desc *
320 sizeof(struct ixgb_rx_desc), 4096);
321
322 /* Allocate Receive Descriptor ring */
323 if (ixgb_dma_malloc(adapter, rsize, &adapter->rxdma, BUS_DMA_NOWAIT)) {
324 printf("ixgb%d: Unable to allocate rx_desc memory\n",
325 adapter->unit);
326 error = ENOMEM;
327 goto err_rx_desc;
328 }
329 adapter->rx_desc_base = (struct ixgb_rx_desc *) adapter->rxdma.dma_vaddr;
330
331 /* Initialize the hardware */
332 if (ixgb_hardware_init(adapter)) {
333 printf("ixgb%d: Unable to initialize the hardware\n",
334 adapter->unit);
335 error = EIO;
336 goto err_hw_init;
337 }
338 /* Setup OS specific network interface */
339 ixgb_setup_interface(dev, adapter);
340
341 /* Initialize statistics */
342 ixgb_clear_hw_cntrs(&adapter->hw);
343 ixgb_update_stats_counters(adapter);
344
345 INIT_DEBUGOUT("ixgb_attach: end");
346 return (0);
347
348 err_hw_init:
349 ixgb_dma_free(adapter, &adapter->rxdma);
350 err_rx_desc:
351 ixgb_dma_free(adapter, &adapter->txdma);
352 err_tx_desc:
353 err_pci:
354 ixgb_free_pci_resources(adapter);
355 sysctl_ctx_free(&adapter->sysctl_ctx);
356 return (error);
357
358 }
359
360 /*********************************************************************
361 * Device removal routine
362 *
363 * The detach entry point is called when the driver is being removed.
364 * This routine stops the adapter and deallocates all the resources
365 * that were allocated for driver operation.
366 *
367 * return 0 on success, positive on failure
368 *********************************************************************/
369
370 static int
371 ixgb_detach(device_t dev)
372 {
373 struct adapter *adapter = device_get_softc(dev);
374 struct ifnet *ifp = adapter->ifp;
375
376 INIT_DEBUGOUT("ixgb_detach: begin");
377
378 #ifdef DEVICE_POLLING
379 if (ifp->if_capenable & IFCAP_POLLING)
380 ether_poll_deregister(ifp);
381 #endif
382
383 IXGB_LOCK(adapter);
384 adapter->in_detach = 1;
385
386 ixgb_stop(adapter);
387 IXGB_UNLOCK(adapter);
388
389 #if __FreeBSD_version < 500000
390 ether_ifdetach(adapter->ifp, ETHER_BPF_SUPPORTED);
391 #else
392 ether_ifdetach(adapter->ifp);
393 if_free(adapter->ifp);
394 #endif
395 ixgb_free_pci_resources(adapter);
396
397
398 /* Free Transmit Descriptor ring */
399 if (adapter->tx_desc_base) {
400 ixgb_dma_free(adapter, &adapter->txdma);
401 adapter->tx_desc_base = NULL;
402 }
403 /* Free Receive Descriptor ring */
404 if (adapter->rx_desc_base) {
405 ixgb_dma_free(adapter, &adapter->rxdma);
406 adapter->rx_desc_base = NULL;
407 }
408 /* Remove from the adapter list */
409 if (ixgb_adapter_list == adapter)
410 ixgb_adapter_list = adapter->next;
411 if (adapter->next != NULL)
412 adapter->next->prev = adapter->prev;
413 if (adapter->prev != NULL)
414 adapter->prev->next = adapter->next;
415
416 ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
417 ifp->if_timer = 0;
418
419 IXGB_LOCK_DESTROY(adapter);
420 return (0);
421 }
422
423 /*********************************************************************
424 *
425 * Shutdown entry point
426 *
427 **********************************************************************/
428
429 static int
430 ixgb_shutdown(device_t dev)
431 {
432 struct adapter *adapter = device_get_softc(dev);
433 IXGB_LOCK(adapter);
434 ixgb_stop(adapter);
435 IXGB_UNLOCK(adapter);
436 return (0);
437 }
438
439
440 /*********************************************************************
441 * Transmit entry point
442 *
443 * ixgb_start is called by the stack to initiate a transmit.
444 * The driver will remain in this routine as long as there are
445 * packets to transmit and transmit resources are available.
446 * In case resources are not available stack is notified and
447 * the packet is requeued.
448 **********************************************************************/
449
450 static void
451 ixgb_start_locked(struct ifnet * ifp)
452 {
453 struct mbuf *m_head;
454 struct adapter *adapter = ifp->if_softc;
455
456 IXGB_LOCK_ASSERT(adapter);
457
458 if (!adapter->link_active)
459 return;
460
461 while (ifp->if_snd.ifq_head != NULL) {
462 IF_DEQUEUE(&ifp->if_snd, m_head);
463
464 if (m_head == NULL)
465 break;
466
467 if (ixgb_encap(adapter, m_head)) {
468 ifp->if_drv_flags |= IFF_DRV_OACTIVE;
469 IF_PREPEND(&ifp->if_snd, m_head);
470 break;
471 }
472 /* Send a copy of the frame to the BPF listener */
473 #if __FreeBSD_version < 500000
474 if (ifp->if_bpf)
475 bpf_mtap(ifp, m_head);
476 #else
477 BPF_MTAP(ifp, m_head);
478 #endif
479 /* Set timeout in case hardware has problems transmitting */
480 ifp->if_timer = IXGB_TX_TIMEOUT;
481
482 }
483 return;
484 }
485
486 static void
487 ixgb_start(struct ifnet *ifp)
488 {
489 struct adapter *adapter = ifp->if_softc;
490
491 IXGB_LOCK(adapter);
492 ixgb_start_locked(ifp);
493 IXGB_UNLOCK(adapter);
494 return;
495 }
496
/*********************************************************************
 *  Ioctl entry point
 *
 *  ixgb_ioctl is called when the user wants to configure the
 *  interface.  Address, MTU, interface-flag, multicast, media and
 *  capability requests are handled here; anything else is EINVAL.
 *
 *  return 0 on success, positive errno on failure
 **********************************************************************/

static int
ixgb_ioctl(struct ifnet * ifp, IOCTL_CMD_TYPE command, caddr_t data)
{
	int             mask, error = 0;
	struct ifreq   *ifr = (struct ifreq *) data;
	struct adapter *adapter = ifp->if_softc;

	/* Refuse configuration once detach is under way. */
	if (adapter->in_detach)
		goto out;

	switch (command) {
	case SIOCSIFADDR:
	case SIOCGIFADDR:
		IOCTL_DEBUGOUT("ioctl rcv'd: SIOCxIFADDR (Get/Set Interface Addr)");
		ether_ioctl(ifp, command, data);
		break;
	case SIOCSIFMTU:
		IOCTL_DEBUGOUT("ioctl rcv'd: SIOCSIFMTU (Set Interface MTU)");
		/* Only the upper bound is checked here. */
		if (ifr->ifr_mtu > IXGB_MAX_JUMBO_FRAME_SIZE - ETHER_HDR_LEN) {
			error = EINVAL;
		} else {
			IXGB_LOCK(adapter);
			ifp->if_mtu = ifr->ifr_mtu;
			adapter->hw.max_frame_size =
				ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;

			/* Re-init so the hardware picks up the new size. */
			ixgb_init_locked(adapter);
			IXGB_UNLOCK(adapter);
		}
		break;
	case SIOCSIFFLAGS:
		IOCTL_DEBUGOUT("ioctl rcv'd: SIOCSIFFLAGS (Set Interface Flags)");
		IXGB_LOCK(adapter);
		if (ifp->if_flags & IFF_UP) {
			if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
				ixgb_init_locked(adapter);
			}
			/* Re-sync promiscuous state with if_flags. */
			ixgb_disable_promisc(adapter);
			ixgb_set_promisc(adapter);
		} else {
			if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
				ixgb_stop(adapter);
			}
		}
		IXGB_UNLOCK(adapter);
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		IOCTL_DEBUGOUT("ioctl rcv'd: SIOC(ADD|DEL)MULTI");
		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
			/* Reprogram the multicast filter with interrupts off. */
			IXGB_LOCK(adapter);
			ixgb_disable_intr(adapter);
			ixgb_set_multi(adapter);
			ixgb_enable_intr(adapter);
			IXGB_UNLOCK(adapter);
		}
		break;
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		IOCTL_DEBUGOUT("ioctl rcv'd: SIOCxIFMEDIA (Get/Set Interface Media)");
		error = ifmedia_ioctl(ifp, ifr, &adapter->media, command);
		break;
	case SIOCSIFCAP:
		IOCTL_DEBUGOUT("ioctl rcv'd: SIOCSIFCAP (Set Capabilities)");
		/* mask holds the capability bits being toggled. */
		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
#ifdef DEVICE_POLLING
		if (mask & IFCAP_POLLING) {
			if (ifr->ifr_reqcap & IFCAP_POLLING) {
				error = ether_poll_register(ixgb_poll, ifp);
				if (error)
					return(error);
				IXGB_LOCK(adapter);
				ixgb_disable_intr(adapter);
				ifp->if_capenable |= IFCAP_POLLING;
				IXGB_UNLOCK(adapter);
			} else {
				error = ether_poll_deregister(ifp);
				/* Enable interrupt even in error case */
				IXGB_LOCK(adapter);
				ixgb_enable_intr(adapter);
				ifp->if_capenable &= ~IFCAP_POLLING;
				IXGB_UNLOCK(adapter);
			}
		}
#endif /* DEVICE_POLLING */
		if (mask & IFCAP_HWCSUM) {
			/* Toggle checksum offload, then re-init if running. */
			if (IFCAP_HWCSUM & ifp->if_capenable)
				ifp->if_capenable &= ~IFCAP_HWCSUM;
			else
				ifp->if_capenable |= IFCAP_HWCSUM;
			if (ifp->if_drv_flags & IFF_DRV_RUNNING)
				ixgb_init(adapter);
		}
		break;
	default:
		IOCTL_DEBUGOUT1("ioctl received: UNKNOWN (0x%X)\n", (int)command);
		error = EINVAL;
	}

out:
	return (error);
}
608
/*********************************************************************
 *  Watchdog entry point
 *
 *  This routine is called whenever hardware quits transmitting.
 *  If the stall is due to flow-control pause frames, only the timer
 *  is re-armed; otherwise the adapter is fully reset.
 *
 **********************************************************************/

static void
ixgb_watchdog(struct ifnet * ifp)
{
	struct adapter *adapter;
	adapter = ifp->if_softc;

	/*
	 * If we are in this routine because of pause frames, then don't
	 * reset the hardware.
	 */
	if (IXGB_READ_REG(&adapter->hw, STATUS) & IXGB_STATUS_TXOFF) {
		ifp->if_timer = IXGB_TX_TIMEOUT;
		return;
	}
	printf("ixgb%d: watchdog timeout -- resetting\n", adapter->unit);

	ifp->if_drv_flags &= ~IFF_DRV_RUNNING;

	/* Full reset: tear the adapter down and bring it back up. */
	ixgb_stop(adapter);
	ixgb_init(adapter);

	/* Count the wedged frame(s) as an output error. */
	ifp->if_oerrors++;

	return;
}
643
/*********************************************************************
 *  Init entry point
 *
 *  This routine is used in two ways.  It is used by the stack as
 *  init entry point in network interface structure.  It is also used
 *  by the driver as a hw/sw initialization routine to get to a
 *  consistent state.
 *
 *  Caller must hold the adapter lock (asserted below).  On any
 *  sub-step failure the adapter is left stopped and we return early.
 **********************************************************************/

static void
ixgb_init_locked(struct adapter *adapter)
{
	struct ifnet   *ifp;

	INIT_DEBUGOUT("ixgb_init: begin");

	IXGB_LOCK_ASSERT(adapter);

	/* Quiesce the adapter before reprogramming it. */
	ixgb_stop(adapter);

	/* Get the latest mac address, User can use a LAA */
	bcopy(IFP2ENADDR(adapter->ifp), adapter->hw.curr_mac_addr,
	    IXGB_ETH_LENGTH_OF_ADDRESS);

	/* Initialize the hardware */
	if (ixgb_hardware_init(adapter)) {
		printf("ixgb%d: Unable to initialize the hardware\n",
		       adapter->unit);
		return;
	}
	ixgb_enable_vlans(adapter);

	/* Prepare transmit descriptors and buffers */
	if (ixgb_setup_transmit_structures(adapter)) {
		printf("ixgb%d: Could not setup transmit structures\n",
		       adapter->unit);
		ixgb_stop(adapter);
		return;
	}
	ixgb_initialize_transmit_unit(adapter);

	/* Setup Multicast table */
	ixgb_set_multi(adapter);

	/* Prepare receive descriptors and buffers */
	if (ixgb_setup_receive_structures(adapter)) {
		printf("ixgb%d: Could not setup receive structures\n",
		       adapter->unit);
		ixgb_stop(adapter);
		return;
	}
	ixgb_initialize_receive_unit(adapter);

	/* Don't lose promiscuous settings */
	ixgb_set_promisc(adapter);

	ifp = adapter->ifp;
	ifp->if_drv_flags |= IFF_DRV_RUNNING;
	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;

	/* Advertise hardware checksum offload only when enabled. */
	if (ifp->if_capenable & IFCAP_TXCSUM)
		ifp->if_hwassist = IXGB_CHECKSUM_FEATURES;
	else
		ifp->if_hwassist = 0;

	/* Enable jumbo frames */
	if (ifp->if_mtu > ETHERMTU) {
		uint32_t        temp_reg;
		IXGB_WRITE_REG(&adapter->hw, MFS,
		    adapter->hw.max_frame_size << IXGB_MFS_SHIFT);
		temp_reg = IXGB_READ_REG(&adapter->hw, CTRL0);
		temp_reg |= IXGB_CTRL0_JFE;
		IXGB_WRITE_REG(&adapter->hw, CTRL0, temp_reg);
	}
	/* Start the link/stats timer and reset the hardware counters. */
	callout_reset(&adapter->timer, 2 * hz, ixgb_local_timer, adapter);
	ixgb_clear_hw_cntrs(&adapter->hw);
#ifdef DEVICE_POLLING
	/*
	 * Only disable interrupts if we are polling, make sure they are on
	 * otherwise.
	 */
	if (ifp->if_capenable & IFCAP_POLLING)
		ixgb_disable_intr(adapter);
	else
#endif
		ixgb_enable_intr(adapter);

	return;
}
737
/* Unlocked init entry point (stack callback): take the lock and init. */
static void
ixgb_init(void *arg)
{
	struct adapter *sc = arg;

	IXGB_LOCK(sc);
	ixgb_init_locked(sc);
	IXGB_UNLOCK(sc);
}
748
749 #ifdef DEVICE_POLLING
/*
 * Polling body: on POLL_AND_CHECK_STATUS, re-checks link state, then
 * services RX (bounded by `count') and TX completions, restarting
 * transmission if packets are queued.  Caller holds the adapter lock.
 */
static void
ixgb_poll_locked(struct ifnet * ifp, enum poll_cmd cmd, int count)
{
	struct adapter *adapter = ifp->if_softc;
	u_int32_t       reg_icr;

	IXGB_LOCK_ASSERT(adapter);

	if (cmd == POLL_AND_CHECK_STATUS) {
		/* Reading ICR acknowledges the pending causes. */
		reg_icr = IXGB_READ_REG(&adapter->hw, ICR);
		if (reg_icr & (IXGB_INT_RXSEQ | IXGB_INT_LSC)) {
			callout_stop(&adapter->timer);
			ixgb_check_for_link(&adapter->hw);
			ixgb_print_link_status(adapter);
			callout_reset(&adapter->timer, 2 * hz, ixgb_local_timer,
			    adapter);
		}
	}
	ixgb_process_receive_interrupts(adapter, count);
	ixgb_clean_transmit_interrupts(adapter);

	/* Kick the transmitter if frames queued up meanwhile. */
	if (ifp->if_snd.ifq_head != NULL)
		ixgb_start_locked(ifp);
}
774
775 static void
776 ixgb_poll(struct ifnet * ifp, enum poll_cmd cmd, int count)
777 {
778 struct adapter *adapter = ifp->if_softc;
779
780 IXGB_LOCK(adapter);
781 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
782 ixgb_poll_locked(ifp, cmd, count);
783 IXGB_UNLOCK(adapter);
784 }
785 #endif /* DEVICE_POLLING */
786
/*********************************************************************
 *
 *  Interrupt Service routine
 *
 *  Reads (and thereby acknowledges) the ICR, handles link-state
 *  changes, then services RX/TX work up to IXGB_MAX_INTR times
 *  before restarting any pending transmits.
 *
 **********************************************************************/

static void
ixgb_intr(void *arg)
{
	u_int32_t       loop_cnt = IXGB_MAX_INTR;
	u_int32_t       reg_icr;
	struct ifnet   *ifp;
	struct adapter *adapter = arg;
	boolean_t       rxdmt0 = FALSE;

	IXGB_LOCK(adapter);

	ifp = adapter->ifp;

#ifdef DEVICE_POLLING
	/* When polling is active the poll routine does all the work. */
	if (ifp->if_capenable & IFCAP_POLLING) {
		IXGB_UNLOCK(adapter);
		return;
	}
#endif

	reg_icr = IXGB_READ_REG(&adapter->hw, ICR);
	if (reg_icr == 0) {
		/* Not our interrupt. */
		IXGB_UNLOCK(adapter);
		return;
	}

	if (reg_icr & IXGB_INT_RXDMT0)
		rxdmt0 = TRUE;

#ifdef _SV_
	if (reg_icr & IXGB_INT_RXDMT0)
		adapter->sv_stats.icr_rxdmt0++;
	if (reg_icr & IXGB_INT_RXO)
		adapter->sv_stats.icr_rxo++;
	if (reg_icr & IXGB_INT_RXT0)
		adapter->sv_stats.icr_rxt0++;
	if (reg_icr & IXGB_INT_TXDW)
		adapter->sv_stats.icr_TXDW++;
#endif				/* _SV_ */

	/* Link status change */
	if (reg_icr & (IXGB_INT_RXSEQ | IXGB_INT_LSC)) {
		callout_stop(&adapter->timer);
		ixgb_check_for_link(&adapter->hw);
		ixgb_print_link_status(adapter);
		callout_reset(&adapter->timer, 2 * hz, ixgb_local_timer,
		    adapter);
	}
	/* Bounded RX/TX servicing loop. */
	while (loop_cnt > 0) {
		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
			ixgb_process_receive_interrupts(adapter, -1);
			ixgb_clean_transmit_interrupts(adapter);
		}
		loop_cnt--;
	}

	/* Re-arm the RX descriptor-minimum-threshold interrupt if it fired. */
	if (rxdmt0 && adapter->raidc) {
		IXGB_WRITE_REG(&adapter->hw, IMC, IXGB_INT_RXDMT0);
		IXGB_WRITE_REG(&adapter->hw, IMS, IXGB_INT_RXDMT0);
	}
	/* Kick the transmitter if frames queued up while servicing. */
	if (ifp->if_drv_flags & IFF_DRV_RUNNING && ifp->if_snd.ifq_head != NULL)
		ixgb_start_locked(ifp);

	IXGB_UNLOCK(adapter);
	return;
}
859
860
/*********************************************************************
 *
 *  Media Ioctl callback
 *
 *  This routine is called whenever the user queries the status of
 *  the interface using ifconfig.
 *
 **********************************************************************/
static void
ixgb_media_status(struct ifnet * ifp, struct ifmediareq * ifmr)
{
	struct adapter *adapter = ifp->if_softc;

	INIT_DEBUGOUT("ixgb_media_status: begin");

	/* Refresh the cached link state before reporting it. */
	ixgb_check_for_link(&adapter->hw);
	ixgb_print_link_status(adapter);

	ifmr->ifm_status = IFM_AVALID;
	ifmr->ifm_active = IFM_ETHER;

	/* No link: leave status valid but not active. */
	if (!adapter->hw.link_up)
		return;

	ifmr->ifm_status |= IFM_ACTIVE;
	/*
	 * NOTE(review): IFM_1000_SX looks odd for a PRO/10GbE (82597EX)
	 * part -- confirm whether a 10G media type should be reported
	 * here instead.
	 */
	ifmr->ifm_active |= IFM_1000_SX | IFM_FDX;

	return;
}
890
891 /*********************************************************************
892 *
893 * Media Ioctl callback
894 *
895 * This routine is called when the user changes speed/duplex using
896 * media/mediopt option with ifconfig.
897 *
898 **********************************************************************/
899 static int
900 ixgb_media_change(struct ifnet * ifp)
901 {
902 struct adapter *adapter = ifp->if_softc;
903 struct ifmedia *ifm = &adapter->media;
904
905 INIT_DEBUGOUT("ixgb_media_change: begin");
906
907 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
908 return (EINVAL);
909
910 return (0);
911 }
912
/*********************************************************************
 *
 *  This routine maps the mbufs to tx descriptors.
 *
 *  On success the mbuf and its DMA map are owned by the TX ring
 *  (recorded in the last segment's buffer slot) until the descriptor
 *  is cleaned.  On failure the map is destroyed and the caller keeps
 *  ownership of m_head.
 *
 *  return 0 on success, positive errno on failure
 **********************************************************************/

static int
ixgb_encap(struct adapter * adapter, struct mbuf * m_head)
{
	u_int8_t        txd_popts;
	int             i, j, error, nsegs;

#if __FreeBSD_version < 500000
	struct ifvlan  *ifv = NULL;
#else
	struct m_tag   *mtag;
#endif
	bus_dma_segment_t segs[IXGB_MAX_SCATTER];
	bus_dmamap_t    map;
	struct ixgb_buffer *tx_buffer = NULL;
	struct ixgb_tx_desc *current_tx_desc = NULL;
	struct ifnet   *ifp = adapter->ifp;

	/*
	 * Force a cleanup if number of TX descriptors available hits the
	 * threshold
	 */
	if (adapter->num_tx_desc_avail <= IXGB_TX_CLEANUP_THRESHOLD) {
		ixgb_clean_transmit_interrupts(adapter);
	}
	/* Still below threshold after cleanup: tell caller to back off. */
	if (adapter->num_tx_desc_avail <= IXGB_TX_CLEANUP_THRESHOLD) {
		adapter->no_tx_desc_avail1++;
		return (ENOBUFS);
	}
	/*
	 * Map the packet for DMA.
	 */
	if (bus_dmamap_create(adapter->txtag, BUS_DMA_NOWAIT, &map)) {
		adapter->no_tx_map_avail++;
		return (ENOMEM);
	}
	error = bus_dmamap_load_mbuf_sg(adapter->txtag, map, m_head, segs,
	    &nsegs, BUS_DMA_NOWAIT);
	if (error != 0) {
		adapter->no_tx_dma_setup++;
		printf("ixgb%d: ixgb_encap: bus_dmamap_load_mbuf failed; "
		       "error %u\n", adapter->unit, error);
		bus_dmamap_destroy(adapter->txtag, map);
		return (error);
	}
	KASSERT(nsegs != 0, ("ixgb_encap: empty packet"));

	/* Not enough free descriptors to hold every segment. */
	if (nsegs > adapter->num_tx_desc_avail) {
		adapter->no_tx_desc_avail2++;
		bus_dmamap_destroy(adapter->txtag, map);
		return (ENOBUFS);
	}
	if (ifp->if_hwassist > 0) {
		ixgb_transmit_checksum_setup(adapter, m_head,
		    &txd_popts);
	} else
		txd_popts = 0;

	/* Find out if we are in vlan mode */
#if __FreeBSD_version < 500000
	if ((m_head->m_flags & (M_PROTO1 | M_PKTHDR)) == (M_PROTO1 | M_PKTHDR) &&
	    m_head->m_pkthdr.rcvif != NULL &&
	    m_head->m_pkthdr.rcvif->if_type == IFT_L2VLAN)
		ifv = m_head->m_pkthdr.rcvif->if_softc;
#else
	mtag = VLAN_OUTPUT_TAG(ifp, m_head);
#endif
	/* Fill one descriptor per DMA segment, wrapping at the ring end. */
	i = adapter->next_avail_tx_desc;
	for (j = 0; j < nsegs; j++) {
		tx_buffer = &adapter->tx_buffer_area[i];
		current_tx_desc = &adapter->tx_desc_base[i];

		current_tx_desc->buff_addr = htole64(segs[j].ds_addr);
		current_tx_desc->cmd_type_len = (adapter->txd_cmd | segs[j].ds_len);
		current_tx_desc->popts = txd_popts;
		if (++i == adapter->num_tx_desc)
			i = 0;

		tx_buffer->m_head = NULL;
	}

	adapter->num_tx_desc_avail -= nsegs;
	adapter->next_avail_tx_desc = i;

	/*
	 * NOTE: the `if' below deliberately spans both preprocessor
	 * branches; exactly one of the two headers is compiled in.
	 */
#if __FreeBSD_version < 500000
	if (ifv != NULL) {
		/* Set the vlan id */
		current_tx_desc->vlan = ifv->ifv_tag;
#else
	if (mtag != NULL) {
		/* Set the vlan id */
		current_tx_desc->vlan = VLAN_TAG_VALUE(mtag);
#endif

		/* Tell hardware to add tag */
		current_tx_desc->cmd_type_len |= IXGB_TX_DESC_CMD_VLE;
	}
	/* Record the mbuf and map on the packet's last descriptor slot. */
	tx_buffer->m_head = m_head;
	tx_buffer->map = map;
	bus_dmamap_sync(adapter->txtag, map, BUS_DMASYNC_PREWRITE);

	/*
	 * Last Descriptor of Packet needs End Of Packet (EOP)
	 */
	current_tx_desc->cmd_type_len |= (IXGB_TX_DESC_CMD_EOP);

	/*
	 * Advance the Transmit Descriptor Tail (Tdt), this tells the E1000
	 * that this frame is available to transmit.
	 */
	IXGB_WRITE_REG(&adapter->hw, TDT, i);

	return (0);
}
1033
1034 static void
1035 ixgb_set_promisc(struct adapter * adapter)
1036 {
1037
1038 u_int32_t reg_rctl;
1039 struct ifnet *ifp = adapter->ifp;
1040
1041 reg_rctl = IXGB_READ_REG(&adapter->hw, RCTL);
1042
1043 if (ifp->if_flags & IFF_PROMISC) {
1044 reg_rctl |= (IXGB_RCTL_UPE | IXGB_RCTL_MPE);
1045 IXGB_WRITE_REG(&adapter->hw, RCTL, reg_rctl);
1046 } else if (ifp->if_flags & IFF_ALLMULTI) {
1047 reg_rctl |= IXGB_RCTL_MPE;
1048 reg_rctl &= ~IXGB_RCTL_UPE;
1049 IXGB_WRITE_REG(&adapter->hw, RCTL, reg_rctl);
1050 }
1051 return;
1052 }
1053
1054 static void
1055 ixgb_disable_promisc(struct adapter * adapter)
1056 {
1057 u_int32_t reg_rctl;
1058
1059 reg_rctl = IXGB_READ_REG(&adapter->hw, RCTL);
1060
1061 reg_rctl &= (~IXGB_RCTL_UPE);
1062 reg_rctl &= (~IXGB_RCTL_MPE);
1063 IXGB_WRITE_REG(&adapter->hw, RCTL, reg_rctl);
1064
1065 return;
1066 }
1067
1068
1069 /*********************************************************************
1070 * Multicast Update
1071 *
1072 * This routine is called whenever multicast address list is updated.
1073 *
1074 **********************************************************************/
1075
1076 static void
1077 ixgb_set_multi(struct adapter * adapter)
1078 {
1079 u_int32_t reg_rctl = 0;
1080 u_int8_t mta[MAX_NUM_MULTICAST_ADDRESSES * IXGB_ETH_LENGTH_OF_ADDRESS];
1081 struct ifmultiaddr *ifma;
1082 int mcnt = 0;
1083 struct ifnet *ifp = adapter->ifp;
1084
1085 IOCTL_DEBUGOUT("ixgb_set_multi: begin");
1086
1087 IF_ADDR_LOCK(ifp);
1088 #if __FreeBSD_version < 500000
1089 LIST_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
1090 #else
1091 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
1092 #endif
1093 if (ifma->ifma_addr->sa_family != AF_LINK)
1094 continue;
1095
1096 bcopy(LLADDR((struct sockaddr_dl *) ifma->ifma_addr),
1097 &mta[mcnt * IXGB_ETH_LENGTH_OF_ADDRESS], IXGB_ETH_LENGTH_OF_ADDRESS);
1098 mcnt++;
1099 }
1100 IF_ADDR_UNLOCK(ifp);
1101
1102 if (mcnt > MAX_NUM_MULTICAST_ADDRESSES) {
1103 reg_rctl = IXGB_READ_REG(&adapter->hw, RCTL);
1104 reg_rctl |= IXGB_RCTL_MPE;
1105 IXGB_WRITE_REG(&adapter->hw, RCTL, reg_rctl);
1106 } else
1107 ixgb_mc_addr_list_update(&adapter->hw, mta, mcnt, 0);
1108
1109 return;
1110 }
1111
1112
1113 /*********************************************************************
1114 * Timer routine
1115 *
1116 * This routine checks for link status and updates statistics.
1117 *
1118 **********************************************************************/
1119
/*
 * ixgb_local_timer - periodic housekeeping callout.
 *
 * Runs every 2 seconds (2 * hz ticks): polls link state, prints link
 * transitions, refreshes statistics counters and optionally dumps debug
 * stats, then re-arms itself.  All work is done under the adapter lock.
 */
static void
ixgb_local_timer(void *arg)
{
	struct ifnet *ifp;
	struct adapter *adapter = arg;
	ifp = adapter->ifp;

	IXGB_LOCK(adapter);

	ixgb_check_for_link(&adapter->hw);
	ixgb_print_link_status(adapter);
	ixgb_update_stats_counters(adapter);
	/* Only dump hardware stats while the interface is up and debugging
	 * output was requested. */
	if (ixgb_display_debug_stats && ifp->if_drv_flags & IFF_DRV_RUNNING) {
		ixgb_print_hw_stats(adapter);
	}
	/* Re-arm for the next 2-second tick. */
	callout_reset(&adapter->timer, 2 * hz, ixgb_local_timer, adapter);

	IXGB_UNLOCK(adapter);
	return;
}
1140
1141 static void
1142 ixgb_print_link_status(struct adapter * adapter)
1143 {
1144 if (adapter->hw.link_up) {
1145 if (!adapter->link_active) {
1146 printf("ixgb%d: Link is up %d Mbps %s \n",
1147 adapter->unit,
1148 10000,
1149 "Full Duplex");
1150 adapter->link_active = 1;
1151 }
1152 } else {
1153 if (adapter->link_active) {
1154 printf("ixgb%d: Link is Down \n", adapter->unit);
1155 adapter->link_active = 0;
1156 }
1157 }
1158
1159 return;
1160 }
1161
1162
1163
1164 /*********************************************************************
1165 *
1166 * This routine disables all traffic on the adapter by issuing a
1167 * global reset on the MAC and deallocates TX/RX buffers.
1168 *
1169 **********************************************************************/
1170
/*
 * ixgb_stop - disable all traffic on the adapter.
 *
 * Masks interrupts, issues a global MAC reset, cancels the periodic
 * callout and frees the transmit/receive rings, then marks the interface
 * down for the stack.  The adapter lock must be held by the caller.
 */
static void
ixgb_stop(void *arg)
{
	struct ifnet *ifp;
	struct adapter *adapter = arg;
	ifp = adapter->ifp;

	IXGB_LOCK_ASSERT(adapter);

	INIT_DEBUGOUT("ixgb_stop: begin\n");
	ixgb_disable_intr(adapter);
	/*
	 * adapter_stopped is cleared before the stop call — presumably so
	 * ixgb_adapter_stop() performs the full reset rather than treating
	 * the adapter as already stopped; confirm against the shared code.
	 */
	adapter->hw.adapter_stopped = FALSE;
	ixgb_adapter_stop(&adapter->hw);
	callout_stop(&adapter->timer);
	ixgb_free_transmit_structures(adapter);
	ixgb_free_receive_structures(adapter);


	/* Tell the stack that the interface is no longer active */
	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);

	return;
}
1194
1195
1196 /*********************************************************************
1197 *
1198 * Determine hardware revision.
1199 *
1200 **********************************************************************/
/*
 * ixgb_identify_hardware - capture PCI identity and select MAC type.
 *
 * Ensures bus-mastering and memory-space access are enabled in the PCI
 * command register (re-enabling them if a BIOS left them off), then saves
 * the vendor/device/revision/subsystem IDs and maps the device ID to the
 * driver's mac_type.  Unknown device IDs are logged but not fatal here.
 */
static void
ixgb_identify_hardware(struct adapter * adapter)
{
	device_t dev = adapter->dev;

	/* Make sure our PCI config space has the necessary stuff set */
	adapter->hw.pci_cmd_word = pci_read_config(dev, PCIR_COMMAND, 2);
	if (!((adapter->hw.pci_cmd_word & PCIM_CMD_BUSMASTEREN) &&
	      (adapter->hw.pci_cmd_word & PCIM_CMD_MEMEN))) {
		printf("ixgb%d: Memory Access and/or Bus Master bits were not set!\n",
		       adapter->unit);
		/* Force both bits back on and write them out. */
		adapter->hw.pci_cmd_word |=
		    (PCIM_CMD_BUSMASTEREN | PCIM_CMD_MEMEN);
		pci_write_config(dev, PCIR_COMMAND, adapter->hw.pci_cmd_word, 2);
	}
	/* Save off the information about this board */
	adapter->hw.vendor_id = pci_get_vendor(dev);
	adapter->hw.device_id = pci_get_device(dev);
	adapter->hw.revision_id = pci_read_config(dev, PCIR_REVID, 1);
	adapter->hw.subsystem_vendor_id = pci_read_config(dev, PCIR_SUBVEND_0, 2);
	adapter->hw.subsystem_id = pci_read_config(dev, PCIR_SUBDEV_0, 2);

	/* Set MacType, etc. based on this PCI info */
	switch (adapter->hw.device_id) {
	case IXGB_DEVICE_ID_82597EX:
	case IXGB_DEVICE_ID_82597EX_SR:
		adapter->hw.mac_type = ixgb_82597;
		break;
	default:
		/* NOTE(review): mac_type is left unset on unknown IDs;
		 * callers presumably rely on attach failing later. */
		INIT_DEBUGOUT1("Unknown device if 0x%x", adapter->hw.device_id);
		printf("ixgb%d: unsupported device id 0x%x\n", adapter->unit, adapter->hw.device_id);
	}

	return;
}
1236
/*
 * ixgb_allocate_pci_resources - map register memory and hook the IRQ.
 *
 * Allocates the memory-mapped register BAR (rid IXGB_MMBA), records its
 * bus-space tag/handle for the register access macros, then allocates a
 * shareable interrupt and registers ixgb_intr as the handler.
 *
 * Returns 0 on success or ENXIO on any allocation/setup failure; partial
 * allocations are released later by ixgb_free_pci_resources().
 */
static int
ixgb_allocate_pci_resources(struct adapter * adapter)
{
	int rid;
	device_t dev = adapter->dev;

	rid = IXGB_MMBA;
	adapter->res_memory = bus_alloc_resource(dev, SYS_RES_MEMORY,
						 &rid, 0, ~0, 1,
						 RF_ACTIVE);
	if (!(adapter->res_memory)) {
		printf("ixgb%d: Unable to allocate bus resource: memory\n",
		       adapter->unit);
		return (ENXIO);
	}
	/* Stash tag/handle; IXGB_READ_REG/IXGB_WRITE_REG go through these. */
	adapter->osdep.mem_bus_space_tag =
	    rman_get_bustag(adapter->res_memory);
	adapter->osdep.mem_bus_space_handle =
	    rman_get_bushandle(adapter->res_memory);
	adapter->hw.hw_addr = (uint8_t *) & adapter->osdep.mem_bus_space_handle;

	rid = 0x0;
	adapter->res_interrupt = bus_alloc_resource(dev, SYS_RES_IRQ,
						    &rid, 0, ~0, 1,
						    RF_SHAREABLE | RF_ACTIVE);
	if (!(adapter->res_interrupt)) {
		printf("ixgb%d: Unable to allocate bus resource: interrupt\n",
		       adapter->unit);
		return (ENXIO);
	}
	if (bus_setup_intr(dev, adapter->res_interrupt,
			   INTR_TYPE_NET | INTR_MPSAFE,
			   (void (*) (void *))ixgb_intr, adapter,
			   &adapter->int_handler_tag)) {
		printf("ixgb%d: Error registering interrupt handler!\n",
		       adapter->unit);
		return (ENXIO);
	}
	adapter->hw.back = &adapter->osdep;

	return (0);
}
1279
/*
 * ixgb_free_pci_resources - undo ixgb_allocate_pci_resources().
 *
 * Tears down the interrupt handler and releases the IRQ, memory BAR and
 * (if ever allocated) I/O port resources.  Safe to call with partially
 * allocated resources: each release is guarded by a NULL check.
 */
static void
ixgb_free_pci_resources(struct adapter * adapter)
{
	device_t dev = adapter->dev;

	if (adapter->res_interrupt != NULL) {
		bus_teardown_intr(dev, adapter->res_interrupt,
				  adapter->int_handler_tag);
		bus_release_resource(dev, SYS_RES_IRQ, 0,
				     adapter->res_interrupt);
	}
	if (adapter->res_memory != NULL) {
		bus_release_resource(dev, SYS_RES_MEMORY, IXGB_MMBA,
				     adapter->res_memory);
	}
	if (adapter->res_ioport != NULL) {
		bus_release_resource(dev, SYS_RES_IOPORT, adapter->io_rid,
				     adapter->res_ioport);
	}
	return;
}
1301
1302 /*********************************************************************
1303 *
1304 * Initialize the hardware to a configuration as specified by the
1305 * adapter structure. The controller is reset, the EEPROM is
1306 * verified, the MAC address is set, then the shared initialization
1307 * routines are called.
1308 *
1309 **********************************************************************/
1310 static int
1311 ixgb_hardware_init(struct adapter * adapter)
1312 {
1313 /* Issue a global reset */
1314 adapter->hw.adapter_stopped = FALSE;
1315 ixgb_adapter_stop(&adapter->hw);
1316
1317 /* Make sure we have a good EEPROM before we read from it */
1318 if (!ixgb_validate_eeprom_checksum(&adapter->hw)) {
1319 printf("ixgb%d: The EEPROM Checksum Is Not Valid\n",
1320 adapter->unit);
1321 return (EIO);
1322 }
1323 if (!ixgb_init_hw(&adapter->hw)) {
1324 printf("ixgb%d: Hardware Initialization Failed",
1325 adapter->unit);
1326 return (EIO);
1327 }
1328
1329 return (0);
1330 }
1331
1332 /*********************************************************************
1333 *
1334 * Setup networking device structure and register an interface.
1335 *
1336 **********************************************************************/
/*
 * ixgb_setup_interface - create and attach the network interface.
 *
 * Allocates the ifnet, fills in the standard callbacks (init/ioctl/
 * start/watchdog), advertises hardware checksum, long-frame and (on
 * FreeBSD 5+) VLAN capabilities, attaches to the ethernet layer and
 * registers the supported media types.  The ifnet pointer is stored in
 * adapter->ifp; failure to allocate it is fatal (panic).
 */
static void
ixgb_setup_interface(device_t dev, struct adapter * adapter)
{
	struct ifnet *ifp;
	INIT_DEBUGOUT("ixgb_setup_interface: begin");

	ifp = adapter->ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL)
		panic("%s: can not if_alloc()\n", device_get_nameunit(dev));
#if __FreeBSD_version >= 502000
	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
#else
	ifp->if_unit = adapter->unit;
	ifp->if_name = "ixgb";
#endif
	ifp->if_mtu = ETHERMTU;
	ifp->if_baudrate = 1000000000;
	ifp->if_init = ixgb_init;
	ifp->if_softc = adapter;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = ixgb_ioctl;
	ifp->if_start = ixgb_start;
	ifp->if_watchdog = ixgb_watchdog;
	/* Cap the software send queue at one less than the TX ring size. */
	ifp->if_snd.ifq_maxlen = adapter->num_tx_desc - 1;

#if __FreeBSD_version < 500000
	ether_ifattach(ifp, ETHER_BPF_SUPPORTED);
#else
	ether_ifattach(ifp, adapter->hw.curr_mac_addr);
#endif

	ifp->if_capabilities = IFCAP_HWCSUM;

	/*
	 * Tell the upper layer(s) we support long frames.
	 */
	ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);

#if __FreeBSD_version >= 500000
	ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU;
#endif

	ifp->if_capenable = ifp->if_capabilities;

	/* NOTE(review): IFCAP_POLLING is added to if_capabilities after
	 * if_capenable was copied, so polling starts disabled. */
#ifdef DEVICE_POLLING
	ifp->if_capabilities |= IFCAP_POLLING;
#endif

	/*
	 * Specify the media types supported by this adapter and register
	 * callbacks to update media and link information
	 */
	ifmedia_init(&adapter->media, IFM_IMASK, ixgb_media_change,
		     ixgb_media_status);
	ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_SX | IFM_FDX,
		    0, NULL);
	ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_SX,
		    0, NULL);
	ifmedia_add(&adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL);
	ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);

	return;
}
1400
1401 /********************************************************************
1402 * Manage DMA'able memory.
1403 *******************************************************************/
1404 static void
1405 ixgb_dmamap_cb(void *arg, bus_dma_segment_t * segs, int nseg, int error)
1406 {
1407 if (error)
1408 return;
1409 *(bus_addr_t *) arg = segs->ds_addr;
1410 return;
1411 }
1412
/*
 * ixgb_dma_malloc - allocate and bus-load a contiguous DMA region.
 *
 * Creates a page-aligned, single-segment DMA tag for 'size' bytes,
 * allocates the memory, and loads the map so dma->dma_paddr receives
 * the bus address (via ixgb_dmamap_cb).  On success fills in all of
 * 'dma' and returns 0; on failure unwinds the partially acquired
 * resources through the fail_* labels and returns the bus_dma error.
 */
static int
ixgb_dma_malloc(struct adapter * adapter, bus_size_t size,
		struct ixgb_dma_alloc * dma, int mapflags)
{
	int r;

	r = bus_dma_tag_create(NULL,	/* parent */
			       PAGE_SIZE, 0,	/* alignment, bounds */
			       BUS_SPACE_MAXADDR,	/* lowaddr */
			       BUS_SPACE_MAXADDR,	/* highaddr */
			       NULL, NULL,	/* filter, filterarg */
			       size,	/* maxsize */
			       1,	/* nsegments */
			       size,	/* maxsegsize */
			       BUS_DMA_ALLOCNOW,	/* flags */
#if __FreeBSD_version >= 502000
			       NULL,	/* lockfunc */
			       NULL,	/* lockfuncarg */
#endif
			       &dma->dma_tag);
	if (r != 0) {
		printf("ixgb%d: ixgb_dma_malloc: bus_dma_tag_create failed; "
		       "error %u\n", adapter->unit, r);
		goto fail_0;
	}
	r = bus_dmamem_alloc(dma->dma_tag, (void **)&dma->dma_vaddr,
			     BUS_DMA_NOWAIT, &dma->dma_map);
	if (r != 0) {
		printf("ixgb%d: ixgb_dma_malloc: bus_dmamem_alloc failed; "
		       "error %u\n", adapter->unit, r);
		goto fail_1;
	}
	/* The callback writes the segment's bus address into dma_paddr. */
	r = bus_dmamap_load(dma->dma_tag, dma->dma_map, dma->dma_vaddr,
			    size,
			    ixgb_dmamap_cb,
			    &dma->dma_paddr,
			    mapflags | BUS_DMA_NOWAIT);
	if (r != 0) {
		printf("ixgb%d: ixgb_dma_malloc: bus_dmamap_load failed; "
		       "error %u\n", adapter->unit, r);
		goto fail_2;
	}
	dma->dma_size = size;
	return (0);
	/* Error unwind: release in reverse order of acquisition. */
fail_2:
	bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map);
fail_1:
	bus_dma_tag_destroy(dma->dma_tag);
fail_0:
	dma->dma_map = NULL;
	dma->dma_tag = NULL;
	return (r);
}
1466
1467
1468
/*
 * ixgb_dma_free - release a region allocated by ixgb_dma_malloc().
 * Unloads the map, frees the memory and destroys the tag, in that order.
 */
static void
ixgb_dma_free(struct adapter * adapter, struct ixgb_dma_alloc * dma)
{
	bus_dmamap_unload(dma->dma_tag, dma->dma_map);
	bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map);
	bus_dma_tag_destroy(dma->dma_tag);
}
1476
1477 /*********************************************************************
1478 *
1479 * Allocate memory for tx_buffer structures. The tx_buffer stores all
1480 * the information needed to transmit a packet on the wire.
1481 *
1482 **********************************************************************/
1483 static int
1484 ixgb_allocate_transmit_structures(struct adapter * adapter)
1485 {
1486 if (!(adapter->tx_buffer_area =
1487 (struct ixgb_buffer *) malloc(sizeof(struct ixgb_buffer) *
1488 adapter->num_tx_desc, M_DEVBUF,
1489 M_NOWAIT | M_ZERO))) {
1490 printf("ixgb%d: Unable to allocate tx_buffer memory\n",
1491 adapter->unit);
1492 return ENOMEM;
1493 }
1494 bzero(adapter->tx_buffer_area,
1495 sizeof(struct ixgb_buffer) * adapter->num_tx_desc);
1496
1497 return 0;
1498 }
1499
1500 /*********************************************************************
1501 *
1502 * Allocate and initialize transmit structures.
1503 *
1504 **********************************************************************/
/*
 * ixgb_setup_transmit_structures - allocate and initialize TX state.
 *
 * Creates the DMA tag used to map outgoing mbuf chains (up to
 * IXGB_MAX_SCATTER segments of MCLBYTES each), allocates the tx_buffer
 * array, clears the descriptor ring and resets the ring indices and
 * checksum-offload context.
 *
 * Returns 0 on success, ENOMEM on allocation failure.
 */
static int
ixgb_setup_transmit_structures(struct adapter * adapter)
{
	/*
	 * Setup DMA descriptor areas.
	 */
	if (bus_dma_tag_create(NULL,	/* parent */
			       PAGE_SIZE, 0,	/* alignment, bounds */
			       BUS_SPACE_MAXADDR,	/* lowaddr */
			       BUS_SPACE_MAXADDR,	/* highaddr */
			       NULL, NULL,	/* filter, filterarg */
			       MCLBYTES * IXGB_MAX_SCATTER,	/* maxsize */
			       IXGB_MAX_SCATTER,	/* nsegments */
			       MCLBYTES,	/* maxsegsize */
			       BUS_DMA_ALLOCNOW,	/* flags */
#if __FreeBSD_version >= 502000
			       NULL,	/* lockfunc */
			       NULL,	/* lockfuncarg */
#endif
			       &adapter->txtag)) {
		printf("ixgb%d: Unable to allocate TX DMA tag\n", adapter->unit);
		return (ENOMEM);
	}
	if (ixgb_allocate_transmit_structures(adapter))
		return ENOMEM;

	/* Clear the descriptor ring itself. */
	bzero((void *)adapter->tx_desc_base,
	      (sizeof(struct ixgb_tx_desc)) * adapter->num_tx_desc);

	adapter->next_avail_tx_desc = 0;
	adapter->oldest_used_tx_desc = 0;

	/* Set number of descriptors available */
	adapter->num_tx_desc_avail = adapter->num_tx_desc;

	/* Set checksum context */
	adapter->active_checksum_context = OFFLOAD_NONE;

	return 0;
}
1545
1546 /*********************************************************************
1547 *
1548 * Enable transmit unit.
1549 *
1550 **********************************************************************/
/*
 * ixgb_initialize_transmit_unit - program the hardware TX ring.
 *
 * Writes the descriptor ring base/length, resets head/tail pointers,
 * programs the TX interrupt delay, enables the transmitter via TCTL,
 * and seeds adapter->txd_cmd with the per-descriptor command bits
 * (descriptor type + report-status, plus IDE if a delay is configured).
 */
static void
ixgb_initialize_transmit_unit(struct adapter * adapter)
{
	u_int32_t reg_tctl;
	u_int64_t tdba = adapter->txdma.dma_paddr;

	/* Setup the Base and Length of the Tx Descriptor Ring */
	IXGB_WRITE_REG(&adapter->hw, TDBAL,
		       (tdba & 0x00000000ffffffffULL));
	IXGB_WRITE_REG(&adapter->hw, TDBAH, (tdba >> 32));
	IXGB_WRITE_REG(&adapter->hw, TDLEN,
		       adapter->num_tx_desc *
		       sizeof(struct ixgb_tx_desc));

	/* Setup the HW Tx Head and Tail descriptor pointers */
	IXGB_WRITE_REG(&adapter->hw, TDH, 0);
	IXGB_WRITE_REG(&adapter->hw, TDT, 0);


	HW_DEBUGOUT2("Base = %x, Length = %x\n",
		     IXGB_READ_REG(&adapter->hw, TDBAL),
		     IXGB_READ_REG(&adapter->hw, TDLEN));

	IXGB_WRITE_REG(&adapter->hw, TIDV, adapter->tx_int_delay);


	/* Program the Transmit Control Register */
	/* NOTE(review): the value read here is immediately overwritten by
	 * the assignment below (dead read); kept as-is. */
	reg_tctl = IXGB_READ_REG(&adapter->hw, TCTL);
	reg_tctl = IXGB_TCTL_TCE | IXGB_TCTL_TXEN | IXGB_TCTL_TPDE;
	IXGB_WRITE_REG(&adapter->hw, TCTL, reg_tctl);

	/* Setup Transmit Descriptor Settings for this adapter */
	adapter->txd_cmd = IXGB_TX_DESC_TYPE | IXGB_TX_DESC_CMD_RS;

	/* Ask for a delayed interrupt per descriptor when a delay is set. */
	if (adapter->tx_int_delay > 0)
		adapter->txd_cmd |= IXGB_TX_DESC_CMD_IDE;
	return;
}
1589
1590 /*********************************************************************
1591 *
1592 * Free all transmit related data structures.
1593 *
1594 **********************************************************************/
1595 static void
1596 ixgb_free_transmit_structures(struct adapter * adapter)
1597 {
1598 struct ixgb_buffer *tx_buffer;
1599 int i;
1600
1601 INIT_DEBUGOUT("free_transmit_structures: begin");
1602
1603 if (adapter->tx_buffer_area != NULL) {
1604 tx_buffer = adapter->tx_buffer_area;
1605 for (i = 0; i < adapter->num_tx_desc; i++, tx_buffer++) {
1606 if (tx_buffer->m_head != NULL) {
1607 bus_dmamap_unload(adapter->txtag, tx_buffer->map);
1608 bus_dmamap_destroy(adapter->txtag, tx_buffer->map);
1609 m_freem(tx_buffer->m_head);
1610 }
1611 tx_buffer->m_head = NULL;
1612 }
1613 }
1614 if (adapter->tx_buffer_area != NULL) {
1615 free(adapter->tx_buffer_area, M_DEVBUF);
1616 adapter->tx_buffer_area = NULL;
1617 }
1618 if (adapter->txtag != NULL) {
1619 bus_dma_tag_destroy(adapter->txtag);
1620 adapter->txtag = NULL;
1621 }
1622 return;
1623 }
1624
1625 /*********************************************************************
1626 *
1627 * The offload context needs to be set when we transfer the first
1628 * packet of a particular protocol (TCP/UDP). We change the
1629 * context only if the protocol type changes.
1630 *
1631 **********************************************************************/
/*
 * ixgb_transmit_checksum_setup - program the TX checksum-offload context.
 *
 * Sets *txd_popts to the per-packet option bits for 'mp'.  If the packet
 * requests TCP or UDP checksum offload and the active context already
 * matches, nothing else is done.  Otherwise a context descriptor is
 * written into the ring (consuming one TX descriptor) describing where
 * the hardware must insert the checksum.
 *
 * NOTE(review): offsets assume a plain IPv4 header with no options
 * (ENET_HEADER_SIZE + sizeof(struct ip)) — confirm against callers.
 */
static void
ixgb_transmit_checksum_setup(struct adapter * adapter,
			     struct mbuf * mp,
			     u_int8_t * txd_popts)
{
	struct ixgb_context_desc *TXD;
	struct ixgb_buffer *tx_buffer;
	int curr_txd;

	if (mp->m_pkthdr.csum_flags) {

		if (mp->m_pkthdr.csum_flags & CSUM_TCP) {
			*txd_popts = IXGB_TX_DESC_POPTS_TXSM;
			/* Context already set up for TCP: nothing to do. */
			if (adapter->active_checksum_context == OFFLOAD_TCP_IP)
				return;
			else
				adapter->active_checksum_context = OFFLOAD_TCP_IP;
		} else if (mp->m_pkthdr.csum_flags & CSUM_UDP) {
			*txd_popts = IXGB_TX_DESC_POPTS_TXSM;
			/* Context already set up for UDP: nothing to do. */
			if (adapter->active_checksum_context == OFFLOAD_UDP_IP)
				return;
			else
				adapter->active_checksum_context = OFFLOAD_UDP_IP;
		} else {
			/* Unsupported offload flags: no options requested. */
			*txd_popts = 0;
			return;
		}
	} else {
		*txd_popts = 0;
		return;
	}

	/*
	 * If we reach this point, the checksum offload context needs to be
	 * reset.
	 */
	curr_txd = adapter->next_avail_tx_desc;
	tx_buffer = &adapter->tx_buffer_area[curr_txd];
	TXD = (struct ixgb_context_desc *) & adapter->tx_desc_base[curr_txd];


	/* Checksum computation starts after the Ethernet + IP headers. */
	TXD->tucss = ENET_HEADER_SIZE + sizeof(struct ip);
	TXD->tucse = 0;

	TXD->mss = 0;

	/* Point the hardware at the TCP/UDP checksum field to fill in. */
	if (adapter->active_checksum_context == OFFLOAD_TCP_IP) {
		TXD->tucso =
		    ENET_HEADER_SIZE + sizeof(struct ip) +
		    offsetof(struct tcphdr, th_sum);
	} else if (adapter->active_checksum_context == OFFLOAD_UDP_IP) {
		TXD->tucso =
		    ENET_HEADER_SIZE + sizeof(struct ip) +
		    offsetof(struct udphdr, uh_sum);
	}
	TXD->cmd_type_len = IXGB_CONTEXT_DESC_CMD_TCP | IXGB_TX_DESC_CMD_RS | IXGB_CONTEXT_DESC_CMD_IDE;

	/* Context descriptors carry no mbuf. */
	tx_buffer->m_head = NULL;

	if (++curr_txd == adapter->num_tx_desc)
		curr_txd = 0;

	/* The context descriptor consumed one ring slot. */
	adapter->num_tx_desc_avail--;
	adapter->next_avail_tx_desc = curr_txd;
	return;
}
1698
1699 /**********************************************************************
1700 *
1701 * Examine each tx_buffer in the used queue. If the hardware is done
1702 * processing the packet then free associated resources. The
1703 * tx_buffer is put back on the free queue.
1704 *
1705 **********************************************************************/
/*
 * ixgb_clean_transmit_interrupts - reclaim completed TX descriptors.
 *
 * Walks the ring from oldest_used_tx_desc while the hardware has set the
 * Descriptor Done (DD) bit, unmapping and freeing each completed mbuf and
 * returning its descriptor to the available pool.  When enough room has
 * been recovered, IFF_DRV_OACTIVE is cleared and the watchdog timer is
 * reset or re-armed.  Caller must hold the adapter lock.
 */
static void
ixgb_clean_transmit_interrupts(struct adapter * adapter)
{
	int i, num_avail;
	struct ixgb_buffer *tx_buffer;
	struct ixgb_tx_desc *tx_desc;

	IXGB_LOCK_ASSERT(adapter);

	/* Ring is already empty: nothing to reclaim. */
	if (adapter->num_tx_desc_avail == adapter->num_tx_desc)
		return;

#ifdef _SV_
	adapter->clean_tx_interrupts++;
#endif
	num_avail = adapter->num_tx_desc_avail;
	i = adapter->oldest_used_tx_desc;

	tx_buffer = &adapter->tx_buffer_area[i];
	tx_desc = &adapter->tx_desc_base[i];

	while (tx_desc->status & IXGB_TX_DESC_STATUS_DD) {

		/* Clear DD so the descriptor isn't reclaimed twice. */
		tx_desc->status = 0;
		num_avail++;

		if (tx_buffer->m_head) {
			bus_dmamap_sync(adapter->txtag, tx_buffer->map,
					BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(adapter->txtag, tx_buffer->map);
			bus_dmamap_destroy(adapter->txtag, tx_buffer->map);
			m_freem(tx_buffer->m_head);
			tx_buffer->m_head = NULL;
		}
		if (++i == adapter->num_tx_desc)
			i = 0;

		tx_buffer = &adapter->tx_buffer_area[i];
		tx_desc = &adapter->tx_desc_base[i];
	}

	adapter->oldest_used_tx_desc = i;

	/*
	 * If we have enough room, clear IFF_DRV_OACTIVE to tell the stack that
	 * it is OK to send packets. If there are no pending descriptors,
	 * clear the timeout. Otherwise, if some descriptors have been freed,
	 * restart the timeout.
	 */
	if (num_avail > IXGB_TX_CLEANUP_THRESHOLD) {
		struct ifnet *ifp = adapter->ifp;

		ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
		if (num_avail == adapter->num_tx_desc)
			ifp->if_timer = 0;
		else if (num_avail == adapter->num_tx_desc_avail)
			ifp->if_timer = IXGB_TX_TIMEOUT;
	}
	adapter->num_tx_desc_avail = num_avail;
	return;
}
1767
1768
1769 /*********************************************************************
1770 *
1771 * Get a buffer from system mbuf buffer pool.
1772 *
1773 **********************************************************************/
/*
 * ixgb_get_buf - attach an mbuf cluster to RX descriptor slot 'i'.
 *
 * If 'nmp' is NULL a fresh cluster mbuf is allocated; otherwise the
 * caller's mbuf is recycled (data pointer and length reset).  The buffer
 * is DMA-loaded and its bus address written into the RX descriptor.
 *
 * Returns 0 on success, ENOBUFS if mbuf allocation fails, or the
 * bus_dmamap_load() error code (in which case the mbuf is freed).
 */
static int
ixgb_get_buf(int i, struct adapter * adapter,
	     struct mbuf * nmp)
{
	register struct mbuf *mp = nmp;
	struct ixgb_buffer *rx_buffer;
	struct ifnet *ifp;
	bus_addr_t paddr;
	int error;

	ifp = adapter->ifp;

	if (mp == NULL) {

		mp = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);

		if (mp == NULL) {
			adapter->mbuf_alloc_failed++;
			return (ENOBUFS);
		}
		mp->m_len = mp->m_pkthdr.len = MCLBYTES;
	} else {
		/* Recycle the caller's mbuf: reset to a full, single cluster. */
		mp->m_len = mp->m_pkthdr.len = MCLBYTES;
		mp->m_data = mp->m_ext.ext_buf;
		mp->m_next = NULL;
	}

	/* For standard MTU, align the IP header on a natural boundary. */
	if (ifp->if_mtu <= ETHERMTU) {
		m_adj(mp, ETHER_ALIGN);
	}
	rx_buffer = &adapter->rx_buffer_area[i];

	/*
	 * Using memory from the mbuf cluster pool, invoke the bus_dma
	 * machinery to arrange the memory mapping.
	 */
	error = bus_dmamap_load(adapter->rxtag, rx_buffer->map,
				mtod(mp, void *), mp->m_len,
				ixgb_dmamap_cb, &paddr, 0);
	if (error) {
		m_free(mp);
		return (error);
	}
	rx_buffer->m_head = mp;
	adapter->rx_desc_base[i].buff_addr = htole64(paddr);
	bus_dmamap_sync(adapter->rxtag, rx_buffer->map, BUS_DMASYNC_PREREAD);

	return (0);
}
1823
1824 /*********************************************************************
1825 *
1826 * Allocate memory for rx_buffer structures. Since we use one
1827 * rx_buffer per received packet, the maximum number of rx_buffer's
1828 * that we'll need is equal to the number of receive descriptors
1829 * that we've allocated.
1830 *
1831 **********************************************************************/
1832 static int
1833 ixgb_allocate_receive_structures(struct adapter * adapter)
1834 {
1835 int i, error;
1836 struct ixgb_buffer *rx_buffer;
1837
1838 if (!(adapter->rx_buffer_area =
1839 (struct ixgb_buffer *) malloc(sizeof(struct ixgb_buffer) *
1840 adapter->num_rx_desc, M_DEVBUF,
1841 M_NOWAIT | M_ZERO))) {
1842 printf("ixgb%d: Unable to allocate rx_buffer memory\n",
1843 adapter->unit);
1844 return (ENOMEM);
1845 }
1846 bzero(adapter->rx_buffer_area,
1847 sizeof(struct ixgb_buffer) * adapter->num_rx_desc);
1848
1849 error = bus_dma_tag_create(NULL, /* parent */
1850 PAGE_SIZE, 0, /* alignment, bounds */
1851 BUS_SPACE_MAXADDR, /* lowaddr */
1852 BUS_SPACE_MAXADDR, /* highaddr */
1853 NULL, NULL, /* filter, filterarg */
1854 MCLBYTES, /* maxsize */
1855 1, /* nsegments */
1856 MCLBYTES, /* maxsegsize */
1857 BUS_DMA_ALLOCNOW, /* flags */
1858 #if __FreeBSD_version >= 502000
1859 NULL, /* lockfunc */
1860 NULL, /* lockfuncarg */
1861 #endif
1862 &adapter->rxtag);
1863 if (error != 0) {
1864 printf("ixgb%d: ixgb_allocate_receive_structures: "
1865 "bus_dma_tag_create failed; error %u\n",
1866 adapter->unit, error);
1867 goto fail_0;
1868 }
1869 rx_buffer = adapter->rx_buffer_area;
1870 for (i = 0; i < adapter->num_rx_desc; i++, rx_buffer++) {
1871 error = bus_dmamap_create(adapter->rxtag, BUS_DMA_NOWAIT,
1872 &rx_buffer->map);
1873 if (error != 0) {
1874 printf("ixgb%d: ixgb_allocate_receive_structures: "
1875 "bus_dmamap_create failed; error %u\n",
1876 adapter->unit, error);
1877 goto fail_1;
1878 }
1879 }
1880
1881 for (i = 0; i < adapter->num_rx_desc; i++) {
1882 if (ixgb_get_buf(i, adapter, NULL) == ENOBUFS) {
1883 adapter->rx_buffer_area[i].m_head = NULL;
1884 adapter->rx_desc_base[i].buff_addr = 0;
1885 return (ENOBUFS);
1886 }
1887 }
1888
1889 return (0);
1890 fail_1:
1891 bus_dma_tag_destroy(adapter->rxtag);
1892 fail_0:
1893 adapter->rxtag = NULL;
1894 free(adapter->rx_buffer_area, M_DEVBUF);
1895 adapter->rx_buffer_area = NULL;
1896 return (error);
1897 }
1898
1899 /*********************************************************************
1900 *
1901 * Allocate and initialize receive structures.
1902 *
1903 **********************************************************************/
1904 static int
1905 ixgb_setup_receive_structures(struct adapter * adapter)
1906 {
1907 bzero((void *)adapter->rx_desc_base,
1908 (sizeof(struct ixgb_rx_desc)) * adapter->num_rx_desc);
1909
1910 if (ixgb_allocate_receive_structures(adapter))
1911 return ENOMEM;
1912
1913 /* Setup our descriptor pointers */
1914 adapter->next_rx_desc_to_check = 0;
1915 adapter->next_rx_desc_to_use = 0;
1916 return (0);
1917 }
1918
1919 /*********************************************************************
1920 *
1921 * Enable receive unit.
1922 *
1923 **********************************************************************/
/*
 * ixgb_initialize_receive_unit - program the hardware RX path.
 *
 * With receives disabled, writes the descriptor ring base/length and
 * head/tail pointers, programs the RX interrupt delay (RDTR/RAIDC),
 * enables TCP/UDP receive checksum offload if the interface has RXCSUM
 * enabled, selects the receive buffer size in RCTL, and finally turns
 * the receiver back on.
 */
static void
ixgb_initialize_receive_unit(struct adapter * adapter)
{
	u_int32_t reg_rctl;
	u_int32_t reg_rxcsum;
	u_int32_t reg_rxdctl;
	struct ifnet *ifp;
	u_int64_t rdba = adapter->rxdma.dma_paddr;

	ifp = adapter->ifp;

	/*
	 * Make sure receives are disabled while setting up the descriptor
	 * ring
	 */
	reg_rctl = IXGB_READ_REG(&adapter->hw, RCTL);
	IXGB_WRITE_REG(&adapter->hw, RCTL, reg_rctl & ~IXGB_RCTL_RXEN);

	/* Set the Receive Delay Timer Register */
	IXGB_WRITE_REG(&adapter->hw, RDTR,
		       adapter->rx_int_delay);


	/* Setup the Base and Length of the Rx Descriptor Ring */
	IXGB_WRITE_REG(&adapter->hw, RDBAL,
		       (rdba & 0x00000000ffffffffULL));
	IXGB_WRITE_REG(&adapter->hw, RDBAH, (rdba >> 32));
	IXGB_WRITE_REG(&adapter->hw, RDLEN, adapter->num_rx_desc *
		       sizeof(struct ixgb_rx_desc));

	/* Setup the HW Rx Head and Tail Descriptor Pointers */
	IXGB_WRITE_REG(&adapter->hw, RDH, 0);

	/* Tail trails head by the full ring: all descriptors owned by HW. */
	IXGB_WRITE_REG(&adapter->hw, RDT, adapter->num_rx_desc - 1);



	/* Prefetch/host/write-back thresholds for the descriptor engine. */
	reg_rxdctl = RXDCTL_WTHRESH_DEFAULT << IXGB_RXDCTL_WTHRESH_SHIFT
		| RXDCTL_HTHRESH_DEFAULT << IXGB_RXDCTL_HTHRESH_SHIFT
		| RXDCTL_PTHRESH_DEFAULT << IXGB_RXDCTL_PTHRESH_SHIFT;
	IXGB_WRITE_REG(&adapter->hw, RXDCTL, reg_rxdctl);


	/* NOTE(review): raidc is forced on here, so the branch below is
	 * unconditional; kept as-is. */
	adapter->raidc = 1;
	if (adapter->raidc) {
		uint32_t raidc;
		uint8_t poll_threshold;
#define IXGB_RAIDC_POLL_DEFAULT 120

		/* Derive a 6-bit poll threshold from 1/16 of the ring size. */
		poll_threshold = ((adapter->num_rx_desc - 1) >> 3);
		poll_threshold >>= 1;
		poll_threshold &= 0x3F;
		raidc = IXGB_RAIDC_EN | IXGB_RAIDC_RXT_GATE |
			(IXGB_RAIDC_POLL_DEFAULT << IXGB_RAIDC_POLL_SHIFT) |
			(adapter->rx_int_delay << IXGB_RAIDC_DELAY_SHIFT) |
			poll_threshold;
		IXGB_WRITE_REG(&adapter->hw, RAIDC, raidc);
	}
	/* Enable Receive Checksum Offload for TCP and UDP ? */
	if (ifp->if_capenable & IFCAP_RXCSUM) {
		reg_rxcsum = IXGB_READ_REG(&adapter->hw, RXCSUM);
		reg_rxcsum |= IXGB_RXCSUM_TUOFL;
		IXGB_WRITE_REG(&adapter->hw, RXCSUM, reg_rxcsum);
	}
	/* Setup the Receive Control Register */
	reg_rctl = IXGB_READ_REG(&adapter->hw, RCTL);
	reg_rctl &= ~(3 << IXGB_RCTL_MO_SHIFT);
	reg_rctl |= IXGB_RCTL_BAM | IXGB_RCTL_RDMTS_1_2 | IXGB_RCTL_SECRC |
		IXGB_RCTL_CFF |
		(adapter->hw.mc_filter_type << IXGB_RCTL_MO_SHIFT);

	/* Select the buffer-size bits matching what ixgb_get_buf() posts. */
	switch (adapter->rx_buffer_len) {
	default:
	case IXGB_RXBUFFER_2048:
		reg_rctl |= IXGB_RCTL_BSIZE_2048;
		break;
	case IXGB_RXBUFFER_4096:
		reg_rctl |= IXGB_RCTL_BSIZE_4096;
		break;
	case IXGB_RXBUFFER_8192:
		reg_rctl |= IXGB_RCTL_BSIZE_8192;
		break;
	case IXGB_RXBUFFER_16384:
		reg_rctl |= IXGB_RCTL_BSIZE_16384;
		break;
	}

	reg_rctl |= IXGB_RCTL_RXEN;


	/* Enable Receives */
	IXGB_WRITE_REG(&adapter->hw, RCTL, reg_rctl);

	return;
}
2019
2020 /*********************************************************************
2021 *
2022 * Free receive related data structures.
2023 *
2024 **********************************************************************/
2025 static void
2026 ixgb_free_receive_structures(struct adapter * adapter)
2027 {
2028 struct ixgb_buffer *rx_buffer;
2029 int i;
2030
2031 INIT_DEBUGOUT("free_receive_structures: begin");
2032
2033 if (adapter->rx_buffer_area != NULL) {
2034 rx_buffer = adapter->rx_buffer_area;
2035 for (i = 0; i < adapter->num_rx_desc; i++, rx_buffer++) {
2036 if (rx_buffer->map != NULL) {
2037 bus_dmamap_unload(adapter->rxtag, rx_buffer->map);
2038 bus_dmamap_destroy(adapter->rxtag, rx_buffer->map);
2039 }
2040 if (rx_buffer->m_head != NULL)
2041 m_freem(rx_buffer->m_head);
2042 rx_buffer->m_head = NULL;
2043 }
2044 }
2045 if (adapter->rx_buffer_area != NULL) {
2046 free(adapter->rx_buffer_area, M_DEVBUF);
2047 adapter->rx_buffer_area = NULL;
2048 }
2049 if (adapter->rxtag != NULL) {
2050 bus_dma_tag_destroy(adapter->rxtag);
2051 adapter->rxtag = NULL;
2052 }
2053 return;
2054 }
2055
2056 /*********************************************************************
2057 *
2058 * This routine executes in interrupt context. It replenishes
2059 * the mbufs in the descriptor and sends data which has been
2060 * dma'ed into host memory to upper layer.
2061 *
2062 * We loop at most count times if count is > 0, or until done if
2063 * count < 0.
2064 *
2065 *********************************************************************/
static void
ixgb_process_receive_interrupts(struct adapter * adapter, int count)
{
	struct ifnet *ifp;
	struct mbuf *mp;
#if __FreeBSD_version < 500000
	struct ether_header *eh;
#endif
	int eop = 0;		/* set when current descriptor ends a packet */
	int len;
	u_int8_t accept_frame = 0;
	int i;			/* index of descriptor being examined */
	int next_to_use = 0;	/* next descriptor slot to refill */
	int eop_desc;		/* index of the last EOP descriptor seen */
	/* Pointer to the receive descriptor being examined. */
	struct ixgb_rx_desc *current_desc;

	/* Caller must hold the adapter lock; we drop it only around if_input. */
	IXGB_LOCK_ASSERT(adapter);

	ifp = adapter->ifp;
	i = adapter->next_rx_desc_to_check;
	next_to_use = adapter->next_rx_desc_to_use;
	eop_desc = adapter->next_rx_desc_to_check;
	current_desc = &adapter->rx_desc_base[i];

	/* Nothing written back by hardware yet (DD clear) -- bail out early. */
	if (!((current_desc->status) & IXGB_RX_DESC_STATUS_DD)) {
#ifdef _SV_
		adapter->no_pkts_avail++;
#endif
		return;
	}
	/*
	 * Walk completed descriptors.  'count' limits how many full packets
	 * (EOP descriptors) we consume per call; a negative count means no
	 * limit since it never reaches zero.
	 */
	while ((current_desc->status & IXGB_RX_DESC_STATUS_DD) && (count != 0)) {

		mp = adapter->rx_buffer_area[i].m_head;
		/* Make the DMA'd data visible to the CPU before touching it. */
		bus_dmamap_sync(adapter->rxtag, adapter->rx_buffer_area[i].map,
				BUS_DMASYNC_POSTREAD);
		accept_frame = 1;
		if (current_desc->status & IXGB_RX_DESC_STATUS_EOP) {
			count--;
			eop = 1;
		} else {
			eop = 0;
		}
		len = current_desc->length;

		/* Drop frames with CRC, symbol, parity or RX errors. */
		if (current_desc->errors & (IXGB_RX_DESC_ERRORS_CE |
			    IXGB_RX_DESC_ERRORS_SE | IXGB_RX_DESC_ERRORS_P |
					    IXGB_RX_DESC_ERRORS_RXE)) {
			accept_frame = 0;
		}
		if (accept_frame) {

			/* Assign correct length to the current fragment */
			mp->m_len = len;

			if (adapter->fmp == NULL) {
				mp->m_pkthdr.len = len;
				adapter->fmp = mp;	/* Store the first mbuf */
				adapter->lmp = mp;
			} else {
				/* Chain mbuf's together */
				mp->m_flags &= ~M_PKTHDR;
				adapter->lmp->m_next = mp;
				adapter->lmp = adapter->lmp->m_next;
				adapter->fmp->m_pkthdr.len += len;
			}

			if (eop) {
				/* Remember where the packet ended for the
				 * 82597EX replenish workaround below. */
				eop_desc = i;
				adapter->fmp->m_pkthdr.rcvif = ifp;

#if __FreeBSD_version < 500000
				eh = mtod(adapter->fmp, struct ether_header *);

				/* Remove ethernet header from mbuf */
				m_adj(adapter->fmp, sizeof(struct ether_header));
				ixgb_receive_checksum(adapter, current_desc,
						      adapter->fmp);

				if (current_desc->status & IXGB_RX_DESC_STATUS_VP)
					VLAN_INPUT_TAG(eh, adapter->fmp,
						       current_desc->special);
				else
					ether_input(ifp, eh, adapter->fmp);
#else
				ixgb_receive_checksum(adapter, current_desc,
						      adapter->fmp);
				/* VLAN_INPUT_TAG consumes the mbuf and NULLs
				 * fmp on the tagged path. */
				if (current_desc->status & IXGB_RX_DESC_STATUS_VP)
					VLAN_INPUT_TAG(ifp, adapter->fmp,
						       current_desc->special,
						       adapter->fmp = NULL);

				if (adapter->fmp != NULL) {
					/* Drop the lock across the stack
					 * up-call to avoid recursion/LOR. */
					IXGB_UNLOCK(adapter);
					(*ifp->if_input) (ifp, adapter->fmp);
					IXGB_LOCK(adapter);
				}
#endif
				adapter->fmp = NULL;
				adapter->lmp = NULL;
			}
			adapter->rx_buffer_area[i].m_head = NULL;
		} else {
			/* Bad frame: discard the whole partially-built chain. */
			adapter->dropped_pkts++;
			if (adapter->fmp != NULL)
				m_freem(adapter->fmp);
			adapter->fmp = NULL;
			adapter->lmp = NULL;
		}

		/* Zero out the receive descriptors status */
		current_desc->status = 0;

		/* Advance our pointers to the next descriptor */
		if (++i == adapter->num_rx_desc) {
			i = 0;
			current_desc = adapter->rx_desc_base;
		} else
			current_desc++;
	}
	adapter->next_rx_desc_to_check = i;

	/* Step back to the most recently processed descriptor. */
	if (--i < 0)
		i = (adapter->num_rx_desc - 1);

	/*
	 * 82597EX: Workaround for redundent write back in receive descriptor ring (causes
 	 * memory corruption). Avoid using and re-submitting the most recently received RX
	 * descriptor back to hardware.
	 *
	 * if(Last written back descriptor == EOP bit set descriptor)
	 * 	then avoid re-submitting the most recently received RX descriptor 
	 *	back to hardware.
	 * if(Last written back descriptor != EOP bit set descriptor)
	 *	then avoid re-submitting the most recently received RX descriptors
	 * 	till last EOP bit set descriptor. 
	 */
	if (eop_desc != i) {
		if (++eop_desc == adapter->num_rx_desc)
			eop_desc = 0;
		i = eop_desc;
	}
	/* Replenish the descriptors with new mbufs till last EOP bit set descriptor */
	while (next_to_use != i) {
		current_desc = &adapter->rx_desc_base[next_to_use];
		if ((current_desc->errors & (IXGB_RX_DESC_ERRORS_CE |
			    IXGB_RX_DESC_ERRORS_SE | IXGB_RX_DESC_ERRORS_P |
					     IXGB_RX_DESC_ERRORS_RXE))) {
			/* Errored slot: recycle the mbuf already attached. */
			mp = adapter->rx_buffer_area[next_to_use].m_head;
			ixgb_get_buf(next_to_use, adapter, mp);
		} else {
			/* Clean slot: allocate a fresh mbuf; stop refilling
			 * on allocation failure. */
			if (ixgb_get_buf(next_to_use, adapter, NULL) == ENOBUFS)
				break;
		}
		/* Advance our pointers to the next descriptor */
		if (++next_to_use == adapter->num_rx_desc) {
			next_to_use = 0;
			current_desc = adapter->rx_desc_base;
		} else
			current_desc++;
	}
	adapter->next_rx_desc_to_use = next_to_use;
	/* RDT must point at the last valid descriptor, i.e. one before the
	 * next slot to fill. */
	if (--next_to_use < 0)
		next_to_use = (adapter->num_rx_desc - 1);
	/* Advance the IXGB's Receive Queue #0 "Tail Pointer" */
	IXGB_WRITE_REG(&adapter->hw, RDT, next_to_use);

	return;
}
2235
2236 /*********************************************************************
2237 *
2238 * Verify that the hardware indicated that the checksum is valid.
2239 * Inform the stack about the status of checksum so that stack
2240 * doesn't spend time verifying the checksum.
2241 *
2242 *********************************************************************/
2243 static void
2244 ixgb_receive_checksum(struct adapter * adapter,
2245 struct ixgb_rx_desc * rx_desc,
2246 struct mbuf * mp)
2247 {
2248 if (rx_desc->status & IXGB_RX_DESC_STATUS_IXSM) {
2249 mp->m_pkthdr.csum_flags = 0;
2250 return;
2251 }
2252 if (rx_desc->status & IXGB_RX_DESC_STATUS_IPCS) {
2253 /* Did it pass? */
2254 if (!(rx_desc->errors & IXGB_RX_DESC_ERRORS_IPE)) {
2255 /* IP Checksum Good */
2256 mp->m_pkthdr.csum_flags = CSUM_IP_CHECKED;
2257 mp->m_pkthdr.csum_flags |= CSUM_IP_VALID;
2258
2259 } else {
2260 mp->m_pkthdr.csum_flags = 0;
2261 }
2262 }
2263 if (rx_desc->status & IXGB_RX_DESC_STATUS_TCPCS) {
2264 /* Did it pass? */
2265 if (!(rx_desc->errors & IXGB_RX_DESC_ERRORS_TCPE)) {
2266 mp->m_pkthdr.csum_flags |=
2267 (CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
2268 mp->m_pkthdr.csum_data = htons(0xffff);
2269 }
2270 }
2271 return;
2272 }
2273
2274
2275 static void
2276 ixgb_enable_vlans(struct adapter * adapter)
2277 {
2278 uint32_t ctrl;
2279
2280 ctrl = IXGB_READ_REG(&adapter->hw, CTRL0);
2281 ctrl |= IXGB_CTRL0_VME;
2282 IXGB_WRITE_REG(&adapter->hw, CTRL0, ctrl);
2283
2284 return;
2285 }
2286
2287
2288 static void
2289 ixgb_enable_intr(struct adapter * adapter)
2290 {
2291 IXGB_WRITE_REG(&adapter->hw, IMS, (IXGB_INT_RXT0 | IXGB_INT_TXDW |
2292 IXGB_INT_RXDMT0 | IXGB_INT_LSC | IXGB_INT_RXO));
2293 return;
2294 }
2295
2296 static void
2297 ixgb_disable_intr(struct adapter * adapter)
2298 {
2299 IXGB_WRITE_REG(&adapter->hw, IMC, ~0);
2300 return;
2301 }
2302
2303 void
2304 ixgb_write_pci_cfg(struct ixgb_hw * hw,
2305 uint32_t reg,
2306 uint16_t * value)
2307 {
2308 pci_write_config(((struct ixgb_osdep *) hw->back)->dev, reg,
2309 *value, 2);
2310 }
2311
2312 /**********************************************************************
2313 *
2314 * Update the board statistics counters.
2315 *
2316 **********************************************************************/
/*
 * Accumulate the hardware statistics registers into the driver's
 * software counters, then publish the totals into the ifnet statistics
 * the OS reports.  NOTE(review): this assumes the counters are
 * clear-on-read, so each read is a delta to add -- confirm against the
 * 82597EX datasheet.
 */
static void
ixgb_update_stats_counters(struct adapter * adapter)
{
	struct ifnet *ifp;

	/* Receive-side counters (good/broadcast/multicast packet and
	 * octet counts are split into low/high register halves). */
	adapter->stats.crcerrs += IXGB_READ_REG(&adapter->hw, CRCERRS);
	adapter->stats.gprcl += IXGB_READ_REG(&adapter->hw, GPRCL);
	adapter->stats.gprch += IXGB_READ_REG(&adapter->hw, GPRCH);
	adapter->stats.gorcl += IXGB_READ_REG(&adapter->hw, GORCL);
	adapter->stats.gorch += IXGB_READ_REG(&adapter->hw, GORCH);
	adapter->stats.bprcl += IXGB_READ_REG(&adapter->hw, BPRCL);
	adapter->stats.bprch += IXGB_READ_REG(&adapter->hw, BPRCH);
	adapter->stats.mprcl += IXGB_READ_REG(&adapter->hw, MPRCL);
	adapter->stats.mprch += IXGB_READ_REG(&adapter->hw, MPRCH);
	adapter->stats.roc += IXGB_READ_REG(&adapter->hw, ROC);

	/* Error, flow-control and transmit counters. */
	adapter->stats.mpc += IXGB_READ_REG(&adapter->hw, MPC);
	adapter->stats.dc += IXGB_READ_REG(&adapter->hw, DC);
	adapter->stats.rlec += IXGB_READ_REG(&adapter->hw, RLEC);
	adapter->stats.xonrxc += IXGB_READ_REG(&adapter->hw, XONRXC);
	adapter->stats.xontxc += IXGB_READ_REG(&adapter->hw, XONTXC);
	adapter->stats.xoffrxc += IXGB_READ_REG(&adapter->hw, XOFFRXC);
	adapter->stats.xofftxc += IXGB_READ_REG(&adapter->hw, XOFFTXC);
	adapter->stats.gptcl += IXGB_READ_REG(&adapter->hw, GPTCL);
	adapter->stats.gptch += IXGB_READ_REG(&adapter->hw, GPTCH);
	adapter->stats.gotcl += IXGB_READ_REG(&adapter->hw, GOTCL);
	adapter->stats.gotch += IXGB_READ_REG(&adapter->hw, GOTCH);
	adapter->stats.ruc += IXGB_READ_REG(&adapter->hw, RUC);
	adapter->stats.rfc += IXGB_READ_REG(&adapter->hw, RFC);
	adapter->stats.rjc += IXGB_READ_REG(&adapter->hw, RJC);
	adapter->stats.torl += IXGB_READ_REG(&adapter->hw, TORL);
	adapter->stats.torh += IXGB_READ_REG(&adapter->hw, TORH);
	adapter->stats.totl += IXGB_READ_REG(&adapter->hw, TOTL);
	adapter->stats.toth += IXGB_READ_REG(&adapter->hw, TOTH);
	adapter->stats.tprl += IXGB_READ_REG(&adapter->hw, TPRL);
	adapter->stats.tprh += IXGB_READ_REG(&adapter->hw, TPRH);
	adapter->stats.tptl += IXGB_READ_REG(&adapter->hw, TPTL);
	adapter->stats.tpth += IXGB_READ_REG(&adapter->hw, TPTH);
	adapter->stats.plt64c += IXGB_READ_REG(&adapter->hw, PLT64C);
	adapter->stats.mptcl += IXGB_READ_REG(&adapter->hw, MPTCL);
	adapter->stats.mptch += IXGB_READ_REG(&adapter->hw, MPTCH);
	adapter->stats.bptcl += IXGB_READ_REG(&adapter->hw, BPTCL);
	adapter->stats.bptch += IXGB_READ_REG(&adapter->hw, BPTCH);

	/* Unicast/VLAN/jumbo counters and miscellaneous events. */
	adapter->stats.uprcl += IXGB_READ_REG(&adapter->hw, UPRCL);
	adapter->stats.uprch += IXGB_READ_REG(&adapter->hw, UPRCH);
	adapter->stats.vprcl += IXGB_READ_REG(&adapter->hw, VPRCL);
	adapter->stats.vprch += IXGB_READ_REG(&adapter->hw, VPRCH);
	adapter->stats.jprcl += IXGB_READ_REG(&adapter->hw, JPRCL);
	adapter->stats.jprch += IXGB_READ_REG(&adapter->hw, JPRCH);
	adapter->stats.rnbc += IXGB_READ_REG(&adapter->hw, RNBC);
	adapter->stats.icbc += IXGB_READ_REG(&adapter->hw, ICBC);
	adapter->stats.ecbc += IXGB_READ_REG(&adapter->hw, ECBC);
	adapter->stats.uptcl += IXGB_READ_REG(&adapter->hw, UPTCL);
	adapter->stats.uptch += IXGB_READ_REG(&adapter->hw, UPTCH);
	adapter->stats.vptcl += IXGB_READ_REG(&adapter->hw, VPTCL);
	adapter->stats.vptch += IXGB_READ_REG(&adapter->hw, VPTCH);
	adapter->stats.jptcl += IXGB_READ_REG(&adapter->hw, JPTCL);
	adapter->stats.jptch += IXGB_READ_REG(&adapter->hw, JPTCH);
	adapter->stats.tsctc += IXGB_READ_REG(&adapter->hw, TSCTC);
	adapter->stats.tsctfc += IXGB_READ_REG(&adapter->hw, TSCTFC);
	adapter->stats.ibic += IXGB_READ_REG(&adapter->hw, IBIC);
	adapter->stats.lfc += IXGB_READ_REG(&adapter->hw, LFC);
	adapter->stats.pfrc += IXGB_READ_REG(&adapter->hw, PFRC);
	adapter->stats.pftc += IXGB_READ_REG(&adapter->hw, PFTC);
	adapter->stats.mcfrc += IXGB_READ_REG(&adapter->hw, MCFRC);

	ifp = adapter->ifp;

	/* Fill out the OS statistics structure */
	ifp->if_ipackets = adapter->stats.gprcl;
	ifp->if_opackets = adapter->stats.gptcl;
	ifp->if_ibytes = adapter->stats.gorcl;
	ifp->if_obytes = adapter->stats.gotcl;
	ifp->if_imcasts = adapter->stats.mprcl;
	/* Full-duplex-only 10G link: no collisions possible. */
	ifp->if_collisions = 0;

	/* Rx Errors */
	ifp->if_ierrors =
	    adapter->dropped_pkts +
	    adapter->stats.crcerrs +
	    adapter->stats.rnbc +
	    adapter->stats.mpc +
	    adapter->stats.rlec;


}
2404
2405
2406 /**********************************************************************
2407 *
2408 * This routine is called only when ixgb_display_debug_stats is enabled.
2409 * This routine provides a way to take a look at important statistics
2410 * maintained by the driver and hardware.
2411 *
2412 **********************************************************************/
2413 static void
2414 ixgb_print_hw_stats(struct adapter * adapter)
2415 {
2416 char buf_speed[100], buf_type[100];
2417 ixgb_bus_speed bus_speed;
2418 ixgb_bus_type bus_type;
2419 int unit = adapter->unit;
2420
2421 #ifdef _SV_
2422 printf("ixgb%d: Packets not Avail = %ld\n", unit,
2423 adapter->no_pkts_avail);
2424 printf("ixgb%d: CleanTxInterrupts = %ld\n", unit,
2425 adapter->clean_tx_interrupts);
2426 printf("ixgb%d: ICR RXDMT0 = %lld\n", unit,
2427 (long long)adapter->sv_stats.icr_rxdmt0);
2428 printf("ixgb%d: ICR RXO = %lld\n", unit,
2429 (long long)adapter->sv_stats.icr_rxo);
2430 printf("ixgb%d: ICR RXT0 = %lld\n", unit,
2431 (long long)adapter->sv_stats.icr_rxt0);
2432 printf("ixgb%d: ICR TXDW = %lld\n", unit,
2433 (long long)adapter->sv_stats.icr_TXDW);
2434 #endif /* _SV_ */
2435
2436 bus_speed = adapter->hw.bus.speed;
2437 bus_type = adapter->hw.bus.type;
2438 sprintf(buf_speed,
2439 bus_speed == ixgb_bus_speed_33 ? "33MHz" :
2440 bus_speed == ixgb_bus_speed_66 ? "66MHz" :
2441 bus_speed == ixgb_bus_speed_100 ? "100MHz" :
2442 bus_speed == ixgb_bus_speed_133 ? "133MHz" :
2443 "UNKNOWN");
2444 printf("ixgb%d: PCI_Bus_Speed = %s\n", unit,
2445 buf_speed);
2446
2447 sprintf(buf_type,
2448 bus_type == ixgb_bus_type_pci ? "PCI" :
2449 bus_type == ixgb_bus_type_pcix ? "PCI-X" :
2450 "UNKNOWN");
2451 printf("ixgb%d: PCI_Bus_Type = %s\n", unit,
2452 buf_type);
2453
2454 printf("ixgb%d: Tx Descriptors not Avail1 = %ld\n", unit,
2455 adapter->no_tx_desc_avail1);
2456 printf("ixgb%d: Tx Descriptors not Avail2 = %ld\n", unit,
2457 adapter->no_tx_desc_avail2);
2458 printf("ixgb%d: Std Mbuf Failed = %ld\n", unit,
2459 adapter->mbuf_alloc_failed);
2460 printf("ixgb%d: Std Cluster Failed = %ld\n", unit,
2461 adapter->mbuf_cluster_failed);
2462
2463 printf("ixgb%d: Defer count = %lld\n", unit,
2464 (long long)adapter->stats.dc);
2465 printf("ixgb%d: Missed Packets = %lld\n", unit,
2466 (long long)adapter->stats.mpc);
2467 printf("ixgb%d: Receive No Buffers = %lld\n", unit,
2468 (long long)adapter->stats.rnbc);
2469 printf("ixgb%d: Receive length errors = %lld\n", unit,
2470 (long long)adapter->stats.rlec);
2471 printf("ixgb%d: Crc errors = %lld\n", unit,
2472 (long long)adapter->stats.crcerrs);
2473 printf("ixgb%d: Driver dropped packets = %ld\n", unit,
2474 adapter->dropped_pkts);
2475
2476 printf("ixgb%d: XON Rcvd = %lld\n", unit,
2477 (long long)adapter->stats.xonrxc);
2478 printf("ixgb%d: XON Xmtd = %lld\n", unit,
2479 (long long)adapter->stats.xontxc);
2480 printf("ixgb%d: XOFF Rcvd = %lld\n", unit,
2481 (long long)adapter->stats.xoffrxc);
2482 printf("ixgb%d: XOFF Xmtd = %lld\n", unit,
2483 (long long)adapter->stats.xofftxc);
2484
2485 printf("ixgb%d: Good Packets Rcvd = %lld\n", unit,
2486 (long long)adapter->stats.gprcl);
2487 printf("ixgb%d: Good Packets Xmtd = %lld\n", unit,
2488 (long long)adapter->stats.gptcl);
2489
2490 printf("ixgb%d: Jumbo frames recvd = %lld\n", unit,
2491 (long long)adapter->stats.jprcl);
2492 printf("ixgb%d: Jumbo frames Xmtd = %lld\n", unit,
2493 (long long)adapter->stats.jptcl);
2494
2495 return;
2496
2497 }
2498
2499 static int
2500 ixgb_sysctl_stats(SYSCTL_HANDLER_ARGS)
2501 {
2502 int error;
2503 int result;
2504 struct adapter *adapter;
2505
2506 result = -1;
2507 error = sysctl_handle_int(oidp, &result, 0, req);
2508
2509 if (error || !req->newptr)
2510 return (error);
2511
2512 if (result == 1) {
2513 adapter = (struct adapter *) arg1;
2514 ixgb_print_hw_stats(adapter);
2515 }
2516 return error;
2517 }
Cache object: 26160d0e3e0bd1a379b1204edf6eabc7
|