FreeBSD/Linux Kernel Cross Reference
sys/mips/cavium/octe/ethernet-rx.c


    1 /*************************************************************************
    2 Copyright (c) 2003-2007  Cavium Networks (support@cavium.com). All rights
    3 reserved.
    4 
    5 
    6 Redistribution and use in source and binary forms, with or without
    7 modification, are permitted provided that the following conditions are
    8 met:
    9 
   10     * Redistributions of source code must retain the above copyright
   11       notice, this list of conditions and the following disclaimer.
   12 
   13     * Redistributions in binary form must reproduce the above
   14       copyright notice, this list of conditions and the following
   15       disclaimer in the documentation and/or other materials provided
   16       with the distribution.
   17 
   18     * Neither the name of Cavium Networks nor the names of
   19       its contributors may be used to endorse or promote products
   20       derived from this software without specific prior written
   21       permission.
   22 
   23 This Software, including technical data, may be subject to U.S. export  control laws, including the U.S. Export Administration Act and its  associated regulations, and may be subject to export or import  regulations in other countries.
   24 
   25 TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
   26 AND WITH ALL FAULTS AND CAVIUM  NETWORKS MAKES NO PROMISES, REPRESENTATIONS OR WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE ENTIRE  RISK ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
   27 
   28 *************************************************************************/
   29 
   30 #include <sys/cdefs.h>
   31 __FBSDID("$FreeBSD: releng/8.4/sys/mips/cavium/octe/ethernet-rx.c 215938 2010-11-27 12:26:40Z jchandra $");
   32 
   33 #include <sys/param.h>
   34 #include <sys/systm.h>
   35 #include <sys/bus.h>
   36 #include <sys/endian.h>
   37 #include <sys/kernel.h>
   38 #include <sys/mbuf.h>
   39 #include <sys/socket.h>
   40 #include <sys/proc.h>
   41 #include <sys/sched.h>
   42 #include <sys/smp.h>
   43 #include <sys/taskqueue.h>
   44 
   45 #include <net/ethernet.h>
   46 #include <net/if.h>
   47 
   48 #include "wrapper-cvmx-includes.h"
   49 #include "ethernet-headers.h"
   50 
   51 extern int pow_receive_group;
   52 extern struct ifnet *cvm_oct_device[];
   53 
   54 static struct task cvm_oct_task;
   55 static struct taskqueue *cvm_oct_taskq;
   56 
   57 /**
   58  * Interrupt handler. The interrupt occurs whenever the POW
   59  * transitions from 0->1 packets in our group.
   60  *
    61  * @param dev_id Device identifier (unused).
    62  *
    63  * @return FILTER_HANDLED; the actual packet processing is
    64  *         deferred to the receive taskqueue.
   65  */
   66 int cvm_oct_do_interrupt(void *dev_id)
   67 {
   68         /* Acknowledge the interrupt */
   69         if (INTERRUPT_LIMIT)
   70                 cvmx_write_csr(CVMX_POW_WQ_INT, 1<<pow_receive_group);
   71         else
   72                 cvmx_write_csr(CVMX_POW_WQ_INT, 0x10001<<pow_receive_group);
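               /*
                * The low 16 bits of POW_WQ_INT are per-group, write-one-to-clear
                * interrupt bits, so 1<<pow_receive_group acknowledges just this
                * group.  The 0x10001 form also writes the matching bit 16
                * positions higher, which appears to clear the group's
                * interrupt-disable state when no INTERRUPT_LIMIT threshold is
                * configured.
                */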
   73         taskqueue_enqueue(cvm_oct_taskq, &cvm_oct_task);
   74         return FILTER_HANDLED;
   75 }
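
       /*
        * A minimal sketch of how a generic newbus driver could hook a filter
        * such as cvm_oct_do_interrupt.  The dev, irq_res and example_* names
        * are hypothetical; this driver performs its interrupt registration
        * outside of this file.
        */
       static void *example_intr_cookie;

       static int
       example_setup_intr(device_t dev, struct resource *irq_res)
       {
               /*
                * Run as a filter (no ithread): the handler above only writes a
                * CSR and enqueues a task, so it is cheap and safe to run
                * directly in interrupt context.
                */
               return (bus_setup_intr(dev, irq_res, INTR_TYPE_NET | INTR_MPSAFE,
                   cvm_oct_do_interrupt, NULL, NULL, &example_intr_cookie));
       }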
   76 
   77 
   78 #ifdef CONFIG_NET_POLL_CONTROLLER
   79 /**
    80  * This is called when the kernel needs to manually poll the
    81  * device. For Octeon, this simply schedules the receive task,
    82  * which services all the devices, not just the one that was
    83  * supplied.
    84  *
    85  * @param ifp    Interface to poll. Unused.
   86  */
   87 void cvm_oct_poll_controller(struct ifnet *ifp)
   88 {
   89         taskqueue_enqueue(cvm_oct_taskq, &cvm_oct_task);
   90 }
   91 #endif
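
       /*
        * CONFIG_NET_POLL_CONTROLLER is a Linux kernel configuration symbol
        * carried over from the driver this code was ported from; it is not
        * expected to be defined in a FreeBSD build, so the poll-controller
        * hook above is normally compiled out.
        */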
   92 
   93 /**
   94  * This is called on receive errors, and determines if the packet
    95  * can be dropped early on in cvm_oct_tasklet_rx().
   96  *
   97  * @param work Work queue entry pointing to the packet.
   98  * @return Non-zero if the packet can be dropped, zero otherwise.
   99  */
  100 static inline int cvm_oct_check_rcv_error(cvmx_wqe_t *work)
  101 {
  102         if ((work->word2.snoip.err_code == 10) && (work->len <= 64)) {
  103                 /* Ignore length errors on min size packets. Some equipment
  104                    incorrectly pads packets to 64+4FCS instead of 60+4FCS.
  105                    Note these packets still get counted as frame errors. */
  106         } else
  107         if (USE_10MBPS_PREAMBLE_WORKAROUND && ((work->word2.snoip.err_code == 5) || (work->word2.snoip.err_code == 7))) {
  108 
   109                 /* We received a packet with either an alignment error or an
   110                    FCS error. This may be a sign that we are running at
   111                    10Mbps with GMXX_RXX_FRM_CTL[PRE_CHK] off. If this is the
   112                    case we need to parse the packet to determine if we can
   113                    remove a non-spec preamble and generate a correct packet. */
  114                 int interface = cvmx_helper_get_interface_num(work->ipprt);
  115                 int index = cvmx_helper_get_interface_index_num(work->ipprt);
  116                 cvmx_gmxx_rxx_frm_ctl_t gmxx_rxx_frm_ctl;
  117                 gmxx_rxx_frm_ctl.u64 = cvmx_read_csr(CVMX_GMXX_RXX_FRM_CTL(index, interface));
  118                 if (gmxx_rxx_frm_ctl.s.pre_chk == 0) {
  119 
  120                         uint8_t *ptr = cvmx_phys_to_ptr(work->packet_ptr.s.addr);
  121                         int i = 0;
  122 
  123                         while (i < work->len-1) {
  124                                 if (*ptr != 0x55)
  125                                         break;
  126                                 ptr++;
  127                                 i++;
  128                         }
  129 
  130                         if (*ptr == 0xd5) {
  131                                 /*
  132                                 DEBUGPRINT("Port %d received 0xd5 preamble\n", work->ipprt);
  133                                 */
  134                                 work->packet_ptr.s.addr += i+1;
  135                                 work->len -= i+5;
  136                         } else
  137                         if ((*ptr & 0xf) == 0xd) {
  138                                 /*
  139                                 DEBUGPRINT("Port %d received 0x?d preamble\n", work->ipprt);
  140                                 */
  141                                 work->packet_ptr.s.addr += i;
  142                                 work->len -= i+4;
  143                                 for (i = 0; i < work->len; i++) {
  144                                         *ptr = ((*ptr&0xf0)>>4) | ((*(ptr+1)&0xf)<<4);
  145                                         ptr++;
  146                                 }
  147                         } else {
  148                                 DEBUGPRINT("Port %d unknown preamble, packet dropped\n", work->ipprt);
  149                                 /*
  150                                 cvmx_helper_dump_packet(work);
  151                                 */
  152                                 cvm_oct_free_work(work);
  153                                 return 1;
  154                         }
  155                 }
  156         } else {
  157                 DEBUGPRINT("Port %d receive error code %d, packet dropped\n", work->ipprt, work->word2.snoip.err_code);
  158                 cvm_oct_free_work(work);
  159                 return 1;
  160         }
  161 
  162         return 0;
  163 }
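
       /*
        * Illustration of the nibble realignment performed in the 0x?d case
        * above, as a hypothetical standalone helper: each byte keeps its high
        * nibble as its new low nibble and takes the next byte's low nibble as
        * its new high nibble.  Like the loop above, it assumes at least
        * len + 1 bytes are readable at buf.
        */
       static __inline void
       example_shift_out_preamble_nibble(uint8_t *buf, int len)
       {
               int i;

               for (i = 0; i < len; i++)
                       buf[i] = ((buf[i] & 0xf0) >> 4) | ((buf[i + 1] & 0x0f) << 4);
       }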
  164 
  165 /**
   166  * Task function that is scheduled on a core when the receive interrupt occurs.
   167  *
   168  * @param context Unused.  @param pending Unused.
  169  */
  170 void cvm_oct_tasklet_rx(void *context, int pending)
  171 {
  172         int                 coreid;
  173         uint64_t            old_group_mask;
  174         uint64_t            old_scratch;
  175         int                 rx_count = 0;
  176         int                 number_to_free;
  177         int                 num_freed;
  178         int                 packet_not_copied;
  179 
  180         sched_pin();
  181         coreid = cvmx_get_core_num();
  182 
  183         /* Prefetch cvm_oct_device since we know we need it soon */
  184         CVMX_PREFETCH(cvm_oct_device, 0);
  185 
  186         if (USE_ASYNC_IOBDMA) {
  187                 /* Save scratch in case userspace is using it */
  188                 CVMX_SYNCIOBDMA;
  189                 old_scratch = cvmx_scratch_read64(CVMX_SCR_SCRATCH);
  190         }
  191 
  192         /* Only allow work for our group (and preserve priorities) */
  193         old_group_mask = cvmx_read_csr(CVMX_POW_PP_GRP_MSKX(coreid));
  194         cvmx_write_csr(CVMX_POW_PP_GRP_MSKX(coreid),
  195                        (old_group_mask & ~0xFFFFull) | 1<<pow_receive_group);
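               /*
                * The low 16 bits of POW_PP_GRP_MSKX are this core's work-group
                * mask (one bit per POW group): the write above clears them all
                * and then admits only pow_receive_group, while the priority
                * fields in the upper bits are carried over unchanged.
                */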
  196 
  197         if (USE_ASYNC_IOBDMA)
  198                 cvmx_pow_work_request_async(CVMX_SCR_SCRATCH, CVMX_POW_NO_WAIT);
  199 
  200         while (1) {
  201                 struct mbuf *m = NULL;
  202                 int mbuf_in_hw;
  203                 cvmx_wqe_t *work;
  204 
  205                 if (USE_ASYNC_IOBDMA) {
  206                         work = cvmx_pow_work_response_async(CVMX_SCR_SCRATCH);
  207                 } else {
  208                         if ((INTERRUPT_LIMIT == 0) || (rx_count < MAX_RX_PACKETS))
  209                                 work = cvmx_pow_work_request_sync(CVMX_POW_NO_WAIT);
  210                         else
  211                                 work = NULL;
  212                 }
  213                 CVMX_PREFETCH(work, 0);
  214                 if (work == NULL)
  215                         break;
  216 
  217                 /* Limit each core to processing MAX_RX_PACKETS packets without a break.
  218                    This way the RX can't starve the TX task. */
  219                 if (USE_ASYNC_IOBDMA) {
  220 
  221                         if ((INTERRUPT_LIMIT == 0) || (rx_count < MAX_RX_PACKETS))
  222                                 cvmx_pow_work_request_async_nocheck(CVMX_SCR_SCRATCH, CVMX_POW_NO_WAIT);
  223                         else {
  224                                 cvmx_scratch_write64(CVMX_SCR_SCRATCH, 0x8000000000000000ull);
  225                                 cvmx_pow_tag_sw_null_nocheck();
  226                         }
  227                 }
  228 
  229                 mbuf_in_hw = USE_MBUFS_IN_HW && work->word2.s.bufs == 1;
  230                 if ((mbuf_in_hw)) {
  231                         m = *(struct mbuf **)(cvm_oct_get_buffer_ptr(work->packet_ptr) - sizeof(void *));
  232                         CVMX_PREFETCH(m, offsetof(struct mbuf, m_data));
  233                         CVMX_PREFETCH(m, offsetof(struct mbuf, m_pkthdr));
  234                 }
  235                 CVMX_PREFETCH(cvm_oct_device[work->ipprt], 0);
  236                 //CVMX_PREFETCH(m, 0);
  237 
  238 
  239                 rx_count++;
  240                 /* Immediately throw away all packets with receive errors */
  241                 if ((work->word2.snoip.rcv_error)) {
  242                         if (cvm_oct_check_rcv_error(work))
  243                                 continue;
  244                 }
  245 
  246                 /* We can only use the zero copy path if mbufs are in the FPA pool
  247                    and the packet fits in a single buffer */
  248                 if ((mbuf_in_hw)) {
  249                         CVMX_PREFETCH(m->m_data, 0);
  250 
  251                         m->m_pkthdr.len = m->m_len = work->len;
  252 
  253                         packet_not_copied = 1;
  254 
  255                         /*
  256                          * Adjust the data pointer based on the offset
  257                          * of the packet within the buffer.
  258                          */
  259                         m->m_data += (work->packet_ptr.s.back << 7) + (work->packet_ptr.s.addr & 0x7f);
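                               /*
                                * packet_ptr.s.back counts 128-byte cache lines
                                * from the packet data back to the start of the
                                * buffer (hence the << 7), and the low 7 bits of
                                * the address are the offset within a cache
                                * line, so this moves m_data from the buffer
                                * start to the first byte of the packet.
                                */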
  260                 } else {
  261 
  262                         /* We have to copy the packet. First allocate an
  263                            mbuf for it */
  264                         MGETHDR(m, M_DONTWAIT, MT_DATA);
  265                         if (m == NULL) {
  266                                 DEBUGPRINT("Port %d failed to allocate mbuf, packet dropped\n", work->ipprt);
  267                                 cvm_oct_free_work(work);
  268                                 continue;
  269                         }
  270 
  271                         /* Check if we've received a packet that was entirely
  272                            stored in the work entry. This is untested */
  273                         if ((work->word2.s.bufs == 0)) {
  274                                 uint8_t *ptr = work->packet_data;
  275 
  276                                 if (cvmx_likely(!work->word2.s.not_IP)) {
  277                                         /* The beginning of the packet moves
  278                                            for IP packets */
  279                                         if (work->word2.s.is_v6)
  280                                                 ptr += 2;
  281                                         else
  282                                                 ptr += 6;
  283                                 }
  284                                 panic("%s: not yet implemented; copy in small packet.", __func__);
  285                                 /* No packet buffers to free */
  286                         } else {
  287                                 int segments = work->word2.s.bufs;
  288                                 cvmx_buf_ptr_t segment_ptr = work->packet_ptr;
  289                                 int len = work->len;
  290 
  291                                 while (segments--) {
  292                                         cvmx_buf_ptr_t next_ptr = *(cvmx_buf_ptr_t *)cvmx_phys_to_ptr(segment_ptr.s.addr-8);
  293                                         /* Octeon Errata PKI-100: The segment
  294                                            size is wrong. Until it is fixed,
  295                                            calculate the segment size based on
  296                                            the packet pool buffer size. When
  297                                            it is fixed, the following line
  298                                            should be replaced with this one:
  299                                         int segment_size = segment_ptr.s.size; */
  300                                         int segment_size = CVMX_FPA_PACKET_POOL_SIZE - (segment_ptr.s.addr - (((segment_ptr.s.addr >> 7) - segment_ptr.s.back) << 7));
  301                                         /* Don't copy more than what is left
  302                                            in the packet */
  303                                         if (segment_size > len)
  304                                                 segment_size = len;
  305                                         /* Copy the data into the packet */
  306                                         panic("%s: not yet implemented; copy in packet segments.", __func__);
  307 #if 0
  308                                         memcpy(m_put(m, segment_size), cvmx_phys_to_ptr(segment_ptr.s.addr), segment_size);
  309 #endif
  310                                         /* Reduce the amount of bytes left
  311                                            to copy */
  312                                         len -= segment_size;
  313                                         segment_ptr = next_ptr;
  314                                 }
  315                         }
  316                         packet_not_copied = 0;
  317                 }
  318 
  319                 if (((work->ipprt < TOTAL_NUMBER_OF_PORTS) &&
  320                     cvm_oct_device[work->ipprt])) {
  321                         struct ifnet *ifp = cvm_oct_device[work->ipprt];
  322 
  323                         /* Only accept packets for devices
  324                            that are currently up */
  325                         if ((ifp->if_flags & IFF_UP)) {
  326                                 m->m_pkthdr.rcvif = ifp;
  327 
  328                                 if ((ifp->if_capenable & IFCAP_RXCSUM) != 0) {
  329                                         if ((work->word2.s.not_IP || work->word2.s.IP_exc || work->word2.s.L4_error))
  330                                                 m->m_pkthdr.csum_flags = 0; /* XXX */
  331                                         else {
  332                                                 m->m_pkthdr.csum_flags = CSUM_IP_CHECKED | CSUM_IP_VALID | CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
  333                                                 m->m_pkthdr.csum_data = 0xffff;
  334                                         }
  335                                 } else {
  336                                         m->m_pkthdr.csum_flags = 0; /* XXX */
  337                                 }
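                                       /*
                                        * CSUM_DATA_VALID | CSUM_PSEUDO_HDR plus
                                        * csum_data == 0xffff tells the stack the
                                        * L4 checksum has already been verified,
                                        * so it is not recomputed on input.
                                        */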
  338 
  339                                 ifp->if_ipackets++;
  340 
  341                                 (*ifp->if_input)(ifp, m);
  342                         } else {
  343                                 /* Drop any packet received for a device that isn't up */
  344                                 /*
  345                                 DEBUGPRINT("%s: Device not up, packet dropped\n",
  346                                            if_name(ifp));
  347                                 */
  348                                 m_freem(m);
  349                         }
  350                 } else {
  351                         /* Drop any packet received for a device that
  352                            doesn't exist */
  353                         DEBUGPRINT("Port %d not controlled by Linux, packet dropped\n", work->ipprt);
  354                         m_freem(m);
  355                 }
  356 
  357                 /* Check to see if the mbuf and work share
  358                    the same packet buffer */
  359                 if (USE_MBUFS_IN_HW && (packet_not_copied)) {
   360                         /* This buffer needs to be replaced; increment
   361                            the number of buffers we need to free by one. */
  362                         cvmx_fau_atomic_add32(
  363                                 FAU_NUM_PACKET_BUFFERS_TO_FREE, 1);
  364 
  365                         cvmx_fpa_free(work, CVMX_FPA_WQE_POOL,
  366                                       DONT_WRITEBACK(1));
  367                 } else
  368                         cvm_oct_free_work(work);
  369         }
  370 
  371         /* Restore the original POW group mask */
  372         cvmx_write_csr(CVMX_POW_PP_GRP_MSKX(coreid), old_group_mask);
  373         if (USE_ASYNC_IOBDMA) {
  374                 /* Restore the scratch area */
  375                 cvmx_scratch_write64(CVMX_SCR_SCRATCH, old_scratch);
  376         }
  377 
  378         if (USE_MBUFS_IN_HW) {
  379                 /* Refill the packet buffer pool */
  380                 number_to_free =
  381                   cvmx_fau_fetch_and_add32(FAU_NUM_PACKET_BUFFERS_TO_FREE, 0);
  382 
  383                 if (number_to_free > 0) {
  384                         cvmx_fau_atomic_add32(FAU_NUM_PACKET_BUFFERS_TO_FREE,
  385                                               -number_to_free);
  386                         num_freed =
  387                                 cvm_oct_mem_fill_fpa(CVMX_FPA_PACKET_POOL,
  388                                                      CVMX_FPA_PACKET_POOL_SIZE,
  389                                                      number_to_free);
  390                         if (num_freed != number_to_free) {
  391                                 cvmx_fau_atomic_add32(FAU_NUM_PACKET_BUFFERS_TO_FREE,
  392                                                       number_to_free - num_freed);
  393                         }
  394                 }
  395         }
  396         sched_unpin();
  397 }
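
       /*
        * A sketch of the deferred-refill accounting used at the end of the
        * receive task, with hypothetical example_* names: consumers count
        * every hardware buffer they keep, and the refill pass drains that
        * counter in one batch, crediting back whatever it could not replace.
        * Assumes <machine/atomic.h> for the atomic_*_32() operations.
        */
       static volatile uint32_t example_bufs_owed;
       static uint32_t example_refill_pool(uint32_t n);   /* hypothetical allocator */

       static void
       example_refill_pass(void)
       {
               uint32_t owed, filled;

               /* Take ownership of the whole outstanding count in one step. */
               owed = atomic_readandclear_32(&example_bufs_owed);
               if (owed == 0)
                       return;
               filled = example_refill_pool(owed);
               if (filled != owed)
                       /* Could not replace them all; retry on a later pass. */
                       atomic_add_32(&example_bufs_owed, owed - filled);
       }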
  398 
  399 
  400 
  401 void cvm_oct_rx_initialize(void)
  402 {
  403         TASK_INIT(&cvm_oct_task, 0, cvm_oct_tasklet_rx, NULL);
  404 
  405         cvm_oct_taskq = taskqueue_create_fast("oct_rx", M_NOWAIT,
  406                                               taskqueue_thread_enqueue,
  407                                               &cvm_oct_taskq);
  408         taskqueue_start_threads(&cvm_oct_taskq, min(mp_ncpus, MAXCPU),
  409                                 PI_NET, "octe taskq");
  410 }
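
       /*
        * Passing &cvm_oct_taskq as the enqueue context is the usual idiom for
        * taskqueue_thread_enqueue(): the callback dereferences that pointer to
        * find the queue it should wake, and the pointer is filled in when
        * taskqueue_create_fast() returns, before any threads start.  The
        * thread count is capped at one worker per CPU.
        */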
  411 
  412 void cvm_oct_rx_shutdown(void)
  413 {
  414         panic("%s: not yet implemented.", __func__);
  415 }
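
       /*
        * One possible shape for the unimplemented shutdown above, assuming the
        * POW receive interrupt has already been disabled by the caller: wait
        * for any in-flight run of the receive task, then tear the queue down.
        */
       static void
       example_rx_shutdown(void)
       {
               /* Finish any queued or running instance of the receive task. */
               taskqueue_drain(cvm_oct_taskq, &cvm_oct_task);
               /* Stop the worker threads and release the queue. */
               taskqueue_free(cvm_oct_taskq);
       }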
  416 


This page is part of the FreeBSD/Linux Kernel Cross-Reference and was automatically generated using a modified version of the LXR engine.