1 /*************************************************************************
2 SPDX-License-Identifier: BSD-3-Clause
3
4 Copyright (c) 2003-2007 Cavium Networks (support@cavium.com). All rights
5 reserved.
6
7
8 Redistribution and use in source and binary forms, with or without
9 modification, are permitted provided that the following conditions are
10 met:
11
12 * Redistributions of source code must retain the above copyright
13 notice, this list of conditions and the following disclaimer.
14
15 * Redistributions in binary form must reproduce the above
16 copyright notice, this list of conditions and the following
17 disclaimer in the documentation and/or other materials provided
18 with the distribution.
19
20 * Neither the name of Cavium Networks nor the names of
21 its contributors may be used to endorse or promote products
22 derived from this software without specific prior written
23 permission.
24
25 This Software, including technical data, may be subject to U.S. export control laws, including the U.S. Export Administration Act and its associated regulations, and may be subject to export or import regulations in other countries.
26
27 TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
28 AND WITH ALL FAULTS AND CAVIUM NETWORKS MAKES NO PROMISES, REPRESENTATIONS OR WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
29
30 *************************************************************************/
31
32 #include <sys/cdefs.h>
33 __FBSDID("$FreeBSD: releng/12.0/sys/mips/cavium/octe/ethernet-rx.c 326023 2017-11-20 19:43:44Z pfg $");
34
35 #include <sys/param.h>
36 #include <sys/systm.h>
37 #include <sys/bus.h>
38 #include <sys/endian.h>
39 #include <sys/kernel.h>
40 #include <sys/mbuf.h>
41 #include <sys/socket.h>
42 #include <sys/proc.h>
43 #include <sys/sched.h>
44 #include <sys/smp.h>
45 #include <sys/taskqueue.h>
46
47 #include <net/ethernet.h>
48 #include <net/if.h>
49 #include <net/if_var.h>
50
51 #include "wrapper-cvmx-includes.h"
52 #include "ethernet-headers.h"
53
54 extern int pow_receive_group;
55 extern struct ifnet *cvm_oct_device[];
56
57 static struct task cvm_oct_task;
58 static struct taskqueue *cvm_oct_taskq;
59
60 static int cvm_oct_rx_active;
61
62 /**
63 * Interrupt handler. The interrupt occurs whenever the POW
64 * transitions from 0->1 packets in our group.
65 *
66 * @param cpl
67 * @param dev_id
68 * @param regs
69 * @return
70 */
71 int cvm_oct_do_interrupt(void *dev_id)
72 {
73 /* Acknowledge the interrupt */
74 if (INTERRUPT_LIMIT)
75 cvmx_write_csr(CVMX_POW_WQ_INT, 1<<pow_receive_group);
76 else
77 cvmx_write_csr(CVMX_POW_WQ_INT, 0x10001<<pow_receive_group);
78
79 /*
80 * Schedule task if there isn't one running.
81 */
82 if (atomic_cmpset_int(&cvm_oct_rx_active, 0, 1))
83 taskqueue_enqueue(cvm_oct_taskq, &cvm_oct_task);
84
85 return FILTER_HANDLED;
86 }
87
88
/**
 * This is called on receive errors, and determines if the packet
 * can be dropped early-on in cvm_oct_tasklet_rx().
 *
 * May modify the work entry in place (packet pointer/length) when a
 * non-spec 10Mbps preamble can be stripped to recover a valid packet.
 *
 * @param work Work queue entry pointing to the packet.
 * @return Non-zero if the packet can be dropped (work already freed),
 *         zero if the packet should continue through normal receive.
 */
static inline int cvm_oct_check_rcv_error(cvmx_wqe_t *work)
{
	if ((work->word2.snoip.err_code == 10) && (work->word1.s.len <= 64)) {
		/* Ignore length errors on min size packets. Some equipment
		   incorrectly pads packets to 64+4FCS instead of 60+4FCS.
		   Note these packets still get counted as frame errors. */
	} else
	if (USE_10MBPS_PREAMBLE_WORKAROUND && ((work->word2.snoip.err_code == 5) || (work->word2.snoip.err_code == 7))) {
		/* We received a packet with either an alignment error
		   (code 5) or an FCS error (code 7). This may be signalling
		   that we are running 10Mbps with GMXX_RXX_FRM_CTL[PRE_CHK]
		   off. If this is the case we need to parse the packet to
		   determine if we can remove a non spec preamble and
		   generate a correct packet. */
		int interface = cvmx_helper_get_interface_num(work->word1.cn38xx.ipprt);
		int index = cvmx_helper_get_interface_index_num(work->word1.cn38xx.ipprt);
		cvmx_gmxx_rxx_frm_ctl_t gmxx_rxx_frm_ctl;
		gmxx_rxx_frm_ctl.u64 = cvmx_read_csr(CVMX_GMXX_RXX_FRM_CTL(index, interface));
		if (gmxx_rxx_frm_ctl.s.pre_chk == 0) {
			uint8_t *ptr = cvmx_phys_to_ptr(work->packet_ptr.s.addr);
			int i = 0;

			/* Skip over the run of 0x55 preamble bytes; stop one
			   byte short of the packet length so the *ptr reads
			   below remain within the packet. */
			while (i < work->word1.s.len-1) {
				if (*ptr != 0x55)
					break;
				ptr++;
				i++;
			}

			if (*ptr == 0xd5) {
				/* Byte-aligned SFD found: drop the i preamble
				   bytes plus the SFD byte, and shorten the
				   length by 4 more (presumably the trailing
				   FCS — matches the i+4 in the branch below). */
				work->packet_ptr.s.addr += i+1;
				work->word1.s.len -= i+5;
			} else
			if ((*ptr & 0xf) == 0xd) {
				/* SFD misaligned by a nibble: drop the
				   preamble, then shift the entire remaining
				   payload left by 4 bits to realign it. */
				work->packet_ptr.s.addr += i;
				work->word1.s.len -= i+4;
				for (i = 0; i < work->word1.s.len; i++) {
					*ptr = ((*ptr&0xf0)>>4) | ((*(ptr+1)&0xf)<<4);
					ptr++;
				}
			} else {
				/* Unrecognized preamble pattern; give up. */
				DEBUGPRINT("Port %d unknown preamble, packet dropped\n", work->word1.cn38xx.ipprt);
				cvm_oct_free_work(work);
				return 1;
			}
		}
	} else {
		/* Any other receive error: log and drop the packet. */
		DEBUGPRINT("Port %d receive error code %d, packet dropped\n", work->word1.cn38xx.ipprt, work->word2.snoip.err_code);
		cvm_oct_free_work(work);
		return 1;
	}

	return 0;
}
160
/**
 * Tasklet function that is scheduled on a core when an interrupt occurs.
 *
 * Drains the POW of pending work entries for our receive group,
 * converts each into an mbuf (zero-copy when the packet fits a single
 * hardware buffer) and hands it to the network stack, then replenishes
 * the FPA packet buffer pool for the buffers the stack kept.
 *
 * @param context Unused task argument.
 * @param pending Taskqueue pending count (unused).
 */
void cvm_oct_tasklet_rx(void *context, int pending)
{
	int coreid;
	uint64_t old_group_mask;
	int rx_count = 0;
	int number_to_free;
	int num_freed;
	int packet_not_copied;

	/* POW group masks are per-core registers; pin to this CPU so the
	   mask we save/restore is the one we modify. */
	sched_pin();
	coreid = cvmx_get_core_num();

	/* Prefetch cvm_oct_device since we know we need it soon */
	CVMX_PREFETCH(cvm_oct_device, 0);

	/* Only allow work for our group (and preserve priorities) */
	old_group_mask = cvmx_read_csr(CVMX_POW_PP_GRP_MSKX(coreid));
	cvmx_write_csr(CVMX_POW_PP_GRP_MSKX(coreid),
	    (old_group_mask & ~0xFFFFull) | 1<<pow_receive_group);

	while (1) {
		struct mbuf *m = NULL;
		int mbuf_in_hw;
		cvmx_wqe_t *work;

		/* With rate limiting enabled, stop requesting work once we
		   hit the per-pass budget; the tail of this function then
		   reschedules the task. */
		if ((INTERRUPT_LIMIT == 0) || (rx_count < MAX_RX_PACKETS))
			work = cvmx_pow_work_request_sync(CVMX_POW_NO_WAIT);
		else
			work = NULL;
		CVMX_PREFETCH(work, 0);
		if (work == NULL)
			break;

		/* Single-buffer packets can go to the stack zero-copy: the
		   owning mbuf pointer is stored sizeof(void *) bytes before
		   the hardware buffer (stashed there at allocation time —
		   see the buffer-fill side of this driver). */
		mbuf_in_hw = work->word2.s.bufs == 1;
		if ((mbuf_in_hw)) {
			m = *(struct mbuf **)(cvm_oct_get_buffer_ptr(work->packet_ptr) - sizeof(void *));
			CVMX_PREFETCH(m, offsetof(struct mbuf, m_data));
			CVMX_PREFETCH(m, offsetof(struct mbuf, m_pkthdr));
		}
		CVMX_PREFETCH(cvm_oct_device[work->word1.cn38xx.ipprt], 0);

		rx_count++;
		/* Immediately throw away all packets with receive errors */
		if ((work->word2.snoip.rcv_error)) {
			if (cvm_oct_check_rcv_error(work))
				continue;
		}

		/* We can only use the zero copy path if mbufs are in the FPA pool
		   and the packet fits in a single buffer */
		if ((mbuf_in_hw)) {
			CVMX_PREFETCH(m->m_data, 0);

			m->m_pkthdr.len = m->m_len = work->word1.s.len;

			packet_not_copied = 1;

			/*
			 * Adjust the data pointer based on the offset
			 * of the packet within the buffer.
			 */
			m->m_data += (work->packet_ptr.s.back << 7) + (work->packet_ptr.s.addr & 0x7f);
		} else {
			/* We have to copy the packet. First allocate an
			   mbuf for it */
			MGETHDR(m, M_NOWAIT, MT_DATA);
			if (m == NULL) {
				DEBUGPRINT("Port %d failed to allocate mbuf, packet dropped\n", work->word1.cn38xx.ipprt);
				cvm_oct_free_work(work);
				continue;
			}

			/* Check if we've received a packet that was entirely
			   stored in the work entry. This is untested */
			if ((work->word2.s.bufs == 0)) {
				uint8_t *ptr = work->packet_data;

				if (cvmx_likely(!work->word2.s.not_IP)) {
					/* The beginning of the packet moves
					   for IP packets */
					if (work->word2.s.is_v6)
						ptr += 2;
					else
						ptr += 6;
				}
				/* The copy-from-work-entry path was never
				   finished; trap loudly if it is ever hit. */
				panic("%s: not yet implemented; copy in small packet.", __func__);
				/* No packet buffers to free */
			} else {
				int segments = work->word2.s.bufs;
				cvmx_buf_ptr_t segment_ptr = work->packet_ptr;
				int len = work->word1.s.len;

				while (segments--) {
					/* The pointer to the next segment is
					   stored 8 bytes before this
					   segment's data. */
					cvmx_buf_ptr_t next_ptr = *(cvmx_buf_ptr_t *)cvmx_phys_to_ptr(segment_ptr.s.addr-8);
					/* Octeon Errata PKI-100: The segment
					   size is wrong. Until it is fixed,
					   calculate the segment size based on
					   the packet pool buffer size. When
					   it is fixed, the following line
					   should be replaced with this one:
					   int segment_size = segment_ptr.s.size; */
					int segment_size = CVMX_FPA_PACKET_POOL_SIZE - (segment_ptr.s.addr - (((segment_ptr.s.addr >> 7) - segment_ptr.s.back) << 7));
					/* Don't copy more than what is left
					   in the packet */
					if (segment_size > len)
						segment_size = len;
					/* Copy the data into the packet;
					   likewise unfinished — trap. */
					panic("%s: not yet implemented; copy in packet segments.", __func__);
#if 0
					memcpy(m_put(m, segment_size), cvmx_phys_to_ptr(segment_ptr.s.addr), segment_size);
#endif
					/* Reduce the amount of bytes left
					   to copy */
					len -= segment_size;
					segment_ptr = next_ptr;
				}
			}
			packet_not_copied = 0;
		}

		if (((work->word1.cn38xx.ipprt < TOTAL_NUMBER_OF_PORTS) &&
		    cvm_oct_device[work->word1.cn38xx.ipprt])) {
			struct ifnet *ifp = cvm_oct_device[work->word1.cn38xx.ipprt];

			/* Only accept packets for devices
			   that are currently up */
			if ((ifp->if_flags & IFF_UP)) {
				m->m_pkthdr.rcvif = ifp;

				/* Mark the checksum valid for clean IP/L4
				   packets so the stack can skip rechecking;
				   anything the hardware flagged falls back
				   to software verification. */
				if ((ifp->if_capenable & IFCAP_RXCSUM) != 0) {
					if ((work->word2.s.not_IP || work->word2.s.IP_exc || work->word2.s.L4_error))
						m->m_pkthdr.csum_flags = 0; /* XXX */
					else {
						m->m_pkthdr.csum_flags = CSUM_IP_CHECKED | CSUM_IP_VALID | CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
						m->m_pkthdr.csum_data = 0xffff;
					}
				} else {
					m->m_pkthdr.csum_flags = 0; /* XXX */
				}

				if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);

				(*ifp->if_input)(ifp, m);
			} else {
				/* Drop any packet received for a device that isn't up */
				m_freem(m);
			}
		} else {
			/* Drop any packet received for a device that
			   doesn't exist */
			DEBUGPRINT("Port %d not controlled by FreeBSD, packet dropped\n", work->word1.cn38xx.ipprt);
			m_freem(m);
		}

		/* Check to see if the mbuf and work share
		   the same packet buffer */
		if ((packet_not_copied)) {
			/* This buffer needs to be replaced, increment
			   the number of buffers we need to free by one */
			cvmx_fau_atomic_add32(
			    FAU_NUM_PACKET_BUFFERS_TO_FREE, 1);

			/* The mbuf keeps the packet buffer, so only the
			   work queue entry goes back to its pool. */
			cvmx_fpa_free(work, CVMX_FPA_WQE_POOL,
			    DONT_WRITEBACK(1));
		} else
			cvm_oct_free_work(work);
	}

	/*
	 * If we hit our limit, schedule another task while we clean up.
	 */
	if (INTERRUPT_LIMIT != 0 && rx_count == MAX_RX_PACKETS) {
		taskqueue_enqueue(cvm_oct_taskq, &cvm_oct_task);
	} else {
		/*
		 * No more packets, all done.  The 1->0 transition must
		 * succeed; anything else means the active flag got out of
		 * sync with the interrupt filter.
		 */
		if (!atomic_cmpset_int(&cvm_oct_rx_active, 1, 0))
			panic("%s: inconsistent rx active state.", __func__);
	}

	/* Restore the original POW group mask */
	cvmx_write_csr(CVMX_POW_PP_GRP_MSKX(coreid), old_group_mask);

	/* Refill the packet buffer pool with one buffer for every zero-copy
	   packet handed to the stack above. */
	number_to_free =
	    cvmx_fau_fetch_and_add32(FAU_NUM_PACKET_BUFFERS_TO_FREE, 0);

	if (number_to_free > 0) {
		cvmx_fau_atomic_add32(FAU_NUM_PACKET_BUFFERS_TO_FREE,
		    -number_to_free);
		num_freed =
		    cvm_oct_mem_fill_fpa(CVMX_FPA_PACKET_POOL,
			CVMX_FPA_PACKET_POOL_SIZE,
			number_to_free);
		/* If allocation came up short, put the shortfall back on the
		   counter so a later pass retries the refill. */
		if (num_freed != number_to_free) {
			cvmx_fau_atomic_add32(FAU_NUM_PACKET_BUFFERS_TO_FREE,
			    number_to_free - num_freed);
		}
	}
	sched_unpin();
}
375
376
377
378 void cvm_oct_rx_initialize(void)
379 {
380 TASK_INIT(&cvm_oct_task, 0, cvm_oct_tasklet_rx, NULL);
381
382 cvm_oct_taskq = taskqueue_create_fast("oct_rx", M_NOWAIT,
383 taskqueue_thread_enqueue,
384 &cvm_oct_taskq);
385 taskqueue_start_threads(&cvm_oct_taskq, min(mp_ncpus, MAXCPU),
386 PI_NET, "octe taskq");
387 }
388
/*
 * Tear down the receive path.  Unimplemented — presumably never reached in
 * practice (TODO: confirm no detach path calls this); panic loudly if it is.
 */
void cvm_oct_rx_shutdown(void)
{
	panic("%s: not yet implemented.", __func__);
}
393
Cache object: 46b43214fad52c130fd93e24b3303602
|