/*************************************************************************
SPDX-License-Identifier: BSD-3-Clause

Copyright (c) 2003-2007 Cavium Networks (support@cavium.com). All rights
reserved.


Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:

    * Redistributions of source code must retain the above copyright
      notice, this list of conditions and the following disclaimer.

    * Redistributions in binary form must reproduce the above
      copyright notice, this list of conditions and the following
      disclaimer in the documentation and/or other materials provided
      with the distribution.

    * Neither the name of Cavium Networks nor the names of
      its contributors may be used to endorse or promote products
      derived from this software without specific prior written
      permission.

This Software, including technical data, may be subject to U.S. export
control laws, including the U.S. Export Administration Act and its
associated regulations, and may be subject to export or import
regulations in other countries.

TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
AND WITH ALL FAULTS AND CAVIUM NETWORKS MAKES NO PROMISES, REPRESENTATIONS
OR WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH
RESPECT TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY
REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT
DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES
OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR
PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT,
QUIET POSSESSION OR CORRESPONDENCE TO DESCRIPTION.  THE ENTIRE RISK
ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES WITH YOU.

*************************************************************************/

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/endian.h>
#include <sys/kernel.h>
#include <sys/mbuf.h>
#include <sys/socket.h>
#include <sys/proc.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/taskqueue.h>

#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_var.h>

#include "wrapper-cvmx-includes.h"
#include "ethernet-headers.h"

extern int pow_receive_group;
extern struct ifnet *cvm_oct_device[];

static struct task cvm_oct_task;
static struct taskqueue *cvm_oct_taskq;

/**
 * Interrupt handler. The interrupt occurs whenever the POW
 * transitions from 0->1 packets in our group.
 *
 * @param dev_id	Device identifier (unused).
 * @return		FILTER_HANDLED; the real work is deferred to a task.
 */
int cvm_oct_do_interrupt(void *dev_id)
{
	/* Acknowledge the interrupt by clearing our group's WQ_INT bit.
	   When no interrupt limit is in use, also set the corresponding
	   IQ_DIS bit so the queue-depth check cannot immediately
	   re-raise the interrupt before the task drains the queue. */
	if (INTERRUPT_LIMIT)
		cvmx_write_csr(CVMX_POW_WQ_INT, 1 << pow_receive_group);
	else
		cvmx_write_csr(CVMX_POW_WQ_INT, 0x10001 << pow_receive_group);

	/*
	 * Schedule the receive task.
	 */
	taskqueue_enqueue(cvm_oct_taskq, &cvm_oct_task);

	return FILTER_HANDLED;
}

/**
 * This is called on receive errors, and determines if the packet
 * can be dropped early on in cvm_oct_tasklet_rx().
 *
 * @param work Work queue entry pointing to the packet.
 * @return Non-zero if the packet can be dropped, zero otherwise.
 */
static inline int cvm_oct_check_rcv_error(cvmx_wqe_t *work)
{
	if ((work->word2.snoip.err_code == 10) && (work->word1.s.len <= 64)) {
		/* Ignore length errors on min size packets. Some equipment
		   incorrectly pads packets to 64+4FCS instead of 60+4FCS.
		   Note these packets still get counted as frame errors. */
	} else
	if (USE_10MBPS_PREAMBLE_WORKAROUND && ((work->word2.snoip.err_code == 5) || (work->word2.snoip.err_code == 7))) {

		/* We received a packet with either an alignment error or
		   an FCS error. This may be signalling that we are running
		   10Mbps with GMXX_RXX_FRM_CTL[PRE_CHK] off. If this is
		   the case we need to parse the packet to determine if we
		   can remove a non-spec preamble and generate a correct
		   packet. */
		int interface = cvmx_helper_get_interface_num(work->word1.cn38xx.ipprt);
		int index = cvmx_helper_get_interface_index_num(work->word1.cn38xx.ipprt);
		cvmx_gmxx_rxx_frm_ctl_t gmxx_rxx_frm_ctl;
		gmxx_rxx_frm_ctl.u64 = cvmx_read_csr(CVMX_GMXX_RXX_FRM_CTL(index, interface));
		if (gmxx_rxx_frm_ctl.s.pre_chk == 0) {

			uint8_t *ptr = cvmx_phys_to_ptr(work->packet_ptr.s.addr);
			int i = 0;

			while (i < work->word1.s.len - 1) {
				if (*ptr != 0x55)
					break;
				ptr++;
				i++;
			}

			if (*ptr == 0xd5) {
				/*
				DEBUGPRINT("Port %d received 0xd5 preamble\n", work->word1.cn38xx.ipprt);
				*/
				work->packet_ptr.s.addr += i + 1;
				work->word1.s.len -= i + 5;
			} else
			if ((*ptr & 0xf) == 0xd) {
				/*
				DEBUGPRINT("Port %d received 0x?d preamble\n", work->word1.cn38xx.ipprt);
				*/
				work->packet_ptr.s.addr += i;
				work->word1.s.len -= i + 4;
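				/* The payload is misaligned by half a byte;
				   rebuild each byte from the high nibble of
				   the current byte and the low nibble of the
				   byte after it. */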
				for (i = 0; i < work->word1.s.len; i++) {
					*ptr = ((*ptr & 0xf0) >> 4) | ((*(ptr + 1) & 0xf) << 4);
					ptr++;
				}
			} else {
				DEBUGPRINT("Port %d unknown preamble, packet dropped\n", work->word1.cn38xx.ipprt);
				/*
				cvmx_helper_dump_packet(work);
				*/
				cvm_oct_free_work(work);
				return 1;
			}
		}
	} else {
		DEBUGPRINT("Port %d receive error code %d, packet dropped\n", work->word1.cn38xx.ipprt, work->word2.snoip.err_code);
		cvm_oct_free_work(work);
		return 1;
	}

	return 0;
}

/**
 * Receive task, scheduled whenever the interrupt handler signals
 * that work has arrived for our POW group.
 *
 * @param context	Unused.
 * @param pending	Number of queued invocations (unused).
 */
void cvm_oct_tasklet_rx(void *context, int pending)
{
	int coreid;
	uint64_t old_group_mask;
	int rx_count = 0;
	int number_to_free;
	int num_freed;
	int packet_not_copied;

	coreid = cvmx_get_core_num();

	/* Prefetch cvm_oct_device since we know we need it soon */
	CVMX_PREFETCH(cvm_oct_device, 0);

	/* Only allow work for our group (and preserve priorities) */
	old_group_mask = cvmx_read_csr(CVMX_POW_PP_GRP_MSKX(coreid));
	cvmx_write_csr(CVMX_POW_PP_GRP_MSKX(coreid),
	    (old_group_mask & ~0xFFFFull) | 1 << pow_receive_group);
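	/* The low 16 bits of POW_PP_GRP_MSKX select the groups this core
	   accepts work from; the bits above them hold the priorities the
	   comment above refers to, and are left untouched. */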

	while (1) {
		struct mbuf *m = NULL;
		int mbuf_in_hw;
		cvmx_wqe_t *work;

		if ((INTERRUPT_LIMIT == 0) || (rx_count < MAX_RX_PACKETS))
			work = cvmx_pow_work_request_sync(CVMX_POW_NO_WAIT);
		else
			work = NULL;
		CVMX_PREFETCH(work, 0);
		if (work == NULL)
			break;

		mbuf_in_hw = work->word2.s.bufs == 1;
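		/* For a single-buffer packet, the pool fill code stashed a
		   pointer to the owning mbuf in the word just before the
		   packet buffer; recover it so the packet can be handed up
		   without copying. */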
		if (mbuf_in_hw) {
			m = *(struct mbuf **)(cvm_oct_get_buffer_ptr(work->packet_ptr) - sizeof(void *));
			CVMX_PREFETCH(m, offsetof(struct mbuf, m_data));
			CVMX_PREFETCH(m, offsetof(struct mbuf, m_pkthdr));
		}
		CVMX_PREFETCH(cvm_oct_device[work->word1.cn38xx.ipprt], 0);
		/* CVMX_PREFETCH(m, 0); */

		rx_count++;
		/* Immediately throw away all packets with receive errors */
		if (work->word2.snoip.rcv_error) {
			if (cvm_oct_check_rcv_error(work))
				continue;
		}

		/* We can only use the zero copy path if mbufs are in the FPA
		   pool and the packet fits in a single buffer */
		if (mbuf_in_hw) {
			CVMX_PREFETCH(m->m_data, 0);

			m->m_pkthdr.len = m->m_len = work->word1.s.len;

			packet_not_copied = 1;

			/*
			 * Adjust the data pointer based on the offset of the
			 * packet within the buffer: `back' counts the 128
			 * byte cache lines between the start of the buffer
			 * and the line holding the data, and the low seven
			 * address bits locate the data within that line.
			 */
			m->m_data += (work->packet_ptr.s.back << 7) + (work->packet_ptr.s.addr & 0x7f);
		} else {

			/* We have to copy the packet. First allocate an
			   mbuf for it */
			MGETHDR(m, M_NOWAIT, MT_DATA);
			if (m == NULL) {
				DEBUGPRINT("Port %d failed to allocate mbuf, packet dropped\n", work->word1.cn38xx.ipprt);
				cvm_oct_free_work(work);
				continue;
			}

			/* Check if we've received a packet that was entirely
			   stored in the work entry. This is untested */
			if (work->word2.s.bufs == 0) {
				uint8_t *ptr = work->packet_data;

				if (cvmx_likely(!work->word2.s.not_IP)) {
					/* The beginning of the packet moves
					   for IP packets */
					if (work->word2.s.is_v6)
						ptr += 2;
					else
						ptr += 6;
				}
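				/*
				 * A minimal sketch of the missing copy-in,
				 * assuming `ptr' now points at the packet
				 * data within the 96 byte packet_data area
				 * (which fits in a header mbuf); untested,
				 * so it stays disabled and we panic below.
				 */
#if 0
				memcpy(mtod(m, void *), ptr, work->word1.s.len);
				m->m_pkthdr.len = m->m_len = work->word1.s.len;
#endif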
				panic("%s: not yet implemented; copy in small packet.", __func__);
				/* No packet buffers to free */
			} else {
				int segments = work->word2.s.bufs;
				cvmx_buf_ptr_t segment_ptr = work->packet_ptr;
				int len = work->word1.s.len;

				while (segments--) {
					cvmx_buf_ptr_t next_ptr = *(cvmx_buf_ptr_t *)cvmx_phys_to_ptr(segment_ptr.s.addr - 8);
					/* Octeon Errata PKI-100: The segment
					   size is wrong. Until it is fixed,
					   calculate the segment size based on
					   the packet pool buffer size. When
					   it is fixed, the following line
					   should be replaced with this one:
					   int segment_size = segment_ptr.s.size; */
					int segment_size = CVMX_FPA_PACKET_POOL_SIZE - (segment_ptr.s.addr - (((segment_ptr.s.addr >> 7) - segment_ptr.s.back) << 7));
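					/* The expression rounds the segment
					   address down to its 128 byte cache
					   line, backs up `back' lines to the
					   start of the FPA buffer, and uses
					   whatever remains of the pool buffer
					   beyond the data as the size. */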
					/* Don't copy more than what is left
					   in the packet */
					if (segment_size > len)
						segment_size = len;
					/* Copy the data into the packet */
					panic("%s: not yet implemented; copy in packet segments.", __func__);
#if 0
					memcpy(m_put(m, segment_size), cvmx_phys_to_ptr(segment_ptr.s.addr), segment_size);
#endif
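					/*
					 * m_put() above is a Linux-style
					 * skb_put() placeholder with no
					 * FreeBSD counterpart.  A possible
					 * replacement, assuming m_append(9)
					 * semantics and untested here:
					 *
					 * if (m_append(m, segment_size,
					 *     cvmx_phys_to_ptr(segment_ptr.s.addr)) == 0)
					 *	panic("%s: m_append failed", __func__);
					 */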
					/* Reduce the amount of bytes left
					   to copy */
					len -= segment_size;
					segment_ptr = next_ptr;
				}
			}
			packet_not_copied = 0;
		}

		if (work->word1.cn38xx.ipprt < TOTAL_NUMBER_OF_PORTS &&
		    cvm_oct_device[work->word1.cn38xx.ipprt] != NULL) {
			struct ifnet *ifp = cvm_oct_device[work->word1.cn38xx.ipprt];

			/* Only accept packets for devices
			   that are currently up */
			if ((ifp->if_flags & IFF_UP) != 0) {
				m->m_pkthdr.rcvif = ifp;

				if ((ifp->if_capenable & IFCAP_RXCSUM) != 0) {
					if (work->word2.s.not_IP || work->word2.s.IP_exc || work->word2.s.L4_error)
						m->m_pkthdr.csum_flags = 0; /* XXX */
					else {
						m->m_pkthdr.csum_flags = CSUM_IP_CHECKED | CSUM_IP_VALID | CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
						m->m_pkthdr.csum_data = 0xffff;
					}
				} else {
					m->m_pkthdr.csum_flags = 0; /* XXX */
				}

				if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);

				(*ifp->if_input)(ifp, m);
			} else {
				/* Drop any packet received for a device
				   that isn't up */
				/*
				DEBUGPRINT("%s: Device not up, packet dropped\n",
				    if_name(ifp));
				*/
				m_freem(m);
			}
		} else {
			/* Drop any packet received for a device that
			   doesn't exist */
			DEBUGPRINT("Port %d not controlled by FreeBSD, packet dropped\n", work->word1.cn38xx.ipprt);
			m_freem(m);
		}

		/* Check to see if the mbuf and work share
		   the same packet buffer */
		if (packet_not_copied) {
			/* The mbuf still owns the packet buffer, so the
			   buffer must be replaced in the pool: count it for
			   the refill below, and return only the work queue
			   entry to its FPA pool */
			cvmx_fau_atomic_add32(
			    FAU_NUM_PACKET_BUFFERS_TO_FREE, 1);

			cvmx_fpa_free(work, CVMX_FPA_WQE_POOL,
			    DONT_WRITEBACK(1));
		} else
			cvm_oct_free_work(work);
	}

	/*
	 * If we hit our limit, schedule another task while we clean up.
	 */
	if (INTERRUPT_LIMIT != 0 && rx_count == MAX_RX_PACKETS) {
		taskqueue_enqueue(cvm_oct_taskq, &cvm_oct_task);
	}

	/* Restore the original POW group mask */
	cvmx_write_csr(CVMX_POW_PP_GRP_MSKX(coreid), old_group_mask);

	/* Refill the packet buffer pool */
	number_to_free =
	    cvmx_fau_fetch_and_add32(FAU_NUM_PACKET_BUFFERS_TO_FREE, 0);

	if (number_to_free > 0) {
		cvmx_fau_atomic_add32(FAU_NUM_PACKET_BUFFERS_TO_FREE,
		    -number_to_free);
		num_freed =
		    cvm_oct_mem_fill_fpa(CVMX_FPA_PACKET_POOL,
			CVMX_FPA_PACKET_POOL_SIZE,
			number_to_free);
		if (num_freed != number_to_free) {
			cvmx_fau_atomic_add32(FAU_NUM_PACKET_BUFFERS_TO_FREE,
			    number_to_free - num_freed);
		}
	}
}

/**
 * Set up the receive task and start a taskqueue thread bound to
 * each CPU.
 */
void cvm_oct_rx_initialize(void)
{
	int cpu;

	TASK_INIT(&cvm_oct_task, 0, cvm_oct_tasklet_rx, NULL);

	cvm_oct_taskq = taskqueue_create_fast("oct_rx", M_NOWAIT,
	    taskqueue_thread_enqueue, &cvm_oct_taskq);

	CPU_FOREACH(cpu) {
		cpuset_t cpu_mask;

		CPU_SETOF(cpu, &cpu_mask);
		taskqueue_start_threads_cpuset(&cvm_oct_taskq, 1, PI_NET,
		    &cpu_mask, "octe taskq");
	}
}

/**
 * Tear down receive processing; not yet implemented.
 */
void cvm_oct_rx_shutdown(void)
{
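	/*
	 * A minimal sketch of what a shutdown would need, assuming no
	 * further interrupts can fire; untested, so it stays disabled
	 * and we panic below instead:
	 */
#if 0
	/* Stop the hardware from raising more work interrupts. */
	cvmx_write_csr(CVMX_POW_WQ_INT_THRX(pow_receive_group), 0);
	/* Wait for any in-flight task, then destroy the taskqueue. */
	taskqueue_drain(cvm_oct_taskq, &cvm_oct_task);
	taskqueue_free(cvm_oct_taskq);
#endif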
	panic("%s: not yet implemented.", __func__);
}