1 /*************************************************************************
2 SPDX-License-Identifier: BSD-3-Clause
3
4 Copyright (c) 2003-2007 Cavium Networks (support@cavium.com). All rights
5 reserved.
6
7 Redistribution and use in source and binary forms, with or without
8 modification, are permitted provided that the following conditions are
9 met:
10
11 * Redistributions of source code must retain the above copyright
12 notice, this list of conditions and the following disclaimer.
13
14 * Redistributions in binary form must reproduce the above
15 copyright notice, this list of conditions and the following
16 disclaimer in the documentation and/or other materials provided
17 with the distribution.
18
19 * Neither the name of Cavium Networks nor the names of
20 its contributors may be used to endorse or promote products
21 derived from this software without specific prior written
22 permission.
23
24 This Software, including technical data, may be subject to U.S. export control laws, including the U.S. Export Administration Act and its associated regulations, and may be subject to export or import regulations in other countries.
25
26 TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
27 AND WITH ALL FAULTS AND CAVIUM NETWORKS MAKES NO PROMISES, REPRESENTATIONS OR WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
28
29 *************************************************************************/
30
31 #include <sys/cdefs.h>
32 __FBSDID("$FreeBSD$");
33
34 #include <sys/param.h>
35 #include <sys/systm.h>
36 #include <sys/bus.h>
37 #include <sys/endian.h>
38 #include <sys/kernel.h>
39 #include <sys/mbuf.h>
40 #include <sys/socket.h>
41 #include <sys/proc.h>
42 #include <sys/sched.h>
43 #include <sys/smp.h>
44 #include <sys/taskqueue.h>
45
46 #include <net/ethernet.h>
47 #include <net/if.h>
48 #include <net/if_var.h>
49
50 #include "wrapper-cvmx-includes.h"
51 #include "ethernet-headers.h"
52
53 extern int pow_receive_group;
54 extern struct ifnet *cvm_oct_device[];
55
56 static struct task cvm_oct_task;
57 static struct taskqueue *cvm_oct_taskq;
58
59 /**
60 * Interrupt handler. The interrupt occurs whenever the POW
61 * transitions from 0->1 packets in our group.
62 *
63 * @param cpl
64 * @param dev_id
65 * @param regs
66 * @return
67 */
68 int cvm_oct_do_interrupt(void *dev_id)
69 {
70 /* Acknowledge the interrupt */
71 if (INTERRUPT_LIMIT)
72 cvmx_write_csr(CVMX_POW_WQ_INT, 1<<pow_receive_group);
73 else
74 cvmx_write_csr(CVMX_POW_WQ_INT, 0x10001<<pow_receive_group);
75
76 /*
77 * Schedule task.
78 */
79 taskqueue_enqueue(cvm_oct_taskq, &cvm_oct_task);
80
81 return FILTER_HANDLED;
82 }
83
84 /**
85 * This is called on receive errors, and determines if the packet
86 * can be dropped early-on in cvm_oct_tasklet_rx().
87 *
88 * @param work Work queue entry pointing to the packet.
89 * @return Non-zero if the packet can be dropped, zero otherwise.
90 */
/**
 * This is called on receive errors, and determines if the packet
 * can be dropped early-on in cvm_oct_tasklet_rx().
 *
 * Packets that are dropped here have their work entry freed; the
 * caller must not touch @work again when this returns non-zero.
 *
 * @param work Work queue entry pointing to the packet.
 * @return Non-zero if the packet can be dropped, zero otherwise.
 */
static inline int cvm_oct_check_rcv_error(cvmx_wqe_t *work)
{
	if ((work->word2.snoip.err_code == 10) && (work->word1.s.len <= 64)) {
		/* Ignore length errors on min size packets. Some equipment
		   incorrectly pads packets to 64+4FCS instead of 60+4FCS.
		   Note these packets still get counted as frame errors. */
	} else
	if (USE_10MBPS_PREAMBLE_WORKAROUND && ((work->word2.snoip.err_code == 5) || (work->word2.snoip.err_code == 7))) {
		/* We received a packet with either an alignment error or a
		   FCS error. This may be signalling that we are running
		   10Mbps with GMXX_RXX_FRM_CTL[PRE_CHK] off. If this is the
		   case we need to parse the packet to determine if we can
		   remove a non spec preamble and generate a correct packet */
		int interface = cvmx_helper_get_interface_num(work->word1.cn38xx.ipprt);
		int index = cvmx_helper_get_interface_index_num(work->word1.cn38xx.ipprt);
		cvmx_gmxx_rxx_frm_ctl_t gmxx_rxx_frm_ctl;
		gmxx_rxx_frm_ctl.u64 = cvmx_read_csr(CVMX_GMXX_RXX_FRM_CTL(index, interface));
		if (gmxx_rxx_frm_ctl.s.pre_chk == 0) {
			/* Hardware preamble checking is off, so the
			   preamble may still be at the front of the data.
			   Scan past any run of 0x55 preamble bytes. */
			uint8_t *ptr = cvmx_phys_to_ptr(work->packet_ptr.s.addr);
			int i = 0;

			while (i < work->word1.s.len-1) {
				if (*ptr != 0x55)
					break;
				ptr++;
				i++;
			}

			if (*ptr == 0xd5) {
				/* Full byte-aligned preamble ending in the
				   0xd5 start-of-frame delimiter: advance
				   past it and trim the length (preamble
				   bytes plus the SFD byte and FCS). */
				/*
				DEBUGPRINT("Port %d received 0xd5 preamble\n", work->word1.cn38xx.ipprt);
				*/
				work->packet_ptr.s.addr += i+1;
				work->word1.s.len -= i+5;
			} else
			if ((*ptr & 0xf) == 0xd) {
				/* The frame is offset by half an octet:
				   the low nibble of this byte is the end
				   of the SFD.  Rebuild the packet by
				   shifting every byte down one nibble. */
				/*
				DEBUGPRINT("Port %d received 0x?d preamble\n", work->word1.cn38xx.ipprt);
				*/
				work->packet_ptr.s.addr += i;
				work->word1.s.len -= i+4;
				for (i = 0; i < work->word1.s.len; i++) {
					*ptr = ((*ptr&0xf0)>>4) | ((*(ptr+1)&0xf)<<4);
					ptr++;
				}
			} else {
				/* Could not recognize a preamble; give up
				   and drop the packet. */
				DEBUGPRINT("Port %d unknown preamble, packet dropped\n", work->word1.cn38xx.ipprt);
				/*
				cvmx_helper_dump_packet(work);
				*/
				cvm_oct_free_work(work);
				return 1;
			}
		}
	} else {
		/* Any other receive error: drop the packet. */
		DEBUGPRINT("Port %d receive error code %d, packet dropped\n", work->word1.cn38xx.ipprt, work->word2.snoip.err_code);
		cvm_oct_free_work(work);
		return 1;
	}

	return 0;
}
153
154 /**
155 * Tasklet function that is scheduled on a core when an interrupt occurs.
156 *
157 * @param unused
158 */
/**
 * Taskqueue handler scheduled by cvm_oct_do_interrupt() when receive
 * work is pending.  Drains work-queue entries from the POW, converts
 * each into an mbuf (zero-copy when possible), and hands it to the
 * network stack, then replenishes the FPA packet-buffer pool.
 *
 * @param context	unused
 * @param pending	taskqueue pending count; unused
 */
void cvm_oct_tasklet_rx(void *context, int pending)
{
	int coreid;
	uint64_t old_group_mask;
	int rx_count = 0;		/* packets processed this invocation */
	int number_to_free;
	int num_freed;
	int packet_not_copied;		/* non-zero on the zero-copy path */

	coreid = cvmx_get_core_num();

	/* Prefetch cvm_oct_device since we know we need it soon */
	CVMX_PREFETCH(cvm_oct_device, 0);

	/* Only allow work for our group (and preserve priorities) */
	old_group_mask = cvmx_read_csr(CVMX_POW_PP_GRP_MSKX(coreid));
	cvmx_write_csr(CVMX_POW_PP_GRP_MSKX(coreid),
		       (old_group_mask & ~0xFFFFull) | 1<<pow_receive_group);

	while (1) {
		struct mbuf *m = NULL;
		int mbuf_in_hw;
		cvmx_wqe_t *work;

		/*
		 * Request the next work entry, but stop once we have
		 * processed MAX_RX_PACKETS when an interrupt limit is
		 * in effect (a follow-up task is scheduled below).
		 */
		if ((INTERRUPT_LIMIT == 0) || (rx_count < MAX_RX_PACKETS))
			work = cvmx_pow_work_request_sync(CVMX_POW_NO_WAIT);
		else
			work = NULL;
		CVMX_PREFETCH(work, 0);
		if (work == NULL)
			break;

		/*
		 * Single-buffer packets can go up the stack zero-copy:
		 * an mbuf pointer is read from just before the packet
		 * buffer (presumably stored there when the FPA pool was
		 * filled — see the pool fill path; TODO confirm).
		 */
		mbuf_in_hw = work->word2.s.bufs == 1;
		if ((mbuf_in_hw)) {
			m = *(struct mbuf **)(cvm_oct_get_buffer_ptr(work->packet_ptr) - sizeof(void *));
			CVMX_PREFETCH(m, offsetof(struct mbuf, m_data));
			CVMX_PREFETCH(m, offsetof(struct mbuf, m_pkthdr));
		}
		CVMX_PREFETCH(cvm_oct_device[work->word1.cn38xx.ipprt], 0);
		//CVMX_PREFETCH(m, 0);

		rx_count++;
		/* Immediately throw away all packets with receive errors */
		if ((work->word2.snoip.rcv_error)) {
			if (cvm_oct_check_rcv_error(work))
				continue;
		}

		/* We can only use the zero copy path if mbufs are in the FPA pool
		   and the packet fits in a single buffer */
		if ((mbuf_in_hw)) {
			CVMX_PREFETCH(m->m_data, 0);

			m->m_pkthdr.len = m->m_len = work->word1.s.len;

			packet_not_copied = 1;

			/*
			 * Adjust the data pointer based on the offset
			 * of the packet within the buffer.
			 */
			m->m_data += (work->packet_ptr.s.back << 7) + (work->packet_ptr.s.addr & 0x7f);
		} else {
			/* We have to copy the packet. First allocate an
			   mbuf for it */
			MGETHDR(m, M_NOWAIT, MT_DATA);
			if (m == NULL) {
				DEBUGPRINT("Port %d failed to allocate mbuf, packet dropped\n", work->word1.cn38xx.ipprt);
				cvm_oct_free_work(work);
				continue;
			}

			/* Check if we've received a packet that was entirely
			   stored in the work entry. This is untested */
			if ((work->word2.s.bufs == 0)) {
				uint8_t *ptr = work->packet_data;

				if (cvmx_likely(!work->word2.s.not_IP)) {
					/* The beginning of the packet moves
					   for IP packets */
					if (work->word2.s.is_v6)
						ptr += 2;
					else
						ptr += 6;
				}
				/* Copy-from-work-entry path is unimplemented. */
				panic("%s: not yet implemented; copy in small packet.", __func__);
				/* No packet buffers to free */
			} else {
				/* Multi-buffer packet: walk the buffer
				   chain, copying each segment. */
				int segments = work->word2.s.bufs;
				cvmx_buf_ptr_t segment_ptr = work->packet_ptr;
				int len = work->word1.s.len;

				while (segments--) {
					/* The pointer to the next buffer is
					   stored 8 bytes before the current
					   buffer's data. */
					cvmx_buf_ptr_t next_ptr = *(cvmx_buf_ptr_t *)cvmx_phys_to_ptr(segment_ptr.s.addr-8);
					/* Octeon Errata PKI-100: The segment
					   size is wrong. Until it is fixed,
					   calculate the segment size based on
					   the packet pool buffer size. When
					   it is fixed, the following line
					   should be replaced with this one:
					   int segment_size = segment_ptr.s.size; */
					int segment_size = CVMX_FPA_PACKET_POOL_SIZE - (segment_ptr.s.addr - (((segment_ptr.s.addr >> 7) - segment_ptr.s.back) << 7));
					/* Don't copy more than what is left
					   in the packet */
					if (segment_size > len)
						segment_size = len;
					/* Copy the data into the packet */
					panic("%s: not yet implemented; copy in packet segments.", __func__);
#if 0
					memcpy(m_put(m, segment_size), cvmx_phys_to_ptr(segment_ptr.s.addr), segment_size);
#endif
					/* Reduce the amount of bytes left
					   to copy */
					len -= segment_size;
					segment_ptr = next_ptr;
				}
			}
			packet_not_copied = 0;
		}

		if (((work->word1.cn38xx.ipprt < TOTAL_NUMBER_OF_PORTS) &&
		    cvm_oct_device[work->word1.cn38xx.ipprt])) {
			struct ifnet *ifp = cvm_oct_device[work->word1.cn38xx.ipprt];

			/* Only accept packets for devices
			   that are currently up */
			if ((ifp->if_flags & IFF_UP)) {
				m->m_pkthdr.rcvif = ifp;

				/* Let the hardware checksum result stand in
				   for software verification when the
				   interface has RXCSUM enabled and the
				   hardware flagged no L3/L4 problems. */
				if ((ifp->if_capenable & IFCAP_RXCSUM) != 0) {
					if ((work->word2.s.not_IP || work->word2.s.IP_exc || work->word2.s.L4_error))
						m->m_pkthdr.csum_flags = 0; /* XXX */
					else {
						m->m_pkthdr.csum_flags = CSUM_IP_CHECKED | CSUM_IP_VALID | CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
						m->m_pkthdr.csum_data = 0xffff;
					}
				} else {
					m->m_pkthdr.csum_flags = 0; /* XXX */
				}

				if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);

				(*ifp->if_input)(ifp, m);
			} else {
				/* Drop any packet received for a device that isn't up */
				/*
				DEBUGPRINT("%s: Device not up, packet dropped\n",
					   if_name(ifp));
				*/
				m_freem(m);
			}
		} else {
			/* Drop any packet received for a device that
			   doesn't exist */
			DEBUGPRINT("Port %d not controlled by FreeBSD, packet dropped\n", work->word1.cn38xx.ipprt);
			m_freem(m);
		}

		/* Check to see if the mbuf and work share
		   the same packet buffer */
		if ((packet_not_copied)) {
			/* This buffer needs to be replaced, increment
			   the number of buffers we need to free by one */
			cvmx_fau_atomic_add32(
				FAU_NUM_PACKET_BUFFERS_TO_FREE, 1);

			/* Only the work entry itself goes back to the
			   pool; the packet buffer now belongs to the
			   mbuf sent up the stack. */
			cvmx_fpa_free(work, CVMX_FPA_WQE_POOL,
				      DONT_WRITEBACK(1));
		} else
			cvm_oct_free_work(work);
	}

	/*
	 * If we hit our limit, schedule another task while we clean up.
	 */
	if (INTERRUPT_LIMIT != 0 && rx_count == MAX_RX_PACKETS) {
		taskqueue_enqueue(cvm_oct_taskq, &cvm_oct_task);
	}

	/* Restore the original POW group mask */
	cvmx_write_csr(CVMX_POW_PP_GRP_MSKX(coreid), old_group_mask);

	/* Refill the packet buffer pool */
	number_to_free =
	    cvmx_fau_fetch_and_add32(FAU_NUM_PACKET_BUFFERS_TO_FREE, 0);

	if (number_to_free > 0) {
		cvmx_fau_atomic_add32(FAU_NUM_PACKET_BUFFERS_TO_FREE,
				      -number_to_free);
		num_freed =
		    cvm_oct_mem_fill_fpa(CVMX_FPA_PACKET_POOL,
					 CVMX_FPA_PACKET_POOL_SIZE,
					 number_to_free);
		/* If the refill came up short, put the shortfall back
		   into the counter so a later pass retries it. */
		if (num_freed != number_to_free) {
			cvmx_fau_atomic_add32(FAU_NUM_PACKET_BUFFERS_TO_FREE,
					      number_to_free - num_freed);
		}
	}
}
358
359 void cvm_oct_rx_initialize(void)
360 {
361 int cpu;
362 TASK_INIT(&cvm_oct_task, 0, cvm_oct_tasklet_rx, NULL);
363
364 cvm_oct_taskq = taskqueue_create_fast("oct_rx", M_NOWAIT,
365 taskqueue_thread_enqueue,
366 &cvm_oct_taskq);
367
368 CPU_FOREACH(cpu) {
369 cpuset_t cpu_mask;
370 CPU_SETOF(cpu, &cpu_mask);
371 taskqueue_start_threads_cpuset(&cvm_oct_taskq, 1, PI_NET,
372 &cpu_mask, "octe taskq");
373 }
374 }
375
/**
 * Tear down packet receive.  Not yet implemented: the driver cannot
 * currently be detached, so this path should be unreachable.
 */
void cvm_oct_rx_shutdown(void)
{
	panic("%s: not yet implemented.", __func__);
}
Cache object: 7b3d1f6f0c8122defe73c4e463180296
|