/*************************************************************************
Copyright (c) 2003-2007  Cavium Networks (support@cavium.com). All rights
reserved.


Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:

    * Redistributions of source code must retain the above copyright
      notice, this list of conditions and the following disclaimer.

    * Redistributions in binary form must reproduce the above
      copyright notice, this list of conditions and the following
      disclaimer in the documentation and/or other materials provided
      with the distribution.

    * Neither the name of Cavium Networks nor the names of
      its contributors may be used to endorse or promote products
      derived from this software without specific prior written
      permission.

This Software, including technical data, may be subject to U.S. export
control laws, including the U.S. Export Administration Act and its
associated regulations, and may be subject to export or import
regulations in other countries.

TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
AND WITH ALL FAULTS AND CAVIUM NETWORKS MAKES NO PROMISES, REPRESENTATIONS
OR WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH
RESPECT TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY
REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT
DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES
OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR
PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT,
QUIET POSSESSION OR CORRESPONDENCE TO DESCRIPTION.  THE ENTIRE RISK
ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES WITH YOU.

*************************************************************************/

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/endian.h>
#include <sys/kernel.h>
#include <sys/mbuf.h>
#include <sys/socket.h>

#include <net/bpf.h>
#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_var.h>

#include "wrapper-cvmx-includes.h"
#include "ethernet-headers.h"
/* You can define GET_MBUF_QOS() to override how the mbuf output function
   determines which output queue is used. The default implementation
   always uses the base queue for the port. If, for example, you wanted
   to use the m->priority field, define GET_MBUF_QOS as:
   #define GET_MBUF_QOS(m) ((m)->priority) */
#ifndef GET_MBUF_QOS
    #define GET_MBUF_QOS(m) 0
#endif
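
/* A hedged, hypothetical example of such an override: FreeBSD mbufs have
   no "priority" member, so a build that wants per-packet QoS could instead
   map the VLAN PCP bits to a queue when a tag is present:

   #define GET_MBUF_QOS(m) (((m)->m_flags & M_VLANTAG) ?		\
	EVL_PRIOFTAG((m)->m_pkthdr.ether_vtag) : 0)

   M_VLANTAG and EVL_PRIOFTAG() are standard FreeBSD definitions; whether
   PCP is an appropriate queue selector is a policy decision, and any
   out-of-range value is clamped to queue 0 below. */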


/**
 * Packet transmit
 *
 * @param m    Packet to send
 * @param ifp  Interface to transmit the packet on
 * @return 0 on success; nonzero if the packet was dropped
 */
int cvm_oct_xmit(struct mbuf *m, struct ifnet *ifp)
{
	cvmx_pko_command_word0_t pko_command;
	cvmx_buf_ptr_t hw_buffer;
	int dropped;
	int qos;
	cvm_oct_private_t *priv = (cvm_oct_private_t *)ifp->if_softc;
	int32_t in_use;
	int32_t buffers_to_free;
	cvmx_wqe_t *work;

	/* Prefetch the private data structure.
	   It is larger than one cache line. */
	CVMX_PREFETCH(priv, 0);

	/* Start off assuming no drop */
	dropped = 0;

	/* The check on CVMX_PKO_QUEUES_PER_PORT_* is designed to completely
	   remove "qos" in the event neither interface supports multiple
	   queues per port */
	if ((CVMX_PKO_QUEUES_PER_PORT_INTERFACE0 > 1) ||
	    (CVMX_PKO_QUEUES_PER_PORT_INTERFACE1 > 1)) {
		qos = GET_MBUF_QOS(m);
		if (qos <= 0)
			qos = 0;
		else if (qos >= cvmx_pko_get_num_queues(priv->port))
			qos = 0;
	} else
		qos = 0;

	/* The CN3XXX series of parts has an errata (GMX-401) which causes
	   the GMX block to hang if a collision occurs towards the end of a
	   <68 byte packet. As a workaround for this, we pad small packets
	   out to 64 bytes whenever we are in half duplex mode. We don't
	   handle the case of having a small packet but no room to add the
	   padding; the kernel should always give us at least a cache line. */
	if (__predict_false(m->m_pkthdr.len < 64) && OCTEON_IS_MODEL(OCTEON_CN3XXX)) {
		cvmx_gmxx_prtx_cfg_t gmx_prt_cfg;
		int interface = INTERFACE(priv->port);
		int index = INDEX(priv->port);

		if (interface < 2) {
			/* We only need to pad packet in half duplex mode */
			gmx_prt_cfg.u64 = cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index, interface));
			if (gmx_prt_cfg.s.duplex == 0) {
				static uint8_t pad[64];

				if (!m_append(m, sizeof pad - m->m_pkthdr.len, pad))
					printf("%s: unable to pad small packet.\n", __func__);
			}
		}
	}

#ifdef OCTEON_VENDOR_RADISYS
	/*
	 * The RSYS4GBE will hang if asked to transmit a packet less than
	 * 60 bytes.
	 */
	if (__predict_false(m->m_pkthdr.len < 60) &&
	    cvmx_sysinfo_get()->board_type == CVMX_BOARD_TYPE_CUST_RADISYS_RSYS4GBE) {
		static uint8_t pad[60];

		if (!m_append(m, sizeof pad - m->m_pkthdr.len, pad))
			printf("%s: unable to pad small packet.\n", __func__);
	}
#endif

	/*
	 * If the packet is not fragmented.
	 */
	if (m->m_pkthdr.len == m->m_len) {
		/* Build the PKO buffer pointer */
		hw_buffer.u64 = 0;
		hw_buffer.s.addr = cvmx_ptr_to_phys(m->m_data);
		hw_buffer.s.pool = 0;
		hw_buffer.s.size = m->m_len;

		/* Build the PKO command */
		pko_command.u64 = 0;
		pko_command.s.segs = 1;
		pko_command.s.dontfree = 1; /* Do not put this buffer into the FPA. */

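		/* A single segment needs no gather list. */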
		work = NULL;
	} else {
		struct mbuf *n;
		unsigned segs;
		uint64_t *gp;

		/*
		 * The packet is fragmented, we need to send a list of
		 * segments in memory we borrow from the WQE pool.
		 */
		work = cvmx_fpa_alloc(CVMX_FPA_WQE_POOL);
		if (work == NULL) {
			m_freem(m);
			if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
			return 1;
		}

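		/* Treat the borrowed WQE memory as a flat array of 64-bit
		   gather words (cvmx_buf_ptr_t). */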
		segs = 0;
		gp = (uint64_t *)work;
		for (n = m; n != NULL; n = n->m_next) {
			if (segs == CVMX_FPA_WQE_POOL_SIZE / sizeof (uint64_t))
				panic("%s: too many segments in packet; call m_collapse().", __func__);

			/* Build the PKO buffer pointer */
			hw_buffer.u64 = 0;
			hw_buffer.s.i = 1; /* Do not put this buffer into the FPA. */
			hw_buffer.s.addr = cvmx_ptr_to_phys(n->m_data);
			hw_buffer.s.pool = 0;
			hw_buffer.s.size = n->m_len;

			*gp++ = hw_buffer.u64;
			segs++;
		}

		/* Build the PKO buffer gather list pointer */
		hw_buffer.u64 = 0;
		hw_buffer.s.addr = cvmx_ptr_to_phys(work);
		hw_buffer.s.pool = CVMX_FPA_WQE_POOL;
		hw_buffer.s.size = segs;

		/* Build the PKO command */
		pko_command.u64 = 0;
		pko_command.s.segs = segs;
		pko_command.s.gather = 1;
		pko_command.s.dontfree = 0; /* Put the WQE above back into the FPA. */
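
		/*
		 * Note: with gather = 1, the buffer pointer built above
		 * names the list itself and its size field is the entry
		 * count rather than a byte count.  Since dontfree is clear
		 * and the pointer's pool field is CVMX_FPA_WQE_POOL, PKO
		 * returns the borrowed gather buffer to that pool once the
		 * packet has been sent.
		 */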
	}

	/* Finish building the PKO command */
	pko_command.s.n2 = 1; /* Don't pollute L2 with the outgoing packet */
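	/*
	 * reg0, size0 and subone0 ask PKO to subtract one from the 32-bit
	 * FAU counter at priv->fau + qos*4 when transmission completes;
	 * this balances the fetch-and-add below, so the counter tracks how
	 * many mbufs the hardware still holds.
	 */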
	pko_command.s.reg0 = priv->fau+qos*4;
	pko_command.s.total_bytes = m->m_pkthdr.len;
	pko_command.s.size0 = CVMX_FAU_OP_SIZE_32;
	pko_command.s.subone0 = 1;

	/* Use hardware TCP/UDP checksum insertion if it was requested */
	if ((m->m_pkthdr.csum_flags & (CSUM_TCP | CSUM_UDP)) != 0) {
		/* ipoffp1 is the offset to the IP header plus one; PKO
		   uses it to locate the L3 header when it inserts the
		   checksum */
		pko_command.s.ipoffp1 = ETHER_HDR_LEN + 1;
	}

	/*
	 * XXX
	 * Could use a different free queue (and different FAU address) per
	 * core instead of per QoS, to reduce contention here.
	 */
	IF_LOCK(&priv->tx_free_queue[qos]);
	/* Account for this packet and get the number of mbufs already in
	   use by the hardware */
	in_use = cvmx_fau_fetch_and_add32(priv->fau+qos*4, 1);
	buffers_to_free = cvmx_fau_fetch_and_add32(FAU_NUM_PACKET_BUFFERS_TO_FREE, 0);

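	/*
	 * The send is split in two: _prepare() starts the PKO command and
	 * _finish() below completes it.  Both must use the same locking
	 * method, CVMX_PKO_LOCK_CMD_QUEUE here.
	 */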
	cvmx_pko_send_packet_prepare(priv->port, priv->queue + qos, CVMX_PKO_LOCK_CMD_QUEUE);

	/* Drop this packet if we have too many already queued to the HW;
	   otherwise send it to the output queue. */
	if (_IF_QFULL(&priv->tx_free_queue[qos])) {
		dropped = 1;
	} else if (__predict_false(cvmx_pko_send_packet_finish(priv->port,
	    priv->queue + qos, pko_command, hw_buffer,
	    CVMX_PKO_LOCK_CMD_QUEUE))) {
		DEBUGPRINT("%s: Failed to send the packet\n", if_name(ifp));
		dropped = 1;
	}

	if (__predict_false(dropped)) {
		m_freem(m);
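		/* Undo the in-use accounting above; the hardware never saw
		   this packet. */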
		cvmx_fau_atomic_add32(priv->fau+qos*4, -1);
		if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
	} else {
		/* Put this packet on the queue to be freed later */
		_IF_ENQUEUE(&priv->tx_free_queue[qos], m);

		/* Pass it to any BPF listeners. */
		ETHER_BPF_MTAP(ifp, m);

		if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);
		if_inc_counter(ifp, IFCOUNTER_OBYTES, m->m_pkthdr.len);
	}

	/* Free queued mbufs that the hardware is no longer using.  PKO
	   decrements the FAU counter as each packet completes, so any
	   queue entries beyond in_use have already been transmitted. */
	while (_IF_QLEN(&priv->tx_free_queue[qos]) > in_use) {
		_IF_DEQUEUE(&priv->tx_free_queue[qos], m);
		m_freem(m);
	}
	IF_UNLOCK(&priv->tx_free_queue[qos]);

	return dropped;
}


/**
 * Free all mbufs that are currently queued for TX.
 *
 * @param ifp Interface being shut down
 */
void cvm_oct_tx_shutdown(struct ifnet *ifp)
{
	cvm_oct_private_t *priv = (cvm_oct_private_t *)ifp->if_softc;
	int qos;

	for (qos = 0; qos < 16; qos++) {
		IF_DRAIN(&priv->tx_free_queue[qos]);
	}
}