/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2010-2016 Solarflare Communications Inc.
 * All rights reserved.
 *
 * This software was developed in part by Philip Paeps under contract for
 * Solarflare Communications, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 *    this list of conditions and the following disclaimer in the documentation
 *    and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT OWNER OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
 * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * The views and conclusions contained in the software and documentation are
 * those of the authors and should not be interpreted as representing official
 * policies, either expressed or implied, of the FreeBSD Project.
 *
 * $FreeBSD$
 */

#ifndef _SFXGE_TX_H
#define _SFXGE_TX_H

#include <netinet/in.h>
#include <netinet/ip.h>
#include <netinet/tcp.h>

/* If defined, parse TX packets directly in if_transmit
 * for better cache locality and reduced time under the TX lock.
 */
#define SFXGE_TX_PARSE_EARLY 1

/* Maximum size of TSO packet */
#define SFXGE_TSO_MAX_SIZE (65535)

/*
 * Maximum number of segments to be created for a TSO packet.
 * Allow for a reasonable minimum MSS of 512.
 */
#define SFXGE_TSO_MAX_SEGS howmany(SFXGE_TSO_MAX_SIZE, 512)
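
/*
 * For example, with SFXGE_TSO_MAX_SIZE = 65535 and a minimum MSS of 512,
 * howmany(65535, 512) rounds up to 128, so at most 128 segments are
 * generated for a single TSO packet.
 */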

/* Maximum number of DMA segments needed to map an mbuf chain.  With
 * TSO, the mbuf length may be just over 64K, divided into 2K mbuf
 * clusters, allowing for the first cluster not being aligned to a 2K
 * boundary.
 * The packet header may be split into two segments because of, for
 * example, VLAN header insertion.
 * The chain could be longer than this initially, but can be shortened
 * with m_collapse().
 */
#define SFXGE_TX_MAPPING_MAX_SEG \
        (2 + howmany(SFXGE_TSO_MAX_SIZE, MCLBYTES) + 1)
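
/*
 * For example, with the common 2K cluster size (MCLBYTES == 2048) this
 * evaluates to 2 + howmany(65535, 2048) + 1 = 2 + 32 + 1 = 35 segments:
 * two for a possibly split header, 32 for the payload clusters, and one
 * extra to cover a misaligned first cluster.
 */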

/*
 * Buffer mapping flags.
 *
 * Buffers and DMA mappings must be freed when the last descriptor
 * referring to them is completed.  Set the TX_BUF_UNMAP and
 * TX_BUF_MBUF flags on the last descriptor generated for an mbuf
 * chain.  Set only the TX_BUF_UNMAP flag on a descriptor referring to
 * a heap buffer.
 */
enum sfxge_tx_buf_flags {
        TX_BUF_UNMAP = 1,
        TX_BUF_MBUF = 2,
};
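
/*
 * Rough sketch of how these flags might be consumed on the completion path
 * (illustrative only; the actual logic lives in sfxge_tx_qcomplete(), and
 * the M_TXBUF malloc type below is a hypothetical placeholder):
 *
 *	if (stmp->flags & TX_BUF_UNMAP) {
 *		bus_dmamap_unload(txq->packet_dma_tag, stmp->map);
 *		if (stmp->flags & TX_BUF_MBUF)
 *			m_freem(stmp->u.mbuf);
 *		else
 *			free(stmp->u.heap_buf, M_TXBUF);
 *	}
 */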

/*
 * Buffer mapping information for descriptors in flight.
 */
struct sfxge_tx_mapping {
        union {
                struct mbuf *mbuf;
                caddr_t heap_buf;
        } u;
        bus_dmamap_t map;
        enum sfxge_tx_buf_flags flags;
};

#define SFXGE_TX_DPL_GET_PKT_LIMIT_DEFAULT		(64 * 1024)
#define SFXGE_TX_DPL_GET_NON_TCP_PKT_LIMIT_DEFAULT	1024
#define SFXGE_TX_DPL_PUT_PKT_LIMIT_DEFAULT		1024

/*
 * Deferred packet list.
 */
struct sfxge_tx_dpl {
        unsigned int std_get_max;            /* Maximum number of packets in get list */
        unsigned int std_get_non_tcp_max;    /* Maximum number of non-TCP packets in get list */
        unsigned int std_put_max;            /* Maximum number of packets in put list */
        uintptr_t std_put;                   /* Head of put list. */
        struct mbuf *std_get;                /* Head of get list. */
        struct mbuf **std_getp;              /* Tail of get list. */
        unsigned int std_get_count;          /* Packets in get list. */
        unsigned int std_get_non_tcp_count;  /* Non-TCP packets in get list */
        unsigned int std_get_hiwat;          /* Get list high watermark */
        unsigned int std_put_hiwat;          /* Put list high watermark */
};
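
/*
 * Illustrative sketch (an assumption about usage, not code taken from the
 * driver): the get list is a singly linked list of packets chained via
 * m_nextpkt, with std_getp tracking the tail link, so appending an mbuf
 * under the queue lock might look like:
 *
 *	*stdp->std_getp = m;
 *	stdp->std_getp = &m->m_nextpkt;
 *	stdp->std_get_count++;
 */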

#define SFXGE_TX_BUFFER_SIZE	0x400
#define SFXGE_TX_HEADER_SIZE	0x100
#define SFXGE_TX_COPY_THRESHOLD	0x200

enum sfxge_txq_state {
        SFXGE_TXQ_UNINITIALIZED = 0,
        SFXGE_TXQ_INITIALIZED,
        SFXGE_TXQ_STARTED
};

enum sfxge_txq_type {
        SFXGE_TXQ_NON_CKSUM = 0,
        SFXGE_TXQ_IP_CKSUM,
        SFXGE_TXQ_IP_TCP_UDP_CKSUM,
        SFXGE_TXQ_NTYPES
};

#define SFXGE_EVQ0_N_TXQ(_sc)						\
        ((_sc)->txq_dynamic_cksum_toggle_supported ?			\
            1 : SFXGE_TXQ_NTYPES)

#define SFXGE_TXQ_UNBLOCK_LEVEL(_entries)	(EFX_TXQ_LIMIT(_entries) / 4)

#define SFXGE_TX_BATCH	64

#define SFXGE_TXQ_LOCK_INIT(_txq, _ifname, _txq_index)			\
        do {								\
                struct sfxge_txq *__txq = (_txq);			\
                							\
                snprintf((__txq)->lock_name,				\
                         sizeof((__txq)->lock_name),			\
                         "%s:txq%u", (_ifname), (_txq_index));		\
                mtx_init(&(__txq)->lock, (__txq)->lock_name,		\
                         NULL, MTX_DEF);				\
        } while (B_FALSE)
#define SFXGE_TXQ_LOCK_DESTROY(_txq)					\
        mtx_destroy(&(_txq)->lock)
#define SFXGE_TXQ_LOCK(_txq)						\
        mtx_lock(&(_txq)->lock)
#define SFXGE_TXQ_TRYLOCK(_txq)						\
        mtx_trylock(&(_txq)->lock)
#define SFXGE_TXQ_UNLOCK(_txq)						\
        mtx_unlock(&(_txq)->lock)
#define SFXGE_TXQ_LOCK_ASSERT_OWNED(_txq)				\
        mtx_assert(&(_txq)->lock, MA_OWNED)
#define SFXGE_TXQ_LOCK_ASSERT_NOTOWNED(_txq)				\
        mtx_assert(&(_txq)->lock, MA_NOTOWNED)
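
/*
 * Typical usage sketch (illustrative, not taken verbatim from the driver):
 *
 *	SFXGE_TXQ_LOCK(txq);
 *	... post descriptors, update txq->added ...
 *	SFXGE_TXQ_UNLOCK(txq);
 *
 * Functions that require the caller to hold the queue lock can use
 * SFXGE_TXQ_LOCK_ASSERT_OWNED(txq) to document and enforce that contract.
 */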

struct sfxge_txq {
        /* The following fields should be written very rarely */
        struct sfxge_softc *sc;
        enum sfxge_txq_state init_state;
        enum sfxge_flush_state flush_state;
        unsigned int tso_fw_assisted;
        enum sfxge_txq_type type;
        unsigned int evq_index;
        efsys_mem_t mem;
        unsigned int buf_base_id;
        unsigned int entries;
        unsigned int ptr_mask;
        unsigned int max_pkt_desc;

        struct sfxge_tx_mapping *stmp;       /* Packets in flight. */
        bus_dma_tag_t packet_dma_tag;
        efx_desc_t *pend_desc;
        efx_txq_t *common;

        efsys_mem_t *tsoh_buffer;

        char lock_name[SFXGE_LOCK_NAME_MAX];

        /* This field changes more often and is read regularly on both
         * the initiation and completion paths
         */
        int blocked __aligned(CACHE_LINE_SIZE);

        /* The following fields change more often, and are used mostly
         * on the initiation path
         */
        struct mtx lock __aligned(CACHE_LINE_SIZE);
        struct sfxge_tx_dpl dpl;             /* Deferred packet list. */
        unsigned int n_pend_desc;
        unsigned int added;
        unsigned int reaped;

        /* The last (or constant) set of HW offloads requested on the queue */
        uint16_t hw_cksum_flags;

        /* The last VLAN TCI seen on the queue if FW-assisted tagging is
         * used
         */
        uint16_t hw_vlan_tci;

        /* Statistics */
        unsigned long tso_bursts;
        unsigned long tso_packets;
        unsigned long tso_long_headers;
        unsigned long collapses;
        unsigned long drops;
        unsigned long get_overflow;
        unsigned long get_non_tcp_overflow;
        unsigned long put_overflow;
        unsigned long netdown_drops;
        unsigned long tso_pdrop_too_many;
        unsigned long tso_pdrop_no_rsrc;

        /* The following fields change more often, and are used mostly
         * on the completion path
         */
        unsigned int pending __aligned(CACHE_LINE_SIZE);
        unsigned int completed;
        struct sfxge_txq *next;
};

struct sfxge_evq;

extern uint64_t sfxge_tx_get_drops(struct sfxge_softc *sc);

extern int sfxge_tx_init(struct sfxge_softc *sc);
extern void sfxge_tx_fini(struct sfxge_softc *sc);
extern int sfxge_tx_start(struct sfxge_softc *sc);
extern void sfxge_tx_stop(struct sfxge_softc *sc);
extern void sfxge_tx_qcomplete(struct sfxge_txq *txq, struct sfxge_evq *evq);
extern void sfxge_tx_qflush_done(struct sfxge_txq *txq);
extern void sfxge_if_qflush(struct ifnet *ifp);
extern int sfxge_if_transmit(struct ifnet *ifp, struct mbuf *m);

#endif