1 /*-
2 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
3 *
4 * Copyright (c) 2012 Chelsio Communications, Inc.
5 * All rights reserved.
6 * Written by: Navdeep Parhar <np@FreeBSD.org>
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
10 * are met:
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
18 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
21 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27 * SUCH DAMAGE.
28 */
29
30 #include <sys/cdefs.h>
31 __FBSDID("$FreeBSD$");
32
33 #include "opt_inet.h"
34
35 #include <sys/param.h>
36 #include <sys/aio.h>
37 #include <sys/bio.h>
38 #include <sys/file.h>
39 #include <sys/systm.h>
40 #include <sys/kernel.h>
41 #include <sys/ktr.h>
42 #include <sys/module.h>
43 #include <sys/protosw.h>
44 #include <sys/proc.h>
45 #include <sys/domain.h>
46 #include <sys/socket.h>
47 #include <sys/socketvar.h>
48 #include <sys/taskqueue.h>
49 #include <sys/uio.h>
50 #include <netinet/in.h>
51 #include <netinet/in_pcb.h>
52 #include <netinet/ip.h>
53 #include <netinet/tcp_var.h>
54 #define TCPSTATES
55 #include <netinet/tcp_fsm.h>
56 #include <netinet/toecore.h>
57
58 #include <vm/vm.h>
59 #include <vm/vm_extern.h>
60 #include <vm/vm_param.h>
61 #include <vm/pmap.h>
62 #include <vm/vm_map.h>
63 #include <vm/vm_page.h>
64 #include <vm/vm_object.h>
65
66 #include <cam/scsi/scsi_all.h>
67 #include <cam/ctl/ctl_io.h>
68
69 #ifdef TCP_OFFLOAD
70 #include "common/common.h"
71 #include "common/t4_msg.h"
72 #include "common/t4_regs.h"
73 #include "common/t4_tcb.h"
74 #include "tom/t4_tom.h"
75
76 /*
77 * Use the 'backend3' field in AIO jobs to store the amount of data
78 * received by the AIO job so far.
79 */
80 #define aio_received backend3
81
82 static void aio_ddp_requeue_task(void *context, int pending);
83 static void ddp_complete_all(struct toepcb *toep, int error);
84 static void t4_aio_cancel_active(struct kaiocb *job);
85 static void t4_aio_cancel_queued(struct kaiocb *job);
86
87 static TAILQ_HEAD(, pageset) ddp_orphan_pagesets;
88 static struct mtx ddp_orphan_pagesets_lock;
89 static struct task ddp_orphan_task;
90
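/*
 * A sketch of the limit below: the length of a single DDP buffer is stored
 * in the TCB's RX_DDP_BUF{0,1}_LEN field, so a buffer can be no larger than
 * that field's mask allows.
 */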
91 #define MAX_DDP_BUFFER_SIZE (M_TCB_RX_DDP_BUF0_LEN)
92
93 /*
94 * A page set holds information about a buffer used for DDP. The page
95 * set holds resources such as the VM pages backing the buffer (either
96 * held or wired) and the page pods associated with the buffer.
97 * Recently used page sets are cached to allow for efficient reuse of
98 * buffers (avoiding the need to re-fault in pages, hold them, etc.).
99 * Note that cached page sets keep the backing pages wired. The
100 * number of wired pages is capped by only allowing for two wired
101 * pagesets per connection. This is not a perfect cap, but is a
102 * trade-off for performance.
103 *
104 * If an application ping-pongs two buffers for a connection via
105 * aio_read(2) then those buffers should remain wired and expensive VM
106 * fault lookups should be avoided after each buffer has been used
107 * once. If an application uses more than two buffers then this will
108 * fall back to doing expensive VM fault lookups for each operation.
109 */
110 static void
111 free_pageset(struct tom_data *td, struct pageset *ps)
112 {
113 vm_page_t p;
114 int i;
115
116 if (ps->prsv.prsv_nppods > 0)
117 t4_free_page_pods(&ps->prsv);
118
119 for (i = 0; i < ps->npages; i++) {
120 p = ps->pages[i];
121 vm_page_unwire(p, PQ_INACTIVE);
122 }
123 mtx_lock(&ddp_orphan_pagesets_lock);
124 TAILQ_INSERT_TAIL(&ddp_orphan_pagesets, ps, link);
125 taskqueue_enqueue(taskqueue_thread, &ddp_orphan_task);
126 mtx_unlock(&ddp_orphan_pagesets_lock);
127 }
128
129 static void
130 ddp_free_orphan_pagesets(void *context, int pending)
131 {
132 struct pageset *ps;
133
134 mtx_lock(&ddp_orphan_pagesets_lock);
135 while (!TAILQ_EMPTY(&ddp_orphan_pagesets)) {
136 ps = TAILQ_FIRST(&ddp_orphan_pagesets);
137 TAILQ_REMOVE(&ddp_orphan_pagesets, ps, link);
138 mtx_unlock(&ddp_orphan_pagesets_lock);
139 if (ps->vm)
140 vmspace_free(ps->vm);
141 free(ps, M_CXGBE);
142 mtx_lock(&ddp_orphan_pagesets_lock);
143 }
144 mtx_unlock(&ddp_orphan_pagesets_lock);
145 }
146
147 static void
148 recycle_pageset(struct toepcb *toep, struct pageset *ps)
149 {
150
151 DDP_ASSERT_LOCKED(toep);
152 if (!(toep->ddp.flags & DDP_DEAD)) {
153 KASSERT(toep->ddp.cached_count + toep->ddp.active_count <
154 nitems(toep->ddp.db), ("too many wired pagesets"));
155 TAILQ_INSERT_HEAD(&toep->ddp.cached_pagesets, ps, link);
156 toep->ddp.cached_count++;
157 } else
158 free_pageset(toep->td, ps);
159 }
160
161 static void
162 ddp_complete_one(struct kaiocb *job, int error)
163 {
164 long copied;
165
166 /*
167 * If this job had copied data out of the socket buffer before
168 * it was cancelled, report it as a short read rather than an
169 * error.
170 */
171 copied = job->aio_received;
172 if (copied != 0 || error == 0)
173 aio_complete(job, copied, 0);
174 else
175 aio_complete(job, -1, error);
176 }
177
178 static void
179 free_ddp_buffer(struct tom_data *td, struct ddp_buffer *db)
180 {
181
182 if (db->job) {
183 /*
184 * XXX: If we are un-offloading the socket then we
185 * should requeue these on the socket somehow. If we
186 * got a FIN from the remote end, then this completes
187 * any remaining requests with an EOF read.
188 */
189 if (!aio_clear_cancel_function(db->job))
190 ddp_complete_one(db->job, 0);
191 }
192
193 if (db->ps)
194 free_pageset(td, db->ps);
195 }
196
197 void
198 ddp_init_toep(struct toepcb *toep)
199 {
200
201 TAILQ_INIT(&toep->ddp.aiojobq);
202 TASK_INIT(&toep->ddp.requeue_task, 0, aio_ddp_requeue_task, toep);
203 toep->ddp.flags = DDP_OK;
204 toep->ddp.active_id = -1;
205 mtx_init(&toep->ddp.lock, "t4 ddp", NULL, MTX_DEF);
206 }
207
208 void
209 ddp_uninit_toep(struct toepcb *toep)
210 {
211
212 mtx_destroy(&toep->ddp.lock);
213 }
214
215 void
216 release_ddp_resources(struct toepcb *toep)
217 {
218 struct pageset *ps;
219 int i;
220
221 DDP_LOCK(toep);
222 toep->ddp.flags |= DDP_DEAD;
223 for (i = 0; i < nitems(toep->ddp.db); i++) {
224 free_ddp_buffer(toep->td, &toep->ddp.db[i]);
225 }
226 while ((ps = TAILQ_FIRST(&toep->ddp.cached_pagesets)) != NULL) {
227 TAILQ_REMOVE(&toep->ddp.cached_pagesets, ps, link);
228 free_pageset(toep->td, ps);
229 }
230 ddp_complete_all(toep, 0);
231 DDP_UNLOCK(toep);
232 }
233
234 #ifdef INVARIANTS
235 void
236 ddp_assert_empty(struct toepcb *toep)
237 {
238 int i;
239
240 MPASS(!(toep->ddp.flags & DDP_TASK_ACTIVE));
241 for (i = 0; i < nitems(toep->ddp.db); i++) {
242 MPASS(toep->ddp.db[i].job == NULL);
243 MPASS(toep->ddp.db[i].ps == NULL);
244 }
245 MPASS(TAILQ_EMPTY(&toep->ddp.cached_pagesets));
246 MPASS(TAILQ_EMPTY(&toep->ddp.aiojobq));
247 }
248 #endif
249
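/*
 * Retire one of the two hardware DDP buffer slots after its job has been
 * completed or cancelled: fix up the active-buffer accounting (the other
 * slot's index is db_idx ^ 1), clear the slot, and recycle its pageset.
 */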
250 static void
251 complete_ddp_buffer(struct toepcb *toep, struct ddp_buffer *db,
252 unsigned int db_idx)
253 {
254 unsigned int db_flag;
255
256 toep->ddp.active_count--;
257 if (toep->ddp.active_id == db_idx) {
258 if (toep->ddp.active_count == 0) {
259 KASSERT(toep->ddp.db[db_idx ^ 1].job == NULL,
260 ("%s: active_count mismatch", __func__));
261 toep->ddp.active_id = -1;
262 } else
263 toep->ddp.active_id ^= 1;
264 #ifdef VERBOSE_TRACES
265 CTR3(KTR_CXGBE, "%s: tid %u, ddp_active_id = %d", __func__,
266 toep->tid, toep->ddp.active_id);
267 #endif
268 } else {
269 KASSERT(toep->ddp.active_count != 0 &&
270 toep->ddp.active_id != -1,
271 ("%s: active count mismatch", __func__));
272 }
273
274 db->cancel_pending = 0;
275 db->job = NULL;
276 recycle_pageset(toep, db->ps);
277 db->ps = NULL;
278
279 db_flag = db_idx == 1 ? DDP_BUF1_ACTIVE : DDP_BUF0_ACTIVE;
280 KASSERT(toep->ddp.flags & db_flag,
281 ("%s: DDP buffer not active. toep %p, ddp_flags 0x%x",
282 __func__, toep, toep->ddp.flags));
283 toep->ddp.flags &= ~db_flag;
284 }
285
286 /* XXX: handle_ddp_data code duplication */
287 void
288 insert_ddp_data(struct toepcb *toep, uint32_t n)
289 {
290 struct inpcb *inp = toep->inp;
291 struct tcpcb *tp = intotcpcb(inp);
292 struct ddp_buffer *db;
293 struct kaiocb *job;
294 size_t placed;
295 long copied;
296 unsigned int db_idx;
297 #ifdef INVARIANTS
298 unsigned int db_flag;
299 #endif
300
301 INP_WLOCK_ASSERT(inp);
302 DDP_ASSERT_LOCKED(toep);
303
304 tp->rcv_nxt += n;
305 #ifndef USE_DDP_RX_FLOW_CONTROL
306 KASSERT(tp->rcv_wnd >= n, ("%s: negative window size", __func__));
307 tp->rcv_wnd -= n;
308 #endif
309 CTR2(KTR_CXGBE, "%s: placed %u bytes before falling out of DDP",
310 __func__, n);
311 while (toep->ddp.active_count > 0) {
312 MPASS(toep->ddp.active_id != -1);
313 db_idx = toep->ddp.active_id;
314 #ifdef INVARIANTS
315 db_flag = db_idx == 1 ? DDP_BUF1_ACTIVE : DDP_BUF0_ACTIVE;
316 #endif
317 MPASS((toep->ddp.flags & db_flag) != 0);
318 db = &toep->ddp.db[db_idx];
319 job = db->job;
320 copied = job->aio_received;
321 placed = n;
322 if (placed > job->uaiocb.aio_nbytes - copied)
323 placed = job->uaiocb.aio_nbytes - copied;
324 if (placed > 0)
325 job->msgrcv = 1;
326 if (!aio_clear_cancel_function(job)) {
327 /*
328 * Update the copied length for when
329 * t4_aio_cancel_active() completes this
330 * request.
331 */
332 job->aio_received += placed;
333 } else if (copied + placed != 0) {
334 CTR4(KTR_CXGBE,
335 "%s: completing %p (copied %ld, placed %lu)",
336 __func__, job, copied, placed);
337 /* XXX: This always completes if there is some data. */
338 aio_complete(job, copied + placed, 0);
339 } else if (aio_set_cancel_function(job, t4_aio_cancel_queued)) {
340 TAILQ_INSERT_HEAD(&toep->ddp.aiojobq, job, list);
341 toep->ddp.waiting_count++;
342 } else
343 aio_cancel(job);
344 n -= placed;
345 complete_ddp_buffer(toep, db, db_idx);
346 }
347
348 MPASS(n == 0);
349 }
350
351 /* SET_TCB_FIELD sent as a ULP command looks like this */
352 #define LEN__SET_TCB_FIELD_ULP (sizeof(struct ulp_txpkt) + \
353 sizeof(struct ulptx_idata) + sizeof(struct cpl_set_tcb_field_core))
354
355 /* RX_DATA_ACK sent as a ULP command looks like this */
356 #define LEN__RX_DATA_ACK_ULP (sizeof(struct ulp_txpkt) + \
357 sizeof(struct ulptx_idata) + sizeof(struct cpl_rx_data_ack_core))
358
359 static inline void *
360 mk_set_tcb_field_ulp(struct ulp_txpkt *ulpmc, struct toepcb *toep,
361 uint64_t word, uint64_t mask, uint64_t val)
362 {
363 struct ulptx_idata *ulpsc;
364 struct cpl_set_tcb_field_core *req;
365
366 ulpmc->cmd_dest = htonl(V_ULPTX_CMD(ULP_TX_PKT) | V_ULP_TXPKT_DEST(0));
367 ulpmc->len = htobe32(howmany(LEN__SET_TCB_FIELD_ULP, 16));
368
369 ulpsc = (struct ulptx_idata *)(ulpmc + 1);
370 ulpsc->cmd_more = htobe32(V_ULPTX_CMD(ULP_TX_SC_IMM));
371 ulpsc->len = htobe32(sizeof(*req));
372
373 req = (struct cpl_set_tcb_field_core *)(ulpsc + 1);
374 OPCODE_TID(req) = htobe32(MK_OPCODE_TID(CPL_SET_TCB_FIELD, toep->tid));
375 req->reply_ctrl = htobe16(V_NO_REPLY(1) |
376 V_QUEUENO(toep->ofld_rxq->iq.abs_id));
377 req->word_cookie = htobe16(V_WORD(word) | V_COOKIE(0));
378 req->mask = htobe64(mask);
379 req->val = htobe64(val);
380
381 ulpsc = (struct ulptx_idata *)(req + 1);
382 if (LEN__SET_TCB_FIELD_ULP % 16) {
383 ulpsc->cmd_more = htobe32(V_ULPTX_CMD(ULP_TX_SC_NOOP));
384 ulpsc->len = htobe32(0);
385 return (ulpsc + 1);
386 }
387 return (ulpsc);
388 }
389
390 static inline void *
391 mk_rx_data_ack_ulp(struct ulp_txpkt *ulpmc, struct toepcb *toep)
392 {
393 struct ulptx_idata *ulpsc;
394 struct cpl_rx_data_ack_core *req;
395
396 ulpmc->cmd_dest = htonl(V_ULPTX_CMD(ULP_TX_PKT) | V_ULP_TXPKT_DEST(0));
397 ulpmc->len = htobe32(howmany(LEN__RX_DATA_ACK_ULP, 16));
398
399 ulpsc = (struct ulptx_idata *)(ulpmc + 1);
400 ulpsc->cmd_more = htobe32(V_ULPTX_CMD(ULP_TX_SC_IMM));
401 ulpsc->len = htobe32(sizeof(*req));
402
403 req = (struct cpl_rx_data_ack_core *)(ulpsc + 1);
404 OPCODE_TID(req) = htobe32(MK_OPCODE_TID(CPL_RX_DATA_ACK, toep->tid));
405 req->credit_dack = htobe32(F_RX_MODULATE_RX);
406
407 ulpsc = (struct ulptx_idata *)(req + 1);
408 if (LEN__RX_DATA_ACK_ULP % 16) {
409 ulpsc->cmd_more = htobe32(V_ULPTX_CMD(ULP_TX_SC_NOOP));
410 ulpsc->len = htobe32(0);
411 return (ulpsc + 1);
412 }
413 return (ulpsc);
414 }
415
416 static struct wrqe *
417 mk_update_tcb_for_ddp(struct adapter *sc, struct toepcb *toep, int db_idx,
418 struct pageset *ps, int offset, uint64_t ddp_flags, uint64_t ddp_flags_mask)
419 {
420 struct wrqe *wr;
421 struct work_request_hdr *wrh;
422 struct ulp_txpkt *ulpmc;
423 int len;
424
425 KASSERT(db_idx == 0 || db_idx == 1,
426 ("%s: bad DDP buffer index %d", __func__, db_idx));
427
428 /*
429 * We'll send a compound work request that has 3 SET_TCB_FIELDs and an
430 * RX_DATA_ACK (with RX_MODULATE to speed up delivery).
431 *
432 * The work request header is 16B and always ends at a 16B boundary.
433 * The ULPTX master commands that follow must all end at 16B boundaries
434 * too so we round up the size to 16.
435 */
436 len = sizeof(*wrh) + 3 * roundup2(LEN__SET_TCB_FIELD_ULP, 16) +
437 roundup2(LEN__RX_DATA_ACK_ULP, 16);
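	/*
	 * Note that mk_set_tcb_field_ulp() and mk_rx_data_ack_ulp() below pad
	 * their sub-commands with a ULP_TX_SC_NOOP when the natural length is
	 * not a multiple of 16, so the roundup2() above matches what actually
	 * gets written into the work request.
	 */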
438
439 wr = alloc_wrqe(len, toep->ctrlq);
440 if (wr == NULL)
441 return (NULL);
442 wrh = wrtod(wr);
443 INIT_ULPTX_WRH(wrh, len, 1, 0); /* atomic */
444 ulpmc = (struct ulp_txpkt *)(wrh + 1);
445
446 /* Write the buffer's tag */
447 ulpmc = mk_set_tcb_field_ulp(ulpmc, toep,
448 W_TCB_RX_DDP_BUF0_TAG + db_idx,
449 V_TCB_RX_DDP_BUF0_TAG(M_TCB_RX_DDP_BUF0_TAG),
450 V_TCB_RX_DDP_BUF0_TAG(ps->prsv.prsv_tag));
451
452 /* Update the current offset in the DDP buffer and its total length */
453 if (db_idx == 0)
454 ulpmc = mk_set_tcb_field_ulp(ulpmc, toep,
455 W_TCB_RX_DDP_BUF0_OFFSET,
456 V_TCB_RX_DDP_BUF0_OFFSET(M_TCB_RX_DDP_BUF0_OFFSET) |
457 V_TCB_RX_DDP_BUF0_LEN(M_TCB_RX_DDP_BUF0_LEN),
458 V_TCB_RX_DDP_BUF0_OFFSET(offset) |
459 V_TCB_RX_DDP_BUF0_LEN(ps->len));
460 else
461 ulpmc = mk_set_tcb_field_ulp(ulpmc, toep,
462 W_TCB_RX_DDP_BUF1_OFFSET,
463 V_TCB_RX_DDP_BUF1_OFFSET(M_TCB_RX_DDP_BUF1_OFFSET) |
464 V_TCB_RX_DDP_BUF1_LEN((u64)M_TCB_RX_DDP_BUF1_LEN << 32),
465 V_TCB_RX_DDP_BUF1_OFFSET(offset) |
466 V_TCB_RX_DDP_BUF1_LEN((u64)ps->len << 32));
467
468 /* Update DDP flags */
469 ulpmc = mk_set_tcb_field_ulp(ulpmc, toep, W_TCB_RX_DDP_FLAGS,
470 ddp_flags_mask, ddp_flags);
471
472 /* Gratuitous RX_DATA_ACK with RX_MODULATE set to speed up delivery. */
473 ulpmc = mk_rx_data_ack_ulp(ulpmc, toep);
474
475 return (wr);
476 }
477
478 static int
479 handle_ddp_data(struct toepcb *toep, __be32 ddp_report, __be32 rcv_nxt, int len)
480 {
481 uint32_t report = be32toh(ddp_report);
482 unsigned int db_idx;
483 struct inpcb *inp = toep->inp;
484 struct ddp_buffer *db;
485 struct tcpcb *tp;
486 struct socket *so;
487 struct sockbuf *sb;
488 struct kaiocb *job;
489 long copied;
490
491 db_idx = report & F_DDP_BUF_IDX ? 1 : 0;
492
493 if (__predict_false(!(report & F_DDP_INV)))
494 CXGBE_UNIMPLEMENTED("DDP buffer still valid");
495
496 INP_WLOCK(inp);
497 so = inp_inpcbtosocket(inp);
498 sb = &so->so_rcv;
499 DDP_LOCK(toep);
500
501 KASSERT(toep->ddp.active_id == db_idx,
502 ("completed DDP buffer (%d) != active_id (%d) for tid %d", db_idx,
503 toep->ddp.active_id, toep->tid));
504 db = &toep->ddp.db[db_idx];
505 job = db->job;
506
507 if (__predict_false(inp->inp_flags & INP_DROPPED)) {
508 /*
509 * This can happen due to an administrative tcpdrop(8).
510 * Just fail the request with ECONNRESET.
511 */
512 CTR5(KTR_CXGBE, "%s: tid %u, seq 0x%x, len %d, inp_flags 0x%x",
513 __func__, toep->tid, be32toh(rcv_nxt), len, inp->inp_flags);
514 if (aio_clear_cancel_function(job))
515 ddp_complete_one(job, ECONNRESET);
516 goto completed;
517 }
518
519 tp = intotcpcb(inp);
520
521 /*
522 * For RX_DDP_COMPLETE, len will be zero and rcv_nxt is the
523 * sequence number of the next byte to receive. The length of
524 * the data received for this message must be computed by
525 * comparing the new and old values of rcv_nxt.
526 *
527 * For RX_DATA_DDP, len might be non-zero, but it is only the
528 * length of the most recent DMA. It does not include the
529 * total length of the data received since the previous update
530 * for this DDP buffer. rcv_nxt is the sequence number of the
531 * first received byte from the most recent DMA.
532 */
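	/*
	 * For example (hypothetical numbers): if tp->rcv_nxt is 1000 and an
	 * RX_DATA_DDP report carries rcv_nxt 1500 and len 100, then
	 * 100 + (1500 - 1000) = 600 bytes have been placed since the
	 * previous update.
	 */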
533 len += be32toh(rcv_nxt) - tp->rcv_nxt;
534 tp->rcv_nxt += len;
535 tp->t_rcvtime = ticks;
536 #ifndef USE_DDP_RX_FLOW_CONTROL
537 KASSERT(tp->rcv_wnd >= len, ("%s: negative window size", __func__));
538 tp->rcv_wnd -= len;
539 #endif
540 #ifdef VERBOSE_TRACES
541 CTR5(KTR_CXGBE, "%s: tid %u, DDP[%d] placed %d bytes (%#x)", __func__,
542 toep->tid, db_idx, len, report);
543 #endif
544
545 /* receive buffer autosize */
546 MPASS(toep->vnet == so->so_vnet);
547 CURVNET_SET(toep->vnet);
548 SOCKBUF_LOCK(sb);
549 if (sb->sb_flags & SB_AUTOSIZE &&
550 V_tcp_do_autorcvbuf &&
551 sb->sb_hiwat < V_tcp_autorcvbuf_max &&
552 len > (sbspace(sb) / 8 * 7)) {
553 struct adapter *sc = td_adapter(toep->td);
554 unsigned int hiwat = sb->sb_hiwat;
555 unsigned int newsize = min(hiwat + sc->tt.autorcvbuf_inc,
556 V_tcp_autorcvbuf_max);
557
558 if (!sbreserve_locked(so, SO_RCV, newsize, NULL))
559 sb->sb_flags &= ~SB_AUTOSIZE;
560 }
561 SOCKBUF_UNLOCK(sb);
562 CURVNET_RESTORE();
563
564 job->msgrcv = 1;
565 if (db->cancel_pending) {
566 /*
567 * Update the job's length but defer completion to the
568 * TCB_RPL callback.
569 */
570 job->aio_received += len;
571 goto out;
572 } else if (!aio_clear_cancel_function(job)) {
573 /*
574 * Update the copied length for when
575 * t4_aio_cancel_active() completes this request.
576 */
577 job->aio_received += len;
578 } else {
579 copied = job->aio_received;
580 #ifdef VERBOSE_TRACES
581 CTR5(KTR_CXGBE,
582 "%s: tid %u, completing %p (copied %ld, placed %d)",
583 __func__, toep->tid, job, copied, len);
584 #endif
585 aio_complete(job, copied + len, 0);
586 t4_rcvd(&toep->td->tod, tp);
587 }
588
589 completed:
590 complete_ddp_buffer(toep, db, db_idx);
591 if (toep->ddp.waiting_count > 0)
592 ddp_queue_toep(toep);
593 out:
594 DDP_UNLOCK(toep);
595 INP_WUNLOCK(inp);
596
597 return (0);
598 }
599
600 void
601 handle_ddp_indicate(struct toepcb *toep)
602 {
603
604 DDP_ASSERT_LOCKED(toep);
605 MPASS(toep->ddp.active_count == 0);
606 MPASS((toep->ddp.flags & (DDP_BUF0_ACTIVE | DDP_BUF1_ACTIVE)) == 0);
607 if (toep->ddp.waiting_count == 0) {
608 /*
	 * indicate were cancelled. Those cancels should have
610 * an indicate were cancelled. Those cancels should have
611 * already disabled DDP. Just ignore this as the data is
612 * going into the socket buffer anyway.
613 */
614 return;
615 }
616 CTR3(KTR_CXGBE, "%s: tid %d indicated (%d waiting)", __func__,
617 toep->tid, toep->ddp.waiting_count);
618 ddp_queue_toep(toep);
619 }
620
621 CTASSERT(CPL_COOKIE_DDP0 + 1 == CPL_COOKIE_DDP1);
622
623 static int
624 do_ddp_tcb_rpl(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
625 {
626 struct adapter *sc = iq->adapter;
627 const struct cpl_set_tcb_rpl *cpl = (const void *)(rss + 1);
628 unsigned int tid = GET_TID(cpl);
629 unsigned int db_idx;
630 struct toepcb *toep;
631 struct inpcb *inp;
632 struct ddp_buffer *db;
633 struct kaiocb *job;
634 long copied;
635
636 if (cpl->status != CPL_ERR_NONE)
637 panic("XXX: tcp_rpl failed: %d", cpl->status);
638
639 toep = lookup_tid(sc, tid);
640 inp = toep->inp;
641 switch (cpl->cookie) {
642 case V_WORD(W_TCB_RX_DDP_FLAGS) | V_COOKIE(CPL_COOKIE_DDP0):
643 case V_WORD(W_TCB_RX_DDP_FLAGS) | V_COOKIE(CPL_COOKIE_DDP1):
644 /*
645 * XXX: This duplicates a lot of code with handle_ddp_data().
646 */
647 db_idx = G_COOKIE(cpl->cookie) - CPL_COOKIE_DDP0;
648 MPASS(db_idx < nitems(toep->ddp.db));
649 INP_WLOCK(inp);
650 DDP_LOCK(toep);
651 db = &toep->ddp.db[db_idx];
652
653 /*
654 * handle_ddp_data() should leave the job around until
655 * this callback runs once a cancel is pending.
656 */
657 MPASS(db != NULL);
658 MPASS(db->job != NULL);
659 MPASS(db->cancel_pending);
660
661 /*
662 * XXX: It's not clear what happens if there is data
663 * placed when the buffer is invalidated. I suspect we
664 * need to read the TCB to see how much data was placed.
665 *
666 * For now this just pretends like nothing was placed.
667 *
668 * XXX: Note that if we did check the PCB we would need to
669 * also take care of updating the tp, etc.
670 */
671 job = db->job;
672 copied = job->aio_received;
673 if (copied == 0) {
674 CTR2(KTR_CXGBE, "%s: cancelling %p", __func__, job);
675 aio_cancel(job);
676 } else {
677 CTR3(KTR_CXGBE, "%s: completing %p (copied %ld)",
678 __func__, job, copied);
679 aio_complete(job, copied, 0);
680 t4_rcvd(&toep->td->tod, intotcpcb(inp));
681 }
682
683 complete_ddp_buffer(toep, db, db_idx);
684 if (toep->ddp.waiting_count > 0)
685 ddp_queue_toep(toep);
686 DDP_UNLOCK(toep);
687 INP_WUNLOCK(inp);
688 break;
689 default:
690 panic("XXX: unknown tcb_rpl offset %#x, cookie %#x",
691 G_WORD(cpl->cookie), G_COOKIE(cpl->cookie));
692 }
693
694 return (0);
695 }
696
697 void
698 handle_ddp_close(struct toepcb *toep, struct tcpcb *tp, __be32 rcv_nxt)
699 {
700 struct ddp_buffer *db;
701 struct kaiocb *job;
702 long copied;
703 unsigned int db_idx;
704 #ifdef INVARIANTS
705 unsigned int db_flag;
706 #endif
707 int len, placed;
708
709 INP_WLOCK_ASSERT(toep->inp);
710 DDP_ASSERT_LOCKED(toep);
711
712 /* - 1 is to ignore the byte for FIN */
713 len = be32toh(rcv_nxt) - tp->rcv_nxt - 1;
714 tp->rcv_nxt += len;
715
716 while (toep->ddp.active_count > 0) {
717 MPASS(toep->ddp.active_id != -1);
718 db_idx = toep->ddp.active_id;
719 #ifdef INVARIANTS
720 db_flag = db_idx == 1 ? DDP_BUF1_ACTIVE : DDP_BUF0_ACTIVE;
721 #endif
722 MPASS((toep->ddp.flags & db_flag) != 0);
723 db = &toep->ddp.db[db_idx];
724 job = db->job;
725 copied = job->aio_received;
726 placed = len;
727 if (placed > job->uaiocb.aio_nbytes - copied)
728 placed = job->uaiocb.aio_nbytes - copied;
729 if (placed > 0)
730 job->msgrcv = 1;
731 if (!aio_clear_cancel_function(job)) {
732 /*
733 * Update the copied length for when
734 * t4_aio_cancel_active() completes this
735 * request.
736 */
737 job->aio_received += placed;
738 } else {
739 CTR4(KTR_CXGBE, "%s: tid %d completed buf %d len %d",
740 __func__, toep->tid, db_idx, placed);
741 aio_complete(job, copied + placed, 0);
742 }
743 len -= placed;
744 complete_ddp_buffer(toep, db, db_idx);
745 }
746
747 MPASS(len == 0);
748 ddp_complete_all(toep, 0);
749 }
750
751 #define DDP_ERR (F_DDP_PPOD_MISMATCH | F_DDP_LLIMIT_ERR | F_DDP_ULIMIT_ERR |\
752 F_DDP_PPOD_PARITY_ERR | F_DDP_PADDING_ERR | F_DDP_OFFSET_ERR |\
753 F_DDP_INVALID_TAG | F_DDP_COLOR_ERR | F_DDP_TID_MISMATCH |\
754 F_DDP_INVALID_PPOD | F_DDP_HDRCRC_ERR | F_DDP_DATACRC_ERR)
755
756 extern cpl_handler_t t4_cpl_handler[];
757
758 static int
759 do_rx_data_ddp(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
760 {
761 struct adapter *sc = iq->adapter;
762 const struct cpl_rx_data_ddp *cpl = (const void *)(rss + 1);
763 unsigned int tid = GET_TID(cpl);
764 uint32_t vld;
765 struct toepcb *toep = lookup_tid(sc, tid);
766
767 KASSERT(m == NULL, ("%s: wasn't expecting payload", __func__));
768 KASSERT(toep->tid == tid, ("%s: toep tid/atid mismatch", __func__));
769 KASSERT(!(toep->flags & TPF_SYNQE),
770 ("%s: toep %p claims to be a synq entry", __func__, toep));
771
772 vld = be32toh(cpl->ddpvld);
773 if (__predict_false(vld & DDP_ERR)) {
774 panic("%s: DDP error 0x%x (tid %d, toep %p)",
775 __func__, vld, tid, toep);
776 }
777
778 if (ulp_mode(toep) == ULP_MODE_ISCSI) {
779 t4_cpl_handler[CPL_RX_ISCSI_DDP](iq, rss, m);
780 return (0);
781 }
782
783 handle_ddp_data(toep, cpl->u.ddp_report, cpl->seq, be16toh(cpl->len));
784
785 return (0);
786 }
787
788 static int
789 do_rx_ddp_complete(struct sge_iq *iq, const struct rss_header *rss,
790 struct mbuf *m)
791 {
792 struct adapter *sc = iq->adapter;
793 const struct cpl_rx_ddp_complete *cpl = (const void *)(rss + 1);
794 unsigned int tid = GET_TID(cpl);
795 struct toepcb *toep = lookup_tid(sc, tid);
796
797 KASSERT(m == NULL, ("%s: wasn't expecting payload", __func__));
798 KASSERT(toep->tid == tid, ("%s: toep tid/atid mismatch", __func__));
799 KASSERT(!(toep->flags & TPF_SYNQE),
800 ("%s: toep %p claims to be a synq entry", __func__, toep));
801
802 handle_ddp_data(toep, cpl->ddp_report, cpl->rcv_nxt, 0);
803
804 return (0);
805 }
806
807 static void
808 enable_ddp(struct adapter *sc, struct toepcb *toep)
809 {
810
811 KASSERT((toep->ddp.flags & (DDP_ON | DDP_OK | DDP_SC_REQ)) == DDP_OK,
812 ("%s: toep %p has bad ddp_flags 0x%x",
813 __func__, toep, toep->ddp.flags));
814
815 CTR3(KTR_CXGBE, "%s: tid %u (time %u)",
816 __func__, toep->tid, time_uptime);
817
818 DDP_ASSERT_LOCKED(toep);
819 toep->ddp.flags |= DDP_SC_REQ;
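	/*
	 * Clear TF_DDP_OFF to turn DDP on, mark both buffers invalid, and
	 * request an indicate for the first payload that arrives for either
	 * buffer.  The second update turns off receive coalescing
	 * (TF_RCV_COALESCE_ENABLE) for the connection.
	 */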
820 t4_set_tcb_field(sc, toep->ctrlq, toep, W_TCB_RX_DDP_FLAGS,
821 V_TF_DDP_OFF(1) | V_TF_DDP_INDICATE_OUT(1) |
822 V_TF_DDP_BUF0_INDICATE(1) | V_TF_DDP_BUF1_INDICATE(1) |
823 V_TF_DDP_BUF0_VALID(1) | V_TF_DDP_BUF1_VALID(1),
824 V_TF_DDP_BUF0_INDICATE(1) | V_TF_DDP_BUF1_INDICATE(1), 0, 0);
825 t4_set_tcb_field(sc, toep->ctrlq, toep, W_TCB_T_FLAGS,
826 V_TF_RCV_COALESCE_ENABLE(1), 0, 0, 0);
827 }
828
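/*
 * Highest common factor (GCD) of two segment lengths, computed with Euclid's
 * algorithm.  The allocators below use it to find the largest hardware DDP
 * page size that evenly divides every physically contiguous segment of a
 * buffer.
 */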
829 static int
830 calculate_hcf(int n1, int n2)
831 {
832 int a, b, t;
833
834 if (n1 <= n2) {
835 a = n1;
836 b = n2;
837 } else {
838 a = n2;
839 b = n1;
840 }
841
842 while (a != 0) {
843 t = a;
844 a = b % a;
845 b = t;
846 }
847
848 return (b);
849 }
850
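/*
 * Number of page pods needed to map 'npages' VM pages with a hardware DDP
 * page size of (1 << ddp_page_shift): each DDP page covers
 * 2^(ddp_page_shift - PAGE_SHIFT) VM pages and each pod holds PPOD_PAGES
 * DDP page addresses.
 */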
851 static inline int
852 pages_to_nppods(int npages, int ddp_page_shift)
853 {
854
855 MPASS(ddp_page_shift >= PAGE_SHIFT);
856
857 return (howmany(npages >> (ddp_page_shift - PAGE_SHIFT), PPOD_PAGES));
858 }
859
860 static int
861 alloc_page_pods(struct ppod_region *pr, u_int nppods, u_int pgsz_idx,
862 struct ppod_reservation *prsv)
863 {
864 vmem_addr_t addr; /* relative to start of region */
865
866 if (vmem_alloc(pr->pr_arena, PPOD_SZ(nppods), M_NOWAIT | M_FIRSTFIT,
867 &addr) != 0)
868 return (ENOMEM);
869
870 #ifdef VERBOSE_TRACES
871 CTR5(KTR_CXGBE, "%-17s arena %p, addr 0x%08x, nppods %d, pgsz %d",
872 __func__, pr->pr_arena, (uint32_t)addr & pr->pr_tag_mask,
873 nppods, 1 << pr->pr_page_shift[pgsz_idx]);
874 #endif
875
876 /*
877 * The hardware tagmask includes an extra invalid bit but the arena was
878 * seeded with valid values only. An allocation out of this arena will
879 * fit inside the tagmask but won't have the invalid bit set.
880 */
881 MPASS((addr & pr->pr_tag_mask) == addr);
882 MPASS((addr & pr->pr_invalid_bit) == 0);
883
884 prsv->prsv_pr = pr;
885 prsv->prsv_tag = V_PPOD_PGSZ(pgsz_idx) | addr;
886 prsv->prsv_nppods = nppods;
887
888 return (0);
889 }
890
891 static int
892 t4_alloc_page_pods_for_vmpages(struct ppod_region *pr, vm_page_t *pages,
893 int npages, struct ppod_reservation *prsv)
894 {
895 int i, hcf, seglen, idx, nppods;
896
897 /*
898 * The DDP page size is unrelated to the VM page size. We combine
899 * contiguous physical pages into larger segments to get the best DDP
900 * page size possible. This is the largest of the four sizes in
901 * A_ULP_RX_TDDP_PSZ that evenly divides the HCF of the segment sizes in
902 * the page list.
903 */
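	/*
	 * Example (hypothetical layout): with 4KB VM pages, a buffer made of
	 * physically contiguous runs of 16KB and 24KB has an HCF of 8KB, so
	 * the largest configured DDP page size that evenly divides 8KB is
	 * selected (falling back to the smallest size if none of the larger
	 * ones divide it).
	 */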
904 hcf = 0;
905 for (i = 0; i < npages; i++) {
906 seglen = PAGE_SIZE;
907 while (i < npages - 1 &&
908 VM_PAGE_TO_PHYS(pages[i]) + PAGE_SIZE ==
909 VM_PAGE_TO_PHYS(pages[i + 1])) {
910 seglen += PAGE_SIZE;
911 i++;
912 }
913
914 hcf = calculate_hcf(hcf, seglen);
915 if (hcf < (1 << pr->pr_page_shift[1])) {
916 idx = 0;
917 goto have_pgsz; /* give up, short circuit */
918 }
919 }
920
921 #define PR_PAGE_MASK(x) ((1 << pr->pr_page_shift[(x)]) - 1)
922 MPASS((hcf & PR_PAGE_MASK(0)) == 0); /* PAGE_SIZE is >= 4K everywhere */
923 for (idx = nitems(pr->pr_page_shift) - 1; idx > 0; idx--) {
924 if ((hcf & PR_PAGE_MASK(idx)) == 0)
925 break;
926 }
927 #undef PR_PAGE_MASK
928
929 have_pgsz:
930 MPASS(idx <= M_PPOD_PGSZ);
931
932 nppods = pages_to_nppods(npages, pr->pr_page_shift[idx]);
933 if (alloc_page_pods(pr, nppods, idx, prsv) != 0)
934 return (ENOMEM);
935 MPASS(prsv->prsv_nppods > 0);
936
937 return (0);
938 }
939
940 int
941 t4_alloc_page_pods_for_ps(struct ppod_region *pr, struct pageset *ps)
942 {
943 struct ppod_reservation *prsv = &ps->prsv;
944
945 KASSERT(prsv->prsv_nppods == 0,
946 ("%s: page pods already allocated", __func__));
947
948 return (t4_alloc_page_pods_for_vmpages(pr, ps->pages, ps->npages,
949 prsv));
950 }
951
952 int
953 t4_alloc_page_pods_for_bio(struct ppod_region *pr, struct bio *bp,
954 struct ppod_reservation *prsv)
955 {
956
957 MPASS(bp->bio_flags & BIO_UNMAPPED);
958
959 return (t4_alloc_page_pods_for_vmpages(pr, bp->bio_ma, bp->bio_ma_n,
960 prsv));
961 }
962
963 int
964 t4_alloc_page_pods_for_buf(struct ppod_region *pr, vm_offset_t buf, int len,
965 struct ppod_reservation *prsv)
966 {
967 int hcf, seglen, idx, npages, nppods;
968 uintptr_t start_pva, end_pva, pva, p1;
969
970 MPASS(buf > 0);
971 MPASS(len > 0);
972
973 /*
974 * The DDP page size is unrelated to the VM page size. We combine
975 * contiguous physical pages into larger segments to get the best DDP
976 * page size possible. This is the largest of the four sizes in
977 * A_ULP_RX_ISCSI_PSZ that evenly divides the HCF of the segment sizes
978 * in the page list.
979 */
980 hcf = 0;
981 start_pva = trunc_page(buf);
982 end_pva = trunc_page(buf + len - 1);
983 pva = start_pva;
984 while (pva <= end_pva) {
985 seglen = PAGE_SIZE;
986 p1 = pmap_kextract(pva);
987 pva += PAGE_SIZE;
988 while (pva <= end_pva && p1 + seglen == pmap_kextract(pva)) {
989 seglen += PAGE_SIZE;
990 pva += PAGE_SIZE;
991 }
992
993 hcf = calculate_hcf(hcf, seglen);
994 if (hcf < (1 << pr->pr_page_shift[1])) {
995 idx = 0;
996 goto have_pgsz; /* give up, short circuit */
997 }
998 }
999
1000 #define PR_PAGE_MASK(x) ((1 << pr->pr_page_shift[(x)]) - 1)
1001 MPASS((hcf & PR_PAGE_MASK(0)) == 0); /* PAGE_SIZE is >= 4K everywhere */
1002 for (idx = nitems(pr->pr_page_shift) - 1; idx > 0; idx--) {
1003 if ((hcf & PR_PAGE_MASK(idx)) == 0)
1004 break;
1005 }
1006 #undef PR_PAGE_MASK
1007
1008 have_pgsz:
1009 MPASS(idx <= M_PPOD_PGSZ);
1010
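	/* Count the DDP pages spanned by the buffer, then the pods needed. */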
1011 npages = 1;
1012 npages += (end_pva - start_pva) >> pr->pr_page_shift[idx];
1013 nppods = howmany(npages, PPOD_PAGES);
1014 if (alloc_page_pods(pr, nppods, idx, prsv) != 0)
1015 return (ENOMEM);
1016 MPASS(prsv->prsv_nppods > 0);
1017
1018 return (0);
1019 }
1020
1021 int
1022 t4_alloc_page_pods_for_sgl(struct ppod_region *pr, struct ctl_sg_entry *sgl,
1023 int entries, struct ppod_reservation *prsv)
1024 {
1025 int hcf, seglen, idx = 0, npages, nppods, i, len;
	uintptr_t start_pva, end_pva, pva, p1;
1027 vm_offset_t buf;
1028 struct ctl_sg_entry *sge;
1029
1030 MPASS(entries > 0);
1031 MPASS(sgl);
1032
1033 /*
1034 * The DDP page size is unrelated to the VM page size. We combine
1035 * contiguous physical pages into larger segments to get the best DDP
1036 * page size possible. This is the largest of the four sizes in
1037 * A_ULP_RX_ISCSI_PSZ that evenly divides the HCF of the segment sizes
1038 * in the page list.
1039 */
1040 hcf = 0;
1041 for (i = entries - 1; i >= 0; i--) {
1042 sge = sgl + i;
1043 buf = (vm_offset_t)sge->addr;
1044 len = sge->len;
1045 start_pva = trunc_page(buf);
1046 end_pva = trunc_page(buf + len - 1);
1047 pva = start_pva;
1048 while (pva <= end_pva) {
1049 seglen = PAGE_SIZE;
1050 p1 = pmap_kextract(pva);
1051 pva += PAGE_SIZE;
1052 while (pva <= end_pva && p1 + seglen ==
1053 pmap_kextract(pva)) {
1054 seglen += PAGE_SIZE;
1055 pva += PAGE_SIZE;
1056 }
1057
1058 hcf = calculate_hcf(hcf, seglen);
1059 if (hcf < (1 << pr->pr_page_shift[1])) {
1060 idx = 0;
1061 goto have_pgsz; /* give up, short circuit */
1062 }
1063 }
1064 }
1065 #define PR_PAGE_MASK(x) ((1 << pr->pr_page_shift[(x)]) - 1)
1066 MPASS((hcf & PR_PAGE_MASK(0)) == 0); /* PAGE_SIZE is >= 4K everywhere */
1067 for (idx = nitems(pr->pr_page_shift) - 1; idx > 0; idx--) {
1068 if ((hcf & PR_PAGE_MASK(idx)) == 0)
1069 break;
1070 }
1071 #undef PR_PAGE_MASK
1072
1073 have_pgsz:
1074 MPASS(idx <= M_PPOD_PGSZ);
1075
1076 npages = 0;
1077 while (entries--) {
1078 npages++;
1079 start_pva = trunc_page((vm_offset_t)sgl->addr);
1080 end_pva = trunc_page((vm_offset_t)sgl->addr + sgl->len - 1);
1081 npages += (end_pva - start_pva) >> pr->pr_page_shift[idx];
1082 sgl = sgl + 1;
1083 }
1084 nppods = howmany(npages, PPOD_PAGES);
1085 if (alloc_page_pods(pr, nppods, idx, prsv) != 0)
1086 return (ENOMEM);
1087 MPASS(prsv->prsv_nppods > 0);
1088 return (0);
1089 }
1090
1091 void
1092 t4_free_page_pods(struct ppod_reservation *prsv)
1093 {
1094 struct ppod_region *pr = prsv->prsv_pr;
1095 vmem_addr_t addr;
1096
1097 MPASS(prsv != NULL);
1098 MPASS(prsv->prsv_nppods != 0);
1099
1100 addr = prsv->prsv_tag & pr->pr_tag_mask;
1101 MPASS((addr & pr->pr_invalid_bit) == 0);
1102
1103 #ifdef VERBOSE_TRACES
1104 CTR4(KTR_CXGBE, "%-17s arena %p, addr 0x%08x, nppods %d", __func__,
1105 pr->pr_arena, addr, prsv->prsv_nppods);
1106 #endif
1107
1108 vmem_free(pr->pr_arena, addr, PPOD_SZ(prsv->prsv_nppods));
1109 prsv->prsv_nppods = 0;
1110 }
1111
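/*
 * Page pods are written to card memory with ULP_TX_MEM_WRITE work requests
 * that carry the pods as immediate data; each request below carries at most
 * 256 bytes worth of pods.
 */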
1112 #define NUM_ULP_TX_SC_IMM_PPODS (256 / PPOD_SIZE)
1113
1114 int
1115 t4_write_page_pods_for_ps(struct adapter *sc, struct sge_wrq *wrq, int tid,
1116 struct pageset *ps)
1117 {
1118 struct wrqe *wr;
1119 struct ulp_mem_io *ulpmc;
1120 struct ulptx_idata *ulpsc;
1121 struct pagepod *ppod;
1122 int i, j, k, n, chunk, len, ddp_pgsz, idx;
1123 u_int ppod_addr;
1124 uint32_t cmd;
1125 struct ppod_reservation *prsv = &ps->prsv;
1126 struct ppod_region *pr = prsv->prsv_pr;
1127 vm_paddr_t pa;
1128
1129 KASSERT(!(ps->flags & PS_PPODS_WRITTEN),
1130 ("%s: page pods already written", __func__));
1131 MPASS(prsv->prsv_nppods > 0);
1132
1133 cmd = htobe32(V_ULPTX_CMD(ULP_TX_MEM_WRITE));
1134 if (is_t4(sc))
1135 cmd |= htobe32(F_ULP_MEMIO_ORDER);
1136 else
1137 cmd |= htobe32(F_T5_ULP_MEMIO_IMM);
1138 ddp_pgsz = 1 << pr->pr_page_shift[G_PPOD_PGSZ(prsv->prsv_tag)];
1139 ppod_addr = pr->pr_start + (prsv->prsv_tag & pr->pr_tag_mask);
1140 for (i = 0; i < prsv->prsv_nppods; ppod_addr += chunk) {
1141
1142 /* How many page pods are we writing in this cycle */
1143 n = min(prsv->prsv_nppods - i, NUM_ULP_TX_SC_IMM_PPODS);
1144 chunk = PPOD_SZ(n);
1145 len = roundup2(sizeof(*ulpmc) + sizeof(*ulpsc) + chunk, 16);
1146
1147 wr = alloc_wrqe(len, wrq);
1148 if (wr == NULL)
1149 return (ENOMEM); /* ok to just bail out */
1150 ulpmc = wrtod(wr);
1151
1152 INIT_ULPTX_WR(ulpmc, len, 0, 0);
1153 ulpmc->cmd = cmd;
1154 ulpmc->dlen = htobe32(V_ULP_MEMIO_DATA_LEN(chunk / 32));
1155 ulpmc->len16 = htobe32(howmany(len - sizeof(ulpmc->wr), 16));
1156 ulpmc->lock_addr = htobe32(V_ULP_MEMIO_ADDR(ppod_addr >> 5));
1157
1158 ulpsc = (struct ulptx_idata *)(ulpmc + 1);
1159 ulpsc->cmd_more = htobe32(V_ULPTX_CMD(ULP_TX_SC_IMM));
1160 ulpsc->len = htobe32(chunk);
1161
1162 ppod = (struct pagepod *)(ulpsc + 1);
1163 for (j = 0; j < n; i++, j++, ppod++) {
1164 ppod->vld_tid_pgsz_tag_color = htobe64(F_PPOD_VALID |
1165 V_PPOD_TID(tid) | prsv->prsv_tag);
1166 ppod->len_offset = htobe64(V_PPOD_LEN(ps->len) |
1167 V_PPOD_OFST(ps->offset));
1168 ppod->rsvd = 0;
1169 idx = i * PPOD_PAGES * (ddp_pgsz / PAGE_SIZE);
1170 for (k = 0; k < nitems(ppod->addr); k++) {
1171 if (idx < ps->npages) {
1172 pa = VM_PAGE_TO_PHYS(ps->pages[idx]);
1173 ppod->addr[k] = htobe64(pa);
1174 idx += ddp_pgsz / PAGE_SIZE;
1175 } else
1176 ppod->addr[k] = 0;
1177 #if 0
1178 CTR5(KTR_CXGBE,
1179 "%s: tid %d ppod[%d]->addr[%d] = %p",
1180 __func__, tid, i, k,
1181 be64toh(ppod->addr[k]));
1182 #endif
1183 }
1184
1185 }
1186
1187 t4_wrq_tx(sc, wr);
1188 }
1189 ps->flags |= PS_PPODS_WRITTEN;
1190
1191 return (0);
1192 }
1193
1194 static struct mbuf *
1195 alloc_raw_wr_mbuf(int len)
1196 {
1197 struct mbuf *m;
1198
1199 if (len <= MHLEN)
1200 m = m_gethdr(M_NOWAIT, MT_DATA);
1201 else if (len <= MCLBYTES)
1202 m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
1203 else
1204 m = NULL;
1205 if (m == NULL)
1206 return (NULL);
1207 m->m_pkthdr.len = len;
1208 m->m_len = len;
1209 set_mbuf_raw_wr(m, true);
1210 return (m);
1211 }
1212
1213 int
1214 t4_write_page_pods_for_bio(struct adapter *sc, struct toepcb *toep,
1215 struct ppod_reservation *prsv, struct bio *bp, struct mbufq *wrq)
1216 {
1217 struct ulp_mem_io *ulpmc;
1218 struct ulptx_idata *ulpsc;
1219 struct pagepod *ppod;
1220 int i, j, k, n, chunk, len, ddp_pgsz, idx;
1221 u_int ppod_addr;
1222 uint32_t cmd;
1223 struct ppod_region *pr = prsv->prsv_pr;
1224 vm_paddr_t pa;
1225 struct mbuf *m;
1226
1227 MPASS(bp->bio_flags & BIO_UNMAPPED);
1228
1229 cmd = htobe32(V_ULPTX_CMD(ULP_TX_MEM_WRITE));
1230 if (is_t4(sc))
1231 cmd |= htobe32(F_ULP_MEMIO_ORDER);
1232 else
1233 cmd |= htobe32(F_T5_ULP_MEMIO_IMM);
1234 ddp_pgsz = 1 << pr->pr_page_shift[G_PPOD_PGSZ(prsv->prsv_tag)];
1235 ppod_addr = pr->pr_start + (prsv->prsv_tag & pr->pr_tag_mask);
1236 for (i = 0; i < prsv->prsv_nppods; ppod_addr += chunk) {
1237
1238 /* How many page pods are we writing in this cycle */
1239 n = min(prsv->prsv_nppods - i, NUM_ULP_TX_SC_IMM_PPODS);
1240 MPASS(n > 0);
1241 chunk = PPOD_SZ(n);
1242 len = roundup2(sizeof(*ulpmc) + sizeof(*ulpsc) + chunk, 16);
1243
1244 m = alloc_raw_wr_mbuf(len);
1245 if (m == NULL)
1246 return (ENOMEM);
1247
1248 ulpmc = mtod(m, struct ulp_mem_io *);
1249 INIT_ULPTX_WR(ulpmc, len, 0, toep->tid);
1250 ulpmc->cmd = cmd;
1251 ulpmc->dlen = htobe32(V_ULP_MEMIO_DATA_LEN(chunk / 32));
1252 ulpmc->len16 = htobe32(howmany(len - sizeof(ulpmc->wr), 16));
1253 ulpmc->lock_addr = htobe32(V_ULP_MEMIO_ADDR(ppod_addr >> 5));
1254
1255 ulpsc = (struct ulptx_idata *)(ulpmc + 1);
1256 ulpsc->cmd_more = htobe32(V_ULPTX_CMD(ULP_TX_SC_IMM));
1257 ulpsc->len = htobe32(chunk);
1258
1259 ppod = (struct pagepod *)(ulpsc + 1);
1260 for (j = 0; j < n; i++, j++, ppod++) {
1261 ppod->vld_tid_pgsz_tag_color = htobe64(F_PPOD_VALID |
1262 V_PPOD_TID(toep->tid) |
1263 (prsv->prsv_tag & ~V_PPOD_PGSZ(M_PPOD_PGSZ)));
1264 ppod->len_offset = htobe64(V_PPOD_LEN(bp->bio_bcount) |
1265 V_PPOD_OFST(bp->bio_ma_offset));
1266 ppod->rsvd = 0;
1267 idx = i * PPOD_PAGES * (ddp_pgsz / PAGE_SIZE);
1268 for (k = 0; k < nitems(ppod->addr); k++) {
1269 if (idx < bp->bio_ma_n) {
1270 pa = VM_PAGE_TO_PHYS(bp->bio_ma[idx]);
1271 ppod->addr[k] = htobe64(pa);
1272 idx += ddp_pgsz / PAGE_SIZE;
1273 } else
1274 ppod->addr[k] = 0;
1275 #if 0
1276 CTR5(KTR_CXGBE,
1277 "%s: tid %d ppod[%d]->addr[%d] = %p",
1278 __func__, toep->tid, i, k,
1279 be64toh(ppod->addr[k]));
1280 #endif
1281 }
1282 }
1283
1284 mbufq_enqueue(wrq, m);
1285 }
1286
1287 return (0);
1288 }
1289
1290 int
1291 t4_write_page_pods_for_buf(struct adapter *sc, struct toepcb *toep,
1292 struct ppod_reservation *prsv, vm_offset_t buf, int buflen,
1293 struct mbufq *wrq)
1294 {
1295 struct ulp_mem_io *ulpmc;
1296 struct ulptx_idata *ulpsc;
1297 struct pagepod *ppod;
1298 int i, j, k, n, chunk, len, ddp_pgsz;
1299 u_int ppod_addr, offset;
1300 uint32_t cmd;
1301 struct ppod_region *pr = prsv->prsv_pr;
1302 uintptr_t end_pva, pva;
1303 vm_paddr_t pa;
1304 struct mbuf *m;
1305
1306 cmd = htobe32(V_ULPTX_CMD(ULP_TX_MEM_WRITE));
1307 if (is_t4(sc))
1308 cmd |= htobe32(F_ULP_MEMIO_ORDER);
1309 else
1310 cmd |= htobe32(F_T5_ULP_MEMIO_IMM);
1311 ddp_pgsz = 1 << pr->pr_page_shift[G_PPOD_PGSZ(prsv->prsv_tag)];
1312 offset = buf & PAGE_MASK;
1313 ppod_addr = pr->pr_start + (prsv->prsv_tag & pr->pr_tag_mask);
1314 pva = trunc_page(buf);
1315 end_pva = trunc_page(buf + buflen - 1);
1316 for (i = 0; i < prsv->prsv_nppods; ppod_addr += chunk) {
1317
1318 /* How many page pods are we writing in this cycle */
1319 n = min(prsv->prsv_nppods - i, NUM_ULP_TX_SC_IMM_PPODS);
1320 MPASS(n > 0);
1321 chunk = PPOD_SZ(n);
1322 len = roundup2(sizeof(*ulpmc) + sizeof(*ulpsc) + chunk, 16);
1323
1324 m = alloc_raw_wr_mbuf(len);
1325 if (m == NULL)
1326 return (ENOMEM);
1327 ulpmc = mtod(m, struct ulp_mem_io *);
1328
1329 INIT_ULPTX_WR(ulpmc, len, 0, toep->tid);
1330 ulpmc->cmd = cmd;
1331 ulpmc->dlen = htobe32(V_ULP_MEMIO_DATA_LEN(chunk / 32));
1332 ulpmc->len16 = htobe32(howmany(len - sizeof(ulpmc->wr), 16));
1333 ulpmc->lock_addr = htobe32(V_ULP_MEMIO_ADDR(ppod_addr >> 5));
1334
1335 ulpsc = (struct ulptx_idata *)(ulpmc + 1);
1336 ulpsc->cmd_more = htobe32(V_ULPTX_CMD(ULP_TX_SC_IMM));
1337 ulpsc->len = htobe32(chunk);
1338
1339 ppod = (struct pagepod *)(ulpsc + 1);
1340 for (j = 0; j < n; i++, j++, ppod++) {
1341 ppod->vld_tid_pgsz_tag_color = htobe64(F_PPOD_VALID |
1342 V_PPOD_TID(toep->tid) |
1343 (prsv->prsv_tag & ~V_PPOD_PGSZ(M_PPOD_PGSZ)));
1344 ppod->len_offset = htobe64(V_PPOD_LEN(buflen) |
1345 V_PPOD_OFST(offset));
1346 ppod->rsvd = 0;
1347
1348 for (k = 0; k < nitems(ppod->addr); k++) {
1349 if (pva > end_pva)
1350 ppod->addr[k] = 0;
1351 else {
1352 pa = pmap_kextract(pva);
1353 ppod->addr[k] = htobe64(pa);
1354 pva += ddp_pgsz;
1355 }
1356 #if 0
1357 CTR5(KTR_CXGBE,
1358 "%s: tid %d ppod[%d]->addr[%d] = %p",
1359 __func__, toep->tid, i, k,
1360 be64toh(ppod->addr[k]));
1361 #endif
1362 }
1363
1364 /*
1365 * Walk back 1 segment so that the first address in the
1366 * next pod is the same as the last one in the current
1367 * pod.
1368 */
1369 pva -= ddp_pgsz;
1370 }
1371
1372 mbufq_enqueue(wrq, m);
1373 }
1374
1375 MPASS(pva <= end_pva);
1376
1377 return (0);
1378 }
1379
1380 int
1381 t4_write_page_pods_for_sgl(struct adapter *sc, struct toepcb *toep,
1382 struct ppod_reservation *prsv, struct ctl_sg_entry *sgl, int entries,
1383 int xferlen, struct mbufq *wrq)
1384 {
1385 struct ulp_mem_io *ulpmc;
1386 struct ulptx_idata *ulpsc;
1387 struct pagepod *ppod;
1388 int i, j, k, n, chunk, len, ddp_pgsz;
1389 u_int ppod_addr, offset, sg_offset = 0;
1390 uint32_t cmd;
1391 struct ppod_region *pr = prsv->prsv_pr;
1392 uintptr_t pva;
1393 vm_paddr_t pa;
1394 struct mbuf *m;
1395
1396 MPASS(sgl != NULL);
1397 MPASS(entries > 0);
1398 cmd = htobe32(V_ULPTX_CMD(ULP_TX_MEM_WRITE));
1399 if (is_t4(sc))
1400 cmd |= htobe32(F_ULP_MEMIO_ORDER);
1401 else
1402 cmd |= htobe32(F_T5_ULP_MEMIO_IMM);
1403 ddp_pgsz = 1 << pr->pr_page_shift[G_PPOD_PGSZ(prsv->prsv_tag)];
1404 offset = (vm_offset_t)sgl->addr & PAGE_MASK;
1405 ppod_addr = pr->pr_start + (prsv->prsv_tag & pr->pr_tag_mask);
1406 pva = trunc_page((vm_offset_t)sgl->addr);
1407 for (i = 0; i < prsv->prsv_nppods; ppod_addr += chunk) {
1408
1409 /* How many page pods are we writing in this cycle */
1410 n = min(prsv->prsv_nppods - i, NUM_ULP_TX_SC_IMM_PPODS);
1411 MPASS(n > 0);
1412 chunk = PPOD_SZ(n);
1413 len = roundup2(sizeof(*ulpmc) + sizeof(*ulpsc) + chunk, 16);
1414
1415 m = alloc_raw_wr_mbuf(len);
1416 if (m == NULL)
1417 return (ENOMEM);
1418 ulpmc = mtod(m, struct ulp_mem_io *);
1419
1420 INIT_ULPTX_WR(ulpmc, len, 0, toep->tid);
1421 ulpmc->cmd = cmd;
1422 ulpmc->dlen = htobe32(V_ULP_MEMIO_DATA_LEN(chunk / 32));
1423 ulpmc->len16 = htobe32(howmany(len - sizeof(ulpmc->wr), 16));
1424 ulpmc->lock_addr = htobe32(V_ULP_MEMIO_ADDR(ppod_addr >> 5));
1425
1426 ulpsc = (struct ulptx_idata *)(ulpmc + 1);
1427 ulpsc->cmd_more = htobe32(V_ULPTX_CMD(ULP_TX_SC_IMM));
1428 ulpsc->len = htobe32(chunk);
1429
1430 ppod = (struct pagepod *)(ulpsc + 1);
1431 for (j = 0; j < n; i++, j++, ppod++) {
1432 ppod->vld_tid_pgsz_tag_color = htobe64(F_PPOD_VALID |
1433 V_PPOD_TID(toep->tid) |
1434 (prsv->prsv_tag & ~V_PPOD_PGSZ(M_PPOD_PGSZ)));
1435 ppod->len_offset = htobe64(V_PPOD_LEN(xferlen) |
1436 V_PPOD_OFST(offset));
1437 ppod->rsvd = 0;
1438
1439 for (k = 0; k < nitems(ppod->addr); k++) {
1440 if (entries != 0) {
1441 pa = pmap_kextract(pva + sg_offset);
1442 ppod->addr[k] = htobe64(pa);
1443 } else
1444 ppod->addr[k] = 0;
1445
1446 #if 0
1447 CTR5(KTR_CXGBE,
1448 "%s: tid %d ppod[%d]->addr[%d] = %p",
1449 __func__, toep->tid, i, k,
1450 be64toh(ppod->addr[k]));
1451 #endif
1452
1453 /*
1454 * If this is the last entry in a pod,
				 * reuse the same entry for the first address
1456 * in the next pod.
1457 */
1458 if (k + 1 == nitems(ppod->addr))
1459 break;
1460
1461 /*
1462 * Don't move to the next DDP page if the
1463 * sgl is already finished.
1464 */
1465 if (entries == 0)
1466 continue;
1467
1468 sg_offset += ddp_pgsz;
1469 if (sg_offset == sgl->len) {
1470 /*
1471 * This sgl entry is done. Go
1472 * to the next.
1473 */
1474 entries--;
1475 sgl++;
1476 sg_offset = 0;
1477 if (entries != 0)
1478 pva = trunc_page(
1479 (vm_offset_t)sgl->addr);
1480 }
1481 }
1482 }
1483
1484 mbufq_enqueue(wrq, m);
1485 }
1486
1487 return (0);
1488 }
1489
/*
 * Prepare a pageset for DDP.  This sets up page pods for the buffer and
 * writes them to the card if necessary.  Returns 1 on success and 0 on
 * failure.
 */
1493 static int
1494 prep_pageset(struct adapter *sc, struct toepcb *toep, struct pageset *ps)
1495 {
1496 struct tom_data *td = sc->tom_softc;
1497
1498 if (ps->prsv.prsv_nppods == 0 &&
1499 t4_alloc_page_pods_for_ps(&td->pr, ps) != 0) {
1500 return (0);
1501 }
1502 if (!(ps->flags & PS_PPODS_WRITTEN) &&
1503 t4_write_page_pods_for_ps(sc, toep->ctrlq, toep->tid, ps) != 0) {
1504 return (0);
1505 }
1506
1507 return (1);
1508 }
1509
1510 int
1511 t4_init_ppod_region(struct ppod_region *pr, struct t4_range *r, u_int psz,
1512 const char *name)
1513 {
1514 int i;
1515
1516 MPASS(pr != NULL);
1517 MPASS(r->size > 0);
1518
1519 pr->pr_start = r->start;
1520 pr->pr_len = r->size;
1521 pr->pr_page_shift[0] = 12 + G_HPZ0(psz);
1522 pr->pr_page_shift[1] = 12 + G_HPZ1(psz);
1523 pr->pr_page_shift[2] = 12 + G_HPZ2(psz);
1524 pr->pr_page_shift[3] = 12 + G_HPZ3(psz);
1525
1526 /* The SGL -> page pod algorithm requires the sizes to be in order. */
1527 for (i = 1; i < nitems(pr->pr_page_shift); i++) {
1528 if (pr->pr_page_shift[i] <= pr->pr_page_shift[i - 1])
1529 return (ENXIO);
1530 }
1531
1532 pr->pr_tag_mask = ((1 << fls(r->size)) - 1) & V_PPOD_TAG(M_PPOD_TAG);
1533 pr->pr_alias_mask = V_PPOD_TAG(M_PPOD_TAG) & ~pr->pr_tag_mask;
1534 if (pr->pr_tag_mask == 0 || pr->pr_alias_mask == 0)
1535 return (ENXIO);
1536 pr->pr_alias_shift = fls(pr->pr_tag_mask);
1537 pr->pr_invalid_bit = 1 << (pr->pr_alias_shift - 1);
1538
1539 pr->pr_arena = vmem_create(name, 0, pr->pr_len, PPOD_SIZE, 0,
1540 M_FIRSTFIT | M_NOWAIT);
1541 if (pr->pr_arena == NULL)
1542 return (ENOMEM);
1543
1544 return (0);
1545 }
1546
1547 void
1548 t4_free_ppod_region(struct ppod_region *pr)
1549 {
1550
1551 MPASS(pr != NULL);
1552
1553 if (pr->pr_arena)
1554 vmem_destroy(pr->pr_arena);
1555 bzero(pr, sizeof(*pr));
1556 }
1557
1558 static int
1559 pscmp(struct pageset *ps, struct vmspace *vm, vm_offset_t start, int npages,
1560 int pgoff, int len)
1561 {
1562
1563 if (ps->start != start || ps->npages != npages ||
1564 ps->offset != pgoff || ps->len != len)
1565 return (1);
1566
1567 return (ps->vm != vm || ps->vm_timestamp != vm->vm_map.timestamp);
1568 }
1569
1570 static int
1571 hold_aio(struct toepcb *toep, struct kaiocb *job, struct pageset **pps)
1572 {
1573 struct vmspace *vm;
1574 vm_map_t map;
1575 vm_offset_t start, end, pgoff;
1576 struct pageset *ps;
1577 int n;
1578
1579 DDP_ASSERT_LOCKED(toep);
1580
1581 /*
1582 * The AIO subsystem will cancel and drain all requests before
1583 * permitting a process to exit or exec, so p_vmspace should
1584 * be stable here.
1585 */
1586 vm = job->userproc->p_vmspace;
1587 map = &vm->vm_map;
1588 start = (uintptr_t)job->uaiocb.aio_buf;
1589 pgoff = start & PAGE_MASK;
1590 end = round_page(start + job->uaiocb.aio_nbytes);
1591 start = trunc_page(start);
1592
1593 if (end - start > MAX_DDP_BUFFER_SIZE) {
1594 /*
1595 * Truncate the request to a short read.
1596 * Alternatively, we could DDP in chunks to the larger
1597 * buffer, but that would be quite a bit more work.
1598 *
1599 * When truncating, round the request down to avoid
1600 * crossing a cache line on the final transaction.
1601 */
1602 end = rounddown2(start + MAX_DDP_BUFFER_SIZE, CACHE_LINE_SIZE);
1603 #ifdef VERBOSE_TRACES
1604 CTR4(KTR_CXGBE, "%s: tid %d, truncating size from %lu to %lu",
1605 __func__, toep->tid, (unsigned long)job->uaiocb.aio_nbytes,
1606 (unsigned long)(end - (start + pgoff)));
#endif
		job->uaiocb.aio_nbytes = end - (start + pgoff);
1609 end = round_page(end);
1610 }
1611
1612 n = atop(end - start);
1613
1614 /*
1615 * Try to reuse a cached pageset.
1616 */
1617 TAILQ_FOREACH(ps, &toep->ddp.cached_pagesets, link) {
1618 if (pscmp(ps, vm, start, n, pgoff,
1619 job->uaiocb.aio_nbytes) == 0) {
1620 TAILQ_REMOVE(&toep->ddp.cached_pagesets, ps, link);
1621 toep->ddp.cached_count--;
1622 *pps = ps;
1623 return (0);
1624 }
1625 }
1626
1627 /*
1628 * If there are too many cached pagesets to create a new one,
1629 * free a pageset before creating a new one.
1630 */
1631 KASSERT(toep->ddp.active_count + toep->ddp.cached_count <=
1632 nitems(toep->ddp.db), ("%s: too many wired pagesets", __func__));
1633 if (toep->ddp.active_count + toep->ddp.cached_count ==
1634 nitems(toep->ddp.db)) {
1635 KASSERT(toep->ddp.cached_count > 0,
1636 ("no cached pageset to free"));
1637 ps = TAILQ_LAST(&toep->ddp.cached_pagesets, pagesetq);
1638 TAILQ_REMOVE(&toep->ddp.cached_pagesets, ps, link);
1639 toep->ddp.cached_count--;
1640 free_pageset(toep->td, ps);
1641 }
1642 DDP_UNLOCK(toep);
1643
1644 /* Create a new pageset. */
1645 ps = malloc(sizeof(*ps) + n * sizeof(vm_page_t), M_CXGBE, M_WAITOK |
1646 M_ZERO);
1647 ps->pages = (vm_page_t *)(ps + 1);
1648 ps->vm_timestamp = map->timestamp;
1649 ps->npages = vm_fault_quick_hold_pages(map, start, end - start,
1650 VM_PROT_WRITE, ps->pages, n);
1651
1652 DDP_LOCK(toep);
1653 if (ps->npages < 0) {
1654 free(ps, M_CXGBE);
1655 return (EFAULT);
1656 }
1657
1658 KASSERT(ps->npages == n, ("hold_aio: page count mismatch: %d vs %d",
1659 ps->npages, n));
1660
1661 ps->offset = pgoff;
1662 ps->len = job->uaiocb.aio_nbytes;
1663 refcount_acquire(&vm->vm_refcnt);
1664 ps->vm = vm;
1665 ps->start = start;
1666
1667 CTR5(KTR_CXGBE, "%s: tid %d, new pageset %p for job %p, npages %d",
1668 __func__, toep->tid, ps, job, ps->npages);
1669 *pps = ps;
1670 return (0);
1671 }
1672
1673 static void
1674 ddp_complete_all(struct toepcb *toep, int error)
1675 {
1676 struct kaiocb *job;
1677
1678 DDP_ASSERT_LOCKED(toep);
1679 while (!TAILQ_EMPTY(&toep->ddp.aiojobq)) {
1680 job = TAILQ_FIRST(&toep->ddp.aiojobq);
1681 TAILQ_REMOVE(&toep->ddp.aiojobq, job, list);
1682 toep->ddp.waiting_count--;
1683 if (aio_clear_cancel_function(job))
1684 ddp_complete_one(job, error);
1685 }
1686 }
1687
1688 static void
1689 aio_ddp_cancel_one(struct kaiocb *job)
1690 {
1691 long copied;
1692
1693 /*
1694 * If this job had copied data out of the socket buffer before
1695 * it was cancelled, report it as a short read rather than an
1696 * error.
1697 */
1698 copied = job->aio_received;
1699 if (copied != 0)
1700 aio_complete(job, copied, 0);
1701 else
1702 aio_cancel(job);
1703 }
1704
1705 /*
1706 * Called when the main loop wants to requeue a job to retry it later.
1707 * Deals with the race of the job being cancelled while it was being
1708 * examined.
1709 */
1710 static void
1711 aio_ddp_requeue_one(struct toepcb *toep, struct kaiocb *job)
1712 {
1713
1714 DDP_ASSERT_LOCKED(toep);
1715 if (!(toep->ddp.flags & DDP_DEAD) &&
1716 aio_set_cancel_function(job, t4_aio_cancel_queued)) {
1717 TAILQ_INSERT_HEAD(&toep->ddp.aiojobq, job, list);
1718 toep->ddp.waiting_count++;
1719 } else
1720 aio_ddp_cancel_one(job);
1721 }
1722
1723 static void
1724 aio_ddp_requeue(struct toepcb *toep)
1725 {
1726 struct adapter *sc = td_adapter(toep->td);
1727 struct socket *so;
1728 struct sockbuf *sb;
1729 struct inpcb *inp;
1730 struct kaiocb *job;
1731 struct ddp_buffer *db;
1732 size_t copied, offset, resid;
1733 struct pageset *ps;
1734 struct mbuf *m;
1735 uint64_t ddp_flags, ddp_flags_mask;
1736 struct wrqe *wr;
1737 int buf_flag, db_idx, error;
1738
1739 DDP_ASSERT_LOCKED(toep);
1740
1741 restart:
1742 if (toep->ddp.flags & DDP_DEAD) {
1743 MPASS(toep->ddp.waiting_count == 0);
1744 MPASS(toep->ddp.active_count == 0);
1745 return;
1746 }
1747
1748 if (toep->ddp.waiting_count == 0 ||
1749 toep->ddp.active_count == nitems(toep->ddp.db)) {
1750 return;
1751 }
1752
1753 job = TAILQ_FIRST(&toep->ddp.aiojobq);
1754 so = job->fd_file->f_data;
1755 sb = &so->so_rcv;
1756 SOCKBUF_LOCK(sb);
1757
1758 /* We will never get anything unless we are or were connected. */
1759 if (!(so->so_state & (SS_ISCONNECTED|SS_ISDISCONNECTED))) {
1760 SOCKBUF_UNLOCK(sb);
1761 ddp_complete_all(toep, ENOTCONN);
1762 return;
1763 }
1764
1765 KASSERT(toep->ddp.active_count == 0 || sbavail(sb) == 0,
1766 ("%s: pending sockbuf data and DDP is active", __func__));
1767
1768 /* Abort if socket has reported problems. */
1769 /* XXX: Wait for any queued DDP's to finish and/or flush them? */
1770 if (so->so_error && sbavail(sb) == 0) {
1771 toep->ddp.waiting_count--;
1772 TAILQ_REMOVE(&toep->ddp.aiojobq, job, list);
1773 if (!aio_clear_cancel_function(job)) {
1774 SOCKBUF_UNLOCK(sb);
1775 goto restart;
1776 }
1777
1778 /*
1779 * If this job has previously copied some data, report
1780 * a short read and leave the error to be reported by
1781 * a future request.
1782 */
1783 copied = job->aio_received;
1784 if (copied != 0) {
1785 SOCKBUF_UNLOCK(sb);
1786 aio_complete(job, copied, 0);
1787 goto restart;
1788 }
1789 error = so->so_error;
1790 so->so_error = 0;
1791 SOCKBUF_UNLOCK(sb);
1792 aio_complete(job, -1, error);
1793 goto restart;
1794 }
1795
1796 /*
1797 * Door is closed. If there is pending data in the socket buffer,
1798 * deliver it. If there are pending DDP requests, wait for those
1799 * to complete. Once they have completed, return EOF reads.
1800 */
1801 if (sb->sb_state & SBS_CANTRCVMORE && sbavail(sb) == 0) {
1802 SOCKBUF_UNLOCK(sb);
1803 if (toep->ddp.active_count != 0)
1804 return;
1805 ddp_complete_all(toep, 0);
1806 return;
1807 }
1808
1809 /*
1810 * If DDP is not enabled and there is no pending socket buffer
1811 * data, try to enable DDP.
1812 */
1813 if (sbavail(sb) == 0 && (toep->ddp.flags & DDP_ON) == 0) {
1814 SOCKBUF_UNLOCK(sb);
1815
1816 /*
1817 * Wait for the card to ACK that DDP is enabled before
1818 * queueing any buffers. Currently this waits for an
1819 * indicate to arrive. This could use a TCB_SET_FIELD_RPL
1820 * message to know that DDP was enabled instead of waiting
1821 * for the indicate which would avoid copying the indicate
1822 * if no data is pending.
1823 *
1824 * XXX: Might want to limit the indicate size to the size
1825 * of the first queued request.
1826 */
1827 if ((toep->ddp.flags & DDP_SC_REQ) == 0)
1828 enable_ddp(sc, toep);
1829 return;
1830 }
1831 SOCKBUF_UNLOCK(sb);
1832
1833 /*
1834 * If another thread is queueing a buffer for DDP, let it
1835 * drain any work and return.
1836 */
1837 if (toep->ddp.queueing != NULL)
1838 return;
1839
1840 /* Take the next job to prep it for DDP. */
1841 toep->ddp.waiting_count--;
1842 TAILQ_REMOVE(&toep->ddp.aiojobq, job, list);
1843 if (!aio_clear_cancel_function(job))
1844 goto restart;
1845 toep->ddp.queueing = job;
1846
1847 /* NB: This drops DDP_LOCK while it holds the backing VM pages. */
1848 error = hold_aio(toep, job, &ps);
1849 if (error != 0) {
1850 ddp_complete_one(job, error);
1851 toep->ddp.queueing = NULL;
1852 goto restart;
1853 }
1854
1855 SOCKBUF_LOCK(sb);
1856 if (so->so_error && sbavail(sb) == 0) {
1857 copied = job->aio_received;
1858 if (copied != 0) {
1859 SOCKBUF_UNLOCK(sb);
1860 recycle_pageset(toep, ps);
1861 aio_complete(job, copied, 0);
1862 toep->ddp.queueing = NULL;
1863 goto restart;
1864 }
1865
1866 error = so->so_error;
1867 so->so_error = 0;
1868 SOCKBUF_UNLOCK(sb);
1869 recycle_pageset(toep, ps);
1870 aio_complete(job, -1, error);
1871 toep->ddp.queueing = NULL;
1872 goto restart;
1873 }
1874
1875 if (sb->sb_state & SBS_CANTRCVMORE && sbavail(sb) == 0) {
1876 SOCKBUF_UNLOCK(sb);
1877 recycle_pageset(toep, ps);
1878 if (toep->ddp.active_count != 0) {
1879 /*
1880 * The door is closed, but there are still pending
1881 * DDP buffers. Requeue. These jobs will all be
1882 * completed once those buffers drain.
1883 */
1884 aio_ddp_requeue_one(toep, job);
1885 toep->ddp.queueing = NULL;
1886 return;
1887 }
1888 ddp_complete_one(job, 0);
1889 ddp_complete_all(toep, 0);
1890 toep->ddp.queueing = NULL;
1891 return;
1892 }
1893
1894 sbcopy:
1895 /*
1896 * If the toep is dead, there shouldn't be any data in the socket
1897 * buffer, so the above case should have handled this.
1898 */
1899 MPASS(!(toep->ddp.flags & DDP_DEAD));
1900
1901 /*
1902 * If there is pending data in the socket buffer (either
1903 * from before the requests were queued or a DDP indicate),
1904 * copy those mbufs out directly.
1905 */
1906 copied = 0;
1907 offset = ps->offset + job->aio_received;
1908 MPASS(job->aio_received <= job->uaiocb.aio_nbytes);
1909 resid = job->uaiocb.aio_nbytes - job->aio_received;
1910 m = sb->sb_mb;
1911 KASSERT(m == NULL || toep->ddp.active_count == 0,
1912 ("%s: sockbuf data with active DDP", __func__));
1913 while (m != NULL && resid > 0) {
1914 struct iovec iov[1];
1915 struct uio uio;
1916 #ifdef INVARIANTS
1917 int error;
1918 #endif
1919
1920 iov[0].iov_base = mtod(m, void *);
1921 iov[0].iov_len = m->m_len;
1922 if (iov[0].iov_len > resid)
1923 iov[0].iov_len = resid;
1924 uio.uio_iov = iov;
1925 uio.uio_iovcnt = 1;
1926 uio.uio_offset = 0;
1927 uio.uio_resid = iov[0].iov_len;
1928 uio.uio_segflg = UIO_SYSSPACE;
1929 uio.uio_rw = UIO_WRITE;
1930 #ifdef INVARIANTS
1931 error = uiomove_fromphys(ps->pages, offset + copied,
1932 uio.uio_resid, &uio);
1933 #else
1934 uiomove_fromphys(ps->pages, offset + copied, uio.uio_resid, &uio);
1935 #endif
1936 MPASS(error == 0 && uio.uio_resid == 0);
1937 copied += uio.uio_offset;
1938 resid -= uio.uio_offset;
1939 m = m->m_next;
1940 }
1941 if (copied != 0) {
1942 sbdrop_locked(sb, copied);
1943 job->aio_received += copied;
1944 job->msgrcv = 1;
1945 copied = job->aio_received;
1946 inp = sotoinpcb(so);
1947 if (!INP_TRY_WLOCK(inp)) {
1948 /*
1949 * The reference on the socket file descriptor in
1950 * the AIO job should keep 'sb' and 'inp' stable.
1951 * Our caller has a reference on the 'toep' that
1952 * keeps it stable.
1953 */
1954 SOCKBUF_UNLOCK(sb);
1955 DDP_UNLOCK(toep);
1956 INP_WLOCK(inp);
1957 DDP_LOCK(toep);
1958 SOCKBUF_LOCK(sb);
1959
1960 /*
1961 * If the socket has been closed, we should detect
1962 * that and complete this request if needed on
1963 * the next trip around the loop.
1964 */
1965 }
1966 t4_rcvd_locked(&toep->td->tod, intotcpcb(inp));
1967 INP_WUNLOCK(inp);
1968 if (resid == 0 || toep->ddp.flags & DDP_DEAD) {
1969 /*
1970 * We filled the entire buffer with socket
1971 * data, DDP is not being used, or the socket
1972 * is being shut down, so complete the
1973 * request.
1974 */
1975 SOCKBUF_UNLOCK(sb);
1976 recycle_pageset(toep, ps);
1977 aio_complete(job, copied, 0);
1978 toep->ddp.queueing = NULL;
1979 goto restart;
1980 }
1981
1982 /*
1983 * If DDP is not enabled, requeue this request and restart.
1984 * This will either enable DDP or wait for more data to
1985 * arrive on the socket buffer.
1986 */
1987 if ((toep->ddp.flags & (DDP_ON | DDP_SC_REQ)) != DDP_ON) {
1988 SOCKBUF_UNLOCK(sb);
1989 recycle_pageset(toep, ps);
1990 aio_ddp_requeue_one(toep, job);
1991 toep->ddp.queueing = NULL;
1992 goto restart;
1993 }
1994
1995 /*
1996 * An indicate might have arrived and been added to
1997 * the socket buffer while it was unlocked after the
1998 * copy to lock the INP. If so, restart the copy.
1999 */
2000 if (sbavail(sb) != 0)
2001 goto sbcopy;
2002 }
2003 SOCKBUF_UNLOCK(sb);
2004
2005 if (prep_pageset(sc, toep, ps) == 0) {
2006 recycle_pageset(toep, ps);
2007 aio_ddp_requeue_one(toep, job);
2008 toep->ddp.queueing = NULL;
2009
2010 /*
2011 * XXX: Need to retry this later. Mostly need a trigger
2012 * when page pods are freed up.
2013 */
2014 printf("%s: prep_pageset failed\n", __func__);
2015 return;
2016 }
2017
2018 /* Determine which DDP buffer to use. */
2019 if (toep->ddp.db[0].job == NULL) {
2020 db_idx = 0;
2021 } else {
2022 MPASS(toep->ddp.db[1].job == NULL);
2023 db_idx = 1;
2024 }
2025
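/*
 * Build the TCB RX_DDP_FLAGS update for the selected buffer: mark the
 * buffer valid and, for non-blocking sockets, ask the card to flush it
 * on PSH.  If neither buffer is currently active, the ACTIVE_BUF field
 * is also included in the update (written as zero) so the card resumes
 * DDP with buffer 0, which is necessarily the buffer being queued here.
 */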
2026 ddp_flags = 0;
2027 ddp_flags_mask = 0;
2028 if (db_idx == 0) {
2029 ddp_flags |= V_TF_DDP_BUF0_VALID(1);
2030 if (so->so_state & SS_NBIO)
2031 ddp_flags |= V_TF_DDP_BUF0_FLUSH(1);
2032 ddp_flags_mask |= V_TF_DDP_PSH_NO_INVALIDATE0(1) |
2033 V_TF_DDP_PUSH_DISABLE_0(1) | V_TF_DDP_PSHF_ENABLE_0(1) |
2034 V_TF_DDP_BUF0_FLUSH(1) | V_TF_DDP_BUF0_VALID(1);
2035 buf_flag = DDP_BUF0_ACTIVE;
2036 } else {
2037 ddp_flags |= V_TF_DDP_BUF1_VALID(1);
2038 if (so->so_state & SS_NBIO)
2039 ddp_flags |= V_TF_DDP_BUF1_FLUSH(1);
2040 ddp_flags_mask |= V_TF_DDP_PSH_NO_INVALIDATE1(1) |
2041 V_TF_DDP_PUSH_DISABLE_1(1) | V_TF_DDP_PSHF_ENABLE_1(1) |
2042 V_TF_DDP_BUF1_FLUSH(1) | V_TF_DDP_BUF1_VALID(1);
2043 buf_flag = DDP_BUF1_ACTIVE;
2044 }
2045 MPASS((toep->ddp.flags & buf_flag) == 0);
2046 if ((toep->ddp.flags & (DDP_BUF0_ACTIVE | DDP_BUF1_ACTIVE)) == 0) {
2047 MPASS(db_idx == 0);
2048 MPASS(toep->ddp.active_id == -1);
2049 MPASS(toep->ddp.active_count == 0);
2050 ddp_flags_mask |= V_TF_DDP_ACTIVE_BUF(1);
2051 }
2052
2053 /*
2054 * The TID for this connection should still be valid. If DDP_DEAD
2055 * is set, SBS_CANTRCVMORE should be set, so we shouldn't be
2056 * this far anyway. Even if the socket is closing on the other
2057 * end, the AIO job holds a reference on this end of the socket
2058 * which will keep it open and keep the TCP PCB attached until
2059 * after the job is completed.
2060 */
2061 wr = mk_update_tcb_for_ddp(sc, toep, db_idx, ps, job->aio_received,
2062 ddp_flags, ddp_flags_mask);
2063 if (wr == NULL) {
2064 recycle_pageset(toep, ps);
2065 aio_ddp_requeue_one(toep, job);
2066 toep->ddp.queueing = NULL;
2067
2068 /*
2069 * XXX: Need a way to kick a retry here.
2070 *
2071 * XXX: We know the fixed size needed and could
2072 * preallocate this using a blocking request at the
2073 * start of the task to avoid having to handle this
2074 * edge case.
2075 */
2076 printf("%s: mk_update_tcb_for_ddp failed\n", __func__);
2077 return;
2078 }
2079
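/*
 * Switch the job over to the "active" cancel method before handing the
 * buffer to the card.  If this fails, the job is already being
 * cancelled, so back out: free the work request, recycle the page set,
 * and finish the cancellation here.
 */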
2080 if (!aio_set_cancel_function(job, t4_aio_cancel_active)) {
2081 free_wrqe(wr);
2082 recycle_pageset(toep, ps);
2083 aio_ddp_cancel_one(job);
2084 toep->ddp.queueing = NULL;
2085 goto restart;
2086 }
2087
2088 #ifdef VERBOSE_TRACES
2089 CTR6(KTR_CXGBE,
2090 "%s: tid %u, scheduling %p for DDP[%d] (flags %#lx/%#lx)", __func__,
2091 toep->tid, job, db_idx, ddp_flags, ddp_flags_mask);
2092 #endif
2093 /* Give the chip the go-ahead. */
2094 t4_wrq_tx(sc, wr);
2095 db = &toep->ddp.db[db_idx];
2096 db->cancel_pending = 0;
2097 db->job = job;
2098 db->ps = ps;
2099 toep->ddp.queueing = NULL;
2100 toep->ddp.flags |= buf_flag;
2101 toep->ddp.active_count++;
2102 if (toep->ddp.active_count == 1) {
2103 MPASS(toep->ddp.active_id == -1);
2104 toep->ddp.active_id = db_idx;
2105 CTR2(KTR_CXGBE, "%s: ddp_active_id = %d", __func__,
2106 toep->ddp.active_id);
2107 }
2108 goto restart;
2109 }
2110
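/*
 * Schedule the DDP requeue task for this connection.  The task runs
 * aio_ddp_requeue() from an AIO worker thread; a toepcb reference is
 * taken here and released by the task handler once it finishes.
 */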
2111 void
2112 ddp_queue_toep(struct toepcb *toep)
2113 {
2114
2115 DDP_ASSERT_LOCKED(toep);
2116 if (toep->ddp.flags & DDP_TASK_ACTIVE)
2117 return;
2118 toep->ddp.flags |= DDP_TASK_ACTIVE;
2119 hold_toepcb(toep);
2120 soaio_enqueue(&toep->ddp.requeue_task);
2121 }
2122
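/*
 * Taskqueue handler behind ddp_queue_toep().  It runs the requeue
 * logic with the DDP lock held, clears DDP_TASK_ACTIVE, and drops the
 * toepcb reference taken when the task was scheduled.
 */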
2123 static void
2124 aio_ddp_requeue_task(void *context, int pending)
2125 {
2126 struct toepcb *toep = context;
2127
2128 DDP_LOCK(toep);
2129 aio_ddp_requeue(toep);
2130 toep->ddp.flags &= ~DDP_TASK_ACTIVE;
2131 DDP_UNLOCK(toep);
2132
2133 free_toepcb(toep);
2134 }
2135
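/*
 * Cancel an AIO job whose buffer has already been handed to the card.
 * The job cannot be completed here; instead the buffer's VALID flag is
 * cleared via a TCB update tagged with the buffer's DDP cookie and the
 * buffer is marked cancel_pending.  The CPL_SET_TCB_RPL handler then
 * cancels or partially completes the job once the card acknowledges
 * the invalidation.
 */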
2136 static void
2137 t4_aio_cancel_active(struct kaiocb *job)
2138 {
2139 struct socket *so = job->fd_file->f_data;
2140 struct tcpcb *tp = sototcpcb(so);
2141 struct toepcb *toep = tp->t_toe;
2142 struct adapter *sc = td_adapter(toep->td);
2143 uint64_t valid_flag;
2144 int i;
2145
2146 DDP_LOCK(toep);
2147 if (aio_cancel_cleared(job)) {
2148 DDP_UNLOCK(toep);
2149 aio_ddp_cancel_one(job);
2150 return;
2151 }
2152
2153 for (i = 0; i < nitems(toep->ddp.db); i++) {
2154 if (toep->ddp.db[i].job == job) {
2155 /* Should only ever get one cancel request for a job. */
2156 MPASS(toep->ddp.db[i].cancel_pending == 0);
2157
2158 /*
2159 * Invalidate this buffer. It will be
2160 * cancelled or partially completed once the
2161 * card ACKs the invalidate.
2162 */
2163 valid_flag = i == 0 ? V_TF_DDP_BUF0_VALID(1) :
2164 V_TF_DDP_BUF1_VALID(1);
2165 t4_set_tcb_field(sc, toep->ctrlq, toep,
2166 W_TCB_RX_DDP_FLAGS, valid_flag, 0, 1,
2167 CPL_COOKIE_DDP0 + i);
2168 toep->ddp.db[i].cancel_pending = 1;
2169 CTR2(KTR_CXGBE, "%s: request %p marked pending",
2170 __func__, job);
2171 break;
2172 }
2173 }
2174 DDP_UNLOCK(toep);
2175 }
2176
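/*
 * Cancel an AIO job that is still on the software queue.  The job is
 * removed from the queue and cancelled immediately; if that leaves the
 * queue empty, the requeue task is scheduled.
 */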
2177 static void
2178 t4_aio_cancel_queued(struct kaiocb *job)
2179 {
2180 struct socket *so = job->fd_file->f_data;
2181 struct tcpcb *tp = sototcpcb(so);
2182 struct toepcb *toep = tp->t_toe;
2183
2184 DDP_LOCK(toep);
2185 if (!aio_cancel_cleared(job)) {
2186 TAILQ_REMOVE(&toep->ddp.aiojobq, job, list);
2187 toep->ddp.waiting_count--;
2188 if (toep->ddp.waiting_count == 0)
2189 ddp_queue_toep(toep);
2190 }
2191 CTR2(KTR_CXGBE, "%s: request %p cancelled", __func__, job);
2192 DDP_UNLOCK(toep);
2193
2194 aio_ddp_cancel_one(job);
2195 }
2196
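/*
 * Entry point for queueing an AIO request on a TOE socket for DDP.
 * Only LIO_READ requests are handled; anything else fails with
 * EOPNOTSUPP.  Accepted jobs are appended to the software queue and an
 * immediate attempt is made to process them via aio_ddp_requeue().
 */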
2197 int
2198 t4_aio_queue_ddp(struct socket *so, struct kaiocb *job)
2199 {
2200 struct tcpcb *tp = sototcpcb(so);
2201 struct toepcb *toep = tp->t_toe;
2202
2204 /* Ignore writes. */
2205 if (job->uaiocb.aio_lio_opcode != LIO_READ)
2206 return (EOPNOTSUPP);
2207
2208 DDP_LOCK(toep);
2209
2210 /*
2211 * XXX: Think about possibly returning errors for ENOTCONN,
2212 * etc. Perhaps the caller would only queue the request
2213 * if it failed with EOPNOTSUPP?
2214 */
2215
2216 #ifdef VERBOSE_TRACES
2217 CTR3(KTR_CXGBE, "%s: queueing %p for tid %u", __func__, job, toep->tid);
2218 #endif
2219 if (!aio_set_cancel_function(job, t4_aio_cancel_queued))
2220 panic("new job was cancelled");
2221 TAILQ_INSERT_TAIL(&toep->ddp.aiojobq, job, list);
2222 toep->ddp.waiting_count++;
2223 toep->ddp.flags |= DDP_OK;
2224
2225 /*
2226 * Try to handle this request synchronously. If this has
2227 * to block because the task is running, it will just bail
2228 * and let the task handle it instead.
2229 */
2230 aio_ddp_requeue(toep);
2231 DDP_UNLOCK(toep);
2232 return (0);
2233 }
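
/*
 * Usage sketch (illustrative only, not part of the driver): userland
 * reaches this entry point simply by issuing POSIX AIO reads on a
 * connected TCP socket carried by a TOE-capable cxgbe interface with
 * TOE and DDP enabled.  'sockfd' and 'buf' below are placeholders.
 *
 *	struct aiocb cb = { 0 };
 *	const struct aiocb *list[1] = { &cb };
 *	ssize_t nread;
 *
 *	cb.aio_fildes = sockfd;			// connected TCP socket
 *	cb.aio_buf = buf;			// receive buffer
 *	cb.aio_nbytes = sizeof(buf);
 *	if (aio_read(&cb) == 0) {
 *		aio_suspend(list, 1, NULL);	// wait for completion
 *		nread = aio_return(&cb);	// bytes received, or -1
 *	}
 */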
2234
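/*
 * Module load hook: register the CPL handlers used by DDP (TCB set
 * replies for both DDP cookies, RX_DATA_DDP, and RX_DDP_COMPLETE) and
 * initialize the orphan page set list, its lock, and its task.
 */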
2235 void
2236 t4_ddp_mod_load(void)
2237 {
2238
2239 t4_register_shared_cpl_handler(CPL_SET_TCB_RPL, do_ddp_tcb_rpl,
2240 CPL_COOKIE_DDP0);
2241 t4_register_shared_cpl_handler(CPL_SET_TCB_RPL, do_ddp_tcb_rpl,
2242 CPL_COOKIE_DDP1);
2243 t4_register_cpl_handler(CPL_RX_DATA_DDP, do_rx_data_ddp);
2244 t4_register_cpl_handler(CPL_RX_DDP_COMPLETE, do_rx_ddp_complete);
2245 TAILQ_INIT(&ddp_orphan_pagesets);
2246 mtx_init(&ddp_orphan_pagesets_lock, "ddp orphans", NULL, MTX_DEF);
2247 TASK_INIT(&ddp_orphan_task, 0, ddp_free_orphan_pagesets, NULL);
2248 }
2249
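/*
 * Module unload hook: drain the orphan page set task, assert that no
 * orphans remain, and tear down the lock and CPL handler registrations.
 */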
2250 void
2251 t4_ddp_mod_unload(void)
2252 {
2253
2254 taskqueue_drain(taskqueue_thread, &ddp_orphan_task);
2255 MPASS(TAILQ_EMPTY(&ddp_orphan_pagesets));
2256 mtx_destroy(&ddp_orphan_pagesets_lock);
2257 t4_register_shared_cpl_handler(CPL_SET_TCB_RPL, NULL, CPL_COOKIE_DDP0);
2258 t4_register_shared_cpl_handler(CPL_SET_TCB_RPL, NULL, CPL_COOKIE_DDP1);
2259 t4_register_cpl_handler(CPL_RX_DATA_DDP, NULL);
2260 t4_register_cpl_handler(CPL_RX_DDP_COMPLETE, NULL);
2261 }
2262 #endif