/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2007 Seccuris Inc.
 * All rights reserved.
 *
 * This software was developed by Robert N. M. Watson under contract to
 * Seccuris Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: releng/12.0/sys/net/bpf_zerocopy.c 326272 2017-11-27 15:23:17Z pfg $");

#include "opt_bpf.h"

#include <sys/param.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/sf_buf.h>
#include <sys/socket.h>
#include <sys/uio.h>

#include <machine/atomic.h>

#include <net/if.h>
#include <net/bpf.h>
#include <net/bpf_zerocopy.h>
#include <net/bpfdesc.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_extern.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>

/*
 * Zero-copy buffer scheme for BPF: user space "donates" two buffers, which
 * are mapped into the kernel address space using sf_bufs and used directly
 * by BPF.  Memory is wired since page faults cannot be tolerated in the
 * contexts where the buffers are copied to (locks held, interrupt context,
 * etc).  Access to shared memory buffers is synchronized using a header on
 * each buffer, allowing the number of system calls to go to zero as BPF
 * reaches saturation (buffers filled as fast as they can be drained by the
 * user process).  Full details of the protocol for communicating between the
 * user process and BPF may be found in bpf(4).
 */
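
/*
 * For orientation, a sketch of the user-space half of that protocol
 * follows.  It is illustrative only: the field names (bzh_kernel_gen,
 * bzh_kernel_len, bzh_user_gen) come from struct bpf_zbuf_header in
 * <net/bpf.h>, but the loop structure and the process() helper are
 * hypothetical, and a real consumer must pair the generation-number
 * accesses with acquire/release memory barriers as described in bpf(4):
 *
 *	void
 *	consume_loop(struct bpf_zbuf_header *bzh, unsigned char *buf)
 *	{
 *		unsigned int gen;
 *
 *		for (;;) {
 *			gen = bzh->bzh_kernel_gen;
 *			if (gen == bzh->bzh_user_gen)
 *				continue;		// nothing new yet
 *			process(buf + sizeof(*bzh), bzh->bzh_kernel_len);
 *			bzh->bzh_user_gen = gen;	// acknowledge
 *		}
 *	}
 */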

/*
 * Maximum number of pages per buffer.  Since all BPF devices use two, the
 * maximum per device is 2*BPF_MAX_PAGES.  Resource limits on the number of
 * sf_bufs may be an issue, so do not set this too high.  On older systems,
 * kernel address space limits may also be an issue.
 */
#define	BPF_MAX_PAGES	512

/*
 * struct zbuf describes a memory buffer loaned by a user process to the
 * kernel.  We represent this as a series of pages managed using an array of
 * sf_bufs.  Even though the memory is contiguous in user space, it may not
 * be mapped contiguously in the kernel (i.e., a set of physically
 * non-contiguous pages in the direct map region) so we must implement
 * scatter-gather copying.  One significant mitigating factor is that on
 * systems with a direct memory map, we can avoid TLB misses.
 *
 * At the front of the shared memory region is a bpf_zbuf_header, which
 * contains shared control data to allow user space and the kernel to
 * synchronize; this is included in zb_size, but not bd_bufsize, so that BPF
 * knows that the space is not available.
 */
struct zbuf {
	vm_offset_t	 zb_uaddr;	/* User address at time of setup. */
	size_t		 zb_size;	/* Size of buffer, incl. header. */
	u_int		 zb_numpages;	/* Number of pages. */
	int		 zb_flags;	/* Flags on zbuf. */
	struct sf_buf	**zb_pages;	/* Pages themselves. */
	struct bpf_zbuf_header	*zb_header;	/* Shared header. */
};
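
/*
 * For illustration, the layout of one donated region (sizes not to scale;
 * the header length is sizeof(struct bpf_zbuf_header)):
 *
 *	zb_uaddr -> +---------------------------+
 *	            | struct bpf_zbuf_header    |  shared control data
 *	            +---------------------------+
 *	            | packet data               |  bd_bufsize usable bytes
 *	            |        ...                |
 *	            +---------------------------+ <- zb_uaddr + zb_size
 *
 * Each PAGE_SIZE slice of the range is wired and tracked by one sf_buf in
 * zb_pages[].
 */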

/*
 * When a buffer has been assigned to userspace, flag it as such, as the
 * buffer may remain in the store position as a result of the user process
 * not yet having acknowledged the buffer in the hold position.
 */
#define	ZBUF_FLAG_ASSIGNED	0x00000001	/* Set when owned by user. */

/*
 * Release a page we've previously wired.
 */
static void
zbuf_page_free(vm_page_t pp)
{

	vm_page_lock(pp);
	if (vm_page_unwire(pp, PQ_INACTIVE) && pp->object == NULL)
		vm_page_free(pp);
	vm_page_unlock(pp);
}

/*
 * Free an sf_buf with attached page.
 */
static void
zbuf_sfbuf_free(struct sf_buf *sf)
{
	vm_page_t pp;

	pp = sf_buf_page(sf);
	sf_buf_free(sf);
	zbuf_page_free(pp);
}

/*
 * Free a zbuf, including its page array, sf_bufs, and pages.  Allow
 * partially allocated zbufs to be freed, so that this function may be used
 * even when zbuf setup fails partway through.
 */
static void
zbuf_free(struct zbuf *zb)
{
	int i;

	for (i = 0; i < zb->zb_numpages; i++) {
		if (zb->zb_pages[i] != NULL)
			zbuf_sfbuf_free(zb->zb_pages[i]);
	}
	free(zb->zb_pages, M_BPF);
	free(zb, M_BPF);
}

/*
 * Given a user pointer to a page of user memory, return an sf_buf for the
 * page.  Because we may be requesting quite a few sf_bufs, prefer failure to
 * deadlock and use SFB_NOWAIT.
 */
static struct sf_buf *
zbuf_sfbuf_get(struct vm_map *map, vm_offset_t uaddr)
{
	struct sf_buf *sf;
	vm_page_t pp;

	if (vm_fault_quick_hold_pages(map, uaddr, PAGE_SIZE, VM_PROT_READ |
	    VM_PROT_WRITE, &pp, 1) < 0)
		return (NULL);
	vm_page_lock(pp);
	vm_page_wire(pp);
	vm_page_unhold(pp);
	vm_page_unlock(pp);
	sf = sf_buf_alloc(pp, SFB_NOWAIT);
	if (sf == NULL) {
		zbuf_page_free(pp);
		return (NULL);
	}
	return (sf);
}

/*
 * Create a zbuf describing a range of user address space memory.  Validate
 * page alignment, size requirements, etc.
 */
static int
zbuf_setup(struct thread *td, vm_offset_t uaddr, size_t len,
    struct zbuf **zbp)
{
	struct zbuf *zb;
	struct vm_map *map;
	int error, i;

	*zbp = NULL;

	/*
	 * User address must be page-aligned.
	 */
	if (uaddr & PAGE_MASK)
		return (EINVAL);

	/*
	 * Length must be an integer number of full pages.
	 */
	if (len & PAGE_MASK)
		return (EINVAL);

	/*
	 * Length must not exceed per-buffer resource limit.
	 */
	if ((len / PAGE_SIZE) > BPF_MAX_PAGES)
		return (EINVAL);

	/*
	 * Allocate the buffer and set up each page with its own sf_buf.
	 */
	error = 0;
	zb = malloc(sizeof(*zb), M_BPF, M_ZERO | M_WAITOK);
	zb->zb_uaddr = uaddr;
	zb->zb_size = len;
	zb->zb_numpages = len / PAGE_SIZE;
	zb->zb_pages = malloc(sizeof(struct sf_buf *) *
	    zb->zb_numpages, M_BPF, M_ZERO | M_WAITOK);
	map = &td->td_proc->p_vmspace->vm_map;
	for (i = 0; i < zb->zb_numpages; i++) {
		zb->zb_pages[i] = zbuf_sfbuf_get(map,
		    uaddr + (i * PAGE_SIZE));
		if (zb->zb_pages[i] == NULL) {
			error = EFAULT;
			goto error;
		}
	}
	zb->zb_header =
	    (struct bpf_zbuf_header *)sf_buf_kva(zb->zb_pages[0]);
	bzero(zb->zb_header, sizeof(*zb->zb_header));
	*zbp = zb;
	return (0);

error:
	zbuf_free(zb);
	return (error);
}

/*
 * Copy bytes from a source into the specified zbuf.  The caller is
 * responsible for performing bounds checking, etc.
 */
void
bpf_zerocopy_append_bytes(struct bpf_d *d, caddr_t buf, u_int offset,
    void *src, u_int len)
{
	u_int count, page, poffset;
	u_char *src_bytes;
	struct zbuf *zb;

	KASSERT(d->bd_bufmode == BPF_BUFMODE_ZBUF,
	    ("bpf_zerocopy_append_bytes: not in zbuf mode"));
	KASSERT(buf != NULL, ("bpf_zerocopy_append_bytes: NULL buf"));

	src_bytes = (u_char *)src;
	zb = (struct zbuf *)buf;

	KASSERT((zb->zb_flags & ZBUF_FLAG_ASSIGNED) == 0,
	    ("bpf_zerocopy_append_bytes: ZBUF_FLAG_ASSIGNED"));

	/*
	 * Scatter-gather copy to user pages mapped into kernel address space
	 * using sf_bufs: copy up to a page at a time.
	 */
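	/*
	 * Worked example (illustrative; assumes PAGE_SIZE is 4096 and a
	 * 32-byte header): an append at offset 8160 becomes offset 8192
	 * after the header adjustment below, so page = 2 and poffset = 0,
	 * and copying begins at the top of the third wired page.
	 */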
	offset += sizeof(struct bpf_zbuf_header);
	page = offset / PAGE_SIZE;
	poffset = offset % PAGE_SIZE;
	while (len > 0) {
		KASSERT(page < zb->zb_numpages, ("bpf_zerocopy_append_bytes:"
		    " page overflow (%d p %d np)\n", page, zb->zb_numpages));

		count = min(len, PAGE_SIZE - poffset);
		bcopy(src_bytes, ((u_char *)sf_buf_kva(zb->zb_pages[page])) +
		    poffset, count);
		poffset += count;
		if (poffset == PAGE_SIZE) {
			poffset = 0;
			page++;
		}
		KASSERT(poffset < PAGE_SIZE,
		    ("bpf_zerocopy_append_bytes: page offset overflow (%d)",
		    poffset));
		len -= count;
		src_bytes += count;
	}
}

/*
 * Copy bytes from an mbuf chain to the specified zbuf: copying will be
 * scatter-gather both from mbufs, which may be fragmented over memory, and
 * to pages, which may not be contiguously mapped in kernel address space.
 * As with bpf_zerocopy_append_bytes(), the caller is responsible for
 * checking that this will not exceed the buffer limit.
 */
void
bpf_zerocopy_append_mbuf(struct bpf_d *d, caddr_t buf, u_int offset,
    void *src, u_int len)
{
	u_int count, moffset, page, poffset;
	const struct mbuf *m;
	struct zbuf *zb;

	KASSERT(d->bd_bufmode == BPF_BUFMODE_ZBUF,
	    ("bpf_zerocopy_append_mbuf not in zbuf mode"));
	KASSERT(buf != NULL, ("bpf_zerocopy_append_mbuf: NULL buf"));

	m = (struct mbuf *)src;
	zb = (struct zbuf *)buf;

	KASSERT((zb->zb_flags & ZBUF_FLAG_ASSIGNED) == 0,
	    ("bpf_zerocopy_append_mbuf: ZBUF_FLAG_ASSIGNED"));

	/*
	 * Scatter-gather both from an mbuf chain and to a user page set
	 * mapped into kernel address space using sf_bufs.  If we're lucky,
	 * each mbuf requires one copy operation, but if page alignment and
	 * mbuf alignment work out less well, we'll be doing two copies per
	 * mbuf.
	 */
	offset += sizeof(struct bpf_zbuf_header);
	page = offset / PAGE_SIZE;
	poffset = offset % PAGE_SIZE;
	moffset = 0;
	while (len > 0) {
		KASSERT(page < zb->zb_numpages,
		    ("bpf_zerocopy_append_mbuf: page overflow (%d p %d "
		    "np)\n", page, zb->zb_numpages));
		KASSERT(m != NULL,
		    ("bpf_zerocopy_append_mbuf: end of mbuf chain"));

		count = min(m->m_len - moffset, len);
		count = min(count, PAGE_SIZE - poffset);
		bcopy(mtod(m, u_char *) + moffset,
		    ((u_char *)sf_buf_kva(zb->zb_pages[page])) + poffset,
		    count);
		poffset += count;
		if (poffset == PAGE_SIZE) {
			poffset = 0;
			page++;
		}
		KASSERT(poffset < PAGE_SIZE,
		    ("bpf_zerocopy_append_mbuf: page offset overflow (%d)",
		    poffset));
		moffset += count;
		if (moffset == m->m_len) {
			m = m->m_next;
			moffset = 0;
		}
		len -= count;
	}
}

/*
 * Notification from the BPF framework that a buffer in the store position is
 * rejecting packets and may be considered full.  We mark the buffer as
 * immutable and assign it to userspace so that it is immediately available
 * for the user process to access.
 */
void
bpf_zerocopy_buffull(struct bpf_d *d)
{
	struct zbuf *zb;

	KASSERT(d->bd_bufmode == BPF_BUFMODE_ZBUF,
	    ("bpf_zerocopy_buffull: not in zbuf mode"));

	zb = (struct zbuf *)d->bd_sbuf;
	KASSERT(zb != NULL, ("bpf_zerocopy_buffull: zb == NULL"));

	if ((zb->zb_flags & ZBUF_FLAG_ASSIGNED) == 0) {
		zb->zb_flags |= ZBUF_FLAG_ASSIGNED;
		zb->zb_header->bzh_kernel_len = d->bd_slen;
		atomic_add_rel_int(&zb->zb_header->bzh_kernel_gen, 1);
	}
}

/*
 * Notification from the BPF framework that a buffer has moved into the held
 * slot on a descriptor.  Zero-copy BPF will update the shared page to let
 * the user process know and flag the buffer as assigned if it hasn't already
 * been marked assigned due to filling while it was in the store position.
 *
 * Note: identical logic as in bpf_zerocopy_buffull(), except that we operate
 * on bd_hbuf and bd_hlen.
 */
void
bpf_zerocopy_bufheld(struct bpf_d *d)
{
	struct zbuf *zb;

	KASSERT(d->bd_bufmode == BPF_BUFMODE_ZBUF,
	    ("bpf_zerocopy_bufheld: not in zbuf mode"));

	zb = (struct zbuf *)d->bd_hbuf;
	KASSERT(zb != NULL, ("bpf_zerocopy_bufheld: zb == NULL"));

	if ((zb->zb_flags & ZBUF_FLAG_ASSIGNED) == 0) {
		zb->zb_flags |= ZBUF_FLAG_ASSIGNED;
		zb->zb_header->bzh_kernel_len = d->bd_hlen;
		atomic_add_rel_int(&zb->zb_header->bzh_kernel_gen, 1);
	}
}

/*
 * Notification from the BPF framework that a buffer has been rotated out of
 * the held position into the free position.  This happens when the user
 * process acknowledges the held buffer.
 */
void
bpf_zerocopy_buf_reclaimed(struct bpf_d *d)
{
	struct zbuf *zb;

	KASSERT(d->bd_bufmode == BPF_BUFMODE_ZBUF,
	    ("bpf_zerocopy_buf_reclaimed: not in zbuf mode"));

	KASSERT(d->bd_fbuf != NULL,
	    ("bpf_zerocopy_buf_reclaimed: NULL free buf"));
	zb = (struct zbuf *)d->bd_fbuf;
	zb->zb_flags &= ~ZBUF_FLAG_ASSIGNED;
}

/*
 * Query from the BPF framework regarding whether the buffer currently in the
 * held position can be moved to the free position, which can be indicated by
 * the user process making its generation number equal to the kernel
 * generation number.
 */
int
bpf_zerocopy_canfreebuf(struct bpf_d *d)
{
	struct zbuf *zb;

	KASSERT(d->bd_bufmode == BPF_BUFMODE_ZBUF,
	    ("bpf_zerocopy_canfreebuf: not in zbuf mode"));

	zb = (struct zbuf *)d->bd_hbuf;
	if (zb == NULL)
		return (0);
	if (zb->zb_header->bzh_kernel_gen ==
	    atomic_load_acq_int(&zb->zb_header->bzh_user_gen))
		return (1);
	return (0);
}
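
/*
 * The user-space side of this check is the acknowledgment: after consuming
 * a held buffer, the process stores the kernel generation number into
 * bzh_user_gen.  A hedged sketch (atomic_store_rel_int() here stands in for
 * whatever release-barrier store the consumer has available):
 *
 *	static void
 *	ack_buffer(struct bpf_zbuf_header *bzh)
 *	{
 *
 *		atomic_store_rel_int(&bzh->bzh_user_gen,
 *		    bzh->bzh_kernel_gen);
 *	}
 *
 * Once that store is visible, bpf_zerocopy_canfreebuf() returns 1 and the
 * buffer may rotate back to the free position.
 */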

/*
 * Query from the BPF framework as to whether or not the buffer currently in
 * the store position can actually be written to.  This may return false if
 * the store buffer is assigned to userspace before the hold buffer is
 * acknowledged.
 */
int
bpf_zerocopy_canwritebuf(struct bpf_d *d)
{
	struct zbuf *zb;

	KASSERT(d->bd_bufmode == BPF_BUFMODE_ZBUF,
	    ("bpf_zerocopy_canwritebuf: not in zbuf mode"));

	zb = (struct zbuf *)d->bd_sbuf;
	KASSERT(zb != NULL, ("bpf_zerocopy_canwritebuf: bd_sbuf NULL"));

	if (zb->zb_flags & ZBUF_FLAG_ASSIGNED)
		return (0);
	return (1);
}

/*
 * Free zero-copy buffers at request of descriptor.
 */
void
bpf_zerocopy_free(struct bpf_d *d)
{
	struct zbuf *zb;

	KASSERT(d->bd_bufmode == BPF_BUFMODE_ZBUF,
	    ("bpf_zerocopy_free: not in zbuf mode"));

	zb = (struct zbuf *)d->bd_sbuf;
	if (zb != NULL)
		zbuf_free(zb);
	zb = (struct zbuf *)d->bd_hbuf;
	if (zb != NULL)
		zbuf_free(zb);
	zb = (struct zbuf *)d->bd_fbuf;
	if (zb != NULL)
		zbuf_free(zb);
}

/*
 * Ioctl to return the maximum buffer size.
 */
int
bpf_zerocopy_ioctl_getzmax(struct thread *td, struct bpf_d *d, size_t *i)
{

	KASSERT(d->bd_bufmode == BPF_BUFMODE_ZBUF,
	    ("bpf_zerocopy_ioctl_getzmax: not in zbuf mode"));

	*i = BPF_MAX_PAGES * PAGE_SIZE;
	return (0);
}
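
/*
 * From user space this is reached via the BIOCGETZMAX ioctl.  A minimal
 * sketch (error handling elided; fd is an open bpf(4) descriptor already
 * switched to zero-copy mode):
 *
 *	size_t zmax;
 *
 *	if (ioctl(fd, BIOCGETZMAX, &zmax) < 0)
 *		err(1, "BIOCGETZMAX");
 *	// Each donated buffer may be at most zmax bytes.
 */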

/*
 * Ioctl to force rotation of the two buffers, if there's any data available.
 * This can be used by user space to implement timeouts when waiting for a
 * buffer to fill.
 */
int
bpf_zerocopy_ioctl_rotzbuf(struct thread *td, struct bpf_d *d,
    struct bpf_zbuf *bz)
{
	struct zbuf *bzh;

	bzero(bz, sizeof(*bz));
	BPFD_LOCK(d);
	if (d->bd_hbuf == NULL && d->bd_slen != 0) {
		ROTATE_BUFFERS(d);
		bzh = (struct zbuf *)d->bd_hbuf;
		bz->bz_bufa = (void *)bzh->zb_uaddr;
		bz->bz_buflen = d->bd_hlen;
	}
	BPFD_UNLOCK(d);
	return (0);
}
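
/*
 * User space reaches this via the BIOCROTZBUF ioctl, typically after a
 * select(2) or poll(2) timeout expires.  A hedged sketch, reusing the
 * hypothetical process() helper from the header comment:
 *
 *	struct bpf_zbuf bz;
 *
 *	if (ioctl(fd, BIOCROTZBUF, &bz) < 0)
 *		err(1, "BIOCROTZBUF");
 *	if (bz.bz_bufa != NULL)			// a buffer was rotated
 *		process(bz.bz_bufa, bz.bz_buflen);
 *
 * A NULL bz_bufa means no data was pending, since this handler zeroes the
 * structure before the rotation check.
 */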

/*
 * Ioctl to configure zero-copy buffers -- may be done only once.
 */
int
bpf_zerocopy_ioctl_setzbuf(struct thread *td, struct bpf_d *d,
    struct bpf_zbuf *bz)
{
	struct zbuf *zba, *zbb;
	int error;

	KASSERT(d->bd_bufmode == BPF_BUFMODE_ZBUF,
	    ("bpf_zerocopy_ioctl_setzbuf: not in zbuf mode"));

	/*
	 * Must set both buffers.  Cannot clear them.
	 */
	if (bz->bz_bufa == NULL || bz->bz_bufb == NULL)
		return (EINVAL);

	/*
	 * Buffers must have a size greater than 0.  Alignment and other size
	 * validity checking is done in zbuf_setup().
	 */
	if (bz->bz_buflen == 0)
		return (EINVAL);

	/*
	 * Allocate new buffers.
	 */
	error = zbuf_setup(td, (vm_offset_t)bz->bz_bufa, bz->bz_buflen,
	    &zba);
	if (error)
		return (error);
	error = zbuf_setup(td, (vm_offset_t)bz->bz_bufb, bz->bz_buflen,
	    &zbb);
	if (error) {
		zbuf_free(zba);
		return (error);
	}

	/*
	 * We only allow buffers to be installed once, so atomically check
	 * that no buffers are currently installed and install new buffers.
	 */
	BPFD_LOCK(d);
	if (d->bd_hbuf != NULL || d->bd_sbuf != NULL || d->bd_fbuf != NULL ||
	    d->bd_bif != NULL) {
		BPFD_UNLOCK(d);
		zbuf_free(zba);
		zbuf_free(zbb);
		return (EINVAL);
	}

	/*
	 * Point BPF descriptor at buffers; initialize sbuf as zba so that
	 * it is always filled first in the sequence, per bpf(4).
	 */
	d->bd_fbuf = (caddr_t)zbb;
	d->bd_sbuf = (caddr_t)zba;
	d->bd_slen = 0;
	d->bd_hlen = 0;

	/*
	 * We expose only the space left in the buffer after the size of the
	 * shared management region.
	 */
	d->bd_bufsize = bz->bz_buflen - sizeof(struct bpf_zbuf_header);
	BPFD_UNLOCK(d);
	return (0);
}
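
/*
 * Putting the ioctls together, a plausible user-space setup sequence is
 * sketched below (illustrative only: error handling is elided and the
 * interface name "em0" is a placeholder).  Note that BIOCSETZBUF must
 * precede BIOCSETIF, since the check above rejects descriptors already
 * attached to an interface:
 *
 *	struct bpf_zbuf bz;
 *	struct ifreq ifr;
 *	u_int mode = BPF_BUFMODE_ZBUF;
 *	size_t buflen;
 *	int fd;
 *
 *	fd = open("/dev/bpf", O_RDWR);
 *	ioctl(fd, BIOCSETBUFMODE, &mode);
 *	buflen = 32 * getpagesize();	// page-aligned, <= BIOCGETZMAX
 *	bz.bz_bufa = mmap(NULL, buflen, PROT_READ | PROT_WRITE,
 *	    MAP_ANON, -1, 0);
 *	bz.bz_bufb = mmap(NULL, buflen, PROT_READ | PROT_WRITE,
 *	    MAP_ANON, -1, 0);
 *	bz.bz_buflen = buflen;
 *	ioctl(fd, BIOCSETZBUF, &bz);
 *	strlcpy(ifr.ifr_name, "em0", sizeof(ifr.ifr_name));
 *	ioctl(fd, BIOCSETIF, &ifr);
 */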