/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2007 Seccuris Inc.
 * All rights reserved.
 *
 * This software was developed by Robert N. M. Watson under contract to
 * Seccuris Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_bpf.h"

#include <sys/param.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/sf_buf.h>
#include <sys/socket.h>
#include <sys/uio.h>

#include <machine/atomic.h>

#include <net/if.h>
#include <net/bpf.h>
#include <net/bpf_zerocopy.h>
#include <net/bpfdesc.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_extern.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>

/*
 * Zero-copy buffer scheme for BPF: user space "donates" two buffers, which
 * are mapped into the kernel address space using sf_bufs and used directly
 * by BPF.  Memory is wired since page faults cannot be tolerated in the
 * contexts where the buffers are copied to (locks held, interrupt context,
 * etc).  Access to shared memory buffers is synchronized using a header on
 * each buffer, allowing the number of system calls to go to zero as BPF
 * reaches saturation (buffers filled as fast as they can be drained by the
 * user process).  Full details of the protocol for communicating between the
 * user process and BPF may be found in bpf(4).
 */
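
/*
 * For illustration only, a sketch of the user-space side of that protocol
 * (this code does not live in the kernel; the bzh_* fields come from
 * <net/bpf.h>, while process_packets() and the atomic wrappers are
 * hypothetical):
 *
 *	struct bpf_zbuf_header *bzh = (struct bpf_zbuf_header *)buf;
 *	unsigned int gen;
 *
 *	gen = atomic_load_acq(&bzh->bzh_kernel_gen);
 *	if (gen != bzh->bzh_user_gen) {
 *		process_packets((char *)buf + sizeof(*bzh),
 *		    bzh->bzh_kernel_len);
 *		atomic_store_rel(&bzh->bzh_user_gen, gen);
 *	}
 *
 * The release store of bzh_user_gen is what bpf_zerocopy_canfreebuf()
 * below pairs with when deciding whether a held buffer may be reclaimed.
 */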

/*
 * Maximum number of pages per buffer.  Since all BPF devices use two, the
 * maximum per device is 2*BPF_MAX_PAGES.  Resource limits on the number of
 * sf_bufs may be an issue, so do not set this too high.  On older systems,
 * kernel address space limits may also be an issue.
 */
#define	BPF_MAX_PAGES	512

/*
 * struct zbuf describes a memory buffer loaned by a user process to the
 * kernel.  We represent this as a series of pages managed using an array of
 * sf_bufs.  Even though the memory is contiguous in user space, it may not
 * be mapped contiguously in the kernel (i.e., a set of physically
 * non-contiguous pages in the direct map region), so we must implement
 * scatter-gather copying.  One significant mitigating factor is that on
 * systems with a direct memory map, we can avoid TLB misses.
 *
 * At the front of the shared memory region is a bpf_zbuf_header, which
 * contains shared control data to allow user space and the kernel to
 * synchronize; this is included in zb_size, but not bd_bufsize, so that BPF
 * knows that the space is not available.
 */
struct zbuf {
	vm_offset_t	 zb_uaddr;	/* User address at time of setup. */
	size_t		 zb_size;	/* Size of buffer, incl. header. */
	u_int		 zb_numpages;	/* Number of pages. */
	int		 zb_flags;	/* Flags on zbuf. */
	struct sf_buf	**zb_pages;	/* Pages themselves. */
	struct bpf_zbuf_header	*zb_header;	/* Shared header. */
};
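
/*
 * A sketch of how a single zbuf is laid out in the donated user memory
 * (zb_size and bd_bufsize as used elsewhere in this file):
 *
 *	+------------------------+----------------------------------+
 *	| struct bpf_zbuf_header |   packet data area (bd_bufsize)  |
 *	+------------------------+----------------------------------+
 *	|<------------------------ zb_size ------------------------>|
 */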

/*
 * When a buffer has been assigned to userspace, flag it as such, as the
 * buffer may remain in the store position as a result of the user process
 * not yet having acknowledged the buffer in the hold position.
 */
#define	ZBUF_FLAG_ASSIGNED	0x00000001	/* Set when owned by user. */

/*
 * Release a page we've previously wired.
 */
static void
zbuf_page_free(vm_page_t pp)
{

	vm_page_unwire(pp, PQ_INACTIVE);
}

/*
 * Free an sf_buf with attached page.
 */
static void
zbuf_sfbuf_free(struct sf_buf *sf)
{
	vm_page_t pp;

	pp = sf_buf_page(sf);
	sf_buf_free(sf);
	zbuf_page_free(pp);
}

/*
 * Free a zbuf, including its page array, sf_bufs, and pages.  Allow
 * partially allocated zbufs to be freed, so that this can be used to clean
 * up after a failed zbuf setup.
 */
static void
zbuf_free(struct zbuf *zb)
{
	int i;

	for (i = 0; i < zb->zb_numpages; i++) {
		if (zb->zb_pages[i] != NULL)
			zbuf_sfbuf_free(zb->zb_pages[i]);
	}
	free(zb->zb_pages, M_BPF);
	free(zb, M_BPF);
}

/*
 * Given a user pointer to a page of user memory, return an sf_buf for the
 * page.  Because we may be requesting quite a few sf_bufs, prefer failure to
 * deadlock and use SFB_NOWAIT.
 */
static struct sf_buf *
zbuf_sfbuf_get(struct vm_map *map, vm_offset_t uaddr)
{
	struct sf_buf *sf;
	vm_page_t pp;

	if (vm_fault_quick_hold_pages(map, uaddr, PAGE_SIZE, VM_PROT_READ |
	    VM_PROT_WRITE, &pp, 1) < 0)
		return (NULL);
	sf = sf_buf_alloc(pp, SFB_NOWAIT);
	if (sf == NULL) {
		zbuf_page_free(pp);
		return (NULL);
	}
	return (sf);
}

/*
 * Create a zbuf describing a range of user address space memory.  Validate
 * page alignment, size requirements, etc.
 */
static int
zbuf_setup(struct thread *td, vm_offset_t uaddr, size_t len,
    struct zbuf **zbp)
{
	struct zbuf *zb;
	struct vm_map *map;
	int error, i;

	*zbp = NULL;

	/*
	 * User address must be page-aligned.
	 */
	if (uaddr & PAGE_MASK)
		return (EINVAL);

	/*
	 * Length must be an integer number of full pages.
	 */
	if (len & PAGE_MASK)
		return (EINVAL);

	/*
	 * Length must not exceed per-buffer resource limit.
	 */
	if ((len / PAGE_SIZE) > BPF_MAX_PAGES)
		return (EINVAL);

	/*
	 * Allocate the buffer and set up each page with its own sf_buf.
	 */
	error = 0;
	zb = malloc(sizeof(*zb), M_BPF, M_ZERO | M_WAITOK);
	zb->zb_uaddr = uaddr;
	zb->zb_size = len;
	zb->zb_numpages = len / PAGE_SIZE;
	zb->zb_pages = malloc(sizeof(struct sf_buf *) *
	    zb->zb_numpages, M_BPF, M_ZERO | M_WAITOK);
	map = &td->td_proc->p_vmspace->vm_map;
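	/*
	 * Wire each page of the buffer into physical memory and map it into
	 * the kernel via an sf_buf; any failure unwinds through zbuf_free(),
	 * which tolerates a partially populated page array.
	 */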
	for (i = 0; i < zb->zb_numpages; i++) {
		zb->zb_pages[i] = zbuf_sfbuf_get(map,
		    uaddr + (i * PAGE_SIZE));
		if (zb->zb_pages[i] == NULL) {
			error = EFAULT;
			goto error;
		}
	}
	zb->zb_header =
	    (struct bpf_zbuf_header *)sf_buf_kva(zb->zb_pages[0]);
	bzero(zb->zb_header, sizeof(*zb->zb_header));
	*zbp = zb;
	return (0);

error:
	zbuf_free(zb);
	return (error);
}

/*
 * Copy bytes from a source into the specified zbuf.  The caller is
 * responsible for performing bounds checking, etc.
 */
void
bpf_zerocopy_append_bytes(struct bpf_d *d, caddr_t buf, u_int offset,
    void *src, u_int len)
{
	u_int count, page, poffset;
	u_char *src_bytes;
	struct zbuf *zb;

	KASSERT(d->bd_bufmode == BPF_BUFMODE_ZBUF,
	    ("bpf_zerocopy_append_bytes: not in zbuf mode"));
	KASSERT(buf != NULL, ("bpf_zerocopy_append_bytes: NULL buf"));

	src_bytes = (u_char *)src;
	zb = (struct zbuf *)buf;

	KASSERT((zb->zb_flags & ZBUF_FLAG_ASSIGNED) == 0,
	    ("bpf_zerocopy_append_bytes: ZBUF_FLAG_ASSIGNED"));

	/*
	 * Scatter-gather copy to user pages mapped into kernel address space
	 * using sf_bufs: copy up to a page at a time.
	 */
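	/*
	 * Offsets from BPF are relative to the start of the data area, so
	 * skip past the shared bpf_zbuf_header at the front of the buffer.
	 */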
	offset += sizeof(struct bpf_zbuf_header);
	page = offset / PAGE_SIZE;
	poffset = offset % PAGE_SIZE;
	while (len > 0) {
		KASSERT(page < zb->zb_numpages, ("bpf_zerocopy_append_bytes:"
		    " page overflow (%d p %d np)\n", page, zb->zb_numpages));

		count = min(len, PAGE_SIZE - poffset);
		bcopy(src_bytes, ((u_char *)sf_buf_kva(zb->zb_pages[page])) +
		    poffset, count);
		poffset += count;
		if (poffset == PAGE_SIZE) {
			poffset = 0;
			page++;
		}
		KASSERT(poffset < PAGE_SIZE,
		    ("bpf_zerocopy_append_bytes: page offset overflow (%d)",
		    poffset));
		len -= count;
		src_bytes += count;
	}
}

/*
 * Copy bytes from an mbuf chain to the specified zbuf: copying will be
 * scatter-gather both from mbufs, which may be fragmented over memory, and
 * to pages, which may not be contiguously mapped in kernel address space.
 * As with bpf_zerocopy_append_bytes(), the caller is responsible for
 * checking that this will not exceed the buffer limit.
 */
void
bpf_zerocopy_append_mbuf(struct bpf_d *d, caddr_t buf, u_int offset,
    void *src, u_int len)
{
	u_int count, moffset, page, poffset;
	const struct mbuf *m;
	struct zbuf *zb;

	KASSERT(d->bd_bufmode == BPF_BUFMODE_ZBUF,
	    ("bpf_zerocopy_append_mbuf not in zbuf mode"));
	KASSERT(buf != NULL, ("bpf_zerocopy_append_mbuf: NULL buf"));

	m = (struct mbuf *)src;
	zb = (struct zbuf *)buf;

	KASSERT((zb->zb_flags & ZBUF_FLAG_ASSIGNED) == 0,
	    ("bpf_zerocopy_append_mbuf: ZBUF_FLAG_ASSIGNED"));

	/*
	 * Scatter-gather both from an mbuf chain and to a user page set
	 * mapped into kernel address space using sf_bufs.  If we're lucky,
	 * each mbuf requires one copy operation, but if page alignment and
	 * mbuf alignment work out less well, we'll be doing two copies per
	 * mbuf.
	 */
	offset += sizeof(struct bpf_zbuf_header);
	page = offset / PAGE_SIZE;
	poffset = offset % PAGE_SIZE;
	moffset = 0;
	while (len > 0) {
		KASSERT(page < zb->zb_numpages,
		    ("bpf_zerocopy_append_mbuf: page overflow (%d p %d "
		    "np)\n", page, zb->zb_numpages));
		KASSERT(m != NULL,
		    ("bpf_zerocopy_append_mbuf: end of mbuf chain"));

		count = min(m->m_len - moffset, len);
		count = min(count, PAGE_SIZE - poffset);
		bcopy(mtod(m, u_char *) + moffset,
		    ((u_char *)sf_buf_kva(zb->zb_pages[page])) + poffset,
		    count);
		poffset += count;
		if (poffset == PAGE_SIZE) {
			poffset = 0;
			page++;
		}
		KASSERT(poffset < PAGE_SIZE,
		    ("bpf_zerocopy_append_mbuf: page offset overflow (%d)",
		    poffset));
		moffset += count;
		if (moffset == m->m_len) {
			m = m->m_next;
			moffset = 0;
		}
		len -= count;
	}
}

/*
 * Notification from the BPF framework that a buffer in the store position is
 * rejecting packets and may be considered full.  We mark the buffer as
 * immutable and assign it to userspace so that it is immediately available
 * for the user process to access.
 */
void
bpf_zerocopy_buffull(struct bpf_d *d)
{
	struct zbuf *zb;

	KASSERT(d->bd_bufmode == BPF_BUFMODE_ZBUF,
	    ("bpf_zerocopy_buffull: not in zbuf mode"));

	zb = (struct zbuf *)d->bd_sbuf;
	KASSERT(zb != NULL, ("bpf_zerocopy_buffull: zb == NULL"));

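	/*
	 * Publish the buffer to userspace: the release semantics of the
	 * generation-count update guarantee that the store of
	 * bzh_kernel_len is visible before the new bzh_kernel_gen value.
	 */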
	if ((zb->zb_flags & ZBUF_FLAG_ASSIGNED) == 0) {
		zb->zb_flags |= ZBUF_FLAG_ASSIGNED;
		zb->zb_header->bzh_kernel_len = d->bd_slen;
		atomic_add_rel_int(&zb->zb_header->bzh_kernel_gen, 1);
	}
}
373
374 /*
375 * Notification from the BPF framework that a buffer has moved into the held
376 * slot on a descriptor. Zero-copy BPF will update the shared page to let
377 * the user process know and flag the buffer as assigned if it hasn't already
378 * been marked assigned due to filling while it was in the store position.
379 *
380 * Note: identical logic as in bpf_zerocopy_buffull(), except that we operate
381 * on bd_hbuf and bd_hlen.
382 */
383 void
384 bpf_zerocopy_bufheld(struct bpf_d *d)
385 {
386 struct zbuf *zb;
387
388 KASSERT(d->bd_bufmode == BPF_BUFMODE_ZBUF,
389 ("bpf_zerocopy_bufheld: not in zbuf mode"));
390
391 zb = (struct zbuf *)d->bd_hbuf;
392 KASSERT(zb != NULL, ("bpf_zerocopy_bufheld: zb == NULL"));
393
394 if ((zb->zb_flags & ZBUF_FLAG_ASSIGNED) == 0) {
395 zb->zb_flags |= ZBUF_FLAG_ASSIGNED;
396 zb->zb_header->bzh_kernel_len = d->bd_hlen;
397 atomic_add_rel_int(&zb->zb_header->bzh_kernel_gen, 1);
398 }
399 }

/*
 * Notification from the BPF framework that a buffer has been rotated out of
 * the held position into the free position.  This happens when the user
 * acknowledges the held buffer.
 */
void
bpf_zerocopy_buf_reclaimed(struct bpf_d *d)
{
	struct zbuf *zb;

	KASSERT(d->bd_bufmode == BPF_BUFMODE_ZBUF,
	    ("bpf_zerocopy_buf_reclaimed: not in zbuf mode"));

	KASSERT(d->bd_fbuf != NULL,
	    ("bpf_zerocopy_buf_reclaimed: NULL free buf"));
	zb = (struct zbuf *)d->bd_fbuf;
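	/*
	 * The buffer is back under kernel ownership, so clear the
	 * assignment flag to make it writable again.
	 */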
	zb->zb_flags &= ~ZBUF_FLAG_ASSIGNED;
}

/*
 * Query from the BPF framework regarding whether the buffer currently in
 * the held position can be moved to the free position, which is indicated
 * by the user process setting its generation number equal to the kernel
 * generation number.
 */
int
bpf_zerocopy_canfreebuf(struct bpf_d *d)
{
	struct zbuf *zb;

	KASSERT(d->bd_bufmode == BPF_BUFMODE_ZBUF,
	    ("bpf_zerocopy_canfreebuf: not in zbuf mode"));

	zb = (struct zbuf *)d->bd_hbuf;
	if (zb == NULL)
		return (0);
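	/*
	 * The acquire load of bzh_user_gen pairs with userspace's release
	 * store when it acknowledges the buffer, ensuring we do not reclaim
	 * a buffer the user process is still reading.
	 */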
	if (zb->zb_header->bzh_kernel_gen ==
	    atomic_load_acq_int(&zb->zb_header->bzh_user_gen))
		return (1);
	return (0);
}

/*
 * Query from the BPF framework as to whether or not the buffer currently
 * in the store position can actually be written to.  This may return false
 * if the store buffer is assigned to userspace before the hold buffer is
 * acknowledged.
 */
int
bpf_zerocopy_canwritebuf(struct bpf_d *d)
{
	struct zbuf *zb;

	KASSERT(d->bd_bufmode == BPF_BUFMODE_ZBUF,
	    ("bpf_zerocopy_canwritebuf: not in zbuf mode"));

	zb = (struct zbuf *)d->bd_sbuf;
	KASSERT(zb != NULL, ("bpf_zerocopy_canwritebuf: bd_sbuf NULL"));

	if (zb->zb_flags & ZBUF_FLAG_ASSIGNED)
		return (0);
	return (1);
}

/*
 * Free zero-copy buffers at request of descriptor.
 */
void
bpf_zerocopy_free(struct bpf_d *d)
{
	struct zbuf *zb;

	KASSERT(d->bd_bufmode == BPF_BUFMODE_ZBUF,
	    ("bpf_zerocopy_free: not in zbuf mode"));

	zb = (struct zbuf *)d->bd_sbuf;
	if (zb != NULL)
		zbuf_free(zb);
	zb = (struct zbuf *)d->bd_hbuf;
	if (zb != NULL)
		zbuf_free(zb);
	zb = (struct zbuf *)d->bd_fbuf;
	if (zb != NULL)
		zbuf_free(zb);
}

/*
 * Ioctl to return the maximum buffer size.
 */
int
bpf_zerocopy_ioctl_getzmax(struct thread *td, struct bpf_d *d, size_t *i)
{

	KASSERT(d->bd_bufmode == BPF_BUFMODE_ZBUF,
	    ("bpf_zerocopy_ioctl_getzmax: not in zbuf mode"));

	*i = BPF_MAX_PAGES * PAGE_SIZE;
	return (0);
}

/*
 * Ioctl to force rotation of the two buffers, if there's any data available.
 * This can be used by user space to implement timeouts when waiting for a
 * buffer to fill.
 */
int
bpf_zerocopy_ioctl_rotzbuf(struct thread *td, struct bpf_d *d,
    struct bpf_zbuf *bz)
{
	struct zbuf *bzh;

	bzero(bz, sizeof(*bz));
	BPFD_LOCK(d);
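	/*
	 * Rotate only if no buffer is currently held and the store buffer
	 * contains data; otherwise leave *bz zeroed to indicate that there
	 * is nothing to hand to userspace yet.
	 */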
	if (d->bd_hbuf == NULL && d->bd_slen != 0) {
		ROTATE_BUFFERS(d);
		bzh = (struct zbuf *)d->bd_hbuf;
		bz->bz_bufa = (void *)bzh->zb_uaddr;
		bz->bz_buflen = d->bd_hlen;
	}
	BPFD_UNLOCK(d);
	return (0);
}

/*
 * Ioctl to configure zero-copy buffers -- may be done only once.
 */
int
bpf_zerocopy_ioctl_setzbuf(struct thread *td, struct bpf_d *d,
    struct bpf_zbuf *bz)
{
	struct zbuf *zba, *zbb;
	int error;

	KASSERT(d->bd_bufmode == BPF_BUFMODE_ZBUF,
	    ("bpf_zerocopy_ioctl_setzbuf: not in zbuf mode"));

	/*
	 * Must set both buffers.  Cannot clear them.
	 */
	if (bz->bz_bufa == NULL || bz->bz_bufb == NULL)
		return (EINVAL);

	/*
	 * Buffers must have a size greater than 0.  Alignment and other size
	 * validity checking is done in zbuf_setup().
	 */
	if (bz->bz_buflen == 0)
		return (EINVAL);

	/*
	 * Allocate new buffers.
	 */
	error = zbuf_setup(td, (vm_offset_t)bz->bz_bufa, bz->bz_buflen,
	    &zba);
	if (error)
		return (error);
	error = zbuf_setup(td, (vm_offset_t)bz->bz_bufb, bz->bz_buflen,
	    &zbb);
	if (error) {
		zbuf_free(zba);
		return (error);
	}

	/*
	 * We only allow buffers to be installed once, so atomically check
	 * that no buffers are currently installed and install new buffers.
	 */
	BPFD_LOCK(d);
	if (d->bd_hbuf != NULL || d->bd_sbuf != NULL || d->bd_fbuf != NULL ||
	    d->bd_bif != NULL) {
		BPFD_UNLOCK(d);
		zbuf_free(zba);
		zbuf_free(zbb);
		return (EINVAL);
	}

	/*
	 * Point BPF descriptor at buffers; initialize sbuf as zba so that
	 * it is always filled first in the sequence, per bpf(4).
	 */
	d->bd_fbuf = (caddr_t)zbb;
	d->bd_sbuf = (caddr_t)zba;
	d->bd_slen = 0;
	d->bd_hlen = 0;

	/*
	 * We expose only the space left in the buffer after the size of the
	 * shared management region.
	 */
	d->bd_bufsize = bz->bz_buflen - sizeof(struct bpf_zbuf_header);
	BPFD_UNLOCK(d);
	return (0);
}