/*-
 * Copyright (c) 2007 Seccuris Inc.
 * All rights reserved.
 *
 * This software was developed by Robert N. M. Watson under contract to
 * Seccuris Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: releng/8.4/sys/net/bpf_zerocopy.c 234970 2012-05-03 16:49:27Z eadler $");

#include "opt_bpf.h"

#include <sys/param.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/sf_buf.h>
#include <sys/socket.h>
#include <sys/uio.h>

#include <machine/atomic.h>

#include <net/if.h>
#include <net/bpf.h>
#include <net/bpf_zerocopy.h>
#include <net/bpfdesc.h>

#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_extern.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>

/*
 * Zero-copy buffer scheme for BPF: user space "donates" two buffers, which
 * are mapped into the kernel address space using sf_bufs and used directly
 * by BPF.  Memory is wired since page faults cannot be tolerated in the
 * contexts where the buffers are copied to (locks held, interrupt context,
 * etc).  Access to shared memory buffers is synchronized using a header on
 * each buffer, allowing the number of system calls to go to zero as BPF
 * reaches saturation (buffers filled as fast as they can be drained by the
 * user process).  Full details of the protocol for communicating between the
 * user process and BPF may be found in bpf(4).
 */
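
/*
 * Illustrative sketch of the user-space side of that protocol; it is not
 * part of this file and is based on the interface described in bpf(4).
 * Rather than calling read(2), a consumer watches bzh_kernel_gen advance
 * past bzh_user_gen, processes the packet data that follows the header,
 * and acknowledges by storing the kernel generation number back into
 * bzh_user_gen.  The helper names below are hypothetical.
 *
 *	#include <machine/atomic.h>
 *	#include <net/bpf.h>
 *
 *	static int
 *	zbuf_complete(struct bpf_zbuf_header *bzh)
 *	{
 *
 *		// Acquire load pairs with the kernel's release increment
 *		// in bpf_zerocopy_buffull()/bpf_zerocopy_bufheld().
 *		return (atomic_load_acq_int(&bzh->bzh_kernel_gen) !=
 *		    bzh->bzh_user_gen);
 *	}
 *
 *	static void
 *	zbuf_ack(struct bpf_zbuf_header *bzh)
 *	{
 *
 *		// Release store; bpf_zerocopy_canfreebuf() may now let
 *		// the buffer be reclaimed.
 *		atomic_store_rel_int(&bzh->bzh_user_gen,
 *		    bzh->bzh_kernel_gen);
 *	}
 */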

/*
 * Maximum number of pages per buffer.  Since all BPF devices use two, the
 * maximum per device is 2*BPF_MAX_PAGES.  Resource limits on the number of
 * sf_bufs may be an issue, so do not set this too high.  On older systems,
 * kernel address space limits may also be an issue.
 */
#define	BPF_MAX_PAGES	512

/*
 * struct zbuf describes a memory buffer loaned by a user process to the
 * kernel.  We represent this as a series of pages managed using an array of
 * sf_bufs.  Even though the memory is contiguous in user space, it may not
 * be mapped contiguously in the kernel (i.e., a set of physically
 * non-contiguous pages in the direct map region), so we must implement
 * scatter-gather copying.  One significant mitigating factor is that on
 * systems with a direct memory map, we can avoid TLB misses.
 *
 * At the front of the shared memory region is a bpf_zbuf_header, which
 * contains shared control data to allow user space and the kernel to
 * synchronize; this is included in zb_size, but not bd_bufsize, so that BPF
 * knows that the space is not available.
 */
struct zbuf {
	vm_offset_t	 zb_uaddr;	/* User address at time of setup. */
	size_t		 zb_size;	/* Size of buffer, incl. header. */
	u_int		 zb_numpages;	/* Number of pages. */
	int		 zb_flags;	/* Flags on zbuf. */
	struct sf_buf	**zb_pages;	/* Pages themselves. */
	struct bpf_zbuf_header	*zb_header;	/* Shared header. */
};
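
/*
 * Layout example (a sketch, assuming 4KB pages): for a 4-page donation,
 * zb_size is 16384 and zb_numpages is 4, but only
 * 16384 - sizeof(struct bpf_zbuf_header) bytes are exposed to BPF as
 * bd_bufsize, since packet data begins immediately after the shared header
 * on the first page.
 */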

/*
 * When a buffer has been assigned to user space, flag it as such, as the
 * buffer may remain in the store position as a result of the user process
 * not yet having acknowledged the buffer in the hold position.
 */
#define	ZBUF_FLAG_ASSIGNED	0x00000001	/* Set when owned by user. */

/*
 * Release a page we've previously wired.
 */
static void
zbuf_page_free(vm_page_t pp)
{

	vm_page_lock_queues();
	vm_page_unwire(pp, 0);
	if (pp->wire_count == 0 && pp->object == NULL)
		vm_page_free(pp);
	vm_page_unlock_queues();
}

/*
 * Free an sf_buf with attached page.
 */
static void
zbuf_sfbuf_free(struct sf_buf *sf)
{
	vm_page_t pp;

	pp = sf_buf_page(sf);
	sf_buf_free(sf);
	zbuf_page_free(pp);
}

/*
 * Free a zbuf, including its page array, sf_bufs, and pages.  Allow
 * partially allocated zbufs to be freed so that this function may be used
 * even during zbuf setup (i.e., on an error path).
 */
static void
zbuf_free(struct zbuf *zb)
{
	int i;

	for (i = 0; i < zb->zb_numpages; i++) {
		if (zb->zb_pages[i] != NULL)
			zbuf_sfbuf_free(zb->zb_pages[i]);
	}
	free(zb->zb_pages, M_BPF);
	free(zb, M_BPF);
}

/*
 * Given a user pointer to a page of user memory, return an sf_buf for the
 * page.  Because we may be requesting quite a few sf_bufs, prefer failure to
 * deadlock and use SFB_NOWAIT.
 */
static struct sf_buf *
zbuf_sfbuf_get(struct vm_map *map, vm_offset_t uaddr)
{
	struct sf_buf *sf;
	vm_page_t pp;

	if (vm_fault_quick((caddr_t) uaddr, VM_PROT_READ | VM_PROT_WRITE) <
	    0)
		return (NULL);
	pp = pmap_extract_and_hold(map->pmap, uaddr, VM_PROT_READ |
	    VM_PROT_WRITE);
	if (pp == NULL)
		return (NULL);
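	/*
	 * Exchange the short-term hold acquired by pmap_extract_and_hold()
	 * for a wiring, which prevents the page from being paged out while
	 * BPF copies packet data into it.
	 */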
	vm_page_lock_queues();
	vm_page_wire(pp);
	vm_page_unhold(pp);
	vm_page_unlock_queues();
	sf = sf_buf_alloc(pp, SFB_NOWAIT);
	if (sf == NULL) {
		zbuf_page_free(pp);
		return (NULL);
	}
	return (sf);
}

/*
 * Create a zbuf describing a range of user address space memory.  Validate
 * page alignment, size requirements, etc.
 */
static int
zbuf_setup(struct thread *td, vm_offset_t uaddr, size_t len,
    struct zbuf **zbp)
{
	struct zbuf *zb;
	struct vm_map *map;
	int error, i;

	*zbp = NULL;

	/*
	 * User address must be page-aligned.
	 */
	if (uaddr & PAGE_MASK)
		return (EINVAL);

	/*
	 * Length must be an integer number of full pages.
	 */
	if (len & PAGE_MASK)
		return (EINVAL);

	/*
	 * Length must not exceed per-buffer resource limit.
	 */
	if ((len / PAGE_SIZE) > BPF_MAX_PAGES)
		return (EINVAL);

	/*
	 * Allocate the buffer and set up each page with its own sf_buf.
	 */
	error = 0;
	zb = malloc(sizeof(*zb), M_BPF, M_ZERO | M_WAITOK);
	zb->zb_uaddr = uaddr;
	zb->zb_size = len;
	zb->zb_numpages = len / PAGE_SIZE;
	zb->zb_pages = malloc(sizeof(struct sf_buf *) *
	    zb->zb_numpages, M_BPF, M_ZERO | M_WAITOK);
	map = &td->td_proc->p_vmspace->vm_map;
	for (i = 0; i < zb->zb_numpages; i++) {
		zb->zb_pages[i] = zbuf_sfbuf_get(map,
		    uaddr + (i * PAGE_SIZE));
		if (zb->zb_pages[i] == NULL) {
			error = EFAULT;
			goto error;
		}
	}
	zb->zb_header =
	    (struct bpf_zbuf_header *)sf_buf_kva(zb->zb_pages[0]);
	bzero(zb->zb_header, sizeof(*zb->zb_header));
	*zbp = zb;
	return (0);

error:
	zbuf_free(zb);
	return (error);
}

/*
 * Copy bytes from a source into the specified zbuf.  The caller is
 * responsible for performing bounds checking, etc.
 */
void
bpf_zerocopy_append_bytes(struct bpf_d *d, caddr_t buf, u_int offset,
    void *src, u_int len)
{
	u_int count, page, poffset;
	u_char *src_bytes;
	struct zbuf *zb;

	KASSERT(d->bd_bufmode == BPF_BUFMODE_ZBUF,
	    ("bpf_zerocopy_append_bytes: not in zbuf mode"));
	KASSERT(buf != NULL, ("bpf_zerocopy_append_bytes: NULL buf"));

	src_bytes = (u_char *)src;
	zb = (struct zbuf *)buf;

	KASSERT((zb->zb_flags & ZBUF_FLAG_ASSIGNED) == 0,
	    ("bpf_zerocopy_append_bytes: ZBUF_FLAG_ASSIGNED"));

	/*
	 * Scatter-gather copy to user pages mapped into kernel address space
	 * using sf_bufs: copy up to a page at a time.
	 */
	offset += sizeof(struct bpf_zbuf_header);
	page = offset / PAGE_SIZE;
	poffset = offset % PAGE_SIZE;
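	/*
	 * For example, an append at offset 0 begins on page 0 at
	 * poffset == sizeof(struct bpf_zbuf_header), just past the shared
	 * header.
	 */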
	while (len > 0) {
		KASSERT(page < zb->zb_numpages, ("bpf_zerocopy_append_bytes:"
		   " page overflow (%d p %d np)\n", page, zb->zb_numpages));

		count = min(len, PAGE_SIZE - poffset);
		bcopy(src_bytes, ((u_char *)sf_buf_kva(zb->zb_pages[page])) +
		    poffset, count);
		poffset += count;
		if (poffset == PAGE_SIZE) {
			poffset = 0;
			page++;
		}
		KASSERT(poffset < PAGE_SIZE,
		    ("bpf_zerocopy_append_bytes: page offset overflow (%d)",
		    poffset));
		len -= count;
		src_bytes += count;
	}
}

/*
 * Copy bytes from an mbuf chain to the specified zbuf: copying will be
 * scatter-gather both from mbufs, which may be fragmented over memory, and
 * to pages, which may not be contiguously mapped in kernel address space.
 * As with bpf_zerocopy_append_bytes(), the caller is responsible for
 * checking that this will not exceed the buffer limit.
 */
void
bpf_zerocopy_append_mbuf(struct bpf_d *d, caddr_t buf, u_int offset,
    void *src, u_int len)
{
	u_int count, moffset, page, poffset;
	const struct mbuf *m;
	struct zbuf *zb;

	KASSERT(d->bd_bufmode == BPF_BUFMODE_ZBUF,
	    ("bpf_zerocopy_append_mbuf not in zbuf mode"));
	KASSERT(buf != NULL, ("bpf_zerocopy_append_mbuf: NULL buf"));

	m = (struct mbuf *)src;
	zb = (struct zbuf *)buf;

	KASSERT((zb->zb_flags & ZBUF_FLAG_ASSIGNED) == 0,
	    ("bpf_zerocopy_append_mbuf: ZBUF_FLAG_ASSIGNED"));

	/*
	 * Scatter-gather both from an mbuf chain and to a user page set
	 * mapped into kernel address space using sf_bufs.  If we're lucky,
	 * each mbuf requires one copy operation, but if page alignment and
	 * mbuf alignment work out less well, we'll be doing two copies per
	 * mbuf.
	 */
	offset += sizeof(struct bpf_zbuf_header);
	page = offset / PAGE_SIZE;
	poffset = offset % PAGE_SIZE;
	moffset = 0;
	while (len > 0) {
		KASSERT(page < zb->zb_numpages,
		    ("bpf_zerocopy_append_mbuf: page overflow (%d p %d "
		    "np)\n", page, zb->zb_numpages));
		KASSERT(m != NULL,
		    ("bpf_zerocopy_append_mbuf: end of mbuf chain"));

		count = min(m->m_len - moffset, len);
		count = min(count, PAGE_SIZE - poffset);
		bcopy(mtod(m, u_char *) + moffset,
		    ((u_char *)sf_buf_kva(zb->zb_pages[page])) + poffset,
		    count);
		poffset += count;
		if (poffset == PAGE_SIZE) {
			poffset = 0;
			page++;
		}
		KASSERT(poffset < PAGE_SIZE,
		    ("bpf_zerocopy_append_mbuf: page offset overflow (%d)",
		    poffset));
		moffset += count;
		if (moffset == m->m_len) {
			m = m->m_next;
			moffset = 0;
		}
		len -= count;
	}
}

/*
 * Notification from the BPF framework that a buffer in the store position is
 * rejecting packets and may be considered full.  We mark the buffer as
 * immutable and assign it to user space so that it is immediately available
 * for the user process to access.
 */
void
bpf_zerocopy_buffull(struct bpf_d *d)
{
	struct zbuf *zb;

	KASSERT(d->bd_bufmode == BPF_BUFMODE_ZBUF,
	    ("bpf_zerocopy_buffull: not in zbuf mode"));

	zb = (struct zbuf *)d->bd_sbuf;
	KASSERT(zb != NULL, ("bpf_zerocopy_buffull: zb == NULL"));

	if ((zb->zb_flags & ZBUF_FLAG_ASSIGNED) == 0) {
		zb->zb_flags |= ZBUF_FLAG_ASSIGNED;
		zb->zb_header->bzh_kernel_len = d->bd_slen;
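		/*
		 * The release barrier in the increment below orders the
		 * length store above before the new generation number
		 * becomes visible to a user-space acquire load.
		 */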
		atomic_add_rel_int(&zb->zb_header->bzh_kernel_gen, 1);
	}
}

/*
 * Notification from the BPF framework that a buffer has moved into the held
 * slot on a descriptor.  Zero-copy BPF will update the shared page to let
 * the user process know and flag the buffer as assigned if it hasn't already
 * been marked assigned due to filling while it was in the store position.
 *
 * Note: identical logic as in bpf_zerocopy_buffull(), except that we operate
 * on bd_hbuf and bd_hlen.
 */
void
bpf_zerocopy_bufheld(struct bpf_d *d)
{
	struct zbuf *zb;

	KASSERT(d->bd_bufmode == BPF_BUFMODE_ZBUF,
	    ("bpf_zerocopy_bufheld: not in zbuf mode"));

	zb = (struct zbuf *)d->bd_hbuf;
	KASSERT(zb != NULL, ("bpf_zerocopy_bufheld: zb == NULL"));

	if ((zb->zb_flags & ZBUF_FLAG_ASSIGNED) == 0) {
		zb->zb_flags |= ZBUF_FLAG_ASSIGNED;
		zb->zb_header->bzh_kernel_len = d->bd_hlen;
		atomic_add_rel_int(&zb->zb_header->bzh_kernel_gen, 1);
	}
}

/*
 * Notification from the BPF framework that a buffer has been rotated out of
 * the held position into the free position.  This happens when the user
 * process acknowledges the held buffer.
 */
void
bpf_zerocopy_buf_reclaimed(struct bpf_d *d)
{
	struct zbuf *zb;

	KASSERT(d->bd_bufmode == BPF_BUFMODE_ZBUF,
	    ("bpf_zerocopy_buf_reclaimed: not in zbuf mode"));

	KASSERT(d->bd_fbuf != NULL,
	    ("bpf_zerocopy_buf_reclaimed: NULL free buf"));
	zb = (struct zbuf *)d->bd_fbuf;
	zb->zb_flags &= ~ZBUF_FLAG_ASSIGNED;
}

/*
 * Query from the BPF framework regarding whether the buffer currently in the
 * held position can be moved to the free position, which the user process
 * indicates by making its generation number equal to the kernel generation
 * number.
 */
int
bpf_zerocopy_canfreebuf(struct bpf_d *d)
{
	struct zbuf *zb;

	KASSERT(d->bd_bufmode == BPF_BUFMODE_ZBUF,
	    ("bpf_zerocopy_canfreebuf: not in zbuf mode"));

	zb = (struct zbuf *)d->bd_hbuf;
	if (zb == NULL)
		return (0);
	if (zb->zb_header->bzh_kernel_gen ==
	    atomic_load_acq_int(&zb->zb_header->bzh_user_gen))
		return (1);
	return (0);
}
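
/*
 * The matching user-space acknowledgment is simply (a sketch; see the
 * example near the top of this file):
 *
 *	atomic_store_rel_int(&bzh->bzh_user_gen, bzh->bzh_kernel_gen);
 */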

/*
 * Query from the BPF framework as to whether the buffer currently in the
 * store position can actually be written to.  This may return false if the
 * store buffer is assigned to user space before the hold buffer is
 * acknowledged.
 */
int
bpf_zerocopy_canwritebuf(struct bpf_d *d)
{
	struct zbuf *zb;

	KASSERT(d->bd_bufmode == BPF_BUFMODE_ZBUF,
	    ("bpf_zerocopy_canwritebuf: not in zbuf mode"));

	zb = (struct zbuf *)d->bd_sbuf;
	KASSERT(zb != NULL, ("bpf_zerocopy_canwritebuf: bd_sbuf NULL"));

	if (zb->zb_flags & ZBUF_FLAG_ASSIGNED)
		return (0);
	return (1);
}

/*
 * Free zero-copy buffers at the request of a descriptor.
 */
void
bpf_zerocopy_free(struct bpf_d *d)
{
	struct zbuf *zb;

	KASSERT(d->bd_bufmode == BPF_BUFMODE_ZBUF,
	    ("bpf_zerocopy_free: not in zbuf mode"));

	zb = (struct zbuf *)d->bd_sbuf;
	if (zb != NULL)
		zbuf_free(zb);
	zb = (struct zbuf *)d->bd_hbuf;
	if (zb != NULL)
		zbuf_free(zb);
	zb = (struct zbuf *)d->bd_fbuf;
	if (zb != NULL)
		zbuf_free(zb);
}

/*
 * Ioctl to return the maximum buffer size.
 */
int
bpf_zerocopy_ioctl_getzmax(struct thread *td, struct bpf_d *d, size_t *i)
{

	KASSERT(d->bd_bufmode == BPF_BUFMODE_ZBUF,
	    ("bpf_zerocopy_ioctl_getzmax: not in zbuf mode"));

	*i = BPF_MAX_PAGES * PAGE_SIZE;
	return (0);
}

/*
 * Ioctl to force rotation of the two buffers, if there's any data available.
 * This can be used by user space to implement timeouts when waiting for a
 * buffer to fill.
 */
int
bpf_zerocopy_ioctl_rotzbuf(struct thread *td, struct bpf_d *d,
    struct bpf_zbuf *bz)
{
	struct zbuf *bzh;

	bzero(bz, sizeof(*bz));
	BPFD_LOCK(d);
	if (d->bd_hbuf == NULL && d->bd_slen != 0) {
		ROTATE_BUFFERS(d);
		bzh = (struct zbuf *)d->bd_hbuf;
		bz->bz_bufa = (void *)bzh->zb_uaddr;
		bz->bz_buflen = d->bd_hlen;
	}
	BPFD_UNLOCK(d);
	return (0);
}
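
/*
 * Hedged user-space sketch of the timeout pattern this enables: after
 * waiting too long for a buffer to complete, force partial delivery and
 * then check the shared headers as usual.  bpf_fd is hypothetical.
 *
 *	struct bpf_zbuf bz;
 *
 *	if (ioctl(bpf_fd, BIOCROTZBUF, &bz) == 0 && bz.bz_bufa != NULL) {
 *		// A buffer (possibly only partially full) now sits in
 *		// the hold position at user address bz.bz_bufa.
 *	}
 */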

/*
 * Ioctl to configure zero-copy buffers -- may be done only once.
 */
int
bpf_zerocopy_ioctl_setzbuf(struct thread *td, struct bpf_d *d,
    struct bpf_zbuf *bz)
{
	struct zbuf *zba, *zbb;
	int error;

	KASSERT(d->bd_bufmode == BPF_BUFMODE_ZBUF,
	    ("bpf_zerocopy_ioctl_setzbuf: not in zbuf mode"));

	/*
	 * Must set both buffers.  Cannot clear them.
	 */
	if (bz->bz_bufa == NULL || bz->bz_bufb == NULL)
		return (EINVAL);

	/*
	 * Buffers must have a size greater than 0.  Alignment and other size
	 * validity checking is done in zbuf_setup().
	 */
	if (bz->bz_buflen == 0)
		return (EINVAL);

	/*
	 * Allocate new buffers.
	 */
	error = zbuf_setup(td, (vm_offset_t)bz->bz_bufa, bz->bz_buflen,
	    &zba);
	if (error)
		return (error);
	error = zbuf_setup(td, (vm_offset_t)bz->bz_bufb, bz->bz_buflen,
	    &zbb);
	if (error) {
		zbuf_free(zba);
		return (error);
	}

	/*
	 * We only allow buffers to be installed once, so atomically check
	 * that no buffers are currently installed and install new buffers.
	 */
	BPFD_LOCK(d);
	if (d->bd_hbuf != NULL || d->bd_sbuf != NULL || d->bd_fbuf != NULL ||
	    d->bd_bif != NULL) {
		BPFD_UNLOCK(d);
		zbuf_free(zba);
		zbuf_free(zbb);
		return (EINVAL);
	}

	/*
	 * Point BPF descriptor at buffers; initialize sbuf as zba so that
	 * it is always filled first in the sequence, per bpf(4).
	 */
	d->bd_fbuf = (caddr_t)zbb;
	d->bd_sbuf = (caddr_t)zba;
	d->bd_slen = 0;
	d->bd_hlen = 0;

	/*
	 * We expose only the space left in the buffer after the size of the
	 * shared management region.
	 */
	d->bd_bufsize = bz->bz_buflen - sizeof(struct bpf_zbuf_header);
	BPFD_UNLOCK(d);
	return (0);
}
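
/*
 * End-to-end configuration sketch from user space.  The ioctls and struct
 * bpf_zbuf are those documented in bpf(4); the rest (fd, ifr, desired_len)
 * is illustrative.  Buffers must be page-aligned, page-sized multiples,
 * both must be set in a single BIOCSETZBUF, and, per the bd_bif check
 * above, this must happen before BIOCSETIF attaches the interface.
 *
 *	size_t zmax;
 *	struct bpf_zbuf bz;
 *	u_int bufmode = BPF_BUFMODE_ZBUF;
 *
 *	ioctl(fd, BIOCSETBUFMODE, &bufmode);
 *	ioctl(fd, BIOCGETZMAX, &zmax);
 *	bz.bz_buflen = MIN(desired_len, zmax) & ~PAGE_MASK;
 *	bz.bz_bufa = mmap(NULL, bz.bz_buflen, PROT_READ | PROT_WRITE,
 *	    MAP_ANON, -1, 0);
 *	bz.bz_bufb = mmap(NULL, bz.bz_buflen, PROT_READ | PROT_WRITE,
 *	    MAP_ANON, -1, 0);
 *	ioctl(fd, BIOCSETZBUF, &bz);
 *	ioctl(fd, BIOCSETIF, &ifr);
 */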