/*	$OpenBSD: criov.c,v 1.9 2002/01/29 15:48:29 jason Exp $	*/

/*-
 * Copyright (c) 1999 Theo de Raadt
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/errno.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/mbuf.h>
#include <sys/uio.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/sdt.h>

#include <machine/vmparam.h>

#include <vm/vm.h>
#include <vm/vm_page.h>
#include <vm/pmap.h>

#include <opencrypto/cryptodev.h>

SDT_PROVIDER_DECLARE(opencrypto);

/*
 * These macros are only for avoiding code duplication, as we need to skip
 * a given number of bytes in the same way in several functions below.
 */
#define	CUIO_SKIP()	do {						\
	KASSERT(off >= 0, ("%s: off %d < 0", __func__, off));		\
	KASSERT(len >= 0, ("%s: len %d < 0", __func__, len));		\
	while (off > 0) {						\
		KASSERT(iol >= 0, ("%s: empty in skip", __func__));	\
		if (off < iov->iov_len)					\
			break;						\
		off -= iov->iov_len;					\
		iol--;							\
		iov++;							\
	}								\
} while (0)

#define	CVM_PAGE_SKIP()	do {						\
	KASSERT(off >= 0, ("%s: off %d < 0", __func__, off));		\
	KASSERT(len >= 0, ("%s: len %d < 0", __func__, len));		\
	while (off > 0) {						\
		if (off < PAGE_SIZE)					\
			break;						\
		processed += PAGE_SIZE;					\
		off -= PAGE_SIZE;					\
		pages++;						\
	}								\
} while (0)

static void
cuio_copydata(struct uio* uio, int off, int len, caddr_t cp)
{
	struct iovec *iov = uio->uio_iov;
	int iol __diagused = uio->uio_iovcnt;
	unsigned count;

	CUIO_SKIP();
	while (len > 0) {
		KASSERT(iol >= 0, ("%s: empty", __func__));
		count = min(iov->iov_len - off, len);
		bcopy(((caddr_t)iov->iov_base) + off, cp, count);
		len -= count;
		cp += count;
		off = 0;
		iol--;
		iov++;
	}
}

static void
cuio_copyback(struct uio* uio, int off, int len, c_caddr_t cp)
{
	struct iovec *iov = uio->uio_iov;
	int iol __diagused = uio->uio_iovcnt;
	unsigned count;

	CUIO_SKIP();
	while (len > 0) {
		KASSERT(iol >= 0, ("%s: empty", __func__));
		count = min(iov->iov_len - off, len);
		bcopy(cp, ((caddr_t)iov->iov_base) + off, count);
		len -= count;
		cp += count;
		off = 0;
		iol--;
		iov++;
	}
}

/*
 * Return the index and offset of location in iovec list.
 */
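/*
 * For example (illustrative values only): with two iovecs of lengths 16
 * and 32, loc 20 maps to index 1 with *off 4, loc 48 (exactly the end of
 * the data) maps to index 1 with *off 32, and any loc past the end of the
 * list returns -1.
 */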
static int
cuio_getptr(struct uio *uio, int loc, int *off)
{
	int ind, len;

	ind = 0;
	while (loc >= 0 && ind < uio->uio_iovcnt) {
		len = uio->uio_iov[ind].iov_len;
		if (len > loc) {
			*off = loc;
			return (ind);
		}
		loc -= len;
		ind++;
	}

	if (ind > 0 && loc == 0) {
		ind--;
		*off = uio->uio_iov[ind].iov_len;
		return (ind);
	}

	return (-1);
}

#if CRYPTO_MAY_HAVE_VMPAGE
/*
 * Apply function f to the data in a vm_page_t list starting "off" bytes from
 * the beginning, continuing for "len" bytes.
 */
static int
cvm_page_apply(vm_page_t *pages, int off, int len,
    int (*f)(void *, const void *, u_int), void *arg)
{
	int processed __unused;
	unsigned count;
	int rval;

	processed = 0;
	CVM_PAGE_SKIP();
	while (len > 0) {
		char *kaddr = (char *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(*pages));
		count = min(PAGE_SIZE - off, len);
		rval = (*f)(arg, kaddr + off, count);
		if (rval)
			return (rval);
		len -= count;
		processed += count;
		off = 0;
		pages++;
	}
	return (0);
}

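/*
 * Return a pointer to the "len" bytes starting "skip" bytes into the page
 * array, or NULL if the requested range would cross a page boundary and is
 * therefore not contiguous in the direct map.
 */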
static inline void *
cvm_page_contiguous_segment(vm_page_t *pages, size_t skip, int len)
{
	if ((skip + len - 1) / PAGE_SIZE > skip / PAGE_SIZE)
		return (NULL);

	pages += (skip / PAGE_SIZE);
	skip -= rounddown(skip, PAGE_SIZE);
	return (((char *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(*pages))) + skip);
}

/*
 * Copy len bytes of data from the pointer cp into the vm_page_t array,
 * skipping the first off bytes. Return the number of bytes skipped and
 * copied. Does not verify the length of the array.
 */
static int
cvm_page_copyback(vm_page_t *pages, int off, int len, c_caddr_t cp)
{
	int processed = 0;
	unsigned count;

	CVM_PAGE_SKIP();
	while (len > 0) {
		count = min(PAGE_SIZE - off, len);
		bcopy(cp, (char *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(*pages)) + off,
		    count);
		len -= count;
		cp += count;
		processed += count;
		off = 0;
		pages++;
	}
	return (processed);
}

/*
 * Copy len bytes of data from the vm_page_t array, skipping the first off
 * bytes, into the pointer cp. Return the number of bytes skipped and copied.
 * Does not verify the length of the array.
 */
static int
cvm_page_copydata(vm_page_t *pages, int off, int len, caddr_t cp)
{
	int processed = 0;
	unsigned count;

	CVM_PAGE_SKIP();
	while (len > 0) {
		count = min(PAGE_SIZE - off, len);
		bcopy(((char *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(*pages)) + off), cp,
		    count);
		len -= count;
		cp += count;
		processed += count;
		off = 0;
		pages++;
	}
	return (processed);
}
#endif /* CRYPTO_MAY_HAVE_VMPAGE */

/*
 * Given a starting page in an m_epg, determine the length of the
 * current physically contiguous segment.
 */
static __inline size_t
m_epg_pages_extent(struct mbuf *m, int idx, u_int pglen)
{
	size_t len;
	u_int i;

	len = pglen;
	for (i = idx + 1; i < m->m_epg_npgs; i++) {
		if (m->m_epg_pa[i - 1] + PAGE_SIZE != m->m_epg_pa[i])
			break;
		len += m_epg_pagelen(m, i, 0);
	}
	return (len);
}

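/*
 * Locate the segment of an unmapped (M_EXTPG) mbuf containing "offset":
 * the header buffer, one of the external pages (merging physically
 * contiguous pages into a single run), or the trailer buffer. Return a
 * pointer to the data and store the remaining length of that segment in
 * *len.
 */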
static void *
m_epg_segment(struct mbuf *m, size_t offset, size_t *len)
{
	u_int i, pglen, pgoff;

	offset += mtod(m, vm_offset_t);
	if (offset < m->m_epg_hdrlen) {
		*len = m->m_epg_hdrlen - offset;
		return (m->m_epg_hdr + offset);
	}
	offset -= m->m_epg_hdrlen;
	pgoff = m->m_epg_1st_off;
	for (i = 0; i < m->m_epg_npgs; i++) {
		pglen = m_epg_pagelen(m, i, pgoff);
		if (offset < pglen) {
			*len = m_epg_pages_extent(m, i, pglen) - offset;
			return ((void *)PHYS_TO_DMAP(m->m_epg_pa[i] + pgoff +
			    offset));
		}
		offset -= pglen;
		pgoff = 0;
	}
	KASSERT(offset <= m->m_epg_trllen, ("%s: offset beyond trailer",
	    __func__));
	*len = m->m_epg_trllen - offset;
	return (m->m_epg_trail + offset);
}

static __inline void *
m_epg_contiguous_subsegment(struct mbuf *m, size_t skip, size_t len)
{
	void *base;
	size_t seglen;

	base = m_epg_segment(m, skip, &seglen);
	if (len > seglen)
		return (NULL);
	return (base);
}

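/*
 * Cursor interface for walking a crypto buffer one contiguous segment at a
 * time. A typical consumer loop (sketch only; "process", "skip", and
 * "resid" are placeholders supplied by the caller, not part of this file)
 * might look like:
 *
 *	struct crypto_buffer_cursor cc;
 *	size_t seglen;
 *	char *seg;
 *
 *	crypto_cursor_init(&cc, &crp->crp_buf);
 *	crypto_cursor_advance(&cc, skip);
 *	while (resid > 0) {
 *		seg = crypto_cursor_segment(&cc, &seglen);
 *		seglen = MIN(seglen, resid);
 *		process(seg, seglen);
 *		crypto_cursor_advance(&cc, seglen);
 *		resid -= seglen;
 *	}
 */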
void
crypto_cursor_init(struct crypto_buffer_cursor *cc,
    const struct crypto_buffer *cb)
{
	memset(cc, 0, sizeof(*cc));
	cc->cc_type = cb->cb_type;
	switch (cc->cc_type) {
	case CRYPTO_BUF_CONTIG:
		cc->cc_buf = cb->cb_buf;
		cc->cc_buf_len = cb->cb_buf_len;
		break;
	case CRYPTO_BUF_MBUF:
	case CRYPTO_BUF_SINGLE_MBUF:
		cc->cc_mbuf = cb->cb_mbuf;
		break;
	case CRYPTO_BUF_VMPAGE:
		cc->cc_vmpage = cb->cb_vm_page;
		cc->cc_buf_len = cb->cb_vm_page_len;
		cc->cc_offset = cb->cb_vm_page_offset;
		break;
	case CRYPTO_BUF_UIO:
		cc->cc_iov = cb->cb_uio->uio_iov;
		break;
	default:
#ifdef INVARIANTS
		panic("%s: invalid buffer type %d", __func__, cb->cb_type);
#endif
		break;
	}
}

SDT_PROBE_DEFINE2(opencrypto, criov, cursor_advance, vmpage, "struct crypto_buffer_cursor*", "size_t");

void
crypto_cursor_advance(struct crypto_buffer_cursor *cc, size_t amount)
{
	size_t remain;

	switch (cc->cc_type) {
	case CRYPTO_BUF_CONTIG:
		MPASS(cc->cc_buf_len >= amount);
		cc->cc_buf += amount;
		cc->cc_buf_len -= amount;
		break;
	case CRYPTO_BUF_MBUF:
		for (;;) {
			remain = cc->cc_mbuf->m_len - cc->cc_offset;
			if (amount < remain) {
				cc->cc_offset += amount;
				break;
			}
			amount -= remain;
			cc->cc_mbuf = cc->cc_mbuf->m_next;
			cc->cc_offset = 0;
			if (amount == 0)
				break;
		}
		break;
	case CRYPTO_BUF_SINGLE_MBUF:
		MPASS(cc->cc_mbuf->m_len >= cc->cc_offset + amount);
		cc->cc_offset += amount;
		break;
	case CRYPTO_BUF_VMPAGE:
		for (;;) {
			SDT_PROBE2(opencrypto, criov, cursor_advance, vmpage,
			    cc, amount);
			remain = MIN(PAGE_SIZE - cc->cc_offset, cc->cc_buf_len);
			if (amount < remain) {
				cc->cc_buf_len -= amount;
				cc->cc_offset += amount;
				break;
			}
			cc->cc_buf_len -= remain;
			amount -= remain;
			cc->cc_vmpage++;
			cc->cc_offset = 0;
			if (amount == 0 || cc->cc_buf_len == 0)
				break;
		}
		break;
	case CRYPTO_BUF_UIO:
		for (;;) {
			remain = cc->cc_iov->iov_len - cc->cc_offset;
			if (amount < remain) {
				cc->cc_offset += amount;
				break;
			}
			amount -= remain;
			cc->cc_iov++;
			cc->cc_offset = 0;
			if (amount == 0)
				break;
		}
		break;
	default:
#ifdef INVARIANTS
		panic("%s: invalid buffer type %d", __func__, cc->cc_type);
#endif
		break;
	}
}

void *
crypto_cursor_segment(struct crypto_buffer_cursor *cc, size_t *len)
{
	switch (cc->cc_type) {
	case CRYPTO_BUF_CONTIG:
		*len = cc->cc_buf_len;
		return (cc->cc_buf);
	case CRYPTO_BUF_MBUF:
	case CRYPTO_BUF_SINGLE_MBUF:
		if (cc->cc_mbuf == NULL) {
			*len = 0;
			return (NULL);
		}
		if (cc->cc_mbuf->m_flags & M_EXTPG)
			return (m_epg_segment(cc->cc_mbuf, cc->cc_offset, len));
		*len = cc->cc_mbuf->m_len - cc->cc_offset;
		return (mtod(cc->cc_mbuf, char *) + cc->cc_offset);
	case CRYPTO_BUF_VMPAGE:
		*len = PAGE_SIZE - cc->cc_offset;
		return ((char *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(
		    *cc->cc_vmpage)) + cc->cc_offset);
	case CRYPTO_BUF_UIO:
		*len = cc->cc_iov->iov_len - cc->cc_offset;
		return ((char *)cc->cc_iov->iov_base + cc->cc_offset);
	default:
#ifdef INVARIANTS
		panic("%s: invalid buffer type %d", __func__, cc->cc_type);
#endif
		*len = 0;
		return (NULL);
	}
}

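/*
 * Copy "size" bytes from "vsrc" into the buffer at the cursor's current
 * position, advancing the cursor past the bytes written.
 */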
void
crypto_cursor_copyback(struct crypto_buffer_cursor *cc, int size,
    const void *vsrc)
{
	size_t remain, todo;
	const char *src;
	char *dst;

	src = vsrc;
	switch (cc->cc_type) {
	case CRYPTO_BUF_CONTIG:
		MPASS(cc->cc_buf_len >= size);
		memcpy(cc->cc_buf, src, size);
		cc->cc_buf += size;
		cc->cc_buf_len -= size;
		break;
	case CRYPTO_BUF_MBUF:
		for (;;) {
			/*
			 * This uses m_copyback() for individual
			 * mbufs so that cc_mbuf and cc_offset are
			 * updated.
			 */
			remain = cc->cc_mbuf->m_len - cc->cc_offset;
			todo = MIN(remain, size);
			m_copyback(cc->cc_mbuf, cc->cc_offset, todo, src);
			src += todo;
			if (todo < remain) {
				cc->cc_offset += todo;
				break;
			}
			size -= todo;
			cc->cc_mbuf = cc->cc_mbuf->m_next;
			cc->cc_offset = 0;
			if (size == 0)
				break;
		}
		break;
	case CRYPTO_BUF_SINGLE_MBUF:
		MPASS(cc->cc_mbuf->m_len >= cc->cc_offset + size);
		m_copyback(cc->cc_mbuf, cc->cc_offset, size, src);
		cc->cc_offset += size;
		break;
	case CRYPTO_BUF_VMPAGE:
		for (;;) {
			dst = (char *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(
			    *cc->cc_vmpage)) + cc->cc_offset;
			remain = MIN(PAGE_SIZE - cc->cc_offset, cc->cc_buf_len);
			todo = MIN(remain, size);
			memcpy(dst, src, todo);
			src += todo;
			cc->cc_buf_len -= todo;
			if (todo < remain) {
				cc->cc_offset += todo;
				break;
			}
			size -= todo;
			cc->cc_vmpage++;
			cc->cc_offset = 0;
			if (size == 0)
				break;
		}
		break;
	case CRYPTO_BUF_UIO:
		for (;;) {
			dst = (char *)cc->cc_iov->iov_base + cc->cc_offset;
			remain = cc->cc_iov->iov_len - cc->cc_offset;
			todo = MIN(remain, size);
			memcpy(dst, src, todo);
			src += todo;
			if (todo < remain) {
				cc->cc_offset += todo;
				break;
			}
			size -= todo;
			cc->cc_iov++;
			cc->cc_offset = 0;
			if (size == 0)
				break;
		}
		break;
	default:
#ifdef INVARIANTS
		panic("%s: invalid buffer type %d", __func__, cc->cc_type);
#endif
		break;
	}
}

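/*
 * Copy "size" bytes from the buffer at the cursor's current position into
 * "vdst", advancing the cursor past the bytes read.
 */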
void
crypto_cursor_copydata(struct crypto_buffer_cursor *cc, int size, void *vdst)
{
	size_t remain, todo;
	const char *src;
	char *dst;

	dst = vdst;
	switch (cc->cc_type) {
	case CRYPTO_BUF_CONTIG:
		MPASS(cc->cc_buf_len >= size);
		memcpy(dst, cc->cc_buf, size);
		cc->cc_buf += size;
		cc->cc_buf_len -= size;
		break;
	case CRYPTO_BUF_MBUF:
		for (;;) {
			/*
			 * This uses m_copydata() for individual
			 * mbufs so that cc_mbuf and cc_offset are
			 * updated.
			 */
			remain = cc->cc_mbuf->m_len - cc->cc_offset;
			todo = MIN(remain, size);
			m_copydata(cc->cc_mbuf, cc->cc_offset, todo, dst);
			dst += todo;
			if (todo < remain) {
				cc->cc_offset += todo;
				break;
			}
			size -= todo;
			cc->cc_mbuf = cc->cc_mbuf->m_next;
			cc->cc_offset = 0;
			if (size == 0)
				break;
		}
		break;
	case CRYPTO_BUF_SINGLE_MBUF:
		MPASS(cc->cc_mbuf->m_len >= cc->cc_offset + size);
		m_copydata(cc->cc_mbuf, cc->cc_offset, size, dst);
		cc->cc_offset += size;
		break;
	case CRYPTO_BUF_VMPAGE:
		for (;;) {
			src = (char *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(
			    *cc->cc_vmpage)) + cc->cc_offset;
			remain = MIN(PAGE_SIZE - cc->cc_offset, cc->cc_buf_len);
			todo = MIN(remain, size);
			memcpy(dst, src, todo);
			dst += todo;
			cc->cc_buf_len -= todo;
			if (todo < remain) {
				cc->cc_offset += todo;
				break;
			}
			size -= todo;
			cc->cc_vmpage++;
			cc->cc_offset = 0;
			if (size == 0)
				break;
		}
		break;
	case CRYPTO_BUF_UIO:
		for (;;) {
			src = (const char *)cc->cc_iov->iov_base +
			    cc->cc_offset;
			remain = cc->cc_iov->iov_len - cc->cc_offset;
			todo = MIN(remain, size);
			memcpy(dst, src, todo);
			dst += todo;
			if (todo < remain) {
				cc->cc_offset += todo;
				break;
			}
			size -= todo;
			cc->cc_iov++;
			cc->cc_offset = 0;
			if (size == 0)
				break;
		}
		break;
	default:
#ifdef INVARIANTS
		panic("%s: invalid buffer type %d", __func__, cc->cc_type);
#endif
		break;
	}
}

/*
 * To avoid advancing 'cursor', make a local copy that gets advanced
 * instead.
 */
void
crypto_cursor_copydata_noadv(struct crypto_buffer_cursor *cc, int size,
    void *vdst)
{
	struct crypto_buffer_cursor copy;

	copy = *cc;
	crypto_cursor_copydata(&copy, size, vdst);
}

/*
 * Apply function f to the data in an iovec list starting "off" bytes from
 * the beginning, continuing for "len" bytes.
 */
static int
cuio_apply(struct uio *uio, int off, int len,
    int (*f)(void *, const void *, u_int), void *arg)
{
	struct iovec *iov = uio->uio_iov;
	int iol __diagused = uio->uio_iovcnt;
	unsigned count;
	int rval;

	CUIO_SKIP();
	while (len > 0) {
		KASSERT(iol >= 0, ("%s: empty", __func__));
		count = min(iov->iov_len - off, len);
		rval = (*f)(arg, ((caddr_t)iov->iov_base) + off, count);
		if (rval)
			return (rval);
		len -= count;
		off = 0;
		iol--;
		iov++;
	}
	return (0);
}

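/*
 * Copy "size" bytes from "src" into the request's buffer at byte offset
 * "off". For requests with a separate output buffer, the data is written
 * to the output buffer (crp_obuf); otherwise it is written to the input
 * buffer (crp_buf).
 */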
void
crypto_copyback(struct cryptop *crp, int off, int size, const void *src)
{
	struct crypto_buffer *cb;

	if (crp->crp_obuf.cb_type != CRYPTO_BUF_NONE)
		cb = &crp->crp_obuf;
	else
		cb = &crp->crp_buf;
	switch (cb->cb_type) {
	case CRYPTO_BUF_MBUF:
	case CRYPTO_BUF_SINGLE_MBUF:
		m_copyback(cb->cb_mbuf, off, size, src);
		break;
#if CRYPTO_MAY_HAVE_VMPAGE
	case CRYPTO_BUF_VMPAGE:
		MPASS(size <= cb->cb_vm_page_len);
		MPASS(size + off <=
		    cb->cb_vm_page_len + cb->cb_vm_page_offset);
		cvm_page_copyback(cb->cb_vm_page,
		    off + cb->cb_vm_page_offset, size, src);
		break;
#endif /* CRYPTO_MAY_HAVE_VMPAGE */
	case CRYPTO_BUF_UIO:
		cuio_copyback(cb->cb_uio, off, size, src);
		break;
	case CRYPTO_BUF_CONTIG:
		MPASS(off + size <= cb->cb_buf_len);
		bcopy(src, cb->cb_buf + off, size);
		break;
	default:
#ifdef INVARIANTS
		panic("invalid crp buf type %d", cb->cb_type);
#endif
		break;
	}
}

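/*
 * Copy "size" bytes starting at byte offset "off" out of the request's
 * input buffer (crp_buf) into "dst". Unlike crypto_copyback(), this always
 * reads from the input buffer, even when a separate output buffer is
 * present.
 */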
void
crypto_copydata(struct cryptop *crp, int off, int size, void *dst)
{

	switch (crp->crp_buf.cb_type) {
	case CRYPTO_BUF_MBUF:
	case CRYPTO_BUF_SINGLE_MBUF:
		m_copydata(crp->crp_buf.cb_mbuf, off, size, dst);
		break;
#if CRYPTO_MAY_HAVE_VMPAGE
	case CRYPTO_BUF_VMPAGE:
		MPASS(size <= crp->crp_buf.cb_vm_page_len);
		MPASS(size + off <= crp->crp_buf.cb_vm_page_len +
		    crp->crp_buf.cb_vm_page_offset);
		cvm_page_copydata(crp->crp_buf.cb_vm_page,
		    off + crp->crp_buf.cb_vm_page_offset, size, dst);
		break;
#endif /* CRYPTO_MAY_HAVE_VMPAGE */
	case CRYPTO_BUF_UIO:
		cuio_copydata(crp->crp_buf.cb_uio, off, size, dst);
		break;
	case CRYPTO_BUF_CONTIG:
		MPASS(off + size <= crp->crp_buf.cb_buf_len);
		bcopy(crp->crp_buf.cb_buf + off, dst, size);
		break;
	default:
#ifdef INVARIANTS
		panic("invalid crp buf type %d", crp->crp_buf.cb_type);
#endif
		break;
	}
}

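/*
 * Apply "f" to each contiguous region of the buffer in turn, starting "off"
 * bytes in and continuing for "len" bytes. "f" is called with "arg", a
 * pointer to the region, and the region's length; a nonzero return value
 * aborts the walk and is returned to the caller. A typical use (sketch
 * only; the hash context and update routine are hypothetical) is feeding a
 * scattered buffer into an incremental hash:
 *
 *	static int
 *	hash_update_cb(void *arg, const void *data, u_int len)
 *	{
 *		hash_ctx_update(arg, data, len);
 *		return (0);
 *	}
 *
 *	error = crypto_apply(crp, skip, payload_len, hash_update_cb, &ctx);
 */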
int
crypto_apply_buf(struct crypto_buffer *cb, int off, int len,
    int (*f)(void *, const void *, u_int), void *arg)
{
	int error;

	switch (cb->cb_type) {
	case CRYPTO_BUF_MBUF:
	case CRYPTO_BUF_SINGLE_MBUF:
		error = m_apply(cb->cb_mbuf, off, len,
		    (int (*)(void *, void *, u_int))f, arg);
		break;
	case CRYPTO_BUF_UIO:
		error = cuio_apply(cb->cb_uio, off, len, f, arg);
		break;
#if CRYPTO_MAY_HAVE_VMPAGE
	case CRYPTO_BUF_VMPAGE:
		error = cvm_page_apply(cb->cb_vm_page,
		    off + cb->cb_vm_page_offset, len, f, arg);
		break;
#endif /* CRYPTO_MAY_HAVE_VMPAGE */
	case CRYPTO_BUF_CONTIG:
		MPASS(off + len <= cb->cb_buf_len);
		error = (*f)(arg, cb->cb_buf + off, len);
		break;
	default:
#ifdef INVARIANTS
		panic("invalid crypto buf type %d", cb->cb_type);
#endif
		error = 0;
		break;
	}
	return (error);
}

int
crypto_apply(struct cryptop *crp, int off, int len,
    int (*f)(void *, const void *, u_int), void *arg)
{
	return (crypto_apply_buf(&crp->crp_buf, off, len, f, arg));
}

static inline void *
m_contiguous_subsegment(struct mbuf *m, size_t skip, size_t len)
{
	int rel_off;

	MPASS(skip <= INT_MAX);

	m = m_getptr(m, (int)skip, &rel_off);
	if (m == NULL)
		return (NULL);

	MPASS(rel_off >= 0);
	skip = rel_off;
	if (skip + len > m->m_len)
		return (NULL);

	if (m->m_flags & M_EXTPG)
		return (m_epg_contiguous_subsegment(m, skip, len));
	return (mtod(m, char *) + skip);
}

static inline void *
cuio_contiguous_segment(struct uio *uio, size_t skip, size_t len)
{
	int rel_off, idx;

	MPASS(skip <= INT_MAX);
	idx = cuio_getptr(uio, (int)skip, &rel_off);
	if (idx < 0)
		return (NULL);

	MPASS(rel_off >= 0);
	skip = rel_off;
	if (skip + len > uio->uio_iov[idx].iov_len)
		return (NULL);
	return ((char *)uio->uio_iov[idx].iov_base + skip);
}

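/*
 * Return a pointer to "len" bytes of the buffer starting "skip" bytes in,
 * but only if that range lies within a single contiguous segment; otherwise
 * return NULL, in which case callers typically fall back to copying (e.g.,
 * via crypto_copydata()).
 */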
void *
crypto_buffer_contiguous_subsegment(struct crypto_buffer *cb, size_t skip,
    size_t len)
{

	switch (cb->cb_type) {
	case CRYPTO_BUF_MBUF:
	case CRYPTO_BUF_SINGLE_MBUF:
		return (m_contiguous_subsegment(cb->cb_mbuf, skip, len));
	case CRYPTO_BUF_UIO:
		return (cuio_contiguous_segment(cb->cb_uio, skip, len));
#if CRYPTO_MAY_HAVE_VMPAGE
	case CRYPTO_BUF_VMPAGE:
		MPASS(skip + len <= cb->cb_vm_page_len);
		return (cvm_page_contiguous_segment(cb->cb_vm_page,
		    skip + cb->cb_vm_page_offset, len));
#endif /* CRYPTO_MAY_HAVE_VMPAGE */
	case CRYPTO_BUF_CONTIG:
		MPASS(skip + len <= cb->cb_buf_len);
		return (cb->cb_buf + skip);
	default:
#ifdef INVARIANTS
		panic("invalid crp buf type %d", cb->cb_type);
#endif
		return (NULL);
	}
}

void *
crypto_contiguous_subsegment(struct cryptop *crp, size_t skip, size_t len)
{
	return (crypto_buffer_contiguous_subsegment(&crp->crp_buf, skip, len));
}