/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2005-2011 Pawel Jakub Dawidek <pawel@dawidek.net>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/linker.h>
#include <sys/module.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/bio.h>
#include <sys/sysctl.h>
#include <sys/kthread.h>
#include <sys/proc.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/uio.h>
#include <sys/vnode.h>

#include <vm/uma.h>

#include <geom/geom.h>
#include <geom/geom_dbg.h>
#include <geom/eli/g_eli.h>
#include <geom/eli/pkcs5v2.h>
/*
 * Code paths:
 * BIO_READ:
 *	g_eli_start -> g_eli_crypto_read -> g_io_request -> g_eli_read_done -> g_eli_crypto_run -> g_eli_crypto_read_done -> g_io_deliver
 * BIO_WRITE:
 *	g_eli_start -> g_eli_crypto_run -> g_eli_crypto_write_done -> g_io_request -> g_eli_write_done -> g_io_deliver
 */

/*
 * Copy data from a (potentially unmapped) bio to a kernelspace buffer.
 *
 * The buffer must have at least as much room as bp->bio_length.
 */
static void
g_eli_bio_copyin(struct bio *bp, void *kaddr)
{
	struct uio uio;
	struct iovec iov[1];

	iov[0].iov_base = kaddr;
	iov[0].iov_len = bp->bio_length;
	uio.uio_iov = iov;
	uio.uio_iovcnt = 1;
	uio.uio_offset = 0;
	uio.uio_resid = bp->bio_length;
	uio.uio_segflg = UIO_SYSSPACE;
	uio.uio_rw = UIO_READ;
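	/*
	 * uiomove_fromphys() copies out of the physical pages backing
	 * the bio, so the data does not need to be mapped into the
	 * kernel address space.
	 */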
	uiomove_fromphys(bp->bio_ma, bp->bio_ma_offset, bp->bio_length, &uio);
}

/*
 * This function is called after the data has been read and decrypted.
 *
 * g_eli_start -> g_eli_crypto_read -> g_io_request -> g_eli_read_done -> g_eli_crypto_run -> G_ELI_CRYPTO_READ_DONE -> g_io_deliver
 */
static int
g_eli_crypto_read_done(struct cryptop *crp)
{
	struct g_eli_softc *sc;
	struct bio *bp;

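	/*
	 * EAGAIN is a transient crypto(9) error; try to redispatch the
	 * request.  If that succeeds, this callback will run again once
	 * the request completes.
	 */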
	if (crp->crp_etype == EAGAIN) {
		if (g_eli_crypto_rerun(crp) == 0)
			return (0);
	}
	bp = (struct bio *)crp->crp_opaque;
	bp->bio_inbed++;
	if (crp->crp_etype == 0) {
		G_ELI_DEBUG(3, "Crypto READ request done (%d/%d).",
		    bp->bio_inbed, bp->bio_children);
		bp->bio_completed += crp->crp_payload_length;
	} else {
		G_ELI_DEBUG(1, "Crypto READ request failed (%d/%d) error=%d.",
		    bp->bio_inbed, bp->bio_children, crp->crp_etype);
		if (bp->bio_error == 0)
			bp->bio_error = crp->crp_etype;
	}
	sc = bp->bio_to->geom->softc;
	if (sc != NULL && crp->crp_cipher_key != NULL)
		g_eli_key_drop(sc, __DECONST(void *, crp->crp_cipher_key));
	crypto_freereq(crp);
	/*
	 * Do we have all sectors already?
	 */
	if (bp->bio_inbed < bp->bio_children)
		return (0);

	if (bp->bio_error != 0) {
		G_ELI_LOGREQ(0, bp, "Crypto READ request failed (error=%d).",
		    bp->bio_error);
		bp->bio_completed = 0;
	}
	/*
	 * Read is finished, send it up.
	 */
	g_io_deliver(bp, bp->bio_error);
	if (sc != NULL)
		atomic_subtract_int(&sc->sc_inflight, 1);
	return (0);
}

/*
 * This function is called after the data has been encrypted.
 *
 * g_eli_start -> g_eli_crypto_run -> G_ELI_CRYPTO_WRITE_DONE -> g_io_request -> g_eli_write_done -> g_io_deliver
 */
static int
g_eli_crypto_write_done(struct cryptop *crp)
{
	struct g_eli_softc *sc;
	struct g_geom *gp;
	struct g_consumer *cp;
	struct bio *bp, *cbp;

	if (crp->crp_etype == EAGAIN) {
		if (g_eli_crypto_rerun(crp) == 0)
			return (0);
	}
	bp = (struct bio *)crp->crp_opaque;
	bp->bio_inbed++;
	if (crp->crp_etype == 0) {
		G_ELI_DEBUG(3, "Crypto WRITE request done (%d/%d).",
		    bp->bio_inbed, bp->bio_children);
	} else {
		G_ELI_DEBUG(1, "Crypto WRITE request failed (%d/%d) error=%d.",
		    bp->bio_inbed, bp->bio_children, crp->crp_etype);
		if (bp->bio_error == 0)
			bp->bio_error = crp->crp_etype;
	}
	gp = bp->bio_to->geom;
	sc = gp->softc;
	if (crp->crp_cipher_key != NULL)
		g_eli_key_drop(sc, __DECONST(void *, crp->crp_cipher_key));
	crypto_freereq(crp);
	/*
	 * All sectors are already encrypted?
	 */
	if (bp->bio_inbed < bp->bio_children)
		return (0);
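	/*
	 * All per-sector crypto requests are done.  Reset the bio
	 * accounting so the single write issued below is counted as one
	 * child request.
	 */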
	bp->bio_inbed = 0;
	bp->bio_children = 1;
	cbp = bp->bio_driver1;
	bp->bio_driver1 = NULL;
	if (bp->bio_error != 0) {
		G_ELI_LOGREQ(0, bp, "Crypto WRITE request failed (error=%d).",
		    bp->bio_error);
		g_eli_free_data(bp);
		g_destroy_bio(cbp);
		g_io_deliver(bp, bp->bio_error);
		atomic_subtract_int(&sc->sc_inflight, 1);
		return (0);
	}
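	/*
	 * bio_driver2 points at the buffer holding the encrypted data;
	 * it was allocated by g_eli_alloc_data() in g_eli_crypto_run().
	 */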
	cbp->bio_data = bp->bio_driver2;
	/*
	 * Clear BIO_UNMAPPED, which was inherited from the bio we cloned
	 * in g_eli_start(), because we set bio_data manually.
	 */
	cbp->bio_flags &= ~BIO_UNMAPPED;
	cbp->bio_done = g_eli_write_done;
	cp = LIST_FIRST(&gp->consumer);
	cbp->bio_to = cp->provider;
	G_ELI_LOGREQ(2, cbp, "Sending request.");
	/*
	 * Send encrypted data to the provider.
	 */
	g_io_request(cbp, cp);
	return (0);
}

/*
 * This function is called to read encrypted data.
 *
 * g_eli_start -> G_ELI_CRYPTO_READ -> g_io_request -> g_eli_read_done -> g_eli_crypto_run -> g_eli_crypto_read_done -> g_io_deliver
 */
void
g_eli_crypto_read(struct g_eli_softc *sc, struct bio *bp, boolean_t fromworker)
{
	struct g_consumer *cp;
	struct bio *cbp;

	if (!fromworker) {
		/*
		 * We are not called from the worker thread, so check
		 * whether the device is suspended.
		 */
		mtx_lock(&sc->sc_queue_mtx);
		if (sc->sc_flags & G_ELI_FLAG_SUSPEND) {
			/*
			 * The device is suspended; place the request on
			 * the queue so it can be handled after resume.
			 */
			G_ELI_DEBUG(0, "device suspended, move onto queue");
			bioq_insert_tail(&sc->sc_queue, bp);
			mtx_unlock(&sc->sc_queue_mtx);
			wakeup(sc);
			return;
		}
		atomic_add_int(&sc->sc_inflight, 1);
		mtx_unlock(&sc->sc_queue_mtx);
	}
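	/*
	 * Tag the request with worker number 0 for now;
	 * g_eli_crypto_run() will record the number of the worker that
	 * actually processes it.
	 */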
	G_ELI_SETWORKER(bp->bio_pflags, 0);
	bp->bio_driver2 = NULL;
	cbp = bp->bio_driver1;
	cbp->bio_done = g_eli_read_done;
	cp = LIST_FIRST(&sc->sc_geom->consumer);
	cbp->bio_to = cp->provider;
	G_ELI_LOGREQ(2, cbp, "Sending request.");
	/*
	 * Read encrypted data from provider.
	 */
	g_io_request(cbp, cp);
}

/*
 * This is the main function responsible for cryptography (i.e.,
 * communication with the crypto(9) subsystem).
 *
 * BIO_READ:
 *	g_eli_start -> g_eli_crypto_read -> g_io_request -> g_eli_read_done -> G_ELI_CRYPTO_RUN -> g_eli_crypto_read_done -> g_io_deliver
 * BIO_WRITE:
 *	g_eli_start -> G_ELI_CRYPTO_RUN -> g_eli_crypto_write_done -> g_io_request -> g_eli_write_done -> g_io_deliver
 */
void
g_eli_crypto_run(struct g_eli_worker *wr, struct bio *bp)
{
	struct g_eli_softc *sc;
	struct cryptopq crpq;
	struct cryptop *crp;
	vm_page_t *pages;
	u_int i, nsec, secsize;
	off_t dstoff;
	u_char *data = NULL;
	int error __diagused, pages_offset;
	bool batch;

	G_ELI_LOGREQ(3, bp, "%s", __func__);

	G_ELI_SETWORKER(bp->bio_pflags, wr->w_number);
	sc = wr->w_softc;
	secsize = LIST_FIRST(&sc->sc_geom->provider)->sectorsize;
	nsec = bp->bio_length / secsize;

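	/*
	 * Each sector is handled by its own crypto request; the
	 * completion callbacks use bio_inbed and bio_children to tell
	 * when the whole bio is done.
	 */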
	bp->bio_inbed = 0;
	bp->bio_children = nsec;

	/*
	 * When writing we cannot destroy the current bio_data contents,
	 * so we need to allocate separate memory for the encrypted data.
	 */
	if (bp->bio_cmd == BIO_WRITE) {
		if (!g_eli_alloc_data(bp, bp->bio_length)) {
			G_ELI_LOGREQ(0, bp, "Crypto request failed (ENOMEM).");
			if (bp->bio_driver1 != NULL) {
				g_destroy_bio(bp->bio_driver1);
				bp->bio_driver1 = NULL;
			}
			bp->bio_error = ENOMEM;
			g_io_deliver(bp, bp->bio_error);
			if (sc != NULL)
				atomic_subtract_int(&sc->sc_inflight, 1);
			return;
		}
		data = bp->bio_driver2;
		/*
		 * This copy could be eliminated by using crypto's output
		 * buffer, instead of using a single overwriting buffer.
		 */
		if ((bp->bio_flags & BIO_UNMAPPED) != 0)
			g_eli_bio_copyin(bp, data);
		else
			bcopy(bp->bio_data, data, bp->bio_length);
	} else {
		if ((bp->bio_flags & BIO_UNMAPPED) != 0) {
			pages = bp->bio_ma;
			pages_offset = bp->bio_ma_offset;
		} else {
			data = bp->bio_data;
		}
	}

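	/*
	 * In batch mode (the kern.geom.eli.batch sysctl) collect the
	 * requests on a local queue and dispatch them all at once below.
	 */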
	TAILQ_INIT(&crpq);
	batch = atomic_load_int(&g_eli_batch) != 0;

	for (i = 0, dstoff = bp->bio_offset; i < nsec; i++, dstoff += secsize) {
		crp = crypto_getreq(wr->w_sid, M_WAITOK);

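		/*
		 * Attach this sector's data to the request: either a
		 * contiguous kernel buffer, or a run of VM pages for an
		 * unmapped read.
		 */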
		if (data) {
			crypto_use_buf(crp, data, secsize);
			data += secsize;
		} else {
			MPASS(pages != NULL);
			crypto_use_vmpage(crp, pages, secsize, pages_offset);
			pages_offset += secsize;
			pages += pages_offset >> PAGE_SHIFT;
			pages_offset &= PAGE_MASK;
		}
		crp->crp_opaque = (void *)bp;
		if (bp->bio_cmd == BIO_WRITE) {
			crp->crp_op = CRYPTO_OP_ENCRYPT;
			crp->crp_callback = g_eli_crypto_write_done;
		} else /* if (bp->bio_cmd == BIO_READ) */ {
			crp->crp_op = CRYPTO_OP_DECRYPT;
			crp->crp_callback = g_eli_crypto_read_done;
		}
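		/*
		 * CRYPTO_F_CBIFSYNC allows the completion callback to be
		 * invoked directly from the dispatch path when the
		 * selected crypto driver is synchronous.
		 */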
		crp->crp_flags = CRYPTO_F_CBIFSYNC;
		crp->crp_payload_start = 0;
		crp->crp_payload_length = secsize;
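		/*
		 * In multi-key mode every region of the provider is
		 * encrypted with its own derived key, so look up (and
		 * reference) the key for this sector's offset.
		 */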
		if ((sc->sc_flags & G_ELI_FLAG_SINGLE_KEY) == 0) {
			crp->crp_cipher_key = g_eli_key_hold(sc, dstoff,
			    secsize);
		}
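		/*
		 * For algorithms that use an IV, derive it from the
		 * sector's byte offset and pass it separately from the
		 * payload buffer.
		 */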
		if (g_eli_ivlen(sc->sc_ealgo) != 0) {
			crp->crp_flags |= CRYPTO_F_IV_SEPARATE;
			g_eli_crypto_ivgen(sc, dstoff, crp->crp_iv,
			    sizeof(crp->crp_iv));
		}

		if (batch) {
			TAILQ_INSERT_TAIL(&crpq, crp, crp_next);
		} else {
			error = crypto_dispatch(crp);
			KASSERT(error == 0,
			    ("crypto_dispatch() failed (error=%d)", error));
		}
	}

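	/*
	 * In batch mode, hand all collected requests to crypto(9) in a
	 * single call.
	 */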
	if (batch)
		crypto_dispatch_batch(&crpq, 0);
}