FreeBSD/Linux Kernel Cross Reference
sys/crypto/cipher.c
/*
 * Cryptographic API.
 *
 * Cipher operations.
 *
 * Copyright (c) 2002 James Morris <jmorris@intercode.com.au>
 * Generic scatterwalk code by Adam J. Richter <adam@yggdrasil.com>.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 */
#include <linux/kernel.h>
#include <linux/crypto.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <asm/scatterlist.h>
#include "internal.h"

typedef void (cryptfn_t)(void *, u8 *, const u8 *);
typedef void (procfn_t)(struct crypto_tfm *, u8 *,
                        u8 *, cryptfn_t, int enc, void *);

struct scatter_walk {
        struct scatterlist *sg;
        struct page *page;              /* current page of the current sg entry */
        void *data;                     /* kmapped address of the walk position */
        unsigned int len_this_page;     /* bytes remaining in this page */
        unsigned int len_this_segment;  /* bytes remaining in this sg entry */
        unsigned int offset;            /* offset of the position in this page */
};

enum km_type crypto_km_types[] = {
        KM_USER0,
        KM_USER1,
        KM_SOFTIRQ0,
        KM_SOFTIRQ1,
};
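/*
 * Note (not in the original file): this table is presumably indexed
 * by context and direction (see crypto_kmap_type() in "internal.h"),
 * so user and softirq contexts, and the in/out sides of a copy, each
 * get a distinct atomic kmap slot.
 */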

/* XOR helpers used for CBC chaining.  Note that the casts assume the
   blocks are at least 32-bit aligned. */
static inline void xor_64(u8 *a, const u8 *b)
{
        ((u32 *)a)[0] ^= ((u32 *)b)[0];
        ((u32 *)a)[1] ^= ((u32 *)b)[1];
}

static inline void xor_128(u8 *a, const u8 *b)
{
        ((u32 *)a)[0] ^= ((u32 *)b)[0];
        ((u32 *)a)[1] ^= ((u32 *)b)[1];
        ((u32 *)a)[2] ^= ((u32 *)b)[2];
        ((u32 *)a)[3] ^= ((u32 *)b)[3];
}

/* sg_next is defined as an inline routine for now, in case we want to
   change scatterlist to a linked list later. */
static inline struct scatterlist *sg_next(struct scatterlist *sg)
{
        return sg + 1;
}

/* Return a pointer into the mapped page if the next nbytes fit entirely
   within it; otherwise return the caller-supplied scratch buffer. */
static void *which_buf(struct scatter_walk *walk, unsigned int nbytes,
                       void *scratch)
{
        if (nbytes <= walk->len_this_page &&
            (((unsigned long)walk->data) & (PAGE_CACHE_SIZE - 1)) + nbytes <=
            PAGE_CACHE_SIZE)
                return walk->data;
        else
                return scratch;
}

/* Copy between a linear buffer and scatterlist data; "out" selects the
   direction (buffer to scatterlist when non-zero). */
static void memcpy_dir(void *buf, void *sgdata, size_t nbytes, int out)
{
        if (out)
                memcpy(sgdata, buf, nbytes);
        else
                memcpy(buf, sgdata, nbytes);
}

static void scatterwalk_start(struct scatter_walk *walk, struct scatterlist *sg)
{
        unsigned int rest_of_page;

        walk->sg = sg;

        walk->page = sg->page;
        walk->len_this_segment = sg->length;

        rest_of_page = PAGE_CACHE_SIZE - (sg->offset & (PAGE_CACHE_SIZE - 1));
        walk->len_this_page = min(sg->length, rest_of_page);
        walk->offset = sg->offset;
}

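/*
 * Note (not in the original file): crypto_kmap()/crypto_kunmap() are
 * presumably atomic kmaps (see "internal.h"), so a walker must not
 * sleep between scatterwalk_map() and the matching unmap in
 * scatter_done(); crypt() below only yields after unmapping.
 */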
static void scatterwalk_map(struct scatter_walk *walk, int out)
{
        walk->data = crypto_kmap(walk->page, out) + walk->offset;
}

static void scatter_page_done(struct scatter_walk *walk, int out,
                              unsigned int more)
{
        /* walk->data may be pointing at the first byte of the next page;
           however, we know we transferred at least one byte.  So,
           walk->data - 1 will be a virtual address in the mapped page. */

        if (out)
                flush_dcache_page(walk->page);

        if (more) {
                walk->len_this_segment -= walk->len_this_page;

                if (walk->len_this_segment) {
                        walk->page++;
                        walk->len_this_page = min(walk->len_this_segment,
                                                  (unsigned)PAGE_CACHE_SIZE);
                        walk->offset = 0;
                } else
                        scatterwalk_start(walk, sg_next(walk->sg));
        }
}

static void scatter_done(struct scatter_walk *walk, int out, int more)
{
        crypto_kunmap(walk->data, out);
        if (walk->len_this_page == 0 || !more)
                scatter_page_done(walk, out, more);
}

/*
 * Do not call this unless the total length of all of the fragments
 * has been verified as a multiple of the block size.
 */
static int copy_chunks(void *buf, struct scatter_walk *walk,
                       size_t nbytes, int out)
{
        if (buf != walk->data) {
                while (nbytes > walk->len_this_page) {
                        memcpy_dir(buf, walk->data, walk->len_this_page, out);
                        buf += walk->len_this_page;
                        nbytes -= walk->len_this_page;

                        crypto_kunmap(walk->data, out);
                        scatter_page_done(walk, out, 1);
                        scatterwalk_map(walk, out);
                }

                memcpy_dir(buf, walk->data, nbytes, out);
        }

        walk->offset += nbytes;
        walk->len_this_page -= nbytes;
        walk->len_this_segment -= nbytes;
        return 0;
}

/*
 * Generic encrypt/decrypt wrapper for ciphers, handles operations across
 * multiple page boundaries by using temporary blocks.  In user context,
 * the kernel is given a chance to schedule us once per block.
 */
static int crypt(struct crypto_tfm *tfm,
                 struct scatterlist *dst,
                 struct scatterlist *src,
                 unsigned int nbytes, cryptfn_t crfn,
                 procfn_t prfn, int enc, void *info)
{
        struct scatter_walk walk_in, walk_out;
        const unsigned int bsize = crypto_tfm_alg_blocksize(tfm);
        u8 tmp_src[nbytes > src->length ? bsize : 0];
        u8 tmp_dst[nbytes > dst->length ? bsize : 0];

        if (!nbytes)
                return 0;

        if (nbytes % bsize) {
                tfm->crt_flags |= CRYPTO_TFM_RES_BAD_BLOCK_LEN;
                return -EINVAL;
        }

        scatterwalk_start(&walk_in, src);
        scatterwalk_start(&walk_out, dst);

        for (;;) {
                u8 *src_p, *dst_p;

                scatterwalk_map(&walk_in, 0);
                scatterwalk_map(&walk_out, 1);
                src_p = which_buf(&walk_in, bsize, tmp_src);
                dst_p = which_buf(&walk_out, bsize, tmp_dst);

                nbytes -= bsize;

                copy_chunks(src_p, &walk_in, bsize, 0);

                prfn(tfm, dst_p, src_p, crfn, enc, info);

                scatter_done(&walk_in, 0, nbytes);

                copy_chunks(dst_p, &walk_out, bsize, 1);
                scatter_done(&walk_out, 1, nbytes);

                if (!nbytes)
                        return 0;

                crypto_yield(tfm);
        }
}
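
/*
 * Example (a minimal sketch, not part of the original file): how a
 * caller reaches crypt() through the mode operations installed by
 * crypto_init_cipher_ops() below.  It assumes the one-shot cipher
 * wrappers of this era from <linux/crypto.h>: crypto_alloc_tfm(),
 * crypto_cipher_setkey(), crypto_cipher_encrypt() and
 * crypto_free_tfm().
 */
#if 0
static int example_encrypt_in_place(u8 *buf, unsigned int len,
                                    const u8 *key, unsigned int keylen)
{
        struct scatterlist sg;
        struct crypto_tfm *tfm;
        int err;

        tfm = crypto_alloc_tfm("des", CRYPTO_TFM_MODE_ECB);
        if (tfm == NULL)
                return -ENOMEM;

        err = crypto_cipher_setkey(tfm, key, keylen);   /* -> setkey() */
        if (!err) {
                /* Describe the (physically contiguous) buffer as a
                   one-entry scatterlist. */
                sg.page = virt_to_page(buf);
                sg.offset = (unsigned long)buf & ~PAGE_MASK;
                sg.length = len;        /* must be a multiple of the block size */

                /* -> ecb_encrypt() -> crypt() */
                err = crypto_cipher_encrypt(tfm, &sg, &sg, len);
        }

        crypto_free_tfm(tfm);
        return err;
}
#endif
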
/*
 * CBC chaining, one block per call: encryption computes
 * C_i = E_K(P_i ^ C_{i-1}) and decryption computes
 * P_i = D_K(C_i) ^ C_{i-1}, with the IV acting as C_0.
 * info carries the running IV between calls from crypt().
 */
static void cbc_process(struct crypto_tfm *tfm,
                        u8 *dst, u8 *src, cryptfn_t fn, int enc, void *info)
{
        u8 *iv = info;

        /* Null encryption */
        if (!iv)
                return;

        if (enc) {
                tfm->crt_u.cipher.cit_xor_block(iv, src);
                fn(crypto_tfm_ctx(tfm), dst, iv);
                memcpy(iv, dst, crypto_tfm_alg_blocksize(tfm));
        } else {
                /* For in-place decryption (src == dst) the ciphertext
                   block must survive the cipher call so it can become
                   the next IV, so bounce the output through a stack
                   block. */
                const int need_stack = (src == dst);
                u8 stack[need_stack ? crypto_tfm_alg_blocksize(tfm) : 0];
                u8 *buf = need_stack ? stack : dst;

                fn(crypto_tfm_ctx(tfm), buf, src);
                tfm->crt_u.cipher.cit_xor_block(buf, iv);
                memcpy(iv, src, crypto_tfm_alg_blocksize(tfm));
                if (buf != dst)
                        memcpy(dst, buf, crypto_tfm_alg_blocksize(tfm));
        }
}

static void ecb_process(struct crypto_tfm *tfm, u8 *dst, u8 *src,
                        cryptfn_t fn, int enc, void *info)
{
        /* ECB has no chaining; enc and info are unused. */
        fn(crypto_tfm_ctx(tfm), dst, src);
}

static int setkey(struct crypto_tfm *tfm, const u8 *key, unsigned int keylen)
{
        struct cipher_alg *cia = &tfm->__crt_alg->cra_cipher;

        if (keylen < cia->cia_min_keysize || keylen > cia->cia_max_keysize) {
                tfm->crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
                return -EINVAL;
        } else
                return cia->cia_setkey(crypto_tfm_ctx(tfm), key, keylen,
                                       &tfm->crt_flags);
}

static int ecb_encrypt(struct crypto_tfm *tfm,
                       struct scatterlist *dst,
                       struct scatterlist *src, unsigned int nbytes)
{
        return crypt(tfm, dst, src, nbytes,
                     tfm->__crt_alg->cra_cipher.cia_encrypt,
                     ecb_process, 1, NULL);
}

static int ecb_decrypt(struct crypto_tfm *tfm,
                       struct scatterlist *dst,
                       struct scatterlist *src,
                       unsigned int nbytes)
{
        /* The enc flag is ignored by ecb_process(), so passing 1 here
           is harmless; the direction is carried by cia_decrypt. */
        return crypt(tfm, dst, src, nbytes,
                     tfm->__crt_alg->cra_cipher.cia_decrypt,
                     ecb_process, 1, NULL);
}

static int cbc_encrypt(struct crypto_tfm *tfm,
                       struct scatterlist *dst,
                       struct scatterlist *src,
                       unsigned int nbytes)
{
        return crypt(tfm, dst, src, nbytes,
                     tfm->__crt_alg->cra_cipher.cia_encrypt,
                     cbc_process, 1, tfm->crt_cipher.cit_iv);
}

static int cbc_encrypt_iv(struct crypto_tfm *tfm,
                          struct scatterlist *dst,
                          struct scatterlist *src,
                          unsigned int nbytes, u8 *iv)
{
        return crypt(tfm, dst, src, nbytes,
                     tfm->__crt_alg->cra_cipher.cia_encrypt,
                     cbc_process, 1, iv);
}

static int cbc_decrypt(struct crypto_tfm *tfm,
                       struct scatterlist *dst,
                       struct scatterlist *src,
                       unsigned int nbytes)
{
        return crypt(tfm, dst, src, nbytes,
                     tfm->__crt_alg->cra_cipher.cia_decrypt,
                     cbc_process, 0, tfm->crt_cipher.cit_iv);
}

static int cbc_decrypt_iv(struct crypto_tfm *tfm,
                          struct scatterlist *dst,
                          struct scatterlist *src,
                          unsigned int nbytes, u8 *iv)
{
        return crypt(tfm, dst, src, nbytes,
                     tfm->__crt_alg->cra_cipher.cia_decrypt,
                     cbc_process, 0, iv);
}

/* Placeholders for modes (CFB, CTR) that are not yet implemented. */
static int nocrypt(struct crypto_tfm *tfm,
                   struct scatterlist *dst,
                   struct scatterlist *src,
                   unsigned int nbytes)
{
        return -ENOSYS;
}

static int nocrypt_iv(struct crypto_tfm *tfm,
                      struct scatterlist *dst,
                      struct scatterlist *src,
                      unsigned int nbytes, u8 *iv)
{
        return -ENOSYS;
}

int crypto_init_cipher_flags(struct crypto_tfm *tfm, u32 flags)
{
        u32 mode = flags & CRYPTO_TFM_MODE_MASK;

        tfm->crt_cipher.cit_mode = mode ? mode : CRYPTO_TFM_MODE_ECB;
        if (flags & CRYPTO_TFM_REQ_WEAK_KEY)
                tfm->crt_flags = CRYPTO_TFM_REQ_WEAK_KEY;

        return 0;
}

int crypto_init_cipher_ops(struct crypto_tfm *tfm)
{
        int ret = 0;
        struct crypto_alg *alg = tfm->__crt_alg;
        struct cipher_tfm *ops = &tfm->crt_cipher;

        ops->cit_setkey = setkey;

        switch (tfm->crt_cipher.cit_mode) {
        case CRYPTO_TFM_MODE_ECB:
                ops->cit_encrypt = ecb_encrypt;
                ops->cit_decrypt = ecb_decrypt;
                break;

        case CRYPTO_TFM_MODE_CBC:
                ops->cit_encrypt = cbc_encrypt;
                ops->cit_decrypt = cbc_decrypt;
                ops->cit_encrypt_iv = cbc_encrypt_iv;
                ops->cit_decrypt_iv = cbc_decrypt_iv;
                break;

        case CRYPTO_TFM_MODE_CFB:
                ops->cit_encrypt = nocrypt;
                ops->cit_decrypt = nocrypt;
                ops->cit_encrypt_iv = nocrypt_iv;
                ops->cit_decrypt_iv = nocrypt_iv;
                break;

        case CRYPTO_TFM_MODE_CTR:
                ops->cit_encrypt = nocrypt;
                ops->cit_decrypt = nocrypt;
                ops->cit_encrypt_iv = nocrypt_iv;
                ops->cit_decrypt_iv = nocrypt_iv;
                break;

        default:
                BUG();
        }

        if (alg->cra_cipher.cia_ivsize &&
            ops->cit_mode != CRYPTO_TFM_MODE_ECB) {

                switch (crypto_tfm_alg_blocksize(tfm)) {
                case 8:
                        ops->cit_xor_block = xor_64;
                        break;

                case 16:
                        ops->cit_xor_block = xor_128;
                        break;

                default:
                        printk(KERN_WARNING "%s: block size %u not supported\n",
                               crypto_tfm_alg_name(tfm),
                               crypto_tfm_alg_blocksize(tfm));
                        ret = -EINVAL;
                        goto out;
                }

                ops->cit_iv = kmalloc(alg->cra_cipher.cia_ivsize, GFP_KERNEL);
                if (ops->cit_iv == NULL)
                        ret = -ENOMEM;
        }

out:
        return ret;
}

void crypto_exit_cipher_ops(struct crypto_tfm *tfm)
{
        if (tfm->crt_cipher.cit_iv)
                kfree(tfm->crt_cipher.cit_iv);
}