GitHub Repository: torvalds/linux
Path: blob/master/arch/arm/crypto/aes-ce-glue.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * aes-ce-glue.c - wrapper code for ARMv8 AES
 *
 * Copyright (C) 2015 Linaro Ltd <[email protected]>
 */

#include <asm/hwcap.h>
#include <asm/neon.h>
#include <asm/simd.h>
#include <linux/unaligned.h>
#include <crypto/aes.h>
#include <crypto/internal/skcipher.h>
#include <crypto/scatterwalk.h>
#include <linux/cpufeature.h>
#include <linux/module.h>
#include <crypto/xts.h>

MODULE_DESCRIPTION("AES-ECB/CBC/CTR/XTS using ARMv8 Crypto Extensions");
MODULE_AUTHOR("Ard Biesheuvel <[email protected]>");
MODULE_LICENSE("GPL v2");

/* defined in aes-ce-core.S */
asmlinkage u32 ce_aes_sub(u32 input);
asmlinkage void ce_aes_invert(void *dst, void *src);

asmlinkage void ce_aes_ecb_encrypt(u8 out[], u8 const in[], u32 const rk[],
				   int rounds, int blocks);
asmlinkage void ce_aes_ecb_decrypt(u8 out[], u8 const in[], u32 const rk[],
				   int rounds, int blocks);

asmlinkage void ce_aes_cbc_encrypt(u8 out[], u8 const in[], u32 const rk[],
				   int rounds, int blocks, u8 iv[]);
asmlinkage void ce_aes_cbc_decrypt(u8 out[], u8 const in[], u32 const rk[],
				   int rounds, int blocks, u8 iv[]);
asmlinkage void ce_aes_cbc_cts_encrypt(u8 out[], u8 const in[], u32 const rk[],
				       int rounds, int bytes, u8 const iv[]);
asmlinkage void ce_aes_cbc_cts_decrypt(u8 out[], u8 const in[], u32 const rk[],
				       int rounds, int bytes, u8 const iv[]);

asmlinkage void ce_aes_ctr_encrypt(u8 out[], u8 const in[], u32 const rk[],
				   int rounds, int blocks, u8 ctr[]);

asmlinkage void ce_aes_xts_encrypt(u8 out[], u8 const in[], u32 const rk1[],
				   int rounds, int bytes, u8 iv[],
				   u32 const rk2[], int first);
asmlinkage void ce_aes_xts_decrypt(u8 out[], u8 const in[], u32 const rk1[],
				   int rounds, int bytes, u8 iv[],
				   u32 const rk2[], int first);

struct aes_block {
	u8 b[AES_BLOCK_SIZE];
};

static int num_rounds(struct crypto_aes_ctx *ctx)
{
	/*
	 * # of rounds specified by AES:
	 * 128 bit key		10 rounds
	 * 192 bit key		12 rounds
	 * 256 bit key		14 rounds
	 * => n byte key	=> 6 + (n/4) rounds
	 */
	return 6 + ctx->key_length / 4;
}

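/*
 * Expand the user-supplied key into encryption and decryption round keys.
 * The SubWord step of the key schedule is delegated to ce_aes_sub(), which
 * is implemented with the AES instructions in aes-ce-core.S, so the whole
 * expansion runs inside a single kernel_neon_begin()/kernel_neon_end()
 * section.
 */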
static int ce_aes_expandkey(struct crypto_aes_ctx *ctx, const u8 *in_key,
			    unsigned int key_len)
{
	/*
	 * The AES key schedule round constants
	 */
	static u8 const rcon[] = {
		0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x1b, 0x36,
	};

	u32 kwords = key_len / sizeof(u32);
	struct aes_block *key_enc, *key_dec;
	int i, j;

	if (key_len != AES_KEYSIZE_128 &&
	    key_len != AES_KEYSIZE_192 &&
	    key_len != AES_KEYSIZE_256)
		return -EINVAL;

	ctx->key_length = key_len;
	for (i = 0; i < kwords; i++)
		ctx->key_enc[i] = get_unaligned_le32(in_key + i * sizeof(u32));

	kernel_neon_begin();
	for (i = 0; i < sizeof(rcon); i++) {
		u32 *rki = ctx->key_enc + (i * kwords);
		u32 *rko = rki + kwords;

		rko[0] = ror32(ce_aes_sub(rki[kwords - 1]), 8);
		rko[0] = rko[0] ^ rki[0] ^ rcon[i];
		rko[1] = rko[0] ^ rki[1];
		rko[2] = rko[1] ^ rki[2];
		rko[3] = rko[2] ^ rki[3];

		if (key_len == AES_KEYSIZE_192) {
			if (i >= 7)
				break;
			rko[4] = rko[3] ^ rki[4];
			rko[5] = rko[4] ^ rki[5];
		} else if (key_len == AES_KEYSIZE_256) {
			if (i >= 6)
				break;
			rko[4] = ce_aes_sub(rko[3]) ^ rki[4];
			rko[5] = rko[4] ^ rki[5];
			rko[6] = rko[5] ^ rki[6];
			rko[7] = rko[6] ^ rki[7];
		}
	}

	/*
	 * Generate the decryption keys for the Equivalent Inverse Cipher.
	 * This involves reversing the order of the round keys, and applying
	 * the Inverse Mix Columns transformation on all but the first and
	 * the last one.
	 */
	key_enc = (struct aes_block *)ctx->key_enc;
	key_dec = (struct aes_block *)ctx->key_dec;
	j = num_rounds(ctx);

	key_dec[0] = key_enc[j];
	for (i = 1, j--; j > 0; i++, j--)
		ce_aes_invert(key_dec + i, key_enc + j);
	key_dec[i] = key_enc[0];

	kernel_neon_end();
	return 0;
}

static int ce_aes_setkey(struct crypto_skcipher *tfm, const u8 *in_key,
			 unsigned int key_len)
{
	struct crypto_aes_ctx *ctx = crypto_skcipher_ctx(tfm);

	return ce_aes_expandkey(ctx, in_key, key_len);
}

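/*
 * XTS uses two independent AES keys: key1 encrypts the data blocks and
 * key2 generates the tweak.  xts_set_key() splits the supplied key in half
 * and expands each half separately.
 */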
struct crypto_aes_xts_ctx {
	struct crypto_aes_ctx key1;
	struct crypto_aes_ctx __aligned(8) key2;
};

static int xts_set_key(struct crypto_skcipher *tfm, const u8 *in_key,
		       unsigned int key_len)
{
	struct crypto_aes_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
	int ret;

	ret = xts_verify_key(tfm, in_key, key_len);
	if (ret)
		return ret;

	ret = ce_aes_expandkey(&ctx->key1, in_key, key_len / 2);
	if (!ret)
		ret = ce_aes_expandkey(&ctx->key2, &in_key[key_len / 2],
				       key_len / 2);
	return ret;
}

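/*
 * All of the mode handlers below follow the same pattern: walk the request
 * with skcipher_walk_virt() and bracket every call into the assembly code
 * with kernel_neon_begin()/kernel_neon_end() so that the NEON register file
 * can be used safely from kernel mode.
 */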
static int ecb_encrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct crypto_aes_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct skcipher_walk walk;
	unsigned int blocks;
	int err;

	err = skcipher_walk_virt(&walk, req, false);

	while ((blocks = (walk.nbytes / AES_BLOCK_SIZE))) {
		kernel_neon_begin();
		ce_aes_ecb_encrypt(walk.dst.virt.addr, walk.src.virt.addr,
				   ctx->key_enc, num_rounds(ctx), blocks);
		kernel_neon_end();
		err = skcipher_walk_done(&walk, walk.nbytes % AES_BLOCK_SIZE);
	}
	return err;
}

static int ecb_decrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct crypto_aes_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct skcipher_walk walk;
	unsigned int blocks;
	int err;

	err = skcipher_walk_virt(&walk, req, false);

	while ((blocks = (walk.nbytes / AES_BLOCK_SIZE))) {
		kernel_neon_begin();
		ce_aes_ecb_decrypt(walk.dst.virt.addr, walk.src.virt.addr,
				   ctx->key_dec, num_rounds(ctx), blocks);
		kernel_neon_end();
		err = skcipher_walk_done(&walk, walk.nbytes % AES_BLOCK_SIZE);
	}
	return err;
}

static int cbc_encrypt_walk(struct skcipher_request *req,
			    struct skcipher_walk *walk)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct crypto_aes_ctx *ctx = crypto_skcipher_ctx(tfm);
	unsigned int blocks;
	int err = 0;

	while ((blocks = (walk->nbytes / AES_BLOCK_SIZE))) {
		kernel_neon_begin();
		ce_aes_cbc_encrypt(walk->dst.virt.addr, walk->src.virt.addr,
				   ctx->key_enc, num_rounds(ctx), blocks,
				   walk->iv);
		kernel_neon_end();
		err = skcipher_walk_done(walk, walk->nbytes % AES_BLOCK_SIZE);
	}
	return err;
}

static int cbc_encrypt(struct skcipher_request *req)
{
	struct skcipher_walk walk;
	int err;

	err = skcipher_walk_virt(&walk, req, false);
	if (err)
		return err;
	return cbc_encrypt_walk(req, &walk);
}

static int cbc_decrypt_walk(struct skcipher_request *req,
			    struct skcipher_walk *walk)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct crypto_aes_ctx *ctx = crypto_skcipher_ctx(tfm);
	unsigned int blocks;
	int err = 0;

	while ((blocks = (walk->nbytes / AES_BLOCK_SIZE))) {
		kernel_neon_begin();
		ce_aes_cbc_decrypt(walk->dst.virt.addr, walk->src.virt.addr,
				   ctx->key_dec, num_rounds(ctx), blocks,
				   walk->iv);
		kernel_neon_end();
		err = skcipher_walk_done(walk, walk->nbytes % AES_BLOCK_SIZE);
	}
	return err;
}

static int cbc_decrypt(struct skcipher_request *req)
{
	struct skcipher_walk walk;
	int err;

	err = skcipher_walk_virt(&walk, req, false);
	if (err)
		return err;
	return cbc_decrypt_walk(req, &walk);
}

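/*
 * cts(cbc(aes)): everything up to the last two blocks is processed as plain
 * CBC, and the final two (possibly partial) blocks are then handled with
 * ciphertext stealing.  A request of exactly one block degenerates to plain
 * CBC; anything shorter is rejected.
 */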
static int cts_cbc_encrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct crypto_aes_ctx *ctx = crypto_skcipher_ctx(tfm);
	int cbc_blocks = DIV_ROUND_UP(req->cryptlen, AES_BLOCK_SIZE) - 2;
	struct scatterlist *src = req->src, *dst = req->dst;
	struct scatterlist sg_src[2], sg_dst[2];
	struct skcipher_request subreq;
	struct skcipher_walk walk;
	int err;

	skcipher_request_set_tfm(&subreq, tfm);
	skcipher_request_set_callback(&subreq, skcipher_request_flags(req),
				      NULL, NULL);

	if (req->cryptlen <= AES_BLOCK_SIZE) {
		if (req->cryptlen < AES_BLOCK_SIZE)
			return -EINVAL;
		cbc_blocks = 1;
	}

	if (cbc_blocks > 0) {
		skcipher_request_set_crypt(&subreq, req->src, req->dst,
					   cbc_blocks * AES_BLOCK_SIZE,
					   req->iv);

		err = skcipher_walk_virt(&walk, &subreq, false) ?:
		      cbc_encrypt_walk(&subreq, &walk);
		if (err)
			return err;

		if (req->cryptlen == AES_BLOCK_SIZE)
			return 0;

		dst = src = scatterwalk_ffwd(sg_src, req->src, subreq.cryptlen);
		if (req->dst != req->src)
			dst = scatterwalk_ffwd(sg_dst, req->dst,
					       subreq.cryptlen);
	}

	/* handle ciphertext stealing */
	skcipher_request_set_crypt(&subreq, src, dst,
				   req->cryptlen - cbc_blocks * AES_BLOCK_SIZE,
				   req->iv);

	err = skcipher_walk_virt(&walk, &subreq, false);
	if (err)
		return err;

	kernel_neon_begin();
	ce_aes_cbc_cts_encrypt(walk.dst.virt.addr, walk.src.virt.addr,
			       ctx->key_enc, num_rounds(ctx), walk.nbytes,
			       walk.iv);
	kernel_neon_end();

	return skcipher_walk_done(&walk, 0);
}

static int cts_cbc_decrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct crypto_aes_ctx *ctx = crypto_skcipher_ctx(tfm);
	int cbc_blocks = DIV_ROUND_UP(req->cryptlen, AES_BLOCK_SIZE) - 2;
	struct scatterlist *src = req->src, *dst = req->dst;
	struct scatterlist sg_src[2], sg_dst[2];
	struct skcipher_request subreq;
	struct skcipher_walk walk;
	int err;

	skcipher_request_set_tfm(&subreq, tfm);
	skcipher_request_set_callback(&subreq, skcipher_request_flags(req),
				      NULL, NULL);

	if (req->cryptlen <= AES_BLOCK_SIZE) {
		if (req->cryptlen < AES_BLOCK_SIZE)
			return -EINVAL;
		cbc_blocks = 1;
	}

	if (cbc_blocks > 0) {
		skcipher_request_set_crypt(&subreq, req->src, req->dst,
					   cbc_blocks * AES_BLOCK_SIZE,
					   req->iv);

		err = skcipher_walk_virt(&walk, &subreq, false) ?:
		      cbc_decrypt_walk(&subreq, &walk);
		if (err)
			return err;

		if (req->cryptlen == AES_BLOCK_SIZE)
			return 0;

		dst = src = scatterwalk_ffwd(sg_src, req->src, subreq.cryptlen);
		if (req->dst != req->src)
			dst = scatterwalk_ffwd(sg_dst, req->dst,
					       subreq.cryptlen);
	}

	/* handle ciphertext stealing */
	skcipher_request_set_crypt(&subreq, src, dst,
				   req->cryptlen - cbc_blocks * AES_BLOCK_SIZE,
				   req->iv);

	err = skcipher_walk_virt(&walk, &subreq, false);
	if (err)
		return err;

	kernel_neon_begin();
	ce_aes_cbc_cts_decrypt(walk.dst.virt.addr, walk.src.virt.addr,
			       ctx->key_dec, num_rounds(ctx), walk.nbytes,
			       walk.iv);
	kernel_neon_end();

	return skcipher_walk_done(&walk, 0);
}

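/*
 * CTR mode: full blocks are handled by the assembly routine directly.  For a
 * trailing partial block, blocks == -1 tells the assembly code to emit one
 * final keystream block into a stack buffer, and the remaining bytes are
 * then XORed into the destination with crypto_xor_cpy().
 */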
static int ctr_encrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct crypto_aes_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct skcipher_walk walk;
	int err, blocks;

	err = skcipher_walk_virt(&walk, req, false);

	while ((blocks = (walk.nbytes / AES_BLOCK_SIZE))) {
		kernel_neon_begin();
		ce_aes_ctr_encrypt(walk.dst.virt.addr, walk.src.virt.addr,
				   ctx->key_enc, num_rounds(ctx), blocks,
				   walk.iv);
		kernel_neon_end();
		err = skcipher_walk_done(&walk, walk.nbytes % AES_BLOCK_SIZE);
	}
	if (walk.nbytes) {
		u8 __aligned(8) tail[AES_BLOCK_SIZE];
		const u8 *tsrc = walk.src.virt.addr;
		unsigned int nbytes = walk.nbytes;
		u8 *tdst = walk.dst.virt.addr;

		/*
		 * Tell aes_ctr_encrypt() to process a tail block.
		 */
		blocks = -1;

		kernel_neon_begin();
		ce_aes_ctr_encrypt(tail, NULL, ctx->key_enc, num_rounds(ctx),
				   blocks, walk.iv);
		kernel_neon_end();
		crypto_xor_cpy(tdst, tsrc, tail, nbytes);
		err = skcipher_walk_done(&walk, 0);
	}
	return err;
}

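/*
 * XTS: requests must be at least one block long.  If the length is not a
 * multiple of AES_BLOCK_SIZE, the bulk of the request is processed first and
 * the last full block plus the partial tail are handled in a separate final
 * pass (ciphertext stealing).
 */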
static int xts_encrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct crypto_aes_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
	int err, first, rounds = num_rounds(&ctx->key1);
	int tail = req->cryptlen % AES_BLOCK_SIZE;
	struct scatterlist sg_src[2], sg_dst[2];
	struct skcipher_request subreq;
	struct scatterlist *src, *dst;
	struct skcipher_walk walk;

	if (req->cryptlen < AES_BLOCK_SIZE)
		return -EINVAL;

	err = skcipher_walk_virt(&walk, req, false);

	if (unlikely(tail > 0 && walk.nbytes < walk.total)) {
		int xts_blocks = DIV_ROUND_UP(req->cryptlen,
					      AES_BLOCK_SIZE) - 2;

		skcipher_walk_abort(&walk);

		skcipher_request_set_tfm(&subreq, tfm);
		skcipher_request_set_callback(&subreq,
					      skcipher_request_flags(req),
					      NULL, NULL);
		skcipher_request_set_crypt(&subreq, req->src, req->dst,
					   xts_blocks * AES_BLOCK_SIZE,
					   req->iv);
		req = &subreq;
		err = skcipher_walk_virt(&walk, req, false);
	} else {
		tail = 0;
	}

	for (first = 1; walk.nbytes >= AES_BLOCK_SIZE; first = 0) {
		int nbytes = walk.nbytes;

		if (walk.nbytes < walk.total)
			nbytes &= ~(AES_BLOCK_SIZE - 1);

		kernel_neon_begin();
		ce_aes_xts_encrypt(walk.dst.virt.addr, walk.src.virt.addr,
				   ctx->key1.key_enc, rounds, nbytes, walk.iv,
				   ctx->key2.key_enc, first);
		kernel_neon_end();
		err = skcipher_walk_done(&walk, walk.nbytes - nbytes);
	}

	if (err || likely(!tail))
		return err;

	dst = src = scatterwalk_ffwd(sg_src, req->src, req->cryptlen);
	if (req->dst != req->src)
		dst = scatterwalk_ffwd(sg_dst, req->dst, req->cryptlen);

	skcipher_request_set_crypt(req, src, dst, AES_BLOCK_SIZE + tail,
				   req->iv);

	err = skcipher_walk_virt(&walk, req, false);
	if (err)
		return err;

	kernel_neon_begin();
	ce_aes_xts_encrypt(walk.dst.virt.addr, walk.src.virt.addr,
			   ctx->key1.key_enc, rounds, walk.nbytes, walk.iv,
			   ctx->key2.key_enc, first);
	kernel_neon_end();

	return skcipher_walk_done(&walk, 0);
}

static int xts_decrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct crypto_aes_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
	int err, first, rounds = num_rounds(&ctx->key1);
	int tail = req->cryptlen % AES_BLOCK_SIZE;
	struct scatterlist sg_src[2], sg_dst[2];
	struct skcipher_request subreq;
	struct scatterlist *src, *dst;
	struct skcipher_walk walk;

	if (req->cryptlen < AES_BLOCK_SIZE)
		return -EINVAL;

	err = skcipher_walk_virt(&walk, req, false);

	if (unlikely(tail > 0 && walk.nbytes < walk.total)) {
		int xts_blocks = DIV_ROUND_UP(req->cryptlen,
					      AES_BLOCK_SIZE) - 2;

		skcipher_walk_abort(&walk);

		skcipher_request_set_tfm(&subreq, tfm);
		skcipher_request_set_callback(&subreq,
					      skcipher_request_flags(req),
					      NULL, NULL);
		skcipher_request_set_crypt(&subreq, req->src, req->dst,
					   xts_blocks * AES_BLOCK_SIZE,
					   req->iv);
		req = &subreq;
		err = skcipher_walk_virt(&walk, req, false);
	} else {
		tail = 0;
	}

	for (first = 1; walk.nbytes >= AES_BLOCK_SIZE; first = 0) {
		int nbytes = walk.nbytes;

		if (walk.nbytes < walk.total)
			nbytes &= ~(AES_BLOCK_SIZE - 1);

		kernel_neon_begin();
		ce_aes_xts_decrypt(walk.dst.virt.addr, walk.src.virt.addr,
				   ctx->key1.key_dec, rounds, nbytes, walk.iv,
				   ctx->key2.key_enc, first);
		kernel_neon_end();
		err = skcipher_walk_done(&walk, walk.nbytes - nbytes);
	}

	if (err || likely(!tail))
		return err;

	dst = src = scatterwalk_ffwd(sg_src, req->src, req->cryptlen);
	if (req->dst != req->src)
		dst = scatterwalk_ffwd(sg_dst, req->dst, req->cryptlen);

	skcipher_request_set_crypt(req, src, dst, AES_BLOCK_SIZE + tail,
				   req->iv);

	err = skcipher_walk_virt(&walk, req, false);
	if (err)
		return err;

	kernel_neon_begin();
	ce_aes_xts_decrypt(walk.dst.virt.addr, walk.src.virt.addr,
			   ctx->key1.key_dec, rounds, walk.nbytes, walk.iv,
			   ctx->key2.key_enc, first);
	kernel_neon_end();

	return skcipher_walk_done(&walk, 0);
}

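/*
 * The Crypto Extensions based implementations register with a priority of
 * 300 so that they are preferred over the generic, table-based C
 * implementations of the same algorithms.
 */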
static struct skcipher_alg aes_algs[] = { {
	.base.cra_name		= "ecb(aes)",
	.base.cra_driver_name	= "ecb-aes-ce",
	.base.cra_priority	= 300,
	.base.cra_blocksize	= AES_BLOCK_SIZE,
	.base.cra_ctxsize	= sizeof(struct crypto_aes_ctx),
	.base.cra_module	= THIS_MODULE,

	.min_keysize		= AES_MIN_KEY_SIZE,
	.max_keysize		= AES_MAX_KEY_SIZE,
	.setkey			= ce_aes_setkey,
	.encrypt		= ecb_encrypt,
	.decrypt		= ecb_decrypt,
}, {
	.base.cra_name		= "cbc(aes)",
	.base.cra_driver_name	= "cbc-aes-ce",
	.base.cra_priority	= 300,
	.base.cra_blocksize	= AES_BLOCK_SIZE,
	.base.cra_ctxsize	= sizeof(struct crypto_aes_ctx),
	.base.cra_module	= THIS_MODULE,

	.min_keysize		= AES_MIN_KEY_SIZE,
	.max_keysize		= AES_MAX_KEY_SIZE,
	.ivsize			= AES_BLOCK_SIZE,
	.setkey			= ce_aes_setkey,
	.encrypt		= cbc_encrypt,
	.decrypt		= cbc_decrypt,
}, {
	.base.cra_name		= "cts(cbc(aes))",
	.base.cra_driver_name	= "cts-cbc-aes-ce",
	.base.cra_priority	= 300,
	.base.cra_blocksize	= AES_BLOCK_SIZE,
	.base.cra_ctxsize	= sizeof(struct crypto_aes_ctx),
	.base.cra_module	= THIS_MODULE,

	.min_keysize		= AES_MIN_KEY_SIZE,
	.max_keysize		= AES_MAX_KEY_SIZE,
	.ivsize			= AES_BLOCK_SIZE,
	.walksize		= 2 * AES_BLOCK_SIZE,
	.setkey			= ce_aes_setkey,
	.encrypt		= cts_cbc_encrypt,
	.decrypt		= cts_cbc_decrypt,
}, {
	.base.cra_name		= "ctr(aes)",
	.base.cra_driver_name	= "ctr-aes-ce",
	.base.cra_priority	= 300,
	.base.cra_blocksize	= 1,
	.base.cra_ctxsize	= sizeof(struct crypto_aes_ctx),
	.base.cra_module	= THIS_MODULE,

	.min_keysize		= AES_MIN_KEY_SIZE,
	.max_keysize		= AES_MAX_KEY_SIZE,
	.ivsize			= AES_BLOCK_SIZE,
	.chunksize		= AES_BLOCK_SIZE,
	.setkey			= ce_aes_setkey,
	.encrypt		= ctr_encrypt,
	.decrypt		= ctr_encrypt,
}, {
	.base.cra_name		= "xts(aes)",
	.base.cra_driver_name	= "xts-aes-ce",
	.base.cra_priority	= 300,
	.base.cra_blocksize	= AES_BLOCK_SIZE,
	.base.cra_ctxsize	= sizeof(struct crypto_aes_xts_ctx),
	.base.cra_module	= THIS_MODULE,

	.min_keysize		= 2 * AES_MIN_KEY_SIZE,
	.max_keysize		= 2 * AES_MAX_KEY_SIZE,
	.ivsize			= AES_BLOCK_SIZE,
	.walksize		= 2 * AES_BLOCK_SIZE,
	.setkey			= xts_set_key,
	.encrypt		= xts_encrypt,
	.decrypt		= xts_decrypt,
} };

static void aes_exit(void)
{
	crypto_unregister_skciphers(aes_algs, ARRAY_SIZE(aes_algs));
}

static int __init aes_init(void)
{
	return crypto_register_skciphers(aes_algs, ARRAY_SIZE(aes_algs));
}

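/*
 * Only register the algorithms on CPUs that advertise the AES instructions:
 * module_cpu_feature_match() ties module (auto)loading and initialisation to
 * the corresponding CPU feature bit.
 */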
module_cpu_feature_match(AES, aes_init);
module_exit(aes_exit);