GitHub Repository: torvalds/linux
Path: blob/master/drivers/crypto/amcc/crypto4xx_core.c
1
// SPDX-License-Identifier: GPL-2.0-or-later
2
/*
3
* AMCC SoC PPC4xx Crypto Driver
4
*
5
* Copyright (c) 2008 Applied Micro Circuits Corporation.
6
* All rights reserved. James Hsiao <[email protected]>
7
*
8
* This file implements AMCC crypto offload Linux device driver for use with
9
* Linux CryptoAPI.
10
*/
11
12
#include <linux/kernel.h>
13
#include <linux/interrupt.h>
14
#include <linux/spinlock_types.h>
15
#include <linux/random.h>
16
#include <linux/scatterlist.h>
17
#include <linux/crypto.h>
18
#include <linux/dma-mapping.h>
19
#include <linux/platform_device.h>
20
#include <linux/init.h>
21
#include <linux/module.h>
22
#include <linux/of_address.h>
23
#include <linux/of_irq.h>
24
#include <linux/of_platform.h>
25
#include <linux/slab.h>
26
#include <asm/dcr.h>
27
#include <asm/dcr-regs.h>
28
#include <asm/cacheflush.h>
29
#include <crypto/aead.h>
30
#include <crypto/aes.h>
31
#include <crypto/ctr.h>
32
#include <crypto/gcm.h>
33
#include <crypto/sha1.h>
34
#include <crypto/rng.h>
35
#include <crypto/scatterwalk.h>
36
#include <crypto/skcipher.h>
37
#include <crypto/internal/aead.h>
38
#include <crypto/internal/rng.h>
39
#include <crypto/internal/skcipher.h>
40
#include "crypto4xx_reg_def.h"
41
#include "crypto4xx_core.h"
42
#include "crypto4xx_sa.h"
43
#include "crypto4xx_trng.h"
44
45
#define PPC4XX_SEC_VERSION_STR "0.5"
46
47
/*
48
* PPC4xx Crypto Engine Initialization Routine
49
*/
50
static void crypto4xx_hw_init(struct crypto4xx_device *dev)
51
{
52
union ce_ring_size ring_size;
53
union ce_ring_control ring_ctrl;
54
union ce_part_ring_size part_ring_size;
55
union ce_io_threshold io_threshold;
56
u32 rand_num;
57
union ce_pe_dma_cfg pe_dma_cfg;
58
u32 device_ctrl;
59
60
writel(PPC4XX_BYTE_ORDER, dev->ce_base + CRYPTO4XX_BYTE_ORDER_CFG);
61
/* setup pe dma, including reset of sg, pdr and pe, then release the resets */
62
pe_dma_cfg.w = 0;
63
pe_dma_cfg.bf.bo_sgpd_en = 1;
64
pe_dma_cfg.bf.bo_data_en = 0;
65
pe_dma_cfg.bf.bo_sa_en = 1;
66
pe_dma_cfg.bf.bo_pd_en = 1;
67
pe_dma_cfg.bf.dynamic_sa_en = 1;
68
pe_dma_cfg.bf.reset_sg = 1;
69
pe_dma_cfg.bf.reset_pdr = 1;
70
pe_dma_cfg.bf.reset_pe = 1;
71
writel(pe_dma_cfg.w, dev->ce_base + CRYPTO4XX_PE_DMA_CFG);
72
/* release reset of pe, sg and pdr */
73
pe_dma_cfg.bf.pe_mode = 0;
74
pe_dma_cfg.bf.reset_sg = 0;
75
pe_dma_cfg.bf.reset_pdr = 0;
76
pe_dma_cfg.bf.reset_pe = 0;
77
pe_dma_cfg.bf.bo_td_en = 0;
78
writel(pe_dma_cfg.w, dev->ce_base + CRYPTO4XX_PE_DMA_CFG);
79
writel(dev->pdr_pa, dev->ce_base + CRYPTO4XX_PDR_BASE);
80
writel(dev->pdr_pa, dev->ce_base + CRYPTO4XX_RDR_BASE);
81
writel(PPC4XX_PRNG_CTRL_AUTO_EN, dev->ce_base + CRYPTO4XX_PRNG_CTRL);
82
get_random_bytes(&rand_num, sizeof(rand_num));
83
writel(rand_num, dev->ce_base + CRYPTO4XX_PRNG_SEED_L);
84
get_random_bytes(&rand_num, sizeof(rand_num));
85
writel(rand_num, dev->ce_base + CRYPTO4XX_PRNG_SEED_H);
86
ring_size.w = 0;
87
ring_size.bf.ring_offset = PPC4XX_PD_SIZE;
88
ring_size.bf.ring_size = PPC4XX_NUM_PD;
89
writel(ring_size.w, dev->ce_base + CRYPTO4XX_RING_SIZE);
90
ring_ctrl.w = 0;
91
writel(ring_ctrl.w, dev->ce_base + CRYPTO4XX_RING_CTRL);
92
device_ctrl = readl(dev->ce_base + CRYPTO4XX_DEVICE_CTRL);
93
device_ctrl |= PPC4XX_DC_3DES_EN;
94
writel(device_ctrl, dev->ce_base + CRYPTO4XX_DEVICE_CTRL);
95
writel(dev->gdr_pa, dev->ce_base + CRYPTO4XX_GATH_RING_BASE);
96
writel(dev->sdr_pa, dev->ce_base + CRYPTO4XX_SCAT_RING_BASE);
97
part_ring_size.w = 0;
98
part_ring_size.bf.sdr_size = PPC4XX_SDR_SIZE;
99
part_ring_size.bf.gdr_size = PPC4XX_GDR_SIZE;
100
writel(part_ring_size.w, dev->ce_base + CRYPTO4XX_PART_RING_SIZE);
101
writel(PPC4XX_SD_BUFFER_SIZE, dev->ce_base + CRYPTO4XX_PART_RING_CFG);
102
io_threshold.w = 0;
103
io_threshold.bf.output_threshold = PPC4XX_OUTPUT_THRESHOLD;
104
io_threshold.bf.input_threshold = PPC4XX_INPUT_THRESHOLD;
105
writel(io_threshold.w, dev->ce_base + CRYPTO4XX_IO_THRESHOLD);
106
writel(0, dev->ce_base + CRYPTO4XX_PDR_BASE_UADDR);
107
writel(0, dev->ce_base + CRYPTO4XX_RDR_BASE_UADDR);
108
writel(0, dev->ce_base + CRYPTO4XX_PKT_SRC_UADDR);
109
writel(0, dev->ce_base + CRYPTO4XX_PKT_DEST_UADDR);
110
writel(0, dev->ce_base + CRYPTO4XX_SA_UADDR);
111
writel(0, dev->ce_base + CRYPTO4XX_GATH_RING_BASE_UADDR);
112
writel(0, dev->ce_base + CRYPTO4XX_SCAT_RING_BASE_UADDR);
113
/* enable the packet engine, keep pe, sg and pdr out of reset */
114
pe_dma_cfg.bf.pe_mode = 1;
115
pe_dma_cfg.bf.reset_sg = 0;
116
pe_dma_cfg.bf.reset_pdr = 0;
117
pe_dma_cfg.bf.reset_pe = 0;
118
pe_dma_cfg.bf.bo_td_en = 0;
119
writel(pe_dma_cfg.w, dev->ce_base + CRYPTO4XX_PE_DMA_CFG);
120
/* clear all pending interrupts */
121
writel(PPC4XX_INTERRUPT_CLR, dev->ce_base + CRYPTO4XX_INT_CLR);
122
writel(PPC4XX_INT_DESCR_CNT, dev->ce_base + CRYPTO4XX_INT_DESCR_CNT);
123
writel(PPC4XX_INT_DESCR_CNT, dev->ce_base + CRYPTO4XX_INT_DESCR_CNT);
124
writel(PPC4XX_INT_CFG, dev->ce_base + CRYPTO4XX_INT_CFG);
125
if (dev->is_revb) {
126
writel(PPC4XX_INT_TIMEOUT_CNT_REVB << 10,
127
dev->ce_base + CRYPTO4XX_INT_TIMEOUT_CNT);
128
writel(PPC4XX_PD_DONE_INT | PPC4XX_TMO_ERR_INT,
129
dev->ce_base + CRYPTO4XX_INT_EN);
130
} else {
131
writel(PPC4XX_PD_DONE_INT, dev->ce_base + CRYPTO4XX_INT_EN);
132
}
133
}
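/*
* Note that crypto4xx_hw_init() programs the physical base addresses of the
* packet descriptor ring (used for both the PDR and RDR registers), the
* gather ring and the scatter ring into the engine, so those rings must
* already have been built; crypto4xx_probe() therefore calls
* crypto4xx_build_sdr()/_pdr()/_gdr() before invoking this routine.
*/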
134
135
int crypto4xx_alloc_sa(struct crypto4xx_ctx *ctx, u32 size)
136
{
137
ctx->sa_in = kcalloc(size, 4, GFP_ATOMIC);
138
if (ctx->sa_in == NULL)
139
return -ENOMEM;
140
141
ctx->sa_out = kcalloc(size, 4, GFP_ATOMIC);
142
if (ctx->sa_out == NULL) {
143
kfree(ctx->sa_in);
144
ctx->sa_in = NULL;
145
return -ENOMEM;
146
}
147
148
ctx->sa_len = size;
149
150
return 0;
151
}
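/*
* The "size" passed to crypto4xx_alloc_sa() (and the sa_len it stores) is
* counted in 32-bit words rather than bytes: the buffers above are
* allocated with kcalloc(size, 4, ...) and crypto4xx_build_pd() later
* copies them with memcpy(sa, req_sa, sa_len * 4).
*/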
152
153
void crypto4xx_free_sa(struct crypto4xx_ctx *ctx)
154
{
155
kfree(ctx->sa_in);
156
ctx->sa_in = NULL;
157
kfree(ctx->sa_out);
158
ctx->sa_out = NULL;
159
ctx->sa_len = 0;
160
}
161
162
/*
* alloc memory for the packet descriptor ring and for the shadow SA and
* state record pools that back its entries; the per-entry bookkeeping
* lives in pdr_uinfo
*/
167
static u32 crypto4xx_build_pdr(struct crypto4xx_device *dev)
168
{
169
int i;
170
dev->pdr = dma_alloc_coherent(dev->core_dev->device,
171
sizeof(struct ce_pd) * PPC4XX_NUM_PD,
172
&dev->pdr_pa, GFP_KERNEL);
173
if (!dev->pdr)
174
return -ENOMEM;
175
176
dev->pdr_uinfo = kcalloc(PPC4XX_NUM_PD, sizeof(struct pd_uinfo),
177
GFP_KERNEL);
178
if (!dev->pdr_uinfo) {
179
dma_free_coherent(dev->core_dev->device,
180
sizeof(struct ce_pd) * PPC4XX_NUM_PD,
181
dev->pdr,
182
dev->pdr_pa);
183
return -ENOMEM;
184
}
185
dev->shadow_sa_pool = dma_alloc_coherent(dev->core_dev->device,
186
sizeof(union shadow_sa_buf) * PPC4XX_NUM_PD,
187
&dev->shadow_sa_pool_pa,
188
GFP_KERNEL);
189
if (!dev->shadow_sa_pool)
190
return -ENOMEM;
191
192
dev->shadow_sr_pool = dma_alloc_coherent(dev->core_dev->device,
193
sizeof(struct sa_state_record) * PPC4XX_NUM_PD,
194
&dev->shadow_sr_pool_pa, GFP_KERNEL);
195
if (!dev->shadow_sr_pool)
196
return -ENOMEM;
197
for (i = 0; i < PPC4XX_NUM_PD; i++) {
198
struct ce_pd *pd = &dev->pdr[i];
199
struct pd_uinfo *pd_uinfo = &dev->pdr_uinfo[i];
200
201
pd->sa = dev->shadow_sa_pool_pa +
202
sizeof(union shadow_sa_buf) * i;
203
204
/* alloc 256 bytes which is enough for any kind of dynamic sa */
205
pd_uinfo->sa_va = &dev->shadow_sa_pool[i].sa;
206
207
/* alloc state record */
208
pd_uinfo->sr_va = &dev->shadow_sr_pool[i];
209
pd_uinfo->sr_pa = dev->shadow_sr_pool_pa +
210
sizeof(struct sa_state_record) * i;
211
}
212
213
return 0;
214
}
215
216
static void crypto4xx_destroy_pdr(struct crypto4xx_device *dev)
217
{
218
if (dev->pdr)
219
dma_free_coherent(dev->core_dev->device,
220
sizeof(struct ce_pd) * PPC4XX_NUM_PD,
221
dev->pdr, dev->pdr_pa);
222
223
if (dev->shadow_sa_pool)
224
dma_free_coherent(dev->core_dev->device,
225
sizeof(union shadow_sa_buf) * PPC4XX_NUM_PD,
226
dev->shadow_sa_pool, dev->shadow_sa_pool_pa);
227
228
if (dev->shadow_sr_pool)
229
dma_free_coherent(dev->core_dev->device,
230
sizeof(struct sa_state_record) * PPC4XX_NUM_PD,
231
dev->shadow_sr_pool, dev->shadow_sr_pool_pa);
232
233
kfree(dev->pdr_uinfo);
234
}
235
236
static u32 crypto4xx_get_pd_from_pdr_nolock(struct crypto4xx_device *dev)
237
{
238
u32 retval;
239
u32 tmp;
240
241
retval = dev->pdr_head;
242
tmp = (dev->pdr_head + 1) % PPC4XX_NUM_PD;
243
244
if (tmp == dev->pdr_tail)
245
return ERING_WAS_FULL;
246
247
dev->pdr_head = tmp;
248
249
return retval;
250
}
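/*
* The ring is treated as full when advancing the head would make it equal
* the tail, so one slot always stays unused and the usable capacity is
* PPC4XX_NUM_PD - 1 entries. The "_nolock" suffix reflects that the caller
* (crypto4xx_build_pd()) already holds dev->core_dev->lock.
*/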
251
252
static u32 crypto4xx_put_pd_to_pdr(struct crypto4xx_device *dev, u32 idx)
253
{
254
struct pd_uinfo *pd_uinfo = &dev->pdr_uinfo[idx];
255
u32 tail;
256
unsigned long flags;
257
258
spin_lock_irqsave(&dev->core_dev->lock, flags);
259
pd_uinfo->state = PD_ENTRY_FREE;
260
261
if (dev->pdr_tail != PPC4XX_LAST_PD)
262
dev->pdr_tail++;
263
else
264
dev->pdr_tail = 0;
265
tail = dev->pdr_tail;
266
spin_unlock_irqrestore(&dev->core_dev->lock, flags);
267
268
return tail;
269
}
270
271
/*
272
* alloc memory for the gather ring
273
* no need to alloc buf for the ring
274
* gdr_tail, gdr_head and gdr_count are initialized by this function
275
*/
276
static u32 crypto4xx_build_gdr(struct crypto4xx_device *dev)
277
{
278
dev->gdr = dma_alloc_coherent(dev->core_dev->device,
279
sizeof(struct ce_gd) * PPC4XX_NUM_GD,
280
&dev->gdr_pa, GFP_KERNEL);
281
if (!dev->gdr)
282
return -ENOMEM;
283
284
return 0;
285
}
286
287
static inline void crypto4xx_destroy_gdr(struct crypto4xx_device *dev)
288
{
289
if (dev->gdr)
290
dma_free_coherent(dev->core_dev->device,
291
sizeof(struct ce_gd) * PPC4XX_NUM_GD,
292
dev->gdr, dev->gdr_pa);
293
}
294
295
/*
* when this function is called, preemption or interrupts
* must be disabled; the caller is expected to hold dev->core_dev->lock
*/
299
static u32 crypto4xx_get_n_gd(struct crypto4xx_device *dev, int n)
300
{
301
u32 retval;
302
u32 tmp;
303
304
if (n >= PPC4XX_NUM_GD)
305
return ERING_WAS_FULL;
306
307
retval = dev->gdr_head;
308
tmp = (dev->gdr_head + n) % PPC4XX_NUM_GD;
309
if (dev->gdr_head > dev->gdr_tail) {
310
if (tmp < dev->gdr_head && tmp >= dev->gdr_tail)
311
return ERING_WAS_FULL;
312
} else if (dev->gdr_head < dev->gdr_tail) {
313
if (tmp < dev->gdr_head || tmp >= dev->gdr_tail)
314
return ERING_WAS_FULL;
315
}
316
dev->gdr_head = tmp;
317
318
return retval;
319
}
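/*
* Illustrative example of the wrap-around check above (numbers made up for
* illustration only; the real ring size PPC4XX_NUM_GD is defined in
* crypto4xx_core.h): with a 256-entry ring, gdr_head = 250, gdr_tail = 10
* and n = 8, tmp becomes (250 + 8) % 256 = 2. Since 2 does not fall inside
* the still-occupied interval [gdr_tail, gdr_head) = [10, 250), the
* allocation succeeds and gdr_head wraps around to 2.
*/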
320
321
static u32 crypto4xx_put_gd_to_gdr(struct crypto4xx_device *dev)
322
{
323
unsigned long flags;
324
325
spin_lock_irqsave(&dev->core_dev->lock, flags);
326
if (dev->gdr_tail == dev->gdr_head) {
327
spin_unlock_irqrestore(&dev->core_dev->lock, flags);
328
return 0;
329
}
330
331
if (dev->gdr_tail != PPC4XX_LAST_GD)
332
dev->gdr_tail++;
333
else
334
dev->gdr_tail = 0;
335
336
spin_unlock_irqrestore(&dev->core_dev->lock, flags);
337
338
return 0;
339
}
340
341
static inline struct ce_gd *crypto4xx_get_gdp(struct crypto4xx_device *dev,
342
dma_addr_t *gd_dma, u32 idx)
343
{
344
*gd_dma = dev->gdr_pa + sizeof(struct ce_gd) * idx;
345
346
return &dev->gdr[idx];
347
}
348
349
/*
350
* alloc memory for the scatter ring
351
* need to alloc buf for the ring
352
* sdr_tail, sdr_head and sdr_count are initialized by this function
353
*/
354
static u32 crypto4xx_build_sdr(struct crypto4xx_device *dev)
355
{
356
int i;
357
358
dev->scatter_buffer_va =
359
dma_alloc_coherent(dev->core_dev->device,
360
PPC4XX_SD_BUFFER_SIZE * PPC4XX_NUM_SD,
361
&dev->scatter_buffer_pa, GFP_KERNEL);
362
if (!dev->scatter_buffer_va)
363
return -ENOMEM;
364
365
/* alloc memory for scatter descriptor ring */
366
dev->sdr = dma_alloc_coherent(dev->core_dev->device,
367
sizeof(struct ce_sd) * PPC4XX_NUM_SD,
368
&dev->sdr_pa, GFP_KERNEL);
369
if (!dev->sdr)
370
return -ENOMEM;
371
372
for (i = 0; i < PPC4XX_NUM_SD; i++) {
373
dev->sdr[i].ptr = dev->scatter_buffer_pa +
374
PPC4XX_SD_BUFFER_SIZE * i;
375
}
376
377
return 0;
378
}
379
380
static void crypto4xx_destroy_sdr(struct crypto4xx_device *dev)
381
{
382
if (dev->sdr)
383
dma_free_coherent(dev->core_dev->device,
384
sizeof(struct ce_sd) * PPC4XX_NUM_SD,
385
dev->sdr, dev->sdr_pa);
386
387
if (dev->scatter_buffer_va)
388
dma_free_coherent(dev->core_dev->device,
389
PPC4XX_SD_BUFFER_SIZE * PPC4XX_NUM_SD,
390
dev->scatter_buffer_va,
391
dev->scatter_buffer_pa);
392
}
393
394
/*
* when this function is called, preemption or interrupts
* must be disabled; the caller is expected to hold dev->core_dev->lock
*/
398
static u32 crypto4xx_get_n_sd(struct crypto4xx_device *dev, int n)
399
{
400
u32 retval;
401
u32 tmp;
402
403
if (n >= PPC4XX_NUM_SD)
404
return ERING_WAS_FULL;
405
406
retval = dev->sdr_head;
407
tmp = (dev->sdr_head + n) % PPC4XX_NUM_SD;
408
if (dev->sdr_head > dev->sdr_tail) {
409
if (tmp < dev->sdr_head && tmp >= dev->sdr_tail)
410
return ERING_WAS_FULL;
411
} else if (dev->sdr_head < dev->sdr_tail) {
412
if (tmp < dev->sdr_head || tmp >= dev->sdr_tail)
413
return ERING_WAS_FULL;
414
} /* the head == tail (empty) case is already taken care of */
415
dev->sdr_head = tmp;
416
417
return retval;
418
}
419
420
static u32 crypto4xx_put_sd_to_sdr(struct crypto4xx_device *dev)
421
{
422
unsigned long flags;
423
424
spin_lock_irqsave(&dev->core_dev->lock, flags);
425
if (dev->sdr_tail == dev->sdr_head) {
426
spin_unlock_irqrestore(&dev->core_dev->lock, flags);
427
return 0;
428
}
429
if (dev->sdr_tail != PPC4XX_LAST_SD)
430
dev->sdr_tail++;
431
else
432
dev->sdr_tail = 0;
433
spin_unlock_irqrestore(&dev->core_dev->lock, flags);
434
435
return 0;
436
}
437
438
static inline struct ce_sd *crypto4xx_get_sdp(struct crypto4xx_device *dev,
439
dma_addr_t *sd_dma, u32 idx)
440
{
441
*sd_dma = dev->sdr_pa + sizeof(struct ce_sd) * idx;
442
443
return &dev->sdr[idx];
444
}
445
446
static void crypto4xx_copy_pkt_to_dst(struct crypto4xx_device *dev,
447
struct ce_pd *pd,
448
struct pd_uinfo *pd_uinfo,
449
u32 nbytes,
450
struct scatterlist *dst)
451
{
452
unsigned int first_sd = pd_uinfo->first_sd;
453
unsigned int last_sd;
454
unsigned int overflow = 0;
455
unsigned int to_copy;
456
unsigned int dst_start = 0;
457
458
/*
* Because the scatter buffers are all neatly organized in one
* big contiguous ringbuffer, scatterwalk_map_and_copy() can
* be instructed to copy a range of buffers in one go.
*/
463
464
last_sd = (first_sd + pd_uinfo->num_sd);
465
if (last_sd > PPC4XX_LAST_SD) {
466
last_sd = PPC4XX_LAST_SD;
467
overflow = last_sd % PPC4XX_NUM_SD;
468
}
469
470
while (nbytes) {
471
void *buf = dev->scatter_buffer_va +
472
first_sd * PPC4XX_SD_BUFFER_SIZE;
473
474
to_copy = min(nbytes, PPC4XX_SD_BUFFER_SIZE *
475
(1 + last_sd - first_sd));
476
scatterwalk_map_and_copy(buf, dst, dst_start, to_copy, 1);
477
nbytes -= to_copy;
478
479
if (overflow) {
480
first_sd = 0;
481
last_sd = overflow;
482
dst_start += to_copy;
483
overflow = 0;
484
}
485
}
486
}
487
488
static void crypto4xx_ret_sg_desc(struct crypto4xx_device *dev,
489
struct pd_uinfo *pd_uinfo)
490
{
491
int i;
492
if (pd_uinfo->num_gd) {
493
for (i = 0; i < pd_uinfo->num_gd; i++)
494
crypto4xx_put_gd_to_gdr(dev);
495
pd_uinfo->first_gd = 0xffffffff;
496
pd_uinfo->num_gd = 0;
497
}
498
if (pd_uinfo->num_sd) {
499
for (i = 0; i < pd_uinfo->num_sd; i++)
500
crypto4xx_put_sd_to_sdr(dev);
501
502
pd_uinfo->first_sd = 0xffffffff;
503
pd_uinfo->num_sd = 0;
504
}
505
}
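/*
* first_gd and first_sd are reset to 0xffffffff here, the same sentinel
* value crypto4xx_build_pd() uses to mark "no gather/scatter descriptors
* in use" for a packet descriptor.
*/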
506
507
static void crypto4xx_cipher_done(struct crypto4xx_device *dev,
508
struct pd_uinfo *pd_uinfo,
509
struct ce_pd *pd)
510
{
511
struct skcipher_request *req;
512
struct scatterlist *dst;
513
514
req = skcipher_request_cast(pd_uinfo->async_req);
515
516
if (pd_uinfo->sa_va->sa_command_0.bf.scatter) {
517
crypto4xx_copy_pkt_to_dst(dev, pd, pd_uinfo,
518
req->cryptlen, req->dst);
519
} else {
520
dst = pd_uinfo->dest_va;
521
dma_unmap_page(dev->core_dev->device, pd->dest, dst->length,
522
DMA_FROM_DEVICE);
523
}
524
525
if (pd_uinfo->sa_va->sa_command_0.bf.save_iv == SA_SAVE_IV) {
526
struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
527
528
crypto4xx_memcpy_from_le32((u32 *)req->iv,
529
pd_uinfo->sr_va->save_iv,
530
crypto_skcipher_ivsize(skcipher));
531
}
532
533
crypto4xx_ret_sg_desc(dev, pd_uinfo);
534
535
if (pd_uinfo->state & PD_ENTRY_BUSY)
536
skcipher_request_complete(req, -EINPROGRESS);
537
skcipher_request_complete(req, 0);
538
}
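/*
* When PD_ENTRY_BUSY is set, the request was accepted while the engine was
* past its busy threshold (see crypto4xx_build_pd()), so it is first
* completed with -EINPROGRESS to move it off the crypto API backlog and
* then completed again with the final result. crypto4xx_aead_done() below
* follows the same pattern.
*/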
539
540
static void crypto4xx_aead_done(struct crypto4xx_device *dev,
541
struct pd_uinfo *pd_uinfo,
542
struct ce_pd *pd)
543
{
544
struct aead_request *aead_req = container_of(pd_uinfo->async_req,
545
struct aead_request, base);
546
struct scatterlist *dst = pd_uinfo->dest_va;
547
size_t cp_len = crypto_aead_authsize(
548
crypto_aead_reqtfm(aead_req));
549
u32 icv[AES_BLOCK_SIZE];
550
int err = 0;
551
552
if (pd_uinfo->sa_va->sa_command_0.bf.scatter) {
553
crypto4xx_copy_pkt_to_dst(dev, pd, pd_uinfo,
554
pd->pd_ctl_len.bf.pkt_len,
555
dst);
556
} else {
557
dma_unmap_page(dev->core_dev->device, pd->dest, dst->length,
558
DMA_FROM_DEVICE);
559
}
560
561
if (pd_uinfo->sa_va->sa_command_0.bf.dir == DIR_OUTBOUND) {
562
/* append icv at the end */
563
crypto4xx_memcpy_from_le32(icv, pd_uinfo->sr_va->save_digest,
564
sizeof(icv));
565
566
scatterwalk_map_and_copy(icv, dst, aead_req->cryptlen,
567
cp_len, 1);
568
} else {
569
/* check icv at the end */
570
scatterwalk_map_and_copy(icv, aead_req->src,
571
aead_req->assoclen + aead_req->cryptlen -
572
cp_len, cp_len, 0);
573
574
crypto4xx_memcpy_from_le32(icv, icv, sizeof(icv));
575
576
if (crypto_memneq(icv, pd_uinfo->sr_va->save_digest, cp_len))
577
err = -EBADMSG;
578
}
579
580
crypto4xx_ret_sg_desc(dev, pd_uinfo);
581
582
if (pd->pd_ctl.bf.status & 0xff) {
583
if (!__ratelimit(&dev->aead_ratelimit)) {
584
if (pd->pd_ctl.bf.status & 2)
585
pr_err("pad fail error\n");
586
if (pd->pd_ctl.bf.status & 4)
587
pr_err("seqnum fail\n");
588
if (pd->pd_ctl.bf.status & 8)
589
pr_err("error _notify\n");
590
pr_err("aead return err status = 0x%02x\n",
591
pd->pd_ctl.bf.status & 0xff);
592
pr_err("pd pad_ctl = 0x%08x\n",
593
pd->pd_ctl.bf.pd_pad_ctl);
594
}
595
err = -EINVAL;
596
}
597
598
if (pd_uinfo->state & PD_ENTRY_BUSY)
599
aead_request_complete(aead_req, -EINPROGRESS);
600
601
aead_request_complete(aead_req, err);
602
}
603
604
static void crypto4xx_pd_done(struct crypto4xx_device *dev, u32 idx)
605
{
606
struct ce_pd *pd = &dev->pdr[idx];
607
struct pd_uinfo *pd_uinfo = &dev->pdr_uinfo[idx];
608
609
switch (crypto_tfm_alg_type(pd_uinfo->async_req->tfm)) {
610
case CRYPTO_ALG_TYPE_SKCIPHER:
611
crypto4xx_cipher_done(dev, pd_uinfo, pd);
612
break;
613
case CRYPTO_ALG_TYPE_AEAD:
614
crypto4xx_aead_done(dev, pd_uinfo, pd);
615
break;
616
}
617
}
618
619
static void crypto4xx_stop_all(struct crypto4xx_core_device *core_dev)
620
{
621
crypto4xx_destroy_pdr(core_dev->dev);
622
crypto4xx_destroy_gdr(core_dev->dev);
623
crypto4xx_destroy_sdr(core_dev->dev);
624
}
625
626
static u32 get_next_gd(u32 current)
627
{
628
if (current != PPC4XX_LAST_GD)
629
return current + 1;
630
else
631
return 0;
632
}
633
634
static u32 get_next_sd(u32 current)
635
{
636
if (current != PPC4XX_LAST_SD)
637
return current + 1;
638
else
639
return 0;
640
}
641
642
int crypto4xx_build_pd(struct crypto_async_request *req,
643
struct crypto4xx_ctx *ctx,
644
struct scatterlist *src,
645
struct scatterlist *dst,
646
const unsigned int datalen,
647
const void *iv, const u32 iv_len,
648
const struct dynamic_sa_ctl *req_sa,
649
const unsigned int sa_len,
650
const unsigned int assoclen,
651
struct scatterlist *_dst)
652
{
653
struct crypto4xx_device *dev = ctx->dev;
654
struct dynamic_sa_ctl *sa;
655
struct ce_gd *gd;
656
struct ce_pd *pd;
657
u32 num_gd, num_sd;
658
u32 fst_gd = 0xffffffff;
659
u32 fst_sd = 0xffffffff;
660
u32 pd_entry;
661
unsigned long flags;
662
struct pd_uinfo *pd_uinfo;
663
unsigned int nbytes = datalen;
664
size_t offset_to_sr_ptr;
665
u32 gd_idx = 0;
666
int tmp;
667
bool is_busy, force_sd;
668
669
/*
* There's a very subtle/disguised "bug" in the hardware that
* gets indirectly mentioned in 18.1.3.5 Encryption/Decryption
* of the hardware spec:
* *drum roll* the AES/(T)DES OFB and CFB modes are listed as
* operation modes for >>> "Block ciphers" <<<.
*
* To work around this issue and stop the hardware from causing
* "overran dst buffer" errors on ciphertexts that are not a multiple
* of 16 (AES_BLOCK_SIZE), we force the driver to use the
* scatter buffers.
*/
681
force_sd = (req_sa->sa_command_1.bf.crypto_mode9_8 == CRYPTO_MODE_CFB
682
|| req_sa->sa_command_1.bf.crypto_mode9_8 == CRYPTO_MODE_OFB)
683
&& (datalen % AES_BLOCK_SIZE);
684
685
/* figure how many gd are needed */
686
tmp = sg_nents_for_len(src, assoclen + datalen);
687
if (tmp < 0) {
688
dev_err(dev->core_dev->device, "Invalid number of src SG.\n");
689
return tmp;
690
}
691
if (tmp == 1)
692
tmp = 0;
693
num_gd = tmp;
694
695
if (assoclen) {
696
nbytes += assoclen;
697
dst = scatterwalk_ffwd(_dst, dst, assoclen);
698
}
699
700
/* figure how many sd are needed */
701
if (sg_is_last(dst) && force_sd == false) {
702
num_sd = 0;
703
} else {
704
if (datalen > PPC4XX_SD_BUFFER_SIZE) {
705
num_sd = datalen / PPC4XX_SD_BUFFER_SIZE;
706
if (datalen % PPC4XX_SD_BUFFER_SIZE)
707
num_sd++;
708
} else {
709
num_sd = 1;
710
}
711
}
712
713
/*
* The following section of code needs to be protected by the lock.
* The gather ring and scatter ring allocations need to be consecutive.
* If we run out of any kind of descriptor, the descriptors already
* acquired must be returned to their original place.
*/
719
spin_lock_irqsave(&dev->core_dev->lock, flags);
720
/*
* Let the caller know to slow down once more than 13/16ths = 81%
* of the available data contexts are being used simultaneously.
*
* With PPC4XX_NUM_PD = 256, this leaves a "backlog queue" for
* 31 more contexts before new requests have to be rejected.
*/
727
if (req->flags & CRYPTO_TFM_REQ_MAY_BACKLOG) {
728
is_busy = ((dev->pdr_head - dev->pdr_tail) % PPC4XX_NUM_PD) >=
729
((PPC4XX_NUM_PD * 13) / 16);
730
} else {
731
/*
* To fix contention issues between ipsec (no backlog) and
* dm-crypt (backlog), reserve 32 entries for "no backlog"
* data contexts.
*/
736
is_busy = ((dev->pdr_head - dev->pdr_tail) % PPC4XX_NUM_PD) >=
737
((PPC4XX_NUM_PD * 15) / 16);
738
739
if (is_busy) {
740
spin_unlock_irqrestore(&dev->core_dev->lock, flags);
741
return -EBUSY;
742
}
743
}
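/*
* Worked out for PPC4XX_NUM_PD = 256 (the value quoted in the comment
* above): backlog-capable requests start being flagged busy once
* 256 * 13 / 16 = 208 descriptors are in flight, while requests without
* CRYPTO_TFM_REQ_MAY_BACKLOG are rejected with -EBUSY once
* 256 * 15 / 16 = 240 descriptors are in use.
*/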
744
745
if (num_gd) {
746
fst_gd = crypto4xx_get_n_gd(dev, num_gd);
747
if (fst_gd == ERING_WAS_FULL) {
748
spin_unlock_irqrestore(&dev->core_dev->lock, flags);
749
return -EAGAIN;
750
}
751
}
752
if (num_sd) {
753
fst_sd = crypto4xx_get_n_sd(dev, num_sd);
754
if (fst_sd == ERING_WAS_FULL) {
755
if (num_gd)
756
dev->gdr_head = fst_gd;
757
spin_unlock_irqrestore(&dev->core_dev->lock, flags);
758
return -EAGAIN;
759
}
760
}
761
pd_entry = crypto4xx_get_pd_from_pdr_nolock(dev);
762
if (pd_entry == ERING_WAS_FULL) {
763
if (num_gd)
764
dev->gdr_head = fst_gd;
765
if (num_sd)
766
dev->sdr_head = fst_sd;
767
spin_unlock_irqrestore(&dev->core_dev->lock, flags);
768
return -EAGAIN;
769
}
770
spin_unlock_irqrestore(&dev->core_dev->lock, flags);
771
772
pd = &dev->pdr[pd_entry];
773
pd->sa_len = sa_len;
774
775
pd_uinfo = &dev->pdr_uinfo[pd_entry];
776
pd_uinfo->num_gd = num_gd;
777
pd_uinfo->num_sd = num_sd;
778
pd_uinfo->dest_va = dst;
779
pd_uinfo->async_req = req;
780
781
if (iv_len)
782
memcpy(pd_uinfo->sr_va->save_iv, iv, iv_len);
783
784
sa = pd_uinfo->sa_va;
785
memcpy(sa, req_sa, sa_len * 4);
786
787
sa->sa_command_1.bf.hash_crypto_offset = (assoclen >> 2);
788
offset_to_sr_ptr = get_dynamic_sa_offset_state_ptr_field(sa);
789
*(u32 *)((unsigned long)sa + offset_to_sr_ptr) = pd_uinfo->sr_pa;
790
791
if (num_gd) {
792
dma_addr_t gd_dma;
793
struct scatterlist *sg;
794
795
/* get first gd we are going to use */
796
gd_idx = fst_gd;
797
pd_uinfo->first_gd = fst_gd;
798
gd = crypto4xx_get_gdp(dev, &gd_dma, gd_idx);
799
pd->src = gd_dma;
800
/* enable gather */
801
sa->sa_command_0.bf.gather = 1;
802
/* walk the sg, and setup gather array */
803
804
sg = src;
805
while (nbytes) {
806
size_t len;
807
808
len = min(sg->length, nbytes);
809
gd->ptr = dma_map_page(dev->core_dev->device,
810
sg_page(sg), sg->offset, len, DMA_TO_DEVICE);
811
gd->ctl_len.len = len;
812
gd->ctl_len.done = 0;
813
gd->ctl_len.ready = 1;
814
if (len >= nbytes)
815
break;
816
817
nbytes -= sg->length;
818
gd_idx = get_next_gd(gd_idx);
819
gd = crypto4xx_get_gdp(dev, &gd_dma, gd_idx);
820
sg = sg_next(sg);
821
}
822
} else {
823
pd->src = (u32)dma_map_page(dev->core_dev->device, sg_page(src),
824
src->offset, min(nbytes, src->length),
825
DMA_TO_DEVICE);
826
/*
827
* Disable gather in sa command
828
*/
829
sa->sa_command_0.bf.gather = 0;
830
/*
831
* Indicate gather array is not used
832
*/
833
pd_uinfo->first_gd = 0xffffffff;
834
}
835
if (!num_sd) {
836
/*
* We know the application gave us a dst that is one whole
* piece of memory; no need to use the scatter ring.
*/
840
pd_uinfo->first_sd = 0xffffffff;
841
sa->sa_command_0.bf.scatter = 0;
842
pd->dest = (u32)dma_map_page(dev->core_dev->device,
843
sg_page(dst), dst->offset,
844
min(datalen, dst->length),
845
DMA_TO_DEVICE);
846
} else {
847
dma_addr_t sd_dma;
848
struct ce_sd *sd = NULL;
849
850
u32 sd_idx = fst_sd;
851
nbytes = datalen;
852
sa->sa_command_0.bf.scatter = 1;
853
pd_uinfo->first_sd = fst_sd;
854
sd = crypto4xx_get_sdp(dev, &sd_dma, sd_idx);
855
pd->dest = sd_dma;
856
/* setup scatter descriptor */
857
sd->ctl.done = 0;
858
sd->ctl.rdy = 1;
859
/* sd->ptr should have been set up by the sd_init routine */
860
if (nbytes >= PPC4XX_SD_BUFFER_SIZE)
861
nbytes -= PPC4XX_SD_BUFFER_SIZE;
862
else
863
nbytes = 0;
864
while (nbytes) {
865
sd_idx = get_next_sd(sd_idx);
866
sd = crypto4xx_get_sdp(dev, &sd_dma, sd_idx);
867
/* setup scatter descriptor */
868
sd->ctl.done = 0;
869
sd->ctl.rdy = 1;
870
if (nbytes >= PPC4XX_SD_BUFFER_SIZE) {
871
nbytes -= PPC4XX_SD_BUFFER_SIZE;
872
} else {
873
/*
* An SD entry can hold PPC4XX_SD_BUFFER_SIZE bytes,
* which is more than the remaining nbytes, so we are done.
*/
877
nbytes = 0;
878
}
879
}
880
}
881
882
pd->pd_ctl.w = PD_CTL_HOST_READY |
883
((crypto_tfm_alg_type(req->tfm) == CRYPTO_ALG_TYPE_AEAD) ?
884
PD_CTL_HASH_FINAL : 0);
885
pd->pd_ctl_len.w = 0x00400000 | (assoclen + datalen);
886
pd_uinfo->state = PD_ENTRY_INUSE | (is_busy ? PD_ENTRY_BUSY : 0);
887
888
wmb();
889
/* write any value to push engine to read a pd */
890
writel(0, dev->ce_base + CRYPTO4XX_INT_DESCR_RD);
891
writel(1, dev->ce_base + CRYPTO4XX_INT_DESCR_RD);
892
return is_busy ? -EBUSY : -EINPROGRESS;
893
}
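/*
* Return convention of crypto4xx_build_pd(): -EINPROGRESS means the request
* was queued to the engine. -EBUSY is returned either without queueing (for
* requests lacking CRYPTO_TFM_REQ_MAY_BACKLOG once 15/16 of the descriptors
* are in use) or after queueing (for backlog-capable requests past the 13/16
* mark, signalling the caller to slow down). -EAGAIN means one of the
* descriptor rings was full and nothing was queued.
*/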
894
895
/*
896
* Algorithm Registration Functions
897
*/
898
static void crypto4xx_ctx_init(struct crypto4xx_alg *amcc_alg,
899
struct crypto4xx_ctx *ctx)
900
{
901
ctx->dev = amcc_alg->dev;
902
ctx->sa_in = NULL;
903
ctx->sa_out = NULL;
904
ctx->sa_len = 0;
905
}
906
907
static int crypto4xx_sk_init(struct crypto_skcipher *sk)
908
{
909
struct skcipher_alg *alg = crypto_skcipher_alg(sk);
910
struct crypto4xx_alg *amcc_alg;
911
struct crypto4xx_ctx *ctx = crypto_skcipher_ctx(sk);
912
913
if (alg->base.cra_flags & CRYPTO_ALG_NEED_FALLBACK) {
914
ctx->sw_cipher.cipher =
915
crypto_alloc_sync_skcipher(alg->base.cra_name, 0,
916
CRYPTO_ALG_NEED_FALLBACK);
917
if (IS_ERR(ctx->sw_cipher.cipher))
918
return PTR_ERR(ctx->sw_cipher.cipher);
919
}
920
921
amcc_alg = container_of(alg, struct crypto4xx_alg, alg.u.cipher);
922
crypto4xx_ctx_init(amcc_alg, ctx);
923
return 0;
924
}
925
926
static void crypto4xx_common_exit(struct crypto4xx_ctx *ctx)
927
{
928
crypto4xx_free_sa(ctx);
929
}
930
931
static void crypto4xx_sk_exit(struct crypto_skcipher *sk)
932
{
933
struct crypto4xx_ctx *ctx = crypto_skcipher_ctx(sk);
934
935
crypto4xx_common_exit(ctx);
936
if (ctx->sw_cipher.cipher)
937
crypto_free_sync_skcipher(ctx->sw_cipher.cipher);
938
}
939
940
static int crypto4xx_aead_init(struct crypto_aead *tfm)
941
{
942
struct aead_alg *alg = crypto_aead_alg(tfm);
943
struct crypto4xx_ctx *ctx = crypto_aead_ctx(tfm);
944
struct crypto4xx_alg *amcc_alg;
945
946
ctx->sw_cipher.aead = crypto_alloc_aead(alg->base.cra_name, 0,
947
CRYPTO_ALG_NEED_FALLBACK |
948
CRYPTO_ALG_ASYNC);
949
if (IS_ERR(ctx->sw_cipher.aead))
950
return PTR_ERR(ctx->sw_cipher.aead);
951
952
amcc_alg = container_of(alg, struct crypto4xx_alg, alg.u.aead);
953
crypto4xx_ctx_init(amcc_alg, ctx);
954
crypto_aead_set_reqsize(tfm, max(sizeof(struct aead_request) + 32 +
955
crypto_aead_reqsize(ctx->sw_cipher.aead),
956
sizeof(struct crypto4xx_aead_reqctx)));
957
return 0;
958
}
959
960
static void crypto4xx_aead_exit(struct crypto_aead *tfm)
961
{
962
struct crypto4xx_ctx *ctx = crypto_aead_ctx(tfm);
963
964
crypto4xx_common_exit(ctx);
965
crypto_free_aead(ctx->sw_cipher.aead);
966
}
967
968
static int crypto4xx_register_alg(struct crypto4xx_device *sec_dev,
969
struct crypto4xx_alg_common *crypto_alg,
970
int array_size)
971
{
972
struct crypto4xx_alg *alg;
973
int i;
974
int rc = 0;
975
976
for (i = 0; i < array_size; i++) {
977
alg = kzalloc(sizeof(struct crypto4xx_alg), GFP_KERNEL);
978
if (!alg)
979
return -ENOMEM;
980
981
alg->alg = crypto_alg[i];
982
alg->dev = sec_dev;
983
984
switch (alg->alg.type) {
985
case CRYPTO_ALG_TYPE_AEAD:
986
rc = crypto_register_aead(&alg->alg.u.aead);
987
break;
988
989
case CRYPTO_ALG_TYPE_RNG:
990
rc = crypto_register_rng(&alg->alg.u.rng);
991
break;
992
993
default:
994
rc = crypto_register_skcipher(&alg->alg.u.cipher);
995
break;
996
}
997
998
if (rc)
999
kfree(alg);
1000
else
1001
list_add_tail(&alg->entry, &sec_dev->alg_list);
1002
}
1003
1004
return 0;
1005
}
1006
1007
static void crypto4xx_unregister_alg(struct crypto4xx_device *sec_dev)
1008
{
1009
struct crypto4xx_alg *alg, *tmp;
1010
1011
list_for_each_entry_safe(alg, tmp, &sec_dev->alg_list, entry) {
1012
list_del(&alg->entry);
1013
switch (alg->alg.type) {
1014
case CRYPTO_ALG_TYPE_AEAD:
1015
crypto_unregister_aead(&alg->alg.u.aead);
1016
break;
1017
1018
case CRYPTO_ALG_TYPE_RNG:
1019
crypto_unregister_rng(&alg->alg.u.rng);
1020
break;
1021
1022
default:
1023
crypto_unregister_skcipher(&alg->alg.u.cipher);
1024
}
1025
kfree(alg);
1026
}
1027
}
1028
1029
static void crypto4xx_bh_tasklet_cb(unsigned long data)
1030
{
1031
struct device *dev = (struct device *)data;
1032
struct crypto4xx_core_device *core_dev = dev_get_drvdata(dev);
1033
struct pd_uinfo *pd_uinfo;
1034
struct ce_pd *pd;
1035
u32 tail = core_dev->dev->pdr_tail;
1036
u32 head = core_dev->dev->pdr_head;
1037
1038
do {
1039
pd_uinfo = &core_dev->dev->pdr_uinfo[tail];
1040
pd = &core_dev->dev->pdr[tail];
1041
if ((pd_uinfo->state & PD_ENTRY_INUSE) &&
1042
((READ_ONCE(pd->pd_ctl.w) &
1043
(PD_CTL_PE_DONE | PD_CTL_HOST_READY)) ==
1044
PD_CTL_PE_DONE)) {
1045
crypto4xx_pd_done(core_dev->dev, tail);
1046
tail = crypto4xx_put_pd_to_pdr(core_dev->dev, tail);
1047
} else {
1048
/* if tail not done, break */
1049
break;
1050
}
1051
} while (head != tail);
1052
}
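/*
* The tasklet above walks the packet descriptor ring from pdr_tail towards
* pdr_head and completes every in-use entry whose pd_ctl word has
* PD_CTL_PE_DONE set and PD_CTL_HOST_READY clear; it stops at the first
* entry that is still pending, so completions are handled strictly in
* ring order.
*/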
1053
1054
/*
* Top half of the ISR.
*/
1057
static inline irqreturn_t crypto4xx_interrupt_handler(int irq, void *data,
1058
u32 clr_val)
1059
{
1060
struct device *dev = data;
1061
struct crypto4xx_core_device *core_dev = dev_get_drvdata(dev);
1062
1063
writel(clr_val, core_dev->dev->ce_base + CRYPTO4XX_INT_CLR);
1064
tasklet_schedule(&core_dev->tasklet);
1065
1066
return IRQ_HANDLED;
1067
}
1068
1069
static irqreturn_t crypto4xx_ce_interrupt_handler(int irq, void *data)
1070
{
1071
return crypto4xx_interrupt_handler(irq, data, PPC4XX_INTERRUPT_CLR);
1072
}
1073
1074
static irqreturn_t crypto4xx_ce_interrupt_handler_revb(int irq, void *data)
1075
{
1076
return crypto4xx_interrupt_handler(irq, data, PPC4XX_INTERRUPT_CLR |
1077
PPC4XX_TMO_ERR_INT);
1078
}
1079
1080
static int ppc4xx_prng_data_read(struct crypto4xx_device *dev,
1081
u8 *data, unsigned int max)
1082
{
1083
unsigned int i, curr = 0;
1084
u32 val[2];
1085
1086
do {
1087
/* trigger PRN generation */
1088
writel(PPC4XX_PRNG_CTRL_AUTO_EN,
1089
dev->ce_base + CRYPTO4XX_PRNG_CTRL);
1090
1091
for (i = 0; i < 1024; i++) {
1092
/* usually 19 iterations are enough */
1093
if ((readl(dev->ce_base + CRYPTO4XX_PRNG_STAT) &
1094
CRYPTO4XX_PRNG_STAT_BUSY))
1095
continue;
1096
1097
val[0] = readl_be(dev->ce_base + CRYPTO4XX_PRNG_RES_0);
1098
val[1] = readl_be(dev->ce_base + CRYPTO4XX_PRNG_RES_1);
1099
break;
1100
}
1101
if (i == 1024)
1102
return -ETIMEDOUT;
1103
1104
if ((max - curr) >= 8) {
1105
memcpy(data, &val, 8);
1106
data += 8;
1107
curr += 8;
1108
} else {
1109
/* copy only remaining bytes */
1110
memcpy(data, &val, max - curr);
1111
break;
1112
}
1113
} while (curr < max);
1114
1115
return curr;
1116
}
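/*
* ppc4xx_prng_data_read() above fetches eight bytes per loop iteration from
* the two 32-bit PRNG result registers and returns the number of bytes
* copied, or -ETIMEDOUT if the engine keeps reporting busy. Note that the
* final partial memcpy() does not advance "curr", so the returned count
* only reflects the complete 8-byte chunks.
*/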
1117
1118
static int crypto4xx_prng_generate(struct crypto_rng *tfm,
1119
const u8 *src, unsigned int slen,
1120
u8 *dstn, unsigned int dlen)
1121
{
1122
struct rng_alg *alg = crypto_rng_alg(tfm);
1123
struct crypto4xx_alg *amcc_alg;
1124
struct crypto4xx_device *dev;
1125
int ret;
1126
1127
amcc_alg = container_of(alg, struct crypto4xx_alg, alg.u.rng);
1128
dev = amcc_alg->dev;
1129
1130
mutex_lock(&dev->core_dev->rng_lock);
1131
ret = ppc4xx_prng_data_read(dev, dstn, dlen);
1132
mutex_unlock(&dev->core_dev->rng_lock);
1133
return ret;
1134
}
1135
1136
1137
static int crypto4xx_prng_seed(struct crypto_rng *tfm, const u8 *seed,
1138
unsigned int slen)
1139
{
1140
return 0;
1141
}
1142
1143
/*
1144
* Supported Crypto Algorithms
1145
*/
1146
static struct crypto4xx_alg_common crypto4xx_alg[] = {
1147
/* Crypto AES modes */
1148
{ .type = CRYPTO_ALG_TYPE_SKCIPHER, .u.cipher = {
1149
.base = {
1150
.cra_name = "cbc(aes)",
1151
.cra_driver_name = "cbc-aes-ppc4xx",
1152
.cra_priority = CRYPTO4XX_CRYPTO_PRIORITY,
1153
.cra_flags = CRYPTO_ALG_ASYNC |
1154
CRYPTO_ALG_KERN_DRIVER_ONLY,
1155
.cra_blocksize = AES_BLOCK_SIZE,
1156
.cra_ctxsize = sizeof(struct crypto4xx_ctx),
1157
.cra_module = THIS_MODULE,
1158
},
1159
.min_keysize = AES_MIN_KEY_SIZE,
1160
.max_keysize = AES_MAX_KEY_SIZE,
1161
.ivsize = AES_IV_SIZE,
1162
.setkey = crypto4xx_setkey_aes_cbc,
1163
.encrypt = crypto4xx_encrypt_iv_block,
1164
.decrypt = crypto4xx_decrypt_iv_block,
1165
.init = crypto4xx_sk_init,
1166
.exit = crypto4xx_sk_exit,
1167
} },
1168
{ .type = CRYPTO_ALG_TYPE_SKCIPHER, .u.cipher = {
1169
.base = {
1170
.cra_name = "ctr(aes)",
1171
.cra_driver_name = "ctr-aes-ppc4xx",
1172
.cra_priority = CRYPTO4XX_CRYPTO_PRIORITY,
1173
.cra_flags = CRYPTO_ALG_NEED_FALLBACK |
1174
CRYPTO_ALG_ASYNC |
1175
CRYPTO_ALG_KERN_DRIVER_ONLY,
1176
.cra_blocksize = 1,
1177
.cra_ctxsize = sizeof(struct crypto4xx_ctx),
1178
.cra_module = THIS_MODULE,
1179
},
1180
.min_keysize = AES_MIN_KEY_SIZE,
1181
.max_keysize = AES_MAX_KEY_SIZE,
1182
.ivsize = AES_IV_SIZE,
1183
.setkey = crypto4xx_setkey_aes_ctr,
1184
.encrypt = crypto4xx_encrypt_ctr,
1185
.decrypt = crypto4xx_decrypt_ctr,
1186
.init = crypto4xx_sk_init,
1187
.exit = crypto4xx_sk_exit,
1188
} },
1189
{ .type = CRYPTO_ALG_TYPE_SKCIPHER, .u.cipher = {
1190
.base = {
1191
.cra_name = "rfc3686(ctr(aes))",
1192
.cra_driver_name = "rfc3686-ctr-aes-ppc4xx",
1193
.cra_priority = CRYPTO4XX_CRYPTO_PRIORITY,
1194
.cra_flags = CRYPTO_ALG_ASYNC |
1195
CRYPTO_ALG_KERN_DRIVER_ONLY,
1196
.cra_blocksize = 1,
1197
.cra_ctxsize = sizeof(struct crypto4xx_ctx),
1198
.cra_module = THIS_MODULE,
1199
},
1200
.min_keysize = AES_MIN_KEY_SIZE + CTR_RFC3686_NONCE_SIZE,
1201
.max_keysize = AES_MAX_KEY_SIZE + CTR_RFC3686_NONCE_SIZE,
1202
.ivsize = CTR_RFC3686_IV_SIZE,
1203
.setkey = crypto4xx_setkey_rfc3686,
1204
.encrypt = crypto4xx_rfc3686_encrypt,
1205
.decrypt = crypto4xx_rfc3686_decrypt,
1206
.init = crypto4xx_sk_init,
1207
.exit = crypto4xx_sk_exit,
1208
} },
1209
{ .type = CRYPTO_ALG_TYPE_SKCIPHER, .u.cipher = {
1210
.base = {
1211
.cra_name = "ecb(aes)",
1212
.cra_driver_name = "ecb-aes-ppc4xx",
1213
.cra_priority = CRYPTO4XX_CRYPTO_PRIORITY,
1214
.cra_flags = CRYPTO_ALG_ASYNC |
1215
CRYPTO_ALG_KERN_DRIVER_ONLY,
1216
.cra_blocksize = AES_BLOCK_SIZE,
1217
.cra_ctxsize = sizeof(struct crypto4xx_ctx),
1218
.cra_module = THIS_MODULE,
1219
},
1220
.min_keysize = AES_MIN_KEY_SIZE,
1221
.max_keysize = AES_MAX_KEY_SIZE,
1222
.setkey = crypto4xx_setkey_aes_ecb,
1223
.encrypt = crypto4xx_encrypt_noiv_block,
1224
.decrypt = crypto4xx_decrypt_noiv_block,
1225
.init = crypto4xx_sk_init,
1226
.exit = crypto4xx_sk_exit,
1227
} },
1228
1229
/* AEAD */
1230
{ .type = CRYPTO_ALG_TYPE_AEAD, .u.aead = {
1231
.setkey = crypto4xx_setkey_aes_ccm,
1232
.setauthsize = crypto4xx_setauthsize_aead,
1233
.encrypt = crypto4xx_encrypt_aes_ccm,
1234
.decrypt = crypto4xx_decrypt_aes_ccm,
1235
.init = crypto4xx_aead_init,
1236
.exit = crypto4xx_aead_exit,
1237
.ivsize = AES_BLOCK_SIZE,
1238
.maxauthsize = 16,
1239
.base = {
1240
.cra_name = "ccm(aes)",
1241
.cra_driver_name = "ccm-aes-ppc4xx",
1242
.cra_priority = CRYPTO4XX_CRYPTO_PRIORITY,
1243
.cra_flags = CRYPTO_ALG_ASYNC |
1244
CRYPTO_ALG_NEED_FALLBACK |
1245
CRYPTO_ALG_KERN_DRIVER_ONLY,
1246
.cra_blocksize = 1,
1247
.cra_ctxsize = sizeof(struct crypto4xx_ctx),
1248
.cra_module = THIS_MODULE,
1249
},
1250
} },
1251
{ .type = CRYPTO_ALG_TYPE_AEAD, .u.aead = {
1252
.setkey = crypto4xx_setkey_aes_gcm,
1253
.setauthsize = crypto4xx_setauthsize_aead,
1254
.encrypt = crypto4xx_encrypt_aes_gcm,
1255
.decrypt = crypto4xx_decrypt_aes_gcm,
1256
.init = crypto4xx_aead_init,
1257
.exit = crypto4xx_aead_exit,
1258
.ivsize = GCM_AES_IV_SIZE,
1259
.maxauthsize = 16,
1260
.base = {
1261
.cra_name = "gcm(aes)",
1262
.cra_driver_name = "gcm-aes-ppc4xx",
1263
.cra_priority = CRYPTO4XX_CRYPTO_PRIORITY,
1264
.cra_flags = CRYPTO_ALG_ASYNC |
1265
CRYPTO_ALG_NEED_FALLBACK |
1266
CRYPTO_ALG_KERN_DRIVER_ONLY,
1267
.cra_blocksize = 1,
1268
.cra_ctxsize = sizeof(struct crypto4xx_ctx),
1269
.cra_module = THIS_MODULE,
1270
},
1271
} },
1272
{ .type = CRYPTO_ALG_TYPE_RNG, .u.rng = {
1273
.base = {
1274
.cra_name = "stdrng",
1275
.cra_driver_name = "crypto4xx_rng",
1276
.cra_priority = 300,
1277
.cra_ctxsize = 0,
1278
.cra_module = THIS_MODULE,
1279
},
1280
.generate = crypto4xx_prng_generate,
1281
.seed = crypto4xx_prng_seed,
1282
.seedsize = 0,
1283
} },
1284
};
1285
1286
/*
1287
* Module Initialization Routine
1288
*/
1289
static int crypto4xx_probe(struct platform_device *ofdev)
1290
{
1291
int rc;
1292
struct device *dev = &ofdev->dev;
1293
struct crypto4xx_core_device *core_dev;
1294
struct device_node *np;
1295
u32 pvr;
1296
bool is_revb = true;
1297
1298
np = of_find_compatible_node(NULL, NULL, "amcc,ppc460ex-crypto");
1299
if (np) {
1300
mtdcri(SDR0, PPC460EX_SDR0_SRST,
1301
mfdcri(SDR0, PPC460EX_SDR0_SRST) | PPC460EX_CE_RESET);
1302
mtdcri(SDR0, PPC460EX_SDR0_SRST,
1303
mfdcri(SDR0, PPC460EX_SDR0_SRST) & ~PPC460EX_CE_RESET);
1304
} else {
1305
np = of_find_compatible_node(NULL, NULL, "amcc,ppc405ex-crypto");
1306
if (np) {
1307
mtdcri(SDR0, PPC405EX_SDR0_SRST,
1308
mfdcri(SDR0, PPC405EX_SDR0_SRST) | PPC405EX_CE_RESET);
1309
mtdcri(SDR0, PPC405EX_SDR0_SRST,
1310
mfdcri(SDR0, PPC405EX_SDR0_SRST) & ~PPC405EX_CE_RESET);
1311
is_revb = false;
1312
} else {
1313
np = of_find_compatible_node(NULL, NULL, "amcc,ppc460sx-crypto");
1314
if (np) {
1315
mtdcri(SDR0, PPC460SX_SDR0_SRST,
1316
mfdcri(SDR0, PPC460SX_SDR0_SRST) | PPC460SX_CE_RESET);
1317
mtdcri(SDR0, PPC460SX_SDR0_SRST,
1318
mfdcri(SDR0, PPC460SX_SDR0_SRST) & ~PPC460SX_CE_RESET);
1319
} else {
1320
printk(KERN_ERR "Crypto Function Not supported!\n");
1321
return -EINVAL;
1322
}
1323
}
1324
}
1325
1326
of_node_put(np);
1327
1328
core_dev = devm_kzalloc(
1329
&ofdev->dev, sizeof(struct crypto4xx_core_device), GFP_KERNEL);
1330
if (!core_dev)
1331
return -ENOMEM;
1332
1333
dev_set_drvdata(dev, core_dev);
1334
core_dev->ofdev = ofdev;
1335
core_dev->dev = devm_kzalloc(
1336
&ofdev->dev, sizeof(struct crypto4xx_device), GFP_KERNEL);
1337
if (!core_dev->dev)
1338
return -ENOMEM;
1339
1340
/*
* Older versions of the 460EX/GT have a hardware bug and hence
* do not support H/W-based security interrupt coalescing.
*/
1344
pvr = mfspr(SPRN_PVR);
1345
if (is_revb && ((pvr >> 4) == 0x130218A)) {
1346
u32 min = PVR_MIN(pvr);
1347
1348
if (min < 4) {
1349
dev_info(dev, "RevA detected - disable interrupt coalescing\n");
1350
is_revb = false;
1351
}
1352
}
1353
1354
core_dev->dev->core_dev = core_dev;
1355
core_dev->dev->is_revb = is_revb;
1356
core_dev->device = dev;
1357
rc = devm_mutex_init(&ofdev->dev, &core_dev->rng_lock);
1358
if (rc)
1359
return rc;
1360
spin_lock_init(&core_dev->lock);
1361
INIT_LIST_HEAD(&core_dev->dev->alg_list);
1362
ratelimit_default_init(&core_dev->dev->aead_ratelimit);
1363
rc = crypto4xx_build_sdr(core_dev->dev);
1364
if (rc)
1365
goto err_build_sdr;
1366
rc = crypto4xx_build_pdr(core_dev->dev);
1367
if (rc)
1368
goto err_build_sdr;
1369
1370
rc = crypto4xx_build_gdr(core_dev->dev);
1371
if (rc)
1372
goto err_build_sdr;
1373
1374
/* Init tasklet for bottom half processing */
1375
tasklet_init(&core_dev->tasklet, crypto4xx_bh_tasklet_cb,
1376
(unsigned long) dev);
1377
1378
core_dev->dev->ce_base = devm_platform_ioremap_resource(ofdev, 0);
1379
if (IS_ERR(core_dev->dev->ce_base)) {
1380
dev_err(&ofdev->dev, "failed to ioremap resource");
1381
rc = PTR_ERR(core_dev->dev->ce_base);
1382
goto err_build_sdr;
1383
}
1384
1385
/* Register for Crypto isr, Crypto Engine IRQ */
1386
core_dev->irq = irq_of_parse_and_map(ofdev->dev.of_node, 0);
1387
rc = devm_request_irq(&ofdev->dev, core_dev->irq,
1388
is_revb ? crypto4xx_ce_interrupt_handler_revb :
1389
crypto4xx_ce_interrupt_handler,
1390
0, KBUILD_MODNAME, dev);
1391
if (rc)
1392
goto err_iomap;
1393
1394
/* the pdr, rdr, gdr and sdr need to be set up before this */
1395
crypto4xx_hw_init(core_dev->dev);
1396
1397
/* Register security algorithms with Linux CryptoAPI */
1398
rc = crypto4xx_register_alg(core_dev->dev, crypto4xx_alg,
1399
ARRAY_SIZE(crypto4xx_alg));
1400
if (rc)
1401
goto err_iomap;
1402
1403
ppc4xx_trng_probe(core_dev);
1404
return 0;
1405
1406
err_iomap:
1407
tasklet_kill(&core_dev->tasklet);
1408
err_build_sdr:
1409
crypto4xx_destroy_sdr(core_dev->dev);
1410
crypto4xx_destroy_gdr(core_dev->dev);
1411
crypto4xx_destroy_pdr(core_dev->dev);
1412
return rc;
1413
}
1414
1415
static void crypto4xx_remove(struct platform_device *ofdev)
1416
{
1417
struct device *dev = &ofdev->dev;
1418
struct crypto4xx_core_device *core_dev = dev_get_drvdata(dev);
1419
1420
ppc4xx_trng_remove(core_dev);
1421
1422
tasklet_kill(&core_dev->tasklet);
1423
/* Un-register with Linux CryptoAPI */
1424
crypto4xx_unregister_alg(core_dev->dev);
1425
/* Free all allocated memory */
1426
crypto4xx_stop_all(core_dev);
1427
}
1428
1429
static const struct of_device_id crypto4xx_match[] = {
1430
{ .compatible = "amcc,ppc4xx-crypto",},
1431
{ },
1432
};
1433
MODULE_DEVICE_TABLE(of, crypto4xx_match);
1434
1435
static struct platform_driver crypto4xx_driver = {
1436
.driver = {
1437
.name = KBUILD_MODNAME,
1438
.of_match_table = crypto4xx_match,
1439
},
1440
.probe = crypto4xx_probe,
1441
.remove = crypto4xx_remove,
1442
};
1443
1444
module_platform_driver(crypto4xx_driver);
1445
1446
MODULE_LICENSE("GPL");
1447
MODULE_AUTHOR("James Hsiao <[email protected]>");
1448
MODULE_DESCRIPTION("Driver for AMCC PPC4xx crypto accelerator");
1449
1450