Book a Demo!
CoCalc Logo Icon
StoreFeaturesDocsShareSupportNewsAboutPoliciesSign UpSign In
torvalds
GitHub Repository: torvalds/linux
Path: blob/master/drivers/char/hw_random/cctrng.c
29269 views
1
// SPDX-License-Identifier: GPL-2.0
2
/* Copyright (C) 2019-2020 ARM Limited or its affiliates. */
3
4
#include <linux/kernel.h>
5
#include <linux/module.h>
6
#include <linux/clk.h>
7
#include <linux/hw_random.h>
8
#include <linux/io.h>
9
#include <linux/platform_device.h>
10
#include <linux/pm_runtime.h>
11
#include <linux/interrupt.h>
12
#include <linux/irqreturn.h>
13
#include <linux/workqueue.h>
14
#include <linux/circ_buf.h>
15
#include <linux/completion.h>
16
#include <linux/of.h>
17
#include <linux/bitfield.h>
18
#include <linux/fips.h>
19
20
#include "cctrng.h"
21
22
/* Helpers to extract a named bit-field from a register value, built on the
 * <name>_BIT_SHIFT / <name>_BIT_SIZE constants declared in cctrng.h.
 */
#define CC_REG_LOW(name) (name ## _BIT_SHIFT)
#define CC_REG_HIGH(name) (CC_REG_LOW(name) + name ## _BIT_SIZE - 1)
#define CC_GENMASK(name) GENMASK(CC_REG_HIGH(name), CC_REG_LOW(name))

#define CC_REG_FLD_GET(reg_name, fld_name, reg_val) \
	(FIELD_GET(CC_GENMASK(CC_ ## reg_name ## _ ## fld_name), reg_val))

/* max polls of NVM_IS_IDLE while waiting for HW reset completion */
#define CC_HW_RESET_LOOP_COUNT 10
/* runtime-PM autosuspend delay, in milliseconds */
#define CC_TRNG_SUSPEND_TIMEOUT 3000

/* data circular buffer in words must be:
 * - of a power-of-2 size (limitation of circ_buf.h macros)
 * - at least 6, the size generated in the EHR according to HW implementation
 */
#define CCTRNG_DATA_BUF_WORDS 32

/* The timeout for the TRNG operation should be calculated with the formula:
 * Timeout = EHR_NUM * VN_COEFF * EHR_LENGTH * SAMPLE_CNT * SCALE_VALUE
 * while:
 * - SAMPLE_CNT is input value from the characterisation process
 * - all the rest are constants
 */
#define EHR_NUM 1
#define VN_COEFF 4
#define EHR_LENGTH CC_TRNG_EHR_IN_BITS
#define SCALE_VALUE 2
#define CCTRNG_TIMEOUT(smpl_cnt) \
	(EHR_NUM * VN_COEFF * EHR_LENGTH * smpl_cnt * SCALE_VALUE)
50
51
/* Per-device driver state, allocated in cctrng_probe(). */
struct cctrng_drvdata {
	struct platform_device *pdev;	/* owning platform device */
	void __iomem *cc_base;		/* mapped CryptoCell register bank */
	struct clk *clk;		/* optional device clock */
	struct hwrng rng;		/* hw_random registration record */
	u32 active_rosc;		/* index of the ring oscillator in use */
	/* Sampling interval for each ring oscillator:
	 * count of ring oscillator cycles between consecutive bits sampling.
	 * Value of 0 indicates non-valid rosc
	 */
	u32 smpl_ratio[CC_TRNG_NUM_OF_ROSCS];

	u32 data_buf[CCTRNG_DATA_BUF_WORDS];	/* storage behind 'circ' */
	struct circ_buf circ;		/* circular view over data_buf */
	struct work_struct compwork;	/* deferred EHR-completion handling */
	struct work_struct startwork;	/* deferred HW (re)start */

	/* pending_hw - 1 when HW is pending, 0 when it is idle */
	atomic_t pending_hw;

	/* protects against multiple concurrent consumers of data_buf */
	spinlock_t read_lock;
};
74
75
76
/* functions for write/read CC registers */
77
/* Write a 32-bit value to the CryptoCell register at offset @reg. */
static inline void cc_iowrite(struct cctrng_drvdata *drvdata, u32 reg, u32 val)
{
	void __iomem *addr = drvdata->cc_base + reg;

	iowrite32(val, addr);
}
81
/* Read the 32-bit CryptoCell register at offset @reg. */
static inline u32 cc_ioread(struct cctrng_drvdata *drvdata, u32 reg)
{
	void __iomem *addr = drvdata->cc_base + reg;

	return ioread32(addr);
}
85
86
87
/*
 * cc_trng_pm_get() - take a runtime-PM reference on the device.
 *
 * Returns 0 on success or a negative error code.
 *
 * Uses pm_runtime_resume_and_get() rather than pm_runtime_get_sync():
 * the latter leaves the usage counter incremented even when the resume
 * fails, which leaked a reference on every error path here (callers
 * bail out without calling put).
 */
static int cc_trng_pm_get(struct device *dev)
{
	return pm_runtime_resume_and_get(dev);
}
96
97
/*
 * cc_trng_pm_put_suspend() - drop the runtime-PM reference and allow
 * the device to autosuspend after the configured delay.
 */
static void cc_trng_pm_put_suspend(struct device *dev)
{
	int rc;

	/* refresh the autosuspend timestamp so the full delay applies
	 * from now; without this the device may suspend immediately.
	 */
	pm_runtime_mark_last_busy(dev);
	rc = pm_runtime_put_autosuspend(dev);
	if (rc)
		dev_err(dev, "pm_runtime_put_autosuspend returned %x\n", rc);
}
105
106
/*
 * cc_trng_pm_init() - configure runtime-PM autosuspend and mark the
 * device active.  PM ops stay disabled until cc_trng_pm_go().
 *
 * Returns 0 on success or the pm_runtime_set_active() error.
 */
static int cc_trng_pm_init(struct cctrng_drvdata *drvdata)
{
	struct device *dev = &(drvdata->pdev->dev);

	/* must be before the enabling to avoid redundant suspending */
	pm_runtime_set_autosuspend_delay(dev, CC_TRNG_SUSPEND_TIMEOUT);
	pm_runtime_use_autosuspend(dev);
	/* set us as active - note we won't do PM ops until cc_trng_pm_go()! */
	return pm_runtime_set_active(dev);
}
116
117
static void cc_trng_pm_go(struct cctrng_drvdata *drvdata)
118
{
119
struct device *dev = &(drvdata->pdev->dev);
120
121
/* enable the PM module*/
122
pm_runtime_enable(dev);
123
}
124
125
static void cc_trng_pm_fini(struct cctrng_drvdata *drvdata)
126
{
127
struct device *dev = &(drvdata->pdev->dev);
128
129
pm_runtime_disable(dev);
130
}
131
132
133
static inline int cc_trng_parse_sampling_ratio(struct cctrng_drvdata *drvdata)
134
{
135
struct device *dev = &(drvdata->pdev->dev);
136
struct device_node *np = drvdata->pdev->dev.of_node;
137
int rc;
138
int i;
139
/* ret will be set to 0 if at least one rosc has (sampling ratio > 0) */
140
int ret = -EINVAL;
141
142
rc = of_property_read_u32_array(np, "arm,rosc-ratio",
143
drvdata->smpl_ratio,
144
CC_TRNG_NUM_OF_ROSCS);
145
if (rc) {
146
/* arm,rosc-ratio was not found in device tree */
147
return rc;
148
}
149
150
/* verify that at least one rosc has (sampling ratio > 0) */
151
for (i = 0; i < CC_TRNG_NUM_OF_ROSCS; ++i) {
152
dev_dbg(dev, "rosc %d sampling ratio %u",
153
i, drvdata->smpl_ratio[i]);
154
155
if (drvdata->smpl_ratio[i] > 0)
156
ret = 0;
157
}
158
159
return ret;
160
}
161
162
static int cc_trng_change_rosc(struct cctrng_drvdata *drvdata)
163
{
164
struct device *dev = &(drvdata->pdev->dev);
165
166
dev_dbg(dev, "cctrng change rosc (was %d)\n", drvdata->active_rosc);
167
drvdata->active_rosc += 1;
168
169
while (drvdata->active_rosc < CC_TRNG_NUM_OF_ROSCS) {
170
if (drvdata->smpl_ratio[drvdata->active_rosc] > 0)
171
return 0;
172
173
drvdata->active_rosc += 1;
174
}
175
return -EINVAL;
176
}
177
178
179
/*
 * cc_trng_enable_rnd_source() - arm the watchdog, start the entropy
 * source and unmask RNG interrupts for the currently active rosc.
 * Write order matters: the watchdog is programmed before the source is
 * enabled, and interrupts are unmasked last.
 */
static void cc_trng_enable_rnd_source(struct cctrng_drvdata *drvdata)
{
	u32 max_cycles;

	/* Set watchdog threshold to maximal allowed time (in CPU cycles) */
	max_cycles = CCTRNG_TIMEOUT(drvdata->smpl_ratio[drvdata->active_rosc]);
	cc_iowrite(drvdata, CC_RNG_WATCHDOG_VAL_REG_OFFSET, max_cycles);

	/* enable the RND source */
	cc_iowrite(drvdata, CC_RND_SOURCE_ENABLE_REG_OFFSET, 0x1);

	/* unmask RNG interrupts */
	cc_iowrite(drvdata, CC_RNG_IMR_REG_OFFSET, (u32)~CC_RNG_INT_MASK);
}
193
194
195
/* increase circular data buffer index (head/tail) */
196
static inline void circ_idx_inc(int *idx, int bytes)
197
{
198
*idx += (bytes + 3) >> 2;
199
*idx &= (CCTRNG_DATA_BUF_WORDS - 1);
200
}
201
202
static inline size_t circ_buf_space(struct cctrng_drvdata *drvdata)
203
{
204
return CIRC_SPACE(drvdata->circ.head,
205
drvdata->circ.tail, CCTRNG_DATA_BUF_WORDS);
206
207
}
208
209
/*
 * cctrng_read() - hw_random read callback.
 * @rng:  registered hwrng instance; priv holds our drvdata
 * @data: destination buffer
 * @max:  maximum number of bytes to copy
 * @wait: ignored - this implementation never blocks
 *
 * Copies whatever random data is currently queued in the circular
 * buffer and, when enough space was freed, schedules the HW to
 * generate more.  Returns the number of bytes copied (possibly 0 when
 * the buffer is empty or contended) or a negative error code.
 *
 * Fixes vs. previous revision:
 *  - the wrap-around chunk was copied to 'data' instead of
 *    'data + copied', overwriting the first chunk;
 *  - pending_hw was left set when cc_trng_pm_get() failed, which
 *    permanently blocked future buffer refills.
 */
static int cctrng_read(struct hwrng *rng, void *data, size_t max, bool wait)
{
	struct cctrng_drvdata *drvdata = (struct cctrng_drvdata *)rng->priv;
	struct device *dev = &(drvdata->pdev->dev);
	u32 *buf = (u32 *)drvdata->circ.buf;
	size_t copied = 0;
	size_t cnt_w;
	size_t size;
	size_t left;

	if (!spin_trylock(&drvdata->read_lock)) {
		/* concurrent consumers from data_buf cannot be served */
		dev_dbg_ratelimited(dev, "unable to hold lock\n");
		return 0;
	}

	/* copy till end of data buffer (without wrap back) */
	cnt_w = CIRC_CNT_TO_END(drvdata->circ.head,
				drvdata->circ.tail, CCTRNG_DATA_BUF_WORDS);
	size = min((cnt_w<<2), max);
	memcpy(data, &(buf[drvdata->circ.tail]), size);
	copied = size;
	circ_idx_inc(&drvdata->circ.tail, size);
	/* copy rest of data in data buffer */
	left = max - copied;
	if (left > 0) {
		cnt_w = CIRC_CNT(drvdata->circ.head,
				 drvdata->circ.tail, CCTRNG_DATA_BUF_WORDS);
		size = min((cnt_w<<2), left);
		/* append after the first chunk, do not overwrite it */
		memcpy(data + copied, &(buf[drvdata->circ.tail]), size);
		copied += size;
		circ_idx_inc(&drvdata->circ.tail, size);
	}

	spin_unlock(&drvdata->read_lock);

	if (circ_buf_space(drvdata) >= CC_TRNG_EHR_IN_WORDS) {
		if (atomic_cmpxchg(&drvdata->pending_hw, 0, 1) == 0) {
			/* re-check space in buffer to avoid potential race */
			if (circ_buf_space(drvdata) >= CC_TRNG_EHR_IN_WORDS) {
				/* increment device's usage counter */
				int rc = cc_trng_pm_get(dev);

				if (rc) {
					dev_err(dev,
						"cc_trng_pm_get returned %x\n",
						rc);
					/* release the claim so a later read
					 * can retry triggering the HW
					 */
					atomic_set(&drvdata->pending_hw, 0);
					return rc;
				}

				/* schedule execution of deferred work handler
				 * for filling of data buffer
				 */
				schedule_work(&drvdata->startwork);
			} else {
				atomic_set(&drvdata->pending_hw, 0);
			}
		}
	}

	return copied;
}
273
274
/*
 * cc_trng_hw_trigger() - reset the TRNG block and start entropy
 * collection on the currently active ring oscillator.
 *
 * The sample-count register is rewritten and read back in a loop: it
 * only retains the written value once the software reset has completed,
 * so a matching read-back doubles as the reset-done indication.
 * NOTE(review): the loop has no iteration bound - it busy-waits forever
 * if the HW never comes out of reset; confirm this matches HW guarantees.
 */
static void cc_trng_hw_trigger(struct cctrng_drvdata *drvdata)
{
	u32 tmp_smpl_cnt = 0;
	struct device *dev = &(drvdata->pdev->dev);

	dev_dbg(dev, "cctrng hw trigger.\n");

	/* enable the HW RND clock */
	cc_iowrite(drvdata, CC_RNG_CLK_ENABLE_REG_OFFSET, 0x1);

	/* do software reset */
	cc_iowrite(drvdata, CC_RNG_SW_RESET_REG_OFFSET, 0x1);
	/* in order to verify that the reset has completed,
	 * the sample count need to be verified
	 */
	do {
		/* enable the HW RND clock */
		cc_iowrite(drvdata, CC_RNG_CLK_ENABLE_REG_OFFSET, 0x1);

		/* set sampling ratio (rng_clocks) between consecutive bits */
		cc_iowrite(drvdata, CC_SAMPLE_CNT1_REG_OFFSET,
			   drvdata->smpl_ratio[drvdata->active_rosc]);

		/* read the sampling ratio */
		tmp_smpl_cnt = cc_ioread(drvdata, CC_SAMPLE_CNT1_REG_OFFSET);

	} while (tmp_smpl_cnt != drvdata->smpl_ratio[drvdata->active_rosc]);

	/* disable the RND source for setting new parameters in HW */
	cc_iowrite(drvdata, CC_RND_SOURCE_ENABLE_REG_OFFSET, 0);

	/* clear all pending RNG interrupts before reconfiguring */
	cc_iowrite(drvdata, CC_RNG_ICR_REG_OFFSET, 0xFFFFFFFF);

	/* select the ring oscillator to sample from */
	cc_iowrite(drvdata, CC_TRNG_CONFIG_REG_OFFSET, drvdata->active_rosc);

	/* Debug Control register: set to 0 - no bypasses */
	cc_iowrite(drvdata, CC_TRNG_DEBUG_CONTROL_REG_OFFSET, 0);

	cc_trng_enable_rnd_source(drvdata);
}
314
315
/*
 * cc_trng_compwork_handler() - deferred work run after an RNG interrupt.
 *
 * Stops the entropy source, checks the ISR status, and on success moves
 * one EHR (entropy holding register) worth of words into the circular
 * buffer.  Depending on the outcome it then either re-arms the source
 * (more buffer space available), drops the PM reference and lets the HW
 * autosuspend, or retries with the next ring oscillator when the current
 * one produced an autocorrelation/watchdog error or an all-zero word.
 */
static void cc_trng_compwork_handler(struct work_struct *w)
{
	u32 isr = 0;
	u32 ehr_valid = 0;
	struct cctrng_drvdata *drvdata =
			container_of(w, struct cctrng_drvdata, compwork);
	struct device *dev = &(drvdata->pdev->dev);
	int i;

	/* stop DMA and the RNG source */
	cc_iowrite(drvdata, CC_RNG_DMA_ENABLE_REG_OFFSET, 0);
	cc_iowrite(drvdata, CC_RND_SOURCE_ENABLE_REG_OFFSET, 0);

	/* read RNG_ISR and check for errors */
	isr = cc_ioread(drvdata, CC_RNG_ISR_REG_OFFSET);
	ehr_valid = CC_REG_FLD_GET(RNG_ISR, EHR_VALID, isr);
	dev_dbg(dev, "Got RNG_ISR=0x%08X (EHR_VALID=%u)\n", isr, ehr_valid);

	if (fips_enabled && CC_REG_FLD_GET(RNG_ISR, CRNGT_ERR, isr)) {
		fips_fail_notify();
		/* FIPS error is fatal */
		panic("Got HW CRNGT error while fips is enabled!\n");
	}

	/* Clear all pending RNG interrupts */
	cc_iowrite(drvdata, CC_RNG_ICR_REG_OFFSET, isr);

	if (!ehr_valid) {
		/* in case of AUTOCORR/TIMEOUT error, try the next ROSC */
		if (CC_REG_FLD_GET(RNG_ISR, AUTOCORR_ERR, isr) ||
				CC_REG_FLD_GET(RNG_ISR, WATCHDOG, isr)) {
			dev_dbg(dev, "cctrng autocorr/timeout error.\n");
			goto next_rosc;
		}

		/* in case of VN error, ignore it */
	}

	/* read EHR data from registers */
	for (i = 0; i < CC_TRNG_EHR_IN_WORDS; i++) {
		/* calc word ptr in data_buf */
		u32 *buf = (u32 *)drvdata->circ.buf;

		buf[drvdata->circ.head] = cc_ioread(drvdata,
				CC_EHR_DATA_0_REG_OFFSET + (i*sizeof(u32)));

		/* EHR_DATA registers are cleared on read. In case 0 value was
		 * returned, restart the entropy collection.
		 */
		if (buf[drvdata->circ.head] == 0) {
			dev_dbg(dev, "Got 0 value in EHR. active_rosc %u\n",
				drvdata->active_rosc);
			goto next_rosc;
		}

		/* advance head by one 32-bit word */
		circ_idx_inc(&drvdata->circ.head, 1<<2);
	}

	atomic_set(&drvdata->pending_hw, 0);

	/* continue to fill data buffer if needed */
	if (circ_buf_space(drvdata) >= CC_TRNG_EHR_IN_WORDS) {
		if (atomic_cmpxchg(&drvdata->pending_hw, 0, 1) == 0) {
			/* Re-enable rnd source */
			cc_trng_enable_rnd_source(drvdata);
			return;
		}
	}

	/* buffer full (or another refill already claimed): let HW idle */
	cc_trng_pm_put_suspend(dev);

	dev_dbg(dev, "compwork handler done\n");
	return;

next_rosc:
	if ((circ_buf_space(drvdata) >= CC_TRNG_EHR_IN_WORDS) &&
			(cc_trng_change_rosc(drvdata) == 0)) {
		/* trigger trng hw with next rosc */
		cc_trng_hw_trigger(drvdata);
	} else {
		/* no usable rosc remains (or no space): give up for now */
		atomic_set(&drvdata->pending_hw, 0);
		cc_trng_pm_put_suspend(dev);
	}
}
400
401
static irqreturn_t cc_isr(int irq, void *dev_id)
402
{
403
struct cctrng_drvdata *drvdata = (struct cctrng_drvdata *)dev_id;
404
struct device *dev = &(drvdata->pdev->dev);
405
u32 irr;
406
407
/* if driver suspended return, probably shared interrupt */
408
if (pm_runtime_suspended(dev))
409
return IRQ_NONE;
410
411
/* read the interrupt status */
412
irr = cc_ioread(drvdata, CC_HOST_RGF_IRR_REG_OFFSET);
413
dev_dbg(dev, "Got IRR=0x%08X\n", irr);
414
415
if (irr == 0) /* Probably shared interrupt line */
416
return IRQ_NONE;
417
418
/* clear interrupt - must be before processing events */
419
cc_iowrite(drvdata, CC_HOST_RGF_ICR_REG_OFFSET, irr);
420
421
/* RNG interrupt - most probable */
422
if (irr & CC_HOST_RNG_IRQ_MASK) {
423
/* Mask RNG interrupts - will be unmasked in deferred work */
424
cc_iowrite(drvdata, CC_RNG_IMR_REG_OFFSET, 0xFFFFFFFF);
425
426
/* We clear RNG interrupt here,
427
* to avoid it from firing as we'll unmask RNG interrupts.
428
*/
429
cc_iowrite(drvdata, CC_HOST_RGF_ICR_REG_OFFSET,
430
CC_HOST_RNG_IRQ_MASK);
431
432
irr &= ~CC_HOST_RNG_IRQ_MASK;
433
434
/* schedule execution of deferred work handler */
435
schedule_work(&drvdata->compwork);
436
}
437
438
if (irr) {
439
dev_dbg_ratelimited(dev,
440
"IRR includes unknown cause bits (0x%08X)\n",
441
irr);
442
/* Just warning */
443
}
444
445
return IRQ_HANDLED;
446
}
447
448
static void cc_trng_startwork_handler(struct work_struct *w)
449
{
450
struct cctrng_drvdata *drvdata =
451
container_of(w, struct cctrng_drvdata, startwork);
452
453
drvdata->active_rosc = 0;
454
cc_trng_hw_trigger(drvdata);
455
}
456
457
/*
 * cctrng_probe() - platform driver probe.
 *
 * Allocates and wires up the driver state (all devm-managed), maps the
 * register bank, parses the rosc sampling ratios from the device tree,
 * installs the (shared) interrupt handler, initializes runtime PM,
 * registers with the hw_random core and finally triggers the first
 * entropy collection.  Ordering is significant: pending_hw is set to 1
 * before hwrng registration so a read callback cannot trigger the HW
 * until the explicit cc_trng_hw_trigger() below.
 *
 * Returns 0 on success or a negative error code.
 */
static int cctrng_probe(struct platform_device *pdev)
{
	struct cctrng_drvdata *drvdata;
	struct device *dev = &pdev->dev;
	int rc = 0;
	u32 val;
	int irq;

	/* Compile time assertion checks */
	BUILD_BUG_ON(CCTRNG_DATA_BUF_WORDS < 6);
	BUILD_BUG_ON((CCTRNG_DATA_BUF_WORDS & (CCTRNG_DATA_BUF_WORDS-1)) != 0);

	drvdata = devm_kzalloc(dev, sizeof(*drvdata), GFP_KERNEL);
	if (!drvdata)
		return -ENOMEM;

	drvdata->rng.name = devm_kstrdup(dev, dev_name(dev), GFP_KERNEL);
	if (!drvdata->rng.name)
		return -ENOMEM;

	drvdata->rng.read = cctrng_read;
	drvdata->rng.priv = (unsigned long)drvdata;
	drvdata->rng.quality = CC_TRNG_QUALITY;

	platform_set_drvdata(pdev, drvdata);
	drvdata->pdev = pdev;

	drvdata->circ.buf = (char *)drvdata->data_buf;

	drvdata->cc_base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(drvdata->cc_base))
		return dev_err_probe(dev, PTR_ERR(drvdata->cc_base), "Failed to ioremap registers");

	/* Then IRQ */
	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	/* parse sampling rate from device tree */
	rc = cc_trng_parse_sampling_ratio(drvdata);
	if (rc)
		return dev_err_probe(dev, rc, "Failed to get legal sampling ratio for rosc\n");

	drvdata->clk = devm_clk_get_optional_enabled(dev, NULL);
	if (IS_ERR(drvdata->clk))
		return dev_err_probe(dev, PTR_ERR(drvdata->clk),
				     "Failed to get or enable the clock\n");

	INIT_WORK(&drvdata->compwork, cc_trng_compwork_handler);
	INIT_WORK(&drvdata->startwork, cc_trng_startwork_handler);
	spin_lock_init(&drvdata->read_lock);

	/* register the driver isr function */
	rc = devm_request_irq(dev, irq, cc_isr, IRQF_SHARED, "cctrng", drvdata);
	if (rc)
		return dev_err_probe(dev, rc, "Could not register to interrupt %d\n", irq);
	dev_dbg(dev, "Registered to IRQ: %d\n", irq);

	/* Clear all pending interrupts */
	val = cc_ioread(drvdata, CC_HOST_RGF_IRR_REG_OFFSET);
	dev_dbg(dev, "IRR=0x%08X\n", val);
	cc_iowrite(drvdata, CC_HOST_RGF_ICR_REG_OFFSET, val);

	/* unmask HOST RNG interrupt */
	cc_iowrite(drvdata, CC_HOST_RGF_IMR_REG_OFFSET,
		   cc_ioread(drvdata, CC_HOST_RGF_IMR_REG_OFFSET) &
		   ~CC_HOST_RNG_IRQ_MASK);

	/* init PM */
	rc = cc_trng_pm_init(drvdata);
	if (rc)
		return dev_err_probe(dev, rc, "cc_trng_pm_init failed\n");

	/* increment device's usage counter */
	rc = cc_trng_pm_get(dev);
	if (rc)
		return dev_err_probe(dev, rc, "cc_trng_pm_get returned %x\n", rc);

	/* set pending_hw to verify that HW won't be triggered from read */
	atomic_set(&drvdata->pending_hw, 1);

	/* registration of the hwrng device */
	rc = devm_hwrng_register(dev, &drvdata->rng);
	if (rc) {
		dev_err(dev, "Could not register hwrng device.\n");
		goto post_pm_err;
	}

	/* trigger HW to start generate data */
	drvdata->active_rosc = 0;
	cc_trng_hw_trigger(drvdata);

	/* All set, we can allow auto-suspend */
	cc_trng_pm_go(drvdata);

	dev_info(dev, "ARM cctrng device initialized\n");

	return 0;

post_pm_err:
	/* undo cc_trng_pm_init(); the PM reference is dropped with it */
	cc_trng_pm_fini(drvdata);

	return rc;
}
561
562
/*
 * cctrng_remove() - platform driver remove.
 * Only runtime PM needs explicit teardown; IRQ, mapping, clock and
 * hwrng registration are all devm-managed.
 */
static void cctrng_remove(struct platform_device *pdev)
{
	struct cctrng_drvdata *drvdata = platform_get_drvdata(pdev);
	struct device *dev = &pdev->dev;

	dev_dbg(dev, "Releasing cctrng resources...\n");

	cc_trng_pm_fini(drvdata);

	dev_info(dev, "ARM cctrng device terminated\n");
}
573
574
/*
 * cctrng_suspend() - PM suspend callback.
 * Enables the CryptoCell power-down state, then gates the clock.
 * Always returns 0.
 */
static int __maybe_unused cctrng_suspend(struct device *dev)
{
	struct cctrng_drvdata *drvdata = dev_get_drvdata(dev);

	dev_dbg(dev, "set HOST_POWER_DOWN_EN\n");
	cc_iowrite(drvdata, CC_HOST_POWER_DOWN_EN_REG_OFFSET,
		   POWER_DOWN_ENABLE);

	clk_disable_unprepare(drvdata->clk);

	return 0;
}
586
587
static bool cctrng_wait_for_reset_completion(struct cctrng_drvdata *drvdata)
588
{
589
unsigned int val;
590
unsigned int i;
591
592
for (i = 0; i < CC_HW_RESET_LOOP_COUNT; i++) {
593
/* in cc7x3 NVM_IS_IDLE indicates that CC reset is
594
* completed and device is fully functional
595
*/
596
val = cc_ioread(drvdata, CC_NVM_IS_IDLE_REG_OFFSET);
597
if (val & BIT(CC_NVM_IS_IDLE_VALUE_BIT_SHIFT)) {
598
/* hw indicate reset completed */
599
return true;
600
}
601
/* allow scheduling other process on the processor */
602
schedule();
603
}
604
/* reset not completed */
605
return false;
606
}
607
608
/*
 * cctrng_resume() - PM resume callback.
 * Re-enables the clock, waits for the CryptoCell reset to complete,
 * then restores the interrupt mask and clears the power-down state.
 * Returns 0 on success, the clock error, or -EBUSY when the reset
 * never completes.
 */
static int __maybe_unused cctrng_resume(struct device *dev)
{
	struct cctrng_drvdata *drvdata = dev_get_drvdata(dev);
	int rc;

	dev_dbg(dev, "unset HOST_POWER_DOWN_EN\n");
	/* Enables the device source clk */
	rc = clk_prepare_enable(drvdata->clk);
	if (rc) {
		dev_err(dev, "failed getting clock back on. We're toast.\n");
		return rc;
	}

	/* wait for Cryptocell reset completion */
	if (!cctrng_wait_for_reset_completion(drvdata)) {
		dev_err(dev, "Cryptocell reset not completed");
		/* roll the clock back before failing */
		clk_disable_unprepare(drvdata->clk);
		return -EBUSY;
	}

	/* unmask HOST RNG interrupt */
	cc_iowrite(drvdata, CC_HOST_RGF_IMR_REG_OFFSET,
		   cc_ioread(drvdata, CC_HOST_RGF_IMR_REG_OFFSET) &
		   ~CC_HOST_RNG_IRQ_MASK);

	cc_iowrite(drvdata, CC_HOST_POWER_DOWN_EN_REG_OFFSET,
		   POWER_DOWN_DISABLE);

	return 0;
}
638
639
/* dev_pm_ops shared by system-sleep and runtime PM callbacks */
static UNIVERSAL_DEV_PM_OPS(cctrng_pm, cctrng_suspend, cctrng_resume, NULL);

/* device-tree match table: CryptoCell 713 and 703 TRNG blocks */
static const struct of_device_id arm_cctrng_dt_match[] = {
	{ .compatible = "arm,cryptocell-713-trng", },
	{ .compatible = "arm,cryptocell-703-trng", },
	{},
};
MODULE_DEVICE_TABLE(of, arm_cctrng_dt_match);

static struct platform_driver cctrng_driver = {
	.driver = {
		.name = "cctrng",
		.of_match_table = arm_cctrng_dt_match,
		.pm = &cctrng_pm,
	},
	.probe = cctrng_probe,
	.remove = cctrng_remove,
};

module_platform_driver(cctrng_driver);

/* Module description */
MODULE_DESCRIPTION("ARM CryptoCell TRNG Driver");
MODULE_AUTHOR("ARM");
MODULE_LICENSE("GPL v2");
664
665