GitHub Repository: torvalds/linux
Path: blob/master/drivers/char/hw_random/stm32-rng.c
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (c) 2015, Daniel Thompson
 */

#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/delay.h>
#include <linux/hw_random.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/reset.h>
#include <linux/slab.h>

#define RNG_CR	0x00
#define RNG_CR_RNGEN	BIT(2)
#define RNG_CR_CED	BIT(5)
#define RNG_CR_CONFIG1	GENMASK(11, 8)
#define RNG_CR_NISTC	BIT(12)
#define RNG_CR_CONFIG2	GENMASK(15, 13)
#define RNG_CR_CLKDIV_SHIFT	16
#define RNG_CR_CLKDIV	GENMASK(19, 16)
#define RNG_CR_CONFIG3	GENMASK(25, 20)
#define RNG_CR_CONDRST	BIT(30)
#define RNG_CR_CONFLOCK	BIT(31)
#define RNG_CR_ENTROPY_SRC_MASK	(RNG_CR_CONFIG1 | RNG_CR_NISTC | RNG_CR_CONFIG2 | RNG_CR_CONFIG3)
#define RNG_CR_CONFIG_MASK	(RNG_CR_ENTROPY_SRC_MASK | RNG_CR_CED | RNG_CR_CLKDIV)

#define RNG_SR	0x04
#define RNG_SR_DRDY	BIT(0)
#define RNG_SR_CECS	BIT(1)
#define RNG_SR_SECS	BIT(2)
#define RNG_SR_CEIS	BIT(5)
#define RNG_SR_SEIS	BIT(6)

#define RNG_DR	0x08

#define RNG_NSCR	0x0C
#define RNG_NSCR_MASK	GENMASK(17, 0)

#define RNG_HTCR	0x10

#define RNG_NB_RECOVER_TRIES	3

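/*
 * Note: RNG_CR_CLKDIV holds a power-of-two divider exponent for the RNG
 * clock (computed in stm32_rng_clock_freq_restrain() below), and
 * RNG_CR_ENTROPY_SRC_MASK groups the fields that configure the entropy
 * source on CONDRST-capable variants.
 */
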
struct stm32_rng_data {
	uint max_clock_rate;
	uint nb_clock;
	u32 cr;
	u32 nscr;
	u32 htcr;
	bool has_cond_reset;
};

/**
 * struct stm32_rng_config - RNG configuration data
 *
 * @cr: RNG configuration. 0 means default hardware RNG configuration
 * @nscr: Noise sources control configuration.
 * @htcr: Health tests configuration.
 */
struct stm32_rng_config {
	u32 cr;
	u32 nscr;
	u32 htcr;
};

struct stm32_rng_private {
	struct hwrng rng;
	struct device *dev;
	void __iomem *base;
	struct clk_bulk_data *clk_bulk;
	struct reset_control *rst;
	struct stm32_rng_config pm_conf;
	const struct stm32_rng_data *data;
	bool ced;
	bool lock_conf;
};

/*
 * Extracts from the STM32 RNG specification when RNG supports CONDRST.
 *
 * When a noise source (or seed) error occurs, the RNG stops generating
 * random numbers and sets to “1” both SEIS and SECS bits to indicate
 * that a seed error occurred. (...)
 *
 * 1. Software reset by writing CONDRST at 1 and at 0 (see bitfield
 * description for details). This step is needed only if SECS is set.
 * Indeed, when SEIS is set and SECS is cleared it means RNG performed
 * the reset automatically (auto-reset).
 * 2. If SECS was set in step 1 (no auto-reset) wait for CONDRST
 * to be cleared in the RNG_CR register, then confirm that SEIS is
 * cleared in the RNG_SR register. Otherwise just clear SEIS bit in
 * the RNG_SR register.
 * 3. If SECS was set in step 1 (no auto-reset) wait for SECS to be
 * cleared by RNG. The random number generation is now back to normal.
 */
static int stm32_rng_conceal_seed_error_cond_reset(struct stm32_rng_private *priv)
{
	struct device *dev = priv->dev;
	u32 sr = readl_relaxed(priv->base + RNG_SR);
	u32 cr = readl_relaxed(priv->base + RNG_CR);
	int err;

	if (sr & RNG_SR_SECS) {
		/* Conceal by resetting the subsystem (step 1.) */
		writel_relaxed(cr | RNG_CR_CONDRST, priv->base + RNG_CR);
		writel_relaxed(cr & ~RNG_CR_CONDRST, priv->base + RNG_CR);
	} else {
		/* RNG auto-reset (step 2.) */
		writel_relaxed(sr & ~RNG_SR_SEIS, priv->base + RNG_SR);
		goto end;
	}

	err = readl_relaxed_poll_timeout_atomic(priv->base + RNG_CR, cr, !(cr & RNG_CR_CONDRST), 10,
						100000);
	if (err) {
		dev_err(dev, "%s: timeout %x\n", __func__, sr);
		return err;
	}

	/* Check SEIS is cleared (step 2.) */
	if (readl_relaxed(priv->base + RNG_SR) & RNG_SR_SEIS)
		return -EINVAL;

	err = readl_relaxed_poll_timeout_atomic(priv->base + RNG_SR, sr, !(sr & RNG_SR_SECS), 10,
						100000);
	if (err) {
		dev_err(dev, "%s: timeout %x\n", __func__, sr);
		return err;
	}

end:
	return 0;
}

/*
 * Extracts from the STM32 RNG specification, when CONDRST is not supported
 *
 * When a noise source (or seed) error occurs, the RNG stops generating
 * random numbers and sets to “1” both SEIS and SECS bits to indicate
 * that a seed error occurred. (...)
 *
 * The following sequence shall be used to fully recover from a seed
 * error after the RNG initialization:
 * 1. Clear the SEIS bit by writing it to “0”.
 * 2. Read out 12 words from the RNG_DR register, and discard each of
 * them in order to clean the pipeline.
 * 3. Confirm that SEIS is still cleared. Random number generation is
 * back to normal.
 */
static int stm32_rng_conceal_seed_error_sw_reset(struct stm32_rng_private *priv)
{
	unsigned int i = 0;
	u32 sr = readl_relaxed(priv->base + RNG_SR);

	writel_relaxed(sr & ~RNG_SR_SEIS, priv->base + RNG_SR);

	for (i = 12; i != 0; i--)
		(void)readl_relaxed(priv->base + RNG_DR);

	if (readl_relaxed(priv->base + RNG_SR) & RNG_SR_SEIS)
		return -EINVAL;

	return 0;
}

static int stm32_rng_conceal_seed_error(struct hwrng *rng)
{
	struct stm32_rng_private *priv = container_of(rng, struct stm32_rng_private, rng);

	dev_dbg(priv->dev, "Concealing seed error\n");

	if (priv->data->has_cond_reset)
		return stm32_rng_conceal_seed_error_cond_reset(priv);
	else
		return stm32_rng_conceal_seed_error_sw_reset(priv);
}

static int stm32_rng_read(struct hwrng *rng, void *data, size_t max, bool wait)
{
	struct stm32_rng_private *priv = container_of(rng, struct stm32_rng_private, rng);
	unsigned int i = 0;
	int retval = 0, err = 0;
	u32 sr;

	retval = pm_runtime_resume_and_get(priv->dev);
	if (retval)
		return retval;

	if (readl_relaxed(priv->base + RNG_SR) & RNG_SR_SEIS)
		stm32_rng_conceal_seed_error(rng);

	while (max >= sizeof(u32)) {
		sr = readl_relaxed(priv->base + RNG_SR);
		/*
		 * Manage timeout which is based on timer and take
		 * care of initial delay time when enabling the RNG.
		 */
		if (!sr && wait) {
			err = readl_relaxed_poll_timeout_atomic(priv->base
								+ RNG_SR,
								sr, sr,
								10, 50000);
			if (err) {
				dev_err(priv->dev, "%s: timeout %x!\n", __func__, sr);
				break;
			}
		} else if (!sr) {
			/* The FIFO is being filled up */
			break;
		}

		if (sr != RNG_SR_DRDY) {
			if (sr & RNG_SR_SEIS) {
				err = stm32_rng_conceal_seed_error(rng);
				i++;
				if (err && i > RNG_NB_RECOVER_TRIES) {
					dev_err(priv->dev, "Couldn't recover from seed error\n");
					retval = -ENOTRECOVERABLE;
					goto exit_rpm;
				}

				continue;
			}

			if (WARN_ONCE((sr & RNG_SR_CEIS), "RNG clock too slow - %x\n", sr))
				writel_relaxed(0, priv->base + RNG_SR);
		}

		/* Late seed error case: DR being 0 is an error status */
		*(u32 *)data = readl_relaxed(priv->base + RNG_DR);
		if (!*(u32 *)data) {
			err = stm32_rng_conceal_seed_error(rng);
			i++;
			if (err && i > RNG_NB_RECOVER_TRIES) {
				dev_err(priv->dev, "Couldn't recover from seed error\n");
				retval = -ENOTRECOVERABLE;
				goto exit_rpm;
			}

			continue;
		}

		i = 0;
		retval += sizeof(u32);
		data += sizeof(u32);
		max -= sizeof(u32);
	}

exit_rpm:
	pm_runtime_put_sync_autosuspend(priv->dev);

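	/*
	 * Return the byte count (or negative error) accumulated above; if no
	 * data was produced and the caller was willing to wait, report -EIO
	 * rather than 0.
	 */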
	return retval || !wait ? retval : -EIO;
}

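/*
 * Compute the smallest exponent such that the RNG clock divided by
 * 2^exponent does not exceed max_clock_rate. For example (illustrative
 * figures only), a 64 MHz input clock with a 48 MHz limit gives
 * clock_div = 1, i.e. a 32 MHz RNG clock.
 */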
static uint stm32_rng_clock_freq_restrain(struct hwrng *rng)
{
	struct stm32_rng_private *priv =
		container_of(rng, struct stm32_rng_private, rng);
	unsigned long clock_rate = 0;
	uint clock_div = 0;

	clock_rate = clk_get_rate(priv->clk_bulk[0].clk);

	/*
	 * Get the exponent to apply on the CLKDIV field in RNG_CR register
	 * No need to handle the case when clock-div > 0xF as it is physically
	 * impossible
	 */
	while ((clock_rate >> clock_div) > priv->data->max_clock_rate)
		clock_div++;

	pr_debug("RNG clk rate : %lu\n", clk_get_rate(priv->clk_bulk[0].clk) >> clock_div);

	return clock_div;
}

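/*
 * On variants with CONDRST, the RNG_CR configuration fields must be written
 * together with CONDRST set and are only taken into account once CONDRST is
 * cleared again (see also the comment in stm32_rng_resume()).
 */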
static int stm32_rng_init(struct hwrng *rng)
{
	struct stm32_rng_private *priv =
		container_of(rng, struct stm32_rng_private, rng);
	int err;
	u32 reg;

	err = clk_bulk_prepare_enable(priv->data->nb_clock, priv->clk_bulk);
	if (err)
		return err;

	/* clear error indicators */
	writel_relaxed(0, priv->base + RNG_SR);

	reg = readl_relaxed(priv->base + RNG_CR);

	/*
	 * Keep default RNG configuration if none was specified.
	 * 0 is an invalid value as it disables all entropy sources.
	 */
	if (priv->data->has_cond_reset && priv->data->cr) {
		uint clock_div = stm32_rng_clock_freq_restrain(rng);

		reg &= ~RNG_CR_CONFIG_MASK;
		reg |= RNG_CR_CONDRST | (priv->data->cr & RNG_CR_ENTROPY_SRC_MASK) |
		       (clock_div << RNG_CR_CLKDIV_SHIFT);
		if (priv->ced)
			reg &= ~RNG_CR_CED;
		else
			reg |= RNG_CR_CED;
		writel_relaxed(reg, priv->base + RNG_CR);

		/* Health tests and noise control registers */
		writel_relaxed(priv->data->htcr, priv->base + RNG_HTCR);
		writel_relaxed(priv->data->nscr & RNG_NSCR_MASK, priv->base + RNG_NSCR);

		reg &= ~RNG_CR_CONDRST;
		reg |= RNG_CR_RNGEN;
		if (priv->lock_conf)
			reg |= RNG_CR_CONFLOCK;

		writel_relaxed(reg, priv->base + RNG_CR);

		err = readl_relaxed_poll_timeout_atomic(priv->base + RNG_CR, reg,
							(!(reg & RNG_CR_CONDRST)),
							10, 50000);
		if (err) {
			clk_bulk_disable_unprepare(priv->data->nb_clock, priv->clk_bulk);
			dev_err(priv->dev, "%s: timeout %x!\n", __func__, reg);
			return -EINVAL;
		}
	} else {
		/* Handle all RNG versions by checking if conditional reset should be set */
		if (priv->data->has_cond_reset)
			reg |= RNG_CR_CONDRST;

		if (priv->ced)
			reg &= ~RNG_CR_CED;
		else
			reg |= RNG_CR_CED;

		writel_relaxed(reg, priv->base + RNG_CR);

		if (priv->data->has_cond_reset)
			reg &= ~RNG_CR_CONDRST;

		reg |= RNG_CR_RNGEN;

		writel_relaxed(reg, priv->base + RNG_CR);
	}

	err = readl_relaxed_poll_timeout_atomic(priv->base + RNG_SR, reg,
						reg & RNG_SR_DRDY,
						10, 100000);
	if (err || (reg & ~RNG_SR_DRDY)) {
		clk_bulk_disable_unprepare(priv->data->nb_clock, priv->clk_bulk);
		dev_err(priv->dev, "%s: timeout:%x SR: %x!\n", __func__, err, reg);

		return -EINVAL;
	}

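	/*
	 * Configuration is done; the clocks are re-enabled on demand around
	 * each read through runtime PM (see stm32_rng_runtime_resume()).
	 */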
	clk_bulk_disable_unprepare(priv->data->nb_clock, priv->clk_bulk);

	return 0;
}

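/*
 * Everything else set up in probe (hwrng registration, MMIO mapping, clock
 * handles) is devm-managed; only the runtime PM enable needs to be undone
 * explicitly here.
 */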
static void stm32_rng_remove(struct platform_device *ofdev)
{
	pm_runtime_disable(&ofdev->dev);
}

static int __maybe_unused stm32_rng_runtime_suspend(struct device *dev)
{
	struct stm32_rng_private *priv = dev_get_drvdata(dev);
	u32 reg;

	reg = readl_relaxed(priv->base + RNG_CR);
	reg &= ~RNG_CR_RNGEN;
	writel_relaxed(reg, priv->base + RNG_CR);

	clk_bulk_disable_unprepare(priv->data->nb_clock, priv->clk_bulk);

	return 0;
}

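/*
 * System sleep: save the configuration registers (CR, plus NSCR/HTCR on
 * CONDRST-capable variants) so that stm32_rng_resume() can reprogram them.
 */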
static int __maybe_unused stm32_rng_suspend(struct device *dev)
{
	struct stm32_rng_private *priv = dev_get_drvdata(dev);
	int err;

	err = clk_bulk_prepare_enable(priv->data->nb_clock, priv->clk_bulk);
	if (err)
		return err;

	if (priv->data->has_cond_reset) {
		priv->pm_conf.nscr = readl_relaxed(priv->base + RNG_NSCR);
		priv->pm_conf.htcr = readl_relaxed(priv->base + RNG_HTCR);
	}

	/* Do not save that RNG is enabled as it will be handled at resume */
	priv->pm_conf.cr = readl_relaxed(priv->base + RNG_CR) & ~RNG_CR_RNGEN;

	writel_relaxed(priv->pm_conf.cr, priv->base + RNG_CR);

	clk_bulk_disable_unprepare(priv->data->nb_clock, priv->clk_bulk);

	return 0;
}

static int __maybe_unused stm32_rng_runtime_resume(struct device *dev)
{
	struct stm32_rng_private *priv = dev_get_drvdata(dev);
	int err;
	u32 reg;

	err = clk_bulk_prepare_enable(priv->data->nb_clock, priv->clk_bulk);
	if (err)
		return err;

	/* Clean error indications */
	writel_relaxed(0, priv->base + RNG_SR);

	reg = readl_relaxed(priv->base + RNG_CR);
	reg |= RNG_CR_RNGEN;
	writel_relaxed(reg, priv->base + RNG_CR);

	return 0;
}

static int __maybe_unused stm32_rng_resume(struct device *dev)
{
	struct stm32_rng_private *priv = dev_get_drvdata(dev);
	int err;
	u32 reg;

	err = clk_bulk_prepare_enable(priv->data->nb_clock, priv->clk_bulk);
	if (err)
		return err;

	/* Clean error indications */
	writel_relaxed(0, priv->base + RNG_SR);

	if (priv->data->has_cond_reset) {
		/*
		 * Correct configuration in bits [29:4] must be set in the same
		 * access that set RNG_CR_CONDRST bit. Else config setting is
		 * not taken into account. CONFIGLOCK bit must also be unset but
		 * it is not handled at the moment.
		 */
		writel_relaxed(priv->pm_conf.cr | RNG_CR_CONDRST, priv->base + RNG_CR);

		writel_relaxed(priv->pm_conf.nscr, priv->base + RNG_NSCR);
		writel_relaxed(priv->pm_conf.htcr, priv->base + RNG_HTCR);

		reg = readl_relaxed(priv->base + RNG_CR);
		reg |= RNG_CR_RNGEN;
		reg &= ~RNG_CR_CONDRST;
		writel_relaxed(reg, priv->base + RNG_CR);

		err = readl_relaxed_poll_timeout_atomic(priv->base + RNG_CR, reg,
							reg & ~RNG_CR_CONDRST, 10, 100000);

		if (err) {
			clk_bulk_disable_unprepare(priv->data->nb_clock, priv->clk_bulk);
			dev_err(priv->dev, "%s: timeout:%x CR: %x!\n", __func__, err, reg);
			return -EINVAL;
		}
	} else {
		reg = priv->pm_conf.cr;
		reg |= RNG_CR_RNGEN;
		writel_relaxed(reg, priv->base + RNG_CR);
	}

	clk_bulk_disable_unprepare(priv->data->nb_clock, priv->clk_bulk);

	return 0;
}

static const struct dev_pm_ops __maybe_unused stm32_rng_pm_ops = {
	SET_RUNTIME_PM_OPS(stm32_rng_runtime_suspend,
			   stm32_rng_runtime_resume, NULL)
	SET_SYSTEM_SLEEP_PM_OPS(stm32_rng_suspend,
				stm32_rng_resume)
};

static const struct stm32_rng_data stm32mp25_rng_data = {
	.has_cond_reset = true,
	.max_clock_rate = 48000000,
	.nb_clock = 2,
	.cr = 0x00F00D00,
	.nscr = 0x2B5BB,
	.htcr = 0x969D,
};

static const struct stm32_rng_data stm32mp13_rng_data = {
	.has_cond_reset = true,
	.max_clock_rate = 48000000,
	.nb_clock = 1,
	.cr = 0x00F00D00,
	.nscr = 0x2B5BB,
	.htcr = 0x969D,
};

static const struct stm32_rng_data stm32_rng_data = {
	.has_cond_reset = false,
	.max_clock_rate = 48000000,
	.nb_clock = 1,
};

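/*
 * Minimal, illustrative device tree node (the unit address and clock/reset
 * phandles below are placeholders, not taken from a real board; the
 * authoritative bindings live under Documentation/devicetree/bindings/rng/):
 *
 *	rng: rng@54004000 {
 *		compatible = "st,stm32mp13-rng";
 *		reg = <0x54004000 0x400>;
 *		clocks = <&rcc RNG1_K>;
 *		resets = <&rcc RNG1_R>;
 *		clock-error-detect;
 *	};
 */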
static const struct of_device_id stm32_rng_match[] = {
	{
		.compatible = "st,stm32mp25-rng",
		.data = &stm32mp25_rng_data,
	},
	{
		.compatible = "st,stm32mp13-rng",
		.data = &stm32mp13_rng_data,
	},
	{
		.compatible = "st,stm32-rng",
		.data = &stm32_rng_data,
	},
	{},
};
MODULE_DEVICE_TABLE(of, stm32_rng_match);

static int stm32_rng_probe(struct platform_device *ofdev)
{
	struct device *dev = &ofdev->dev;
	struct device_node *np = ofdev->dev.of_node;
	struct stm32_rng_private *priv;
	struct resource *res;
	int ret;

	priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	priv->base = devm_platform_get_and_ioremap_resource(ofdev, 0, &res);
	if (IS_ERR(priv->base))
		return PTR_ERR(priv->base);

	priv->rst = devm_reset_control_get(&ofdev->dev, NULL);
	if (!IS_ERR(priv->rst)) {
		reset_control_assert(priv->rst);
		udelay(2);
		reset_control_deassert(priv->rst);
	}

	priv->ced = of_property_read_bool(np, "clock-error-detect");
	priv->lock_conf = of_property_read_bool(np, "st,rng-lock-conf");
	priv->dev = dev;

	priv->data = of_device_get_match_data(dev);
	if (!priv->data)
		return -ENODEV;

	dev_set_drvdata(dev, priv);

	priv->rng.name = dev_driver_string(dev);
	priv->rng.init = stm32_rng_init;
	priv->rng.read = stm32_rng_read;
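	/*
	 * Entropy estimate reported to the hwrng core: roughly 900 bits of
	 * entropy per 1024 bits of output.
	 */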
	priv->rng.quality = 900;

	if (!priv->data->nb_clock || priv->data->nb_clock > 2)
		return -EINVAL;

	ret = devm_clk_bulk_get_all(dev, &priv->clk_bulk);
	if (ret != priv->data->nb_clock)
		return dev_err_probe(dev, -EINVAL, "Failed to get clocks: %d\n", ret);

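	/*
	 * With two clocks, make sure the one named "core" ends up at index 0
	 * of the bulk array, since the rest of the driver (e.g.
	 * stm32_rng_clock_freq_restrain()) assumes clk_bulk[0] is the clock
	 * whose rate is checked against max_clock_rate.
	 */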
	if (priv->data->nb_clock == 2) {
		const char *id = priv->clk_bulk[1].id;
		struct clk *clk = priv->clk_bulk[1].clk;

		if (!priv->clk_bulk[0].id || !priv->clk_bulk[1].id)
			return dev_err_probe(dev, -EINVAL, "Missing clock name\n");

		if (strcmp(priv->clk_bulk[0].id, "core")) {
			priv->clk_bulk[1].id = priv->clk_bulk[0].id;
			priv->clk_bulk[1].clk = priv->clk_bulk[0].clk;
			priv->clk_bulk[0].id = id;
			priv->clk_bulk[0].clk = clk;
		}
	}

	pm_runtime_set_autosuspend_delay(dev, 100);
	pm_runtime_use_autosuspend(dev);
	pm_runtime_enable(dev);

	return devm_hwrng_register(dev, &priv->rng);
}

static struct platform_driver stm32_rng_driver = {
	.driver = {
		.name = "stm32-rng",
		.pm = pm_ptr(&stm32_rng_pm_ops),
		.of_match_table = stm32_rng_match,
	},
	.probe = stm32_rng_probe,
	.remove = stm32_rng_remove,
};

module_platform_driver(stm32_rng_driver);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Daniel Thompson <[email protected]>");
MODULE_DESCRIPTION("STMicroelectronics STM32 RNG device driver");