hwrng: omap - Fix RNG wait loop timeout

/*
 * omap-rng.c - RNG driver for TI OMAP CPU family
 *
 * Author: Deepak Saxena <dsaxena@plexity.net>
 *
 * Copyright 2005 (c) MontaVista Software, Inc.
 *
 * Mostly based on original driver:
 *
 * Copyright (C) 2005 Nokia Corporation
 * Author: Juha Yrjölä <juha.yrjola@nokia.com>
 *
 * This file is licensed under the terms of the GNU General Public
 * License version 2. This program is licensed "as is" without any
 * warranty of any kind, whether express or implied.
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/random.h>
#include <linux/err.h>
#include <linux/platform_device.h>
#include <linux/hw_random.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/pm_runtime.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_address.h>
#include <linux/interrupt.h>
#include <linux/clk.h>

#include <asm/io.h>

#define RNG_REG_STATUS_RDY                      (1 << 0)

#define RNG_REG_INTACK_RDY_MASK                 (1 << 0)
#define RNG_REG_INTACK_SHUTDOWN_OFLO_MASK       (1 << 1)
#define RNG_SHUTDOWN_OFLO_MASK                  (1 << 1)

#define RNG_CONTROL_STARTUP_CYCLES_SHIFT        16
#define RNG_CONTROL_STARTUP_CYCLES_MASK         (0xffff << 16)
#define RNG_CONTROL_ENABLE_TRNG_SHIFT           10
#define RNG_CONTROL_ENABLE_TRNG_MASK            (1 << 10)

#define RNG_CONFIG_MAX_REFIL_CYCLES_SHIFT       16
#define RNG_CONFIG_MAX_REFIL_CYCLES_MASK        (0xffff << 16)
#define RNG_CONFIG_MIN_REFIL_CYCLES_SHIFT       0
#define RNG_CONFIG_MIN_REFIL_CYCLES_MASK        (0xff << 0)

#define RNG_CONTROL_STARTUP_CYCLES              0xff
#define RNG_CONFIG_MIN_REFIL_CYCLES             0x21
#define RNG_CONFIG_MAX_REFIL_CYCLES             0x22

#define RNG_ALARMCNT_ALARM_TH_SHIFT             0x0
#define RNG_ALARMCNT_ALARM_TH_MASK              (0xff << 0)
#define RNG_ALARMCNT_SHUTDOWN_TH_SHIFT          16
#define RNG_ALARMCNT_SHUTDOWN_TH_MASK           (0x1f << 16)
#define RNG_ALARM_THRESHOLD                     0xff
#define RNG_SHUTDOWN_THRESHOLD                  0x4

#define RNG_REG_FROENABLE_MASK                  0xffffff
#define RNG_REG_FRODETUNE_MASK                  0xffffff

#define OMAP2_RNG_OUTPUT_SIZE                   0x4
#define OMAP4_RNG_OUTPUT_SIZE                   0x8
#define EIP76_RNG_OUTPUT_SIZE                   0x10

/*
 * The EIP76 RNG takes approximately 700 us to produce 16 bytes of output
 * data, as measured in testing. To allow for udelay()'s limited accuracy,
 * the total wait budget is kept at 1000 us.
 */
#define RNG_DATA_FILL_TIMEOUT                   100
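
/*
 * omap_rng_do_read() below polls data_present() up to RNG_DATA_FILL_TIMEOUT
 * times with a udelay(10) between attempts, i.e. a worst case of
 * 100 * 10 us = 1000 us, which covers the ~700 us the EIP76 needs to
 * produce 16 bytes.
 */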

enum {
        RNG_OUTPUT_0_REG = 0,
        RNG_OUTPUT_1_REG,
        RNG_OUTPUT_2_REG,
        RNG_OUTPUT_3_REG,
        RNG_STATUS_REG,
        RNG_INTMASK_REG,
        RNG_INTACK_REG,
        RNG_CONTROL_REG,
        RNG_CONFIG_REG,
        RNG_ALARMCNT_REG,
        RNG_FROENABLE_REG,
        RNG_FRODETUNE_REG,
        RNG_ALARMMASK_REG,
        RNG_ALARMSTOP_REG,
        RNG_REV_REG,
        RNG_SYSCONFIG_REG,
};

static const u16 reg_map_omap2[] = {
        [RNG_OUTPUT_0_REG]  = 0x0,
        [RNG_STATUS_REG]    = 0x4,
        [RNG_CONFIG_REG]    = 0x28,
        [RNG_REV_REG]       = 0x3c,
        [RNG_SYSCONFIG_REG] = 0x40,
};

static const u16 reg_map_omap4[] = {
        [RNG_OUTPUT_0_REG]  = 0x0,
        [RNG_OUTPUT_1_REG]  = 0x4,
        [RNG_STATUS_REG]    = 0x8,
        [RNG_INTMASK_REG]   = 0xc,
        [RNG_INTACK_REG]    = 0x10,
        [RNG_CONTROL_REG]   = 0x14,
        [RNG_CONFIG_REG]    = 0x18,
        [RNG_ALARMCNT_REG]  = 0x1c,
        [RNG_FROENABLE_REG] = 0x20,
        [RNG_FRODETUNE_REG] = 0x24,
        [RNG_ALARMMASK_REG] = 0x28,
        [RNG_ALARMSTOP_REG] = 0x2c,
        [RNG_REV_REG]       = 0x1FE0,
        [RNG_SYSCONFIG_REG] = 0x1FE4,
};

static const u16 reg_map_eip76[] = {
        [RNG_OUTPUT_0_REG]  = 0x0,
        [RNG_OUTPUT_1_REG]  = 0x4,
        [RNG_OUTPUT_2_REG]  = 0x8,
        [RNG_OUTPUT_3_REG]  = 0xc,
        [RNG_STATUS_REG]    = 0x10,
        [RNG_INTACK_REG]    = 0x10,
        [RNG_CONTROL_REG]   = 0x14,
        [RNG_CONFIG_REG]    = 0x18,
        [RNG_ALARMCNT_REG]  = 0x1c,
        [RNG_FROENABLE_REG] = 0x20,
        [RNG_FRODETUNE_REG] = 0x24,
        [RNG_ALARMMASK_REG] = 0x28,
        [RNG_ALARMSTOP_REG] = 0x2c,
        [RNG_REV_REG]       = 0x7c,
};

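/*
 * Registers an IP revision does not implement are simply left out of its
 * map above, so their entries default to offset 0.  The code relies on
 * this: omap_rng_do_read() only writes the interrupt-acknowledge register
 * when regs[RNG_INTACK_REG] is non-zero, and the probe path picks between
 * RNG_INTMASK_REG and RNG_CONTROL_REG the same way.  Note that on the
 * EIP76 the status and interrupt-acknowledge registers share offset 0x10.
 */
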
struct omap_rng_dev;
/**
 * struct omap_rng_pdata - RNG IP block-specific data
 * @regs: Pointer to the register offsets structure.
 * @data_size: Number of bytes in the RNG output.
 * @data_present: Callback to determine if data is available.
 * @init: Callback for the IP-specific initialization sequence.
 * @cleanup: Callback for the IP-specific cleanup sequence.
 */
struct omap_rng_pdata {
        u16 *regs;
        u32 data_size;
        u32 (*data_present)(struct omap_rng_dev *priv);
        int (*init)(struct omap_rng_dev *priv);
        void (*cleanup)(struct omap_rng_dev *priv);
};

struct omap_rng_dev {
        void __iomem *base;
        struct device *dev;
        const struct omap_rng_pdata *pdata;
        struct hwrng rng;
        struct clk *clk;
};

static inline u32 omap_rng_read(struct omap_rng_dev *priv, u16 reg)
{
        return __raw_readl(priv->base + priv->pdata->regs[reg]);
}

static inline void omap_rng_write(struct omap_rng_dev *priv, u16 reg,
                                  u32 val)
{
        __raw_writel(val, priv->base + priv->pdata->regs[reg]);
}

static int omap_rng_do_read(struct hwrng *rng, void *data, size_t max,
                            bool wait)
{
        struct omap_rng_dev *priv;
        int i, present;

        priv = (struct omap_rng_dev *)rng->priv;

        if (max < priv->pdata->data_size)
                return 0;

        for (i = 0; i < RNG_DATA_FILL_TIMEOUT; i++) {
                present = priv->pdata->data_present(priv);
                if (present || !wait)
                        break;

                udelay(10);
        }
        if (!present)
                return 0;

        memcpy_fromio(data, priv->base + priv->pdata->regs[RNG_OUTPUT_0_REG],
                      priv->pdata->data_size);

        if (priv->pdata->regs[RNG_INTACK_REG])
                omap_rng_write(priv, RNG_INTACK_REG, RNG_REG_INTACK_RDY_MASK);

        return priv->pdata->data_size;
}

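/*
 * When this device is the active hardware RNG, the bytes read above are
 * what the hw_random core hands out through /dev/hwrng.  A minimal
 * userspace sketch (illustrative only, no error handling):
 *
 *        int fd = open("/dev/hwrng", O_RDONLY);
 *        unsigned char buf[16];
 *
 *        read(fd, buf, sizeof(buf));
 *        close(fd);
 */
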
static int omap_rng_init(struct hwrng *rng)
{
        struct omap_rng_dev *priv;

        priv = (struct omap_rng_dev *)rng->priv;
        return priv->pdata->init(priv);
}

static void omap_rng_cleanup(struct hwrng *rng)
{
        struct omap_rng_dev *priv;

        priv = (struct omap_rng_dev *)rng->priv;
        priv->pdata->cleanup(priv);
}

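/*
 * The OMAP2 RNG reports a non-zero status while it is still busy
 * generating the next word, so a zero status is treated as "data ready".
 */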
static inline u32 omap2_rng_data_present(struct omap_rng_dev *priv)
{
        return omap_rng_read(priv, RNG_STATUS_REG) ? 0 : 1;
}

static int omap2_rng_init(struct omap_rng_dev *priv)
{
        omap_rng_write(priv, RNG_SYSCONFIG_REG, 0x1);
        return 0;
}

static void omap2_rng_cleanup(struct omap_rng_dev *priv)
{
        omap_rng_write(priv, RNG_SYSCONFIG_REG, 0x0);
}

static struct omap_rng_pdata omap2_rng_pdata = {
        .regs         = (u16 *)reg_map_omap2,
        .data_size    = OMAP2_RNG_OUTPUT_SIZE,
        .data_present = omap2_rng_data_present,
        .init         = omap2_rng_init,
        .cleanup      = omap2_rng_cleanup,
};

#if defined(CONFIG_OF)
static inline u32 omap4_rng_data_present(struct omap_rng_dev *priv)
{
        return omap_rng_read(priv, RNG_STATUS_REG) & RNG_REG_STATUS_RDY;
}

static int eip76_rng_init(struct omap_rng_dev *priv)
{
        u32 val;

        /* Return if RNG is already running. */
        if (omap_rng_read(priv, RNG_CONTROL_REG) & RNG_CONTROL_ENABLE_TRNG_MASK)
                return 0;

        /* Number of 512-bit blocks of raw Noise Source output data that must
         * be processed by either the Conditioning Function or the
         * SP 800-90 DRBG 'BC_DF' functionality to yield a 'full entropy'
         * output value.
         */
        val = 0x5 << RNG_CONFIG_MIN_REFIL_CYCLES_SHIFT;

        /* Number of FRO samples that are XOR-ed together into one bit to be
         * shifted into the main shift register.
         */
        val |= RNG_CONFIG_MAX_REFIL_CYCLES << RNG_CONFIG_MAX_REFIL_CYCLES_SHIFT;
        omap_rng_write(priv, RNG_CONFIG_REG, val);

        /* Enable all available FROs */
        omap_rng_write(priv, RNG_FRODETUNE_REG, 0x0);
        omap_rng_write(priv, RNG_FROENABLE_REG, RNG_REG_FROENABLE_MASK);

        /* Enable TRNG */
        val = RNG_CONTROL_ENABLE_TRNG_MASK;
        omap_rng_write(priv, RNG_CONTROL_REG, val);

        return 0;
}

static int omap4_rng_init(struct omap_rng_dev *priv)
{
        u32 val;

        /* Return if RNG is already running. */
        if (omap_rng_read(priv, RNG_CONTROL_REG) & RNG_CONTROL_ENABLE_TRNG_MASK)
                return 0;

        val = RNG_CONFIG_MIN_REFIL_CYCLES << RNG_CONFIG_MIN_REFIL_CYCLES_SHIFT;
        val |= RNG_CONFIG_MAX_REFIL_CYCLES << RNG_CONFIG_MAX_REFIL_CYCLES_SHIFT;
        omap_rng_write(priv, RNG_CONFIG_REG, val);

        omap_rng_write(priv, RNG_FRODETUNE_REG, 0x0);
        omap_rng_write(priv, RNG_FROENABLE_REG, RNG_REG_FROENABLE_MASK);
        val = RNG_ALARM_THRESHOLD << RNG_ALARMCNT_ALARM_TH_SHIFT;
        val |= RNG_SHUTDOWN_THRESHOLD << RNG_ALARMCNT_SHUTDOWN_TH_SHIFT;
        omap_rng_write(priv, RNG_ALARMCNT_REG, val);

        val = RNG_CONTROL_STARTUP_CYCLES << RNG_CONTROL_STARTUP_CYCLES_SHIFT;
        val |= RNG_CONTROL_ENABLE_TRNG_MASK;
        omap_rng_write(priv, RNG_CONTROL_REG, val);

        return 0;
}

static void omap4_rng_cleanup(struct omap_rng_dev *priv)
{
        int val;

        val = omap_rng_read(priv, RNG_CONTROL_REG);
        val &= ~RNG_CONTROL_ENABLE_TRNG_MASK;
        omap_rng_write(priv, RNG_CONTROL_REG, val);
}

static irqreturn_t omap4_rng_irq(int irq, void *dev_id)
{
        struct omap_rng_dev *priv = dev_id;
        u32 fro_detune, fro_enable;

        /*
         * The interrupt is raised when the FRO shutdown threshold is
         * crossed.  Handle it as follows:
         * 1. Clear the alarm events.
         * 2. Detune the FROs that have shut down.
         * 3. Re-enable the shut-down FROs.
         */
        omap_rng_write(priv, RNG_ALARMMASK_REG, 0x0);
        omap_rng_write(priv, RNG_ALARMSTOP_REG, 0x0);

        /*
         * FROs whose enable bit has been cleared are the ones that shut
         * down; set the detune bit for each of them (keeping any detune
         * bits already set), then re-enable all FROs.
         */
        fro_enable = omap_rng_read(priv, RNG_FROENABLE_REG);
        fro_detune = ~fro_enable & RNG_REG_FRODETUNE_MASK;
        fro_detune = fro_detune | omap_rng_read(priv, RNG_FRODETUNE_REG);
        fro_enable = RNG_REG_FROENABLE_MASK;

        omap_rng_write(priv, RNG_FRODETUNE_REG, fro_detune);
        omap_rng_write(priv, RNG_FROENABLE_REG, fro_enable);

        omap_rng_write(priv, RNG_INTACK_REG, RNG_REG_INTACK_SHUTDOWN_OFLO_MASK);

        return IRQ_HANDLED;
}

static struct omap_rng_pdata omap4_rng_pdata = {
        .regs         = (u16 *)reg_map_omap4,
        .data_size    = OMAP4_RNG_OUTPUT_SIZE,
        .data_present = omap4_rng_data_present,
        .init         = omap4_rng_init,
        .cleanup      = omap4_rng_cleanup,
};

static struct omap_rng_pdata eip76_rng_pdata = {
        .regs         = (u16 *)reg_map_eip76,
        .data_size    = EIP76_RNG_OUTPUT_SIZE,
        .data_present = omap4_rng_data_present,
        .init         = eip76_rng_init,
        .cleanup      = omap4_rng_cleanup,
};

static const struct of_device_id omap_rng_of_match[] = {
        {
                .compatible = "ti,omap2-rng",
                .data       = &omap2_rng_pdata,
        },
        {
                .compatible = "ti,omap4-rng",
                .data       = &omap4_rng_pdata,
        },
        {
                .compatible = "inside-secure,safexcel-eip76",
                .data       = &eip76_rng_pdata,
        },
        {},
};
MODULE_DEVICE_TABLE(of, omap_rng_of_match);

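/*
 * Illustrative device tree usage (the unit address, register size and
 * interrupt number below are placeholders, not values for any specific
 * SoC):
 *
 *        rng: rng@48090000 {
 *                compatible = "ti,omap4-rng";
 *                reg = <0x48090000 0x2000>;
 *                interrupts = <52>;
 *        };
 *
 * An interrupt is only requested for the "ti,omap4-rng" and
 * "inside-secure,safexcel-eip76" compatibles (see below), and the probe
 * path additionally looks for an optional clock via devm_clk_get().
 */
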
static int of_get_omap_rng_device_details(struct omap_rng_dev *priv,
                                          struct platform_device *pdev)
{
        const struct of_device_id *match;
        struct device *dev = &pdev->dev;
        int irq, err;

        match = of_match_device(of_match_ptr(omap_rng_of_match), dev);
        if (!match) {
                dev_err(dev, "no compatible OF match\n");
                return -EINVAL;
        }
        priv->pdata = match->data;

        if (of_device_is_compatible(dev->of_node, "ti,omap4-rng") ||
            of_device_is_compatible(dev->of_node, "inside-secure,safexcel-eip76")) {
                irq = platform_get_irq(pdev, 0);
                if (irq < 0) {
                        dev_err(dev, "%s: error getting IRQ resource - %d\n",
                                __func__, irq);
                        return irq;
                }

                err = devm_request_irq(dev, irq, omap4_rng_irq,
                                       IRQF_TRIGGER_NONE, dev_name(dev), priv);
                if (err) {
                        dev_err(dev, "unable to request irq %d, err = %d\n",
                                irq, err);
                        return err;
                }

                /*
                 * On OMAP4, enabling the shutdown_oflo interrupt is
                 * done in the interrupt mask register. There is no
                 * such register on EIP76, and it's enabled by the
                 * same bit in the control register.
                 */
                if (priv->pdata->regs[RNG_INTMASK_REG])
                        omap_rng_write(priv, RNG_INTMASK_REG,
                                       RNG_SHUTDOWN_OFLO_MASK);
                else
                        omap_rng_write(priv, RNG_CONTROL_REG,
                                       RNG_SHUTDOWN_OFLO_MASK);
        }
        return 0;
}
#else
static int of_get_omap_rng_device_details(struct omap_rng_dev *omap_rng,
                                          struct platform_device *pdev)
{
        return -EINVAL;
}
#endif

static int get_omap_rng_device_details(struct omap_rng_dev *omap_rng)
{
        /* Only OMAP2/3 can be non-DT */
        omap_rng->pdata = &omap2_rng_pdata;
        return 0;
}

static int omap_rng_probe(struct platform_device *pdev)
{
        struct omap_rng_dev *priv;
        struct resource *res;
        struct device *dev = &pdev->dev;
        int ret;

        priv = devm_kzalloc(dev, sizeof(struct omap_rng_dev), GFP_KERNEL);
        if (!priv)
                return -ENOMEM;

        priv->rng.read = omap_rng_do_read;
        priv->rng.init = omap_rng_init;
        priv->rng.cleanup = omap_rng_cleanup;
        /* Entropy estimate reported to the hw_random core (per 1024 bits). */
        priv->rng.quality = 900;

        priv->rng.priv = (unsigned long)priv;
        platform_set_drvdata(pdev, priv);
        priv->dev = dev;

        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        priv->base = devm_ioremap_resource(dev, res);
        if (IS_ERR(priv->base)) {
                ret = PTR_ERR(priv->base);
                goto err_ioremap;
        }

        priv->rng.name = devm_kstrdup(dev, dev_name(dev), GFP_KERNEL);
        if (!priv->rng.name) {
                ret = -ENOMEM;
                goto err_ioremap;
        }

        pm_runtime_enable(&pdev->dev);
        ret = pm_runtime_get_sync(&pdev->dev);
        if (ret < 0) {
                dev_err(&pdev->dev, "Failed to runtime_get device: %d\n", ret);
                pm_runtime_put_noidle(&pdev->dev);
                pm_runtime_disable(&pdev->dev);
                goto err_ioremap;
        }

        priv->clk = devm_clk_get(&pdev->dev, NULL);
        if (IS_ERR(priv->clk) && PTR_ERR(priv->clk) == -EPROBE_DEFER) {
                /* Balance the runtime PM get/enable above before deferring. */
                ret = -EPROBE_DEFER;
                goto err_register;
        }
        if (!IS_ERR(priv->clk)) {
                ret = clk_prepare_enable(priv->clk);
                if (ret) {
                        dev_err(&pdev->dev,
                                "Unable to enable the clk: %d\n", ret);
                        goto err_register;
                }
        }

        ret = (dev->of_node) ? of_get_omap_rng_device_details(priv, pdev) :
                               get_omap_rng_device_details(priv);
        if (ret)
                goto err_register;

        ret = hwrng_register(&priv->rng);
        if (ret)
                goto err_register;

        dev_info(&pdev->dev, "Random Number Generator ver. %02x\n",
                 omap_rng_read(priv, RNG_REV_REG));

        return 0;

err_register:
        priv->base = NULL;
        pm_runtime_put_sync(&pdev->dev);
        pm_runtime_disable(&pdev->dev);

        if (!IS_ERR(priv->clk))
                clk_disable_unprepare(priv->clk);
err_ioremap:
        dev_err(dev, "initialization failed.\n");
        return ret;
}

static int omap_rng_remove(struct platform_device *pdev)
{
        struct omap_rng_dev *priv = platform_get_drvdata(pdev);

        hwrng_unregister(&priv->rng);

        priv->pdata->cleanup(priv);

        pm_runtime_put_sync(&pdev->dev);
        pm_runtime_disable(&pdev->dev);

        if (!IS_ERR(priv->clk))
                clk_disable_unprepare(priv->clk);

        return 0;
}

static int __maybe_unused omap_rng_suspend(struct device *dev)
{
        struct omap_rng_dev *priv = dev_get_drvdata(dev);

        priv->pdata->cleanup(priv);
        pm_runtime_put_sync(dev);

        return 0;
}

static int __maybe_unused omap_rng_resume(struct device *dev)
{
        struct omap_rng_dev *priv = dev_get_drvdata(dev);
        int ret;

        ret = pm_runtime_get_sync(dev);
        if (ret < 0) {
                dev_err(dev, "Failed to runtime_get device: %d\n", ret);
                pm_runtime_put_noidle(dev);
                return ret;
        }

        priv->pdata->init(priv);

        return 0;
}

static SIMPLE_DEV_PM_OPS(omap_rng_pm, omap_rng_suspend, omap_rng_resume);

static struct platform_driver omap_rng_driver = {
        .driver = {
                .name           = "omap_rng",
                .pm             = &omap_rng_pm,
                .of_match_table = of_match_ptr(omap_rng_of_match),
        },
        .probe  = omap_rng_probe,
        .remove = omap_rng_remove,
};

module_platform_driver(omap_rng_driver);
MODULE_ALIAS("platform:omap_rng");
MODULE_AUTHOR("Deepak Saxena (and others)");
MODULE_LICENSE("GPL");