Book a Demo!
CoCalc Logo Icon
StoreFeaturesDocsShareSupportNewsAboutPoliciesSign UpSign In
torvalds
GitHub Repository: torvalds/linux
Path: blob/master/drivers/clocksource/arm_arch_timer_mmio.c
29267 views
1
// SPDX-License-Identifier: GPL-2.0-only
2
/*
3
* ARM Generic Memory Mapped Timer support
4
*
5
* Split from drivers/clocksource/arm_arch_timer.c
6
*
7
* Copyright (C) 2011 ARM Ltd.
8
* All Rights Reserved
9
*/
10
11
#define pr_fmt(fmt) "arch_timer_mmio: " fmt
12
13
#include <linux/clockchips.h>
14
#include <linux/interrupt.h>
15
#include <linux/io-64-nonatomic-lo-hi.h>
16
#include <linux/of_irq.h>
17
#include <linux/of_address.h>
18
#include <linux/platform_device.h>
19
20
#include <clocksource/arm_arch_timer.h>
21
22
#define CNTTIDR 0x08
23
#define CNTTIDR_VIRT(n) (BIT(1) << ((n) * 4))
24
25
#define CNTACR(n) (0x40 + ((n) * 4))
26
#define CNTACR_RPCT BIT(0)
27
#define CNTACR_RVCT BIT(1)
28
#define CNTACR_RFRQ BIT(2)
29
#define CNTACR_RVOFF BIT(3)
30
#define CNTACR_RWVT BIT(4)
31
#define CNTACR_RWPT BIT(5)
32
33
#define CNTPCT_LO 0x00
34
#define CNTVCT_LO 0x08
35
#define CNTFRQ 0x10
36
#define CNTP_CVAL_LO 0x20
37
#define CNTP_CTL 0x2c
38
#define CNTV_CVAL_LO 0x30
39
#define CNTV_CTL 0x3c
40
41
/* Which register bank of a timer frame is used for timer programming */
enum arch_timer_access {
	PHYS_ACCESS,	/* physical timer registers (CNTP_*) */
	VIRT_ACCESS,	/* virtual timer registers (CNTV_*) */
};
45
46
/*
 * Per-device state: one selected frame of a memory-mapped generic
 * timer block, exposed as both a clockevent and a clocksource.
 */
struct arch_timer {
	struct clock_event_device evt;	/* clockevent built on this frame */
	struct clocksource cs;		/* clocksource built on this frame */
	struct arch_timer_mem *gt_block; /* description of the whole timer block */
	void __iomem *base;		/* mapped registers of the chosen frame */
	enum arch_timer_access access;	/* phys vs virt register bank */
	u32 rate;			/* counter frequency in Hz */
};
54
55
/* Map the embedded clockevent/clocksource back to the owning arch_timer */
#define evt_to_arch_timer(e) container_of(e, struct arch_timer, evt)
#define cs_to_arch_timer(c) container_of(c, struct arch_timer, cs)
57
58
/*
 * Write a timer register (CTRL or CVAL) through the frame's MMIO window,
 * using the physical or virtual register bank depending on how the frame
 * was selected at probe time.
 *
 * Any access/reg combination not handled above triggers a one-shot WARN.
 */
static void arch_timer_mmio_write(struct arch_timer *timer,
				  enum arch_timer_reg reg, u64 val)
{
	switch (timer->access) {
	case PHYS_ACCESS:
		switch (reg) {
		case ARCH_TIMER_REG_CTRL:
			writel_relaxed((u32)val, timer->base + CNTP_CTL);
			return;
		case ARCH_TIMER_REG_CVAL:
			/*
			 * Not guaranteed to be atomic, so the timer
			 * must be disabled at this point.
			 */
			writeq_relaxed(val, timer->base + CNTP_CVAL_LO);
			return;
		}
		break;
	case VIRT_ACCESS:
		switch (reg) {
		case ARCH_TIMER_REG_CTRL:
			writel_relaxed((u32)val, timer->base + CNTV_CTL);
			return;
		case ARCH_TIMER_REG_CVAL:
			/* Same restriction as above */
			writeq_relaxed(val, timer->base + CNTV_CVAL_LO);
			return;
		}
		break;
	}

	/* Should never be here */
	WARN_ON_ONCE(1);
}
92
93
/*
 * Read a timer register through the frame's MMIO window. Only the
 * control register is readable via this helper; any other register
 * (or a bad access mode) hits the one-shot WARN and returns 0.
 */
static u32 arch_timer_mmio_read(struct arch_timer *timer, enum arch_timer_reg reg)
{
	switch (timer->access) {
	case PHYS_ACCESS:
		switch (reg) {
		case ARCH_TIMER_REG_CTRL:
			return readl_relaxed(timer->base + CNTP_CTL);
		default:
			break;
		}
		break;
	case VIRT_ACCESS:
		switch (reg) {
		case ARCH_TIMER_REG_CTRL:
			return readl_relaxed(timer->base + CNTV_CTL);
		default:
			break;
		}
		break;
	}

	/* Should never be here */
	WARN_ON_ONCE(1);
	return 0;
}
118
119
/*
 * Sample the 64-bit counter via two 32-bit MMIO reads. The hi-lo-hi
 * sequence retries until the high word is stable, so a carry between
 * the two halves cannot produce a torn value. Raw accessors with an
 * explicit LE conversion are used because this is noinstr code
 * (instrumentable helpers must not be called here).
 */
static noinstr u64 arch_counter_mmio_get_cnt(struct arch_timer *t)
{
	int offset_lo = t->access == VIRT_ACCESS ? CNTVCT_LO : CNTPCT_LO;
	u32 cnt_lo, cnt_hi, tmp_hi;

	do {
		cnt_hi = __le32_to_cpu((__le32 __force)__raw_readl(t->base + offset_lo + 4));
		cnt_lo = __le32_to_cpu((__le32 __force)__raw_readl(t->base + offset_lo));
		tmp_hi = __le32_to_cpu((__le32 __force)__raw_readl(t->base + offset_lo + 4));
	} while (cnt_hi != tmp_hi);

	return ((u64) cnt_hi << 32) | cnt_lo;
}
132
133
/* clocksource ->read callback: sample the frame's 64-bit counter */
static u64 arch_mmio_counter_read(struct clocksource *cs)
{
	return arch_counter_mmio_get_cnt(cs_to_arch_timer(cs));
}
139
140
static int arch_timer_mmio_shutdown(struct clock_event_device *clk)
141
{
142
struct arch_timer *at = evt_to_arch_timer(clk);
143
unsigned long ctrl;
144
145
ctrl = arch_timer_mmio_read(at, ARCH_TIMER_REG_CTRL);
146
ctrl &= ~ARCH_TIMER_CTRL_ENABLE;
147
arch_timer_mmio_write(at, ARCH_TIMER_REG_CTRL, ctrl);
148
149
return 0;
150
}
151
152
/*
 * clockevent ->set_next_event callback: program the comparator 'evt'
 * ticks from now and enable the timer with its interrupt unmasked.
 */
static int arch_timer_mmio_set_next_event(unsigned long evt,
					  struct clock_event_device *clk)
{
	struct arch_timer *timer = evt_to_arch_timer(clk);
	unsigned long ctrl;
	u64 cnt;

	ctrl = arch_timer_mmio_read(timer, ARCH_TIMER_REG_CTRL);

	/* Timer must be disabled before programming CVAL */
	if (ctrl & ARCH_TIMER_CTRL_ENABLE) {
		ctrl &= ~ARCH_TIMER_CTRL_ENABLE;
		arch_timer_mmio_write(timer, ARCH_TIMER_REG_CTRL, ctrl);
	}

	/* Prepare the control word: enabled, interrupt unmasked */
	ctrl |= ARCH_TIMER_CTRL_ENABLE;
	ctrl &= ~ARCH_TIMER_CTRL_IT_MASK;

	cnt = arch_counter_mmio_get_cnt(timer);

	/* Write the (non-atomic) 64-bit CVAL first, then re-enable */
	arch_timer_mmio_write(timer, ARCH_TIMER_REG_CVAL, evt + cnt);
	arch_timer_mmio_write(timer, ARCH_TIMER_REG_CTRL, ctrl);
	return 0;
}
176
177
/*
 * Timer interrupt handler. If the interrupt status bit is set, mask
 * the interrupt (the next set_next_event will unmask it) and run the
 * clockevent handler; otherwise report the IRQ as not ours.
 */
static irqreturn_t arch_timer_mmio_handler(int irq, void *dev_id)
{
	struct clock_event_device *evt = dev_id;
	struct arch_timer *at = evt_to_arch_timer(evt);
	unsigned long ctrl;

	ctrl = arch_timer_mmio_read(at, ARCH_TIMER_REG_CTRL);
	if (ctrl & ARCH_TIMER_CTRL_IT_STAT) {
		ctrl |= ARCH_TIMER_CTRL_IT_MASK;
		arch_timer_mmio_write(at, ARCH_TIMER_REG_CTRL, ctrl);
		evt->event_handler(evt);
		return IRQ_HANDLED;
	}

	return IRQ_NONE;
}
193
194
/*
 * Scan the timer block's frames and pick the best usable one, setting
 * at->access accordingly.
 *
 * A frame that supports virtual timer access (and has a virt IRQ) wins
 * immediately; otherwise the scan keeps the last physical-capable frame
 * with a phys IRQ. Returns NULL if no frame qualifies or CNTCTLBase
 * cannot be mapped. CNTCTLBase is only needed during the scan and is
 * unmapped before returning.
 */
static struct arch_timer_mem_frame *find_best_frame(struct platform_device *pdev)
{
	struct arch_timer_mem_frame *frame, *best_frame = NULL;
	struct arch_timer *at = platform_get_drvdata(pdev);
	void __iomem *cntctlbase;
	u32 cnttidr;

	cntctlbase = ioremap(at->gt_block->cntctlbase, at->gt_block->size);
	if (!cntctlbase) {
		dev_err(&pdev->dev, "Can't map CNTCTLBase @ %pa\n",
			&at->gt_block->cntctlbase);
		return NULL;
	}

	cnttidr = readl_relaxed(cntctlbase + CNTTIDR);

	/*
	 * Try to find a virtual capable frame. Otherwise fall back to a
	 * physical capable frame.
	 */
	for (int i = 0; i < ARCH_TIMER_MEM_MAX_FRAMES; i++) {
		u32 cntacr = CNTACR_RFRQ | CNTACR_RWPT | CNTACR_RPCT |
			     CNTACR_RWVT | CNTACR_RVOFF | CNTACR_RVCT;

		frame = &at->gt_block->frame[i];
		if (!frame->valid)
			continue;

		/* Try enabling everything, and see what sticks */
		writel_relaxed(cntacr, cntctlbase + CNTACR(i));
		cntacr = readl_relaxed(cntctlbase + CNTACR(i));

		/* Pick a suitable frame for which we have an IRQ */
		if ((cnttidr & CNTTIDR_VIRT(i)) &&
		    !(~cntacr & (CNTACR_RWVT | CNTACR_RVCT)) &&
		    frame->virt_irq) {
			best_frame = frame;
			at->access = VIRT_ACCESS;
			break;
		}

		/* Skip frames without phys read/write access or a phys IRQ */
		if ((~cntacr & (CNTACR_RWPT | CNTACR_RPCT)) ||
		    !frame->phys_irq)
			continue;

		at->access = PHYS_ACCESS;
		best_frame = frame;
	}

	iounmap(cntctlbase);

	return best_frame;
}
247
248
/*
 * Register the clockevent and clocksource for the selected frame.
 * Called once the frame is mapped, its rate is known and its IRQ has
 * been requested (with IRQF_NO_AUTOEN, hence the enable_irq() here).
 * Must not fail: the caller has passed the point of no return.
 */
static void arch_timer_mmio_setup(struct arch_timer *at, int irq)
{
	at->evt = (struct clock_event_device) {
		.features = (CLOCK_EVT_FEAT_ONESHOT |
			     CLOCK_EVT_FEAT_DYNIRQ),
		.name = "arch_mem_timer",
		.rating = 400,
		.cpumask = cpu_possible_mask,
		.irq = irq,
		.set_next_event = arch_timer_mmio_set_next_event,
		.set_state_oneshot_stopped = arch_timer_mmio_shutdown,
		.set_state_shutdown	= arch_timer_mmio_shutdown,
	};

	/* Start from a known-disabled state */
	at->evt.set_state_shutdown(&at->evt);

	/* min delta 0xf ticks, max delta limited by the 56-bit counter */
	clockevents_config_and_register(&at->evt, at->rate, 0xf,
					(unsigned long)CLOCKSOURCE_MASK(56));

	enable_irq(at->evt.irq);

	at->cs = (struct clocksource) {
		.name = "arch_mmio_counter",
		.rating = 300,
		.read = arch_mmio_counter_read,
		.mask = CLOCKSOURCE_MASK(56),
		.flags = CLOCK_SOURCE_IS_CONTINUOUS,
	};

	clocksource_register_hz(&at->cs, at->rate);
}
279
280
/*
 * Map the chosen frame, determine its rate, request its IRQ and hand
 * over to arch_timer_mmio_setup() for final registration.
 *
 * Returns 0 on success or a negative errno (resource/map/IRQ failure).
 */
static int arch_timer_mmio_frame_register(struct platform_device *pdev,
					  struct arch_timer_mem_frame *frame)
{
	struct arch_timer *at = platform_get_drvdata(pdev);
	struct device_node *np = pdev->dev.of_node;
	int ret, irq;
	u32 rate;

	if (!devm_request_mem_region(&pdev->dev, frame->cntbase, frame->size,
				     "arch_mem_timer"))
		return -EBUSY;

	at->base = devm_ioremap(&pdev->dev, frame->cntbase, frame->size);
	if (!at->base) {
		dev_err(&pdev->dev, "Can't map frame's registers\n");
		return -ENXIO;
	}

	/*
	 * Allow "clock-frequency" to override the probed rate. If neither
	 * lead to something useful, use the CPU timer frequency as the
	 * fallback. The nice thing about that last point is that we
	 * wouldn't have made it here if we didn't have a valid frequency.
	 */
	rate = readl_relaxed(at->base + CNTFRQ);

	if (!np || of_property_read_u32(np, "clock-frequency", &at->rate))
		at->rate = rate;

	if (!at->rate)
		at->rate = arch_timer_get_rate();

	/* The IRQ line must match the register bank picked earlier */
	irq = at->access == VIRT_ACCESS ? frame->virt_irq : frame->phys_irq;
	ret = devm_request_irq(&pdev->dev, irq, arch_timer_mmio_handler,
			       IRQF_TIMER | IRQF_NO_AUTOEN, "arch_mem_timer",
			       &at->evt);
	if (ret) {
		dev_err(&pdev->dev, "Failed to request mem timer irq\n");
		return ret;
	}

	/* After this point, we're not allowed to fail anymore */
	arch_timer_mmio_setup(at, irq);
	return 0;
}
325
326
/*
 * Fill at->gt_block from the device tree: the block's CNTCTLBase region
 * plus one entry per child "frame" node (address, size and IRQs, keyed
 * by the "frame-number" property).
 *
 * Returns 0 on success, -EINVAL on missing/duplicate/out-of-range
 * frame numbers or unreadable reg properties.
 */
static int of_populate_gt_block(struct platform_device *pdev,
				struct arch_timer *at)
{
	struct resource res;

	if (of_address_to_resource(pdev->dev.of_node, 0, &res))
		return -EINVAL;

	at->gt_block->cntctlbase = res.start;
	at->gt_block->size = resource_size(&res);

	for_each_available_child_of_node_scoped(pdev->dev.of_node, frame_node) {
		struct arch_timer_mem_frame *frame;
		u32 n;

		if (of_property_read_u32(frame_node, "frame-number", &n)) {
			dev_err(&pdev->dev, FW_BUG "Missing frame-number\n");
			return -EINVAL;
		}
		if (n >= ARCH_TIMER_MEM_MAX_FRAMES) {
			dev_err(&pdev->dev,
				FW_BUG "Wrong frame-number, only 0-%u are permitted\n",
				ARCH_TIMER_MEM_MAX_FRAMES - 1);
			return -EINVAL;
		}

		frame = &at->gt_block->frame[n];

		if (frame->valid) {
			dev_err(&pdev->dev, FW_BUG "Duplicated frame-number\n");
			return -EINVAL;
		}

		if (of_address_to_resource(frame_node, 0, &res))
			return -EINVAL;

		frame->cntbase = res.start;
		frame->size = resource_size(&res);

		/* IRQ index 0 is the physical timer, 1 the virtual one */
		frame->phys_irq = irq_of_parse_and_map(frame_node, 0);
		frame->virt_irq = irq_of_parse_and_map(frame_node, 1);

		frame->valid = true;
	}

	return 0;
}
373
374
static int arch_timer_mmio_probe(struct platform_device *pdev)
375
{
376
struct arch_timer_mem_frame *frame;
377
struct arch_timer *at;
378
struct device_node *np;
379
int ret;
380
381
np = pdev->dev.of_node;
382
383
at = devm_kmalloc(&pdev->dev, sizeof(*at), GFP_KERNEL | __GFP_ZERO);
384
if (!at)
385
return -ENOMEM;
386
387
if (np) {
388
at->gt_block = devm_kmalloc(&pdev->dev, sizeof(*at->gt_block),
389
GFP_KERNEL | __GFP_ZERO);
390
if (!at->gt_block)
391
return -ENOMEM;
392
ret = of_populate_gt_block(pdev, at);
393
if (ret)
394
return ret;
395
} else {
396
at->gt_block = dev_get_platdata(&pdev->dev);
397
}
398
399
platform_set_drvdata(pdev, at);
400
401
frame = find_best_frame(pdev);
402
if (!frame) {
403
dev_err(&pdev->dev,
404
"Unable to find a suitable frame in timer @ %pa\n",
405
&at->gt_block->cntctlbase);
406
return -EINVAL;
407
}
408
409
ret = arch_timer_mmio_frame_register(pdev, frame);
410
if (!ret)
411
dev_info(&pdev->dev,
412
"mmio timer running at %lu.%02luMHz (%s)\n",
413
(unsigned long)at->rate / 1000000,
414
(unsigned long)(at->rate / 10000) % 100,
415
at->access == VIRT_ACCESS ? "virt" : "phys");
416
417
return ret;
418
}
419
420
/* DT match table: the ARMv7 memory-mapped generic timer binding */
static const struct of_device_id arch_timer_mmio_of_table[] = {
	{ .compatible = "arm,armv7-timer-mem", },
	{}
};
424
425
/* DT-probed flavour of the driver */
static struct platform_driver arch_timer_mmio_drv = {
	.driver	= {
		.name = "arch-timer-mmio",
		.of_match_table	= arch_timer_mmio_of_table,
	},
	.probe	= arch_timer_mmio_probe,
};
builtin_platform_driver(arch_timer_mmio_drv);
433
434
/*
 * ACPI flavour: binds to the platform device created from the GTDT
 * table (block description arrives as platform data, no of_node).
 */
static struct platform_driver arch_timer_mmio_acpi_drv = {
	.driver	= {
		.name = "gtdt-arm-mmio-timer",
	},
	.probe	= arch_timer_mmio_probe,
};
builtin_platform_driver(arch_timer_mmio_acpi_drv);
441
442