// SPDX-License-Identifier: GPL-2.0-only
/*
 * linux/drivers/clocksource/arm_arch_timer.c
 *
 * Copyright (C) 2011 ARM Ltd.
 * All Rights Reserved
 */

#define pr_fmt(fmt) "arch_timer: " fmt

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/smp.h>
#include <linux/cpu.h>
#include <linux/cpu_pm.h>
#include <linux/clockchips.h>
#include <linux/clocksource.h>
#include <linux/clocksource_ids.h>
#include <linux/interrupt.h>
#include <linux/kstrtox.h>
#include <linux/of_irq.h>
#include <linux/of_address.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/sched/clock.h>
#include <linux/sched_clock.h>
#include <linux/acpi.h>
#include <linux/arm-smccc.h>
#include <linux/ptp_kvm.h>

#include <asm/arch_timer.h>
#include <asm/virt.h>

#include <clocksource/arm_arch_timer.h>

/*
 * The minimum amount of time a generic counter is guaranteed to not roll over
 * (40 years)
 */
#define MIN_ROLLOVER_SECS (40ULL * 365 * 24 * 3600)

static u32 arch_timer_rate __ro_after_init;
static int arch_timer_ppi[ARCH_TIMER_MAX_TIMER_PPI] __ro_after_init;

static const char *arch_timer_ppi_names[ARCH_TIMER_MAX_TIMER_PPI] = {
	[ARCH_TIMER_PHYS_SECURE_PPI] = "sec-phys",
	[ARCH_TIMER_PHYS_NONSECURE_PPI] = "phys",
	[ARCH_TIMER_VIRT_PPI] = "virt",
	[ARCH_TIMER_HYP_PPI] = "hyp-phys",
	[ARCH_TIMER_HYP_VIRT_PPI] = "hyp-virt",
};

static struct clock_event_device __percpu *arch_timer_evt;

static enum arch_timer_ppi_nr arch_timer_uses_ppi __ro_after_init = ARCH_TIMER_VIRT_PPI;
static bool arch_timer_c3stop __ro_after_init;
static bool arch_counter_suspend_stop __ro_after_init;
#ifdef CONFIG_GENERIC_GETTIMEOFDAY
static enum vdso_clock_mode vdso_default = VDSO_CLOCKMODE_ARCHTIMER;
#else
static enum vdso_clock_mode vdso_default = VDSO_CLOCKMODE_NONE;
#endif /* CONFIG_GENERIC_GETTIMEOFDAY */

static cpumask_t evtstrm_available = CPU_MASK_NONE;
static bool evtstrm_enable __ro_after_init = IS_ENABLED(CONFIG_ARM_ARCH_TIMER_EVTSTREAM);

static int __init early_evtstrm_cfg(char *buf)
{
	return kstrtobool(buf, &evtstrm_enable);
}
early_param("clocksource.arm_arch_timer.evtstrm", early_evtstrm_cfg);

/*
 * Makes an educated guess at a valid counter width based on the Generic Timer
 * specification. Of note:
 * 1) the system counter is at least 56 bits wide
 * 2) a roll-over time of not less than 40 years
 *
 * See 'ARM DDI 0487G.a D11.1.2 ("The system counter")' for more details.
 */
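/*
 * For example: a 50 MHz counter needs 40 * 365 * 24 * 3600 * 50000000
 * cycles (roughly 2^55.8) to cover 40 years, so the calculation below
 * returns the architectural minimum of 56 bits, while a 1 GHz counter
 * (roughly 2^60.2 cycles) yields a 61-bit width.
 */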
static int arch_counter_get_width(void)
{
	u64 min_cycles = MIN_ROLLOVER_SECS * arch_timer_rate;

	/* guarantee the returned width is within the valid range */
	return clamp_val(ilog2(min_cycles - 1) + 1, 56, 64);
}

/*
 * Architected system timer support.
 */
static noinstr u64 raw_counter_get_cntpct_stable(void)
{
	return __arch_counter_get_cntpct_stable();
}

static notrace u64 arch_counter_get_cntpct_stable(void)
{
	u64 val;
	preempt_disable_notrace();
	val = __arch_counter_get_cntpct_stable();
	preempt_enable_notrace();
	return val;
}

static noinstr u64 arch_counter_get_cntpct(void)
{
	return __arch_counter_get_cntpct();
}

static noinstr u64 raw_counter_get_cntvct_stable(void)
{
	return __arch_counter_get_cntvct_stable();
}

static notrace u64 arch_counter_get_cntvct_stable(void)
{
	u64 val;
	preempt_disable_notrace();
	val = __arch_counter_get_cntvct_stable();
	preempt_enable_notrace();
	return val;
}

static noinstr u64 arch_counter_get_cntvct(void)
{
	return __arch_counter_get_cntvct();
}

/*
 * Default to cp15 based access because arm64 uses this function for
 * sched_clock() before DT is probed and the cp15 method is guaranteed
 * to exist on arm64. arm doesn't use this before DT is probed so even
 * if we don't have the cp15 accessors we won't have a problem.
 */
u64 (*arch_timer_read_counter)(void) __ro_after_init = arch_counter_get_cntvct;
EXPORT_SYMBOL_GPL(arch_timer_read_counter);

static u64 arch_counter_read(struct clocksource *cs)
{
	return arch_timer_read_counter();
}

static u64 arch_counter_read_cc(struct cyclecounter *cc)
{
	return arch_timer_read_counter();
}

static struct clocksource clocksource_counter = {
	.name = "arch_sys_counter",
	.id = CSID_ARM_ARCH_COUNTER,
	.rating = 400,
	.read = arch_counter_read,
	.flags = CLOCK_SOURCE_IS_CONTINUOUS,
};

static struct cyclecounter cyclecounter __ro_after_init = {
	.read = arch_counter_read_cc,
};

struct ate_acpi_oem_info {
	char oem_id[ACPI_OEM_ID_SIZE + 1];
	char oem_table_id[ACPI_OEM_TABLE_ID_SIZE + 1];
	u32 oem_revision;
};

#ifdef CONFIG_FSL_ERRATUM_A008585
/*
 * The number of retries is an arbitrary value well beyond the highest number
 * of iterations the loop has been observed to take.
 */
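/*
 * The macro below simply reads the register twice and retries until two
 * back-to-back reads agree, on the assumption that a value corrupted by
 * the erratum will not repeat on the very next access.
 */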
#define __fsl_a008585_read_reg(reg) ({ \
	u64 _old, _new; \
	int _retries = 200; \
	\
	do { \
		_old = read_sysreg(reg); \
		_new = read_sysreg(reg); \
		_retries--; \
	} while (unlikely(_old != _new) && _retries); \
	\
	WARN_ON_ONCE(!_retries); \
	_new; \
})

static u64 notrace fsl_a008585_read_cntpct_el0(void)
{
	return __fsl_a008585_read_reg(cntpct_el0);
}

static u64 notrace fsl_a008585_read_cntvct_el0(void)
{
	return __fsl_a008585_read_reg(cntvct_el0);
}
#endif

#ifdef CONFIG_HISILICON_ERRATUM_161010101
/*
 * Verifying that the value of the second read is larger than the first by
 * less than 32 is the only way to confirm the value is correct, so clear the
 * lower 5 bits to check whether the difference is greater than 32 or not.
 * Theoretically the erratum should not occur more than twice in succession
 * when reading the system counter, but it is possible that some interrupts
 * may lead to more than two read errors, triggering the warning, so set the
 * number of retries far beyond the number of iterations the loop has been
 * observed to take.
 */
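/*
 * Concretely, (_new - _old) >> 5 is zero exactly when the unsigned
 * difference between the two reads is below 32: for example _old = 0x1000
 * and _new = 0x101f pass, while _new = 0x1020 forces another iteration.
 */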
#define __hisi_161010101_read_reg(reg) ({ \
	u64 _old, _new; \
	int _retries = 50; \
	\
	do { \
		_old = read_sysreg(reg); \
		_new = read_sysreg(reg); \
		_retries--; \
	} while (unlikely((_new - _old) >> 5) && _retries); \
	\
	WARN_ON_ONCE(!_retries); \
	_new; \
})

static u64 notrace hisi_161010101_read_cntpct_el0(void)
{
	return __hisi_161010101_read_reg(cntpct_el0);
}

static u64 notrace hisi_161010101_read_cntvct_el0(void)
{
	return __hisi_161010101_read_reg(cntvct_el0);
}

static const struct ate_acpi_oem_info hisi_161010101_oem_info[] = {
	/*
	 * Note that trailing spaces are required to properly match
	 * the OEM table information.
	 */
	{
		.oem_id = "HISI ",
		.oem_table_id = "HIP05 ",
		.oem_revision = 0,
	},
	{
		.oem_id = "HISI ",
		.oem_table_id = "HIP06 ",
		.oem_revision = 0,
	},
	{
		.oem_id = "HISI ",
		.oem_table_id = "HIP07 ",
		.oem_revision = 0,
	},
	{ /* Sentinel indicating the end of the OEM array */ },
};
#endif

#ifdef CONFIG_ARM64_ERRATUM_858921
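/*
 * The workaround reads the counter twice back-to-back: if bit 32 changed
 * between the two reads, the lower 32 bits rolled over in between and the
 * first (pre-rollover) value is returned, otherwise the second read is used.
 */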
static u64 notrace arm64_858921_read_cntpct_el0(void)
{
	u64 old, new;

	old = read_sysreg(cntpct_el0);
	new = read_sysreg(cntpct_el0);
	return (((old ^ new) >> 32) & 1) ? old : new;
}

static u64 notrace arm64_858921_read_cntvct_el0(void)
{
	u64 old, new;

	old = read_sysreg(cntvct_el0);
	new = read_sysreg(cntvct_el0);
	return (((old ^ new) >> 32) & 1) ? old : new;
}
#endif

#ifdef CONFIG_SUN50I_ERRATUM_UNKNOWN1
/*
 * The low bits of the counter registers are indeterminate while bit 10 or
 * greater is rolling over. Since the counter value can jump both backward
 * (7ff -> 000 -> 800) and forward (7ff -> fff -> 800), ignore register values
 * with all ones or all zeros in the low bits. Bound the loop by the maximum
 * number of CPU cycles in 3 consecutive 24 MHz counter periods.
 */
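/*
 * GENMASK(8, 0) selects the low nine bits, so the loop below retries
 * whenever they read back as all zeros or all ones (a value ending in
 * 0x000 or 0x1ff), the two patterns flagged as suspect above.
 */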
#define __sun50i_a64_read_reg(reg) ({ \
	u64 _val; \
	int _retries = 150; \
	\
	do { \
		_val = read_sysreg(reg); \
		_retries--; \
	} while (((_val + 1) & GENMASK(8, 0)) <= 1 && _retries); \
	\
	WARN_ON_ONCE(!_retries); \
	_val; \
})

static u64 notrace sun50i_a64_read_cntpct_el0(void)
{
	return __sun50i_a64_read_reg(cntpct_el0);
}

static u64 notrace sun50i_a64_read_cntvct_el0(void)
{
	return __sun50i_a64_read_reg(cntvct_el0);
}
#endif

#ifdef CONFIG_ARM_ARCH_TIMER_OOL_WORKAROUND
DEFINE_PER_CPU(const struct arch_timer_erratum_workaround *, timer_unstable_counter_workaround);
EXPORT_SYMBOL_GPL(timer_unstable_counter_workaround);

static atomic_t timer_unstable_counter_workaround_in_use = ATOMIC_INIT(0);

/*
 * Force the inlining of this function so that the register accesses
 * can be themselves correctly inlined.
 */
static __always_inline
void erratum_set_next_event_generic(const int access, unsigned long evt,
				    struct clock_event_device *clk)
{
	unsigned long ctrl;
	u64 cval;

	ctrl = arch_timer_reg_read_cp15(access, ARCH_TIMER_REG_CTRL);
	ctrl |= ARCH_TIMER_CTRL_ENABLE;
	ctrl &= ~ARCH_TIMER_CTRL_IT_MASK;

	if (access == ARCH_TIMER_PHYS_ACCESS) {
		cval = evt + arch_counter_get_cntpct_stable();
		write_sysreg(cval, cntp_cval_el0);
	} else {
		cval = evt + arch_counter_get_cntvct_stable();
		write_sysreg(cval, cntv_cval_el0);
	}

	arch_timer_reg_write_cp15(access, ARCH_TIMER_REG_CTRL, ctrl);
}

static __maybe_unused int erratum_set_next_event_virt(unsigned long evt,
						      struct clock_event_device *clk)
{
	erratum_set_next_event_generic(ARCH_TIMER_VIRT_ACCESS, evt, clk);
	return 0;
}

static __maybe_unused int erratum_set_next_event_phys(unsigned long evt,
						      struct clock_event_device *clk)
{
	erratum_set_next_event_generic(ARCH_TIMER_PHYS_ACCESS, evt, clk);
	return 0;
}

static const struct arch_timer_erratum_workaround ool_workarounds[] = {
#ifdef CONFIG_FSL_ERRATUM_A008585
	{
		.match_type = ate_match_dt,
		.id = "fsl,erratum-a008585",
.desc = "Freescale erratum a005858",
		.read_cntpct_el0 = fsl_a008585_read_cntpct_el0,
		.read_cntvct_el0 = fsl_a008585_read_cntvct_el0,
		.set_next_event_phys = erratum_set_next_event_phys,
		.set_next_event_virt = erratum_set_next_event_virt,
	},
#endif
#ifdef CONFIG_HISILICON_ERRATUM_161010101
	{
		.match_type = ate_match_dt,
		.id = "hisilicon,erratum-161010101",
		.desc = "HiSilicon erratum 161010101",
		.read_cntpct_el0 = hisi_161010101_read_cntpct_el0,
		.read_cntvct_el0 = hisi_161010101_read_cntvct_el0,
		.set_next_event_phys = erratum_set_next_event_phys,
		.set_next_event_virt = erratum_set_next_event_virt,
	},
	{
		.match_type = ate_match_acpi_oem_info,
		.id = hisi_161010101_oem_info,
		.desc = "HiSilicon erratum 161010101",
		.read_cntpct_el0 = hisi_161010101_read_cntpct_el0,
		.read_cntvct_el0 = hisi_161010101_read_cntvct_el0,
		.set_next_event_phys = erratum_set_next_event_phys,
		.set_next_event_virt = erratum_set_next_event_virt,
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_858921
	{
		.match_type = ate_match_local_cap_id,
		.id = (void *)ARM64_WORKAROUND_858921,
		.desc = "ARM erratum 858921",
		.read_cntpct_el0 = arm64_858921_read_cntpct_el0,
		.read_cntvct_el0 = arm64_858921_read_cntvct_el0,
		.set_next_event_phys = erratum_set_next_event_phys,
		.set_next_event_virt = erratum_set_next_event_virt,
	},
#endif
#ifdef CONFIG_SUN50I_ERRATUM_UNKNOWN1
	{
		.match_type = ate_match_dt,
		.id = "allwinner,erratum-unknown1",
		.desc = "Allwinner erratum UNKNOWN1",
		.read_cntpct_el0 = sun50i_a64_read_cntpct_el0,
		.read_cntvct_el0 = sun50i_a64_read_cntvct_el0,
		.set_next_event_phys = erratum_set_next_event_phys,
		.set_next_event_virt = erratum_set_next_event_virt,
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_1418040
	{
		.match_type = ate_match_local_cap_id,
		.id = (void *)ARM64_WORKAROUND_1418040,
		.desc = "ARM erratum 1418040",
		.disable_compat_vdso = true,
	},
#endif
};

typedef bool (*ate_match_fn_t)(const struct arch_timer_erratum_workaround *,
			       const void *);

static
bool arch_timer_check_dt_erratum(const struct arch_timer_erratum_workaround *wa,
				 const void *arg)
{
	const struct device_node *np = arg;

	return of_property_read_bool(np, wa->id);
}

static
bool arch_timer_check_local_cap_erratum(const struct arch_timer_erratum_workaround *wa,
					const void *arg)
{
	return this_cpu_has_cap((uintptr_t)wa->id);
}

static
bool arch_timer_check_acpi_oem_erratum(const struct arch_timer_erratum_workaround *wa,
				       const void *arg)
{
	static const struct ate_acpi_oem_info empty_oem_info = {};
	const struct ate_acpi_oem_info *info = wa->id;
	const struct acpi_table_header *table = arg;

	/* Iterate over the ACPI OEM info array, looking for a match */
	while (memcmp(info, &empty_oem_info, sizeof(*info))) {
		if (!memcmp(info->oem_id, table->oem_id, ACPI_OEM_ID_SIZE) &&
		    !memcmp(info->oem_table_id, table->oem_table_id, ACPI_OEM_TABLE_ID_SIZE) &&
		    info->oem_revision == table->oem_revision)
			return true;

		info++;
	}

	return false;
}

static const struct arch_timer_erratum_workaround *
arch_timer_iterate_errata(enum arch_timer_erratum_match_type type,
			  ate_match_fn_t match_fn,
			  void *arg)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(ool_workarounds); i++) {
		if (ool_workarounds[i].match_type != type)
			continue;

		if (match_fn(&ool_workarounds[i], arg))
			return &ool_workarounds[i];
	}

	return NULL;
}

static
void arch_timer_enable_workaround(const struct arch_timer_erratum_workaround *wa,
				  bool local)
{
	int i;

	if (local) {
		__this_cpu_write(timer_unstable_counter_workaround, wa);
	} else {
		for_each_possible_cpu(i)
			per_cpu(timer_unstable_counter_workaround, i) = wa;
	}

	if (wa->read_cntvct_el0 || wa->read_cntpct_el0)
		atomic_set(&timer_unstable_counter_workaround_in_use, 1);

	/*
	 * Don't use the vdso fastpath if errata require using the
	 * out-of-line counter accessor. We may change our mind pretty
	 * late in the game (with a per-CPU erratum, for example), so
	 * change both the default value and the vdso itself.
	 */
	if (wa->read_cntvct_el0) {
		clocksource_counter.vdso_clock_mode = VDSO_CLOCKMODE_NONE;
		vdso_default = VDSO_CLOCKMODE_NONE;
	} else if (wa->disable_compat_vdso && vdso_default != VDSO_CLOCKMODE_NONE) {
		vdso_default = VDSO_CLOCKMODE_ARCHTIMER_NOCOMPAT;
		clocksource_counter.vdso_clock_mode = vdso_default;
	}
}

static void arch_timer_check_ool_workaround(enum arch_timer_erratum_match_type type,
					    void *arg)
{
	const struct arch_timer_erratum_workaround *wa, *__wa;
	ate_match_fn_t match_fn = NULL;
	bool local = false;

	switch (type) {
	case ate_match_dt:
		match_fn = arch_timer_check_dt_erratum;
		break;
	case ate_match_local_cap_id:
		match_fn = arch_timer_check_local_cap_erratum;
		local = true;
		break;
	case ate_match_acpi_oem_info:
		match_fn = arch_timer_check_acpi_oem_erratum;
		break;
	default:
		WARN_ON(1);
		return;
	}

	wa = arch_timer_iterate_errata(type, match_fn, arg);
	if (!wa)
		return;

	__wa = __this_cpu_read(timer_unstable_counter_workaround);
	if (__wa && wa != __wa)
pr_warn("Can't enable workaround for %s (clashes with %s\n)",
539
wa->desc, __wa->desc);

	if (__wa)
		return;

	arch_timer_enable_workaround(wa, local);
	pr_info("Enabling %s workaround for %s\n",
		local ? "local" : "global", wa->desc);
}

static bool arch_timer_this_cpu_has_cntvct_wa(void)
{
	return has_erratum_handler(read_cntvct_el0);
}

static bool arch_timer_counter_has_wa(void)
{
	return atomic_read(&timer_unstable_counter_workaround_in_use);
}
#else
#define arch_timer_check_ool_workaround(t,a) do { } while(0)
#define arch_timer_this_cpu_has_cntvct_wa() ({false;})
#define arch_timer_counter_has_wa() ({false;})
#endif /* CONFIG_ARM_ARCH_TIMER_OOL_WORKAROUND */

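/*
 * The per-CPU interrupt is level-triggered from the timer's ISTATUS bit,
 * so the handler masks the interrupt before invoking the event handler;
 * it is unmasked again when set_next_event() programs the next deadline.
 */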
static __always_inline irqreturn_t timer_handler(const int access,
						 struct clock_event_device *evt)
{
	unsigned long ctrl;

	ctrl = arch_timer_reg_read_cp15(access, ARCH_TIMER_REG_CTRL);
	if (ctrl & ARCH_TIMER_CTRL_IT_STAT) {
		ctrl |= ARCH_TIMER_CTRL_IT_MASK;
		arch_timer_reg_write_cp15(access, ARCH_TIMER_REG_CTRL, ctrl);
		evt->event_handler(evt);
		return IRQ_HANDLED;
	}

	return IRQ_NONE;
}

static irqreturn_t arch_timer_handler_virt(int irq, void *dev_id)
{
	struct clock_event_device *evt = dev_id;

	return timer_handler(ARCH_TIMER_VIRT_ACCESS, evt);
}

static irqreturn_t arch_timer_handler_phys(int irq, void *dev_id)
{
	struct clock_event_device *evt = dev_id;

	return timer_handler(ARCH_TIMER_PHYS_ACCESS, evt);
}

static __always_inline int arch_timer_shutdown(const int access,
					       struct clock_event_device *clk)
{
	unsigned long ctrl;

	ctrl = arch_timer_reg_read_cp15(access, ARCH_TIMER_REG_CTRL);
	ctrl &= ~ARCH_TIMER_CTRL_ENABLE;
	arch_timer_reg_write_cp15(access, ARCH_TIMER_REG_CTRL, ctrl);

	return 0;
}

static int arch_timer_shutdown_virt(struct clock_event_device *clk)
{
	return arch_timer_shutdown(ARCH_TIMER_VIRT_ACCESS, clk);
}

static int arch_timer_shutdown_phys(struct clock_event_device *clk)
{
	return arch_timer_shutdown(ARCH_TIMER_PHYS_ACCESS, clk);
}

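/*
 * The timer fires when the counter reaches CVAL, so the clockevents delta
 * below is converted into an absolute deadline by adding the current
 * counter value before it is written to the CVAL register.
 */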
static __always_inline void set_next_event(const int access, unsigned long evt,
					   struct clock_event_device *clk)
{
	unsigned long ctrl;
	u64 cnt;

	ctrl = arch_timer_reg_read_cp15(access, ARCH_TIMER_REG_CTRL);
	ctrl |= ARCH_TIMER_CTRL_ENABLE;
	ctrl &= ~ARCH_TIMER_CTRL_IT_MASK;

	if (access == ARCH_TIMER_PHYS_ACCESS)
		cnt = __arch_counter_get_cntpct();
	else
		cnt = __arch_counter_get_cntvct();

	arch_timer_reg_write_cp15(access, ARCH_TIMER_REG_CVAL, evt + cnt);
	arch_timer_reg_write_cp15(access, ARCH_TIMER_REG_CTRL, ctrl);
}

static int arch_timer_set_next_event_virt(unsigned long evt,
					  struct clock_event_device *clk)
{
	set_next_event(ARCH_TIMER_VIRT_ACCESS, evt, clk);
	return 0;
}

static int arch_timer_set_next_event_phys(unsigned long evt,
					  struct clock_event_device *clk)
{
	set_next_event(ARCH_TIMER_PHYS_ACCESS, evt, clk);
	return 0;
}

static u64 __arch_timer_check_delta(void)
{
#ifdef CONFIG_ARM64
	const struct midr_range broken_cval_midrs[] = {
		/*
		 * XGene-1 implements CVAL in terms of TVAL, meaning
		 * that the maximum timer range is 32bit. Shame on them.
		 *
		 * Note that TVAL is signed, thus has only 31 of its
		 * 32 bits to express magnitude.
		 */
		MIDR_REV_RANGE(MIDR_CPU_MODEL(ARM_CPU_IMP_APM,
					      APM_CPU_PART_XGENE),
			       APM_CPU_VAR_POTENZA, 0x0, 0xf),
		{},
	};

	if (is_midr_in_range_list(broken_cval_midrs)) {
		pr_warn_once("Broken CNTx_CVAL_EL1, using 31 bit TVAL instead.\n");
		return CLOCKSOURCE_MASK(31);
	}
#endif
	return CLOCKSOURCE_MASK(arch_counter_get_width());
}

static void __arch_timer_setup(struct clock_event_device *clk)
{
	typeof(clk->set_next_event) sne;
	u64 max_delta;

	clk->features = CLOCK_EVT_FEAT_ONESHOT;

	arch_timer_check_ool_workaround(ate_match_local_cap_id, NULL);

	if (arch_timer_c3stop)
		clk->features |= CLOCK_EVT_FEAT_C3STOP;
	clk->name = "arch_sys_timer";
	clk->rating = 450;
	clk->cpumask = cpumask_of(smp_processor_id());
	clk->irq = arch_timer_ppi[arch_timer_uses_ppi];
	switch (arch_timer_uses_ppi) {
	case ARCH_TIMER_VIRT_PPI:
		clk->set_state_shutdown = arch_timer_shutdown_virt;
		clk->set_state_oneshot_stopped = arch_timer_shutdown_virt;
		sne = erratum_handler(set_next_event_virt);
		break;
	case ARCH_TIMER_PHYS_SECURE_PPI:
	case ARCH_TIMER_PHYS_NONSECURE_PPI:
	case ARCH_TIMER_HYP_PPI:
		clk->set_state_shutdown = arch_timer_shutdown_phys;
		clk->set_state_oneshot_stopped = arch_timer_shutdown_phys;
		sne = erratum_handler(set_next_event_phys);
		break;
	default:
		BUG();
	}

	clk->set_next_event = sne;
	max_delta = __arch_timer_check_delta();

	clk->set_state_shutdown(clk);

	clockevents_config_and_register(clk, arch_timer_rate, 0xf, max_delta);
}

static void arch_timer_evtstrm_enable(unsigned int divider)
{
	u32 cntkctl = arch_timer_get_cntkctl();

#ifdef CONFIG_ARM64
	/* ECV is likely to require a large divider. Use the EVNTIS flag. */
	if (cpus_have_final_cap(ARM64_HAS_ECV) && divider > 15) {
		cntkctl |= ARCH_TIMER_EVT_INTERVAL_SCALE;
		divider -= 8;
	}
#endif

	divider = min(divider, 15U);
	cntkctl &= ~ARCH_TIMER_EVT_TRIGGER_MASK;
	/* Set the divider and enable virtual event stream */
	cntkctl |= (divider << ARCH_TIMER_EVT_TRIGGER_SHIFT)
			| ARCH_TIMER_VIRT_EVT_EN;
	arch_timer_set_cntkctl(cntkctl);
	arch_timer_set_evtstrm_feature();
	cpumask_set_cpu(smp_processor_id(), &evtstrm_available);
}

static void arch_timer_configure_evtstream(void)
{
	int evt_stream_div, lsb;

	/*
	 * As the event stream can at most be generated at half the frequency
	 * of the counter, use half the frequency when computing the divider.
	 */
	evt_stream_div = arch_timer_rate / ARCH_TIMER_EVT_STREAM_FREQ / 2;

	/*
	 * Find the closest power of two to the divisor. If the adjacent bit
	 * of lsb (last set bit, starts from 0) is set, then we use (lsb + 1).
	 */
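	/*
	 * For example, with a 24 MHz counter and assuming the usual 10 kHz
	 * ARCH_TIMER_EVT_STREAM_FREQ, evt_stream_div = 24000000 / 10000 / 2
	 * = 1200; fls(1200) - 1 = 10 and bit 9 of 1200 is clear, so the
	 * event stream is enabled with a divider of 10.
	 */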
	lsb = fls(evt_stream_div) - 1;
	if (lsb > 0 && (evt_stream_div & BIT(lsb - 1)))
		lsb++;

	/* enable event stream */
	arch_timer_evtstrm_enable(max(0, lsb));
}

static int arch_timer_evtstrm_starting_cpu(unsigned int cpu)
{
	arch_timer_configure_evtstream();
	return 0;
}

static int arch_timer_evtstrm_dying_cpu(unsigned int cpu)
{
	cpumask_clear_cpu(smp_processor_id(), &evtstrm_available);
	return 0;
}

static int __init arch_timer_evtstrm_register(void)
{
	if (!arch_timer_evt || !evtstrm_enable)
		return 0;

	return cpuhp_setup_state(CPUHP_AP_ARM_ARCH_TIMER_EVTSTRM_STARTING,
				 "clockevents/arm/arch_timer_evtstrm:starting",
				 arch_timer_evtstrm_starting_cpu,
				 arch_timer_evtstrm_dying_cpu);
}
core_initcall(arch_timer_evtstrm_register);

static void arch_counter_set_user_access(void)
{
	u32 cntkctl = arch_timer_get_cntkctl();

	/* Disable user access to the timers and both counters */
	/* Also disable virtual event stream */
	cntkctl &= ~(ARCH_TIMER_USR_PT_ACCESS_EN
			| ARCH_TIMER_USR_VT_ACCESS_EN
			| ARCH_TIMER_USR_VCT_ACCESS_EN
			| ARCH_TIMER_VIRT_EVT_EN
			| ARCH_TIMER_USR_PCT_ACCESS_EN);

	/*
	 * Enable user access to the virtual counter if it doesn't
	 * need to be worked around. The vdso may already have been
	 * disabled though.
	 */
	if (arch_timer_this_cpu_has_cntvct_wa())
		pr_info("CPU%d: Trapping CNTVCT access\n", smp_processor_id());
	else
		cntkctl |= ARCH_TIMER_USR_VCT_ACCESS_EN;

	arch_timer_set_cntkctl(cntkctl);
}

static bool arch_timer_has_nonsecure_ppi(void)
{
	return (arch_timer_uses_ppi == ARCH_TIMER_PHYS_SECURE_PPI &&
		arch_timer_ppi[ARCH_TIMER_PHYS_NONSECURE_PPI]);
}

static u32 check_ppi_trigger(int irq)
{
	u32 flags = irq_get_trigger_type(irq);

	if (flags != IRQF_TRIGGER_HIGH && flags != IRQF_TRIGGER_LOW) {
		pr_warn("WARNING: Invalid trigger for IRQ%d, assuming level low\n", irq);
		pr_warn("WARNING: Please fix your firmware\n");
		flags = IRQF_TRIGGER_LOW;
	}

	return flags;
}

static int arch_timer_starting_cpu(unsigned int cpu)
{
	struct clock_event_device *clk = this_cpu_ptr(arch_timer_evt);
	u32 flags;

	__arch_timer_setup(clk);

	flags = check_ppi_trigger(arch_timer_ppi[arch_timer_uses_ppi]);
	enable_percpu_irq(arch_timer_ppi[arch_timer_uses_ppi], flags);

	if (arch_timer_has_nonsecure_ppi()) {
		flags = check_ppi_trigger(arch_timer_ppi[ARCH_TIMER_PHYS_NONSECURE_PPI]);
		enable_percpu_irq(arch_timer_ppi[ARCH_TIMER_PHYS_NONSECURE_PPI],
				  flags);
	}

	arch_counter_set_user_access();

	return 0;
}

static int validate_timer_rate(void)
{
	if (!arch_timer_rate)
		return -EINVAL;

	/* Arch timer frequency < 1MHz can cause trouble */
	WARN_ON(arch_timer_rate < 1000000);

	return 0;
}

/*
 * For historical reasons, when probing with DT we use whichever (non-zero)
 * rate was probed first, and don't verify that others match. If the first node
 * probed has a clock-frequency property, this overrides the HW register.
 */
static void __init arch_timer_of_configure_rate(u32 rate, struct device_node *np)
{
	/* Who has more than one independent system counter? */
	if (arch_timer_rate)
		return;

	if (of_property_read_u32(np, "clock-frequency", &arch_timer_rate))
		arch_timer_rate = rate;

	/* Check the timer frequency. */
	if (validate_timer_rate())
		pr_warn("frequency not available\n");
}

static void __init arch_timer_banner(void)
{
	pr_info("cp15 timer running at %lu.%02luMHz (%s).\n",
		(unsigned long)arch_timer_rate / 1000000,
		(unsigned long)(arch_timer_rate / 10000) % 100,
		(arch_timer_uses_ppi == ARCH_TIMER_VIRT_PPI) ? "virt" : "phys");
}

u32 arch_timer_get_rate(void)
{
	return arch_timer_rate;
}

bool arch_timer_evtstrm_available(void)
{
	/*
	 * We might get called from a preemptible context. This is fine
	 * because availability of the event stream should always be the same
	 * for a preemptible context and a context where we might resume a task.
	 */
	return cpumask_test_cpu(raw_smp_processor_id(), &evtstrm_available);
}

static struct arch_timer_kvm_info arch_timer_kvm_info;

struct arch_timer_kvm_info *arch_timer_get_kvm_info(void)
{
	return &arch_timer_kvm_info;
}

static void __init arch_counter_register(void)
{
	u64 (*scr)(void);
	u64 (*rd)(void);
	u64 start_count;
	int width;

	if ((IS_ENABLED(CONFIG_ARM64) && !is_hyp_mode_available()) ||
	    arch_timer_uses_ppi == ARCH_TIMER_VIRT_PPI) {
		if (arch_timer_counter_has_wa()) {
			rd = arch_counter_get_cntvct_stable;
			scr = raw_counter_get_cntvct_stable;
		} else {
			rd = arch_counter_get_cntvct;
			scr = arch_counter_get_cntvct;
		}
	} else {
		if (arch_timer_counter_has_wa()) {
			rd = arch_counter_get_cntpct_stable;
			scr = raw_counter_get_cntpct_stable;
		} else {
			rd = arch_counter_get_cntpct;
			scr = arch_counter_get_cntpct;
		}
	}

	arch_timer_read_counter = rd;
	clocksource_counter.vdso_clock_mode = vdso_default;

	width = arch_counter_get_width();
	clocksource_counter.mask = CLOCKSOURCE_MASK(width);
	cyclecounter.mask = CLOCKSOURCE_MASK(width);

	if (!arch_counter_suspend_stop)
		clocksource_counter.flags |= CLOCK_SOURCE_SUSPEND_NONSTOP;
	start_count = arch_timer_read_counter();
	clocksource_register_hz(&clocksource_counter, arch_timer_rate);
	cyclecounter.mult = clocksource_counter.mult;
	cyclecounter.shift = clocksource_counter.shift;
	timecounter_init(&arch_timer_kvm_info.timecounter,
			 &cyclecounter, start_count);

	sched_clock_register(scr, width, arch_timer_rate);
}

static void arch_timer_stop(struct clock_event_device *clk)
{
	pr_debug("disable IRQ%d cpu #%d\n", clk->irq, smp_processor_id());

	disable_percpu_irq(arch_timer_ppi[arch_timer_uses_ppi]);
	if (arch_timer_has_nonsecure_ppi())
		disable_percpu_irq(arch_timer_ppi[ARCH_TIMER_PHYS_NONSECURE_PPI]);
}

static int arch_timer_dying_cpu(unsigned int cpu)
{
	struct clock_event_device *clk = this_cpu_ptr(arch_timer_evt);

	arch_timer_stop(clk);
	return 0;
}

#ifdef CONFIG_CPU_PM
static DEFINE_PER_CPU(unsigned long, saved_cntkctl);
static int arch_timer_cpu_pm_notify(struct notifier_block *self,
				    unsigned long action, void *hcpu)
{
	if (action == CPU_PM_ENTER) {
		__this_cpu_write(saved_cntkctl, arch_timer_get_cntkctl());

		cpumask_clear_cpu(smp_processor_id(), &evtstrm_available);
	} else if (action == CPU_PM_ENTER_FAILED || action == CPU_PM_EXIT) {
		arch_timer_set_cntkctl(__this_cpu_read(saved_cntkctl));

		if (arch_timer_have_evtstrm_feature())
			cpumask_set_cpu(smp_processor_id(), &evtstrm_available);
	}
	return NOTIFY_OK;
}

static struct notifier_block arch_timer_cpu_pm_notifier = {
	.notifier_call = arch_timer_cpu_pm_notify,
};

static int __init arch_timer_cpu_pm_init(void)
{
	return cpu_pm_register_notifier(&arch_timer_cpu_pm_notifier);
}

static void __init arch_timer_cpu_pm_deinit(void)
{
	WARN_ON(cpu_pm_unregister_notifier(&arch_timer_cpu_pm_notifier));
}

#else
static int __init arch_timer_cpu_pm_init(void)
{
	return 0;
}

static void __init arch_timer_cpu_pm_deinit(void)
{
}
#endif

static int __init arch_timer_register(void)
{
	int err;
	int ppi;

	arch_timer_evt = alloc_percpu(struct clock_event_device);
	if (!arch_timer_evt) {
		err = -ENOMEM;
		goto out;
	}

	ppi = arch_timer_ppi[arch_timer_uses_ppi];
	switch (arch_timer_uses_ppi) {
	case ARCH_TIMER_VIRT_PPI:
		err = request_percpu_irq(ppi, arch_timer_handler_virt,
					 "arch_timer", arch_timer_evt);
		break;
	case ARCH_TIMER_PHYS_SECURE_PPI:
	case ARCH_TIMER_PHYS_NONSECURE_PPI:
		err = request_percpu_irq(ppi, arch_timer_handler_phys,
					 "arch_timer", arch_timer_evt);
		if (!err && arch_timer_has_nonsecure_ppi()) {
			ppi = arch_timer_ppi[ARCH_TIMER_PHYS_NONSECURE_PPI];
			err = request_percpu_irq(ppi, arch_timer_handler_phys,
						 "arch_timer", arch_timer_evt);
			if (err)
				free_percpu_irq(arch_timer_ppi[ARCH_TIMER_PHYS_SECURE_PPI],
						arch_timer_evt);
		}
		break;
	case ARCH_TIMER_HYP_PPI:
		err = request_percpu_irq(ppi, arch_timer_handler_phys,
					 "arch_timer", arch_timer_evt);
		break;
	default:
		BUG();
	}

	if (err) {
		pr_err("can't register interrupt %d (%d)\n", ppi, err);
		goto out_free;
	}

	err = arch_timer_cpu_pm_init();
	if (err)
		goto out_unreg_notify;

	/* Register and immediately configure the timer on the boot CPU */
	err = cpuhp_setup_state(CPUHP_AP_ARM_ARCH_TIMER_STARTING,
				"clockevents/arm/arch_timer:starting",
				arch_timer_starting_cpu, arch_timer_dying_cpu);
	if (err)
		goto out_unreg_cpupm;
	return 0;

out_unreg_cpupm:
	arch_timer_cpu_pm_deinit();

out_unreg_notify:
	free_percpu_irq(arch_timer_ppi[arch_timer_uses_ppi], arch_timer_evt);
	if (arch_timer_has_nonsecure_ppi())
		free_percpu_irq(arch_timer_ppi[ARCH_TIMER_PHYS_NONSECURE_PPI],
				arch_timer_evt);

out_free:
	free_percpu(arch_timer_evt);
	arch_timer_evt = NULL;
out:
	return err;
}

static int __init arch_timer_common_init(void)
{
	arch_timer_banner();
	arch_counter_register();
	return arch_timer_arch_init();
}

/**
 * arch_timer_select_ppi() - Select suitable PPI for the current system.
 *
 * If HYP mode is available, we know that the physical timer
 * has been configured to be accessible from PL1. Use it, so
 * that a guest can use the virtual timer instead.
 *
 * On ARMv8.1 with VH extensions, the kernel runs in HYP. VHE
 * accesses to CNTP_*_EL1 registers are silently redirected to
 * their CNTHP_*_EL2 counterparts, and use a different PPI
 * number.
 *
 * If no interrupt is provided for the virtual timer, we'll have to
 * stick to the physical timer. It'd better be accessible...
 * For arm64 we never use the secure interrupt.
 *
 * Return: a suitable PPI type for the current system.
 */
static enum arch_timer_ppi_nr __init arch_timer_select_ppi(void)
{
	if (is_kernel_in_hyp_mode())
		return ARCH_TIMER_HYP_PPI;

	if (!is_hyp_mode_available() && arch_timer_ppi[ARCH_TIMER_VIRT_PPI])
		return ARCH_TIMER_VIRT_PPI;

	if (IS_ENABLED(CONFIG_ARM64))
		return ARCH_TIMER_PHYS_NONSECURE_PPI;

	return ARCH_TIMER_PHYS_SECURE_PPI;
}

static void __init arch_timer_populate_kvm_info(void)
{
	arch_timer_kvm_info.virtual_irq = arch_timer_ppi[ARCH_TIMER_VIRT_PPI];
	if (is_kernel_in_hyp_mode())
		arch_timer_kvm_info.physical_irq = arch_timer_ppi[ARCH_TIMER_PHYS_NONSECURE_PPI];
}

static int __init arch_timer_of_init(struct device_node *np)
{
	int i, irq, ret;
	u32 rate;
	bool has_names;

	if (arch_timer_evt) {
		pr_warn("multiple nodes in dt, skipping\n");
		return 0;
	}

	has_names = of_property_present(np, "interrupt-names");

	for (i = ARCH_TIMER_PHYS_SECURE_PPI; i < ARCH_TIMER_MAX_TIMER_PPI; i++) {
		if (has_names)
			irq = of_irq_get_byname(np, arch_timer_ppi_names[i]);
		else
			irq = of_irq_get(np, i);
		if (irq > 0)
			arch_timer_ppi[i] = irq;
	}

	arch_timer_populate_kvm_info();

	rate = arch_timer_get_cntfrq();
	arch_timer_of_configure_rate(rate, np);

	arch_timer_c3stop = !of_property_read_bool(np, "always-on");

	/* Check for globally applicable workarounds */
	arch_timer_check_ool_workaround(ate_match_dt, np);

	/*
	 * If we cannot rely on firmware initializing the timer registers then
	 * we should use the physical timers instead.
	 */
	if (IS_ENABLED(CONFIG_ARM) &&
	    of_property_read_bool(np, "arm,cpu-registers-not-fw-configured"))
		arch_timer_uses_ppi = ARCH_TIMER_PHYS_SECURE_PPI;
	else
		arch_timer_uses_ppi = arch_timer_select_ppi();

	if (!arch_timer_ppi[arch_timer_uses_ppi]) {
		pr_err("No interrupt available, giving up\n");
		return -EINVAL;
	}

	/* On some systems, the counter stops ticking when in suspend. */
	arch_counter_suspend_stop = of_property_read_bool(np,
							  "arm,no-tick-in-suspend");

	ret = arch_timer_register();
	if (ret)
		return ret;

	return arch_timer_common_init();
}
TIMER_OF_DECLARE(armv7_arch_timer, "arm,armv7-timer", arch_timer_of_init);
TIMER_OF_DECLARE(armv8_arch_timer, "arm,armv8-timer", arch_timer_of_init);

#ifdef CONFIG_ACPI_GTDT
static int __init arch_timer_acpi_init(struct acpi_table_header *table)
{
	int ret;

	if (arch_timer_evt) {
		pr_warn("already initialized, skipping\n");
		return -EINVAL;
	}

	ret = acpi_gtdt_init(table, NULL);
	if (ret)
		return ret;

	arch_timer_ppi[ARCH_TIMER_PHYS_NONSECURE_PPI] =
		acpi_gtdt_map_ppi(ARCH_TIMER_PHYS_NONSECURE_PPI);

	arch_timer_ppi[ARCH_TIMER_VIRT_PPI] =
		acpi_gtdt_map_ppi(ARCH_TIMER_VIRT_PPI);

	arch_timer_ppi[ARCH_TIMER_HYP_PPI] =
		acpi_gtdt_map_ppi(ARCH_TIMER_HYP_PPI);

	arch_timer_populate_kvm_info();

	/*
	 * When probing via ACPI, we have no mechanism to override the sysreg
	 * CNTFRQ value. This *must* be correct.
	 */
	arch_timer_rate = arch_timer_get_cntfrq();
	ret = validate_timer_rate();
	if (ret) {
		pr_err(FW_BUG "frequency not available.\n");
		return ret;
	}

	arch_timer_uses_ppi = arch_timer_select_ppi();
	if (!arch_timer_ppi[arch_timer_uses_ppi]) {
		pr_err("No interrupt available, giving up\n");
		return -EINVAL;
	}

	/* Always-on capability */
	arch_timer_c3stop = acpi_gtdt_c3stop(arch_timer_uses_ppi);

	/* Check for globally applicable workarounds */
	arch_timer_check_ool_workaround(ate_match_acpi_oem_info, table);

	ret = arch_timer_register();
	if (ret)
		return ret;

	return arch_timer_common_init();
}
TIMER_ACPI_DECLARE(arch_timer, ACPI_SIG_GTDT, arch_timer_acpi_init);
#endif

int kvm_arch_ptp_get_crosststamp(u64 *cycle, struct timespec64 *ts,
				 enum clocksource_ids *cs_id)
{
	struct arm_smccc_res hvc_res;
	u32 ptp_counter;
	ktime_t ktime;

	if (!IS_ENABLED(CONFIG_HAVE_ARM_SMCCC_DISCOVERY))
		return -EOPNOTSUPP;

	if (arch_timer_uses_ppi == ARCH_TIMER_VIRT_PPI)
		ptp_counter = KVM_PTP_VIRT_COUNTER;
	else
		ptp_counter = KVM_PTP_PHYS_COUNTER;

	arm_smccc_1_1_invoke(ARM_SMCCC_VENDOR_HYP_KVM_PTP_FUNC_ID,
			     ptp_counter, &hvc_res);

	if ((int)(hvc_res.a0) < 0)
		return -EOPNOTSUPP;

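	/*
	 * The hypervisor returns the timestamp split across a0 (upper 32 bits)
	 * and a1 (lower 32 bits), and the counter value split across a2/a3 the
	 * same way; the shifts below reassemble the two 64-bit values.
	 */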
	ktime = (u64)hvc_res.a0 << 32 | hvc_res.a1;
	*ts = ktime_to_timespec64(ktime);
	if (cycle)
		*cycle = (u64)hvc_res.a2 << 32 | hvc_res.a3;
	if (cs_id)
		*cs_id = CSID_ARM_ARCH_COUNTER;

	return 0;
}
EXPORT_SYMBOL_GPL(kvm_arch_ptp_get_crosststamp);