GitHub Repository: torvalds/linux
Path: blob/master/arch/x86/kernel/apic/apic.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Local APIC handling, local APIC timers
 *
 * (c) 1999, 2000, 2009 Ingo Molnar <[email protected]>
 *
 * Fixes
 * Maciej W. Rozycki	:	Bits for genuine 82489DX APICs;
 *				thanks to Eric Gilmore
 *				and Rolf G. Tews
 *				for testing these extensively.
 * Maciej W. Rozycki	:	Various updates and fixes.
 * Mikael Pettersson	:	Power Management for UP-APIC.
 * Pavel Machek and
 * Mikael Pettersson	:	PM converted to driver model.
 */

#include <linux/perf_event.h>
#include <linux/kernel_stat.h>
#include <linux/mc146818rtc.h>
#include <linux/acpi_pmtmr.h>
#include <linux/bitmap.h>
#include <linux/clockchips.h>
#include <linux/interrupt.h>
#include <linux/memblock.h>
#include <linux/ftrace.h>
#include <linux/ioport.h>
#include <linux/export.h>
#include <linux/syscore_ops.h>
#include <linux/delay.h>
#include <linux/timex.h>
#include <linux/i8253.h>
#include <linux/dmar.h>
#include <linux/init.h>
#include <linux/cpu.h>
#include <linux/dmi.h>
#include <linux/smp.h>
#include <linux/mm.h>

#include <xen/xen.h>

#include <asm/trace/irq_vectors.h>
#include <asm/irq_remapping.h>
#include <asm/pc-conf-reg.h>
#include <asm/perf_event.h>
#include <asm/x86_init.h>
#include <linux/atomic.h>
#include <asm/barrier.h>
#include <asm/mpspec.h>
#include <asm/i8259.h>
#include <asm/proto.h>
#include <asm/traps.h>
#include <asm/apic.h>
#include <asm/acpi.h>
#include <asm/io_apic.h>
#include <asm/desc.h>
#include <asm/hpet.h>
#include <asm/mtrr.h>
#include <asm/time.h>
#include <asm/smp.h>
#include <asm/mce.h>
#include <asm/msr.h>
#include <asm/tsc.h>
#include <asm/hypervisor.h>
#include <asm/cpu_device_id.h>
#include <asm/intel-family.h>
#include <asm/irq_regs.h>
#include <asm/cpu.h>

#include "local.h"

/* Processor that is doing the boot up */
u32 boot_cpu_physical_apicid __ro_after_init = BAD_APICID;
EXPORT_SYMBOL_GPL(boot_cpu_physical_apicid);

u8 boot_cpu_apic_version __ro_after_init;

/*
 * This variable controls which CPUs receive external NMIs. By default,
 * external NMIs are delivered only to the BSP.
 */
static int apic_extnmi __ro_after_init = APIC_EXTNMI_BSP;

/*
 * Hypervisor supports 15 bits of APIC ID in MSI Extended Destination ID
 */
static bool virt_ext_dest_id __ro_after_init;

/* For parallel bootup. */
unsigned long apic_mmio_base __ro_after_init;
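
/*
 * The APIC registers are reachable either via MSRs in x2APIC mode or
 * through the MMIO window once the fixmap mapping has been set up.
 */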
static inline bool apic_accessible(void)
{
	return x2apic_mode || apic_mmio_base;
}

#ifdef CONFIG_X86_32
/* Local APIC was disabled by the BIOS and enabled by the kernel */
static int enabled_via_apicbase __ro_after_init;

/*
 * Handle interrupt mode configuration register (IMCR).
 * This register controls whether the interrupt signals
 * that reach the BSP come from the master PIC or from the
 * local APIC. Before entering Symmetric I/O Mode, either
 * the BIOS or the operating system must switch out of
 * PIC Mode by changing the IMCR.
 */
static inline void imcr_pic_to_apic(void)
{
	/* NMI and 8259 INTR go through APIC */
	pc_conf_set(PC_CONF_MPS_IMCR, 0x01);
}

static inline void imcr_apic_to_pic(void)
{
	/* NMI and 8259 INTR go directly to BSP */
	pc_conf_set(PC_CONF_MPS_IMCR, 0x00);
}
#endif

/*
 * Knob to control our willingness to enable the local APIC.
 *
 * +1=force-enable
 */
static int force_enable_local_apic __initdata;

/*
 * APIC command line parameters
 */
static int __init parse_lapic(char *arg)
{
	if (IS_ENABLED(CONFIG_X86_32) && !arg)
		force_enable_local_apic = 1;
	else if (arg && !strncmp(arg, "notscdeadline", 13))
		setup_clear_cpu_cap(X86_FEATURE_TSC_DEADLINE_TIMER);
	return 0;
}
early_param("lapic", parse_lapic);

#ifdef CONFIG_X86_64
static int apic_calibrate_pmtmr __initdata;
static __init int setup_apicpmtimer(char *s)
{
	apic_calibrate_pmtmr = 1;
	notsc_setup(NULL);
	return 1;
}
__setup("apicpmtimer", setup_apicpmtimer);
#endif

static unsigned long mp_lapic_addr __ro_after_init;
bool apic_is_disabled __ro_after_init;
/* Disable local APIC timer from the kernel commandline or via dmi quirk */
static int disable_apic_timer __initdata;
/* Local APIC timer works in C2 */
int local_apic_timer_c2_ok __ro_after_init;
EXPORT_SYMBOL_GPL(local_apic_timer_c2_ok);

/*
 * Debug level, exported for io_apic.c
 */
int apic_verbosity __ro_after_init;

int pic_mode __ro_after_init;

/* Have we found an MP table */
int smp_found_config __ro_after_init;

static struct resource lapic_resource = {
	.name = "Local APIC",
	.flags = IORESOURCE_MEM | IORESOURCE_BUSY,
};

unsigned int lapic_timer_period = 0;

static void apic_pm_activate(void);

/*
 * Get the LAPIC version
 */
static inline int lapic_get_version(void)
{
	return GET_APIC_VERSION(apic_read(APIC_LVR));
}

/*
 * Check whether the APIC is integrated or a separate chip
 */
static inline int lapic_is_integrated(void)
{
	return APIC_INTEGRATED(lapic_get_version());
}

/*
 * Check whether this is a modern or a first generation APIC
 */
static int modern_apic(void)
{
	/* AMD systems use old APIC versions, so check the CPU */
	if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD &&
	    boot_cpu_data.x86 >= 0xf)
		return 1;

	/* Hygon systems use modern APIC */
	if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON)
		return 1;

	return lapic_get_version() >= 0x14;
}

/*
 * Right after this call the APIC becomes NOOP driven, so
 * apic->write/read don't do anything.
 */
static void __init apic_disable(void)
{
	apic_install_driver(&apic_noop);
}
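
/*
 * The xAPIC ICR is split across two 32-bit registers: the destination
 * field lives in ICR2 and must be written before the low word, whose
 * write actually triggers delivery. Interrupts are disabled around the
 * pair of writes so an IPI sent from an interrupt handler cannot
 * interleave with them.
 */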
void native_apic_icr_write(u32 low, u32 id)
{
	unsigned long flags;

	local_irq_save(flags);
	apic_write(APIC_ICR2, SET_XAPIC_DEST_FIELD(id));
	apic_write(APIC_ICR, low);
	local_irq_restore(flags);
}

u64 native_apic_icr_read(void)
{
	u32 icr1, icr2;

	icr2 = apic_read(APIC_ICR2);
	icr1 = apic_read(APIC_ICR);

	return icr1 | ((u64)icr2 << 32);
}

/**
 * lapic_get_maxlvt - get the maximum number of local vector table entries
 */
int lapic_get_maxlvt(void)
{
	/*
	 * - we always have APIC integrated on 64bit mode
	 * - 82489DXs do not report # of LVT entries
	 */
	return lapic_is_integrated() ? GET_APIC_MAXLVT(apic_read(APIC_LVR)) : 2;
}

/*
 * Local APIC timer
 */

/* Clock divisor */
#define APIC_DIVISOR 16
#define TSC_DIVISOR 8

/* i82489DX specific */
#define I82489DX_BASE_DIVIDER (((0x2) << 18))

/*
 * This function sets up the local APIC timer, with a timeout of
 * 'clocks' APIC bus clocks. During calibration we actually call
 * this function twice on the boot CPU, once with a bogus timeout
 * value, the second time for real. The other (noncalibrating) CPUs
 * call this function only once, with the real, calibrated value.
 *
 * We do reads before writes even if unnecessary, to get around the
 * P5 APIC double write bug.
 */
static void __setup_APIC_LVTT(unsigned int clocks, int oneshot, int irqen)
{
	unsigned int lvtt_value, tmp_value;

	lvtt_value = LOCAL_TIMER_VECTOR;
	if (!oneshot)
		lvtt_value |= APIC_LVT_TIMER_PERIODIC;
	else if (boot_cpu_has(X86_FEATURE_TSC_DEADLINE_TIMER))
		lvtt_value |= APIC_LVT_TIMER_TSCDEADLINE;

	/*
	 * The i82489DX APIC uses bits 18 and 19 for the base divider. This
	 * overlaps with bit 18 on integrated APICs, but is not documented
	 * in the SDM. No problem though. i82489DX equipped systems do not
	 * have a TSC deadline timer.
	 */
	if (!lapic_is_integrated())
		lvtt_value |= I82489DX_BASE_DIVIDER;

	if (!irqen)
		lvtt_value |= APIC_LVT_MASKED;

	apic_write(APIC_LVTT, lvtt_value);

	if (lvtt_value & APIC_LVT_TIMER_TSCDEADLINE) {
		/*
		 * See Intel SDM: TSC-Deadline Mode chapter. In xAPIC mode,
		 * writing to the APIC LVTT and TSC_DEADLINE MSR isn't serialized.
		 * According to Intel, MFENCE can do the serialization here.
		 */
		asm volatile("mfence" : : : "memory");
		return;
	}

	/*
	 * Divide PICLK by 16
	 */
	tmp_value = apic_read(APIC_TDCR);
	apic_write(APIC_TDCR,
		   (tmp_value & ~(APIC_TDR_DIV_1 | APIC_TDR_DIV_TMBASE)) |
		   APIC_TDR_DIV_16);

	if (!oneshot)
		apic_write(APIC_TMICT, clocks / APIC_DIVISOR);
}

/*
 * Setup extended LVT, AMD specific
 *
 * Software should use the LVT offsets the BIOS provides. The offsets
 * are determined by the subsystems using it like those for MCE
 * threshold or IBS. On K8 only offset 0 (APIC500) and MCE interrupts
 * are supported. Beginning with family 10h at least 4 offsets are
 * available.
 *
 * Since the offsets must be consistent for all cores, we keep track
 * of the LVT offsets in software and reserve the offset for the same
 * vector also to be used on other cores. An offset is freed by
 * setting the entry to APIC_EILVT_MASKED.
 *
 * If the BIOS is right, there should be no conflicts. Otherwise a
 * "[Firmware Bug]: ..." error message is generated. However, if
 * software does not properly determine the offsets, it is not
 * necessarily a BIOS bug.
 */

static atomic_t eilvt_offsets[APIC_EILVT_NR_MAX];

static inline int eilvt_entry_is_changeable(unsigned int old, unsigned int new)
{
	return (old & APIC_EILVT_MASKED)
		|| (new == APIC_EILVT_MASKED)
		|| ((new & ~APIC_EILVT_MASKED) == old);
}
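
/*
 * Atomically reserve an extended-LVT offset: the cmpxchg loop either
 * installs 'new' in the tracking array or returns the value another
 * CPU already reserved, which the caller compares against 'new' to
 * detect a conflicting reservation.
 */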
static unsigned int reserve_eilvt_offset(int offset, unsigned int new)
{
	unsigned int rsvd, vector;

	if (offset >= APIC_EILVT_NR_MAX)
		return ~0;

	rsvd = atomic_read(&eilvt_offsets[offset]);
	do {
		vector = rsvd & ~APIC_EILVT_MASKED;	/* 0: unassigned */
		if (vector && !eilvt_entry_is_changeable(vector, new))
			/* may not change if vectors are different */
			return rsvd;
	} while (!atomic_try_cmpxchg(&eilvt_offsets[offset], &rsvd, new));

	rsvd = new & ~APIC_EILVT_MASKED;
	if (rsvd && rsvd != vector)
		pr_info("LVT offset %d assigned for vector 0x%02x\n",
			offset, rsvd);

	return new;
}

/*
 * If mask=1, the LVT entry does not generate interrupts while mask=0
 * enables the vector. See also the BKDGs. Must be called with
 * preemption disabled.
 */

int setup_APIC_eilvt(u8 offset, u8 vector, u8 msg_type, u8 mask)
{
	unsigned long reg = APIC_EILVTn(offset);
	unsigned int new, old, reserved;

	new = (mask << 16) | (msg_type << 8) | vector;
	old = apic_read(reg);
	reserved = reserve_eilvt_offset(offset, new);

	if (reserved != new) {
		pr_err(FW_BUG "cpu %d, try to use APIC%lX (LVT offset %d) for "
		       "vector 0x%x, but the register is already in use for "
		       "vector 0x%x on another cpu\n",
		       smp_processor_id(), reg, offset, new, reserved);
		return -EINVAL;
	}

	if (!eilvt_entry_is_changeable(old, new)) {
		pr_err(FW_BUG "cpu %d, try to use APIC%lX (LVT offset %d) for "
		       "vector 0x%x, but the register is already in use for "
		       "vector 0x%x on this cpu\n",
		       smp_processor_id(), reg, offset, new, old);
		return -EBUSY;
	}

	apic_write(reg, new);

	return 0;
}
EXPORT_SYMBOL_GPL(setup_APIC_eilvt);

/*
 * Program the next event, relative to now
 */
static int lapic_next_event(unsigned long delta,
			    struct clock_event_device *evt)
{
	apic_write(APIC_TMICT, delta);
	return 0;
}
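
/*
 * The deadline clockevent is registered with a frequency of
 * tsc_khz * 1000 / TSC_DIVISOR, so 'delta' is in units of TSC_DIVISOR
 * TSC cycles and has to be scaled back to raw TSC cycles before being
 * added to the current TSC value.
 */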
static int lapic_next_deadline(unsigned long delta,
			       struct clock_event_device *evt)
{
	u64 tsc;

	/* This MSR is special and needs a special fence: */
	weak_wrmsr_fence();

	tsc = rdtsc();
	wrmsrq(MSR_IA32_TSC_DEADLINE, tsc + (((u64) delta) * TSC_DIVISOR));
	return 0;
}

static int lapic_timer_shutdown(struct clock_event_device *evt)
{
	unsigned int v;

	/* Lapic used as dummy for broadcast? */
	if (evt->features & CLOCK_EVT_FEAT_DUMMY)
		return 0;

	v = apic_read(APIC_LVTT);
	v |= (APIC_LVT_MASKED | LOCAL_TIMER_VECTOR);
	apic_write(APIC_LVTT, v);

	/*
	 * Setting APIC_LVT_MASKED (above) should be enough to tell
	 * the hardware that this timer will never fire. But AMD
	 * erratum 411 and some Intel CPU behavior circa 2024 say
	 * otherwise. Time for belt and suspenders programming: mask
	 * the timer _and_ zero the counter registers:
	 */
	if (v & APIC_LVT_TIMER_TSCDEADLINE)
		wrmsrq(MSR_IA32_TSC_DEADLINE, 0);
	else
		apic_write(APIC_TMICT, 0);

	return 0;
}

static inline int
lapic_timer_set_periodic_oneshot(struct clock_event_device *evt, bool oneshot)
{
	/* Lapic used as dummy for broadcast? */
	if (evt->features & CLOCK_EVT_FEAT_DUMMY)
		return 0;

	__setup_APIC_LVTT(lapic_timer_period, oneshot, 1);
	return 0;
}

static int lapic_timer_set_periodic(struct clock_event_device *evt)
{
	return lapic_timer_set_periodic_oneshot(evt, false);
}

static int lapic_timer_set_oneshot(struct clock_event_device *evt)
{
	return lapic_timer_set_periodic_oneshot(evt, true);
}

/*
 * Local APIC timer broadcast function
 */
static void lapic_timer_broadcast(const struct cpumask *mask)
{
#ifdef CONFIG_SMP
	__apic_send_IPI_mask(mask, LOCAL_TIMER_VECTOR);
#endif
}


/*
 * The local apic timer can be used for any function which is CPU local.
 */
static struct clock_event_device lapic_clockevent = {
	.name				= "lapic",
	.features			= CLOCK_EVT_FEAT_PERIODIC |
					  CLOCK_EVT_FEAT_ONESHOT | CLOCK_EVT_FEAT_C3STOP
					  | CLOCK_EVT_FEAT_DUMMY,
	.shift				= 32,
	.set_state_shutdown		= lapic_timer_shutdown,
	.set_state_periodic		= lapic_timer_set_periodic,
	.set_state_oneshot		= lapic_timer_set_oneshot,
	.set_state_oneshot_stopped	= lapic_timer_shutdown,
	.set_next_event			= lapic_next_event,
	.broadcast			= lapic_timer_broadcast,
	.rating				= 100,
	.irq				= -1,
};
static DEFINE_PER_CPU(struct clock_event_device, lapic_events);
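
/*
 * CPU models whose TSC-deadline timer is only reliable from a given
 * microcode revision onward. driver_data holds the minimum revision;
 * apic_validate_deadline_timer() below clears the feature when the
 * running microcode is older.
 */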
static const struct x86_cpu_id deadline_match[] __initconst = {
	X86_MATCH_VFM_STEPS(INTEL_HASWELL_X, 0x2, 0x2, 0x3a),	/* EP */
	X86_MATCH_VFM_STEPS(INTEL_HASWELL_X, 0x4, 0x4, 0x0f),	/* EX */

	X86_MATCH_VFM(INTEL_BROADWELL_X, 0x0b000020),

	X86_MATCH_VFM_STEPS(INTEL_BROADWELL_D, 0x2, 0x2, 0x00000011),
	X86_MATCH_VFM_STEPS(INTEL_BROADWELL_D, 0x3, 0x3, 0x0700000e),
	X86_MATCH_VFM_STEPS(INTEL_BROADWELL_D, 0x4, 0x4, 0x0f00000c),
	X86_MATCH_VFM_STEPS(INTEL_BROADWELL_D, 0x5, 0x5, 0x0e000003),

	X86_MATCH_VFM_STEPS(INTEL_SKYLAKE_X, 0x3, 0x3, 0x01000136),
	X86_MATCH_VFM_STEPS(INTEL_SKYLAKE_X, 0x4, 0x4, 0x02000014),
	X86_MATCH_VFM_STEPS(INTEL_SKYLAKE_X, 0x5, 0xf, 0),

	X86_MATCH_VFM(INTEL_HASWELL, 0x22),
	X86_MATCH_VFM(INTEL_HASWELL_L, 0x20),
	X86_MATCH_VFM(INTEL_HASWELL_G, 0x17),

	X86_MATCH_VFM(INTEL_BROADWELL, 0x25),
	X86_MATCH_VFM(INTEL_BROADWELL_G, 0x17),

	X86_MATCH_VFM(INTEL_SKYLAKE_L, 0xb2),
	X86_MATCH_VFM(INTEL_SKYLAKE, 0xb2),

	X86_MATCH_VFM(INTEL_KABYLAKE_L, 0x52),
	X86_MATCH_VFM(INTEL_KABYLAKE, 0x52),

	{},
};

static __init bool apic_validate_deadline_timer(void)
{
	const struct x86_cpu_id *m;
	u32 rev;

	if (!boot_cpu_has(X86_FEATURE_TSC_DEADLINE_TIMER))
		return false;
	if (boot_cpu_has(X86_FEATURE_HYPERVISOR))
		return true;

	m = x86_match_cpu(deadline_match);
	if (!m)
		return true;

	rev = (u32)m->driver_data;

	if (boot_cpu_data.microcode >= rev)
		return true;

	setup_clear_cpu_cap(X86_FEATURE_TSC_DEADLINE_TIMER);
	pr_err(FW_BUG "TSC_DEADLINE disabled due to Errata; "
	       "please update microcode to version: 0x%x (or later)\n", rev);
	return false;
}

/*
 * Setup the local APIC timer for this CPU. Copy the initialized values
 * of the boot CPU and register the clock event in the framework.
 */
static void setup_APIC_timer(void)
{
	struct clock_event_device *levt = this_cpu_ptr(&lapic_events);

	if (this_cpu_has(X86_FEATURE_ARAT)) {
		lapic_clockevent.features &= ~CLOCK_EVT_FEAT_C3STOP;
		/* Make LAPIC timer preferable over percpu HPET */
		lapic_clockevent.rating = 150;
	}

	memcpy(levt, &lapic_clockevent, sizeof(*levt));
	levt->cpumask = cpumask_of(smp_processor_id());

	if (this_cpu_has(X86_FEATURE_TSC_DEADLINE_TIMER)) {
		levt->name = "lapic-deadline";
		levt->features &= ~(CLOCK_EVT_FEAT_PERIODIC |
				    CLOCK_EVT_FEAT_DUMMY);
		levt->set_next_event = lapic_next_deadline;
		clockevents_config_and_register(levt,
						tsc_khz * (1000 / TSC_DIVISOR),
						0xF, ~0UL);
	} else
		clockevents_register_device(levt);

	apic_update_vector(smp_processor_id(), LOCAL_TIMER_VECTOR, true);
}

/*
 * Install the updated TSC frequency from recalibration at the TSC
 * deadline clockevent devices.
 */
static void __lapic_update_tsc_freq(void *info)
{
	struct clock_event_device *levt = this_cpu_ptr(&lapic_events);

	if (!this_cpu_has(X86_FEATURE_TSC_DEADLINE_TIMER))
		return;

	clockevents_update_freq(levt, tsc_khz * (1000 / TSC_DIVISOR));
}

void lapic_update_tsc_freq(void)
{
	/*
	 * The clockevent device's ->mult and ->shift can both be
	 * changed. In order to avoid races, schedule the frequency
	 * update code on each CPU.
	 */
	on_each_cpu(__lapic_update_tsc_freq, NULL, 0);
}

/*
 * In this function we calibrate APIC bus clocks to the external timer.
 *
 * We want to do the calibration only once since we want to have local timer
 * irqs synchronous. CPUs connected by the same APIC bus have the very same bus
 * frequency.
 *
 * This was previously done by reading the PIT/HPET and waiting for a wrap
 * around to find out that a tick has elapsed. I have a box, where the PIT
 * readout is broken, so it never gets out of the wait loop again. This was
 * also reported by others.
 *
 * Monitoring the jiffies value is inaccurate and the clockevents
 * infrastructure allows us to do a simple substitution of the interrupt
 * handler.
 *
 * The calibration routine also uses the pm_timer when possible, as the PIT
 * happens to run way too slow (factor 2.3 on my VAIO CoreDuo, which goes
 * back to normal later in the boot process).
 */

#define LAPIC_CAL_LOOPS		(HZ/10)

static __initdata int lapic_cal_loops = -1;
static __initdata long lapic_cal_t1, lapic_cal_t2;
static __initdata unsigned long long lapic_cal_tsc1, lapic_cal_tsc2;
static __initdata u32 lapic_cal_pm1, lapic_cal_pm2;
static __initdata unsigned long lapic_cal_j1, lapic_cal_j2;

/*
 * Temporary interrupt handler and polled calibration function.
 */
static void __init lapic_cal_handler(struct clock_event_device *dev)
{
	unsigned long long tsc = 0;
	long tapic = apic_read(APIC_TMCCT);
	u32 pm = acpi_pm_read_early();

	if (boot_cpu_has(X86_FEATURE_TSC))
		tsc = rdtsc();

	switch (lapic_cal_loops++) {
	case 0:
		lapic_cal_t1 = tapic;
		lapic_cal_tsc1 = tsc;
		lapic_cal_pm1 = pm;
		lapic_cal_j1 = jiffies;
		break;

	case LAPIC_CAL_LOOPS:
		lapic_cal_t2 = tapic;
		lapic_cal_tsc2 = tsc;
		if (pm < lapic_cal_pm1)
			pm += ACPI_PM_OVRRUN;
		lapic_cal_pm2 = pm;
		lapic_cal_j2 = jiffies;
		break;
	}
}

static int __init
calibrate_by_pmtimer(u32 deltapm, long *delta, long *deltatsc)
{
	const long pm_100ms = PMTMR_TICKS_PER_SEC / 10;
	const long pm_thresh = pm_100ms / 100;
	unsigned long mult;
	u64 res;

#ifndef CONFIG_X86_PM_TIMER
	return -1;
#endif

	apic_pr_verbose("... PM-Timer delta = %u\n", deltapm);

	/* Check if the PM timer is available */
	if (!deltapm)
		return -1;

	mult = clocksource_hz2mult(PMTMR_TICKS_PER_SEC, 22);

	if (deltapm > (pm_100ms - pm_thresh) &&
	    deltapm < (pm_100ms + pm_thresh)) {
		apic_pr_verbose("... PM-Timer result ok\n");
		return 0;
	}

	res = (((u64)deltapm) * mult) >> 22;
	do_div(res, 1000000);
	pr_warn("APIC calibration not consistent with PM-Timer: %ldms instead of 100ms\n",
		(long)res);

	/* Correct the lapic counter value */
	res = (((u64)(*delta)) * pm_100ms);
	do_div(res, deltapm);
	pr_info("APIC delta adjusted to PM-Timer: "
		"%lu (%ld)\n", (unsigned long)res, *delta);
	*delta = (long)res;

	/* Correct the tsc counter value */
	if (boot_cpu_has(X86_FEATURE_TSC)) {
		res = (((u64)(*deltatsc)) * pm_100ms);
		do_div(res, deltapm);
		apic_pr_verbose("TSC delta adjusted to PM-Timer: %lu (%ld)\n",
				(unsigned long)res, *deltatsc);
		*deltatsc = (long)res;
	}

	return 0;
}
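
/*
 * lapic_timer_period is in (undivided) bus clocks per jiffy. Dividing
 * by APIC_DIVISOR yields the programmed timer ticks per jiffy, which
 * div_sc() turns into a ticks-per-nanosecond multiplier scaled by
 * 2^shift for the clockevents core.
 */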
static int __init lapic_init_clockevent(void)
{
	if (!lapic_timer_period)
		return -1;

	/* Calculate the scaled math multiplication factor */
	lapic_clockevent.mult = div_sc(lapic_timer_period/APIC_DIVISOR,
				       TICK_NSEC, lapic_clockevent.shift);
	lapic_clockevent.max_delta_ns =
		clockevent_delta2ns(0x7FFFFFFF, &lapic_clockevent);
	lapic_clockevent.max_delta_ticks = 0x7FFFFFFF;
	lapic_clockevent.min_delta_ns =
		clockevent_delta2ns(0xF, &lapic_clockevent);
	lapic_clockevent.min_delta_ticks = 0xF;

	return 0;
}

bool __init apic_needs_pit(void)
{
	/*
	 * If the frequencies are not known, PIT is required for both TSC
	 * and apic timer calibration.
	 */
	if (!tsc_khz || !cpu_khz)
		return true;

	/* Is there an APIC at all or is it disabled? */
	if (!boot_cpu_has(X86_FEATURE_APIC) || apic_is_disabled)
		return true;

	/*
	 * If interrupt delivery mode is legacy PIC or virtual wire without
	 * configuration, the local APIC timer won't be set up. Make sure
	 * that the PIT is initialized.
	 */
	if (apic_intr_mode == APIC_PIC ||
	    apic_intr_mode == APIC_VIRTUAL_WIRE_NO_CONFIG)
		return true;

	/* Virt guests may lack ARAT, but still have DEADLINE */
	if (!boot_cpu_has(X86_FEATURE_ARAT))
		return true;

	/* Deadline timer is based on TSC so no further PIT action required */
	if (boot_cpu_has(X86_FEATURE_TSC_DEADLINE_TIMER))
		return false;

	/* APIC timer disabled? */
	if (disable_apic_timer)
		return true;
	/*
	 * The APIC timer frequency is known already, no PIT calibration
	 * required. If unknown, let the PIT be initialized.
	 */
	return lapic_timer_period == 0;
}

static int __init calibrate_APIC_clock(void)
{
	struct clock_event_device *levt = this_cpu_ptr(&lapic_events);
	u64 tsc_perj = 0, tsc_start = 0;
	unsigned long jif_start;
	unsigned long deltaj;
	long delta, deltatsc;
	int pm_referenced = 0;

	if (boot_cpu_has(X86_FEATURE_TSC_DEADLINE_TIMER))
		return 0;

	/*
	 * Check if lapic timer has already been calibrated by platform
	 * specific routine, such as tsc calibration code. If so just fill
	 * in the clockevent structure and return.
	 */
	if (!lapic_init_clockevent()) {
		apic_pr_verbose("lapic timer already calibrated %d\n", lapic_timer_period);
		/*
		 * Direct calibration methods must have an always running
		 * local APIC timer, no need for broadcast timer.
		 */
		lapic_clockevent.features &= ~CLOCK_EVT_FEAT_DUMMY;
		return 0;
	}

	apic_pr_verbose("Using local APIC timer interrupts. Calibrating APIC timer ...\n");

	/*
	 * There are platforms w/o global clockevent devices. Instead of
	 * making the calibration conditional on that, use a polling based
	 * approach everywhere.
	 */
	local_irq_disable();

	/*
	 * Setup the APIC counter to maximum. There is no way the lapic
	 * can underflow in the 100ms detection time frame.
	 */
	__setup_APIC_LVTT(0xffffffff, 0, 0);

	/*
	 * Methods to terminate the calibration loop:
	 *  1) Global clockevent if available (jiffies)
	 *  2) TSC if available and frequency is known
	 */
	jif_start = READ_ONCE(jiffies);

	if (tsc_khz) {
		tsc_start = rdtsc();
		tsc_perj = div_u64((u64)tsc_khz * 1000, HZ);
	}

	/*
	 * Enable interrupts so the tick can fire, if a global
	 * clockevent device is available
	 */
	local_irq_enable();

	while (lapic_cal_loops <= LAPIC_CAL_LOOPS) {
		/* Wait for a tick to elapse */
		while (1) {
			if (tsc_khz) {
				u64 tsc_now = rdtsc();
				if ((tsc_now - tsc_start) >= tsc_perj) {
					tsc_start += tsc_perj;
					break;
				}
			} else {
				unsigned long jif_now = READ_ONCE(jiffies);

				if (time_after(jif_now, jif_start)) {
					jif_start = jif_now;
					break;
				}
			}
			cpu_relax();
		}

		/* Invoke the calibration routine */
		local_irq_disable();
		lapic_cal_handler(NULL);
		local_irq_enable();
	}

	local_irq_disable();

	/* Build delta t1-t2 as apic timer counts down */
	delta = lapic_cal_t1 - lapic_cal_t2;
	apic_pr_verbose("... lapic delta = %ld\n", delta);

	deltatsc = (long)(lapic_cal_tsc2 - lapic_cal_tsc1);

	/* we trust the PM based calibration if possible */
	pm_referenced = !calibrate_by_pmtimer(lapic_cal_pm2 - lapic_cal_pm1,
					      &delta, &deltatsc);
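
	/*
	 * delta is the number of divided (by APIC_DIVISOR) timer ticks
	 * that elapsed over LAPIC_CAL_LOOPS jiffies; scale it back up to
	 * bus clocks and normalize it to a single jiffy.
	 */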
	lapic_timer_period = (delta * APIC_DIVISOR) / LAPIC_CAL_LOOPS;
	lapic_init_clockevent();

	apic_pr_verbose("..... delta %ld\n", delta);
	apic_pr_verbose("..... mult: %u\n", lapic_clockevent.mult);
	apic_pr_verbose("..... calibration result: %u\n", lapic_timer_period);

	if (boot_cpu_has(X86_FEATURE_TSC)) {
		apic_pr_verbose("..... CPU clock speed is %ld.%04ld MHz.\n",
				(deltatsc / LAPIC_CAL_LOOPS) / (1000000 / HZ),
				(deltatsc / LAPIC_CAL_LOOPS) % (1000000 / HZ));
	}

	apic_pr_verbose("..... host bus clock speed is %u.%04u MHz.\n",
			lapic_timer_period / (1000000 / HZ),
			lapic_timer_period % (1000000 / HZ));

	/*
	 * Do a sanity check on the APIC calibration result
	 */
	if (lapic_timer_period < (1000000 / HZ)) {
		local_irq_enable();
		pr_warn("APIC frequency too slow, disabling apic timer\n");
		return -1;
	}

	levt->features &= ~CLOCK_EVT_FEAT_DUMMY;

	/*
	 * PM timer calibration failed or not turned on, so let's try the
	 * APIC timer based calibration, if a global clockevent device is
	 * available.
	 */
	if (!pm_referenced && global_clock_event) {
		apic_pr_verbose("... verify APIC timer\n");

		/*
		 * Setup the apic timer manually
		 */
		levt->event_handler = lapic_cal_handler;
		lapic_timer_set_periodic(levt);
		lapic_cal_loops = -1;

		/* Let the interrupts run */
		local_irq_enable();

		while (lapic_cal_loops <= LAPIC_CAL_LOOPS)
			cpu_relax();

		/* Stop the lapic timer */
		local_irq_disable();
		lapic_timer_shutdown(levt);

		/* Jiffies delta */
		deltaj = lapic_cal_j2 - lapic_cal_j1;
		apic_pr_verbose("... jiffies delta = %lu\n", deltaj);

		/* Check if the jiffies result is consistent */
		if (deltaj >= LAPIC_CAL_LOOPS-2 && deltaj <= LAPIC_CAL_LOOPS+2)
			apic_pr_verbose("... jiffies result ok\n");
		else
			levt->features |= CLOCK_EVT_FEAT_DUMMY;
	}
	local_irq_enable();

	if (levt->features & CLOCK_EVT_FEAT_DUMMY) {
		pr_warn("APIC timer disabled due to verification failure\n");
		return -1;
	}

	return 0;
}

/*
 * Setup the boot APIC
 *
 * Calibrate and verify the result.
 */
void __init setup_boot_APIC_clock(void)
{
	/*
	 * The local apic timer can be disabled via the kernel
	 * commandline or from the CPU detection code. Register the lapic
	 * timer as a dummy clock event source on SMP systems, so the
	 * broadcast mechanism is used. On UP systems simply ignore it.
	 */
	if (disable_apic_timer) {
		pr_info("Disabling APIC timer\n");
		/* No broadcast on UP! */
		if (num_possible_cpus() > 1) {
			lapic_clockevent.mult = 1;
			setup_APIC_timer();
		}
		return;
	}

	if (calibrate_APIC_clock()) {
		/* No broadcast on UP! */
		if (num_possible_cpus() > 1)
			setup_APIC_timer();
		return;
	}

	/*
	 * If nmi_watchdog is set to IO_APIC, we need the
	 * PIT/HPET going. Otherwise register lapic as a dummy
	 * device.
	 */
	lapic_clockevent.features &= ~CLOCK_EVT_FEAT_DUMMY;

	/* Setup the lapic or request the broadcast */
	setup_APIC_timer();
	amd_e400_c1e_apic_setup();
}

void setup_secondary_APIC_clock(void)
{
	setup_APIC_timer();
	amd_e400_c1e_apic_setup();
}

/*
 * The guts of the apic timer interrupt
 */
static void local_apic_timer_interrupt(void)
{
	struct clock_event_device *evt = this_cpu_ptr(&lapic_events);

	/*
	 * Normally we should not be here till the LAPIC has been
	 * initialized, but in some cases like kdump it's possible that
	 * there is a pending LAPIC timer interrupt from the previous
	 * kernel's context, which is delivered in the new kernel the
	 * moment interrupts are enabled.
	 *
	 * Interrupts are enabled early and the LAPIC is set up much
	 * later, hence it's possible that when we get here
	 * evt->event_handler is NULL. Check for event_handler being NULL
	 * and discard the interrupt as spurious.
	 */
	if (!evt->event_handler) {
		pr_warn("Spurious LAPIC timer interrupt on cpu %d\n",
			smp_processor_id());
		/* Switch it off */
		lapic_timer_shutdown(evt);
		return;
	}

	/*
	 * the NMI deadlock-detector uses this.
	 */
	inc_irq_stat(apic_timer_irqs);

	evt->event_handler(evt);
}

/*
 * Local APIC timer interrupt. This is the most natural way for doing
 * local interrupts, but local timer interrupts can be emulated by
 * broadcast interrupts too. [in case the hw doesn't support APIC timers]
 *
 * [ if a single-CPU system runs an SMP kernel then we call the local
 *   interrupt as well. Thus we cannot inline the local irq ... ]
 */
DEFINE_IDTENTRY_SYSVEC(sysvec_apic_timer_interrupt)
{
	struct pt_regs *old_regs = set_irq_regs(regs);

	apic_eoi();
	trace_local_timer_entry(LOCAL_TIMER_VECTOR);
	local_apic_timer_interrupt();
	trace_local_timer_exit(LOCAL_TIMER_VECTOR);

	set_irq_regs(old_regs);
}

/*
 * Local APIC start and shutdown
 */

/**
 * clear_local_APIC - shutdown the local APIC
 *
 * This is called, when a CPU is disabled and before rebooting, so the state of
 * the local APIC has no dangling leftovers. Also used to clean out any BIOS
 * leftovers during boot.
 */
void clear_local_APIC(void)
{
	int maxlvt;
	u32 v;

	if (!apic_accessible())
		return;

	maxlvt = lapic_get_maxlvt();
	/*
	 * Masking an LVT entry can trigger a local APIC error
	 * if the vector is zero. Mask LVTERR first to prevent this.
	 */
	if (maxlvt >= 3) {
		v = ERROR_APIC_VECTOR; /* any non-zero vector will do */
		apic_write(APIC_LVTERR, v | APIC_LVT_MASKED);
	}
	/*
	 * Careful: we have to set masks only first to deassert
	 * any level-triggered sources.
	 */
	v = apic_read(APIC_LVTT);
	apic_write(APIC_LVTT, v | APIC_LVT_MASKED);
	v = apic_read(APIC_LVT0);
	apic_write(APIC_LVT0, v | APIC_LVT_MASKED);
	v = apic_read(APIC_LVT1);
	apic_write(APIC_LVT1, v | APIC_LVT_MASKED);
	if (maxlvt >= 4) {
		v = apic_read(APIC_LVTPC);
		apic_write(APIC_LVTPC, v | APIC_LVT_MASKED);
	}

	/* let's not touch this if we didn't frob it */
#ifdef CONFIG_X86_THERMAL_VECTOR
	if (maxlvt >= 5) {
		v = apic_read(APIC_LVTTHMR);
		apic_write(APIC_LVTTHMR, v | APIC_LVT_MASKED);
	}
#endif
#ifdef CONFIG_X86_MCE_INTEL
	if (maxlvt >= 6) {
		v = apic_read(APIC_LVTCMCI);
		if (!(v & APIC_LVT_MASKED))
			apic_write(APIC_LVTCMCI, v | APIC_LVT_MASKED);
	}
#endif

	/*
	 * Clean APIC state for other OSs:
	 */
	apic_write(APIC_LVTT, APIC_LVT_MASKED);
	apic_write(APIC_LVT0, APIC_LVT_MASKED);
	apic_write(APIC_LVT1, APIC_LVT_MASKED);
	if (maxlvt >= 3)
		apic_write(APIC_LVTERR, APIC_LVT_MASKED);
	if (maxlvt >= 4)
		apic_write(APIC_LVTPC, APIC_LVT_MASKED);

	/* Integrated APIC (!82489DX) ? */
	if (lapic_is_integrated()) {
		if (maxlvt > 3)
			/* Clear ESR due to Pentium errata 3AP and 11AP */
			apic_write(APIC_ESR, 0);
		apic_read(APIC_ESR);
	}
}

/**
 * apic_soft_disable - Clears and software disables the local APIC on hotplug
 *
 * Contrary to disable_local_APIC() this does not touch the enable bit in
 * MSR_IA32_APICBASE. Clearing that bit on systems based on the 3 wire APIC
 * bus would require a hardware reset as the APIC would lose track of bus
 * arbitration. On systems with FSB delivery APICBASE could be disabled,
 * but it has to be guaranteed that no interrupt is sent to the APIC while
 * in that state and it's not clear from the SDM whether it still responds
 * to INIT/SIPI messages. Stay on the safe side and use software disable.
 */
void apic_soft_disable(void)
{
	u32 value;

	clear_local_APIC();

	/* Soft disable APIC (implies clearing of registers for 82489DX!). */
	value = apic_read(APIC_SPIV);
	value &= ~APIC_SPIV_APIC_ENABLED;
	apic_write(APIC_SPIV, value);
}

/**
 * disable_local_APIC - clear and disable the local APIC
 */
void disable_local_APIC(void)
{
	if (!apic_accessible())
		return;

	if (apic->teardown)
		apic->teardown();

	apic_soft_disable();

#ifdef CONFIG_X86_32
	/*
	 * When LAPIC was disabled by the BIOS and enabled by the kernel,
	 * restore the disabled state.
	 */
	if (enabled_via_apicbase) {
		unsigned int l, h;

		rdmsr(MSR_IA32_APICBASE, l, h);
		l &= ~MSR_IA32_APICBASE_ENABLE;
		wrmsr(MSR_IA32_APICBASE, l, h);
	}
#endif
}

/*
 * If Linux enabled the LAPIC against the BIOS default, disable it before
 * re-entering the BIOS on shutdown. Otherwise the BIOS may get confused and
 * not power-off. Additionally clear all LVT entries before disable_local_APIC
 * for the case where Linux didn't enable the LAPIC.
 */
void lapic_shutdown(void)
{
	unsigned long flags;

	if (!boot_cpu_has(X86_FEATURE_APIC) && !apic_from_smp_config())
		return;

	local_irq_save(flags);

#ifdef CONFIG_X86_32
	if (!enabled_via_apicbase)
		clear_local_APIC();
	else
#endif
		disable_local_APIC();

	local_irq_restore(flags);
}

/**
 * sync_Arb_IDs - synchronize APIC bus arbitration IDs
 */
void __init sync_Arb_IDs(void)
{
	/*
	 * Unsupported on P4 - see Intel Dev. Manual Vol. 3, Ch. 8.6.1. And not
	 * needed on AMD.
	 */
	if (modern_apic() || boot_cpu_data.x86_vendor == X86_VENDOR_AMD)
		return;

	/*
	 * Wait for idle.
	 */
	apic_wait_icr_idle();

	apic_pr_debug("Synchronizing Arb IDs.\n");
	apic_write(APIC_ICR, APIC_DEST_ALLINC | APIC_INT_LEVELTRIG | APIC_DM_INIT);
}

enum apic_intr_mode_id apic_intr_mode __ro_after_init;

static int __init __apic_intr_mode_select(void)
{
	/* Check kernel option */
	if (apic_is_disabled) {
		pr_info("APIC disabled via kernel command line\n");
		return APIC_PIC;
	}

	/* Check BIOS */
#ifdef CONFIG_X86_64
	/* On 64-bit, the APIC must be integrated, check the local APIC only */
	if (!boot_cpu_has(X86_FEATURE_APIC)) {
		apic_is_disabled = true;
		pr_info("APIC disabled by BIOS\n");
		return APIC_PIC;
	}
#else
	/* On 32-bit, the APIC may be an integrated APIC or a 82489DX */

	/* Neither 82489DX nor integrated APIC? */
	if (!boot_cpu_has(X86_FEATURE_APIC) && !smp_found_config) {
		apic_is_disabled = true;
		return APIC_PIC;
	}

	/* Does the BIOS pretend there is an integrated APIC? */
	if (!boot_cpu_has(X86_FEATURE_APIC) &&
	    APIC_INTEGRATED(boot_cpu_apic_version)) {
		apic_is_disabled = true;
		pr_err(FW_BUG "Local APIC not detected, force emulation\n");
		return APIC_PIC;
	}
#endif

	/* Check MP table or ACPI MADT configuration */
	if (!smp_found_config) {
		disable_ioapic_support();
		if (!acpi_lapic) {
			pr_info("APIC: ACPI MADT or MP tables are not detected\n");
			return APIC_VIRTUAL_WIRE_NO_CONFIG;
		}
		return APIC_VIRTUAL_WIRE;
	}

#ifdef CONFIG_SMP
	/* If SMP should be disabled, then really disable it! */
	if (!setup_max_cpus) {
		pr_info("APIC: SMP mode deactivated\n");
		return APIC_SYMMETRIC_IO_NO_ROUTING;
	}
#endif

	return APIC_SYMMETRIC_IO;
}

/* Select the interrupt delivery mode for the BSP */
void __init apic_intr_mode_select(void)
{
	apic_intr_mode = __apic_intr_mode_select();
}

/*
 * An initial setup of the virtual wire mode.
 */
void __init init_bsp_APIC(void)
{
	unsigned int value;

	/*
	 * Don't do the setup now if we have an SMP BIOS as the
	 * through-I/O-APIC virtual wire mode might be active.
	 */
	if (smp_found_config || !boot_cpu_has(X86_FEATURE_APIC))
		return;

	/*
	 * Do not trust the local APIC being empty at bootup.
	 */
	clear_local_APIC();

	/*
	 * Enable APIC.
	 */
	value = apic_read(APIC_SPIV);
	value &= ~APIC_VECTOR_MASK;
	value |= APIC_SPIV_APIC_ENABLED;

#ifdef CONFIG_X86_32
	/* This bit is reserved on P4/Xeon and should be cleared */
	if ((boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) &&
	    (boot_cpu_data.x86 == 15))
		value &= ~APIC_SPIV_FOCUS_DISABLED;
	else
#endif
		value |= APIC_SPIV_FOCUS_DISABLED;
	value |= SPURIOUS_APIC_VECTOR;
	apic_write(APIC_SPIV, value);

	/*
	 * Set up the virtual wire mode.
	 */
	apic_write(APIC_LVT0, APIC_DM_EXTINT);
	value = APIC_DM_NMI;
	if (!lapic_is_integrated())		/* 82489DX */
		value |= APIC_LVT_LEVEL_TRIGGER;
	if (apic_extnmi == APIC_EXTNMI_NONE)
		value |= APIC_LVT_MASKED;
	apic_write(APIC_LVT1, value);
}

static void __init apic_bsp_setup(bool upmode);

/* Init the interrupt delivery mode for the BSP */
void __init apic_intr_mode_init(void)
{
	bool upmode = IS_ENABLED(CONFIG_UP_LATE_INIT);

	switch (apic_intr_mode) {
	case APIC_PIC:
		pr_info("APIC: Keep in PIC mode(8259)\n");
		return;
	case APIC_VIRTUAL_WIRE:
		pr_info("APIC: Switch to virtual wire mode setup\n");
		break;
	case APIC_VIRTUAL_WIRE_NO_CONFIG:
		pr_info("APIC: Switch to virtual wire mode setup with no configuration\n");
		upmode = true;
		break;
	case APIC_SYMMETRIC_IO:
		pr_info("APIC: Switch to symmetric I/O mode setup\n");
		break;
	case APIC_SYMMETRIC_IO_NO_ROUTING:
		pr_info("APIC: Switch to symmetric I/O mode setup in no SMP routine\n");
		break;
	}

	x86_64_probe_apic();

	if (x86_platform.apic_post_init)
		x86_platform.apic_post_init();

	apic_bsp_setup(upmode);
}

static void lapic_setup_esr(void)
{
	unsigned int oldvalue, value, maxlvt;

	if (!lapic_is_integrated()) {
		pr_info("No ESR for 82489DX.\n");
		return;
	}

	if (apic->disable_esr) {
		/*
		 * Something untraceable is creating bad interrupts on
		 * secondary quads ... for the moment, just leave the
		 * ESR disabled - we can't do anything useful with the
		 * errors anyway - mbligh
		 */
		pr_info("Leaving ESR disabled.\n");
		return;
	}

	maxlvt = lapic_get_maxlvt();
	if (maxlvt > 3)		/* Due to the Pentium erratum 3AP. */
		apic_write(APIC_ESR, 0);
	oldvalue = apic_read(APIC_ESR);

	/* enables sending errors */
	value = ERROR_APIC_VECTOR;
	apic_write(APIC_LVTERR, value);

	/*
	 * spec says clear errors after enabling vector.
	 */
	if (maxlvt > 3)
		apic_write(APIC_ESR, 0);
	value = apic_read(APIC_ESR);
	if (value != oldvalue) {
		apic_pr_verbose("ESR value before enabling vector: 0x%08x after: 0x%08x\n",
				oldvalue, value);
	}
}
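
/*
 * The ISR and IRR are each banks of APIC_IR_REGS 32-bit registers
 * spaced 0x10 apart. The union below lets the same storage be filled
 * register by register and then scanned as one long bitmap.
 */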
#define APIC_IR_REGS		APIC_ISR_NR
#define APIC_IR_BITS		(APIC_IR_REGS * 32)
#define APIC_IR_MAPSIZE		(APIC_IR_BITS / BITS_PER_LONG)

union apic_ir {
	unsigned long map[APIC_IR_MAPSIZE];
	u32 regs[APIC_IR_REGS];
};

static bool apic_check_and_eoi_isr(union apic_ir *isr)
{
	int i, bit;

	/* Read the ISRs */
	for (i = 0; i < APIC_IR_REGS; i++)
		isr->regs[i] = apic_read(APIC_ISR + i * 0x10);

	/* If the ISR map is empty, nothing to do here. */
	if (bitmap_empty(isr->map, APIC_IR_BITS))
		return true;

	/*
	 * There can be multiple ISR bits set when a high priority
	 * interrupt preempted a lower priority one. Issue an EOI for each
	 * set bit. The priority traversal order does not matter as there
	 * can't be new ISR bits raised at this point. What matters is that
	 * an EOI is issued for each ISR bit.
	 */
	for_each_set_bit(bit, isr->map, APIC_IR_BITS)
		apic_eoi();

	/* Reread the ISRs, they should be empty now */
	for (i = 0; i < APIC_IR_REGS; i++)
		isr->regs[i] = apic_read(APIC_ISR + i * 0x10);

	return bitmap_empty(isr->map, APIC_IR_BITS);
}

/*
 * If a CPU services an interrupt and crashes before issuing EOI to the
 * local APIC, the corresponding ISR bit is still set when the crashing CPU
 * jumps into a crash kernel. Read the ISR and issue an EOI for each set
 * bit to acknowledge it as otherwise these slots would be locked forever
 * waiting for an EOI.
 *
 * If there are pending bits in the IRR, then they won't be converted into
 * ISR bits as the CPU has interrupts disabled. They will be delivered once
 * the CPU enables interrupts and there is nothing which can prevent that.
 *
 * In the worst case this results in spurious interrupt warnings.
 */
static void apic_clear_isr(void)
{
	union apic_ir ir;
	unsigned int i;

	if (!apic_check_and_eoi_isr(&ir))
		pr_warn("APIC: Stale ISR: %256pb\n", ir.map);

	for (i = 0; i < APIC_IR_REGS; i++)
		ir.regs[i] = apic_read(APIC_IRR + i * 0x10);

	if (!bitmap_empty(ir.map, APIC_IR_BITS))
		pr_warn("APIC: Stale IRR: %256pb\n", ir.map);
}

/**
 * setup_local_APIC - setup the local APIC
 *
 * Used to setup local APIC while initializing BSP or bringing up APs.
 * Always called with preemption disabled.
 */
static void setup_local_APIC(void)
{
	int cpu = smp_processor_id();
	unsigned int value;

	if (apic_is_disabled) {
		disable_ioapic_support();
		return;
	}

	if (apic->setup)
		apic->setup();

	/*
	 * If this comes from kexec/kcrash the APIC might be enabled in
	 * SPIV. Soft disable it before doing further initialization.
	 */
	value = apic_read(APIC_SPIV);
	value &= ~APIC_SPIV_APIC_ENABLED;
	apic_write(APIC_SPIV, value);

#ifdef CONFIG_X86_32
	/* Pound the ESR really hard over the head with a big hammer - mbligh */
	if (lapic_is_integrated() && apic->disable_esr) {
		apic_write(APIC_ESR, 0);
		apic_write(APIC_ESR, 0);
		apic_write(APIC_ESR, 0);
		apic_write(APIC_ESR, 0);
	}
#endif
	/*
	 * Intel recommends to set DFR, LDR and TPR before enabling
	 * an APIC. See e.g. "AP-388 82489DX User's Manual" (Intel
	 * document number 292116).
	 *
	 * Except for APICs which operate in physical destination mode.
	 */
	if (apic->init_apic_ldr)
		apic->init_apic_ldr();

	/*
	 * Set Task Priority to 'accept all except vectors 0-31'. An APIC
	 * vector in the 16-31 range could be delivered if TPR == 0, but we
	 * would think it's an exception and terrible things will happen. We
	 * never change this later on.
	 */
	value = apic_read(APIC_TASKPRI);
	value &= ~APIC_TPRI_MASK;
	value |= 0x10;
	apic_write(APIC_TASKPRI, value);

	apic_clear_isr();

	/*
	 * Now that we are all set up, enable the APIC
	 */
	value = apic_read(APIC_SPIV);
	value &= ~APIC_VECTOR_MASK;
	/*
	 * Enable APIC
	 */
	value |= APIC_SPIV_APIC_ENABLED;

#ifdef CONFIG_X86_32
	/*
	 * Some unknown Intel IO/APIC (or APIC) errata is biting us with
	 * certain networking cards. If high frequency interrupts are
	 * happening on a particular IOAPIC pin, plus the IOAPIC routing
	 * entry is masked/unmasked at a high rate as well then sooner or
	 * later the IOAPIC line gets 'stuck', no more interrupts are received
	 * from the device. If the focus CPU is disabled then the hang goes
	 * away, oh well :-(
	 *
	 * [ This bug can be reproduced easily with level-triggered
	 *   PCI Ne2000 networking cards and PII/PIII processors, dual
	 *   BX chipset. ]
	 */
	/*
	 * Actually disabling the focus CPU check just makes the hang less
	 * frequent as it makes the interrupt distribution model be more
	 * like LRU than MRU (the short-term load is more even across CPUs).
	 */

	/*
	 * - enable focus processor (bit==0)
	 * - 64bit mode always uses processor focus
	 *   so no need to set it
	 */
	value &= ~APIC_SPIV_FOCUS_DISABLED;
#endif

	/*
	 * Set spurious IRQ vector
	 */
	value |= SPURIOUS_APIC_VECTOR;
	apic_write(APIC_SPIV, value);

	perf_events_lapic_init();

	/*
	 * Set up LVT0, LVT1:
	 *
	 * set up through-local-APIC on the boot CPU's LINT0. This is not
	 * strictly necessary in pure symmetric-IO mode, but sometimes
	 * we delegate interrupts to the 8259A.
	 */
	/*
	 * TODO: set up through-local-APIC from through-I/O-APIC? --macro
	 */
	value = apic_read(APIC_LVT0) & APIC_LVT_MASKED;
	if (!cpu && (pic_mode || !value || ioapic_is_disabled)) {
		value = APIC_DM_EXTINT;
		apic_pr_verbose("Enabled ExtINT on CPU#%d\n", cpu);
	} else {
		value = APIC_DM_EXTINT | APIC_LVT_MASKED;
		apic_pr_verbose("Masked ExtINT on CPU#%d\n", cpu);
	}
	apic_write(APIC_LVT0, value);

	/*
	 * Only the BSP sees the LINT1 NMI signal by default. This can be
	 * modified by the apic_extnmi= boot option.
	 */
	if ((!cpu && apic_extnmi != APIC_EXTNMI_NONE) ||
	    apic_extnmi == APIC_EXTNMI_ALL)
		value = APIC_DM_NMI;
	else
		value = APIC_DM_NMI | APIC_LVT_MASKED;

	/* Is 82489DX? */
	if (!lapic_is_integrated())
		value |= APIC_LVT_LEVEL_TRIGGER;
	apic_write(APIC_LVT1, value);

#ifdef CONFIG_X86_MCE_INTEL
	/* Recheck CMCI information after local APIC is up on CPU #0 */
	if (!cpu)
		cmci_recheck();
#endif
}

static void end_local_APIC_setup(void)
{
	lapic_setup_esr();

#ifdef CONFIG_X86_32
	{
		unsigned int value;
		/* Disable the local apic timer */
		value = apic_read(APIC_LVTT);
		value |= (APIC_LVT_MASKED | LOCAL_TIMER_VECTOR);
		apic_write(APIC_LVTT, value);
	}
#endif

	apic_pm_activate();
}

/*
 * APIC setup function for application processors. Called from smpboot.c
 */
void apic_ap_setup(void)
{
	setup_local_APIC();
	end_local_APIC_setup();
}

static __init void apic_read_boot_cpu_id(bool x2apic)
{
	/*
	 * This can be invoked from check_x2apic() before the APIC has been
	 * selected. But that code knows for sure that the BIOS enabled
	 * X2APIC.
	 */
	if (x2apic) {
		boot_cpu_physical_apicid = native_apic_msr_read(APIC_ID);
		boot_cpu_apic_version = GET_APIC_VERSION(native_apic_msr_read(APIC_LVR));
	} else {
		boot_cpu_physical_apicid = read_apic_id();
		boot_cpu_apic_version = GET_APIC_VERSION(apic_read(APIC_LVR));
	}
	topology_register_boot_apic(boot_cpu_physical_apicid);
}

#ifdef CONFIG_X86_X2APIC
int x2apic_mode;
EXPORT_SYMBOL_GPL(x2apic_mode);

enum {
	X2APIC_OFF,
	X2APIC_DISABLED,
	/* All states below here have X2APIC enabled */
	X2APIC_ON,
	X2APIC_ON_LOCKED
};
static int x2apic_state;
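
/*
 * When the firmware has set LEGACY_XAPIC_DISABLED (advertised through
 * the ARCH_CAPABILITIES MSR), the CPU is locked in x2APIC mode and
 * cannot be switched back to legacy xAPIC.
 */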
static bool x2apic_hw_locked(void)
{
	u64 x86_arch_cap_msr;
	u64 msr;

	x86_arch_cap_msr = x86_read_arch_cap_msr();
	if (x86_arch_cap_msr & ARCH_CAP_XAPIC_DISABLE) {
		rdmsrq(MSR_IA32_XAPIC_DISABLE_STATUS, msr);
		return (msr & LEGACY_XAPIC_DISABLED);
	}
	return false;
}

static void __x2apic_disable(void)
{
	u64 msr;

	if (!boot_cpu_has(X86_FEATURE_APIC))
		return;

	rdmsrq(MSR_IA32_APICBASE, msr);
	if (!(msr & X2APIC_ENABLE))
		return;
	/* Disable xapic and x2apic first and then reenable xapic mode */
	wrmsrq(MSR_IA32_APICBASE, msr & ~(X2APIC_ENABLE | XAPIC_ENABLE));
	wrmsrq(MSR_IA32_APICBASE, msr & ~X2APIC_ENABLE);
	printk_once(KERN_INFO "x2apic disabled\n");
}

static void __x2apic_enable(void)
{
	u64 msr;

	rdmsrq(MSR_IA32_APICBASE, msr);
	if (msr & X2APIC_ENABLE)
		return;
	wrmsrq(MSR_IA32_APICBASE, msr | X2APIC_ENABLE);
	printk_once(KERN_INFO "x2apic enabled\n");
}

static int __init setup_nox2apic(char *str)
{
	if (x2apic_enabled()) {
		u32 apicid = native_apic_msr_read(APIC_ID);

		if (apicid >= 255) {
			pr_warn("Apicid: %08x, cannot enforce nox2apic\n",
				apicid);
			return 0;
		}
		if (x2apic_hw_locked()) {
			pr_warn("APIC locked in x2apic mode, can't disable\n");
			return 0;
		}
		pr_warn("x2apic already enabled.\n");
		__x2apic_disable();
	}
	setup_clear_cpu_cap(X86_FEATURE_X2APIC);
	x2apic_state = X2APIC_DISABLED;
	x2apic_mode = 0;
	return 0;
}
early_param("nox2apic", setup_nox2apic);

/* Called from cpu_init() to enable x2apic on (secondary) cpus */
void x2apic_setup(void)
{
	/*
	 * Try to make the AP's APIC state match that of the BSP, but if the
	 * BSP is unlocked and the AP is locked then there is a state mismatch.
	 * Warn about the mismatch in case a GP fault occurs due to a locked AP
	 * trying to be turned off.
	 */
	if (x2apic_state != X2APIC_ON_LOCKED && x2apic_hw_locked())
		pr_warn("x2apic lock mismatch between BSP and AP.\n");
	/*
	 * If x2apic is not in ON or LOCKED state, disable it if already enabled
	 * from BIOS.
	 */
	if (x2apic_state < X2APIC_ON) {
		__x2apic_disable();
		return;
	}
	__x2apic_enable();
}

static __init void apic_set_fixmap(bool read_apic);

static __init void x2apic_disable(void)
{
	u32 x2apic_id;

	if (x2apic_state < X2APIC_ON)
		return;

	x2apic_id = read_apic_id();
	if (x2apic_id >= 255)
		panic("Cannot disable x2apic, id: %08x\n", x2apic_id);

	if (x2apic_hw_locked()) {
		pr_warn("Cannot disable locked x2apic, id: %08x\n", x2apic_id);
		return;
	}

	__x2apic_disable();

	x2apic_mode = 0;
	x2apic_state = X2APIC_DISABLED;

	/*
	 * Don't reread the APIC ID as it was already done from
	 * check_x2apic() and the APIC driver still is an x2APIC variant,
	 * which fails to do the read after x2APIC was disabled.
	 */
	apic_set_fixmap(false);
}

static __init void x2apic_enable(void)
{
	if (x2apic_state != X2APIC_OFF)
		return;

	x2apic_mode = 1;
	x2apic_state = X2APIC_ON;
	__x2apic_enable();
}

static __init void try_to_enable_x2apic(int remap_mode)
{
	if (x2apic_state == X2APIC_DISABLED)
		return;

	if (remap_mode != IRQ_REMAP_X2APIC_MODE) {
		u32 apic_limit = 255;

		/*
		 * Using X2APIC without IR is not architecturally supported
		 * on bare metal but may be supported in guests.
		 */
		if (!x86_init.hyper.x2apic_available()) {
			pr_info("x2apic: IRQ remapping doesn't support X2APIC mode\n");
			x2apic_disable();
			return;
		}

		/*
		 * If the hypervisor supports extended destination ID in
		 * MSI, that increases the maximum APIC ID that can be
		 * used for non-remapped IRQ domains.
		 */
		if (x86_init.hyper.msi_ext_dest_id()) {
			virt_ext_dest_id = 1;
			apic_limit = 32767;
		}

		/*
		 * Without IR, all CPUs can be addressed by IOAPIC/MSI only
		 * in physical mode, and CPUs with an APIC ID that cannot
		 * be addressed must not be brought online.
		 */
		x2apic_set_max_apicid(apic_limit);
		x2apic_phys = 1;
	}
	x2apic_enable();
}

void __init check_x2apic(void)
{
	if (x2apic_enabled()) {
		pr_info("x2apic: enabled by BIOS, switching to x2apic ops\n");
		x2apic_mode = 1;
		if (x2apic_hw_locked())
			x2apic_state = X2APIC_ON_LOCKED;
		else
			x2apic_state = X2APIC_ON;
		apic_read_boot_cpu_id(true);
	} else if (!boot_cpu_has(X86_FEATURE_X2APIC)) {
		x2apic_state = X2APIC_DISABLED;
	}
}
#else /* CONFIG_X86_X2APIC */
void __init check_x2apic(void)
{
	if (!apic_is_x2apic_enabled())
		return;
	/*
	 * Checkme: Can we simply turn off x2APIC here instead of disabling the APIC?
	 */
	pr_err("Kernel does not support x2APIC, please recompile with CONFIG_X86_X2APIC.\n");
	pr_err("Disabling APIC, expect reduced performance and functionality.\n");

	apic_is_disabled = true;
	setup_clear_cpu_cap(X86_FEATURE_APIC);
}

static inline void try_to_enable_x2apic(int remap_mode) { }
static inline void __x2apic_enable(void) { }
#endif /* !CONFIG_X86_X2APIC */
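
/*
 * Called once on the BSP during boot: prepare interrupt remapping, mask
 * the PIC and IO-APIC while the mode switch happens, enable x2APIC
 * according to the achieved remapping mode, and restore the saved
 * IO-APIC configuration if remapping could not be enabled.
 */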
1894
1895
void __init enable_IR_x2apic(void)
1896
{
1897
unsigned long flags;
1898
int ret, ir_stat;
1899
1900
if (ioapic_is_disabled) {
1901
pr_info("Not enabling interrupt remapping due to skipped IO-APIC setup\n");
1902
return;
1903
}
1904
1905
ir_stat = irq_remapping_prepare();
1906
if (ir_stat < 0 && !x2apic_supported())
1907
return;
1908
1909
ret = save_ioapic_entries();
1910
if (ret) {
1911
pr_info("Saving IO-APIC state failed: %d\n", ret);
1912
return;
1913
}
1914
1915
	local_irq_save(flags);
	legacy_pic->mask_all();
	mask_ioapic_entries();

	/* If irq_remapping_prepare() succeeded, try to enable it */
	if (ir_stat >= 0)
		ir_stat = irq_remapping_enable();
	/* ir_stat contains the remap mode or an error code */
	try_to_enable_x2apic(ir_stat);

	if (ir_stat < 0)
		restore_ioapic_entries();
	legacy_pic->restore_mask();
	local_irq_restore(flags);
}

#ifdef CONFIG_X86_64
/*
 * Detect and enable local APICs on non-SMP boards.
 * Original code written by Keir Fraser.
 * On AMD64 we trust the BIOS - if it says no APIC it is likely
 * not correctly set up (usually the APIC timer won't work etc.)
 */
static bool __init detect_init_APIC(void)
{
	if (!boot_cpu_has(X86_FEATURE_APIC)) {
		pr_info("No local APIC present\n");
		return false;
	}

	register_lapic_address(APIC_DEFAULT_PHYS_BASE);
	return true;
}
#else

static bool __init apic_verify(unsigned long addr)
{
	u32 features, h, l;

	/*
	 * The APIC feature bit should now be enabled
	 * in `cpuid'
	 */
	features = cpuid_edx(1);
	if (!(features & (1 << X86_FEATURE_APIC))) {
		pr_warn("Could not enable APIC!\n");
		return false;
	}
	set_cpu_cap(&boot_cpu_data, X86_FEATURE_APIC);

	/* The BIOS may have set up the APIC at some other address */
	if (boot_cpu_data.x86 >= 6) {
		rdmsr(MSR_IA32_APICBASE, l, h);
		if (l & MSR_IA32_APICBASE_ENABLE)
			addr = l & MSR_IA32_APICBASE_BASE;
	}

	register_lapic_address(addr);
	pr_info("Found and enabled local APIC!\n");
	return true;
}

bool __init apic_force_enable(unsigned long addr)
{
	u32 h, l;

	if (apic_is_disabled)
		return false;

	/*
	 * Some BIOSes disable the local APIC in the APIC_BASE
	 * MSR. This can only be done in software for Intel P6 or later
	 * and AMD K7 (Model > 1) or later.
	 */
	if (boot_cpu_data.x86 >= 6) {
		rdmsr(MSR_IA32_APICBASE, l, h);
		if (!(l & MSR_IA32_APICBASE_ENABLE)) {
			pr_info("Local APIC disabled by BIOS -- reenabling.\n");
			l &= ~MSR_IA32_APICBASE_BASE;
			l |= MSR_IA32_APICBASE_ENABLE | addr;
			wrmsr(MSR_IA32_APICBASE, l, h);
			enabled_via_apicbase = 1;
		}
	}
	return apic_verify(addr);
}

/*
 * Detect and initialize APIC
 */
static bool __init detect_init_APIC(void)
{
	/* Disabled by kernel option? */
	if (apic_is_disabled)
		return false;

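	/*
	 * Only CPUs known to have a usable integrated APIC are accepted:
	 * AMD K7 (model > 1) and family 0xf or later, all Hygon parts,
	 * and Intel Pentium with a CPUID-visible APIC or anything from
	 * the Pentium Pro onwards.
	 */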
	switch (boot_cpu_data.x86_vendor) {
	case X86_VENDOR_AMD:
		if ((boot_cpu_data.x86 == 6 && boot_cpu_data.x86_model > 1) ||
		    (boot_cpu_data.x86 >= 15))
			break;
		goto no_apic;
	case X86_VENDOR_HYGON:
		break;
	case X86_VENDOR_INTEL:
		if ((boot_cpu_data.x86 == 5 && boot_cpu_has(X86_FEATURE_APIC)) ||
		    boot_cpu_data.x86_vfm >= INTEL_PENTIUM_PRO)
			break;
		goto no_apic;
	default:
		goto no_apic;
	}

	if (!boot_cpu_has(X86_FEATURE_APIC)) {
		/*
		 * Override the BIOS and try to enable the local APIC only
		 * if "lapic" was specified.
		 */
		if (!force_enable_local_apic) {
			pr_info("Local APIC disabled by BIOS -- "
				"you can enable it with \"lapic\"\n");
			return false;
		}
		if (!apic_force_enable(APIC_DEFAULT_PHYS_BASE))
			return false;
	} else {
		if (!apic_verify(APIC_DEFAULT_PHYS_BASE))
			return false;
	}

	apic_pm_activate();

	return true;

no_apic:
	pr_info("No local APIC present or hardware disabled\n");
	return false;
}
#endif

/**
 * init_apic_mappings - initialize APIC mappings
 */
void __init init_apic_mappings(void)
{
	if (apic_validate_deadline_timer())
		pr_info("TSC deadline timer available\n");

	if (x2apic_mode)
		return;

	if (!smp_found_config) {
		if (!detect_init_APIC()) {
			pr_info("APIC: disable apic facility\n");
			apic_disable();
		}
	}
}

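/*
 * Map the local APIC MMIO window through the fixmap so it is reachable
 * this early in boot, and optionally read the boot CPU's APIC ID through
 * the fresh mapping.
 */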
static __init void apic_set_fixmap(bool read_apic)
{
	set_fixmap_nocache(FIX_APIC_BASE, mp_lapic_addr);
	apic_mmio_base = APIC_BASE;
	apic_pr_verbose("Mapped APIC to %16lx (%16lx)\n", apic_mmio_base, mp_lapic_addr);
	if (read_apic)
		apic_read_boot_cpu_id(false);
}

void __init register_lapic_address(unsigned long address)
{
	/* This should only happen once */
	WARN_ON_ONCE(mp_lapic_addr);
	mp_lapic_addr = address;

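	/* x2APIC mode accesses the APIC via MSRs; no MMIO fixmap is needed */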
	if (!x2apic_mode)
		apic_set_fixmap(true);
}

/*
 * Local APIC interrupts
 */

/*
 * Common handling code for spurious_interrupt and spurious_vector entry
 * points below. No point in allowing the compiler to inline it twice.
 */
static noinline void handle_spurious_interrupt(u8 vector)
{
	u32 v;

	trace_spurious_apic_entry(vector);

	inc_irq_stat(irq_spurious_count);

	/*
	 * If this is a spurious interrupt then do not acknowledge
	 */
	if (vector == SPURIOUS_APIC_VECTOR) {
		/* See SDM vol 3 */
		pr_info("Spurious APIC interrupt (vector 0xFF) on CPU#%d, should never happen.\n",
			smp_processor_id());
		goto out;
	}

	/*
	 * If it is a vectored one, verify it's set in the ISR. If set,
	 * acknowledge it.
	 */
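	/*
	 * The 256-bit ISR is spread over eight 32-bit registers spaced
	 * 0x10 apart: (vector & ~0x1f) >> 1 is the offset of the register
	 * covering this vector, and bit (vector & 0x1f) selects the
	 * vector within it.
	 */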
	v = apic_read(APIC_ISR + ((vector & ~0x1f) >> 1));
	if (v & (1 << (vector & 0x1f))) {
		pr_info("Spurious interrupt (vector 0x%02x) on CPU#%d. Acked\n",
			vector, smp_processor_id());
		apic_eoi();
	} else {
		pr_info("Spurious interrupt (vector 0x%02x) on CPU#%d. Not pending!\n",
			vector, smp_processor_id());
	}
out:
	trace_spurious_apic_exit(vector);
}

/**
 * spurious_interrupt - Catch all for interrupts raised on unused vectors
 * @regs:	Pointer to pt_regs on stack
 * @vector:	The vector number
 *
 * This is invoked from ASM entry code to catch all interrupts which
 * trigger on an entry which is routed to the common_spurious idtentry
 * point.
 */
DEFINE_IDTENTRY_IRQ(spurious_interrupt)
{
	handle_spurious_interrupt(vector);
}

DEFINE_IDTENTRY_SYSVEC(sysvec_spurious_apic_interrupt)
{
	handle_spurious_interrupt(SPURIOUS_APIC_VECTOR);
}

/*
 * This interrupt should never happen with our APIC/SMP architecture
 */
DEFINE_IDTENTRY_SYSVEC(sysvec_error_interrupt)
{
	static const char * const error_interrupt_reason[] = {
		"Send CS error",		/* APIC Error Bit 0 */
		"Receive CS error",		/* APIC Error Bit 1 */
		"Send accept error",		/* APIC Error Bit 2 */
		"Receive accept error",		/* APIC Error Bit 3 */
		"Redirectable IPI",		/* APIC Error Bit 4 */
		"Send illegal vector",		/* APIC Error Bit 5 */
		"Received illegal vector",	/* APIC Error Bit 6 */
		"Illegal register address",	/* APIC Error Bit 7 */
	};
	u32 v, i = 0;

	trace_error_apic_entry(ERROR_APIC_VECTOR);

	/* First tickle the hardware, only then report what went on. -- REW */
	if (lapic_get_maxlvt() > 3)	/* Due to the Pentium erratum 3AP. */
		apic_write(APIC_ESR, 0);
	v = apic_read(APIC_ESR);
	apic_eoi();
	atomic_inc(&irq_err_count);

	apic_pr_debug("APIC error on CPU%d: %02x", smp_processor_id(), v);

	v &= 0xff;
	while (v) {
		if (v & 0x1)
			apic_pr_debug_cont(" : %s", error_interrupt_reason[i]);
		i++;
		v >>= 1;
	}

	apic_pr_debug_cont("\n");

	trace_error_apic_exit(ERROR_APIC_VECTOR);
}

/**
 * connect_bsp_APIC - attach the APIC to the interrupt system
 */
static void __init connect_bsp_APIC(void)
{
#ifdef CONFIG_X86_32
	if (pic_mode) {
		/*
		 * Do not trust the local APIC being empty at bootup.
		 */
		clear_local_APIC();
		/*
		 * PIC mode, enable APIC mode in the IMCR, i.e. connect BSP's
		 * local APIC to INT and NMI lines.
		 */
		apic_pr_verbose("Leaving PIC mode, enabling APIC mode.\n");
		imcr_pic_to_apic();
	}
#endif
}

/**
 * disconnect_bsp_APIC - detach the APIC from the interrupt system
 * @virt_wire_setup:	indicates whether virtual wire mode is selected
 *
 * Virtual wire mode is necessary to deliver legacy interrupts even when the
 * APIC is disabled.
 */
void disconnect_bsp_APIC(int virt_wire_setup)
{
	unsigned int value;

#ifdef CONFIG_X86_32
	if (pic_mode) {
		/*
		 * Put the board back into PIC mode (has an effect only on
		 * certain older boards). Note that APIC interrupts, including
		 * IPIs, won't work beyond this point! The only exception are
		 * INIT IPIs.
		 */
		apic_pr_verbose("Disabling APIC mode, entering PIC mode.\n");
		imcr_apic_to_pic();
		return;
	}
#endif

	/* Go back to Virtual Wire compatibility mode */

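	/*
	 * In virtual wire mode the 8259 INTR output reaches the CPU
	 * through the local APIC's LINT0 pin as ExtINT, so legacy
	 * interrupts keep working without the rest of the APIC machinery.
	 */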
	/* For the spurious interrupt use vector F, and enable it */
	value = apic_read(APIC_SPIV);
	value &= ~APIC_VECTOR_MASK;
	value |= APIC_SPIV_APIC_ENABLED;
	value |= 0xf;
	apic_write(APIC_SPIV, value);

	if (!virt_wire_setup) {
		/*
		 * For LVT0 make it edge triggered, active high,
		 * external and enabled
		 */
		value = apic_read(APIC_LVT0);
		value &= ~(APIC_MODE_MASK | APIC_SEND_PENDING |
			APIC_INPUT_POLARITY | APIC_LVT_REMOTE_IRR |
			APIC_LVT_LEVEL_TRIGGER | APIC_LVT_MASKED);
		value |= APIC_LVT_REMOTE_IRR | APIC_SEND_PENDING;
		value = SET_APIC_DELIVERY_MODE(value, APIC_MODE_EXTINT);
		apic_write(APIC_LVT0, value);
	} else {
		/* Disable LVT0 */
		apic_write(APIC_LVT0, APIC_LVT_MASKED);
	}

	/*
	 * For LVT1 make it edge triggered, active high,
	 * nmi and enabled
	 */
	value = apic_read(APIC_LVT1);
	value &= ~(APIC_MODE_MASK | APIC_SEND_PENDING |
			APIC_INPUT_POLARITY | APIC_LVT_REMOTE_IRR |
			APIC_LVT_LEVEL_TRIGGER | APIC_LVT_MASKED);
	value |= APIC_LVT_REMOTE_IRR | APIC_SEND_PENDING;
	value = SET_APIC_DELIVERY_MODE(value, APIC_MODE_NMI);
	apic_write(APIC_LVT1, value);
}

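/*
 * Compose an x86 MSI message: the magic 0xFEExxxxx address carries the
 * destination APIC ID and destination mode, while the data word carries
 * the delivery mode and vector.
 */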
void __irq_msi_compose_msg(struct irq_cfg *cfg, struct msi_msg *msg,
			   bool dmar)
{
	memset(msg, 0, sizeof(*msg));

	msg->arch_addr_lo.base_address = X86_MSI_BASE_ADDRESS_LOW;
	msg->arch_addr_lo.dest_mode_logical = apic->dest_mode_logical;
	msg->arch_addr_lo.destid_0_7 = cfg->dest_apicid & 0xFF;

	msg->arch_data.delivery_mode = APIC_DELIVERY_MODE_FIXED;
	msg->arch_data.vector = cfg->vector;

	msg->address_hi = X86_MSI_BASE_ADDRESS_HIGH;
	/*
	 * Only the IOMMU itself can use the trick of putting destination
	 * APIC ID into the high bits of the address. Anything else would
	 * just be writing to memory if it tried that, and needs IR to
	 * address APICs which can't be addressed in the normal 32-bit
	 * address range at 0xFFExxxxx. That is typically just 8 bits, but
	 * some hypervisors allow the extended destination ID field in bits
	 * 5-11 to be used, giving support for 15 bits of APIC IDs in total.
	 */
	if (dmar)
		msg->arch_addr_hi.destid_8_31 = cfg->dest_apicid >> 8;
	else if (virt_ext_dest_id && cfg->dest_apicid < 0x8000)
		msg->arch_addr_lo.virt_destid_8_14 = cfg->dest_apicid >> 8;
	else
		WARN_ON_ONCE(cfg->dest_apicid > 0xFF);
}

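/*
 * Read back the destination APIC ID encoded in an MSI message. The high
 * bits are only valid when the caller knows the extended (remapped)
 * destination ID format is in use.
 */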
u32 x86_msi_msg_get_destid(struct msi_msg *msg, bool extid)
{
	u32 dest = msg->arch_addr_lo.destid_0_7;

	if (extid)
		dest |= msg->arch_addr_hi.destid_8_31 << 8;
	return dest;
}
EXPORT_SYMBOL_GPL(x86_msi_msg_get_destid);

static void __init apic_bsp_up_setup(void)
{
	reset_phys_cpu_present_map(boot_cpu_physical_apicid);
}

/**
 * apic_bsp_setup - Setup function for local apic and io-apic
 * @upmode:	Force UP mode (for APIC_init_uniprocessor)
 */
static void __init apic_bsp_setup(bool upmode)
{
	connect_bsp_APIC();
	if (upmode)
		apic_bsp_up_setup();
	setup_local_APIC();

	enable_IO_APIC();
	end_local_APIC_setup();
	irq_remap_enable_fault_handling();
	setup_IO_APIC();
	lapic_update_legacy_vectors();
}

#ifdef CONFIG_UP_LATE_INIT
void __init up_late_init(void)
{
	if (apic_intr_mode == APIC_PIC)
		return;

	/* Setup local timer */
	x86_init.timers.setup_percpu_clockev();
}
#endif

/*
 * Power management
 */
#ifdef CONFIG_PM

static struct {
	/*
	 * 'active' is true if the local APIC was enabled by us and
	 * not the BIOS; this signifies that we are also responsible
	 * for disabling it before entering apm/acpi suspend
	 */
	int active;
	/* r/w apic fields */
	u32 apic_id;
	unsigned int apic_taskpri;
	unsigned int apic_ldr;
	unsigned int apic_dfr;
	unsigned int apic_spiv;
	unsigned int apic_lvtt;
	unsigned int apic_lvtpc;
	unsigned int apic_lvt0;
	unsigned int apic_lvt1;
	unsigned int apic_lvterr;
	unsigned int apic_tmict;
	unsigned int apic_tdcr;
	unsigned int apic_thmr;
	unsigned int apic_cmci;
} apic_pm_state;

static int lapic_suspend(void)
{
	unsigned long flags;
	int maxlvt;

	if (!apic_pm_state.active)
		return 0;

	maxlvt = lapic_get_maxlvt();

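	/*
	 * Save only the LVT entries this APIC implements; maxlvt is the
	 * index of the highest valid LVT register.
	 */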
	apic_pm_state.apic_id = apic_read(APIC_ID);
	apic_pm_state.apic_taskpri = apic_read(APIC_TASKPRI);
	apic_pm_state.apic_ldr = apic_read(APIC_LDR);
	apic_pm_state.apic_dfr = apic_read(APIC_DFR);
	apic_pm_state.apic_spiv = apic_read(APIC_SPIV);
	apic_pm_state.apic_lvtt = apic_read(APIC_LVTT);
	if (maxlvt >= 4)
		apic_pm_state.apic_lvtpc = apic_read(APIC_LVTPC);
	apic_pm_state.apic_lvt0 = apic_read(APIC_LVT0);
	apic_pm_state.apic_lvt1 = apic_read(APIC_LVT1);
	apic_pm_state.apic_lvterr = apic_read(APIC_LVTERR);
	apic_pm_state.apic_tmict = apic_read(APIC_TMICT);
	apic_pm_state.apic_tdcr = apic_read(APIC_TDCR);
#ifdef CONFIG_X86_THERMAL_VECTOR
	if (maxlvt >= 5)
		apic_pm_state.apic_thmr = apic_read(APIC_LVTTHMR);
#endif
#ifdef CONFIG_X86_MCE_INTEL
	if (maxlvt >= 6)
		apic_pm_state.apic_cmci = apic_read(APIC_LVTCMCI);
#endif

	local_irq_save(flags);

	/*
	 * Mask IOAPIC before disabling the local APIC to prevent stale IRR
	 * entries on some implementations.
	 */
	mask_ioapic_entries();

	disable_local_APIC();

	irq_remapping_disable();

	local_irq_restore(flags);
	return 0;
}

static void lapic_resume(void)
{
	unsigned int l, h;
	unsigned long flags;
	int maxlvt;

	if (!apic_pm_state.active)
		return;

	local_irq_save(flags);

	/*
	 * IO-APIC and PIC have their own resume routines.
	 * We just mask them here to make sure the interrupt
	 * subsystem is completely quiet while we enable x2apic
	 * and interrupt-remapping.
	 */
	mask_ioapic_entries();
	legacy_pic->mask_all();

	if (x2apic_mode) {
		__x2apic_enable();
	} else {
		/*
		 * Make sure the APICBASE points to the right address
		 *
		 * FIXME! This will be wrong if we ever support suspend on
		 * SMP! We'll need to do this as part of the CPU restore!
		 */
		if (boot_cpu_data.x86 >= 6) {
			rdmsr(MSR_IA32_APICBASE, l, h);
			l &= ~MSR_IA32_APICBASE_BASE;
			l |= MSR_IA32_APICBASE_ENABLE | mp_lapic_addr;
			wrmsr(MSR_IA32_APICBASE, l, h);
		}
	}

	maxlvt = lapic_get_maxlvt();
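	/*
	 * Mask LVTERR first so that an APIC error raised while the other
	 * registers are being restored cannot fire through a stale error
	 * vector; it is restored last, after the ESR has been cleared.
	 */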
	apic_write(APIC_LVTERR, ERROR_APIC_VECTOR | APIC_LVT_MASKED);
	apic_write(APIC_ID, apic_pm_state.apic_id);
	apic_write(APIC_DFR, apic_pm_state.apic_dfr);
	apic_write(APIC_LDR, apic_pm_state.apic_ldr);
	apic_write(APIC_TASKPRI, apic_pm_state.apic_taskpri);
	apic_write(APIC_SPIV, apic_pm_state.apic_spiv);
	apic_write(APIC_LVT0, apic_pm_state.apic_lvt0);
	apic_write(APIC_LVT1, apic_pm_state.apic_lvt1);
#ifdef CONFIG_X86_THERMAL_VECTOR
	if (maxlvt >= 5)
		apic_write(APIC_LVTTHMR, apic_pm_state.apic_thmr);
#endif
#ifdef CONFIG_X86_MCE_INTEL
	if (maxlvt >= 6)
		apic_write(APIC_LVTCMCI, apic_pm_state.apic_cmci);
#endif
	if (maxlvt >= 4)
		apic_write(APIC_LVTPC, apic_pm_state.apic_lvtpc);
	apic_write(APIC_LVTT, apic_pm_state.apic_lvtt);
	apic_write(APIC_TDCR, apic_pm_state.apic_tdcr);
	apic_write(APIC_TMICT, apic_pm_state.apic_tmict);
	apic_write(APIC_ESR, 0);
	apic_read(APIC_ESR);
	apic_write(APIC_LVTERR, apic_pm_state.apic_lvterr);
	apic_write(APIC_ESR, 0);
	apic_read(APIC_ESR);

	irq_remapping_reenable(x2apic_mode);

	local_irq_restore(flags);
}

/*
 * This device has no shutdown method - fully functioning local APICs
 * are needed on every CPU up until machine_halt/restart/poweroff.
 */

static struct syscore_ops lapic_syscore_ops = {
	.resume		= lapic_resume,
	.suspend	= lapic_suspend,
};

static void apic_pm_activate(void)
{
	apic_pm_state.active = 1;
}

static int __init init_lapic_sysfs(void)
{
	/* XXX: remove suspend/resume procs if !apic_pm_state.active? */
	if (boot_cpu_has(X86_FEATURE_APIC))
		register_syscore_ops(&lapic_syscore_ops);

	return 0;
}

/* local apic needs to resume before other devices access its registers. */
core_initcall(init_lapic_sysfs);

#else	/* CONFIG_PM */

static void apic_pm_activate(void) { }

#endif	/* CONFIG_PM */

#ifdef CONFIG_X86_64

static int multi_checked;
static int multi;

static int set_multi(const struct dmi_system_id *d)
{
	if (multi)
		return 0;
	pr_info("APIC: %s detected, Multi Chassis\n", d->ident);
	multi = 1;
	return 0;
}

static const struct dmi_system_id multi_dmi_table[] = {
	{
		.callback = set_multi,
		.ident = "IBM System Summit2",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "IBM"),
			DMI_MATCH(DMI_PRODUCT_NAME, "Summit2"),
		},
	},
	{}
};

static void dmi_check_multi(void)
{
	if (multi_checked)
		return;

	dmi_check_system(multi_dmi_table);
	multi_checked = 1;
}

/*
 * apic_is_clustered_box() -- Check if we can expect good TSC
 *
 * Thus far, the major user of this is IBM's Summit2 series:
 * Clustered boxes may have unsynced TSC problems if they are
 * multi-chassis.
 * Use DMI to check for them.
 */
int apic_is_clustered_box(void)
{
	dmi_check_multi();
	return multi;
}
#endif

/*
 * APIC command line parameters
 */
static int __init setup_nolapic(char *arg)
{
	apic_is_disabled = true;
	setup_clear_cpu_cap(X86_FEATURE_APIC);
	return 0;
}
early_param("nolapic", setup_nolapic);

static int __init parse_lapic_timer_c2_ok(char *arg)
{
	local_apic_timer_c2_ok = 1;
	return 0;
}
early_param("lapic_timer_c2_ok", parse_lapic_timer_c2_ok);

static int __init parse_disable_apic_timer(char *arg)
{
	disable_apic_timer = 1;
	return 0;
}
early_param("noapictimer", parse_disable_apic_timer);

static int __init parse_nolapic_timer(char *arg)
{
	disable_apic_timer = 1;
	return 0;
}
early_param("nolapic_timer", parse_nolapic_timer);

static int __init apic_set_verbosity(char *arg)
{
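	/* A bare "apic" is valid only on 64-bit and re-enables the IO-APIC */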
	if (!arg) {
		if (IS_ENABLED(CONFIG_X86_32))
			return -EINVAL;

		ioapic_is_disabled = false;
		return 0;
	}

	if (strcmp("debug", arg) == 0)
		apic_verbosity = APIC_DEBUG;
	else if (strcmp("verbose", arg) == 0)
		apic_verbosity = APIC_VERBOSE;
#ifdef CONFIG_X86_64
	else {
		pr_warn("APIC Verbosity level %s not recognised, use apic=verbose or apic=debug\n",
			arg);
		return -EINVAL;
	}
#endif

	return 0;
}
early_param("apic", apic_set_verbosity);

static int __init lapic_insert_resource(void)
{
	if (!apic_mmio_base)
		return -1;

	/* Put local APIC into the resource map. */
	lapic_resource.start = apic_mmio_base;
	lapic_resource.end = lapic_resource.start + PAGE_SIZE - 1;
	insert_resource(&iomem_resource, &lapic_resource);

	return 0;
}

/*
 * The resource must be inserted after e820__reserve_resources(), which
 * uses request_resource(), hence the late_initcall.
 */
late_initcall(lapic_insert_resource);

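/* Parse the "apic_extnmi=bsp|all|none" command line parameter */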
static int __init apic_set_extnmi(char *arg)
{
	if (!arg)
		return -EINVAL;

	if (!strncmp("all", arg, 3))
		apic_extnmi = APIC_EXTNMI_ALL;
	else if (!strncmp("none", arg, 4))
		apic_extnmi = APIC_EXTNMI_NONE;
	else if (!strncmp("bsp", arg, 3))
		apic_extnmi = APIC_EXTNMI_BSP;
	else {
		pr_warn("Unknown external NMI delivery mode `%s' ignored\n", arg);
		return -EINVAL;
	}

	return 0;
}
early_param("apic_extnmi", apic_set_extnmi);