1 #ifndef _ASM_X86_APIC_H
2 #define _ASM_X86_APIC_H
4 #include <linux/cpumask.h>
7 #include <asm/alternative.h>
8 #include <asm/cpufeature.h>
9 #include <asm/apicdef.h>
10 #include <linux/atomic.h>
11 #include <asm/fixmap.h>
12 #include <asm/mpspec.h>
16 #define ARCH_APICTIMER_STOPS_ON_C3 1
22 #define APIC_VERBOSE 1
25 /* Macros for apic_extnmi which controls external NMI masking */
26 #define APIC_EXTNMI_BSP 0 /* Default */
27 #define APIC_EXTNMI_ALL 1
28 #define APIC_EXTNMI_NONE 2
/*
 * Define the default level of output to be very little
 * This can be turned up by using apic=verbose for more
 * information and apic=debug for _lots_ of information.
 * apic_verbosity is defined in apic.c
 */
/* Print only when the message level (v) is within the current verbosity. */
#define apic_printk(v, s, a...) do {       \
		if ((v) <= apic_verbosity) \
			printk(s, ##a);    \
	} while (0)
42 #if defined(CONFIG_X86_LOCAL_APIC) && defined(CONFIG_X86_32)
43 extern void generic_apic_probe(void);
45 static inline void generic_apic_probe(void)
50 #ifdef CONFIG_X86_LOCAL_APIC
52 extern unsigned int apic_verbosity
;
53 extern int local_apic_timer_c2_ok
;
55 extern int disable_apic
;
56 extern unsigned int lapic_timer_frequency
;
59 extern void __inquire_remote_apic(int apicid
);
60 #else /* CONFIG_SMP */
/* UP build: nothing to inquire from a remote APIC. */
static inline void __inquire_remote_apic(int apicid)
{
}
64 #endif /* CONFIG_SMP */
66 static inline void default_inquire_remote_apic(int apicid
)
68 if (apic_verbosity
>= APIC_DEBUG
)
69 __inquire_remote_apic(apicid
);
/*
 * With 82489DX we can't rely on apic feature bit
 * retrieved via cpuid but still have to deal with
 * such an apic chip so we assume that SMP configuration
 * is found from MP table (64bit case uses ACPI mostly
 * which sets the smp presence flag as well so we are safe
 * to use this helper too).
 */
80 static inline bool apic_from_smp_config(void)
82 return smp_found_config
&& !disable_apic
;
/*
 * Basic functions accessing APICs.
 */
#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
#endif

extern int setup_profiling_timer(unsigned int);
94 static inline void native_apic_mem_write(u32 reg
, u32 v
)
96 volatile u32
*addr
= (volatile u32
*)(APIC_BASE
+ reg
);
98 alternative_io("movl %0, %P1", "xchgl %0, %P1", X86_BUG_11AP
,
99 ASM_OUTPUT2("=r" (v
), "=m" (*addr
)),
100 ASM_OUTPUT2("0" (v
), "m" (*addr
)));
103 static inline u32
native_apic_mem_read(u32 reg
)
105 return *((volatile u32
*)(APIC_BASE
+ reg
));
108 extern void native_apic_wait_icr_idle(void);
109 extern u32
native_safe_apic_wait_icr_idle(void);
110 extern void native_apic_icr_write(u32 low
, u32 id
);
111 extern u64
native_apic_icr_read(void);
113 static inline bool apic_is_x2apic_enabled(void)
117 if (rdmsrl_safe(MSR_IA32_APICBASE
, &msr
))
119 return msr
& X2APIC_ENABLE
;
122 extern void enable_IR_x2apic(void);
124 extern int get_physical_broadcast(void);
126 extern int lapic_get_maxlvt(void);
127 extern void clear_local_APIC(void);
128 extern void disconnect_bsp_APIC(int virt_wire_setup
);
129 extern void disable_local_APIC(void);
130 extern void lapic_shutdown(void);
131 extern void sync_Arb_IDs(void);
132 extern void init_bsp_APIC(void);
133 extern void setup_local_APIC(void);
134 extern void init_apic_mappings(void);
135 void register_lapic_address(unsigned long address
);
136 extern void setup_boot_APIC_clock(void);
137 extern void setup_secondary_APIC_clock(void);
138 extern int APIC_init_uniprocessor(void);
/*
 * Force-enabling the APIC at a given MMIO address is only supported on
 * 32-bit (implemented in apic.c); 64-bit builds cannot do it.
 * NOTE(review): the #ifdef guard was garbled; restored — confirm the
 * CONFIG_X86_64 polarity against apic.c.
 */
#ifdef CONFIG_X86_64
static inline int apic_force_enable(unsigned long addr)
{
	return -1;
}
#else
extern int apic_force_enable(unsigned long addr);
#endif
149 extern int apic_bsp_setup(bool upmode
);
150 extern void apic_ap_setup(void);
/*
 * On 32bit this is mach-xxx local
 */
#ifdef CONFIG_X86_64
extern int apic_is_clustered_box(void);
#else
static inline int apic_is_clustered_box(void)
{
	return 0;
}
#endif
164 extern int setup_APIC_eilvt(u8 lvt_off
, u8 vector
, u8 msg_type
, u8 mask
);
166 #else /* !CONFIG_X86_LOCAL_APIC */
167 static inline void lapic_shutdown(void) { }
168 #define local_apic_timer_c2_ok 1
169 static inline void init_apic_mappings(void) { }
170 static inline void disable_local_APIC(void) { }
171 # define setup_boot_APIC_clock x86_init_noop
172 # define setup_secondary_APIC_clock x86_init_noop
173 #endif /* !CONFIG_X86_LOCAL_APIC */
175 #ifdef CONFIG_X86_X2APIC
/*
 * Make previous memory operations globally visible before
 * sending the IPI through x2apic wrmsr. We need a serializing instruction or
 * mfence for this.
 */
181 static inline void x2apic_wrmsr_fence(void)
183 asm volatile("mfence" : : : "memory");
186 static inline void native_apic_msr_write(u32 reg
, u32 v
)
188 if (reg
== APIC_DFR
|| reg
== APIC_ID
|| reg
== APIC_LDR
||
192 wrmsr(APIC_BASE_MSR
+ (reg
>> 4), v
, 0);
195 static inline void native_apic_msr_eoi_write(u32 reg
, u32 v
)
197 wrmsr(APIC_BASE_MSR
+ (APIC_EOI
>> 4), APIC_EOI_ACK
, 0);
200 static inline u32
native_apic_msr_read(u32 reg
)
207 rdmsrl(APIC_BASE_MSR
+ (reg
>> 4), msr
);
static inline void native_x2apic_wait_icr_idle(void)
{
	/* no need to wait for icr idle in x2apic */
}
217 static inline u32
native_safe_x2apic_wait_icr_idle(void)
219 /* no need to wait for icr idle in x2apic */
223 static inline void native_x2apic_icr_write(u32 low
, u32 id
)
225 wrmsrl(APIC_BASE_MSR
+ (APIC_ICR
>> 4), ((__u64
) id
) << 32 | low
);
228 static inline u64
native_x2apic_icr_read(void)
232 rdmsrl(APIC_BASE_MSR
+ (APIC_ICR
>> 4), val
);
236 extern int x2apic_mode
;
237 extern int x2apic_phys
;
238 extern void __init
check_x2apic(void);
239 extern void x2apic_setup(void);
240 static inline int x2apic_enabled(void)
242 return boot_cpu_has(X86_FEATURE_X2APIC
) && apic_is_x2apic_enabled();
245 #define x2apic_supported() (boot_cpu_has(X86_FEATURE_X2APIC))
246 #else /* !CONFIG_X86_X2APIC */
247 static inline void check_x2apic(void) { }
248 static inline void x2apic_setup(void) { }
249 static inline int x2apic_enabled(void) { return 0; }
251 #define x2apic_mode (0)
252 #define x2apic_supported() (0)
253 #endif /* !CONFIG_X86_X2APIC */
256 #define SET_APIC_ID(x) (apic->set_apic_id(x))
/*
 * Copyright 2004 James Cleverdon, IBM.
 * Subject to the GNU Public License, v.2
 *
 * Generic APIC sub-arch data struct.
 *
 * Hacked for x86-64 by James Cleverdon from i386 architecture code by
 * Martin Bligh, Andi Kleen, James Bottomley, John Stultz, and
 * James Cleverdon.
 */
275 int (*acpi_madt_oem_check
)(char *oem_id
, char *oem_table_id
);
276 int (*apic_id_valid
)(int apicid
);
277 int (*apic_id_registered
)(void);
279 u32 irq_delivery_mode
;
282 const struct cpumask
*(*target_cpus
)(void);
287 unsigned long (*check_apicid_used
)(physid_mask_t
*map
, int apicid
);
289 void (*vector_allocation_domain
)(int cpu
, struct cpumask
*retmask
,
290 const struct cpumask
*mask
);
291 void (*init_apic_ldr
)(void);
293 void (*ioapic_phys_id_map
)(physid_mask_t
*phys_map
, physid_mask_t
*retmap
);
295 void (*setup_apic_routing
)(void);
296 int (*cpu_present_to_apicid
)(int mps_cpu
);
297 void (*apicid_to_cpu_present
)(int phys_apicid
, physid_mask_t
*retmap
);
298 int (*check_phys_apicid_present
)(int phys_apicid
);
299 int (*phys_pkg_id
)(int cpuid_apic
, int index_msb
);
301 unsigned int (*get_apic_id
)(unsigned long x
);
302 unsigned long (*set_apic_id
)(unsigned int id
);
304 int (*cpu_mask_to_apicid_and
)(const struct cpumask
*cpumask
,
305 const struct cpumask
*andmask
,
306 unsigned int *apicid
);
309 void (*send_IPI
)(int cpu
, int vector
);
310 void (*send_IPI_mask
)(const struct cpumask
*mask
, int vector
);
311 void (*send_IPI_mask_allbutself
)(const struct cpumask
*mask
,
313 void (*send_IPI_allbutself
)(int vector
);
314 void (*send_IPI_all
)(int vector
);
315 void (*send_IPI_self
)(int vector
);
317 /* wakeup_secondary_cpu */
318 int (*wakeup_secondary_cpu
)(int apicid
, unsigned long start_eip
);
320 void (*inquire_remote_apic
)(int apicid
);
323 u32 (*read
)(u32 reg
);
324 void (*write
)(u32 reg
, u32 v
);
326 * ->eoi_write() has the same signature as ->write().
328 * Drivers can support both ->eoi_write() and ->write() by passing the same
329 * callback value. Kernel can override ->eoi_write() and fall back
332 void (*eoi_write
)(u32 reg
, u32 v
);
333 u64 (*icr_read
)(void);
334 void (*icr_write
)(u32 low
, u32 high
);
335 void (*wait_icr_idle
)(void);
336 u32 (*safe_wait_icr_idle
)(void);
340 * Called very early during boot from get_smp_config(). It should
341 * return the logical apicid. x86_[bios]_cpu_to_apicid is
342 * initialized before this function is called.
344 * If logical apicid can't be determined that early, the function
345 * may return BAD_APICID. Logical apicid will be configured after
346 * init_apic_ldr() while bringing up CPUs. Note that NUMA affinity
347 * won't be applied properly during early boot in this case.
349 int (*x86_32_early_logical_apicid
)(int cpu
);
354 * Pointer to the local APIC driver in use on this system (there's
355 * always just one such driver in use - the kernel decides via an
356 * early probing process which one it picks - and then sticks to it):
358 extern struct apic
*apic
;
361 * APIC drivers are probed based on how they are listed in the .apicdrivers
362 * section. So the order is important and enforced by the ordering
363 * of different apic driver files in the Makefile.
365 * For the files having two apic drivers, we use apic_drivers()
366 * to enforce the order with in them.
/* Register one local APIC driver into the ordered .apicdrivers section. */
#define apic_driver(sym) \
	static const struct apic *__apicdrivers_##sym __used \
	__aligned(sizeof(struct apic *)) \
	__section(.apicdrivers) = { &sym }
/* Register two drivers from one file, preserving their relative order. */
#define apic_drivers(sym1, sym2) \
	static struct apic *__apicdrivers_##sym1##sym2[2] __used \
	__aligned(sizeof(struct apic *)) \
	__section(.apicdrivers) = { &sym1, &sym2 }
378 extern struct apic
*__apicdrivers
[], *__apicdrivers_end
[];
381 * APIC functionality to boot other CPUs - only used on SMP:
384 extern int wakeup_secondary_cpu_via_nmi(int apicid
, unsigned long start_eip
);
387 #ifdef CONFIG_X86_LOCAL_APIC
389 static inline u32
apic_read(u32 reg
)
391 return apic
->read(reg
);
394 static inline void apic_write(u32 reg
, u32 val
)
396 apic
->write(reg
, val
);
399 static inline void apic_eoi(void)
401 apic
->eoi_write(APIC_EOI
, APIC_EOI_ACK
);
404 static inline u64
apic_icr_read(void)
406 return apic
->icr_read();
409 static inline void apic_icr_write(u32 low
, u32 high
)
411 apic
->icr_write(low
, high
);
414 static inline void apic_wait_icr_idle(void)
416 apic
->wait_icr_idle();
419 static inline u32
safe_apic_wait_icr_idle(void)
421 return apic
->safe_wait_icr_idle();
424 extern void __init
apic_set_eoi_write(void (*eoi_write
)(u32 reg
, u32 v
));
426 #else /* CONFIG_X86_LOCAL_APIC */
428 static inline u32
apic_read(u32 reg
) { return 0; }
429 static inline void apic_write(u32 reg
, u32 val
) { }
430 static inline void apic_eoi(void) { }
431 static inline u64
apic_icr_read(void) { return 0; }
432 static inline void apic_icr_write(u32 low
, u32 high
) { }
433 static inline void apic_wait_icr_idle(void) { }
434 static inline u32
safe_apic_wait_icr_idle(void) { return 0; }
435 static inline void apic_set_eoi_write(void (*eoi_write
)(u32 reg
, u32 v
)) {}
437 #endif /* CONFIG_X86_LOCAL_APIC */
static inline void ack_APIC_irq(void)
{
	/*
	 * ack_APIC_irq() actually gets compiled as a single instruction
	 * on the fast path (a plain EOI write).
	 * NOTE(review): surrounding comment text was garbled in the
	 * original; the body is the standard apic_eoi() call.
	 */
	apic_eoi();
}
448 static inline unsigned default_get_apic_id(unsigned long x
)
450 unsigned int ver
= GET_APIC_VERSION(apic_read(APIC_LVR
));
452 if (APIC_XAPIC(ver
) || boot_cpu_has(X86_FEATURE_EXTD_APICID
))
453 return (x
>> 24) & 0xFF;
455 return (x
>> 24) & 0x0F;
459 * Warm reset vector position:
461 #define TRAMPOLINE_PHYS_LOW 0x467
462 #define TRAMPOLINE_PHYS_HIGH 0x469
465 extern void apic_send_IPI_self(int vector
);
467 DECLARE_PER_CPU(int, x2apic_extra_bits
);
469 extern int default_cpu_present_to_apicid(int mps_cpu
);
470 extern int default_check_phys_apicid_present(int phys_apicid
);
473 extern void generic_bigsmp_probe(void);
476 #ifdef CONFIG_X86_LOCAL_APIC
480 #define APIC_DFR_VALUE (APIC_DFR_FLAT)
/*
 * Default IRQ destination set: all online CPUs on SMP, CPU 0 on UP.
 * NOTE(review): the #ifdef CONFIG_SMP guard was garbled; restored — the
 * two unconditional returns otherwise make the second unreachable.
 */
static inline const struct cpumask *default_target_cpus(void)
{
#ifdef CONFIG_SMP
	return cpu_online_mask;
#else
	return cpumask_of(0);
#endif
}
491 static inline const struct cpumask
*online_target_cpus(void)
493 return cpu_online_mask
;
496 DECLARE_EARLY_PER_CPU_READ_MOSTLY(u16
, x86_bios_cpu_apicid
);
499 static inline unsigned int read_apic_id(void)
503 reg
= apic_read(APIC_ID
);
505 return apic
->get_apic_id(reg
);
/* 0xFF is the xAPIC broadcast ID, so only IDs below 255 are valid. */
static inline int default_apic_id_valid(int apicid)
{
	return (apicid < 255);
}
513 extern int default_acpi_madt_oem_check(char *, char *);
515 extern void default_setup_apic_routing(void);
517 extern struct apic apic_noop
;
521 static inline int noop_x86_32_early_logical_apicid(int cpu
)
527 * Set up the logical destination ID.
529 * Intel recommends to set DFR, LDR and TPR before enabling
530 * an APIC. See e.g. "AP-388 82489DX User's Manual" (Intel
531 * document number 292116). So here it goes...
533 extern void default_init_apic_ldr(void);
535 static inline int default_apic_id_registered(void)
537 return physid_isset(read_apic_id(), phys_cpu_present_map
);
/* Physical package ID = APIC ID with the sub-package bits shifted out. */
static inline int default_phys_pkg_id(int cpuid_apic, int index_msb)
{
	return cpuid_apic >> index_msb;
}
548 flat_cpu_mask_to_apicid_and(const struct cpumask
*cpumask
,
549 const struct cpumask
*andmask
,
550 unsigned int *apicid
)
552 unsigned long cpu_mask
= cpumask_bits(cpumask
)[0] &
553 cpumask_bits(andmask
)[0] &
554 cpumask_bits(cpu_online_mask
)[0] &
557 if (likely(cpu_mask
)) {
558 *apicid
= (unsigned int)cpu_mask
;
/* Non-flat variant, implemented in apic.c. */
extern int
default_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
			       const struct cpumask *andmask,
			       unsigned int *apicid);
571 flat_vector_allocation_domain(int cpu
, struct cpumask
*retmask
,
572 const struct cpumask
*mask
)
574 /* Careful. Some cpus do not strictly honor the set of cpus
575 * specified in the interrupt destination when using lowest
576 * priority interrupt delivery mode.
578 * In particular there was a hyperthreading cpu observed to
579 * deliver interrupts to the wrong hyperthread when only one
580 * hyperthread was specified in the interrupt desitination.
582 cpumask_clear(retmask
);
583 cpumask_bits(retmask
)[0] = APIC_ALL_CPUS
;
/* Default domain: a vector is allocated for exactly the one target CPU. */
static inline void
default_vector_allocation_domain(int cpu, struct cpumask *retmask,
				 const struct cpumask *mask)
{
	cpumask_copy(retmask, cpumask_of(cpu));
}
593 static inline unsigned long default_check_apicid_used(physid_mask_t
*map
, int apicid
)
595 return physid_isset(apicid
, *map
);
598 static inline void default_ioapic_phys_id_map(physid_mask_t
*phys_map
, physid_mask_t
*retmap
)
603 static inline int __default_cpu_present_to_apicid(int mps_cpu
)
605 if (mps_cpu
< nr_cpu_ids
&& cpu_present(mps_cpu
))
606 return (int)per_cpu(x86_bios_cpu_apicid
, mps_cpu
);
612 __default_check_phys_apicid_present(int phys_apicid
)
614 return physid_isset(phys_apicid
, phys_cpu_present_map
);
/*
 * 32-bit uses the inline defaults directly; 64-bit provides out-of-line
 * versions in apic.c.
 * NOTE(review): the #ifdef CONFIG_X86_32/#else/#endif lines were garbled;
 * without them the inline definitions and extern declarations conflict.
 */
#ifdef CONFIG_X86_32
static inline int default_cpu_present_to_apicid(int mps_cpu)
{
	return __default_cpu_present_to_apicid(mps_cpu);
}

static inline int
default_check_phys_apicid_present(int phys_apicid)
{
	return __default_check_phys_apicid_present(phys_apicid);
}
#else
extern int default_cpu_present_to_apicid(int mps_cpu);
extern int default_check_phys_apicid_present(int phys_apicid);
#endif
633 #endif /* CONFIG_X86_LOCAL_APIC */
634 extern void irq_enter(void);
635 extern void irq_exit(void);
637 static inline void entering_irq(void)
643 static inline void entering_ack_irq(void)
649 static inline void ipi_entering_ack_irq(void)
655 static inline void exiting_irq(void)
660 static inline void exiting_ack_irq(void)
663 /* Ack only at the end to avoid potential reentry */
667 extern void ioapic_zap_locks(void);
669 #endif /* _ASM_X86_APIC_H */