arm64: prefetch: add alternative pattern for CPUs without a prefetcher
arch/arm64/include/asm/cpufeature.h (deliverable/linux.git)
/*
 * Copyright (C) 2014 Linaro Ltd. <ard.biesheuvel@linaro.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#ifndef __ASM_CPUFEATURE_H
#define __ASM_CPUFEATURE_H

#include <asm/hwcap.h>
#include <asm/sysreg.h>

/*
 * In the arm64 world (as in the ARM world), elf_hwcap is used both internally
 * in the kernel and for user space to keep track of which optional features
 * are supported by the current system. So let's map feature 'x' to HWCAP_x.
 * Note that HWCAP_x constants are bit fields so we need to take the log.
 */

#define MAX_CPU_FEATURES        (8 * sizeof(elf_hwcap))
#define cpu_feature(x)          ilog2(HWCAP_ ## x)
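
/*
 * Illustrative example (not in the original header), assuming HWCAP_FP is
 * one of the single-bit HWCAP_x masks from <asm/hwcap.h>:
 *
 *      cpu_feature(FP) expands to ilog2(HWCAP_FP), i.e. the bit index of
 *      HWCAP_FP rather than its mask, which is the form expected by
 *      cpu_have_feature() below.
 */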

#define ARM64_WORKAROUND_CLEAN_CACHE            0
#define ARM64_WORKAROUND_DEVICE_LOAD_ACQUIRE    1
#define ARM64_WORKAROUND_845719                 2
#define ARM64_HAS_SYSREG_GIC_CPUIF              3
#define ARM64_HAS_PAN                           4
#define ARM64_HAS_LSE_ATOMICS                   5
#define ARM64_WORKAROUND_CAVIUM_23154           6
#define ARM64_WORKAROUND_834220                 7
#define ARM64_HAS_NO_HW_PREFETCH                8

#define ARM64_NCAPS                             9

#ifndef __ASSEMBLY__

#include <linux/kernel.h>

/* CPU feature register tracking */
enum ftr_type {
        FTR_EXACT,      /* Use a predefined safe value */
        FTR_LOWER_SAFE, /* Smaller value is safe */
        FTR_HIGHER_SAFE,/* Bigger value is safe */
};

#define FTR_STRICT      true    /* SANITY check strict matching required */
#define FTR_NONSTRICT   false   /* SANITY check ignored */

#define FTR_SIGNED      true    /* Value should be treated as signed */
#define FTR_UNSIGNED    false   /* Value should be treated as unsigned */

struct arm64_ftr_bits {
        bool            sign;   /* Value is signed ? */
        bool            strict; /* CPU Sanity check: strict matching required ? */
        enum ftr_type   type;
        u8              shift;
        u8              width;
        s64             safe_val; /* safe value for discrete features */
};

/*
 * @arm64_ftr_reg - Feature register
 * @strict_mask         Bits which should match across all CPUs for sanity.
 * @sys_val             Safe value across the CPUs (system view)
 */
struct arm64_ftr_reg {
        u32                     sys_id;
        const char              *name;
        u64                     strict_mask;
        u64                     sys_val;
        struct arm64_ftr_bits   *ftr_bits;
};
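
/*
 * Hedged sketch (illustrative, not part of the original file): a feature
 * register is described by an array of arm64_ftr_bits, one entry per field.
 * The entry below describes a hypothetical unsigned 4-bit field at bit 8
 * whose lower values are the safe ones.
 */
static struct arm64_ftr_bits example_ftr_bits[] = {
        {
                .sign           = FTR_UNSIGNED,
                .strict         = FTR_STRICT,
                .type           = FTR_LOWER_SAFE,
                .shift          = 8,
                .width          = 4,
                .safe_val       = 0,
        },
};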

struct arm64_cpu_capabilities {
        const char *desc;
        u16 capability;
        bool (*matches)(const struct arm64_cpu_capabilities *);
        void (*enable)(void *); /* Called on all active CPUs */
        union {
                struct {        /* To be used for erratum handling only */
                        u32 midr_model;
                        u32 midr_range_min, midr_range_max;
                };

                struct {        /* Feature register checking */
                        u32 sys_reg;
                        int field_pos;
                        int min_field_value;
                        int hwcap_type;
                        unsigned long hwcap;
                };
        };
};
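
/*
 * Hedged sketch (not part of the original header): capabilities such as
 * ARM64_HAS_NO_HW_PREFETCH are normally declared in a table of
 * arm64_cpu_capabilities entries. The matcher and table names below are
 * purely illustrative placeholders, not the kernel's own.
 */
static bool example_has_no_hw_prefetch(const struct arm64_cpu_capabilities *entry)
{
        return false;   /* a real matcher would inspect MIDR_EL1 */
}

static const struct arm64_cpu_capabilities example_caps[] = {
        {
                .desc           = "software prefetching using PRFM",
                .capability     = ARM64_HAS_NO_HW_PREFETCH,
                .matches        = example_has_no_hw_prefetch,
        },
        { /* sentinel */ },
};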

extern DECLARE_BITMAP(cpu_hwcaps, ARM64_NCAPS);

static inline bool cpu_have_feature(unsigned int num)
{
        return elf_hwcap & (1UL << num);
}

static inline bool cpus_have_cap(unsigned int num)
{
        if (num >= ARM64_NCAPS)
                return false;
        return test_bit(num, cpu_hwcaps);
}

static inline void cpus_set_cap(unsigned int num)
{
        if (num >= ARM64_NCAPS)
                pr_warn("Attempt to set an illegal CPU capability (%d >= %d)\n",
                        num, ARM64_NCAPS);
        else
                __set_bit(num, cpu_hwcaps);
}
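
/*
 * Hedged usage sketch (illustrative only): detection code typically sets a
 * capability once its matcher fires, and consumers test it later, e.g.:
 *
 *      if (caps->matches(caps))
 *              cpus_set_cap(caps->capability);
 *      ...
 *      if (cpus_have_cap(ARM64_HAS_NO_HW_PREFETCH))
 *              ... fall back to software prefetching ...
 */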

static inline int __attribute_const__
cpuid_feature_extract_field_width(u64 features, int field, int width)
{
        return (s64)(features << (64 - width - field)) >> (64 - width);
}

static inline int __attribute_const__
cpuid_feature_extract_field(u64 features, int field)
{
        return cpuid_feature_extract_field_width(features, field, 4);
}

static inline unsigned int __attribute_const__
cpuid_feature_extract_unsigned_field_width(u64 features, int field, int width)
{
        return (u64)(features << (64 - width - field)) >> (64 - width);
}

static inline unsigned int __attribute_const__
cpuid_feature_extract_unsigned_field(u64 features, int field)
{
        return cpuid_feature_extract_unsigned_field_width(features, field, 4);
}
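
/*
 * Worked example (added for illustration): extracting a 4-bit field at
 * bit 8 from features == 0x0000000000000f00:
 *
 *      features << (64 - 4 - 8)        moves the field into bits [63:60]
 *      ... >> (64 - 4)                 moves it back into bits [3:0]
 *
 * so cpuid_feature_extract_unsigned_field_width(features, 8, 4) returns
 * 0xf, while the signed variant sign-extends the same bits and returns -1.
 */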

static inline u64 arm64_ftr_mask(struct arm64_ftr_bits *ftrp)
{
        return (u64)GENMASK(ftrp->shift + ftrp->width - 1, ftrp->shift);
}

static inline s64 arm64_ftr_value(struct arm64_ftr_bits *ftrp, u64 val)
{
        return ftrp->sign ?
                cpuid_feature_extract_field_width(val, ftrp->shift, ftrp->width) :
                cpuid_feature_extract_unsigned_field_width(val, ftrp->shift, ftrp->width);
}

static inline bool id_aa64mmfr0_mixed_endian_el0(u64 mmfr0)
{
        return cpuid_feature_extract_field(mmfr0, ID_AA64MMFR0_BIGENDEL_SHIFT) == 0x1 ||
                cpuid_feature_extract_field(mmfr0, ID_AA64MMFR0_BIGENDEL0_SHIFT) == 0x1;
}

void __init setup_cpu_features(void);

void update_cpu_capabilities(const struct arm64_cpu_capabilities *caps,
                             const char *info);
void check_local_cpu_errata(void);

#ifdef CONFIG_HOTPLUG_CPU
void verify_local_cpu_capabilities(void);
#else
static inline void verify_local_cpu_capabilities(void)
{
}
#endif

u64 read_system_reg(u32 id);

static inline bool cpu_supports_mixed_endian_el0(void)
{
        return id_aa64mmfr0_mixed_endian_el0(read_cpuid(ID_AA64MMFR0_EL1));
}

static inline bool system_supports_mixed_endian_el0(void)
{
        return id_aa64mmfr0_mixed_endian_el0(read_system_reg(SYS_ID_AA64MMFR0_EL1));
}
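
/*
 * Note: cpu_supports_mixed_endian_el0() checks the ID_AA64MMFR0_EL1 value
 * of the calling CPU via read_cpuid(), whereas
 * system_supports_mixed_endian_el0() uses read_system_reg() and therefore
 * reports the sanitised value that is safe across all CPUs in the system.
 */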

#endif /* __ASSEMBLY__ */

#endif