/*
 * native hashtable management.
 *
 * SMP scalability work:
 * Copyright (C) 2001 Anton Blanchard <anton@au.ibm.com>, IBM
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#undef DEBUG_LOW

#include <linux/spinlock.h>
#include <linux/bitops.h>
#include <linux/of.h>
#include <linux/threads.h>
#include <linux/smp.h>

#include <asm/machdep.h>
#include <asm/mmu.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/tlb.h>
#include <asm/cputable.h>
#include <asm/udbg.h>
#include <asm/kexec.h>
#include <asm/ppc-opcode.h>

#include <misc/cxl-base.h>

#ifdef DEBUG_LOW
#define DBG_LOW(fmt...) udbg_printf(fmt)
#else
#define DBG_LOW(fmt...)
#endif

#ifdef __BIG_ENDIAN__
#define HPTE_LOCK_BIT 3
#else
#define HPTE_LOCK_BIT (56+3)
#endif
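/*
 * The HPTE is always stored big-endian in memory, so the software lock
 * bit must name the same physical bit in both kernel endians: bit 3 of
 * the big-endian doubleword lands at bit 56+3 once that doubleword is
 * loaded as a native little-endian long by test_and_set_bit_lock().
 */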

DEFINE_RAW_SPINLOCK(native_tlbie_lock);

static inline void __tlbie(unsigned long vpn, int psize, int apsize, int ssize)
{
        unsigned long va;
        unsigned int penc;
        unsigned long sllp;

        /*
         * We need 14 to 65 bits of va for a tlbie of a 4K page.
         * With vpn we already ignore the lower VPN_SHIFT bits.
         * The top two bits are also ignored, because we can only
         * accommodate 76 bits in a 64-bit vpn with a VPN_SHIFT of 12.
         */
        va = vpn << VPN_SHIFT;
        /*
         * Clear the top 16 bits of the 64-bit va (non-SLS segment).
         * Older versions of the architecture (2.02 and earlier) require
         * masking of the top 16 bits.
         */
        va &= ~(0xffffULL << 48);

        switch (psize) {
        case MMU_PAGE_4K:
                /* clear out bits after (52) [0....52.....63] */
                va &= ~((1ul << (64 - 52)) - 1);
                va |= ssize << 8;
                sllp = ((mmu_psize_defs[apsize].sllp & SLB_VSID_L) >> 6) |
                        ((mmu_psize_defs[apsize].sllp & SLB_VSID_LP) >> 4);
                va |= sllp << 5;
                asm volatile(ASM_FTR_IFCLR("tlbie %0,0", PPC_TLBIE(%1,%0), %2)
                             : : "r" (va), "r"(0), "i" (CPU_FTR_ARCH_206)
                             : "memory");
                break;
        default:
                /* We need 14 to 14 + i bits of va */
                penc = mmu_psize_defs[psize].penc[apsize];
                va &= ~((1ul << mmu_psize_defs[apsize].shift) - 1);
                va |= penc << 12;
                va |= ssize << 8;
                /*
                 * AVAL bits:
                 * We don't need all the bits, but the rest of the bits
                 * must be ignored by the processor.
                 * vpn covers up to 65 bits of va (0...65) and we need
                 * bits 58..64 of va.
                 */
                va |= (vpn & 0xfe);     /* AVAL */
                va |= 1;                /* L */
                asm volatile(ASM_FTR_IFCLR("tlbie %0,1", PPC_TLBIE(%1,%0), %2)
                             : : "r" (va), "r"(0), "i" (CPU_FTR_ARCH_206)
                             : "memory");
                break;
        }
}

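/*
 * A minimal sketch of the RB operand __tlbie() assembles above, for the
 * 4K case (field positions from the code, values illustrative only):
 *
 *      va  = vpn << VPN_SHIFT;         // full virtual address
 *      va &= ~(0xffffULL << 48);       // top 16 bits must be clear
 *      va &= ~0xffful;                 // low 12 bits reused as fields
 *      va |= ssize << 8;               // segment size selector
 *      va |= sllp << 5;                // SLB L|LP page-size encoding
 *
 * The resulting va is then handed to tlbie as its RB register operand.
 */
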
static inline void __tlbiel(unsigned long vpn, int psize, int apsize, int ssize)
{
        unsigned long va;
        unsigned int penc;
        unsigned long sllp;

        /* VPN_SHIFT can be at most 12 */
        va = vpn << VPN_SHIFT;
        /*
         * Clear the top 16 bits of the 64-bit va (non-SLS segment).
         * Older versions of the architecture (2.02 and earlier) require
         * masking of the top 16 bits.
         */
        va &= ~(0xffffULL << 48);

        switch (psize) {
        case MMU_PAGE_4K:
                /* clear out bits after (52) [0....52.....63] */
                va &= ~((1ul << (64 - 52)) - 1);
                va |= ssize << 8;
                sllp = ((mmu_psize_defs[apsize].sllp & SLB_VSID_L) >> 6) |
                        ((mmu_psize_defs[apsize].sllp & SLB_VSID_LP) >> 4);
                va |= sllp << 5;
                asm volatile(".long 0x7c000224 | (%0 << 11) | (0 << 21)"
                             : : "r"(va) : "memory");
                break;
        default:
                /* We need 14 to 14 + i bits of va */
                penc = mmu_psize_defs[psize].penc[apsize];
                va &= ~((1ul << mmu_psize_defs[apsize].shift) - 1);
                va |= penc << 12;
                va |= ssize << 8;
                /*
                 * AVAL bits:
                 * We don't need all the bits, but the rest of the bits
                 * must be ignored by the processor.
                 * vpn covers up to 65 bits of va (0...65) and we need
                 * bits 58..64 of va.
                 */
                va |= (vpn & 0xfe);
                va |= 1;        /* L */
                asm volatile(".long 0x7c000224 | (%0 << 11) | (1 << 21)"
                             : : "r"(va) : "memory");
                break;
        }

}

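/*
 * Note on the raw opcodes in __tlbiel() above: 0x7c000224 is the tlbiel
 * instruction (primary opcode 31, extended opcode 274) encoded by hand,
 * with the RB operand register spliced in at bit 11 and the L field at
 * bit 21, so the file still builds with assemblers that lack the
 * extended tlbiel mnemonic.
 */
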
static inline void tlbie(unsigned long vpn, int psize, int apsize,
                         int ssize, int local)
{
        unsigned int use_local;
        int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE);

        use_local = local && mmu_has_feature(MMU_FTR_TLBIEL) && !cxl_ctx_in_use();

        if (use_local)
                use_local = mmu_psize_defs[psize].tlbiel;
        if (lock_tlbie && !use_local)
                raw_spin_lock(&native_tlbie_lock);
        asm volatile("ptesync": : :"memory");
        if (use_local) {
                __tlbiel(vpn, psize, apsize, ssize);
                asm volatile("ptesync": : :"memory");
        } else {
                __tlbie(vpn, psize, apsize, ssize);
                asm volatile("eieio; tlbsync; ptesync": : :"memory");
        }
        if (lock_tlbie && !use_local)
                raw_spin_unlock(&native_tlbie_lock);
}

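/*
 * Summary of the dispatch above: the local form (tlbiel) is used only
 * when the caller asked for a local flush, the CPU advertises
 * MMU_FTR_TLBIEL, no cxl contexts are active, and the page size
 * supports tlbiel; everything else falls back to a global tlbie,
 * serialized by native_tlbie_lock unless MMU_FTR_LOCKLESS_TLBIE is set.
 */
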
static inline void native_lock_hpte(struct hash_pte *hptep)
{
        unsigned long *word = (unsigned long *)&hptep->v;

        while (1) {
                if (!test_and_set_bit_lock(HPTE_LOCK_BIT, word))
                        break;
                while (test_bit(HPTE_LOCK_BIT, word))
                        cpu_relax();
        }
}

static inline void native_unlock_hpte(struct hash_pte *hptep)
{
        unsigned long *word = (unsigned long *)&hptep->v;

        clear_bit_unlock(HPTE_LOCK_BIT, word);
}

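/*
 * Locking protocol: native_lock_hpte() spins on the software lock bit
 * in the HPTE's first doubleword. The unlock is either explicit
 * (native_unlock_hpte) or implicit, since storing a new value to
 * hptep->v with the lock bit clear releases the lock as a side effect
 * (see the "this also unlocks" sites elsewhere in this file).
 */
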
static long native_hpte_insert(unsigned long hpte_group, unsigned long vpn,
                        unsigned long pa, unsigned long rflags,
                        unsigned long vflags, int psize, int apsize, int ssize)
{
        struct hash_pte *hptep = htab_address + hpte_group;
        unsigned long hpte_v, hpte_r;
        int i;

        if (!(vflags & HPTE_V_BOLTED)) {
                DBG_LOW(" insert(group=%lx, vpn=%016lx, pa=%016lx,"
                        " rflags=%lx, vflags=%lx, psize=%d)\n",
                        hpte_group, vpn, pa, rflags, vflags, psize);
        }

        for (i = 0; i < HPTES_PER_GROUP; i++) {
                if (!(be64_to_cpu(hptep->v) & HPTE_V_VALID)) {
                        /* retry with lock held */
                        native_lock_hpte(hptep);
                        if (!(be64_to_cpu(hptep->v) & HPTE_V_VALID))
                                break;
                        native_unlock_hpte(hptep);
                }

                hptep++;
        }

        if (i == HPTES_PER_GROUP)
                return -1;

        hpte_v = hpte_encode_v(vpn, psize, apsize, ssize) | vflags | HPTE_V_VALID;
        hpte_r = hpte_encode_r(pa, psize, apsize) | rflags;

        if (!(vflags & HPTE_V_BOLTED)) {
                DBG_LOW(" i=%x hpte_v=%016lx, hpte_r=%016lx\n",
                        i, hpte_v, hpte_r);
        }

        hptep->r = cpu_to_be64(hpte_r);
        /* Guarantee the second dword is visible before the valid bit */
        eieio();
        /*
         * Now set the first dword including the valid bit
         * NOTE: this also unlocks the hpte
         */
        hptep->v = cpu_to_be64(hpte_v);

        __asm__ __volatile__ ("ptesync" : : : "memory");

        return i | (!!(vflags & HPTE_V_SECONDARY) << 3);
}

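/*
 * Worked example of the return encoding above (values illustrative):
 * an HPTE placed in slot 3 of the secondary group returns
 * 3 | (1 << 3) = 0xb, i.e. the low three bits carry the slot within
 * the group and bit 3 flags the secondary hash.
 */
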
static long native_hpte_remove(unsigned long hpte_group)
{
        struct hash_pte *hptep;
        int i;
        int slot_offset;
        unsigned long hpte_v;

        DBG_LOW(" remove(group=%lx)\n", hpte_group);

        /* pick a random entry to start at */
        slot_offset = mftb() & 0x7;

        for (i = 0; i < HPTES_PER_GROUP; i++) {
                hptep = htab_address + hpte_group + slot_offset;
                hpte_v = be64_to_cpu(hptep->v);

                if ((hpte_v & HPTE_V_VALID) && !(hpte_v & HPTE_V_BOLTED)) {
                        /* retry with lock held */
                        native_lock_hpte(hptep);
                        hpte_v = be64_to_cpu(hptep->v);
                        if ((hpte_v & HPTE_V_VALID)
                            && !(hpte_v & HPTE_V_BOLTED))
                                break;
                        native_unlock_hpte(hptep);
                }

                slot_offset++;
                slot_offset &= 0x7;
        }

        if (i == HPTES_PER_GROUP)
                return -1;

        /* Invalidate the hpte. NOTE: this also unlocks it */
        hptep->v = 0;

        return i;
}

static long native_hpte_updatepp(unsigned long slot, unsigned long newpp,
                                 unsigned long vpn, int bpsize,
                                 int apsize, int ssize, unsigned long flags)
{
        struct hash_pte *hptep = htab_address + slot;
        unsigned long hpte_v, want_v;
        int ret = 0, local = 0;

        want_v = hpte_encode_avpn(vpn, bpsize, ssize);

        DBG_LOW(" update(vpn=%016lx, avpnv=%016lx, group=%lx, newpp=%lx)",
                vpn, want_v & HPTE_V_AVPN, slot, newpp);

        hpte_v = be64_to_cpu(hptep->v);
        /*
         * We always need to invalidate the TLB because hpte_remove doesn't
         * do a tlb invalidate. If a hash bucket gets full, we "evict" a
         * more or less random entry from it. When we do that we don't
         * invalidate the TLB (hpte_remove) because we assume the old
         * translation is still technically "valid".
         */
        if (!HPTE_V_COMPARE(hpte_v, want_v) || !(hpte_v & HPTE_V_VALID)) {
                DBG_LOW(" -> miss\n");
                ret = -1;
        } else {
                native_lock_hpte(hptep);
                /* recheck with locks held */
                hpte_v = be64_to_cpu(hptep->v);
                if (unlikely(!HPTE_V_COMPARE(hpte_v, want_v) ||
                             !(hpte_v & HPTE_V_VALID))) {
                        ret = -1;
                } else {
                        DBG_LOW(" -> hit\n");
                        /* Update the HPTE */
                        hptep->r = cpu_to_be64((be64_to_cpu(hptep->r) &
                                                ~(HPTE_R_PP | HPTE_R_N)) |
                                               (newpp & (HPTE_R_PP | HPTE_R_N |
                                                         HPTE_R_C)));
                }
                native_unlock_hpte(hptep);
        }

        if (flags & HPTE_LOCAL_UPDATE)
                local = 1;
        /*
         * Ensure it is out of the tlb too if it is not a nohpte fault
         */
        if (!(flags & HPTE_NOHPTE_UPDATE))
                tlbie(vpn, bpsize, apsize, ssize, local);

        return ret;
}

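/*
 * On the flags handled above: HPTE_LOCAL_UPDATE requests a CPU-local
 * TLB flush (the tlbiel path), while HPTE_NOHPTE_UPDATE indicates the
 * fault was taken with no HPTE present, so nothing stale can be in the
 * TLB and the tlbie can be skipped entirely.
 */
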
static long native_hpte_find(unsigned long vpn, int psize, int ssize)
{
        struct hash_pte *hptep;
        unsigned long hash;
        unsigned long i;
        long slot;
        unsigned long want_v, hpte_v;

        hash = hpt_hash(vpn, mmu_psize_defs[psize].shift, ssize);
        want_v = hpte_encode_avpn(vpn, psize, ssize);

        /* Bolted mappings are only ever in the primary group */
        slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
        for (i = 0; i < HPTES_PER_GROUP; i++) {
                hptep = htab_address + slot;
                hpte_v = be64_to_cpu(hptep->v);

                if (HPTE_V_COMPARE(hpte_v, want_v) && (hpte_v & HPTE_V_VALID))
                        /* HPTE matches */
                        return slot;
                ++slot;
        }

        return -1;
}

/*
 * Update the page protection bits. Intended to be used to create
 * guard pages for kernel data structures on pages which are bolted
 * in the HPT. Assumes pages being operated on will not be stolen.
 *
 * No need to lock here because we should be the only user.
 */
static void native_hpte_updateboltedpp(unsigned long newpp, unsigned long ea,
                                       int psize, int ssize)
{
        unsigned long vpn;
        unsigned long vsid;
        long slot;
        struct hash_pte *hptep;

        vsid = get_kernel_vsid(ea, ssize);
        vpn = hpt_vpn(ea, vsid, ssize);

        slot = native_hpte_find(vpn, psize, ssize);
        if (slot == -1)
                panic("could not find page to bolt\n");
        hptep = htab_address + slot;

        /* Update the HPTE */
        hptep->r = cpu_to_be64((be64_to_cpu(hptep->r) &
                                ~(HPTE_R_PP | HPTE_R_N)) |
                               (newpp & (HPTE_R_PP | HPTE_R_N)));
        /*
         * Ensure it is out of the tlb too. For bolted entries the base
         * and actual page sizes are the same.
         */
        tlbie(vpn, psize, psize, ssize, 0);
}

static void native_hpte_invalidate(unsigned long slot, unsigned long vpn,
                                   int bpsize, int apsize, int ssize, int local)
{
        struct hash_pte *hptep = htab_address + slot;
        unsigned long hpte_v;
        unsigned long want_v;
        unsigned long flags;

        local_irq_save(flags);

        DBG_LOW(" invalidate(vpn=%016lx, hash: %lx)\n", vpn, slot);

        want_v = hpte_encode_avpn(vpn, bpsize, ssize);
        native_lock_hpte(hptep);
        hpte_v = be64_to_cpu(hptep->v);

        /*
         * We always need to invalidate the TLB because hpte_remove doesn't
         * do a tlb invalidate. If a hash bucket gets full, we "evict" a
         * more or less random entry from it. When we do that we don't
         * invalidate the TLB (hpte_remove) because we assume the old
         * translation is still technically "valid".
         */
        if (!HPTE_V_COMPARE(hpte_v, want_v) || !(hpte_v & HPTE_V_VALID))
                native_unlock_hpte(hptep);
        else
                /* Invalidate the hpte. NOTE: this also unlocks it */
                hptep->v = 0;

        /* Invalidate the TLB */
        tlbie(vpn, bpsize, apsize, ssize, local);

        local_irq_restore(flags);
}

static void native_hugepage_invalidate(unsigned long vsid,
                                       unsigned long addr,
                                       unsigned char *hpte_slot_array,
                                       int psize, int ssize, int local)
{
        int i;
        struct hash_pte *hptep;
        int actual_psize = MMU_PAGE_16M;
        unsigned int max_hpte_count, valid;
        unsigned long flags, s_addr = addr;
        unsigned long hpte_v, want_v, shift;
        unsigned long hidx, vpn = 0, hash, slot;

        shift = mmu_psize_defs[psize].shift;
        max_hpte_count = 1U << (PMD_SHIFT - shift);

        local_irq_save(flags);
        for (i = 0; i < max_hpte_count; i++) {
                valid = hpte_valid(hpte_slot_array, i);
                if (!valid)
                        continue;
                hidx = hpte_hash_index(hpte_slot_array, i);

                /* get the vpn */
                addr = s_addr + (i * (1ul << shift));
                vpn = hpt_vpn(addr, vsid, ssize);
                hash = hpt_hash(vpn, shift, ssize);
                if (hidx & _PTEIDX_SECONDARY)
                        hash = ~hash;

                slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
                slot += hidx & _PTEIDX_GROUP_IX;

                hptep = htab_address + slot;
                want_v = hpte_encode_avpn(vpn, psize, ssize);
                native_lock_hpte(hptep);
                hpte_v = be64_to_cpu(hptep->v);

                /* Even if we miss, we need to invalidate the TLB */
                if (!HPTE_V_COMPARE(hpte_v, want_v) || !(hpte_v & HPTE_V_VALID))
                        native_unlock_hpte(hptep);
                else
                        /* Invalidate the hpte. NOTE: this also unlocks it */
                        hptep->v = 0;
                /*
                 * We need a TLB invalidate for each address: the tlbie
                 * instruction compares the entry's VA in the TLB with
                 * the VA specified here.
                 */
                tlbie(vpn, psize, actual_psize, ssize, local);
        }
        local_irq_restore(flags);
}

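/*
 * The hpte_slot_array walked above records, for each base-page-sized
 * piece of the huge page, whether an HPTE was inserted (hpte_valid)
 * and which hash slot it went to (hpte_hash_index). With a 64K base
 * page under a 16M huge page that is 1 << (24 - 16) = 256 possible
 * entries per PMD (sizes illustrative, not fixed by this function).
 */
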
static inline int __hpte_actual_psize(unsigned int lp, int psize)
{
        int i, shift;
        unsigned int mask;

        /* start from 1 ignoring MMU_PAGE_4K */
        for (i = 1; i < MMU_PAGE_COUNT; i++) {

                /* invalid penc */
                if (mmu_psize_defs[psize].penc[i] == -1)
                        continue;
                /*
                 * encoding bits per actual page size
                 *        PTE LP     actual page size
                 *    rrrr rrrz      >=8KB
                 *    rrrr rrzz      >=16KB
                 *    rrrr rzzz      >=32KB
                 *    rrrr zzzz      >=64KB
                 *    .......
                 */
                shift = mmu_psize_defs[i].shift - LP_SHIFT;
                if (shift > LP_BITS)
                        shift = LP_BITS;
                mask = (1 << shift) - 1;
                if ((lp & mask) == mmu_psize_defs[psize].penc[i])
                        return i;
        }
        return -1;
}

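/*
 * Worked example for the decode above (assuming LP_SHIFT = 12 and
 * LP_BITS = 8 as defined in mmu-hash64.h): a candidate 64K actual page
 * has shift 16, so shift - LP_SHIFT = 4 and mask = 0xf; only the low
 * four LP bits are compared against the penc value reported by
 * firmware for that base/actual page-size pair.
 */
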
static void hpte_decode(struct hash_pte *hpte, unsigned long slot,
                        int *psize, int *apsize, int *ssize, unsigned long *vpn)
{
        unsigned long avpn, pteg, vpi;
        unsigned long hpte_v = be64_to_cpu(hpte->v);
        unsigned long hpte_r = be64_to_cpu(hpte->r);
        unsigned long vsid, seg_off;
        int size, a_size, shift;
        /* Look at the 8 bit LP value */
        unsigned int lp = (hpte_r >> LP_SHIFT) & ((1 << LP_BITS) - 1);

        if (!(hpte_v & HPTE_V_LARGE)) {
                size = MMU_PAGE_4K;
                a_size = MMU_PAGE_4K;
        } else {
                for (size = 0; size < MMU_PAGE_COUNT; size++) {

                        /* valid entries have a shift value */
                        if (!mmu_psize_defs[size].shift)
                                continue;

                        a_size = __hpte_actual_psize(lp, size);
                        if (a_size != -1)
                                break;
                }
        }
        /* This works for all page sizes, and for 256M and 1T segments */
        *ssize = hpte_v >> HPTE_V_SSIZE_SHIFT;
        shift = mmu_psize_defs[size].shift;

        avpn = (HPTE_V_AVPN_VAL(hpte_v) & ~mmu_psize_defs[size].avpnm);
        pteg = slot / HPTES_PER_GROUP;
        if (hpte_v & HPTE_V_SECONDARY)
                pteg = ~pteg;

        switch (*ssize) {
        case MMU_SEGSIZE_256M:
                /* We only have 28 - 23 bits of seg_off in avpn */
                seg_off = (avpn & 0x1f) << 23;
                vsid = avpn >> 5;
                /* We can find more bits from the pteg value */
                if (shift < 23) {
                        vpi = (vsid ^ pteg) & htab_hash_mask;
                        seg_off |= vpi << shift;
                }
                *vpn = vsid << (SID_SHIFT - VPN_SHIFT) | seg_off >> VPN_SHIFT;
                break;
        case MMU_SEGSIZE_1T:
                /* We only have 40 - 23 bits of seg_off in avpn */
                seg_off = (avpn & 0x1ffff) << 23;
                vsid = avpn >> 17;
                if (shift < 23) {
                        vpi = (vsid ^ (vsid << 25) ^ pteg) & htab_hash_mask;
                        seg_off |= vpi << shift;
                }
                *vpn = vsid << (SID_SHIFT_1T - VPN_SHIFT) | seg_off >> VPN_SHIFT;
                break;
        default:
                *vpn = size = 0;
        }
        *psize = size;
        *apsize = a_size;
}

/*
 * clear all mappings on kexec. All cpus are in real mode (or they will
 * be when they isi), and we are the only one left. We rely on our kernel
 * mapping being 0xC0's and the hardware ignoring those two real bits.
 *
 * This must be called with interrupts disabled.
 *
 * Taking the native_tlbie_lock is unsafe here due to the possibility of
 * lockdep being on. On pre-POWER5 hardware, not taking the lock could
 * cause deadlock; on POWER5 and newer, not taking it is fine. This only
 * gets called during boot before secondary CPUs have come up and during
 * crashdump, and all bets are off anyway.
 *
 * TODO: add batching support when enabled. remember, no dynamic memory here,
 * although there is the control page available...
 */
static void native_hpte_clear(void)
{
        unsigned long vpn = 0;
        unsigned long slot, slots;
        struct hash_pte *hptep = htab_address;
        unsigned long hpte_v;
        unsigned long pteg_count;
        int psize, apsize, ssize;

        pteg_count = htab_hash_mask + 1;

        slots = pteg_count * HPTES_PER_GROUP;

        for (slot = 0; slot < slots; slot++, hptep++) {
                /*
                 * we could lock the pte here, but we are the only cpu
                 * running, right? and for crash dump, we probably
                 * don't want to wait for a maybe bad cpu.
                 */
                hpte_v = be64_to_cpu(hptep->v);

                /*
                 * Call __tlbie() here rather than tlbie() since we can't
                 * take the native_tlbie_lock.
                 */
                if (hpte_v & HPTE_V_VALID) {
                        hpte_decode(hptep, slot, &psize, &apsize, &ssize, &vpn);
                        hptep->v = 0;
                        __tlbie(vpn, psize, apsize, ssize);
                }
        }

        asm volatile("eieio; tlbsync; ptesync":::"memory");
}

/*
 * Batched hash table flush, we batch the tlbie's to avoid taking/releasing
 * the lock all the time
 */
static void native_flush_hash_range(unsigned long number, int local)
{
        unsigned long vpn;
        unsigned long hash, index, hidx, shift, slot;
        struct hash_pte *hptep;
        unsigned long hpte_v;
        unsigned long want_v;
        unsigned long flags;
        real_pte_t pte;
        struct ppc64_tlb_batch *batch = this_cpu_ptr(&ppc64_tlb_batch);
        unsigned long psize = batch->psize;
        int ssize = batch->ssize;
        int i;

        local_irq_save(flags);

        for (i = 0; i < number; i++) {
                vpn = batch->vpn[i];
                pte = batch->pte[i];

                pte_iterate_hashed_subpages(pte, psize, vpn, index, shift) {
                        hash = hpt_hash(vpn, shift, ssize);
                        hidx = __rpte_to_hidx(pte, index);
                        if (hidx & _PTEIDX_SECONDARY)
                                hash = ~hash;
                        slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
                        slot += hidx & _PTEIDX_GROUP_IX;
                        hptep = htab_address + slot;
                        want_v = hpte_encode_avpn(vpn, psize, ssize);
                        native_lock_hpte(hptep);
                        hpte_v = be64_to_cpu(hptep->v);
                        if (!HPTE_V_COMPARE(hpte_v, want_v) ||
                            !(hpte_v & HPTE_V_VALID))
                                native_unlock_hpte(hptep);
                        else
                                hptep->v = 0;
                } pte_iterate_hashed_end();
        }

        if (mmu_has_feature(MMU_FTR_TLBIEL) &&
            mmu_psize_defs[psize].tlbiel && local) {
                asm volatile("ptesync":::"memory");
                for (i = 0; i < number; i++) {
                        vpn = batch->vpn[i];
                        pte = batch->pte[i];

                        pte_iterate_hashed_subpages(pte, psize,
                                                    vpn, index, shift) {
                                __tlbiel(vpn, psize, psize, ssize);
                        } pte_iterate_hashed_end();
                }
                asm volatile("ptesync":::"memory");
        } else {
                int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE);

                if (lock_tlbie)
                        raw_spin_lock(&native_tlbie_lock);

                asm volatile("ptesync":::"memory");
                for (i = 0; i < number; i++) {
                        vpn = batch->vpn[i];
                        pte = batch->pte[i];

                        pte_iterate_hashed_subpages(pte, psize,
                                                    vpn, index, shift) {
                                __tlbie(vpn, psize, psize, ssize);
                        } pte_iterate_hashed_end();
                }
                asm volatile("eieio; tlbsync; ptesync":::"memory");

                if (lock_tlbie)
                        raw_spin_unlock(&native_tlbie_lock);
        }

        local_irq_restore(flags);
}

void __init hpte_init_native(void)
{
        ppc_md.hpte_invalidate = native_hpte_invalidate;
        ppc_md.hpte_updatepp = native_hpte_updatepp;
        ppc_md.hpte_updateboltedpp = native_hpte_updateboltedpp;
        ppc_md.hpte_insert = native_hpte_insert;
        ppc_md.hpte_remove = native_hpte_remove;
        ppc_md.hpte_clear_all = native_hpte_clear;
        ppc_md.flush_hash_range = native_flush_hash_range;
        ppc_md.hugepage_invalidate = native_hugepage_invalidate;
}