/*
 * arch/ppc64/mm/slb_low.S
 *
 * Low-level SLB routines
 *
 * Copyright (C) 2004 David Gibson <dwg@au.ibm.com>, IBM
 *
 * Based on earlier C version:
 * Dave Engebretsen and Mike Corrigan {engebret|mikejc}@us.ibm.com
 *    Copyright (c) 2001 Dave Engebretsen
 * Copyright (C) 2002 Anton Blanchard <anton@au.ibm.com>, IBM
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/config.h>
#include <asm/processor.h>
#include <asm/page.h>
#include <asm/mmu.h>
#include <asm/ppc_asm.h>
#include <asm/offsets.h>
#include <asm/cputable.h>

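/*
 * Layout note (Power Architecture slbmte operands): RB carries the
 * ESID, the SLB_ESID_V valid bit and the entry index; RS carries the
 * VSID and the SLB_VSID_* protection/large-page flags. slb_allocate
 * below assembles RB in r10 and RS in r11.
 */
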
/* void slb_allocate(unsigned long ea);
 *
 * Create an SLB entry for the given EA (user or kernel).
 *	r3 = faulting address, r13 = PACA
 *	r9, r10, r11 are clobbered by this function
 * No other registers are examined or changed.
 */
_GLOBAL(slb_allocate)
	/*
	 * First find a slot, round robin. Previously we tried to find
	 * a free slot first but that took too long. Unfortunately we
	 * don't have any LRU information to help us choose a slot.
	 */
#ifdef CONFIG_PPC_ISERIES
	/*
	 * On iSeries, the "bolted" stack segment can be cast out on
	 * shared processor switch so we need to check for a miss on
	 * it and restore it to the right slot.
	 */
	ld	r9,PACAKSAVE(r13)
	clrrdi	r9,r9,28
	clrrdi	r11,r3,28
	li	r10,SLB_NUM_BOLTED-1	/* Stack goes in last bolted slot */
	cmpld	r9,r11
	beq	3f
#endif /* CONFIG_PPC_ISERIES */
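
	/*
	 * Round-robin slot selection, in rough C (an illustrative
	 * sketch only; "stab_rr" names the PACA field behind the
	 * PACASTABRR offset):
	 *
	 *	entry = paca->stab_rr + 1;
	 *	if (entry >= SLB_NUM_ENTRIES)
	 *		entry = SLB_NUM_BOLTED;  (wrap, skip bolted slots)
	 *	paca->stab_rr = entry;
	 */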
	ld	r10,PACASTABRR(r13)
	addi	r10,r10,1
	/* use a cpu feature mask if we ever change our slb size */
	cmpldi	r10,SLB_NUM_ENTRIES

	blt+	4f
	li	r10,SLB_NUM_BOLTED

4:
	std	r10,PACASTABRR(r13)
3:
	/* r3 = faulting address, r10 = entry */

	srdi	r9,r3,60		/* get region */
	srdi	r3,r3,28		/* get esid */
	cmpldi	cr7,r9,0xc		/* cmp KERNELBASE for later use */

	rldimi	r10,r3,28,0		/* r10 = ESID<<28 | entry */
	oris	r10,r10,SLB_ESID_V@h	/* r10 |= SLB_ESID_V */

	/* r3 = esid, r10 = esid_data, cr7 = <>KERNELBASE */

	blt	cr7,0f			/* user or kernel? */

	/* kernel address: proto-VSID = ESID */
	/* WARNING - MAGIC: we don't use the VSID 0xfffffffff, but
	 * this code will generate the proto-VSID 0xfffffffff for the
	 * top segment.  That's ok, the scramble below will translate
	 * it to VSID 0, which is reserved as a bad VSID - one which
	 * will never have any pages in it.  */
	li	r11,SLB_VSID_KERNEL
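	/*
	 * The feature section below is patched at boot: on CPUs
	 * without CPU_FTR_16M_PAGE both instructions are replaced
	 * with nops, so r11 keeps the plain SLB_VSID_KERNEL flags
	 * and we fall through to the unconditional branch to 9f.
	 */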
BEGIN_FTR_SECTION
	bne	cr7,9f
	li	r11,(SLB_VSID_KERNEL|SLB_VSID_L)
END_FTR_SECTION_IFSET(CPU_FTR_16M_PAGE)
	b	9f

0:	/* user address: proto-VSID = context<<15 | ESID */
	srdi.	r9,r3,USER_ESID_BITS
	bne-	8f			/* invalid ea bits set */
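	/*
	 * A valid user ESID has at most USER_ESID_BITS bits, so any
	 * bits surviving the shift above mean the EA is outside the
	 * supported user address range; 8: installs the reserved bad
	 * VSID for such addresses.
	 */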

#ifdef CONFIG_HUGETLB_PAGE
BEGIN_FTR_SECTION
	lhz	r9,PACAHIGHHTLBAREAS(r13)
	srdi	r11,r3,(HTLB_AREA_SHIFT-SID_SHIFT)
	srd	r9,r9,r11
	andi.	r9,r9,1
	bne	5f

	li	r11,SLB_VSID_USER

	cmpldi	r3,16
	bge	6f

	lhz	r9,PACALOWHTLBAREAS(r13)
	srd	r9,r9,r3
	andi.	r9,r9,1

	beq	6f

5:	li	r11,SLB_VSID_USER|SLB_VSID_L
END_FTR_SECTION_IFSET(CPU_FTR_16M_PAGE)
#endif /* CONFIG_HUGETLB_PAGE */
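
	/*
	 * The hugepage test above, in rough C (an illustrative sketch
	 * only; the low/high bitmap names mirror the PACA offsets used
	 * above, and esid < 16 covers the 16 x 256MB segments below
	 * 4GB):
	 *
	 *	large = (high_htlb_areas >>
	 *		 (esid >> (HTLB_AREA_SHIFT - SID_SHIFT))) & 1;
	 *	if (!large && esid < 16)
	 *		large = (low_htlb_areas >> esid) & 1;
	 *	if (large)
	 *		flags = SLB_VSID_USER | SLB_VSID_L;
	 */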

6:	ld	r9,PACACONTEXTID(r13)
	rldimi	r3,r9,USER_ESID_BITS,0

9:	/* r3 = protovsid, r11 = flags, r10 = esid_data, cr7 = <>KERNELBASE */
	ASM_VSID_SCRAMBLE(r3,r9)
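	/*
	 * ASM_VSID_SCRAMBLE (<asm/mmu.h>) spreads proto-VSIDs across
	 * the hash space with a multiplicative hash; assuming the
	 * constants of this kernel it computes
	 *
	 *	vsid = (protovsid * VSID_MULTIPLIER) % VSID_MODULUS
	 *
	 * where VSID_MODULUS is 2^36 - 1. That is why proto-VSID
	 * 0xfffffffff (== VSID_MODULUS) scrambles to the reserved
	 * VSID 0 mentioned in the kernel-address path above.
	 */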

	rldimi	r11,r3,SLB_VSID_SHIFT,16	/* combine VSID and flags */

	/*
	 * No need for an isync before or after this slbmte. The exception
	 * we enter with and the rfid we exit with are context synchronizing.
	 */
	slbmte	r11,r10

	bgelr	cr7			/* we're done for kernel addresses */
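
	/*
	 * The cache update below, in rough C (an illustrative sketch
	 * only; field names follow the PACASLBCACHE* offsets):
	 *
	 *	offset = paca->slb_cache_ptr;
	 *	if (offset < SLB_CACHE_ENTRIES)
	 *		paca->slb_cache[offset++] = (u16)esid;
	 *	else
	 *		offset = SLB_CACHE_ENTRIES + 1;
	 *	paca->slb_cache_ptr = offset;
	 */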

	/* Update the slb cache */
	lhz	r3,PACASLBCACHEPTR(r13)	/* offset = paca->slb_cache_ptr */
	cmpldi	r3,SLB_CACHE_ENTRIES
	bge	1f

	/* still room in the slb cache */
	sldi	r11,r3,1		/* r11 = offset * sizeof(u16) */
	rldicl	r10,r10,36,28		/* get low 16 bits of the ESID */
	add	r11,r11,r13		/* r11 = (u16 *)paca + offset */
	sth	r10,PACASLBCACHE(r11)	/* paca->slb_cache[offset] = esid */
	addi	r3,r3,1			/* offset++ */
	b	2f
1:					/* offset >= SLB_CACHE_ENTRIES */
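	/*
	 * Mark the cache as overflowed: a pointer of
	 * SLB_CACHE_ENTRIES+1 tells the context-switch code (the
	 * slb_cache_ptr consumer in mm/slb.c) that the cache is
	 * incomplete, so the whole SLB must be flushed instead.
	 */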
	li	r3,SLB_CACHE_ENTRIES+1
2:
	sth	r3,PACASLBCACHEPTR(r13)	/* paca->slb_cache_ptr = offset */
	blr

8:	/* invalid EA */
	li	r3,0			/* BAD_VSID */
	li	r11,SLB_VSID_USER	/* flags don't much matter */
	b	9b