/*
 * trampoline.S: Jump start slave processors on sparc64.
 *
 * Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
 */


#include <asm/head.h>
#include <asm/asi.h>
#include <asm/lsu.h>
#include <asm/dcr.h>
#include <asm/dcu.h>
#include <asm/pstate.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/spitfire.h>
#include <asm/processor.h>
#include <asm/thread_info.h>
#include <asm/mmu.h>
#include <asm/hypervisor.h>
#include <asm/cpudata.h>

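	/* Null-terminated service and method name strings handed to
	 * the OBP "call-method" client interface below, used to lock
	 * kernel translations into the I- and D-TLBs on cpus whose
	 * MMU is still managed by the firmware.
	 */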
	.data
	.align	8
call_method:
	.asciz	"call-method"
	.align	8
itlb_load:
	.asciz	"SUNW,itlb-load"
	.align	8
dtlb_load:
	.asciz	"SUNW,dtlb-load"

#define TRAMP_STACK_SIZE	1024
	.align	16
tramp_stack:
	.skip	TRAMP_STACK_SIZE

	.align		8
	.globl		sparc64_cpu_startup, sparc64_cpu_startup_end
sparc64_cpu_startup:
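	/* Dispatch on cpu type: sun4v (Niagara), Cheetah,
	 * Cheetah+ and its followons, or Spitfire.
	 */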
	BRANCH_IF_SUN4V(g1, niagara_startup)
	BRANCH_IF_CHEETAH_BASE(g1, g5, cheetah_startup)
	BRANCH_IF_CHEETAH_PLUS_OR_FOLLOWON(g1, g5, cheetah_plus_startup)

	ba,pt	%xcc, spitfire_startup
	 nop

cheetah_plus_startup:
	/* Preserve OBP chosen DCU and DCR register settings.  */
	ba,pt	%xcc, cheetah_generic_startup
	 nop

cheetah_startup:
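	/* Program the dispatch control register (%asr18), then enable
	 * the I/D caches and I/D MMUs in the DCU control register.
	 */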
	mov	DCR_BPE | DCR_RPE | DCR_SI | DCR_IFPOE | DCR_MS, %g1
	wr	%g1, %asr18

	sethi	%uhi(DCU_ME|DCU_RE|DCU_HPE|DCU_SPE|DCU_SL|DCU_WE), %g5
	or	%g5, %ulo(DCU_ME|DCU_RE|DCU_HPE|DCU_SPE|DCU_SL|DCU_WE), %g5
	sllx	%g5, 32, %g5
	or	%g5, DCU_DM | DCU_IM | DCU_DC | DCU_IC, %g5
	stxa	%g5, [%g0] ASI_DCU_CONTROL_REG
	membar	#Sync
	/* fallthru */

cheetah_generic_startup:
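	/* Clear the primary, secondary, and nucleus TSB extension
	 * registers.
	 */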
	mov	TSB_EXTENSION_P, %g3
	stxa	%g0, [%g3] ASI_DMMU
	stxa	%g0, [%g3] ASI_IMMU
	membar	#Sync

	mov	TSB_EXTENSION_S, %g3
	stxa	%g0, [%g3] ASI_DMMU
	membar	#Sync

	mov	TSB_EXTENSION_N, %g3
	stxa	%g0, [%g3] ASI_DMMU
	stxa	%g0, [%g3] ASI_IMMU
	membar	#Sync
	/* fallthru */

niagara_startup:
	/* Disable STICK_INT interrupts. */
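	/* %asr25 is STICK_CMPR; bit 63 is its interrupt-disable bit. */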
	sethi	%hi(0x80000000), %g5
	sllx	%g5, 32, %g5
	wr	%g5, %asr25

	ba,pt	%xcc, startup_continue
	 nop

spitfire_startup:
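	/* Enable the I/D caches and I/D MMUs via the LSU control
	 * register.
	 */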
	mov	(LSU_CONTROL_IC | LSU_CONTROL_DC | LSU_CONTROL_IM | LSU_CONTROL_DM), %g1
	stxa	%g1, [%g0] ASI_LSU_CONTROL
	membar	#Sync

startup_continue:
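	/* The master cpu passes us a pointer in %o0 to the location
	 * holding our new thread_info; preserve it in %l0 across the
	 * firmware and C calls below.
	 */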
	mov	%o0, %l0
	BRANCH_IF_SUN4V(g1, niagara_lock_tlb)

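	/* Set bit 63 of %tick_cmpr to disable TICK compare
	 * interrupts.
	 */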
	sethi	%hi(0x80000000), %g2
	sllx	%g2, 32, %g2
	wr	%g2, 0, %tick_cmpr

	/* Call OBP by hand to lock KERNBASE into the I/D TLBs.
	 * We lock 'num_kernel_image_mappings' consecutive entries.
	 */
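	/* Take the PROM entry lock (a simple ldstub spinlock) so that
	 * only one cpu at a time calls into the firmware.
	 */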
	sethi	%hi(prom_entry_lock), %g2
1:	ldstub	[%g2 + %lo(prom_entry_lock)], %g1
	brnz,pn	%g1, 1b
	 nop

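	/* Switch onto the PROM client interface stack; p1275buf + 0x10
	 * holds the CIF stack pointer, and p1275buf + 0x08 (used for
	 * the calls below) holds the CIF handler entry point.
	 */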
	sethi	%hi(p1275buf), %g2
	or	%g2, %lo(p1275buf), %g2
	ldx	[%g2 + 0x10], %l2
	add	%l2, -(192 + 128), %sp
	flushw

	/* Setup the loop variables:
	 * %l3: VADDR base
	 * %l4: TTE base
	 * %l5: Loop iterator, iterates from 0 to 'num_kernel_image_mappings'
	 * %l6: Number of TTE entries to map
	 * %l7: Highest TTE entry number, we count down
	 */
	sethi	%hi(KERNBASE), %l3
	sethi	%hi(kern_locked_tte_data), %l4
	ldx	[%l4 + %lo(kern_locked_tte_data)], %l4
	clr	%l5
	sethi	%hi(num_kernel_image_mappings), %l6
	lduw	[%l6 + %lo(num_kernel_image_mappings)], %l6

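	/* Highest usable TLB entry number: Cheetah's lockable TLB has
	 * 16 entries, Spitfire's has 64.
	 */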
	mov	15, %l7
	BRANCH_IF_ANY_CHEETAH(g1,g5,2f)

	mov	63, %l7
2:

3:
	/* Lock into I-MMU */
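	/* Build the P1275 argument buffer on the PROM stack: the
	 * "call-method" service name, 5 input arguments, 1 return
	 * value, then the method name, the MMU ihandle, and the three
	 * method arguments (vaddr, tte data, tlb entry index).
	 */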
	sethi	%hi(call_method), %g2
	or	%g2, %lo(call_method), %g2
	stx	%g2, [%sp + 2047 + 128 + 0x00]
	mov	5, %g2
	stx	%g2, [%sp + 2047 + 128 + 0x08]
	mov	1, %g2
	stx	%g2, [%sp + 2047 + 128 + 0x10]
	sethi	%hi(itlb_load), %g2
	or	%g2, %lo(itlb_load), %g2
	stx	%g2, [%sp + 2047 + 128 + 0x18]
	sethi	%hi(prom_mmu_ihandle_cache), %g2
	lduw	[%g2 + %lo(prom_mmu_ihandle_cache)], %g2
	stx	%g2, [%sp + 2047 + 128 + 0x20]

	/* Each TTE maps 4MB, convert index to offset.  */
	sllx	%l5, 22, %g1

	add	%l3, %g1, %g2
	stx	%g2, [%sp + 2047 + 128 + 0x28]	! VADDR
	add	%l4, %g1, %g2
	stx	%g2, [%sp + 2047 + 128 + 0x30]	! TTE

	/* TTE index is highest minus loop index.  */
	sub	%l7, %l5, %g2
	stx	%g2, [%sp + 2047 + 128 + 0x38]

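	/* Call the PROM client interface handler with the address of
	 * the argument buffer (stack pointer plus bias) in %o0.
	 */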
	sethi	%hi(p1275buf), %g2
	or	%g2, %lo(p1275buf), %g2
	ldx	[%g2 + 0x08], %o1
	call	%o1
	 add	%sp, (2047 + 128), %o0

	/* Lock into D-MMU */
	sethi	%hi(call_method), %g2
	or	%g2, %lo(call_method), %g2
	stx	%g2, [%sp + 2047 + 128 + 0x00]
	mov	5, %g2
	stx	%g2, [%sp + 2047 + 128 + 0x08]
	mov	1, %g2
	stx	%g2, [%sp + 2047 + 128 + 0x10]
	sethi	%hi(dtlb_load), %g2
	or	%g2, %lo(dtlb_load), %g2
	stx	%g2, [%sp + 2047 + 128 + 0x18]
	sethi	%hi(prom_mmu_ihandle_cache), %g2
	lduw	[%g2 + %lo(prom_mmu_ihandle_cache)], %g2
	stx	%g2, [%sp + 2047 + 128 + 0x20]

	/* Each TTE maps 4MB, convert index to offset.  */
	sllx	%l5, 22, %g1

	add	%l3, %g1, %g2
	stx	%g2, [%sp + 2047 + 128 + 0x28]	! VADDR
	add	%l4, %g1, %g2
	stx	%g2, [%sp + 2047 + 128 + 0x30]	! TTE

	/* TTE index is highest minus loop index.  */
	sub	%l7, %l5, %g2
	stx	%g2, [%sp + 2047 + 128 + 0x38]

	sethi	%hi(p1275buf), %g2
	or	%g2, %lo(p1275buf), %g2
	ldx	[%g2 + 0x08], %o1
	call	%o1
	 add	%sp, (2047 + 128), %o0

	add	%l5, 1, %l5
	cmp	%l5, %l6
	bne,pt	%xcc, 3b
	 nop

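	/* All mappings are locked; release the PROM entry lock. */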
	sethi	%hi(prom_entry_lock), %g2
	stb	%g0, [%g2 + %lo(prom_entry_lock)]

	ba,pt	%xcc, after_lock_tlb
	 nop

niagara_lock_tlb:
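	/* sun4v: use the mmu_map_perm_addr hypervisor fast trap to
	 * install permanent 4MB mappings of the kernel image in both
	 * the I-MMU and the D-MMU (args: vaddr, context 0, tte data,
	 * mmu flags).
	 */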
	sethi	%hi(KERNBASE), %l3
	sethi	%hi(kern_locked_tte_data), %l4
	ldx	[%l4 + %lo(kern_locked_tte_data)], %l4
	clr	%l5
	sethi	%hi(num_kernel_image_mappings), %l6
	lduw	[%l6 + %lo(num_kernel_image_mappings)], %l6

1:
	mov	HV_FAST_MMU_MAP_PERM_ADDR, %o5
	sllx	%l5, 22, %g2
	add	%l3, %g2, %o0
	clr	%o1
	add	%l4, %g2, %o2
	mov	HV_MMU_IMMU, %o3
	ta	HV_FAST_TRAP

	mov	HV_FAST_MMU_MAP_PERM_ADDR, %o5
	sllx	%l5, 22, %g2
	add	%l3, %g2, %o0
	clr	%o1
	add	%l4, %g2, %o2
	mov	HV_MMU_DMMU, %o3
	ta	HV_FAST_TRAP

	add	%l5, 1, %l5
	cmp	%l5, %l6
	bne,pt	%xcc, 1b
	 nop

after_lock_tlb:
	wrpr	%g0, (PSTATE_PRIV | PSTATE_PEF), %pstate
	wr	%g0, 0, %fprs

	wr	%g0, ASI_P, %asi

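	/* Clear the primary and secondary context registers.  The
	 * .sun4v_1insn_patch entries let early boot overwrite the
	 * marked (661:) sun4u instruction with its sun4v equivalent.
	 */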
	mov	PRIMARY_CONTEXT, %g7

661:	stxa	%g0, [%g7] ASI_DMMU
	.section .sun4v_1insn_patch, "ax"
	.word	661b
	stxa	%g0, [%g7] ASI_MMU
	.previous

	membar	#Sync
	mov	SECONDARY_CONTEXT, %g7

661:	stxa	%g0, [%g7] ASI_DMMU
	.section .sun4v_1insn_patch, "ax"
	.word	661b
	stxa	%g0, [%g7] ASI_MMU
	.previous

	membar	#Sync

	/* Everything we do here, until we properly take over the
	 * trap table, must be done with extreme care.  We cannot
	 * make any references to %g6 (current thread pointer),
	 * %g4 (current task pointer), or %g5 (base of current cpu's
	 * per-cpu area) until we properly take over the trap table
	 * from the firmware and hypervisor.
	 *
	 * Get onto temporary stack which is in the locked kernel image.
	 */
	sethi	%hi(tramp_stack), %g1
	or	%g1, %lo(tramp_stack), %g1
	add	%g1, TRAMP_STACK_SIZE, %g1
	sub	%g1, STACKFRAME_SZ + STACK_BIAS + 256, %sp
	mov	0, %fp

	/* Put garbage in these registers to trap any access to them.  */
	set	0xdeadbeef, %g4
	set	0xdeadbeef, %g5
	set	0xdeadbeef, %g6

	call	init_irqwork_curcpu
	 nop

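	/* Only sun4v (tlb_type == 3, "hypervisor") needs its cpu
	 * mondo queues registered with the hypervisor.
	 */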
	sethi	%hi(tlb_type), %g3
	lduw	[%g3 + %lo(tlb_type)], %g2
	cmp	%g2, 3
	bne,pt	%icc, 1f
	 nop

	call	hard_smp_processor_id
	 nop

	call	sun4v_register_mondo_queues
	 nop

1:	call	init_cur_cpu_trap
	 ldx	[%l0], %o0

	/* Start using proper page size encodings in ctx register.  */
	sethi	%hi(sparc64_kern_pri_context), %g3
	ldx	[%g3 + %lo(sparc64_kern_pri_context)], %g2
	mov	PRIMARY_CONTEXT, %g1

661:	stxa	%g2, [%g1] ASI_DMMU
	.section .sun4v_1insn_patch, "ax"
	.word	661b
	stxa	%g2, [%g1] ASI_MMU
	.previous

	membar	#Sync

	wrpr	%g0, 0, %wstate

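	/* Re-take the PROM entry lock around the SUNW,set-trap-table
	 * call below; it is released at label 3.
	 */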
	sethi	%hi(prom_entry_lock), %g2
1:	ldstub	[%g2 + %lo(prom_entry_lock)], %g1
	brnz,pn	%g1, 1b
	 nop

	/* As a hack, put &init_thread_union into %g6.
	 * prom_world() loads from here to restore the %asi
	 * register.
	 */
	sethi	%hi(init_thread_union), %g6
	or	%g6, %lo(init_thread_union), %g6

	sethi	%hi(is_sun4v), %o0
	lduw	[%o0 + %lo(is_sun4v)], %o0
	brz,pt	%o0, 2f
	 nop

	TRAP_LOAD_TRAP_BLOCK(%g2, %g3)
	add	%g2, TRAP_PER_CPU_FAULT_INFO, %g2
	stxa	%g2, [%g0] ASI_SCRATCHPAD

	/* Compute physical address:
	 *
	 * paddr = kern_base + (mmfsa_vaddr - KERNBASE)
	 */
	sethi	%hi(KERNBASE), %g3
	sub	%g2, %g3, %g2
	sethi	%hi(kern_base), %g3
	ldx	[%g3 + %lo(kern_base)], %g3
	add	%g2, %g3, %o1
	sethi	%hi(sparc64_ttable_tl0), %o0

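	/* SUNW,set-trap-table: on sun4v it takes two arguments, the
	 * trap table vaddr and the MMU fault status area paddr.
	 */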
	set	prom_set_trap_table_name, %g2
	stx	%g2, [%sp + 2047 + 128 + 0x00]
	mov	2, %g2
	stx	%g2, [%sp + 2047 + 128 + 0x08]
	mov	0, %g2
	stx	%g2, [%sp + 2047 + 128 + 0x10]
	stx	%o0, [%sp + 2047 + 128 + 0x18]
	stx	%o1, [%sp + 2047 + 128 + 0x20]
	sethi	%hi(p1275buf), %g2
	or	%g2, %lo(p1275buf), %g2
	ldx	[%g2 + 0x08], %o1
	call	%o1
	 add	%sp, (2047 + 128), %o0

	ba,pt	%xcc, 3f
	 nop

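	/* Pre-sun4v: SUNW,set-trap-table takes only the trap table
	 * vaddr.
	 */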
2:	sethi	%hi(sparc64_ttable_tl0), %o0
	set	prom_set_trap_table_name, %g2
	stx	%g2, [%sp + 2047 + 128 + 0x00]
	mov	1, %g2
	stx	%g2, [%sp + 2047 + 128 + 0x08]
	mov	0, %g2
	stx	%g2, [%sp + 2047 + 128 + 0x10]
	stx	%o0, [%sp + 2047 + 128 + 0x18]
	sethi	%hi(p1275buf), %g2
	or	%g2, %lo(p1275buf), %g2
	ldx	[%g2 + 0x08], %o1
	call	%o1
	 add	%sp, (2047 + 128), %o0

3:	sethi	%hi(prom_entry_lock), %g2
	stb	%g0, [%g2 + %lo(prom_entry_lock)]

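	/* %g6 = our new thread_info, %g4 = its task_struct.  The
	 * initial kernel stack lives at the top of the THREAD_SIZE
	 * thread union, minus one frame and the stack bias.
	 */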
	ldx	[%l0], %g6
	ldx	[%g6 + TI_TASK], %g4

	mov	1, %g5
	sllx	%g5, THREAD_SHIFT, %g5
	sub	%g5, (STACKFRAME_SZ + STACK_BIAS), %g5
	add	%g6, %g5, %sp
	mov	0, %fp

	rdpr	%pstate, %o1
	or	%o1, PSTATE_IE, %o1
	wrpr	%o1, 0, %pstate

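	/* Interrupts are on; enter the generic SMP startup path.
	 * smp_callin() should never return; if it does, panic and
	 * spin.
	 */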
	call	smp_callin
	 nop

	call	cpu_panic
	 nop
1:	b,a,pt	%xcc, 1b

	.align	8
sparc64_cpu_startup_end: