[SPARC64]: Use ASI_SCRATCHPAD address 0x0 properly.
[deliverable/linux.git] / arch / sparc64 / kernel / sun4v_ivec.S
CommitLineData
5b0c0572
DM
1/* sun4v_ivec.S: Sun4v interrupt vector handling.
2 *
3 * Copyright (C) 2006 <davem@davemloft.net>
4 */
5
6#include <asm/cpudata.h>
7#include <asm/intr_queue.h>
8
9 .text
10 .align 32
11
12sun4v_cpu_mondo:
	/* sun4v CPU mondo (cross-call) interrupt vector.
	 *
	 * Runs in trap context with no stack; only the alternate
	 * global registers %g1-%g7 are available.  The hypervisor
	 * deposits mondo entries into a per-cpu queue whose head/tail
	 * byte offsets are read via the ASI_QUEUE registers below.
	 */
13 /* Head offset in %g2, tail offset in %g4.
14 * If they are the same, no work.
15 */
16 mov INTRQ_CPU_MONDO_HEAD, %g2
17 ldxa [%g2] ASI_QUEUE, %g2
18 mov INTRQ_CPU_MONDO_TAIL, %g4
19 ldxa [%g4] ASI_QUEUE, %g4
20 cmp %g2, %g4
21 be,pn %xcc, sun4v_cpu_mondo_queue_empty
22 nop
23
24 /* Get &trap_block[smp_processor_id()] into %g3. */
12eaa328
DM
	/* ASI_SCRATCHPAD offset 0x0 holds a pointer into this cpu's
	 * trap_block[] entry (its fault-info member); subtracting the
	 * member offset recovers the trap_block base.
	 */
25 ldxa [%g0] ASI_SCRATCHPAD, %g3
26 sub %g3, TRAP_PER_CPU_FAULT_INFO, %g3
5b0c0572
DM
27
28 /* Get CPU mondo queue base phys address into %g7. */
29 ldx [%g3 + TRAP_PER_CPU_CPU_MONDO_PA], %g7
30
31 /* Now get the cross-call arguments and handler PC, same
32 * layout as sun4u:
33 *
34 * 1st 64-bit word: low half is 32-bit PC, put into %g3 and jmpl to it
35 * high half is context arg to MMU flushes, into %g5
36 * 2nd 64-bit word: 64-bit arg, load into %g1
37 * 3rd 64-bit word: 64-bit arg, load into %g7
38 */
39 ldxa [%g7 + %g2] ASI_PHYS_USE_EC, %g3
40 add %g2, 0x8, %g2
41 srlx %g3, 32, %g5
42 ldxa [%g7 + %g2] ASI_PHYS_USE_EC, %g1
43 add %g2, 0x8, %g2
44 srl %g3, 0, %g3
45 ldxa [%g7 + %g2] ASI_PHYS_USE_EC, %g7
	/* Advance head to the next entry: 0x10 was already added
	 * above, so this brings the total advance to 0x40 (one full
	 * 64-byte queue entry).
	 */
46 add %g2, 0x40 - 0x8 - 0x8, %g2
47
48 /* Update queue head pointer. */
	/* Queue is 8192 bytes; masking wraps the head offset around
	 * the ring buffer.
	 */
49 sethi %hi(8192 - 1), %g4
50 or %g4, %lo(8192 - 1), %g4
51 and %g2, %g4, %g2
52
53 mov INTRQ_CPU_MONDO_HEAD, %g4
54 stxa %g2, [%g4] ASI_QUEUE
55 membar #Sync
56
	/* Jump to the cross-call handler PC extracted above; args are
	 * in %g5 (context), %g1 and %g7, per the sun4u layout comment.
	 */
57 jmpl %g3, %g0
58 nop
59
60sun4v_cpu_mondo_queue_empty:
	/* Nothing queued: retry the interrupted instruction. */
61 retry
62
63sun4v_dev_mondo:
	/* sun4v device mondo (device interrupt) vector.
	 *
	 * Trap context, alternate globals only.  Pulls one 64-byte
	 * entry off the per-cpu device mondo queue, chains the
	 * matching ivector_table bucket onto this cpu's __irq_work
	 * list for its PIL, and raises the corresponding softint.
	 */
64 /* Head offset in %g2, tail offset in %g4. */
65 mov INTRQ_DEVICE_MONDO_HEAD, %g2
66 ldxa [%g2] ASI_QUEUE, %g2
67 mov INTRQ_DEVICE_MONDO_TAIL, %g4
68 ldxa [%g4] ASI_QUEUE, %g4
69 cmp %g2, %g4
70 be,pn %xcc, sun4v_dev_mondo_queue_empty
71 nop
72
73 /* Get &trap_block[smp_processor_id()] into %g3. */
12eaa328
DM
	/* Scratchpad 0x0 points at this cpu's trap_block fault-info;
	 * back up by the member offset to get the trap_block base.
	 */
74 ldxa [%g0] ASI_SCRATCHPAD, %g3
75 sub %g3, TRAP_PER_CPU_FAULT_INFO, %g3
5b0c0572
DM
76
77 /* Get DEV mondo queue base phys address into %g5. */
78 ldx [%g3 + TRAP_PER_CPU_DEV_MONDO_PA], %g5
79
80 /* Load IVEC into %g3. */
81 ldxa [%g5 + %g2] ASI_PHYS_USE_EC, %g3
	/* Consume the whole 64-byte queue entry even though only the
	 * first word (the IVEC) is used -- see XXX note below.
	 */
82 add %g2, 0x40, %g2
83
84 /* XXX There can be a full 64-byte block of data here.
85 * XXX This is how we can get at MSI vector data.
86 * XXX Currently we do not capture this, but when we do we'll
87 * XXX need to add a 64-byte storage area in the struct ino_bucket
88 * XXX or the struct irq_desc.
89 */
90
91 /* Update queue head pointer, this frees up some registers. */
	/* 8192-byte ring buffer wrap, as in the other handlers. */
92 sethi %hi(8192 - 1), %g4
93 or %g4, %lo(8192 - 1), %g4
94 and %g2, %g4, %g2
95
96 mov INTRQ_DEVICE_MONDO_HEAD, %g4
97 stxa %g2, [%g4] ASI_QUEUE
98 membar #Sync
99
100 /* Get &__irq_work[smp_processor_id()] into %g1. */
	/* NOTE(review): %g1 is not loaded anywhere above in this
	 * handler, so it must already contain the cpu index on entry
	 * (scaled by 64 here for the per-cpu __irq_work stride) --
	 * confirm against the trap entry code that vectors here.
	 */
101 sethi %hi(__irq_work), %g4
102 sllx %g1, 6, %g1
103 or %g4, %lo(__irq_work), %g4
104 add %g4, %g1, %g1
105
106 /* Get &ivector_table[IVEC] into %g4. */
	/* ivector_table entries are 32 bytes (IVEC scaled by 32). */
107 sethi %hi(ivector_table), %g4
108 sllx %g3, 5, %g3
109 or %g4, %lo(ivector_table), %g4
110 add %g4, %g3, %g4
111
112 /* Load IRQ %pil into %g5. */
113 ldub [%g4 + 0x04], %g5
114
115 /* Insert ivector_table[] entry into __irq_work[] queue. */
	/* __irq_work slots are indexed by pil (4 bytes each); the
	 * bucket is pushed onto the front of that pil's chain.
	 */
116 sllx %g5, 2, %g3
117 lduw [%g1 + %g3], %g2 /* g2 = irq_work(cpu, pil) */
118 stw %g2, [%g4 + 0x00] /* bucket->irq_chain = g2 */
119 stw %g4, [%g1 + %g3] /* irq_work(cpu, pil) = bucket */
120
121 /* Signal the interrupt by setting (1 << pil) in %softint. */
122 mov 1, %g2
123 sllx %g2, %g5, %g2
124 wr %g2, 0x0, %set_softint
125
126sun4v_dev_mondo_queue_empty:
	/* Falls through from above after raising the softint, and is
	 * also the branch target for an empty queue.
	 */
127 retry
128
129sun4v_res_mondo:
	/* sun4v resumable error vector.
	 *
	 * Copies the 64-byte error report out of the hypervisor queue
	 * into a per-cpu kernel buffer (the queue slot is recycled as
	 * soon as the head pointer advances), then traps into C via
	 * etrap_irq to log it with sun4v_resum_error().
	 */
130 /* Head offset in %g2, tail offset in %g4. */
131 mov INTRQ_RESUM_MONDO_HEAD, %g2
132 ldxa [%g2] ASI_QUEUE, %g2
133 mov INTRQ_RESUM_MONDO_TAIL, %g4
134 ldxa [%g4] ASI_QUEUE, %g4
135 cmp %g2, %g4
136 be,pn %xcc, sun4v_res_mondo_queue_empty
137 nop
138
139 /* Get &trap_block[smp_processor_id()] into %g3. */
12eaa328
DM
	/* Scratchpad 0x0 -> fault info; subtract member offset for the
	 * trap_block base.
	 */
140 ldxa [%g0] ASI_SCRATCHPAD, %g3
141 sub %g3, TRAP_PER_CPU_FAULT_INFO, %g3
5b0c0572
DM
142
143 /* Get RES mondo queue base phys address into %g5. */
144 ldx [%g3 + TRAP_PER_CPU_RESUM_MONDO_PA], %g5
145
146 /* Get RES kernel buffer base phys address into %g7. */
147 ldx [%g3 + TRAP_PER_CPU_RESUM_KBUF_PA], %g7
148
149 /* If the first word is non-zero, queue is full. */
	/* The kernel buffer slot's first word doubles as an "in use"
	 * marker: non-zero means C code has not consumed it yet.
	 */
150 ldxa [%g7 + %g2] ASI_PHYS_USE_EC, %g1
151 brnz,pn %g1, sun4v_res_mondo_queue_full
152 nop
153
154 /* Remember this entry's offset in %g1. */
155 mov %g2, %g1
156
157 /* Copy 64-byte queue entry into kernel buffer. */
	/* Unrolled 8 x 8-byte physical-address copy; %g2 advances
	 * through both queue and kbuf (same offset in each).
	 */
158 ldxa [%g5 + %g2] ASI_PHYS_USE_EC, %g3
159 stxa %g3, [%g7 + %g2] ASI_PHYS_USE_EC
160 add %g2, 0x08, %g2
161 ldxa [%g5 + %g2] ASI_PHYS_USE_EC, %g3
162 stxa %g3, [%g7 + %g2] ASI_PHYS_USE_EC
163 add %g2, 0x08, %g2
164 ldxa [%g5 + %g2] ASI_PHYS_USE_EC, %g3
165 stxa %g3, [%g7 + %g2] ASI_PHYS_USE_EC
166 add %g2, 0x08, %g2
167 ldxa [%g5 + %g2] ASI_PHYS_USE_EC, %g3
168 stxa %g3, [%g7 + %g2] ASI_PHYS_USE_EC
169 add %g2, 0x08, %g2
170 ldxa [%g5 + %g2] ASI_PHYS_USE_EC, %g3
171 stxa %g3, [%g7 + %g2] ASI_PHYS_USE_EC
172 add %g2, 0x08, %g2
173 ldxa [%g5 + %g2] ASI_PHYS_USE_EC, %g3
174 stxa %g3, [%g7 + %g2] ASI_PHYS_USE_EC
175 add %g2, 0x08, %g2
176 ldxa [%g5 + %g2] ASI_PHYS_USE_EC, %g3
177 stxa %g3, [%g7 + %g2] ASI_PHYS_USE_EC
178 add %g2, 0x08, %g2
179 ldxa [%g5 + %g2] ASI_PHYS_USE_EC, %g3
180 stxa %g3, [%g7 + %g2] ASI_PHYS_USE_EC
181 add %g2, 0x08, %g2
182
183 /* Update queue head pointer. */
	/* 8192-byte ring buffer wrap. */
184 sethi %hi(8192 - 1), %g4
185 or %g4, %lo(8192 - 1), %g4
186 and %g2, %g4, %g2
187
188 mov INTRQ_RESUM_MONDO_HEAD, %g4
189 stxa %g2, [%g4] ASI_QUEUE
190 membar #Sync
191
192 /* Disable interrupts and save register state so we can call
193 * C code. The etrap handling will leave %g4 in %l4 for us
194 * when it's done.
195 */
196 rdpr %pil, %g2
197 wrpr %g0, 15, %pil
198 mov %g1, %g4
	/* The rd %pc in the delay slot gives etrap_irq our return
	 * address; execution resumes at the instruction after it.
	 */
199 ba,pt %xcc, etrap_irq
200 rd %pc, %g7
201
202 /* Log the event. */
	/* %l4 = saved kbuf entry offset (from %g4 via etrap). */
203 add %sp, PTREGS_OFF, %o0
204 call sun4v_resum_error
205 mov %l4, %o1
206
207 /* Return from trap. */
208 ba,pt %xcc, rtrap_irq
209 nop
210
211sun4v_res_mondo_queue_empty:
212 retry
213
214sun4v_res_mondo_queue_full:
215 /* The queue is full, consolidate our damage by setting
216 * the head equal to the tail. We'll just trap again otherwise.
217 * Call C code to log the event.
218 */
	/* %g4 still holds the tail offset read at entry. */
219 mov INTRQ_RESUM_MONDO_HEAD, %g2
220 stxa %g4, [%g2] ASI_QUEUE
221 membar #Sync
222
223 rdpr %pil, %g2
224 wrpr %g0, 15, %pil
225 ba,pt %xcc, etrap_irq
226 rd %pc, %g7
227
228 call sun4v_resum_overflow
229 add %sp, PTREGS_OFF, %o0
230
231 ba,pt %xcc, rtrap_irq
232 nop
233
234sun4v_nonres_mondo:
	/* sun4v non-resumable error vector.
	 *
	 * Mirror image of sun4v_res_mondo above, but for the
	 * non-resumable error queue: copy the 64-byte report into the
	 * per-cpu kernel buffer, advance the queue head, then call
	 * sun4v_nonresum_error() via etrap_irq.
	 */
235 /* Head offset in %g2, tail offset in %g4. */
236 mov INTRQ_NONRESUM_MONDO_HEAD, %g2
237 ldxa [%g2] ASI_QUEUE, %g2
238 mov INTRQ_NONRESUM_MONDO_TAIL, %g4
239 ldxa [%g4] ASI_QUEUE, %g4
240 cmp %g2, %g4
241 be,pn %xcc, sun4v_nonres_mondo_queue_empty
242 nop
243
244 /* Get &trap_block[smp_processor_id()] into %g3. */
12eaa328
DM
	/* Scratchpad 0x0 -> fault info; subtract member offset for the
	 * trap_block base.
	 */
245 ldxa [%g0] ASI_SCRATCHPAD, %g3
246 sub %g3, TRAP_PER_CPU_FAULT_INFO, %g3
5b0c0572
DM
247
248 /* Get NONRESUM mondo queue base phys address into %g5. */
249 ldx [%g3 + TRAP_PER_CPU_NONRESUM_MONDO_PA], %g5
250
251 /* Get NONRESUM kernel buffer base phys address into %g7. */
252 ldx [%g3 + TRAP_PER_CPU_NONRESUM_KBUF_PA], %g7
253
254 /* If the first word is non-zero, queue is full. */
	/* First word of the kbuf slot is the "still pending" marker. */
255 ldxa [%g7 + %g2] ASI_PHYS_USE_EC, %g1
256 brnz,pn %g1, sun4v_nonres_mondo_queue_full
257 nop
258
259 /* Remember this entry's offset in %g1. */
260 mov %g2, %g1
261
262 /* Copy 64-byte queue entry into kernel buffer. */
	/* Unrolled 8 x 8-byte physical-address copy. */
263 ldxa [%g5 + %g2] ASI_PHYS_USE_EC, %g3
264 stxa %g3, [%g7 + %g2] ASI_PHYS_USE_EC
265 add %g2, 0x08, %g2
266 ldxa [%g5 + %g2] ASI_PHYS_USE_EC, %g3
267 stxa %g3, [%g7 + %g2] ASI_PHYS_USE_EC
268 add %g2, 0x08, %g2
269 ldxa [%g5 + %g2] ASI_PHYS_USE_EC, %g3
270 stxa %g3, [%g7 + %g2] ASI_PHYS_USE_EC
271 add %g2, 0x08, %g2
272 ldxa [%g5 + %g2] ASI_PHYS_USE_EC, %g3
273 stxa %g3, [%g7 + %g2] ASI_PHYS_USE_EC
274 add %g2, 0x08, %g2
275 ldxa [%g5 + %g2] ASI_PHYS_USE_EC, %g3
276 stxa %g3, [%g7 + %g2] ASI_PHYS_USE_EC
277 add %g2, 0x08, %g2
278 ldxa [%g5 + %g2] ASI_PHYS_USE_EC, %g3
279 stxa %g3, [%g7 + %g2] ASI_PHYS_USE_EC
280 add %g2, 0x08, %g2
281 ldxa [%g5 + %g2] ASI_PHYS_USE_EC, %g3
282 stxa %g3, [%g7 + %g2] ASI_PHYS_USE_EC
283 add %g2, 0x08, %g2
284 ldxa [%g5 + %g2] ASI_PHYS_USE_EC, %g3
285 stxa %g3, [%g7 + %g2] ASI_PHYS_USE_EC
286 add %g2, 0x08, %g2
287
288 /* Update queue head pointer. */
	/* 8192-byte ring buffer wrap. */
289 sethi %hi(8192 - 1), %g4
290 or %g4, %lo(8192 - 1), %g4
291 and %g2, %g4, %g2
292
293 mov INTRQ_NONRESUM_MONDO_HEAD, %g4
294 stxa %g2, [%g4] ASI_QUEUE
295 membar #Sync
296
297 /* Disable interrupts and save register state so we can call
298 * C code. The etrap handling will leave %g4 in %l4 for us
299 * when it's done.
300 */
301 rdpr %pil, %g2
302 wrpr %g0, 15, %pil
303 mov %g1, %g4
	/* rd %pc in the delay slot supplies etrap_irq's return PC. */
304 ba,pt %xcc, etrap_irq
305 rd %pc, %g7
306
307 /* Log the event. */
	/* %l4 = saved kbuf entry offset (from %g4 via etrap). */
308 add %sp, PTREGS_OFF, %o0
309 call sun4v_nonresum_error
310 mov %l4, %o1
311
312 /* Return from trap. */
313 ba,pt %xcc, rtrap_irq
314 nop
315
316sun4v_nonres_mondo_queue_empty:
317 retry
318
319sun4v_nonres_mondo_queue_full:
320 /* The queue is full, consolidate our damage by setting
321 * the head equal to the tail. We'll just trap again otherwise.
322 * Call C code to log the event.
323 */
	/* %g4 still holds the tail offset read at entry. */
324 mov INTRQ_NONRESUM_MONDO_HEAD, %g2
325 stxa %g4, [%g2] ASI_QUEUE
326 membar #Sync
327
328 rdpr %pil, %g2
329 wrpr %g0, 15, %pil
330 ba,pt %xcc, etrap_irq
331 rd %pc, %g7
332
333 call sun4v_nonresum_overflow
334 add %sp, PTREGS_OFF, %o0
335
336 ba,pt %xcc, rtrap_irq
337 nop
This page took 0.035824 seconds and 5 git commands to generate.