[SPARC64]: Add irqtrace/stacktrace/lockdep support.
arch/sparc64/kernel/sun4v_ivec.S

/* sun4v_ivec.S: Sun4v interrupt vector handling.
 *
 * Copyright (C) 2006 <davem@davemloft.net>
 */

#include <asm/cpudata.h>
#include <asm/intr_queue.h>
#include <asm/pil.h>

	.text
	.align	32

sun4v_cpu_mondo:
	/* Head offset in %g2, tail offset in %g4.
	 * If they are the same, no work.
	 */
	mov	INTRQ_CPU_MONDO_HEAD, %g2
	ldxa	[%g2] ASI_QUEUE, %g2
	mov	INTRQ_CPU_MONDO_TAIL, %g4
	ldxa	[%g4] ASI_QUEUE, %g4
	cmp	%g2, %g4
	be,pn	%xcc, sun4v_cpu_mondo_queue_empty
	 nop

	/* Get &trap_block[smp_processor_id()] into %g3. */
	ldxa	[%g0] ASI_SCRATCHPAD, %g3
	sub	%g3, TRAP_PER_CPU_FAULT_INFO, %g3
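	/* (Scratchpad register zero holds the address of this cpu's fault
	 *  status area, which sits TRAP_PER_CPU_FAULT_INFO bytes into its
	 *  trap_block[] entry.)
	 */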

	/* Get CPU mondo queue base phys address into %g7. */
	ldx	[%g3 + TRAP_PER_CPU_CPU_MONDO_PA], %g7

	/* Now get the cross-call arguments and handler PC, same
	 * layout as sun4u:
	 *
	 * 1st 64-bit word: low half is 32-bit PC, put into %g3 and jmpl to it
	 *                  high half is context arg to MMU flushes, into %g5
	 * 2nd 64-bit word: 64-bit arg, load into %g1
	 * 3rd 64-bit word: 64-bit arg, load into %g7
	 */
	ldxa	[%g7 + %g2] ASI_PHYS_USE_EC, %g3
	add	%g2, 0x8, %g2
	srlx	%g3, 32, %g5
	ldxa	[%g7 + %g2] ASI_PHYS_USE_EC, %g1
	add	%g2, 0x8, %g2
	srl	%g3, 0, %g3
	ldxa	[%g7 + %g2] ASI_PHYS_USE_EC, %g7
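	/* Step %g2 past the rest of this 64-byte mondo entry; the two
	 * adds above already advanced it by 16 bytes.
	 */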
	add	%g2, 0x40 - 0x8 - 0x8, %g2

	/* Update queue head pointer. */
	sethi	%hi(8192 - 1), %g4
	or	%g4, %lo(8192 - 1), %g4
	and	%g2, %g4, %g2

	mov	INTRQ_CPU_MONDO_HEAD, %g4
	stxa	%g2, [%g4] ASI_QUEUE
	membar	#Sync

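	/* Dispatch to the cross-call handler; %g3 holds its PC and
	 * %g5, %g1 and %g7 carry the arguments loaded above.
	 */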
	jmpl	%g3, %g0
	 nop

sun4v_cpu_mondo_queue_empty:
	retry

sun4v_dev_mondo:
	/* Head offset in %g2, tail offset in %g4. */
	mov	INTRQ_DEVICE_MONDO_HEAD, %g2
	ldxa	[%g2] ASI_QUEUE, %g2
	mov	INTRQ_DEVICE_MONDO_TAIL, %g4
	ldxa	[%g4] ASI_QUEUE, %g4
	cmp	%g2, %g4
	be,pn	%xcc, sun4v_dev_mondo_queue_empty
	 nop

	/* Get &trap_block[smp_processor_id()] into %g3. */
	ldxa	[%g0] ASI_SCRATCHPAD, %g3
	sub	%g3, TRAP_PER_CPU_FAULT_INFO, %g3

	/* Get DEV mondo queue base phys address into %g5. */
	ldx	[%g3 + TRAP_PER_CPU_DEV_MONDO_PA], %g5

	/* Load IVEC into %g3. */
	ldxa	[%g5 + %g2] ASI_PHYS_USE_EC, %g3
	add	%g2, 0x40, %g2
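	/* Dev mondo entries are 64 bytes; only the first word (the IVEC)
	 * is read here, but the head must still step over the whole entry.
	 */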

	/* XXX There can be a full 64-byte block of data here.
	 * XXX This is how we can get at MSI vector data.
	 * XXX Currently we do not capture this, but when we do we'll
	 * XXX need to add a 64-byte storage area in the struct ino_bucket
	 * XXX or the struct irq_desc.
	 */

	/* Update queue head pointer; this frees up some registers. */
	sethi	%hi(8192 - 1), %g4
	or	%g4, %lo(8192 - 1), %g4
	and	%g2, %g4, %g2

	mov	INTRQ_DEVICE_MONDO_HEAD, %g4
	stxa	%g2, [%g4] ASI_QUEUE
	membar	#Sync

	/* Get &__irq_work[smp_processor_id()] into %g1. */
	TRAP_LOAD_IRQ_WORK(%g1, %g4)

	/* Get &ivector_table[IVEC] into %g4. */
	sethi	%hi(ivector_table), %g4
	sllx	%g3, 3, %g3
	or	%g4, %lo(ivector_table), %g4
	add	%g4, %g3, %g4
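	/* (ivector_table[] entries are 8 bytes each, hence the shift by 3.) */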

	/* Insert ivector_table[] entry into __irq_work[] queue. */
	lduw	[%g1], %g2		/* g2 = irq_work(cpu) */
	stw	%g2, [%g4 + 0x00]	/* bucket->irq_chain = g2 */
	stw	%g4, [%g1]		/* irq_work(cpu) = bucket */
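	/* The bucket is now chained onto this cpu's pending work list;
	 * the PIL_DEVICE_IRQ softint set below causes the IRQ dispatch
	 * code to drain that list once we retry from this trap.
	 */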

	/* Signal the interrupt by setting (1 << pil) in %softint. */
	wr	%g0, 1 << PIL_DEVICE_IRQ, %set_softint

sun4v_dev_mondo_queue_empty:
	retry

sun4v_res_mondo:
	/* Head offset in %g2, tail offset in %g4. */
	mov	INTRQ_RESUM_MONDO_HEAD, %g2
	ldxa	[%g2] ASI_QUEUE, %g2
	mov	INTRQ_RESUM_MONDO_TAIL, %g4
	ldxa	[%g4] ASI_QUEUE, %g4
	cmp	%g2, %g4
	be,pn	%xcc, sun4v_res_mondo_queue_empty
	 nop

	/* Get &trap_block[smp_processor_id()] into %g3. */
	ldxa	[%g0] ASI_SCRATCHPAD, %g3
	sub	%g3, TRAP_PER_CPU_FAULT_INFO, %g3

	/* Get RES mondo queue base phys address into %g5. */
	ldx	[%g3 + TRAP_PER_CPU_RESUM_MONDO_PA], %g5

	/* Get RES kernel buffer base phys address into %g7. */
	ldx	[%g3 + TRAP_PER_CPU_RESUM_KBUF_PA], %g7

	/* If the first word is non-zero, queue is full. */
	ldxa	[%g7 + %g2] ASI_PHYS_USE_EC, %g1
	brnz,pn	%g1, sun4v_res_mondo_queue_full
	 nop
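	/* (A non-zero first word means the previous report in this
	 *  kernel buffer slot has not been consumed yet.)
	 */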

	/* Remember this entry's offset in %g1. */
	mov	%g2, %g1

	/* Copy 64-byte queue entry into kernel buffer. */
	ldxa	[%g5 + %g2] ASI_PHYS_USE_EC, %g3
	stxa	%g3, [%g7 + %g2] ASI_PHYS_USE_EC
	add	%g2, 0x08, %g2
	ldxa	[%g5 + %g2] ASI_PHYS_USE_EC, %g3
	stxa	%g3, [%g7 + %g2] ASI_PHYS_USE_EC
	add	%g2, 0x08, %g2
	ldxa	[%g5 + %g2] ASI_PHYS_USE_EC, %g3
	stxa	%g3, [%g7 + %g2] ASI_PHYS_USE_EC
	add	%g2, 0x08, %g2
	ldxa	[%g5 + %g2] ASI_PHYS_USE_EC, %g3
	stxa	%g3, [%g7 + %g2] ASI_PHYS_USE_EC
	add	%g2, 0x08, %g2
	ldxa	[%g5 + %g2] ASI_PHYS_USE_EC, %g3
	stxa	%g3, [%g7 + %g2] ASI_PHYS_USE_EC
	add	%g2, 0x08, %g2
	ldxa	[%g5 + %g2] ASI_PHYS_USE_EC, %g3
	stxa	%g3, [%g7 + %g2] ASI_PHYS_USE_EC
	add	%g2, 0x08, %g2
	ldxa	[%g5 + %g2] ASI_PHYS_USE_EC, %g3
	stxa	%g3, [%g7 + %g2] ASI_PHYS_USE_EC
	add	%g2, 0x08, %g2
	ldxa	[%g5 + %g2] ASI_PHYS_USE_EC, %g3
	stxa	%g3, [%g7 + %g2] ASI_PHYS_USE_EC
	add	%g2, 0x08, %g2

	/* Update queue head pointer. */
	sethi	%hi(8192 - 1), %g4
	or	%g4, %lo(8192 - 1), %g4
	and	%g2, %g4, %g2

	mov	INTRQ_RESUM_MONDO_HEAD, %g4
	stxa	%g2, [%g4] ASI_QUEUE
	membar	#Sync

	/* Disable interrupts and save register state so we can call
	 * C code. The etrap handling will leave %g4 in %l4 for us
	 * when it's done.
	 */
	rdpr	%pil, %g2
	wrpr	%g0, 15, %pil
	mov	%g1, %g4
	ba,pt	%xcc, etrap_irq
	 rd	%pc, %g7
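	/* etrap_irq builds the pt_regs frame and resumes execution here. */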
#ifdef CONFIG_TRACE_IRQFLAGS
	call	trace_hardirqs_off
	 nop
#endif
	/* Log the event. */
	add	%sp, PTREGS_OFF, %o0
	call	sun4v_resum_error
	 mov	%l4, %o1

	/* Return from trap. */
	ba,pt	%xcc, rtrap_irq
	 nop

sun4v_res_mondo_queue_empty:
	retry

sun4v_res_mondo_queue_full:
	/* The queue is full; consolidate our damage by setting
	 * the head equal to the tail. We'll just trap again otherwise.
	 * Call C code to log the event.
	 */
	mov	INTRQ_RESUM_MONDO_HEAD, %g2
	stxa	%g4, [%g2] ASI_QUEUE
	membar	#Sync

	rdpr	%pil, %g2
	wrpr	%g0, 15, %pil
	ba,pt	%xcc, etrap_irq
	 rd	%pc, %g7
#ifdef CONFIG_TRACE_IRQFLAGS
	call	trace_hardirqs_off
	 nop
#endif
	call	sun4v_resum_overflow
	 add	%sp, PTREGS_OFF, %o0

	ba,pt	%xcc, rtrap_irq
	 nop

sun4v_nonres_mondo:
	/* Head offset in %g2, tail offset in %g4. */
	mov	INTRQ_NONRESUM_MONDO_HEAD, %g2
	ldxa	[%g2] ASI_QUEUE, %g2
	mov	INTRQ_NONRESUM_MONDO_TAIL, %g4
	ldxa	[%g4] ASI_QUEUE, %g4
	cmp	%g2, %g4
	be,pn	%xcc, sun4v_nonres_mondo_queue_empty
	 nop

	/* Get &trap_block[smp_processor_id()] into %g3. */
	ldxa	[%g0] ASI_SCRATCHPAD, %g3
	sub	%g3, TRAP_PER_CPU_FAULT_INFO, %g3

	/* Get NONRES mondo queue base phys address into %g5. */
	ldx	[%g3 + TRAP_PER_CPU_NONRESUM_MONDO_PA], %g5

	/* Get NONRES kernel buffer base phys address into %g7. */
	ldx	[%g3 + TRAP_PER_CPU_NONRESUM_KBUF_PA], %g7

	/* If the first word is non-zero, queue is full. */
	ldxa	[%g7 + %g2] ASI_PHYS_USE_EC, %g1
	brnz,pn	%g1, sun4v_nonres_mondo_queue_full
	 nop

	/* Remember this entry's offset in %g1. */
	mov	%g2, %g1

	/* Copy 64-byte queue entry into kernel buffer. */
	ldxa	[%g5 + %g2] ASI_PHYS_USE_EC, %g3
	stxa	%g3, [%g7 + %g2] ASI_PHYS_USE_EC
	add	%g2, 0x08, %g2
	ldxa	[%g5 + %g2] ASI_PHYS_USE_EC, %g3
	stxa	%g3, [%g7 + %g2] ASI_PHYS_USE_EC
	add	%g2, 0x08, %g2
	ldxa	[%g5 + %g2] ASI_PHYS_USE_EC, %g3
	stxa	%g3, [%g7 + %g2] ASI_PHYS_USE_EC
	add	%g2, 0x08, %g2
	ldxa	[%g5 + %g2] ASI_PHYS_USE_EC, %g3
	stxa	%g3, [%g7 + %g2] ASI_PHYS_USE_EC
	add	%g2, 0x08, %g2
	ldxa	[%g5 + %g2] ASI_PHYS_USE_EC, %g3
	stxa	%g3, [%g7 + %g2] ASI_PHYS_USE_EC
	add	%g2, 0x08, %g2
	ldxa	[%g5 + %g2] ASI_PHYS_USE_EC, %g3
	stxa	%g3, [%g7 + %g2] ASI_PHYS_USE_EC
	add	%g2, 0x08, %g2
	ldxa	[%g5 + %g2] ASI_PHYS_USE_EC, %g3
	stxa	%g3, [%g7 + %g2] ASI_PHYS_USE_EC
	add	%g2, 0x08, %g2
	ldxa	[%g5 + %g2] ASI_PHYS_USE_EC, %g3
	stxa	%g3, [%g7 + %g2] ASI_PHYS_USE_EC
	add	%g2, 0x08, %g2

	/* Update queue head pointer. */
	sethi	%hi(8192 - 1), %g4
	or	%g4, %lo(8192 - 1), %g4
	and	%g2, %g4, %g2

	mov	INTRQ_NONRESUM_MONDO_HEAD, %g4
	stxa	%g2, [%g4] ASI_QUEUE
	membar	#Sync

	/* Disable interrupts and save register state so we can call
	 * C code. The etrap handling will leave %g4 in %l4 for us
	 * when it's done.
	 */
	rdpr	%pil, %g2
	wrpr	%g0, 15, %pil
	mov	%g1, %g4
	ba,pt	%xcc, etrap_irq
	 rd	%pc, %g7
#ifdef CONFIG_TRACE_IRQFLAGS
	call	trace_hardirqs_off
	 nop
#endif
	/* Log the event. */
	add	%sp, PTREGS_OFF, %o0
	call	sun4v_nonresum_error
	 mov	%l4, %o1

	/* Return from trap. */
	ba,pt	%xcc, rtrap_irq
	 nop

sun4v_nonres_mondo_queue_empty:
	retry

sun4v_nonres_mondo_queue_full:
	/* The queue is full; consolidate our damage by setting
	 * the head equal to the tail. We'll just trap again otherwise.
	 * Call C code to log the event.
	 */
	mov	INTRQ_NONRESUM_MONDO_HEAD, %g2
	stxa	%g4, [%g2] ASI_QUEUE
	membar	#Sync

	rdpr	%pil, %g2
	wrpr	%g0, 15, %pil
	ba,pt	%xcc, etrap_irq
	 rd	%pc, %g7
#ifdef CONFIG_TRACE_IRQFLAGS
	call	trace_hardirqs_off
	 nop
#endif
	call	sun4v_nonresum_overflow
	 add	%sp, PTREGS_OFF, %o0

	ba,pt	%xcc, rtrap_irq
	 nop