/* Provenance: arch/sparc64/kernel/sun4v_ivec.S
 * From commit: "[SPARC64]: Allocate ivector_table dynamically."
 */
1/* sun4v_ivec.S: Sun4v interrupt vector handling.
2 *
3 * Copyright (C) 2006 <davem@davemloft.net>
4 */
5
6#include <asm/cpudata.h>
7#include <asm/intr_queue.h>
fd0504c3 8#include <asm/pil.h>
5b0c0572
DM
9
10 .text
11 .align 32
12
13sun4v_cpu_mondo:
14 /* Head offset in %g2, tail offset in %g4.
15 * If they are the same, no work.
16 */
17 mov INTRQ_CPU_MONDO_HEAD, %g2
18 ldxa [%g2] ASI_QUEUE, %g2
19 mov INTRQ_CPU_MONDO_TAIL, %g4
20 ldxa [%g4] ASI_QUEUE, %g4
21 cmp %g2, %g4
22 be,pn %xcc, sun4v_cpu_mondo_queue_empty
23 nop
24
5cbc3073
DM
25 /* Get &trap_block[smp_processor_id()] into %g4. */
26 ldxa [%g0] ASI_SCRATCHPAD, %g4
27 sub %g4, TRAP_PER_CPU_FAULT_INFO, %g4
5b0c0572
DM
28
29 /* Get CPU mondo queue base phys address into %g7. */
5cbc3073 30 ldx [%g4 + TRAP_PER_CPU_CPU_MONDO_PA], %g7
5b0c0572
DM
31
32 /* Now get the cross-call arguments and handler PC, same
33 * layout as sun4u:
34 *
35 * 1st 64-bit word: low half is 32-bit PC, put into %g3 and jmpl to it
36 * high half is context arg to MMU flushes, into %g5
37 * 2nd 64-bit word: 64-bit arg, load into %g1
38 * 3rd 64-bit word: 64-bit arg, load into %g7
39 */
40 ldxa [%g7 + %g2] ASI_PHYS_USE_EC, %g3
41 add %g2, 0x8, %g2
42 srlx %g3, 32, %g5
43 ldxa [%g7 + %g2] ASI_PHYS_USE_EC, %g1
44 add %g2, 0x8, %g2
45 srl %g3, 0, %g3
46 ldxa [%g7 + %g2] ASI_PHYS_USE_EC, %g7
47 add %g2, 0x40 - 0x8 - 0x8, %g2
48
49 /* Update queue head pointer. */
5cbc3073 50 lduw [%g4 + TRAP_PER_CPU_CPU_MONDO_QMASK], %g4
5b0c0572
DM
51 and %g2, %g4, %g2
52
53 mov INTRQ_CPU_MONDO_HEAD, %g4
54 stxa %g2, [%g4] ASI_QUEUE
55 membar #Sync
56
57 jmpl %g3, %g0
58 nop
59
60sun4v_cpu_mondo_queue_empty:
61 retry
62
63sun4v_dev_mondo:
64 /* Head offset in %g2, tail offset in %g4. */
65 mov INTRQ_DEVICE_MONDO_HEAD, %g2
66 ldxa [%g2] ASI_QUEUE, %g2
67 mov INTRQ_DEVICE_MONDO_TAIL, %g4
68 ldxa [%g4] ASI_QUEUE, %g4
69 cmp %g2, %g4
70 be,pn %xcc, sun4v_dev_mondo_queue_empty
71 nop
72
5cbc3073
DM
73 /* Get &trap_block[smp_processor_id()] into %g4. */
74 ldxa [%g0] ASI_SCRATCHPAD, %g4
75 sub %g4, TRAP_PER_CPU_FAULT_INFO, %g4
5b0c0572
DM
76
77 /* Get DEV mondo queue base phys address into %g5. */
5cbc3073 78 ldx [%g4 + TRAP_PER_CPU_DEV_MONDO_PA], %g5
5b0c0572
DM
79
80 /* Load IVEC into %g3. */
81 ldxa [%g5 + %g2] ASI_PHYS_USE_EC, %g3
82 add %g2, 0x40, %g2
83
84 /* XXX There can be a full 64-byte block of data here.
85 * XXX This is how we can get at MSI vector data.
86 * XXX Current we do not capture this, but when we do we'll
87 * XXX need to add a 64-byte storage area in the struct ino_bucket
88 * XXX or the struct irq_desc.
89 */
90
91 /* Update queue head pointer, this frees up some registers. */
5cbc3073 92 lduw [%g4 + TRAP_PER_CPU_DEV_MONDO_QMASK], %g4
5b0c0572
DM
93 and %g2, %g4, %g2
94
95 mov INTRQ_DEVICE_MONDO_HEAD, %g4
96 stxa %g2, [%g4] ASI_QUEUE
97 membar #Sync
98
eb2d8d60 99 TRAP_LOAD_IRQ_WORK_PA(%g1, %g4)
5b0c0572 100
eb2d8d60
DM
101 /* Get __pa(&ivector_table[IVEC]) into %g4. */
102 sethi %hi(ivector_table_pa), %g4
103 ldx [%g4 + %lo(ivector_table_pa)], %g4
a650d383 104 sllx %g3, 4, %g3
5b0c0572
DM
105 add %g4, %g3, %g4
106
eb2d8d60
DM
107 ldx [%g1], %g2
108 stxa %g2, [%g4] ASI_PHYS_USE_EC
109 stx %g4, [%g1]
5b0c0572
DM
110
111 /* Signal the interrupt by setting (1 << pil) in %softint. */
fd0504c3 112 wr %g0, 1 << PIL_DEVICE_IRQ, %set_softint
5b0c0572
DM
113
114sun4v_dev_mondo_queue_empty:
115 retry
116
117sun4v_res_mondo:
118 /* Head offset in %g2, tail offset in %g4. */
119 mov INTRQ_RESUM_MONDO_HEAD, %g2
120 ldxa [%g2] ASI_QUEUE, %g2
121 mov INTRQ_RESUM_MONDO_TAIL, %g4
122 ldxa [%g4] ASI_QUEUE, %g4
123 cmp %g2, %g4
124 be,pn %xcc, sun4v_res_mondo_queue_empty
125 nop
126
127 /* Get &trap_block[smp_processor_id()] into %g3. */
12eaa328
DM
128 ldxa [%g0] ASI_SCRATCHPAD, %g3
129 sub %g3, TRAP_PER_CPU_FAULT_INFO, %g3
5b0c0572
DM
130
131 /* Get RES mondo queue base phys address into %g5. */
132 ldx [%g3 + TRAP_PER_CPU_RESUM_MONDO_PA], %g5
133
134 /* Get RES kernel buffer base phys address into %g7. */
135 ldx [%g3 + TRAP_PER_CPU_RESUM_KBUF_PA], %g7
136
137 /* If the first word is non-zero, queue is full. */
138 ldxa [%g7 + %g2] ASI_PHYS_USE_EC, %g1
139 brnz,pn %g1, sun4v_res_mondo_queue_full
140 nop
141
5cbc3073
DM
142 lduw [%g3 + TRAP_PER_CPU_RESUM_QMASK], %g4
143
5b0c0572
DM
144 /* Remember this entry's offset in %g1. */
145 mov %g2, %g1
146
147 /* Copy 64-byte queue entry into kernel buffer. */
148 ldxa [%g5 + %g2] ASI_PHYS_USE_EC, %g3
149 stxa %g3, [%g7 + %g2] ASI_PHYS_USE_EC
150 add %g2, 0x08, %g2
151 ldxa [%g5 + %g2] ASI_PHYS_USE_EC, %g3
152 stxa %g3, [%g7 + %g2] ASI_PHYS_USE_EC
153 add %g2, 0x08, %g2
154 ldxa [%g5 + %g2] ASI_PHYS_USE_EC, %g3
155 stxa %g3, [%g7 + %g2] ASI_PHYS_USE_EC
156 add %g2, 0x08, %g2
157 ldxa [%g5 + %g2] ASI_PHYS_USE_EC, %g3
158 stxa %g3, [%g7 + %g2] ASI_PHYS_USE_EC
159 add %g2, 0x08, %g2
160 ldxa [%g5 + %g2] ASI_PHYS_USE_EC, %g3
161 stxa %g3, [%g7 + %g2] ASI_PHYS_USE_EC
162 add %g2, 0x08, %g2
163 ldxa [%g5 + %g2] ASI_PHYS_USE_EC, %g3
164 stxa %g3, [%g7 + %g2] ASI_PHYS_USE_EC
165 add %g2, 0x08, %g2
166 ldxa [%g5 + %g2] ASI_PHYS_USE_EC, %g3
167 stxa %g3, [%g7 + %g2] ASI_PHYS_USE_EC
168 add %g2, 0x08, %g2
169 ldxa [%g5 + %g2] ASI_PHYS_USE_EC, %g3
170 stxa %g3, [%g7 + %g2] ASI_PHYS_USE_EC
171 add %g2, 0x08, %g2
172
173 /* Update queue head pointer. */
5b0c0572
DM
174 and %g2, %g4, %g2
175
176 mov INTRQ_RESUM_MONDO_HEAD, %g4
177 stxa %g2, [%g4] ASI_QUEUE
178 membar #Sync
179
180 /* Disable interrupts and save register state so we can call
181 * C code. The etrap handling will leave %g4 in %l4 for us
182 * when it's done.
183 */
184 rdpr %pil, %g2
185 wrpr %g0, 15, %pil
186 mov %g1, %g4
187 ba,pt %xcc, etrap_irq
188 rd %pc, %g7
10e26723
DM
189#ifdef CONFIG_TRACE_IRQFLAGS
190 call trace_hardirqs_off
191 nop
192#endif
5b0c0572
DM
193 /* Log the event. */
194 add %sp, PTREGS_OFF, %o0
195 call sun4v_resum_error
196 mov %l4, %o1
197
198 /* Return from trap. */
199 ba,pt %xcc, rtrap_irq
200 nop
201
202sun4v_res_mondo_queue_empty:
203 retry
204
205sun4v_res_mondo_queue_full:
206 /* The queue is full, consolidate our damage by setting
207 * the head equal to the tail. We'll just trap again otherwise.
208 * Call C code to log the event.
209 */
210 mov INTRQ_RESUM_MONDO_HEAD, %g2
211 stxa %g4, [%g2] ASI_QUEUE
212 membar #Sync
213
214 rdpr %pil, %g2
215 wrpr %g0, 15, %pil
216 ba,pt %xcc, etrap_irq
217 rd %pc, %g7
10e26723
DM
218#ifdef CONFIG_TRACE_IRQFLAGS
219 call trace_hardirqs_off
220 nop
221#endif
5b0c0572
DM
222 call sun4v_resum_overflow
223 add %sp, PTREGS_OFF, %o0
224
225 ba,pt %xcc, rtrap_irq
226 nop
227
228sun4v_nonres_mondo:
229 /* Head offset in %g2, tail offset in %g4. */
230 mov INTRQ_NONRESUM_MONDO_HEAD, %g2
231 ldxa [%g2] ASI_QUEUE, %g2
232 mov INTRQ_NONRESUM_MONDO_TAIL, %g4
233 ldxa [%g4] ASI_QUEUE, %g4
234 cmp %g2, %g4
235 be,pn %xcc, sun4v_nonres_mondo_queue_empty
236 nop
237
238 /* Get &trap_block[smp_processor_id()] into %g3. */
12eaa328
DM
239 ldxa [%g0] ASI_SCRATCHPAD, %g3
240 sub %g3, TRAP_PER_CPU_FAULT_INFO, %g3
5b0c0572
DM
241
242 /* Get RES mondo queue base phys address into %g5. */
243 ldx [%g3 + TRAP_PER_CPU_NONRESUM_MONDO_PA], %g5
244
245 /* Get RES kernel buffer base phys address into %g7. */
246 ldx [%g3 + TRAP_PER_CPU_NONRESUM_KBUF_PA], %g7
247
248 /* If the first word is non-zero, queue is full. */
249 ldxa [%g7 + %g2] ASI_PHYS_USE_EC, %g1
250 brnz,pn %g1, sun4v_nonres_mondo_queue_full
251 nop
252
5cbc3073
DM
253 lduw [%g3 + TRAP_PER_CPU_NONRESUM_QMASK], %g4
254
5b0c0572
DM
255 /* Remember this entry's offset in %g1. */
256 mov %g2, %g1
257
258 /* Copy 64-byte queue entry into kernel buffer. */
259 ldxa [%g5 + %g2] ASI_PHYS_USE_EC, %g3
260 stxa %g3, [%g7 + %g2] ASI_PHYS_USE_EC
261 add %g2, 0x08, %g2
262 ldxa [%g5 + %g2] ASI_PHYS_USE_EC, %g3
263 stxa %g3, [%g7 + %g2] ASI_PHYS_USE_EC
264 add %g2, 0x08, %g2
265 ldxa [%g5 + %g2] ASI_PHYS_USE_EC, %g3
266 stxa %g3, [%g7 + %g2] ASI_PHYS_USE_EC
267 add %g2, 0x08, %g2
268 ldxa [%g5 + %g2] ASI_PHYS_USE_EC, %g3
269 stxa %g3, [%g7 + %g2] ASI_PHYS_USE_EC
270 add %g2, 0x08, %g2
271 ldxa [%g5 + %g2] ASI_PHYS_USE_EC, %g3
272 stxa %g3, [%g7 + %g2] ASI_PHYS_USE_EC
273 add %g2, 0x08, %g2
274 ldxa [%g5 + %g2] ASI_PHYS_USE_EC, %g3
275 stxa %g3, [%g7 + %g2] ASI_PHYS_USE_EC
276 add %g2, 0x08, %g2
277 ldxa [%g5 + %g2] ASI_PHYS_USE_EC, %g3
278 stxa %g3, [%g7 + %g2] ASI_PHYS_USE_EC
279 add %g2, 0x08, %g2
280 ldxa [%g5 + %g2] ASI_PHYS_USE_EC, %g3
281 stxa %g3, [%g7 + %g2] ASI_PHYS_USE_EC
282 add %g2, 0x08, %g2
283
284 /* Update queue head pointer. */
5b0c0572
DM
285 and %g2, %g4, %g2
286
287 mov INTRQ_NONRESUM_MONDO_HEAD, %g4
288 stxa %g2, [%g4] ASI_QUEUE
289 membar #Sync
290
291 /* Disable interrupts and save register state so we can call
292 * C code. The etrap handling will leave %g4 in %l4 for us
293 * when it's done.
294 */
295 rdpr %pil, %g2
296 wrpr %g0, 15, %pil
297 mov %g1, %g4
298 ba,pt %xcc, etrap_irq
299 rd %pc, %g7
10e26723
DM
300#ifdef CONFIG_TRACE_IRQFLAGS
301 call trace_hardirqs_off
302 nop
303#endif
5b0c0572
DM
304 /* Log the event. */
305 add %sp, PTREGS_OFF, %o0
306 call sun4v_nonresum_error
307 mov %l4, %o1
308
309 /* Return from trap. */
310 ba,pt %xcc, rtrap_irq
311 nop
312
313sun4v_nonres_mondo_queue_empty:
314 retry
315
316sun4v_nonres_mondo_queue_full:
317 /* The queue is full, consolidate our damage by setting
318 * the head equal to the tail. We'll just trap again otherwise.
319 * Call C code to log the event.
320 */
321 mov INTRQ_NONRESUM_MONDO_HEAD, %g2
322 stxa %g4, [%g2] ASI_QUEUE
323 membar #Sync
324
325 rdpr %pil, %g2
326 wrpr %g0, 15, %pil
327 ba,pt %xcc, etrap_irq
328 rd %pc, %g7
10e26723
DM
329#ifdef CONFIG_TRACE_IRQFLAGS
330 call trace_hardirqs_off
331 nop
332#endif
5b0c0572
DM
333 call sun4v_nonresum_overflow
334 add %sp, PTREGS_OFF, %o0
335
336 ba,pt %xcc, rtrap_irq
337 nop
/* end of sun4v_ivec.S */