/* Web-scrape provenance (gitweb page header, kept as a comment):
 * [IA64] update sn2_defconfig
 * [deliverable/linux.git] / arch / sparc64 / kernel / sun4v_ivec.S
 */
1 /* sun4v_ivec.S: Sun4v interrupt vector handling.
2 *
3 * Copyright (C) 2006 <davem@davemloft.net>
4 */
5
6 #include <asm/cpudata.h>
7 #include <asm/intr_queue.h>
8 #include <asm/pil.h>
9
10 .text
11 .align 32
12
13 sun4v_cpu_mondo:
	/* sun4v CPU mondo (cross-call) interrupt vector.  Pops one
	 * 0x40-byte entry off this cpu's CPU mondo queue and jumps
	 * straight to the handler PC the entry carries, passing the
	 * entry's arguments in %g5, %g1 and %g7.
	 */
14 /* Head offset in %g2, tail offset in %g4.
15 * If they are the same, no work.
16 */
17 mov INTRQ_CPU_MONDO_HEAD, %g2
18 ldxa [%g2] ASI_QUEUE, %g2
19 mov INTRQ_CPU_MONDO_TAIL, %g4
20 ldxa [%g4] ASI_QUEUE, %g4
21 cmp %g2, %g4
22 be,pn %xcc, sun4v_cpu_mondo_queue_empty
23 nop
24
25 /* Get &trap_block[smp_processor_id()] into %g4. */
26 ldxa [%g0] ASI_SCRATCHPAD, %g4
27 sub %g4, TRAP_PER_CPU_FAULT_INFO, %g4
28
29 /* Get CPU mondo queue base phys address into %g7. */
30 ldx [%g4 + TRAP_PER_CPU_CPU_MONDO_PA], %g7
31
32 /* Now get the cross-call arguments and handler PC, same
33 * layout as sun4u:
34 *
35 * 1st 64-bit word: low half is 32-bit PC, put into %g3 and jmpl to it
36 * high half is context arg to MMU flushes, into %g5
37 * 2nd 64-bit word: 64-bit arg, load into %g1
38 * 3rd 64-bit word: 64-bit arg, load into %g7
39 */
40 ldxa [%g7 + %g2] ASI_PHYS_USE_EC, %g3
41 add %g2, 0x8, %g2
42 srlx %g3, 32, %g5 /* %g5 = context arg (high 32 bits of word 0) */
43 ldxa [%g7 + %g2] ASI_PHYS_USE_EC, %g1
44 add %g2, 0x8, %g2
45 srl %g3, 0, %g3 /* %g3 = handler PC (zero-extended low 32 bits) */
46 ldxa [%g7 + %g2] ASI_PHYS_USE_EC, %g7
47 add %g2, 0x40 - 0x8 - 0x8, %g2 /* net head advance: one full 0x40-byte entry */
48
49 /* Update queue head pointer. */
50 lduw [%g4 + TRAP_PER_CPU_CPU_MONDO_QMASK], %g4 /* %g4 = queue size mask */
51 and %g2, %g4, %g2 /* wrap head offset at queue size */
52
53 mov INTRQ_CPU_MONDO_HEAD, %g4
54 stxa %g2, [%g4] ASI_QUEUE
55 membar #Sync /* complete the head-pointer store before dispatch */
56
57 jmpl %g3, %g0 /* jump to handler: PC in %g3, args in %g5/%g1/%g7 */
58 nop
59
60 sun4v_cpu_mondo_queue_empty:
61 retry /* queue empty: return from the trap */
62
63 sun4v_dev_mondo:
	/* sun4v device mondo interrupt vector.  Dequeues one device
	 * interrupt report, chains its ivector_table[] bucket onto this
	 * cpu's __irq_work list, and raises the PIL_DEVICE_IRQ software
	 * interrupt.
	 */
64 /* Head offset in %g2, tail offset in %g4. */
65 mov INTRQ_DEVICE_MONDO_HEAD, %g2
66 ldxa [%g2] ASI_QUEUE, %g2
67 mov INTRQ_DEVICE_MONDO_TAIL, %g4
68 ldxa [%g4] ASI_QUEUE, %g4
69 cmp %g2, %g4
70 be,pn %xcc, sun4v_dev_mondo_queue_empty
71 nop
72
73 /* Get &trap_block[smp_processor_id()] into %g4. */
74 ldxa [%g0] ASI_SCRATCHPAD, %g4
75 sub %g4, TRAP_PER_CPU_FAULT_INFO, %g4
76
77 /* Get DEV mondo queue base phys address into %g5. */
78 ldx [%g4 + TRAP_PER_CPU_DEV_MONDO_PA], %g5
79
80 /* Load IVEC into %g3. */
81 ldxa [%g5 + %g2] ASI_PHYS_USE_EC, %g3
82 add %g2, 0x40, %g2 /* advance head past this 0x40-byte entry */
83
84 /* XXX There can be a full 64-byte block of data here.
85 * XXX This is how we can get at MSI vector data.
86 * XXX Currently we do not capture this, but when we do we'll
87 * XXX need to add a 64-byte storage area in the struct ino_bucket
88 * XXX or the struct irq_desc.
89 */
90
91 /* Update queue head pointer, this frees up some registers. */
92 lduw [%g4 + TRAP_PER_CPU_DEV_MONDO_QMASK], %g4 /* %g4 = queue size mask */
93 and %g2, %g4, %g2 /* wrap head offset at queue size */
94
95 mov INTRQ_DEVICE_MONDO_HEAD, %g4
96 stxa %g2, [%g4] ASI_QUEUE
97 membar #Sync /* complete the head-pointer store before continuing */
98
99 /* Get &__irq_work[smp_processor_id()] into %g1. */
100 TRAP_LOAD_IRQ_WORK(%g1, %g4)
101
102 /* Get &ivector_table[IVEC] into %g4. */
103 sethi %hi(ivector_table), %g4
104 sllx %g3, 3, %g3 /* byte offset = IVEC * 8 */
105 or %g4, %lo(ivector_table), %g4
106 add %g4, %g3, %g4
107
108 /* Insert ivector_table[] entry into __irq_work[] queue. */
109 lduw [%g1], %g2 /* g2 = irq_work(cpu) */
110 stw %g2, [%g4 + 0x00] /* bucket->irq_chain = g2 */
111 stw %g4, [%g1] /* irq_work(cpu) = bucket */
112
113 /* Signal the interrupt by setting (1 << pil) in %softint. */
114 wr %g0, 1 << PIL_DEVICE_IRQ, %set_softint
115
116 sun4v_dev_mondo_queue_empty:
117 retry /* queue empty (or done): return from the trap */
118
119 sun4v_res_mondo:
	/* sun4v resumable-error mondo vector.  Copies the 64-byte error
	 * report from the RESUM queue into the per-cpu kernel buffer,
	 * then enters C code (sun4v_resum_error) via etrap_irq.  If the
	 * kernel buffer slot is already occupied, the queue has
	 * overflowed and we report that instead.
	 */
120 /* Head offset in %g2, tail offset in %g4. */
121 mov INTRQ_RESUM_MONDO_HEAD, %g2
122 ldxa [%g2] ASI_QUEUE, %g2
123 mov INTRQ_RESUM_MONDO_TAIL, %g4
124 ldxa [%g4] ASI_QUEUE, %g4
125 cmp %g2, %g4
126 be,pn %xcc, sun4v_res_mondo_queue_empty
127 nop
128
129 /* Get &trap_block[smp_processor_id()] into %g3. */
130 ldxa [%g0] ASI_SCRATCHPAD, %g3
131 sub %g3, TRAP_PER_CPU_FAULT_INFO, %g3
132
133 /* Get RES mondo queue base phys address into %g5. */
134 ldx [%g3 + TRAP_PER_CPU_RESUM_MONDO_PA], %g5
135
136 /* Get RES kernel buffer base phys address into %g7. */
137 ldx [%g3 + TRAP_PER_CPU_RESUM_KBUF_PA], %g7
138
139 /* If the first word is non-zero, queue is full. */
140 ldxa [%g7 + %g2] ASI_PHYS_USE_EC, %g1
141 brnz,pn %g1, sun4v_res_mondo_queue_full
142 nop
143
144 lduw [%g3 + TRAP_PER_CPU_RESUM_QMASK], %g4 /* %g4 = queue size mask */
145
146 /* Remember this entry's offset in %g1. */
147 mov %g2, %g1
148
149 /* Copy 64-byte queue entry into kernel buffer. */
	/* Eight unrolled 8-byte physical copies; %g2 advances 0x40 total. */
150 ldxa [%g5 + %g2] ASI_PHYS_USE_EC, %g3
151 stxa %g3, [%g7 + %g2] ASI_PHYS_USE_EC
152 add %g2, 0x08, %g2
153 ldxa [%g5 + %g2] ASI_PHYS_USE_EC, %g3
154 stxa %g3, [%g7 + %g2] ASI_PHYS_USE_EC
155 add %g2, 0x08, %g2
156 ldxa [%g5 + %g2] ASI_PHYS_USE_EC, %g3
157 stxa %g3, [%g7 + %g2] ASI_PHYS_USE_EC
158 add %g2, 0x08, %g2
159 ldxa [%g5 + %g2] ASI_PHYS_USE_EC, %g3
160 stxa %g3, [%g7 + %g2] ASI_PHYS_USE_EC
161 add %g2, 0x08, %g2
162 ldxa [%g5 + %g2] ASI_PHYS_USE_EC, %g3
163 stxa %g3, [%g7 + %g2] ASI_PHYS_USE_EC
164 add %g2, 0x08, %g2
165 ldxa [%g5 + %g2] ASI_PHYS_USE_EC, %g3
166 stxa %g3, [%g7 + %g2] ASI_PHYS_USE_EC
167 add %g2, 0x08, %g2
168 ldxa [%g5 + %g2] ASI_PHYS_USE_EC, %g3
169 stxa %g3, [%g7 + %g2] ASI_PHYS_USE_EC
170 add %g2, 0x08, %g2
171 ldxa [%g5 + %g2] ASI_PHYS_USE_EC, %g3
172 stxa %g3, [%g7 + %g2] ASI_PHYS_USE_EC
173 add %g2, 0x08, %g2
174
175 /* Update queue head pointer. */
176 and %g2, %g4, %g2 /* wrap head offset at queue size */
177
178 mov INTRQ_RESUM_MONDO_HEAD, %g4
179 stxa %g2, [%g4] ASI_QUEUE
180 membar #Sync /* complete the head-pointer store before etrap */
181
182 /* Disable interrupts and save register state so we can call
183 * C code. The etrap handling will leave %g4 in %l4 for us
184 * when it's done.
185 */
186 rdpr %pil, %g2
187 wrpr %g0, 15, %pil /* raise PIL to maximum (15) */
188 mov %g1, %g4 /* entry offset -> %g4, preserved as %l4 across etrap */
189 ba,pt %xcc, etrap_irq
190 rd %pc, %g7 /* delay slot: etrap_irq takes our PC in %g7 */
191 #ifdef CONFIG_TRACE_IRQFLAGS
192 call trace_hardirqs_off
193 nop
194 #endif
195 /* Log the event. */
196 add %sp, PTREGS_OFF, %o0 /* arg0 = pt_regs */
197 call sun4v_resum_error
198 mov %l4, %o1 /* delay slot: arg1 = queue entry offset */
199
200 /* Return from trap. */
201 ba,pt %xcc, rtrap_irq
202 nop
203
204 sun4v_res_mondo_queue_empty:
205 retry /* queue empty: return from the trap */
206
207 sun4v_res_mondo_queue_full:
208 /* The queue is full, consolidate our damage by setting
209 * the head equal to the tail. We'll just trap again otherwise.
210 * Call C code to log the event.
211 */
212 mov INTRQ_RESUM_MONDO_HEAD, %g2
213 stxa %g4, [%g2] ASI_QUEUE /* head = tail: discard pending entries */
214 membar #Sync
215
216 rdpr %pil, %g2
217 wrpr %g0, 15, %pil /* raise PIL to maximum (15) */
218 ba,pt %xcc, etrap_irq
219 rd %pc, %g7 /* delay slot: etrap_irq takes our PC in %g7 */
220 #ifdef CONFIG_TRACE_IRQFLAGS
221 call trace_hardirqs_off
222 nop
223 #endif
224 call sun4v_resum_overflow
225 add %sp, PTREGS_OFF, %o0 /* delay slot: arg0 = pt_regs */
226
227 ba,pt %xcc, rtrap_irq
228 nop
229
230 sun4v_nonres_mondo:
	/* sun4v non-resumable-error mondo vector.  Mirrors
	 * sun4v_res_mondo: copies the 64-byte error report from the
	 * NONRESUM queue into the per-cpu kernel buffer, then enters C
	 * code (sun4v_nonresum_error) via etrap_irq; overflow is
	 * reported via sun4v_nonresum_overflow.
	 */
231 /* Head offset in %g2, tail offset in %g4. */
232 mov INTRQ_NONRESUM_MONDO_HEAD, %g2
233 ldxa [%g2] ASI_QUEUE, %g2
234 mov INTRQ_NONRESUM_MONDO_TAIL, %g4
235 ldxa [%g4] ASI_QUEUE, %g4
236 cmp %g2, %g4
237 be,pn %xcc, sun4v_nonres_mondo_queue_empty
238 nop
239
240 /* Get &trap_block[smp_processor_id()] into %g3. */
241 ldxa [%g0] ASI_SCRATCHPAD, %g3
242 sub %g3, TRAP_PER_CPU_FAULT_INFO, %g3
243
244 /* Get NONRES mondo queue base phys address into %g5. */
245 ldx [%g3 + TRAP_PER_CPU_NONRESUM_MONDO_PA], %g5
246
247 /* Get NONRES kernel buffer base phys address into %g7. */
248 ldx [%g3 + TRAP_PER_CPU_NONRESUM_KBUF_PA], %g7
249
250 /* If the first word is non-zero, queue is full. */
251 ldxa [%g7 + %g2] ASI_PHYS_USE_EC, %g1
252 brnz,pn %g1, sun4v_nonres_mondo_queue_full
253 nop
254
255 lduw [%g3 + TRAP_PER_CPU_NONRESUM_QMASK], %g4 /* %g4 = queue size mask */
256
257 /* Remember this entry's offset in %g1. */
258 mov %g2, %g1
259
260 /* Copy 64-byte queue entry into kernel buffer. */
	/* Eight unrolled 8-byte physical copies; %g2 advances 0x40 total. */
261 ldxa [%g5 + %g2] ASI_PHYS_USE_EC, %g3
262 stxa %g3, [%g7 + %g2] ASI_PHYS_USE_EC
263 add %g2, 0x08, %g2
264 ldxa [%g5 + %g2] ASI_PHYS_USE_EC, %g3
265 stxa %g3, [%g7 + %g2] ASI_PHYS_USE_EC
266 add %g2, 0x08, %g2
267 ldxa [%g5 + %g2] ASI_PHYS_USE_EC, %g3
268 stxa %g3, [%g7 + %g2] ASI_PHYS_USE_EC
269 add %g2, 0x08, %g2
270 ldxa [%g5 + %g2] ASI_PHYS_USE_EC, %g3
271 stxa %g3, [%g7 + %g2] ASI_PHYS_USE_EC
272 add %g2, 0x08, %g2
273 ldxa [%g5 + %g2] ASI_PHYS_USE_EC, %g3
274 stxa %g3, [%g7 + %g2] ASI_PHYS_USE_EC
275 add %g2, 0x08, %g2
276 ldxa [%g5 + %g2] ASI_PHYS_USE_EC, %g3
277 stxa %g3, [%g7 + %g2] ASI_PHYS_USE_EC
278 add %g2, 0x08, %g2
279 ldxa [%g5 + %g2] ASI_PHYS_USE_EC, %g3
280 stxa %g3, [%g7 + %g2] ASI_PHYS_USE_EC
281 add %g2, 0x08, %g2
282 ldxa [%g5 + %g2] ASI_PHYS_USE_EC, %g3
283 stxa %g3, [%g7 + %g2] ASI_PHYS_USE_EC
284 add %g2, 0x08, %g2
285
286 /* Update queue head pointer. */
287 and %g2, %g4, %g2 /* wrap head offset at queue size */
288
289 mov INTRQ_NONRESUM_MONDO_HEAD, %g4
290 stxa %g2, [%g4] ASI_QUEUE
291 membar #Sync /* complete the head-pointer store before etrap */
292
293 /* Disable interrupts and save register state so we can call
294 * C code. The etrap handling will leave %g4 in %l4 for us
295 * when it's done.
296 */
297 rdpr %pil, %g2
298 wrpr %g0, 15, %pil /* raise PIL to maximum (15) */
299 mov %g1, %g4 /* entry offset -> %g4, preserved as %l4 across etrap */
300 ba,pt %xcc, etrap_irq
301 rd %pc, %g7 /* delay slot: etrap_irq takes our PC in %g7 */
302 #ifdef CONFIG_TRACE_IRQFLAGS
303 call trace_hardirqs_off
304 nop
305 #endif
306 /* Log the event. */
307 add %sp, PTREGS_OFF, %o0 /* arg0 = pt_regs */
308 call sun4v_nonresum_error
309 mov %l4, %o1 /* delay slot: arg1 = queue entry offset */
310
311 /* Return from trap. */
312 ba,pt %xcc, rtrap_irq
313 nop
314
315 sun4v_nonres_mondo_queue_empty:
316 retry /* queue empty: return from the trap */
317
318 sun4v_nonres_mondo_queue_full:
319 /* The queue is full, consolidate our damage by setting
320 * the head equal to the tail. We'll just trap again otherwise.
321 * Call C code to log the event.
322 */
323 mov INTRQ_NONRESUM_MONDO_HEAD, %g2
324 stxa %g4, [%g2] ASI_QUEUE /* head = tail: discard pending entries */
325 membar #Sync
326
327 rdpr %pil, %g2
328 wrpr %g0, 15, %pil /* raise PIL to maximum (15) */
329 ba,pt %xcc, etrap_irq
330 rd %pc, %g7 /* delay slot: etrap_irq takes our PC in %g7 */
331 #ifdef CONFIG_TRACE_IRQFLAGS
332 call trace_hardirqs_off
333 nop
334 #endif
335 call sun4v_nonresum_overflow
336 add %sp, PTREGS_OFF, %o0 /* delay slot: arg0 = pt_regs */
337
338 ba,pt %xcc, rtrap_irq
339 nop
/* Web-scrape residue (gitweb page footer, kept as a comment):
 * This page took 0.039455 seconds and 5 git commands to generate.
 */