/*
 * Low-level SPU handling
 *
 * (C) Copyright IBM Deutschland Entwicklung GmbH 2005
 *
 * Author: Arnd Bergmann <arndb@de.ibm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#undef DEBUG

#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/wait.h>
#include <linux/mm.h>
#include <linux/io.h>
#include <linux/mutex.h>
#include <linux/linux_logo.h>
#include <asm/spu.h>
#include <asm/spu_priv1.h>
#include <asm/xmon.h>
#include <asm/prom.h>

const struct spu_management_ops *spu_management_ops;
EXPORT_SYMBOL_GPL(spu_management_ops);

const struct spu_priv1_ops *spu_priv1_ops;
EXPORT_SYMBOL_GPL(spu_priv1_ops);

struct cbe_spu_info cbe_spu_info[MAX_NUMNODES];
EXPORT_SYMBOL_GPL(cbe_spu_info);

/*
 * Protects cbe_spu_info and spu->number.
 */
static DEFINE_SPINLOCK(spu_lock);

/*
 * List of all spus in the system.
 *
 * This list is iterated by callers from irq context and callers that
 * want to sleep.  Thus modifications need to be done with both
 * spu_full_list_lock and spu_full_list_mutex held, while iterating
 * through it requires either of these locks.
 *
 * In addition spu_full_list_lock protects all assignments to
 * spu->mm.
 */
static LIST_HEAD(spu_full_list);
static DEFINE_SPINLOCK(spu_full_list_lock);
static DEFINE_MUTEX(spu_full_list_mutex);

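/*
 * Invalidate all SLB entries on one SPE.  The invalidate is only
 * issued while the MFC has translation enabled (MFC_STATE1_RELOCATE
 * set in SR1).
 */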
void spu_invalidate_slbs(struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	if (spu_mfc_sr1_get(spu) & MFC_STATE1_RELOCATE_MASK)
		out_be64(&priv2->slb_invalidate_all_W, 0UL);
}
EXPORT_SYMBOL_GPL(spu_invalidate_slbs);

/* This is called by the MM core when a segment size is changed, to
 * request a flush of all the SPEs using a given mm
 */
void spu_flush_all_slbs(struct mm_struct *mm)
{
	struct spu *spu;
	unsigned long flags;

	spin_lock_irqsave(&spu_full_list_lock, flags);
	list_for_each_entry(spu, &spu_full_list, full_list) {
		if (spu->mm == mm)
			spu_invalidate_slbs(spu);
	}
	spin_unlock_irqrestore(&spu_full_list_lock, flags);
}

/* The hack below stinks... try to do something better one of
 * these days... Does it even work properly with NR_CPUS == 1 ?
 */
static inline void mm_needs_global_tlbie(struct mm_struct *mm)
{
	int nr = (NR_CPUS > 1) ? NR_CPUS : NR_CPUS + 1;

	/* Global TLBIE broadcast required with SPEs. */
	__cpus_setall(&mm->cpu_vm_mask, nr);
}

void spu_associate_mm(struct spu *spu, struct mm_struct *mm)
{
	unsigned long flags;

	spin_lock_irqsave(&spu_full_list_lock, flags);
	spu->mm = mm;
	spin_unlock_irqrestore(&spu_full_list_lock, flags);
	if (mm)
		mm_needs_global_tlbie(mm);
}
EXPORT_SYMBOL_GPL(spu_associate_mm);

static int __spu_trap_invalid_dma(struct spu *spu)
{
	pr_debug("%s\n", __FUNCTION__);
	spu->dma_callback(spu, SPE_EVENT_INVALID_DMA);
	return 0;
}

static int __spu_trap_dma_align(struct spu *spu)
{
	pr_debug("%s\n", __FUNCTION__);
	spu->dma_callback(spu, SPE_EVENT_DMA_ALIGNMENT);
	return 0;
}

static int __spu_trap_error(struct spu *spu)
{
	pr_debug("%s\n", __FUNCTION__);
	spu->dma_callback(spu, SPE_EVENT_SPE_ERROR);
	return 0;
}

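/*
 * Restart a suspended MFC DMA queue.  If a context switch is
 * pending, the restart is skipped here and left to the context
 * switch code.
 */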
static void spu_restart_dma(struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	if (!test_bit(SPU_CONTEXT_SWITCH_PENDING, &spu->flags))
		out_be64(&priv2->mfc_control_RW, MFC_CNTL_RESTART_DMA_COMMAND);
}

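/*
 * SLB miss from an SPE: construct and install a new SLB entry for
 * the faulting effective address.  The VSID and page size depend on
 * the region of the address (user, vmalloc/io or linear kernel
 * mapping); entries are replaced round-robin across the eight SLB
 * slots via spu->slb_replace.
 */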
static int __spu_trap_data_seg(struct spu *spu, unsigned long ea)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;
	struct mm_struct *mm = spu->mm;
	u64 esid, vsid, llp;
	int psize;

	pr_debug("%s\n", __FUNCTION__);

	if (test_bit(SPU_CONTEXT_SWITCH_ACTIVE, &spu->flags)) {
		/* SLBs are pre-loaded for context switch, so
		 * we should never get here!
		 */
		printk("%s: invalid access during switch!\n", __func__);
		return 1;
	}
	esid = (ea & ESID_MASK) | SLB_ESID_V;

	switch (REGION_ID(ea)) {
	case USER_REGION_ID:
#ifdef CONFIG_PPC_MM_SLICES
		psize = get_slice_psize(mm, ea);
#else
		psize = mm->context.user_psize;
#endif
		vsid = (get_vsid(mm->context.id, ea, MMU_SEGSIZE_256M) << SLB_VSID_SHIFT) |
				SLB_VSID_USER;
		break;
	case VMALLOC_REGION_ID:
		if (ea < VMALLOC_END)
			psize = mmu_vmalloc_psize;
		else
			psize = mmu_io_psize;
		vsid = (get_kernel_vsid(ea, MMU_SEGSIZE_256M) << SLB_VSID_SHIFT) |
				SLB_VSID_KERNEL;
		break;
	case KERNEL_REGION_ID:
		psize = mmu_linear_psize;
		vsid = (get_kernel_vsid(ea, MMU_SEGSIZE_256M) << SLB_VSID_SHIFT) |
				SLB_VSID_KERNEL;
		break;
	default:
		/* Future: support kernel segments so that drivers
		 * can use SPUs.
		 */
		pr_debug("invalid region access at %016lx\n", ea);
		return 1;
	}
	llp = mmu_psize_defs[psize].sllp;

	out_be64(&priv2->slb_index_W, spu->slb_replace);
	out_be64(&priv2->slb_vsid_RW, vsid | llp);
	out_be64(&priv2->slb_esid_RW, esid);

	spu->slb_replace++;
	if (spu->slb_replace >= 8)
		spu->slb_replace = 0;

	spu_restart_dma(spu);
	spu->stats.slb_flt++;
	return 0;
}

extern int hash_page(unsigned long ea, unsigned long access, unsigned long trap); //XXX
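/*
 * Hash table miss from an SPE.  Kernel-space faults are resolved
 * directly via hash_page(); user-space faults are recorded in
 * spu->dar/dsisr and handed to the owning thread through
 * stop_callback(), so that they can be serviced in process context.
 */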
static int __spu_trap_data_map(struct spu *spu, unsigned long ea, u64 dsisr)
{
	pr_debug("%s, %lx, %lx\n", __FUNCTION__, dsisr, ea);

	/* Handle kernel space hash faults immediately.
	 * User hash faults need to be deferred to process context.
	 */
	if ((dsisr & MFC_DSISR_PTE_NOT_FOUND)
	    && REGION_ID(ea) != USER_REGION_ID
	    && hash_page(ea, _PAGE_PRESENT, 0x300) == 0) {
		spu_restart_dma(spu);
		return 0;
	}

	if (test_bit(SPU_CONTEXT_SWITCH_ACTIVE, &spu->flags)) {
		printk("%s: invalid access during switch!\n", __func__);
		return 1;
	}

	spu->dar = ea;
	spu->dsisr = dsisr;
	mb();
	spu->stop_callback(spu);
	return 0;
}

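/*
 * Class 0 interrupts signal SPU errors: invalid DMA alignment,
 * invalid MFC DMA command and SPU error.  The hard irq handler only
 * latches the status into spu->class_0_pending; the actual handling
 * happens later in spu_irq_class_0_bottom().
 */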
static irqreturn_t
spu_irq_class_0(int irq, void *data)
{
	struct spu *spu;
	unsigned long stat, mask;

	spu = data;

	mask = spu_int_mask_get(spu, 0);
	stat = spu_int_stat_get(spu, 0);
	stat &= mask;

	spin_lock(&spu->register_lock);
	spu->class_0_pending |= stat;
	spin_unlock(&spu->register_lock);

	spu->stop_callback(spu);

	spu_int_stat_clear(spu, 0, stat);

	return IRQ_HANDLED;
}

int
spu_irq_class_0_bottom(struct spu *spu)
{
	unsigned long flags;
	unsigned long stat;

	spin_lock_irqsave(&spu->register_lock, flags);
	stat = spu->class_0_pending;
	spu->class_0_pending = 0;

	if (stat & 1) /* invalid DMA alignment */
		__spu_trap_dma_align(spu);

	if (stat & 2) /* invalid MFC DMA */
		__spu_trap_invalid_dma(spu);

	if (stat & 4) /* error on SPU */
		__spu_trap_error(spu);

	spin_unlock_irqrestore(&spu->register_lock, flags);

	return (stat & 0x7) ? -EIO : 0;
}
EXPORT_SYMBOL_GPL(spu_irq_class_0_bottom);

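/*
 * Class 1 interrupts signal MFC translation faults.  Segment faults
 * are fixed up on the spot by __spu_trap_data_seg(), mapping faults
 * are passed on to __spu_trap_data_map().  Status, DAR and DSISR are
 * sampled and cleared atomically under the register lock.
 */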
static irqreturn_t
spu_irq_class_1(int irq, void *data)
{
	struct spu *spu;
	unsigned long stat, mask, dar, dsisr;

	spu = data;

	/* atomically read & clear class1 status. */
	spin_lock(&spu->register_lock);
	mask = spu_int_mask_get(spu, 1);
	stat = spu_int_stat_get(spu, 1) & mask;
	dar = spu_mfc_dar_get(spu);
	dsisr = spu_mfc_dsisr_get(spu);
	if (stat & 2) /* mapping fault */
		spu_mfc_dsisr_set(spu, 0ul);
	spu_int_stat_clear(spu, 1, stat);
	spin_unlock(&spu->register_lock);
	pr_debug("%s: %lx %lx %lx %lx\n", __FUNCTION__, mask, stat,
			dar, dsisr);

	if (stat & 1) /* segment fault */
		__spu_trap_data_seg(spu, dar);

	if (stat & 2) /* mapping fault */
		__spu_trap_data_map(spu, dar, dsisr);

	if (stat & 4) /* ls compare & suspend on get */
		;

	if (stat & 8) /* ls compare & suspend on put */
		;

	return stat ? IRQ_HANDLED : IRQ_NONE;
}

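/*
 * Class 2 interrupts signal application events: the two mailbox
 * interrupts, SPU stop-and-signal, SPU halt and DMA tag group
 * complete.  Each status bit is dispatched to the corresponding
 * callback registered by the owner of the SPU.
 */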
static irqreturn_t
spu_irq_class_2(int irq, void *data)
{
	struct spu *spu;
	unsigned long stat;
	unsigned long mask;

	spu = data;
	spin_lock(&spu->register_lock);
	stat = spu_int_stat_get(spu, 2);
	mask = spu_int_mask_get(spu, 2);
	/* ignore interrupts we're not waiting for */
	stat &= mask;
	/*
	 * mailbox interrupts (0x1 and 0x10) are level triggered.
	 * mask them now before acknowledging.
	 */
	if (stat & 0x11)
		spu_int_mask_and(spu, 2, ~(stat & 0x11));
	/* acknowledge all interrupts before the callbacks */
	spu_int_stat_clear(spu, 2, stat);
	spin_unlock(&spu->register_lock);

	pr_debug("class 2 interrupt %d, %lx, %lx\n", irq, stat, mask);

	if (stat & 1) /* PPC core mailbox */
		spu->ibox_callback(spu);

	if (stat & 2) /* SPU stop-and-signal */
		spu->stop_callback(spu);

	if (stat & 4) /* SPU halted */
		spu->stop_callback(spu);

	if (stat & 8) /* DMA tag group complete */
		spu->mfc_callback(spu);

	if (stat & 0x10) /* SPU mailbox threshold */
		spu->wbox_callback(spu);

	spu->stats.class2_intr++;
	return stat ? IRQ_HANDLED : IRQ_NONE;
}

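/*
 * Request the three per-SPU interrupt lines, one per interrupt
 * class.  The generated names ("spe00.0", "spe00.1", ...) are what
 * appears in /proc/interrupts.  Any of the lines may be absent
 * (NO_IRQ), depending on the platform.
 */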
static int spu_request_irqs(struct spu *spu)
{
	int ret = 0;

	if (spu->irqs[0] != NO_IRQ) {
		snprintf(spu->irq_c0, sizeof (spu->irq_c0), "spe%02d.0",
			 spu->number);
		ret = request_irq(spu->irqs[0], spu_irq_class_0,
				  IRQF_DISABLED,
				  spu->irq_c0, spu);
		if (ret)
			goto bail0;
	}
	if (spu->irqs[1] != NO_IRQ) {
		snprintf(spu->irq_c1, sizeof (spu->irq_c1), "spe%02d.1",
			 spu->number);
		ret = request_irq(spu->irqs[1], spu_irq_class_1,
				  IRQF_DISABLED,
				  spu->irq_c1, spu);
		if (ret)
			goto bail1;
	}
	if (spu->irqs[2] != NO_IRQ) {
		snprintf(spu->irq_c2, sizeof (spu->irq_c2), "spe%02d.2",
			 spu->number);
		ret = request_irq(spu->irqs[2], spu_irq_class_2,
				  IRQF_DISABLED,
				  spu->irq_c2, spu);
		if (ret)
			goto bail2;
	}
	return 0;

bail2:
	if (spu->irqs[1] != NO_IRQ)
		free_irq(spu->irqs[1], spu);
bail1:
	if (spu->irqs[0] != NO_IRQ)
		free_irq(spu->irqs[0], spu);
bail0:
	return ret;
}

static void spu_free_irqs(struct spu *spu)
{
	if (spu->irqs[0] != NO_IRQ)
		free_irq(spu->irqs[0], spu);
	if (spu->irqs[1] != NO_IRQ)
		free_irq(spu->irqs[1], spu);
	if (spu->irqs[2] != NO_IRQ)
		free_irq(spu->irqs[2], spu);
}

void spu_init_channels(struct spu *spu)
{
	static const struct {
		unsigned channel;
		unsigned count;
	} zero_list[] = {
		{ 0x00, 1, }, { 0x01, 1, }, { 0x03, 1, }, { 0x04, 1, },
		{ 0x18, 1, }, { 0x19, 1, }, { 0x1b, 1, }, { 0x1d, 1, },
	}, count_list[] = {
		{ 0x00, 0, }, { 0x03, 0, }, { 0x04, 0, }, { 0x15, 16, },
		{ 0x17, 1, }, { 0x18, 0, }, { 0x19, 0, }, { 0x1b, 0, },
		{ 0x1c, 1, }, { 0x1d, 0, }, { 0x1e, 1, },
	};
	struct spu_priv2 __iomem *priv2;
	int i;

	priv2 = spu->priv2;

	/* initialize all channel data to zero */
	for (i = 0; i < ARRAY_SIZE(zero_list); i++) {
		int count;

		out_be64(&priv2->spu_chnlcntptr_RW, zero_list[i].channel);
		for (count = 0; count < zero_list[i].count; count++)
			out_be64(&priv2->spu_chnldata_RW, 0);
	}

	/* initialize channel counts to meaningful values */
	for (i = 0; i < ARRAY_SIZE(count_list); i++) {
		out_be64(&priv2->spu_chnlcntptr_RW, count_list[i].channel);
		out_be64(&priv2->spu_chnlcnt_RW, count_list[i].count);
	}
}
EXPORT_SYMBOL_GPL(spu_init_channels);

static int spu_shutdown(struct sys_device *sysdev)
{
	struct spu *spu = container_of(sysdev, struct spu, sysdev);

	spu_free_irqs(spu);
	spu_destroy_spu(spu);
	return 0;
}

static struct sysdev_class spu_sysdev_class = {
	.name = "spu",
	.shutdown = spu_shutdown,
};

int spu_add_sysdev_attr(struct sysdev_attribute *attr)
{
	struct spu *spu;

	mutex_lock(&spu_full_list_mutex);
	list_for_each_entry(spu, &spu_full_list, full_list)
		sysdev_create_file(&spu->sysdev, attr);
	mutex_unlock(&spu_full_list_mutex);

	return 0;
}
EXPORT_SYMBOL_GPL(spu_add_sysdev_attr);

int spu_add_sysdev_attr_group(struct attribute_group *attrs)
{
	struct spu *spu;

	mutex_lock(&spu_full_list_mutex);
	list_for_each_entry(spu, &spu_full_list, full_list)
		sysfs_create_group(&spu->sysdev.kobj, attrs);
	mutex_unlock(&spu_full_list_mutex);

	return 0;
}
EXPORT_SYMBOL_GPL(spu_add_sysdev_attr_group);

void spu_remove_sysdev_attr(struct sysdev_attribute *attr)
{
	struct spu *spu;

	mutex_lock(&spu_full_list_mutex);
	list_for_each_entry(spu, &spu_full_list, full_list)
		sysdev_remove_file(&spu->sysdev, attr);
	mutex_unlock(&spu_full_list_mutex);
}
EXPORT_SYMBOL_GPL(spu_remove_sysdev_attr);

void spu_remove_sysdev_attr_group(struct attribute_group *attrs)
{
	struct spu *spu;

	mutex_lock(&spu_full_list_mutex);
	list_for_each_entry(spu, &spu_full_list, full_list)
		sysfs_remove_group(&spu->sysdev.kobj, attrs);
	mutex_unlock(&spu_full_list_mutex);
}
EXPORT_SYMBOL_GPL(spu_remove_sysdev_attr_group);

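/*
 * Register the SPU with the sysdev class, making its attributes
 * visible under /sys/devices/system/spu/, and link it to its NUMA
 * node in sysfs.
 */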
static int spu_create_sysdev(struct spu *spu)
{
	int ret;

	spu->sysdev.id = spu->number;
	spu->sysdev.cls = &spu_sysdev_class;
	ret = sysdev_register(&spu->sysdev);
	if (ret) {
		printk(KERN_ERR "Can't register SPU %d with sysfs\n",
				spu->number);
		return ret;
	}

	sysfs_add_device_to_node(&spu->sysdev, spu->node);

	return 0;
}

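/*
 * Allocate and bring up one SPU: assign a global number, perform the
 * platform-specific setup via spu_create_spu(), hook up interrupts
 * and the sysdev, then publish the SPU on the per-node list and the
 * global spu_full_list.
 */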
static int __init create_spu(void *data)
{
	struct spu *spu;
	int ret;
	static int number;
	unsigned long flags;
	struct timespec ts;

	ret = -ENOMEM;
	spu = kzalloc(sizeof (*spu), GFP_KERNEL);
	if (!spu)
		goto out;

	spu->alloc_state = SPU_FREE;

	spin_lock_init(&spu->register_lock);
	spin_lock(&spu_lock);
	spu->number = number++;
	spin_unlock(&spu_lock);

	ret = spu_create_spu(spu, data);

	if (ret)
		goto out_free;

	spu_mfc_sdr_setup(spu);
	spu_mfc_sr1_set(spu, 0x33);
	ret = spu_request_irqs(spu);
	if (ret)
		goto out_destroy;

	ret = spu_create_sysdev(spu);
	if (ret)
		goto out_free_irqs;

	mutex_lock(&cbe_spu_info[spu->node].list_mutex);
	list_add(&spu->cbe_list, &cbe_spu_info[spu->node].spus);
	cbe_spu_info[spu->node].n_spus++;
	mutex_unlock(&cbe_spu_info[spu->node].list_mutex);

	mutex_lock(&spu_full_list_mutex);
	spin_lock_irqsave(&spu_full_list_lock, flags);
	list_add(&spu->full_list, &spu_full_list);
	spin_unlock_irqrestore(&spu_full_list_lock, flags);
	mutex_unlock(&spu_full_list_mutex);

	spu->stats.util_state = SPU_UTIL_IDLE_LOADED;
	ktime_get_ts(&ts);
	spu->stats.tstamp = timespec_to_ns(&ts);

	INIT_LIST_HEAD(&spu->aff_list);

	goto out;

out_free_irqs:
	spu_free_irqs(spu);
out_destroy:
	spu_destroy_spu(spu);
out_free:
	kfree(spu);
out:
	return ret;
}

static const char *spu_state_names[] = {
	"user", "system", "iowait", "idle"
};

static unsigned long long spu_acct_time(struct spu *spu,
		enum spu_utilization_state state)
{
	struct timespec ts;
	unsigned long long time = spu->stats.times[state];

	/*
	 * If the spu is idle or the context is stopped, utilization
	 * statistics are not updated.  Apply the time delta from the
	 * last recorded state of the spu.
	 */
	if (spu->stats.util_state == state) {
		ktime_get_ts(&ts);
		time += timespec_to_ns(&ts) - spu->stats.tstamp;
	}

	return time / NSEC_PER_MSEC;
}

static ssize_t spu_stat_show(struct sys_device *sysdev, char *buf)
{
	struct spu *spu = container_of(sysdev, struct spu, sysdev);

	return sprintf(buf, "%s %llu %llu %llu %llu "
		"%llu %llu %llu %llu %llu %llu %llu %llu\n",
		spu_state_names[spu->stats.util_state],
		spu_acct_time(spu, SPU_UTIL_USER),
		spu_acct_time(spu, SPU_UTIL_SYSTEM),
		spu_acct_time(spu, SPU_UTIL_IOWAIT),
		spu_acct_time(spu, SPU_UTIL_IDLE_LOADED),
		spu->stats.vol_ctx_switch,
		spu->stats.invol_ctx_switch,
		spu->stats.slb_flt,
		spu->stats.hash_flt,
		spu->stats.min_flt,
		spu->stats.maj_flt,
		spu->stats.class2_intr,
		spu->stats.libassist);
}

static SYSDEV_ATTR(stat, 0644, spu_stat_show, NULL);

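/*
 * Module initialization: set up the per-node SPU lists, register the
 * sysdev class, enumerate and create all SPUs, then register them
 * with xmon and the crash dump code and set up the "stat" attribute
 * and SPU affinity.
 */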
static int __init init_spu_base(void)
{
	int i, ret = 0;

	for (i = 0; i < MAX_NUMNODES; i++) {
		mutex_init(&cbe_spu_info[i].list_mutex);
		INIT_LIST_HEAD(&cbe_spu_info[i].spus);
	}

	if (!spu_management_ops)
		goto out;

	/* create sysdev class for spus */
	ret = sysdev_class_register(&spu_sysdev_class);
	if (ret)
		goto out;

	ret = spu_enumerate_spus(create_spu);

	if (ret < 0) {
		printk(KERN_WARNING "%s: Error initializing spus\n",
			__FUNCTION__);
		goto out_unregister_sysdev_class;
	}

	if (ret > 0) {
		/*
		 * We cannot put the forward declaration in
		 * <linux/linux_logo.h> because of section type conflicts
		 * between const and __initdata with different compiler
		 * versions.
		 */
		extern const struct linux_logo logo_spe_clut224;

		fb_append_extra_logo(&logo_spe_clut224, ret);
	}

	mutex_lock(&spu_full_list_mutex);
	xmon_register_spus(&spu_full_list);
	crash_register_spus(&spu_full_list);
	mutex_unlock(&spu_full_list_mutex);
	spu_add_sysdev_attr(&attr_stat);

	spu_init_affinity();

	return 0;

out_unregister_sysdev_class:
	sysdev_class_unregister(&spu_sysdev_class);
out:
	return ret;
}
module_init(init_spu_base);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Arnd Bergmann <arndb@de.ibm.com>");