/*
 * spu_switch.c
 *
 * (C) Copyright IBM Corp. 2005
 *
 * Author: Mark Nutter <mnutter@us.ibm.com>
 *
 * Host-side part of SPU context switch sequence outlined in
 * Synergistic Processor Element, Book IV.
 *
 * A fully preemptive switch of an SPE is very expensive in terms
 * of time and system resources.  SPE Book IV indicates that SPE
 * allocation should follow a "serially reusable device" model,
 * in which the SPE is assigned a task until it completes.  When
 * this is not possible, this sequence may be used to preemptively
 * save, and then later (optionally) restore the context of a
 * program executing on an SPE.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
#include <linux/config.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/smp.h>
#include <linux/smp_lock.h>
#include <linux/stddef.h>
#include <linux/unistd.h>

#include <asm/io.h>
#include <asm/spu.h>
#include <asm/spu_csa.h>
#include <asm/mmu_context.h>

#include "spu_save_dump.h"
#include "spu_restore_dump.h"
#if 0
#define POLL_WHILE_TRUE(_c) {				\
    do {						\
    } while (_c);					\
  }
#else
#define RELAX_SPIN_COUNT				1000
#define POLL_WHILE_TRUE(_c) {				\
    do {						\
	int _i;						\
	for (_i=0; _i<RELAX_SPIN_COUNT && (_c); _i++) { \
	    cpu_relax();				\
	}						\
	if (unlikely(_c)) yield();			\
    } while (_c);					\
  }
#endif				/* debug */

#define POLL_WHILE_FALSE(_c)	POLL_WHILE_TRUE(!(_c))
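
/*
 * Editorial note (illustrative): POLL_WHILE_TRUE() spins on a condition,
 * calling cpu_relax() for up to RELAX_SPIN_COUNT iterations before
 * yielding the CPU, and repeats until the condition is false.  A typical
 * use later in this file is waiting for the SPU to stop running:
 *
 *	POLL_WHILE_TRUE(in_be32(&prob->spu_status_R) & SPU_STATUS_RUNNING);
 *
 * POLL_WHILE_FALSE() simply inverts the condition.
 */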
static inline void acquire_spu_lock(struct spu *spu)
{
	/* Save, Step 1:
	 * Restore, Step 1:
	 *    Acquire SPU-specific mutual exclusion lock.
	 *    TBD.
	 */
}

static inline void release_spu_lock(struct spu *spu)
{
	/* Restore, Step 76:
	 *    Release SPU-specific mutual exclusion lock.
	 *    TBD.
	 */
}

static inline int check_spu_isolate(struct spu_state *csa, struct spu *spu)
{
	struct spu_problem __iomem *prob = spu->problem;
	u32 isolate_state;

	/* Save, Step 2:
	 * Save, Step 6:
	 *     If SPU_Status[E,L,IS] any field is '1', this
	 *     SPU is in isolate state and cannot be context
	 *     saved at this time.
	 */
	isolate_state = SPU_STATUS_ISOLATED_STATE |
	    SPU_STATUS_ISOLATED_LOAD_STAUTUS | SPU_STATUS_ISOLATED_EXIT_STAUTUS;
	return (in_be32(&prob->spu_status_R) & isolate_state) ? 1 : 0;
}

static inline void disable_interrupts(struct spu_state *csa, struct spu *spu)
{
	/* Save, Step 3:
	 * Restore, Step 2:
	 *     Save INT_Mask_class0 in CSA.
	 *     Write INT_MASK_class0 with value of 0.
	 *     Save INT_Mask_class1 in CSA.
	 *     Write INT_MASK_class1 with value of 0.
	 *     Save INT_Mask_class2 in CSA.
	 *     Write INT_MASK_class2 with value of 0.
	 */
	spin_lock_irq(&spu->register_lock);
	if (csa) {
		csa->priv1.int_mask_class0_RW = spu_int_mask_get(spu, 0);
		csa->priv1.int_mask_class1_RW = spu_int_mask_get(spu, 1);
		csa->priv1.int_mask_class2_RW = spu_int_mask_get(spu, 2);
	}
	spu_int_mask_set(spu, 0, 0ul);
	spu_int_mask_set(spu, 1, 0ul);
	spu_int_mask_set(spu, 2, 0ul);
	spin_unlock_irq(&spu->register_lock);
}
static inline void set_watchdog_timer(struct spu_state *csa, struct spu *spu)
{
	/* Save, Step 4:
	 * Restore, Step 26.
	 *     Set a software watchdog timer, which specifies the
	 *     maximum allowable time for a context save sequence.
	 *
	 *     For present, this implementation will not set a global
	 *     watchdog timer, as virtualization & variable system load
	 *     may cause unpredictable execution times.
	 */
}

static inline void inhibit_user_access(struct spu_state *csa, struct spu *spu)
{
	/* Save, Step 5:
	 * Restore, Step 3:
	 *     Inhibit user-space access (if provided) to this
	 *     SPU by unmapping the virtual pages assigned to
	 *     the SPU memory-mapped I/O (MMIO) for problem
	 *     state. TBD.
	 */
}

static inline void set_switch_pending(struct spu_state *csa, struct spu *spu)
{
	/* Save, Step 7:
	 * Restore, Step 5:
	 *     Set a software context switch pending flag.
	 */
	set_bit(SPU_CONTEXT_SWITCH_PENDING, &spu->flags);
	mb();
}

static inline void save_mfc_cntl(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	/* Save, Step 8:
	 *     Suspend DMA and save MFC_CNTL.
	 */
	switch (in_be64(&priv2->mfc_control_RW) &
		MFC_CNTL_SUSPEND_DMA_STATUS_MASK) {
	case MFC_CNTL_SUSPEND_IN_PROGRESS:
		POLL_WHILE_FALSE((in_be64(&priv2->mfc_control_RW) &
				  MFC_CNTL_SUSPEND_DMA_STATUS_MASK) ==
				 MFC_CNTL_SUSPEND_COMPLETE);
		/* fall through */
	case MFC_CNTL_SUSPEND_COMPLETE:
		if (csa) {
			csa->priv2.mfc_control_RW =
			    in_be64(&priv2->mfc_control_RW) |
			    MFC_CNTL_SUSPEND_DMA_QUEUE;
		}
		break;
	case MFC_CNTL_NORMAL_DMA_QUEUE_OPERATION:
		out_be64(&priv2->mfc_control_RW, MFC_CNTL_SUSPEND_DMA_QUEUE);
		POLL_WHILE_FALSE((in_be64(&priv2->mfc_control_RW) &
				  MFC_CNTL_SUSPEND_DMA_STATUS_MASK) ==
				 MFC_CNTL_SUSPEND_COMPLETE);
		if (csa) {
			csa->priv2.mfc_control_RW =
			    in_be64(&priv2->mfc_control_RW) &
			    ~MFC_CNTL_SUSPEND_DMA_QUEUE;
		}
		break;
	}
}
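
/*
 * Editorial note: the switch in save_mfc_cntl() distinguishes three MFC
 * suspend states.  If a suspend is already in progress, we poll for its
 * completion; if already complete, we only record MFC_CNTL in the CSA with
 * the suspend bit set; if the DMA queue is in normal operation, we initiate
 * the suspend ourselves, wait for completion, and record MFC_CNTL with the
 * suspend bit cleared so that restore does not re-suspend the queue.
 */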
static inline void save_spu_runcntl(struct spu_state *csa, struct spu *spu)
{
	struct spu_problem __iomem *prob = spu->problem;

	/* Save, Step 9:
	 *     Save SPU_Runcntl in the CSA.  This value contains
	 *     the "Application Desired State".
	 */
	csa->prob.spu_runcntl_RW = in_be32(&prob->spu_runcntl_RW);
}

static inline void save_mfc_sr1(struct spu_state *csa, struct spu *spu)
{
	/* Save, Step 10:
	 *     Save MFC_SR1 in the CSA.
	 */
	csa->priv1.mfc_sr1_RW = spu_mfc_sr1_get(spu);
}

static inline void save_spu_status(struct spu_state *csa, struct spu *spu)
{
	struct spu_problem __iomem *prob = spu->problem;

	/* Save, Step 11:
	 *     Read SPU_Status[R], and save to CSA.
	 */
	if ((in_be32(&prob->spu_status_R) & SPU_STATUS_RUNNING) == 0) {
		csa->prob.spu_status_R = in_be32(&prob->spu_status_R);
	} else {
		u32 stopped;

		out_be32(&prob->spu_runcntl_RW, SPU_RUNCNTL_STOP);
		eieio();
		POLL_WHILE_TRUE(in_be32(&prob->spu_status_R) &
				SPU_STATUS_RUNNING);
		stopped =
		    SPU_STATUS_INVALID_INSTR | SPU_STATUS_SINGLE_STEP |
		    SPU_STATUS_STOPPED_BY_HALT | SPU_STATUS_STOPPED_BY_STOP;
		if ((in_be32(&prob->spu_status_R) & stopped) == 0)
			csa->prob.spu_status_R = SPU_STATUS_RUNNING;
		else
			csa->prob.spu_status_R = in_be32(&prob->spu_status_R);
	}
}

static inline void save_mfc_decr(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	/* Save, Step 12:
	 *     Read MFC_CNTL[Ds].  Update saved copy of
	 *     CSA.MFC_CNTL[Ds].
	 */
	if (in_be64(&priv2->mfc_control_RW) & MFC_CNTL_DECREMENTER_RUNNING) {
		csa->priv2.mfc_control_RW |= MFC_CNTL_DECREMENTER_RUNNING;
		csa->suspend_time = get_cycles();
		out_be64(&priv2->spu_chnlcntptr_RW, 7ULL);
		eieio();
		csa->spu_chnldata_RW[7] = in_be64(&priv2->spu_chnldata_RW);
		eieio();
	} else {
		csa->priv2.mfc_control_RW &= ~MFC_CNTL_DECREMENTER_RUNNING;
	}
}

static inline void halt_mfc_decr(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	/* Save, Step 13:
	 *     Write MFC_CNTL[Dh] set to a '1' to halt
	 *     the decrementer.
	 */
	out_be64(&priv2->mfc_control_RW, MFC_CNTL_DECREMENTER_HALTED);
	eieio();
}

static inline void save_timebase(struct spu_state *csa, struct spu *spu)
{
	/* Save, Step 14:
	 *    Read PPE Timebase High and Timebase low registers
	 *    and save in CSA.  TBD.
	 */
	csa->suspend_time = get_cycles();
}

static inline void remove_other_spu_access(struct spu_state *csa,
					   struct spu *spu)
{
	/* Save, Step 15:
	 * Restore, Step 6:
	 *     Remove other SPU access to this SPU by unmapping
	 *     this SPU's pages from their address space.  TBD.
	 */
}

static inline void do_mfc_mssync(struct spu_state *csa, struct spu *spu)
{
	struct spu_problem __iomem *prob = spu->problem;

	/* Save, Step 16:
	 * Restore, Step 11.
	 *     Write SPU_MSSync register. Poll SPU_MSSync[P]
	 *     for a value of 0.
	 */
	out_be64(&prob->spc_mssync_RW, 1UL);
	POLL_WHILE_TRUE(in_be64(&prob->spc_mssync_RW) & MS_SYNC_PENDING);
}

static inline void issue_mfc_tlbie(struct spu_state *csa, struct spu *spu)
{
	/* Save, Step 17:
	 * Restore, Step 12.
	 * Restore, Step 48.
	 *     Write TLB_Invalidate_Entry[IS,VPN,L,Lp]=0 register.
	 *     Then issue a PPE sync instruction.
	 */
	spu_tlb_invalidate(spu);
	mb();
}

static inline void handle_pending_interrupts(struct spu_state *csa,
					     struct spu *spu)
{
	/* Save, Step 18:
	 * Restore, Step 13.
	 *     Handle any pending interrupts from this SPU
	 *     here.  This is OS or hypervisor specific.  One
	 *     option is to re-enable interrupts to handle any
	 *     pending interrupts, with the interrupt handlers
	 *     recognizing the software Context Switch Pending
	 *     flag, to ensure the SPU execution or MFC command
	 *     queue is not restarted.  TBD.
	 */
}
static inline void save_mfc_queues(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;
	int i;

	/* Save, Step 19:
	 *     If MFC_Cntl[Se]=0 then save
	 *     MFC command queues.
	 */
	if ((in_be64(&priv2->mfc_control_RW) & MFC_CNTL_DMA_QUEUES_EMPTY) == 0) {
		for (i = 0; i < 8; i++) {
			csa->priv2.puq[i].mfc_cq_data0_RW =
			    in_be64(&priv2->puq[i].mfc_cq_data0_RW);
			csa->priv2.puq[i].mfc_cq_data1_RW =
			    in_be64(&priv2->puq[i].mfc_cq_data1_RW);
			csa->priv2.puq[i].mfc_cq_data2_RW =
			    in_be64(&priv2->puq[i].mfc_cq_data2_RW);
			csa->priv2.puq[i].mfc_cq_data3_RW =
			    in_be64(&priv2->puq[i].mfc_cq_data3_RW);
		}
		for (i = 0; i < 16; i++) {
			csa->priv2.spuq[i].mfc_cq_data0_RW =
			    in_be64(&priv2->spuq[i].mfc_cq_data0_RW);
			csa->priv2.spuq[i].mfc_cq_data1_RW =
			    in_be64(&priv2->spuq[i].mfc_cq_data1_RW);
			csa->priv2.spuq[i].mfc_cq_data2_RW =
			    in_be64(&priv2->spuq[i].mfc_cq_data2_RW);
			csa->priv2.spuq[i].mfc_cq_data3_RW =
			    in_be64(&priv2->spuq[i].mfc_cq_data3_RW);
		}
	}
}
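
/*
 * Editorial note: the MFC has 8 PU (puq) and 16 SPU (spuq) command-queue
 * entries, each described by four 64-bit data registers; the loops above
 * snapshot all of them, and only when MFC_CNTL indicates the queues are
 * not empty.  restore_mfc_queues() below performs the inverse operation.
 */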
static inline void save_ppu_querymask(struct spu_state *csa, struct spu *spu)
{
	struct spu_problem __iomem *prob = spu->problem;

	/* Save, Step 20:
	 *     Save the PPU_QueryMask register
	 *     in the CSA.
	 */
	csa->prob.dma_querymask_RW = in_be32(&prob->dma_querymask_RW);
}

static inline void save_ppu_querytype(struct spu_state *csa, struct spu *spu)
{
	struct spu_problem __iomem *prob = spu->problem;

	/* Save, Step 21:
	 *     Save the PPU_QueryType register
	 *     in the CSA.
	 */
	csa->prob.dma_querytype_RW = in_be32(&prob->dma_querytype_RW);
}

static inline void save_mfc_csr_tsq(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	/* Save, Step 22:
	 *     Save the MFC_CSR_TSQ register
	 *     in the CSA.
	 */
	csa->priv2.spu_tag_status_query_RW =
	    in_be64(&priv2->spu_tag_status_query_RW);
}

static inline void save_mfc_csr_cmd(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	/* Save, Step 23:
	 *     Save the MFC_CSR_CMD1 and MFC_CSR_CMD2
	 *     registers in the CSA.
	 */
	csa->priv2.spu_cmd_buf1_RW = in_be64(&priv2->spu_cmd_buf1_RW);
	csa->priv2.spu_cmd_buf2_RW = in_be64(&priv2->spu_cmd_buf2_RW);
}

static inline void save_mfc_csr_ato(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	/* Save, Step 24:
	 *     Save the MFC_CSR_ATO register in
	 *     the CSA.
	 */
	csa->priv2.spu_atomic_status_RW = in_be64(&priv2->spu_atomic_status_RW);
}

static inline void save_mfc_tclass_id(struct spu_state *csa, struct spu *spu)
{
	/* Save, Step 25:
	 *     Save the MFC_TCLASS_ID register in
	 *     the CSA.
	 */
	csa->priv1.mfc_tclass_id_RW = spu_mfc_tclass_id_get(spu);
}

static inline void set_mfc_tclass_id(struct spu_state *csa, struct spu *spu)
{
	/* Save, Step 26:
	 * Restore, Step 23.
	 *     Write the MFC_TCLASS_ID register with
	 *     the value 0x10000000.
	 */
	spu_mfc_tclass_id_set(spu, 0x10000000);
	eieio();
}

static inline void purge_mfc_queue(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	/* Save, Step 27:
	 * Restore, Step 14.
	 *     Write MFC_CNTL[Pc]=1 (purge queue).
	 */
	out_be64(&priv2->mfc_control_RW, MFC_CNTL_PURGE_DMA_REQUEST);
	eieio();
}

static inline void wait_purge_complete(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	/* Save, Step 28:
	 * Restore, Step 15.
	 *     Poll MFC_CNTL[Ps] until value '11' is read
	 *     (purge complete).
	 */
	POLL_WHILE_FALSE(in_be64(&priv2->mfc_control_RW) &
			 MFC_CNTL_PURGE_DMA_COMPLETE);
}
static inline void save_mfc_slbs(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;
	int i;

	/* Save, Step 29:
	 *     If MFC_SR1[R]='1', save SLBs in CSA.
	 */
	if (spu_mfc_sr1_get(spu) & MFC_STATE1_RELOCATE_MASK) {
		csa->priv2.slb_index_W = in_be64(&priv2->slb_index_W);
		for (i = 0; i < 8; i++) {
			out_be64(&priv2->slb_index_W, i);
			eieio();
			csa->slb_esid_RW[i] = in_be64(&priv2->slb_esid_RW);
			csa->slb_vsid_RW[i] = in_be64(&priv2->slb_vsid_RW);
			eieio();
		}
	}
}

static inline void setup_mfc_sr1(struct spu_state *csa, struct spu *spu)
{
	/* Save, Step 30:
	 * Restore, Step 18:
	 *     Write MFC_SR1 with MFC_SR1[D=0,S=1] and
	 *     MFC_SR1[TL,R,Pr,T] set correctly for the
	 *     OS specific environment.
	 *
	 *     Implementation note: The SPU-side code
	 *     for save/restore is privileged, so the
	 *     MFC_SR1[Pr] bit is not set.
	 */
	spu_mfc_sr1_set(spu, (MFC_STATE1_MASTER_RUN_CONTROL_MASK |
			      MFC_STATE1_RELOCATE_MASK |
			      MFC_STATE1_BUS_TLBIE_MASK));
}
static inline void save_spu_npc(struct spu_state *csa, struct spu *spu)
{
	struct spu_problem __iomem *prob = spu->problem;

	/* Save, Step 31:
	 *     Save SPU_NPC in the CSA.
	 */
	csa->prob.spu_npc_RW = in_be32(&prob->spu_npc_RW);
}

static inline void save_spu_privcntl(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	/* Save, Step 32:
	 *     Save SPU_PrivCntl in the CSA.
	 */
	csa->priv2.spu_privcntl_RW = in_be64(&priv2->spu_privcntl_RW);
}

static inline void reset_spu_privcntl(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	/* Save, Step 33:
	 * Restore, Step 16:
	 *     Write SPU_PrivCntl[S,Le,A] fields reset to 0.
	 */
	out_be64(&priv2->spu_privcntl_RW, 0UL);
	eieio();
}

static inline void save_spu_lslr(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	/* Save, Step 34:
	 *     Save SPU_LSLR in the CSA.
	 */
	csa->priv2.spu_lslr_RW = in_be64(&priv2->spu_lslr_RW);
}

static inline void reset_spu_lslr(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	/* Save, Step 35:
	 * Restore, Step 17.
	 *     Reset the SPU_LSLR register to its default
	 *     value (no local-store limit).
	 */
	out_be64(&priv2->spu_lslr_RW, LS_ADDR_MASK);
	eieio();
}

static inline void save_spu_cfg(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	/* Save, Step 36:
	 *     Save SPU_Cfg in the CSA.
	 */
	csa->priv2.spu_cfg_RW = in_be64(&priv2->spu_cfg_RW);
}

static inline void save_pm_trace(struct spu_state *csa, struct spu *spu)
{
	/* Save, Step 37:
	 *     Save PM_Trace_Tag_Wait_Mask in the CSA.
	 *     Not performed by this implementation.
	 */
}

static inline void save_mfc_rag(struct spu_state *csa, struct spu *spu)
{
	/* Save, Step 38:
	 *     Save RA_GROUP_ID register and the
	 *     RA_ENABLE register in the CSA.
	 */
	csa->priv1.resource_allocation_groupID_RW =
	    spu_resource_allocation_groupID_get(spu);
	csa->priv1.resource_allocation_enable_RW =
	    spu_resource_allocation_enable_get(spu);
}
static inline void save_ppu_mb_stat(struct spu_state *csa, struct spu *spu)
{
	struct spu_problem __iomem *prob = spu->problem;

	/* Save, Step 39:
	 *     Save MB_Stat register in the CSA.
	 */
	csa->prob.mb_stat_R = in_be32(&prob->mb_stat_R);
}

static inline void save_ppu_mb(struct spu_state *csa, struct spu *spu)
{
	struct spu_problem __iomem *prob = spu->problem;

	/* Save, Step 40:
	 *     Save the PPU_MB register in the CSA.
	 */
	csa->prob.pu_mb_R = in_be32(&prob->pu_mb_R);
}

static inline void save_ppuint_mb(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	/* Save, Step 41:
	 *     Save the PPUINT_MB register in the CSA.
	 */
	csa->priv2.puint_mb_R = in_be64(&priv2->puint_mb_R);
}

static inline void save_ch_part1(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;
	u64 idx, ch_indices[7] = { 0UL, 1UL, 3UL, 4UL, 24UL, 25UL, 27UL };
	int i;

	/* Save, Step 42:
	 *     Save the following CH: [0,1,3,4,24,25,27]
	 */
	for (i = 0; i < 7; i++) {
		idx = ch_indices[i];
		out_be64(&priv2->spu_chnlcntptr_RW, idx);
		eieio();
		csa->spu_chnldata_RW[idx] = in_be64(&priv2->spu_chnldata_RW);
		csa->spu_chnlcnt_RW[idx] = in_be64(&priv2->spu_chnlcnt_RW);
		out_be64(&priv2->spu_chnldata_RW, 0UL);
		out_be64(&priv2->spu_chnlcnt_RW, 0UL);
		eieio();
	}
}
static inline void save_spu_mb(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;
	int i;

	/* Save, Step 43:
	 *     Save SPU Read Mailbox Channel.
	 */
	out_be64(&priv2->spu_chnlcntptr_RW, 29UL);
	eieio();
	csa->spu_chnlcnt_RW[29] = in_be64(&priv2->spu_chnlcnt_RW);
	for (i = 0; i < 4; i++) {
		csa->spu_mailbox_data[i] = in_be64(&priv2->spu_chnldata_RW);
	}
	out_be64(&priv2->spu_chnlcnt_RW, 0UL);
	eieio();
}

static inline void save_mfc_cmd(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	/* Save, Step 44:
	 *     Save MFC_CMD Channel.
	 */
	out_be64(&priv2->spu_chnlcntptr_RW, 21UL);
	eieio();
	csa->spu_chnlcnt_RW[21] = in_be64(&priv2->spu_chnlcnt_RW);
	eieio();
}

static inline void reset_ch(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;
	u64 ch_indices[4] = { 21UL, 23UL, 28UL, 30UL };
	u64 ch_counts[4] = { 16UL, 1UL, 1UL, 1UL };
	u64 idx;
	int i;

	/* Save, Step 45:
	 *     Reset the following CH: [21, 23, 28, 30]
	 */
	for (i = 0; i < 4; i++) {
		idx = ch_indices[i];
		out_be64(&priv2->spu_chnlcntptr_RW, idx);
		eieio();
		out_be64(&priv2->spu_chnlcnt_RW, ch_counts[i]);
		eieio();
	}
}

static inline void resume_mfc_queue(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	/* Save, Step 46:
	 * Restore, Step 25.
	 *     Write MFC_CNTL[Sc]=0 (resume queue processing).
	 */
	out_be64(&priv2->mfc_control_RW, MFC_CNTL_RESUME_DMA_QUEUE);
}
static inline void invalidate_slbs(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	/* Save, Step 47:
	 * Restore, Step 19:
	 *     If MFC_SR1[R]=1, write 0 to SLB_Invalidate_All.
	 */
	if (spu_mfc_sr1_get(spu) & MFC_STATE1_RELOCATE_MASK) {
		out_be64(&priv2->slb_invalidate_all_W, 0UL);
		eieio();
	}
}

static inline void get_kernel_slb(u64 ea, u64 slb[2])
{
	slb[0] = (get_kernel_vsid(ea) << SLB_VSID_SHIFT) | SLB_VSID_KERNEL;
	slb[1] = (ea & ESID_MASK) | SLB_ESID_V;

	/* Large pages are used for kernel text/data, but not vmalloc. */
	if (cpu_has_feature(CPU_FTR_16M_PAGE)
	    && REGION_ID(ea) == KERNEL_REGION_ID)
		slb[0] |= SLB_VSID_L;
}
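
/*
 * Editorial note (illustrative): get_kernel_slb() builds a software SLB
 * entry pair for a kernel effective address, in the layout the MFC SLB
 * registers expect: slb[0] is the VSID word (VSID, kernel protection
 * bits, and possibly the large-page bit L), and slb[1] is the ESID word
 * (ESID plus the valid bit).  For example, for the LSCSA address:
 *
 *	u64 slb[2];
 *	get_kernel_slb((unsigned long)csa->lscsa, slb);
 *	// slb[0] == (vsid << SLB_VSID_SHIFT) | SLB_VSID_KERNEL [| SLB_VSID_L]
 *	// slb[1] == (ea & ESID_MASK) | SLB_ESID_V
 */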
static inline void load_mfc_slb(struct spu *spu, u64 slb[2], int slbe)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	out_be64(&priv2->slb_index_W, slbe);
	eieio();
	out_be64(&priv2->slb_vsid_RW, slb[0]);
	out_be64(&priv2->slb_esid_RW, slb[1]);
	eieio();
}

static inline void setup_mfc_slbs(struct spu_state *csa, struct spu *spu)
{
	u64 code_slb[2];
	u64 lscsa_slb[2];

	/* Save, Step 47:
	 * Restore, Step 30.
	 *     If MFC_SR1[R]=1, write 0 to SLB_Invalidate_All
	 *     register, then initialize SLB_VSID and SLB_ESID
	 *     to provide access to SPU context save code and
	 *     LSCSA.
	 *
	 *     This implementation places both the context
	 *     switch code and LSCSA in kernel address space.
	 *
	 *     Further, this implementation assumes that the
	 *     MFC_SR1[R]=1 (in other words, assume that
	 *     translation is desired by OS environment).
	 */
	invalidate_slbs(csa, spu);
	get_kernel_slb((unsigned long)&spu_save_code[0], code_slb);
	get_kernel_slb((unsigned long)csa->lscsa, lscsa_slb);
	load_mfc_slb(spu, code_slb, 0);
	if ((lscsa_slb[0] != code_slb[0]) || (lscsa_slb[1] != code_slb[1]))
		load_mfc_slb(spu, lscsa_slb, 1);
}
static inline void set_switch_active(struct spu_state *csa, struct spu *spu)
{
	/* Save, Step 48:
	 * Restore, Step 23.
	 *     Change the software context switch pending flag
	 *     to context switch active.
	 */
	set_bit(SPU_CONTEXT_SWITCH_ACTIVE, &spu->flags);
	clear_bit(SPU_CONTEXT_SWITCH_PENDING, &spu->flags);
	mb();
}

static inline void enable_interrupts(struct spu_state *csa, struct spu *spu)
{
	unsigned long class1_mask = CLASS1_ENABLE_SEGMENT_FAULT_INTR |
	    CLASS1_ENABLE_STORAGE_FAULT_INTR;

	/* Save, Step 49:
	 * Restore, Step 22:
	 *     Reset and then enable interrupts, as
	 *     needed by OS.
	 *
	 *     This implementation enables only class1
	 *     (translation) interrupts.
	 */
	spin_lock_irq(&spu->register_lock);
	spu_int_stat_clear(spu, 0, ~0ul);
	spu_int_stat_clear(spu, 1, ~0ul);
	spu_int_stat_clear(spu, 2, ~0ul);
	spu_int_mask_set(spu, 0, 0ul);
	spu_int_mask_set(spu, 1, class1_mask);
	spu_int_mask_set(spu, 2, 0ul);
	spin_unlock_irq(&spu->register_lock);
}
static inline int send_mfc_dma(struct spu *spu, unsigned long ea,
			       unsigned int ls_offset, unsigned int size,
			       unsigned int tag, unsigned int rclass,
			       unsigned int cmd)
{
	struct spu_problem __iomem *prob = spu->problem;
	union mfc_tag_size_class_cmd command;
	unsigned int transfer_size;
	volatile unsigned int status = 0x0;

	while (size > 0) {
		transfer_size =
		    (size > MFC_MAX_DMA_SIZE) ? MFC_MAX_DMA_SIZE : size;
		command.u.mfc_size = transfer_size;
		command.u.mfc_tag = tag;
		command.u.mfc_rclassid = rclass;
		command.u.mfc_cmd = cmd;
		do {
			out_be32(&prob->mfc_lsa_W, ls_offset);
			out_be64(&prob->mfc_ea_W, ea);
			out_be64(&prob->mfc_union_W.all64, command.all64);
			status =
			    in_be32(&prob->mfc_union_W.by32.mfc_class_cmd32);
			if (unlikely(status & 0x2)) {
				cpu_relax();
			}
		} while (status & 0x3);
		size -= transfer_size;
		ea += transfer_size;
		ls_offset += transfer_size;
	}
	return 0;
}
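
/*
 * Editorial note (illustrative): send_mfc_dma() splits a transfer into
 * MFC_MAX_DMA_SIZE chunks and enqueues each via the problem-state MFC
 * registers, retrying while the command queue reports busy (status bits
 * 0x3).  save_ls_16kb() below shows a typical call, copying the first
 * 16KB of local storage to the CSA:
 *
 *	send_mfc_dma(spu, addr, ls_offset, size, tag, rclass, MFC_PUT_CMD);
 */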
static inline void save_ls_16kb(struct spu_state *csa, struct spu *spu)
{
	unsigned long addr = (unsigned long)&csa->lscsa->ls[0];
	unsigned int ls_offset = 0x0;
	unsigned int size = 16384;
	unsigned int tag = 0;
	unsigned int rclass = 0;
	unsigned int cmd = MFC_PUT_CMD;

	/* Save, Step 50:
	 *     Issue a DMA command to copy the first 16K bytes
	 *     of local storage to the CSA.
	 */
	send_mfc_dma(spu, addr, ls_offset, size, tag, rclass, cmd);
}

static inline void set_spu_npc(struct spu_state *csa, struct spu *spu)
{
	struct spu_problem __iomem *prob = spu->problem;

	/* Save, Step 51:
	 * Restore, Step 31.
	 *     Write SPU_NPC[IE]=0 and SPU_NPC[LSA] to entry
	 *     point address of context save code in local
	 *     storage.
	 *
	 *     This implementation uses SPU-side save/restore
	 *     programs with entry points at LSA of 0.
	 */
	out_be32(&prob->spu_npc_RW, 0);
	eieio();
}

static inline void set_signot1(struct spu_state *csa, struct spu *spu)
{
	struct spu_problem __iomem *prob = spu->problem;
	union {
		u64 ull;
		u32 ui[2];
	} addr64;

	/* Save, Step 52:
	 * Restore, Step 32:
	 *    Write SPU_Sig_Notify_1 register with upper 32-bits
	 *    of the CSA.LSCSA effective address.
	 */
	addr64.ull = (u64) csa->lscsa;
	out_be32(&prob->signal_notify1, addr64.ui[0]);
}

static inline void set_signot2(struct spu_state *csa, struct spu *spu)
{
	struct spu_problem __iomem *prob = spu->problem;
	union {
		u64 ull;
		u32 ui[2];
	} addr64;

	/* Save, Step 53:
	 * Restore, Step 33:
	 *    Write SPU_Sig_Notify_2 register with lower 32-bits
	 *    of the CSA.LSCSA effective address.
	 */
	addr64.ull = (u64) csa->lscsa;
	out_be32(&prob->signal_notify2, addr64.ui[1]);
}
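
/*
 * Editorial note: set_signot1()/set_signot2() pass the 64-bit effective
 * address of the LSCSA to the SPU-side program through the two 32-bit
 * signal notification registers (upper half in Sig_Notify_1, lower half
 * in Sig_Notify_2); the SPU-side save/restore code reassembles the
 * address from these two registers before issuing its own DMA commands.
 */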
static inline void send_save_code(struct spu_state *csa, struct spu *spu)
{
	unsigned long addr = (unsigned long)&spu_save_code[0];
	unsigned int ls_offset = 0x0;
	unsigned int size = sizeof(spu_save_code);
	unsigned int tag = 0;
	unsigned int rclass = 0;
	unsigned int cmd = MFC_GETFS_CMD;

	/* Save, Step 54:
	 *     Issue a DMA command to copy context save code
	 *     to local storage and start SPU.
	 */
	send_mfc_dma(spu, addr, ls_offset, size, tag, rclass, cmd);
}

static inline void set_ppu_querymask(struct spu_state *csa, struct spu *spu)
{
	struct spu_problem __iomem *prob = spu->problem;

	/* Save, Step 55:
	 * Restore, Step 38.
	 *     Write PPU_QueryMask=1 (enable Tag Group 0)
	 *     and issue eieio instruction.
	 */
	out_be32(&prob->dma_querymask_RW, MFC_TAGID_TO_TAGMASK(0));
	eieio();
}

static inline void wait_tag_complete(struct spu_state *csa, struct spu *spu)
{
	struct spu_problem __iomem *prob = spu->problem;
	u32 mask = MFC_TAGID_TO_TAGMASK(0);
	unsigned long flags;

	/* Save, Step 56:
	 * Restore, Step 39.
	 * Restore, Step 45.
	 *     Poll PPU_TagStatus[gn] until 01 (Tag group 0 complete)
	 *     or write PPU_QueryType[TS]=01 and wait for Tag Group
	 *     Complete Interrupt.  Write INT_Stat_Class0 or
	 *     INT_Stat_Class2 with value of 'handled'.
	 */
	POLL_WHILE_FALSE(in_be32(&prob->dma_tagstatus_R) & mask);

	local_irq_save(flags);
	spu_int_stat_clear(spu, 0, ~(0ul));
	spu_int_stat_clear(spu, 2, ~(0ul));
	local_irq_restore(flags);
}
static inline void wait_spu_stopped(struct spu_state *csa, struct spu *spu)
{
	struct spu_problem __iomem *prob = spu->problem;
	unsigned long flags;

	/* Save, Step 57:
	 * Restore, Step 40.
	 *     Poll until SPU_Status[R]=0 or wait for SPU Class 0
	 *     or SPU Class 2 interrupt.  Write INT_Stat_class0
	 *     or INT_Stat_class2 with value of handled.
	 */
	POLL_WHILE_TRUE(in_be32(&prob->spu_status_R) & SPU_STATUS_RUNNING);

	local_irq_save(flags);
	spu_int_stat_clear(spu, 0, ~(0ul));
	spu_int_stat_clear(spu, 2, ~(0ul));
	local_irq_restore(flags);
}

static inline int check_save_status(struct spu_state *csa, struct spu *spu)
{
	struct spu_problem __iomem *prob = spu->problem;
	u32 complete;

	/* Save, Step 54:
	 *     If SPU_Status[P]=1 and SPU_Status[SC] = "success",
	 *     context save succeeded, otherwise context save
	 *     failed.
	 */
	complete = ((SPU_SAVE_COMPLETE << SPU_STOP_STATUS_SHIFT) |
		    SPU_STATUS_STOPPED_BY_STOP);
	return (in_be32(&prob->spu_status_R) != complete) ? 1 : 0;
}

static inline void terminate_spu_app(struct spu_state *csa, struct spu *spu)
{
	/* Restore, Step 4:
	 *    If required, notify the "using application" that
	 *    the SPU task has been terminated.  TBD.
	 */
}

static inline void suspend_mfc(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	/* Restore, Step 7:
	 * Restore, Step 46.
	 *     Write MFC_Cntl[Dh,Sc]='1','1' to suspend
	 *     the queue and halt the decrementer.
	 */
	out_be64(&priv2->mfc_control_RW, MFC_CNTL_SUSPEND_DMA_QUEUE |
		 MFC_CNTL_DECREMENTER_HALTED);
	eieio();
}
static inline void wait_suspend_mfc_complete(struct spu_state *csa,
					     struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	/* Restore, Step 8:
	 * Restore, Step 47.
	 *     Poll MFC_CNTL[Ss] until 11 is returned.
	 */
	POLL_WHILE_FALSE(in_be64(&priv2->mfc_control_RW) &
			 MFC_CNTL_SUSPEND_COMPLETE);
}

static inline int suspend_spe(struct spu_state *csa, struct spu *spu)
{
	struct spu_problem __iomem *prob = spu->problem;

	/* Restore, Step 9:
	 *    If SPU_Status[R]=1, stop SPU execution
	 *    and wait for stop to complete.
	 *
	 *    Returns       1 if SPU_Status[R]=1 on entry.
	 *                  0 otherwise
	 */
	if (in_be32(&prob->spu_status_R) & SPU_STATUS_RUNNING) {
		if (in_be32(&prob->spu_status_R) &
		    SPU_STATUS_ISOLATED_EXIT_STAUTUS) {
			POLL_WHILE_TRUE(in_be32(&prob->spu_status_R) &
					SPU_STATUS_RUNNING);
		}
		if ((in_be32(&prob->spu_status_R) &
		     SPU_STATUS_ISOLATED_LOAD_STAUTUS)
		    || (in_be32(&prob->spu_status_R) &
			SPU_STATUS_ISOLATED_STATE)) {
			out_be32(&prob->spu_runcntl_RW, SPU_RUNCNTL_STOP);
			eieio();
			POLL_WHILE_TRUE(in_be32(&prob->spu_status_R) &
					SPU_STATUS_RUNNING);
			out_be32(&prob->spu_runcntl_RW, 0x2);
			eieio();
			POLL_WHILE_TRUE(in_be32(&prob->spu_status_R) &
					SPU_STATUS_RUNNING);
		}
		if (in_be32(&prob->spu_status_R) &
		    SPU_STATUS_WAITING_FOR_CHANNEL) {
			out_be32(&prob->spu_runcntl_RW, SPU_RUNCNTL_STOP);
			eieio();
			POLL_WHILE_TRUE(in_be32(&prob->spu_status_R) &
					SPU_STATUS_RUNNING);
		}
		return 1;
	}
	return 0;
}
static inline void clear_spu_status(struct spu_state *csa, struct spu *spu)
{
	struct spu_problem __iomem *prob = spu->problem;

	/* Restore, Step 10:
	 *    If SPU_Status[R]=0 and SPU_Status[E,L,IS]=1,
	 *    release SPU from isolate state.
	 */
	if (!(in_be32(&prob->spu_status_R) & SPU_STATUS_RUNNING)) {
		if (in_be32(&prob->spu_status_R) &
		    SPU_STATUS_ISOLATED_EXIT_STAUTUS) {
			spu_mfc_sr1_set(spu,
					MFC_STATE1_MASTER_RUN_CONTROL_MASK);
			eieio();
			out_be32(&prob->spu_runcntl_RW, SPU_RUNCNTL_RUNNABLE);
			eieio();
			POLL_WHILE_TRUE(in_be32(&prob->spu_status_R) &
					SPU_STATUS_RUNNING);
		}
		if ((in_be32(&prob->spu_status_R) &
		     SPU_STATUS_ISOLATED_LOAD_STAUTUS)
		    || (in_be32(&prob->spu_status_R) &
			SPU_STATUS_ISOLATED_STATE)) {
			spu_mfc_sr1_set(spu,
					MFC_STATE1_MASTER_RUN_CONTROL_MASK);
			eieio();
			out_be32(&prob->spu_runcntl_RW, 0x2);
			eieio();
			POLL_WHILE_TRUE(in_be32(&prob->spu_status_R) &
					SPU_STATUS_RUNNING);
		}
	}
}

static inline void reset_ch_part1(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;
	u64 ch_indices[7] = { 0UL, 1UL, 3UL, 4UL, 24UL, 25UL, 27UL };
	u64 idx;
	int i;

	/* Restore, Step 20:
	 *     Reset the following CH: [0,1,3,4,24,25,27]
	 */
	for (i = 0; i < 7; i++) {
		idx = ch_indices[i];
		out_be64(&priv2->spu_chnlcntptr_RW, idx);
		eieio();
		out_be64(&priv2->spu_chnldata_RW, 0UL);
		out_be64(&priv2->spu_chnlcnt_RW, 0UL);
		eieio();
	}
}

static inline void reset_ch_part2(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;
	u64 ch_indices[5] = { 21UL, 23UL, 28UL, 29UL, 30UL };
	u64 ch_counts[5] = { 16UL, 1UL, 1UL, 0UL, 1UL };
	u64 idx;
	int i;

	/* Restore, Step 21:
	 *     Reset the following CH: [21, 23, 28, 29, 30]
	 */
	for (i = 0; i < 5; i++) {
		idx = ch_indices[i];
		out_be64(&priv2->spu_chnlcntptr_RW, idx);
		eieio();
		out_be64(&priv2->spu_chnlcnt_RW, ch_counts[i]);
		eieio();
	}
}
static inline void setup_spu_status_part1(struct spu_state *csa,
					  struct spu *spu)
{
	u32 status_P = SPU_STATUS_STOPPED_BY_STOP;
	u32 status_I = SPU_STATUS_INVALID_INSTR;
	u32 status_H = SPU_STATUS_STOPPED_BY_HALT;
	u32 status_S = SPU_STATUS_SINGLE_STEP;
	u32 status_S_I = SPU_STATUS_SINGLE_STEP | SPU_STATUS_INVALID_INSTR;
	u32 status_S_P = SPU_STATUS_SINGLE_STEP | SPU_STATUS_STOPPED_BY_STOP;
	u32 status_P_H = SPU_STATUS_STOPPED_BY_HALT | SPU_STATUS_STOPPED_BY_STOP;
	u32 status_P_I = SPU_STATUS_STOPPED_BY_STOP | SPU_STATUS_INVALID_INSTR;
	u32 status_code;

	/* Restore, Step 27:
	 *     If the CSA.SPU_Status[I,S,H,P]=1 then add the correct
	 *     instruction sequence to the end of the SPU based restore
	 *     code (after the "context restored" stop and signal) to
	 *     restore the correct SPU status.
	 *
	 *     NOTE: Rather than modifying the SPU executable, we
	 *     instead add a new 'stopped_status' field to the
	 *     LSCSA.  The SPU-side restore reads this field and
	 *     takes the appropriate action when exiting.
	 */
	status_code =
	    (csa->prob.spu_status_R >> SPU_STOP_STATUS_SHIFT) & 0xFFFF;
	if ((csa->prob.spu_status_R & status_P_I) == status_P_I) {

		/* SPU_Status[P,I]=1 - Illegal Instruction followed
		 * by Stop and Signal instruction, followed by 'br -4'.
		 */
		csa->lscsa->stopped_status.slot[0] = SPU_STOPPED_STATUS_P_I;
		csa->lscsa->stopped_status.slot[1] = status_code;

	} else if ((csa->prob.spu_status_R & status_P_H) == status_P_H) {

		/* SPU_Status[P,H]=1 - Halt Conditional, followed
		 * by Stop and Signal instruction, followed by
		 * 'br -4'.
		 */
		csa->lscsa->stopped_status.slot[0] = SPU_STOPPED_STATUS_P_H;
		csa->lscsa->stopped_status.slot[1] = status_code;

	} else if ((csa->prob.spu_status_R & status_S_P) == status_S_P) {

		/* SPU_Status[S,P]=1 - Stop and Signal instruction
		 * followed by 'br -4'.
		 */
		csa->lscsa->stopped_status.slot[0] = SPU_STOPPED_STATUS_S_P;
		csa->lscsa->stopped_status.slot[1] = status_code;

	} else if ((csa->prob.spu_status_R & status_S_I) == status_S_I) {

		/* SPU_Status[S,I]=1 - Illegal instruction followed
		 * by 'br -4'.
		 */
		csa->lscsa->stopped_status.slot[0] = SPU_STOPPED_STATUS_S_I;
		csa->lscsa->stopped_status.slot[1] = status_code;

	} else if ((csa->prob.spu_status_R & status_P) == status_P) {

		/* SPU_Status[P]=1 - Stop and Signal instruction
		 * followed by 'br -4'.
		 */
		csa->lscsa->stopped_status.slot[0] = SPU_STOPPED_STATUS_P;
		csa->lscsa->stopped_status.slot[1] = status_code;

	} else if ((csa->prob.spu_status_R & status_H) == status_H) {

		/* SPU_Status[H]=1 - Halt Conditional, followed
		 * by 'br -4'.
		 */
		csa->lscsa->stopped_status.slot[0] = SPU_STOPPED_STATUS_H;

	} else if ((csa->prob.spu_status_R & status_S) == status_S) {

		/* SPU_Status[S]=1 - Two nop instructions.
		 */
		csa->lscsa->stopped_status.slot[0] = SPU_STOPPED_STATUS_S;

	} else if ((csa->prob.spu_status_R & status_I) == status_I) {

		/* SPU_Status[I]=1 - Illegal instruction followed
		 * by 'br -4'.
		 */
		csa->lscsa->stopped_status.slot[0] = SPU_STOPPED_STATUS_I;

	}
}

static inline void setup_spu_status_part2(struct spu_state *csa,
					  struct spu *spu)
{
	u32 mask;

	/* Restore, Step 28:
	 *     If the CSA.SPU_Status[I,S,H,P,R]=0 then
	 *     add a 'br *' instruction to the end of
	 *     the SPU based restore code.
	 *
	 *     NOTE: Rather than modifying the SPU executable, we
	 *     instead add a new 'stopped_status' field to the
	 *     LSCSA.  The SPU-side restore reads this field and
	 *     takes the appropriate action when exiting.
	 */
	mask = SPU_STATUS_INVALID_INSTR |
	    SPU_STATUS_SINGLE_STEP |
	    SPU_STATUS_STOPPED_BY_HALT |
	    SPU_STATUS_STOPPED_BY_STOP | SPU_STATUS_RUNNING;
	if (!(csa->prob.spu_status_R & mask)) {
		csa->lscsa->stopped_status.slot[0] = SPU_STOPPED_STATUS_R;
	}
}
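
/*
 * Editorial note (sketch, not the actual SPU-side code): the
 * stopped_status mechanism keeps the SPU restore executable generic.
 * Conceptually, the SPU-side epilogue dispatches on slot[0], e.g.:
 *
 *	switch (lscsa->stopped_status.slot[0]) {
 *	case SPU_STOPPED_STATUS_P:
 *		// re-execute stop-and-signal with the saved status code
 *		break;
 *	// ... other cases reproduce halt/illegal/single-step exits ...
 *	}
 */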
static inline void restore_mfc_rag(struct spu_state *csa, struct spu *spu)
{
	/* Restore, Step 29:
	 *     Restore RA_GROUP_ID register and the
	 *     RA_ENABLE register from the CSA.
	 */
	spu_resource_allocation_groupID_set(spu,
			csa->priv1.resource_allocation_groupID_RW);
	spu_resource_allocation_enable_set(spu,
			csa->priv1.resource_allocation_enable_RW);
}

static inline void send_restore_code(struct spu_state *csa, struct spu *spu)
{
	unsigned long addr = (unsigned long)&spu_restore_code[0];
	unsigned int ls_offset = 0x0;
	unsigned int size = sizeof(spu_restore_code);
	unsigned int tag = 0;
	unsigned int rclass = 0;
	unsigned int cmd = MFC_GETFS_CMD;

	/* Restore, Step 37:
	 *     Issue MFC DMA command to copy context
	 *     restore code to local storage.
	 */
	send_mfc_dma(spu, addr, ls_offset, size, tag, rclass, cmd);
}

static inline void setup_decr(struct spu_state *csa, struct spu *spu)
{
	/* Restore, Step 34:
	 *     If CSA.MFC_CNTL[Ds]=1 (decrementer was
	 *     running) then adjust decrementer, set
	 *     decrementer running status in LSCSA,
	 *     and set decrementer "wrapped" status
	 *     in LSCSA.
	 */
	if (csa->priv2.mfc_control_RW & MFC_CNTL_DECREMENTER_RUNNING) {
		cycles_t resume_time = get_cycles();
		cycles_t delta_time = resume_time - csa->suspend_time;

		csa->lscsa->decr.slot[0] = delta_time;
	}
}
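
/*
 * Editorial note: setup_decr() measures the suspension interval in
 * timebase cycles (get_cycles() now, minus csa->suspend_time recorded
 * during save) and passes it to the SPU-side restore via lscsa->decr,
 * so the restored decrementer can be adjusted for the time spent
 * switched out.
 */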
static inline void setup_ppu_mb(struct spu_state *csa, struct spu *spu)
{
	/* Restore, Step 35:
	 *     Copy the CSA.PU_MB data into the LSCSA.
	 */
	csa->lscsa->ppu_mb.slot[0] = csa->prob.pu_mb_R;
}

static inline void setup_ppuint_mb(struct spu_state *csa, struct spu *spu)
{
	/* Restore, Step 36:
	 *     Copy the CSA.PUINT_MB data into the LSCSA.
	 */
	csa->lscsa->ppuint_mb.slot[0] = csa->priv2.puint_mb_R;
}

static inline int check_restore_status(struct spu_state *csa, struct spu *spu)
{
	struct spu_problem __iomem *prob = spu->problem;
	u32 complete;

	/* Restore, Step 40:
	 *     If SPU_Status[P]=1 and SPU_Status[SC] = "success",
	 *     context restore succeeded, otherwise context restore
	 *     failed.
	 */
	complete = ((SPU_RESTORE_COMPLETE << SPU_STOP_STATUS_SHIFT) |
		    SPU_STATUS_STOPPED_BY_STOP);
	return (in_be32(&prob->spu_status_R) != complete) ? 1 : 0;
}

static inline void restore_spu_privcntl(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	/* Restore, Step 41:
	 *     Restore SPU_PrivCntl from the CSA.
	 */
	out_be64(&priv2->spu_privcntl_RW, csa->priv2.spu_privcntl_RW);
	eieio();
}
static inline void restore_status_part1(struct spu_state *csa, struct spu *spu)
{
	struct spu_problem __iomem *prob = spu->problem;
	u32 mask;

	/* Restore, Step 42:
	 *     If any CSA.SPU_Status[I,S,H,P]=1, then
	 *     restore the error or single step state.
	 */
	mask = SPU_STATUS_INVALID_INSTR |
	    SPU_STATUS_SINGLE_STEP |
	    SPU_STATUS_STOPPED_BY_HALT | SPU_STATUS_STOPPED_BY_STOP;
	if (csa->prob.spu_status_R & mask) {
		out_be32(&prob->spu_runcntl_RW, SPU_RUNCNTL_RUNNABLE);
		eieio();
		POLL_WHILE_TRUE(in_be32(&prob->spu_status_R) &
				SPU_STATUS_RUNNING);
	}
}

static inline void restore_status_part2(struct spu_state *csa, struct spu *spu)
{
	struct spu_problem __iomem *prob = spu->problem;
	u32 mask;

	/* Restore, Step 43:
	 *     If all CSA.SPU_Status[I,S,H,P,R]=0 then write
	 *     SPU_RunCntl[R0R1]='01', wait for SPU_Status[R]=1,
	 *     then write '00' to SPU_RunCntl[R0R1] and wait
	 *     for SPU_Status[R]=0.
	 */
	mask = SPU_STATUS_INVALID_INSTR |
	    SPU_STATUS_SINGLE_STEP |
	    SPU_STATUS_STOPPED_BY_HALT |
	    SPU_STATUS_STOPPED_BY_STOP | SPU_STATUS_RUNNING;
	if (!(csa->prob.spu_status_R & mask)) {
		out_be32(&prob->spu_runcntl_RW, SPU_RUNCNTL_RUNNABLE);
		eieio();
		POLL_WHILE_FALSE(in_be32(&prob->spu_status_R) &
				 SPU_STATUS_RUNNING);
		out_be32(&prob->spu_runcntl_RW, SPU_RUNCNTL_STOP);
		eieio();
		POLL_WHILE_TRUE(in_be32(&prob->spu_status_R) &
				SPU_STATUS_RUNNING);
	}
}
static inline void restore_ls_16kb(struct spu_state *csa, struct spu *spu)
{
	unsigned long addr = (unsigned long)&csa->lscsa->ls[0];
	unsigned int ls_offset = 0x0;
	unsigned int size = 16384;
	unsigned int tag = 0;
	unsigned int rclass = 0;
	unsigned int cmd = MFC_GET_CMD;

	/* Restore, Step 44:
	 *     Issue a DMA command to restore the first
	 *     16kb of local storage from CSA.
	 */
	send_mfc_dma(spu, addr, ls_offset, size, tag, rclass, cmd);
}

static inline void clear_interrupts(struct spu_state *csa, struct spu *spu)
{
	/* Restore, Step 49:
	 *     Write INT_MASK_class0 with value of 0.
	 *     Write INT_MASK_class1 with value of 0.
	 *     Write INT_MASK_class2 with value of 0.
	 *     Write INT_STAT_class0 with value of -1.
	 *     Write INT_STAT_class1 with value of -1.
	 *     Write INT_STAT_class2 with value of -1.
	 */
	spin_lock_irq(&spu->register_lock);
	spu_int_mask_set(spu, 0, 0ul);
	spu_int_mask_set(spu, 1, 0ul);
	spu_int_mask_set(spu, 2, 0ul);
	spu_int_stat_clear(spu, 0, ~0ul);
	spu_int_stat_clear(spu, 1, ~0ul);
	spu_int_stat_clear(spu, 2, ~0ul);
	spin_unlock_irq(&spu->register_lock);
}
static inline void restore_mfc_queues(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;
	int i;

	/* Restore, Step 50:
	 *     If MFC_Cntl[Se]!=0 then restore
	 *     MFC command queues.
	 */
	if ((csa->priv2.mfc_control_RW & MFC_CNTL_DMA_QUEUES_EMPTY_MASK) == 0) {
		for (i = 0; i < 8; i++) {
			out_be64(&priv2->puq[i].mfc_cq_data0_RW,
				 csa->priv2.puq[i].mfc_cq_data0_RW);
			out_be64(&priv2->puq[i].mfc_cq_data1_RW,
				 csa->priv2.puq[i].mfc_cq_data1_RW);
			out_be64(&priv2->puq[i].mfc_cq_data2_RW,
				 csa->priv2.puq[i].mfc_cq_data2_RW);
			out_be64(&priv2->puq[i].mfc_cq_data3_RW,
				 csa->priv2.puq[i].mfc_cq_data3_RW);
		}
		for (i = 0; i < 16; i++) {
			out_be64(&priv2->spuq[i].mfc_cq_data0_RW,
				 csa->priv2.spuq[i].mfc_cq_data0_RW);
			out_be64(&priv2->spuq[i].mfc_cq_data1_RW,
				 csa->priv2.spuq[i].mfc_cq_data1_RW);
			out_be64(&priv2->spuq[i].mfc_cq_data2_RW,
				 csa->priv2.spuq[i].mfc_cq_data2_RW);
			out_be64(&priv2->spuq[i].mfc_cq_data3_RW,
				 csa->priv2.spuq[i].mfc_cq_data3_RW);
		}
	}
	eieio();
}
static inline void restore_ppu_querymask(struct spu_state *csa, struct spu *spu)
{
	struct spu_problem __iomem *prob = spu->problem;

	/* Restore, Step 51:
	 *     Restore the PPU_QueryMask register from CSA.
	 */
	out_be32(&prob->dma_querymask_RW, csa->prob.dma_querymask_RW);
	eieio();
}

static inline void restore_ppu_querytype(struct spu_state *csa, struct spu *spu)
{
	struct spu_problem __iomem *prob = spu->problem;

	/* Restore, Step 52:
	 *     Restore the PPU_QueryType register from CSA.
	 */
	out_be32(&prob->dma_querytype_RW, csa->prob.dma_querytype_RW);
	eieio();
}

static inline void restore_mfc_csr_tsq(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	/* Restore, Step 53:
	 *     Restore the MFC_CSR_TSQ register from CSA.
	 */
	out_be64(&priv2->spu_tag_status_query_RW,
		 csa->priv2.spu_tag_status_query_RW);
	eieio();
}

static inline void restore_mfc_csr_cmd(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	/* Restore, Step 54:
	 *     Restore the MFC_CSR_CMD1 and MFC_CSR_CMD2
	 *     registers from CSA.
	 */
	out_be64(&priv2->spu_cmd_buf1_RW, csa->priv2.spu_cmd_buf1_RW);
	out_be64(&priv2->spu_cmd_buf2_RW, csa->priv2.spu_cmd_buf2_RW);
	eieio();
}

static inline void restore_mfc_csr_ato(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	/* Restore, Step 55:
	 *     Restore the MFC_CSR_ATO register from CSA.
	 */
	out_be64(&priv2->spu_atomic_status_RW, csa->priv2.spu_atomic_status_RW);
}

static inline void restore_mfc_tclass_id(struct spu_state *csa, struct spu *spu)
{
	/* Restore, Step 56:
	 *     Restore the MFC_TCLASS_ID register from CSA.
	 */
	spu_mfc_tclass_id_set(spu, csa->priv1.mfc_tclass_id_RW);
	eieio();
}
static inline void set_llr_event(struct spu_state *csa, struct spu *spu)
{
	u64 ch0_cnt, ch0_data;
	u64 ch1_data;

	/* Restore, Step 57:
	 *    Set the Lock Line Reservation Lost Event by:
	 *      1. OR CSA.SPU_Event_Status with bit 21 (Lr) set to 1.
	 *      2. If CSA.SPU_Channel_0_Count=0 and
	 *         CSA.SPU_Wr_Event_Mask[Lr]=1 and
	 *         CSA.SPU_Event_Status[Lr]=0 then set
	 *         CSA.SPU_Event_Status_Count=1.
	 */
	ch0_cnt = csa->spu_chnlcnt_RW[0];
	ch0_data = csa->spu_chnldata_RW[0];
	ch1_data = csa->spu_chnldata_RW[1];
	csa->spu_chnldata_RW[0] |= MFC_LLR_LOST_EVENT;
	if ((ch0_cnt == 0) && !(ch0_data & MFC_LLR_LOST_EVENT) &&
	    (ch1_data & MFC_LLR_LOST_EVENT)) {
		csa->spu_chnlcnt_RW[0] = 1;
	}
}

static inline void restore_decr_wrapped(struct spu_state *csa, struct spu *spu)
{
	/* Restore, Step 58:
	 *    If the status of the CSA software decrementer
	 *    "wrapped" flag is set, OR in a '1' to
	 *    CSA.SPU_Event_Status[Tm].
	 */
	if (csa->lscsa->decr_status.slot[0] == 1) {
		csa->spu_chnldata_RW[0] |= 0x20;
	}
	if ((csa->lscsa->decr_status.slot[0] == 1) &&
	    (csa->spu_chnlcnt_RW[0] == 0 &&
	     ((csa->spu_chnldata_RW[2] & 0x20) == 0x0) &&
	     ((csa->spu_chnldata_RW[0] & 0x20) != 0x1))) {
		csa->spu_chnlcnt_RW[0] = 1;
	}
}
static inline void restore_ch_part1(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;
	u64 idx, ch_indices[7] = { 0UL, 1UL, 3UL, 4UL, 24UL, 25UL, 27UL };
	int i;

	/* Restore, Step 59:
	 *     Restore the following CH: [0,1,3,4,24,25,27]
	 */
	for (i = 0; i < 7; i++) {
		idx = ch_indices[i];
		out_be64(&priv2->spu_chnlcntptr_RW, idx);
		eieio();
		out_be64(&priv2->spu_chnldata_RW, csa->spu_chnldata_RW[idx]);
		out_be64(&priv2->spu_chnlcnt_RW, csa->spu_chnlcnt_RW[idx]);
		eieio();
	}
}

static inline void restore_ch_part2(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;
	u64 ch_indices[3] = { 9UL, 21UL, 23UL };
	u64 ch_counts[3] = { 1UL, 16UL, 1UL };
	u64 idx;
	int i;

	/* Restore, Step 60:
	 *     Restore the following CH: [9,21,23].
	 */
	ch_counts[1] = csa->spu_chnlcnt_RW[21];
	for (i = 0; i < 3; i++) {
		idx = ch_indices[i];
		out_be64(&priv2->spu_chnlcntptr_RW, idx);
		eieio();
		out_be64(&priv2->spu_chnlcnt_RW, ch_counts[i]);
		eieio();
	}
}
static inline void restore_spu_lslr(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	/* Restore, Step 61:
	 *     Restore the SPU_LSLR register from CSA.
	 */
	out_be64(&priv2->spu_lslr_RW, csa->priv2.spu_lslr_RW);
	eieio();
}

static inline void restore_spu_cfg(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	/* Restore, Step 62:
	 *     Restore the SPU_Cfg register from CSA.
	 */
	out_be64(&priv2->spu_cfg_RW, csa->priv2.spu_cfg_RW);
	eieio();
}

static inline void restore_pm_trace(struct spu_state *csa, struct spu *spu)
{
	/* Restore, Step 63:
	 *     Restore PM_Trace_Tag_Wait_Mask from CSA.
	 *     Not performed by this implementation.
	 */
}

static inline void restore_spu_npc(struct spu_state *csa, struct spu *spu)
{
	struct spu_problem __iomem *prob = spu->problem;

	/* Restore, Step 64:
	 *     Restore SPU_NPC from CSA.
	 */
	out_be32(&prob->spu_npc_RW, csa->prob.spu_npc_RW);
	eieio();
}

static inline void restore_spu_mb(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;
	int i;

	/* Restore, Step 65:
	 *     Restore MFC_RdSPU_MB from CSA.
	 */
	out_be64(&priv2->spu_chnlcntptr_RW, 29UL);
	eieio();
	out_be64(&priv2->spu_chnlcnt_RW, csa->spu_chnlcnt_RW[29]);
	for (i = 0; i < 4; i++) {
		out_be64(&priv2->spu_chnldata_RW, csa->spu_mailbox_data[i]);
	}
	eieio();
}

static inline void check_ppu_mb_stat(struct spu_state *csa, struct spu *spu)
{
	struct spu_problem __iomem *prob = spu->problem;
	u32 dummy = 0;

	/* Restore, Step 66:
	 *     If CSA.MB_Stat[P]=0 (mailbox empty) then
	 *     read from the PPU_MB register.
	 */
	if ((csa->prob.mb_stat_R & 0xFF) == 0) {
		dummy = in_be32(&prob->pu_mb_R);
		eieio();
	}
}

static inline void check_ppuint_mb_stat(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;
	u64 dummy = 0UL;

	/* Restore, Step 67:
	 *     If CSA.MB_Stat[I]=0 (mailbox empty) then
	 *     read from the PPUINT_MB register.
	 */
	if ((csa->prob.mb_stat_R & 0xFF0000) == 0) {
		dummy = in_be64(&priv2->puint_mb_R);
		eieio();
		spu_int_stat_clear(spu, 2, CLASS2_ENABLE_MAILBOX_INTR);
	}
}
static inline void restore_mfc_slbs(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;
	int i;

	/* Restore, Step 68:
	 *     If MFC_SR1[R]='1', restore SLBs from CSA.
	 */
	if (csa->priv1.mfc_sr1_RW & MFC_STATE1_RELOCATE_MASK) {
		for (i = 0; i < 8; i++) {
			out_be64(&priv2->slb_index_W, i);
			eieio();
			out_be64(&priv2->slb_esid_RW, csa->slb_esid_RW[i]);
			out_be64(&priv2->slb_vsid_RW, csa->slb_vsid_RW[i]);
			eieio();
		}
		out_be64(&priv2->slb_index_W, csa->priv2.slb_index_W);
		eieio();
	}
}

static inline void restore_mfc_sr1(struct spu_state *csa, struct spu *spu)
{
	/* Restore, Step 69:
	 *     Restore the MFC_SR1 register from CSA.
	 */
	spu_mfc_sr1_set(spu, csa->priv1.mfc_sr1_RW);
	eieio();
}

static inline void restore_other_spu_access(struct spu_state *csa,
					    struct spu *spu)
{
	/* Restore, Step 70:
	 *     Restore other SPU mappings to this SPU. TBD.
	 */
}

static inline void restore_spu_runcntl(struct spu_state *csa, struct spu *spu)
{
	struct spu_problem __iomem *prob = spu->problem;

	/* Restore, Step 71:
	 *     If CSA.SPU_Status[R]=1 then write
	 *     SPU_RunCntl[R0R1]='01'.
	 */
	if (csa->prob.spu_status_R & SPU_STATUS_RUNNING) {
		out_be32(&prob->spu_runcntl_RW, SPU_RUNCNTL_RUNNABLE);
		eieio();
	}
}

static inline void restore_mfc_cntl(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	/* Restore, Step 72:
	 *     Restore the MFC_CNTL register from the CSA.
	 */
	out_be64(&priv2->mfc_control_RW, csa->priv2.mfc_control_RW);
	eieio();
}

static inline void enable_user_access(struct spu_state *csa, struct spu *spu)
{
	/* Restore, Step 73:
	 *     Enable user-space access (if provided) to this
	 *     SPU by mapping the virtual pages assigned to
	 *     the SPU memory-mapped I/O (MMIO) for problem
	 *     state. TBD.
	 */
}

static inline void reset_switch_active(struct spu_state *csa, struct spu *spu)
{
	/* Restore, Step 74:
	 *     Reset the "context switch active" flag.
	 */
	clear_bit(SPU_CONTEXT_SWITCH_ACTIVE, &spu->flags);
	mb();
}
static inline void reenable_interrupts(struct spu_state *csa, struct spu *spu)
{
	/* Restore, Step 75:
	 *     Re-enable SPU interrupts.
	 */
	spin_lock_irq(&spu->register_lock);
	spu_int_mask_set(spu, 0, csa->priv1.int_mask_class0_RW);
	spu_int_mask_set(spu, 1, csa->priv1.int_mask_class1_RW);
	spu_int_mask_set(spu, 2, csa->priv1.int_mask_class2_RW);
	spin_unlock_irq(&spu->register_lock);
}

static int quiece_spu(struct spu_state *prev, struct spu *spu)
{
	/*
	 * Combined steps 2-18 of SPU context save sequence, which
	 * quiesce the SPU state (disable SPU execution, MFC command
	 * queues, decrementer, SPU interrupts, etc.).
	 *
	 * Returns      0 on success.
	 *              2 if failed step 2.
	 *              6 if failed step 6.
	 */

	if (check_spu_isolate(prev, spu)) {	/* Step 2. */
		return 2;
	}
	disable_interrupts(prev, spu);	        /* Step 3. */
	set_watchdog_timer(prev, spu);	        /* Step 4. */
	inhibit_user_access(prev, spu);	        /* Step 5. */
	if (check_spu_isolate(prev, spu)) {	/* Step 6. */
		return 6;
	}
	set_switch_pending(prev, spu);	        /* Step 7. */
	save_mfc_cntl(prev, spu);	        /* Step 8. */
	save_spu_runcntl(prev, spu);	        /* Step 9. */
	save_mfc_sr1(prev, spu);	        /* Step 10. */
	save_spu_status(prev, spu);	        /* Step 11. */
	save_mfc_decr(prev, spu);	        /* Step 12. */
	halt_mfc_decr(prev, spu);	        /* Step 13. */
	save_timebase(prev, spu);	        /* Step 14. */
	remove_other_spu_access(prev, spu);	/* Step 15. */
	do_mfc_mssync(prev, spu);	        /* Step 16. */
	issue_mfc_tlbie(prev, spu);	        /* Step 17. */
	handle_pending_interrupts(prev, spu);	/* Step 18. */

	return 0;
}
static void save_csa(struct spu_state *prev, struct spu *spu)
{
	/*
	 * Combine steps 19-45 of SPU context save sequence, which
	 * save regions of the privileged & problem state areas.
	 */

	save_mfc_queues(prev, spu);	/* Step 19. */
	save_ppu_querymask(prev, spu);	/* Step 20. */
	save_ppu_querytype(prev, spu);	/* Step 21. */
	save_mfc_csr_tsq(prev, spu);	/* Step 22. */
	save_mfc_csr_cmd(prev, spu);	/* Step 23. */
	save_mfc_csr_ato(prev, spu);	/* Step 24. */
	save_mfc_tclass_id(prev, spu);	/* Step 25. */
	set_mfc_tclass_id(prev, spu);	/* Step 26. */
	purge_mfc_queue(prev, spu);	/* Step 27. */
	wait_purge_complete(prev, spu);	/* Step 28. */
	save_mfc_slbs(prev, spu);	/* Step 29. */
	setup_mfc_sr1(prev, spu);	/* Step 30. */
	save_spu_npc(prev, spu);	/* Step 31. */
	save_spu_privcntl(prev, spu);	/* Step 32. */
	reset_spu_privcntl(prev, spu);	/* Step 33. */
	save_spu_lslr(prev, spu);	/* Step 34. */
	reset_spu_lslr(prev, spu);	/* Step 35. */
	save_spu_cfg(prev, spu);	/* Step 36. */
	save_pm_trace(prev, spu);	/* Step 37. */
	save_mfc_rag(prev, spu);	/* Step 38. */
	save_ppu_mb_stat(prev, spu);	/* Step 39. */
	save_ppu_mb(prev, spu);	        /* Step 40. */
	save_ppuint_mb(prev, spu);	/* Step 41. */
	save_ch_part1(prev, spu);	/* Step 42. */
	save_spu_mb(prev, spu);	        /* Step 43. */
	save_mfc_cmd(prev, spu);	/* Step 44. */
	reset_ch(prev, spu);	        /* Step 45. */
}

static void save_lscsa(struct spu_state *prev, struct spu *spu)
{
	/*
	 * Perform steps 46-57 of SPU context save sequence,
	 * which save regions of the local store and register
	 * file.
	 */

	resume_mfc_queue(prev, spu);	/* Step 46. */
	setup_mfc_slbs(prev, spu);	/* Step 47. */
	set_switch_active(prev, spu);	/* Step 48. */
	enable_interrupts(prev, spu);	/* Step 49. */
	save_ls_16kb(prev, spu);	/* Step 50. */
	set_spu_npc(prev, spu);	        /* Step 51. */
	set_signot1(prev, spu);	        /* Step 52. */
	set_signot2(prev, spu);	        /* Step 53. */
	send_save_code(prev, spu);	/* Step 54. */
	set_ppu_querymask(prev, spu);	/* Step 55. */
	wait_tag_complete(prev, spu);	/* Step 56. */
	wait_spu_stopped(prev, spu);	/* Step 57. */
}
static void harvest(struct spu_state *prev, struct spu *spu)
{
	/*
	 * Perform steps 2-25 of SPU context restore sequence,
	 * which resets an SPU either after a failed save, or
	 * when using an SPU for the first time.
	 */

	disable_interrupts(prev, spu);	        /* Step 2.  */
	inhibit_user_access(prev, spu);	        /* Step 3.  */
	terminate_spu_app(prev, spu);	        /* Step 4.  */
	set_switch_pending(prev, spu);	        /* Step 5.  */
	remove_other_spu_access(prev, spu);	/* Step 6.  */
	suspend_mfc(prev, spu);	                /* Step 7.  */
	wait_suspend_mfc_complete(prev, spu);	/* Step 8.  */
	if (!suspend_spe(prev, spu))	        /* Step 9.  */
		clear_spu_status(prev, spu);	/* Step 10. */
	do_mfc_mssync(prev, spu);	        /* Step 11. */
	issue_mfc_tlbie(prev, spu);	        /* Step 12. */
	handle_pending_interrupts(prev, spu);	/* Step 13. */
	purge_mfc_queue(prev, spu);	        /* Step 14. */
	wait_purge_complete(prev, spu);	        /* Step 15. */
	reset_spu_privcntl(prev, spu);	        /* Step 16. */
	reset_spu_lslr(prev, spu);	        /* Step 17. */
	setup_mfc_sr1(prev, spu);	        /* Step 18. */
	invalidate_slbs(prev, spu);	        /* Step 19. */
	reset_ch_part1(prev, spu);	        /* Step 20. */
	reset_ch_part2(prev, spu);	        /* Step 21. */
	enable_interrupts(prev, spu);	        /* Step 22. */
	set_switch_active(prev, spu);	        /* Step 23. */
	set_mfc_tclass_id(prev, spu);	        /* Step 24. */
	resume_mfc_queue(prev, spu);	        /* Step 25. */
}

static void restore_lscsa(struct spu_state *next, struct spu *spu)
{
	/*
	 * Perform steps 26-40 of SPU context restore sequence,
	 * which restores regions of the local store and register
	 * file.
	 */

	set_watchdog_timer(next, spu);	        /* Step 26. */
	setup_spu_status_part1(next, spu);	/* Step 27. */
	setup_spu_status_part2(next, spu);	/* Step 28. */
	restore_mfc_rag(next, spu);	        /* Step 29. */
	setup_mfc_slbs(next, spu);	        /* Step 30. */
	set_spu_npc(next, spu);	                /* Step 31. */
	set_signot1(next, spu);	                /* Step 32. */
	set_signot2(next, spu);	                /* Step 33. */
	setup_decr(next, spu);	                /* Step 34. */
	setup_ppu_mb(next, spu);	        /* Step 35. */
	setup_ppuint_mb(next, spu);	        /* Step 36. */
	send_restore_code(next, spu);	        /* Step 37. */
	set_ppu_querymask(next, spu);	        /* Step 38. */
	wait_tag_complete(next, spu);	        /* Step 39. */
	wait_spu_stopped(next, spu);	        /* Step 40. */
}
static void restore_csa(struct spu_state *next, struct spu *spu)
{
	/*
	 * Combine steps 41-76 of SPU context restore sequence, which
	 * restore regions of the privileged & problem state areas.
	 */

	restore_spu_privcntl(next, spu);	/* Step 41. */
	restore_status_part1(next, spu);	/* Step 42. */
	restore_status_part2(next, spu);	/* Step 43. */
	restore_ls_16kb(next, spu);	        /* Step 44. */
	wait_tag_complete(next, spu);	        /* Step 45. */
	suspend_mfc(next, spu);	                /* Step 46. */
	wait_suspend_mfc_complete(next, spu);	/* Step 47. */
	issue_mfc_tlbie(next, spu);	        /* Step 48. */
	clear_interrupts(next, spu);	        /* Step 49. */
	restore_mfc_queues(next, spu);	        /* Step 50. */
	restore_ppu_querymask(next, spu);	/* Step 51. */
	restore_ppu_querytype(next, spu);	/* Step 52. */
	restore_mfc_csr_tsq(next, spu);	        /* Step 53. */
	restore_mfc_csr_cmd(next, spu);	        /* Step 54. */
	restore_mfc_csr_ato(next, spu);	        /* Step 55. */
	restore_mfc_tclass_id(next, spu);	/* Step 56. */
	set_llr_event(next, spu);	        /* Step 57. */
	restore_decr_wrapped(next, spu);	/* Step 58. */
	restore_ch_part1(next, spu);	        /* Step 59. */
	restore_ch_part2(next, spu);	        /* Step 60. */
	restore_spu_lslr(next, spu);	        /* Step 61. */
	restore_spu_cfg(next, spu);	        /* Step 62. */
	restore_pm_trace(next, spu);	        /* Step 63. */
	restore_spu_npc(next, spu);	        /* Step 64. */
	restore_spu_mb(next, spu);	        /* Step 65. */
	check_ppu_mb_stat(next, spu);	        /* Step 66. */
	check_ppuint_mb_stat(next, spu);	/* Step 67. */
	restore_mfc_slbs(next, spu);	        /* Step 68. */
	restore_mfc_sr1(next, spu);	        /* Step 69. */
	restore_other_spu_access(next, spu);	/* Step 70. */
	restore_spu_runcntl(next, spu);	        /* Step 71. */
	restore_mfc_cntl(next, spu);	        /* Step 72. */
	enable_user_access(next, spu);	        /* Step 73. */
	reset_switch_active(next, spu);	        /* Step 74. */
	reenable_interrupts(next, spu);	        /* Step 75. */
}
static int __do_spu_save(struct spu_state *prev, struct spu *spu)
{
	int rc;

	/*
	 * SPU context save can be broken into three phases:
	 *
	 *     (a) quiesce [steps 2-18].
	 *     (b) save of CSA, performed by PPE [steps 19-45].
	 *     (c) save of LSCSA, mostly performed by SPU [steps 46-57].
	 *
	 * Returns      0 on success.
	 *              2,6 if failed to quiesce SPU.
	 *              nonzero if the SPU-side save failed.
	 */

	rc = quiece_spu(prev, spu);	        /* Steps 2-18.  */
	switch (rc) {
	default:
	case 2:
	case 6:
		harvest(prev, spu);
		return rc;
	case 0:
		break;
	}
	save_csa(prev, spu);	                /* Steps 19-45. */
	save_lscsa(prev, spu);	                /* Steps 46-57. */
	return check_save_status(prev, spu);
}

static int __do_spu_restore(struct spu_state *next, struct spu *spu)
{
	int rc;

	/*
	 * SPU context restore can be broken into three phases:
	 *
	 *     (a) harvest (or reset) SPU [steps 2-25].
	 *     (b) restore LSCSA [steps 26-40], mostly performed by SPU.
	 *     (c) restore CSA [steps 41-76], performed by PPE.
	 *
	 * The 'harvest' step is not performed here, but rather
	 * as needed below.
	 */

	restore_lscsa(next, spu);	        /* Steps 26-40. */
	rc = check_restore_status(next, spu);
	switch (rc) {
	default:
		/* Failed. Return now. */
		return rc;
	case 0:
		/* Fall through to next step. */
		break;
	}
	restore_csa(next, spu);	                /* Steps 41-76. */

	return 0;
}
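
/*
 * Editorial note: on a failed quiesce, __do_spu_save() falls back to
 * harvest() so the SPU is left in a known-clean state, and the error code
 * (2 or 6) tells the caller which step failed.  Likewise,
 * __do_spu_restore() returns the nonzero result of check_restore_status()
 * unchanged when the SPU-side restore program did not complete.
 */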
/**
 * spu_save - SPU context save, with locking.
 * @prev: pointer to SPU context save area, to be saved.
 * @spu: pointer to SPU iomem structure.
 *
 * Acquire locks, perform the save operation then return.
 */
int spu_save(struct spu_state *prev, struct spu *spu)
{
	int rc;

	acquire_spu_lock(spu);	        /* Step 1.     */
	rc = __do_spu_save(prev, spu);	/* Steps 2-57. */
	release_spu_lock(spu);
	if (rc) {
		panic("%s failed on SPU[%d], rc=%d.\n",
		      __func__, spu->number, rc);
	}
	return rc;
}
/**
 * spu_restore - SPU context restore, with harvest and locking.
 * @new: pointer to SPU context save area, to be restored.
 * @spu: pointer to SPU iomem structure.
 *
 * Perform harvest + restore, as we may not be coming
 * from a previous successful save operation, and the
 * hardware state is unknown.
 */
int spu_restore(struct spu_state *new, struct spu *spu)
{
	int rc;

	acquire_spu_lock(spu);
	harvest(NULL, spu);
	spu->stop_code = 0;
	spu->dar = 0;
	spu->dsisr = 0;
	spu->slb_replace = 0;
	spu->class_0_pending = 0;
	rc = __do_spu_restore(new, spu);
	release_spu_lock(spu);
	if (rc) {
		panic("%s failed on SPU[%d] rc=%d.\n",
		      __func__, spu->number, rc);
	}
	return rc;
}

/**
 * spu_harvest - SPU harvest (reset) operation
 * @spu: pointer to SPU iomem structure.
 *
 * Perform SPU harvest (reset) operation.
 */
void spu_harvest(struct spu *spu)
{
	acquire_spu_lock(spu);
	harvest(NULL, spu);
	release_spu_lock(spu);
}
static void init_prob(struct spu_state *csa)
{
	csa->spu_chnlcnt_RW[9] = 1;
	csa->spu_chnlcnt_RW[21] = 16;
	csa->spu_chnlcnt_RW[23] = 1;
	csa->spu_chnlcnt_RW[28] = 1;
	csa->spu_chnlcnt_RW[30] = 1;
	csa->prob.spu_runcntl_RW = SPU_RUNCNTL_STOP;
}

static void init_priv1(struct spu_state *csa)
{
	/* Enable decode, relocate, tlbie response, master runcntl. */
	csa->priv1.mfc_sr1_RW = MFC_STATE1_LOCAL_STORAGE_DECODE_MASK |
	    MFC_STATE1_MASTER_RUN_CONTROL_MASK |
	    MFC_STATE1_PROBLEM_STATE_MASK |
	    MFC_STATE1_RELOCATE_MASK | MFC_STATE1_BUS_TLBIE_MASK;

	/* Set storage description. */
	csa->priv1.mfc_sdr_RW = mfspr(SPRN_SDR1);

	/* Enable OS-specific set of interrupts. */
	csa->priv1.int_mask_class0_RW = CLASS0_ENABLE_DMA_ALIGNMENT_INTR |
	    CLASS0_ENABLE_INVALID_DMA_COMMAND_INTR |
	    CLASS0_ENABLE_SPU_ERROR_INTR;
	csa->priv1.int_mask_class1_RW = CLASS1_ENABLE_SEGMENT_FAULT_INTR |
	    CLASS1_ENABLE_STORAGE_FAULT_INTR;
	csa->priv1.int_mask_class2_RW = CLASS2_ENABLE_SPU_STOP_INTR |
	    CLASS2_ENABLE_SPU_HALT_INTR |
	    CLASS2_ENABLE_SPU_DMA_TAG_GROUP_COMPLETE_INTR;
}

static void init_priv2(struct spu_state *csa)
{
	csa->priv2.spu_lslr_RW = LS_ADDR_MASK;
	csa->priv2.mfc_control_RW = MFC_CNTL_RESUME_DMA_QUEUE |
	    MFC_CNTL_NORMAL_DMA_QUEUE_OPERATION |
	    MFC_CNTL_DMA_QUEUES_EMPTY_MASK;
}
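
/*
 * Editorial note: the initial channel counts set by init_prob() (e.g. 16
 * for channel 21, the MFC command channel, and 1 for channels 9, 23, 28
 * and 30) match the reset values programmed by reset_ch()/reset_ch_part2()
 * above, so a newly initialized CSA restores like a freshly harvested SPU.
 */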
/**
 * spu_init_csa - allocate and initialize an SPU context save area.
 *
 * Allocate and initialize the contents of an SPU context save area.
 * This includes enabling address translation, interrupt masks, etc.,
 * as appropriate for the given OS environment.
 *
 * Note that storage for the 'lscsa' is allocated separately,
 * as it is by far the largest of the context save regions,
 * and may need to be pinned or otherwise specially aligned.
 */
void spu_init_csa(struct spu_state *csa)
{
	struct spu_lscsa *lscsa;
	unsigned char *p;

	if (!csa)
		return;
	memset(csa, 0, sizeof(struct spu_state));

	lscsa = vmalloc(sizeof(struct spu_lscsa));
	if (!lscsa)
		return;

	memset(lscsa, 0, sizeof(struct spu_lscsa));
	csa->lscsa = lscsa;
	csa->register_lock = SPIN_LOCK_UNLOCKED;

	/* Set LS pages reserved to allow for user-space mapping. */
	for (p = lscsa->ls; p < lscsa->ls + LS_SIZE; p += PAGE_SIZE)
		SetPageReserved(vmalloc_to_page(p));

	init_prob(csa);
	init_priv1(csa);
	init_priv2(csa);
}

void spu_fini_csa(struct spu_state *csa)
{
	/* Clear reserved bit before vfree. */
	unsigned char *p;

	for (p = csa->lscsa->ls; p < csa->lscsa->ls + LS_SIZE; p += PAGE_SIZE)
		ClearPageReserved(vmalloc_to_page(p));

	vfree(csa->lscsa);
}
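
/*
 * Editorial usage sketch (hypothetical caller, not part of this file):
 * a context's save area is created once with spu_init_csa(), then cycled
 * through spu_save()/spu_restore() by the scheduler, and finally released
 * with spu_fini_csa():
 *
 *	struct spu_state csa;
 *
 *	spu_init_csa(&csa);
 *	...
 *	spu_save(&csa, spu);		// preempt: SPU state -> csa
 *	...
 *	spu_restore(&csa, spu);		// resume: csa -> SPU state
 *	...
 *	spu_fini_csa(&csa);
 */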