/*
 * spu_switch.c
 *
 * (C) Copyright IBM Corp. 2005
 *
 * Author: Mark Nutter <mnutter@us.ibm.com>
 *
 * Host-side part of SPU context switch sequence outlined in
 * Synergistic Processor Element, Book IV.
 *
 * A fully preemptive switch of an SPE is very expensive in terms
 * of time and system resources.  SPE Book IV indicates that SPE
 * allocation should follow a "serially reusable device" model,
 * in which the SPE is assigned a task until it completes.  When
 * this is not possible, this sequence may be used to preemptively
 * save, and then later (optionally) restore the context of a
 * program executing on an SPE.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
#include <linux/config.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/smp.h>
#include <linux/smp_lock.h>
#include <linux/stddef.h>
#include <linux/unistd.h>

#include <asm/io.h>
#include <asm/spu.h>
#include <asm/spu_priv1.h>
#include <asm/spu_csa.h>
#include <asm/mmu_context.h>

#include "spu_save_dump.h"
#include "spu_restore_dump.h"
#if 0
#define POLL_WHILE_TRUE(_c) {				\
    do {						\
    } while (_c);					\
  }
#else
#define RELAX_SPIN_COUNT				1000
#define POLL_WHILE_TRUE(_c) {				\
    do {						\
	int _i;						\
	for (_i=0; _i<RELAX_SPIN_COUNT && (_c); _i++) { \
	    cpu_relax();				\
	}						\
	if (unlikely(_c)) yield();			\
    } while (_c);					\
  }
#endif				/* debug */

#define POLL_WHILE_FALSE(_c)	POLL_WHILE_TRUE(!(_c))
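/*
 * Example (illustrative only): the helpers above are used throughout
 * this file to spin on memory-mapped status registers, e.g.:
 *
 *	POLL_WHILE_TRUE(in_be32(&prob->spu_status_R) & SPU_STATUS_RUNNING);
 *
 * spins until SPU_Status[R] reads back as 0, relaxing the CPU for up
 * to RELAX_SPIN_COUNT iterations and then yielding if the condition
 * still holds.
 */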
static inline void acquire_spu_lock(struct spu *spu)
{
	/* Save, Step 1:
	 * Restore, Step 1:
	 *     Acquire SPU-specific mutual exclusion lock.
	 *     TBD.
	 */
}

static inline void release_spu_lock(struct spu *spu)
{
	/* Restore, Step 76:
	 *     Release SPU-specific mutual exclusion lock.
	 *     TBD.
	 */
}
static inline int check_spu_isolate(struct spu_state *csa, struct spu *spu)
{
	struct spu_problem __iomem *prob = spu->problem;
	u32 isolate_state;

	/* Save, Step 2:
	 * Save, Step 6:
	 *     If any field of SPU_Status[E,L,IS] is '1', this
	 *     SPU is in isolate state and cannot be context
	 *     saved at this time.
	 */
	isolate_state = SPU_STATUS_ISOLATED_STATE |
	    SPU_STATUS_ISOLATED_LOAD_STAUTUS | SPU_STATUS_ISOLATED_EXIT_STAUTUS;
	return (in_be32(&prob->spu_status_R) & isolate_state) ? 1 : 0;
}
static inline void disable_interrupts(struct spu_state *csa, struct spu *spu)
{
	/* Save, Step 3:
	 * Restore, Step 2:
	 *     Save INT_Mask_class0 in CSA.
	 *     Write INT_MASK_class0 with value of 0.
	 *     Save INT_Mask_class1 in CSA.
	 *     Write INT_MASK_class1 with value of 0.
	 *     Save INT_Mask_class2 in CSA.
	 *     Write INT_MASK_class2 with value of 0.
	 */
	spin_lock_irq(&spu->register_lock);
	if (csa) {
		csa->priv1.int_mask_class0_RW = spu_int_mask_get(spu, 0);
		csa->priv1.int_mask_class1_RW = spu_int_mask_get(spu, 1);
		csa->priv1.int_mask_class2_RW = spu_int_mask_get(spu, 2);
	}
	spu_int_mask_set(spu, 0, 0ul);
	spu_int_mask_set(spu, 1, 0ul);
	spu_int_mask_set(spu, 2, 0ul);
	eieio();
	spin_unlock_irq(&spu->register_lock);
}
static inline void set_watchdog_timer(struct spu_state *csa, struct spu *spu)
{
	/* Save, Step 4:
	 * Restore, Step 26:
	 *     Set a software watchdog timer, which specifies the
	 *     maximum allowable time for a context save sequence.
	 *
	 *     For the present, this implementation will not set a
	 *     global watchdog timer, as virtualization & variable
	 *     system load may cause unpredictable execution times.
	 */
}

static inline void inhibit_user_access(struct spu_state *csa, struct spu *spu)
{
	/* Save, Step 5:
	 * Restore, Step 3:
	 *     Inhibit user-space access (if provided) to this
	 *     SPU by unmapping the virtual pages assigned to
	 *     the SPU memory-mapped I/O (MMIO) for problem
	 *     state.  TBD.
	 */
}

static inline void set_switch_pending(struct spu_state *csa, struct spu *spu)
{
	/* Save, Step 7:
	 * Restore, Step 5:
	 *     Set a software context switch pending flag.
	 */
	set_bit(SPU_CONTEXT_SWITCH_PENDING, &spu->flags);
	mb();
}
static inline void save_mfc_cntl(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	/* Save, Step 8:
	 *     Suspend DMA and save MFC_CNTL.
	 */
	switch (in_be64(&priv2->mfc_control_RW) &
	       MFC_CNTL_SUSPEND_DMA_STATUS_MASK) {
	case MFC_CNTL_SUSPEND_IN_PROGRESS:
		POLL_WHILE_FALSE((in_be64(&priv2->mfc_control_RW) &
				  MFC_CNTL_SUSPEND_DMA_STATUS_MASK) ==
				 MFC_CNTL_SUSPEND_COMPLETE);
		/* fall through */
	case MFC_CNTL_SUSPEND_COMPLETE:
		if (csa) {
			csa->priv2.mfc_control_RW =
			    in_be64(&priv2->mfc_control_RW) |
			    MFC_CNTL_SUSPEND_DMA_QUEUE;
		}
		break;
	case MFC_CNTL_NORMAL_DMA_QUEUE_OPERATION:
		out_be64(&priv2->mfc_control_RW, MFC_CNTL_SUSPEND_DMA_QUEUE);
		POLL_WHILE_FALSE((in_be64(&priv2->mfc_control_RW) &
				  MFC_CNTL_SUSPEND_DMA_STATUS_MASK) ==
				 MFC_CNTL_SUSPEND_COMPLETE);
		if (csa) {
			csa->priv2.mfc_control_RW =
			    in_be64(&priv2->mfc_control_RW) &
			    ~MFC_CNTL_SUSPEND_DMA_QUEUE;
		}
		break;
	}
}
static inline void save_spu_runcntl(struct spu_state *csa, struct spu *spu)
{
	struct spu_problem __iomem *prob = spu->problem;

	/* Save, Step 9:
	 *     Save SPU_Runcntl in the CSA.  This value contains
	 *     the "Application Desired State".
	 */
	csa->prob.spu_runcntl_RW = in_be32(&prob->spu_runcntl_RW);
}

static inline void save_mfc_sr1(struct spu_state *csa, struct spu *spu)
{
	/* Save, Step 10:
	 *     Save MFC_SR1 in the CSA.
	 */
	csa->priv1.mfc_sr1_RW = spu_mfc_sr1_get(spu);
}
static inline void save_spu_status(struct spu_state *csa, struct spu *spu)
{
	struct spu_problem __iomem *prob = spu->problem;

	/* Save, Step 11:
	 *     Read SPU_Status[R], and save to CSA.
	 */
	if ((in_be32(&prob->spu_status_R) & SPU_STATUS_RUNNING) == 0) {
		csa->prob.spu_status_R = in_be32(&prob->spu_status_R);
	} else {
		u32 stopped;

		out_be32(&prob->spu_runcntl_RW, SPU_RUNCNTL_STOP);
		eieio();
		POLL_WHILE_TRUE(in_be32(&prob->spu_status_R) &
				SPU_STATUS_RUNNING);
		stopped =
		    SPU_STATUS_INVALID_INSTR | SPU_STATUS_SINGLE_STEP |
		    SPU_STATUS_STOPPED_BY_HALT | SPU_STATUS_STOPPED_BY_STOP;
		if ((in_be32(&prob->spu_status_R) & stopped) == 0)
			csa->prob.spu_status_R = SPU_STATUS_RUNNING;
		else
			csa->prob.spu_status_R = in_be32(&prob->spu_status_R);
	}
}
static inline void save_mfc_decr(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	/* Save, Step 12:
	 *     Read MFC_CNTL[Ds].  Update saved copy of
	 *     CSA.MFC_CNTL[Ds].
	 */
	if (in_be64(&priv2->mfc_control_RW) & MFC_CNTL_DECREMENTER_RUNNING) {
		csa->priv2.mfc_control_RW |= MFC_CNTL_DECREMENTER_RUNNING;
		csa->suspend_time = get_cycles();
		out_be64(&priv2->spu_chnlcntptr_RW, 7ULL);
		eieio();
		csa->spu_chnldata_RW[7] = in_be64(&priv2->spu_chnldata_RW);
		eieio();
	} else {
		csa->priv2.mfc_control_RW &= ~MFC_CNTL_DECREMENTER_RUNNING;
	}
}

static inline void halt_mfc_decr(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	/* Save, Step 13:
	 *     Write MFC_CNTL[Dh] set to a '1' to halt
	 *     the decrementer.
	 */
	out_be64(&priv2->mfc_control_RW, MFC_CNTL_DECREMENTER_HALTED);
	eieio();
}

static inline void save_timebase(struct spu_state *csa, struct spu *spu)
{
	/* Save, Step 14:
	 *     Read PPE Timebase High and Timebase low registers
	 *     and save in CSA.  TBD.
	 */
	csa->suspend_time = get_cycles();
}
static inline void remove_other_spu_access(struct spu_state *csa,
					   struct spu *spu)
{
	/* Save, Step 15:
	 * Restore, Step 6:
	 *     Remove other SPU access to this SPU by unmapping
	 *     this SPU's pages from their address space.  TBD.
	 */
}

static inline void do_mfc_mssync(struct spu_state *csa, struct spu *spu)
{
	struct spu_problem __iomem *prob = spu->problem;

	/* Save, Step 16:
	 * Restore, Step 11:
	 *     Write SPU_MSSync register.  Poll SPU_MSSync[P]
	 *     for a value of 0.
	 */
	out_be64(&prob->spc_mssync_RW, 1UL);
	POLL_WHILE_TRUE(in_be64(&prob->spc_mssync_RW) & MS_SYNC_PENDING);
}

static inline void issue_mfc_tlbie(struct spu_state *csa, struct spu *spu)
{
	/* Save, Step 17:
	 * Restore, Step 12:
	 * Restore, Step 48:
	 *     Write TLB_Invalidate_Entry[IS,VPN,L,Lp]=0 register.
	 *     Then issue a PPE sync instruction.
	 */
	spu_tlb_invalidate(spu);
	mb();
}

static inline void handle_pending_interrupts(struct spu_state *csa,
					     struct spu *spu)
{
	/* Save, Step 18:
	 * Restore, Step 13:
	 *     Handle any pending interrupts from this SPU
	 *     here.  This is OS or hypervisor specific.  One
	 *     option is to re-enable interrupts to handle any
	 *     pending interrupts, with the interrupt handlers
	 *     recognizing the software Context Switch Pending
	 *     flag, to ensure the SPU execution or MFC command
	 *     queue is not restarted.  TBD.
	 */
}
static inline void save_mfc_queues(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;
	int i;

	/* Save, Step 19:
	 *     If MFC_Cntl[Se]=0 then save
	 *     MFC command queues.
	 */
	if ((in_be64(&priv2->mfc_control_RW) & MFC_CNTL_DMA_QUEUES_EMPTY) == 0) {
		for (i = 0; i < 8; i++) {
			csa->priv2.puq[i].mfc_cq_data0_RW =
			    in_be64(&priv2->puq[i].mfc_cq_data0_RW);
			csa->priv2.puq[i].mfc_cq_data1_RW =
			    in_be64(&priv2->puq[i].mfc_cq_data1_RW);
			csa->priv2.puq[i].mfc_cq_data2_RW =
			    in_be64(&priv2->puq[i].mfc_cq_data2_RW);
			csa->priv2.puq[i].mfc_cq_data3_RW =
			    in_be64(&priv2->puq[i].mfc_cq_data3_RW);
		}
		for (i = 0; i < 16; i++) {
			csa->priv2.spuq[i].mfc_cq_data0_RW =
			    in_be64(&priv2->spuq[i].mfc_cq_data0_RW);
			csa->priv2.spuq[i].mfc_cq_data1_RW =
			    in_be64(&priv2->spuq[i].mfc_cq_data1_RW);
			csa->priv2.spuq[i].mfc_cq_data2_RW =
			    in_be64(&priv2->spuq[i].mfc_cq_data2_RW);
			csa->priv2.spuq[i].mfc_cq_data3_RW =
			    in_be64(&priv2->spuq[i].mfc_cq_data3_RW);
		}
	}
}
static inline void save_ppu_querymask(struct spu_state *csa, struct spu *spu)
{
	struct spu_problem __iomem *prob = spu->problem;

	/* Save, Step 20:
	 *     Save the PPU_QueryMask register
	 *     in the CSA.
	 */
	csa->prob.dma_querymask_RW = in_be32(&prob->dma_querymask_RW);
}

static inline void save_ppu_querytype(struct spu_state *csa, struct spu *spu)
{
	struct spu_problem __iomem *prob = spu->problem;

	/* Save, Step 21:
	 *     Save the PPU_QueryType register
	 *     in the CSA.
	 */
	csa->prob.dma_querytype_RW = in_be32(&prob->dma_querytype_RW);
}

static inline void save_mfc_csr_tsq(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	/* Save, Step 22:
	 *     Save the MFC_CSR_TSQ register
	 *     in the CSA.
	 */
	csa->priv2.spu_tag_status_query_RW =
	    in_be64(&priv2->spu_tag_status_query_RW);
}

static inline void save_mfc_csr_cmd(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	/* Save, Step 23:
	 *     Save the MFC_CSR_CMD1 and MFC_CSR_CMD2
	 *     registers in the CSA.
	 */
	csa->priv2.spu_cmd_buf1_RW = in_be64(&priv2->spu_cmd_buf1_RW);
	csa->priv2.spu_cmd_buf2_RW = in_be64(&priv2->spu_cmd_buf2_RW);
}
static inline void save_mfc_csr_ato(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	/* Save, Step 24:
	 *     Save the MFC_CSR_ATO register in
	 *     the CSA.
	 */
	csa->priv2.spu_atomic_status_RW = in_be64(&priv2->spu_atomic_status_RW);
}

static inline void save_mfc_tclass_id(struct spu_state *csa, struct spu *spu)
{
	/* Save, Step 25:
	 *     Save the MFC_TCLASS_ID register in
	 *     the CSA.
	 */
	csa->priv1.mfc_tclass_id_RW = spu_mfc_tclass_id_get(spu);
}

static inline void set_mfc_tclass_id(struct spu_state *csa, struct spu *spu)
{
	/* Save, Step 26:
	 * Restore, Step 24:
	 *     Write the MFC_TCLASS_ID register with
	 *     the value 0x10000000.
	 */
	spu_mfc_tclass_id_set(spu, 0x10000000);
	eieio();
}
static inline void purge_mfc_queue(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	/* Save, Step 27:
	 * Restore, Step 14:
	 *     Write MFC_CNTL[Pc]=1 (purge queue).
	 */
	out_be64(&priv2->mfc_control_RW, MFC_CNTL_PURGE_DMA_REQUEST);
	eieio();
}

static inline void wait_purge_complete(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	/* Save, Step 28:
	 * Restore, Step 15:
	 *     Poll MFC_CNTL[Ps] until value '11' is read
	 *     (purge complete).
	 */
	POLL_WHILE_FALSE(in_be64(&priv2->mfc_control_RW) &
			 MFC_CNTL_PURGE_DMA_COMPLETE);
}

static inline void save_mfc_slbs(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;
	int i;

	/* Save, Step 29:
	 *     If MFC_SR1[R]='1', save SLBs in CSA.
	 */
	if (spu_mfc_sr1_get(spu) & MFC_STATE1_RELOCATE_MASK) {
		csa->priv2.slb_index_W = in_be64(&priv2->slb_index_W);
		for (i = 0; i < 8; i++) {
			out_be64(&priv2->slb_index_W, i);
			eieio();
			csa->slb_esid_RW[i] = in_be64(&priv2->slb_esid_RW);
			csa->slb_vsid_RW[i] = in_be64(&priv2->slb_vsid_RW);
			eieio();
		}
	}
}
static inline void setup_mfc_sr1(struct spu_state *csa, struct spu *spu)
{
	/* Save, Step 30:
	 * Restore, Step 18:
	 *     Write MFC_SR1 with MFC_SR1[D=0,S=1] and
	 *     MFC_SR1[TL,R,Pr,T] set correctly for the
	 *     OS specific environment.
	 *
	 *     Implementation note: The SPU-side code
	 *     for save/restore is privileged, so the
	 *     MFC_SR1[Pr] bit is not set.
	 */
	spu_mfc_sr1_set(spu, (MFC_STATE1_MASTER_RUN_CONTROL_MASK |
			      MFC_STATE1_RELOCATE_MASK |
			      MFC_STATE1_BUS_TLBIE_MASK));
}

static inline void save_spu_npc(struct spu_state *csa, struct spu *spu)
{
	struct spu_problem __iomem *prob = spu->problem;

	/* Save, Step 31:
	 *     Save SPU_NPC in the CSA.
	 */
	csa->prob.spu_npc_RW = in_be32(&prob->spu_npc_RW);
}

static inline void save_spu_privcntl(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	/* Save, Step 32:
	 *     Save SPU_PrivCntl in the CSA.
	 */
	csa->priv2.spu_privcntl_RW = in_be64(&priv2->spu_privcntl_RW);
}
static inline void reset_spu_privcntl(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	/* Save, Step 33:
	 * Restore, Step 16:
	 *     Write SPU_PrivCntl[S,Le,A] fields reset to 0.
	 */
	out_be64(&priv2->spu_privcntl_RW, 0UL);
	eieio();
}

static inline void save_spu_lslr(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	/* Save, Step 34:
	 *     Save SPU_LSLR in the CSA.
	 */
	csa->priv2.spu_lslr_RW = in_be64(&priv2->spu_lslr_RW);
}

static inline void reset_spu_lslr(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	/* Save, Step 35:
	 * Restore, Step 17:
	 *     Reset the SPU_LSLR register to its default value.
	 */
	out_be64(&priv2->spu_lslr_RW, LS_ADDR_MASK);
	eieio();
}

static inline void save_spu_cfg(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	/* Save, Step 36:
	 *     Save SPU_Cfg in the CSA.
	 */
	csa->priv2.spu_cfg_RW = in_be64(&priv2->spu_cfg_RW);
}

static inline void save_pm_trace(struct spu_state *csa, struct spu *spu)
{
	/* Save, Step 37:
	 *     Save PM_Trace_Tag_Wait_Mask in the CSA.
	 *     Not performed by this implementation.
	 */
}
static inline void save_mfc_rag(struct spu_state *csa, struct spu *spu)
{
	/* Save, Step 38:
	 *     Save RA_GROUP_ID register and the
	 *     RA_ENABLE register in the CSA.
	 */
	csa->priv1.resource_allocation_groupID_RW =
	    spu_resource_allocation_groupID_get(spu);
	csa->priv1.resource_allocation_enable_RW =
	    spu_resource_allocation_enable_get(spu);
}

static inline void save_ppu_mb_stat(struct spu_state *csa, struct spu *spu)
{
	struct spu_problem __iomem *prob = spu->problem;

	/* Save, Step 39:
	 *     Save MB_Stat register in the CSA.
	 */
	csa->prob.mb_stat_R = in_be32(&prob->mb_stat_R);
}

static inline void save_ppu_mb(struct spu_state *csa, struct spu *spu)
{
	struct spu_problem __iomem *prob = spu->problem;

	/* Save, Step 40:
	 *     Save the PPU_MB register in the CSA.
	 */
	csa->prob.pu_mb_R = in_be32(&prob->pu_mb_R);
}

static inline void save_ppuint_mb(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	/* Save, Step 41:
	 *     Save the PPUINT_MB register in the CSA.
	 */
	csa->priv2.puint_mb_R = in_be64(&priv2->puint_mb_R);
}
static inline void save_ch_part1(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;
	u64 idx, ch_indices[7] = { 0UL, 3UL, 4UL, 24UL, 25UL, 27UL };
	int i;

	/* Save, Step 42:
	 */

	/* Save CH 1, without channel count */
	out_be64(&priv2->spu_chnlcntptr_RW, 1);
	csa->spu_chnldata_RW[1] = in_be64(&priv2->spu_chnldata_RW);

	/* Save the following CH: [0,3,4,24,25,27] */
	for (i = 0; i < 7; i++) {
		idx = ch_indices[i];
		out_be64(&priv2->spu_chnlcntptr_RW, idx);
		eieio();
		csa->spu_chnldata_RW[idx] = in_be64(&priv2->spu_chnldata_RW);
		csa->spu_chnlcnt_RW[idx] = in_be64(&priv2->spu_chnlcnt_RW);
		out_be64(&priv2->spu_chnldata_RW, 0UL);
		out_be64(&priv2->spu_chnlcnt_RW, 0UL);
		eieio();
	}
}

static inline void save_spu_mb(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;
	int i;

	/* Save, Step 43:
	 *     Save SPU Read Mailbox Channel.
	 */
	out_be64(&priv2->spu_chnlcntptr_RW, 29UL);
	eieio();
	csa->spu_chnlcnt_RW[29] = in_be64(&priv2->spu_chnlcnt_RW);
	for (i = 0; i < 4; i++) {
		csa->spu_mailbox_data[i] = in_be64(&priv2->spu_chnldata_RW);
	}
	out_be64(&priv2->spu_chnlcnt_RW, 0UL);
	eieio();
}

static inline void save_mfc_cmd(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	/* Save, Step 44:
	 *     Save MFC_CMD Channel.
	 */
	out_be64(&priv2->spu_chnlcntptr_RW, 21UL);
	eieio();
	csa->spu_chnlcnt_RW[21] = in_be64(&priv2->spu_chnlcnt_RW);
	eieio();
}
static inline void reset_ch(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;
	u64 ch_indices[4] = { 21UL, 23UL, 28UL, 30UL };
	u64 ch_counts[4] = { 16UL, 1UL, 1UL, 1UL };
	u64 idx;
	int i;

	/* Save, Step 45:
	 *     Reset the following CH: [21, 23, 28, 30]
	 */
	for (i = 0; i < 4; i++) {
		idx = ch_indices[i];
		out_be64(&priv2->spu_chnlcntptr_RW, idx);
		eieio();
		out_be64(&priv2->spu_chnlcnt_RW, ch_counts[i]);
		eieio();
	}
}

static inline void resume_mfc_queue(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	/* Save, Step 46:
	 * Restore, Step 25:
	 *     Write MFC_CNTL[Sc]=0 (resume queue processing).
	 */
	out_be64(&priv2->mfc_control_RW, MFC_CNTL_RESUME_DMA_QUEUE);
}

static inline void invalidate_slbs(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	/* Restore, Step 19:
	 *     If MFC_SR1[R]=1, write 0 to SLB_Invalidate_All.
	 */
	if (spu_mfc_sr1_get(spu) & MFC_STATE1_RELOCATE_MASK) {
		out_be64(&priv2->slb_invalidate_all_W, 0UL);
		eieio();
	}
}
static inline void get_kernel_slb(u64 ea, u64 slb[2])
{
	u64 llp;

	if (REGION_ID(ea) == KERNEL_REGION_ID)
		llp = mmu_psize_defs[mmu_linear_psize].sllp;
	else
		llp = mmu_psize_defs[mmu_virtual_psize].sllp;
	slb[0] = (get_kernel_vsid(ea) << SLB_VSID_SHIFT) |
	    SLB_VSID_KERNEL | llp;
	slb[1] = (ea & ESID_MASK) | SLB_ESID_V;
}

static inline void load_mfc_slb(struct spu *spu, u64 slb[2], int slbe)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	out_be64(&priv2->slb_index_W, slbe);
	eieio();
	out_be64(&priv2->slb_vsid_RW, slb[0]);
	out_be64(&priv2->slb_esid_RW, slb[1]);
	eieio();
}
static inline void setup_mfc_slbs(struct spu_state *csa, struct spu *spu)
{
	u64 code_slb[2];
	u64 lscsa_slb[2];

	/* Save, Step 47:
	 * Restore, Step 30:
	 *     If MFC_SR1[R]=1, write 0 to SLB_Invalidate_All
	 *     register, then initialize SLB_VSID and SLB_ESID
	 *     to provide access to SPU context save code and
	 *     LSCSA.
	 *
	 *     This implementation places both the context
	 *     switch code and LSCSA in kernel address space.
	 *
	 *     Further, this implementation assumes that
	 *     MFC_SR1[R]=1 (in other words, assume that
	 *     translation is desired by OS environment).
	 */
	invalidate_slbs(csa, spu);
	get_kernel_slb((unsigned long)&spu_save_code[0], code_slb);
	get_kernel_slb((unsigned long)csa->lscsa, lscsa_slb);
	load_mfc_slb(spu, code_slb, 0);
	if ((lscsa_slb[0] != code_slb[0]) || (lscsa_slb[1] != code_slb[1]))
		load_mfc_slb(spu, lscsa_slb, 1);
}
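/*
 * Illustrative walk-through of the two-entry MFC SLB setup above
 * (a sketch, not an additional code path): slot 0 maps the kernel
 * segment containing spu_save_code, and slot 1 maps the segment
 * containing the LSCSA when the two differ:
 *
 *	u64 slb[2];
 *	get_kernel_slb((unsigned long)&spu_save_code[0], slb);
 *	load_mfc_slb(spu, slb, 0);	// VSID into slot 0; ESID_V marks it valid
 *
 * When both addresses fall in the same segment, the ESID/VSID pairs
 * compare equal and the second load is skipped, as the comparison in
 * setup_mfc_slbs() shows.
 */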
static inline void set_switch_active(struct spu_state *csa, struct spu *spu)
{
	/* Save, Step 48:
	 * Restore, Step 23:
	 *     Change the software context switch pending flag
	 *     to context switch active.
	 */
	set_bit(SPU_CONTEXT_SWITCH_ACTIVE, &spu->flags);
	clear_bit(SPU_CONTEXT_SWITCH_PENDING, &spu->flags);
	mb();
}

static inline void enable_interrupts(struct spu_state *csa, struct spu *spu)
{
	unsigned long class1_mask = CLASS1_ENABLE_SEGMENT_FAULT_INTR |
	    CLASS1_ENABLE_STORAGE_FAULT_INTR;

	/* Save, Step 49:
	 * Restore, Step 22:
	 *     Reset and then enable interrupts, as
	 *     needed by this implementation.
	 *
	 *     This implementation enables only class1
	 *     (translation) interrupts.
	 */
	spin_lock_irq(&spu->register_lock);
	spu_int_stat_clear(spu, 0, ~0ul);
	spu_int_stat_clear(spu, 1, ~0ul);
	spu_int_stat_clear(spu, 2, ~0ul);
	spu_int_mask_set(spu, 0, 0ul);
	spu_int_mask_set(spu, 1, class1_mask);
	spu_int_mask_set(spu, 2, 0ul);
	spin_unlock_irq(&spu->register_lock);
}
static inline int send_mfc_dma(struct spu *spu, unsigned long ea,
			       unsigned int ls_offset, unsigned int size,
			       unsigned int tag, unsigned int rclass,
			       unsigned int cmd)
{
	struct spu_problem __iomem *prob = spu->problem;
	union mfc_tag_size_class_cmd command;
	unsigned int transfer_size;
	volatile unsigned int status = 0x0;

	while (size > 0) {
		transfer_size =
		    (size > MFC_MAX_DMA_SIZE) ? MFC_MAX_DMA_SIZE : size;
		command.u.mfc_size = transfer_size;
		command.u.mfc_tag = tag;
		command.u.mfc_rclassid = rclass;
		command.u.mfc_cmd = cmd;
		do {
			out_be32(&prob->mfc_lsa_W, ls_offset);
			out_be64(&prob->mfc_ea_W, ea);
			out_be64(&prob->mfc_union_W.all64, command.all64);
			status =
			    in_be32(&prob->mfc_union_W.by32.mfc_class_cmd32);
			if (unlikely(status & 0x2)) {
				cpu_relax();
			}
		} while (status & 0x3);
		size -= transfer_size;
		ea += transfer_size;
		ls_offset += transfer_size;
	}
	return 0;
}
static inline void save_ls_16kb(struct spu_state *csa, struct spu *spu)
{
	unsigned long addr = (unsigned long)&csa->lscsa->ls[0];
	unsigned int ls_offset = 0x0;
	unsigned int size = 16384;
	unsigned int tag = 0;
	unsigned int rclass = 0;
	unsigned int cmd = MFC_PUT_CMD;

	/* Save, Step 50:
	 *     Issue a DMA command to copy the first 16K bytes
	 *     of local storage to the CSA.
	 */
	send_mfc_dma(spu, addr, ls_offset, size, tag, rclass, cmd);
}

static inline void set_spu_npc(struct spu_state *csa, struct spu *spu)
{
	struct spu_problem __iomem *prob = spu->problem;

	/* Save, Step 51:
	 * Restore, Step 31:
	 *     Write SPU_NPC[IE]=0 and SPU_NPC[LSA] to entry
	 *     point address of context save code in local
	 *     storage.
	 *
	 *     This implementation uses SPU-side save/restore
	 *     programs with entry points at LSA of 0.
	 */
	out_be32(&prob->spu_npc_RW, 0);
	eieio();
}
static inline void set_signot1(struct spu_state *csa, struct spu *spu)
{
	struct spu_problem __iomem *prob = spu->problem;
	union {
		u64 ull;
		u32 ui[2];
	} addr64;

	/* Save, Step 52:
	 * Restore, Step 32:
	 *    Write SPU_Sig_Notify_1 register with upper 32-bits
	 *    of the CSA.LSCSA effective address.
	 */
	addr64.ull = (u64) csa->lscsa;
	out_be32(&prob->signal_notify1, addr64.ui[0]);
}

static inline void set_signot2(struct spu_state *csa, struct spu *spu)
{
	struct spu_problem __iomem *prob = spu->problem;
	union {
		u64 ull;
		u32 ui[2];
	} addr64;

	/* Save, Step 53:
	 * Restore, Step 33:
	 *    Write SPU_Sig_Notify_2 register with lower 32-bits
	 *    of the CSA.LSCSA effective address.
	 */
	addr64.ull = (u64) csa->lscsa;
	out_be32(&prob->signal_notify2, addr64.ui[1]);
}
static inline void send_save_code(struct spu_state *csa, struct spu *spu)
{
	unsigned long addr = (unsigned long)&spu_save_code[0];
	unsigned int ls_offset = 0x0;
	unsigned int size = sizeof(spu_save_code);
	unsigned int tag = 0;
	unsigned int rclass = 0;
	unsigned int cmd = MFC_GETFS_CMD;

	/* Save, Step 54:
	 *     Issue a DMA command to copy context save code
	 *     to local storage and start SPU.
	 */
	send_mfc_dma(spu, addr, ls_offset, size, tag, rclass, cmd);
}

static inline void set_ppu_querymask(struct spu_state *csa, struct spu *spu)
{
	struct spu_problem __iomem *prob = spu->problem;

	/* Save, Step 55:
	 * Restore, Step 38:
	 *     Write PPU_QueryMask=1 (enable Tag Group 0)
	 *     and issue eieio instruction.
	 */
	out_be32(&prob->dma_querymask_RW, MFC_TAGID_TO_TAGMASK(0));
	eieio();
}

static inline void wait_tag_complete(struct spu_state *csa, struct spu *spu)
{
	struct spu_problem __iomem *prob = spu->problem;
	u32 mask = MFC_TAGID_TO_TAGMASK(0);
	unsigned long flags;

	/* Save, Step 56:
	 * Restore, Step 39:
	 * Restore, Step 45:
	 *     Poll PPU_TagStatus[gn] until 01 (Tag group 0 complete)
	 *     or write PPU_QueryType[TS]=01 and wait for Tag Group
	 *     Complete Interrupt.  Write INT_Stat_Class0 or
	 *     INT_Stat_Class2 with value of 'handled'.
	 */
	POLL_WHILE_FALSE(in_be32(&prob->dma_tagstatus_R) & mask);

	local_irq_save(flags);
	spu_int_stat_clear(spu, 0, ~(0ul));
	spu_int_stat_clear(spu, 2, ~(0ul));
	local_irq_restore(flags);
}
static inline void wait_spu_stopped(struct spu_state *csa, struct spu *spu)
{
	struct spu_problem __iomem *prob = spu->problem;
	unsigned long flags;

	/* Save, Step 57:
	 * Restore, Step 40:
	 *     Poll until SPU_Status[R]=0 or wait for SPU Class 0
	 *     or SPU Class 2 interrupt.  Write INT_Stat_class0
	 *     or INT_Stat_class2 with value of handled.
	 */
	POLL_WHILE_TRUE(in_be32(&prob->spu_status_R) & SPU_STATUS_RUNNING);

	local_irq_save(flags);
	spu_int_stat_clear(spu, 0, ~(0ul));
	spu_int_stat_clear(spu, 2, ~(0ul));
	local_irq_restore(flags);
}

static inline int check_save_status(struct spu_state *csa, struct spu *spu)
{
	struct spu_problem __iomem *prob = spu->problem;
	u32 complete;

	/* Save, Step 54:
	 *     If SPU_Status[P]=1 and SPU_Status[SC] = "success",
	 *     context save succeeded, otherwise context save
	 *     failed.
	 */
	complete = ((SPU_SAVE_COMPLETE << SPU_STOP_STATUS_SHIFT) |
		    SPU_STATUS_STOPPED_BY_STOP);
	return (in_be32(&prob->spu_status_R) != complete) ? 1 : 0;
}

static inline void terminate_spu_app(struct spu_state *csa, struct spu *spu)
{
	/* Restore, Step 4:
	 *     If required, notify the "using application" that
	 *     the SPU task has been terminated.  TBD.
	 */
}
static inline void suspend_mfc(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	/* Restore, Step 7:
	 * Restore, Step 46:
	 *     Write MFC_Cntl[Dh,Sc]='1','1' to suspend
	 *     the queue and halt the decrementer.
	 */
	out_be64(&priv2->mfc_control_RW, MFC_CNTL_SUSPEND_DMA_QUEUE |
		 MFC_CNTL_DECREMENTER_HALTED);
	eieio();
}

static inline void wait_suspend_mfc_complete(struct spu_state *csa,
					     struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	/* Restore, Step 8:
	 * Restore, Step 47:
	 *     Poll MFC_CNTL[Ss] until 11 is returned.
	 */
	POLL_WHILE_FALSE(in_be64(&priv2->mfc_control_RW) &
			 MFC_CNTL_SUSPEND_COMPLETE);
}
static inline int suspend_spe(struct spu_state *csa, struct spu *spu)
{
	struct spu_problem __iomem *prob = spu->problem;

	/* Restore, Step 9:
	 *    If SPU_Status[R]=1, stop SPU execution
	 *    and wait for stop to complete.
	 *
	 *    Returns       1 if SPU_Status[R]=1 on entry.
	 *                  0 otherwise.
	 */
	if (in_be32(&prob->spu_status_R) & SPU_STATUS_RUNNING) {
		if (in_be32(&prob->spu_status_R) &
		    SPU_STATUS_ISOLATED_EXIT_STAUTUS) {
			POLL_WHILE_TRUE(in_be32(&prob->spu_status_R) &
					SPU_STATUS_RUNNING);
		}
		if ((in_be32(&prob->spu_status_R) &
		     SPU_STATUS_ISOLATED_LOAD_STAUTUS)
		    || (in_be32(&prob->spu_status_R) &
			SPU_STATUS_ISOLATED_STATE)) {
			out_be32(&prob->spu_runcntl_RW, SPU_RUNCNTL_STOP);
			eieio();
			POLL_WHILE_TRUE(in_be32(&prob->spu_status_R) &
					SPU_STATUS_RUNNING);
			out_be32(&prob->spu_runcntl_RW, 0x2);
			eieio();
			POLL_WHILE_TRUE(in_be32(&prob->spu_status_R) &
					SPU_STATUS_RUNNING);
		}
		if (in_be32(&prob->spu_status_R) &
		    SPU_STATUS_WAITING_FOR_CHANNEL) {
			out_be32(&prob->spu_runcntl_RW, SPU_RUNCNTL_STOP);
			eieio();
			POLL_WHILE_TRUE(in_be32(&prob->spu_status_R) &
					SPU_STATUS_RUNNING);
		}
		return 1;
	}
	return 0;
}
static inline void clear_spu_status(struct spu_state *csa, struct spu *spu)
{
	struct spu_problem __iomem *prob = spu->problem;

	/* Restore, Step 10:
	 *     If SPU_Status[R]=0 and SPU_Status[E,L,IS]=1,
	 *     release SPU from isolate state.
	 */
	if (!(in_be32(&prob->spu_status_R) & SPU_STATUS_RUNNING)) {
		if (in_be32(&prob->spu_status_R) &
		    SPU_STATUS_ISOLATED_EXIT_STAUTUS) {
			spu_mfc_sr1_set(spu,
					MFC_STATE1_MASTER_RUN_CONTROL_MASK);
			eieio();
			out_be32(&prob->spu_runcntl_RW, SPU_RUNCNTL_RUNNABLE);
			eieio();
			POLL_WHILE_TRUE(in_be32(&prob->spu_status_R) &
					SPU_STATUS_RUNNING);
		}
		if ((in_be32(&prob->spu_status_R) &
		     SPU_STATUS_ISOLATED_LOAD_STAUTUS)
		    || (in_be32(&prob->spu_status_R) &
			SPU_STATUS_ISOLATED_STATE)) {
			spu_mfc_sr1_set(spu,
					MFC_STATE1_MASTER_RUN_CONTROL_MASK);
			eieio();
			out_be32(&prob->spu_runcntl_RW, 0x2);
			eieio();
			POLL_WHILE_TRUE(in_be32(&prob->spu_status_R) &
					SPU_STATUS_RUNNING);
		}
	}
}
static inline void reset_ch_part1(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;
	u64 ch_indices[7] = { 0UL, 3UL, 4UL, 24UL, 25UL, 27UL };
	u64 idx;
	int i;

	/* Restore, Step 20:
	 */

	/* Reset CH 1 (without channel count) */
	out_be64(&priv2->spu_chnlcntptr_RW, 1);
	out_be64(&priv2->spu_chnldata_RW, 0UL);

	/* Reset the following CH: [0,3,4,24,25,27] */
	for (i = 0; i < 7; i++) {
		idx = ch_indices[i];
		out_be64(&priv2->spu_chnlcntptr_RW, idx);
		eieio();
		out_be64(&priv2->spu_chnldata_RW, 0UL);
		out_be64(&priv2->spu_chnlcnt_RW, 0UL);
		eieio();
	}
}

static inline void reset_ch_part2(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;
	u64 ch_indices[5] = { 21UL, 23UL, 28UL, 29UL, 30UL };
	u64 ch_counts[5] = { 16UL, 1UL, 1UL, 0UL, 1UL };
	u64 idx;
	int i;

	/* Restore, Step 21:
	 *     Reset the following CH: [21, 23, 28, 29, 30]
	 */
	for (i = 0; i < 5; i++) {
		idx = ch_indices[i];
		out_be64(&priv2->spu_chnlcntptr_RW, idx);
		eieio();
		out_be64(&priv2->spu_chnlcnt_RW, ch_counts[i]);
		eieio();
	}
}
static inline void setup_spu_status_part1(struct spu_state *csa,
					  struct spu *spu)
{
	u32 status_P = SPU_STATUS_STOPPED_BY_STOP;
	u32 status_I = SPU_STATUS_INVALID_INSTR;
	u32 status_H = SPU_STATUS_STOPPED_BY_HALT;
	u32 status_S = SPU_STATUS_SINGLE_STEP;
	u32 status_S_I = SPU_STATUS_SINGLE_STEP | SPU_STATUS_INVALID_INSTR;
	u32 status_S_P = SPU_STATUS_SINGLE_STEP | SPU_STATUS_STOPPED_BY_STOP;
	u32 status_P_H = SPU_STATUS_STOPPED_BY_HALT | SPU_STATUS_STOPPED_BY_STOP;
	u32 status_P_I = SPU_STATUS_STOPPED_BY_STOP | SPU_STATUS_INVALID_INSTR;
	u32 status_code;

	/* Restore, Step 27:
	 *     If the CSA.SPU_Status[I,S,H,P]=1 then add the correct
	 *     instruction sequence to the end of the SPU based restore
	 *     code (after the "context restored" stop and signal) to
	 *     restore the correct SPU status.
	 *
	 *     NOTE: Rather than modifying the SPU executable, we
	 *     instead add a new 'stopped_status' field to the
	 *     LSCSA.  The SPU-side restore reads this field and
	 *     takes the appropriate action when exiting.
	 */
	status_code =
	    (csa->prob.spu_status_R >> SPU_STOP_STATUS_SHIFT) & 0xFFFF;
	if ((csa->prob.spu_status_R & status_P_I) == status_P_I) {

		/* SPU_Status[P,I]=1 - Illegal Instruction followed
		 * by Stop and Signal instruction, followed by 'br -4'.
		 */
		csa->lscsa->stopped_status.slot[0] = SPU_STOPPED_STATUS_P_I;
		csa->lscsa->stopped_status.slot[1] = status_code;

	} else if ((csa->prob.spu_status_R & status_P_H) == status_P_H) {

		/* SPU_Status[P,H]=1 - Halt Conditional, followed
		 * by Stop and Signal instruction, followed by
		 * 'br -4'.
		 */
		csa->lscsa->stopped_status.slot[0] = SPU_STOPPED_STATUS_P_H;
		csa->lscsa->stopped_status.slot[1] = status_code;

	} else if ((csa->prob.spu_status_R & status_S_P) == status_S_P) {

		/* SPU_Status[S,P]=1 - Stop and Signal instruction
		 * followed by 'br -4'.
		 */
		csa->lscsa->stopped_status.slot[0] = SPU_STOPPED_STATUS_S_P;
		csa->lscsa->stopped_status.slot[1] = status_code;

	} else if ((csa->prob.spu_status_R & status_S_I) == status_S_I) {

		/* SPU_Status[S,I]=1 - Illegal instruction followed
		 * by 'br -4'.
		 */
		csa->lscsa->stopped_status.slot[0] = SPU_STOPPED_STATUS_S_I;
		csa->lscsa->stopped_status.slot[1] = status_code;

	} else if ((csa->prob.spu_status_R & status_P) == status_P) {

		/* SPU_Status[P]=1 - Stop and Signal instruction
		 * followed by 'br -4'.
		 */
		csa->lscsa->stopped_status.slot[0] = SPU_STOPPED_STATUS_P;
		csa->lscsa->stopped_status.slot[1] = status_code;

	} else if ((csa->prob.spu_status_R & status_H) == status_H) {

		/* SPU_Status[H]=1 - Halt Conditional, followed
		 * by 'br -4'.
		 */
		csa->lscsa->stopped_status.slot[0] = SPU_STOPPED_STATUS_H;

	} else if ((csa->prob.spu_status_R & status_S) == status_S) {

		/* SPU_Status[S]=1 - Two nop instructions.
		 */
		csa->lscsa->stopped_status.slot[0] = SPU_STOPPED_STATUS_S;

	} else if ((csa->prob.spu_status_R & status_I) == status_I) {

		/* SPU_Status[I]=1 - Illegal instruction followed
		 * by 'br -4'.
		 */
		csa->lscsa->stopped_status.slot[0] = SPU_STOPPED_STATUS_I;

	}
}
static inline void setup_spu_status_part2(struct spu_state *csa,
					  struct spu *spu)
{
	u32 mask;

	/* Restore, Step 28:
	 *     If the CSA.SPU_Status[I,S,H,P,R]=0 then
	 *     add a 'br *' instruction to the end of
	 *     the SPU based restore code.
	 *
	 *     NOTE: Rather than modifying the SPU executable, we
	 *     instead add a new 'stopped_status' field to the
	 *     LSCSA.  The SPU-side restore reads this field and
	 *     takes the appropriate action when exiting.
	 */
	mask = SPU_STATUS_INVALID_INSTR |
	    SPU_STATUS_SINGLE_STEP |
	    SPU_STATUS_STOPPED_BY_HALT |
	    SPU_STATUS_STOPPED_BY_STOP | SPU_STATUS_RUNNING;
	if (!(csa->prob.spu_status_R & mask)) {
		csa->lscsa->stopped_status.slot[0] = SPU_STOPPED_STATUS_R;
	}
}
static inline void restore_mfc_rag(struct spu_state *csa, struct spu *spu)
{
	/* Restore, Step 29:
	 *     Restore RA_GROUP_ID register and the
	 *     RA_ENABLE register from the CSA.
	 */
	spu_resource_allocation_groupID_set(spu,
			csa->priv1.resource_allocation_groupID_RW);
	spu_resource_allocation_enable_set(spu,
			csa->priv1.resource_allocation_enable_RW);
}

static inline void send_restore_code(struct spu_state *csa, struct spu *spu)
{
	unsigned long addr = (unsigned long)&spu_restore_code[0];
	unsigned int ls_offset = 0x0;
	unsigned int size = sizeof(spu_restore_code);
	unsigned int tag = 0;
	unsigned int rclass = 0;
	unsigned int cmd = MFC_GETFS_CMD;

	/* Restore, Step 37:
	 *     Issue MFC DMA command to copy context
	 *     restore code to local storage.
	 */
	send_mfc_dma(spu, addr, ls_offset, size, tag, rclass, cmd);
}
static inline void setup_decr(struct spu_state *csa, struct spu *spu)
{
	/* Restore, Step 34:
	 *     If CSA.MFC_CNTL[Ds]=1 (decrementer was
	 *     running) then adjust decrementer, set
	 *     decrementer running status in LSCSA,
	 *     and set decrementer "wrapped" status
	 *     in LSCSA.
	 */
	if (csa->priv2.mfc_control_RW & MFC_CNTL_DECREMENTER_RUNNING) {
		cycles_t resume_time = get_cycles();
		cycles_t delta_time = resume_time - csa->suspend_time;

		csa->lscsa->decr.slot[0] -= delta_time;
	}
}

static inline void setup_ppu_mb(struct spu_state *csa, struct spu *spu)
{
	/* Restore, Step 35:
	 *     Copy the CSA.PU_MB data into the LSCSA.
	 */
	csa->lscsa->ppu_mb.slot[0] = csa->prob.pu_mb_R;
}

static inline void setup_ppuint_mb(struct spu_state *csa, struct spu *spu)
{
	/* Restore, Step 36:
	 *     Copy the CSA.PUINT_MB data into the LSCSA.
	 */
	csa->lscsa->ppuint_mb.slot[0] = csa->priv2.puint_mb_R;
}
static inline int check_restore_status(struct spu_state *csa, struct spu *spu)
{
	struct spu_problem __iomem *prob = spu->problem;
	u32 complete;

	/* Restore, Step 40:
	 *     If SPU_Status[P]=1 and SPU_Status[SC] = "success",
	 *     context restore succeeded, otherwise context restore
	 *     failed.
	 */
	complete = ((SPU_RESTORE_COMPLETE << SPU_STOP_STATUS_SHIFT) |
		    SPU_STATUS_STOPPED_BY_STOP);
	return (in_be32(&prob->spu_status_R) != complete) ? 1 : 0;
}

static inline void restore_spu_privcntl(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	/* Restore, Step 41:
	 *     Restore SPU_PrivCntl from the CSA.
	 */
	out_be64(&priv2->spu_privcntl_RW, csa->priv2.spu_privcntl_RW);
	eieio();
}
static inline void restore_status_part1(struct spu_state *csa, struct spu *spu)
{
	struct spu_problem __iomem *prob = spu->problem;
	u32 mask;

	/* Restore, Step 42:
	 *     If any CSA.SPU_Status[I,S,H,P]=1, then
	 *     restore the error or single step state.
	 */
	mask = SPU_STATUS_INVALID_INSTR |
	    SPU_STATUS_SINGLE_STEP |
	    SPU_STATUS_STOPPED_BY_HALT | SPU_STATUS_STOPPED_BY_STOP;
	if (csa->prob.spu_status_R & mask) {
		out_be32(&prob->spu_runcntl_RW, SPU_RUNCNTL_RUNNABLE);
		eieio();
		POLL_WHILE_TRUE(in_be32(&prob->spu_status_R) &
				SPU_STATUS_RUNNING);
	}
}

static inline void restore_status_part2(struct spu_state *csa, struct spu *spu)
{
	struct spu_problem __iomem *prob = spu->problem;
	u32 mask;

	/* Restore, Step 43:
	 *     If all CSA.SPU_Status[I,S,H,P,R]=0 then write
	 *     SPU_RunCntl[R0R1]='01', wait for SPU_Status[R]=1,
	 *     then write '00' to SPU_RunCntl[R0R1] and wait
	 *     for SPU_Status[R]=0.
	 */
	mask = SPU_STATUS_INVALID_INSTR |
	    SPU_STATUS_SINGLE_STEP |
	    SPU_STATUS_STOPPED_BY_HALT |
	    SPU_STATUS_STOPPED_BY_STOP | SPU_STATUS_RUNNING;
	if (!(csa->prob.spu_status_R & mask)) {
		out_be32(&prob->spu_runcntl_RW, SPU_RUNCNTL_RUNNABLE);
		eieio();
		POLL_WHILE_FALSE(in_be32(&prob->spu_status_R) &
				 SPU_STATUS_RUNNING);
		out_be32(&prob->spu_runcntl_RW, SPU_RUNCNTL_STOP);
		eieio();
		POLL_WHILE_TRUE(in_be32(&prob->spu_status_R) &
				SPU_STATUS_RUNNING);
	}
}
static inline void restore_ls_16kb(struct spu_state *csa, struct spu *spu)
{
	unsigned long addr = (unsigned long)&csa->lscsa->ls[0];
	unsigned int ls_offset = 0x0;
	unsigned int size = 16384;
	unsigned int tag = 0;
	unsigned int rclass = 0;
	unsigned int cmd = MFC_GET_CMD;

	/* Restore, Step 44:
	 *     Issue a DMA command to restore the first
	 *     16kb of local storage from CSA.
	 */
	send_mfc_dma(spu, addr, ls_offset, size, tag, rclass, cmd);
}

static inline void clear_interrupts(struct spu_state *csa, struct spu *spu)
{
	/* Restore, Step 49:
	 *     Write INT_MASK_class0 with value of 0.
	 *     Write INT_MASK_class1 with value of 0.
	 *     Write INT_MASK_class2 with value of 0.
	 *     Write INT_STAT_class0 with value of -1.
	 *     Write INT_STAT_class1 with value of -1.
	 *     Write INT_STAT_class2 with value of -1.
	 */
	spin_lock_irq(&spu->register_lock);
	spu_int_mask_set(spu, 0, 0ul);
	spu_int_mask_set(spu, 1, 0ul);
	spu_int_mask_set(spu, 2, 0ul);
	spu_int_stat_clear(spu, 0, ~0ul);
	spu_int_stat_clear(spu, 1, ~0ul);
	spu_int_stat_clear(spu, 2, ~0ul);
	spin_unlock_irq(&spu->register_lock);
}
static inline void restore_mfc_queues(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;
	int i;

	/* Restore, Step 50:
	 *     If MFC_Cntl[Se]!=0 then restore
	 *     MFC command queues.
	 */
	if ((csa->priv2.mfc_control_RW & MFC_CNTL_DMA_QUEUES_EMPTY_MASK) == 0) {
		for (i = 0; i < 8; i++) {
			out_be64(&priv2->puq[i].mfc_cq_data0_RW,
				 csa->priv2.puq[i].mfc_cq_data0_RW);
			out_be64(&priv2->puq[i].mfc_cq_data1_RW,
				 csa->priv2.puq[i].mfc_cq_data1_RW);
			out_be64(&priv2->puq[i].mfc_cq_data2_RW,
				 csa->priv2.puq[i].mfc_cq_data2_RW);
			out_be64(&priv2->puq[i].mfc_cq_data3_RW,
				 csa->priv2.puq[i].mfc_cq_data3_RW);
		}
		for (i = 0; i < 16; i++) {
			out_be64(&priv2->spuq[i].mfc_cq_data0_RW,
				 csa->priv2.spuq[i].mfc_cq_data0_RW);
			out_be64(&priv2->spuq[i].mfc_cq_data1_RW,
				 csa->priv2.spuq[i].mfc_cq_data1_RW);
			out_be64(&priv2->spuq[i].mfc_cq_data2_RW,
				 csa->priv2.spuq[i].mfc_cq_data2_RW);
			out_be64(&priv2->spuq[i].mfc_cq_data3_RW,
				 csa->priv2.spuq[i].mfc_cq_data3_RW);
		}
	}
	eieio();
}
static inline void restore_ppu_querymask(struct spu_state *csa, struct spu *spu)
{
	struct spu_problem __iomem *prob = spu->problem;

	/* Restore, Step 51:
	 *     Restore the PPU_QueryMask register from CSA.
	 */
	out_be32(&prob->dma_querymask_RW, csa->prob.dma_querymask_RW);
	eieio();
}

static inline void restore_ppu_querytype(struct spu_state *csa, struct spu *spu)
{
	struct spu_problem __iomem *prob = spu->problem;

	/* Restore, Step 52:
	 *     Restore the PPU_QueryType register from CSA.
	 */
	out_be32(&prob->dma_querytype_RW, csa->prob.dma_querytype_RW);
	eieio();
}

static inline void restore_mfc_csr_tsq(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	/* Restore, Step 53:
	 *     Restore the MFC_CSR_TSQ register from CSA.
	 */
	out_be64(&priv2->spu_tag_status_query_RW,
		 csa->priv2.spu_tag_status_query_RW);
	eieio();
}

static inline void restore_mfc_csr_cmd(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	/* Restore, Step 54:
	 *     Restore the MFC_CSR_CMD1 and MFC_CSR_CMD2
	 *     registers from CSA.
	 */
	out_be64(&priv2->spu_cmd_buf1_RW, csa->priv2.spu_cmd_buf1_RW);
	out_be64(&priv2->spu_cmd_buf2_RW, csa->priv2.spu_cmd_buf2_RW);
	eieio();
}

static inline void restore_mfc_csr_ato(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	/* Restore, Step 55:
	 *     Restore the MFC_CSR_ATO register from CSA.
	 */
	out_be64(&priv2->spu_atomic_status_RW, csa->priv2.spu_atomic_status_RW);
}

static inline void restore_mfc_tclass_id(struct spu_state *csa, struct spu *spu)
{
	/* Restore, Step 56:
	 *     Restore the MFC_TCLASS_ID register from CSA.
	 */
	spu_mfc_tclass_id_set(spu, csa->priv1.mfc_tclass_id_RW);
	eieio();
}
static inline void set_llr_event(struct spu_state *csa, struct spu *spu)
{
	u64 ch0_cnt, ch0_data;
	u64 ch1_data;

	/* Restore, Step 57:
	 *    Set the Lock Line Reservation Lost Event by:
	 *      1. OR CSA.SPU_Event_Status with bit 21 (Lr) set to 1.
	 *      2. If CSA.SPU_Channel_0_Count=0 and
	 *         CSA.SPU_Wr_Event_Mask[Lr]=1 and
	 *         CSA.SPU_Event_Status[Lr]=0 then set
	 *         CSA.SPU_Event_Status_Count=1.
	 */
	ch0_cnt = csa->spu_chnlcnt_RW[0];
	ch0_data = csa->spu_chnldata_RW[0];
	ch1_data = csa->spu_chnldata_RW[1];
	csa->spu_chnldata_RW[0] |= MFC_LLR_LOST_EVENT;
	if ((ch0_cnt == 0) && !(ch0_data & MFC_LLR_LOST_EVENT) &&
	    (ch1_data & MFC_LLR_LOST_EVENT)) {
		csa->spu_chnlcnt_RW[0] = 1;
	}
}

static inline void restore_decr_wrapped(struct spu_state *csa, struct spu *spu)
{
	/* Restore, Step 58:
	 *     If the status of the CSA software decrementer
	 *     "wrapped" flag is set, OR in a '1' to
	 *     CSA.SPU_Event_Status[Tm].
	 */
	if (csa->lscsa->decr_status.slot[0] == 1) {
		csa->spu_chnldata_RW[0] |= 0x20;
	}
	if ((csa->lscsa->decr_status.slot[0] == 1) &&
	    (csa->spu_chnlcnt_RW[0] == 0 &&
	     ((csa->spu_chnldata_RW[2] & 0x20) == 0x0) &&
	     ((csa->spu_chnldata_RW[0] & 0x20) != 0x1))) {
		csa->spu_chnlcnt_RW[0] = 1;
	}
}
static inline void restore_ch_part1(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;
	u64 idx, ch_indices[7] = { 0UL, 3UL, 4UL, 24UL, 25UL, 27UL };
	int i;

	/* Restore, Step 59:
	 */

	/* Restore CH 1 without count */
	out_be64(&priv2->spu_chnlcntptr_RW, 1);
	out_be64(&priv2->spu_chnldata_RW, csa->spu_chnldata_RW[1]);

	/* Restore the following CH: [0,3,4,24,25,27] */
	for (i = 0; i < 7; i++) {
		idx = ch_indices[i];
		out_be64(&priv2->spu_chnlcntptr_RW, idx);
		eieio();
		out_be64(&priv2->spu_chnldata_RW, csa->spu_chnldata_RW[idx]);
		out_be64(&priv2->spu_chnlcnt_RW, csa->spu_chnlcnt_RW[idx]);
		eieio();
	}
}

static inline void restore_ch_part2(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;
	u64 ch_indices[3] = { 9UL, 21UL, 23UL };
	u64 ch_counts[3] = { 1UL, 16UL, 1UL };
	u64 idx;
	int i;

	/* Restore, Step 60:
	 *     Restore the following CH: [9,21,23].
	 */
	ch_counts[1] = csa->spu_chnlcnt_RW[21];
	for (i = 0; i < 3; i++) {
		idx = ch_indices[i];
		out_be64(&priv2->spu_chnlcntptr_RW, idx);
		eieio();
		out_be64(&priv2->spu_chnlcnt_RW, ch_counts[i]);
		eieio();
	}
}
static inline void restore_spu_lslr(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	/* Restore, Step 61:
	 *     Restore the SPU_LSLR register from CSA.
	 */
	out_be64(&priv2->spu_lslr_RW, csa->priv2.spu_lslr_RW);
	eieio();
}

static inline void restore_spu_cfg(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	/* Restore, Step 62:
	 *     Restore the SPU_Cfg register from CSA.
	 */
	out_be64(&priv2->spu_cfg_RW, csa->priv2.spu_cfg_RW);
	eieio();
}

static inline void restore_pm_trace(struct spu_state *csa, struct spu *spu)
{
	/* Restore, Step 63:
	 *     Restore PM_Trace_Tag_Wait_Mask from CSA.
	 *     Not performed by this implementation.
	 */
}

static inline void restore_spu_npc(struct spu_state *csa, struct spu *spu)
{
	struct spu_problem __iomem *prob = spu->problem;

	/* Restore, Step 64:
	 *     Restore SPU_NPC from CSA.
	 */
	out_be32(&prob->spu_npc_RW, csa->prob.spu_npc_RW);
	eieio();
}

static inline void restore_spu_mb(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;
	int i;

	/* Restore, Step 65:
	 *     Restore MFC_RdSPU_MB from CSA.
	 */
	out_be64(&priv2->spu_chnlcntptr_RW, 29UL);
	eieio();
	out_be64(&priv2->spu_chnlcnt_RW, csa->spu_chnlcnt_RW[29]);
	for (i = 0; i < 4; i++) {
		out_be64(&priv2->spu_chnldata_RW, csa->spu_mailbox_data[i]);
	}
	eieio();
}
static inline void check_ppu_mb_stat(struct spu_state *csa, struct spu *spu)
{
	struct spu_problem __iomem *prob = spu->problem;
	u32 dummy;

	/* Restore, Step 66:
	 *     If CSA.MB_Stat[P]=0 (mailbox empty) then
	 *     read from the PPU_MB register.
	 */
	if ((csa->prob.mb_stat_R & 0xFF) == 0) {
		dummy = in_be32(&prob->pu_mb_R);
		eieio();
	}
}

static inline void check_ppuint_mb_stat(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;
	u64 dummy;

	/* Restore, Step 67:
	 *     If CSA.MB_Stat[I]=0 (mailbox empty) then
	 *     read from the PPUINT_MB register.
	 */
	if ((csa->prob.mb_stat_R & 0xFF0000) == 0) {
		dummy = in_be64(&priv2->puint_mb_R);
		eieio();
		spu_int_stat_clear(spu, 2, CLASS2_ENABLE_MAILBOX_INTR);
	}
}
static inline void restore_mfc_slbs(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;
	int i;

	/* Restore, Step 68:
	 *     If MFC_SR1[R]='1', restore SLBs from CSA.
	 */
	if (csa->priv1.mfc_sr1_RW & MFC_STATE1_RELOCATE_MASK) {
		for (i = 0; i < 8; i++) {
			out_be64(&priv2->slb_index_W, i);
			eieio();
			out_be64(&priv2->slb_esid_RW, csa->slb_esid_RW[i]);
			out_be64(&priv2->slb_vsid_RW, csa->slb_vsid_RW[i]);
			eieio();
		}
		out_be64(&priv2->slb_index_W, csa->priv2.slb_index_W);
		eieio();
	}
}

static inline void restore_mfc_sr1(struct spu_state *csa, struct spu *spu)
{
	/* Restore, Step 69:
	 *     Restore the MFC_SR1 register from CSA.
	 */
	spu_mfc_sr1_set(spu, csa->priv1.mfc_sr1_RW);
	eieio();
}

static inline void restore_other_spu_access(struct spu_state *csa,
					    struct spu *spu)
{
	/* Restore, Step 70:
	 *     Restore other SPU mappings to this SPU.  TBD.
	 */
}
static inline void restore_spu_runcntl(struct spu_state *csa, struct spu *spu)
{
	struct spu_problem __iomem *prob = spu->problem;

	/* Restore, Step 71:
	 *     If CSA.SPU_Status[R]=1 then write
	 *     SPU_RunCntl[R0R1]='01'.
	 */
	if (csa->prob.spu_status_R & SPU_STATUS_RUNNING) {
		out_be32(&prob->spu_runcntl_RW, SPU_RUNCNTL_RUNNABLE);
		eieio();
	}
}

static inline void restore_mfc_cntl(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	/* Restore, Step 72:
	 *     Restore the MFC_CNTL register from the CSA.
	 */
	out_be64(&priv2->mfc_control_RW, csa->priv2.mfc_control_RW);
	eieio();
}

static inline void enable_user_access(struct spu_state *csa, struct spu *spu)
{
	/* Restore, Step 73:
	 *     Enable user-space access (if provided) to this
	 *     SPU by mapping the virtual pages assigned to
	 *     the SPU memory-mapped I/O (MMIO) for problem
	 *     state.  TBD.
	 */
}

static inline void reset_switch_active(struct spu_state *csa, struct spu *spu)
{
	/* Restore, Step 74:
	 *     Reset the "context switch active" flag.
	 */
	clear_bit(SPU_CONTEXT_SWITCH_ACTIVE, &spu->flags);
	mb();
}

static inline void reenable_interrupts(struct spu_state *csa, struct spu *spu)
{
	/* Restore, Step 75:
	 *     Re-enable SPU interrupts.
	 */
	spin_lock_irq(&spu->register_lock);
	spu_int_mask_set(spu, 0, csa->priv1.int_mask_class0_RW);
	spu_int_mask_set(spu, 1, csa->priv1.int_mask_class1_RW);
	spu_int_mask_set(spu, 2, csa->priv1.int_mask_class2_RW);
	spin_unlock_irq(&spu->register_lock);
}
static int quiece_spu(struct spu_state *prev, struct spu *spu)
{
	/*
	 * Combined steps 2-18 of SPU context save sequence, which
	 * quiesce the SPU state (disable SPU execution, MFC command
	 * queues, decrementer, SPU interrupts, etc.).
	 *
	 * Returns      0 on success.
	 *              2 if failed step 2.
	 *              6 if failed step 6.
	 */

	if (check_spu_isolate(prev, spu)) {	/* Step 2. */
		return 2;
	}
	disable_interrupts(prev, spu);	        /* Step 3. */
	set_watchdog_timer(prev, spu);	        /* Step 4. */
	inhibit_user_access(prev, spu);	        /* Step 5. */
	if (check_spu_isolate(prev, spu)) {	/* Step 6. */
		return 6;
	}
	set_switch_pending(prev, spu);	        /* Step 7. */
	save_mfc_cntl(prev, spu);		/* Step 8. */
	save_spu_runcntl(prev, spu);	        /* Step 9. */
	save_mfc_sr1(prev, spu);	        /* Step 10. */
	save_spu_status(prev, spu);	        /* Step 11. */
	save_mfc_decr(prev, spu);	        /* Step 12. */
	halt_mfc_decr(prev, spu);	        /* Step 13. */
	save_timebase(prev, spu);		/* Step 14. */
	remove_other_spu_access(prev, spu);	/* Step 15. */
	do_mfc_mssync(prev, spu);	        /* Step 16. */
	issue_mfc_tlbie(prev, spu);	        /* Step 17. */
	handle_pending_interrupts(prev, spu);	/* Step 18. */

	return 0;
}
static void save_csa(struct spu_state *prev, struct spu *spu)
{
	/*
	 * Combine steps 19-44 of SPU context save sequence, which
	 * save regions of the privileged & problem state areas.
	 */

	save_mfc_queues(prev, spu);	/* Step 19. */
	save_ppu_querymask(prev, spu);	/* Step 20. */
	save_ppu_querytype(prev, spu);	/* Step 21. */
	save_mfc_csr_tsq(prev, spu);	/* Step 22. */
	save_mfc_csr_cmd(prev, spu);	/* Step 23. */
	save_mfc_csr_ato(prev, spu);	/* Step 24. */
	save_mfc_tclass_id(prev, spu);	/* Step 25. */
	set_mfc_tclass_id(prev, spu);	/* Step 26. */
	purge_mfc_queue(prev, spu);	/* Step 27. */
	wait_purge_complete(prev, spu);	/* Step 28. */
	save_mfc_slbs(prev, spu);	/* Step 29. */
	setup_mfc_sr1(prev, spu);	/* Step 30. */
	save_spu_npc(prev, spu);	/* Step 31. */
	save_spu_privcntl(prev, spu);	/* Step 32. */
	reset_spu_privcntl(prev, spu);	/* Step 33. */
	save_spu_lslr(prev, spu);	/* Step 34. */
	reset_spu_lslr(prev, spu);	/* Step 35. */
	save_spu_cfg(prev, spu);	/* Step 36. */
	save_pm_trace(prev, spu);	/* Step 37. */
	save_mfc_rag(prev, spu);	/* Step 38. */
	save_ppu_mb_stat(prev, spu);	/* Step 39. */
	save_ppu_mb(prev, spu);	        /* Step 40. */
	save_ppuint_mb(prev, spu);	/* Step 41. */
	save_ch_part1(prev, spu);	/* Step 42. */
	save_spu_mb(prev, spu);	        /* Step 43. */
	save_mfc_cmd(prev, spu);	/* Step 44. */
	reset_ch(prev, spu);	        /* Step 45. */
}
static void save_lscsa(struct spu_state *prev, struct spu *spu)
{
	/*
	 * Perform steps 46-57 of SPU context save sequence,
	 * which save regions of the local store and register
	 * file.
	 */

	resume_mfc_queue(prev, spu);	/* Step 46. */
	setup_mfc_slbs(prev, spu);	/* Step 47. */
	set_switch_active(prev, spu);	/* Step 48. */
	enable_interrupts(prev, spu);	/* Step 49. */
	save_ls_16kb(prev, spu);	/* Step 50. */
	set_spu_npc(prev, spu);	        /* Step 51. */
	set_signot1(prev, spu);		/* Step 52. */
	set_signot2(prev, spu);		/* Step 53. */
	send_save_code(prev, spu);	/* Step 54. */
	set_ppu_querymask(prev, spu);	/* Step 55. */
	wait_tag_complete(prev, spu);	/* Step 56. */
	wait_spu_stopped(prev, spu);	/* Step 57. */
}
static void harvest(struct spu_state *prev, struct spu *spu)
{
	/*
	 * Perform steps 2-25 of SPU context restore sequence,
	 * which resets an SPU either after a failed save, or
	 * when using an SPU for the first time.
	 */

	disable_interrupts(prev, spu);	        /* Step 2.  */
	inhibit_user_access(prev, spu);	        /* Step 3.  */
	terminate_spu_app(prev, spu);	        /* Step 4.  */
	set_switch_pending(prev, spu);	        /* Step 5.  */
	remove_other_spu_access(prev, spu);	/* Step 6.  */
	suspend_mfc(prev, spu);	                /* Step 7.  */
	wait_suspend_mfc_complete(prev, spu);	/* Step 8.  */
	if (!suspend_spe(prev, spu))	        /* Step 9.  */
		clear_spu_status(prev, spu);	/* Step 10. */
	do_mfc_mssync(prev, spu);	        /* Step 11. */
	issue_mfc_tlbie(prev, spu);	        /* Step 12. */
	handle_pending_interrupts(prev, spu);	/* Step 13. */
	purge_mfc_queue(prev, spu);	        /* Step 14. */
	wait_purge_complete(prev, spu);	        /* Step 15. */
	reset_spu_privcntl(prev, spu);	        /* Step 16. */
	reset_spu_lslr(prev, spu);              /* Step 17. */
	setup_mfc_sr1(prev, spu);	        /* Step 18. */
	invalidate_slbs(prev, spu);	        /* Step 19. */
	reset_ch_part1(prev, spu);	        /* Step 20. */
	reset_ch_part2(prev, spu);	        /* Step 21. */
	enable_interrupts(prev, spu);	        /* Step 22. */
	set_switch_active(prev, spu);	        /* Step 23. */
	set_mfc_tclass_id(prev, spu);	        /* Step 24. */
	resume_mfc_queue(prev, spu);	        /* Step 25. */
}
static void restore_lscsa(struct spu_state *next, struct spu *spu)
{
	/*
	 * Perform steps 26-40 of SPU context restore sequence,
	 * which restores regions of the local store and register
	 * file.
	 */

	set_watchdog_timer(next, spu);	        /* Step 26. */
	setup_spu_status_part1(next, spu);	/* Step 27. */
	setup_spu_status_part2(next, spu);	/* Step 28. */
	restore_mfc_rag(next, spu);	        /* Step 29. */
	setup_mfc_slbs(next, spu);	        /* Step 30. */
	set_spu_npc(next, spu);	                /* Step 31. */
	set_signot1(next, spu);		        /* Step 32. */
	set_signot2(next, spu);		        /* Step 33. */
	setup_decr(next, spu);	                /* Step 34. */
	setup_ppu_mb(next, spu);	        /* Step 35. */
	setup_ppuint_mb(next, spu);	        /* Step 36. */
	send_restore_code(next, spu);	        /* Step 37. */
	set_ppu_querymask(next, spu);	        /* Step 38. */
	wait_tag_complete(next, spu);	        /* Step 39. */
	wait_spu_stopped(next, spu);	        /* Step 40. */
}
static void restore_csa(struct spu_state *next, struct spu *spu)
{
	/*
	 * Combine steps 41-76 of SPU context restore sequence, which
	 * restore regions of the privileged & problem state areas.
	 */

	restore_spu_privcntl(next, spu);	/* Step 41. */
	restore_status_part1(next, spu);	/* Step 42. */
	restore_status_part2(next, spu);	/* Step 43. */
	restore_ls_16kb(next, spu);	        /* Step 44. */
	wait_tag_complete(next, spu);	        /* Step 45. */
	suspend_mfc(next, spu);	                /* Step 46. */
	wait_suspend_mfc_complete(next, spu);	/* Step 47. */
	issue_mfc_tlbie(next, spu);	        /* Step 48. */
	clear_interrupts(next, spu);	        /* Step 49. */
	restore_mfc_queues(next, spu);	        /* Step 50. */
	restore_ppu_querymask(next, spu);	/* Step 51. */
	restore_ppu_querytype(next, spu);	/* Step 52. */
	restore_mfc_csr_tsq(next, spu);	        /* Step 53. */
	restore_mfc_csr_cmd(next, spu);	        /* Step 54. */
	restore_mfc_csr_ato(next, spu);	        /* Step 55. */
	restore_mfc_tclass_id(next, spu);	/* Step 56. */
	set_llr_event(next, spu);	        /* Step 57. */
	restore_decr_wrapped(next, spu);	/* Step 58. */
	restore_ch_part1(next, spu);	        /* Step 59. */
	restore_ch_part2(next, spu);	        /* Step 60. */
	restore_spu_lslr(next, spu);	        /* Step 61. */
	restore_spu_cfg(next, spu);	        /* Step 62. */
	restore_pm_trace(next, spu);	        /* Step 63. */
	restore_spu_npc(next, spu);	        /* Step 64. */
	restore_spu_mb(next, spu);	        /* Step 65. */
	check_ppu_mb_stat(next, spu);	        /* Step 66. */
	check_ppuint_mb_stat(next, spu);	/* Step 67. */
	restore_mfc_slbs(next, spu);	        /* Step 68. */
	restore_mfc_sr1(next, spu);	        /* Step 69. */
	restore_other_spu_access(next, spu);	/* Step 70. */
	restore_spu_runcntl(next, spu);	        /* Step 71. */
	restore_mfc_cntl(next, spu);	        /* Step 72. */
	enable_user_access(next, spu);	        /* Step 73. */
	reset_switch_active(next, spu);	        /* Step 74. */
	reenable_interrupts(next, spu);	        /* Step 75. */
}
static int __do_spu_save(struct spu_state *prev, struct spu *spu)
{
	int rc;

	/*
	 * SPU context save can be broken into three phases:
	 *
	 *     (a) quiesce [steps 2-16].
	 *     (b) save of CSA, performed by PPE [steps 17-42].
	 *     (c) save of LSCSA, mostly performed by SPU [steps 43-52].
	 *
	 * Returns      0 on success.
	 *              2,6 if failed to quiesce SPU.
	 *              53 if SPU-side of save failed.
	 */

	rc = quiece_spu(prev, spu);	        /* Steps 2-16. */
	switch (rc) {
	default:
	case 2:
	case 6:
		harvest(prev, spu);
		return rc;
	case 0:
		break;
	}
	save_csa(prev, spu);	                /* Steps 17-43. */
	save_lscsa(prev, spu);	                /* Steps 44-53. */
	return check_save_status(prev, spu);	/* Step 54.     */
}

static int __do_spu_restore(struct spu_state *next, struct spu *spu)
{
	int rc;

	/*
	 * SPU context restore can be broken into three phases:
	 *
	 *    (a) harvest (or reset) SPU [steps 2-24].
	 *    (b) restore LSCSA [steps 25-40], mostly performed by SPU.
	 *    (c) restore CSA [steps 41-76], performed by PPE.
	 *
	 * The 'harvest' step is not performed here, but rather
	 * as needed below.
	 */

	restore_lscsa(next, spu);	        /* Steps 24-39. */
	rc = check_restore_status(next, spu);	/* Step 40.     */
	switch (rc) {
	default:
		/* Failed.  Return now. */
		return rc;
	case 0:
		/* Fall through to next step. */
		break;
	}
	restore_csa(next, spu);

	return 0;
}
/**
 * spu_save - SPU context save, with locking.
 * @prev: pointer to SPU context save area, to be saved.
 * @spu: pointer to SPU iomem structure.
 *
 * Acquire locks, perform the save operation then return.
 */
int spu_save(struct spu_state *prev, struct spu *spu)
{
	int rc;

	acquire_spu_lock(spu);	        /* Step 1.     */
	rc = __do_spu_save(prev, spu);	/* Steps 2-53. */
	release_spu_lock(spu);
	if (rc) {
		panic("%s failed on SPU[%d], rc=%d.\n",
		      __func__, spu->number, rc);
	}
	return rc;
}
EXPORT_SYMBOL_GPL(spu_save);
/**
 * spu_restore - SPU context restore, with harvest and locking.
 * @new: pointer to SPU context save area, to be restored.
 * @spu: pointer to SPU iomem structure.
 *
 * Perform harvest + restore, as we may not be coming
 * from a previous successful save operation, and the
 * hardware state is unknown.
 */
int spu_restore(struct spu_state *new, struct spu *spu)
{
	int rc;

	acquire_spu_lock(spu);
	harvest(NULL, spu);
	spu->slb_replace = 0;
	spu->class_0_pending = 0;
	rc = __do_spu_restore(new, spu);
	release_spu_lock(spu);
	if (rc) {
		panic("%s failed on SPU[%d] rc=%d.\n",
		      __func__, spu->number, rc);
	}
	return rc;
}
EXPORT_SYMBOL_GPL(spu_restore);
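/*
 * Usage sketch (not part of this file): a scheduler that migrates a
 * context between SPUs would pair these entry points roughly as
 * follows.  'ctx' and its 'csa' field are hypothetical names here;
 * the real callers live in the spufs scheduler.
 *
 *	spu_save(&ctx->csa, old_spu);	  // quiesce + save into the CSA
 *	spu_restore(&ctx->csa, new_spu);  // harvest + restore from the CSA
 */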
/**
 * spu_harvest - SPU harvest (reset) operation
 * @spu: pointer to SPU iomem structure.
 *
 * Perform SPU harvest (reset) operation.
 */
void spu_harvest(struct spu *spu)
{
	acquire_spu_lock(spu);
	harvest(NULL, spu);
	release_spu_lock(spu);
}
static void init_prob(struct spu_state *csa)
{
	csa->spu_chnlcnt_RW[9] = 1;
	csa->spu_chnlcnt_RW[21] = 16;
	csa->spu_chnlcnt_RW[23] = 1;
	csa->spu_chnlcnt_RW[28] = 1;
	csa->spu_chnlcnt_RW[30] = 1;
	csa->prob.spu_runcntl_RW = SPU_RUNCNTL_STOP;
	csa->prob.mb_stat_R = 0x000400;
}

static void init_priv1(struct spu_state *csa)
{
	/* Enable decode, relocate, tlbie response, master runcntl. */
	csa->priv1.mfc_sr1_RW = MFC_STATE1_LOCAL_STORAGE_DECODE_MASK |
	    MFC_STATE1_MASTER_RUN_CONTROL_MASK |
	    MFC_STATE1_PROBLEM_STATE_MASK |
	    MFC_STATE1_RELOCATE_MASK | MFC_STATE1_BUS_TLBIE_MASK;

	/* Set storage description. */
	csa->priv1.mfc_sdr_RW = mfspr(SPRN_SDR1);

	/* Enable OS-specific set of interrupts. */
	csa->priv1.int_mask_class0_RW = CLASS0_ENABLE_DMA_ALIGNMENT_INTR |
	    CLASS0_ENABLE_INVALID_DMA_COMMAND_INTR |
	    CLASS0_ENABLE_SPU_ERROR_INTR;
	csa->priv1.int_mask_class1_RW = CLASS1_ENABLE_SEGMENT_FAULT_INTR |
	    CLASS1_ENABLE_STORAGE_FAULT_INTR;
	csa->priv1.int_mask_class2_RW = CLASS2_ENABLE_SPU_STOP_INTR |
	    CLASS2_ENABLE_SPU_HALT_INTR |
	    CLASS2_ENABLE_SPU_DMA_TAG_GROUP_COMPLETE_INTR;
}

static void init_priv2(struct spu_state *csa)
{
	csa->priv2.spu_lslr_RW = LS_ADDR_MASK;
	csa->priv2.mfc_control_RW = MFC_CNTL_RESUME_DMA_QUEUE |
	    MFC_CNTL_NORMAL_DMA_QUEUE_OPERATION |
	    MFC_CNTL_DMA_QUEUES_EMPTY_MASK;
}
/**
 * spu_init_csa - allocate and initialize an SPU context save area.
 *
 * Allocate and initialize the contents of an SPU context save area.
 * This includes enabling address translation, interrupt masks, etc.,
 * as appropriate for the given OS environment.
 *
 * Note that storage for the 'lscsa' is allocated separately,
 * as it is by far the largest of the context save regions,
 * and may need to be pinned or otherwise specially aligned.
 */
void spu_init_csa(struct spu_state *csa)
{
	struct spu_lscsa *lscsa;
	unsigned char *p;

	if (!csa)
		return;
	memset(csa, 0, sizeof(struct spu_state));

	lscsa = vmalloc(sizeof(struct spu_lscsa));
	if (!lscsa)
		return;

	memset(lscsa, 0, sizeof(struct spu_lscsa));
	csa->lscsa = lscsa;
	csa->register_lock = SPIN_LOCK_UNLOCKED;

	/* Set LS pages reserved to allow for user-space mapping. */
	for (p = lscsa->ls; p < lscsa->ls + LS_SIZE; p += PAGE_SIZE)
		SetPageReserved(vmalloc_to_page(p));

	init_prob(csa);
	init_priv1(csa);
	init_priv2(csa);
}
EXPORT_SYMBOL_GPL(spu_init_csa);
void spu_fini_csa(struct spu_state *csa)
{
	unsigned char *p;

	/* Clear reserved bit before vfree. */
	for (p = csa->lscsa->ls; p < csa->lscsa->ls + LS_SIZE; p += PAGE_SIZE)
		ClearPageReserved(vmalloc_to_page(p));

	vfree(csa->lscsa);
}
EXPORT_SYMBOL_GPL(spu_fini_csa);