+/* Return a bit-field extract of given operand# in FIFO, and its
+ source-addr. `bit_offset' starts at 0, referring to LSB after PKE
+ instruction word. Width must be >0, <=32. Assume FIFO is full
+ enough. Skip over DMA tags, but mark them as an error (ER0). */
+
+unsigned_4
+pke_pcrel_operand_bits(struct pke_device* me, int bit_offset, int bit_width, unsigned_4* source_addr)
+{
+  unsigned_4* word = NULL;
+  unsigned_4 value;
+  struct fifo_quadword* fifo_operand;
+  int wordnumber, bitnumber;
+
+  /* locate the 32-bit FIFO word containing the field, and the field's
+     starting bit within that word */
+  wordnumber = bit_offset/32;
+  bitnumber = bit_offset%32;
+
+  /* find operand word with bitfield */
+  fifo_operand = pke_pcrel_fifo(me, wordnumber + 1, &word);
+  ASSERT(word != NULL);
+
+  /* extract bitfield from word */
+  /* NOTE(review): this assumes the field lies entirely within a single
+     32-bit word (bitnumber + bit_width <= 32); a field straddling a
+     word boundary would be extracted incorrectly -- confirm callers
+     never request such a field.  */
+  value = BIT_MASK_GET(*word, bitnumber, bitnumber + bit_width - 1);
+
+  /* extract source addr from fifo word */
+  *source_addr = fifo_operand->source_address;
+
+  return value;
+}
+
+
+
+/* check for stall conditions on indicated devices (path* only on
+ PKE1), do not change status; return 0 iff no stall */
+int
+pke_check_stall(struct pke_device* me, enum pke_check_target what)
+{
+  int stalled = 0;
+  unsigned_4 cop2_stat, gpuif_stat;
+
+  /* fetch the current GPUIF and COP2 status words */
+  ASSERT(sizeof(unsigned_4) == 4);
+  PKE_MEM_READ(me, (GIF_REG_STAT),
+	       & gpuif_stat,
+	       4);
+  PKE_MEM_READ(me, (COP2_REG_STAT_ADDR),
+	       & cop2_stat,
+	       4);
+
+  /* decode the requested stall condition */
+  switch(what)
+    {
+    case chk_vu: /* is this unit's VU still running a microprogram? */
+      if(me->pke_number == 0)
+	stalled = BIT_MASK_GET(cop2_stat, COP2_REG_STAT_VBS0_B, COP2_REG_STAT_VBS0_E);
+      else /* PKE1 */
+	stalled = BIT_MASK_GET(cop2_stat, COP2_REG_STAT_VBS1_B, COP2_REG_STAT_VBS1_E);
+      break;
+
+    case chk_path1: /* VU -> GPUIF active? */
+      ASSERT(me->pke_number == 1);
+      if(BIT_MASK_GET(gpuif_stat, GPUIF_REG_STAT_APATH_B, GPUIF_REG_STAT_APATH_E) == 1)
+	stalled = 1;
+      break;
+
+    case chk_path2: /* PKE -> GPUIF active? */
+      ASSERT(me->pke_number == 1);
+      if(BIT_MASK_GET(gpuif_stat, GPUIF_REG_STAT_APATH_B, GPUIF_REG_STAT_APATH_E) == 2)
+	stalled = 1;
+      break;
+
+    case chk_path3: /* DMA -> GPUIF active? */
+      ASSERT(me->pke_number == 1);
+      if(BIT_MASK_GET(gpuif_stat, GPUIF_REG_STAT_APATH_B, GPUIF_REG_STAT_APATH_E) == 3)
+	stalled = 1;
+      break;
+
+    default:
+      /* invalid what */
+      ASSERT(0);
+    }
+
+  /* nonzero iff the requested resource is busy */
+  return stalled;
+}
+
+
+/* PKE1 only: flip the DBF bit; recompute TOPS, TOP */
+void
+pke_flip_dbf(struct pke_device* me)
+{
+  int newdf;
+  /* compute new TOP: the buffer just completed becomes the active one,
+     so TOP takes the old TOPS value (must happen before TOPS changes) */
+  PKE_REG_MASK_SET(me, TOP, TOP,
+		   PKE_REG_MASK_GET(me, TOPS, TOPS));
+  /* flip DBF */
+  newdf = PKE_REG_MASK_GET(me, DBF, DF) ? 0 : 1;
+  PKE_REG_MASK_SET(me, DBF, DF, newdf);
+  /* keep STAT's mirror of the flag in sync */
+  PKE_REG_MASK_SET(me, STAT, DBF, newdf);
+  /* compute new TOPS: BASE when newdf==0, BASE+OFFSET when newdf==1 */
+  PKE_REG_MASK_SET(me, TOPS, TOPS,
+		   (PKE_REG_MASK_GET(me, BASE, BASE) +
+		    newdf * PKE_REG_MASK_GET(me, OFST, OFFSET)));
+
+  /* this is equivalent to last word from okadaa (98-02-25):
+     1) TOP=TOPS;
+     2) TOPS=BASE + !DBF*OFFSET
+     3) DBF=!DBF */
+}
+
+
+/* set the STAT:PIS bit and send an interrupt to the 5900 */
+void
+pke_begin_interrupt_stall(struct pke_device* me)
+{
+  /* set PIS */
+  PKE_REG_MASK_SET(me, STAT, PIS, 1);
+
+  /* XXX: send interrupt to 5900? */
+  /* NOTE(review): only the PIS status bit is raised here; actual
+     delivery of an interrupt to the 5900 core is not implemented.  */
+}
+
+
+
+
+/* PKEcode handler functions -- responsible for checking and
+ confirming old stall conditions, executing pkecode, updating PC and
+ status registers -- may assume being run on correct PKE unit */
+
+void
+pke_code_nop(struct pke_device* me, unsigned_4 pkecode)
+{
+  /* NOP: no effect beyond consuming the instruction word */
+  /* done */
+  pke_pc_advance(me, 1);
+  PKE_REG_MASK_SET(me, STAT, PPS, PKE_REG_STAT_PPS_IDLE);
+}
+
+
+void
+pke_code_stcycl(struct pke_device* me, unsigned_4 pkecode)
+{
+  /* STCYCL: load the CYCLE register from the instruction immediate */
+  int cycle_imm = BIT_MASK_GET(pkecode, PKE_OPCODE_IMM_B, PKE_OPCODE_IMM_E);
+
+  /* upper byte of the immediate -> WL, lower byte -> CL */
+  PKE_REG_MASK_SET(me, CYCLE, WL, BIT_MASK_GET(cycle_imm, 8, 15));
+  PKE_REG_MASK_SET(me, CYCLE, CL, BIT_MASK_GET(cycle_imm, 0, 7));
+
+  /* single-word instruction: step past it and return to idle */
+  pke_pc_advance(me, 1);
+  PKE_REG_MASK_SET(me, STAT, PPS, PKE_REG_STAT_PPS_IDLE);
+}
+
+
+void
+pke_code_offset(struct pke_device* me, unsigned_4 pkecode)
+{
+  /* OFFSET: set double-buffer offset and reset double-buffer state */
+  int offset_imm = BIT_MASK_GET(pkecode, PKE_OPCODE_IMM_B, PKE_OPCODE_IMM_E);
+
+  /* low 10 bits of the immediate become the OFFSET field */
+  PKE_REG_MASK_SET(me, OFST, OFFSET, BIT_MASK_GET(offset_imm, 0, 9));
+
+  /* clear the double-buffer flag, both in DBF and its STAT mirror */
+  PKE_REG_MASK_SET(me, DBF, DF, 0);
+  PKE_REG_MASK_SET(me, STAT, DBF, 0);
+
+  /* restart double-buffering with TOPS = BASE */
+  PKE_REG_MASK_SET(me, TOPS, TOPS, PKE_REG_MASK_GET(me, BASE, BASE));
+
+  /* single-word instruction: step past it and return to idle */
+  pke_pc_advance(me, 1);
+  PKE_REG_MASK_SET(me, STAT, PPS, PKE_REG_STAT_PPS_IDLE);
+}
+
+
+void
+pke_code_base(struct pke_device* me, unsigned_4 pkecode)
+{
+  /* BASE: set the double-buffer base address */
+  int base_imm = BIT_MASK_GET(pkecode, PKE_OPCODE_IMM_B, PKE_OPCODE_IMM_E);
+
+  /* low 10 bits of the immediate become the BASE field */
+  PKE_REG_MASK_SET(me, BASE, BASE, BIT_MASK_GET(base_imm, 0, 9));
+
+  /* single-word instruction: step past it and return to idle */
+  pke_pc_advance(me, 1);
+  PKE_REG_MASK_SET(me, STAT, PPS, PKE_REG_STAT_PPS_IDLE);
+}
+
+
+void
+pke_code_itop(struct pke_device* me, unsigned_4 pkecode)
+{
+  /* ITOP: stage the next ITOP value (latched into ITOP at MSCAL time) */
+  int itops_imm = BIT_MASK_GET(pkecode, PKE_OPCODE_IMM_B, PKE_OPCODE_IMM_E);
+
+  /* low 10 bits of the immediate become the ITOPS field */
+  PKE_REG_MASK_SET(me, ITOPS, ITOPS, BIT_MASK_GET(itops_imm, 0, 9));
+
+  /* single-word instruction: step past it and return to idle */
+  pke_pc_advance(me, 1);
+  PKE_REG_MASK_SET(me, STAT, PPS, PKE_REG_STAT_PPS_IDLE);
+}
+
+
+void
+pke_code_stmod(struct pke_device* me, unsigned_4 pkecode)
+{
+  int imm = BIT_MASK_GET(pkecode, PKE_OPCODE_IMM_B, PKE_OPCODE_IMM_E);
+
+  /* copy addition-mode bits into MODE register.  NOTE(review): the
+     mask extracts bits 0..2 (three bits) although the mode field is
+     usually described as 2 bits; PKE_REG_MASK_SET clips the value to
+     the MDE field width, so any excess bit is discarded -- confirm
+     which width is intended.  */
+  PKE_REG_MASK_SET(me, MODE, MDE, BIT_MASK_GET(imm, 0, 2));
+  /* done */
+  pke_pc_advance(me, 1);
+  PKE_REG_MASK_SET(me, STAT, PPS, PKE_REG_STAT_PPS_IDLE);
+}
+
+
+void
+pke_code_mskpath3(struct pke_device* me, unsigned_4 pkecode)
+{
+  int imm = BIT_MASK_GET(pkecode, PKE_OPCODE_IMM_B, PKE_OPCODE_IMM_E);
+  unsigned_4 gif_mode;
+
+  /* set appropriate bit: mask (disable) GPUIF path 3 iff the
+     MSKPATH3 bit of the immediate is set */
+  if(BIT_MASK_GET(imm, PKE_REG_MSKPATH3_B, PKE_REG_MSKPATH3_E) != 0)
+    gif_mode = GIF_REG_STAT_M3P;
+  else
+    gif_mode = 0;
+
+  /* write register to "read-only" register; gpuif code will look at M3P bit only */
+  PKE_MEM_WRITE(me, GIF_REG_VIF_M3P, & gif_mode, 4);
+
+  /* done */
+  pke_pc_advance(me, 1);
+  PKE_REG_MASK_SET(me, STAT, PPS, PKE_REG_STAT_PPS_IDLE);
+}
+
+
+void
+pke_code_pkemark(struct pke_device* me, unsigned_4 pkecode)
+{
+  /* MARK: store the immediate in MARK for host-side observation */
+  int mark_imm = BIT_MASK_GET(pkecode, PKE_OPCODE_IMM_B, PKE_OPCODE_IMM_E);
+
+  /* low 16 bits of the immediate become the MARK value */
+  PKE_REG_MASK_SET(me, MARK, MARK, BIT_MASK_GET(mark_imm, 0, 15));
+
+  /* raise MRK bit in STAT register - CPU2 v2.1 docs incorrect */
+  PKE_REG_MASK_SET(me, STAT, MRK, 1);
+
+  /* single-word instruction: step past it and return to idle */
+  pke_pc_advance(me, 1);
+  PKE_REG_MASK_SET(me, STAT, PPS, PKE_REG_STAT_PPS_IDLE);
+}
+
+
+void
+pke_code_flushe(struct pke_device* me, unsigned_4 pkecode)
+{
+  /* FLUSHE: wait until the VU microprogram has ended.  The PC is not
+     advanced while stalled, so this instruction is re-executed every
+     cycle until the VU goes idle.  */
+  /* compute next PEW bit */
+  if(pke_check_stall(me, chk_vu))
+    {
+      /* VU busy */
+      PKE_REG_MASK_SET(me, STAT, PEW, 1);
+      PKE_REG_MASK_SET(me, STAT, PPS, PKE_REG_STAT_PPS_STALL);
+      /* try again next cycle */
+    }
+  else
+    {
+      /* VU idle */
+      PKE_REG_MASK_SET(me, STAT, PEW, 0);
+      PKE_REG_MASK_SET(me, STAT, PPS, PKE_REG_STAT_PPS_IDLE);
+      pke_pc_advance(me, 1);
+    }
+}
+
+
+void
+pke_code_flush(struct pke_device* me, unsigned_4 pkecode)
+{
+  /* FLUSH (PKE1): wait for the VU and for GPUIF paths 1 and 2
+     (path 3 is additionally covered by FLUSHA, not here).  */
+  int something_busy = 0;
+
+  /* compute next PEW, PGW bits */
+  if(pke_check_stall(me, chk_vu))
+    {
+      something_busy = 1;
+      PKE_REG_MASK_SET(me, STAT, PEW, 1);
+    }
+  else
+    PKE_REG_MASK_SET(me, STAT, PEW, 0);
+
+
+  if(pke_check_stall(me, chk_path1) ||
+     pke_check_stall(me, chk_path2))
+    {
+      something_busy = 1;
+      PKE_REG_MASK_SET(me, STAT, PGW, 1);
+    }
+  else
+    PKE_REG_MASK_SET(me, STAT, PGW, 0);
+
+  /* go or no go */
+  if(something_busy)
+    {
+      PKE_REG_MASK_SET(me, STAT, PPS, PKE_REG_STAT_PPS_WAIT);
+      /* try again next cycle */
+    }
+  else
+    {
+      /* all idle */
+      PKE_REG_MASK_SET(me, STAT, PPS, PKE_REG_STAT_PPS_IDLE);
+      pke_pc_advance(me, 1);
+    }
+}
+
+
+void
+pke_code_flusha(struct pke_device* me, unsigned_4 pkecode)
+{
+  /* FLUSHA (PKE1): like FLUSH, but also waits for GPUIF path 3.  */
+  int something_busy = 0;
+
+  /* compute next PEW, PGW bits */
+  if(pke_check_stall(me, chk_vu))
+    {
+      something_busy = 1;
+      PKE_REG_MASK_SET(me, STAT, PEW, 1);
+    }
+  else
+    PKE_REG_MASK_SET(me, STAT, PEW, 0);
+
+
+  if(pke_check_stall(me, chk_path1) ||
+     pke_check_stall(me, chk_path2) ||
+     pke_check_stall(me, chk_path3))
+    {
+      something_busy = 1;
+      PKE_REG_MASK_SET(me, STAT, PGW, 1);
+    }
+  else
+    PKE_REG_MASK_SET(me, STAT, PGW, 0);
+
+  if(something_busy)
+    {
+      PKE_REG_MASK_SET(me, STAT, PPS, PKE_REG_STAT_PPS_WAIT);
+      /* try again next cycle */
+    }
+  else
+    {
+      /* all idle */
+      PKE_REG_MASK_SET(me, STAT, PPS, PKE_REG_STAT_PPS_IDLE);
+      pke_pc_advance(me, 1);
+    }
+}
+
+
+void
+pke_code_pkemscal(struct pke_device* me, unsigned_4 pkecode)
+{
+  /* MSCAL: start the VU microprogram at the address in the immediate,
+     once the VU is idle.  */
+  /* compute next PEW bit */
+  if(pke_check_stall(me, chk_vu))
+    {
+      /* VU busy */
+      PKE_REG_MASK_SET(me, STAT, PEW, 1);
+      PKE_REG_MASK_SET(me, STAT, PPS, PKE_REG_STAT_PPS_STALL);
+      /* try again next cycle */
+    }
+  else
+    {
+      unsigned_4 vu_pc;
+      int imm = BIT_MASK_GET(pkecode, PKE_OPCODE_IMM_B, PKE_OPCODE_IMM_E);
+
+      /* VU idle */
+      PKE_REG_MASK_SET(me, STAT, PEW, 0);
+
+      /* flip DBF on PKE1 */
+      if(me->pke_number == 1)
+	pke_flip_dbf(me);
+
+      /* compute new PC for VU (host byte-order) */
+      /* NOTE(review): T2H_4 swaps byte order for the raw CIA register
+         write below; the exact direction depends on PKE_MEM_WRITE's
+         conversion semantics -- confirm against the memory macros.  */
+      vu_pc = BIT_MASK_GET(imm, 0, 15);
+      vu_pc = T2H_4(vu_pc);
+
+      /* write new PC; callback function gets VU running */
+      ASSERT(sizeof(unsigned_4) == 4);
+      PKE_MEM_WRITE(me, (me->pke_number == 0 ? VU0_CIA : VU1_CIA),
+		    & vu_pc,
+		    4);
+
+      /* copy ITOPS field to ITOP */
+      PKE_REG_MASK_SET(me, ITOP, ITOP, PKE_REG_MASK_GET(me, ITOPS, ITOPS));
+
+      /* done */
+      PKE_REG_MASK_SET(me, STAT, PPS, PKE_REG_STAT_PPS_IDLE);
+      pke_pc_advance(me, 1);
+    }
+}
+
+
+
+void
+pke_code_pkemscnt(struct pke_device* me, unsigned_4 pkecode)
+{
+  /* MSCNT: restart the VU microprogram at its current address.  The
+     CIA register is read and written back unchanged; the write-back
+     is what triggers the callback that sets the VU running.  */
+  /* compute next PEW bit */
+  if(pke_check_stall(me, chk_vu))
+    {
+      /* VU busy */
+      PKE_REG_MASK_SET(me, STAT, PEW, 1);
+      PKE_REG_MASK_SET(me, STAT, PPS, PKE_REG_STAT_PPS_STALL);
+      /* try again next cycle */
+    }
+  else
+    {
+      unsigned_4 vu_pc;
+
+      /* VU idle */
+      PKE_REG_MASK_SET(me, STAT, PEW, 0);
+
+      /* flip DBF on PKE1 */
+      if(me->pke_number == 1)
+	pke_flip_dbf(me);
+
+      /* read old PC */
+      ASSERT(sizeof(unsigned_4) == 4);
+      PKE_MEM_READ(me, (me->pke_number == 0 ? VU0_CIA : VU1_CIA),
+		   & vu_pc,
+		   4);
+
+      /* rewrite new PC; callback function gets VU running */
+      ASSERT(sizeof(unsigned_4) == 4);
+      PKE_MEM_WRITE(me, (me->pke_number == 0 ? VU0_CIA : VU1_CIA),
+		    & vu_pc,
+		    4);
+
+      /* copy ITOPS field to ITOP */
+      PKE_REG_MASK_SET(me, ITOP, ITOP, PKE_REG_MASK_GET(me, ITOPS, ITOPS));
+
+      /* done */
+      PKE_REG_MASK_SET(me, STAT, PPS, PKE_REG_STAT_PPS_IDLE);
+      pke_pc_advance(me, 1);
+    }
+}
+
+
+void
+pke_code_pkemscalf(struct pke_device* me, unsigned_4 pkecode)
+{
+  /* MSCALF: like MSCAL, but additionally waits for all GPUIF paths
+     (1, 2 and 3) before starting the VU.  */
+  int something_busy = 0;
+
+  /* compute next PEW, PGW bits */
+  if(pke_check_stall(me, chk_vu))
+    {
+      something_busy = 1;
+      PKE_REG_MASK_SET(me, STAT, PEW, 1);
+    }
+  else
+    PKE_REG_MASK_SET(me, STAT, PEW, 0);
+
+
+  if(pke_check_stall(me, chk_path1) ||
+     pke_check_stall(me, chk_path2) ||
+     pke_check_stall(me, chk_path3))
+    {
+      something_busy = 1;
+      PKE_REG_MASK_SET(me, STAT, PGW, 1);
+    }
+  else
+    PKE_REG_MASK_SET(me, STAT, PGW, 0);
+
+  /* go or no go */
+  if(something_busy)
+    {
+      PKE_REG_MASK_SET(me, STAT, PPS, PKE_REG_STAT_PPS_WAIT);
+      /* try again next cycle */
+    }
+  else
+    {
+      unsigned_4 vu_pc;
+      int imm = BIT_MASK_GET(pkecode, PKE_OPCODE_IMM_B, PKE_OPCODE_IMM_E);
+
+      /* flip DBF on PKE1 */
+      if(me->pke_number == 1)
+	pke_flip_dbf(me);
+
+      /* compute new PC for VU (host byte-order) */
+      vu_pc = BIT_MASK_GET(imm, 0, 15);
+      vu_pc = T2H_4(vu_pc);
+
+      /* rewrite new PC; callback function gets VU running */
+      ASSERT(sizeof(unsigned_4) == 4);
+      PKE_MEM_WRITE(me, (me->pke_number == 0 ? VU0_CIA : VU1_CIA),
+		    & vu_pc,
+		    4);
+
+      /* copy ITOPS field to ITOP */
+      PKE_REG_MASK_SET(me, ITOP, ITOP, PKE_REG_MASK_GET(me, ITOPS, ITOPS));
+
+      /* done */
+      PKE_REG_MASK_SET(me, STAT, PPS, PKE_REG_STAT_PPS_IDLE);
+      pke_pc_advance(me, 1);
+    }
+}
+
+
+void
+pke_code_stmask(struct pke_device* me, unsigned_4 pkecode)
+{
+  unsigned_4* mask;
+
+  /* check that FIFO has one more word for STMASK operand */
+  mask = pke_pcrel_operand(me, 1);
+  if(mask != NULL)
+    {
+      /* "transferring" operand */
+      PKE_REG_MASK_SET(me, STAT, PPS, PKE_REG_STAT_PPS_XFER);
+
+      /* set NUM: one operand word outstanding during the transfer */
+      PKE_REG_MASK_SET(me, NUM, NUM, 1);
+
+      /* fill the register */
+      PKE_REG_MASK_SET(me, MASK, MASK, *mask);
+
+      /* clear NUM: operand consumed */
+      PKE_REG_MASK_SET(me, NUM, NUM, 0);
+
+      /* done: advance past instruction word plus its one operand */
+      PKE_REG_MASK_SET(me, STAT, PPS, PKE_REG_STAT_PPS_IDLE);
+      pke_pc_advance(me, 2);
+    }
+  else
+    {
+      /* need to wait for another word */
+      PKE_REG_MASK_SET(me, STAT, PPS, PKE_REG_STAT_PPS_WAIT);
+      /* try again next cycle */
+    }
+}
+
+
+void
+pke_code_strow(struct pke_device* me, unsigned_4 pkecode)
+{
+  /* check that FIFO has four more words for STROW operand */
+  unsigned_4* last_op;
+
+  last_op = pke_pcrel_operand(me, 4);
+  if(last_op != NULL)
+    {
+      /* "transferring" operand */
+      PKE_REG_MASK_SET(me, STAT, PPS, PKE_REG_STAT_PPS_XFER);
+
+      /* set NUM while the transfer is "in progress" */
+      PKE_REG_MASK_SET(me, NUM, NUM, 1);
+
+      /* copy ROW registers: must all exist if 4th operand exists */
+      me->regs[PKE_REG_R0][0] = * pke_pcrel_operand(me, 1);
+      me->regs[PKE_REG_R1][0] = * pke_pcrel_operand(me, 2);
+      me->regs[PKE_REG_R2][0] = * pke_pcrel_operand(me, 3);
+      me->regs[PKE_REG_R3][0] = * pke_pcrel_operand(me, 4);
+
+      /* clear NUM: operands consumed */
+      PKE_REG_MASK_SET(me, NUM, NUM, 0);
+
+      /* done: advance past instruction word plus four operands */
+      PKE_REG_MASK_SET(me, STAT, PPS, PKE_REG_STAT_PPS_IDLE);
+      pke_pc_advance(me, 5);
+    }
+  else
+    {
+      /* need to wait for another word */
+      PKE_REG_MASK_SET(me, STAT, PPS, PKE_REG_STAT_PPS_WAIT);
+      /* try again next cycle */
+    }
+}
+
+
+void
+pke_code_stcol(struct pke_device* me, unsigned_4 pkecode)
+{
+  /* check that FIFO has four more words for STCOL operand */
+  unsigned_4* last_op;
+
+  last_op = pke_pcrel_operand(me, 4);
+  if(last_op != NULL)
+    {
+      /* "transferring" operand */
+      PKE_REG_MASK_SET(me, STAT, PPS, PKE_REG_STAT_PPS_XFER);
+
+      /* set NUM while the transfer is "in progress" */
+      PKE_REG_MASK_SET(me, NUM, NUM, 1);
+
+      /* copy COL registers: must all exist if 4th operand exists */
+      me->regs[PKE_REG_C0][0] = * pke_pcrel_operand(me, 1);
+      me->regs[PKE_REG_C1][0] = * pke_pcrel_operand(me, 2);
+      me->regs[PKE_REG_C2][0] = * pke_pcrel_operand(me, 3);
+      me->regs[PKE_REG_C3][0] = * pke_pcrel_operand(me, 4);
+
+      /* clear NUM: operands consumed */
+      PKE_REG_MASK_SET(me, NUM, NUM, 0);
+
+      /* done: advance past instruction word plus four operands */
+      PKE_REG_MASK_SET(me, STAT, PPS, PKE_REG_STAT_PPS_IDLE);
+      pke_pc_advance(me, 5);
+    }
+  else
+    {
+      /* need to wait for another word */
+      PKE_REG_MASK_SET(me, STAT, PPS, PKE_REG_STAT_PPS_WAIT);
+      /* try again next cycle */
+    }
+}
+
+
+void
+pke_code_mpg(struct pke_device* me, unsigned_4 pkecode)
+{
+  /* MPG: copy NUM 64-bit instruction pairs from the FIFO into VU
+     instruction memory at address IMM (64-bit units), after an
+     implied FLUSHE.  */
+  unsigned_4* last_mpg_word;
+  int num = BIT_MASK_GET(pkecode, PKE_OPCODE_NUM_B, PKE_OPCODE_NUM_E);
+  int imm = BIT_MASK_GET(pkecode, PKE_OPCODE_IMM_B, PKE_OPCODE_IMM_E);
+
+  /* assert 64-bit alignment of MPG operand: the instruction word must
+     sit at an odd word slot so the operand starts on an even one */
+  if(me->qw_pc != 3 && me->qw_pc != 1)
+    return pke_code_error(me, pkecode);
+
+  /* map zero to max+1 */
+  if(num==0) num=0x100;
+
+  /* check that FIFO has a few more words for MPG operand */
+  last_mpg_word = pke_pcrel_operand(me, num*2); /* num: number of 64-bit words */
+  if(last_mpg_word != NULL)
+    {
+      /* perform implied FLUSHE */
+      if(pke_check_stall(me, chk_vu))
+	{
+	  /* VU busy */
+	  PKE_REG_MASK_SET(me, STAT, PPS, PKE_REG_STAT_PPS_STALL);
+	  /* retry this instruction next clock */
+	}
+      else
+	{
+	  /* VU idle */
+	  int i;
+
+	  /* "transferring" operand */
+	  PKE_REG_MASK_SET(me, STAT, PPS, PKE_REG_STAT_PPS_XFER);
+
+	  /* set NUM */
+	  PKE_REG_MASK_SET(me, NUM, NUM, num);
+
+	  /* transfer VU instructions, one word-pair per iteration */
+	  for(i=0; i<num; i++)
+	    {
+	      address_word vu_addr_base, vu_addr;
+	      address_word vutrack_addr_base, vutrack_addr;
+	      address_word vu_addr_max_size;
+	      unsigned_4 vu_lower_opcode, vu_upper_opcode;
+	      unsigned_4* operand;
+	      struct fifo_quadword* fq;
+	      int next_num;
+
+	      /* decrement NUM */
+	      next_num = PKE_REG_MASK_GET(me, NUM, NUM) - 1;
+	      PKE_REG_MASK_SET(me, NUM, NUM, next_num);
+
+	      /* imm: in 64-bit units for MPG instruction */
+	      /* VU*_MEM0 : instruction memory */
+	      vu_addr_base = (me->pke_number == 0) ?
+		VU0_MEM0_WINDOW_START : VU1_MEM0_WINDOW_START;
+	      vu_addr_max_size = (me->pke_number == 0) ?
+		VU0_MEM0_SIZE : VU1_MEM0_SIZE;
+	      vutrack_addr_base = (me->pke_number == 0) ?
+		VU0_MEM0_SRCADDR_START : VU1_MEM0_SRCADDR_START;
+
+	      /* compute VU address for this word-pair */
+	      vu_addr = vu_addr_base + (imm + i) * 8;
+	      /* check for vu_addr overflow: wrap around within imem */
+	      while(vu_addr >= vu_addr_base + vu_addr_max_size)
+		vu_addr -= vu_addr_max_size;
+
+	      /* compute VU tracking address: 4 tracking bytes per
+	         8 bytes of instruction memory, hence the /2 */
+	      vutrack_addr = vutrack_addr_base + ((signed_8)vu_addr - (signed_8)vu_addr_base) / 2;
+
+	      /* Fetch operand words; assume they are already little-endian for VU imem */
+	      fq = pke_pcrel_fifo(me, i*2 + 1, & operand);
+	      vu_lower_opcode = *operand;
+	      vu_upper_opcode = *pke_pcrel_operand(me, i*2 + 2);
+
+	      /* write data into VU memory */
+	      /* lower (scalar) opcode comes in first word ; macro performs H2T! */
+	      PKE_MEM_WRITE(me, vu_addr,
+			    & vu_lower_opcode,
+			    4);
+	      /* upper (vector) opcode comes in second word ; H2T */
+	      ASSERT(sizeof(unsigned_4) == 4);
+	      PKE_MEM_WRITE(me, vu_addr + 4,
+			    & vu_upper_opcode,
+			    4);
+
+	      /* write tracking address in target byte-order */
+	      ASSERT(sizeof(unsigned_4) == 4);
+	      PKE_MEM_WRITE(me, vutrack_addr,
+			    & fq->source_address,
+			    4);
+	    } /* VU xfer loop */
+
+	  /* check NUM */
+	  ASSERT(PKE_REG_MASK_GET(me, NUM, NUM) == 0);
+
+	  /* done: advance past instruction word plus num*2 operands */
+	  PKE_REG_MASK_SET(me, STAT, PPS, PKE_REG_STAT_PPS_IDLE);
+	  pke_pc_advance(me, 1 + num*2);
+	}
+    } /* if FIFO full enough */
+  else
+    {
+      /* need to wait for another word */
+      PKE_REG_MASK_SET(me, STAT, PPS, PKE_REG_STAT_PPS_WAIT);
+      /* retry this instruction next clock */
+    }
+}
+
+
+void
+pke_code_direct(struct pke_device* me, unsigned_4 pkecode)
+{
+  /* DIRECT: forward IMM quadwords from the FIFO to the GPUIF via
+     path 2, one quadword at a time.  */
+  /* check that FIFO has a few more words for DIRECT operand */
+  unsigned_4* last_direct_word;
+  int imm = BIT_MASK_GET(pkecode, PKE_OPCODE_IMM_B, PKE_OPCODE_IMM_E);
+
+  /* assert 128-bit alignment of DIRECT operand */
+  if(me->qw_pc != 3)
+    return pke_code_error(me, pkecode);
+
+  /* map zero to max+1 */
+  if(imm==0) imm=0x10000;
+
+  last_direct_word = pke_pcrel_operand(me, imm*4); /* imm: number of 128-bit words */
+  if(last_direct_word != NULL)
+    {
+      /* VU idle */
+      int i;
+      unsigned_16 fifo_data;
+
+      /* "transferring" operand */
+      PKE_REG_MASK_SET(me, STAT, PPS, PKE_REG_STAT_PPS_XFER);
+
+      /* transfer GPUIF quadwords, one word per iteration */
+      for(i=0; i<imm*4; i++)
+	{
+	  unsigned_4* operand = pke_pcrel_operand(me, 1+i);
+
+	  /* collect word into quadword (words stored most-significant
+	     first within fifo_data via the A4_16 accessor) */
+	  *A4_16(&fifo_data, 3 - (i % 4)) = *operand;
+
+	  /* write to GPUIF FIFO only with full quadword */
+	  if(i % 4 == 3)
+	    {
+	      ASSERT(sizeof(fifo_data) == 16);
+	      PKE_MEM_WRITE(me, GIF_PATH2_FIFO_ADDR,
+			    & fifo_data,
+			    16);
+	    } /* write collected quadword */
+	} /* GPUIF xfer loop */
+
+      /* done: advance past instruction word plus imm*4 operands */
+      PKE_REG_MASK_SET(me, STAT, PPS, PKE_REG_STAT_PPS_IDLE);
+      pke_pc_advance(me, 1 + imm*4);
+    } /* if FIFO full enough */
+  else
+    {
+      /* need to wait for another word */
+      PKE_REG_MASK_SET(me, STAT, PPS, PKE_REG_STAT_PPS_WAIT);
+      /* retry this instruction next clock */
+    }
+}
+
+
+void
+pke_code_directhl(struct pke_device* me, unsigned_4 pkecode)
+{
+  /* treat the same as DIRECTH.  NOTE(review): DIRECTHL is simulated
+     identically to DIRECT; any hardware-level difference between the
+     two is not modelled here.  */
+  pke_code_direct(me, pkecode);
+}
+
+
+void
+pke_code_unpack(struct pke_device* me, unsigned_4 pkecode)
+{
+  /* UNPACK: expand packed vector data from the FIFO into VU data
+     memory, honoring the CYCLE (cl/wl), MODE, MASK and R/USN flags.
+     The most intricate PKEcode; statement order below is significant.  */
+  int imm = BIT_MASK_GET(pkecode, PKE_OPCODE_IMM_B, PKE_OPCODE_IMM_E);
+  int cmd = BIT_MASK_GET(pkecode, PKE_OPCODE_CMD_B, PKE_OPCODE_CMD_E);
+  int num = BIT_MASK_GET(pkecode, PKE_OPCODE_NUM_B, PKE_OPCODE_NUM_E);
+  int nummx = (num == 0) ? 0x0100 : num;
+  short vn = BIT_MASK_GET(cmd, 2, 3); /* unpack shape controls */
+  short vl = BIT_MASK_GET(cmd, 0, 1);
+  int m = BIT_MASK_GET(cmd, 4, 4);
+  short cl = PKE_REG_MASK_GET(me, CYCLE, CL); /* cycle controls */
+  short wl = PKE_REG_MASK_GET(me, CYCLE, WL);
+  short addrwl = (wl == 0) ? 0x0100 : wl;
+  int r = BIT_MASK_GET(imm, 15, 15); /* indicator bits in imm value */
+  int usn = BIT_MASK_GET(imm, 14, 14);
+
+  int n, num_operands;
+  unsigned_4* last_operand_word = NULL;
+
+  /* catch all illegal UNPACK variants */
+  if(vl == 3 && vn < 3)
+    {
+      pke_code_error(me, pkecode);
+      return;
+    }
+
+  /* compute PKEcode length, as given in CPU2 spec, v2.1 pg. 11 */
+  /* NOTE(review): this branch uses `num' while the else branch uses
+     `nummx'; if NUM==0 (meaning 256 vectors) and cl >= addrwl, n
+     becomes 0 and num_operands 0, which makes the in-loop operand
+     overrun guard fire immediately -- confirm whether `nummx' was
+     intended here.  */
+  if(cl >= addrwl)
+    n = num;
+  else
+    n = cl * (nummx / addrwl) + PKE_LIMIT(nummx % addrwl, cl);
+  num_operands = (31 + (32 >> vl) * (vn+1) * n)/32; /* round up to next word */
+
+  /* confirm that FIFO has enough words in it */
+  if(num_operands > 0)
+    last_operand_word = pke_pcrel_operand(me, num_operands);
+  if(last_operand_word != NULL || num_operands == 0)
+    {
+      address_word vu_addr_base, vutrack_addr_base;
+      address_word vu_addr_max_size;
+      int vector_num_out, vector_num_in;
+
+      /* "transferring" operand */
+      PKE_REG_MASK_SET(me, STAT, PPS, PKE_REG_STAT_PPS_XFER);
+
+      /* don't check whether VU is idle */
+
+      /* compute VU address base; the R (double-buffer) bit is only
+         honored on PKE1 */
+      if(me->pke_number == 0)
+	{
+	  vu_addr_base = VU0_MEM1_WINDOW_START;
+	  vu_addr_max_size = VU0_MEM1_SIZE;
+	  vutrack_addr_base = VU0_MEM1_SRCADDR_START;
+	  r = 0;
+	}
+      else
+	{
+	  vu_addr_base = VU1_MEM1_WINDOW_START;
+	  vu_addr_max_size = VU1_MEM1_SIZE;
+	  vutrack_addr_base = VU1_MEM1_SRCADDR_START;
+	}
+
+      /* set NUM */
+      PKE_REG_MASK_SET(me, NUM, NUM, nummx);
+
+      /* transfer given number of vectors */
+      vector_num_out = 0;  /* output vector number being processed */
+      vector_num_in = 0;  /* argument vector number being processed */
+      do
+	{
+	  quadword vu_old_data;
+	  quadword vu_new_data;
+	  quadword unpacked_data;
+	  address_word vu_addr;
+	  address_word vutrack_addr;
+	  unsigned_4 source_addr = 0;
+	  int i;
+	  int next_num;
+
+	  /* decrement NUM */
+	  next_num = PKE_REG_MASK_GET(me, NUM, NUM) - 1;
+	  PKE_REG_MASK_SET(me, NUM, NUM, next_num);
+
+	  /* compute VU destination address, as bytes in R5900 memory */
+	  if(cl >= wl)
+	    {
+	      /* map zero to max+1 */
+	      vu_addr = vu_addr_base + 16 * (BIT_MASK_GET(imm, 0, 9) +
+					     (vector_num_out / addrwl) * cl +
+					     (vector_num_out % addrwl));
+	    }
+	  else
+	    vu_addr = vu_addr_base + 16 * (BIT_MASK_GET(imm, 0, 9) +
+					   vector_num_out);
+
+	  /* handle "R" double-buffering bit: offset by TOPS quadwords */
+	  if(r)
+	    vu_addr += 16 * PKE_REG_MASK_GET(me, TOPS, TOPS);
+
+	  /* check for vu_addr overflow */
+	  while(vu_addr >= vu_addr_base + vu_addr_max_size)
+	    vu_addr -= vu_addr_max_size;
+
+	  /* compute address of tracking table entry */
+	  vutrack_addr = vutrack_addr_base + ((signed_8)vu_addr - (signed_8)vu_addr_base) / 4;
+
+	  /* read old VU data word at address; reverse words if needed */
+	  {
+	    unsigned_16 vu_old_badwords;
+	    ASSERT(sizeof(vu_old_badwords) == 16);
+	    PKE_MEM_READ(me, vu_addr,
+			 &vu_old_badwords, 16);
+	    vu_old_data[0] = * A4_16(& vu_old_badwords, 3);
+	    vu_old_data[1] = * A4_16(& vu_old_badwords, 2);
+	    vu_old_data[2] = * A4_16(& vu_old_badwords, 1);
+	    vu_old_data[3] = * A4_16(& vu_old_badwords, 0);
+	  }
+
+	  /* For cyclic unpack, next operand quadword may come from instruction stream
+	     or be zero. */
+	  if((cl < addrwl) &&
+	     (vector_num_out % addrwl) >= cl)
+	    {
+	      /* clear operand - used only in a "indeterminate" state */
+	      for(i = 0; i < 4; i++)
+		unpacked_data[i] = 0;
+	    }
+	  else
+	    {
+	      /* compute packed vector dimensions */
+	      int vectorbits = 0, unitbits = 0;
+
+	      if(vl < 3) /* PKE_UNPACK_*_{32,16,8} */
+		{
+		  unitbits = (32 >> vl);
+		  vectorbits = unitbits * (vn+1);
+		}
+	      else if(vl == 3 && vn == 3) /* PKE_UNPACK_V4_5 */
+		{
+		  unitbits = 5;
+		  vectorbits = 16;
+		}
+	      else /* illegal unpack variant */
+		{
+		  /* should have been caught at top of function */
+		  ASSERT(0);
+		}
+
+	      /* loop over columns */
+	      for(i=0; i<=vn; i++)
+		{
+		  unsigned_4 operand;
+
+		  /* offset in bits in current operand word */
+		  int bitoffset =
+		    (vector_num_in * vectorbits) + (i * unitbits); /* # of bits from PKEcode */
+
+		  /* last unit of V4_5 is only one bit wide
+		     (set after bitoffset is computed, so the offset
+		     for column 3 still uses the 5-bit stride) */
+		  if(vl == 3 && vn == 3 && i == 3) /* PKE_UNPACK_V4_5 */
+		    unitbits = 1;
+
+		  /* confirm we're not reading more than we said we needed */
+		  if(vector_num_in * vectorbits >= num_operands * 32)
+		    {
+		      /* this condition may be triggered by illegal
+			 PKEcode / CYCLE combinations. */
+		      pke_code_error(me, pkecode);
+		      /* XXX: this case needs to be better understood,
+			 and detected at a better time. */
+		      return;
+		    }
+
+		  /* fetch bitfield operand */
+		  operand = pke_pcrel_operand_bits(me, bitoffset, unitbits, & source_addr);
+
+		  /* selectively sign-extend; not for V4_5 1-bit value */
+		  if(usn || unitbits == 1)
+		    unpacked_data[i] = operand;
+		  else
+		    unpacked_data[i] = SEXT32(operand, unitbits-1);
+		}
+
+	      /* set remaining top words in vector */
+	      for(i=vn+1; i<4; i++)
+		{
+		  if(vn == 0) /* S_{32,16,8}: copy lowest element */
+		    unpacked_data[i] = unpacked_data[0];
+		  else
+		    unpacked_data[i] = 0;
+		}
+
+	      /* consumed a vector from the PKE instruction stream */
+	      vector_num_in ++;
+	    } /* unpack word from instruction operand */
+
+	  /* process STMOD register for accumulation operations */
+	  switch(PKE_REG_MASK_GET(me, MODE, MDE))
+	    {
+	    case PKE_MODE_ADDROW: /* add row registers to output data */
+	    case PKE_MODE_ACCROW: /* same .. later conditionally accumulate */
+	      for(i=0; i<4; i++)
+		/* exploit R0..R3 contiguity */
+		unpacked_data[i] += me->regs[PKE_REG_R0 + i][0];
+	      break;
+
+	    case PKE_MODE_INPUT: /* pass data through */
+	    default: /* specified as undefined */
+	      ;
+	    }
+
+	  /* compute replacement word */
+	  if(m) /* use mask register? */
+	    {
+	      /* compute index into mask register for this word */
+	      int mask_index = PKE_LIMIT(vector_num_out % addrwl, 3);
+
+	      for(i=0; i<4; i++) /* loop over columns */
+		{
+		  int mask_op = PKE_MASKREG_GET(me, mask_index, i);
+		  unsigned_4* masked_value = NULL;
+
+		  switch(mask_op)
+		    {
+		    case PKE_MASKREG_INPUT:
+		      masked_value = & unpacked_data[i];
+
+		      /* conditionally accumulate */
+		      if(PKE_REG_MASK_GET(me, MODE, MDE) == PKE_MODE_ACCROW)
+			me->regs[PKE_REG_R0 + i][0] = unpacked_data[i];
+
+		      break;
+
+		    case PKE_MASKREG_ROW: /* exploit R0..R3 contiguity */
+		      masked_value = & me->regs[PKE_REG_R0 + i][0];
+		      break;
+
+		    case PKE_MASKREG_COLUMN: /* exploit C0..C3 contiguity */
+		      masked_value = & me->regs[PKE_REG_C0 + mask_index][0];
+		      break;
+
+		    case PKE_MASKREG_NOTHING:
+		      /* "write inhibit" by re-copying old data */
+		      masked_value = & vu_old_data[i];
+		      break;
+
+		    default:
+		      ASSERT(0);
+		      /* no other cases possible */
+		    }
+
+		  /* copy masked value for column */
+		  vu_new_data[i] = *masked_value;
+		} /* loop over columns */
+	    } /* mask */
+	  else
+	    {
+	      /* no mask - just copy over entire unpacked quadword */
+	      memcpy(vu_new_data, unpacked_data, sizeof(unpacked_data));
+
+	      /* conditionally store accumulated row results */
+	      if(PKE_REG_MASK_GET(me, MODE, MDE) == PKE_MODE_ACCROW)
+		for(i=0; i<4; i++)
+		  me->regs[PKE_REG_R0 + i][0] = unpacked_data[i];
+	    }
+
+	  /* write new VU data word at address; reverse words if needed */
+	  {
+	    unsigned_16 vu_new_badwords;
+	    * A4_16(& vu_new_badwords, 3) = vu_new_data[0];
+	    * A4_16(& vu_new_badwords, 2) = vu_new_data[1];
+	    * A4_16(& vu_new_badwords, 1) = vu_new_data[2];
+	    * A4_16(& vu_new_badwords, 0) = vu_new_data[3];
+	    ASSERT(sizeof(vu_new_badwords) == 16);
+	    PKE_MEM_WRITE(me, vu_addr,
+			  &vu_new_badwords, 16);
+	  }
+
+	  /* write tracking address */
+	  ASSERT(sizeof(unsigned_4) == 4);
+	  PKE_MEM_WRITE(me, vutrack_addr,
+			& source_addr,
+			4);
+
+	  /* next vector please */
+	  vector_num_out ++;
+	} /* vector transfer loop */
+      while(PKE_REG_MASK_GET(me, NUM, NUM) > 0);
+
+      /* confirm we've written as many vectors as told */
+      ASSERT(nummx == vector_num_out);
+
+      /* done */
+      PKE_REG_MASK_SET(me, STAT, PPS, PKE_REG_STAT_PPS_IDLE);
+      pke_pc_advance(me, 1 + num_operands);
+    } /* PKE FIFO full enough */
+  else
+    {
+      /* need to wait for another word */
+      PKE_REG_MASK_SET(me, STAT, PPS, PKE_REG_STAT_PPS_WAIT);
+      /* retry this instruction next clock */
+    }
+}
+
+
+void
+pke_code_error(struct pke_device* me, unsigned_4 pkecode)
+{
+  /* Invalid-PKEcode handler: flag the error, optionally stall with an
+     interrupt (unless masked by ERR:ME1), and skip the bad word.  */
+  /* set ER1 flag in STAT register */
+  PKE_REG_MASK_SET(me, STAT, ER1, 1);
+
+  if(! PKE_REG_MASK_GET(me, ERR, ME1))
+    {
+      pke_begin_interrupt_stall(me);
+      PKE_REG_MASK_SET(me, STAT, PPS, PKE_REG_STAT_PPS_STALL);
+    }
+  else
+    {
+      /* error interrupt masked: carry on without stalling */
+      PKE_REG_MASK_SET(me, STAT, PPS, PKE_REG_STAT_PPS_IDLE);
+    }
+
+  /* advance over faulty word */
+  pke_pc_advance(me, 1);
+}