1 /* Simulator for Analog Devices Blackfin processors.
3 Copyright (C) 2005-2011 Free Software Foundation, Inc.
4 Contributed by Analog Devices, Inc.
6 This file is part of simulators.
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 3 of the License, or
11 (at your option) any later version.
13 This program is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
18 You should have received a copy of the GNU General Public License
19 along with this program. If not, see <http://www.gnu.org/licenses/>. */
/* Fixed-width scalar types used throughout the Blackfin simulator.
   bu40 carries a 40-bit accumulator value in the low bits of a 64-bit
   integer (see SET_AREG below, which splits it into 8-bit X and 32-bit
   W halves); bu64 is for full 64-bit intermediates.  */
typedef uint16_t bu16;
typedef uint32_t bu32;
typedef uint64_t bu40;
typedef uint64_t bu64;
38 /* For dealing with parallel instructions, we must avoid changing our register
39 file until all parallel insns have been simulated. This queue of stores
40 can be used to delay a modification.
41 XXX: Should go and convert all 32 bit insns to use this. */
47 /* The KSP/USP handling wrt SP may not follow the hardware exactly (the hw
48 looks at current mode and uses either SP or USP based on that. We instead
49 always operate on SP and mirror things in KSP and USP. During a CEC
50 transition, we take care of syncing the values. This lowers the simulation
51 complexity and speeds things up a bit. */
/* Core register file.  R0-R7 and P0-P7 share dpregs[]: data registers
   occupy slots 0-7 and pointer registers slots 8-15 (see the DREG/PREG
   accessors below).  iregs/mregs/bregs/lregs are the I/M/B/L address
   registers; cycles[] holds CYCLES, CYCLES2, and the CYCLES2 shadow.  */
bu32 dpregs[16], iregs[4], mregs[4], bregs[4], lregs[4], cycles[3];
/* Hardware loop top/count/bottom registers, one set per loop.  */
bu32 lt[2], lc[2], lb[2];
/* Kernel/user stack pointers plus the sequencer and return registers
   (see the KSP/USP mirroring notes above).  */
bu32 ksp, usp, seqstat, syscfg, rets, reti, retx, retn, rete;
/* These ASTAT flags need not be bu32, but it makes pointers easier.
   Each flag is stored unpacked in its own word; SET_ASTAT/ASTAT below
   (re)assemble the architectural register from them.  */
bu32 ac0, ac0_copy, ac1, an, aq;
/* AV0/AV1 and their sticky AV0S/AV1S counterparts; the anonymous
   unions let each pair be addressed by name or by accumulator index.  */
union { struct { bu32 av0; bu32 av1; }; bu32 av[2]; };
union { struct { bu32 av0s; bu32 av1s; }; bu32 avs[2]; };
bu32 az, cc, v, v_copy, vs;
68 /* Set by an instruction emulation function if we performed a jump. We
69 cannot compare oldpc to newpc as this ignores the "jump 0;" case. */
72 /* Used by the CEC to figure out where to return to. */
75 /* How many cycles did this insn take to complete ? */
78 /* The pc currently being interpreted in parallel insns. */
/* Needed for supporting the DISALGNEXCPT instruction.  */
/* Queue of delayed register writes so all parallel insns observe the
   pre-insn register state; see the struct store notes above.  */
struct store stores[20];
89 /* Cache heavily used CPU-specific device pointers. */
/* Build a 32-bit value from the high 16 bits of H and low 16 bits of L.  */
#define REG_H_L(h, l) (((h) & 0xffff0000) | ((l) & 0x0000ffff))

/* Core register accessors.  Data and pointer registers share dpregs[]
   (R0-R7 in slots 0-7, P0-P7 in slots 8-15); SP and FP are P6 and P7.  */
#define DREG(x) (BFIN_CPU_STATE.dpregs[x])
#define PREG(x) (BFIN_CPU_STATE.dpregs[x + 8])
#define SPREG PREG (6)
#define FPREG PREG (7)
/* Index, modify, base, and length (I/M/B/L) address registers.  */
#define IREG(x) (BFIN_CPU_STATE.iregs[x])
#define MREG(x) (BFIN_CPU_STATE.mregs[x])
#define BREG(x) (BFIN_CPU_STATE.bregs[x])
#define LREG(x) (BFIN_CPU_STATE.lregs[x])
/* Accumulator extension (A?.X) and word (A?.W) halves.  */
#define AXREG(x) (BFIN_CPU_STATE.ax[x])
#define AWREG(x) (BFIN_CPU_STATE.aw[x])
#define CCREG (BFIN_CPU_STATE.cc)
/* Hardware loop count/top/bottom registers.  */
#define LCREG(x) (BFIN_CPU_STATE.lc[x])
#define LTREG(x) (BFIN_CPU_STATE.lt[x])
#define LBREG(x) (BFIN_CPU_STATE.lb[x])
/* CYCLES, CYCLES2, and the CYCLES2 shadow copy.  */
#define CYCLESREG (BFIN_CPU_STATE.cycles[0])
#define CYCLES2REG (BFIN_CPU_STATE.cycles[1])
#define CYCLES2SHDREG (BFIN_CPU_STATE.cycles[2])
#define KSPREG (BFIN_CPU_STATE.ksp)
#define USPREG (BFIN_CPU_STATE.usp)
#define SEQSTATREG (BFIN_CPU_STATE.seqstat)
#define SYSCFGREG (BFIN_CPU_STATE.syscfg)
#define RETSREG (BFIN_CPU_STATE.rets)
#define RETIREG (BFIN_CPU_STATE.reti)
#define RETXREG (BFIN_CPU_STATE.retx)
#define RETNREG (BFIN_CPU_STATE.retn)
#define RETEREG (BFIN_CPU_STATE.rete)
#define PCREG (BFIN_CPU_STATE.pc)
/* EMUDAT transfer registers (in = slot 0, out = slot 1).  */
#define EMUDAT_INREG (BFIN_CPU_STATE.emudat[0])
#define EMUDAT_OUTREG (BFIN_CPU_STATE.emudat[1])
#define INSN_LEN (BFIN_CPU_STATE.insn_len)
#define CYCLE_DELAY (BFIN_CPU_STATE.cycle_delay)
#define DIS_ALGN_EXPT (BFIN_CPU_STATE.dis_algn_expt)
/* SEQSTAT bitfields: EXCAUSE (6 bits at bit 0) and HWERRCAUSE (5 bits
   at bit 14).  The unparameterized EXCAUSE/HWERRCAUSE macros extract
   the current field value from SEQSTAT.  */
#define EXCAUSE_SHIFT 0
#define EXCAUSE_MASK (0x3f << EXCAUSE_SHIFT)
#define EXCAUSE ((SEQSTATREG & EXCAUSE_MASK) >> EXCAUSE_SHIFT)
#define HWERRCAUSE_SHIFT 14
#define HWERRCAUSE_MASK (0x1f << HWERRCAUSE_SHIFT)
#define HWERRCAUSE ((SEQSTATREG & HWERRCAUSE_MASK) >> HWERRCAUSE_SHIFT)
139 #define _SET_CORE32REG_IDX(reg, p, x, val) \
142 TRACE_REGISTER (cpu, "wrote "#p"%i = %#x", x, __v); \
/* Traced setters for the indexed core registers; each expands through
   _SET_CORE32REG_IDX (above) so the write is logged with its register
   class letter and index.  */
#define SET_DREG(x, val) _SET_CORE32REG_IDX (DREG (x), R, x, val)
#define SET_PREG(x, val) _SET_CORE32REG_IDX (PREG (x), P, x, val)
#define SET_IREG(x, val) _SET_CORE32REG_IDX (IREG (x), I, x, val)
#define SET_MREG(x, val) _SET_CORE32REG_IDX (MREG (x), M, x, val)
#define SET_BREG(x, val) _SET_CORE32REG_IDX (BREG (x), B, x, val)
#define SET_LREG(x, val) _SET_CORE32REG_IDX (LREG (x), L, x, val)
#define SET_LCREG(x, val) _SET_CORE32REG_IDX (LCREG (x), LC, x, val)
#define SET_LTREG(x, val) _SET_CORE32REG_IDX (LTREG (x), LT, x, val)
#define SET_LBREG(x, val) _SET_CORE32REG_IDX (LBREG (x), LB, x, val)

/* Write one or both 16-bit halves of a data register; the single-half
   forms preserve the other half via REG_H_L.  */
#define SET_DREG_L_H(x, l, h) SET_DREG (x, REG_H_L (h, l))
#define SET_DREG_L(x, l) SET_DREG (x, REG_H_L (DREG (x), l))
#define SET_DREG_H(x, h) SET_DREG (x, REG_H_L (h, DREG (x)))
159 #define _SET_CORE32REG_ALU(reg, p, x, val) \
162 TRACE_REGISTER (cpu, "wrote A%i"#p" = %#x", x, __v); \
/* Traced setters for the accumulator extension (X) and word (W) halves.  */
#define SET_AXREG(x, val) _SET_CORE32REG_ALU (AXREG (x), X, x, val)
#define SET_AWREG(x, val) _SET_CORE32REG_ALU (AWREG (x), W, x, val)
168 #define SET_AREG(x, val) \
171 SET_AXREG (x, (__a >> 32) & 0xff); \
172 SET_AWREG (x, __a); \
174 #define SET_AREG32(x, val) \
176 SET_AWREG (x, val); \
177 SET_AXREG (x, -(AWREG (x) >> 31)); \
180 #define _SET_CORE32REG(reg, val) \
183 TRACE_REGISTER (cpu, "wrote "#reg" = %#x", __v); \
/* Traced setters for the non-indexed core registers; each expands
   through _SET_CORE32REG (above), which logs the write by name.  */
#define SET_FPREG(val) _SET_CORE32REG (FP, val)
#define SET_SPREG(val) _SET_CORE32REG (SP, val)
#define SET_CYCLESREG(val) _SET_CORE32REG (CYCLES, val)
#define SET_CYCLES2REG(val) _SET_CORE32REG (CYCLES2, val)
#define SET_CYCLES2SHDREG(val) _SET_CORE32REG (CYCLES2SHD, val)
#define SET_KSPREG(val) _SET_CORE32REG (KSP, val)
#define SET_USPREG(val) _SET_CORE32REG (USP, val)
#define SET_SYSCFGREG(val) _SET_CORE32REG (SYSCFG, val)
#define SET_RETSREG(val) _SET_CORE32REG (RETS, val)
#define SET_RETIREG(val) _SET_CORE32REG (RETI, val)
#define SET_RETXREG(val) _SET_CORE32REG (RETX, val)
#define SET_RETNREG(val) _SET_CORE32REG (RETN, val)
#define SET_RETEREG(val) _SET_CORE32REG (RETE, val)
#define SET_PCREG(val) _SET_CORE32REG (PC, val)
201 #define _SET_CORE32REGFIELD(reg, field, val, mask, shift) \
204 bu32 __v = ((reg##REG) & ~(mask)) | (__f << (shift)); \
205 TRACE_REGISTER (cpu, "wrote "#field" = %#x ("#reg" = %#x)", __f, __v); \
#define SET_SEQSTATREG(val) _SET_CORE32REG (SEQSTAT, val)
/* Update only the EXCAUSE / HWERRCAUSE field within SEQSTAT, leaving
   the other bits untouched (via _SET_CORE32REGFIELD above).  */
#define SET_EXCAUSE(excp) _SET_CORE32REGFIELD (SEQSTAT, EXCAUSE, excp, EXCAUSE_MASK, EXCAUSE_SHIFT)
#define SET_HWERRCAUSE(hwerr) _SET_CORE32REGFIELD (SEQSTAT, HWERRCAUSE, hwerr, HWERRCAUSE_MASK, HWERRCAUSE_SHIFT)
/* ASTAT bit positions (the remaining *_BIT values, e.g. AZ_BIT and
   AN_BIT, are defined nearby).  */
#define AC0_COPY_BIT 2
#define RND_MOD_BIT 8

/* Mask of every ASTAT bit the simulator models individually; anything
   outside this mask lands in the astat_reserved field when ASTAT is
   written (see SET_ASTAT below).  */
#define ASTAT_DEFINED_BITS \
  ((1 << AZ_BIT) | (1 << AN_BIT) | (1 << AC0_COPY_BIT) | (1 << V_COPY_BIT) \
   |(1 << CC_BIT) | (1 << AQ_BIT) \
   |(1 << RND_MOD_BIT) \
   |(1 << AC0_BIT) | (1 << AC1_BIT) \
   |(1 << AV0_BIT) | (1 << AV0S_BIT) | (1 << AV1_BIT) | (1 << AV1S_BIT) \
   |(1 << V_BIT) | (1 << VS_BIT))
/* Access one unpacked ASTAT flag field in the CPU state.  */
#define ASTATREG(field) (BFIN_CPU_STATE.field)
/* Shift a single-flag field to its architectural bit position, for
   reassembling the full ASTAT value.  */
#define ASTAT_DEPOSIT(field, bit) (ASTATREG(field) << (bit))
238 (ASTAT_DEPOSIT(az, AZ_BIT) \
239 |ASTAT_DEPOSIT(an, AN_BIT) \
240 |ASTAT_DEPOSIT(ac0_copy, AC0_COPY_BIT) \
241 |ASTAT_DEPOSIT(v_copy, V_COPY_BIT) \
242 |ASTAT_DEPOSIT(cc, CC_BIT) \
243 |ASTAT_DEPOSIT(aq, AQ_BIT) \
244 |ASTAT_DEPOSIT(rnd_mod, RND_MOD_BIT) \
245 |ASTAT_DEPOSIT(ac0, AC0_BIT) \
246 |ASTAT_DEPOSIT(ac1, AC1_BIT) \
247 |ASTAT_DEPOSIT(av0, AV0_BIT) \
248 |ASTAT_DEPOSIT(av0s, AV0S_BIT) \
249 |ASTAT_DEPOSIT(av1, AV1_BIT) \
250 |ASTAT_DEPOSIT(av1s, AV1S_BIT) \
251 |ASTAT_DEPOSIT(v, V_BIT) \
252 |ASTAT_DEPOSIT(vs, VS_BIT) \
253 |ASTATREG(astat_reserved))
/* Pull a single flag bit out of a packed ASTAT value A.  */
#define ASTAT_EXTRACT(a, bit) (((a) >> bit) & 1)
/* Store that extracted bit into the flag's unpacked state field.  */
#define _SET_ASTAT(a, field, bit) (ASTATREG(field) = ASTAT_EXTRACT(a, bit))
257 #define SET_ASTAT(a) \
259 TRACE_REGISTER (cpu, "wrote ASTAT = %#x", a); \
260 _SET_ASTAT(a, az, AZ_BIT); \
261 _SET_ASTAT(a, an, AN_BIT); \
262 _SET_ASTAT(a, ac0_copy, AC0_COPY_BIT); \
263 _SET_ASTAT(a, v_copy, V_COPY_BIT); \
264 _SET_ASTAT(a, cc, CC_BIT); \
265 _SET_ASTAT(a, aq, AQ_BIT); \
266 _SET_ASTAT(a, rnd_mod, RND_MOD_BIT); \
267 _SET_ASTAT(a, ac0, AC0_BIT); \
268 _SET_ASTAT(a, ac1, AC1_BIT); \
269 _SET_ASTAT(a, av0, AV0_BIT); \
270 _SET_ASTAT(a, av0s, AV0S_BIT); \
271 _SET_ASTAT(a, av1, AV1_BIT); \
272 _SET_ASTAT(a, av1s, AV1S_BIT); \
273 _SET_ASTAT(a, v, V_BIT); \
274 _SET_ASTAT(a, vs, VS_BIT); \
275 ASTATREG(astat_reserved) = (a) & ~ASTAT_DEFINED_BITS; \
277 #define SET_ASTATREG(field, val) \
280 TRACE_REGISTER (cpu, "wrote ASTAT["#field"] = %i", __v); \
281 ASTATREG (field) = __v; \
282 if (&ASTATREG (field) == &ASTATREG (ac0)) \
284 TRACE_REGISTER (cpu, "wrote ASTAT["#field"_copy] = %i", __v); \
285 ASTATREG (ac0_copy) = __v; \
287 else if (&ASTATREG (field) == &ASTATREG (v)) \
289 TRACE_REGISTER (cpu, "wrote ASTAT["#field"_copy] = %i", __v); \
290 ASTATREG (v_copy) = __v; \
/* CC is just another ASTAT flag, so route through SET_ASTATREG for
   the copy-flag bookkeeping and tracing.  */
#define SET_CCREG(val) SET_ASTATREG (cc, val)

/* SYSCFG register bits (SSSTEP/CCEN/SNEN, per the hardware register
   layout).  */
#define SYSCFG_SSSTEP (1 << 0)
#define SYSCFG_CCEN (1 << 1)
#define SYSCFG_SNEN (1 << 2)
299 #define __PUT_MEM(taddr, v, size) \
301 bu##size __v = (v); \
302 bu32 __taddr = (taddr); \
303 int __cnt, __bytes = size / 8; \
304 mmu_check_addr (cpu, __taddr, true, false, __bytes); \
305 __cnt = sim_core_write_buffer (CPU_STATE(cpu), cpu, write_map, \
306 (void *)&__v, __taddr, __bytes); \
307 if (__cnt != __bytes) \
308 mmu_process_fault (cpu, __taddr, true, false, false, true); \
309 TRACE_CORE (cpu, __taddr, __bytes, write_map, __v); \
/* Store an 8/16/32-bit value to simulated memory; __PUT_MEM (above)
   performs the MMU check, the core write, and fault handling.  */
#define PUT_BYTE(taddr, v) __PUT_MEM(taddr, v, 8)
#define PUT_WORD(taddr, v) __PUT_MEM(taddr, v, 16)
#define PUT_LONG(taddr, v) __PUT_MEM(taddr, v, 32)
315 #define __GET_MEM(taddr, size, inst, map) \
318 bu32 __taddr = (taddr); \
319 int __cnt, __bytes = size / 8; \
320 mmu_check_addr (cpu, __taddr, false, inst, __bytes); \
321 __cnt = sim_core_read_buffer (CPU_STATE(cpu), cpu, map, \
322 (void *)&__ret, __taddr, __bytes); \
323 if (__cnt != __bytes) \
324 mmu_process_fault (cpu, __taddr, false, inst, false, true); \
325 TRACE_CORE (cpu, __taddr, __bytes, map, __ret); \
/* Data loads go through the normal read map and are not marked as
   instruction fetches.  */
#define _GET_MEM(taddr, size) __GET_MEM(taddr, size, false, read_map)
#define GET_BYTE(taddr) _GET_MEM(taddr, 8)
#define GET_WORD(taddr) _GET_MEM(taddr, 16)
#define GET_LONG(taddr) _GET_MEM(taddr, 32)

/* Instruction fetches are 16 bits wide and use the exec map; the
   _CHECK variant only runs the MMU access check for 2 bytes.  */
#define IFETCH(taddr) __GET_MEM(taddr, 16, true, exec_map)
#define IFETCH_CHECK(taddr) mmu_check_addr (cpu, taddr, false, true, 2)
/* Simulate a system call raised by the target program.  */
extern void bfin_syscall (SIM_CPU *);
/* Interpret one Blackfin insn at the given address; the bu32 return is
   presumably the insn/next-PC value — confirm against the definition.  */
extern bu32 interp_insn_bfin (SIM_CPU *, bu32);
/* Compute the next PC, taking hardware loop wrap-around into account.  */
extern bu32 hwloop_get_next_pc (SIM_CPU *, bu32, bu32);
/* Defines for Blackfin memory layouts.  */
#define BFIN_ASYNC_BASE 0x20000000
#define BFIN_SYSTEM_MMR_BASE 0xFFC00000
#define BFIN_CORE_MMR_BASE 0xFFE00000
/* 4 KiB of L1 scratchpad SRAM.  */
#define BFIN_L1_SRAM_SCRATCH 0xFFB00000
#define BFIN_L1_SRAM_SCRATCH_SIZE 0x1000
#define BFIN_L1_SRAM_SCRATCH_END (BFIN_L1_SRAM_SCRATCH + BFIN_L1_SRAM_SCRATCH_SIZE)

#define BFIN_L1_CACHE_BYTES 32