Add support to count the number of instructions issued.
[deliverable/binutils-gdb.git] / sim / ppc / core.c
CommitLineData
cb7a6892
MM
1/* This file is part of the program psim.
2
3 Copyright (C) 1994-1995, Andrew Cagney <cagney@highland.com.au>
4
5 This program is free software; you can redistribute it and/or modify
6 it under the terms of the GNU General Public License as published by
7 the Free Software Foundation; either version 2 of the License, or
8 (at your option) any later version.
9
10 This program is distributed in the hope that it will be useful,
11 but WITHOUT ANY WARRANTY; without even the implied warranty of
12 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 GNU General Public License for more details.
14
15 You should have received a copy of the GNU General Public License
16 along with this program; if not, write to the Free Software
17 Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
18
19 */
20
21
22#ifndef _CORE_C_
23#define _CORE_C_
24
25#ifndef STATIC_INLINE_CORE
26#define STATIC_INLINE_CORE STATIC_INLINE
27#endif
28
29#include "basics.h"
30#include "device_tree.h"
31#include "memory_map.h"
32#include "core.h"
33
34
struct _core {
  /* root of the device tree this core was built from */
  device_node *device_tree;
  /* different memory maps, one per access type */
  memory_map *readable; /* really everything */
  memory_map *writeable;
  memory_map *executable;
  /* VEA model requires additional memory information */
  unsigned_word data_upper_bound;  /* current top of the data (sbrk) area */
  unsigned_word data_high_water;   /* highest data address ever backed by memory */
  unsigned_word stack_upper_bound; /* taken from /options/stack-pointer at init */
  unsigned_word stack_lower_bound; /* current bottom of the stack area */
  unsigned_word stack_low_water;   /* lowest stack address ever backed by memory */
  /* misc */
  int trace; /* trace flag passed to core_create() */
};
51
52
53STATIC_INLINE_CORE void
54create_core_from_addresses(device_node *device,
55 void *data)
56{
57 core *memory = (core*)data;
58 device_address *address;
59 for (address = device->addresses;
60 address != NULL;
61 address = address->next_address) {
62 switch (device->type) {
63 case memory_device:
64 {
65 void *ram = zalloc(address->size);
66 TRACE(trace_core,
67 ("create_core_from_addresses() adding memory at 0x%.8x-0x%.8x, size %8d\n",
68 address->lower_bound, address->lower_bound + address->size - 1, address->size));
69 core_add_raw_memory(memory,
70 ram,
71 address->lower_bound,
72 address->size,
73 address->access);
74 }
75 break;
76 case sequential_device:
77 case block_device:
78 case bus_device:
79 case other_device:
80 {
81 TRACE(trace_core,
82 ("create_core_from_addresses() adding device at 0x%.8x-0x%.8x, size %8d\n",
83 address->lower_bound, address->lower_bound + address->size - 1, address->size));
84 ASSERT(device->callbacks != NULL);
85 core_add_callback_memory(memory,
86 device,
87 device->callbacks->read_callback,
88 device->callbacks->write_callback,
89 address->lower_bound,
90 address->size,
91 address->access);
92 }
93 break;
94 default:
95 TRACE(trace_core,
96 ("create_core_from_addresses() unknown type %d\n", (int)device->type));
97 break;
98 /* nothing happens here */
99 }
100 }
101}
102
103
104INLINE_CORE core *
105core_create(device_node *root,
106 int trace)
107{
108 core *memory;
109
110 /* Initialize things */
111 memory = ZALLOC(core);
112 memory->trace = trace;
113 memory->device_tree = root;
114
115 /* allocate space for the separate virtual to physical maps */
116 memory->executable = new_memory_map();
117 memory->readable = new_memory_map();
118 memory->writeable = new_memory_map();
119
120 /* initial values for the water marks */
121 memory->data_high_water = 0;
122 memory->stack_low_water = memory->data_high_water - sizeof(unsigned_word);
123
124 /* go over the device tree looking for address ranges to add to
125 memory */
126 device_tree_traverse(root,
127 create_core_from_addresses,
128 NULL,
129 memory);
130
131 /* return the created core object */
132 return memory;
133}
134
135
136STATIC_INLINE_CORE void
137zero_core_from_addresses(device_node *device,
138 void *data)
139{
140 core *memory = (core*)data;
141 device_address *address;
142
143 /* for memory nodes, copy or zero any data */
144 if (device->type == memory_device) {
145 for (address = device->addresses;
146 address != NULL;
147 address = address->next_address) {
148 if (memory_map_zero(memory->readable,
149 address->lower_bound,
150 address->size) != address->size)
151 error("init_core_from_addresses() - zero failed\n");
152 /* adjust high water mark (sbrk) */
153 if (memory->data_upper_bound < address->upper_bound)
154 memory->data_upper_bound = address->upper_bound;
155 }
156 }
157}
158
159STATIC_INLINE_CORE void
160load_core_from_addresses(device_node *device,
161 void *data)
162{
163 core *memory = (core*)data;
164 device_address *address;
165
166 /* initialize the address range with the value attached to the
167 address. Even works for devices! */
168 for (address = device->addresses;
169 address != NULL;
170 address = address->next_address) {
171 /* (re)init the address range. I don't want to think about what
172 this is doing to callback devices! */
173 if (address->init) {
174 if (memory_map_write_buffer(memory->readable,
175 address->init,
176 address->lower_bound,
177 address->size,
178 raw_transfer) != address->size)
179 error("init_core_from_addresses() - write failed\n");
180 }
181 }
182}
183
184INLINE_CORE void
185core_init(core *memory)
186{
187 unsigned nr_cleared;
188 unsigned_word clear_base;
189 unsigned_word clear_bound;
190
191 /* for vea, several memory break points */
192 memory->data_upper_bound = 0;
193 memory->stack_upper_bound = device_tree_find_int(memory->device_tree,
194 "/options/stack-pointer");;
195 memory->stack_lower_bound = memory->stack_upper_bound;
196
197 /* (re) clear all of memory that is specified by memory-address
198 entries. While we're at it determine the upper bound for memory
199 areas */
200 device_tree_traverse(memory->device_tree,
201 NULL,
202 zero_core_from_addresses,
203 memory);
204
205 /* May have grown the data sectioin (vea model), zero that too if
206 present */
207 clear_base = memory->data_upper_bound;
208 clear_bound = memory->data_high_water;
209 if (clear_bound > clear_base) {
210 while ((nr_cleared = memory_map_zero(memory->readable,
211 clear_base,
212 clear_bound - clear_base)) > 0) {
213 clear_base += nr_cleared;
214 }
215 }
216
217 /* clear any part of the stack that was dynamically allocated */
218 clear_base = memory->stack_low_water;
219 clear_bound = memory->stack_upper_bound;
220 if (clear_bound > clear_base) {
221 while ((nr_cleared = memory_map_zero(memory->readable,
222 clear_base,
223 clear_bound - clear_base)) > 0) {
224 clear_base += nr_cleared;
225 }
226 }
227
228 /* with everything zero'ed, now (re) load any data sections */
229 device_tree_traverse(memory->device_tree,
230 NULL,
231 load_core_from_addresses,
232 memory);
233
234}
235
236
237
238INLINE_CORE void
239core_add_raw_memory(core *memory,
240 void *buffer,
241 unsigned_word base,
242 unsigned size,
243 device_access access)
244{
245 if (access & device_is_readable)
246 memory_map_add_raw_memory(memory->readable,
247 buffer, base, size);
248 if (access & device_is_writeable)
249 memory_map_add_raw_memory(memory->writeable,
250 buffer, base, size);
251 if (access & device_is_executable)
252 memory_map_add_raw_memory(memory->executable,
253 buffer, base, size);
254}
255
256
257INLINE_CORE void
258core_add_callback_memory(core *memory,
259 device_node *device,
260 device_reader_callback *reader,
261 device_writer_callback *writer,
262 unsigned_word base,
263 unsigned size,
264 device_access access)
265{
266 if (access & device_is_readable)
267 memory_map_add_callback_memory(memory->readable,
268 device, reader, writer,
269 base, size);
270 if (access & device_is_writeable)
271 memory_map_add_callback_memory(memory->writeable,
272 device, reader, writer,
273 base, size);
274 if (access & device_is_executable)
275 memory_map_add_callback_memory(memory->executable,
276 device, reader, writer,
277 base, size);
278}
279
280
281STATIC_INLINE_CORE void
282malloc_core_memory(core *memory,
283 unsigned_word base,
284 unsigned size,
285 device_access access)
286{
287 void *buffer = (void*)zalloc(size);
288 core_add_raw_memory(memory, buffer, base, size, access);
289}
290
INLINE_CORE unsigned_word
core_data_upper_bound(core *memory)
{
  /* accessor: current top of the data (sbrk) area */
  return memory->data_upper_bound;
}
296
297
INLINE_CORE unsigned_word
core_stack_lower_bound(core *memory)
{
  /* accessor: current bottom of the stack area */
  return memory->stack_lower_bound;
}
303
INLINE_CORE unsigned_word
core_stack_size(core *memory)
{
  /* number of bytes between the initial stack pointer
     (stack_upper_bound) and the current stack bottom */
  return (memory->stack_upper_bound - memory->stack_lower_bound);
}
309
310
311
312INLINE_CORE void
313core_add_data(core *memory, unsigned_word incr)
314{
c464ba66
MM
315 unsigned_word new_upper_bound = memory->data_upper_bound + incr;
316 if (new_upper_bound > memory->data_high_water) {
317 if (memory->data_upper_bound >= memory->data_high_water)
318 /* all the memory is new */
319 malloc_core_memory(memory,
320 memory->data_upper_bound,
321 incr,
322 device_is_readable | device_is_writeable);
323 else
324 /* some of the memory was already allocated, only need to add
325 missing bit */
326 malloc_core_memory(memory,
327 memory->data_high_water,
328 new_upper_bound - memory->data_high_water,
329 device_is_readable | device_is_writeable);
330 memory->data_high_water = new_upper_bound;
cb7a6892 331 }
c464ba66 332 memory->data_upper_bound = new_upper_bound;
cb7a6892
MM
333}
334
335
336INLINE_CORE void
337core_add_stack(core *memory, unsigned_word incr)
338{
c464ba66
MM
339 unsigned_word new_lower_bound = memory->stack_lower_bound - incr;
340 if (new_lower_bound < memory->stack_low_water) {
341 if (memory->stack_lower_bound <= memory->stack_low_water)
342 /* all the memory is new */
343 malloc_core_memory(memory,
344 new_lower_bound,
345 incr,
346 device_is_readable | device_is_writeable);
347 else
348 /* allocate only the extra bit */
349 malloc_core_memory(memory,
350 new_lower_bound,
351 memory->stack_low_water - new_lower_bound,
352 device_is_readable | device_is_writeable);
353 memory->stack_low_water = new_lower_bound;
cb7a6892 354 }
c464ba66 355 memory->stack_lower_bound = new_lower_bound;
cb7a6892
MM
356}
357
358
INLINE_CORE memory_map *
core_readable(core *core)
{
  /* accessor: the read-access map ("really everything" - see struct) */
  return core->readable;
}
364
365
INLINE_CORE memory_map *
core_writeable(core *core)
{
  /* accessor: the write-access map */
  return core->writeable;
}
371
372
INLINE_CORE memory_map *
core_executable(core *core)
{
  /* accessor: the execute-access map */
  return core->executable;
}
378
#endif /* _CORE_C_ */
This page took 0.040563 seconds and 4 git commands to generate.