/* Parts of target interface that deal with accessing memory and memory-like
   objects.

   Copyright (C) 2006
   Free Software Foundation, Inc.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 51 Franklin Street, Fifth Floor,
   Boston, MA 02110-1301, USA.  */

#include "defs.h"
#include "vec.h"
#include "target.h"
#include "memory-map.h"

#include "gdb_assert.h"

#include <stdio.h>
#include <sys/time.h>

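/* A qsort comparison function, sorting memory_write_request
   structures by ascending starting address.  */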
static int
compare_block_starting_address (const void *a, const void *b)
{
  const struct memory_write_request *a_req = a;
  const struct memory_write_request *b_req = b;

  if (a_req->begin < b_req->begin)
    return -1;
  else if (a_req->begin == b_req->begin)
    return 0;
  else
    return 1;
}

/* Add to RESULT all memory write requests from BLOCKS that fall in
   the range [BEGIN, END).

   If a memory request is only partially in the specified range, then
   only the part that falls within the range will be added.  */
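/* For example, given a block covering [0x1000, 0x2000) and a
   requested range of [0x1800, 0x3000), only the sub-range
   [0x1800, 0x2000) is pushed onto *RESULT, with the DATA pointer
   advanced by the 0x800-byte offset into the original block.  */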

static void
claim_memory (VEC(memory_write_request_s) *blocks,
              VEC(memory_write_request_s) **result,
              ULONGEST begin,
              ULONGEST end)
{
  int i;
  ULONGEST claimed_begin;
  ULONGEST claimed_end;
  struct memory_write_request *r;

  for (i = 0; VEC_iterate (memory_write_request_s, blocks, i, r); ++i)
    {
      /* If the request doesn't overlap [BEGIN, END), skip it.  We
         must handle END == 0 meaning the top of memory; we don't yet
         check for R->end == 0, which would also mean the top of
         memory, but there's an assertion in
         target_write_memory_blocks which checks for that.  */

      if (begin >= r->end)
        continue;
      if (end != 0 && end <= r->begin)
        continue;

      claimed_begin = max (begin, r->begin);
      if (end == 0)
        claimed_end = r->end;
      else
        claimed_end = min (end, r->end);

      if (claimed_begin == r->begin && claimed_end == r->end)
        VEC_safe_push (memory_write_request_s, *result, r);
      else
        {
          struct memory_write_request *n =
            VEC_safe_push (memory_write_request_s, *result, NULL);
          memset (n, 0, sizeof (struct memory_write_request));
          n->begin = claimed_begin;
          n->end = claimed_end;
          n->data = r->data + (claimed_begin - r->begin);
        }
    }
}

/* Given a vector of struct memory_write_request objects in BLOCKS,
   add the requests for flash memory to FLASH_BLOCKS and the requests
   for regular memory to REGULAR_BLOCKS.  */

static void
split_regular_and_flash_blocks (VEC(memory_write_request_s) *blocks,
                                VEC(memory_write_request_s) **regular_blocks,
                                VEC(memory_write_request_s) **flash_blocks)
{
  struct mem_region *region;
  CORE_ADDR cur_address;

  /* This implementation runs in O(length(regions)*length(blocks)) time.
     However, in most cases the number of blocks will be small, so this does
     not matter.

     Note also that it's extremely unlikely that a memory write request
     will span more than one memory region; however, for safety we handle
     such situations.  */

  cur_address = 0;
  while (1)
    {
      VEC(memory_write_request_s) **r;
      region = lookup_mem_region (cur_address);

      r = region->attrib.mode == MEM_FLASH ? flash_blocks : regular_blocks;
      cur_address = region->hi;
      claim_memory (blocks, r, region->lo, region->hi);

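      /* A region whose HI field is zero extends to the top of the
         address space; once we have claimed memory from it, every
         block has been assigned to one of the two lists.  */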
      if (cur_address == 0)
        break;
    }
}

/* Given an ADDRESS, if BEGIN is non-NULL this function sets *BEGIN
   to the start of the flash block containing the address.  Similarly,
   if END is non-NULL, *END will be set to the address one past the end
   of the block containing the address.  */

static void
block_boundaries (CORE_ADDR address, CORE_ADDR *begin, CORE_ADDR *end)
{
  struct mem_region *region;
  unsigned blocksize;

  region = lookup_mem_region (address);
  gdb_assert (region->attrib.mode == MEM_FLASH);
  blocksize = region->attrib.blocksize;
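  /* Round ADDRESS down to the start of its block for *BEGIN, and up to
     the next block boundary for *END; an ADDRESS already on a boundary
     is left unchanged by the rounding up.  */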
  if (begin)
    *begin = address / blocksize * blocksize;
  if (end)
    *end = (address + blocksize - 1) / blocksize * blocksize;
}

/* Given the list of memory requests to be WRITTEN, this function
   returns write requests covering each group of flash blocks which must
   be erased.  */

static VEC(memory_write_request_s) *
blocks_to_erase (VEC(memory_write_request_s) *written)
{
  unsigned i;
  struct memory_write_request *ptr;

  VEC(memory_write_request_s) *result = NULL;

  for (i = 0; VEC_iterate (memory_write_request_s, written, i, ptr); ++i)
    {
      CORE_ADDR begin, end;

      block_boundaries (ptr->begin, &begin, 0);
      block_boundaries (ptr->end, 0, &end);

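      /* WRITTEN is sorted by starting address, so if this block range
         overlaps or abuts the previous erase request we can simply
         extend that request rather than pushing a new one.  */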
      if (!VEC_empty (memory_write_request_s, result)
          && VEC_last (memory_write_request_s, result)->end >= begin)
        {
          VEC_last (memory_write_request_s, result)->end = end;
        }
      else
        {
          struct memory_write_request *n =
            VEC_safe_push (memory_write_request_s, result, NULL);
          memset (n, 0, sizeof (struct memory_write_request));
          n->begin = begin;
          n->end = end;
        }
    }

  return result;
}

/* Given ERASED_BLOCKS, a list of blocks that will be erased with
   flash erase commands, and WRITTEN_BLOCKS, the list of memory
   addresses that will be written, compute the set of memory addresses
   that will be erased but not rewritten (e.g. padding within a block
   which is only partially filled by "load").  */
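/* For example, if a block at [0x1000, 0x2000) is to be erased but the
   only write request covers [0x1000, 0x1800), the returned vector will
   contain the unwritten remainder [0x1800, 0x2000).  */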

static VEC(memory_write_request_s) *
compute_garbled_blocks (VEC(memory_write_request_s) *erased_blocks,
                        VEC(memory_write_request_s) *written_blocks)
{
  VEC(memory_write_request_s) *result = NULL;

  unsigned i, j;
  unsigned je = VEC_length (memory_write_request_s, written_blocks);
  struct memory_write_request *erased_p;

  /* Look at each erased memory_write_request in turn, and
     see what part of it is subsequently written to.

     This implementation is O(length(erased) * length(written)).  If
     the lists are sorted at this point it could be rewritten more
     efficiently, but the complexity is not generally worthwhile.  */

  for (i = 0;
       VEC_iterate (memory_write_request_s, erased_blocks, i, erased_p);
       ++i)
    {
      /* Make a deep copy -- it will be modified inside the loop, but
         we don't want to modify the original vector.  */
      struct memory_write_request erased = *erased_p;

      for (j = 0; j != je;)
        {
          struct memory_write_request *written
            = VEC_index (memory_write_request_s,
                         written_blocks, j);

          /* Now try various cases.  */

          /* If WRITTEN is fully to the left of ERASED, check the next
             written memory_write_request.  */
          if (written->end <= erased.begin)
            {
              ++j;
              continue;
            }

          /* If WRITTEN is fully to the right of ERASED, then ERASED
             is not written at all.  WRITTEN might affect other
             blocks.  */
          if (written->begin >= erased.end)
            {
              VEC_safe_push (memory_write_request_s, result, &erased);
              goto next_erased;
            }

          /* If all of ERASED is completely written, we can move on to
             the next erased region.  */
          if (written->begin <= erased.begin
              && written->end >= erased.end)
            {
              goto next_erased;
            }

          /* If there is an unwritten part at the beginning of ERASED,
             then we should record that part and try this inner loop
             again for the remainder.  */
          if (written->begin > erased.begin)
            {
              struct memory_write_request *n =
                VEC_safe_push (memory_write_request_s, result, NULL);
              memset (n, 0, sizeof (struct memory_write_request));
              n->begin = erased.begin;
              n->end = written->begin;
              erased.begin = written->begin;
              continue;
            }

          /* If there is an unwritten part at the end of ERASED, we
             forget about the part that was written to and wait to see
             if the next write request writes more of ERASED.  We can't
             push it yet.  */
          if (written->end < erased.end)
            {
              erased.begin = written->end;
              ++j;
              continue;
            }
        }

      /* If we ran out of write requests without doing anything about
         ERASED, then that means it's really erased.  */
      VEC_safe_push (memory_write_request_s, result, &erased);

    next_erased:
      ;
    }

  return result;
}

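/* A cleanup function that frees the DATA buffer of every request in
   the write-request vector pointed to by P.  */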
static void
cleanup_request_data (void *p)
{
  VEC(memory_write_request_s) **v = p;
  struct memory_write_request *r;
  int i;

  for (i = 0; VEC_iterate (memory_write_request_s, *v, i, r); ++i)
    xfree (r->data);
}

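/* A cleanup function that releases the write-request vector pointed
   to by P.  */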
static void
cleanup_write_requests_vector (void *p)
{
  VEC(memory_write_request_s) **v = p;
  VEC_free (memory_write_request_s, *v);
}

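/* Write the blocks described by REQUESTS to the target, using flash
   erase and write commands for any that fall within flash memory
   regions.  PRESERVE_FLASH_P says whether flash contents that will be
   erased but not rewritten should be read back and restored;
   PROGRESS_CB is passed to target_write_with_progress along with each
   request's BATON.  Returns zero on success, nonzero on failure.  The
   "load" command is a typical caller, with one request per downloaded
   section.  */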
int
target_write_memory_blocks (VEC(memory_write_request_s) *requests,
                            enum flash_preserve_mode preserve_flash_p,
                            void (*progress_cb) (ULONGEST, void *))
{
  struct cleanup *back_to = make_cleanup (null_cleanup, NULL);
  VEC(memory_write_request_s) *blocks = VEC_copy (memory_write_request_s,
                                                  requests);
  unsigned i;
  int err = 0;
  struct memory_write_request *r;
  VEC(memory_write_request_s) *regular = NULL;
  VEC(memory_write_request_s) *flash = NULL;
  VEC(memory_write_request_s) *erased, *garbled;

  /* END == 0 would represent wraparound: a write to the very last
     byte of the address space.  This file was not written with that
     possibility in mind.  This is fixable, but a lot of work for a
     rare problem; so for now, fail noisily here instead of obscurely
     later.  */
  for (i = 0; VEC_iterate (memory_write_request_s, requests, i, r); ++i)
    gdb_assert (r->end != 0);

  make_cleanup (cleanup_write_requests_vector, &blocks);

  /* Sort the blocks by their start address.  */
  qsort (VEC_address (memory_write_request_s, blocks),
         VEC_length (memory_write_request_s, blocks),
         sizeof (struct memory_write_request), compare_block_starting_address);

  /* Split the blocks into a list of regular memory blocks and a list
     of flash memory blocks.  */
  make_cleanup (cleanup_write_requests_vector, &regular);
  make_cleanup (cleanup_write_requests_vector, &flash);
  split_regular_and_flash_blocks (blocks, &regular, &flash);

  /* If a variable is added to forbid flash write, even during "load",
     it should be checked here.  Similarly, if this function is used
     for other situations besides "load" in which writing to flash
     is undesirable, that should be checked here.  */

  /* Find flash blocks to erase.  */
  erased = blocks_to_erase (flash);
  make_cleanup (cleanup_write_requests_vector, &erased);

  /* Find the flash regions that will be erased but not overwritten,
     then either preserve or discard their old contents.  */
  garbled = compute_garbled_blocks (erased, flash);
  make_cleanup (cleanup_request_data, &garbled);
  make_cleanup (cleanup_write_requests_vector, &garbled);

  if (!VEC_empty (memory_write_request_s, garbled))
    {
      if (preserve_flash_p == flash_preserve)
        {
          struct memory_write_request *r;

          /* Read in the regions that must be preserved and add them to
             the list of blocks to be written to flash.  */
          for (i = 0; VEC_iterate (memory_write_request_s, garbled, i, r); ++i)
            {
              gdb_assert (r->data == NULL);
              r->data = xmalloc (r->end - r->begin);
              err = target_read_memory (r->begin, r->data, r->end - r->begin);
              if (err != 0)
                goto out;

              VEC_safe_push (memory_write_request_s, flash, r);
            }

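          /* Re-sort FLASH so that it stays ordered by start address
             now that the preserved regions have been added.  */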
          qsort (VEC_address (memory_write_request_s, flash),
                 VEC_length (memory_write_request_s, flash),
                 sizeof (struct memory_write_request), compare_block_starting_address);
        }
    }

  /* We could coalesce adjacent memory blocks here, to reduce the
     number of write requests for small sections.  However, we would
     have to reallocate and copy the data pointers, which could be
     large; large sections are more common in loadable objects than
     large numbers of small sections (although the reverse can be true
     in object files).  So, we issue at least one write request per
     passed struct memory_write_request.  The remote stub will still
     have the opportunity to batch flash requests.  */

  /* Write regular blocks.  */
  for (i = 0; VEC_iterate (memory_write_request_s, regular, i, r); ++i)
    {
      LONGEST len;

      len = target_write_with_progress (&current_target,
                                        TARGET_OBJECT_MEMORY, NULL,
                                        r->data, r->begin, r->end - r->begin,
                                        progress_cb, r->baton);
      if (len < (LONGEST) (r->end - r->begin))
        {
          /* Call error?  */
          err = -1;
          goto out;
        }
    }

  if (!VEC_empty (memory_write_request_s, erased))
    {
      /* Erase all pages.  */
      for (i = 0; VEC_iterate (memory_write_request_s, erased, i, r); ++i)
        target_flash_erase (r->begin, r->end - r->begin);

      /* Write flash data.  */
      for (i = 0; VEC_iterate (memory_write_request_s, flash, i, r); ++i)
        {
          LONGEST len;

          len = target_write_with_progress (&current_target,
                                            TARGET_OBJECT_FLASH, NULL,
                                            r->data, r->begin, r->end - r->begin,
                                            progress_cb, r->baton);
          if (len < (LONGEST) (r->end - r->begin))
            error (_("Error writing data to flash"));
        }

      target_flash_done ();
    }

 out:
  do_cleanups (back_to);

  return err;
}