/* Parts of target interface that deal with accessing memory and memory-like
   objects.

   Copyright (C) 2006-2013 Free Software Foundation, Inc.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include "defs.h"
#include "vec.h"
#include "target.h"
#include "memory-map.h"

#include "gdb_assert.h"

#include <stdio.h>
#include <sys/time.h>

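/* A qsort comparator that orders struct memory_write_request objects
   by their starting address.  */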
static int
compare_block_starting_address (const void *a, const void *b)
{
  const struct memory_write_request *a_req = a;
  const struct memory_write_request *b_req = b;

  if (a_req->begin < b_req->begin)
    return -1;
  else if (a_req->begin == b_req->begin)
    return 0;
  else
    return 1;
}

/* Adds to RESULT all memory write requests from BLOCKS that are
   in the [BEGIN, END) range.

   If a memory write request is only partially in the specified range,
   the part that is in range is added.  */
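
/* For example (hypothetical values): given one request covering
   [0x1000, 0x1800) and a claim range of [0x1400, 0x2000), a copy of
   the request trimmed to [0x1400, 0x1800) is pushed onto *RESULT,
   with its data pointer advanced by 0x400 bytes.  */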

static void
claim_memory (VEC(memory_write_request_s) *blocks,
	      VEC(memory_write_request_s) **result,
	      ULONGEST begin,
	      ULONGEST end)
{
  int i;
  ULONGEST claimed_begin;
  ULONGEST claimed_end;
  struct memory_write_request *r;

  for (i = 0; VEC_iterate (memory_write_request_s, blocks, i, r); ++i)
    {
      /* If the request doesn't overlap [BEGIN, END), skip it.  We
	 must handle END == 0 meaning the top of memory; we don't yet
	 check for R->end == 0, which would also mean the top of
	 memory, but there's an assertion in
	 target_write_memory_blocks which checks for that.  */

      if (begin >= r->end)
	continue;
      if (end != 0 && end <= r->begin)
	continue;

      claimed_begin = max (begin, r->begin);
      if (end == 0)
	claimed_end = r->end;
      else
	claimed_end = min (end, r->end);

      if (claimed_begin == r->begin && claimed_end == r->end)
	VEC_safe_push (memory_write_request_s, *result, r);
      else
	{
	  struct memory_write_request *n =
	    VEC_safe_push (memory_write_request_s, *result, NULL);

	  *n = *r;
	  n->begin = claimed_begin;
	  n->end = claimed_end;
	  n->data += claimed_begin - r->begin;
	}
    }
}

/* Given a vector of struct memory_write_request objects in BLOCKS,
   add memory requests for flash memory into FLASH_BLOCKS, and for
   regular memory to REGULAR_BLOCKS.  */

static void
split_regular_and_flash_blocks (VEC(memory_write_request_s) *blocks,
				VEC(memory_write_request_s) **regular_blocks,
				VEC(memory_write_request_s) **flash_blocks)
{
  struct mem_region *region;
  CORE_ADDR cur_address;

  /* This implementation runs in O(length(regions)*length(blocks)) time.
     However, in most cases the number of blocks will be small, so this does
     not matter.

     Note also that it's extremely unlikely that a memory write request
     will span more than one memory region; for safety, however, we
     handle such situations.  */

  cur_address = 0;
  while (1)
    {
      VEC(memory_write_request_s) **r;

      region = lookup_mem_region (cur_address);
      r = region->attrib.mode == MEM_FLASH ? flash_blocks : regular_blocks;
      cur_address = region->hi;
      claim_memory (blocks, r, region->lo, region->hi);

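      /* A region with HI == 0 extends to the top of the address
	 space; once it has been claimed there is nothing left to
	 scan.  */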
      if (cur_address == 0)
	break;
    }
}

/* Given an ADDRESS, if BEGIN is non-NULL this function sets *BEGIN
   to the start of the flash block containing the address.  Similarly,
   if END is non-NULL *END will be set to the address one past the end
   of the block containing the address.  */

static void
block_boundaries (CORE_ADDR address, CORE_ADDR *begin, CORE_ADDR *end)
{
  struct mem_region *region;
  unsigned blocksize;

  region = lookup_mem_region (address);
  gdb_assert (region->attrib.mode == MEM_FLASH);
  blocksize = region->attrib.blocksize;
  if (begin)
    *begin = address / blocksize * blocksize;
  if (end)
    /* Round ADDRESS down to a block boundary, then step one whole
       block.  Rounding ADDRESS itself up would incorrectly return
       ADDRESS when it is already block-aligned.  */
    *end = address / blocksize * blocksize + blocksize;
}
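
/* For example (hypothetical values): in a flash region whose
   blocksize is 0x1000, an ADDRESS of 0x1234 yields *BEGIN == 0x1000
   and *END == 0x2000.  */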

/* Given the list of memory requests to be WRITTEN, this function
   returns write requests covering each group of flash blocks which must
   be erased.  */
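
/* For example (hypothetical values): with a blocksize of 0x1000 and
   write requests covering [0x0100, 0x0300) and [0x0f00, 0x1100), the
   blocks to erase overlap, so they are merged into a single request
   covering [0x0000, 0x2000).  */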

static VEC(memory_write_request_s) *
blocks_to_erase (VEC(memory_write_request_s) *written)
{
  unsigned i;
  struct memory_write_request *ptr;

  VEC(memory_write_request_s) *result = NULL;

  for (i = 0; VEC_iterate (memory_write_request_s, written, i, ptr); ++i)
    {
      CORE_ADDR begin, end;

      block_boundaries (ptr->begin, &begin, 0);
      block_boundaries (ptr->end - 1, 0, &end);

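      /* WRITTEN is sorted, so any request whose blocks touch or
	 overlap the last recorded range can simply extend it.  */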
      if (!VEC_empty (memory_write_request_s, result)
	  && VEC_last (memory_write_request_s, result)->end >= begin)
	{
	  VEC_last (memory_write_request_s, result)->end = end;
	}
      else
	{
	  struct memory_write_request *n =
	    VEC_safe_push (memory_write_request_s, result, NULL);

	  memset (n, 0, sizeof (struct memory_write_request));
	  n->begin = begin;
	  n->end = end;
	}
    }

  return result;
}

/* Given ERASED_BLOCKS, a list of blocks that will be erased with
   flash erase commands, and WRITTEN_BLOCKS, the list of memory
   addresses that will be written, compute the set of memory addresses
   that will be erased but not rewritten (e.g. padding within a block
   which is only partially filled by "load").  */
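
/* For example (hypothetical values): if ERASED_BLOCKS contains
   [0x1000, 0x2000) and WRITTEN_BLOCKS contains [0x1200, 0x1800),
   the result contains the two unwritten pieces [0x1000, 0x1200)
   and [0x1800, 0x2000).  */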

static VEC(memory_write_request_s) *
compute_garbled_blocks (VEC(memory_write_request_s) *erased_blocks,
			VEC(memory_write_request_s) *written_blocks)
{
  VEC(memory_write_request_s) *result = NULL;

  unsigned i, j;
  unsigned je = VEC_length (memory_write_request_s, written_blocks);
  struct memory_write_request *erased_p;

  /* Look at each erased memory_write_request in turn, and
     see what part of it is subsequently written to.

     This implementation is O(length(erased) * length(written)).  If
     the lists are sorted at this point it could be rewritten more
     efficiently, but the complexity is not generally worthwhile.  */

  for (i = 0;
       VEC_iterate (memory_write_request_s, erased_blocks, i, erased_p);
       ++i)
    {
      /* Make a deep copy -- it will be modified inside the loop, but
	 we don't want to modify the original vector.  */
      struct memory_write_request erased = *erased_p;

      for (j = 0; j != je;)
	{
	  struct memory_write_request *written
	    = VEC_index (memory_write_request_s,
			 written_blocks, j);

	  /* Now try various cases.  */

	  /* If WRITTEN is fully to the left of ERASED, check the next
	     written memory_write_request.  */
	  if (written->end <= erased.begin)
	    {
	      ++j;
	      continue;
	    }

	  /* If WRITTEN is fully to the right of ERASED, then ERASED
	     is not written at all.  WRITTEN might affect other
	     blocks.  */
	  if (written->begin >= erased.end)
	    {
	      VEC_safe_push (memory_write_request_s, result, &erased);
	      goto next_erased;
	    }

	  /* If all of ERASED is completely written, we can move on to
	     the next erased region.  */
	  if (written->begin <= erased.begin
	      && written->end >= erased.end)
	    {
	      goto next_erased;
	    }

	  /* If there is an unwritten part at the beginning of ERASED,
	     then we should record that part and try this inner loop
	     again for the remainder.  */
	  if (written->begin > erased.begin)
	    {
	      struct memory_write_request *n =
		VEC_safe_push (memory_write_request_s, result, NULL);

	      memset (n, 0, sizeof (struct memory_write_request));
	      n->begin = erased.begin;
	      n->end = written->begin;
	      erased.begin = written->begin;
	      continue;
	    }

	  /* If there is an unwritten part at the end of ERASED, we
	     forget about the part that was written to and wait to see
	     if the next write request writes more of ERASED.  We can't
	     push it yet.  */
	  if (written->end < erased.end)
	    {
	      erased.begin = written->end;
	      ++j;
	      continue;
	    }
	}

      /* If we ran out of write requests without doing anything about
	 ERASED, then that means it's really erased.  */
      VEC_safe_push (memory_write_request_s, result, &erased);

    next_erased:
      ;
    }

  return result;
}

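/* Cleanup function that frees the DATA buffer of every
   memory_write_request in the vector pointed to by P.  */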
static void
cleanup_request_data (void *p)
{
  VEC(memory_write_request_s) **v = p;
  struct memory_write_request *r;
  int i;

  for (i = 0; VEC_iterate (memory_write_request_s, *v, i, r); ++i)
    xfree (r->data);
}

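/* Cleanup function that frees the vector of write requests pointed
   to by P.  */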
static void
cleanup_write_requests_vector (void *p)
{
  VEC(memory_write_request_s) **v = p;

  VEC_free (memory_write_request_s, *v);
}

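/* This function's interface is documented with its declaration in
   target.h.  */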
int
target_write_memory_blocks (VEC(memory_write_request_s) *requests,
			    enum flash_preserve_mode preserve_flash_p,
			    void (*progress_cb) (ULONGEST, void *))
{
  struct cleanup *back_to = make_cleanup (null_cleanup, NULL);
  VEC(memory_write_request_s) *blocks = VEC_copy (memory_write_request_s,
						  requests);
  unsigned i;
  int err = 0;
  struct memory_write_request *r;
  VEC(memory_write_request_s) *regular = NULL;
  VEC(memory_write_request_s) *flash = NULL;
  VEC(memory_write_request_s) *erased, *garbled;

  /* END == 0 would represent wraparound: a write to the very last
     byte of the address space.  This file was not written with that
     possibility in mind.  This is fixable, but a lot of work for a
     rare problem; so for now, fail noisily here instead of obscurely
     later.  */
  for (i = 0; VEC_iterate (memory_write_request_s, requests, i, r); ++i)
    gdb_assert (r->end != 0);

  make_cleanup (cleanup_write_requests_vector, &blocks);

  /* Sort the blocks by their start address.  */
  qsort (VEC_address (memory_write_request_s, blocks),
	 VEC_length (memory_write_request_s, blocks),
	 sizeof (struct memory_write_request), compare_block_starting_address);

  /* Split blocks into a list of regular memory blocks and a list of
     flash memory blocks.  */
  make_cleanup (cleanup_write_requests_vector, &regular);
  make_cleanup (cleanup_write_requests_vector, &flash);
  split_regular_and_flash_blocks (blocks, &regular, &flash);

  /* If a variable is added to forbid flash write, even during "load",
     it should be checked here.  Similarly, if this function is used
     for other situations besides "load" in which writing to flash
     is undesirable, that should be checked here.  */

  /* Find flash blocks to erase.  */
  erased = blocks_to_erase (flash);
  make_cleanup (cleanup_write_requests_vector, &erased);

  /* Find what flash regions will be erased, and not overwritten; then
     either preserve or discard the old contents.  */
  garbled = compute_garbled_blocks (erased, flash);
  /* Cleanups run in reverse order of registration, so register the
     vector cleanup first: the DATA buffers must be freed while the
     vector still exists.  */
  make_cleanup (cleanup_write_requests_vector, &garbled);
  make_cleanup (cleanup_request_data, &garbled);

  if (!VEC_empty (memory_write_request_s, garbled))
    {
      if (preserve_flash_p == flash_preserve)
	{
	  struct memory_write_request *r;

	  /* Read in the regions that must be preserved and add them
	     to the list of blocks to be written.  */
	  for (i = 0; VEC_iterate (memory_write_request_s, garbled, i, r); ++i)
	    {
	      gdb_assert (r->data == NULL);
	      r->data = xmalloc (r->end - r->begin);
	      err = target_read_memory (r->begin, r->data, r->end - r->begin);
	      if (err != 0)
		goto out;

	      VEC_safe_push (memory_write_request_s, flash, r);
	    }

	  qsort (VEC_address (memory_write_request_s, flash),
		 VEC_length (memory_write_request_s, flash),
		 sizeof (struct memory_write_request),
		 compare_block_starting_address);
	}
    }

  /* We could coalesce adjacent memory blocks here, to reduce the
     number of write requests for small sections.  However, we would
     have to reallocate and copy the data pointers, which could be
     large; large sections are more common in loadable objects than
     large numbers of small sections (although the reverse can be true
     in object files).  So, we issue at least one write request per
     passed struct memory_write_request.  The remote stub will still
     have the opportunity to batch flash requests.  */

  /* Write regular blocks.  */
  for (i = 0; VEC_iterate (memory_write_request_s, regular, i, r); ++i)
    {
      LONGEST len;

      len = target_write_with_progress (current_target.beneath,
					TARGET_OBJECT_MEMORY, NULL,
					r->data, r->begin, r->end - r->begin,
					progress_cb, r->baton);
      if (len < (LONGEST) (r->end - r->begin))
	{
	  /* Call error?  */
	  err = -1;
	  goto out;
	}
    }

  if (!VEC_empty (memory_write_request_s, erased))
    {
      /* Erase all pages.  */
      for (i = 0; VEC_iterate (memory_write_request_s, erased, i, r); ++i)
	target_flash_erase (r->begin, r->end - r->begin);

      /* Write flash data.  */
      for (i = 0; VEC_iterate (memory_write_request_s, flash, i, r); ++i)
	{
	  LONGEST len;

	  len = target_write_with_progress (&current_target,
					    TARGET_OBJECT_FLASH, NULL,
					    r->data, r->begin,
					    r->end - r->begin,
					    progress_cb, r->baton);
	  if (len < (LONGEST) (r->end - r->begin))
	    error (_("Error writing data to flash"));
	}

      target_flash_done ();
    }

 out:
  do_cleanups (back_to);

  return err;
}