/* Parts of target interface that deal with accessing memory and memory-like
   objects.

   Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011
   Free Software Foundation, Inc.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include "defs.h"
#include "vec.h"
#include "target.h"
#include "memory-map.h"

#include "gdb_assert.h"

#include <stdio.h>
#include <sys/time.h>

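/* qsort comparator that orders memory write requests by their
   starting address.  We compare explicitly rather than subtracting:
   BEGIN is a ULONGEST, so the difference of two of them may not fit
   in the int that qsort expects.  */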
static int
compare_block_starting_address (const void *a, const void *b)
{
  const struct memory_write_request *a_req = a;
  const struct memory_write_request *b_req = b;

  if (a_req->begin < b_req->begin)
    return -1;
  else if (a_req->begin == b_req->begin)
    return 0;
  else
    return 1;
}

/* Adds to RESULT all memory write requests from BLOCKS that are
   in the [BEGIN, END) range.

   If any memory request is only partially in the specified range,
   that part of the memory request will be added.  */
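
/* For illustration (hypothetical values): if BLOCKS holds requests
   covering [0x100, 0x200) and [0x300, 0x400), then

     claim_memory (blocks, &result, 0x180, 0x340);

   pushes two clipped entries onto *RESULT: [0x180, 0x200), with its
   data pointer advanced by 0x80 bytes, and [0x300, 0x340).  */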

static void
claim_memory (VEC(memory_write_request_s) *blocks,
	      VEC(memory_write_request_s) **result,
	      ULONGEST begin,
	      ULONGEST end)
{
  int i;
  ULONGEST claimed_begin;
  ULONGEST claimed_end;
  struct memory_write_request *r;

  for (i = 0; VEC_iterate (memory_write_request_s, blocks, i, r); ++i)
    {
      /* If the request doesn't overlap [BEGIN, END), skip it.  We
	 must handle END == 0 meaning the top of memory; we don't yet
	 check for R->end == 0, which would also mean the top of
	 memory, but there's an assertion in
	 target_write_memory_blocks which checks for that.  */

      if (begin >= r->end)
	continue;
      if (end != 0 && end <= r->begin)
	continue;

      claimed_begin = max (begin, r->begin);
      if (end == 0)
	claimed_end = r->end;
      else
	claimed_end = min (end, r->end);

      if (claimed_begin == r->begin && claimed_end == r->end)
	VEC_safe_push (memory_write_request_s, *result, r);
      else
	{
	  struct memory_write_request *n =
	    VEC_safe_push (memory_write_request_s, *result, NULL);

	  *n = *r;
	  n->begin = claimed_begin;
	  n->end = claimed_end;
	  n->data += claimed_begin - r->begin;
	}
    }
}

/* Given a vector of struct memory_write_request objects in BLOCKS,
   add the requests for flash memory into FLASH_BLOCKS, and those
   for regular memory into REGULAR_BLOCKS.  */

static void
split_regular_and_flash_blocks (VEC(memory_write_request_s) *blocks,
				VEC(memory_write_request_s) **regular_blocks,
				VEC(memory_write_request_s) **flash_blocks)
{
  struct mem_region *region;
  CORE_ADDR cur_address;

  /* This implementation runs in O(length(regions)*length(blocks)) time.
     However, in most cases the number of blocks will be small, so this does
     not matter.

     Note also that it's extremely unlikely that a memory write request
     will span more than one memory region; for safety, though, we handle
     such situations anyway.  */

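  /* Walk the memory map one region at a time.  A region whose upper
     bound is zero extends to the top of the address space; when we
     reach such a region, CUR_ADDRESS wraps to zero and the loop
     below terminates.  */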
  cur_address = 0;
  while (1)
    {
      VEC(memory_write_request_s) **r;

      region = lookup_mem_region (cur_address);
      r = region->attrib.mode == MEM_FLASH ? flash_blocks : regular_blocks;
      cur_address = region->hi;
      claim_memory (blocks, r, region->lo, region->hi);

      if (cur_address == 0)
	break;
    }
}

/* Given an ADDRESS, if BEGIN is non-NULL this function sets *BEGIN
   to the start of the flash block containing the address.  Similarly,
   if END is non-NULL *END will be set to the address one past the end
   of the block containing the address.  */
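
/* For example (hypothetical values): in a flash region whose
   blocksize is 0x1000, an ADDRESS of 0x1234 yields *BEGIN == 0x1000
   and *END == 0x2000.  */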

static void
block_boundaries (CORE_ADDR address, CORE_ADDR *begin, CORE_ADDR *end)
{
  struct mem_region *region;
  unsigned blocksize;

  region = lookup_mem_region (address);
  gdb_assert (region->attrib.mode == MEM_FLASH);
  blocksize = region->attrib.blocksize;
  if (begin)
    *begin = address / blocksize * blocksize;
  if (end)
    /* Round up to the start of the block following ADDRESS.  An
       address exactly on a block boundary belongs to the block that
       starts there, so *END must be one full block beyond it.  */
    *end = (address / blocksize + 1) * blocksize;
}

/* Given the list of memory requests to be WRITTEN, this function
   returns write requests covering each group of flash blocks which must
   be erased.  */
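
/* A hypothetical illustration: with a flash blocksize of 0x1000,
   write requests [0x1100, 0x1200) and [0x1f00, 0x2100) round out to
   the block ranges [0x1000, 0x2000) and [0x1000, 0x3000).  These
   overlap, so they are merged into a single erase request covering
   [0x1000, 0x3000).  */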

static VEC(memory_write_request_s) *
blocks_to_erase (VEC(memory_write_request_s) *written)
{
  unsigned i;
  struct memory_write_request *ptr;

  VEC(memory_write_request_s) *result = NULL;

  for (i = 0; VEC_iterate (memory_write_request_s, written, i, ptr); ++i)
    {
      CORE_ADDR begin, end;

      block_boundaries (ptr->begin, &begin, 0);
      block_boundaries (ptr->end - 1, 0, &end);

      if (!VEC_empty (memory_write_request_s, result)
	  && VEC_last (memory_write_request_s, result)->end >= begin)
	{
	  VEC_last (memory_write_request_s, result)->end = end;
	}
      else
	{
	  struct memory_write_request *n =
	    VEC_safe_push (memory_write_request_s, result, NULL);

	  memset (n, 0, sizeof (struct memory_write_request));
	  n->begin = begin;
	  n->end = end;
	}
    }

  return result;
}

/* Given ERASED_BLOCKS, a list of blocks that will be erased with
   flash erase commands, and WRITTEN_BLOCKS, the list of memory
   addresses that will be written, compute the set of memory addresses
   that will be erased but not rewritten (e.g. padding within a block
   which is only partially filled by "load").  */
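
/* A hypothetical illustration: if the block [0x1000, 0x2000) is
   erased but only [0x1400, 0x1800) of it is subsequently written,
   the result contains the two unwritten pieces [0x1000, 0x1400)
   and [0x1800, 0x2000).  */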

static VEC(memory_write_request_s) *
compute_garbled_blocks (VEC(memory_write_request_s) *erased_blocks,
			VEC(memory_write_request_s) *written_blocks)
{
  VEC(memory_write_request_s) *result = NULL;

  unsigned i, j;
  unsigned je = VEC_length (memory_write_request_s, written_blocks);
  struct memory_write_request *erased_p;

  /* Look at each erased memory_write_request in turn, and
     see what part of it is subsequently written to.

     This implementation is O(length(erased) * length(written)).  If
     the lists were sorted at this point it could be rewritten more
     efficiently, but the added complexity is generally not worthwhile.  */

  for (i = 0;
       VEC_iterate (memory_write_request_s, erased_blocks, i, erased_p);
       ++i)
    {
      /* Make a deep copy -- it will be modified inside the loop, but
	 we don't want to modify the original vector.  */
      struct memory_write_request erased = *erased_p;

      for (j = 0; j != je;)
	{
	  struct memory_write_request *written
	    = VEC_index (memory_write_request_s,
			 written_blocks, j);

	  /* Now try various cases.  */

	  /* If WRITTEN is fully to the left of ERASED, check the next
	     written memory_write_request.  */
	  if (written->end <= erased.begin)
	    {
	      ++j;
	      continue;
	    }

	  /* If WRITTEN is fully to the right of ERASED, then ERASED
	     is not written at all.  WRITTEN might affect other
	     blocks.  */
	  if (written->begin >= erased.end)
	    {
	      VEC_safe_push (memory_write_request_s, result, &erased);
	      goto next_erased;
	    }

	  /* If all of ERASED is completely written, we can move on to
	     the next erased region.  */
	  if (written->begin <= erased.begin
	      && written->end >= erased.end)
	    {
	      goto next_erased;
	    }

	  /* If there is an unwritten part at the beginning of ERASED,
	     then we should record that part and try this inner loop
	     again for the remainder.  */
	  if (written->begin > erased.begin)
	    {
	      struct memory_write_request *n =
		VEC_safe_push (memory_write_request_s, result, NULL);

	      memset (n, 0, sizeof (struct memory_write_request));
	      n->begin = erased.begin;
	      n->end = written->begin;
	      erased.begin = written->begin;
	      continue;
	    }

	  /* If there is an unwritten part at the end of ERASED, we
	     forget about the part that was written to and wait to see
	     if the next write request writes more of ERASED.  We can't
	     push it yet.  */
	  if (written->end < erased.end)
	    {
	      erased.begin = written->end;
	      ++j;
	      continue;
	    }
	}

      /* If we ran out of write requests without doing anything about
	 ERASED, then that means it's really erased.  */
      VEC_safe_push (memory_write_request_s, result, &erased);

    next_erased:
      ;
    }

  return result;
}

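/* Cleanup function which frees the DATA buffer of every request in
   the vector pointed to by P.  */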
static void
cleanup_request_data (void *p)
{
  VEC(memory_write_request_s) **v = p;
  struct memory_write_request *r;
  int i;

  for (i = 0; VEC_iterate (memory_write_request_s, *v, i, r); ++i)
    xfree (r->data);
}

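/* Cleanup function which frees the vector pointed to by P; the DATA
   buffers of its elements are not freed.  */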
static void
cleanup_write_requests_vector (void *p)
{
  VEC(memory_write_request_s) **v = p;

  VEC_free (memory_write_request_s, *v);
}

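/* A minimal caller sketch (hypothetical: LMA, SIZE and BUF stand in
   for values a loader would take from a section to be downloaded):

     VEC(memory_write_request_s) *reqs = NULL;
     struct memory_write_request r;

     r.begin = lma;
     r.end = lma + size;
     r.data = buf;
     r.baton = NULL;
     VEC_safe_push (memory_write_request_s, reqs, &r);

     if (target_write_memory_blocks (reqs, flash_preserve, NULL) != 0)
       error (_("Memory write failed."));
     VEC_free (memory_write_request_s, reqs);

   Passing NULL for PROGRESS_CB disables progress reporting.  */
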
int
target_write_memory_blocks (VEC(memory_write_request_s) *requests,
			    enum flash_preserve_mode preserve_flash_p,
			    void (*progress_cb) (ULONGEST, void *))
{
  struct cleanup *back_to = make_cleanup (null_cleanup, NULL);
  VEC(memory_write_request_s) *blocks = VEC_copy (memory_write_request_s,
						  requests);
  unsigned i;
  int err = 0;
  struct memory_write_request *r;
  VEC(memory_write_request_s) *regular = NULL;
  VEC(memory_write_request_s) *flash = NULL;
  VEC(memory_write_request_s) *erased, *garbled;

  /* END == 0 would represent wraparound: a write to the very last
     byte of the address space.  This file was not written with that
     possibility in mind.  This is fixable, but a lot of work for a
     rare problem; so for now, fail noisily here instead of obscurely
     later.  */
  for (i = 0; VEC_iterate (memory_write_request_s, requests, i, r); ++i)
    gdb_assert (r->end != 0);

  make_cleanup (cleanup_write_requests_vector, &blocks);

  /* Sort the blocks by their start address.  */
  qsort (VEC_address (memory_write_request_s, blocks),
	 VEC_length (memory_write_request_s, blocks),
	 sizeof (struct memory_write_request), compare_block_starting_address);

  /* Split the blocks into a list of regular memory blocks and a
     list of flash memory blocks.  */
  make_cleanup (cleanup_write_requests_vector, &regular);
  make_cleanup (cleanup_write_requests_vector, &flash);
  split_regular_and_flash_blocks (blocks, &regular, &flash);

  /* If a variable is added to forbid flash write, even during "load",
     it should be checked here.  Similarly, if this function is used
     for other situations besides "load" in which writing to flash
     is undesirable, that should be checked here.  */

  /* Find flash blocks to erase.  */
  erased = blocks_to_erase (flash);
  make_cleanup (cleanup_write_requests_vector, &erased);

  /* Find what flash regions will be erased, and not overwritten; then
     either preserve or discard the old contents.  */
  garbled = compute_garbled_blocks (erased, flash);
  make_cleanup (cleanup_request_data, &garbled);
  make_cleanup (cleanup_write_requests_vector, &garbled);

  if (!VEC_empty (memory_write_request_s, garbled))
    {
      if (preserve_flash_p == flash_preserve)
	{
	  struct memory_write_request *r;

	  /* Read in regions that must be preserved and add them to
	     the list of blocks to be written.  */
	  for (i = 0; VEC_iterate (memory_write_request_s, garbled, i, r); ++i)
	    {
	      gdb_assert (r->data == NULL);
	      r->data = xmalloc (r->end - r->begin);
	      err = target_read_memory (r->begin, r->data, r->end - r->begin);
	      if (err != 0)
		goto out;

	      VEC_safe_push (memory_write_request_s, flash, r);
	    }

	  qsort (VEC_address (memory_write_request_s, flash),
		 VEC_length (memory_write_request_s, flash),
		 sizeof (struct memory_write_request),
		 compare_block_starting_address);
	}
    }

  /* We could coalesce adjacent memory blocks here, to reduce the
     number of write requests for small sections.  However, we would
     have to reallocate and copy the data pointers, which could be
     large; large sections are more common in loadable objects than
     large numbers of small sections (although the reverse can be true
     in object files).  So, we issue at least one write request per
     passed struct memory_write_request.  The remote stub will still
     have the opportunity to batch flash requests.  */

  /* Write regular blocks.  */
  for (i = 0; VEC_iterate (memory_write_request_s, regular, i, r); ++i)
    {
      LONGEST len;

      len = target_write_with_progress (current_target.beneath,
					TARGET_OBJECT_MEMORY, NULL,
					r->data, r->begin, r->end - r->begin,
					progress_cb, r->baton);
      if (len < (LONGEST) (r->end - r->begin))
	{
	  /* Should we call error here instead of just returning?  */
	  err = -1;
	  goto out;
	}
    }

  if (!VEC_empty (memory_write_request_s, erased))
    {
      /* Erase all pages.  */
      for (i = 0; VEC_iterate (memory_write_request_s, erased, i, r); ++i)
	target_flash_erase (r->begin, r->end - r->begin);

      /* Write flash data.  */
      for (i = 0; VEC_iterate (memory_write_request_s, flash, i, r); ++i)
	{
	  LONGEST len;

	  len = target_write_with_progress (&current_target,
					    TARGET_OBJECT_FLASH, NULL,
					    r->data, r->begin,
					    r->end - r->begin,
					    progress_cb, r->baton);
	  if (len < (LONGEST) (r->end - r->begin))
	    error (_("Error writing data to flash"));
	}

      target_flash_done ();
    }

 out:
  do_cleanups (back_to);

  return err;
}