/* Parts of target interface that deal with accessing memory and memory-like
   objects.

   Copyright (C) 2006, 2007, 2008, 2009 Free Software Foundation, Inc.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include "defs.h"
#include "vec.h"
#include "target.h"
#include "memory-map.h"

#include "gdb_assert.h"

#include <stdio.h>
#include <sys/time.h>

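/* qsort comparator: order two struct memory_write_request objects by
   ascending starting address.  */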
static int
compare_block_starting_address (const void *a, const void *b)
{
  const struct memory_write_request *a_req = a;
  const struct memory_write_request *b_req = b;

  if (a_req->begin < b_req->begin)
    return -1;
  else if (a_req->begin == b_req->begin)
    return 0;
  else
    return 1;
}

/* Add to RESULT all memory write requests from BLOCKS that fall in
   the [BEGIN, END) range.

   If a memory request is only partially in the specified range, the
   overlapping part of the request is added.  */
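/* For illustration: given a request covering [0x100, 0x200) and the
   range [0x180, 0x1c0), the pushed result is a copy of the request
   narrowed to begin == 0x180 and end == 0x1c0, with its DATA pointer
   advanced by 0x80 bytes so that it still corresponds to the new
   starting address.  */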

static void
claim_memory (VEC(memory_write_request_s) *blocks,
	      VEC(memory_write_request_s) **result,
	      ULONGEST begin,
	      ULONGEST end)
{
  int i;
  ULONGEST claimed_begin;
  ULONGEST claimed_end;
  struct memory_write_request *r;

  for (i = 0; VEC_iterate (memory_write_request_s, blocks, i, r); ++i)
    {
      /* If the request doesn't overlap [BEGIN, END), skip it.  We
	 must handle END == 0 meaning the top of memory; we don't yet
	 check for R->end == 0, which would also mean the top of
	 memory, but there's an assertion in
	 target_write_memory_blocks which checks for that.  */

      if (begin >= r->end)
	continue;
      if (end != 0 && end <= r->begin)
	continue;

      claimed_begin = max (begin, r->begin);
      if (end == 0)
	claimed_end = r->end;
      else
	claimed_end = min (end, r->end);

      if (claimed_begin == r->begin && claimed_end == r->end)
	VEC_safe_push (memory_write_request_s, *result, r);
      else
	{
	  struct memory_write_request *n =
	    VEC_safe_push (memory_write_request_s, *result, NULL);
	  *n = *r;
	  n->begin = claimed_begin;
	  n->end = claimed_end;
	  n->data += claimed_begin - r->begin;
	}
    }
}

/* Given a vector of struct memory_write_request objects in BLOCKS,
   add memory requests for flash memory into FLASH_BLOCKS, and for
   regular memory to REGULAR_BLOCKS.  */

static void
split_regular_and_flash_blocks (VEC(memory_write_request_s) *blocks,
				VEC(memory_write_request_s) **regular_blocks,
				VEC(memory_write_request_s) **flash_blocks)
{
  struct mem_region *region;
  CORE_ADDR cur_address;

  /* This implementation runs in O(length(regions)*length(blocks)) time.
     However, in most cases the number of blocks will be small, so this
     does not matter.

     Note also that it is extremely unlikely that a memory write request
     will span more than one memory region; however, for safety we handle
     such situations.  */

  cur_address = 0;
  while (1)
    {
      VEC(memory_write_request_s) **r;
      region = lookup_mem_region (cur_address);

      r = region->attrib.mode == MEM_FLASH ? flash_blocks : regular_blocks;
      cur_address = region->hi;
      claim_memory (blocks, r, region->lo, region->hi);

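      /* A region whose HI is zero extends to the top of the address
	 space, so CUR_ADDRESS wrapping back to zero means every
	 region has been visited.  */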
      if (cur_address == 0)
	break;
    }
}

/* Given an ADDRESS, if BEGIN is non-NULL this function sets *BEGIN
   to the start of the flash block containing the address.  Similarly,
   if END is non-NULL *END will be set to the address one past the end
   of the block containing the address.  */
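/* For illustration, assuming a flash region with a 0x1000-byte
   blocksize: ADDRESS 0x2345 lies in the block [0x2000, 0x3000), so
   *BEGIN is set to 0x2000 and *END to 0x3000.  A block-aligned
   ADDRESS of 0x2000 yields the same pair.  */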

static void
block_boundaries (CORE_ADDR address, CORE_ADDR *begin, CORE_ADDR *end)
{
  struct mem_region *region;
  unsigned blocksize;

  region = lookup_mem_region (address);
  gdb_assert (region->attrib.mode == MEM_FLASH);
  blocksize = region->attrib.blocksize;
  if (begin)
    *begin = address / blocksize * blocksize;
  /* Compute one past the end of the block containing ADDRESS; this
     must advance to the next block even when ADDRESS is itself
     block-aligned.  */
  if (end)
    *end = address / blocksize * blocksize + blocksize;
}

/* Given the list of memory requests to be WRITTEN, sorted by
   increasing starting address, return write requests covering each
   group of flash blocks that must be erased.  */
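/* For illustration, with 0x1000-byte flash blocks: writes covering
   [0x1010, 0x1020) and [0x1ff0, 0x2010) occupy the block ranges
   [0x1000, 0x2000) and [0x1000, 0x3000) respectively; the two erase
   regions overlap, so they are merged into a single request for
   [0x1000, 0x3000).  */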

static VEC(memory_write_request_s) *
blocks_to_erase (VEC(memory_write_request_s) *written)
{
  unsigned i;
  struct memory_write_request *ptr;

  VEC(memory_write_request_s) *result = NULL;

  for (i = 0; VEC_iterate (memory_write_request_s, written, i, ptr); ++i)
    {
      CORE_ADDR begin, end;

      block_boundaries (ptr->begin, &begin, 0);
      block_boundaries (ptr->end - 1, 0, &end);

      if (!VEC_empty (memory_write_request_s, result)
	  && VEC_last (memory_write_request_s, result)->end >= begin)
	{
	  VEC_last (memory_write_request_s, result)->end = end;
	}
      else
	{
	  struct memory_write_request *n =
	    VEC_safe_push (memory_write_request_s, result, NULL);
	  memset (n, 0, sizeof (struct memory_write_request));
	  n->begin = begin;
	  n->end = end;
	}
    }

  return result;
}

/* Given ERASED_BLOCKS, a list of blocks that will be erased with
   flash erase commands, and WRITTEN_BLOCKS, the list of memory
   addresses that will be written, compute the set of memory addresses
   that will be erased but not rewritten (e.g. padding within a block
   which is only partially filled by "load").  */
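/* For illustration: if [0x1000, 0x2000) is to be erased but only
   [0x1400, 0x1800) is subsequently written, the result contains
   [0x1000, 0x1400) and [0x1800, 0x2000) -- the padding that the
   erase would garble unless it is read back and rewritten.  */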

static VEC(memory_write_request_s) *
compute_garbled_blocks (VEC(memory_write_request_s) *erased_blocks,
			VEC(memory_write_request_s) *written_blocks)
{
  VEC(memory_write_request_s) *result = NULL;

  unsigned i, j;
  unsigned je = VEC_length (memory_write_request_s, written_blocks);
  struct memory_write_request *erased_p;

  /* Look at each erased memory_write_request in turn, and
     see what part of it is subsequently written to.

     This implementation is O(length(erased) * length(written)).  If
     the lists are sorted at this point, it could be rewritten more
     efficiently, but the added complexity is not generally worthwhile.  */

  for (i = 0;
       VEC_iterate (memory_write_request_s, erased_blocks, i, erased_p);
       ++i)
    {
      /* Make a deep copy -- it will be modified inside the loop, but
	 we don't want to modify the original vector.  */
      struct memory_write_request erased = *erased_p;

      for (j = 0; j != je;)
	{
	  struct memory_write_request *written
	    = VEC_index (memory_write_request_s,
			 written_blocks, j);

	  /* Now try various cases.  */

	  /* If WRITTEN is fully to the left of ERASED, check the next
	     written memory_write_request.  */
	  if (written->end <= erased.begin)
	    {
	      ++j;
	      continue;
	    }

	  /* If WRITTEN is fully to the right of ERASED, then ERASED
	     is not written at all.  WRITTEN might affect other
	     blocks.  */
	  if (written->begin >= erased.end)
	    {
	      VEC_safe_push (memory_write_request_s, result, &erased);
	      goto next_erased;
	    }

	  /* If all of ERASED is completely written, we can move on to
	     the next erased region.  */
	  if (written->begin <= erased.begin
	      && written->end >= erased.end)
	    {
	      goto next_erased;
	    }

	  /* If there is an unwritten part at the beginning of ERASED,
	     then we should record that part and try this inner loop
	     again for the remainder.  */
	  if (written->begin > erased.begin)
	    {
	      struct memory_write_request *n =
		VEC_safe_push (memory_write_request_s, result, NULL);
	      memset (n, 0, sizeof (struct memory_write_request));
	      n->begin = erased.begin;
	      n->end = written->begin;
	      erased.begin = written->begin;
	      continue;
	    }

	  /* If there is an unwritten part at the end of ERASED, we
	     forget about the part that was written to and wait to see
	     if the next write request writes more of ERASED.  We can't
	     push it yet.  */
	  if (written->end < erased.end)
	    {
	      erased.begin = written->end;
	      ++j;
	      continue;
	    }
	}

      /* If we ran out of write requests without doing anything about
	 ERASED, then that means it's really erased.  */
      VEC_safe_push (memory_write_request_s, result, &erased);

    next_erased:
      ;
    }

  return result;
}

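/* Cleanup function: free the DATA buffer of every request in the
   vector of memory_write_request objects pointed to by P.  */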
static void
cleanup_request_data (void *p)
{
  VEC(memory_write_request_s) **v = p;
  struct memory_write_request *r;
  int i;

  for (i = 0; VEC_iterate (memory_write_request_s, *v, i, r); ++i)
    xfree (r->data);
}

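/* Cleanup function: free the vector of memory_write_request objects
   pointed to by P.  */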
static void
cleanup_write_requests_vector (void *p)
{
  VEC(memory_write_request_s) **v = p;
  VEC_free (memory_write_request_s, *v);
}

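/* Write the requested blocks of memory to the target, erasing and
   rewriting flash regions as needed; declared in target.h.  */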
int
target_write_memory_blocks (VEC(memory_write_request_s) *requests,
			    enum flash_preserve_mode preserve_flash_p,
			    void (*progress_cb) (ULONGEST, void *))
{
  struct cleanup *back_to = make_cleanup (null_cleanup, NULL);
  VEC(memory_write_request_s) *blocks = VEC_copy (memory_write_request_s,
						  requests);
  unsigned i;
  int err = 0;
  struct memory_write_request *r;
  VEC(memory_write_request_s) *regular = NULL;
  VEC(memory_write_request_s) *flash = NULL;
  VEC(memory_write_request_s) *erased, *garbled;

  /* END == 0 would represent wraparound: a write to the very last
     byte of the address space.  This file was not written with that
     possibility in mind.  This is fixable, but a lot of work for a
     rare problem; so for now, fail noisily here instead of obscurely
     later.  */
  for (i = 0; VEC_iterate (memory_write_request_s, requests, i, r); ++i)
    gdb_assert (r->end != 0);

  make_cleanup (cleanup_write_requests_vector, &blocks);

  /* Sort the blocks by their start address.  */
  qsort (VEC_address (memory_write_request_s, blocks),
	 VEC_length (memory_write_request_s, blocks),
	 sizeof (struct memory_write_request),
	 compare_block_starting_address);

  /* Split the blocks into a list of regular memory blocks and a list
     of flash memory blocks.  */
  make_cleanup (cleanup_write_requests_vector, &regular);
  make_cleanup (cleanup_write_requests_vector, &flash);
  split_regular_and_flash_blocks (blocks, &regular, &flash);

  /* If a variable is added to forbid flash write, even during "load",
     it should be checked here.  Similarly, if this function is used
     for other situations besides "load" in which writing to flash
     is undesirable, that should be checked here.  */

  /* Find the flash blocks to erase.  */
  erased = blocks_to_erase (flash);
  make_cleanup (cleanup_write_requests_vector, &erased);

  /* Find which flash regions will be erased but not overwritten; then
     either preserve or discard their old contents.  */
  garbled = compute_garbled_blocks (erased, flash);
  make_cleanup (cleanup_request_data, &garbled);
  make_cleanup (cleanup_write_requests_vector, &garbled);

  if (!VEC_empty (memory_write_request_s, garbled))
    {
      if (preserve_flash_p == flash_preserve)
	{
	  struct memory_write_request *r;

	  /* Read in the regions that must be preserved and add them
	     to the list of blocks to be written back to flash.  */
	  for (i = 0; VEC_iterate (memory_write_request_s, garbled, i, r); ++i)
	    {
	      gdb_assert (r->data == NULL);
	      r->data = xmalloc (r->end - r->begin);
	      err = target_read_memory (r->begin, r->data, r->end - r->begin);
	      if (err != 0)
		goto out;

	      VEC_safe_push (memory_write_request_s, flash, r);
	    }

	  qsort (VEC_address (memory_write_request_s, flash),
		 VEC_length (memory_write_request_s, flash),
		 sizeof (struct memory_write_request),
		 compare_block_starting_address);
	}
    }

  /* We could coalesce adjacent memory blocks here, to reduce the
     number of write requests for small sections.  However, we would
     have to reallocate and copy the data pointers, which could be
     large; large sections are more common in loadable objects than
     large numbers of small sections (although the reverse can be true
     in object files).  So, we issue at least one write request per
     passed struct memory_write_request.  The remote stub will still
     have the opportunity to batch flash requests.  */

  /* Write regular blocks.  */
  for (i = 0; VEC_iterate (memory_write_request_s, regular, i, r); ++i)
    {
      LONGEST len;

      len = target_write_with_progress (current_target.beneath,
					TARGET_OBJECT_MEMORY, NULL,
					r->data, r->begin, r->end - r->begin,
					progress_cb, r->baton);
      if (len < (LONGEST) (r->end - r->begin))
	{
	  /* Call error?  */
	  err = -1;
	  goto out;
	}
    }

  if (!VEC_empty (memory_write_request_s, erased))
    {
      /* Erase all pages.  */
      for (i = 0; VEC_iterate (memory_write_request_s, erased, i, r); ++i)
	target_flash_erase (r->begin, r->end - r->begin);

      /* Write flash data.  */
      for (i = 0; VEC_iterate (memory_write_request_s, flash, i, r); ++i)
	{
	  LONGEST len;

	  len = target_write_with_progress (&current_target,
					    TARGET_OBJECT_FLASH, NULL,
					    r->data, r->begin, r->end - r->begin,
					    progress_cb, r->baton);
	  if (len < (LONGEST) (r->end - r->begin))
	    error (_("Error writing data to flash"));
	}

      target_flash_done ();
    }

 out:
  do_cleanups (back_to);

  return err;
}
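
/* Illustrative usage sketch (an assumption for exposition, not code
   from this file): a loader builds one request per loadable section
   and hands the vector to target_write_memory_blocks, roughly:

     VEC(memory_write_request_s) *file_requests = NULL;
     struct memory_write_request *r
       = VEC_safe_push (memory_write_request_s, file_requests, NULL);

     memset (r, 0, sizeof (*r));
     r->begin = lma;                          (section load address)
     r->end = lma + size;
     r->data = xmalloc (size);                (filled from the file)

     err = target_write_memory_blocks (file_requests, flash_preserve,
				       progress_cb);
     VEC_free (memory_write_request_s, file_requests);

   Here LMA, SIZE and PROGRESS_CB are placeholders supplied by the
   caller.  */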