xref: /netbsd/external/gpl3/gdb.old/dist/gdb/value.c (revision 184b2d41)
1 /* Low level packing and unpacking of values for GDB, the GNU Debugger.
2 
3    Copyright (C) 1986-2020 Free Software Foundation, Inc.
4 
5    This file is part of GDB.
6 
7    This program is free software; you can redistribute it and/or modify
8    it under the terms of the GNU General Public License as published by
9    the Free Software Foundation; either version 3 of the License, or
10    (at your option) any later version.
11 
12    This program is distributed in the hope that it will be useful,
13    but WITHOUT ANY WARRANTY; without even the implied warranty of
14    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
15    GNU General Public License for more details.
16 
17    You should have received a copy of the GNU General Public License
18    along with this program.  If not, see <http://www.gnu.org/licenses/>.  */
19 
20 #include "defs.h"
21 #include "arch-utils.h"
22 #include "symtab.h"
23 #include "gdbtypes.h"
24 #include "value.h"
25 #include "gdbcore.h"
26 #include "command.h"
27 #include "gdbcmd.h"
28 #include "target.h"
29 #include "language.h"
30 #include "demangle.h"
31 #include "regcache.h"
32 #include "block.h"
33 #include "target-float.h"
34 #include "objfiles.h"
35 #include "valprint.h"
36 #include "cli/cli-decode.h"
37 #include "extension.h"
38 #include <ctype.h>
39 #include "tracepoint.h"
40 #include "cp-abi.h"
41 #include "user-regs.h"
42 #include <algorithm>
43 #include "completer.h"
44 #include "gdbsupport/selftest.h"
45 #include "gdbsupport/array-view.h"
46 #include "cli/cli-style.h"
47 
48 /* Definition of a user function.  */
49 struct internal_function
50 {
51   /* The name of the function.  It is a bit odd to have this in the
52      function itself -- the user might use a differently-named
53      convenience variable to hold the function.  */
54   char *name;
55 
56   /* The handler.  */
57   internal_function_fn handler;
58 
59   /* User data for the handler.  */
60   void *cookie;
61 };
62 
63 /* Defines an [OFFSET, OFFSET + LENGTH) range.  */
64 
65 struct range
66 {
67   /* Lowest offset in the range.  */
68   LONGEST offset;
69 
70   /* Length of the range.  */
71   LONGEST length;
72 
73   /* Returns true if THIS is strictly less than OTHER, useful for
74      searching.  We keep ranges sorted by offset and coalesce
75      overlapping and contiguous ranges, so this just compares the
76      starting offset.  */
77 
78   bool operator< (const range &other) const
79   {
80     return offset < other.offset;
81   }
82 
83   /* Returns true if THIS is equal to OTHER.  */
84   bool operator== (const range &other) const
85   {
86     return offset == other.offset && length == other.length;
87   }
88 };
89 
90 /* Returns true if the ranges defined by [offset1, offset1+len1) and
91    [offset2, offset2+len2) overlap.  */
92 
93 static int
94 ranges_overlap (LONGEST offset1, LONGEST len1,
95 		LONGEST offset2, LONGEST len2)
96 {
97   ULONGEST h, l;
98 
99   l = std::max (offset1, offset2);
100   h = std::min (offset1 + len1, offset2 + len2);
101   return (l < h);
102 }
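
/* A small illustrative sketch (not part of the original file): because the
   ranges are half-open, ranges that merely touch do not overlap.  The
   expected results follow directly from the max/min test above.  */

static void
ranges_overlap_examples (void)
{
  /* [0, 4) and [2, 6) share [2, 4), so they overlap.  */
  gdb_assert (ranges_overlap (0, 4, 2, 4));

  /* [0, 4) and [4, 6) only touch at 4 (l == h), so they do not.  */
  gdb_assert (!ranges_overlap (0, 4, 4, 2));
}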
103 
104 /* Returns true if RANGES contains any range that overlaps [OFFSET,
105    OFFSET+LENGTH).  */
106 
107 static int
108 ranges_contain (const std::vector<range> &ranges, LONGEST offset,
109 		LONGEST length)
110 {
111   range what;
112 
113   what.offset = offset;
114   what.length = length;
115 
116   /* We keep ranges sorted by offset and coalesce overlapping and
117      contiguous ranges, so to check if a range list contains a given
118      range, we can do a binary search for the position the given range
119      would be inserted if we only considered the starting OFFSET of
120      ranges.  We call that position I.  Since we also have LENGTH to
121      care for (this is a range after all), we need to check if the
122      _previous_ range overlaps the I range.  E.g.,
123 
124          R
125          |---|
126        |---|    |---|  |------| ... |--|
127        0        1      2            N
128 
129        I=1
130 
131      In the case above, the binary search would return `I=1', meaning,
132      this OFFSET should be inserted at position 1, and the current
133      position 1 should be pushed further (and before 2).  But, `0'
134      overlaps with R.
135 
136      Then we need to check whether R overlaps the I range itself.
137      E.g.,
138 
139               R
140               |---|
141        |---|    |---|  |-------| ... |--|
142        0        1      2             N
143 
144        I=1
145   */
146 
147 
148   auto i = std::lower_bound (ranges.begin (), ranges.end (), what);
149 
150   if (i > ranges.begin ())
151     {
152       const struct range &bef = *(i - 1);
153 
154       if (ranges_overlap (bef.offset, bef.length, offset, length))
155 	return 1;
156     }
157 
158   if (i < ranges.end ())
159     {
160       const struct range &r = *i;
161 
162       if (ranges_overlap (r.offset, r.length, offset, length))
163 	return 1;
164     }
165 
166   return 0;
167 }
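
/* An illustrative sketch of the lookup above (not part of the original
   file): with the sorted, coalesced list { [0, 4), [10, 14) }, a query is
   "contained" as soon as it overlaps any element, whether the binary
   search lands on that element or on its predecessor.  */

static void
ranges_contain_examples (void)
{
  std::vector<range> rs = { { 0, 4 }, { 10, 4 } };

  gdb_assert (ranges_contain (rs, 2, 3));	/* Overlaps [0, 4).  */
  gdb_assert (ranges_contain (rs, 3, 10));	/* Overlaps both elements.  */
  gdb_assert (!ranges_contain (rs, 4, 6));	/* Lies entirely in the gap.  */
}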
168 
169 static struct cmd_list_element *functionlist;
170 
171 /* Note that the fields in this structure are arranged to save a bit
172    of memory.  */
173 
174 struct value
175 {
176   explicit value (struct type *type_)
177     : modifiable (1),
178       lazy (1),
179       initialized (1),
180       stack (0),
181       type (type_),
182       enclosing_type (type_)
183   {
184   }
185 
186   ~value ()
187   {
188     if (VALUE_LVAL (this) == lval_computed)
189       {
190 	const struct lval_funcs *funcs = location.computed.funcs;
191 
192 	if (funcs->free_closure)
193 	  funcs->free_closure (this);
194       }
195     else if (VALUE_LVAL (this) == lval_xcallable)
196       delete location.xm_worker;
197   }
198 
199   DISABLE_COPY_AND_ASSIGN (value);
200 
201   /* Type of value; either not an lval, or one of the various
202      different possible kinds of lval.  */
203   enum lval_type lval = not_lval;
204 
205   /* Is it modifiable?  Only relevant if lval != not_lval.  */
206   unsigned int modifiable : 1;
207 
208   /* If zero, contents of this value are in the contents field.  If
209      nonzero, contents are in inferior.  If the lval field is lval_memory,
210      the contents are in inferior memory at location.address plus offset.
211      The lval field may also be lval_register.
212 
213      WARNING: This field is used by the code which handles watchpoints
214      (see breakpoint.c) to decide whether a particular value can be
215      watched by hardware watchpoints.  If the lazy flag is set for
216      some member of a value chain, it is assumed that this member of
217      the chain doesn't need to be watched as part of watching the
218      value itself.  This is how GDB avoids watching the entire struct
219      or array when the user wants to watch a single struct member or
220      array element.  If you ever change the way lazy flag is set and
221      reset, be sure to consider this use as well!  */
222   unsigned int lazy : 1;
223 
224   /* If value is a variable, is it initialized or not.  */
225   unsigned int initialized : 1;
226 
227   /* If value is from the stack.  If this is set, read_stack will be
228      used instead of read_memory to enable extra caching.  */
229   unsigned int stack : 1;
230 
231   /* Location of value (if lval).  */
232   union
233   {
234     /* If lval == lval_memory, this is the address in the inferior.  */
235     CORE_ADDR address;
236 
237     /* If lval == lval_register, the value is from a register.  */
238     struct
239     {
240       /* Register number.  */
241       int regnum;
242       /* Frame ID of "next" frame to which a register value is relative.
243 	 If the register value is found relative to frame F, then the
244 	 frame id of F->next will be stored in next_frame_id.  */
245       struct frame_id next_frame_id;
246     } reg;
247 
248     /* Pointer to internal variable.  */
249     struct internalvar *internalvar;
250 
251     /* Pointer to xmethod worker.  */
252     struct xmethod_worker *xm_worker;
253 
254     /* If lval == lval_computed, this is a set of function pointers
255        to use to access and describe the value, and a closure pointer
256        for them to use.  */
257     struct
258     {
259       /* Functions to call.  */
260       const struct lval_funcs *funcs;
261 
262       /* Closure for those functions to use.  */
263       void *closure;
264     } computed;
265   } location {};
266 
267   /* Describes offset of a value within lval of a structure in target
268      addressable memory units.  Note also the member embedded_offset
269      below.  */
270   LONGEST offset = 0;
271 
272   /* Only used for bitfields; number of bits contained in them.  */
273   LONGEST bitsize = 0;
274 
275   /* Only used for bitfields; position of start of field.  For
276      little-endian targets, it is the position of the LSB.  For
277      big-endian targets, it is the position of the MSB.  */
278   LONGEST bitpos = 0;
279 
280   /* The number of references to this value.  When a value is created,
281      the value chain holds a reference, so REFERENCE_COUNT is 1.  If
282      release_value is called, this value is removed from the chain but
283      the caller of release_value now has a reference to this value.
284      The caller must eventually drop that reference via value_decref.  */
285   int reference_count = 1;
286 
287   /* Only used for bitfields; the containing value.  This allows a
288      single read from the target when displaying multiple
289      bitfields.  */
290   value_ref_ptr parent;
291 
292   /* Type of the value.  */
293   struct type *type;
294 
295   /* If a value represents a C++ object, then the `type' field gives
296      the object's compile-time type.  If the object actually belongs
297      to some class derived from `type', perhaps with other base
298      classes and additional members, then `type' is just a subobject
299      of the real thing, and the full object is probably larger than
300      `type' would suggest.
301 
302      If `type' is a dynamic class (i.e. one with a vtable), then GDB
303      can actually determine the object's run-time type by looking at
304      the run-time type information in the vtable.  When this
305      information is available, we may elect to read in the entire
306      object, for several reasons:
307 
308      - When printing the value, the user would probably rather see the
309      full object, not just the limited portion apparent from the
310      compile-time type.
311 
312      - If `type' has virtual base classes, then even printing `type'
313      alone may require reaching outside the `type' portion of the
314      object to wherever the virtual base class has been stored.
315 
316      When we store the entire object, `enclosing_type' is the run-time
317      type -- the complete object -- and `embedded_offset' is the
318      offset of `type' within that larger type, in target addressable memory
319      units.  The value_contents() macro takes `embedded_offset' into account,
320      so most GDB code continues to see the `type' portion of the value, just
321      as the inferior would.
322 
323      If `type' is a pointer to an object, then `enclosing_type' is a
324      pointer to the object's run-time type, and `pointed_to_offset' is
325      the offset in target addressable memory units from the full object
326      to the pointed-to object -- that is, the value `embedded_offset' would
327      have if we followed the pointer and fetched the complete object.
328      (I don't really see the point.  Why not just determine the
329      run-time type when you indirect, and avoid the special case?  The
330      contents don't matter until you indirect anyway.)
331 
332      If we're not doing anything fancy, `enclosing_type' is equal to
333      `type', and `embedded_offset' is zero, so everything works
334      normally.  */
335   struct type *enclosing_type;
336   LONGEST embedded_offset = 0;
337   LONGEST pointed_to_offset = 0;
338 
339   /* Actual contents of the value.  Target byte-order.  NULL or not
340      valid if lazy is nonzero.  */
341   gdb::unique_xmalloc_ptr<gdb_byte> contents;
342 
343   /* Unavailable ranges in CONTENTS.  We mark unavailable ranges,
344      rather than available, since the common and default case is for a
345      value to be available.  This is filled in at value read time.
346      The unavailable ranges are tracked in bits.  Note that a contents
347      bit that has been optimized out doesn't really exist in the
348      program, so it can't be marked unavailable either.  */
349   std::vector<range> unavailable;
350 
351   /* Likewise, but for optimized out contents (a chunk of the value of
352      a variable that does not actually exist in the program).  If LVAL
353      is lval_register, this is a register ($pc, $sp, etc., never a
354      program variable) that has not been saved in the frame.  Not
355      saved registers and optimized-out program variables values are
356      treated pretty much the same, except not-saved registers have a
357      different string representation and related error strings.  */
358   std::vector<range> optimized_out;
359 };
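
/* A minimal, self-contained illustration of the enclosing_type /
   embedded_offset relationship documented above, using hypothetical C++
   types rather than GDB values: `Base' plays the role of `type', `Derived'
   the role of `enclosing_type', and the computed byte distance the role of
   `embedded_offset'.  */

struct Base { virtual ~Base () {} int b; };
struct Other { virtual ~Other () {} int o; };
struct Derived : Other, Base { int d; };

static void
embedded_offset_sketch (void)
{
  Derived full;			/* The complete run-time object.  */
  Base *view = &full;		/* The compile-time `type' view of it.  */

  /* Analogue of embedded_offset: where the Base subobject sits inside the
     full Derived object, in bytes (typically nonzero here, since Base is
     Derived's second base class).  */
  ptrdiff_t embedded
    = reinterpret_cast<char *> (view) - reinterpret_cast<char *> (&full);

  gdb_assert (embedded >= 0);
}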
360 
361 /* See value.h.  */
362 
363 struct gdbarch *
364 get_value_arch (const struct value *value)
365 {
366   return get_type_arch (value_type (value));
367 }
368 
369 int
370 value_bits_available (const struct value *value, LONGEST offset, LONGEST length)
371 {
372   gdb_assert (!value->lazy);
373 
374   return !ranges_contain (value->unavailable, offset, length);
375 }
376 
377 int
378 value_bytes_available (const struct value *value,
379 		       LONGEST offset, LONGEST length)
380 {
381   return value_bits_available (value,
382 			       offset * TARGET_CHAR_BIT,
383 			       length * TARGET_CHAR_BIT);
384 }
385 
386 int
387 value_bits_any_optimized_out (const struct value *value, int bit_offset, int bit_length)
388 {
389   gdb_assert (!value->lazy);
390 
391   return ranges_contain (value->optimized_out, bit_offset, bit_length);
392 }
393 
394 int
395 value_entirely_available (struct value *value)
396 {
397   /* We can only tell whether the whole value is available when we try
398      to read it.  */
399   if (value->lazy)
400     value_fetch_lazy (value);
401 
402   if (value->unavailable.empty ())
403     return 1;
404   return 0;
405 }
406 
407 /* Returns true if VALUE is entirely covered by RANGES.  If the value
408    is lazy, it'll be read now.  Note that RANGES is a reference to a
409    vector owned by VALUE, so reading the value may modify it.  */
410 
411 static int
412 value_entirely_covered_by_range_vector (struct value *value,
413 					const std::vector<range> &ranges)
414 {
415   /* We can only tell whether the whole value is optimized out /
416      unavailable when we try to read it.  */
417   if (value->lazy)
418     value_fetch_lazy (value);
419 
420   if (ranges.size () == 1)
421     {
422       const struct range &t = ranges[0];
423 
424       if (t.offset == 0
425 	  && t.length == (TARGET_CHAR_BIT
426 			  * TYPE_LENGTH (value_enclosing_type (value))))
427 	return 1;
428     }
429 
430   return 0;
431 }
432 
433 int
434 value_entirely_unavailable (struct value *value)
435 {
436   return value_entirely_covered_by_range_vector (value, value->unavailable);
437 }
438 
439 int
440 value_entirely_optimized_out (struct value *value)
441 {
442   return value_entirely_covered_by_range_vector (value, value->optimized_out);
443 }
444 
445 /* Insert into the vector pointed to by VECTORP the bit range starting at
446    OFFSET bits and extending for the next LENGTH bits.  */
447 
448 static void
449 insert_into_bit_range_vector (std::vector<range> *vectorp,
450 			      LONGEST offset, LONGEST length)
451 {
452   range newr;
453 
454   /* Insert the range sorted.  If there's overlap or the new range
455      would be contiguous with an existing range, merge.  */
456 
457   newr.offset = offset;
458   newr.length = length;
459 
460   /* Do a binary search for the position the given range would be
461      inserted if we only considered the starting OFFSET of ranges.
462      Call that position I.  Since we also have LENGTH to care for
463      (this is a range after all), we need to check if the _previous_
464      range overlaps the I range.  E.g., calling R the new range:
465 
466        #1 - overlaps with previous
467 
468 	   R
469 	   |-...-|
470 	 |---|     |---|  |------| ... |--|
471 	 0         1      2            N
472 
473 	 I=1
474 
475      In the case #1 above, the binary search would return `I=1',
476      meaning, this OFFSET should be inserted at position 1, and the
477      current position 1 should be pushed further (and become 2).  But,
478      note that `0' overlaps with R, so we want to merge them.
479 
480      A similar consideration needs to be taken if the new range would
481      be contiguous with the previous range:
482 
483        #2 - contiguous with previous
484 
485 	    R
486 	    |-...-|
487 	 |--|       |---|  |------| ... |--|
488 	 0          1      2            N
489 
490 	 I=1
491 
492      If there's no overlap with the previous range, as in:
493 
494        #3 - not overlapping and not contiguous
495 
496 	       R
497 	       |-...-|
498 	  |--|         |---|  |------| ... |--|
499 	  0            1      2            N
500 
501 	 I=1
502 
503      or if I is 0:
504 
505        #4 - R is the range with lowest offset
506 
507 	  R
508 	 |-...-|
509 	         |--|       |---|  |------| ... |--|
510 	         0          1      2            N
511 
512 	 I=0
513 
514      ... we just push the new range to I.
515 
516      All the 4 cases above need to consider that the new range may
517      also overlap several of the ranges that follow, or that R may be
518      contiguous with the following range, and merge.  E.g.,
519 
520        #5 - overlapping following ranges
521 
522 	  R
523 	 |------------------------|
524 	         |--|       |---|  |------| ... |--|
525 	         0          1      2            N
526 
527 	 I=0
528 
529        or:
530 
531 	    R
532 	    |-------|
533 	 |--|       |---|  |------| ... |--|
534 	 0          1      2            N
535 
536 	 I=1
537 
538   */
539 
540   auto i = std::lower_bound (vectorp->begin (), vectorp->end (), newr);
541   if (i > vectorp->begin ())
542     {
543       struct range &bef = *(i - 1);
544 
545       if (ranges_overlap (bef.offset, bef.length, offset, length))
546 	{
547 	  /* #1 */
548 	  ULONGEST l = std::min (bef.offset, offset);
549 	  ULONGEST h = std::max (bef.offset + bef.length, offset + length);
550 
551 	  bef.offset = l;
552 	  bef.length = h - l;
553 	  i--;
554 	}
555       else if (offset == bef.offset + bef.length)
556 	{
557 	  /* #2 */
558 	  bef.length += length;
559 	  i--;
560 	}
561       else
562 	{
563 	  /* #3 */
564 	  i = vectorp->insert (i, newr);
565 	}
566     }
567   else
568     {
569       /* #4 */
570       i = vectorp->insert (i, newr);
571     }
572 
573   /* Check whether the ranges following the one we've just added or
574      touched can be folded in (#5 above).  */
575   if (i != vectorp->end () && i + 1 < vectorp->end ())
576     {
577       int removed = 0;
578       auto next = i + 1;
579 
580       /* Get the range we just touched.  */
581       struct range &t = *i;
582       removed = 0;
583 
584       i = next;
585       for (; i < vectorp->end (); i++)
586 	{
587 	  struct range &r = *i;
588 	  if (r.offset <= t.offset + t.length)
589 	    {
590 	      ULONGEST l, h;
591 
592 	      l = std::min (t.offset, r.offset);
593 	      h = std::max (t.offset + t.length, r.offset + r.length);
594 
595 	      t.offset = l;
596 	      t.length = h - l;
597 
598 	      removed++;
599 	    }
600 	  else
601 	    {
602 	      /* If we couldn't merge this one, we won't be able to
603 		 merge following ones either, since the ranges are
604 		 always sorted by OFFSET.  */
605 	      break;
606 	    }
607 	}
608 
609       if (removed != 0)
610 	vectorp->erase (next, next + removed);
611     }
612 }
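
/* An illustrative walk-through of the merging rules above (not part of the
   original file); the expected contents noted in the comments follow cases
   #4, #2, #4 + #5, and #3 respectively.  */

static void
insert_into_bit_range_vector_examples (void)
{
  std::vector<range> v;

  insert_into_bit_range_vector (&v, 4, 8);	/* v = { [4, 12) }  */
  insert_into_bit_range_vector (&v, 12, 4);	/* contiguous: { [4, 16) }  */
  insert_into_bit_range_vector (&v, 2, 4);	/* overlaps/merges: { [2, 16) }  */
  insert_into_bit_range_vector (&v, 20, 2);	/* disjoint: { [2, 16), [20, 22) }  */

  gdb_assert (v.size () == 2);
  gdb_assert (v[0].offset == 2 && v[0].length == 14);
  gdb_assert (v[1].offset == 20 && v[1].length == 2);
}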
613 
614 void
615 mark_value_bits_unavailable (struct value *value,
616 			     LONGEST offset, LONGEST length)
617 {
618   insert_into_bit_range_vector (&value->unavailable, offset, length);
619 }
620 
621 void
622 mark_value_bytes_unavailable (struct value *value,
623 			      LONGEST offset, LONGEST length)
624 {
625   mark_value_bits_unavailable (value,
626 			       offset * TARGET_CHAR_BIT,
627 			       length * TARGET_CHAR_BIT);
628 }
629 
630 /* Find the first range in RANGES that overlaps the range defined by
631    OFFSET and LENGTH, starting at element POS in the RANGES vector.
632    Returns the index into RANGES where such an overlapping range was
633    found, or -1 if none was found.  */
634 
635 static int
636 find_first_range_overlap (const std::vector<range> *ranges, int pos,
637 			  LONGEST offset, LONGEST length)
638 {
639   int i;
640 
641   for (i = pos; i < ranges->size (); i++)
642     {
643       const range &r = (*ranges)[i];
644       if (ranges_overlap (r.offset, r.length, offset, length))
645 	return i;
646     }
647 
648   return -1;
649 }
650 
651 /* Compare LENGTH_BITS of memory at PTR1 + OFFSET1_BITS with the memory at
652    PTR2 + OFFSET2_BITS.  Return 0 if the memory is the same, otherwise
653    return non-zero.
654 
655    It must always be the case that:
656      OFFSET1_BITS % TARGET_CHAR_BIT == OFFSET2_BITS % TARGET_CHAR_BIT
657 
658    It is assumed that memory can be accessed from:
659      PTR + (OFFSET_BITS / TARGET_CHAR_BIT)
660    to:
661      PTR + ((OFFSET_BITS + LENGTH_BITS + TARGET_CHAR_BIT - 1)
662             / TARGET_CHAR_BIT)  */
663 static int
664 memcmp_with_bit_offsets (const gdb_byte *ptr1, size_t offset1_bits,
665 			 const gdb_byte *ptr2, size_t offset2_bits,
666 			 size_t length_bits)
667 {
668   gdb_assert (offset1_bits % TARGET_CHAR_BIT
669 	      == offset2_bits % TARGET_CHAR_BIT);
670 
671   if (offset1_bits % TARGET_CHAR_BIT != 0)
672     {
673       size_t bits;
674       gdb_byte mask, b1, b2;
675 
676       /* The offset from the base pointers PTR1 and PTR2 is not a complete
677 	 number of bytes.  A number of bits up to either the next exact
678 	 byte boundary, or LENGTH_BITS (whichever comes first) will be
679 	 compared.  */
680       bits = TARGET_CHAR_BIT - offset1_bits % TARGET_CHAR_BIT;
681       gdb_assert (bits < sizeof (mask) * TARGET_CHAR_BIT);
682       mask = (1 << bits) - 1;
683 
684       if (length_bits < bits)
685 	{
686 	  mask &= ~(gdb_byte) ((1 << (bits - length_bits)) - 1);
687 	  bits = length_bits;
688 	}
689 
690       /* Now load the two bytes and mask off the bits we care about.  */
691       b1 = *(ptr1 + offset1_bits / TARGET_CHAR_BIT) & mask;
692       b2 = *(ptr2 + offset2_bits / TARGET_CHAR_BIT) & mask;
693 
694       if (b1 != b2)
695 	return 1;
696 
697       /* Now update the length and offsets to take account of the bits
698 	 we've just compared.  */
699       length_bits -= bits;
700       offset1_bits += bits;
701       offset2_bits += bits;
702     }
703 
704   if (length_bits % TARGET_CHAR_BIT != 0)
705     {
706       size_t bits;
707       size_t o1, o2;
708       gdb_byte mask, b1, b2;
709 
710       /* The length is not an exact number of bytes.  After the previous
711 	 if-block, the offsets are byte aligned, or the
712 	 length is zero (in which case this code is not reached).  Compare
713 	 a number of bits at the end of the region, starting from an exact
714 	 byte boundary.  */
715       bits = length_bits % TARGET_CHAR_BIT;
716       o1 = offset1_bits + length_bits - bits;
717       o2 = offset2_bits + length_bits - bits;
718 
719       gdb_assert (bits < sizeof (mask) * TARGET_CHAR_BIT);
720       mask = ((1 << bits) - 1) << (TARGET_CHAR_BIT - bits);
721 
722       gdb_assert (o1 % TARGET_CHAR_BIT == 0);
723       gdb_assert (o2 % TARGET_CHAR_BIT == 0);
724 
725       b1 = *(ptr1 + o1 / TARGET_CHAR_BIT) & mask;
726       b2 = *(ptr2 + o2 / TARGET_CHAR_BIT) & mask;
727 
728       if (b1 != b2)
729 	return 1;
730 
731       length_bits -= bits;
732     }
733 
734   if (length_bits > 0)
735     {
736       /* We've now taken care of any stray "bits" at the start, or end of
737 	 the region to compare, the remainder can be covered with a simple
738 	 memcmp.  */
739       gdb_assert (offset1_bits % TARGET_CHAR_BIT == 0);
740       gdb_assert (offset2_bits % TARGET_CHAR_BIT == 0);
741       gdb_assert (length_bits % TARGET_CHAR_BIT == 0);
742 
743       return memcmp (ptr1 + offset1_bits / TARGET_CHAR_BIT,
744 		     ptr2 + offset2_bits / TARGET_CHAR_BIT,
745 		     length_bits / TARGET_CHAR_BIT);
746     }
747 
748   /* Length is zero, regions match.  */
749   return 0;
750 }
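
/* An illustrative sketch of the bit-granular compare above, assuming
   TARGET_CHAR_BIT is 8.  Bit offsets count from the most-significant bit of
   each byte (that is what the leading-byte mask implements), so skipping
   the two high-order bits below yields a match even though the full bytes
   differ.  */

static void
memcmp_with_bit_offsets_examples (void)
{
  const gdb_byte a[1] = { 0xc5 };	/* 1100 0101 */
  const gdb_byte b[1] = { 0x05 };	/* 0000 0101 */

  gdb_assert (memcmp_with_bit_offsets (a, 2, b, 2, 6) == 0);
  gdb_assert (memcmp_with_bit_offsets (a, 0, b, 0, 8) != 0);
}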
751 
752 /* Helper struct for find_first_range_overlap_and_match and
753    value_contents_bits_eq.  Keeps track of which slot of a given
754    ranges vector we last looked at.  */
755 
756 struct ranges_and_idx
757 {
758   /* The ranges.  */
759   const std::vector<range> *ranges;
760 
761   /* The range we've last found in RANGES.  Given ranges are sorted,
762      we can start the next lookup here.  */
763   int idx;
764 };
765 
766 /* Helper function for value_contents_bits_eq.  Compare LENGTH bits of
767    RP1's ranges starting at OFFSET1 bits with LENGTH bits of RP2's
768    ranges starting at OFFSET2 bits.  Return true if the ranges match
769    and fill in *L and *H with the overlapping window relative to
770    (both) OFFSET1 or OFFSET2.  */
771 
772 static int
773 find_first_range_overlap_and_match (struct ranges_and_idx *rp1,
774 				    struct ranges_and_idx *rp2,
775 				    LONGEST offset1, LONGEST offset2,
776 				    LONGEST length, ULONGEST *l, ULONGEST *h)
777 {
778   rp1->idx = find_first_range_overlap (rp1->ranges, rp1->idx,
779 				       offset1, length);
780   rp2->idx = find_first_range_overlap (rp2->ranges, rp2->idx,
781 				       offset2, length);
782 
783   if (rp1->idx == -1 && rp2->idx == -1)
784     {
785       *l = length;
786       *h = length;
787       return 1;
788     }
789   else if (rp1->idx == -1 || rp2->idx == -1)
790     return 0;
791   else
792     {
793       const range *r1, *r2;
794       ULONGEST l1, h1;
795       ULONGEST l2, h2;
796 
797       r1 = &(*rp1->ranges)[rp1->idx];
798       r2 = &(*rp2->ranges)[rp2->idx];
799 
800       /* Get the unavailable windows intersected by the incoming
801 	 ranges.  The first and last ranges that overlap the argument
802 	 range may be wider than the incoming argument ranges.  */
803       l1 = std::max (offset1, r1->offset);
804       h1 = std::min (offset1 + length, r1->offset + r1->length);
805 
806       l2 = std::max (offset2, r2->offset);
807       h2 = std::min (offset2 + length, r2->offset + r2->length);
808 
809       /* Make them relative to the respective start offsets, so we can
810 	 compare them for equality.  */
811       l1 -= offset1;
812       h1 -= offset1;
813 
814       l2 -= offset2;
815       h2 -= offset2;
816 
817       /* Different ranges, no match.  */
818       if (l1 != l2 || h1 != h2)
819 	return 0;
820 
821       *h = h1;
822       *l = l1;
823       return 1;
824     }
825 }
826 
827 /* Helper function for value_contents_eq.  The only difference is that
828    this function is bit rather than byte based.
829 
830    Compare LENGTH bits of VAL1's contents starting at OFFSET1 bits
831    with LENGTH bits of VAL2's contents starting at OFFSET2 bits.
832    Return true if the available bits match.  */
833 
834 static bool
835 value_contents_bits_eq (const struct value *val1, int offset1,
836 			const struct value *val2, int offset2,
837 			int length)
838 {
839   /* Each array element corresponds to a ranges source (unavailable,
840      optimized out).  '1' is for VAL1, '2' for VAL2.  */
841   struct ranges_and_idx rp1[2], rp2[2];
842 
843   /* See function description in value.h.  */
844   gdb_assert (!val1->lazy && !val2->lazy);
845 
846   /* We shouldn't be trying to compare past the end of the values.  */
847   gdb_assert (offset1 + length
848 	      <= TYPE_LENGTH (val1->enclosing_type) * TARGET_CHAR_BIT);
849   gdb_assert (offset2 + length
850 	      <= TYPE_LENGTH (val2->enclosing_type) * TARGET_CHAR_BIT);
851 
852   memset (&rp1, 0, sizeof (rp1));
853   memset (&rp2, 0, sizeof (rp2));
854   rp1[0].ranges = &val1->unavailable;
855   rp2[0].ranges = &val2->unavailable;
856   rp1[1].ranges = &val1->optimized_out;
857   rp2[1].ranges = &val2->optimized_out;
858 
859   while (length > 0)
860     {
861       ULONGEST l = 0, h = 0; /* init for gcc -Wall */
862       int i;
863 
864       for (i = 0; i < 2; i++)
865 	{
866 	  ULONGEST l_tmp, h_tmp;
867 
868 	  /* The contents compare equal only if the invalid/unavailable
869 	     contents ranges match as well.  */
870 	  if (!find_first_range_overlap_and_match (&rp1[i], &rp2[i],
871 						   offset1, offset2, length,
872 						   &l_tmp, &h_tmp))
873 	    return false;
874 
875 	  /* We're interested in the lowest/first range found.  */
876 	  if (i == 0 || l_tmp < l)
877 	    {
878 	      l = l_tmp;
879 	      h = h_tmp;
880 	    }
881 	}
882 
883       /* Compare the available/valid contents.  */
884       if (memcmp_with_bit_offsets (val1->contents.get (), offset1,
885 				   val2->contents.get (), offset2, l) != 0)
886 	return false;
887 
888       length -= h;
889       offset1 += h;
890       offset2 += h;
891     }
892 
893   return true;
894 }
895 
896 bool
897 value_contents_eq (const struct value *val1, LONGEST offset1,
898 		   const struct value *val2, LONGEST offset2,
899 		   LONGEST length)
900 {
901   return value_contents_bits_eq (val1, offset1 * TARGET_CHAR_BIT,
902 				 val2, offset2 * TARGET_CHAR_BIT,
903 				 length * TARGET_CHAR_BIT);
904 }
905 
906 
907 /* The value-history records all the values printed by print commands
908    during this session.  */
909 
910 static std::vector<value_ref_ptr> value_history;
911 
912 
913 /* List of all value objects currently allocated
914    (except for those released by calls to release_value).
915    This is so they can be freed after each command.  */
916 
917 static std::vector<value_ref_ptr> all_values;
918 
919 /* Allocate a lazy value for type TYPE.  Its actual content is
920    "lazily" allocated too: the content field of the return value is
921    NULL; it will be allocated when it is fetched from the target.  */
922 
923 struct value *
924 allocate_value_lazy (struct type *type)
925 {
926   struct value *val;
927 
928   /* Call check_typedef on our type to make sure that, if TYPE
929      is a TYPE_CODE_TYPEDEF, its length is set to the length
930      of the target type instead of zero.  However, we do not
931      replace the typedef type by the target type, because we want
932      to keep the typedef in order to be able to set the VAL's type
933      description correctly.  */
934   check_typedef (type);
935 
936   val = new struct value (type);
937 
938   /* Values start out on the all_values chain.  */
939   all_values.emplace_back (val);
940 
941   return val;
942 }
943 
944 /* The maximum size, in bytes, that GDB will try to allocate for a value.
945    The initial value of 64k was not selected for any specific reason; it is
946    just a reasonable starting point.  */
947 
948 static int max_value_size = 65536; /* 64k bytes */
949 
950 /* It is critical that the MAX_VALUE_SIZE is at least as big as the size of
951    LONGEST; otherwise GDB will not be able to parse integer values from the
952    CLI.  For example, if MAX_VALUE_SIZE could be set to 1, GDB would
953    be unable to parse "set max-value-size 2".
954 
955    As we want a consistent GDB experience across hosts with different sizes
956    of LONGEST, this arbitrary minimum value was selected; as long as it
957    is bigger than LONGEST on all GDB-supported hosts, we're fine.  */
958 
959 #define MIN_VALUE_FOR_MAX_VALUE_SIZE 16
960 gdb_static_assert (sizeof (LONGEST) <= MIN_VALUE_FOR_MAX_VALUE_SIZE);
961 
962 /* Implement the "set max-value-size" command.  */
963 
964 static void
965 set_max_value_size (const char *args, int from_tty,
966 		    struct cmd_list_element *c)
967 {
968   gdb_assert (max_value_size == -1 || max_value_size >= 0);
969 
970   if (max_value_size > -1 && max_value_size < MIN_VALUE_FOR_MAX_VALUE_SIZE)
971     {
972       max_value_size = MIN_VALUE_FOR_MAX_VALUE_SIZE;
973       error (_("max-value-size set too low, increasing to %d bytes"),
974 	     max_value_size);
975     }
976 }
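
/* For example (a sketch of the observable behavior): "set max-value-size 2"
   is clamped by the code above to MIN_VALUE_FOR_MAX_VALUE_SIZE and the error
   is reported, so a subsequent "show max-value-size" reports 16 bytes.  */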
977 
978 /* Implement the "show max-value-size" command.  */
979 
980 static void
981 show_max_value_size (struct ui_file *file, int from_tty,
982 		     struct cmd_list_element *c, const char *value)
983 {
984   if (max_value_size == -1)
985     fprintf_filtered (file, _("Maximum value size is unlimited.\n"));
986   else
987     fprintf_filtered (file, _("Maximum value size is %d bytes.\n"),
988 		      max_value_size);
989 }
990 
991 /* Called before we attempt to allocate or reallocate a buffer for the
992    contents of a value.  TYPE is the type of the value for which we are
993    allocating the buffer.  If the buffer is too large (based on the user
994    controllable setting) then throw an error.  If this function returns
995    then we should attempt to allocate the buffer.  */
996 
997 static void
998 check_type_length_before_alloc (const struct type *type)
999 {
1000   ULONGEST length = TYPE_LENGTH (type);
1001 
1002   if (max_value_size > -1 && length > max_value_size)
1003     {
1004       if (type->name () != NULL)
1005 	error (_("value of type `%s' requires %s bytes, which is more "
1006 		 "than max-value-size"), type->name (), pulongest (length));
1007       else
1008 	error (_("value requires %s bytes, which is more than "
1009 		 "max-value-size"), pulongest (length));
1010     }
1011 }
1012 
1013 /* Allocate the contents of VAL if it has not been allocated yet.  */
1014 
1015 static void
1016 allocate_value_contents (struct value *val)
1017 {
1018   if (!val->contents)
1019     {
1020       check_type_length_before_alloc (val->enclosing_type);
1021       val->contents.reset
1022 	((gdb_byte *) xzalloc (TYPE_LENGTH (val->enclosing_type)));
1023     }
1024 }
1025 
1026 /* Allocate a value and its contents for type TYPE.  */
1027 
1028 struct value *
1029 allocate_value (struct type *type)
1030 {
1031   struct value *val = allocate_value_lazy (type);
1032 
1033   allocate_value_contents (val);
1034   val->lazy = 0;
1035   return val;
1036 }
1037 
1038 /* Allocate a value that has the correct length
1039    for COUNT repetitions of type TYPE.  */
1040 
1041 struct value *
1042 allocate_repeat_value (struct type *type, int count)
1043 {
1044   int low_bound = current_language->string_lower_bound;		/* ??? */
1045   /* FIXME-type-allocation: need a way to free this type when we are
1046      done with it.  */
1047   struct type *array_type
1048     = lookup_array_range_type (type, low_bound, count + low_bound - 1);
1049 
1050   return allocate_value (array_type);
1051 }
1052 
1053 struct value *
1054 allocate_computed_value (struct type *type,
1055                          const struct lval_funcs *funcs,
1056                          void *closure)
1057 {
1058   struct value *v = allocate_value_lazy (type);
1059 
1060   VALUE_LVAL (v) = lval_computed;
1061   v->location.computed.funcs = funcs;
1062   v->location.computed.closure = closure;
1063 
1064   return v;
1065 }
1066 
1067 /* Allocate NOT_LVAL value for type TYPE being OPTIMIZED_OUT.  */
1068 
1069 struct value *
1070 allocate_optimized_out_value (struct type *type)
1071 {
1072   struct value *retval = allocate_value_lazy (type);
1073 
1074   mark_value_bytes_optimized_out (retval, 0, TYPE_LENGTH (type));
1075   set_value_lazy (retval, 0);
1076   return retval;
1077 }
1078 
1079 /* Accessor methods.  */
1080 
1081 struct type *
1082 value_type (const struct value *value)
1083 {
1084   return value->type;
1085 }
1086 void
1087 deprecated_set_value_type (struct value *value, struct type *type)
1088 {
1089   value->type = type;
1090 }
1091 
1092 LONGEST
1093 value_offset (const struct value *value)
1094 {
1095   return value->offset;
1096 }
1097 void
1098 set_value_offset (struct value *value, LONGEST offset)
1099 {
1100   value->offset = offset;
1101 }
1102 
1103 LONGEST
1104 value_bitpos (const struct value *value)
1105 {
1106   return value->bitpos;
1107 }
1108 void
1109 set_value_bitpos (struct value *value, LONGEST bit)
1110 {
1111   value->bitpos = bit;
1112 }
1113 
1114 LONGEST
1115 value_bitsize (const struct value *value)
1116 {
1117   return value->bitsize;
1118 }
1119 void
1120 set_value_bitsize (struct value *value, LONGEST bit)
1121 {
1122   value->bitsize = bit;
1123 }
1124 
1125 struct value *
1126 value_parent (const struct value *value)
1127 {
1128   return value->parent.get ();
1129 }
1130 
1131 /* See value.h.  */
1132 
1133 void
1134 set_value_parent (struct value *value, struct value *parent)
1135 {
1136   value->parent = value_ref_ptr::new_reference (parent);
1137 }
1138 
1139 gdb_byte *
1140 value_contents_raw (struct value *value)
1141 {
1142   struct gdbarch *arch = get_value_arch (value);
1143   int unit_size = gdbarch_addressable_memory_unit_size (arch);
1144 
1145   allocate_value_contents (value);
1146   return value->contents.get () + value->embedded_offset * unit_size;
1147 }
1148 
1149 gdb_byte *
1150 value_contents_all_raw (struct value *value)
1151 {
1152   allocate_value_contents (value);
1153   return value->contents.get ();
1154 }
1155 
1156 struct type *
1157 value_enclosing_type (const struct value *value)
1158 {
1159   return value->enclosing_type;
1160 }
1161 
1162 /* Look at value.h for description.  */
1163 
1164 struct type *
1165 value_actual_type (struct value *value, int resolve_simple_types,
1166 		   int *real_type_found)
1167 {
1168   struct value_print_options opts;
1169   struct type *result;
1170 
1171   get_user_print_options (&opts);
1172 
1173   if (real_type_found)
1174     *real_type_found = 0;
1175   result = value_type (value);
1176   if (opts.objectprint)
1177     {
1178       /* If result's target type is TYPE_CODE_STRUCT, proceed to
1179 	 fetch its rtti type.  */
1180       if ((result->code () == TYPE_CODE_PTR || TYPE_IS_REFERENCE (result))
1181 	  && (check_typedef (TYPE_TARGET_TYPE (result))->code ()
1182 	      == TYPE_CODE_STRUCT)
1183 	  && !value_optimized_out (value))
1184         {
1185           struct type *real_type;
1186 
1187           real_type = value_rtti_indirect_type (value, NULL, NULL, NULL);
1188           if (real_type)
1189             {
1190               if (real_type_found)
1191                 *real_type_found = 1;
1192               result = real_type;
1193             }
1194         }
1195       else if (resolve_simple_types)
1196         {
1197           if (real_type_found)
1198             *real_type_found = 1;
1199           result = value_enclosing_type (value);
1200         }
1201     }
1202 
1203   return result;
1204 }
1205 
1206 void
1207 error_value_optimized_out (void)
1208 {
1209   error (_("value has been optimized out"));
1210 }
1211 
1212 static void
1213 require_not_optimized_out (const struct value *value)
1214 {
1215   if (!value->optimized_out.empty ())
1216     {
1217       if (value->lval == lval_register)
1218 	error (_("register has not been saved in frame"));
1219       else
1220 	error_value_optimized_out ();
1221     }
1222 }
1223 
1224 static void
1225 require_available (const struct value *value)
1226 {
1227   if (!value->unavailable.empty ())
1228     throw_error (NOT_AVAILABLE_ERROR, _("value is not available"));
1229 }
1230 
1231 const gdb_byte *
1232 value_contents_for_printing (struct value *value)
1233 {
1234   if (value->lazy)
1235     value_fetch_lazy (value);
1236   return value->contents.get ();
1237 }
1238 
1239 const gdb_byte *
1240 value_contents_for_printing_const (const struct value *value)
1241 {
1242   gdb_assert (!value->lazy);
1243   return value->contents.get ();
1244 }
1245 
1246 const gdb_byte *
1247 value_contents_all (struct value *value)
1248 {
1249   const gdb_byte *result = value_contents_for_printing (value);
1250   require_not_optimized_out (value);
1251   require_available (value);
1252   return result;
1253 }
1254 
1255 /* Copy ranges in SRC_RANGE that overlap [SRC_BIT_OFFSET,
1256    SRC_BIT_OFFSET+BIT_LENGTH) into *DST_RANGE, adjusted.  */
1257 
1258 static void
1259 ranges_copy_adjusted (std::vector<range> *dst_range, int dst_bit_offset,
1260 		      const std::vector<range> &src_range, int src_bit_offset,
1261 		      int bit_length)
1262 {
1263   for (const range &r : src_range)
1264     {
1265       ULONGEST h, l;
1266 
1267       l = std::max (r.offset, (LONGEST) src_bit_offset);
1268       h = std::min (r.offset + r.length,
1269 		    (LONGEST) src_bit_offset + bit_length);
1270 
1271       if (l < h)
1272 	insert_into_bit_range_vector (dst_range,
1273 				      dst_bit_offset + (l - src_bit_offset),
1274 				      h - l);
1275     }
1276 }
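
/* A small illustrative example of the clipping and rebasing above (not part
   of the original file).  */

static void
ranges_copy_adjusted_example (void)
{
  std::vector<range> src = { { 10, 8 } };	/* [10, 18) */
  std::vector<range> dst;

  /* Copy the part of SRC that falls inside source bits [12, 20), rebasing
     to destination bit 0: [12, 18) clips to 6 bits and lands at [0, 6).  */
  ranges_copy_adjusted (&dst, 0, src, 12, 8);

  gdb_assert (dst.size () == 1);
  gdb_assert (dst[0].offset == 0 && dst[0].length == 6);
}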
1277 
1278 /* Copy the ranges metadata in SRC that overlaps [SRC_BIT_OFFSET,
1279    SRC_BIT_OFFSET+BIT_LENGTH) into DST, adjusted.  */
1280 
1281 static void
1282 value_ranges_copy_adjusted (struct value *dst, int dst_bit_offset,
1283 			    const struct value *src, int src_bit_offset,
1284 			    int bit_length)
1285 {
1286   ranges_copy_adjusted (&dst->unavailable, dst_bit_offset,
1287 			src->unavailable, src_bit_offset,
1288 			bit_length);
1289   ranges_copy_adjusted (&dst->optimized_out, dst_bit_offset,
1290 			src->optimized_out, src_bit_offset,
1291 			bit_length);
1292 }
1293 
1294 /* Copy LENGTH target addressable memory units of SRC value's (all) contents
1295    (value_contents_all) starting at SRC_OFFSET, into DST value's (all)
1296    contents, starting at DST_OFFSET.  If unavailable contents are
1297    being copied from SRC, the corresponding DST contents are marked
1298    unavailable accordingly.  Neither DST nor SRC may be lazy
1299    values.
1300 
1301    It is assumed the contents of DST in the [DST_OFFSET,
1302    DST_OFFSET+LENGTH) range are wholly available.  */
1303 
1304 void
1305 value_contents_copy_raw (struct value *dst, LONGEST dst_offset,
1306 			 struct value *src, LONGEST src_offset, LONGEST length)
1307 {
1308   LONGEST src_bit_offset, dst_bit_offset, bit_length;
1309   struct gdbarch *arch = get_value_arch (src);
1310   int unit_size = gdbarch_addressable_memory_unit_size (arch);
1311 
1312   /* A lazy DST would make this copy operation useless, since as
1313      soon as DST's contents were un-lazied (by a later value_contents
1314      call, say), the contents would be overwritten.  A lazy SRC would
1315      mean we'd be copying garbage.  */
1316   gdb_assert (!dst->lazy && !src->lazy);
1317 
1318   /* The overwritten DST range gets unavailability ORed in, not
1319      replaced.  Make sure to remember to implement replacing if it
1320      turns out actually necessary.  */
1321   gdb_assert (value_bytes_available (dst, dst_offset, length));
1322   gdb_assert (!value_bits_any_optimized_out (dst,
1323 					     TARGET_CHAR_BIT * dst_offset,
1324 					     TARGET_CHAR_BIT * length));
1325 
1326   /* Copy the data.  */
1327   memcpy (value_contents_all_raw (dst) + dst_offset * unit_size,
1328 	  value_contents_all_raw (src) + src_offset * unit_size,
1329 	  length * unit_size);
1330 
1331   /* Copy the meta-data, adjusted.  */
1332   src_bit_offset = src_offset * unit_size * HOST_CHAR_BIT;
1333   dst_bit_offset = dst_offset * unit_size * HOST_CHAR_BIT;
1334   bit_length = length * unit_size * HOST_CHAR_BIT;
1335 
1336   value_ranges_copy_adjusted (dst, dst_bit_offset,
1337 			      src, src_bit_offset,
1338 			      bit_length);
1339 }
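
/* A worked example of the arithmetic above: with UNIT_SIZE == 1 and
   HOST_CHAR_BIT == 8, copying LENGTH == 4 units from SRC_OFFSET == 2 to
   DST_OFFSET == 0 copies 4 bytes of contents and copies the
   unavailable/optimized-out metadata for the 32-bit window starting at
   source bit 16 into the window starting at destination bit 0.  */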
1340 
1341 /* Copy LENGTH bytes of SRC value's (all) contents
1342    (value_contents_all) starting at SRC_OFFSET byte, into DST value's
1343    (all) contents, starting at DST_OFFSET.  If unavailable contents
1344    are being copied from SRC, the corresponding DST contents are
1345    marked unavailable accordingly.  DST must not be lazy.  If SRC is
1346    lazy, it will be fetched now.
1347 
1348    It is assumed the contents of DST in the [DST_OFFSET,
1349    DST_OFFSET+LENGTH) range are wholly available.  */
1350 
1351 void
1352 value_contents_copy (struct value *dst, LONGEST dst_offset,
1353 		     struct value *src, LONGEST src_offset, LONGEST length)
1354 {
1355   if (src->lazy)
1356     value_fetch_lazy (src);
1357 
1358   value_contents_copy_raw (dst, dst_offset, src, src_offset, length);
1359 }
1360 
1361 int
1362 value_lazy (const struct value *value)
1363 {
1364   return value->lazy;
1365 }
1366 
1367 void
1368 set_value_lazy (struct value *value, int val)
1369 {
1370   value->lazy = val;
1371 }
1372 
1373 int
1374 value_stack (const struct value *value)
1375 {
1376   return value->stack;
1377 }
1378 
1379 void
1380 set_value_stack (struct value *value, int val)
1381 {
1382   value->stack = val;
1383 }
1384 
1385 const gdb_byte *
1386 value_contents (struct value *value)
1387 {
1388   const gdb_byte *result = value_contents_writeable (value);
1389   require_not_optimized_out (value);
1390   require_available (value);
1391   return result;
1392 }
1393 
1394 gdb_byte *
1395 value_contents_writeable (struct value *value)
1396 {
1397   if (value->lazy)
1398     value_fetch_lazy (value);
1399   return value_contents_raw (value);
1400 }
1401 
1402 int
1403 value_optimized_out (struct value *value)
1404 {
1405   /* We can only know if a value is optimized out once we have tried to
1406      fetch it.  */
1407   if (value->optimized_out.empty () && value->lazy)
1408     {
1409       try
1410 	{
1411 	  value_fetch_lazy (value);
1412 	}
1413       catch (const gdb_exception_error &ex)
1414 	{
1415 	  switch (ex.error)
1416 	    {
1417 	    case MEMORY_ERROR:
1418 	    case OPTIMIZED_OUT_ERROR:
1419 	    case NOT_AVAILABLE_ERROR:
1420 	      /* These can normally happen when we try to access an
1421 		 optimized out or unavailable register, either in a
1422 		 physical register or spilled to memory.  */
1423 	      break;
1424 	    default:
1425 	      throw;
1426 	    }
1427 	}
1428     }
1429 
1430   return !value->optimized_out.empty ();
1431 }
1432 
1433 /* Mark contents of VALUE as optimized out, starting at OFFSET bytes and
1434    extending for the following LENGTH bytes.  */
1435 
1436 void
1437 mark_value_bytes_optimized_out (struct value *value, int offset, int length)
1438 {
1439   mark_value_bits_optimized_out (value,
1440 				 offset * TARGET_CHAR_BIT,
1441 				 length * TARGET_CHAR_BIT);
1442 }
1443 
1444 /* See value.h.  */
1445 
1446 void
1447 mark_value_bits_optimized_out (struct value *value,
1448 			       LONGEST offset, LONGEST length)
1449 {
1450   insert_into_bit_range_vector (&value->optimized_out, offset, length);
1451 }
1452 
1453 int
1454 value_bits_synthetic_pointer (const struct value *value,
1455 			      LONGEST offset, LONGEST length)
1456 {
1457   if (value->lval != lval_computed
1458       || !value->location.computed.funcs->check_synthetic_pointer)
1459     return 0;
1460   return value->location.computed.funcs->check_synthetic_pointer (value,
1461 								  offset,
1462 								  length);
1463 }
1464 
1465 LONGEST
1466 value_embedded_offset (const struct value *value)
1467 {
1468   return value->embedded_offset;
1469 }
1470 
1471 void
1472 set_value_embedded_offset (struct value *value, LONGEST val)
1473 {
1474   value->embedded_offset = val;
1475 }
1476 
1477 LONGEST
1478 value_pointed_to_offset (const struct value *value)
1479 {
1480   return value->pointed_to_offset;
1481 }
1482 
1483 void
1484 set_value_pointed_to_offset (struct value *value, LONGEST val)
1485 {
1486   value->pointed_to_offset = val;
1487 }
1488 
1489 const struct lval_funcs *
1490 value_computed_funcs (const struct value *v)
1491 {
1492   gdb_assert (value_lval_const (v) == lval_computed);
1493 
1494   return v->location.computed.funcs;
1495 }
1496 
1497 void *
1498 value_computed_closure (const struct value *v)
1499 {
1500   gdb_assert (v->lval == lval_computed);
1501 
1502   return v->location.computed.closure;
1503 }
1504 
1505 enum lval_type *
1506 deprecated_value_lval_hack (struct value *value)
1507 {
1508   return &value->lval;
1509 }
1510 
1511 enum lval_type
1512 value_lval_const (const struct value *value)
1513 {
1514   return value->lval;
1515 }
1516 
1517 CORE_ADDR
1518 value_address (const struct value *value)
1519 {
1520   if (value->lval != lval_memory)
1521     return 0;
1522   if (value->parent != NULL)
1523     return value_address (value->parent.get ()) + value->offset;
1524   if (NULL != TYPE_DATA_LOCATION (value_type (value)))
1525     {
1526       gdb_assert (PROP_CONST == TYPE_DATA_LOCATION_KIND (value_type (value)));
1527       return TYPE_DATA_LOCATION_ADDR (value_type (value));
1528     }
1529 
1530   return value->location.address + value->offset;
1531 }
1532 
1533 CORE_ADDR
1534 value_raw_address (const struct value *value)
1535 {
1536   if (value->lval != lval_memory)
1537     return 0;
1538   return value->location.address;
1539 }
1540 
1541 void
1542 set_value_address (struct value *value, CORE_ADDR addr)
1543 {
1544   gdb_assert (value->lval == lval_memory);
1545   value->location.address = addr;
1546 }
1547 
1548 struct internalvar **
1549 deprecated_value_internalvar_hack (struct value *value)
1550 {
1551   return &value->location.internalvar;
1552 }
1553 
1554 struct frame_id *
1555 deprecated_value_next_frame_id_hack (struct value *value)
1556 {
1557   gdb_assert (value->lval == lval_register);
1558   return &value->location.reg.next_frame_id;
1559 }
1560 
1561 int *
1562 deprecated_value_regnum_hack (struct value *value)
1563 {
1564   gdb_assert (value->lval == lval_register);
1565   return &value->location.reg.regnum;
1566 }
1567 
1568 int
1569 deprecated_value_modifiable (const struct value *value)
1570 {
1571   return value->modifiable;
1572 }
1573 
1574 /* Return a mark in the value chain.  All values allocated after the
1575    mark is obtained (except for those released) are subject to being freed
1576    if a subsequent value_free_to_mark is passed the mark.  */
1577 struct value *
1578 value_mark (void)
1579 {
1580   if (all_values.empty ())
1581     return nullptr;
1582   return all_values.back ().get ();
1583 }
1584 
1585 /* See value.h.  */
1586 
1587 void
1588 value_incref (struct value *val)
1589 {
1590   val->reference_count++;
1591 }
1592 
1593 /* Release a reference to VAL, which was acquired with value_incref.
1594    This function is also called to deallocate values from the value
1595    chain.  */
1596 
1597 void
1598 value_decref (struct value *val)
1599 {
1600   if (val != nullptr)
1601     {
1602       gdb_assert (val->reference_count > 0);
1603       val->reference_count--;
1604       if (val->reference_count == 0)
1605 	delete val;
1606     }
1607 }
1608 
1609 /* Free all values allocated since MARK was obtained by value_mark
1610    (except for those released).  */
1611 void
1612 value_free_to_mark (const struct value *mark)
1613 {
1614   auto iter = std::find (all_values.begin (), all_values.end (), mark);
1615   if (iter == all_values.end ())
1616     all_values.clear ();
1617   else
1618     all_values.erase (iter + 1, all_values.end ());
1619 }
1620 
1621 /* Remove VAL from the chain all_values
1622    so it will not be freed automatically.  */
1623 
1624 value_ref_ptr
1625 release_value (struct value *val)
1626 {
1627   if (val == nullptr)
1628     return value_ref_ptr ();
1629 
1630   std::vector<value_ref_ptr>::reverse_iterator iter;
1631   for (iter = all_values.rbegin (); iter != all_values.rend (); ++iter)
1632     {
1633       if (*iter == val)
1634 	{
1635 	  value_ref_ptr result = *iter;
1636 	  all_values.erase (iter.base () - 1);
1637 	  return result;
1638 	}
1639     }
1640 
1641   /* We must always return an owned reference.  Normally this happens
1642      because we transfer the reference from the value chain, but in
1643      this case the value was not on the chain.  */
1644   return value_ref_ptr::new_reference (val);
1645 }
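
/* Usage sketch (illustrative only): keep a value alive beyond the value
   chain by taking over its chain reference.

     value_ref_ptr owned = release_value (val);

   After this, VAL is no longer freed by value_free_to_mark or
   free_all_values; it lives until OWNED (and any other references) are
   destroyed.  */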
1646 
1647 /* See value.h.  */
1648 
1649 std::vector<value_ref_ptr>
1650 value_release_to_mark (const struct value *mark)
1651 {
1652   std::vector<value_ref_ptr> result;
1653 
1654   auto iter = std::find (all_values.begin (), all_values.end (), mark);
1655   if (iter == all_values.end ())
1656     std::swap (result, all_values);
1657   else
1658     {
1659       std::move (iter + 1, all_values.end (), std::back_inserter (result));
1660       all_values.erase (iter + 1, all_values.end ());
1661     }
1662   std::reverse (result.begin (), result.end ());
1663   return result;
1664 }
1665 
1666 /* Return a copy of the value ARG.
1667    It contains the same contents, for the same memory address,
1668    but it's a different block of storage.  */
1669 
1670 struct value *
1671 value_copy (struct value *arg)
1672 {
1673   struct type *encl_type = value_enclosing_type (arg);
1674   struct value *val;
1675 
1676   if (value_lazy (arg))
1677     val = allocate_value_lazy (encl_type);
1678   else
1679     val = allocate_value (encl_type);
1680   val->type = arg->type;
1681   VALUE_LVAL (val) = VALUE_LVAL (arg);
1682   val->location = arg->location;
1683   val->offset = arg->offset;
1684   val->bitpos = arg->bitpos;
1685   val->bitsize = arg->bitsize;
1686   val->lazy = arg->lazy;
1687   val->embedded_offset = value_embedded_offset (arg);
1688   val->pointed_to_offset = arg->pointed_to_offset;
1689   val->modifiable = arg->modifiable;
1690   if (!value_lazy (val))
1691     {
1692       memcpy (value_contents_all_raw (val), value_contents_all_raw (arg),
1693 	      TYPE_LENGTH (value_enclosing_type (arg)));
1694 
1695     }
1696   val->unavailable = arg->unavailable;
1697   val->optimized_out = arg->optimized_out;
1698   val->parent = arg->parent;
1699   if (VALUE_LVAL (val) == lval_computed)
1700     {
1701       const struct lval_funcs *funcs = val->location.computed.funcs;
1702 
1703       if (funcs->copy_closure)
1704         val->location.computed.closure = funcs->copy_closure (val);
1705     }
1706   return val;
1707 }
1708 
1709 /* Return a "const" and/or "volatile" qualified version of the value V.
1710    If CNST is true, then the returned value will be qualified with
1711    "const".
1712    If VOLTL is true, then the returned value will be qualified with
1713    "volatile".  */
1714 
1715 struct value *
1716 make_cv_value (int cnst, int voltl, struct value *v)
1717 {
1718   struct type *val_type = value_type (v);
1719   struct type *enclosing_type = value_enclosing_type (v);
1720   struct value *cv_val = value_copy (v);
1721 
1722   deprecated_set_value_type (cv_val,
1723 			     make_cv_type (cnst, voltl, val_type, NULL));
1724   set_value_enclosing_type (cv_val,
1725 			    make_cv_type (cnst, voltl, enclosing_type, NULL));
1726 
1727   return cv_val;
1728 }
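
/* Usage sketch (illustrative only): produce a "const"-qualified copy of
   an existing value V without touching the original.

     struct value *cv = make_cv_value (1, 0, v);

   The first argument requests "const", the second "volatile"; the result
   is a copy whose type and enclosing type carry the qualifiers.  */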
1729 
1730 /* Return a version of ARG that is non-lvalue.  */
1731 
1732 struct value *
1733 value_non_lval (struct value *arg)
1734 {
1735   if (VALUE_LVAL (arg) != not_lval)
1736     {
1737       struct type *enc_type = value_enclosing_type (arg);
1738       struct value *val = allocate_value (enc_type);
1739 
1740       memcpy (value_contents_all_raw (val), value_contents_all (arg),
1741 	      TYPE_LENGTH (enc_type));
1742       val->type = arg->type;
1743       set_value_embedded_offset (val, value_embedded_offset (arg));
1744       set_value_pointed_to_offset (val, value_pointed_to_offset (arg));
1745       return val;
1746     }
1747    return arg;
1748 }
1749 
1750 /* Write contents of V at ADDR and set its lval type to be LVAL_MEMORY.  */
1751 
1752 void
1753 value_force_lval (struct value *v, CORE_ADDR addr)
1754 {
1755   gdb_assert (VALUE_LVAL (v) == not_lval);
1756 
1757   write_memory (addr, value_contents_raw (v), TYPE_LENGTH (value_type (v)));
1758   v->lval = lval_memory;
1759   v->location.address = addr;
1760 }
1761 
1762 void
1763 set_value_component_location (struct value *component,
1764 			      const struct value *whole)
1765 {
1766   struct type *type;
1767 
1768   gdb_assert (whole->lval != lval_xcallable);
1769 
1770   if (whole->lval == lval_internalvar)
1771     VALUE_LVAL (component) = lval_internalvar_component;
1772   else
1773     VALUE_LVAL (component) = whole->lval;
1774 
1775   component->location = whole->location;
1776   if (whole->lval == lval_computed)
1777     {
1778       const struct lval_funcs *funcs = whole->location.computed.funcs;
1779 
1780       if (funcs->copy_closure)
1781         component->location.computed.closure = funcs->copy_closure (whole);
1782     }
1783 
1784   /* If the type has a dynamically resolved location property,
1785      update its value address.  */
1786   type = value_type (whole);
1787   if (NULL != TYPE_DATA_LOCATION (type)
1788       && TYPE_DATA_LOCATION_KIND (type) == PROP_CONST)
1789     set_value_address (component, TYPE_DATA_LOCATION_ADDR (type));
1790 }
1791 
1792 /* Access to the value history.  */
1793 
1794 /* Record a new value in the value history.
1795    Returns the absolute history index of the entry.  */
1796 
1797 int
1798 record_latest_value (struct value *val)
1799 {
1800   /* We don't want this value to have anything to do with the inferior anymore.
1801      In particular, "set $1 = 50" should not affect the variable from which
1802      the value was taken, and fast watchpoints should be able to assume that
1803      a value on the value history never changes.  */
1804   if (value_lazy (val))
1805     value_fetch_lazy (val);
1806   /* We preserve VALUE_LVAL so that the user can find out where it was fetched
1807      from.  This is a bit dubious, because then *&$1 does not just return $1
1808      but the current contents of that location.  c'est la vie...  */
1809   val->modifiable = 0;
1810 
1811   value_history.push_back (release_value (val));
1812 
1813   return value_history.size ();
1814 }
1815 
1816 /* Return a copy of the value in the history with sequence number NUM.  */
1817 
1818 struct value *
1819 access_value_history (int num)
1820 {
1821   int absnum = num;
1822 
1823   if (absnum <= 0)
1824     absnum += value_history.size ();
1825 
1826   if (absnum <= 0)
1827     {
1828       if (num == 0)
1829 	error (_("The history is empty."));
1830       else if (num == 1)
1831 	error (_("There is only one value in the history."));
1832       else
1833 	error (_("History does not go back to $$%d."), -num);
1834     }
1835   if (absnum > value_history.size ())
1836     error (_("History has not yet reached $%d."), absnum);
1837 
1838   absnum--;
1839 
1840   return value_copy (value_history[absnum].get ());
1841 }
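
/* Usage sketch (illustrative only): push a value onto the history and
   read it back later by its index.

     int idx = record_latest_value (val);
     ...
     struct value *again = access_value_history (idx);

   Passing 0 or a negative number indexes backwards from the most recent
   entry, matching the $$ and $$N convenience syntax.  */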
1842 
1843 static void
1844 show_values (const char *num_exp, int from_tty)
1845 {
1846   int i;
1847   struct value *val;
1848   static int num = 1;
1849 
1850   if (num_exp)
1851     {
1852       /* "show values +" should print from the stored position.
1853          "show values <exp>" should print around value number <exp>.  */
1854       if (num_exp[0] != '+' || num_exp[1] != '\0')
1855 	num = parse_and_eval_long (num_exp) - 5;
1856     }
1857   else
1858     {
1859       /* "show values" means print the last 10 values.  */
1860       num = value_history.size () - 9;
1861     }
1862 
1863   if (num <= 0)
1864     num = 1;
1865 
1866   for (i = num; i < num + 10 && i <= value_history.size (); i++)
1867     {
1868       struct value_print_options opts;
1869 
1870       val = access_value_history (i);
1871       printf_filtered (("$%d = "), i);
1872       get_user_print_options (&opts);
1873       value_print (val, gdb_stdout, &opts);
1874       printf_filtered (("\n"));
1875     }
1876 
1877   /* The next "show values +" should start after what we just printed.  */
1878   num += 10;
1879 
1880   /* Hitting just return after this command should do the same thing as
1881      "show values +".  If num_exp is null, this is unnecessary, since
1882      "show values +" is not useful after "show values".  */
1883   if (from_tty && num_exp)
1884     set_repeat_arguments ("+");
1885 }
1886 
1887 enum internalvar_kind
1888 {
1889   /* The internal variable is empty.  */
1890   INTERNALVAR_VOID,
1891 
1892   /* The value of the internal variable is provided directly as
1893      a GDB value object.  */
1894   INTERNALVAR_VALUE,
1895 
1896   /* A fresh value is computed via a call-back routine on every
1897      access to the internal variable.  */
1898   INTERNALVAR_MAKE_VALUE,
1899 
1900   /* The internal variable holds a GDB internal convenience function.  */
1901   INTERNALVAR_FUNCTION,
1902 
1903   /* The variable holds an integer value.  */
1904   INTERNALVAR_INTEGER,
1905 
1906   /* The variable holds a GDB-provided string.  */
1907   INTERNALVAR_STRING,
1908 };
1909 
1910 union internalvar_data
1911 {
1912   /* A value object used with INTERNALVAR_VALUE.  */
1913   struct value *value;
1914 
1915   /* The call-back routine used with INTERNALVAR_MAKE_VALUE.  */
1916   struct
1917   {
1918     /* The functions to call.  */
1919     const struct internalvar_funcs *functions;
1920 
1921     /* The function's user-data.  */
1922     void *data;
1923   } make_value;
1924 
1925   /* The internal function used with INTERNALVAR_FUNCTION.  */
1926   struct
1927   {
1928     struct internal_function *function;
1929     /* True if this is the canonical name for the function.  */
1930     int canonical;
1931   } fn;
1932 
1933   /* An integer value used with INTERNALVAR_INTEGER.  */
1934   struct
1935   {
1936     /* If type is non-NULL, it will be used as the type to generate
1937        a value for this internal variable.  If type is NULL, a default
1938        integer type for the architecture is used.  */
1939     struct type *type;
1940     LONGEST val;
1941   } integer;
1942 
1943   /* A string value used with INTERNALVAR_STRING.  */
1944   char *string;
1945 };
1946 
1947 /* Internal variables.  These are variables within the debugger
1948    that hold values assigned by debugger commands.
1949    The user refers to them with a '$' prefix
1950    that does not appear in the variable names stored internally.  */
1951 
1952 struct internalvar
1953 {
1954   struct internalvar *next;
1955   char *name;
1956 
1957   /* We support various different kinds of content of an internal variable.
1958      enum internalvar_kind specifies the kind, and union internalvar_data
1959      provides the data associated with this particular kind.  */
1960 
1961   enum internalvar_kind kind;
1962 
1963   union internalvar_data u;
1964 };
1965 
1966 static struct internalvar *internalvars;
1967 
1968 /* If the variable does not already exist, create it and give it the
1969    value given.  If no value is given then the default is zero.  */
1970 static void
1971 init_if_undefined_command (const char* args, int from_tty)
1972 {
1973   struct internalvar* intvar;
1974 
1975   /* Parse the expression - this is taken from set_command().  */
1976   expression_up expr = parse_expression (args);
1977 
1978   /* Validate the expression.
1979      Was the expression an assignment?
1980      Or even an expression at all?  */
1981   if (expr->nelts == 0 || expr->elts[0].opcode != BINOP_ASSIGN)
1982     error (_("Init-if-undefined requires an assignment expression."));
1983 
1984   /* Extract the variable from the parsed expression.
1985      In the case of an assign the lvalue will be in elts[1] and elts[2].  */
1986   if (expr->elts[1].opcode != OP_INTERNALVAR)
1987     error (_("The first parameter to init-if-undefined "
1988 	     "should be a GDB variable."));
1989   intvar = expr->elts[2].internalvar;
1990 
1991   /* Only evaluate the expression if the lvalue is void.
1992      This may still fail if the expression is invalid.  */
1993   if (intvar->kind == INTERNALVAR_VOID)
1994     evaluate_expression (expr.get ());
1995 }
1996 
1997 
1998 /* Look up an internal variable with name NAME.  NAME should not
1999    normally include a dollar sign.
2000 
2001    If the specified internal variable does not exist,
2002    the return value is NULL.  */
2003 
2004 struct internalvar *
2005 lookup_only_internalvar (const char *name)
2006 {
2007   struct internalvar *var;
2008 
2009   for (var = internalvars; var; var = var->next)
2010     if (strcmp (var->name, name) == 0)
2011       return var;
2012 
2013   return NULL;
2014 }
2015 
2016 /* Complete NAME by comparing it to the names of internal
2017    variables.  */
2018 
2019 void
2020 complete_internalvar (completion_tracker &tracker, const char *name)
2021 {
2022   struct internalvar *var;
2023   int len;
2024 
2025   len = strlen (name);
2026 
2027   for (var = internalvars; var; var = var->next)
2028     if (strncmp (var->name, name, len) == 0)
2029       tracker.add_completion (make_unique_xstrdup (var->name));
2030 }
2031 
2032 /* Create an internal variable with name NAME and with a void value.
2033    NAME should not normally include a dollar sign.  */
2034 
2035 struct internalvar *
2036 create_internalvar (const char *name)
2037 {
2038   struct internalvar *var = XNEW (struct internalvar);
2039 
2040   var->name = xstrdup (name);
2041   var->kind = INTERNALVAR_VOID;
2042   var->next = internalvars;
2043   internalvars = var;
2044   return var;
2045 }
2046 
2047 /* Create an internal variable with name NAME and register FUN as the
2048    function that value_of_internalvar uses to create a value whenever
2049    this variable is referenced.  NAME should not normally include a
2050    dollar sign.  DATA is passed uninterpreted to FUN when it is
2051    called.  CLEANUP, if not NULL, is called when the internal variable
2052    is destroyed.  It is passed DATA as its only argument.  */
2053 
2054 struct internalvar *
2055 create_internalvar_type_lazy (const char *name,
2056 			      const struct internalvar_funcs *funcs,
2057 			      void *data)
2058 {
2059   struct internalvar *var = create_internalvar (name);
2060 
2061   var->kind = INTERNALVAR_MAKE_VALUE;
2062   var->u.make_value.functions = funcs;
2063   var->u.make_value.data = data;
2064   return var;
2065 }
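
/* Registration sketch (illustrative only; the callback and variable
   names are made up, and the member order of internalvar_funcs is
   assumed to be make_value, compile_to_ax, destroy):

     static struct value *
     make_answer_value (struct gdbarch *gdbarch, struct internalvar *var,
			void *ignore)
     {
       return value_from_longest (builtin_type (gdbarch)->builtin_int, 42);
     }

     static const struct internalvar_funcs answer_funcs
       = { make_answer_value, NULL, NULL };

     create_internalvar_type_lazy ("_answer", &answer_funcs, NULL);

   Every read of $_answer then calls make_answer_value to build a fresh
   value, as described above.  */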
2066 
2067 /* See documentation in value.h.  */
2068 
2069 int
2070 compile_internalvar_to_ax (struct internalvar *var,
2071 			   struct agent_expr *expr,
2072 			   struct axs_value *value)
2073 {
2074   if (var->kind != INTERNALVAR_MAKE_VALUE
2075       || var->u.make_value.functions->compile_to_ax == NULL)
2076     return 0;
2077 
2078   var->u.make_value.functions->compile_to_ax (var, expr, value,
2079 					      var->u.make_value.data);
2080   return 1;
2081 }
2082 
2083 /* Look up an internal variable with name NAME.  NAME should not
2084    normally include a dollar sign.
2085 
2086    If the specified internal variable does not exist,
2087    one is created, with a void value.  */
2088 
2089 struct internalvar *
2090 lookup_internalvar (const char *name)
2091 {
2092   struct internalvar *var;
2093 
2094   var = lookup_only_internalvar (name);
2095   if (var)
2096     return var;
2097 
2098   return create_internalvar (name);
2099 }
2100 
2101 /* Return current value of internal variable VAR.  For variables that
2102    are not inherently typed, use a value type appropriate for GDBARCH.  */
2103 
2104 struct value *
2105 value_of_internalvar (struct gdbarch *gdbarch, struct internalvar *var)
2106 {
2107   struct value *val;
2108   struct trace_state_variable *tsv;
2109 
2110   /* If there is a trace state variable of the same name, assume that
2111      is what we really want to see.  */
2112   tsv = find_trace_state_variable (var->name);
2113   if (tsv)
2114     {
2115       tsv->value_known = target_get_trace_state_variable_value (tsv->number,
2116 								&(tsv->value));
2117       if (tsv->value_known)
2118 	val = value_from_longest (builtin_type (gdbarch)->builtin_int64,
2119 				  tsv->value);
2120       else
2121 	val = allocate_value (builtin_type (gdbarch)->builtin_void);
2122       return val;
2123     }
2124 
2125   switch (var->kind)
2126     {
2127     case INTERNALVAR_VOID:
2128       val = allocate_value (builtin_type (gdbarch)->builtin_void);
2129       break;
2130 
2131     case INTERNALVAR_FUNCTION:
2132       val = allocate_value (builtin_type (gdbarch)->internal_fn);
2133       break;
2134 
2135     case INTERNALVAR_INTEGER:
2136       if (!var->u.integer.type)
2137 	val = value_from_longest (builtin_type (gdbarch)->builtin_int,
2138 				  var->u.integer.val);
2139       else
2140 	val = value_from_longest (var->u.integer.type, var->u.integer.val);
2141       break;
2142 
2143     case INTERNALVAR_STRING:
2144       val = value_cstring (var->u.string, strlen (var->u.string),
2145 			   builtin_type (gdbarch)->builtin_char);
2146       break;
2147 
2148     case INTERNALVAR_VALUE:
2149       val = value_copy (var->u.value);
2150       if (value_lazy (val))
2151 	value_fetch_lazy (val);
2152       break;
2153 
2154     case INTERNALVAR_MAKE_VALUE:
2155       val = (*var->u.make_value.functions->make_value) (gdbarch, var,
2156 							var->u.make_value.data);
2157       break;
2158 
2159     default:
2160       internal_error (__FILE__, __LINE__, _("bad kind"));
2161     }
2162 
2163   /* Change the VALUE_LVAL to lval_internalvar so that future operations
2164      on this value go back to affect the original internal variable.
2165 
2166      Do not do this for INTERNALVAR_MAKE_VALUE variables, as those have
2167      no underlying modifiable state in the internal variable.
2168 
2169      Likewise, if the variable's value is a computed lvalue, we want
2170      references to it to produce another computed lvalue, where
2171      references and assignments actually operate through the
2172      computed value's functions.
2173 
2174      This means that internal variables with computed values
2175      behave a little differently from other internal variables:
2176      assignments to them don't just replace the previous value
2177      altogether.  At the moment, this seems like the behavior we
2178      want.  */
2179 
2180   if (var->kind != INTERNALVAR_MAKE_VALUE
2181       && val->lval != lval_computed)
2182     {
2183       VALUE_LVAL (val) = lval_internalvar;
2184       VALUE_INTERNALVAR (val) = var;
2185     }
2186 
2187   return val;
2188 }
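
/* Usage sketch (illustrative only): reading the current value of a
   convenience variable from GDB-internal code.

     struct internalvar *var = lookup_internalvar ("foo");
     struct value *val = value_of_internalvar (get_current_arch (), var);

   get_current_arch is used the same way by show_convenience below;
   "foo" is a made-up variable name.  */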
2189 
2190 int
2191 get_internalvar_integer (struct internalvar *var, LONGEST *result)
2192 {
2193   if (var->kind == INTERNALVAR_INTEGER)
2194     {
2195       *result = var->u.integer.val;
2196       return 1;
2197     }
2198 
2199   if (var->kind == INTERNALVAR_VALUE)
2200     {
2201       struct type *type = check_typedef (value_type (var->u.value));
2202 
2203       if (type->code () == TYPE_CODE_INT)
2204 	{
2205 	  *result = value_as_long (var->u.value);
2206 	  return 1;
2207 	}
2208     }
2209 
2210   return 0;
2211 }
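
/* Usage sketch (illustrative only): a quick integer query that avoids
   building a full value.

     LONGEST n;
     if (get_internalvar_integer (lookup_internalvar ("count"), &n))
       ... use n ...

   The call returns 0 if the variable ("count" is made up) is void, a
   string, a function, or holds a non-integer value.  */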
2212 
2213 static int
2214 get_internalvar_function (struct internalvar *var,
2215 			  struct internal_function **result)
2216 {
2217   switch (var->kind)
2218     {
2219     case INTERNALVAR_FUNCTION:
2220       *result = var->u.fn.function;
2221       return 1;
2222 
2223     default:
2224       return 0;
2225     }
2226 }
2227 
2228 void
2229 set_internalvar_component (struct internalvar *var,
2230 			   LONGEST offset, LONGEST bitpos,
2231 			   LONGEST bitsize, struct value *newval)
2232 {
2233   gdb_byte *addr;
2234   struct gdbarch *arch;
2235   int unit_size;
2236 
2237   switch (var->kind)
2238     {
2239     case INTERNALVAR_VALUE:
2240       addr = value_contents_writeable (var->u.value);
2241       arch = get_value_arch (var->u.value);
2242       unit_size = gdbarch_addressable_memory_unit_size (arch);
2243 
2244       if (bitsize)
2245 	modify_field (value_type (var->u.value), addr + offset,
2246 		      value_as_long (newval), bitpos, bitsize);
2247       else
2248 	memcpy (addr + offset * unit_size, value_contents (newval),
2249 		TYPE_LENGTH (value_type (newval)));
2250       break;
2251 
2252     default:
2253       /* We can never get a component of any other kind.  */
2254       internal_error (__FILE__, __LINE__, _("set_internalvar_component"));
2255     }
2256 }
2257 
2258 void
2259 set_internalvar (struct internalvar *var, struct value *val)
2260 {
2261   enum internalvar_kind new_kind;
2262   union internalvar_data new_data = { 0 };
2263 
2264   if (var->kind == INTERNALVAR_FUNCTION && var->u.fn.canonical)
2265     error (_("Cannot overwrite convenience function %s"), var->name);
2266 
2267   /* Prepare new contents.  */
2268   switch (check_typedef (value_type (val))->code ())
2269     {
2270     case TYPE_CODE_VOID:
2271       new_kind = INTERNALVAR_VOID;
2272       break;
2273 
2274     case TYPE_CODE_INTERNAL_FUNCTION:
2275       gdb_assert (VALUE_LVAL (val) == lval_internalvar);
2276       new_kind = INTERNALVAR_FUNCTION;
2277       get_internalvar_function (VALUE_INTERNALVAR (val),
2278 				&new_data.fn.function);
2279       /* Copies created here are never canonical.  */
2280       break;
2281 
2282     default:
2283       new_kind = INTERNALVAR_VALUE;
2284       struct value *copy = value_copy (val);
2285       copy->modifiable = 1;
2286 
2287       /* Force the value to be fetched from the target now, to avoid problems
2288 	 later when this internalvar is referenced and the target is gone or
2289 	 has changed.  */
2290       if (value_lazy (copy))
2291 	value_fetch_lazy (copy);
2292 
2293       /* Release the value from the value chain to prevent it from being
2294 	 deleted by free_all_values.  From here on this function should not
2295 	 call error () until new_data is installed into the var->u to avoid
2296 	 leaking memory.  */
2297       new_data.value = release_value (copy).release ();
2298 
2299       /* Internal variables which are created from values with a dynamic
2300          location don't need the location property of the origin anymore.
2301          The resolved dynamic location is used prior to any other address
2302          when accessing the value.
2303          If we keep it, we would still refer to the origin value.
2304          Remove the location property in case it exists.  */
2305       value_type (new_data.value)->remove_dyn_prop (DYN_PROP_DATA_LOCATION);
2306 
2307       break;
2308     }
2309 
2310   /* Clean up old contents.  */
2311   clear_internalvar (var);
2312 
2313   /* Switch over.  */
2314   var->kind = new_kind;
2315   var->u = new_data;
2316   /* End code which must not call error().  */
2317 }
2318 
2319 void
2320 set_internalvar_integer (struct internalvar *var, LONGEST l)
2321 {
2322   /* Clean up old contents.  */
2323   clear_internalvar (var);
2324 
2325   var->kind = INTERNALVAR_INTEGER;
2326   var->u.integer.type = NULL;
2327   var->u.integer.val = l;
2328 }
2329 
2330 void
2331 set_internalvar_string (struct internalvar *var, const char *string)
2332 {
2333   /* Clean up old contents.  */
2334   clear_internalvar (var);
2335 
2336   var->kind = INTERNALVAR_STRING;
2337   var->u.string = xstrdup (string);
2338 }
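
/* Usage sketch (illustrative only): the typed setters above replace
   whatever the variable held before.

     struct internalvar *var = lookup_internalvar ("answer");
     set_internalvar_integer (var, 42);
     set_internalvar_string (var, "forty-two");

   Each call first runs clear_internalvar, so the previous contents
   (value, string, or callback data) are released before the new kind
   is installed.  "answer" is a made-up variable name.  */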
2339 
2340 static void
2341 set_internalvar_function (struct internalvar *var, struct internal_function *f)
2342 {
2343   /* Clean up old contents.  */
2344   clear_internalvar (var);
2345 
2346   var->kind = INTERNALVAR_FUNCTION;
2347   var->u.fn.function = f;
2348   var->u.fn.canonical = 1;
2349   /* Variables installed here are always the canonical version.  */
2350 }
2351 
2352 void
2353 clear_internalvar (struct internalvar *var)
2354 {
2355   /* Clean up old contents.  */
2356   switch (var->kind)
2357     {
2358     case INTERNALVAR_VALUE:
2359       value_decref (var->u.value);
2360       break;
2361 
2362     case INTERNALVAR_STRING:
2363       xfree (var->u.string);
2364       break;
2365 
2366     case INTERNALVAR_MAKE_VALUE:
2367       if (var->u.make_value.functions->destroy != NULL)
2368 	var->u.make_value.functions->destroy (var->u.make_value.data);
2369       break;
2370 
2371     default:
2372       break;
2373     }
2374 
2375   /* Reset to void kind.  */
2376   var->kind = INTERNALVAR_VOID;
2377 }
2378 
2379 char *
2380 internalvar_name (const struct internalvar *var)
2381 {
2382   return var->name;
2383 }
2384 
2385 static struct internal_function *
2386 create_internal_function (const char *name,
2387 			  internal_function_fn handler, void *cookie)
2388 {
2389   struct internal_function *ifn = XNEW (struct internal_function);
2390 
2391   ifn->name = xstrdup (name);
2392   ifn->handler = handler;
2393   ifn->cookie = cookie;
2394   return ifn;
2395 }
2396 
2397 char *
2398 value_internal_function_name (struct value *val)
2399 {
2400   struct internal_function *ifn;
2401   int result;
2402 
2403   gdb_assert (VALUE_LVAL (val) == lval_internalvar);
2404   result = get_internalvar_function (VALUE_INTERNALVAR (val), &ifn);
2405   gdb_assert (result);
2406 
2407   return ifn->name;
2408 }
2409 
2410 struct value *
2411 call_internal_function (struct gdbarch *gdbarch,
2412 			const struct language_defn *language,
2413 			struct value *func, int argc, struct value **argv)
2414 {
2415   struct internal_function *ifn;
2416   int result;
2417 
2418   gdb_assert (VALUE_LVAL (func) == lval_internalvar);
2419   result = get_internalvar_function (VALUE_INTERNALVAR (func), &ifn);
2420   gdb_assert (result);
2421 
2422   return (*ifn->handler) (gdbarch, language, ifn->cookie, argc, argv);
2423 }
2424 
2425 /* The 'function' command.  This does nothing -- it is just a
2426    placeholder to let "help function NAME" work.  This is also used as
2427    the implementation of the sub-command that is created when
2428    registering an internal function.  */
2429 static void
2430 function_command (const char *command, int from_tty)
2431 {
2432   /* Do nothing.  */
2433 }
2434 
2435 /* Helper function that does the work for add_internal_function.  */
2436 
2437 static struct cmd_list_element *
2438 do_add_internal_function (const char *name, const char *doc,
2439 			  internal_function_fn handler, void *cookie)
2440 {
2441   struct internal_function *ifn;
2442   struct internalvar *var = lookup_internalvar (name);
2443 
2444   ifn = create_internal_function (name, handler, cookie);
2445   set_internalvar_function (var, ifn);
2446 
2447   return add_cmd (name, no_class, function_command, doc, &functionlist);
2448 }
2449 
2450 /* See value.h.  */
2451 
2452 void
2453 add_internal_function (const char *name, const char *doc,
2454 		       internal_function_fn handler, void *cookie)
2455 {
2456   do_add_internal_function (name, doc, handler, cookie);
2457 }
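
/* Registration sketch (illustrative only; the handler and function name
   are made up).  The handler signature follows the call made in
   call_internal_function above.

     static struct value *
     nargs_handler (struct gdbarch *gdbarch,
		    const struct language_defn *language,
		    void *cookie, int argc, struct value **argv)
     {
       return value_from_longest (builtin_type (gdbarch)->builtin_int, argc);
     }

     add_internal_function ("_nargs", _("Return the argument count."),
			    nargs_handler, NULL);

   After registration, "print $_nargs (1, 2, 3)" reaches the handler with
   ARGC == 3.  */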
2458 
2459 /* See value.h.  */
2460 
2461 void
2462 add_internal_function (gdb::unique_xmalloc_ptr<char> &&name,
2463 		       gdb::unique_xmalloc_ptr<char> &&doc,
2464 		       internal_function_fn handler, void *cookie)
2465 {
2466   struct cmd_list_element *cmd
2467     = do_add_internal_function (name.get (), doc.get (), handler, cookie);
2468   doc.release ();
2469   cmd->doc_allocated = 1;
2470   name.release ();
2471   cmd->name_allocated = 1;
2472 }
2473 
2474 /* Update VALUE before discarding OBJFILE.  COPIED_TYPES is used to
2475    prevent cycles / duplicates.  */
2476 
2477 void
2478 preserve_one_value (struct value *value, struct objfile *objfile,
2479 		    htab_t copied_types)
2480 {
2481   if (TYPE_OBJFILE (value->type) == objfile)
2482     value->type = copy_type_recursive (objfile, value->type, copied_types);
2483 
2484   if (TYPE_OBJFILE (value->enclosing_type) == objfile)
2485     value->enclosing_type = copy_type_recursive (objfile,
2486 						 value->enclosing_type,
2487 						 copied_types);
2488 }
2489 
2490 /* Likewise for internal variable VAR.  */
2491 
2492 static void
2493 preserve_one_internalvar (struct internalvar *var, struct objfile *objfile,
2494 			  htab_t copied_types)
2495 {
2496   switch (var->kind)
2497     {
2498     case INTERNALVAR_INTEGER:
2499       if (var->u.integer.type && TYPE_OBJFILE (var->u.integer.type) == objfile)
2500 	var->u.integer.type
2501 	  = copy_type_recursive (objfile, var->u.integer.type, copied_types);
2502       break;
2503 
2504     case INTERNALVAR_VALUE:
2505       preserve_one_value (var->u.value, objfile, copied_types);
2506       break;
2507     }
2508 }
2509 
2510 /* Update the internal variables and value history when OBJFILE is
2511    discarded; we must copy the types out of the objfile.  New global types
2512    will be created for every convenience variable which currently points to
2513    this objfile's types, and the convenience variables will be adjusted to
2514    use the new global types.  */
2515 
2516 void
2517 preserve_values (struct objfile *objfile)
2518 {
2519   htab_t copied_types;
2520   struct internalvar *var;
2521 
2522   /* Create the hash table.  We allocate on the objfile's obstack, since
2523      it is soon to be deleted.  */
2524   copied_types = create_copied_types_hash (objfile);
2525 
2526   for (const value_ref_ptr &item : value_history)
2527     preserve_one_value (item.get (), objfile, copied_types);
2528 
2529   for (var = internalvars; var; var = var->next)
2530     preserve_one_internalvar (var, objfile, copied_types);
2531 
2532   preserve_ext_lang_values (objfile, copied_types);
2533 
2534   htab_delete (copied_types);
2535 }
2536 
2537 static void
2538 show_convenience (const char *ignore, int from_tty)
2539 {
2540   struct gdbarch *gdbarch = get_current_arch ();
2541   struct internalvar *var;
2542   int varseen = 0;
2543   struct value_print_options opts;
2544 
2545   get_user_print_options (&opts);
2546   for (var = internalvars; var; var = var->next)
2547     {
2548 
2549       if (!varseen)
2550 	{
2551 	  varseen = 1;
2552 	}
2553       printf_filtered (("$%s = "), var->name);
2554 
2555       try
2556 	{
2557 	  struct value *val;
2558 
2559 	  val = value_of_internalvar (gdbarch, var);
2560 	  value_print (val, gdb_stdout, &opts);
2561 	}
2562       catch (const gdb_exception_error &ex)
2563 	{
2564 	  fprintf_styled (gdb_stdout, metadata_style.style (),
2565 			  _("<error: %s>"), ex.what ());
2566 	}
2567 
2568       printf_filtered (("\n"));
2569     }
2570   if (!varseen)
2571     {
2572       /* This text does not mention convenience functions on purpose.
2573 	 The user can't create them except via Python, and if Python support
2574 	 is installed this message will never be printed ($_streq will
2575 	 exist).  */
2576       printf_unfiltered (_("No debugger convenience variables now defined.\n"
2577 			   "Convenience variables have "
2578 			   "names starting with \"$\";\n"
2579 			   "use \"set\" as in \"set "
2580 			   "$foo = 5\" to define them.\n"));
2581     }
2582 }
2583 
2584 
2585 /* See value.h.  */
2586 
2587 struct value *
2588 value_from_xmethod (xmethod_worker_up &&worker)
2589 {
2590   struct value *v;
2591 
2592   v = allocate_value (builtin_type (target_gdbarch ())->xmethod);
2593   v->lval = lval_xcallable;
2594   v->location.xm_worker = worker.release ();
2595   v->modifiable = 0;
2596 
2597   return v;
2598 }
2599 
2600 /* Return the type of the result of TYPE_CODE_XMETHOD value METHOD.  */
2601 
2602 struct type *
2603 result_type_of_xmethod (struct value *method, gdb::array_view<value *> argv)
2604 {
2605   gdb_assert (value_type (method)->code () == TYPE_CODE_XMETHOD
2606 	      && method->lval == lval_xcallable && !argv.empty ());
2607 
2608   return method->location.xm_worker->get_result_type (argv[0], argv.slice (1));
2609 }
2610 
2611 /* Call the xmethod corresponding to the TYPE_CODE_XMETHOD value METHOD.  */
2612 
2613 struct value *
2614 call_xmethod (struct value *method, gdb::array_view<value *> argv)
2615 {
2616   gdb_assert (value_type (method)->code () == TYPE_CODE_XMETHOD
2617 	      && method->lval == lval_xcallable && !argv.empty ());
2618 
2619   return method->location.xm_worker->invoke (argv[0], argv.slice (1));
2620 }
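
/* Invocation sketch (illustrative only): both xmethod entry points take
   the "this" object as ARGV[0] followed by the method arguments.

     value *args[] = { this_val, arg_val };
     struct type *rtype = result_type_of_xmethod (method, args);
     struct value *res = call_xmethod (method, args);

   THIS_VAL, ARG_VAL and METHOD are made-up placeholders; the C array is
   assumed to convert to gdb::array_view<value *>.  */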
2621 
2622 /* Extract a value as a C number (either long or double).
2623    Knows how to convert fixed values to double, or
2624    floating values to long.
2625    Does not deallocate the value.  */
2626 
2627 LONGEST
2628 value_as_long (struct value *val)
2629 {
2630   /* This coerces arrays and functions, which is necessary (e.g.
2631      in disassemble_command).  It also dereferences references, which
2632      I suspect is the most logical thing to do.  */
2633   val = coerce_array (val);
2634   return unpack_long (value_type (val), value_contents (val));
2635 }
2636 
2637 /* Extract a value as a C pointer.  Does not deallocate the value.
2638    Note that val's type may not actually be a pointer; value_as_long
2639    handles all the cases.  */
2640 CORE_ADDR
2641 value_as_address (struct value *val)
2642 {
2643   struct gdbarch *gdbarch = get_type_arch (value_type (val));
2644 
2645   /* Assume a CORE_ADDR can fit in a LONGEST (for now).  Not sure
2646      whether we want this to be true eventually.  */
2647 #if 0
2648   /* gdbarch_addr_bits_remove is wrong if we are being called for a
2649      non-address (e.g. argument to "signal", "info break", etc.), or
2650      for pointers to char, in which the low bits *are* significant.  */
2651   return gdbarch_addr_bits_remove (gdbarch, value_as_long (val));
2652 #else
2653 
2654   /* There are several targets (IA-64, PowerPC, and others) which
2655      don't represent pointers to functions as simply the address of
2656      the function's entry point.  For example, on the IA-64, a
2657      function pointer points to a two-word descriptor, generated by
2658      the linker, which contains the function's entry point, and the
2659      value the IA-64 "global pointer" register should have --- to
2660      support position-independent code.  The linker generates
2661      descriptors only for those functions whose addresses are taken.
2662 
2663      On such targets, it's difficult for GDB to convert an arbitrary
2664      function address into a function pointer; it has to either find
2665      an existing descriptor for that function, or call malloc and
2666      build its own.  On some targets, it is impossible for GDB to
2667      build a descriptor at all: the descriptor must contain a jump
2668      instruction; data memory cannot be executed; and code memory
2669      cannot be modified.
2670 
2671      Upon entry to this function, if VAL is a value of type `function'
2672      (that is, TYPE_CODE (VALUE_TYPE (val)) == TYPE_CODE_FUNC), then
2673      value_address (val) is the address of the function.  This is what
2674      you'll get if you evaluate an expression like `main'.  The call
2675      to COERCE_ARRAY below actually does all the usual unary
2676      conversions, which includes converting values of type `function'
2677      to `pointer to function'.  This is the challenging conversion
2678      discussed above.  Then, `unpack_long' will convert that pointer
2679      back into an address.
2680 
2681      So, suppose the user types `disassemble foo' on an architecture
2682      with a strange function pointer representation, on which GDB
2683      cannot build its own descriptors, and suppose further that `foo'
2684      has no linker-built descriptor.  The address->pointer conversion
2685      will signal an error and prevent the command from running, even
2686      though the next step would have been to convert the pointer
2687      directly back into the same address.
2688 
2689      The following shortcut avoids this whole mess.  If VAL is a
2690      function, just return its address directly.  */
2691   if (value_type (val)->code () == TYPE_CODE_FUNC
2692       || value_type (val)->code () == TYPE_CODE_METHOD)
2693     return value_address (val);
2694 
2695   val = coerce_array (val);
2696 
2697   /* Some architectures (e.g. Harvard), map instruction and data
2698      addresses onto a single large unified address space.  For
2699      instance: An architecture may consider a large integer in the
2700      range 0x10000000 .. 0x1000ffff to already represent a data
2701      address (hence not needing a pointer-to-address conversion) while
2702      a small integer would still need to be converted integer to
2703      pointer to address.  Just assume such architectures handle all
2704      integer conversions in a single function.  */
2705 
2706   /* JimB writes:
2707 
2708      I think INTEGER_TO_ADDRESS is a good idea as proposed --- but we
2709      must admonish GDB hackers to make sure its behavior matches the
2710      compiler's, whenever possible.
2711 
2712      In general, I think GDB should evaluate expressions the same way
2713      the compiler does.  When the user copies an expression out of
2714      their source code and hands it to a `print' command, they should
2715      get the same value the compiler would have computed.  Any
2716      deviation from this rule can cause major confusion and annoyance,
2717      and needs to be justified carefully.  In other words, GDB doesn't
2718      really have the freedom to do these conversions in clever and
2719      useful ways.
2720 
2721      AndrewC pointed out that users aren't complaining about how GDB
2722      casts integers to pointers; they are complaining that they can't
2723      take an address from a disassembly listing and give it to `x/i'.
2724      This is certainly important.
2725 
2726      Adding an architecture method like integer_to_address() certainly
2727      makes it possible for GDB to "get it right" in all circumstances
2728      --- the target has complete control over how things get done, so
2729      people can Do The Right Thing for their target without breaking
2730      anyone else.  The standard doesn't specify how integers get
2731      converted to pointers; usually, the ABI doesn't either, but
2732      ABI-specific code is a more reasonable place to handle it.  */
2733 
2734   if (value_type (val)->code () != TYPE_CODE_PTR
2735       && !TYPE_IS_REFERENCE (value_type (val))
2736       && gdbarch_integer_to_address_p (gdbarch))
2737     return gdbarch_integer_to_address (gdbarch, value_type (val),
2738 				       value_contents (val));
2739 
2740   return unpack_long (value_type (val), value_contents (val));
2741 #endif
2742 }
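
/* Usage sketch (illustrative only): converting an evaluated expression
   to a target address, e.g. before a memory read.

     struct value *val = parse_and_eval ("&main");
     CORE_ADDR addr = value_as_address (val);

   parse_and_eval is the usual expression entry point elsewhere in GDB;
   "&main" is just an example expression.  */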
2743 
2744 /* Unpack raw data (copied from the debuggee, target byte order) at VALADDR
2745    as a long, or as a double, assuming the raw data is described
2746    by type TYPE.  Knows how to convert different sizes of values
2747    and can convert between fixed and floating point.  We don't assume
2748    any alignment for the raw data.  Return value is in host byte order.
2749 
2750    If you want functions and arrays to be coerced to pointers, and
2751    references to be dereferenced, call value_as_long() instead.
2752 
2753    C++: It is assumed that the front-end has taken care of
2754    all matters concerning pointers to members.  A pointer
2755    to member which reaches here is considered to be equivalent
2756    to an INT (or some size).  After all, it is only an offset.  */
2757 
2758 LONGEST
2759 unpack_long (struct type *type, const gdb_byte *valaddr)
2760 {
2761   enum bfd_endian byte_order = type_byte_order (type);
2762   enum type_code code = type->code ();
2763   int len = TYPE_LENGTH (type);
2764   int nosign = TYPE_UNSIGNED (type);
2765 
2766   switch (code)
2767     {
2768     case TYPE_CODE_TYPEDEF:
2769       return unpack_long (check_typedef (type), valaddr);
2770     case TYPE_CODE_ENUM:
2771     case TYPE_CODE_FLAGS:
2772     case TYPE_CODE_BOOL:
2773     case TYPE_CODE_INT:
2774     case TYPE_CODE_CHAR:
2775     case TYPE_CODE_RANGE:
2776     case TYPE_CODE_MEMBERPTR:
2777       {
2778 	LONGEST result;
2779 	if (nosign)
2780 	  result = extract_unsigned_integer (valaddr, len, byte_order);
2781 	else
2782 	  result = extract_signed_integer (valaddr, len, byte_order);
2783 	if (code == TYPE_CODE_RANGE)
2784 	  result += type->bounds ()->bias;
2785 	return result;
2786       }
2787 
2788     case TYPE_CODE_FLT:
2789     case TYPE_CODE_DECFLOAT:
2790       return target_float_to_longest (valaddr, type);
2791 
2792     case TYPE_CODE_PTR:
2793     case TYPE_CODE_REF:
2794     case TYPE_CODE_RVALUE_REF:
2795       /* Assume a CORE_ADDR can fit in a LONGEST (for now).  Not sure
2796          whether we want this to be true eventually.  */
2797       return extract_typed_address (valaddr, type);
2798 
2799     default:
2800       error (_("Value can't be converted to integer."));
2801     }
2802 }
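
/* Worked example (illustrative only): a 32-bit integer.

     gdb_byte buf[4] = { 0x2a, 0x00, 0x00, 0x00 };
     LONGEST v = unpack_long (builtin_type (gdbarch)->builtin_int32, buf);

   With a little-endian int32 type, V is 42; the same bytes read through
   a big-endian int32 type would give 0x2a000000.  */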
2803 
2804 /* Unpack raw data (copied from the debuggee, target byte order) at VALADDR
2805    as a CORE_ADDR, assuming the raw data is described by type TYPE.
2806    We don't assume any alignment for the raw data.  Return value is in
2807    host byte order.
2808 
2809    If you want functions and arrays to be coerced to pointers, and
2810    references to be dereferenced, call value_as_address() instead.
2811 
2812    C++: It is assumed that the front-end has taken care of
2813    all matters concerning pointers to members.  A pointer
2814    to member which reaches here is considered to be equivalent
2815    to an INT (or some size).  After all, it is only an offset.  */
2816 
2817 CORE_ADDR
2818 unpack_pointer (struct type *type, const gdb_byte *valaddr)
2819 {
2820   /* Assume a CORE_ADDR can fit in a LONGEST (for now).  Not sure
2821      whether we want this to be true eventually.  */
2822   return unpack_long (type, valaddr);
2823 }
2824 
2825 bool
2826 is_floating_value (struct value *val)
2827 {
2828   struct type *type = check_typedef (value_type (val));
2829 
2830   if (is_floating_type (type))
2831     {
2832       if (!target_float_is_valid (value_contents (val), type))
2833 	error (_("Invalid floating value found in program."));
2834       return true;
2835     }
2836 
2837   return false;
2838 }
2839 
2840 
2841 /* Get the value of the FIELDNO'th field (which must be static) of
2842    TYPE.  */
2843 
2844 struct value *
2845 value_static_field (struct type *type, int fieldno)
2846 {
2847   struct value *retval;
2848 
2849   switch (TYPE_FIELD_LOC_KIND (type, fieldno))
2850     {
2851     case FIELD_LOC_KIND_PHYSADDR:
2852       retval = value_at_lazy (type->field (fieldno).type (),
2853 			      TYPE_FIELD_STATIC_PHYSADDR (type, fieldno));
2854       break;
2855     case FIELD_LOC_KIND_PHYSNAME:
2856     {
2857       const char *phys_name = TYPE_FIELD_STATIC_PHYSNAME (type, fieldno);
2858       /* TYPE_FIELD_NAME (type, fieldno); */
2859       struct block_symbol sym = lookup_symbol (phys_name, 0, VAR_DOMAIN, 0);
2860 
2861       if (sym.symbol == NULL)
2862 	{
2863 	  /* With some compilers, e.g. HP aCC, static data members are
2864 	     reported as non-debuggable symbols.  */
2865 	  struct bound_minimal_symbol msym
2866 	    = lookup_minimal_symbol (phys_name, NULL, NULL);
2867 	  struct type *field_type = type->field (fieldno).type ();
2868 
2869 	  if (!msym.minsym)
2870 	    retval = allocate_optimized_out_value (field_type);
2871 	  else
2872 	    retval = value_at_lazy (field_type, BMSYMBOL_VALUE_ADDRESS (msym));
2873 	}
2874       else
2875 	retval = value_of_variable (sym.symbol, sym.block);
2876       break;
2877     }
2878     default:
2879       gdb_assert_not_reached ("unexpected field location kind");
2880     }
2881 
2882   return retval;
2883 }
2884 
2885 /* Change the enclosing type of a value object VAL to NEW_ENCL_TYPE.
2886    You have to be careful here, since the size of the data area for the value
2887    is set by the length of the enclosing type.  So if NEW_ENCL_TYPE is bigger
2888    than the old enclosing type, you have to allocate more space for the
2889    data.  */
2890 
2891 void
2892 set_value_enclosing_type (struct value *val, struct type *new_encl_type)
2893 {
2894   if (TYPE_LENGTH (new_encl_type) > TYPE_LENGTH (value_enclosing_type (val)))
2895     {
2896       check_type_length_before_alloc (new_encl_type);
2897       val->contents
2898 	.reset ((gdb_byte *) xrealloc (val->contents.release (),
2899 				       TYPE_LENGTH (new_encl_type)));
2900     }
2901 
2902   val->enclosing_type = new_encl_type;
2903 }
2904 
2905 /* Given a value ARG1 (offset by OFFSET bytes)
2906    of a struct or union type ARG_TYPE,
2907    extract and return the value of one of its (non-static) fields.
2908    FIELDNO says which field.  */
2909 
2910 struct value *
2911 value_primitive_field (struct value *arg1, LONGEST offset,
2912 		       int fieldno, struct type *arg_type)
2913 {
2914   struct value *v;
2915   struct type *type;
2916   struct gdbarch *arch = get_value_arch (arg1);
2917   int unit_size = gdbarch_addressable_memory_unit_size (arch);
2918 
2919   arg_type = check_typedef (arg_type);
2920   type = arg_type->field (fieldno).type ();
2921 
2922   /* Call check_typedef on our type to make sure that, if TYPE
2923      is a TYPE_CODE_TYPEDEF, its length is set to the length
2924      of the target type instead of zero.  However, we do not
2925      replace the typedef type by the target type, because we want
2926      to keep the typedef in order to be able to print the type
2927      description correctly.  */
2928   check_typedef (type);
2929 
2930   if (TYPE_FIELD_BITSIZE (arg_type, fieldno))
2931     {
2932       /* Handle packed fields.
2933 
2934 	 Create a new value for the bitfield, with bitpos and bitsize
2935 	 set.  If possible, arrange offset and bitpos so that we can
2936 	 do a single aligned read of the size of the containing type.
2937 	 Otherwise, adjust offset to the byte containing the first
2938 	 bit.  Assume that the address, offset, and embedded offset
2939 	 are sufficiently aligned.  */
2940 
2941       LONGEST bitpos = TYPE_FIELD_BITPOS (arg_type, fieldno);
2942       LONGEST container_bitsize = TYPE_LENGTH (type) * 8;
2943 
2944       v = allocate_value_lazy (type);
2945       v->bitsize = TYPE_FIELD_BITSIZE (arg_type, fieldno);
2946       if ((bitpos % container_bitsize) + v->bitsize <= container_bitsize
2947 	  && TYPE_LENGTH (type) <= (int) sizeof (LONGEST))
2948 	v->bitpos = bitpos % container_bitsize;
2949       else
2950 	v->bitpos = bitpos % 8;
2951       v->offset = (value_embedded_offset (arg1)
2952 		   + offset
2953 		   + (bitpos - v->bitpos) / 8);
2954       set_value_parent (v, arg1);
2955       if (!value_lazy (arg1))
2956 	value_fetch_lazy (v);
2957     }
2958   else if (fieldno < TYPE_N_BASECLASSES (arg_type))
2959     {
2960       /* This field is actually a base subobject, so preserve the
2961 	 entire object's contents for later references to virtual
2962 	 bases, etc.  */
2963       LONGEST boffset;
2964 
2965       /* Lazy register values with offsets are not supported.  */
2966       if (VALUE_LVAL (arg1) == lval_register && value_lazy (arg1))
2967 	value_fetch_lazy (arg1);
2968 
2969       /* We special case virtual inheritance here because this
2970 	 requires access to the contents, which we would rather avoid
2971 	 for references to ordinary fields of unavailable values.  */
2972       if (BASETYPE_VIA_VIRTUAL (arg_type, fieldno))
2973 	boffset = baseclass_offset (arg_type, fieldno,
2974 				    value_contents (arg1),
2975 				    value_embedded_offset (arg1),
2976 				    value_address (arg1),
2977 				    arg1);
2978       else
2979 	boffset = TYPE_FIELD_BITPOS (arg_type, fieldno) / 8;
2980 
2981       if (value_lazy (arg1))
2982 	v = allocate_value_lazy (value_enclosing_type (arg1));
2983       else
2984 	{
2985 	  v = allocate_value (value_enclosing_type (arg1));
2986 	  value_contents_copy_raw (v, 0, arg1, 0,
2987 				   TYPE_LENGTH (value_enclosing_type (arg1)));
2988 	}
2989       v->type = type;
2990       v->offset = value_offset (arg1);
2991       v->embedded_offset = offset + value_embedded_offset (arg1) + boffset;
2992     }
2993   else if (NULL != TYPE_DATA_LOCATION (type))
2994     {
2995       /* Field is a dynamic data member.  */
2996 
2997       gdb_assert (0 == offset);
2998       /* We expect an already resolved data location.  */
2999       gdb_assert (PROP_CONST == TYPE_DATA_LOCATION_KIND (type));
3000       /* For dynamic data types, defer memory allocation
3001          until we actually access the value.  */
3002       v = allocate_value_lazy (type);
3003     }
3004   else
3005     {
3006       /* Plain old data member */
3007       offset += (TYPE_FIELD_BITPOS (arg_type, fieldno)
3008 	         / (HOST_CHAR_BIT * unit_size));
3009 
3010       /* Lazy register values with offsets are not supported.  */
3011       if (VALUE_LVAL (arg1) == lval_register && value_lazy (arg1))
3012 	value_fetch_lazy (arg1);
3013 
3014       if (value_lazy (arg1))
3015 	v = allocate_value_lazy (type);
3016       else
3017 	{
3018 	  v = allocate_value (type);
3019 	  value_contents_copy_raw (v, value_embedded_offset (v),
3020 				   arg1, value_embedded_offset (arg1) + offset,
3021 				   type_length_units (type));
3022 	}
3023       v->offset = (value_offset (arg1) + offset
3024 		   + value_embedded_offset (arg1));
3025     }
3026   set_value_component_location (v, arg1);
3027   return v;
3028 }
3029 
3030 /* Given a value ARG1 of a struct or union type,
3031    extract and return the value of one of its (non-static) fields.
3032    FIELDNO says which field.  */
3033 
3034 struct value *
3035 value_field (struct value *arg1, int fieldno)
3036 {
3037   return value_primitive_field (arg1, 0, fieldno, value_type (arg1));
3038 }
3039 
3040 /* Return a non-virtual function as a value.
3041    F is the list of member functions which contains the desired method.
3042    J is an index into F which provides the desired method.
3043 
3044    We only use the symbol for its address, so be happy with either a
3045    full symbol or a minimal symbol.  */
3046 
3047 struct value *
3048 value_fn_field (struct value **arg1p, struct fn_field *f,
3049 		int j, struct type *type,
3050 		LONGEST offset)
3051 {
3052   struct value *v;
3053   struct type *ftype = TYPE_FN_FIELD_TYPE (f, j);
3054   const char *physname = TYPE_FN_FIELD_PHYSNAME (f, j);
3055   struct symbol *sym;
3056   struct bound_minimal_symbol msym;
3057 
3058   sym = lookup_symbol (physname, 0, VAR_DOMAIN, 0).symbol;
3059   if (sym != NULL)
3060     {
3061       memset (&msym, 0, sizeof (msym));
3062     }
3063   else
3064     {
3065       gdb_assert (sym == NULL);
3066       msym = lookup_bound_minimal_symbol (physname);
3067       if (msym.minsym == NULL)
3068 	return NULL;
3069     }
3070 
3071   v = allocate_value (ftype);
3072   VALUE_LVAL (v) = lval_memory;
3073   if (sym)
3074     {
3075       set_value_address (v, BLOCK_ENTRY_PC (SYMBOL_BLOCK_VALUE (sym)));
3076     }
3077   else
3078     {
3079       /* The minimal symbol might point to a function descriptor;
3080 	 resolve it to the actual code address instead.  */
3081       struct objfile *objfile = msym.objfile;
3082       struct gdbarch *gdbarch = objfile->arch ();
3083 
3084       set_value_address (v,
3085 	gdbarch_convert_from_func_ptr_addr
3086 	   (gdbarch, BMSYMBOL_VALUE_ADDRESS (msym), current_top_target ()));
3087     }
3088 
3089   if (arg1p)
3090     {
3091       if (type != value_type (*arg1p))
3092 	*arg1p = value_ind (value_cast (lookup_pointer_type (type),
3093 					value_addr (*arg1p)));
3094 
3095       /* Move the `this' pointer according to the offset.
3096          VALUE_OFFSET (*arg1p) += offset; */
3097     }
3098 
3099   return v;
3100 }
3101 
3102 
3103 
3104 /* See value.h.  */
3105 
3106 LONGEST
3107 unpack_bits_as_long (struct type *field_type, const gdb_byte *valaddr,
3108 		     LONGEST bitpos, LONGEST bitsize)
3109 {
3110   enum bfd_endian byte_order = type_byte_order (field_type);
3111   ULONGEST val;
3112   ULONGEST valmask;
3113   int lsbcount;
3114   LONGEST bytes_read;
3115   LONGEST read_offset;
3116 
3117   /* Read the minimum number of bytes required; there may not be
3118      enough bytes to read an entire ULONGEST.  */
3119   field_type = check_typedef (field_type);
3120   if (bitsize)
3121     bytes_read = ((bitpos % 8) + bitsize + 7) / 8;
3122   else
3123     {
3124       bytes_read = TYPE_LENGTH (field_type);
3125       bitsize = 8 * bytes_read;
3126     }
3127 
3128   read_offset = bitpos / 8;
3129 
3130   val = extract_unsigned_integer (valaddr + read_offset,
3131 				  bytes_read, byte_order);
3132 
3133   /* Extract bits.  See comment above.  */
3134 
3135   if (byte_order == BFD_ENDIAN_BIG)
3136     lsbcount = (bytes_read * 8 - bitpos % 8 - bitsize);
3137   else
3138     lsbcount = (bitpos % 8);
3139   val >>= lsbcount;
3140 
3141   /* If the field does not entirely fill a LONGEST, then zero the sign bits.
3142      If the field is signed, and is negative, then sign extend.  */
3143 
3144   if (bitsize < 8 * (int) sizeof (val))
3145     {
3146       valmask = (((ULONGEST) 1) << bitsize) - 1;
3147       val &= valmask;
3148       if (!TYPE_UNSIGNED (field_type))
3149 	{
3150 	  if (val & (valmask ^ (valmask >> 1)))
3151 	    {
3152 	      val |= ~valmask;
3153 	    }
3154 	}
3155     }
3156 
3157   return val;
3158 }
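
/* Worked example (illustrative only): extracting a 3-bit unsigned field
   at bit position 2 from a single byte 0xa5 (binary 1010 0101) on a
   little-endian target.

     bytes_read = ((2 % 8) + 3 + 7) / 8 = 1
     val        = 0xa5
     lsbcount   = 2, so val >>= 2        ->  0x29
     valmask    = (1 << 3) - 1 = 0x7     ->  val &= 0x7  ->  1

   Bits 2..4 of 0xa5 are 1, 0, 0 counting from the least significant bit,
   so the result 1 matches a by-hand reading.  */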
3159 
3160 /* Unpack a field FIELDNO of the specified TYPE, from the object at
3161    VALADDR + EMBEDDED_OFFSET.  VALADDR points to the contents of
3162    ORIGINAL_VALUE, which must not be NULL.  See
3163    unpack_value_bits_as_long for more details.  */
3164 
3165 int
3166 unpack_value_field_as_long (struct type *type, const gdb_byte *valaddr,
3167 			    LONGEST embedded_offset, int fieldno,
3168 			    const struct value *val, LONGEST *result)
3169 {
3170   int bitpos = TYPE_FIELD_BITPOS (type, fieldno);
3171   int bitsize = TYPE_FIELD_BITSIZE (type, fieldno);
3172   struct type *field_type = type->field (fieldno).type ();
3173   int bit_offset;
3174 
3175   gdb_assert (val != NULL);
3176 
3177   bit_offset = embedded_offset * TARGET_CHAR_BIT + bitpos;
3178   if (value_bits_any_optimized_out (val, bit_offset, bitsize)
3179       || !value_bits_available (val, bit_offset, bitsize))
3180     return 0;
3181 
3182   *result = unpack_bits_as_long (field_type, valaddr + embedded_offset,
3183 				 bitpos, bitsize);
3184   return 1;
3185 }
3186 
3187 /* Unpack a field FIELDNO of the specified TYPE, from the anonymous
3188    object at VALADDR.  See unpack_bits_as_long for more details.  */
3189 
3190 LONGEST
3191 unpack_field_as_long (struct type *type, const gdb_byte *valaddr, int fieldno)
3192 {
3193   int bitpos = TYPE_FIELD_BITPOS (type, fieldno);
3194   int bitsize = TYPE_FIELD_BITSIZE (type, fieldno);
3195   struct type *field_type = type->field (fieldno).type ();
3196 
3197   return unpack_bits_as_long (field_type, valaddr, bitpos, bitsize);
3198 }
3199 
3200 /* Unpack a bitfield of BITSIZE bits found at BITPOS in the object at
3201    VALADDR + EMBEDDEDOFFSET that has the type of DEST_VAL and store
3202    the contents in DEST_VAL, zero or sign extending if the type of
3203    DEST_VAL is wider than BITSIZE.  VALADDR points to the contents of
3204    VAL.  If the contents of VAL that are required to extract the
3205    bitfield are unavailable/optimized out, DEST_VAL is correspondingly
3206    marked unavailable/optimized out.  */
3207 
3208 void
3209 unpack_value_bitfield (struct value *dest_val,
3210 		       LONGEST bitpos, LONGEST bitsize,
3211 		       const gdb_byte *valaddr, LONGEST embedded_offset,
3212 		       const struct value *val)
3213 {
3214   enum bfd_endian byte_order;
3215   int src_bit_offset;
3216   int dst_bit_offset;
3217   struct type *field_type = value_type (dest_val);
3218 
3219   byte_order = type_byte_order (field_type);
3220 
3221   /* First, unpack and sign extend the bitfield as if it was wholly
3222      valid.  Optimized out/unavailable bits are read as zero, but
3223      that's OK, as they'll end up marked below.  If the VAL is
3224      wholly-invalid we may have skipped allocating its contents,
3225      though.  See allocate_optimized_out_value.  */
3226   if (valaddr != NULL)
3227     {
3228       LONGEST num;
3229 
3230       num = unpack_bits_as_long (field_type, valaddr + embedded_offset,
3231 				 bitpos, bitsize);
3232       store_signed_integer (value_contents_raw (dest_val),
3233 			    TYPE_LENGTH (field_type), byte_order, num);
3234     }
3235 
3236   /* Now copy the optimized out / unavailability ranges to the right
3237      bits.  */
3238   src_bit_offset = embedded_offset * TARGET_CHAR_BIT + bitpos;
3239   if (byte_order == BFD_ENDIAN_BIG)
3240     dst_bit_offset = TYPE_LENGTH (field_type) * TARGET_CHAR_BIT - bitsize;
3241   else
3242     dst_bit_offset = 0;
3243   value_ranges_copy_adjusted (dest_val, dst_bit_offset,
3244 			      val, src_bit_offset, bitsize);
3245 }
3246 
3247 /* Return a new value with type TYPE, which is FIELDNO field of the
3248    object at VALADDR + EMBEDDEDOFFSET.  VALADDR points to the contents
3249    of VAL.  If the contents of VAL that are required to extract the
3250    bitfield are unavailable/optimized out, the new value is
3251    correspondingly marked unavailable/optimized out.  */
3252 
3253 struct value *
3254 value_field_bitfield (struct type *type, int fieldno,
3255 		      const gdb_byte *valaddr,
3256 		      LONGEST embedded_offset, const struct value *val)
3257 {
3258   int bitpos = TYPE_FIELD_BITPOS (type, fieldno);
3259   int bitsize = TYPE_FIELD_BITSIZE (type, fieldno);
3260   struct value *res_val = allocate_value (type->field (fieldno).type ());
3261 
3262   unpack_value_bitfield (res_val, bitpos, bitsize,
3263 			 valaddr, embedded_offset, val);
3264 
3265   return res_val;
3266 }
3267 
3268 /* Modify the value of a bitfield.  ADDR points to a block of memory in
3269    target byte order; the bitfield starts in the byte pointed to.  FIELDVAL
3270    is the desired value of the field, in host byte order.  BITPOS and BITSIZE
3271    indicate which bits (in target bit order) comprise the bitfield.
3272    Requires 0 < BITSIZE <= lbits, 0 <= BITPOS % 8 + BITSIZE <= lbits, and
3273    0 <= BITPOS, where lbits is the size of a LONGEST in bits.  */
3274 
3275 void
3276 modify_field (struct type *type, gdb_byte *addr,
3277 	      LONGEST fieldval, LONGEST bitpos, LONGEST bitsize)
3278 {
3279   enum bfd_endian byte_order = type_byte_order (type);
3280   ULONGEST oword;
3281   ULONGEST mask = (ULONGEST) -1 >> (8 * sizeof (ULONGEST) - bitsize);
3282   LONGEST bytesize;
3283 
3284   /* Normalize BITPOS.  */
3285   addr += bitpos / 8;
3286   bitpos %= 8;
3287 
3288   /* If a negative fieldval fits in the field in question, chop
3289      off the sign extension bits.  */
3290   if ((~fieldval & ~(mask >> 1)) == 0)
3291     fieldval &= mask;
3292 
3293   /* Warn if value is too big to fit in the field in question.  */
3294   if (0 != (fieldval & ~mask))
3295     {
3296       /* FIXME: would like to include fieldval in the message, but
3297          we don't have a sprintf_longest.  */
3298       warning (_("Value does not fit in %s bits."), plongest (bitsize));
3299 
3300       /* Truncate it, otherwise adjoining fields may be corrupted.  */
3301       fieldval &= mask;
3302     }
3303 
3304   /* Ensure that no bytes outside of the modified ones get accessed, as
3305      that may cause false valgrind reports.  */
3306 
3307   bytesize = (bitpos + bitsize + 7) / 8;
3308   oword = extract_unsigned_integer (addr, bytesize, byte_order);
3309 
3310   /* Shifting for bit field depends on endianness of the target machine.  */
3311   if (byte_order == BFD_ENDIAN_BIG)
3312     bitpos = bytesize * 8 - bitpos - bitsize;
3313 
3314   oword &= ~(mask << bitpos);
3315   oword |= fieldval << bitpos;
3316 
3317   store_unsigned_integer (addr, bytesize, byte_order, oword);
3318 }
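
/* Worked example of the read-modify-write above (little-endian target,
   made-up numbers): with BITPOS = 10, BITSIZE = 3 and FIELDVAL = 0,
   ADDR is first advanced by one byte and BITPOS becomes 2; MASK is
   0x7 and BYTESIZE is (2 + 3 + 7) / 8 = 1, so a single byte is read.
   If that byte was 0xff it becomes 0xff & ~(0x7 << 2) = 0xe3, i.e.
   only bits 10..12 of the original object are cleared before the
   result is stored back.  */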
3319 
3320 /* Pack NUM into BUF using a target format of TYPE.  */
3321 
3322 void
3323 pack_long (gdb_byte *buf, struct type *type, LONGEST num)
3324 {
3325   enum bfd_endian byte_order = type_byte_order (type);
3326   LONGEST len;
3327 
3328   type = check_typedef (type);
3329   len = TYPE_LENGTH (type);
3330 
3331   switch (type->code ())
3332     {
3333     case TYPE_CODE_RANGE:
3334       num -= type->bounds ()->bias;
3335       /* Fall through.  */
3336     case TYPE_CODE_INT:
3337     case TYPE_CODE_CHAR:
3338     case TYPE_CODE_ENUM:
3339     case TYPE_CODE_FLAGS:
3340     case TYPE_CODE_BOOL:
3341     case TYPE_CODE_MEMBERPTR:
3342       store_signed_integer (buf, len, byte_order, num);
3343       break;
3344 
3345     case TYPE_CODE_REF:
3346     case TYPE_CODE_RVALUE_REF:
3347     case TYPE_CODE_PTR:
3348       store_typed_address (buf, type, (CORE_ADDR) num);
3349       break;
3350 
3351     case TYPE_CODE_FLT:
3352     case TYPE_CODE_DECFLOAT:
3353       target_float_from_longest (buf, type, num);
3354       break;
3355 
3356     default:
3357       error (_("Unexpected type (%d) encountered for integer constant."),
3358 	     type->code ());
3359     }
3360 }
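
/* Note that the TYPE_CODE_RANGE case above stores the biased
   representation: for example, with a range type whose bias is 100, a
   NUM of 105 is written to BUF as 5.  */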
3361 
3362 
3363 /* Pack NUM into BUF using a target format of TYPE.  */
3364 
3365 static void
3366 pack_unsigned_long (gdb_byte *buf, struct type *type, ULONGEST num)
3367 {
3368   LONGEST len;
3369   enum bfd_endian byte_order;
3370 
3371   type = check_typedef (type);
3372   len = TYPE_LENGTH (type);
3373   byte_order = type_byte_order (type);
3374 
3375   switch (type->code ())
3376     {
3377     case TYPE_CODE_INT:
3378     case TYPE_CODE_CHAR:
3379     case TYPE_CODE_ENUM:
3380     case TYPE_CODE_FLAGS:
3381     case TYPE_CODE_BOOL:
3382     case TYPE_CODE_RANGE:
3383     case TYPE_CODE_MEMBERPTR:
3384       store_unsigned_integer (buf, len, byte_order, num);
3385       break;
3386 
3387     case TYPE_CODE_REF:
3388     case TYPE_CODE_RVALUE_REF:
3389     case TYPE_CODE_PTR:
3390       store_typed_address (buf, type, (CORE_ADDR) num);
3391       break;
3392 
3393     case TYPE_CODE_FLT:
3394     case TYPE_CODE_DECFLOAT:
3395       target_float_from_ulongest (buf, type, num);
3396       break;
3397 
3398     default:
3399       error (_("Unexpected type (%d) encountered "
3400 	       "for unsigned integer constant."),
3401 	     type->code ());
3402     }
3403 }
3404 
3405 
3406 /* Convert C numbers into newly allocated values.  */
3407 
3408 struct value *
3409 value_from_longest (struct type *type, LONGEST num)
3410 {
3411   struct value *val = allocate_value (type);
3412 
3413   pack_long (value_contents_raw (val), type, num);
3414   return val;
3415 }
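
/* Typical usage, following the pattern of isvoid_internal_fn further
   below (the variable name here is just for illustration):

     struct value *v
       = value_from_longest (builtin_type (gdbarch)->builtin_int, 42);

   This builds a not_lval value whose contents hold 42 in the target
   representation of that int type.  */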
3416 
3417 
3418 /* Convert C unsigned numbers into newly allocated values.  */
3419 
3420 struct value *
3421 value_from_ulongest (struct type *type, ULONGEST num)
3422 {
3423   struct value *val = allocate_value (type);
3424 
3425   pack_unsigned_long (value_contents_raw (val), type, num);
3426 
3427   return val;
3428 }
3429 
3430 
3431 /* Create a value representing a pointer of type TYPE to the address
3432    ADDR.  */
3433 
3434 struct value *
3435 value_from_pointer (struct type *type, CORE_ADDR addr)
3436 {
3437   struct value *val = allocate_value (type);
3438 
3439   store_typed_address (value_contents_raw (val),
3440 		       check_typedef (type), addr);
3441   return val;
3442 }
3443 
3444 /* Create and return a value object of TYPE containing the value D.  The
3445    TYPE must be of TYPE_CODE_FLT, and must be large enough to hold D once
3446    it is converted to target format.  */
3447 
3448 struct value *
3449 value_from_host_double (struct type *type, double d)
3450 {
3451   struct value *value = allocate_value (type);
3452   gdb_assert (type->code () == TYPE_CODE_FLT);
3453   target_float_from_host_double (value_contents_raw (value),
3454 				 value_type (value), d);
3455   return value;
3456 }
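
/* For example, value_from_host_double with
   builtin_type (gdbarch)->builtin_double and 1.5 builds a
   target-format double holding 1.5; passing a non-TYPE_CODE_FLT type
   trips the assertion above.  */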
3457 
3458 /* Create a value of type TYPE whose contents come from VALADDR, if it
3459    is non-null, and whose memory address (in the inferior) is
3460    ADDRESS.  The type of the created value may differ from the passed
3461    type TYPE.  Make sure to retrieve the value's new type after this call.
3462    Note that TYPE is not passed through resolve_dynamic_type; this is
3463    a special API intended for use only by Ada.  */
3464 
3465 struct value *
3466 value_from_contents_and_address_unresolved (struct type *type,
3467 					    const gdb_byte *valaddr,
3468 					    CORE_ADDR address)
3469 {
3470   struct value *v;
3471 
3472   if (valaddr == NULL)
3473     v = allocate_value_lazy (type);
3474   else
3475     v = value_from_contents (type, valaddr);
3476   VALUE_LVAL (v) = lval_memory;
3477   set_value_address (v, address);
3478   return v;
3479 }
3480 
3481 /* Create a value of type TYPE whose contents come from VALADDR, if it
3482    is non-null, and whose memory address (in the inferior) is
3483    ADDRESS.  The type of the created value may differ from the passed
3484    type TYPE.  Make sure to retrieve the value's new type after this call.  */
3485 
3486 struct value *
3487 value_from_contents_and_address (struct type *type,
3488 				 const gdb_byte *valaddr,
3489 				 CORE_ADDR address)
3490 {
3491   gdb::array_view<const gdb_byte> view;
3492   if (valaddr != nullptr)
3493     view = gdb::make_array_view (valaddr, TYPE_LENGTH (type));
3494   struct type *resolved_type = resolve_dynamic_type (type, view, address);
3495   struct type *resolved_type_no_typedef = check_typedef (resolved_type);
3496   struct value *v;
3497 
3498   if (valaddr == NULL)
3499     v = allocate_value_lazy (resolved_type);
3500   else
3501     v = value_from_contents (resolved_type, valaddr);
3502   if (TYPE_DATA_LOCATION (resolved_type_no_typedef) != NULL
3503       && TYPE_DATA_LOCATION_KIND (resolved_type_no_typedef) == PROP_CONST)
3504     address = TYPE_DATA_LOCATION_ADDR (resolved_type_no_typedef);
3505   VALUE_LVAL (v) = lval_memory;
3506   set_value_address (v, address);
3507   return v;
3508 }
3509 
3510 /* Create a value of type TYPE holding the contents CONTENTS.
3511    The new value is `not_lval'.  */
3512 
3513 struct value *
3514 value_from_contents (struct type *type, const gdb_byte *contents)
3515 {
3516   struct value *result;
3517 
3518   result = allocate_value (type);
3519   memcpy (value_contents_raw (result), contents, TYPE_LENGTH (type));
3520   return result;
3521 }
3522 
3523 /* Extract a value from the value history.  Input will be of the form
3524    $digits or $$digits.  See block comment above 'write_dollar_variable'
3525    for details.  */
3526 
3527 struct value *
3528 value_from_history_ref (const char *h, const char **endp)
3529 {
3530   int index, len;
3531 
3532   if (h[0] == '$')
3533     len = 1;
3534   else
3535     return NULL;
3536 
3537   if (h[1] == '$')
3538     len = 2;
3539 
3540   /* Find length of numeral string.  */
3541   for (; isdigit (h[len]); len++)
3542     ;
3543 
3544   /* Make sure numeral string is not part of an identifier.  */
3545   if (h[len] == '_' || isalpha (h[len]))
3546     return NULL;
3547 
3548   /* Now collect the index value.  */
3549   if (h[1] == '$')
3550     {
3551       if (len == 2)
3552 	{
3553 	  /* For some bizarre reason, "$$" is equivalent to "$$1",
3554 	     rather than to "$$0" as it ought to be!  */
3555 	  index = -1;
3556 	  *endp += len;
3557 	}
3558       else
3559 	{
3560 	  char *local_end;
3561 
3562 	  index = -strtol (&h[2], &local_end, 10);
3563 	  *endp = local_end;
3564 	}
3565     }
3566   else
3567     {
3568       if (len == 1)
3569 	{
3570 	  /* "$" is equivalent to "$0".  */
3571 	  index = 0;
3572 	  *endp += len;
3573 	}
3574       else
3575 	{
3576 	  char *local_end;
3577 
3578 	  index = strtol (&h[1], &local_end, 10);
3579 	  *endp = local_end;
3580 	}
3581     }
3582 
3583   return access_value_history (index);
3584 }
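
/* Examples of the mapping implemented above: "$" yields history index
   0, "$5" yields 5, "$$" yields -1 (not -0), and "$$3" yields -3; the
   index is then handed to access_value_history.  A string such as
   "$foo" is rejected because the character after the numeral (here
   'f') is alphabetic.  */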
3585 
3586 /* Get the component value (offset by OFFSET bytes) of a struct or
3587    union WHOLE.  Component's type is TYPE.  */
3588 
3589 struct value *
3590 value_from_component (struct value *whole, struct type *type, LONGEST offset)
3591 {
3592   struct value *v;
3593 
3594   if (VALUE_LVAL (whole) == lval_memory && value_lazy (whole))
3595     v = allocate_value_lazy (type);
3596   else
3597     {
3598       v = allocate_value (type);
3599       value_contents_copy (v, value_embedded_offset (v),
3600 			   whole, value_embedded_offset (whole) + offset,
3601 			   type_length_units (type));
3602     }
3603   v->offset = value_offset (whole) + offset + value_embedded_offset (whole);
3604   set_value_component_location (v, whole);
3605 
3606   return v;
3607 }
3608 
3609 struct value *
3610 coerce_ref_if_computed (const struct value *arg)
3611 {
3612   const struct lval_funcs *funcs;
3613 
3614   if (!TYPE_IS_REFERENCE (check_typedef (value_type (arg))))
3615     return NULL;
3616 
3617   if (value_lval_const (arg) != lval_computed)
3618     return NULL;
3619 
3620   funcs = value_computed_funcs (arg);
3621   if (funcs->coerce_ref == NULL)
3622     return NULL;
3623 
3624   return funcs->coerce_ref (arg);
3625 }
3626 
3627 /* Look at value.h for description.  */
3628 
3629 struct value *
3630 readjust_indirect_value_type (struct value *value, struct type *enc_type,
3631 			      const struct type *original_type,
3632 			      struct value *original_value,
3633 			      CORE_ADDR original_value_address)
3634 {
3635   gdb_assert (original_type->code () == TYPE_CODE_PTR
3636 	      || TYPE_IS_REFERENCE (original_type));
3637 
3638   struct type *original_target_type = TYPE_TARGET_TYPE (original_type);
3639   gdb::array_view<const gdb_byte> view;
3640   struct type *resolved_original_target_type
3641     = resolve_dynamic_type (original_target_type, view,
3642 			    original_value_address);
3643 
3644   /* Re-adjust type.  */
3645   deprecated_set_value_type (value, resolved_original_target_type);
3646 
3647   /* Add embedding info.  */
3648   set_value_enclosing_type (value, enc_type);
3649   set_value_embedded_offset (value, value_pointed_to_offset (original_value));
3650 
3651   /* We may be pointing to an object of some derived type.  */
3652   return value_full_object (value, NULL, 0, 0, 0);
3653 }
3654 
3655 struct value *
3656 coerce_ref (struct value *arg)
3657 {
3658   struct type *value_type_arg_tmp = check_typedef (value_type (arg));
3659   struct value *retval;
3660   struct type *enc_type;
3661 
3662   retval = coerce_ref_if_computed (arg);
3663   if (retval)
3664     return retval;
3665 
3666   if (!TYPE_IS_REFERENCE (value_type_arg_tmp))
3667     return arg;
3668 
3669   enc_type = check_typedef (value_enclosing_type (arg));
3670   enc_type = TYPE_TARGET_TYPE (enc_type);
3671 
3672   CORE_ADDR addr = unpack_pointer (value_type (arg), value_contents (arg));
3673   retval = value_at_lazy (enc_type, addr);
3674   enc_type = value_type (retval);
3675   return readjust_indirect_value_type (retval, enc_type, value_type_arg_tmp,
3676 				       arg, addr);
3677 }
3678 
3679 struct value *
3680 coerce_array (struct value *arg)
3681 {
3682   struct type *type;
3683 
3684   arg = coerce_ref (arg);
3685   type = check_typedef (value_type (arg));
3686 
3687   switch (type->code ())
3688     {
3689     case TYPE_CODE_ARRAY:
3690       if (!TYPE_VECTOR (type) && current_language->c_style_arrays)
3691 	arg = value_coerce_array (arg);
3692       break;
3693     case TYPE_CODE_FUNC:
3694       arg = value_coerce_function (arg);
3695       break;
3696     }
3697   return arg;
3698 }
3699 
3700 
3701 /* Return the return value convention that will be used for the
3702    specified type.  */
3703 
3704 enum return_value_convention
3705 struct_return_convention (struct gdbarch *gdbarch,
3706 			  struct value *function, struct type *value_type)
3707 {
3708   enum type_code code = value_type->code ();
3709 
3710   if (code == TYPE_CODE_ERROR)
3711     error (_("Function return type unknown."));
3712 
3713   /* Probe the architecture for the return-value convention.  */
3714   return gdbarch_return_value (gdbarch, function, value_type,
3715 			       NULL, NULL, NULL);
3716 }
3717 
3718 /* Return true if the function returning the specified type is using
3719    the convention of returning structures in memory (passing in the
3720    address as a hidden first parameter).  */
3721 
3722 int
3723 using_struct_return (struct gdbarch *gdbarch,
3724 		     struct value *function, struct type *value_type)
3725 {
3726   if (value_type->code () == TYPE_CODE_VOID)
3727     /* A void return value is never in memory.  See also corresponding
3728        code in "print_return_value".  */
3729     return 0;
3730 
3731   return (struct_return_convention (gdbarch, function, value_type)
3732 	  != RETURN_VALUE_REGISTER_CONVENTION);
3733 }
3734 
3735 /* Set the initialized field in a value struct.  */
3736 
3737 void
3738 set_value_initialized (struct value *val, int status)
3739 {
3740   val->initialized = status;
3741 }
3742 
3743 /* Return the initialized field in a value struct.  */
3744 
3745 int
3746 value_initialized (const struct value *val)
3747 {
3748   return val->initialized;
3749 }
3750 
3751 /* Helper for value_fetch_lazy when the value is a bitfield.  */
3752 
3753 static void
3754 value_fetch_lazy_bitfield (struct value *val)
3755 {
3756   gdb_assert (value_bitsize (val) != 0);
3757 
3758   /* To read a lazy bitfield, read the entire enclosing value.  This
3759      prevents reading the same block of (possibly volatile) memory once
3760      per bitfield.  It would be even better to read only the containing
3761      word, but we have no way to record that just specific bits of a
3762      value have been fetched.  */
3763   struct value *parent = value_parent (val);
3764 
3765   if (value_lazy (parent))
3766     value_fetch_lazy (parent);
3767 
3768   unpack_value_bitfield (val, value_bitpos (val), value_bitsize (val),
3769 			 value_contents_for_printing (parent),
3770 			 value_offset (val), parent);
3771 }
3772 
3773 /* Helper for value_fetch_lazy when the value is in memory.  */
3774 
3775 static void
3776 value_fetch_lazy_memory (struct value *val)
3777 {
3778   gdb_assert (VALUE_LVAL (val) == lval_memory);
3779 
3780   CORE_ADDR addr = value_address (val);
3781   struct type *type = check_typedef (value_enclosing_type (val));
3782 
3783   if (TYPE_LENGTH (type))
3784       read_value_memory (val, 0, value_stack (val),
3785 			 addr, value_contents_all_raw (val),
3786 			 type_length_units (type));
3787 }
3788 
3789 /* Helper for value_fetch_lazy when the value is in a register.  */
3790 
3791 static void
3792 value_fetch_lazy_register (struct value *val)
3793 {
3794   struct frame_info *next_frame;
3795   int regnum;
3796   struct type *type = check_typedef (value_type (val));
3797   struct value *new_val = val, *mark = value_mark ();
3798 
3799   /* Offsets are not supported here; lazy register values must
3800      refer to the entire register.  */
3801   gdb_assert (value_offset (val) == 0);
3802 
3803   while (VALUE_LVAL (new_val) == lval_register && value_lazy (new_val))
3804     {
3805       struct frame_id next_frame_id = VALUE_NEXT_FRAME_ID (new_val);
3806 
3807       next_frame = frame_find_by_id (next_frame_id);
3808       regnum = VALUE_REGNUM (new_val);
3809 
3810       gdb_assert (next_frame != NULL);
3811 
3812       /* Convertible register routines are used for multi-register
3813 	 values and for interpretation in different types
3814 	 (e.g. float or int from a double register).  Lazy
3815 	 register values should have the register's natural type,
3816 	 so they do not apply.  */
3817       gdb_assert (!gdbarch_convert_register_p (get_frame_arch (next_frame),
3818 					       regnum, type));
3819 
3820       /* FRAME was obtained, above, via VALUE_NEXT_FRAME_ID.
3821 	 Since a "->next" operation was performed when setting
3822 	 this field, we do not need to perform a "next" operation
3823 	 again when unwinding the register.  That's why
3824 	 frame_unwind_register_value() is called here instead of
3825 	 get_frame_register_value().  */
3826       new_val = frame_unwind_register_value (next_frame, regnum);
3827 
3828       /* If we get another lazy lval_register value, it means the
3829 	 register is found by reading it from NEXT_FRAME's next frame.
3830 	 frame_unwind_register_value should never return a value with
3831 	 the frame id pointing to NEXT_FRAME.  If it does, it means we
3832 	 either have two consecutive frames with the same frame id
3833 	 in the frame chain, or some code is trying to unwind
3834 	 behind get_prev_frame's back (e.g., a frame unwind
3835 	 sniffer trying to unwind), bypassing its validations.  In
3836 	 any case, it should always be an internal error to end up
3837 	 in this situation.  */
3838       if (VALUE_LVAL (new_val) == lval_register
3839 	  && value_lazy (new_val)
3840 	  && frame_id_eq (VALUE_NEXT_FRAME_ID (new_val), next_frame_id))
3841 	internal_error (__FILE__, __LINE__,
3842 			_("infinite loop while fetching a register"));
3843     }
3844 
3845   /* If it's still lazy (for instance, a saved register on the
3846      stack), fetch it.  */
3847   if (value_lazy (new_val))
3848     value_fetch_lazy (new_val);
3849 
3850   /* Copy the contents and the unavailability/optimized-out
3851      meta-data from NEW_VAL to VAL.  */
3852   set_value_lazy (val, 0);
3853   value_contents_copy (val, value_embedded_offset (val),
3854 		       new_val, value_embedded_offset (new_val),
3855 		       type_length_units (type));
3856 
3857   if (frame_debug)
3858     {
3859       struct gdbarch *gdbarch;
3860       struct frame_info *frame;
3861       /* VALUE_FRAME_ID is used here, instead of VALUE_NEXT_FRAME_ID,
3862 	 so that the frame level will be shown correctly.  */
3863       frame = frame_find_by_id (VALUE_FRAME_ID (val));
3864       regnum = VALUE_REGNUM (val);
3865       gdbarch = get_frame_arch (frame);
3866 
3867       fprintf_unfiltered (gdb_stdlog,
3868 			  "{ value_fetch_lazy "
3869 			  "(frame=%d,regnum=%d(%s),...) ",
3870 			  frame_relative_level (frame), regnum,
3871 			  user_reg_map_regnum_to_name (gdbarch, regnum));
3872 
3873       fprintf_unfiltered (gdb_stdlog, "->");
3874       if (value_optimized_out (new_val))
3875 	{
3876 	  fprintf_unfiltered (gdb_stdlog, " ");
3877 	  val_print_optimized_out (new_val, gdb_stdlog);
3878 	}
3879       else
3880 	{
3881 	  int i;
3882 	  const gdb_byte *buf = value_contents (new_val);
3883 
3884 	  if (VALUE_LVAL (new_val) == lval_register)
3885 	    fprintf_unfiltered (gdb_stdlog, " register=%d",
3886 				VALUE_REGNUM (new_val));
3887 	  else if (VALUE_LVAL (new_val) == lval_memory)
3888 	    fprintf_unfiltered (gdb_stdlog, " address=%s",
3889 				paddress (gdbarch,
3890 					  value_address (new_val)));
3891 	  else
3892 	    fprintf_unfiltered (gdb_stdlog, " computed");
3893 
3894 	  fprintf_unfiltered (gdb_stdlog, " bytes=");
3895 	  fprintf_unfiltered (gdb_stdlog, "[");
3896 	  for (i = 0; i < register_size (gdbarch, regnum); i++)
3897 	    fprintf_unfiltered (gdb_stdlog, "%02x", buf[i]);
3898 	  fprintf_unfiltered (gdb_stdlog, "]");
3899 	}
3900 
3901       fprintf_unfiltered (gdb_stdlog, " }\n");
3902     }
3903 
3904   /* Dispose of the intermediate values.  This prevents
3905      watchpoints from trying to watch the saved frame pointer.  */
3906   value_free_to_mark (mark);
3907 }
3908 
3909 /* Load the actual content of a lazy value.  Fetch the data from the
3910    user's process and clear the lazy flag to indicate that the data in
3911    the buffer is valid.
3912 
3913    If the value is zero-length, we avoid calling read_memory, which
3914    would abort.  We mark the value as fetched anyway -- all 0 bytes of
3915    it.  */
3916 
3917 void
3918 value_fetch_lazy (struct value *val)
3919 {
3920   gdb_assert (value_lazy (val));
3921   allocate_value_contents (val);
3922   /* A value is either lazy, or fully fetched.  The
3923      availability/validity is only established as we try to fetch a
3924      value.  */
3925   gdb_assert (val->optimized_out.empty ());
3926   gdb_assert (val->unavailable.empty ());
3927   if (value_bitsize (val))
3928     value_fetch_lazy_bitfield (val);
3929   else if (VALUE_LVAL (val) == lval_memory)
3930     value_fetch_lazy_memory (val);
3931   else if (VALUE_LVAL (val) == lval_register)
3932     value_fetch_lazy_register (val);
3933   else if (VALUE_LVAL (val) == lval_computed
3934 	   && value_computed_funcs (val)->read != NULL)
3935     value_computed_funcs (val)->read (val);
3936   else
3937     internal_error (__FILE__, __LINE__, _("Unexpected lazy value type."));
3938 
3939   set_value_lazy (val, 0);
3940 }
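
/* Callers follow the pattern used by the helpers above:

     if (value_lazy (val))
       value_fetch_lazy (val);

   i.e. fetch only while the value is still lazy; calling this on an
   already-fetched value trips the assertion at the top.  */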
3941 
3942 /* Implementation of the convenience function $_isvoid.  */
3943 
3944 static struct value *
3945 isvoid_internal_fn (struct gdbarch *gdbarch,
3946 		    const struct language_defn *language,
3947 		    void *cookie, int argc, struct value **argv)
3948 {
3949   int ret;
3950 
3951   if (argc != 1)
3952     error (_("You must provide one argument for $_isvoid."));
3953 
3954   ret = value_type (argv[0])->code () == TYPE_CODE_VOID;
3955 
3956   return value_from_longest (builtin_type (gdbarch)->builtin_int, ret);
3957 }
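
/* Example session (assuming $_exitcode is still void because no
   inferior has exited yet):

     (gdb) print $_isvoid ($_exitcode)
     $1 = 1
     (gdb) print $_isvoid (3 + 4)
     $2 = 0
*/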
3958 
3959 /* Implementation of the convenience function $_creal.  Extracts the
3960    real part from a complex number.  */
3961 
3962 static struct value *
3963 creal_internal_fn (struct gdbarch *gdbarch,
3964 		   const struct language_defn *language,
3965 		   void *cookie, int argc, struct value **argv)
3966 {
3967   if (argc != 1)
3968     error (_("You must provide one argument for $_creal."));
3969 
3970   value *cval = argv[0];
3971   type *ctype = check_typedef (value_type (cval));
3972   if (ctype->code () != TYPE_CODE_COMPLEX)
3973     error (_("expected a complex number"));
3974   return value_real_part (cval);
3975 }
3976 
3977 /* Implementation of the convenience function $_cimag.  Extracts the
3978    imaginary part from a complex number.  */
3979 
3980 static struct value *
3981 cimag_internal_fn (struct gdbarch *gdbarch,
3982 		   const struct language_defn *language,
3983 		   void *cookie, int argc,
3984 		   struct value **argv)
3985 {
3986   if (argc != 1)
3987     error (_("You must provide one argument for $_cimag."));
3988 
3989   value *cval = argv[0];
3990   type *ctype = check_typedef (value_type (cval));
3991   if (ctype->code () != TYPE_CODE_COMPLEX)
3992     error (_("expected a complex number"));
3993   return value_imaginary_part (cval);
3994 }
3995 
3996 #if GDB_SELF_TEST
3997 namespace selftests
3998 {
3999 
4000 /* Test the ranges_contain function.  */
4001 
4002 static void
4003 test_ranges_contain ()
4004 {
4005   std::vector<range> ranges;
4006   range r;
4007 
4008   /* [10, 14] */
4009   r.offset = 10;
4010   r.length = 5;
4011   ranges.push_back (r);
4012 
4013   /* [20, 24] */
4014   r.offset = 20;
4015   r.length = 5;
4016   ranges.push_back (r);
4017 
4018   /* [2, 6] */
4019   SELF_CHECK (!ranges_contain (ranges, 2, 5));
4020   /* [9, 13] */
4021   SELF_CHECK (ranges_contain (ranges, 9, 5));
4022   /* [10, 11] */
4023   SELF_CHECK (ranges_contain (ranges, 10, 2));
4024   /* [10, 14] */
4025   SELF_CHECK (ranges_contain (ranges, 10, 5));
4026   /* [13, 18] */
4027   SELF_CHECK (ranges_contain (ranges, 13, 6));
4028   /* [14, 18] */
4029   SELF_CHECK (ranges_contain (ranges, 14, 5));
4030   /* [15, 18] */
4031   SELF_CHECK (!ranges_contain (ranges, 15, 4));
4032   /* [16, 19] */
4033   SELF_CHECK (!ranges_contain (ranges, 16, 4));
4034   /* [16, 21] */
4035   SELF_CHECK (ranges_contain (ranges, 16, 6));
4036   /* [21, 21] */
4037   SELF_CHECK (ranges_contain (ranges, 21, 1));
4038   /* [21, 25] */
4039   SELF_CHECK (ranges_contain (ranges, 21, 5));
4040   /* [26, 28] */
4041   SELF_CHECK (!ranges_contain (ranges, 26, 3));
4042 }
4043 
4044 /* Check that RANGES contains the same ranges as EXPECTED.  */
4045 
4046 static bool
4047 check_ranges_vector (gdb::array_view<const range> ranges,
4048 		     gdb::array_view<const range> expected)
4049 {
4050   return ranges == expected;
4051 }
4052 
4053 /* Test the insert_into_bit_range_vector function.  */
4054 
4055 static void
4056 test_insert_into_bit_range_vector ()
4057 {
4058   std::vector<range> ranges;
4059 
4060   /* [10, 14] */
4061   {
4062     insert_into_bit_range_vector (&ranges, 10, 5);
4063     static const range expected[] = {
4064       {10, 5}
4065     };
4066     SELF_CHECK (check_ranges_vector (ranges, expected));
4067   }
4068 
4069   /* [10, 14] */
4070   {
4071     insert_into_bit_range_vector (&ranges, 11, 4);
4072     static const range expected = {10, 5};
4073     SELF_CHECK (check_ranges_vector (ranges, expected));
4074   }
4075 
4076   /* [10, 14] [20, 24] */
4077   {
4078     insert_into_bit_range_vector (&ranges, 20, 5);
4079     static const range expected[] = {
4080       {10, 5},
4081       {20, 5},
4082     };
4083     SELF_CHECK (check_ranges_vector (ranges, expected));
4084   }
4085 
4086   /* [10, 14] [17, 24] */
4087   {
4088     insert_into_bit_range_vector (&ranges, 17, 5);
4089     static const range expected[] = {
4090       {10, 5},
4091       {17, 8},
4092     };
4093     SELF_CHECK (check_ranges_vector (ranges, expected));
4094   }
4095 
4096   /* [2, 8] [10, 14] [17, 24] */
4097   {
4098     insert_into_bit_range_vector (&ranges, 2, 7);
4099     static const range expected[] = {
4100       {2, 7},
4101       {10, 5},
4102       {17, 8},
4103     };
4104     SELF_CHECK (check_ranges_vector (ranges, expected));
4105   }
4106 
4107   /* [2, 14] [17, 24] */
4108   {
4109     insert_into_bit_range_vector (&ranges, 9, 1);
4110     static const range expected[] = {
4111       {2, 13},
4112       {17, 8},
4113     };
4114     SELF_CHECK (check_ranges_vector (ranges, expected));
4115   }
4116 
4117   /* [2, 14] [17, 24] */
4118   {
4119     insert_into_bit_range_vector (&ranges, 9, 1);
4120     static const range expected[] = {
4121       {2, 13},
4122       {17, 8},
4123     };
4124     SELF_CHECK (check_ranges_vector (ranges, expected));
4125   }
4126 
4127   /* [2, 33] */
4128   {
4129     insert_into_bit_range_vector (&ranges, 4, 30);
4130     static const range expected = {2, 32};
4131     SELF_CHECK (check_ranges_vector (ranges, expected));
4132   }
4133 }
4134 
4135 } /* namespace selftests */
4136 #endif /* GDB_SELF_TEST */
4137 
4138 void _initialize_values ();
4139 void
4140 _initialize_values ()
4141 {
4142   add_cmd ("convenience", no_class, show_convenience, _("\
4143 Debugger convenience (\"$foo\") variables and functions.\n\
4144 Convenience variables are created when you assign them values;\n\
4145 thus, \"set $foo=1\" gives \"$foo\" the value 1.  Values may be any type.\n\
4146 \n\
4147 A few convenience variables are given values automatically:\n\
4148 \"$_\"holds the last address examined with \"x\" or \"info lines\",\n\
4149 \"$__\" holds the contents of the last address examined with \"x\"."
4150 #ifdef HAVE_PYTHON
4151 "\n\n\
4152 Convenience functions are defined via the Python API."
4153 #endif
4154 	   ), &showlist);
4155   add_alias_cmd ("conv", "convenience", no_class, 1, &showlist);
4156 
4157   add_cmd ("values", no_set_class, show_values, _("\
4158 Elements of value history around item number IDX (or last ten)."),
4159 	   &showlist);
4160 
4161   add_com ("init-if-undefined", class_vars, init_if_undefined_command, _("\
4162 Initialize a convenience variable if necessary.\n\
4163 init-if-undefined VARIABLE = EXPRESSION\n\
4164 Set an internal VARIABLE to the result of the EXPRESSION if it does not\n\
4165 exist or does not contain a value.  The EXPRESSION is not evaluated if the\n\
4166 VARIABLE is already initialized."));
4167 
4168   add_prefix_cmd ("function", no_class, function_command, _("\
4169 Placeholder command for showing help on convenience functions."),
4170 		  &functionlist, "function ", 0, &cmdlist);
4171 
4172   add_internal_function ("_isvoid", _("\
4173 Check whether an expression is void.\n\
4174 Usage: $_isvoid (expression)\n\
4175 Return 1 if the expression is void, zero otherwise."),
4176 			 isvoid_internal_fn, NULL);
4177 
4178   add_internal_function ("_creal", _("\
4179 Extract the real part of a complex number.\n\
4180 Usage: $_creal (expression)\n\
4181 Return the real part of a complex number, the type depends on the\n\
4182 type of a complex number."),
4183 			 creal_internal_fn, NULL);
4184 
4185   add_internal_function ("_cimag", _("\
4186 Extract the imaginary part of a complex number.\n\
4187 Usage: $_cimag (expression)\n\
4188 Return the imaginary part of a complex number, the type depends on the\n\
4189 type of a complex number."),
4190 			 cimag_internal_fn, NULL);
4191 
4192   add_setshow_zuinteger_unlimited_cmd ("max-value-size",
4193 				       class_support, &max_value_size, _("\
4194 Set the maximum size of a value gdb will load from the inferior."), _("\
4195 Show the maximum size of a value gdb will load from the inferior."), _("\
4196 Use this to control the maximum size, in bytes, of a value that gdb\n\
4197 will load from the inferior.  Setting this value to 'unlimited'\n\
4198 disables checking.\n\
4199 Setting this does not invalidate already allocated values, it only\n\
4200 prevents future values, larger than this size, from being allocated."),
4201 			    set_max_value_size,
4202 			    show_max_value_size,
4203 			    &setlist, &showlist);
4204 #if GDB_SELF_TEST
4205   selftests::register_test ("ranges_contain", selftests::test_ranges_contain);
4206   selftests::register_test ("insert_into_bit_range_vector",
4207 			    selftests::test_insert_into_bit_range_vector);
4208 #endif
4209 }
4210 
4211 /* See value.h.  */
4212 
4213 void
4214 finalize_values ()
4215 {
4216   all_values.clear ();
4217 }
4218