1 
2 /*--------------------------------------------------------------------*/
3 /*--- Helgrind: a Valgrind tool for detecting errors               ---*/
4 /*--- in threaded programs.                              hg_main.c ---*/
5 /*--------------------------------------------------------------------*/
6 
7 /*
8    This file is part of Helgrind, a Valgrind tool for detecting errors
9    in threaded programs.
10 
11    Copyright (C) 2007-2017 OpenWorks LLP
12       info@open-works.co.uk
13 
14    Copyright (C) 2007-2017 Apple, Inc.
15 
16    This program is free software; you can redistribute it and/or
17    modify it under the terms of the GNU General Public License as
18    published by the Free Software Foundation; either version 2 of the
19    License, or (at your option) any later version.
20 
21    This program is distributed in the hope that it will be useful, but
22    WITHOUT ANY WARRANTY; without even the implied warranty of
23    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
24    General Public License for more details.
25 
26    You should have received a copy of the GNU General Public License
27    along with this program; if not, write to the Free Software
28    Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
29    02111-1307, USA.
30 
31    The GNU General Public License is contained in the file COPYING.
32 
33    Neither the names of the U.S. Department of Energy nor the
34    University of California nor the names of its contributors may be
35    used to endorse or promote products derived from this software
36    without prior written permission.
37 */
38 
39 #include "pub_tool_basics.h"
40 #include "pub_tool_gdbserver.h"
41 #include "pub_tool_libcassert.h"
42 #include "pub_tool_libcbase.h"
43 #include "pub_tool_libcprint.h"
44 #include "pub_tool_threadstate.h"
45 #include "pub_tool_tooliface.h"
46 #include "pub_tool_hashtable.h"
47 #include "pub_tool_replacemalloc.h"
48 #include "pub_tool_machine.h"
49 #include "pub_tool_options.h"
50 #include "pub_tool_xarray.h"
51 #include "pub_tool_stacktrace.h"
52 #include "pub_tool_wordfm.h"
53 #include "pub_tool_debuginfo.h" // VG_(find_seginfo), VG_(seginfo_soname)
54 #include "pub_tool_redir.h"     // sonames for the dynamic linkers
55 #include "pub_tool_vki.h"       // VKI_PAGE_SIZE
56 #include "pub_tool_libcproc.h"
57 #include "pub_tool_aspacemgr.h" // VG_(am_is_valid_for_client)
58 #include "pub_tool_poolalloc.h"
59 #include "pub_tool_addrinfo.h"
60 #include "pub_tool_xtree.h"
61 #include "pub_tool_xtmemory.h"
62 
63 #include "hg_basics.h"
64 #include "hg_wordset.h"
65 #include "hg_addrdescr.h"
66 #include "hg_lock_n_thread.h"
67 #include "hg_errors.h"
68 
69 #include "libhb.h"
70 
71 #include "helgrind.h"
72 
73 
74 // FIXME: new_mem_w_tid ignores the supplied tid. (wtf?!)
75 
76 // FIXME: when client destroys a lock or a CV, remove these
77 // from our mappings, so that the associated SO can be freed up
78 
79 /*----------------------------------------------------------------*/
80 /*---                                                          ---*/
81 /*----------------------------------------------------------------*/
82 
83 /* Note this needs to be compiled with -fno-strict-aliasing, since it
84    contains a whole bunch of calls to lookupFM etc which cast between
85    Word and pointer types.  gcc rightly complains this breaks ANSI C
86    strict aliasing rules, at -O2.  No complaints at -O, but -O2 gives
87    worthwhile performance benefits over -O.
88 */
89 
90 // FIXME what is supposed to happen to locks in memory which
91 // is relocated as a result of client realloc?
92 
93 // FIXME put referencing ThreadId into Thread and get
94 // rid of the slow reverse mapping function.
95 
96 // FIXME accesses to NoAccess areas: change state to Excl?
97 
98 // FIXME report errors for accesses of NoAccess memory?
99 
100 // FIXME pth_cond_wait/timedwait wrappers.  Even if these fail,
101 // the thread still holds the lock.
102 
103 /* ------------ Debug/trace options ------------ */
104 
105 // 0 for silent, 1 for some stuff, 2 for lots of stuff
106 #define SHOW_EVENTS 0
107 
108 
109 static void all__sanity_check ( const HChar* who ); /* fwds */
110 
111 #define HG_CLI__DEFAULT_MALLOC_REDZONE_SZB 16 /* let's say */
112 
113 // 0 for none, 1 for dump at end of run
114 #define SHOW_DATA_STRUCTURES 0
115 
116 
117 /* ------------ Misc comments ------------ */
118 
119 // FIXME: don't hardwire initial entries for root thread.
120 // Instead, let the pre_thread_ll_create handler do this.
121 
122 
123 /*----------------------------------------------------------------*/
124 /*--- Primary data structures                                  ---*/
125 /*----------------------------------------------------------------*/
126 
127 /* Admin linked list of Threads */
128 static Thread* admin_threads = NULL;
129 Thread* get_admin_threads ( void ) { return admin_threads; }
130 
131 /* Admin doubly-linked list of Locks */
132 /* We need a doubly-linked list to handle del_LockN properly and
133    efficiently. */
134 static Lock* admin_locks = NULL;
135 
136 /* Mapping table for core ThreadIds to Thread* */
137 static Thread** map_threads = NULL; /* Array[VG_N_THREADS] of Thread* */
138 
139 /* Mapping table for lock guest addresses to Lock* */
140 static WordFM* map_locks = NULL; /* WordFM LockAddr Lock* */
141 
142 /* The word-set universes for lock sets. */
143 static WordSetU* univ_lsets = NULL; /* sets of Lock* */
144 static WordSetU* univ_laog  = NULL; /* sets of Lock*, for LAOG */
145 static Int next_gc_univ_laog = 1;
146 /* univ_laog will be garbage collected when the number of elements in
147    univ_laog is >= next_gc_univ_laog. */
148 
149 /* Allow libhb to get at the universe of locksets stored
150    here.  Sigh. */
151 WordSetU* HG_(get_univ_lsets) ( void ) { return univ_lsets; }
152 
153 /* Allow libhb to get at the list of locks stored here.  Ditto
154    sigh. */
155 Lock* HG_(get_admin_locks) ( void ) { return admin_locks; }
156 
157 
158 /*----------------------------------------------------------------*/
159 /*--- Simple helpers for the data structures                   ---*/
160 /*----------------------------------------------------------------*/
161 
162 static UWord stats__lockN_acquires = 0;
163 static UWord stats__lockN_releases = 0;
164 
165 #if defined(VGO_solaris)
166 Bool HG_(clo_ignore_thread_creation) = True;
167 #else
168 Bool HG_(clo_ignore_thread_creation) = False;
169 #endif /* VGO_solaris */
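/* (Thread creation is ignored by default on Solaris because its libc
   finishes thread setup in the created thread itself, using fine-grained
   locking; tracking that activity would lead Helgrind to assume orderings
   between creator and created thread that do not really exist.  The
   default can be changed with the --ignore-thread-creation option.) */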
170 
171 static
172 ThreadId map_threads_maybe_reverse_lookup_SLOW ( Thread* thr ); /*fwds*/
173 
174 /* --------- Constructors --------- */
175 
176 static Thread* mk_Thread ( Thr* hbthr ) {
177    static Int indx      = 1;
178    Thread* thread       = HG_(zalloc)( "hg.mk_Thread.1", sizeof(Thread) );
179    thread->locksetA     = HG_(emptyWS)( univ_lsets );
180    thread->locksetW     = HG_(emptyWS)( univ_lsets );
181    thread->magic        = Thread_MAGIC;
182    thread->hbthr        = hbthr;
183    thread->coretid      = VG_INVALID_THREADID;
184    thread->created_at   = NULL;
185    thread->announced    = False;
186    thread->first_sp_delta = 0;
187    thread->errmsg_index = indx++;
188    thread->admin        = admin_threads;
189    thread->synchr_nesting = 0;
190    thread->pthread_create_nesting_level = 0;
191 #if defined(VGO_solaris)
192    thread->bind_guard_flag = 0;
193 #endif /* VGO_solaris */
194 
195    admin_threads        = thread;
196    return thread;
197 }
198 
199 // Make a new lock which is unlocked (hence ownerless)
200 // and insert it into the admin_locks doubly-linked list.
201 static Lock* mk_LockN ( LockKind kind, Addr guestaddr ) {
202    static ULong unique = 0;
203    Lock* lock             = HG_(zalloc)( "hg.mk_Lock.1", sizeof(Lock) );
204    /* begin: add to double linked list */
205    if (admin_locks)
206       admin_locks->admin_prev = lock;
207    lock->admin_next       = admin_locks;
208    lock->admin_prev       = NULL;
209    admin_locks            = lock;
210    /* end: add */
211    lock->unique           = unique++;
212    lock->magic            = LockN_MAGIC;
213    lock->appeared_at      = NULL;
214    lock->acquired_at      = NULL;
215    lock->hbso             = libhb_so_alloc();
216    lock->guestaddr        = guestaddr;
217    lock->kind             = kind;
218    lock->heldW            = False;
219    lock->heldBy           = NULL;
220    tl_assert(HG_(is_sane_LockN)(lock));
221    return lock;
222 }
223 
224 /* Release storage for a Lock.  Also release storage in .heldBy, if
225    any.  Removes it from the admin_locks doubly-linked list. */
226 static void del_LockN ( Lock* lk )
227 {
228    tl_assert(HG_(is_sane_LockN)(lk));
229    tl_assert(lk->hbso);
230    libhb_so_dealloc(lk->hbso);
231    if (lk->heldBy)
232       VG_(deleteBag)( lk->heldBy );
233    /* begin: del lock from double linked list */
234    if (lk == admin_locks) {
235       tl_assert(lk->admin_prev == NULL);
236       if (lk->admin_next)
237          lk->admin_next->admin_prev = NULL;
238       admin_locks = lk->admin_next;
239    }
240    else {
241       tl_assert(lk->admin_prev != NULL);
242       lk->admin_prev->admin_next = lk->admin_next;
243       if (lk->admin_next)
244          lk->admin_next->admin_prev = lk->admin_prev;
245    }
246    /* end: del */
247    VG_(memset)(lk, 0xAA, sizeof(*lk));
248    HG_(free)(lk);
249 }
250 
251 /* Update 'lk' to reflect that 'thr' now has a write-acquisition of
252    it.  This is done strictly: only combinations resulting from
253    correct program and libpthread behaviour are allowed. */
254 static void lockN_acquire_writer ( Lock* lk, Thread* thr )
255 {
256    tl_assert(HG_(is_sane_LockN)(lk));
257    tl_assert(HG_(is_sane_Thread)(thr));
258 
259    stats__lockN_acquires++;
260 
261    /* EXPOSITION only */
262    /* We need to keep recording snapshots of where the lock was
263       acquired, so as to produce better lock-order error messages. */
264    if (lk->acquired_at == NULL) {
265       ThreadId tid;
266       tl_assert(lk->heldBy == NULL);
267       tid = map_threads_maybe_reverse_lookup_SLOW(thr);
268       lk->acquired_at
269          = VG_(record_ExeContext)(tid, 0/*first_ip_delta*/);
270    } else {
271       tl_assert(lk->heldBy != NULL);
272    }
273    /* end EXPOSITION only */
274 
275    switch (lk->kind) {
276       case LK_nonRec:
277       case_LK_nonRec:
278          tl_assert(lk->heldBy == NULL); /* can't w-lock recursively */
279          tl_assert(!lk->heldW);
280          lk->heldW  = True;
281          lk->heldBy = VG_(newBag)( HG_(zalloc), "hg.lNaw.1", HG_(free) );
282          VG_(addToBag)( lk->heldBy, (UWord)thr );
283          break;
284       case LK_mbRec:
285          if (lk->heldBy == NULL)
286             goto case_LK_nonRec;
287          /* 2nd and subsequent locking of a lock by its owner */
288          tl_assert(lk->heldW);
289          /* assert: lk is only held by one thread .. */
290          tl_assert(VG_(sizeUniqueBag)(lk->heldBy) == 1);
291          /* assert: .. and that thread is 'thr'. */
292          tl_assert(VG_(elemBag)(lk->heldBy, (UWord)thr)
293                    == VG_(sizeTotalBag)(lk->heldBy));
294          VG_(addToBag)(lk->heldBy, (UWord)thr);
295          break;
296       case LK_rdwr:
297          tl_assert(lk->heldBy == NULL && !lk->heldW); /* must be unheld */
298          goto case_LK_nonRec;
299       default:
300          tl_assert(0);
301    }
302    tl_assert(HG_(is_sane_LockN)(lk));
303 }
304 
305 static void lockN_acquire_reader ( Lock* lk, Thread* thr )
306 {
307    tl_assert(HG_(is_sane_LockN)(lk));
308    tl_assert(HG_(is_sane_Thread)(thr));
309    /* can only add reader to a reader-writer lock. */
310    tl_assert(lk->kind == LK_rdwr);
311    /* lk must be free or already r-held. */
312    tl_assert(lk->heldBy == NULL
313              || (lk->heldBy != NULL && !lk->heldW));
314 
315    stats__lockN_acquires++;
316 
317    /* EXPOSITION only */
318    /* We need to keep recording snapshots of where the lock was
319       acquired, so as to produce better lock-order error messages. */
320    if (lk->acquired_at == NULL) {
321       ThreadId tid;
322       tl_assert(lk->heldBy == NULL);
323       tid = map_threads_maybe_reverse_lookup_SLOW(thr);
324       lk->acquired_at
325          = VG_(record_ExeContext)(tid, 0/*first_ip_delta*/);
326    } else {
327       tl_assert(lk->heldBy != NULL);
328    }
329    /* end EXPOSITION only */
330 
331    if (lk->heldBy) {
332       VG_(addToBag)(lk->heldBy, (UWord)thr);
333    } else {
334       lk->heldW  = False;
335       lk->heldBy = VG_(newBag)( HG_(zalloc), "hg.lNar.1", HG_(free) );
336       VG_(addToBag)( lk->heldBy, (UWord)thr );
337    }
338    tl_assert(!lk->heldW);
339    tl_assert(HG_(is_sane_LockN)(lk));
340 }
341 
342 /* Update 'lk' to reflect a release of it by 'thr'.  This is done
343    strictly: only combinations resulting from correct program and
344    libpthread behaviour are allowed. */
345 
346 static void lockN_release ( Lock* lk, Thread* thr )
347 {
348    Bool b;
349    tl_assert(HG_(is_sane_LockN)(lk));
350    tl_assert(HG_(is_sane_Thread)(thr));
351    /* lock must be held by someone */
352    tl_assert(lk->heldBy);
353    stats__lockN_releases++;
354    /* Remove it from the holder set */
355    b = VG_(delFromBag)(lk->heldBy, (UWord)thr);
356    /* thr must actually have been a holder of lk */
357    tl_assert(b);
358    /* normalise */
359    tl_assert(lk->acquired_at);
360    if (VG_(isEmptyBag)(lk->heldBy)) {
361       VG_(deleteBag)(lk->heldBy);
362       lk->heldBy      = NULL;
363       lk->heldW       = False;
364       lk->acquired_at = NULL;
365    }
366    tl_assert(HG_(is_sane_LockN)(lk));
367 }
368 
369 static void remove_Lock_from_locksets_of_all_owning_Threads( Lock* lk )
370 {
371    Thread* thr;
372    if (!lk->heldBy) {
373       tl_assert(!lk->heldW);
374       return;
375    }
376    /* for each thread that holds this lock do ... */
377    VG_(initIterBag)( lk->heldBy );
378    while (VG_(nextIterBag)( lk->heldBy, (UWord*)&thr, NULL )) {
379       tl_assert(HG_(is_sane_Thread)(thr));
380       tl_assert(HG_(elemWS)( univ_lsets,
381                              thr->locksetA, (UWord)lk ));
382       thr->locksetA
383          = HG_(delFromWS)( univ_lsets, thr->locksetA, (UWord)lk );
384 
385       if (lk->heldW) {
386          tl_assert(HG_(elemWS)( univ_lsets,
387                                 thr->locksetW, (UWord)lk ));
388          thr->locksetW
389             = HG_(delFromWS)( univ_lsets, thr->locksetW, (UWord)lk );
390       }
391    }
392    VG_(doneIterBag)( lk->heldBy );
393 }
394 
395 
396 /*----------------------------------------------------------------*/
397 /*--- Print out the primary data structures                    ---*/
398 /*----------------------------------------------------------------*/
399 
400 #define PP_THREADS      (1<<1)
401 #define PP_LOCKS        (1<<2)
402 #define PP_ALL (PP_THREADS | PP_LOCKS)
403 
404 
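// Set to 1 to also dump the admin/magic fields in pp_Thread and pp_Lock.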
405 static const Int sHOW_ADMIN = 0;
406 
407 static void space ( Int n )
408 {
409    Int  i;
410    HChar spaces[128+1];
411    tl_assert(n >= 0 && n < 128);
412    if (n == 0)
413       return;
414    for (i = 0; i < n; i++)
415       spaces[i] = ' ';
416    spaces[i] = 0;
417    tl_assert(i < 128+1);
418    VG_(printf)("%s", spaces);
419 }
420 
421 static void pp_Thread ( Int d, Thread* t )
422 {
423    space(d+0); VG_(printf)("Thread %p {\n", t);
424    if (sHOW_ADMIN) {
425    space(d+3); VG_(printf)("admin    %p\n",   t->admin);
426    space(d+3); VG_(printf)("magic    0x%x\n", (UInt)t->magic);
427    }
428    space(d+3); VG_(printf)("locksetA %d\n",   (Int)t->locksetA);
429    space(d+3); VG_(printf)("locksetW %d\n",   (Int)t->locksetW);
430    space(d+0); VG_(printf)("}\n");
431 }
432 
433 static void pp_admin_threads ( Int d )
434 {
435    Int     i, n;
436    Thread* t;
437    for (n = 0, t = admin_threads;  t;  n++, t = t->admin) {
438       /* nothing */
439    }
440    space(d); VG_(printf)("admin_threads (%d records) {\n", n);
441    for (i = 0, t = admin_threads;  t;  i++, t = t->admin) {
442       if (0) {
443          space(n);
444          VG_(printf)("admin_threads record %d of %d:\n", i, n);
445       }
446       pp_Thread(d+3, t);
447    }
448    space(d); VG_(printf)("}\n");
449 }
450 
451 static void pp_map_threads ( Int d )
452 {
453    Int i, n = 0;
454    space(d); VG_(printf)("map_threads ");
455    for (i = 0; i < VG_N_THREADS; i++) {
456       if (map_threads[i] != NULL)
457          n++;
458    }
459    VG_(printf)("(%d entries) {\n", n);
460    for (i = 0; i < VG_N_THREADS; i++) {
461       if (map_threads[i] == NULL)
462          continue;
463       space(d+3);
464       VG_(printf)("coretid %d -> Thread %p\n", i, map_threads[i]);
465    }
466    space(d); VG_(printf)("}\n");
467 }
468 
469 static const HChar* show_LockKind ( LockKind lkk ) {
470    switch (lkk) {
471       case LK_mbRec:  return "mbRec";
472       case LK_nonRec: return "nonRec";
473       case LK_rdwr:   return "rdwr";
474       default:        tl_assert(0);
475    }
476 }
477 
478 /* Pretty-print lock lk.
479    If show_lock_addrdescr, describe the (guest) lock address
480      (this description will be more complete with --read-var-info=yes).
481    If show_internal_data, also show Helgrind-internal information.
482    d is the level at which output is indented. */
483 static void pp_Lock ( Int d, Lock* lk,
484                       Bool show_lock_addrdescr,
485                       Bool show_internal_data)
486 {
487    // FIXME PW EPOCH should use the epoch of the allocated_at ec.
488    const DiEpoch cur_ep = VG_(current_DiEpoch)();
489    space(d+0);
490    if (show_internal_data)
491       VG_(printf)("Lock %p (ga %#lx) {\n", lk, lk->guestaddr);
492    else
493       VG_(printf)("Lock ga %#lx {\n", lk->guestaddr);
494    if (!show_lock_addrdescr
495        || !HG_(get_and_pp_addrdescr) (cur_ep, (Addr) lk->guestaddr))
496       VG_(printf)("\n");
497 
498    if (sHOW_ADMIN) {
499       space(d+3); VG_(printf)("admin_n  %p\n",   lk->admin_next);
500       space(d+3); VG_(printf)("admin_p  %p\n",   lk->admin_prev);
501       space(d+3); VG_(printf)("magic    0x%x\n", (UInt)lk->magic);
502    }
503    if (show_internal_data) {
504       space(d+3); VG_(printf)("unique %llu\n", lk->unique);
505    }
506    space(d+3); VG_(printf)("kind   %s\n", show_LockKind(lk->kind));
507    if (show_internal_data) {
508       space(d+3); VG_(printf)("heldW  %s\n", lk->heldW ? "yes" : "no");
509    }
510    if (show_internal_data) {
511       space(d+3); VG_(printf)("heldBy %p", lk->heldBy);
512    }
513    if (lk->heldBy) {
514       Thread* thr;
515       UWord   count;
516       VG_(printf)(" { ");
517       VG_(initIterBag)( lk->heldBy );
518       while (VG_(nextIterBag)( lk->heldBy, (UWord*)&thr, &count )) {
519          if (show_internal_data)
520             VG_(printf)("%lu:%p ", count, thr);
521          else {
522             VG_(printf)("%c%lu:thread #%d ",
523                         lk->heldW ? 'W' : 'R',
524                         count, thr->errmsg_index);
525             if (thr->coretid == VG_INVALID_THREADID)
526                VG_(printf)("tid (exited) ");
527             else
528                VG_(printf)("tid %u ", thr->coretid);
529 
530          }
531       }
532       VG_(doneIterBag)( lk->heldBy );
533       VG_(printf)("}\n");
534    }
535    space(d+0); VG_(printf)("}\n");
536 }
537 
538 static void pp_admin_locks ( Int d )
539 {
540    Int   i, n;
541    Lock* lk;
542    for (n = 0, lk = admin_locks;  lk;  n++, lk = lk->admin_next) {
543       /* nothing */
544    }
545    space(d); VG_(printf)("admin_locks (%d records) {\n", n);
546    for (i = 0, lk = admin_locks;  lk;  i++, lk = lk->admin_next) {
547       if (0) {
548          space(n);
549          VG_(printf)("admin_locks record %d of %d:\n", i, n);
550       }
551       pp_Lock(d+3, lk,
552               False /* show_lock_addrdescr */,
553               True /* show_internal_data */);
554    }
555    space(d); VG_(printf)("}\n");
556 }
557 
558 static void pp_map_locks ( Int d )
559 {
560    void* gla;
561    Lock* lk;
562    space(d); VG_(printf)("map_locks (%d entries) {\n",
563                          (Int)VG_(sizeFM)( map_locks ));
564    VG_(initIterFM)( map_locks );
565    while (VG_(nextIterFM)( map_locks, (UWord*)&gla,
566                                       (UWord*)&lk )) {
567       space(d+3);
568       VG_(printf)("guest %p -> Lock %p\n", gla, lk);
569    }
570    VG_(doneIterFM)( map_locks );
571    space(d); VG_(printf)("}\n");
572 }
573 
574 static void pp_everything ( Int flags, const HChar* caller )
575 {
576    Int d = 0;
577    VG_(printf)("\n");
578    VG_(printf)("All_Data_Structures (caller = \"%s\") {\n", caller);
579    if (flags & PP_THREADS) {
580       VG_(printf)("\n");
581       pp_admin_threads(d+3);
582       VG_(printf)("\n");
583       pp_map_threads(d+3);
584    }
585    if (flags & PP_LOCKS) {
586       VG_(printf)("\n");
587       pp_admin_locks(d+3);
588       VG_(printf)("\n");
589       pp_map_locks(d+3);
590    }
591 
592    VG_(printf)("\n");
593    VG_(printf)("}\n");
594    VG_(printf)("\n");
595 }
596 
597 #undef SHOW_ADMIN
598 
599 
600 /*----------------------------------------------------------------*/
601 /*--- Initialise the primary data structures                   ---*/
602 /*----------------------------------------------------------------*/
603 
604 static void initialise_data_structures ( Thr* hbthr_root )
605 {
606    Thread*   thr;
607    WordSetID wsid;
608 
609    /* Get everything initialised and zeroed. */
610    tl_assert(admin_threads == NULL);
611    tl_assert(admin_locks == NULL);
612 
613    tl_assert(map_threads == NULL);
614    map_threads = HG_(zalloc)( "hg.ids.1", VG_N_THREADS * sizeof(Thread*) );
615 
616    tl_assert(sizeof(Addr) == sizeof(UWord));
617    tl_assert(map_locks == NULL);
618    map_locks = VG_(newFM)( HG_(zalloc), "hg.ids.2", HG_(free),
619                            NULL/*unboxed Word cmp*/);
620 
621    tl_assert(univ_lsets == NULL);
622    univ_lsets = HG_(newWordSetU)( HG_(zalloc), "hg.ids.4", HG_(free),
623                                   8/*cacheSize*/ );
624    tl_assert(univ_lsets != NULL);
625    /* Ensure that univ_lsets is non-empty, with lockset zero being the
626       empty lockset.  hg_errors.c relies on the assumption that
627       lockset number zero in univ_lsets is always valid. */
628    wsid = HG_(emptyWS)(univ_lsets);
629    tl_assert(wsid == 0);
630 
631    tl_assert(univ_laog == NULL);
632    if (HG_(clo_track_lockorders)) {
633       univ_laog = HG_(newWordSetU)( HG_(zalloc), "hg.ids.5 (univ_laog)",
634                                     HG_(free), 24/*cacheSize*/ );
635       tl_assert(univ_laog != NULL);
636    }
637 
638    /* Set up entries for the root thread */
639    // FIXME: this assumes that the first real ThreadId is 1
640 
641    /* a Thread for the new thread ... */
642    thr = mk_Thread(hbthr_root);
643    thr->coretid = 1; /* FIXME: hardwires an assumption about the
644                         identity of the root thread. */
645    tl_assert( libhb_get_Thr_hgthread(hbthr_root) == NULL );
646    libhb_set_Thr_hgthread(hbthr_root, thr);
647 
648    /* and bind it in the thread-map table. */
649    tl_assert(HG_(is_sane_ThreadId)(thr->coretid));
650    tl_assert(thr->coretid != VG_INVALID_THREADID);
651 
652    map_threads[thr->coretid] = thr;
653 
654    tl_assert(VG_INVALID_THREADID == 0);
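   /* Since VG_INVALID_THREADID is 0, slot 0 of map_threads is never used
      for a real thread and stays NULL; map_threads_maybe_reverse_lookup_SLOW
      relies on this. */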
655 
656    all__sanity_check("initialise_data_structures");
657 }
658 
659 
660 /*----------------------------------------------------------------*/
661 /*--- map_threads :: array[core-ThreadId] of Thread*           ---*/
662 /*----------------------------------------------------------------*/
663 
664 /* Doesn't assert if the relevant map_threads entry is NULL. */
665 static Thread* map_threads_maybe_lookup ( ThreadId coretid )
666 {
667    Thread* thr;
668    tl_assert( HG_(is_sane_ThreadId)(coretid) );
669    thr = map_threads[coretid];
670    return thr;
671 }
672 
673 /* Asserts if the relevant map_threads entry is NULL. */
674 static inline Thread* map_threads_lookup ( ThreadId coretid )
675 {
676    Thread* thr;
677    tl_assert( HG_(is_sane_ThreadId)(coretid) );
678    thr = map_threads[coretid];
679    tl_assert(thr);
680    return thr;
681 }
682 
683 /* Do a reverse lookup.  Does not assert if 'thr' is not found in
684    map_threads. */
685 static ThreadId map_threads_maybe_reverse_lookup_SLOW ( Thread* thr )
686 {
687    ThreadId tid;
688    tl_assert(HG_(is_sane_Thread)(thr));
689    /* Check nobody used the invalid-threadid slot */
690    tl_assert(VG_INVALID_THREADID >= 0 && VG_INVALID_THREADID < VG_N_THREADS);
691    tl_assert(map_threads[VG_INVALID_THREADID] == NULL);
692    tid = thr->coretid;
693    tl_assert(HG_(is_sane_ThreadId)(tid));
694    return tid;
695 }
696 
697 /* Do a reverse lookup.  Warning: POTENTIALLY SLOW.  Asserts if 'thr'
698    is not found in map_threads. */
699 static ThreadId map_threads_reverse_lookup_SLOW ( Thread* thr )
700 {
701    ThreadId tid = map_threads_maybe_reverse_lookup_SLOW( thr );
702    tl_assert(tid != VG_INVALID_THREADID);
703    tl_assert(map_threads[tid]);
704    tl_assert(map_threads[tid]->coretid == tid);
705    return tid;
706 }
707 
708 static void map_threads_delete ( ThreadId coretid )
709 {
710    Thread* thr;
711    tl_assert(coretid != 0);
712    tl_assert( HG_(is_sane_ThreadId)(coretid) );
713    thr = map_threads[coretid];
714    tl_assert(thr);
715    map_threads[coretid] = NULL;
716 }
717 
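/* synchr_nesting tracks how deeply the thread is currently nested inside
   intercepted synchronisation primitives.  The counter is only maintained
   on Solaris, where accesses made from inside such primitives are treated
   specially. */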
718 static void HG_(thread_enter_synchr)(Thread *thr) {
719    tl_assert(thr->synchr_nesting >= 0);
720 #if defined(VGO_solaris)
721    thr->synchr_nesting += 1;
722 #endif /* VGO_solaris */
723 }
724 
725 static void HG_(thread_leave_synchr)(Thread *thr) {
726 #if defined(VGO_solaris)
727    thr->synchr_nesting -= 1;
728 #endif /* VGO_solaris */
729    tl_assert(thr->synchr_nesting >= 0);
730 }
731 
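/* pthread_create_nesting_level > 0 means the thread is currently inside a
   pthread_create() call; while that is the case, newly-mapped memory is
   marked Untracked (see evh__new_mem and friends below). */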
732 static void HG_(thread_enter_pthread_create)(Thread *thr) {
733    tl_assert(thr->pthread_create_nesting_level >= 0);
734    thr->pthread_create_nesting_level += 1;
735 }
736 
737 static void HG_(thread_leave_pthread_create)(Thread *thr) {
738    tl_assert(thr->pthread_create_nesting_level > 0);
739    thr->pthread_create_nesting_level -= 1;
740 }
741 
742 static Int HG_(get_pthread_create_nesting_level)(ThreadId tid) {
743    Thread *thr = map_threads_maybe_lookup(tid);
744    return thr->pthread_create_nesting_level;
745 }
746 
747 /*----------------------------------------------------------------*/
748 /*--- map_locks :: WordFM guest-Addr-of-lock Lock*             ---*/
749 /*----------------------------------------------------------------*/
750 
751 /* Make sure there is a lock table entry for the given (lock) guest
752    address.  If not, create one of the stated 'kind' in unheld state.
753    In any case, return the address of the existing or new Lock. */
754 static
755 Lock* map_locks_lookup_or_create ( LockKind lkk, Addr ga, ThreadId tid )
756 {
757    Bool  found;
758    Lock* oldlock = NULL;
759    tl_assert(HG_(is_sane_ThreadId)(tid));
760    found = VG_(lookupFM)( map_locks,
761                           NULL, (UWord*)&oldlock, (UWord)ga );
762    if (!found) {
763       Lock* lock = mk_LockN(lkk, ga);
764       lock->appeared_at = VG_(record_ExeContext)( tid, 0 );
765       tl_assert(HG_(is_sane_LockN)(lock));
766       VG_(addToFM)( map_locks, (UWord)ga, (UWord)lock );
767       tl_assert(oldlock == NULL);
768       return lock;
769    } else {
770       tl_assert(oldlock != NULL);
771       tl_assert(HG_(is_sane_LockN)(oldlock));
772       tl_assert(oldlock->guestaddr == ga);
773       return oldlock;
774    }
775 }
776 
777 static Lock* map_locks_maybe_lookup ( Addr ga )
778 {
779    Bool  found;
780    Lock* lk = NULL;
781    found = VG_(lookupFM)( map_locks, NULL, (UWord*)&lk, (UWord)ga );
782    tl_assert(found  ?  lk != NULL  :  lk == NULL);
783    return lk;
784 }
785 
786 static void map_locks_delete ( Addr ga )
787 {
788    Addr  ga2 = 0;
789    Lock* lk  = NULL;
790    VG_(delFromFM)( map_locks,
791                    (UWord*)&ga2, (UWord*)&lk, (UWord)ga );
792    /* delFromFM produces the val which is being deleted, if it is
793       found.  So assert it is non-null; that in effect asserts that we
794       are deleting a (ga, Lock) pair which actually exists. */
795    tl_assert(lk != NULL);
796    tl_assert(ga2 == ga);
797 }
798 
799 
800 
801 /*----------------------------------------------------------------*/
802 /*--- Sanity checking the data structures                      ---*/
803 /*----------------------------------------------------------------*/
804 
805 static UWord stats__sanity_checks = 0;
806 
807 static void laog__sanity_check ( const HChar* who ); /* fwds */
808 
809 /* REQUIRED INVARIANTS:
810 
811    Thread vs Segment/Lock/SecMaps
812 
813       for each t in Threads {
814 
815          // Thread.lockset: each element is really a valid Lock
816 
817          // Thread.lockset: each Lock in set is actually held by that thread
818          for lk in Thread.lockset
819             lk == LockedBy(t)
820 
821          // Thread.csegid is a valid SegmentID
822          // and the associated Segment has .thr == t
823 
824       }
825 
826       all thread Locksets are pairwise empty under intersection
827       (that is, no lock is claimed to be held by more than one thread)
828       -- this is guaranteed if all locks in locksets point back to their
829       owner threads
830 
831    Lock vs Thread/Segment/SecMaps
832 
833       for each entry (gla, la) in map_locks
834          gla == la->guest_addr
835 
836       for each lk in Locks {
837 
838          lk->tag is valid
839          lk->guest_addr does not have shadow state NoAccess
840          if lk == LockedBy(t), then t->lockset contains lk
841          if lk == UnlockedBy(segid) then segid is valid SegmentID
842              and can be mapped to a valid Segment(seg)
843              and seg->thr->lockset does not contain lk
844          if lk == UnlockedNew then (no lockset contains lk)
845 
846          secmaps for lk has .mbHasLocks == True
847 
848       }
849 
850    Segment vs Thread/Lock/SecMaps
851 
852       the Segment graph is a dag (no cycles)
853       all of the Segment graph must be reachable from the segids
854          mentioned in the Threads
855 
856       for seg in Segments {
857 
858          seg->thr is a sane Thread
859 
860       }
861 
862    SecMaps vs Segment/Thread/Lock
863 
864       for sm in SecMaps {
865 
866          sm properly aligned
867          if any shadow word is ShR or ShM then .mbHasShared == True
868 
869          for each Excl(segid) state
870             map_segments_lookup maps to a sane Segment(seg)
871          for each ShM/ShR(tsetid,lsetid) state
872             each lk in lset is a valid Lock
873             each thr in tset is a valid thread, which is non-dead
874 
875       }
876 */
877 
878 
879 /* Return True iff 'thr' holds 'lk' in some mode. */
880 static Bool thread_is_a_holder_of_Lock ( Thread* thr, Lock* lk )
881 {
882    if (lk->heldBy)
883       return VG_(elemBag)( lk->heldBy, (UWord)thr ) > 0;
884    else
885       return False;
886 }
887 
888 /* Sanity check Threads, as far as possible */
889 __attribute__((noinline))
890 static void threads__sanity_check ( const HChar* who )
891 {
892 #define BAD(_str) do { how = (_str); goto bad; } while (0)
893    const HChar* how = "no error";
894    Thread*   thr;
895    WordSetID wsA, wsW;
896    UWord*    ls_words;
897    UWord     ls_size, i;
898    Lock*     lk;
899    for (thr = admin_threads; thr; thr = thr->admin) {
900       if (!HG_(is_sane_Thread)(thr)) BAD("1");
901       wsA = thr->locksetA;
902       wsW = thr->locksetW;
903       // locks held in W mode are a subset of all locks held
904       if (!HG_(isSubsetOf)( univ_lsets, wsW, wsA )) BAD("7");
905       HG_(getPayloadWS)( &ls_words, &ls_size, univ_lsets, wsA );
906       for (i = 0; i < ls_size; i++) {
907          lk = (Lock*)ls_words[i];
908          // Thread.lockset: each element is really a valid Lock
909          if (!HG_(is_sane_LockN)(lk)) BAD("2");
910          // Thread.lockset: each Lock in set is actually held by that
911          // thread
912          if (!thread_is_a_holder_of_Lock(thr,lk)) BAD("3");
913       }
914    }
915    return;
916   bad:
917    VG_(printf)("threads__sanity_check: who=\"%s\", bad=\"%s\"\n", who, how);
918    tl_assert(0);
919 #undef BAD
920 }
921 
922 
923 /* Sanity check Locks, as far as possible */
924 __attribute__((noinline))
925 static void locks__sanity_check ( const HChar* who )
926 {
927 #define BAD(_str) do { how = (_str); goto bad; } while (0)
928    const HChar* how = "no error";
929    Addr      gla;
930    Lock*     lk;
931    Int       i;
932    // # entries in admin_locks == # entries in map_locks
933    for (i = 0, lk = admin_locks;  lk;  i++, lk = lk->admin_next)
934       ;
935    if (i != VG_(sizeFM)(map_locks)) BAD("1");
936    // for each entry (gla, lk) in map_locks
937    //      gla == lk->guest_addr
938    VG_(initIterFM)( map_locks );
939    while (VG_(nextIterFM)( map_locks,
940                            (UWord*)&gla, (UWord*)&lk )) {
941       if (lk->guestaddr != gla) BAD("2");
942    }
943    VG_(doneIterFM)( map_locks );
944    // scan through admin_locks ...
945    for (lk = admin_locks; lk; lk = lk->admin_next) {
946       // lock is sane.  Quite comprehensive, also checks that
947       // referenced (holder) threads are sane.
948       if (!HG_(is_sane_LockN)(lk)) BAD("3");
949       // map_locks binds guest address back to this lock
950       if (lk != map_locks_maybe_lookup(lk->guestaddr)) BAD("4");
951       // look at all threads mentioned as holders of this lock.  Ensure
952       // this lock is mentioned in their locksets.
953       if (lk->heldBy) {
954          Thread* thr;
955          UWord   count;
956          VG_(initIterBag)( lk->heldBy );
957          while (VG_(nextIterBag)( lk->heldBy,
958                                   (UWord*)&thr, &count )) {
959             // HG_(is_sane_LockN) above ensures these
960             tl_assert(count >= 1);
961             tl_assert(HG_(is_sane_Thread)(thr));
962             if (!HG_(elemWS)(univ_lsets, thr->locksetA, (UWord)lk))
963                BAD("6");
964             // also check the w-only lockset
965             if (lk->heldW
966                 && !HG_(elemWS)(univ_lsets, thr->locksetW, (UWord)lk))
967                BAD("7");
968             if ((!lk->heldW)
969                 && HG_(elemWS)(univ_lsets, thr->locksetW, (UWord)lk))
970                BAD("8");
971          }
972          VG_(doneIterBag)( lk->heldBy );
973       } else {
974          /* lock not held by anybody */
975          if (lk->heldW) BAD("9"); /* should be False if !heldBy */
976          // since lk is unheld, then (no lockset contains lk)
977          // hmm, this is really too expensive to check.  Hmm.
978       }
979    }
980 
981    return;
982   bad:
983    VG_(printf)("locks__sanity_check: who=\"%s\", bad=\"%s\"\n", who, how);
984    tl_assert(0);
985 #undef BAD
986 }
987 
988 
989 static void all_except_Locks__sanity_check ( const HChar* who ) {
990    stats__sanity_checks++;
991    if (0) VG_(printf)("all_except_Locks__sanity_check(%s)\n", who);
992    threads__sanity_check(who);
993    if (HG_(clo_track_lockorders))
994       laog__sanity_check(who);
995 }
996 static void all__sanity_check ( const HChar* who ) {
997    all_except_Locks__sanity_check(who);
998    locks__sanity_check(who);
999 }
1000 
1001 
1002 /*----------------------------------------------------------------*/
1003 /*--- Shadow value and address range handlers                  ---*/
1004 /*----------------------------------------------------------------*/
1005 
1006 static void laog__pre_thread_acquires_lock ( Thread*, Lock* ); /* fwds */
1007 //static void laog__handle_lock_deletions    ( WordSetID ); /* fwds */
1008 static inline Thread* get_current_Thread ( void ); /* fwds */
1009 __attribute__((noinline))
1010 static void laog__handle_one_lock_deletion ( Lock* lk ); /* fwds */
1011 
1012 
1013 /* Block-copy states (needed for implementing realloc()). */
1014 /* FIXME this copies shadow memory; it doesn't apply the MSM to it.
1015    Is that a problem? (hence 'scopy' rather than 'ccopy') */
1016 static void shadow_mem_scopy_range ( Thread* thr,
1017                                      Addr src, Addr dst, SizeT len )
1018 {
1019    Thr*     hbthr = thr->hbthr;
1020    tl_assert(hbthr);
1021    libhb_copy_shadow_state( hbthr, src, dst, len );
1022 }
1023 
1024 static void shadow_mem_cread_range ( Thread* thr, Addr a, SizeT len )
1025 {
1026    Thr*     hbthr = thr->hbthr;
1027    tl_assert(hbthr);
1028    LIBHB_CREAD_N(hbthr, a, len);
1029 }
1030 
1031 static void shadow_mem_cwrite_range ( Thread* thr, Addr a, SizeT len ) {
1032    Thr*     hbthr = thr->hbthr;
1033    tl_assert(hbthr);
1034    LIBHB_CWRITE_N(hbthr, a, len);
1035 }
1036 
1037 inline static void shadow_mem_make_New ( Thread* thr, Addr a, SizeT len )
1038 {
1039    libhb_srange_new( thr->hbthr, a, len );
1040 }
1041 
1042 inline static void shadow_mem_make_NoAccess_NoFX ( Thread* thr, Addr aIN,
1043                                                    SizeT len )
1044 {
1045    if (0 && len > 500)
1046       VG_(printf)("make NoAccess_NoFX ( %#lx, %lu )\n", aIN, len );
1047    // has no effect (NoFX)
1048    libhb_srange_noaccess_NoFX( thr->hbthr, aIN, len );
1049 }
1050 
1051 inline static void shadow_mem_make_NoAccess_AHAE ( Thread* thr, Addr aIN,
1052                                                    SizeT len)
1053 {
1054    if (0 && len > 500)
1055       VG_(printf)("make NoAccess_AHAE ( %#lx, %lu )\n", aIN, len );
1056    // Actually Has An Effect (AHAE)
1057    libhb_srange_noaccess_AHAE( thr->hbthr, aIN, len );
1058 }
1059 
1060 inline static void shadow_mem_make_Untracked ( Thread* thr, Addr aIN,
1061                                                SizeT len )
1062 {
1063    if (0 && len > 500)
1064       VG_(printf)("make Untracked ( %#lx, %lu )\n", aIN, len );
1065    libhb_srange_untrack( thr->hbthr, aIN, len );
1066 }
1067 
1068 
1069 /*----------------------------------------------------------------*/
1070 /*--- Event handlers (evh__* functions)                        ---*/
1071 /*--- plus helpers (evhH__* functions)                         ---*/
1072 /*----------------------------------------------------------------*/
1073 
1074 /*--------- Event handler helpers (evhH__* functions) ---------*/
1075 
1076 /* Create a new segment for 'thr', making it depend (.prev) on its
1077    existing segment, bind together the SegmentID and Segment, and
1078    return both of them.  Also update 'thr' so it references the new
1079    Segment. */
1080 //zz static
1081 //zz void evhH__start_new_segment_for_thread ( /*OUT*/SegmentID* new_segidP,
1082 //zz                                           /*OUT*/Segment** new_segP,
1083 //zz                                           Thread* thr )
1084 //zz {
1085 //zz    Segment* cur_seg;
1086 //zz    tl_assert(new_segP);
1087 //zz    tl_assert(new_segidP);
1088 //zz    tl_assert(HG_(is_sane_Thread)(thr));
1089 //zz    cur_seg = map_segments_lookup( thr->csegid );
1090 //zz    tl_assert(cur_seg);
1091 //zz    tl_assert(cur_seg->thr == thr); /* all sane segs should point back
1092 //zz                                       at their owner thread. */
1093 //zz    *new_segP = mk_Segment( thr, cur_seg, NULL/*other*/ );
1094 //zz    *new_segidP = alloc_SegmentID();
1095 //zz    map_segments_add( *new_segidP, *new_segP );
1096 //zz    thr->csegid = *new_segidP;
1097 //zz }
1098 
1099 
1100 /* The lock at 'lock_ga' has acquired a writer.  Make all necessary
1101    updates, and also do all possible error checks. */
1102 static
1103 void evhH__post_thread_w_acquires_lock ( Thread* thr,
1104                                          LockKind lkk, Addr lock_ga )
1105 {
1106    Lock* lk;
1107 
1108    /* Basically what we need to do is call lockN_acquire_writer.
1109       However, that will barf if any 'invalid' lock states would
1110       result.  Therefore check before calling.  Side effect is that
1111       'HG_(is_sane_LockN)(lk)' is both a pre- and post-condition of this
1112       routine.
1113 
1114       Because this routine is only called after successful lock
1115       acquisition, we should not be asked to move the lock into any
1116       invalid states.  Requests to do so are bugs in libpthread, since
1117       that should have rejected any such requests. */
1118 
1119    tl_assert(HG_(is_sane_Thread)(thr));
1120    /* Try to find the lock.  If we can't, then create a new one with
1121       kind 'lkk'. */
1122    lk = map_locks_lookup_or_create(
1123            lkk, lock_ga, map_threads_reverse_lookup_SLOW(thr) );
1124    tl_assert( HG_(is_sane_LockN)(lk) );
1125 
1126    /* check libhb level entities exist */
1127    tl_assert(thr->hbthr);
1128    tl_assert(lk->hbso);
1129 
1130    if (lk->heldBy == NULL) {
1131       /* the lock isn't held.  Simple. */
1132       tl_assert(!lk->heldW);
1133       lockN_acquire_writer( lk, thr );
1134       /* acquire a dependency from the lock's VCs */
1135       libhb_so_recv( thr->hbthr, lk->hbso, True/*strong_recv*/ );
1136       goto noerror;
1137    }
1138 
1139    /* So the lock is already held.  If held as an r-lock then
1140       libpthread must be buggy. */
1141    tl_assert(lk->heldBy);
1142    if (!lk->heldW) {
1143       HG_(record_error_Misc)(
1144          thr, "Bug in libpthread: write lock "
1145               "granted on rwlock which is currently rd-held");
1146       goto error;
1147    }
1148 
1149    /* So the lock is held in w-mode.  If it's held by some other
1150       thread, then libpthread must be buggy. */
1151    tl_assert(VG_(sizeUniqueBag)(lk->heldBy) == 1); /* from precondition */
1152 
1153    if (thr != (Thread*)VG_(anyElementOfBag)(lk->heldBy)) {
1154       HG_(record_error_Misc)(
1155          thr, "Bug in libpthread: write lock "
1156               "granted on mutex/rwlock which is currently "
1157               "wr-held by a different thread");
1158       goto error;
1159    }
1160 
1161    /* So the lock is already held in w-mode by 'thr'.  That means this
1162       is an attempt to lock it recursively, which is only allowable
1163       for LK_mbRec kinded locks.  Since this routine is called only
1164       once the lock has been acquired, this must also be a libpthread
1165       bug. */
1166    if (lk->kind != LK_mbRec) {
1167       HG_(record_error_Misc)(
1168          thr, "Bug in libpthread: recursive write lock "
1169               "granted on mutex/wrlock which does not "
1170               "support recursion");
1171       goto error;
1172    }
1173 
1174    /* So we are recursively re-locking a lock we already w-hold. */
1175    lockN_acquire_writer( lk, thr );
1176    /* acquire a dependency from the lock's VC.  Probably pointless,
1177       but also harmless. */
1178    libhb_so_recv( thr->hbthr, lk->hbso, True/*strong_recv*/ );
1179    goto noerror;
1180 
1181   noerror:
1182    if (HG_(clo_track_lockorders)) {
1183       /* check lock order acquisition graph, and update.  This has to
1184          happen before the lock is added to the thread's locksetA/W. */
1185       laog__pre_thread_acquires_lock( thr, lk );
1186    }
1187    /* update the thread's held-locks set */
1188    thr->locksetA = HG_(addToWS)( univ_lsets, thr->locksetA, (UWord)lk );
1189    thr->locksetW = HG_(addToWS)( univ_lsets, thr->locksetW, (UWord)lk );
1190    /* fall through */
1191 
1192   error:
1193    tl_assert(HG_(is_sane_LockN)(lk));
1194 }
1195 
1196 
1197 /* The lock at 'lock_ga' has acquired a reader.  Make all necessary
1198    updates, and also do all possible error checks. */
1199 static
1200 void evhH__post_thread_r_acquires_lock ( Thread* thr,
1201                                          LockKind lkk, Addr lock_ga )
1202 {
1203    Lock* lk;
1204 
1205    /* Basically what we need to do is call lockN_acquire_reader.
1206       However, that will barf if any 'invalid' lock states would
1207       result.  Therefore check before calling.  Side effect is that
1208       'HG_(is_sane_LockN)(lk)' is both a pre- and post-condition of this
1209       routine.
1210 
1211       Because this routine is only called after successful lock
1212       acquisition, we should not be asked to move the lock into any
1213       invalid states.  Requests to do so are bugs in libpthread, since
1214       that should have rejected any such requests. */
1215 
1216    tl_assert(HG_(is_sane_Thread)(thr));
1217    /* Try to find the lock.  If we can't, then create a new one with
1218       kind 'lkk'.  Only a reader-writer lock can be read-locked,
1219       hence the first assertion. */
1220    tl_assert(lkk == LK_rdwr);
1221    lk = map_locks_lookup_or_create(
1222            lkk, lock_ga, map_threads_reverse_lookup_SLOW(thr) );
1223    tl_assert( HG_(is_sane_LockN)(lk) );
1224 
1225    /* check libhb level entities exist */
1226    tl_assert(thr->hbthr);
1227    tl_assert(lk->hbso);
1228 
1229    if (lk->heldBy == NULL) {
1230       /* the lock isn't held.  Simple. */
1231       tl_assert(!lk->heldW);
1232       lockN_acquire_reader( lk, thr );
1233       /* acquire a dependency from the lock's VC */
1234       libhb_so_recv( thr->hbthr, lk->hbso, False/*!strong_recv*/ );
1235       goto noerror;
1236    }
1237 
1238    /* So the lock is already held.  If held as a w-lock then
1239       libpthread must be buggy. */
1240    tl_assert(lk->heldBy);
1241    if (lk->heldW) {
1242       HG_(record_error_Misc)( thr, "Bug in libpthread: read lock "
1243                                    "granted on rwlock which is "
1244                                    "currently wr-held");
1245       goto error;
1246    }
1247 
1248    /* Easy enough.  In short anybody can get a read-lock on a rwlock
1249       provided it is either unlocked or already rd-held. */
1250    lockN_acquire_reader( lk, thr );
1251    /* acquire a dependency from the lock's VC.  Probably pointless,
1252       but also harmless. */
1253    libhb_so_recv( thr->hbthr, lk->hbso, False/*!strong_recv*/ );
1254    goto noerror;
1255 
1256   noerror:
1257    if (HG_(clo_track_lockorders)) {
1258       /* check lock order acquisition graph, and update.  This has to
1259          happen before the lock is added to the thread's locksetA/W. */
1260       laog__pre_thread_acquires_lock( thr, lk );
1261    }
1262    /* update the thread's held-locks set */
1263    thr->locksetA = HG_(addToWS)( univ_lsets, thr->locksetA, (UWord)lk );
1264    /* but don't update thr->locksetW, since lk is only rd-held */
1265    /* fall through */
1266 
1267   error:
1268    tl_assert(HG_(is_sane_LockN)(lk));
1269 }
1270 
1271 
1272 /* The lock at 'lock_ga' is just about to be unlocked.  Make all
1273    necessary updates, and also do all possible error checks. */
1274 static
1275 void evhH__pre_thread_releases_lock ( Thread* thr,
1276                                       Addr lock_ga, Bool isRDWR )
1277 {
1278    Lock* lock;
1279    Word  n;
1280    Bool  was_heldW;
1281 
1282    /* This routine is called prior to a lock release, before
1283       libpthread has had a chance to validate the call.  Hence we need
1284       to detect and reject any attempts to move the lock into an
1285       invalid state.  Such attempts are bugs in the client.
1286 
1287       isRDWR is True if we know from the wrapper context that lock_ga
1288       should refer to a reader-writer lock, and is False if [ditto]
1289       lock_ga should refer to a standard mutex. */
1290 
1291    tl_assert(HG_(is_sane_Thread)(thr));
1292    lock = map_locks_maybe_lookup( lock_ga );
1293 
1294    if (!lock) {
1295       /* We know nothing about a lock at 'lock_ga'.  Nevertheless
1296          the client is trying to unlock it.  So complain, then ignore
1297          the attempt. */
1298       HG_(record_error_UnlockBogus)( thr, lock_ga );
1299       return;
1300    }
1301 
1302    tl_assert(lock->guestaddr == lock_ga);
1303    tl_assert(HG_(is_sane_LockN)(lock));
1304 
1305    if (isRDWR && lock->kind != LK_rdwr) {
1306       HG_(record_error_Misc)( thr, "pthread_rwlock_unlock with a "
1307                                    "pthread_mutex_t* argument " );
1308    }
1309    if ((!isRDWR) && lock->kind == LK_rdwr) {
1310       HG_(record_error_Misc)( thr, "pthread_mutex_unlock with a "
1311                                    "pthread_rwlock_t* argument " );
1312    }
1313 
1314    if (!lock->heldBy) {
1315       /* The lock is not held.  This indicates a serious bug in the
1316          client. */
1317       tl_assert(!lock->heldW);
1318       HG_(record_error_UnlockUnlocked)( thr, lock );
1319       tl_assert(!HG_(elemWS)( univ_lsets, thr->locksetA, (UWord)lock ));
1320       tl_assert(!HG_(elemWS)( univ_lsets, thr->locksetW, (UWord)lock ));
1321       goto error;
1322    }
1323 
1324    /* test just above dominates */
1325    tl_assert(lock->heldBy);
1326    was_heldW = lock->heldW;
1327 
1328    /* The lock is held.  Is this thread one of the holders?  If not,
1329       report a bug in the client. */
1330    n = VG_(elemBag)( lock->heldBy, (UWord)thr );
1331    tl_assert(n >= 0);
1332    if (n == 0) {
1333       /* We are not a current holder of the lock.  This is a bug in
1334          the guest, and (per POSIX pthread rules) the unlock
1335          attempt will fail.  So just complain and do nothing
1336          else. */
1337       Thread* realOwner = (Thread*)VG_(anyElementOfBag)( lock->heldBy );
1338       tl_assert(HG_(is_sane_Thread)(realOwner));
1339       tl_assert(realOwner != thr);
1340       tl_assert(!HG_(elemWS)( univ_lsets, thr->locksetA, (UWord)lock ));
1341       tl_assert(!HG_(elemWS)( univ_lsets, thr->locksetW, (UWord)lock ));
1342       HG_(record_error_UnlockForeign)( thr, realOwner, lock );
1343       goto error;
1344    }
1345 
1346    /* Ok, we hold the lock 'n' times. */
1347    tl_assert(n >= 1);
1348 
1349    lockN_release( lock, thr );
1350 
1351    n--;
1352    tl_assert(n >= 0);
1353 
1354    if (n > 0) {
1355       tl_assert(lock->heldBy);
1356       tl_assert(n == VG_(elemBag)( lock->heldBy, (UWord)thr ));
1357       /* We still hold the lock.  So either it's a recursive lock
1358          or a rwlock which is currently r-held. */
1359       tl_assert(lock->kind == LK_mbRec
1360                 || (lock->kind == LK_rdwr && !lock->heldW));
1361       tl_assert(HG_(elemWS)( univ_lsets, thr->locksetA, (UWord)lock ));
1362       if (lock->heldW)
1363          tl_assert(HG_(elemWS)( univ_lsets, thr->locksetW, (UWord)lock ));
1364       else
1365          tl_assert(!HG_(elemWS)( univ_lsets, thr->locksetW, (UWord)lock ));
1366    } else {
1367       /* n is zero.  This means we don't hold the lock any more.  But
1368          if it's a rwlock held in r-mode, someone else could still
1369          hold it.  Just do whatever sanity checks we can. */
1370       if (lock->kind == LK_rdwr && lock->heldBy) {
1371          /* It's a rwlock.  We no longer hold it but we used to;
1372             nevertheless it still appears to be held by someone else.
1373             The implication is that, prior to this release, it must
1374             have been shared by us and whoever else is holding it;
1375             which in turn implies it must be r-held, since a lock
1376             can't be w-held by more than one thread. */
1377          /* The lock is now R-held by somebody else: */
1378          tl_assert(lock->heldW == False);
1379       } else {
1380          /* Normal case.  It's either not a rwlock, or it's a rwlock
1381             that we used to hold in w-mode (which is pretty much the
1382             same thing as a non-rwlock.)  Since this transaction is
1383             atomic (V does not allow multiple threads to run
1384             simultaneously), it must mean the lock is now not held by
1385             anybody.  Hence assert for it. */
1386          /* The lock is now not held by anybody: */
1387          tl_assert(!lock->heldBy);
1388          tl_assert(lock->heldW == False);
1389       }
1390       //if (lock->heldBy) {
1391       //   tl_assert(0 == VG_(elemBag)( lock->heldBy, (UWord)thr ));
1392       //}
1393       /* update this thread's lockset accordingly. */
1394       thr->locksetA
1395          = HG_(delFromWS)( univ_lsets, thr->locksetA, (UWord)lock );
1396       thr->locksetW
1397          = HG_(delFromWS)( univ_lsets, thr->locksetW, (UWord)lock );
1398       /* push our VC into the lock */
1399       tl_assert(thr->hbthr);
1400       tl_assert(lock->hbso);
1401       /* If the lock was previously W-held, then we want to do a
1402          strong send, and if previously R-held, then a weak send. */
1403       libhb_so_send( thr->hbthr, lock->hbso, was_heldW );
1404    }
1405    /* fall through */
1406 
1407   error:
1408    tl_assert(HG_(is_sane_LockN)(lock));
1409 }
1410 
1411 
1412 /* ---------------------------------------------------------- */
1413 /* -------- Event handlers proper (evh__* functions) -------- */
1414 /* ---------------------------------------------------------- */
1415 
1416 /* What is the Thread* for the currently running thread?  This is
1417    absolutely performance critical.  We receive notifications from the
1418    core for client code starts/stops, and cache the looked-up result
1419    in 'current_Thread'.  Hence, for the vast majority of requests,
1420    finding the current thread reduces to a read of a global variable,
1421    provided get_current_Thread_in_C_C is inlined.
1422 
1423    Outside of client code, current_Thread is NULL, and presumably
1424    any uses of it will cause a segfault.  Hence:
1425 
1426    - for uses definitely within client code, use
1427      get_current_Thread_in_C_C.
1428 
1429    - for all other uses, use get_current_Thread.
1430 */
1431 
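/* current_Thread_prev remembers the last Thread that ran client code, so
   that libhb_Thr_resumes is only signalled when a different thread
   resumes. */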
1432 static Thread *current_Thread      = NULL,
1433               *current_Thread_prev = NULL;
1434 
1435 static void evh__start_client_code ( ThreadId tid, ULong nDisp ) {
1436    if (0) VG_(printf)("start %d %llu\n", (Int)tid, nDisp);
1437    tl_assert(current_Thread == NULL);
1438    current_Thread = map_threads_lookup( tid );
1439    tl_assert(current_Thread != NULL);
1440    if (current_Thread != current_Thread_prev) {
1441       libhb_Thr_resumes( current_Thread->hbthr );
1442       current_Thread_prev = current_Thread;
1443    }
1444 }
1445 static void evh__stop_client_code ( ThreadId tid, ULong nDisp ) {
1446    if (0) VG_(printf)(" stop %d %llu\n", (Int)tid, nDisp);
1447    tl_assert(current_Thread != NULL);
1448    current_Thread = NULL;
1449    libhb_maybe_GC();
1450 }
1451 static inline Thread* get_current_Thread_in_C_C ( void ) {
1452    return current_Thread;
1453 }
1454 static inline Thread* get_current_Thread ( void ) {
1455    ThreadId coretid;
1456    Thread*  thr;
1457    thr = get_current_Thread_in_C_C();
1458    if (LIKELY(thr))
1459       return thr;
1460    /* evidently not in client code.  Do it the slow way. */
1461    coretid = VG_(get_running_tid)();
1462    /* FIXME: get rid of the following kludge.  It exists because
1463       evh__new_mem is called during initialisation (as notification
1464       of initial memory layout) and VG_(get_running_tid)() returns
1465       VG_INVALID_THREADID at that point. */
1466    if (coretid == VG_INVALID_THREADID)
1467       coretid = 1; /* KLUDGE */
1468    thr = map_threads_lookup( coretid );
1469    return thr;
1470 }
1471 
1472 static
1473 void evh__new_mem ( Addr a, SizeT len ) {
1474    Thread *thr = get_current_Thread();
1475    if (SHOW_EVENTS >= 2)
1476       VG_(printf)("evh__new_mem(%p, %lu)\n", (void*)a, len );
1477    shadow_mem_make_New( thr, a, len );
1478    if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
1479       all__sanity_check("evh__new_mem-post");
1480    if (UNLIKELY(thr->pthread_create_nesting_level > 0))
1481       shadow_mem_make_Untracked( thr, a, len );
1482 }
1483 
1484 static
1485 void evh__new_mem_stack ( Addr a, SizeT len ) {
1486    Thread *thr = get_current_Thread();
1487    if (SHOW_EVENTS >= 2)
1488       VG_(printf)("evh__new_mem_stack(%p, %lu)\n", (void*)a, len );
1489    shadow_mem_make_New( thr, -VG_STACK_REDZONE_SZB + a, len );
1490    if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
1491       all__sanity_check("evh__new_mem_stack-post");
1492    if (UNLIKELY(thr->pthread_create_nesting_level > 0))
1493       shadow_mem_make_Untracked( thr, a, len );
1494 }
1495 
1496 #define DCL_evh__new_mem_stack(syze)                                     \
1497 static void VG_REGPARM(1) evh__new_mem_stack_##syze(Addr new_SP)         \
1498 {                                                                        \
1499    Thread *thr = get_current_Thread();                                   \
1500    if (SHOW_EVENTS >= 2)                                                 \
1501       VG_(printf)("evh__new_mem_stack_" #syze "(%p, %lu)\n",             \
1502                   (void*)new_SP, (SizeT)syze );                          \
1503    shadow_mem_make_New( thr, -VG_STACK_REDZONE_SZB + new_SP, syze );     \
1504    if (syze >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE)) \
1505       all__sanity_check("evh__new_mem_stack_" #syze "-post");            \
1506    if (UNLIKELY(thr->pthread_create_nesting_level > 0))                  \
1507       shadow_mem_make_Untracked( thr, new_SP, syze );                    \
1508 }
1509 
1510 DCL_evh__new_mem_stack(4);
1511 DCL_evh__new_mem_stack(8);
1512 DCL_evh__new_mem_stack(12);
1513 DCL_evh__new_mem_stack(16);
1514 DCL_evh__new_mem_stack(32);
1515 DCL_evh__new_mem_stack(112);
1516 DCL_evh__new_mem_stack(128);
1517 DCL_evh__new_mem_stack(144);
1518 DCL_evh__new_mem_stack(160);
1519 
1520 static
1521 void evh__new_mem_w_tid ( Addr a, SizeT len, ThreadId tid ) {
1522    Thread *thr = get_current_Thread();
1523    if (SHOW_EVENTS >= 2)
1524       VG_(printf)("evh__new_mem_w_tid(%p, %lu)\n", (void*)a, len );
1525    shadow_mem_make_New( thr, a, len );
1526    if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
1527       all__sanity_check("evh__new_mem_w_tid-post");
1528    if (UNLIKELY(thr->pthread_create_nesting_level > 0))
1529       shadow_mem_make_Untracked( thr, a, len );
1530 }
1531 
1532 static
1533 void evh__new_mem_w_perms ( Addr a, SizeT len,
1534                             Bool rr, Bool ww, Bool xx, ULong di_handle ) {
1535    Thread *thr = get_current_Thread();
1536    if (SHOW_EVENTS >= 1)
1537       VG_(printf)("evh__new_mem_w_perms(%p, %lu, %d,%d,%d)\n",
1538                   (void*)a, len, (Int)rr, (Int)ww, (Int)xx );
1539    if (rr || ww || xx) {
1540       shadow_mem_make_New( thr, a, len );
1541       if (UNLIKELY(thr->pthread_create_nesting_level > 0))
1542          shadow_mem_make_Untracked( thr, a, len );
1543    }
1544    if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
1545       all__sanity_check("evh__new_mem_w_perms-post");
1546 }
1547 
1548 static
1549 void evh__set_perms ( Addr a, SizeT len,
1550                       Bool rr, Bool ww, Bool xx ) {
1551    // This handles mprotect requests.  If the memory is being put
1552    // into no-R no-W state, paint it as NoAccess, for the reasons
1553    // documented at evh__die_mem_munmap().
1554    if (SHOW_EVENTS >= 1)
1555       VG_(printf)("evh__set_perms(%p, %lu, r=%d w=%d x=%d)\n",
1556                   (void*)a, len, (Int)rr, (Int)ww, (Int)xx );
1557    /* Hmm.  What should we do here, that actually makes any sense?
1558       Let's say: if neither readable nor writable, then declare it
1559       NoAccess, else leave it alone. */
1560    if (!(rr || ww))
1561       shadow_mem_make_NoAccess_AHAE( get_current_Thread(), a, len );
1562    if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
1563       all__sanity_check("evh__set_perms-post");
1564 }
1565 
1566 static
1567 void evh__die_mem ( Addr a, SizeT len ) {
1568    // Urr, libhb ignores this.
1569    if (SHOW_EVENTS >= 2)
1570       VG_(printf)("evh__die_mem(%p, %lu)\n", (void*)a, len );
1571    shadow_mem_make_NoAccess_NoFX( get_current_Thread(), a, len );
1572    if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
1573       all__sanity_check("evh__die_mem-post");
1574 }
1575 
1576 static
1577 void evh__die_mem_munmap ( Addr a, SizeT len ) {
1578    // It's important that libhb doesn't ignore this.  If, as is likely,
1579    // the client is subject to address space layout randomization,
1580    // then unmapped areas may never get remapped over, even in long
1581    // runs.  If we just ignore them we wind up with large resource
1582    // (VTS) leaks in libhb.  So force them to NoAccess, so that all
1583    // VTS references in the affected area are dropped.  Marking memory
1584    // as NoAccess is expensive, but we assume that munmap is sufficiently
1585    // rare that the space gains of doing this are worth the costs.
1586    if (SHOW_EVENTS >= 2)
1587       VG_(printf)("evh__die_mem_munmap(%p, %lu)\n", (void*)a, len );
1588    shadow_mem_make_NoAccess_AHAE( get_current_Thread(), a, len );
1589 }
1590 
1591 static
1592 void evh__untrack_mem ( Addr a, SizeT len ) {
1593    // Libhb doesn't ignore this.
1594    if (SHOW_EVENTS >= 2)
1595       VG_(printf)("evh__untrack_mem(%p, %lu)\n", (void*)a, len );
1596    shadow_mem_make_Untracked( get_current_Thread(), a, len );
1597    if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
1598       all__sanity_check("evh__untrack_mem-post");
1599 }
1600 
1601 static
1602 void evh__copy_mem ( Addr src, Addr dst, SizeT len ) {
1603    if (SHOW_EVENTS >= 2)
1604       VG_(printf)("evh__copy_mem(%p, %p, %lu)\n", (void*)src, (void*)dst, len );
1605    Thread *thr = get_current_Thread();
1606    if (LIKELY(thr->synchr_nesting == 0))
1607       shadow_mem_scopy_range( thr , src, dst, len );
1608    if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
1609       all__sanity_check("evh__copy_mem-post");
1610 }
1611 
1612 static
1613 void evh__pre_thread_ll_create ( ThreadId parent, ThreadId child )
1614 {
1615    if (SHOW_EVENTS >= 1)
1616       VG_(printf)("evh__pre_thread_ll_create(p=%d, c=%d)\n",
1617                   (Int)parent, (Int)child );
1618 
1619    if (parent != VG_INVALID_THREADID) {
1620       Thread* thr_p;
1621       Thread* thr_c;
1622       Thr*    hbthr_p;
1623       Thr*    hbthr_c;
1624 
1625       tl_assert(HG_(is_sane_ThreadId)(parent));
1626       tl_assert(HG_(is_sane_ThreadId)(child));
1627       tl_assert(parent != child);
1628 
1629       thr_p = map_threads_maybe_lookup( parent );
1630       thr_c = map_threads_maybe_lookup( child );
1631 
1632       tl_assert(thr_p != NULL);
1633       tl_assert(thr_c == NULL);
1634 
1635       hbthr_p = thr_p->hbthr;
1636       tl_assert(hbthr_p != NULL);
1637       tl_assert( libhb_get_Thr_hgthread(hbthr_p) == thr_p );
1638 
1639       hbthr_c = libhb_create ( hbthr_p );
1640 
1641       /* Create a new thread record for the child. */
1642       /* a Thread for the new thread ... */
1643       thr_c = mk_Thread( hbthr_c );
1644       tl_assert( libhb_get_Thr_hgthread(hbthr_c) == NULL );
1645       libhb_set_Thr_hgthread(hbthr_c, thr_c);
1646 
1647       /* and bind it in the thread-map table */
1648       map_threads[child] = thr_c;
1649       tl_assert(thr_c->coretid == VG_INVALID_THREADID);
1650       thr_c->coretid = child;
1651 
1652       /* Record where the parent is so we can later refer to this in
1653          error messages.
1654 
1655          On x86/amd64-linux, this entails a nasty glibc specific hack.
1656          The stack snapshot is taken immediately after the parent has
1657          returned from its sys_clone call.  Unfortunately there is no
1658          unwind info for the insn following "syscall" - reading the
1659          glibc sources confirms this.  So we ask for a snapshot to be
1660          taken as if RIP was 3 bytes earlier, in a place where there
1661          is unwind info.  Sigh.
1662       */
1663       { Word first_ip_delta = 0;
1664 #       if defined(VGP_amd64_linux) || defined(VGP_x86_linux)
1665         first_ip_delta = -3;
1666 #       elif defined(VGP_arm64_linux) || defined(VGP_arm_linux)
1667         first_ip_delta = -1;
1668 #       endif
1669         thr_c->created_at = VG_(record_ExeContext)(parent, first_ip_delta);
1670       }
1671 
1672       if (HG_(clo_ignore_thread_creation)) {
1673          HG_(thread_enter_pthread_create)(thr_c);
1674          tl_assert(thr_c->synchr_nesting == 0);
1675          HG_(thread_enter_synchr)(thr_c);
1676          /* Counterpart in _VG_USERREQ__HG_SET_MY_PTHREAD_T. */
1677       }
1678    }
1679 
1680    if (HG_(clo_sanity_flags) & SCE_THREADS)
1681       all__sanity_check("evh__pre_thread_create-post");
1682 }
1683 
1684 static
1685 void evh__pre_thread_ll_exit ( ThreadId quit_tid )
1686 {
1687    Int     nHeld;
1688    Thread* thr_q;
1689    if (SHOW_EVENTS >= 1)
1690       VG_(printf)("evh__pre_thread_ll_exit(thr=%d)\n",
1691                   (Int)quit_tid );
1692 
1693    /* quit_tid has disappeared without joining to any other thread.
1694       Therefore there is no synchronisation event associated with its
1695       exit and so we have to pretty much treat it as if it was still
1696       alive but mysteriously making no progress.  That is because, if
1697       we don't know when it really exited, then we can never say there
1698       is a point in time when we're sure the thread really has
1699       finished, and so we need to consider the possibility that it
1700       lingers indefinitely and continues to interact with other
1701       threads. */
1702    /* However, it might have rendezvous'd with a thread that called
1703       pthread_join with this one as arg, prior to this point (that's
1704       how NPTL works).  In which case there has already been a prior
1705       sync event.  So in any case, just let the thread exit.  On NPTL,
1706       all thread exits go through here. */
1707    tl_assert(HG_(is_sane_ThreadId)(quit_tid));
1708    thr_q = map_threads_maybe_lookup( quit_tid );
1709    tl_assert(thr_q != NULL);
1710 
1711    /* Complain if this thread holds any locks. */
1712    nHeld = HG_(cardinalityWS)( univ_lsets, thr_q->locksetA );
1713    tl_assert(nHeld >= 0);
1714    if (nHeld > 0) {
1715       HChar buf[80];
1716       VG_(sprintf)(buf, "Exiting thread still holds %d lock%s",
1717                         nHeld, nHeld > 1 ? "s" : "");
1718       HG_(record_error_Misc)( thr_q, buf );
1719    }
1720 
1721    /* Not much to do here:
1722       - tell libhb the thread is gone
1723       - clear the map_threads entry, in order that the Valgrind core
1724         can re-use it. */
1725    /* Cleanup actions (next 5 lines) copied in evh__atfork_child; keep
1726       in sync. */
1727    tl_assert(thr_q->hbthr);
1728    libhb_async_exit(thr_q->hbthr);
1729    tl_assert(thr_q->coretid == quit_tid);
1730    thr_q->coretid = VG_INVALID_THREADID;
1731    map_threads_delete( quit_tid );
1732 
1733    if (HG_(clo_sanity_flags) & SCE_THREADS)
1734       all__sanity_check("evh__pre_thread_ll_exit-post");
1735 }
1736 
1737 /* This is called immediately after fork, for the child only.  'tid'
1738    is the only surviving thread (as per POSIX rules on fork() in
1739    threaded programs), so we have to clean up map_threads to remove
1740    entries for any other threads. */
1741 static
1742 void evh__atfork_child ( ThreadId tid )
1743 {
1744    UInt    i;
1745    Thread* thr;
1746    /* Slot 0 should never be used. */
1747    thr = map_threads_maybe_lookup( 0/*INVALID*/ );
1748    tl_assert(!thr);
1749    /* Clean up all other slots except 'tid'. */
1750    for (i = 1; i < VG_N_THREADS; i++) {
1751       if (i == tid)
1752          continue;
1753       thr = map_threads_maybe_lookup(i);
1754       if (!thr)
1755          continue;
1756       /* Cleanup actions (next 5 lines) copied from end of
1757          evh__pre_thread_ll_exit; keep in sync. */
1758       tl_assert(thr->hbthr);
1759       libhb_async_exit(thr->hbthr);
1760       tl_assert(thr->coretid == i);
1761       thr->coretid = VG_INVALID_THREADID;
1762       map_threads_delete(i);
1763    }
1764 }
1765 
1766 /* generate a dependence from the hbthr_q quitter to the hbthr_s stayer. */
1767 static
1768 void generate_quitter_stayer_dependence (Thr* hbthr_q, Thr* hbthr_s)
1769 {
1770    SO*      so;
1771    /* Allocate a temporary synchronisation object and use it to send
1772       an imaginary message from the quitter to the stayer, the purpose
1773       being to generate a dependence from the quitter to the
1774       stayer. */
1775    so = libhb_so_alloc();
1776    tl_assert(so);
1777    /* Send last arg of _so_send as False, since the sending thread
1778       doesn't actually exist any more, so we don't want _so_send to
1779       try taking stack snapshots of it. */
1780    libhb_so_send(hbthr_q, so, True/*strong_send*//*?!? wrt comment above*/);
1781    libhb_so_recv(hbthr_s, so, True/*strong_recv*/);
1782    libhb_so_dealloc(so);
1783 
1784    /* Tell libhb that the quitter has been reaped.  Note that we might
1785       have to be cleverer about this, to exclude 2nd and subsequent
1786       notifications for the same hbthr_q, in the case where the app is
1787       buggy (calls pthread_join twice or more on the same thread) AND
1788       where libpthread is also buggy and doesn't return ESRCH on
1789       subsequent calls.  (If libpthread isn't thusly buggy, then the
1790       wrapper for pthread_join in hg_intercepts.c will stop us getting
1791       notified here multiple times for the same joinee.)  See also
1792       comments in helgrind/tests/jointwice.c. */
1793    libhb_joinedwith_done(hbthr_q);
1794 }
1795 
1796 
1797 static
1798 void evh__HG_PTHREAD_JOIN_POST ( ThreadId stay_tid, Thread* quit_thr )
1799 {
1800    Thread*  thr_s;
1801    Thread*  thr_q;
1802    Thr*     hbthr_s;
1803    Thr*     hbthr_q;
1804 
1805    if (SHOW_EVENTS >= 1)
1806       VG_(printf)("evh__post_thread_join(stayer=%d, quitter=%p)\n",
1807                   (Int)stay_tid, quit_thr );
1808 
1809    tl_assert(HG_(is_sane_ThreadId)(stay_tid));
1810 
1811    thr_s = map_threads_maybe_lookup( stay_tid );
1812    thr_q = quit_thr;
1813    tl_assert(thr_s != NULL);
1814    tl_assert(thr_q != NULL);
1815    tl_assert(thr_s != thr_q);
1816 
1817    hbthr_s = thr_s->hbthr;
1818    hbthr_q = thr_q->hbthr;
1819    tl_assert(hbthr_s != hbthr_q);
1820    tl_assert( libhb_get_Thr_hgthread(hbthr_s) == thr_s );
1821    tl_assert( libhb_get_Thr_hgthread(hbthr_q) == thr_q );
1822 
1823    generate_quitter_stayer_dependence (hbthr_q, hbthr_s);
1824 
1825    /* evh__pre_thread_ll_exit issues an error message if the exiting
1826       thread holds any locks.  No need to check here. */
1827 
1828    /* This holds because, at least when using NPTL as the thread
1829       library, we should be notified of the low level thread exit before
1830       we hear of any join event on it.  The low level exit
1831       notification feeds through into evh__pre_thread_ll_exit,
1832       which should clear the map_threads entry for it.  Hence we
1833       expect there to be no map_threads entry at this point. */
1834    tl_assert( map_threads_maybe_reverse_lookup_SLOW(thr_q)
1835               == VG_INVALID_THREADID);
1836 
1837    if (HG_(clo_sanity_flags) & SCE_THREADS)
1838       all__sanity_check("evh__post_thread_join-post");
1839 }
1840 
1841 static
1842 void evh__pre_mem_read ( CorePart part, ThreadId tid, const HChar* s,
1843                          Addr a, SizeT size) {
1844    if (SHOW_EVENTS >= 2
1845        || (SHOW_EVENTS >= 1 && size != 1))
1846       VG_(printf)("evh__pre_mem_read(ctid=%d, \"%s\", %p, %lu)\n",
1847                   (Int)tid, s, (void*)a, size );
1848    Thread *thr = map_threads_lookup(tid);
1849    if (LIKELY(thr->synchr_nesting == 0))
1850       shadow_mem_cread_range(thr, a, size);
1851    if (size >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
1852       all__sanity_check("evh__pre_mem_read-post");
1853 }
1854 
1855 static
1856 void evh__pre_mem_read_asciiz ( CorePart part, ThreadId tid,
1857                                 const HChar* s, Addr a ) {
1858    Int len;
1859    if (SHOW_EVENTS >= 1)
1860       VG_(printf)("evh__pre_mem_asciiz(ctid=%d, \"%s\", %p)\n",
1861                   (Int)tid, s, (void*)a );
1862    // Don't segfault if the string starts in an obviously stupid
1863    // place.  Actually we should check the whole string, not just
1864    // the start address, but that's too much trouble.  At least
1865    // checking the first byte is better than nothing.  See #255009.
1866    if (!VG_(am_is_valid_for_client) (a, 1, VKI_PROT_READ))
1867       return;
1868    Thread *thr = map_threads_lookup(tid);
1869    len = VG_(strlen)( (HChar*) a );
1870    if (LIKELY(thr->synchr_nesting == 0))
1871       shadow_mem_cread_range( thr, a, len+1 );
1872    if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
1873       all__sanity_check("evh__pre_mem_read_asciiz-post");
1874 }
1875 
1876 static
1877 void evh__pre_mem_write ( CorePart part, ThreadId tid, const HChar* s,
1878                           Addr a, SizeT size ) {
1879    if (SHOW_EVENTS >= 1)
1880       VG_(printf)("evh__pre_mem_write(ctid=%d, \"%s\", %p, %lu)\n",
1881                   (Int)tid, s, (void*)a, size );
1882    Thread *thr = map_threads_lookup(tid);
1883    if (LIKELY(thr->synchr_nesting == 0))
1884       shadow_mem_cwrite_range(thr, a, size);
1885    if (size >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
1886       all__sanity_check("evh__pre_mem_write-post");
1887 }
1888 
1889 static
1890 void evh__new_mem_heap ( Addr a, SizeT len, Bool is_inited ) {
1891    if (SHOW_EVENTS >= 1)
1892       VG_(printf)("evh__new_mem_heap(%p, %lu, inited=%d)\n",
1893                   (void*)a, len, (Int)is_inited );
1894    // We ignore the initialisation state (is_inited); that's ok.
1895    shadow_mem_make_New(get_current_Thread(), a, len);
1896    if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
1897       all__sanity_check("evh__new_mem_heap-post");
1898 }
1899 
1900 static
1901 void evh__die_mem_heap ( Addr a, SizeT len ) {
1902    Thread* thr;
1903    if (SHOW_EVENTS >= 1)
1904       VG_(printf)("evh__die_mem_heap(%p, %lu)\n", (void*)a, len );
1905    thr = get_current_Thread();
1906    tl_assert(thr);
1907    if (HG_(clo_free_is_write)) {
1908       /* Treat frees as if the memory was written immediately prior to
1909          the free.  This shakes out more races, specifically, cases
1910          where memory is referenced by one thread, and freed by
1911          another, and there's no observable synchronisation event to
1912          guarantee that the reference happens before the free. */
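      /* Illustrative sketch (not part of Helgrind; 'p' and 'field' are
         hypothetical) of the kind of race this shakes out when
         HG_(clo_free_is_write) is set:

            // Thread A                     // Thread B
            int v = p->field;               free(p);

         With the free modelled as a write to the whole block, A's
         unsynchronised read and B's free form a reportable race even
         though no explicit store to p->field appears in the program. */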
1913       if (LIKELY(thr->synchr_nesting == 0))
1914          shadow_mem_cwrite_range(thr, a, len);
1915    }
1916    shadow_mem_make_NoAccess_AHAE( thr, a, len );
1917    /* We used to call instead
1918           shadow_mem_make_NoAccess_NoFX( thr, a, len );
1919       A non-buggy application will not access the freed memory again,
1920       so marking it no-access is in theory useless.
1921       Not marking freed memory would avoid that overhead for applications
1922       doing mostly malloc/free, as freed memory is typically recycled
1923       very quickly anyway.
1924       We nevertheless mark it no-access, for the following reasons:
1925         * the accessibility bits then always correctly represent the
1926           memory status (e.g. for the client request VALGRIND_HG_GET_ABITS).
1927         * the overhead is reasonable (about 5 seconds per GB in 1000-byte
1928           blocks, on a ppc64le, for the unrealistic workload of an
1929           application doing only malloc/free).
1930         * marking no-access allows the SecMap to be GC'd, which might
1931           improve performance and/or memory usage.
1932         * we might detect more application bugs when memory is marked
1933           no-access.
1934       If needed, we could add an option --free-is-noaccess=yes|no here,
1935       so that applications that cannot afford the no-access marking
1936       overhead can disable it. */
1937 
1938    if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
1939       all__sanity_check("evh__die_mem_heap-post");
1940 }
1941 
1942 /* --- Event handlers called from generated code --- */
1943 
1944 static VG_REGPARM(1)
1945 void evh__mem_help_cread_1(Addr a) {
1946    Thread*  thr = get_current_Thread_in_C_C();
1947    Thr*     hbthr = thr->hbthr;
1948    if (LIKELY(thr->synchr_nesting == 0))
1949       LIBHB_CREAD_1(hbthr, a);
1950 }
1951 
1952 static VG_REGPARM(1)
1953 void evh__mem_help_cread_2(Addr a) {
1954    Thread*  thr = get_current_Thread_in_C_C();
1955    Thr*     hbthr = thr->hbthr;
1956    if (LIKELY(thr->synchr_nesting == 0))
1957       LIBHB_CREAD_2(hbthr, a);
1958 }
1959 
1960 static VG_REGPARM(1)
1961 void evh__mem_help_cread_4(Addr a) {
1962    Thread*  thr = get_current_Thread_in_C_C();
1963    Thr*     hbthr = thr->hbthr;
1964    if (LIKELY(thr->synchr_nesting == 0))
1965       LIBHB_CREAD_4(hbthr, a);
1966 }
1967 
1968 static VG_REGPARM(1)
1969 void evh__mem_help_cread_8(Addr a) {
1970    Thread*  thr = get_current_Thread_in_C_C();
1971    Thr*     hbthr = thr->hbthr;
1972    if (LIKELY(thr->synchr_nesting == 0))
1973       LIBHB_CREAD_8(hbthr, a);
1974 }
1975 
1976 static VG_REGPARM(2)
1977 void evh__mem_help_cread_N(Addr a, SizeT size) {
1978    Thread*  thr = get_current_Thread_in_C_C();
1979    Thr*     hbthr = thr->hbthr;
1980    if (LIKELY(thr->synchr_nesting == 0))
1981       LIBHB_CREAD_N(hbthr, a, size);
1982 }
1983 
1984 static VG_REGPARM(1)
1985 void evh__mem_help_cwrite_1(Addr a) {
1986    Thread*  thr = get_current_Thread_in_C_C();
1987    Thr*     hbthr = thr->hbthr;
1988    if (LIKELY(thr->synchr_nesting == 0))
1989       LIBHB_CWRITE_1(hbthr, a);
1990 }
1991 
1992 static VG_REGPARM(1)
1993 void evh__mem_help_cwrite_2(Addr a) {
1994    Thread*  thr = get_current_Thread_in_C_C();
1995    Thr*     hbthr = thr->hbthr;
1996    if (LIKELY(thr->synchr_nesting == 0))
1997       LIBHB_CWRITE_2(hbthr, a);
1998 }
1999 
2000 static VG_REGPARM(1)
2001 void evh__mem_help_cwrite_4(Addr a) {
2002    Thread*  thr = get_current_Thread_in_C_C();
2003    Thr*     hbthr = thr->hbthr;
2004    if (LIKELY(thr->synchr_nesting == 0))
2005       LIBHB_CWRITE_4(hbthr, a);
2006 }
2007 
2008 /* Same as evh__mem_help_cwrite_4 but unwind will use a first_sp_delta of
2009    one word. */
2010 static VG_REGPARM(1)
2011 void evh__mem_help_cwrite_4_fixupSP(Addr a) {
2012    Thread*  thr = get_current_Thread_in_C_C();
2013    Thr*     hbthr = thr->hbthr;
2014 
2015    thr->first_sp_delta = sizeof(Word);
2016    if (LIKELY(thr->synchr_nesting == 0))
2017       LIBHB_CWRITE_4(hbthr, a);
2018    thr->first_sp_delta = 0;
2019 }
2020 
2021 static VG_REGPARM(1)
2022 void evh__mem_help_cwrite_8(Addr a) {
2023    Thread*  thr = get_current_Thread_in_C_C();
2024    Thr*     hbthr = thr->hbthr;
2025    if (LIKELY(thr->synchr_nesting == 0))
2026       LIBHB_CWRITE_8(hbthr, a);
2027 }
2028 
2029 /* Same as evh__mem_help_cwrite_8 but unwind will use a first_sp_delta of
2030    one word. */
2031 static VG_REGPARM(1)
2032 void evh__mem_help_cwrite_8_fixupSP(Addr a) {
2033    Thread*  thr = get_current_Thread_in_C_C();
2034    Thr*     hbthr = thr->hbthr;
2035 
2036    thr->first_sp_delta = sizeof(Word);
2037    if (LIKELY(thr->synchr_nesting == 0))
2038       LIBHB_CWRITE_8(hbthr, a);
2039    thr->first_sp_delta = 0;
2040 }
2041 
2042 static VG_REGPARM(2)
2043 void evh__mem_help_cwrite_N(Addr a, SizeT size) {
2044    Thread*  thr = get_current_Thread_in_C_C();
2045    Thr*     hbthr = thr->hbthr;
2046    if (LIKELY(thr->synchr_nesting == 0))
2047       LIBHB_CWRITE_N(hbthr, a, size);
2048 }
2049 
2050 
2051 /* ------------------------------------------------------- */
2052 /* -------------- events to do with mutexes -------------- */
2053 /* ------------------------------------------------------- */
2054 
2055 /* EXPOSITION only: by intercepting lock init events we can show the
2056    user where the lock was initialised, rather than only being able to
2057    show where it was first locked.  Intercepting lock initialisations
2058    is not necessary for the basic operation of the race checker. */
2059 static
2060 void evh__HG_PTHREAD_MUTEX_INIT_POST( ThreadId tid,
2061                                       void* mutex, Word mbRec )
2062 {
2063    if (SHOW_EVENTS >= 1)
2064       VG_(printf)("evh__hg_PTHREAD_MUTEX_INIT_POST(ctid=%d, mbRec=%ld, %p)\n",
2065                   (Int)tid, mbRec, (void*)mutex );
2066    tl_assert(mbRec == 0 || mbRec == 1);
2067    map_locks_lookup_or_create( mbRec ? LK_mbRec : LK_nonRec,
2068                                (Addr)mutex, tid );
2069    if (HG_(clo_sanity_flags) & SCE_LOCKS)
2070       all__sanity_check("evh__hg_PTHREAD_MUTEX_INIT_POST");
2071 }
2072 
2073 static
2074 void evh__HG_PTHREAD_MUTEX_DESTROY_PRE( ThreadId tid, void* mutex,
2075                                         Bool mutex_is_init )
2076 {
2077    Thread* thr;
2078    Lock*   lk;
2079    if (SHOW_EVENTS >= 1)
2080       VG_(printf)("evh__hg_PTHREAD_MUTEX_DESTROY_PRE"
2081                   "(ctid=%d, %p, isInit=%d)\n",
2082                   (Int)tid, (void*)mutex, (Int)mutex_is_init );
2083 
2084    thr = map_threads_maybe_lookup( tid );
2085    /* cannot fail - Thread* must already exist */
2086    tl_assert( HG_(is_sane_Thread)(thr) );
2087 
2088    lk = map_locks_maybe_lookup( (Addr)mutex );
2089 
2090    if (lk == NULL && mutex_is_init) {
2091       /* We're destroying a mutex which we don't have any record of,
2092          and which appears to have the value PTHREAD_MUTEX_INITIALIZER.
2093          Assume it never got used, and so we don't need to do anything
2094          more. */
2095       goto out;
2096    }
2097 
2098    if (lk == NULL || (lk->kind != LK_nonRec && lk->kind != LK_mbRec)) {
2099       HG_(record_error_Misc)(
2100          thr, "pthread_mutex_destroy with invalid argument" );
2101    }
2102 
2103    if (lk) {
2104       tl_assert( HG_(is_sane_LockN)(lk) );
2105       tl_assert( lk->guestaddr == (Addr)mutex );
2106       if (lk->heldBy) {
2107          /* Basically act like we unlocked the lock */
2108          HG_(record_error_Misc)(
2109             thr, "pthread_mutex_destroy of a locked mutex" );
2110          /* remove lock from locksets of all owning threads */
2111          remove_Lock_from_locksets_of_all_owning_Threads( lk );
2112          VG_(deleteBag)( lk->heldBy );
2113          lk->heldBy = NULL;
2114          lk->heldW = False;
2115          lk->acquired_at = NULL;
2116       }
2117       tl_assert( !lk->heldBy );
2118       tl_assert( HG_(is_sane_LockN)(lk) );
2119 
2120       if (HG_(clo_track_lockorders))
2121          laog__handle_one_lock_deletion(lk);
2122       map_locks_delete( lk->guestaddr );
2123       del_LockN( lk );
2124    }
2125 
2126   out:
2127    if (HG_(clo_sanity_flags) & SCE_LOCKS)
2128       all__sanity_check("evh__hg_PTHREAD_MUTEX_DESTROY_PRE");
2129 }
2130 
2131 static void evh__HG_PTHREAD_MUTEX_LOCK_PRE ( ThreadId tid,
2132                                              void* mutex, Word isTryLock )
2133 {
2134    /* Just check the mutex is sane; nothing else to do. */
2135    // 'mutex' may be invalid - not checked by wrapper
2136    Thread* thr;
2137    Lock*   lk;
2138    if (SHOW_EVENTS >= 1)
2139       VG_(printf)("evh__hg_PTHREAD_MUTEX_LOCK_PRE(ctid=%d, mutex=%p)\n",
2140                   (Int)tid, (void*)mutex );
2141 
2142    tl_assert(isTryLock == 0 || isTryLock == 1);
2143    thr = map_threads_maybe_lookup( tid );
2144    tl_assert(thr); /* cannot fail - Thread* must already exist */
2145 
2146    lk = map_locks_maybe_lookup( (Addr)mutex );
2147 
2148    if (lk && (lk->kind == LK_rdwr)) {
2149       HG_(record_error_Misc)( thr, "pthread_mutex_lock with a "
2150                                    "pthread_rwlock_t* argument " );
2151    }
2152 
2153    if ( lk
2154         && isTryLock == 0
2155         && (lk->kind == LK_nonRec || lk->kind == LK_rdwr)
2156         && lk->heldBy
2157         && lk->heldW
2158         && VG_(elemBag)( lk->heldBy, (UWord)thr ) > 0 ) {
2159       /* uh, it's a non-recursive lock and we already w-hold it, and
2160          this is a real lock operation (not a speculative "tryLock"
2161          kind of thing).  Duh.  Deadlock coming up; but at least
2162          produce an error message. */
2163       const HChar* errstr = "Attempt to re-lock a "
2164                             "non-recursive lock I already hold";
2165       const HChar* auxstr = "Lock was previously acquired";
2166       if (lk->acquired_at) {
2167          HG_(record_error_Misc_w_aux)( thr, errstr, auxstr, lk->acquired_at );
2168       } else {
2169          HG_(record_error_Misc)( thr, errstr );
2170       }
2171    }
2172 }
2173 
2174 static void evh__HG_PTHREAD_MUTEX_LOCK_POST ( ThreadId tid, void* mutex )
2175 {
2176    // only called if the real library call succeeded - so mutex is sane
2177    Thread* thr;
2178    if (SHOW_EVENTS >= 1)
2179       VG_(printf)("evh__HG_PTHREAD_MUTEX_LOCK_POST(ctid=%d, mutex=%p)\n",
2180                   (Int)tid, (void*)mutex );
2181 
2182    thr = map_threads_maybe_lookup( tid );
2183    tl_assert(thr); /* cannot fail - Thread* must already exist */
2184 
2185    evhH__post_thread_w_acquires_lock(
2186       thr,
2187       LK_mbRec, /* if not known, create new lock with this LockKind */
2188       (Addr)mutex
2189    );
2190 }
2191 
2192 static void evh__HG_PTHREAD_MUTEX_UNLOCK_PRE ( ThreadId tid, void* mutex )
2193 {
2194    // 'mutex' may be invalid - not checked by wrapper
2195    Thread* thr;
2196    if (SHOW_EVENTS >= 1)
2197       VG_(printf)("evh__HG_PTHREAD_MUTEX_UNLOCK_PRE(ctid=%d, mutex=%p)\n",
2198                   (Int)tid, (void*)mutex );
2199 
2200    thr = map_threads_maybe_lookup( tid );
2201    tl_assert(thr); /* cannot fail - Thread* must already exist */
2202 
2203    evhH__pre_thread_releases_lock( thr, (Addr)mutex, False/*!isRDWR*/ );
2204 }
2205 
2206 static void evh__HG_PTHREAD_MUTEX_UNLOCK_POST ( ThreadId tid, void* mutex )
2207 {
2208    // only called if the real library call succeeded - so mutex is sane
2209    Thread* thr;
2210    if (SHOW_EVENTS >= 1)
2211       VG_(printf)("evh__hg_PTHREAD_MUTEX_UNLOCK_POST(ctid=%d, mutex=%p)\n",
2212                   (Int)tid, (void*)mutex );
2213    thr = map_threads_maybe_lookup( tid );
2214    tl_assert(thr); /* cannot fail - Thread* must already exist */
2215 
2216    // anything we should do here?
2217 }
2218 
2219 
2220 /* ------------------------------------------------------- */
2221 /* -------------- events to do with spinlocks ------------ */
2222 /* ------------------------------------------------------- */
2223 
2224 /* All a bit of a kludge.  Pretend we're really dealing with ordinary
2225    pthread_mutex_t's instead, for the most part. */
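/* Illustrative sketch (not part of Helgrind; only an assumption about the
   client side): the usage these handlers model.  In glibc, initialising a
   spinlock and unlocking it have the same observable effect on the lock
   word, which is why a single INIT_OR_UNLOCK handler pair below serves
   both operations, while lock/trylock and destroy are forwarded to the
   mutex machinery.

      pthread_spinlock_t sl;
      pthread_spin_init(&sl, PTHREAD_PROCESS_PRIVATE); // INIT_OR_UNLOCK_{PRE,POST}
      pthread_spin_lock(&sl);                          // treated like a mutex lock
      // ... critical section ...
      pthread_spin_unlock(&sl);                        // INIT_OR_UNLOCK_{PRE,POST}
      pthread_spin_destroy(&sl);                       // treated like a mutex destroy
*/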
2226 
2227 static void evh__HG_PTHREAD_SPIN_INIT_OR_UNLOCK_PRE( ThreadId tid,
2228                                                      void* slock )
2229 {
2230    Thread* thr;
2231    Lock*   lk;
2232    /* In glibc's kludgey world, we're either initialising or unlocking
2233       it.  Since this is the pre-routine, if it is locked, unlock it
2234       and take a dependence edge.  Otherwise, do nothing. */
2235 
2236    if (SHOW_EVENTS >= 1)
2237       VG_(printf)("evh__hg_PTHREAD_SPIN_INIT_OR_UNLOCK_PRE"
2238                   "(ctid=%d, slock=%p)\n",
2239                   (Int)tid, (void*)slock );
2240 
2241    thr = map_threads_maybe_lookup( tid );
2242    /* cannot fail - Thread* must already exist */
2243    tl_assert( HG_(is_sane_Thread)(thr) );
2244 
2245    lk = map_locks_maybe_lookup( (Addr)slock );
2246    if (lk && lk->heldBy) {
2247       /* it's held.  So do the normal pre-unlock actions, as copied
2248          from evh__HG_PTHREAD_MUTEX_UNLOCK_PRE.  This stupidly
2249          duplicates the map_locks_maybe_lookup. */
2250       evhH__pre_thread_releases_lock( thr, (Addr)slock,
2251                                            False/*!isRDWR*/ );
2252    }
2253 }
2254 
2255 static void evh__HG_PTHREAD_SPIN_INIT_OR_UNLOCK_POST( ThreadId tid,
2256                                                       void* slock )
2257 {
2258    Lock* lk;
2259    /* More kludgery.  If the lock has never been seen before, do
2260       actions as per evh__HG_PTHREAD_MUTEX_INIT_POST.  Else do
2261       nothing. */
2262 
2263    if (SHOW_EVENTS >= 1)
2264       VG_(printf)("evh__hg_PTHREAD_SPIN_INIT_OR_UNLOCK_POST"
2265                   "(ctid=%d, slock=%p)\n",
2266                   (Int)tid, (void*)slock );
2267 
2268    lk = map_locks_maybe_lookup( (Addr)slock );
2269    if (!lk) {
2270       map_locks_lookup_or_create( LK_nonRec, (Addr)slock, tid );
2271    }
2272 }
2273 
2274 static void evh__HG_PTHREAD_SPIN_LOCK_PRE( ThreadId tid,
2275                                            void* slock, Word isTryLock )
2276 {
2277    evh__HG_PTHREAD_MUTEX_LOCK_PRE( tid, slock, isTryLock );
2278 }
2279 
2280 static void evh__HG_PTHREAD_SPIN_LOCK_POST( ThreadId tid,
2281                                             void* slock )
2282 {
2283    evh__HG_PTHREAD_MUTEX_LOCK_POST( tid, slock );
2284 }
2285 
2286 static void evh__HG_PTHREAD_SPIN_DESTROY_PRE( ThreadId tid,
2287                                               void* slock )
2288 {
2289    evh__HG_PTHREAD_MUTEX_DESTROY_PRE( tid, slock, 0/*!isInit*/ );
2290 }
2291 
2292 
2293 /* ----------------------------------------------------- */
2294 /* --------------- events to do with CVs --------------- */
2295 /* ----------------------------------------------------- */
2296 
2297 /* A mapping from CV to (the SO associated with it, plus some
2298    auxiliary data for error checking).  When the CV is
2299    signalled/broadcasted upon, we do a 'send' into the SO, and when a
2300    wait on it completes, we do a 'recv' from the SO.  This is believed
2301    to give the correct happens-before events arising from CV
2302    signallings/broadcasts.
2303 */
2304 
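/* Illustrative sketch (not part of Helgrind; names are hypothetical): a
   minimal client producer/consumer, annotated with where the machinery
   below does its SO send/recv.

      pthread_mutex_t mu = PTHREAD_MUTEX_INITIALIZER;
      pthread_cond_t  cv = PTHREAD_COND_INITIALIZER;
      int ready = 0;

      // producer
      pthread_mutex_lock(&mu);
      ready = 1;
      pthread_cond_signal(&cv);       // SIGNAL_PRE: 'send' on the CV's SO
      pthread_mutex_unlock(&mu);

      // consumer
      pthread_mutex_lock(&mu);
      while (!ready)
         pthread_cond_wait(&cv, &mu); // WAIT_PRE binds (CV,MX) and bumps
                                      // nWaiters; WAIT_POST does a 'recv'
                                      // from the SO
      pthread_mutex_unlock(&mu);

   The 'recv' gives the consumer a happens-before dependency on the
   producer's write to 'ready'. */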
2305 /* .so is the SO for this CV.
2306    .mx_ga is the associated mutex, when .nWaiters > 0
2307 
2308    POSIX says effectively that the first pthread_cond_{timed}wait call
2309    causes a dynamic binding between the CV and the mutex, and that
2310    lasts until such time as the waiter count falls to zero.  Hence
2311    need to keep track of the number of waiters in order to do
2312    consistency tracking. */
2313 typedef
2314    struct {
2315       SO*   so;       /* libhb-allocated SO */
2316       void* mx_ga;    /* addr of associated mutex, if any */
2317       UWord nWaiters; /* # threads waiting on the CV */
2318    }
2319    CVInfo;
2320 
2321 
2322 /* pthread_cond_t* -> CVInfo* */
2323 static WordFM* map_cond_to_CVInfo = NULL;
2324 
2325 static void map_cond_to_CVInfo_INIT ( void ) {
2326    if (UNLIKELY(map_cond_to_CVInfo == NULL)) {
2327       map_cond_to_CVInfo = VG_(newFM)( HG_(zalloc),
2328                                        "hg.mctCI.1", HG_(free), NULL );
2329    }
2330 }
2331 
2332 static CVInfo* map_cond_to_CVInfo_lookup_or_alloc ( void* cond ) {
2333    UWord key, val;
2334    map_cond_to_CVInfo_INIT();
2335    if (VG_(lookupFM)( map_cond_to_CVInfo, &key, &val, (UWord)cond )) {
2336       tl_assert(key == (UWord)cond);
2337       return (CVInfo*)val;
2338    } else {
2339       SO*     so  = libhb_so_alloc();
2340       CVInfo* cvi = HG_(zalloc)("hg.mctCloa.1", sizeof(CVInfo));
2341       cvi->so     = so;
2342       cvi->mx_ga  = 0;
2343       VG_(addToFM)( map_cond_to_CVInfo, (UWord)cond, (UWord)cvi );
2344       return cvi;
2345    }
2346 }
2347 
2348 static CVInfo* map_cond_to_CVInfo_lookup_NO_alloc ( void* cond ) {
2349    UWord key, val;
2350    map_cond_to_CVInfo_INIT();
2351    if (VG_(lookupFM)( map_cond_to_CVInfo, &key, &val, (UWord)cond )) {
2352       tl_assert(key == (UWord)cond);
2353       return (CVInfo*)val;
2354    } else {
2355       return NULL;
2356    }
2357 }
2358 
2359 static void map_cond_to_CVInfo_delete ( ThreadId tid,
2360                                         void* cond, Bool cond_is_init ) {
2361    Thread*   thr;
2362    UWord keyW, valW;
2363 
2364    thr = map_threads_maybe_lookup( tid );
2365    tl_assert(thr); /* cannot fail - Thread* must already exist */
2366 
2367    map_cond_to_CVInfo_INIT();
2368    if (VG_(lookupFM)( map_cond_to_CVInfo, &keyW, &valW, (UWord)cond )) {
2369       CVInfo* cvi = (CVInfo*)valW;
2370       tl_assert(keyW == (UWord)cond);
2371       tl_assert(cvi);
2372       tl_assert(cvi->so);
2373       if (cvi->nWaiters > 0) {
2374          HG_(record_error_Misc)(
2375             thr, "pthread_cond_destroy:"
2376                  " destruction of condition variable being waited upon");
2377          /* Destroying a cond var that is being waited upon gives EBUSY,
2378             and the variable is not destroyed. */
2379          return;
2380       }
2381       if (!VG_(delFromFM)( map_cond_to_CVInfo, &keyW, &valW, (UWord)cond ))
2382          tl_assert(0); // cond var found above, and not here ???
2383       libhb_so_dealloc(cvi->so);
2384       cvi->mx_ga = 0;
2385       HG_(free)(cvi);
2386    } else {
2387       /* We have no record of this CV.  So complain about it
2388          .. except, don't bother to complain if it has exactly the
2389          value PTHREAD_COND_INITIALIZER, since it might be that the CV
2390          was initialised like that but never used. */
2391       if (!cond_is_init) {
2392          HG_(record_error_Misc)(
2393             thr, "pthread_cond_destroy: destruction of unknown cond var");
2394       }
2395    }
2396 }
2397 
2398 static void evh__HG_PTHREAD_COND_SIGNAL_PRE ( ThreadId tid, void* cond )
2399 {
2400    /* 'tid' has signalled on 'cond'.  As per the comment above, bind
2401       cond to a SO if it is not already so bound, and 'send' on the
2402       SO.  This is later used by other thread(s) which successfully
2403       exit from a pthread_cond_wait on the same cv; then they 'recv'
2404       from the SO, thereby acquiring a dependency on this signalling
2405       event. */
2406    Thread*   thr;
2407    CVInfo*   cvi;
2408    //Lock*     lk;
2409 
2410    if (SHOW_EVENTS >= 1)
2411       VG_(printf)("evh__HG_PTHREAD_COND_SIGNAL_PRE(ctid=%d, cond=%p)\n",
2412                   (Int)tid, (void*)cond );
2413 
2414    thr = map_threads_maybe_lookup( tid );
2415    tl_assert(thr); /* cannot fail - Thread* must already exist */
2416 
2417    cvi = map_cond_to_CVInfo_lookup_or_alloc( cond );
2418    tl_assert(cvi);
2419    tl_assert(cvi->so);
2420 
2421    // error-if: mutex is bogus
2422    // error-if: mutex is not locked
2423    // Hmm.  POSIX doesn't actually say that it's an error to call
2424    // pthread_cond_signal with the associated mutex being unlocked.
2425    // Although it does say that it should be "if consistent scheduling
2426    // is desired."  For that reason, print "dubious" if the lock isn't
2427    // held by any thread.  Skip the "dubious" if it is held by some
2428    // other thread; that sounds straight-out wrong.
2429    //
2430    // Anybody who writes code that signals on a CV without holding
2431    // the associated MX needs to be shipped off to a lunatic asylum
2432    // ASAP, even though POSIX doesn't actually declare such behaviour
2433    // illegal -- it makes code extremely difficult to understand/
2434    // reason about.  In particular it puts the signalling thread in
2435    // a situation where it is racing against the released waiter
2436    // as soon as the signalling is done, and so there needs to be
2437    // some auxiliary synchronisation mechanism in the program that
2438    // makes this safe -- or the race(s) need to be harmless, or
2439    // probably nonexistent.
2440    //
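   // Illustrative sketch (not part of Helgrind; names are hypothetical) of
   // the dubious pattern the checks below flag, namely signalling with the
   // associated mutex unlocked:
   //
   //    // signaller                     // waiter
   //    ready = 1;                       pthread_mutex_lock(&mu);
   //    pthread_cond_signal(&cv);        while (!ready)
   //                                        pthread_cond_wait(&cv, &mu);
   //                                     /* ... use 'ready' ... */
   //                                     pthread_mutex_unlock(&mu);
   //
   // The signaller's unprotected write to 'ready' races with the waiter's
   // locked read of it: exactly the signaller-vs-released-waiter situation
   // described above.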
2441    if (1) {
2442       Lock* lk = NULL;
2443       if (cvi->mx_ga != 0) {
2444          lk = map_locks_maybe_lookup( (Addr)cvi->mx_ga );
2445       }
2446       /* note: lk could be NULL.  Be careful. */
2447       if (lk) {
2448          if (lk->kind == LK_rdwr) {
2449             HG_(record_error_Misc)(thr,
2450                "pthread_cond_{signal,broadcast}: associated lock is a rwlock");
2451          }
2452          if (lk->heldBy == NULL) {
2453             HG_(record_error_Misc)(thr,
2454                "pthread_cond_{signal,broadcast}: dubious: "
2455                "associated lock is not held by any thread");
2456          }
2457          if (lk->heldBy != NULL && 0 == VG_(elemBag)(lk->heldBy, (UWord)thr)) {
2458             HG_(record_error_Misc)(thr,
2459                "pthread_cond_{signal,broadcast}: "
2460                "associated lock is not held by calling thread");
2461          }
2462       } else {
2463          /* Couldn't even find the damn thing. */
2464          // But actually .. that's not necessarily an error.  We don't
2465          // know the (CV,MX) binding until a pthread_cond_wait or bcast
2466          // shows us what it is, and that may not have happened yet.
2467          // So just keep quiet in this circumstance.
2468          //HG_(record_error_Misc)( thr,
2469          //   "pthread_cond_{signal,broadcast}: "
2470          //   "no or invalid mutex associated with cond");
2471       }
2472    }
2473 
2474    libhb_so_send( thr->hbthr, cvi->so, True/*strong_send*/ );
2475 }
2476 
2477 /* returns True if it reckons 'mutex' is valid and held by this
2478    thread, else False */
2479 static Bool evh__HG_PTHREAD_COND_WAIT_PRE ( ThreadId tid,
2480                                             void* cond, void* mutex )
2481 {
2482    Thread* thr;
2483    Lock*   lk;
2484    Bool    lk_valid = True;
2485    CVInfo* cvi;
2486 
2487    if (SHOW_EVENTS >= 1)
2488       VG_(printf)("evh__hg_PTHREAD_COND_WAIT_PRE"
2489                   "(ctid=%d, cond=%p, mutex=%p)\n",
2490                   (Int)tid, (void*)cond, (void*)mutex );
2491 
2492    thr = map_threads_maybe_lookup( tid );
2493    tl_assert(thr); /* cannot fail - Thread* must already exist */
2494 
2495    lk = map_locks_maybe_lookup( (Addr)mutex );
2496 
2497    /* Check for stupid mutex arguments.  There are various ways to be
2498       a bozo.  Only complain once, though, even if more than one thing
2499       is wrong. */
2500    if (lk == NULL) {
2501       lk_valid = False;
2502       HG_(record_error_Misc)(
2503          thr,
2504          "pthread_cond_{timed}wait called with invalid mutex" );
2505    } else {
2506       tl_assert( HG_(is_sane_LockN)(lk) );
2507       if (lk->kind == LK_rdwr) {
2508          lk_valid = False;
2509          HG_(record_error_Misc)(
2510             thr, "pthread_cond_{timed}wait called with mutex "
2511                  "of type pthread_rwlock_t*" );
2512       } else
2513       if (lk->heldBy == NULL) {
2514          lk_valid = False;
2515          HG_(record_error_Misc)(
2516             thr, "pthread_cond_{timed}wait called with un-held mutex");
2517       } else
2518       if (lk->heldBy != NULL
2519           && VG_(elemBag)( lk->heldBy, (UWord)thr ) == 0) {
2520          lk_valid = False;
2521          HG_(record_error_Misc)(
2522             thr, "pthread_cond_{timed}wait called with mutex "
2523                  "held by a different thread" );
2524       }
2525    }
2526 
2527    // error-if: cond is also associated with a different mutex
2528    cvi = map_cond_to_CVInfo_lookup_or_alloc(cond);
2529    tl_assert(cvi);
2530    tl_assert(cvi->so);
2531    if (cvi->nWaiters == 0) {
2532       /* form initial (CV,MX) binding */
2533       cvi->mx_ga = mutex;
2534    }
2535    else /* check existing (CV,MX) binding */
2536    if (cvi->mx_ga != mutex) {
2537       HG_(record_error_Misc)(
2538          thr, "pthread_cond_{timed}wait: cond is associated "
2539               "with a different mutex");
2540    }
2541    cvi->nWaiters++;
2542 
2543    return lk_valid;
2544 }
2545 
2546 static void evh__HG_PTHREAD_COND_WAIT_POST ( ThreadId tid,
2547                                              void* cond, void* mutex,
2548                                              Bool timeout)
2549 {
2550    /* A pthread_cond_wait(cond, mutex) completed successfully.  Find
2551       the SO for this cond, and 'recv' from it so as to acquire a
2552       dependency edge back to the signaller/broadcaster. */
2553    Thread* thr;
2554    CVInfo* cvi;
2555 
2556    if (SHOW_EVENTS >= 1)
2557       VG_(printf)("evh__HG_PTHREAD_COND_WAIT_POST"
2558                   "(ctid=%d, cond=%p, mutex=%p, timeout=%d)\n",
2559                   (Int)tid, (void*)cond, (void*)mutex, (Int)timeout );
2560 
2561    thr = map_threads_maybe_lookup( tid );
2562    tl_assert(thr); /* cannot fail - Thread* must already exist */
2563 
2564    // error-if: cond is also associated with a different mutex
2565 
2566    cvi = map_cond_to_CVInfo_lookup_NO_alloc( cond );
2567    if (!cvi) {
2568       /* This could be either a bug in helgrind or an error in the guest
2569          application (e.g. the cond var was destroyed by another thread).
2570          Let's assume helgrind is perfect ...
2571          Note that this is similar to drd's behaviour. */
2572       HG_(record_error_Misc)(thr, "condition variable has been destroyed while"
2573                              " being waited upon");
2574       return;
2575    }
2576 
2577    tl_assert(cvi);
2578    tl_assert(cvi->so);
2579    tl_assert(cvi->nWaiters > 0);
2580 
2581    if (!timeout && !libhb_so_everSent(cvi->so)) {
2582       /* Hmm.  How can a wait on 'cond' succeed if nobody signalled
2583          it?  If this happened it would surely be a bug in the threads
2584          library.  Or one of those fabled "spurious wakeups". */
2585       HG_(record_error_Misc)( thr, "Bug in libpthread: pthread_cond_wait "
2586                                    "succeeded"
2587                                    " without prior pthread_cond_signal");
2588    }
2589 
2590    /* anyway, acquire a dependency on it. */
2591    libhb_so_recv( thr->hbthr, cvi->so, True/*strong_recv*/ );
2592 
2593    cvi->nWaiters--;
2594 }
2595 
2596 static void evh__HG_PTHREAD_COND_INIT_POST ( ThreadId tid,
2597                                              void* cond, void* cond_attr )
2598 {
2599    CVInfo* cvi;
2600 
2601    if (SHOW_EVENTS >= 1)
2602       VG_(printf)("evh__HG_PTHREAD_COND_INIT_POST"
2603                   "(ctid=%d, cond=%p, cond_attr=%p)\n",
2604                   (Int)tid, (void*)cond, (void*) cond_attr );
2605 
2606    cvi = map_cond_to_CVInfo_lookup_or_alloc( cond );
2607    tl_assert (cvi);
2608    tl_assert (cvi->so);
2609 }
2610 
2611 
2612 static void evh__HG_PTHREAD_COND_DESTROY_PRE ( ThreadId tid,
2613                                                void* cond, Bool cond_is_init )
2614 {
2615    /* Deal with destroy events.  The only purpose is to free storage
2616       associated with the CV, so as to avoid any possible resource
2617       leaks. */
2618    if (SHOW_EVENTS >= 1)
2619       VG_(printf)("evh__HG_PTHREAD_COND_DESTROY_PRE"
2620                   "(ctid=%d, cond=%p, cond_is_init=%d)\n",
2621                   (Int)tid, (void*)cond, (Int)cond_is_init );
2622 
2623    map_cond_to_CVInfo_delete( tid, cond, cond_is_init );
2624 }
2625 
2626 
2627 /* ------------------------------------------------------- */
2628 /* -------------- events to do with rwlocks -------------- */
2629 /* ------------------------------------------------------- */
2630 
2631 /* EXPOSITION only */
2632 static
2633 void evh__HG_PTHREAD_RWLOCK_INIT_POST( ThreadId tid, void* rwl )
2634 {
2635    if (SHOW_EVENTS >= 1)
2636       VG_(printf)("evh__hg_PTHREAD_RWLOCK_INIT_POST(ctid=%d, %p)\n",
2637                   (Int)tid, (void*)rwl );
2638    map_locks_lookup_or_create( LK_rdwr, (Addr)rwl, tid );
2639    if (HG_(clo_sanity_flags) & SCE_LOCKS)
2640       all__sanity_check("evh__hg_PTHREAD_RWLOCK_INIT_POST");
2641 }
2642 
2643 static
2644 void evh__HG_PTHREAD_RWLOCK_DESTROY_PRE( ThreadId tid, void* rwl )
2645 {
2646    Thread* thr;
2647    Lock*   lk;
2648    if (SHOW_EVENTS >= 1)
2649       VG_(printf)("evh__hg_PTHREAD_RWLOCK_DESTROY_PRE(ctid=%d, %p)\n",
2650                   (Int)tid, (void*)rwl );
2651 
2652    thr = map_threads_maybe_lookup( tid );
2653    /* cannot fail - Thread* must already exist */
2654    tl_assert( HG_(is_sane_Thread)(thr) );
2655 
2656    lk = map_locks_maybe_lookup( (Addr)rwl );
2657 
2658    if (lk == NULL || lk->kind != LK_rdwr) {
2659       HG_(record_error_Misc)(
2660          thr, "pthread_rwlock_destroy with invalid argument" );
2661    }
2662 
2663    if (lk) {
2664       tl_assert( HG_(is_sane_LockN)(lk) );
2665       tl_assert( lk->guestaddr == (Addr)rwl );
2666       if (lk->heldBy) {
2667          /* Basically act like we unlocked the lock */
2668          HG_(record_error_Misc)(
2669             thr, "pthread_rwlock_destroy of a locked rwlock" );
2670          /* remove lock from locksets of all owning threads */
2671          remove_Lock_from_locksets_of_all_owning_Threads( lk );
2672          VG_(deleteBag)( lk->heldBy );
2673          lk->heldBy = NULL;
2674          lk->heldW = False;
2675          lk->acquired_at = NULL;
2676       }
2677       tl_assert( !lk->heldBy );
2678       tl_assert( HG_(is_sane_LockN)(lk) );
2679 
2680       if (HG_(clo_track_lockorders))
2681          laog__handle_one_lock_deletion(lk);
2682       map_locks_delete( lk->guestaddr );
2683       del_LockN( lk );
2684    }
2685 
2686    if (HG_(clo_sanity_flags) & SCE_LOCKS)
2687       all__sanity_check("evh__hg_PTHREAD_RWLOCK_DESTROY_PRE");
2688 }
2689 
2690 static
2691 void evh__HG_PTHREAD_RWLOCK_LOCK_PRE ( ThreadId tid,
2692                                        void* rwl,
2693                                        Word isW, Word isTryLock )
2694 {
2695    /* Just check the rwl is sane; nothing else to do. */
2696    // 'rwl' may be invalid - not checked by wrapper
2697    Thread* thr;
2698    Lock*   lk;
2699    if (SHOW_EVENTS >= 1)
2700       VG_(printf)("evh__hg_PTHREAD_RWLOCK_LOCK_PRE(ctid=%d, isW=%d, %p)\n",
2701                   (Int)tid, (Int)isW, (void*)rwl );
2702 
2703    tl_assert(isW == 0 || isW == 1); /* assured us by wrapper */
2704    tl_assert(isTryLock == 0 || isTryLock == 1); /* assured us by wrapper */
2705    thr = map_threads_maybe_lookup( tid );
2706    tl_assert(thr); /* cannot fail - Thread* must already exist */
2707 
2708    lk = map_locks_maybe_lookup( (Addr)rwl );
2709    if ( lk
2710         && (lk->kind == LK_nonRec || lk->kind == LK_mbRec) ) {
2711       /* Wrong kind of lock.  Duh.  */
2712       HG_(record_error_Misc)(
2713          thr, "pthread_rwlock_{rd,wr}lock with a "
2714               "pthread_mutex_t* argument " );
2715    }
2716 }
2717 
2718 static
2719 void evh__HG_PTHREAD_RWLOCK_LOCK_POST ( ThreadId tid, void* rwl, Word isW )
2720 {
2721    // only called if the real library call succeeded - so mutex is sane
2722    Thread* thr;
2723    if (SHOW_EVENTS >= 1)
2724       VG_(printf)("evh__hg_PTHREAD_RWLOCK_LOCK_POST(ctid=%d, isW=%d, %p)\n",
2725                   (Int)tid, (Int)isW, (void*)rwl );
2726 
2727    tl_assert(isW == 0 || isW == 1); /* assured us by wrapper */
2728    thr = map_threads_maybe_lookup( tid );
2729    tl_assert(thr); /* cannot fail - Thread* must already exist */
2730 
2731    (isW ? evhH__post_thread_w_acquires_lock
2732         : evhH__post_thread_r_acquires_lock)(
2733       thr,
2734       LK_rdwr, /* if not known, create new lock with this LockKind */
2735       (Addr)rwl
2736    );
2737 }
2738 
2739 static void evh__HG_PTHREAD_RWLOCK_UNLOCK_PRE ( ThreadId tid, void* rwl )
2740 {
2741    // 'rwl' may be invalid - not checked by wrapper
2742    Thread* thr;
2743    if (SHOW_EVENTS >= 1)
2744       VG_(printf)("evh__HG_PTHREAD_RWLOCK_UNLOCK_PRE(ctid=%d, rwl=%p)\n",
2745                   (Int)tid, (void*)rwl );
2746 
2747    thr = map_threads_maybe_lookup( tid );
2748    tl_assert(thr); /* cannot fail - Thread* must already exist */
2749 
2750    evhH__pre_thread_releases_lock( thr, (Addr)rwl, True/*isRDWR*/ );
2751 }
2752 
2753 static void evh__HG_PTHREAD_RWLOCK_UNLOCK_POST ( ThreadId tid, void* rwl )
2754 {
2755    // only called if the real library call succeeded - so the rwlock is sane
2756    Thread* thr;
2757    if (SHOW_EVENTS >= 1)
2758       VG_(printf)("evh__hg_PTHREAD_RWLOCK_UNLOCK_POST(ctid=%d, rwl=%p)\n",
2759                   (Int)tid, (void*)rwl );
2760    thr = map_threads_maybe_lookup( tid );
2761    tl_assert(thr); /* cannot fail - Thread* must already exist */
2762 
2763    // anything we should do here?
2764 }
2765 
2766 
2767 /* ---------------------------------------------------------- */
2768 /* -------------- events to do with semaphores -------------- */
2769 /* ---------------------------------------------------------- */
2770 
2771 /* This is similar to but not identical to the handling for condition
2772    variables. */
2773 
2774 /* For each semaphore, we maintain a stack of SOs.  When a 'post'
2775    operation is done on a semaphore (unlocking, essentially), a new SO
2776    is created for the posting thread, the posting thread does a strong
2777    send to it (which merely installs the posting thread's VC in the
2778    SO), and the SO is pushed on the semaphore's stack.
2779 
2780    Later, when a (probably different) thread completes 'wait' on the
2781    semaphore, we pop a SO off the semaphore's stack (which should be
2782    nonempty), and do a strong recv from it.  This mechanism creates
2783    dependencies between posters and waiters of the semaphore.
2784 
2785    It may not be necessary to use a stack - perhaps a bag of SOs would
2786    do.  But we do need to keep track of how many unused-up posts have
2787    happened for the semaphore.
2788 
2789    Imagine T1 and T2 both post once on a semaphore S, and T3 waits
2790    twice on S.  T3 cannot complete its waits without both T1 and T2
2791    posting.  The above mechanism will ensure that T3 acquires
2792    dependencies on both T1 and T2.
2793 
2794    When a semaphore is initialised with value N, we act as if we'd
2795    posted N times on the semaphore: basically create N SOs and do a
2796    strong send to all of them.  This allows up to N waits on the
2797    semaphore to acquire a dependency on the initialisation point,
2798    which AFAICS is the correct behaviour.
2799 
2800    We don't emit an error for DESTROY_PRE on a semaphore we don't know
2801    about.  We should.
2802 */
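/* Illustrative sketch only (not part of Helgrind): a client program
   exercising the scheme described above.  T1 and T2 each post once and
   T3 waits twice, so T3 ends up with happens-before dependencies on
   both posters.  All names below are made up; compile with -pthread.

      #include <semaphore.h>
      #include <pthread.h>

      static sem_t s;
      static int   data1, data2;

      static void* poster1 ( void* v ) { data1 = 1; sem_post(&s); return NULL; }
      static void* poster2 ( void* v ) { data2 = 2; sem_post(&s); return NULL; }

      static void* waiter ( void* v ) {
         sem_wait(&s);      // strong recv from one of the pushed SOs
         sem_wait(&s);      // strong recv from the other one
         return (void*)(long)(data1 + data2);  // both writes happen-before here
      }

      int main ( void ) {
         pthread_t t1, t2, t3;
         sem_init(&s, 0, 0);   // initial value 0: no SOs pushed at init
         pthread_create(&t1, NULL, poster1, NULL);
         pthread_create(&t2, NULL, poster2, NULL);
         pthread_create(&t3, NULL, waiter,  NULL);
         pthread_join(t1, NULL); pthread_join(t2, NULL); pthread_join(t3, NULL);
         return 0;
      }
*/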
2803 
2804 /* sem_t* -> XArray* SO* */
2805 static WordFM* map_sem_to_SO_stack = NULL;
2806 
2807 static void map_sem_to_SO_stack_INIT ( void ) {
2808    if (map_sem_to_SO_stack == NULL) {
2809       map_sem_to_SO_stack = VG_(newFM)( HG_(zalloc), "hg.mstSs.1",
2810                                         HG_(free), NULL );
2811    }
2812 }
2813 
2814 static void push_SO_for_sem ( void* sem, SO* so ) {
2815    UWord   keyW;
2816    XArray* xa;
2817    tl_assert(so);
2818    map_sem_to_SO_stack_INIT();
2819    if (VG_(lookupFM)( map_sem_to_SO_stack,
2820                       &keyW, (UWord*)&xa, (UWord)sem )) {
2821       tl_assert(keyW == (UWord)sem);
2822       tl_assert(xa);
2823       VG_(addToXA)( xa, &so );
2824    } else {
2825       xa = VG_(newXA)( HG_(zalloc), "hg.pSfs.1", HG_(free), sizeof(SO*) );
2826       VG_(addToXA)( xa, &so );
2827       VG_(addToFM)( map_sem_to_SO_stack, (UWord)sem, (UWord)xa );
2828    }
2829 }
2830 
2831 static SO* mb_pop_SO_for_sem ( void* sem ) {
2832    UWord    keyW;
2833    XArray*  xa;
2834    SO* so;
2835    map_sem_to_SO_stack_INIT();
2836    if (VG_(lookupFM)( map_sem_to_SO_stack,
2837                       &keyW, (UWord*)&xa, (UWord)sem )) {
2838       /* xa is the stack for this semaphore. */
2839       Word sz;
2840       tl_assert(keyW == (UWord)sem);
2841       sz = VG_(sizeXA)( xa );
2842       tl_assert(sz >= 0);
2843       if (sz == 0)
2844          return NULL; /* odd, the stack is empty */
2845       so = *(SO**)VG_(indexXA)( xa, sz-1 );
2846       tl_assert(so);
2847       VG_(dropTailXA)( xa, 1 );
2848       return so;
2849    } else {
2850       /* hmm, that's odd.  No stack for this semaphore. */
2851       return NULL;
2852    }
2853 }
2854 
2855 static void evh__HG_POSIX_SEM_DESTROY_PRE ( ThreadId tid, void* sem )
2856 {
2857    UWord keyW, valW;
2858    SO*   so;
2859 
2860    if (SHOW_EVENTS >= 1)
2861       VG_(printf)("evh__HG_POSIX_SEM_DESTROY_PRE(ctid=%d, sem=%p)\n",
2862                   (Int)tid, (void*)sem );
2863 
2864    map_sem_to_SO_stack_INIT();
2865 
2866    /* Empty out the semaphore's SO stack.  This way of doing it is
2867       stupid, but at least it's easy. */
2868    while (1) {
2869       so = mb_pop_SO_for_sem( sem );
2870       if (!so) break;
2871       libhb_so_dealloc(so);
2872    }
2873 
2874    if (VG_(delFromFM)( map_sem_to_SO_stack, &keyW, &valW, (UWord)sem )) {
2875       XArray* xa = (XArray*)valW;
2876       tl_assert(keyW == (UWord)sem);
2877       tl_assert(xa);
2878       tl_assert(VG_(sizeXA)(xa) == 0); /* preceding loop just emptied it */
2879       VG_(deleteXA)(xa);
2880    }
2881 }
2882 
2883 static
2884 void evh__HG_POSIX_SEM_INIT_POST ( ThreadId tid, void* sem, UWord value )
2885 {
2886    SO*     so;
2887    Thread* thr;
2888 
2889    if (SHOW_EVENTS >= 1)
2890       VG_(printf)("evh__HG_POSIX_SEM_INIT_POST(ctid=%d, sem=%p, value=%lu)\n",
2891                   (Int)tid, (void*)sem, value );
2892 
2893    thr = map_threads_maybe_lookup( tid );
2894    tl_assert(thr); /* cannot fail - Thread* must already exist */
2895 
2896    /* Empty out the semaphore's SO stack.  This way of doing it is
2897       stupid, but at least it's easy. */
2898    while (1) {
2899       so = mb_pop_SO_for_sem( sem );
2900       if (!so) break;
2901       libhb_so_dealloc(so);
2902    }
2903 
2904    /* If we don't do this check, the following while loop runs us out
2905       of memory for stupid initial values of 'value'. */
2906    if (value > 10000) {
2907       HG_(record_error_Misc)(
2908          thr, "sem_init: initial value exceeds 10000; using 10000" );
2909       value = 10000;
2910    }
2911 
2912    /* Now create 'value' new SOs for the thread, do a strong send to
2913       each of them, and push them all on the stack. */
2914    for (; value > 0; value--) {
2915       Thr* hbthr = thr->hbthr;
2916       tl_assert(hbthr);
2917 
2918       so = libhb_so_alloc();
2919       libhb_so_send( hbthr, so, True/*strong send*/ );
2920       push_SO_for_sem( sem, so );
2921    }
2922 }
2923 
2924 static void evh__HG_POSIX_SEM_POST_PRE ( ThreadId tid, void* sem )
2925 {
2926    /* 'tid' has posted on 'sem'.  Create a new SO, do a strong send to
2927       it (iow, write our VC into it, then tick ours), and push the SO
2928       on a stack of SOs associated with 'sem'.  This is later used
2929       by other thread(s) which successfully exit from a sem_wait on
2930       the same sem; by doing a strong recv from SOs popped off the
2931       stack, they acquire dependencies on the posting thread
2932       segment(s). */
2933 
2934    Thread* thr;
2935    SO*     so;
2936    Thr*    hbthr;
2937 
2938    if (SHOW_EVENTS >= 1)
2939       VG_(printf)("evh__HG_POSIX_SEM_POST_PRE(ctid=%d, sem=%p)\n",
2940                   (Int)tid, (void*)sem );
2941 
2942    thr = map_threads_maybe_lookup( tid );
2943    tl_assert(thr); /* cannot fail - Thread* must already exist */
2944 
2945    // error-if: sem is bogus
2946 
2947    hbthr = thr->hbthr;
2948    tl_assert(hbthr);
2949 
2950    so = libhb_so_alloc();
2951    libhb_so_send( hbthr, so, True/*strong send*/ );
2952    push_SO_for_sem( sem, so );
2953 }
2954 
2955 static void evh__HG_POSIX_SEM_WAIT_POST ( ThreadId tid, void* sem )
2956 {
2957    /* A sem_wait(sem) completed successfully.  Pop the posting-SO for
2958       the 'sem' from this semaphore's SO-stack, and do a strong recv
2959       from it.  This creates a dependency back to one of the post-ers
2960       for the semaphore. */
2961 
2962    Thread* thr;
2963    SO*     so;
2964    Thr*    hbthr;
2965 
2966    if (SHOW_EVENTS >= 1)
2967       VG_(printf)("evh__HG_POSIX_SEM_WAIT_POST(ctid=%d, sem=%p)\n",
2968                   (Int)tid, (void*)sem );
2969 
2970    thr = map_threads_maybe_lookup( tid );
2971    tl_assert(thr); /* cannot fail - Thread* must already exist */
2972 
2973    // error-if: sem is bogus
2974 
2975    so = mb_pop_SO_for_sem( sem );
2976 
2977    if (so) {
2978       hbthr = thr->hbthr;
2979       tl_assert(hbthr);
2980 
2981       libhb_so_recv( hbthr, so, True/*strong recv*/ );
2982       libhb_so_dealloc(so);
2983    } else {
2984       /* Hmm.  How can a wait on 'sem' succeed if nobody posted to it?
2985          If this happened it would surely be a bug in the threads
2986          library. */
2987       HG_(record_error_Misc)(
2988          thr, "Bug in libpthread: sem_wait succeeded on"
2989               " semaphore without prior sem_post");
2990    }
2991 }
2992 
2993 
2994 /* -------------------------------------------------------- */
2995 /* -------------- events to do with barriers -------------- */
2996 /* -------------------------------------------------------- */
2997 
2998 typedef
2999    struct {
3000       Bool    initted; /* has it yet been initted by guest? */
3001       Bool    resizable; /* is resizing allowed? */
3002       UWord   size;    /* declared size */
3003       XArray* waiting; /* XA of Thread*.  # present is 0 .. .size */
3004    }
3005    Bar;
3006 
3007 static Bar* new_Bar ( void ) {
3008    Bar* bar = HG_(zalloc)( "hg.nB.1 (new_Bar)", sizeof(Bar) );
3009    /* all fields are zero */
3010    tl_assert(bar->initted == False);
3011    return bar;
3012 }
3013 
3014 static void delete_Bar ( Bar* bar ) {
3015    tl_assert(bar);
3016    if (bar->waiting)
3017       VG_(deleteXA)(bar->waiting);
3018    HG_(free)(bar);
3019 }
3020 
3021 /* A mapping which stores auxiliary data for barriers. */
3022 
3023 /* pthread_barrier_t* -> Bar* */
3024 static WordFM* map_barrier_to_Bar = NULL;
3025 
3026 static void map_barrier_to_Bar_INIT ( void ) {
3027    if (UNLIKELY(map_barrier_to_Bar == NULL)) {
3028       map_barrier_to_Bar = VG_(newFM)( HG_(zalloc),
3029                                        "hg.mbtBI.1", HG_(free), NULL );
3030    }
3031 }
3032 
3033 static Bar* map_barrier_to_Bar_lookup_or_alloc ( void* barrier ) {
3034    UWord key, val;
3035    map_barrier_to_Bar_INIT();
3036    if (VG_(lookupFM)( map_barrier_to_Bar, &key, &val, (UWord)barrier )) {
3037       tl_assert(key == (UWord)barrier);
3038       return (Bar*)val;
3039    } else {
3040       Bar* bar = new_Bar();
3041       VG_(addToFM)( map_barrier_to_Bar, (UWord)barrier, (UWord)bar );
3042       return bar;
3043    }
3044 }
3045 
3046 static void map_barrier_to_Bar_delete ( void* barrier ) {
3047    UWord keyW, valW;
3048    map_barrier_to_Bar_INIT();
3049    if (VG_(delFromFM)( map_barrier_to_Bar, &keyW, &valW, (UWord)barrier )) {
3050       Bar* bar = (Bar*)valW;
3051       tl_assert(keyW == (UWord)barrier);
3052       delete_Bar(bar);
3053    }
3054 }
3055 
3056 
3057 static void evh__HG_PTHREAD_BARRIER_INIT_PRE ( ThreadId tid,
3058                                                void* barrier,
3059                                                UWord count,
3060                                                UWord resizable )
3061 {
3062    Thread* thr;
3063    Bar*    bar;
3064 
3065    if (SHOW_EVENTS >= 1)
3066       VG_(printf)("evh__HG_PTHREAD_BARRIER_INIT_PRE"
3067                   "(tid=%d, barrier=%p, count=%lu, resizable=%lu)\n",
3068                   (Int)tid, (void*)barrier, count, resizable );
3069 
3070    thr = map_threads_maybe_lookup( tid );
3071    tl_assert(thr); /* cannot fail - Thread* must already exist */
3072 
3073    if (count == 0) {
3074       HG_(record_error_Misc)(
3075          thr, "pthread_barrier_init: 'count' argument is zero"
3076       );
3077    }
3078 
3079    if (resizable != 0 && resizable != 1) {
3080       HG_(record_error_Misc)(
3081          thr, "pthread_barrier_init: invalid 'resizable' argument"
3082       );
3083    }
3084 
3085    bar = map_barrier_to_Bar_lookup_or_alloc(barrier);
3086    tl_assert(bar);
3087 
3088    if (bar->initted) {
3089       HG_(record_error_Misc)(
3090          thr, "pthread_barrier_init: barrier is already initialised"
3091       );
3092    }
3093 
3094    if (bar->waiting && VG_(sizeXA)(bar->waiting) > 0) {
3095       tl_assert(bar->initted);
3096       HG_(record_error_Misc)(
3097          thr, "pthread_barrier_init: threads are waiting at barrier"
3098       );
3099       VG_(dropTailXA)(bar->waiting, VG_(sizeXA)(bar->waiting));
3100    }
3101    if (!bar->waiting) {
3102       bar->waiting = VG_(newXA)( HG_(zalloc), "hg.eHPBIP.1", HG_(free),
3103                                  sizeof(Thread*) );
3104    }
3105 
3106    tl_assert(VG_(sizeXA)(bar->waiting) == 0);
3107    bar->initted   = True;
3108    bar->resizable = resizable == 1 ? True : False;
3109    bar->size      = count;
3110 }
3111 
3112 
3113 static void evh__HG_PTHREAD_BARRIER_DESTROY_PRE ( ThreadId tid,
3114                                                   void* barrier )
3115 {
3116    Thread* thr;
3117    Bar*    bar;
3118 
3119    /* Deal with destroy events.  The only purpose is to free storage
3120       associated with the barrier, so as to avoid any possible
3121       resource leaks. */
3122    if (SHOW_EVENTS >= 1)
3123       VG_(printf)("evh__HG_PTHREAD_BARRIER_DESTROY_PRE"
3124                   "(tid=%d, barrier=%p)\n",
3125                   (Int)tid, (void*)barrier );
3126 
3127    thr = map_threads_maybe_lookup( tid );
3128    tl_assert(thr); /* cannot fail - Thread* must already exist */
3129 
3130    bar = map_barrier_to_Bar_lookup_or_alloc(barrier);
3131    tl_assert(bar);
3132 
3133    if (!bar->initted) {
3134       HG_(record_error_Misc)(
3135          thr, "pthread_barrier_destroy: barrier was never initialised"
3136       );
3137    }
3138 
3139    if (bar->initted && bar->waiting && VG_(sizeXA)(bar->waiting) > 0) {
3140       HG_(record_error_Misc)(
3141          thr, "pthread_barrier_destroy: threads are waiting at barrier"
3142       );
3143    }
3144 
3145    /* Maybe we shouldn't do this; just let it persist, so that when it
3146       is reinitialised we don't need to do any dynamic memory
3147       allocation?  The downside is a potentially unlimited space leak,
3148       if the client creates (in turn) a large number of barriers all
3149       at different locations.  Note that if we do later move to the
3150       don't-delete-it scheme, we need to mark the barrier as
3151       uninitialised again since otherwise a later _init call will
3152       elicit a duplicate-init error.  */
3153    map_barrier_to_Bar_delete( barrier );
3154 }
3155 
3156 
3157 /* All the threads have arrived.  Now do the Interesting Bit.  Get a
3158    new synchronisation object and do a weak send to it from all the
3159    participating threads.  This makes its vector clocks be the join of
3160    all the individual threads' vector clocks.  Then do a strong
3161    receive from it back to all threads, so that their VCs are a copy
3162    of it (hence are all equal to the join of their original VCs.) */
3163 static void do_barrier_cross_sync_and_empty ( Bar* bar )
3164 {
3165    /* XXX check bar->waiting has no duplicates */
3166    UWord i;
3167    SO*   so = libhb_so_alloc();
3168 
3169    tl_assert(bar->waiting);
3170    tl_assert(VG_(sizeXA)(bar->waiting) == bar->size);
3171 
3172    /* compute the join ... */
3173    for (i = 0; i < bar->size; i++) {
3174       Thread* t = *(Thread**)VG_(indexXA)(bar->waiting, i);
3175       Thr* hbthr = t->hbthr;
3176       libhb_so_send( hbthr, so, False/*weak send*/ );
3177    }
3178    /* ... and distribute to all threads */
3179    for (i = 0; i < bar->size; i++) {
3180       Thread* t = *(Thread**)VG_(indexXA)(bar->waiting, i);
3181       Thr* hbthr = t->hbthr;
3182       libhb_so_recv( hbthr, so, True/*strong recv*/ );
3183    }
3184 
3185    /* finally, we must empty out the waiting vector */
3186    VG_(dropTailXA)(bar->waiting, VG_(sizeXA)(bar->waiting));
3187 
3188    /* and we don't need this any more.  Perhaps a stack-allocated
3189       SO would be better? */
3190    libhb_so_dealloc(so);
3191 }
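/* Worked example (illustrative numbers only, and ignoring any
   per-thread clock tick the recv may additionally perform): for a
   barrier of size 3, suppose the arriving threads' vector clocks are

      T1: [5,1,0]   T2: [2,7,0]   T3: [0,1,9]

   After the three weak sends, the SO's clock is the pointwise maximum,
   i.e. the join [5,7,9].  After the three strong recvs, each thread's
   clock is a copy of that join, so every thread now depends on
   everything all three threads did before arriving:

      T1 = T2 = T3 = [5,7,9]
*/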
3192 
3193 
3194 static void evh__HG_PTHREAD_BARRIER_WAIT_PRE ( ThreadId tid,
3195                                                void* barrier )
3196 {
3197   /* This function gets called after a client thread calls
3198      pthread_barrier_wait but before it arrives at the real
3199      pthread_barrier_wait.
3200 
3201      Why is the following correct?  It's a bit subtle.
3202 
3203      If this is not the last thread arriving at the barrier, we simply
3204      note its presence and return.  Because valgrind (at least as of
3205      Nov 08) is single threaded, we are guaranteed safe from any race
3206      conditions when in this function -- no other client threads are
3207      running.
3208 
3209      If this is the last thread, then we are again the only running
3210      thread.  All the other threads will have either arrived at the
3211      real pthread_barrier_wait or are on their way to it, but in any
3212      case are guaranteed not to be able to move past it, because this
3213      thread is currently in this function and so has not yet arrived
3214      at the real pthread_barrier_wait.  That means that:
3215 
3216      1. While we are in this function, none of the other threads
3217         waiting at the barrier can move past it.
3218 
3219      2. When this function returns (and simulated execution resumes),
3220         this thread and all other waiting threads will be able to move
3221         past the real barrier.
3222 
3223      Because of this, it is now safe to update the vector clocks of
3224      all threads, to represent the fact that they all arrived at the
3225      barrier and have all moved on.  There is no danger of any
3226      complications to do with some threads leaving the barrier and
3227      racing back round to the front, whilst others are still leaving
3228      (which is the primary source of complication in correct handling/
3229      implementation of barriers).  That can't happen because we update
3230      our data structures here so as to indicate that the threads have
3231      passed the barrier, even though, as per (2) above, they are
3232      guaranteed not to pass the barrier until we return.
3233 
3234      This relies crucially on Valgrind being single threaded.  If that
3235      changes, this will need to be reconsidered.
3236    */
3237    Thread* thr;
3238    Bar*    bar;
3239    UWord   present;
3240 
3241    if (SHOW_EVENTS >= 1)
3242       VG_(printf)("evh__HG_PTHREAD_BARRIER_WAIT_PRE"
3243                   "(tid=%d, barrier=%p)\n",
3244                   (Int)tid, (void*)barrier );
3245 
3246    thr = map_threads_maybe_lookup( tid );
3247    tl_assert(thr); /* cannot fail - Thread* must already exist */
3248 
3249    bar = map_barrier_to_Bar_lookup_or_alloc(barrier);
3250    tl_assert(bar);
3251 
3252    if (!bar->initted) {
3253       HG_(record_error_Misc)(
3254          thr, "pthread_barrier_wait: barrier is uninitialised"
3255       );
3256       return; /* client is broken .. avoid assertions below */
3257    }
3258 
3259    /* guaranteed by _INIT_PRE above */
3260    tl_assert(bar->size > 0);
3261    tl_assert(bar->waiting);
3262 
3263    VG_(addToXA)( bar->waiting, &thr );
3264 
3265    /* guaranteed by this function */
3266    present = VG_(sizeXA)(bar->waiting);
3267    tl_assert(present > 0 && present <= bar->size);
3268 
3269    if (present < bar->size)
3270       return;
3271 
3272    do_barrier_cross_sync_and_empty(bar);
3273 }
3274 
3275 
3276 static void evh__HG_PTHREAD_BARRIER_RESIZE_PRE ( ThreadId tid,
3277                                                  void* barrier,
3278                                                  UWord newcount )
3279 {
3280    Thread* thr;
3281    Bar*    bar;
3282    UWord   present;
3283 
3284    if (SHOW_EVENTS >= 1)
3285       VG_(printf)("evh__HG_PTHREAD_BARRIER_RESIZE_PRE"
3286                   "(tid=%d, barrier=%p, newcount=%lu)\n",
3287                   (Int)tid, (void*)barrier, newcount );
3288 
3289    thr = map_threads_maybe_lookup( tid );
3290    tl_assert(thr); /* cannot fail - Thread* must already exist */
3291 
3292    bar = map_barrier_to_Bar_lookup_or_alloc(barrier);
3293    tl_assert(bar);
3294 
3295    if (!bar->initted) {
3296       HG_(record_error_Misc)(
3297          thr, "pthread_barrier_resize: barrier is uninitialised"
3298       );
3299       return; /* client is broken .. avoid assertions below */
3300    }
3301 
3302    if (!bar->resizable) {
3303       HG_(record_error_Misc)(
3304          thr, "pthread_barrier_resize: barrier may not be resized"
3305       );
3306       return; /* client is broken .. avoid assertions below */
3307    }
3308 
3309    if (newcount == 0) {
3310       HG_(record_error_Misc)(
3311          thr, "pthread_barrier_resize: 'newcount' argument is zero"
3312       );
3313       return; /* client is broken .. avoid assertions below */
3314    }
3315 
3316    /* guaranteed by _INIT_PRE above */
3317    tl_assert(bar->size > 0);
3318    tl_assert(bar->waiting);
3319    /* Guaranteed by this fn */
3320    tl_assert(newcount > 0);
3321 
3322    if (newcount >= bar->size) {
3323       /* Increasing the capacity.  There's no possibility of threads
3324          moving on from the barrier in this situation, so just note
3325          the fact and do nothing more. */
3326       bar->size = newcount;
3327    } else {
3328       /* Decreasing the capacity.  If we decrease it to be equal or
3329          below the number of waiting threads, they will now move past
3330          the barrier, so need to mess with dep edges in the same way
3331          as if the barrier had filled up normally. */
3332       present = VG_(sizeXA)(bar->waiting);
3333       tl_assert(present >= 0 && present <= bar->size);
3334       if (newcount <= present) {
3335          bar->size = present; /* keep the cross_sync call happy */
3336          do_barrier_cross_sync_and_empty(bar);
3337       }
3338       bar->size = newcount;
3339    }
3340 }
3341 
3342 
3343 /* ----------------------------------------------------- */
3344 /* ----- events to do with user-specified HB edges ----- */
3345 /* ----------------------------------------------------- */
3346 
3347 /* A mapping from arbitrary UWord tag to the SO associated with it.
3348    The UWord tags are meaningless to us, interpreted only by the
3349    user. */
3350 
3351 
3352 
3353 /* UWord -> SO* */
3354 static WordFM* map_usertag_to_SO = NULL;
3355 
3356 static void map_usertag_to_SO_INIT ( void ) {
3357    if (UNLIKELY(map_usertag_to_SO == NULL)) {
3358       map_usertag_to_SO = VG_(newFM)( HG_(zalloc),
3359                                       "hg.mutS.1", HG_(free), NULL );
3360    }
3361 }
3362 
3363 static SO* map_usertag_to_SO_lookup_or_alloc ( UWord usertag ) {
3364    UWord key, val;
3365    map_usertag_to_SO_INIT();
3366    if (VG_(lookupFM)( map_usertag_to_SO, &key, &val, usertag )) {
3367       tl_assert(key == (UWord)usertag);
3368       return (SO*)val;
3369    } else {
3370       SO* so = libhb_so_alloc();
3371       VG_(addToFM)( map_usertag_to_SO, usertag, (UWord)so );
3372       return so;
3373    }
3374 }
3375 
3376 static void map_usertag_to_SO_delete ( UWord usertag ) {
3377    UWord keyW, valW;
3378    map_usertag_to_SO_INIT();
3379    if (VG_(delFromFM)( map_usertag_to_SO, &keyW, &valW, usertag )) {
3380       SO* so = (SO*)valW;
3381       tl_assert(keyW == usertag);
3382       tl_assert(so);
3383       libhb_so_dealloc(so);
3384    }
3385 }
3386 
3387 
3388 static
3389 void evh__HG_USERSO_SEND_PRE ( ThreadId tid, UWord usertag )
3390 {
3391    /* TID is just about to notionally send a message on a notional
3392       abstract synchronisation object whose identity is given by
3393       USERTAG.  Bind USERTAG to a real SO if it is not already so
3394       bound, and do a 'weak send' on the SO.  This joins the vector
3395       clocks from this thread into any vector clocks already present
3396       in the SO.  The resulting SO vector clocks are later used by
3397       other thread(s) which successfully 'receive' from the SO,
3398       thereby acquiring a dependency on all the events that have
3399       previously signalled on this SO. */
3400    Thread* thr;
3401    SO*     so;
3402 
3403    if (SHOW_EVENTS >= 1)
3404       VG_(printf)("evh__HG_USERSO_SEND_PRE(ctid=%d, usertag=%#lx)\n",
3405                   (Int)tid, usertag );
3406 
3407    thr = map_threads_maybe_lookup( tid );
3408    tl_assert(thr); /* cannot fail - Thread* must already exist */
3409 
3410    so = map_usertag_to_SO_lookup_or_alloc( usertag );
3411    tl_assert(so);
3412 
3413    libhb_so_send( thr->hbthr, so, False/*!strong_send*/ );
3414 }
3415 
3416 static
3417 void evh__HG_USERSO_RECV_POST ( ThreadId tid, UWord usertag )
3418 {
3419    /* TID has just notionally received a message from a notional
3420       abstract synchronisation object whose identity is given by
3421       USERTAG.  Bind USERTAG to a real SO if it is not already so
3422       bound.  If the SO has at some point in the past been 'sent' on,
3423       do a 'strong receive' on it, thereby acquiring a dependency on
3424       the sender. */
3425    Thread* thr;
3426    SO*     so;
3427 
3428    if (SHOW_EVENTS >= 1)
3429       VG_(printf)("evh__HG_USERSO_RECV_POST(ctid=%d, usertag=%#lx)\n",
3430                   (Int)tid, usertag );
3431 
3432    thr = map_threads_maybe_lookup( tid );
3433    tl_assert(thr); /* cannot fail - Thread* must already exist */
3434 
3435    so = map_usertag_to_SO_lookup_or_alloc( usertag );
3436    tl_assert(so);
3437 
3438    /* Acquire a dependency on it.  If the SO has never so far been
3439       sent on, then libhb_so_recv will do nothing.  So we're safe
3440       regardless of SO's history. */
3441    libhb_so_recv( thr->hbthr, so, True/*strong_recv*/ );
3442 }
3443 
3444 static
3445 void evh__HG_USERSO_FORGET_ALL ( ThreadId tid, UWord usertag )
3446 {
3447    /* TID declares that any happens-before edges notionally stored in
3448       USERTAG can be deleted.  If (as would normally be the case) a
3449       SO is associated with USERTAG, then the association is removed
3450       and all resources associated with SO are freed.  Importantly,
3451       that frees up any VTSs stored in SO. */
3452    if (SHOW_EVENTS >= 1)
3453       VG_(printf)("evh__HG_USERSO_FORGET_ALL(ctid=%d, usertag=%#lx)\n",
3454                   (Int)tid, usertag );
3455 
3456    map_usertag_to_SO_delete( usertag );
3457 }
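/* Client-side sketch (not part of this file): these three events are
   normally reached via the happens-before annotations in helgrind.h,
   using the address of some client object as the usertag.  Assuming
   the usual macro names from that header:

      #include "helgrind.h"

      int payload;     // handed from producer to consumer
      int flag = 0;    // signalling variable (would itself need to be
                       // accessed atomically, or annotated separately)

      // producer
      payload = 42;
      ANNOTATE_HAPPENS_BEFORE(&flag);   // -> evh__HG_USERSO_SEND_PRE
      flag = 1;

      // consumer
      while (flag != 1) { }
      ANNOTATE_HAPPENS_AFTER(&flag);    // -> evh__HG_USERSO_RECV_POST
      // ... reads of 'payload' here are not reported as racing ...

   ANNOTATE_HAPPENS_BEFORE_FORGET_ALL(&flag) reaches
   evh__HG_USERSO_FORGET_ALL and frees the SO bound to that tag. */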
3458 
3459 
3460 #if defined(VGO_solaris)
3461 /* ----------------------------------------------------- */
3462 /* --- events to do with bind guard/clear intercepts --- */
3463 /* ----------------------------------------------------- */
3464 
3465 static
3466 void evh__HG_RTLD_BIND_GUARD(ThreadId tid, Int flags)
3467 {
3468    if (SHOW_EVENTS >= 1)
3469       VG_(printf)("evh__HG_RTLD_BIND_GUARD"
3470                   "(tid=%d, flags=%d)\n",
3471                   (Int)tid, flags);
3472 
3473    Thread *thr = map_threads_maybe_lookup(tid);
3474    tl_assert(thr != NULL);
3475 
3476    Int bindflag = (flags & VKI_THR_FLG_RTLD);
3477    if ((bindflag & thr->bind_guard_flag) == 0) {
3478       thr->bind_guard_flag |= bindflag;
3479       HG_(thread_enter_synchr)(thr);
3480       /* Misuse pthread_create_nesting_level for ignoring mutex activity. */
3481       HG_(thread_enter_pthread_create)(thr);
3482    }
3483 }
3484 
3485 static
3486 void evh__HG_RTLD_BIND_CLEAR(ThreadId tid, Int flags)
3487 {
3488    if (SHOW_EVENTS >= 1)
3489       VG_(printf)("evh__HG_RTLD_BIND_CLEAR"
3490                   "(tid=%d, flags=%d)\n",
3491                   (Int)tid, flags);
3492 
3493    Thread *thr = map_threads_maybe_lookup(tid);
3494    tl_assert(thr != NULL);
3495 
3496    Int bindflag = (flags & VKI_THR_FLG_RTLD);
3497    if ((thr->bind_guard_flag & bindflag) != 0) {
3498       thr->bind_guard_flag &= ~bindflag;
3499       HG_(thread_leave_synchr)(thr);
3500       HG_(thread_leave_pthread_create)(thr);
3501    }
3502 }
3503 #endif /* VGO_solaris */
3504 
3505 
3506 /*--------------------------------------------------------------*/
3507 /*--- Lock acquisition order monitoring                      ---*/
3508 /*--------------------------------------------------------------*/
3509 
3510 /* FIXME: here are some optimisations still to do in
3511           laog__pre_thread_acquires_lock.
3512 
3513    The graph is structured so that if L1 --*--> L2 then L1 must be
3514    acquired before L2.
3515 
3516    The common case is that some thread T holds (eg) L1 L2 and L3 and
3517    is repeatedly acquiring and releasing Ln, and there is no ordering
3518    error in what it is doing.  Hence it repeatedly:
3519 
3520    (1) searches laog to see if Ln --*--> {L1,L2,L3}, which always
3521        produces the answer No (because there is no error).
3522 
3523    (2) adds edges {L1,L2,L3} --> Ln to laog, which are already present
3524        (because they already got added the first time T acquired Ln).
3525 
3526    Hence cache these two events:
3527 
3528    (1) Cache result of the query from last time.  Invalidate the cache
3529        any time any edges are added to or deleted from laog.
3530 
3531    (2) Cache these add-edge requests and ignore them if said edges
3532        have already been added to laog.  Invalidate the cache any time
3533        any edges are deleted from laog.
3534 */
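/* A minimal sketch of what such caching might look like (not
   implemented here; the names are illustrative only):

      static Lock*     cached_lk    = NULL;  // the Ln of the last query
      static WordSetID cached_lset  = 0;     // thr->locksetA at that time
      static Bool      cached_found = False; // result of the last DFS

   laog__pre_thread_acquires_lock would consult (cached_lk, cached_lset)
   before doing the DFS, and laog__add_edge / laog__del_edge would reset
   cached_lk to NULL whenever they actually change the graph. */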
3535 
3536 typedef
3537    struct {
3538       WordSetID inns; /* in univ_laog */
3539       WordSetID outs; /* in univ_laog */
3540    }
3541    LAOGLinks;
3542 
3543 /* lock order acquisition graph */
3544 static WordFM* laog = NULL; /* WordFM Lock* LAOGLinks* */
3545 
3546 /* EXPOSITION ONLY: for each edge in 'laog', record the two places
3547    where that edge was created, so that we can show the user later if
3548    we need to. */
3549 typedef
3550    struct {
3551       Addr        src_ga; /* Lock guest addresses for */
3552       Addr        dst_ga; /* src/dst of the edge */
3553       ExeContext* src_ec; /* And corresponding places where that */
3554       ExeContext* dst_ec; /* ordering was established */
3555    }
3556    LAOGLinkExposition;
3557 
3558 static Word cmp_LAOGLinkExposition ( UWord llx1W, UWord llx2W ) {
3559    /* Compare LAOGLinkExposition*s by (src_ga,dst_ga) field pair. */
3560    LAOGLinkExposition* llx1 = (LAOGLinkExposition*)llx1W;
3561    LAOGLinkExposition* llx2 = (LAOGLinkExposition*)llx2W;
3562    if (llx1->src_ga < llx2->src_ga) return -1;
3563    if (llx1->src_ga > llx2->src_ga) return  1;
3564    if (llx1->dst_ga < llx2->dst_ga) return -1;
3565    if (llx1->dst_ga > llx2->dst_ga) return  1;
3566    return 0;
3567 }
3568 
3569 static WordFM* laog_exposition = NULL; /* WordFM LAOGLinkExposition* NULL */
3570 /* end EXPOSITION ONLY */
3571 
3572 
3573 __attribute__((noinline))
3574 static void laog__init ( void )
3575 {
3576    tl_assert(!laog);
3577    tl_assert(!laog_exposition);
3578    tl_assert(HG_(clo_track_lockorders));
3579 
3580    laog = VG_(newFM)( HG_(zalloc), "hg.laog__init.1",
3581                       HG_(free), NULL/*unboxedcmp*/ );
3582 
3583    laog_exposition = VG_(newFM)( HG_(zalloc), "hg.laog__init.2", HG_(free),
3584                                  cmp_LAOGLinkExposition );
3585 }
3586 
3587 static void laog__show ( const HChar* who ) {
3588    UWord i, ws_size;
3589    UWord* ws_words;
3590    Lock* me;
3591    LAOGLinks* links;
3592    VG_(printf)("laog (requested by %s) {\n", who);
3593    VG_(initIterFM)( laog );
3594    me = NULL;
3595    links = NULL;
3596    while (VG_(nextIterFM)( laog, (UWord*)&me,
3597                                  (UWord*)&links )) {
3598       tl_assert(me);
3599       tl_assert(links);
3600       VG_(printf)("   node %p:\n", me);
3601       HG_(getPayloadWS)( &ws_words, &ws_size, univ_laog, links->inns );
3602       for (i = 0; i < ws_size; i++)
3603          VG_(printf)("      inn %#lx\n", ws_words[i] );
3604       HG_(getPayloadWS)( &ws_words, &ws_size, univ_laog, links->outs );
3605       for (i = 0; i < ws_size; i++)
3606          VG_(printf)("      out %#lx\n", ws_words[i] );
3607       me = NULL;
3608       links = NULL;
3609    }
3610    VG_(doneIterFM)( laog );
3611    VG_(printf)("}\n");
3612 }
3613 
3614 static void univ_laog_do_GC ( void ) {
3615    Word i;
3616    LAOGLinks* links;
3617    Word seen = 0;
3618    Int prev_next_gc_univ_laog = next_gc_univ_laog;
3619    const UWord univ_laog_cardinality = HG_(cardinalityWSU)( univ_laog);
3620 
3621    Bool *univ_laog_seen = HG_(zalloc) ( "hg.gc_univ_laog.1",
3622                                         (Int) univ_laog_cardinality
3623                                         * sizeof(Bool) );
3624    // univ_laog_seen[*] set to 0 (False) by zalloc.
3625 
3626    VG_(initIterFM)( laog );
3627    links = NULL;
3628    while (VG_(nextIterFM)( laog, NULL, (UWord*)&links )) {
3629       tl_assert(links);
3630       tl_assert(links->inns >= 0 && links->inns < univ_laog_cardinality);
3631       univ_laog_seen[links->inns] = True;
3632       tl_assert(links->outs >= 0 && links->outs < univ_laog_cardinality);
3633       univ_laog_seen[links->outs] = True;
3634       links = NULL;
3635    }
3636    VG_(doneIterFM)( laog );
3637 
3638    for (i = 0; i < (Int)univ_laog_cardinality; i++) {
3639       if (univ_laog_seen[i])
3640          seen++;
3641       else
3642          HG_(dieWS) ( univ_laog, (WordSet)i );
3643    }
3644 
3645    HG_(free) (univ_laog_seen);
3646 
3647    // We need to decide the value of the next_gc.
3648    // 3 solutions were looked at:
3649    // Sol 1: garbage collect at seen * 2
3650    //   This solution was a lot slower, probably because we both do a lot of
3651    //   garbage collection and do not keep the laog WordSets long enough for
3652    //   them to become useful again very soon.
3653    // Sol 2: garbage collect at a percentage increase of the current cardinality
3654    //         (with a min increase of 1)
3655    //   Trials on a small test program with 1%, 5% and 10% increase was done.
3656    //   1% is slightly faster than 5%, which is slightly slower than 10%.
3657    //   However, on a big application, this caused the memory to be exhausted,
3658    //   as even a 1% increase of size at each gc becomes a lot, when many gc
3659    //   are done.
3660    // Sol 3: always garbage collect at current cardinality + 1.
3661    //   This solution was the fastest of the 3 solutions, and caused no memory
3662    //   exhaustion in the big application.
3663    //
3664    // With regards to cost introduced by gc: on the t2t perf test (doing only
3665    // lock/unlock operations), t2t 50 10 2 was about 25% faster than the
3666    // version with garbage collection. With t2t 50 20 2, my machine started
3667    // to page out, and so the garbage collected version was much faster.
3668    // On smaller lock sets (e.g. t2t 20 5 2, giving about 100 locks), the
3669    // performance difference is insignificant (~ 0.1 s).
3670    // Of course, it might be that real life programs are not well represented
3671    // by t2t.
3672 
3673    // If ever we want to have a more sophisticated control
3674    // (e.g. clo options to control the percentage increase or a fixed increment),
3675    // we should do it here, eg.
3676    //     next_gc_univ_laog = prev_next_gc_univ_laog + VG_(clo_laog_gc_fixed);
3677    // Currently, we just hard-code the solution 3 above.
3678    next_gc_univ_laog = prev_next_gc_univ_laog + 1;
3679 
3680    if (VG_(clo_stats))
3681       VG_(message)
3682          (Vg_DebugMsg,
3683           "univ_laog_do_GC cardinality entered %d exit %d next gc at %d\n",
3684           (Int)univ_laog_cardinality, (Int)seen, next_gc_univ_laog);
3685 }
3686 
3687 
3688 __attribute__((noinline))
3689 static void laog__add_edge ( Lock* src, Lock* dst ) {
3690    UWord      keyW;
3691    LAOGLinks* links;
3692    Bool       presentF, presentR;
3693    if (0) VG_(printf)("laog__add_edge %p %p\n", src, dst);
3694 
3695    /* Take the opportunity to sanity check the graph.  Record in
3696       presentF if there is already a src->dst mapping in this node's
3697       forwards links, and presentR if there is already a src->dst
3698       mapping in this node's backwards links.  They should agree!
3699       Also, we need to know whether the edge was already present so as
3700       to decide whether or not to update the link details mapping.  We
3701       can compute presentF and presentR essentially for free, so may
3702       as well do this always. */
3703    presentF = presentR = False;
3704 
3705    /* Update the out edges for src */
3706    keyW  = 0;
3707    links = NULL;
3708    if (VG_(lookupFM)( laog, &keyW, (UWord*)&links, (UWord)src )) {
3709       WordSetID outs_new;
3710       tl_assert(links);
3711       tl_assert(keyW == (UWord)src);
3712       outs_new = HG_(addToWS)( univ_laog, links->outs, (UWord)dst );
3713       presentF = outs_new == links->outs;
3714       links->outs = outs_new;
3715    } else {
3716       links = HG_(zalloc)("hg.lae.1", sizeof(LAOGLinks));
3717       links->inns = HG_(emptyWS)( univ_laog );
3718       links->outs = HG_(singletonWS)( univ_laog, (UWord)dst );
3719       VG_(addToFM)( laog, (UWord)src, (UWord)links );
3720    }
3721    /* Update the in edges for dst */
3722    keyW  = 0;
3723    links = NULL;
3724    if (VG_(lookupFM)( laog, &keyW, (UWord*)&links, (UWord)dst )) {
3725       WordSetID inns_new;
3726       tl_assert(links);
3727       tl_assert(keyW == (UWord)dst);
3728       inns_new = HG_(addToWS)( univ_laog, links->inns, (UWord)src );
3729       presentR = inns_new == links->inns;
3730       links->inns = inns_new;
3731    } else {
3732       links = HG_(zalloc)("hg.lae.2", sizeof(LAOGLinks));
3733       links->inns = HG_(singletonWS)( univ_laog, (UWord)src );
3734       links->outs = HG_(emptyWS)( univ_laog );
3735       VG_(addToFM)( laog, (UWord)dst, (UWord)links );
3736    }
3737 
3738    tl_assert( (presentF && presentR) || (!presentF && !presentR) );
3739 
3740    if (!presentF && src->acquired_at && dst->acquired_at) {
3741       LAOGLinkExposition expo;
3742       /* If this edge is entering the graph, and we have acquired_at
3743          information for both src and dst, record those acquisition
3744          points.  Hence, if there is later a violation of this
3745          ordering, we can show the user the two places in which the
3746          required src-dst ordering was previously established. */
3747       if (0) VG_(printf)("acquire edge %#lx %#lx\n",
3748                          src->guestaddr, dst->guestaddr);
3749       expo.src_ga = src->guestaddr;
3750       expo.dst_ga = dst->guestaddr;
3751       expo.src_ec = NULL;
3752       expo.dst_ec = NULL;
3753       tl_assert(laog_exposition);
3754       if (VG_(lookupFM)( laog_exposition, NULL, NULL, (UWord)&expo )) {
3755          /* we already have it; do nothing */
3756       } else {
3757          LAOGLinkExposition* expo2 = HG_(zalloc)("hg.lae.3",
3758                                                sizeof(LAOGLinkExposition));
3759          expo2->src_ga = src->guestaddr;
3760          expo2->dst_ga = dst->guestaddr;
3761          expo2->src_ec = src->acquired_at;
3762          expo2->dst_ec = dst->acquired_at;
3763          VG_(addToFM)( laog_exposition, (UWord)expo2, (UWord)NULL );
3764       }
3765    }
3766 
3767    if (HG_(cardinalityWSU) (univ_laog) >= next_gc_univ_laog)
3768       univ_laog_do_GC();
3769 }
3770 
3771 __attribute__((noinline))
3772 static void laog__del_edge ( Lock* src, Lock* dst ) {
3773    UWord      keyW;
3774    LAOGLinks* links;
3775    if (0) VG_(printf)("laog__del_edge enter %p %p\n", src, dst);
3776    /* Update the out edges for src */
3777    keyW  = 0;
3778    links = NULL;
3779    if (VG_(lookupFM)( laog, &keyW, (UWord*)&links, (UWord)src )) {
3780       tl_assert(links);
3781       tl_assert(keyW == (UWord)src);
3782       links->outs = HG_(delFromWS)( univ_laog, links->outs, (UWord)dst );
3783    }
3784    /* Update the in edges for dst */
3785    keyW  = 0;
3786    links = NULL;
3787    if (VG_(lookupFM)( laog, &keyW, (UWord*)&links, (UWord)dst )) {
3788       tl_assert(links);
3789       tl_assert(keyW == (UWord)dst);
3790       links->inns = HG_(delFromWS)( univ_laog, links->inns, (UWord)src );
3791    }
3792 
3793    /* Remove the exposition of src,dst (if present) */
3794    {
3795       LAOGLinkExposition *fm_expo;
3796 
3797       LAOGLinkExposition expo;
3798       expo.src_ga = src->guestaddr;
3799       expo.dst_ga = dst->guestaddr;
3800       expo.src_ec = NULL;
3801       expo.dst_ec = NULL;
3802 
3803       if (VG_(delFromFM) (laog_exposition,
3804                           (UWord*)&fm_expo, NULL, (UWord)&expo )) {
3805          HG_(free) (fm_expo);
3806       }
3807    }
3808 
3809    /* deleting edges can increase nr of WS so check for gc. */
3810    if (HG_(cardinalityWSU) (univ_laog) >= next_gc_univ_laog)
3811       univ_laog_do_GC();
3812    if (0) VG_(printf)("laog__del_edge exit\n");
3813 }
3814 
3815 __attribute__((noinline))
3816 static WordSetID /* in univ_laog */ laog__succs ( Lock* lk ) {
3817    UWord      keyW;
3818    LAOGLinks* links;
3819    keyW  = 0;
3820    links = NULL;
3821    if (VG_(lookupFM)( laog, &keyW, (UWord*)&links, (UWord)lk )) {
3822       tl_assert(links);
3823       tl_assert(keyW == (UWord)lk);
3824       return links->outs;
3825    } else {
3826       return HG_(emptyWS)( univ_laog );
3827    }
3828 }
3829 
3830 __attribute__((noinline))
3831 static WordSetID /* in univ_laog */ laog__preds ( Lock* lk ) {
3832    UWord      keyW;
3833    LAOGLinks* links;
3834    keyW  = 0;
3835    links = NULL;
3836    if (VG_(lookupFM)( laog, &keyW, (UWord*)&links, (UWord)lk )) {
3837       tl_assert(links);
3838       tl_assert(keyW == (UWord)lk);
3839       return links->inns;
3840    } else {
3841       return HG_(emptyWS)( univ_laog );
3842    }
3843 }
3844 
3845 __attribute__((noinline))
3846 static void laog__sanity_check ( const HChar* who ) {
3847    UWord i, ws_size;
3848    UWord* ws_words;
3849    Lock* me;
3850    LAOGLinks* links;
3851    VG_(initIterFM)( laog );
3852    me = NULL;
3853    links = NULL;
3854    if (0) VG_(printf)("laog sanity check\n");
3855    while (VG_(nextIterFM)( laog, (UWord*)&me,
3856                                  (UWord*)&links )) {
3857       tl_assert(me);
3858       tl_assert(links);
3859       HG_(getPayloadWS)( &ws_words, &ws_size, univ_laog, links->inns );
3860       for (i = 0; i < ws_size; i++) {
3861          if ( ! HG_(elemWS)( univ_laog,
3862                              laog__succs( (Lock*)ws_words[i] ),
3863                              (UWord)me ))
3864             goto bad;
3865       }
3866       HG_(getPayloadWS)( &ws_words, &ws_size, univ_laog, links->outs );
3867       for (i = 0; i < ws_size; i++) {
3868          if ( ! HG_(elemWS)( univ_laog,
3869                              laog__preds( (Lock*)ws_words[i] ),
3870                              (UWord)me ))
3871             goto bad;
3872       }
3873       me = NULL;
3874       links = NULL;
3875    }
3876    VG_(doneIterFM)( laog );
3877    return;
3878 
3879   bad:
3880    VG_(printf)("laog__sanity_check(%s) FAILED\n", who);
3881    laog__show(who);
3882    tl_assert(0);
3883 }
3884 
3885 /* If there is a path in laog from 'src' to any of the elements in
3886    'dst', return an arbitrarily chosen element of 'dst' reachable from
3887    'src'.  If no path exist from 'src' to any element in 'dst', return
3888    NULL. */
3889 __attribute__((noinline))
3890 static
3891 Lock* laog__do_dfs_from_to ( Lock* src, WordSetID dsts /* univ_lsets */ )
3892 {
3893    Lock*     ret;
3894    Word      ssz;
3895    XArray*   stack;   /* of Lock* */
3896    WordFM*   visited; /* Lock* -> void, iow, Set(Lock*) */
3897    Lock*     here;
3898    WordSetID succs;
3899    UWord     succs_size, i;
3900    UWord*    succs_words;
3901    //laog__sanity_check();
3902 
3903    /* If the destination set is empty, we can never get there from
3904       'src' :-), so don't bother to try */
3905    if (HG_(isEmptyWS)( univ_lsets, dsts ))
3906       return NULL;
3907 
3908    ret     = NULL;
3909    stack   = VG_(newXA)( HG_(zalloc), "hg.lddft.1", HG_(free), sizeof(Lock*) );
3910    visited = VG_(newFM)( HG_(zalloc), "hg.lddft.2", HG_(free), NULL/*unboxedcmp*/ );
3911 
3912    (void) VG_(addToXA)( stack, &src );
3913 
3914    while (True) {
3915 
3916       ssz = VG_(sizeXA)( stack );
3917 
3918       if (ssz == 0) { ret = NULL; break; }
3919 
3920       here = *(Lock**) VG_(indexXA)( stack, ssz-1 );
3921       VG_(dropTailXA)( stack, 1 );
3922 
3923       if (HG_(elemWS)( univ_lsets, dsts, (UWord)here )) { ret = here; break; }
3924 
3925       if (VG_(lookupFM)( visited, NULL, NULL, (UWord)here ))
3926          continue;
3927 
3928       VG_(addToFM)( visited, (UWord)here, 0 );
3929 
3930       succs = laog__succs( here );
3931       HG_(getPayloadWS)( &succs_words, &succs_size, univ_laog, succs );
3932       for (i = 0; i < succs_size; i++)
3933          (void) VG_(addToXA)( stack, &succs_words[i] );
3934    }
3935 
3936    VG_(deleteFM)( visited, NULL, NULL );
3937    VG_(deleteXA)( stack );
3938    return ret;
3939 }
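/* Example (illustrative): with edges L1 --> L2 --> L3 in laog,
   laog__do_dfs_from_to(L1, {L3,L4}) returns L3, since L3 is reachable
   from L1, whereas laog__do_dfs_from_to(L3, {L1}) returns NULL, because
   edges are only followed in the forwards ('outs') direction. */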
3940 
3941 
3942 /* Thread 'thr' is acquiring 'lk'.  Check for inconsistent ordering
3943    between 'lk' and the locks already held by 'thr' and issue a
3944    complaint if so.  Also, update the ordering graph appropriately.
3945 */
3946 __attribute__((noinline))
3947 static void laog__pre_thread_acquires_lock (
3948                Thread* thr, /* NB: BEFORE lock is added */
3949                Lock*   lk
3950             )
3951 {
3952    UWord*   ls_words;
3953    UWord    ls_size, i;
3954    Lock*    other;
3955 
3956    /* It may be that 'thr' already holds 'lk' and is recursively
3957       relocking it.  In this case we just ignore the call. */
3958    /* NB: univ_lsets really is correct here */
3959    if (HG_(elemWS)( univ_lsets, thr->locksetA, (UWord)lk ))
3960       return;
3961 
3962    /* First, the check.  Complain if there is any path in laog from lk
3963       to any of the locks already held by thr, since if any such path
3964       existed, it would mean that previously lk was acquired before
3965       (rather than after, as we are doing here) at least one of those
3966       locks.
3967    */
3968    other = laog__do_dfs_from_to(lk, thr->locksetA);
3969    if (other) {
3970       LAOGLinkExposition key, *found;
3971       /* So we managed to find a path lk --*--> other in the graph,
3972          which implies that 'lk' should have been acquired before
3973          'other' but is in fact being acquired afterwards.  We present
3974          the lk/other arguments to record_error_LockOrder in the order
3975          in which they should have been acquired. */
3976       /* Go look in the laog_exposition mapping, to find the allocation
3977          points for this edge, so we can show the user. */
3978       key.src_ga = lk->guestaddr;
3979       key.dst_ga = other->guestaddr;
3980       key.src_ec = NULL;
3981       key.dst_ec = NULL;
3982       found = NULL;
3983       if (VG_(lookupFM)( laog_exposition,
3984                          (UWord*)&found, NULL, (UWord)&key )) {
3985          tl_assert(found != &key);
3986          tl_assert(found->src_ga == key.src_ga);
3987          tl_assert(found->dst_ga == key.dst_ga);
3988          tl_assert(found->src_ec);
3989          tl_assert(found->dst_ec);
3990          HG_(record_error_LockOrder)(
3991             thr, lk, other,
3992                  found->src_ec, found->dst_ec, other->acquired_at );
3993       } else {
3994          /* Hmm.  This can't happen (can it?) */
3995          /* Yes, it can happen: see tests/tc14_laog_dinphils.
3996             Imagine we have 3 philosophers A B C, and the forks
3997             between them:
3998 
3999                            C
4000 
4001                        fCA   fBC
4002 
4003                       A   fAB   B
4004 
4005             Let's have the following actions:
4006                    A takes    fCA,fAB
4007                    A releases fCA,fAB
4008                    B takes    fAB,fBC
4009                    B releases fAB,fBC
4010                    C takes    fBC,fCA
4011                    C releases fBC,fCA
4012 
4013             Helgrind will report a lock order error when C takes fCA.
4014             Effectively, we have a deadlock if the following
4015             sequence is done:
4016                 A takes fCA
4017                 B takes fAB
4018                 C takes fBC
4019 
4020             The error reported is:
4021               Observed (incorrect) order fBC followed by fCA
4022             but the stack traces that have established the required order
4023             are not given.
4024 
4025             This is because there is no pair (fCA, fBC) in laog exposition :
4026             the laog_exposition records all pairs of locks between a new lock
4027             taken by a thread and all the already taken locks.
4028             So, there is no laog_exposition (fCA, fBC) as no thread ever
4029             first locked fCA followed by fBC.
4030 
4031             In other words, when the deadlock cycle involves more than
4032             two locks, then helgrind does not report the sequence of
4033             operations that created the cycle.
4034 
4035             However, we can report the current stack trace (where
4036             lk is being taken), and the stack trace where other was acquired:
4037             Effectively, the variable 'other' contains a lock currently
4038             held by this thread, with its 'acquired_at'. */
4039 
4040          HG_(record_error_LockOrder)(
4041             thr, lk, other,
4042                  NULL, NULL, other->acquired_at );
4043       }
4044    }
4045 
4046    /* Second, add to laog the pairs
4047         (old, lk)  |  old <- locks already held by thr
4048       Since both old and lk are currently held by thr, their acquired_at
4049       fields must be non-NULL.
4050    */
4051    tl_assert(lk->acquired_at);
4052    HG_(getPayloadWS)( &ls_words, &ls_size, univ_lsets, thr->locksetA );
4053    for (i = 0; i < ls_size; i++) {
4054       Lock* old = (Lock*)ls_words[i];
4055       tl_assert(old->acquired_at);
4056       laog__add_edge( old, lk );
4057    }
4058 
4059    /* Why "except_Locks" ?  We're here because a lock is being
4060       acquired by a thread, and we're in an inconsistent state here.
4061       See the call points in evhH__post_thread_{r,w}_acquires_lock.
4062       When called in this inconsistent state, locks__sanity_check duly
4063       barfs. */
4064    if (HG_(clo_sanity_flags) & SCE_LAOG)
4065       all_except_Locks__sanity_check("laog__pre_thread_acquires_lock-post");
4066 }
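/* Example (illustrative): if some thread has previously done
      lock(A); lock(B);
   then the edge A --> B is in laog.  If a thread that currently holds B
   now tries to lock A, the DFS above finds the path A --*--> B, 'other'
   is B, and a lock order error is reported together with the two stacks
   recorded in laog_exposition for the (A,B) edge. */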
4067 
4068 /* Allocates a duplicate of words. Caller must HG_(free) the result. */
4069 static UWord* UWordV_dup(UWord* words, Word words_size)
4070 {
4071    UInt i;
4072 
4073    if (words_size == 0)
4074       return NULL;
4075 
4076    UWord *dup = HG_(zalloc) ("hg.dup.1", (SizeT) words_size * sizeof(UWord));
4077 
4078    for (i = 0; i < words_size; i++)
4079       dup[i] = words[i];
4080 
4081    return dup;
4082 }
4083 
4084 /* Delete from 'laog' any pair mentioning a lock in locksToDelete */
4085 
4086 __attribute__((noinline))
4087 static void laog__handle_one_lock_deletion ( Lock* lk )
4088 {
4089    WordSetID preds, succs;
4090    UWord preds_size, succs_size, i, j;
4091    UWord *preds_words, *succs_words;
4092 
4093    preds = laog__preds( lk );
4094    succs = laog__succs( lk );
4095 
4096    // We need to duplicate the payload, as these can be garbage collected
4097    // during the del/add operations below.
4098    HG_(getPayloadWS)( &preds_words, &preds_size, univ_laog, preds );
4099    preds_words = UWordV_dup(preds_words, preds_size);
4100 
4101    HG_(getPayloadWS)( &succs_words, &succs_size, univ_laog, succs );
4102    succs_words = UWordV_dup(succs_words, succs_size);
4103 
4104    for (i = 0; i < preds_size; i++)
4105       laog__del_edge( (Lock*)preds_words[i], lk );
4106 
4107    for (j = 0; j < succs_size; j++)
4108       laog__del_edge( lk, (Lock*)succs_words[j] );
4109 
4110    for (i = 0; i < preds_size; i++) {
4111       for (j = 0; j < succs_size; j++) {
4112          if (preds_words[i] != succs_words[j]) {
4113             /* This can pass unlocked locks to laog__add_edge, since
4114                we're deleting stuff.  So their acquired_at fields may
4115                be NULL. */
4116             laog__add_edge( (Lock*)preds_words[i], (Lock*)succs_words[j] );
4117          }
4118       }
4119    }
4120 
4121    if (preds_words)
4122       HG_(free) (preds_words);
4123    if (succs_words)
4124       HG_(free) (succs_words);
4125 
4126    // Remove lk information from laog links FM
4127    {
4128       LAOGLinks *links;
4129       Lock* linked_lk;
4130 
4131       if (VG_(delFromFM) (laog,
4132                           (UWord*)&linked_lk, (UWord*)&links, (UWord)lk)) {
4133          tl_assert (linked_lk == lk);
4134          HG_(free) (links);
4135       }
4136    }
4137    /* FIXME ??? What about removing lock lk data from EXPOSITION ??? */
4138 }
4139 
4140 //__attribute__((noinline))
4141 //static void laog__handle_lock_deletions (
4142 //               WordSetID /* in univ_laog */ locksToDelete
4143 //            )
4144 //{
4145 //   Word   i, ws_size;
4146 //   UWord* ws_words;
4147 //
4148 //
4149 //   HG_(getPayloadWS)( &ws_words, &ws_size, univ_lsets, locksToDelete );
4150 //   UWordV_dup call needed here ...
4151 //   for (i = 0; i < ws_size; i++)
4152 //      laog__handle_one_lock_deletion( (Lock*)ws_words[i] );
4153 //
4154 //   if (HG_(clo_sanity_flags) & SCE_LAOG)
4155 //      all__sanity_check("laog__handle_lock_deletions-post");
4156 //}
4157 
4158 
4159 /*--------------------------------------------------------------*/
4160 /*--- Malloc/free replacements                               ---*/
4161 /*--------------------------------------------------------------*/
4162 
4163 typedef
4164    struct {
4165       void*       next;    /* required by m_hashtable */
4166       Addr        payload; /* ptr to actual block    */
4167       SizeT       szB;     /* size requested         */
4168       ExeContext* where;   /* where it was allocated */
4169       Thread*     thr;     /* allocating thread      */
4170    }
4171    MallocMeta;
4172 
4173 /* A hash table of MallocMetas, used to track malloc'd blocks
4174    (obviously). */
4175 static VgHashTable *hg_mallocmeta_table = NULL;
4176 
4177 /* MallocMeta are small elements. We use a pool to avoid
4178    the overhead of malloc for each MallocMeta. */
4179 static PoolAlloc *MallocMeta_poolalloc = NULL;
4180 
4181 static MallocMeta* new_MallocMeta ( void ) {
4182    MallocMeta* md = VG_(allocEltPA) (MallocMeta_poolalloc);
4183    VG_(memset)(md, 0, sizeof(MallocMeta));
4184    return md;
4185 }
4186 static void delete_MallocMeta ( MallocMeta* md ) {
4187    VG_(freeEltPA)(MallocMeta_poolalloc, md);
4188 }
4189 
4190 
4191 /* Allocate a client block and set up the metadata for it. */
4192 
4193 static
4194 void* handle_alloc ( ThreadId tid,
4195                      SizeT szB, SizeT alignB, Bool is_zeroed )
4196 {
4197    Addr        p;
4198    MallocMeta* md;
4199 
4200    tl_assert( ((SSizeT)szB) >= 0 );
4201    p = (Addr)VG_(cli_malloc)(alignB, szB);
4202    if (!p) {
4203       return NULL;
4204    }
4205    if (is_zeroed)
4206       VG_(memset)((void*)p, 0, szB);
4207 
4208    /* Note that map_threads_lookup must succeed (its internal assertion
4209       cannot fire), since memory can only be allocated by currently alive
4210       threads, hence they must have an entry in map_threads. */
4211    md = new_MallocMeta();
4212    md->payload = p;
4213    md->szB     = szB;
4214    md->where   = VG_(record_ExeContext)( tid, 0 );
4215    md->thr     = map_threads_lookup( tid );
4216 
4217    VG_(HT_add_node)( hg_mallocmeta_table, (VgHashNode*)md );
4218    if (UNLIKELY(VG_(clo_xtree_memory) == Vg_XTMemory_Full))
4219       VG_(XTMemory_Full_alloc)(md->szB, md->where);
4220 
4221    /* Tell the lower level memory wranglers. */
4222    evh__new_mem_heap( p, szB, is_zeroed );
4223 
4224    return (void*)p;
4225 }
4226 
4227 /* Re the checks for less-than-zero (also in hg_cli__realloc below):
4228    Cast to a signed type to catch any unexpectedly negative args.
4229    We're assuming here that the size asked for is not greater than
4230    2^31 bytes (for 32-bit platforms) or 2^63 bytes (for 64-bit
4231    platforms). */
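/* For illustration only: on a 64-bit platform a bogus request of
   (SizeT)-1, i.e. 0xFFFFFFFFFFFFFFFF, reinterprets as (SSizeT)-1, which
   is < 0 and so is rejected below before VG_(cli_malloc) is ever called,
   whereas a sane request such as 4096 stays positive and is passed
   through unchanged. */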
4232 static void* hg_cli__malloc ( ThreadId tid, SizeT n ) {
4233    if (((SSizeT)n) < 0) return NULL;
4234    return handle_alloc ( tid, n, VG_(clo_alignment),
4235                          /*is_zeroed*/False );
4236 }
4237 static void* hg_cli____builtin_new ( ThreadId tid, SizeT n ) {
4238    if (((SSizeT)n) < 0) return NULL;
4239    return handle_alloc ( tid, n, VG_(clo_alignment),
4240                          /*is_zeroed*/False );
4241 }
4242 static void* hg_cli____builtin_vec_new ( ThreadId tid, SizeT n ) {
4243    if (((SSizeT)n) < 0) return NULL;
4244    return handle_alloc ( tid, n, VG_(clo_alignment),
4245                          /*is_zeroed*/False );
4246 }
4247 static void* hg_cli__memalign ( ThreadId tid, SizeT align, SizeT n ) {
4248    if (((SSizeT)n) < 0) return NULL;
4249    return handle_alloc ( tid, n, align,
4250                          /*is_zeroed*/False );
4251 }
4252 static void* hg_cli__calloc ( ThreadId tid, SizeT nmemb, SizeT size1 ) {
4253    if ( ((SSizeT)nmemb) < 0 || ((SSizeT)size1) < 0 ) return NULL;
4254    return handle_alloc ( tid, nmemb*size1, VG_(clo_alignment),
4255                          /*is_zeroed*/True );
4256 }
4257 
4258 
4259 /* Free a client block, including getting rid of the relevant
4260    metadata. */
4261 
4262 static void handle_free ( ThreadId tid, void* p )
4263 {
4264    MallocMeta *md, *old_md;
4265    SizeT      szB;
4266 
4267    /* First see if we can find the metadata for 'p'. */
4268    md = (MallocMeta*) VG_(HT_lookup)( hg_mallocmeta_table, (UWord)p );
4269    if (!md)
4270       return; /* apparently freeing a bogus address.  Oh well. */
4271 
4272    tl_assert(md->payload == (Addr)p);
4273    szB = md->szB;
4274    if (UNLIKELY(VG_(clo_xtree_memory) == Vg_XTMemory_Full)) {
4275       ExeContext* ec_free = VG_(record_ExeContext)( tid, 0 );
4276       VG_(XTMemory_Full_free)(md->szB, md->where, ec_free);
4277    }
4278 
4279    /* Nuke the metadata block */
4280    old_md = (MallocMeta*)
4281             VG_(HT_remove)( hg_mallocmeta_table, (UWord)p );
4282    tl_assert(old_md); /* it must be present - we just found it */
4283    tl_assert(old_md == md);
4284    tl_assert(old_md->payload == (Addr)p);
4285 
4286    VG_(cli_free)((void*)old_md->payload);
4287    delete_MallocMeta(old_md);
4288 
4289    /* Tell the lower level memory wranglers. */
4290    evh__die_mem_heap( (Addr)p, szB );
4291 }
4292 
4293 static void hg_cli__free ( ThreadId tid, void* p ) {
4294    handle_free(tid, p);
4295 }
4296 static void hg_cli____builtin_delete ( ThreadId tid, void* p ) {
4297    handle_free(tid, p);
4298 }
4299 static void hg_cli____builtin_vec_delete ( ThreadId tid, void* p ) {
4300    handle_free(tid, p);
4301 }
4302 
4303 
4304 static void* hg_cli__realloc ( ThreadId tid, void* payloadV, SizeT new_size )
4305 {
4306    MallocMeta *md, *md_new, *md_tmp;
4307    SizeT      i;
4308 
4309    Addr payload = (Addr)payloadV;
4310 
4311    if (((SSizeT)new_size) < 0) return NULL;
4312 
4313    md = (MallocMeta*) VG_(HT_lookup)( hg_mallocmeta_table, (UWord)payload );
4314    if (!md)
4315       return NULL; /* apparently realloc-ing a bogus address.  Oh well. */
4316 
4317    tl_assert(md->payload == payload);
4318 
4319    if (md->szB == new_size) {
4320       /* size unchanged */
4321       md->where = VG_(record_ExeContext)(tid, 0);
4322       return payloadV;
4323    }
4324 
4325    if (md->szB > new_size) {
4326       /* new size is smaller */
4327       md->szB   = new_size;
4328       md->where = VG_(record_ExeContext)(tid, 0);
4329       evh__die_mem_heap( md->payload + new_size, md->szB - new_size );
4330       return payloadV;
4331    }
4332 
4333    /* else */ {
4334       /* new size is bigger */
4335       Addr p_new = (Addr)VG_(cli_malloc)(VG_(clo_alignment), new_size);
4336 
4337       /* First half kept and copied, second half new */
4338       // FIXME: shouldn't we use a copier which implements the
4339       // memory state machine?
4340       evh__copy_mem( payload, p_new, md->szB );
4341       evh__new_mem_heap ( p_new + md->szB, new_size - md->szB,
4342                           /*inited*/False );
4343       /* FIXME: can anything funny happen here?  specifically, if the
4344          old range contained a lock, then die_mem_heap will complain.
4345          Is that the correct behaviour?  Not sure. */
4346       evh__die_mem_heap( payload, md->szB );
4347 
4348       /* Copy from old to new */
4349       for (i = 0; i < md->szB; i++)
4350          ((UChar*)p_new)[i] = ((UChar*)payload)[i];
4351 
4352       /* Because the metadata hash table is indexed by payload address,
4353          we have to get rid of the old hash table entry and make a new
4354          one.  We can't just modify the existing metadata in place,
4355          because then it would (almost certainly) be in the wrong hash
4356          chain. */
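      /* Illustration (the chain choice is an assumption; the real policy
         belongs to m_hashtable): if chains were picked by something like
         payload % n_chains, a block moved from 0x5000 to 0x9000 would
         still sit in the 0x5000 chain if we merely overwrote md->payload,
         and later VG_(HT_lookup) calls on 0x9000 would miss it.  Hence
         the remove / re-add dance below. */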
4357       md_new = new_MallocMeta();
4358       *md_new = *md;
4359 
4360       md_tmp = VG_(HT_remove)( hg_mallocmeta_table, payload );
4361       tl_assert(md_tmp);
4362       tl_assert(md_tmp == md);
4363 
4364       VG_(cli_free)((void*)md->payload);
4365       delete_MallocMeta(md);
4366 
4367       /* Update fields */
4368       md_new->where   = VG_(record_ExeContext)( tid, 0 );
4369       md_new->szB     = new_size;
4370       md_new->payload = p_new;
4371       md_new->thr     = map_threads_lookup( tid );
4372 
4373       /* and add */
4374       VG_(HT_add_node)( hg_mallocmeta_table, (VgHashNode*)md_new );
4375 
4376       return (void*)p_new;
4377    }
4378 }
4379 
4380 static SizeT hg_cli_malloc_usable_size ( ThreadId tid, void* p )
4381 {
4382    MallocMeta *md = VG_(HT_lookup)( hg_mallocmeta_table, (UWord)p );
4383 
4384    // There may be slop, but pretend there isn't because only the asked-for
4385    // area will have been shadowed properly.
4386    return ( md ? md->szB : 0 );
4387 }
4388 
4389 
4390 /* For error creation: map 'data_addr' to a malloc'd chunk, if any.
4391    Slow linear search.  With a bit of hash table help if 'data_addr'
4392    is either the start of a block or up to 15 word-sized steps along
4393    from the start of a block. */
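/* Illustration (assuming 8-byte UWords, as on a 64-bit host): the fast
   path probes the table at data_addr, data_addr-8, ..., data_addr-120.
   So if data_addr is the start of a block, or at most 15 words past the
   start, one of those probes lands exactly on the block's payload key
   and the full-table scan is avoided; any other interior address falls
   through to the slow linear search. */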
4394 
4395 static inline Bool addr_is_in_MM_Chunk( MallocMeta* mm, Addr a )
4396 {
4397    /* Accept 'a' as within 'mm' if 'mm's size is zero and 'a' points
4398       right at it. */
4399   if (UNLIKELY(mm->szB == 0 && a == mm->payload))
4400      return True;
4401   /* else normal interval rules apply */
4402   if (LIKELY(a < mm->payload)) return False;
4403   if (LIKELY(a >= mm->payload + mm->szB)) return False;
4404   return True;
4405 }
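/* Worked instances of the rules above: a zero-sized block with payload
   0x1000 accepts only a == 0x1000; a 16-byte block with payload 0x1000
   accepts 0x1000 .. 0x100F and rejects everything else. */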
4406 
4407 Bool HG_(mm_find_containing_block)( /*OUT*/ExeContext** where,
4408                                     /*OUT*/UInt*        tnr,
4409                                     /*OUT*/Addr*        payload,
4410                                     /*OUT*/SizeT*       szB,
4411                                     Addr                data_addr )
4412 {
4413    MallocMeta* mm;
4414    Int i;
4415    const Int n_fast_check_words = 16;
4416 
4417    /* Before searching the list of allocated blocks in hg_mallocmeta_table,
4418       first verify that data_addr is in a heap client segment. */
4419    const NSegment *s = VG_(am_find_nsegment) (data_addr);
4420    if (s == NULL || !s->isCH)
4421      return False;
4422 
4423    /* First, do a few fast searches on the basis that data_addr might
4424       be exactly the start of a block or up to 15 words inside.  This
4425       can happen commonly via the creq
4426       _VG_USERREQ__HG_CLEAN_MEMORY_HEAPBLOCK. */
4427    for (i = 0; i < n_fast_check_words; i++) {
4428       mm = VG_(HT_lookup)( hg_mallocmeta_table,
4429                            data_addr - (UWord)(UInt)i * sizeof(UWord) );
4430       if (UNLIKELY(mm && addr_is_in_MM_Chunk(mm, data_addr)))
4431          goto found;
4432    }
4433 
4434    /* Well, this totally sucks.  But without using an interval tree or
4435       some such, it's hard to see how to do better.  We have to check
4436       every block in the entire table. */
4437    VG_(HT_ResetIter)(hg_mallocmeta_table);
4438    while ( (mm = VG_(HT_Next)(hg_mallocmeta_table)) ) {
4439       if (UNLIKELY(addr_is_in_MM_Chunk(mm, data_addr)))
4440          goto found;
4441    }
4442 
4443    /* Not found.  Bah. */
4444    return False;
4445    /*NOTREACHED*/
4446 
4447   found:
4448    tl_assert(mm);
4449    tl_assert(addr_is_in_MM_Chunk(mm, data_addr));
4450    if (where)   *where   = mm->where;
4451    if (tnr)     *tnr     = mm->thr->errmsg_index;
4452    if (payload) *payload = mm->payload;
4453    if (szB)     *szB     = mm->szB;
4454    return True;
4455 }
4456 
4457 
4458 /*--------------------------------------------------------------*/
4459 /*--- Instrumentation                                        ---*/
4460 /*--------------------------------------------------------------*/
4461 
4462 #define unop(_op, _arg1)         IRExpr_Unop((_op),(_arg1))
4463 #define binop(_op, _arg1, _arg2) IRExpr_Binop((_op),(_arg1),(_arg2))
4464 #define mkexpr(_tmp)             IRExpr_RdTmp((_tmp))
4465 #define mkU32(_n)                IRExpr_Const(IRConst_U32(_n))
4466 #define mkU64(_n)                IRExpr_Const(IRConst_U64(_n))
4467 #define assign(_t, _e)           IRStmt_WrTmp((_t), (_e))
4468 
4469 /* This takes and returns atoms, of course.  Not full IRExprs. */
4470 static IRExpr* mk_And1 ( IRSB* sbOut, IRExpr* arg1, IRExpr* arg2 )
4471 {
4472    tl_assert(arg1 && arg2);
4473    tl_assert(isIRAtom(arg1));
4474    tl_assert(isIRAtom(arg2));
4475    /* Generate 32to1(And32(1Uto32(arg1), 1Uto32(arg2))).  Appalling
4476       code, I know. */
4477    IRTemp wide1 = newIRTemp(sbOut->tyenv, Ity_I32);
4478    IRTemp wide2 = newIRTemp(sbOut->tyenv, Ity_I32);
4479    IRTemp anded = newIRTemp(sbOut->tyenv, Ity_I32);
4480    IRTemp res   = newIRTemp(sbOut->tyenv, Ity_I1);
4481    addStmtToIRSB(sbOut, assign(wide1, unop(Iop_1Uto32, arg1)));
4482    addStmtToIRSB(sbOut, assign(wide2, unop(Iop_1Uto32, arg2)));
4483    addStmtToIRSB(sbOut, assign(anded, binop(Iop_And32, mkexpr(wide1),
4484                                                        mkexpr(wide2))));
4485    addStmtToIRSB(sbOut, assign(res, unop(Iop_32to1, mkexpr(anded))));
4486    return mkexpr(res);
4487 }
4488 
4489 static void instrument_mem_access ( IRSB*   sbOut,
4490                                     IRExpr* addr,
4491                                     Int     szB,
4492                                     Bool    isStore,
4493                                     Bool    fixupSP_needed,
4494                                     Int     hWordTy_szB,
4495                                     Int     goff_sp,
4496                                     Int     goff_sp_s1,
4497                                     /* goff_sp_s1 is the offset in the guest
4498                                        state where the cached-stack validity
4499                                        flag is stored. */
4500                                     IRExpr* guard ) /* NULL => True */
4501 {
4502    IRType   tyAddr   = Ity_INVALID;
4503    const HChar* hName    = NULL;
4504    void*    hAddr    = NULL;
4505    Int      regparms = 0;
4506    IRExpr** argv     = NULL;
4507    IRDirty* di       = NULL;
4508 
4509    // THRESH is the size of the window above SP (well,
4510    // mostly above) that we assume implies a stack reference.
4511    const Int THRESH = 4096 * 4; // somewhat arbitrary
4512    const Int rz_szB = VG_STACK_REDZONE_SZB;
4513 
4514    tl_assert(isIRAtom(addr));
4515    tl_assert(hWordTy_szB == 4 || hWordTy_szB == 8);
4516 
4517    tyAddr = typeOfIRExpr( sbOut->tyenv, addr );
4518    tl_assert(tyAddr == Ity_I32 || tyAddr == Ity_I64);
4519 
4520    /* So the effective address is in 'addr' now. */
4521    regparms = 1; // unless stated otherwise
4522    if (isStore) {
4523       switch (szB) {
4524          case 1:
4525             hName = "evh__mem_help_cwrite_1";
4526             hAddr = &evh__mem_help_cwrite_1;
4527             argv = mkIRExprVec_1( addr );
4528             break;
4529          case 2:
4530             hName = "evh__mem_help_cwrite_2";
4531             hAddr = &evh__mem_help_cwrite_2;
4532             argv = mkIRExprVec_1( addr );
4533             break;
4534          case 4:
4535             if (fixupSP_needed) {
4536                /* Unwind has to be done with a SP fixed up with one word.
4537                   See Ist_Put heuristic in hg_instrument. */
4538                hName = "evh__mem_help_cwrite_4_fixupSP";
4539                hAddr = &evh__mem_help_cwrite_4_fixupSP;
4540             } else {
4541                hName = "evh__mem_help_cwrite_4";
4542                hAddr = &evh__mem_help_cwrite_4;
4543             }
4544             argv = mkIRExprVec_1( addr );
4545             break;
4546          case 8:
4547             if (fixupSP_needed) {
4548                /* Unwind has to be done with a SP fixed up with one word.
4549                   See Ist_Put heuristic in hg_instrument. */
4550                hName = "evh__mem_help_cwrite_8_fixupSP";
4551                hAddr = &evh__mem_help_cwrite_8_fixupSP;
4552             } else {
4553                hName = "evh__mem_help_cwrite_8";
4554                hAddr = &evh__mem_help_cwrite_8;
4555             }
4556             argv = mkIRExprVec_1( addr );
4557             break;
4558          default:
4559             tl_assert(szB > 8 && szB <= 512); /* stay sane */
4560             regparms = 2;
4561             hName = "evh__mem_help_cwrite_N";
4562             hAddr = &evh__mem_help_cwrite_N;
4563             argv = mkIRExprVec_2( addr, mkIRExpr_HWord( szB ));
4564             break;
4565       }
4566    } else {
4567       switch (szB) {
4568          case 1:
4569             hName = "evh__mem_help_cread_1";
4570             hAddr = &evh__mem_help_cread_1;
4571             argv = mkIRExprVec_1( addr );
4572             break;
4573          case 2:
4574             hName = "evh__mem_help_cread_2";
4575             hAddr = &evh__mem_help_cread_2;
4576             argv = mkIRExprVec_1( addr );
4577             break;
4578          case 4:
4579             hName = "evh__mem_help_cread_4";
4580             hAddr = &evh__mem_help_cread_4;
4581             argv = mkIRExprVec_1( addr );
4582             break;
4583          case 8:
4584             hName = "evh__mem_help_cread_8";
4585             hAddr = &evh__mem_help_cread_8;
4586             argv = mkIRExprVec_1( addr );
4587             break;
4588          default:
4589             tl_assert(szB > 8 && szB <= 512); /* stay sane */
4590             regparms = 2;
4591             hName = "evh__mem_help_cread_N";
4592             hAddr = &evh__mem_help_cread_N;
4593             argv = mkIRExprVec_2( addr, mkIRExpr_HWord( szB ));
4594             break;
4595       }
4596    }
4597 
4598    /* Create the helper. */
4599    tl_assert(hName);
4600    tl_assert(hAddr);
4601    tl_assert(argv);
4602    di = unsafeIRDirty_0_N( regparms,
4603                            hName, VG_(fnptr_to_fnentry)( hAddr ),
4604                            argv );
4605 
4606    if (HG_(clo_delta_stacktrace)) {
4607       /* memory access helper might read the shadow1 SP offset, that
4608          indicates if the cached stacktrace is valid. */
4609       di->fxState[0].fx = Ifx_Read;
4610       di->fxState[0].offset = goff_sp_s1;
4611       di->fxState[0].size = hWordTy_szB;
4612       di->fxState[0].nRepeats = 0;
4613       di->fxState[0].repeatLen = 0;
4614       di->nFxState = 1;
4615    }
4616 
4617    if (! HG_(clo_check_stack_refs)) {
4618       /* We're ignoring memory references which are (obviously) to the
4619          stack.  In fact just skip stack refs that are within 4 pages
4620          of SP (SP - the redzone, really), as that's simple, easy, and
4621          filters out most stack references. */
4622       /* Generate the guard condition: "(addr - (SP - RZ)) >u N", for
4623          some arbitrary N.  If that is true then addr is outside the
4624          range (SP - RZ .. SP + N - RZ).  If N is smallish (a few
4625          pages) then we can say addr is within a few pages of SP and
4626          so can't possibly be a heap access, and so can be skipped.
4627 
4628          Note that the condition simplifies to
4629             (addr - SP + RZ) >u N
4630          which generates better code in x86/amd64 backends, but it does
4631          not unfortunately simplify to
4632             (addr - SP) >u (N - RZ)
4633          (would be beneficial because N - RZ is a constant) because
4634          wraparound arithmetic messes up the comparison.  eg.
4635          20 >u 10 == True,
4636          but (20 - 15) >u (10 - 15) == 5 >u (MAXINT-5) == False.
4637       */
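      /* Worked instance, with illustrative values RZ = 128 and
         THRESH = 16384: an access at SP-16 gives
            (addr - SP + RZ) = -16 + 128 = 112, and 112 <=u 16384,
         so the guard below is False and the helper call is skipped
         (treated as a stack reference).  An access at SP+20000 gives
         20128 >u 16384, the guard is True, and the access is checked
         as a possible heap reference. */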
4638       IRTemp sp = newIRTemp(sbOut->tyenv, tyAddr);
4639       addStmtToIRSB( sbOut, assign(sp, IRExpr_Get(goff_sp, tyAddr)));
4640 
4641       /* "addr - SP" */
4642       IRTemp addr_minus_sp = newIRTemp(sbOut->tyenv, tyAddr);
4643       addStmtToIRSB(
4644          sbOut,
4645          assign(addr_minus_sp,
4646                 tyAddr == Ity_I32
4647                    ? binop(Iop_Sub32, addr, mkexpr(sp))
4648                    : binop(Iop_Sub64, addr, mkexpr(sp)))
4649       );
4650 
4651       /* "addr - SP + RZ" */
4652       IRTemp diff = newIRTemp(sbOut->tyenv, tyAddr);
4653       addStmtToIRSB(
4654          sbOut,
4655          assign(diff,
4656                 tyAddr == Ity_I32
4657                    ? binop(Iop_Add32, mkexpr(addr_minus_sp), mkU32(rz_szB))
4658                    : binop(Iop_Add64, mkexpr(addr_minus_sp), mkU64(rz_szB)))
4659       );
4660 
4661       /* guardA == "guard on the address" */
4662       IRTemp guardA = newIRTemp(sbOut->tyenv, Ity_I1);
4663       addStmtToIRSB(
4664          sbOut,
4665          assign(guardA,
4666                 tyAddr == Ity_I32
4667                    ? binop(Iop_CmpLT32U, mkU32(THRESH), mkexpr(diff))
4668                    : binop(Iop_CmpLT64U, mkU64(THRESH), mkexpr(diff)))
4669       );
4670       di->guard = mkexpr(guardA);
4671    }
4672 
4673    /* If there's a guard on the access itself (as supplied by the
4674       caller of this routine), we need to AND that in to any guard we
4675       might already have. */
4676    if (guard) {
4677       di->guard = mk_And1(sbOut, di->guard, guard);
4678    }
4679 
4680    /* Add the helper. */
4681    addStmtToIRSB( sbOut, IRStmt_Dirty(di) );
4682 }
4683 
4684 
4685 /* Figure out if GA is a guest code address in the dynamic linker, and
4686    if so return True.  Otherwise (and in case of any doubt) return
4687    False.  (Errs on the safe side: False is the safe value.) */
4688 static Bool is_in_dynamic_linker_shared_object( Addr ga )
4689 {
4690    DebugInfo* dinfo;
4691    const HChar* soname;
4692 
4693    dinfo = VG_(find_DebugInfo)( VG_(current_DiEpoch)(), ga );
4694    if (!dinfo) return False;
4695 
4696    soname = VG_(DebugInfo_get_soname)(dinfo);
4697    tl_assert(soname);
4698    if (0) VG_(printf)("%s\n", soname);
4699 
4700    return VG_(is_soname_ld_so)(soname);
4701 }
4702 
4703 static
4704 void addInvalidateCachedStack (IRSB*   bbOut,
4705                                Int     goff_sp_s1,
4706                                Int     hWordTy_szB)
4707 {
4708    /* Invalidate cached stack: Write 0 in the shadow1 offset 0 */
4709    addStmtToIRSB( bbOut,
4710                   IRStmt_Put(goff_sp_s1,
4711                              hWordTy_szB == 4 ?
4712                              mkU32(0) : mkU64(0)));
4713    /// ???? anything more efficient than assign a Word???
4714 }
4715 
4716 static
4717 IRSB* hg_instrument ( VgCallbackClosure* closure,
4718                       IRSB* bbIn,
4719                       const VexGuestLayout* layout,
4720                       const VexGuestExtents* vge,
4721                       const VexArchInfo* archinfo_host,
4722                       IRType gWordTy, IRType hWordTy )
4723 {
4724    Int     i;
4725    IRSB*   bbOut;
4726    Addr    cia; /* address of current insn */
4727    IRStmt* st;
4728    Bool    inLDSO = False;
4729    Addr    inLDSOmask4K = 1; /* mismatches on first check */
4730 
4731    // Set to True when SP must be fixed up when taking a stack trace for the
4732    // mem accesses in the rest of the instruction
4733    Bool    fixupSP_needed = False;
4734 
4735    const Int goff_SP = layout->offset_SP;
4736    /* SP in shadow1 indicates whether the cached stack is valid.
4737       We have to invalidate the cached stack e.g. when seeing a call or ret. */
4738    const Int goff_SP_s1 = layout->total_sizeB + layout->offset_SP;
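   /* Illustrative numbers: if the guest state is 1024 bytes and SP lives
      at offset 48, then goff_SP_s1 is 1072, i.e. the same slot but in the
      shadow1 copy of the guest state, which instrument_mem_access reads
      (via Ifx_Read) and addInvalidateCachedStack zeroes (via IRStmt_Put). */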
4739    const Int hWordTy_szB = sizeofIRType(hWordTy);
4740 
4741    if (gWordTy != hWordTy) {
4742       /* We don't currently support this case. */
4743       VG_(tool_panic)("host/guest word size mismatch");
4744    }
4745 
4746    if (VKI_PAGE_SIZE < 4096 || VG_(log2)(VKI_PAGE_SIZE) == -1) {
4747       VG_(tool_panic)("implausible or too-small VKI_PAGE_SIZE");
4748    }
4749 
4750    /* Set up BB */
4751    bbOut           = emptyIRSB();
4752    bbOut->tyenv    = deepCopyIRTypeEnv(bbIn->tyenv);
4753    bbOut->next     = deepCopyIRExpr(bbIn->next);
4754    bbOut->jumpkind = bbIn->jumpkind;
4755    bbOut->offsIP   = bbIn->offsIP;
4756 
4757    // Copy verbatim any IR preamble preceding the first IMark
4758    i = 0;
4759    while (i < bbIn->stmts_used && bbIn->stmts[i]->tag != Ist_IMark) {
4760       addStmtToIRSB( bbOut, bbIn->stmts[i] );
4761       i++;
4762    }
4763 
4764    // Get the first statement, and initial cia from it
4765    tl_assert(bbIn->stmts_used > 0);
4766    tl_assert(i < bbIn->stmts_used);
4767    st = bbIn->stmts[i];
4768    tl_assert(Ist_IMark == st->tag);
4769    cia = st->Ist.IMark.addr;
4770    st = NULL;
4771 
4772    for (/*use current i*/; i < bbIn->stmts_used; i++) {
4773       st = bbIn->stmts[i];
4774       tl_assert(st);
4775       tl_assert(isFlatIRStmt(st));
4776       switch (st->tag) {
4777          case Ist_Exit:
4778             /* No memory reference, but if we do anything other than
4779                Ijk_Boring, indicate to helgrind that the previously
4780                recorded stack is invalid.
4781                For Ijk_Boring, also invalidate the stack if the exit
4782                instruction has no CF info. This heuristic avoids cached
4783                stack trace mismatch in some cases such as longjmp
4784                implementation. Similar logic below for the bb exit. */
4785             if (HG_(clo_delta_stacktrace)
4786                 && (st->Ist.Exit.jk != Ijk_Boring || ! VG_(has_CF_info)(cia)))
4787                addInvalidateCachedStack(bbOut, goff_SP_s1, hWordTy_szB);
4788             break;
4789          case Ist_NoOp:
4790          case Ist_AbiHint:
4791             /* None of these can contain any memory references. */
4792             break;
4793          case Ist_Put:
4794             /* This cannot contain any memory references. */
4795             /* If we see a put to SP, from now on in this instruction,
4796                the SP needed to unwind has to be fixed up by one word.
4797                This very simple heuristic ensures correct unwinding in the
4798                typical case of a push instruction. If we need to cover more
4799                cases, then we need to better track how the SP is modified by
4800                the instruction (and calculate a precise sp delta), rather than
4801                assuming that the SP is decremented by a Word size. */
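            /* Rough sketch of the targeted case (an amd64 "push %rax";
               exact temporaries and ordering depend on the front end):
                  t2 = Sub64(GET:I64(offset_SP), 0x8)
                  PUT(offset_SP) = t2         <-- sets fixupSP_needed
                  STle(t2) = GET:I64(offset_RAX)
               The store is seen after SP has already been decremented,
               so the *_fixupSP helpers compensate by one word when
               unwinding for the stack trace. */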
4802             if (HG_(clo_delta_stacktrace) && st->Ist.Put.offset == goff_SP) {
4803                fixupSP_needed = True;
4804             }
4805             break;
4806          case Ist_PutI:
4807             /* This cannot contain any memory references. */
4808             break;
4809 
4810          case Ist_IMark:
4811             fixupSP_needed = False;
4812 
4813             /* no mem refs, but note the insn address. */
4814             cia = st->Ist.IMark.addr;
4815 
4816             /* Don't instrument the dynamic linker.  It generates a
4817                lot of races which we just expensively suppress, so
4818                it's pointless.
4819 
4820                Avoid flooding is_in_dynamic_linker_shared_object with
4821                requests by only checking at transitions between 4K
4822                pages. */
4823             if ((cia & ~(Addr)0xFFF) != inLDSOmask4K) {
4824                if (0) VG_(printf)("NEW %#lx\n", cia);
4825                inLDSOmask4K = cia & ~(Addr)0xFFF;
4826                inLDSO = is_in_dynamic_linker_shared_object(cia);
4827             } else {
4828                if (0) VG_(printf)("old %#lx\n", cia);
4829             }
4830             break;
4831 
4832          case Ist_MBE:
4833             switch (st->Ist.MBE.event) {
4834                case Imbe_Fence:
4835                case Imbe_CancelReservation:
4836                   break; /* not interesting */
4837                default:
4838                   goto unhandled;
4839             }
4840             break;
4841 
4842          case Ist_CAS: {
4843             /* Atomic read-modify-write cycle.  Just pretend it's a
4844                read. */
4845             IRCAS* cas    = st->Ist.CAS.details;
4846             Bool   isDCAS = cas->oldHi != IRTemp_INVALID;
4847             if (isDCAS) {
4848                tl_assert(cas->expdHi);
4849                tl_assert(cas->dataHi);
4850             } else {
4851                tl_assert(!cas->expdHi);
4852                tl_assert(!cas->dataHi);
4853             }
4854             /* Just be boring about it. */
4855             if (!inLDSO) {
4856                instrument_mem_access(
4857                   bbOut,
4858                   cas->addr,
4859                   (isDCAS ? 2 : 1)
4860                      * sizeofIRType(typeOfIRExpr(bbIn->tyenv, cas->dataLo)),
4861                   False/*!isStore*/, fixupSP_needed,
4862                   hWordTy_szB, goff_SP, goff_SP_s1,
4863                   NULL/*no-guard*/
4864                );
4865             }
4866             break;
4867          }
4868 
4869          case Ist_LLSC: {
4870             /* We pretend store-conditionals don't exist, viz, ignore
4871                them.  Whereas load-linked's are treated the same as
4872                normal loads. */
4873             IRType dataTy;
4874             if (st->Ist.LLSC.storedata == NULL) {
4875                /* LL */
4876                dataTy = typeOfIRTemp(bbIn->tyenv, st->Ist.LLSC.result);
4877                if (!inLDSO) {
4878                   instrument_mem_access(
4879                      bbOut,
4880                      st->Ist.LLSC.addr,
4881                      sizeofIRType(dataTy),
4882                      False/*!isStore*/, fixupSP_needed,
4883                      hWordTy_szB, goff_SP, goff_SP_s1,
4884                      NULL/*no-guard*/
4885                   );
4886                }
4887             } else {
4888                /* SC */
4889                /*ignore */
4890             }
4891             break;
4892          }
4893 
4894          case Ist_Store:
4895             if (!inLDSO) {
4896                instrument_mem_access(
4897                   bbOut,
4898                   st->Ist.Store.addr,
4899                   sizeofIRType(typeOfIRExpr(bbIn->tyenv, st->Ist.Store.data)),
4900                   True/*isStore*/, fixupSP_needed,
4901                   hWordTy_szB, goff_SP, goff_SP_s1,
4902                   NULL/*no-guard*/
4903                );
4904             }
4905             break;
4906 
4907          case Ist_StoreG: {
4908             IRStoreG* sg   = st->Ist.StoreG.details;
4909             IRExpr*   data = sg->data;
4910             IRExpr*   addr = sg->addr;
4911             IRType    type = typeOfIRExpr(bbIn->tyenv, data);
4912             tl_assert(type != Ity_INVALID);
4913             instrument_mem_access( bbOut, addr, sizeofIRType(type),
4914                                    True/*isStore*/, fixupSP_needed,
4915                                    hWordTy_szB,
4916                                    goff_SP, goff_SP_s1, sg->guard );
4917             break;
4918          }
4919 
4920          case Ist_LoadG: {
4921             IRLoadG* lg       = st->Ist.LoadG.details;
4922             IRType   type     = Ity_INVALID; /* loaded type */
4923             IRType   typeWide = Ity_INVALID; /* after implicit widening */
4924             IRExpr*  addr     = lg->addr;
4925             typeOfIRLoadGOp(lg->cvt, &typeWide, &type);
4926             tl_assert(type != Ity_INVALID);
4927             instrument_mem_access( bbOut, addr, sizeofIRType(type),
4928                                    False/*!isStore*/, fixupSP_needed,
4929                                    hWordTy_szB,
4930                                    goff_SP, goff_SP_s1, lg->guard );
4931             break;
4932          }
4933 
4934          case Ist_WrTmp: {
4935             IRExpr* data = st->Ist.WrTmp.data;
4936             if (data->tag == Iex_Load) {
4937                if (!inLDSO) {
4938                   instrument_mem_access(
4939                      bbOut,
4940                      data->Iex.Load.addr,
4941                      sizeofIRType(data->Iex.Load.ty),
4942                      False/*!isStore*/, fixupSP_needed,
4943                      hWordTy_szB, goff_SP, goff_SP_s1,
4944                      NULL/*no-guard*/
4945                   );
4946                }
4947             }
4948             break;
4949          }
4950 
4951          case Ist_Dirty: {
4952             Int      dataSize;
4953             IRDirty* d = st->Ist.Dirty.details;
4954             if (d->mFx != Ifx_None) {
4955                /* This dirty helper accesses memory.  Collect the
4956                   details. */
4957                tl_assert(d->mAddr != NULL);
4958                tl_assert(d->mSize != 0);
4959                dataSize = d->mSize;
4960                if (d->mFx == Ifx_Read || d->mFx == Ifx_Modify) {
4961                   if (!inLDSO) {
4962                      instrument_mem_access(
4963                         bbOut, d->mAddr, dataSize,
4964                         False/*!isStore*/, fixupSP_needed,
4965                         hWordTy_szB, goff_SP, goff_SP_s1,
4966                         NULL/*no-guard*/
4967                      );
4968                   }
4969                }
4970                if (d->mFx == Ifx_Write || d->mFx == Ifx_Modify) {
4971                   if (!inLDSO) {
4972                      instrument_mem_access(
4973                         bbOut, d->mAddr, dataSize,
4974                         True/*isStore*/, fixupSP_needed,
4975                         hWordTy_szB, goff_SP, goff_SP_s1,
4976                         NULL/*no-guard*/
4977                      );
4978                   }
4979                }
4980             } else {
4981                tl_assert(d->mAddr == NULL);
4982                tl_assert(d->mSize == 0);
4983             }
4984             break;
4985          }
4986 
4987          default:
4988          unhandled:
4989             ppIRStmt(st);
4990             tl_assert(0);
4991 
4992       } /* switch (st->tag) */
4993 
4994       addStmtToIRSB( bbOut, st );
4995    } /* iterate over bbIn->stmts */
4996 
4997    // See above the case Ist_Exit:
4998    if (HG_(clo_delta_stacktrace)
4999        && (bbOut->jumpkind != Ijk_Boring || ! VG_(has_CF_info)(cia)))
5000       addInvalidateCachedStack(bbOut, goff_SP_s1, hWordTy_szB);
5001 
5002    return bbOut;
5003 }
5004 
5005 #undef binop
5006 #undef mkexpr
5007 #undef mkU32
5008 #undef mkU64
5009 #undef assign
5010 
5011 
5012 /*----------------------------------------------------------------*/
5013 /*--- Client requests                                          ---*/
5014 /*----------------------------------------------------------------*/
5015 
5016 /* Sheesh.  Yet another goddam finite map. */
5017 static WordFM* map_pthread_t_to_Thread = NULL; /* pthread_t -> Thread* */
5018 
5019 static void map_pthread_t_to_Thread_INIT ( void ) {
5020    if (UNLIKELY(map_pthread_t_to_Thread == NULL)) {
5021       map_pthread_t_to_Thread = VG_(newFM)( HG_(zalloc), "hg.mpttT.1",
5022                                             HG_(free), NULL );
5023    }
5024 }
5025 
5026 /* A list of Ada dependent tasks and their masters.  Used to implement
5027    the Ada task termination semantics as provided by the
5028    gcc gnat Ada runtime. */
5029 typedef
5030    struct {
5031       void* dependent; // Ada Task Control Block of the Dependent
5032       void* master;    // ATCB of the master
5033       Word  master_level; // level of dependency between master and dependent
5034       Thread* hg_dependent; // helgrind Thread* for dependent task.
5035    }
5036    GNAT_dmml; // (d)ependent (m)aster (m)aster_(l)evel.
5037 static XArray* gnat_dmmls;   /* of GNAT_dmml */
5038 static void gnat_dmmls_INIT (void)
5039 {
5040    if (UNLIKELY(gnat_dmmls == NULL)) {
5041       gnat_dmmls = VG_(newXA) (HG_(zalloc), "hg.gnat_md.1",
5042                                HG_(free),
5043                                sizeof(GNAT_dmml) );
5044    }
5045 }
5046 
5047 static void xtmemory_report_next_block(XT_Allocs* xta, ExeContext** ec_alloc)
5048 {
5049    const MallocMeta* md = VG_(HT_Next)(hg_mallocmeta_table);
5050    if (md) {
5051       xta->nbytes = md->szB;
5052       xta->nblocks = 1;
5053       *ec_alloc = md->where;
5054    } else
5055       xta->nblocks = 0;
5056 }
5057 static void HG_(xtmemory_report) ( const HChar* filename, Bool fini )
5058 {
5059    // Make xtmemory_report_next_block ready to be called.
5060    VG_(HT_ResetIter)(hg_mallocmeta_table);
5061    VG_(XTMemory_report)(filename, fini, xtmemory_report_next_block,
5062                         VG_(XT_filter_1top_and_maybe_below_main));
5063 }
5064 
5065 static void print_monitor_help ( void )
5066 {
5067    VG_(gdb_printf)
5068       (
5069 "\n"
5070 "helgrind monitor commands:\n"
5071 "  info locks [lock_addr]  : show status of lock at addr lock_addr\n"
5072 "           with no lock_addr, show status of all locks\n"
5073 "  accesshistory <addr> [<len>]   : show access history recorded\n"
5074 "                     for <len> (or 1) bytes at <addr>\n"
5075 "  xtmemory [<filename>]\n"
5076 "        dump xtree memory profile in <filename> (default xtmemory.kcg.%%p.%%n)\n"
5077 "\n");
5078 }
5079 
5080 /* return True if request recognised, False otherwise */
5081 static Bool handle_gdb_monitor_command (ThreadId tid, HChar *req)
5082 {
5083    HChar* wcmd;
5084    HChar s[VG_(strlen)(req) + 1]; /* copy for strtok_r, incl. trailing NUL */
5085    HChar *ssaveptr;
5086    Int   kwdid;
5087 
5088    VG_(strcpy) (s, req);
5089 
5090    wcmd = VG_(strtok_r) (s, " ", &ssaveptr);
5091    /* NB: if possible, avoid introducing a new command below which
5092       starts with the same first letter(s) as an already existing
5093       command. This ensures a shorter abbreviation for the user. */
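   /* For instance, with the keyword string below as it stands, "i l"
      should be accepted as an abbreviation of "info locks" (assuming
      VG_(keyword_id)'s usual unique-prefix matching); adding another
      command starting with "i" would force users to type more of it. */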
5094    switch (VG_(keyword_id)
5095            ("help info accesshistory xtmemory",
5096             wcmd, kwd_report_duplicated_matches)) {
5097    case -2: /* multiple matches */
5098       return True;
5099    case -1: /* not found */
5100       return False;
5101    case  0: /* help */
5102       print_monitor_help();
5103       return True;
5104    case  1: /* info */
5105       wcmd = VG_(strtok_r) (NULL, " ", &ssaveptr);
5106       switch (kwdid = VG_(keyword_id)
5107               ("locks",
5108                wcmd, kwd_report_all)) {
5109       case -2:
5110       case -1:
5111          break;
5112       case 0: // locks
5113          {
5114             const HChar* wa;
5115             Addr lk_addr = 0;
5116             Bool lk_shown = False;
5117             Bool all_locks = True;
5118             Int i;
5119             Lock* lk;
5120 
5121             wa = VG_(strtok_r) (NULL, " ", &ssaveptr);
5122             if (wa != NULL) {
5123                if (VG_(parse_Addr) (&wa, &lk_addr) )
5124                   all_locks = False;
5125                else {
5126                   VG_(gdb_printf) ("missing or malformed address\n");
5127                }
5128             }
5129             for (i = 0, lk = admin_locks;  lk;  i++, lk = lk->admin_next) {
5130                if (all_locks || lk_addr == lk->guestaddr) {
5131                   pp_Lock(0, lk,
5132                           True /* show_lock_addrdescr */,
5133                           False /* show_internal_data */);
5134                   lk_shown = True;
5135                }
5136             }
5137             if (i == 0)
5138                VG_(gdb_printf) ("no locks\n");
5139             if (!all_locks && !lk_shown)
5140                VG_(gdb_printf) ("lock with address %p not found\n",
5141                                 (void*)lk_addr);
5142          }
5143          break;
5144       default:
5145          tl_assert(0);
5146       }
5147       return True;
5148 
5149    case  2: /* accesshistory */
5150       {
5151          Addr address;
5152          SizeT szB = 1;
5153          if (HG_(clo_history_level) < 2) {
5154             VG_(gdb_printf)
5155                ("helgrind must be started with --history-level=full"
5156                 " to use accesshistory\n");
5157             return True;
5158          }
5159          if (VG_(strtok_get_address_and_size) (&address, &szB, &ssaveptr)) {
5160             if (szB >= 1)
5161                libhb_event_map_access_history (address, szB, HG_(print_access));
5162             else
5163                VG_(gdb_printf) ("len must be >=1\n");
5164          }
5165          return True;
5166       }
5167 
5168    case  3: { /* xtmemory */
5169       HChar* filename;
5170       filename = VG_(strtok_r) (NULL, " ", &ssaveptr);
5171       HG_(xtmemory_report)(filename, False);
5172       return True;
5173    }
5174 
5175    default:
5176       tl_assert(0);
5177       return False;
5178    }
5179 }
5180 
5181 static
5182 Bool hg_handle_client_request ( ThreadId tid, UWord* args, UWord* ret)
5183 {
5184    if (!VG_IS_TOOL_USERREQ('H','G',args[0])
5185        && VG_USERREQ__GDB_MONITOR_COMMAND   != args[0])
5186       return False;
5187 
5188    /* Anything that gets past the above check is one of ours, so we
5189       should be able to handle it. */
5190 
5191    /* default, meaningless return value, unless otherwise set */
5192    *ret = 0;
5193 
5194    switch (args[0]) {
5195 
5196       /* --- --- User-visible client requests --- --- */
5197 
5198       case VG_USERREQ__HG_CLEAN_MEMORY:
5199          if (0) VG_(printf)("VG_USERREQ__HG_CLEAN_MEMORY(%#lx,%lu)\n",
5200                             args[1], args[2]);
5201          /* Call die_mem to (expensively) tidy up properly, if there
5202             are any held locks etc in the area.  Calling evh__die_mem
5203             and then evh__new_mem is a bit inefficient; probably just
5204             the latter would do. */
5205          if (args[2] > 0) { /* length */
5206             evh__die_mem(args[1], args[2]);
5207             /* and then set it to New */
5208             evh__new_mem(args[1], args[2]);
5209          }
5210          break;
5211 
5212       case _VG_USERREQ__HG_CLEAN_MEMORY_HEAPBLOCK: {
5213          Addr  payload = 0;
5214          SizeT pszB = 0;
5215          if (0) VG_(printf)("VG_USERREQ__HG_CLEAN_MEMORY_HEAPBLOCK(%#lx)\n",
5216                             args[1]);
5217          if (HG_(mm_find_containing_block)(NULL, NULL,
5218                                            &payload, &pszB, args[1])) {
5219             if (pszB > 0) {
5220                evh__die_mem(payload, pszB);
5221                evh__new_mem(payload, pszB);
5222             }
5223             *ret = pszB;
5224          } else {
5225             *ret = (UWord)-1;
5226          }
5227          break;
5228       }
5229 
5230       case _VG_USERREQ__HG_ARANGE_MAKE_UNTRACKED:
5231          if (0) VG_(printf)("HG_ARANGE_MAKE_UNTRACKED(%#lx,%lu)\n",
5232                             args[1], args[2]);
5233          if (args[2] > 0) { /* length */
5234             evh__untrack_mem(args[1], args[2]);
5235          }
5236          break;
5237 
5238       case _VG_USERREQ__HG_ARANGE_MAKE_TRACKED:
5239          if (0) VG_(printf)("HG_ARANGE_MAKE_TRACKED(%#lx,%lu)\n",
5240                             args[1], args[2]);
5241          if (args[2] > 0) { /* length */
5242             evh__new_mem(args[1], args[2]);
5243          }
5244          break;
5245 
5246       case _VG_USERREQ__HG_GET_ABITS:
5247          if (0) VG_(printf)("HG_GET_ABITS(%#lx,%#lx,%lu)\n",
5248                             args[1], args[2], args[3]);
5249          UChar *zzabit = (UChar *) args[2];
5250          if (zzabit == NULL
5251              || VG_(am_is_valid_for_client)((Addr)zzabit, (SizeT)args[3],
5252                                             VKI_PROT_READ|VKI_PROT_WRITE))
5253             *ret = (UWord) libhb_srange_get_abits ((Addr)   args[1],
5254                                                    (UChar*) args[2],
5255                                                    (SizeT)  args[3]);
5256          else
5257             *ret = -1;
5258          break;
5259 
5260       /* This thread (tid) (a master) is informing us that it has
5261          seen the termination of a dependent task, and that this should
5262          be considered as a join between master and dependent. */
5263       case _VG_USERREQ__HG_GNAT_DEPENDENT_MASTER_JOIN: {
5264          Word n;
5265          const Thread *stayer = map_threads_maybe_lookup( tid );
5266          const void *dependent = (void*)args[1];
5267          const void *master = (void*)args[2];
5268 
5269          if (0)
5270          VG_(printf)("HG_GNAT_DEPENDENT_MASTER_JOIN (tid %d): "
5271                      "self_id = %p Thread* = %p dependent %p\n",
5272                      (Int)tid, master, stayer, dependent);
5273 
5274          gnat_dmmls_INIT();
5275          /* Similar loop to the one for the master-completed hook below, but
5276             stops at the first matching occurrence, comparing only master and
5277             dependent. */
5278          for (n = VG_(sizeXA) (gnat_dmmls) - 1; n >= 0; n--) {
5279             GNAT_dmml *dmml = (GNAT_dmml*) VG_(indexXA)(gnat_dmmls, n);
5280             if (dmml->master == master
5281                 && dmml->dependent == dependent) {
5282                if (0)
5283                VG_(printf)("quitter %p dependency to stayer %p (join)\n",
5284                            dmml->hg_dependent->hbthr,  stayer->hbthr);
5285                tl_assert(dmml->hg_dependent->hbthr != stayer->hbthr);
5286                generate_quitter_stayer_dependence (dmml->hg_dependent->hbthr,
5287                                                    stayer->hbthr);
5288                VG_(removeIndexXA) (gnat_dmmls, n);
5289                break;
5290             }
5291          }
5292          break;
5293       }
5294 
5295       /* --- --- Client requests for Helgrind's use only --- --- */
5296 
5297       /* Some thread is telling us its pthread_t value.  Record the
5298          binding between that and the associated Thread*, so we can
5299          later find the Thread* again when notified of a join by the
5300          thread. */
5301       case _VG_USERREQ__HG_SET_MY_PTHREAD_T: {
5302          Thread* my_thr = NULL;
5303          if (0)
5304          VG_(printf)("SET_MY_PTHREAD_T (tid %d): pthread_t = %p\n", (Int)tid,
5305                      (void*)args[1]);
5306          map_pthread_t_to_Thread_INIT();
5307          my_thr = map_threads_maybe_lookup( tid );
5308          /* This assertion should hold because the map_threads (tid to
5309             Thread*) binding should have been made at the point of
5310             low-level creation of this thread, which should have
5311             happened prior to us getting this client request for it.
5312             That's because this client request is sent from
5313             client-world from the 'thread_wrapper' function, which
5314             only runs once the thread has been low-level created. */
5315          tl_assert(my_thr != NULL);
5316          /* So now we know that (pthread_t)args[1] is associated with
5317             (Thread*)my_thr.  Note that down. */
5318          if (0)
5319          VG_(printf)("XXXX: bind pthread_t %p to Thread* %p\n",
5320                      (void*)args[1], (void*)my_thr );
5321          VG_(addToFM)( map_pthread_t_to_Thread, (UWord)args[1], (UWord)my_thr );
5322 
5323          if (my_thr->coretid != 1) {
5324             /* FIXME: hardwires assumption about identity of the root thread. */
5325             if (HG_(clo_ignore_thread_creation)) {
5326                HG_(thread_leave_pthread_create)(my_thr);
5327                HG_(thread_leave_synchr)(my_thr);
5328                tl_assert(my_thr->synchr_nesting == 0);
5329             }
5330          }
5331          break;
5332       }
5333 
5334       case _VG_USERREQ__HG_PTH_API_ERROR: {
5335          Thread* my_thr = NULL;
5336          map_pthread_t_to_Thread_INIT();
5337          my_thr = map_threads_maybe_lookup( tid );
5338          tl_assert(my_thr); /* See justification above in SET_MY_PTHREAD_T */
5339          HG_(record_error_PthAPIerror)(
5340             my_thr, (HChar*)args[1], (UWord)args[2], (HChar*)args[3] );
5341          break;
5342       }
5343 
5344       /* This thread (tid) has completed a join with the quitting
5345          thread whose pthread_t is in args[1]. */
5346       case _VG_USERREQ__HG_PTHREAD_JOIN_POST: {
5347          Thread* thr_q = NULL; /* quitter Thread* */
5348          Bool    found = False;
5349          if (0)
5350          VG_(printf)("NOTIFY_JOIN_COMPLETE (tid %d): quitter = %p\n", (Int)tid,
5351                      (void*)args[1]);
5352          map_pthread_t_to_Thread_INIT();
5353          found = VG_(lookupFM)( map_pthread_t_to_Thread,
5354                                 NULL, (UWord*)&thr_q, (UWord)args[1] );
5355           /* Can this fail?  It would mean that our pthread_join
5356              wrapper observed a successful join on args[1] yet that
5357              thread never existed (or at least, it never lodged an
5358              entry in the mapping (via SET_MY_PTHREAD_T)).  Which
5359              sounds like a bug in the threads library. */
5360          // FIXME: get rid of this assertion; handle properly
5361          tl_assert(found);
5362          if (found) {
5363             if (0)
5364             VG_(printf)(".................... quitter Thread* = %p\n",
5365                         thr_q);
5366             evh__HG_PTHREAD_JOIN_POST( tid, thr_q );
5367          }
5368          break;
5369       }
5370 
5371       /* This thread (tid) is informing us of its master. */
5372       case _VG_USERREQ__HG_GNAT_MASTER_HOOK: {
5373          GNAT_dmml dmml;
5374          dmml.dependent = (void*)args[1];
5375          dmml.master = (void*)args[2];
5376          dmml.master_level = (Word)args[3];
5377          dmml.hg_dependent = map_threads_maybe_lookup( tid );
5378          tl_assert(dmml.hg_dependent);
5379 
5380          if (0)
5381          VG_(printf)("HG_GNAT_MASTER_HOOK (tid %d): "
5382                      "dependent = %p master = %p master_level = %ld"
5383                      " dependent Thread* = %p\n",
5384                      (Int)tid, dmml.dependent, dmml.master, dmml.master_level,
5385                      dmml.hg_dependent);
5386          gnat_dmmls_INIT();
5387          VG_(addToXA) (gnat_dmmls, &dmml);
5388          break;
5389       }
5390 
5391       /* This thread (tid) is informing us that it has completed a
5392          master. */
5393       case _VG_USERREQ__HG_GNAT_MASTER_COMPLETED_HOOK: {
5394          Word n;
5395          const Thread *stayer = map_threads_maybe_lookup( tid );
5396          const void *master = (void*)args[1];
5397          const Word master_level = (Word) args[2];
5398          tl_assert(stayer);
5399 
5400          if (0)
5401          VG_(printf)("HG_GNAT_MASTER_COMPLETED_HOOK (tid %d): "
5402                      "self_id = %p master_level = %ld Thread* = %p\n",
5403                      (Int)tid, master, master_level, stayer);
5404 
5405          gnat_dmmls_INIT();
5406          /* Reverse loop on the array, simulating a pthread_join for
5407             the Dependent tasks of the completed master, and removing
5408             them from the array. */
5409          for (n = VG_(sizeXA) (gnat_dmmls) - 1; n >= 0; n--) {
5410             GNAT_dmml *dmml = (GNAT_dmml*) VG_(indexXA)(gnat_dmmls, n);
5411             if (dmml->master == master
5412                 && dmml->master_level == master_level) {
5413                if (0)
5414                VG_(printf)("quitter %p dependency to stayer %p\n",
5415                            dmml->hg_dependent->hbthr,  stayer->hbthr);
5416                tl_assert(dmml->hg_dependent->hbthr != stayer->hbthr);
5417                generate_quitter_stayer_dependence (dmml->hg_dependent->hbthr,
5418                                                    stayer->hbthr);
5419                VG_(removeIndexXA) (gnat_dmmls, n);
5420             }
5421          }
5422          break;
5423       }
5424 
5425       /* EXPOSITION only: by intercepting lock init events we can show
5426          the user where the lock was initialised, rather than only
5427          being able to show where it was first locked.  Intercepting
5428          lock initialisations is not necessary for the basic operation
5429          of the race checker. */
5430       case _VG_USERREQ__HG_PTHREAD_MUTEX_INIT_POST:
5431          evh__HG_PTHREAD_MUTEX_INIT_POST( tid, (void*)args[1], args[2] );
5432          break;
5433 
5434       /* mutex=arg[1], mutex_is_init=arg[2] */
5435       case _VG_USERREQ__HG_PTHREAD_MUTEX_DESTROY_PRE:
5436          evh__HG_PTHREAD_MUTEX_DESTROY_PRE( tid, (void*)args[1], args[2] != 0 );
5437          break;
5438 
5439       case _VG_USERREQ__HG_PTHREAD_MUTEX_UNLOCK_PRE:   // pth_mx_t*
5440          HG_(thread_enter_synchr)(map_threads_maybe_lookup(tid));
         if (HG_(get_pthread_create_nesting_level)(tid) == 0)
            evh__HG_PTHREAD_MUTEX_UNLOCK_PRE( tid, (void*)args[1] );
         break;

      case _VG_USERREQ__HG_PTHREAD_MUTEX_UNLOCK_POST:  // pth_mx_t*
         if (HG_(get_pthread_create_nesting_level)(tid) == 0)
            evh__HG_PTHREAD_MUTEX_UNLOCK_POST( tid, (void*)args[1] );
         HG_(thread_leave_synchr)(map_threads_maybe_lookup(tid));
         break;

      case _VG_USERREQ__HG_PTHREAD_MUTEX_LOCK_PRE:     // pth_mx_t*
         HG_(thread_enter_synchr)(map_threads_maybe_lookup(tid));
         if (HG_(get_pthread_create_nesting_level)(tid) == 0)
            evh__HG_PTHREAD_MUTEX_LOCK_PRE( tid, (void*)args[1], args[2] );
         break;

      case _VG_USERREQ__HG_PTHREAD_MUTEX_LOCK_POST:    // pth_mx_t*, long
         if ((args[2] == True) // lock actually taken
             && (HG_(get_pthread_create_nesting_level)(tid) == 0))
            evh__HG_PTHREAD_MUTEX_LOCK_POST( tid, (void*)args[1] );
         HG_(thread_leave_synchr)(map_threads_maybe_lookup(tid));
         break;

      /* This thread is about to do pthread_cond_signal on the
         pthread_cond_t* in arg[1].  Ditto pthread_cond_broadcast. */
      case _VG_USERREQ__HG_PTHREAD_COND_SIGNAL_PRE:
      case _VG_USERREQ__HG_PTHREAD_COND_BROADCAST_PRE:
         HG_(thread_enter_synchr)(map_threads_maybe_lookup(tid));
         evh__HG_PTHREAD_COND_SIGNAL_PRE( tid, (void*)args[1] );
         break;

      case _VG_USERREQ__HG_PTHREAD_COND_SIGNAL_POST:
      case _VG_USERREQ__HG_PTHREAD_COND_BROADCAST_POST:
         HG_(thread_leave_synchr)(map_threads_maybe_lookup(tid));
         break;

      /* Entry into pthread_cond_wait, cond=arg[1], mutex=arg[2].
         Returns a flag indicating whether or not the mutex is believed to be
         valid for this operation. */
      case _VG_USERREQ__HG_PTHREAD_COND_WAIT_PRE: {
         HG_(thread_enter_synchr)(map_threads_maybe_lookup(tid));
         Bool mutex_is_valid
            = evh__HG_PTHREAD_COND_WAIT_PRE( tid, (void*)args[1],
                                                  (void*)args[2] );
         *ret = mutex_is_valid ? 1 : 0;
         break;
      }

      /* Thread successfully completed pthread_cond_init:
         cond=arg[1], cond_attr=arg[2] */
      case _VG_USERREQ__HG_PTHREAD_COND_INIT_POST:
         evh__HG_PTHREAD_COND_INIT_POST( tid,
                                         (void*)args[1], (void*)args[2] );
         break;

      /* cond=arg[1], cond_is_init=arg[2] */
      case _VG_USERREQ__HG_PTHREAD_COND_DESTROY_PRE:
         evh__HG_PTHREAD_COND_DESTROY_PRE( tid, (void*)args[1], args[2] != 0 );
         break;

      /* Thread completed pthread_cond_wait, cond=arg[1],
         mutex=arg[2], timeout=arg[3], successful=arg[4] */
      case _VG_USERREQ__HG_PTHREAD_COND_WAIT_POST:
         if (args[4] == True)
            evh__HG_PTHREAD_COND_WAIT_POST( tid,
                                            (void*)args[1], (void*)args[2],
                                            (Bool)args[3] );
         HG_(thread_leave_synchr)(map_threads_maybe_lookup(tid));
         break;

      case _VG_USERREQ__HG_PTHREAD_RWLOCK_INIT_POST:
         evh__HG_PTHREAD_RWLOCK_INIT_POST( tid, (void*)args[1] );
         break;

      case _VG_USERREQ__HG_PTHREAD_RWLOCK_DESTROY_PRE:
         evh__HG_PTHREAD_RWLOCK_DESTROY_PRE( tid, (void*)args[1] );
         break;

      /* rwlock=arg[1], isW=arg[2], isTryLock=arg[3] */
      case _VG_USERREQ__HG_PTHREAD_RWLOCK_LOCK_PRE:
         HG_(thread_enter_synchr)(map_threads_maybe_lookup(tid));
         if (HG_(get_pthread_create_nesting_level)(tid) == 0)
            evh__HG_PTHREAD_RWLOCK_LOCK_PRE( tid, (void*)args[1],
                                             args[2], args[3] );
         break;

      /* rwlock=arg[1], isW=arg[2], tookLock=arg[3] */
      case _VG_USERREQ__HG_PTHREAD_RWLOCK_LOCK_POST:
         if ((args[3] == True)
             && (HG_(get_pthread_create_nesting_level)(tid) == 0))
            evh__HG_PTHREAD_RWLOCK_LOCK_POST( tid, (void*)args[1], args[2] );
         HG_(thread_leave_synchr)(map_threads_maybe_lookup(tid));
         break;

      case _VG_USERREQ__HG_PTHREAD_RWLOCK_UNLOCK_PRE:
         HG_(thread_enter_synchr)(map_threads_maybe_lookup(tid));
         if (HG_(get_pthread_create_nesting_level)(tid) == 0)
            evh__HG_PTHREAD_RWLOCK_UNLOCK_PRE( tid, (void*)args[1] );
         break;

      case _VG_USERREQ__HG_PTHREAD_RWLOCK_UNLOCK_POST:
         if (HG_(get_pthread_create_nesting_level)(tid) == 0)
            evh__HG_PTHREAD_RWLOCK_UNLOCK_POST( tid, (void*)args[1] );
         HG_(thread_leave_synchr)(map_threads_maybe_lookup(tid));
         break;

      case _VG_USERREQ__HG_POSIX_SEM_INIT_POST: /* sem_t*, unsigned long */
         evh__HG_POSIX_SEM_INIT_POST( tid, (void*)args[1], args[2] );
         break;

      case _VG_USERREQ__HG_POSIX_SEM_DESTROY_PRE: /* sem_t* */
         evh__HG_POSIX_SEM_DESTROY_PRE( tid, (void*)args[1] );
         break;

      case _VG_USERREQ__HG_POSIX_SEM_POST_PRE: /* sem_t* */
         HG_(thread_enter_synchr)(map_threads_maybe_lookup(tid));
         evh__HG_POSIX_SEM_POST_PRE( tid, (void*)args[1] );
         break;

      case _VG_USERREQ__HG_POSIX_SEM_POST_POST: /* sem_t* */
         HG_(thread_leave_synchr)(map_threads_maybe_lookup(tid));
         break;

      case _VG_USERREQ__HG_POSIX_SEM_WAIT_PRE: /* sem_t* */
         HG_(thread_enter_synchr)(map_threads_maybe_lookup(tid));
         break;

      case _VG_USERREQ__HG_POSIX_SEM_WAIT_POST: /* sem_t*, long tookLock */
         if (args[2] == True)
            evh__HG_POSIX_SEM_WAIT_POST( tid, (void*)args[1] );
         HG_(thread_leave_synchr)(map_threads_maybe_lookup(tid));
         break;

      case _VG_USERREQ__HG_PTHREAD_BARRIER_INIT_PRE:
         /* pth_bar_t*, ulong count, ulong resizable */
         evh__HG_PTHREAD_BARRIER_INIT_PRE( tid, (void*)args[1],
                                                args[2], args[3] );
         break;

      case _VG_USERREQ__HG_PTHREAD_BARRIER_RESIZE_PRE:
         /* pth_bar_t*, ulong newcount */
         evh__HG_PTHREAD_BARRIER_RESIZE_PRE ( tid, (void*)args[1],
                                              args[2] );
         break;

      case _VG_USERREQ__HG_PTHREAD_BARRIER_WAIT_PRE:
         /* pth_bar_t* */
         evh__HG_PTHREAD_BARRIER_WAIT_PRE( tid, (void*)args[1] );
         break;

      case _VG_USERREQ__HG_PTHREAD_BARRIER_DESTROY_PRE:
         /* pth_bar_t* */
         evh__HG_PTHREAD_BARRIER_DESTROY_PRE( tid, (void*)args[1] );
         break;

      case _VG_USERREQ__HG_PTHREAD_SPIN_INIT_OR_UNLOCK_PRE:
         /* pth_spinlock_t* */
         evh__HG_PTHREAD_SPIN_INIT_OR_UNLOCK_PRE( tid, (void*)args[1] );
         break;

      case _VG_USERREQ__HG_PTHREAD_SPIN_INIT_OR_UNLOCK_POST:
         /* pth_spinlock_t* */
         evh__HG_PTHREAD_SPIN_INIT_OR_UNLOCK_POST( tid, (void*)args[1] );
         break;

      case _VG_USERREQ__HG_PTHREAD_SPIN_LOCK_PRE:
         /* pth_spinlock_t*, Word */
         evh__HG_PTHREAD_SPIN_LOCK_PRE( tid, (void*)args[1], args[2] );
         break;

      case _VG_USERREQ__HG_PTHREAD_SPIN_LOCK_POST:
         /* pth_spinlock_t* */
         evh__HG_PTHREAD_SPIN_LOCK_POST( tid, (void*)args[1] );
         break;

      case _VG_USERREQ__HG_PTHREAD_SPIN_DESTROY_PRE:
         /* pth_spinlock_t* */
         evh__HG_PTHREAD_SPIN_DESTROY_PRE( tid, (void*)args[1] );
         break;

      case _VG_USERREQ__HG_CLIENTREQ_UNIMP: {
         /* HChar* who */
         HChar*  who = (HChar*)args[1];
         HChar   buf[50 + 50];
         Thread* thr = map_threads_maybe_lookup( tid );
         tl_assert( thr ); /* I must be mapped */
         tl_assert( who );
         tl_assert( VG_(strlen)(who) <= 50 );
         VG_(sprintf)(buf, "Unimplemented client request macro \"%s\"", who );
         /* record_error_Misc strdup's buf, so this is safe: */
         HG_(record_error_Misc)( thr, buf );
         break;
      }

      case _VG_USERREQ__HG_USERSO_SEND_PRE:
         /* UWord arbitrary-SO-tag */
         evh__HG_USERSO_SEND_PRE( tid, args[1] );
         break;

      case _VG_USERREQ__HG_USERSO_RECV_POST:
         /* UWord arbitrary-SO-tag */
         evh__HG_USERSO_RECV_POST( tid, args[1] );
         break;

      case _VG_USERREQ__HG_USERSO_FORGET_ALL:
         /* UWord arbitrary-SO-tag */
         evh__HG_USERSO_FORGET_ALL( tid, args[1] );
         break;
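
      /* Illustrative sketch (not part of this tool): the three USERSO
         requests above are normally reached from client code via the
         ANNOTATE_HAPPENS_BEFORE / ANNOTATE_HAPPENS_AFTER style macros in
         helgrind.h.  The snippet below is client-side code, shown here
         for reference only and kept under #if 0 so it cannot affect this
         translation unit; 'payload' and 'ready' are hypothetical names. */
#if 0
      #include "helgrind.h"

      static int payload;           // data handed from producer to consumer
      static volatile int ready;    // hand-rolled flag Helgrind cannot see

      static void* producer ( void* arg )
      {
         payload = 42;
         ANNOTATE_HAPPENS_BEFORE(&ready);  // reaches HG_USERSO_SEND_PRE
         ready = 1;
         return NULL;
      }

      static void* consumer ( void* arg )
      {
         while (!ready) { }
         ANNOTATE_HAPPENS_AFTER(&ready);   // reaches HG_USERSO_RECV_POST
         return (void*)(long)payload;      // no race reported on 'payload'
      }
#endif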

      case VG_USERREQ__GDB_MONITOR_COMMAND: {
         Bool handled = handle_gdb_monitor_command (tid, (HChar*)args[1]);
         if (handled)
            *ret = 1;
         else
            *ret = 0;
         return handled;
      }
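
      /* For reference (illustrative): monitor commands arrive here from
         the embedded gdbserver, e.g. from a gdb session attached with
         "target remote | vgdb":

               (gdb) monitor help
               (gdb) monitor info locks

         or directly from a shell via vgdb:

               vgdb --pid=<pid> info locks

         "monitor help" prints the command set actually understood by
         handle_gdb_monitor_command; "info locks" is assumed here as a
         typical Helgrind example. */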

      case _VG_USERREQ__HG_PTHREAD_CREATE_BEGIN: {
         Thread *thr = map_threads_maybe_lookup(tid);
         if (HG_(clo_ignore_thread_creation)) {
            HG_(thread_enter_pthread_create)(thr);
            HG_(thread_enter_synchr)(thr);
         }
         break;
      }

      case _VG_USERREQ__HG_PTHREAD_CREATE_END: {
         Thread *thr = map_threads_maybe_lookup(tid);
         if (HG_(clo_ignore_thread_creation)) {
            HG_(thread_leave_pthread_create)(thr);
            HG_(thread_leave_synchr)(thr);
         }
         break;
      }

      case _VG_USERREQ__HG_PTHREAD_MUTEX_ACQUIRE_PRE: // pth_mx_t*, long tryLock
         evh__HG_PTHREAD_MUTEX_LOCK_PRE( tid, (void*)args[1], args[2] );
         break;

      case _VG_USERREQ__HG_PTHREAD_MUTEX_ACQUIRE_POST:    // pth_mx_t*
         evh__HG_PTHREAD_MUTEX_LOCK_POST( tid, (void*)args[1] );
         break;

      case _VG_USERREQ__HG_PTHREAD_RWLOCK_ACQUIRED:       // void*, long isW
         evh__HG_PTHREAD_RWLOCK_LOCK_POST( tid, (void*)args[1], args[2] );
         break;

      case _VG_USERREQ__HG_PTHREAD_RWLOCK_RELEASED:       // void*
         evh__HG_PTHREAD_RWLOCK_UNLOCK_PRE( tid, (void*)args[1] );
         break;

      case _VG_USERREQ__HG_POSIX_SEM_RELEASED: /* sem_t* */
         evh__HG_POSIX_SEM_POST_PRE( tid, (void*)args[1] );
         break;

      case _VG_USERREQ__HG_POSIX_SEM_ACQUIRED: /* sem_t* */
         evh__HG_POSIX_SEM_WAIT_POST( tid, (void*)args[1] );
         break;

#if defined(VGO_solaris)
      case _VG_USERREQ__HG_RTLD_BIND_GUARD:
         evh__HG_RTLD_BIND_GUARD(tid, args[1]);
         break;

      case _VG_USERREQ__HG_RTLD_BIND_CLEAR:
         evh__HG_RTLD_BIND_CLEAR(tid, args[1]);
         break;
#endif /* VGO_solaris */

      default:
         /* Unhandled Helgrind client request! */
         tl_assert2(0, "unhandled Helgrind client request 0x%lx",
                       args[0]);
   }

   return True;
}


/*----------------------------------------------------------------*/
/*--- Setup                                                    ---*/
/*----------------------------------------------------------------*/

static Bool hg_process_cmd_line_option ( const HChar* arg )
{
   const HChar* tmp_str;

   if      VG_BOOL_CLO(arg, "--track-lockorders",
                            HG_(clo_track_lockorders)) {}
   else if VG_BOOL_CLO(arg, "--cmp-race-err-addrs",
                            HG_(clo_cmp_race_err_addrs)) {}

   else if VG_XACT_CLO(arg, "--history-level=none",
                            HG_(clo_history_level), 0);
   else if VG_XACT_CLO(arg, "--history-level=approx",
                            HG_(clo_history_level), 1);
   else if VG_XACT_CLO(arg, "--history-level=full",
                            HG_(clo_history_level), 2);

   else if VG_BOOL_CLO(arg, "--delta-stacktrace",
                            HG_(clo_delta_stacktrace)) {}

   else if VG_BINT_CLO(arg, "--conflict-cache-size",
                       HG_(clo_conflict_cache_size), 10*1000, 150*1000*1000) {}

   /* "stuvwx" --> stuvwx (binary) */
   else if VG_STR_CLO(arg, "--hg-sanity-flags", tmp_str) {
      Int j;

      if (6 != VG_(strlen)(tmp_str)) {
         VG_(message)(Vg_UserMsg,
                      "--hg-sanity-flags argument must have 6 digits\n");
         return False;
      }
      for (j = 0; j < 6; j++) {
         if      ('0' == tmp_str[j]) { /* do nothing */ }
         else if ('1' == tmp_str[j]) HG_(clo_sanity_flags) |= (1 << (6-1-j));
         else {
            VG_(message)(Vg_UserMsg, "--hg-sanity-flags argument can "
                                     "only contain 0s and 1s\n");
            return False;
         }
      }
      if (0) VG_(printf)("XXX sanity flags: 0x%lx\n", HG_(clo_sanity_flags));
   }
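
   /* Worked example (illustrative): "--hg-sanity-flags=010000" has its
      only '1' at j == 1, so the loop above sets bit (6-1-1) == 4 and
      HG_(clo_sanity_flags) becomes 0x10 -- the "after changes to the
      lock-order-acquisition-graph" check listed in hg_print_debug_usage
      below. */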

   else if VG_BOOL_CLO(arg, "--free-is-write",
                            HG_(clo_free_is_write)) {}

   else if VG_XACT_CLO(arg, "--vts-pruning=never",
                            HG_(clo_vts_pruning), 0);
   else if VG_XACT_CLO(arg, "--vts-pruning=auto",
                            HG_(clo_vts_pruning), 1);
   else if VG_XACT_CLO(arg, "--vts-pruning=always",
                            HG_(clo_vts_pruning), 2);

   else if VG_BOOL_CLO(arg, "--check-stack-refs",
                            HG_(clo_check_stack_refs)) {}
   else if VG_BOOL_CLO(arg, "--ignore-thread-creation",
                            HG_(clo_ignore_thread_creation)) {}

   else
      return VG_(replacement_malloc_process_cmd_line_option)(arg);

   return True;
}

static void hg_print_usage ( void )
{
   VG_(printf)(
"    --free-is-write=no|yes    treat heap frees as writes [no]\n"
"    --track-lockorders=no|yes show lock ordering errors? [yes]\n"
"    --history-level=none|approx|full [full]\n"
"       full:   show both stack traces for a data race (can be very slow)\n"
"       approx: full trace for one thread, approx for the other (faster)\n"
"       none:   only show trace for one thread in a race (fastest)\n"
"    --delta-stacktrace=no|yes [yes on linux amd64/x86]\n"
"        no : always compute a full history stacktrace from unwind info\n"
"        yes : derive a stacktrace from the previous stacktrace\n"
"          if there was no call/return or similar instruction\n"
"    --conflict-cache-size=N   size of 'full' history cache [2000000]\n"
"    --check-stack-refs=no|yes race-check reads and writes on the\n"
"                              main stack and thread stacks? [yes]\n"
"    --ignore-thread-creation=yes|no Ignore activities during thread\n"
"                              creation [%s]\n",
HG_(clo_ignore_thread_creation) ? "yes" : "no"
   );
}
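
/* Typical invocations exercising the options above (illustrative only;
   "./a.out" stands for any client program):

      valgrind --tool=helgrind ./a.out
      valgrind --tool=helgrind --history-level=approx ./a.out
      valgrind --tool=helgrind --free-is-write=yes \
               --conflict-cache-size=5000000 ./a.out
*/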

static void hg_print_debug_usage ( void )
{
   VG_(printf)("    --cmp-race-err-addrs=no|yes  are data addresses in "
               "race errors significant? [no]\n");
   VG_(printf)("    --hg-sanity-flags=<XXXXXX>   sanity check "
               "  at events (X = 0|1) [000000]\n");
   VG_(printf)("    --hg-sanity-flags values:\n");
   VG_(printf)("       010000   after changes to "
               "lock-order-acquisition-graph\n");
   VG_(printf)("       001000   at memory accesses\n");
   VG_(printf)("       000100   at mem permission setting for "
               "ranges >= %d bytes\n", SCE_BIGRANGE_T);
   VG_(printf)("       000010   at lock/unlock events\n");
   VG_(printf)("       000001   at thread create/join events\n");
   VG_(printf)(
"    --vts-pruning=never|auto|always [auto]\n"
"       never:   is never done (may cause big space leaks in Helgrind)\n"
"       auto:    done just often enough to keep space usage under control\n"
"       always:  done after every VTS GC (mostly just a big time waster)\n"
    );
}

static void hg_print_stats (void)
{

   if (1) {
      VG_(printf)("\n");
      HG_(ppWSUstats)( univ_lsets, "univ_lsets" );
      if (HG_(clo_track_lockorders)) {
         VG_(printf)("\n");
         HG_(ppWSUstats)( univ_laog,  "univ_laog" );
      }
   }

   //zz       VG_(printf)("\n");
   //zz       VG_(printf)(" hbefore: %'10lu queries\n",        stats__hbefore_queries);
   //zz       VG_(printf)(" hbefore: %'10lu cache 0 hits\n",   stats__hbefore_cache0s);
   //zz       VG_(printf)(" hbefore: %'10lu cache > 0 hits\n", stats__hbefore_cacheNs);
   //zz       VG_(printf)(" hbefore: %'10lu graph searches\n", stats__hbefore_gsearches);
   //zz       VG_(printf)(" hbefore: %'10lu   of which slow\n",
   //zz                   stats__hbefore_gsearches - stats__hbefore_gsearchFs);
   //zz       VG_(printf)(" hbefore: %'10lu stack high water mark\n",
   //zz                   stats__hbefore_stk_hwm);
   //zz       VG_(printf)(" hbefore: %'10lu cache invals\n",   stats__hbefore_invals);
   //zz       VG_(printf)(" hbefore: %'10lu probes\n",         stats__hbefore_probes);

   VG_(printf)("\n");
   VG_(printf)("        locksets: %'8d unique lock sets\n",
               (Int)HG_(cardinalityWSU)( univ_lsets ));
   if (HG_(clo_track_lockorders)) {
      VG_(printf)("       univ_laog: %'8d unique lock sets\n",
                  (Int)HG_(cardinalityWSU)( univ_laog ));
   }

   //VG_(printf)("L(ast)L(ock) map: %'8lu inserts (%d map size)\n",
   //            stats__ga_LL_adds,
   //            (Int)(ga_to_lastlock ? VG_(sizeFM)( ga_to_lastlock ) : 0) );

   VG_(printf)("  LockN-to-P map: %'8llu queries (%llu map size)\n",
               HG_(stats__LockN_to_P_queries),
               HG_(stats__LockN_to_P_get_map_size)() );

   VG_(printf)("client malloc-ed blocks: %'8u\n",
               VG_(HT_count_nodes)(hg_mallocmeta_table));

   VG_(printf)("string table map: %'8llu queries (%llu map size)\n",
               HG_(stats__string_table_queries),
               HG_(stats__string_table_get_map_size)() );
   if (HG_(clo_track_lockorders)) {
      VG_(printf)("            LAOG: %'8d map size\n",
                  (Int)(laog ? VG_(sizeFM)( laog ) : 0));
      VG_(printf)(" LAOG exposition: %'8d map size\n",
                  (Int)(laog_exposition ? VG_(sizeFM)( laog_exposition ) : 0));
   }

   VG_(printf)("           locks: %'8lu acquires, "
               "%'lu releases\n",
               stats__lockN_acquires,
               stats__lockN_releases
              );
   VG_(printf)("   sanity checks: %'8lu\n", stats__sanity_checks);

   VG_(printf)("\n");
   libhb_shutdown(True); // This in fact only prints stats.
}

static void hg_fini ( Int exitcode )
{
   HG_(xtmemory_report) (VG_(clo_xtree_memory_file), True);

   if (VG_(clo_verbosity) == 1 && !VG_(clo_xml)
       && HG_(clo_history_level) >= 2) {
      VG_(umsg)(
         "Use --history-level=approx or =none to gain increased speed, at\n" );
      VG_(umsg)(
         "the cost of reduced accuracy of conflicting-access information\n");
   }

   if (SHOW_DATA_STRUCTURES)
      pp_everything( PP_ALL, "SK_(fini)" );
   if (HG_(clo_sanity_flags))
      all__sanity_check("SK_(fini)");

   if (VG_(clo_stats))
      hg_print_stats();
}

/* FIXME: move these somewhere sane */

static
void for_libhb__get_stacktrace ( Thr* hbt, Addr* frames, UWord nRequest )
{
   Thread*     thr;
   ThreadId    tid;
   UWord       nActual;
   tl_assert(hbt);
   thr = libhb_get_Thr_hgthread( hbt );
   tl_assert(thr);
   tid = map_threads_maybe_reverse_lookup_SLOW(thr);
   nActual = (UWord)VG_(get_StackTrace_with_deltas)
                           ( tid, frames, (UInt)nRequest,
                             NULL, NULL, 0,
                             thr->first_sp_delta);
   tl_assert(nActual <= nRequest);
   for (; nActual < nRequest; nActual++)
      frames[nActual] = 0;
}

static
ExeContext* for_libhb__get_EC ( Thr* hbt )
{
   Thread*     thr;
   ThreadId    tid;
   ExeContext* ec;
   tl_assert(hbt);
   thr = libhb_get_Thr_hgthread( hbt );
   tl_assert(thr);
   tid = map_threads_maybe_reverse_lookup_SLOW(thr);
   /* this will assert if tid is invalid */
   ec = VG_(record_ExeContext)( tid, 0 );
   return ec;
}


static void hg_post_clo_init ( void )
{
   Thr* hbthr_root;

   if (HG_(clo_delta_stacktrace)
       && VG_(clo_vex_control).guest_chase_thresh != 0) {
      if (VG_(clo_verbosity) >= 2)
         VG_(message)(Vg_UserMsg,
                      "helgrind --delta-stacktrace=yes only works with "
                      "--vex-guest-chase-thresh=0\n"
                      "=> (re-setting it to 0)\n");
      VG_(clo_vex_control).guest_chase_thresh = 0;
   }


   /////////////////////////////////////////////
   hbthr_root = libhb_init( for_libhb__get_stacktrace,
                            for_libhb__get_EC );
   /////////////////////////////////////////////


   if (HG_(clo_track_lockorders))
      laog__init();

   initialise_data_structures(hbthr_root);
   if (VG_(clo_xtree_memory) == Vg_XTMemory_Full)
      // Activate full xtree memory profiling.
      VG_(XTMemory_Full_init)(VG_(XT_filter_1top_and_maybe_below_main));
}

static void hg_info_location (DiEpoch ep, Addr a)
{
   (void) HG_(get_and_pp_addrdescr) (ep, a);
}

static void hg_pre_clo_init ( void )
{
   VG_(details_name)            ("Helgrind");
   VG_(details_version)         (NULL);
   VG_(details_description)     ("a thread error detector");
   VG_(details_copyright_author)(
      "Copyright (C) 2007-2017, and GNU GPL'd, by OpenWorks LLP et al.");
   VG_(details_bug_reports_to)  (VG_BUGS_TO);
   VG_(details_avg_translation_sizeB) ( 320 );

   VG_(basic_tool_funcs)          (hg_post_clo_init,
                                   hg_instrument,
                                   hg_fini);

   VG_(needs_core_errors)         ();
   VG_(needs_tool_errors)         (HG_(eq_Error),
                                   HG_(before_pp_Error),
                                   HG_(pp_Error),
                                   False,/*show TIDs for errors*/
                                   HG_(update_extra),
                                   HG_(recognised_suppression),
                                   HG_(read_extra_suppression_info),
                                   HG_(error_matches_suppression),
                                   HG_(get_error_name),
                                   HG_(get_extra_suppression_info),
                                   HG_(print_extra_suppression_use),
                                   HG_(update_extra_suppression_use));

   VG_(needs_xml_output)          ();

   VG_(needs_command_line_options)(hg_process_cmd_line_option,
                                   hg_print_usage,
                                   hg_print_debug_usage);
   VG_(needs_client_requests)     (hg_handle_client_request);

   // FIXME?
   //VG_(needs_sanity_checks)       (hg_cheap_sanity_check,
   //                                hg_expensive_sanity_check);

   VG_(needs_print_stats) (hg_print_stats);
   VG_(needs_info_location) (hg_info_location);

   VG_(needs_malloc_replacement)  (hg_cli__malloc,
                                   hg_cli____builtin_new,
                                   hg_cli____builtin_vec_new,
                                   hg_cli__memalign,
                                   hg_cli__calloc,
                                   hg_cli__free,
                                   hg_cli____builtin_delete,
                                   hg_cli____builtin_vec_delete,
                                   hg_cli__realloc,
                                   hg_cli_malloc_usable_size,
                                   HG_CLI__DEFAULT_MALLOC_REDZONE_SZB );

   /* 21 Dec 08: disabled this; it mostly causes H to start more
      slowly and use significantly more memory, without very often
      providing useful results.  The user can request to load this
      information manually with --read-var-info=yes. */
   if (0) VG_(needs_var_info)(); /* optional */

   VG_(track_new_mem_startup)     ( evh__new_mem_w_perms );
   VG_(track_new_mem_stack_signal)( evh__new_mem_w_tid );
   VG_(track_new_mem_brk)         ( evh__new_mem_w_tid );
   VG_(track_new_mem_mmap)        ( evh__new_mem_w_perms );
   VG_(track_new_mem_stack)       ( evh__new_mem_stack );
   VG_(track_new_mem_stack_4)     ( evh__new_mem_stack_4 );
   VG_(track_new_mem_stack_8)     ( evh__new_mem_stack_8 );
   VG_(track_new_mem_stack_12)    ( evh__new_mem_stack_12 );
   VG_(track_new_mem_stack_16)    ( evh__new_mem_stack_16 );
   VG_(track_new_mem_stack_32)    ( evh__new_mem_stack_32 );
   VG_(track_new_mem_stack_112)   ( evh__new_mem_stack_112 );
   VG_(track_new_mem_stack_128)   ( evh__new_mem_stack_128 );
   VG_(track_new_mem_stack_144)   ( evh__new_mem_stack_144 );
   VG_(track_new_mem_stack_160)   ( evh__new_mem_stack_160 );

   // FIXME: surely this isn't thread-aware
   VG_(track_copy_mem_remap)      ( evh__copy_mem );

   VG_(track_change_mem_mprotect) ( evh__set_perms );

   VG_(track_die_mem_stack_signal)( evh__die_mem );
   VG_(track_die_mem_brk)         ( evh__die_mem_munmap );
   VG_(track_die_mem_munmap)      ( evh__die_mem_munmap );

   /* evh__die_mem ends by calling libhb_srange_noaccess_NoFX, which has
      no effect.  We therefore do not use VG_(track_die_mem_stack), as
      that would be an expensive way to do nothing. */
   // VG_(track_die_mem_stack)       ( evh__die_mem );

   // FIXME: what is this for?
   VG_(track_ban_mem_stack)       (NULL);

   VG_(track_pre_mem_read)        ( evh__pre_mem_read );
   VG_(track_pre_mem_read_asciiz) ( evh__pre_mem_read_asciiz );
   VG_(track_pre_mem_write)       ( evh__pre_mem_write );
   VG_(track_post_mem_write)      (NULL);

   /////////////////

   VG_(track_pre_thread_ll_create)( evh__pre_thread_ll_create );
   VG_(track_pre_thread_ll_exit)  ( evh__pre_thread_ll_exit );

   VG_(track_start_client_code)( evh__start_client_code );
   VG_(track_stop_client_code)( evh__stop_client_code );

   /* Ensure that requirements for "dodgy C-as-C++ style inheritance"
      as described in comments at the top of pub_tool_hashtable.h, are
      met.  Blargh. */
   tl_assert( sizeof(void*) == sizeof(struct _MallocMeta*) );
   tl_assert( sizeof(UWord) == sizeof(Addr) );
   hg_mallocmeta_table
      = VG_(HT_construct)( "hg_malloc_metadata_table" );
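
   /* Minimal sketch of the layout convention referred to above, assuming
      the usual pub_tool_hashtable.h rules: every node must begin with a
      next-pointer and a UWord-sized key, so the table can treat any node
      as a VgHashNode.  'ExampleNode' is a hypothetical type for
      illustration only, not the real MallocMeta (defined earlier in this
      file); kept under #if 0 so it is never compiled. */
#if 0
   typedef
      struct _ExampleNode {
         struct _ExampleNode* next;  // must come first: chained by the table
         UWord                key;   // must come second: hashed and compared
         SizeT                szB;   // tool-specific payload follows
      }
      ExampleNode;

   /* usage sketch:
        ExampleNode* nd = ...;           // fill in key and payload
        VG_(HT_add_node)( table, nd );   // table takes ownership of nd
        nd = VG_(HT_lookup)( table, some_key );
   */
#endif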

   MallocMeta_poolalloc = VG_(newPA) ( sizeof(MallocMeta),
                                       1000,
                                       HG_(zalloc),
                                       "hg_malloc_metadata_pool",
                                       HG_(free));

   // add a callback to clean up on (threaded) fork.
   VG_(atfork)(NULL/*pre*/, NULL/*parent*/, evh__atfork_child/*child*/);
}

VG_DETERMINE_INTERFACE_VERSION(hg_pre_clo_init)

/*--------------------------------------------------------------------*/
/*--- end                                                hg_main.c ---*/
/*--------------------------------------------------------------------*/