//===-- tsan_shadow_test.cpp ----------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
//===----------------------------------------------------------------------===//
#include "tsan_platform.h"
#include "tsan_rtl.h"
#include "gtest/gtest.h"

namespace __tsan {

struct Region {
  uptr start;
  uptr end;
};

void CheckShadow(const Shadow *s, Sid sid, Epoch epoch, uptr addr, uptr size,
                 AccessType typ) {
  uptr addr1 = 0;
  uptr size1 = 0;
  AccessType typ1 = 0;
  s->GetAccess(&addr1, &size1, &typ1);
  CHECK_EQ(s->sid(), sid);
  CHECK_EQ(s->epoch(), epoch);
  CHECK_EQ(addr1, addr);
  CHECK_EQ(size1, size);
  CHECK_EQ(typ1, typ);
}

TEST(Shadow, Shadow) {
  Sid sid = static_cast<Sid>(11);
  Epoch epoch = static_cast<Epoch>(22);
  FastState fs;
  fs.SetSid(sid);
  fs.SetEpoch(epoch);
  CHECK_EQ(fs.sid(), sid);
  CHECK_EQ(fs.epoch(), epoch);
  CHECK_EQ(fs.GetIgnoreBit(), false);
  fs.SetIgnoreBit();
  CHECK_EQ(fs.GetIgnoreBit(), true);
  fs.ClearIgnoreBit();
  CHECK_EQ(fs.GetIgnoreBit(), false);

  Shadow s0(fs, 1, 2, kAccessWrite);
  CheckShadow(&s0, sid, epoch, 1, 2, kAccessWrite);
  Shadow s1(fs, 2, 3, kAccessRead);
  CheckShadow(&s1, sid, epoch, 2, 3, kAccessRead);
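  // Shadow keeps only the access's offset within its 8-byte shadow cell
  // (kShadowCell), not the full address, which is why 0xfffff8 + 4 below is
  // expected to read back as offset 4, and 0xfffff8 + 0 as offset 0.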
  Shadow s2(fs, 0xfffff8 + 4, 1, kAccessWrite | kAccessAtomic);
  CheckShadow(&s2, sid, epoch, 4, 1, kAccessWrite | kAccessAtomic);
  Shadow s3(fs, 0xfffff8 + 0, 8, kAccessRead | kAccessAtomic);
  CheckShadow(&s3, sid, epoch, 0, 8, kAccessRead | kAccessAtomic);

  CHECK(!s0.IsBothReadsOrAtomic(kAccessRead | kAccessAtomic));
  CHECK(!s1.IsBothReadsOrAtomic(kAccessAtomic));
  CHECK(!s1.IsBothReadsOrAtomic(kAccessWrite));
  CHECK(s1.IsBothReadsOrAtomic(kAccessRead));
  CHECK(s2.IsBothReadsOrAtomic(kAccessAtomic));
  CHECK(!s2.IsBothReadsOrAtomic(kAccessWrite));
  CHECK(!s2.IsBothReadsOrAtomic(kAccessRead));
  CHECK(s3.IsBothReadsOrAtomic(kAccessAtomic));
  CHECK(!s3.IsBothReadsOrAtomic(kAccessWrite));
  CHECK(s3.IsBothReadsOrAtomic(kAccessRead));

  CHECK(!s0.IsRWWeakerOrEqual(kAccessRead | kAccessAtomic));
  CHECK(s1.IsRWWeakerOrEqual(kAccessWrite));
  CHECK(s1.IsRWWeakerOrEqual(kAccessRead));
  CHECK(!s1.IsRWWeakerOrEqual(kAccessWrite | kAccessAtomic));

  CHECK(!s2.IsRWWeakerOrEqual(kAccessRead | kAccessAtomic));
  CHECK(s2.IsRWWeakerOrEqual(kAccessWrite | kAccessAtomic));
  CHECK(s2.IsRWWeakerOrEqual(kAccessRead));
  CHECK(s2.IsRWWeakerOrEqual(kAccessWrite));

  CHECK(s3.IsRWWeakerOrEqual(kAccessRead | kAccessAtomic));
  CHECK(s3.IsRWWeakerOrEqual(kAccessWrite | kAccessAtomic));
  CHECK(s3.IsRWWeakerOrEqual(kAccessRead));
  CHECK(s3.IsRWWeakerOrEqual(kAccessWrite));

  Shadow sro(Shadow::kRodata);
  CheckShadow(&sro, static_cast<Sid>(0), kEpochZero, 0, 0, kAccessRead);
}

TEST(Shadow, Mapping) {
  static int global;
  int stack;
  void *heap = malloc(0);
  free(heap);

  CHECK(IsAppMem((uptr)&global));
  CHECK(IsAppMem((uptr)&stack));
  CHECK(IsAppMem((uptr)heap));

  CHECK(IsShadowMem(MemToShadow((uptr)&global)));
  CHECK(IsShadowMem(MemToShadow((uptr)&stack)));
  CHECK(IsShadowMem(MemToShadow((uptr)heap)));
}

TEST(Shadow, Celling) {
  u64 aligned_data[4];
  char *data = (char *)aligned_data;
  CHECK(IsAligned(reinterpret_cast<uptr>(data), kShadowSize));
  RawShadow *s0 = MemToShadow((uptr)&data[0]);
  CHECK(IsAligned(reinterpret_cast<uptr>(s0), kShadowSize));
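  // All addresses within one kShadowCell-sized cell must map to the same
  // shadow block; each subsequent cell's shadow starts kShadowCnt RawShadows
  // later.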
  for (unsigned i = 1; i < kShadowCell; i++)
    CHECK_EQ(s0, MemToShadow((uptr)&data[i]));
  for (unsigned i = kShadowCell; i < 2 * kShadowCell; i++)
    CHECK_EQ(s0 + kShadowCnt, MemToShadow((uptr)&data[i]));
  for (unsigned i = 2 * kShadowCell; i < 3 * kShadowCell; i++)
    CHECK_EQ(s0 + 2 * kShadowCnt, MemToShadow((uptr)&data[i]));
}

// Detect whether the Mapping has a kBroken field.
template <uptr>
struct Has {
  typedef bool Result;
};

template <typename Mapping>
bool broken(...) {
  return false;
}

template <typename Mapping>
bool broken(uptr what, typename Has<Mapping::kBroken>::Result = false) {
  return Mapping::kBroken & what;
}
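
// The two `broken` overloads above form a SFINAE dispatch: if Mapping defines
// a kBroken constant, Has<Mapping::kBroken> is well-formed and the second,
// more specific overload is preferred; otherwise that overload is silently
// discarded and the variadic fallback returns false. For example, for a
// hypothetical mapping declaring
//   static const uptr kBroken = kBrokenReverseMapping;
// broken<ThatMapping>(kBrokenReverseMapping) would return true.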

static int CompareRegion(const void *region_a, const void *region_b) {
  uptr start_a = ((const struct Region *)region_a)->start;
  uptr start_b = ((const struct Region *)region_b)->start;

  if (start_a < start_b) {
    return -1;
  } else if (start_a > start_b) {
    return 1;
  } else {
    return 0;
  }
}

template <typename Mapping>
static void AddMetaRegion(struct Region *shadows, int *num_regions, uptr start,
                          uptr end) {
  // If the app region is not empty, add its meta to the array.
  if (start != end) {
    shadows[*num_regions].start = (uptr)MemToMetaImpl::Apply<Mapping>(start);
    shadows[*num_regions].end = (uptr)MemToMetaImpl::Apply<Mapping>(end - 1);
    *num_regions = (*num_regions) + 1;
  }
}

struct MappingTest {
  template <typename Mapping>
  static void Apply() {
    // Easy (but ugly) way to print the mapping name.
    Printf("%s\n", __PRETTY_FUNCTION__);
    TestRegion<Mapping>(Mapping::kLoAppMemBeg, Mapping::kLoAppMemEnd);
    TestRegion<Mapping>(Mapping::kMidAppMemBeg, Mapping::kMidAppMemEnd);
    TestRegion<Mapping>(Mapping::kHiAppMemBeg, Mapping::kHiAppMemEnd);
    TestRegion<Mapping>(Mapping::kHeapMemBeg, Mapping::kHeapMemEnd);

    TestDisjointMetas<Mapping>();

    // Not tested: the ordering of regions (low app vs. shadow vs. mid app
    // etc.). That is enforced at runtime by CheckAndProtect.
  }

  template <typename Mapping>
  static void TestRegion(uptr beg, uptr end) {
    if (beg == end)
      return;
    Printf("checking region [0x%zx-0x%zx)\n", beg, end);
    uptr prev = 0;
    for (uptr p0 = beg; p0 <= end; p0 += (end - beg) / 256) {
      for (int x = -(int)kShadowCell; x <= (int)kShadowCell; x += kShadowCell) {
        const uptr p = RoundDown(p0 + x, kShadowCell);
        if (p < beg || p >= end)
          continue;
        const uptr s = MemToShadowImpl::Apply<Mapping>(p);
        u32 *const m = MemToMetaImpl::Apply<Mapping>(p);
        const uptr r = ShadowToMemImpl::Apply<Mapping>(s);
        Printf("  addr=0x%zx: shadow=0x%zx meta=%p reverse=0x%zx\n", p, s, m,
               r);
        CHECK(IsAppMemImpl::Apply<Mapping>(p));
        if (!broken<Mapping>(kBrokenMapping))
          CHECK(IsShadowMemImpl::Apply<Mapping>(s));
        CHECK(IsMetaMemImpl::Apply<Mapping>(reinterpret_cast<uptr>(m)));
        CHECK_EQ(p, RestoreAddrImpl::Apply<Mapping>(CompressAddr(p)));
        if (!broken<Mapping>(kBrokenReverseMapping))
          CHECK_EQ(p, r);
        if (prev && !broken<Mapping>(kBrokenLinearity)) {
          // Ensure that shadow and meta mappings are linear within a single
          // user range. Lots of code that processes memory ranges assumes it.
          const uptr prev_s = MemToShadowImpl::Apply<Mapping>(prev);
          u32 *const prev_m = MemToMetaImpl::Apply<Mapping>(prev);
          CHECK_EQ(s - prev_s, (p - prev) * kShadowMultiplier);
          CHECK_EQ(m - prev_m, (p - prev) / kMetaShadowCell);
        }
        prev = p;
      }
    }
  }

  template <typename Mapping>
  static void TestDisjointMetas() {
    // Checks that the meta for each app region does not overlap with
    // the meta for other app regions. For example, the meta for a high
    // app pointer shouldn't be aliased to the meta of a mid app pointer.
    // Notice that this is important even though there does not exist a
    // MetaToMem function.
    // (If a MetaToMem function did exist, we could simply
    // check in the TestRegion function that it inverts MemToMeta.)
    //
    // We don't try to be clever by allowing the non-PIE (low app)
    // and PIE (mid and high app) meta regions to overlap.
    struct Region metas[4];
    int num_regions = 0;
    AddMetaRegion<Mapping>(metas, &num_regions, Mapping::kLoAppMemBeg,
                           Mapping::kLoAppMemEnd);
    AddMetaRegion<Mapping>(metas, &num_regions, Mapping::kMidAppMemBeg,
                           Mapping::kMidAppMemEnd);
    AddMetaRegion<Mapping>(metas, &num_regions, Mapping::kHiAppMemBeg,
                           Mapping::kHiAppMemEnd);
    AddMetaRegion<Mapping>(metas, &num_regions, Mapping::kHeapMemBeg,
                           Mapping::kHeapMemEnd);

    // It is not required that the low app shadow is below the mid app
    // shadow etc., hence we sort the shadows.
    qsort(metas, num_regions, sizeof(struct Region), CompareRegion);

    // Print in hex (%zx): the values are addresses, and the previous %lu
    // specifier printed decimal digits after a "0x" prefix.
    for (int i = 0; i < num_regions; i++)
      Printf("[0x%zx, 0x%zx]\n", metas[i].start, metas[i].end);

    if (!broken<Mapping>(kBrokenAliasedMetas))
      for (int i = 1; i < num_regions; i++)
        CHECK(metas[i - 1].end <= metas[i].start);
  }
};

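// ForEachMapping (declared in tsan_platform.h) invokes MappingTest::Apply for
// every address-space mapping compiled into this build.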
TEST(Shadow, AllMappings) { ForEachMapping<MappingTest>(); }

}  // namespace __tsan