/*
 * Copyright (C) 2016 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <gtest/gtest.h>

#include <utils/StrongPointer.h>
#include <utils/RefBase.h>

#include <thread>
#include <atomic>
#include <sched.h>
#include <errno.h>

// Enhanced version of StrongPointer_test, but using RefBase underneath.

using namespace android;

static constexpr int NITERS = 1000000;

static constexpr int INITIAL_STRONG_VALUE = 1 << 28;  // Mirroring RefBase definition.
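// A freshly constructed RefBase object reports INITIAL_STRONG_VALUE until the
// first strong reference is taken; the first incStrong() rebases the count to 1.
// StrongMoves below relies on this to detect unexpected reference-count changes.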

class Foo : public RefBase {
public:
    Foo(bool* deleted_check) : mDeleted(deleted_check) {
        *mDeleted = false;
    }

    ~Foo() {
        *mDeleted = true;
    }
private:
    bool* mDeleted;
};

TEST(RefBase, StrongMoves) {
    bool isDeleted;
    Foo* foo = new Foo(&isDeleted);
    ASSERT_EQ(INITIAL_STRONG_VALUE, foo->getStrongCount());
    ASSERT_FALSE(isDeleted) << "Already deleted...?";
    sp<Foo> sp1(foo);
    wp<Foo> wp1(sp1);
    ASSERT_EQ(1, foo->getStrongCount());
    // Weak count includes both strong and weak references.
    ASSERT_EQ(2, foo->getWeakRefs()->getWeakCount());
    {
        sp<Foo> sp2 = std::move(sp1);
        ASSERT_EQ(1, foo->getStrongCount())
                << "std::move failed, incremented refcnt";
        ASSERT_EQ(nullptr, sp1.get()) << "std::move failed, sp1 is still valid";
        // The strong count isn't increasing; double-check that the moved-from
        // object is properly reset and doesn't delete early.
        sp1 = std::move(sp2);
    }
    ASSERT_FALSE(isDeleted) << "deleted too early! still has a reference!";
    {
        // Now double-check that it deletes on time.
        sp<Foo> sp2 = std::move(sp1);
    }
    ASSERT_TRUE(isDeleted) << "foo was leaked!";
    ASSERT_TRUE(wp1.promote().get() == nullptr);
}

TEST(RefBase, WeakCopies) {
    bool isDeleted;
    Foo* foo = new Foo(&isDeleted);
    EXPECT_EQ(0, foo->getWeakRefs()->getWeakCount());
    ASSERT_FALSE(isDeleted) << "Foo (weak) already deleted...?";
    wp<Foo> wp1(foo);
    EXPECT_EQ(1, foo->getWeakRefs()->getWeakCount());
    {
        wp<Foo> wp2 = wp1;
        ASSERT_EQ(2, foo->getWeakRefs()->getWeakCount());
    }
    EXPECT_EQ(1, foo->getWeakRefs()->getWeakCount());
    ASSERT_FALSE(isDeleted) << "deleted too early! still has a reference!";
    wp1 = nullptr;
    ASSERT_FALSE(isDeleted) << "Deletion on wp destruction should no longer occur";
}


// Set up a situation in which we race with visit2AndRemove() to delete
// 2 strong references.  The Bar destructor checks that there are no early
// deletions and that prior updates are visible to the destructor.
class Bar : public RefBase {
public:
    Bar(std::atomic<int>* delete_count) : mVisited1(false), mVisited2(false),
            mDeleteCount(delete_count) {
    }

    ~Bar() {
        EXPECT_TRUE(mVisited1);
        EXPECT_TRUE(mVisited2);
        (*mDeleteCount)++;
    }
    bool mVisited1;
    bool mVisited2;
private:
    std::atomic<int>* mDeleteCount;
};

static sp<Bar> buffer;
static std::atomic<bool> bufferFull(false);
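// Handoff protocol: the producer publishes a Bar through 'buffer' and then sets
// 'bufferFull'; the consumer clears 'buffer' and resets the flag. The default
// sequentially consistent ordering on the atomic flag orders the sp<> accesses
// between the two threads.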

// Wait until bufferFull has value val.
static inline void waitFor(bool val) {
    while (bufferFull != val) {}
}

cpu_set_t otherCpus;

// Divide the cpus we're allowed to run on into myCpus and otherCpus.
// Set origCpus to the processors we were originally allowed to run on.
// Return false if origCpus doesn't include at least processors 0 and 1.
static bool setExclusiveCpus(cpu_set_t* origCpus /* out */,
        cpu_set_t* myCpus /* out */, cpu_set_t* otherCpus) {
    if (sched_getaffinity(0, sizeof(cpu_set_t), origCpus) != 0) {
        return false;
    }
    if (!CPU_ISSET(0, origCpus) || !CPU_ISSET(1, origCpus)) {
        return false;
    }
    CPU_ZERO(myCpus);
    CPU_ZERO(otherCpus);
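    // CPU_OR into a zeroed set just copies origCpus; the loop below then
    // splits the two copies into disjoint even/odd halves.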
    CPU_OR(myCpus, myCpus, origCpus);
    CPU_OR(otherCpus, otherCpus, origCpus);
    for (unsigned i = 0; i < CPU_SETSIZE; ++i) {
        // I get the even cores, the other thread gets the odd ones.
        if (i & 1) {
            CPU_CLR(i, myCpus);
        } else {
            CPU_CLR(i, otherCpus);
        }
    }
    return true;
}

static void visit2AndRemove() {
    if (sched_setaffinity(0, sizeof(cpu_set_t), &otherCpus) != 0) {
        FAIL() << "setaffinity returned:" << errno;
    }
    for (int i = 0; i < NITERS; ++i) {
        waitFor(true);
        buffer->mVisited2 = true;
        buffer = nullptr;
        bufferFull = false;
    }
}

TEST(RefBase, RacingDestructors) {
    cpu_set_t origCpus;
    cpu_set_t myCpus;
    // Restrict us and the helper thread to disjoint cpu sets.
    // This prevents us from getting scheduled against each other,
    // which would be atrociously slow.
    if (setExclusiveCpus(&origCpus, &myCpus, &otherCpus)) {
        std::thread t(visit2AndRemove);
        std::atomic<int> deleteCount(0);
        if (sched_setaffinity(0, sizeof(cpu_set_t), &myCpus) != 0) {
            FAIL() << "setaffinity returned:" << errno;
        }
        for (int i = 0; i < NITERS; ++i) {
            waitFor(false);
            Bar* bar = new Bar(&deleteCount);
            sp<Bar> sp3(bar);
            buffer = sp3;
            bufferFull = true;
            ASSERT_TRUE(bar->getStrongCount() >= 1);
            // Weak count includes strong count.
            ASSERT_TRUE(bar->getWeakRefs()->getWeakCount() >= 1);
            sp3->mVisited1 = true;
            sp3 = nullptr;
        }
        t.join();
        if (sched_setaffinity(0, sizeof(cpu_set_t), &origCpus) != 0) {
            FAIL();
        }
        ASSERT_EQ(NITERS, deleteCount) << "Deletions missed!";
    }  // Otherwise this is slow and probably pointless on a uniprocessor.
}

static wp<Bar> wpBuffer;
static std::atomic<bool> wpBufferFull(false);
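// Same handoff protocol as above, but the payload is a weak reference, so the
// consumer has to promote() it before touching the object.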

// Wait until wpBufferFull has value val.
static inline void wpWaitFor(bool val) {
    while (wpBufferFull != val) {}
}

static void visit3AndRemove() {
    if (sched_setaffinity(0, sizeof(cpu_set_t), &otherCpus) != 0) {
        FAIL() << "setaffinity returned:" << errno;
    }
    for (int i = 0; i < NITERS; ++i) {
        wpWaitFor(true);
        {
            sp<Bar> sp1 = wpBuffer.promote();
            // We implicitly check that sp1 != NULL
            sp1->mVisited2 = true;
        }
        wpBuffer = nullptr;
        wpBufferFull = false;
    }
}

TEST(RefBase, RacingPromotions) {
    cpu_set_t origCpus;
    cpu_set_t myCpus;
    // Restrict us and the helper thread to disjoint cpu sets.
    // This prevents us from getting scheduled against each other,
    // which would be atrociously slow.
    if (setExclusiveCpus(&origCpus, &myCpus, &otherCpus)) {
        std::thread t(visit3AndRemove);
        std::atomic<int> deleteCount(0);
        if (sched_setaffinity(0, sizeof(cpu_set_t), &myCpus) != 0) {
            FAIL() << "setaffinity returned:" << errno;
        }
        for (int i = 0; i < NITERS; ++i) {
            Bar* bar = new Bar(&deleteCount);
            wp<Bar> wp1(bar);
            bar->mVisited1 = true;
            if (i % (NITERS / 10) == 0) {
                // Do this rarely, since it generates a log message.
                wp1 = nullptr;  // No longer destroys the object.
                wp1 = bar;
            }
            wpBuffer = wp1;
            ASSERT_EQ(bar->getWeakRefs()->getWeakCount(), 2);
            wpBufferFull = true;
            // This promotion races with the one in visit3AndRemove().
            // It may or may not succeed, but it shouldn't interfere with
            // the concurrent one.
            sp<Bar> sp1 = wp1.promote();
            wpWaitFor(false);  // Wait for the other thread to drop its strong pointer.
            sp1 = nullptr;
            // No strong pointers here.
            sp1 = wp1.promote();
            ASSERT_EQ(sp1.get(), nullptr) << "Dead wp promotion succeeded!";
        }
        t.join();
        if (sched_setaffinity(0, sizeof(cpu_set_t), &origCpus) != 0) {
            FAIL();
        }
        ASSERT_EQ(NITERS, deleteCount) << "Deletions missed!";
    }  // Otherwise this is slow and probably pointless on a uniprocessor.
}