// Copyright (c) 2020 The Bitcoin Core developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.

#include <txrequest.h>
#include <uint256.h>

#include <test/util/setup_common.h>

#include <algorithm>
#include <chrono>
#include <functional>
#include <map>
#include <set>
#include <string>
#include <utility>
#include <vector>

#include <boost/test/unit_test.hpp>

BOOST_FIXTURE_TEST_SUITE(txrequest_tests, BasicTestingSetup)

namespace {

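/** Sentinel and helper time values for the scenarios below: MIN_TIME serves as a reqtime that has
 *  always passed, MAX_TIME as a reqtime/expiry that never passes, MICROSECOND as the smallest time
 *  step, and NO_TIME as a zero delay. */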
constexpr std::chrono::microseconds MIN_TIME = std::chrono::microseconds::min();
constexpr std::chrono::microseconds MAX_TIME = std::chrono::microseconds::max();
constexpr std::chrono::microseconds MICROSECOND = std::chrono::microseconds{1};
constexpr std::chrono::microseconds NO_TIME = std::chrono::microseconds{0};

/** An Action is a function to call at a particular (simulated) timestamp. */
using Action = std::pair<std::chrono::microseconds, std::function<void()>>;

/** Object that stores actions from multiple interleaved scenarios, and data shared across them.
 *
 * The Scenario below is used to fill this.
 */
struct Runner
{
    /** The TxRequestTracker being tested. */
    TxRequestTracker txrequest;

    /** List of actions to be executed (in order of increasing timestamp). */
    std::vector<Action> actions;

    /** Which node ids have been assigned already (to prevent reuse). */
    std::set<NodeId> peerset;

    /** Which txhashes have been assigned already (to prevent reuse). */
    std::set<uint256> txhashset;

    /** Which (peer, gtxid) combinations are known to be expired. These need to be accumulated here instead of
     *  checked directly in the GetRequestable return value to avoid introducing a dependency between the various
     *  parallel tests. */
    std::multiset<std::pair<NodeId, GenTxid>> expired;
};

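/** Random durations: RandomTime8s() returns up to ~8.4 seconds (2^23 us), RandomTime1y() up to
 *  ~1.1 years (2^45 us); both are at least 1 microsecond. */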
std::chrono::microseconds RandomTime8s() { return std::chrono::microseconds{1 + InsecureRandBits(23)}; }
std::chrono::microseconds RandomTime1y() { return std::chrono::microseconds{1 + InsecureRandBits(45)}; }

/** A proxy for a Runner that helps build a sequence of consecutive test actions on a TxRequestTracker.
 *
 * Each Scenario is a proxy through which actions for the (sequential) execution of various tests are added to a
 * Runner. The actions from multiple scenarios are then run concurrently, resulting in these tests being performed
 * against a TxRequestTracker in parallel. Every test has its own unique txhashes and NodeIds which are not
 * reused in other tests, and thus they should be independent from each other. Running them in parallel however
 * means that we verify the behavior (w.r.t. one test's txhashes and NodeIds) even when the state of the data
 * structure is more complicated due to the presence of other tests.
 */
class Scenario
{
    Runner& m_runner;
    std::chrono::microseconds m_now;
    std::string m_testname;

public:
    Scenario(Runner& runner, std::chrono::microseconds starttime) : m_runner(runner), m_now(starttime) {}

    /** Set a name for the current test, to give clearer error messages. */
    void SetTestName(std::string testname)
    {
        m_testname = std::move(testname);
    }

    /** Advance this Scenario's time; this affects the timestamps newly scheduled events get. */
    void AdvanceTime(std::chrono::microseconds amount)
    {
        assert(amount.count() >= 0);
        m_now += amount;
    }

    /** Schedule a ForgetTxHash call at the Scenario's current time. */
    void ForgetTxHash(const uint256& txhash)
    {
        auto& runner = m_runner;
        runner.actions.emplace_back(m_now, [=,&runner]() {
            runner.txrequest.ForgetTxHash(txhash);
            runner.txrequest.SanityCheck();
        });
    }

    /** Schedule a ReceivedInv call at the Scenario's current time. */
    void ReceivedInv(NodeId peer, const GenTxid& gtxid, bool pref, std::chrono::microseconds reqtime)
    {
        auto& runner = m_runner;
        runner.actions.emplace_back(m_now, [=,&runner]() {
            runner.txrequest.ReceivedInv(peer, gtxid, pref, reqtime);
            runner.txrequest.SanityCheck();
        });
    }

    /** Schedule a DisconnectedPeer call at the Scenario's current time. */
    void DisconnectedPeer(NodeId peer)
    {
        auto& runner = m_runner;
        runner.actions.emplace_back(m_now, [=,&runner]() {
            runner.txrequest.DisconnectedPeer(peer);
            runner.txrequest.SanityCheck();
        });
    }

    /** Schedule a RequestedTx call at the Scenario's current time. */
    void RequestedTx(NodeId peer, const uint256& txhash, std::chrono::microseconds exptime)
    {
        auto& runner = m_runner;
        runner.actions.emplace_back(m_now, [=,&runner]() {
            runner.txrequest.RequestedTx(peer, txhash, exptime);
            runner.txrequest.SanityCheck();
        });
    }

    /** Schedule a ReceivedResponse call at the Scenario's current time. */
    void ReceivedResponse(NodeId peer, const uint256& txhash)
    {
        auto& runner = m_runner;
        runner.actions.emplace_back(m_now, [=,&runner]() {
            runner.txrequest.ReceivedResponse(peer, txhash);
            runner.txrequest.SanityCheck();
        });
    }

    /** Schedule calls to verify the TxRequestTracker's state at the Scenario's current time.
     *
     * @param peer       The peer whose state will be inspected.
     * @param expected   The expected return value of GetRequestable(peer)
     * @param candidates The expected return value of CountCandidates(peer)
     * @param inflight   The expected return value of CountInFlight(peer)
     * @param completed  The expected return value of Count(peer), minus candidates and inflight.
     * @param checkname  An arbitrary string to include in error messages, for test identification.
     * @param offset     Offset relative to the current time to use (must be <= 0). This allows simulating
     *                   time going backwards (but note that the ordering of this event only follows the
     *                   scenario's m_now).
     */
    void Check(NodeId peer, const std::vector<GenTxid>& expected, size_t candidates, size_t inflight,
        size_t completed, const std::string& checkname,
        std::chrono::microseconds offset = std::chrono::microseconds{0})
    {
        const auto comment = m_testname + " " + checkname;
        auto& runner = m_runner;
        const auto now = m_now;
        assert(offset.count() <= 0);
        runner.actions.emplace_back(m_now, [=,&runner]() {
            std::vector<std::pair<NodeId, GenTxid>> expired_now;
            auto ret = runner.txrequest.GetRequestable(peer, now + offset, &expired_now);
            for (const auto& entry : expired_now) runner.expired.insert(entry);
            runner.txrequest.SanityCheck();
            runner.txrequest.PostGetRequestableSanityCheck(now + offset);
            size_t total = candidates + inflight + completed;
            size_t real_total = runner.txrequest.Count(peer);
            size_t real_candidates = runner.txrequest.CountCandidates(peer);
            size_t real_inflight = runner.txrequest.CountInFlight(peer);
            BOOST_CHECK_MESSAGE(real_total == total, strprintf("[" + comment + "] total %i (%i expected)", real_total, total));
            BOOST_CHECK_MESSAGE(real_inflight == inflight, strprintf("[" + comment + "] inflight %i (%i expected)", real_inflight, inflight));
            BOOST_CHECK_MESSAGE(real_candidates == candidates, strprintf("[" + comment + "] candidates %i (%i expected)", real_candidates, candidates));
            BOOST_CHECK_MESSAGE(ret == expected, "[" + comment + "] mismatching requestables");
        });
    }

    /** Verify that an announcement for gtxid by peer has expired some time before this check is scheduled.
     *
     * Every expected expiration should be accounted for through exactly one call to this function.
     */
    void CheckExpired(NodeId peer, GenTxid gtxid)
    {
        const auto& testname = m_testname;
        auto& runner = m_runner;
        runner.actions.emplace_back(m_now, [=,&runner]() {
            auto it = runner.expired.find(std::pair<NodeId, GenTxid>{peer, gtxid});
            BOOST_CHECK_MESSAGE(it != runner.expired.end(), "[" + testname + "] missing expiration");
            if (it != runner.expired.end()) runner.expired.erase(it);
        });
    }

    /** Generate a random txhash, whose priorities for certain peers are constrained.
     *
     * For example, NewTxHash({{p1,p2,p3},{p2,p4,p5}}) will generate a txhash T such that both:
     *  - priority(p1,T) > priority(p2,T) > priority(p3,T)
     *  - priority(p2,T) > priority(p4,T) > priority(p5,T)
     * where priority is the predicted internal TxRequestTracker's priority, assuming all announcements
     * are within the same preferredness class.
     */
    uint256 NewTxHash(const std::vector<std::vector<NodeId>>& orders = {})
    {
        uint256 ret;
        bool ok;
        do {
            ret = InsecureRand256();
            ok = true;
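            // Accept the candidate only if its priority strictly decreases along every requested
            // ordering (checked below), and if it hasn't been handed out before.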
            for (const auto& order : orders) {
                for (size_t pos = 1; pos < order.size(); ++pos) {
                    uint64_t prio_prev = m_runner.txrequest.ComputePriority(ret, order[pos - 1], true);
                    uint64_t prio_cur = m_runner.txrequest.ComputePriority(ret, order[pos], true);
                    if (prio_prev <= prio_cur) {
                        ok = false;
                        break;
                    }
                }
                if (!ok) break;
            }
            if (ok) {
                ok = m_runner.txhashset.insert(ret).second;
            }
        } while(!ok);
        return ret;
    }

    /** Generate a random GenTxid; the txhash follows NewTxHash; the is_wtxid flag is random. */
    GenTxid NewGTxid(const std::vector<std::vector<NodeId>>& orders = {})
    {
        return {InsecureRandBool(), NewTxHash(orders)};
    }

    /** Generate a new random NodeId to use as peer. The same NodeId is never returned twice
     *  (across all Scenarios combined). */
    NodeId NewPeer()
    {
        bool ok;
        NodeId ret;
        do {
            ret = InsecureRandBits(63);
            ok = m_runner.peerset.insert(ret).second;
        } while(!ok);
        return ret;
    }

    std::chrono::microseconds Now() const { return m_now; }
};

/** Add to scenario a test with a single tx announced by a single peer.
 *
 * config is an integer in [0, 32), which controls which variant of the test is used.
 */
void BuildSingleTest(Scenario& scenario, int config)
{
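    // Config bit layout: bit 0 = the announcement is immediately requestable, bit 1 = it is
    // preferred, bit 2 = end the test with DisconnectedPeer (otherwise ForgetTxHash), bits 3-4 =
    // whether and how the transaction is requested (0 = never, 1 = the request expires,
    // 2 = the request stays in flight until the final disconnect/forget, 3 = a response arrives).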
    auto peer = scenario.NewPeer();
    auto gtxid = scenario.NewGTxid();
    bool immediate = config & 1;
    bool preferred = config & 2;
    auto delay = immediate ? NO_TIME : RandomTime8s();

    scenario.SetTestName(strprintf("Single(config=%i)", config));

    // Receive an announcement, either immediately requestable or delayed.
    scenario.ReceivedInv(peer, gtxid, preferred, immediate ? MIN_TIME : scenario.Now() + delay);
    if (immediate) {
        scenario.Check(peer, {gtxid}, 1, 0, 0, "s1");
    } else {
        scenario.Check(peer, {}, 1, 0, 0, "s2");
        scenario.AdvanceTime(delay - MICROSECOND);
        scenario.Check(peer, {}, 1, 0, 0, "s3");
        scenario.AdvanceTime(MICROSECOND);
        scenario.Check(peer, {gtxid}, 1, 0, 0, "s4");
    }

    if (config >> 3) { // We'll request the transaction
        scenario.AdvanceTime(RandomTime8s());
        auto expiry = RandomTime8s();
        scenario.Check(peer, {gtxid}, 1, 0, 0, "s5");
        scenario.RequestedTx(peer, gtxid.GetHash(), scenario.Now() + expiry);
        scenario.Check(peer, {}, 0, 1, 0, "s6");

        if ((config >> 3) == 1) { // The request will time out
            scenario.AdvanceTime(expiry - MICROSECOND);
            scenario.Check(peer, {}, 0, 1, 0, "s7");
            scenario.AdvanceTime(MICROSECOND);
            scenario.Check(peer, {}, 0, 0, 0, "s8");
            scenario.CheckExpired(peer, gtxid);
            return;
        } else {
            scenario.AdvanceTime(std::chrono::microseconds{InsecureRandRange(expiry.count())});
            scenario.Check(peer, {}, 0, 1, 0, "s9");
            if ((config >> 3) == 3) { // A response will arrive for the transaction
                scenario.ReceivedResponse(peer, gtxid.GetHash());
                scenario.Check(peer, {}, 0, 0, 0, "s10");
                return;
            }
        }
    }

    if (config & 4) { // The peer will go offline
        scenario.DisconnectedPeer(peer);
    } else { // The transaction is no longer needed
        scenario.ForgetTxHash(gtxid.GetHash());
    }
    scenario.Check(peer, {}, 0, 0, 0, "s11");
}

/** Add to scenario a test with a single tx announced by two peers, to verify the
 *  right peer is selected for requests.
 *
 * config is an integer in [0, 32), which controls which variant of the test is used.
 */
void BuildPriorityTest(Scenario& scenario, int config)
{
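    // Config bit layout: bit 0 = peer1 has the higher randomized priority, bit 1 = peer1's
    // announcement is preferred, bit 2 = peer2's announcement is preferred, bit 3 = request from
    // the selected peer, bit 4 = the selected peer disconnects (otherwise a NOTFOUND is received).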
    scenario.SetTestName(strprintf("Priority(config=%i)", config));

    // Two peers. They will announce in order {peer1, peer2}.
    auto peer1 = scenario.NewPeer(), peer2 = scenario.NewPeer();
    // Construct a transaction whose randomized priority favors peer1 or peer2, depending on the
    // configuration.
    bool prio1 = config & 1;
    auto gtxid = prio1 ? scenario.NewGTxid({{peer1, peer2}}) : scenario.NewGTxid({{peer2, peer1}});
    bool pref1 = config & 2, pref2 = config & 4;

    scenario.ReceivedInv(peer1, gtxid, pref1, MIN_TIME);
    scenario.Check(peer1, {gtxid}, 1, 0, 0, "p1");
    if (InsecureRandBool()) {
        scenario.AdvanceTime(RandomTime8s());
        scenario.Check(peer1, {gtxid}, 1, 0, 0, "p2");
    }

    scenario.ReceivedInv(peer2, gtxid, pref2, MIN_TIME);
    bool stage2_prio =
        // At this point, peer2 will be given priority if:
        // - It is preferred and peer1 is not
        (pref2 && !pref1) ||
        // - They're in the same preference class,
        //   and the randomized priority favors peer2 over peer1.
        (pref1 == pref2 && !prio1);
    NodeId priopeer = stage2_prio ? peer2 : peer1, otherpeer = stage2_prio ? peer1 : peer2;
    scenario.Check(otherpeer, {}, 1, 0, 0, "p3");
    scenario.Check(priopeer, {gtxid}, 1, 0, 0, "p4");
    if (InsecureRandBool()) scenario.AdvanceTime(RandomTime8s());
    scenario.Check(otherpeer, {}, 1, 0, 0, "p5");
    scenario.Check(priopeer, {gtxid}, 1, 0, 0, "p6");

    // We possibly request from the selected peer.
    if (config & 8) {
        scenario.RequestedTx(priopeer, gtxid.GetHash(), MAX_TIME);
        scenario.Check(priopeer, {}, 0, 1, 0, "p7");
        scenario.Check(otherpeer, {}, 1, 0, 0, "p8");
        if (InsecureRandBool()) scenario.AdvanceTime(RandomTime8s());
    }

    // The peer which was selected (or requested from) now goes offline, or a NOTFOUND is received from it.
    if (config & 16) {
        scenario.DisconnectedPeer(priopeer);
    } else {
        scenario.ReceivedResponse(priopeer, gtxid.GetHash());
    }
    if (InsecureRandBool()) scenario.AdvanceTime(RandomTime8s());
    scenario.Check(priopeer, {}, 0, 0, !(config & 16), "p9");
    scenario.Check(otherpeer, {gtxid}, 1, 0, 0, "p10");
    if (InsecureRandBool()) scenario.AdvanceTime(RandomTime8s());

    // Now the other peer goes offline.
    scenario.DisconnectedPeer(otherpeer);
    if (InsecureRandBool()) scenario.AdvanceTime(RandomTime8s());
    scenario.Check(peer1, {}, 0, 0, 0, "p11");
    scenario.Check(peer2, {}, 0, 0, 0, "p12");
}

/** Add to scenario a randomized test in which N peers announce the same transaction, to verify
 *  the order in which they are requested. */
void BuildBigPriorityTest(Scenario& scenario, int peers)
{
    scenario.SetTestName(strprintf("BigPriority(peers=%i)", peers));

    // We will have N peers announce the same transaction.
    std::map<NodeId, bool> preferred;
    std::vector<NodeId> pref_peers, npref_peers;
    int num_pref = InsecureRandRange(peers + 1); // Some preferred, ...
    int num_npref = peers - num_pref; // some not preferred.
    for (int i = 0; i < num_pref; ++i) {
        pref_peers.push_back(scenario.NewPeer());
        preferred[pref_peers.back()] = true;
    }
    for (int i = 0; i < num_npref; ++i) {
        npref_peers.push_back(scenario.NewPeer());
        preferred[npref_peers.back()] = false;
    }
    // Make a list of all peers, in order of intended request order (concatenation of pref_peers and npref_peers).
    std::vector<NodeId> request_order;
    for (int i = 0; i < num_pref; ++i) request_order.push_back(pref_peers[i]);
    for (int i = 0; i < num_npref; ++i) request_order.push_back(npref_peers[i]);

    // Determine the announcement order randomly.
    std::vector<NodeId> announce_order = request_order;
    Shuffle(announce_order.begin(), announce_order.end(), g_insecure_rand_ctx);

    // Find a gtxid whose txhash prioritization is consistent with the required ordering within pref_peers and
    // within npref_peers.
    auto gtxid = scenario.NewGTxid({pref_peers, npref_peers});

    // Decide reqtimes in the opposite order of the expected request order. This means that as time passes,
    // the to-be-requested-from peer is expected to change every time a subsequent reqtime passes.
    std::map<NodeId, std::chrono::microseconds> reqtimes;
    auto reqtime = scenario.Now();
    for (int i = peers - 1; i >= 0; --i) {
        reqtime += RandomTime8s();
        reqtimes[request_order[i]] = reqtime;
    }

    // Actually announce from all peers simultaneously (but in announce_order).
    for (const auto peer : announce_order) {
        scenario.ReceivedInv(peer, gtxid, preferred[peer], reqtimes[peer]);
    }
    for (const auto peer : announce_order) {
        scenario.Check(peer, {}, 1, 0, 0, "b1");
    }

    // Let time pass and observe the to-be-requested-from peer change, from nonpreferred to preferred, and from
    // high priority to low priority within each class.
    for (int i = peers - 1; i >= 0; --i) {
        scenario.AdvanceTime(reqtimes[request_order[i]] - scenario.Now() - MICROSECOND);
        scenario.Check(request_order[i], {}, 1, 0, 0, "b2");
        scenario.AdvanceTime(MICROSECOND);
        scenario.Check(request_order[i], {gtxid}, 1, 0, 0, "b3");
    }

    // Peers now in random order go offline, or send NOTFOUNDs. At every point in time the new to-be-requested-from
    // peer should be the best remaining one, so verify this after every response.
    for (int i = 0; i < peers; ++i) {
        if (InsecureRandBool()) scenario.AdvanceTime(RandomTime8s());
        const int pos = InsecureRandRange(request_order.size());
        const auto peer = request_order[pos];
        request_order.erase(request_order.begin() + pos);
        if (InsecureRandBool()) {
            scenario.DisconnectedPeer(peer);
            scenario.Check(peer, {}, 0, 0, 0, "b4");
        } else {
            scenario.ReceivedResponse(peer, gtxid.GetHash());
            scenario.Check(peer, {}, 0, 0, request_order.size() > 0, "b5");
        }
        if (request_order.size()) {
            scenario.Check(request_order[0], {gtxid}, 1, 0, 0, "b6");
        }
    }

    // Everything is gone in the end.
    for (const auto peer : announce_order) {
        scenario.Check(peer, {}, 0, 0, 0, "b7");
    }
}

/** Add to scenario a test with one peer announcing two transactions, to verify they are
 *  fetched in announcement order.
 *
 *  config is an integer in [0, 4), and selects the variant of the test.
 */
void BuildRequestOrderTest(Scenario& scenario, int config)
{
    scenario.SetTestName(strprintf("RequestOrder(config=%i)", config));

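    // Config bit layout: bit 0 = the first announcement is preferred, bit 1 = the second one is.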
    auto peer = scenario.NewPeer();
    auto gtxid1 = scenario.NewGTxid();
    auto gtxid2 = scenario.NewGTxid();

    auto reqtime2 = scenario.Now() + RandomTime8s();
    auto reqtime1 = reqtime2 + RandomTime8s();

    scenario.ReceivedInv(peer, gtxid1, config & 1, reqtime1);
    // Simulate time going backwards by giving the second announcement an earlier reqtime.
    scenario.ReceivedInv(peer, gtxid2, config & 2, reqtime2);

    scenario.AdvanceTime(reqtime2 - MICROSECOND - scenario.Now());
    scenario.Check(peer, {}, 2, 0, 0, "o1");
    scenario.AdvanceTime(MICROSECOND);
    scenario.Check(peer, {gtxid2}, 2, 0, 0, "o2");
    scenario.AdvanceTime(reqtime1 - MICROSECOND - scenario.Now());
    scenario.Check(peer, {gtxid2}, 2, 0, 0, "o3");
    scenario.AdvanceTime(MICROSECOND);
    // Even with time going backwards in between announcements, the return value of GetRequestable is in
    // announcement order.
    scenario.Check(peer, {gtxid1, gtxid2}, 2, 0, 0, "o4");

    scenario.DisconnectedPeer(peer);
    scenario.Check(peer, {}, 0, 0, 0, "o5");
}

/** Add to scenario a test that verifies behavior related to both txid and wtxid with the same
 *  hash being announced.
 *
 *  config is an integer in [0, 4), and selects the variant of the test used.
 */
void BuildWtxidTest(Scenario& scenario, int config)
{
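    // Config bit layout: bit 0 = the txid announcement arrives first, bit 1 = the txid announcement
    // is the preferred one (otherwise the wtxid announcement is).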
    scenario.SetTestName(strprintf("Wtxid(config=%i)", config));

    auto peerT = scenario.NewPeer();
    auto peerW = scenario.NewPeer();
    auto txhash = scenario.NewTxHash();
    GenTxid txid{false, txhash};
    GenTxid wtxid{true, txhash};

    auto reqtimeT = InsecureRandBool() ? MIN_TIME : scenario.Now() + RandomTime8s();
    auto reqtimeW = InsecureRandBool() ? MIN_TIME : scenario.Now() + RandomTime8s();

    // Announce txid first or wtxid first.
    if (config & 1) {
        scenario.ReceivedInv(peerT, txid, config & 2, reqtimeT);
        if (InsecureRandBool()) scenario.AdvanceTime(RandomTime8s());
        scenario.ReceivedInv(peerW, wtxid, !(config & 2), reqtimeW);
    } else {
        scenario.ReceivedInv(peerW, wtxid, !(config & 2), reqtimeW);
        if (InsecureRandBool()) scenario.AdvanceTime(RandomTime8s());
        scenario.ReceivedInv(peerT, txid, config & 2, reqtimeT);
    }

    // Let time pass if needed, and check that the preferred announcement (txid or wtxid)
    // is correctly to-be-requested (and with the correct wtxidness).
    auto max_reqtime = std::max(reqtimeT, reqtimeW);
    if (max_reqtime > scenario.Now()) scenario.AdvanceTime(max_reqtime - scenario.Now());
    if (config & 2) {
        scenario.Check(peerT, {txid}, 1, 0, 0, "w1");
        scenario.Check(peerW, {}, 1, 0, 0, "w2");
    } else {
        scenario.Check(peerT, {}, 1, 0, 0, "w3");
        scenario.Check(peerW, {wtxid}, 1, 0, 0, "w4");
    }

    // Let the preferred announcement be requested. It's not going to be delivered.
    auto expiry = RandomTime8s();
    if (config & 2) {
        scenario.RequestedTx(peerT, txid.GetHash(), scenario.Now() + expiry);
        scenario.Check(peerT, {}, 0, 1, 0, "w5");
        scenario.Check(peerW, {}, 1, 0, 0, "w6");
    } else {
        scenario.RequestedTx(peerW, wtxid.GetHash(), scenario.Now() + expiry);
        scenario.Check(peerT, {}, 1, 0, 0, "w7");
        scenario.Check(peerW, {}, 0, 1, 0, "w8");
    }

    // After reaching the expiration time of the preferred announcement, verify that the
    // remaining one is requestable.
    scenario.AdvanceTime(expiry);
    if (config & 2) {
        scenario.Check(peerT, {}, 0, 0, 1, "w9");
        scenario.Check(peerW, {wtxid}, 1, 0, 0, "w10");
        scenario.CheckExpired(peerT, txid);
    } else {
        scenario.Check(peerT, {txid}, 1, 0, 0, "w11");
        scenario.Check(peerW, {}, 0, 0, 1, "w12");
        scenario.CheckExpired(peerW, wtxid);
    }

    // If a good transaction with either that hash as wtxid or txid arrives, both
    // announcements are gone.
    if (InsecureRandBool()) scenario.AdvanceTime(RandomTime8s());
    scenario.ForgetTxHash(txhash);
    scenario.Check(peerT, {}, 0, 0, 0, "w13");
    scenario.Check(peerW, {}, 0, 0, 0, "w14");
}

/** Add to scenario a test that exercises clocks that go backwards. */
void BuildTimeBackwardsTest(Scenario& scenario)
{
    auto peer1 = scenario.NewPeer();
    auto peer2 = scenario.NewPeer();
    auto gtxid = scenario.NewGTxid({{peer1, peer2}});

    // Announce from peer2.
    auto reqtime = scenario.Now() + RandomTime8s();
    scenario.ReceivedInv(peer2, gtxid, true, reqtime);
    scenario.Check(peer2, {}, 1, 0, 0, "r1");
    scenario.AdvanceTime(reqtime - scenario.Now());
    scenario.Check(peer2, {gtxid}, 1, 0, 0, "r2");
    // Check that if the clock goes backwards by 1us, the transaction would stop being requested.
    scenario.Check(peer2, {}, 1, 0, 0, "r3", -MICROSECOND);
    // But it reverts to being requested if time goes forward again.
    scenario.Check(peer2, {gtxid}, 1, 0, 0, "r4");

    // Announce from peer1.
    if (InsecureRandBool()) scenario.AdvanceTime(RandomTime8s());
    scenario.ReceivedInv(peer1, gtxid, true, MAX_TIME);
    scenario.Check(peer2, {gtxid}, 1, 0, 0, "r5");
    scenario.Check(peer1, {}, 1, 0, 0, "r6");

    // Request from peer1.
    if (InsecureRandBool()) scenario.AdvanceTime(RandomTime8s());
    auto expiry = scenario.Now() + RandomTime8s();
    scenario.RequestedTx(peer1, gtxid.GetHash(), expiry);
    scenario.Check(peer1, {}, 0, 1, 0, "r7");
    scenario.Check(peer2, {}, 1, 0, 0, "r8");

    // Expiration passes.
    scenario.AdvanceTime(expiry - scenario.Now());
    scenario.Check(peer1, {}, 0, 0, 1, "r9");
    scenario.Check(peer2, {gtxid}, 1, 0, 0, "r10"); // Request goes back to peer2.
    scenario.CheckExpired(peer1, gtxid);
    scenario.Check(peer1, {}, 0, 0, 1, "r11", -MICROSECOND); // Going back does not unexpire.
    scenario.Check(peer2, {gtxid}, 1, 0, 0, "r12", -MICROSECOND);

    // Peer2 goes offline, meaning no viable announcements remain.
    if (InsecureRandBool()) scenario.AdvanceTime(RandomTime8s());
    scenario.DisconnectedPeer(peer2);
    scenario.Check(peer1, {}, 0, 0, 0, "r13");
    scenario.Check(peer2, {}, 0, 0, 0, "r14");
}

/** Add to scenario a test that involves RequestedTx() calls for txhashes not returned by GetRequestable. */
void BuildWeirdRequestsTest(Scenario& scenario)
{
    auto peer1 = scenario.NewPeer();
    auto peer2 = scenario.NewPeer();
    auto gtxid1 = scenario.NewGTxid({{peer1, peer2}});
    auto gtxid2 = scenario.NewGTxid({{peer2, peer1}});

    // Announce gtxid1 by peer1.
    scenario.ReceivedInv(peer1, gtxid1, true, MIN_TIME);
    scenario.Check(peer1, {gtxid1}, 1, 0, 0, "q1");

    // Announce gtxid2 by peer2.
    if (InsecureRandBool()) scenario.AdvanceTime(RandomTime8s());
    scenario.ReceivedInv(peer2, gtxid2, true, MIN_TIME);
    scenario.Check(peer1, {gtxid1}, 1, 0, 0, "q2");
    scenario.Check(peer2, {gtxid2}, 1, 0, 0, "q3");

    // We request gtxid2 from *peer1* - no effect.
    if (InsecureRandBool()) scenario.AdvanceTime(RandomTime8s());
    scenario.RequestedTx(peer1, gtxid2.GetHash(), MAX_TIME);
    scenario.Check(peer1, {gtxid1}, 1, 0, 0, "q4");
    scenario.Check(peer2, {gtxid2}, 1, 0, 0, "q5");

    // Now request gtxid1 from peer1 - marks it as REQUESTED.
    if (InsecureRandBool()) scenario.AdvanceTime(RandomTime8s());
    auto expiryA = scenario.Now() + RandomTime8s();
    scenario.RequestedTx(peer1, gtxid1.GetHash(), expiryA);
    scenario.Check(peer1, {}, 0, 1, 0, "q6");
    scenario.Check(peer2, {gtxid2}, 1, 0, 0, "q7");

    // Request it a second time - nothing happens, as it's already REQUESTED.
    auto expiryB = expiryA + RandomTime8s();
    scenario.RequestedTx(peer1, gtxid1.GetHash(), expiryB);
    scenario.Check(peer1, {}, 0, 1, 0, "q8");
    scenario.Check(peer2, {gtxid2}, 1, 0, 0, "q9");

    // Also announce gtxid1 from peer2 now, so that the txhash isn't forgotten when the peer1 request expires.
    scenario.ReceivedInv(peer2, gtxid1, true, MIN_TIME);
    scenario.Check(peer1, {}, 0, 1, 0, "q10");
    scenario.Check(peer2, {gtxid2}, 2, 0, 0, "q11");

    // When reaching expiryA, it expires (not expiryB, which is later).
    scenario.AdvanceTime(expiryA - scenario.Now());
    scenario.Check(peer1, {}, 0, 0, 1, "q12");
    scenario.Check(peer2, {gtxid2, gtxid1}, 2, 0, 0, "q13");
    scenario.CheckExpired(peer1, gtxid1);

    // Requesting it yet again from peer1 doesn't do anything, as it's already COMPLETED.
    if (InsecureRandBool()) scenario.AdvanceTime(RandomTime8s());
    scenario.RequestedTx(peer1, gtxid1.GetHash(), MAX_TIME);
    scenario.Check(peer1, {}, 0, 0, 1, "q14");
    scenario.Check(peer2, {gtxid2, gtxid1}, 2, 0, 0, "q15");

    // Now announce gtxid2 from peer1.
    if (InsecureRandBool()) scenario.AdvanceTime(RandomTime8s());
    scenario.ReceivedInv(peer1, gtxid2, true, MIN_TIME);
    scenario.Check(peer1, {}, 1, 0, 1, "q16");
    scenario.Check(peer2, {gtxid2, gtxid1}, 2, 0, 0, "q17");

    // And request it from peer1 (weird as peer2 has the preference).
    if (InsecureRandBool()) scenario.AdvanceTime(RandomTime8s());
    scenario.RequestedTx(peer1, gtxid2.GetHash(), MAX_TIME);
    scenario.Check(peer1, {}, 0, 1, 1, "q18");
    scenario.Check(peer2, {gtxid1}, 2, 0, 0, "q19");

    // If peer2 now (normally) requests gtxid2, the existing request by peer1 becomes COMPLETED.
    if (InsecureRandBool()) scenario.AdvanceTime(RandomTime8s());
    scenario.RequestedTx(peer2, gtxid2.GetHash(), MAX_TIME);
    scenario.Check(peer1, {}, 0, 0, 2, "q20");
    scenario.Check(peer2, {gtxid1}, 1, 1, 0, "q21");

    // If peer2 goes offline, no viable announcements remain.
    scenario.DisconnectedPeer(peer2);
    scenario.Check(peer1, {}, 0, 0, 0, "q22");
    scenario.Check(peer2, {}, 0, 0, 0, "q23");
}

void TestInterleavedScenarios()
{
    // Create a list of functions which add tests to scenarios.
    std::vector<std::function<void(Scenario&)>> builders;
    // Add instances of every test, for every configuration.
    for (int n = 0; n < 64; ++n) {
        builders.emplace_back([n](Scenario& scenario){ BuildWtxidTest(scenario, n); });
        builders.emplace_back([n](Scenario& scenario){ BuildRequestOrderTest(scenario, n & 3); });
        builders.emplace_back([n](Scenario& scenario){ BuildSingleTest(scenario, n & 31); });
        builders.emplace_back([n](Scenario& scenario){ BuildPriorityTest(scenario, n & 31); });
        builders.emplace_back([n](Scenario& scenario){ BuildBigPriorityTest(scenario, (n & 7) + 1); });
        builders.emplace_back([](Scenario& scenario){ BuildTimeBackwardsTest(scenario); });
        builders.emplace_back([](Scenario& scenario){ BuildWeirdRequestsTest(scenario); });
    }
    // Randomly shuffle all those functions.
    Shuffle(builders.begin(), builders.end(), g_insecure_rand_ctx);

    Runner runner;
    auto starttime = RandomTime1y();
    // Construct many scenarios, and run (up to) 10 randomly-chosen tests consecutively in each.
    while (builders.size()) {
        // Introduce some variation in the start time of each scenario, so they don't all start off
        // concurrently, but get a more random interleaving.
        auto scenario_start = starttime + RandomTime8s() + RandomTime8s() + RandomTime8s();
        Scenario scenario(runner, scenario_start);
        for (int j = 0; builders.size() && j < 10; ++j) {
            builders.back()(scenario);
            builders.pop_back();
        }
    }
    // Sort all the actions from all those scenarios chronologically, so that the actions from
    // distinct scenarios become interleaved. Use stable_sort so that actions from one scenario
    // aren't reordered w.r.t. each other.
    std::stable_sort(runner.actions.begin(), runner.actions.end(), [](const Action& a1, const Action& a2) {
        return a1.first < a2.first;
    });

    // Run all actions from all scenarios, in order.
    for (auto& action : runner.actions) {
        action.second();
    }

    BOOST_CHECK_EQUAL(runner.txrequest.Size(), 0U);
    BOOST_CHECK(runner.expired.empty());
}

}  // namespace

BOOST_AUTO_TEST_CASE(TxRequestTest)
{
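    // Run the whole interleaved-scenario construction a few times, each with fresh randomness.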
    for (int i = 0; i < 5; ++i) {
        TestInterleavedScenarios();
    }
}

BOOST_AUTO_TEST_SUITE_END()