/**
 * Copyright (C) 2018-present MongoDB, Inc.
 *
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the Server Side Public License, version 1,
 * as published by MongoDB, Inc.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * Server Side Public License for more details.
 *
 * You should have received a copy of the Server Side Public License
 * along with this program. If not, see
 * <http://www.mongodb.com/licensing/server-side-public-license>.
 *
 * As a special exception, the copyright holders give permission to link the
 * code of portions of this program with the OpenSSL library under certain
 * conditions as described in each individual source file and distribute
 * linked combinations including the program with the OpenSSL library. You
 * must comply with the Server Side Public License in all respects for
 * all of the code used other than as permitted herein. If you modify file(s)
 * with this exception, you may extend this exception to your version of the
 * file(s), but you are not obligated to do so. If you do not wish to do so,
 * delete this exception statement from your version. If you delete this
 * exception statement from all source files in the program, then also delete
 * it in the license file.
29 */ 30 31 #include "mongo/platform/basic.h" 32 33 #include <cstdint> 34 35 #include "mongo/bson/simple_bsonobj_comparator.h" 36 #include "mongo/bson/timestamp.h" 37 #include "mongo/db/catalog/collection.h" 38 #include "mongo/db/catalog/index_catalog.h" 39 #include "mongo/db/catalog/index_create.h" 40 #include "mongo/db/client.h" 41 #include "mongo/db/concurrency/write_conflict_exception.h" 42 #include "mongo/db/db.h" 43 #include "mongo/db/db_raii.h" 44 #include "mongo/db/dbdirectclient.h" 45 #include "mongo/db/dbhelpers.h" 46 #include "mongo/db/index/index_descriptor.h" 47 #include "mongo/db/logical_clock.h" 48 #include "mongo/db/op_observer_impl.h" 49 #include "mongo/db/repl/apply_ops.h" 50 #include "mongo/db/repl/oplog.h" 51 #include "mongo/db/repl/optime.h" 52 #include "mongo/db/repl/repl_client_info.h" 53 #include "mongo/db/repl/replication_coordinator_global.h" 54 #include "mongo/db/repl/replication_coordinator_mock.h" 55 #include "mongo/db/service_context.h" 56 #include "mongo/db/storage/kv/kv_storage_engine.h" 57 #include "mongo/unittest/unittest.h" 58 #include "mongo/util/stacktrace.h" 59 60 namespace mongo { 61 62 class StorageTimestampTest { 63 public: 64 ServiceContext::UniqueOperationContext _opCtxRaii = cc().makeOperationContext(); 65 OperationContext* _opCtx = _opCtxRaii.get(); 66 LogicalClock* _clock = LogicalClock::get(_opCtx); 67 StorageTimestampTest()68 StorageTimestampTest() { 69 if (!(mongo::storageGlobalParams.engine == "wiredTiger" && 70 mongo::serverGlobalParams.enableMajorityReadConcern)) { 71 return; 72 } 73 74 repl::ReplSettings replSettings; 75 replSettings.setOplogSizeBytes(10 * 1024 * 1024); 76 replSettings.setReplSetString("rs0"); 77 auto coordinatorMock = 78 new repl::ReplicationCoordinatorMock(_opCtx->getServiceContext(), replSettings); 79 coordinatorMock->alwaysAllowWrites(true); 80 setGlobalReplicationCoordinator(coordinatorMock); 81 82 // Since the Client object persists across tests, even though the global 83 // 
ReplicationCoordinator does not, we need to clear the last op associated with the client 84 // to avoid the invariant in ReplClientInfo::setLastOp that the optime only goes forward. 85 repl::ReplClientInfo::forClient(_opCtx->getClient()).clearLastOp_forTest(); 86 87 getGlobalServiceContext()->setOpObserver(stdx::make_unique<OpObserverImpl>()); 88 89 repl::setOplogCollectionName(); 90 repl::createOplog(_opCtx); 91 92 ASSERT_OK(_clock->advanceClusterTime(LogicalTime(Timestamp(1, 0)))); 93 } 94 ~StorageTimestampTest()95 ~StorageTimestampTest() { 96 if (!(mongo::storageGlobalParams.engine == "wiredTiger" && 97 mongo::serverGlobalParams.enableMajorityReadConcern)) { 98 return; 99 } 100 101 try { 102 reset(NamespaceString("local.oplog.rs")); 103 } catch (...) { 104 FAIL("Exception while cleaning up test"); 105 } 106 } 107 108 /** 109 * Walking on ice: resetting the ReplicationCoordinator destroys the underlying 110 * `DropPendingCollectionReaper`. Use a truncate/dropAllIndexes to clean out a collection 111 * without actually dropping it. 112 */ reset(NamespaceString nss) const113 void reset(NamespaceString nss) const { 114 ::mongo::writeConflictRetry(_opCtx, "deleteAll", nss.ns(), [&] { 115 invariant(_opCtx->recoveryUnit()->selectSnapshot(Timestamp::min()).isOK()); 116 AutoGetCollection collRaii(_opCtx, nss, LockMode::MODE_X); 117 118 if (collRaii.getCollection()) { 119 WriteUnitOfWork wunit(_opCtx); 120 invariant(collRaii.getCollection()->truncate(_opCtx).isOK()); 121 collRaii.getCollection()->getIndexCatalog()->dropAllIndexes(_opCtx, false); 122 wunit.commit(); 123 return; 124 } 125 126 AutoGetOrCreateDb dbRaii(_opCtx, nss.db(), LockMode::MODE_X); 127 WriteUnitOfWork wunit(_opCtx); 128 invariant(dbRaii.getDb()->createCollection(_opCtx, nss.ns())); 129 wunit.commit(); 130 }); 131 } 132 insertDocument(Collection * coll,const InsertStatement & stmt)133 void insertDocument(Collection* coll, const InsertStatement& stmt) { 134 // Insert some documents. 
135 OpDebug* const nullOpDebug = nullptr; 136 const bool enforceQuota = false; 137 const bool fromMigrate = false; 138 ASSERT_OK(coll->insertDocument(_opCtx, stmt, nullOpDebug, enforceQuota, fromMigrate)); 139 } 140 itCount(Collection * coll)141 std::int32_t itCount(Collection* coll) { 142 std::uint64_t ret = 0; 143 auto cursor = coll->getRecordStore()->getCursor(_opCtx); 144 while (cursor->next() != boost::none) { 145 ++ret; 146 } 147 148 return ret; 149 } 150 findOne(Collection * coll)151 BSONObj findOne(Collection* coll) { 152 auto optRecord = coll->getRecordStore()->getCursor(_opCtx)->next(); 153 if (optRecord == boost::none) { 154 // Print a stack trace to help disambiguate which `findOne` failed. 155 printStackTrace(); 156 FAIL("Did not find any documents."); 157 } 158 return optRecord.get().data.toBson(); 159 } 160 doAtomicApplyOps(const std::string & dbName,const std::list<BSONObj> & applyOpsList)161 StatusWith<BSONObj> doAtomicApplyOps(const std::string& dbName, 162 const std::list<BSONObj>& applyOpsList) { 163 BSONObjBuilder result; 164 Status status = applyOps(_opCtx, 165 dbName, 166 BSON("applyOps" << applyOpsList), 167 repl::OplogApplication::Mode::kApplyOpsCmd, 168 &result); 169 if (!status.isOK()) { 170 return status; 171 } 172 173 return {result.obj()}; 174 } 175 176 // Creates a dummy command operation to persuade `applyOps` to be non-atomic. 
doNonAtomicApplyOps(const std::string & dbName,const std::list<BSONObj> & applyOpsList,Timestamp dummyTs)177 StatusWith<BSONObj> doNonAtomicApplyOps(const std::string& dbName, 178 const std::list<BSONObj>& applyOpsList, 179 Timestamp dummyTs) { 180 BSONArrayBuilder builder; 181 builder.append(applyOpsList); 182 builder << BSON("ts" << dummyTs << "t" << 1LL << "h" << 1 << "op" 183 << "c" 184 << "ns" 185 << "test.$cmd" 186 << "o" 187 << BSON("applyOps" << BSONArrayBuilder().obj())); 188 BSONObjBuilder result; 189 Status status = applyOps(_opCtx, 190 dbName, 191 BSON("applyOps" << builder.arr()), 192 repl::OplogApplication::Mode::kApplyOpsCmd, 193 &result); 194 if (!status.isOK()) { 195 return status; 196 } 197 198 return {result.obj()}; 199 } 200 }; 201 202 class SecondaryInsertTimes : public StorageTimestampTest { 203 public: run()204 void run() { 205 // Only run on 'wiredTiger'. No other storage engines to-date timestamp writes. 206 if (!(mongo::storageGlobalParams.engine == "wiredTiger" && 207 mongo::serverGlobalParams.enableMajorityReadConcern)) { 208 return; 209 } 210 211 // In order for applyOps to assign timestamps, we must be in non-replicated mode. 212 repl::UnreplicatedWritesBlock uwb(_opCtx); 213 214 // Create a new collection. 
215 NamespaceString nss("unittests.timestampedUpdates"); 216 reset(nss); 217 218 AutoGetCollection autoColl(_opCtx, nss, LockMode::MODE_X, LockMode::MODE_IX); 219 220 const std::uint32_t docsToInsert = 10; 221 const LogicalTime firstInsertTime = _clock->reserveTicks(docsToInsert); 222 for (std::uint32_t idx = 0; idx < docsToInsert; ++idx) { 223 BSONObjBuilder result; 224 ASSERT_OK(applyOps( 225 _opCtx, 226 nss.db().toString(), 227 BSON("applyOps" << BSON_ARRAY( 228 BSON("ts" << firstInsertTime.addTicks(idx).asTimestamp() << "t" << 1LL 229 << "h" 230 << 0xBEEFBEEFLL 231 << "v" 232 << 2 233 << "op" 234 << "i" 235 << "ns" 236 << nss.ns() 237 << "ui" 238 << autoColl.getCollection()->uuid().get() 239 << "o" 240 << BSON("_id" << idx)) 241 << BSON("ts" << firstInsertTime.addTicks(idx).asTimestamp() << "t" << 1LL 242 << "h" 243 << 1 244 << "op" 245 << "c" 246 << "ns" 247 << "test.$cmd" 248 << "o" 249 << BSON("applyOps" << BSONArrayBuilder().obj())))), 250 repl::OplogApplication::Mode::kApplyOpsCmd, 251 &result)); 252 } 253 254 for (std::uint32_t idx = 0; idx < docsToInsert; ++idx) { 255 auto recoveryUnit = _opCtx->recoveryUnit(); 256 recoveryUnit->abandonSnapshot(); 257 ASSERT_OK(recoveryUnit->selectSnapshot(firstInsertTime.addTicks(idx).asTimestamp())); 258 BSONObj result; 259 ASSERT(Helpers::getLast(_opCtx, nss.ns().c_str(), result)) << " idx is " << idx; 260 ASSERT_EQ(0, SimpleBSONObjComparator::kInstance.compare(result, BSON("_id" << idx))) 261 << "Doc: " << result.toString() << " Expected: " << BSON("_id" << idx); 262 } 263 } 264 }; 265 266 class SecondaryArrayInsertTimes : public StorageTimestampTest { 267 public: run()268 void run() { 269 // Only run on 'wiredTiger'. No other storage engines to-date timestamp writes. 270 if (!(mongo::storageGlobalParams.engine == "wiredTiger" && 271 mongo::serverGlobalParams.enableMajorityReadConcern)) { 272 return; 273 } 274 275 // In order for applyOps to assign timestamps, we must be in non-replicated mode. 
276 repl::UnreplicatedWritesBlock uwb(_opCtx); 277 278 // Create a new collection. 279 NamespaceString nss("unittests.timestampedUpdates"); 280 reset(nss); 281 282 AutoGetCollection autoColl(_opCtx, nss, LockMode::MODE_X, LockMode::MODE_IX); 283 284 const std::uint32_t docsToInsert = 10; 285 const LogicalTime firstInsertTime = _clock->reserveTicks(docsToInsert); 286 BSONObjBuilder fullCommand; 287 BSONArrayBuilder applyOpsB(fullCommand.subarrayStart("applyOps")); 288 289 BSONObjBuilder applyOpsElem1Builder; 290 291 // Populate the "ts" field with an array of all the grouped inserts' timestamps. 292 BSONArrayBuilder tsArrayBuilder(applyOpsElem1Builder.subarrayStart("ts")); 293 for (std::uint32_t idx = 0; idx < docsToInsert; ++idx) { 294 tsArrayBuilder.append(firstInsertTime.addTicks(idx).asTimestamp()); 295 } 296 tsArrayBuilder.done(); 297 298 // Populate the "t" (term) field with an array of all the grouped inserts' terms. 299 BSONArrayBuilder tArrayBuilder(applyOpsElem1Builder.subarrayStart("t")); 300 for (std::uint32_t idx = 0; idx < docsToInsert; ++idx) { 301 tArrayBuilder.append(1LL); 302 } 303 tArrayBuilder.done(); 304 305 // Populate the "o" field with an array of all the grouped inserts. 
306 BSONArrayBuilder oArrayBuilder(applyOpsElem1Builder.subarrayStart("o")); 307 for (std::uint32_t idx = 0; idx < docsToInsert; ++idx) { 308 oArrayBuilder.append(BSON("_id" << idx)); 309 } 310 oArrayBuilder.done(); 311 312 applyOpsElem1Builder << "h" << 0xBEEFBEEFLL << "v" << 2 << "op" 313 << "i" 314 << "ns" << nss.ns() << "ui" << autoColl.getCollection()->uuid().get(); 315 316 applyOpsB.append(applyOpsElem1Builder.done()); 317 318 BSONObjBuilder applyOpsElem2Builder; 319 applyOpsElem2Builder << "ts" << firstInsertTime.addTicks(docsToInsert).asTimestamp() << "t" 320 << 1LL << "h" << 1 << "op" 321 << "c" 322 << "ns" 323 << "test.$cmd" 324 << "o" << BSON("applyOps" << BSONArrayBuilder().obj()); 325 326 applyOpsB.append(applyOpsElem2Builder.done()); 327 applyOpsB.done(); 328 // Apply the group of inserts. 329 BSONObjBuilder result; 330 ASSERT_OK(applyOps(_opCtx, 331 nss.db().toString(), 332 fullCommand.done(), 333 repl::OplogApplication::Mode::kApplyOpsCmd, 334 &result)); 335 336 337 for (std::uint32_t idx = 0; idx < docsToInsert; ++idx) { 338 auto recoveryUnit = _opCtx->recoveryUnit(); 339 recoveryUnit->abandonSnapshot(); 340 ASSERT_OK(recoveryUnit->selectSnapshot(firstInsertTime.addTicks(idx).asTimestamp())); 341 BSONObj result; 342 ASSERT(Helpers::getLast(_opCtx, nss.ns().c_str(), result)) << " idx is " << idx; 343 ASSERT_EQ(0, SimpleBSONObjComparator::kInstance.compare(result, BSON("_id" << idx))) 344 << "Doc: " << result.toString() << " Expected: " << BSON("_id" << idx); 345 } 346 } 347 }; 348 349 class SecondaryDeleteTimes : public StorageTimestampTest { 350 public: run()351 void run() { 352 // Only run on 'wiredTiger'. No other storage engines to-date timestamp writes. 353 if (!(mongo::storageGlobalParams.engine == "wiredTiger" && 354 mongo::serverGlobalParams.enableMajorityReadConcern)) { 355 return; 356 } 357 358 // In order for applyOps to assign timestamps, we must be in non-replicated mode. 
359 repl::UnreplicatedWritesBlock uwb(_opCtx); 360 361 // Create a new collection. 362 NamespaceString nss("unittests.timestampedDeletes"); 363 reset(nss); 364 365 AutoGetCollection autoColl(_opCtx, nss, LockMode::MODE_X, LockMode::MODE_IX); 366 367 // Insert some documents. 368 const std::int32_t docsToInsert = 10; 369 const LogicalTime firstInsertTime = _clock->reserveTicks(docsToInsert); 370 const LogicalTime lastInsertTime = firstInsertTime.addTicks(docsToInsert - 1); 371 WriteUnitOfWork wunit(_opCtx); 372 for (std::int32_t num = 0; num < docsToInsert; ++num) { 373 insertDocument(autoColl.getCollection(), 374 InsertStatement(BSON("_id" << num << "a" << num), 375 firstInsertTime.addTicks(num).asTimestamp(), 376 0LL)); 377 } 378 wunit.commit(); 379 ASSERT_EQ(docsToInsert, itCount(autoColl.getCollection())); 380 381 // Delete all documents one at a time. 382 const LogicalTime startDeleteTime = _clock->reserveTicks(docsToInsert); 383 for (std::int32_t num = 0; num < docsToInsert; ++num) { 384 ASSERT_OK( 385 doNonAtomicApplyOps( 386 nss.db().toString(), 387 {BSON("ts" << startDeleteTime.addTicks(num).asTimestamp() << "t" << 0LL << "h" 388 << 0xBEEFBEEFLL 389 << "v" 390 << 2 391 << "op" 392 << "d" 393 << "ns" 394 << nss.ns() 395 << "ui" 396 << autoColl.getCollection()->uuid().get() 397 << "o" 398 << BSON("_id" << num))}, 399 startDeleteTime.addTicks(num).asTimestamp()) 400 .getStatus()); 401 } 402 403 for (std::int32_t num = 0; num <= docsToInsert; ++num) { 404 // The first loop queries at `lastInsertTime` and should count all documents. Querying 405 // at each successive tick counts one less document. 
406 auto recoveryUnit = _opCtx->recoveryUnit(); 407 recoveryUnit->abandonSnapshot(); 408 ASSERT_OK(recoveryUnit->selectSnapshot(lastInsertTime.addTicks(num).asTimestamp())); 409 ASSERT_EQ(docsToInsert - num, itCount(autoColl.getCollection())); 410 } 411 } 412 }; 413 414 class SecondaryUpdateTimes : public StorageTimestampTest { 415 public: run()416 void run() { 417 // Only run on 'wiredTiger'. No other storage engines to-date timestamp writes. 418 if (!(mongo::storageGlobalParams.engine == "wiredTiger" && 419 mongo::serverGlobalParams.enableMajorityReadConcern)) { 420 return; 421 } 422 423 // In order for applyOps to assign timestamps, we must be in non-replicated mode. 424 repl::UnreplicatedWritesBlock uwb(_opCtx); 425 426 // Create a new collection. 427 NamespaceString nss("unittests.timestampedUpdates"); 428 reset(nss); 429 430 AutoGetCollection autoColl(_opCtx, nss, LockMode::MODE_X, LockMode::MODE_IX); 431 432 // Insert one document that will go through a series of updates. 433 const LogicalTime insertTime = _clock->reserveTicks(1); 434 WriteUnitOfWork wunit(_opCtx); 435 insertDocument(autoColl.getCollection(), 436 InsertStatement(BSON("_id" << 0), insertTime.asTimestamp(), 0LL)); 437 wunit.commit(); 438 ASSERT_EQ(1, itCount(autoColl.getCollection())); 439 440 // Each pair in the vector represents the update to perform at the next tick of the 441 // clock. `pair.first` is the update to perform and `pair.second` is the full value of the 442 // document after the transformation. 
443 const std::vector<std::pair<BSONObj, BSONObj>> updates = { 444 {BSON("$set" << BSON("val" << 1)), BSON("_id" << 0 << "val" << 1)}, 445 {BSON("$unset" << BSON("val" << 1)), BSON("_id" << 0)}, 446 {BSON("$addToSet" << BSON("theSet" << 1)), 447 BSON("_id" << 0 << "theSet" << BSON_ARRAY(1))}, 448 {BSON("$addToSet" << BSON("theSet" << 2)), 449 BSON("_id" << 0 << "theSet" << BSON_ARRAY(1 << 2))}, 450 {BSON("$pull" << BSON("theSet" << 1)), BSON("_id" << 0 << "theSet" << BSON_ARRAY(2))}, 451 {BSON("$pull" << BSON("theSet" << 2)), BSON("_id" << 0 << "theSet" << BSONArray())}, 452 {BSON("$set" << BSON("theMap.val" << 1)), 453 BSON("_id" << 0 << "theSet" << BSONArray() << "theMap" << BSON("val" << 1))}, 454 {BSON("$rename" << BSON("theSet" 455 << "theOtherSet")), 456 BSON("_id" << 0 << "theMap" << BSON("val" << 1) << "theOtherSet" << BSONArray())}}; 457 458 const LogicalTime firstUpdateTime = _clock->reserveTicks(updates.size()); 459 for (std::size_t idx = 0; idx < updates.size(); ++idx) { 460 ASSERT_OK( 461 doNonAtomicApplyOps( 462 nss.db().toString(), 463 {BSON("ts" << firstUpdateTime.addTicks(idx).asTimestamp() << "t" << 0LL << "h" 464 << 0xBEEFBEEFLL 465 << "v" 466 << 2 467 << "op" 468 << "u" 469 << "ns" 470 << nss.ns() 471 << "ui" 472 << autoColl.getCollection()->uuid().get() 473 << "o2" 474 << BSON("_id" << 0) 475 << "o" 476 << updates[idx].first)}, 477 firstUpdateTime.addTicks(idx).asTimestamp()) 478 .getStatus()); 479 } 480 481 for (std::size_t idx = 0; idx < updates.size(); ++idx) { 482 // Querying at each successive ticks after `insertTime` sees the document transform in 483 // the series. 
484 auto recoveryUnit = _opCtx->recoveryUnit(); 485 recoveryUnit->abandonSnapshot(); 486 ASSERT_OK(recoveryUnit->selectSnapshot(insertTime.addTicks(idx + 1).asTimestamp())); 487 488 auto doc = findOne(autoColl.getCollection()); 489 ASSERT_EQ(0, SimpleBSONObjComparator::kInstance.compare(doc, updates[idx].second)) 490 << "Doc: " << doc.toString() << " Expected: " << updates[idx].second.toString(); 491 } 492 } 493 }; 494 495 class SecondaryInsertToUpsert : public StorageTimestampTest { 496 public: run()497 void run() { 498 // Only run on 'wiredTiger'. No other storage engines to-date timestamp writes. 499 if (!(mongo::storageGlobalParams.engine == "wiredTiger" && 500 mongo::serverGlobalParams.enableMajorityReadConcern)) { 501 return; 502 } 503 504 // In order for applyOps to assign timestamps, we must be in non-replicated mode. 505 repl::UnreplicatedWritesBlock uwb(_opCtx); 506 507 // Create a new collection. 508 NamespaceString nss("unittests.insertToUpsert"); 509 reset(nss); 510 511 AutoGetCollection autoColl(_opCtx, nss, LockMode::MODE_X, LockMode::MODE_IX); 512 513 const LogicalTime insertTime = _clock->reserveTicks(2); 514 515 // This applyOps runs into an insert of `{_id: 0, field: 0}` followed by a second insert 516 // on the same collection with `{_id: 0}`. It's expected for this second insert to be 517 // turned into an upsert. The goal document does not contain `field: 0`. 
518 BSONObjBuilder resultBuilder; 519 auto swResult = doNonAtomicApplyOps( 520 nss.db().toString(), 521 {BSON("ts" << insertTime.asTimestamp() << "t" << 1LL << "h" << 0xBEEFBEEFLL << "v" << 2 522 << "op" 523 << "i" 524 << "ns" 525 << nss.ns() 526 << "ui" 527 << autoColl.getCollection()->uuid().get() 528 << "o" 529 << BSON("_id" << 0 << "field" << 0)), 530 BSON("ts" << insertTime.addTicks(1).asTimestamp() << "t" << 1LL << "h" << 0xBEEFBEEFLL 531 << "v" 532 << 2 533 << "op" 534 << "i" 535 << "ns" 536 << nss.ns() 537 << "ui" 538 << autoColl.getCollection()->uuid().get() 539 << "o" 540 << BSON("_id" << 0))}, 541 insertTime.addTicks(1).asTimestamp()); 542 ASSERT_OK(swResult); 543 544 BSONObj& result = swResult.getValue(); 545 ASSERT_EQ(3, result.getIntField("applied")); 546 ASSERT(result["results"].Array()[0].Bool()); 547 ASSERT(result["results"].Array()[1].Bool()); 548 ASSERT(result["results"].Array()[2].Bool()); 549 550 // Reading at `insertTime` should show the original document, `{_id: 0, field: 0}`. 551 auto recoveryUnit = _opCtx->recoveryUnit(); 552 recoveryUnit->abandonSnapshot(); 553 ASSERT_OK(recoveryUnit->selectSnapshot(insertTime.asTimestamp())); 554 auto doc = findOne(autoColl.getCollection()); 555 ASSERT_EQ(0, 556 SimpleBSONObjComparator::kInstance.compare(doc, BSON("_id" << 0 << "field" << 0))) 557 << "Doc: " << doc.toString() << " Expected: {_id: 0, field: 0}"; 558 559 // Reading at `insertTime + 1` should show the second insert that got converted to an 560 // upsert, `{_id: 0}`. 561 recoveryUnit->abandonSnapshot(); 562 ASSERT_OK(recoveryUnit->selectSnapshot(insertTime.addTicks(1).asTimestamp())); 563 doc = findOne(autoColl.getCollection()); 564 ASSERT_EQ(0, SimpleBSONObjComparator::kInstance.compare(doc, BSON("_id" << 0))) 565 << "Doc: " << doc.toString() << " Expected: {_id: 0}"; 566 } 567 }; 568 569 class SecondaryAtomicApplyOps : public StorageTimestampTest { 570 public: run()571 void run() { 572 // Only run on 'wiredTiger'. 
No other storage engines to-date timestamp writes. 573 if (!(mongo::storageGlobalParams.engine == "wiredTiger" && 574 mongo::serverGlobalParams.enableMajorityReadConcern)) { 575 return; 576 } 577 578 // Create a new collection. 579 NamespaceString nss("unittests.insertToUpsert"); 580 reset(nss); 581 582 AutoGetCollection autoColl(_opCtx, nss, LockMode::MODE_X, LockMode::MODE_IX); 583 584 // Reserve a timestamp before the inserts should happen. 585 const LogicalTime preInsertTimestamp = _clock->reserveTicks(1); 586 auto swResult = doAtomicApplyOps(nss.db().toString(), 587 {BSON("v" << 2 << "op" 588 << "i" 589 << "ns" 590 << nss.ns() 591 << "ui" 592 << autoColl.getCollection()->uuid().get() 593 << "o" 594 << BSON("_id" << 0)), 595 BSON("v" << 2 << "op" 596 << "i" 597 << "ns" 598 << nss.ns() 599 << "ui" 600 << autoColl.getCollection()->uuid().get() 601 << "o" 602 << BSON("_id" << 1))}); 603 ASSERT_OK(swResult); 604 605 ASSERT_EQ(2, swResult.getValue().getIntField("applied")); 606 ASSERT(swResult.getValue()["results"].Array()[0].Bool()); 607 ASSERT(swResult.getValue()["results"].Array()[1].Bool()); 608 609 // Reading at `preInsertTimestamp` should not find anything. 610 auto recoveryUnit = _opCtx->recoveryUnit(); 611 recoveryUnit->abandonSnapshot(); 612 ASSERT_OK(recoveryUnit->selectSnapshot(preInsertTimestamp.asTimestamp())); 613 ASSERT_EQ(0, itCount(autoColl.getCollection())) 614 << "Should not observe a write at `preInsertTimestamp`. TS: " 615 << preInsertTimestamp.asTimestamp(); 616 617 // Reading at `preInsertTimestamp + 1` should observe both inserts. 618 recoveryUnit = _opCtx->recoveryUnit(); 619 recoveryUnit->abandonSnapshot(); 620 ASSERT_OK(recoveryUnit->selectSnapshot(preInsertTimestamp.addTicks(1).asTimestamp())); 621 ASSERT_EQ(2, itCount(autoColl.getCollection())) 622 << "Should observe both writes at `preInsertTimestamp + 1`. 
TS: " 623 << preInsertTimestamp.addTicks(1).asTimestamp(); 624 } 625 }; 626 627 628 // This should have the same result as `SecondaryInsertToUpsert` except it gets there a different 629 // way. Doing an atomic `applyOps` should result in a WriteConflictException because the same 630 // transaction is trying to write modify the same document twice. The `applyOps` command should 631 // catch that failure and retry in non-atomic mode, preserving the timestamps supplied by the 632 // user. 633 class SecondaryAtomicApplyOpsWCEToNonAtomic : public StorageTimestampTest { 634 public: run()635 void run() { 636 // Only run on 'wiredTiger'. No other storage engines to-date timestamp writes. 637 if (!(mongo::storageGlobalParams.engine == "wiredTiger" && 638 mongo::serverGlobalParams.enableMajorityReadConcern)) { 639 return; 640 } 641 642 // Create a new collectiont. 643 NamespaceString nss("unitteTsts.insertToUpsert"); 644 reset(nss); 645 646 AutoGetCollection autoColl(_opCtx, nss, LockMode::MODE_X, LockMode::MODE_IX); 647 648 const LogicalTime preInsertTimestamp = _clock->reserveTicks(1); 649 auto swResult = doAtomicApplyOps(nss.db().toString(), 650 {BSON("v" << 2 << "op" 651 << "i" 652 << "ns" 653 << nss.ns() 654 << "ui" 655 << autoColl.getCollection()->uuid().get() 656 << "o" 657 << BSON("_id" << 0 << "field" << 0)), 658 BSON("v" << 2 << "op" 659 << "i" 660 << "ns" 661 << nss.ns() 662 << "ui" 663 << autoColl.getCollection()->uuid().get() 664 << "o" 665 << BSON("_id" << 0))}); 666 ASSERT_OK(swResult); 667 668 ASSERT_EQ(2, swResult.getValue().getIntField("applied")); 669 ASSERT(swResult.getValue()["results"].Array()[0].Bool()); 670 ASSERT(swResult.getValue()["results"].Array()[1].Bool()); 671 672 // Reading at `insertTime` should not see any documents. 
673 auto recoveryUnit = _opCtx->recoveryUnit(); 674 recoveryUnit->abandonSnapshot(); 675 ASSERT_OK(recoveryUnit->selectSnapshot(preInsertTimestamp.asTimestamp())); 676 ASSERT_EQ(0, itCount(autoColl.getCollection())) 677 << "Should not find any documents at `preInsertTimestamp`. TS: " 678 << preInsertTimestamp.asTimestamp(); 679 680 // Reading at `preInsertTimestamp + 1` should show the final state of the document. 681 recoveryUnit->abandonSnapshot(); 682 ASSERT_OK(recoveryUnit->selectSnapshot(preInsertTimestamp.addTicks(1).asTimestamp())); 683 auto doc = findOne(autoColl.getCollection()); 684 ASSERT_EQ(0, SimpleBSONObjComparator::kInstance.compare(doc, BSON("_id" << 0))) 685 << "Doc: " << doc.toString() << " Expected: {_id: 0}"; 686 } 687 }; 688 689 class AllStorageTimestampTests : public unittest::Suite { 690 public: AllStorageTimestampTests()691 AllStorageTimestampTests() : unittest::Suite("StorageTimestampTests") {} setupTests()692 void setupTests() { 693 add<SecondaryInsertTimes>(); 694 add<SecondaryArrayInsertTimes>(); 695 add<SecondaryDeleteTimes>(); 696 add<SecondaryUpdateTimes>(); 697 add<SecondaryInsertToUpsert>(); 698 add<SecondaryAtomicApplyOps>(); 699 add<SecondaryAtomicApplyOpsWCEToNonAtomic>(); 700 } 701 }; 702 703 unittest::SuiteInstance<AllStorageTimestampTests> allStorageTimestampTests; 704 } // namespace mongo 705