1 /*- 2 * See the file LICENSE for redistribution information. 3 * 4 * Copyright (c) 2002, 2014 Oracle and/or its affiliates. All rights reserved. 5 * 6 */ 7 8 package com.sleepycat.je.cleaner; 9 10 import static org.junit.Assert.assertEquals; 11 import static org.junit.Assert.assertNotNull; 12 import static org.junit.Assert.assertSame; 13 import static org.junit.Assert.assertTrue; 14 import static org.junit.Assert.fail; 15 16 import java.io.IOException; 17 import java.util.HashMap; 18 import java.util.HashSet; 19 import java.util.Iterator; 20 import java.util.List; 21 import java.util.Map; 22 import java.util.Set; 23 24 import org.junit.After; 25 import org.junit.Test; 26 import org.junit.runner.RunWith; 27 import org.junit.runners.Parameterized; 28 import org.junit.runners.Parameterized.Parameters; 29 30 import com.sleepycat.bind.tuple.IntegerBinding; 31 import com.sleepycat.bind.tuple.LongBinding; 32 import com.sleepycat.je.CacheMode; 33 import com.sleepycat.je.CheckpointConfig; 34 import com.sleepycat.je.Cursor; 35 import com.sleepycat.je.Database; 36 import com.sleepycat.je.DatabaseConfig; 37 import com.sleepycat.je.DatabaseEntry; 38 import com.sleepycat.je.DatabaseException; 39 import com.sleepycat.je.DbInternal; 40 import com.sleepycat.je.Environment; 41 import com.sleepycat.je.EnvironmentConfig; 42 import com.sleepycat.je.EnvironmentMutableConfig; 43 import com.sleepycat.je.EnvironmentStats; 44 import com.sleepycat.je.LockMode; 45 import com.sleepycat.je.OperationStatus; 46 import com.sleepycat.je.StatsConfig; 47 import com.sleepycat.je.Transaction; 48 import com.sleepycat.je.config.EnvironmentParams; 49 import com.sleepycat.je.dbi.CursorImpl; 50 import com.sleepycat.je.dbi.DatabaseImpl; 51 import com.sleepycat.je.dbi.EnvironmentImpl; 52 import com.sleepycat.je.dbi.MemoryBudget; 53 import com.sleepycat.je.junit.JUnitThread; 54 import com.sleepycat.je.log.FileManager; 55 import com.sleepycat.je.recovery.Checkpointer; 56 import com.sleepycat.je.tree.BIN; 57 import com.sleepycat.je.tree.FileSummaryLN; 58 import com.sleepycat.je.tree.IN; 59 import com.sleepycat.je.tree.Node; 60 import com.sleepycat.je.txn.BasicLocker; 61 import com.sleepycat.je.txn.LockType; 62 import com.sleepycat.je.util.StringDbt; 63 import com.sleepycat.je.util.TestUtils; 64 import com.sleepycat.je.utilint.TestHook; 65 import com.sleepycat.utilint.StringUtils; 66 67 @RunWith(Parameterized.class) 68 public class CleanerTest extends CleanerTestBase { 69 70 private static final int N_KEYS = 300; 71 private static final int N_KEY_BYTES = 10; 72 73 /* 74 * Make the log file size small enough to allow cleaning, but large enough 75 * not to generate a lot of fsyncing at the log file boundaries. 76 */ 77 private static final int FILE_SIZE = 10000; 78 protected Database db = null; 79 private Database exampleDb; 80 81 private static final CheckpointConfig FORCE_CONFIG = 82 new CheckpointConfig(); 83 static { 84 FORCE_CONFIG.setForce(true); 85 } 86 87 private JUnitThread junitThread; 88 private volatile int synchronizer; 89 CleanerTest(boolean multiSubDir)90 public CleanerTest(boolean multiSubDir) { 91 envMultiSubDir = multiSubDir; 92 customName = envMultiSubDir ? 
"multi-sub-dir" : null ; 93 } 94 95 @Parameters genParams()96 public static List<Object[]> genParams() { 97 98 return getEnv(new boolean[] {false, true}); 99 } 100 initEnv(boolean createDb, boolean allowDups)101 private void initEnv(boolean createDb, boolean allowDups) 102 throws DatabaseException { 103 104 EnvironmentConfig envConfig = TestUtils.initEnvConfig(); 105 DbInternal.disableParameterValidation(envConfig); 106 envConfig.setTransactional(true); 107 envConfig.setAllowCreate(true); 108 envConfig.setTxnNoSync(Boolean.getBoolean(TestUtils.NO_SYNC)); 109 envConfig.setConfigParam(EnvironmentParams.LOG_FILE_MAX.getName(), 110 Integer.toString(FILE_SIZE)); 111 envConfig.setConfigParam(EnvironmentParams.ENV_RUN_CLEANER.getName(), 112 "false"); 113 envConfig.setConfigParam(EnvironmentParams.CLEANER_REMOVE.getName(), 114 "false"); 115 envConfig.setConfigParam 116 (EnvironmentParams.CLEANER_MIN_UTILIZATION.getName(), "80"); 117 envConfig.setConfigParam 118 (EnvironmentParams.ENV_RUN_CHECKPOINTER.getName(), "false"); 119 envConfig.setConfigParam(EnvironmentParams.NODE_MAX.getName(), "6"); 120 envConfig.setConfigParam(EnvironmentParams.BIN_DELTA_PERCENT.getName(), 121 "75"); 122 if (envMultiSubDir) { 123 envConfig.setConfigParam(EnvironmentConfig.LOG_N_DATA_DIRECTORIES, 124 DATA_DIRS + ""); 125 } 126 127 env = new Environment(envHome, envConfig); 128 129 String databaseName = "cleanerDb"; 130 DatabaseConfig dbConfig = new DatabaseConfig(); 131 dbConfig.setTransactional(true); 132 dbConfig.setAllowCreate(createDb); 133 dbConfig.setSortedDuplicates(allowDups); 134 exampleDb = env.openDatabase(null, databaseName, dbConfig); 135 } 136 137 @After tearDown()138 public void tearDown() 139 throws Exception { 140 141 if (junitThread != null) { 142 junitThread.shutdown(); 143 junitThread = null; 144 } 145 146 super.tearDown(); 147 exampleDb = null; 148 } 149 closeEnv()150 private void closeEnv() 151 throws DatabaseException { 152 153 if (exampleDb != null) { 154 exampleDb.close(); 155 exampleDb = null; 156 } 157 158 if (env != null) { 159 env.close(); 160 env = null; 161 } 162 } 163 164 @Test testCleanerNoDupes()165 public void testCleanerNoDupes() 166 throws Throwable { 167 168 initEnv(true, false); 169 try { 170 doCleanerTest(N_KEYS, 1); 171 } catch (Throwable t) { 172 t.printStackTrace(); 173 throw t; 174 } 175 } 176 177 @Test testCleanerWithDupes()178 public void testCleanerWithDupes() 179 throws Throwable { 180 181 initEnv(true, true); 182 try { 183 doCleanerTest(2, 500); 184 } catch (Throwable t) { 185 t.printStackTrace(); 186 throw t; 187 } 188 } 189 doCleanerTest(int nKeys, int nDupsPerKey)190 private void doCleanerTest(int nKeys, int nDupsPerKey) 191 throws DatabaseException { 192 193 EnvironmentImpl environment = 194 DbInternal.getEnvironmentImpl(env); 195 FileManager fileManager = environment.getFileManager(); 196 Map<String, Set<String>> expectedMap = 197 new HashMap<String, Set<String>>(); 198 doLargePut(expectedMap, nKeys, nDupsPerKey, true); 199 Long lastNum = fileManager.getLastFileNum(); 200 201 /* Read the data back. */ 202 StringDbt foundKey = new StringDbt(); 203 StringDbt foundData = new StringDbt(); 204 205 Cursor cursor = exampleDb.openCursor(null, null); 206 207 while (cursor.getNext(foundKey, foundData, LockMode.DEFAULT) == 208 OperationStatus.SUCCESS) { 209 } 210 211 env.checkpoint(FORCE_CONFIG); 212 213 for (int i = 0; i < (int) lastNum.longValue(); i++) { 214 215 /* 216 * Force clean one file. Utilization-based cleaning won't 217 * work here, since utilization is over 90%. 
218 */ 219 DbInternal.getEnvironmentImpl(env). 220 getCleaner(). 221 doClean(false, // cleanMultipleFiles 222 true); // forceCleaning 223 } 224 225 EnvironmentStats stats = env.getStats(TestUtils.FAST_STATS); 226 assertTrue(stats.getNINsCleaned() > 0); 227 228 cursor.close(); 229 closeEnv(); 230 231 initEnv(false, (nDupsPerKey > 1)); 232 233 checkData(expectedMap); 234 assertTrue(fileManager.getLastFileNum().longValue() > 235 lastNum.longValue()); 236 237 closeEnv(); 238 } 239 240 /** 241 * Ensure that INs are cleaned. 242 */ 243 @Test testCleanInternalNodes()244 public void testCleanInternalNodes() 245 throws DatabaseException { 246 247 initEnv(true, true); 248 int nKeys = 200; 249 250 EnvironmentImpl environment = 251 DbInternal.getEnvironmentImpl(env); 252 FileManager fileManager = environment.getFileManager(); 253 /* Insert a lot of keys. ExpectedMap holds the expected data */ 254 Map<String, Set<String>> expectedMap = 255 new HashMap<String, Set<String>>(); 256 doLargePut(expectedMap, nKeys, 1, true); 257 258 /* Modify every other piece of data. */ 259 modifyData(expectedMap, 10, true); 260 checkData(expectedMap); 261 262 /* Checkpoint */ 263 env.checkpoint(FORCE_CONFIG); 264 checkData(expectedMap); 265 266 /* Modify every other piece of data. */ 267 modifyData(expectedMap, 10, true); 268 checkData(expectedMap); 269 270 /* Checkpoint -- this should obsolete INs. */ 271 env.checkpoint(FORCE_CONFIG); 272 checkData(expectedMap); 273 274 /* Clean */ 275 Long lastNum = fileManager.getLastFileNum(); 276 env.cleanLog(); 277 278 /* Validate after cleaning. */ 279 checkData(expectedMap); 280 EnvironmentStats stats = env.getStats(TestUtils.FAST_STATS); 281 282 /* Make sure we really cleaned something.*/ 283 assertTrue(stats.getNINsCleaned() > 0); 284 assertTrue(stats.getNLNsCleaned() > 0); 285 286 closeEnv(); 287 initEnv(false, true); 288 checkData(expectedMap); 289 assertTrue(fileManager.getLastFileNum().longValue() > 290 lastNum.longValue()); 291 292 closeEnv(); 293 } 294 295 /** 296 * See if we can clean in the middle of the file set. 297 */ 298 @Test testCleanFileHole()299 public void testCleanFileHole() 300 throws Throwable { 301 302 initEnv(true, true); 303 304 int nKeys = 20; // test ends up inserting 2*nKeys 305 int nDupsPerKey = 30; 306 307 EnvironmentImpl environment = 308 DbInternal.getEnvironmentImpl(env); 309 FileManager fileManager = environment.getFileManager(); 310 311 /* Insert some non dup data, modify, insert dup data. */ 312 Map<String, Set<String>> expectedMap = 313 new HashMap<String, Set<String>>(); 314 doLargePut(expectedMap, nKeys, 1, true); 315 modifyData(expectedMap, 10, true); 316 doLargePut(expectedMap, nKeys, nDupsPerKey, true); 317 checkData(expectedMap); 318 319 /* 320 * Delete all the data, but abort. (Try to fill up the log 321 * with entries we don't need. 322 */ 323 deleteData(expectedMap, false, false); 324 checkData(expectedMap); 325 326 /* Do some more insertions, but abort them. */ 327 doLargePut(expectedMap, nKeys, nDupsPerKey, false); 328 checkData(expectedMap); 329 330 /* Do some more insertions and commit them. */ 331 doLargePut(expectedMap, nKeys, nDupsPerKey, true); 332 checkData(expectedMap); 333 334 /* Checkpoint */ 335 env.checkpoint(FORCE_CONFIG); 336 checkData(expectedMap); 337 338 /* Clean */ 339 Long lastNum = fileManager.getLastFileNum(); 340 env.cleanLog(); 341 342 /* Validate after cleaning. 
*/ 343 checkData(expectedMap); 344 EnvironmentStats stats = env.getStats(TestUtils.FAST_STATS); 345 346 /* Make sure we really cleaned something.*/ 347 assertTrue(stats.getNINsCleaned() > 0); 348 assertTrue(stats.getNLNsCleaned() > 0); 349 350 closeEnv(); 351 initEnv(false, true); 352 checkData(expectedMap); 353 assertTrue(fileManager.getLastFileNum().longValue() > 354 lastNum.longValue()); 355 356 closeEnv(); 357 } 358 359 /** 360 * Test for SR13191. This SR shows a problem where a MapLN is initialized 361 * with a DatabaseImpl that has a null EnvironmentImpl. When the Database 362 * gets used, a NullPointerException occurs in the Cursor code which 363 * expects there to be an EnvironmentImpl present. The MapLN gets init'd 364 * by the Cleaner reading through a log file and encountering a MapLN which 365 * is not presently in the DbTree. As an efficiency, the Cleaner calls 366 * updateEntry on the BIN to try to insert the MapLN into the BIN so that 367 * it won't have to fetch it when it migrates the BIN. But this is bad 368 * since the MapLN has not been init'd properly. The fix was to ensure 369 * that the MapLN is init'd correctly by calling postFetchInit on it just 370 * prior to inserting it into the BIN. 371 * 372 * This test first creates an environment and two databases. The first 373 * database it just adds to the tree with no data. This will be the MapLN 374 * that eventually gets instantiated by the cleaner. The second database 375 * is used just to create a bunch of data that will get deleted so as to 376 * create a low utilization for one of the log files. Once the data for 377 * db2 is created, the log is flipped (so file 0 is the one with the MapLN 378 * for db1 in it), and the environment is closed and reopened. We insert 379 * more data into db2 until we have enough .jdb files that file 0 is 380 * attractive to the cleaner. Call the cleaner to have it instantiate the 381 * MapLN and then use the MapLN in a Database.get() call. 
382 */ 383 @Test testSR13191()384 public void testSR13191() 385 throws Throwable { 386 387 EnvironmentConfig envConfig = TestUtils.initEnvConfig(); 388 envConfig.setAllowCreate(true); 389 envConfig.setConfigParam 390 (EnvironmentParams.ENV_RUN_CLEANER.getName(), "false"); 391 if (envMultiSubDir) { 392 envConfig.setConfigParam(EnvironmentConfig.LOG_N_DATA_DIRECTORIES, 393 DATA_DIRS + ""); 394 } 395 env = new Environment(envHome, envConfig); 396 EnvironmentImpl envImpl = DbInternal.getEnvironmentImpl(env); 397 FileManager fileManager = 398 DbInternal.getEnvironmentImpl(env).getFileManager(); 399 400 DatabaseConfig dbConfig = new DatabaseConfig(); 401 dbConfig.setAllowCreate(true); 402 Database db1 = 403 env.openDatabase(null, "db1", dbConfig); 404 405 Database db2 = 406 env.openDatabase(null, "db2", dbConfig); 407 408 DatabaseEntry key = new DatabaseEntry(); 409 DatabaseEntry data = new DatabaseEntry(); 410 IntegerBinding.intToEntry(1, key); 411 data.setData(new byte[100000]); 412 for (int i = 0; i < 50; i++) { 413 assertEquals(OperationStatus.SUCCESS, db2.put(null, key, data)); 414 } 415 db1.close(); 416 db2.close(); 417 assertEquals("Should have 0 as current file", 0L, 418 fileManager.getCurrentFileNum()); 419 envImpl.forceLogFileFlip(); 420 env.close(); 421 422 env = new Environment(envHome, envConfig); 423 fileManager = DbInternal.getEnvironmentImpl(env).getFileManager(); 424 assertEquals("Should have 1 as current file", 1L, 425 fileManager.getCurrentFileNum()); 426 427 db2 = env.openDatabase(null, "db2", dbConfig); 428 429 for (int i = 0; i < 250; i++) { 430 assertEquals(OperationStatus.SUCCESS, db2.put(null, key, data)); 431 } 432 433 db2.close(); 434 env.cleanLog(); 435 db1 = env.openDatabase(null, "db1", dbConfig); 436 db1.get(null, key, data, null); 437 db1.close(); 438 env.close(); 439 } 440 441 /** 442 * Tests that setting je.env.runCleaner=false stops the cleaner from 443 * processing more files even if the target minUtilization is not met 444 * [#15158]. 445 */ 446 @Test testCleanerStop()447 public void testCleanerStop() 448 throws Throwable { 449 450 final int fileSize = 1000000; 451 EnvironmentConfig envConfig = TestUtils.initEnvConfig(); 452 envConfig.setAllowCreate(true); 453 envConfig.setConfigParam 454 (EnvironmentParams.ENV_RUN_CLEANER.getName(), "false"); 455 envConfig.setConfigParam 456 (EnvironmentParams.LOG_FILE_MAX.getName(), 457 Integer.toString(fileSize)); 458 envConfig.setConfigParam 459 (EnvironmentParams.CLEANER_MIN_UTILIZATION.getName(), "80"); 460 if (envMultiSubDir) { 461 envConfig.setConfigParam(EnvironmentConfig.LOG_N_DATA_DIRECTORIES, 462 DATA_DIRS + ""); 463 } 464 env = new Environment(envHome, envConfig); 465 466 DatabaseConfig dbConfig = new DatabaseConfig(); 467 dbConfig.setAllowCreate(true); 468 Database db = env.openDatabase(null, "CleanerStop", dbConfig); 469 470 DatabaseEntry key = new DatabaseEntry(new byte[1]); 471 DatabaseEntry data = new DatabaseEntry(new byte[fileSize]); 472 for (int i = 0; i <= 10; i += 1) { 473 db.put(null, key, data); 474 } 475 env.checkpoint(FORCE_CONFIG); 476 477 EnvironmentStats stats = env.getStats(null); 478 assertEquals(0, stats.getNCleanerRuns()); 479 480 envConfig = env.getConfig(); 481 envConfig.setConfigParam 482 (EnvironmentParams.ENV_RUN_CLEANER.getName(), "true"); 483 env.setMutableConfig(envConfig); 484 485 int iter = 0; 486 while (stats.getNCleanerRuns() < 10) { 487 iter += 1; 488 if (iter == 20) { 489 490 /* 491 * At one time the DaemonThread did not wakeup immediately in 492 * this test. 
A workaround was to add an item to the job queue 493 * in FileProcessor.wakeup. Later the job queue was removed 494 * and the DaemonThread.run() was fixed to wakeup immediately. 495 * This test verifies that the cleanup of the run() method 496 * works properly [#15267]. 497 */ 498 fail("Cleaner did not run after " + iter + " tries"); 499 } 500 Thread.yield(); 501 Thread.sleep(1000); 502 stats = env.getStats(null); 503 } 504 505 envConfig.setConfigParam 506 (EnvironmentParams.ENV_RUN_CLEANER.getName(), "false"); 507 env.setMutableConfig(envConfig); 508 509 long prevNFiles = stats.getNCleanerRuns(); 510 511 /* Do multiple updates to create obsolete records. */ 512 for (int i = 0; i <= 10; i++) { 513 db.put(null, key, data); 514 } 515 516 /* Wait a while to see if cleaner starts to work. */ 517 Thread.sleep(1000); 518 519 stats = env.getStats(null); 520 long currNFiles = stats.getNCleanerRuns(); 521 assertEquals("Expected no files cleaned, prevNFiles=" + prevNFiles + 522 ", currNFiles=" + currNFiles, 523 prevNFiles, currNFiles); 524 525 db.close(); 526 env.close(); 527 } 528 529 /** 530 * Tests that the FileSelector memory budget is subtracted when the 531 * environment is closed. Before the fix in SR [#16368], it was not. 532 */ 533 @Test testFileSelectorMemBudget()534 public void testFileSelectorMemBudget() 535 throws Throwable { 536 537 final int fileSize = 1000000; 538 EnvironmentConfig envConfig = TestUtils.initEnvConfig(); 539 envConfig.setAllowCreate(true); 540 envConfig.setConfigParam 541 (EnvironmentParams.ENV_RUN_CLEANER.getName(), "false"); 542 envConfig.setConfigParam 543 (EnvironmentParams.ENV_RUN_CHECKPOINTER.getName(), "false"); 544 envConfig.setConfigParam 545 (EnvironmentParams.LOG_FILE_MAX.getName(), 546 Integer.toString(fileSize)); 547 envConfig.setConfigParam 548 (EnvironmentParams.CLEANER_MIN_UTILIZATION.getName(), "80"); 549 if (envMultiSubDir) { 550 envConfig.setConfigParam(EnvironmentConfig.LOG_N_DATA_DIRECTORIES, 551 DATA_DIRS + ""); 552 } 553 env = new Environment(envHome, envConfig); 554 555 DatabaseConfig dbConfig = new DatabaseConfig(); 556 dbConfig.setAllowCreate(true); 557 Database db = env.openDatabase(null, "foo", dbConfig); 558 559 DatabaseEntry key = new DatabaseEntry(new byte[1]); 560 DatabaseEntry data = new DatabaseEntry(new byte[fileSize]); 561 for (int i = 0; i <= 10; i += 1) { 562 db.put(null, key, data); 563 } 564 env.checkpoint(FORCE_CONFIG); 565 566 int nFiles = env.cleanLog(); 567 assertTrue(nFiles > 0); 568 569 db.close(); 570 571 /* 572 * To force the memory leak to be detected we have to close without a 573 * checkpoint. The checkpoint will finish processing all cleaned files 574 * and subtract them from the budget. But this should happen during 575 * close, even without a checkpoint. 576 */ 577 EnvironmentImpl envImpl = DbInternal.getEnvironmentImpl(env); 578 envImpl.close(false /*doCheckpoint*/); 579 } 580 581 /** 582 * Tests that the cleanLog cannot be called in a read-only environment. 583 * [#16368] 584 */ 585 @Test testCleanLogReadOnly()586 public void testCleanLogReadOnly() 587 throws Throwable { 588 589 /* Open read-write. */ 590 EnvironmentConfig envConfig = TestUtils.initEnvConfig(); 591 envConfig.setAllowCreate(true); 592 if (envMultiSubDir) { 593 envConfig.setConfigParam(EnvironmentConfig.LOG_N_DATA_DIRECTORIES, 594 DATA_DIRS + ""); 595 } 596 env = new Environment(envHome, envConfig); 597 env.close(); 598 env = null; 599 600 /* Open read-only. 
     */
        envConfig.setAllowCreate(false);
        envConfig.setReadOnly(true);
        env = new Environment(envHome, envConfig);

        /* Try cleanLog in a read-only env. */
        try {
            env.cleanLog();
            fail();
        } catch (UnsupportedOperationException e) {
            assertEquals
                ("Log cleaning not allowed in a read-only or memory-only " +
                 "environment", e.getMessage());
        }
    }

    /**
     * Tests that when a file being cleaned is deleted, we ignore the error
     * and don't repeatedly try to clean it. This can happen when we
     * mistakenly clean a file after it has been queued for deletion. The
     * workaround is to catch LogFileNotFoundException in the cleaner and
     * ignore the error. We're testing the workaround here by forcing
     * cleaning of deleted files. [#15528]
     */
    @Test
    public void testUnexpectedFileDeletion()
        throws DatabaseException {

        initEnv(true, false);
        EnvironmentMutableConfig config = env.getMutableConfig();
        config.setConfigParam
            (EnvironmentParams.ENV_RUN_CLEANER.getName(), "false");
        config.setConfigParam
            (EnvironmentParams.CLEANER_MIN_UTILIZATION.getName(), "80");
        env.setMutableConfig(config);

        final EnvironmentImpl envImpl =
            DbInternal.getEnvironmentImpl(env);
        final Cleaner cleaner = envImpl.getCleaner();
        final FileSelector fileSelector = cleaner.getFileSelector();

        Map<String, Set<String>> expectedMap =
            new HashMap<String, Set<String>>();
        doLargePut(expectedMap, 1000, 1, true);
        checkData(expectedMap);

        final long file1 = 0;
        final long file2 = 1;

        for (int i = 0; i < 100; i += 1) {
            modifyData(expectedMap, 1, true);
            checkData(expectedMap);
            fileSelector.injectFileForCleaning(new Long(file1));
            fileSelector.injectFileForCleaning(new Long(file2));
            assertTrue(fileSelector.getToBeCleanedFiles().contains(file1));
            assertTrue(fileSelector.getToBeCleanedFiles().contains(file2));
            while (env.cleanLog() > 0) {}
            assertTrue(!fileSelector.getToBeCleanedFiles().contains(file1));
            assertTrue(!fileSelector.getToBeCleanedFiles().contains(file2));
            env.checkpoint(FORCE_CONFIG);
            Map<Long,FileSummary> allFiles = envImpl.getUtilizationProfile().
                getFileSummaryMap(true /*includeTrackedFiles*/);
            assertTrue(!allFiles.containsKey(file1));
            assertTrue(!allFiles.containsKey(file2));
        }
        checkData(expectedMap);

        closeEnv();
    }

    /**
     * Helper routine. Generates keys with random alpha values while data
     * is numbered numerically.
     */
    private void doLargePut(Map<String, Set<String>> expectedMap,
                            int nKeys,
                            int nDupsPerKey,
                            boolean commit)
        throws DatabaseException {

        Transaction txn = env.beginTransaction(null, null);
        for (int i = 0; i < nKeys; i++) {
            byte[] key = new byte[N_KEY_BYTES];
            TestUtils.generateRandomAlphaBytes(key);
            String keyString = StringUtils.fromUTF8(key);

            /*
             * The data map is keyed by key value, and holds a hash
             * map of all data values.
690 */ 691 Set<String> dataVals = new HashSet<String>(); 692 if (commit) { 693 expectedMap.put(keyString, dataVals); 694 } 695 for (int j = 0; j < nDupsPerKey; j++) { 696 String dataString = Integer.toString(j); 697 exampleDb.put(txn, 698 new StringDbt(keyString), 699 new StringDbt(dataString)); 700 dataVals.add(dataString); 701 } 702 } 703 if (commit) { 704 txn.commit(); 705 } else { 706 txn.abort(); 707 } 708 } 709 710 /** 711 * Increment each data value. 712 */ modifyData(Map<String, Set<String>> expectedMap, int increment, boolean commit)713 private void modifyData(Map<String, Set<String>> expectedMap, 714 int increment, 715 boolean commit) 716 throws DatabaseException { 717 718 Transaction txn = env.beginTransaction(null, null); 719 720 StringDbt foundKey = new StringDbt(); 721 StringDbt foundData = new StringDbt(); 722 723 Cursor cursor = exampleDb.openCursor(txn, null); 724 OperationStatus status = cursor.getFirst(foundKey, foundData, 725 LockMode.DEFAULT); 726 727 boolean toggle = true; 728 while (status == OperationStatus.SUCCESS) { 729 if (toggle) { 730 731 String foundKeyString = foundKey.getString(); 732 String foundDataString = foundData.getString(); 733 int newValue = Integer.parseInt(foundDataString) + increment; 734 String newDataString = Integer.toString(newValue); 735 736 /* If committing, adjust the expected map. */ 737 if (commit) { 738 739 Set<String> dataVals = expectedMap.get(foundKeyString); 740 if (dataVals == null) { 741 fail("Couldn't find " + 742 foundKeyString + "/" + foundDataString); 743 } else if (dataVals.contains(foundDataString)) { 744 dataVals.remove(foundDataString); 745 dataVals.add(newDataString); 746 } else { 747 fail("Couldn't find " + 748 foundKeyString + "/" + foundDataString); 749 } 750 } 751 752 assertEquals(OperationStatus.SUCCESS, 753 cursor.delete()); 754 assertEquals(OperationStatus.SUCCESS, 755 cursor.put(foundKey, 756 new StringDbt(newDataString))); 757 toggle = false; 758 } else { 759 toggle = true; 760 } 761 762 status = cursor.getNext(foundKey, foundData, LockMode.DEFAULT); 763 } 764 765 cursor.close(); 766 if (commit) { 767 txn.commit(); 768 } else { 769 txn.abort(); 770 } 771 } 772 773 /** 774 * Delete data. 775 */ deleteData(Map<String, Set<String>> expectedMap, boolean everyOther, boolean commit)776 private void deleteData(Map<String, Set<String>> expectedMap, 777 boolean everyOther, 778 boolean commit) 779 throws DatabaseException { 780 781 Transaction txn = env.beginTransaction(null, null); 782 783 StringDbt foundKey = new StringDbt(); 784 StringDbt foundData = new StringDbt(); 785 786 Cursor cursor = exampleDb.openCursor(txn, null); 787 OperationStatus status = cursor.getFirst(foundKey, foundData, 788 LockMode.DEFAULT); 789 790 boolean toggle = true; 791 while (status == OperationStatus.SUCCESS) { 792 if (toggle) { 793 794 String foundKeyString = foundKey.getString(); 795 String foundDataString = foundData.getString(); 796 797 /* If committing, adjust the expected map */ 798 if (commit) { 799 800 Set dataVals = expectedMap.get(foundKeyString); 801 if (dataVals == null) { 802 fail("Couldn't find " + 803 foundKeyString + "/" + foundDataString); 804 } else if (dataVals.contains(foundDataString)) { 805 dataVals.remove(foundDataString); 806 if (dataVals.size() == 0) { 807 expectedMap.remove(foundKeyString); 808 } 809 } else { 810 fail("Couldn't find " + 811 foundKeyString + "/" + foundDataString); 812 } 813 } 814 815 assertEquals(OperationStatus.SUCCESS, cursor.delete()); 816 } 817 818 if (everyOther) { 819 toggle = toggle? 
false: true; 820 } 821 822 status = cursor.getNext(foundKey, foundData, LockMode.DEFAULT); 823 } 824 825 cursor.close(); 826 if (commit) { 827 txn.commit(); 828 } else { 829 txn.abort(); 830 } 831 } 832 833 /** 834 * Check what's in the database against what's in the expected map. 835 */ checkData(Map<String, Set<String>> expectedMap)836 private void checkData(Map<String, Set<String>> expectedMap) 837 throws DatabaseException { 838 839 StringDbt foundKey = new StringDbt(); 840 StringDbt foundData = new StringDbt(); 841 Cursor cursor = exampleDb.openCursor(null, null); 842 OperationStatus status = cursor.getFirst(foundKey, foundData, 843 LockMode.DEFAULT); 844 845 /* 846 * Make a copy of expectedMap so that we're free to delete out 847 * of the set of expected results when we verify. 848 * Also make a set of counts for each key value, to test count. 849 */ 850 851 Map<String, Set<String>> checkMap = new HashMap<String, Set<String>>(); 852 Map<String, Integer>countMap = new HashMap<String, Integer>(); 853 Iterator<Map.Entry<String, Set<String>>> iter = 854 expectedMap.entrySet().iterator(); 855 while (iter.hasNext()) { 856 Map.Entry<String, Set<String>> entry = iter.next(); 857 Set<String> copySet = new HashSet<String>(); 858 copySet.addAll(entry.getValue()); 859 checkMap.put(entry.getKey(), copySet); 860 countMap.put(entry.getKey(), new Integer(copySet.size())); 861 } 862 863 while (status == OperationStatus.SUCCESS) { 864 String foundKeyString = foundKey.getString(); 865 String foundDataString = foundData.getString(); 866 867 /* Check that the current value is in the check values map */ 868 Set dataVals = checkMap.get(foundKeyString); 869 if (dataVals == null) { 870 fail("Couldn't find " + 871 foundKeyString + "/" + foundDataString); 872 } else if (dataVals.contains(foundDataString)) { 873 dataVals.remove(foundDataString); 874 if (dataVals.size() == 0) { 875 checkMap.remove(foundKeyString); 876 } 877 } else { 878 fail("Couldn't find " + 879 foundKeyString + "/" + 880 foundDataString + 881 " in data vals"); 882 } 883 884 /* Check that the count is right. */ 885 int count = cursor.count(); 886 assertEquals(countMap.get(foundKeyString).intValue(), 887 count); 888 889 status = cursor.getNext(foundKey, foundData, LockMode.DEFAULT); 890 } 891 892 cursor.close(); 893 894 if (checkMap.size() != 0) { 895 dumpExpected(checkMap); 896 fail("checkMapSize = " + checkMap.size()); 897 898 } 899 assertEquals(0, checkMap.size()); 900 } 901 dumpExpected(Map expectedMap)902 private void dumpExpected(Map expectedMap) { 903 Iterator iter = expectedMap.entrySet().iterator(); 904 while (iter.hasNext()) { 905 Map.Entry entry = (Map.Entry) iter.next(); 906 String key = (String) entry.getKey(); 907 Iterator dataIter = ((Set) entry.getValue()).iterator(); 908 while (dataIter.hasNext()) { 909 System.out.println("key=" + key + 910 " data=" + (String) dataIter.next()); 911 } 912 } 913 } 914 915 /** 916 * Tests that cleaner mutable configuration parameters can be changed and 917 * that the changes actually take effect. 
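     * Each parameter is changed via setParam, which asserts both that the
     * new value differs from the current value and that getMutableConfig
     * reflects the change; the assertions on the Cleaner fields below then
     * verify that the new value actually took effect internally.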
918 */ 919 @Test testMutableConfig()920 public void testMutableConfig() 921 throws DatabaseException { 922 923 EnvironmentConfig envConfig = TestUtils.initEnvConfig(); 924 envConfig.setAllowCreate(true); 925 if (envMultiSubDir) { 926 envConfig.setConfigParam(EnvironmentConfig.LOG_N_DATA_DIRECTORIES, 927 DATA_DIRS + ""); 928 } 929 env = new Environment(envHome, envConfig); 930 envConfig = env.getConfig(); 931 EnvironmentImpl envImpl = 932 DbInternal.getEnvironmentImpl(env); 933 Cleaner cleaner = envImpl.getCleaner(); 934 MemoryBudget budget = envImpl.getMemoryBudget(); 935 String name; 936 String val; 937 938 /* je.cleaner.minUtilization */ 939 name = EnvironmentParams.CLEANER_MIN_UTILIZATION.getName(); 940 setParam(name, "33"); 941 assertEquals(33, cleaner.minUtilization); 942 943 /* je.cleaner.minFileUtilization */ 944 name = EnvironmentParams.CLEANER_MIN_FILE_UTILIZATION.getName(); 945 setParam(name, "7"); 946 assertEquals(7, cleaner.minFileUtilization); 947 948 /* je.cleaner.bytesInterval */ 949 name = EnvironmentParams.CLEANER_BYTES_INTERVAL.getName(); 950 setParam(name, "1000"); 951 assertEquals(1000, cleaner.cleanerBytesInterval); 952 953 /* je.cleaner.deadlockRetry */ 954 name = EnvironmentParams.CLEANER_DEADLOCK_RETRY.getName(); 955 setParam(name, "7"); 956 assertEquals(7, cleaner.nDeadlockRetries); 957 958 /* je.cleaner.lockTimeout */ 959 name = EnvironmentParams.CLEANER_LOCK_TIMEOUT.getName(); 960 setParam(name, "7000"); 961 assertEquals(7, cleaner.lockTimeout); 962 963 /* je.cleaner.expunge */ 964 name = EnvironmentParams.CLEANER_REMOVE.getName(); 965 val = "false".equals(envConfig.getConfigParam(name)) ? 966 "true" : "false"; 967 setParam(name, val); 968 assertEquals(val.equals("true"), cleaner.expunge); 969 970 /* je.cleaner.minAge */ 971 name = EnvironmentParams.CLEANER_MIN_AGE.getName(); 972 setParam(name, "7"); 973 assertEquals(7, cleaner.minAge); 974 975 /* je.cleaner.maxBatchFiles */ 976 name = EnvironmentParams.CLEANER_MAX_BATCH_FILES.getName(); 977 setParam(name, "7"); 978 assertEquals(7, cleaner.maxBatchFiles); 979 980 /* je.cleaner.readSize */ 981 name = EnvironmentParams.CLEANER_READ_SIZE.getName(); 982 setParam(name, "7777"); 983 assertEquals(7777, cleaner.readBufferSize); 984 985 /* je.cleaner.detailMaxMemoryPercentage */ 986 name = EnvironmentParams.CLEANER_DETAIL_MAX_MEMORY_PERCENTAGE. 987 getName(); 988 setParam(name, "7"); 989 assertEquals((budget.getMaxMemory() * 7) / 100, 990 budget.getTrackerBudget()); 991 992 /* je.cleaner.threads */ 993 name = EnvironmentParams.CLEANER_THREADS.getName(); 994 setParam(name, "7"); 995 assertEquals((envImpl.isNoLocking() ? 0 : 7), 996 countCleanerThreads()); 997 998 env.close(); 999 env = null; 1000 } 1001 1002 /** 1003 * Sets a mutable config param, checking that the given value is not 1004 * already set and that it actually changes. 1005 */ setParam(String name, String val)1006 private void setParam(String name, String val) 1007 throws DatabaseException { 1008 1009 EnvironmentMutableConfig config = env.getMutableConfig(); 1010 String myVal = config.getConfigParam(name); 1011 assertTrue(!val.equals(myVal)); 1012 1013 config.setConfigParam(name, val); 1014 env.setMutableConfig(config); 1015 1016 config = env.getMutableConfig(); 1017 myVal = config.getConfigParam(name); 1018 assertTrue(val.equals(myVal)); 1019 } 1020 1021 /** 1022 * Count the number of threads with the name "Cleaner#". 
     */
    private int countCleanerThreads() {

        Thread[] threads = new Thread[Thread.activeCount()];
        Thread.enumerate(threads);

        int count = 0;
        for (int i = 0; i < threads.length; i += 1) {
            if (threads[i] != null &&
                threads[i].getName().startsWith("Cleaner")) {
                count += 1;
            }
        }

        return count;
    }

    /**
     * Checks that the memory budget is updated properly by the
     * UtilizationTracker. Prior to a bug fix [#15505] amounts were added to
     * the budget but not subtracted when two TrackedFileSummary objects were
     * merged. Merging occurs when a local tracker is added to the global
     * tracker. Local trackers are used during recovery, checkpoints, lazy
     * compression, and reverse splits.
     */
    @Test
    public void testTrackerMemoryBudget()
        throws DatabaseException {

        /* Open environment. */
        EnvironmentConfig envConfig = TestUtils.initEnvConfig();
        envConfig.setAllowCreate(true);
        envConfig.setTransactional(true);
        envConfig.setConfigParam
            (EnvironmentParams.ENV_RUN_CLEANER.getName(), "false");
        envConfig.setConfigParam
            (EnvironmentParams.ENV_RUN_INCOMPRESSOR.getName(), "false");
        if (envMultiSubDir) {
            envConfig.setConfigParam(EnvironmentConfig.LOG_N_DATA_DIRECTORIES,
                                     DATA_DIRS + "");
        }
        env = new Environment(envHome, envConfig);

        /* Open database. */
        DatabaseConfig dbConfig = new DatabaseConfig();
        dbConfig.setTransactional(true);
        dbConfig.setAllowCreate(true);
        exampleDb = env.openDatabase(null, "foo", dbConfig);

        /* Insert data. */
        DatabaseEntry key = new DatabaseEntry();
        DatabaseEntry data = new DatabaseEntry();
        for (int i = 1; i <= 200; i += 1) {
            IntegerBinding.intToEntry(i, key);
            IntegerBinding.intToEntry(i, data);
            exampleDb.put(null, key, data);
        }

        /* Save the admin budget baseline. */
        flushTrackedFiles();
        long admin = env.getStats(null).getAdminBytes();

        /*
         * Nothing becomes obsolete when inserting and no INs are logged, so
         * the budget does not increase.
         */
        IntegerBinding.intToEntry(201, key);
        exampleDb.put(null, key, data);
        assertEquals(admin, env.getStats(null).getAdminBytes());
        flushTrackedFiles();
        assertEquals(admin, env.getStats(null).getAdminBytes());

        /*
         * Update a record and expect the budget to increase because the old
         * LN becomes obsolete.
         */
        exampleDb.put(null, key, data);
        assertTrue(admin < env.getStats(null).getAdminBytes());
        flushTrackedFiles();
        assertEquals(admin, env.getStats(null).getAdminBytes());

        /*
         * Delete all records and expect the budget to increase because LNs
         * become obsolete.
         */
        for (int i = 1; i <= 201; i += 1) {
            IntegerBinding.intToEntry(i, key);
            exampleDb.delete(null, key);
        }
        assertTrue(admin < env.getStats(null).getAdminBytes());
        flushTrackedFiles();
        assertEquals(admin, env.getStats(null).getAdminBytes());

        /*
         * Compress and expect no change to the budget. Prior to the fix for
         * [#15505] the assertion below failed because the baseline admin
         * budget was not restored.
1120 */ 1121 env.compress(); 1122 flushTrackedFiles(); 1123 assertEquals(admin, env.getStats(null).getAdminBytes()); 1124 1125 closeEnv(); 1126 } 1127 1128 /** 1129 * Flushes all tracked files to subtract tracked info from the admin memory 1130 * budget. 1131 */ 1132 private void flushTrackedFiles() 1133 throws DatabaseException { 1134 1135 EnvironmentImpl envImpl = DbInternal.getEnvironmentImpl(env); 1136 UtilizationTracker tracker = envImpl.getUtilizationTracker(); 1137 UtilizationProfile profile = envImpl.getUtilizationProfile(); 1138 1139 for (TrackedFileSummary summary : tracker.getTrackedFiles()) { 1140 profile.flushFileSummary(summary); 1141 } 1142 } 1143 1144 /** 1145 * Tests that memory is budgeted correctly for FileSummaryLNs that are 1146 * inserted and deleted after calling setTrackedSummary. The size of the 1147 * FileSummaryLN changes during logging when setTrackedSummary is called, 1148 * and this is accounted for specially in CursorImpl.finishInsert. [#15831] 1149 */ 1150 @Test 1151 public void testFileSummaryLNMemoryUsage() 1152 throws DatabaseException { 1153 1154 /* Open environment, prevent concurrent access by daemons. */ 1155 EnvironmentConfig envConfig = TestUtils.initEnvConfig(); 1156 envConfig.setAllowCreate(true); 1157 envConfig.setConfigParam 1158 (EnvironmentParams.ENV_RUN_CLEANER.getName(), "false"); 1159 envConfig.setConfigParam 1160 (EnvironmentParams.ENV_RUN_EVICTOR.getName(), "false"); 1161 envConfig.setConfigParam 1162 (EnvironmentParams.ENV_RUN_CHECKPOINTER.getName(), "false"); 1163 envConfig.setConfigParam 1164 (EnvironmentParams.ENV_RUN_INCOMPRESSOR.getName(), "false"); 1165 if (envMultiSubDir) { 1166 envConfig.setConfigParam(EnvironmentConfig.LOG_N_DATA_DIRECTORIES, 1167 DATA_DIRS + ""); 1168 } 1169 env = new Environment(envHome, envConfig); 1170 1171 EnvironmentImpl envImpl = DbInternal.getEnvironmentImpl(env); 1172 UtilizationProfile up = envImpl.getUtilizationProfile(); 1173 DatabaseImpl fileSummaryDb = up.getFileSummaryDb(); 1174 MemoryBudget memBudget = envImpl.getMemoryBudget(); 1175 1176 BasicLocker locker = null; 1177 CursorImpl cursor = null; 1178 try { 1179 locker = BasicLocker.createBasicLocker(envImpl); 1180 cursor = new CursorImpl(fileSummaryDb, locker); 1181 1182 /* Get parent BIN. There should be only one BIN in the tree. */ 1183 IN root = 1184 fileSummaryDb.getTree().getRootIN(CacheMode.DEFAULT); 1185 root.releaseLatch(); 1186 assertEquals(1, root.getNEntries()); 1187 BIN parent = (BIN) root.getTarget(0); 1188 1189 /* Use an artificial FileSummaryLN with a tracked summary. */ 1190 FileSummaryLN ln = new FileSummaryLN(new FileSummary()); 1191 TrackedFileSummary tfs = new TrackedFileSummary 1192 (envImpl.getUtilizationTracker(), 0 /*fileNum*/, 1193 true /*trackDetail*/); 1194 tfs.trackObsolete(0, true /*checkDupOffsets*/); 1195 byte[] keyBytes = 1196 FileSummaryLN.makeFullKey(0 /*fileNum*/, 123 /*sequence*/); 1197 int keySize = MemoryBudget.byteArraySize(keyBytes.length); 1198 1199 /* Perform insert after calling setTrackedSummary. 
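             * The insert below uses CursorImpl.insertRecord with the tracked
             * summary already attached, so the LN's in-memory size grows
             * during logging; the assertions that follow check that the
             * parent BIN's budgeted size accounts for that growth.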
*/ 1200 long oldSize = ln.getMemorySizeIncludedByParent(); 1201 long oldParentSize = getAdjustedMemSize(parent, memBudget); 1202 ln.setTrackedSummary(tfs); 1203 OperationStatus status = cursor.insertRecord( 1204 keyBytes, ln, false, fileSummaryDb.getRepContext()); 1205 assertSame(status, OperationStatus.SUCCESS); 1206 1207 cursor.latchBIN(); 1208 assertTrue(cursor.isOnBIN(parent)); 1209 ln.addExtraMarshaledMemorySize(parent); 1210 cursor.releaseBIN(); 1211 1212 long newSize = ln.getMemorySizeIncludedByParent(); 1213 long newParentSize = getAdjustedMemSize(parent, memBudget); 1214 1215 /* The size of the LN increases during logging. */ 1216 assertEquals(newSize, 1217 oldSize + 1218 ln.getObsoleteOffsets().getExtraMemorySize()); 1219 1220 /* The correct size is accounted for by the parent BIN. */ 1221 assertEquals(newSize + keySize, newParentSize - oldParentSize); 1222 1223 /* Correct size is subtracted during eviction. */ 1224 oldParentSize = newParentSize; 1225 cursor.evict(); 1226 newParentSize = getAdjustedMemSize(parent, memBudget); 1227 assertEquals(oldParentSize - newSize, newParentSize); 1228 1229 /* Fetch a fresh FileSummaryLN before deleting it. */ 1230 oldParentSize = newParentSize; 1231 ln = (FileSummaryLN) cursor.lockAndGetCurrentLN(LockType.READ); 1232 newSize = ln.getMemorySizeIncludedByParent(); 1233 newParentSize = getAdjustedMemSize(parent, memBudget); 1234 assertEquals(newSize, newParentSize - oldParentSize); 1235 1236 /* Perform delete. */ 1237 oldSize = newSize; 1238 oldParentSize = newParentSize; 1239 status = cursor.deleteCurrentRecord(fileSummaryDb.getRepContext()); 1240 assertSame(status, OperationStatus.SUCCESS); 1241 newSize = ln.getMemorySizeIncludedByParent(); 1242 newParentSize = getAdjustedMemSize(parent, memBudget); 1243 1244 /* Size changes during delete also, which performs eviction. */ 1245 assertTrue(newSize < oldSize); 1246 assertTrue(oldSize - newSize > 1247 ln.getObsoleteOffsets().getExtraMemorySize()); 1248 assertEquals(0 - oldSize, newParentSize - oldParentSize); 1249 } finally { 1250 if (cursor != null) { 1251 cursor.releaseBIN(); 1252 cursor.close(); 1253 } 1254 if (locker != null) { 1255 locker.operationEnd(); 1256 } 1257 } 1258 1259 TestUtils.validateNodeMemUsage(envImpl, true /*assertOnError*/); 1260 1261 /* Insert again, this time using the UtilizationProfile method. */ 1262 FileSummaryLN ln = new FileSummaryLN(new FileSummary()); 1263 TrackedFileSummary tfs = new TrackedFileSummary 1264 (envImpl.getUtilizationTracker(), 0 /*fileNum*/, 1265 true /*trackDetail*/); 1266 tfs.trackObsolete(0, true/*checkDupOffsets*/); 1267 ln.setTrackedSummary(tfs); 1268 assertTrue(up.insertFileSummary(ln, 0 /*fileNum*/, 123 /*sequence*/)); 1269 TestUtils.validateNodeMemUsage(envImpl, true /*assertOnError*/); 1270 1271 closeEnv(); 1272 } 1273 1274 /** 1275 * Checks that log utilization is updated incrementally during the 1276 * checkpoint rather than only when the highest dirty level in the Btree is 1277 * flushed. This feature (incremental update) was added so that log 1278 * cleaning is not delayed until the end of the checkpoint. [#16037] 1279 */ 1280 @Test 1281 public void testUtilizationDuringCheckpoint() 1282 throws DatabaseException { 1283 1284 /* 1285 * Use Database.sync of a deferred-write database to perform this test 1286 * rather than a checkpoint, because the hook is called at a 1287 * predictable place when only a single database is flushed. 
The 1288 * implementation of Checkpointer.flushDirtyNodes is shared for 1289 * Database.sync and checkpoint, so this tests both cases. 1290 */ 1291 final int FANOUT = 25; 1292 final int N_KEYS = FANOUT * FANOUT * FANOUT; 1293 1294 /* Open environment. */ 1295 EnvironmentConfig envConfig = TestUtils.initEnvConfig(); 1296 envConfig.setAllowCreate(true); 1297 envConfig.setConfigParam 1298 (EnvironmentParams.ENV_RUN_CHECKPOINTER.getName(), "false"); 1299 if (envMultiSubDir) { 1300 envConfig.setConfigParam(EnvironmentConfig.LOG_N_DATA_DIRECTORIES, 1301 DATA_DIRS + ""); 1302 } 1303 env = new Environment(envHome, envConfig); 1304 1305 /* Open ordinary non-transactional database. */ 1306 DatabaseConfig dbConfig = new DatabaseConfig(); 1307 dbConfig.setAllowCreate(true); 1308 dbConfig.setNodeMaxEntries(FANOUT); 1309 exampleDb = env.openDatabase(null, "foo", dbConfig); 1310 1311 /* Clear stats. */ 1312 StatsConfig statsConfig = new StatsConfig(); 1313 statsConfig.setClear(true); 1314 env.getStats(statsConfig); 1315 1316 /* Write to database to create a 3 level Btree. */ 1317 DatabaseEntry keyEntry = new DatabaseEntry(); 1318 DatabaseEntry dataEntry = new DatabaseEntry(new byte[0]); 1319 for (int i = 0; i < N_KEYS; i += 1) { 1320 LongBinding.longToEntry(i, keyEntry); 1321 assertSame(OperationStatus.SUCCESS, 1322 exampleDb.put(null, keyEntry, dataEntry)); 1323 EnvironmentStats stats = env.getStats(statsConfig); 1324 if (stats.getNEvictPasses() > 0) { 1325 break; 1326 } 1327 } 1328 1329 /* 1330 * Sync and write an LN in each BIN to create a bunch of dirty INs 1331 * that, when flushed again, will cause the prior versions to be 1332 * obsolete. 1333 */ 1334 env.sync(); 1335 for (int i = 0; i < N_KEYS; i += FANOUT) { 1336 LongBinding.longToEntry(i, keyEntry); 1337 assertSame(OperationStatus.SUCCESS, 1338 exampleDb.put(null, keyEntry, dataEntry)); 1339 } 1340 1341 /* 1342 * Close and re-open as a deferred-write DB so that we can call sync. 1343 * The INs will remain dirty. 1344 */ 1345 exampleDb.close(); 1346 dbConfig = new DatabaseConfig(); 1347 dbConfig.setDeferredWrite(true); 1348 exampleDb = env.openDatabase(null, "foo", dbConfig); 1349 1350 /* 1351 * The test hook is called just before writing the highest dirty level 1352 * in the Btree. At that point, utilization should be reduced if the 1353 * incremental utilization update feature is working properly. Before 1354 * adding this feature, utilization was not changed at this point. 1355 */ 1356 final int oldUtilization = getUtilization(); 1357 final StringBuilder hookCalledFlag = new StringBuilder(); 1358 1359 Checkpointer.setMaxFlushLevelHook(new TestHook() { 1360 public void doHook() { 1361 hookCalledFlag.append(1); 1362 final int newUtilization; 1363 newUtilization = getUtilization(); 1364 String msg = "oldUtilization=" + oldUtilization + 1365 " newUtilization=" + newUtilization; 1366 assertTrue(msg, oldUtilization - newUtilization >= 5); 1367 /* Don't call the test hook repeatedly. */ 1368 Checkpointer.setMaxFlushLevelHook(null); 1369 } 1370 public Object getHookValue() { 1371 throw new UnsupportedOperationException(); 1372 } 1373 public void doIOHook() { 1374 throw new UnsupportedOperationException(); 1375 } 1376 public void hookSetup() { 1377 throw new UnsupportedOperationException(); 1378 } 1379 public void doHook(Object obj) { 1380 throw new UnsupportedOperationException(); 1381 } 1382 }); 1383 exampleDb.sync(); 1384 assertTrue(hookCalledFlag.length() > 0); 1385 1386 /* While we're here, do a quick check of getLastKnownUtilization. 
     */
        final int lastKnownUtilization =
            env.getStats(null).getLastKnownUtilization();
        assertTrue(lastKnownUtilization > 0);

        closeEnv();
    }

    private int getUtilization() {
        EnvironmentImpl envImpl = DbInternal.getEnvironmentImpl(env);
        Map<Long,FileSummary> map =
            envImpl.getUtilizationProfile().getFileSummaryMap(true);
        FileSummary totals = new FileSummary();
        for (FileSummary summary : map.values()) {
            totals.add(summary);
        }
        return FileSummary.utilization(totals.getObsoleteSize(),
                                       totals.totalSize);
    }

    /**
     * Returns the memory size taken by the given IN, not including the
     * target rep, which changes during eviction.
     */
    private long getAdjustedMemSize(IN in, MemoryBudget memBudget) {
        return getMemSize(in, memBudget) -
               in.getTargets().calculateMemorySize();
    }

    /**
     * Returns the memory size taken by the given IN and the tree memory
     * usage.
     */
    private long getMemSize(IN in, MemoryBudget memBudget) {
        return memBudget.getTreeMemoryUsage() +
               in.getInMemorySize() -
               in.getBudgetedMemorySize();
    }

    /**
     * Tests that dirtiness is logged upwards during a checkpoint, even if a
     * node is evicted and refetched after being added to the checkpointer's
     * dirty map, and before that entry in the dirty map is processed by the
     * checkpointer. [#16523]
     *
     *        Root INa
     *        /      \
     *      INb      ...
     *      /
     *    INc
     *    /
     *  BINd
     *
     * The scenario that causes the bug is:
     *
     * 1) Prior to the final checkpoint, the cleaner processes a log file
     * containing BINd. The cleaner marks BINd dirty so that it will be
     * flushed prior to the end of the next checkpoint, at which point the
     * file containing BINd will be deleted. The cleaner also calls
     * setProhibitNextDelta on BINd to ensure that a full version will be
     * logged.
     *
     * 2) At checkpoint start, BINd is added to the checkpointer's dirty map.
     * It so happens that INa is also dirty, perhaps as the result of a
     * split, and added to the dirty map. The checkpointer's max flush level
     * is 4.
     *
     * 3) The evictor flushes BINd and then its parent INc. Both are logged
     * provisionally, since their level is less than 4, the checkpointer's
     * max flush level. INb, the parent of INc, is dirty.
     *
     * 4) INc, along with BINd, is loaded back into the Btree as the result
     * of reading an LN in BINd. INc and BINd are both non-dirty. INb, the
     * parent of INc, is still dirty.
     *
     * 5) The checkpointer processes its reference to BINd in the dirty map.
     * It finds that BINd is not dirty, so it does not need to be logged. It
     * attempts to add the parent, INc, to the dirty map in order to
     * propagate changes upward. However, because INc is not dirty, it is
     * not added to the dirty map -- this was the bug; it should be added
     * even if not dirty. So as the result of this step, the checkpointer
     * does no logging and does not add anything to the dirty map.
     *
     * 6) The checkpointer logs INa (it was dirty at the start of the
     * checkpoint) and the checkpoint finishes. It deletes the cleaned log
     * file that contains the original version of BINd.
     *
     * The key thing is that INb is now dirty and was not logged. It should
     * have been logged as the result of being an ancestor of BINd, which was
     * in the dirty map.
Its parent INa was logged, but does not refer to the 1474 * latest version of INb/INc/BINd. 1475 * 1476 * 7) Now we recover. INc and BINd, which were evicted during step (3), 1477 * are not replayed because they are provisional -- they are lost. When a 1478 * search for an LN in BINd is performed, we traverse down to the old 1479 * version of BINd, which causes LogFileNotFound. 1480 * 1481 * The fix is to add INc to the dirty map at step (5), even though it is 1482 * not dirty. When the reference to INc in the dirty map is processed we 1483 * will not log INc, but we will add its parent INb to the dirty map. Then 1484 * when the reference to INb is processed, it will be logged because it is 1485 * dirty. Then INa is logged and refers to the latest version of 1486 * INb/INc/BINd. 1487 * 1488 * This problem could only occur with a Btree of depth 4 or greater. 1489 */ 1490 @Test 1491 public void testEvictionDuringCheckpoint() 1492 throws DatabaseException { 1493 1494 /* Use small fanout to create a deep tree. */ 1495 final int FANOUT = 6; 1496 final int N_KEYS = FANOUT * FANOUT * FANOUT; 1497 1498 /* Open environment without interference of daemon threads. */ 1499 EnvironmentConfig envConfig = TestUtils.initEnvConfig(); 1500 envConfig.setAllowCreate(true); 1501 envConfig.setConfigParam 1502 (EnvironmentParams.ENV_RUN_CHECKPOINTER.getName(), "false"); 1503 envConfig.setConfigParam 1504 (EnvironmentParams.ENV_RUN_CLEANER.getName(), "false"); 1505 envConfig.setConfigParam 1506 (EnvironmentParams.ENV_RUN_INCOMPRESSOR.getName(), "false"); 1507 envConfig.setConfigParam 1508 (EnvironmentParams.ENV_RUN_EVICTOR.getName(), "false"); 1509 if (envMultiSubDir) { 1510 envConfig.setConfigParam(EnvironmentConfig.LOG_N_DATA_DIRECTORIES, 1511 DATA_DIRS + ""); 1512 } 1513 env = new Environment(envHome, envConfig); 1514 final EnvironmentImpl envImpl = 1515 DbInternal.getEnvironmentImpl(env); 1516 1517 /* Open database. */ 1518 DatabaseConfig dbConfig = new DatabaseConfig(); 1519 dbConfig.setAllowCreate(true); 1520 dbConfig.setNodeMaxEntries(FANOUT); 1521 exampleDb = env.openDatabase(null, "foo", dbConfig); 1522 DatabaseImpl dbImpl = DbInternal.getDatabaseImpl(exampleDb); 1523 1524 /* Write to database to create a 4 level Btree. */ 1525 final DatabaseEntry keyEntry = new DatabaseEntry(); 1526 final DatabaseEntry dataEntry = new DatabaseEntry(new byte[0]); 1527 int nRecords; 1528 for (nRecords = 1;; nRecords += 1) { 1529 LongBinding.longToEntry(nRecords, keyEntry); 1530 assertSame(OperationStatus.SUCCESS, 1531 exampleDb.put(null, keyEntry, dataEntry)); 1532 if (nRecords % 10 == 0) { 1533 int level = envImpl.getDbTree().getHighestLevel(dbImpl); 1534 if ((level & IN.LEVEL_MASK) >= 4) { 1535 break; 1536 } 1537 } 1538 } 1539 1540 /* Flush all dirty nodes. */ env.sync()1541 env.sync(); 1542 1543 /* Get BINd and its ancestors. Mark BINd and INa dirty. */ 1544 final IN nodeINa = dbImpl.getTree().getRootIN(CacheMode.DEFAULT); nodeINa.releaseLatch()1545 nodeINa.releaseLatch(); 1546 final IN nodeINb = (IN) nodeINa.getTarget(0); 1547 final IN nodeINc = (IN) nodeINb.getTarget(0); 1548 final BIN nodeBINd = (BIN) nodeINc.getTarget(0); 1549 assertNotNull(nodeBINd); 1550 nodeINa.setDirty(true); 1551 nodeBINd.setDirty(true); 1552 1553 /* 1554 * The test hook is called after creating the checkpoint dirty map and 1555 * just before flushing dirty nodes. 
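         * The hook below simulates steps (3) and (4) of the scenario
         * described in the method comment: it evicts BINd and INc (logging
         * them provisionally) and then fetches the left-most record so that
         * non-dirty copies of both nodes are loaded back into the Btree
         * before the checkpointer processes its dirty map.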
         */
        final StringBuilder hookCalledFlag = new StringBuilder();
        Checkpointer.setBeforeFlushHook(new TestHook() {
            public void doHook() {
                hookCalledFlag.append(1);
                /* Don't call the test hook repeatedly. */
                Checkpointer.setBeforeFlushHook(null);
                try {
                    /* Evict BINd and INc. */
                    simulateEviction(env, envImpl, nodeBINd, nodeINc);
                    simulateEviction(env, envImpl, nodeINc, nodeINb);

                    /*
                     * Force BINd and INc to be loaded into cache by fetching
                     * the left-most record.
                     *
                     * Note that nodeINc and nodeBINd are different instances
                     * and are no longer in the Btree but we don't change
                     * these variables because they are final. They should
                     * not be used past this point.
                     */
                    LongBinding.longToEntry(1, keyEntry);
                    assertSame(OperationStatus.SUCCESS,
                               exampleDb.get(null, keyEntry, dataEntry, null));
                } catch (DatabaseException e) {
                    throw new RuntimeException(e);
                }
            }
            public Object getHookValue() {
                throw new UnsupportedOperationException();
            }
            public void doIOHook() {
                throw new UnsupportedOperationException();
            }
            public void hookSetup() {
                throw new UnsupportedOperationException();
            }
            public void doHook(Object obj) {
                throw new UnsupportedOperationException();
            }
        });
        env.checkpoint(FORCE_CONFIG);
        assertTrue(hookCalledFlag.length() > 0);
        assertTrue(!nodeINa.getDirty());
        assertTrue(!nodeINb.getDirty()); /* This failed before the bug fix. */

        closeEnv();
    }

    /**
     * Simulate eviction by logging this node, updating the LSN in its
     * parent slot, setting the Node to null in the parent slot, and
     * removing the IN from the INList. Logging is provisional. The
     * parent is dirtied. May not be called unless this node is dirty and
     * none of its children are dirty. Children may be resident.
1612 */ 1613 private void simulateEviction(Environment env, 1614 EnvironmentImpl envImpl, 1615 IN nodeToEvict, 1616 IN parentNode) 1617 throws DatabaseException { 1618 1619 assertTrue("not dirty " + nodeToEvict.getNodeId(), 1620 nodeToEvict.getDirty()); 1621 assertTrue(!hasDirtyChildren(nodeToEvict)); 1622 1623 parentNode.latch(); 1624 1625 long lsn = TestUtils.logIN( 1626 env, nodeToEvict, false /*allowDeltas*/, true /*provisional*/, 1627 parentNode); 1628 1629 int index; 1630 for (index = 0;; index += 1) { 1631 if (index >= parentNode.getNEntries()) { 1632 fail(); 1633 } 1634 if (parentNode.getTarget(index) == nodeToEvict) { 1635 break; 1636 } 1637 } 1638 1639 nodeToEvict.latch(); 1640 1641 envImpl.getEvictor().remove(nodeToEvict); 1642 envImpl.getInMemoryINs().remove(nodeToEvict); 1643 parentNode.updateNode(index, null /*node*/, lsn, 0 /*lastLoggedSize*/); 1644 1645 nodeToEvict.releaseLatch(); 1646 parentNode.releaseLatch(); 1647 } 1648 1649 private boolean hasDirtyChildren(IN parent) { 1650 for (int i = 0; i < parent.getNEntries(); i += 1) { 1651 Node child = parent.getTarget(i); 1652 if (child instanceof IN) { 1653 IN in = (IN) child; 1654 if (in.getDirty()) { 1655 return true; 1656 } 1657 } 1658 } 1659 return false; 1660 } 1661 1662 @Test 1663 public void testMultiCleaningBug() 1664 throws DatabaseException { 1665 1666 initEnv(true, false); 1667 1668 final EnvironmentImpl envImpl = DbInternal.getEnvironmentImpl(env); 1669 final Cleaner cleaner = envImpl.getCleaner(); 1670 1671 Map<String, Set<String>> expectedMap = 1672 new HashMap<String, Set<String>>(); 1673 doLargePut(expectedMap, 1000, 1, true); 1674 modifyData(expectedMap, 1, true); 1675 checkData(expectedMap); 1676 1677 final TestHook hook = new TestHook() { 1678 public void doHook() { 1679 /* Signal that hook was called. */ 1680 if (synchronizer != 99) { 1681 synchronizer = 1; 1682 } 1683 /* Wait for signal to proceed with cleaning. */ 1684 while (synchronizer != 2 && 1685 synchronizer != 99 && 1686 !Thread.interrupted()) { 1687 Thread.yield(); 1688 } 1689 } 1690 public Object getHookValue() { 1691 throw new UnsupportedOperationException(); 1692 } 1693 public void doIOHook() throws IOException { 1694 throw new UnsupportedOperationException(); 1695 } 1696 public void hookSetup() { 1697 throw new UnsupportedOperationException(); 1698 } 1699 public void doHook(Object obj) { 1700 throw new UnsupportedOperationException(); 1701 } 1702 }; 1703 1704 junitThread = new JUnitThread("TestMultiCleaningBug") { 1705 public void testBody() 1706 throws DatabaseException { 1707 1708 try { 1709 while (synchronizer != 99) { 1710 /* Wait for initial state. */ 1711 while (synchronizer != 0 && 1712 synchronizer != 99 && 1713 !Thread.interrupted()) { 1714 Thread.yield(); 1715 } 1716 /* Clean with hook set, hook is called next. */ 1717 cleaner.setFileChosenHook(hook); 1718 env.cleanLog(); 1719 /* Signal that cleaning is done. */ 1720 if (synchronizer != 99) { 1721 synchronizer = 3; 1722 } 1723 } 1724 } catch (Throwable e) { 1725 e.printStackTrace(); 1726 } 1727 } 1728 }; 1729 1730 /* Kick off thread above. */ 1731 synchronizer = 0; 1732 junitThread.start(); 1733 1734 for (int i = 0; i < 100 && junitThread.isAlive(); i += 1) { 1735 /* Wait for hook to be called when a file is chosen. */ 1736 while (synchronizer != 1 && junitThread.isAlive()) { 1737 Thread.yield(); 1738 } 1739 /* Allow the thread to clean the chosen file. */ 1740 synchronizer = 2; 1741 /* But immediately clean here, which could select the same file. 
    @Test
    public void testMultiCleaningBug()
        throws DatabaseException {

        initEnv(true, false);

        final EnvironmentImpl envImpl = DbInternal.getEnvironmentImpl(env);
        final Cleaner cleaner = envImpl.getCleaner();

        Map<String, Set<String>> expectedMap =
            new HashMap<String, Set<String>>();
        doLargePut(expectedMap, 1000, 1, true);
        modifyData(expectedMap, 1, true);
        checkData(expectedMap);

        final TestHook hook = new TestHook() {
            public void doHook() {
                /* Signal that the hook was called. */
                if (synchronizer != 99) {
                    synchronizer = 1;
                }
                /* Wait for the signal to proceed with cleaning. */
                while (synchronizer != 2 &&
                       synchronizer != 99 &&
                       !Thread.interrupted()) {
                    Thread.yield();
                }
            }
            public Object getHookValue() {
                throw new UnsupportedOperationException();
            }
            public void doIOHook() throws IOException {
                throw new UnsupportedOperationException();
            }
            public void hookSetup() {
                throw new UnsupportedOperationException();
            }
            public void doHook(Object obj) {
                throw new UnsupportedOperationException();
            }
        };

        junitThread = new JUnitThread("TestMultiCleaningBug") {
            public void testBody()
                throws DatabaseException {

                try {
                    while (synchronizer != 99) {
                        /* Wait for the initial state. */
                        while (synchronizer != 0 &&
                               synchronizer != 99 &&
                               !Thread.interrupted()) {
                            Thread.yield();
                        }
                        /* Clean with the hook set; the hook is called next. */
                        cleaner.setFileChosenHook(hook);
                        env.cleanLog();
                        /* Signal that cleaning is done. */
                        if (synchronizer != 99) {
                            synchronizer = 3;
                        }
                    }
                } catch (Throwable e) {
                    e.printStackTrace();
                }
            }
        };

        /* Kick off the thread above. */
        synchronizer = 0;
        junitThread.start();

        for (int i = 0; i < 100 && junitThread.isAlive(); i += 1) {
            /* Wait for the hook to be called when a file is chosen. */
            while (synchronizer != 1 && junitThread.isAlive()) {
                Thread.yield();
            }
            /* Allow the thread to clean the chosen file. */
            synchronizer = 2;
            /*
             * But immediately clean here, which could select the same file.
             */
            cleaner.setFileChosenHook(null);
            env.cleanLog();
            /* Wait for both cleaner runs to finish. */
            while (synchronizer != 3 && junitThread.isAlive()) {
                Thread.yield();
            }
            /* Make more waste to be cleaned. */
            modifyData(expectedMap, 1, true);
            synchronizer = 0;
        }

        synchronizer = 99;

        try {
            junitThread.finishTest();
            junitThread = null;
        } catch (Throwable e) {
            e.printStackTrace();
            fail(e.toString());
        }

        closeEnv();
    }

    /**
     * Ensures that LN migration is immediate. Lazy migration is no longer
     * used, even if configured.
     */
    @SuppressWarnings("deprecation")
    public void testCleanerLazyMigrationConfig()
        throws DatabaseException {

        /* Open environment without interference of daemon threads. */
        EnvironmentConfig envConfig = TestUtils.initEnvConfig();
        DbInternal.disableParameterValidation(envConfig);
        envConfig.setAllowCreate(true);
        envConfig.setTransactional(true);
        envConfig.setConfigParam
            (EnvironmentParams.CLEANER_MIN_UTILIZATION.getName(), "80");
        envConfig.setConfigParam
            (EnvironmentParams.LOG_FILE_MAX.getName(),
             Integer.toString(FILE_SIZE));
        envConfig.setConfigParam
            (EnvironmentParams.ENV_RUN_CHECKPOINTER.getName(), "false");
        envConfig.setConfigParam
            (EnvironmentParams.ENV_RUN_CLEANER.getName(), "false");
        envConfig.setConfigParam
            (EnvironmentParams.ENV_RUN_INCOMPRESSOR.getName(), "false");
        envConfig.setConfigParam
            (EnvironmentParams.ENV_RUN_EVICTOR.getName(), "false");

        /* Configure lazy migration, even though it is deprecated. */
        envConfig.setConfigParam
            (EnvironmentConfig.CLEANER_LAZY_MIGRATION, "true");

        if (envMultiSubDir) {
            envConfig.setConfigParam(EnvironmentConfig.LOG_N_DATA_DIRECTORIES,
                                     DATA_DIRS + "");
        }

        env = new Environment(envHome, envConfig);
        final EnvironmentImpl envImpl =
            DbInternal.getEnvironmentImpl(env);

        /* Open database. */
        DatabaseConfig dbConfig = new DatabaseConfig();
        dbConfig.setAllowCreate(true);
        dbConfig.setTransactional(true);
        exampleDb = env.openDatabase(null, "foo", dbConfig);

        /* Clear stats. */
        StatsConfig clearStats = new StatsConfig();
        clearStats.setClear(true);
        env.getStats(clearStats);

        /* Insert and update data. */
        Map<String, Set<String>> expectedMap =
            new HashMap<String, Set<String>>();
        doLargePut(expectedMap, 1000, 1, true);
        modifyData(expectedMap, 1, true);
        checkData(expectedMap);

        /* Clean until no more files can be cleaned. */
        while (true) {
            long files = env.cleanLog();
            if (files == 0) {
                break;
            }
        }

        /* There should be cleaning but no checkpoint or eviction. */
        EnvironmentStats stats = env.getStats(null);
        assertEquals(0, stats.getNEvictPasses());
        assertEquals(0, stats.getNCheckpoints());
        assertTrue(stats.getNCleanerRuns() > 0);

        /* Clear stats. */
        env.getStats(clearStats);

        /* Flush all dirty nodes. */
        env.sync();

        /* Because LN migration is immediate, the sync migrates no LNs. */
        stats = env.getStats(null);
        assertEquals(0, stats.getNLNsMigrated());

        closeEnv();
    }
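
    /*
     * Background for the next test, as an informal summary: FileSummaryLNs
     * are the records of the internal utilization database that track
     * per-file utilization. The cleaner reads and writes them constantly,
     * and they are deleted when a cleaned log file is deleted, so being able
     * to delete them without first fetching them avoids cache misses.
     */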
    /**
     * Checks that no fetch misses occur when deleting FileSummaryLNs.
     */
    @Test
    public void testOptimizedFileSummaryLNDeletion() {

        /* Open environment without interference of daemon threads. */
        EnvironmentConfig envConfig = TestUtils.initEnvConfig();
        DbInternal.disableParameterValidation(envConfig);
        envConfig.setAllowCreate(true);
        envConfig.setTransactional(true);
        envConfig.setConfigParam(EnvironmentParams.LOG_FILE_MAX.getName(),
                                 Integer.toString(FILE_SIZE));
        envConfig.setConfigParam
            (EnvironmentParams.ENV_RUN_CHECKPOINTER.getName(), "false");
        envConfig.setConfigParam
            (EnvironmentParams.ENV_RUN_CLEANER.getName(), "false");
        envConfig.setConfigParam
            (EnvironmentParams.ENV_RUN_INCOMPRESSOR.getName(), "false");
        envConfig.setConfigParam
            (EnvironmentParams.ENV_RUN_EVICTOR.getName(), "false");

        if (envMultiSubDir) {
            envConfig.setConfigParam(EnvironmentConfig.LOG_N_DATA_DIRECTORIES,
                                     DATA_DIRS + "");
        }

        env = new Environment(envHome, envConfig);
        final EnvironmentImpl envImpl =
            DbInternal.getEnvironmentImpl(env);

        /* Open database. */
        DatabaseConfig dbConfig = new DatabaseConfig();
        dbConfig.setAllowCreate(true);
        dbConfig.setTransactional(true);
        exampleDb = env.openDatabase(null, "foo", dbConfig);

        /* Insert, update and delete data. */
        Map<String, Set<String>> expectedMap =
            new HashMap<String, Set<String>>();
        doLargePut(expectedMap, 1000, 1, true);
        modifyData(expectedMap, 1, false);
        checkData(expectedMap);
        deleteData(expectedMap, false, true);
        checkData(expectedMap);

        /* Clear stats. */
        StatsConfig clearStats = new StatsConfig();
        clearStats.setClear(true);
        env.getStats(clearStats);

        /* Clean until no more files can be cleaned. */
        while (true) {
            long files = env.cleanLog();
            if (files == 0) {
                break;
            }
        }

        /* There should be cleaning but no checkpoint or eviction. */
        EnvironmentStats stats = env.getStats(clearStats);
        assertTrue(stats.getNCleanerRuns() > 0);
        assertEquals(0, stats.getNEvictPasses());
        assertEquals(0, stats.getNCheckpoints());

        /*
         * Flush all dirty nodes, which should delete the cleaned log files
         * and their FileSummaryLNs and also update the related MapLNs.
         */
        env.sync();

        /*
         * Before the optimization of FileSummaryLN deletion there were 16
         * cache misses. Without the optimization, the misses occurred because
         * FileSummaryLNs are always evicted after being read or written, and
         * they were then fetched again before being deleted. Now that
         * FileSummaryLNs can be deleted without fetching, there should be no
         * misses.
         */
        stats = env.getStats(clearStats);
        assertEquals(0, stats.getNLNsFetchMiss());

        closeEnv();
    }
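
    /*
     * The next test relies on two IN-representation stats. Informally (as
     * assumed here): nINNoTarget counts INs whose child-target array is
     * empty because no children are resident, and nINSparseTarget counts INs
     * that use a compact sparse array for a few resident children. With
     * CacheMode.EVICT_LN, LNs are evicted after each operation, so most BINs
     * should have no resident children at all.
     */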
    /**
     * Ensures that LN migration does not cause the representation of INs to
     * change when migration places an LN in the slot and then evicts it
     * afterwards. The representation should remain "no target" (empty), if
     * that was the BIN representation before the LN migration. Before the
     * bug fix [#21734] we neglected to call BIN.compactMemory after the
     * eviction. The fix is for the cleaner to call BIN.evictLN, rather than
     * doing the eviction separately.
     */
    public void testCompactBINAfterMigrateLN() {

        /* Open environment without interference of daemon threads. */
        final EnvironmentConfig envConfig = TestUtils.initEnvConfig();
        DbInternal.disableParameterValidation(envConfig);
        envConfig.setAllowCreate(true);
        envConfig.setTransactional(true);
        envConfig.setConfigParam
            (EnvironmentParams.CLEANER_MIN_UTILIZATION.getName(), "80");
        envConfig.setConfigParam
            (EnvironmentParams.LOG_FILE_MAX.getName(),
             Integer.toString(FILE_SIZE));
        envConfig.setConfigParam
            (EnvironmentParams.ENV_RUN_CHECKPOINTER.getName(), "false");
        envConfig.setConfigParam
            (EnvironmentParams.ENV_RUN_CLEANER.getName(), "false");
        envConfig.setConfigParam
            (EnvironmentParams.ENV_RUN_INCOMPRESSOR.getName(), "false");
        envConfig.setConfigParam
            (EnvironmentParams.ENV_RUN_EVICTOR.getName(), "false");

        if (envMultiSubDir) {
            envConfig.setConfigParam(EnvironmentConfig.LOG_N_DATA_DIRECTORIES,
                                     DATA_DIRS + "");
        }

        env = new Environment(envHome, envConfig);
        final EnvironmentImpl envImpl =
            DbInternal.getEnvironmentImpl(env);

        /* Open database with CacheMode.EVICT_LN. */
        final DatabaseConfig dbConfig = new DatabaseConfig();
        dbConfig.setAllowCreate(true);
        dbConfig.setTransactional(true);
        dbConfig.setCacheMode(CacheMode.EVICT_LN);
        exampleDb = env.openDatabase(null, "foo", dbConfig);

        /* Clear stats. */
        final StatsConfig clearStats = new StatsConfig();
        clearStats.setClear(true);
        env.getStats(clearStats);

        /* Insert and update data. */
        final Map<String, Set<String>> expectedMap =
            new HashMap<String, Set<String>>();
        doLargePut(expectedMap, 1000, 1, true);
        modifyData(expectedMap, 1, true);
        checkData(expectedMap);

        EnvironmentStats stats = env.getStats(clearStats);

        /* There should be no checkpoint or eviction. */
        assertEquals(0, stats.getNEvictPasses());
        assertEquals(0, stats.getNCheckpoints());

        /*
         * Due to using EVICT_LN mode, the representation of most INs should
         * be "no target" (empty).
         */
        final long nNoTarget = stats.getNINNoTarget();
        assertTrue(stats.toString(), nNoTarget > stats.getNINSparseTarget());

        /* Clean until no more files can be cleaned. */
        while (true) {
            final long files = env.cleanLog();
            if (files == 0) {
                break;
            }
        }

        stats = env.getStats(null);

        /* There should be no checkpoint or eviction. */
        assertEquals(0, stats.getNEvictPasses());
        assertEquals(0, stats.getNCheckpoints());

        /* A bunch of LNs should have been migrated. */
        assertTrue(stats.getNCleanerRuns() > 0);
        assertTrue(stats.getNLNsMigrated() > 100);

        /*
         * Most importantly, LN migration should not cause the representation
         * of INs to change -- most should still be "no target" (empty).
         * [#21734]
         *
         * The reason that nNoTarget may drop by one (hence the "- 1" below)
         * is apparently that a BIN in the FileSummaryLN database has changed.
         */
        final long nNoTarget2 = stats.getNINNoTarget();
        assertTrue("nNoTarget=" + nNoTarget + " nNoTarget2=" + nNoTarget2,
                   nNoTarget2 >= nNoTarget - 1);

        closeEnv();
    }
}