/*-
 * See the file LICENSE for redistribution information.
 *
 * Copyright (c) 2002, 2014 Oracle and/or its affiliates.  All rights reserved.
 *
 */

package com.sleepycat.je.log;

import static com.sleepycat.je.log.LogStatDefinition.GROUP_DESC;
import static com.sleepycat.je.log.LogStatDefinition.GROUP_NAME;
import static com.sleepycat.je.log.LogStatDefinition.LOGMGR_END_OF_LOG;
import static com.sleepycat.je.log.LogStatDefinition.LOGMGR_REPEAT_FAULT_READS;
import static com.sleepycat.je.log.LogStatDefinition.LOGMGR_TEMP_BUFFER_WRITES;

import java.io.FileNotFoundException;
import java.io.IOException;
import java.io.RandomAccessFile;
import java.nio.ByteBuffer;
import java.util.Collection;
import java.util.Map;
import java.util.Queue;
import java.util.concurrent.ConcurrentLinkedQueue;
import java.util.concurrent.CountDownLatch;

import com.sleepycat.je.DatabaseException;
import com.sleepycat.je.EnvironmentFailureException;
import com.sleepycat.je.StatsConfig;
import com.sleepycat.je.cleaner.DbFileSummary;
import com.sleepycat.je.cleaner.LocalUtilizationTracker;
import com.sleepycat.je.cleaner.TrackedFileSummary;
import com.sleepycat.je.cleaner.UtilizationTracker;
import com.sleepycat.je.config.EnvironmentParams;
import com.sleepycat.je.dbi.DatabaseImpl;
import com.sleepycat.je.dbi.DbConfigManager;
import com.sleepycat.je.dbi.EnvironmentFailureReason;
import com.sleepycat.je.dbi.EnvironmentImpl;
import com.sleepycat.je.log.entry.LogEntry;
import com.sleepycat.je.recovery.Checkpointer;
import com.sleepycat.je.txn.WriteLockInfo;
import com.sleepycat.je.utilint.DbLsn;
import com.sleepycat.je.utilint.LSNStat;
import com.sleepycat.je.utilint.LongStat;
import com.sleepycat.je.utilint.StatGroup;
import com.sleepycat.je.utilint.TestHook;
import com.sleepycat.je.utilint.TestHookExecute;
import com.sleepycat.je.utilint.VLSN;

/**
 * The LogManager supports reading and writing to the JE log.
 * The writing of data to the log is serialized via the logWriteMutex.
 * Typically space is allocated under the LWL. The client computes
 * the checksum and copies the data into the log buffer (not holding
 * the LWL).
 */
public class LogManager {

    /* No-op loggable object. */
    private static final String DEBUG_NAME = LogManager.class.getName();

    private final LogBufferPool logBufferPool; // log buffers
    private final Object logWriteMutex;        // synchronizes log writes
    private final boolean doChecksumOnRead;    // if true, do checksum on read
    private final FileManager fileManager;     // access to files
    private final FSyncManager grpManager;
    private final EnvironmentImpl envImpl;
    private final boolean readOnly;

    /* How many bytes to read when faulting in. */
    private final int readBufferSize;

    /* The last LSN in the log during recovery. */
    private long lastLsnAtRecovery = DbLsn.NULL_LSN;

    /* Stats */
    private final StatGroup stats;

    /*
     * Number of times we have to repeat a read when we fault in an object
     * because the initial read was too small.
     */
    private final LongStat nRepeatFaultReads;

    /*
     * Number of times we have to use the temporary marshalling buffer to
     * write to the log.
     */
    private final LongStat nTempBufferWrites;

    /* The location of the next entry to be written to the log. */
    private final LSNStat endOfLog;

    /*
     * Used to determine if we switched log buffers.
     * For NOSYNC durability, if we switched log buffers, the thread
     * will write the previous dirty buffers.
     */
    private LogBuffer prevLogBuffer = null;

    /* For unit tests */
    private TestHook readHook; // used for generating exceptions on log reads

    /* For unit tests. */
    private TestHook<Object> delayVLSNRegisterHook;
    private TestHook<CountDownLatch> flushHook;

    /* A queue to hold log entries which are to be logged lazily. */
    private final Queue<LazyQueueEntry> lazyLogQueue =
        new ConcurrentLinkedQueue<LazyQueueEntry>();

    /*
     * An entry in the lazyLogQueue. A struct to hold the entry and repContext.
     */
    private static class LazyQueueEntry {
        private final LogEntry entry;
        private final ReplicationContext repContext;

        private LazyQueueEntry(LogEntry entry, ReplicationContext repContext) {
            this.entry = entry;
            this.repContext = repContext;
        }
    }

    /**
     * There is a single log manager per database environment.
     */
    public LogManager(EnvironmentImpl envImpl,
                      boolean readOnly)
        throws DatabaseException {

        /* Set up log buffers. */
        this.envImpl = envImpl;
        this.fileManager = envImpl.getFileManager();
        this.grpManager = new FSyncManager(this.envImpl);
        DbConfigManager configManager = envImpl.getConfigManager();
        this.readOnly = readOnly;
        logBufferPool = new LogBufferPool(fileManager, envImpl);

        /* See if we're configured to do a checksum when reading in objects. */
        doChecksumOnRead =
            configManager.getBoolean(EnvironmentParams.LOG_CHECKSUM_READ);

        logWriteMutex = new Object();
        readBufferSize =
            configManager.getInt(EnvironmentParams.LOG_FAULT_READ_SIZE);

        /* Do the stats definitions. */
        stats = new StatGroup(GROUP_NAME, GROUP_DESC);
        nRepeatFaultReads = new LongStat(stats, LOGMGR_REPEAT_FAULT_READS);
        nTempBufferWrites = new LongStat(stats, LOGMGR_TEMP_BUFFER_WRITES);
        endOfLog = new LSNStat(stats, LOGMGR_END_OF_LOG);
    }

    public boolean getChecksumOnRead() {
        return doChecksumOnRead;
    }

    public long getLastLsnAtRecovery() {
        return lastLsnAtRecovery;
    }

    public void setLastLsnAtRecovery(long lastLsnAtRecovery) {
        this.lastLsnAtRecovery = lastLsnAtRecovery;
    }

    /**
     * Reset the pool when the cache is resized. This method is called after
     * the memory budget has been calculated.
     */
    public void resetPool(DbConfigManager configManager)
        throws DatabaseException {

        synchronized (logWriteMutex) {
            logBufferPool.reset(configManager);
        }
    }

    /*
     * Writing to the log
     */
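    /*
     * Illustrative sketch of the public write API defined below. This is
     * hypothetical caller code, not part of this class; obtaining the
     * LogManager via envImpl.getLogManager() is an assumption made for
     * illustration only.
     *
     *   LogManager logManager = envImpl.getLogManager();
     *
     *   // Buffered write: the entry may remain in a log buffer.
     *   lsn = logManager.log(entry, repContext);
     *
     *   // Flush to the file system, but do not fsync.
     *   lsn = logManager.logForceFlush(entry, false, repContext);
     *
     *   // Flush and fsync before returning.
     *   lsn = logManager.logForceFlush(entry, true, repContext);
     *
     *   // Defer the write: the entry is queued and written just before the
     *   // next regular log operation.
     *   logManager.logLazily(entry, repContext);
     */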

    /**
     * Log this single object and force a write of the log files.
     * @param entry object to be logged
     * @param fsyncRequired if true, log files should also be fsynced.
     * @return LSN of the new log entry
     */
    public long logForceFlush(LogEntry entry,
                              boolean fsyncRequired,
                              ReplicationContext repContext)
        throws DatabaseException {

        return log(entry,
                   Provisional.NO,
                   true,           // flush required
                   fsyncRequired,
                   false,          // forceNewLogFile
                   repContext);    // repContext
    }

    /**
     * Log this single object and force a flip of the log files.
     * @param entry object to be logged
     * @return LSN of the new log entry
     */
    public long logForceFlip(LogEntry entry)
        throws DatabaseException {

        return log(entry,
                   Provisional.NO,
                   true,           // flush required
                   false,          // fsync required
                   true,           // forceNewLogFile
                   ReplicationContext.NO_REPLICATE);
    }

    /**
     * Write a log entry.
     * @param entry object to be logged
     * @return LSN of the new log entry
     */
    public long log(LogEntry entry, ReplicationContext repContext)
        throws DatabaseException {

        return log(entry,
                   Provisional.NO,
                   false,          // flush required
                   false,          // fsync required
                   false,          // forceNewLogFile
                   repContext);
    }

    /**
     * Write a log entry lazily.
     * @param entry object to be logged
     */
    public void logLazily(LogEntry entry, ReplicationContext repContext) {
        lazyLogQueue.add(new LazyQueueEntry(entry, repContext));
    }

    /**
     * Translates individual log params to LogItem and LogContext fields.
     */
    private long log(LogEntry entry,
                     Provisional provisional,
                     boolean flushRequired,
                     boolean fsyncRequired,
                     boolean forceNewLogFile,
                     ReplicationContext repContext)
        throws DatabaseException {

        LogItem item = new LogItem();
        item.entry = entry;
        item.provisional = provisional;
        item.repContext = repContext;

        LogContext context = new LogContext();
        context.flushRequired = flushRequired;
        context.fsyncRequired = fsyncRequired;
        context.forceNewLogFile = forceNewLogFile;

        log(item, context);

        return item.newLsn;
    }

    /**
     * Log an item, first logging any items on the lazyLogQueue, and finally
     * flushing and sync'ing (if requested).
     */
    public void log(LogItem item, LogContext context)
        throws DatabaseException {

        /*
         * In a read-only env we return NULL_LSN (the default value for
         * LogItem.newLsn) for all entries. We allow this to proceed, rather
         * than throwing an exception, to support logging INs for splits that
         * occur during recovery, for one reason. Logging LNs in a read-only
         * env is not allowed, and this is checked in the LN class.
         */
        if (readOnly) {
            return;
        }

        try {
            /* Flush any pending lazy entries. */
            LazyQueueEntry lqe = lazyLogQueue.poll();
            while (lqe != null) {
                LogItem lqeItem = new LogItem();
                lqeItem.entry = lqe.entry;
                lqeItem.provisional = Provisional.NO;
                lqeItem.repContext = lqe.repContext;

                LogContext lqeContext = new LogContext();

                logItem(lqeItem, lqeContext);
                lqe = lazyLogQueue.poll();
            }

            LogEntry logEntry = item.entry;

            /*
             * If possible, marshall this entry outside the log write latch to
             * allow greater concurrency by shortening the write critical
             * section. Note that the header may only be created during
             * marshalling because it calls entry.getSize().
             */
            if (logEntry.getLogType().marshallOutsideLatch()) {
                item.header = new LogEntryHeader
                    (logEntry, item.provisional, item.repContext);
                item.buffer = marshallIntoBuffer(item.header, logEntry);
            }

            logItem(item, context);

            if (context.fsyncRequired || context.flushRequired) {
                /* Flush log buffer and fsync. */
                grpManager.sync(context.fsyncRequired);
            } else if (context.switchedLogBuffer) {
                /*
                 * The operation does not require writing to the log file,
                 * but since we switched log buffers, this thread will write
                 * the previous dirty log buffers (not this thread's log
                 * record though). This is done for NOSYNC durability so that
                 * those types of transactions won't fill all the log
                 * buffers, which would force writes of the buffers under
                 * the log write latch.
                 */
                logBufferPool.writeDirty(false);
            }
            TestHookExecute.doHookIfSet(flushHook);

            /*
             * We've logged this log entry from the replication stream. Let
             * the Replicator know, so this node can create a VLSN->LSN
             * mapping. Do this before the ckpt so we have a better chance
             * of writing this mapping to disk.
             */
            if (item.repContext.inReplicationStream()) {

                assert (item.header.getVLSN() != null) :
                    "Unexpected null vlsn: " + item.header + " " +
                    item.repContext;

                /* Block the VLSN registration, used by unit tests. */
                TestHookExecute.doHookIfSet(delayVLSNRegisterHook);
                envImpl.registerVLSN(item);
            }

        } catch (EnvironmentFailureException e) {

            /*
             * Final checks are below for unexpected exceptions during the
             * critical write path. Most should be caught by
             * serialLogInternal, but the catches here account for other
             * exceptions above. Note that Errors must be caught here as well
             * as Exceptions. [#21929]
             *
             * If we've already invalidated the environment, rethrow so as not
             * to excessively wrap the exception.
             */
            if (!envImpl.isValid() ||
                FileManager.continueAfterWriteException()) {
                throw e;
            }
            throw EnvironmentFailureException.unexpectedException(envImpl, e);
        } catch (Exception e) {
            throw EnvironmentFailureException.unexpectedException(envImpl, e);
        } catch (Error e) {
            envImpl.invalidate(e);
            throw e;
        }

        /*
         * Periodically, as a function of how much data is written, ask the
         * checkpointer or the cleaner to wake up.
         */
        Checkpointer ckpter = envImpl.getCheckpointer();
        if (ckpter != null) {
            ckpter.wakeupAfterWrite();
        }
        if (context.wakeupCleaner) {
            envImpl.getUtilizationTracker().activateCleaner();
        }

        /* Update background writes. */
        if (context.backgroundIO) {
            envImpl.updateBackgroundWrites
                (context.totalNewSize, logBufferPool.getLogBufferSize());
        }
    }
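    /*
     * Outline of the write pipeline implemented by log(LogItem, LogContext)
     * above and its helpers below (a descriptive summary of the code in this
     * file, added for orientation):
     *
     *   1. Pending lazyLogQueue entries are logged first.
     *   2. If the entry type allows it, the entry is marshalled into a
     *      ByteBuffer outside the log write latch.
     *   3. logItem() -> serialLog() -> serialLogWork() allocates log space
     *      under the logWriteMutex; the marshalled bytes are then copied
     *      into the returned LogBufferSegment outside the latch.
     *   4. Depending on LogContext.flushRequired/fsyncRequired, the buffers
     *      are flushed and possibly fsynced; otherwise, if the log buffer
     *      was switched, the previous dirty buffers are written.
     */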

    private void logItem(LogItem item, LogContext context)
        throws IOException, DatabaseException {

        final UtilizationTracker tracker = envImpl.getUtilizationTracker();

        final boolean flushRequired =
            context.flushRequired &&
            !context.fsyncRequired;

        final LogWriteInfo lwi = serialLog(
            item, context, context.forceNewLogFile, flushRequired, tracker);

        if (lwi != null) {

            /*
             * Add checksum, prev offset, and VLSN to the entry.
             * Copy data into the log buffer.
             */
            item.buffer = item.header.addPostMarshallingInfo(
                item.buffer, lwi.fileOffset, lwi.vlsn);
            lwi.lbs.put(item.buffer);
        }

        /* Update obsolete info under the LWL. */
        updateObsolete(context, tracker);
    }

    /**
     * This method handles exceptions to be certain that the Environment is
     * invalidated when any exception occurs in the critical write path, and
     * it checks for an invalid environment to be sure that no subsequent
     * write is allowed. [#21929]
     *
     * Invalidation is necessary because a logging operation does not ensure
     * that the internal state -- correspondence of LSN pointer, log buffer
     * position and file position, and the integrity of the VLSN index
     * [#20919] -- is maintained correctly when an exception occurs. Allowing
     * a subsequent write can cause log corruption.
     */
    private LogWriteInfo serialLog(LogItem item,
                                   LogContext context,
                                   boolean forceNewLogFile,
                                   boolean flushRequired,
                                   UtilizationTracker tracker)
        throws IOException {

        synchronized (logWriteMutex) {
            /* Do not attempt to write with an invalid environment. */
            envImpl.checkIfInvalid();

            try {
                return serialLogWork(
                    item, context, forceNewLogFile, flushRequired, tracker);
            } catch (EnvironmentFailureException e) {

                /*
                 * If we've already invalidated the environment, rethrow so
                 * as not to excessively wrap the exception.
                 */
                if (!envImpl.isValid() ||
                    FileManager.continueAfterWriteException()) {
                    throw e;
                }
                /* Otherwise, invalidate the environment. */
                throw EnvironmentFailureException.unexpectedException(
                    envImpl, e);
            } catch (Exception e) {
                throw EnvironmentFailureException.unexpectedException(
                    envImpl, e);
            } catch (Error e) {
                /* Errors must be caught here as well as Exceptions. [#21929] */
                envImpl.invalidate(e);
                throw e;
            }
        }
    }

    /**
     * This method is used as part of writing data to the log. Called
     * under the LogWriteLatch.
     * Data is either written directly to the log file or space is allocated
     * in a LogBuffer. The LogWriteInfo object is used to save information
     * about the space allocated in the LogBuffer. The caller uses the object
     * to copy data into the underlying LogBuffer. A null return value
     * indicates that the item was written directly to the log, which occurs
     * when the data item is too big to fit into an empty LogBuffer.
     *
     * @param item to be written.
     * @param context log context.
     * @param forceNewLogFile if true force new log file.
     * @param flushRequired if true write will go to disk and not the deferred
     * queue
     * @param tracker utilization.
     * @return a LogWriteInfo object used to access allocated LogBuffer space.
     * If null, the data was written to the log.
     * @throws IOException
     */
    private LogWriteInfo serialLogWork(LogItem item,
                                       LogContext context,
                                       boolean forceNewLogFile,
                                       boolean flushRequired,
                                       UtilizationTracker tracker)
        throws IOException {

        long currentLsn = DbLsn.NULL_LSN;
        boolean usedTemporaryBuffer = false;
        int entrySize;
        long fileOffset;
        LogBufferSegment useBuffer;
        LogBuffer lastLogBuffer = null;
        VLSN vlsn = null;
        boolean marshallOutsideLatch = (item.buffer != null);

        /*
         * Do obsolete tracking before marshalling a FileSummaryLN into the
         * log buffer so that a FileSummaryLN counts itself.
         * countObsoleteNode must be called before computing the entry
         * size, since it can change the size of a FileSummaryLN entry that
         * we're logging.
         */
        LogEntryType entryType = item.entry.getLogType();

        if (!DbLsn.isTransientOrNull(item.oldLsn)) {
            if (context.obsoleteDupsAllowed) {
                tracker.countObsoleteNodeDupsAllowed(
                    item.oldLsn, entryType, item.oldSize, context.nodeDb);
            } else {
                tracker.countObsoleteNode(
                    item.oldLsn, entryType, item.oldSize, context.nodeDb);
            }
        }

        /* Count auxOldLsn for same database; no specified size. */
        if (!DbLsn.isTransientOrNull(item.auxOldLsn)) {
            if (context.obsoleteDupsAllowed) {
                tracker.countObsoleteNodeDupsAllowed(
                    item.auxOldLsn, entryType, 0, context.nodeDb);
            } else {
                tracker.countObsoleteNode(
                    item.auxOldLsn, entryType, 0, context.nodeDb);
            }
        }

        /*
         * If an entry must be protected within the log write latch for
         * marshalling, take care to also calculate its size in the
         * protected section. Note that we have to get the size *before*
         * marshalling so that the currentLsn and size are correct for
         * utilization tracking.
         */
        if (marshallOutsideLatch) {
            entrySize = item.buffer.limit();
            assert item.header != null;
        } else {
            assert item.header == null;
            item.header = new LogEntryHeader
                (item.entry, item.provisional, item.repContext);
            entrySize = item.header.getEntrySize();
        }

        /*
         * Get the next free slot in the log, under the log write latch.
         * Bump the LSN values, which gives us a valid previous pointer,
         * which is part of the log entry header.
         * We need to bump the LSN first, and bumping the LSN must
         * be done within the log write latch.
         */
        if (forceNewLogFile) {
            fileManager.forceNewLogFile();
        }

        boolean flippedFile = fileManager.bumpLsn(entrySize);
        currentLsn = DbLsn.NULL_LSN;
        usedTemporaryBuffer = false;
        boolean success = false;

        try {
            currentLsn = fileManager.getLastUsedLsn();

            /*
             * countNewLogEntry and countObsoleteNodeInexact cannot change
             * a FileSummaryLN size, so they are safe to call after
             * getSizeForWrite.
             */
            if (tracker.countNewLogEntry
                (currentLsn, entryType, entrySize, context.nodeDb)) {
                context.wakeupCleaner = true;
            }

            /*
             * LN deletions and dup DB LNs are obsolete immediately. Inexact
             * counting is used to save resources because the cleaner knows
             * that all such LNs are obsolete.
             */
            if (item.entry.isImmediatelyObsolete(context.nodeDb)) {
                tracker.countObsoleteNodeInexact
                    (currentLsn, entryType, entrySize, context.nodeDb);
            }

            /*
             * This entry must be marshalled within the log write latch.
             */
            if (!marshallOutsideLatch) {
                assert item.buffer == null;
                item.buffer = marshallIntoBuffer(item.header, item.entry);
            }

            /* Sanity check */
            if (entrySize != item.buffer.limit()) {
                throw EnvironmentFailureException.unexpectedState
                    ("Logged entry entrySize= " + entrySize +
                     " but marshalledSize=" + item.buffer.limit() +
                     " type=" + entryType + " currentLsn=" +
                     DbLsn.getNoFormatString(currentLsn));
            }

            /*
             * Ask for a log buffer suitable for holding this new entry.
             * If the current log buffer is full, or if we flipped into a
             * new file, write it to disk and get a new, empty log buffer
             * to use. The returned buffer will be latched for write.
             */
            lastLogBuffer =
                logBufferPool.getWriteBuffer(entrySize, flippedFile);

            if (lastLogBuffer != prevLogBuffer) {
                context.switchedLogBuffer = true;
            }
            prevLogBuffer = lastLogBuffer;

            fileOffset = fileManager.getPrevEntryOffset();

            if (item.repContext.getClientVLSN() != null ||
                item.repContext.mustGenerateVLSN()) {

                if (item.repContext.mustGenerateVLSN()) {
                    vlsn = envImpl.bumpVLSN();
                } else {
                    vlsn = item.repContext.getClientVLSN();
                }
            }

            /*
             * If the LogBufferPool buffer (useBuffer) doesn't have
             * sufficient space (since they're fixed size), just use the
             * temporary buffer and throw it away when we're done. That
             * way we don't grow the LogBuffers in the pool permanently.
             * We risk an OOME on this temporary usage, but we'll risk it.
             * [#12674]
             */
            lastLogBuffer.latchForWrite();
            try {
                useBuffer = lastLogBuffer.allocate(entrySize);
                if (useBuffer == null) {
                    /* Add checksum, prev offset, and VLSN to the entry. */
                    item.buffer =
                        item.header.addPostMarshallingInfo(item.buffer,
                                                           fileOffset,
                                                           vlsn);

                    fileManager.writeLogBuffer
                        (new LogBuffer(item.buffer, currentLsn),
                         flushRequired);
                    usedTemporaryBuffer = true;
                    assert lastLogBuffer.getDataBuffer().position() == 0;
                    nTempBufferWrites.increment();
                }
            } finally {
                lastLogBuffer.release();
            }
            success = true;
        } finally {
            if (!success) {

                /*
                 * The LSN pointer, log buffer position, and corresponding
                 * file position march in lockstep.
                 *
                 * 1. We bump the LSN.
                 * 2. We copy the loggable entry into the log buffer.
                 * 3. We may try to write the log buffer.
                 *
                 * If we've failed to put the entry into the log buffer
                 * (2), we need to restore the old LSN state so that the log
                 * buffer doesn't have a hole. [SR #12638] If we fail after
                 * (2), we don't need to restore state, because log buffers
                 * will still match file positions.
                 *
                 * This assumes that the last possible activity was the
                 * write of the log buffers.
                 */
                fileManager.restoreLastPosition();

                /*
                 * If the entry was not written to the log, it will not be
                 * part of the replication stream, and we should reuse the
                 * vlsn.
                 */
                if (item.header.getVLSN() != null) {
                    envImpl.decrementVLSN();
                }
            }
        }

        /*
         * Set the lsn for the log buffer before giving up the lwl.
         * Readers will have to wait until the pin count is zero to access
         * the data.
         */
        if (!usedTemporaryBuffer) {
            lastLogBuffer.registerLsn(currentLsn);
        }

        /*
         * If the txn is not null, the first entry is an LN. Update the txn
         * with info about the latest LSN. Note that this has to happen
         * within the log write latch.
         */
        item.entry.postLogWork(item.header, currentLsn, vlsn);

        item.newLsn = currentLsn;
        item.newSize = entrySize;
        context.totalNewSize += entrySize;

        return (useBuffer == null ?
                null : new LogWriteInfo(useBuffer, vlsn, fileOffset));
    }
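    /*
     * Descriptive summary of the work serialLogWork() performs above while
     * the logWriteMutex is held (it restates the code, for orientation):
     *
     *   1. Count the superseded entry (oldLsn/auxOldLsn) as obsolete.
     *   2. Compute the entry size and bump the LSN (fileManager.bumpLsn),
     *      which also establishes the previous-entry pointer.
     *   3. Count the new entry for utilization, marshall it if that was not
     *      already done outside the latch, and obtain a write buffer.
     *   4. Allocate space in the LogBuffer, or, if the entry is too large,
     *      write it directly via a temporary buffer.
     *   5. On failure, restore the previous LSN/file position and decrement
     *      the VLSN so it can be reused.
     */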

    /**
     * Serialize a loggable object into this buffer.
     */
    private ByteBuffer marshallIntoBuffer(LogEntryHeader header,
                                          LogEntry entry) {
        int entrySize = header.getSize() + header.getItemSize();

        ByteBuffer destBuffer = ByteBuffer.allocate(entrySize);
        header.writeToLog(destBuffer);

        /* Put the entry in. */
        entry.writeEntry(destBuffer);

        /* Set the limit so it can be used as the size of the entry. */
        destBuffer.flip();

        return destBuffer;
    }

    /**
     * Serialize a log entry into this buffer with proper entry header. Return
     * it ready for a copy.
     */
    ByteBuffer putIntoBuffer(LogEntry entry,
                             long prevLogEntryOffset) {
        LogEntryHeader header = new LogEntryHeader
            (entry, Provisional.NO, ReplicationContext.NO_REPLICATE);

        /*
         * Currently this method is only used for serializing the FileHeader.
         * Assert that we do not need the Txn mutex in case this method is
         * used in the future for other log entries. See LN.log. [#17204]
         */
        assert !entry.getLogType().isTransactional();

        ByteBuffer destBuffer = marshallIntoBuffer(header, entry);

        return header.addPostMarshallingInfo(destBuffer,
                                             prevLogEntryOffset,
                                             null);
    }

    /*
     * Reading from the log.
     */

    /**
     * Instantiate all the objects in the log entry at this LSN.
     */
    public LogEntry getLogEntry(long lsn)
        throws FileNotFoundException {

        return getLogEntry(lsn, false /*invisibleReadAllowed*/).getEntry();
    }

    public WholeEntry getWholeLogEntry(long lsn)
        throws FileNotFoundException {

        return getLogEntry(lsn, false /*invisibleReadAllowed*/);
    }

    /**
     * Instantiate all the objects in the log entry at this LSN. Allow the
     * fetch of invisible log entries if we are in recovery.
     */
    public WholeEntry getLogEntryAllowInvisibleAtRecovery(long lsn)
        throws FileNotFoundException {

        return getLogEntry(lsn, envImpl.isInInit() /*invisibleReadAllowed*/);
    }

    /**
     * Instantiate all the objects in the log entry at this LSN. The entry
     * may be marked invisible.
     */
    public WholeEntry getLogEntryAllowInvisible(long lsn)
        throws FileNotFoundException {

        return getLogEntry(lsn, true);
    }
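    /*
     * Illustrative sketch of the read API above. This is hypothetical caller
     * code, not part of this class; obtaining the LogManager via
     * envImpl.getLogManager() is an assumption made for illustration only.
     *
     *   LogManager logManager = envImpl.getLogManager();
     *   try {
     *       LogEntry entry = logManager.getLogEntry(lsn);
     *       Object mainItem = entry.getMainItem();
     *       // ... use the instantiated item ...
     *   } catch (FileNotFoundException e) {
     *       // The backing log file is gone, e.g. deleted by the cleaner.
     *       // getLogEntryHandleFileNotFound() below wraps this case in an
     *       // EnvironmentFailureException instead.
     *   }
     */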

    /**
     * Instantiate all the objects in the log entry at this LSN.
     * @param lsn location of entry in log.
     * @param invisibleReadAllowed true if it's expected that the target log
     * entry might be invisible. Correct the known-to-be-bad checksum before
     * proceeding.
     * @return log entry that embodies all the objects in the log entry.
     */
    private WholeEntry getLogEntry(long lsn, boolean invisibleReadAllowed)
        throws FileNotFoundException {

        /* Fail loudly if the environment is invalid. */
        envImpl.checkIfInvalid();

        try {

            /*
             * Get a log source for the log entry which provides an
             * abstraction that hides whether the entry is in a buffer or on
             * disk. Will register as a reader for the buffer or the file,
             * which will take a latch if necessary.
             */
            LogSource logSource = getLogSource(lsn);

            /* Read the log entry from the log source. */
            return getLogEntryFromLogSource(lsn, logSource,
                                            invisibleReadAllowed);
        } catch (ChecksumException e) {
            throw new EnvironmentFailureException
                (envImpl, EnvironmentFailureReason.LOG_CHECKSUM, e);
        }
    }

    public LogEntry getLogEntryHandleFileNotFound(long lsn)
        throws DatabaseException {

        try {
            return getLogEntry(lsn);
        } catch (FileNotFoundException e) {
            throw new EnvironmentFailureException
                (envImpl,
                 EnvironmentFailureReason.LOG_FILE_NOT_FOUND, e);
        }
    }

    public WholeEntry getWholeLogEntryHandleFileNotFound(long lsn)
        throws DatabaseException {

        try {
            return getWholeLogEntry(lsn);
        } catch (FileNotFoundException e) {
            throw new EnvironmentFailureException
                (envImpl,
                 EnvironmentFailureReason.LOG_FILE_NOT_FOUND, e);
        }
    }

    /**
     * Throws ChecksumException rather than translating it to
     * EnvironmentFailureException and invalidating the environment. Used
     * instead of getLogEntry when a ChecksumException is handled specially.
     */
    LogEntry getLogEntryAllowChecksumException(long lsn)
        throws ChecksumException, FileNotFoundException, DatabaseException {

        return getLogEntryFromLogSource
            (lsn,
             getLogSource(lsn),
             false /*invisibleReadAllowed*/).getEntry();
    }

    LogEntry getLogEntryAllowChecksumException(long lsn,
                                               RandomAccessFile file,
                                               int logVersion)
        throws ChecksumException, DatabaseException {

        return getLogEntryFromLogSource
            (lsn,
             new FileSource(file, readBufferSize, fileManager,
                            DbLsn.getFileNumber(lsn), logVersion),
             false /*invisibleReadAllowed*/).getEntry();
    }

    /**
     * Instantiate all the objects in the log entry at this LSN. This will
     * release the log source at the first opportunity.
     *
     * Is non-private for unit testing.
     *
     * @param lsn location of entry in log
     * @param invisibleReadAllowed if true, we will permit the read of
     * invisible log entries, and we will adjust the invisible bit so that
     * the checksum will validate
     * @return log entry that embodies all the objects in the log entry
     */
    WholeEntry getLogEntryFromLogSource(long lsn,
                                        LogSource logSource,
                                        boolean invisibleReadAllowed)
        throws ChecksumException, DatabaseException {

        try {

            /*
             * Read the log entry header into a byte buffer. This assumes
             * that the minimum size of this byte buffer (determined by
             * je.log.faultReadSize) is always >= the maximum log entry
             * header.
             */
            long fileOffset = DbLsn.getFileOffset(lsn);
            ByteBuffer entryBuffer = logSource.getBytes(fileOffset);
            if (entryBuffer.remaining() < LogEntryHeader.MIN_HEADER_SIZE) {
                throw new ChecksumException
                    ("Incomplete log entry header, size=" +
                     entryBuffer.remaining() + " lsn=" +
                     DbLsn.getNoFormatString(lsn));
            }

            /* Read the fixed length portion of the header. */
            LogEntryHeader header =
                new LogEntryHeader(entryBuffer, logSource.getLogVersion());

            /* Read the variable length portion of the header. */
            if (header.isVariableLength()) {
                if (entryBuffer.remaining() <
                    header.getVariablePortionSize()) {
                    throw new ChecksumException
                        ("Incomplete log entry header, size=" +
                         entryBuffer.remaining() + " varSize=" +
                         header.getVariablePortionSize() + " lsn=" +
                         DbLsn.getNoFormatString(lsn));
                }
                header.readVariablePortion(entryBuffer);
            }

            ChecksumValidator validator = null;
            if (doChecksumOnRead) {
                int itemStart = entryBuffer.position();

                /*
                 * We're about to read an invisible log entry, which has
                 * knowingly been left on disk with a bad checksum. Flip the
                 * invisible bit in the backing byte buffer now, so the
                 * checksum will be valid. The LogEntryHeader object itself
                 * still has the invisible bit set, which is useful for
                 * debugging.
                 */
                if (header.isInvisible()) {
                    LogEntryHeader.turnOffInvisible
                        (entryBuffer, itemStart - header.getSize());
                }

                /* Add header to checksum bytes. */
                validator = new ChecksumValidator();
                int headerSizeMinusChecksum = header.getSizeMinusChecksum();
                entryBuffer.position(itemStart -
                                     headerSizeMinusChecksum);
                validator.update(entryBuffer, headerSizeMinusChecksum);
                entryBuffer.position(itemStart);
            }

            /*
             * Now that we know the size, read the rest of the entry
             * if the first read didn't get enough.
             */
            int itemSize = header.getItemSize();
            if (entryBuffer.remaining() < itemSize) {
                entryBuffer = logSource.getBytes(fileOffset + header.getSize(),
                                                 itemSize);
                nRepeatFaultReads.increment();
            }

            /*
             * Do entry validation. Run checksum before checking the entry
             * type, it will be the more encompassing error.
             */
            if (doChecksumOnRead) {
                /* Check the checksum first. */
                validator.update(entryBuffer, itemSize);
                validator.validate(header.getChecksum(), lsn);
            }

            /*
             * If invisibleReadAllowed == false, we should not be fetching
             * an invisible log entry.
             */
            if (header.isInvisible() && !invisibleReadAllowed) {
                throw new EnvironmentFailureException
                    (envImpl, EnvironmentFailureReason.LOG_INTEGRITY,
                     "Read invisible log entry at " +
                     DbLsn.getNoFormatString(lsn) + " " + header);
            }

            assert LogEntryType.isValidType(header.getType()):
                "Read non-valid log entry type: " + header.getType();

            /* Read the entry. */
            LogEntry logEntry =
                LogEntryType.findType(header.getType()).getNewLogEntry();
            logEntry.readEntry(envImpl, header, entryBuffer);

            /* For testing only; generate a read io exception. */
            if (readHook != null) {
                try {
                    readHook.doIOHook();
                } catch (IOException e) {
                    /* Simulate what the FileManager would do. */
                    throw new EnvironmentFailureException
                        (envImpl, EnvironmentFailureReason.LOG_READ, e);
                }
            }

            /*
             * Done with the log source, release in the finally clause.
             * Note that the buffer we get back from logSource is just a
             * duplicated buffer, where the position and state are copied
             * but not the actual data. So we must not release the logSource
             * until we are done marshalling the data from the buffer into
             * the object itself.
             */
            return new WholeEntry(header, logEntry);

        } catch (Error e) {
            envImpl.invalidate(e);
            throw e;

        } finally {
            if (logSource != null) {
                logSource.release();
            }
        }
    }

    /**
     * Return a ByteBuffer holding the log entry at this LSN. The log entry
     * must begin at position 0, to mimic the marshalledBuffer used in
     * serialLogInternal().
     *
     * @param lsn location of entry in log
     * @return log entry that embodies all the objects in the log entry
     */
    public ByteBuffer getByteBufferFromLog(long lsn)
        throws DatabaseException {

        /* Fail loudly if the environment is invalid. */
        envImpl.checkIfInvalid();

        /*
         * Get a log source for the log entry which provides an abstraction
         * that hides whether the entry is in a buffer or on disk. Will
         * register as a reader for the buffer or the file, which will take a
         * latch if necessary.
         */
        LogSource logSource = null;
        try {
            logSource = getLogSource(lsn);

            /*
             * Read the log entry header into a byte buffer. This assumes
             * that the minimum size of this byte buffer (determined by
             * je.log.faultReadSize) is always >= the maximum log entry
             * header.
             */
            long fileOffset = DbLsn.getFileOffset(lsn);
            ByteBuffer entryBuffer = logSource.getBytes(fileOffset);
            int startingPosition = entryBuffer.position();
            int amountRemaining = entryBuffer.remaining();
            assert (amountRemaining >= LogEntryHeader.MAX_HEADER_SIZE);

            /* Read the header, find out how large this buffer needs to be. */
            LogEntryHeader header =
                new LogEntryHeader(entryBuffer, logSource.getLogVersion());
            int totalSize = header.getSize() + header.getItemSize();

            /*
             * Now that we know the size, read in the rest of the entry
             * if the first read didn't get enough.
             */
            if (amountRemaining < totalSize) {
                entryBuffer = logSource.getBytes(fileOffset, totalSize);
                nRepeatFaultReads.increment();
            }

            /*
             * The log entry must be positioned at the start of the returned
             * buffer, to mimic the normal logging path.
             */
            entryBuffer.position(startingPosition);
            ByteBuffer singleEntryBuffer = ByteBuffer.allocate(totalSize);
            entryBuffer.limit(startingPosition + totalSize);
            singleEntryBuffer.put(entryBuffer);
            singleEntryBuffer.position(0);
            return singleEntryBuffer;
        } catch (FileNotFoundException e) {
            throw new EnvironmentFailureException
                (envImpl,
                 EnvironmentFailureReason.LOG_FILE_NOT_FOUND, e);
        } catch (ChecksumException e) {
            throw new EnvironmentFailureException
                (envImpl, EnvironmentFailureReason.LOG_CHECKSUM, e);
        } finally {
            /* Guard against an NPE when getLogSource itself threw. */
            if (logSource != null) {
                logSource.release();
            }
        }
    }

    /**
     * Fault in the first object in the log entry at this LSN.
     * @param lsn location of object in log
     * @return the object in the log
     */
    public Object getEntry(long lsn)
        throws FileNotFoundException, DatabaseException {

        LogEntry entry = getLogEntry(lsn);
        return entry.getMainItem();
    }

    public Object getEntryHandleFileNotFound(long lsn) {
        LogEntry entry = getLogEntryHandleFileNotFound(lsn);
        return entry.getMainItem();
    }

    /**
     * Find the LSN, whether in a file or still in the log buffers.
     * Is public for unit testing.
     */
    public LogSource getLogSource(long lsn)
        throws FileNotFoundException, ChecksumException, DatabaseException {

        /*
         * First look in the log buffers to see if this LSN is still in
         * memory.
         */
        LogBuffer logBuffer = logBufferPool.getReadBufferByLsn(lsn);

        if (logBuffer == null) {
            try {
                /* Not in the in-memory log -- read it off disk. */
                long fileNum = DbLsn.getFileNumber(lsn);
                return new FileHandleSource
                    (fileManager.getFileHandle(fileNum),
                     readBufferSize, fileManager);
            } catch (DatabaseException e) {
                /* Add LSN to exception message. */
                e.addErrorMessage("lsn= " + DbLsn.getNoFormatString(lsn));
                throw e;
            }
        }
        return logBuffer;
    }

    /**
     * Return a log buffer locked for reading, or null if no log buffer
     * holds this LSN location.
     */
    public LogBuffer getReadBufferByLsn(long lsn) {

        assert DbLsn.getFileOffset(lsn) != 0 :
            "Read of lsn " + DbLsn.getNoFormatString(lsn) +
            " is illegal because file header entry is not in the log buffer";

        return logBufferPool.getReadBufferByLsn(lsn);
    }

    /**
     * Flush all log entries, fsync the log file.
     */
    public void flush()
        throws DatabaseException {

        if (!readOnly) {
            flushInternal(false /*flushRequired*/);
            fileManager.syncLogEnd();
        }
    }

    /**
     * May be used to avoid sync, for unit tests and for rep syncup.
     *
     * Note that if the FileManager write queue has room, this does nothing
     * but move the data from the log buffer to the write queue, i.e., from
     * one memory buffer to another.
     */
    public void flushNoSync()
        throws DatabaseException {

        if (!readOnly) {
            flushInternal(false /*flushRequired*/);
        }
    }

    /**
     * Flush all log entries and write to the log but do not fsync.
     */
    public void flushWriteNoSync()
        throws DatabaseException {

        if (!readOnly) {
            flushInternal(true /*flushRequired*/);
        }
    }

    private void flushInternal(boolean flushRequired)
        throws DatabaseException {

        /*
         * If we cannot bump the current buffer because there are no
         * free buffers, the only recourse is to write all buffers
         * under the LWL.
         */
        synchronized (logWriteMutex) {
            if (!logBufferPool.bumpCurrent(0)) {
                logBufferPool.bumpAndWriteSynced(0, flushRequired);
                return;
            }
        }

        /*
         * We bumped the current buffer but did not write any buffers above.
         * Write the dirty buffers now. Hopefully this is the common case.
         */
        logBufferPool.writeDirty(flushRequired);
    }
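    /*
     * Summary of the flush variants above (descriptive only, based on the
     * javadoc and calls in this file):
     *
     *   flush()            - write dirty log buffers and fsync the log file.
     *   flushWriteNoSync() - write dirty log buffers through to the file
     *                        system (not the deferred write queue) but do
     *                        not fsync.
     *   flushNoSync()      - write dirty log buffers; data may remain in the
     *                        FileManager write queue, i.e. in memory.
     *
     * All three are no-ops in a read-only environment.
     */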

    public StatGroup loadStats(StatsConfig config)
        throws DatabaseException {

        if (!config.getFast()) {
            loadEndOfLogStat();
        }

        StatGroup copyStats = stats.cloneGroup(config.getClear());
        /* Add all the LogBufferPool's stats to the LogManager's stat group. */
        copyStats.addAll(logBufferPool.loadStats(config));
        /* Add all the FileManager's stats to the LogManager's stat group. */
        copyStats.addAll(fileManager.loadStats(config));
        /* Add group commit statistics. */
        copyStats.addAll(grpManager.loadStats(config));

        return copyStats;
    }

    /**
     * Return the current number of cache misses in a lightweight fashion,
     * without incurring the cost of loading all the stats, and without
     * clearing any stats.
     */
    public long getNCacheMiss() {
        return logBufferPool.getNCacheMiss();
    }

    /**
     * For unit testing.
     */
    public StatGroup getBufferPoolLatchStats() {
        return logBufferPool.getBufferPoolLatchStats();
    }

    /**
     * Returns a tracked summary for the given file which will not be flushed.
     */
    public TrackedFileSummary getUnflushableTrackedSummary(long file) {
        synchronized (logWriteMutex) {
            return envImpl.getUtilizationTracker().
                getUnflushableTrackedSummary(file);
        }
    }

    /**
     * Removes the tracked summary for the given file.
     */
    public void removeTrackedFile(TrackedFileSummary tfs) {
        synchronized (logWriteMutex) {
            tfs.reset();
        }
    }

    public void updateObsolete(
        LogContext context,
        UtilizationTracker tracker) {

        synchronized (logWriteMutex) {

            /* Count other obsolete info under the log write latch. */
            if (context.packedObsoleteInfo != null) {
                context.packedObsoleteInfo.countObsoleteInfo(
                    tracker, context.nodeDb);
            }

            if (context.obsoleteWriteLockInfo != null) {
                for (WriteLockInfo info : context.obsoleteWriteLockInfo) {
                    tracker.countObsoleteNode(info.getAbortLsn(),
                                              null /*type*/,
                                              info.getAbortLogSize(),
                                              info.getAbortDb());
                }
            }
        }
    }

    /**
     * Count node as obsolete under the log write latch. This is done here
     * because the log write latch is managed here, and all utilization
     * counting must be performed under the log write latch.
     */
    public void countObsoleteNode(long lsn,
                                  LogEntryType type,
                                  int size,
                                  DatabaseImpl nodeDb,
                                  boolean countExact) {
        synchronized (logWriteMutex) {
            UtilizationTracker tracker = envImpl.getUtilizationTracker();
            if (countExact) {
                tracker.countObsoleteNode(lsn, type, size, nodeDb);
            } else {
                tracker.countObsoleteNodeInexact(lsn, type, size, nodeDb);
            }
        }
    }

    /**
     * A flavor of countObsoleteNode which does not fire an assert if the
     * offset has already been counted. Called through the LogManager so
     * that, like all utilization counting, it is performed under the log
     * write latch.
     */
    public void countObsoleteNodeDupsAllowed(long lsn,
                                             LogEntryType type,
                                             int size,
                                             DatabaseImpl nodeDb) {
        synchronized (logWriteMutex) {
            UtilizationTracker tracker = envImpl.getUtilizationTracker();
            tracker.countObsoleteNodeDupsAllowed(lsn, type, size, nodeDb);
        }
    }

    /**
     * @see LocalUtilizationTracker#transferToUtilizationTracker
     */
    public void transferToUtilizationTracker(LocalUtilizationTracker
                                             localTracker)
        throws DatabaseException {

        synchronized (logWriteMutex) {
            UtilizationTracker tracker = envImpl.getUtilizationTracker();
            localTracker.transferToUtilizationTracker(tracker);
        }
    }

    /**
     * @see DatabaseImpl#countObsoleteDb
     */
    public void countObsoleteDb(DatabaseImpl db) {
        synchronized (logWriteMutex) {
            db.countObsoleteDb(envImpl.getUtilizationTracker(),
                               DbLsn.NULL_LSN /*mapLnLsn*/);
        }
    }

    public boolean removeDbFileSummaries(DatabaseImpl db,
                                         Collection<Long> fileNums) {
        synchronized (logWriteMutex) {
            return db.removeDbFileSummaries(fileNums);
        }
    }

    /**
     * @see DatabaseImpl#cloneDbFileSummaries
     */
    public Map<Long, DbFileSummary> cloneDbFileSummaries(DatabaseImpl db) {
        synchronized (logWriteMutex) {
            return db.cloneDbFileSummariesInternal();
        }
    }

    public void loadEndOfLogStat() {
        synchronized (logWriteMutex) {
            endOfLog.set(fileManager.getLastUsedLsn());
        }
    }

    /* For unit testing only. */
    public void setReadHook(TestHook hook) {
        readHook = hook;
    }

    /* For unit testing only. */
    public void setDelayVLSNRegisterHook(TestHook<Object> hook) {
        delayVLSNRegisterHook = hook;
    }

    /* For unit testing only. */
    public void setFlushLogHook(TestHook<CountDownLatch> hook) {
        flushHook = hook;
        grpManager.setFlushLogHook(hook);
    }

    protected class LogWriteInfo {
        protected LogBufferSegment lbs;
        protected VLSN vlsn;
        protected long fileOffset;

        LogWriteInfo(LogBufferSegment bs, VLSN vlsn, long fileOffset) {
            lbs = bs;
            this.vlsn = vlsn;
            this.fileOffset = fileOffset;
        }
    }
}