/*-
 * Copyright (C) 2006-2009 Erik Larsson
 *
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

package org.catacombae.hfsexplorer;

import java.io.BufferedReader;
import java.io.FileNotFoundException;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.InputStreamReader;
import java.util.ArrayList;
import java.util.LinkedList;
import org.catacombae.io.ReadableFileStream;
import org.catacombae.io.ReadableRandomAccessStream;
import org.catacombae.storage.io.win32.ReadableWin32FileStream;
import org.catacombae.hfsexplorer.fs.NullProgressMonitor;
import org.catacombae.storage.ps.apm.types.APMPartition;
import org.catacombae.hfs.types.hfscommon.CommonHFSCatalogFile;
import org.catacombae.hfs.types.hfscommon.CommonHFSCatalogFileRecord;
import org.catacombae.hfs.types.hfscommon.CommonHFSCatalogFileThread;
import org.catacombae.hfs.types.hfscommon.CommonHFSCatalogFileThreadRecord;
import org.catacombae.hfs.types.hfscommon.CommonHFSCatalogFolder;
import org.catacombae.hfs.types.hfscommon.CommonHFSCatalogFolderRecord;
import org.catacombae.hfs.types.hfscommon.CommonHFSCatalogFolderThread;
import org.catacombae.hfs.types.hfscommon.CommonHFSCatalogFolderThreadRecord;
import org.catacombae.hfs.types.hfscommon.CommonHFSCatalogLeafRecord;
import org.catacombae.hfs.types.hfscommon.CommonHFSCatalogNodeID.ReservedID;
import org.catacombae.hfs.types.hfscommon.CommonHFSExtentDescriptor;
import org.catacombae.hfs.types.hfscommon.CommonHFSForkData;
import org.catacombae.hfs.types.hfscommon.CommonHFSVolumeHeader;
import org.catacombae.io.ReadableConcatenatedStream;
import org.catacombae.storage.io.DataLocator;
import org.catacombae.storage.io.ReadableStreamDataLocator;
import org.catacombae.storage.fs.FileSystemDetector;
import org.catacombae.storage.fs.FileSystemHandler;
import org.catacombae.storage.fs.FileSystemHandlerFactory;
import org.catacombae.storage.fs.FileSystemMajorType;
import org.catacombae.storage.fs.hfscommon.HFSCommonFileSystemHandler;
import org.catacombae.hfs.HFSVolume;
import org.catacombae.util.Util.Pair;

@SuppressWarnings("deprecation") // TODO: Fix HFSExplorer so it doesn't use deprecated methods...
public class HFSExplorer {
    public static final String VERSION = "0.23.1";
    public static final String COPYRIGHT =
            "Copyright \u00A9 Erik Larsson 2006-2015";
    public static final String[] NOTICES = {
        "This program is distributed under the GNU General Public License version 3.",
        "See <http://www.gnu.org/copyleft/gpl.html> for the details.",
        "",
        "Libraries used:",
        " swing-layout <https://swing-layout.dev.java.net/>",
        " Copyright \u00A9 2005-2006 Sun Microsystems, Inc. Licensed under",
Licensed under", 68 " the Lesser General Public License.", 69 " See <http://www.gnu.org/licenses/lgpl.html> for the details.", 70 " iHarder Base64 encoder/decoder <http://iharder.sourceforge.net>", 71 " Public domain software.", 72 " Apache Ant bzip2 library <http://ant.apache.org/>", 73 " Copyright \u00A9 the Apache Software Foundation (ASF). Licensed", 74 " under the Apache License, Version 2.0.", 75 " See <http://www.apache.org/licenses/LICENSE-2.0> for the details.", 76 }; 77 78 public static BufferedReader stdin = new BufferedReader(new InputStreamReader(System.in)); 79 private static class Options { 80 public boolean readAPM = false; 81 public boolean verbose = false; 82 } 83 private static enum Operation { 84 BROWSE, 85 FRAGCHECK, 86 TEST, 87 SYSTEMFILEINFO; 88 89 private final LinkedList<String> argsList = new LinkedList<String>(); addArg(String argument)90 public void addArg(String argument) { 91 argsList.add(argument); 92 } 93 getArgs()94 public String[] getArgs() { 95 return argsList.toArray(new String[argsList.size()]); 96 } 97 getFilename()98 public String getFilename() { return argsList.getLast(); } 99 } 100 101 private static Options options = new Options(); 102 private static Operation operation; 103 private static BufferedReader stdIn = new BufferedReader(new InputStreamReader(System.in)); 104 main(String[] args)105 public static void main(String[] args) throws IOException { 106 if(args.length == 0) { 107 printUsageInfo(); 108 System.exit(0); 109 } 110 111 if(!parseOptions(args, 0, args.length)) { 112 System.exit(1); 113 return; 114 } 115 116 //RandomAccessFile isoRaf = new RandomAccessFile(args[0], "r"); 117 ReadableRandomAccessStream isoRaf; 118 if(ReadableWin32FileStream.isSystemSupported()) 119 isoRaf = new ReadableWin32FileStream(operation.getFilename()); 120 else 121 isoRaf = new ReadableFileStream(operation.getFilename()); 122 123 long offset; // Offset in isoRaf where the file system starts 124 long length; // Length of the file system data 125 if(options.readAPM) { 126 println("Reading the Apple Partition Map..."); 127 isoRaf.seek(0x200); 128 byte[] currentBlock = new byte[512]; 129 byte[] signature = new byte[2]; 130 //APMPartition p = new APMPartition(isoRaf); 131 ArrayList<APMPartition> partitions = new ArrayList<APMPartition>(); 132 for(int i = 0; i < 20; ++i) { 133 isoRaf.readFully(currentBlock); 134 signature[0] = currentBlock[0]; 135 signature[1] = currentBlock[1]; 136 if(new String(signature, "ASCII").equals("PM")) { 137 print("Partition " + i + ": "); 138 APMPartition p = new APMPartition(currentBlock, 0, 0x200); 139 partitions.add(p); 140 if(options.verbose) { 141 println(); 142 p.printPartitionInfo(System.out); 143 } 144 else 145 println("\"" + p.getPmPartNameAsString() + "\" (" + p.getPmParTypeAsString() + ")"); 146 } 147 else break; 148 } 149 print("Which partition do you wish to explore [0-" + (partitions.size()-1) + "]? 
"); 150 int partNum = Integer.parseInt(stdin.readLine()); 151 APMPartition chosenPartition = partitions.get(partNum); 152 String partitionType = chosenPartition.getPmParTypeAsString(); 153 if(!partitionType.trim().equals("Apple_HFS")) { 154 println("The partition is not an HFS partition!"); 155 System.exit(0); 156 } 157 println("Parsing partition " + partNum + " (" + chosenPartition.getPmPartNameAsString().trim() + "/" + partitionType.trim() + ")"); 158 offset = (chosenPartition.getPmPyPartStart()+chosenPartition.getPmLgDataStart())*0x200; 159 length = chosenPartition.getPmDataCnt()*0x200; 160 } 161 else { 162 offset = 0; 163 length = isoRaf.length(); 164 } 165 166 switch(operation) { 167 case BROWSE: 168 operationBrowse(operation, isoRaf, offset, length); 169 break; 170 case FRAGCHECK: 171 operationFragCheck(operation, isoRaf, offset, length); 172 break; 173 // case TEST: 174 // operationTest(operation, isoRaf, offset, length); 175 // break; 176 case SYSTEMFILEINFO: 177 operationSystemFileInfo(operation, isoRaf, offset, length); 178 break; 179 default: 180 throw new RuntimeException("Unknown operation: " + operation); 181 } 182 } 183 184 // private static void operationTest(Operation operation, ReadableRandomAccessStream isoRaf, long offset, long length) throws IOException { 185 // System.out.println("Reading partition data starting at " + offset + "..."); 186 // byte[] currentBlock = new byte[512]; 187 // isoRaf.seek(offset + 1024); 188 // isoRaf.read(currentBlock); 189 // HFSPlusVolumeHeader header = new HFSPlusVolumeHeader(currentBlock); 190 // header.print(System.out, " "); 191 // long catalogFilePosition = header.getBlockSize()*header.getCatalogFile().getExtents().getExtentDescriptor(0).getStartBlock(); 192 // long catalogFileLength = header.getBlockSize()*header.getCatalogFile().getExtents().getExtentDescriptor(0).getBlockCount(); 193 // System.out.println("Catalog file offset: " + catalogFilePosition); 194 // System.out.println("Catalog file length: " + catalogFileLength + " bytes"); 195 // System.out.println("Seeking..."); 196 // isoRaf.seek(offset + catalogFilePosition); 197 // System.out.println("Current file pointer: " + isoRaf.getFilePointer()); 198 // System.out.println("length of file: " + isoRaf.length()); 199 // byte[] nodeDescriptorData = new byte[14]; 200 // if(isoRaf.read(nodeDescriptorData) != nodeDescriptorData.length) 201 // System.out.println("ERROR: Did not read nodeDescriptor completely."); 202 // BTNodeDescriptor btnd = new BTNodeDescriptor(nodeDescriptorData, 0); 203 // btnd.print(System.out, ""); 204 // 205 // byte[] headerRec = new byte[BTHeaderRec.length()]; 206 // if(isoRaf.read(headerRec) != headerRec.length) 207 // System.out.println("ERROR: Did not read headerRec completely."); 208 // BTHeaderRec bthr = new BTHeaderRec(headerRec, 0); 209 // bthr.print(System.out, ""); 210 // 211 // // Now we have the node size, so we could just list the nodes and see what types are there. 212 // // Btw, does the length of the catalog file containing this b-tree align to the node size? 213 // if(catalogFileLength % bthr.getNodeSize() != 0) { 214 // System.out.println("catalogFileLength is not aligned to node size! 
(" + catalogFileLength + 215 // " % " + bthr.getNodeSize() + " = " + catalogFileLength % bthr.getNodeSize()); 216 // return; 217 // } 218 // else 219 // System.out.println("Number of nodes in the catalog file: " + (catalogFileLength / bthr.getNodeSize())); 220 // 221 // int nodeSize = bthr.getNodeSize(); 222 // byte[] currentNode = new byte[nodeSize]; 223 // 224 // 225 // 226 // // collect all records belonging to directory 1 (= ls) 227 // System.out.println(); 228 // System.out.println(); 229 // ForkFilter catalogFile = new ForkFilter(header.getCatalogFile(), 230 // header.getCatalogFile().getExtents().getExtentDescriptors(), 231 // isoRaf, offset, header.getBlockSize(), 0); 232 // HFSPlusCatalogLeafRecord[] f = HFSPlusFileSystemView.collectFilesInDir(new HFSCatalogNodeID(1), bthr.getRootNode(), isoRaf, 233 // offset, header, bthr, catalogFile); 234 // System.out.println("Found " + f.length + " items in subroot."); 235 // for(HFSPlusCatalogLeafRecord rec : f) { 236 // //rec.print(System.out, " "); 237 // System.out.print(" \"" + rec.getKey().getNodeName().toString() + "\""); 238 // HFSPlusCatalogLeafRecordData data = rec.getData(); 239 // if(data.getRecordType() == HFSPlusCatalogLeafRecordData.RECORD_TYPE_FOLDER && 240 // data instanceof HFSPlusCatalogFolder) { 241 // HFSPlusCatalogFolder folderData = (HFSPlusCatalogFolder)data; 242 // System.out.println(" (dir, id: " + folderData.getFolderID().toInt() + ")"); 243 // // Print contents of folder 244 // HFSPlusCatalogLeafRecord[] f2 = HFSPlusFileSystemView.collectFilesInDir(folderData.getFolderID(), bthr.getRootNode(), isoRaf, offset, header, bthr, catalogFile); 245 // System.out.println(" Found " + f2.length + " items in " + rec.getKey().getNodeName().toString() + "."); 246 // for(HFSPlusCatalogLeafRecord rec2 : f2) 247 // System.out.println(" \"" + rec2.getKey().getNodeName() + "\""); 248 // //System.out.println(); 249 // } 250 // else if(data.getRecordType() == HFSPlusCatalogLeafRecordData.RECORD_TYPE_FILE && 251 // data instanceof HFSPlusCatalogFile) { 252 // HFSPlusCatalogFile fileData = (HFSPlusCatalogFile)data; 253 // System.out.println(" (file, id: " + fileData.getFileID().toInt() + ")"); 254 // } 255 // else if(data.getRecordType() == HFSPlusCatalogLeafRecordData.RECORD_TYPE_FOLDER_THREAD && 256 // data instanceof HFSPlusCatalogThread) { 257 // System.out.println(" (folder thread)"); 258 // } 259 // else if(data.getRecordType() == HFSPlusCatalogLeafRecordData.RECORD_TYPE_FILE_THREAD && 260 // data instanceof HFSPlusCatalogThread) { 261 // System.out.println(" (file thread)"); 262 // } 263 // else { 264 // System.out.println(" (ENCOUNTERED UNKNOWN DATA. 
record type: " + data.getRecordType() + " rec: " + rec + ")"); 265 // } 266 // } 267 // System.out.println(); 268 // System.out.println(); 269 // 270 // 271 // 272 // System.out.println("Reading node by node..."); 273 // isoRaf.seek(offset + catalogFilePosition); 274 // int nodeNumber = 0; 275 // int bytesRead = nodeSize; 276 // while((isoRaf.getFilePointer()-catalogFilePosition+nodeSize) <= catalogFileLength) { 277 // //System.out.println("FP: " + isoRaf.getFilePointer() + " diff: " + (isoRaf.getFilePointer()-catalogFilePosition) + " (catalogFileLength: " + catalogFileLength + ")"); 278 // System.out.println("Reading node " + nodeNumber + "..."); 279 // isoRaf.readFully(currentNode); 280 // bytesRead += nodeSize; 281 // 282 // BTNodeDescriptor nodeDescriptor = new BTNodeDescriptor(currentNode, 0); 283 // if(false && nodeDescriptor.getKind() == BTNodeDescriptor.BT_LEAF_NODE) { 284 // String filename = "node" + nodeNumber + ".bin"; 285 // System.out.println("Dumping node to file: \"" + filename + "\""); 286 // FileOutputStream fos = new FileOutputStream(filename); 287 // fos.write(currentNode); 288 // fos.close(); 289 // } 290 // System.out.println(" Kind: " + nodeDescriptor.getKindAsString()); 291 // System.out.println(" Number of records: " + nodeDescriptor.getNumRecords()); 292 // short[] offsets = new short[nodeDescriptor.getNumRecords()]; 293 // for(int i = 0; i < offsets.length; ++i) { 294 // offsets[i] = Util.readShortBE(currentNode, currentNode.length-((i+1)*2)); 295 // } 296 // 297 // for(int i = 0; i < offsets.length; ++i) { 298 // int currentOffset = Util.unsign(offsets[i]); 299 // 300 // if(i < offsets.length-1) { 301 // //System.out.println(" [" + nodeNumber + "] Offset to record " + i + ": " + currentOffset); 302 // //System.out.println(" [" + nodeNumber + "] Size of record: " + (Util.unsign(offsets[i+1])-currentOffset) + " bytes"); 303 // /* 304 // int keyLength; 305 // int keyLengthSize; 306 // if(bthr.isBTBigKeysSet()) { 307 // keyLength = Util.unsign(Util.readShortBE(currentNode, currentOffset)); 308 // keyLengthSize = 2; 309 // } 310 // else { 311 // keyLength = Util.unsign(Util.readByteBE(currentNode, currentOffset)); 312 // keyLengthSize = 1; 313 // } 314 // */ 315 // if(nodeDescriptor.getKind() != BTNodeDescriptor.BT_HEADER_NODE) { 316 // HFSPlusCatalogKey currentKey = new HFSPlusCatalogKey(currentNode, currentOffset); 317 // 318 // 319 // if(nodeDescriptor.getKind() == BTNodeDescriptor.BT_LEAF_NODE) { 320 // System.out.println(" [" + nodeNumber + "] Key: " + currentKey.getKeyLength() + 321 // ", " + currentKey.getParentID().toString() + 322 // ", \"" + currentKey.getNodeName().toString() + "\""); 323 // //currentKey.print(System.out, " [" + nodeNumber + "] "); 324 // 325 // short recordType = Util.readShortBE(currentNode, currentOffset+currentKey.length()); 326 // System.out.print(" [" + nodeNumber + "] Record type: "); 327 // if(recordType == 0x0001) System.out.print("kHFSPlusFolderRecord"); 328 // else if(recordType == 0x0002) System.out.print("kHFSPlusFileRecord"); 329 // else if(recordType == 0x0003) System.out.print("kHFSPlusFolderThreadRecord"); 330 // else if(recordType == 0x0004) System.out.print("kHFSPlusFileThreadRecord"); 331 // else System.out.print("UNKNOWN! 
(" + recordType + ")"); 332 // System.out.println(); 333 // 334 // //System.out.println(" [" + nodeNumber + "] Record:"); 335 // if(recordType == 0x0001) { 336 // HFSPlusCatalogFolder folderRec = new HFSPlusCatalogFolder(currentNode, currentOffset+currentKey.length()); 337 // System.out.println(" [" + nodeNumber + "] Node ID: " + folderRec.getFolderID()); 338 // System.out.println(" [" + nodeNumber + "] Valence: " + folderRec.getValence()); 339 // //folderRec.print(System.out, " [" + nodeNumber + "] "); 340 // } 341 // else if(recordType == 0x0002) { 342 // HFSPlusCatalogFile fileRec = new HFSPlusCatalogFile(currentNode, currentOffset+currentKey.length()); 343 // System.out.println(" [" + nodeNumber + "] Node ID: " + fileRec.getFileID()); 344 // //fileRec.print(System.out, " [" + nodeNumber + "] "); 345 // } 346 // else if(recordType == 0x0003) { 347 // HFSPlusCatalogThread folderThreadRec = new HFSPlusCatalogThread(currentNode, currentOffset+currentKey.length()); 348 // //folderThreadRec.print(System.out, " [" + nodeNumber + "] "); 349 // } 350 // else if(recordType == 0x0004) { 351 // HFSPlusCatalogThread fileThreadRec = new HFSPlusCatalogThread(currentNode, currentOffset+currentKey.length()); 352 // //fileThreadRec.print(System.out, " [" + nodeNumber + "] "); 353 // } 354 // } 355 // else if(nodeDescriptor.getKind() == BTNodeDescriptor.BT_INDEX_NODE) { 356 //// System.out.println(" [" + nodeNumber + "] Remaining data for index: " + 357 //// ((Util.unsign(offsets[i+1])-currentOffset)- 358 //// Util.unsign(currentKey.length())) + " bytes"); 359 // System.out.println(" [" + nodeNumber + "] \"" + currentKey.getNodeName().toString() + "\" (parent: " + currentKey.getParentID() + ") -> " + Util.unsign(Util.readIntBE(currentNode, currentOffset+currentKey.length()))); 360 // } 361 // } 362 // } 363 // else { 364 //// System.out.println(" [" + nodeNumber + "] Offset to free space: " + currentOffset); 365 //// System.out.println(" [" + nodeNumber + "] Size of free space: " + (nodeSize-currentOffset-2*nodeDescriptor.getNumRecords()) + " bytes"); 366 // 367 // } 368 // } 369 // 370 // if(true) { 371 // System.out.print("Press enter to read next node (q and enter to exit)..."); 372 // if(stdIn.readLine().trim().equalsIgnoreCase("q")) 373 // return; 374 // } 375 // ++nodeNumber; 376 // } 377 // System.out.println("FP: " + isoRaf.getFilePointer() + " diff: " + (isoRaf.getFilePointer()-catalogFilePosition) + " (catalogFileLength: " + catalogFileLength + ")"); 378 // System.out.println("bytesRead: " + bytesRead + " nodeSize: " + nodeSize + " number of nodes: " + (catalogFileLength / nodeSize)); 379 // System.out.println("Nodes read: " + nodeNumber); 380 // 381 // } 382 operationBrowse(Operation op, ReadableRandomAccessStream hfsFile, long fsOffset, long fsLength)383 private static void operationBrowse(Operation op, ReadableRandomAccessStream hfsFile, long fsOffset, long fsLength) { 384 //HFSPlusFileSystemView fsView = new HFSPlusFileSystemView(hfsFile, fsOffset); 385 DataLocator inputDataLocator = new ReadableStreamDataLocator( 386 new ReadableConcatenatedStream(hfsFile, fsOffset, fsLength)); 387 388 FileSystemMajorType[] fsTypes = 389 FileSystemDetector.detectFileSystem(inputDataLocator); 390 391 FileSystemMajorType hfsType = null; 392 outer: 393 for(FileSystemMajorType type : fsTypes) { 394 switch(type) { 395 case APPLE_HFS: 396 case APPLE_HFS_PLUS: 397 case APPLE_HFSX: 398 if(hfsType != null) 399 throw new RuntimeException("Conflicting file system " + 400 "types: Detected both " + hfsType + " and " + 
401 type + "."); 402 hfsType = type; 403 break; 404 default: 405 break; 406 } 407 } 408 409 if(hfsType == null) { 410 System.err.println("No HFS file system found."); 411 System.exit(1); 412 } 413 else 414 System.out.println("Detected a " + hfsType + " file system."); 415 416 FileSystemHandlerFactory fact = hfsType.createDefaultHandlerFactory(); 417 FileSystemHandler fsHandler = fact.createHandler(inputDataLocator); 418 419 HFSCommonFileSystemHandler hfsHandler; 420 if(fsHandler instanceof HFSCommonFileSystemHandler) 421 hfsHandler = (HFSCommonFileSystemHandler) fsHandler; 422 else 423 throw new RuntimeException("Unexpected HFS fsHandler type: " + 424 fsHandler.getClass()); 425 426 HFSVolume fsView = hfsHandler.getFSView(); 427 428 CommonHFSCatalogLeafRecord rootRecord = fsView.getCatalogFile().getRootFolder(); 429 CommonHFSCatalogLeafRecord currentDir = rootRecord; 430 //HFSCatalogNodeID = new HFSCatalogNodeID(1); //rootRecord.getFolderID(); 431 LinkedList<String> pathStack = new LinkedList<String>(); 432 LinkedList<CommonHFSCatalogLeafRecord> pathThread = 433 new LinkedList<CommonHFSCatalogLeafRecord>(); 434 pathStack.addLast(""); 435 436 while(true) { 437 CommonHFSCatalogFolderThread currentThread = null; 438 StringBuilder currentPath = new StringBuilder(); 439 for(String pathComponent : pathStack) { 440 currentPath.append(pathComponent); 441 currentPath.append("/"); 442 } 443 println("Listing files in \"" + currentPath.toString() + "\":"); 444 445 boolean atLeastOneNonThreadEntryFound = false; 446 CommonHFSCatalogLeafRecord[] recordsInDir = 447 fsView.getCatalogFile().listRecords(currentDir); 448 449 for(CommonHFSCatalogLeafRecord rec : recordsInDir) { 450 451 if(rec instanceof CommonHFSCatalogFileRecord) { 452 CommonHFSCatalogFileRecord catFileRec = 453 (CommonHFSCatalogFileRecord) rec; 454 CommonHFSCatalogFile catFile = catFileRec.getData(); 455 456 println(" [" + catFile.getFileID() + "] \"" + 457 rec.getKey().getNodeName() + "\" (" + 458 catFile.getDataFork() .getLogicalSize() + " B)"); 459 460 if(!atLeastOneNonThreadEntryFound) 461 atLeastOneNonThreadEntryFound = true; 462 } 463 else if(rec instanceof CommonHFSCatalogFolderRecord) { 464 CommonHFSCatalogFolderRecord catFolderRec = 465 (CommonHFSCatalogFolderRecord) rec; 466 CommonHFSCatalogFolder catFolder = catFolderRec.getData(); 467 468 println(" [" + catFolder.getFolderID() + "] \"" + 469 catFolderRec.getKey().getNodeName() + "/\""); 470 471 if(!atLeastOneNonThreadEntryFound) 472 atLeastOneNonThreadEntryFound = true; 473 } 474 else if(rec instanceof CommonHFSCatalogFolderThreadRecord) { 475 CommonHFSCatalogFolderThreadRecord catThreadRec = 476 (CommonHFSCatalogFolderThreadRecord) rec; 477 CommonHFSCatalogFolderThread catThread = 478 catThreadRec.getData(); 479 480 println(" [Folder Thread: [" + catThread.getParentID() + 481 "] \"" + catThread.getNodeName() + "\"]"); 482 483 if(currentThread == null) 484 currentThread = catThreadRec.getData(); 485 else 486 println("WARNING: Found more than one folder thread " + 487 "in " + currentPath + "!"); 488 //println(" [" + catFolder.getFolderID() + "] <Folder Thread: [" + catThread.getParentID() + "] \"" + catThread.getNodeName() + "\""); 489 } 490 else if(rec instanceof CommonHFSCatalogFileThreadRecord) { 491 CommonHFSCatalogFileThreadRecord catThreadRec = 492 (CommonHFSCatalogFileThreadRecord) rec; 493 CommonHFSCatalogFileThread catThread = 494 catThreadRec.getData(); 495 496 println(" [File Thread: [" + catThread.getParentID() + 497 "] \"" + catThread.getNodeName() + "\"]"); 498 // 
                }
            }

            if(currentThread == null && atLeastOneNonThreadEntryFound)
                println("WARNING: Found no folder thread in " + currentPath + "! Won't be able to go back from children in hierarchy.");

            //long nextID = -1;
            while(true) {
                print("Command[?]: ");

                String input = null;
                try {
                    input = stdIn.readLine().trim();
                } catch(IOException ioe) {
                    ioe.printStackTrace();
                    return;
                }
                if(input.equalsIgnoreCase("?")) {
                    println("Available commands:");
                    println(" ls List contents of current directory");
                    println(" cd <dirName> Changes directory by name");
                    println(" cdn <dirID> Changes directory by ID");
                    println(" info <fileID> Gets extensive information about the file.");
                    println(" extract <fileID> Extracts <fileID> to current directory");
                    println(" q Quits program");
                }
                else if(input.equals("q"))
                    return;
                else if(input.equals("ls"))
                    break;
                else if(input.startsWith("extract ")) {
                    input = input.substring("extract ".length()).trim();
                    try {
                        long nextID = Long.parseLong(input);

                        CommonHFSCatalogLeafRecord selectedFileRecord = null;
                        CommonHFSCatalogFile selectedFile = null;
                        for(CommonHFSCatalogLeafRecord rec : recordsInDir) {
                            if(rec instanceof CommonHFSCatalogFileRecord) {
                                CommonHFSCatalogFileRecord catFileRec =
                                        (CommonHFSCatalogFileRecord)rec;
                                CommonHFSCatalogFile catFile =
                                        catFileRec.getData();

                                if(catFile.getFileID().toLong() == nextID) {
                                    selectedFileRecord = rec;
                                    selectedFile = catFile;
                                    break;
                                }
                            }
                        }
                        if(selectedFileRecord == null) {
                            println("ID not present in dir.");
                            //nextID = -1;
                        }
                        else {
                            String dataForkFilename = selectedFileRecord.getKey().getNodeName().toString();
                            FileOutputStream dataOut = new FileOutputStream(dataForkFilename);
                            print("Extracting data fork to file \"" + dataForkFilename + "\"...");
                            try {
                                long bytesExtracted =
                                        fsView.extractDataForkToStream(selectedFileRecord, dataOut,
                                                NullProgressMonitor.getInstance());
                                println("extracted " + bytesExtracted + " bytes.");
                                dataOut.close();
                            } catch(IOException ioe) {
                                ioe.printStackTrace();
                                try { dataOut.close(); } catch(IOException ioe2) {}
                                continue; // or rather... don't continue (: mwahaha
                            }

                            String resourceForkFilename = dataForkFilename + ".resourcefork";
                            FileOutputStream resourceOut = new FileOutputStream(resourceForkFilename);
                            print("Extracting resource fork to file \"" + resourceForkFilename + "\"...");
                            try {
                                long bytesExtracted =
                                        fsView.extractResourceForkToStream(selectedFileRecord,
                                                resourceOut, NullProgressMonitor.getInstance());
                                println("extracted " + bytesExtracted + " bytes.");
                                resourceOut.close();
                            } catch(IOException ioe) {
                                ioe.printStackTrace();
                                // Close the stream that was opened for the resource fork.
                                try { resourceOut.close(); } catch(IOException ioe2) {}
                            }
                            //break; // to reread the directory
                        }

                    } catch(FileNotFoundException fnfe) {
                        fnfe.printStackTrace();
                    } catch(NumberFormatException nfe) {
                        //nextID = -1;
                        println("Invalid input!");
                    }
                }
                else if(input.startsWith("info ")) {
                    input = input.substring("info ".length()).trim();
                    try {
                        long nextID = Long.parseLong(input);

                        CommonHFSCatalogLeafRecord selectedFileRecord = null;
                        for(CommonHFSCatalogLeafRecord rec : recordsInDir) {

                            if(rec instanceof CommonHFSCatalogFileRecord) {
                                CommonHFSCatalogFileRecord catFileRec =
                                        (CommonHFSCatalogFileRecord) rec;
                                CommonHFSCatalogFile catFile =
                                        catFileRec.getData();

                                if(catFile.getFileID().toLong() == nextID) {
                                    selectedFileRecord = rec;
                                    rec.print(System.out, "");
                                    break;
                                }
                            }
                        }
                        if(selectedFileRecord == null) {
                            println("ID not present in dir.");
                            //nextID = -1;
                        }
                    } catch(NumberFormatException nfe) {
                        //nextID = -1;
                        println("Invalid input!");
                    }
                }
                else if(input.startsWith("cdn ")) {
                    input = input.substring("cdn ".length()).trim();
                    if(input.equals("..")) {
                        println("Not yet implemented.");
                        // TODO: Implement this.
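                        // (Going back up by parent ID is not supported here; "cd .." works via the saved path stack.)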
                    }
                    else {
                        try {
                            long nextID = Long.parseLong(input);
                            CommonHFSCatalogLeafRecord nextDir = null;
                            for(CommonHFSCatalogLeafRecord rec : recordsInDir) {

                                if(rec instanceof CommonHFSCatalogFolderRecord) {
                                    //System.out.println(rec.getKey().getNodeName() + ".equals(" + input +")");
                                    CommonHFSCatalogFolderRecord catFolderRec =
                                            (CommonHFSCatalogFolderRecord) rec;
                                    CommonHFSCatalogFolder catFolder =
                                            catFolderRec.getData();

                                    if(catFolder.getFolderID().toLong() == nextID) {
                                        nextDir = rec;
                                        break;
                                    }
                                }
                            }
                            if(nextDir == null) {
                                println("ID not present in dir.");
                                //nextID = -1;
                            }
                            else {
                                pathStack.addLast(nextDir.getKey().getNodeName().toString());
                                pathThread.addLast(currentDir);
                                currentDir = nextDir;//nextFolder.getFolderID();
                                break;
                            }
                        } catch(Exception e) {
                            //nextID = -1;
                            println("Invalid input!");
                        }
                    }
                }
                else if(input.startsWith("cd ")) {
                    input = input.substring("cd ".length());
                    if(input.equals("..")) {
//                        HFSPlusCatalogLeafRecord nextDir = null;
//                        for(HFSPlusCatalogLeafRecord rec : recordsInDir) {
//                            HFSPlusCatalogLeafRecordData recData = rec.getData();
//                            if((recData.getRecordType() ==
//                                    HFSPlusCatalogLeafRecordData.RECORD_TYPE_FOLDER_THREAD) &&
//                                    recData instanceof HFSPlusCatalogThread) {
//                                HFSPlusCatalogThread catThread = (HFSPlusCatalogThread)recData;
//                                nextDir = fsView.getRecord(catThread.getParentID(), catThread.getNodeName());
//                                if(nextDir == null)
//                                    System.err.println("OCULD NOTT FIAAND DIDIIRR!!!");
//                            }
//                        }
//                        if(nextDir == null) {
//                            println("ID not present in dir.");
//                            //nextID = -1;
//                        }
//                        else {
                        pathStack.removeLast();

                        currentDir = pathThread.removeLast();//nextDir;//nextFolder.getFolderID();
                        break;
//                        }
                    }
                    else {
                        try {
                            CommonHFSCatalogLeafRecord nextDir = null;
                            for(CommonHFSCatalogLeafRecord rec : recordsInDir) {

                                if(rec instanceof CommonHFSCatalogFolderRecord) {
                                    CommonHFSCatalogFolderRecord folderRec =
                                            (CommonHFSCatalogFolderRecord) rec;
                                    //System.out.println(rec.getKey().getNodeName() + ".equals(" + input +")");
                                    if(rec.getKey().getNodeName().toString().equals(input)) {
                                        nextDir = rec;
                                        break;
                                    }
                                }
                            }
                            if(nextDir == null) {
                                println("Unknown directory.");
                                //nextID = -1;
                            }
                            else {
                                pathStack.addLast(nextDir.getKey().getNodeName().toString());
                                pathThread.addLast(currentDir);
                                currentDir = nextDir;//nextFolder.getFolderID();
                                break;
                            }
                        } catch(Exception e) {
                            //nextID = -1;
                            println("Invalid input!");
                        }
                    }
                }
                else
                    println("Unknown command.");
            }
            println();
        }
    }

    private static void operationFragCheck(Operation op, ReadableRandomAccessStream hfsFile, long fsOffset, long fsLength) {
        println("Gathering information about the files on the volume...");
        final int numberOfFilesToDisplay = 10;
        ArrayList<Pair<CommonHFSCatalogLeafRecord, Integer>> mostFragmentedList =
                new ArrayList<Pair<CommonHFSCatalogLeafRecord, Integer>>(numberOfFilesToDisplay+1);

        /*
         * This is the deal:
         * - Go to catalog file
         * - Find root dir
         * - Depth first search starting at root dir
         *   - When a file is found that has more fragments than mostFragmentedList.getLast(),
         *     let the file bubble upwards in the list until it is at the right position.
         *   - If list.size() > numberOfFilesToDisplay: do removeLast() until they match.
         */

        DataLocator inputDataLocator = new ReadableStreamDataLocator(
                new ReadableConcatenatedStream(hfsFile, fsOffset, fsLength));

        FileSystemMajorType[] fsTypes =
                FileSystemDetector.detectFileSystem(inputDataLocator);

        FileSystemMajorType hfsType = null;
        outer:
        for(FileSystemMajorType type : fsTypes) {
            switch(type) {
                case APPLE_HFS:
                case APPLE_HFS_PLUS:
                case APPLE_HFSX:
                    if(hfsType != null)
                        throw new RuntimeException("Conflicting file system " +
                                "types: Detected both " + hfsType + " and " +
                                type + ".");
                    hfsType = type;
                    break;
                default:
                    break;
            }
        }

        if(hfsType == null) {
            System.err.println("No HFS file system found.");
            System.exit(1);
        }
        else
            System.out.println("Detected a " + hfsType + " file system.");

        FileSystemHandlerFactory fact = hfsType.createDefaultHandlerFactory();
        FileSystemHandler fsHandler = fact.createHandler(inputDataLocator);

        HFSCommonFileSystemHandler hfsHandler;
        if(fsHandler instanceof HFSCommonFileSystemHandler)
            hfsHandler = (HFSCommonFileSystemHandler) fsHandler;
        else
            throw new RuntimeException("Unexpected HFS fsHandler type: " +
                    fsHandler.getClass());

        HFSVolume fsView = hfsHandler.getFSView();
        CommonHFSCatalogFolderRecord rootRecord = fsView.getCatalogFile().getRootFolder();
        CommonHFSCatalogFolderRecord currentDir = rootRecord;
        recursiveFragmentSearch(fsView, rootRecord, mostFragmentedList, numberOfFilesToDisplay, options.verbose);
        if(!options.verbose) println();

        println("Most fragmented files: ");
        for(Pair<CommonHFSCatalogLeafRecord, Integer> phi : mostFragmentedList) {
            println(phi.getB() + " - \"" + phi.getA().getKey().getNodeName() +
                    "\"");
        }
    }

    private static void recursiveFragmentSearch(HFSVolume fsView, CommonHFSCatalogLeafRecord currentDir,
            ArrayList<Pair<CommonHFSCatalogLeafRecord, Integer>> mostFragmentedList,
            final int listMaxLength, final boolean verbose) {
        for(CommonHFSCatalogLeafRecord rec : fsView.getCatalogFile().listRecords(currentDir)) {
            if(rec instanceof CommonHFSCatalogFileRecord) {
                CommonHFSCatalogFile catFile =
                        ((CommonHFSCatalogFileRecord)rec).getData();

                CommonHFSExtentDescriptor[] descs =
                        fsView.getExtentsOverflowFile().
                        getAllDataExtentDescriptors(rec);

                mostFragmentedList.add(new Pair<CommonHFSCatalogLeafRecord, Integer>(rec, descs.length));

                // Let the new item bubble up to its position in the list
                for(int i = mostFragmentedList.size()-1; i > 0; --i) {
                    Pair<CommonHFSCatalogLeafRecord, Integer> lower = mostFragmentedList.get(i);
                    Pair<CommonHFSCatalogLeafRecord, Integer> higher = mostFragmentedList.get(i-1);

                    if(lower.getB().intValue() > higher.getB().intValue()) {
                        // Switch places.
                        mostFragmentedList.set(i-1, lower);
                        mostFragmentedList.set(i, higher);
                    }
                    else
                        break;
                }
                while(mostFragmentedList.size() > listMaxLength)
                    mostFragmentedList.remove(mostFragmentedList.size()-1);
            }
            else if(rec instanceof CommonHFSCatalogFolderRecord) {
                CommonHFSCatalogFolder catFolder =
                        ((CommonHFSCatalogFolderRecord)rec).getData();
                if(verbose) println(" Processing folder \"" + rec.getKey().getNodeName().toString() + "\"");
                else print(".");
                recursiveFragmentSearch(fsView, rec, mostFragmentedList, listMaxLength, verbose);
            }
            else if(rec instanceof CommonHFSCatalogFolderThreadRecord) {
                CommonHFSCatalogFolderThread catThread =
                        ((CommonHFSCatalogFolderThreadRecord)rec).getData();
            }
            else if(rec instanceof CommonHFSCatalogFileThreadRecord) {
                CommonHFSCatalogFileThread catThread =
                        ((CommonHFSCatalogFileThreadRecord)rec).getData();
            }
            else
                throw new RuntimeException("Unknown record type: " +
                        rec.getClass());
        }
    }

    private static void operationSystemFileInfo(Operation op,
            ReadableRandomAccessStream hfsFile, long fsOffset, long fsLength) {
//        ReadableRandomAccessStream oldHfsFile = hfsFile;
//        System.err.println("Opening hack UDIF file...");
//        hfsFile = new UDIFRandomAccessLLF("/Users/erik/documents.dmg");
//        System.err.println("Opened.");

        DataLocator inputDataLocator = new ReadableStreamDataLocator(
                new ReadableConcatenatedStream(hfsFile, fsOffset, fsLength));

        FileSystemMajorType[] fsTypes =
                FileSystemDetector.detectFileSystem(inputDataLocator);

        FileSystemMajorType hfsType = null;
        outer:
        for(FileSystemMajorType type : fsTypes) {
            switch(type) {
                case APPLE_HFS:
                case APPLE_HFS_PLUS:
                case APPLE_HFSX:
                    if(hfsType != null)
                        throw new RuntimeException("Conflicting file system " +
                                "types: Detected both " + hfsType + " and " +
                                type + ".");
                    hfsType = type;
                    break;
                default:
                    break;
            }
        }

        if(hfsType == null) {
            System.err.println("No HFS file system found.");
            System.exit(1);
        }
        else
            System.out.println("Detected a " + hfsType + " file system.");

        FileSystemHandlerFactory fact = hfsType.createDefaultHandlerFactory();
        FileSystemHandler fsHandler = fact.createHandler(inputDataLocator);

        HFSCommonFileSystemHandler hfsHandler;
        if(fsHandler instanceof HFSCommonFileSystemHandler)
            hfsHandler = (HFSCommonFileSystemHandler) fsHandler;
        else
            throw new RuntimeException("Unexpected HFS fsHandler type: " +
                    fsHandler.getClass());

        HFSVolume fsView = hfsHandler.getFSView();
        CommonHFSVolumeHeader header = fsView.getVolumeHeader();


        ReservedID[] ids;
        CommonHFSForkData[] interestingFiles;
        String[] labels;

        if(header instanceof CommonHFSVolumeHeader.HFSPlusImplementation) {
            CommonHFSVolumeHeader.HFSPlusImplementation plusHeader =
                    (CommonHFSVolumeHeader.HFSPlusImplementation) header;
            ids = new ReservedID[] {
                ReservedID.ALLOCATION_FILE,
                ReservedID.EXTENTS_FILE,
                ReservedID.CATALOG_FILE,
                ReservedID.ATTRIBUTES_FILE,
                ReservedID.STARTUP_FILE,
            };

            interestingFiles = new CommonHFSForkData[] {
                plusHeader.getAllocationFile(),
                plusHeader.getExtentsOverflowFile(),
                plusHeader.getCatalogFile(),
                plusHeader.getAttributesFile(),
                plusHeader.getStartupFile(),
            };
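
            // labels below parallels ids and interestingFiles: entry i names the same special file in each array.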
            labels = new String[] {
                "Allocation file",
                "Extents file",
                "Catalog file",
                "Attributes file",
                "Startup file",
            };
        }
        else {
            ids = new ReservedID[] {
                ReservedID.EXTENTS_FILE,
                ReservedID.CATALOG_FILE,
            };

            interestingFiles = new CommonHFSForkData[] {
                header.getExtentsOverflowFile(),
                header.getCatalogFile(),
            };

            labels = new String[] {
                "Extents file",
                "Catalog file",
            };

        }

        for(CommonHFSForkData f : interestingFiles) { f.print(System.out, ""); }
//        HFSPlusForkData allocationFile = header.getAllocationFile();
//        HFSPlusForkData extentsFile = header.getExtentsFile();
//        HFSPlusForkData catalogFile = header.getCatalogFile();
//        HFSPlusForkData attributesFile = header.getAttributesFile();
//        HFSPlusForkData File = header.getStartupFile();

        //HFSPlusCatalogLeafRecord rootRecord = fsView.getRoot();
        //HFSPlusCatalogLeafRecord currentDir = rootRecord;

        for(int i = 0; i < interestingFiles.length; ++i) {
            System.out.println(labels[i] + ":");
            CommonHFSForkData currentFile = interestingFiles[i];
            long basicExtentsBlockCount = 0;
            CommonHFSExtentDescriptor[] basicExtents =
                    currentFile.getBasicExtents();
            long numberOfExtents = 0;
            for(CommonHFSExtentDescriptor cur : basicExtents) {
                if(cur.getStartBlock() == 0 && cur.getBlockCount() == 0)
                    break;
                else {
                    basicExtentsBlockCount += cur.getBlockCount();
                    ++numberOfExtents;
                }
            }

            if(currentFile.getLogicalSize() <= basicExtentsBlockCount*header.getAllocationBlockSize()) {
                // All blocks are in basic extents
                System.out.println(" Number of extents: " + numberOfExtents +
                        " (all in basic)");
            }
            else {
                ReservedID currentID = ids[i];
                if(currentID == ReservedID.EXTENTS_FILE) {
                    System.out.println(" OVERFLOW IN EXTENTS OVERFLOW FILE!!");
                }
                else {
                    CommonHFSExtentDescriptor[] allDescriptors = fsView.
                            getExtentsOverflowFile().
                            getAllDataExtentDescriptors(
                                    fsView.getCommonHFSCatalogNodeID(currentID),
                                    currentFile);
                    System.out.println(" Number of extents: " +
                            allDescriptors.length + " (overflowed)");
                }
            }
        }
    }

    public static void printUsageInfo() {
        // For measurement of the standard terminal width in fixed width environments:
        // 80: <-------------------------------------------------------------------------------->
        println("hfsx - HFSExplorer Command Line Interface");
        println("Version " + VERSION + " Build #" + BuildNumber.BUILD_NUMBER);
        println(COPYRIGHT.replaceAll("\u00A9", "(C)"));
        println();
        println("Utility to explore various aspects of an HFS/HFS+/HFSX filesystem.");
        println("usage: hfsx [common options] <verb> [verb options] <file/device>");
        println();
        println(" Common options:");
        println(" -apm Specifies that the HFS partition is embedded within an Apple");
        println(" Partition Map. The user will be allowed to choose which partition in");
        println(" the map to attempt reading.");
        println(" -v Verbose operation.");
        println();
        println(" Verbs:");
        println(" browse Launches a mode where the user can browse the files in a HFS+ file");
        println(" system.");
        println(" chfrag Lists the 10 most fragmented files of the volume.");
//        println(" test Launches a test mode for extensive exploration of file system");
//        println(" structures. Only for debugging purposes.");
        println();
        println(" Verb options:");
        println(" <none currently defined>");
    }

    public static void println() {
        //System.out.print(BACKSPACE79);
        System.out.println();
    }

    public static void println(String s) {
        //System.out.print(BACKSPACE79);
        System.out.println(s);
    }

    public static void print(String s) {
        //System.out.print(BACKSPACE79);
        System.out.print(s);
    }

    public static void vprintln() {
        //System.out.print(BACKSPACE79);
        if(options.verbose) System.out.println();
    }

    public static void vprintln(String s) {
        //System.out.print(BACKSPACE79);
        if(options.verbose) System.out.println(s);
    }

    public static void vprint(String s) {
        //System.out.print(BACKSPACE79);
        if(options.verbose) System.out.print(s);
    }

    public static boolean parseOptions(String[] arguments, int offset,
            int length) {
        int i;
        String currentArg = null;
        for(i = offset; i < length; ++i) {
            currentArg = arguments[i];
            if(!currentArg.startsWith("-"))
                break;
            else if(currentArg.equals("-apm"))
                options.readAPM = true;
            else if(currentArg.equals("-v"))
                options.verbose = true;
            else
                println("\"" + currentArg + "\" is not a valid parameter.");
        }

        // Now comes the verb
        if(currentArg.equals("browse"))
            operation = Operation.BROWSE;
        else if(currentArg.equals("chfrag"))
            operation = Operation.FRAGCHECK;
        //else if(currentArg.equals("test"))
        //    operation = Operation.TEST;
        else if(currentArg.equals("systemfileinfo"))
            operation = Operation.SYSTEMFILEINFO;
        else {
            System.err.println("Unknown operation: " + currentArg);
            return false;
        }

        if(operation != null) {
            for(++i; i < length; ++i)
                operation.addArg(arguments[i]);
        }

        //System.out.println("SETTING FILENAME TO!! ::: " + arguments[length-1]);
        //operation.setFilename(arguments[length-1]);

        return true;
    }

    /*
    public static HFSPlusCatalogFile findFileID(HFSPlusCatalogLeafNode leafNode, HFSCatalogNodeID nodeID) {
        HFSPlusCatalogLeafRecord[] records = leafNode.getLeafRecords();
        for(int i = 0; i < records.length; ++i) {
            HFSPlusCatalogLeafRecord curRec = records[i];
            HFSPlusCatalogLeafRecordData curRecData = curRec.getData();
            if(curRecData instanceof HFSPlusCatalogFile &&
                    ((HFSPlusCatalogFile)curRecData).getFileID().toInt() == nodeID.toInt()) {
                return (HFSPlusCatalogFile)curRecData;
            }
        }
        return null;
    }

    public static HFSPlusCatalogFolder findFolderID(HFSPlusCatalogLeafNode leafNode, HFSCatalogNodeID nodeID) {
        HFSPlusCatalogLeafRecord[] records = leafNode.getLeafRecords();
        for(int i = 0; i < records.length; ++i) {
            HFSPlusCatalogLeafRecord curRec = records[i];
            HFSPlusCatalogLeafRecordData curRecData = curRec.getData();
            if(curRecData instanceof HFSPlusCatalogFolder &&
                    ((HFSPlusCatalogFolder)curRecData).getFolderID().toInt() == nodeID.toInt()) {
                return (HFSPlusCatalogFolder)curRecData;
            }
        }
        return null;
    }
    */
}