1#!/usr/bin/env python 2# Copyright (c) 2003-2016 CORE Security Technologies 3# 4# This software is provided under under a slightly modified version 5# of the Apache Software License. See the accompanying LICENSE file 6# for more information. 7# 8# Description: Mini shell for browsing an NTFS volume 9# 10# Author: 11# Alberto Solino (@agsolino) 12# 13# 14# Reference for: 15# Structure. Quick and dirty implementation.. just for fun.. ;) 16# 17# NOTE: Lots of info (mainly the structs) taken from the NTFS-3G project.. 18# 19# TODO 20# [] Parse the attributes list attribute. It is unknown what would happen now if 21# we face a highly fragmented file that will have many attributes that won't fit 22# in the MFT Record 23# [] Support compressed, encrypted and sparse files 24# 25 26import os 27import sys 28import logging 29import struct 30import argparse 31import cmd 32import ntpath 33# If you wanna have readline like functionality in Windows, install pyreadline 34try: 35 import pyreadline as readline 36except ImportError: 37 import readline 38from datetime import datetime 39from impacket.examples import logger 40from impacket import version 41from impacket.structure import Structure 42 43 44import string 45def pretty_print(x): 46 if x in '0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ!"#$%&\'()*+,-./:;<=>?@[\\]^_`{|}~ ': 47 return x 48 else: 49 return '.' 50 51def hexdump(data): 52 x=str(data) 53 strLen = len(x) 54 i = 0 55 while i < strLen: 56 print "%04x " % i, 57 for j in range(16): 58 if i+j < strLen: 59 print "%02X" % ord(x[i+j]), 60 else: 61 print " ", 62 if j%16 == 7: 63 print "", 64 print " ", 65 print ''.join(pretty_print(x) for x in x[i:i+16] ) 66 i += 16 67 68# Reserved/fixed MFTs 69FIXED_MFTS = 16 70 71# Attribute types 72UNUSED = 0 73STANDARD_INFORMATION = 0x10 74ATTRIBUTE_LIST = 0x20 75FILE_NAME = 0x30 76OBJECT_ID = 0x40 77SECURITY_DESCRIPTOR = 0x50 78VOLUME_NAME = 0x60 79VOLUME_INFORMATION = 0x70 80DATA = 0x80 81INDEX_ROOT = 0x90 82INDEX_ALLOCATION = 0xa0 83BITMAP = 0xb0 84REPARSE_POINT = 0xc0 85EA_INFORMATION = 0xd0 86EA = 0xe0 87PROPERTY_SET = 0xf0 88LOGGED_UTILITY_STREAM = 0x100 89FIRST_USER_DEFINED_ATTRIBUTE = 0x1000 90END = 0xffffffff 91 92# Attribute flags 93ATTR_IS_COMPRESSED = 0x0001 94ATTR_COMPRESSION_MASK = 0x00ff 95ATTR_IS_ENCRYPTED = 0x4000 96ATTR_IS_SPARSE = 0x8000 97 98# FileName type flags 99FILE_NAME_POSIX = 0x00 100FILE_NAME_WIN32 = 0x01 101FILE_NAME_DOS = 0x02 102FILE_NAME_WIN32_AND_DOS = 0x03 103 104# MFT Record flags 105MFT_RECORD_IN_USE = 0x0001 106MFT_RECORD_IS_DIRECTORY = 0x0002 107MFT_RECORD_IS_4 = 0x0004 108MFT_RECORD_IS_VIEW_INDEX = 0x0008 109MFT_REC_SPACE_FILLER = 0xfffff 110 111# File Attribute Flags 112FILE_ATTR_READONLY = 0x0001 113FILE_ATTR_HIDDEN = 0x0002 114FILE_ATTR_SYSTEM = 0x0004 115FILE_ATTR_DIRECTORY = 0x0010 116FILE_ATTR_ARCHIVE = 0x0020 117FILE_ATTR_DEVICE = 0x0040 118FILE_ATTR_NORMAL = 0x0080 119FILE_ATTR_TEMPORARY = 0x0100 120FILE_ATTR_SPARSE_FILE = 0x0200 121FILE_ATTR_REPARSE_POINT = 0x0400 122FILE_ATTR_COMPRESSED = 0x0800 123FILE_ATTR_OFFLINE = 0x1000 124FILE_ATTR_NOT_CONTENT_INDEXED = 0x2000 125FILE_ATTR_ENCRYPTED = 0x4000 126FILE_ATTR_VALID_FLAGS = 0x7fb7 127FILE_ATTR_VALID_SET_FLAGS = 0x31a7 128FILE_ATTR_I30_INDEX_PRESENT = 0x10000000 129FILE_ATTR_VIEW_INDEX_PRESENT = 0x20000000 130 131# NTFS System files 132FILE_MFT = 0 133FILE_MFTMirr = 1 134FILE_LogFile = 2 135FILE_Volume = 3 136FILE_AttrDef = 4 137FILE_Root = 5 138FILE_Bitmap = 6 139FILE_Boot = 7 140FILE_BadClus = 8 141FILE_Secure = 9 142FILE_UpCase = 10 143FILE_Extend = 11 144 145# Index Header Flags 146SMALL_INDEX = 0 147LARGE_INDEX = 1 148LEAF_NODE = 0 149INDEX_NODE = 1 150NODE_MASK = 0 151 152# Index Entry Flags 153INDEX_ENTRY_NODE = 1 154INDEX_ENTRY_END = 2 155INDEX_ENTRY_SPACE_FILLER = 0xffff 156 157 158class NTFS_BPB(Structure): 159 structure = ( 160 ('BytesPerSector','<H=0'), 161 ('SectorsPerCluster','B=0'), 162 ('ReservedSectors','<H=0'), 163 ('Reserved','3s=""'), 164 ('Reserved2','2s=""'), 165 ('MediaDescription','B=0'), 166 ('Reserved3','2s=""'), 167 ('Reserved4','<H=0'), 168 ('Reserved5','<H=0'), 169 ('Reserved6','<L=0'), 170 ('Reserved7','4s=""'), 171 ) 172 173class NTFS_EXTENDED_BPB(Structure): 174 structure = ( 175 ('Reserved','4s=""'), 176 ('TotalSectors','<Q=0'), 177 ('MFTClusterNumber','<Q=0'), 178 ('MFTMirrClusterNumber','<Q=0'), 179 ('ClusterPerFileRecord','b=0'), 180 ('Reserved2','3s=""'), 181 ('ClusterPerIndexBuffer','<b=0'), 182 ('Reserved3','3s=""'), 183 ('VolumeSerialNumber','8s=""'), 184 ('CheckSum','4s=""'), 185 ) 186 187class NTFS_BOOT_SECTOR(Structure): 188 structure = ( 189 ('JmpInstr','3s=""'), 190 ('OEM_ID','8s=""'), 191 ('BPB','25s=""'), 192 ('ExtendedBPB','48s=""'), 193 ('Bootstrap','426s=""'), 194 ('EOS','<H=0'), 195 ) 196 197class NTFS_MFT_RECORD(Structure): 198 structure = ( 199 ('MagicLabel','4s=""'), 200 ('USROffset','<H=0'), # Update Sequence Records Offset 201 ('USRSize','<H=0'), # Update Sequence Records Size 202 ('LogSeqNum','<Q=0'), 203 ('SeqNum','<H=0'), 204 ('LinkCount','<H=0'), 205 ('AttributesOffset','<H=0'), 206 ('Flags','<H=0'), 207 ('BytesInUse','<L=0'), 208 ('BytesAllocated','<L=0'), 209 ('BaseMftRecord','<Q=0'), 210 ('NextAttrInstance','<H=0'), 211 ('Reserved','<H=0'), 212 ('RecordNumber','<L=0'), 213 ) 214 215class NTFS_ATTRIBUTE_RECORD(Structure): 216 commonHdr = ( 217 ('Type','<L=0'), 218 ('Length','<L=0'), 219 ('NonResident','B=0'), 220 ('NameLength','B=0'), 221 ('NameOffset','<H=0'), 222 ('Flags','<H=0'), 223 ('Instance','<H=0'), 224 ) 225 structure = () 226 227class NTFS_ATTRIBUTE_RECORD_NON_RESIDENT(Structure): 228 structure = ( 229 ('LowestVCN','<Q=0'), 230 ('HighestVCN','<Q=0'), 231 ('DataRunsOffset','<H=0'), 232 ('CompressionUnit','<H=0'), 233 ('Reserved1','4s=""'), 234 ('AllocatedSize','<Q=0'), 235 ('DataSize','<Q=0'), 236 ('InitializedSize','<Q=0'), 237# ('CompressedSize','<Q=0'), 238 ) 239 240class NTFS_ATTRIBUTE_RECORD_RESIDENT(Structure): 241 structure = ( 242 ('ValueLen','<L=0'), 243 ('ValueOffset','<H=0'), 244 ('Flags','B=0'), 245 ('Reserved','B=0'), 246 ) 247 248class NTFS_FILE_NAME_ATTR(Structure): 249 structure = ( 250 ('ParentDirectory','<Q=0'), 251 ('CreationTime','<Q=0'), 252 ('LastDataChangeTime','<Q=0'), 253 ('LastMftChangeTime','<Q=0'), 254 ('LastAccessTime','<Q=0'), 255 ('AllocatedSize','<Q=0'), 256 ('DataSize','<Q=0'), 257 ('FileAttributes','<L=0'), 258 ('EaSize','<L=0'), 259 ('FileNameLen','B=0'), 260 ('FileNameType','B=0'), 261 ('_FileName','_-FileName','self["FileNameLen"]*2'), 262 ('FileName',':'), 263 ) 264 265class NTFS_STANDARD_INFORMATION(Structure): 266 structure = ( 267 ('CreationTime','<Q=0'), 268 ('LastDataChangeTime','<Q=0'), 269 ('LastMftChangeTime','<Q=0'), 270 ('LastAccessTime','<Q=0'), 271 ('FileAttributes','<L=0'), 272 ) 273 274class NTFS_INDEX_HEADER(Structure): 275 structure = ( 276 ('EntriesOffset','<L=0'), 277 ('IndexLength','<L=0'), 278 ('AllocatedSize','<L=0'), 279 ('Flags','B=0'), 280 ('Reserved','3s=""'), 281 ) 282 283class NTFS_INDEX_ROOT(Structure): 284 structure = ( 285 ('Type','<L=0'), 286 ('CollationRule','<L=0'), 287 ('IndexBlockSize','<L=0'), 288 ('ClustersPerIndexBlock','B=0'), 289 ('Reserved','3s=""'), 290 ('Index',':',NTFS_INDEX_HEADER), 291 ) 292 293 294class NTFS_INDEX_ALLOCATION(Structure): 295 structure = ( 296 ('Magic','4s=""'), 297 ('USROffset','<H=0'), # Update Sequence Records Offset 298 ('USRSize','<H=0'), # Update Sequence Records Size 299 ('Lsn','<Q=0'), 300 ('IndexVcn','<Q=0'), 301 ('Index',':',NTFS_INDEX_HEADER), 302 ) 303 304class NTFS_INDEX_ENTRY_HEADER(Structure): 305 structure = ( 306 ('IndexedFile','<Q=0'), 307 ('Length','<H=0'), 308 ('KeyLength','<H=0'), 309 ('Flags','<H=0'), 310 ('Reserved','<H=0'), 311 ) 312 313class NTFS_INDEX_ENTRY(Structure): 314 alignment = 8 315 structure = ( 316 ('EntryHeader',':',NTFS_INDEX_ENTRY_HEADER), 317 ('_Key','_-Key','self["EntryHeader"]["KeyLength"]'), 318 ('Key',':'), 319 ('_Vcn','_-Vcn','(self["EntryHeader"]["Flags"] & 1)*8'), 320 ('Vcn',':') 321 ) 322 323class NTFS_DATA_RUN(Structure): 324 structure = ( 325 ('LCN','<q=0'), 326 ('Clusters','<Q=0'), 327 ('StartVCN','<Q=0'), 328 ('LastVCN','<Q=0'), 329 ) 330 331def getUnixTime(t): 332 t -= 116444736000000000 333 t /= 10000000 334 return t 335 336 337class Attribute: 338 def __init__(self, iNode, data): 339 self.AttributeName = None 340 self.NTFSVolume = iNode.NTFSVolume 341 self.AttributeHeader = NTFS_ATTRIBUTE_RECORD(data) 342 if self.AttributeHeader['NameLength'] > 0 and self.AttributeHeader['Type'] != END: 343 self.AttributeName = data[self.AttributeHeader['NameOffset']:][:self.AttributeHeader['NameLength']*2].decode('utf-16le') 344 345 def getFlags(self): 346 return self.AttributeHeader['Flags'] 347 348 def getName(self): 349 return self.AttributeName 350 351 def isNonResident(self): 352 return self.AttributeHeader['NonResident'] 353 354 def dump(self): 355 return self.AttributeHeader.dump() 356 357 def getTotalSize(self): 358 return self.AttributeHeader['Length'] 359 360 def getType(self): 361 return self.AttributeHeader['Type'] 362 363class AttributeResident(Attribute): 364 def __init__(self, iNode, data): 365 logging.debug("Inside AttributeResident: iNode: %s" % iNode.INodeNumber) 366 Attribute.__init__(self,iNode,data) 367 self.ResidentHeader = NTFS_ATTRIBUTE_RECORD_RESIDENT(data[len(self.AttributeHeader):]) 368 self.AttrValue = data[self.ResidentHeader['ValueOffset']:][:self.ResidentHeader['ValueLen']] 369 370 def dump(self): 371 return self.ResidentHeader.dump() 372 373 def getFlags(self): 374 return self.ResidentHeader['Flags'] 375 376 def getValue(self): 377 return self.AttrValue 378 379 def read(self,offset,length): 380 logging.debug("Inside Read: offset: %d, length: %d" %(offset,length)) 381 return self.AttrValue[offset:][:length] 382 383 def getDataSize(self): 384 return len(self.AttrValue) 385 386class AttributeNonResident(Attribute): 387 def __init__(self, iNode, data): 388 logging.debug("Inside AttributeNonResident: iNode: %s" % iNode.INodeNumber) 389 Attribute.__init__(self,iNode,data) 390 self.NonResidentHeader = NTFS_ATTRIBUTE_RECORD_NON_RESIDENT(data[len(self.AttributeHeader):]) 391 self.AttrValue = data[self.NonResidentHeader['DataRunsOffset']:][:self.NonResidentHeader['AllocatedSize']] 392 self.DataRuns = [] 393 self.ClusterSize = 0 394 self.parseDataRuns() 395 396 def dump(self): 397 return self.NonResidentHeader.dump() 398 399 def getDataSize(self): 400 return self.NonResidentHeader['InitializedSize'] 401 402 def getValue(self): 403 return None 404 405 def parseDataRuns(self): 406 value = self.AttrValue 407 if value is not None: 408 VCN = 0 409 LCN = 0 410 LCNOffset = 0 411 while value[0] != '\x00': 412 LCN += LCNOffset 413 dr = NTFS_DATA_RUN() 414 415 size = struct.unpack('B',(value[0]))[0] 416 417 value = value[1:] 418 419 lengthBytes = size & 0x0F 420 offsetBytes = size >> 4 421 422 length = value[:lengthBytes] 423 length = struct.unpack('<Q', value[:lengthBytes]+'\x00'*(8-len(length)))[0] 424 value = value[lengthBytes:] 425 426 fillWith = '\x00' 427 if struct.unpack('B',value[offsetBytes-1])[0] & 0x80: 428 fillWith = '\xff' 429 LCNOffset = value[:offsetBytes]+fillWith*(8-len(value[:offsetBytes])) 430 LCNOffset = struct.unpack('<q',LCNOffset)[0] 431 432 value = value[offsetBytes:] 433 434 dr['LCN'] = LCN+LCNOffset 435 dr['Clusters'] = length 436 dr['StartVCN'] = VCN 437 dr['LastVCN'] = VCN + length -1 438 439 VCN += length 440 self.DataRuns.append(dr) 441 442 if len(value) == 0: 443 break 444 445 def readClusters(self, clusters, lcn): 446 logging.debug("Inside ReadClusters: clusters:%d, lcn:%d" % (clusters,lcn)) 447 if lcn == -1: 448 return '\x00'*clusters*self.ClusterSize 449 self.NTFSVolume.volumeFD.seek(lcn*self.ClusterSize,0) 450 buf = self.NTFSVolume.volumeFD.read(clusters*self.ClusterSize) 451 while len(buf) < clusters*self.ClusterSize: 452 buf+= self.NTFSVolume.volumeFD.read((clusters*self.ClusterSize)-len(buf)) 453 454 if len(buf) == 0: 455 return None 456 457 return buf 458 459 def readVCN(self, vcn, numOfClusters): 460 logging.debug("Inside ReadVCN: vcn: %d, numOfClusters: %d" % (vcn,numOfClusters)) 461 buf = '' 462 clustersLeft = numOfClusters 463 for dr in self.DataRuns: 464 if (vcn >= dr['StartVCN']) and (vcn <= dr['LastVCN']): 465 466 vcnsToRead = dr['LastVCN'] - vcn + 1 467 468 # Are we requesting to read more data outside this DataRun? 469 if numOfClusters > vcnsToRead: 470 # Yes 471 clustersToRead = vcnsToRead 472 else: 473 clustersToRead = numOfClusters 474 475 tmpBuf = self.readClusters(clustersToRead,dr['LCN']+(vcn-dr['StartVCN'])) 476 if tmpBuf is not None: 477 buf += tmpBuf 478 clustersLeft -= clustersToRead 479 vcn += clustersToRead 480 else: 481 break 482 if clustersLeft == 0: 483 break 484 return buf 485 486 def read(self,offset,length): 487 logging.debug("Inside Read: offset: %d, length: %d" %(offset,length)) 488 489 buf = '' 490 curLength = length 491 self.ClusterSize = self.NTFSVolume.BPB['BytesPerSector']*self.NTFSVolume.BPB['SectorsPerCluster'] 492 493 # Given the offset, let's calculate what VCN should be the first one to read 494 vcnToStart = offset / self.ClusterSize 495 #vcnOffset = self.ClusterSize - (offset % self.ClusterSize) 496 497 # Do we have to read partial VCNs? 498 if offset % self.ClusterSize: 499 # Read the whole VCN 500 bufTemp = self.readVCN(vcnToStart, 1) 501 if bufTemp is '': 502 # Something went wrong 503 return None 504 buf = bufTemp[offset % self.ClusterSize:] 505 curLength -= len(buf) 506 vcnToStart += 1 507 508 # Finished? 509 if curLength <= 0: 510 return buf[:length] 511 512 # First partial cluster read.. now let's keep reading full clusters 513 # Data left to be read is bigger than a Cluster? 514 if curLength / self.ClusterSize: 515 # Yep.. so let's read full clusters 516 bufTemp = self.readVCN(vcnToStart, curLength / self.ClusterSize) 517 if bufTemp is '': 518 # Something went wrong 519 return None 520 if len(bufTemp) > curLength: 521 # Too much data read, taking something off 522 buf = buf + bufTemp[:curLength] 523 else: 524 buf = buf + bufTemp 525 vcnToStart += curLength / self.ClusterSize 526 curLength -= len(bufTemp) 527 528 # Is there anything else left to be read in the last cluster? 529 if curLength > 0: 530 bufTemp = self.readVCN(vcnToStart, 1) 531 buf = buf + bufTemp[:curLength] 532 533 if buf == '': 534 return None 535 else: 536 return buf 537 538class AttributeStandardInfo: 539 def __init__(self, attribute): 540 logging.debug("Inside AttributeStandardInfo") 541 self.Attribute = attribute 542 self.StandardInfo = NTFS_STANDARD_INFORMATION(self.Attribute.AttrValue) 543 544 def getFileAttributes(self): 545 return self.StandardInfo['FileAttributes'] 546 547 def getFileTime(self): 548 if self.StandardInfo['LastDataChangeTime'] > 0: 549 return datetime.fromtimestamp(getUnixTime(self.StandardInfo['LastDataChangeTime'])) 550 else: 551 return 0 552 553 def dump(self): 554 return self.StandardInfo.dump() 555 556class AttributeFileName: 557 def __init__(self, attribute): 558 logging.debug("Inside AttributeFileName") 559 self.Attribute = attribute 560 self.FileNameRecord = NTFS_FILE_NAME_ATTR(self.Attribute.AttrValue) 561 562 def getFileNameType(self): 563 return self.FileNameRecord['FileNameType'] 564 565 def getFileAttributes(self): 566 return self.FileNameRecord['FileAttributes'] 567 568 def getFileName(self): 569 return self.FileNameRecord['FileName'].decode('utf-16le') 570 571 def getFileSize(self): 572 return self.FileNameRecord['DataSize'] 573 574 def getFlags(self): 575 return self.FileNameRecord['FileAttributes'] 576 577 def dump(self): 578 return self.FileNameRecord.dump() 579 580class AttributeIndexAllocation: 581 def __init__(self, attribute): 582 logging.debug("Inside AttributeIndexAllocation") 583 self.Attribute = attribute 584 585 def dump(self): 586 print self.Attribute.dump() 587 for i in self.Attribute.DataRuns: 588 print i.dump() 589 590 def read(self, offset, length): 591 return self.Attribute.read(offset, length) 592 593 594class AttributeIndexRoot: 595 def __init__(self, attribute): 596 logging.debug("Inside AttributeIndexRoot") 597 self.Attribute = attribute 598 self.IndexRootRecord = NTFS_INDEX_ROOT(attribute.AttrValue) 599 self.IndexEntries = [] 600 self.parseIndexEntries() 601 602 def parseIndexEntries(self): 603 data = self.Attribute.AttrValue[len(self.IndexRootRecord):] 604 while True: 605 ie = IndexEntry(data) 606 self.IndexEntries.append(ie) 607 if ie.isLastNode(): 608 break 609 data = data[ie.getSize():] 610 611 def dump(self): 612 self.IndexRootRecord.dump() 613 for i in self.IndexEntries: 614 i.dump() 615 616 def getType(self): 617 return self.IndexRootRecord['Type'] 618 619class IndexEntry: 620 def __init__(self, entry): 621 self.entry = NTFS_INDEX_ENTRY(entry) 622 623 def isSubNode(self): 624 return self.entry['EntryHeader']['Flags'] & INDEX_ENTRY_NODE 625 626 def isLastNode(self): 627 return self.entry['EntryHeader']['Flags'] & INDEX_ENTRY_END 628 629 def getVCN(self): 630 return struct.unpack('<Q', self.entry['Vcn'])[0] 631 632 def getSize(self): 633 return len(self.entry) 634 635 def getKey(self): 636 return self.entry['Key'] 637 638 def getINodeNumber(self): 639 return self.entry['EntryHeader']['IndexedFile'] & 0x0000FFFFFFFFFFFF 640 641 def dump(self): 642 self.entry.dump() 643 644class INODE: 645 def __init__(self, NTFSVolume): 646 self.NTFSVolume = NTFSVolume 647 # This is the entire file record 648 self.INodeNumber = None 649 self.Attributes = {} 650 self.AttributesRaw = None 651 self.AttributesLastPos = None 652 # Some interesting Attributes to parse 653 self.FileAttributes = 0 654 self.LastDataChangeTime = None 655 self.FileName = None 656 self.FileSize = 0 657 658 def isDirectory(self): 659 return self.FileAttributes & FILE_ATTR_I30_INDEX_PRESENT 660 661 def isCompressed(self): 662 return self.FileAttributes & FILE_ATTR_COMPRESSED 663 664 def isEncrypted(self): 665 return self.FileAttributes & FILE_ATTR_ENCRYPTED 666 667 def isSparse(self): 668 return self.FileAttributes & FILE_ATTR_SPARSE_FILE 669 670 def displayName(self): 671 if self.LastDataChangeTime is not None and self.FileName is not None: 672 try: 673# print "%d - %s %s %s " %( self.INodeNumber, self.getPrintableAttributes(), self.LastDataChangeTime.isoformat(' '), self.FileName) 674 print "%s %s %15d %s " %( self.getPrintableAttributes(), self.LastDataChangeTime.isoformat(' '), self.FileSize, self.FileName) 675 except Exception, e: 676 logging.error('Exception when trying to display inode %d: %s' % (self.INodeNumber,str(e))) 677 678 def getPrintableAttributes(self): 679 mask = '' 680 if self.FileAttributes & FILE_ATTR_I30_INDEX_PRESENT: 681 mask += 'd' 682 else: 683 mask += '-' 684 if self.FileAttributes & FILE_ATTR_HIDDEN: 685 mask += 'h' 686 else: 687 mask += '-' 688 if self.FileAttributes & FILE_ATTR_SYSTEM: 689 mask += 'S' 690 else: 691 mask += '-' 692 if self.isCompressed(): 693 mask += 'C' 694 else: 695 mask += '-' 696 if self.isEncrypted(): 697 mask += 'E' 698 else: 699 mask += '-' 700 if self.isSparse(): 701 mask += 's' 702 else: 703 mask += '-' 704 return mask 705 706 def parseAttributes(self): 707 # Parse Standard Info 708 attr = self.searchAttribute(STANDARD_INFORMATION, None) 709 if attr is not None: 710 si = AttributeStandardInfo(attr) 711 self.Attributes[STANDARD_INFORMATION] = si 712 self.FileAttributes |= si.getFileAttributes() 713 self.LastDataChangeTime = si.getFileTime() 714 self.Attributes[STANDARD_INFORMATION] = si 715 716 # Parse Filename 717 attr = self.searchAttribute(FILE_NAME, None) 718 while attr is not None: 719 fn = AttributeFileName(attr) 720 if fn.getFileNameType() != FILE_NAME_DOS: 721 self.FileName = fn.getFileName() 722 self.FileSize = fn.getFileSize() 723 self.FileAttributes |= fn.getFileAttributes() 724 self.Attributes[FILE_NAME] = fn 725 break 726 attr = self.searchAttribute(FILE_NAME, None, True) 727 728 # Parse Index Allocation 729 attr = self.searchAttribute(INDEX_ALLOCATION, unicode('$I30')) 730 if attr is not None: 731 ia = AttributeIndexAllocation(attr) 732 self.Attributes[INDEX_ALLOCATION] = ia 733 734 attr = self.searchAttribute(INDEX_ROOT,unicode('$I30')) 735 if attr is not None: 736 ir = AttributeIndexRoot(attr) 737 self.Attributes[INDEX_ROOT] = ir 738 739 def searchAttribute(self, attributeType, attributeName, findNext = False): 740 logging.debug("Inside searchAttribute: type: 0x%x, name: %s" % (attributeType, attributeName)) 741 record = None 742 743 if findNext is True: 744 data = self.AttributesLastPos 745 else: 746 data = self.AttributesRaw 747 748 while True: 749 750 if len(data) <= 8: 751 record = None 752 break 753 754 record = Attribute(self,data) 755 756 if record.getType() == END: 757 record = None 758 break 759 760 if record.getTotalSize() == 0: 761 record = None 762 break 763 764 if record.getType() == attributeType and record.getName() == attributeName: 765 if record.isNonResident() == 1: 766 record = AttributeNonResident(self, data) 767 else: 768 record = AttributeResident(self, data) 769 770 self.AttributesLastPos = data[record.getTotalSize():] 771 772 break 773 774 data = data[record.getTotalSize():] 775 776 return record 777 778 def PerformFixUp(self, record, buf, numSectors): 779 # It fixes the sequence WORDS on every sector of a cluster 780 # FixUps are used by: 781 # FILE Records in the $MFT 782 # INDX Records in directories and other indexes 783 # RCRD Records in the $LogFile 784 # RSTR Records in the $LogFile 785 786 logging.debug("Inside PerformFixUp..." ) 787 magicNum = struct.unpack('<H',buf[record['USROffset']:][:2])[0] 788 sequenceArray = buf[record['USROffset']+2:][:record['USRSize']*2] 789 790 dataList = list(buf) 791 index = 0 792 for i in range(0,numSectors*2, 2): 793 index += self.NTFSVolume.SectorSize-2 794 # Let's get the last two bytes of the sector 795 lastBytes = struct.unpack('<H', buf[index:][:2])[0] 796 # Is it the same as the magicNum? 797 if lastBytes != magicNum: 798 logging.error("Magic number 0x%x doesn't match with 0x%x" % (magicNum,lastBytes)) 799 return None 800 # Now let's replace the original bytes 801 dataList[index] = sequenceArray[i] 802 dataList[index+1] = sequenceArray[i+1] 803 index += 2 804 805 data = "".join(dataList) 806 return data 807 808 def parseIndexBlocks(self, vcn): 809 IndexEntries = [] 810 #sectors = self.NTFSVolume.IndexBlockSize / self.NTFSVolume.SectorSize 811 if self.Attributes.has_key(INDEX_ALLOCATION): 812 ia = self.Attributes[INDEX_ALLOCATION] 813 data = ia.read(vcn*self.NTFSVolume.IndexBlockSize, self.NTFSVolume.IndexBlockSize) 814 if data: 815 iaRec = NTFS_INDEX_ALLOCATION(data) 816 sectorsPerIB = self.NTFSVolume.IndexBlockSize / self.NTFSVolume.SectorSize 817 data = self.PerformFixUp(iaRec, data, sectorsPerIB) 818 if data is None: 819 return [] 820 data = data[len(iaRec)-len(NTFS_INDEX_HEADER())+iaRec['Index']['EntriesOffset']:] 821 while True: 822 ie = IndexEntry(data) 823 IndexEntries.append(ie) 824 if ie.isLastNode(): 825 break 826 data = data[ie.getSize():] 827 return IndexEntries 828 829 def walkSubNodes(self, vcn): 830 logging.debug("Inside walkSubNodes: vcn %s" % vcn) 831 entries = self.parseIndexBlocks(vcn) 832 files = [] 833 for entry in entries: 834 if entry.isSubNode(): 835 files += self.walkSubNodes(entry.getVCN()) 836 else: 837 if len(entry.getKey()) > 0 and entry.getINodeNumber() > 16: 838 fn = NTFS_FILE_NAME_ATTR(entry.getKey()) 839 if fn['FileNameType'] != FILE_NAME_DOS: 840 #inode = INODE(self.NTFSVolume) 841 #inode.FileAttributes = fn['FileAttributes'] 842 #inode.FileSize = fn['DataSize'] 843 #inode.LastDataChangeTime = datetime.fromtimestamp(getUnixTime(fn['LastDataChangeTime'])) 844 #inode.INodeNumber = entry.getINodeNumber() 845 #inode.FileName = fn['FileName'].decode('utf-16le') 846 #inode.displayName() 847 files.append(fn) 848# if inode.FileAttributes & FILE_ATTR_I30_INDEX_PRESENT and entry.getINodeNumber() > 16: 849# inode2 = self.NTFSVolume.getINode(entry.getINodeNumber()) 850# inode2.walk() 851 return files 852 853 def walk(self): 854 logging.debug("Inside Walk... ") 855 files = [] 856 if self.Attributes.has_key(INDEX_ROOT): 857 ir = self.Attributes[INDEX_ROOT] 858 859 if ir.getType() & FILE_NAME: 860 for ie in ir.IndexEntries: 861 if ie.isSubNode(): 862 files += self.walkSubNodes(ie.getVCN()) 863 return files 864 else: 865 return None 866 867 def findFirstSubNode(self, vcn, toSearch): 868 def getFileName(entry): 869 if len(entry.getKey()) > 0 and entry.getINodeNumber() > 16: 870 fn = NTFS_FILE_NAME_ATTR(entry.getKey()) 871 if fn['FileNameType'] != FILE_NAME_DOS: 872 return string.upper(fn['FileName'].decode('utf-16le')) 873 return None 874 875 entries = self.parseIndexBlocks(vcn) 876 for ie in entries: 877 name = getFileName(ie) 878 if name is not None: 879 if name == toSearch: 880 # Found! 881 return ie 882 if toSearch < name: 883 if ie.isSubNode(): 884 res = self.findFirstSubNode(ie.getVCN(), toSearch) 885 if res is not None: 886 return res 887 else: 888 # Bye bye.. not found 889 return None 890 else: 891 if ie.isSubNode(): 892 res = self.findFirstSubNode(ie.getVCN(), toSearch) 893 if res is not None: 894 return res 895 896 897 def findFirst(self, fileName): 898 # Searches for a file and returns an Index Entry. None if not found 899 900 def getFileName(entry): 901 if len(entry.getKey()) > 0 and entry.getINodeNumber() > 16: 902 fn = NTFS_FILE_NAME_ATTR(entry.getKey()) 903 if fn['FileNameType'] != FILE_NAME_DOS: 904 return string.upper(fn['FileName'].decode('utf-16le')) 905 return None 906 907 908 toSearch = unicode(string.upper(fileName)) 909 910 if self.Attributes.has_key(INDEX_ROOT): 911 ir = self.Attributes[INDEX_ROOT] 912 if ir.getType() & FILE_NAME or 1==1: 913 for ie in ir.IndexEntries: 914 name = getFileName(ie) 915 if name is not None: 916 if name == toSearch: 917 # Found! 918 return ie 919 if toSearch < name: 920 if ie.isSubNode(): 921 res = self.findFirstSubNode(ie.getVCN(), toSearch) 922 if res is not None: 923 return res 924 else: 925 # Bye bye.. not found 926 return None 927 else: 928 if ie.isSubNode(): 929 res = self.findFirstSubNode(ie.getVCN(), toSearch) 930 if res is not None: 931 return res 932 933 def getStream(self, name): 934 return self.searchAttribute( DATA, name, findNext = False) 935 936 937class NTFS: 938 def __init__(self, volumeName): 939 self.__volumeName = volumeName 940 self.__bootSector = None 941 self.__MFTStart = None 942 self.volumeFD = None 943 self.BPB = None 944 self.ExtendedBPB = None 945 self.RecordSize = None 946 self.IndexBlockSize = None 947 self.SectorSize = None 948 self.MFTINode = None 949 self.mountVolume() 950 951 def mountVolume(self): 952 logging.debug("Mounting volume...") 953 self.volumeFD = open(self.__volumeName,"rb") 954 self.readBootSector() 955 self.MFTINode = self.getINode(FILE_MFT) 956 # Check whether MFT is fragmented 957 attr = self.MFTINode.searchAttribute(DATA, None) 958 if attr is None: 959 # It's not 960 del self.MFTINode 961 self.MFTINode = None 962 963 def readBootSector(self): 964 logging.debug("Reading Boot Sector for %s" % self.__volumeName) 965 966 self.volumeFD.seek(0,0) 967 data = self.volumeFD.read(512) 968 while len(data) < 512: 969 data += self.volumeFD.read(512) 970 971 self.__bootSector = NTFS_BOOT_SECTOR(data) 972 self.BPB = NTFS_BPB(self.__bootSector['BPB']) 973 self.ExtendedBPB = NTFS_EXTENDED_BPB(self.__bootSector['ExtendedBPB']) 974 self.SectorSize = self.BPB['BytesPerSector'] 975 self.__MFTStart = self.BPB['BytesPerSector'] * self.BPB['SectorsPerCluster'] * self.ExtendedBPB['MFTClusterNumber'] 976 if self.ExtendedBPB['ClusterPerFileRecord'] > 0: 977 self.RecordSize = self.BPB['BytesPerSector'] * self.BPB['SectorsPerCluster'] * self.ExtendedBPB['ClusterPerFileRecord'] 978 else: 979 self.RecordSize = 1 << (-self.ExtendedBPB['ClusterPerFileRecord']) 980 if self.ExtendedBPB['ClusterPerIndexBuffer'] > 0: 981 self.IndexBlockSize = self.BPB['BytesPerSector'] * self.BPB['SectorsPerCluster'] * self.ExtendedBPB['ClusterPerIndexBuffer'] 982 else: 983 self.IndexBlockSize = 1 << (-self.ExtendedBPB['ClusterPerIndexBuffer']) 984 985 logging.debug("MFT should start at position %d" % self.__MFTStart) 986 987 def getINode(self, iNodeNum): 988 logging.debug("Trying to fetch inode %d" % iNodeNum) 989 990 newINode = INODE(self) 991 992 recordLen = self.RecordSize 993 994 # Let's calculate where in disk this iNode should be 995 if self.MFTINode and iNodeNum > FIXED_MFTS: 996 # Fragmented $MFT 997 attr = self.MFTINode.searchAttribute(DATA,None) 998 record = attr.read(iNodeNum*self.RecordSize, self.RecordSize) 999 else: 1000 diskPosition = self.__MFTStart + iNodeNum * self.RecordSize 1001 self.volumeFD.seek(diskPosition,0) 1002 record = self.volumeFD.read(recordLen) 1003 while len(record) < recordLen: 1004 record += self.volumeFD.read(recordLen-len(record)) 1005 1006 mftRecord = NTFS_MFT_RECORD(record) 1007 1008 record = newINode.PerformFixUp(mftRecord, record, self.RecordSize/self.SectorSize) 1009 newINode.INodeNumber = iNodeNum 1010 newINode.AttributesRaw = record[mftRecord['AttributesOffset']-recordLen:] 1011 newINode.parseAttributes() 1012 1013 return newINode 1014 1015class MiniShell(cmd.Cmd): 1016 def __init__(self, volume): 1017 cmd.Cmd.__init__(self) 1018 self.volumePath = volume 1019 self.volume = NTFS(volume) 1020 self.rootINode = self.volume.getINode(5) 1021 self.prompt = '\\>' 1022 self.intro = 'Type help for list of commands' 1023 self.currentINode = self.rootINode 1024 self.completion = [] 1025 self.pwd = '\\' 1026 self.do_ls('',False) 1027 self.last_output = '' 1028 1029 def emptyline(self): 1030 pass 1031 1032 def onecmd(self,s): 1033 retVal = False 1034 try: 1035 retVal = cmd.Cmd.onecmd(self,s) 1036 except Exception, e: 1037 logging.error(str(e)) 1038 1039 return retVal 1040 1041 def do_exit(self,line): 1042 return True 1043 1044 def do_shell(self, line): 1045 output = os.popen(line).read() 1046 print output 1047 self.last_output = output 1048 1049 def do_help(self,line): 1050 print """ 1051 cd {path} - changes the current directory to {path} 1052 pwd - shows current remote directory 1053 ls - lists all the files in the current directory 1054 lcd - change local directory 1055 get {filename} - downloads the filename from the current path 1056 cat {filename} - prints the contents of filename 1057 hexdump {filename} - hexdumps the contents of filename 1058 exit - terminates the server process (and this session) 1059 1060""" 1061 1062 def do_lcd(self,line): 1063 if line == '': 1064 print os.getcwd() 1065 else: 1066 os.chdir(line) 1067 print os.getcwd() 1068 1069 def do_cd(self, line): 1070 p = string.replace(line,'/','\\') 1071 oldpwd = self.pwd 1072 newPath = ntpath.normpath(ntpath.join(self.pwd,p)) 1073 if newPath == self.pwd: 1074 # Nothing changed 1075 return 1076 common = ntpath.commonprefix([newPath,oldpwd]) 1077 1078 if common == oldpwd: 1079 res = self.findPathName(ntpath.normpath(p)) 1080 else: 1081 res = self.findPathName(newPath) 1082 1083 if res is None: 1084 logging.error("Directory not found") 1085 self.pwd = oldpwd 1086 return 1087 if res.isDirectory() == 0: 1088 logging.error("Not a directory!") 1089 self.pwd = oldpwd 1090 return 1091 else: 1092 self.currentINode = res 1093 self.do_ls('', False) 1094 self.pwd = ntpath.join(self.pwd,p) 1095 self.pwd = ntpath.normpath(self.pwd) 1096 self.prompt = self.pwd + '>' 1097 1098 def findPathName(self, pathName): 1099 if pathName == '\\': 1100 return self.rootINode 1101 tmpINode = self.currentINode 1102 parts = pathName.split('\\') 1103 for part in parts: 1104 if part == '': 1105 tmpINode = self.rootINode 1106 else: 1107 res = tmpINode.findFirst(part) 1108 if res is None: 1109 return res 1110 else: 1111 tmpINode = self.volume.getINode(res.getINodeNumber()) 1112 1113 return tmpINode 1114 1115 def do_pwd(self,line): 1116 print self.pwd 1117 1118 def do_ls(self, line, display = True): 1119 entries = self.currentINode.walk() 1120 self.completion = [] 1121 for entry in entries: 1122 inode = INODE(self.volume) 1123 inode.FileAttributes = entry['FileAttributes'] 1124 inode.FileSize = entry['DataSize'] 1125 inode.LastDataChangeTime = datetime.fromtimestamp(getUnixTime(entry['LastDataChangeTime'])) 1126 inode.FileName = entry['FileName'].decode('utf-16le') 1127 if display is True: 1128 inode.displayName() 1129 self.completion.append((inode.FileName,inode.isDirectory())) 1130 1131 def complete_cd(self, text, line, begidx, endidx): 1132 return self.complete_get(text, line, begidx, endidx, include = 2) 1133 1134 def complete_cat(self,text,line,begidx,endidx): 1135 return self.complete_get(text, line, begidx, endidx) 1136 1137 def complete_hexdump(self,text,line,begidx,endidx): 1138 return self.complete_get(text, line, begidx, endidx) 1139 1140 def complete_get(self, text, line, begidx, endidx, include = 1): 1141 # include means 1142 # 1 just files 1143 # 2 just directories 1144 items = [] 1145 if include == 1: 1146 mask = 0 1147 else: 1148 mask = FILE_ATTR_I30_INDEX_PRESENT 1149 for i in self.completion: 1150 if i[1] == mask: 1151 items.append(i[0]) 1152 if text: 1153 return [ 1154 item for item in items 1155 if item.upper().startswith(text.upper()) 1156 ] 1157 else: 1158 return items 1159 1160 def do_hexdump(self,line): 1161 return self.do_cat(line,command = hexdump) 1162 1163 def do_cat(self, line, command = sys.stdout.write): 1164 pathName = string.replace(line,'/','\\') 1165 pathName = ntpath.normpath(ntpath.join(self.pwd,pathName)) 1166 res = self.findPathName(pathName) 1167 if res is None: 1168 logging.error("Not found!") 1169 return 1170 if res.isDirectory() > 0: 1171 logging.error("It's a directory!") 1172 return 1173 if res.isCompressed() or res.isEncrypted() or res.isSparse(): 1174 logging.error('Cannot handle compressed/encrypted/sparse files! :(') 1175 return 1176 stream = res.getStream(None) 1177 chunks = 4096*10 1178 written = 0 1179 for i in range(stream.getDataSize()/chunks): 1180 buf = stream.read(i*chunks, chunks) 1181 written += len(buf) 1182 command(buf) 1183 if stream.getDataSize() % chunks: 1184 buf = stream.read(written, stream.getDataSize() % chunks) 1185 command(buf) 1186 logging.info("%d bytes read" % stream.getDataSize()) 1187 1188 def do_get(self, line): 1189 pathName = string.replace(line,'/','\\') 1190 pathName = ntpath.normpath(ntpath.join(self.pwd,pathName)) 1191 fh = open(ntpath.basename(pathName),"wb") 1192 self.do_cat(line, command = fh.write) 1193 fh.close() 1194 1195def main(): 1196 print version.BANNER 1197 # Init the example's logger theme 1198 logger.init() 1199 parser = argparse.ArgumentParser(add_help = True, description = "NTFS explorer (read-only)") 1200 parser.add_argument('volume', action='store', help='NTFS volume to open (e.g. \\\\.\\C: or /dev/disk1s1)') 1201 parser.add_argument('-extract', action='store', help='extracts pathname (e.g. \windows\system32\config\sam)') 1202 parser.add_argument('-debug', action='store_true', help='Turn DEBUG output ON') 1203 1204 if len(sys.argv)==1: 1205 parser.print_help() 1206 sys.exit(1) 1207 options = parser.parse_args() 1208 1209 if options.debug is True: 1210 logging.getLogger().setLevel(logging.DEBUG) 1211 else: 1212 logging.getLogger().setLevel(logging.INFO) 1213 1214 shell = MiniShell(options.volume) 1215 if options.extract is not None: 1216 shell.onecmd("get %s"% options.extract) 1217 else: 1218 shell.cmdloop() 1219 1220if __name__ == '__main__': 1221 main() 1222 sys.exit(1) 1223 1224 1225 1226