1 /* 2 * Copyright (c) 2009, 2020, Oracle and/or its affiliates. All rights reserved. 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 * 5 * This code is free software; you can redistribute it and/or modify it 6 * under the terms of the GNU General Public License version 2 only, as 7 * published by the Free Software Foundation. Oracle designates this 8 * particular file as subject to the "Classpath" exception as provided 9 * by Oracle in the LICENSE file that accompanied this code. 10 * 11 * This code is distributed in the hope that it will be useful, but WITHOUT 12 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 13 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 14 * version 2 for more details (a copy is included in the LICENSE file that 15 * accompanied this code). 16 * 17 * You should have received a copy of the GNU General Public License version 18 * 2 along with this work; if not, write to the Free Software Foundation, 19 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 20 * 21 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 22 * or visit www.oracle.com if you need additional information or have any 23 * questions. 24 */ 25 26 package jdk.nio.zipfs; 27 28 import java.io.BufferedOutputStream; 29 import java.io.ByteArrayInputStream; 30 import java.io.ByteArrayOutputStream; 31 import java.io.EOFException; 32 import java.io.FilterOutputStream; 33 import java.io.IOException; 34 import java.io.InputStream; 35 import java.io.OutputStream; 36 import java.lang.Runtime.Version; 37 import java.nio.ByteBuffer; 38 import java.nio.MappedByteBuffer; 39 import java.nio.channels.FileChannel; 40 import java.nio.channels.FileLock; 41 import java.nio.channels.ReadableByteChannel; 42 import java.nio.channels.SeekableByteChannel; 43 import java.nio.channels.WritableByteChannel; 44 import java.nio.file.*; 45 import java.nio.file.attribute.*; 46 import java.nio.file.spi.FileSystemProvider; 47 import java.security.AccessController; 48 import java.security.PrivilegedAction; 49 import java.security.PrivilegedActionException; 50 import java.security.PrivilegedExceptionAction; 51 import java.util.*; 52 import java.util.concurrent.locks.ReadWriteLock; 53 import java.util.concurrent.locks.ReentrantReadWriteLock; 54 import java.util.function.Consumer; 55 import java.util.function.Function; 56 import java.util.jar.Attributes; 57 import java.util.jar.Manifest; 58 import java.util.regex.Pattern; 59 import java.util.zip.CRC32; 60 import java.util.zip.Deflater; 61 import java.util.zip.DeflaterOutputStream; 62 import java.util.zip.Inflater; 63 import java.util.zip.InflaterInputStream; 64 import java.util.zip.ZipException; 65 66 import static java.lang.Boolean.TRUE; 67 import static java.nio.file.StandardCopyOption.COPY_ATTRIBUTES; 68 import static java.nio.file.StandardCopyOption.REPLACE_EXISTING; 69 import static java.nio.file.StandardOpenOption.APPEND; 70 import static java.nio.file.StandardOpenOption.CREATE; 71 import static java.nio.file.StandardOpenOption.CREATE_NEW; 72 import static java.nio.file.StandardOpenOption.READ; 73 import static java.nio.file.StandardOpenOption.TRUNCATE_EXISTING; 74 import static java.nio.file.StandardOpenOption.WRITE; 75 import static jdk.nio.zipfs.ZipConstants.*; 76 import static jdk.nio.zipfs.ZipUtils.*; 77 78 /** 79 * A FileSystem built on a zip file 80 * 81 * @author Xueming Shen 82 */ 83 class ZipFileSystem extends FileSystem { 84 // statics 85 private static final boolean 
isWindows = AccessController.doPrivileged( 86 (PrivilegedAction<Boolean>)()->System.getProperty("os.name") 87 .startsWith("Windows")); 88 private static final byte[] ROOTPATH = new byte[] { '/' }; 89 private static final String PROPERTY_POSIX = "enablePosixFileAttributes"; 90 private static final String PROPERTY_DEFAULT_OWNER = "defaultOwner"; 91 private static final String PROPERTY_DEFAULT_GROUP = "defaultGroup"; 92 private static final String PROPERTY_DEFAULT_PERMISSIONS = "defaultPermissions"; 93 // Property used to specify the entry version to use for a multi-release JAR 94 private static final String PROPERTY_RELEASE_VERSION = "releaseVersion"; 95 // Original property used to specify the entry version to use for a 96 // multi-release JAR which is kept for backwards compatibility. 97 private static final String PROPERTY_MULTI_RELEASE = "multi-release"; 98 99 private static final Set<PosixFilePermission> DEFAULT_PERMISSIONS = 100 PosixFilePermissions.fromString("rwxrwxrwx"); 101 // Property used to specify the compression mode to use 102 private static final String PROPERTY_COMPRESSION_METHOD = "compressionMethod"; 103 // Value specified for compressionMethod property to compress Zip entries 104 private static final String COMPRESSION_METHOD_DEFLATED = "DEFLATED"; 105 // Value specified for compressionMethod property to not compress Zip entries 106 private static final String COMPRESSION_METHOD_STORED = "STORED"; 107 108 private final ZipFileSystemProvider provider; 109 private final Path zfpath; 110 final ZipCoder zc; 111 private final ZipPath rootdir; 112 private boolean readOnly; // readonly file system, false by default 113 114 // default time stamp for pseudo entries 115 private final long zfsDefaultTimeStamp = System.currentTimeMillis(); 116 117 // configurable by env map 118 private final boolean noExtt; // see readExtra() 119 private final boolean useTempFile; // use a temp file for newOS, default 120 // is to use BAOS for better performance 121 private final boolean forceEnd64; 122 private final int defaultCompressionMethod; // METHOD_STORED if "noCompression=true" 123 // METHOD_DEFLATED otherwise 124 125 // entryLookup is identity by default, will be overridden for multi-release jars 126 private Function<byte[], byte[]> entryLookup = Function.identity(); 127 128 // POSIX support 129 final boolean supportPosix; 130 private final UserPrincipal defaultOwner; 131 private final GroupPrincipal defaultGroup; 132 private final Set<PosixFilePermission> defaultPermissions; 133 134 private final Set<String> supportedFileAttributeViews; 135 ZipFileSystem(ZipFileSystemProvider provider, Path zfpath, Map<String, ?> env)136 ZipFileSystem(ZipFileSystemProvider provider, 137 Path zfpath, 138 Map<String, ?> env) throws IOException 139 { 140 // default encoding for name/comment 141 String nameEncoding = env.containsKey("encoding") ? 142 (String)env.get("encoding") : "UTF-8"; 143 this.noExtt = "false".equals(env.get("zipinfo-time")); 144 this.useTempFile = isTrue(env, "useTempFile"); 145 this.forceEnd64 = isTrue(env, "forceZIP64End"); 146 this.defaultCompressionMethod = getDefaultCompressionMethod(env); 147 this.supportPosix = isTrue(env, PROPERTY_POSIX); 148 this.defaultOwner = initOwner(zfpath, env); 149 this.defaultGroup = initGroup(zfpath, env); 150 this.defaultPermissions = initPermissions(env); 151 this.supportedFileAttributeViews = supportPosix ? 
                Set.of("basic", "posix", "zip") : Set.of("basic", "zip");
        if (Files.notExists(zfpath)) {
            // create a new zip if it doesn't exist
            if (isTrue(env, "create")) {
                try (OutputStream os = Files.newOutputStream(zfpath, CREATE_NEW, WRITE)) {
                    new END().write(os, 0, forceEnd64);
                }
            } else {
                throw new NoSuchFileException(zfpath.toString());
            }
        }
        // security manager and existence check
        zfpath.getFileSystem().provider().checkAccess(zfpath, AccessMode.READ);
        boolean writeable = AccessController.doPrivileged(
            (PrivilegedAction<Boolean>)()->Files.isWritable(zfpath));
        this.readOnly = !writeable;
        this.zc = ZipCoder.get(nameEncoding);
        this.rootdir = new ZipPath(this, new byte[]{'/'});
        this.ch = Files.newByteChannel(zfpath, READ);
        try {
            this.cen = initCEN();
        } catch (IOException x) {
            try {
                this.ch.close();
            } catch (IOException xx) {
                x.addSuppressed(xx);
            }
            throw x;
        }
        this.provider = provider;
        this.zfpath = zfpath;

        initializeReleaseVersion(env);
    }

    /**
     * Returns the compression method to use (STORED or DEFLATED). If the
     * property {@code compressionMethod} is set, its value determines the
     * compression method to use. If the property is not set, the default is
     * DEFLATED unless the property {@code noCompression} is set, which is
     * supported for backwards compatibility.
     * @param env Zip FS map of properties
     * @return The compression method to use
     */
    private int getDefaultCompressionMethod(Map<String, ?> env) {
        int result =
            isTrue(env, "noCompression") ? METHOD_STORED : METHOD_DEFLATED;
        if (env.containsKey(PROPERTY_COMPRESSION_METHOD)) {
            Object compressionMethod = env.get(PROPERTY_COMPRESSION_METHOD);
            if (compressionMethod != null) {
                if (compressionMethod instanceof String) {
                    switch (((String) compressionMethod).toUpperCase()) {
                        case COMPRESSION_METHOD_STORED:
                            result = METHOD_STORED;
                            break;
                        case COMPRESSION_METHOD_DEFLATED:
                            result = METHOD_DEFLATED;
                            break;
                        default:
                            throw new IllegalArgumentException(String.format(
                                    "The value for the %s property must be %s or %s",
                                    PROPERTY_COMPRESSION_METHOD, COMPRESSION_METHOD_STORED,
                                    COMPRESSION_METHOD_DEFLATED));
                    }
                } else {
                    throw new IllegalArgumentException(String.format(
                            "The Object type for the %s property must be a String",
                            PROPERTY_COMPRESSION_METHOD));
                }
            } else {
                throw new IllegalArgumentException(String.format(
                        "The value for the %s property must be %s or %s",
                        PROPERTY_COMPRESSION_METHOD, COMPRESSION_METHOD_STORED,
                        COMPRESSION_METHOD_DEFLATED));
            }
        }
        return result;
    }

    // returns true if there is a name=true/"true" setting in env
    private static boolean isTrue(Map<String, ?> env, String name) {
        return "true".equals(env.get(name)) || TRUE.equals(env.get(name));
    }

    // Initialize the default owner for files inside the zip archive.
    // If not specified in env, it is the owner of the archive. If no owner can
    // be determined, we try to go with system property "user.name". If that's not
    // accessible, we return "<zipfs_default>".
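    // Illustrative example (not part of the original sources): the env map consumed by
    // the constructor above, and by the default owner/group/permission lookups below,
    // is the one callers pass to FileSystems.newFileSystem; the property values shown
    // here are hypothetical.
    //
    //   Map<String, Object> env = Map.of(
    //           "create", "true",
    //           "compressionMethod", "STORED",
    //           "enablePosixFileAttributes", true,
    //           "defaultOwner", "alice");
    //   try (FileSystem zipfs = FileSystems.newFileSystem(Path.of("archive.zip"), env)) {
    //       // use zipfs.getPath(...) with the java.nio.file API
    //   }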
initOwner(Path zfpath, Map<String, ?> env)240 private UserPrincipal initOwner(Path zfpath, Map<String, ?> env) throws IOException { 241 Object o = env.get(PROPERTY_DEFAULT_OWNER); 242 if (o == null) { 243 try { 244 PrivilegedExceptionAction<UserPrincipal> pa = ()->Files.getOwner(zfpath); 245 return AccessController.doPrivileged(pa); 246 } catch (UnsupportedOperationException | PrivilegedActionException e) { 247 if (e instanceof UnsupportedOperationException || 248 e.getCause() instanceof NoSuchFileException) 249 { 250 PrivilegedAction<String> pa = ()->System.getProperty("user.name"); 251 String userName = AccessController.doPrivileged(pa); 252 return ()->userName; 253 } else { 254 throw new IOException(e); 255 } 256 } 257 } 258 if (o instanceof String) { 259 if (((String)o).isEmpty()) { 260 throw new IllegalArgumentException("Value for property " + 261 PROPERTY_DEFAULT_OWNER + " must not be empty."); 262 } 263 return ()->(String)o; 264 } 265 if (o instanceof UserPrincipal) { 266 return (UserPrincipal)o; 267 } 268 throw new IllegalArgumentException("Value for property " + 269 PROPERTY_DEFAULT_OWNER + " must be of type " + String.class + 270 " or " + UserPrincipal.class); 271 } 272 273 // Initialize the default group for files inside the zip archive. 274 // If not specified in env, we try to determine the group of the zip archive itself. 275 // If this is not possible/unsupported, we will return a group principal going by 276 // the same name as the default owner. initGroup(Path zfpath, Map<String, ?> env)277 private GroupPrincipal initGroup(Path zfpath, Map<String, ?> env) throws IOException { 278 Object o = env.get(PROPERTY_DEFAULT_GROUP); 279 if (o == null) { 280 try { 281 PosixFileAttributeView zfpv = Files.getFileAttributeView(zfpath, PosixFileAttributeView.class); 282 if (zfpv == null) { 283 return defaultOwner::getName; 284 } 285 PrivilegedExceptionAction<GroupPrincipal> pa = ()->zfpv.readAttributes().group(); 286 return AccessController.doPrivileged(pa); 287 } catch (UnsupportedOperationException | PrivilegedActionException e) { 288 if (e instanceof UnsupportedOperationException || 289 e.getCause() instanceof NoSuchFileException) 290 { 291 return defaultOwner::getName; 292 } else { 293 throw new IOException(e); 294 } 295 } 296 } 297 if (o instanceof String) { 298 if (((String)o).isEmpty()) { 299 throw new IllegalArgumentException("Value for property " + 300 PROPERTY_DEFAULT_GROUP + " must not be empty."); 301 } 302 return ()->(String)o; 303 } 304 if (o instanceof GroupPrincipal) { 305 return (GroupPrincipal)o; 306 } 307 throw new IllegalArgumentException("Value for property " + 308 PROPERTY_DEFAULT_GROUP + " must be of type " + String.class + 309 " or " + GroupPrincipal.class); 310 } 311 312 // Initialize the default permissions for files inside the zip archive. 313 // If not specified in env, it will return 777. 
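    // Illustrative note (not part of the original sources): initPermissions below accepts
    // the "defaultPermissions" property either as a PosixFilePermissions-style String or
    // as a Set<PosixFilePermission>, e.g. (hypothetical values):
    //
    //   Map.of("defaultPermissions", "rw-r--r--");
    //   Map.of("defaultPermissions", PosixFilePermissions.fromString("rw-r--r--"));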
initPermissions(Map<String, ?> env)314 private Set<PosixFilePermission> initPermissions(Map<String, ?> env) { 315 Object o = env.get(PROPERTY_DEFAULT_PERMISSIONS); 316 if (o == null) { 317 return DEFAULT_PERMISSIONS; 318 } 319 if (o instanceof String) { 320 return PosixFilePermissions.fromString((String)o); 321 } 322 if (!(o instanceof Set)) { 323 throw new IllegalArgumentException("Value for property " + 324 PROPERTY_DEFAULT_PERMISSIONS + " must be of type " + String.class + 325 " or " + Set.class); 326 } 327 Set<PosixFilePermission> perms = new HashSet<>(); 328 for (Object o2 : (Set<?>)o) { 329 if (o2 instanceof PosixFilePermission) { 330 perms.add((PosixFilePermission)o2); 331 } else { 332 throw new IllegalArgumentException(PROPERTY_DEFAULT_PERMISSIONS + 333 " must only contain objects of type " + PosixFilePermission.class); 334 } 335 } 336 return perms; 337 } 338 339 @Override provider()340 public FileSystemProvider provider() { 341 return provider; 342 } 343 344 @Override getSeparator()345 public String getSeparator() { 346 return "/"; 347 } 348 349 @Override isOpen()350 public boolean isOpen() { 351 return isOpen; 352 } 353 354 @Override isReadOnly()355 public boolean isReadOnly() { 356 return readOnly; 357 } 358 checkWritable()359 private void checkWritable() { 360 if (readOnly) { 361 throw new ReadOnlyFileSystemException(); 362 } 363 } 364 setReadOnly()365 void setReadOnly() { 366 this.readOnly = true; 367 } 368 369 @Override getRootDirectories()370 public Iterable<Path> getRootDirectories() { 371 return List.of(rootdir); 372 } 373 getRootDir()374 ZipPath getRootDir() { 375 return rootdir; 376 } 377 378 @Override getPath(String first, String... more)379 public ZipPath getPath(String first, String... more) { 380 if (more.length == 0) { 381 return new ZipPath(this, first); 382 } 383 StringBuilder sb = new StringBuilder(); 384 sb.append(first); 385 for (String path : more) { 386 if (path.length() > 0) { 387 if (sb.length() > 0) { 388 sb.append('/'); 389 } 390 sb.append(path); 391 } 392 } 393 return new ZipPath(this, sb.toString()); 394 } 395 396 @Override getUserPrincipalLookupService()397 public UserPrincipalLookupService getUserPrincipalLookupService() { 398 throw new UnsupportedOperationException(); 399 } 400 401 @Override newWatchService()402 public WatchService newWatchService() { 403 throw new UnsupportedOperationException(); 404 } 405 getFileStore(ZipPath path)406 FileStore getFileStore(ZipPath path) { 407 return new ZipFileStore(path); 408 } 409 410 @Override getFileStores()411 public Iterable<FileStore> getFileStores() { 412 return List.of(new ZipFileStore(rootdir)); 413 } 414 415 @Override supportedFileAttributeViews()416 public Set<String> supportedFileAttributeViews() { 417 return supportedFileAttributeViews; 418 } 419 420 @Override toString()421 public String toString() { 422 return zfpath.toString(); 423 } 424 getZipFile()425 Path getZipFile() { 426 return zfpath; 427 } 428 429 private static final String GLOB_SYNTAX = "glob"; 430 private static final String REGEX_SYNTAX = "regex"; 431 432 @Override getPathMatcher(String syntaxAndInput)433 public PathMatcher getPathMatcher(String syntaxAndInput) { 434 int pos = syntaxAndInput.indexOf(':'); 435 if (pos <= 0 || pos == syntaxAndInput.length()) { 436 throw new IllegalArgumentException(); 437 } 438 String syntax = syntaxAndInput.substring(0, pos); 439 String input = syntaxAndInput.substring(pos + 1); 440 String expr; 441 if (syntax.equalsIgnoreCase(GLOB_SYNTAX)) { 442 expr = toRegexPattern(input); 443 } else { 444 if 
(syntax.equalsIgnoreCase(REGEX_SYNTAX)) { 445 expr = input; 446 } else { 447 throw new UnsupportedOperationException("Syntax '" + syntax + 448 "' not recognized"); 449 } 450 } 451 // return matcher 452 final Pattern pattern = Pattern.compile(expr); 453 return (path)->pattern.matcher(path.toString()).matches(); 454 } 455 456 @Override close()457 public void close() throws IOException { 458 beginWrite(); 459 try { 460 if (!isOpen) 461 return; 462 isOpen = false; // set closed 463 } finally { 464 endWrite(); 465 } 466 if (!streams.isEmpty()) { // unlock and close all remaining streams 467 Set<InputStream> copy = new HashSet<>(streams); 468 for (InputStream is : copy) 469 is.close(); 470 } 471 beginWrite(); // lock and sync 472 try { 473 AccessController.doPrivileged((PrivilegedExceptionAction<Void>)() -> { 474 sync(); return null; 475 }); 476 ch.close(); // close the ch just in case no update 477 // and sync didn't close the ch 478 } catch (PrivilegedActionException e) { 479 throw (IOException)e.getException(); 480 } finally { 481 endWrite(); 482 } 483 484 synchronized (inflaters) { 485 for (Inflater inf : inflaters) 486 inf.end(); 487 } 488 synchronized (deflaters) { 489 for (Deflater def : deflaters) 490 def.end(); 491 } 492 493 beginWrite(); // lock and sync 494 try { 495 // Clear the map so that its keys & values can be garbage collected 496 inodes = null; 497 } finally { 498 endWrite(); 499 } 500 501 IOException ioe = null; 502 synchronized (tmppaths) { 503 for (Path p : tmppaths) { 504 try { 505 AccessController.doPrivileged( 506 (PrivilegedExceptionAction<Boolean>)() -> Files.deleteIfExists(p)); 507 } catch (PrivilegedActionException e) { 508 IOException x = (IOException)e.getException(); 509 if (ioe == null) 510 ioe = x; 511 else 512 ioe.addSuppressed(x); 513 } 514 } 515 } 516 provider.removeFileSystem(zfpath, this); 517 if (ioe != null) 518 throw ioe; 519 } 520 getFileAttributes(byte[] path)521 ZipFileAttributes getFileAttributes(byte[] path) 522 throws IOException 523 { 524 beginRead(); 525 try { 526 ensureOpen(); 527 IndexNode inode = getInode(path); 528 if (inode == null) { 529 return null; 530 } else if (inode instanceof Entry) { 531 return (Entry)inode; 532 } else if (inode.pos == -1) { 533 // pseudo directory, uses METHOD_STORED 534 Entry e = supportPosix ? 535 new PosixEntry(inode.name, inode.isdir, METHOD_STORED) : 536 new Entry(inode.name, inode.isdir, METHOD_STORED); 537 e.mtime = e.atime = e.ctime = zfsDefaultTimeStamp; 538 return e; 539 } else { 540 return supportPosix ? new PosixEntry(this, inode) : new Entry(this, inode); 541 } 542 } finally { 543 endRead(); 544 } 545 } 546 checkAccess(byte[] path)547 void checkAccess(byte[] path) throws IOException { 548 beginRead(); 549 try { 550 ensureOpen(); 551 // is it necessary to readCEN as a sanity check? 
552 if (getInode(path) == null) { 553 throw new NoSuchFileException(toString()); 554 } 555 556 } finally { 557 endRead(); 558 } 559 } 560 setTimes(byte[] path, FileTime mtime, FileTime atime, FileTime ctime)561 void setTimes(byte[] path, FileTime mtime, FileTime atime, FileTime ctime) 562 throws IOException 563 { 564 checkWritable(); 565 beginWrite(); 566 try { 567 ensureOpen(); 568 Entry e = getEntry(path); // ensureOpen checked 569 if (e == null) 570 throw new NoSuchFileException(getString(path)); 571 if (e.type == Entry.CEN) 572 e.type = Entry.COPY; // copy e 573 if (mtime != null) 574 e.mtime = mtime.toMillis(); 575 if (atime != null) 576 e.atime = atime.toMillis(); 577 if (ctime != null) 578 e.ctime = ctime.toMillis(); 579 update(e); 580 } finally { 581 endWrite(); 582 } 583 } 584 setOwner(byte[] path, UserPrincipal owner)585 void setOwner(byte[] path, UserPrincipal owner) throws IOException { 586 checkWritable(); 587 beginWrite(); 588 try { 589 ensureOpen(); 590 Entry e = getEntry(path); // ensureOpen checked 591 if (e == null) { 592 throw new NoSuchFileException(getString(path)); 593 } 594 // as the owner information is not persistent, we don't need to 595 // change e.type to Entry.COPY 596 if (e instanceof PosixEntry) { 597 ((PosixEntry)e).owner = owner; 598 update(e); 599 } 600 } finally { 601 endWrite(); 602 } 603 } 604 setGroup(byte[] path, GroupPrincipal group)605 void setGroup(byte[] path, GroupPrincipal group) throws IOException { 606 checkWritable(); 607 beginWrite(); 608 try { 609 ensureOpen(); 610 Entry e = getEntry(path); // ensureOpen checked 611 if (e == null) { 612 throw new NoSuchFileException(getString(path)); 613 } 614 // as the group information is not persistent, we don't need to 615 // change e.type to Entry.COPY 616 if (e instanceof PosixEntry) { 617 ((PosixEntry)e).group = group; 618 update(e); 619 } 620 } finally { 621 endWrite(); 622 } 623 } 624 setPermissions(byte[] path, Set<PosixFilePermission> perms)625 void setPermissions(byte[] path, Set<PosixFilePermission> perms) throws IOException { 626 checkWritable(); 627 beginWrite(); 628 try { 629 ensureOpen(); 630 Entry e = getEntry(path); // ensureOpen checked 631 if (e == null) { 632 throw new NoSuchFileException(getString(path)); 633 } 634 if (e.type == Entry.CEN) { 635 e.type = Entry.COPY; // copy e 636 } 637 e.posixPerms = perms == null ? -1 : ZipUtils.permsToFlags(perms); 638 update(e); 639 } finally { 640 endWrite(); 641 } 642 } 643 exists(byte[] path)644 boolean exists(byte[] path) { 645 beginRead(); 646 try { 647 ensureOpen(); 648 return getInode(path) != null; 649 } finally { 650 endRead(); 651 } 652 } 653 isDirectory(byte[] path)654 boolean isDirectory(byte[] path) { 655 beginRead(); 656 try { 657 IndexNode n = getInode(path); 658 return n != null && n.isDir(); 659 } finally { 660 endRead(); 661 } 662 } 663 664 // returns the list of child paths of "path" iteratorOf(ZipPath dir, DirectoryStream.Filter<? super Path> filter)665 Iterator<Path> iteratorOf(ZipPath dir, 666 DirectoryStream.Filter<? super Path> filter) 667 throws IOException 668 { 669 beginWrite(); // iteration of inodes needs exclusive lock 670 try { 671 ensureOpen(); 672 byte[] path = dir.getResolvedPath(); 673 IndexNode inode = getInode(path); 674 if (inode == null) 675 throw new NotDirectoryException(getString(path)); 676 List<Path> list = new ArrayList<>(); 677 IndexNode child = inode.child; 678 while (child != null) { 679 // (1) Assume each path from the zip file itself is "normalized" 680 // (2) IndexNode.name is absolute. 
see IndexNode(byte[],int,int) 681 // (3) If parent "dir" is relative when ZipDirectoryStream 682 // is created, the returned child path needs to be relative 683 // as well. 684 ZipPath childPath = new ZipPath(this, child.name, true); 685 ZipPath childFileName = childPath.getFileName(); 686 ZipPath zpath = dir.resolve(childFileName); 687 if (filter == null || filter.accept(zpath)) 688 list.add(zpath); 689 child = child.sibling; 690 } 691 return list.iterator(); 692 } finally { 693 endWrite(); 694 } 695 } 696 createDirectory(byte[] dir, FileAttribute<?>... attrs)697 void createDirectory(byte[] dir, FileAttribute<?>... attrs) throws IOException { 698 checkWritable(); 699 beginWrite(); 700 try { 701 ensureOpen(); 702 if (dir.length == 0 || exists(dir)) // root dir, or existing dir 703 throw new FileAlreadyExistsException(getString(dir)); 704 checkParents(dir); 705 Entry e = supportPosix ? 706 new PosixEntry(dir, Entry.NEW, true, METHOD_STORED, attrs) : 707 new Entry(dir, Entry.NEW, true, METHOD_STORED, attrs); 708 update(e); 709 } finally { 710 endWrite(); 711 } 712 } 713 copyFile(boolean deletesrc, byte[]src, byte[] dst, CopyOption... options)714 void copyFile(boolean deletesrc, byte[]src, byte[] dst, CopyOption... options) 715 throws IOException 716 { 717 checkWritable(); 718 if (Arrays.equals(src, dst)) 719 return; // do nothing, src and dst are the same 720 721 beginWrite(); 722 try { 723 ensureOpen(); 724 Entry eSrc = getEntry(src); // ensureOpen checked 725 726 if (eSrc == null) 727 throw new NoSuchFileException(getString(src)); 728 if (eSrc.isDir()) { // spec says to create dst dir 729 createDirectory(dst); 730 return; 731 } 732 boolean hasReplace = false; 733 boolean hasCopyAttrs = false; 734 for (CopyOption opt : options) { 735 if (opt == REPLACE_EXISTING) 736 hasReplace = true; 737 else if (opt == COPY_ATTRIBUTES) 738 hasCopyAttrs = true; 739 } 740 Entry eDst = getEntry(dst); 741 if (eDst != null) { 742 if (!hasReplace) 743 throw new FileAlreadyExistsException(getString(dst)); 744 } else { 745 checkParents(dst); 746 } 747 // copy eSrc entry and change name 748 Entry u = supportPosix ? 749 new PosixEntry((PosixEntry)eSrc, Entry.COPY) : 750 new Entry(eSrc, Entry.COPY); 751 u.name(dst); 752 if (eSrc.type == Entry.NEW || eSrc.type == Entry.FILECH) { 753 u.type = eSrc.type; // make it the same type 754 if (deletesrc) { // if it's a "rename", take the data 755 u.bytes = eSrc.bytes; 756 u.file = eSrc.file; 757 } else { // if it's not "rename", copy the data 758 if (eSrc.bytes != null) 759 u.bytes = Arrays.copyOf(eSrc.bytes, eSrc.bytes.length); 760 else if (eSrc.file != null) { 761 u.file = getTempPathForEntry(null); 762 Files.copy(eSrc.file, u.file, REPLACE_EXISTING); 763 } 764 } 765 } else if (eSrc.type == Entry.CEN && eSrc.method != defaultCompressionMethod) { 766 767 /** 768 * We are copying a file within the same Zip file using a 769 * different compression method. 770 */ 771 try (InputStream in = newInputStream(src); 772 OutputStream out = newOutputStream(dst, 773 CREATE, TRUNCATE_EXISTING, WRITE)) { 774 in.transferTo(out); 775 } 776 u = getEntry(dst); 777 } 778 779 if (!hasCopyAttrs) 780 u.mtime = u.atime= u.ctime = System.currentTimeMillis(); 781 update(u); 782 if (deletesrc) 783 updateDelete(eSrc); 784 } finally { 785 endWrite(); 786 } 787 } 788 789 // Returns an output stream for writing the contents into the specified 790 // entry. newOutputStream(byte[] path, OpenOption... options)791 OutputStream newOutputStream(byte[] path, OpenOption... 
                                               options)
        throws IOException
    {
        checkWritable();
        boolean hasCreateNew = false;
        boolean hasCreate = false;
        boolean hasAppend = false;
        boolean hasTruncate = false;
        for (OpenOption opt : options) {
            if (opt == READ)
                throw new IllegalArgumentException("READ not allowed");
            if (opt == CREATE_NEW)
                hasCreateNew = true;
            if (opt == CREATE)
                hasCreate = true;
            if (opt == APPEND)
                hasAppend = true;
            if (opt == TRUNCATE_EXISTING)
                hasTruncate = true;
        }
        if (hasAppend && hasTruncate)
            throw new IllegalArgumentException("APPEND + TRUNCATE_EXISTING not allowed");
        beginRead();                 // only need a read lock; the "update()" will
        try {                        // try to obtain a write lock when the os is
            ensureOpen();            // being closed.
            Entry e = getEntry(path);
            if (e != null) {
                if (e.isDir() || hasCreateNew)
                    throw new FileAlreadyExistsException(getString(path));
                if (hasAppend) {
                    OutputStream os = getOutputStream(new Entry(e, Entry.NEW));
                    try (InputStream is = getInputStream(e)) {
                        is.transferTo(os);
                    }
                    return os;
                }
                return getOutputStream(supportPosix ?
                    new PosixEntry((PosixEntry)e, Entry.NEW, defaultCompressionMethod)
                    : new Entry(e, Entry.NEW, defaultCompressionMethod));
            } else {
                if (!hasCreate && !hasCreateNew)
                    throw new NoSuchFileException(getString(path));
                checkParents(path);
                return getOutputStream(supportPosix ?
                    new PosixEntry(path, Entry.NEW, false, defaultCompressionMethod) :
                    new Entry(path, Entry.NEW, false, defaultCompressionMethod));
            }
        } finally {
            endRead();
        }
    }

    // Returns an input stream for reading the contents of the specified
    // file entry.
    InputStream newInputStream(byte[] path) throws IOException {
        beginRead();
        try {
            ensureOpen();
            Entry e = getEntry(path);
            if (e == null)
                throw new NoSuchFileException(getString(path));
            if (e.isDir())
                throw new FileSystemException(getString(path), "is a directory", null);
            return getInputStream(e);
        } finally {
            endRead();
        }
    }

    private void checkOptions(Set<? extends OpenOption> options) {
        // check that no option is null and that each option is an instance of StandardOpenOption
        for (OpenOption option : options) {
            if (option == null)
                throw new NullPointerException();
            if (!(option instanceof StandardOpenOption))
                throw new IllegalArgumentException();
        }
        if (options.contains(APPEND) && options.contains(TRUNCATE_EXISTING))
            throw new IllegalArgumentException("APPEND + TRUNCATE_EXISTING not allowed");
    }

    // Returns an output SeekableByteChannel for either
    // (1) writing the contents of a new entry, if the entry doesn't exist, or
    // (2) updating/replacing the contents of an existing entry.
    // Note: The content of the channel is not compressed until the
    // channel is closed
    private class EntryOutputChannel extends ByteArrayChannel {
        final Entry e;

        EntryOutputChannel(Entry e) {
            super(e.size > 0 ?
(int)e.size : 8192, false); 882 this.e = e; 883 if (e.mtime == -1) 884 e.mtime = System.currentTimeMillis(); 885 if (e.method == -1) 886 e.method = defaultCompressionMethod; 887 // store size, compressed size, and crc-32 in datadescriptor 888 e.flag = FLAG_DATADESCR; 889 if (zc.isUTF8()) 890 e.flag |= FLAG_USE_UTF8; 891 } 892 893 @Override close()894 public void close() throws IOException { 895 super.beginWrite(); 896 try { 897 if (!isOpen()) 898 return; 899 // will update the entry 900 try (OutputStream os = getOutputStream(e)) { 901 os.write(toByteArray()); 902 } 903 super.close(); 904 } finally { 905 super.endWrite(); 906 } 907 } 908 } 909 910 // Returns a Writable/ReadByteChannel for now. Might consider to use 911 // newFileChannel() instead, which dump the entry data into a regular 912 // file on the default file system and create a FileChannel on top of it. newByteChannel(byte[] path, Set<? extends OpenOption> options, FileAttribute<?>... attrs)913 SeekableByteChannel newByteChannel(byte[] path, 914 Set<? extends OpenOption> options, 915 FileAttribute<?>... attrs) 916 throws IOException 917 { 918 checkOptions(options); 919 if (options.contains(StandardOpenOption.WRITE) || 920 options.contains(StandardOpenOption.APPEND)) { 921 checkWritable(); 922 beginRead(); // only need a read lock, the "update()" will obtain 923 // the write lock when the channel is closed 924 ensureOpen(); 925 try { 926 Entry e = getEntry(path); 927 if (e != null) { 928 if (e.isDir() || options.contains(CREATE_NEW)) 929 throw new FileAlreadyExistsException(getString(path)); 930 SeekableByteChannel sbc = 931 new EntryOutputChannel(supportPosix ? 932 new PosixEntry((PosixEntry)e, Entry.NEW) : 933 new Entry(e, Entry.NEW)); 934 if (options.contains(APPEND)) { 935 try (InputStream is = getInputStream(e)) { // copyover 936 byte[] buf = new byte[8192]; 937 ByteBuffer bb = ByteBuffer.wrap(buf); 938 int n; 939 while ((n = is.read(buf)) != -1) { 940 bb.position(0); 941 bb.limit(n); 942 sbc.write(bb); 943 } 944 } 945 } 946 return sbc; 947 } 948 if (!options.contains(CREATE) && !options.contains(CREATE_NEW)) 949 throw new NoSuchFileException(getString(path)); 950 checkParents(path); 951 return new EntryOutputChannel( 952 supportPosix ? 953 new PosixEntry(path, Entry.NEW, false, defaultCompressionMethod, attrs) : 954 new Entry(path, Entry.NEW, false, defaultCompressionMethod, attrs)); 955 } finally { 956 endRead(); 957 } 958 } else { 959 beginRead(); 960 try { 961 ensureOpen(); 962 Entry e = getEntry(path); 963 if (e == null || e.isDir()) 964 throw new NoSuchFileException(getString(path)); 965 try (InputStream is = getInputStream(e)) { 966 // TBD: if (e.size < NNNNN); 967 return new ByteArrayChannel(is.readAllBytes(), true); 968 } 969 } finally { 970 endRead(); 971 } 972 } 973 } 974 975 // Returns a FileChannel of the specified entry. 976 // 977 // This implementation creates a temporary file on the default file system, 978 // copy the entry data into it if the entry exists, and then create a 979 // FileChannel on top of it. newFileChannel(byte[] path, Set<? extends OpenOption> options, FileAttribute<?>... attrs)980 FileChannel newFileChannel(byte[] path, 981 Set<? extends OpenOption> options, 982 FileAttribute<?>... 
attrs) 983 throws IOException 984 { 985 checkOptions(options); 986 final boolean forWrite = (options.contains(StandardOpenOption.WRITE) || 987 options.contains(StandardOpenOption.APPEND)); 988 beginRead(); 989 try { 990 ensureOpen(); 991 Entry e = getEntry(path); 992 if (forWrite) { 993 checkWritable(); 994 if (e == null) { 995 if (!options.contains(StandardOpenOption.CREATE) && 996 !options.contains(StandardOpenOption.CREATE_NEW)) { 997 throw new NoSuchFileException(getString(path)); 998 } 999 } else { 1000 if (options.contains(StandardOpenOption.CREATE_NEW)) { 1001 throw new FileAlreadyExistsException(getString(path)); 1002 } 1003 if (e.isDir()) 1004 throw new FileAlreadyExistsException("directory <" 1005 + getString(path) + "> exists"); 1006 } 1007 options = new HashSet<>(options); 1008 options.remove(StandardOpenOption.CREATE_NEW); // for tmpfile 1009 } else if (e == null || e.isDir()) { 1010 throw new NoSuchFileException(getString(path)); 1011 } 1012 1013 final boolean isFCH = (e != null && e.type == Entry.FILECH); 1014 final Path tmpfile = isFCH ? e.file : getTempPathForEntry(path); 1015 final FileChannel fch = tmpfile.getFileSystem() 1016 .provider() 1017 .newFileChannel(tmpfile, options, attrs); 1018 final Entry u = isFCH ? e : ( 1019 supportPosix ? 1020 new PosixEntry(path, tmpfile, Entry.FILECH, attrs) : 1021 new Entry(path, tmpfile, Entry.FILECH, attrs)); 1022 if (forWrite) { 1023 u.flag = FLAG_DATADESCR; 1024 u.method = defaultCompressionMethod; 1025 } 1026 // is there a better way to hook into the FileChannel's close method? 1027 return new FileChannel() { 1028 public int write(ByteBuffer src) throws IOException { 1029 return fch.write(src); 1030 } 1031 public long write(ByteBuffer[] srcs, int offset, int length) 1032 throws IOException 1033 { 1034 return fch.write(srcs, offset, length); 1035 } 1036 public long position() throws IOException { 1037 return fch.position(); 1038 } 1039 public FileChannel position(long newPosition) 1040 throws IOException 1041 { 1042 fch.position(newPosition); 1043 return this; 1044 } 1045 public long size() throws IOException { 1046 return fch.size(); 1047 } 1048 public FileChannel truncate(long size) 1049 throws IOException 1050 { 1051 fch.truncate(size); 1052 return this; 1053 } 1054 public void force(boolean metaData) 1055 throws IOException 1056 { 1057 fch.force(metaData); 1058 } 1059 public long transferTo(long position, long count, 1060 WritableByteChannel target) 1061 throws IOException 1062 { 1063 return fch.transferTo(position, count, target); 1064 } 1065 public long transferFrom(ReadableByteChannel src, 1066 long position, long count) 1067 throws IOException 1068 { 1069 return fch.transferFrom(src, position, count); 1070 } 1071 public int read(ByteBuffer dst) throws IOException { 1072 return fch.read(dst); 1073 } 1074 public int read(ByteBuffer dst, long position) 1075 throws IOException 1076 { 1077 return fch.read(dst, position); 1078 } 1079 public long read(ByteBuffer[] dsts, int offset, int length) 1080 throws IOException 1081 { 1082 return fch.read(dsts, offset, length); 1083 } 1084 public int write(ByteBuffer src, long position) 1085 throws IOException 1086 { 1087 return fch.write(src, position); 1088 } 1089 public MappedByteBuffer map(MapMode mode, 1090 long position, long size) 1091 { 1092 throw new UnsupportedOperationException(); 1093 } 1094 public FileLock lock(long position, long size, boolean shared) 1095 throws IOException 1096 { 1097 return fch.lock(position, size, shared); 1098 } 1099 public FileLock tryLock(long position, 
long size, boolean shared) 1100 throws IOException 1101 { 1102 return fch.tryLock(position, size, shared); 1103 } 1104 protected void implCloseChannel() throws IOException { 1105 fch.close(); 1106 if (forWrite) { 1107 u.mtime = System.currentTimeMillis(); 1108 u.size = Files.size(u.file); 1109 update(u); 1110 } else { 1111 if (!isFCH) // if this is a new fch for reading 1112 removeTempPathForEntry(tmpfile); 1113 } 1114 } 1115 }; 1116 } finally { 1117 endRead(); 1118 } 1119 } 1120 1121 // the outstanding input streams that need to be closed 1122 private Set<InputStream> streams = 1123 Collections.synchronizedSet(new HashSet<>()); 1124 1125 private final Set<Path> tmppaths = Collections.synchronizedSet(new HashSet<>()); 1126 private Path getTempPathForEntry(byte[] path) throws IOException { 1127 Path tmpPath = createTempFileInSameDirectoryAs(zfpath); 1128 if (path != null) { 1129 Entry e = getEntry(path); 1130 if (e != null) { 1131 try (InputStream is = newInputStream(path)) { 1132 Files.copy(is, tmpPath, REPLACE_EXISTING); 1133 } 1134 } 1135 } 1136 return tmpPath; 1137 } 1138 1139 private void removeTempPathForEntry(Path path) throws IOException { 1140 Files.delete(path); 1141 tmppaths.remove(path); 1142 } 1143 1144 // check if all parents really exist. ZIP spec does not require 1145 // the existence of any "parent directory". 1146 private void checkParents(byte[] path) throws IOException { 1147 beginRead(); 1148 try { 1149 while ((path = getParent(path)) != null && 1150 path != ROOTPATH) { 1151 if (!inodes.containsKey(IndexNode.keyOf(path))) { 1152 throw new NoSuchFileException(getString(path)); 1153 } 1154 } 1155 } finally { 1156 endRead(); 1157 } 1158 } 1159 1160 private static byte[] getParent(byte[] path) { 1161 int off = getParentOff(path); 1162 if (off <= 1) 1163 return ROOTPATH; 1164 return Arrays.copyOf(path, off); 1165 } 1166 1167 private static int getParentOff(byte[] path) { 1168 int off = path.length - 1; 1169 if (off > 0 && path[off] == '/') // isDirectory 1170 off--; 1171 while (off > 0 && path[off] != '/') { off--; } 1172 return off; 1173 } 1174 1175 private void beginWrite() { 1176 rwlock.writeLock().lock(); 1177 } 1178 1179 private void endWrite() { 1180 rwlock.writeLock().unlock(); 1181 } 1182 1183 private void beginRead() { 1184 rwlock.readLock().lock(); 1185 } 1186 1187 private void endRead() { 1188 rwlock.readLock().unlock(); 1189 } 1190 1191 /////////////////////////////////////////////////////////////////// 1192 1193 private volatile boolean isOpen = true; 1194 private final SeekableByteChannel ch; // channel to the zipfile 1195 final byte[] cen; // CEN & ENDHDR 1196 private END end; 1197 private long locpos; // position of first LOC header (usually 0) 1198 1199 private final ReadWriteLock rwlock = new ReentrantReadWriteLock(); 1200 1201 // name -> pos (in cen), IndexNode itself can be used as a "key" 1202 private LinkedHashMap<IndexNode, IndexNode> inodes; 1203 1204 final byte[] getBytes(String name) { 1205 return zc.getBytes(name); 1206 } 1207 1208 final String getString(byte[] name) { 1209 return zc.toString(name); 1210 } 1211 1212 @SuppressWarnings("deprecation") 1213 protected void finalize() throws IOException { 1214 close(); 1215 } 1216 1217 // Reads len bytes of data from the specified offset into buf. 1218 // Returns the total number of bytes read. 1219 // Each/every byte read from here (except the cen, which is mapped). 
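    // Illustrative sketch (not part of the original sources): callers of readFullyAt
    // typically read a fixed-size record at a known offset and verify the full length
    // came back, e.g. (hypothetical local variables):
    //
    //   byte[] hdr = new byte[ENDHDR];
    //   if (readFullyAt(hdr, 0, hdr.length, pos) != hdr.length)
    //       throw new ZipException("zip END header not found");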
1220 final long readFullyAt(byte[] buf, int off, long len, long pos) 1221 throws IOException 1222 { 1223 ByteBuffer bb = ByteBuffer.wrap(buf); 1224 bb.position(off); 1225 bb.limit((int)(off + len)); 1226 return readFullyAt(bb, pos); 1227 } 1228 1229 private long readFullyAt(ByteBuffer bb, long pos) throws IOException { 1230 synchronized(ch) { 1231 return ch.position(pos).read(bb); 1232 } 1233 } 1234 1235 // Searches for end of central directory (END) header. The contents of 1236 // the END header will be read and placed in endbuf. Returns the file 1237 // position of the END header, otherwise returns -1 if the END header 1238 // was not found or an error occurred. 1239 private END findEND() throws IOException { 1240 byte[] buf = new byte[READBLOCKSZ]; 1241 long ziplen = ch.size(); 1242 long minHDR = (ziplen - END_MAXLEN) > 0 ? ziplen - END_MAXLEN : 0; 1243 long minPos = minHDR - (buf.length - ENDHDR); 1244 1245 for (long pos = ziplen - buf.length; pos >= minPos; pos -= (buf.length - ENDHDR)) { 1246 int off = 0; 1247 if (pos < 0) { 1248 // Pretend there are some NUL bytes before start of file 1249 off = (int)-pos; 1250 Arrays.fill(buf, 0, off, (byte)0); 1251 } 1252 int len = buf.length - off; 1253 if (readFullyAt(buf, off, len, pos + off) != len) 1254 throw new ZipException("zip END header not found"); 1255 1256 // Now scan the block backwards for END header signature 1257 for (int i = buf.length - ENDHDR; i >= 0; i--) { 1258 if (buf[i] == (byte)'P' && 1259 buf[i+1] == (byte)'K' && 1260 buf[i+2] == (byte)'\005' && 1261 buf[i+3] == (byte)'\006' && 1262 (pos + i + ENDHDR + ENDCOM(buf, i) == ziplen)) { 1263 // Found END header 1264 buf = Arrays.copyOfRange(buf, i, i + ENDHDR); 1265 END end = new END(); 1266 // end.endsub = ENDSUB(buf); // not used 1267 end.centot = ENDTOT(buf); 1268 end.cenlen = ENDSIZ(buf); 1269 end.cenoff = ENDOFF(buf); 1270 // end.comlen = ENDCOM(buf); // not used 1271 end.endpos = pos + i; 1272 // try if there is zip64 end; 1273 byte[] loc64 = new byte[ZIP64_LOCHDR]; 1274 if (end.endpos < ZIP64_LOCHDR || 1275 readFullyAt(loc64, 0, loc64.length, end.endpos - ZIP64_LOCHDR) 1276 != loc64.length || 1277 !locator64SigAt(loc64, 0)) { 1278 return end; 1279 } 1280 long end64pos = ZIP64_LOCOFF(loc64); 1281 byte[] end64buf = new byte[ZIP64_ENDHDR]; 1282 if (readFullyAt(end64buf, 0, end64buf.length, end64pos) 1283 != end64buf.length || 1284 !end64SigAt(end64buf, 0)) { 1285 return end; 1286 } 1287 // end64 found, 1288 long cenlen64 = ZIP64_ENDSIZ(end64buf); 1289 long cenoff64 = ZIP64_ENDOFF(end64buf); 1290 long centot64 = ZIP64_ENDTOT(end64buf); 1291 // double-check 1292 if (cenlen64 != end.cenlen && end.cenlen != ZIP64_MINVAL || 1293 cenoff64 != end.cenoff && end.cenoff != ZIP64_MINVAL || 1294 centot64 != end.centot && end.centot != ZIP64_MINVAL32) { 1295 return end; 1296 } 1297 // to use the end64 values 1298 end.cenlen = cenlen64; 1299 end.cenoff = cenoff64; 1300 end.centot = (int)centot64; // assume total < 2g 1301 end.endpos = end64pos; 1302 return end; 1303 } 1304 } 1305 } 1306 throw new ZipException("zip END header not found"); 1307 } 1308 1309 private void makeParentDirs(IndexNode node, IndexNode root) { 1310 IndexNode parent; 1311 ParentLookup lookup = new ParentLookup(); 1312 while (true) { 1313 int off = getParentOff(node.name); 1314 // parent is root 1315 if (off <= 1) { 1316 node.sibling = root.child; 1317 root.child = node; 1318 break; 1319 } 1320 // parent exists 1321 lookup = lookup.as(node.name, off); 1322 if (inodes.containsKey(lookup)) { 1323 parent = 
inodes.get(lookup); 1324 node.sibling = parent.child; 1325 parent.child = node; 1326 break; 1327 } 1328 // parent does not exist, add new pseudo directory entry 1329 parent = new IndexNode(Arrays.copyOf(node.name, off), true); 1330 inodes.put(parent, parent); 1331 node.sibling = parent.child; 1332 parent.child = node; 1333 node = parent; 1334 } 1335 } 1336 1337 // ZIP directory has two issues: 1338 // (1) ZIP spec does not require the ZIP file to include 1339 // directory entry 1340 // (2) all entries are not stored/organized in a "tree" 1341 // structure. 1342 // A possible solution is to build the node tree ourself as 1343 // implemented below. 1344 private void buildNodeTree() { 1345 beginWrite(); 1346 try { 1347 IndexNode root = inodes.remove(LOOKUPKEY.as(ROOTPATH)); 1348 if (root == null) { 1349 root = new IndexNode(ROOTPATH, true); 1350 } 1351 IndexNode[] nodes = inodes.values().toArray(new IndexNode[0]); 1352 inodes.put(root, root); 1353 for (IndexNode node : nodes) { 1354 makeParentDirs(node, root); 1355 } 1356 } finally { 1357 endWrite(); 1358 } 1359 } 1360 1361 private void removeFromTree(IndexNode inode) { 1362 IndexNode parent = inodes.get(LOOKUPKEY.as(getParent(inode.name))); 1363 IndexNode child = parent.child; 1364 if (child.equals(inode)) { 1365 parent.child = child.sibling; 1366 } else { 1367 IndexNode last = child; 1368 while ((child = child.sibling) != null) { 1369 if (child.equals(inode)) { 1370 last.sibling = child.sibling; 1371 break; 1372 } else { 1373 last = child; 1374 } 1375 } 1376 } 1377 } 1378 1379 /** 1380 * If a version property has been specified and the file represents a multi-release JAR, 1381 * determine the requested runtime version and initialize the ZipFileSystem instance accordingly. 1382 * 1383 * Checks if the Zip File System property "releaseVersion" has been specified. If it has, 1384 * use its value to determine the requested version. If not use the value of the "multi-release" property. 1385 */ 1386 private void initializeReleaseVersion(Map<String, ?> env) throws IOException { 1387 Object o = env.containsKey(PROPERTY_RELEASE_VERSION) ? 1388 env.get(PROPERTY_RELEASE_VERSION) : 1389 env.get(PROPERTY_MULTI_RELEASE); 1390 1391 if (o != null && isMultiReleaseJar()) { 1392 int version; 1393 if (o instanceof String) { 1394 String s = (String)o; 1395 if (s.equals("runtime")) { 1396 version = Runtime.version().feature(); 1397 } else if (s.matches("^[1-9][0-9]*$")) { 1398 version = Version.parse(s).feature(); 1399 } else { 1400 throw new IllegalArgumentException("Invalid runtime version"); 1401 } 1402 } else if (o instanceof Integer) { 1403 version = Version.parse(((Integer)o).toString()).feature(); 1404 } else if (o instanceof Version) { 1405 version = ((Version)o).feature(); 1406 } else { 1407 throw new IllegalArgumentException("env parameter must be String, " + 1408 "Integer, or Version"); 1409 } 1410 createVersionedLinks(version < 0 ? 0 : version); 1411 setReadOnly(); 1412 } 1413 } 1414 1415 /** 1416 * Returns true if the Manifest main attribute "Multi-Release" is set to true; false otherwise. 
1417 */ 1418 private boolean isMultiReleaseJar() throws IOException { 1419 try (InputStream is = newInputStream(getBytes("/META-INF/MANIFEST.MF"))) { 1420 String multiRelease = new Manifest(is).getMainAttributes() 1421 .getValue(Attributes.Name.MULTI_RELEASE); 1422 return "true".equalsIgnoreCase(multiRelease); 1423 } catch (NoSuchFileException x) { 1424 return false; 1425 } 1426 } 1427 1428 /** 1429 * Create a map of aliases for versioned entries, for example: 1430 * version/PackagePrivate.class -> META-INF/versions/9/version/PackagePrivate.class 1431 * version/PackagePrivate.java -> META-INF/versions/9/version/PackagePrivate.java 1432 * version/Version.class -> META-INF/versions/10/version/Version.class 1433 * version/Version.java -> META-INF/versions/10/version/Version.java 1434 * 1435 * Then wrap the map in a function that getEntry can use to override root 1436 * entry lookup for entries that have corresponding versioned entries. 1437 */ 1438 private void createVersionedLinks(int version) { 1439 IndexNode verdir = getInode(getBytes("/META-INF/versions")); 1440 // nothing to do, if no /META-INF/versions 1441 if (verdir == null) { 1442 return; 1443 } 1444 // otherwise, create a map and for each META-INF/versions/{n} directory 1445 // put all the leaf inodes, i.e. entries, into the alias map 1446 // possibly shadowing lower versioned entries 1447 HashMap<IndexNode, byte[]> aliasMap = new HashMap<>(); 1448 getVersionMap(version, verdir).values().forEach(versionNode -> 1449 walk(versionNode.child, entryNode -> 1450 aliasMap.put( 1451 getOrCreateInode(getRootName(entryNode, versionNode), entryNode.isdir), 1452 entryNode.name)) 1453 ); 1454 entryLookup = path -> { 1455 byte[] entry = aliasMap.get(IndexNode.keyOf(path)); 1456 return entry == null ? path : entry; 1457 }; 1458 } 1459 1460 /** 1461 * Create a sorted version map of version -> inode, for inodes <= max version. 1462 * 9 -> META-INF/versions/9 1463 * 10 -> META-INF/versions/10 1464 */ 1465 private TreeMap<Integer, IndexNode> getVersionMap(int version, IndexNode metaInfVersions) { 1466 TreeMap<Integer,IndexNode> map = new TreeMap<>(); 1467 IndexNode child = metaInfVersions.child; 1468 while (child != null) { 1469 Integer key = getVersion(child, metaInfVersions); 1470 if (key != null && key <= version) { 1471 map.put(key, child); 1472 } 1473 child = child.sibling; 1474 } 1475 return map; 1476 } 1477 1478 /** 1479 * Extract the integer version number -- META-INF/versions/9 returns 9. 1480 */ 1481 private Integer getVersion(IndexNode inode, IndexNode metaInfVersions) { 1482 try { 1483 byte[] fullName = inode.name; 1484 return Integer.parseInt(getString(Arrays 1485 .copyOfRange(fullName, metaInfVersions.name.length + 1, fullName.length))); 1486 } catch (NumberFormatException x) { 1487 // ignore this even though it might indicate issues with the JAR structure 1488 return null; 1489 } 1490 } 1491 1492 /** 1493 * Walk the IndexNode tree processing all leaf nodes. 1494 */ 1495 private void walk(IndexNode inode, Consumer<IndexNode> consumer) { 1496 if (inode == null) return; 1497 if (inode.isDir()) { 1498 walk(inode.child, consumer); 1499 } else { 1500 consumer.accept(inode); 1501 } 1502 walk(inode.sibling, consumer); 1503 } 1504 1505 /** 1506 * Extract the root name from a versioned entry name. 1507 * E.g. given inode 'META-INF/versions/9/foo/bar.class' 1508 * and prefix 'META-INF/versions/9/' returns 'foo/bar.class'. 
1509 */ 1510 private byte[] getRootName(IndexNode inode, IndexNode prefix) { 1511 byte[] fullName = inode.name; 1512 return Arrays.copyOfRange(fullName, prefix.name.length, fullName.length); 1513 } 1514 1515 // Reads zip file central directory. Returns the file position of first 1516 // CEN header, otherwise returns -1 if an error occurred. If zip->msg != NULL 1517 // then the error was a zip format error and zip->msg has the error text. 1518 // Always pass in -1 for knownTotal; it's used for a recursive call. 1519 private byte[] initCEN() throws IOException { 1520 end = findEND(); 1521 if (end.endpos == 0) { 1522 inodes = new LinkedHashMap<>(10); 1523 locpos = 0; 1524 buildNodeTree(); 1525 return null; // only END header present 1526 } 1527 if (end.cenlen > end.endpos) 1528 throw new ZipException("invalid END header (bad central directory size)"); 1529 long cenpos = end.endpos - end.cenlen; // position of CEN table 1530 1531 // Get position of first local file (LOC) header, taking into 1532 // account that there may be a stub prefixed to the zip file. 1533 locpos = cenpos - end.cenoff; 1534 if (locpos < 0) 1535 throw new ZipException("invalid END header (bad central directory offset)"); 1536 1537 // read in the CEN and END 1538 byte[] cen = new byte[(int)(end.cenlen + ENDHDR)]; 1539 if (readFullyAt(cen, 0, cen.length, cenpos) != end.cenlen + ENDHDR) { 1540 throw new ZipException("read CEN tables failed"); 1541 } 1542 // Iterate through the entries in the central directory 1543 inodes = new LinkedHashMap<>(end.centot + 1); 1544 int pos = 0; 1545 int limit = cen.length - ENDHDR; 1546 while (pos < limit) { 1547 if (!cenSigAt(cen, pos)) 1548 throw new ZipException("invalid CEN header (bad signature)"); 1549 int method = CENHOW(cen, pos); 1550 int nlen = CENNAM(cen, pos); 1551 int elen = CENEXT(cen, pos); 1552 int clen = CENCOM(cen, pos); 1553 int flag = CENFLG(cen, pos); 1554 if ((flag & 1) != 0) { 1555 throw new ZipException("invalid CEN header (encrypted entry)"); 1556 } 1557 if (method != METHOD_STORED && method != METHOD_DEFLATED) { 1558 throw new ZipException("invalid CEN header (unsupported compression method: " + method + ")"); 1559 } 1560 if (pos + CENHDR + nlen > limit) { 1561 throw new ZipException("invalid CEN header (bad header size)"); 1562 } 1563 IndexNode inode = new IndexNode(cen, pos, nlen); 1564 inodes.put(inode, inode); 1565 if (zc.isUTF8() || (flag & FLAG_USE_UTF8) != 0) { 1566 checkUTF8(inode.name); 1567 } else { 1568 checkEncoding(inode.name); 1569 } 1570 // skip ext and comment 1571 pos += (CENHDR + nlen + elen + clen); 1572 } 1573 if (pos + ENDHDR != cen.length) { 1574 throw new ZipException("invalid CEN header (bad header size)"); 1575 } 1576 buildNodeTree(); 1577 return cen; 1578 } 1579 1580 private final void checkUTF8(byte[] a) throws ZipException { 1581 try { 1582 int end = a.length; 1583 int pos = 0; 1584 while (pos < end) { 1585 // ASCII fast-path: When checking that a range of bytes is 1586 // valid UTF-8, we can avoid some allocation by skipping 1587 // past bytes in the 0-127 range 1588 if (a[pos] < 0) { 1589 zc.toString(Arrays.copyOfRange(a, pos, a.length)); 1590 break; 1591 } 1592 pos++; 1593 } 1594 } catch(Exception e) { 1595 throw new ZipException("invalid CEN header (bad entry name)"); 1596 } 1597 } 1598 1599 private final void checkEncoding( byte[] a) throws ZipException { 1600 try { 1601 zc.toString(a); 1602 } catch(Exception e) { 1603 throw new ZipException("invalid CEN header (bad entry name)"); 1604 } 1605 } 1606 1607 1608 private void ensureOpen() 
{ 1609 if (!isOpen) 1610 throw new ClosedFileSystemException(); 1611 } 1612 1613 // Creates a new empty temporary file in the same directory as the 1614 // specified file. A variant of Files.createTempFile. 1615 private Path createTempFileInSameDirectoryAs(Path path) throws IOException { 1616 Path parent = path.toAbsolutePath().getParent(); 1617 Path dir = (parent == null) ? path.getFileSystem().getPath(".") : parent; 1618 Path tmpPath = Files.createTempFile(dir, "zipfstmp", null); 1619 tmppaths.add(tmpPath); 1620 return tmpPath; 1621 } 1622 1623 ////////////////////update & sync ////////////////////////////////////// 1624 1625 private boolean hasUpdate = false; 1626 1627 // shared key. consumer guarantees the "writeLock" before use it. 1628 private final IndexNode LOOKUPKEY = new IndexNode(null, -1); 1629 1630 private void updateDelete(IndexNode inode) { 1631 beginWrite(); 1632 try { 1633 removeFromTree(inode); 1634 inodes.remove(inode); 1635 hasUpdate = true; 1636 } finally { 1637 endWrite(); 1638 } 1639 } 1640 1641 private void update(Entry e) { 1642 beginWrite(); 1643 try { 1644 IndexNode old = inodes.put(e, e); 1645 if (old != null) { 1646 removeFromTree(old); 1647 } 1648 if (e.type == Entry.NEW || e.type == Entry.FILECH || e.type == Entry.COPY) { 1649 IndexNode parent = inodes.get(LOOKUPKEY.as(getParent(e.name))); 1650 e.sibling = parent.child; 1651 parent.child = e; 1652 } 1653 hasUpdate = true; 1654 } finally { 1655 endWrite(); 1656 } 1657 } 1658 1659 // copy over the whole LOC entry (header if necessary, data and ext) from 1660 // old zip to the new one. 1661 private long copyLOCEntry(Entry e, boolean updateHeader, 1662 OutputStream os, 1663 long written, byte[] buf) 1664 throws IOException 1665 { 1666 long locoff = e.locoff; // where to read 1667 e.locoff = written; // update the e.locoff with new value 1668 1669 // calculate the size need to write out 1670 long size = 0; 1671 // if there is A ext 1672 if ((e.flag & FLAG_DATADESCR) != 0) { 1673 if (e.size >= ZIP64_MINVAL || e.csize >= ZIP64_MINVAL) 1674 size = 24; 1675 else 1676 size = 16; 1677 } 1678 // read loc, use the original loc.elen/nlen 1679 // 1680 // an extra byte after loc is read, which should be the first byte of the 1681 // 'name' field of the loc. 
if this byte is '/', which means the original 1682 // entry has an absolute path in original zip/jar file, the e.writeLOC() 1683 // is used to output the loc, in which the leading "/" will be removed 1684 if (readFullyAt(buf, 0, LOCHDR + 1 , locoff) != LOCHDR + 1) 1685 throw new ZipException("loc: reading failed"); 1686 1687 if (updateHeader || LOCNAM(buf) > 0 && buf[LOCHDR] == '/') { 1688 locoff += LOCHDR + LOCNAM(buf) + LOCEXT(buf); // skip header 1689 size += e.csize; 1690 written = e.writeLOC(os) + size; 1691 } else { 1692 os.write(buf, 0, LOCHDR); // write out the loc header 1693 locoff += LOCHDR; 1694 // use e.csize, LOCSIZ(buf) is zero if FLAG_DATADESCR is on 1695 // size += LOCNAM(buf) + LOCEXT(buf) + LOCSIZ(buf); 1696 size += LOCNAM(buf) + LOCEXT(buf) + e.csize; 1697 written = LOCHDR + size; 1698 } 1699 int n; 1700 while (size > 0 && 1701 (n = (int)readFullyAt(buf, 0, buf.length, locoff)) != -1) 1702 { 1703 if (size < n) 1704 n = (int)size; 1705 os.write(buf, 0, n); 1706 size -= n; 1707 locoff += n; 1708 } 1709 return written; 1710 } 1711 1712 private long writeEntry(Entry e, OutputStream os) 1713 throws IOException { 1714 1715 if (e.bytes == null && e.file == null) // dir, 0-length data 1716 return 0; 1717 1718 long written = 0; 1719 if (e.method != METHOD_STORED && e.csize > 0 && (e.crc != 0 || e.size == 0)) { 1720 // pre-compressed entry, write directly to output stream 1721 writeTo(e, os); 1722 } else { 1723 try (OutputStream os2 = (e.method == METHOD_STORED) ? 1724 new EntryOutputStreamCRC32(e, os) : new EntryOutputStreamDef(e, os)) { 1725 writeTo(e, os2); 1726 } 1727 } 1728 written += e.csize; 1729 if ((e.flag & FLAG_DATADESCR) != 0) { 1730 written += e.writeEXT(os); 1731 } 1732 return written; 1733 } 1734 1735 private void writeTo(Entry e, OutputStream os) throws IOException { 1736 if (e.bytes != null) { 1737 os.write(e.bytes, 0, e.bytes.length); 1738 } else if (e.file != null) { 1739 if (e.type == Entry.NEW || e.type == Entry.FILECH) { 1740 try (InputStream is = Files.newInputStream(e.file)) { 1741 is.transferTo(os); 1742 } 1743 } 1744 Files.delete(e.file); 1745 tmppaths.remove(e.file); 1746 } 1747 } 1748 1749 // sync the zip file system, if there is any update 1750 private void sync() throws IOException { 1751 if (!hasUpdate) 1752 return; 1753 PosixFileAttributes attrs = getPosixAttributes(zfpath); 1754 Path tmpFile = createTempFileInSameDirectoryAs(zfpath); 1755 try (OutputStream os = new BufferedOutputStream(Files.newOutputStream(tmpFile, WRITE))) { 1756 ArrayList<Entry> elist = new ArrayList<>(inodes.size()); 1757 long written = 0; 1758 byte[] buf = null; 1759 Entry e; 1760 1761 final IndexNode manifestInode = inodes.get( 1762 IndexNode.keyOf(getBytes("/META-INF/MANIFEST.MF"))); 1763 final Iterator<IndexNode> inodeIterator = inodes.values().iterator(); 1764 boolean manifestProcessed = false; 1765 1766 // write loc 1767 while (inodeIterator.hasNext()) { 1768 final IndexNode inode; 1769 1770 // write the manifest inode (if any) first so that 1771 // java.util.jar.JarInputStream can find it 1772 if (manifestInode == null) { 1773 inode = inodeIterator.next(); 1774 } else { 1775 if (manifestProcessed) { 1776 // advance to next node, filtering out the manifest 1777 // which was already written 1778 inode = inodeIterator.next(); 1779 if (inode == manifestInode) { 1780 continue; 1781 } 1782 } else { 1783 inode = manifestInode; 1784 manifestProcessed = true; 1785 } 1786 } 1787 1788 if (inode instanceof Entry) { // an updated inode 1789 e = (Entry)inode; 1790 try { 1791 if 
(e.type == Entry.COPY) { 1792 // entry copy: the only thing changed is the "name" 1793 // and "nlen" in LOC header, so we update/rewrite the 1794 // LOC in new file and simply copy the rest (data and 1795 // ext) without enflating/deflating from the old zip 1796 // file LOC entry. 1797 if (buf == null) 1798 buf = new byte[8192]; 1799 written += copyLOCEntry(e, true, os, written, buf); 1800 } else { // NEW, FILECH or CEN 1801 e.locoff = written; 1802 written += e.writeLOC(os); // write loc header 1803 written += writeEntry(e, os); 1804 } 1805 elist.add(e); 1806 } catch (IOException x) { 1807 x.printStackTrace(); // skip any in-accurate entry 1808 } 1809 } else { // unchanged inode 1810 if (inode.pos == -1) { 1811 continue; // pseudo directory node 1812 } 1813 if (inode.name.length == 1 && inode.name[0] == '/') { 1814 continue; // no root '/' directory even if it 1815 // exists in original zip/jar file. 1816 } 1817 e = supportPosix ? new PosixEntry(this, inode) : new Entry(this, inode); 1818 try { 1819 if (buf == null) 1820 buf = new byte[8192]; 1821 written += copyLOCEntry(e, false, os, written, buf); 1822 elist.add(e); 1823 } catch (IOException x) { 1824 x.printStackTrace(); // skip any wrong entry 1825 } 1826 } 1827 } 1828 1829 // now write back the cen and end table 1830 end.cenoff = written; 1831 for (Entry entry : elist) { 1832 written += entry.writeCEN(os); 1833 } 1834 end.centot = elist.size(); 1835 end.cenlen = written - end.cenoff; 1836 end.write(os, written, forceEnd64); 1837 } 1838 ch.close(); 1839 Files.delete(zfpath); 1840 1841 // Set the POSIX permissions of the original Zip File if available 1842 // before moving the temp file 1843 if (attrs != null) { 1844 Files.setPosixFilePermissions(tmpFile, attrs.permissions()); 1845 } 1846 Files.move(tmpFile, zfpath, REPLACE_EXISTING); 1847 hasUpdate = false; // clear 1848 } 1849 1850 /** 1851 * Returns a file's POSIX file attributes. 1852 * @param path The path to the file 1853 * @return The POSIX file attributes for the specified file or 1854 * null if the POSIX attribute view is not available 1855 * @throws IOException If an error occurs obtaining the POSIX attributes for 1856 * the specified file 1857 */ 1858 private PosixFileAttributes getPosixAttributes(Path path) throws IOException { 1859 try { 1860 PosixFileAttributeView view = 1861 Files.getFileAttributeView(path, PosixFileAttributeView.class); 1862 // Return if the attribute view is not supported 1863 if (view == null) { 1864 return null; 1865 } 1866 return view.readAttributes(); 1867 } catch (UnsupportedOperationException e) { 1868 // PosixFileAttributes not available 1869 return null; 1870 } 1871 } 1872 1873 private IndexNode getInode(byte[] path) { 1874 return inodes.get(IndexNode.keyOf(Objects.requireNonNull(entryLookup.apply(path), "path"))); 1875 } 1876 1877 /** 1878 * Return the IndexNode from the root tree. If it doesn't exist, 1879 * it gets created along with all parent directory IndexNodes. 
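 * The node created here is a pseudo node (pos == -1), i.e. it is not
 * backed by a CEN record in the underlying zip file.
 *
 * @param path  the entry name in the internal byte[] path form
 * @param isdir true if the node represents a directory
 * @return the existing or the newly created IndexNode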
1880 */ 1881 private IndexNode getOrCreateInode(byte[] path, boolean isdir) { 1882 IndexNode node = getInode(path); 1883 // if node exists, return it 1884 if (node != null) { 1885 return node; 1886 } 1887 1888 // otherwise create new pseudo node and parent directory hierarchy 1889 node = new IndexNode(path, isdir); 1890 beginWrite(); 1891 try { 1892 makeParentDirs(node, Objects.requireNonNull(inodes.get(IndexNode.keyOf(ROOTPATH)), "no root node found")); 1893 return node; 1894 } finally { 1895 endWrite(); 1896 } 1897 } 1898 1899 private Entry getEntry(byte[] path) throws IOException { 1900 IndexNode inode = getInode(path); 1901 if (inode instanceof Entry) 1902 return (Entry)inode; 1903 if (inode == null || inode.pos == -1) 1904 return null; 1905 return supportPosix ? new PosixEntry(this, inode): new Entry(this, inode); 1906 } 1907 1908 public void deleteFile(byte[] path, boolean failIfNotExists) 1909 throws IOException 1910 { 1911 checkWritable(); 1912 IndexNode inode = getInode(path); 1913 if (inode == null) { 1914 if (path != null && path.length == 0) 1915 throw new ZipException("root directory </> can't not be delete"); 1916 if (failIfNotExists) 1917 throw new NoSuchFileException(getString(path)); 1918 } else { 1919 if (inode.isDir() && inode.child != null) 1920 throw new DirectoryNotEmptyException(getString(path)); 1921 updateDelete(inode); 1922 } 1923 } 1924 1925 // Returns an out stream for either 1926 // (1) writing the contents of a new entry, if the entry exists, or 1927 // (2) updating/replacing the contents of the specified existing entry. 1928 private OutputStream getOutputStream(Entry e) throws IOException { 1929 if (e.mtime == -1) 1930 e.mtime = System.currentTimeMillis(); 1931 if (e.method == -1) 1932 e.method = defaultCompressionMethod; 1933 // store size, compressed size, and crc-32 in datadescr 1934 e.flag = FLAG_DATADESCR; 1935 if (zc.isUTF8()) 1936 e.flag |= FLAG_USE_UTF8; 1937 OutputStream os; 1938 if (useTempFile) { 1939 e.file = getTempPathForEntry(null); 1940 os = Files.newOutputStream(e.file, WRITE); 1941 } else { 1942 os = new ByteArrayOutputStream((e.size > 0)? (int)e.size : 8192); 1943 } 1944 if (e.method == METHOD_DEFLATED) { 1945 return new DeflatingEntryOutputStream(e, os); 1946 } else { 1947 return new EntryOutputStream(e, os); 1948 } 1949 } 1950 1951 private class EntryOutputStream extends FilterOutputStream { 1952 private final Entry e; 1953 private long written; 1954 private boolean isClosed; 1955 1956 EntryOutputStream(Entry e, OutputStream os) { 1957 super(os); 1958 this.e = Objects.requireNonNull(e, "Zip entry is null"); 1959 // this.written = 0; 1960 } 1961 1962 @Override 1963 public synchronized void write(int b) throws IOException { 1964 out.write(b); 1965 written += 1; 1966 } 1967 1968 @Override 1969 public synchronized void write(byte[] b, int off, int len) 1970 throws IOException { 1971 out.write(b, off, len); 1972 written += len; 1973 } 1974 1975 @Override 1976 public synchronized void close() throws IOException { 1977 if (isClosed) { 1978 return; 1979 } 1980 isClosed = true; 1981 e.size = written; 1982 if (out instanceof ByteArrayOutputStream) 1983 e.bytes = ((ByteArrayOutputStream)out).toByteArray(); 1984 super.close(); 1985 update(e); 1986 } 1987 } 1988 1989 // Output stream returned when writing "deflated" entries into memory, 1990 // to enable eager (possibly parallel) deflation and reduce memory required. 
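    // Note: this stream is handed out by getOutputStream() for DEFLATED
    // entries, so close() records size/csize/crc on the entry and publishes
    // it via update(). The similarly named EntryOutputStreamDef below is
    // only used by sync() and does not touch the inodes map.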
1991 private class DeflatingEntryOutputStream extends DeflaterOutputStream { 1992 private final CRC32 crc; 1993 private final Entry e; 1994 private boolean isClosed; 1995 1996 DeflatingEntryOutputStream(Entry e, OutputStream os) { 1997 super(os, getDeflater()); 1998 this.e = Objects.requireNonNull(e, "Zip entry is null"); 1999 this.crc = new CRC32(); 2000 } 2001 2002 @Override 2003 public synchronized void write(byte[] b, int off, int len) 2004 throws IOException { 2005 super.write(b, off, len); 2006 crc.update(b, off, len); 2007 } 2008 2009 @Override 2010 public synchronized void close() throws IOException { 2011 if (isClosed) 2012 return; 2013 isClosed = true; 2014 finish(); 2015 e.size = def.getBytesRead(); 2016 e.csize = def.getBytesWritten(); 2017 e.crc = crc.getValue(); 2018 if (out instanceof ByteArrayOutputStream) 2019 e.bytes = ((ByteArrayOutputStream)out).toByteArray(); 2020 super.close(); 2021 update(e); 2022 releaseDeflater(def); 2023 } 2024 } 2025 2026 // Wrapper output stream class to write out a "stored" entry. 2027 // (1) this class does not close the underlying out stream when 2028 // being closed. 2029 // (2) no need to be "synchronized", only used by sync() 2030 private class EntryOutputStreamCRC32 extends FilterOutputStream { 2031 private final CRC32 crc; 2032 private final Entry e; 2033 private long written; 2034 private boolean isClosed; 2035 2036 EntryOutputStreamCRC32(Entry e, OutputStream os) { 2037 super(os); 2038 this.e = Objects.requireNonNull(e, "Zip entry is null"); 2039 this.crc = new CRC32(); 2040 } 2041 2042 @Override 2043 public void write(int b) throws IOException { 2044 out.write(b); 2045 crc.update(b); 2046 written += 1; 2047 } 2048 2049 @Override 2050 public void write(byte[] b, int off, int len) 2051 throws IOException { 2052 out.write(b, off, len); 2053 crc.update(b, off, len); 2054 written += len; 2055 } 2056 2057 @Override 2058 public void close() { 2059 if (isClosed) 2060 return; 2061 isClosed = true; 2062 e.size = e.csize = written; 2063 e.crc = crc.getValue(); 2064 } 2065 } 2066 2067 // Wrapper output stream class to write out a "deflated" entry. 2068 // (1) this class does not close the underlying out stream when 2069 // being closed. 2070 // (2) no need to be "synchronized", only used by sync() 2071 private class EntryOutputStreamDef extends DeflaterOutputStream { 2072 private final CRC32 crc; 2073 private final Entry e; 2074 private boolean isClosed; 2075 2076 EntryOutputStreamDef(Entry e, OutputStream os) { 2077 super(os, getDeflater()); 2078 this.e = Objects.requireNonNull(e, "Zip entry is null"); 2079 this.crc = new CRC32(); 2080 } 2081 2082 @Override 2083 public void write(byte[] b, int off, int len) throws IOException { 2084 super.write(b, off, len); 2085 crc.update(b, off, len); 2086 } 2087 2088 @Override 2089 public void close() throws IOException { 2090 if (isClosed) 2091 return; 2092 isClosed = true; 2093 finish(); 2094 e.size = def.getBytesRead(); 2095 e.csize = def.getBytesWritten(); 2096 e.crc = crc.getValue(); 2097 releaseDeflater(def); 2098 } 2099 } 2100 2101 private InputStream getInputStream(Entry e) 2102 throws IOException 2103 { 2104 InputStream eis; 2105 if (e.type == Entry.NEW) { 2106 if (e.bytes != null) 2107 eis = new ByteArrayInputStream(e.bytes); 2108 else if (e.file != null) 2109 eis = Files.newInputStream(e.file); 2110 else 2111 throw new ZipException("update entry data is missing"); 2112 } else if (e.type == Entry.FILECH) { 2113 // FILECH result is un-compressed. 
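            // the temp file backing a FILECH entry already holds the raw
            // (uncompressed) data, so it is returned directly and never
            // wrapped in an InflaterInputStream below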
2114 eis = Files.newInputStream(e.file); 2115 // TBD: wrap to hook close() 2116 // streams.add(eis); 2117 return eis; 2118 } else { // untouched CEN or COPY 2119 eis = new EntryInputStream(e, ch); 2120 } 2121 if (e.method == METHOD_DEFLATED) { 2122 // MORE: Compute good size for inflater stream: 2123 long bufSize = e.size + 2; // Inflater likes a bit of slack 2124 if (bufSize > 65536) 2125 bufSize = 8192; 2126 final long size = e.size; 2127 eis = new InflaterInputStream(eis, getInflater(), (int)bufSize) { 2128 private boolean isClosed = false; 2129 public void close() throws IOException { 2130 if (!isClosed) { 2131 releaseInflater(inf); 2132 this.in.close(); 2133 isClosed = true; 2134 streams.remove(this); 2135 } 2136 } 2137 // Override fill() method to provide an extra "dummy" byte 2138 // at the end of the input stream. This is required when 2139 // using the "nowrap" Inflater option. (it appears the new 2140 // zlib in 7 does not need it, but keep it for now) 2141 protected void fill() throws IOException { 2142 if (eof) { 2143 throw new EOFException( 2144 "Unexpected end of ZLIB input stream"); 2145 } 2146 len = this.in.read(buf, 0, buf.length); 2147 if (len == -1) { 2148 buf[0] = 0; 2149 len = 1; 2150 eof = true; 2151 } 2152 inf.setInput(buf, 0, len); 2153 } 2154 private boolean eof; 2155 2156 public int available() { 2157 if (isClosed) 2158 return 0; 2159 long avail = size - inf.getBytesWritten(); 2160 return avail > (long) Integer.MAX_VALUE ? 2161 Integer.MAX_VALUE : (int) avail; 2162 } 2163 }; 2164 } else if (e.method == METHOD_STORED) { 2165 // TBD: wrap/ it does not seem necessary 2166 } else { 2167 throw new ZipException("invalid compression method"); 2168 } 2169 streams.add(eis); 2170 return eis; 2171 } 2172 2173 // Inner class implementing the input stream used to read 2174 // a (possibly compressed) zip file entry. 2175 private class EntryInputStream extends InputStream { 2176 private final SeekableByteChannel zfch; // local ref to zipfs's "ch". 
zipfs.ch might 2177 // point to a new channel after sync() 2178 private long pos; // current position within entry data 2179 private long rem; // number of remaining bytes within entry 2180 2181 EntryInputStream(Entry e, SeekableByteChannel zfch) 2182 throws IOException 2183 { 2184 this.zfch = zfch; 2185 rem = e.csize; 2186 pos = e.locoff; 2187 if (pos == -1) { 2188 Entry e2 = getEntry(e.name); 2189 if (e2 == null) { 2190 throw new ZipException("invalid loc for entry <" + getString(e.name) + ">"); 2191 } 2192 pos = e2.locoff; 2193 } 2194 pos = -pos; // lazy initialize the real data offset 2195 } 2196 2197 public int read(byte[] b, int off, int len) throws IOException { 2198 ensureOpen(); 2199 initDataPos(); 2200 if (rem == 0) { 2201 return -1; 2202 } 2203 if (len <= 0) { 2204 return 0; 2205 } 2206 if (len > rem) { 2207 len = (int) rem; 2208 } 2209 // readFullyAt() 2210 long n; 2211 ByteBuffer bb = ByteBuffer.wrap(b); 2212 bb.position(off); 2213 bb.limit(off + len); 2214 synchronized(zfch) { 2215 n = zfch.position(pos).read(bb); 2216 } 2217 if (n > 0) { 2218 pos += n; 2219 rem -= n; 2220 } 2221 if (rem == 0) { 2222 close(); 2223 } 2224 return (int)n; 2225 } 2226 2227 public int read() throws IOException { 2228 byte[] b = new byte[1]; 2229 if (read(b, 0, 1) == 1) { 2230 return b[0] & 0xff; 2231 } else { 2232 return -1; 2233 } 2234 } 2235 2236 public long skip(long n) { 2237 ensureOpen(); 2238 if (n > rem) 2239 n = rem; 2240 pos += n; 2241 rem -= n; 2242 if (rem == 0) { 2243 close(); 2244 } 2245 return n; 2246 } 2247 2248 public int available() { 2249 return rem > Integer.MAX_VALUE ? Integer.MAX_VALUE : (int) rem; 2250 } 2251 2252 public void close() { 2253 rem = 0; 2254 streams.remove(this); 2255 } 2256 2257 private void initDataPos() throws IOException { 2258 if (pos <= 0) { 2259 pos = -pos + locpos; 2260 byte[] buf = new byte[LOCHDR]; 2261 if (readFullyAt(buf, 0, buf.length, pos) != LOCHDR) { 2262 throw new ZipException("invalid loc " + pos + " for entry reading"); 2263 } 2264 pos += LOCHDR + LOCNAM(buf) + LOCEXT(buf); 2265 } 2266 } 2267 } 2268 2269 // Maxmum number of de/inflater we cache 2270 private final int MAX_FLATER = 20; 2271 // List of available Inflater objects for decompression 2272 private final List<Inflater> inflaters = new ArrayList<>(); 2273 2274 // Gets an inflater from the list of available inflaters or allocates 2275 // a new one. 2276 private Inflater getInflater() { 2277 synchronized (inflaters) { 2278 int size = inflaters.size(); 2279 if (size > 0) { 2280 return inflaters.remove(size - 1); 2281 } else { 2282 return new Inflater(true); 2283 } 2284 } 2285 } 2286 2287 // Releases the specified inflater to the list of available inflaters. 2288 private void releaseInflater(Inflater inf) { 2289 synchronized (inflaters) { 2290 if (inflaters.size() < MAX_FLATER) { 2291 inf.reset(); 2292 inflaters.add(inf); 2293 } else { 2294 inf.end(); 2295 } 2296 } 2297 } 2298 2299 // List of available Deflater objects for compression 2300 private final List<Deflater> deflaters = new ArrayList<>(); 2301 2302 // Gets a deflater from the list of available deflaters or allocates 2303 // a new one. 2304 private Deflater getDeflater() { 2305 synchronized (deflaters) { 2306 int size = deflaters.size(); 2307 if (size > 0) { 2308 return deflaters.remove(size - 1); 2309 } else { 2310 return new Deflater(Deflater.DEFAULT_COMPRESSION, true); 2311 } 2312 } 2313 } 2314 2315 // Releases the specified inflater to the list of available inflaters. 
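    // Like releaseInflater() above: the deflater is reset and returned to
    // the pool while fewer than MAX_FLATER are cached, otherwise end() is
    // called to free its native resources.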
2316 private void releaseDeflater(Deflater def) { 2317 synchronized (deflaters) { 2318 if (deflaters.size() < MAX_FLATER) { 2319 def.reset(); 2320 deflaters.add(def); 2321 } else { 2322 def.end(); 2323 } 2324 } 2325 } 2326 2327 // End of central directory record 2328 static class END { 2329 // The fields that are commented out below are not used by anyone and write() uses "0" 2330 // int disknum; 2331 // int sdisknum; 2332 // int endsub; 2333 int centot; // 4 bytes 2334 long cenlen; // 4 bytes 2335 long cenoff; // 4 bytes 2336 // int comlen; // comment length 2337 // byte[] comment; 2338 2339 // members of Zip64 end of central directory locator 2340 // int diskNum; 2341 long endpos; 2342 // int disktot; 2343 2344 void write(OutputStream os, long offset, boolean forceEnd64) throws IOException { 2345 boolean hasZip64 = forceEnd64; // false; 2346 long xlen = cenlen; 2347 long xoff = cenoff; 2348 if (xlen >= ZIP64_MINVAL) { 2349 xlen = ZIP64_MINVAL; 2350 hasZip64 = true; 2351 } 2352 if (xoff >= ZIP64_MINVAL) { 2353 xoff = ZIP64_MINVAL; 2354 hasZip64 = true; 2355 } 2356 int count = centot; 2357 if (count >= ZIP64_MINVAL32) { 2358 count = ZIP64_MINVAL32; 2359 hasZip64 = true; 2360 } 2361 if (hasZip64) { 2362 //zip64 end of central directory record 2363 writeInt(os, ZIP64_ENDSIG); // zip64 END record signature 2364 writeLong(os, ZIP64_ENDHDR - 12); // size of zip64 end 2365 writeShort(os, 45); // version made by 2366 writeShort(os, 45); // version needed to extract 2367 writeInt(os, 0); // number of this disk 2368 writeInt(os, 0); // central directory start disk 2369 writeLong(os, centot); // number of directory entries on disk 2370 writeLong(os, centot); // number of directory entries 2371 writeLong(os, cenlen); // length of central directory 2372 writeLong(os, cenoff); // offset of central directory 2373 2374 //zip64 end of central directory locator 2375 writeInt(os, ZIP64_LOCSIG); // zip64 END locator signature 2376 writeInt(os, 0); // zip64 END start disk 2377 writeLong(os, offset); // offset of zip64 END 2378 writeInt(os, 1); // total number of disks (?) 2379 } 2380 writeInt(os, ENDSIG); // END record signature 2381 writeShort(os, 0); // number of this disk 2382 writeShort(os, 0); // central directory start disk 2383 writeShort(os, count); // number of directory entries on disk 2384 writeShort(os, count); // total number of directory entries 2385 writeInt(os, xlen); // length of central directory 2386 writeInt(os, xoff); // offset of central directory 2387 writeShort(os, 0); // zip file comment, not used 2388 } 2389 } 2390 2391 // Internal node that links a "name" to its pos in cen table. 2392 // The node itself can be used as a "key" to lookup itself in 2393 // the HashMap inodes. 
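    // Nodes are also linked into the in-memory directory tree through the
    // "child" (first child) and "sibling" (next sibling) fields, which is
    // what directory iteration walks instead of rescanning the CEN table.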
2394 static class IndexNode { 2395 byte[] name; 2396 int hashcode; // node is hashable/hashed by its name 2397 boolean isdir; 2398 int pos = -1; // position in cen table, -1 means the 2399 // entry does not exist in zip file 2400 IndexNode child; // first child 2401 IndexNode sibling; // next sibling 2402 2403 IndexNode() {} 2404 2405 IndexNode(byte[] name, boolean isdir) { 2406 name(name); 2407 this.isdir = isdir; 2408 this.pos = -1; 2409 } 2410 2411 IndexNode(byte[] name, int pos) { 2412 name(name); 2413 this.pos = pos; 2414 } 2415 2416 // constructor for initCEN() (1) remove trailing '/' (2) pad leading '/' 2417 IndexNode(byte[] cen, int pos, int nlen) { 2418 int noff = pos + CENHDR; 2419 if (cen[noff + nlen - 1] == '/') { 2420 isdir = true; 2421 nlen--; 2422 } 2423 if (nlen > 0 && cen[noff] == '/') { 2424 name = Arrays.copyOfRange(cen, noff, noff + nlen); 2425 } else { 2426 name = new byte[nlen + 1]; 2427 System.arraycopy(cen, noff, name, 1, nlen); 2428 name[0] = '/'; 2429 } 2430 name(normalize(name)); 2431 this.pos = pos; 2432 } 2433 2434 // Normalize the IndexNode.name field. 2435 private byte[] normalize(byte[] path) { 2436 int len = path.length; 2437 if (len == 0) 2438 return path; 2439 byte prevC = 0; 2440 for (int pathPos = 0; pathPos < len; pathPos++) { 2441 byte c = path[pathPos]; 2442 if (c == '/' && prevC == '/') 2443 return normalize(path, pathPos - 1); 2444 prevC = c; 2445 } 2446 if (len > 1 && prevC == '/') { 2447 return Arrays.copyOf(path, len - 1); 2448 } 2449 return path; 2450 } 2451 2452 private byte[] normalize(byte[] path, int off) { 2453 // As we know we have at least one / to trim, we can reduce 2454 // the size of the resulting array 2455 byte[] to = new byte[path.length - 1]; 2456 int pathPos = 0; 2457 while (pathPos < off) { 2458 to[pathPos] = path[pathPos]; 2459 pathPos++; 2460 } 2461 int toPos = pathPos; 2462 byte prevC = 0; 2463 while (pathPos < path.length) { 2464 byte c = path[pathPos++]; 2465 if (c == '/' && prevC == '/') 2466 continue; 2467 to[toPos++] = c; 2468 prevC = c; 2469 } 2470 if (toPos > 1 && to[toPos - 1] == '/') 2471 toPos--; 2472 return (toPos == to.length) ? to : Arrays.copyOf(to, toPos); 2473 } 2474 2475 private static final ThreadLocal<IndexNode> cachedKey = new ThreadLocal<>(); 2476 2477 static final IndexNode keyOf(byte[] name) { // get a lookup key; 2478 IndexNode key = cachedKey.get(); 2479 if (key == null) { 2480 key = new IndexNode(name, -1); 2481 cachedKey.set(key); 2482 } 2483 return key.as(name); 2484 } 2485 2486 final void name(byte[] name) { 2487 this.name = name; 2488 this.hashcode = Arrays.hashCode(name); 2489 } 2490 2491 final IndexNode as(byte[] name) { // reuse the node, mostly 2492 name(name); // as a lookup "key" 2493 return this; 2494 } 2495 2496 boolean isDir() { 2497 return isdir; 2498 } 2499 2500 @Override 2501 public boolean equals(Object other) { 2502 if (!(other instanceof IndexNode)) { 2503 return false; 2504 } 2505 if (other instanceof ParentLookup) { 2506 return ((ParentLookup)other).equals(this); 2507 } 2508 return Arrays.equals(name, ((IndexNode)other).name); 2509 } 2510 2511 @Override 2512 public int hashCode() { 2513 return hashcode; 2514 } 2515 2516 @Override 2517 public String toString() { 2518 return new String(name) + (isdir ? 
" (dir)" : " ") + ", index: " + pos; 2519 } 2520 } 2521 2522 static class Entry extends IndexNode implements ZipFileAttributes { 2523 static final int CEN = 1; // entry read from cen 2524 static final int NEW = 2; // updated contents in bytes or file 2525 static final int FILECH = 3; // fch update in "file" 2526 static final int COPY = 4; // copy of a CEN entry 2527 2528 byte[] bytes; // updated content bytes 2529 Path file; // use tmp file to store bytes; 2530 int type = CEN; // default is the entry read from cen 2531 2532 // entry attributes 2533 int version; 2534 int flag; 2535 int posixPerms = -1; // posix permissions 2536 int method = -1; // compression method 2537 long mtime = -1; // last modification time (in DOS time) 2538 long atime = -1; // last access time 2539 long ctime = -1; // create time 2540 long crc = -1; // crc-32 of entry data 2541 long csize = -1; // compressed size of entry data 2542 long size = -1; // uncompressed size of entry data 2543 byte[] extra; 2544 2545 // CEN 2546 // The fields that are commented out below are not used by anyone and write() uses "0" 2547 // int versionMade; 2548 // int disk; 2549 // int attrs; 2550 // long attrsEx; 2551 long locoff; 2552 byte[] comment; 2553 2554 Entry(byte[] name, boolean isdir, int method) { 2555 name(name); 2556 this.isdir = isdir; 2557 this.mtime = this.ctime = this.atime = System.currentTimeMillis(); 2558 this.crc = 0; 2559 this.size = 0; 2560 this.csize = 0; 2561 this.method = method; 2562 } 2563 2564 @SuppressWarnings("unchecked") 2565 Entry(byte[] name, int type, boolean isdir, int method, FileAttribute<?>... attrs) { 2566 this(name, isdir, method); 2567 this.type = type; 2568 for (FileAttribute<?> attr : attrs) { 2569 String attrName = attr.name(); 2570 if (attrName.equals("posix:permissions")) { 2571 posixPerms = ZipUtils.permsToFlags((Set<PosixFilePermission>)attr.value()); 2572 } 2573 } 2574 } 2575 2576 Entry(byte[] name, Path file, int type, FileAttribute<?>... attrs) { 2577 this(name, type, false, METHOD_STORED, attrs); 2578 this.file = file; 2579 } 2580 2581 Entry(Entry e, int type, int compressionMethod) { 2582 this(e, type); 2583 this.method = compressionMethod; 2584 } 2585 2586 Entry(Entry e, int type) { 2587 name(e.name); 2588 this.isdir = e.isdir; 2589 this.version = e.version; 2590 this.ctime = e.ctime; 2591 this.atime = e.atime; 2592 this.mtime = e.mtime; 2593 this.crc = e.crc; 2594 this.size = e.size; 2595 this.csize = e.csize; 2596 this.method = e.method; 2597 this.extra = e.extra; 2598 /* 2599 this.versionMade = e.versionMade; 2600 this.disk = e.disk; 2601 this.attrs = e.attrs; 2602 this.attrsEx = e.attrsEx; 2603 */ 2604 this.locoff = e.locoff; 2605 this.comment = e.comment; 2606 this.posixPerms = e.posixPerms; 2607 this.type = type; 2608 } 2609 2610 Entry(ZipFileSystem zipfs, IndexNode inode) throws IOException { 2611 readCEN(zipfs, inode); 2612 } 2613 2614 // Calculates a suitable base for the version number to 2615 // be used for fields version made by/version needed to extract. 
2616 // The lower bytes of these 2 byte fields hold the version number 2617 // (value/10 = major; value%10 = minor) 2618 // For different features certain minimum versions apply: 2619 // stored = 10 (1.0), deflated = 20 (2.0), zip64 = 45 (4.5) 2620 private int version(boolean zip64) throws ZipException { 2621 if (zip64) { 2622 return 45; 2623 } 2624 if (method == METHOD_DEFLATED) 2625 return 20; 2626 else if (method == METHOD_STORED) 2627 return 10; 2628 throw new ZipException("unsupported compression method"); 2629 } 2630 2631 /** 2632 * Adds information about compatibility of file attribute information 2633 * to a version value. 2634 */ 2635 private int versionMadeBy(int version) { 2636 return (posixPerms < 0) ? version : 2637 VERSION_MADE_BY_BASE_UNIX | (version & 0xff); 2638 } 2639 2640 ///////////////////// CEN ////////////////////// 2641 private void readCEN(ZipFileSystem zipfs, IndexNode inode) throws IOException { 2642 byte[] cen = zipfs.cen; 2643 int pos = inode.pos; 2644 if (!cenSigAt(cen, pos)) 2645 throw new ZipException("invalid CEN header (bad signature)"); 2646 version = CENVER(cen, pos); 2647 flag = CENFLG(cen, pos); 2648 method = CENHOW(cen, pos); 2649 mtime = dosToJavaTime(CENTIM(cen, pos)); 2650 crc = CENCRC(cen, pos); 2651 csize = CENSIZ(cen, pos); 2652 size = CENLEN(cen, pos); 2653 int nlen = CENNAM(cen, pos); 2654 int elen = CENEXT(cen, pos); 2655 int clen = CENCOM(cen, pos); 2656 /* 2657 versionMade = CENVEM(cen, pos); 2658 disk = CENDSK(cen, pos); 2659 attrs = CENATT(cen, pos); 2660 attrsEx = CENATX(cen, pos); 2661 */ 2662 if (CENVEM_FA(cen, pos) == FILE_ATTRIBUTES_UNIX) { 2663 posixPerms = CENATX_PERMS(cen, pos) & 0xFFF; // 12 bits for setuid, setgid, sticky + perms 2664 } 2665 locoff = CENOFF(cen, pos); 2666 pos += CENHDR; 2667 this.name = inode.name; 2668 this.isdir = inode.isdir; 2669 this.hashcode = inode.hashcode; 2670 2671 pos += nlen; 2672 if (elen > 0) { 2673 extra = Arrays.copyOfRange(cen, pos, pos + elen); 2674 pos += elen; 2675 readExtra(zipfs); 2676 } 2677 if (clen > 0) { 2678 comment = Arrays.copyOfRange(cen, pos, pos + clen); 2679 } 2680 } 2681 2682 private int writeCEN(OutputStream os) throws IOException { 2683 long csize0 = csize; 2684 long size0 = size; 2685 long locoff0 = locoff; 2686 int elen64 = 0; // extra for ZIP64 2687 int elenNTFS = 0; // extra for NTFS (a/c/mtime) 2688 int elenEXTT = 0; // extra for Extended Timestamp 2689 boolean foundExtraTime = false; // if time stamp NTFS, EXTT present 2690 2691 byte[] zname = isdir ? toDirectoryPath(name) : name; 2692 2693 // confirm size/length 2694 int nlen = (zname != null) ? zname.length - 1 : 0; // name has [0] as "slash" 2695 int elen = (extra != null) ? extra.length : 0; 2696 int eoff = 0; 2697 int clen = (comment != null) ? 
comment.length : 0; 2698 if (csize >= ZIP64_MINVAL) { 2699 csize0 = ZIP64_MINVAL; 2700 elen64 += 8; // csize(8) 2701 } 2702 if (size >= ZIP64_MINVAL) { 2703 size0 = ZIP64_MINVAL; // size(8) 2704 elen64 += 8; 2705 } 2706 if (locoff >= ZIP64_MINVAL) { 2707 locoff0 = ZIP64_MINVAL; 2708 elen64 += 8; // offset(8) 2709 } 2710 if (elen64 != 0) { 2711 elen64 += 4; // header and data sz 4 bytes 2712 } 2713 boolean zip64 = (elen64 != 0); 2714 int version0 = version(zip64); 2715 while (eoff + 4 < elen) { 2716 int tag = SH(extra, eoff); 2717 int sz = SH(extra, eoff + 2); 2718 if (tag == EXTID_EXTT || tag == EXTID_NTFS) { 2719 foundExtraTime = true; 2720 } 2721 eoff += (4 + sz); 2722 } 2723 if (!foundExtraTime) { 2724 if (isWindows) { // use NTFS 2725 elenNTFS = 36; // total 36 bytes 2726 } else { // Extended Timestamp otherwise 2727 elenEXTT = 9; // only mtime in cen 2728 } 2729 } 2730 writeInt(os, CENSIG); // CEN header signature 2731 writeShort(os, versionMadeBy(version0)); // version made by 2732 writeShort(os, version0); // version needed to extract 2733 writeShort(os, flag); // general purpose bit flag 2734 writeShort(os, method); // compression method 2735 // last modification time 2736 writeInt(os, (int)javaToDosTime(mtime)); 2737 writeInt(os, crc); // crc-32 2738 writeInt(os, csize0); // compressed size 2739 writeInt(os, size0); // uncompressed size 2740 writeShort(os, nlen); 2741 writeShort(os, elen + elen64 + elenNTFS + elenEXTT); 2742 2743 if (comment != null) { 2744 writeShort(os, Math.min(clen, 0xffff)); 2745 } else { 2746 writeShort(os, 0); 2747 } 2748 writeShort(os, 0); // starting disk number 2749 writeShort(os, 0); // internal file attributes (unused) 2750 writeInt(os, posixPerms > 0 ? posixPerms << 16 : 0); // external file 2751 // attributes, used for storing posix 2752 // permissions 2753 writeInt(os, locoff0); // relative offset of local header 2754 writeBytes(os, zname, 1, nlen); 2755 if (zip64) { 2756 writeShort(os, EXTID_ZIP64);// Zip64 extra 2757 writeShort(os, elen64 - 4); // size of "this" extra block 2758 if (size0 == ZIP64_MINVAL) 2759 writeLong(os, size); 2760 if (csize0 == ZIP64_MINVAL) 2761 writeLong(os, csize); 2762 if (locoff0 == ZIP64_MINVAL) 2763 writeLong(os, locoff); 2764 } 2765 if (elenNTFS != 0) { 2766 writeShort(os, EXTID_NTFS); 2767 writeShort(os, elenNTFS - 4); 2768 writeInt(os, 0); // reserved 2769 writeShort(os, 0x0001); // NTFS attr tag 2770 writeShort(os, 24); 2771 writeLong(os, javaToWinTime(mtime)); 2772 writeLong(os, javaToWinTime(atime)); 2773 writeLong(os, javaToWinTime(ctime)); 2774 } 2775 if (elenEXTT != 0) { 2776 writeShort(os, EXTID_EXTT); 2777 writeShort(os, elenEXTT - 4); 2778 if (ctime == -1) 2779 os.write(0x3); // mtime and atime 2780 else 2781 os.write(0x7); // mtime, atime and ctime 2782 writeInt(os, javaToUnixTime(mtime)); 2783 } 2784 if (extra != null) // whatever not recognized 2785 writeBytes(os, extra); 2786 if (comment != null) //TBD: 0, Math.min(commentBytes.length, 0xffff)); 2787 writeBytes(os, comment); 2788 return CENHDR + nlen + elen + clen + elen64 + elenNTFS + elenEXTT; 2789 } 2790 2791 ///////////////////// LOC ////////////////////// 2792 2793 private int writeLOC(OutputStream os) throws IOException { 2794 byte[] zname = isdir ? toDirectoryPath(name) : name; 2795 int nlen = (zname != null) ? zname.length - 1 : 0; // [0] is slash 2796 int elen = (extra != null) ? 
extra.length : 0; 2797 boolean foundExtraTime = false; // if extra timestamp present 2798 int eoff = 0; 2799 int elen64 = 0; 2800 boolean zip64 = false; 2801 int elenEXTT = 0; 2802 int elenNTFS = 0; 2803 writeInt(os, LOCSIG); // LOC header signature 2804 if ((flag & FLAG_DATADESCR) != 0) { 2805 writeShort(os, version(false)); // version needed to extract 2806 writeShort(os, flag); // general purpose bit flag 2807 writeShort(os, method); // compression method 2808 // last modification time 2809 writeInt(os, (int)javaToDosTime(mtime)); 2810 // store size, uncompressed size, and crc-32 in data descriptor 2811 // immediately following compressed entry data 2812 writeInt(os, 0); 2813 writeInt(os, 0); 2814 writeInt(os, 0); 2815 } else { 2816 if (csize >= ZIP64_MINVAL || size >= ZIP64_MINVAL) { 2817 elen64 = 20; //headid(2) + size(2) + size(8) + csize(8) 2818 zip64 = true; 2819 } 2820 writeShort(os, version(zip64)); // version needed to extract 2821 writeShort(os, flag); // general purpose bit flag 2822 writeShort(os, method); // compression method 2823 // last modification time 2824 writeInt(os, (int)javaToDosTime(mtime)); 2825 writeInt(os, crc); // crc-32 2826 if (zip64) { 2827 writeInt(os, ZIP64_MINVAL); 2828 writeInt(os, ZIP64_MINVAL); 2829 } else { 2830 writeInt(os, csize); // compressed size 2831 writeInt(os, size); // uncompressed size 2832 } 2833 } 2834 while (eoff + 4 < elen) { 2835 int tag = SH(extra, eoff); 2836 int sz = SH(extra, eoff + 2); 2837 if (tag == EXTID_EXTT || tag == EXTID_NTFS) { 2838 foundExtraTime = true; 2839 } 2840 eoff += (4 + sz); 2841 } 2842 if (!foundExtraTime) { 2843 if (isWindows) { 2844 elenNTFS = 36; // NTFS, total 36 bytes 2845 } else { // on unix use "ext time" 2846 elenEXTT = 9; 2847 if (atime != -1) 2848 elenEXTT += 4; 2849 if (ctime != -1) 2850 elenEXTT += 4; 2851 } 2852 } 2853 writeShort(os, nlen); 2854 writeShort(os, elen + elen64 + elenNTFS + elenEXTT); 2855 writeBytes(os, zname, 1, nlen); 2856 if (zip64) { 2857 writeShort(os, EXTID_ZIP64); 2858 writeShort(os, 16); 2859 writeLong(os, size); 2860 writeLong(os, csize); 2861 } 2862 if (elenNTFS != 0) { 2863 writeShort(os, EXTID_NTFS); 2864 writeShort(os, elenNTFS - 4); 2865 writeInt(os, 0); // reserved 2866 writeShort(os, 0x0001); // NTFS attr tag 2867 writeShort(os, 24); 2868 writeLong(os, javaToWinTime(mtime)); 2869 writeLong(os, javaToWinTime(atime)); 2870 writeLong(os, javaToWinTime(ctime)); 2871 } 2872 if (elenEXTT != 0) { 2873 writeShort(os, EXTID_EXTT); 2874 writeShort(os, elenEXTT - 4);// size for the folowing data block 2875 int fbyte = 0x1; 2876 if (atime != -1) // mtime and atime 2877 fbyte |= 0x2; 2878 if (ctime != -1) // mtime, atime and ctime 2879 fbyte |= 0x4; 2880 os.write(fbyte); // flags byte 2881 writeInt(os, javaToUnixTime(mtime)); 2882 if (atime != -1) 2883 writeInt(os, javaToUnixTime(atime)); 2884 if (ctime != -1) 2885 writeInt(os, javaToUnixTime(ctime)); 2886 } 2887 if (extra != null) { 2888 writeBytes(os, extra); 2889 } 2890 return LOCHDR + nlen + elen + elen64 + elenNTFS + elenEXTT; 2891 } 2892 2893 // Data Descriptor 2894 private int writeEXT(OutputStream os) throws IOException { 2895 writeInt(os, EXTSIG); // EXT header signature 2896 writeInt(os, crc); // crc-32 2897 if (csize >= ZIP64_MINVAL || size >= ZIP64_MINVAL) { 2898 writeLong(os, csize); 2899 writeLong(os, size); 2900 return 24; 2901 } else { 2902 writeInt(os, csize); // compressed size 2903 writeInt(os, size); // uncompressed size 2904 return 16; 2905 } 2906 } 2907 2908 // read NTFS, UNIX and ZIP64 data from cen.extra 
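        // Recognized timestamp/zip64 blocks are consumed here; since the CEN
        // extended timestamp normally carries only mtime, readLocEXTT() is
        // used to pick up atime/ctime from the LOC extra field when needed.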
2909 private void readExtra(ZipFileSystem zipfs) throws IOException { 2910 // Note that Section 4.5, Extensible data fields, of the PKWARE ZIP File 2911 // Format Specification does not mandate a specific order for the 2912 // data in the extra field, therefore Zip FS cannot assume the data 2913 // is written in the same order by Zip libraries as Zip FS. 2914 if (extra == null) 2915 return; 2916 int elen = extra.length; 2917 int off = 0; 2918 int newOff = 0; 2919 boolean hasZip64LocOffset = false; 2920 while (off + 4 < elen) { 2921 // extra spec: HeaderID+DataSize+Data 2922 int pos = off; 2923 int tag = SH(extra, pos); 2924 int sz = SH(extra, pos + 2); 2925 pos += 4; 2926 if (pos + sz > elen) // invalid data 2927 break; 2928 switch (tag) { 2929 case EXTID_ZIP64 : 2930 if (size == ZIP64_MINVAL) { 2931 if (pos + 8 > elen) // invalid zip64 extra 2932 break; // fields, just skip 2933 size = LL(extra, pos); 2934 pos += 8; 2935 } 2936 if (csize == ZIP64_MINVAL) { 2937 if (pos + 8 > elen) 2938 break; 2939 csize = LL(extra, pos); 2940 pos += 8; 2941 } 2942 if (locoff == ZIP64_MINVAL) { 2943 if (pos + 8 > elen) 2944 break; 2945 locoff = LL(extra, pos); 2946 } 2947 break; 2948 case EXTID_NTFS: 2949 if (sz < 32) 2950 break; 2951 pos += 4; // reserved 4 bytes 2952 if (SH(extra, pos) != 0x0001) 2953 break; 2954 if (SH(extra, pos + 2) != 24) 2955 break; 2956 // override the loc field, datatime here is 2957 // more "accurate" 2958 mtime = winToJavaTime(LL(extra, pos + 4)); 2959 atime = winToJavaTime(LL(extra, pos + 12)); 2960 ctime = winToJavaTime(LL(extra, pos + 20)); 2961 break; 2962 case EXTID_EXTT: 2963 // spec says the Extended timestamp in cen only has mtime 2964 // need to read the loc to get the extra a/ctime, if flag 2965 // "zipinfo-time" is not specified to false; 2966 // there is performance cost (move up to loc and read) to 2967 // access the loc table foreach entry; 2968 if (zipfs.noExtt) { 2969 if (sz == 5) 2970 mtime = unixToJavaTime(LG(extra, pos + 1)); 2971 break; 2972 } 2973 // If the LOC offset is 0xFFFFFFFF, then we need to read the 2974 // LOC offset from the EXTID_ZIP64 extra data. Therefore 2975 // wait until all of the CEN extra data fields have been processed 2976 // prior to reading the LOC extra data field in order to obtain 2977 // the Info-ZIP Extended Timestamp. 2978 if (locoff != ZIP64_MINVAL) { 2979 readLocEXTT(zipfs); 2980 } else { 2981 hasZip64LocOffset = true; 2982 } 2983 break; 2984 default: // unknown tag 2985 System.arraycopy(extra, off, extra, newOff, sz + 4); 2986 newOff += (sz + 4); 2987 } 2988 off += (sz + 4); 2989 } 2990 2991 // We need to read the LOC extra data and the LOC offset was obtained 2992 // from the EXTID_ZIP64 field. 
2993 if (hasZip64LocOffset) { 2994 readLocEXTT(zipfs); 2995 } 2996 2997 if (newOff != 0 && newOff != extra.length) 2998 extra = Arrays.copyOf(extra, newOff); 2999 else 3000 extra = null; 3001 } 3002 3003 /** 3004 * Read the LOC extra field to obtain the Info-ZIP Extended Timestamp fields 3005 * @param zipfs The Zip FS to use 3006 * @throws IOException If an error occurs 3007 */ 3008 private void readLocEXTT(ZipFileSystem zipfs) throws IOException { 3009 byte[] buf = new byte[LOCHDR]; 3010 if (zipfs.readFullyAt(buf, 0, buf.length , locoff) 3011 != buf.length) 3012 throw new ZipException("loc: reading failed"); 3013 if (!locSigAt(buf, 0)) 3014 throw new ZipException("R" 3015 + Long.toString(getSig(buf, 0), 16)); 3016 int locElen = LOCEXT(buf); 3017 if (locElen < 9) // EXTT is at least 9 bytes 3018 return; 3019 int locNlen = LOCNAM(buf); 3020 buf = new byte[locElen]; 3021 if (zipfs.readFullyAt(buf, 0, buf.length , locoff + LOCHDR + locNlen) 3022 != buf.length) 3023 throw new ZipException("loc extra: reading failed"); 3024 int locPos = 0; 3025 while (locPos + 4 < buf.length) { 3026 int locTag = SH(buf, locPos); 3027 int locSZ = SH(buf, locPos + 2); 3028 locPos += 4; 3029 if (locTag != EXTID_EXTT) { 3030 locPos += locSZ; 3031 continue; 3032 } 3033 int end = locPos + locSZ - 4; 3034 int flag = CH(buf, locPos++); 3035 if ((flag & 0x1) != 0 && locPos <= end) { 3036 mtime = unixToJavaTime(LG(buf, locPos)); 3037 locPos += 4; 3038 } 3039 if ((flag & 0x2) != 0 && locPos <= end) { 3040 atime = unixToJavaTime(LG(buf, locPos)); 3041 locPos += 4; 3042 } 3043 if ((flag & 0x4) != 0 && locPos <= end) { 3044 ctime = unixToJavaTime(LG(buf, locPos)); 3045 } 3046 break; 3047 } 3048 } 3049 3050 @Override 3051 public String toString() { 3052 StringBuilder sb = new StringBuilder(1024); 3053 Formatter fm = new Formatter(sb); 3054 fm.format(" name : %s%n", new String(name)); 3055 fm.format(" creationTime : %tc%n", creationTime().toMillis()); 3056 fm.format(" lastAccessTime : %tc%n", lastAccessTime().toMillis()); 3057 fm.format(" lastModifiedTime: %tc%n", lastModifiedTime().toMillis()); 3058 fm.format(" isRegularFile : %b%n", isRegularFile()); 3059 fm.format(" isDirectory : %b%n", isDirectory()); 3060 fm.format(" isSymbolicLink : %b%n", isSymbolicLink()); 3061 fm.format(" isOther : %b%n", isOther()); 3062 fm.format(" fileKey : %s%n", fileKey()); 3063 fm.format(" size : %d%n", size()); 3064 fm.format(" compressedSize : %d%n", compressedSize()); 3065 fm.format(" crc : %x%n", crc()); 3066 fm.format(" method : %d%n", method()); 3067 Set<PosixFilePermission> permissions = storedPermissions().orElse(null); 3068 if (permissions != null) { 3069 fm.format(" permissions : %s%n", permissions); 3070 } 3071 fm.close(); 3072 return sb.toString(); 3073 } 3074 3075 ///////// basic file attributes /////////// 3076 @Override 3077 public FileTime creationTime() { 3078 return FileTime.fromMillis(ctime == -1 ? mtime : ctime); 3079 } 3080 3081 @Override 3082 public boolean isDirectory() { 3083 return isDir(); 3084 } 3085 3086 @Override 3087 public boolean isOther() { 3088 return false; 3089 } 3090 3091 @Override 3092 public boolean isRegularFile() { 3093 return !isDir(); 3094 } 3095 3096 @Override 3097 public FileTime lastAccessTime() { 3098 return FileTime.fromMillis(atime == -1 ? 
mtime : atime); 3099 } 3100 3101 @Override 3102 public FileTime lastModifiedTime() { 3103 return FileTime.fromMillis(mtime); 3104 } 3105 3106 @Override 3107 public long size() { 3108 return size; 3109 } 3110 3111 @Override 3112 public boolean isSymbolicLink() { 3113 return false; 3114 } 3115 3116 @Override 3117 public Object fileKey() { 3118 return null; 3119 } 3120 3121 ///////// zip file attributes /////////// 3122 3123 @Override 3124 public long compressedSize() { 3125 return csize; 3126 } 3127 3128 @Override 3129 public long crc() { 3130 return crc; 3131 } 3132 3133 @Override 3134 public int method() { 3135 return method; 3136 } 3137 3138 @Override 3139 public byte[] extra() { 3140 if (extra != null) 3141 return Arrays.copyOf(extra, extra.length); 3142 return null; 3143 } 3144 3145 @Override 3146 public byte[] comment() { 3147 if (comment != null) 3148 return Arrays.copyOf(comment, comment.length); 3149 return null; 3150 } 3151 3152 @Override 3153 public Optional<Set<PosixFilePermission>> storedPermissions() { 3154 Set<PosixFilePermission> perms = null; 3155 if (posixPerms != -1) { 3156 perms = new HashSet<>(PosixFilePermission.values().length); 3157 for (PosixFilePermission perm : PosixFilePermission.values()) { 3158 if ((posixPerms & ZipUtils.permToFlag(perm)) != 0) { 3159 perms.add(perm); 3160 } 3161 } 3162 } 3163 return Optional.ofNullable(perms); 3164 } 3165 } 3166 3167 final class PosixEntry extends Entry implements PosixFileAttributes { 3168 private UserPrincipal owner = defaultOwner; 3169 private GroupPrincipal group = defaultGroup; 3170 3171 PosixEntry(byte[] name, boolean isdir, int method) { 3172 super(name, isdir, method); 3173 } 3174 3175 PosixEntry(byte[] name, int type, boolean isdir, int method, FileAttribute<?>... attrs) { 3176 super(name, type, isdir, method, attrs); 3177 } 3178 3179 PosixEntry(byte[] name, Path file, int type, FileAttribute<?>... attrs) { 3180 super(name, file, type, attrs); 3181 } 3182 3183 PosixEntry(PosixEntry e, int type, int compressionMethod) { 3184 super(e, type); 3185 this.method = compressionMethod; 3186 } 3187 3188 PosixEntry(PosixEntry e, int type) { 3189 super(e, type); 3190 this.owner = e.owner; 3191 this.group = e.group; 3192 } 3193 3194 PosixEntry(ZipFileSystem zipfs, IndexNode inode) throws IOException { 3195 super(zipfs, inode); 3196 } 3197 3198 @Override 3199 public UserPrincipal owner() { 3200 return owner; 3201 } 3202 3203 @Override 3204 public GroupPrincipal group() { 3205 return group; 3206 } 3207 3208 @Override 3209 public Set<PosixFilePermission> permissions() { 3210 return storedPermissions().orElse(Set.copyOf(defaultPermissions)); 3211 } 3212 } 3213 3214 // purely for parent lookup, so we don't have to copy the parent 3215 // name every time 3216 static class ParentLookup extends IndexNode { 3217 int len; 3218 ParentLookup() {} 3219 3220 final ParentLookup as(byte[] name, int len) { // as a lookup "key" 3221 name(name, len); 3222 return this; 3223 } 3224 3225 void name(byte[] name, int len) { 3226 this.name = name; 3227 this.len = len; 3228 // calculate the hashcode the same way as Arrays.hashCode() does 3229 int result = 1; 3230 for (int i = 0; i < len; i++) 3231 result = 31 * result + name[i]; 3232 this.hashcode = result; 3233 } 3234 3235 @Override 3236 public boolean equals(Object other) { 3237 if (!(other instanceof IndexNode)) { 3238 return false; 3239 } 3240 byte[] oname = ((IndexNode)other).name; 3241 return Arrays.equals(name, 0, len, 3242 oname, 0, oname.length); 3243 } 3244 } 3245 } 3246
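// A minimal usage sketch (illustrative only, not part of this class): the
// zip file system is normally obtained through the FileSystems API rather
// than constructed directly, e.g.
//
//   URI uri = URI.create("jar:" + Path.of("example.zip").toUri());
//   try (FileSystem zipfs = FileSystems.newFileSystem(uri, Map.of("create", "true"))) {
//       Files.writeString(zipfs.getPath("/hello.txt"), "hello");
//   }
//
// "example.zip" and the entry name are placeholders; the env map takes the
// provider properties this class reads in its constructor.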