Merge changes from topic 'packed-batch-ref-update'

* changes:
  Add tests for updating single refs to missing objects
  Fix deleting symrefs
  RefDirectory: Throw exception if CAS of packed ref list fails
  ReceiveCommand: Explicitly check constructor preconditions
  BatchRefUpdate: Document when getPushOptions is null
diff --git a/org.eclipse.jgit.test/tst/org/eclipse/jgit/internal/storage/dfs/DeltaBaseCacheTest.java b/org.eclipse.jgit.test/tst/org/eclipse/jgit/internal/storage/dfs/DeltaBaseCacheTest.java
index 5bef9fa..32d711f 100644
--- a/org.eclipse.jgit.test/tst/org/eclipse/jgit/internal/storage/dfs/DeltaBaseCacheTest.java
+++ b/org.eclipse.jgit.test/tst/org/eclipse/jgit/internal/storage/dfs/DeltaBaseCacheTest.java
@@ -57,13 +57,14 @@
 public class DeltaBaseCacheTest {
 	private static final int SZ = 512;
 
-	private DfsPackKey key;
+	private DfsStreamKey key;
 	private DeltaBaseCache cache;
 	private TestRng rng;
 
 	@Before
 	public void setUp() {
-		key = new DfsPackKey();
+		DfsRepositoryDescription repo = new DfsRepositoryDescription("test");
+		key = DfsStreamKey.of(repo, "test.key");
 		cache = new DeltaBaseCache(SZ);
 		rng = new TestRng(getClass().getSimpleName());
 	}
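
A note on the key swap above: DfsPackKey hashed by object identity, so only the single interned instance could address cached data. DfsStreamKey is a value key; a minimal sketch, reusing the test's repository description and stream name:

    DfsRepositoryDescription repo = new DfsRepositoryDescription("test");
    DfsStreamKey k1 = DfsStreamKey.of(repo, "test.key");
    DfsStreamKey k2 = DfsStreamKey.of(repo, "test.key");
    // Independently created keys for the same repository and name are
    // interchangeable in the cache; two DfsPackKey instances never were.
    assert k1 != k2 && k1.equals(k2) && k1.hashCode() == k2.hashCode();
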
diff --git a/org.eclipse.jgit.test/tst/org/eclipse/jgit/internal/storage/dfs/DfsBlockCacheTest.java b/org.eclipse.jgit.test/tst/org/eclipse/jgit/internal/storage/dfs/DfsBlockCacheTest.java
new file mode 100644
index 0000000..2e3ee45
--- /dev/null
+++ b/org.eclipse.jgit.test/tst/org/eclipse/jgit/internal/storage/dfs/DfsBlockCacheTest.java
@@ -0,0 +1,144 @@
+/*
+ * Copyright (C) 2017, Google Inc.
+ * and other copyright owners as documented in the project's IP log.
+ *
+ * This program and the accompanying materials are made available
+ * under the terms of the Eclipse Distribution License v1.0 which
+ * accompanies this distribution, is reproduced below, and is
+ * available at http://www.eclipse.org/org/documents/edl-v10.php
+ *
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ *   notice, this list of conditions and the following disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ *   copyright notice, this list of conditions and the following
+ *   disclaimer in the documentation and/or other materials provided
+ *   with the distribution.
+ *
+ * - Neither the name of the Eclipse Foundation, Inc. nor the
+ *   names of its contributors may be used to endorse or promote
+ *   products derived from this software without specific prior
+ *   written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
+ * CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
+ * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+package org.eclipse.jgit.internal.storage.dfs;
+
+import static org.eclipse.jgit.lib.Constants.OBJ_BLOB;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.List;
+
+import org.eclipse.jgit.junit.TestRng;
+import org.eclipse.jgit.lib.ObjectId;
+import org.eclipse.jgit.lib.ObjectInserter;
+import org.eclipse.jgit.lib.ObjectReader;
+import org.junit.Before;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.rules.TestName;
+
+public class DfsBlockCacheTest {
+	@Rule
+	public TestName testName = new TestName();
+	private TestRng rng;
+	private DfsBlockCache cache;
+
+	@Before
+	public void setUp() {
+		rng = new TestRng(testName.getMethodName());
+		resetCache();
+	}
+
+	@SuppressWarnings("resource")
+	@Test
+	public void streamKeyReusesBlocks() throws Exception {
+		DfsRepositoryDescription repo = new DfsRepositoryDescription("test");
+		InMemoryRepository r1 = new InMemoryRepository(repo);
+		byte[] content = rng.nextBytes(424242);
+		ObjectId id;
+		try (ObjectInserter ins = r1.newObjectInserter()) {
+			id = ins.insert(OBJ_BLOB, content);
+			ins.flush();
+		}
+
+		long oldSize = cache.getCurrentSize();
+		assertTrue(oldSize > 2000);
+		assertEquals(0, cache.getHitCount());
+
+		List<DfsPackDescription> packs = r1.getObjectDatabase().listPacks();
+		InMemoryRepository r2 = new InMemoryRepository(repo);
+		r2.getObjectDatabase().commitPack(packs, Collections.emptyList());
+		try (ObjectReader rdr = r2.newObjectReader()) {
+			byte[] actual = rdr.open(id, OBJ_BLOB).getBytes();
+			assertTrue(Arrays.equals(content, actual));
+		}
+		assertEquals(0, cache.getMissCount());
+		assertEquals(oldSize, cache.getCurrentSize());
+	}
+
+	@SuppressWarnings("resource")
+	@Test
+	public void weirdBlockSize() throws Exception {
+		DfsRepositoryDescription repo = new DfsRepositoryDescription("test");
+		InMemoryRepository r1 = new InMemoryRepository(repo);
+
+		byte[] content1 = rng.nextBytes(4);
+		byte[] content2 = rng.nextBytes(424242);
+		ObjectId id1;
+		ObjectId id2;
+		try (ObjectInserter ins = r1.newObjectInserter()) {
+			id1 = ins.insert(OBJ_BLOB, content1);
+			id2 = ins.insert(OBJ_BLOB, content2);
+			ins.flush();
+		}
+
+		resetCache();
+		List<DfsPackDescription> packs = r1.getObjectDatabase().listPacks();
+
+		InMemoryRepository r2 = new InMemoryRepository(repo);
+		r2.getObjectDatabase().setReadableChannelBlockSizeForTest(500);
+		r2.getObjectDatabase().commitPack(packs, Collections.emptyList());
+		try (ObjectReader rdr = r2.newObjectReader()) {
+			byte[] actual = rdr.open(id1, OBJ_BLOB).getBytes();
+			assertTrue(Arrays.equals(content1, actual));
+		}
+
+		InMemoryRepository r3 = new InMemoryRepository(repo);
+		r3.getObjectDatabase().setReadableChannelBlockSizeForTest(500);
+		r3.getObjectDatabase().commitPack(packs, Collections.emptyList());
+		try (ObjectReader rdr = r3.newObjectReader()) {
+			byte[] actual = rdr.open(id2, OBJ_BLOB).getBytes();
+			assertTrue(Arrays.equals(content2, actual));
+		}
+	}
+
+	private void resetCache() {
+		DfsBlockCache.reconfigure(new DfsBlockCacheConfig()
+				.setBlockSize(512)
+				.setBlockLimit(1 << 20));
+		cache = DfsBlockCache.getInstance();
+	}
+}
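
The weirdBlockSize test drives the getOrLoad() fix later in this patch: the cache aligns a request to its own block size, but the block actually loaded is aligned to the file's (smaller) block size, so the cache must verify the loaded block covers the requested position. Worked arithmetic for the 512/500 split used above:

    // Cache block size 512, file block size 500, request at position 700.
    long requested = 700;
    long cacheAligned = (requested / 512) * 512; // 512: slot the cache probes
    long fileAligned = (requested / 500) * 500;  // 500: block actually read
    // The block [500, 1000) contains 700, so contains() passes; had the
    // loaded block missed the request, getOrLoad() retries once at the
    // requested position.
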
diff --git a/org.eclipse.jgit/src/org/eclipse/jgit/internal/storage/dfs/BlockBasedFile.java b/org.eclipse.jgit/src/org/eclipse/jgit/internal/storage/dfs/BlockBasedFile.java
new file mode 100644
index 0000000..813e7f4
--- /dev/null
+++ b/org.eclipse.jgit/src/org/eclipse/jgit/internal/storage/dfs/BlockBasedFile.java
@@ -0,0 +1,206 @@
+/*
+ * Copyright (C) 2017, Google Inc.
+ * and other copyright owners as documented in the project's IP log.
+ *
+ * This program and the accompanying materials are made available
+ * under the terms of the Eclipse Distribution License v1.0 which
+ * accompanies this distribution, is reproduced below, and is
+ * available at http://www.eclipse.org/org/documents/edl-v10.php
+ *
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ *   notice, this list of conditions and the following disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ *   copyright notice, this list of conditions and the following
+ *   disclaimer in the documentation and/or other materials provided
+ *   with the distribution.
+ *
+ * - Neither the name of the Eclipse Foundation, Inc. nor the
+ *   names of its contributors may be used to endorse or promote
+ *   products derived from this software without specific prior
+ *   written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
+ * CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
+ * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+package org.eclipse.jgit.internal.storage.dfs;
+
+import java.io.EOFException;
+import java.io.IOException;
+import java.nio.ByteBuffer;
+import java.text.MessageFormat;
+
+import org.eclipse.jgit.annotations.Nullable;
+import org.eclipse.jgit.errors.PackInvalidException;
+import org.eclipse.jgit.internal.storage.pack.PackExt;
+
+/** Block based file stored in {@link DfsBlockCache}. */
+public abstract class BlockBasedFile {
+	/** Cache that owns this file and its data. */
+	final DfsBlockCache cache;
+
+	/** Unique identity of this file while in-memory. */
+	final DfsStreamKey key;
+
+	/** Description of the associated pack file's storage. */
+	final DfsPackDescription desc;
+	final PackExt ext;
+
+	/**
+	 * Preferred alignment for loading blocks from the backing file.
+	 * <p>
+	 * It is initialized to 0 and filled in on the first read made from the
+	 * file. Block sizes may be odd, e.g. 4091, caused by the underlying DFS
+	 * storing 4091 user bytes and 5 bytes block metadata into a lower level
+	 * 4096 byte block on disk.
+	 */
+	volatile int blockSize;
+
+	/**
+	 * Total number of bytes in this pack file.
+	 * <p>
+	 * This field initializes to -1 and gets populated when a block is loaded.
+	 */
+	volatile long length;
+
+	/** True once corruption has been detected that cannot be worked around. */
+	volatile boolean invalid;
+
+	BlockBasedFile(DfsBlockCache cache, DfsPackDescription desc, PackExt ext) {
+		this.cache = cache;
+		this.key = desc.getStreamKey(ext);
+		this.desc = desc;
+		this.ext = ext;
+	}
+
+	String getFileName() {
+		return desc.getFileName(ext);
+	}
+
+	boolean invalid() {
+		return invalid;
+	}
+
+	void setInvalid() {
+		invalid = true;
+	}
+
+	void setBlockSize(int newSize) {
+		blockSize = newSize;
+	}
+
+	long alignToBlock(long pos) {
+		int size = blockSize;
+		if (size == 0)
+			size = cache.getBlockSize();
+		return (pos / size) * size;
+	}
+
+	int blockSize(ReadableChannel rc) {
+		// If the block alignment is not yet known, discover it. Prefer the
+		// larger size from either the cache or the file itself.
+		int size = blockSize;
+		if (size == 0) {
+			size = rc.blockSize();
+			if (size <= 0)
+				size = cache.getBlockSize();
+			else if (size < cache.getBlockSize())
+				size = (cache.getBlockSize() / size) * size;
+			blockSize = size;
+		}
+		return size;
+	}
+
+	DfsBlock readOneBlock(long pos, DfsReader ctx,
+			@Nullable ReadableChannel fileChannel) throws IOException {
+		if (invalid)
+			throw new PackInvalidException(getFileName());
+
+		ctx.stats.readBlock++;
+		long start = System.nanoTime();
+		ReadableChannel rc = fileChannel != null ? fileChannel
+				: ctx.db.openFile(desc, ext);
+		try {
+			int size = blockSize(rc);
+			pos = (pos / size) * size;
+
+			// If the size of the file is not yet known, try to discover it.
+			// Channels may choose to return -1 to indicate they don't
+			// know the length yet, in this case read up to the size unit
+			// given by the caller, then recheck the length.
+			long len = length;
+			if (len < 0) {
+				len = rc.size();
+				if (0 <= len)
+					length = len;
+			}
+
+			if (0 <= len && len < pos + size)
+				size = (int) (len - pos);
+			if (size <= 0)
+				throw new EOFException(MessageFormat.format(
+						DfsText.get().shortReadOfBlock, Long.valueOf(pos),
+						getFileName(), Long.valueOf(0), Long.valueOf(0)));
+
+			byte[] buf = new byte[size];
+			rc.position(pos);
+			int cnt = read(rc, ByteBuffer.wrap(buf, 0, size));
+			ctx.stats.readBlockBytes += cnt;
+			if (cnt != size) {
+				if (0 <= len) {
+					throw new EOFException(MessageFormat.format(
+							DfsText.get().shortReadOfBlock, Long.valueOf(pos),
+							getFileName(), Integer.valueOf(size),
+							Integer.valueOf(cnt)));
+				}
+
+				// Assume the entire thing was read in a single shot, compact
+				// the buffer to only the space required.
+				byte[] n = new byte[cnt];
+				System.arraycopy(buf, 0, n, 0, n.length);
+				buf = n;
+			} else if (len < 0) {
+				// With no length at the start of the read, the channel should
+				// have the length available at the end.
+				length = len = rc.size();
+			}
+
+			return new DfsBlock(key, pos, buf);
+		} finally {
+			if (rc != fileChannel) {
+				rc.close();
+			}
+			ctx.stats.readBlockMicros += elapsedMicros(start);
+		}
+	}
+
+	static int read(ReadableChannel rc, ByteBuffer buf) throws IOException {
+		int n;
+		do {
+			n = rc.read(buf);
+		} while (0 < n && buf.hasRemaining());
+		return buf.position();
+	}
+
+	static long elapsedMicros(long start) {
+		return (System.nanoTime() - start) / 1000L;
+	}
+}
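
blockSize(ReadableChannel) above negotiates between the configured cache block size and whatever the channel reports, preferring the larger and rounding the cache size down to a multiple of an odd file size. Worked cases, assuming a 4096-byte cache block size:

    int cacheSize = 4096;
    // Channel reports <= 0: fall back to cacheSize (4096).
    // Channel reports 8192: the larger file size wins as-is.
    // Channel reports 4091 (4096 bytes on disk minus 5 bytes of metadata):
    int fileSize = 4091; // 4091 < 4096, so round the cache size down:
    int chosen = (cacheSize / fileSize) * fileSize; // 1 * 4091 = 4091
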
diff --git a/org.eclipse.jgit/src/org/eclipse/jgit/internal/storage/dfs/DeltaBaseCache.java b/org.eclipse.jgit/src/org/eclipse/jgit/internal/storage/dfs/DeltaBaseCache.java
index 64a63d7..bd4b4d2 100644
--- a/org.eclipse.jgit/src/org/eclipse/jgit/internal/storage/dfs/DeltaBaseCache.java
+++ b/org.eclipse.jgit/src/org/eclipse/jgit/internal/storage/dfs/DeltaBaseCache.java
@@ -75,7 +75,7 @@
 		table = new Entry[1 << TABLE_BITS];
 	}
 
-	Entry get(DfsPackKey key, long position) {
+	Entry get(DfsStreamKey key, long position) {
 		Entry e = table[hash(position)];
 		for (; e != null; e = e.tableNext) {
 			if (e.offset == position && key.equals(e.pack)) {
@@ -86,7 +86,7 @@
 		return null;
 	}
 
-	void put(DfsPackKey key, long offset, int objectType, byte[] data) {
+	void put(DfsStreamKey key, long offset, int objectType, byte[] data) {
 		if (data.length > maxByteCount)
 			return; // Too large to cache.
 
@@ -189,7 +189,7 @@
 	}
 
 	static class Entry {
-		final DfsPackKey pack;
+		final DfsStreamKey pack;
 		final long offset;
 		final int type;
 		final byte[] data;
@@ -198,7 +198,7 @@
 		Entry lruPrev;
 		Entry lruNext;
 
-		Entry(DfsPackKey key, long offset, int type, byte[] data) {
+		Entry(DfsStreamKey key, long offset, int type, byte[] data) {
 			this.pack = key;
 			this.offset = offset;
 			this.type = type;
diff --git a/org.eclipse.jgit/src/org/eclipse/jgit/internal/storage/dfs/DfsBlock.java b/org.eclipse.jgit/src/org/eclipse/jgit/internal/storage/dfs/DfsBlock.java
index 4a33fb8..dae922e 100644
--- a/org.eclipse.jgit/src/org/eclipse/jgit/internal/storage/dfs/DfsBlock.java
+++ b/org.eclipse.jgit/src/org/eclipse/jgit/internal/storage/dfs/DfsBlock.java
@@ -52,9 +52,9 @@
 
 import org.eclipse.jgit.internal.storage.pack.PackOutputStream;
 
-/** A cached slice of a {@link DfsPackFile}. */
+/** A cached slice of a {@link BlockBasedFile}. */
 final class DfsBlock {
-	final DfsPackKey pack;
+	final DfsStreamKey stream;
 
 	final long start;
 
@@ -62,8 +62,8 @@
 
 	private final byte[] block;
 
-	DfsBlock(DfsPackKey p, long pos, byte[] buf) {
-		pack = p;
+	DfsBlock(DfsStreamKey p, long pos, byte[] buf) {
+		stream = p;
 		start = pos;
 		end = pos + buf.length;
 		block = buf;
@@ -73,8 +73,8 @@
 		return block.length;
 	}
 
-	boolean contains(DfsPackKey want, long pos) {
-		return pack == want && start <= pos && pos < end;
+	boolean contains(DfsStreamKey want, long pos) {
+		return stream.equals(want) && start <= pos && pos < end;
 	}
 
 	int copy(long pos, byte[] dstbuf, int dstoff, int cnt) {
diff --git a/org.eclipse.jgit/src/org/eclipse/jgit/internal/storage/dfs/DfsBlockCache.java b/org.eclipse.jgit/src/org/eclipse/jgit/internal/storage/dfs/DfsBlockCache.java
index 96a2db9..45202b5 100644
--- a/org.eclipse.jgit/src/org/eclipse/jgit/internal/storage/dfs/DfsBlockCache.java
+++ b/org.eclipse.jgit/src/org/eclipse/jgit/internal/storage/dfs/DfsBlockCache.java
@@ -45,10 +45,6 @@
 package org.eclipse.jgit.internal.storage.dfs;
 
 import java.io.IOException;
-import java.util.Collection;
-import java.util.Collections;
-import java.util.Map;
-import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.atomic.AtomicLong;
 import java.util.concurrent.atomic.AtomicReferenceArray;
 import java.util.concurrent.locks.ReentrantLock;
@@ -57,12 +53,12 @@
 import org.eclipse.jgit.internal.JGitText;
 
 /**
- * Caches slices of a {@link DfsPackFile} in memory for faster read access.
+ * Caches slices of a {@link BlockBasedFile} in memory for faster read access.
  * <p>
  * The DfsBlockCache serves as a Java based "buffer cache", loading segments of
- * a DfsPackFile into the JVM heap prior to use. As JGit often wants to do reads
- * of only tiny slices of a file, the DfsBlockCache tries to smooth out these
- * tiny reads into larger block-sized IO operations.
+ * a BlockBasedFile into the JVM heap prior to use. As JGit often wants to do
+ * reads of only tiny slices of a file, the DfsBlockCache tries to smooth out
+ * these tiny reads into larger block-sized IO operations.
  * <p>
  * Whenever a cache miss occurs, loading is invoked by exactly one thread for
  * the given <code>(DfsPackKey,position)</code> key tuple. This is ensured by an
@@ -109,14 +105,7 @@
 	 *             settings, usually too low of a limit.
 	 */
 	public static void reconfigure(DfsBlockCacheConfig cfg) {
-		DfsBlockCache nc = new DfsBlockCache(cfg);
-		DfsBlockCache oc = cache;
-		cache = nc;
-
-		if (oc != null) {
-			for (DfsPackFile pack : oc.getPackFiles())
-				pack.key.cachedSize.set(0);
-		}
+		cache = new DfsBlockCache(cfg);
 	}
 
 	/** @return the currently active DfsBlockCache. */
@@ -154,12 +143,6 @@
 	/** As {@link #blockSize} is a power of 2, bits to shift for a / blockSize. */
 	private final int blockSizeShift;
 
-	/** Cache of pack files, indexed by description. */
-	private final Map<DfsPackDescription, DfsPackFile> packCache;
-
-	/** View of pack files in the pack cache. */
-	private final Collection<DfsPackFile> packFiles;
-
 	/** Number of times a block was found in the cache. */
 	private final AtomicLong statHit;
 
@@ -195,13 +178,12 @@
 		blockSizeShift = Integer.numberOfTrailingZeros(blockSize);
 
 		clockLock = new ReentrantLock(true /* fair */);
-		clockHand = new Ref<>(new DfsPackKey(), -1, 0, null);
+		String none = ""; //$NON-NLS-1$
+		clockHand = new Ref<>(
+				DfsStreamKey.of(new DfsRepositoryDescription(none), none),
+				-1, 0, null);
 		clockHand.next = clockHand;
 
-		packCache = new ConcurrentHashMap<>(
-				16, 0.75f, 1);
-		packFiles = Collections.unmodifiableCollection(packCache.values());
-
 		statHit = new AtomicLong();
 		statMiss = new AtomicLong();
 	}
@@ -250,38 +232,6 @@
 		return statEvict;
 	}
 
-	/**
-	 * Get the pack files stored in this cache.
-	 *
-	 * @return a collection of pack files, some of which may not actually be
-	 *             present; the caller should check the pack's cached size.
-	 */
-	public Collection<DfsPackFile> getPackFiles() {
-		return packFiles;
-	}
-
-	DfsPackFile getOrCreate(DfsPackDescription dsc, DfsPackKey key) {
-		// TODO This table grows without bound. It needs to clean up
-		// entries that aren't in cache anymore, and aren't being used
-		// by a live DfsObjDatabase reference.
-
-		DfsPackFile pack = packCache.get(dsc);
-		if (pack != null && !pack.invalid()) {
-			return pack;
-		}
-
-		// 'pack' either didn't exist or was invalid. Compute a new
-		// entry atomically (guaranteed by ConcurrentHashMap).
-		return packCache.compute(dsc, (k, v) -> {
-			if (v != null && !v.invalid()) { // valid value added by
-				return v;                    // another thread
-			} else {
-				return new DfsPackFile(
-						this, dsc, key != null ? key : new DfsPackKey());
-			}
-		});
-	}
-
 	private int hash(int packHash, long off) {
 		return packHash + (int) (off >>> blockSizeShift);
 	}
@@ -303,28 +253,28 @@
 	/**
 	 * Lookup a cached object, creating and loading it if it doesn't exist.
 	 *
-	 * @param pack
+	 * @param file
 	 *            the pack that "contains" the cached object.
 	 * @param position
 	 *            offset within <code>pack</code> of the object.
 	 * @param ctx
 	 *            current thread's reader.
-	 * @param packChannel
+	 * @param fileChannel
 	 *            optional channel to read {@code pack}.
 	 * @return the object reference.
 	 * @throws IOException
 	 *             the reference was not in the cache and could not be loaded.
 	 */
-	DfsBlock getOrLoad(DfsPackFile pack, long position, DfsReader ctx,
-			@Nullable ReadableChannel packChannel) throws IOException {
+	DfsBlock getOrLoad(BlockBasedFile file, long position, DfsReader ctx,
+			@Nullable ReadableChannel fileChannel) throws IOException {
 		final long requestedPosition = position;
-		position = pack.alignToBlock(position);
+		position = file.alignToBlock(position);
 
-		DfsPackKey key = pack.key;
+		DfsStreamKey key = file.key;
 		int slot = slot(key, position);
 		HashEntry e1 = table.get(slot);
 		DfsBlock v = scan(e1, key, position);
-		if (v != null) {
+		if (v != null && v.contains(key, requestedPosition)) {
 			ctx.stats.blockCacheHit++;
 			statHit.incrementAndGet();
 			return v;
@@ -348,7 +298,7 @@
 			statMiss.incrementAndGet();
 			boolean credit = true;
 			try {
-				v = pack.readOneBlock(position, ctx, packChannel);
+				v = file.readOneBlock(requestedPosition, ctx, fileChannel);
 				credit = false;
 			} finally {
 				if (credit)
@@ -361,7 +311,6 @@
 				e2 = table.get(slot);
 			}
 
-			key.cachedSize.addAndGet(v.size());
 			Ref<DfsBlock> ref = new Ref<>(key, position, v.size(), v);
 			ref.hot = true;
 			for (;;) {
@@ -377,9 +326,9 @@
 
 		// If the block size changed from the default, it is possible the block
 		// that was loaded is the wrong block for the requested position.
-		if (v.contains(pack.key, requestedPosition))
+		if (v.contains(file.key, requestedPosition))
 			return v;
-		return getOrLoad(pack, requestedPosition, ctx, packChannel);
+		return getOrLoad(file, requestedPosition, ctx, fileChannel);
 	}
 
 	@SuppressWarnings("unchecked")
@@ -409,7 +358,6 @@
 					dead.next = null;
 					dead.value = null;
 					live -= dead.size;
-					dead.pack.cachedSize.addAndGet(-dead.size);
 					statEvict++;
 				} while (maxBytes < live);
 				clockHand = prev;
@@ -442,10 +390,14 @@
 	}
 
 	void put(DfsBlock v) {
-		put(v.pack, v.start, v.size(), v);
+		put(v.stream, v.start, v.size(), v);
 	}
 
-	<T> Ref<T> put(DfsPackKey key, long pos, int size, T v) {
+	<T> Ref<T> putRef(DfsStreamKey key, long size, T v) {
+		return put(key, 0, (int) Math.min(size, Integer.MAX_VALUE), v);
+	}
+
+	<T> Ref<T> put(DfsStreamKey key, long pos, int size, T v) {
 		int slot = slot(key, pos);
 		HashEntry e1 = table.get(slot);
 		Ref<T> ref = scanRef(e1, key, pos);
@@ -465,7 +417,6 @@
 				}
 			}
 
-			key.cachedSize.addAndGet(size);
 			ref = new Ref<>(key, pos, size, v);
 			ref.hot = true;
 			for (;;) {
@@ -481,12 +432,12 @@
 		return ref;
 	}
 
-	boolean contains(DfsPackKey key, long position) {
+	boolean contains(DfsStreamKey key, long position) {
 		return scan(table.get(slot(key, position)), key, position) != null;
 	}
 
 	@SuppressWarnings("unchecked")
-	<T> T get(DfsPackKey key, long position) {
+	<T> T get(DfsStreamKey key, long position) {
 		T val = (T) scan(table.get(slot(key, position)), key, position);
 		if (val == null)
 			statMiss.incrementAndGet();
@@ -495,31 +446,36 @@
 		return val;
 	}
 
-	private <T> T scan(HashEntry n, DfsPackKey pack, long position) {
-		Ref<T> r = scanRef(n, pack, position);
+	private <T> T scan(HashEntry n, DfsStreamKey key, long position) {
+		Ref<T> r = scanRef(n, key, position);
 		return r != null ? r.get() : null;
 	}
 
+	<T> Ref<T> getRef(DfsStreamKey key) {
+		Ref<T> r = scanRef(table.get(slot(key, 0)), key, 0);
+		if (r != null)
+			statHit.incrementAndGet();
+		else
+			statMiss.incrementAndGet();
+		return r;
+	}
+
 	@SuppressWarnings("unchecked")
-	private <T> Ref<T> scanRef(HashEntry n, DfsPackKey pack, long position) {
+	private <T> Ref<T> scanRef(HashEntry n, DfsStreamKey key, long position) {
 		for (; n != null; n = n.next) {
 			Ref<T> r = n.ref;
-			if (r.pack == pack && r.position == position)
+			if (r.position == position && r.key.equals(key))
 				return r.get() != null ? r : null;
 		}
 		return null;
 	}
 
-	void remove(DfsPackFile pack) {
-		packCache.remove(pack.getPackDescription());
+	private int slot(DfsStreamKey key, long position) {
+		return (hash(key.hash, position) >>> 1) % tableSize;
 	}
 
-	private int slot(DfsPackKey pack, long position) {
-		return (hash(pack.hash, position) >>> 1) % tableSize;
-	}
-
-	private ReentrantLock lockFor(DfsPackKey pack, long position) {
-		return loadLocks[(hash(pack.hash, position) >>> 1) % loadLocks.length];
+	private ReentrantLock lockFor(DfsStreamKey key, long position) {
+		return loadLocks[(hash(key.hash, position) >>> 1) % loadLocks.length];
 	}
 
 	private static HashEntry clean(HashEntry top) {
@@ -545,15 +501,15 @@
 	}
 
 	static final class Ref<T> {
-		final DfsPackKey pack;
+		final DfsStreamKey key;
 		final long position;
 		final int size;
 		volatile T value;
 		Ref next;
 		volatile boolean hot;
 
-		Ref(DfsPackKey pack, long position, int size, T v) {
-			this.pack = pack;
+		Ref(DfsStreamKey key, long position, int size, T v) {
+			this.key = key;
 			this.position = position;
 			this.size = size;
 			this.value = v;
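
putRef() and getRef() added above cache whole parsed structures (pack index, reverse index, bitmap index) at position 0 under a stream key, replacing the old negative-offset slots DfsPackFile carved out of its DfsPackKey space. A sketch of the calling shape, as DfsPackFile.idx() uses it below; loadIndex() is an assumed stand-in for the real channel read:

    DfsStreamKey idxKey = desc.getStreamKey(PackExt.INDEX);
    DfsBlockCache.Ref<PackIndex> ref = cache.getRef(idxKey);
    PackIndex idx = ref != null ? ref.get() : null;
    if (idx == null) {
        idx = loadIndex(); // assumed helper reading the INDEX stream
        long sz = idx.getObjectCount() * (Constants.OBJECT_ID_LENGTH + 8);
        ref = cache.putRef(idxKey, sz, idx); // clamped to Integer.MAX_VALUE
    }
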
diff --git a/org.eclipse.jgit/src/org/eclipse/jgit/internal/storage/dfs/DfsGarbageCollector.java b/org.eclipse.jgit/src/org/eclipse/jgit/internal/storage/dfs/DfsGarbageCollector.java
index a145662..ce2b053 100644
--- a/org.eclipse.jgit/src/org/eclipse/jgit/internal/storage/dfs/DfsGarbageCollector.java
+++ b/org.eclipse.jgit/src/org/eclipse/jgit/internal/storage/dfs/DfsGarbageCollector.java
@@ -563,22 +563,25 @@
 		try (DfsOutputStream out = objdb.writeFile(pack, PACK)) {
 			pw.writePack(pm, pm, out);
 			pack.addFileExt(PACK);
+			pack.setBlockSize(PACK, out.blockSize());
 		}
 
-		try (CountingOutputStream cnt =
-				new CountingOutputStream(objdb.writeFile(pack, INDEX))) {
+		try (DfsOutputStream out = objdb.writeFile(pack, INDEX)) {
+			CountingOutputStream cnt = new CountingOutputStream(out);
 			pw.writeIndex(cnt);
 			pack.addFileExt(INDEX);
 			pack.setFileSize(INDEX, cnt.getCount());
+			pack.setBlockSize(INDEX, out.blockSize());
 			pack.setIndexVersion(pw.getIndexVersion());
 		}
 
 		if (pw.prepareBitmapIndex(pm)) {
-			try (CountingOutputStream cnt = new CountingOutputStream(
-					objdb.writeFile(pack, BITMAP_INDEX))) {
+			try (DfsOutputStream out = objdb.writeFile(pack, BITMAP_INDEX)) {
+				CountingOutputStream cnt = new CountingOutputStream(out);
 				pw.writeBitmapIndex(cnt);
 				pack.addFileExt(BITMAP_INDEX);
 				pack.setFileSize(BITMAP_INDEX, cnt.getCount());
+				pack.setBlockSize(BITMAP_INDEX, out.blockSize());
 			}
 		}
 
@@ -587,8 +590,6 @@
 		pack.setLastModified(startTimeMillis);
 		newPackStats.add(stats);
 		newPackObj.add(pw.getObjectSet());
-
-		DfsBlockCache.getInstance().getOrCreate(pack, null);
 		return pack;
 	}
 }
diff --git a/org.eclipse.jgit/src/org/eclipse/jgit/internal/storage/dfs/DfsInserter.java b/org.eclipse.jgit/src/org/eclipse/jgit/internal/storage/dfs/DfsInserter.java
index e65c9fd..01654d4 100644
--- a/org.eclipse.jgit/src/org/eclipse/jgit/internal/storage/dfs/DfsInserter.java
+++ b/org.eclipse.jgit/src/org/eclipse/jgit/internal/storage/dfs/DfsInserter.java
@@ -104,7 +104,7 @@
 	ObjectIdOwnerMap<PackedObjectInfo> objectMap;
 
 	DfsBlockCache cache;
-	DfsPackKey packKey;
+	DfsStreamKey packKey;
 	DfsPackDescription packDsc;
 	PackStream packOut;
 	private boolean rollback;
@@ -221,7 +221,7 @@
 		db.commitPack(Collections.singletonList(packDsc), null);
 		rollback = false;
 
-		DfsPackFile p = cache.getOrCreate(packDsc, packKey);
+		DfsPackFile p = new DfsPackFile(cache, packDsc);
 		if (index != null)
 			p.setPackIndex(index);
 		db.addPack(p);
@@ -281,8 +281,10 @@
 
 		rollback = true;
 		packDsc = db.newPack(DfsObjDatabase.PackSource.INSERT);
-		packOut = new PackStream(db.writeFile(packDsc, PACK));
-		packKey = new DfsPackKey();
+		DfsOutputStream dfsOut = db.writeFile(packDsc, PACK);
+		packDsc.setBlockSize(PACK, dfsOut.blockSize());
+		packOut = new PackStream(dfsOut);
+		packKey = packDsc.getStreamKey(PACK);
 
 		// Write the header as though it were a single object pack.
 		byte[] buf = packOut.hdrBuf;
@@ -312,13 +314,14 @@
 			packIndex = PackIndex.read(buf.openInputStream());
 		}
 
-		DfsOutputStream os = db.writeFile(pack, INDEX);
-		try (CountingOutputStream cnt = new CountingOutputStream(os)) {
+		try (DfsOutputStream os = db.writeFile(pack, INDEX)) {
+			CountingOutputStream cnt = new CountingOutputStream(os);
 			if (buf != null)
 				buf.writeTo(cnt, null);
 			else
 				index(cnt, packHash, list);
 			pack.addFileExt(INDEX);
+			pack.setBlockSize(INDEX, os.blockSize());
 			pack.setFileSize(INDEX, cnt.getCount());
 		} finally {
 			if (buf != null) {
@@ -633,11 +636,11 @@
 		private final int type;
 		private final long size;
 
-		private final DfsPackKey srcPack;
+		private final DfsStreamKey srcPack;
 		private final long pos;
 
 		StreamLoader(ObjectId id, int type, long sz,
-				DfsPackKey key, long pos) {
+				DfsStreamKey key, long pos) {
 			this.id = id;
 			this.type = type;
 			this.size = sz;
diff --git a/org.eclipse.jgit/src/org/eclipse/jgit/internal/storage/dfs/DfsObjDatabase.java b/org.eclipse.jgit/src/org/eclipse/jgit/internal/storage/dfs/DfsObjDatabase.java
index 32ee6c2..76189c1 100644
--- a/org.eclipse.jgit/src/org/eclipse/jgit/internal/storage/dfs/DfsObjDatabase.java
+++ b/org.eclipse.jgit/src/org/eclipse/jgit/internal/storage/dfs/DfsObjDatabase.java
@@ -464,8 +464,8 @@
 			DfsPackFile oldPack = forReuse.remove(dsc);
 			if (oldPack != null) {
 				list.add(oldPack);
-			} else {
-				list.add(cache.getOrCreate(dsc, null));
+			} else if (dsc.hasFileExt(PackExt.PACK)) {
+				list.add(new DfsPackFile(cache, dsc));
 				foundNew = true;
 			}
 		}
@@ -482,8 +482,7 @@
 	}
 
 	private static Map<DfsPackDescription, DfsPackFile> reuseMap(PackList old) {
-		Map<DfsPackDescription, DfsPackFile> forReuse
-			= new HashMap<>();
+		Map<DfsPackDescription, DfsPackFile> forReuse = new HashMap<>();
 		for (DfsPackFile p : old.packs) {
 			if (p.invalid()) {
 				// The pack instance is corrupted, and cannot be safely used
diff --git a/org.eclipse.jgit/src/org/eclipse/jgit/internal/storage/dfs/DfsPackCompactor.java b/org.eclipse.jgit/src/org/eclipse/jgit/internal/storage/dfs/DfsPackCompactor.java
index f7c87a4..ac14c0b 100644
--- a/org.eclipse.jgit/src/org/eclipse/jgit/internal/storage/dfs/DfsPackCompactor.java
+++ b/org.eclipse.jgit/src/org/eclipse/jgit/internal/storage/dfs/DfsPackCompactor.java
@@ -370,27 +370,23 @@
 	private static void writePack(DfsObjDatabase objdb,
 			DfsPackDescription pack,
 			PackWriter pw, ProgressMonitor pm) throws IOException {
-		DfsOutputStream out = objdb.writeFile(pack, PACK);
-		try {
+		try (DfsOutputStream out = objdb.writeFile(pack, PACK)) {
 			pw.writePack(pm, pm, out);
 			pack.addFileExt(PACK);
-		} finally {
-			out.close();
+			pack.setBlockSize(PACK, out.blockSize());
 		}
 	}
 
 	private static void writeIndex(DfsObjDatabase objdb,
 			DfsPackDescription pack,
 			PackWriter pw) throws IOException {
-		DfsOutputStream out = objdb.writeFile(pack, INDEX);
-		try {
+		try (DfsOutputStream out = objdb.writeFile(pack, INDEX)) {
 			CountingOutputStream cnt = new CountingOutputStream(out);
 			pw.writeIndex(cnt);
 			pack.addFileExt(INDEX);
 			pack.setFileSize(INDEX, cnt.getCount());
+			pack.setBlockSize(INDEX, out.blockSize());
 			pack.setIndexVersion(pw.getIndexVersion());
-		} finally {
-			out.close();
 		}
 	}
 
diff --git a/org.eclipse.jgit/src/org/eclipse/jgit/internal/storage/dfs/DfsPackDescription.java b/org.eclipse.jgit/src/org/eclipse/jgit/internal/storage/dfs/DfsPackDescription.java
index e825f1a..58a006e 100644
--- a/org.eclipse.jgit/src/org/eclipse/jgit/internal/storage/dfs/DfsPackDescription.java
+++ b/org.eclipse.jgit/src/org/eclipse/jgit/internal/storage/dfs/DfsPackDescription.java
@@ -45,8 +45,7 @@
 
 import static org.eclipse.jgit.internal.storage.pack.PackExt.PACK;
 
-import java.util.HashMap;
-import java.util.Map;
+import java.util.Arrays;
 
 import org.eclipse.jgit.internal.storage.dfs.DfsObjDatabase.PackSource;
 import org.eclipse.jgit.internal.storage.pack.PackExt;
@@ -62,25 +61,16 @@
  */
 public class DfsPackDescription implements Comparable<DfsPackDescription> {
 	private final DfsRepositoryDescription repoDesc;
-
 	private final String packName;
-
 	private PackSource packSource;
-
 	private long lastModified;
-
-	private final Map<PackExt, Long> sizeMap;
-
+	private long[] sizeMap;
+	private int[] blockSizeMap;
 	private long objectCount;
-
 	private long deltaCount;
-
 	private PackStatistics stats;
-
 	private int extensions;
-
 	private int indexVersion;
-
 	private long estimatedPackSize;
 
 	/**
@@ -102,7 +92,10 @@
 		this.repoDesc = repoDesc;
 		int dot = name.lastIndexOf('.');
 		this.packName = (dot < 0) ? name : name.substring(0, dot);
-		this.sizeMap = new HashMap<>(PackExt.values().length * 2);
+
+		int extCnt = PackExt.values().length;
+		sizeMap = new long[extCnt];
+		blockSizeMap = new int[extCnt];
 	}
 
 	/** @return description of the repository. */
@@ -138,6 +131,15 @@
 		return packName + '.' + ext.getExtension();
 	}
 
+	/**
+	 * @param ext
+	 *            the file extension.
+	 * @return cache key for use by the block cache.
+	 */
+	public DfsStreamKey getStreamKey(PackExt ext) {
+		return DfsStreamKey.of(getRepositoryDescription(), getFileName(ext));
+	}
+
 	/** @return the source of the pack. */
 	public PackSource getPackSource() {
 		return packSource;
@@ -177,7 +179,11 @@
 	 * @return {@code this}
 	 */
 	public DfsPackDescription setFileSize(PackExt ext, long bytes) {
-		sizeMap.put(ext, Long.valueOf(Math.max(0, bytes)));
+		int i = ext.getPosition();
+		if (i >= sizeMap.length) {
+			sizeMap = Arrays.copyOf(sizeMap, i + 1);
+		}
+		sizeMap[i] = Math.max(0, bytes);
 		return this;
 	}
 
@@ -187,8 +193,36 @@
 	 * @return size of the file, in bytes. If 0 the file size is not yet known.
 	 */
 	public long getFileSize(PackExt ext) {
-		Long size = sizeMap.get(ext);
-		return size == null ? 0 : size.longValue();
+		int i = ext.getPosition();
+		return i < sizeMap.length ? sizeMap[i] : 0;
+	}
+
+	/**
+	 * @param ext
+	 *            the file extension.
+	 * @return blockSize of the file, in bytes. If 0 the block size is not
+	 *         yet known and may be discovered when opening the file.
+	 */
+	public int getBlockSize(PackExt ext) {
+		int i = ext.getPosition();
+		return i < blockSizeMap.length ? blockSizeMap[i] : 0;
+	}
+
+	/**
+	 * @param ext
+	 *            the file extension.
+	 * @param blockSize
+	 *            blockSize of the file, in bytes. If 0 the blockSize is not
+	 *            known and will be determined on first read.
+	 * @return {@code this}
+	 */
+	public DfsPackDescription setBlockSize(PackExt ext, int blockSize) {
+		int i = ext.getPosition();
+		if (i >= blockSizeMap.length) {
+			blockSizeMap = Arrays.copyOf(blockSizeMap, i + 1);
+		}
+		blockSizeMap[i] = Math.max(0, blockSize);
+		return this;
 	}
 
 	/**
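
The per-extension HashMap is replaced by arrays indexed by PackExt.getPosition(), grown on demand so a PackExt added after a description was created still works. A small usage sketch (the pack name here is made up):

    DfsPackDescription desc = new DfsPackDescription(
            new DfsRepositoryDescription("test"), "pack-example.pack");
    desc.setFileSize(PackExt.PACK, 1 << 20);
    desc.setBlockSize(PackExt.PACK, 4096);
    long size = desc.getFileSize(PackExt.PACK); // 1048576
    int bs = desc.getBlockSize(PackExt.INDEX);  // 0: not yet known
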
diff --git a/org.eclipse.jgit/src/org/eclipse/jgit/internal/storage/dfs/DfsPackFile.java b/org.eclipse.jgit/src/org/eclipse/jgit/internal/storage/dfs/DfsPackFile.java
index 81972cd..2326219 100644
--- a/org.eclipse.jgit/src/org/eclipse/jgit/internal/storage/dfs/DfsPackFile.java
+++ b/org.eclipse.jgit/src/org/eclipse/jgit/internal/storage/dfs/DfsPackFile.java
@@ -62,7 +62,6 @@
 import java.util.zip.DataFormatException;
 import java.util.zip.Inflater;
 
-import org.eclipse.jgit.annotations.Nullable;
 import org.eclipse.jgit.errors.CorruptObjectException;
 import org.eclipse.jgit.errors.LargeObjectException;
 import org.eclipse.jgit.errors.MissingObjectException;
@@ -73,7 +72,6 @@
 import org.eclipse.jgit.internal.storage.file.PackIndex;
 import org.eclipse.jgit.internal.storage.file.PackReverseIndex;
 import org.eclipse.jgit.internal.storage.pack.BinaryDelta;
-import org.eclipse.jgit.internal.storage.pack.PackExt;
 import org.eclipse.jgit.internal.storage.pack.PackOutputStream;
 import org.eclipse.jgit.internal.storage.pack.StoredObjectRepresentation;
 import org.eclipse.jgit.lib.AbbreviatedObjectId;
@@ -89,53 +87,7 @@
  * delta packed format yielding high compression of lots of object where some
  * objects are similar.
  */
-public final class DfsPackFile {
-	/**
-	 * File offset used to cache {@link #index} in {@link DfsBlockCache}.
-	 * <p>
-	 * To better manage memory, the forward index is stored as a single block in
-	 * the block cache under this file position. A negative value is used
-	 * because it cannot occur in a normal pack file, and it is less likely to
-	 * collide with a valid data block from the file as the high bits will all
-	 * be set when treated as an unsigned long by the cache code.
-	 */
-	private static final long POS_INDEX = -1;
-
-	/** Offset used to cache {@link #reverseIndex}. See {@link #POS_INDEX}. */
-	private static final long POS_REVERSE_INDEX = -2;
-
-	/** Offset used to cache {@link #bitmapIndex}. See {@link #POS_INDEX}. */
-	private static final long POS_BITMAP_INDEX = -3;
-
-	/** Cache that owns this pack file and its data. */
-	private final DfsBlockCache cache;
-
-	/** Description of the pack file's storage. */
-	private final DfsPackDescription packDesc;
-
-	/** Unique identity of this pack while in-memory. */
-	final DfsPackKey key;
-
-	/**
-	 * Total number of bytes in this pack file.
-	 * <p>
-	 * This field initializes to -1 and gets populated when a block is loaded.
-	 */
-	volatile long length;
-
-	/**
-	 * Preferred alignment for loading blocks from the backing file.
-	 * <p>
-	 * It is initialized to 0 and filled in on the first read made from the
-	 * file. Block sizes may be odd, e.g. 4091, caused by the underling DFS
-	 * storing 4091 user bytes and 5 bytes block metadata into a lower level
-	 * 4096 byte block on disk.
-	 */
-	private volatile int blockSize;
-
-	/** True once corruption has been detected that cannot be worked around. */
-	private volatile boolean invalid;
-
+public final class DfsPackFile extends BlockBasedFile {
 	/**
 	 * Lock for initialization of {@link #index} and {@link #corruptObjects}.
 	 * <p>
@@ -168,22 +120,22 @@
 	 *            cache that owns the pack data.
 	 * @param desc
 	 *            description of the pack within the DFS.
-	 * @param key
-	 *            interned key used to identify blocks in the block cache.
 	 */
-	DfsPackFile(DfsBlockCache cache, DfsPackDescription desc, DfsPackKey key) {
-		this.cache = cache;
-		this.packDesc = desc;
-		this.key = key;
+	DfsPackFile(DfsBlockCache cache, DfsPackDescription desc) {
+		super(cache, desc, PACK);
 
-		length = desc.getFileSize(PACK);
-		if (length <= 0)
-			length = -1;
+		int bs = desc.getBlockSize(PACK);
+		if (bs > 0) {
+			setBlockSize(bs);
+		}
+
+		long sz = desc.getFileSize(PACK);
+		length = sz > 0 ? sz : -1;
 	}
 
 	/** @return description that was originally used to configure this pack file. */
 	public DfsPackDescription getPackDescription() {
-		return packDesc;
+		return desc;
 	}
 
 	/**
@@ -194,24 +146,11 @@
 		return idxref != null && idxref.has();
 	}
 
-	/** @return bytes cached in memory for this pack, excluding the index. */
-	public long getCachedSize() {
-		return key.cachedSize.get();
-	}
-
-	String getPackName() {
-		return packDesc.getFileName(PACK);
-	}
-
-	void setBlockSize(int newSize) {
-		blockSize = newSize;
-	}
-
 	void setPackIndex(PackIndex idx) {
 		long objCnt = idx.getObjectCount();
 		int recSize = Constants.OBJECT_ID_LENGTH + 8;
-		int sz = (int) Math.min(objCnt * recSize, Integer.MAX_VALUE);
-		index = cache.put(key, POS_INDEX, sz, idx);
+		long sz = objCnt * recSize;
+		index = cache.putRef(desc.getStreamKey(INDEX), sz, idx);
 	}
 
 	/**
@@ -237,7 +176,7 @@
 		}
 
 		if (invalid)
-			throw new PackInvalidException(getPackName());
+			throw new PackInvalidException(getFileName());
 
 		Repository.getGlobalListenerList()
 				.dispatch(new BeforeDfsPackIndexLoadedEvent(this));
@@ -250,11 +189,21 @@
 					return idx;
 			}
 
+			DfsStreamKey idxKey = desc.getStreamKey(INDEX);
+			idxref = cache.getRef(idxKey);
+			if (idxref != null) {
+				PackIndex idx = idxref.get();
+				if (idx != null) {
+					index = idxref;
+					return idx;
+				}
+			}
+
 			PackIndex idx;
 			try {
 				ctx.stats.readIdx++;
 				long start = System.nanoTime();
-				ReadableChannel rc = ctx.db.openFile(packDesc, INDEX);
+				ReadableChannel rc = ctx.db.openFile(desc, INDEX);
 				try {
 					InputStream in = Channels.newInputStream(rc);
 					int wantSize = 8192;
@@ -271,18 +220,14 @@
 				}
 			} catch (EOFException e) {
 				invalid = true;
-				IOException e2 = new IOException(MessageFormat.format(
+				throw new IOException(MessageFormat.format(
 						DfsText.get().shortReadOfIndex,
-						packDesc.getFileName(INDEX)));
-				e2.initCause(e);
-				throw e2;
+						desc.getFileName(INDEX)), e);
 			} catch (IOException e) {
 				invalid = true;
-				IOException e2 = new IOException(MessageFormat.format(
+				throw new IOException(MessageFormat.format(
 						DfsText.get().cannotReadIndex,
-						packDesc.getFileName(INDEX)));
-				e2.initCause(e);
-				throw e2;
+						desc.getFileName(INDEX)), e);
 			}
 
 			setPackIndex(idx);
@@ -290,17 +235,14 @@
 		}
 	}
 
-	private static long elapsedMicros(long start) {
-		return (System.nanoTime() - start) / 1000L;
-	}
-
 	final boolean isGarbage() {
-		return packDesc.getPackSource() == UNREACHABLE_GARBAGE;
+		return desc.getPackSource() == UNREACHABLE_GARBAGE;
 	}
 
 	PackBitmapIndex getBitmapIndex(DfsReader ctx) throws IOException {
-		if (invalid || isGarbage())
+		if (invalid || isGarbage() || !desc.hasFileExt(BITMAP_INDEX))
 			return null;
+
 		DfsBlockCache.Ref<PackBitmapIndex> idxref = bitmapIndex;
 		if (idxref != null) {
 			PackBitmapIndex idx = idxref.get();
@@ -308,9 +250,6 @@
 				return idx;
 		}
 
-		if (!packDesc.hasFileExt(PackExt.BITMAP_INDEX))
-			return null;
-
 		synchronized (initLock) {
 			idxref = bitmapIndex;
 			if (idxref != null) {
@@ -319,12 +258,22 @@
 					return idx;
 			}
 
+			DfsStreamKey bitmapKey = desc.getStreamKey(BITMAP_INDEX);
+			idxref = cache.getRef(bitmapKey);
+			if (idxref != null) {
+				PackBitmapIndex idx = idxref.get();
+				if (idx != null) {
+					bitmapIndex = idxref;
+					return idx;
+				}
+			}
+
 			long size;
 			PackBitmapIndex idx;
 			try {
 				ctx.stats.readBitmap++;
 				long start = System.nanoTime();
-				ReadableChannel rc = ctx.db.openFile(packDesc, BITMAP_INDEX);
+				ReadableChannel rc = ctx.db.openFile(desc, BITMAP_INDEX);
 				try {
 					InputStream in = Channels.newInputStream(rc);
 					int wantSize = 8192;
@@ -343,21 +292,16 @@
 					ctx.stats.readIdxMicros += elapsedMicros(start);
 				}
 			} catch (EOFException e) {
-				IOException e2 = new IOException(MessageFormat.format(
+				throw new IOException(MessageFormat.format(
 						DfsText.get().shortReadOfIndex,
-						packDesc.getFileName(BITMAP_INDEX)));
-				e2.initCause(e);
-				throw e2;
+						desc.getFileName(BITMAP_INDEX)), e);
 			} catch (IOException e) {
-				IOException e2 = new IOException(MessageFormat.format(
+				throw new IOException(MessageFormat.format(
 						DfsText.get().cannotReadIndex,
-						packDesc.getFileName(BITMAP_INDEX)));
-				e2.initCause(e);
-				throw e2;
+						desc.getFileName(BITMAP_INDEX)), e);
 			}
 
-			bitmapIndex = cache.put(key, POS_BITMAP_INDEX,
-					(int) Math.min(size, Integer.MAX_VALUE), idx);
+			bitmapIndex = cache.putRef(bitmapKey, size, idx);
 			return idx;
 		}
 	}
@@ -378,11 +322,21 @@
 					return revidx;
 			}
 
+			DfsStreamKey revKey =
+					new DfsStreamKey.ForReverseIndex(desc.getStreamKey(INDEX));
+			revref = cache.getRef(revKey);
+			if (revref != null) {
+				PackReverseIndex idx = revref.get();
+				if (idx != null) {
+					reverseIndex = revref;
+					return idx;
+				}
+			}
+
 			PackIndex idx = idx(ctx);
 			PackReverseIndex revidx = new PackReverseIndex(idx);
-			int sz = (int) Math.min(
-					idx.getObjectCount() * 8, Integer.MAX_VALUE);
-			reverseIndex = cache.put(key, POS_REVERSE_INDEX, sz, revidx);
+			long cnt = idx.getObjectCount();
+			reverseIndex = cache.putRef(revKey, cnt * 8, revidx);
 			return revidx;
 		}
 	}
@@ -433,7 +387,6 @@
 
 	/** Release all memory used by this DfsPackFile instance. */
 	public void close() {
-		cache.remove(this);
 		index = null;
 		reverseIndex = null;
 	}
@@ -501,7 +454,7 @@
 				} else {
 					b = cache.get(key, alignToBlock(position));
 					if (b == null) {
-						rc = ctx.db.openFile(packDesc, PACK);
+						rc = ctx.db.openFile(desc, PACK);
 						int sz = ctx.getOptions().getStreamPackBufferSize();
 						if (sz > 0) {
 							rc.setReadAheadBytes(sz);
@@ -525,7 +478,7 @@
 
 	private long copyPackBypassCache(PackOutputStream out, DfsReader ctx)
 			throws IOException {
-		try (ReadableChannel rc = ctx.db.openFile(packDesc, PACK)) {
+		try (ReadableChannel rc = ctx.db.openFile(desc, PACK)) {
 			ByteBuffer buf = newCopyBuffer(out, rc);
 			if (ctx.getOptions().getStreamPackBufferSize() > 0)
 				rc.setReadAheadBytes(ctx.getOptions().getStreamPackBufferSize());
@@ -664,7 +617,7 @@
 					setCorrupt(src.offset);
 					throw new CorruptObjectException(MessageFormat.format(
 							JGitText.get().objectAtHasBadZlibStream,
-							Long.valueOf(src.offset), getPackName()));
+							Long.valueOf(src.offset), getFileName()));
 				}
 			} else if (validate) {
 				assert(crc1 != null);
@@ -706,7 +659,7 @@
 			CorruptObjectException corruptObject = new CorruptObjectException(
 					MessageFormat.format(
 							JGitText.get().objectAtHasBadZlibStream,
-							Long.valueOf(src.offset), getPackName()));
+							Long.valueOf(src.offset), getFileName()));
 			corruptObject.initCause(dataFormat);
 
 			StoredObjectRepresentationNotAvailableException gone;
@@ -768,24 +721,16 @@
 				if (crc2.getValue() != expectedCRC) {
 					throw new CorruptObjectException(MessageFormat.format(
 							JGitText.get().objectAtHasBadZlibStream,
-							Long.valueOf(src.offset), getPackName()));
+							Long.valueOf(src.offset), getFileName()));
 				}
 			}
 		}
 	}
 
-	boolean invalid() {
-		return invalid;
-	}
-
-	void setInvalid() {
-		invalid = true;
-	}
-
 	private IOException packfileIsTruncated() {
 		invalid = true;
 		return new IOException(MessageFormat.format(
-				JGitText.get().packfileIsTruncated, getPackName()));
+				JGitText.get().packfileIsTruncated, getFileName()));
 	}
 
 	private void readFully(long position, byte[] dstbuf, int dstoff, int cnt,
@@ -794,107 +739,10 @@
 			throw new EOFException();
 	}
 
-	long alignToBlock(long pos) {
-		int size = blockSize;
-		if (size == 0)
-			size = cache.getBlockSize();
-		return (pos / size) * size;
-	}
-
 	DfsBlock getOrLoadBlock(long pos, DfsReader ctx) throws IOException {
 		return cache.getOrLoad(this, pos, ctx, null);
 	}
 
-	DfsBlock readOneBlock(long pos, DfsReader ctx,
-			@Nullable ReadableChannel packChannel) throws IOException {
-		if (invalid)
-			throw new PackInvalidException(getPackName());
-
-		ctx.stats.readBlock++;
-		long start = System.nanoTime();
-		ReadableChannel rc = packChannel != null
-				? packChannel
-				: ctx.db.openFile(packDesc, PACK);
-		try {
-			int size = blockSize(rc);
-			pos = (pos / size) * size;
-
-			// If the size of the file is not yet known, try to discover it.
-			// Channels may choose to return -1 to indicate they don't
-			// know the length yet, in this case read up to the size unit
-			// given by the caller, then recheck the length.
-			long len = length;
-			if (len < 0) {
-				len = rc.size();
-				if (0 <= len)
-					length = len;
-			}
-
-			if (0 <= len && len < pos + size)
-				size = (int) (len - pos);
-			if (size <= 0)
-				throw new EOFException(MessageFormat.format(
-						DfsText.get().shortReadOfBlock, Long.valueOf(pos),
-						getPackName(), Long.valueOf(0), Long.valueOf(0)));
-
-			byte[] buf = new byte[size];
-			rc.position(pos);
-			int cnt = read(rc, ByteBuffer.wrap(buf, 0, size));
-			ctx.stats.readBlockBytes += cnt;
-			if (cnt != size) {
-				if (0 <= len) {
-					throw new EOFException(MessageFormat.format(
-						    DfsText.get().shortReadOfBlock,
-						    Long.valueOf(pos),
-						    getPackName(),
-						    Integer.valueOf(size),
-						    Integer.valueOf(cnt)));
-				}
-
-				// Assume the entire thing was read in a single shot, compact
-				// the buffer to only the space required.
-				byte[] n = new byte[cnt];
-				System.arraycopy(buf, 0, n, 0, n.length);
-				buf = n;
-			} else if (len < 0) {
-				// With no length at the start of the read, the channel should
-				// have the length available at the end.
-				length = len = rc.size();
-			}
-
-			return new DfsBlock(key, pos, buf);
-		} finally {
-			if (rc != packChannel) {
-				rc.close();
-			}
-			ctx.stats.readBlockMicros += elapsedMicros(start);
-		}
-	}
-
-	private int blockSize(ReadableChannel rc) {
-		// If the block alignment is not yet known, discover it. Prefer the
-		// larger size from either the cache or the file itself.
-		int size = blockSize;
-		if (size == 0) {
-			size = rc.blockSize();
-			if (size <= 0)
-				size = cache.getBlockSize();
-			else if (size < cache.getBlockSize())
-				size = (cache.getBlockSize() / size) * size;
-			blockSize = size;
-		}
-		return size;
-	}
-
-	private static int read(ReadableChannel rc, ByteBuffer buf)
-			throws IOException {
-		int n;
-		do {
-			n = rc.read(buf);
-		} while (0 < n && buf.hasRemaining());
-		return buf.position();
-	}
-
 	ObjectLoader load(DfsReader ctx, long pos)
 			throws IOException {
 		try {
@@ -1031,7 +879,7 @@
 			CorruptObjectException coe = new CorruptObjectException(
 					MessageFormat.format(
 							JGitText.get().objectAtHasBadZlibStream, Long.valueOf(pos),
-							getPackName()));
+							getFileName()));
 			coe.initCause(dfe);
 			throw coe;
 		}
@@ -1179,7 +1027,7 @@
 			CorruptObjectException coe = new CorruptObjectException(
 					MessageFormat.format(
 							JGitText.get().objectAtHasBadZlibStream, Long.valueOf(pos),
-							getPackName()));
+							getFileName()));
 			coe.initCause(dfe);
 			throw coe;
 		}
diff --git a/org.eclipse.jgit/src/org/eclipse/jgit/internal/storage/dfs/DfsPackKey.java b/org.eclipse.jgit/src/org/eclipse/jgit/internal/storage/dfs/DfsPackKey.java
deleted file mode 100644
index 98a2a94..0000000
--- a/org.eclipse.jgit/src/org/eclipse/jgit/internal/storage/dfs/DfsPackKey.java
+++ /dev/null
@@ -1,60 +0,0 @@
-/*
- * Copyright (C) 2011, Google Inc.
- * and other copyright owners as documented in the project's IP log.
- *
- * This program and the accompanying materials are made available
- * under the terms of the Eclipse Distribution License v1.0 which
- * accompanies this distribution, is reproduced below, and is
- * available at http://www.eclipse.org/org/documents/edl-v10.php
- *
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above copyright
- *   notice, this list of conditions and the following disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- *   copyright notice, this list of conditions and the following
- *   disclaimer in the documentation and/or other materials provided
- *   with the distribution.
- *
- * - Neither the name of the Eclipse Foundation, Inc. nor the
- *   names of its contributors may be used to endorse or promote
- *   products derived from this software without specific prior
- *   written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
- * CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
- * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
- * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
- * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
- * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-package org.eclipse.jgit.internal.storage.dfs;
-
-import java.util.concurrent.atomic.AtomicLong;
-
-final class DfsPackKey {
-	final int hash;
-
-	final AtomicLong cachedSize;
-
-	DfsPackKey() {
-		// Multiply by 31 here so we can more directly combine with another
-		// value without doing the multiply there.
-		//
-		hash = System.identityHashCode(this) * 31;
-		cachedSize = new AtomicLong();
-	}
-}
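
DfsPackKey is gone, but its pre-multiplied hash lives on in DfsStreamKey's constructor further down. The point of the * 31: combining the key hash with a block offset on every cache probe is then a single add. Roughly, with the field names DfsBlockCache uses:

    int combined = key.hash + (int) (position >>> blockSizeShift);
    int slot = (combined >>> 1) % tableSize; // >>> 1 drops the sign bit
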
diff --git a/org.eclipse.jgit/src/org/eclipse/jgit/internal/storage/dfs/DfsPackParser.java b/org.eclipse.jgit/src/org/eclipse/jgit/internal/storage/dfs/DfsPackParser.java
index 6430ea9..fd99db1 100644
--- a/org.eclipse.jgit/src/org/eclipse/jgit/internal/storage/dfs/DfsPackParser.java
+++ b/org.eclipse.jgit/src/org/eclipse/jgit/internal/storage/dfs/DfsPackParser.java
@@ -94,7 +94,7 @@
 	private DfsPackDescription packDsc;
 
 	/** Key used during delta resolution reading delta chains. */
-	private DfsPackKey packKey;
+	private DfsStreamKey packKey;
 
 	/** If the index was small enough, the entire index after writing. */
 	private PackIndex packIndex;
@@ -150,12 +150,13 @@
 			readBlock = null;
 			packDsc.addFileExt(PACK);
 			packDsc.setFileSize(PACK, packEnd);
+			packDsc.setBlockSize(PACK, blockSize);
 
 			writePackIndex();
 			objdb.commitPack(Collections.singletonList(packDsc), null);
 			rollback = false;
 
-			DfsPackFile p = blockCache.getOrCreate(packDsc, packKey);
+			DfsPackFile p = new DfsPackFile(blockCache, packDsc);
 			p.setBlockSize(blockSize);
 			if (packIndex != null)
 				p.setPackIndex(packIndex);
@@ -206,9 +207,9 @@
 		}
 
 		packDsc = objdb.newPack(DfsObjDatabase.PackSource.RECEIVE);
-		packKey = new DfsPackKey();
-
 		out = objdb.writeFile(packDsc, PACK);
+		packKey = packDsc.getStreamKey(PACK);
+
 		int size = out.blockSize();
 		if (size <= 0)
 			size = blockCache.getBlockSize();
diff --git a/org.eclipse.jgit/src/org/eclipse/jgit/internal/storage/dfs/DfsReader.java b/org.eclipse.jgit/src/org/eclipse/jgit/internal/storage/dfs/DfsReader.java
index d611469..4b0d583 100644
--- a/org.eclipse.jgit/src/org/eclipse/jgit/internal/storage/dfs/DfsReader.java
+++ b/org.eclipse.jgit/src/org/eclipse/jgit/internal/storage/dfs/DfsReader.java
@@ -757,8 +757,7 @@
 	}
 
 	void pin(DfsPackFile pack, long position) throws IOException {
-		DfsBlock b = block;
-		if (b == null || !b.contains(pack.key, position)) {
+		if (block == null || !block.contains(pack.key, position)) {
 			// If memory is low, we may need what is in our window field to
 			// be cleaned up by the GC during the get for the next window.
 			// So we always clear it, even though we are just going to set
diff --git a/org.eclipse.jgit/src/org/eclipse/jgit/internal/storage/dfs/DfsStreamKey.java b/org.eclipse.jgit/src/org/eclipse/jgit/internal/storage/dfs/DfsStreamKey.java
new file mode 100644
index 0000000..54a7489
--- /dev/null
+++ b/org.eclipse.jgit/src/org/eclipse/jgit/internal/storage/dfs/DfsStreamKey.java
@@ -0,0 +1,125 @@
+/*
+ * Copyright (C) 2011, Google Inc.
+ * and other copyright owners as documented in the project's IP log.
+ *
+ * This program and the accompanying materials are made available
+ * under the terms of the Eclipse Distribution License v1.0 which
+ * accompanies this distribution, is reproduced below, and is
+ * available at http://www.eclipse.org/org/documents/edl-v10.php
+ *
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ *   notice, this list of conditions and the following disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ *   copyright notice, this list of conditions and the following
+ *   disclaimer in the documentation and/or other materials provided
+ *   with the distribution.
+ *
+ * - Neither the name of the Eclipse Foundation, Inc. nor the
+ *   names of its contributors may be used to endorse or promote
+ *   products derived from this software without specific prior
+ *   written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
+ * CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
+ * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+package org.eclipse.jgit.internal.storage.dfs;
+
+import static java.nio.charset.StandardCharsets.UTF_8;
+
+import java.util.Arrays;
+
+/** Key used by {@link DfsBlockCache} to disambiguate streams. */
+public abstract class DfsStreamKey {
+	/**
+	 * @param repo
+	 *            description of the containing repository.
+	 * @param name
+	 *            name of the stream; the key is derived from its UTF-8 bytes.
+	 * @return key for {@code name}
+	 */
+	public static DfsStreamKey of(DfsRepositoryDescription repo, String name) {
+		return new ByteArrayDfsStreamKey(repo, name.getBytes(UTF_8));
+	}
+
+	final int hash;
+
+	/**
+	 * @param hash
+	 *            hash of the other identifying components of the key.
+	 */
+	protected DfsStreamKey(int hash) {
+		// Multiply by 31 here so we can more directly combine with another
+		// value without doing the multiply there.
+		this.hash = hash * 31;
+	}
+
+	@Override
+	public int hashCode() {
+		return hash;
+	}
+
+	@Override
+	public abstract boolean equals(Object o);
+
+	@SuppressWarnings("boxing")
+	@Override
+	public String toString() {
+		return String.format("DfsStreamKey[hash=%08x]", hash); //$NON-NLS-1$
+	}
+
+	private static final class ByteArrayDfsStreamKey extends DfsStreamKey {
+		private final DfsRepositoryDescription repo;
+		private final byte[] name;
+
+		ByteArrayDfsStreamKey(DfsRepositoryDescription repo, byte[] name) {
+			super(repo.hashCode() * 31 + Arrays.hashCode(name));
+			this.repo = repo;
+			this.name = name;
+		}
+
+		@Override
+		public boolean equals(Object o) {
+			if (o instanceof ByteArrayDfsStreamKey) {
+				ByteArrayDfsStreamKey k = (ByteArrayDfsStreamKey) o;
+				return hash == k.hash
+						&& repo.equals(k.repo)
+						&& Arrays.equals(name, k.name);
+			}
+			return false;
+		}
+	}
+
+	static final class ForReverseIndex extends DfsStreamKey {
+		private final DfsStreamKey idxKey;
+
+		ForReverseIndex(DfsStreamKey idxKey) {
+			super(idxKey.hash + 1);
+			this.idxKey = idxKey;
+		}
+
+		@Override
+		public boolean equals(Object o) {
+			return o instanceof ForReverseIndex
+					&& idxKey.equals(((ForReverseIndex) o).idxKey);
+		}
+	}
+}
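
Unlike the identity-hashed DfsPackKey it replaces, DfsStreamKey compares by
value, so two DfsPackFile instances for the same on-disk stream share cache
entries. A minimal, package-internal sketch of the equality semantics (the
"test" description and stream name are arbitrary; asserts are only for
illustration):

    DfsRepositoryDescription repo = new DfsRepositoryDescription("test");
    DfsStreamKey a = DfsStreamKey.of(repo, "pack-1.pack");
    DfsStreamKey b = DfsStreamKey.of(repo, "pack-1.pack");
    assert a.equals(b) && a.hashCode() == b.hashCode();
    // The derived reverse-index key is distinct from the index key itself:
    assert !new DfsStreamKey.ForReverseIndex(a).equals(a);
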
diff --git a/org.eclipse.jgit/src/org/eclipse/jgit/internal/storage/dfs/InMemoryRepository.java b/org.eclipse.jgit/src/org/eclipse/jgit/internal/storage/dfs/InMemoryRepository.java
index 527e46b..383ed3d 100644
--- a/org.eclipse.jgit/src/org/eclipse/jgit/internal/storage/dfs/InMemoryRepository.java
+++ b/org.eclipse.jgit/src/org/eclipse/jgit/internal/storage/dfs/InMemoryRepository.java
@@ -53,7 +53,7 @@
 
 	static final AtomicInteger packId = new AtomicInteger();
 
-	private final DfsObjDatabase objdb;
+	private final MemObjDatabase objdb;
 	private final RefDatabase refdb;
 	private String gitwebDescription;
 	private boolean performsAtomicTransactions = true;
@@ -75,7 +75,7 @@
 	}
 
 	@Override
-	public DfsObjDatabase getObjectDatabase() {
+	public MemObjDatabase getObjectDatabase() {
 		return objdb;
 	}
 
@@ -106,13 +106,23 @@
 		gitwebDescription = d;
 	}
 
-	private class MemObjDatabase extends DfsObjDatabase {
+	/** DfsObjDatabase used by InMemoryRepository. */
+	public class MemObjDatabase extends DfsObjDatabase {
 		private List<DfsPackDescription> packs = new ArrayList<>();
+		private int blockSize;
 
 		MemObjDatabase(DfsRepository repo) {
 			super(repo, new DfsReaderOptions());
 		}
 
+		/**
+		 * @param blockSize
+		 *            block size to be reported by readable channels, for tests.
+		 */
+		public void setReadableChannelBlockSizeForTest(int blockSize) {
+			this.blockSize = blockSize;
+		}
+
 		@Override
 		protected synchronized List<DfsPackDescription> listPacks() {
 			return packs;
@@ -152,7 +162,7 @@
 			byte[] file = memPack.fileMap.get(ext);
 			if (file == null)
 				throw new FileNotFoundException(desc.getFileName(ext));
-			return new ByteArrayReadableChannel(file);
+			return new ByteArrayReadableChannel(file, blockSize);
 		}
 
 		@Override
@@ -216,13 +226,13 @@
 
 	private static class ByteArrayReadableChannel implements ReadableChannel {
 		private final byte[] data;
-
+		private final int blockSize;
 		private int position;
-
 		private boolean open = true;
 
-		ByteArrayReadableChannel(byte[] buf) {
+		ByteArrayReadableChannel(byte[] buf, int blockSize) {
 			data = buf;
+			this.blockSize = blockSize;
 		}
 
 		@Override
@@ -262,7 +272,7 @@
 
 		@Override
 		public int blockSize() {
-			return 0;
+			return blockSize;
 		}
 
 		@Override
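
The widened MemObjDatabase return type and the new block-size hook exist so
tests can drive the DFS block cache with channels that report a real block
size. A minimal sketch of the hook in use (512 is an arbitrary test value):

    InMemoryRepository repo = new InMemoryRepository(
            new DfsRepositoryDescription("test"));
    repo.getObjectDatabase().setReadableChannelBlockSizeForTest(512);
    // Channels from openFile(...) now report blockSize() == 512 instead
    // of 0, so reads are block-aligned like a real DFS backend's.
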
diff --git a/org.eclipse.jgit/src/org/eclipse/jgit/lib/ConfigConstants.java b/org.eclipse.jgit/src/org/eclipse/jgit/lib/ConfigConstants.java
index 20744b6..ad5b106 100644
--- a/org.eclipse.jgit/src/org/eclipse/jgit/lib/ConfigConstants.java
+++ b/org.eclipse.jgit/src/org/eclipse/jgit/lib/ConfigConstants.java
@@ -371,6 +371,13 @@
 	public static final String CONFIG_KEY_RENAMES = "renames";
 
 	/**
+	 * The "inCoreLimit" key in the "merge section". It's a size limit (bytes) used to
+	 * control a file to be stored in {@code Heap} or {@code LocalFile} during the merge.
+	 * @since 4.9
+	 */
+	public static final String CONFIG_KEY_IN_CORE_LIMIT = "inCoreLimit";
+
+	/**
 	 * The "prune" key
 	 * @since 3.3
 	 */
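
The new key is user-facing: setting merge.inCoreLimit changes when merge
results spill from the heap to a temporary file. A hedged sketch using the
standard Config API (the 1 MiB value is arbitrary, 10 MiB stays the default,
and exception handling is omitted):

    StoredConfig cfg = repo.getConfig();
    cfg.setInt(ConfigConstants.CONFIG_MERGE_SECTION, null,
            ConfigConstants.CONFIG_KEY_IN_CORE_LIMIT, 1 << 20);
    cfg.save();
    // ResolveMerger snapshots the limit in its constructor (see below),
    // so set the value before constructing the merger.
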
diff --git a/org.eclipse.jgit/src/org/eclipse/jgit/merge/ResolveMerger.java b/org.eclipse.jgit/src/org/eclipse/jgit/merge/ResolveMerger.java
index e77ad95..92c2bb3 100644
--- a/org.eclipse.jgit/src/org/eclipse/jgit/merge/ResolveMerger.java
+++ b/org.eclipse.jgit/src/org/eclipse/jgit/merge/ResolveMerger.java
@@ -85,6 +85,7 @@
 import org.eclipse.jgit.errors.MissingObjectException;
 import org.eclipse.jgit.errors.NoWorkTreeException;
 import org.eclipse.jgit.lib.Config;
+import org.eclipse.jgit.lib.ConfigConstants;
 import org.eclipse.jgit.lib.FileMode;
 import org.eclipse.jgit.lib.ObjectId;
 import org.eclipse.jgit.lib.ObjectInserter;
@@ -272,6 +273,12 @@
 	 */
 	protected MergeAlgorithm mergeAlgorithm;
 
+	/**
+	 * The size limit (bytes) deciding whether the merge result is buffered on
+	 * the {@code Heap} or in a {@code LocalFile} during the merge.
+	 */
+	private int inCoreLimit;
+
 	private static MergeAlgorithm getMergeAlgorithm(Config config) {
 		SupportedAlgorithm diffAlg = config.getEnum(
 				CONFIG_DIFF_SECTION, null, CONFIG_KEY_ALGORITHM,
@@ -279,6 +286,11 @@
 		return new MergeAlgorithm(DiffAlgorithm.getAlgorithm(diffAlg));
 	}
 
+	private static int getInCoreLimit(Config config) {
+		return config.getInt(ConfigConstants.CONFIG_MERGE_SECTION,
+				ConfigConstants.CONFIG_KEY_IN_CORE_LIMIT, 10 << 20);
+	}
+
 	private static String[] defaultCommitNames() {
 		return new String[] { "BASE", "OURS", "THEIRS" }; //$NON-NLS-1$ //$NON-NLS-2$ //$NON-NLS-3$
 	}
@@ -289,7 +301,9 @@
 	 */
 	protected ResolveMerger(Repository local, boolean inCore) {
 		super(local);
-		mergeAlgorithm = getMergeAlgorithm(local.getConfig());
+		Config config = local.getConfig();
+		mergeAlgorithm = getMergeAlgorithm(config);
+		inCoreLimit = getInCoreLimit(config);
 		commitNames = defaultCommitNames();
 		this.inCore = inCore;
 
@@ -835,7 +849,7 @@
 	private ObjectId insertMergeResult(MergeResult<RawText> result)
 			throws IOException {
 		TemporaryBuffer.LocalFile buf = new TemporaryBuffer.LocalFile(
-				db != null ? nonNullRepo().getDirectory() : null, 10 << 20);
+				db != null ? nonNullRepo().getDirectory() : null, inCoreLimit);
 		try {
 			new MergeFormatter().formatMerge(buf, result,
 					Arrays.asList(commitNames), CHARACTER_ENCODING);