Bug 512604 - Reduce the time spent in Database.flush() 
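
Previously, Database.flush() scanned the entire chunk table to locate dirty
chunks. Track the dirty chunks in a dedicated set instead, so that a flush
only visits the chunks that actually need to be written out. In addition,
releasing a write lock no longer forces a flush unconditionally: a flush is
only forced once more than MAX_DIRTY_CACHE_RATIO of the chunk cache is dirty.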

Change-Id: I6751a3d3fe9da4aa5b8831ef2602a2c25141c5f3
diff --git a/org.eclipse.jdt.core/search/org/eclipse/jdt/internal/core/nd/Nd.java b/org.eclipse.jdt.core/search/org/eclipse/jdt/internal/core/nd/Nd.java
index d021839..e4f477b 100644
--- a/org.eclipse.jdt.core/search/org/eclipse/jdt/internal/core/nd/Nd.java
+++ b/org.eclipse.jdt.core/search/org/eclipse/jdt/internal/core/nd/Nd.java
@@ -31,6 +31,14 @@
 	private static final int BLOCKED_WRITE_LOCK_OUTPUT_INTERVAL = 30000;
 	private static final int LONG_WRITE_LOCK_REPORT_THRESHOLD = 1000;
 	private static final int LONG_READ_LOCK_WAIT_REPORT_THRESHOLD = 1000;
+
+	/**
+	 * Controls the number of pages that are allowed to be dirty before a
+	 * flush will occur. Specified as a ratio of the total cache size. For
+	 * example, a ratio of 0.5 would mean that a flush is forced if half
+	 * of the cache is dirty.
+	 */
+	private static final double MAX_DIRTY_CACHE_RATIO = 0.25;
 	public static boolean sDEBUG_LOCKS= false;
 	public static boolean DEBUG_DUPLICATE_DELETIONS = false;
 
@@ -348,7 +356,7 @@
 	}
 
 	public final void releaseWriteLock() {
-		releaseWriteLock(0, true);
+		releaseWriteLock(0, false);
 	}
 
 	@SuppressWarnings("nls")
@@ -392,6 +400,14 @@
 	}
 
 	private void releaseWriteLockAndFlush(int establishReadLocks, boolean flush) throws AssertionError {
+		int dirtyPages = this.getDB().getDirtyChunkCount();
+
+		// If there are too many dirty pages, force a flush now.
+		int totalCacheSize = (int) (this.getDB().getCache().getMaxSize() / Database.CHUNK_SIZE);
+		if (dirtyPages > totalCacheSize * MAX_DIRTY_CACHE_RATIO) {
+			flush = true;
+		}
+
 		int initialReadLocks = flush ? establishReadLocks + 1 : establishReadLocks;
 		// Convert this write lock to a read lock while we flush the page cache to disk. That will prevent
 		// other writers from dirtying more pages during the flush but will allow reads to proceed.
diff --git a/org.eclipse.jdt.core/search/org/eclipse/jdt/internal/core/nd/db/Chunk.java b/org.eclipse.jdt.core/search/org/eclipse/jdt/internal/core/nd/db/Chunk.java
index e438352..92fcd7a 100644
--- a/org.eclipse.jdt.core/search/org/eclipse/jdt/internal/core/nd/db/Chunk.java
+++ b/org.eclipse.jdt.core/search/org/eclipse/jdt/internal/core/nd/db/Chunk.java
@@ -68,6 +68,7 @@
 						+ " dirtied out of order: Only the most-recently-fetched chunk is allowed to be dirtied"); //$NON-NLS-1$
 			}
 			this.fDirty = true;
+			this.fDatabase.chunkDirtied(this);
 		}
 	}
 
@@ -96,7 +97,8 @@
 		} catch (IOException e) {
 			throw new IndexException(new DBStatus(e));
 		}
-		this.fDirty= false;
+		this.fDirty = false;
+		this.fDatabase.chunkCleaned(this);
 		return wasCanceled;
 	}
 
diff --git a/org.eclipse.jdt.core/search/org/eclipse/jdt/internal/core/nd/db/ChunkCache.java b/org.eclipse.jdt.core/search/org/eclipse/jdt/internal/core/nd/db/ChunkCache.java
index 00a9332..67ec4c2 100644
--- a/org.eclipse.jdt.core/search/org/eclipse/jdt/internal/core/nd/db/ChunkCache.java
+++ b/org.eclipse.jdt.core/search/org/eclipse/jdt/internal/core/nd/db/ChunkCache.java
@@ -102,7 +102,7 @@
 				chunk.fCacheHitFlag= false;
 				this.fPointer= (this.fPointer + 1) % this.fPageTable.length;
 			} else {
-				chunk.fDatabase.releaseChunk(chunk);
 				chunk.fCacheIndex= -1;
+				chunk.fDatabase.checkIfChunkReleased(chunk);
 				this.fPageTable[this.fPointer] = null;
 				return;
@@ -151,7 +151,7 @@
 		} else {
 			for (int i= newLength; i < oldLength; i++) {
 				final Chunk chunk= this.fPageTable[i];
-				chunk.fDatabase.releaseChunk(chunk);
 				chunk.fCacheIndex= -1;
+				chunk.fDatabase.checkIfChunkReleased(chunk);
 			}
 			Chunk[] newTable= new Chunk[newLength];
diff --git a/org.eclipse.jdt.core/search/org/eclipse/jdt/internal/core/nd/db/Database.java b/org.eclipse.jdt.core/search/org/eclipse/jdt/internal/core/nd/db/Database.java
index 35ac5fd..f9c7680 100644
--- a/org.eclipse.jdt.core/search/org/eclipse/jdt/internal/core/nd/db/Database.java
+++ b/org.eclipse.jdt.core/search/org/eclipse/jdt/internal/core/nd/db/Database.java
@@ -163,6 +163,12 @@
 
 	private MemoryStats memoryUsage;
 	public Chunk fMostRecentlyFetchedChunk;
+	/**
+	 * Contains the set of Chunks in this Database for which the Chunk.fDirty flag is set to true.
+	 * Protected by the database write lock. This set does not contain the header chunk, which is
+	 * always handled as a special case by the code that flushes chunks.
+	 */
+	private HashSet<Chunk> dirtyChunkSet = new HashSet<>();
 
 	/**
 	 * Construct a new Database object, creating a backing file if necessary.
@@ -317,6 +323,7 @@
 		this.fHeaderChunk.clear(0, CHUNK_SIZE);
 		// Chunks have been removed from the cache, so we may just reset the array of chunks.
 		this.fChunks = new Chunk[] {null};
+		this.dirtyChunkSet.clear();
 		this.fChunksUsed = this.fChunks.length;
 		try {
 			wasCanceled = this.fHeaderChunk.flush() || wasCanceled; // Zero out header chunk.
@@ -1281,6 +1288,7 @@
 		this.fHeaderChunk.clear(0, CHUNK_SIZE);
 		this.memoryUsage.refresh();
 		this.fHeaderChunk.fDirty= false;
+		this.dirtyChunkSet.clear();
 		this.fChunks= new Chunk[] { null };
 		this.fChunksUsed = this.fChunks.length;
 		try {
@@ -1300,8 +1308,8 @@
 	/**
 	 * Called from any thread via the cache, protected by {@link #fCache}.
 	 */
-	void releaseChunk(final Chunk chunk) {
-		if (!chunk.fDirty) {
+	void checkIfChunkReleased(final Chunk chunk) {
+		if (!chunk.fDirty && chunk.fCacheIndex < 0) {
 			if (DEBUG_PAGE_CACHE) {
 				System.out.println("CHUNK " + chunk.fSequenceNumber //$NON-NLS-1$
 						+ ": removing from vector in releaseChunk - instance " + System.identityHashCode(chunk)); //$NON-NLS-1$
@@ -1310,6 +1318,21 @@
 		}
 	}
 
+	void chunkDirtied(final Chunk chunk) {
+		if (chunk.fSequenceNumber < NUM_HEADER_CHUNKS) {
+			return;
+		}
+		this.dirtyChunkSet.add(chunk);
+	}
+
+	void chunkCleaned(final Chunk chunk) {
+		if (chunk.fSequenceNumber < NUM_HEADER_CHUNKS) {
+			return;
+		}
+		this.dirtyChunkSet.remove(chunk);
+		checkIfChunkReleased(chunk);
+	}
+
 	/**
 	 * Returns the cache used for this database.
 	 * @since 4.0
@@ -1339,36 +1362,21 @@
 		boolean wasInterrupted = false;
 		assert this.fLocked;
 		ArrayList<Chunk> dirtyChunks= new ArrayList<>();
-		int scanIndex = NUM_HEADER_CHUNKS;
-
-		while (scanIndex < this.fChunksUsed) {
-			synchronized (this.fCache) {
-				int countMax = Math.min(MAX_ITERATIONS_PER_LOCK, this.fChunksUsed - scanIndex);
-				for (int count = 0; count < countMax; count++) {
-					Chunk chunk = this.fChunks[scanIndex++];
-
-					if (chunk != null) {
-						if (chunk.fDirty) {
-							dirtyChunks.add(chunk); // Keep in fChunks until it is flushed.
-						} else if (chunk.fCacheIndex < 0) {
-							if (DEBUG_PAGE_CACHE) {
-								System.out.println(
-										"CHUNK " + chunk.fSequenceNumber + ": removing from vector in flush - instance " //$NON-NLS-1$//$NON-NLS-2$
-												+ System.identityHashCode(chunk));
-							}
-							// Non-dirty chunk that has been removed from cache.
-							this.fChunks[chunk.fSequenceNumber]= null;
-						}
-					}
-				}
-			}
+		synchronized (this.fCache) {
+			dirtyChunks.addAll(this.dirtyChunkSet);
 		}
+		sortBySequenceNumber(dirtyChunks);
+
 		// Also handles header chunk.
 		wasInterrupted = flushAndUnlockChunks(dirtyChunks, true) || wasInterrupted;
 
 		return wasInterrupted;
 	}
 
+	private void sortBySequenceNumber(ArrayList<Chunk> dirtyChunks) {
+		dirtyChunks.sort((a, b) -> Integer.compare(a.fSequenceNumber, b.fSequenceNumber));
+	}
+
 	/**
 	 * Interrupting the thread with {@link Thread#interrupt()} won't interrupt the write. Returns true iff an attempt
 	 * was made to interrupt the thread with {@link Thread#interrupt()}.
@@ -1395,6 +1403,7 @@
 						synchronized (this.fCache) {
 							buf = ByteBuffer.wrap(chunk.getBytes());
 							chunk.fDirty = false;
+							chunkCleaned(chunk);
 						}
 						wasCanceled = write(buf, (long) chunk.fSequenceNumber * Database.CHUNK_SIZE);
 					} catch (IOException e) {
@@ -1404,29 +1413,6 @@
 					wasInterrupted = wasCanceled || wasInterrupted;
 				}
 			}
-
-			// Only after the chunks are flushed we may unlock and release them.
-			int totalSize = dirtyChunks.size();
-			int scanIndex = 0;
-			while (scanIndex < totalSize) {
-				synchronized (this.fCache) {
-					int countMax = Math.min(MAX_ITERATIONS_PER_LOCK, totalSize - scanIndex);
-					for (int count = 0; count < countMax; count++) {
-						Chunk chunk = this.fChunks[scanIndex++];
-
-						if (chunk != null) {
-							if (chunk.fCacheIndex < 0) {
-								if (DEBUG_PAGE_CACHE) {
-									System.out.println("CHUNK " + chunk.fSequenceNumber //$NON-NLS-1$
-											+ ": removing from vector in flushAndUnlockChunks - instance " //$NON-NLS-1$
-											+ System.identityHashCode(chunk));
-								}
-								this.fChunks[chunk.fSequenceNumber]= null;
-							}
-						}
-					}
-				}
-			}
 		}
 
 		if (isComplete) {
@@ -1514,4 +1500,8 @@
 	public ChunkCache getCache() {
 		return this.fCache;
 	}
+
+	public int getDirtyChunkCount() {
+		return this.dirtyChunkSet.size();
+	}
 }