提交 e4bb3a38 authored 作者: thomasmueller's avatar thomasmueller

#933 MVStore background writer endless loop

上级 ecafdea5
......@@ -342,7 +342,7 @@ public final class MVStore {
int kb = DataUtils.getConfigParam(config, "autoCommitBufferSize", 1024);
// 19 KB memory is about 1 KB storage
autoCommitMemory = kb * 1024 * 19;
autoCompactFillRate = DataUtils.getConfigParam(config, "autoCompactFillRate", 50);
autoCompactFillRate = DataUtils.getConfigParam(config, "autoCompactFillRate", 40);
char[] encryptionKey = (char[]) config.get("encryptionKey");
try {
if (!fileStoreIsProvided) {
......@@ -1022,14 +1022,7 @@ public final class MVStore {
private long storeNowTry() {
long time = getTimeSinceCreation();
int freeDelay = retentionTime / 10;
if (time >= lastFreeUnusedChunks + freeDelay) {
// set early in case it fails (out of memory or so)
lastFreeUnusedChunks = time;
freeUnusedChunks();
// set it here as well, to avoid calling it often if it was slow
lastFreeUnusedChunks = getTimeSinceCreation();
}
freeUnusedIfNeeded(time);
int currentUnsavedPageCount = unsavedMemory;
long storeVersion = currentStoreVersion;
long version = ++currentVersion;
......@@ -1063,7 +1056,6 @@ public final class MVStore {
}
}
Chunk c = new Chunk(newChunkId);
c.pageCount = Integer.MAX_VALUE;
c.pageCountLive = Integer.MAX_VALUE;
c.maxLen = Long.MAX_VALUE;
......@@ -1234,13 +1226,27 @@ public final class MVStore {
return version;
}
/**
 * Free chunks that are no longer referenced, if enough time has passed
 * since the last attempt. This method doesn't directly write, but can
 * change the metadata, and therefore cause a background write.
 *
 * @param time the current time since store creation
 */
private void freeUnusedIfNeeded(long time) {
    long nextRun = lastFreeUnusedChunks + retentionTime / 5;
    if (time < nextRun) {
        return;
    }
    // record the attempt up front, so that a failure
    // (for example out of memory) doesn't trigger an immediate retry
    lastFreeUnusedChunks = time;
    freeUnusedChunks();
    // refresh afterwards as well, so that a slow run
    // isn't repeated right away
    lastFreeUnusedChunks = getTimeSinceCreation();
}
private synchronized void freeUnusedChunks() {
if (lastChunk == null || !reuseSpace) {
return;
}
Set<Integer> referenced = collectReferencedChunks();
long time = getTimeSinceCreation();
for (Iterator<Chunk> it = chunks.values().iterator(); it.hasNext(); ) {
Chunk c = it.next();
if (!referenced.contains(c.id)) {
......@@ -1510,11 +1516,11 @@ public final class MVStore {
*/
private long getFileLengthInUse() {
long result = fileStore.getFileLengthInUse();
assert result == _getFileLengthInUse() : result + " != " + _getFileLengthInUse();
assert result == measureFileLengthInUse() : result + " != " + measureFileLengthInUse();
return result;
}
private long _getFileLengthInUse() {
private long measureFileLengthInUse() {
long size = 2;
for (Chunk c : chunks.values()) {
if (c.len != Integer.MAX_VALUE) {
......@@ -1780,29 +1786,29 @@ public final class MVStore {
}
}
private ArrayList<Chunk> compactGetOldChunks(int targetFillRate, int write) {
if (lastChunk == null) {
// nothing to do
return null;
}
// calculate the fill rate
long maxLengthSum = 0;
long maxLengthLiveSum = 0;
/**
* Get the current fill rate (percentage of used space in the file). Unlike
* the fill rate of the store, here we only account for chunk data; the fill
* rate here is how much of the chunk data is live (still referenced). Young
* chunks are considered live.
*
* @return the fill rate, in percent (100 is completely full)
*/
public int getCurrentFillRate() {
long maxLengthSum = 1;
long maxLengthLiveSum = 1;
long time = getTimeSinceCreation();
for (Chunk c : chunks.values()) {
// ignore young chunks, because we don't optimize those
maxLengthSum += c.maxLen;
if (c.time + retentionTime > time) {
continue;
// young chunks (we don't optimize those):
// assume if they are fully live
// so that we don't try to optimize yet
// until they get old
maxLengthLiveSum += c.maxLen;
} else {
maxLengthLiveSum += c.maxLenLive;
}
maxLengthSum += c.maxLen;
maxLengthLiveSum += c.maxLenLive;
}
if (maxLengthLiveSum < 0) {
// no old data
return null;
}
// the fill rate of all chunks combined
if (maxLengthSum <= 0) {
......@@ -1810,6 +1816,16 @@ public final class MVStore {
maxLengthSum = 1;
}
int fillRate = (int) (100 * maxLengthLiveSum / maxLengthSum);
return fillRate;
}
private ArrayList<Chunk> compactGetOldChunks(int targetFillRate, int write) {
if (lastChunk == null) {
// nothing to do
return null;
}
long time = getTimeSinceCreation();
int fillRate = getCurrentFillRate();
if (fillRate >= targetFillRate) {
return null;
}
......@@ -2475,10 +2491,8 @@ public final class MVStore {
fileOps = false;
}
// use a lower fill rate if there were any file operations
int fillRate = fileOps ? autoCompactFillRate / 3 : autoCompactFillRate;
// TODO how to avoid endless compaction if there is a bug
// in the bookkeeping?
compact(fillRate, autoCommitMemory);
int targetFillRate = fileOps ? autoCompactFillRate / 3 : autoCompactFillRate;
compact(targetFillRate, autoCommitMemory);
autoCompactLastFileOpCount = fileStore.getWriteCount() + fileStore.getReadCount();
}
} catch (Throwable e) {
......@@ -2765,7 +2779,7 @@ public final class MVStore {
* this value, then chunks at the end of the file are moved. Compaction
* stops if the target fill rate is reached.
* <p>
* The default value is 50 (50%). The value 0 disables auto-compacting.
* The default value is 40 (40%). The value 0 disables auto-compacting.
* <p>
*
* @param percent the target fill rate
......
......@@ -142,6 +142,7 @@ import org.h2.test.store.TestKillProcessWhileWriting;
import org.h2.test.store.TestMVRTree;
import org.h2.test.store.TestMVStore;
import org.h2.test.store.TestMVStoreBenchmark;
import org.h2.test.store.TestMVStoreStopCompact;
import org.h2.test.store.TestMVStoreTool;
import org.h2.test.store.TestMVTableEngine;
import org.h2.test.store.TestObjectDataType;
......@@ -889,6 +890,7 @@ kill -9 `jps -l | grep "org.h2.test." | cut -d " " -f 1`
addTest(new TestMVRTree());
addTest(new TestMVStore());
addTest(new TestMVStoreBenchmark());
addTest(new TestMVStoreStopCompact());
addTest(new TestMVStoreTool());
addTest(new TestMVTableEngine());
addTest(new TestObjectDataType());
......
/*
* Copyright 2004-2018 H2 Group. Multiple-Licensed under the MPL 2.0,
* and the EPL 1.0 (http://h2database.com/html/license.html).
* Initial Developer: H2 Group
*/
package org.h2.test.store;
import java.util.Random;
import org.h2.mvstore.MVMap;
import org.h2.mvstore.MVStore;
import org.h2.store.fs.FileUtils;
import org.h2.test.TestBase;
/**
 * Test that the MVStore eventually stops optimizing (does not excessively
 * optimize).
 */
public class TestMVStoreStopCompact extends TestBase {

    /**
     * Run just this test.
     *
     * @param a ignored
     */
    public static void main(String... a) throws Exception {
        TestBase test = TestBase.createCaller().init();
        test.config.big = true;
        test.test();
    }

    @Override
    public void test() throws Exception {
        if (!config.big) {
            return;
        }
        // try various combinations of retention time and write duration
        for (int retentionTime = 10; retentionTime < 1000; retentionTime *= 10) {
            for (int timeout = 100; timeout <= 1000; timeout *= 10) {
                testStopCompact(retentionTime, timeout);
            }
        }
    }

    private void testStopCompact(int retentionTime, int timeout)
            throws InterruptedException {
        String fileName = getBaseDir() + "/testStopCompact.h3";
        FileUtils.createDirectories(getBaseDir());
        FileUtils.delete(fileName);
        MVStore s = new MVStore.Builder().
                fileName(fileName).open();
        s.setRetentionTime(retentionTime);
        MVMap<Integer, String> map = s.openMap("data");
        // fill the store with random updates until the timeout is reached
        long deadline = System.currentTimeMillis() + timeout;
        Random r = new Random(1);
        for (int i = 0; i < 4000000; i++) {
            if (System.currentTimeMillis() > deadline) {
                break;
            }
            int x = r.nextInt(10000000);
            map.put(x, "Hello World " + i * 10);
        }
        s.setAutoCommitDelay(100);
        long oldWriteCount = s.getFileStore().getWriteCount();
        // expect the background writer to settle down within 5 seconds
        Thread.sleep(5000);
        long newWriteCount = s.getFileStore().getWriteCount();
        // compaction should not have caused many additional writes
        assertTrue(newWriteCount - oldWriteCount < 30);
        s.close();
    }

}
Markdown 格式
0%
您添加了 0 到此讨论。请谨慎行事。
请先完成此评论的编辑!
注册 或者 后发表评论