提交 df217319 authored 作者: andrei's avatar andrei

MVStore: make fields final, streamline constructor code

上级 cfbdf443
...@@ -26,6 +26,7 @@ import org.h2.mvstore.cache.CacheLongKeyLIRS; ...@@ -26,6 +26,7 @@ import org.h2.mvstore.cache.CacheLongKeyLIRS;
import org.h2.mvstore.type.StringDataType; import org.h2.mvstore.type.StringDataType;
import org.h2.util.MathUtils; import org.h2.util.MathUtils;
import org.h2.util.New; import org.h2.util.New;
import org.h2.util.Utils;
/* /*
...@@ -151,8 +152,8 @@ public final class MVStore { ...@@ -151,8 +152,8 @@ public final class MVStore {
private volatile boolean closed; private volatile boolean closed;
private FileStore fileStore; private final FileStore fileStore;
private boolean fileStoreIsProvided; private final boolean fileStoreIsProvided;
private final int pageSplitSize; private final int pageSplitSize;
...@@ -161,14 +162,14 @@ public final class MVStore { ...@@ -161,14 +162,14 @@ public final class MVStore {
* It is split in 16 segments. The stack move distance is 2% of the expected * It is split in 16 segments. The stack move distance is 2% of the expected
* number of entries. * number of entries.
*/ */
private CacheLongKeyLIRS<Page> cache; private final CacheLongKeyLIRS<Page> cache;
/** /**
* The page chunk references cache. The default size is 4 MB, and the * The page chunk references cache. The default size is 4 MB, and the
* average size is 2 KB. It is split in 16 segments. The stack move distance * average size is 2 KB. It is split in 16 segments. The stack move distance
* is 2% of the expected number of entries. * is 2% of the expected number of entries.
*/ */
private CacheLongKeyLIRS<PageChildren> cacheChunkRef; private final CacheLongKeyLIRS<PageChildren> cacheChunkRef;
/** /**
* The newest chunk. If nothing was stored yet, this field is not set. * The newest chunk. If nothing was stored yet, this field is not set.
...@@ -198,12 +199,12 @@ public final class MVStore { ...@@ -198,12 +199,12 @@ public final class MVStore {
* The metadata map. Write access to this map needs to be synchronized on * The metadata map. Write access to this map needs to be synchronized on
* the store. * the store.
*/ */
private MVMap<String, String> meta; private final MVMap<String, String> meta;
private final ConcurrentHashMap<Integer, MVMap<?, ?>> maps = private final ConcurrentHashMap<Integer, MVMap<?, ?>> maps =
new ConcurrentHashMap<>(); new ConcurrentHashMap<>();
private HashMap<String, Object> storeHeader = New.hashMap(); private final HashMap<String, Object> storeHeader = New.hashMap();
private WriteBuffer writeBuffer; private WriteBuffer writeBuffer;
...@@ -221,7 +222,7 @@ public final class MVStore { ...@@ -221,7 +222,7 @@ public final class MVStore {
private Compressor compressorHigh; private Compressor compressorHigh;
private final UncaughtExceptionHandler backgroundExceptionHandler; public final UncaughtExceptionHandler backgroundExceptionHandler;
private volatile long currentVersion; private volatile long currentVersion;
...@@ -292,67 +293,64 @@ public final class MVStore { ...@@ -292,67 +293,64 @@ public final class MVStore {
* occurred while opening * occurred while opening
* @throws IllegalArgumentException if the directory does not exist * @throws IllegalArgumentException if the directory does not exist
*/ */
MVStore(HashMap<String, Object> config) { MVStore(Map<String, Object> config) {
Object o = config.get("compress"); this.compressionLevel = Utils.getConfigParam(config, "compress", 0);
this.compressionLevel = o == null ? 0 : (Integer) o;
String fileName = (String) config.get("fileName"); String fileName = (String) config.get("fileName");
fileStore = (FileStore) config.get("fileStore"); FileStore fileStore = (FileStore) config.get("fileStore");
fileStoreIsProvided = fileStore != null; fileStoreIsProvided = fileStore != null;
if(fileStore == null && fileName != null) { if(fileStore == null && fileName != null) {
fileStore = new FileStore(); fileStore = new FileStore();
} }
o = config.get("pageSplitSize"); this.fileStore = fileStore;
int pgSplitSize;
CacheLongKeyLIRS.Config cc = null;
if (this.fileStore != null) {
int mb = Utils.getConfigParam(config, "cacheSize", 16);
if (mb > 0) {
cc = new CacheLongKeyLIRS.Config();
cc.maxMemory = mb * 1024L * 1024L;
Object o = config.get("cacheConcurrency");
if (o != null) { if (o != null) {
pgSplitSize = (Integer) o; cc.segmentCount = (Integer)o;
} else if(fileStore != null) { }
pgSplitSize = 16 * 1024; }
}
if (cc != null) {
cache = new CacheLongKeyLIRS<>(cc);
cc.maxMemory /= 4;
cacheChunkRef = new CacheLongKeyLIRS<>(cc);
} else { } else {
pgSplitSize = 48; // number of keys per page in that case cache = null;
cacheChunkRef = null;
}
int pgSplitSize = Utils.getConfigParam(config, "pageSplitSize", 16 * 1024);
// Make sure pages will fit into cache
if (cache != null && pgSplitSize > cache.getMaxItemSize()) {
pgSplitSize = (int)cache.getMaxItemSize();
} }
pageSplitSize = pgSplitSize; pageSplitSize = pgSplitSize;
o = config.get("backgroundExceptionHandler"); backgroundExceptionHandler =
this.backgroundExceptionHandler = (UncaughtExceptionHandler) o; (UncaughtExceptionHandler)config.get("backgroundExceptionHandler");
meta = new MVMap<>(StringDataType.INSTANCE, meta = new MVMap<>(StringDataType.INSTANCE,
StringDataType.INSTANCE); StringDataType.INSTANCE);
HashMap<String, Object> c = New.hashMap(); HashMap<String, Object> c = New.hashMap();
c.put("id", 0); c.put("id", 0);
c.put("createVersion", currentVersion); c.put("createVersion", currentVersion);
meta.init(this, c); meta.init(this, c);
if (fileStore == null) { if (this.fileStore != null) {
cache = null; retentionTime = this.fileStore.getDefaultRetentionTime();
cacheChunkRef = null; int kb = Utils.getConfigParam(config, "autoCommitBufferSize", 1024);
return;
}
retentionTime = fileStore.getDefaultRetentionTime();
boolean readOnly = config.containsKey("readOnly");
o = config.get("cacheSize");
int mb = o == null ? 16 : (Integer) o;
if (mb > 0) {
CacheLongKeyLIRS.Config cc = new CacheLongKeyLIRS.Config();
cc.maxMemory = mb * 1024L * 1024L;
o = config.get("cacheConcurrency");
if (o != null) {
cc.segmentCount = (Integer) o;
}
cache = new CacheLongKeyLIRS<>(cc);
cc.maxMemory /= 4;
cacheChunkRef = new CacheLongKeyLIRS<>(cc);
}
o = config.get("autoCommitBufferSize");
int kb = o == null ? 1024 : (Integer) o;
// 19 KB memory is about 1 KB storage // 19 KB memory is about 1 KB storage
autoCommitMemory = kb * 1024 * 19; autoCommitMemory = kb * 1024 * 19;
autoCompactFillRate = Utils.getConfigParam(config, "autoCompactFillRate", 50);
o = config.get("autoCompactFillRate");
autoCompactFillRate = o == null ? 50 : (Integer) o;
char[] encryptionKey = (char[]) config.get("encryptionKey"); char[] encryptionKey = (char[]) config.get("encryptionKey");
try { try {
if (!fileStoreIsProvided) { if (!fileStoreIsProvided) {
fileStore.open(fileName, readOnly, encryptionKey); boolean readOnly = config.containsKey("readOnly");
this.fileStore.open(fileName, readOnly, encryptionKey);
} }
if (fileStore.size() == 0) { if (this.fileStore.size() == 0) {
creationTime = getTimeAbsolute(); creationTime = getTimeAbsolute();
lastCommitTime = creationTime; lastCommitTime = creationTime;
storeHeader.put("H", 2); storeHeader.put("H", 2);
...@@ -374,10 +372,10 @@ public final class MVStore { ...@@ -374,10 +372,10 @@ public final class MVStore {
// setAutoCommitDelay starts the thread, but only if // setAutoCommitDelay starts the thread, but only if
// the parameter is different from the old value // the parameter is different from the old value
o = config.get("autoCommitDelay"); int delay = Utils.getConfigParam(config, "autoCommitDelay", 1000);
int delay = o == null ? 1000 : (Integer) o;
setAutoCommitDelay(delay); setAutoCommitDelay(delay);
} }
}
private void panic(IllegalStateException e) { private void panic(IllegalStateException e) {
if (backgroundExceptionHandler != null) { if (backgroundExceptionHandler != null) {
...@@ -892,30 +890,27 @@ public final class MVStore { ...@@ -892,30 +890,27 @@ public final class MVStore {
// the thread also synchronized on this, which // the thread also synchronized on this, which
// could result in a deadlock // could result in a deadlock
stopBackgroundThread(); stopBackgroundThread();
closed = true;
synchronized (this) { synchronized (this) {
closed = true;
if (fileStore != null && shrinkIfPossible) { if (fileStore != null && shrinkIfPossible) {
shrinkFileIfPossible(0); shrinkFileIfPossible(0);
} }
// release memory early - this is important when called // release memory early - this is important when called
// because of out of memory // because of out of memory
cache = null; if (cache != null) {
cacheChunkRef = null; cache.clear();
}
if (cacheChunkRef != null) {
cacheChunkRef.clear();
}
for (MVMap<?, ?> m : New.arrayList(maps.values())) { for (MVMap<?, ?> m : New.arrayList(maps.values())) {
m.close(); m.close();
} }
meta = null;
chunks.clear(); chunks.clear();
maps.clear(); maps.clear();
if (fileStore != null) { if (fileStore != null && !fileStoreIsProvided) {
try {
if (!fileStoreIsProvided) {
fileStore.close(); fileStore.close();
} }
} finally {
fileStore = null;
}
}
} }
} }
......
...@@ -82,13 +82,21 @@ public class CacheLongKeyLIRS<V> { ...@@ -82,13 +82,21 @@ public class CacheLongKeyLIRS<V> {
* Remove all entries. * Remove all entries.
*/ */
public void clear() { public void clear() {
long max = Math.max(1, maxMemory / segmentCount); long max = getMaxItemSize();
for (int i = 0; i < segmentCount; i++) { for (int i = 0; i < segmentCount; i++) {
segments[i] = new Segment<>( segments[i] = new Segment<>(
max, stackMoveDistance, 8, nonResidentQueueSize); max, stackMoveDistance, 8, nonResidentQueueSize);
} }
} }
/**
 * Returns the largest size a single cached item may have and still fit
 * into the cache, i.e. the memory budget of one cache segment (at least 1).
 *
 * @return the per-segment memory limit, used as the item size cap
 */
public long getMaxItemSize() {
    // each of the segmentCount segments gets an equal share of maxMemory;
    // never report less than 1 so callers always have a positive limit
    long perSegment = maxMemory / segmentCount;
    return perSegment < 1 ? 1 : perSegment;
}
private Entry<V> find(long key) { private Entry<V> find(long key) {
int hash = getHash(key); int hash = getHash(key);
return getSegment(hash).find(key, hash); return getSegment(hash).find(key, hash);
......
...@@ -17,6 +17,7 @@ import java.lang.reflect.Modifier; ...@@ -17,6 +17,7 @@ import java.lang.reflect.Modifier;
import java.util.Arrays; import java.util.Arrays;
import java.util.Comparator; import java.util.Comparator;
import java.util.HashMap; import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.TimeUnit; import java.util.concurrent.TimeUnit;
import java.util.zip.ZipEntry; import java.util.zip.ZipEntry;
import java.util.zip.ZipInputStream; import java.util.zip.ZipInputStream;
...@@ -75,7 +76,7 @@ public class Utils { ...@@ -75,7 +76,7 @@ public class Utils {
buff[pos++] = (byte) (x >> 24); buff[pos++] = (byte) (x >> 24);
buff[pos++] = (byte) (x >> 16); buff[pos++] = (byte) (x >> 16);
buff[pos++] = (byte) (x >> 8); buff[pos++] = (byte) (x >> 8);
buff[pos++] = (byte) x; buff[pos] = (byte) x;
} }
/** /**
...@@ -722,7 +723,7 @@ public class Utils { ...@@ -722,7 +723,7 @@ public class Utils {
String s = getProperty(key, null); String s = getProperty(key, null);
if (s != null) { if (s != null) {
try { try {
return Integer.decode(s).intValue(); return Integer.decode(s);
} catch (NumberFormatException e) { } catch (NumberFormatException e) {
// ignore // ignore
} }
...@@ -750,6 +751,20 @@ public class Utils { ...@@ -750,6 +751,20 @@ public class Utils {
return defaultValue; return defaultValue;
} }
/**
 * Reads an integer parameter from a configuration map.
 * <p>
 * If the value is a {@link Number} its {@code intValue()} is returned.
 * Otherwise the value's string form is parsed with {@link Integer#decode},
 * so decimal, hexadecimal ({@code 0x...}) and octal notations are accepted.
 * If the key is absent, or the value cannot be parsed, the supplied
 * default is returned instead of throwing.
 *
 * @param config the configuration map (must not be null)
 * @param key the name of the parameter to look up
 * @param defaultValue the value to return when the parameter is missing
 *            or not a valid integer
 * @return the parsed parameter value, or {@code defaultValue}
 */
public static int getConfigParam(Map<String,?> config, String key, int defaultValue) {
    Object o = config.get(key);
    if (o instanceof Number) {
        return ((Number) o).intValue();
    } else if (o != null) {
        try {
            return Integer.decode(o.toString());
        } catch (NumberFormatException e) {
            // malformed value: deliberately fall through to the default
        }
    }
    return defaultValue;
}
/** /**
* Scale the value with the available memory. If 1 GB of RAM is available, * Scale the value with the available memory. If 1 GB of RAM is available,
* the value is returned, if 2 GB are available, then twice the value, and * the value is returned, if 2 GB are available, then twice the value, and
......
Markdown 格式
0%
您添加了 0 到此讨论。请谨慎行事。
请先完成此评论的编辑!
注册 或者 后发表评论