Commit 27be225f authored by Thomas Mueller

A persistent multi-version map (work in progress) - allow reading old versions after storing; in-memory mode
Parent 7b72a44d
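
The in-memory mode named in the commit message is exercised by the new testInMemory test in this diff. Below is a minimal standalone sketch of that usage, not code from the commit: createInMemoryStore() is a hypothetical placeholder for opening a BtreeMapStore with a null file name (the updated open() javadoc in this commit documents the file name as "null for in-memory"), and every map call used here appears in the diff itself.

// Sketch only, mirroring the new testInMemory test below.
// createInMemoryStore() is a hypothetical stand-in for opening a BtreeMapStore
// with a null file name ("null for in-memory" per the updated open() javadoc);
// the exact factory call is not part of this diff.
void inMemoryExample() {
    BtreeMapStore s = createInMemoryStore();
    BtreeMap<Integer, String> m = s.openMap("data", Integer.class, String.class);
    for (int i = 0; i < 100; i++) {
        m.put(i, "Hello World");               // fill the map, purely in memory
    }
    for (int i = 0; i < 100; i++) {
        assert "Hello World".equals(m.get(i)); // read everything back
    }
    for (int i = 0; i < 100; i++) {
        m.remove(i);                           // delete all entries again
    }
    assert m.size() == 0;
    s.close();                                 // nothing was ever written to disk
}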
@@ -104,7 +104,7 @@ public class RtreeMap<K, V> extends BtreeMap<K, V> {
 for (int i = 0; i < p.getKeyCount(); i++) {
 if (contains(p, i, key)) {
 Page c = p.getChildPage(i);
-long oldSize = c.getTotalSize();
+long oldSize = c.getTotalCount();
 Page c2 = remove(c, writeVersion, key);
 if (c2 == null) {
 // this child was deleted
@@ -114,7 +114,7 @@ public class RtreeMap<K, V> extends BtreeMap<K, V> {
 }
 p = p.copyOnWrite(writeVersion);
 p.remove(i);
-} else if (oldSize != c2.getTotalSize()) {
+} else if (oldSize != c2.getTotalCount()) {
 p = p.copyOnWrite(writeVersion);
 Object oldBounds = p.getKey(i);
 if (!keyType.isInside(key, oldBounds)) {
@@ -197,21 +197,22 @@ public class RtreeMap<K, V> extends BtreeMap<K, V> {
 Object[] keys = { key };
 Object[] values = { value };
 p = Page.create(this, writeVersion, 1,
-keys, values, null, null, 1, 0);
+keys, values, null, null, null, 1, 0);
 return p;
 }
 if (p.getKeyCount() > maxPageSize) {
 // only possible if this is the root, else we would have split earlier
 // (this requires maxPageSize is fixed)
 p = p.copyOnWrite(writeVersion);
-long totalSize = p.getTotalSize();
+long totalCount = p.getTotalCount();
 Page split = split(p, writeVersion);
 Object[] keys = { getBounds(p), getBounds(split) };
 long[] children = { p.getPos(), split.getPos(), 0 };
-long[] childrenSize = { p.getTotalSize(), split.getTotalSize(), 0 };
+Page[] childrenPages = { p, split, null };
+long[] counts = { p.getTotalCount(), split.getTotalCount(), 0 };
 p = Page.create(this, writeVersion, 2,
-keys, null, children, childrenSize,
-totalSize, 0);
+keys, null, children, childrenPages, counts,
+totalCount, 0);
 // now p is a node; continues
 } else if (p.isLeaf()) {
 p = p.copyOnWrite(writeVersion);
@@ -246,7 +247,7 @@ public class RtreeMap<K, V> extends BtreeMap<K, V> {
 p = p.copyOnWrite(writeVersion);
 p.setKey(index, getBounds(c));
 p.setChild(index, c);
-p.insertNode(index, getBounds(split), split.getPos(), split.getTotalSize());
+p.insertNode(index, getBounds(split), split);
 // now we are not sure where to add
 return add(p, writeVersion, key, value, maxPageSize);
 }
@@ -360,8 +361,9 @@ public class RtreeMap<K, V> extends BtreeMap<K, V> {
 private Page newPage(boolean leaf, long writeVersion) {
 Object[] values = leaf ? new Object[4] : null;
 long[] c = leaf ? null : new long[1];
+Page[] cp = leaf ? null : new Page[1];
 return Page.create(this, writeVersion, 0,
-new Object[4], values, c, c, 0, 0);
+new Object[4], values, c, cp, c, 0, 0);
 }
 
 private static void move(Page source, Page target, int sourceIndex) {
@@ -370,9 +372,8 @@ public class RtreeMap<K, V> extends BtreeMap<K, V> {
 Object v = source.getValue(sourceIndex);
 target.insertLeaf(0, k, v);
 } else {
-long c = source.getChildPage(sourceIndex).getPos();
-long count = source.getCounts(sourceIndex);
-target.insertNode(0, k, c, count);
+Page c = source.getChildPage(sourceIndex);
+target.insertNode(0, k, c);
 }
 source.remove(sourceIndex);
 }
@@ -53,6 +53,7 @@ public class TestBtreeMapStore extends TestBase {
 testRollbackInMemory();
 testRollbackStored();
 testMeta();
+testInMemory();
 testLargeImport();
 testBtreeStore();
 testDefragment();
@@ -95,9 +96,9 @@ public class TestBtreeMapStore extends TestBase {
 s.setRetainChunk(0);
 long old2 = s.store();
-// TODO keep old version after storing
-// assertEquals("Hello", mOld.get("1"));
-// assertEquals("World", mOld.get("2"));
+// the old version is still available
+assertEquals("Hello", mOld.get("1"));
+assertEquals("World", mOld.get("2"));
 m.put("1", "Hi");
 m.remove("2");
@@ -528,6 +529,36 @@ public class TestBtreeMapStore extends TestBase {
 s.close();
 }
 
+private void testInMemory() {
+for (int j = 0; j < 1; j++) {
+BtreeMapStore s = openStore(null);
+// s.setMaxPageSize(10);
+// long t;
+int len = 100;
+// TreeMap<Integer, String> m = new TreeMap<Integer, String>();
+// HashMap<Integer, String> m = New.hashMap();
+BtreeMap<Integer, String> m = s.openMap("data", Integer.class, String.class);
+// t = System.currentTimeMillis();
+for (int i = 0; i < len; i++) {
+m.put(i, "Hello World");
+}
+// System.out.println("put: " + (System.currentTimeMillis() - t));
+// t = System.currentTimeMillis();
+for (int i = 0; i < len; i++) {
+assertEquals("Hello World", m.get(i));
+}
+// System.out.println("get: " + (System.currentTimeMillis() - t));
+// t = System.currentTimeMillis();
+for (int i = 0; i < len; i++) {
+m.remove(i);
+}
+// System.out.println("remove: " + (System.currentTimeMillis() - t));
+// System.out.println();
+assertEquals(0, m.size());
+s.close();
+}
+}
+
 private void testLargeImport() {
 String fileName = getBaseDir() + "/testImport.h3";
 int len = 1000;
@@ -75,7 +75,7 @@ public class BtreeMap<K, V> {
 Object[] keys = { key };
 Object[] values = { value };
 p = Page.create(this, writeVersion, 1,
-keys, values, null, null, 1, 0);
+keys, values, null, null, null, 1, 0);
 return p;
 }
 if (p.getKeyCount() > maxPageSize) {
@@ -83,14 +83,15 @@ public class BtreeMap<K, V> {
 // (this requires maxPageSize is fixed)
 p = p.copyOnWrite(writeVersion);
 int at = p.getKeyCount() / 2;
-long totalSize = p.getTotalSize();
+long totalCount = p.getTotalCount();
 Object k = p.getKey(at);
 Page split = p.split(at);
 Object[] keys = { k };
 long[] children = { p.getPos(), split.getPos() };
-long[] childrenSize = { p.getTotalSize(), split.getTotalSize() };
+Page[] childrenPages = { p, split };
+long[] counts = { p.getTotalCount(), split.getTotalCount() };
 p = Page.create(this, writeVersion, 1,
-keys, null, children, childrenSize, totalSize, 0);
+keys, null, children, childrenPages, counts, totalCount, 0);
 // now p is a node; insert continues
 } else if (p.isLeaf()) {
 int index = p.binarySearch(key);
@@ -119,13 +120,13 @@ public class BtreeMap<K, V> {
 Page split = c.split(at);
 p = p.copyOnWrite(writeVersion);
 p.setChild(index, split);
-p.insertNode(index, k, c.getPos(), c.getTotalSize());
+p.insertNode(index, k, c);
 // now we are not sure where to add
 return put(p, writeVersion, key, value, maxPageSize);
 }
-long oldSize = c.getTotalSize();
+long oldSize = c.getTotalCount();
 Page c2 = put(c, writeVersion, key, value, maxPageSize);
-if (c != c2 || oldSize != c2.getTotalSize()) {
+if (c != c2 || oldSize != c2.getTotalCount()) {
 p = p.copyOnWrite(writeVersion);
 p.setChild(index, c2);
 }
@@ -354,7 +355,7 @@ public class BtreeMap<K, V> {
 index++;
 }
 Page c = p.getChildPage(index);
-long oldSize = c.getTotalSize();
+long oldCount = c.getTotalCount();
 Page c2 = remove(c, writeVersion, key);
 if (c2 == null) {
 // this child was deleted
@@ -364,7 +365,7 @@ public class BtreeMap<K, V> {
 removePage(p);
 p = p.getChildPage(0);
 }
-} else if (oldSize != c2.getTotalSize()) {
+} else if (oldCount != c2.getTotalCount()) {
 p = p.copyOnWrite(writeVersion);
 p.setChild(index, c2);
 }
@@ -562,7 +563,7 @@ public class BtreeMap<K, V> {
 }
 
 public long getSize() {
-return root == null ? 0 : root.getTotalSize();
+return root == null ? 0 : root.getTotalCount();
 }
 
 public boolean equals(Object o) {
@@ -11,7 +11,6 @@ import java.io.StringReader;
 import java.nio.ByteBuffer;
 import java.nio.channels.FileChannel;
 import java.util.ArrayList;
-import java.util.Arrays;
 import java.util.BitSet;
 import java.util.Collections;
 import java.util.Comparator;
@@ -53,7 +52,6 @@ TODO:
 - support large binaries
 - support stores that span multiple files (chunks stored in other files)
 - triggers
-- compare with newest version of IndexedDb
 - support database version / schema version
 - implement more counted b-tree (skip, get positions)
 - merge pages if small
@@ -83,10 +81,7 @@ public class BtreeMapStore {
 private int blockSize = 4 * 1024;
 private long rootChunkStart;
-private int tempPageId;
 private Map<Long, Page> cache = CacheLIRS.newInstance(readCacheSize, 2048);
-private HashMap<Long, Page> temp = New.hashMap();
-private Page[] tempCache = new Page[64];
 private int lastChunkId;
 private HashMap<Integer, Chunk> chunks = New.hashMap();
@@ -134,7 +129,7 @@ public class BtreeMapStore {
 /**
 * Open a tree store.
 *
-* @param fileName the file name
+* @param fileName the file name (null for in-memory)
 * @param mapFactory the map factory
 * @return the store
 */
@@ -306,6 +301,9 @@ public class BtreeMapStore {
 private void open() {
 meta = new BtreeMap<String, String>(this, 0, "meta", STRING_TYPE, STRING_TYPE, 0);
+if (fileName == null) {
+return;
+}
 FileUtils.createDirectories(FileUtils.getParent(fileName));
 try {
 log("file open");
@@ -397,8 +395,6 @@ public class BtreeMapStore {
 for (BtreeMap<?, ?> m : New.arrayList(maps.values())) {
 m.close();
 }
-temp.clear();
-Arrays.fill(tempCache, null);
 meta = null;
 compressor = null;
 chunks.clear();
@@ -623,21 +619,6 @@ public class BtreeMapStore {
 return set.size() * blockSize;
 }
 
-/**
-* Register a page and get the next temporary page id.
-*
-* @param p the new page
-* @return the page id
-*/
-long registerTempPage(Page p) {
-long pos = --tempPageId;
-// use -pos so the Long cache can be used
-temp.put(-pos, p);
-int index = (int) pos & (tempCache.length - 1);
-tempCache[index] = p;
-return pos;
-}
-
 /**
 * Check whether there are any unsaved changes.
 *
@@ -833,15 +814,6 @@
 * @return the page
 */
 Page readPage(BtreeMap<?, ?> map, long pos) {
-if (pos < 0) {
-int index = (int) pos & (tempCache.length - 1);
-Page p = tempCache[index];
-if (p == null || p.getPos() != pos) {
-p = temp.get(-pos);
-tempCache[index] = p;
-}
-return p;
-}
 Page p = cache.get(pos);
 if (p == null) {
 long filePos = getFilePosition(pos);
@@ -1033,9 +1005,6 @@
 m.revertTemp();
 }
 mapsChanged.clear();
-temp.clear();
-Arrays.fill(tempCache, null);
-tempPageId = 0;
 }
 
 /**