Commit 1fdb8c57 authored by Thomas Mueller

Memory usage calculation should also depend on the memory size of the value, not just on the number of pages.
Parent bcf000e4
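The change in a nutshell: an unsaved page no longer counts as exactly one unit; it counts as 1 plus one extra unit per pageSplitSize bytes of its estimated memory, so a single page holding a large value (for example a StreamStore block) weighs as much as several average-sized pages. A minimal standalone sketch of that counting scheme, not the actual MVStore internals (class and method names here are illustrative, and the 16 KB split size mentioned in the comment is an assumption); it mirrors the registerUnsavedPage and removePage changes in the diff below:

// Minimal sketch of the counting scheme introduced by this commit.
// Names are illustrative, not the MVStore internals.
public class UnsavedPageCounter {

    private final int pageSplitSize; // e.g. 16 * 1024 (assumed default)
    private int unsavedPageCount;

    public UnsavedPageCounter(int pageSplitSize) {
        this.pageSplitSize = pageSplitSize;
    }

    /** Register a page: it counts at least once, plus once per pageSplitSize bytes. */
    public void register(int memory) {
        unsavedPageCount += 1 + memory / pageSplitSize;
    }

    /**
     * Unregister a page; never drop below zero, because a page may
     * have been allocated but never stored.
     */
    public void remove(int memory) {
        int count = 1 + memory / pageSplitSize;
        unsavedPageCount = Math.max(0, unsavedPageCount - count);
    }

    public int get() {
        return unsavedPageCount;
    }
}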
@@ -983,8 +983,8 @@ public class MVMap<K, V> extends AbstractMap<K, V>
*
* @param pos the position of the page to remove
*/
protected void removePage(long pos) {
store.removePage(this, pos);
protected void removePage(long pos, int memory) {
store.removePage(this, pos, memory);
}
/**
......
@@ -36,11 +36,8 @@ Documentation
- better document that writes are in background thread
- better document how to do non-unique indexes
- document pluggable store and OffHeapStore
- document file format
- the chunk id is normally the version
MVTableEngine:
- verify that tests don't use the PageStore
- test and possibly allow MVCC & MULTI_THREADED
- maybe enable MVCC by default (but allow to disable it)
- config options for compression and page size (maybe combined)
@@ -52,9 +49,12 @@ TransactionStore:
MVStore:
- console auto-complete: only tab to complete; remove newlines before autocomplete?
- maybe change the length code to have lower gaps
- test chunk id rollover
- document and review the file format
- improve memory calculation for transient and cache
specially for large pages (when using the StreamStore)
- automated 'kill process' and 'power failure' test
- update checkstyle
@@ -105,15 +105,13 @@ MVStore:
configured write delay to store changes
- compact* should also store uncommitted changes (if there are any)
- write a LSM-tree (log structured merge tree) utility on top of the MVStore
- improve memory calculation for transient and cache
specially for large pages (when using the StreamStore)
- StreamStore: split blocks similar to rsync crypto, where the split is made
"if the sum of the past 8196 bytes divides by 4096 with zero remainder"
- Compression: try using a bloom filter (64 bit) before trying to match
- LIRS cache: maybe remove 'mask' field, and dynamically grow the arrays
- chunk metadata: maybe split into static and variable,
or use a small page size for metadata
- data type "string": maybe use prefix compression for keys
- test chunk id rollover
*/
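The StreamStore TODO item above ("split is made if the sum of the past 8196 bytes divides by 4096 with zero remainder") describes a content-defined chunking rule. Purely as an illustration, not H2 code, and taking the window and divisor values verbatim from that note, such a split point could be found like this:

// Illustrative sketch of the rsync-style split rule from the TODO above:
// keep a running sum over the last 8196 bytes and cut the block whenever
// that sum is divisible by 4096. Not H2 code; names are hypothetical.
import java.io.IOException;
import java.io.InputStream;

public class ContentDefinedSplit {

    private static final int WINDOW = 8196;
    private static final int DIVISOR = 4096;

    /**
     * Reads bytes until a split point is found (or the stream ends)
     * and returns the resulting block length.
     */
    public static int nextBlockLength(InputStream in) throws IOException {
        int[] window = new int[WINDOW];
        long sum = 0;
        int length = 0;
        for (int b; (b = in.read()) >= 0;) {
            int slot = length % WINDOW;
            sum += b - window[slot]; // add the new byte, drop the oldest one
            window[slot] = b;
            length++;
            if (length >= WINDOW && sum % DIVISOR == 0) {
                break; // boundary found
            }
        }
        return length;
    }
}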
@@ -208,9 +206,9 @@ public class MVStore {
private long lastStoredVersion;
/**
* The estimated number of unsaved pages (this number may not be completely
* accurate, because it may be changed concurrently, and because temporary
* pages are counted)
* The estimated number of average-sized unsaved pages. This number may not
* be completely accurate, because it may be changed concurrently, and
* because temporary pages are counted.
*/
private int unsavedPageCount;
private int autoCommitPageCount;
@@ -1050,21 +1048,38 @@ public class MVStore {
// the last prediction did not matched
needHeader = true;
} else {
int chunkId = DataUtils.readHexInt(fileHeader, "chunk", 0);
if (lastChunk.id - chunkId > 20) {
long headerVersion = DataUtils.readHexLong(fileHeader, "version", 0);
if (lastChunk.version - headerVersion > 20) {
// we write after at least 20 entries
needHeader = true;
} else {
while (chunkId <= lastChunk.id) {
if (chunks.get(chunkId) == null) {
int chunkId = DataUtils.readHexInt(fileHeader, "chunk", 0);
while (true) {
Chunk old = chunks.get(chunkId);
if (old == null) {
// one of the chunks in between
// was removed
needHeader = true;
break;
}
if (chunkId == lastChunk.id) {
break;
}
chunkId++;
}
}
// }
// while (chunkId <= lastChunk.id) {
// if (chunks.get(chunkId) == null) {
// // one of the chunks in between
// // was removed
// needHeader = true;
// break;
// }
// chunkId++;
// }
// }
}
}
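For readability, here is the decision in the hunk above as a simplified standalone sketch (hypothetical method and parameter names, not the actual MVStore fields): the file header is rewritten early if its recorded version lags more than 20 versions behind the newest chunk, or if any chunk between the one it references and the newest chunk has been removed in the meantime.

// Simplified sketch of the check above; names are illustrative.
// Assumes headerChunkId is not larger than lastChunkId, as in the store.
static boolean needNewFileHeader(long headerVersion, int headerChunkId,
        long lastChunkVersion, int lastChunkId,
        java.util.Map<Integer, ?> chunks) {
    if (lastChunkVersion - headerVersion > 20) {
        // the header is written again after at least 20 versions
        return true;
    }
    for (int id = headerChunkId;; id++) {
        if (chunks.get(id) == null) {
            // one of the chunks in between was removed
            return true;
        }
        if (id == lastChunkId) {
            return false;
        }
    }
}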
@@ -1595,14 +1610,17 @@ public class MVStore {
*
* @param map the map the page belongs to
* @param pos the position of the page
* @param memory the memory usage
*/
void removePage(MVMap<?, ?> map, long pos) {
void removePage(MVMap<?, ?> map, long pos, int memory) {
// we need to keep temporary pages,
// to support reading old versions and rollback
if (pos == 0) {
// the value could be smaller than 0 because
// in some cases a page is allocated without a store
unsavedPageCount = Math.max(0, unsavedPageCount - 1);
// in some cases a page is allocated,
// but never stored
int count = 1 + memory / pageSplitSize;
unsavedPageCount = Math.max(0, unsavedPageCount - count);
return;
}
@@ -1788,10 +1806,14 @@ public class MVStore {
/**
* Increment the number of unsaved pages.
*/
void registerUnsavedPage() {
int count = ++unsavedPageCount;
if (count > autoCommitPageCount && autoCommitPageCount > 0) {
*
* @param memory the memory usage of the page
*/
void registerUnsavedPage(int memory) {
int count = 1 + memory / pageSplitSize;
unsavedPageCount += count;
int newValue = unsavedPageCount;
if (newValue > autoCommitPageCount && autoCommitPageCount > 0) {
saveNeeded = true;
}
}
......
@@ -159,7 +159,7 @@ public class Page {
}
MVStore store = map.store;
if (store != null) {
store.registerUnsavedPage();
store.registerUnsavedPage(p.memory);
}
return p;
}
@@ -566,7 +566,8 @@ public class Page {
long c = children[i];
int type = DataUtils.getPageType(c);
if (type == DataUtils.PAGE_TYPE_LEAF) {
map.removePage(c);
int mem = DataUtils.getPageMaxLength(c);
map.removePage(c, mem);
} else {
map.readPage(c).removeAllRecursive();
}
@@ -961,7 +962,7 @@ public class Page {
* Remove the page.
*/
public void removePage() {
map.removePage(pos);
map.removePage(pos, memory);
}
}
@@ -96,9 +96,12 @@ public class LobStorageMap implements LobStorageInterface {
dataMap = mvStore.openMap("lobData",
new MVMapConcurrent.Builder<Long, byte[]>());
streamStore = new StreamStore(dataMap);
int todo; // test and then remove
// TODO currently needed to avoid out of memory,
// because memory usage is only measure in number of pages currently
streamStore.setMaxBlockSize(32 * 1024);
// streamStore.setMaxBlockSize(32 * 1024);
}
@Override
......
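For context on why the 32 KB workaround above can be commented out: with memory-weighted counting, one large StreamStore block now contributes many units to unsavedPageCount instead of just one, so the auto-commit threshold is reached much sooner. A rough illustration only, not H2 code; the 16 KB page split size is an assumption, while the 256 KB default maximum block size matches what the new test below asserts:

// Rough illustration only (not H2 code); the 16 KB page split size is an assumption.
public class CountingExample {
    public static void main(String[] args) {
        int pageSplitSize = 16 * 1024;
        int maxBlockSize = 256 * 1024;                   // StreamStore default, see the test below
        int oldCount = 1;                                // before: one page, regardless of its size
        int newCount = 1 + maxBlockSize / pageSplitSize; // after: 1 + 16 = 17 units
        System.out.println(oldCount + " -> " + newCount);
        // The larger count triggers the auto-commit much earlier, so the
        // 32 * 1024 limit is no longer needed to keep memory usage bounded.
    }
}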
@@ -68,7 +68,7 @@ public class TestRandomMapOps extends TestBase {
best = op;
size = best;
failException = ex;
// System.out.println("seed:" + seed + " op:" + op);
// System.out.println("seed:" + seed + " op:" + op + " " + ex);
}
}
if (failException != null) {
......
@@ -42,7 +42,7 @@ public class TestStreamStore extends TestBase {
public void test() throws IOException {
FileUtils.deleteRecursive(getBaseDir(), true);
FileUtils.createDirectories(getBaseDir());
testSaveCount();
testExceptionDuringStore();
testReadCount();
testLarge();
@@ -53,6 +53,24 @@ public class TestStreamStore extends TestBase {
testWithFullMap();
testLoop();
}
private void testSaveCount() throws IOException {
String fileName = getBaseDir() + "/testSaveCount.h3";
FileUtils.delete(fileName);
MVStore s = new MVStore.Builder().
fileName(fileName).
open();
MVMap<Long, byte[]> map = s.openMap("data");
StreamStore streamStore = new StreamStore(map);
int blockSize = 256 * 1024;
assertEquals(blockSize, streamStore.getMaxBlockSize());
for (int i = 0; i < 4 * 16; i++) {
streamStore.put(new RandomStream(blockSize, i));
}
long writeCount = s.getFileStore().getWriteCount();
assertTrue(writeCount > 2);
s.close();
}
private void testExceptionDuringStore() throws IOException {
// test that if there is an IOException while storing
......