Commit b52978ad, authored by Evgenij Ryazanov

Merge branch 'master' into misc

@@ -993,7 +993,7 @@ public class MVMap<K, V> extends AbstractMap<K, V>
      * @param memory the number of bytes used for this page
      */
     protected final void removePage(long pos, int memory) {
-        store.removePage(this, pos, memory);
+        store.removePage(pos, memory);
     }
 
     /**
@@ -1190,10 +1190,10 @@ public class MVMap<K, V> extends AbstractMap<K, V>
         int attempt = 0;
         int keyCount;
         while((keyCount = rootReference.getAppendCounter()) > 0) {
-            Page page = Page.create(this,
+            Page page = Page.createLeaf(this,
                     Arrays.copyOf(keysBuffer, keyCount),
                     Arrays.copyOf(valuesBuffer, keyCount),
-                    null, keyCount, 0);
+                    0);
             CursorPos pos = rootReference.root.getAppendCursorPos(null);
             assert page.map == this;
             assert pos != null;
@@ -1215,7 +1215,7 @@ public class MVMap<K, V> extends AbstractMap<K, V>
                 Page.PageReference children[] = new Page.PageReference[] {
                         new Page.PageReference(p),
                         new Page.PageReference(page)};
-                p = Page.create(this, keys, null, children, p.getTotalCount() + page.getTotalCount(), 0);
+                p = Page.createNode(this, keys, children, p.getTotalCount() + page.getTotalCount(), 0);
             }
             break;
         }
@@ -1784,7 +1784,7 @@ public class MVMap<K, V> extends AbstractMap<K, V>
                     new Page.PageReference(p),
                     new Page.PageReference(split)
             };
-            p = Page.create(this, keys, null, children, totalCount, 0);
+            p = Page.createNode(this, keys, children, totalCount, 0);
             break;
         }
         Page c = p;

...
@@ -164,7 +164,8 @@ public class MVStore {
     private volatile boolean closed;
-    final FileStore fileStore;
+    private final FileStore fileStore;
     private final boolean fileStoreIsProvided;
     private final int pageSplitSize;
@@ -970,31 +971,75 @@ public class MVStore {
         }
     }
 
+    ByteBuffer readBufferForPage(long pos, int expectedMapId) {
+        Chunk c = getChunk(pos);
+        long filePos = c.block * BLOCK_SIZE;
+        filePos += DataUtils.getPageOffset(pos);
+        if (filePos < 0) {
+            throw DataUtils.newIllegalStateException(
+                    DataUtils.ERROR_FILE_CORRUPT,
+                    "Negative position {0}; p={1}, c={2}", filePos, pos, c.toString());
+        }
+        long maxPos = (c.block + c.len) * BLOCK_SIZE;
+        ByteBuffer buff;
+        int maxLength = DataUtils.getPageMaxLength(pos);
+        if (maxLength == DataUtils.PAGE_LARGE) {
+            buff = fileStore.readFully(filePos, 128);
+            maxLength = buff.getInt();
+            // read the first bytes again
+        }
+        maxLength = (int) Math.min(maxPos - filePos, maxLength);
+        int length = maxLength;
+        if (length < 0) {
+            throw DataUtils.newIllegalStateException(DataUtils.ERROR_FILE_CORRUPT,
+                    "Illegal page length {0} reading at {1}; max pos {2} ", length, filePos, maxPos);
+        }
+        buff = fileStore.readFully(filePos, length);
+        int chunkId = DataUtils.getPageChunkId(pos);
+        int offset = DataUtils.getPageOffset(pos);
+        int start = buff.position();
+        int remaining = buff.remaining();
+        int pageLength = buff.getInt();
+        if (pageLength > remaining || pageLength < 4) {
+            throw DataUtils.newIllegalStateException(DataUtils.ERROR_FILE_CORRUPT,
+                    "File corrupted in chunk {0}, expected page length 4..{1}, got {2}", chunkId, remaining,
+                    pageLength);
+        }
+        buff.limit(start + pageLength);
+        short check = buff.getShort();
+        int mapId = DataUtils.readVarInt(buff);
+        if (mapId != expectedMapId) {
+            throw DataUtils.newIllegalStateException(DataUtils.ERROR_FILE_CORRUPT,
+                    "File corrupted in chunk {0}, expected map id {1}, got {2}", chunkId, expectedMapId, mapId);
+        }
+        int checkTest = DataUtils.getCheckValue(chunkId)
+                ^ DataUtils.getCheckValue(offset)
+                ^ DataUtils.getCheckValue(pageLength);
+        if (check != (short) checkTest) {
+            throw DataUtils.newIllegalStateException(DataUtils.ERROR_FILE_CORRUPT,
+                    "File corrupted in chunk {0}, expected check value {1}, got {2}", chunkId, checkTest, check);
+        }
+        return buff;
+    }
+
     /**
      * Get the chunk for the given position.
      *
      * @param pos the position
      * @return the chunk
      */
-    Chunk getChunk(long pos) {
+    private Chunk getChunk(long pos) {
+        Chunk c = getChunkIfFound(pos);
+        if (c == null) {
+            int chunkId = DataUtils.getPageChunkId(pos);
+            throw DataUtils.newIllegalStateException(
+                    DataUtils.ERROR_CHUNK_NOT_FOUND,
+                    "Chunk {0} not found", chunkId);
+        }
+        return c;
+    }
+
+    private Chunk getChunkIfFound(long pos) {
         int chunkId = DataUtils.getPageChunkId(pos);
         Chunk c = chunks.get(chunkId);
         if (c == null) {
             checkOpen();
             String s = meta.get(Chunk.getMetaKey(chunkId));
             if (s == null) {
-                throw DataUtils.newIllegalStateException(
-                        DataUtils.ERROR_CHUNK_NOT_FOUND,
-                        "Chunk {0} not found", chunkId);
+                return null;
             }
             c = Chunk.fromString(s);
             if (c.block == Long.MAX_VALUE) {
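Reviewer note: the new `readBufferForPage` consolidates the read-and-validate boilerplate previously duplicated in `readPage` and in the chunk-id collector (both hunks below): resolve the chunk, bound the read by the chunk end, then verify page length, map id and check value before handing the buffer to the parser. A minimal standalone sketch of the validate-then-limit part, with hypothetical names and a simplified header layout (the real code uses the `DataUtils` helpers shown above):

```java
import java.nio.ByteBuffer;

// Standalone sketch of the header validation readBufferForPage performs.
// Layout assumed here: int pageLength, short check, then the payload.
final class PageHeaderSketch {
    static ByteBuffer validate(ByteBuffer buff, short expectedCheck) {
        int start = buff.position();
        int pageLength = buff.getInt();
        if (pageLength < 4 || pageLength > buff.remaining() + 4) {
            throw new IllegalStateException("Corrupt page: length " + pageLength);
        }
        buff.limit(start + pageLength);  // callers can now only read this page
        short check = buff.getShort();
        if (check != expectedCheck) {
            throw new IllegalStateException("Corrupt page: bad check value");
        }
        return buff;
    }

    public static void main(String[] args) {
        ByteBuffer b = ByteBuffer.allocate(16);
        b.putInt(10).putShort((short) 0x1234).putInt(42);  // one 10-byte page
        b.flip();
        ByteBuffer page = validate(b, (short) 0x1234);
        page.getInt();  // payload parses within the limited window
    }
}
```

Clamping `buff.limit()` to the declared page length means downstream parsing cannot read past the page image even if a length field inside the payload is corrupt.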
@@ -1412,9 +1457,7 @@ public class MVStore {
         }
 
         public Set<Integer> getReferenced() {
-            Set<Integer> set = new HashSet<>();
-            set.addAll(referencedChunks.keySet());
-            return set;
+            return new HashSet<>(referencedChunks.keySet());
        }
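Minor cleanup; the two forms are equivalent, and the copy constructor presizes the set in one step. Sketch (the map type here is illustrative):

```java
import java.util.HashSet;
import java.util.Map;
import java.util.Set;

class GetReferencedSketch {
    static Set<Integer> before(Map<Integer, Integer> referencedChunks) {
        Set<Integer> set = new HashSet<>();
        set.addAll(referencedChunks.keySet());
        return set;
    }

    static Set<Integer> after(Map<Integer, Integer> referencedChunks) {
        return new HashSet<>(referencedChunks.keySet());  // same result, one step
    }
}
```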
 
         public void visit(Page page, ThreadPoolExecutor executorService, AtomicInteger executingThreadCounter) {
@@ -1426,7 +1469,8 @@ public class MVStore {
             if (count == 0) {
                 return;
             }
-            final ChunkIdsCollector childCollector = new ChunkIdsCollector(this);
+            ChunkIdsCollector childCollector = DataUtils.isPageSaved(pos) && cacheChunkRef != null ?
+                    new ChunkIdsCollector(this) : this;
             for (int i = 0; i < count; i++) {
                 Page childPage = page.getChildPageIfLoaded(i);
                 if (childPage != null) {
@@ -1435,11 +1479,7 @@ public class MVStore {
                     childCollector.visit(page.getChildPagePos(i), executorService, executingThreadCounter);
                 }
             }
-            // and cache resulting set of chunk ids
-            if (DataUtils.isPageSaved(pos) && cacheChunkRef != null) {
-                int[] chunkIds = childCollector.getChunkIds();
-                cacheChunkRef.put(pos, chunkIds, Constants.MEMORY_ARRAY + 4 * chunkIds.length);
-            }
+            cacheCollectedChunkIds(pos, childCollector);
         }
 
         public void visit(long pos, ThreadPoolExecutor executorService, AtomicInteger executingThreadCounter) {
@@ -1450,36 +1490,24 @@ public class MVStore {
             if (DataUtils.getPageType(pos) == DataUtils.PAGE_TYPE_LEAF) {
                 return;
             }
-            int chunkIds[];
+            int[] chunkIds;
             if (cacheChunkRef != null && (chunkIds = cacheChunkRef.get(pos)) != null) {
                 // there is a cached set of chunk ids for this position
                 for (int chunkId : chunkIds) {
                     registerChunk(chunkId);
                 }
             } else {
-                final ChunkIdsCollector childCollector = new ChunkIdsCollector(this);
+                ChunkIdsCollector childCollector = cacheChunkRef != null ? new ChunkIdsCollector(this) : this;
                 Page page;
                 if (cache != null && (page = cache.get(pos)) != null) {
                     // there is a full page in cache, use it
                     childCollector.visit(page, executorService, executingThreadCounter);
                 } else {
                     // page was not cached: read the data
-                    Chunk chunk = getChunk(pos);
-                    long filePos = chunk.block * BLOCK_SIZE;
-                    filePos += DataUtils.getPageOffset(pos);
-                    if (filePos < 0) {
-                        throw DataUtils.newIllegalStateException(DataUtils.ERROR_FILE_CORRUPT,
-                                "Negative position {0}; p={1}, c={2}", filePos, pos, chunk.toString());
-                    }
-                    long maxPos = (chunk.block + chunk.len) * BLOCK_SIZE;
-                    Page.readChildrenPositions(fileStore, pos, filePos, maxPos,
-                            childCollector, executorService, executingThreadCounter);
+                    ByteBuffer buff = readBufferForPage(pos, getMapId());
+                    Page.readChildrenPositions(buff, pos, childCollector, executorService, executingThreadCounter);
                 }
-                // and cache resulting set of chunk ids
-                if (cacheChunkRef != null) {
-                    chunkIds = childCollector.getChunkIds();
-                    cacheChunkRef.put(pos, chunkIds, Constants.MEMORY_ARRAY + 4 * chunkIds.length);
-                }
+                cacheCollectedChunkIds(pos, childCollector);
             }
         }
@@ -1489,13 +1517,15 @@ public class MVStore {
             }
         }
 
-        private int[] getChunkIds() {
-            int chunkIds[] = new int[referencedChunks.size()];
-            int index = 0;
-            for (Integer chunkId : referencedChunks.keySet()) {
-                chunkIds[index++] = chunkId;
+        private void cacheCollectedChunkIds(long pos, ChunkIdsCollector childCollector) {
+            if (childCollector != this) {
+                int[] chunkIds = new int[childCollector.referencedChunks.size()];
+                int index = 0;
+                for (Integer chunkId : childCollector.referencedChunks.keySet()) {
+                    chunkIds[index++] = chunkId;
+                }
+                cacheChunkRef.put(pos, chunkIds, Constants.MEMORY_ARRAY + 4 * chunkIds.length);
             }
-            return chunkIds;
         }
     }
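Reviewer note: the collector changes make the child-collector allocation conditional. A dedicated `ChunkIdsCollector` is only created when its per-page result will actually be cached (`cacheChunkRef != null`, and for the `Page` variant only for saved pages); otherwise chunk ids are registered directly on the current collector, and the `childCollector != this` guard turns `cacheCollectedChunkIds` into a no-op. A simplified, hypothetical model of the pattern (in MVStore the child also forwards registered ids up to its parent; the "cache" here is a plain map keyed by page position):

```java
import java.util.HashMap;
import java.util.Map;

class CollectorSketch {
    final Map<Long, int[]> cache;  // null means caching disabled
    final Map<Integer, Integer> referencedChunks = new HashMap<>();

    CollectorSketch(Map<Long, int[]> cache) { this.cache = cache; }

    void visit(long pos, int chunkId) {
        // only allocate a child when its result is worth caching
        CollectorSketch child = cache != null ? new CollectorSketch(cache) : this;
        child.referencedChunks.put(chunkId, chunkId);     // stand-in for the real walk
        referencedChunks.putAll(child.referencedChunks);  // parent sees them either way
        cacheCollected(pos, child);
    }

    void cacheCollected(long pos, CollectorSketch child) {
        if (child != this) {  // only cache a dedicated child's result
            int[] ids = child.referencedChunks.keySet().stream()
                    .mapToInt(Integer::intValue).toArray();
            cache.put(pos, ids);
        }
    }
}
```

The payoff is fewer allocations on the common uncached path, while the cached path keeps an exact per-page set of referenced chunks.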
@@ -2052,16 +2082,8 @@ public class MVStore {
         }
         Page p = cache == null ? null : cache.get(pos);
         if (p == null) {
-            Chunk c = getChunk(pos);
-            long filePos = c.block * BLOCK_SIZE;
-            filePos += DataUtils.getPageOffset(pos);
-            if (filePos < 0) {
-                throw DataUtils.newIllegalStateException(
-                        DataUtils.ERROR_FILE_CORRUPT,
-                        "Negative position {0}; p={1}, c={2}", filePos, pos, c.toString());
-            }
-            long maxPos = (c.block + c.len) * BLOCK_SIZE;
-            p = Page.read(fileStore, pos, map, filePos, maxPos);
+            ByteBuffer buff = readBufferForPage(pos, map.getId());
+            p = Page.read(buff, pos, map);
             cachePage(p);
         }
         return p;
@@ -2070,11 +2092,10 @@ public class MVStore {
     /**
      * Remove a page.
      *
-     * @param map the map the page belongs to
      * @param pos the position of the page
      * @param memory the memory usage
      */
-    void removePage(MVMap<?, ?> map, long pos, int memory) {
+    void removePage(long pos, int memory) {
         // we need to keep temporary pages,
         // to support reading old versions and rollback
         if (!DataUtils.isPageSaved(pos)) {
@@ -2086,19 +2107,6 @@ public class MVStore {
             return;
         }
 
-        // This could result in a cache miss if the operation is rolled back,
-        // but we don't optimize for rollback.
-        // We could also keep the page in the cache, as somebody
-        // could still read it (reading the old version).
-        /*
-        if (cache != null) {
-            if (DataUtils.getPageType(pos) == DataUtils.PAGE_TYPE_LEAF) {
-                // keep nodes in the cache, because they are still used for
-                // garbage collection
-                cache.remove(pos);
-            }
-        }
-        */
         int chunkId = DataUtils.getPageChunkId(pos);
         // synchronize, because pages could be freed concurrently
         synchronized (freedPageSpace) {
@@ -2541,13 +2549,12 @@ public class MVStore {
         String oldName = getMapName(id);
         if (oldName != null && !oldName.equals(newName)) {
             String idHexStr = Integer.toHexString(id);
+            // at first create a new name as an "alias"
+            String existingIdHexStr = meta.putIfAbsent("name." + newName, idHexStr);
             // we need to cope with the case of previously unfinished rename
-            String existingIdHexStr = meta.get("name." + newName);
             DataUtils.checkArgument(
                     existingIdHexStr == null || existingIdHexStr.equals(idHexStr),
                     "A map named {0} already exists", newName);
-            // at first create a new name as an "alias"
-            meta.put("name." + newName, idHexStr);
             // switch roles of a new and old names - old one is an alias now
             meta.put(MVMap.getMapKey(id), map.asString(newName));
             // get rid of the old name completely
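Reviewer note: the rename fix folds the existence check and the alias creation into a single `putIfAbsent`, so claiming the alias and discovering an existing owner happen in one step instead of a racy get-then-put pair; finding our own id is accepted, which keeps a previously interrupted rename retryable. A self-contained sketch of the idiom (using `ConcurrentHashMap` only as a stand-in; `meta` in MVStore is an MVMap):

```java
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;

public class RenameSketch {
    static final ConcurrentMap<String, String> meta = new ConcurrentHashMap<>();

    static void createAlias(String newName, String idHexStr) {
        // claim the name and learn about any existing owner in one atomic step
        String existing = meta.putIfAbsent("name." + newName, idHexStr);
        if (existing != null && !existing.equals(idHexStr)) {
            throw new IllegalArgumentException("A map named " + newName + " already exists");
        }
        // existing == idHexStr means an unfinished rename is simply resumed
    }

    public static void main(String[] args) {
        createAlias("users", "2a");
        createAlias("users", "2a");  // retry of an unfinished rename: a no-op
    }
}
```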
...
@@ -131,7 +131,7 @@ public abstract class Page implements Cloneable
         memory = source.memory;
     }
 
-    Page(MVMap<?, ?> map, Object keys[]) {
+    Page(MVMap<?, ?> map, Object[] keys) {
         this.map = map;
         this.keys = keys;
     }
@@ -143,37 +143,46 @@ public abstract class Page implements Cloneable
      * @return the new page
      */
     static Page createEmptyLeaf(MVMap<?, ?> map) {
-        Page page = new Leaf(map, EMPTY_OBJECT_ARRAY, EMPTY_OBJECT_ARRAY);
-        page.initMemoryAccount(PAGE_LEAF_MEMORY);
-        return page;
+        return createLeaf(map, EMPTY_OBJECT_ARRAY, EMPTY_OBJECT_ARRAY, PAGE_LEAF_MEMORY);
     }
 
-    public static Page createEmptyNode(MVMap<?, ?> map) {
-        Page page = new NonLeaf(map, EMPTY_OBJECT_ARRAY, SINGLE_EMPTY, 0);
-        page.initMemoryAccount(PAGE_NODE_MEMORY +
-                MEMORY_POINTER + PAGE_MEMORY_CHILD); // there is always one child
-        return page;
+    static Page createEmptyNode(MVMap<?, ?> map) {
+        return createNode(map, EMPTY_OBJECT_ARRAY, SINGLE_EMPTY, 0,
+                PAGE_NODE_MEMORY + MEMORY_POINTER + PAGE_MEMORY_CHILD); // there is always one child
     }
 
     /**
-     * Create a new page. The arrays are not cloned.
+     * Create a new non-leaf page. The arrays are not cloned.
      *
      * @param map the map
      * @param keys the keys
-     * @param values the values
      * @param children the child page positions
      * @param totalCount the total number of keys
      * @param memory the memory used in bytes
      * @return the page
      */
-    public static Page create(MVMap<?, ?> map,
-            Object[] keys, Object[] values, PageReference[] children,
-            long totalCount, int memory) {
+    public static Page createNode(MVMap<?, ?> map, Object[] keys, PageReference[] children,
+            long totalCount, int memory) {
         assert keys != null;
-        Page p = children == null ? new Leaf(map, keys, values) :
-                new NonLeaf(map, keys, children, totalCount);
-        p.initMemoryAccount(memory);
-        return p;
+        Page page = new NonLeaf(map, keys, children, totalCount);
+        page.initMemoryAccount(memory);
+        return page;
+    }
+
+    /**
+     * Create a new leaf page. The arrays are not cloned.
+     *
+     * @param map the map
+     * @param keys the keys
+     * @param values the values
+     * @param memory the memory used in bytes
+     * @return the page
+     */
+    public static Page createLeaf(MVMap<?, ?> map, Object[] keys, Object[] values, int memory) {
+        assert keys != null;
+        Page page = new Leaf(map, keys, values);
+        page.initMemoryAccount(memory);
+        return page;
     }
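Splitting `Page.create` into `createLeaf` and `createNode` removes the old convention where `children == null` silently selected the leaf variant inside one factory. Condensed from the call sites in this commit (the MVMap append and split hunks above, MVRTreeMap below):

```java
// before: one factory; leaf vs. node decided by whether children == null
p = Page.create(map, keys, null, children, totalCount, 0);  // node
p = Page.create(map, keys, values, null, keyCount, 0);      // leaf

// after: explicit factories, no placeholder arguments
p = Page.createNode(map, keys, children, totalCount, 0);
p = Page.createLeaf(map, keys, values, 0);
```

Each call site now states its intent, and the leaf factory can derive the total count from the keys instead of taking it as a parameter.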
 
     private void initMemoryAccount(int memoryCount) {
@@ -210,37 +219,17 @@ public abstract class Page implements Cloneable
     /**
      * Read a page.
      *
-     * @param fileStore the file store
+     * @param buff ByteBuffer containing serialized page info
      * @param pos the position
      * @param map the map
-     * @param filePos the position in the file
-     * @param maxPos the maximum position (the end of the chunk)
      * @return the page
      */
-    static Page read(FileStore fileStore, long pos, MVMap<?, ?> map,
-            long filePos, long maxPos) {
-        ByteBuffer buff;
-        int maxLength = DataUtils.getPageMaxLength(pos);
-        if (maxLength == DataUtils.PAGE_LARGE) {
-            buff = fileStore.readFully(filePos, 128);
-            maxLength = buff.getInt();
-            // read the first bytes again
-        }
-        maxLength = (int) Math.min(maxPos - filePos, maxLength);
-        int length = maxLength;
-        if (length < 0) {
-            throw DataUtils.newIllegalStateException(
-                    DataUtils.ERROR_FILE_CORRUPT,
-                    "Illegal page length {0} reading at {1}; max pos {2} ",
-                    length, filePos, maxPos);
-        }
-        buff = fileStore.readFully(filePos, length);
+    static Page read(ByteBuffer buff, long pos, MVMap<?, ?> map) {
         boolean leaf = (DataUtils.getPageType(pos) & 1) == PAGE_TYPE_LEAF;
         Page p = leaf ? new Leaf(map) : new NonLeaf(map);
         p.pos = pos;
         int chunkId = DataUtils.getPageChunkId(pos);
-        int offset = DataUtils.getPageOffset(pos);
-        p.read(buff, chunkId, offset, maxLength);
+        p.read(buff, chunkId);
         return p;
     }
@@ -248,59 +237,23 @@ public abstract class Page implements Cloneable
      * Read an inner node page from the buffer, but ignore the keys and
      * values.
      *
-     * @param fileStore the file store
+     * @param buff ByteBuffer containing serialized page info
      * @param pos the position
-     * @param filePos the position in the file
-     * @param maxPos the maximum position (the end of the chunk)
      * @param collector to report child pages positions to
+     * @param executorService to use far parallel processing
+     * @param executingThreadCounter for parallel processing
      */
-    static void readChildrenPositions(FileStore fileStore, long pos, long filePos, long maxPos,
-            final MVStore.ChunkIdsCollector collector, final ThreadPoolExecutor executorService,
-            final AtomicInteger executingThreadCounter) {
-        ByteBuffer buff;
-        int maxLength = DataUtils.getPageMaxLength(pos);
-        if (maxLength == DataUtils.PAGE_LARGE) {
-            buff = fileStore.readFully(filePos, 128);
-            maxLength = buff.getInt();
-            // read the first bytes again
-        }
-        maxLength = (int) Math.min(maxPos - filePos, maxLength);
-        int length = maxLength;
-        if (length < 0) {
-            throw DataUtils.newIllegalStateException(DataUtils.ERROR_FILE_CORRUPT,
-                    "Illegal page length {0} reading at {1}; max pos {2} ", length, filePos, maxPos);
-        }
-        buff = fileStore.readFully(filePos, length);
-        int chunkId = DataUtils.getPageChunkId(pos);
-        int offset = DataUtils.getPageOffset(pos);
-        int start = buff.position();
-        int pageLength = buff.getInt();
-        if (pageLength > maxLength) {
-            throw DataUtils.newIllegalStateException(DataUtils.ERROR_FILE_CORRUPT,
-                    "File corrupted in chunk {0}, expected page length =< {1}, got {2}", chunkId, maxLength,
-                    pageLength);
-        }
-        buff.limit(start + pageLength);
-        short check = buff.getShort();
-        int m = DataUtils.readVarInt(buff);
-        int mapId = collector.getMapId();
-        if (m != mapId) {
-            throw DataUtils.newIllegalStateException(DataUtils.ERROR_FILE_CORRUPT,
-                    "File corrupted in chunk {0}, expected map id {1}, got {2}", chunkId, mapId, m);
-        }
-        int checkTest = DataUtils.getCheckValue(chunkId) ^ DataUtils.getCheckValue(offset)
-                ^ DataUtils.getCheckValue(pageLength);
-        if (check != (short) checkTest) {
-            throw DataUtils.newIllegalStateException(DataUtils.ERROR_FILE_CORRUPT,
-                    "File corrupted in chunk {0}, expected check value {1}, got {2}", chunkId, checkTest, check);
-        }
+    static void readChildrenPositions(ByteBuffer buff, long pos,
+            final MVStore.ChunkIdsCollector collector,
+            final ThreadPoolExecutor executorService,
+            final AtomicInteger executingThreadCounter) {
         int len = DataUtils.readVarInt(buff);
         int type = buff.get();
         if ((type & 1) != DataUtils.PAGE_TYPE_NODE) {
             throw DataUtils.newIllegalStateException(DataUtils.ERROR_FILE_CORRUPT,
                     "Position {0} expected to be a non-leaf", pos);
         }
-        /**
+        /*
         * The logic here is a little awkward. We want to (a) execute reads in parallel, but (b)
         * limit the number of threads we create. This is complicated by (a) the algorithm is
         * recursive and needs to wait for children before returning up the call-stack, (b) checking
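The retained comment describes the constraint on the parallel walk: reads should fan out across threads, but the recursion must never block waiting for a pool slot that may not come free. A standalone sketch of the usual resolution, reserve a slot with a counter or fall back to running synchronously on the caller's thread (an assumed simplification, not the exact MVStore logic):

```java
import java.util.concurrent.RejectedExecutionException;
import java.util.concurrent.SynchronousQueue;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicInteger;

final class BoundedWalkSketch {
    static void visit(Runnable work, ThreadPoolExecutor executor, AtomicInteger inUse) {
        int max = executor.getMaximumPoolSize();
        int n;
        do {
            n = inUse.get();
            if (n >= max) {
                work.run();  // no slot free: do it on this thread, no waiting
                return;
            }
        } while (!inUse.compareAndSet(n, n + 1));
        try {
            executor.execute(() -> {
                try {
                    work.run();
                } finally {
                    inUse.decrementAndGet();  // release the reserved slot
                }
            });
        } catch (RejectedExecutionException e) {
            inUse.decrementAndGet();
            work.run();  // pool refused the task: degrade to synchronous
        }
    }

    public static void main(String[] args) {
        ThreadPoolExecutor pool = new ThreadPoolExecutor(
                2, 2, 10, TimeUnit.SECONDS, new SynchronousQueue<>());
        AtomicInteger inUse = new AtomicInteger();
        for (int i = 0; i < 8; i++) {
            int n = i;
            visit(() -> System.out.println("work " + n), pool, inUse);
        }
        pool.shutdown();
    }
}
```

The synchronous fallback is what keeps the recursive traversal deadlock-free: a caller holding a slot never waits on another slot.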
@@ -496,7 +449,7 @@ public abstract class Page implements Cloneable
      * @param key the key
      * @return the value or null
      */
-    public int binarySearch(Object key) {
+    int binarySearch(Object key) {
         int low = 0, high = keys.length - 1;
         // the cached index minus one, so that
         // for the first time (when cachedCompare is 0),
@@ -532,8 +485,8 @@ public abstract class Page implements Cloneable
     final Object[] splitKeys(int aCount, int bCount) {
         assert aCount + bCount <= getKeyCount();
-        Object aKeys[] = createKeyStorage(aCount);
-        Object bKeys[] = createKeyStorage(bCount);
+        Object[] aKeys = createKeyStorage(aCount);
+        Object[] bKeys = createKeyStorage(bCount);
         System.arraycopy(keys, 0, aKeys, 0, aCount);
         System.arraycopy(keys, getKeyCount() - bCount, bKeys, 0, bCount);
         keys = aKeys;
@@ -639,7 +592,7 @@ public abstract class Page implements Cloneable
             Object old = getKey(index);
             addMemory(-MEMORY_POINTER - keyType.getMemory(old));
         }
-        Object newKeys[] = new Object[keyCount - 1];
+        Object[] newKeys = new Object[keyCount - 1];
         DataUtils.copyExcept(keys, newKeys, keyCount, index);
         keys = newKeys;
     }
@@ -649,36 +602,9 @@ public abstract class Page implements Cloneable
      *
      * @param buff the buffer
      * @param chunkId the chunk id
-     * @param offset the offset within the chunk
-     * @param maxLength the maximum length
      */
-    private void read(ByteBuffer buff, int chunkId, int offset, int maxLength) {
-        int start = buff.position();
-        int pageLength = buff.getInt();
-        if (pageLength > maxLength || pageLength < 4) {
-            throw DataUtils.newIllegalStateException(
-                    DataUtils.ERROR_FILE_CORRUPT,
-                    "File corrupted in chunk {0}, expected page length 4..{1}, got {2}",
-                    chunkId, maxLength, pageLength);
-        }
-        buff.limit(start + pageLength);
-        short check = buff.getShort();
-        int mapId = DataUtils.readVarInt(buff);
-        if (mapId != map.getId()) {
-            throw DataUtils.newIllegalStateException(
-                    DataUtils.ERROR_FILE_CORRUPT,
-                    "File corrupted in chunk {0}, expected map id {1}, got {2}",
-                    chunkId, map.getId(), mapId);
-        }
-        int checkTest = DataUtils.getCheckValue(chunkId)
-                ^ DataUtils.getCheckValue(offset)
-                ^ DataUtils.getCheckValue(pageLength);
-        if (check != (short) checkTest) {
-            throw DataUtils.newIllegalStateException(
-                    DataUtils.ERROR_FILE_CORRUPT,
-                    "File corrupted in chunk {0}, expected check value {1}, got {2}",
-                    chunkId, checkTest, check);
-        }
+    private void read(ByteBuffer buff, int chunkId) {
+        int pageLength = buff.remaining() + 4; // size of int, since we've read page length already
         int len = DataUtils.readVarInt(buff);
         keys = new Object[len];
         int type = buff.get();
@@ -701,7 +627,7 @@ public abstract class Page implements Cloneable
             compressor = map.getStore().getCompressorFast();
         }
         int lenAdd = DataUtils.readVarInt(buff);
-        int compLen = pageLength + start - buff.position();
+        int compLen = buff.remaining();
         byte[] comp = Utils.newBytes(compLen);
         buff.get(comp);
         int l = compLen + lenAdd;
@@ -713,7 +639,7 @@ public abstract class Page implements Cloneable
         if (isLeaf()) {
             readPayLoad(buff);
         }
-        diskSpaceUsed = maxLength;
+        diskSpaceUsed = pageLength;
         recalculateMemory();
     }
@@ -873,7 +799,7 @@ public abstract class Page implements Cloneable
         memory += mem;
     }
 
-    protected final void recalculateMemory() {
+    final void recalculateMemory() {
         assert isPersistent();
         memory = calculateMemory();
     }
@@ -1005,13 +931,13 @@ public abstract class Page implements Cloneable
             super(map);
         }
 
-        private NonLeaf(MVMap<?, ?> map, NonLeaf source, PageReference children[], long totalCount) {
+        private NonLeaf(MVMap<?, ?> map, NonLeaf source, PageReference[] children, long totalCount) {
             super(map, source);
             this.children = children;
             this.totalCount = totalCount;
         }
 
-        NonLeaf(MVMap<?, ?> map, Object keys[], PageReference children[], long totalCount) {
+        NonLeaf(MVMap<?, ?> map, Object[] keys, PageReference[] children, long totalCount) {
             super(map, keys);
             this.children = children;
             this.totalCount = totalCount;
@@ -1058,11 +984,10 @@ public abstract class Page implements Cloneable
         }
 
         @Override
-        @SuppressWarnings("SuspiciousSystemArraycopy")
        public Page split(int at) {
             assert !isSaved();
             int b = getKeyCount() - at;
-            Object bKeys[] = splitKeys(at, b - 1);
+            Object[] bKeys = splitKeys(at, b - 1);
             PageReference[] aChildren = new PageReference[at + 1];
             PageReference[] bChildren = new PageReference[b];
             System.arraycopy(children, 0, aChildren, 0, at + 1);
@@ -1078,7 +1003,7 @@ public abstract class Page implements Cloneable
             for (PageReference x : bChildren) {
                 t += x.count;
             }
-            Page newPage = create(map, bKeys, null, bChildren, t, 0);
+            Page newPage = createNode(map, bKeys, bChildren, t, 0);
             if(isPersistent()) {
                 recalculateMemory();
             }
@@ -1132,7 +1057,7 @@ public abstract class Page implements Cloneable
             int childCount = getRawChildPageCount();
             insertKey(index, key);
 
-            PageReference newChildren[] = new PageReference[childCount + 1];
+            PageReference[] newChildren = new PageReference[childCount + 1];
             DataUtils.copyWithGap(children, newChildren, childCount, index);
             children = newChildren;
             children[index] = new PageReference(childPage);
@@ -1151,7 +1076,7 @@ public abstract class Page implements Cloneable
                 addMemory(-MEMORY_POINTER - PAGE_MEMORY_CHILD);
             }
             totalCount -= children[index].count;
-            PageReference newChildren[] = new PageReference[childCount - 1];
+            PageReference[] newChildren = new PageReference[childCount - 1];
             DataUtils.copyExcept(children, newChildren, childCount, index);
             children = newChildren;
         }
@@ -1190,7 +1115,7 @@ public abstract class Page implements Cloneable
         protected void readPayLoad(ByteBuffer buff) {
             int keyCount = getKeyCount();
             children = new PageReference[keyCount + 1];
-            long p[] = new long[keyCount + 1];
+            long[] p = new long[keyCount + 1];
             for (int i = 0; i <= keyCount; i++) {
                 p[i] = buff.getLong();
             }
@@ -1282,7 +1207,7 @@ public abstract class Page implements Cloneable
         /**
         * The storage for values.
         */
-        private Object values[];
+        private Object[] values;
 
         Leaf(MVMap<?, ?> map) {
             super(map);
@@ -1293,7 +1218,7 @@ public abstract class Page implements Cloneable
             this.values = source.values;
         }
 
-        Leaf(MVMap<?, ?> map, Object keys[], Object values[]) {
+        Leaf(MVMap<?, ?> map, Object[] keys, Object[] values) {
             super(map, keys);
             this.values = values;
         }
@@ -1327,19 +1252,18 @@ public abstract class Page implements Cloneable
         }
 
         @Override
-        @SuppressWarnings("SuspiciousSystemArraycopy")
        public Page split(int at) {
             assert !isSaved();
             int b = getKeyCount() - at;
-            Object bKeys[] = splitKeys(at, b);
-            Object bValues[] = createValueStorage(b);
+            Object[] bKeys = splitKeys(at, b);
+            Object[] bValues = createValueStorage(b);
             if(values != null) {
-                Object aValues[] = createValueStorage(at);
+                Object[] aValues = createValueStorage(at);
                 System.arraycopy(values, 0, aValues, 0, at);
                 System.arraycopy(values, at, bValues, 0, b);
                 values = aValues;
             }
-            Page newPage = create(map, bKeys, bValues, null, b, 0);
+            Page newPage = createLeaf(map, bKeys, bValues, 0);
             if(isPersistent()) {
                 recalculateMemory();
             }
@@ -1384,7 +1308,7 @@ public abstract class Page implements Cloneable
             insertKey(index, key);
 
             if(values != null) {
-                Object newValues[] = createValueStorage(keyCount + 1);
+                Object[] newValues = createValueStorage(keyCount + 1);
                 DataUtils.copyWithGap(values, newValues, keyCount, index);
                 values = newValues;
                 setValueInternal(index, value);
@@ -1407,7 +1331,7 @@ public abstract class Page implements Cloneable
                 Object old = getValue(index);
                 addMemory(-MEMORY_POINTER - map.getValueType().getMemory(old));
             }
-            Object newValues[] = createValueStorage(keyCount - 1);
+            Object[] newValues = createValueStorage(keyCount - 1);
             DataUtils.copyExcept(values, newValues, keyCount, index);
             values = newValues;
         }

...
@@ -154,7 +154,7 @@ public final class MVRTreeMap<V> extends MVMap<SpatialKey, V> {
                     new Page.PageReference(split),
                     Page.PageReference.EMPTY
             };
-            p = Page.create(this, keys, null, children, totalCount, 0);
+            p = Page.createNode(this, keys, children, totalCount, 0);
             if(store.getFileStore() != null) {
                 store.registerUnsavedPage(p.getMemory());
             }

...
@@ -182,10 +182,6 @@ public class Transaction {
         int currentStatus = getStatus(currentState);
         boolean valid;
         switch (status) {
-            case STATUS_OPEN:
-                valid = currentStatus == STATUS_CLOSED ||
-                        currentStatus == STATUS_ROLLING_BACK;
-                break;
             case STATUS_ROLLING_BACK:
                 valid = currentStatus == STATUS_OPEN;
                 break;
@@ -207,6 +203,7 @@ public class Transaction {
                 valid = currentStatus == STATUS_COMMITTED ||
                         currentStatus == STATUS_ROLLED_BACK;
                 break;
+            case STATUS_OPEN:
             default:
                 valid = false;
                 break;

...
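Reviewer note: with the `STATUS_OPEN` case removed from the top of the switch and re-added just above `default`, OPEN is no longer a reachable target of a status change (previously it was allowed from CLOSED or ROLLING_BACK); it now falls through to `valid = false` like any unknown status. Condensed view of the resulting rule (sketch with local constants, values illustrative):

```java
// Sketch: transition validity after this change; OPEN is an entry state only.
final class TxStatusSketch {
    static final int STATUS_CLOSED = 0, STATUS_OPEN = 1, STATUS_ROLLING_BACK = 5;

    static boolean validTarget(int currentStatus, int status) {
        switch (status) {
            case STATUS_ROLLING_BACK:
                return currentStatus == STATUS_OPEN;
            // ... other targets as in the surrounding diff ...
            case STATUS_OPEN:
            default:
                return false;
        }
    }
}
```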