Commit b52978ad authored by Evgenij Ryazanov

Merge branch 'master' into misc

@@ -993,7 +993,7 @@ public class MVMap<K, V> extends AbstractMap<K, V>
* @param memory the number of bytes used for this page
*/
protected final void removePage(long pos, int memory) {
- store.removePage(this, pos, memory);
+ store.removePage(pos, memory);
}
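The map argument could be dropped because a saved page position encodes, on its own, everything the store needs to locate and account for the page. A minimal sketch using only the DataUtils accessors that appear elsewhere in this diff (variable names are illustrative):

long pos = ...;                              // a saved page position
int chunkId = DataUtils.getPageChunkId(pos); // chunk that holds the page
int offset = DataUtils.getPageOffset(pos);   // byte offset within the chunk
int maxLen = DataUtils.getPageMaxLength(pos); // encoded length class, or DataUtils.PAGE_LARGE
int type = DataUtils.getPageType(pos);       // PAGE_TYPE_LEAF or PAGE_TYPE_NODE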
/**
@@ -1190,10 +1190,10 @@ public class MVMap<K, V> extends AbstractMap<K, V>
int attempt = 0;
int keyCount;
while((keyCount = rootReference.getAppendCounter()) > 0) {
- Page page = Page.create(this,
+ Page page = Page.createLeaf(this,
Arrays.copyOf(keysBuffer, keyCount),
Arrays.copyOf(valuesBuffer, keyCount),
- null, keyCount, 0);
+ 0);
CursorPos pos = rootReference.root.getAppendCursorPos(null);
assert page.map == this;
assert pos != null;
@@ -1215,7 +1215,7 @@ public class MVMap<K, V> extends AbstractMap<K, V>
Page.PageReference children[] = new Page.PageReference[] {
new Page.PageReference(p),
new Page.PageReference(page)};
- p = Page.create(this, keys, null, children, p.getTotalCount() + page.getTotalCount(), 0);
+ p = Page.createNode(this, keys, children, p.getTotalCount() + page.getTotalCount(), 0);
}
break;
}
@@ -1784,7 +1784,7 @@ public class MVMap<K, V> extends AbstractMap<K, V>
new Page.PageReference(p),
new Page.PageReference(split)
};
- p = Page.create(this, keys, null, children, totalCount, 0);
+ p = Page.createNode(this, keys, children, totalCount, 0);
break;
}
Page c = p;
......
@@ -164,7 +164,8 @@ public class MVStore {
private volatile boolean closed;
- final FileStore fileStore;
+ private final FileStore fileStore;
private final boolean fileStoreIsProvided;
private final int pageSplitSize;
@@ -970,31 +971,75 @@ public class MVStore {
}
}
+ ByteBuffer readBufferForPage(long pos, int expectedMapId) {
+ Chunk c = getChunk(pos);
+ long filePos = c.block * BLOCK_SIZE;
+ filePos += DataUtils.getPageOffset(pos);
+ if (filePos < 0) {
+ throw DataUtils.newIllegalStateException(
+ DataUtils.ERROR_FILE_CORRUPT,
+ "Negative position {0}; p={1}, c={2}", filePos, pos, c.toString());
+ }
+ long maxPos = (c.block + c.len) * BLOCK_SIZE;
+ ByteBuffer buff;
+ int maxLength = DataUtils.getPageMaxLength(pos);
+ if (maxLength == DataUtils.PAGE_LARGE) {
+ buff = fileStore.readFully(filePos, 128);
+ maxLength = buff.getInt();
+ // read the first bytes again
+ }
+ maxLength = (int) Math.min(maxPos - filePos, maxLength);
+ int length = maxLength;
+ if (length < 0) {
+ throw DataUtils.newIllegalStateException(DataUtils.ERROR_FILE_CORRUPT,
+ "Illegal page length {0} reading at {1}; max pos {2} ", length, filePos, maxPos);
+ }
+ buff = fileStore.readFully(filePos, length);
+ int chunkId = DataUtils.getPageChunkId(pos);
+ int offset = DataUtils.getPageOffset(pos);
+ int start = buff.position();
+ int remaining = buff.remaining();
+ int pageLength = buff.getInt();
+ if (pageLength > remaining || pageLength < 4) {
+ throw DataUtils.newIllegalStateException(DataUtils.ERROR_FILE_CORRUPT,
+ "File corrupted in chunk {0}, expected page length 4..{1}, got {2}", chunkId, remaining,
+ pageLength);
+ }
+ buff.limit(start + pageLength);
+ short check = buff.getShort();
+ int mapId = DataUtils.readVarInt(buff);
+ if (mapId != expectedMapId) {
+ throw DataUtils.newIllegalStateException(DataUtils.ERROR_FILE_CORRUPT,
+ "File corrupted in chunk {0}, expected map id {1}, got {2}", chunkId, expectedMapId, mapId);
+ }
+ int checkTest = DataUtils.getCheckValue(chunkId)
+ ^ DataUtils.getCheckValue(offset)
+ ^ DataUtils.getCheckValue(pageLength);
+ if (check != (short) checkTest) {
+ throw DataUtils.newIllegalStateException(DataUtils.ERROR_FILE_CORRUPT,
+ "File corrupted in chunk {0}, expected check value {1}, got {2}", chunkId, checkTest, check);
+ }
+ return buff;
+ }
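For orientation, the header that readBufferForPage verifies before handing the buffer to its caller has this layout (reconstructed from the reads above; a descriptive sketch, not a normative format spec):

// int    pageLength - total length of the serialized page in bytes
// short  check      - DataUtils.getCheckValue(chunkId) ^ getCheckValue(offset) ^ getCheckValue(pageLength)
// varint mapId      - id of the owning map
// payload...        - left in the buffer for Page.read / Page.readChildrenPositions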
/**
* Get the chunk for the given position.
*
* @param pos the position
* @return the chunk
*/
- Chunk getChunk(long pos) {
- Chunk c = getChunkIfFound(pos);
- if (c == null) {
- int chunkId = DataUtils.getPageChunkId(pos);
- throw DataUtils.newIllegalStateException(
- DataUtils.ERROR_CHUNK_NOT_FOUND,
- "Chunk {0} not found", chunkId);
- }
- return c;
- }
- private Chunk getChunkIfFound(long pos) {
+ private Chunk getChunk(long pos) {
int chunkId = DataUtils.getPageChunkId(pos);
Chunk c = chunks.get(chunkId);
if (c == null) {
checkOpen();
String s = meta.get(Chunk.getMetaKey(chunkId));
if (s == null) {
- return null;
+ throw DataUtils.newIllegalStateException(
+ DataUtils.ERROR_CHUNK_NOT_FOUND,
+ "Chunk {0} not found", chunkId);
}
c = Chunk.fromString(s);
if (c.block == Long.MAX_VALUE) {
@@ -1412,9 +1457,7 @@ public class MVStore {
}
public Set<Integer> getReferenced() {
- Set<Integer> set = new HashSet<>();
- set.addAll(referencedChunks.keySet());
- return set;
+ return new HashSet<>(referencedChunks.keySet());
}
public void visit(Page page, ThreadPoolExecutor executorService, AtomicInteger executingThreadCounter) {
@@ -1426,7 +1469,8 @@ public class MVStore {
if (count == 0) {
return;
}
- final ChunkIdsCollector childCollector = new ChunkIdsCollector(this);
+ ChunkIdsCollector childCollector = DataUtils.isPageSaved(pos) && cacheChunkRef != null ?
+ new ChunkIdsCollector(this) : this;
for (int i = 0; i < count; i++) {
Page childPage = page.getChildPageIfLoaded(i);
if (childPage != null) {
@@ -1435,11 +1479,7 @@ public class MVStore {
childCollector.visit(page.getChildPagePos(i), executorService, executingThreadCounter);
}
}
- // and cache resulting set of chunk ids
- if (DataUtils.isPageSaved(pos) && cacheChunkRef != null) {
- int[] chunkIds = childCollector.getChunkIds();
- cacheChunkRef.put(pos, chunkIds, Constants.MEMORY_ARRAY + 4 * chunkIds.length);
- }
+ cacheCollectedChunkIds(pos, childCollector);
}
public void visit(long pos, ThreadPoolExecutor executorService, AtomicInteger executingThreadCounter) {
@@ -1450,36 +1490,24 @@ public class MVStore {
if (DataUtils.getPageType(pos) == DataUtils.PAGE_TYPE_LEAF) {
return;
}
- int chunkIds[];
+ int[] chunkIds;
if (cacheChunkRef != null && (chunkIds = cacheChunkRef.get(pos)) != null) {
// there is a cached set of chunk ids for this position
for (int chunkId : chunkIds) {
registerChunk(chunkId);
}
} else {
- final ChunkIdsCollector childCollector = new ChunkIdsCollector(this);
+ ChunkIdsCollector childCollector = cacheChunkRef != null ? new ChunkIdsCollector(this) : this;
Page page;
if (cache != null && (page = cache.get(pos)) != null) {
// there is a full page in cache, use it
childCollector.visit(page, executorService, executingThreadCounter);
} else {
// page was not cached: read the data
- Chunk chunk = getChunk(pos);
- long filePos = chunk.block * BLOCK_SIZE;
- filePos += DataUtils.getPageOffset(pos);
- if (filePos < 0) {
- throw DataUtils.newIllegalStateException(DataUtils.ERROR_FILE_CORRUPT,
- "Negative position {0}; p={1}, c={2}", filePos, pos, chunk.toString());
- }
- long maxPos = (chunk.block + chunk.len) * BLOCK_SIZE;
- Page.readChildrenPositions(fileStore, pos, filePos, maxPos,
- childCollector, executorService, executingThreadCounter);
+ ByteBuffer buff = readBufferForPage(pos, getMapId());
+ Page.readChildrenPositions(buff, pos, childCollector, executorService, executingThreadCounter);
}
- // and cache resulting set of chunk ids
- if (cacheChunkRef != null) {
- chunkIds = childCollector.getChunkIds();
- cacheChunkRef.put(pos, chunkIds, Constants.MEMORY_ARRAY + 4 * chunkIds.length);
- }
+ cacheCollectedChunkIds(pos, childCollector);
}
}
@@ -1489,13 +1517,15 @@ public class MVStore {
}
}
- private int[] getChunkIds() {
- int chunkIds[] = new int[referencedChunks.size()];
- int index = 0;
- for (Integer chunkId : referencedChunks.keySet()) {
- chunkIds[index++] = chunkId;
+ private void cacheCollectedChunkIds(long pos, ChunkIdsCollector childCollector) {
+ if (childCollector != this) {
+ int[] chunkIds = new int[childCollector.referencedChunks.size()];
+ int index = 0;
+ for (Integer chunkId : childCollector.referencedChunks.keySet()) {
+ chunkIds[index++] = chunkId;
+ }
+ cacheChunkRef.put(pos, chunkIds, Constants.MEMORY_ARRAY + 4 * chunkIds.length);
}
- return chunkIds;
}
}
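When there is nothing to cache, the collector now passes itself down instead of allocating a child collector per level, and cacheCollectedChunkIds uses an identity check (childCollector != this) to decide whether a flattened id array must be stored. Restated as a sketch (condition as in the Page variant of visit above):

ChunkIdsCollector target = DataUtils.isPageSaved(pos) && cacheChunkRef != null
        ? new ChunkIdsCollector(this) // ids will be flattened and cached for this pos
        : this;                       // accumulate in place; the identity check later skips the put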
@@ -2052,16 +2082,8 @@ public class MVStore {
}
Page p = cache == null ? null : cache.get(pos);
if (p == null) {
- Chunk c = getChunk(pos);
- long filePos = c.block * BLOCK_SIZE;
- filePos += DataUtils.getPageOffset(pos);
- if (filePos < 0) {
- throw DataUtils.newIllegalStateException(
- DataUtils.ERROR_FILE_CORRUPT,
- "Negative position {0}; p={1}, c={2}", filePos, pos, c.toString());
- }
- long maxPos = (c.block + c.len) * BLOCK_SIZE;
- p = Page.read(fileStore, pos, map, filePos, maxPos);
+ ByteBuffer buff = readBufferForPage(pos, map.getId());
+ p = Page.read(buff, pos, map);
cachePage(p);
}
return p;
@@ -2070,11 +2092,10 @@ public class MVStore {
/**
* Remove a page.
*
- * @param map the map the page belongs to
* @param pos the position of the page
* @param memory the memory usage
*/
- void removePage(MVMap<?, ?> map, long pos, int memory) {
+ void removePage(long pos, int memory) {
// we need to keep temporary pages,
// to support reading old versions and rollback
if (!DataUtils.isPageSaved(pos)) {
@@ -2086,19 +2107,6 @@ public class MVStore {
return;
}
- // This could result in a cache miss if the operation is rolled back,
- // but we don't optimize for rollback.
- // We could also keep the page in the cache, as somebody
- // could still read it (reading the old version).
- /*
- if (cache != null) {
- if (DataUtils.getPageType(pos) == DataUtils.PAGE_TYPE_LEAF) {
- // keep nodes in the cache, because they are still used for
- // garbage collection
- cache.remove(pos);
- }
- }
- */
int chunkId = DataUtils.getPageChunkId(pos);
// synchronize, because pages could be freed concurrently
synchronized (freedPageSpace) {
@@ -2541,13 +2549,12 @@ public class MVStore {
String oldName = getMapName(id);
if (oldName != null && !oldName.equals(newName)) {
String idHexStr = Integer.toHexString(id);
- // at first create a new name as an "alias"
- String existingIdHexStr = meta.putIfAbsent("name." + newName, idHexStr);
+ // we need to cope with the case of previously unfinished rename
+ String existingIdHexStr = meta.get("name." + newName);
DataUtils.checkArgument(
existingIdHexStr == null || existingIdHexStr.equals(idHexStr),
"A map named {0} already exists", newName);
+ // at first create a new name as an "alias"
+ meta.put("name." + newName, idHexStr);
// switch roles of a new and old names - old one is an alias now
meta.put(MVMap.getMapKey(id), map.asString(newName));
// get rid of the old name completely
......
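This change makes rename restartable after a crash. The sequence as it now stands (identifiers from the hunk above; the final removal is elided by the diff and shown here as the obvious meta.remove, an assumption):

meta.put("name." + newName, idHexStr);                 // 1. bind the new name as an alias
meta.put(MVMap.getMapKey(id), map.asString(newName));  // 2. make the new name canonical; the old name becomes the alias
meta.remove("name." + oldName);                        // 3. drop the old alias (assumed)

If the process dies after step 1, a retry finds "name." + newName already bound; because the check now uses meta.get instead of putIfAbsent, a binding to the same id passes the checkArgument and the rename resumes where it left off.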
@@ -131,7 +131,7 @@ public abstract class Page implements Cloneable
memory = source.memory;
}
- Page(MVMap<?, ?> map, Object keys[]) {
+ Page(MVMap<?, ?> map, Object[] keys) {
this.map = map;
this.keys = keys;
}
@@ -143,37 +143,46 @@ public abstract class Page implements Cloneable
* @return the new page
*/
static Page createEmptyLeaf(MVMap<?, ?> map) {
- Page page = new Leaf(map, EMPTY_OBJECT_ARRAY, EMPTY_OBJECT_ARRAY);
- page.initMemoryAccount(PAGE_LEAF_MEMORY);
- return page;
+ return createLeaf(map, EMPTY_OBJECT_ARRAY, EMPTY_OBJECT_ARRAY, PAGE_LEAF_MEMORY);
}
- public static Page createEmptyNode(MVMap<?, ?> map) {
- Page page = new NonLeaf(map, EMPTY_OBJECT_ARRAY, SINGLE_EMPTY, 0);
- page.initMemoryAccount(PAGE_NODE_MEMORY +
- MEMORY_POINTER + PAGE_MEMORY_CHILD); // there is always one child
- return page;
+ static Page createEmptyNode(MVMap<?, ?> map) {
+ return createNode(map, EMPTY_OBJECT_ARRAY, SINGLE_EMPTY, 0,
+ PAGE_NODE_MEMORY + MEMORY_POINTER + PAGE_MEMORY_CHILD); // there is always one child
}
/**
- * Create a new page. The arrays are not cloned.
+ * Create a new non-leaf page. The arrays are not cloned.
*
* @param map the map
* @param keys the keys
- * @param values the values
* @param children the child page positions
* @param totalCount the total number of keys
* @param memory the memory used in bytes
* @return the page
*/
- public static Page create(MVMap<?, ?> map,
- Object[] keys, Object[] values, PageReference[] children,
- long totalCount, int memory) {
+ public static Page createNode(MVMap<?, ?> map, Object[] keys, PageReference[] children,
+ long totalCount, int memory) {
assert keys != null;
- Page p = children == null ? new Leaf(map, keys, values) :
- new NonLeaf(map, keys, children, totalCount);
- p.initMemoryAccount(memory);
- return p;
+ Page page = new NonLeaf(map, keys, children, totalCount);
+ page.initMemoryAccount(memory);
+ return page;
}
+ /**
+ * Create a new leaf page. The arrays are not cloned.
+ *
+ * @param map the map
+ * @param keys the keys
+ * @param values the values
+ * @param memory the memory used in bytes
+ * @return the page
+ */
+ public static Page createLeaf(MVMap<?, ?> map, Object[] keys, Object[] values, int memory) {
+ assert keys != null;
+ Page page = new Leaf(map, keys, values);
+ page.initMemoryAccount(memory);
+ return page;
+ }
private void initMemoryAccount(int memoryCount) {
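A minimal usage sketch of the two factories (hypothetical one-entry arrays; memory is passed as 0, the same value the split() call sites below pass):

Object[] keys = { key };   // hypothetical key/value
Object[] values = { value };
Page leaf = Page.createLeaf(map, keys, values, 0);
Page.PageReference[] children = { new Page.PageReference(leaf), Page.PageReference.EMPTY };
Page node = Page.createNode(map, keys, children, leaf.getTotalCount(), 0);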
@@ -210,37 +219,17 @@ public abstract class Page implements Cloneable
/**
* Read a page.
*
- * @param fileStore the file store
+ * @param buff ByteBuffer containing serialized page info
* @param pos the position
* @param map the map
- * @param filePos the position in the file
- * @param maxPos the maximum position (the end of the chunk)
* @return the page
*/
- static Page read(FileStore fileStore, long pos, MVMap<?, ?> map,
- long filePos, long maxPos) {
- ByteBuffer buff;
- int maxLength = DataUtils.getPageMaxLength(pos);
- if (maxLength == DataUtils.PAGE_LARGE) {
- buff = fileStore.readFully(filePos, 128);
- maxLength = buff.getInt();
- // read the first bytes again
- }
- maxLength = (int) Math.min(maxPos - filePos, maxLength);
- int length = maxLength;
- if (length < 0) {
- throw DataUtils.newIllegalStateException(
- DataUtils.ERROR_FILE_CORRUPT,
- "Illegal page length {0} reading at {1}; max pos {2} ",
- length, filePos, maxPos);
- }
- buff = fileStore.readFully(filePos, length);
+ static Page read(ByteBuffer buff, long pos, MVMap<?, ?> map) {
boolean leaf = (DataUtils.getPageType(pos) & 1) == PAGE_TYPE_LEAF;
Page p = leaf ? new Leaf(map) : new NonLeaf(map);
p.pos = pos;
int chunkId = DataUtils.getPageChunkId(pos);
- int offset = DataUtils.getPageOffset(pos);
- p.read(buff, chunkId, offset, maxLength);
+ p.read(buff, chunkId);
return p;
}
@@ -248,59 +237,23 @@ public abstract class Page implements Cloneable
* Read an inner node page from the buffer, but ignore the keys and
* values.
*
- * @param fileStore the file store
+ * @param buff ByteBuffer containing serialized page info
* @param pos the position
- * @param filePos the position in the file
- * @param maxPos the maximum position (the end of the chunk)
* @param collector to report child page positions to
* @param executorService to use for parallel processing
* @param executingThreadCounter for parallel processing
*/
- static void readChildrenPositions(FileStore fileStore, long pos, long filePos, long maxPos,
- final MVStore.ChunkIdsCollector collector, final ThreadPoolExecutor executorService,
- final AtomicInteger executingThreadCounter) {
- ByteBuffer buff;
- int maxLength = DataUtils.getPageMaxLength(pos);
- if (maxLength == DataUtils.PAGE_LARGE) {
- buff = fileStore.readFully(filePos, 128);
- maxLength = buff.getInt();
- // read the first bytes again
- }
- maxLength = (int) Math.min(maxPos - filePos, maxLength);
- int length = maxLength;
- if (length < 0) {
- throw DataUtils.newIllegalStateException(DataUtils.ERROR_FILE_CORRUPT,
- "Illegal page length {0} reading at {1}; max pos {2} ", length, filePos, maxPos);
- }
- buff = fileStore.readFully(filePos, length);
- int chunkId = DataUtils.getPageChunkId(pos);
- int offset = DataUtils.getPageOffset(pos);
- int start = buff.position();
- int pageLength = buff.getInt();
- if (pageLength > maxLength) {
- throw DataUtils.newIllegalStateException(DataUtils.ERROR_FILE_CORRUPT,
- "File corrupted in chunk {0}, expected page length =< {1}, got {2}", chunkId, maxLength,
- pageLength);
- }
- buff.limit(start + pageLength);
- short check = buff.getShort();
- int m = DataUtils.readVarInt(buff);
- int mapId = collector.getMapId();
- if (m != mapId) {
- throw DataUtils.newIllegalStateException(DataUtils.ERROR_FILE_CORRUPT,
- "File corrupted in chunk {0}, expected map id {1}, got {2}", chunkId, mapId, m);
- }
- int checkTest = DataUtils.getCheckValue(chunkId) ^ DataUtils.getCheckValue(offset)
- ^ DataUtils.getCheckValue(pageLength);
- if (check != (short) checkTest) {
- throw DataUtils.newIllegalStateException(DataUtils.ERROR_FILE_CORRUPT,
- "File corrupted in chunk {0}, expected check value {1}, got {2}", chunkId, checkTest, check);
- }
+ static void readChildrenPositions(ByteBuffer buff, long pos,
+ final MVStore.ChunkIdsCollector collector,
+ final ThreadPoolExecutor executorService,
+ final AtomicInteger executingThreadCounter) {
int len = DataUtils.readVarInt(buff);
int type = buff.get();
if ((type & 1) != DataUtils.PAGE_TYPE_NODE) {
throw DataUtils.newIllegalStateException(DataUtils.ERROR_FILE_CORRUPT,
"Position {0} expected to be a non-leaf", pos);
}
- /**
+ /*
* The logic here is a little awkward. We want to (a) execute reads in parallel, but (b)
* limit the number of threads we create. This is complicated by (a) the algorithm is
* recursive and needs to wait for children before returning up the call-stack, (b) checking
@@ -496,7 +449,7 @@ public abstract class Page implements Cloneable
* @param key the key
* @return the value or null
*/
- public int binarySearch(Object key) {
+ int binarySearch(Object key) {
int low = 0, high = keys.length - 1;
// the cached index minus one, so that
// for the first time (when cachedCompare is 0),
@@ -532,8 +485,8 @@ public abstract class Page implements Cloneable
final Object[] splitKeys(int aCount, int bCount) {
assert aCount + bCount <= getKeyCount();
- Object aKeys[] = createKeyStorage(aCount);
- Object bKeys[] = createKeyStorage(bCount);
+ Object[] aKeys = createKeyStorage(aCount);
+ Object[] bKeys = createKeyStorage(bCount);
System.arraycopy(keys, 0, aKeys, 0, aCount);
System.arraycopy(keys, getKeyCount() - bCount, bKeys, 0, bCount);
keys = aKeys;
@@ -639,7 +592,7 @@ public abstract class Page implements Cloneable
Object old = getKey(index);
addMemory(-MEMORY_POINTER - keyType.getMemory(old));
}
- Object newKeys[] = new Object[keyCount - 1];
+ Object[] newKeys = new Object[keyCount - 1];
DataUtils.copyExcept(keys, newKeys, keyCount, index);
keys = newKeys;
}
@@ -649,36 +602,9 @@ public abstract class Page implements Cloneable
*
* @param buff the buffer
* @param chunkId the chunk id
- * @param offset the offset within the chunk
- * @param maxLength the maximum length
*/
- private void read(ByteBuffer buff, int chunkId, int offset, int maxLength) {
- int start = buff.position();
- int pageLength = buff.getInt();
- if (pageLength > maxLength || pageLength < 4) {
- throw DataUtils.newIllegalStateException(
- DataUtils.ERROR_FILE_CORRUPT,
- "File corrupted in chunk {0}, expected page length 4..{1}, got {2}",
- chunkId, maxLength, pageLength);
- }
- buff.limit(start + pageLength);
- short check = buff.getShort();
- int mapId = DataUtils.readVarInt(buff);
- if (mapId != map.getId()) {
- throw DataUtils.newIllegalStateException(
- DataUtils.ERROR_FILE_CORRUPT,
- "File corrupted in chunk {0}, expected map id {1}, got {2}",
- chunkId, map.getId(), mapId);
- }
- int checkTest = DataUtils.getCheckValue(chunkId)
- ^ DataUtils.getCheckValue(offset)
- ^ DataUtils.getCheckValue(pageLength);
- if (check != (short) checkTest) {
- throw DataUtils.newIllegalStateException(
- DataUtils.ERROR_FILE_CORRUPT,
- "File corrupted in chunk {0}, expected check value {1}, got {2}",
- chunkId, checkTest, check);
- }
+ private void read(ByteBuffer buff, int chunkId) {
+ int pageLength = buff.remaining() + 4; // size of int, since we've read page length already
int len = DataUtils.readVarInt(buff);
keys = new Object[len];
int type = buff.get();
@@ -701,7 +627,7 @@ public abstract class Page implements Cloneable
compressor = map.getStore().getCompressorFast();
}
int lenAdd = DataUtils.readVarInt(buff);
- int compLen = pageLength + start - buff.position();
+ int compLen = buff.remaining();
byte[] comp = Utils.newBytes(compLen);
buff.get(comp);
int l = compLen + lenAdd;
@@ -713,7 +639,7 @@ public abstract class Page implements Cloneable
if (isLeaf()) {
readPayLoad(buff);
}
- diskSpaceUsed = maxLength;
+ diskSpaceUsed = pageLength;
recalculateMemory();
}
@@ -873,7 +799,7 @@ public abstract class Page implements Cloneable
memory += mem;
}
- protected final void recalculateMemory() {
+ final void recalculateMemory() {
assert isPersistent();
memory = calculateMemory();
}
@@ -1005,13 +931,13 @@ public abstract class Page implements Cloneable
super(map);
}
- private NonLeaf(MVMap<?, ?> map, NonLeaf source, PageReference children[], long totalCount) {
+ private NonLeaf(MVMap<?, ?> map, NonLeaf source, PageReference[] children, long totalCount) {
super(map, source);
this.children = children;
this.totalCount = totalCount;
}
- NonLeaf(MVMap<?, ?> map, Object keys[], PageReference children[], long totalCount) {
+ NonLeaf(MVMap<?, ?> map, Object[] keys, PageReference[] children, long totalCount) {
super(map, keys);
this.children = children;
this.totalCount = totalCount;
@@ -1058,11 +984,10 @@ public abstract class Page implements Cloneable
}
@Override
- @SuppressWarnings("SuspiciousSystemArraycopy")
public Page split(int at) {
assert !isSaved();
int b = getKeyCount() - at;
- Object bKeys[] = splitKeys(at, b - 1);
+ Object[] bKeys = splitKeys(at, b - 1);
PageReference[] aChildren = new PageReference[at + 1];
PageReference[] bChildren = new PageReference[b];
System.arraycopy(children, 0, aChildren, 0, at + 1);
@@ -1078,7 +1003,7 @@ public abstract class Page implements Cloneable
for (PageReference x : bChildren) {
t += x.count;
}
- Page newPage = create(map, bKeys, null, bChildren, t, 0);
+ Page newPage = createNode(map, bKeys, bChildren, t, 0);
if(isPersistent()) {
recalculateMemory();
}
@@ -1132,7 +1057,7 @@ public abstract class Page implements Cloneable
int childCount = getRawChildPageCount();
insertKey(index, key);
- PageReference newChildren[] = new PageReference[childCount + 1];
+ PageReference[] newChildren = new PageReference[childCount + 1];
DataUtils.copyWithGap(children, newChildren, childCount, index);
children = newChildren;
children[index] = new PageReference(childPage);
@@ -1151,7 +1076,7 @@ public abstract class Page implements Cloneable
addMemory(-MEMORY_POINTER - PAGE_MEMORY_CHILD);
}
totalCount -= children[index].count;
- PageReference newChildren[] = new PageReference[childCount - 1];
+ PageReference[] newChildren = new PageReference[childCount - 1];
DataUtils.copyExcept(children, newChildren, childCount, index);
children = newChildren;
}
@@ -1190,7 +1115,7 @@ public abstract class Page implements Cloneable
protected void readPayLoad(ByteBuffer buff) {
int keyCount = getKeyCount();
children = new PageReference[keyCount + 1];
- long p[] = new long[keyCount + 1];
+ long[] p = new long[keyCount + 1];
for (int i = 0; i <= keyCount; i++) {
p[i] = buff.getLong();
}
@@ -1282,7 +1207,7 @@ public abstract class Page implements Cloneable
/**
* The storage for values.
*/
- private Object values[];
+ private Object[] values;
Leaf(MVMap<?, ?> map) {
super(map);
@@ -1293,7 +1218,7 @@ public abstract class Page implements Cloneable
this.values = source.values;
}
- Leaf(MVMap<?, ?> map, Object keys[], Object values[]) {
+ Leaf(MVMap<?, ?> map, Object[] keys, Object[] values) {
super(map, keys);
this.values = values;
}
@@ -1327,19 +1252,18 @@ public abstract class Page implements Cloneable
}
@Override
- @SuppressWarnings("SuspiciousSystemArraycopy")
public Page split(int at) {
assert !isSaved();
int b = getKeyCount() - at;
- Object bKeys[] = splitKeys(at, b);
- Object bValues[] = createValueStorage(b);
+ Object[] bKeys = splitKeys(at, b);
+ Object[] bValues = createValueStorage(b);
if(values != null) {
- Object aValues[] = createValueStorage(at);
+ Object[] aValues = createValueStorage(at);
System.arraycopy(values, 0, aValues, 0, at);
System.arraycopy(values, at, bValues, 0, b);
values = aValues;
}
- Page newPage = create(map, bKeys, bValues, null, b, 0);
+ Page newPage = createLeaf(map, bKeys, bValues, 0);
if(isPersistent()) {
recalculateMemory();
}
@@ -1384,7 +1308,7 @@ public abstract class Page implements Cloneable
insertKey(index, key);
if(values != null) {
- Object newValues[] = createValueStorage(keyCount + 1);
+ Object[] newValues = createValueStorage(keyCount + 1);
DataUtils.copyWithGap(values, newValues, keyCount, index);
values = newValues;
setValueInternal(index, value);
@@ -1407,7 +1331,7 @@ public abstract class Page implements Cloneable
Object old = getValue(index);
addMemory(-MEMORY_POINTER - map.getValueType().getMemory(old));
}
- Object newValues[] = createValueStorage(keyCount - 1);
+ Object[] newValues = createValueStorage(keyCount - 1);
DataUtils.copyExcept(values, newValues, keyCount, index);
values = newValues;
}
......
@@ -154,7 +154,7 @@ public final class MVRTreeMap<V> extends MVMap<SpatialKey, V> {
new Page.PageReference(split),
Page.PageReference.EMPTY
};
- p = Page.create(this, keys, null, children, totalCount, 0);
+ p = Page.createNode(this, keys, children, totalCount, 0);
if(store.getFileStore() != null) {
store.registerUnsavedPage(p.getMemory());
}
......
@@ -182,10 +182,6 @@ public class Transaction {
int currentStatus = getStatus(currentState);
boolean valid;
switch (status) {
- case STATUS_OPEN:
- valid = currentStatus == STATUS_CLOSED ||
- currentStatus == STATUS_ROLLING_BACK;
- break;
case STATUS_ROLLING_BACK:
valid = currentStatus == STATUS_OPEN;
break;
@@ -207,6 +203,7 @@ public class Transaction {
valid = currentStatus == STATUS_COMMITTED ||
currentStatus == STATUS_ROLLED_BACK;
break;
+ case STATUS_OPEN:
default:
valid = false;
break;
......