Commit 06214924 authored by Thomas Mueller

Page store bugfixes

Parent 6dc3587d
......@@ -452,6 +452,7 @@ See also <a href="build.html#providing_patches">Providing Patches</a>.
</li><li>Support reading sequences using DatabaseMetaData.getTables(null, null, null, new String[]{"SEQUENCE"}).
See PostgreSQL.
</li><li>Add option to enable TCP_NODELAY using Socket.setTcpNoDelay(true).
</li><li>Maybe disallow = within database names (jdbc:h2:mem:MODE=DB2 means database name MODE=DB2).
</li></ul>
<h2>Not Planned</h2>
......
......@@ -21,9 +21,9 @@ import org.h2.store.PageStore;
/**
* A leaf page that contains data of one or multiple rows. Format:
* <ul>
* <li>page type: byte</li>
* <li>checksum: short</li>
* <li>parent page id (0 for root): int</li>
* <li>page type: byte (0)</li>
* <li>checksum: short (1-2)</li>
* <li>parent page id (0 for root): int (3-6)</li>
* <li>table id: varInt</li>
* <li>column count: varInt</li>
* <li>entry count: short</li>
......@@ -34,6 +34,11 @@ import org.h2.store.PageStore;
*/
public class PageDataLeaf extends PageData {
/**
* The offset of the parent page id within the page data.
*/
static final int START_PARENT = 3;
/**
* The row offsets.
*/
......@@ -217,6 +222,11 @@ public class PageDataLeaf extends PageData {
all.checkCapacity(data.length());
all.write(data.getBytes(), 0, data.length());
data.truncate(index.getPageStore().getPageSize());
// write the page to disk now, to avoid problems later when
// this page would have to be written before its overflow pages
// are written to disk (the cache first removes elements, moves
// them into a write queue, and only then writes them)
write(null);
do {
int type, size, next;
if (remaining <= pageSize - PageDataOverflow.START_LAST) {
......@@ -498,4 +508,18 @@ public class PageDataLeaf extends PageData {
return index.getMemorySizePerPage();
}
void setParentPageId(int id) {
// never reset the written flag, not only for speed, but also
// because it would cause the page to be written again if it
// contains overflow, which would require the data to be read,
// and that is not possible because the overflow page may no
// longer be in the cache but already in the write queue
if (written) {
data.setInt(START_PARENT, id);
this.parentPageId = id;
} else {
super.setParentPageId(id);
}
}
}
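
The two comments above (writing the leaf eagerly before its overflow pages, and not resetting the written flag in setParentPageId) both come down to how the page cache flushes: evicted entries first leave the cache and sit in a write queue before they reach disk. A minimal, self-contained sketch of that behaviour, with hypothetical names rather than H2's real Cache/CacheWriter classes:

import java.util.ArrayDeque;
import java.util.Deque;
import java.util.LinkedHashMap;
import java.util.Map;

// Hypothetical sketch, not H2 code: a cache whose eviction moves pages into a
// write queue first and writes them later. Once a leaf page with overflow sits
// in that queue, flushing it can no longer re-read its overflow page from the
// cache, so the leaf must already have been written (and must not be marked
// dirty again).
public class WriteQueueSketch {

    static class Page {
        final int id;
        final boolean hasOverflow;
        boolean written;
        Page(int id, boolean hasOverflow) {
            this.id = id;
            this.hasOverflow = hasOverflow;
        }
    }

    private final Map<Integer, Page> cache = new LinkedHashMap<>();
    private final Deque<Page> writeQueue = new ArrayDeque<>();

    void put(Page p) {
        cache.put(p.id, p);
    }

    // eviction step 1: the page leaves the cache before it is written
    void evict(int id) {
        Page p = cache.remove(id);
        if (p != null) {
            writeQueue.add(p);
        }
    }

    // eviction step 2: the queue is flushed later; at this point a page that
    // still needed data from another page could not find it in the cache
    void flush() {
        for (Page p = writeQueue.poll(); p != null; p = writeQueue.poll()) {
            if (p.hasOverflow && !p.written) {
                System.out.println("page " + p.id
                        + ": overflow data no longer reachable - must have been written earlier");
            } else {
                System.out.println("writing page " + p.id);
            }
        }
    }

    public static void main(String[] args) {
        WriteQueueSketch c = new WriteQueueSketch();
        Page leaf = new Page(1, true);      // leaf that spilled into an overflow page
        leaf.written = true;                // written eagerly, as in the change above
        c.put(leaf);
        c.put(new Page(2, false));          // the overflow page itself
        c.evict(2);                         // overflow leaves the cache first
        c.evict(1);
        c.flush();
    }
}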
......@@ -21,9 +21,9 @@ import org.h2.util.MemoryUtils;
/**
* A leaf page that contains data of one or multiple rows. Format:
* <ul>
* <li>page type: byte</li>
* <li>checksum: short</li>
* <li>parent page id (0 for root): int</li>
* <li>page type: byte (0)</li>
* <li>checksum: short (1-2)</li>
* <li>parent page id (0 for root): int (3-6)</li>
* <li>table id: varInt</li>
* <li>count of all children (-1 if not known): int</li>
* <li>entry count: short</li>
......
......@@ -967,6 +967,9 @@ public class PageStore implements CacheWriter {
log.recover(PageLog.RECOVERY_STAGE_UNDO);
if (reservedPages != null) {
for (int r : reservedPages.keySet()) {
if (trace.isDebugEnabled()) {
trace.debug("reserve " + r);
}
allocatePage(r);
}
}
......@@ -1164,8 +1167,8 @@ public class PageStore implements CacheWriter {
private void removeMeta(int logPos, Row row) throws SQLException {
int id = row.getValue(0).getInt();
Index index = metaObjects.get(id);
int headPos = index.getHeadPos();
PageIndex index = (PageIndex) metaObjects.get(id);
int rootPageId = index.getRootPageId();
index.getTable().removeIndex(index);
if (index instanceof PageBtreeIndex) {
if (index.isTemporary()) {
......@@ -1178,11 +1181,11 @@ public class PageStore implements CacheWriter {
}
index.remove(systemSession);
metaObjects.remove(id);
if (reservedPages != null && reservedPages.containsKey(headPos)) {
if (reservedPages != null && reservedPages.containsKey(rootPageId)) {
// re-allocate the page if it is used later on again
int latestPos = reservedPages.get(headPos);
int latestPos = reservedPages.get(rootPageId);
if (latestPos > logPos) {
allocatePage(headPos);
allocatePage(rootPageId);
}
}
}
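
A small sketch of the reserved-pages bookkeeping the removeMeta change relies on (illustrative names, not the actual PageStore fields): each reserved page id maps to the latest log position that still refers to it, and a root page freed at logPos must be re-allocated if a later log record re-uses it.

import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;

// Hypothetical sketch of the "re-allocate the page if it is used later on
// again" logic above; not the real PageStore implementation.
public class ReservedPagesSketch {

    // page id -> latest log position that still needs this page
    private final Map<Integer, Integer> reservedPages = new HashMap<>();
    private final Set<Integer> allocated = new HashSet<>();

    void allocatePage(int pageId) {
        allocated.add(pageId);
    }

    // called when an index is removed while replaying the log at logPos
    void removeIndexRoot(int logPos, int rootPageId) {
        allocated.remove(rootPageId);
        Integer latestPos = reservedPages.get(rootPageId);
        if (latestPos != null && latestPos > logPos) {
            // a later log record re-uses this page id, so keep it reserved
            allocatePage(rootPageId);
        }
    }

    public static void main(String[] args) {
        ReservedPagesSketch store = new ReservedPagesSketch();
        store.allocatePage(42);
        store.reservedPages.put(42, 120);  // page 42 referenced again at log position 120
        store.removeIndexRoot(100, 42);    // index removed earlier, at position 100
        System.out.println(store.allocated.contains(42));  // true: still reserved
    }
}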
......@@ -1201,6 +1204,7 @@ public class PageStore implements CacheWriter {
trace.debug("addMeta id=" + id + " type=" + type + " parent=" + parent + " columns=" + columnList);
}
if (redo && rootPageId != 0) {
// ensure the page is empty, but keep it allocated so it is not used for regular data
writePage(rootPageId, createData());
allocatePage(rootPageId);
}
......
......@@ -973,9 +973,16 @@ public class Recover extends Tool implements DataHandler {
if (x < 0) {
break;
}
if (x == PageLog.UNDO) {
if (x == PageLog.NOOP) {
// ignore
} else if (x == PageLog.UNDO) {
int pageId = in.readVarInt();
int size = in.readVarInt();
if (size == 0) {
in.readFully(new byte[pageSize], 0, pageSize);
} else {
in.readFully(new byte[size], 0, size);
}
writer.println("-- undo page " + pageId);
} else if (x == PageLog.ADD) {
int sessionId = in.readVarInt();
......@@ -1019,7 +1026,7 @@ public class Recover extends Tool implements DataHandler {
}
writer.println(buff);
} else {
writer.println("-- end " + x);
writer.println("-- ERROR: unknown operation " + x);
break;
}
}
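
Based on the reading code above, an UNDO record apparently consists of a varInt page id, a varInt size, and then either a full uncompressed page image (size == 0) or size bytes of compressed data. A hedged sketch of skipping such a record; the varInt decoding shown is an assumption about the encoding, not taken from H2's Data class:

import java.io.ByteArrayInputStream;
import java.io.DataInputStream;
import java.io.IOException;

// Hypothetical sketch of reading one UNDO record as implied by the code above;
// class and method names are illustrative, not part of the Recover tool.
public class UndoRecordSketch {

    static void skipUndoRecord(DataInputStream in, int pageSize) throws IOException {
        int pageId = readVarInt(in);
        int size = readVarInt(in);
        // size == 0 means a full uncompressed page image follows,
        // otherwise `size` bytes of compressed page data
        byte[] data = new byte[size == 0 ? pageSize : size];
        in.readFully(data);
        System.out.println("-- undo page " + pageId + " (" + data.length + " bytes)");
    }

    // assumed little-endian base-128 varInt: 7 data bits per byte,
    // high bit set means another byte follows
    static int readVarInt(DataInputStream in) throws IOException {
        int result = 0;
        for (int shift = 0; shift < 35; shift += 7) {
            int b = in.readUnsignedByte();
            result |= (b & 0x7f) << shift;
            if ((b & 0x80) == 0) {
                break;
            }
        }
        return result;
    }

    public static void main(String[] args) throws IOException {
        // page id 5, size 3, followed by 3 bytes of "compressed" data
        byte[] record = {0x05, 0x03, 0x01, 0x02, 0x03};
        skipUndoRecord(new DataInputStream(new ByteArrayInputStream(record)), 2048);
    }
}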
......@@ -1101,7 +1108,7 @@ public class Recover extends Tool implements DataHandler {
return;
}
try {
if (dataPages.size() == 0) {
while (dataPages.size() == 0) {
if (trunkPage == 0) {
endOfFile = true;
return;
......@@ -1109,19 +1116,25 @@ public class Recover extends Tool implements DataHandler {
store.seek((long) trunkPage * pageSize);
store.readFully(page.getBytes(), 0, pageSize);
page.reset();
if (!PageStore.checksumTest(page.getBytes(), trunkPage, pageSize)) {
writer.println("-- ERROR: checksum mismatch page: " +trunkPage);
endOfFile = true;
return;
}
int t = page.readByte();
page.readInt();
page.readShortInt();
if (t != Page.TYPE_STREAM_TRUNK) {
writer.println("-- eof page: " +trunkPage + " type: " + t + " expected type: " + Page.TYPE_STREAM_TRUNK);
writer.println("-- eof page: " + trunkPage + " type: " + t + " expected type: " + Page.TYPE_STREAM_TRUNK);
endOfFile = true;
return;
}
trunkPage = page.readInt();
page.readInt();
int key = page.readInt();
logKey++;
if (key != logKey) {
writer.println("-- eof page: " +trunkPage + " type: " + t + " expected key: " + logKey + " got: " + key);
writer.println("-- eof page: " + trunkPage + " type: " + t + " expected key: " + logKey + " got: " + key);
}
trunkPage = page.readInt();
int pageCount = page.readShortInt();
for (int i = 0; i < pageCount; i++) {
int d = page.readInt();
......@@ -1136,13 +1149,20 @@ public class Recover extends Tool implements DataHandler {
dataPages.add(d);
}
}
if (dataPages.size() > 0) {
page.reset();
int nextPage = dataPages.get(0);
dataPages.remove(0);
store.seek((long) nextPage * pageSize);
store.readFully(page.getBytes(), 0, pageSize);
page.reset();
if (!PageStore.checksumTest(page.getBytes(), nextPage, pageSize)) {
writer.println("-- ERROR: checksum mismatch page: " +nextPage);
endOfFile = true;
return;
}
int t = page.readByte();
page.readShortInt();
int p = page.readInt();
int k = page.readInt();
if (t != Page.TYPE_STREAM_DATA) {
......@@ -1156,7 +1176,8 @@ public class Recover extends Tool implements DataHandler {
endOfFile = true;
return;
}
remaining = page.readInt();
remaining = pageSize - page.length();
}
} catch (SQLException e) {
throw Message.convertToIOException(e);
}
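
The if-to-while change above matters when a trunk page lists no data pages at all: with if, the reader would fall through with an empty dataPages list instead of following the chain. A tiny self-contained illustration with a fake trunk chain (hypothetical data, not the real PageStore page format):

import java.util.ArrayDeque;
import java.util.Deque;
import java.util.Map;

// Hypothetical sketch of following a trunk-page chain until a data page is
// found; with `if` instead of `while`, trunk 3 below would leave dataPages empty.
public class TrunkChainSketch {

    // fake chain: trunk page id -> { next trunk page id, data page ids... }
    private static final Map<Integer, int[]> TRUNKS = Map.of(
            3, new int[] { 5 },          // trunk 3: next = 5, no data pages
            5, new int[] { 0, 7, 8 });   // trunk 5: next = 0 (end), data pages 7 and 8

    public static void main(String[] args) {
        Deque<Integer> dataPages = new ArrayDeque<>();
        int trunkPage = 3;
        while (dataPages.isEmpty()) {    // an `if` here would stop after trunk 3
            if (trunkPage == 0) {
                System.out.println("-- end of file");
                return;
            }
            int[] trunk = TRUNKS.get(trunkPage);
            trunkPage = trunk[0];
            for (int i = 1; i < trunk.length; i++) {
                dataPages.add(trunk[i]);
            }
        }
        System.out.println("next data page: " + dataPages.poll());
    }
}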
......