Commit 796e386a authored by Thomas Mueller

New experimental page store.

Parent 69f5efd2
......@@ -56,6 +56,11 @@ abstract class PageBtree extends Record {
*/
protected int start;
/**
* If only the position of the row is stored in the page.
*/
protected boolean onlyPosition;
/**
* If the page was already written to the buffer.
*/
......@@ -123,13 +128,13 @@ abstract class PageBtree extends Record {
abstract void read() throws SQLException;
/**
* Add a row.
* Try to add a row.
*
* @param row the row
* @return 0 if successful, or the split position if the page needs to be
* split
*/
abstract int addRow(SearchRow row) throws SQLException;
abstract int addRowTry(SearchRow row) throws SQLException;
/**
* Find the first row.
......@@ -147,13 +152,9 @@ abstract class PageBtree extends Record {
* @return the row
*/
SearchRow getRow(int at) throws SQLException {
int test;
if (at < 0) {
System.out.println("stop");
}
SearchRow row = rows[at];
if (row == null) {
row = index.readRow(data, offsets[at]);
row = index.readRow(data, offsets[at], onlyPosition);
rows[at] = row;
}
return row;
......
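Note: the new onlyPosition field is the core of this change. When an index row is too large to store inline, only its 4-byte position is kept in the b-tree page, and the full row is fetched from the table data on demand (see readRow below). A minimal sketch of that lookup path, with illustrative names rather than H2's real classes:

import java.util.HashMap;
import java.util.Map;

class OnlyPositionSketch {

    // stand-in for the table's primary data, keyed by row position
    static final Map<Integer, String> tableData = new HashMap<>();

    // if onlyPosition is set, the index page holds no column data and
    // the row must be fetched from the table; otherwise decode it inline
    static String readRow(int pos, String inlineValue, boolean onlyPosition) {
        return onlyPosition ? tableData.get(pos) : inlineValue;
    }

    public static void main(String[] args) {
        tableData.put(42, "row 42 read from the table");
        System.out.println(readRow(42, "row 42 read from the index", false));
        System.out.println(readRow(42, null, true));
    }
}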
......@@ -77,7 +77,7 @@ public class PageBtreeCursor implements Cursor {
return true;
}
public boolean previous() throws SQLException {
public boolean previous() {
i--;
int todo;
return true;
......
......@@ -11,7 +11,6 @@ import org.h2.constant.ErrorCode;
import org.h2.constant.SysProperties;
import org.h2.engine.Session;
import org.h2.message.Message;
import org.h2.message.TraceSystem;
import org.h2.result.Row;
import org.h2.result.SearchRow;
import org.h2.store.DataPage;
......@@ -94,7 +93,7 @@ public class PageBtreeIndex extends BaseIndex {
}
while (true) {
PageBtree root = getPage(headPos);
int splitPoint = root.addRow(row);
int splitPoint = root.addRowTry(row);
if (splitPoint == 0) {
break;
}
......@@ -275,7 +274,7 @@ public class PageBtreeIndex extends BaseIndex {
return rowCount;
}
public void close(Session session) throws SQLException {
public void close(Session session) {
if (trace.isDebugEnabled()) {
trace.debug("close");
}
......@@ -291,10 +290,14 @@ public class PageBtreeIndex extends BaseIndex {
* @param offset the offset
* @return the row
*/
SearchRow readRow(DataPage data, int offset) throws SQLException {
SearchRow readRow(DataPage data, int offset, boolean onlyPosition) throws SQLException {
data.setPos(offset);
int pos = data.readInt();
if (onlyPosition) {
return tableData.getRow(null, pos);
}
SearchRow row = table.getTemplateSimpleRow(columns.length == 1);
row.setPos(data.readInt());
row.setPos(pos);
for (Column col : columns) {
int idx = col.getColumnId();
row.setValue(idx, data.readValue());
......@@ -307,34 +310,36 @@ public class PageBtreeIndex extends BaseIndex {
*
* @param data the data
* @param offset the offset
* @param onlyPosition whether only the position of the row is stored
* @param row the row to write
*/
void writeRow(DataPage data, int offset, SearchRow row) throws SQLException {
if (offset < 0) {
int test;
System.out.println("stop");
}
void writeRow(DataPage data, int offset, SearchRow row, boolean onlyPosition) throws SQLException {
data.setPos(offset);
data.writeInt(row.getPos());
if (!onlyPosition) {
for (Column col : columns) {
int idx = col.getColumnId();
data.writeValue(row.getValue(idx));
}
}
}
/**
* Get the size of a row (only the part that is stored in the index).
*
* @param dummy a dummy data page to calculate the size
* @param row the row
* @param onlyPosition whether only the position of the row is stored
* @return the number of bytes
*/
int getRowSize(DataPage dummy, SearchRow row) throws SQLException {
int getRowSize(DataPage dummy, SearchRow row, boolean onlyPosition) throws SQLException {
int rowsize = DataPage.LENGTH_INT;
if (!onlyPosition) {
for (Column col : columns) {
Value v = row.getValue(col.getColumnId());
rowsize += dummy.getValueLen(v);
}
}
return rowsize;
}
......
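Note: addRow was renamed to addRowTry throughout because it no longer always succeeds: a return value of 0 means the row was stored, and any other value is the point at which the caller should split the page before retrying. A self-contained sketch of that contract (illustrative classes, not the real PageBtree API):

import java.util.ArrayList;
import java.util.List;

class AddRowTrySketch {

    static class Leaf {
        final List<String> rows = new ArrayList<>();
        final int capacity;

        Leaf(int capacity) {
            this.capacity = capacity;
        }

        // try to add; 0 means the row was stored, a non-zero value is
        // the suggested split point (same heuristic as the patch)
        int addRowTry(String row) {
            if (rows.size() >= capacity) {
                return (rows.size() / 2) + 1;
            }
            rows.add(row);
            return 0;
        }

        // move everything from splitPoint onward into a new sibling
        Leaf split(int splitPoint) {
            Leaf sibling = new Leaf(capacity);
            while (rows.size() > splitPoint) {
                sibling.rows.add(rows.remove(splitPoint));
            }
            return sibling;
        }
    }

    public static void main(String[] args) {
        Leaf page = new Leaf(4);
        for (int i = 0; i < 6; i++) {
            String row = "row" + i;
            int splitPoint = page.addRowTry(row);
            if (splitPoint != 0) {
                Leaf sibling = page.split(splitPoint); // split, then retry
                page.addRowTry(row);
                System.out.println("split at " + splitPoint + ": "
                        + page.rows + " | " + sibling.rows);
            }
        }
    }
}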
......@@ -14,30 +14,30 @@ import org.h2.store.DataPage;
import org.h2.store.PageStore;
/**
* A leaf page that contains index data.
* A b-tree leaf page that contains index data.
* Format:
* <ul><li>0-3: parent page id (0 for root)
* </li><li>4-4: page type
* </li><li>5-8: table id
* </li><li>9-10: entry count
* </li><li>overflow: 11-14: the row key
* </li><li>11-: list of key / offset pairs (4 bytes key, 2 bytes offset)
* </li><li>data
* </li></ul>
*/
class PageBtreeLeaf extends PageBtree {
private static final int KEY_OFFSET_PAIR_LENGTH = 6;
private static final int KEY_OFFSET_PAIR_START = 11;
private static final int OFFSET_LENGTH = 2;
private static final int OFFSET_START = 11;
PageBtreeLeaf(PageBtreeIndex index, int pageId, int parentPageId, DataPage data) {
super(index, pageId, parentPageId, data);
start = KEY_OFFSET_PAIR_START;
start = OFFSET_START;
}
void read() throws SQLException {
data.setPos(4);
data.readByte();
int type = data.readByte();
onlyPosition = (type & Page.FLAG_LAST) == 0;
int tableId = data.readInt();
if (tableId != index.getId()) {
throw Message.getSQLException(ErrorCode.FILE_CORRUPTED_1,
......@@ -60,14 +60,18 @@ class PageBtreeLeaf extends PageBtree {
* @param row the row to add
* @return the split point of this page, or 0 if no split is required
*/
int addRow(SearchRow row) throws SQLException {
int rowLength = index.getRowSize(data, row);
int addRowTry(SearchRow row) throws SQLException {
int rowLength = index.getRowSize(data, row, onlyPosition);
int pageSize = index.getPageStore().getPageSize();
int last = entryCount == 0 ? pageSize : offsets[entryCount - 1];
if (entryCount > 0 && last - rowLength < start + KEY_OFFSET_PAIR_LENGTH) {
if (last - rowLength < start + OFFSET_LENGTH) {
if (entryCount > 0) {
int todoSplitAtLastInsertionPoint;
return (entryCount / 2) + 1;
}
onlyPosition = true;
rowLength = index.getRowSize(data, row, onlyPosition);
}
written = false;
int offset = last - rowLength;
int[] newOffsets = new int[entryCount + 1];
......@@ -89,23 +93,12 @@ class PageBtreeLeaf extends PageBtree {
}
}
entryCount++;
start += KEY_OFFSET_PAIR_LENGTH;
start += OFFSET_LENGTH;
newOffsets[x] = offset;
newRows[x] = row;
offsets = newOffsets;
rows = newRows;
index.getPageStore().updateRecord(this, true, data);
if (offset < start) {
if (entryCount > 1) {
Message.throwInternalError();
}
// need to write the overflow page id
start += 4;
int remaining = rowLength - (pageSize - start);
// fix offset
offset = start;
offsets[x] = offset;
}
return 0;
}
......@@ -126,7 +119,7 @@ class PageBtreeLeaf extends PageBtree {
newOffsets[j] = offsets[j + 1] + rowLength;
}
System.arraycopy(rows, i + 1, newRows, i, entryCount - i);
start -= KEY_OFFSET_PAIR_LENGTH;
start -= OFFSET_LENGTH;
offsets = newOffsets;
rows = newRows;
}
......@@ -139,7 +132,7 @@ class PageBtreeLeaf extends PageBtree {
int newPageId = index.getPageStore().allocatePage();
PageBtreeLeaf p2 = new PageBtreeLeaf(index, newPageId, parentPageId, index.getPageStore().createDataPage());
for (int i = splitPoint; i < entryCount;) {
p2.addRow(getRow(splitPoint));
p2.addRowTry(getRow(splitPoint));
removeRow(splitPoint);
}
return p2;
......@@ -190,14 +183,14 @@ class PageBtreeLeaf extends PageBtree {
readAllRows();
data.reset();
data.writeInt(parentPageId);
data.writeByte((byte) Page.TYPE_BTREE_LEAF);
data.writeByte((byte) (Page.TYPE_BTREE_LEAF | (onlyPosition ? 0 : Page.FLAG_LAST)));
data.writeInt(index.getId());
data.writeShortInt(entryCount);
for (int i = 0; i < entryCount; i++) {
data.writeShortInt(offsets[i]);
}
for (int i = 0; i < entryCount; i++) {
index.writeRow(data, offsets[i], rows[i]);
index.writeRow(data, offsets[i], rows[i], onlyPosition);
}
written = true;
}
......@@ -234,7 +227,7 @@ class PageBtreeLeaf extends PageBtree {
return;
}
PageBtreeNode next = (PageBtreeNode) index.getPage(parentPageId);
next.nextPage(cursor, getRow(0));
next.nextPage(cursor, getPos());
}
public String toString() {
......
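Note: within a leaf, the offset list grows up from the header while row data grows down from the end of the page, so a row fits only while the two regions do not meet. A small sketch of that fit check (the page size and row lengths are made up for illustration):

class LeafFitSketch {

    static final int OFFSET_LENGTH = 2; // per-entry offset in the header

    // returns the data offset for the new row, or -1 if it does not fit;
    // rows grow down from the page end, the offset list grows up
    static int tryAllocate(int pageSize, int start, int[] offsets,
            int entryCount, int rowLength) {
        int last = entryCount == 0 ? pageSize : offsets[entryCount - 1];
        if (last - rowLength < start + OFFSET_LENGTH) {
            return -1; // caller splits, or falls back to onlyPosition
        }
        return last - rowLength;
    }

    public static void main(String[] args) {
        int[] offsets = new int[4];
        int start = 11; // OFFSET_START: bytes 0-10 are the page header
        int entryCount = 0;
        for (int rowLength : new int[] { 40, 40, 40 }) {
            int off = tryAllocate(128, start, offsets, entryCount, rowLength);
            System.out.println("offset: " + off);
            if (off < 0) {
                break; // page full after two 40-byte rows
            }
            offsets[entryCount++] = off;
            start += OFFSET_LENGTH;
        }
    }
}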
......@@ -7,13 +7,13 @@
package org.h2.index;
import java.sql.SQLException;
import org.h2.constant.ErrorCode;
import org.h2.message.Message;
import org.h2.result.SearchRow;
import org.h2.store.DataPage;
/**
* A leaf page that contains index data.
* A b-tree node page that contains index data.
* Data is organized as follows: [leaf 0] (largest value of leaf 0) [leaf 1]
* Format:
* <ul><li>0-3: parent page id
* </li><li>4-4: page type
......@@ -43,7 +43,9 @@ class PageBtreeNode extends PageBtree {
}
void read() {
data.setPos(5);
data.setPos(4);
int type = data.readByte();
onlyPosition = (type & Page.FLAG_LAST) == 0;
entryCount = data.readShortInt();
rowCount = rowCountStored = data.readInt();
childPageIds = new int[entryCount + 1];
......@@ -58,25 +60,39 @@ class PageBtreeNode extends PageBtree {
start = data.length();
}
private int addChildTry(SearchRow row) throws SQLException {
if (entryCount == 0) {
return 0;
}
int rowLength = index.getRowSize(data, row, onlyPosition);
int pageSize = index.getPageStore().getPageSize();
int last = entryCount == 0 ? pageSize : offsets[entryCount - 1];
if (last - rowLength < start + CHILD_OFFSET_PAIR_LENGTH) {
int todoSplitAtLastInsertionPoint;
return (entryCount / 2) + 1;
}
return 0;
}
/**
* Add a row if possible. If it is possible this method returns 0, otherwise
* Add a row. If it is possible this method returns 0, otherwise
* the split point. It is always possible to add one row.
*
* @param row the row to add
* @return the split point of this page, or 0 if no split is required
*/
private int addChild(int x, int childPageId, SearchRow row) throws SQLException {
int rowLength = index.getRowSize(data, row);
private void addChild(int x, int childPageId, SearchRow row) throws SQLException {
int rowLength = index.getRowSize(data, row, onlyPosition);
int pageSize = index.getPageStore().getPageSize();
int last = entryCount == 0 ? pageSize : offsets[entryCount - 1];
if (entryCount > 0 && last - rowLength < start + CHILD_OFFSET_PAIR_LENGTH) {
int todoSplitAtLastInsertionPoint;
return (entryCount / 2) + 1;
if (last - rowLength < start + CHILD_OFFSET_PAIR_LENGTH) {
if (entryCount > 0) {
throw Message.throwInternalError();
}
int offset = last - rowLength;
if(offset < 0) {
throw Message.getSQLException(ErrorCode.FEATURE_NOT_SUPPORTED_1, "Wide indexes");
onlyPosition = true;
rowLength = index.getRowSize(data, row, onlyPosition);
}
int offset = last - rowLength;
int[] newOffsets = new int[entryCount + 1];
SearchRow[] newRows = new SearchRow[entryCount + 1];
int[] newChildPageIds = new int[entryCount + 2];
......@@ -84,7 +100,6 @@ class PageBtreeNode extends PageBtree {
System.arraycopy(childPageIds, 0, newChildPageIds, 0, x + 1);
}
if (entryCount > 0) {
readAllRows();
System.arraycopy(offsets, 0, newOffsets, 0, x);
System.arraycopy(rows, 0, newRows, 0, x);
if (x < entryCount) {
......@@ -104,26 +119,25 @@ class PageBtreeNode extends PageBtree {
rows = newRows;
childPageIds = newChildPageIds;
entryCount++;
return 0;
}
int addRow(SearchRow row) throws SQLException {
int addRowTry(SearchRow row) throws SQLException {
while (true) {
int x = find(row, false, false);
PageBtree page = index.getPage(childPageIds[x]);
int splitPoint = page.addRow(row);
int splitPoint = page.addRowTry(row);
if (splitPoint == 0) {
break;
}
SearchRow pivot = page.getRow(splitPoint - 1);
int splitPoint2 = addChildTry(pivot);
if (splitPoint2 != 0) {
return splitPoint;
}
PageBtree page2 = page.split(splitPoint);
addChild(x, page2.getPageId(), pivot);
index.getPageStore().updateRecord(page, true, page.data);
index.getPageStore().updateRecord(page2, true, page2.data);
splitPoint = addChild(x, page2.getPageId(), pivot);
if (splitPoint != 0) {
int todoSplitAtLastInsertionPoint;
return splitPoint / 2;
}
index.getPageStore().updateRecord(this, true, data);
}
updateRowCount(1);
......@@ -202,7 +216,7 @@ class PageBtreeNode extends PageBtree {
boolean remove(SearchRow row) throws SQLException {
int at = find(row, false, false);
// merge is not implemented to allow concurrent usage of btrees
// merge is not implemented to allow concurrent usage
// TODO maybe implement merge
PageBtree page = index.getPage(childPageIds[at]);
boolean empty = page.remove(row);
......@@ -265,13 +279,10 @@ class PageBtreeNode extends PageBtree {
if (written) {
return;
}
// make sure rows are read
for (int i = 0; i < entryCount; i++) {
getRow(i);
}
readAllRows();
data.reset();
data.writeInt(parentPageId);
data.writeByte((byte) Page.TYPE_BTREE_NODE);
data.writeByte((byte) (Page.TYPE_BTREE_NODE | (onlyPosition ? 0 : Page.FLAG_LAST)));
data.writeShortInt(entryCount);
data.writeInt(rowCountStored);
data.writeInt(childPageIds[entryCount]);
......@@ -280,7 +291,7 @@ class PageBtreeNode extends PageBtree {
data.writeInt(offsets[i]);
}
for (int i = 0; i < entryCount; i++) {
index.writeRow(data, offsets[i], rows[i]);
index.writeRow(data, offsets[i], rows[i], onlyPosition);
}
written = true;
}
......@@ -319,16 +330,22 @@ class PageBtreeNode extends PageBtree {
* @param cursor the cursor
* @param row the current row
*/
void nextPage(PageBtreeCursor cursor, SearchRow row) throws SQLException {
int i = find(row, false, false) + 1;
void nextPage(PageBtreeCursor cursor, int pageId) throws SQLException {
int i;
// TODO maybe keep the index in the child page (transiently)
for (i = 0; i < childPageIds.length; i++) {
if (childPageIds[i] == pageId) {
i++;
break;
}
}
if (i > entryCount) {
if (parentPageId == Page.ROOT) {
cursor.setCurrent(null, 0);
return;
}
PageBtreeNode next = (PageBtreeNode) index.getPage(parentPageId);
SearchRow r = entryCount == 0 ? row : getRow(entryCount - 1);
next.nextPage(cursor, r);
next.nextPage(cursor, getPos());
return;
}
PageBtree page = index.getPage(childPageIds[i]);
......
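Note: the node-level insert now probes the parent with addChildTry before splitting a child, so a full node propagates its own split upward instead of failing after the child has already been split. An illustrative skeleton of that control flow (hypothetical Sketch* types, not H2's classes):

interface SketchRow {
    // marker for whatever a row is in this sketch
}

interface SketchPage {
    int addRowTry(SketchRow row); // 0 = stored, else split point
    SketchRow getRow(int at);
    SketchPage split(int splitPoint);
}

abstract class SketchNode implements SketchPage {

    abstract SketchPage childFor(SketchRow row);

    // 0 if the pivot would fit in this node, else this node's split point
    abstract int addChildTry(SketchRow pivot);

    abstract void addChild(SketchPage sibling, SketchRow pivot);

    public int addRowTry(SketchRow row) {
        while (true) {
            SketchPage child = childFor(row);
            int splitPoint = child.addRowTry(row);
            if (splitPoint == 0) {
                return 0; // stored in the child
            }
            // key point of the patch: before splitting the child, make sure
            // the pivot fits here; if not, report a split point so that the
            // parent splits this node first
            SketchRow pivot = child.getRow(splitPoint - 1);
            int mySplit = addChildTry(pivot);
            if (mySplit != 0) {
                return mySplit;
            }
            addChild(child.split(splitPoint), pivot);
        }
    }
}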
......@@ -8,7 +8,6 @@ package org.h2.index;
import java.sql.SQLException;
import org.h2.engine.Session;
import org.h2.result.Row;
import org.h2.store.DataPage;
import org.h2.store.Record;
......@@ -97,13 +96,13 @@ abstract class PageData extends Record {
abstract void read() throws SQLException;
/**
* Add a row.
* Try to add a row.
*
* @param row the row
* @return 0 if successful, or the split position if the page needs to be
* split
*/
abstract int addRow(Row row) throws SQLException;
abstract int addRowTry(Row row) throws SQLException;
/**
* Get a cursor.
......@@ -188,6 +187,6 @@ abstract class PageData extends Record {
* @param key the key
* @return the row
*/
abstract Row getRow(Session session, int key) throws SQLException;
abstract Row getRow(int key) throws SQLException;
}
......@@ -5,10 +5,9 @@
* Initial Developer: H2 Group
*/
package org.h2.index;
import java.sql.SQLException;
import java.sql.SQLException;
import org.h2.constant.ErrorCode;
import org.h2.engine.Session;
import org.h2.message.Message;
import org.h2.result.Row;
import org.h2.store.DataPage;
......@@ -89,7 +88,7 @@ class PageDataLeaf extends PageData {
* @param row the row to add
* @return the split point of this page, or 0 if no split is required
*/
int addRow(Row row) throws SQLException {
int addRowTry(Row row) throws SQLException {
int rowLength = row.getByteCount(data);
int pageSize = index.getPageStore().getPageSize();
int last = entryCount == 0 ? pageSize : offsets[entryCount - 1];
......@@ -245,7 +244,7 @@ class PageDataLeaf extends PageData {
int newPageId = index.getPageStore().allocatePage();
PageDataLeaf p2 = new PageDataLeaf(index, newPageId, parentPageId, index.getPageStore().createDataPage());
for (int i = splitPoint; i < entryCount;) {
p2.addRow(getRowAt(splitPoint));
p2.addRowTry(getRowAt(splitPoint));
removeRow(splitPoint);
}
return p2;
......@@ -297,7 +296,7 @@ class PageDataLeaf extends PageData {
return false;
}
Row getRow(Session session, int key) throws SQLException {
Row getRow(int key) throws SQLException {
int index = find(key);
return getRowAt(index);
}
......
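Note: the split loop above always removes at splitPoint; since each removal shifts the remaining entries down, the loop index never advances. A tiny sketch of that pattern:

import java.util.ArrayList;
import java.util.List;

class SplitSketch {

    // move all rows from splitPoint onward into a new page; removing at
    // splitPoint shifts the remainder down, so the index stays fixed
    static List<String> split(List<String> rows, int splitPoint) {
        List<String> sibling = new ArrayList<>();
        while (rows.size() > splitPoint) {
            sibling.add(rows.remove(splitPoint));
        }
        return sibling;
    }

    public static void main(String[] args) {
        List<String> rows = new ArrayList<>(List.of("a", "b", "c", "d"));
        System.out.println(split(rows, 2)); // [c, d]
        System.out.println(rows);           // [a, b]
    }
}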
......@@ -77,24 +77,24 @@ class PageDataNode extends PageData {
entryCount++;
}
int addRow(Row row) throws SQLException {
int addRowTry(Row row) throws SQLException {
while (true) {
int x = find(row.getPos());
PageData page = index.getPage(childPageIds[x]);
int splitPoint = page.addRow(row);
int splitPoint = page.addRowTry(row);
if (splitPoint == 0) {
break;
}
int pivot = page.getKey(splitPoint - 1);
PageData page2 = page.split(splitPoint);
index.getPageStore().updateRecord(page, true, page.data);
index.getPageStore().updateRecord(page2, true, page2.data);
addChild(x, page2.getPageId(), pivot);
int maxEntries = (index.getPageStore().getPageSize() - ENTRY_START) / ENTRY_LENGTH;
if (entryCount >= maxEntries) {
int todoSplitAtLastInsertionPoint;
return entryCount / 2;
}
int pivot = page.getKey(splitPoint - 1);
PageData page2 = page.split(splitPoint);
index.getPageStore().updateRecord(page, true, page.data);
index.getPageStore().updateRecord(page2, true, page2.data);
addChild(x, page2.getPageId(), pivot);
index.getPageStore().updateRecord(this, true, data);
}
updateRowCount(1);
......@@ -205,10 +205,10 @@ class PageDataNode extends PageData {
return false;
}
Row getRow(Session session, int key) throws SQLException {
Row getRow(int key) throws SQLException {
int at = find(key);
PageData page = index.getPage(childPageIds[at]);
return page.getRow(session, key);
return page.getRow(key);
}
int getRowCount() throws SQLException {
......
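Note: getRow dropped its Session parameter because a lookup by key is a pure descent through the tree. A skeleton of that descent, using assumed stand-in types rather than the real PageData classes:

abstract class DataPageSketch {

    abstract boolean isLeaf();

    abstract DataPageSketch childFor(int key); // node: child covering the key

    abstract String rowAt(int key); // leaf: local lookup

    // iterative descent from the root; no session state is needed
    final String getRow(int key) {
        DataPageSketch p = this;
        while (!p.isLeaf()) {
            p = p.childFor(key);
        }
        return p.rowAt(key);
    }
}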
......@@ -108,7 +108,7 @@ public class PageScanIndex extends BaseIndex implements RowIndex {
}
while (true) {
PageData root = getPage(headPos);
int splitPoint = root.addRow(row);
int splitPoint = root.addRowTry(row);
if (splitPoint == 0) {
break;
}
......@@ -260,7 +260,7 @@ public class PageScanIndex extends BaseIndex implements RowIndex {
public Row getRow(Session session, int key) throws SQLException {
PageData root = getPage(headPos);
return root.getRow(session, key);
return root.getRow(key);
}
PageStore getPageStore() {
......
......@@ -66,13 +66,14 @@ import org.h2.value.ValueString;
*/
public class PageStore implements CacheWriter {
// TODO currently working on PageBtreeNode Wide indexes
// TODO implement redo log in Recover tool
// TODO TestPowerOff
// TODO PageStore.openMetaIndex (desc and nulls first / last)
// TODO PageBtreeIndex.canGetFirstOrLast
// TODO btree index with fixed size values doesn't need offset and so on
// TODO better checksums (for example, multiple fletcher)
// TODO replace CRC32
// TODO PageBtreeNode: 4 bytes offset - others use only 2
// TODO PageBtreeLeaf: why table id
// TODO log block allocation
// TODO block compression: maybe http://en.wikipedia.org/wiki/LZJB
// with RLE, specially for 0s.
......@@ -103,6 +104,7 @@ public class PageStore implements CacheWriter {
// and delay on each commit
// TODO var int: see google protocol buffers
// TODO SessionState.logId is no longer needed
// TODO PageData and PageBtree addRowTry: try to simplify
/**
* The smallest possible page size.
......
......@@ -629,4 +629,8 @@ public class Column {
return primaryKey;
}
public String toString() {
return name;
}
}
......@@ -21,6 +21,7 @@ import java.util.ArrayList;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.zip.CRC32;
import org.h2.command.Parser;
import org.h2.constant.SysProperties;
import org.h2.engine.Constants;
......@@ -44,6 +45,7 @@ import org.h2.store.PageStore;
import org.h2.util.ByteUtils;
import org.h2.util.FileUtils;
import org.h2.util.IOUtils;
import org.h2.util.IntArray;
import org.h2.util.MathUtils;
import org.h2.util.New;
import org.h2.util.ObjectArray;
......@@ -329,8 +331,8 @@ public class Recover extends Tool implements DataHandler {
}
private void writeDataError(PrintWriter writer, String error, byte[] data, int dumpBlocks) {
writer.println("-- ERROR: " + error + " block:" + block + " blockCount:" + blockCount + " storageId:"
+ storageId + " recordLength:" + recordLength + " valueId:" + valueId);
writer.println("-- ERROR: " + error + " block: " + block + " blockCount: " + blockCount + " storageId: "
+ storageId + " recordLength: " + recordLength + " valueId: " + valueId);
StringBuilder sb = new StringBuilder();
for (int i = 0; i < dumpBlocks * DiskFile.BLOCK_SIZE; i++) {
int x = data[i] & 0xff;
......@@ -479,11 +481,11 @@ public class Recover extends Tool implements DataHandler {
int id = s.readInt();
int firstUncommittedPos = s.readInt();
int firstUnwrittenPos = s.readInt();
writer.println("// id:" + id);
writer.println("// firstUncommittedPos:" + firstUncommittedPos);
writer.println("// firstUnwrittenPos:" + firstUnwrittenPos);
writer.println("// id: " + id);
writer.println("// firstUncommittedPos: " + firstUncommittedPos);
writer.println("// firstUnwrittenPos: " + firstUnwrittenPos);
int max = (int) (length / blockSize);
writer.println("// max:" + max);
writer.println("// max: " + max);
while (true) {
int pos = (int) (store.getFilePointer() / blockSize);
if ((long) pos * blockSize >= length) {
......@@ -517,9 +519,9 @@ public class Recover extends Tool implements DataHandler {
int sessionId = s.readInt();
if (type == 'P') {
String transaction = s.readString();
writer.println("// prepared session:" + sessionId + " tx:" + transaction);
writer.println("// prepared session: " + sessionId + " tx: " + transaction);
} else if (type == 'C') {
writer.println("// commit session:" + sessionId);
writer.println("// commit session: " + sessionId);
} else {
int storageId = s.readInt();
int recId = s.readInt();
......@@ -535,27 +537,27 @@ public class Recover extends Tool implements DataHandler {
if (sumLength > 0) {
s.read(summary, 0, sumLength);
}
writer.println("// summary session:"+sessionId+" fileType:" + fileType + " sumLength:" + sumLength);
writer.println("// summary session: "+sessionId+" fileType: " + fileType + " sumLength: " + sumLength);
dumpSummary(writer, summary);
break;
}
case 'T':
writer.println("// truncate session:"+sessionId+" storage:" + storageId + " pos:" + recId + " blockCount:"+blockCount);
writer.println("// truncate session: "+sessionId+" storage: " + storageId + " pos: " + recId + " blockCount: "+blockCount);
break;
case 'I':
writer.println("// insert session:"+sessionId+" storage:" + storageId + " pos:" + recId + " blockCount:"+blockCount);
writer.println("// insert session: "+sessionId+" storage: " + storageId + " pos: " + recId + " blockCount: "+blockCount);
if (storageId >= 0) {
writeLogRecord(writer, s);
}
break;
case 'D':
writer.println("// delete session:"+sessionId+" storage:" + storageId + " pos:" + recId + " blockCount:"+blockCount);
writer.println("// delete session: "+sessionId+" storage: " + storageId + " pos: " + recId + " blockCount: "+blockCount);
if (storageId >= 0) {
writeLogRecord(writer, s);
}
break;
default:
writer.println("// type?:"+type+" session:"+sessionId+" storage:" + storageId + " pos:" + recId + " blockCount:"+blockCount);
writer.println("// type?: "+type+" session: "+sessionId+" storage: " + storageId + " pos: " + recId + " blockCount: "+blockCount);
break;
}
}
......@@ -582,7 +584,7 @@ public class Recover extends Tool implements DataHandler {
if ((i % 8) == 0) {
writer.print("// ");
}
writer.print(" " + Long.toString(i * 8) + ":");
writer.print(" " + Long.toString(i * 8) + ": ");
for (int j = 0; j < 8; j++) {
writer.print(((x & 1) == 1) ? "1" : "0");
x >>>= 1;
......@@ -596,7 +598,7 @@ public class Recover extends Tool implements DataHandler {
for (int i = 0; i < len; i++) {
int storageId = in.readInt();
if (storageId != -1) {
writer.println("// pos:" + (i * DiskFile.BLOCKS_PER_PAGE) + " storage:" + storageId);
writer.println("// pos: " + (i * DiskFile.BLOCKS_PER_PAGE) + " storage: " + storageId);
}
}
while (true) {
......@@ -605,7 +607,7 @@ public class Recover extends Tool implements DataHandler {
break;
}
int recordCount = in.readInt();
writer.println("// storage:" + s + " recordCount:" + recordCount);
writer.println("// storage: " + s + " recordCount: " + recordCount);
}
} catch (Throwable e) {
writeError(writer, e);
......@@ -700,7 +702,7 @@ public class Recover extends Tool implements DataHandler {
data = "root [" + rootPos + "]";
break;
}
writer.println("// [" + block + "] page:" + page + " blocks:" + blockCount + " storage:" + storageId + " " + data);
writer.println("// [" + block + "] page: " + page + " blocks: " + blockCount + " storage: " + storageId + " " + data);
}
writer.close();
} catch (Throwable e) {
......@@ -735,26 +737,47 @@ public class Recover extends Tool implements DataHandler {
int pageSize = s.readInt();
int writeVersion = s.readByte();
int readVersion = s.readByte();
int systemTableRoot = s.readInt();
int freeListHead = s.readInt();
int logHead = s.readInt();
writer.println("-- pageSize " + pageSize);
writer.println("-- writeVersion: " + writeVersion);
writer.println("-- readVersion: " + readVersion);
writer.println("-- systemTableRoot: " + systemTableRoot);
writer.println("-- freeListHead: " + freeListHead);
writer.println("-- logHead: " + logHead);
writer.println("-- pageSize: " + pageSize +
" writeVersion: " + writeVersion +
" readVersion: " + readVersion);
if (pageSize < PageStore.PAGE_SIZE_MIN || pageSize > PageStore.PAGE_SIZE_MAX) {
pageSize = PageStore.PAGE_SIZE_DEFAULT;
// use default values for other settings as well
systemTableRoot = 1;
freeListHead = 2;
logHead = 3;
writer.println("-- ERROR: page size; using " + pageSize);
}
int pageCount = (int) (length / pageSize);
blockCount = 1;
for (long page = 1; page < pageCount; page++) {
s = DataPage.create(this, pageSize);
int logFirstTrunkPage = 0, logFirstDataPage = 0;
for (int i = 1;; i++) {
if (i == 3) {
break;
}
s.reset();
store.seek(i * pageSize);
store.readFully(s.getBytes(), 0, pageSize);
long writeCounter = s.readLong();
int firstTrunkPage = s.readInt();
int firstDataPage = s.readInt();
CRC32 crc = new CRC32();
crc.update(s.getBytes(), 0, s.length());
long expected = crc.getValue();
long got = s.readLong();
if (expected == got) {
if (logFirstTrunkPage == 0) {
logFirstTrunkPage = firstTrunkPage;
logFirstDataPage = firstDataPage;
}
}
writer.println("-- head " + i +
": writeCounter: " + writeCounter +
" trunk: " + firstTrunkPage + "/" + firstDataPage +
" crc expected " + expected +
" got " + got + " (" + (expected == got ? "ok" : "different") + ")");
}
writer.println("-- firstTrunkPage: " + logFirstTrunkPage +
" firstDataPage: " + logFirstDataPage);
s = DataPage.create(this, pageSize);
for (long page = 3; page < pageCount; page++) {
s = DataPage.create(this, pageSize);
store.seek(page * pageSize);
store.readFully(s.getBytes(), 0, pageSize);
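Note: the dump logic above replaces the old fixed header fields (systemTableRoot, freeListHead, logHead) with redundant head pages, each carrying a write counter, the log's first trunk and data page, and a CRC32 over those fields. A minimal sketch of the checksum verification; the 16-byte layout and big-endian byte order here are assumptions for illustration:

import java.util.zip.CRC32;

class HeadPageCheckSketch {

    // assumed layout: 8-byte write counter, two 4-byte page ids,
    // then a CRC32 of those 16 bytes stored as a long
    static boolean verify(byte[] page) {
        CRC32 crc = new CRC32();
        crc.update(page, 0, 16);
        return crc.getValue() == readLong(page, 16);
    }

    static long readLong(byte[] b, int off) {
        long x = 0;
        for (int i = 0; i < 8; i++) {
            x = (x << 8) | (b[off + i] & 0xff);
        }
        return x;
    }

    public static void main(String[] args) {
        byte[] page = new byte[32];
        CRC32 crc = new CRC32();
        crc.update(page, 0, 16);
        long v = crc.getValue();
        for (int i = 0; i < 8; i++) {
            page[16 + i] = (byte) (v >>> (8 * (7 - i)));
        }
        System.out.println(verify(page)); // true
    }
}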
......@@ -762,9 +785,8 @@ public class Recover extends Tool implements DataHandler {
int type = s.readByte();
switch (type) {
case Page.TYPE_EMPTY:
// writer.println("-- page " + page + ": empty");
if (parentPageId != 0) {
writer.println("-- ERROR parent:" + parentPageId);
writer.println("-- ERROR empty page with parent: " + parentPageId);
}
continue;
}
......@@ -783,9 +805,15 @@ public class Recover extends Tool implements DataHandler {
break;
case Page.TYPE_BTREE_NODE:
writer.println("-- page " + page + ": btree node" + (last ? "(last)" : ""));
if (trace) {
dumpPageBtreeNode(store, pageSize, writer, s, last, page);
}
break;
case Page.TYPE_BTREE_LEAF:
writer.println("-- page " + page + ": btree leaf " + (last ? "(last)" : ""));
if (trace) {
dumpPageBtreeLeaf(store, pageSize, writer, s, last, page);
}
break;
case Page.TYPE_FREE_LIST:
writer.println("-- page " + page + ": free list " + (last ? "(last)" : ""));
......@@ -802,9 +830,7 @@ public class Recover extends Tool implements DataHandler {
}
}
writeSchema(writer);
// for (int i = 0; i < PageStore.LOG_COUNT; i++) {
// dumpPageLogStream(writer, store, logHead + i, pageSize);
// }
dumpPageLogStream(writer, store, logFirstTrunkPage, logFirstDataPage, pageSize);
writer.close();
} catch (Throwable e) {
writeError(writer, e);
......@@ -814,14 +840,11 @@ public class Recover extends Tool implements DataHandler {
}
}
private void dumpPageLogStream(PrintWriter writer, FileStore store, int logHead, int pageSize) throws IOException, SQLException {
private void dumpPageLogStream(PrintWriter writer, FileStore store, int logFirstTrunkPage, int logFirstDataPage, int pageSize) throws IOException, SQLException {
DataPage s = DataPage.create(this, pageSize);
DataInputStream in = new DataInputStream(
new PageInputStream(writer, this, store, logHead, pageSize, 0,
Page.TYPE_STREAM_TRUNK)
new PageInputStream(writer, this, store, logFirstTrunkPage, logFirstDataPage, pageSize)
);
int logId = in.readInt();
writer.println("-- log " + logId);
while (true) {
int x = in.read();
if (x < 0) {
......@@ -864,24 +887,24 @@ public class Recover extends Tool implements DataHandler {
static class PageInputStream extends InputStream {
private final PrintWriter writer;
private final int type;
private final FileStore store;
private final DataPage page;
private final int pageSize;
private int parentPage;
private int nextPage;
private int trunkPage;
private int dataPage;
private IntArray dataPages = new IntArray();
private boolean endOfFile;
private int remaining;
public PageInputStream(PrintWriter writer, DataHandler handler,
FileStore store, int firstPage, int pageSize, int parent, int type) {
FileStore store, int firstTrunkPage, int firstDataPage, int pageSize) {
this.writer = writer;
this.store = store;
this.pageSize = pageSize;
this.type = type;
this.parentPage = parent;
nextPage = firstPage;
this.trunkPage = firstTrunkPage;
this.dataPage = firstDataPage;
page = DataPage.create(handler, pageSize);
}
public int read() throws IOException {
......@@ -926,45 +949,103 @@ public class Recover extends Tool implements DataHandler {
if (remaining > 0 || endOfFile) {
return;
}
if (nextPage == 0) {
try {
if (dataPages.size() == 0) {
if (trunkPage == 0) {
endOfFile = true;
return;
}
store.seek((long) trunkPage * pageSize);
store.readFully(page.getBytes(), 0, pageSize);
page.reset();
page.readInt();
int t = page.readByte();
if (t != Page.TYPE_STREAM_TRUNK) {
writer.println("-- eof page: " +trunkPage + " type: " + t + " expected type: " + Page.TYPE_STREAM_TRUNK);
endOfFile = true;
return;
}
trunkPage = page.readInt();
int pageCount = page.readInt();
for (int i = 0; i < pageCount; i++) {
int d = page.readInt();
if (dataPage != 0) {
if (d == dataPage) {
dataPage = 0;
} else {
// ignore the pages before the starting data page
continue;
}
}
dataPages.add(d);
}
}
page.reset();
try {
int nextPage = dataPages.get(0);
dataPages.remove(0);
store.seek((long) nextPage * pageSize);
store.readFully(page.getBytes(), 0, pageSize);
page.reset();
int p = page.readInt();
int t = page.readByte();
boolean last = (t & Page.FLAG_LAST) != 0;
t &= ~Page.FLAG_LAST;
if (type != t || p != parentPage) {
writer.println("-- ERROR page:" +nextPage+ " type:" + t + " parent:" + p +
" expected type:" + type + " expected parent:" + parentPage);
}
parentPage = nextPage;
if (last) {
nextPage = 0;
remaining = page.readInt();
} else {
nextPage = page.readInt();
remaining = pageSize - page.length();
if (t != Page.TYPE_STREAM_DATA) {
writer.println("-- eof page: " +nextPage+ " type: " + t + " parent: " + p +
" expected type: " + Page.TYPE_STREAM_DATA);
endOfFile = true;
return;
}
remaining = page.readInt();
} catch (SQLException e) {
throw Message.convertToIOException(e);
}
}
}
private void dumpPageBtreeNode(FileStore store, int pageSize, PrintWriter writer, DataPage s, boolean last, long pageId) {
int entryCount = s.readShortInt();
int rowCount = s.readInt();
int[] children = new int[entryCount + 1];
int[] offsets = new int[entryCount];
children[entryCount] = s.readInt();
for (int i = 0; i < entryCount; i++) {
children[i] = s.readInt();
offsets[i] = s.readInt();
}
for (int i = 0; i < entryCount; i++) {
int off = offsets[i];
s.setPos(off);
int pos = s.readInt();
Value data;
try {
data = s.readValue();
} catch (Throwable e) {
writeDataError(writer, "exception " + e, s.getBytes(), blockCount);
continue;
}
writer.println("-- [" + i + "] child: " + children[i] + " pos: " + pos + " data: " + data);
}
writer.println("-- [" + entryCount + "] child: " + children[entryCount] + " rowCount: " + rowCount);
}
private void dumpPageLog(PrintWriter writer, DataPage s, boolean last) {
if (last) {
int size = s.readInt();
writer.println("-- size:" + size);
} else {
int next = s.readInt();
writer.println("-- next:" + next);
private void dumpPageBtreeLeaf(FileStore store, int pageSize, PrintWriter writer, DataPage s, boolean last, long pageId) {
s.readInt();
int entryCount = s.readShortInt();
int[] offsets = new int[entryCount];
for (int i = 0; i < entryCount; i++) {
offsets[i] = s.readShortInt();
}
for (int i = 0; i < entryCount; i++) {
int off = offsets[i];
s.setPos(off);
int pos = s.readInt();
Value data;
try {
data = s.readValue();
} catch (Throwable e) {
writeDataError(writer, "exception " + e, s.getBytes(), blockCount);
continue;
}
writer.println("-- [" + i + "] pos: " + pos + " data: " + data);
}
}
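Note: the transaction log is now read as a stream of trunk pages, each listing the data pages that follow it; PageInputStream above walks the trunk chain and queues the data pages in order, skipping entries before the first data page. A compact sketch of that traversal with made-up page structures:

import java.util.ArrayDeque;
import java.util.Deque;
import java.util.HashMap;
import java.util.Map;

class LogStreamSketch {

    // a trunk page: the id of the next trunk page plus its data page ids
    static class Trunk {
        final int next;
        final int[] dataPages;
        Trunk(int next, int... dataPages) {
            this.next = next;
            this.dataPages = dataPages;
        }
    }

    // walk the trunk chain, queueing data pages in log order
    static Deque<Integer> collect(Map<Integer, Trunk> pages, int firstTrunk,
            int firstDataPage) {
        Deque<Integer> out = new ArrayDeque<>();
        boolean started = firstDataPage == 0;
        for (int t = firstTrunk; t != 0; ) {
            Trunk trunk = pages.get(t);
            for (int d : trunk.dataPages) {
                started = started || d == firstDataPage;
                if (started) {
                    out.add(d); // ignore pages before the starting data page
                }
            }
            t = trunk.next;
        }
        return out;
    }

    public static void main(String[] args) {
        Map<Integer, Trunk> pages = new HashMap<>();
        pages.put(4, new Trunk(7, 5, 6));
        pages.put(7, new Trunk(0, 8));
        System.out.println(collect(pages, 4, 6)); // [6, 8]
    }
}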
......@@ -991,7 +1072,7 @@ public class Recover extends Tool implements DataHandler {
int type = s2.readByte();
if (type == (Page.TYPE_DATA_OVERFLOW | Page.FLAG_LAST)) {
int size = s2.readShortInt();
writer.println("-- chain:" + next + " type:" + type + " size:" + size);
writer.println("-- chain: " + next + " type: " + type + " size: " + size);
s.write(s2.getBytes(), 7, size);
break;
} else if (type == Page.TYPE_DATA_OVERFLOW) {
......@@ -1001,10 +1082,10 @@ public class Recover extends Tool implements DataHandler {
break;
}
int size = pageSize - 9;
writer.println("-- chain:" + next + " type:" + type + " size:" + size + " next:" + next);
writer.println("-- chain: " + next + " type: " + type + " size: " + size + " next: " + next);
s.write(s2.getBytes(), 9, size);
} else {
writeDataError(writer, "type:" + type, s2.getBytes(), 1);
writeDataError(writer, "type: " + type, s2.getBytes(), 1);
break;
}
}
......@@ -1012,7 +1093,7 @@ public class Recover extends Tool implements DataHandler {
for (int i = 0; i < entryCount; i++) {
int key = keys[i];
int off = offsets[i];
writer.println("-- [" + i + "] storage:" + storageId + " key:" + key + " off:" + off);
writer.println("-- [" + i + "] storage: " + storageId + " key: " + key + " off: " + off);
s.setPos(off);
Value[] data = createRecord(writer, s);
if (data != null) {
......
......@@ -289,7 +289,7 @@ java org.h2.test.TestAll timer
// 2009-05-15: 25 tests fail with page store (first loop)
// 2009-05-18: 18 tests fail with page store (first loop)
// 2009-05-30: 15 tests fail with page store (first loop)
// 2009-06-16: 13 tests fail with page store (first loop)
// 2009-06-19: 10 tests fail with page store (first loop)
// System.setProperty("h2.pageStore", "true");
/*
......
......@@ -178,6 +178,7 @@ public class TestPowerOff extends TestBase {
} catch (SQLException e) {
assertKnownException(e);
}
if (!SysProperties.PAGE_STORE) {
boolean deleted = false;
for (String fileName : FileLister.getDatabaseFiles(dir, dbName, false)) {
if (fileName.endsWith(Constants.SUFFIX_INDEX_FILE)) {
......@@ -186,6 +187,7 @@ public class TestPowerOff extends TestBase {
}
}
assertTrue(deleted);
}
conn = getConnection(url);
conn.close();
}
......