Commit b896df02 authored by Thomas Mueller

New experimental page store

Parent f3d6e434
......@@ -7,13 +7,9 @@
package org.h2.index;
import java.sql.SQLException;
import org.h2.engine.Session;
import org.h2.result.Row;
import org.h2.result.SearchRow;
import org.h2.store.DataPage;
import org.h2.store.Record;
import org.h2.table.Column;
import org.h2.value.Value;
/**
* A page that contains index data.
......@@ -51,12 +47,15 @@ abstract class PageBtree extends Record {
protected int entryCount;
/**
* The start of the data area.
* The index data.
*/
int start;
protected SearchRow[] rows;
/**
* The start of the data area.
*/
protected int start;
PageBtree(PageBtreeIndex index, int pageId, int parentPageId, DataPage data) {
this.index = index;
this.parentPageId = parentPageId;
......@@ -79,16 +78,17 @@ abstract class PageBtree extends Record {
abstract void setRowCountStored(int rowCount) throws SQLException;
/**
* Find an entry by key.
* Find an entry.
*
* @param key the key (may not exist)
* @return the matching or next index
* @param compare the row
* @param bigger if looking for a larger row
* @return the index of the found row
*/
int find(Session session, SearchRow compare, boolean bigger) throws SQLException {
int find(SearchRow compare, boolean bigger) throws SQLException {
int l = 0, r = entryCount;
while (l < r) {
int i = (l + r) >>> 1;
SearchRow row = (SearchRow) getRow(session, i);
SearchRow row = (SearchRow) getRow(i);
int comp = index.compareRows(row, compare);
if (comp > 0 || (!bigger && comp == 0)) {
r = i;
......@@ -111,10 +111,14 @@ abstract class PageBtree extends Record {
* @return 0 if successful, or the split position if the page needs to be
* split
*/
abstract int addRow(Session session, SearchRow row) throws SQLException;
abstract int addRow(SearchRow row) throws SQLException;
/**
* Find the first row.
*
* @param cursor the cursor
* @param first the row to find
* @param bigger if looking for a larger row
*/
abstract void find(PageBtreeCursor cursor, SearchRow first, boolean bigger) throws SQLException;
......@@ -124,7 +128,7 @@ abstract class PageBtree extends Record {
* @param at the index
* @return the row
*/
SearchRow getRow(Session session, int at) throws SQLException {
SearchRow getRow(int at) throws SQLException {
SearchRow row = rows[at];
if (row == null) {
row = index.readRow(data, offsets[at]);
......@@ -139,7 +143,7 @@ abstract class PageBtree extends Record {
* @param splitPoint the index where to split
* @return the new page that contains about half the entries
*/
abstract PageBtree split(Session session, int splitPoint) throws SQLException;
abstract PageBtree split(int splitPoint) throws SQLException;
/**
* Change the page id.
......@@ -180,9 +184,9 @@ abstract class PageBtree extends Record {
/**
* Remove a row.
*
* @param key the key of the row to remove
* @param row the row to remove
* @return true if this page is now empty
*/
abstract boolean remove(Session session, SearchRow row) throws SQLException;
abstract boolean remove(SearchRow row) throws SQLException;
}
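
The binary search in PageBtree.find above is only partly visible in this hunk (the lower-bound branch and the return are cut off). As a reading aid, here is a minimal, self-contained sketch of the same contract over a plain sorted int array; the class name, the else branch and the final return are assumptions, and the real code compares SearchRow objects through index.compareRows.

public class FindSketch {
    // With bigger == false: index of the first entry >= compare.
    // With bigger == true:  index of the first entry >  compare.
    static int find(int[] sorted, int compare, boolean bigger) {
        int l = 0, r = sorted.length;
        while (l < r) {
            int i = (l + r) >>> 1;
            int comp = Integer.compare(sorted[i], compare);
            if (comp > 0 || (!bigger && comp == 0)) {
                r = i;      // candidate found, keep looking to the left
            } else {
                l = i + 1;  // entry too small, look to the right
            }
        }
        return l;           // may equal sorted.length if nothing qualifies
    }

    public static void main(String[] args) {
        int[] rows = { 10, 20, 20, 30 };
        System.out.println(find(rows, 20, false)); // 1: first entry >= 20
        System.out.println(find(rows, 20, true));  // 3: first entry > 20
        System.out.println(find(rows, 99, false)); // 4: past the last entry
    }
}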
......@@ -30,6 +30,12 @@ public class PageBtreeCursor implements Cursor {
this.last = last;
}
/**
* Set the position of the current row.
*
* @param current the leaf page
* @param i the index within the page
*/
void setCurrent(PageBtreeLeaf current, int i) {
this.current = current;
this.i = i;
......@@ -51,20 +57,20 @@ public class PageBtreeCursor implements Cursor {
}
public boolean next() throws SQLException {
// if (i >= current.getEntryCount()) {
// current = current.getNextPage();
// i = 0;
// if (current == null) {
// return false;
// }
// }
// currentSearchRow = current.getRowAt(i);
// if (index.compareRows(currentSearchRow, last) > 0) {
// currentSearchRow = null;
// currentRow = null;
// return false;
// }
// i++;
if (i >= current.getEntryCount()) {
current.nextPage(this);
i = 0;
if (current == null) {
return false;
}
}
currentSearchRow = current.getRow(i);
if (index.compareRows(currentSearchRow, last) > 0) {
currentSearchRow = null;
currentRow = null;
return false;
}
i++;
return true;
}
......
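
PageBtreeCursor.next above advances an index within the current leaf and, once the leaf is exhausted, asks for the next page before checking the current row against the upper bound 'last'. The toy model below mirrors that control flow with plain arrays standing in for leaf pages; it is illustrative only and uses none of the patch's types.

import java.util.Arrays;
import java.util.List;

public class CursorSketch {
    static void scan(List<int[]> leaves, int last) {
        int leaf = 0, i = 0;
        while (true) {
            if (i >= leaves.get(leaf).length) {   // end of the current leaf
                leaf++;                           // move to the next page
                i = 0;
                if (leaf >= leaves.size()) {
                    return;                       // no more pages
                }
            }
            int row = leaves.get(leaf)[i];
            if (row > last) {
                return;                           // past the upper search bound
            }
            i++;
            System.out.println(row);
        }
    }

    public static void main(String[] args) {
        // two leaf pages, scan up to and including 40: prints 10 20 30 40
        scan(Arrays.asList(new int[] { 10, 20 }, new int[] { 30, 40, 50 }), 40);
    }
}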
......@@ -17,7 +17,6 @@ import org.h2.result.SearchRow;
import org.h2.store.DataPage;
import org.h2.store.PageStore;
import org.h2.store.Record;
import org.h2.table.Column;
import org.h2.table.IndexColumn;
import org.h2.table.TableData;
import org.h2.value.Value;
......@@ -47,6 +46,9 @@ public class PageBtreeIndex extends BaseIndex {
return;
}
this.store = database.getPageStore();
if (store == null) {
System.out.println("stop");
}
if (headPos == Index.EMPTY_HEAD) {
// new table
headPos = store.allocatePage();
......@@ -88,16 +90,16 @@ public class PageBtreeIndex extends BaseIndex {
}
while (true) {
PageBtree root = getPage(headPos);
int splitPoint = root.addRow(session, row);
int splitPoint = root.addRow(row);
if (splitPoint == 0) {
break;
}
if (trace.isDebugEnabled()) {
trace.debug("split " + splitPoint);
}
SearchRow pivot = root.getRow(session, splitPoint - 1);
SearchRow pivot = root.getRow(splitPoint - 1);
PageBtree page1 = root;
PageBtree page2 = root.split(session, splitPoint);
PageBtree page2 = root.split(splitPoint);
int rootPageId = root.getPageId();
int id = store.allocatePage();
page1.setPageId(id);
......@@ -201,7 +203,7 @@ public class PageBtreeIndex extends BaseIndex {
removeAllRows();
} else {
PageBtree root = getPage(headPos);
root.remove(session, row);
root.remove(row);
rowCount--;
int todoReuseKeys;
}
......@@ -276,10 +278,18 @@ public class PageBtreeIndex extends BaseIndex {
if (trace.isDebugEnabled()) {
trace.debug("close");
}
store = null;
int todoWhyRequired;
// store = null;
int writeRowCount;
}
/**
* Read a row from the data page at the given offset.
*
* @param data the data
* @param offset the offset
* @return the row
*/
SearchRow readRow(DataPage data, int offset) throws SQLException {
data.setPos(offset);
SearchRow row = table.getTemplateSimpleRow(columns.length == 1);
......@@ -291,6 +301,22 @@ public class PageBtreeIndex extends BaseIndex {
return row;
}
/**
* Write a row to the data page at the given offset.
*
* @param data the data
* @param offset the offset
* @param row the row to write
*/
void writeRow(DataPage data, int offset, SearchRow row) throws SQLException {
data.setPos(offset);
data.writeInt(row.getPos());
for (int i = 0; i < columns.length; i++) {
int idx = columns[i].getColumnId();
data.writeValue(row.getValue(idx));
}
}
/**
* Get the size of a row (only the part that is stored in the index).
*
......@@ -300,8 +326,8 @@ public class PageBtreeIndex extends BaseIndex {
*/
int getRowSize(DataPage dummy, SearchRow row) throws SQLException {
int rowsize = DataPage.LENGTH_INT;
for (int j = 0; j < columns.length; j++) {
Value v = row.getValue(columns[j].getColumnId());
for (int i = 0; i < columns.length; i++) {
Value v = row.getValue(columns[i].getColumnId());
rowsize += dummy.getValueLen(v);
}
return rowsize;
......
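
readRow and writeRow in PageBtreeIndex above define the per-row index format: a 4-byte row position followed by one value per indexed column, which is also what getRowSize adds up. The round trip below models that layout with a ByteBuffer and ints standing in for org.h2.value.Value objects; the names and the fixed-width values are assumptions made only to keep the sketch self-contained.

import java.nio.ByteBuffer;

public class RowFormatSketch {
    static void writeRow(ByteBuffer data, int offset, int pos, int[] columns) {
        data.position(offset);
        data.putInt(pos);              // 4 bytes: row position (key)
        for (int c : columns) {
            data.putInt(c);            // one entry per indexed column
        }
    }

    static int[] readRow(ByteBuffer data, int offset, int columnCount) {
        data.position(offset);
        int[] row = new int[columnCount + 1];
        row[0] = data.getInt();        // row position
        for (int i = 0; i < columnCount; i++) {
            row[i + 1] = data.getInt();
        }
        return row;
    }

    public static void main(String[] args) {
        ByteBuffer page = ByteBuffer.allocate(128);
        writeRow(page, 32, 7, new int[] { 100, 200 });
        // prints [7, 100, 200]
        System.out.println(java.util.Arrays.toString(readRow(page, 32, 2)));
    }
}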
......@@ -64,7 +64,7 @@ class PageBtreeLeaf extends PageBtree {
* @param row the row to add
* @return the split point of this page, or 0 if no split is required
*/
int addRow(Session session, SearchRow row) throws SQLException {
int addRow(SearchRow row) throws SQLException {
int rowLength = index.getRowSize(data, row);
int pageSize = index.getPageStore().getPageSize();
// TODO currently the order is important
......@@ -81,7 +81,7 @@ class PageBtreeLeaf extends PageBtree {
if (entryCount == 0) {
x = 0;
} else {
x = find(session, row, false);
x = find(row, false);
System.arraycopy(offsets, 0, newOffsets, 0, x);
System.arraycopy(rows, 0, newRows, 0, x);
if (x < entryCount) {
......@@ -116,7 +116,6 @@ class PageBtreeLeaf extends PageBtree {
Message.throwInternalError();
}
int[] newOffsets = new int[entryCount];
int[] newKeys = new int[entryCount];
Row[] newRows = new Row[entryCount];
System.arraycopy(offsets, 0, newOffsets, 0, i);
System.arraycopy(rows, 0, newRows, 0, i);
......@@ -131,11 +130,11 @@ class PageBtreeLeaf extends PageBtree {
return entryCount;
}
PageBtree split(Session session, int splitPoint) throws SQLException {
PageBtree split(int splitPoint) throws SQLException {
int newPageId = index.getPageStore().allocatePage();
PageBtreeLeaf p2 = new PageBtreeLeaf(index, newPageId, parentPageId, index.getPageStore().createDataPage());
for (int i = splitPoint; i < entryCount;) {
p2.addRow(session, getRow(session, splitPoint));
p2.addRow(getRow(splitPoint));
removeRow(splitPoint);
}
return p2;
......@@ -145,9 +144,9 @@ class PageBtreeLeaf extends PageBtree {
return this;
}
boolean remove(Session session, SearchRow row) throws SQLException {
int at = find(session, row, false);
if (index.compareRows(row, getRow(session, at)) != 0) {
boolean remove(SearchRow row) throws SQLException {
int at = find(row, false);
if (index.compareRows(row, getRow(at)) != 0) {
throw Message.getSQLException(ErrorCode.ROW_NOT_FOUND_WHEN_DELETING_1, index.getSQL() + ": " + row);
}
if (entryCount == 1) {
......@@ -180,36 +179,25 @@ class PageBtreeLeaf extends PageBtree {
}
private void write() throws SQLException {
// if (written) {
// return;
// }
// // make sure rows are read
// for (int i = 0; i < entryCount; i++) {
// getRowAt(i);
// }
// data.reset();
// data.writeInt(parentPageId);
// int type;
// if (overflowKey == 0) {
// type = Page.TYPE_BTREE_LEAF | Page.FLAG_LAST;
// } else {
// type = Page.TYPE_BTREE_LEAF;
// }
// data.writeByte((byte) type);
// data.writeInt(index.getId());
// data.writeShortInt(entryCount);
// if (overflowKey != 0) {
// data.writeInt(overflowKey);
// }
// for (int i = 0; i < entryCount; i++) {
// data.writeInt(keys[i]);
// data.writeShortInt(offsets[i]);
// }
// for (int i = 0; i < entryCount; i++) {
// data.setPos(offsets[i]);
// rows[i].write(data);
// }
// written = true;
if (written) {
return;
}
// make sure rows are read
for (int i = 0; i < entryCount; i++) {
getRow(i);
}
data.reset();
data.writeInt(parentPageId);
data.writeByte((byte) Page.TYPE_BTREE_LEAF);
data.writeInt(index.getId());
data.writeShortInt(entryCount);
for (int i = 0; i < entryCount; i++) {
data.writeShortInt(offsets[i]);
}
for (int i = 0; i < entryCount; i++) {
index.writeRow(data, offsets[i], rows[i]);
}
written = true;
}
DataPage getDataPage() throws SQLException {
......@@ -218,11 +206,33 @@ class PageBtreeLeaf extends PageBtree {
}
void find(PageBtreeCursor cursor, SearchRow first, boolean bigger) throws SQLException {
int todo;
int i = find(first, bigger) + 1;
if (i > entryCount) {
if (parentPageId == Page.ROOT) {
return;
}
PageBtreeNode next = (PageBtreeNode) index.getPage(parentPageId);
next.find(cursor, first, bigger);
return;
}
cursor.setCurrent(this, i);
}
void remapChildren() throws SQLException {
int todo;
}
/**
* Set the cursor to the first row of the next page.
*
* @param cursor the cursor
*/
void nextPage(PageBtreeCursor cursor) throws SQLException {
if (parentPageId == Page.ROOT) {
cursor.setCurrent(null, 0);
return;
}
PageBtreeNode next = (PageBtreeNode) index.getPage(parentPageId);
next.nextPage(cursor, getRow(entryCount - 1));
}
}
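
The new PageBtreeLeaf.write above lays the page out as: 4 bytes parent page id, 1 byte page type, 4 bytes index id, 2 bytes entry count, then one 2-byte offset per entry, with the row data itself written at those offsets (in the full file the offsets are computed downward from the end of the page; only part of addRow is visible in this diff). Below is a small sketch of just the header part, with an assumed value for the type constant.

import java.nio.ByteBuffer;

public class LeafPageSketch {
    static final int TYPE_BTREE_LEAF = 4;      // assumed value; see org.h2.index.Page

    static void writeHeader(ByteBuffer page, int parentPageId, int indexId, short[] offsets) {
        page.position(0);
        page.putInt(parentPageId);             // 4 bytes: parent page id
        page.put((byte) TYPE_BTREE_LEAF);      // 1 byte : page type
        page.putInt(indexId);                  // 4 bytes: index id
        page.putShort((short) offsets.length); // 2 bytes: entry count
        for (short off : offsets) {
            page.putShort(off);                // 2 bytes per entry: row offset
        }
    }

    public static void main(String[] args) {
        ByteBuffer page = ByteBuffer.allocate(2048);
        // two rows stored near the end of a 2 KiB page
        writeHeader(page, 0, 42, new short[] { 2000, 1950 });
        System.out.println("header bytes used: " + page.position()); // 15
    }
}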
......@@ -7,10 +7,8 @@
package org.h2.index;
import java.sql.SQLException;
import org.h2.engine.Constants;
import org.h2.engine.Session;
import org.h2.message.Message;
import org.h2.result.Row;
import org.h2.result.SearchRow;
import org.h2.store.DataPage;
......@@ -22,11 +20,14 @@ import org.h2.store.DataPage;
* </li><li>5-6: entry count
* </li><li>7-10: row count of all children (-1 if not known)
* </li><li>11-14: rightmost child page id
* </li><li>15- entries: 4 bytes leaf page id, 4 bytes key
* </li><li>15- entries: 4 bytes leaf page id, 4 bytes offset
* </li></ul>
*/
class PageBtreeNode extends PageBtree {
private static final int CHILD_OFFSET_PAIR_LENGTH = 8;
private static final int CHILD_OFFSET_PAIR_START = 15;
/**
* The page ids of the children.
*/
......@@ -62,59 +63,59 @@ class PageBtreeNode extends PageBtree {
* @param row the row to add
* @return the split point of this page, or 0 if no split is required
*/
private int addChild(int x, int childPageId, SearchRow row) {
// int rowLength = index.getRowSize(data, row);
//
// int last = entryCount == 0 ? pageSize : offsets[entryCount - 1];
// if (entryCount > 0 && last - rowLength < start + KEY_OFFSET_PAIR_LENGTH) {
// int todoSplitAtLastInsertionPoint;
// return (entryCount / 2) + 1;
// }
//
// int[] newOffsets = new int[entryCount + 1];
// SearchRow[] newRows = new SearchRow[entryCount + 1];
// int[] newChildPageIds = new int[entryCount + 2];
// if (childPageIds != null) {
// System.arraycopy(childPageIds, 0, newChildPageIds, 0, x + 1);
// }
// if (entryCount > 0) {
// System.arraycopy(offsets, 0, newOffsets, 0, x);
// System.arraycopy(rows, 0, newRows, 0, x);
// if (x < entryCount) {
// System.arraycopy(offsets, x, newOffsets, x + 1, entryCount - x);
// System.arraycopy(rows, x, newRows, x + 1, entryCount - x);
// System.arraycopy(childPageIds, x, newChildPageIds, x + 1, entryCount - x + 1);
// }
// }
// newOffsets[x] = offset;
// newRows[x] = row;
// newChildPageIds[x + 1] = childPageId;
// offsets = newOffsets;
// childPageIds = newChildPageIds;
// entryCount++;
private int addChild(int x, int childPageId, SearchRow row) throws SQLException {
int rowLength = index.getRowSize(data, row);
int pageSize = index.getPageStore().getPageSize();
int last = entryCount == 0 ? pageSize : offsets[entryCount - 1];
if (entryCount > 0 && last - rowLength < start + CHILD_OFFSET_PAIR_LENGTH) {
int todoSplitAtLastInsertionPoint;
return (entryCount / 2) + 1;
}
int offset = last - rowLength;
int[] newOffsets = new int[entryCount + 1];
SearchRow[] newRows = new SearchRow[entryCount + 1];
int[] newChildPageIds = new int[entryCount + 2];
if (childPageIds != null) {
System.arraycopy(childPageIds, 0, newChildPageIds, 0, x + 1);
}
if (entryCount > 0) {
System.arraycopy(offsets, 0, newOffsets, 0, x);
System.arraycopy(rows, 0, newRows, 0, x);
if (x < entryCount) {
System.arraycopy(offsets, x, newOffsets, x + 1, entryCount - x);
System.arraycopy(rows, x, newRows, x + 1, entryCount - x);
System.arraycopy(childPageIds, x, newChildPageIds, x + 1, entryCount - x + 1);
}
}
newOffsets[x] = offset;
newRows[x] = row;
newChildPageIds[x + 1] = childPageId;
offsets = newOffsets;
childPageIds = newChildPageIds;
entryCount++;
return 0;
}
int addRow(Session session, SearchRow row) throws SQLException {
// while (true) {
// int x = find(session, row, false);
// PageBtree page = index.getPage(childPageIds[x]);
// int splitPoint = page.addRow(session, row);
// if (splitPoint == 0) {
// break;
// }
// SearchRow pivot = page.getRow(session, splitPoint - 1);
// PageBtree page2 = page.split(session, splitPoint);
// index.getPageStore().updateRecord(page, true, page.data);
// index.getPageStore().updateRecord(page2, true, page2.data);
// int splitPoint = addChild(x, page2.getPageId(), pivot);
// if (splitPoint != 0) {
// int todoSplitAtLastInsertionPoint;
// return splitPoint / 2;
// }
// index.getPageStore().updateRecord(this, true, data);
// }
// updateRowCount(1);
int addRow(SearchRow row) throws SQLException {
while (true) {
int x = find(row, false);
PageBtree page = index.getPage(childPageIds[x]);
int splitPoint = page.addRow(row);
if (splitPoint == 0) {
break;
}
SearchRow pivot = page.getRow(splitPoint - 1);
PageBtree page2 = page.split(splitPoint);
index.getPageStore().updateRecord(page, true, page.data);
index.getPageStore().updateRecord(page2, true, page2.data);
splitPoint = addChild(x, page2.getPageId(), pivot);
if (splitPoint != 0) {
int todoSplitAtLastInsertionPoint;
return splitPoint / 2;
}
index.getPageStore().updateRecord(this, true, data);
}
updateRowCount(1);
return 0;
}
......@@ -128,7 +129,7 @@ class PageBtreeNode extends PageBtree {
}
}
PageBtree split(Session session, int splitPoint) throws SQLException {
PageBtree split(int splitPoint) throws SQLException {
int newPageId = index.getPageStore().allocatePage();
PageBtreeNode p2 = new PageBtreeNode(index, newPageId, parentPageId, index.getPageStore().createDataPage());
int firstChild = childPageIds[splitPoint];
......@@ -160,15 +161,17 @@ class PageBtreeNode extends PageBtree {
* @param pivot the pivot key
* @param page2 the last child page
*/
void init(PageBtree page1, SearchRow pivot, PageBtree page2) {
entryCount = 1;
childPageIds = new int[] { page1.getPageId(), page2.getPageId() };
rows = new SearchRow[] { pivot };
void init(PageBtree page1, SearchRow pivot, PageBtree page2) throws SQLException {
entryCount = 0;
childPageIds = new int[] { page2.getPageId() };
rows = new SearchRow[0];
offsets = new int[0];
addChild(0, page1.getPageId(), pivot);
check();
}
void find(PageBtreeCursor cursor, SearchRow first, boolean bigger) throws SQLException {
int i = find(cursor.getSession(), first, bigger) + 1;
int i = find(first, bigger) + 1;
if (i > entryCount) {
if (parentPageId == Page.ROOT) {
return;
......@@ -186,12 +189,12 @@ class PageBtreeNode extends PageBtree {
return index.getPage(child).getFirstLeaf();
}
boolean remove(Session session, SearchRow row) throws SQLException {
int at = find(session, row, false);
boolean remove(SearchRow row) throws SQLException {
int at = find(row, false);
// merge is not implemented to allow concurrent usage of btrees
// TODO maybe implement merge
PageBtree page = index.getPage(childPageIds[at]);
boolean empty = page.remove(session, row);
boolean empty = page.remove(row);
updateRowCount(-1);
if (!empty) {
// the first row didn't change - nothing to do
......@@ -276,4 +279,25 @@ class PageBtreeNode extends PageBtree {
childPageIds = newChildPageIds;
}
/**
* Set the cursor to the first row of the next page.
*
* @param cursor the cursor
* @param row the current row
*/
void nextPage(PageBtreeCursor cursor, SearchRow row) throws SQLException {
int i = find(row, true);
if (i > entryCount) {
if (parentPageId == Page.ROOT) {
cursor.setCurrent(null, 0);
return;
}
PageBtreeNode next = (PageBtreeNode) index.getPage(parentPageId);
next.nextPage(cursor, getRow(entryCount - 1));
}
PageBtree page = index.getPage(childPageIds[i]);
PageBtreeLeaf leaf = page.getFirstLeaf();
cursor.setCurrent(leaf, 0);
}
}
\ No newline at end of file
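
The header layout documented at the top of PageBtreeNode (entry count at bytes 5-6, child row count at 7-10, rightmost child page id at 11-14, then 8-byte child-page-id/offset pairs from byte 15) can be read back roughly as sketched below. Bytes 0-4 (parent page id and page type) and the type value are assumptions by analogy with the leaf page; the real code goes through DataPage rather than ByteBuffer.

import java.nio.ByteBuffer;

public class NodePageSketch {
    static void dumpHeader(ByteBuffer page) {
        int parentPageId = page.getInt(0);        // bytes 0-3 (assumed)
        byte type = page.get(4);                  // byte 4 (assumed)
        short entryCount = page.getShort(5);      // bytes 5-6
        int childRowCount = page.getInt(7);       // bytes 7-10, -1 if not known
        int rightmostChild = page.getInt(11);     // bytes 11-14
        System.out.println("parent=" + parentPageId + " type=" + type
                + " entries=" + entryCount + " rows=" + childRowCount
                + " rightmost=" + rightmostChild);
        for (int i = 0; i < entryCount; i++) {
            int childPageId = page.getInt(15 + i * 8);    // 4 bytes: child page id
            int rowOffset = page.getInt(15 + i * 8 + 4);  // 4 bytes: offset of the pivot row
            System.out.println("  child=" + childPageId + " offset=" + rowOffset);
        }
    }

    public static void main(String[] args) {
        ByteBuffer page = ByteBuffer.allocate(2048);
        page.putInt(0, 0);           // parent: root
        page.put(4, (byte) 5);       // page type (assumed)
        page.putShort(5, (short) 1); // one pivot entry
        page.putInt(7, -1);          // child row count not known yet
        page.putInt(11, 3);          // rightmost child page id
        page.putInt(15, 2);          // first child page id
        page.putInt(19, 1990);       // offset of its pivot row
        dumpHeader(page);
    }
}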
......@@ -249,7 +249,7 @@ class PageDataLeaf extends PageData {
return getRowAt(entryCount - 1).getPos();
}
public PageDataLeaf getNextPage() throws SQLException {
PageDataLeaf getNextPage() throws SQLException {
if (parentPageId == Page.ROOT) {
return null;
}
......
......@@ -166,7 +166,7 @@ class PageDataNode extends PageData {
* @param key the last key of the current page
* @return the next leaf page
*/
public PageDataLeaf getNextPage(int key) throws SQLException {
PageDataLeaf getNextPage(int key) throws SQLException {
int i = find(key) + 1;
if (i > entryCount) {
if (parentPageId == Page.ROOT) {
......
......@@ -276,7 +276,8 @@ public class PageScanIndex extends BaseIndex implements RowIndex {
if (trace.isDebugEnabled()) {
trace.debug("close");
}
store = null;
int todoWhyNotClose;
// store = null;
int writeRowCount;
}
......
......@@ -46,6 +46,7 @@ import org.h2.util.ObjectUtils;
*/
public class PageStore implements CacheWriter {
// TODO btree index with fixed size values doesn't need offset and so on
// TODO log block allocation
// TODO use free-space bitmap
// TODO block compression: maybe http://en.wikipedia.org/wiki/LZJB
......
......@@ -25,6 +25,7 @@ import org.h2.index.HashIndex;
import org.h2.index.Index;
import org.h2.index.IndexType;
import org.h2.index.MultiVersionIndex;
import org.h2.index.PageBtreeIndex;
import org.h2.index.PageScanIndex;
import org.h2.index.RowIndex;
import org.h2.index.ScanIndex;
......@@ -176,7 +177,11 @@ public class TableData extends Table implements RecordReader {
}
Index index;
if (getPersistent() && indexType.getPersistent()) {
index = new BtreeIndex(session, this, indexId, indexName, cols, indexType, headPos);
if (SysProperties.PAGE_STORE) {
index = new PageBtreeIndex(this, indexId, indexName, cols, indexType, headPos);
} else {
index = new BtreeIndex(session, this, indexId, indexName, cols, indexType, headPos);
}
} else {
if (indexType.getHash()) {
index = new HashIndex(this, indexId, indexName, cols, indexType);
......
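
For context, the new branch in TableData above is guarded by SysProperties.PAGE_STORE. Constants in SysProperties are normally backed by JVM system properties, so enabling the experimental page store before the database is opened presumably looks like the line below; the property name is assumed from the constant and is not shown in this patch.

// assumed property name; must be set before the database is opened
System.setProperty("h2.pageStore", "true");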
......@@ -581,4 +581,4 @@ titles headers grew orchestration social razor finder ranging friend intervals
bot jot delicious rife appenders circles spelling cash sky ecm nuxeo poland
opengeospatial sfs symmetric obsolete failing parenthesis unloading refreshed
grails reloading slightly accepting deploying conflicting recovered counters
versus extracts squirrel misdirected rle
versus extracts squirrel misdirected rle looking