提交 85b075ee authored 作者: Thomas Mueller

New experimental page store

上级 805d71bd
...@@ -32,19 +32,34 @@ public class Page { ...@@ -32,19 +32,34 @@ public class Page {
public static final int TYPE_DATA_NODE = 2; public static final int TYPE_DATA_NODE = 2;
/** /**
* An overflow page (the last page: + FLAG_LAST). * A data overflow page (the last page: + FLAG_LAST).
*/ */
public static final int TYPE_DATA_OVERFLOW = 3; public static final int TYPE_DATA_OVERFLOW = 3;
/**
* A btree leaf page (without overflow: + FLAG_LAST).
*/
public static final int TYPE_BTREE_LEAF = 4;
/**
* A btree node page (never has overflow pages).
*/
public static final int TYPE_BTREE_NODE = 5;
/**
* A btree overflow page.
*/
public static final int TYPE_BTREE_OVERFLOW = 6;
/** /**
* A page containing a list of free pages (the last page: + FLAG_LAST). * A page containing a list of free pages (the last page: + FLAG_LAST).
*/ */
public static final int TYPE_FREE_LIST = 4; public static final int TYPE_FREE_LIST = 7;
/** /**
* A log page. * A log page.
*/ */
public static final int TYPE_LOG = 5; public static final int TYPE_LOG = 8;
/** /**
* This is a root page. * This is a root page.
......
/*
* Copyright 2004-2009 H2 Group. Multiple-Licensed under the H2 License,
* Version 1.0, and under the Eclipse Public License, Version 1.0
* (http://h2database.com/html/license.html).
* Initial Developer: H2 Group
*/
package org.h2.index;
import java.sql.SQLException;
import org.h2.engine.Session;
import org.h2.result.Row;
import org.h2.result.SearchRow;
import org.h2.store.DataPage;
import org.h2.store.Record;
import org.h2.table.Column;
import org.h2.value.Value;
/**
 * A page that contains b-tree index data. This is the common base class for
 * the leaf pages and the inner (node) pages of the page-store b-tree.
 */
abstract class PageBtree extends Record {

    /**
     * Indicator that the row count is not known.
     */
    static final int UNKNOWN_ROWCOUNT = -1;

    /**
     * The index this page belongs to.
     */
    protected final PageBtreeIndex index;

    /**
     * The page number of the parent.
     */
    protected int parentPageId;

    /**
     * The data page (the serialized form of this page).
     */
    protected final DataPage data;

    /**
     * The row offsets within the data page, in entry order.
     */
    protected int[] offsets;

    /**
     * The number of entries.
     */
    protected int entryCount;

    /**
     * The start of the data area.
     */
    int start;

    /**
     * Cache of the rows of this page; entries are filled lazily by getRow.
     */
    protected SearchRow[] rows;

    PageBtree(PageBtreeIndex index, int pageId, int parentPageId, DataPage data) {
        this.index = index;
        this.parentPageId = parentPageId;
        this.data = data;
        // the page id is stored as the record position
        setPos(pageId);
    }

    /**
     * Get the real row count. If required, this will read all child pages.
     *
     * @return the row count
     */
    abstract int getRowCount() throws SQLException;

    /**
     * Set the stored row count. This will write the page.
     *
     * @param rowCount the stored row count
     */
    abstract void setRowCountStored(int rowCount) throws SQLException;

    /**
     * Find an entry by key, using binary search over the sorted entries.
     *
     * @param session the session
     * @param compare the row to look for (it may not exist in this page)
     * @param bigger whether the position of the next entry (instead of an
     *            exact match) should be returned
     * @return the index of the matching entry, or the insertion point
     */
    int find(Session session, SearchRow compare, boolean bigger) throws SQLException {
        int l = 0, r = entryCount;
        while (l < r) {
            // unsigned shift avoids overflow when computing the midpoint
            int i = (l + r) >>> 1;
            SearchRow row = (SearchRow) getRow(session, i);
            int comp = index.compareRows(row, compare);
            if (comp > 0 || (!bigger && comp == 0)) {
                r = i;
            } else {
                l = i + 1;
            }
        }
        return l;
    }

    /**
     * Read the page contents from the underlying data page.
     */
    abstract void read() throws SQLException;

    /**
     * Add a row.
     *
     * @param session the session
     * @param row the row
     * @return 0 if successful, or the split position if the page needs to be
     *         split
     */
    abstract int addRow(Session session, SearchRow row) throws SQLException;

    /**
     * Find the first matching row and position the cursor on it.
     *
     * @param cursor the cursor to position
     * @param first the lower bound
     * @param bigger whether only strictly bigger rows should match
     */
    abstract void find(PageBtreeCursor cursor, SearchRow first, boolean bigger) throws SQLException;

    /**
     * Get the row at this position, reading it from the data page if it is
     * not yet cached.
     *
     * @param session the session
     * @param at the index
     * @return the row
     */
    SearchRow getRow(Session session, int at) throws SQLException {
        SearchRow row = rows[at];
        if (row == null) {
            row = index.readRow(data, offsets[at]);
            rows[at] = row;
        }
        return row;
    }

    /**
     * Split the index page at the given point.
     *
     * @param session the session
     * @param splitPoint the index where to split
     * @return the new page that contains about half the entries
     */
    abstract PageBtree split(Session session, int splitPoint) throws SQLException;

    /**
     * Change the page id. The old record is removed from the store, and the
     * children are re-mapped to point to the new id.
     *
     * @param id the new page id
     */
    void setPageId(int id) throws SQLException {
        index.getPageStore().removeRecord(getPos());
        setPos(id);
        remapChildren();
    }

    /**
     * Get the page id (stored as the record position).
     *
     * @return the page id
     */
    int getPageId() {
        return getPos();
    }

    /**
     * Get the first child leaf page of a page.
     *
     * @return the page
     */
    abstract PageBtreeLeaf getFirstLeaf() throws SQLException;

    /**
     * Change the parent page id.
     *
     * @param id the new parent page id
     */
    void setParentPageId(int id) {
        this.parentPageId = id;
    }

    /**
     * Update the parent id of all children.
     */
    abstract void remapChildren() throws SQLException;

    /**
     * Remove a row.
     *
     * @param session the session
     * @param row the row to remove
     * @return true if this page is now empty
     */
    abstract boolean remove(Session session, SearchRow row) throws SQLException;

}
/*
* Copyright 2004-2009 H2 Group. Multiple-Licensed under the H2 License,
* Version 1.0, and under the Eclipse Public License, Version 1.0
* (http://h2database.com/html/license.html).
* Initial Developer: H2 Group
*/
package org.h2.index;
import java.sql.SQLException;
import org.h2.engine.Session;
import org.h2.result.Row;
import org.h2.result.SearchRow;
/**
 * The cursor implementation for the page btree index. It points at an entry
 * of a leaf page; the full data row is fetched lazily.
 */
public class PageBtreeCursor implements Cursor {

    private final Session session;
    private final PageBtreeIndex index;

    // the upper bound of the scan; rows bigger than this do not match
    private final SearchRow last;

    // the current leaf page and the index of the current entry within it
    private PageBtreeLeaf current;
    private int i;

    // the index row at the current position; the full row is loaded on demand
    private SearchRow currentSearchRow;
    private Row currentRow;

    PageBtreeCursor(Session session, PageBtreeIndex index, SearchRow last) {
        this.session = session;
        this.index = index;
        this.last = last;
    }

    /**
     * Set the current position of this cursor.
     *
     * @param current the leaf page
     * @param i the index of the entry within the page
     */
    void setCurrent(PageBtreeLeaf current, int i) {
        this.current = current;
        this.i = i;
    }

    public Row get() throws SQLException {
        // the full row is only fetched from the data file when first requested
        if (currentRow == null && currentSearchRow != null) {
            currentRow = index.getRow(session, currentSearchRow.getPos());
        }
        return currentRow;
    }

    public int getPos() {
        return currentSearchRow.getPos();
    }

    public SearchRow getSearchRow() {
        return currentSearchRow;
    }

    public boolean next() throws SQLException {
        // NOTE(review): not yet implemented - this always returns true and
        // never advances. The intended logic (advance within the leaf, follow
        // the leaf chain, stop once past 'last') is kept below for reference.
//        if (i >= current.getEntryCount()) {
//            current = current.getNextPage();
//            i = 0;
//            if (current == null) {
//                return false;
//            }
//        }
//        currentSearchRow = current.getRowAt(i);
//        if (index.compareRows(currentSearchRow, last) > 0) {
//            currentSearchRow = null;
//            currentRow = null;
//            return false;
//        }
//        i++;
        return true;
    }

    public boolean previous() throws SQLException {
        // NOTE(review): incomplete - only the position is decremented; the
        // current row is not re-read and page boundaries are not handled
        i--;
        int todo;
        return true;
    }

    Session getSession() {
        return session;
    }

}
/*
* Copyright 2004-2009 H2 Group. Multiple-Licensed under the H2 License,
* Version 1.0, and under the Eclipse Public License, Version 1.0
* (http://h2database.com/html/license.html).
* Initial Developer: H2 Group
*/
package org.h2.index;
import java.sql.SQLException;
import org.h2.constant.ErrorCode;
import org.h2.constant.SysProperties;
import org.h2.engine.Constants;
import org.h2.engine.Session;
import org.h2.message.Message;
import org.h2.result.Row;
import org.h2.result.SearchRow;
import org.h2.store.DataPage;
import org.h2.store.PageStore;
import org.h2.store.Record;
import org.h2.table.Column;
import org.h2.table.IndexColumn;
import org.h2.table.TableData;
import org.h2.value.Value;
import org.h2.value.ValueLob;
/**
 * This is the most common type of index, a b tree index.
 * Only the data of the indexed columns are stored in the index.
 */
public class PageBtreeIndex extends BaseIndex {

    // the page store; set to null when the index is closed
    private PageStore store;
    private TableData tableData;

    // the page id of the root page
    private int headPos;
    private long rowCount;

    public PageBtreeIndex(TableData table, int id, String indexName, IndexColumn[] columns,
            IndexType indexType, int headPos) throws SQLException {
        initBaseIndex(table, id, indexName, columns, indexType);
        // trace.setLevel(TraceSystem.DEBUG);
        if (database.isMultiVersion()) {
            // MVCC is not yet supported by this index type
            int todoMvcc;
        }
        tableData = table;
        if (!database.isPersistent() || id < 0) {
            // non-persistent or temporary: nothing to allocate
            int todo;
            return;
        }
        this.store = database.getPageStore();
        if (headPos == Index.EMPTY_HEAD) {
            // new table: create an empty root leaf page
            headPos = store.allocatePage();
            PageBtreeLeaf root = new PageBtreeLeaf(this, headPos, Page.ROOT, store.createDataPage());
            store.updateRecord(root, true, root.data);
        } else if (store.isNew()) {
            // the system table for a new database
            PageBtreeLeaf root = new PageBtreeLeaf(this, headPos, Page.ROOT, store.createDataPage());
            store.updateRecord(root, true, root.data);
        } else {
            // existing index: read the row count from the root page
            rowCount = getPage(headPos).getRowCount();
            int reuseKeysIfManyDeleted;
        }
        this.headPos = headPos;
        if (trace.isDebugEnabled()) {
            trace.debug("open " + rowCount);
        }
    }

    public int getHeadPos() {
        return headPos;
    }

    public void add(Session session, Row row) throws SQLException {
        if (trace.isDebugEnabled()) {
            trace.debug("add " + row.getPos());
        }
        if (tableData.getContainsLargeObject()) {
            // link LOB values to this index so they survive the statement
            for (int i = 0; i < row.getColumnCount(); i++) {
                Value v = row.getValue(i);
                Value v2 = v.link(database, getId());
                if (v2.isLinked()) {
                    session.unlinkAtCommitStop(v2);
                }
                if (v != v2) {
                    row.setValue(i, v2);
                }
            }
        }
        // retry until the row fits; a split keeps the root at headPos by
        // moving the old root to a new page id
        while (true) {
            PageBtree root = getPage(headPos);
            int splitPoint = root.addRow(session, row);
            if (splitPoint == 0) {
                break;
            }
            if (trace.isDebugEnabled()) {
                trace.debug("split " + splitPoint);
            }
            SearchRow pivot = root.getRow(session, splitPoint - 1);
            PageBtree page1 = root;
            PageBtree page2 = root.split(session, splitPoint);
            int rootPageId = root.getPageId();
            int id = store.allocatePage();
            page1.setPageId(id);
            page1.setParentPageId(headPos);
            page2.setParentPageId(headPos);
            PageBtreeNode newRoot = new PageBtreeNode(this, rootPageId, Page.ROOT, store.createDataPage());
            newRoot.init(page1, pivot, page2);
            store.updateRecord(page1, true, page1.data);
            store.updateRecord(page2, true, page2.data);
            store.updateRecord(newRoot, true, null);
            // note: the loop re-reads the root via getPage(headPos)
        }
        rowCount++;
        store.logAddOrRemoveRow(session, tableData.getId(), row, true);
    }

    /**
     * Read the given page, using the cache if possible.
     *
     * @param id the page id
     * @return the page
     */
    PageBtree getPage(int id) throws SQLException {
        Record rec = store.getRecord(id);
        if (rec != null) {
            return (PageBtree) rec;
        }
        DataPage data = store.readPage(id);
        data.reset();
        int parentPageId = data.readInt();
        int type = data.readByte() & 255;
        PageBtree result;
        switch (type & ~Page.FLAG_LAST) {
        case Page.TYPE_BTREE_LEAF:
            result = new PageBtreeLeaf(this, id, parentPageId, data);
            break;
        case Page.TYPE_BTREE_NODE:
            result = new PageBtreeNode(this, id, parentPageId, data);
            break;
        case Page.TYPE_EMPTY:
            // an empty page is treated as an empty leaf; it is not read
            PageBtreeLeaf empty = new PageBtreeLeaf(this, id, parentPageId, data);
            return empty;
        default:
            throw Message.getSQLException(ErrorCode.FILE_CORRUPTED_1, "page=" + id + " type=" + type);
        }
        result.read();
        return result;
    }

    public boolean canGetFirstOrLast() {
        return false;
    }

    public Cursor findNext(Session session, SearchRow first, SearchRow last) throws SQLException {
        return find(session, first, true, last);
    }

    public Cursor find(Session session, SearchRow first, SearchRow last) throws SQLException {
        return find(session, first, false, last);
    }

    /**
     * Create a cursor over the range [first, last].
     *
     * @param session the session
     * @param first the lower bound (may be null)
     * @param bigger whether rows equal to first should be excluded
     * @param last the upper bound (may be null)
     * @return the cursor
     */
    private Cursor find(Session session, SearchRow first, boolean bigger, SearchRow last) throws SQLException {
        if (SysProperties.CHECK && store == null) {
            throw Message.getSQLException(ErrorCode.OBJECT_CLOSED);
        }
        PageBtree root = getPage(headPos);
        PageBtreeCursor cursor = new PageBtreeCursor(session, this, last);
        root.find(cursor, first, bigger);
        return cursor;
    }

    public Cursor findFirstOrLast(Session session, boolean first) throws SQLException {
        throw Message.getUnsupportedException();
    }

    public double getCost(Session session, int[] masks) throws SQLException {
        long cost = 10 * (tableData.getRowCountApproximation() + Constants.COST_ROW_OFFSET);
        return cost;
    }

    public boolean needRebuild() {
        return false;
    }

    public void remove(Session session, Row row) throws SQLException {
        if (trace.isDebugEnabled()) {
            trace.debug("remove " + row.getPos());
        }
        if (tableData.getContainsLargeObject()) {
            // unlink LOB values at commit
            for (int i = 0; i < row.getColumnCount(); i++) {
                Value v = row.getValue(i);
                if (v.isLinked()) {
                    session.unlinkAtCommit((ValueLob) v);
                }
            }
        }
        int invalidateRowCount;
        // setChanged(session);
        if (rowCount == 1) {
            // removing the last row: reset to an empty root page
            int todoMaybeImprove;
            removeAllRows();
        } else {
            PageBtree root = getPage(headPos);
            root.remove(session, row);
            rowCount--;
            int todoReuseKeys;
        }
        store.logAddOrRemoveRow(session, tableData.getId(), row, false);
    }

    public void remove(Session session) throws SQLException {
        if (trace.isDebugEnabled()) {
            trace.debug("remove");
        }
        // freeing the pages of a removed index is not yet implemented
        int todo;
    }

    public void truncate(Session session) throws SQLException {
        if (trace.isDebugEnabled()) {
            trace.debug("truncate");
        }
        removeAllRows();
        if (tableData.getContainsLargeObject() && tableData.getPersistent()) {
            ValueLob.removeAllForTable(database, table.getId());
        }
        tableData.setRowCount(0);
    }

    /**
     * Remove all rows by replacing the root with an empty leaf page.
     */
    private void removeAllRows() throws SQLException {
        store.removeRecord(headPos);
        int todoLogOldData;
        int freePages;
        PageBtreeLeaf root = new PageBtreeLeaf(this, headPos, Page.ROOT, store.createDataPage());
        store.updateRecord(root, true, null);
        rowCount = 0;
    }

    public void checkRename() throws SQLException {
        throw Message.getUnsupportedException();
    }

    /**
     * Get a row from the data file.
     *
     * @param session the session
     * @param key the row key
     * @return the row
     */
    public Row getRow(Session session, int key) throws SQLException {
        return tableData.getRow(session, key);
    }

    PageStore getPageStore() {
        return store;
    }

    /**
     * Read a row from the data page at the current position.
     *
     * @param data the data page
     * @return the row
     */
    Row readRow(DataPage data) throws SQLException {
        return tableData.readRow(data);
    }

    public long getRowCountApproximation() {
        return rowCount;
    }

    public long getRowCount(Session session) {
        return rowCount;
    }

    public void close(Session session) throws SQLException {
        if (trace.isDebugEnabled()) {
            trace.debug("close");
        }
        store = null;
        int writeRowCount;
    }

    /**
     * Read an index row from the data page at the given offset.
     *
     * @param data the data page
     * @param offset the byte offset within the page
     * @return the row (only the indexed columns are set)
     */
    SearchRow readRow(DataPage data, int offset) throws SQLException {
        data.setPos(offset);
        SearchRow row = table.getTemplateSimpleRow(columns.length == 1);
        row.setPos(data.readInt());
        for (int i = 0; i < columns.length; i++) {
            int idx = columns[i].getColumnId();
            row.setValue(idx, data.readValue());
        }
        return row;
    }

    /**
     * Get the size of a row (only the part that is stored in the index).
     *
     * @param dummy a dummy data page to calculate the size
     * @param row the row
     * @return the number of bytes
     */
    int getRowSize(DataPage dummy, SearchRow row) throws SQLException {
        int rowsize = DataPage.LENGTH_INT;
        for (int j = 0; j < columns.length; j++) {
            Value v = row.getValue(columns[j].getColumnId());
            rowsize += dummy.getValueLen(v);
        }
        return rowsize;
    }

}
/*
* Copyright 2004-2009 H2 Group. Multiple-Licensed under the H2 License,
* Version 1.0, and under the Eclipse Public License, Version 1.0
* (http://h2database.com/html/license.html).
* Initial Developer: H2 Group
*/
package org.h2.index;
import java.sql.SQLException;
import org.h2.constant.ErrorCode;
import org.h2.engine.Session;
import org.h2.message.Message;
import org.h2.result.Row;
import org.h2.result.SearchRow;
import org.h2.store.DataPage;
import org.h2.store.PageStore;
/**
 * A leaf page that contains index data.
 * Format:
 * <ul><li>0-3: parent page id (0 for root)
 * </li><li>4-4: page type
 * </li><li>5-8: table id
 * </li><li>9-10: entry count
 * </li><li>overflow: 11-14: the row key
 * </li><li>11-: list of key / offset pairs (4 bytes key, 2 bytes offset)
 * </li><li>data
 * </li></ul>
 */
class PageBtreeLeaf extends PageBtree {

    // bytes per entry in the key/offset table
    private static final int KEY_OFFSET_PAIR_LENGTH = 6;

    // byte offset where the entry table starts
    private static final int KEY_OFFSET_PAIR_START = 11;

    private boolean written;

    PageBtreeLeaf(PageBtreeIndex index, int pageId, int parentPageId, DataPage data) {
        super(index, pageId, parentPageId, data);
        start = KEY_OFFSET_PAIR_START;
    }

    void read() throws SQLException {
        // skip parent page id (already read by the caller), then page type
        data.setPos(4);
        data.readByte();
        int tableId = data.readInt();
        if (tableId != index.getId()) {
            throw Message.getSQLException(ErrorCode.FILE_CORRUPTED_1,
                    "page:" + getPageId() + " expected table:" + index.getId() +
                    " got:" + tableId);
        }
        entryCount = data.readShortInt();
        offsets = new int[entryCount];
        rows = new SearchRow[entryCount];
        for (int i = 0; i < entryCount; i++) {
            offsets[i] = data.readShortInt();
        }
        start = data.length();
    }

    /**
     * Add a row if possible. If it is possible this method returns 0, otherwise
     * the split point. It is always possible to add one row.
     *
     * @param session the session
     * @param row the row to add
     * @return the split point of this page, or 0 if no split is required
     */
    int addRow(Session session, SearchRow row) throws SQLException {
        int rowLength = index.getRowSize(data, row);
        int pageSize = index.getPageStore().getPageSize();
        // TODO currently the order is important
        // TODO and can only add at the end
        // rows are stored from the end of the page backwards; 'last' is the
        // lowest used data offset so far
        int last = entryCount == 0 ? pageSize : offsets[entryCount - 1];
        if (entryCount > 0 && last - rowLength < start + KEY_OFFSET_PAIR_LENGTH) {
            // not enough free space: ask the caller to split
            int todoSplitAtLastInsertionPoint;
            return (entryCount / 2) + 1;
        }
        int offset = last - rowLength;
        int[] newOffsets = new int[entryCount + 1];
        SearchRow[] newRows = new SearchRow[entryCount + 1];
        int x;
        if (entryCount == 0) {
            x = 0;
        } else {
            // insert in sorted position
            x = find(session, row, false);
            System.arraycopy(offsets, 0, newOffsets, 0, x);
            System.arraycopy(rows, 0, newRows, 0, x);
            if (x < entryCount) {
                System.arraycopy(offsets, x, newOffsets, x + 1, entryCount - x);
                System.arraycopy(rows, x, newRows, x + 1, entryCount - x);
            }
        }
        entryCount++;
        start += KEY_OFFSET_PAIR_LENGTH;
        newOffsets[x] = offset;
        newRows[x] = row;
        offsets = newOffsets;
        rows = newRows;
        index.getPageStore().updateRecord(this, true, data);
        if (offset < start) {
            // the single row is larger than the page: overflow handling
            if (entryCount > 1) {
                Message.throwInternalError();
            }
            // need to write the overflow page id
            start += 4;
            int remaining = rowLength - (pageSize - start);
            // fix offset
            offset = start;
            offsets[x] = offset;
        }
        return 0;
    }

    /**
     * Remove the entry at the given position. The caller must handle the
     * last-entry case (entryCount must stay at least 1).
     *
     * @param i the index of the entry to remove
     */
    private void removeRow(int i) throws SQLException {
        entryCount--;
        if (entryCount <= 0) {
            Message.throwInternalError();
        }
        int[] newOffsets = new int[entryCount];
        // use SearchRow[]: the cached rows are SearchRow instances, so a
        // Row[] target could throw ArrayStoreException on copy
        SearchRow[] newRows = new SearchRow[entryCount];
        System.arraycopy(offsets, 0, newOffsets, 0, i);
        System.arraycopy(rows, 0, newRows, 0, i);
        System.arraycopy(offsets, i + 1, newOffsets, i, entryCount - i);
        System.arraycopy(rows, i + 1, newRows, i, entryCount - i);
        start -= KEY_OFFSET_PAIR_LENGTH;
        offsets = newOffsets;
        rows = newRows;
    }

    int getEntryCount() {
        return entryCount;
    }

    PageBtree split(Session session, int splitPoint) throws SQLException {
        int newPageId = index.getPageStore().allocatePage();
        PageBtreeLeaf p2 = new PageBtreeLeaf(index, newPageId, parentPageId, index.getPageStore().createDataPage());
        // move entries from splitPoint on to the new page; removeRow shrinks
        // entryCount, so the loop terminates
        for (int i = splitPoint; i < entryCount;) {
            p2.addRow(session, getRow(session, splitPoint));
            removeRow(splitPoint);
        }
        return p2;
    }

    PageBtreeLeaf getFirstLeaf() {
        return this;
    }

    boolean remove(Session session, SearchRow row) throws SQLException {
        int at = find(session, row, false);
        if (index.compareRows(row, getRow(session, at)) != 0) {
            throw Message.getSQLException(ErrorCode.ROW_NOT_FOUND_WHEN_DELETING_1, index.getSQL() + ": " + row);
        }
        if (entryCount == 1) {
            // signal the parent that this page is now empty
            return true;
        }
        removeRow(at);
        index.getPageStore().updateRecord(this, true, data);
        return false;
    }

    int getRowCount() throws SQLException {
        return entryCount;
    }

    void setRowCountStored(int rowCount) throws SQLException {
        // ignore - leaf pages do not store a separate row count
    }

    public int getByteCount(DataPage dummy) throws SQLException {
        return index.getPageStore().getPageSize();
    }

    public void write(DataPage buff) throws SQLException {
        write();
        index.getPageStore().writePage(getPos(), data);
    }

    PageStore getPageStore() {
        return index.getPageStore();
    }

    private void write() throws SQLException {
        // NOTE(review): serialization is not yet implemented; the intended
        // logic is kept below for reference
//        if (written) {
//            return;
//        }
//        // make sure rows are read
//        for (int i = 0; i < entryCount; i++) {
//            getRowAt(i);
//        }
//        data.reset();
//        data.writeInt(parentPageId);
//        int type;
//        if (overflowKey == 0) {
//            type = Page.TYPE_BTREE_LEAF | Page.FLAG_LAST;
//        } else {
//            type = Page.TYPE_BTREE_LEAF;
//        }
//        data.writeByte((byte) type);
//        data.writeInt(index.getId());
//        data.writeShortInt(entryCount);
//        if (overflowKey != 0) {
//            data.writeInt(overflowKey);
//        }
//        for (int i = 0; i < entryCount; i++) {
//            data.writeInt(keys[i]);
//            data.writeShortInt(offsets[i]);
//        }
//        for (int i = 0; i < entryCount; i++) {
//            data.setPos(offsets[i]);
//            rows[i].write(data);
//        }
//        written = true;
    }

    DataPage getDataPage() throws SQLException {
        write();
        return data;
    }

    void find(PageBtreeCursor cursor, SearchRow first, boolean bigger) throws SQLException {
        // not yet implemented
        int todo;
    }

    void remapChildren() throws SQLException {
        // leaf pages have no children; not yet implemented
        int todo;
    }

}
/*
* Copyright 2004-2009 H2 Group. Multiple-Licensed under the H2 License,
* Version 1.0, and under the Eclipse Public License, Version 1.0
* (http://h2database.com/html/license.html).
* Initial Developer: H2 Group
*/
package org.h2.index;
import java.sql.SQLException;
import org.h2.engine.Constants;
import org.h2.engine.Session;
import org.h2.message.Message;
import org.h2.result.Row;
import org.h2.result.SearchRow;
import org.h2.store.DataPage;
/**
 * An inner node page of a b-tree index (not a leaf page).
 * Format:
 * <ul><li>0-3: parent page id
 * </li><li>4-4: page type
 * </li><li>5-6: entry count
 * </li><li>7-10: row count of all children (-1 if not known)
 * </li><li>11-14: rightmost child page id
 * </li><li>15- entries: 4 bytes leaf page id, 4 bytes key
 * </li></ul>
 */
class PageBtreeNode extends PageBtree {

    /**
     * The page ids of the children. There is always one more child than
     * entries (childPageIds[entryCount] is the rightmost child).
     */
    private int[] childPageIds;

    private int rowCountStored = UNKNOWN_ROWCOUNT;

    private int rowCount = UNKNOWN_ROWCOUNT;

    PageBtreeNode(PageBtreeIndex index, int pageId, int parentPageId, DataPage data) {
        super(index, pageId, parentPageId, data);
    }

    void read() {
        // skip parent page id and page type (already read by the caller)
        data.setPos(5);
        entryCount = data.readShortInt();
        rowCount = rowCountStored = data.readInt();
        childPageIds = new int[entryCount + 1];
        // rightmost child first, then the entry list
        childPageIds[entryCount] = data.readInt();
        rows = new SearchRow[entryCount];
        offsets = new int[entryCount];
        for (int i = 0; i < entryCount; i++) {
            childPageIds[i] = data.readInt();
            offsets[i] = data.readInt();
        }
        check();
    }

    /**
     * Add a child page at the given position.
     * NOTE(review): not yet implemented - the body is commented out and this
     * always returns 0 (the javadoc previously described addRow by mistake).
     *
     * @param x the insertion position
     * @param childPageId the child page id
     * @param row the pivot row
     * @return the split point of this page, or 0 if no split is required
     */
    private int addChild(int x, int childPageId, SearchRow row) {
//        int rowLength = index.getRowSize(data, row);
//
//        int last = entryCount == 0 ? pageSize : offsets[entryCount - 1];
//        if (entryCount > 0 && last - rowLength < start + KEY_OFFSET_PAIR_LENGTH) {
//            int todoSplitAtLastInsertionPoint;
//            return (entryCount / 2) + 1;
//        }
//
//        int[] newOffsets = new int[entryCount + 1];
//        SearchRow[] newRows = new SearchRow[entryCount + 1];
//        int[] newChildPageIds = new int[entryCount + 2];
//        if (childPageIds != null) {
//            System.arraycopy(childPageIds, 0, newChildPageIds, 0, x + 1);
//        }
//        if (entryCount > 0) {
//            System.arraycopy(offsets, 0, newOffsets, 0, x);
//            System.arraycopy(rows, 0, newRows, 0, x);
//            if (x < entryCount) {
//                System.arraycopy(offsets, x, newOffsets, x + 1, entryCount - x);
//                System.arraycopy(rows, x, newRows, x + 1, entryCount - x);
//                System.arraycopy(childPageIds, x, newChildPageIds, x + 1, entryCount - x + 1);
//            }
//        }
//        newOffsets[x] = offset;
//        newRows[x] = row;
//        newChildPageIds[x + 1] = childPageId;
//        offsets = newOffsets;
//        childPageIds = newChildPageIds;
//        entryCount++;
        return 0;
    }

    int addRow(Session session, SearchRow row) throws SQLException {
        // NOTE(review): not yet implemented - always reports success without
        // storing anything; the intended logic is kept below for reference
//        while (true) {
//            int x = find(session, row, false);
//            PageBtree page = index.getPage(childPageIds[x]);
//            int splitPoint = page.addRow(session, row);
//            if (splitPoint == 0) {
//                break;
//            }
//            SearchRow pivot = page.getRow(session, splitPoint - 1);
//            PageBtree page2 = page.split(session, splitPoint);
//            index.getPageStore().updateRecord(page, true, page.data);
//            index.getPageStore().updateRecord(page2, true, page2.data);
//            int splitPoint = addChild(x, page2.getPageId(), pivot);
//            if (splitPoint != 0) {
//                int todoSplitAtLastInsertionPoint;
//                return splitPoint / 2;
//            }
//            index.getPageStore().updateRecord(this, true, data);
//        }
//        updateRowCount(1);
        return 0;
    }

    /**
     * Adjust the cached row count by the given delta and invalidate the
     * stored row count on disk.
     *
     * @param offset the delta (+1 for insert, -1 for delete)
     */
    private void updateRowCount(int offset) throws SQLException {
        if (rowCount != UNKNOWN_ROWCOUNT) {
            rowCount += offset;
        }
        if (rowCountStored != UNKNOWN_ROWCOUNT) {
            rowCountStored = UNKNOWN_ROWCOUNT;
            index.getPageStore().updateRecord(this, true, data);
        }
    }

    PageBtree split(Session session, int splitPoint) throws SQLException {
        int newPageId = index.getPageStore().allocatePage();
        PageBtreeNode p2 = new PageBtreeNode(index, newPageId, parentPageId, index.getPageStore().createDataPage());
        int firstChild = childPageIds[splitPoint];
        // move entries from splitPoint on to the new page; removeChild
        // shrinks entryCount, so the loop terminates
        for (int i = splitPoint; i < entryCount;) {
            p2.addChild(p2.entryCount, childPageIds[splitPoint + 1], rows[splitPoint]);
            removeChild(splitPoint);
        }
        // drop the pivot entry but keep its left child as the new rightmost
        int lastChild = childPageIds[splitPoint - 1];
        removeChild(splitPoint - 1);
        childPageIds[splitPoint - 1] = lastChild;
        p2.childPageIds[0] = firstChild;
        p2.remapChildren();
        return p2;
    }

    protected void remapChildren() throws SQLException {
        // point all children at this page and persist them
        for (int i = 0; i < childPageIds.length; i++) {
            int child = childPageIds[i];
            PageBtree p = index.getPage(child);
            p.setParentPageId(getPos());
            index.getPageStore().updateRecord(p, true, p.data);
        }
    }

    /**
     * Initialize the page.
     *
     * @param page1 the first child page
     * @param pivot the pivot key
     * @param page2 the last child page
     */
    void init(PageBtree page1, SearchRow pivot, PageBtree page2) {
        entryCount = 1;
        childPageIds = new int[] { page1.getPageId(), page2.getPageId() };
        rows = new SearchRow[] { pivot };
        check();
    }

    void find(PageBtreeCursor cursor, SearchRow first, boolean bigger) throws SQLException {
        int i = find(cursor.getSession(), first, bigger) + 1;
        if (i > entryCount) {
            // past the last entry: continue the search in the parent
            if (parentPageId == Page.ROOT) {
                return;
            }
            PageBtreeNode next = (PageBtreeNode) index.getPage(parentPageId);
            next.find(cursor, first, bigger);
            return;
        }
        // descend into the matching child
        PageBtree page = index.getPage(childPageIds[i]);
        page.find(cursor, first, bigger);
    }

    PageBtreeLeaf getFirstLeaf() throws SQLException {
        int child = childPageIds[0];
        return index.getPage(child).getFirstLeaf();
    }

    boolean remove(Session session, SearchRow row) throws SQLException {
        int at = find(session, row, false);
        // merge is not implemented to allow concurrent usage of btrees
        // TODO maybe implement merge
        PageBtree page = index.getPage(childPageIds[at]);
        boolean empty = page.remove(session, row);
        updateRowCount(-1);
        if (!empty) {
            // the first row didn't change - nothing to do
            return false;
        }
        // this child is now empty
        index.getPageStore().freePage(page.getPageId());
        if (entryCount < 1) {
            // no more children - this page is empty as well
            return true;
        }
        removeChild(at);
        index.getPageStore().updateRecord(this, true, data);
        return false;
    }

    int getRowCount() throws SQLException {
        if (rowCount == UNKNOWN_ROWCOUNT) {
            // sum the row counts of all children (may read many pages)
            int count = 0;
            for (int i = 0; i < childPageIds.length; i++) {
                PageBtree page = index.getPage(childPageIds[i]);
                count += page.getRowCount();
            }
            rowCount = count;
        }
        return rowCount;
    }

    void setRowCountStored(int rowCount) throws SQLException {
        this.rowCount = rowCount;
        if (rowCountStored != rowCount) {
            rowCountStored = rowCount;
            index.getPageStore().updateRecord(this, true, data);
        }
    }

    /**
     * Sanity check: no child page id may be 0.
     */
    private void check() {
        for (int i = 0; i < childPageIds.length; i++) {
            if (childPageIds[i] == 0) {
                Message.throwInternalError();
            }
        }
    }

    public int getByteCount(DataPage dummy) throws SQLException {
        return index.getPageStore().getPageSize();
    }

    public void write(DataPage buff) throws SQLException {
        check();
        // serialize according to the format documented on this class
        data.reset();
        data.writeInt(parentPageId);
        data.writeByte((byte) Page.TYPE_BTREE_NODE);
        data.writeShortInt(entryCount);
        data.writeInt(rowCountStored);
        data.writeInt(childPageIds[entryCount]);
        for (int i = 0; i < entryCount; i++) {
            data.writeInt(childPageIds[i]);
            data.writeInt(offsets[i]);
        }
        index.getPageStore().writePage(getPos(), data);
    }

    /**
     * Remove the child at the given position, keeping the arrays consistent
     * (there is always one more child than entries).
     *
     * @param i the index of the child to remove
     */
    private void removeChild(int i) throws SQLException {
        entryCount--;
        if (entryCount < 0) {
            Message.throwInternalError();
        }
        int[] newOffsets = new int[entryCount];
        SearchRow[] newRows = new SearchRow[entryCount + 1];
        int[] newChildPageIds = new int[entryCount + 1];
        System.arraycopy(offsets, 0, newOffsets, 0, Math.min(entryCount, i));
        System.arraycopy(rows, 0, newRows, 0, Math.min(entryCount, i));
        System.arraycopy(childPageIds, 0, newChildPageIds, 0, i);
        if (entryCount > i) {
            System.arraycopy(offsets, i + 1, newOffsets, i, entryCount - i);
            System.arraycopy(rows, i + 1, newRows, i, entryCount - i);
        }
        System.arraycopy(childPageIds, i + 1, newChildPageIds, i, entryCount - i + 1);
        offsets = newOffsets;
        rows = newRows;
        childPageIds = newChildPageIds;
    }

}
\ No newline at end of file
...@@ -26,6 +26,10 @@ import org.h2.store.DataPage; ...@@ -26,6 +26,10 @@ import org.h2.store.DataPage;
*/ */
class PageDataNode extends PageData { class PageDataNode extends PageData {
private final static int ENTRY_START = 15;
private final static int ENTRY_LENGTH = 8;
/** /**
* The page ids of the children. * The page ids of the children.
*/ */
...@@ -86,7 +90,7 @@ class PageDataNode extends PageData { ...@@ -86,7 +90,7 @@ class PageDataNode extends PageData {
index.getPageStore().updateRecord(page, true, page.data); index.getPageStore().updateRecord(page, true, page.data);
index.getPageStore().updateRecord(page2, true, page2.data); index.getPageStore().updateRecord(page2, true, page2.data);
addChild(x, page2.getPageId(), pivot); addChild(x, page2.getPageId(), pivot);
int maxEntries = (index.getPageStore().getPageSize() - 15) / 8; int maxEntries = (index.getPageStore().getPageSize() - ENTRY_START) / ENTRY_LENGTH;
if (entryCount >= maxEntries) { if (entryCount >= maxEntries) {
int todoSplitAtLastInsertionPoint; int todoSplitAtLastInsertionPoint;
return entryCount / 2; return entryCount / 2;
......
...@@ -155,13 +155,6 @@ public class PageScanIndex extends BaseIndex implements RowIndex { ...@@ -155,13 +155,6 @@ public class PageScanIndex extends BaseIndex implements RowIndex {
return false; return false;
} }
public void close(Session session) throws SQLException {
if (trace.isDebugEnabled()) {
trace.debug("close");
}
int writeRowCount;
}
public Cursor find(Session session, SearchRow first, SearchRow last) throws SQLException { public Cursor find(Session session, SearchRow first, SearchRow last) throws SQLException {
PageData root = getPage(headPos); PageData root = getPage(headPos);
return root.find(); return root.find();
...@@ -279,4 +272,12 @@ public class PageScanIndex extends BaseIndex implements RowIndex { ...@@ -279,4 +272,12 @@ public class PageScanIndex extends BaseIndex implements RowIndex {
return -1; return -1;
} }
public void close(Session session) throws SQLException {
if (trace.isDebugEnabled()) {
trace.debug("close");
}
store = null;
int writeRowCount;
}
} }
...@@ -46,12 +46,17 @@ import org.h2.util.ObjectUtils; ...@@ -46,12 +46,17 @@ import org.h2.util.ObjectUtils;
*/ */
public class PageStore implements CacheWriter { public class PageStore implements CacheWriter {
// TODO log block allocation
// TODO use free-space bitmap
// TODO block compression: maybe http://en.wikipedia.org/wiki/LZJB
// with RLE, specially for 0s.
// TODO test that setPageId updates parent, overflow parent // TODO test that setPageId updates parent, overflow parent
// TODO order pages so that searching for a key // TODO order pages so that searching for a key
// doesn't seek backwards in the file // doesn't seek backwards in the file
// TODO use an undo log and maybe redo log (for performance) // TODO use an undo log and maybe redo log (for performance)
// TODO checksum: 0 for empty; position hash + every 128th byte, // TODO checksum: 0 for empty; position hash + every 128th byte,
// specially important for log // specially important for log; misdirected reads or writes
// TODO type, sequence (start with random); checksum (start with block id)
// TODO for lists: write sequence byte // TODO for lists: write sequence byte
// TODO completely re-use keys of deleted rows; maybe // TODO completely re-use keys of deleted rows; maybe
// remember last page with deleted keys (in the root page?), // remember last page with deleted keys (in the root page?),
......
Markdown 格式
0%
您添加了 0 到此讨论。请谨慎行事。
请先完成此评论的编辑!
注册 或者 后发表评论