提交 85b075ee authored 作者: Thomas Mueller's avatar Thomas Mueller

New experimental page store

上级 805d71bd
......@@ -32,19 +32,34 @@ public class Page {
public static final int TYPE_DATA_NODE = 2;
/**
* An overflow page (the last page: + FLAG_LAST).
* A data overflow page (the last page: + FLAG_LAST).
*/
public static final int TYPE_DATA_OVERFLOW = 3;
/**
* A btree leaf page (without overflow: + FLAG_LAST).
*/
public static final int TYPE_BTREE_LEAF = 4;
/**
* A btree node page (never has overflow pages).
*/
public static final int TYPE_BTREE_NODE = 5;
/**
* A btree overflow page.
*/
public static final int TYPE_BTREE_OVERFLOW = 6;
/**
* A page containing a list of free pages (the last page: + FLAG_LAST).
*/
public static final int TYPE_FREE_LIST = 4;
public static final int TYPE_FREE_LIST = 7;
/**
* A log page.
*/
public static final int TYPE_LOG = 5;
public static final int TYPE_LOG = 8;
/**
* This is a root page.
......
/*
* Copyright 2004-2009 H2 Group. Multiple-Licensed under the H2 License,
* Version 1.0, and under the Eclipse Public License, Version 1.0
* (http://h2database.com/html/license.html).
* Initial Developer: H2 Group
*/
package org.h2.index;
import java.sql.SQLException;
import org.h2.engine.Session;
import org.h2.result.Row;
import org.h2.result.SearchRow;
import org.h2.store.DataPage;
import org.h2.store.Record;
import org.h2.table.Column;
import org.h2.value.Value;
/**
 * An abstract b-tree page that contains index data, either a leaf or an
 * internal node. Rows are read lazily and cached in {@link #rows}.
 */
abstract class PageBtree extends Record {

    /**
     * Indicator that the row count is not known.
     */
    static final int UNKNOWN_ROWCOUNT = -1;

    /**
     * The index this page belongs to.
     */
    protected final PageBtreeIndex index;

    /**
     * The page number of the parent page (0 for the root page).
     */
    protected int parentPageId;

    /**
     * The underlying data page this b-tree page is stored in.
     */
    protected final DataPage data;

    /**
     * The offset within the data page where each row starts.
     */
    protected int[] offsets;

    /**
     * The number of entries stored in this page.
     */
    protected int entryCount;

    /**
     * The start of the data area (grows as entries are added).
     */
    int start;

    /**
     * The cached rows. Entries are read lazily by getRow, so elements may be
     * null until first accessed.
     */
    protected SearchRow[] rows;

    PageBtree(PageBtreeIndex index, int pageId, int parentPageId, DataPage data) {
        this.index = index;
        this.parentPageId = parentPageId;
        this.data = data;
        setPos(pageId);
    }

    /**
     * Get the real row count. If required, this will read all child pages.
     *
     * @return the row count
     */
    abstract int getRowCount() throws SQLException;

    /**
     * Set the stored row count. This will write the page.
     *
     * @param rowCount the stored row count
     */
    abstract void setRowCountStored(int rowCount) throws SQLException;

    /**
     * Find the index of the first entry that is equal to (or, if bigger is
     * set, larger than) the given row, using binary search.
     *
     * @param session the session
     * @param compare the row to search for (may not exist in this page)
     * @param bigger whether an exact match should be skipped
     * @return the index of the matching or next entry
     */
    int find(Session session, SearchRow compare, boolean bigger) throws SQLException {
        int l = 0, r = entryCount;
        while (l < r) {
            // unsigned shift avoids overflow for large l + r
            int i = (l + r) >>> 1;
            // no cast needed: getRow is declared to return SearchRow
            SearchRow row = getRow(session, i);
            int comp = index.compareRows(row, compare);
            if (comp > 0 || (!bigger && comp == 0)) {
                r = i;
            } else {
                l = i + 1;
            }
        }
        return l;
    }

    /**
     * Read the page data from the underlying data page.
     */
    abstract void read() throws SQLException;

    /**
     * Add a row.
     *
     * @param row the row
     * @return 0 if successful, or the split position if the page needs to be
     *         split
     */
    abstract int addRow(Session session, SearchRow row) throws SQLException;

    /**
     * Position the cursor on the first matching row.
     *
     * @param cursor the cursor to position
     * @param first the row to search for
     * @param bigger whether an exact match should be skipped
     */
    abstract void find(PageBtreeCursor cursor, SearchRow first, boolean bigger) throws SQLException;

    /**
     * Get the row at this position, reading it from the data page if it is
     * not yet cached.
     *
     * @param session the session
     * @param at the index
     * @return the row
     */
    SearchRow getRow(Session session, int at) throws SQLException {
        SearchRow row = rows[at];
        if (row == null) {
            row = index.readRow(data, offsets[at]);
            rows[at] = row;
        }
        return row;
    }

    /**
     * Split the index page at the given point.
     *
     * @param splitPoint the index where to split
     * @return the new page that contains about half the entries
     */
    abstract PageBtree split(Session session, int splitPoint) throws SQLException;

    /**
     * Change the page id. This removes the old position from the page store
     * cache and re-parents all children.
     *
     * @param id the new page id
     */
    void setPageId(int id) throws SQLException {
        index.getPageStore().removeRecord(getPos());
        setPos(id);
        remapChildren();
    }

    int getPageId() {
        return getPos();
    }

    /**
     * Get the first child leaf page of a page.
     *
     * @return the page
     */
    abstract PageBtreeLeaf getFirstLeaf() throws SQLException;

    /**
     * Change the parent page id.
     *
     * @param id the new parent page id
     */
    void setParentPageId(int id) {
        this.parentPageId = id;
    }

    /**
     * Update the parent id of all children.
     */
    abstract void remapChildren() throws SQLException;

    /**
     * Remove a row.
     *
     * @param row the row to remove
     * @return true if this page is now empty
     */
    abstract boolean remove(Session session, SearchRow row) throws SQLException;

}
/*
* Copyright 2004-2009 H2 Group. Multiple-Licensed under the H2 License,
* Version 1.0, and under the Eclipse Public License, Version 1.0
* (http://h2database.com/html/license.html).
* Initial Developer: H2 Group
*/
package org.h2.index;
import java.sql.SQLException;
import org.h2.engine.Session;
import org.h2.result.Row;
import org.h2.result.SearchRow;
/**
 * The cursor implementation for the page btree index.
 * Iterates over the leaf pages of a PageBtreeIndex, resolving the full row
 * lazily from the search row.
 */
public class PageBtreeCursor implements Cursor {

    private final Session session;
    private final PageBtreeIndex index;
    // the last row of the scanned range; used (in the intended next()
    // implementation below) to detect the end of the scan
    private final SearchRow last;
    // the leaf page currently being iterated
    private PageBtreeLeaf current;
    // the position within the current leaf page
    private int i;
    private SearchRow currentSearchRow;
    // the full row for currentSearchRow, fetched lazily in get()
    private Row currentRow;

    PageBtreeCursor(Session session, PageBtreeIndex index, SearchRow last) {
        this.session = session;
        this.index = index;
        this.last = last;
    }

    /**
     * Position this cursor on the given leaf page and entry index.
     */
    void setCurrent(PageBtreeLeaf current, int i) {
        this.current = current;
        this.i = i;
    }

    public Row get() throws SQLException {
        // resolve the full row lazily from the index using the search row's
        // position; cached until the cursor moves
        if (currentRow == null && currentSearchRow != null) {
            currentRow = index.getRow(session, currentSearchRow.getPos());
        }
        return currentRow;
    }

    public int getPos() {
        return currentSearchRow.getPos();
    }

    public SearchRow getSearchRow() {
        return currentSearchRow;
    }

    public boolean next() throws SQLException {
        // NOTE(review): work in progress - this stub always returns true;
        // the commented-out code below is the intended implementation
        // (advance within the leaf, move to the next leaf when exhausted,
        // and stop once a row beyond 'last' is reached). Confirm before use.
//        if (i >= current.getEntryCount()) {
//            current = current.getNextPage();
//            i = 0;
//            if (current == null) {
//                return false;
//            }
//        }
//        currentSearchRow = current.getRowAt(i);
//        if (index.compareRows(currentSearchRow, last) > 0) {
//            currentSearchRow = null;
//            currentRow = null;
//            return false;
//        }
//        i++;
        return true;
    }

    public boolean previous() throws SQLException {
        i--;
        // unused local acts as a TODO marker (project convention):
        // stepping to the previous leaf page is not yet implemented
        int todo;
        return true;
    }

    Session getSession() {
        return session;
    }

}
差异被折叠。
/*
* Copyright 2004-2009 H2 Group. Multiple-Licensed under the H2 License,
* Version 1.0, and under the Eclipse Public License, Version 1.0
* (http://h2database.com/html/license.html).
* Initial Developer: H2 Group
*/
package org.h2.index;
import java.sql.SQLException;
import org.h2.constant.ErrorCode;
import org.h2.engine.Session;
import org.h2.message.Message;
import org.h2.result.Row;
import org.h2.result.SearchRow;
import org.h2.store.DataPage;
import org.h2.store.PageStore;
/**
 * A leaf page that contains index data.
 * Format:
 * <ul><li>0-3: parent page id (0 for root)
 * </li><li>4-4: page type
 * </li><li>5-8: table id
 * </li><li>9-10: entry count
 * </li><li>overflow: 11-14: the row key
 * </li><li>11-: list of key / offset pairs (4 bytes key, 2 bytes offset)
 * </li><li>data
 * </li></ul>
 */
class PageBtreeLeaf extends PageBtree {

    private static final int KEY_OFFSET_PAIR_LENGTH = 6;
    private static final int KEY_OFFSET_PAIR_START = 11;

    // set once the page content has been serialized (see write())
    private boolean written;

    PageBtreeLeaf(PageBtreeIndex index, int pageId, int parentPageId, DataPage data) {
        super(index, pageId, parentPageId, data);
        start = KEY_OFFSET_PAIR_START;
    }

    /**
     * Read the page header and row offsets from the data page, verifying
     * that the stored table id matches this index.
     */
    void read() throws SQLException {
        data.setPos(4);
        data.readByte();
        int tableId = data.readInt();
        if (tableId != index.getId()) {
            // fixed message: a space was missing before "got:", which
            // produced unreadable output like "table:5got:7"
            throw Message.getSQLException(ErrorCode.FILE_CORRUPTED_1,
                    "page:" + getPageId() + " expected table:" + index.getId() +
                    " got:" + tableId);
        }
        entryCount = data.readShortInt();
        offsets = new int[entryCount];
        rows = new SearchRow[entryCount];
        for (int i = 0; i < entryCount; i++) {
            offsets[i] = data.readShortInt();
        }
        start = data.length();
    }

    /**
     * Add a row if possible. If it is possible this method returns 0, otherwise
     * the split point. It is always possible to add one row.
     *
     * @param session the session
     * @param row the row to add
     * @return the split point of this page, or 0 if no split is required
     */
    int addRow(Session session, SearchRow row) throws SQLException {
        int rowLength = index.getRowSize(data, row);
        int pageSize = index.getPageStore().getPageSize();
        // TODO currently the order is important
        // TODO and can only add at the end
        // rows are stored back to front: the last added row has the lowest
        // offset, so the next row goes just below the previous one
        int last = entryCount == 0 ? pageSize : offsets[entryCount - 1];
        if (entryCount > 0 && last - rowLength < start + KEY_OFFSET_PAIR_LENGTH) {
            // unused local acts as a TODO marker (project convention)
            int todoSplitAtLastInsertionPoint;
            return (entryCount / 2) + 1;
        }
        int offset = last - rowLength;
        int[] newOffsets = new int[entryCount + 1];
        SearchRow[] newRows = new SearchRow[entryCount + 1];
        int x;
        if (entryCount == 0) {
            x = 0;
        } else {
            // insert at the sorted position, shifting later entries right
            x = find(session, row, false);
            System.arraycopy(offsets, 0, newOffsets, 0, x);
            System.arraycopy(rows, 0, newRows, 0, x);
            if (x < entryCount) {
                System.arraycopy(offsets, x, newOffsets, x + 1, entryCount - x);
                System.arraycopy(rows, x, newRows, x + 1, entryCount - x);
            }
        }
        entryCount++;
        start += KEY_OFFSET_PAIR_LENGTH;
        newOffsets[x] = offset;
        newRows[x] = row;
        offsets = newOffsets;
        rows = newRows;
        index.getPageStore().updateRecord(this, true, data);
        if (offset < start) {
            // the single row is larger than the page: overflow handling
            // (only valid when this is the only entry)
            if (entryCount > 1) {
                Message.throwInternalError();
            }
            // need to write the overflow page id
            start += 4;
            // NOTE(review): 'remaining' is computed but never used - the
            // overflow page is apparently not written yet (work in progress)
            int remaining = rowLength - (pageSize - start);
            // fix offset
            offset = start;
            offsets[x] = offset;
        }
        return 0;
    }

    /**
     * Remove the entry at the given index, shrinking the offset and row
     * arrays.
     *
     * @param i the index of the entry to remove
     */
    private void removeRow(int i) throws SQLException {
        entryCount--;
        if (entryCount <= 0) {
            Message.throwInternalError();
        }
        int[] newOffsets = new int[entryCount];
        // must be SearchRow[] (not Row[]): the cached entries produced by
        // index.readRow are SearchRow instances, and copying them into a
        // Row[] would throw ArrayStoreException at runtime; also removed
        // the unused newKeys array
        SearchRow[] newRows = new SearchRow[entryCount];
        System.arraycopy(offsets, 0, newOffsets, 0, i);
        System.arraycopy(rows, 0, newRows, 0, i);
        System.arraycopy(offsets, i + 1, newOffsets, i, entryCount - i);
        System.arraycopy(rows, i + 1, newRows, i, entryCount - i);
        start -= KEY_OFFSET_PAIR_LENGTH;
        offsets = newOffsets;
        rows = newRows;
    }

    int getEntryCount() {
        return entryCount;
    }

    PageBtree split(Session session, int splitPoint) throws SQLException {
        int newPageId = index.getPageStore().allocatePage();
        PageBtreeLeaf p2 = new PageBtreeLeaf(index, newPageId, parentPageId, index.getPageStore().createDataPage());
        // move every entry from the split point on to the new page;
        // removeRow decrements entryCount, so keep removing at splitPoint
        // until none are left (clearer than the original for-loop whose
        // induction variable never changed)
        while (splitPoint < entryCount) {
            p2.addRow(session, getRow(session, splitPoint));
            removeRow(splitPoint);
        }
        return p2;
    }

    PageBtreeLeaf getFirstLeaf() {
        return this;
    }

    boolean remove(Session session, SearchRow row) throws SQLException {
        int at = find(session, row, false);
        if (index.compareRows(row, getRow(session, at)) != 0) {
            throw Message.getSQLException(ErrorCode.ROW_NOT_FOUND_WHEN_DELETING_1, index.getSQL() + ": " + row);
        }
        if (entryCount == 1) {
            // removing the last row empties the page; the caller frees it
            return true;
        }
        removeRow(at);
        index.getPageStore().updateRecord(this, true, data);
        return false;
    }

    int getRowCount() throws SQLException {
        return entryCount;
    }

    void setRowCountStored(int rowCount) throws SQLException {
        // ignore
    }

    public int getByteCount(DataPage dummy) throws SQLException {
        return index.getPageStore().getPageSize();
    }

    public void write(DataPage buff) throws SQLException {
        write();
        index.getPageStore().writePage(getPos(), data);
    }

    PageStore getPageStore() {
        return index.getPageStore();
    }

    private void write() throws SQLException {
        // NOTE(review): serialization is work in progress - the commented-out
        // code below is the intended implementation; confirm before enabling
//        if (written) {
//            return;
//        }
//        // make sure rows are read
//        for (int i = 0; i < entryCount; i++) {
//            getRowAt(i);
//        }
//        data.reset();
//        data.writeInt(parentPageId);
//        int type;
//        if (overflowKey == 0) {
//            type = Page.TYPE_BTREE_LEAF | Page.FLAG_LAST;
//        } else {
//            type = Page.TYPE_BTREE_LEAF;
//        }
//        data.writeByte((byte) type);
//        data.writeInt(index.getId());
//        data.writeShortInt(entryCount);
//        if (overflowKey != 0) {
//            data.writeInt(overflowKey);
//        }
//        for (int i = 0; i < entryCount; i++) {
//            data.writeInt(keys[i]);
//            data.writeShortInt(offsets[i]);
//        }
//        for (int i = 0; i < entryCount; i++) {
//            data.setPos(offsets[i]);
//            rows[i].write(data);
//        }
//        written = true;
    }

    DataPage getDataPage() throws SQLException {
        write();
        return data;
    }

    void find(PageBtreeCursor cursor, SearchRow first, boolean bigger) throws SQLException {
        // unused local acts as a TODO marker (project convention):
        // positioning the cursor is not yet implemented
        int todo;
    }

    void remapChildren() throws SQLException {
        // unused local acts as a TODO marker (project convention):
        // leaf pages have no children, but this is still marked as open
        int todo;
    }

}
差异被折叠。
......@@ -26,6 +26,10 @@ import org.h2.store.DataPage;
*/
class PageDataNode extends PageData {
private final static int ENTRY_START = 15;
private final static int ENTRY_LENGTH = 8;
/**
* The page ids of the children.
*/
......@@ -86,7 +90,7 @@ class PageDataNode extends PageData {
index.getPageStore().updateRecord(page, true, page.data);
index.getPageStore().updateRecord(page2, true, page2.data);
addChild(x, page2.getPageId(), pivot);
int maxEntries = (index.getPageStore().getPageSize() - 15) / 8;
int maxEntries = (index.getPageStore().getPageSize() - ENTRY_START) / ENTRY_LENGTH;
if (entryCount >= maxEntries) {
int todoSplitAtLastInsertionPoint;
return entryCount / 2;
......
......@@ -155,13 +155,6 @@ public class PageScanIndex extends BaseIndex implements RowIndex {
return false;
}
public void close(Session session) throws SQLException {
if (trace.isDebugEnabled()) {
trace.debug("close");
}
int writeRowCount;
}
public Cursor find(Session session, SearchRow first, SearchRow last) throws SQLException {
PageData root = getPage(headPos);
return root.find();
......@@ -279,4 +272,12 @@ public class PageScanIndex extends BaseIndex implements RowIndex {
return -1;
}
public void close(Session session) throws SQLException {
if (trace.isDebugEnabled()) {
trace.debug("close");
}
store = null;
int writeRowCount;
}
}
......@@ -46,12 +46,17 @@ import org.h2.util.ObjectUtils;
*/
public class PageStore implements CacheWriter {
// TODO log block allocation
// TODO use free-space bitmap
// TODO block compression: maybe http://en.wikipedia.org/wiki/LZJB
// with RLE, specially for 0s.
// TODO test that setPageId updates parent, overflow parent
// TODO order pages so that searching for a key
// doesn't seek backwards in the file
// TODO use an undo log and maybe redo log (for performance)
// TODO checksum: 0 for empty; position hash + every 128th byte,
// specially important for log
// specially important for log; misdirected reads or writes
// TODO type, sequence (start with random); checksum (start with block id)
// TODO for lists: write sequence byte
// TODO completely re-use keys of deleted rows; maybe
// remember last page with deleted keys (in the root page?),
......
Markdown 格式
0%
您添加了 0 到此讨论。请谨慎行事。
请先完成此评论的编辑!
注册 或者 后发表评论