Commit 3c8794df, authored by Thomas Mueller

new experimental page store

Parent: d98c7905
Prepared.java
@@ -14,6 +14,7 @@ import org.h2.engine.Database;
 import org.h2.engine.Session;
 import org.h2.expression.Expression;
 import org.h2.expression.Parameter;
+import org.h2.index.Index;
 import org.h2.message.Message;
 import org.h2.result.LocalResult;
 import org.h2.util.ObjectArray;
@@ -37,7 +38,7 @@ public abstract class Prepared {
     /**
      * The position of the head record (used for indexes).
      */
-    protected int headPos = -1;
+    protected int headPos = Index.EMPTY_HEAD;

     /**
      * The list of parameters.
CreateTable.java
 /*
  * Copyright 2004-2008 H2 Group. Multiple-Licensed under the H2 License,
  * Version 1.0, and under the Eclipse Public License, Version 1.0
  * (http://h2database.com/html/license.html).
  * Initial Developer: H2 Group
@@ -15,7 +15,6 @@ import org.h2.constant.ErrorCode;
 import org.h2.engine.Database;
 import org.h2.engine.Session;
 import org.h2.expression.Expression;
-import org.h2.index.Index;
 import org.h2.message.Message;
 import org.h2.schema.Schema;
 import org.h2.schema.Sequence;
@@ -63,7 +62,7 @@ public class CreateTable extends SchemaCommand {
     /**
      * Add a column to this table.
      *
      * @param column the column to add
      */
     public void addColumn(Column column) {
@@ -76,7 +75,7 @@ public class CreateTable extends SchemaCommand {
     /**
      * Add a constraint statement to this statement.
      * The primary key definition is one possible constraint statement.
      *
      * @param command the statement to add
      */
     public void addConstraintCommand(Prepared command) throws SQLException {
@@ -145,7 +144,7 @@ public class CreateTable extends SchemaCommand {
             }
         }
         int id = getObjectId(true, true);
-        TableData table = getSchema().createTable(tableName, id, columns, persistent, clustered, Index.EMPTY_HEAD);
+        TableData table = getSchema().createTable(tableName, id, columns, persistent, clustered, headPos);
         table.setComment(comment);
         table.setTemporary(temporary);
         table.setGlobalTemporary(globalTemporary);
@@ -219,7 +218,7 @@ public class CreateTable extends SchemaCommand {
     }

     /**
      * Sets the primary key columns, but also check if a primary key
      * with different columns is already defined.
      *
      * @param columns the primary key columns
Database.java
@@ -1192,6 +1192,10 @@ public class Database implements DataHandler {
                 fileIndex.close();
                 fileIndex = null;
             }
+            if (pageStore != null) {
+                pageStore.close();
+                pageStore = null;
+            }
         } catch (SQLException e) {
             traceSystem.getTrace(Trace.DATABASE).error("close", e);
         }
PageData.java
@@ -11,31 +11,32 @@ import java.sql.SQLException;
 import org.h2.engine.Session;
 import org.h2.result.Row;
 import org.h2.store.DataPageBinary;
+import org.h2.store.Record;

 /**
  * A page that contains data rows.
  */
-abstract class PageData {
+abstract class PageData extends Record {

     /**
-     * The index.
+     * Indicator that the row count is not known.
      */
-    protected final PageScanIndex index;
+    static final int UNKNOWN_ROWCOUNT = -1;

     /**
-     * The data page.
+     * The index.
      */
-    protected final DataPageBinary data;
+    protected final PageScanIndex index;

     /**
-     * the page number.
+     * The page number of the parent.
      */
-    protected int pageId;
+    protected int parentPageId;

     /**
-     * The page number of the parent.
+     * The data page.
      */
-    protected int parentPageId;
+    protected final DataPageBinary data;

     /**
      * The number of entries.
@@ -49,11 +50,25 @@ abstract class PageData {
     PageData(PageScanIndex index, int pageId, int parentPageId, DataPageBinary data) {
         this.index = index;
-        this.pageId = pageId;
         this.parentPageId = parentPageId;
         this.data = data;
+        this.setPos(pageId);
     }

+    /**
+     * Get the real row count. If required, this will read all child pages.
+     *
+     * @return the row count
+     */
+    abstract int getRowCount() throws SQLException;
+
+    /**
+     * Set the stored row count. This will write the page.
+     *
+     * @param rowCount the stored row count
+     */
+    abstract void setRowCountStored(int rowCount) throws SQLException;
+
     /**
      * Find an entry by key.
      *
@@ -97,11 +112,6 @@ abstract class PageData {
      */
     abstract Cursor find() throws SQLException;

-    /**
-     * Write the page.
-     */
-    abstract void write() throws SQLException;
-
     /**
      * Get the key at this position.
      *
@@ -126,12 +136,14 @@ abstract class PageData {
      *
      * @param id the new page id
      */
-    void setPageId(int id) {
-        this.pageId = id;
+    void setPageId(int id) throws SQLException {
+        index.getPageStore().removeRecord(getPos());
+        setPos(id);
+        remapChildren();
     }

     int getPageId() {
-        return pageId;
+        return getPos();
     }

     /**
@@ -153,9 +165,8 @@ abstract class PageData {
      *
      * @param id the new parent page id
      */
-    void setParentPageId(int id) throws SQLException {
+    void setParentPageId(int id) {
         this.parentPageId = id;
-        remapChildren();
     }

     /**
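The PageData change above drops the private pageId field and the per-page write() hook: a page now extends Record, its cache position doubles as its page id, and callers mark pages dirty through the page store instead of serializing them directly. A minimal sketch of that pattern follows; the class and method names here are illustrative only, not the actual H2 classes.

import java.util.HashMap;
import java.util.Map;

// Sketch only (not H2 code): a write-back cache keyed by page id.
class PageCacheSketch {

    abstract static class Page {
        private int pos;            // page id == cache key
        private boolean changed;

        int getPos() { return pos; }
        void setPos(int pos) { this.pos = pos; }
        boolean isChanged() { return changed; }
        void setChanged(boolean changed) { this.changed = changed; }

        abstract void write();      // invoked later by the cache writer
    }

    private final Map<Integer, Page> cache = new HashMap<>();

    void updateRecord(Page page) {  // replaces page.write() at call sites
        page.setChanged(true);
        cache.put(page.getPos(), page);
    }

    Page getRecord(int pageId) {    // cache lookup before reading from disk
        return cache.get(pageId);
    }

    void removeRecord(int pageId) { // e.g. when a page is given a new id
        cache.remove(pageId);
    }
}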
PageDataLeaf.java
@@ -11,6 +11,7 @@ import org.h2.constant.ErrorCode;
 import org.h2.engine.Session;
 import org.h2.message.Message;
 import org.h2.result.Row;
+import org.h2.store.DataPage;
 import org.h2.store.DataPageBinary;
 import org.h2.store.PageStore;
 import org.h2.util.IntArray;
@@ -81,67 +82,6 @@ class PageDataLeaf extends PageData {
         start = data.length();
     }

-    void write() throws SQLException {
-        // make sure rows are read
-        for (int i = 0; i < entryCount; i++) {
-            getRowAt(i);
-        }
-        data.reset();
-        data.writeInt(parentPageId);
-        int type;
-        if (firstOverflowPageId == 0) {
-            type = Page.TYPE_DATA_LEAF;
-        } else {
-            type = Page.TYPE_DATA_LEAF_WITH_OVERFLOW;
-        }
-        data.writeByte((byte) type);
-        data.writeShortInt(entryCount);
-        if (firstOverflowPageId != 0) {
-            data.writeInt(firstOverflowPageId);
-        }
-        for (int i = 0; i < entryCount; i++) {
-            data.writeInt(keys[i]);
-            data.writeShortInt(offsets[i]);
-        }
-        for (int i = 0; i < entryCount; i++) {
-            data.setPos(offsets[i]);
-            rows[i].write(data);
-        }
-        PageStore store = index.getPageStore();
-        int pageSize = store.getPageSize();
-        store.writePage(pageId, data);
-        // don't need to write overflow if we just update the parent page id
-        if (data.length() > pageSize && overflowPageIds != null) {
-            if (firstOverflowPageId == 0) {
-                throw Message.getInternalError();
-            }
-            DataPageBinary overflow = store.createDataPage();
-            int parent = pageId;
-            int pos = pageSize;
-            int remaining = data.length() - pageSize;
-            for (int i = 0; i < overflowPageIds.length; i++) {
-                overflow.reset();
-                overflow.writeInt(parent);
-                int size;
-                if (remaining > pageSize - 7) {
-                    overflow.writeByte((byte) Page.TYPE_DATA_OVERFLOW_WITH_MORE);
-                    overflow.writeInt(overflowPageIds[i + 1]);
-                    size = pageSize - overflow.length();
-                } else {
-                    overflow.writeByte((byte) Page.TYPE_DATA_OVERFLOW_LAST);
-                    size = remaining;
-                    overflow.writeShortInt(remaining);
-                }
-                overflow.write(data.getBytes(), pos, size);
-                remaining -= size;
-                pos += size;
-                int id = overflowPageIds[i];
-                store.writePage(id, overflow);
-                parent = id;
-            }
-        }
-    }
-
     /**
      * Add a row if possible. If it is possible this method returns 0, otherwise
      * the split point. It is always possible to add one row.
@@ -154,7 +94,6 @@ class PageDataLeaf extends PageData {
         int last = entryCount == 0 ? pageSize : offsets[entryCount - 1];
         if (entryCount > 0 && last - rowLength < start + 6) {
             int todoSplitAtLastInsertionPoint;
-            return (entryCount / 2) + 1;
         }
         int offset = last - rowLength;
         int[] newOffsets = new int[entryCount + 1];
@@ -173,6 +112,7 @@ class PageDataLeaf extends PageData {
                 System.arraycopy(keys, x, newKeys, x + 1, entryCount - x);
                 System.arraycopy(rows, x, newRows, x + 1, entryCount - x);
             }
+            return (entryCount / 2) + 1;
         }
         entryCount++;
         start += 6;
@@ -205,7 +145,7 @@ class PageDataLeaf extends PageData {
             array.toArray(overflowPageIds);
             firstOverflowPageId = overflowPageIds[0];
         }
-        write();
+        index.getPageStore().updateRecord(this);
         return 0;
     }
@@ -286,6 +226,9 @@ class PageDataLeaf extends PageData {
     int getLastKey() throws SQLException {
         int todoRemove;
+        if (entryCount == 0) {
+            return 0;
+        }
         return getRowAt(entryCount - 1).getPos();
     }
@@ -302,7 +245,16 @@ class PageDataLeaf extends PageData {
     }

     protected void remapChildren() throws SQLException {
-        int todoUpdateOverflowPages;
+        if (firstOverflowPageId == 0) {
+            return;
+        }
+        int testIfReallyNotRequired;
+        // PageStore store = index.getPageStore();
+        // store.updateRecord(firstOverflowPageId);
+        // DataPageBinary overflow = store.readPage(firstOverflowPageId);
+        // overflow.reset();
+        // overflow.writeInt(getPos());
+        // store.writePage(firstOverflowPageId, overflow);
     }

     boolean remove(int key) throws SQLException {
@@ -314,7 +266,7 @@ class PageDataLeaf extends PageData {
             return true;
         }
         removeRow(i);
-        write();
+        index.getPageStore().updateRecord(this);
         return false;
     }
@@ -323,4 +275,77 @@ class PageDataLeaf extends PageData {
         return getRowAt(index);
     }

+    int getRowCount() throws SQLException {
+        return entryCount;
+    }
+
+    void setRowCountStored(int rowCount) throws SQLException {
+        // ignore
+    }
+
+    public int getByteCount(DataPage dummy) throws SQLException {
+        return index.getPageStore().getPageSize();
+    }
+
+    public void write(DataPage buff) throws SQLException {
+        // make sure rows are read
+        for (int i = 0; i < entryCount; i++) {
+            getRowAt(i);
+        }
+        data.reset();
+        data.writeInt(parentPageId);
+        int type;
+        if (firstOverflowPageId == 0) {
+            type = Page.TYPE_DATA_LEAF;
+        } else {
+            type = Page.TYPE_DATA_LEAF_WITH_OVERFLOW;
+        }
+        data.writeByte((byte) type);
+        data.writeShortInt(entryCount);
+        if (firstOverflowPageId != 0) {
+            data.writeInt(firstOverflowPageId);
+        }
+        for (int i = 0; i < entryCount; i++) {
+            data.writeInt(keys[i]);
+            data.writeShortInt(offsets[i]);
+        }
+        for (int i = 0; i < entryCount; i++) {
+            data.setPos(offsets[i]);
+            rows[i].write(data);
+        }
+        PageStore store = index.getPageStore();
+        int pageSize = store.getPageSize();
+        store.writePage(getPos(), data);
+        // don't need to write overflow if we just update the parent page id
+        if (data.length() > pageSize && overflowPageIds != null) {
+            if (firstOverflowPageId == 0) {
+                throw Message.getInternalError();
+            }
+            DataPageBinary overflow = store.createDataPage();
+            int parent = getPos();
+            int pos = pageSize;
+            int remaining = data.length() - pageSize;
+            for (int i = 0; i < overflowPageIds.length; i++) {
+                overflow.reset();
+                overflow.writeInt(parent);
+                int size;
+                if (remaining > pageSize - 7) {
+                    overflow.writeByte((byte) Page.TYPE_DATA_OVERFLOW_WITH_MORE);
+                    overflow.writeInt(overflowPageIds[i + 1]);
+                    size = pageSize - overflow.length();
+                } else {
+                    overflow.writeByte((byte) Page.TYPE_DATA_OVERFLOW_LAST);
+                    size = remaining;
+                    overflow.writeShortInt(remaining);
+                }
+                overflow.write(data.getBytes(), pos, size);
+                remaining -= size;
+                pos += size;
+                int id = overflowPageIds[i];
+                store.writePage(id, overflow);
+                parent = id;
+            }
+        }
+    }
 }
PageDataNode.java
@@ -11,6 +11,7 @@ import java.sql.SQLException;
 import org.h2.engine.Session;
 import org.h2.message.Message;
 import org.h2.result.Row;
+import org.h2.store.DataPage;
 import org.h2.store.DataPageBinary;

 /**
@@ -19,25 +20,30 @@ import org.h2.store.DataPageBinary;
  * <ul><li>0-3: parent page id
  * </li><li>4-4: page type
  * </li><li>5-6: entry count
- * </li><li>7-10: rightmost child page id
- * </li><li>11- entries: 4 bytes leaf page id, 4 bytes key
+ * </li><li>7-10: row count of all children (-1 if not known)
+ * </li><li>11-14: rightmost child page id
+ * </li><li>15- entries: 4 bytes leaf page id, 4 bytes key
  * </li></ul>
  */
 class PageDataNode extends PageData {

     /**
      * The page ids of the children.
      */
-    int[] childPageIds;
+    private int[] childPageIds;
+
+    private int rowCountStored = UNKNOWN_ROWCOUNT;
+
+    private int rowCount = UNKNOWN_ROWCOUNT;

     PageDataNode(PageScanIndex index, int pageId, int parentPageId, DataPageBinary data) {
         super(index, pageId, parentPageId, data);
-        int todoOptimizationChildrenEntryCount;
     }

     void read() {
         data.setPos(5);
         entryCount = data.readShortInt();
+        rowCount = rowCountStored = data.readInt();
         childPageIds = new int[entryCount + 1];
         childPageIds[entryCount] = data.readInt();
         keys = new int[entryCount];
@@ -45,27 +51,17 @@ class PageDataNode extends PageData {
             childPageIds[i] = data.readInt();
             keys[i] = data.readInt();
         }
+        check();
     }

-    void write() throws SQLException {
-        data.reset();
-        data.writeInt(parentPageId);
-        data.writeByte((byte) Page.TYPE_DATA_NODE);
-        data.writeShortInt(entryCount);
-        data.writeInt(childPageIds[entryCount]);
-        for (int i = 0; i < entryCount; i++) {
-            data.writeInt(childPageIds[i]);
-            data.writeInt(keys[i]);
-        }
-        index.getPageStore().writePage(pageId, data);
-    }
-
     private void addChild(int x, int childPageId, int key) {
         int[] newKeys = new int[entryCount + 1];
         int[] newChildPageIds = new int[entryCount + 2];
+        if (childPageIds != null) {
+            System.arraycopy(childPageIds, 0, newChildPageIds, 0, x + 1);
+        }
         if (entryCount > 0) {
             System.arraycopy(keys, 0, newKeys, 0, x);
-            System.arraycopy(childPageIds, 0, newChildPageIds, 0, x + 1);
             if (x < entryCount) {
                 System.arraycopy(keys, x, newKeys, x + 1, entryCount - x);
                 System.arraycopy(childPageIds, x, newChildPageIds, x + 1, entryCount - x + 1);
@@ -88,18 +84,29 @@ class PageDataNode extends PageData {
             }
             int pivot = page.getKey(splitPoint - 1);
             PageData page2 = page.split(splitPoint);
-            page.write();
-            page2.write();
+            index.getPageStore().updateRecord(page);
+            index.getPageStore().updateRecord(page2);
             addChild(x, page2.getPageId(), pivot);
-            int maxEntries = (index.getPageStore().getPageSize() - 11) / 8;
+            int maxEntries = (index.getPageStore().getPageSize() - 15) / 8;
             if (entryCount >= maxEntries) {
                 int todoSplitAtLastInsertionPoint;
                 return entryCount / 2;
             }
-            write();
+            index.getPageStore().updateRecord(this);
         }
+        updateRowCount(1);
         return 0;
     }

+    private void updateRowCount(int offset) throws SQLException {
+        if (rowCount != UNKNOWN_ROWCOUNT) {
+            rowCount += offset;
+        }
+        if (rowCountStored != UNKNOWN_ROWCOUNT) {
+            rowCountStored = UNKNOWN_ROWCOUNT;
+            index.getPageStore().updateRecord(this);
+        }
+    }
+
     Cursor find() throws SQLException {
         int child = childPageIds[0];
@@ -126,8 +133,8 @@ class PageDataNode extends PageData {
         for (int i = 0; i < childPageIds.length; i++) {
             int child = childPageIds[i];
             PageData p = index.getPage(child);
-            p.setParentPageId(pageId);
-            p.write();
+            p.setParentPageId(getPos());
+            index.getPageStore().updateRecord(p);
         }
     }
@@ -156,6 +163,7 @@ class PageDataNode extends PageData {
         entryCount = 1;
         childPageIds = new int[] { page1.getPageId(), page2.getPageId() };
         keys = new int[] { pivot };
+        check();
     }

     int getLastKey() throws SQLException {
@@ -194,9 +202,11 @@ class PageDataNode extends PageData {
         }
         int[] newKeys = new int[entryCount];
         int[] newChildPageIds = new int[entryCount + 1];
-        System.arraycopy(keys, 0, newKeys, 0, i);
+        System.arraycopy(keys, 0, newKeys, 0, Math.min(entryCount, i));
         System.arraycopy(childPageIds, 0, newChildPageIds, 0, i);
-        System.arraycopy(keys, i + 1, newKeys, i, entryCount - i);
+        if (entryCount > i) {
+            System.arraycopy(keys, i + 1, newKeys, i, entryCount - i);
+        }
         System.arraycopy(childPageIds, i + 1, newChildPageIds, i, entryCount - i + 1);
         keys = newKeys;
         childPageIds = newChildPageIds;
@@ -208,6 +218,7 @@ class PageDataNode extends PageData {
         // TODO maybe implement merge
         PageData page = index.getPage(childPageIds[at]);
         boolean empty = page.remove(key);
+        updateRowCount(-1);
         if (!empty) {
             // the first row didn't change - nothing to do
             return false;
@@ -220,15 +231,8 @@ class PageDataNode extends PageData {
             // truncated
             return true;
         }
-        if (at == 0) {
-            // the first child is empty - then the first row of this subtree
-            // has changed
-            removeRow(at);
-        } else {
-            // otherwise the first row didn't change
-            removeRow(at - 1);
-        }
-        write();
+        removeRow(at);
+        index.getPageStore().updateRecord(this);
         return false;
     }
@@ -237,5 +241,52 @@ class PageDataNode extends PageData {
         PageData page = index.getPage(childPageIds[at]);
         return page.getRow(session, key);
     }

+    int getRowCount() throws SQLException {
+        if (rowCount == UNKNOWN_ROWCOUNT) {
+            int count = 0;
+            for (int i = 0; i < childPageIds.length; i++) {
+                PageData page = index.getPage(childPageIds[i]);
+                count += page.getRowCount();
+            }
+            rowCount = count;
+        }
+        return rowCount;
+    }
+
+    void setRowCountStored(int rowCount) throws SQLException {
+        this.rowCount = rowCount;
+        if (rowCountStored != rowCount) {
+            rowCountStored = rowCount;
+            index.getPageStore().updateRecord(this);
+        }
+    }
+
+    private void check() {
+        for (int i = 0; i < childPageIds.length; i++) {
+            if (childPageIds[i] == 0) {
+                throw Message.getInternalError();
+            }
+        }
+    }
+
+    public int getByteCount(DataPage dummy) throws SQLException {
+        return index.getPageStore().getPageSize();
+    }
+
+    public void write(DataPage buff) throws SQLException {
+        check();
+        data.reset();
+        data.writeInt(parentPageId);
+        data.writeByte((byte) Page.TYPE_DATA_NODE);
+        data.writeShortInt(entryCount);
+        data.writeInt(rowCountStored);
+        data.writeInt(childPageIds[entryCount]);
+        for (int i = 0; i < entryCount; i++) {
+            data.writeInt(childPageIds[i]);
+            data.writeInt(keys[i]);
+        }
+        index.getPageStore().writePage(getPos(), data);
+    }
 }
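The node page header documented above grows from 11 to 15 bytes (4 parent id + 1 type + 2 entry count + 4 row count + 4 rightmost child id), with each entry still taking 8 bytes (4 bytes child page id, 4 bytes key); that is why maxEntries changes from (pageSize - 11) / 8 to (pageSize - 15) / 8. A quick capacity check, with the page size chosen only as an example:

// Rough capacity check for the new node page layout described above.
public class NodeCapacityExample {

    static int maxEntries(int pageSize) {
        // 15 header bytes, 8 bytes per entry
        return (pageSize - 15) / 8;
    }

    public static void main(String[] args) {
        System.out.println(maxEntries(1024));  // prints 126
    }
}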
PageScanIndex.java
@@ -16,6 +16,8 @@ import org.h2.result.Row;
 import org.h2.result.SearchRow;
 import org.h2.store.DataPageBinary;
 import org.h2.store.PageStore;
+import org.h2.store.Record;
+import org.h2.table.Column;
 import org.h2.table.IndexColumn;
 import org.h2.table.TableData;
@@ -30,14 +32,15 @@ public class PageScanIndex extends BaseIndex implements RowIndex {
     private TableData tableData;
     private int headPos;
-    // TODO cache the row count of all children (row count, group count)
+    // TODO test that setPageId updates parent, overflow parent
     // TODO remember last page with deleted keys (in the root page?),
     // and chain such pages
     // TODO order pages so that searching for a key
     // doesn't seek backwards in the file
     // TODO use an undo log and maybe redo log (for performance)
     // TODO file position, content checksums
-    private int nextKey;
+    // TODO completely re-use keys of deleted rows
+    private int lastKey;
     private long rowCount;
     private long rowCountApproximation;
@@ -56,23 +59,31 @@ public class PageScanIndex extends BaseIndex implements RowIndex {
             // new table
             headPos = store.allocatePage();
             PageDataLeaf root = new PageDataLeaf(this, headPos, Page.ROOT, store.createDataPage());
-            root.write();
+            store.updateRecord(root);
         } else {
-            int todoRowCount;
-            rowCount = getPage(headPos).getLastKey();
+            lastKey = getPage(headPos).getLastKey();
+            rowCount = getPage(headPos).getRowCount();
+            int reuseKeysIfManyDeleted;
         }
         this.headPos = headPos;
+        trace("open " + rowCount);
         table.setRowCount(rowCount);
     }

+    public int getHeadPos() {
+        return headPos;
+    }
+
     public void add(Session session, Row row) throws SQLException {
-        row.setPos((int) rowCount);
+        row.setPos(++lastKey);
+        trace("add " + row.getPos());
         while (true) {
             PageData root = getPage(headPos);
             int splitPoint = root.addRow(row);
             if (splitPoint == 0) {
                 break;
             }
+            trace("split " + splitPoint);
             int pivot = root.getKey(splitPoint - 1);
             PageData page1 = root;
             PageData page2 = root.split(splitPoint);
@@ -83,9 +94,9 @@ public class PageScanIndex extends BaseIndex implements RowIndex {
             page2.setParentPageId(headPos);
             PageDataNode newRoot = new PageDataNode(this, rootPageId, Page.ROOT, store.createDataPage());
             newRoot.init(page1, pivot, page2);
-            page1.write();
-            page2.write();
-            newRoot.write();
+            store.updateRecord(page1);
+            store.updateRecord(page2);
+            store.updateRecord(newRoot);
             root = newRoot;
         }
         rowCount++;
@@ -98,6 +109,10 @@ public class PageScanIndex extends BaseIndex implements RowIndex {
      * @return the page
      */
     PageData getPage(int id) throws SQLException {
+        Record rec = store.getRecord(id);
+        if (rec != null) {
+            return (PageData) rec;
+        }
         DataPageBinary data = store.readPage(id);
         data.reset();
         int parentPageId = data.readInt();
@@ -123,10 +138,8 @@ public class PageScanIndex extends BaseIndex implements RowIndex {
     }

     public void close(Session session) throws SQLException {
+        trace("close");
         int writeRowCount;
-        if (store != null) {
-            store = null;
-        }
     }

     public Cursor find(Session session, SearchRow first, SearchRow last) throws SQLException {
@@ -139,7 +152,7 @@ public class PageScanIndex extends BaseIndex implements RowIndex {
     }

     public double getCost(Session session, int[] masks) throws SQLException {
-        long cost = 10 * tableData.getRowCountApproximation() + Constants.COST_ROW_OFFSET;
+        long cost = 10 * (tableData.getRowCountApproximation() + Constants.COST_ROW_OFFSET);
         return cost;
     }
@@ -148,27 +161,36 @@ public class PageScanIndex extends BaseIndex implements RowIndex {
     }

     public void remove(Session session, Row row) throws SQLException {
+        trace("remove " + row.getPos());
         int invalidateRowCount;
         // setChanged(session);
         if (rowCount == 1) {
             truncate(session);
         } else {
+            int key = row.getPos();
             PageData root = getPage(headPos);
-            root.remove(row.getPos());
+            root.remove(key);
             rowCount--;
+            int todoReuseKeys;
+            // if (key == lastKey - 1) {
+            // lastKey--;
+            // }
         }
     }

     public void remove(Session session) throws SQLException {
+        trace("remove");
         int todo;
     }

     public void truncate(Session session) throws SQLException {
-        int invalidateRowCount;
+        trace("truncate");
+        store.removeRecord(headPos);
         int freePages;
         PageDataLeaf root = new PageDataLeaf(this, headPos, Page.ROOT, store.createDataPage());
-        root.write();
+        store.updateRecord(root);
         rowCount = 0;
+        lastKey = 0;
     }

     public void checkRename() throws SQLException {
@@ -199,8 +221,24 @@ public class PageScanIndex extends BaseIndex implements RowIndex {
     }

     public long getRowCount(Session session) {
-        int todo;
         return rowCount;
     }

+    public String getCreateSQL() {
+        return null;
+    }
+
+    private void trace(String message) {
+        if (headPos != 1) {
+            int test;
+            // System.out.println(message);
+        }
+    }
+
+    public int getColumnIndex(Column col) {
+        // the scan index cannot use any columns
+        // TODO it can if there is an INT primary key
+        return -1;
+    }
 }
(file name not shown in this view)
@@ -16,7 +16,6 @@ import java.sql.SQLException;
 import java.util.Comparator;
 import java.util.HashSet;
 import java.util.Iterator;
 import org.h2.api.DatabaseEventListener;
 import org.h2.constant.ErrorCode;
 import org.h2.constant.SysProperties;
PageLog.java (new file)
/*
 * Copyright 2004-2008 H2 Group. Multiple-Licensed under the H2 License,
 * Version 1.0, and under the Eclipse Public License, Version 1.0
 * (http://h2database.com/html/license.html).
 * Initial Developer: H2 Group
 */
package org.h2.store;

import org.h2.util.BitField;

/**
 * Transaction log mechanism.
 */
public class PageLog {

    private static final int BUFFER_SIZE = 32 * 1024;
    private PageStore store;
    private BitField undo = new BitField();
    private byte[] ringBuffer = new byte[BUFFER_SIZE];
    private int bufferPos;

    PageLog(PageStore store) {
        this.store = store;
    }

    void addUndo(int pageId) {
    }

    private void write(byte[] data, int offset, int length) {
        if (bufferPos + length > BUFFER_SIZE) {
            while (length > 0) {
                int len = Math.min(length, BUFFER_SIZE - bufferPos);
                write(data, offset, len);
                offset += len;
                length -= len;
            }
            return;
        }
        System.arraycopy(data, offset, ringBuffer, bufferPos, length);
        bufferPos += length;
        if (bufferPos == BUFFER_SIZE) {
        }
    }
}
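PageLog buffers log data in a fixed 32 KB ring buffer, but the branch taken when the buffer fills up is still empty in this commit. A sketch of one way the wrap-around could flush and reset the buffer; the Sink interface below is hypothetical and only stands in for whatever the page store will eventually provide.

// Sketch only: a ring buffer writer that hands full buffers to a consumer.
class RingBufferSketch {

    interface Sink {
        void accept(byte[] buffer, int length);
    }

    private static final int BUFFER_SIZE = 32 * 1024;
    private final byte[] ringBuffer = new byte[BUFFER_SIZE];
    private final Sink sink;
    private int bufferPos;

    RingBufferSketch(Sink sink) {
        this.sink = sink;
    }

    void write(byte[] data, int offset, int length) {
        while (length > 0) {
            int len = Math.min(length, BUFFER_SIZE - bufferPos);
            System.arraycopy(data, offset, ringBuffer, bufferPos, len);
            bufferPos += len;
            offset += len;
            length -= len;
            if (bufferPos == BUFFER_SIZE) {
                sink.accept(ringBuffer, bufferPos);  // flush the full buffer
                bufferPos = 0;                       // then wrap around
            }
        }
    }
}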
PageStore.java
@@ -8,7 +8,6 @@ package org.h2.store;
 import java.io.IOException;
 import java.sql.SQLException;
 import org.h2.constant.ErrorCode;
 import org.h2.engine.Database;
 import org.h2.message.Message;
@@ -19,6 +18,7 @@ import org.h2.util.CacheLRU;
 import org.h2.util.CacheObject;
 import org.h2.util.CacheWriter;
 import org.h2.util.FileUtils;
+import org.h2.util.ObjectArray;

 /**
  * This class represents a file that is split into pages. The first page (page
@@ -56,7 +56,7 @@ public class PageStore implements CacheWriter {
     private int freeListRootPageId;
     private int freePageCount;
     private int pageCount;
+    private int writeCount;

     /**
      * Create a new page store object.
      *
@@ -99,6 +99,22 @@ public class PageStore implements CacheWriter {
         }
     }

+    /**
+     * Flush all pending changes to disk.
+     */
+    public void flush() throws SQLException {
+        synchronized (database) {
+            database.checkPowerOff();
+            ObjectArray list = cache.getAllChanged();
+            CacheObject.sort(list);
+            for (int i = 0; i < list.size(); i++) {
+                Record rec = (Record) list.get(i);
+                writeBack(rec);
+            }
+            int todoWriteDeletedPages;
+        }
+    }
+
     private void readHeader() throws SQLException {
         long length = file.length();
         if (length < FILE_HEADER_SIZE) {
@@ -169,7 +185,10 @@ public class PageStore implements CacheWriter {
     public void close() throws SQLException {
         int todo;
         try {
-            file.close();
+            flush();
+            if (file != null) {
+                file.close();
+            }
         } catch (IOException e) {
             throw Message.convertIOException(e, "close");
         }
@@ -183,8 +202,27 @@ public class PageStore implements CacheWriter {
         return database.getTrace(Trace.DATABASE);
     }

-    public void writeBack(CacheObject entry) throws SQLException {
-        int todo;
+    public void writeBack(CacheObject obj) throws SQLException {
+        synchronized (database) {
+            writeCount++;
+            Record record = (Record) obj;
+            record.write(null);
+            record.setChanged(false);
+        }
+    }
+
+    /**
+     * Update a record.
+     *
+     * @param record the record
+     */
+    public void updateRecord(Record record) throws SQLException {
+        synchronized (database) {
+            record.setChanged(true);
+            int pos = record.getPos();
+            cache.update(pos, record);
+            int todoLogChanges;
+        }
     }

     /**
@@ -209,6 +247,17 @@ public class PageStore implements CacheWriter {
         return new DataPageBinary(database, new byte[pageSize]);
     }

+    /**
+     * Get the record if it is stored in the file, or null if not.
+     *
+     * @param pos the page id
+     * @return the record or null
+     */
+    public Record getRecord(int pos) {
+        CacheObject obj = cache.find(pos);
+        return (Record) obj;
+    }
+
     /**
      * Read a page.
      *
@@ -260,4 +309,13 @@ public class PageStore implements CacheWriter {
         int todo;
     }

+    /**
+     * Remove a page from the cache.
+     *
+     * @param pageId the page id
+     */
+    public void removeRecord(int pageId) {
+        cache.remove(pageId);
+    }
 }
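The new flush() collects every changed record from the cache and sorts the list with CacheObject.sort before writing it back; a plausible reading is that this orders the write-backs by page position so the file is touched mostly sequentially. A small sketch of that flush order, using illustrative names only, not the actual H2 classes:

import java.util.ArrayList;
import java.util.Comparator;
import java.util.List;

// Sketch only: write dirty pages back in ascending page order.
class FlushOrderSketch {

    static class DirtyRecord {
        final int pos;
        DirtyRecord(int pos) { this.pos = pos; }
        void writeBack() { System.out.println("write page " + pos); }
    }

    static void flush(List<DirtyRecord> changed) {
        List<DirtyRecord> list = new ArrayList<>(changed);
        list.sort(Comparator.comparingInt(r -> r.pos));  // order by position
        for (DirtyRecord r : list) {
            r.writeBack();
        }
    }

    public static void main(String[] args) {
        List<DirtyRecord> dirty = new ArrayList<>();
        dirty.add(new DirtyRecord(7));
        dirty.add(new DirtyRecord(2));
        dirty.add(new DirtyRecord(5));
        flush(dirty);  // writes pages 2, 5, 7 in that order
    }
}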