提交 3c8794df authored 作者: Thomas Mueller's avatar Thomas Mueller

new experimental page store

上级 d98c7905
......@@ -14,6 +14,7 @@ import org.h2.engine.Database;
import org.h2.engine.Session;
import org.h2.expression.Expression;
import org.h2.expression.Parameter;
import org.h2.index.Index;
import org.h2.message.Message;
import org.h2.result.LocalResult;
import org.h2.util.ObjectArray;
......@@ -37,7 +38,7 @@ public abstract class Prepared {
/**
* The position of the head record (used for indexes).
*/
protected int headPos = -1;
protected int headPos = Index.EMPTY_HEAD;
/**
* The list of parameters.
......
/*
* Copyright 2004-2008 H2 Group. Multiple-Licensed under the H2 License,
* Copyright 2004-2008 H2 Group. Multiple-Licensed under the H2 License,
* Version 1.0, and under the Eclipse Public License, Version 1.0
* (http://h2database.com/html/license.html).
* Initial Developer: H2 Group
......@@ -15,7 +15,6 @@ import org.h2.constant.ErrorCode;
import org.h2.engine.Database;
import org.h2.engine.Session;
import org.h2.expression.Expression;
import org.h2.index.Index;
import org.h2.message.Message;
import org.h2.schema.Schema;
import org.h2.schema.Sequence;
......@@ -63,7 +62,7 @@ public class CreateTable extends SchemaCommand {
/**
* Add a column to this table.
*
*
* @param column the column to add
*/
public void addColumn(Column column) {
......@@ -76,7 +75,7 @@ public class CreateTable extends SchemaCommand {
/**
* Add a constraint statement to this statement.
* The primary key definition is one possible constraint statement.
*
*
* @param command the statement to add
*/
public void addConstraintCommand(Prepared command) throws SQLException {
......@@ -145,7 +144,7 @@ public class CreateTable extends SchemaCommand {
}
}
int id = getObjectId(true, true);
TableData table = getSchema().createTable(tableName, id, columns, persistent, clustered, Index.EMPTY_HEAD);
TableData table = getSchema().createTable(tableName, id, columns, persistent, clustered, headPos);
table.setComment(comment);
table.setTemporary(temporary);
table.setGlobalTemporary(globalTemporary);
......@@ -219,7 +218,7 @@ public class CreateTable extends SchemaCommand {
}
/**
* Sets the primary key columns, but also check if a primary key
* Sets the primary key columns, but also check if a primary key
* with different columns is already defined.
*
* @param columns the primary key columns
......
......@@ -1192,6 +1192,10 @@ public class Database implements DataHandler {
fileIndex.close();
fileIndex = null;
}
if (pageStore != null) {
pageStore.close();
pageStore = null;
}
} catch (SQLException e) {
traceSystem.getTrace(Trace.DATABASE).error("close", e);
}
......
......@@ -11,31 +11,32 @@ import java.sql.SQLException;
import org.h2.engine.Session;
import org.h2.result.Row;
import org.h2.store.DataPageBinary;
import org.h2.store.Record;
/**
* A page that contains data rows.
*/
abstract class PageData {
abstract class PageData extends Record {
/**
* The index.
* Indicator that the row count is not known.
*/
protected final PageScanIndex index;
static final int UNKNOWN_ROWCOUNT = -1;
/**
* The data page.
* The index.
*/
protected final DataPageBinary data;
protected final PageScanIndex index;
/**
* the page number.
* The page number of the parent.
*/
protected int pageId;
protected int parentPageId;
/**
* The page number of the parent.
* The data page.
*/
protected int parentPageId;
protected final DataPageBinary data;
/**
* The number of entries.
......@@ -49,11 +50,25 @@ abstract class PageData {
PageData(PageScanIndex index, int pageId, int parentPageId, DataPageBinary data) {
this.index = index;
this.pageId = pageId;
this.parentPageId = parentPageId;
this.data = data;
this.setPos(pageId);
}
/**
* Get the real row count. If required, this will read all child pages.
*
* @return the row count
*/
abstract int getRowCount() throws SQLException;
/**
* Set the stored row count. This will write the page.
*
* @param rowCount the stored row count
*/
abstract void setRowCountStored(int rowCount) throws SQLException;
/**
* Find an entry by key.
*
......@@ -97,11 +112,6 @@ abstract class PageData {
*/
abstract Cursor find() throws SQLException;
/**
* Write the page.
*/
abstract void write() throws SQLException;
/**
* Get the key at this position.
*
......@@ -126,12 +136,14 @@ abstract class PageData {
*
* @param id the new page id
*/
void setPageId(int id) {
this.pageId = id;
void setPageId(int id) throws SQLException {
index.getPageStore().removeRecord(getPos());
setPos(id);
remapChildren();
}
int getPageId() {
return pageId;
return getPos();
}
/**
......@@ -153,9 +165,8 @@ abstract class PageData {
*
* @param id the new parent page id
*/
void setParentPageId(int id) throws SQLException {
void setParentPageId(int id) {
this.parentPageId = id;
remapChildren();
}
/**
......
......@@ -11,6 +11,7 @@ import org.h2.constant.ErrorCode;
import org.h2.engine.Session;
import org.h2.message.Message;
import org.h2.result.Row;
import org.h2.store.DataPage;
import org.h2.store.DataPageBinary;
import org.h2.store.PageStore;
import org.h2.util.IntArray;
......@@ -81,67 +82,6 @@ class PageDataLeaf extends PageData {
start = data.length();
}
void write() throws SQLException {
// make sure rows are read
for (int i = 0; i < entryCount; i++) {
getRowAt(i);
}
data.reset();
data.writeInt(parentPageId);
int type;
if (firstOverflowPageId == 0) {
type = Page.TYPE_DATA_LEAF;
} else {
type = Page.TYPE_DATA_LEAF_WITH_OVERFLOW;
}
data.writeByte((byte) type);
data.writeShortInt(entryCount);
if (firstOverflowPageId != 0) {
data.writeInt(firstOverflowPageId);
}
for (int i = 0; i < entryCount; i++) {
data.writeInt(keys[i]);
data.writeShortInt(offsets[i]);
}
for (int i = 0; i < entryCount; i++) {
data.setPos(offsets[i]);
rows[i].write(data);
}
PageStore store = index.getPageStore();
int pageSize = store.getPageSize();
store.writePage(pageId, data);
// don't need to write overflow if we just update the parent page id
if (data.length() > pageSize && overflowPageIds != null) {
if (firstOverflowPageId == 0) {
throw Message.getInternalError();
}
DataPageBinary overflow = store.createDataPage();
int parent = pageId;
int pos = pageSize;
int remaining = data.length() - pageSize;
for (int i = 0; i < overflowPageIds.length; i++) {
overflow.reset();
overflow.writeInt(parent);
int size;
if (remaining > pageSize - 7) {
overflow.writeByte((byte) Page.TYPE_DATA_OVERFLOW_WITH_MORE);
overflow.writeInt(overflowPageIds[i + 1]);
size = pageSize - overflow.length();
} else {
overflow.writeByte((byte) Page.TYPE_DATA_OVERFLOW_LAST);
size = remaining;
overflow.writeShortInt(remaining);
}
overflow.write(data.getBytes(), pos, size);
remaining -= size;
pos += size;
int id = overflowPageIds[i];
store.writePage(id, overflow);
parent = id;
}
}
}
/**
* Add a row if possible. If it is possible this method returns 0, otherwise
* the split point. It is always possible to add one row.
......@@ -154,7 +94,6 @@ class PageDataLeaf extends PageData {
int last = entryCount == 0 ? pageSize : offsets[entryCount - 1];
if (entryCount > 0 && last - rowLength < start + 6) {
int todoSplitAtLastInsertionPoint;
return (entryCount / 2) + 1;
}
int offset = last - rowLength;
int[] newOffsets = new int[entryCount + 1];
......@@ -173,6 +112,7 @@ class PageDataLeaf extends PageData {
System.arraycopy(keys, x, newKeys, x + 1, entryCount - x);
System.arraycopy(rows, x, newRows, x + 1, entryCount - x);
}
return (entryCount / 2) + 1;
}
entryCount++;
start += 6;
......@@ -205,7 +145,7 @@ class PageDataLeaf extends PageData {
array.toArray(overflowPageIds);
firstOverflowPageId = overflowPageIds[0];
}
write();
index.getPageStore().updateRecord(this);
return 0;
}
......@@ -286,6 +226,9 @@ class PageDataLeaf extends PageData {
/**
 * Get the key of the last row stored in this leaf page.
 *
 * @return the key of the last row, or 0 if the page is empty
 */
int getLastKey() throws SQLException {
// NOTE: 'int todoRemove;' is an H2 compile-checked TODO marker,
// flagging that this method is meant to be removed or reworked
int todoRemove;
if (entryCount == 0) {
// empty page: there is no last row
return 0;
}
// reading the row materializes it if it was not loaded yet
return getRowAt(entryCount - 1).getPos();
}
......@@ -302,7 +245,16 @@ class PageDataLeaf extends PageData {
}
protected void remapChildren() throws SQLException {
int todoUpdateOverflowPages;
if (firstOverflowPageId == 0) {
return;
}
int testIfReallyNotRequired;
// PageStore store = index.getPageStore();
// store.updateRecord(firstOverflowPageId);
// DataPageBinary overflow = store.readPage(firstOverflowPageId);
// overflow.reset();
// overflow.writeInt(getPos());
// store.writePage(firstOverflowPageId, overflow);
}
boolean remove(int key) throws SQLException {
......@@ -314,7 +266,7 @@ class PageDataLeaf extends PageData {
return true;
}
removeRow(i);
write();
index.getPageStore().updateRecord(this);
return false;
}
......@@ -323,4 +275,77 @@ class PageDataLeaf extends PageData {
return getRowAt(index);
}
/**
 * Get the number of rows in this page. A leaf page stores its rows
 * directly, so the entry count is the row count.
 *
 * @return the row count
 */
int getRowCount() throws SQLException {
return entryCount;
}
/**
 * Set the stored row count. A no-op for leaf pages: the row count of a
 * leaf is always its entry count, so nothing needs to be persisted.
 *
 * @param rowCount the stored row count (ignored)
 */
void setRowCountStored(int rowCount) throws SQLException {
// ignore
}
/**
 * Get the size of this record in bytes. A page always occupies exactly
 * one page of the underlying store.
 *
 * @param dummy ignored; page records have a fixed size
 * @return the page size of the page store
 */
public int getByteCount(DataPage dummy) throws SQLException {
return index.getPageStore().getPageSize();
}
/**
 * Serialize this leaf page and write it (plus any overflow pages) to
 * the page store. The serialized layout is: parent page id (int), page
 * type (byte), entry count (short), optional first overflow page id
 * (int), then per-entry key (int) and data offset (short), then the row
 * data at the recorded offsets.
 *
 * @param buff ignored; the page serializes into its own 'data' buffer
 */
public void write(DataPage buff) throws SQLException {
// make sure rows are read
for (int i = 0; i < entryCount; i++) {
getRowAt(i);
}
data.reset();
data.writeInt(parentPageId);
int type;
if (firstOverflowPageId == 0) {
type = Page.TYPE_DATA_LEAF;
} else {
// a different type marks pages whose row data spills into
// overflow pages
type = Page.TYPE_DATA_LEAF_WITH_OVERFLOW;
}
data.writeByte((byte) type);
data.writeShortInt(entryCount);
if (firstOverflowPageId != 0) {
data.writeInt(firstOverflowPageId);
}
// directory: key and data offset for each entry
for (int i = 0; i < entryCount; i++) {
data.writeInt(keys[i]);
data.writeShortInt(offsets[i]);
}
// row payloads, written at their recorded offsets
for (int i = 0; i < entryCount; i++) {
data.setPos(offsets[i]);
rows[i].write(data);
}
PageStore store = index.getPageStore();
int pageSize = store.getPageSize();
store.writePage(getPos(), data);
// don't need to write overflow if we just update the parent page id
if (data.length() > pageSize && overflowPageIds != null) {
if (firstOverflowPageId == 0) {
throw Message.getInternalError();
}
// spread the bytes that did not fit over the chain of
// overflow pages; each overflow page stores its parent id,
// a type byte, and either the next page id or the remaining size
DataPageBinary overflow = store.createDataPage();
int parent = getPos();
int pos = pageSize;
int remaining = data.length() - pageSize;
for (int i = 0; i < overflowPageIds.length; i++) {
overflow.reset();
overflow.writeInt(parent);
int size;
if (remaining > pageSize - 7) {
// 7 = header bytes of an overflow page with a 'next' pointer
// (4 parent + 1 type + ... ) -- TODO confirm exact layout
overflow.writeByte((byte) Page.TYPE_DATA_OVERFLOW_WITH_MORE);
overflow.writeInt(overflowPageIds[i + 1]);
size = pageSize - overflow.length();
} else {
// last overflow page: store the remaining size instead
// of a next-page pointer
overflow.writeByte((byte) Page.TYPE_DATA_OVERFLOW_LAST);
size = remaining;
overflow.writeShortInt(remaining);
}
overflow.write(data.getBytes(), pos, size);
remaining -= size;
pos += size;
int id = overflowPageIds[i];
store.writePage(id, overflow);
// each overflow page points back to the page before it
parent = id;
}
}
}
}
......@@ -11,6 +11,7 @@ import java.sql.SQLException;
import org.h2.engine.Session;
import org.h2.message.Message;
import org.h2.result.Row;
import org.h2.store.DataPage;
import org.h2.store.DataPageBinary;
/**
......@@ -19,25 +20,30 @@ import org.h2.store.DataPageBinary;
* <ul><li>0-3: parent page id
* </li><li>4-4: page type
* </li><li>5-6: entry count
* </li><li>7-10: rightmost child page id
* </li><li>11- entries: 4 bytes leaf page id, 4 bytes key
* </li><li>7-10: row count of all children (-1 if not known)
* </li><li>11-14: rightmost child page id
* </li><li>15- entries: 4 bytes leaf page id, 4 bytes key
* </li></ul>
*/
class PageDataNode extends PageData {
/**
* The page ids of the children.
*/
int[] childPageIds;
private int[] childPageIds;
private int rowCountStored = UNKNOWN_ROWCOUNT;
private int rowCount = UNKNOWN_ROWCOUNT;
PageDataNode(PageScanIndex index, int pageId, int parentPageId, DataPageBinary data) {
super(index, pageId, parentPageId, data);
int todoOptimizationChildrenEntryCount;
}
void read() {
data.setPos(5);
entryCount = data.readShortInt();
rowCount = rowCountStored = data.readInt();
childPageIds = new int[entryCount + 1];
childPageIds[entryCount] = data.readInt();
keys = new int[entryCount];
......@@ -45,27 +51,17 @@ class PageDataNode extends PageData {
childPageIds[i] = data.readInt();
keys[i] = data.readInt();
}
check();
}
void write() throws SQLException {
data.reset();
data.writeInt(parentPageId);
data.writeByte((byte) Page.TYPE_DATA_NODE);
data.writeShortInt(entryCount);
data.writeInt(childPageIds[entryCount]);
for (int i = 0; i < entryCount; i++) {
data.writeInt(childPageIds[i]);
data.writeInt(keys[i]);
}
index.getPageStore().writePage(pageId, data);
}
private void addChild(int x, int childPageId, int key) {
int[] newKeys = new int[entryCount + 1];
int[] newChildPageIds = new int[entryCount + 2];
if (childPageIds != null) {
System.arraycopy(childPageIds, 0, newChildPageIds, 0, x + 1);
}
if (entryCount > 0) {
System.arraycopy(keys, 0, newKeys, 0, x);
System.arraycopy(childPageIds, 0, newChildPageIds, 0, x + 1);
if (x < entryCount) {
System.arraycopy(keys, x, newKeys, x + 1, entryCount - x);
System.arraycopy(childPageIds, x, newChildPageIds, x + 1, entryCount - x + 1);
......@@ -88,18 +84,29 @@ class PageDataNode extends PageData {
}
int pivot = page.getKey(splitPoint - 1);
PageData page2 = page.split(splitPoint);
page.write();
page2.write();
index.getPageStore().updateRecord(page);
index.getPageStore().updateRecord(page2);
addChild(x, page2.getPageId(), pivot);
int maxEntries = (index.getPageStore().getPageSize() - 11) / 8;
int maxEntries = (index.getPageStore().getPageSize() - 15) / 8;
if (entryCount >= maxEntries) {
int todoSplitAtLastInsertionPoint;
return entryCount / 2;
}
write();
index.getPageStore().updateRecord(this);
}
updateRowCount(1);
return 0;
}
/**
 * Adjust the cached row count of this node after an insert or delete in
 * the subtree below it.
 *
 * @param offset the change (+1 for an insert, -1 for a delete)
 */
private void updateRowCount(int offset) throws SQLException {
if (rowCount != UNKNOWN_ROWCOUNT) {
rowCount += offset;
}
if (rowCountStored != UNKNOWN_ROWCOUNT) {
// the persisted count is now stale: invalidate it and mark the
// page as changed so it gets rewritten
rowCountStored = UNKNOWN_ROWCOUNT;
index.getPageStore().updateRecord(this);
}
}
Cursor find() throws SQLException {
int child = childPageIds[0];
......@@ -126,8 +133,8 @@ class PageDataNode extends PageData {
for (int i = 0; i < childPageIds.length; i++) {
int child = childPageIds[i];
PageData p = index.getPage(child);
p.setParentPageId(pageId);
p.write();
p.setParentPageId(getPos());
index.getPageStore().updateRecord(p);
}
}
......@@ -156,6 +163,7 @@ class PageDataNode extends PageData {
entryCount = 1;
childPageIds = new int[] { page1.getPageId(), page2.getPageId() };
keys = new int[] { pivot };
check();
}
int getLastKey() throws SQLException {
......@@ -194,9 +202,11 @@ class PageDataNode extends PageData {
}
int[] newKeys = new int[entryCount];
int[] newChildPageIds = new int[entryCount + 1];
System.arraycopy(keys, 0, newKeys, 0, i);
System.arraycopy(keys, 0, newKeys, 0, Math.min(entryCount, i));
System.arraycopy(childPageIds, 0, newChildPageIds, 0, i);
System.arraycopy(keys, i + 1, newKeys, i, entryCount - i);
if (entryCount > i) {
System.arraycopy(keys, i + 1, newKeys, i, entryCount - i);
}
System.arraycopy(childPageIds, i + 1, newChildPageIds, i, entryCount - i + 1);
keys = newKeys;
childPageIds = newChildPageIds;
......@@ -208,6 +218,7 @@ class PageDataNode extends PageData {
// TODO maybe implement merge
PageData page = index.getPage(childPageIds[at]);
boolean empty = page.remove(key);
updateRowCount(-1);
if (!empty) {
// the first row didn't change - nothing to do
return false;
......@@ -220,15 +231,8 @@ class PageDataNode extends PageData {
// truncated
return true;
}
if (at == 0) {
// the first child is empty - then the first row of this subtree
// has changed
removeRow(at);
} else {
// otherwise the first row didn't change
removeRow(at - 1);
}
write();
removeRow(at);
index.getPageStore().updateRecord(this);
return false;
}
......@@ -237,5 +241,52 @@ class PageDataNode extends PageData {
PageData page = index.getPage(childPageIds[at]);
return page.getRow(session, key);
}
/**
 * Get the row count of the subtree rooted at this node. If the count is
 * not known yet, it is computed by summing the row counts of all child
 * pages (which may read them from disk) and then cached.
 *
 * @return the row count
 */
int getRowCount() throws SQLException {
    if (rowCount != UNKNOWN_ROWCOUNT) {
        // already computed or loaded earlier
        return rowCount;
    }
    int total = 0;
    int i = 0;
    while (i < childPageIds.length) {
        total += index.getPage(childPageIds[i]).getRowCount();
        i++;
    }
    rowCount = total;
    return rowCount;
}
/**
 * Set the stored row count for this node. Updates the in-memory count
 * and, if the persisted value differs, marks the page as changed so the
 * new count is written out.
 *
 * @param rowCount the row count to store
 */
void setRowCountStored(int rowCount) throws SQLException {
this.rowCount = rowCount;
if (rowCountStored != rowCount) {
rowCountStored = rowCount;
index.getPageStore().updateRecord(this);
}
}
/**
 * Sanity check: every child page id of this node must be non-zero.
 * A zero entry indicates an internal error, so fail fast.
 */
private void check() {
    int i = childPageIds.length;
    while (--i >= 0) {
        if (childPageIds[i] == 0) {
            throw Message.getInternalError();
        }
    }
}
/**
 * Get the size of this record in bytes. A page always occupies exactly
 * one page of the underlying store.
 *
 * @param dummy ignored; page records have a fixed size
 * @return the page size of the page store
 */
public int getByteCount(DataPage dummy) throws SQLException {
return index.getPageStore().getPageSize();
}
/**
 * Serialize this node page and write it to the page store, using the
 * layout documented in the class comment: parent page id, page type,
 * entry count, stored row count, rightmost child page id, then one
 * (child page id, key) pair per entry.
 *
 * @param buff ignored; the page serializes into its own 'data' buffer
 */
public void write(DataPage buff) throws SQLException {
// fail fast on corrupted child pointers before persisting anything
check();
data.reset();
data.writeInt(parentPageId);
data.writeByte((byte) Page.TYPE_DATA_NODE);
data.writeShortInt(entryCount);
// may be UNKNOWN_ROWCOUNT (-1) if the count is currently invalid
data.writeInt(rowCountStored);
// the rightmost child has no key of its own
data.writeInt(childPageIds[entryCount]);
for (int i = 0; i < entryCount; i++) {
data.writeInt(childPageIds[i]);
data.writeInt(keys[i]);
}
index.getPageStore().writePage(getPos(), data);
}
}
......@@ -16,6 +16,8 @@ import org.h2.result.Row;
import org.h2.result.SearchRow;
import org.h2.store.DataPageBinary;
import org.h2.store.PageStore;
import org.h2.store.Record;
import org.h2.table.Column;
import org.h2.table.IndexColumn;
import org.h2.table.TableData;
......@@ -30,14 +32,15 @@ public class PageScanIndex extends BaseIndex implements RowIndex {
private TableData tableData;
private int headPos;
// TODO cache the row count of all children (row count, group count)
// TODO test that setPageId updates parent, overflow parent
// TODO remember last page with deleted keys (in the root page?),
// and chain such pages
// TODO order pages so that searching for a key
// doesn't seek backwards in the file
// TODO use an undo log and maybe redo log (for performance)
// TODO file position, content checksums
private int nextKey;
// TODO completely re-use keys of deleted rows
private int lastKey;
private long rowCount;
private long rowCountApproximation;
......@@ -56,23 +59,31 @@ public class PageScanIndex extends BaseIndex implements RowIndex {
// new table
headPos = store.allocatePage();
PageDataLeaf root = new PageDataLeaf(this, headPos, Page.ROOT, store.createDataPage());
root.write();
store.updateRecord(root);
} else {
int todoRowCount;
rowCount = getPage(headPos).getLastKey();
lastKey = getPage(headPos).getLastKey();
rowCount = getPage(headPos).getRowCount();
int reuseKeysIfManyDeleted;
}
this.headPos = headPos;
trace("open " + rowCount);
table.setRowCount(rowCount);
}
/**
 * Get the position of the head (root) page of this index.
 *
 * @return the head page id
 */
public int getHeadPos() {
return headPos;
}
public void add(Session session, Row row) throws SQLException {
row.setPos((int) rowCount);
row.setPos(++lastKey);
trace("add " + row.getPos());
while (true) {
PageData root = getPage(headPos);
int splitPoint = root.addRow(row);
if (splitPoint == 0) {
break;
}
trace("split " + splitPoint);
int pivot = root.getKey(splitPoint - 1);
PageData page1 = root;
PageData page2 = root.split(splitPoint);
......@@ -83,9 +94,9 @@ public class PageScanIndex extends BaseIndex implements RowIndex {
page2.setParentPageId(headPos);
PageDataNode newRoot = new PageDataNode(this, rootPageId, Page.ROOT, store.createDataPage());
newRoot.init(page1, pivot, page2);
page1.write();
page2.write();
newRoot.write();
store.updateRecord(page1);
store.updateRecord(page2);
store.updateRecord(newRoot);
root = newRoot;
}
rowCount++;
......@@ -98,6 +109,10 @@ public class PageScanIndex extends BaseIndex implements RowIndex {
* @return the page
*/
PageData getPage(int id) throws SQLException {
Record rec = store.getRecord(id);
if (rec != null) {
return (PageData) rec;
}
DataPageBinary data = store.readPage(id);
data.reset();
int parentPageId = data.readInt();
......@@ -123,10 +138,8 @@ public class PageScanIndex extends BaseIndex implements RowIndex {
}
public void close(Session session) throws SQLException {
trace("close");
int writeRowCount;
if (store != null) {
store = null;
}
}
public Cursor find(Session session, SearchRow first, SearchRow last) throws SQLException {
......@@ -139,7 +152,7 @@ public class PageScanIndex extends BaseIndex implements RowIndex {
}
public double getCost(Session session, int[] masks) throws SQLException {
long cost = 10 * tableData.getRowCountApproximation() + Constants.COST_ROW_OFFSET;
long cost = 10 * (tableData.getRowCountApproximation() + Constants.COST_ROW_OFFSET);
return cost;
}
......@@ -148,27 +161,36 @@ public class PageScanIndex extends BaseIndex implements RowIndex {
}
public void remove(Session session, Row row) throws SQLException {
trace("remove " + row.getPos());
int invalidateRowCount;
// setChanged(session);
if (rowCount == 1) {
truncate(session);
} else {
int key = row.getPos();
PageData root = getPage(headPos);
root.remove(row.getPos());
root.remove(key);
rowCount--;
int todoReuseKeys;
// if (key == lastKey - 1) {
// lastKey--;
// }
}
}
/**
 * Remove this index from the database.
 *
 * @param session the session
 */
public void remove(Session session) throws SQLException {
trace("remove");
// 'int todo;' is an H2 compile-checked TODO marker:
// removal of the index (freeing its pages) is not implemented yet
int todo;
}
public void truncate(Session session) throws SQLException {
int invalidateRowCount;
trace("truncate");
store.removeRecord(headPos);
int freePages;
PageDataLeaf root = new PageDataLeaf(this, headPos, Page.ROOT, store.createDataPage());
root.write();
store.updateRecord(root);
rowCount = 0;
lastKey = 0;
}
public void checkRename() throws SQLException {
......@@ -199,8 +221,24 @@ public class PageScanIndex extends BaseIndex implements RowIndex {
}
/**
 * Get the exact row count of this index.
 *
 * @param session the session (unused)
 * @return the row count
 */
public long getRowCount(Session session) {
// 'int todo;' is an H2 compile-checked TODO marker
int todo;
return rowCount;
}
/**
 * Get the CREATE SQL statement for this index. The scan index is
 * created implicitly with the table, so there is none.
 *
 * @return null
 */
public String getCreateSQL() {
return null;
}
/**
 * Debug tracing hook; output is currently disabled.
 * NOTE(review): the 'headPos != 1' guard presumably filters out traces
 * for a system/metadata index at page 1 -- confirm against the caller.
 *
 * @param message the message to trace
 */
private void trace(String message) {
if (headPos != 1) {
// 'int test;' is an H2 compile-checked TODO/testing marker
int test;
// System.out.println(message);
}
}
/**
 * Get the index of the given column in this index's column list.
 *
 * @param col the column
 * @return -1, because the scan index does not index any column
 */
public int getColumnIndex(Column col) {
// the scan index cannot use any columns
// TODO it can if there is an INT primary key
return -1;
}
}
......@@ -16,7 +16,6 @@ import java.sql.SQLException;
import java.util.Comparator;
import java.util.HashSet;
import java.util.Iterator;
import org.h2.api.DatabaseEventListener;
import org.h2.constant.ErrorCode;
import org.h2.constant.SysProperties;
......
/*
* Copyright 2004-2008 H2 Group. Multiple-Licensed under the H2 License,
* Version 1.0, and under the Eclipse Public License, Version 1.0
* (http://h2database.com/html/license.html).
* Initial Developer: H2 Group
*/
package org.h2.store;
import org.h2.util.BitField;
/**
 * Transaction log mechanism for the page store. Log data is collected
 * in an in-memory ring buffer before being written out.
 * NOTE: this class is experimental scaffolding; undo tracking and
 * flushing are not implemented yet.
 */
public class PageLog {

    private static final int BUFFER_SIZE = 32 * 1024;

    private PageStore store;
    // pages that still need an undo entry (not used yet)
    private BitField undo = new BitField();
    private byte[] ringBuffer = new byte[BUFFER_SIZE];
    // next write position within ringBuffer, always in [0, BUFFER_SIZE)
    private int bufferPos;

    PageLog(PageStore store) {
        this.store = store;
    }

    /**
     * Remember that an undo entry is required for the given page.
     * TODO not implemented yet; the 'undo' bit field is never set.
     *
     * @param pageId the page id
     */
    void addUndo(int pageId) {
    }

    /**
     * Append data to the ring buffer, splitting it into chunks that fit
     * into the remaining space.
     *
     * Fix: previously, once bufferPos reached BUFFER_SIZE the chunk
     * size min(length, BUFFER_SIZE - bufferPos) became 0, so the
     * splitting loop never made progress (infinite loop). The position
     * now wraps around to 0 when the buffer is full, matching the ring
     * buffer semantics. TODO the buffered data should be flushed before
     * it is overwritten.
     *
     * @param data the source array
     * @param offset the offset within the source array
     * @param length the number of bytes to write
     */
    private void write(byte[] data, int offset, int length) {
        if (bufferPos + length > BUFFER_SIZE) {
            // split into chunks that each fit the remaining space
            while (length > 0) {
                int len = Math.min(length, BUFFER_SIZE - bufferPos);
                write(data, offset, len);
                offset += len;
                length -= len;
            }
            return;
        }
        System.arraycopy(data, offset, ringBuffer, bufferPos, length);
        bufferPos += length;
        if (bufferPos == BUFFER_SIZE) {
            // wrap around so subsequent writes make progress;
            // TODO flush the buffer contents first
            bufferPos = 0;
        }
    }

}
......@@ -8,7 +8,6 @@ package org.h2.store;
import java.io.IOException;
import java.sql.SQLException;
import org.h2.constant.ErrorCode;
import org.h2.engine.Database;
import org.h2.message.Message;
......@@ -19,6 +18,7 @@ import org.h2.util.CacheLRU;
import org.h2.util.CacheObject;
import org.h2.util.CacheWriter;
import org.h2.util.FileUtils;
import org.h2.util.ObjectArray;
/**
* This class represents a file that is split into pages. The first page (page
......@@ -56,7 +56,7 @@ public class PageStore implements CacheWriter {
private int freeListRootPageId;
private int freePageCount;
private int pageCount;
private int writeCount;
/**
* Create a new page store object.
*
......@@ -99,6 +99,22 @@ public class PageStore implements CacheWriter {
}
}
/**
 * Flush all pending changes to disk: every changed record in the cache
 * is written back, in sorted order.
 */
public void flush() throws SQLException {
synchronized (database) {
database.checkPowerOff();
ObjectArray list = cache.getAllChanged();
// sort by position, presumably so pages are written in file
// order -- TODO confirm CacheObject.sort's ordering
CacheObject.sort(list);
for (int i = 0; i < list.size(); i++) {
Record rec = (Record) list.get(i);
writeBack(rec);
}
// H2 compile-checked TODO marker: deleted pages are not yet
// handled by flush
int todoWriteDeletedPages;
}
}
private void readHeader() throws SQLException {
long length = file.length();
if (length < FILE_HEADER_SIZE) {
......@@ -169,7 +185,10 @@ public class PageStore implements CacheWriter {
public void close() throws SQLException {
int todo;
try {
file.close();
flush();
if (file != null) {
file.close();
}
} catch (IOException e) {
throw Message.convertIOException(e, "close");
}
......@@ -183,8 +202,27 @@ public class PageStore implements CacheWriter {
return database.getTrace(Trace.DATABASE);
}
public void writeBack(CacheObject entry) throws SQLException {
int todo;
/**
 * Write a changed record back to disk (CacheWriter callback).
 *
 * @param obj the cache object; must be a Record (page)
 */
public void writeBack(CacheObject obj) throws SQLException {
synchronized (database) {
writeCount++;
Record record = (Record) obj;
// pages serialize from their own internal buffer, so the
// DataPage argument is not needed here
record.write(null);
record.setChanged(false);
}
}
/**
 * Update a record: mark it as changed and put it in the cache, so it is
 * written back to disk later.
 *
 * @param record the record
 */
public void updateRecord(Record record) throws SQLException {
synchronized (database) {
record.setChanged(true);
int pos = record.getPos();
cache.update(pos, record);
// H2 compile-checked TODO marker: changes are not yet written
// to the transaction log
int todoLogChanges;
}
}
/**
......@@ -209,6 +247,17 @@ public class PageStore implements CacheWriter {
return new DataPageBinary(database, new byte[pageSize]);
}
/**
 * Look up a page record in the cache.
 *
 * @param pos the page id
 * @return the cached record, or null if the page is not in the cache
 */
public Record getRecord(int pos) {
    // cache.find returns null on a miss; casting null is harmless
    return (Record) cache.find(pos);
}
/**
* Read a page.
*
......@@ -260,4 +309,13 @@ public class PageStore implements CacheWriter {
int todo;
}
/**
 * Remove a page from the cache. The page itself is not freed or
 * written; only the cached copy is discarded.
 *
 * @param pageId the page id
 */
public void removeRecord(int pageId) {
cache.remove(pageId);
}
}
Markdown 格式
0%
您添加了 0 到此讨论。请谨慎行事。
请先完成此评论的编辑!
注册 或者 后发表评论