Commit f8a5383e authored by Thomas Mueller

New experimental page store.

Parent 70ed289c
......@@ -8,7 +8,7 @@ package org.h2.index;
import java.sql.SQLException;
import org.h2.result.SearchRow;
import org.h2.store.DataPage;
import org.h2.store.Data;
import org.h2.store.Record;
/**
......@@ -34,7 +34,7 @@ abstract class PageBtree extends Record {
/**
* The data page.
*/
protected final DataPage data;
protected final Data data;
/**
* The row offsets.
......@@ -66,7 +66,7 @@ abstract class PageBtree extends Record {
*/
protected boolean written;
PageBtree(PageBtreeIndex index, int pageId, int parentPageId, DataPage data) {
PageBtree(PageBtreeIndex index, int pageId, int parentPageId, Data data) {
this.index = index;
this.parentPageId = parentPageId;
this.data = data;
......@@ -248,7 +248,8 @@ abstract class PageBtree extends Record {
* @return number of double words (4 bytes)
*/
public int getMemorySize() {
return index.getPageStore().getPageSize() >> 2;
// double the byte array size
return index.getPageStore().getPageSize() >> 1;
}
}
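
The new estimate above deserves a quick sanity check: getMemorySize() is expressed in 4-byte words, so returning pageSize >> 1 words corresponds to pageSize * 2 bytes, which matches the "double the byte array size" comment. A minimal, self-contained illustration (the page size below is an assumed value, not an H2 constant):

public class MemorySizeExample {
    public static void main(String[] args) {
        int pageSize = 2048;            // assumed page size in bytes
        int words = pageSize >> 1;      // what getMemorySize() now returns
        int bytes = words * 4;          // the estimate in bytes (4 bytes per word)
        System.out.println(words + " words = " + bytes + " bytes = 2 x pageSize");
    }
}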
......@@ -13,7 +13,7 @@ import org.h2.engine.Session;
import org.h2.message.Message;
import org.h2.result.Row;
import org.h2.result.SearchRow;
import org.h2.store.DataPage;
import org.h2.store.Data;
import org.h2.store.PageStore;
import org.h2.store.Record;
import org.h2.table.Column;
......@@ -53,7 +53,7 @@ public class PageBtreeIndex extends BaseIndex {
// it should not for new tables, otherwise redo of other operations
// must ensure this page is not used for other things
store.addMeta(this, session, headPos);
PageBtreeLeaf root = new PageBtreeLeaf(this, headPos, Page.ROOT, store.createDataPage());
PageBtreeLeaf root = new PageBtreeLeaf(this, headPos, Page.ROOT, store.createData());
store.updateRecord(root, true, root.data);
} else {
this.headPos = headPos;
......@@ -93,9 +93,11 @@ public class PageBtreeIndex extends BaseIndex {
}
}
}
// save memory
SearchRow newRow = getSearchRow(row);
while (true) {
PageBtree root = getPage(headPos);
int splitPoint = root.addRowTry(row);
int splitPoint = root.addRowTry(newRow);
if (splitPoint == 0) {
break;
}
......@@ -110,7 +112,7 @@ public class PageBtreeIndex extends BaseIndex {
page1.setPageId(id);
page1.setParentPageId(headPos);
page2.setParentPageId(headPos);
PageBtreeNode newRoot = new PageBtreeNode(this, rootPageId, Page.ROOT, store.createDataPage());
PageBtreeNode newRoot = new PageBtreeNode(this, rootPageId, Page.ROOT, store.createData());
newRoot.init(page1, pivot, page2);
store.updateRecord(page1, true, page1.data);
store.updateRecord(page2, true, page2.data);
......@@ -120,6 +122,22 @@ public class PageBtreeIndex extends BaseIndex {
rowCount++;
}
/**
* Create a search row for this row.
*
* @param row the row
* @return the search row
*/
private SearchRow getSearchRow(Row row) {
SearchRow r = table.getTemplateSimpleRow(columns.length == 1);
r.setPosAndVersion(row);
for (int j = 0; j < columns.length; j++) {
int idx = columns[j].getColumnId();
r.setValue(idx, row.getValue(idx));
}
return r;
}
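
The getSearchRow helper above trims the full row down to its position plus the indexed columns before it is passed to addRowTry, so the b-tree does not hold on to large unindexed values. A rough, self-contained sketch of the same projection idea (plain arrays instead of H2's Row/SearchRow; the column indexes are made up):

import java.util.Arrays;

public class SearchRowSketch {
    public static void main(String[] args) {
        Object[] fullRow = { 1, "Alice", "alice@example.com", "large CLOB data ..." };
        int[] indexedColumns = { 0, 2 };              // columns covered by the index
        Object[] searchRow = new Object[fullRow.length];
        for (int idx : indexedColumns) {
            searchRow[idx] = fullRow[idx];            // copy only what the index needs
        }
        System.out.println(Arrays.toString(searchRow));
    }
}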
/**
* Read the given page.
*
......@@ -137,7 +155,7 @@ public class PageBtreeIndex extends BaseIndex {
}
return (PageBtree) rec;
}
DataPage data = store.readPage(id);
Data data = store.readPage(id);
data.reset();
int parentPageId = data.readInt();
int type = data.readByte() & 255;
......@@ -266,7 +284,7 @@ public class PageBtreeIndex extends BaseIndex {
private void removeAllRows() throws SQLException {
PageBtree root = getPage(headPos);
root.freeChildren();
root = new PageBtreeLeaf(this, headPos, Page.ROOT, store.createDataPage());
root = new PageBtreeLeaf(this, headPos, Page.ROOT, store.createData());
store.removeRecord(headPos);
store.updateRecord(root, true, null);
rowCount = 0;
......@@ -297,7 +315,7 @@ public class PageBtreeIndex extends BaseIndex {
* @param data the data page
* @return the row
*/
Row readRow(DataPage data) throws SQLException {
Row readRow(Data data) throws SQLException {
return tableData.readRow(data);
}
......@@ -324,7 +342,7 @@ public class PageBtreeIndex extends BaseIndex {
* @param onlyPosition whether only the position of the row is stored
* @return the row
*/
SearchRow readRow(DataPage data, int offset, boolean onlyPosition) throws SQLException {
SearchRow readRow(Data data, int offset, boolean onlyPosition) throws SQLException {
data.setPos(offset);
int pos = data.readInt();
if (onlyPosition) {
......@@ -347,7 +365,7 @@ public class PageBtreeIndex extends BaseIndex {
* @param onlyPosition whether only the position of the row is stored
* @param row the row to write
*/
void writeRow(DataPage data, int offset, SearchRow row, boolean onlyPosition) throws SQLException {
void writeRow(Data data, int offset, SearchRow row, boolean onlyPosition) throws SQLException {
data.setPos(offset);
data.writeInt(row.getPos());
if (!onlyPosition) {
......@@ -366,8 +384,8 @@ public class PageBtreeIndex extends BaseIndex {
* @param onlyPosition whether only the position of the row is stored
* @return the number of bytes
*/
int getRowSize(DataPage dummy, SearchRow row, boolean onlyPosition) throws SQLException {
int rowsize = DataPage.LENGTH_INT;
int getRowSize(Data dummy, SearchRow row, boolean onlyPosition) throws SQLException {
int rowsize = Data.LENGTH_INT;
if (!onlyPosition) {
for (Column col : columns) {
Value v = row.getValue(col.getColumnId());
......
......@@ -11,6 +11,7 @@ import org.h2.constant.ErrorCode;
import org.h2.constant.SysProperties;
import org.h2.message.Message;
import org.h2.result.SearchRow;
import org.h2.store.Data;
import org.h2.store.DataPage;
import org.h2.store.PageStore;
......@@ -30,7 +31,7 @@ class PageBtreeLeaf extends PageBtree {
private static final int OFFSET_LENGTH = 2;
private static final int OFFSET_START = 11;
PageBtreeLeaf(PageBtreeIndex index, int pageId, int parentPageId, DataPage data) {
PageBtreeLeaf(PageBtreeIndex index, int pageId, int parentPageId, Data data) {
super(index, pageId, parentPageId, data);
start = OFFSET_START;
}
......@@ -140,7 +141,7 @@ class PageBtreeLeaf extends PageBtree {
PageBtree split(int splitPoint) throws SQLException {
int newPageId = index.getPageStore().allocatePage();
PageBtreeLeaf p2 = new PageBtreeLeaf(index, newPageId, parentPageId, index.getPageStore().createDataPage());
PageBtreeLeaf p2 = new PageBtreeLeaf(index, newPageId, parentPageId, index.getPageStore().createData());
for (int i = splitPoint; i < entryCount;) {
p2.addRowTry(getRow(splitPoint));
removeRow(splitPoint);
......@@ -213,7 +214,7 @@ class PageBtreeLeaf extends PageBtree {
written = true;
}
DataPage getDataPage() throws SQLException {
Data getData() throws SQLException {
write();
return data;
}
......
......@@ -10,7 +10,10 @@ import java.sql.SQLException;
import org.h2.constant.SysProperties;
import org.h2.message.Message;
import org.h2.result.SearchRow;
import org.h2.store.Data;
import org.h2.store.DataPage;
import org.h2.store.PageStore;
import org.h2.util.MemoryUtils;
/**
* A b-tree node page that contains index data.
......@@ -38,7 +41,7 @@ class PageBtreeNode extends PageBtree {
private int rowCount = UNKNOWN_ROWCOUNT;
PageBtreeNode(PageBtreeIndex index, int pageId, int parentPageId, DataPage data) {
PageBtreeNode(PageBtreeIndex index, int pageId, int parentPageId, Data data) {
super(index, pageId, parentPageId, data);
start = CHILD_OFFSET_PAIR_START;
}
......@@ -51,8 +54,8 @@ class PageBtreeNode extends PageBtree {
rowCount = rowCountStored = data.readInt();
childPageIds = new int[entryCount + 1];
childPageIds[entryCount] = data.readInt();
rows = new SearchRow[entryCount];
offsets = new int[entryCount];
rows = PageStore.newSearchRows(entryCount);
offsets = MemoryUtils.newInts(entryCount);
for (int i = 0; i < entryCount; i++) {
childPageIds[i] = data.readInt();
offsets[i] = data.readInt();
......@@ -164,7 +167,7 @@ class PageBtreeNode extends PageBtree {
PageBtree split(int splitPoint) throws SQLException {
int newPageId = index.getPageStore().allocatePage();
PageBtreeNode p2 = new PageBtreeNode(index, newPageId, parentPageId, index.getPageStore().createDataPage());
PageBtreeNode p2 = new PageBtreeNode(index, newPageId, parentPageId, index.getPageStore().createData());
if (onlyPosition) {
// TODO optimize: maybe not required
p2.onlyPosition = true;
......@@ -204,7 +207,7 @@ class PageBtreeNode extends PageBtree {
entryCount = 0;
childPageIds = new int[] { page1.getPageId() };
rows = new SearchRow[0];
offsets = new int[0];
offsets = MemoryUtils.EMPTY_INTS;
addChild(0, page2.getPageId(), pivot);
check();
}
......@@ -336,8 +339,8 @@ class PageBtreeNode extends PageBtree {
if (entryCount < 0) {
Message.throwInternalError();
}
SearchRow[] newRows = new SearchRow[entryCount];
int[] newOffsets = new int[entryCount];
SearchRow[] newRows = PageStore.newSearchRows(entryCount);
int[] newOffsets = MemoryUtils.newInts(entryCount);
int[] newChildPageIds = new int[entryCount + 1];
System.arraycopy(offsets, 0, newOffsets, 0, Math.min(entryCount, i));
System.arraycopy(rows, 0, newRows, 0, Math.min(entryCount, i));
......
......@@ -7,10 +7,9 @@
package org.h2.index;
import java.sql.SQLException;
import org.h2.engine.Session;
import org.h2.result.Row;
import org.h2.store.DataPage;
import org.h2.store.Data;
import org.h2.store.Record;
/**
......@@ -36,7 +35,7 @@ abstract class PageData extends Record {
/**
* The data page.
*/
protected final DataPage data;
protected final Data data;
/**
* The number of entries.
......@@ -53,7 +52,7 @@ abstract class PageData extends Record {
*/
protected boolean written;
PageData(PageScanIndex index, int pageId, int parentPageId, DataPage data) {
PageData(PageScanIndex index, int pageId, int parentPageId, Data data) {
this.index = index;
this.parentPageId = parentPageId;
this.data = data;
......@@ -204,7 +203,8 @@ abstract class PageData extends Record {
* @return number of double words (4 bytes)
*/
public int getMemorySize() {
return index.getPageStore().getPageSize() >> 2;
// double the byte array size
return index.getPageStore().getPageSize() >> 1;
}
int getParentPageId() {
......
......@@ -6,11 +6,13 @@
*/
package org.h2.index;
import java.lang.ref.SoftReference;
import java.sql.SQLException;
import org.h2.constant.ErrorCode;
import org.h2.engine.Session;
import org.h2.message.Message;
import org.h2.result.Row;
import org.h2.store.Data;
import org.h2.store.DataPage;
import org.h2.store.PageStore;
......@@ -41,6 +43,11 @@ class PageDataLeaf extends PageData {
*/
Row[] rows;
/**
* For pages with overflow: the soft reference to the row
*/
SoftReference<Row> rowRef;
/**
* The page id of the first overflow page (0 if no overflow).
*/
......@@ -51,7 +58,12 @@ class PageDataLeaf extends PageData {
*/
int start;
PageDataLeaf(PageScanIndex index, int pageId, int parentPageId, DataPage data) {
/**
* The size of the row in bytes for large rows.
*/
private int overflowRowSize;
PageDataLeaf(PageScanIndex index, int pageId, int parentPageId, Data data) {
super(index, pageId, parentPageId, data);
start = KEY_OFFSET_PAIR_START;
}
......@@ -139,10 +151,13 @@ class PageDataLeaf extends PageData {
int previous = getPos();
int dataOffset = pageSize;
int page = index.getPageStore().allocatePage();
do {
if (firstOverflowPageId == 0) {
firstOverflowPageId = page;
}
this.overflowRowSize = pageSize + rowLength;
write();
// free up the space used by the row
rowRef = new SoftReference<Row>(rows[0]);
rows[0] = null;
do {
int type, size, next;
if (remaining <= pageSize - PageDataLeafOverflow.START_LAST) {
type = Page.TYPE_DATA_OVERFLOW | Page.FLAG_LAST;
......@@ -153,13 +168,14 @@ class PageDataLeaf extends PageData {
size = pageSize - PageDataLeafOverflow.START_MORE;
next = index.getPageStore().allocatePage();
}
PageDataLeafOverflow overflow = new PageDataLeafOverflow(this, page, type, previous, next, dataOffset, size);
PageDataLeafOverflow overflow = new PageDataLeafOverflow(this, page, type, previous, next, data, dataOffset, size);
index.getPageStore().updateRecord(overflow, true, null);
dataOffset += size;
remaining -= size;
previous = page;
page = next;
} while (remaining > 0);
data.truncate(index.getPageStore().getPageSize());
}
return 0;
}
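
The do/while loop above chains a large row across overflow pages: every page except the last reserves room in its header for the id of the next page, while the last page stores the remaining size instead. A self-contained sketch of that chaining arithmetic (page size and header sizes are assumed values, not H2 constants):

public class OverflowChainSketch {
    public static void main(String[] args) {
        int pageSize = 256;
        int startLast = 9;                       // assumed header size of the last page
        int startMore = 11;                      // assumed header size of "more" pages
        int remaining = 1000;                    // row bytes that did not fit in the leaf
        int pages = 0;
        do {
            int size;
            if (remaining <= pageSize - startLast) {
                size = remaining;                // last page: header stores the size
            } else {
                size = pageSize - startMore;     // more follow: header stores the next page id
            }
            remaining -= size;
            pages++;
        } while (remaining > 0);
        System.out.println("overflow pages needed: " + pages);
    }
}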
......@@ -204,22 +220,32 @@ class PageDataLeaf extends PageData {
Row r = rows[at];
if (r == null) {
if (firstOverflowPageId != 0) {
if (rowRef != null) {
r = rowRef.get();
if (r != null) {
return r;
}
}
PageStore store = index.getPageStore();
int pageSize = store.getPageSize();
data.setPos(pageSize);
int next = firstOverflowPageId;
int offset = pageSize;
data.setPos(pageSize);
do {
PageDataLeafOverflow page = index.getPageOverflow(next, this, offset);
next = page.readInto(data);
} while (next != 0);
overflowRowSize = data.length();
}
data.setPos(offsets[at]);
r = index.readRow(data);
r.setPos(keys[at]);
if (firstOverflowPageId != 0) {
rowRef = new SoftReference<Row>(r);
} else {
rows[at] = r;
}
}
return r;
}
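
The rowRef handling above is a soft-reference cache: a row that had to be reassembled from overflow pages is kept only through a SoftReference, so the JVM may drop it under memory pressure and a later getRowAt call simply re-reads it. A self-contained sketch of the same pattern (the byte[] payload and loadFromDisk method are stand-ins, not H2 code):

import java.lang.ref.SoftReference;

public class SoftCacheSketch {
    private SoftReference<byte[]> cachedRow;

    byte[] getRow() {
        byte[] row = (cachedRow == null) ? null : cachedRow.get();
        if (row == null) {
            row = loadFromDisk();                       // expensive re-read
            cachedRow = new SoftReference<byte[]>(row); // may be cleared by the GC
        }
        return row;
    }

    private byte[] loadFromDisk() {
        return new byte[1024 * 1024];                   // stand-in for the overflow pages
    }

    public static void main(String[] args) {
        SoftCacheSketch cache = new SoftCacheSketch();
        System.out.println(cache.getRow().length);      // first call loads, later calls may hit the cache
    }
}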
......@@ -229,7 +255,7 @@ class PageDataLeaf extends PageData {
PageData split(int splitPoint) throws SQLException {
int newPageId = index.getPageStore().allocatePage();
PageDataLeaf p2 = new PageDataLeaf(index, newPageId, parentPageId, index.getPageStore().createDataPage());
PageDataLeaf p2 = new PageDataLeaf(index, newPageId, parentPageId, index.getPageStore().createData());
for (int i = splitPoint; i < entryCount;) {
p2.addRowTry(getRowAt(splitPoint));
removeRow(splitPoint);
......@@ -311,6 +337,7 @@ class PageDataLeaf extends PageData {
public void write(DataPage buff) throws SQLException {
write();
index.getPageStore().writePage(getPos(), data);
data.truncate(index.getPageStore().getPageSize());
}
PageStore getPageStore() {
......@@ -329,6 +356,7 @@ class PageDataLeaf extends PageData {
}
readAllRows();
data.reset();
data.checkCapacity(overflowRowSize);
data.writeInt(parentPageId);
int type;
if (firstOverflowPageId == 0) {
......@@ -348,16 +376,11 @@ class PageDataLeaf extends PageData {
}
for (int i = 0; i < entryCount; i++) {
data.setPos(offsets[i]);
rows[i].write(data);
getRowAt(i).write(data);
}
written = true;
}
DataPage getDataPage() throws SQLException {
write();
return data;
}
public String toString() {
return "page[" + getPos() + "] data leaf table:" + index.getId() + " entries:" + entryCount;
}
......
......@@ -9,8 +9,8 @@ package org.h2.index;
import java.sql.SQLException;
import org.h2.constant.ErrorCode;
import org.h2.message.Message;
import org.h2.store.Data;
import org.h2.store.DataPage;
import org.h2.store.PageStore;
import org.h2.store.Record;
/**
......@@ -35,7 +35,10 @@ public class PageDataLeafOverflow extends Record {
*/
static final int START_MORE = 9;
private final PageDataLeaf leaf;
/**
* The index.
*/
private final PageScanIndex index;
/**
* The page type.
......@@ -57,22 +60,24 @@ public class PageDataLeafOverflow extends Record {
*/
private int size;
/**
* The first content byte starts at the given position
* in the leaf page when the page size is unlimited.
*/
private final int offset;
private DataPage data;
private Data data;
PageDataLeafOverflow(PageDataLeaf leaf, int pageId, int type, int previous, int next, int offset, int size) {
this.leaf = leaf;
PageDataLeafOverflow(PageDataLeaf leaf, int pageId, int type, int previous, int next, Data allData, int offset, int size) {
this.index = leaf.index;
setPos(pageId);
this.type = type;
this.parentPage = previous;
this.nextPage = next;
this.offset = offset;
this.size = size;
data = index.getPageStore().createData();
data.writeInt(parentPage);
data.writeByte((byte) type);
if (type == Page.TYPE_DATA_OVERFLOW) {
data.writeInt(nextPage);
} else {
data.writeShortInt(size);
}
data.write(allData.getBytes(), offset, size);
}
/**
......@@ -80,14 +85,13 @@ public class PageDataLeafOverflow extends Record {
*
* @param leaf the leaf page
* @param pageId the page id
* @param data the data page
* @param dataAll the data page with the complete value
* @param offset the offset
*/
public PageDataLeafOverflow(PageDataLeaf leaf, int pageId, DataPage data, int offset) {
this.leaf = leaf;
public PageDataLeafOverflow(PageDataLeaf leaf, int pageId, Data data, int offset) {
this.index = leaf.index;
setPos(pageId);
this.data = data;
this.offset = offset;
}
/**
......@@ -100,7 +104,7 @@ public class PageDataLeafOverflow extends Record {
size = data.readShortInt();
nextPage = 0;
} else if (type == Page.TYPE_DATA_OVERFLOW) {
size = leaf.getPageStore().getPageSize() - START_MORE;
size = index.getPageStore().getPageSize() - START_MORE;
nextPage = data.readInt();
} else {
throw Message.getSQLException(ErrorCode.FILE_CORRUPTED_1, "page:" + getPos() + " type:" + type);
......@@ -113,7 +117,8 @@ public class PageDataLeafOverflow extends Record {
* @param target the target data page
* @return the next page, or 0 if no next page
*/
int readInto(DataPage target) {
int readInto(Data target) {
target.checkCapacity(size);
if (type == (Page.TYPE_DATA_OVERFLOW | Page.FLAG_LAST)) {
target.write(data.getBytes(), START_LAST, size);
return 0;
......@@ -127,22 +132,11 @@ public class PageDataLeafOverflow extends Record {
}
public int getByteCount(DataPage dummy) {
return leaf.getByteCount(dummy);
return index.getPageStore().getPageSize();
}
public void write(DataPage buff) throws SQLException {
PageStore store = leaf.getPageStore();
DataPage overflow = store.createDataPage();
DataPage data = leaf.getDataPage();
overflow.writeInt(parentPage);
overflow.writeByte((byte) type);
if (type == Page.TYPE_DATA_OVERFLOW) {
overflow.writeInt(nextPage);
} else {
overflow.writeShortInt(size);
}
overflow.write(data.getBytes(), offset, size);
store.writePage(getPos(), overflow);
index.getPageStore().writePage(getPos(), data);
}
public String toString() {
......@@ -155,7 +149,8 @@ public class PageDataLeafOverflow extends Record {
* @return number of double words (4 bytes)
*/
public int getMemorySize() {
return leaf.getMemorySize();
// double the byte array size
return index.getPageStore().getPageSize() >> 1;
}
int getParent() {
......
......@@ -11,7 +11,9 @@ import java.sql.SQLException;
import org.h2.engine.Session;
import org.h2.message.Message;
import org.h2.result.Row;
import org.h2.store.Data;
import org.h2.store.DataPage;
import org.h2.util.MemoryUtils;
/**
* A leaf page that contains data of one or multiple rows.
......@@ -39,7 +41,7 @@ class PageDataNode extends PageData {
private int rowCount = UNKNOWN_ROWCOUNT;
PageDataNode(PageScanIndex index, int pageId, int parentPageId, DataPage data) {
PageDataNode(PageScanIndex index, int pageId, int parentPageId, Data data) {
super(index, pageId, parentPageId, data);
}
......@@ -49,7 +51,7 @@ class PageDataNode extends PageData {
rowCount = rowCountStored = data.readInt();
childPageIds = new int[entryCount + 1];
childPageIds[entryCount] = data.readInt();
keys = new int[entryCount];
keys = MemoryUtils.newInts(entryCount);
for (int i = 0; i < entryCount; i++) {
childPageIds[i] = data.readInt();
keys[i] = data.readInt();
......@@ -117,7 +119,7 @@ class PageDataNode extends PageData {
PageData split(int splitPoint) throws SQLException {
int newPageId = index.getPageStore().allocatePage();
PageDataNode p2 = new PageDataNode(index, newPageId, parentPageId, index.getPageStore().createDataPage());
PageDataNode p2 = new PageDataNode(index, newPageId, parentPageId, index.getPageStore().createData());
int firstChild = childPageIds[splitPoint];
for (int i = splitPoint; i < entryCount;) {
p2.addChild(p2.entryCount, childPageIds[splitPoint + 1], keys[splitPoint]);
......@@ -273,7 +275,7 @@ class PageDataNode extends PageData {
if (entryCount < 0) {
Message.throwInternalError();
}
int[] newKeys = new int[entryCount];
int[] newKeys = MemoryUtils.newInts(entryCount);
int[] newChildPageIds = new int[entryCount + 1];
System.arraycopy(keys, 0, newKeys, 0, Math.min(entryCount, i));
System.arraycopy(childPageIds, 0, newChildPageIds, 0, i);
......
......@@ -12,7 +12,6 @@ import java.util.HashMap;
import java.util.HashSet;
import java.util.Iterator;
import java.util.List;
import org.h2.constant.ErrorCode;
import org.h2.constant.SysProperties;
import org.h2.engine.Constants;
......@@ -21,7 +20,7 @@ import org.h2.log.UndoLogRecord;
import org.h2.message.Message;
import org.h2.result.Row;
import org.h2.result.SearchRow;
import org.h2.store.DataPage;
import org.h2.store.Data;
import org.h2.store.PageStore;
import org.h2.store.Record;
import org.h2.table.Column;
......@@ -67,7 +66,7 @@ public class PageScanIndex extends BaseIndex implements RowIndex {
// it should not for new tables, otherwise redo of other operations
// must ensure this page is not used for other things
store.addMeta(this, session, headPos);
PageDataLeaf root = new PageDataLeaf(this, headPos, Page.ROOT, store.createDataPage());
PageDataLeaf root = new PageDataLeaf(this, headPos, Page.ROOT, store.createData());
store.updateRecord(root, true, root.data);
} else {
this.headPos = headPos;
......@@ -129,7 +128,7 @@ public class PageScanIndex extends BaseIndex implements RowIndex {
page1.setPageId(id);
page1.setParentPageId(headPos);
page2.setParentPageId(headPos);
PageDataNode newRoot = new PageDataNode(this, rootPageId, Page.ROOT, store.createDataPage());
PageDataNode newRoot = new PageDataNode(this, rootPageId, Page.ROOT, store.createData());
newRoot.init(page1, pivot, page2);
store.updateRecord(page1, true, page1.data);
store.updateRecord(page2, true, page2.data);
......@@ -164,7 +163,7 @@ public class PageScanIndex extends BaseIndex implements RowIndex {
if (rec != null) {
return (PageDataLeafOverflow) rec;
}
DataPage data = store.readPage(id);
Data data = store.readPage(id);
data.reset();
PageDataLeafOverflow result = new PageDataLeafOverflow(leaf, id, data, offset);
result.read();
......@@ -189,7 +188,7 @@ public class PageScanIndex extends BaseIndex implements RowIndex {
}
return (PageData) rec;
}
DataPage data = store.readPage(id);
Data data = store.readPage(id);
data.reset();
int parentPageId = data.readInt();
int type = data.readByte() & 255;
......@@ -302,7 +301,7 @@ public class PageScanIndex extends BaseIndex implements RowIndex {
private void removeAllRows() throws SQLException {
PageData root = getPage(headPos, 0);
root.freeChildren();
root = new PageDataLeaf(this, headPos, Page.ROOT, store.createDataPage());
root = new PageDataLeaf(this, headPos, Page.ROOT, store.createData());
store.removeRecord(headPos);
store.updateRecord(root, true, null);
rowCount = 0;
......@@ -328,7 +327,7 @@ public class PageScanIndex extends BaseIndex implements RowIndex {
* @param data the data page
* @return the row
*/
Row readRow(DataPage data) throws SQLException {
Row readRow(Data data) throws SQLException {
return tableData.readRow(data);
}
......
This diff is collapsed.
......@@ -15,8 +15,8 @@ import java.sql.Timestamp;
import org.h2.constant.SysProperties;
import org.h2.engine.Constants;
import org.h2.message.Message;
import org.h2.util.ByteUtils;
import org.h2.util.MathUtils;
import org.h2.util.MemoryUtils;
import org.h2.value.Value;
import org.h2.value.ValueArray;
import org.h2.value.ValueBoolean;
......@@ -68,19 +68,19 @@ public class DataPage {
/**
* The data handler responsible for lob objects.
*/
private DataHandler handler;
protected DataHandler handler;
/**
* The data itself.
*/
private byte[] data;
protected byte[] data;
/**
* The current write or read position.
*/
private int pos;
protected int pos;
private DataPage(DataHandler handler, byte[] data) {
protected DataPage(DataHandler handler, byte[] data) {
this.handler = handler;
this.data = data;
}
......@@ -266,7 +266,7 @@ public class DataPage {
*/
public void checkCapacity(int plus) {
if (pos + plus >= data.length) {
byte[] d = ByteUtils.newBytes((data.length + plus) * 2);
byte[] d = MemoryUtils.newBytes((data.length + plus) * 2);
// must copy everything, because pos could be 0 and data may be
// still required
System.arraycopy(data, 0, d, 0, data.length);
......@@ -607,13 +607,13 @@ public class DataPage {
}
case Value.JAVA_OBJECT: {
int len = readInt();
byte[] b = ByteUtils.newBytes(len);
byte[] b = MemoryUtils.newBytes(len);
read(b, 0, len);
return ValueJavaObject.getNoCopy(b);
}
case Value.BYTES: {
int len = readInt();
byte[] b = ByteUtils.newBytes(len);
byte[] b = MemoryUtils.newBytes(len);
read(b, 0, len);
return ValueBytes.getNoCopy(b);
}
......@@ -633,7 +633,7 @@ public class DataPage {
case Value.CLOB: {
int smallLen = readInt();
if (smallLen >= 0) {
byte[] small = ByteUtils.newBytes(smallLen);
byte[] small = MemoryUtils.newBytes(smallLen);
read(small, 0, smallLen);
return ValueLob.createSmallLob(dataType, small);
}
......
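
The checkCapacity change above keeps the usual growth strategy: when the buffer is too small, allocate at least twice the required size (now through MemoryUtils.newBytes) and copy the old contents, because data before the current position may still be needed. A self-contained sketch of that growth step (plain new byte[] instead of MemoryUtils):

public class GrowBufferSketch {
    public static void main(String[] args) {
        byte[] data = new byte[16];
        int pos = 10;
        int plus = 20;                                    // bytes about to be written
        if (pos + plus >= data.length) {
            byte[] d = new byte[(data.length + plus) * 2];
            System.arraycopy(data, 0, d, 0, data.length); // keep everything already written
            data = d;
        }
        System.out.println("capacity after growth: " + data.length);
    }
}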
......@@ -27,7 +27,6 @@ import org.h2.log.RedoLogRecord;
import org.h2.message.Message;
import org.h2.message.Trace;
import org.h2.util.BitField;
import org.h2.util.ByteUtils;
import org.h2.util.Cache;
import org.h2.util.CacheLRU;
import org.h2.util.CacheObject;
......@@ -35,6 +34,7 @@ import org.h2.util.CacheWriter;
import org.h2.util.FileUtils;
import org.h2.util.IntArray;
import org.h2.util.MathUtils;
import org.h2.util.MemoryUtils;
import org.h2.util.New;
import org.h2.util.ObjectArray;
......@@ -575,7 +575,7 @@ public class DiskFile implements CacheWriter {
Message.throwInternalError("0 blocks to read pos=" + pos);
}
if (blockCount > 1) {
byte[] b2 = ByteUtils.newBytes(blockCount * BLOCK_SIZE);
byte[] b2 = MemoryUtils.newBytes(blockCount * BLOCK_SIZE);
System.arraycopy(buff, 0, b2, 0, BLOCK_SIZE);
buff = b2;
file.readFully(buff, BLOCK_SIZE, blockCount * BLOCK_SIZE - BLOCK_SIZE);
......
......@@ -14,7 +14,7 @@ import org.h2.constant.SysProperties;
import org.h2.engine.Constants;
import org.h2.message.Message;
import org.h2.tools.CompressTool;
import org.h2.util.ByteUtils;
import org.h2.util.MemoryUtils;
/**
* An input stream that is backed by a file store.
......@@ -120,7 +120,7 @@ public class FileStoreInputStream extends InputStream {
readInt();
if (compress != null) {
int uncompressed = readInt();
byte[] buff = ByteUtils.newBytes(remainingInBuffer);
byte[] buff = MemoryUtils.newBytes(remainingInBuffer);
page.read(buff, 0, remainingInBuffer);
page.reset();
page.checkCapacity(uncompressed);
......
......@@ -28,7 +28,7 @@ public class PageFreeList extends Record {
private final BitField used = new BitField();
private final int pageCount;
private boolean full;
private DataPage data;
private Data data;
PageFreeList(PageStore store, int pageId) {
setPos(pageId);
......@@ -91,7 +91,7 @@ public class PageFreeList extends Record {
* Read the page from the disk.
*/
void read() throws SQLException {
data = store.createDataPage();
data = store.createData();
store.readPage(getPos(), data);
int p = data.readInt();
int t = data.readByte();
......@@ -113,7 +113,7 @@ public class PageFreeList extends Record {
}
public void write(DataPage buff) throws SQLException {
data = store.createDataPage();
data = store.createData();
data.writeInt(0);
int type = Page.TYPE_FREE_LIST;
data.writeByte((byte) type);
......
......@@ -111,7 +111,7 @@ public class PageLog {
private DataInputStream in;
private int firstTrunkPage;
private int firstDataPage;
private DataPage data;
private Data data;
private int logId, logPos;
private int firstLogId;
private BitField undo = new BitField();
......@@ -120,7 +120,7 @@ public class PageLog {
PageLog(PageStore store) {
this.store = store;
data = store.createDataPage();
data = store.createData();
trace = store.getTrace();
}
......@@ -196,7 +196,7 @@ public class PageLog {
pageIn = new PageInputStream(store, firstTrunkPage, firstDataPage);
in = new DataInputStream(pageIn);
int logId = 0;
DataPage data = store.createDataPage();
Data data = store.createData();
try {
pos = 0;
while (true) {
......@@ -268,14 +268,14 @@ public class PageLog {
}
}
}
if (stage == RECOVERY_STAGE_REDO) {
sessionStates = New.hashMap();
}
} catch (EOFException e) {
trace.debug("log recovery stopped: " + e.toString());
} catch (IOException e) {
throw Message.convertIOException(e, "recover");
}
if (stage == RECOVERY_STAGE_REDO) {
sessionStates = New.hashMap();
}
}
/**
......@@ -304,7 +304,7 @@ public class PageLog {
* @param data a temporary buffer
* @return the row
*/
public static Row readRow(DataInputStream in, DataPage data) throws IOException, SQLException {
public static Row readRow(DataInputStream in, Data data) throws IOException, SQLException {
int pos = in.readInt();
int len = in.readInt();
data.reset();
......@@ -328,7 +328,7 @@ public class PageLog {
* @param pageId the page id
* @param page the old page data
*/
void addUndo(int pageId, DataPage page) throws SQLException {
void addUndo(int pageId, Data page) throws SQLException {
try {
if (undo.get(pageId)) {
return;
......@@ -398,17 +398,17 @@ public class PageLog {
int pageSize = store.getPageSize();
byte[] t = StringUtils.utf8Encode(transaction);
int len = t.length;
if (1 + DataPage.LENGTH_INT * 2 + len >= PageStreamData.getCapacity(pageSize)) {
if (1 + Data.LENGTH_INT * 2 + len >= PageStreamData.getCapacity(pageSize)) {
throw Message.getInvalidValueException("transaction name too long", transaction);
}
pageOut.fillDataPage();
pageOut.fillPage();
out.write(PREPARE_COMMIT);
out.writeInt(session.getId());
out.writeInt(len);
out.write(t);
flushOut();
// store it on a separate log page
pageOut.fillDataPage();
pageOut.fillPage();
if (log.getFlushOnEachCommit()) {
flush();
}
......@@ -461,6 +461,7 @@ public class PageLog {
row.setLastLog(logId, logPos);
data.reset();
data.checkCapacity(row.getByteCount(data));
row.write(data);
out.write(add ? ADD : REMOVE);
out.writeInt(session.getId());
......@@ -497,7 +498,7 @@ public class PageLog {
}
undo = new BitField();
logId++;
pageOut.fillDataPage();
pageOut.fillPage();
int currentDataPage = pageOut.getCurrentDataPageId();
logIdPageMap.put(logId, currentDataPage);
}
......@@ -636,4 +637,14 @@ public class PageLog {
d.write(null);
}
void truncate() throws SQLException {
do {
// TODO keep trunk page in the cache
PageStreamTrunk t = new PageStreamTrunk(store, firstTrunkPage);
t.read();
firstTrunkPage = t.getNextTrunk();
t.free();
} while (firstTrunkPage != 0);
}
}
......@@ -173,7 +173,7 @@ public class PageOutputStream extends OutputStream {
* Fill the data page with zeros and write it.
* This is required for a checkpoint.
*/
void fillDataPage() throws SQLException {
void fillPage() throws SQLException {
if (trace.isDebugEnabled()) {
trace.debug("pageOut.storePage fill " + data.getPos());
}
......
......@@ -27,7 +27,7 @@ public class PageStreamData extends Record {
private final PageStore store;
private int trunk;
private DataPage data;
private Data data;
private int remaining;
private int length;
......@@ -41,7 +41,7 @@ public class PageStreamData extends Record {
* Read the page from the disk.
*/
void read() throws SQLException {
data = store.createDataPage();
data = store.createData();
store.readPage(getPos(), data);
trunk = data.readInt();
data.setPos(4);
......@@ -61,7 +61,7 @@ public class PageStreamData extends Record {
* Write the header data.
*/
void initWrite() {
data = store.createDataPage();
data = store.createData();
data.writeInt(trunk);
data.writeByte((byte) Page.TYPE_STREAM_DATA);
data.writeInt(0);
......
......@@ -10,6 +10,7 @@ import java.sql.SQLException;
import org.h2.constant.ErrorCode;
import org.h2.index.Page;
import org.h2.message.Message;
import org.h2.util.MemoryUtils;
/**
* A trunk page of a stream. It contains the page numbers of the stream, and
......@@ -31,7 +32,7 @@ public class PageStreamTrunk extends Record {
private int nextTrunk;
private int[] pageIds;
private int pageCount;
private DataPage data;
private Data data;
private int index;
PageStreamTrunk(PageStore store, int parent, int pageId, int next, int[] pageIds) {
......@@ -52,13 +53,13 @@ public class PageStreamTrunk extends Record {
* Read the page from the disk.
*/
void read() throws SQLException {
data = store.createDataPage();
data = store.createData();
store.readPage(getPos(), data);
parent = data.readInt();
int t = data.readByte();
if (t == Page.TYPE_EMPTY) {
// end of file
pageIds = new int[0];
pageIds = MemoryUtils.EMPTY_INTS;
return;
}
if (t != Page.TYPE_STREAM_TRUNK) {
......@@ -93,7 +94,7 @@ public class PageStreamTrunk extends Record {
}
public void write(DataPage buff) throws SQLException {
data = store.createDataPage();
data = store.createData();
data.writeInt(parent);
data.writeByte((byte) Page.TYPE_STREAM_TRUNK);
data.writeInt(nextTrunk);
......@@ -135,7 +136,7 @@ public class PageStreamTrunk extends Record {
* @return the number of pages freed
*/
int free() throws SQLException {
DataPage empty = store.createDataPage();
Data empty = store.createData();
store.freePage(getPos(), false, null);
int freed = 1;
for (int i = 0; i < pageCount; i++) {
......
......@@ -34,6 +34,7 @@ import org.h2.message.Trace;
import org.h2.result.Row;
import org.h2.result.SimpleRow;
import org.h2.security.SHA256;
import org.h2.store.Data;
import org.h2.store.DataHandler;
import org.h2.store.DataPage;
import org.h2.store.DiskFile;
......@@ -49,6 +50,7 @@ import org.h2.util.FileUtils;
import org.h2.util.IOUtils;
import org.h2.util.IntArray;
import org.h2.util.MathUtils;
import org.h2.util.MemoryUtils;
import org.h2.util.New;
import org.h2.util.ObjectArray;
import org.h2.util.RandomUtils;
......@@ -496,7 +498,7 @@ public class Recover extends Tool implements DataHandler {
// Math.abs(Integer.MIN_VALUE) == Integer.MIN_VALUE
blocks = MathUtils.convertLongToInt(Math.abs(s.readInt()));
if (blocks > 1) {
byte[] b2 = ByteUtils.newBytes(blocks * blockSize);
byte[] b2 = MemoryUtils.newBytes(blocks * blockSize);
System.arraycopy(buff, 0, b2, 0, blockSize);
buff = b2;
try {
......@@ -532,7 +534,7 @@ public class Recover extends Tool implements DataHandler {
case 'S': {
char fileType = (char) s.readByte();
int sumLength = s.readInt();
byte[] summary = ByteUtils.newBytes(sumLength);
byte[] summary = MemoryUtils.newBytes(sumLength);
if (sumLength > 0) {
s.read(summary, 0, sumLength);
}
......@@ -858,7 +860,7 @@ public class Recover extends Tool implements DataHandler {
}
private void dumpPageLogStream(PrintWriter writer, FileStore store, int logFirstTrunkPage, int logFirstDataPage, int pageSize) throws IOException, SQLException {
DataPage s = DataPage.create(this, pageSize);
Data s = Data.create(this, pageSize);
DataInputStream in = new DataInputStream(
new PageInputStream(writer, this, store, logFirstTrunkPage, logFirstDataPage, pageSize)
);
......
......@@ -332,13 +332,15 @@ kill -9 `jps -l | grep "org.h2.test.TestAll" | cut -d " " -f 1`
new TestTimer().runTest(test);
}
} else {
test.runTests();
int todo;
// System.setProperty(SysProperties.H2_PAGE_STORE, "true");
// test.pageStore = true;
// test.runTests();
System.setProperty(SysProperties.H2_PAGE_STORE, "true");
test.pageStore = true;
test.runTests();
TestPerformance.main(new String[]{ "-init", "-db", "1"});
System.setProperty(SysProperties.H2_PAGE_STORE, "false");
test.pageStore = false;
test.runTests();
TestPerformance.main(new String[]{ "-init", "-db", "1"});
}
System.out.println(TestBase.formatTime(System.currentTimeMillis() - time) + " total");
......
......@@ -19,6 +19,7 @@ import java.sql.Statement;
import java.sql.Types;
import java.text.SimpleDateFormat;
import java.util.ArrayList;
import java.util.LinkedList;
import org.h2.jdbc.JdbcConnection;
import org.h2.message.TraceSystem;
......@@ -47,6 +48,8 @@ public abstract class TestBase {
*/
protected long start;
private LinkedList<byte[]> memory = new LinkedList<byte[]>();
/**
* Get the test directory for this test.
*
......@@ -1068,4 +1071,43 @@ public abstract class TestBase {
return "-Dh2.pageStore=" + System.getProperty("h2.pageStore");
}
protected void eatMemory(int remainingKB) {
byte[] reserve = new byte[remainingKB * 1024];
int max = 128 * 1024 * 1024;
int div = 2;
while (true) {
long free = Runtime.getRuntime().freeMemory();
long freeTry = free / div;
int eat = (int) Math.min(max, freeTry);
try {
byte[] block = new byte[eat];
memory.add(block);
} catch (OutOfMemoryError e) {
if (eat < 32) {
break;
}
if (eat == max) {
max /= 2;
if (max < 128) {
break;
}
}
if (eat == freeTry) {
div += 1;
} else {
div = 2;
}
}
}
// silly code - makes sure there are no warnings
reserve[0] = reserve[1];
// actually it is anyway garbage collected
reserve = null;
}
protected void freeMemory() {
memory.clear();
}
}
......@@ -86,19 +86,32 @@ public class TestMemoryUsage extends TestBase {
stat.execute("SET MAX_LENGTH_INPLACE_LOB 32768");
stat.execute("SET CACHE_SIZE 8000");
stat.execute("CREATE TABLE TEST(ID IDENTITY, DATA CLOB)");
System.gc();
System.gc();
freeSoftReferences();
try {
int start = MemoryUtils.getMemoryUsed();
for (int i = 0; i < 4; i++) {
stat.execute("INSERT INTO TEST(DATA) SELECT SPACE(32000) FROM SYSTEM_RANGE(1, 200)");
System.gc();
System.gc();
freeSoftReferences();
int used = MemoryUtils.getMemoryUsed();
if ((used - start) > 16000) {
fail("Used: " + (used - start));
}
}
} finally {
conn.close();
freeMemory();
}
}
void freeSoftReferences() {
try {
eatMemory(1);
} catch (OutOfMemoryError e) {
// ignore
}
System.gc();
System.gc();
freeMemory();
}
private void testCreateIndex() throws SQLException {
......
......@@ -11,8 +11,6 @@ import java.sql.PreparedStatement;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.sql.Statement;
import java.util.LinkedList;
import org.h2.constant.ErrorCode;
import org.h2.test.TestBase;
......@@ -22,8 +20,6 @@ import org.h2.test.TestBase;
*/
public class TestOutOfMemory extends TestBase {
private LinkedList<byte[]> list = new LinkedList<byte[]>();
/**
* Run just this test.
*
......@@ -56,7 +52,7 @@ public class TestOutOfMemory extends TestBase {
} catch (SQLException e) {
assertEquals(ErrorCode.OUT_OF_MEMORY, e.getErrorCode());
}
list = null;
freeMemory();
ResultSet rs = stat.executeQuery("select count(*) from stuff");
rs.next();
assertEquals(2000, rs.getInt(1));
......@@ -66,38 +62,4 @@ public class TestOutOfMemory extends TestBase {
deleteDb("outOfMemory");
}
private void eatMemory(int remainingKB) {
byte[] reserve = new byte[remainingKB * 1024];
int max = 128 * 1024 * 1024;
int div = 2;
while (true) {
long free = Runtime.getRuntime().freeMemory();
long freeTry = free / div;
int eat = (int) Math.min(max, freeTry);
try {
byte[] block = new byte[eat];
list.add(block);
} catch (OutOfMemoryError e) {
if (eat < 32) {
break;
}
if (eat == max) {
max /= 2;
if (max < 128) {
break;
}
}
if (eat == freeTry) {
div += 1;
} else {
div = 2;
}
}
}
// silly code - makes sure there are no warnings
reserve[0] = reserve[1];
// actually it is anyway garbage collected
reserve = null;
}
}