Commit 7c23654a authored by Thomas Mueller

New experimental page store.

Parent 225e8ba1
......@@ -2345,8 +2345,10 @@ public class Database implements DataHandler {
*/
public void checkpoint() throws SQLException {
if (SysProperties.PAGE_STORE) {
if (persistent) {
pageStore.checkpoint();
}
}
getLog().checkpoint();
getTempFileDeleter().deleteUnused();
}
......
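The checkpoint above only reaches the page store when the experimental flag is enabled. A minimal sketch of switching it on for a local test, assuming the h2.pageStore system property (commented out in the TestAll hunk further down) is the switch behind SysProperties.PAGE_STORE; the database URL and credentials are illustrative:

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.Statement;

public class PageStoreSmokeTest {
    public static void main(String[] args) throws Exception {
        // must be set before the first connection is opened
        System.setProperty("h2.pageStore", "true");
        Class.forName("org.h2.Driver");
        Connection conn = DriverManager.getConnection("jdbc:h2:~/pageStoreTest", "sa", "");
        Statement stat = conn.createStatement();
        // CHECKPOINT now also flushes the page store for persistent databases
        stat.execute("CHECKPOINT");
        conn.close();
    }
}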
......@@ -145,6 +145,13 @@ abstract class PageBtree extends Record {
*/
abstract void find(PageBtreeCursor cursor, SearchRow first, boolean bigger) throws SQLException;
/**
* Find the last row.
*
* @param cursor the cursor
*/
abstract void last(PageBtreeCursor cursor) throws SQLException;
/**
* Get the row at this position.
*
......@@ -190,6 +197,13 @@ abstract class PageBtree extends Record {
*/
abstract PageBtreeLeaf getFirstLeaf() throws SQLException;
/**
* Get the first child leaf page of a page.
*
* @return the page
*/
abstract PageBtreeLeaf getLastLeaf() throws SQLException;
/**
* Change the parent page id.
*
......
......@@ -62,7 +62,6 @@ public class PageBtreeCursor implements Cursor {
}
if (i >= current.getEntryCount()) {
current.nextPage(this);
i = 0;
if (current == null) {
return false;
}
......@@ -77,9 +76,19 @@ public class PageBtreeCursor implements Cursor {
return true;
}
public boolean previous() {
public boolean previous() throws SQLException {
if (current == null) {
return false;
}
if (i <= 0) {
current.previousPage(this);
if (current == null) {
return false;
}
}
currentSearchRow = current.getRow(i);
currentRow = null;
i--;
int todo;
return true;
}
......
......@@ -21,6 +21,7 @@ import org.h2.table.IndexColumn;
import org.h2.table.TableData;
import org.h2.value.Value;
import org.h2.value.ValueLob;
import org.h2.value.ValueNull;
/**
* This is the most common type of index, a b tree index.
......@@ -152,7 +153,7 @@ public class PageBtreeIndex extends BaseIndex {
}
public boolean canGetFirstOrLast() {
return false;
return true;
}
public Cursor findNext(Session session, SearchRow first, SearchRow last) throws SQLException {
......@@ -174,7 +175,34 @@ public class PageBtreeIndex extends BaseIndex {
}
public Cursor findFirstOrLast(Session session, boolean first) throws SQLException {
throw Message.getUnsupportedException("PAGE");
if (first) {
// TODO optimization: this loops through NULL elements
Cursor cursor = find(session, null, false, null);
while (cursor.next()) {
SearchRow row = cursor.getSearchRow();
Value v = row.getValue(columnIds[0]);
if (v != ValueNull.INSTANCE) {
return cursor;
}
}
return cursor;
}
PageBtree root = getPage(headPos);
PageBtreeCursor cursor = new PageBtreeCursor(session, this, null);
root.last(cursor);
cursor.previous();
// TODO optimization: this loops through NULL elements
do {
SearchRow row = cursor.getSearchRow();
if (row == null) {
break;
}
Value v = row.getValue(columnIds[0]);
if (v != ValueNull.INSTANCE) {
return cursor;
}
} while (cursor.previous());
return cursor;
}
public double getCost(Session session, int[] masks) {
......@@ -278,8 +306,6 @@ public class PageBtreeIndex extends BaseIndex {
if (trace.isDebugEnabled()) {
trace.debug("close");
}
int todoWhyRequired;
// store = null;
int writeRowCount;
}
......
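With canGetFirstOrLast returning true, the b-tree index can now answer MIN and MAX on its first column by walking to either end of the tree and skipping NULL entries, as findFirstOrLast does above. A hedged JDBC sketch of the kind of query that benefits (table, column and index names are illustrative):

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.ResultSet;
import java.sql.Statement;

public class MinMaxExample {
    public static void main(String[] args) throws Exception {
        Class.forName("org.h2.Driver");
        Connection conn = DriverManager.getConnection("jdbc:h2:mem:minmax", "sa", "");
        Statement stat = conn.createStatement();
        stat.execute("CREATE TABLE TEST(ID INT PRIMARY KEY, V INT)");
        stat.execute("CREATE INDEX IDX_V ON TEST(V)");
        stat.execute("INSERT INTO TEST VALUES(1, NULL)");
        stat.execute("INSERT INTO TEST VALUES(2, 10)");
        stat.execute("INSERT INTO TEST VALUES(3, 20)");
        // MIN/MAX on the indexed column can be resolved from the ends of
        // the index instead of scanning all rows; NULLs are skipped
        ResultSet rs = stat.executeQuery("SELECT MIN(V), MAX(V) FROM TEST");
        rs.next();
        System.out.println(rs.getInt(1) + " " + rs.getInt(2));
        conn.close();
    }
}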
......@@ -65,7 +65,7 @@ class PageBtreeLeaf extends PageBtree {
int pageSize = index.getPageStore().getPageSize();
int last = entryCount == 0 ? pageSize : offsets[entryCount - 1];
if (last - rowLength < start + OFFSET_LENGTH) {
if (entryCount > 0) {
if (entryCount > 1) {
int todoSplitAtLastInsertionPoint;
return (entryCount / 2) + 1;
}
......@@ -142,6 +142,10 @@ class PageBtreeLeaf extends PageBtree {
return this;
}
PageBtreeLeaf getLastLeaf() {
return this;
}
boolean remove(SearchRow row) throws SQLException {
int at = find(row, false, false);
if (index.compareRows(row, getRow(at)) != 0) {
......@@ -213,6 +217,10 @@ class PageBtreeLeaf extends PageBtree {
cursor.setCurrent(this, i);
}
void last(PageBtreeCursor cursor) {
cursor.setCurrent(this, entryCount - 1);
}
void remapChildren() {
}
......@@ -230,6 +238,20 @@ class PageBtreeLeaf extends PageBtree {
next.nextPage(cursor, getPos());
}
/**
* Set the cursor to the last row of the previous page.
*
* @param cursor the cursor
*/
void previousPage(PageBtreeCursor cursor) throws SQLException {
if (parentPageId == Page.ROOT) {
cursor.setCurrent(null, 0);
return;
}
PageBtreeNode next = (PageBtreeNode) index.getPage(parentPageId);
next.previousPage(cursor, getPos());
}
public String toString() {
return "page[" + getPos() + "] btree leaf table:" + index.getId() + " entries:" + entryCount;
}
......
......@@ -61,7 +61,7 @@ class PageBtreeNode extends PageBtree {
}
private int addChildTry(SearchRow row) throws SQLException {
if (entryCount == 0) {
if (entryCount < 2) {
return 0;
}
int rowLength = index.getRowSize(data, row, onlyPosition);
......@@ -86,9 +86,7 @@ class PageBtreeNode extends PageBtree {
int pageSize = index.getPageStore().getPageSize();
int last = entryCount == 0 ? pageSize : offsets[entryCount - 1];
if (last - rowLength < start + CHILD_OFFSET_PAIR_LENGTH) {
if (entryCount > 0) {
throw Message.throwInternalError();
}
// TODO remap all children
onlyPosition = true;
rowLength = index.getRowSize(data, row, onlyPosition);
}
......@@ -132,7 +130,7 @@ class PageBtreeNode extends PageBtree {
SearchRow pivot = page.getRow(splitPoint - 1);
int splitPoint2 = addChildTry(pivot);
if (splitPoint2 != 0) {
return splitPoint;
return splitPoint2;
}
PageBtree page2 = page.split(splitPoint);
addChild(x, page2.getPageId(), pivot);
......@@ -158,6 +156,10 @@ class PageBtreeNode extends PageBtree {
PageBtree split(int splitPoint) throws SQLException {
int newPageId = index.getPageStore().allocatePage();
PageBtreeNode p2 = new PageBtreeNode(index, newPageId, parentPageId, index.getPageStore().createDataPage());
if (onlyPosition) {
// TODO optimize: maybe not required
p2.onlyPosition = true;
}
int firstChild = childPageIds[splitPoint];
for (int i = splitPoint; i < entryCount;) {
p2.addChild(p2.entryCount, childPageIds[splitPoint + 1], rows[splitPoint]);
......@@ -166,6 +168,9 @@ class PageBtreeNode extends PageBtree {
int lastChild = childPageIds[splitPoint - 1];
removeChild(splitPoint - 1);
childPageIds[splitPoint - 1] = lastChild;
if (p2.childPageIds == null) {
p2.childPageIds = new int[1];
}
p2.childPageIds[0] = firstChild;
p2.remapChildren();
return p2;
......@@ -209,11 +214,21 @@ class PageBtreeNode extends PageBtree {
page.find(cursor, first, bigger);
}
void last(PageBtreeCursor cursor) throws SQLException {
int child = childPageIds[entryCount];
index.getPage(child).last(cursor);
}
PageBtreeLeaf getFirstLeaf() throws SQLException {
int child = childPageIds[0];
return index.getPage(child).getFirstLeaf();
}
PageBtreeLeaf getLastLeaf() throws SQLException {
int child = childPageIds[entryCount - 1];
return index.getPage(child).getLastLeaf();
}
boolean remove(SearchRow row) throws SQLException {
int at = find(row, false, false);
// merge is not implemented to allow concurrent usage
......@@ -353,6 +368,36 @@ class PageBtreeNode extends PageBtree {
cursor.setCurrent(leaf, 0);
}
/**
* Set the cursor to the last row of the previous page.
*
* @param cursor the cursor
* @param row the current row
*/
void previousPage(PageBtreeCursor cursor, int pageId) throws SQLException {
int i;
// TODO maybe keep the index in the child page (transiently)
for (i = childPageIds.length - 1; i >= 0; i--) {
if (childPageIds[i] == pageId) {
i--;
break;
}
}
if (i < 0) {
if (parentPageId == Page.ROOT) {
cursor.setCurrent(null, 0);
return;
}
PageBtreeNode previous = (PageBtreeNode) index.getPage(parentPageId);
previous.previousPage(cursor, getPos());
return;
}
PageBtree page = index.getPage(childPageIds[i]);
PageBtreeLeaf leaf = page.getLastLeaf();
cursor.setCurrent(leaf, leaf.entryCount - 1);
}
public String toString() {
return "page[" + getPos() + "] btree node table:" + index.getId() + " entries:" + entryCount;
}
......
......@@ -196,9 +196,6 @@ public class PageScanIndex extends BaseIndex implements RowIndex {
public void remove(Session session, Row row) throws SQLException {
if (trace.isDebugEnabled()) {
trace.debug("remove " + row.getPos());
if (table.getId() == 0) {
System.out.println("table 0 remove");
}
}
if (tableData.getContainsLargeObject()) {
for (int i = 0; i < row.getColumnCount(); i++) {
......
......@@ -87,15 +87,9 @@ public class LogSystem {
*/
public void setMaxLogSize(long maxSize) {
this.maxLogSize = maxSize;
if (pageStore != null) {
pageStore.setMaxLogSize(maxSize);
}
/**
* Get the maximum log file size.
*
* @return the maximum size
*/
public long getMaxLogSize() {
return maxLogSize;
}
/**
......
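LogSystem now forwards the limit to the page store when one is in use. Assuming the SQL-level SET MAX_LOG_SIZE setting (value in megabytes) is still routed through this method, a short sketch of adjusting it from JDBC:

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.Statement;

public class MaxLogSizeExample {
    public static void main(String[] args) throws Exception {
        Class.forName("org.h2.Driver");
        Connection conn = DriverManager.getConnection("jdbc:h2:~/logSizeTest", "sa", "");
        Statement stat = conn.createStatement();
        // with the page store enabled, this also caps the log size that
        // triggers the automatic checkpoint in PageStore.commit()
        stat.execute("SET MAX_LOG_SIZE 16");
        conn.close();
    }
}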
......@@ -286,6 +286,10 @@ public class Message {
*/
public static SQLException convertIOException(IOException e, String message) {
if (message == null) {
Throwable t = e.getCause();
if (t != null && t instanceof SQLException) {
return (SQLException) t;
}
return getSQLException(ErrorCode.IO_EXCEPTION_1, new String[] { e.toString() }, e);
}
return getSQLException(ErrorCode.IO_EXCEPTION_2, new String[] { e.toString(), message }, e);
......
......@@ -389,18 +389,8 @@ public class PageLog {
return;
}
int firstDataPageToKeep = logIdPageMap.get(firstUncommittedLog);
trace.debug("log.removeUntil " + firstDataPageToKeep);
while (true) {
// TODO keep trunk page in the cache
PageStreamTrunk t = new PageStreamTrunk(store, firstTrunkPage);
t.read();
if (t.contains(firstDataPageToKeep)) {
store.setLogFirstPage(t.getPos(), firstDataPageToKeep);
break;
}
firstTrunkPage = t.getNextTrunk();
t.free();
}
firstTrunkPage = pageOut.removeUntil(firstTrunkPage, firstDataPageToKeep);
store.setLogFirstPage(firstTrunkPage, firstDataPageToKeep);
while (firstLogId < firstUncommittedLog) {
if (firstLogId > 0) {
// there is no entry for log 0
......@@ -473,4 +463,8 @@ public class PageLog {
return state;
}
public long getSize() {
return pageOut.getSize();
}
}
......@@ -30,6 +30,7 @@ public class PageOutputStream extends OutputStream {
private byte[] buffer = new byte[1];
private boolean needFlush;
private boolean writing;
private int pages;
/**
* Create a new page output stream.
......@@ -97,11 +98,13 @@ public class PageOutputStream extends OutputStream {
}
trunkNext = reservedPages.get(len);
trunk = new PageStreamTrunk(store, parent, trunkPageId, trunkNext, pageIds);
pages++;
trunk.write(null);
reservedPages.removeRange(0, len + 1);
nextData = trunk.getNextDataPage();
}
data = new PageStreamData(store, nextData, trunk.getPos());
pages++;
data.initWrite();
}
......@@ -174,4 +177,29 @@ public class PageOutputStream extends OutputStream {
initNextData();
}
/**
* Remove all pages until the given data page.
*
* @param firstTrunkPage the first trunk page
* @param firstDataPageToKeep the first data page to keep
* @return the trunk page of the data page to keep
*/
int removeUntil(int firstTrunkPage, int firstDataPageToKeep) throws SQLException {
trace.debug("log.removeUntil " + firstDataPageToKeep);
while (true) {
// TODO keep trunk page in the cache
PageStreamTrunk t = new PageStreamTrunk(store, firstTrunkPage);
t.read();
if (t.contains(firstDataPageToKeep)) {
return t.getPos();
}
firstTrunkPage = t.getNextTrunk();
pages -= t.free();
}
}
long getSize() {
return pages * store.getPageSize();
}
}
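getSize() reports the log length as pages * pageSize, which PageStore.commit() further down compares against the configured maximum before forcing a checkpoint. A quick worked sketch of that threshold check, using illustrative numbers rather than the defaults from this diff:

public class LogSizeMath {
    public static void main(String[] args) {
        int pageSize = 8 * 1024;            // illustrative page size in bytes
        long maxLogSize = 4L * 1024 * 1024; // illustrative limit: 4 MB
        int pages = 600;                    // trunk + data pages written so far
        long size = (long) pages * pageSize;
        // 600 * 8192 = 4915200 bytes, which exceeds 4194304, so a
        // checkpoint would be triggered on the next commit
        System.out.println(size > maxLogSize);
    }
}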
......@@ -12,6 +12,7 @@ import java.sql.SQLException;
import java.util.HashMap;
import java.util.zip.CRC32;
import org.h2.constant.ErrorCode;
import org.h2.engine.Constants;
import org.h2.engine.Database;
import org.h2.engine.Session;
import org.h2.index.Cursor;
......@@ -24,6 +25,7 @@ import org.h2.message.Message;
import org.h2.message.Trace;
import org.h2.message.TraceSystem;
import org.h2.result.Row;
import org.h2.result.SortOrder;
import org.h2.schema.Schema;
import org.h2.table.Column;
import org.h2.table.IndexColumn;
......@@ -36,6 +38,7 @@ import org.h2.util.CacheWriter;
import org.h2.util.FileUtils;
import org.h2.util.New;
import org.h2.util.ObjectArray;
import org.h2.util.StatementBuilder;
import org.h2.util.StringUtils;
import org.h2.value.CompareMode;
import org.h2.value.Value;
......@@ -66,9 +69,9 @@ import org.h2.value.ValueString;
*/
public class PageStore implements CacheWriter {
// TODO auto checkpoint
// TODO TestSampleApps
// TODO TestIndex.wideIndex: btree nodes should be full
// TODO check memory usage
// TODO TestPowerOff
// TODO PageStore.openMetaIndex (desc and nulls first / last)
// TODO PageBtreeIndex.canGetFirstOrLast
// TODO btree index with fixed size values doesn't need offset and so on
......@@ -107,6 +110,7 @@ public class PageStore implements CacheWriter {
// TODO var int: see google protocol buffers
// TODO SessionState.logId is no longer needed
// TODO PageData and PageBtree addRowTry: try to simplify
// TODO performance: maybe don't save direct parent in btree nodes (only root)
// TODO when removing DiskFile:
// remove CacheObject.blockCount
......@@ -131,6 +135,8 @@ public class PageStore implements CacheWriter {
private static final int PAGE_ID_META_ROOT = 4;
private static final int PAGE_ID_LOG_TRUNK = 5;
private static final int MIN_PAGE_COUNT = 6;
private static final int INCREMENT_PAGES = 128;
private static final int READ_VERSION = 0;
......@@ -174,6 +180,7 @@ public class PageStore implements CacheWriter {
private PageScanIndex metaIndex;
private HashMap<Integer, Index> metaObjects;
private int systemTableHeadPos;
private long maxLogSize = Constants.DEFAULT_MAX_LOG_SIZE;
/**
* Create a new page store object.
......@@ -225,16 +232,49 @@ public class PageStore implements CacheWriter {
public void open() throws SQLException {
try {
if (FileUtils.exists(fileName)) {
// existing
if (FileUtils.length(fileName) < MIN_PAGE_COUNT * PAGE_SIZE_MIN) {
// the database was not fully created
openNew();
} else {
openExisting();
}
} else {
openNew();
}
// lastUsedPage = getFreeList().getLastUsed() + 1;
} catch (SQLException e) {
close();
throw e;
}
}
private void openNew() throws SQLException {
setPageSize(PAGE_SIZE_DEFAULT);
freeListPagesPerList = PageFreeList.getPagesAddressed(pageSize);
file = database.openFile(fileName, accessMode, false);
recoveryRunning = true;
writeStaticHeader();
writeVariableHeader();
log = new PageLog(this);
increaseFileSize(MIN_PAGE_COUNT);
openMetaIndex();
logFirstTrunkPage = allocatePage();
log.openForWriting(logFirstTrunkPage);
systemTableHeadPos = Index.EMPTY_HEAD;
recoveryRunning = false;
increaseFileSize(INCREMENT_PAGES);
}
private void openExisting() throws SQLException {
file = database.openFile(fileName, accessMode, true);
readStaticHeader();
freeListPagesPerList = PageFreeList.getPagesAddressed(pageSize);
fileLength = file.length();
pageCount = (int) (fileLength / pageSize);
if (pageCount < 6) {
// not enough pages - must be a new database
// that didn't get created correctly
throw Message.getSQLException(ErrorCode.FILE_CORRUPTED_1, fileName);
if (pageCount < MIN_PAGE_COUNT) {
close();
openNew();
return;
}
readVariableHeader();
log = new PageLog(this);
......@@ -248,26 +288,6 @@ public class PageStore implements CacheWriter {
recoveryRunning = false;
checkpoint();
}
} else {
// new
setPageSize(PAGE_SIZE_DEFAULT);
freeListPagesPerList = PageFreeList.getPagesAddressed(pageSize);
file = database.openFile(fileName, accessMode, false);
recoveryRunning = true;
increaseFileSize(INCREMENT_PAGES);
writeStaticHeader();
log = new PageLog(this);
openMetaIndex();
logFirstTrunkPage = allocatePage();
log.openForWriting(logFirstTrunkPage);
systemTableHeadPos = Index.EMPTY_HEAD;
recoveryRunning = false;
}
// lastUsedPage = getFreeList().getLastUsed() + 1;
} catch (SQLException e) {
close();
throw e;
}
}
/**
......@@ -311,19 +331,11 @@ public class PageStore implements CacheWriter {
}
}
}
try {
log.removeUntil(firstUncommittedLog);
} catch (SQLException e) {
int test;
e.printStackTrace();
}
}
private void readStaticHeader() throws SQLException {
long length = file.length();
if (length < PAGE_SIZE_MIN * 2) {
throw Message.getSQLException(ErrorCode.FILE_CORRUPTED_1, fileName);
}
database.notifyFileSize(length);
file.seek(FileStore.HEADER_LENGTH);
DataPage page = DataPage.create(database, new byte[PAGE_SIZE_MIN - FileStore.HEADER_LENGTH]);
......@@ -335,11 +347,7 @@ public class PageStore implements CacheWriter {
throw Message.getSQLException(ErrorCode.FILE_VERSION_ERROR_1, fileName);
}
if (writeVersion != 0) {
try {
file.close();
} catch (IOException e) {
throw Message.convertIOException(e, "close");
}
close();
accessMode = "r";
file = database.openFile(fileName, accessMode, true);
}
......@@ -372,7 +380,7 @@ public class PageStore implements CacheWriter {
*
* @param size the page size
*/
public void setPageSize(int size) throws SQLException {
private void setPageSize(int size) throws SQLException {
if (size < PAGE_SIZE_MIN || size > PAGE_SIZE_MAX) {
throw Message.getSQLException(ErrorCode.FILE_CORRUPTED_1, fileName);
}
......@@ -445,9 +453,11 @@ public class PageStore implements CacheWriter {
public void flushLog() throws SQLException {
if (file != null) {
synchronized (database) {
log.flush();
}
}
}
public Trace getTrace() {
return trace;
......@@ -494,13 +504,7 @@ public class PageStore implements CacheWriter {
}
private PageFreeList getFreeList(int i) throws SQLException {
int p;
if (i == 0) {
// TODO simplify
p = PAGE_ID_FREE_LIST_ROOT;
} else {
p = i * freeListPagesPerList;
}
int p = PAGE_ID_FREE_LIST_ROOT + i * freeListPagesPerList;
while (p >= pageCount) {
increaseFileSize(INCREMENT_PAGES);
}
......@@ -537,6 +541,7 @@ public class PageStore implements CacheWriter {
*/
public int allocatePage() throws SQLException {
int pos;
synchronized (database) {
// TODO could remember the first possible free list page
for (int i = 0;; i++) {
PageFreeList list = getFreeList(i);
......@@ -550,6 +555,7 @@ public class PageStore implements CacheWriter {
}
return pos;
}
}
private void increaseFileSize(int increment) throws SQLException {
pageCount += increment;
......@@ -569,6 +575,7 @@ public class PageStore implements CacheWriter {
if (trace.isDebugEnabled()) {
trace.debug("freePage " + pageId);
}
synchronized (database) {
cache.remove(pageId);
freePage(pageId);
if (recoveryRunning) {
......@@ -579,7 +586,7 @@ public class PageStore implements CacheWriter {
}
log.addUndo(pageId, old);
}
}
}
/**
......@@ -598,9 +605,11 @@ public class PageStore implements CacheWriter {
* @return the record or null
*/
public Record getRecord(int pos) {
synchronized (database) {
CacheObject obj = cache.find(pos);
return (Record) obj;
}
}
/**
* Read a page.
......@@ -621,12 +630,14 @@ public class PageStore implements CacheWriter {
* @param page the page
*/
public void readPage(int pos, DataPage page) throws SQLException {
synchronized (database) {
if (pos >= pageCount) {
throw Message.getSQLException(ErrorCode.FILE_CORRUPTED_1, pos + " of " + pageCount);
}
file.seek(pos << pageSizeShift);
file.readFully(page.getBytes(), 0, pageSize);
}
}
/**
* Get the page size.
......@@ -653,9 +664,11 @@ public class PageStore implements CacheWriter {
* @param data the data
*/
public void writePage(int pageId, DataPage data) throws SQLException {
synchronized (database) {
file.seek(((long) pageId) << pageSizeShift);
file.write(data.getBytes(), 0, pageSize);
}
}
/**
* Remove a page from the cache.
......@@ -663,8 +676,10 @@ public class PageStore implements CacheWriter {
* @param pageId the page id
*/
public void removeRecord(int pageId) {
synchronized (database) {
cache.remove(pageId);
}
}
Database getDatabase() {
return database;
......@@ -719,10 +734,12 @@ public class PageStore implements CacheWriter {
* @param add true if the row is added, false if it is removed
*/
public void logAddOrRemoveRow(Session session, int tableId, Row row, boolean add) throws SQLException {
synchronized (database) {
if (!recoveryRunning) {
log.logAddOrRemoveRow(session, tableId, row, add);
}
}
}
/**
* Mark a committed transaction.
......@@ -730,7 +747,12 @@ public class PageStore implements CacheWriter {
* @param session the session
*/
public void commit(Session session) throws SQLException {
synchronized (database) {
log.commit(session);
if (log.getSize() > maxLogSize) {
checkpoint();
}
}
}
/**
......@@ -835,12 +857,21 @@ public class PageStore implements CacheWriter {
}
TableData table = (TableData) p.getTable();
Column[] tableCols = table.getColumns();
Column[] cols = new Column[columns.length];
IndexColumn[] cols = new IndexColumn[columns.length];
for (int i = 0; i < columns.length; i++) {
cols[i] = tableCols[Integer.parseInt(columns[i])];
String c = columns[i];
IndexColumn ic = new IndexColumn();
int idx = c.indexOf('/');
if (idx >= 0) {
String s = c.substring(idx + 1);
ic.sortType = Integer.parseInt(s);
c = c.substring(0, idx);
}
IndexColumn[] indexColumns = IndexColumn.wrap(cols);
meta = table.addIndex(session, "I" + id, id, indexColumns, indexType, headPos, null);
Column column = tableCols[Integer.parseInt(c)];
ic.column = column;
cols[i] = ic;
}
meta = table.addIndex(session, "I" + id, id, cols, indexType, headPos, null);
}
metaObjects.put(id, meta);
}
......@@ -853,12 +884,19 @@ public class PageStore implements CacheWriter {
*/
public void addMeta(Index index, Session session) throws SQLException {
int type = index instanceof PageScanIndex ? META_TYPE_SCAN_INDEX : META_TYPE_BTREE_INDEX;
Column[] columns = index.getColumns();
String[] columnIndexes = new String[columns.length];
for (int i = 0; i < columns.length; i++) {
columnIndexes[i] = String.valueOf(columns[i].getColumnId());
}
String columnList = StringUtils.arrayCombine(columnIndexes, ',');
IndexColumn[] columns = index.getIndexColumns();
StatementBuilder buff = new StatementBuilder();
for (IndexColumn col : columns) {
buff.appendExceptFirst(",");
int id = col.column.getColumnId();
buff.append(id);
int sortType = col.sortType;
if (sortType != 0) {
buff.append('/');
buff.append(sortType);
}
}
String columnList = buff.toString();
Table table = index.getTable();
CompareMode mode = table.getCompareMode();
String options = mode.getName()+ "," + mode.getStrength();
......@@ -914,4 +952,13 @@ public class PageStore implements CacheWriter {
}
}
/**
* Set the maximum log file size in megabytes.
*
* @param maxSize the new maximum log file size
*/
public void setMaxLogSize(long maxSize) {
this.maxLogSize = maxSize;
}
}
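The metadata rows now encode each index column as its column id, optionally followed by a slash and the sort type, so descending and NULLS FIRST/LAST ordering survives reopening the database. A small standalone sketch of that encoding round trip (column ids and sort-type values are illustrative; the helper class is hypothetical, not part of PageStore):

import java.util.ArrayList;
import java.util.List;

public class IndexColumnEncoding {

    // encode (columnId, sortType) pairs the way addMeta() does above:
    // "id" when the sort type is 0, otherwise "id/sortType"
    static String encode(int[] columnIds, int[] sortTypes) {
        StringBuilder buff = new StringBuilder();
        for (int i = 0; i < columnIds.length; i++) {
            if (i > 0) {
                buff.append(',');
            }
            buff.append(columnIds[i]);
            if (sortTypes[i] != 0) {
                buff.append('/').append(sortTypes[i]);
            }
        }
        return buff.toString();
    }

    // decode the same format, as the metadata reader does above
    static List<int[]> decode(String columnList) {
        List<int[]> result = new ArrayList<int[]>();
        for (String c : columnList.split(",")) {
            int sortType = 0;
            int idx = c.indexOf('/');
            if (idx >= 0) {
                sortType = Integer.parseInt(c.substring(idx + 1));
                c = c.substring(0, idx);
            }
            result.add(new int[] { Integer.parseInt(c), sortType });
        }
        return result;
    }

    public static void main(String[] args) {
        // e.g. column 2 with the default sort type, column 0 with sort type 1
        String s = encode(new int[] { 2, 0 }, new int[] { 0, 1 });
        System.out.println(s + " -> " + decode(s).size() + " columns");
    }
}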
......@@ -126,16 +126,21 @@ public class PageStreamTrunk extends Record {
/**
* Free this page and all data pages.
*
* @return the number of pages freed
*/
void free() throws SQLException {
int free() throws SQLException {
DataPage empty = store.createDataPage();
store.freePage(getPos(), false, null);
int freed = 1;
for (int i = 0; i < pageCount; i++) {
int page = pageIds[i];
store.freePage(page, false, null);
freed++;
store.writePage(page, empty);
}
store.writePage(getPos(), empty);
return freed;
}
/**
......
......@@ -290,10 +290,13 @@ java org.h2.test.TestAll timer
// 2009-05-18: 18 tests fail with page store (first loop)
// 2009-05-30: 15 tests fail with page store (first loop)
// 2009-06-19: 10 tests fail with page store (first loop)
// 2009-06-24: 3 tests fail with page store (first loop)
// System.setProperty("h2.pageStore", "true");
/*
Console: Start Browser: if ip number changed, try localhost
test case for running out of disk space (using a special file system)
auto-build: prepare release
......
......@@ -13,6 +13,7 @@ import java.sql.SQLException;
import java.sql.Statement;
import java.util.Random;
import org.h2.constant.ErrorCode;
import org.h2.constant.SysProperties;
import org.h2.engine.Constants;
import org.h2.engine.Database;
......@@ -289,7 +290,7 @@ public class TestPowerOff extends TestBase {
}
conn.close();
} catch (SQLException e) {
if (e.getSQLState().equals("90098")) {
if (e.getSQLState().equals("" + ErrorCode.SIMULATED_POWER_OFF)) {
// this is ok
} else {
throw e;
......
......@@ -6,13 +6,6 @@
*/
package org.h2.test.unit;
import java.io.BufferedInputStream;
import java.io.BufferedOutputStream;
import java.io.File;
import java.io.FileInputStream;
import java.io.FileOutputStream;
import java.io.InputStream;
import java.io.OutputStream;
import java.sql.Connection;
import java.sql.ResultSet;
import java.sql.SQLException;
......@@ -20,37 +13,15 @@ import java.sql.Statement;
import java.util.Random;
import java.util.Set;
import java.util.TreeSet;
import org.h2.constant.SysProperties;
import org.h2.engine.ConnectionInfo;
import org.h2.engine.Database;
import org.h2.index.Cursor;
import org.h2.index.Index;
import org.h2.index.IndexType;
import org.h2.index.PageScanIndex;
import org.h2.result.Row;
import org.h2.schema.Schema;
import org.h2.store.PageInputStream;
import org.h2.store.PageOutputStream;
import org.h2.store.PageStore;
import org.h2.table.Column;
import org.h2.table.IndexColumn;
import org.h2.table.TableData;
import org.h2.test.TestBase;
import org.h2.util.IntArray;
import org.h2.util.ObjectArray;
import org.h2.value.Value;
import org.h2.value.ValueInt;
/**
* Test the page store.
*/
public class TestPageStore extends TestBase {
private Database db;
private Schema schema;
private TableData table;
private Index index;
/**
* Run just this test.
*
......@@ -63,11 +34,6 @@ public class TestPageStore extends TestBase {
public void test() throws Exception {
testFuzzOperations();
testScanIndex();
// testBtreeIndex();
// testAllocateFree();
// testStreamFuzz();
// testStreamPerformance(false, 1000);
}
private void testFuzzOperations() throws SQLException {
......@@ -144,236 +110,4 @@ public class TestPageStore extends TestBase {
trace(" " + m);
}
private void testBtreeIndex() throws SQLException {
if (!SysProperties.PAGE_STORE) {
return;
}
deleteDb("pageStore");
String fileName = getTestDir("/pageStore");
new File(fileName).delete();
File f = new File(fileName + ".dat");
f.delete();
db = getDatabase();
PageStore store = new PageStore(db, fileName, "rw", 8192);
store.setPageSize(1024);
store.open();
openBtreeIndex();
Row row;
for (int i = 10; i < 100; i += 10) {
row = table.getTemplateRow();
row.setValue(0, ValueInt.get(i));
row.setPos(i);
index.add(db.getSystemSession(), row);
}
row = table.getTemplateRow();
row.setValue(0, ValueInt.get(60));
row.setPos(60);
index.remove(db.getSystemSession(), row);
row = table.getTemplateRow();
row.setValue(0, ValueInt.get(60));
row.setPos(60);
index.add(db.getSystemSession(), row);
store.checkpoint();
store.close();
store = new PageStore(db, fileName, "rw", 8192);
store.open();
openBtreeIndex();
Cursor cursor = index.find(db.getSystemSession(), null, null);
for (int i = 10; i < 100; i += 10) {
assertTrue(cursor.next());
Row r = cursor.get();
assertEquals(i, r.getValue(0).getInt());
}
assertFalse(cursor.next());
store.close();
db.shutdownImmediately();
}
private void testScanIndex() throws SQLException {
if (!SysProperties.PAGE_STORE) {
return;
}
deleteDb("pageStore");
String fileName = getTestDir("/pageStore");
new File(fileName).delete();
File f = new File(fileName + ".dat");
f.delete();
db = getDatabase();
PageStore store = new PageStore(db, fileName, "rw", 8192);
store.setPageSize(1024);
store.open();
openScanIndex();
Row row;
for (int i = 10; i < 100; i += 10) {
row = table.getTemplateRow();
row.setValue(0, ValueInt.get(i));
row.setPos(i);
index.add(db.getSystemSession(), row);
}
row = table.getTemplateRow();
row.setValue(0, ValueInt.get(60));
row.setPos(60);
index.remove(db.getSystemSession(), row);
row = table.getTemplateRow();
row.setValue(0, ValueInt.get(60));
row.setPos(60);
index.add(db.getSystemSession(), row);
store.checkpoint();
store.close();
store = new PageStore(db, fileName, "rw", 8192);
store.open();
openScanIndex();
Cursor cursor = index.find(db.getSystemSession(), null, null);
for (int i = 10; i < 100; i += 10) {
assertTrue(cursor.next());
Row r = cursor.get();
assertEquals(i, r.getValue(0).getInt());
}
assertFalse(cursor.next());
store.close();
db.shutdownImmediately();
}
private Database getDatabase() throws SQLException {
String name = getTestDir("/pageStore");
ConnectionInfo ci = new ConnectionInfo(name);
return new Database(name, ci, null);
}
private void openScanIndex() throws SQLException {
ObjectArray cols = ObjectArray.newInstance();
cols.add(new Column("ID", Value.INT));
schema = new Schema(db, 0, "", null, true);
table = new TableData(schema, "PAGE_INDEX", 1, cols, true, true, false, 100, null);
index = (PageScanIndex) table.getScanIndex(
db.getSystemSession());
}
private void openBtreeIndex() throws SQLException {
ObjectArray cols = ObjectArray.newInstance();
cols.add(new Column("ID", Value.INT));
schema = new Schema(db, 0, "", null, true);
int id = db.allocateObjectId(true, true);
table = new TableData(schema, "BTREE_INDEX", id, cols, true, true, false, 100, null);
id = db.allocateObjectId(true, true);
table.addIndex(db.getSystemSession(), "BTREE", id,
IndexColumn.wrap(table.getColumns()),
IndexType.createNonUnique(true),
Index.EMPTY_HEAD, "");
index = (PageScanIndex) table.getScanIndex(
db.getSystemSession());
}
private void testAllocateFree() throws SQLException {
String fileName = getTestDir("/pageStore");
new File(fileName).delete();
File f = new File(fileName + ".dat");
f.delete();
Database db = getDatabase();
PageStore store = new PageStore(db, fileName, "rw", 8192);
store.setPageSize(1024);
store.open();
IntArray list = new IntArray();
int size = 270;
for (int i = 0; i < size; i++) {
int id = store.allocatePage();
list.add(id);
}
for (int i = 0; i < size; i++) {
int id = list.get(i);
store.freePage(id, false, null);
}
for (int i = 0; i < size; i++) {
int id = store.allocatePage();
int expected = list.get(list.size() - 1 - i);
assertEquals(expected, id);
}
store.close();
db.shutdownImmediately();
new File(fileName).delete();
f.delete();
}
private void testStreamPerformance(boolean file, int count) throws Exception {
String fileName = getTestDir("/pageStore");
new File(fileName).delete();
File f = new File(fileName + ".dat");
f.delete();
Database db = getDatabase();
PageStore store = new PageStore(db, fileName, "rw", 8192);
store.setPageSize(8 * 1024);
byte[] buff = new byte[100];
store.open();
int head = store.allocatePage();
OutputStream out;
InputStream in;
long start = System.currentTimeMillis();
if (file) {
out = new BufferedOutputStream(new FileOutputStream(f), 4 * 1024);
} else {
out = new PageOutputStream(store, 0);
}
for (int i = 0; i < count; i++) {
out.write(buff);
}
out.close();
if (file) {
in = new BufferedInputStream(new FileInputStream(f), 4 * 1024);
} else {
in = new PageInputStream(store, 0, 0);
}
while (true) {
int len = in.read(buff);
if (len < 0) {
break;
}
}
in.close();
println((file ? "file" : "pageStore") +
" " + (System.currentTimeMillis() - start));
store.close();
db.shutdownImmediately();
new File(fileName).delete();
f.delete();
}
private void testStreamFuzz() throws Exception {
String name = "mem:pageStoreStreams";
ConnectionInfo ci = new ConnectionInfo(name);
Database db = new Database(name, ci, null);
String fileName = getTestDir("/pageStoreStreams");
new File(fileName).delete();
PageStore store = new PageStore(db, fileName, "rw", 8192);
store.open();
Random random = new Random(1);
for (int i = 0; i < 10000; i += 1000) {
int len = i == 0 ? 0 : random.nextInt(i);
byte[] data = new byte[len];
random.nextBytes(data);
int head = store.allocatePage();
PageOutputStream out = new PageOutputStream(store, 0);
for (int p = 0; p < len;) {
int l = len == 0 ? 0 : Math.min(len - p, random.nextInt(len / 10));
out.write(data, p, l);
p += l;
}
out.close();
PageInputStream in = new PageInputStream(store, 0, 0);
byte[] data2 = new byte[len];
for (int off = 0;;) {
int l = random.nextInt(1 + len / 10) + 1;
l = in.read(data2, off, l);
if (l < 0) {
break;
}
off += l;
}
in.close();
assertEquals(data, data2);
}
store.close();
db.shutdownImmediately();
new File(fileName).delete();
}
}
......@@ -9,6 +9,7 @@ package org.h2.test.unit;
import java.sql.Connection;
import java.sql.SQLException;
import java.sql.Statement;
import org.h2.constant.SysProperties;
import org.h2.test.TestBase;
import org.h2.tools.DeleteDbFiles;
import org.h2.tools.Recover;
......@@ -43,7 +44,12 @@ public class TestRecovery extends TestBase {
conn = getConnection("recovery", "diff", "");
stat = conn.createStatement();
stat.execute("runscript from '" + baseDir + "/recovery.data.sql'");
String name = "recovery.data.sql";
if (SysProperties.PAGE_STORE) {
name = "recovery.h2.sql";
}
stat.execute("runscript from '" + baseDir + "/" + name + "'");
stat.execute("select * from test");
conn.close();
}
......