Commit 9b8db89c authored by Thomas Mueller

New experimental page store.

Parent a782a8cd
......@@ -451,6 +451,9 @@ public class Database implements DataHandler {
* @return true if one exists
*/
public static boolean exists(String name) {
if (SysProperties.PAGE_STORE) {
return FileUtils.exists(name + Constants.SUFFIX_PAGE_FILE);
}
return FileUtils.exists(name + Constants.SUFFIX_DATA_FILE);
}
......
......@@ -147,6 +147,10 @@ abstract class PageBtree extends Record {
* @return the row
*/
SearchRow getRow(int at) throws SQLException {
int test;
if (at < 0) {
System.out.println("stop");
}
SearchRow row = rows[at];
if (row == null) {
row = index.readRow(data, offsets[at]);
......
......@@ -60,8 +60,11 @@ public class PageBtreeIndex extends BaseIndex {
this.headPos = headPos;
PageBtree root = getPage(headPos);
rowCount = root.getRowCount();
if (!database.isReadOnly()) {
// could have been created before, but never committed
// TODO test if really required
store.updateRecord(root, false, null);
}
int reuseKeysIfManyDeleted;
}
if (trace.isDebugEnabled()) {
......@@ -307,6 +310,10 @@ public class PageBtreeIndex extends BaseIndex {
* @param row the row to write
*/
void writeRow(DataPage data, int offset, SearchRow row) throws SQLException {
if (offset < 0) {
int test;
System.out.println("stop");
}
data.setPos(offset);
data.writeInt(row.getPos());
for (Column col : columns) {
......
......@@ -7,6 +7,7 @@
package org.h2.index;
import java.sql.SQLException;
import org.h2.constant.ErrorCode;
import org.h2.message.Message;
import org.h2.result.SearchRow;
import org.h2.store.DataPage;
......@@ -73,6 +74,9 @@ class PageBtreeNode extends PageBtree {
return (entryCount / 2) + 1;
}
int offset = last - rowLength;
if(offset < 0) {
throw Message.getSQLException(ErrorCode.FEATURE_NOT_SUPPORTED_1, "Wide indexes");
}
int[] newOffsets = new int[entryCount + 1];
SearchRow[] newRows = new SearchRow[entryCount + 1];
int[] newChildPageIds = new int[entryCount + 2];
......@@ -105,7 +109,7 @@ class PageBtreeNode extends PageBtree {
int addRow(SearchRow row) throws SQLException {
while (true) {
int x = find(row, false, true);
int x = find(row, false, false);
PageBtree page = index.getPage(childPageIds[x]);
int splitPoint = page.addRow(row);
if (splitPoint == 0) {
......@@ -323,7 +327,8 @@ class PageBtreeNode extends PageBtree {
return;
}
PageBtreeNode next = (PageBtreeNode) index.getPage(parentPageId);
next.nextPage(cursor, getRow(entryCount - 1));
SearchRow r = entryCount == 0 ? row : getRow(entryCount - 1);
next.nextPage(cursor, r);
return;
}
PageBtree page = index.getPage(childPageIds[i]);
......
......@@ -69,7 +69,10 @@ public class PageScanIndex extends BaseIndex implements RowIndex {
lastKey = root.getLastKey();
rowCount = root.getRowCount();
// could have been created before, but never committed
if (!database.isReadOnly()) {
// TODO check if really required
store.updateRecord(root, false, null);
}
int reuseKeysIfManyDeleted;
}
if (trace.isDebugEnabled()) {
......
......@@ -107,4 +107,24 @@ public class PageInputStream extends InputStream {
remaining = data.getLength();
}
public void allocateAllPages() throws SQLException {
int trunkPage = trunkNext;
while (trunkPage != 0) {
store.allocatePage(trunkPage);
PageStreamTrunk t = new PageStreamTrunk(store, trunkPage);
t.read();
while (true) {
int n = t.getNextDataPage();
if (n == -1) {
break;
}
store.allocatePage(n);
}
trunkPage = t.getNextTrunk();
if (trunkPage != 0) {
break;
}
}
}
}
......@@ -35,6 +35,21 @@ import org.h2.value.Value;
*/
public class PageLog {
/**
* The recovery stage to undo changes (re-apply the backup).
*/
static final int RECOVERY_STAGE_UNDO = 0;
/**
* The recovery stage to allocate pages used by the transaction log.
*/
static final int RECOVERY_STAGE_ALLOCATE = 1;
/**
* The recovery stage to redo operations.
*/
static final int RECOVERY_STAGE_REDO = 2;
/**
* No operation.
*/
......@@ -133,15 +148,21 @@ public class PageLog {
}
/**
* Run the recovery process. There are two recovery stages: first only the
* undo steps are run (restoring the state before the last checkpoint). In
* the second stage the committed operations are re-applied.
* Run one recovery stage. There are three recovery stages: 0: only the undo
* steps are run (restoring the state before the last checkpoint). 1: the
* pages that are used by the transaction log are allocated. 2: the
* committed operations are re-applied.
*
* @param undo true if the undo step should be run
* @param stage the recovery stage
*/
void recover(boolean undo) throws SQLException {
void recover(int stage) throws SQLException {
if (trace.isDebugEnabled()) {
trace.debug("log recover undo:" + undo);
trace.debug("log recover stage:" + stage);
}
if (stage == RECOVERY_STAGE_ALLOCATE) {
PageInputStream in = new PageInputStream(store, firstTrunkPage, firstDataPage);
in.allocateAllPages();
return;
}
in = new DataInputStream(new PageInputStream(store, firstTrunkPage, firstDataPage));
int logId = 0;
......@@ -157,7 +178,7 @@ public class PageLog {
if (x == UNDO) {
int pageId = in.readInt();
in.readFully(data.getBytes(), 0, store.getPageSize());
if (undo) {
if (stage == RECOVERY_STAGE_UNDO) {
if (trace.isDebugEnabled()) {
trace.debug("log undo " + pageId);
}
......@@ -167,7 +188,7 @@ public class PageLog {
int sessionId = in.readInt();
int tableId = in.readInt();
Row row = readRow(in, data);
if (!undo) {
if (stage == RECOVERY_STAGE_REDO) {
if (isSessionCommitted(sessionId, logId, pos)) {
if (trace.isDebugEnabled()) {
trace.debug("log redo " + (x == ADD ? "+" : "-") + " table:" + tableId + " " + row);
......@@ -184,7 +205,7 @@ public class PageLog {
if (trace.isDebugEnabled()) {
trace.debug("log commit " + sessionId + " pos:" + pos);
}
if (undo) {
if (stage == RECOVERY_STAGE_UNDO) {
setLastCommitForSession(sessionId, logId, pos);
}
} else if (x == NOOP) {
......@@ -198,7 +219,7 @@ public class PageLog {
}
}
}
if (!undo) {
if (stage == RECOVERY_STAGE_REDO) {
// TODO probably still required for 2 phase commit
sessionStates = New.hashMap();
}
......
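For orientation, the three stages are driven in this fixed order by PageStore.recover(), which is rewritten further down in this commit; a condensed sketch of that call sequence (try/catch and the surrounding error handling are omitted here):

    recoveryRunning = true;
    log.recover(PageLog.RECOVERY_STAGE_UNDO);      // 0: write back the pre-checkpoint page images
    log.recover(PageLog.RECOVERY_STAGE_ALLOCATE);  // 1: mark the pages used by the log itself as allocated
    openMetaIndex();
    readMetaData();
    log.recover(PageLog.RECOVERY_STAGE_REDO);      // 2: re-apply operations of committed sessions
    switchLog();
    recoveryRunning = false;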
......@@ -169,6 +169,7 @@ public class PageOutputStream extends OutputStream {
if (trace.isDebugEnabled()) {
trace.debug("pageOut.storePage fill " + data.getPos());
}
reserved -= data.getRemaining();
data.write(null);
initNextData();
}
......
......@@ -20,7 +20,6 @@ import org.h2.index.IndexType;
import org.h2.index.PageBtreeIndex;
import org.h2.index.PageScanIndex;
import org.h2.log.LogSystem;
import org.h2.log.SessionState;
import org.h2.message.Message;
import org.h2.message.Trace;
import org.h2.message.TraceSystem;
......@@ -67,9 +66,8 @@ import org.h2.value.ValueString;
*/
public class PageStore implements CacheWriter {
// TODO currently working on PageLog.removeUntil
// TODO unlimited number of log streams (TestPageStoreDb)
// TODO check if PageLog.reservePages is required - yes it is - change it
// TODO currently working on PageBtreeNode Wide indexes
// TODO implement redo log in Recover tool
// TODO PageStore.openMetaIndex (desc and nulls first / last)
// TODO btree index with fixed size values doesn't need offset and so on
......@@ -233,14 +231,15 @@ public class PageStore implements CacheWriter {
readVariableHeader();
log = new PageLog(this);
log.openForReading(logFirstTrunkPage, logFirstDataPage);
recover(true);
recover(false);
recover();
if (!database.isReadOnly()) {
recoveryRunning = true;
log.free();
logFirstTrunkPage = allocatePage();
log.openForWriting(logFirstTrunkPage);
recoveryRunning = false;
checkpoint();
}
} else {
// new
setPageSize(PAGE_SIZE_DEFAULT);
......@@ -476,7 +475,6 @@ public class PageStore implements CacheWriter {
record.setChanged(true);
int pos = record.getPos();
allocatePage(pos);
// getFreeList().allocate(pos);
cache.update(pos, record);
if (logUndo && !recoveryRunning) {
if (old == null) {
......@@ -514,7 +512,7 @@ public class PageStore implements CacheWriter {
list.free(pageId);
}
private void allocatePage(int pageId) throws SQLException {
void allocatePage(int pageId) throws SQLException {
PageFreeList list = getFreeList(pageId / freeListPagesPerList);
list.allocate(pageId);
}
......@@ -642,9 +640,6 @@ public class PageStore implements CacheWriter {
* @param data the data
*/
public void writePage(int pageId, DataPage data) throws SQLException {
if ((pageId << pageSizeShift) <= 0) {
System.out.println("stop");
}
file.seek(((long) pageId) << pageSizeShift);
file.write(data.getBytes(), 0, pageSize);
}
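The debug check removed above flagged cases where the 32-bit shift pageId << pageSizeShift is no longer positive, i.e. has overflowed; the remaining seek avoids that by widening to long before shifting. A minimal illustration with made-up numbers (a 2 KB page size, so pageSizeShift is 11, and a page id of 2^21; neither value comes from the patch):

    int pageId = 2 * 1024 * 1024;                     // hypothetical page id
    int pageSizeShift = 11;                           // hypothetical: 2 KB pages
    long filePos = ((long) pageId) << pageSizeShift;  // 4 GB file offset, as intended
    int broken = pageId << pageSizeShift;             // wraps to 0 in int arithmetic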
......@@ -663,26 +658,21 @@ public class PageStore implements CacheWriter {
}
/**
* Run one recovery stage. There are two recovery stages: first (undo is
* true) only the undo steps are run (restoring the state before the last
* checkpoint). In the second stage (undo is false) the committed operations
* are re-applied.
*
* @param undo true if the undo step should be run
* Run one recovery stage. There are three recovery stages: 0: only the undo
* steps are run (restoring the state before the last checkpoint). 1: the
* pages that are used by the transaction log are allocated. 2: the
* committed operations are re-applied.
*/
private void recover(boolean undo) throws SQLException {
trace.debug("log recover #" + undo);
private void recover() throws SQLException {
trace.debug("log recover");
try {
recoveryRunning = true;
if (!undo) {
log.recover(PageLog.RECOVERY_STAGE_UNDO);
log.recover(PageLog.RECOVERY_STAGE_ALLOCATE);
openMetaIndex();
readMetaData();
}
log.recover(undo);
if (!undo) {
log.recover(PageLog.RECOVERY_STAGE_REDO);
switchLog();
}
} catch (SQLException e) {
int test;
e.printStackTrace();
......@@ -694,7 +684,6 @@ public class PageStore implements CacheWriter {
} finally {
recoveryRunning = false;
}
if (!undo) {
PageScanIndex index = (PageScanIndex) metaObjects.get(0);
if (index == null) {
systemTableHeadPos = Index.EMPTY_HEAD;
......@@ -707,7 +696,6 @@ public class PageStore implements CacheWriter {
metaObjects = null;
trace.debug("log recover done");
}
}
/**
* A record is added to a table, or removed from a table.
......
......@@ -35,10 +35,6 @@ public class PageStreamData extends Record {
setPos(pageId);
this.store = store;
this.trunk = trunk;
int test;
if(pageId==5) {
System.out.println("stop!");
}
}
/**
......@@ -117,4 +113,13 @@ public class PageStreamData extends Record {
data.read(buff, off, len);
}
/**
* Get the number of remaining data bytes of this page.
*
* @return the remaining byte count
*/
int getRemaining() {
return remaining;
}
}
\ No newline at end of file
......@@ -852,9 +852,10 @@ public class Recover extends Tool implements DataHandler {
}
}
private void setStorage(int storageId) {
private String setStorage(int storageId) {
this.storageId = storageId;
this.storageName = String.valueOf(storageId).replace('-', 'M');
this.storageName = "O_" + String.valueOf(storageId).replace('-', 'M');
return storageName;
}
/**
......@@ -1070,7 +1071,7 @@ public class Recover extends Tool implements DataHandler {
private void writeRow(PrintWriter writer, DataPage s, Value[] data) {
StringBuilder sb = new StringBuilder();
sb.append("INSERT INTO O_" + storageName + " VALUES(");
sb.append("INSERT INTO " + storageName + " VALUES(");
for (valueId = 0; valueId < recordLength; valueId++) {
try {
Value v = s.readValue();
......@@ -1224,11 +1225,13 @@ public class Recover extends Tool implements DataHandler {
Integer objectId = entry.getKey();
String name = entry.getValue();
if (objectIdSet.contains(objectId)) {
writer.println("INSERT INTO " + name + " SELECT * FROM O_" + objectId + ";");
setStorage(objectId);
writer.println("INSERT INTO " + name + " SELECT * FROM " + storageName + ";");
}
}
for (Integer objectId : objectIdSet) {
writer.println("DROP TABLE O_" + objectId + ";");
setStorage(objectId);
writer.println("DROP TABLE " + storageName + ";");
}
writer.println("DROP ALIAS READ_CLOB;");
writer.println("DROP ALIAS READ_BLOB;");
......@@ -1244,7 +1247,7 @@ public class Recover extends Tool implements DataHandler {
private void createTemporaryTable(PrintWriter writer) {
if (!objectIdSet.contains(storageId)) {
objectIdSet.add(storageId);
StatementBuilder buff = new StatementBuilder("CREATE TABLE O_");
StatementBuilder buff = new StatementBuilder("CREATE TABLE ");
buff.append(storageName).append('(');
for (int i = 0; i < recordLength; i++) {
buff.appendExceptFirst(", ");
......
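With these changes the "O_" prefix is added once, in setStorage(), and every generated statement derives the temporary table name from storageName. As a hedged illustration, for a made-up storage id 42 whose rows belong to a table named TEST (neither name appears in the patch), the emitted script takes this shape:

    CREATE TABLE O_42(...);                 -- column list as reconstructed by the tool
    INSERT INTO O_42 VALUES(...);           -- one statement per recovered row
    INSERT INTO TEST SELECT * FROM O_42;
    DROP TABLE O_42;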
......@@ -289,6 +289,7 @@ java org.h2.test.TestAll timer
// 2009-05-15: 25 tests fail with page store (first loop)
// 2009-05-18: 18 tests fail with page store (first loop)
// 2009-05-30: 15 tests fail with page store (first loop)
// 2009-06-16: 13 tests fail with page store (first loop)
// System.setProperty("h2.pageStore", "true");
/*
......
......@@ -445,12 +445,20 @@ public class TestTools extends TestBase {
private void testServer() throws SQLException {
Connection conn;
deleteDb("test");
Server server = Server.createTcpServer(new String[] { "-baseDir", baseDir, "-tcpPort", "9192", "-tcpAllowOthers" }).start();
Server server = Server.createTcpServer(
new String[] {
"-baseDir", baseDir,
"-tcpPort", "9192",
"-tcpAllowOthers" }).start();
conn = DriverManager.getConnection("jdbc:h2:tcp://localhost:9192/test", "sa", "");
conn.close();
server.stop();
Server.createTcpServer(
new String[] { "-ifExists", "-tcpPassword", "abc", "-baseDir", baseDir, "-tcpPort", "9192" }).start();
new String[] {
"-ifExists",
"-tcpPassword", "abc",
"-baseDir", baseDir,
"-tcpPort", "9192" }).start();
try {
conn = DriverManager.getConnection("jdbc:h2:tcp://localhost:9192/test2", "sa", "");
fail("should not be able to create new db");
......