Commit 23b0d542 authored by Thomas Mueller

New experimental page store.

Parent 73787814
......@@ -488,7 +488,7 @@ public class Database implements DataHandler {
if (SysProperties.PAGE_STORE) {
PageStore store = getPageStore();
if (!store.isNew()) {
store.getLog().recover(true);
store.recover(true);
}
}
if (FileUtils.exists(dataFileName)) {
......@@ -572,11 +572,12 @@ public class Database implements DataHandler {
MetaRecord rec = (MetaRecord) records.get(i);
rec.execute(this, systemSession, eventListener);
}
if (SysProperties.PAGE_STORE) {
PageStore store = getPageStore();
if (!store.isNew()) {
getPageStore().getLog().recover(false);
store.checkpoint();
if (pageStore != null) {
if (!pageStore.isNew()) {
getPageStore().recover(false);
if (!readOnly) {
pageStore.checkpoint();
}
}
}
// try to recompile the views that are invalid
......
......@@ -52,7 +52,7 @@ abstract class PageData extends Record {
this.index = index;
this.parentPageId = parentPageId;
this.data = data;
this.setPos(pageId);
setPos(pageId);
}
/**
......
......@@ -13,23 +13,17 @@ import org.h2.message.Message;
import org.h2.result.Row;
import org.h2.store.DataPage;
import org.h2.store.PageStore;
import org.h2.util.IntArray;
import org.h2.store.Record;
/**
* A leaf page that contains data of one or more rows.
* Format:
* <ul><li>0-3: parent page id (0 for root)
* </li><li>4-4: page type
* </li><li>5-6: entry count
* </li><li>7-10: table id
* </li><li>only if there is overflow: 11-14: overflow page id
* </li><li>list of key / offset pairs (4 bytes key, 2 bytes offset)
* </li></ul>
* The format of an overflow page is:
* <ul><li>0-3: parent page id (0 for root)
* </li><li>4-4: page type
* </li><li>if there is more data: 5-8: next overflow page id
* </li><li>otherwise: 5-6: remaining size
* </li><li>5-8: table id
* </li><li>9-10: entry count
* </li><li>with overflow: 11-14: the first overflow page id
* </li><li>11- or 15-: list of key / offset pairs (4 bytes key, 2 bytes offset)
* </li><li>data
* </li></ul>
*/
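For orientation, the header layout documented above can be decoded as in the following sketch. It is not part of this commit and only mirrors the javadoc together with the read() and write() code in the hunks below; in particular, treating a missing Page.FLAG_LAST as "the row data continues on overflow pages" is an assumption based on how write() picks the page type.
// Sketch only: decode the new data leaf page header.
static void readLeafHeader(DataPage data) {
    int parentPageId = data.readInt();    // 0-3: parent page id (0 for root)
    int type = data.readByte();           // 4: page type
    int tableId = data.readInt();         // 5-8: table id
    int entryCount = data.readShortInt(); // 9-10: entry count
    int firstOverflowPageId = 0;
    if ((type & Page.FLAG_LAST) == 0) {   // assumed: no FLAG_LAST means the row data overflows
        firstOverflowPageId = data.readInt(); // 11-14: the first overflow page id
    }
    // followed by entryCount key / offset pairs (4 bytes key, 2 bytes offset), then the row data
}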
......@@ -37,8 +31,6 @@ class PageDataLeaf extends PageData {
private static final int KEY_OFFSET_PAIR_LENGTH = 6;
private static final int KEY_OFFSET_PAIR_START = 11;
private static final int OVERFLOW_DATA_START_MORE = 9;
private static final int OVERFLOW_DATA_START_LAST = 7;
/**
* The row offsets.
......@@ -55,16 +47,13 @@ class PageDataLeaf extends PageData {
*/
int firstOverflowPageId;
/**
* The page ids of all overflow pages (null if no overflow).
*/
int[] overflowPageIds;
/**
* The start of the data area.
*/
int start;
private boolean written;
PageDataLeaf(PageScanIndex index, int pageId, int parentPageId, DataPage data) {
super(index, pageId, parentPageId, data);
start = KEY_OFFSET_PAIR_START;
......@@ -73,6 +62,12 @@ class PageDataLeaf extends PageData {
void read() throws SQLException {
data.setPos(4);
int type = data.readByte();
int tableId = data.readInt();
if (tableId != index.getId()) {
throw Message.getSQLException(ErrorCode.FILE_CORRUPTED_1,
"page:" + getPageId() + " expected table:" + index.getId() +
"got:" + tableId);
}
entryCount = data.readShortInt();
offsets = new int[entryCount];
keys = new int[entryCount];
......@@ -130,6 +125,7 @@ class PageDataLeaf extends PageData {
offsets = newOffsets;
keys = newKeys;
rows = newRows;
index.getPageStore().updateRecord(this, true, data);
if (offset < start) {
if (entryCount > 1) {
Message.throwInternalError();
......@@ -140,20 +136,31 @@ class PageDataLeaf extends PageData {
// fix offset
offset = start;
offsets[x] = offset;
IntArray array = new IntArray();
int previous = getPos();
int dataOffset = pageSize;
int page = index.getPageStore().allocatePage();
do {
int next = index.getPageStore().allocatePage();
array.add(next);
remaining -= pageSize - OVERFLOW_DATA_START_LAST;
if (remaining > 0) {
remaining += 2;
if (firstOverflowPageId == 0) {
firstOverflowPageId = page;
}
int type, size, next;
if (remaining <= pageSize - PageDataLeafOverflow.START_LAST) {
type = Page.TYPE_DATA_OVERFLOW | Page.FLAG_LAST;
size = remaining;
next = 0;
} else {
type = Page.TYPE_DATA_OVERFLOW;
size = pageSize - PageDataLeafOverflow.START_MORE;
next = index.getPageStore().allocatePage();
}
PageDataLeafOverflow overflow = new PageDataLeafOverflow(this, page, type, previous, next, dataOffset, size);
index.getPageStore().updateRecord(overflow, true, null);
dataOffset += size;
remaining -= size;
previous = page;
page = next;
} while (remaining > 0);
overflowPageIds = new int[array.size()];
array.toArray(overflowPageIds);
firstOverflowPageId = overflowPageIds[0];
}
index.getPageStore().updateRecord(this, data);
return 0;
}
......@@ -195,20 +202,22 @@ class PageDataLeaf extends PageData {
int pageSize = store.getPageSize();
data.setPos(pageSize);
int next = firstOverflowPageId;
while (true) {
DataPage page = store.readPage(next);
page.setPos(4);
int type = page.readByte();
if (type == (Page.TYPE_DATA_OVERFLOW | Page.FLAG_LAST)) {
int size = page.readShortInt();
data.write(page.getBytes(), OVERFLOW_DATA_START_LAST, size);
break;
int offset = pageSize;
data.setPos(pageSize);
do {
Record record = store.getRecord(next);
PageDataLeafOverflow page;
if (record == null) {
DataPage data = store.readPage(next);
page = new PageDataLeafOverflow(this, next, data, offset);
} else {
next = page.readInt();
int size = pageSize - OVERFLOW_DATA_START_MORE;
data.write(page.getBytes(), OVERFLOW_DATA_START_MORE, size);
if (!(record instanceof PageDataLeafOverflow)) {
throw Message.getInternalError("page:"+ next + " " + record, null);
}
page = (PageDataLeafOverflow) record;
}
}
next = page.readInto(data);
} while (next != 0);
}
data.setPos(offsets[at]);
r = index.readRow(data);
......@@ -274,7 +283,7 @@ class PageDataLeaf extends PageData {
return true;
}
removeRow(i);
index.getPageStore().updateRecord(this, data);
index.getPageStore().updateRecord(this, true, data);
return false;
}
......@@ -296,6 +305,18 @@ class PageDataLeaf extends PageData {
}
public void write(DataPage buff) throws SQLException {
write();
index.getPageStore().writePage(getPos(), data);
}
PageStore getPageStore() {
return index.getPageStore();
}
private void write() throws SQLException {
if (written) {
return;
}
// make sure rows are read
for (int i = 0; i < entryCount; i++) {
getRowAt(i);
......@@ -309,6 +330,7 @@ class PageDataLeaf extends PageData {
type = Page.TYPE_DATA_LEAF;
}
data.writeByte((byte) type);
data.writeInt(index.getId());
data.writeShortInt(entryCount);
if (firstOverflowPageId != 0) {
data.writeInt(firstOverflowPageId);
......@@ -321,39 +343,12 @@ class PageDataLeaf extends PageData {
data.setPos(offsets[i]);
rows[i].write(data);
}
PageStore store = index.getPageStore();
int pageSize = store.getPageSize();
store.writePage(getPos(), data);
// don't need to write overflow if we just update the parent page id
if (data.length() > pageSize && overflowPageIds != null) {
if (firstOverflowPageId == 0) {
Message.throwInternalError();
}
DataPage overflow = store.createDataPage();
int parent = getPos();
int pos = pageSize;
int remaining = data.length() - pageSize;
for (int i = 0; i < overflowPageIds.length; i++) {
overflow.reset();
overflow.writeInt(parent);
int size;
if (remaining > pageSize - OVERFLOW_DATA_START_LAST) {
overflow.writeByte((byte) Page.TYPE_DATA_OVERFLOW);
overflow.writeInt(overflowPageIds[i + 1]);
size = pageSize - overflow.length();
} else {
overflow.writeByte((byte) (Page.TYPE_DATA_OVERFLOW | Page.FLAG_LAST));
size = remaining;
overflow.writeShortInt(remaining);
}
overflow.write(data.getBytes(), pos, size);
remaining -= size;
pos += size;
int id = overflowPageIds[i];
store.writePage(id, overflow);
parent = id;
}
}
written = true;
}
DataPage getDataPage() throws SQLException {
write();
return data;
}
}
/*
* Copyright 2004-2009 H2 Group. Multiple-Licensed under the H2 License,
* Version 1.0, and under the Eclipse Public License, Version 1.0
* (http://h2database.com/html/license.html).
* Initial Developer: H2 Group
*/
package org.h2.index;
import java.sql.SQLException;
import org.h2.constant.ErrorCode;
import org.h2.jdbc.JdbcSQLException;
import org.h2.message.Message;
import org.h2.store.DataPage;
import org.h2.store.PageStore;
import org.h2.store.Record;
/**
* Overflow data for a leaf page.
* Format:
* <ul><li>0-3: parent page id (0 for root)
* </li><li>4-4: page type
* </li><li>if there is more data: 5-8: next overflow page id
* </li><li>otherwise: 5-6: remaining size
* </li><li>data
* </li></ul>
*/
public class PageDataLeafOverflow extends Record {
/**
* The start of the data in the last overflow page.
*/
static final int START_LAST = 7;
/**
* The start of the data in an overflow page that is not the last one.
*/
static final int START_MORE = 9;
private final PageDataLeaf leaf;
/**
* The page type.
*/
private final int type;
/**
* The previous page (overflow or leaf).
*/
private final int previous;
/**
* The next overflow page, or 0.
*/
private final int next;
/**
* The number of content bytes.
*/
private final int size;
/**
* The position where the first content byte would start in the
* leaf page if the page size were unlimited.
*/
private final int offset;
private DataPage data;
PageDataLeafOverflow(PageDataLeaf leaf, int pageId, int type, int previous, int next, int offset, int size) {
this.leaf = leaf;
setPos(pageId);
this.type = type;
this.previous = previous;
this.next = next;
this.offset = offset;
this.size = size;
}
public PageDataLeafOverflow(PageDataLeaf leaf, int pageId, DataPage data, int offset) throws JdbcSQLException {
this.leaf = leaf;
setPos(pageId);
this.data = data;
this.offset = offset;
previous = data.readInt();
type = data.readByte();
if (type == (Page.TYPE_DATA_OVERFLOW | Page.FLAG_LAST)) {
size = data.readShortInt();
next = 0;
} else if (type == Page.TYPE_DATA_OVERFLOW) {
size = leaf.getPageStore().getPageSize() - START_MORE;
next = data.readInt();
} else {
throw Message.getSQLException(ErrorCode.FILE_CORRUPTED_1, "page:" + getPos() + " type:" + type);
}
}
/**
* Read the data into a target buffer.
*
* @param target the target data page
* @return the next page, or 0 if no next page
*/
int readInto(DataPage target) {
if (type == (Page.TYPE_DATA_OVERFLOW | Page.FLAG_LAST)) {
target.write(data.getBytes(), START_LAST, size);
return 0;
} else {
target.write(data.getBytes(), START_MORE, size);
return next;
}
}
public int getByteCount(DataPage dummy) throws SQLException {
return leaf.getByteCount(dummy);
}
public void write(DataPage buff) throws SQLException {
PageStore store = leaf.getPageStore();
DataPage overflow = store.createDataPage();
DataPage data = leaf.getDataPage();
overflow.writeInt(previous);
overflow.writeByte((byte) type);
if (type == Page.TYPE_DATA_OVERFLOW) {
overflow.writeInt(next);
} else {
overflow.writeShortInt(size);
}
overflow.write(data.getBytes(), offset, size);
store.writePage(getPos(), overflow);
}
}
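Since a chained (non-last) overflow page spends START_MORE = 9 header bytes while the final page only needs START_LAST = 7, the per-page payload differs by two bytes. A minimal sketch of the resulting page count, assuming the chaining rule from PageDataLeaf.addRow above (the helper name is illustrative and not part of the commit):
// Sketch only: number of overflow pages needed for 'remaining' bytes of row
// data that did not fit on the leaf page.
static int countOverflowPages(int pageSize, int remaining) {
    int pages = 0;
    while (remaining > 0) {
        pages++;
        if (remaining <= pageSize - PageDataLeafOverflow.START_LAST) {
            remaining = 0;                                           // last page: up to pageSize - 7 payload bytes
        } else {
            remaining -= pageSize - PageDataLeafOverflow.START_MORE; // full page: pageSize - 9 payload bytes
        }
    }
    return pages;
}
For example, with a 1024-byte page, 2040 remaining bytes need three overflow pages: 1015 + 1015 + 10.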
......@@ -83,15 +83,15 @@ class PageDataNode extends PageData {
}
int pivot = page.getKey(splitPoint - 1);
PageData page2 = page.split(splitPoint);
index.getPageStore().updateRecord(page, page.data);
index.getPageStore().updateRecord(page2, page2.data);
index.getPageStore().updateRecord(page, true, page.data);
index.getPageStore().updateRecord(page2, true, page2.data);
addChild(x, page2.getPageId(), pivot);
int maxEntries = (index.getPageStore().getPageSize() - 15) / 8;
if (entryCount >= maxEntries) {
int todoSplitAtLastInsertionPoint;
return entryCount / 2;
}
index.getPageStore().updateRecord(this, data);
index.getPageStore().updateRecord(this, true, data);
}
updateRowCount(1);
return 0;
......@@ -103,7 +103,7 @@ class PageDataNode extends PageData {
}
if (rowCountStored != UNKNOWN_ROWCOUNT) {
rowCountStored = UNKNOWN_ROWCOUNT;
index.getPageStore().updateRecord(this, data);
index.getPageStore().updateRecord(this, true, data);
}
}
......@@ -133,7 +133,7 @@ class PageDataNode extends PageData {
int child = childPageIds[i];
PageData p = index.getPage(child);
p.setParentPageId(getPos());
index.getPageStore().updateRecord(p, p.data);
index.getPageStore().updateRecord(p, true, p.data);
}
}
......@@ -198,7 +198,7 @@ class PageDataNode extends PageData {
return true;
}
removeChild(at);
index.getPageStore().updateRecord(this, data);
index.getPageStore().updateRecord(this, true, data);
return false;
}
......@@ -224,7 +224,7 @@ class PageDataNode extends PageData {
this.rowCount = rowCount;
if (rowCountStored != rowCount) {
rowCountStored = rowCount;
index.getPageStore().updateRecord(this, data);
index.getPageStore().updateRecord(this, true, data);
}
}
......
......@@ -20,6 +20,8 @@ import org.h2.store.Record;
import org.h2.table.Column;
import org.h2.table.IndexColumn;
import org.h2.table.TableData;
import org.h2.value.Value;
import org.h2.value.ValueLob;
/**
* The scan index allows accessing a row by key. It can be used to iterate over
......@@ -31,22 +33,8 @@ public class PageScanIndex extends BaseIndex implements RowIndex {
private PageStore store;
private TableData tableData;
private int headPos;
// TODO test that setPageId updates parent, overflow parent
// TODO remember last page with deleted keys (in the root page?),
// and chain such pages
// TODO order pages so that searching for a key
// doesn't seek backwards in the file
// TODO use an undo log and maybe redo log (for performance)
// TODO file position, content checksums
// TODO completely re-use keys of deleted rows
// TODO remove Database.objectIds
// TODO detect circles in linked lists
// (input stream, free list, extend pages...)
private int lastKey;
private long rowCount;
private long rowCountApproximation;
public PageScanIndex(TableData table, int id, IndexColumn[] columns, IndexType indexType, int headPos) throws SQLException {
initBaseIndex(table, id, table.getName() + "_TABLE_SCAN", columns, indexType);
......@@ -64,11 +52,11 @@ public class PageScanIndex extends BaseIndex implements RowIndex {
// new table
headPos = store.allocatePage();
PageDataLeaf root = new PageDataLeaf(this, headPos, Page.ROOT, store.createDataPage());
store.updateRecord(root, root.data);
store.updateRecord(root, true, root.data);
} else if (store.isNew()) {
// the system table for a new database
PageDataLeaf root = new PageDataLeaf(this, headPos, Page.ROOT, store.createDataPage());
store.updateRecord(root, root.data);
store.updateRecord(root, true, root.data);
} else {
lastKey = getPage(headPos).getLastKey();
rowCount = getPage(headPos).getRowCount();
......@@ -90,6 +78,18 @@ public class PageScanIndex extends BaseIndex implements RowIndex {
if (trace.isDebugEnabled()) {
trace.debug("add " + row.getPos());
}
if (tableData.getContainsLargeObject()) {
for (int i = 0; i < row.getColumnCount(); i++) {
Value v = row.getValue(i);
Value v2 = v.link(database, getId());
if (v2.isLinked()) {
session.unlinkAtCommitStop(v2);
}
if (v != v2) {
row.setValue(i, v2);
}
}
}
while (true) {
PageData root = getPage(headPos);
int splitPoint = root.addRow(row);
......@@ -109,13 +109,13 @@ public class PageScanIndex extends BaseIndex implements RowIndex {
page2.setParentPageId(headPos);
PageDataNode newRoot = new PageDataNode(this, rootPageId, Page.ROOT, store.createDataPage());
newRoot.init(page1, pivot, page2);
store.updateRecord(page1, page1.data);
store.updateRecord(page2, page2.data);
store.updateRecord(newRoot, null);
store.updateRecord(page1, true, page1.data);
store.updateRecord(page2, true, page2.data);
store.updateRecord(newRoot, true, null);
root = newRoot;
}
rowCount++;
store.getLog().addOrRemoveRow(session, tableData.getId(), row, true);
store.logAddOrRemoveRow(session, tableData.getId(), row, true);
}
/**
......@@ -184,11 +184,19 @@ public class PageScanIndex extends BaseIndex implements RowIndex {
if (trace.isDebugEnabled()) {
trace.debug("remove " + row.getPos());
}
if (tableData.getContainsLargeObject()) {
for (int i = 0; i < row.getColumnCount(); i++) {
Value v = row.getValue(i);
if (v.isLinked()) {
session.unlinkAtCommit((ValueLob) v);
}
}
}
int invalidateRowCount;
// setChanged(session);
if (rowCount == 1) {
int todoMaybeImprove;
truncate(session);
removeAllRows();
} else {
int key = row.getPos();
PageData root = getPage(headPos);
......@@ -199,7 +207,7 @@ public class PageScanIndex extends BaseIndex implements RowIndex {
// lastKey--;
// }
}
store.getLog().addOrRemoveRow(session, tableData.getId(), row, false);
store.logAddOrRemoveRow(session, tableData.getId(), row, false);
}
public void remove(Session session) throws SQLException {
......@@ -213,11 +221,19 @@ public class PageScanIndex extends BaseIndex implements RowIndex {
if (trace.isDebugEnabled()) {
trace.debug("truncate");
}
removeAllRows();
if (tableData.getContainsLargeObject() && tableData.getPersistent()) {
ValueLob.removeAllForTable(database, table.getId());
}
tableData.setRowCount(0);
}
private void removeAllRows() throws SQLException {
store.removeRecord(headPos);
int todoLogOldData;
int freePages;
PageDataLeaf root = new PageDataLeaf(this, headPos, Page.ROOT, store.createDataPage());
store.updateRecord(root, null);
store.updateRecord(root, true, null);
rowCount = 0;
lastKey = 0;
}
......@@ -246,7 +262,7 @@ public class PageScanIndex extends BaseIndex implements RowIndex {
}
public long getRowCountApproximation() {
return rowCountApproximation;
return rowCount;
}
public long getRowCount(Session session) {
......
......@@ -100,7 +100,7 @@ public class TreeIndex extends BaseIndex {
}
return;
default:
Message.throwInternalError("b: " + x.balance * sign);
Message.throwInternalError("b:" + x.balance * sign);
}
if (x == root) {
return;
......
......@@ -18,7 +18,6 @@ import org.h2.message.Message;
import org.h2.message.Trace;
import org.h2.store.DataPage;
import org.h2.store.DiskFile;
import org.h2.store.PageLog;
import org.h2.store.PageStore;
import org.h2.store.Record;
import org.h2.store.Storage;
......@@ -63,7 +62,7 @@ public class LogSystem {
private int keepFiles;
private boolean closed;
private String accessMode;
private PageLog pageLog;
private PageStore pageStore;
/**
* Create new transaction log object. This will not open or create files
......@@ -77,9 +76,7 @@ public class LogSystem {
*/
public LogSystem(Database database, String fileNamePrefix, boolean readOnly, String accessMode, PageStore pageStore) {
this.database = database;
if (pageStore != null) {
this.pageLog = pageStore.getLog();
}
this.pageStore = pageStore;
this.readOnly = readOnly;
this.accessMode = accessMode;
closed = true;
......@@ -472,8 +469,8 @@ public class LogSystem {
if (closed) {
return;
}
if (pageLog != null) {
pageLog.commit(session);
if (pageStore != null) {
pageStore.commit(session);
}
currentLog.commit(session);
session.setAllCommitted();
......
......@@ -41,7 +41,7 @@ public class PageFreeList extends Record {
* @return the page
*/
int allocate() throws SQLException {
store.updateRecord(this, data);
store.updateRecord(this, true, data);
int size = array.size();
if (size > 0) {
int x = array.get(size - 1);
......@@ -95,7 +95,7 @@ public class PageFreeList extends Record {
* @param pageId the page id to add
*/
void free(int pageId) throws SQLException {
store.updateRecord(this, data);
store.updateRecord(this, true, data);
if (array.size() < getMaxSize()) {
array.add(pageId);
} else {
......
......@@ -29,21 +29,44 @@ import org.h2.value.Value;
*/
public class PageLog {
private static final int NO_OP = 0;
private static final int UNDO = 1;
private static final int COMMIT = 2;
private static final int ADD = 3;
private static final int REMOVE = 4;
/**
* No operation.
*/
public static final int NO_OP = 0;
/**
* An undo log entry.
* Format: page id, page.
*/
public static final int UNDO = 1;
/**
* A commit entry of a session.
* Format: session id.
*/
public static final int COMMIT = 2;
/**
* Add a record to a table.
* Format: session id, table id, row.
*/
public static final int ADD = 3;
/**
* Remove a record from a table.
* Format: session id, table id, row.
*/
public static final int REMOVE = 4;
private PageStore store;
private Trace trace;
private BitField undo = new BitField();
private PageOutputStream pageOut;
private DataOutputStream out;
private int firstPage;
private DataPage data;
private boolean recoveryRunning;
private long operation;
private BitField undo = new BitField();
PageLog(PageStore store, int firstPage) {
this.store = store;
......@@ -69,12 +92,10 @@ public class PageLog {
*
* @param undo true if the undo step should be run
*/
public void recover(boolean undo) throws SQLException {
trace.debug("log recover");
void recover(boolean undo) throws SQLException {
DataInputStream in = new DataInputStream(new PageInputStream(store, 0, firstPage, Page.TYPE_LOG));
DataPage data = store.createDataPage();
try {
recoveryRunning = true;
while (true) {
int x = in.read();
if (x < 0) {
......@@ -84,45 +105,48 @@ public class PageLog {
// nothing to do
} else if (x == UNDO) {
int pageId = in.readInt();
in.read(data.getBytes(), 0, store.getPageSize());
in.readFully(data.getBytes(), 0, store.getPageSize());
if (undo) {
if (trace.isDebugEnabled()) {
trace.debug("log write " + pageId);
trace.debug("log undo " + pageId);
}
store.writePage(pageId, data);
}
} else if (x == ADD || x == REMOVE) {
int sessionId = in.readInt();
int tableId = in.readInt();
Row row = readRow(in);
Database db = store.getDatabase();
Row row = readRow(in, data);
if (!undo) {
Database db = store.getDatabase();
if (trace.isDebugEnabled()) {
trace.debug("log redo " + (x == ADD ? "+" : "-") + " " + row);
}
db.redo(tableId, row, x == ADD);
}
} else if (x == COMMIT) {
in.readInt();
}
}
} catch (Exception e) {
int todoOnlyIOExceptionAndSQLException;
int todoSomeExceptionAreOkSomeNot;
trace.debug("log recovery stopped: " + e.toString());
} finally {
recoveryRunning = false;
}
trace.debug("log recover done");
int todoDeleteAfterRecovering;
}
private Row readRow(DataInputStream in) throws IOException, SQLException {
/**
* Read a row from an input stream.
*
* @param in the input stream
* @param data a temporary buffer
* @return the row
*/
public static Row readRow(DataInputStream in, DataPage data) throws IOException, SQLException {
int pos = in.readInt();
int len = in.readInt();
data.reset();
data.checkCapacity(len);
in.read(data.getBytes(), 0, len);
in.readFully(data.getBytes(), 0, len);
int columnCount = data.readInt();
Value[] values = new Value[columnCount];
for (int i = 0; i < columnCount; i++) {
......@@ -141,7 +165,7 @@ public class PageLog {
* @param pageId the page id
* @param page the old page data
*/
public void addUndo(int pageId, DataPage page) throws SQLException {
void addUndo(int pageId, DataPage page) throws SQLException {
try {
if (undo.get(pageId)) {
return;
......@@ -160,7 +184,7 @@ public class PageLog {
*
* @param session the session
*/
public void commit(Session session) throws SQLException {
void commit(Session session) throws SQLException {
try {
trace.debug("log commit");
out.write(COMMIT);
......@@ -181,11 +205,8 @@ public class PageLog {
* @param row the row to add
* @param add true if the row is added, false if it is removed
*/
public void addOrRemoveRow(Session session, int tableId, Row row, boolean add) throws SQLException {
void logAddOrRemoveRow(Session session, int tableId, Row row, boolean add) throws SQLException {
try {
if (recoveryRunning) {
return;
}
if (trace.isDebugEnabled()) {
trace.debug("log " + (add?"+":"-") + " table:" + tableId +
" remaining:" + pageOut.getRemainingBytes() + " row:" + row);
......@@ -225,7 +246,7 @@ public class PageLog {
/**
* Flush the transaction log.
*/
public void flush() throws SQLException {
private void flush() throws SQLException {
try {
int todoUseLessSpace;
trace.debug("log flush");
......
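The record types documented in PageLog (NO_OP, UNDO, COMMIT, ADD, REMOVE) form a simple tagged stream, which is what recover() walks above. A minimal stand-alone reader sketch, assuming the public constants and PageLog.readRow introduced by this commit (the method name dumpLog and its surrounding class are illustrative only):
// Sketch only: iterate over a page log stream using the documented record formats.
// 'in' wraps a PageInputStream as in PageLog.recover(), 'data' is a temporary buffer.
// Needs java.io.DataInputStream/IOException, java.sql.SQLException,
// org.h2.result.Row, org.h2.store.DataPage, org.h2.store.PageLog.
static void dumpLog(DataInputStream in, DataPage data, int pageSize) throws IOException, SQLException {
    while (true) {
        int x = in.read();
        if (x < 0) {
            break;                              // end of the log stream
        } else if (x == PageLog.NO_OP) {
            // padding, nothing to do
        } else if (x == PageLog.UNDO) {
            int pageId = in.readInt();          // format: page id, page image
            in.readFully(data.getBytes(), 0, pageSize);
        } else if (x == PageLog.ADD || x == PageLog.REMOVE) {
            int sessionId = in.readInt();       // format: session id, table id, row
            int tableId = in.readInt();
            Row row = PageLog.readRow(in, data);
        } else if (x == PageLog.COMMIT) {
            int sessionId = in.readInt();       // format: session id
        }
    }
}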
......@@ -277,17 +277,26 @@ java org.h2.test.TestAll timer
System.setProperty("h2.maxMemoryRowsDistinct", "128");
System.setProperty("h2.check2", "true");
// failing tests: 11 (1st round)
// System.setProperty("h2.pageStore", "true");
/*
PageStore.switchLogIfPossible()
drop table test;
create table test(id int);
select 1 from test where 'a'=1;
Fails: Oracle, PostgreSQL, H2
Works: MySQL, HSQLDB
select for update in mvcc mode: only lock the selected records?
test case for daylight saving time enabled/move to a timezone (locking,...)
JCR: for each node type, create a table; one 'dynamic' table with parameter;
option to cache the results
<link rel="icon" type="image/png" href="/path/image.png">
create a short one page documentation
checksum: no need to checksum all data; every 128th byte is enough;
but need position+counter
create a short 4 pages documentation
http://blog.flexive.org/2008/12/05/porting-flexive-to-the-h2-database/
postgresql generate_series?
......
......@@ -495,7 +495,7 @@ public abstract class TestBase {
* @throws AssertionError if the values are not equal
*/
protected void assertEquals(byte[] expected, byte[] actual) {
assertTrue(expected.length == actual.length);
assertEquals("length", expected.length, actual.length);
for (int i = 0; i < expected.length; i++) {
if (expected[i] != actual[i]) {
fail("[" + i + "]: expected: " + (int) expected[i] + " actual: " + (int) actual[i]);
......
......@@ -20,6 +20,7 @@ import java.sql.Types;
import java.util.ArrayList;
import java.util.Random;
import org.h2.constant.SysProperties;
import org.h2.engine.Constants;
import org.h2.store.FileLister;
import org.h2.test.TestBase;
......@@ -297,7 +298,8 @@ public class TestTools extends TestBase {
conn = DriverManager.getConnection(url, "another", "another");
stat = conn.createStatement();
stat.execute("runscript from '" + baseDir + "/toolsRecover.data.sql'");
String suffix = SysProperties.PAGE_STORE ? ".h2.sql" : ".data.sql";
stat.execute("runscript from '" + baseDir + "/toolsRecover" + suffix + "'");
rs = stat.executeQuery("select * from \"test 2\"");
assertFalse(rs.next());
rs = stat.executeQuery("select * from test");
......