Commit 23b0d542 authored by Thomas Mueller

New experimental page store.

Parent 73787814
......@@ -488,7 +488,7 @@ public class Database implements DataHandler {
if (SysProperties.PAGE_STORE) {
PageStore store = getPageStore();
if (!store.isNew()) {
store.getLog().recover(true);
store.recover(true);
}
}
if (FileUtils.exists(dataFileName)) {
......@@ -572,11 +572,12 @@ public class Database implements DataHandler {
MetaRecord rec = (MetaRecord) records.get(i);
rec.execute(this, systemSession, eventListener);
}
if (SysProperties.PAGE_STORE) {
PageStore store = getPageStore();
if (!store.isNew()) {
getPageStore().getLog().recover(false);
store.checkpoint();
if (pageStore != null) {
if (!pageStore.isNew()) {
getPageStore().recover(false);
if (!readOnly) {
pageStore.checkpoint();
}
}
}
// try to recompile the views that are invalid
......
......@@ -52,7 +52,7 @@ abstract class PageData extends Record {
this.index = index;
this.parentPageId = parentPageId;
this.data = data;
this.setPos(pageId);
setPos(pageId);
}
/**
......
......@@ -13,23 +13,17 @@ import org.h2.message.Message;
import org.h2.result.Row;
import org.h2.store.DataPage;
import org.h2.store.PageStore;
import org.h2.util.IntArray;
import org.h2.store.Record;
/**
* A leaf page that contains data of one or multiple rows.
* Format:
* <ul><li>0-3: parent page id (0 for root)
* </li><li>4-4: page type
* </li><li>5-6: entry count
* </li><li>7-10: table id
* </li><li>only if there is overflow: 11-14: overflow page id
* </li><li>list of key / offset pairs (4 bytes key, 2 bytes offset)
* </li></ul>
* The format of an overflow page is:
* <ul><li>0-3: parent page id (0 for root)
* </li><li>4-4: page type
* </li><li>if there is more data: 5-8: next overflow page id
* </li><li>otherwise: 5-6: remaining size
* </li><li>5-8: table id
* </li><li>9-10: entry count
* </li><li>with overflow: 11-14: the first overflow page id
* </li><li>11- or 15-: list of key / offset pairs (4 bytes key, 2 bytes offset)
* </li><li>data
* </li></ul>
*/
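For illustration (a sketch, not part of this commit): decoding the new leaf page header laid out above, assuming DataPage-style read accessors and that Page.FLAG_LAST in the type byte means the page has no overflow:
data.setPos(0);
int parentPageId = data.readInt();        // 0-3: 0 for the root page
int type = data.readByte();               // 4: Page.TYPE_DATA_LEAF, possibly | Page.FLAG_LAST
int tableId = data.readInt();             // 5-8: must match index.getId()
int entryCount = data.readShortInt();     // 9-10
int firstOverflowPageId = 0;
if ((type & Page.FLAG_LAST) == 0) {       // only present when there is overflow
    firstOverflowPageId = data.readInt(); // 11-14
}
int[] keys = new int[entryCount];
int[] offsets = new int[entryCount];
for (int i = 0; i < entryCount; i++) {
    keys[i] = data.readInt();             // 4 bytes key
    offsets[i] = data.readShortInt();     // 2 bytes offset of the row data
}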
......@@ -37,8 +31,6 @@ class PageDataLeaf extends PageData {
private static final int KEY_OFFSET_PAIR_LENGTH = 6;
private static final int KEY_OFFSET_PAIR_START = 11;
private static final int OVERFLOW_DATA_START_MORE = 9;
private static final int OVERFLOW_DATA_START_LAST = 7;
/**
* The row offsets.
......@@ -55,16 +47,13 @@ class PageDataLeaf extends PageData {
*/
int firstOverflowPageId;
/**
* The page ids of all overflow pages (null if no overflow).
*/
int[] overflowPageIds;
/**
* The start of the data area.
*/
int start;
private boolean written;
PageDataLeaf(PageScanIndex index, int pageId, int parentPageId, DataPage data) {
super(index, pageId, parentPageId, data);
start = KEY_OFFSET_PAIR_START;
......@@ -73,6 +62,12 @@ class PageDataLeaf extends PageData {
void read() throws SQLException {
data.setPos(4);
int type = data.readByte();
int tableId = data.readInt();
if (tableId != index.getId()) {
throw Message.getSQLException(ErrorCode.FILE_CORRUPTED_1,
"page:" + getPageId() + " expected table:" + index.getId() +
"got:" + tableId);
}
entryCount = data.readShortInt();
offsets = new int[entryCount];
keys = new int[entryCount];
......@@ -130,6 +125,7 @@ class PageDataLeaf extends PageData {
offsets = newOffsets;
keys = newKeys;
rows = newRows;
index.getPageStore().updateRecord(this, true, data);
if (offset < start) {
if (entryCount > 1) {
Message.throwInternalError();
......@@ -140,20 +136,31 @@ class PageDataLeaf extends PageData {
// fix offset
offset = start;
offsets[x] = offset;
IntArray array = new IntArray();
int previous = getPos();
int dataOffset = pageSize;
int page = index.getPageStore().allocatePage();
do {
int next = index.getPageStore().allocatePage();
array.add(next);
remaining -= pageSize - OVERFLOW_DATA_START_LAST;
if (remaining > 0) {
remaining += 2;
if (firstOverflowPageId == 0) {
firstOverflowPageId = page;
}
int type, size, next;
if (remaining <= pageSize - PageDataLeafOverflow.START_LAST) {
type = Page.TYPE_DATA_OVERFLOW | Page.FLAG_LAST;
size = remaining;
next = 0;
} else {
type = Page.TYPE_DATA_OVERFLOW;
size = pageSize - PageDataLeafOverflow.START_MORE;
next = index.getPageStore().allocatePage();
}
PageDataLeafOverflow overflow = new PageDataLeafOverflow(this, page, type, previous, next, dataOffset, size);
index.getPageStore().updateRecord(overflow, true, null);
dataOffset += size;
remaining -= size;
previous = page;
page = next;
} while (remaining > 0);
overflowPageIds = new int[array.size()];
array.toArray(overflowPageIds);
firstOverflowPageId = overflowPageIds[0];
}
index.getPageStore().updateRecord(this, data);
return 0;
}
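To make the allocation loop above concrete: with a 1024-byte page, a chained overflow page carries pageSize - PageDataLeafOverflow.START_MORE = 1015 content bytes, and a last page can hold up to pageSize - PageDataLeafOverflow.START_LAST = 1017. So if 2500 bytes remain after the leaf page, the loop writes two full chained pages (1015 + 1015 = 2030 bytes) and one last page with the final 470 bytes: three overflow pages in total.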
......@@ -195,20 +202,22 @@ class PageDataLeaf extends PageData {
int pageSize = store.getPageSize();
data.setPos(pageSize);
int next = firstOverflowPageId;
while (true) {
DataPage page = store.readPage(next);
page.setPos(4);
int type = page.readByte();
if (type == (Page.TYPE_DATA_OVERFLOW | Page.FLAG_LAST)) {
int size = page.readShortInt();
data.write(page.getBytes(), OVERFLOW_DATA_START_LAST, size);
break;
int offset = pageSize;
data.setPos(pageSize);
do {
Record record = store.getRecord(next);
PageDataLeafOverflow page;
if (record == null) {
DataPage data = store.readPage(next);
page = new PageDataLeafOverflow(this, next, data, offset);
} else {
next = page.readInt();
int size = pageSize - OVERFLOW_DATA_START_MORE;
data.write(page.getBytes(), OVERFLOW_DATA_START_MORE, size);
if (!(record instanceof PageDataLeafOverflow)) {
throw Message.getInternalError("page:"+ next + " " + record, null);
}
page = (PageDataLeafOverflow) record;
}
}
next = page.readInto(data);
} while (next != 0);
}
data.setPos(offsets[at]);
r = index.readRow(data);
......@@ -274,7 +283,7 @@ class PageDataLeaf extends PageData {
return true;
}
removeRow(i);
index.getPageStore().updateRecord(this, data);
index.getPageStore().updateRecord(this, true, data);
return false;
}
......@@ -296,6 +305,18 @@ class PageDataLeaf extends PageData {
}
public void write(DataPage buff) throws SQLException {
write();
index.getPageStore().writePage(getPos(), data);
}
PageStore getPageStore() {
return index.getPageStore();
}
private void write() throws SQLException {
if (written) {
return;
}
// make sure rows are read
for (int i = 0; i < entryCount; i++) {
getRowAt(i);
......@@ -309,6 +330,7 @@ class PageDataLeaf extends PageData {
type = Page.TYPE_DATA_LEAF;
}
data.writeByte((byte) type);
data.writeInt(index.getId());
data.writeShortInt(entryCount);
if (firstOverflowPageId != 0) {
data.writeInt(firstOverflowPageId);
......@@ -321,39 +343,12 @@ class PageDataLeaf extends PageData {
data.setPos(offsets[i]);
rows[i].write(data);
}
PageStore store = index.getPageStore();
int pageSize = store.getPageSize();
store.writePage(getPos(), data);
// don't need to write overflow if we just update the parent page id
if (data.length() > pageSize && overflowPageIds != null) {
if (firstOverflowPageId == 0) {
Message.throwInternalError();
}
DataPage overflow = store.createDataPage();
int parent = getPos();
int pos = pageSize;
int remaining = data.length() - pageSize;
for (int i = 0; i < overflowPageIds.length; i++) {
overflow.reset();
overflow.writeInt(parent);
int size;
if (remaining > pageSize - OVERFLOW_DATA_START_LAST) {
overflow.writeByte((byte) Page.TYPE_DATA_OVERFLOW);
overflow.writeInt(overflowPageIds[i + 1]);
size = pageSize - overflow.length();
} else {
overflow.writeByte((byte) (Page.TYPE_DATA_OVERFLOW | Page.FLAG_LAST));
size = remaining;
overflow.writeShortInt(remaining);
}
overflow.write(data.getBytes(), pos, size);
remaining -= size;
pos += size;
int id = overflowPageIds[i];
store.writePage(id, overflow);
parent = id;
}
}
written = true;
}
DataPage getDataPage() throws SQLException {
write();
return data;
}
}
/*
* Copyright 2004-2009 H2 Group. Multiple-Licensed under the H2 License,
* Version 1.0, and under the Eclipse Public License, Version 1.0
* (http://h2database.com/html/license.html).
* Initial Developer: H2 Group
*/
package org.h2.index;
import java.sql.SQLException;
import org.h2.constant.ErrorCode;
import org.h2.jdbc.JdbcSQLException;
import org.h2.message.Message;
import org.h2.store.DataPage;
import org.h2.store.PageStore;
import org.h2.store.Record;
/**
* Overflow data for a leaf page.
* Format:
* <ul><li>0-3: parent page id (0 for root)
* </li><li>4-4: page type
* </li><li>if there is more data: 5-8: next overflow page id
* </li><li>otherwise: 5-6: remaining size
* </li><li>data
* </li></ul>
*/
public class PageDataLeafOverflow extends Record {
/**
* The start of the data in the last overflow page.
*/
static final int START_LAST = 7;
/**
* The start of the data in an overflow page that is not the last one.
*/
static final int START_MORE = 9;
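These two constants follow directly from the header layout above: a last overflow page stores a 4-byte parent id, a 1-byte type and a 2-byte remaining size before its data (4 + 1 + 2 = 7), while a chained page stores a 4-byte next page id instead of the size (4 + 1 + 4 = 9).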
private final PageDataLeaf leaf;
/**
* The page type.
*/
private final int type;
/**
* The previous page (overflow or leaf).
*/
private final int previous;
/**
* The next overflow page, or 0.
*/
private final int next;
/**
* The number of content bytes.
*/
private final int size;
/**
* The offset of this page's first content byte within the overall
* leaf data, as if the leaf page had unlimited size.
*/
private final int offset;
private DataPage data;
PageDataLeafOverflow(PageDataLeaf leaf, int pageId, int type, int previous, int next, int offset, int size) {
this.leaf = leaf;
setPos(pageId);
this.type = type;
this.previous = previous;
this.next = next;
this.offset = offset;
this.size = size;
}
public PageDataLeafOverflow(PageDataLeaf leaf, int pageId, DataPage data, int offset) throws JdbcSQLException {
this.leaf = leaf;
setPos(pageId);
this.data = data;
this.offset = offset;
previous = data.readInt();
type = data.readByte();
if (type == (Page.TYPE_DATA_OVERFLOW | Page.FLAG_LAST)) {
size = data.readShortInt();
next = 0;
} else if (type == Page.TYPE_DATA_OVERFLOW) {
size = leaf.getPageStore().getPageSize() - START_MORE;
next = data.readInt();
} else {
throw Message.getSQLException(ErrorCode.FILE_CORRUPTED_1, "page:" + getPos() + " type:" + type);
}
}
/**
* Read the data into a target buffer.
*
* @param target the target data page
* @return the next page, or 0 if no next page
*/
int readInto(DataPage target) {
if (type == (Page.TYPE_DATA_OVERFLOW | Page.FLAG_LAST)) {
target.write(data.getBytes(), START_LAST, size);
return 0;
} else {
target.write(data.getBytes(), START_MORE, size);
return next;
}
}
public int getByteCount(DataPage dummy) throws SQLException {
return leaf.getByteCount(dummy);
}
public void write(DataPage buff) throws SQLException {
PageStore store = leaf.getPageStore();
DataPage overflow = store.createDataPage();
DataPage data = leaf.getDataPage();
overflow.writeInt(previous);
overflow.writeByte((byte) type);
if (type == Page.TYPE_DATA_OVERFLOW) {
overflow.writeInt(next);
} else {
overflow.writeShortInt(size);
}
overflow.write(data.getBytes(), offset, size);
store.writePage(getPos(), overflow);
}
}
......@@ -83,15 +83,15 @@ class PageDataNode extends PageData {
}
int pivot = page.getKey(splitPoint - 1);
PageData page2 = page.split(splitPoint);
index.getPageStore().updateRecord(page, page.data);
index.getPageStore().updateRecord(page2, page2.data);
index.getPageStore().updateRecord(page, true, page.data);
index.getPageStore().updateRecord(page2, true, page2.data);
addChild(x, page2.getPageId(), pivot);
int maxEntries = (index.getPageStore().getPageSize() - 15) / 8;
if (entryCount >= maxEntries) {
int todoSplitAtLastInsertionPoint;
return entryCount / 2;
}
index.getPageStore().updateRecord(this, data);
index.getPageStore().updateRecord(this, true, data);
}
updateRowCount(1);
return 0;
......@@ -103,7 +103,7 @@ class PageDataNode extends PageData {
}
if (rowCountStored != UNKNOWN_ROWCOUNT) {
rowCountStored = UNKNOWN_ROWCOUNT;
index.getPageStore().updateRecord(this, data);
index.getPageStore().updateRecord(this, true, data);
}
}
......@@ -133,7 +133,7 @@ class PageDataNode extends PageData {
int child = childPageIds[i];
PageData p = index.getPage(child);
p.setParentPageId(getPos());
index.getPageStore().updateRecord(p, p.data);
index.getPageStore().updateRecord(p, true, p.data);
}
}
......@@ -198,7 +198,7 @@ class PageDataNode extends PageData {
return true;
}
removeChild(at);
index.getPageStore().updateRecord(this, data);
index.getPageStore().updateRecord(this, true, data);
return false;
}
......@@ -224,7 +224,7 @@ class PageDataNode extends PageData {
this.rowCount = rowCount;
if (rowCountStored != rowCount) {
rowCountStored = rowCount;
index.getPageStore().updateRecord(this, data);
index.getPageStore().updateRecord(this, true, data);
}
}
......
......@@ -20,6 +20,8 @@ import org.h2.store.Record;
import org.h2.table.Column;
import org.h2.table.IndexColumn;
import org.h2.table.TableData;
import org.h2.value.Value;
import org.h2.value.ValueLob;
/**
* The scan index allows accessing a row by key. It can be used to iterate over
......@@ -31,22 +33,8 @@ public class PageScanIndex extends BaseIndex implements RowIndex {
private PageStore store;
private TableData tableData;
private int headPos;
// TODO test that setPageId updates parent, overflow parent
// TODO remember last page with deleted keys (in the root page?),
// and chain such pages
// TODO order pages so that searching for a key
// doesn't seek backwards in the file
// TODO use an undo log and maybe redo log (for performance)
// TODO file position, content checksums
// TODO completely re-use keys of deleted rows
// TODO remove Database.objectIds
// TODO detect circles in linked lists
// (input stream, free list, extend pages...)
private int lastKey;
private long rowCount;
private long rowCountApproximation;
public PageScanIndex(TableData table, int id, IndexColumn[] columns, IndexType indexType, int headPos) throws SQLException {
initBaseIndex(table, id, table.getName() + "_TABLE_SCAN", columns, indexType);
......@@ -64,11 +52,11 @@ public class PageScanIndex extends BaseIndex implements RowIndex {
// new table
headPos = store.allocatePage();
PageDataLeaf root = new PageDataLeaf(this, headPos, Page.ROOT, store.createDataPage());
store.updateRecord(root, root.data);
store.updateRecord(root, true, root.data);
} else if (store.isNew()) {
// the system table for a new database
PageDataLeaf root = new PageDataLeaf(this, headPos, Page.ROOT, store.createDataPage());
store.updateRecord(root, root.data);
store.updateRecord(root, true, root.data);
} else {
lastKey = getPage(headPos).getLastKey();
rowCount = getPage(headPos).getRowCount();
......@@ -90,6 +78,18 @@ public class PageScanIndex extends BaseIndex implements RowIndex {
if (trace.isDebugEnabled()) {
trace.debug("add " + row.getPos());
}
if (tableData.getContainsLargeObject()) {
for (int i = 0; i < row.getColumnCount(); i++) {
Value v = row.getValue(i);
Value v2 = v.link(database, getId());
if (v2.isLinked()) {
session.unlinkAtCommitStop(v2);
}
if (v != v2) {
row.setValue(i, v2);
}
}
}
while (true) {
PageData root = getPage(headPos);
int splitPoint = root.addRow(row);
......@@ -109,13 +109,13 @@ public class PageScanIndex extends BaseIndex implements RowIndex {
page2.setParentPageId(headPos);
PageDataNode newRoot = new PageDataNode(this, rootPageId, Page.ROOT, store.createDataPage());
newRoot.init(page1, pivot, page2);
store.updateRecord(page1, page1.data);
store.updateRecord(page2, page2.data);
store.updateRecord(newRoot, null);
store.updateRecord(page1, true, page1.data);
store.updateRecord(page2, true, page2.data);
store.updateRecord(newRoot, true, null);
root = newRoot;
}
rowCount++;
store.getLog().addOrRemoveRow(session, tableData.getId(), row, true);
store.logAddOrRemoveRow(session, tableData.getId(), row, true);
}
/**
......@@ -184,11 +184,19 @@ public class PageScanIndex extends BaseIndex implements RowIndex {
if (trace.isDebugEnabled()) {
trace.debug("remove " + row.getPos());
}
if (tableData.getContainsLargeObject()) {
for (int i = 0; i < row.getColumnCount(); i++) {
Value v = row.getValue(i);
if (v.isLinked()) {
session.unlinkAtCommit((ValueLob) v);
}
}
}
int invalidateRowCount;
// setChanged(session);
if (rowCount == 1) {
int todoMaybeImprove;
truncate(session);
removeAllRows();
} else {
int key = row.getPos();
PageData root = getPage(headPos);
......@@ -199,7 +207,7 @@ public class PageScanIndex extends BaseIndex implements RowIndex {
// lastKey--;
// }
}
store.getLog().addOrRemoveRow(session, tableData.getId(), row, false);
store.logAddOrRemoveRow(session, tableData.getId(), row, false);
}
public void remove(Session session) throws SQLException {
......@@ -213,11 +221,19 @@ public class PageScanIndex extends BaseIndex implements RowIndex {
if (trace.isDebugEnabled()) {
trace.debug("truncate");
}
removeAllRows();
if (tableData.getContainsLargeObject() && tableData.getPersistent()) {
ValueLob.removeAllForTable(database, table.getId());
}
tableData.setRowCount(0);
}
private void removeAllRows() throws SQLException {
store.removeRecord(headPos);
int todoLogOldData;
int freePages;
PageDataLeaf root = new PageDataLeaf(this, headPos, Page.ROOT, store.createDataPage());
store.updateRecord(root, null);
store.updateRecord(root, true, null);
rowCount = 0;
lastKey = 0;
}
......@@ -246,7 +262,7 @@ public class PageScanIndex extends BaseIndex implements RowIndex {
}
public long getRowCountApproximation() {
return rowCountApproximation;
return rowCount;
}
public long getRowCount(Session session) {
......
......@@ -100,7 +100,7 @@ public class TreeIndex extends BaseIndex {
}
return;
default:
Message.throwInternalError("b: " + x.balance * sign);
Message.throwInternalError("b:" + x.balance * sign);
}
if (x == root) {
return;
......
......@@ -18,7 +18,6 @@ import org.h2.message.Message;
import org.h2.message.Trace;
import org.h2.store.DataPage;
import org.h2.store.DiskFile;
import org.h2.store.PageLog;
import org.h2.store.PageStore;
import org.h2.store.Record;
import org.h2.store.Storage;
......@@ -63,7 +62,7 @@ public class LogSystem {
private int keepFiles;
private boolean closed;
private String accessMode;
private PageLog pageLog;
private PageStore pageStore;
/**
* Create new transaction log object. This will not open or create files
......@@ -77,9 +76,7 @@ public class LogSystem {
*/
public LogSystem(Database database, String fileNamePrefix, boolean readOnly, String accessMode, PageStore pageStore) {
this.database = database;
if (pageStore != null) {
this.pageLog = pageStore.getLog();
}
this.pageStore = pageStore;
this.readOnly = readOnly;
this.accessMode = accessMode;
closed = true;
......@@ -472,8 +469,8 @@ public class LogSystem {
if (closed) {
return;
}
if (pageLog != null) {
pageLog.commit(session);
if (pageStore != null) {
pageStore.commit(session);
}
currentLog.commit(session);
session.setAllCommitted();
......
......@@ -41,7 +41,7 @@ public class PageFreeList extends Record {
* @return the page
*/
int allocate() throws SQLException {
store.updateRecord(this, data);
store.updateRecord(this, true, data);
int size = array.size();
if (size > 0) {
int x = array.get(size - 1);
......@@ -95,7 +95,7 @@ public class PageFreeList extends Record {
* @param pageId the page id to add
*/
void free(int pageId) throws SQLException {
store.updateRecord(this, data);
store.updateRecord(this, true, data);
if (array.size() < getMaxSize()) {
array.add(pageId);
} else {
......
......@@ -29,21 +29,44 @@ import org.h2.value.Value;
*/
public class PageLog {
private static final int NO_OP = 0;
private static final int UNDO = 1;
private static final int COMMIT = 2;
private static final int ADD = 3;
private static final int REMOVE = 4;
/**
* No operation.
*/
public static final int NO_OP = 0;
/**
* An undo log entry.
* Format: page id, page.
*/
public static final int UNDO = 1;
/**
* A commit entry of a session.
* Format: session id.
*/
public static final int COMMIT = 2;
/**
* Add a record to a table.
* Format: session id, table id, row.
*/
public static final int ADD = 3;
/**
* Remove a record from a table.
* Format: session id, table id, row.
*/
public static final int REMOVE = 4;
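For illustration (a sketch, not part of this commit): scanning a stream tagged with these opcodes, the same way recover() below consumes it (in and data are assumed to be set up as in recover()):
int x;
while ((x = in.read()) >= 0) {
    if (x == UNDO) {                      // page id, then a full page image
        int pageId = in.readInt();
        in.readFully(data.getBytes(), 0, store.getPageSize());
    } else if (x == ADD || x == REMOVE) { // session id, table id, row
        int sessionId = in.readInt();
        int tableId = in.readInt();
        Row row = readRow(in, data);
    } else if (x == COMMIT) {             // session id
        int sessionId = in.readInt();
    }                                     // NO_OP: nothing to do
}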
private PageStore store;
private Trace trace;
private BitField undo = new BitField();
private PageOutputStream pageOut;
private DataOutputStream out;
private int firstPage;
private DataPage data;
private boolean recoveryRunning;
private long operation;
private BitField undo = new BitField();
PageLog(PageStore store, int firstPage) {
this.store = store;
......@@ -69,12 +92,10 @@ public class PageLog {
*
* @param undo true if the undo step should be run
*/
public void recover(boolean undo) throws SQLException {
trace.debug("log recover");
void recover(boolean undo) throws SQLException {
DataInputStream in = new DataInputStream(new PageInputStream(store, 0, firstPage, Page.TYPE_LOG));
DataPage data = store.createDataPage();
try {
recoveryRunning = true;
while (true) {
int x = in.read();
if (x < 0) {
......@@ -84,45 +105,48 @@ public class PageLog {
// nothing to do
} else if (x == UNDO) {
int pageId = in.readInt();
in.read(data.getBytes(), 0, store.getPageSize());
in.readFully(data.getBytes(), 0, store.getPageSize());
if (undo) {
if (trace.isDebugEnabled()) {
trace.debug("log write " + pageId);
trace.debug("log undo " + pageId);
}
store.writePage(pageId, data);
}
} else if (x == ADD || x == REMOVE) {
int sessionId = in.readInt();
int tableId = in.readInt();
Row row = readRow(in);
Database db = store.getDatabase();
Row row = readRow(in, data);
if (!undo) {
Database db = store.getDatabase();
if (trace.isDebugEnabled()) {
trace.debug("log redo " + (x == ADD ? "+" : "-") + " " + row);
}
db.redo(tableId, row, x == ADD);
}
} else if (x == COMMIT) {
in.readInt();
}
}
} catch (Exception e) {
int todoOnlyIOExceptionAndSQLException;
int todoSomeExceptionAreOkSomeNot;
trace.debug("log recovery stopped: " + e.toString());
} finally {
recoveryRunning = false;
}
trace.debug("log recover done");
int todoDeleteAfterRecovering;
}
private Row readRow(DataInputStream in) throws IOException, SQLException {
/**
* Read a row from an input stream.
*
* @param in the input stream
* @param data a temporary buffer
* @return the row
*/
public static Row readRow(DataInputStream in, DataPage data) throws IOException, SQLException {
int pos = in.readInt();
int len = in.readInt();
data.reset();
data.checkCapacity(len);
in.read(data.getBytes(), 0, len);
in.readFully(data.getBytes(), 0, len);
int columnCount = data.readInt();
Value[] values = new Value[columnCount];
for (int i = 0; i < columnCount; i++) {
......@@ -141,7 +165,7 @@ public class PageLog {
* @param pageId the page id
* @param page the old page data
*/
public void addUndo(int pageId, DataPage page) throws SQLException {
void addUndo(int pageId, DataPage page) throws SQLException {
try {
if (undo.get(pageId)) {
return;
......@@ -160,7 +184,7 @@ public class PageLog {
*
* @param session the session
*/
public void commit(Session session) throws SQLException {
void commit(Session session) throws SQLException {
try {
trace.debug("log commit");
out.write(COMMIT);
......@@ -181,11 +205,8 @@ public class PageLog {
* @param row the row to add
* @param add true if the row is added, false if it is removed
*/
public void addOrRemoveRow(Session session, int tableId, Row row, boolean add) throws SQLException {
void logAddOrRemoveRow(Session session, int tableId, Row row, boolean add) throws SQLException {
try {
if (recoveryRunning) {
return;
}
if (trace.isDebugEnabled()) {
trace.debug("log " + (add?"+":"-") + " table:" + tableId +
" remaining:" + pageOut.getRemainingBytes() + " row:" + row);
......@@ -225,7 +246,7 @@ public class PageLog {
/**
* Flush the transaction log.
*/
public void flush() throws SQLException {
private void flush() throws SQLException {
try {
int todoUseLessSpace;
trace.debug("log flush");
......
......@@ -11,10 +11,13 @@ import java.io.OutputStream;
import java.sql.SQLException;
import org.h2.constant.ErrorCode;
import org.h2.engine.Database;
import org.h2.engine.Session;
import org.h2.index.Page;
import org.h2.message.Message;
import org.h2.message.Trace;
import org.h2.message.TraceSystem;
import org.h2.result.Row;
import org.h2.util.BitField;
import org.h2.util.Cache;
import org.h2.util.Cache2Q;
import org.h2.util.CacheLRU;
......@@ -35,17 +38,48 @@ import org.h2.util.ObjectArray;
* <li>53: read version (0, otherwise opening the file fails)</li>
* <li>54-57: system table root page number (usually 1)</li>
* <li>58-61: free list head page number (usually 2)</li>
* <li>62-65: log head page number (usually 3)</li>
* <li>62-65: log[0] head page number (usually 3)</li>
* <li>66-69: log[1] head page number (usually 4)</li>
* </ul>
*/
public class PageStore implements CacheWriter {
private static final int PAGE_SIZE_MIN = 512;
private static final int PAGE_SIZE_MAX = 32768;
private static final int PAGE_SIZE_DEFAULT = 1024;
// TODO test that setPageId updates parent, overflow parent
// TODO order pages so that searching for a key
// doesn't seek backwards in the file
// TODO use an undo log and maybe redo log (for performance)
// TODO checksum: 0 for empty; position hash + every 128th byte,
// specially important for log
// TODO for lists: write sequence byte
// TODO completely re-use keys of deleted rows; maybe
// remember last page with deleted keys (in the root page?),
// and chain such pages
// TODO remove Database.objectIds
// TODO detect circles in linked lists
// (input stream, free list, extend pages...)
// at runtime and recovery
// synchronized correctly (on the index?)
// TODO two phase commit: append (not patch) commit & rollback
/**
* The smallest possible page size.
*/
public static final int PAGE_SIZE_MIN = 128;
/**
* The biggest possible page size.
*/
public static final int PAGE_SIZE_MAX = 32768;
/**
* The default page size.
*/
public static final int PAGE_SIZE_DEFAULT = 1024;
private static final int INCREMENT_PAGES = 128;
private static final int READ_VERSION = 0;
private static final int WRITE_VERSION = 0;
private static final int LOG_COUNT = 2;
private Database database;
private final Trace trace;
......@@ -59,7 +93,10 @@ public class PageStore implements CacheWriter {
private int pageSizeShift;
private int systemRootPageId;
private int freeListRootPageId;
private int logRootPageId;
private int activeLog;
private int[] logRootPageIds = new int[LOG_COUNT];
private boolean recoveryRunning;
/**
* The file size in bytes.
......@@ -84,9 +121,9 @@ public class PageStore implements CacheWriter {
private int freePageCount;
/**
* The transaction log.
* The transaction logs.
*/
private PageLog log;
private PageLog[] logs = new PageLog[LOG_COUNT];
/**
* True if this is a new file.
......@@ -102,12 +139,12 @@ public class PageStore implements CacheWriter {
* @param cacheSizeDefault the default cache size
*/
public PageStore(Database database, String fileName, String accessMode, int cacheSizeDefault) {
this.fileName = fileName;
this.accessMode = accessMode;
this.database = database;
trace = database.getTrace(Trace.PAGE_STORE);
int test;
// trace.setLevel(TraceSystem.DEBUG);
this.fileName = fileName;
this.accessMode = accessMode;
this.cacheSize = cacheSizeDefault;
String cacheType = database.getCacheType();
if (Cache2Q.TYPE_NAME.equals(cacheType)) {
......@@ -151,7 +188,7 @@ public class PageStore implements CacheWriter {
readHeader();
fileLength = file.length();
pageCount = (int) (fileLength / pageSize);
log = new PageLog(this, logRootPageId);
initLogs();
lastUsedPage = pageCount - 1;
while (true) {
DataPage page = readPage(lastUsedPage);
......@@ -169,27 +206,36 @@ public class PageStore implements CacheWriter {
systemRootPageId = 1;
freeListRootPageId = 2;
PageFreeList free = new PageFreeList(this, freeListRootPageId, 0);
updateRecord(free, null);
logRootPageId = 3;
lastUsedPage = 3;
pageCount = 3;
updateRecord(free, false, null);
for (int i = 0; i < LOG_COUNT; i++) {
logRootPageIds[i] = 3 + i;
}
lastUsedPage = pageCount;
int todoShouldBeOneMoreStartWith0;
pageCount = lastUsedPage;
increaseFileSize(INCREMENT_PAGES - pageCount);
writeHeader();
log = new PageLog(this, logRootPageId);
initLogs();
}
log.openForWriting();
getLog().openForWriting();
} catch (SQLException e) {
close();
throw e;
}
}
private void initLogs() {
for (int i = 0; i < LOG_COUNT; i++) {
logs[i] = new PageLog(this, logRootPageIds[i]);
}
}
/**
* Flush all pending changes to disk, and re-open the log file.
*/
public void checkpoint() throws SQLException {
trace.debug("checkpoint");
if (log == null) {
if (getLog() == null) {
// the file was never fully opened
return;
}
......@@ -202,13 +248,39 @@ public class PageStore implements CacheWriter {
writeBack(rec);
}
int todoFlushBeforeReopen;
log.reopen();
switchLogIfPossible();
int todoWriteDeletedPages;
}
pageCount = lastUsedPage + 1;
file.setLength(pageSize * pageCount);
}
private void switchLogIfPossible() {
int nextLogId = (activeLog + 1) % LOG_COUNT;
PageLog nextLog = logs[nextLogId];
// Session[] sessions = database.getSessions(true);
// int firstUncommittedLog = getLog().getId();
// int firstUncommittedPos = getLog().getPos();
// for (int i = 0; i < sessions.length; i++) {
// Session session = sessions[i];
// int log = session.getFirstUncommittedLog();
// int pos = session.getFirstUncommittedPos();
// if (pos != LOG_WRITTEN) {
// if (log < firstUncommittedLog ||
// (log == firstUncommittedLog && pos < firstUncommittedPos)) {
// firstUncommittedLog = log;
// firstUncommittedPos = pos;
// }
// }
// }
// if (nextLog.containsUncommitted())
activeLog = nextLogId;
// getLog().reopen();
}
private void readHeader() throws SQLException {
long length = file.length();
if (length < PAGE_SIZE_MIN) {
......@@ -235,7 +307,9 @@ public class PageStore implements CacheWriter {
}
systemRootPageId = page.readInt();
freeListRootPageId = page.readInt();
logRootPageId = page.readInt();
for (int i = 0; i < LOG_COUNT; i++) {
logRootPageIds[i] = page.readInt();
}
}
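For illustration (a sketch, not part of this commit): the header fields as listed in the class Javadoc, read the same way readHeader above and the Recover tool do, assuming a DataPage holding the first page of the file:
page.setPos(48);
int pageSize = page.readInt();             // 48-51
int writeVersion = page.readByte();        // 52: 0, otherwise writing fails
int readVersion = page.readByte();         // 53: 0, otherwise opening fails
int systemRootPageId = page.readInt();     // 54-57: usually 1
int freeListRootPageId = page.readInt();   // 58-61: usually 2
int[] logRootPageIds = new int[LOG_COUNT];
for (int i = 0; i < LOG_COUNT; i++) {
    logRootPageIds[i] = page.readInt();    // 62-65 and 66-69: usually 3 and 4
}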
/**
......@@ -282,7 +356,9 @@ public class PageStore implements CacheWriter {
page.writeByte((byte) READ_VERSION);
page.writeInt(systemRootPageId);
page.writeInt(freeListRootPageId);
page.writeInt(logRootPageId);
for (int i = 0; i < LOG_COUNT; i++) {
page.writeInt(logRootPageIds[i]);
}
file.seek(FileStore.HEADER_LENGTH);
file.write(page.getBytes(), 0, pageSize - FileStore.HEADER_LENGTH);
}
......@@ -303,6 +379,7 @@ public class PageStore implements CacheWriter {
}
public void flushLog() throws SQLException {
// TODO write log entries to increase Record.lastLog / lastPos
int todo;
}
......@@ -326,9 +403,10 @@ public class PageStore implements CacheWriter {
* Update a record.
*
* @param record the record
* @param old the old data
* @param logUndo if an undo entry need to be logged
* @param old the old data (if known)
*/
public void updateRecord(Record record, DataPage old) throws SQLException {
public void updateRecord(Record record, boolean logUndo, DataPage old) throws SQLException {
int todoLogHeaderPageAsWell;
if (trace.isDebugEnabled()) {
trace.debug("updateRecord " + record.getPos() + " " + record.toString());
......@@ -337,8 +415,11 @@ public class PageStore implements CacheWriter {
record.setChanged(true);
int pos = record.getPos();
cache.update(pos, record);
if (old != null) {
log.addUndo(record.getPos(), old);
if (logUndo) {
if (old == null) {
old = readPage(pos);
}
getLog().addUndo(record.getPos(), old);
}
}
}
......@@ -372,10 +453,11 @@ public class PageStore implements CacheWriter {
freePageCount--;
return id;
}
if (lastUsedPage >= pageCount) {
int id = ++lastUsedPage;
if (id >= pageCount) {
increaseFileSize(INCREMENT_PAGES);
}
return ++lastUsedPage;
return id;
}
private void increaseFileSize(int increment) throws SQLException {
......@@ -500,7 +582,7 @@ public class PageStore implements CacheWriter {
this.freeListRootPageId = pageId;
if (!existing) {
PageFreeList free = new PageFreeList(this, pageId, next);
updateRecord(free, null);
updateRecord(free, false, null);
}
}
......@@ -513,12 +595,54 @@ public class PageStore implements CacheWriter {
return systemRootPageId;
}
public PageLog getLog() {
return log;
PageLog getLog() {
return logs[activeLog];
}
Database getDatabase() {
return database;
}
/**
* Run the recovery process. There are two recovery stages: first only the
* undo steps are run (restoring the state before the last checkpoint). In
* the second stage the committed operations are re-applied.
*
* @param undo true if the undo step should be run
*/
public void recover(boolean undo) throws SQLException {
trace.debug("log recover");
try {
recoveryRunning = true;
int todoBothMaybe;
getLog().recover(undo);
} finally {
recoveryRunning = false;
}
trace.debug("log recover done");
}
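For illustration (a sketch, not part of this commit): the two stages as Database drives them when opening a database, per the first two hunks of this commit:
PageStore store = getPageStore();
if (!store.isNew()) {
    store.recover(true);       // stage 1: undo, back to the last checkpoint
}
// ... the meta records are read and executed here ...
if (!store.isNew()) {
    store.recover(false);      // stage 2: redo the committed operations
    if (!readOnly) {
        store.checkpoint();
    }
}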
/**
* A record is added to a table, or removed from a table.
*
* @param session the session
* @param tableId the table id
* @param row the row to add
* @param add true if the row is added, false if it is removed
*/
public void logAddOrRemoveRow(Session session, int tableId, Row row, boolean add) throws SQLException {
if (!recoveryRunning) {
getLog().logAddOrRemoveRow(session, tableId, row, add);
}
}
/**
* Mark a committed transaction.
*
* @param session the session
*/
public void commit(Session session) throws SQLException {
getLog().commit(session);
}
}
......@@ -25,6 +25,7 @@ import java.util.Iterator;
import java.util.Map;
import java.util.Map.Entry;
import org.h2.command.Parser;
import org.h2.constant.SysProperties;
import org.h2.engine.Constants;
import org.h2.engine.DbObject;
import org.h2.engine.MetaRecord;
......@@ -32,6 +33,7 @@ import org.h2.index.Page;
import org.h2.log.LogFile;
import org.h2.message.Message;
import org.h2.message.Trace;
import org.h2.result.Row;
import org.h2.result.SimpleRow;
import org.h2.security.SHA256;
import org.h2.store.DataHandler;
......@@ -40,6 +42,8 @@ import org.h2.store.DiskFile;
import org.h2.store.FileLister;
import org.h2.store.FileStore;
import org.h2.store.FileStoreInputStream;
import org.h2.store.PageLog;
import org.h2.store.PageStore;
import org.h2.util.ByteUtils;
import org.h2.util.FileUtils;
import org.h2.util.IOUtils;
......@@ -106,7 +110,6 @@ public class Recover extends Tool implements DataHandler {
public void run(String[] args) throws SQLException {
String dir = ".";
String db = null;
boolean removePassword = false;
for (int i = 0; args != null && i < args.length; i++) {
String arg = args[i];
if ("-dir".equals(arg)) {
......@@ -114,7 +117,7 @@ public class Recover extends Tool implements DataHandler {
} else if ("-db".equals(arg)) {
db = args[++i];
} else if ("-removePassword".equals(arg)) {
removePassword = true;
remove = true;
} else if ("-trace".equals(arg)) {
trace = true;
} else if (arg.equals("-help") || arg.equals("-?")) {
......@@ -126,7 +129,7 @@ public class Recover extends Tool implements DataHandler {
return;
}
}
if (removePassword) {
if (!SysProperties.PAGE_STORE && remove) {
removePassword(dir, db);
} else {
process(dir, db);
......@@ -332,7 +335,7 @@ public class Recover extends Tool implements DataHandler {
private void writeDataError(PrintWriter writer, String error, byte[] data, int dumpBlocks) {
writer.println("-- ERROR: " + error + " block:" + block + " blockCount:" + blockCount + " storageId:"
+ storageId + " recordLength: " + recordLength + " valueId:" + valueId);
+ storageId + " recordLength:" + recordLength + " valueId:" + valueId);
StringBuffer sb = new StringBuffer();
for (int i = 0; i < dumpBlocks * DiskFile.BLOCK_SIZE; i++) {
int x = data[i] & 0xff;
......@@ -699,11 +702,16 @@ public class Recover extends Tool implements DataHandler {
writer.println("CREATE ALIAS IF NOT EXISTS READ_CLOB FOR \"" + this.getClass().getName() + ".readClob\";");
writer.println("CREATE ALIAS IF NOT EXISTS READ_BLOB FOR \"" + this.getClass().getName() + ".readBlob\";");
resetSchema();
store = FileStore.open(null, fileName, "r");
store = FileStore.open(null, fileName, remove ? "rw" : "r");
long length = store.length();
byte[] buff = new byte[128];
DataPage s = DataPage.create(this, buff);
store.readFully(buff, 0, buff.length);
try {
store.init();
} catch (Exception e) {
writeError(writer, e);
}
DataPage s = DataPage.create(this, 128);
store.seek(0);
store.readFully(s.getBytes(), 0, 128);
s.setPos(48);
int pageSize = s.readInt();
int writeVersion = (int) s.readByte();
......@@ -717,19 +725,25 @@ public class Recover extends Tool implements DataHandler {
writer.println("-- systemTableRoot: " + systemTableRoot);
writer.println("-- freeListHead: " + freeListHead);
writer.println("-- logHead: " + logHead);
if (pageSize < PageStore.PAGE_SIZE_MIN || pageSize > PageStore.PAGE_SIZE_MAX) {
pageSize = PageStore.PAGE_SIZE_DEFAULT;
// use default values for other settings as well
systemTableRoot = 1;
freeListHead = 2;
logHead = 3;
writer.println("-- ERROR: page size; using " + pageSize);
}
int pageCount = (int) (length / pageSize);
blockCount = 1;
buff = new byte[pageSize];
s = DataPage.create(this, buff);
for (int page = 1; page < pageCount; page++) {
store.seek((long) page * pageSize);
store.readFully(buff, 0, pageSize);
s.reset();
for (long page = 1; page < pageCount; page++) {
s = DataPage.create(this, pageSize);
store.seek(page * pageSize);
store.readFully(s.getBytes(), 0, pageSize);
int parentPageId = s.readInt();
int type = s.readByte();
switch (type) {
case Page.TYPE_EMPTY:
writer.println("-- page " + page + ": empty");
// writer.println("-- page " + page + ": empty");
if (parentPageId != 0) {
writer.println("-- ERROR parent:" + parentPageId);
}
......@@ -746,19 +760,48 @@ public class Recover extends Tool implements DataHandler {
break;
case Page.TYPE_DATA_LEAF:
writer.println("-- page " + page + ": data leaf " + (last ? "(last)" : ""));
dumpPageDataLeaf(store, pageSize, writer, s, last);
dumpPageDataLeaf(store, pageSize, writer, s, last, page);
break;
case Page.TYPE_FREE_LIST:
writer.println("-- page " + page + ": free list " + (last ? "(last)" : ""));
break;
case Page.TYPE_LOG:
writer.println("-- page " + page + ": log " + (last ? "(last)" : ""));
dumpPageLog(store, writer, s, last);
break;
default:
writer.println("-- page " + page + ": ERROR unknown type " + type);
break;
}
}
writeSchema(writer);
DataInputStream in = new DataInputStream(
new PageInputStream(writer, this, store, logHead, pageSize, 0, Page.TYPE_LOG)
);
writer.println("-- log");
while (true) {
int x = in.read();
if (x < 0) {
break;
}
if (x == PageLog.NO_OP) {
// nothing to do
} else if (x == PageLog.UNDO) {
int pageId = in.readInt();
in.readFully(new byte[pageSize]);
writer.println("-- undo page " + pageId);
} else if (x == PageLog.ADD || x == PageLog.REMOVE) {
int sessionId = in.readInt();
storageId = in.readInt();
Row row = PageLog.readRow(in, s);
writer.println("-- session " + sessionId +
" table " + storageId +
" " + (x == PageLog.ADD ? "add" : "remove") + " " + row.toString());
} else if (x == PageLog.COMMIT) {
int sessionId = in.readInt();
writer.println("-- commit " + sessionId);
}
}
writer.close();
} catch (Throwable e) {
writeError(writer, e);
......@@ -768,12 +811,122 @@ public class Recover extends Tool implements DataHandler {
}
}
private void dumpPageDataLeaf(FileStore store, int pageSize, PrintWriter writer, DataPage s, boolean last) throws SQLException {
/**
* An input stream that reads the data from a page store.
*/
static class PageInputStream extends InputStream {
private final PrintWriter writer;
private final int type;
private final FileStore store;
private final DataPage page;
private final int pageSize;
private int parentPage;
private int nextPage;
private boolean endOfFile;
private int remaining;
public PageInputStream(PrintWriter writer, DataHandler handler,
FileStore store, int firstPage, int pageSize, int parent, int type) {
this.writer = writer;
this.store = store;
this.pageSize = pageSize;
this.type = type;
this.parentPage = parent;
nextPage = firstPage;
page = DataPage.create(handler, pageSize);
}
public int read() throws IOException {
byte[] b = new byte[1];
int len = read(b);
return len < 0 ? -1 : (b[0] & 255);
}
public int read(byte[] b) throws IOException {
return read(b, 0, b.length);
}
public int read(byte[] b, int off, int len) throws IOException {
if (len == 0) {
return 0;
}
int read = 0;
while (len > 0) {
int r = readBlock(b, off, len);
if (r < 0) {
break;
}
read += r;
off += r;
len -= r;
}
return read == 0 ? -1 : read;
}
private int readBlock(byte[] buff, int off, int len) throws IOException {
fillBuffer();
if (endOfFile) {
return -1;
}
int l = Math.min(remaining, len);
page.read(buff, off, l);
remaining -= l;
return l;
}
private void fillBuffer() throws IOException {
if (remaining > 0 || endOfFile) {
return;
}
if (nextPage == 0) {
endOfFile = true;
return;
}
page.reset();
try {
store.seek((long) nextPage * pageSize);
store.readFully(page.getBytes(), 0, pageSize);
page.reset();
int p = page.readInt();
int t = page.readByte();
boolean last = (t & Page.FLAG_LAST) != 0;
t &= ~Page.FLAG_LAST;
if (type != t || p != parentPage) {
writer.println("-- ERROR page:" +nextPage+ " type:" + t + " parent:" + p +
" expected type:" + type + " expected parent:" + parentPage);
}
parentPage = nextPage;
if (last) {
nextPage = 0;
remaining = page.readInt();
} else {
nextPage = page.readInt();
remaining = pageSize - page.length();
}
} catch (SQLException e) {
throw Message.convertToIOException(e);
}
}
}
private void dumpPageLog(FileStore store, PrintWriter writer, DataPage s, boolean last) {
if (last) {
int size = s.readInt();
writer.println("-- size:" + size);
} else {
int next = s.readInt();
writer.println("-- next:" + next);
}
}
private void dumpPageDataLeaf(FileStore store, int pageSize, PrintWriter writer, DataPage s, boolean last, long pageId) throws SQLException {
storageId = s.readInt();
int entryCount = s.readShortInt();
int tableId = s.readInt();
int[] keys = new int[entryCount];
int[] offsets = new int[entryCount];
int next = 0;
long next = 0;
if (!last) {
next = s.readInt();
}
......@@ -782,8 +935,7 @@ public class Recover extends Tool implements DataHandler {
offsets[i] = s.readShortInt();
}
if (!last) {
byte[] buff = new byte[pageSize];
DataPage s2 = DataPage.create(this, buff);
DataPage s2 = DataPage.create(this, pageSize);
s.setPos(pageSize);
while (true) {
store.seek(pageSize * next);
......@@ -792,23 +944,118 @@ public class Recover extends Tool implements DataHandler {
int type = s2.readByte();
if (type == (Page.TYPE_DATA_OVERFLOW | Page.FLAG_LAST)) {
int size = s2.readShortInt();
writer.println("-- chain:" + next + " type:" + type + " size:" + size);
s.write(s2.getBytes(), 7, size);
break;
} else {
} else if (type == Page.TYPE_DATA_OVERFLOW) {
next = s2.readInt();
if (next == 0) {
writeDataError(writer, "next:0", s2.getBytes(), 1);
break;
}
int size = pageSize - 9;
writer.println("-- chain:" + next + " type:" + type + " size:" + size + " next:" + next);
s.write(s2.getBytes(), 9, size);
} else {
writeDataError(writer, "type:" + type, s2.getBytes(), 1);
break;
}
}
}
for (int i = 0; i < entryCount; i++) {
int key = keys[i];
int off = offsets[i];
writer.println("-- [" + i + "] tableId: " + tableId + " key:" + key + " off: " + off);
writer.println("-- [" + i + "] storage:" + storageId + " key:" + key + " off:" + off);
s.setPos(off);
s.readInt();
if (remove && tableId == 0) {
writer.println("-- system table");
Value[] data = createRecord(writer, s);
if (data != null) {
createTemporaryTable(writer);
writeRow(writer, s, data);
if (remove && storageId == 0) {
String sql = data[3].getString();
if (sql.startsWith("CREATE USER ")) {
int saltIndex = ByteUtils.indexOf(s.getBytes(), "SALT ".getBytes(), off);
if (saltIndex >= 0) {
String userName = sql.substring("CREATE USER ".length(), sql.indexOf("SALT ") - 1);
if (userName.startsWith("\"")) {
// TODO doesn't work for all cases ("" inside user name)
userName = userName.substring(1, userName.length() - 1);
}
SHA256 sha = new SHA256();
byte[] userPasswordHash = sha.getKeyPasswordHash(userName, "".toCharArray());
byte[] salt = RandomUtils.getSecureBytes(Constants.SALT_LEN);
byte[] passwordHash = sha.getHashWithSalt(userPasswordHash, salt);
StringBuffer buff = new StringBuffer();
buff.append("SALT '");
buff.append(ByteUtils.convertBytesToString(salt));
buff.append("' HASH '");
buff.append(ByteUtils.convertBytesToString(passwordHash));
buff.append("'");
byte[] replacement = buff.toString().getBytes();
System.arraycopy(replacement, 0, s.getBytes(), saltIndex, replacement.length);
store.seek(pageSize * pageId);
store.write(s.getBytes(), 0, pageSize);
if (trace) {
out.println("User: " + userName);
}
remove = false;
}
}
}
}
}
}
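In page store mode, this is where the -removePassword option takes effect: the first CREATE USER record found in the system table (storageId 0) is patched in place with a fresh salt and the hash of an empty password, effectively resetting that user's password to empty; remove is then cleared so only one user is changed.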
private Value[] createRecord(PrintWriter writer, DataPage s) {
recordLength = s.readInt();
if (recordLength <= 0) {
writeDataError(writer, "recordLength<0", s.getBytes(), blockCount);
return null;
}
Value[] data;
try {
data = new Value[recordLength];
} catch (OutOfMemoryError e) {
writeDataError(writer, "out of memory", s.getBytes(), blockCount);
return null;
}
return data;
}
private void writeRow(PrintWriter writer, DataPage s, Value[] data) {
StringBuffer sb = new StringBuffer();
sb.append("INSERT INTO O_" + storageId + " VALUES(");
for (valueId = 0; valueId < recordLength; valueId++) {
try {
Value v = s.readValue();
data[valueId] = v;
if (valueId > 0) {
sb.append(", ");
}
sb.append(getSQL(v));
} catch (Exception e) {
writeDataError(writer, "exception " + e, s.getBytes(), blockCount);
continue;
} catch (OutOfMemoryError e) {
writeDataError(writer, "out of memory", s.getBytes(), blockCount);
continue;
}
}
sb.append(");");
writer.println(sb.toString());
writer.flush();
if (storageId == 0) {
try {
SimpleRow r = new SimpleRow(data);
MetaRecord meta = new MetaRecord(r);
schema.add(meta);
if (meta.getObjectType() == DbObject.TABLE_OR_VIEW) {
String sql = data[3].getString();
String name = extractTableOrViewName(sql);
tableMap.put(ObjectUtils.getInteger(meta.getId()), name);
}
} catch (Throwable t) {
writeError(writer, t);
}
}
}
......@@ -902,74 +1149,13 @@ public class Recover extends Tool implements DataHandler {
} else {
pageOwners[page] = storageId;
}
recordLength = s.readInt();
if (recordLength <= 0) {
writeDataError(writer, "recordLength<0", s.getBytes(), blockCount);
continue;
}
Value[] data;
try {
data = new Value[recordLength];
} catch (OutOfMemoryError e) {
writeDataError(writer, "out of memory", s.getBytes(), blockCount);
continue;
}
createTemporaryTable(writer);
StringBuffer sb = new StringBuffer();
sb.append("INSERT INTO O_" + storageId + " VALUES(");
for (valueId = 0; valueId < recordLength; valueId++) {
try {
Value v = s.readValue();
data[valueId] = v;
if (valueId > 0) {
sb.append(", ");
}
sb.append(getSQL(v));
} catch (Exception e) {
writeDataError(writer, "exception " + e, s.getBytes(), blockCount);
continue;
} catch (OutOfMemoryError e) {
writeDataError(writer, "out of memory", s.getBytes(), blockCount);
continue;
}
}
sb.append(");");
writer.println(sb.toString());
writer.flush();
if (storageId == 0) {
try {
SimpleRow r = new SimpleRow(data);
MetaRecord meta = new MetaRecord(r);
schema.add(meta);
if (meta.getObjectType() == DbObject.TABLE_OR_VIEW) {
String sql = data[3].getString();
String name = extractTableOrViewName(sql);
tableMap.put(ObjectUtils.getInteger(meta.getId()), name);
}
} catch (Throwable t) {
writeError(writer, t);
}
}
}
MetaRecord.sort(schema);
for (int i = 0; i < schema.size(); i++) {
MetaRecord m = (MetaRecord) schema.get(i);
writer.println(m.getSQL() + ";");
}
for (Iterator it = tableMap.entrySet().iterator(); it.hasNext();) {
Map.Entry entry = (Entry) it.next();
Integer objectId = (Integer) entry.getKey();
String name = (String) entry.getValue();
if (objectIdSet.contains(objectId)) {
writer.println("INSERT INTO " + name + " SELECT * FROM O_" + objectId + ";");
Value[] data = createRecord(writer, s);
if (data != null) {
createTemporaryTable(writer);
writeRow(writer, s, data);
}
}
for (Iterator it = objectIdSet.iterator(); it.hasNext();) {
Integer objectId = (Integer) it.next();
writer.println("DROP TABLE O_" + objectId + ";");
}
writer.println("DROP ALIAS READ_CLOB;");
writer.println("DROP ALIAS READ_BLOB;");
writeSchema(writer);
writer.close();
} catch (Throwable e) {
writeError(writer, e);
......@@ -979,6 +1165,28 @@ public class Recover extends Tool implements DataHandler {
}
}
private void writeSchema(PrintWriter writer) {
MetaRecord.sort(schema);
for (int i = 0; i < schema.size(); i++) {
MetaRecord m = (MetaRecord) schema.get(i);
writer.println(m.getSQL() + ";");
}
for (Iterator it = tableMap.entrySet().iterator(); it.hasNext();) {
Map.Entry entry = (Entry) it.next();
Integer objectId = (Integer) entry.getKey();
String name = (String) entry.getValue();
if (objectIdSet.contains(objectId)) {
writer.println("INSERT INTO " + name + " SELECT * FROM O_" + objectId + ";");
}
}
for (Iterator it = objectIdSet.iterator(); it.hasNext();) {
Integer objectId = (Integer) it.next();
writer.println("DROP TABLE O_" + objectId + ";");
}
writer.println("DROP ALIAS READ_CLOB;");
writer.println("DROP ALIAS READ_BLOB;");
}
private void createTemporaryTable(PrintWriter writer) {
if (!objectIdSet.contains(ObjectUtils.getInteger(storageId))) {
objectIdSet.add(ObjectUtils.getInteger(storageId));
......
......@@ -277,17 +277,26 @@ java org.h2.test.TestAll timer
System.setProperty("h2.maxMemoryRowsDistinct", "128");
System.setProperty("h2.check2", "true");
// failing tests: 11 (1st round)
// System.setProperty("h2.pageStore", "true");
/*
PageStore.switchLogIfPossible()
drop table test;
create table test(id int);
select 1 from test where 'a'=1;
Fails: Oracle, PostgreSQL, H2
Works: MySQL, HSQLDB
select for update in mvcc mode: only lock the selected records?
test case for daylight saving time enabled/move to a timezone (locking,...)
JCR: for each node type, create a table; one 'dynamic' table with parameter;
option to cache the results
<link rel="icon" type="image/png" href="/path/image.png">
create a short one page documentation
checksum: no need to checksum all data; every 128th byte is enough;
but need position+counter
create a short 4 pages documentation
http://blog.flexive.org/2008/12/05/porting-flexive-to-the-h2-database/
postgresql generate_series?
......
......@@ -495,7 +495,7 @@ public abstract class TestBase {
* @throws AssertionError if the values are not equal
*/
protected void assertEquals(byte[] expected, byte[] actual) {
assertTrue(expected.length == actual.length);
assertEquals("length", expected.length, actual.length);
for (int i = 0; i < expected.length; i++) {
if (expected[i] != actual[i]) {
fail("[" + i + "]: expected: " + (int) expected[i] + " actual: " + (int) actual[i]);
......
......@@ -20,6 +20,7 @@ import java.sql.Types;
import java.util.ArrayList;
import java.util.Random;
import org.h2.constant.SysProperties;
import org.h2.engine.Constants;
import org.h2.store.FileLister;
import org.h2.test.TestBase;
......@@ -297,7 +298,8 @@ public class TestTools extends TestBase {
conn = DriverManager.getConnection(url, "another", "another");
stat = conn.createStatement();
stat.execute("runscript from '" + baseDir + "/toolsRecover.data.sql'");
String suffix = SysProperties.PAGE_STORE ? ".h2.sql" : ".data.sql";
stat.execute("runscript from '" + baseDir + "/toolsRecover" + suffix + "'");
rs = stat.executeQuery("select * from \"test 2\"");
assertFalse(rs.next());
rs = stat.executeQuery("select * from test");
......