提交 3e152f3b，作者：Thomas Mueller

New experimental page store.

上级 b3ef328e
......@@ -142,6 +142,12 @@ public class PageFreeList extends Record {
return store.getPageSize() >> 2;
}
/**
 * Check whether the given page is currently marked as used.
 *
 * @param pageId the page to check
 * @return true if it is in use
 */
boolean isUsed(int pageId) {
    // the bit set is indexed relative to the first page this free-list covers
    int bitIndex = pageId - getPos();
    return used.get(bitIndex);
}
......
......@@ -146,6 +146,12 @@ public class PageLog {
void free() throws SQLException {
while (this.firstTrunkPage != 0) {
// first remove all old log pages
if (store.getRecord(firstTrunkPage) != null) {
// if the page is in use, don't free it
// TODO cleanup - this is a hack
int todoCleanup;
break;
}
PageStreamTrunk t = new PageStreamTrunk(store, this.firstTrunkPage);
try {
t.read();
......@@ -591,7 +597,7 @@ public class PageLog {
}
long getSize() {
return pageOut.getSize();
return pageOut == null ? 0 : pageOut.getSize();
}
ObjectArray<InDoubtTransaction> getInDoubtTransactions() {
......
......@@ -110,7 +110,8 @@ public class PageStore implements CacheWriter {
// TODO var int: see google protocol buffers
// TODO SessionState.logId is no longer needed
// TODO PageData and PageBtree addRowTry: try to simplify
// TODO performance: maybe don't save direct parent in btree nodes (only root)
// TODO performance: don't save direct parent in btree nodes (only root)
// TODO space re-use: run TestPerformance multiple times, size should stay
// TODO when removing DiskFile:
// remove CacheObject.blockCount
......@@ -183,6 +184,7 @@ public class PageStore implements CacheWriter {
private int systemTableHeadPos;
// TODO reduce DEFAULT_MAX_LOG_SIZE, and don't divide here
private long maxLogSize = Constants.DEFAULT_MAX_LOG_SIZE / 10;
private Session systemSession;
/**
* Create a new page store object.
......@@ -198,10 +200,11 @@ public class PageStore implements CacheWriter {
this.database = database;
trace = database.getTrace(Trace.PAGE_STORE);
int test;
// trace.setLevel(TraceSystem.DEBUG);
//trace.setLevel(TraceSystem.DEBUG);
this.cacheSize = cacheSizeDefault;
String cacheType = database.getCacheType();
this.cache = CacheLRU.getCache(this, cacheType, cacheSize);
systemSession = new Session(database, null, 0);
}
/**
......@@ -457,7 +460,9 @@ public class PageStore implements CacheWriter {
public void close() throws SQLException {
try {
trace.debug("close");
if (log != null) {
log.close();
}
if (file != null) {
file.close();
}
......@@ -733,7 +738,7 @@ public class PageStore implements CacheWriter {
systemTableHeadPos = index.getHeadPos();
}
for (Index openIndex : metaObjects.values()) {
openIndex.close(database.getSystemSession());
openIndex.close(systemSession);
}
trace.debug("log recover done");
}
......@@ -799,7 +804,7 @@ public class PageStore implements CacheWriter {
void redo(int tableId, Row row, boolean add) throws SQLException {
if (tableId == META_TABLE_ID) {
if (add) {
addMeta(row, database.getSystemSession());
addMeta(row, systemSession, true);
} else {
removeMeta(row);
}
......@@ -810,9 +815,9 @@ public class PageStore implements CacheWriter {
}
Table table = index.getTable();
if (add) {
table.addRow(database.getSystemSession(), row);
table.addRow(systemSession, row);
} else {
table.removeRow(database.getSystemSession(), row);
table.removeRow(systemSession, row);
}
}
......@@ -827,18 +832,18 @@ public class PageStore implements CacheWriter {
metaSchema = new Schema(database, 0, "", null, true);
int headPos = PAGE_ID_META_ROOT;
metaTable = new TableData(metaSchema, "PAGE_INDEX",
META_TABLE_ID, cols, true, true, false, headPos, database.getSystemSession());
META_TABLE_ID, cols, true, true, false, headPos, systemSession);
metaIndex = (PageScanIndex) metaTable.getScanIndex(
database.getSystemSession());
systemSession);
metaObjects = New.hashMap();
metaObjects.put(-1, metaIndex);
}
private void readMetaData() throws SQLException {
Cursor cursor = metaIndex.find(database.getSystemSession(), null, null);
Cursor cursor = metaIndex.find(systemSession, null, null);
while (cursor.next()) {
Row row = cursor.get();
addMeta(row, database.getSystemSession());
addMeta(row, systemSession, false);
}
}
......@@ -848,10 +853,13 @@ public class PageStore implements CacheWriter {
index.getTable().removeIndex(index);
if (index instanceof PageBtreeIndex) {
index.getSchema().remove(index);
} else if (index instanceof PageScanIndex) {
// TODO test why this doesn't work
// index.remove(null);
}
}
private void addMeta(Row row, Session session) throws SQLException {
private void addMeta(Row row, Session session, boolean redo) throws SQLException {
int id = row.getValue(0).getInt();
int type = row.getValue(1).getInt();
int parent = row.getValue(2).getInt();
......@@ -864,6 +872,13 @@ public class PageStore implements CacheWriter {
if (trace.isDebugEnabled()) {
trace.debug("addMeta id=" + id + " type=" + type + " parent=" + parent + " columns=" + columnList);
}
if (redo) {
int test;
byte[] empty = new byte[pageSize];
file.seek(headPos * pageSize);
file.write(empty, 0, pageSize);
removeRecord(headPos);
}
if (type == META_TYPE_SCAN_INDEX) {
ObjectArray<Column> columnArray = ObjectArray.newInstance();
for (int i = 0; i < columns.length; i++) {
......@@ -906,8 +921,9 @@ public class PageStore implements CacheWriter {
*
* @param index the index to add
* @param session the session
* @param headPos the head position
*/
public void addMeta(Index index, Session session) throws SQLException {
public void addMeta(Index index, Session session, int headPos) throws SQLException {
int type = index instanceof PageScanIndex ? META_TYPE_SCAN_INDEX : META_TYPE_BTREE_INDEX;
IndexColumn[] columns = index.getIndexColumns();
StatementBuilder buff = new StatementBuilder();
......@@ -925,7 +941,7 @@ public class PageStore implements CacheWriter {
Table table = index.getTable();
CompareMode mode = table.getCompareMode();
String options = mode.getName()+ "," + mode.getStrength();
addMeta(index.getId(), type, table.getId(), index.getHeadPos(), options, columnList, session);
addMeta(index.getId(), type, table.getId(), headPos, options, columnList, session);
}
private void addMeta(int id, int type, int parent, int headPos, String options, String columnList, Session session) throws SQLException {
......@@ -1013,4 +1029,13 @@ public class PageStore implements CacheWriter {
return log.getInDoubtTransactions();
}
/**
 * Tell whether the recovery process is running at the moment.
 *
 * @return true if recovery is in progress
 */
public boolean isRecoveryRunning() {
    return recoveryRunning;
}
}
......@@ -207,7 +207,6 @@ public class TableData extends Table implements RecordReader {
database.setProgress(DatabaseEventListener.STATE_CREATE_INDEX, getName() + ":" + index.getName(), MathUtils
.convertLongToInt(i++), MathUtils.convertLongToInt(total));
Row row = cursor.get();
// index.add(session, row);
buffer.add(row);
if (buffer.size() >= bufferSize) {
addRowsToIndex(session, buffer, index);
......
......@@ -803,14 +803,19 @@ public class Recover extends Tool implements DataHandler {
case Page.TYPE_DATA_OVERFLOW:
writer.println("-- page " + page + ": data overflow " + (last ? "(last)" : ""));
break;
case Page.TYPE_DATA_NODE:
writer.println("-- page " + page + ": data node " + (last ? "(last)" : ""));
case Page.TYPE_DATA_NODE: {
int entries = s.readShortInt();
int recordCount = s.readInt();
writer.println("-- page " + page + ": data node " + (last ? "(last)" : "") + " entries: " + entries + " record: " + recordCount);
break;
case Page.TYPE_DATA_LEAF:
}
case Page.TYPE_DATA_LEAF: {
setStorage(s.readInt());
writer.println("-- page " + page + ": data leaf " + (last ? "(last)" : "") + " table: " + storageId);
dumpPageDataLeaf(store, pageSize, writer, s, last, page);
int entries = s.readShortInt();
writer.println("-- page " + page + ": data leaf " + (last ? "(last)" : "") + " table: " + storageId + " entries: " + entries);
dumpPageDataLeaf(store, pageSize, writer, s, last, page, entries);
break;
}
case Page.TYPE_BTREE_NODE:
writer.println("-- page " + page + ": btree node" + (last ? "(last)" : ""));
if (trace) {
......@@ -1100,8 +1105,7 @@ public class Recover extends Tool implements DataHandler {
}
}
private void dumpPageDataLeaf(FileStore store, int pageSize, PrintWriter writer, DataPage s, boolean last, long pageId) throws SQLException {
int entryCount = s.readShortInt();
private void dumpPageDataLeaf(FileStore store, int pageSize, PrintWriter writer, DataPage s, boolean last, long pageId, int entryCount) throws SQLException {
int[] keys = new int[entryCount];
int[] offsets = new int[entryCount];
long next = 0;
......
......@@ -31,9 +31,28 @@ public class TestPageStore extends TestBase {
}
// Entry point for this test class: runs each page-store scenario in turn.
public void test() throws Exception {
// regression test: create an index after rows exist, then recover after a hard shutdown
testCreateIndexLater();
// randomized insert/delete/reconnect operations against the page store
testFuzzOperations();
}
/**
 * Regression test: create an index on a table that already contains rows,
 * add more rows, then kill the database with SHUTDOWN IMMEDIATELY and
 * verify that it can be reopened (log recovery rebuilds the state).
 */
private void testCreateIndexLater() throws SQLException {
    deleteDb("pageStore");
    Connection connection = getConnection("pageStore");
    Statement statement = connection.createStatement();
    statement.execute("CREATE TABLE TEST(NAME VARCHAR) AS SELECT 1");
    statement.execute("CREATE INDEX IDX_N ON TEST(NAME)");
    statement.execute("INSERT INTO TEST SELECT X FROM SYSTEM_RANGE(20, 100)");
    statement.execute("INSERT INTO TEST SELECT X FROM SYSTEM_RANGE(1000, 1100)");
    statement.execute("SHUTDOWN IMMEDIATELY");
    try {
        connection.close();
    } catch (SQLException ignored) {
        // closing after SHUTDOWN IMMEDIATELY is expected to fail
    }
    // reopening triggers recovery from the transaction log
    connection = getConnection("pageStore");
    connection.close();
}
private void testFuzzOperations() throws SQLException {
int best = Integer.MAX_VALUE;
for (int i = 0; i < 10; i++) {
......@@ -46,8 +65,8 @@ public class TestPageStore extends TestBase {
}
private int testFuzzOperationsSeed(int seed, int len) throws SQLException {
deleteDb("test");
Connection conn = getConnection("test");
deleteDb("pageStore");
Connection conn = getConnection("pageStore");
Statement stat = conn.createStatement();
log("DROP TABLE IF EXISTS TEST;");
stat.execute("DROP TABLE IF EXISTS TEST");
......@@ -75,7 +94,7 @@ public class TestPageStore extends TestBase {
break;
case 2:
conn.close();
conn = getConnection("test");
conn = getConnection("pageStore");
stat = conn.createStatement();
ResultSet rs = stat.executeQuery("SELECT * FROM TEST ORDER BY ID");
log("--reconnect");
......
Markdown 格式
0%
您添加了 0 人到此讨论。请谨慎行事。
请先完成此评论的编辑!
注册 或者 后发表评论