提交 cd7f1007 authored 作者: Thomas Mueller's avatar Thomas Mueller

When killing the process while the database was writing a checkpoint or while it was closing, the database could become corrupt.

When killing the process while the database was writing a checkpoint or while it was closing, the database could become corrupt. (work in progress)
上级 6dd1a3f4
......@@ -305,10 +305,6 @@ public class PageDataLeaf extends PageData {
* @return the row
*/
Row getRowAt(int at) {
int test;
if (rows == null || rows.length == 0) {
System.out.println("stop " + getPos());
}
Row r = rows[at];
if (r == null) {
if (firstOverflowPageId == 0) {
......@@ -376,10 +372,6 @@ if (rows == null || rows.length == 0) {
return null;
}
PageDataNode next = (PageDataNode) index.getPage(parentPageId, -1);
int test;
if (next == null || keys == null) {
System.out.println("stop " + getPos());
}
return next.getNextPage(keys[entryCount - 1]);
}
......
......@@ -296,15 +296,6 @@ public class PageLog {
int sessionId = in.readVarInt();
int tableId = in.readVarInt();
long key = in.readVarLong();
int todo;
// can not commit immediately
// because the index root may have started to be moved,
// without arriving at the destination yet
// if (stage == RECOVERY_STAGE_UNDO && tableId == -1) {
// // immediately commit,
// // because the pages may be re-used
// setLastCommitForSession(sessionId, logId, pos);
// }
if (stage == RECOVERY_STAGE_REDO) {
if (isSessionCommitted(sessionId, logId, pos)) {
if (trace.isDebugEnabled()) {
......
......@@ -12,7 +12,6 @@ import java.util.ArrayList;
import java.util.BitSet;
import java.util.Collections;
import java.util.HashMap;
import java.util.Iterator;
import java.util.zip.CRC32;
import org.h2.command.ddl.CreateTableData;
import org.h2.constant.ErrorCode;
......@@ -35,14 +34,13 @@ import org.h2.index.PageDelegateIndex;
import org.h2.index.PageIndex;
import org.h2.message.DbException;
import org.h2.message.Trace;
import org.h2.message.TraceSystem;
import org.h2.result.Row;
import org.h2.result.SearchRow;
import org.h2.schema.Schema;
import org.h2.table.Column;
import org.h2.table.IndexColumn;
import org.h2.table.Table;
import org.h2.table.RegularTable;
import org.h2.table.Table;
import org.h2.util.Cache;
import org.h2.util.CacheLRU;
import org.h2.util.CacheObject;
......@@ -179,7 +177,7 @@ public class PageStore implements CacheWriter {
private PageDataIndex metaIndex;
private IntIntHashMap metaRootPageId = new IntIntHashMap();
private HashMap<Integer, PageIndex> metaObjects = New.hashMap();
private ArrayList<PageIndex> tempIndexes = New.arrayList();
private HashMap<Integer, PageIndex> tempObjects;
/**
* The map of reserved pages, to ensure index head pages
......@@ -220,9 +218,8 @@ public class PageStore implements CacheWriter {
this.accessMode = accessMode;
this.database = database;
trace = database.getTrace(Trace.PAGE_STORE);
int test;
//if (!fileName.endsWith("reopen.h2.db"))
//trace.setLevel(TraceSystem.DEBUG);
// if (!fileName.endsWith("reopen.h2.db"))
// trace.setLevel(TraceSystem.DEBUG);
String cacheType = database.getCacheType();
this.cache = CacheLRU.getCache(this, cacheType, cacheSizeDefault);
systemSession = new Session(database, null, 0);
......@@ -322,16 +319,19 @@ int test;
}
private void removeOldTempIndexes() {
for (PageIndex index: tempIndexes) {
index.truncate(systemSession);
index.remove(systemSession);
}
if (tempIndexes.size() > 0) {
if (tempObjects != null) {
metaObjects.putAll(tempObjects);
for (PageIndex index: tempObjects.values()) {
if (index.getTable().isTemporary()) {
index.truncate(systemSession);
index.remove(systemSession);
}
}
systemSession.commit(true);
tempObjects = null;
}
metaObjects.clear();
metaObjects.put(-1, metaIndex);
tempIndexes = null;
}
private void writeIndexRowCounts() {
......@@ -361,23 +361,18 @@ int test;
database.checkPowerOff();
writeIndexRowCounts();
int test;
// writeBack();
// log.checkpoint();
log.checkpoint();
writeBack();
log.checkpoint();
writeBack();
int firstUncommittedSection = getFirstUncommittedSection();
log.removeUntil(firstUncommittedSection);
// write back the free list
writeBack();
int test2;
// ensure the free list is backed up again
log.checkpoint();
// ensure the free list is backed up again
log.checkpoint();
byte[] empty = new byte[pageSize];
......@@ -424,8 +419,8 @@ log.checkpoint();
allocatePage(logFirstTrunkPage);
log.openForWriting(logFirstTrunkPage, true);
// ensure the free list is backed up again
log.checkpoint();
// ensure the free list is backed up again
log.checkpoint();
} finally {
recoveryRunning = false;
......@@ -444,22 +439,16 @@ log.checkpoint();
break;
}
}
this.checkpoint();
int test;
log.checkpoint();
// TODO can most likely be simplified
checkpoint();
log.checkpoint();
writeIndexRowCounts();
log.checkpoint();
log.checkpoint();
writeBack();
int test3;
commit(systemSession);
writeBack();
log.checkpoint();
commit(systemSession);
writeBack();
log.checkpoint();
// truncate the log
recoveryRunning = true;
......@@ -510,29 +499,11 @@ log.checkpoint();
Page p = getPage(full);
if (p != null) {
trace.debug("move " + p.getPos() + " to " + free);
long logSection = log.getLogSectionId(), logPos = log.getLogPos();
try {
p.moveTo(systemSession, free);
} finally {
changeCount++;
}
//if (log.getLogSectionId() != logSection || log.getLogPos() != logPos) {
// // commit if an index root page moved
//int test;
//// need to write the log first, then the moved
//// need to write the moved root node,
//// and then we can commit
////log.checkpoint();
//writeBack();
//// commit(systemSession);
//
//int testForceProblem;
//log.checkpoint();
//writeBack();
//log.checkpoint();
//writeBack();
//
// }
} else {
freePage(full);
}
......@@ -1129,33 +1100,18 @@ log.checkpoint();
setReadOnly = true;
}
}
// remove temp tables
// PageDataIndex systemTable = (PageDataIndex) metaObjects.get(0);
// isNew = systemTable == null;
// for (Iterator<PageIndex> it = metaObjects.values().iterator(); it.hasNext();) {
// Index openIndex = it.next();
// if (openIndex.getTable().isTemporary()) {
// openIndex.truncate(systemSession);
// openIndex.remove(systemSession);
// removeMetaIndex(openIndex, systemSession);
// it.remove();
// } else {
// openIndex.close(systemSession);
// }
// }
PageDataIndex systemTable = (PageDataIndex) metaObjects.get(0);
isNew = systemTable == null;
for (Iterator<PageIndex> it = metaObjects.values().iterator(); it.hasNext();) {
PageIndex openIndex = it.next();
if (openIndex.getTable().isTemporary()) {
tempIndexes.add(openIndex);
// System.out.println("temp: " + openIndex);
for (PageIndex index : metaObjects.values()) {
if (index.getTable().isTemporary()) {
// temporary indexes are removed after opening
if (tempObjects == null) {
tempObjects = New.hashMap();
}
tempObjects.put(index.getId(), index);
} else {
openIndex.close(systemSession);
index.close(systemSession);
}
int test;
}
allocatePage(PAGE_ID_META_ROOT);
......@@ -1167,14 +1123,10 @@ log.checkpoint();
// clear the cache because it contains pages with closed indexes
cache.clear();
freeLists.clear();
metaObjects.clear();
metaObjects.put(-1, metaIndex);
int test;
for (PageIndex index : tempIndexes) {
metaObjects.put(index.getId(), index);
}
if (setReadOnly) {
database.setReadOnly(true);
}
......@@ -1356,17 +1308,12 @@ int test;
PageIndex index = metaObjects.get(id);
int rootPageId = index.getRootPageId();
index.getTable().removeIndex(index);
int test;
// if (index instanceof PageBtreeIndex) {
if (index instanceof PageBtreeIndex || index instanceof PageDelegateIndex) {
if (index.isTemporary()) {
systemSession.removeLocalTempTableIndex(index);
} else {
index.getSchema().remove(index);
}
// } else if (index instanceof PageDelegateIndex) {
// index.getSchema().remove(index);
}
index.remove(systemSession);
metaObjects.remove(id);
......
Markdown 格式
0%
您添加了 0 人到此讨论。请谨慎行事。
请先完成此评论的编辑!
注册 或者 登录 后发表评论