Commit ded7361f authored by Thomas Mueller

Page store: compact the database file when closing (currently always completely, which may be slow).
Parent 5434756e
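
In outline, the new trim() (in PageStore below) walks from the last used page down, moving each used page into the lowest free slot via Page.moveTo(), then truncates the file. A minimal standalone sketch of that idea, using a BitSet as a toy stand-in for H2's free lists (illustration only, not code from this commit):

    import java.util.BitSet;

    // Toy model of close-time compaction: move used pages from the tail
    // of the file into the lowest free slots, then truncate.
    public class CompactSketch {
        public static void main(String[] args) {
            BitSet used = new BitSet();
            used.set(2);
            used.set(7);
            used.set(9); // pages currently in use
            for (int x = used.length() - 1; x > 0; x--) {
                if (!used.get(x)) {
                    continue;
                }
                int free = used.nextClearBit(0);
                if (free >= x) {
                    break; // already compact
                }
                used.clear(x); // "move" page x down to the free slot
                used.set(free);
                System.out.println("move " + x + " to " + free);
            }
            // truncate just after the last used page
            System.out.println("new pageCount: " + used.length());
        }
    }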
@@ -352,7 +352,13 @@ public class PageBtreeIndex extends PageIndex {
return true;
}
public void setRootPageId(Session session, int newPos) throws SQLException {
/**
* The root page has changed.
*
* @param session the session
* @param newPos the new position
*/
void setRootPageId(Session session, int newPos) throws SQLException {
store.removeMeta(this, session);
this.rootPageId = newPos;
store.addMeta(this, session);
@@ -504,7 +504,13 @@ public class PageBtreeNode extends PageBtree {
store.freePage(getPos(), true, data);
}
public void moveChild(int oldPos, int newPos) throws SQLException {
/**
* One of the children has moved to a new page.
*
* @param oldPos the old position
* @param newPos the new position
*/
void moveChild(int oldPos, int newPos) throws SQLException {
for (int i = 0; i < childPageIds.length; i++) {
if (childPageIds[i] == oldPos) {
written = false;
@@ -306,7 +306,7 @@ public class PageDataLeaf extends PageData {
return;
}
PageDataOverflow overflow = index.getPageOverflow(firstOverflowPageId);
overflow.setParent(getPos());
overflow.setParentPageId(getPos());
index.getPageStore().updateRecord(overflow, true, null);
}
@@ -316,6 +316,7 @@ public class PageDataLeaf extends PageData {
throw Message.getSQLException(ErrorCode.ROW_NOT_FOUND_WHEN_DELETING_1, index.getSQL() + ": " + key);
}
if (entryCount == 1) {
freeChildren();
return true;
}
removeRow(i);
@@ -396,27 +397,39 @@ public class PageDataLeaf extends PageData {
}
public String toString() {
return "page[" + getPos() + "] data leaf table:" + index.getId() + " entries:" + entryCount;
return "page[" + getPos() + "] data leaf table:" + index.getId() +
" entries:" + entryCount + " parent:" + parentPageId +
(firstOverflowPageId == 0 ? "" : " overflow:" + firstOverflowPageId);
}
public void moveTo(Session session, int newPos) throws SQLException {
// PageStore store = index.getPageStore();
// PageBtreeLeaf p2 = new PageBtreeLeaf(index, newPos, store.createData());
// readAllRows();
// p2.rows = rows;
// p2.entryCount = entryCount;
// p2.offsets = offsets;
// p2.onlyPosition = onlyPosition;
// p2.parentPageId = parentPageId;
// p2.start = start;
// store.updateRecord(p2, false, null);
// if (firstOverflowPageId != 0) {
// }
// if (parentPageId == ROOT) {
// } else {
// PageBtreeNode p = (PageBtreeNode) store.getPage(parentPageId);
// p.moveChild(getPos(), newPos);
// }
PageStore store = index.getPageStore();
PageDataLeaf p2 = new PageDataLeaf(index, newPos, store.createData());
readAllRows();
p2.keys = keys;
p2.overflowRowSize = overflowRowSize;
p2.firstOverflowPageId = firstOverflowPageId;
p2.rowRef = rowRef;
p2.rows = rows;
p2.entryCount = entryCount;
p2.offsets = offsets;
p2.parentPageId = parentPageId;
p2.start = start;
store.updateRecord(p2, false, null);
p2.remapChildren();
store.freePage(getPos(), true, data);
if (parentPageId == ROOT) {
index.setRootPageId(session, newPos);
} else {
PageDataNode p = (PageDataNode) store.getPage(parentPageId);
p.moveChild(getPos(), newPos);
}
}
void setOverflow(int overflow) throws SQLException {
written = false;
this.firstOverflowPageId = overflow;
index.getPageStore().updateRecord(this, true, data);
}
}
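
Each page type follows the same move protocol seen in PageDataLeaf.moveTo() above: write an identical copy at the new position, re-point every reference to the old position (parent node or index root, children, overflow chain), then free the old page. A compilable sketch of that contract (hedged: H2's real base class is org.h2.store.Page, and Object stands in for org.h2.engine.Session here):

    import java.sql.SQLException;

    // Sketch of the contract each movable page implements for compaction.
    interface MovablePage {
        // Copy this page to newPos, update every reference to the old
        // position, then free the old page so trim() can truncate the file.
        void moveTo(Object session, int newPos) throws SQLException;
    }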
@@ -7,6 +7,7 @@
package org.h2.index;
import java.sql.SQLException;
import java.util.Arrays;
import org.h2.constant.ErrorCode;
import org.h2.engine.Session;
import org.h2.message.Message;
@@ -14,6 +15,7 @@ import org.h2.result.Row;
import org.h2.store.Data;
import org.h2.store.DataPage;
import org.h2.store.Page;
import org.h2.store.PageStore;
import org.h2.util.MemoryUtils;
/**
@@ -326,12 +328,49 @@ public class PageDataNode extends PageData {
}
public String toString() {
return "page[" + getPos() + "] data node table:" + index.getId() + " entries:" + entryCount;
return "page[" + getPos() + "] data node table:" + index.getId() + " entries:" + entryCount + " " + Arrays.toString(childPageIds);
}
public void moveTo(Session session, int newPos) throws SQLException {
// TODO Auto-generated method stub
PageStore store = index.getPageStore();
PageDataNode p2 = new PageDataNode(index, newPos, store.createData());
p2.rowCountStored = rowCountStored;
p2.rowCount = rowCount;
p2.childPageIds = childPageIds;
p2.keys = keys;
p2.entryCount = entryCount;
p2.parentPageId = parentPageId;
store.updateRecord(p2, false, null);
if (parentPageId == ROOT) {
index.setRootPageId(session, newPos);
} else {
PageDataNode p = (PageDataNode) store.getPage(parentPageId);
p.moveChild(getPos(), newPos);
}
for (int i = 0; i < childPageIds.length; i++) {
PageData p = (PageData) store.getPage(childPageIds[i]);
p.setParentPageId(newPos);
store.updateRecord(p, true, p.data);
}
store.freePage(getPos(), true, data);
}
/**
* One of the children has moved to another page.
*
* @param oldPos the old position
* @param newPos the new position
*/
void moveChild(int oldPos, int newPos) throws SQLException {
for (int i = 0; i < childPageIds.length; i++) {
if (childPageIds[i] == oldPos) {
written = false;
childPageIds[i] = newPos;
index.getPageStore().updateRecord(this, true, data);
return;
}
}
throw Message.throwInternalError();
}
}
@@ -13,6 +13,7 @@ import org.h2.message.Message;
import org.h2.store.Data;
import org.h2.store.DataPage;
import org.h2.store.Page;
import org.h2.store.PageStore;
/**
* Overflow data for a leaf page.
@@ -37,6 +38,8 @@ public class PageDataOverflow extends Page {
*/
static final int START_MORE = 13;
private static final int START_NEXT_OVERFLOW = 9;
/**
* The index.
*/
@@ -177,13 +180,37 @@ public class PageDataOverflow extends Page {
return index.getPageStore().getPageSize() >> 1;
}
void setParent(int parent) {
void setParentPageId(int parent) {
this.parentPage = parent;
}
public void moveTo(Session session, int newPos) throws SQLException {
// TODO Auto-generated method stub
PageStore store = index.getPageStore();
int start = type == Page.TYPE_DATA_OVERFLOW ? START_MORE : START_LAST;
PageDataOverflow p2 = new PageDataOverflow(index, newPos, type, parentPage, nextPage, data, start, size);
store.updateRecord(p2, false, null);
if (nextPage != 0) {
PageDataOverflow p3 = (PageDataOverflow) store.getPage(nextPage);
p3.setParentPageId(newPos);
}
Page p = store.getPage(parentPage);
if (p == null) {
throw Message.throwInternalError();
}
if (p instanceof PageDataOverflow) {
PageDataOverflow p1 = (PageDataOverflow) p;
p1.setOverflow(newPos);
} else {
PageDataLeaf p1 = (PageDataLeaf) p;
p1.setOverflow(newPos);
}
store.freePage(getPos(), true, data);
}
private void setOverflow(int nextPage) throws SQLException {
this.nextPage = nextPage;
data.setInt(START_NEXT_OVERFLOW, nextPage);
index.getPageStore().updateRecord(this, true, data);
}
}
@@ -11,6 +11,9 @@ package org.h2.index;
*/
public abstract class PageIndex extends BaseIndex {
/**
* The root page of this index.
*/
protected int rootPageId;
public int getRootPageId() {
@@ -373,4 +373,17 @@ public class PageScanIndex extends PageIndex implements RowIndex {
}
}
/**
* The root page has changed.
*
* @param session the session
* @param newPos the new position
*/
void setRootPageId(Session session, int newPos) throws SQLException {
store.removeMeta(this, session);
this.rootPageId = newPos;
store.addMeta(this, session);
store.addIndex(this);
}
}
@@ -170,11 +170,12 @@ public class PageLog {
* must be run first.
*
* @param firstTrunkPage the first trunk page
* @param atEnd whether only pages at the end of the file should be used
*/
void openForWriting(int firstTrunkPage) throws SQLException {
void openForWriting(int firstTrunkPage, boolean atEnd) throws SQLException {
trace.debug("log openForWriting firstPage:" + firstTrunkPage);
this.firstTrunkPage = firstTrunkPage;
pageOut = new PageOutputStream(store, firstTrunkPage, undoAll);
pageOut = new PageOutputStream(store, firstTrunkPage, undoAll, atEnd);
pageOut.reserve(1);
store.setLogFirstPage(firstTrunkPage, pageOut.getCurrentDataPageId());
buffer = new ByteArrayOutputStream();
@@ -185,11 +186,8 @@
* Free up all pages allocated by the log.
*/
void free() throws SQLException {
while (this.firstTrunkPage != 0) {
if (store.getRecord(firstTrunkPage) != null) {
throw Message.throwInternalError("" + store.getRecord(firstTrunkPage));
}
PageStreamTrunk t = (PageStreamTrunk) store.getPage(this.firstTrunkPage);
while (firstTrunkPage != 0) {
PageStreamTrunk t = (PageStreamTrunk) store.getPage(firstTrunkPage);
if (t == null) {
store.freePage(firstTrunkPage, false, null);
// EOF
@@ -34,18 +34,21 @@ public class PageOutputStream extends OutputStream {
private boolean needFlush;
private boolean writing;
private int pages;
private boolean atEnd;
/**
* Create a new page output stream.
*
* @param store the page store
* @param trunkPage the first trunk page (already allocated)
* @param atEnd whether only pages at the end of the file should be used
*/
public PageOutputStream(PageStore store, int trunkPage, BitField exclude) {
public PageOutputStream(PageStore store, int trunkPage, BitField exclude, boolean atEnd) {
this.trace = store.getTrace();
this.store = store;
this.trunkPageId = trunkPage;
this.exclude = exclude;
this.atEnd = atEnd;
}
/**
@@ -68,7 +71,8 @@
}
// allocate the next trunk page as well
pagesToAllocate++;
store.allocatePages(reservedPages, pagesToAllocate, exclude);
int firstPageToUse = atEnd ? trunkPageId : 0;
store.allocatePages(reservedPages, pagesToAllocate, exclude, firstPageToUse);
reserved += totalCapacity;
if (data == null) {
initNextData();
@@ -30,7 +30,6 @@ import org.h2.log.InDoubtTransaction;
import org.h2.log.LogSystem;
import org.h2.message.Message;
import org.h2.message.Trace;
import org.h2.message.TraceSystem;
import org.h2.result.Row;
import org.h2.result.SearchRow;
import org.h2.schema.Schema;
@@ -78,13 +77,12 @@ import org.h2.value.ValueString;
*/
public class PageStore implements CacheWriter {
// TODO database head pos = id? head pos > root pos?
// TODO a correctly closed database should not contain log pages
// TODO shrinking: a way to load pages centrally
// TODO shrinking: Page.moveTo(int pageId).
// TODO utf-x: test if it's faster
// TODO value serialization: test (100% coverage)
// TODO after opening the database, delay writing until required
// TODO scan index: support long keys, and use var long
// TODO don't save the direct parent (only root); remove setPageId
@@ -201,7 +199,7 @@ public class PageStore implements CacheWriter {
private TableData metaTable;
private PageScanIndex metaIndex;
private IntIntHashMap metaRootPageId = new IntIntHashMap();
private HashMap<Integer, PageIndex> metaObjects = New.hashMap();
private HashMap<Integer, Index> metaObjects = New.hashMap();
/**
* The map of reserved pages, to ensure index head pages
@@ -293,7 +291,7 @@ public class PageStore implements CacheWriter {
increaseFileSize(MIN_PAGE_COUNT);
openMetaIndex();
logFirstTrunkPage = allocatePage();
log.openForWriting(logFirstTrunkPage);
log.openForWriting(logFirstTrunkPage, false);
systemTableHeadPos = Index.EMPTY_HEAD;
recoveryRunning = false;
increaseFileSize(INCREMENT_PAGES);
@@ -318,7 +316,7 @@ public class PageStore implements CacheWriter {
recoveryRunning = true;
log.free();
logFirstTrunkPage = allocatePage();
log.openForWriting(logFirstTrunkPage);
log.openForWriting(logFirstTrunkPage, false);
recoveryRunning = false;
checkpoint();
}
@@ -364,18 +362,47 @@ public class PageStore implements CacheWriter {
* Shrink the file so there are no empty pages at the end.
*/
public void trim() throws SQLException {
int test;
int maxMove = 100000;
for (int x = pageCount - 1, j = 0; x > MIN_PAGE_COUNT && j < maxMove; x--, j++) {
// find the last used page
int lastUsed = -1;
for (int i = getFreeListId(pageCount); i >= 0; i--) {
lastUsed = getFreeList(i).getLastUsed();
if (lastUsed != -1) {
break;
}
}
// open a new log at the very end
// (to be truncated later)
writeBack();
recoveryRunning = true;
try {
log.free();
logFirstTrunkPage = lastUsed + 1;
allocatePage(logFirstTrunkPage);
log.openForWriting(logFirstTrunkPage, true);
} finally {
recoveryRunning = false;
}
int maxMove = Integer.MAX_VALUE;
for (int x = lastUsed, j = 0; x > MIN_PAGE_COUNT && j < maxMove; x--, j++) {
compact(x);
}
writeBack();
// truncate the log
recoveryRunning = true;
try {
log.free();
setLogFirstPage(0, 0);
} finally {
recoveryRunning = false;
}
writeBack();
for (int i = getFreeListId(pageCount); i >= 0; i--) {
int last = getFreeList(i).getLastUsed();
if (last != -1) {
pageCount = last + 1;
lastUsed = getFreeList(i).getLastUsed();
if (lastUsed != -1) {
break;
}
}
pageCount = lastUsed + 1;
trace.debug("pageCount:" + pageCount);
file.setLength((long) pageCount << pageSizeShift);
}
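
The ordering in trim() above matters: the log is reopened just past the last used page (atEnd=true) before any page is moved, because each move writes log records, and log pages allocated in the low region would re-fill slots that were just freed. The maxMove = Integer.MAX_VALUE bound is why the commit message says compaction is currently always complete, which may be slow. A condensed restatement of the sequence, assembled here only for readability (method names are from the diff):

    // 1. writeBack()                        -- flush dirty pages
    // 2. log.free(); reopen the log just after lastUsed with atEnd=true
    // 3. compact(x) for x from lastUsed down to MIN_PAGE_COUNT
    // 4. writeBack(); log.free(); setLogFirstPage(0, 0)
    // 5. recompute lastUsed; pageCount = lastUsed + 1;
    //    file.setLength(pageCount << pageSizeShift)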
@@ -395,13 +422,17 @@ public class PageStore implements CacheWriter {
if (f != null) {
Message.throwInternalError("not free: " + f);
}
Page p = getPage(full);
if (p != null) {
trace.debug("move " + p.getPos() + " to " + free);
long logSection = log.getLogSectionId(), logPos = log.getLogPos();
p.moveTo(systemSession, free);
if (log.getLogSectionId() == logSection || log.getLogPos() != logPos) {
commit(systemSession);
if (isUsed(full)) {
Page p = getPage(full);
if (p != null) {
trace.debug("move " + p.getPos() + " to " + free);
long logSection = log.getLogSectionId(), logPos = log.getLogPos();
p.moveTo(systemSession, free);
if (log.getLogSectionId() == logSection || log.getLogPos() != logPos) {
commit(systemSession);
}
} else {
freePage(full);
}
}
}
@@ -727,12 +758,12 @@ public class PageStore implements CacheWriter {
* @param list the list where to add the allocated pages
* @param pagesToAllocate the number of pages to allocate
* @param exclude the exclude list
* @param after all allocated pages are higher than this page
*/
void allocatePages(IntArray list, int pagesToAllocate, BitField exclude) throws SQLException {
int first = 0;
void allocatePages(IntArray list, int pagesToAllocate, BitField exclude, int after) throws SQLException {
for (int i = 0; i < pagesToAllocate; i++) {
int page = allocatePage(exclude, first);
first = page;
int page = allocatePage(exclude, after);
after = page;
list.add(page);
}
}
@@ -1033,7 +1064,7 @@ public class PageStore implements CacheWriter {
removeMeta(logPos, row);
}
}
PageScanIndex index = (PageScanIndex) metaObjects.get(tableId);
Index index = metaObjects.get(tableId);
if (index == null) {
throw Message.throwInternalError("Table not found: " + tableId + " " + row + " " + add);
}
@@ -1051,7 +1082,7 @@ public class PageStore implements CacheWriter {
* @param tableId the object id of the table
*/
void redoTruncate(int tableId) throws SQLException {
PageScanIndex index = (PageScanIndex) metaObjects.get(tableId);
Index index = metaObjects.get(tableId);
Table table = index.getTable();
table.truncate(systemSession);
}
@@ -1134,7 +1165,7 @@ public class PageStore implements CacheWriter {
table.setCompareMode(mode);
meta = table.getScanIndex(session);
} else {
PageScanIndex p = (PageScanIndex) metaObjects.get(parent);
Index p = metaObjects.get(parent);
if (p == null) {
throw Message.throwInternalError("parent not found:" + parent);
}
@@ -1156,7 +1187,7 @@ public class PageStore implements CacheWriter {
}
meta = table.addIndex(session, "I" + id, id, cols, indexType, id, null);
}
metaObjects.put(id, (PageIndex) meta);
metaObjects.put(id, meta);
}
/**
@@ -1319,6 +1350,12 @@ public class PageStore implements CacheWriter {
return this.logFirstDataPage;
}
/**
* Get the root page of an index.
*
* @param index the index
* @return the root page
*/
public int getRootPageId(PageIndex index) {
return metaRootPageId.get(index.getId());
}
@@ -161,23 +161,8 @@ public class PageStreamData extends Page {
remaining = length;
}
public void moveTo(Session session, int newPos) throws SQLException {
// PageStreamData d2 = new PageStreamData(store, newPos, trunk);
// d2.initWrite();
// initRead();
// byte[] buff = new byte[remaining];
// read(buff, 0, remaining);
// d2.write(buff, 0, remaining);
// store.updateRecord(d2, false, null);
// PageStreamTrunk t = (PageStreamTrunk) store.getPage(trunk);
// t.moveChild(getPos(), newPos);
// store.freePage(getPos(), true, data);
}
void setTrunkPage(int newTrunk) throws SQLException {
this.trunk = newTrunk;
data.setInt(0, trunk);
store.updateRecord(this, true, data);
public void moveTo(Session session, int newPos) {
// not required
}
}
\ No newline at end of file
@@ -197,21 +197,8 @@ public class PageStreamTrunk extends Page {
store.updateRecord(this, true, data);
}
public void moveTo(Session session, int newPos) throws SQLException {
// PageStreamTrunk p2 = new PageStreamTrunk(store, parent, newPos, nextTrunk, pageIds);
// store.updateRecord(p2, false, null);
// for (int i = 0; i < pageCount; i++) {
// int p = pageIds[i];
// PageStreamData d = (PageStreamData) store.getPage(p);
// if (d != null) {
// d.setTrunkPage(newPos);
// }
// }
// if (store.getLogFirstTrunkPage() == getPos()) {
// int dataPageId = store.getLogFirstDataPage();
// store.setLogFirstPage(newPos, dataPageId);
// }
// store.freePage(getPos(), true, data);
public void moveTo(Session session, int newPos) {
// not required
}
}