Commit 8b03f980 authored by Thomas Mueller

new experimental page store

Parent d9568981
...@@ -114,6 +114,9 @@ The theoretical limit is currently 256 GB for the data. This number is excluding ...@@ -114,6 +114,9 @@ The theoretical limit is currently 256 GB for the data. This number is excluding
Every CLOB or BLOB can be up to 256 GB as well. The size limit of the index data is 256 GB as well. Every CLOB or BLOB can be up to 256 GB as well. The size limit of the index data is 256 GB as well.
</p> </p>
<p> <p>
The maximum number of rows per table is 2'147'483'648.
</p>
<p>
The maximum file size for FAT or FAT32 file systems is 4 GB. So if you use FAT or FAT32, the The maximum file size for FAT or FAT32 file systems is 4 GB. So if you use FAT or FAT32, the
limit is 4 GB for the data. limit is 4 GB for the data.
</p> </p>
......
...@@ -123,7 +123,7 @@ This seems to be a structural problem, because all operations are really slow. ...@@ -123,7 +123,7 @@ This seems to be a structural problem, because all operations are really slow.
It will not be easy for the developers of Derby to improve the performance to a reasonable level. It will not be easy for the developers of Derby to improve the performance to a reasonable level.
A few problems have been identified: Leaving autocommit on is a problem for Derby. A few problems have been identified: Leaving autocommit on is a problem for Derby.
If it is switched off during the whole test, the results are about 20% better for Derby. If it is switched off during the whole test, the results are about 20% better for Derby.
Derby supports a testing mode (java -Dderby.system.durability=test) where durablity is Derby supports a testing mode (system property derby.system.durability=test) where durability is
disabled. According to the documentation, this setting should be used for testing only, disabled. According to the documentation, this setting should be used for testing only,
as the database may not recover after a crash. Enabling this setting improves performance as the database may not recover after a crash. Enabling this setting improves performance
by a factor of 2.6 (embedded mode) or 1.4 (server mode). Even if enabled, Derby is still less by a factor of 2.6 (embedded mode) or 1.4 (server mode). Even if enabled, Derby is still less
......
This source diff could not be displayed because it is too large. You can view the blob instead.
This source diff could not be displayed because it is too large. You can view the blob instead.
This source diff could not be displayed because it is too large. You can view the blob instead.
...@@ -3753,7 +3753,7 @@ public class Parser { ...@@ -3753,7 +3753,7 @@ public class Parser {
columns.add(new Column(cols[i], Value.STRING)); columns.add(new Column(cols[i], Value.STRING));
} }
int id = database.allocateObjectId(true, true); int id = database.allocateObjectId(true, true);
recursiveTable = schema.createTable(tempViewName, id, columns, false, false); recursiveTable = schema.createTable(tempViewName, id, columns, false, false, Index.EMPTY_HEAD);
recursiveTable.setTemporary(true); recursiveTable.setTemporary(true);
session.addLocalTempTable(recursiveTable); session.addLocalTempTable(recursiveTable);
String querySQL = StringCache.getNew(sqlCommand.substring(parseIndex)); String querySQL = StringCache.getNew(sqlCommand.substring(parseIndex));
......
...@@ -241,7 +241,7 @@ public class AlterTableAlterColumn extends SchemaCommand { ...@@ -241,7 +241,7 @@ public class AlterTableAlterColumn extends SchemaCommand {
// still need a new id because using 0 would mean: the new table tries // still need a new id because using 0 would mean: the new table tries
// to use the rows of the table 0 (the meta table) // to use the rows of the table 0 (the meta table)
int id = -1; int id = -1;
TableData newTable = getSchema().createTable(tempName, id, newColumns, persistent, false); TableData newTable = getSchema().createTable(tempName, id, newColumns, persistent, false, Index.EMPTY_HEAD);
newTable.setComment(table.getComment()); newTable.setComment(table.getComment());
StringBuffer buff = new StringBuffer(newTable.getCreateSQL()); StringBuffer buff = new StringBuffer(newTable.getCreateSQL());
StringBuffer columnList = new StringBuffer(); StringBuffer columnList = new StringBuffer();
......
...@@ -15,6 +15,7 @@ import org.h2.constant.ErrorCode; ...@@ -15,6 +15,7 @@ import org.h2.constant.ErrorCode;
import org.h2.engine.Database; import org.h2.engine.Database;
import org.h2.engine.Session; import org.h2.engine.Session;
import org.h2.expression.Expression; import org.h2.expression.Expression;
import org.h2.index.Index;
import org.h2.message.Message; import org.h2.message.Message;
import org.h2.schema.Schema; import org.h2.schema.Schema;
import org.h2.schema.Sequence; import org.h2.schema.Sequence;
...@@ -144,7 +145,7 @@ public class CreateTable extends SchemaCommand { ...@@ -144,7 +145,7 @@ public class CreateTable extends SchemaCommand {
} }
} }
int id = getObjectId(true, true); int id = getObjectId(true, true);
TableData table = getSchema().createTable(tableName, id, columns, persistent, clustered); TableData table = getSchema().createTable(tableName, id, columns, persistent, clustered, Index.EMPTY_HEAD);
table.setComment(comment); table.setComment(comment);
table.setTemporary(temporary); table.setTemporary(temporary);
table.setGlobalTemporary(globalTemporary); table.setGlobalTemporary(globalTemporary);
......
...@@ -216,15 +216,17 @@ public class SysProperties { ...@@ -216,15 +216,17 @@ public class SysProperties {
public static final boolean DOLLAR_QUOTING = getBooleanSetting("h2.dollarQuoting", true); public static final boolean DOLLAR_QUOTING = getBooleanSetting("h2.dollarQuoting", true);
/** /**
* System property <code>h2.estimatedFunctionTableRows</code> (default: 1000).<br /> * System property <code>h2.estimatedFunctionTableRows</code> (default:
* The estimated number of rows in a function table (for example, CSVREAD or FTL_SEARCH). * 1000).<br />
* This value is used by the optimizer. * The estimated number of rows in a function table (for example, CSVREAD or
* FTL_SEARCH). This value is used by the optimizer.
*/ */
public static final int ESTIMATED_FUNCTION_TABLE_ROWS = getIntSetting("h2.estimatedFunctionTableRows", 1000); public static final int ESTIMATED_FUNCTION_TABLE_ROWS = getIntSetting("h2.estimatedFunctionTableRows", 1000);
/** /**
* System property <code>h2.largeResultBufferSize</code> (default: 4096).<br /> * System property <code>h2.largeResultBufferSize</code> (default: 4096).<br />
* Buffer size for large result sets. Set this value to 0 to disable the buffer. * Buffer size for large result sets. Set this value to 0 to disable the
* buffer.
*/ */
public static final int LARGE_RESULT_BUFFER_SIZE = getIntSetting("h2.largeResultBufferSize", 4 * 1024); public static final int LARGE_RESULT_BUFFER_SIZE = getIntSetting("h2.largeResultBufferSize", 4 * 1024);
...@@ -432,6 +434,12 @@ public class SysProperties { ...@@ -432,6 +434,12 @@ public class SysProperties {
*/ */
public static final boolean OVERFLOW_EXCEPTIONS = getBooleanSetting("h2.overflowExceptions", true); public static final boolean OVERFLOW_EXCEPTIONS = getBooleanSetting("h2.overflowExceptions", true);
/**
* System property <code>h2.pageStore</code> (default: false).<br />
* Use the page store file format (experimental).
*/
public static final boolean PAGE_STORE = getBooleanSetting("h2.pageStore", false);
/** /**
* System property <code>h2.recompileAlways</code> (default: false).<br /> * System property <code>h2.recompileAlways</code> (default: false).<br />
* Always recompile prepared statements. * Always recompile prepared statements.
......
...@@ -479,6 +479,11 @@ public class Constants { ...@@ -479,6 +479,11 @@ public class Constants {
*/ */
public static final String SUFFIX_DATA_FILE = ".data.db"; public static final String SUFFIX_DATA_FILE = ".data.db";
/**
* The file name suffix of page files.
*/
public static final String SUFFIX_PAGE_FILE = ".h2.db";
/** /**
* The file name suffix of all database files. * The file name suffix of all database files.
*/ */
......
...@@ -40,6 +40,7 @@ import org.h2.store.DataPage; ...@@ -40,6 +40,7 @@ import org.h2.store.DataPage;
import org.h2.store.DiskFile; import org.h2.store.DiskFile;
import org.h2.store.FileLock; import org.h2.store.FileLock;
import org.h2.store.FileStore; import org.h2.store.FileStore;
import org.h2.store.PageStore;
import org.h2.store.RecordReader; import org.h2.store.RecordReader;
import org.h2.store.Storage; import org.h2.store.Storage;
import org.h2.store.WriterThread; import org.h2.store.WriterThread;
...@@ -163,6 +164,7 @@ public class Database implements DataHandler { ...@@ -163,6 +164,7 @@ public class Database implements DataHandler {
private Server server; private Server server;
private HashMap linkConnections; private HashMap linkConnections;
private TempFileDeleter tempFileDeleter = TempFileDeleter.getInstance(); private TempFileDeleter tempFileDeleter = TempFileDeleter.getInstance();
private PageStore pageStore;
public Database(String name, ConnectionInfo ci, String cipher) throws SQLException { public Database(String name, ConnectionInfo ci, String cipher) throws SQLException {
this.compareMode = new CompareMode(null, null, 0); this.compareMode = new CompareMode(null, null, 0);
...@@ -400,6 +402,14 @@ public class Database implements DataHandler { ...@@ -400,6 +402,14 @@ public class Database implements DataHandler {
} }
fileIndex = null; fileIndex = null;
} }
if (pageStore != null) {
try {
pageStore.close();
} catch (SQLException e) {
// ignore
}
pageStore = null;
}
if (lock != null) { if (lock != null) {
stopServer(); stopServer();
lock.unlock(); lock.unlock();
...@@ -576,7 +586,7 @@ public class Database implements DataHandler { ...@@ -576,7 +586,7 @@ public class Database implements DataHandler {
cols.add(new Column("HEAD", Value.INT)); cols.add(new Column("HEAD", Value.INT));
cols.add(new Column("TYPE", Value.INT)); cols.add(new Column("TYPE", Value.INT));
cols.add(new Column("SQL", Value.STRING)); cols.add(new Column("SQL", Value.STRING));
meta = mainSchema.createTable("SYS", 0, cols, persistent, false); meta = mainSchema.createTable("SYS", 0, cols, persistent, false, 1);
IndexColumn[] pkCols = IndexColumn.wrap(new Column[] { columnId }); IndexColumn[] pkCols = IndexColumn.wrap(new Column[] { columnId });
metaIdIndex = meta.addIndex(systemSession, "SYS_ID", 0, pkCols, IndexType.createPrimaryKey( metaIdIndex = meta.addIndex(systemSession, "SYS_ID", 0, pkCols, IndexType.createPrimaryKey(
false, false), Index.EMPTY_HEAD, null); false, false), Index.EMPTY_HEAD, null);
...@@ -2097,4 +2107,13 @@ public class Database implements DataHandler { ...@@ -2097,4 +2107,13 @@ public class Database implements DataHandler {
return getTrace(Trace.DATABASE); return getTrace(Trace.DATABASE);
} }
public PageStore getPageStorage() throws SQLException {
if (pageStore == null) {
pageStore = new PageStore(this, databaseName + Constants.SUFFIX_PAGE_FILE, accessModeData,
SysProperties.CACHE_SIZE_DEFAULT);
pageStore.open();
}
return pageStore;
}
} }
/*
* Copyright 2004-2008 H2 Group. Multiple-Licensed under the H2 License,
* Version 1.0, and under the Eclipse Public License, Version 1.0
* (http://h2database.com/html/license.html).
* Initial Developer: H2 Group
*/
package org.h2.index;
/**
 * A page in the page store. This class only defines the page type codes
 * stored in the type byte of each page; it carries no state.
 */
class Page {

    /**
     * An empty page.
     */
    static final int TYPE_EMPTY = 0;

    /**
     * A data leaf page without overflow.
     */
    static final int TYPE_DATA_LEAF = 2;

    /**
     * A data leaf page with overflow (an overflow page id follows the
     * entry count in the page header).
     */
    static final int TYPE_DATA_LEAF_WITH_OVERFLOW = 3;

    /**
     * A data node (non-leaf) page without overflow.
     */
    static final int TYPE_DATA_NODE = 4;

    /**
     * The last overflow page of a chain (no more overflow pages follow).
     */
    static final int TYPE_DATA_OVERFLOW_LAST = 6;

    /**
     * An overflow page with more overflow pages to come.
     */
    static final int TYPE_DATA_OVERFLOW_WITH_MORE = 7;
}
/*
* Copyright 2004-2008 H2 Group. Multiple-Licensed under the H2 License,
* Version 1.0, and under the Eclipse Public License, Version 1.0
* (http://h2database.com/html/license.html).
* Initial Developer: H2 Group
*/
package org.h2.index;
import java.sql.SQLException;
import org.h2.result.Row;
import org.h2.store.DataPageBinary;
/**
 * The base class of the row-containing pages of the page scan index.
 * A page holds its serialized form in a DataPageBinary and knows its own
 * page number as well as the page number of its parent.
 */
abstract class PageData {

    /**
     * The index that owns this page.
     */
    protected final PageScanIndex index;

    /**
     * The data page (the serialized form of this page).
     */
    protected final DataPageBinary data;

    /**
     * The page number.
     */
    protected final int pageId;

    /**
     * The page number of the parent.
     */
    protected final int parentPageId;

    /**
     * The number of entries stored in this page.
     */
    protected int entryCount;

    /**
     * If the page has unwritten changes.
     */
    protected boolean changed;

    /**
     * Create a page.
     *
     * @param index the owning index
     * @param pageId the page number
     * @param parentPageId the page number of the parent
     * @param data the underlying data page
     */
    PageData(PageScanIndex index, int pageId, int parentPageId, DataPageBinary data) {
        this.index = index;
        this.pageId = pageId;
        this.parentPageId = parentPageId;
        this.data = data;
    }

    /**
     * Read the page contents from the data page.
     */
    abstract void read() throws SQLException;

    /**
     * Add a row.
     *
     * @param row the row
     * @return 0 if successful, or the split position if the page needs to be
     *         split
     */
    abstract int addRow(Row row) throws SQLException;

    /**
     * Get a cursor over the rows of this page.
     *
     * @return the cursor
     */
    abstract Cursor find();

    /**
     * Write the page to the page store.
     */
    abstract void write() throws SQLException;
}
/*
* Copyright 2004-2008 H2 Group. Multiple-Licensed under the H2 License,
* Version 1.0, and under the Eclipse Public License, Version 1.0
* (http://h2database.com/html/license.html).
* Initial Developer: H2 Group
*/
package org.h2.index;
import java.sql.SQLException;
import org.h2.message.Message;
import org.h2.result.Row;
import org.h2.store.DataPageBinary;
/**
 * A leaf page that contains data of one or multiple rows.
 * Format:
 * <ul><li>0-3: parent page id
 * </li><li>4-4: page type
 * </li><li>5-5: entry count
 * </li><li>only if there is overflow: 6-9: overflow page id
 * </li><li>list of offsets (2 bytes each)
 * </li></ul>
 * The format of an overflow page is:
 * <ul><li>0-3: parent page id
 * </li><li>4-4: page type
 * </li><li>only if there is overflow: 5-8: next overflow page id
 * </li><li>data
 * </li></ul>
 */
class PageDataLeaf extends PageData {

    /**
     * The row offsets within the data page, one entry per row.
     */
    int[] offsets;

    /**
     * The rows; entries may be null until read lazily by getRow().
     */
    Row[] rows;

    /**
     * The page id of the first overflow page (0 for no overflow).
     */
    int overflowPageId;

    /**
     * The start of the data area (the end of the page header).
     */
    int start;

    PageDataLeaf(PageScanIndex index, int pageId, int parentPageId, DataPageBinary data) {
        super(index, pageId, parentPageId, data);
    }

    void read() throws SQLException {
        // position 4: just past the 4-byte parent page id, which the
        // caller (PageScanIndex.getPage) has already read
        data.setPos(4);
        int type = data.readByte();
        // the entry count is stored in a single unsigned byte (max 255 rows)
        entryCount = data.readByte() & 255;
        offsets = new int[entryCount];
        rows = new Row[entryCount];
        if (type == Page.TYPE_DATA_LEAF_WITH_OVERFLOW) {
            overflowPageId = data.readInt();
        }
        for (int i = 0; i < entryCount; i++) {
            offsets[i] = data.readShortInt();
        }
        start = data.length();
    }

    void write() throws SQLException {
        // make sure rows are read, because reset() below overwrites the
        // buffer the lazy rows would be read from
        for (int i = 0; i < entryCount; i++) {
            getRow(i);
        }
        data.reset();
        data.writeInt(parentPageId);
        int type;
        if (overflowPageId == 0) {
            type = Page.TYPE_DATA_LEAF;
        } else {
            type = Page.TYPE_DATA_LEAF_WITH_OVERFLOW;
        }
        data.writeByte((byte) type);
        data.writeByte((byte) entryCount);
        if (overflowPageId != 0) {
            data.writeInt(overflowPageId);
        }
        for (int i = 0; i < entryCount; i++) {
            data.writeShortInt(offsets[i]);
        }
        for (int i = 0; i < entryCount; i++) {
            data.setPos(offsets[i]);
            rows[i].write(data);
        }
        int pageSize = index.getPageStore().getPageSize();
        if (data.length() > pageSize) {
            // data may only exceed the page size if addRow allocated an
            // overflow page
            if (overflowPageId == 0) {
                throw Message.getInternalError();
            }
            // TODO: write the overflowing part to the overflow page chain
            int todoWriteOverflow;
        } else {
            if (overflowPageId != 0) {
                throw Message.getInternalError();
            }
        }
        index.getPageStore().writePage(pageId, data);
    }

    /**
     * Add a row if possible. If it is possible this method returns 0, otherwise
     * the split point. It is always possible to add one row.
     *
     * @param row the row to add
     * @return the split point of this page, or 0 if no split is required
     */
    int addRow(Row row) throws SQLException {
        // the entry count is stored in one byte, so at most 255 rows fit
        if (entryCount >= 255) {
            return entryCount / 2;
        }
        int rowLength = row.getByteCount(data);
        // row data is stored back-to-front, starting at the end of the page
        int last = entryCount == 0 ? index.getPageStore().getPageSize() : offsets[entryCount - 1];
        int offset = last - rowLength;
        if (offset < start + 2) {
            // the row does not fit into the remaining free space
            if (entryCount > 0) {
                return entryCount / 2;
            }
            // a single row too large for the page: use an overflow page
            offset = start + 2;
            overflowPageId = index.getPageStore().allocatePage();
        }
        changed = true;
        entryCount++;
        int[] newOffsets = new int[entryCount];
        Row[] newRows = new Row[entryCount];
        System.arraycopy(offsets, 0, newOffsets, 0, entryCount - 1);
        System.arraycopy(rows, 0, newRows, 0, entryCount - 1);
        // each entry adds a 2-byte offset to the page header
        start += 2;
        newOffsets[entryCount - 1] = offset;
        newRows[entryCount - 1] = row;
        offsets = newOffsets;
        rows = newRows;
        write();
        return 0;
    }

    Cursor find() {
        return new PageScanCursor(this, 0);
    }

    /**
     * Get the row at the given index, reading it lazily from the data page
     * if it has not been materialized yet.
     *
     * @param index the index
     * @return the row
     */
    Row getRow(int index) throws SQLException {
        Row r = rows[index];
        if (r == null) {
            data.setPos(offsets[index]);
            r = this.index.readRow(data);
            rows[index] = r;
        }
        return r;
    }

    int getEntryCount() {
        return entryCount;
    }
}
/*
* Copyright 2004-2008 H2 Group. Multiple-Licensed under the H2 License,
* Version 1.0, and under the Eclipse Public License, Version 1.0
* (http://h2database.com/html/license.html).
* Initial Developer: H2 Group
*/
package org.h2.index;
import java.sql.SQLException;
import org.h2.result.Row;
import org.h2.store.DataPageBinary;
/**
 * A data node (non-leaf) page that maps keys to child pages.
 * (Most operations are not implemented yet.)
 * Format:
 * <ul><li>0-3: parent page id
 * </li><li>4-4: page type
 * </li><li>5-5: entry count
 * </li><li>6- entries: 4 bytes leaf page id, 4 bytes key
 * </li></ul>
 */
class PageDataNode extends PageData {

    // optimization
    // int childrenEntryCount;

    PageDataNode(PageScanIndex index, int pageId, int parentPageId, DataPageBinary data) {
        super(index, pageId, parentPageId, data);
    }

    void read() {
        // TODO: not yet implemented
        int todo;
    }

    int addRow(Row row) throws SQLException {
        // TODO: not yet implemented
        int todo;
        return 0;
    }

    Cursor find() {
        // TODO: not yet implemented
        int todo;
        return null;
    }

    void write() throws SQLException {
        // TODO: not yet implemented
        int todo;
    }
}
/*
* Copyright 2004-2008 H2 Group. Multiple-Licensed under the H2 License,
* Version 1.0, and under the Eclipse Public License, Version 1.0
* (http://h2database.com/html/license.html).
* Initial Developer: H2 Group
*/
package org.h2.index;
import java.sql.SQLException;
import org.h2.result.Row;
import org.h2.result.SearchRow;
/**
 * The cursor implementation for the page scan index. It iterates over the
 * rows of a single leaf page, reading each row lazily from the page.
 */
class PageScanCursor implements Cursor {

    private final PageDataLeaf leaf;
    private int position;
    private Row currentRow;

    PageScanCursor(PageDataLeaf current, int index) {
        this.leaf = current;
        this.position = index;
    }

    public Row get() throws SQLException {
        return currentRow;
    }

    public int getPos() {
        return currentRow.getPos();
    }

    public SearchRow getSearchRow() throws SQLException {
        return get();
    }

    public boolean next() throws SQLException {
        // TODO: advancing past the current leaf page is not handled yet
        int todo;
        if (position >= leaf.getEntryCount()) {
            return false;
        }
        currentRow = leaf.getRow(position);
        position++;
        return true;
    }

    public boolean previous() throws SQLException {
        position--;
        // TODO: the row is not re-read when stepping back yet
        int todo;
        return true;
    }
}
/*
* Copyright 2004-2008 H2 Group. Multiple-Licensed under the H2 License,
* Version 1.0, and under the Eclipse Public License, Version 1.0
* (http://h2database.com/html/license.html).
* Initial Developer: H2 Group
*/
package org.h2.index;
import java.sql.SQLException;
import org.h2.constant.ErrorCode;
import org.h2.engine.Constants;
import org.h2.engine.Session;
import org.h2.message.Message;
import org.h2.result.Row;
import org.h2.result.SearchRow;
import org.h2.store.DataPageBinary;
import org.h2.store.PageStore;
import org.h2.table.IndexColumn;
import org.h2.table.TableData;
/**
* The scan index allows to access a row by key. It can be used to iterate over
* all rows of a table. Each regular table has one such object, even if no
* primary key or indexes are defined.
*/
public class PageScanIndex extends BaseIndex implements RowIndex {
private PageStore store;
private TableData tableData;
private int headPos;
public PageScanIndex(TableData table, int id, IndexColumn[] columns, IndexType indexType, int headPos) throws SQLException {
initBaseIndex(table, id, table.getName() + "_TABLE_SCAN", columns, indexType);
if (database.isMultiVersion()) {
int todoMvcc;
}
tableData = table;
if (!database.isPersistent() || id < 0) {
int todo;
return;
}
this.store = database.getPageStorage();
if (headPos == Index.EMPTY_HEAD || headPos >= store.getPageCount()) {
// new table
headPos = store.allocatePage();
PageDataLeaf root = new PageDataLeaf(this, headPos, 1, store.createDataPage());
root.write();
} else {
int todo;
rowCount = 10;
}
this.headPos = headPos;
table.setRowCount(rowCount);
}
public void add(Session session, Row row) throws SQLException {
int invalidateRowCount;
PageData root = getPage(headPos);
root.addRow(row);
rowCount++;
}
private PageData getPage(int id) throws SQLException {
DataPageBinary data = store.readPage(id);
int parentPageId = data.readInt();
int type = data.readByte() & 255;
PageData result;
switch (type) {
case Page.TYPE_DATA_LEAF:
case Page.TYPE_DATA_LEAF_WITH_OVERFLOW:
result = new PageDataLeaf(this, id, parentPageId, data);
break;
case Page.TYPE_DATA_NODE:
result = new PageDataNode(this, id, parentPageId, data);
break;
default:
throw Message.getSQLException(ErrorCode.FILE_CORRUPTED_1, "type=" + type);
}
result.read();
return result;
}
public boolean canGetFirstOrLast() {
return false;
}
public void close(Session session) throws SQLException {
int writeRowCount;
if (store != null) {
store = null;
}
}
public Cursor find(Session session, SearchRow first, SearchRow last) throws SQLException {
PageData root = getPage(headPos);
return root.find();
}
public Cursor findFirstOrLast(Session session, boolean first) throws SQLException {
throw Message.getUnsupportedException();
}
public double getCost(Session session, int[] masks) throws SQLException {
long cost = 10 * tableData.getRowCount(session) + Constants.COST_ROW_OFFSET;
return cost;
}
public boolean needRebuild() {
return false;
}
public void remove(Session session, Row row) throws SQLException {
int invalidateRowCount;
int todo;
rowCount++;
}
public void remove(Session session) throws SQLException {
int todo;
}
public void truncate(Session session) throws SQLException {
int invalidateRowCount;
int todo;
rowCount = 0;
}
public void checkRename() throws SQLException {
throw Message.getUnsupportedException();
}
public Row getRow(Session session, int key) throws SQLException {
int todo;
return null;
}
PageStore getPageStore() {
return store;
}
/**
* Read a row from the data page at the given position.
*
* @param data the data page
* @return the row
*/
Row readRow(DataPageBinary data) throws SQLException {
return tableData.readRow(data);
}
}
...@@ -15,7 +15,7 @@ import org.h2.value.ValueLong; ...@@ -15,7 +15,7 @@ import org.h2.value.ValueLong;
/** /**
* The cursor implementation for the range index. * The cursor implementation for the range index.
*/ */
public class RangeCursor implements Cursor { class RangeCursor implements Cursor {
private boolean beforeFirst; private boolean beforeFirst;
private long current; private long current;
......
/*
* Copyright 2004-2008 H2 Group. Multiple-Licensed under the H2 License,
* Version 1.0, and under the Eclipse Public License, Version 1.0
* (http://h2database.com/html/license.html).
* Initial Developer: H2 Group
*/
package org.h2.index;
import java.sql.SQLException;
import org.h2.engine.Session;
import org.h2.result.Row;
/**
 * An index that can address individual rows directly, given their key
 * (position).
 */
public interface RowIndex extends Index {

    /**
     * Get the row with the given key.
     *
     * @param session the session
     * @param key the position of the row
     * @return the row
     */
    Row getRow(Session session, int key) throws SQLException;
}
...@@ -33,7 +33,7 @@ import org.h2.value.ValueLob; ...@@ -33,7 +33,7 @@ import org.h2.value.ValueLob;
* of a table. Each regular table has one such object, even if no primary key or * of a table. Each regular table has one such object, even if no primary key or
* indexes are defined. * indexes are defined.
*/ */
public class ScanIndex extends BaseIndex { public class ScanIndex extends BaseIndex implements RowIndex {
private int firstFree = -1; private int firstFree = -1;
private ObjectArray rows = new ObjectArray(); private ObjectArray rows = new ObjectArray();
private Storage storage; private Storage storage;
...@@ -93,13 +93,6 @@ public class ScanIndex extends BaseIndex { ...@@ -93,13 +93,6 @@ public class ScanIndex extends BaseIndex {
} }
} }
/**
* Get the row at the given position.
*
* @param session the session
* @param key the position
* @return the row
*/
public Row getRow(Session session, int key) throws SQLException { public Row getRow(Session session, int key) throws SQLException {
if (storage != null) { if (storage != null) {
return (Row) storage.getRecord(session, key); return (Row) storage.getRecord(session, key);
......
...@@ -44,7 +44,7 @@ public class ResultTempTable implements ResultExternal { ...@@ -44,7 +44,7 @@ public class ResultTempTable implements ResultExternal {
columns.add(column); columns.add(column);
int tableId = session.getDatabase().allocateObjectId(true, true); int tableId = session.getDatabase().allocateObjectId(true, true);
String tableName = "TEMP_RESULT_SET_" + tableId; String tableName = "TEMP_RESULT_SET_" + tableId;
table = schema.createTable(tableName, tableId, columns, false, false); table = schema.createTable(tableName, tableId, columns, false, false, Index.EMPTY_HEAD);
int indexId = session.getDatabase().allocateObjectId(true, true); int indexId = session.getDatabase().allocateObjectId(true, true);
IndexColumn indexColumn = new IndexColumn(); IndexColumn indexColumn = new IndexColumn();
indexColumn.column = column; indexColumn.column = column;
......
...@@ -457,9 +457,9 @@ public class Schema extends DbObjectBase { ...@@ -457,9 +457,9 @@ public class Schema extends DbObjectBase {
* @param clustered if a clustered table should be created * @param clustered if a clustered table should be created
* @return the created {@link TableData} object * @return the created {@link TableData} object
*/ */
public TableData createTable(String tableName, int id, ObjectArray columns, boolean persistent, boolean clustered) public TableData createTable(String tableName, int id, ObjectArray columns, boolean persistent, boolean clustered, int headPos)
throws SQLException { throws SQLException {
return new TableData(this, tableName, id, columns, persistent, clustered); return new TableData(this, tableName, id, columns, persistent, clustered, headPos);
} }
/** /**
......
...@@ -69,6 +69,29 @@ public class DataPageBinary extends DataPage { ...@@ -69,6 +69,29 @@ public class DataPageBinary extends DataPage {
return (buff[pos++] << 24) + ((buff[pos++] & 0xff) << 16) + ((buff[pos++] & 0xff) << 8) + (buff[pos++] & 0xff); return (buff[pos++] << 24) + ((buff[pos++] & 0xff) << 16) + ((buff[pos++] & 0xff) << 8) + (buff[pos++] & 0xff);
} }
/**
* Write a short integer at the current position.
* The current position is incremented.
*
* @param x the value
*/
public void writeShortInt(int x) {
byte[] buff = data;
buff[pos++] = (byte) (x >> 8);
buff[pos++] = (byte) x;
}
/**
* Read an short integer at the current position.
* The current position is incremented.
*
* @return the value
*/
public int readShortInt() {
byte[] buff = data;
return ((buff[pos++] & 0xff) << 8) + (buff[pos++] & 0xff);
}
// private static int getStringLenChar(String s) { // private static int getStringLenChar(String s) {
// return 4 + s.length() * 2; // return 4 + s.length() * 2;
// } // }
......
...@@ -54,7 +54,7 @@ import org.h2.util.ObjectUtils; ...@@ -54,7 +54,7 @@ import org.h2.util.ObjectUtils;
* individual objects (multiple blocks at a time) are read or written. * individual objects (multiple blocks at a time) are read or written.
* <p> * <p>
* Currently there are no in-place updates. Each row occupies one or multiple * Currently there are no in-place updates. Each row occupies one or multiple
* blocks. Row can occupy multiple pages. Rows are always contiguous (except * blocks. Rows can occupy multiple pages. Rows are always contiguous (except
* LOBs, they are stored in their own files). * LOBs, they are stored in their own files).
*/ */
public class DiskFile implements CacheWriter { public class DiskFile implements CacheWriter {
...@@ -1283,5 +1283,4 @@ public class DiskFile implements CacheWriter { ...@@ -1283,5 +1283,4 @@ public class DiskFile implements CacheWriter {
return database.getTrace(Trace.DATABASE); return database.getTrace(Trace.DATABASE);
} }
} }
...@@ -67,6 +67,8 @@ public class FileLister { ...@@ -67,6 +67,8 @@ public class FileLister {
ok = true; ok = true;
} else if (f.endsWith(Constants.SUFFIX_LOB_FILE)) { } else if (f.endsWith(Constants.SUFFIX_LOB_FILE)) {
ok = true; ok = true;
} else if (f.endsWith(Constants.SUFFIX_PAGE_FILE)) {
ok = true;
} else if (all) { } else if (all) {
if (f.endsWith(Constants.SUFFIX_LOCK_FILE)) { if (f.endsWith(Constants.SUFFIX_LOCK_FILE)) {
ok = true; ok = true;
......
/*
* Copyright 2004-2008 H2 Group. Multiple-Licensed under the H2 License,
* Version 1.0, and under the Eclipse Public License, Version 1.0
* (http://h2database.com/html/license.html).
* Initial Developer: H2 Group
*/
package org.h2.store;
import java.io.IOException;
import java.sql.SQLException;
import org.h2.constant.ErrorCode;
import org.h2.engine.Database;
import org.h2.message.Message;
import org.h2.message.Trace;
import org.h2.util.Cache;
import org.h2.util.Cache2Q;
import org.h2.util.CacheLRU;
import org.h2.util.CacheObject;
import org.h2.util.CacheWriter;
import org.h2.util.FileUtils;
/**
* This class represents a file that is split into pages. The first page (page
* 0) contains the file header, the second page (page 1) is the root of the
* system table. The file header is 128 bytes, the format is:
* <ul><li>0-47: file header (3 time "-- H2 0.5/B -- \n")
* </li><li>48-51: database page size in bytes
* (512 - 32768, must be a power of 2)
* </li><li>52: write version (0, otherwise the file is opened in read-only mode)
* </li><li>53: read version (0, otherwise opening the file fails)
* </li><li>54-57: page number of the system table root
* </li><li>58-61: page number of the first free list page
* </li><li>62-65: number of free pages
* </li></ul>
*/
public class PageStore implements CacheWriter {

    /** The smallest supported page size, in bytes. */
    private static final int PAGE_SIZE_MIN = 512;

    /** The largest supported page size, in bytes. */
    private static final int PAGE_SIZE_MAX = 32768;

    /** The page size used when creating a new file. */
    private static final int PAGE_SIZE_DEFAULT = 1024;

    /** The size of the file header, including the FileStore prefix. */
    private static final int FILE_HEADER_SIZE = 128;

    /** The format version required to read the file (header byte 53). */
    private static final int READ_VERSION = 0;

    /** The format version required to write the file (header byte 52). */
    private static final int WRITE_VERSION = 0;

    private Database database;
    private int pageSize;
    // Number of bits to shift a page id to get its file offset
    // (pageSize == 1 << pageSizeShift; page sizes are powers of two).
    private int pageSizeShift;
    private String fileName;
    private FileStore file;
    private String accessMode;
    private int cacheSize;
    private Cache cache;
    // Scratch buffer for the header bytes that follow the FileStore prefix.
    private DataPageBinary fileHeader;
    private int systemRootPageId;
    private int freeListRootPageId;
    private int freePageCount;
    private int pageCount;

    /**
     * Create a new page store object.
     *
     * @param database the database
     * @param fileName the file name
     * @param accessMode the access mode
     * @param cacheSizeDefault the default cache size
     */
    public PageStore(Database database, String fileName, String accessMode, int cacheSizeDefault) {
        this.database = database;
        this.fileName = fileName;
        this.accessMode = accessMode;
        this.cacheSize = cacheSizeDefault;
        String cacheType = database.getCacheType();
        if (Cache2Q.TYPE_NAME.equals(cacheType)) {
            this.cache = new Cache2Q(this, cacheSize);
        } else {
            this.cache = new CacheLRU(this, cacheSize);
        }
    }

    /**
     * Open the file and read the header, or create a new file with a
     * freshly written header if none exists yet.
     */
    public void open() throws SQLException {
        try {
            fileHeader = new DataPageBinary(database, new byte[FILE_HEADER_SIZE - FileStore.HEADER_LENGTH]);
            if (FileUtils.exists(fileName)) {
                file = database.openFile(fileName, accessMode, true);
                readHeader();
            } else {
                setPageSize(PAGE_SIZE_DEFAULT);
                file = database.openFile(fileName, accessMode, false);
                writeHeader();
            }
            pageCount = (int) (file.length() / pageSize);
        } catch (SQLException e) {
            // close() tolerates a partially initialized store
            close();
            throw e;
        }
    }

    /**
     * Read and validate the file header. If the file was written by a newer
     * version (non-zero write version), it is re-opened read-only; if it
     * requires a newer reader (non-zero read version), opening fails.
     */
    private void readHeader() throws SQLException {
        long length = file.length();
        if (length < FILE_HEADER_SIZE) {
            throw Message.getSQLException(ErrorCode.FILE_CORRUPTED_1, fileName);
        }
        database.notifyFileSize(length);
        file.seek(FileStore.HEADER_LENGTH);
        file.readFully(fileHeader.getBytes(), 0, FILE_HEADER_SIZE - FileStore.HEADER_LENGTH);
        setPageSize(fileHeader.readInt());
        int writeVersion = fileHeader.readByte();
        int readVersion = fileHeader.readByte();
        if (readVersion != 0) {
            throw Message.getSQLException(ErrorCode.FILE_VERSION_ERROR_1, fileName);
        }
        if (writeVersion != 0) {
            try {
                file.close();
            } catch (IOException e) {
                throw Message.convertIOException(e, "close");
            }
            accessMode = "r";
            file = database.openFile(fileName, accessMode, true);
        }
        // Continue reading at the current position: the root page ids follow
        // the two version bytes (offsets 54-61 of the documented layout).
        // Resetting the buffer here would re-read the page size field as the
        // system root page id.
        systemRootPageId = fileHeader.readInt();
        freeListRootPageId = fileHeader.readInt();
        freePageCount = fileHeader.readInt();
    }

    /**
     * Validate and apply a page size. The size must be a power of two
     * between PAGE_SIZE_MIN and PAGE_SIZE_MAX.
     *
     * @param size the page size in bytes
     */
    private void setPageSize(int size) throws SQLException {
        // (size & (size - 1)) == 0 tests for a power of two
        if (size < PAGE_SIZE_MIN || size > PAGE_SIZE_MAX || (size & (size - 1)) != 0) {
            throw Message.getSQLException(ErrorCode.FILE_CORRUPTED_1, fileName);
        }
        pageSize = size;
        int shift = 0;
        while ((1 << shift) < size) {
            shift++;
        }
        pageSizeShift = shift;
    }

    /**
     * Write the file header (after the FileStore prefix) and pad the rest of
     * the first page with zeros.
     */
    private void writeHeader() throws SQLException {
        fileHeader.reset();
        fileHeader.writeInt(pageSize);
        fileHeader.writeByte((byte) WRITE_VERSION);
        fileHeader.writeByte((byte) READ_VERSION);
        fileHeader.writeInt(systemRootPageId);
        fileHeader.writeInt(freeListRootPageId);
        fileHeader.writeInt(freePageCount);
        file.seek(FileStore.HEADER_LENGTH);
        file.write(fileHeader.getBytes(), 0, FILE_HEADER_SIZE - FileStore.HEADER_LENGTH);
        byte[] filler = new byte[pageSize - FILE_HEADER_SIZE];
        file.write(filler, 0, filler.length);
    }

    /**
     * Close the file. Safe to call if the file was never opened (for example
     * when open() failed before the file was created).
     */
    public void close() throws SQLException {
        // TODO flush the cache and write the header before closing
        if (file != null) {
            try {
                file.close();
            } catch (IOException e) {
                throw Message.convertIOException(e, "close");
            } finally {
                file = null;
            }
        }
    }

    /**
     * Flush the transaction log.
     */
    public void flushLog() throws SQLException {
        // TODO not yet implemented
    }

    public Trace getTrace() {
        return database.getTrace(Trace.DATABASE);
    }

    public void writeBack(CacheObject entry) throws SQLException {
        // TODO write the page back to the file
    }

    /**
     * Allocate a page.
     *
     * @return the page id
     */
    public int allocatePage() {
        if (freePageCount == 0) {
            return pageCount++;
        }
        // TODO return a page from the free list instead
        return 0;
    }

    /**
     * Create a data page.
     *
     * @return the data page.
     */
    public DataPageBinary createDataPage() {
        return new DataPageBinary(database, new byte[pageSize]);
    }

    /**
     * Read a page.
     *
     * @param pos the page id
     * @return the page
     */
    public DataPageBinary readPage(int pos) throws SQLException {
        // widen before shifting: an int shift overflows past 2 GB,
        // but the file may be up to 256 GB
        file.seek((long) pos << pageSizeShift);
        DataPageBinary page = createDataPage();
        file.readFully(page.getBytes(), 0, pageSize);
        return page;
    }

    /**
     * Get the page size.
     *
     * @return the page size
     */
    public int getPageSize() {
        return pageSize;
    }

    /**
     * Get the number of pages (including free pages).
     *
     * @return the page count
     */
    public int getPageCount() {
        return pageCount;
    }

    /**
     * Write a page.
     *
     * @param pageId the page id
     * @param data the data
     */
    public void writePage(int pageId, DataPageBinary data) throws SQLException {
        // widen before shifting to avoid int overflow for offsets >= 2 GB
        file.seek((long) pageId << pageSizeShift);
        file.write(data.getBytes(), 0, pageSize);
    }
}
...@@ -25,6 +25,8 @@ import org.h2.index.HashIndex; ...@@ -25,6 +25,8 @@ import org.h2.index.HashIndex;
import org.h2.index.Index; import org.h2.index.Index;
import org.h2.index.IndexType; import org.h2.index.IndexType;
import org.h2.index.MultiVersionIndex; import org.h2.index.MultiVersionIndex;
import org.h2.index.PageScanIndex;
import org.h2.index.RowIndex;
import org.h2.index.ScanIndex; import org.h2.index.ScanIndex;
import org.h2.index.TreeIndex; import org.h2.index.TreeIndex;
import org.h2.message.Message; import org.h2.message.Message;
...@@ -47,7 +49,7 @@ import org.h2.value.Value; ...@@ -47,7 +49,7 @@ import org.h2.value.Value;
*/ */
public class TableData extends Table implements RecordReader { public class TableData extends Table implements RecordReader {
private final boolean clustered; private final boolean clustered;
private ScanIndex scanIndex; private RowIndex scanIndex;
private long rowCount; private long rowCount;
private Session lockExclusive; private Session lockExclusive;
private HashSet lockShared = new HashSet(); private HashSet lockShared = new HashSet();
...@@ -58,14 +60,18 @@ public class TableData extends Table implements RecordReader { ...@@ -58,14 +60,18 @@ public class TableData extends Table implements RecordReader {
private boolean containsLargeObject; private boolean containsLargeObject;
public TableData(Schema schema, String tableName, int id, ObjectArray columns, public TableData(Schema schema, String tableName, int id, ObjectArray columns,
boolean persistent, boolean clustered) throws SQLException { boolean persistent, boolean clustered, int headPos) throws SQLException {
super(schema, id, tableName, persistent); super(schema, id, tableName, persistent);
Column[] cols = new Column[columns.size()]; Column[] cols = new Column[columns.size()];
columns.toArray(cols); columns.toArray(cols);
setColumns(cols); setColumns(cols);
this.clustered = clustered; this.clustered = clustered;
if (!clustered) { if (!clustered) {
if (SysProperties.PAGE_STORE && persistent) {
scanIndex = new PageScanIndex(this, id, IndexColumn.wrap(cols), IndexType.createScan(persistent), headPos);
} else {
scanIndex = new ScanIndex(this, id, IndexColumn.wrap(cols), IndexType.createScan(persistent)); scanIndex = new ScanIndex(this, id, IndexColumn.wrap(cols), IndexType.createScan(persistent));
}
indexes.add(scanIndex); indexes.add(scanIndex);
} }
for (int i = 0; i < cols.length; i++) { for (int i = 0; i < cols.length; i++) {
...@@ -573,6 +579,16 @@ public class TableData extends Table implements RecordReader { ...@@ -573,6 +579,16 @@ public class TableData extends Table implements RecordReader {
} }
public Record read(Session session, DataPage s) throws SQLException { public Record read(Session session, DataPage s) throws SQLException {
return readRow(s);
}
/**
* Read a row from the data page.
*
* @param s the data page
* @return the row
*/
public Row readRow(DataPage s) throws SQLException {
int len = s.readInt(); int len = s.readInt();
Value[] data = new Value[len]; Value[] data = new Value[len];
for (int i = 0; i < len; i++) { for (int i = 0; i < len; i++) {
...@@ -587,7 +603,7 @@ public class TableData extends Table implements RecordReader { ...@@ -587,7 +603,7 @@ public class TableData extends Table implements RecordReader {
* *
* @param count the row count * @param count the row count
*/ */
public void setRowCount(int count) { public void setRowCount(long count) {
this.rowCount = count; this.rowCount = count;
} }
......
...@@ -283,9 +283,10 @@ java org.h2.test.TestAll timer ...@@ -283,9 +283,10 @@ java org.h2.test.TestAll timer
System.setProperty("h2.check2", "true"); System.setProperty("h2.check2", "true");
/* /*
remove emergencyReserve?
build.sh from mac (test in Ubuntu)
don't store default values (store a special value)
build.sh from mac (test in Ubuntu)
btree: maybe split at the insertion point
split files (1 GB max size) split files (1 GB max size)
multithreaded kernel multithreaded kernel
......
...@@ -571,3 +571,5 @@ factorial blogspot displaying thedevcloud dayof safety chrome favorite thumbs ...@@ -571,3 +571,5 @@ factorial blogspot displaying thedevcloud dayof safety chrome favorite thumbs
localization olivier hprof jps jstack qua processor casting brasilia leap localization olivier hprof jps jstack qua processor casting brasilia leap
daylight vision declarative shape formula webapp catalina study impact daylight vision declarative shape formula webapp catalina study impact
statisticlog activeobjects manske redeployment michael kaspersky datatext statisticlog activeobjects manske redeployment michael kaspersky datatext
bleyl donald conservative offsets diabetes ansorg allocating osmond gluco
joachim
\ No newline at end of file
...@@ -38,7 +38,8 @@ public class FileViewer extends Tool { ...@@ -38,7 +38,8 @@ public class FileViewer extends Tool {
" [-tail] Display the last lines\n" + " [-tail] Display the last lines\n" +
" [-lines <x>] Display only x lines (default: 30)\n" + " [-lines <x>] Display only x lines (default: 30)\n" +
" [-quiet] Do not print progress information)"); " [-quiet] Do not print progress information)");
// out.println("See also http://h2database.com/javadoc/" + getClass().getName().replace('.', '/') + ".html"); // out.println("See also http://h2database.com/javadoc/" +
// getClass().getName().replace('.', '/') + ".html");
} }
public void run(String[] args) { public void run(String[] args) {
......
Markdown 格式
0%
您添加了 0 到此讨论。请谨慎行事。
请先完成此评论的编辑!
注册 或者 后发表评论