Commit 616351ea authored by Thomas Mueller

Page store: support clustered primary keys

Parent cc21adca
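For context: this commit lets a single-column integer primary key be served directly by the table's data pages (the scan index) through the new PageDelegateIndex, instead of building a separate b-tree index. A minimal JDBC sketch of the user-visible behavior; it assumes an H2 build that contains this commit running with the page store enabled (isPageStoreEnabled() in the diff; at the time this was switched on via a system property), and the class name is hypothetical:

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.ResultSet;
import java.sql.Statement;

public class ClusteredPkDemo {
    public static void main(String[] args) throws Exception {
        // a file-based database; the page store only applies to persistent data
        Connection conn = DriverManager.getConnection("jdbc:h2:~/test", "sa", "");
        Statement stat = conn.createStatement();
        stat.execute("CREATE TABLE TEST(ID INT PRIMARY KEY, NAME VARCHAR)");
        stat.execute("INSERT INTO TEST VALUES(1, 'Hello')");
        // with this commit the primary key delegates to the scan (data) index,
        // so this lookup touches only the table's data pages
        ResultSet rs = stat.executeQuery("SELECT NAME FROM TEST WHERE ID = 1");
        while (rs.next()) {
            System.out.println(rs.getString(1));
        }
        conn.close();
    }
}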
......@@ -4541,63 +4541,66 @@ Supporters
Many thanks to those who helped by finding and reporting bugs, gave valuable feedback, spread the word and have translated this project. Also many thanks to the donors who contributed via PayPal:
@history_1025_a
NetSuxxess GmbH, Germany
@history_1026_a
SkyCash, Poland
@history_1026_li
@history_1027_li
Donald Bleyl, USA
@history_1027_li
@history_1028_li
lumber-mill.co.jp, Japan
@history_1028_li
@history_1029_li
Frank Berger, Germany
@history_1029_li
@history_1030_li
Ashwin Jayaprakash, USA
@history_1030_li
@history_1031_li
Florent Ramiere, France
@history_1031_li
@history_1032_li
Jun Iyama, Japan
@history_1032_li
@history_1033_li
Antonio Casqueiro, Portugal
@history_1033_li
@history_1034_li
Oliver Computing LLC, USA
@history_1034_li
@history_1035_li
Harpal Grover Consulting Inc., USA
@history_1035_li
@history_1036_li
Elisabetta Berlini, Italy
@history_1036_li
@history_1037_li
William Gilbert, USA
@history_1037_li
@history_1038_li
Antonio Dieguez, Chile
@history_1038_a
@history_1039_a
Ontology Works, USA
@history_1039_li
@history_1040_li
Pete Haidinyak, USA
@history_1040_li
@history_1041_li
William Osmond, USA
@history_1041_li
@history_1042_li
Joachim Ansorg, Germany
@history_1042_li
@history_1043_li
Oliver Soerensen, Germany
@history_1043_li
@history_1044_li
Christos Vasilakis, Greece
@history_1044_li
@history_1045_li
Fyodor Kupolov, Denmark
@installation_1000_h1
......@@ -8935,13 +8938,16 @@ Document internal features such as BELONGS_TO_TABLE, NULL_TO_DEFAULT, SEQUENCE.
@roadmap_1414_li
Issue 107: Prefer using the ORDER BY index if LIMIT is used.
@roadmap_1415_h2
@roadmap_1415_li
Support reading sequences using DatabaseMetaData.getTables(null, null, null, new String[]{"SEQUENCE"}). See PostgreSQL.
@roadmap_1416_h2
Not Planned
@roadmap_1416_li
@roadmap_1417_li
HSQLDB (did) support this: select id i from test where i<0 (other databases don't). Supporting it may break compatibility.
@roadmap_1417_li
@roadmap_1418_li
String.intern (so that Strings can be compared with ==) will not be used because some VMs have problems when used extensively.
@sourceError_1000_h1
......
......@@ -4541,63 +4541,66 @@ Unicodeをサポート
#Many thanks to those who helped by finding and reporting bugs, gave valuable feedback, spread the word and have translated this project. Also many thanks to the donors who contributed via PayPal:
@history_1025_a
#NetSuxxess GmbH, Germany
@history_1026_a
#SkyCash, Poland
@history_1026_li
@history_1027_li
#Donald Bleyl, USA
@history_1027_li
@history_1028_li
#lumber-mill.co.jp, Japan
@history_1028_li
@history_1029_li
#Frank Berger, Germany
@history_1029_li
@history_1030_li
#Ashwin Jayaprakash, USA
@history_1030_li
@history_1031_li
#Florent Ramiere, France
@history_1031_li
@history_1032_li
#Jun Iyama, Japan
@history_1032_li
@history_1033_li
#Antonio Casqueiro, Portugal
@history_1033_li
@history_1034_li
#Oliver Computing LLC, USA
@history_1034_li
@history_1035_li
#Harpal Grover Consulting Inc., USA
@history_1035_li
@history_1036_li
#Elisabetta Berlini, Italy
@history_1036_li
@history_1037_li
#William Gilbert, USA
@history_1037_li
@history_1038_li
#Antonio Dieguez, Chile
@history_1038_a
@history_1039_a
#Ontology Works, USA
@history_1039_li
@history_1040_li
#Pete Haidinyak, USA
@history_1040_li
@history_1041_li
#William Osmond, USA
@history_1041_li
@history_1042_li
#Joachim Ansorg, Germany
@history_1042_li
@history_1043_li
#Oliver Soerensen, Germany
@history_1043_li
@history_1044_li
#Christos Vasilakis, Greece
@history_1044_li
@history_1045_li
#Fyodor Kupolov, Denmark
@installation_1000_h1
......@@ -8935,13 +8938,16 @@ SQLコマンドがコマンドエリアに表示されます。
@roadmap_1414_li
#Issue 107: Prefer using the ORDER BY index if LIMIT is used.
@roadmap_1415_h2
@roadmap_1415_li
#Support reading sequences using DatabaseMetaData.getTables(null, null, null, new String[]{"SEQUENCE"}). See PostgreSQL.
@roadmap_1416_h2
#Not Planned
@roadmap_1416_li
@roadmap_1417_li
#HSQLDB (did) support this: select id i from test where i<0 (other databases don't). Supporting it may break compatibility.
@roadmap_1417_li
@roadmap_1418_li
#String.intern (so that Strings can be compared with ==) will not be used because some VMs have problems when used extensively.
@sourceError_1000_h1
......
......@@ -1510,26 +1510,27 @@ history_1021_p=Java is also future proof\: a lot of companies support Java, and
history_1022_p=This software does not rely on many Java libraries or other software, to increase the portability and ease of use, and for performance reasons. For example, the encryption algorithms and many library functions are implemented in the database instead of using the existing libraries. Libraries that are not available in open source Java implementations (such as Swing) are not used or only used for specific features.
history_1023_h2=Supporters
history_1024_p=Many thanks to those who helped by finding and reporting bugs, gave valuable feedback, spread the word and have translated this project. Also many thanks to the donors who contributed via PayPal\:
history_1025_a=SkyCash, Poland
history_1026_li=Donald Bleyl, USA
history_1027_li=lumber-mill.co.jp, Japan
history_1028_li=Frank Berger, Germany
history_1029_li=Ashwin Jayaprakash, USA
history_1030_li=Florent Ramiere, France
history_1031_li=Jun Iyama, Japan
history_1032_li=Antonio Casqueiro, Portugal
history_1033_li=Oliver Computing LLC, USA
history_1034_li=Harpal Grover Consulting Inc., USA
history_1035_li=Elisabetta Berlini, Italy
history_1036_li=William Gilbert, USA
history_1037_li=Antonio Dieguez, Chile
history_1038_a=Ontology Works, USA
history_1039_li=Pete Haidinyak, USA
history_1040_li=William Osmond, USA
history_1041_li=Joachim Ansorg, Germany
history_1042_li=Oliver Soerensen, Germany
history_1043_li=Christos Vasilakis, Greece
history_1044_li=Fyodor Kupolov, Denmark
history_1025_a=NetSuxxess GmbH, Germany
history_1026_a=SkyCash, Poland
history_1027_li=Donald Bleyl, USA
history_1028_li=lumber-mill.co.jp, Japan
history_1029_li=Frank Berger, Germany
history_1030_li=Ashwin Jayaprakash, USA
history_1031_li=Florent Ramiere, France
history_1032_li=Jun Iyama, Japan
history_1033_li=Antonio Casqueiro, Portugal
history_1034_li=Oliver Computing LLC, USA
history_1035_li=Harpal Grover Consulting Inc., USA
history_1036_li=Elisabetta Berlini, Italy
history_1037_li=William Gilbert, USA
history_1038_li=Antonio Dieguez, Chile
history_1039_a=Ontology Works, USA
history_1040_li=Pete Haidinyak, USA
history_1041_li=William Osmond, USA
history_1042_li=Joachim Ansorg, Germany
history_1043_li=Oliver Soerensen, Germany
history_1044_li=Christos Vasilakis, Greece
history_1045_li=Fyodor Kupolov, Denmark
installation_1000_h1=Installation
installation_1001_a=Requirements
installation_1002_a=Supported Platforms
......@@ -2975,9 +2976,10 @@ roadmap_1411_li=Optimization for EXISTS\: convert to inner join if possible.
roadmap_1412_li=Improve database file locking (maybe use native file locking). The current approach seems to be problematic if the file system is on a remote share (see Google Group 'Lock file modification time is in the future').
roadmap_1413_li=Document internal features such as BELONGS_TO_TABLE, NULL_TO_DEFAULT, SEQUENCE.
roadmap_1414_li=Issue 107\: Prefer using the ORDER BY index if LIMIT is used.
roadmap_1415_h2=Not Planned
roadmap_1416_li=HSQLDB (did) support this\: select id i from test where i<0 (other databases don't). Supporting it may break compatibility.
roadmap_1417_li=String.intern (so that Strings can be compared with \=\=) will not be used because some VMs have problems when used extensively.
roadmap_1415_li=Support reading sequences using DatabaseMetaData.getTables(null, null, null, new String[]{"SEQUENCE"}). See PostgreSQL.
roadmap_1416_h2=Not Planned
roadmap_1417_li=HSQLDB (did) support this\: select id i from test where i<0 (other databases don't). Supporting it may break compatibility.
roadmap_1418_li=String.intern (so that Strings can be compared with \=\=) will not be used because some VMs have problems when used extensively.
sourceError_1000_h1=Online Error Analyzer
sourceError_1001_a=Home
sourceError_1002_a=Input
......
......@@ -31,6 +31,7 @@ import org.h2.command.ddl.CreateRole;
import org.h2.command.ddl.CreateSchema;
import org.h2.command.ddl.CreateSequence;
import org.h2.command.ddl.CreateTable;
import org.h2.command.ddl.CreateTableData;
import org.h2.command.ddl.CreateTrigger;
import org.h2.command.ddl.CreateUser;
import org.h2.command.ddl.CreateUserDataType;
......@@ -3783,15 +3784,22 @@ public class Parser {
for (String c : cols) {
columns.add(new Column(c, Value.STRING));
}
int id = database.allocateObjectId(true, true);
recursiveTable = schema.createTable(tempViewName, id, columns, true, false, true, false, Index.EMPTY_HEAD, session);
CreateTableData data = new CreateTableData();
data.id = database.allocateObjectId(true, true);
data.tableName = tempViewName;
data.temporary = true;
data.persistData = true;
data.persistIndexes = false;
data.headPos = Index.EMPTY_HEAD;
data.session = session;
recursiveTable = schema.createTable(data);
session.addLocalTempTable(recursiveTable);
String querySQL = StringCache.getNew(sqlCommand.substring(parseIndex));
read("AS");
Query withQuery = parseSelect();
withQuery.prepare();
session.removeLocalTempTable(recursiveTable);
id = database.allocateObjectId(true, true);
int id = database.allocateObjectId(true, true);
TableView view = new TableView(schema, id, tempViewName, querySQL, null, cols, session, true);
view.setTemporary(true);
// view.setOnCommitDrop(true);
......@@ -4720,9 +4728,6 @@ public class Parser {
read("PERSISTENT");
command.setPersistData(false);
}
if (readIf("CLUSTERED")) {
command.setClustered(true);
}
return command;
}
......
......@@ -244,7 +244,16 @@ public class AlterTableAlterColumn extends SchemaCommand {
// still need a new id because using 0 would mean: the new table tries
// to use the rows of the table 0 (the meta table)
int id = db.allocateObjectId(true, true);
TableData newTable = getSchema().createTable(tempName, id, newColumns, table.isTemporary(), table.isPersistIndexes(), table.isPersistData(), false, Index.EMPTY_HEAD, session);
CreateTableData data = new CreateTableData();
data.tableName = tempName;
data.id = id;
data.columns = newColumns;
data.temporary = table.isTemporary();
data.persistData = table.isPersistData();
data.persistIndexes = table.isPersistIndexes();
data.headPos = Index.EMPTY_HEAD;
data.session = session;
TableData newTable = getSchema().createTable(data);
newTable.setComment(table.getComment());
StringBuilder buff = new StringBuilder();
buff.append(newTable.getCreateSQL());
......
......@@ -7,7 +7,6 @@
package org.h2.command.ddl;
import java.sql.SQLException;
import org.h2.command.Prepared;
import org.h2.command.dml.Insert;
import org.h2.command.dml.Query;
......@@ -30,23 +29,20 @@ import org.h2.value.DataType;
*/
public class CreateTable extends SchemaCommand {
private String tableName;
private CreateTableData data = new CreateTableData();
private ObjectArray<Prepared> constraintCommands = ObjectArray.newInstance();
private ObjectArray<Column> columns = ObjectArray.newInstance();
private IndexColumn[] pkColumns;
private boolean ifNotExists;
private boolean persistIndexes = true;
private boolean persistData = true;
private boolean temporary;
private boolean globalTemporary;
private boolean onCommitDrop;
private boolean onCommitTruncate;
private Query asQuery;
private String comment;
private boolean clustered;
public CreateTable(Session session, Schema schema) {
super(session, schema);
data.persistIndexes = true;
data.persistData = true;
}
public void setQuery(Query query) {
......@@ -54,11 +50,11 @@ public class CreateTable extends SchemaCommand {
}
public void setTemporary(boolean temporary) {
this.temporary = temporary;
data.temporary = temporary;
}
public void setTableName(String tableName) {
this.tableName = tableName;
data.tableName = tableName;
}
/**
......@@ -67,10 +63,7 @@ public class CreateTable extends SchemaCommand {
* @param column the column to add
*/
public void addColumn(Column column) {
if (columns == null) {
columns = ObjectArray.newInstance();
}
columns.add(column);
data.columns.add(column);
}
/**
......@@ -104,24 +97,24 @@ public class CreateTable extends SchemaCommand {
session.commit(true);
Database db = session.getDatabase();
if (!db.isPersistent()) {
persistIndexes = false;
data.persistIndexes = false;
}
if (getSchema().findTableOrView(session, tableName) != null) {
if (getSchema().findTableOrView(session, data.tableName) != null) {
if (ifNotExists) {
return 0;
}
throw Message.getSQLException(ErrorCode.TABLE_OR_VIEW_ALREADY_EXISTS_1, tableName);
throw Message.getSQLException(ErrorCode.TABLE_OR_VIEW_ALREADY_EXISTS_1, data.tableName);
}
if (asQuery != null) {
asQuery.prepare();
if (columns.size() == 0) {
if (data.columns.size() == 0) {
generateColumnsFromQuery();
} else if (columns.size() != asQuery.getColumnCount()) {
} else if (data.columns.size() != asQuery.getColumnCount()) {
throw Message.getSQLException(ErrorCode.COLUMN_COUNT_DOES_NOT_MATCH);
}
}
if (pkColumns != null) {
for (Column c : columns) {
for (Column c : data.columns) {
for (IndexColumn idxCol : pkColumns) {
if (c.getName().equals(idxCol.columnName)) {
c.setNullable(false);
......@@ -130,21 +123,23 @@ public class CreateTable extends SchemaCommand {
}
}
ObjectArray<Sequence> sequences = ObjectArray.newInstance();
for (Column c : columns) {
for (Column c : data.columns) {
if (c.isAutoIncrement()) {
int objId = getObjectId(true, true);
c.convertAutoIncrementToSequence(session, getSchema(), objId, temporary);
c.convertAutoIncrementToSequence(session, getSchema(), objId, data.temporary);
}
Sequence seq = c.getSequence();
if (seq != null) {
sequences.add(seq);
}
}
int id = getObjectId(true, true);
TableData table = getSchema().createTable(tableName, id, columns, temporary, persistIndexes, persistData, clustered, headPos, session);
data.id = getObjectId(true, true);
data.headPos = headPos;
data.session = session;
TableData table = getSchema().createTable(data);
table.setComment(comment);
table.setGlobalTemporary(globalTemporary);
if (temporary && !globalTemporary) {
if (data.temporary && !globalTemporary) {
if (onCommitDrop) {
table.setOnCommitDrop(true);
}
......@@ -156,7 +151,7 @@ public class CreateTable extends SchemaCommand {
db.addSchemaObject(session, table);
}
try {
for (Column c : columns) {
for (Column c : data.columns) {
c.prepareExpression(session);
}
for (Sequence sequence : sequences) {
......@@ -234,7 +229,7 @@ public class CreateTable extends SchemaCommand {
}
public void setPersistIndexes(boolean persistIndexes) {
this.persistIndexes = persistIndexes;
data.persistIndexes = persistIndexes;
}
public void setGlobalTemporary(boolean globalTemporary) {
......@@ -259,12 +254,8 @@ public class CreateTable extends SchemaCommand {
this.comment = comment;
}
public void setClustered(boolean clustered) {
this.clustered = clustered;
}
public void setPersistData(boolean persistData) {
this.persistData = persistData;
data.persistData = persistData;
}
}
/*
* Copyright 2004-2009 H2 Group. Multiple-Licensed under the H2 License,
* Version 1.0, and under the Eclipse Public License, Version 1.0
* (http://h2database.com/html/license.html).
* Initial Developer: H2 Group
*/
package org.h2.command.ddl;
import org.h2.engine.Session;
import org.h2.schema.Schema;
import org.h2.table.Column;
import org.h2.util.ObjectArray;
/**
* The data required to create a table.
*/
public class CreateTableData {
/**
* The schema.
*/
public Schema schema;
/**
* The table name.
*/
public String tableName;
/**
* The object id.
*/
public int id;
/**
* The column list.
*/
public ObjectArray<Column> columns = ObjectArray.newInstance();
/**
* Whether this is a temporary table.
*/
public boolean temporary;
/**
* Whether the indexes should be persisted.
*/
public boolean persistIndexes;
/**
* Whether the data should be persisted.
*/
public boolean persistData;
/**
* The head position.
*/
public int headPos;
/**
* The session.
*/
public Session session;
}
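The same parameter-object pattern replaces the old nine-argument createTable signature at every call site in this diff (Parser, AlterTableAlterColumn, Database, ResultTempTable, PageStore, Schema). A condensed usage sketch; the field values are illustrative, and database, session and schema are assumed to be in scope:

CreateTableData data = new CreateTableData();
data.id = database.allocateObjectId(true, true);
data.tableName = "TEST";
data.columns.add(new Column("ID", Value.INT));
data.temporary = false;
data.persistIndexes = true;
data.persistData = true;
data.headPos = Index.EMPTY_HEAD;
data.session = session;
// Schema.createTable() fills in data.schema before constructing the table
TableData table = schema.createTable(data);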
......@@ -17,6 +17,7 @@ import java.util.Set;
import java.util.StringTokenizer;
import org.h2.api.DatabaseEventListener;
import org.h2.command.ddl.CreateTableData;
import org.h2.command.dml.SetTypes;
import org.h2.constant.ErrorCode;
import org.h2.constant.SysProperties;
......@@ -626,7 +627,8 @@ public class Database implements DataHandler {
roles.put(Constants.PUBLIC_ROLE_NAME, publicRole);
systemUser.setAdmin(true);
systemSession = new Session(this, systemUser, ++nextSessionId);
ObjectArray<Column> cols = ObjectArray.newInstance();
CreateTableData data = new CreateTableData();
ObjectArray<Column> cols = data.columns;
Column columnId = new Column("ID", Value.INT);
columnId.setNullable(false);
cols.add(columnId);
......@@ -637,7 +639,14 @@ public class Database implements DataHandler {
if (pageStore != null) {
headPos = pageStore.getSystemTableHeadPos();
}
meta = mainSchema.createTable("SYS", 0, cols, false, persistent, persistent, false, headPos, systemSession);
data.tableName = "SYS";
data.id = 0;
data.temporary = false;
data.persistData = persistent;
data.persistIndexes = persistent;
data.headPos = headPos;
data.session = systemSession;
meta = mainSchema.createTable(data);
IndexColumn[] pkCols = IndexColumn.wrap(new Column[] { columnId });
metaIdIndex = meta.addIndex(systemSession, "SYS_ID", 0, pkCols, IndexType.createPrimaryKey(
false, false), Index.EMPTY_HEAD, null);
......
......@@ -56,7 +56,7 @@ public class PageBtreeIndex extends PageIndex {
root.parentPageId = PageBtree.ROOT;
store.updateRecord(root, true, root.data);
} else {
rootPageId = store.getRootPageId(this);
rootPageId = store.getRootPageId(id);
PageBtree root = getPage(rootPageId);
rowCount = root.getRowCount();
if (rowCount == 0 && store.isRecoveryRunning()) {
......@@ -297,12 +297,12 @@ public class PageBtreeIndex extends PageIndex {
*/
SearchRow readRow(Data data, int offset, boolean onlyPosition) throws SQLException {
data.setPos(offset);
int pos = data.readInt();
long pos = data.readVarLong();
if (onlyPosition) {
return tableData.getRow(null, pos);
return tableData.getRow(null, (int) pos);
}
SearchRow row = table.getTemplateSimpleRow(columns.length == 1);
row.setPos(pos);
row.setPos((int) pos);
for (Column col : columns) {
int idx = col.getColumnId();
row.setValue(idx, data.readValue());
......@@ -320,7 +320,7 @@ public class PageBtreeIndex extends PageIndex {
*/
void writeRow(Data data, int offset, SearchRow row, boolean onlyPosition) throws SQLException {
data.setPos(offset);
data.writeInt(row.getPos());
data.writeVarLong(row.getPos());
if (!onlyPosition) {
for (Column col : columns) {
int idx = col.getColumnId();
......
......@@ -20,22 +20,36 @@ import org.h2.store.PageStore;
/**
* A b-tree leaf page that contains index data.
* Format:
* <ul><li>0-3: parent page id (0 for root)
* </li><li>4-4: page type
* </li><li>5-8: index id
* </li><li>9-10: entry count
* </li><li>11-: list offsets (2 bytes each)
* </li><li>data
* <ul><li>parent page id (0 for root): int
* </li><li>page type: byte
* </li><li>index id: varInt
* </li><li>entry count: short
* </li><li>list of offsets: shortInt
* </li><li>data (pos: varLong, value,...)
* </li></ul>
*/
public class PageBtreeLeaf extends PageBtree {
private static final int OFFSET_LENGTH = 2;
private static final int OFFSET_START = 11;
PageBtreeLeaf(PageBtreeIndex index, int pageId, Data data) {
super(index, pageId, data);
start = OFFSET_START;
}
/**
* Create a new page.
*
* @param index the index
* @param pageId the page id
* @param parentPageId the parent
* @return the page
*/
static PageBtreeLeaf create(PageBtreeIndex index, int pageId, int parentPageId) {
PageBtreeLeaf p = new PageBtreeLeaf(index, pageId, index.getPageStore().createData());
p.parentPageId = parentPageId;
p.writeHead();
p.start = p.data.length();
return p;
}
/**
......@@ -57,7 +71,7 @@ public class PageBtreeLeaf extends PageBtree {
this.parentPageId = data.readInt();
int type = data.readByte();
onlyPosition = (type & Page.FLAG_LAST) == 0;
int indexId = data.readInt();
int indexId = data.readVarInt();
if (indexId != index.getId()) {
throw Message.getSQLException(ErrorCode.FILE_CORRUPTED_1,
"page:" + getPos() + " expected index:" + index.getId() +
......@@ -78,11 +92,16 @@ public class PageBtreeLeaf extends PageBtree {
int last = entryCount == 0 ? pageSize : offsets[entryCount - 1];
if (last - rowLength < start + OFFSET_LENGTH) {
if (entryCount > 1) {
// split at the insertion point to better fill pages
int x = find(row, false, true, true);
if (entryCount < 5) {
// required, otherwise the index doesn't work correctly
return entryCount / 2;
}
// split near the insertion point to better fill pages
// split in half would be:
// return entryCount / 2;
int x = find(row, false, true, true);
return x < 2 ? 2 : x >= entryCount - 3 ? entryCount - 3 : x;
int third = entryCount / 3;
return x < third ? third : x >= 2 * third ? 2 * third : x;
}
onlyPosition = true;
// change the offsets (now storing only positions)
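The middle-third clamp above recurs in PageDataLeaf.addRowTry further down. A self-contained restatement (hypothetical helper name) with a few worked values:

// Split near the insertion point x so sequential inserts fill pages well,
// but clamp into the middle third so neither half ends up nearly empty.
static int splitPoint(int entryCount, int x) {
    if (entryCount < 5) {
        // small pages split in half; required, otherwise the index
        // doesn't work correctly (per the comment above)
        return entryCount / 2;
    }
    int third = entryCount / 3;
    return x < third ? third : x >= 2 * third ? 2 * third : x;
}
// splitPoint(30, 2)  == 10  (clamped up to third)
// splitPoint(30, 29) == 20  (clamped down to 2 * third)
// splitPoint(30, 14) == 14  (insertion point kept)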
......@@ -213,16 +232,20 @@ public class PageBtreeLeaf extends PageBtree {
index.getPageStore().writePage(getPos(), data);
}
private void writeHead() {
data.writeInt(parentPageId);
data.writeByte((byte) (Page.TYPE_BTREE_LEAF | (onlyPosition ? 0 : Page.FLAG_LAST)));
data.writeVarInt(index.getId());
data.writeShortInt(entryCount);
}
private void write() throws SQLException {
if (written) {
return;
}
readAllRows();
data.reset();
data.writeInt(parentPageId);
data.writeByte((byte) (Page.TYPE_BTREE_LEAF | (onlyPosition ? 0 : Page.FLAG_LAST)));
data.writeInt(index.getId());
data.writeShortInt(entryCount);
writeHead();
for (int i = 0; i < entryCount; i++) {
data.writeShortInt(offsets[i]);
}
......
......@@ -28,7 +28,7 @@ import org.h2.util.MemoryUtils;
* <li>9-10: entry count</li>
* <li>11-14: row count of all children (-1 if not known)</li>
* <li>15-18: rightmost child page id</li>
* <li>19- entries: 4 bytes leaf page id, 2 bytes offset to data</li>
* <li>19- entries: leaf page id: int, offset: short</li>
* </ul>
* The row is the largest row of the respective child, meaning
* row[0] is the largest row of child[0].
......
......@@ -112,9 +112,11 @@ abstract class PageData extends Page {
* Get a cursor.
*
* @param session the session
* @param min the smallest key
* @param max the largest key
* @return the cursor
*/
abstract Cursor find(Session session) throws SQLException;
abstract Cursor find(Session session, long min, long max) throws SQLException;
/**
* Get the key at this position.
......@@ -151,7 +153,7 @@ abstract class PageData extends Page {
*
* @return the last key
*/
abstract int getLastKey() throws SQLException;
abstract long getLastKey() throws SQLException;
/**
* Get the first child leaf page of a page.
......@@ -194,7 +196,7 @@ abstract class PageData extends Page {
* @param key the key
* @return the row
*/
abstract Row getRow(int key) throws SQLException;
abstract Row getRow(long key) throws SQLException;
/**
* Get the estimated memory size.
......
......@@ -9,7 +9,6 @@ package org.h2.index;
import java.lang.ref.SoftReference;
import java.sql.SQLException;
import org.h2.constant.ErrorCode;
import org.h2.constant.SysProperties;
import org.h2.engine.Session;
import org.h2.message.Message;
import org.h2.result.Row;
......@@ -21,19 +20,18 @@ import org.h2.store.PageStore;
/**
* A leaf page that contains data of one or multiple rows.
* Format:
* <ul><li>0-3: parent page id (0 for root)
* </li><li>4-4: page type
* </li><li>5-8: table id
* </li><li>9-10: entry count
* </li><li>with overflow: 11-14: the first overflow page id
* </li><li>11- or 15-: list of key / offset pairs (varLong key, 2 bytes offset)
* <ul><li>parent page id (0 for root): int
* </li><li>page type: byte
* </li><li>table id: varInt
* </li><li>column count: varInt
* </li><li>entry count: short
* </li><li>with overflow: the first overflow page id: int
* </li><li>list of key / offset pairs (key: varLong, offset: shortInt)
* </li><li>data
* </li></ul>
*/
public class PageDataLeaf extends PageData {
private static final int KEY_OFFSET_PAIR_START = 11;
/**
* The row offsets.
*/
......@@ -64,9 +62,27 @@ public class PageDataLeaf extends PageData {
*/
private int overflowRowSize;
PageDataLeaf(PageScanIndex index, int pageId, Data data) {
private int columnCount;
private PageDataLeaf(PageScanIndex index, int pageId, Data data) {
super(index, pageId, data);
start = KEY_OFFSET_PAIR_START;
}
/**
* Create a new page.
*
* @param index the index
* @param pageId the page id
* @param parentPageId the parent
* @return the page
*/
static PageDataLeaf create(PageScanIndex index, int pageId, int parentPageId) {
PageDataLeaf p = new PageDataLeaf(index, pageId, index.getPageStore().createData());
p.parentPageId = parentPageId;
p.columnCount = index.getTable().getColumns().length;
p.writeHead();
p.start = p.data.length();
return p;
}
/**
......@@ -87,12 +103,13 @@ public class PageDataLeaf extends PageData {
data.reset();
this.parentPageId = data.readInt();
int type = data.readByte();
int tableId = data.readInt();
int tableId = data.readVarInt();
if (tableId != index.getId()) {
throw Message.getSQLException(ErrorCode.FILE_CORRUPTED_1,
"page:" + getPos() + " expected table:" + index.getId() +
" got:" + tableId + " type:" + type);
}
columnCount = data.readVarInt();
entryCount = data.readShortInt();
offsets = new int[entryCount];
keys = new long[entryCount];
......@@ -107,18 +124,33 @@ public class PageDataLeaf extends PageData {
start = data.length();
}
private int getRowLength(Row row) throws SQLException {
int size = 0;
for (int i = 0; i < columnCount; i++) {
size += data.getValueLen(row.getValue(i));
}
return size;
}
int addRowTry(Row row) throws SQLException {
int rowLength = row.getByteCount(data);
int rowLength = getRowLength(row);
int pageSize = index.getPageStore().getPageSize();
int last = entryCount == 0 ? pageSize : offsets[entryCount - 1];
int keyOffsetPairLen = 2 + data.getVarLongLen(row.getPos());
if (entryCount > 0 && last - rowLength < start + keyOffsetPairLen) {
// split at the insertion point to better fill pages
// split in half would be:
// if (entryCount > 1) {
// return entryCount / 2;
// }
return find(row.getPos());
int x = find(row.getPos());
if (entryCount > 1) {
if (entryCount < 5) {
// required, otherwise the index doesn't work correctly
return entryCount / 2;
}
// split near the insertion point to better fill pages
// split in half would be:
// return entryCount / 2;
int third = entryCount / 3;
return x < third ? third : x >= 2 * third ? 2 * third : x;
}
return x;
}
int offset = last - rowLength;
int[] newOffsets = new int[entryCount + 1];
......@@ -130,8 +162,8 @@ public class PageDataLeaf extends PageData {
} else {
readAllRows();
x = find(row.getPos());
if (SysProperties.CHECK && x < keys.length && keys[x] == row.getPos()) {
throw Message.throwInternalError("" + row.getPos());
if (x < keys.length && keys[x] == row.getPos()) {
throw index.getDuplicateKeyException();
}
System.arraycopy(offsets, 0, newOffsets, 0, x);
System.arraycopy(keys, 0, newKeys, 0, x);
......@@ -225,8 +257,9 @@ public class PageDataLeaf extends PageData {
rows = newRows;
}
Cursor find(Session session) {
return new PageScanCursor(session, this, 0, index.isMultiVersion);
Cursor find(Session session, long min, long max) {
int x = find(min);
return new PageScanCursor(session, this, x, max, index.isMultiVersion);
}
/**
......@@ -238,7 +271,10 @@ public class PageDataLeaf extends PageData {
Row getRowAt(int at) throws SQLException {
Row r = rows[at];
if (r == null) {
if (firstOverflowPageId != 0) {
if (firstOverflowPageId == 0) {
data.setPos(offsets[at]);
r = index.readRow(data, columnCount);
} else {
if (rowRef != null) {
r = rowRef.get();
if (r != null) {
......@@ -246,17 +282,19 @@ public class PageDataLeaf extends PageData {
}
}
PageStore store = index.getPageStore();
Data buff = store.createData();
int pageSize = store.getPageSize();
data.setPos(pageSize);
int offset = offsets[at];
buff.write(data.getBytes(), offset, pageSize - offset);
int next = firstOverflowPageId;
do {
PageDataOverflow page = index.getPageOverflow(next);
next = page.readInto(data);
next = page.readInto(buff);
} while (next != 0);
overflowRowSize = data.length();
overflowRowSize = pageSize + buff.length();
buff.setPos(0);
r = index.readRow(buff, columnCount);
}
data.setPos(offsets[at]);
r = index.readRow(data);
r.setPos((int) keys[at]);
if (firstOverflowPageId != 0) {
rowRef = new SoftReference<Row>(r);
......@@ -273,8 +311,7 @@ public class PageDataLeaf extends PageData {
PageData split(int splitPoint) throws SQLException {
int newPageId = index.getPageStore().allocatePage();
PageDataLeaf p2 = new PageDataLeaf(index, newPageId, index.getPageStore().createData());
p2.parentPageId = parentPageId;
PageDataLeaf p2 = PageDataLeaf.create(index, newPageId, parentPageId);
for (int i = splitPoint; i < entryCount;) {
p2.addRowTry(getRowAt(splitPoint));
removeRow(splitPoint);
......@@ -282,7 +319,7 @@ public class PageDataLeaf extends PageData {
return p2;
}
int getLastKey() throws SQLException {
long getLastKey() throws SQLException {
// TODO re-use keys, but remove this mechanism
if (entryCount == 0) {
return 0;
......@@ -337,7 +374,7 @@ public class PageDataLeaf extends PageData {
}
}
Row getRow(int key) throws SQLException {
Row getRow(long key) throws SQLException {
int index = find(key);
return getRowAt(index);
}
......@@ -366,13 +403,7 @@ public class PageDataLeaf extends PageData {
}
}
private void write() throws SQLException {
if (written) {
return;
}
readAllRows();
data.reset();
data.checkCapacity(overflowRowSize);
private void writeHead() {
data.writeInt(parentPageId);
int type;
if (firstOverflowPageId == 0) {
......@@ -381,8 +412,19 @@ public class PageDataLeaf extends PageData {
type = Page.TYPE_DATA_LEAF;
}
data.writeByte((byte) type);
data.writeInt(index.getId());
data.writeVarInt(index.getId());
data.writeVarInt(columnCount);
data.writeShortInt(entryCount);
}
private void write() throws SQLException {
if (written) {
return;
}
readAllRows();
data.reset();
data.checkCapacity(overflowRowSize);
writeHead();
if (firstOverflowPageId != 0) {
data.writeInt(firstOverflowPageId);
}
......@@ -392,7 +434,10 @@ public class PageDataLeaf extends PageData {
}
for (int i = 0; i < entryCount; i++) {
data.setPos(offsets[i]);
getRowAt(i).write(data);
Row r = getRowAt(i);
for (int j = 0; j < columnCount; j++) {
data.writeValue(r.getValue(j));
}
}
written = true;
}
......@@ -405,7 +450,7 @@ public class PageDataLeaf extends PageData {
public void moveTo(Session session, int newPos) throws SQLException {
PageStore store = index.getPageStore();
PageDataLeaf p2 = new PageDataLeaf(index, newPos, store.createData());
PageDataLeaf p2 = PageDataLeaf.create(index, newPos, parentPageId);
readAllRows();
p2.keys = keys;
p2.overflowRowSize = overflowRowSize;
......
......@@ -146,9 +146,10 @@ public class PageDataNode extends PageData {
}
}
Cursor find(Session session) throws SQLException {
int child = childPageIds[0];
return index.getPage(child, getPos()).find(session);
Cursor find(Session session, long min, long max) throws SQLException {
int x = find(min);
int child = childPageIds[x];
return index.getPage(child, getPos()).find(session, min, max);
}
PageData split(int splitPoint) throws SQLException {
......@@ -191,7 +192,7 @@ public class PageDataNode extends PageData {
check();
}
int getLastKey() throws SQLException {
long getLastKey() throws SQLException {
return index.getPage(childPageIds[entryCount], getPos()).getLastKey();
}
......@@ -250,7 +251,7 @@ public class PageDataNode extends PageData {
}
}
Row getRow(int key) throws SQLException {
Row getRow(long key) throws SQLException {
int at = find(key);
PageData page = index.getPage(childPageIds[at], getPos());
return page.getRow(key);
......
/*
* Copyright 2004-2009 H2 Group. Multiple-Licensed under the H2 License,
* Version 1.0, and under the Eclipse Public License, Version 1.0
* (http://h2database.com/html/license.html).
* Initial Developer: H2 Group
*/
package org.h2.index;
import java.sql.SQLException;
import org.h2.engine.Session;
import org.h2.message.Message;
import org.h2.result.Row;
import org.h2.result.SearchRow;
import org.h2.store.PageStore;
import org.h2.table.Column;
import org.h2.table.IndexColumn;
import org.h2.table.TableData;
/**
* An index that delegates indexing to the page data index.
*/
public class PageDelegateIndex extends PageIndex {
private final PageScanIndex mainIndex;
public PageDelegateIndex(TableData table, int id, String name, IndexType indexType, PageScanIndex mainIndex, int headPos, Session session) throws SQLException {
IndexColumn[] columns = IndexColumn.wrap(new Column[] { table.getColumn(mainIndex.getMainIndexColumn())});
this.initBaseIndex(table, id, name, columns, indexType);
this.mainIndex = mainIndex;
if (!database.isPersistent() || id < 0) {
throw Message.throwInternalError("" + name);
}
PageStore store = database.getPageStore();
store.addIndex(this);
if (headPos == Index.EMPTY_HEAD) {
store.addMeta(this, session);
}
}
public void add(Session session, Row row) {
// nothing to do
}
public boolean canFindNext() {
return false;
}
public boolean canGetFirstOrLast() {
return true;
}
public void close(Session session) {
// nothing to do
}
public Cursor find(Session session, SearchRow first, SearchRow last) throws SQLException {
return mainIndex.find(session, first, last);
}
public Cursor findFirstOrLast(Session session, boolean first) throws SQLException {
return mainIndex.findFirstOrLast(session, first);
}
public Cursor findNext(Session session, SearchRow higherThan, SearchRow last) {
throw Message.throwInternalError();
}
public int getColumnIndex(Column col) {
return mainIndex.getColumnIndex(col);
}
public double getCost(Session session, int[] masks) {
return 10 * getCostRangeIndex(masks, mainIndex.getRowCount(session));
}
public boolean needRebuild() {
return false;
}
public void remove(Session session, Row row) {
// nothing to do
}
public void remove(Session session) throws SQLException {
session.getDatabase().getPageStore().removeMeta(this, session);
}
public void truncate(Session session) {
// nothing to do
}
public void checkRename() {
// ok
}
public long getRowCount(Session session) {
return mainIndex.getRowCount(session);
}
public long getRowCountApproximation() {
return mainIndex.getRowCountApproximation();
}
}
......@@ -21,14 +21,16 @@ class PageScanCursor implements Cursor {
private PageDataLeaf current;
private int idx;
private final long max;
private Row row;
private final boolean multiVersion;
private final Session session;
private Iterator<Row> delta;
PageScanCursor(Session session, PageDataLeaf current, int idx, boolean multiVersion) {
PageScanCursor(Session session, PageDataLeaf current, int idx, long max, boolean multiVersion) {
this.current = current;
this.idx = idx;
this.max = max;
this.multiVersion = multiVersion;
this.session = session;
if (multiVersion) {
......@@ -50,7 +52,8 @@ class PageScanCursor implements Cursor {
public boolean next() throws SQLException {
if (!multiVersion) {
return nextRow();
nextRow();
return checkMax();
}
while (true) {
if (delta != null) {
......@@ -71,21 +74,34 @@ class PageScanCursor implements Cursor {
}
break;
}
return row != null;
return checkMax();
}
private boolean nextRow() throws SQLException {
private boolean checkMax() throws SQLException {
if (row != null) {
if (max != Long.MAX_VALUE) {
long x = current.index.getLong(row, Long.MAX_VALUE);
if (x > max) {
row = null;
return false;
}
}
return true;
}
return false;
}
private void nextRow() throws SQLException {
if (idx >= current.getEntryCount()) {
current = current.getNextPage();
idx = 0;
if (current == null) {
row = null;
return false;
return;
}
}
row = current.getRowAt(idx);
idx++;
return true;
}
public boolean previous() {
......
......@@ -27,6 +27,7 @@ import org.h2.util.MathUtils;
import org.h2.util.New;
import org.h2.value.Value;
import org.h2.value.ValueLob;
import org.h2.value.ValueNull;
/**
* The scan index allows access to a row by key. It can be used to iterate over
......@@ -37,11 +38,12 @@ public class PageScanIndex extends PageIndex implements RowIndex {
private PageStore store;
private TableData tableData;
private int lastKey;
private long lastKey;
private long rowCount;
private HashSet<Row> delta;
private int rowCountDiff;
private HashMap<Integer, Integer> sessionRowCount;
private int mainIndexColumn = -1;
public PageScanIndex(TableData table, int id, IndexColumn[] columns, IndexType indexType, int headPos, Session session) throws SQLException {
initBaseIndex(table, id, table.getName() + "_TABLE_SCAN", columns, indexType);
......@@ -63,11 +65,10 @@ public class PageScanIndex extends PageIndex implements RowIndex {
// it should not for new tables, otherwise redo of other operations
// must ensure this page is not used for other things
store.addMeta(this, session);
PageDataLeaf root = new PageDataLeaf(this, rootPageId, store.createData());
root.parentPageId = PageData.ROOT;
PageDataLeaf root = PageDataLeaf.create(this, rootPageId, PageData.ROOT);
store.updateRecord(root, true, root.data);
} else {
rootPageId = store.getRootPageId(this);
rootPageId = store.getRootPageId(id);
PageData root = getPage(rootPageId, 0);
lastKey = root.getLastKey();
rowCount = root.getRowCount();
......@@ -85,10 +86,14 @@ public class PageScanIndex extends PageIndex implements RowIndex {
}
public void add(Session session, Row row) throws SQLException {
if (row.getPos() == 0) {
row.setPos(++lastKey);
if (mainIndexColumn != -1) {
row.setPos(row.getValue(mainIndexColumn).getInt());
} else {
lastKey = Math.max(lastKey, row.getPos() + 1);
if (row.getPos() == 0) {
row.setPos((int) ++lastKey);
} else {
lastKey = Math.max(lastKey, row.getPos() + 1);
}
}
if (trace.isDebugEnabled()) {
trace.debug("add table:" + table.getId() + " " + row);
......@@ -166,9 +171,7 @@ public class PageScanIndex extends PageIndex implements RowIndex {
PageData getPage(int id, int parent) throws SQLException {
PageData p = (PageData) store.getPage(id);
if (p == null) {
Data data = store.createData();
PageDataLeaf empty = new PageDataLeaf(this, id, data);
empty.parentPageId = parent;
PageDataLeaf empty = PageDataLeaf.create(this, id, parent);
return empty;
}
if (p.index.rootPageId != rootPageId) {
......@@ -186,13 +189,42 @@ public class PageScanIndex extends PageIndex implements RowIndex {
return false;
}
/**
* Get the key from the row.
*
* @param row the row
* @param ifEmpty the value to use if the row is null or the key value is missing
* @return the key
*/
long getLong(SearchRow row, long ifEmpty) throws SQLException {
if (row == null) {
return ifEmpty;
}
Value v = row.getValue(mainIndexColumn);
if (v == null || v == ValueNull.INSTANCE) {
return ifEmpty;
}
return v.getLong();
}
public Cursor find(Session session, SearchRow first, SearchRow last) throws SQLException {
long min = getLong(first, Long.MIN_VALUE);
long max = getLong(last, Long.MAX_VALUE);
PageData root = getPage(rootPageId, 0);
return root.find(session);
return root.find(session, min, max);
}
public Cursor findFirstOrLast(Session session, boolean first) throws SQLException {
throw Message.getUnsupportedException("PAGE");
Cursor cursor;
PageData root = getPage(rootPageId, 0);
if (first) {
cursor = root.find(session, Long.MIN_VALUE, Long.MAX_VALUE);
} else {
long lastKey = root.getLastKey();
cursor = root.find(session, lastKey, lastKey);
}
cursor.next();
return cursor;
}
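Together with the PageScanCursor change above, the scan index now behaves as a range index over the row key: getLong() maps the first/last search rows onto a [min, max] key interval, the descent starts at min, and the cursor stops once checkMax() sees a key beyond max. A small sketch of that mapping (hypothetical helper, illustrative values):

// Mirrors getLong(first, Long.MIN_VALUE) / getLong(last, Long.MAX_VALUE):
// a missing bound falls back to the widest possible key.
static long[] keyRange(Long lower, Long upper) {
    long min = lower == null ? Long.MIN_VALUE : lower;
    long max = upper == null ? Long.MAX_VALUE : upper;
    return new long[] { min, max };
}
// WHERE ID BETWEEN 10 AND 20  ->  keyRange(10L, 20L)  == {10, 20}
// WHERE ID >= 10              ->  keyRange(10L, null) == {10, Long.MAX_VALUE}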
public double getCost(Session session, int[] masks) {
......@@ -273,8 +305,7 @@ public class PageScanIndex extends PageIndex implements RowIndex {
private void removeAllRows() throws SQLException {
PageData root = getPage(rootPageId, 0);
root.freeChildren();
root = new PageDataLeaf(this, rootPageId, store.createData());
root.parentPageId = PageData.ROOT;
root = PageDataLeaf.create(this, rootPageId, PageData.ROOT);
store.removeRecord(rootPageId);
store.updateRecord(root, true, null);
rowCount = 0;
......@@ -298,10 +329,15 @@ public class PageScanIndex extends PageIndex implements RowIndex {
* Read a row from the data page at the given position.
*
* @param data the data page
* @param columnCount the number of columns
* @return the row
*/
Row readRow(Data data) throws SQLException {
return tableData.readRow(data);
Row readRow(Data data, int columnCount) throws SQLException {
Value[] values = new Value[columnCount];
for (int i = 0; i < columnCount; i++) {
values[i] = data.readValue();
}
return tableData.createRow(values);
}
public long getRowCountApproximation() {
......@@ -324,8 +360,9 @@ public class PageScanIndex extends PageIndex implements RowIndex {
}
public int getColumnIndex(Column col) {
// the scan index cannot use any columns
// TODO it can if there is an INT primary key
if (col.getColumnId() == mainIndexColumn) {
return 0;
}
return -1;
}
......@@ -386,4 +423,12 @@ public class PageScanIndex extends PageIndex implements RowIndex {
store.addIndex(this);
}
public void setMainIndexColumn(int mainIndexColumn) {
this.mainIndexColumn = mainIndexColumn;
}
public int getMainIndexColumn() {
return mainIndexColumn;
}
}
......@@ -7,6 +7,7 @@
package org.h2.result;
import java.sql.SQLException;
import org.h2.command.ddl.CreateTableData;
import org.h2.engine.Constants;
import org.h2.engine.Session;
import org.h2.index.BtreeIndex;
......@@ -40,11 +41,16 @@ public class ResultTempTable implements ResultExternal {
Schema schema = session.getDatabase().getSchema(Constants.SCHEMA_MAIN);
Column column = new Column(COLUMN_NAME, Value.ARRAY);
column.setNullable(false);
ObjectArray<Column> columns = ObjectArray.newInstance();
columns.add(column);
int tableId = session.getDatabase().allocateObjectId(true, true);
String tableName = "TEMP_RESULT_SET_" + tableId;
table = schema.createTable(tableName, tableId, columns, true, false, true, false, Index.EMPTY_HEAD, session);
CreateTableData data = new CreateTableData();
data.columns.add(column);
data.id = session.getDatabase().allocateObjectId(true, true);
data.tableName = "TEMP_RESULT_SET_" + data.id;
data.temporary = true;
data.persistIndexes = false;
data.persistData = true;
data.headPos = Index.EMPTY_HEAD;
data.session = session;
table = schema.createTable(data);
session.addLocalTempTable(table);
int indexId = session.getDatabase().allocateObjectId(true, false);
IndexColumn indexColumn = new IndexColumn();
......@@ -54,9 +60,9 @@ public class ResultTempTable implements ResultExternal {
indexType = IndexType.createPrimaryKey(true, false);
IndexColumn[] indexCols = new IndexColumn[]{indexColumn};
if (session.getDatabase().isPageStoreEnabled()) {
index = new PageBtreeIndex(table, indexId, tableName, indexCols, indexType, Index.EMPTY_HEAD, session);
index = new PageBtreeIndex(table, indexId, data.tableName, indexCols, indexType, Index.EMPTY_HEAD, session);
} else {
index = new BtreeIndex(session, table, indexId, tableName, indexCols, indexType, Index.EMPTY_HEAD);
index = new BtreeIndex(session, table, indexId, data.tableName, indexCols, indexType, Index.EMPTY_HEAD);
}
index.setTemporary(true);
session.addLocalTempTableIndex(index);
......
......@@ -9,7 +9,7 @@ package org.h2.schema;
import java.sql.SQLException;
import java.util.HashMap;
import java.util.HashSet;
import org.h2.command.ddl.CreateTableData;
import org.h2.constant.ErrorCode;
import org.h2.constant.SysProperties;
import org.h2.constraint.Constraint;
......@@ -21,7 +21,6 @@ import org.h2.engine.User;
import org.h2.index.Index;
import org.h2.message.Message;
import org.h2.message.Trace;
import org.h2.table.Column;
import org.h2.table.Table;
import org.h2.table.TableData;
import org.h2.table.TableLink;
......@@ -483,21 +482,14 @@ public class Schema extends DbObjectBase {
/**
* Add a table to the schema.
*
* @param tableName the table name
* @param id the object id
* @param columns the column list
* @param temporary whether this is a temporary table
* @param persistIndexes whether indexes of the table should be persistent
* @param persistData whether data of the table should be persistent
* @param clustered whether a clustered table should be created
* @param headPos the position (page number) of the head
* @param session the session
* @param data the create table information
* @return the created {@link TableData} object
*/
public TableData createTable(String tableName, int id, ObjectArray<Column> columns, boolean temporary, boolean persistIndexes, boolean persistData, boolean clustered, int headPos, Session session)
public TableData createTable(CreateTableData data)
throws SQLException {
synchronized (database) {
return new TableData(this, tableName, id, columns, temporary, persistIndexes, persistData, clustered, headPos, session);
data.schema = this;
return new TableData(data);
}
}
......
......@@ -883,7 +883,7 @@ public class Data extends DataPage {
*
* @param x the value
*/
private void writeVarInt(int x) {
public void writeVarInt(int x) {
while ((x & ~0x7f) != 0) {
data[pos++] = (byte) (0x80 | (x & 0x7f));
x >>>= 7;
......
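writeVarInt is made public because the page formats above now store index ids and column counts as variable-length integers: 7 data bits per byte, least-significant group first, high bit set on every byte except the last (readVarInt/readVarLong in this diff are the reading counterparts). A standalone decoding sketch, hypothetical helper only:

// Decodes the format produced by writeVarInt above.
static int decodeVarInt(byte[] buf, int pos) {
    int b = buf[pos++];
    int x = b & 0x7f;
    for (int shift = 7; (b & 0x80) != 0; shift += 7) {
        b = buf[pos++];
        x |= (b & 0x7f) << shift;
    }
    return x;
}
// Example: 300 (binary 1_0010_1100) is stored as the two bytes 0xAC 0x02:
// 0xAC = continuation bit + low 7 bits (0101100), 0x02 = the remaining bits.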
......@@ -11,6 +11,7 @@ import java.io.OutputStream;
import java.sql.SQLException;
import java.util.HashMap;
import java.util.zip.CRC32;
import org.h2.command.ddl.CreateTableData;
import org.h2.constant.ErrorCode;
import org.h2.engine.Constants;
import org.h2.engine.Database;
......@@ -24,6 +25,7 @@ import org.h2.index.PageBtreeNode;
import org.h2.index.PageDataLeaf;
import org.h2.index.PageDataNode;
import org.h2.index.PageDataOverflow;
import org.h2.index.PageDelegateIndex;
import org.h2.index.PageIndex;
import org.h2.index.PageScanIndex;
import org.h2.log.InDoubtTransaction;
......@@ -77,8 +79,13 @@ import org.h2.value.ValueString;
*/
public class PageStore implements CacheWriter {
// TODO can not use delegating index when starting if it was created later
// TODO re-use deleted keys; especially if the primary key is removed
// TODO table row: number of columns should be varInt not int
// TODO implement checksum; 0 for empty pages
// TODO in log, don't store empty space
// TODO in log, don't store empty space between page head and page data
// TODO long primary keys don't use delegating index yet (setPos(): int)
// TODO utf-x: test if it's faster
// TODO after opening the database, delay writing until required
......@@ -111,7 +118,7 @@ public class PageStore implements CacheWriter {
// TODO check for file size (exception if not exact size expected)
// TODO implement missing code for STORE_BTREE_ROWCOUNT (maybe enable)
// TODO store dates differently in Data; test moving db to another timezone
// TODO online backup using bsdif
// TODO online backup using bsdiff
// TODO when removing DiskFile:
// remove CacheObject.blockCount
......@@ -452,7 +459,7 @@ public class PageStore implements CacheWriter {
p = PageFreeList.read(this, data, pageId);
break;
case Page.TYPE_DATA_LEAF: {
int indexId = data.readInt();
int indexId = data.readVarInt();
PageScanIndex index = (PageScanIndex) metaObjects.get(indexId);
if (index == null) {
Message.throwInternalError("index not found " + indexId);
......@@ -479,7 +486,7 @@ public class PageStore implements CacheWriter {
break;
}
case Page.TYPE_BTREE_LEAF: {
int indexId = data.readInt();
int indexId = data.readVarInt();
PageBtreeIndex index = (PageBtreeIndex) metaObjects.get(indexId);
if (index == null) {
Message.throwInternalError("index not found " + indexId);
......@@ -784,7 +791,7 @@ public class PageStore implements CacheWriter {
increaseFileSize(INCREMENT_PAGES);
}
if (trace.isDebugEnabled()) {
trace.debug("allocatePage " + pos);
// trace.debug("allocatePage " + pos);
}
return pos;
}
......@@ -807,7 +814,7 @@ public class PageStore implements CacheWriter {
*/
public void freePage(int pageId, boolean logUndo, Data old) throws SQLException {
if (trace.isDebugEnabled()) {
trace.debug("freePage " + pageId);
// trace.debug("freePage " + pageId);
}
synchronized (database) {
cache.remove(pageId);
......@@ -900,6 +907,9 @@ public class PageStore implements CacheWriter {
* @param data the data
*/
public void writePage(int pageId, Data data) throws SQLException {
if (pageId <= 0) {
Message.throwInternalError("write to page " + pageId);
}
synchronized (database) {
file.seek((long) pageId << pageSizeShift);
file.write(data.getBytes(), 0, pageSize);
......@@ -1080,7 +1090,8 @@ public class PageStore implements CacheWriter {
}
private void openMetaIndex() throws SQLException {
ObjectArray<Column> cols = ObjectArray.newInstance();
CreateTableData data = new CreateTableData();
ObjectArray<Column> cols = data.columns;
cols.add(new Column("ID", Value.INT));
cols.add(new Column("TYPE", Value.INT));
cols.add(new Column("PARENT", Value.INT));
......@@ -1088,8 +1099,15 @@ public class PageStore implements CacheWriter {
cols.add(new Column("OPTIONS", Value.STRING));
cols.add(new Column("COLUMNS", Value.STRING));
metaSchema = new Schema(database, 0, "", null, true);
metaTable = new TableData(metaSchema, "PAGE_INDEX",
META_TABLE_ID, cols, false, true, true, false, 0, systemSession);
data.schema = metaSchema;
data.tableName = "PAGE_INDEX";
data.id = META_TABLE_ID;
data.temporary = false;
data.persistData = true;
data.persistIndexes = true;
data.headPos = 0;
data.session = systemSession;
metaTable = new TableData(data);
metaIndex = (PageScanIndex) metaTable.getScanIndex(
systemSession);
metaObjects.clear();
......@@ -1115,6 +1133,8 @@ public class PageStore implements CacheWriter {
} else {
index.getSchema().remove(index);
}
} else if (index instanceof PageDelegateIndex) {
index.getSchema().remove(index);
}
index.remove(systemSession);
if (reservedPages != null && reservedPages.containsKey(headPos)) {
......@@ -1134,25 +1154,31 @@ public class PageStore implements CacheWriter {
String options = row.getValue(4).getString();
String columnList = row.getValue(5).getString();
String[] columns = StringUtils.arraySplit(columnList, ',', false);
IndexType indexType = IndexType.createNonUnique(true);
String[] ops = StringUtils.arraySplit(options, ',', false);
Index meta;
if (trace.isDebugEnabled()) {
trace.debug("addMeta id=" + id + " type=" + type + " parent=" + parent + " columns=" + columnList);
}
if (redo) {
if (redo && rootPageId != 0) {
writePage(rootPageId, createData());
allocatePage(rootPageId);
}
metaRootPageId.put(id, rootPageId);
if (type == META_TYPE_SCAN_INDEX) {
ObjectArray<Column> columnArray = ObjectArray.newInstance();
CreateTableData data = new CreateTableData();
for (int i = 0; i < columns.length; i++) {
Column col = new Column("C" + i, Value.INT);
columnArray.add(col);
data.columns.add(col);
}
String[] ops = StringUtils.arraySplit(options, ',', true);
boolean temp = ops.length == 3 && ops[2].equals("temp");
TableData table = new TableData(metaSchema, "T" + id, id, columnArray, temp, true, true, false, 0, session);
data.schema = metaSchema;
data.tableName = "T" + id;
data.id = id;
data.temporary = ops[2].equals("temp");
data.persistData = true;
data.persistIndexes = true;
data.headPos = 0;
data.session = session;
TableData table = new TableData(data);
CompareMode mode = CompareMode.getInstance(ops[0], Integer.parseInt(ops[1]));
table.setCompareMode(mode);
meta = table.getScanIndex(session);
......@@ -1177,6 +1203,16 @@ public class PageStore implements CacheWriter {
ic.column = column;
cols[i] = ic;
}
IndexType indexType;
if (ops[3].equals("d")) {
indexType = IndexType.createPrimaryKey(true, false);
Column[] tableColumns = table.getColumns();
for (int i = 0; i < cols.length; i++) {
tableColumns[cols[i].column.getColumnId()].setNullable(false);
}
} else {
indexType = IndexType.createNonUnique(true);
}
meta = table.addIndex(session, "I" + id, id, cols, indexType, id, null);
}
metaObjects.put(id, meta);
......@@ -1214,9 +1250,13 @@ public class PageStore implements CacheWriter {
String columnList = buff.toString();
Table table = index.getTable();
CompareMode mode = table.getCompareMode();
String options = mode.getName()+ "," + mode.getStrength();
String options = mode.getName()+ "," + mode.getStrength() + ",";
if (table.isTemporary()) {
options += ",temp";
options += "temp";
}
options += ",";
if (index instanceof PageDelegateIndex) {
options += "d";
}
Row row = metaTable.getTemplateRow();
row.setValue(0, ValueInt.get(index.getId()));
......@@ -1345,11 +1385,11 @@ public class PageStore implements CacheWriter {
/**
* Get the root page of an index.
*
* @param index the index
* @param indexId the index id
* @return the root page
*/
public int getRootPageId(PageIndex index) {
return metaRootPageId.get(index.getId());
public int getRootPageId(int indexId) {
return metaRootPageId.get(indexId);
}
// TODO implement checksum
......
......@@ -869,10 +869,6 @@ public abstract class Table extends SchemaObjectBase {
this.onCommitTruncate = onCommitTruncate;
}
boolean getClustered() {
return false;
}
/**
* If the index is still required by a constraint, transfer the ownership to
* it. Otherwise, the index is removed.
......
......@@ -12,6 +12,7 @@ import java.util.HashSet;
import java.util.Set;
import org.h2.api.DatabaseEventListener;
import org.h2.command.ddl.CreateTableData;
import org.h2.constant.ErrorCode;
import org.h2.constant.SysProperties;
import org.h2.constraint.Constraint;
......@@ -27,6 +28,7 @@ import org.h2.index.IndexType;
import org.h2.index.MultiVersionIndex;
import org.h2.index.NonUniqueHashIndex;
import org.h2.index.PageBtreeIndex;
import org.h2.index.PageDelegateIndex;
import org.h2.index.PageScanIndex;
import org.h2.index.RowIndex;
import org.h2.index.ScanIndex;
......@@ -34,7 +36,7 @@ import org.h2.index.TreeIndex;
import org.h2.message.Message;
import org.h2.message.Trace;
import org.h2.result.Row;
import org.h2.schema.Schema;
import org.h2.result.SortOrder;
import org.h2.schema.SchemaObject;
import org.h2.store.DataPage;
import org.h2.store.PageStore;
......@@ -55,7 +57,6 @@ import org.h2.value.Value;
* indexes. There is at least one index, the scan index.
*/
public class TableData extends Table implements RecordReader {
private final boolean clustered;
private RowIndex scanIndex;
private long rowCount;
private Session lockExclusive;
......@@ -65,23 +66,21 @@ public class TableData extends Table implements RecordReader {
private final ObjectArray<Index> indexes = ObjectArray.newInstance();
private long lastModificationId;
private boolean containsLargeObject;
private PageScanIndex mainIndex;
public TableData(Schema schema, String tableName, int id, ObjectArray<Column> columns,
boolean temporary, boolean persistIndexes, boolean persistData, boolean clustered, int headPos, Session session) throws SQLException {
super(schema, id, tableName, persistIndexes, persistData);
Column[] cols = new Column[columns.size()];
columns.toArray(cols);
public TableData(CreateTableData data) throws SQLException {
super(data.schema, data.id, data.tableName, data.persistIndexes, data.persistData);
Column[] cols = new Column[data.columns.size()];
data.columns.toArray(cols);
setColumns(cols);
setTemporary(temporary);
this.clustered = clustered;
if (!clustered) {
if (database.isPageStoreEnabled() && persistData && database.isPersistent()) {
scanIndex = new PageScanIndex(this, id, IndexColumn.wrap(cols), IndexType.createScan(persistData), headPos, session);
} else {
scanIndex = new ScanIndex(this, id, IndexColumn.wrap(cols), IndexType.createScan(persistData));
}
indexes.add(scanIndex);
setTemporary(data.temporary);
if (database.isPageStoreEnabled() && data.persistData && database.isPersistent()) {
mainIndex = new PageScanIndex(this, data.id, IndexColumn.wrap(cols), IndexType.createScan(data.persistData), data.headPos, data.session);
scanIndex = mainIndex;
} else {
scanIndex = new ScanIndex(this, data.id, IndexColumn.wrap(cols), IndexType.createScan(data.persistData));
}
indexes.add(scanIndex);
for (Column col : cols) {
if (DataType.isLargeObject(col.getType())) {
containsLargeObject = true;
......@@ -188,7 +187,20 @@ public class TableData extends Table implements RecordReader {
Index index;
if (isPersistIndexes() && indexType.isPersistent()) {
if (database.isPageStoreEnabled()) {
index = new PageBtreeIndex(this, indexId, indexName, cols, indexType, headPos, session);
int mainIndexColumn;
if (database.isStarting() && database.getPageStore().getRootPageId(indexId) != 0) {
mainIndexColumn = -1;
} else if (!database.isStarting() && mainIndex.getRowCount(session) != 0) {
mainIndexColumn = -1;
} else {
mainIndexColumn = getMainIndexColumn(indexType, cols);
}
if (mainIndexColumn != -1) {
mainIndex.setMainIndexColumn(mainIndexColumn);
index = new PageDelegateIndex(this, indexId, indexName, indexType, mainIndex, headPos, session);
} else {
index = new PageBtreeIndex(this, indexId, indexName, cols, indexType, headPos, session);
}
} else {
index = new BtreeIndex(session, this, indexId, indexName, cols, indexType, headPos);
}
......@@ -272,6 +284,30 @@ public class TableData extends Table implements RecordReader {
return index;
}
private int getMainIndexColumn(IndexType indexType, IndexColumn[] cols) {
if (mainIndex.getMainIndexColumn() != -1) {
return -1;
}
if (!indexType.isPrimaryKey() || cols.length != 1) {
return -1;
}
IndexColumn first = cols[0];
if (first.sortType != SortOrder.ASCENDING) {
return -1;
}
switch(first.column.getType()) {
case Value.BYTE:
case Value.SHORT:
case Value.INT:
int todoPosIsInt;
// case Value.LONG:
break;
default:
return -1;
}
return first.column.getColumnId();
}
public boolean canGetRowCount() {
return true;
}
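Together with the addIndex hunk above, getMainIndexColumn is the heart of this commit: a primary key can be served by the scan index itself (through a PageDelegateIndex) only if it is requested while the table is empty, covers exactly one ascending column, and that column is BYTE, SHORT or INT; LONG support is still pending, as the todoPosIsInt marker notes. A hedged JDBC illustration of which schemas qualify (connection setup and table names are invented for the example):

// Illustration only: which primary keys can delegate to the scan index
// under the conditions in getMainIndexColumn above; conn is assumed open.
Statement stat = conn.createStatement();

// Qualifies: single ascending INT column, declared while the table is empty.
stat.execute("CREATE TABLE A(ID INT PRIMARY KEY, NAME VARCHAR)");

// Does not qualify: non-integer key type, falls back to a PageBtreeIndex.
stat.execute("CREATE TABLE B(ID VARCHAR PRIMARY KEY)");

// Does not qualify: composite key (cols.length != 1).
stat.execute("CREATE TABLE C(A INT, B INT, PRIMARY KEY(A, B))");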
......@@ -625,8 +661,17 @@ public class TableData extends Table implements RecordReader {
for (int i = 0; i < len; i++) {
data[i] = s.readValue();
}
Row row = new Row(data, memoryPerRow);
return row;
return createRow(data);
}
/**
* Create a row from the values.
*
* @param data the value list
* @return the row
*/
public Row createRow(Value[] data) {
return new Row(data, memoryPerRow);
}
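Row construction is funneled through this public factory so that callers outside the table (for example the new main index) build rows with the table's per-row memory estimate. A sketch of a call site, assuming table refers to a TableData instance with a single INT column:

// Hypothetical call site; ValueInt is H2's wrapper for INT values.
Value[] values = { ValueInt.get(100) };
Row row = table.createRow(values);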
/**
......@@ -706,10 +751,6 @@ public class TableData extends Table implements RecordReader {
return lastModificationId;
}
boolean getClustered() {
return clustered;
}
public boolean getContainsLargeObject() {
return containsLargeObject;
}
......
......@@ -338,14 +338,7 @@ public class TableFilter implements ColumnResolver {
*/
public Row get() throws SQLException {
if (current == null && currentSearchRow != null) {
if (table.getClustered()) {
current = table.getTemplateRow();
for (int i = 0; i < currentSearchRow.getColumnCount(); i++) {
current.setValue(i, currentSearchRow.getValue(i));
}
} else {
current = cursor.get();
}
current = cursor.get();
}
return current;
}
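With clustered tables removed from TableData, the special case above became dead code: get() now always materializes the current row from the cursor, and rows from a delegated primary key are produced by the index layer rather than rebuilt in the filter.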
......
......@@ -810,10 +810,11 @@ public class Recover extends Tool implements DataHandler {
// type 1
case Page.TYPE_DATA_LEAF: {
pageTypeCount[type]++;
setStorage(s.readInt());
setStorage(s.readVarInt());
int columnCount = s.readVarInt();
int entries = s.readShortInt();
writer.println("-- page " + page + ": data leaf " + (last ? "(last)" : "") + " table: " + storageId + " entries: " + entries);
dumpPageDataLeaf(store, pageSize, writer, s, last, page, entries);
writer.println("-- page " + page + ": data leaf " + (last ? "(last)" : "") + " table: " + storageId + " entries: " + entries + " columns: " + columnCount);
dumpPageDataLeaf(store, pageSize, writer, s, last, page, columnCount, entries);
break;
}
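Data leaf pages now store the table (storage) id and a per-page column count as variable-length integers, so the Recover tool reads them with readVarInt() here and in the b-tree leaf hunk below. A minimal sketch of the decoding, assuming the common scheme of 7 payload bits per byte with the high bit as a continuation flag (whether H2's Data.readVarInt matches this exactly is an assumption):

// Sketch of variable-length int decoding (7 payload bits per byte, high
// bit set means "more bytes follow"); small values cost a single byte.
static int readVarInt(java.io.DataInput in) throws java.io.IOException {
    int b = in.readByte();
    if (b >= 0) {
        return b;                    // 0..127: one byte
    }
    int x = b & 0x7f;
    b = in.readByte();
    if (b >= 0) {
        return x | (b << 7);         // up to 14 bits: two bytes
    }
    x |= (b & 0x7f) << 7;
    b = in.readByte();
    if (b >= 0) {
        return x | (b << 14);
    }
    x |= (b & 0x7f) << 14;
    b = in.readByte();
    if (b >= 0) {
        return x | (b << 21);
    }
    // the fifth byte carries the top 4 bits
    return x | ((b & 0x7f) << 21) | (in.readByte() << 28);
}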
// type 2
......@@ -832,7 +833,7 @@ public class Recover extends Tool implements DataHandler {
// type 4
case Page.TYPE_BTREE_LEAF: {
pageTypeCount[type]++;
setStorage(s.readInt());
setStorage(s.readVarInt());
int entries = s.readShortInt();
writer.println("-- page " + page + ": b-tree leaf " + (last ? "(last)" : "") + " table: " + storageId + " entries: " + entries);
if (trace) {
......@@ -1167,7 +1168,7 @@ public class Recover extends Tool implements DataHandler {
}
}
private void dumpPageDataLeaf(FileStore store, int pageSize, PrintWriter writer, Data s, boolean last, long pageId, int entryCount) throws SQLException {
private void dumpPageDataLeaf(FileStore store, int pageSize, PrintWriter writer, Data s, boolean last, long pageId, int columnCount, int entryCount) throws SQLException {
long[] keys = new long[entryCount];
int[] offsets = new int[entryCount];
long next = 0;
......@@ -1194,10 +1195,12 @@ public class Recover extends Tool implements DataHandler {
store.readFully(s2.getBytes(), 0, pageSize);
s2.setPos(4);
int type = s2.readByte();
int indexId = s2.readInt();
if (type == (Page.TYPE_DATA_OVERFLOW | Page.FLAG_LAST)) {
int size = s2.readShortInt();
writer.println("-- chain: " + next + " type: " + type + " size: " + size);
s.write(s2.getBytes(), 7, size);
s.checkCapacity(size);
s.write(s2.getBytes(), s2.length(), size);
break;
} else if (type == Page.TYPE_DATA_OVERFLOW) {
next = s2.readInt();
......@@ -1205,9 +1208,10 @@ public class Recover extends Tool implements DataHandler {
writeDataError(writer, "next:0", s2.getBytes(), 1);
break;
}
int size = pageSize - 9;
int size = pageSize - s2.length();
writer.println("-- chain: " + next + " type: " + type + " size: " + size + " next: " + next);
s.write(s2.getBytes(), 9, size);
s.checkCapacity(size);
s.write(s2.getBytes(), s2.length(), size);
} else {
writeDataError(writer, "type: " + type, s2.getBytes(), 1);
break;
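Three fixes in this overflow-chain walk: the index id now stored on overflow pages is read past, the payload offset is computed from the current parse position (s2.length()) instead of the hard-coded 7 and 9, and s.checkCapacity(size) reserves room before each append, so reassembling a large row can no longer overrun the destination buffer.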
......@@ -1219,7 +1223,7 @@ public class Recover extends Tool implements DataHandler {
int off = offsets[i];
writer.println("-- [" + i + "] storage: " + storageId + " key: " + key + " off: " + off);
s.setPos(off);
Value[] data = createRecord(writer, s);
Value[] data = createRecord(writer, s, columnCount);
if (data != null) {
createTemporaryTable(writer);
writeRow(writer, s, data);
......@@ -1259,14 +1263,18 @@ public class Recover extends Tool implements DataHandler {
}
private Value[] createRecord(PrintWriter writer, DataPage s) {
recordLength = s.readInt();
if (recordLength <= 0) {
writeDataError(writer, "recordLength<0", s.getBytes(), blockCount);
return createRecord(writer, s, s.readInt());
}
private Value[] createRecord(PrintWriter writer, DataPage s, int columnCount) {
recordLength = columnCount;
if (columnCount <= 0) {
writeDataError(writer, "columnCount<0", s.getBytes(), blockCount);
return null;
}
Value[] data;
try {
data = new Value[recordLength];
data = new Value[columnCount];
} catch (OutOfMemoryError e) {
writeDataError(writer, "out of memory", s.getBytes(), blockCount);
return null;
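createRecord is split into two overloads: the legacy path keeps reading the column count inline from the record (the one-argument form), while page-store data leaves pass in the count taken from the page header, matching the new leaf layout dumped above; the error label changes from recordLength<0 to columnCount<0 accordingly.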
......
......@@ -135,6 +135,8 @@ import org.h2.test.unit.TestValueHashMap;
import org.h2.test.unit.TestValueMemory;
import org.h2.test.utils.OutputCatcher;
import org.h2.tools.DeleteDbFiles;
import org.h2.tools.Recover;
import org.h2.tools.RunScript;
import org.h2.tools.Server;
import org.h2.util.MemoryUtils;
import org.h2.util.StringUtils;
......@@ -293,6 +295,7 @@ java org.h2.test.TestAll timer
System.setProperty("h2.maxMemoryRowsDistinct", "128");
System.setProperty("h2.check2", "true");
int testRecoverToolProcessLog;
/*
System.setProperty("h2.optimizeInList", "true");
......@@ -345,16 +348,19 @@ kill -9 `jps -l | grep "org.h2.test." | cut -d " " -f 1`
new TestTimer().runTest(test);
}
} else {
// System.setProperty(SysProperties.H2_PAGE_STORE, "true");
// test.pageStore = true;
// test.runTests();
// TestPerformance.main("-init", "-db", "1");
// Recover.execute("data", null);
System.setProperty(SysProperties.H2_PAGE_STORE, "false");
test.pageStore = false;
System.setProperty(SysProperties.H2_PAGE_STORE, "true");
test.pageStore = true;
test.runTests();
TestPerformance.main("-init", "-db", "1");
Recover.execute("data", null);
RunScript.execute("jdbc:h2:data/test2", "sa1", "sa1", "data/test.h2.sql", null, false);
Recover.execute("data", null);
// System.setProperty(SysProperties.H2_PAGE_STORE, "false");
// test.pageStore = false;
// test.runTests();
// TestPerformance.main("-init", "-db", "1");
}
System.out.println(TestBase.formatTime(System.currentTimeMillis() - time) + " total");
}
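The manual test path now runs with the page store enabled and round-trips the result: Recover dumps the data directory to a SQL script, RunScript replays it into a second database (test2), and a final Recover pass checks that the restored files dump cleanly as well.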
......
......@@ -61,6 +61,7 @@ public class TestBackup extends TestBase {
private void testBackup() throws SQLException {
deleteDb("backup");
deleteDb("restored");
Connection conn1, conn2, conn3;
Statement stat1, stat2, stat3;
conn1 = getConnection("backup");
......
......@@ -126,8 +126,12 @@ public class TestBigResult extends TestBase {
Connection conn = getConnection("bigResult");
Statement stat = conn.createStatement();
stat.execute("DROP TABLE IF EXISTS TEST");
stat.execute("CREATE TABLE TEST(" + "ID INT PRIMARY KEY, " + "Name VARCHAR(255), " + "FirstName VARCHAR(255), "
+ "Points INT," + "LicenseID INT)");
stat.execute("CREATE TABLE TEST(" +
"ID INT PRIMARY KEY, " +
"Name VARCHAR(255), " +
"FirstName VARCHAR(255), " +
"Points INT," +
"LicenseID INT)");
int len = getSize(10, 5000);
PreparedStatement prep = conn.prepareStatement("INSERT INTO TEST VALUES(?, ?, ?, ?, ?)");
for (int i = 0; i < len; i++) {
......
......@@ -32,6 +32,7 @@ public class TestPageStore extends TestBase {
}
public void test() throws Exception {
testCreatePkLater();
testTruncate();
testLargeIndex();
testUniqueIndex();
......@@ -39,6 +40,25 @@ public class TestPageStore extends TestBase {
testFuzzOperations();
}
private void testCreatePkLater() throws SQLException {
if (config.memory) {
return;
}
deleteDb("pageStore");
Connection conn;
Statement stat;
conn = getConnection("pageStore");
stat = conn.createStatement();
stat.execute("create table test(id int not null) as select 100");
stat.execute("create primary key on test(id)");
conn.close();
conn = getConnection("pageStore");
stat = conn.createStatement();
ResultSet rs = stat.executeQuery("select * from test where id = 100");
assertTrue(rs.next());
conn.close();
}
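testCreatePkLater covers the non-delegating path: the table already holds a row when the primary key is added, so getMainIndexColumn is bypassed and a separate PageBtreeIndex is built, which must survive a close and reopen. A hypothetical companion for the delegating path (not part of this commit) would declare the key while the table is still empty:

// Hypothetical companion test (not in this commit): the primary key is
// declared while the table is empty, so the scan index can serve it
// through a PageDelegateIndex.
private void testCreatePkEmpty() throws SQLException {
    if (config.memory) {
        return;
    }
    deleteDb("pageStore");
    Connection conn = getConnection("pageStore");
    Statement stat = conn.createStatement();
    stat.execute("create table test(id int primary key)");
    stat.execute("insert into test values(100)");
    conn.close();
    conn = getConnection("pageStore");
    stat = conn.createStatement();
    ResultSet rs = stat.executeQuery("select * from test where id = 100");
    assertTrue(rs.next());
    conn.close();
}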
private void testTruncate() throws SQLException {
if (config.memory) {
return;
......
......@@ -610,4 +610,4 @@ lrem lstore monitorexit lmul monitorenter fadd interpreting ishl istore dcmpg
daload dstore saload anewarray tableswitch lushr ladd lshr lreturn acmpne
locals multianewarray icmpne fneg faload ifeq decompiler zeroes forgot
modern slight boost characteristics significantly gae vfs centrally ten
approach risky getters
\ No newline at end of file
approach risky getters suxxess gmb delegate delegating delegates
\ No newline at end of file