Commit 45d8571a authored by Thomas Mueller

Page store format changes.

Parent 79106678
......@@ -96,8 +96,7 @@ public class PageBtreeIndex extends PageIndex {
page1.setPageId(id);
page1.setParentPageId(rootPageId);
page2.setParentPageId(rootPageId);
PageBtreeNode newRoot = new PageBtreeNode(this, rootPageId, store.createData());
newRoot.parentPageId = PageBtree.ROOT;
PageBtreeNode newRoot = PageBtreeNode.create(this, rootPageId, PageBtree.ROOT);
newRoot.init(page1, pivot, page2);
store.updateRecord(page1, true, page1.data);
store.updateRecord(page2, true, page2.data);
......
......@@ -24,8 +24,8 @@ import org.h2.store.PageStore;
* </li><li>page type: byte
* </li><li>index id: varInt
* </li><li>entry count: short
* </li><li>list of offsets: shortInt
* </li><li>data (pos: varLong, value,...)
* </li><li>list of offsets: short
* </li><li>data (key: varLong, value,...)
* </li></ul>
*/
public class PageBtreeLeaf extends PageBtree {
......@@ -37,32 +37,32 @@ public class PageBtreeLeaf extends PageBtree {
}
/**
* Create a new page.
* Read a b-tree leaf page.
*
* @param index the index
* @param data the data
* @param pageId the page id
* @param parentPageId the parent
* @return the page
*/
static PageBtreeLeaf create(PageBtreeIndex index, int pageId, int parentPageId) {
PageBtreeLeaf p = new PageBtreeLeaf(index, pageId, index.getPageStore().createData());
p.parentPageId = parentPageId;
p.writeHead();
p.start = p.data.length();
public static Page read(PageBtreeIndex index, Data data, int pageId) throws SQLException {
PageBtreeLeaf p = new PageBtreeLeaf(index, pageId, data);
p.read();
return p;
}
/**
* Read a b-tree leaf page.
* Create a new page.
*
* @param index the index
* @param data the data
* @param pageId the page id
* @param parentPageId the parent
* @return the page
*/
public static Page read(PageBtreeIndex index, Data data, int pageId) throws SQLException {
PageBtreeLeaf p = new PageBtreeLeaf(index, pageId, data);
p.read();
static PageBtreeLeaf create(PageBtreeIndex index, int pageId, int parentPageId) {
PageBtreeLeaf p = new PageBtreeLeaf(index, pageId, index.getPageStore().createData());
p.parentPageId = parentPageId;
p.writeHead();
p.start = p.data.length();
return p;
}
......
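The updated comment above spells out the new leaf layout: a fixed header (parent page id: int, page type: byte, index id: varInt, entry count: short), a list of short offsets, and per-entry data reached through those offsets, each entry starting with a varLong key. The reader below is a minimal sketch, not part of the commit: it only uses the Data accessors that appear elsewhere in this diff (readInt, readByte, readVarInt, readShortInt, readVarLong, setPos, readValue); the class, method and variable names are illustrative, and the imports assume Data sits in org.h2.store next to PageStore.

import java.sql.SQLException;
import org.h2.store.Data;
import org.h2.value.Value;

// Illustrative sketch: walk the new b-tree leaf page layout (not part of the commit).
class PageBtreeLeafFormatSketch {
    static void readLeaf(Data data) throws SQLException {
        data.reset();
        int parentPageId = data.readInt();      // parent page id (0 for root)
        int pageType = data.readByte();         // page type
        int indexId = data.readVarInt();        // index id, now variable length
        int entryCount = data.readShortInt();   // entry count
        int[] offsets = new int[entryCount];
        for (int i = 0; i < entryCount; i++) {
            offsets[i] = data.readShortInt();   // list of offsets: one short each
        }
        for (int i = 0; i < entryCount; i++) {
            data.setPos(offsets[i]);
            long key = data.readVarLong();      // entry data starts with the key
            Value v = data.readValue();         // followed by the indexed value(s)
        }
    }
}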
......@@ -19,24 +19,21 @@ import org.h2.store.PageStore;
import org.h2.util.MemoryUtils;
/**
* A b-tree node page that contains index data. Data is organized as follows:
* [leaf 0] (largest value of leaf 0) [leaf 1] Format:
* <ul>
* <li>0-3: parent page id</li>
* <li>4-4: page type</li>
* <li>5-8: index id</li>
* <li>9-10: entry count</li>
* <li>11-14: row count of all children (-1 if not known)</li>
* <li>15-18: rightmost child page id</li>
* <li>19- entries: leaf page id: int, offset: short</li>
* </ul>
* The row is the largest row of the respective child, meaning
* row[0] is the largest row of child[0].
* A b-tree node page that contains index data.
* Format:
* <ul><li>parent page id (0 for root): int
* </li><li>page type: byte
* </li><li>index id: varInt
* </li><li>count of all children (-1 if not known): int
* </li><li>entry count: short
* </li><li>rightmost child page id: int
* </li><li>entries (child page id: int, offset: short)
* The row contains the largest key of the respective child, meaning
* row[0] contains the largest key of child[0].
*/
public class PageBtreeNode extends PageBtree {
private static final int CHILD_OFFSET_PAIR_LENGTH = 6;
private static final int CHILD_OFFSET_PAIR_START = 19;
/**
* The page ids of the children.
......@@ -47,9 +44,8 @@ public class PageBtreeNode extends PageBtree {
private int rowCount = UNKNOWN_ROWCOUNT;
PageBtreeNode(PageBtreeIndex index, int pageId, Data data) {
private PageBtreeNode(PageBtreeIndex index, int pageId, Data data) {
super(index, pageId, data);
start = CHILD_OFFSET_PAIR_START;
}
/**
......@@ -66,19 +62,36 @@ public class PageBtreeNode extends PageBtree {
return p;
}
/**
* Create a new b-tree node page.
*
* @param index the index
* @param data the data
* @param pageId the page id
* @return the page
*/
public static PageBtreeNode create(PageBtreeIndex index, int pageId, int parentPageId) {
PageBtreeNode p = new PageBtreeNode(index, pageId, index.getPageStore().createData());
p.parentPageId = parentPageId;
p.writeHead();
// 4 bytes for the rightmost child page id
p.start = p.data.length() + 4;
return p;
}
private void read() throws SQLException {
data.reset();
this.parentPageId = data.readInt();
int type = data.readByte();
onlyPosition = (type & Page.FLAG_LAST) == 0;
int indexId = data.readInt();
int indexId = data.readVarInt();
if (indexId != index.getId()) {
throw Message.getSQLException(ErrorCode.FILE_CORRUPTED_1,
"page:" + getPos() + " expected index:" + index.getId() +
"got:" + indexId);
}
entryCount = data.readShortInt();
rowCount = data.readInt();
entryCount = data.readShortInt();
if (!PageStore.STORE_BTREE_ROWCOUNT) {
rowCount = UNKNOWN_ROWCOUNT;
}
......@@ -356,17 +369,21 @@ public class PageBtreeNode extends PageBtree {
index.getPageStore().writePage(getPos(), data);
}
private void writeHead() {
data.writeInt(parentPageId);
data.writeByte((byte) (Page.TYPE_BTREE_NODE | (onlyPosition ? 0 : Page.FLAG_LAST)));
data.writeVarInt(index.getId());
data.writeInt(rowCount);
data.writeShortInt(entryCount);
}
private void write() throws SQLException {
if (written) {
return;
}
readAllRows();
data.reset();
data.writeInt(parentPageId);
data.writeByte((byte) (Page.TYPE_BTREE_NODE | (onlyPosition ? 0 : Page.FLAG_LAST)));
data.writeInt(index.getId());
data.writeShortInt(entryCount);
data.writeInt(rowCount);
writeHead();
data.writeInt(childPageIds[entryCount]);
for (int i = 0; i < entryCount; i++) {
data.writeInt(childPageIds[i]);
......
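The node format above ends with the invariant that row[0] contains the largest key of child[0]. That invariant is what makes descent deterministic: a lookup for key k goes to the first child whose separator is greater than or equal to k, and to the rightmost child when k is larger than every separator. Below is a self-contained sketch of that routing over plain arrays; it is not H2 code, and it assumes children has one more element than separators (the rightmost child).

// Illustrative routing sketch: separators[i] is the largest key under child i,
// children.length == separators.length + 1 (the extra slot is the rightmost child).
static int findChild(long[] separators, long k) {
    int lo = 0, hi = separators.length;        // hi also indexes the rightmost child
    while (lo < hi) {
        int mid = (lo + hi) >>> 1;
        if (separators[mid] >= k) {
            hi = mid;                          // child mid still covers k, go left
        } else {
            lo = mid + 1;                      // k is larger, go right
        }
    }
    return lo;                                 // index into the children array
}

// Example: findChild(new long[] {10, 20, 30}, 25) returns 2,
// and any key above 30 returns 3, the rightmost child.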
......@@ -114,9 +114,10 @@ abstract class PageData extends Page {
* @param session the session
* @param min the smallest key
* @param max the largest key
* @param multiVersion if the delta should be used
* @return the cursor
*/
abstract Cursor find(Session session, long min, long max) throws SQLException;
abstract Cursor find(Session session, long min, long max, boolean multiVersion) throws SQLException;
/**
* Get the key at this position.
......
......@@ -258,9 +258,9 @@ public class PageDataLeaf extends PageData {
rows = newRows;
}
Cursor find(Session session, long min, long max) {
Cursor find(Session session, long min, long max, boolean multiVersion) {
int x = find(min);
return new PageScanCursor(session, this, x, max, index.isMultiVersion);
return new PageScanCursor(session, this, x, max, multiVersion);
}
/**
......@@ -447,7 +447,7 @@ public class PageDataLeaf extends PageData {
return "page[" + getPos() + "] data leaf table:" + index.getId() +
" entries:" + entryCount + " parent:" + parentPageId +
(firstOverflowPageId == 0 ? "" : " overflow:" + firstOverflowPageId) +
" keys:" + Arrays.toString(keys) + " offsets:" + Arrays.toString(offsets);
" keys:" + Arrays.toString(keys) + " offsets:" + Arrays.toString(offsets);
}
public void moveTo(Session session, int newPos) throws SQLException {
......
......@@ -21,21 +21,19 @@ import org.h2.util.MemoryUtils;
/**
* A leaf page that contains data of one or multiple rows.
* Format:
* <ul><li>0-3: parent page id
* </li><li>4-4: page type
* </li><li>5-8: index id
* </li><li>9-10: entry count
* </li><li>11-14: row count of all children (-1 if not known)
* </li><li>15-18: rightmost child page id
* </li><li>19- entries: 4 bytes leaf page id, varLong key
* <ul><li>parent page id (0 for root): int
* </li><li>page type: byte
* </li><li>table id: varInt
* </li><li>count of all children (-1 if not known): int
* </li><li>entry count: short
* </li><li>rightmost child page id: int
* </li><li>entries (key: varLong, child page id: int)
* </li></ul>
* The key is the largest key of the respective child, meaning
* key[0] is the largest key of child[0].
*/
public class PageDataNode extends PageData {
private static final int ENTRY_START = 19;
/**
* The page ids of the children.
*/
......@@ -48,12 +46,29 @@ public class PageDataNode extends PageData {
/**
* The number of bytes used in the page
*/
private int length = ENTRY_START;
private int length;
PageDataNode(PageScanIndex index, int pageId, Data data) {
private PageDataNode(PageScanIndex index, int pageId, Data data) {
super(index, pageId, data);
}
/**
* Create a new page.
*
* @param index the index
* @param pageId the page id
* @param parentPageId the parent
* @return the page
*/
static PageDataNode create(PageScanIndex index, int pageId, int parentPageId) {
PageDataNode p = new PageDataNode(index, pageId, index.getPageStore().createData());
p.parentPageId = parentPageId;
p.writeHead();
// 4 bytes for the rightmost child page id
p.length = p.data.length() + 4;
return p;
}
/**
* Read a data node page.
*
......@@ -72,14 +87,14 @@ public class PageDataNode extends PageData {
data.reset();
this.parentPageId = data.readInt();
data.readByte();
int indexId = data.readInt();
int indexId = data.readVarInt();
if (indexId != index.getId()) {
throw Message.getSQLException(ErrorCode.FILE_CORRUPTED_1,
"page:" + getPos() + " expected index:" + index.getId() +
"got:" + indexId);
}
entryCount = data.readShortInt();
rowCount = rowCountStored = data.readInt();
entryCount = data.readShortInt();
childPageIds = new int[entryCount + 1];
childPageIds[entryCount] = data.readInt();
keys = MemoryUtils.newLongArray(entryCount);
......@@ -146,16 +161,15 @@ public class PageDataNode extends PageData {
}
}
Cursor find(Session session, long min, long max) throws SQLException {
Cursor find(Session session, long min, long max, boolean multiVersion) throws SQLException {
int x = find(min);
int child = childPageIds[x];
return index.getPage(child, getPos()).find(session, min, max);
return index.getPage(child, getPos()).find(session, min, max, multiVersion);
}
PageData split(int splitPoint) throws SQLException {
int newPageId = index.getPageStore().allocatePage();
PageDataNode p2 = new PageDataNode(index, newPageId, index.getPageStore().createData());
p2.parentPageId = parentPageId;
PageDataNode p2 = PageDataNode.create(index, newPageId, parentPageId);
int firstChild = childPageIds[splitPoint];
for (int i = splitPoint; i < entryCount;) {
p2.addChild(p2.entryCount, childPageIds[splitPoint + 1], keys[splitPoint]);
......@@ -297,17 +311,21 @@ public class PageDataNode extends PageData {
index.getPageStore().writePage(getPos(), data);
}
private void writeHead() {
data.writeInt(parentPageId);
data.writeByte((byte) Page.TYPE_DATA_NODE);
data.writeVarInt(index.getId());
data.writeInt(rowCountStored);
data.writeShortInt(entryCount);
}
private void write() {
if (written) {
return;
}
check();
data.reset();
data.writeInt(parentPageId);
data.writeByte((byte) Page.TYPE_DATA_NODE);
data.writeInt(index.getId());
data.writeShortInt(entryCount);
data.writeInt(rowCountStored);
writeHead();
data.writeInt(childPageIds[entryCount]);
for (int i = 0; i < entryCount; i++) {
data.writeInt(childPageIds[i]);
......@@ -345,13 +363,12 @@ public class PageDataNode extends PageData {
public void moveTo(Session session, int newPos) throws SQLException {
PageStore store = index.getPageStore();
PageDataNode p2 = new PageDataNode(index, newPos, store.createData());
PageDataNode p2 = PageDataNode.create(index, newPos, parentPageId);
p2.rowCountStored = rowCountStored;
p2.rowCount = rowCount;
p2.childPageIds = childPageIds;
p2.keys = keys;
p2.entryCount = entryCount;
p2.parentPageId = parentPageId;
p2.length = length;
store.updateRecord(p2, false, null);
if (parentPageId == ROOT) {
......
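Dropping the fixed ENTRY_START = 19 constant follows from the header no longer having a fixed size: the table id is now a varInt, so PageDataNode.create() takes the start of the entry area from data.length() after writeHead() and reserves 4 more bytes for the rightmost child page id. A small worked example, assuming a table id small enough to fit in a single varInt byte (the typical case; larger ids take more bytes):

// Illustrative arithmetic only, mirroring writeHead() above.
int head = 4        // parent page id: int
         + 1        // page type: byte
         + 1        // table id: varInt, assumed 1 byte for small ids
         + 4        // row count of all children: int
         + 2;       // entry count: short
int entryStart = head + 4;   // + 4 reserves the rightmost child page id
// entryStart == 16 here, versus the fixed 19 of the old layout.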
......@@ -18,11 +18,11 @@ import org.h2.store.PageStore;
/**
* Overflow data for a leaf page.
* Format:
* <ul><li>0-3: parent page id (0 for root)
* </li><li>4-4: page type
* </li><li>5-8: index id
* </li><li>if there is more data: 9-12: next overflow page id
* </li><li>otherwise: 9-10: remaining size
* <ul><li>0-3: parent page id (0 for root): int
* </li><li>4-4: page type: byte
* </li><li>5-8: table id: int
* </li><li>9-12: if more data: next overflow page id: int
* </li><li>9-10: else remaining size: short
* </li><li>data
* </li></ul>
*/
......
......@@ -54,11 +54,21 @@ public class PageDelegateIndex extends PageIndex {
}
public Cursor find(Session session, SearchRow first, SearchRow last) throws SQLException {
return mainIndex.find(session, first, last);
long min = mainIndex.getLong(first, Long.MIN_VALUE);
long max = mainIndex.getLong(last, Long.MAX_VALUE);
return mainIndex.find(session, min, max, false);
}
public Cursor findFirstOrLast(Session session, boolean first) throws SQLException {
return mainIndex.findFirstOrLast(session, first);
Cursor cursor;
if (first) {
cursor = mainIndex.find(session, Long.MIN_VALUE, Long.MAX_VALUE, false);
} else {
long x = mainIndex.getLastKey();
cursor = mainIndex.find(session, x, x, false);
}
cursor.next();
return cursor;
}
public Cursor findNext(Session session, SearchRow higherThan, SearchRow last) {
......@@ -82,6 +92,7 @@ public class PageDelegateIndex extends PageIndex {
}
public void remove(Session session) throws SQLException {
mainIndex.setMainIndexColumn(-1);
session.getDatabase().getPageStore().removeMeta(this, session);
}
......
......@@ -44,6 +44,7 @@ public class PageScanIndex extends PageIndex implements RowIndex {
private int rowCountDiff;
private HashMap<Integer, Integer> sessionRowCount;
private int mainIndexColumn = -1;
private SQLException fastDuplicateKeyException;
public PageScanIndex(TableData table, int id, IndexColumn[] columns, IndexType indexType, int headPos, Session session) throws SQLException {
initBaseIndex(table, id, table.getName() + "_TABLE_SCAN", columns, indexType);
......@@ -61,9 +62,6 @@ public class PageScanIndex extends PageIndex implements RowIndex {
if (headPos == Index.EMPTY_HEAD) {
// new table
rootPageId = store.allocatePage();
// TODO currently the head position is stored in the log
// it should not for new tables, otherwise redo of other operations
// must ensure this page is not used for other things
store.addMeta(this, session);
PageDataLeaf root = PageDataLeaf.create(this, rootPageId, PageData.ROOT);
store.updateRecord(root, true, root.data);
......@@ -77,22 +75,26 @@ public class PageScanIndex extends PageIndex implements RowIndex {
// TODO check if really required
store.updateRecord(root, false, null);
}
// TODO re-use keys after many rows are deleted
}
if (trace.isDebugEnabled()) {
trace.debug("opened " + getName() + " rows:" + rowCount);
}
table.setRowCount(rowCount);
fastDuplicateKeyException = super.getDuplicateKeyException();
}
public SQLException getDuplicateKeyException() {
return fastDuplicateKeyException;
}
public void add(Session session, Row row) throws SQLException {
boolean retry = false;
if (mainIndexColumn != -1) {
row.setPos(row.getValue(mainIndexColumn).getInt());
} else {
if (row.getPos() == 0) {
row.setPos((int) ++lastKey);
} else {
lastKey = Math.max(lastKey, row.getPos() + 1);
retry = true;
}
}
if (trace.isDebugEnabled()) {
......@@ -110,6 +112,36 @@ public class PageScanIndex extends PageIndex implements RowIndex {
}
}
}
// when using auto-generated values, it's possible that multiple
// tries are required (specially if there was originally a primary key)
long add = 0;
while (true) {
try {
addTry(session, row);
break;
} catch (SQLException e) {
if (e != fastDuplicateKeyException) {
throw e;
}
if (!retry) {
throw super.getDuplicateKeyException();
}
if (add == 0) {
// in the first re-try add a small random number,
// to avoid collisions after a re-start
// TODO use long
row.setPos((int) (row.getPos() + Math.random() * 10000));
} else {
// TODO use long
row.setPos((int) (row.getPos() + add));
}
add++;
}
}
lastKey = Math.max(lastKey, row.getPos() + 1);
}
private void addTry(Session session, Row row) throws SQLException {
while (true) {
PageData root = getPage(rootPageId, 0);
int splitPoint = root.addRowTry(row);
......@@ -127,8 +159,7 @@ public class PageScanIndex extends PageIndex implements RowIndex {
page1.setPageId(id);
page1.setParentPageId(rootPageId);
page2.setParentPageId(rootPageId);
PageDataNode newRoot = new PageDataNode(this, rootPageId, store.createData());
newRoot.parentPageId = PageData.ROOT;
PageDataNode newRoot = PageDataNode.create(this, rootPageId, PageData.ROOT);
newRoot.init(page1, pivot, page2);
store.updateRecord(page1, true, page1.data);
store.updateRecord(page2, true, page2.data);
......@@ -208,23 +239,32 @@ public class PageScanIndex extends PageIndex implements RowIndex {
}
public Cursor find(Session session, SearchRow first, SearchRow last) throws SQLException {
long min = getLong(first, Long.MIN_VALUE);
long max = getLong(last, Long.MAX_VALUE);
// ignore first and last
PageData root = getPage(rootPageId, 0);
return root.find(session, min, max);
return root.find(session, Long.MIN_VALUE, Long.MAX_VALUE, isMultiVersion);
}
public Cursor findFirstOrLast(Session session, boolean first) throws SQLException {
Cursor cursor;
/**
* Search for a specific row or a set of rows.
*
* @param session the session
* @param first the key of the first row
* @param last the key of the last row
* @param multiVersion if mvcc should be used
* @return the cursor
*/
Cursor find(Session session, long first, long last, boolean multiVersion) throws SQLException {
PageData root = getPage(rootPageId, 0);
if (first) {
cursor = root.find(session, Long.MIN_VALUE, Long.MAX_VALUE);
} else {
long lastKey = root.getLastKey();
cursor = root.find(session, lastKey, lastKey);
}
cursor.next();
return cursor;
return root.find(session, first, last, multiVersion);
}
public Cursor findFirstOrLast(Session session, boolean first) {
throw Message.throwInternalError();
}
long getLastKey() throws SQLException {
PageData root = getPage(rootPageId, 0);
return root.getLastKey();
}
public double getCost(Session session, int[] masks) {
......@@ -256,7 +296,6 @@ public class PageScanIndex extends PageIndex implements RowIndex {
root.remove(key);
invalidateRowCount();
rowCount--;
// TODO re-use keys
}
if (database.isMultiVersion()) {
// if storage is null, the delete flag is not yet set
......
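The new add() path in PageScanIndex above relies on two details worth noting: getDuplicateKeyException() hands out one pre-built fastDuplicateKeyException, so addTry() can signal "key already taken" cheaply and the caller can test for it by identity (e != fastDuplicateKeyException), and the first retry bumps the key by a small random amount so a restarted database does not keep colliding on the same value, while later retries step linearly. Below is a self-contained sketch of the same pattern; the class and the Set standing in for the index are illustrative, not H2 API.

import java.util.HashSet;
import java.util.Set;

// Illustrative sketch of retrying an insert with an auto-generated key,
// using one pre-allocated exception as a cheap, identity-comparable marker.
class AutoKeyRetrySketch {
    static final RuntimeException FAST_DUPLICATE = new RuntimeException("duplicate key");
    final Set<Long> usedKeys = new HashSet<Long>();

    void addTry(long key) {
        if (!usedKeys.add(key)) {
            throw FAST_DUPLICATE;        // thrown repeatedly, never re-created
        }
    }

    long add(long key) {
        long bump = 0;
        while (true) {
            try {
                addTry(key);
                return key;
            } catch (RuntimeException e) {
                if (e != FAST_DUPLICATE) {
                    throw e;             // a real failure, not the fast marker
                }
                if (bump == 0) {
                    // first retry: small random offset, so collisions do not
                    // repeat after a restart
                    key += (long) (Math.random() * 10000);
                } else {
                    key += bump;         // later retries step linearly
                }
                bump++;
            }
        }
    }
}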
......@@ -79,10 +79,7 @@ import org.h2.value.ValueString;
*/
public class PageStore implements CacheWriter {
// TODO can not use delegating index when starting if it was created later
// TODO re-use deleted keys; specially if the primary key is removed
// TODO table row: number of columns should be varInt not int
// TODO fix page format of data overflow and so on
// TODO implement checksum; 0 for empty pages
// TODO in log, don't store empty space between page head and page data
// TODO long primary keys don't use delegating index yet (setPos(): int)
......@@ -468,7 +465,7 @@ public class PageStore implements CacheWriter {
break;
}
case Page.TYPE_DATA_NODE: {
int indexId = data.readInt();
int indexId = data.readVarInt();
PageScanIndex index = (PageScanIndex) metaObjects.get(indexId);
if (index == null) {
Message.throwInternalError("index not found " + indexId);
......@@ -495,7 +492,7 @@ public class PageStore implements CacheWriter {
break;
}
case Page.TYPE_BTREE_NODE: {
int indexId = data.readInt();
int indexId = data.readVarInt();
PageBtreeIndex index = (PageBtreeIndex) metaObjects.get(indexId);
if (index == null) {
Message.throwInternalError("index not found " + indexId);
......
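The PageStore redo code above now reads the index id of data node and b-tree node pages with readVarInt(), matching the page header changes earlier in this commit, so small ids typically cost one byte instead of four. As an illustration only, here is a common 7-bits-per-byte continuation encoding; H2's actual varInt wire format is not shown in this diff and may differ in details such as sign handling.

import java.io.ByteArrayOutputStream;

// Illustrative varInt codec: 7 data bits per byte, high bit means "more follows".
// An assumed encoding for illustration; not taken from the H2 sources.
class VarIntSketch {
    static byte[] writeVarInt(int x) {
        ByteArrayOutputStream out = new ByteArrayOutputStream();
        while ((x & ~0x7f) != 0) {
            out.write((x & 0x7f) | 0x80);   // lower 7 bits plus continuation bit
            x >>>= 7;
        }
        out.write(x);                        // last byte, continuation bit clear
        return out.toByteArray();
    }

    static int readVarInt(byte[] buf) {
        int x = 0, shift = 0, pos = 0, b;
        do {
            b = buf[pos++] & 0xff;
            x |= (b & 0x7f) << shift;
            shift += 7;
        } while ((b & 0x80) != 0);
        return x;
    }

    public static void main(String[] args) {
        byte[] encoded = writeVarInt(17);    // a small index id
        System.out.println(encoded.length + " byte(s), decodes to " + readVarInt(encoded));
    }
}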
......@@ -62,6 +62,7 @@ import org.h2.util.Tool;
import org.h2.value.Value;
import org.h2.value.ValueInt;
import org.h2.value.ValueLob;
import org.h2.value.ValueLong;
/**
* Helps recovering a corrupted database.
......@@ -820,8 +821,9 @@ public class Recover extends Tool implements DataHandler {
// type 2
case Page.TYPE_DATA_NODE: {
pageTypeCount[type]++;
int entries = s.readShortInt();
setStorage(s.readVarInt());
int rowCount = s.readInt();
int entries = s.readShortInt();
writer.println("-- page " + page + ": data node " + (last ? "(last)" : "") + " entries: " + entries + " rowCount: " + rowCount);
break;
}
......@@ -835,7 +837,7 @@ public class Recover extends Tool implements DataHandler {
pageTypeCount[type]++;
setStorage(s.readVarInt());
int entries = s.readShortInt();
writer.println("-- page " + page + ": b-tree leaf " + (last ? "(last)" : "") + " table: " + storageId + " entries: " + entries);
writer.println("-- page " + page + ": b-tree leaf " + (last ? "(last)" : "") + " index: " + storageId + " entries: " + entries);
if (trace) {
dumpPageBtreeLeaf(writer, s, entries, !last);
}
......@@ -844,7 +846,8 @@ public class Recover extends Tool implements DataHandler {
// type 5
case Page.TYPE_BTREE_NODE:
pageTypeCount[type]++;
writer.println("-- page " + page + ": b-tree node" + (last ? "(last)" : ""));
setStorage(s.readVarInt());
writer.println("-- page " + page + ": b-tree node" + (last ? "(last)" : "") + " index: " + storageId);
if (trace) {
dumpPageBtreeNode(writer, s, !last);
}
......@@ -1078,15 +1081,15 @@ public class Recover extends Tool implements DataHandler {
}
private void dumpPageBtreeNode(PrintWriter writer, Data s, boolean positionOnly) {
int entryCount = s.readShortInt();
int rowCount = s.readInt();
int entryCount = s.readShortInt();
int[] children = new int[entryCount + 1];
int[] offsets = new int[entryCount];
children[entryCount] = s.readInt();
int empty = Integer.MAX_VALUE;
for (int i = 0; i < entryCount; i++) {
children[i] = s.readInt();
int off = s.readInt();
int off = s.readShortInt();
empty = Math.min(off, empty);
offsets[i] = off;
}
......@@ -1095,10 +1098,10 @@ public class Recover extends Tool implements DataHandler {
for (int i = 0; i < entryCount; i++) {
int off = offsets[i];
s.setPos(off);
int pos = s.readInt();
long key = s.readVarLong();
Value data;
if (positionOnly) {
data = ValueInt.get(pos);
data = ValueLong.get(key);
} else {
try {
data = s.readValue();
......@@ -1107,7 +1110,7 @@ public class Recover extends Tool implements DataHandler {
continue;
}
}
writer.println("-- [" + i + "] child: " + children[i] + " pos: " + pos + " data: " + data);
writer.println("-- [" + i + "] child: " + children[i] + " key: " + key + " data: " + data);
}
writer.println("-- [" + entryCount + "] child: " + children[entryCount] + " rowCount: " + rowCount);
}
......@@ -1186,7 +1189,9 @@ public class Recover extends Tool implements DataHandler {
empty = empty - s.length();
pageDataHead += s.length();
pageDataEmpty += empty;
writer.println("-- empty: " + empty);
if (trace) {
writer.println("-- empty: " + empty);
}
if (!last) {
DataPage s2 = DataPage.create(this, pageSize);
s.setPos(pageSize);
......@@ -1221,7 +1226,9 @@ public class Recover extends Tool implements DataHandler {
for (int i = 0; i < entryCount; i++) {
long key = keys[i];
int off = offsets[i];
writer.println("-- [" + i + "] storage: " + storageId + " key: " + key + " off: " + off);
if (trace) {
writer.println("-- [" + i + "] storage: " + storageId + " key: " + key + " off: " + off);
}
s.setPos(off);
Value[] data = createRecord(writer, s, columnCount);
if (data != null) {
......
......@@ -32,6 +32,7 @@ public class TestPageStore extends TestBase {
}
public void test() throws Exception {
testDropPk();
testCreatePkLater();
testTruncate();
testLargeIndex();
......@@ -40,6 +41,25 @@ public class TestPageStore extends TestBase {
testFuzzOperations();
}
private void testDropPk() throws SQLException {
if (config.memory) {
return;
}
deleteDb("pageStore");
Connection conn;
Statement stat;
conn = getConnection("pageStore");
stat = conn.createStatement();
stat.execute("create table test(id int primary key)");
stat.execute("insert into test values(" + Integer.MIN_VALUE+ "), (" + Integer.MAX_VALUE + ")");
stat.execute("alter table test drop primary key");
conn.close();
conn = getConnection("pageStore");
stat = conn.createStatement();
stat.execute("insert into test values(" + Integer.MIN_VALUE+ "), (" + Integer.MAX_VALUE + ")");
conn.close();
}
private void testCreatePkLater() throws SQLException {
if (config.memory) {
return;
......
......@@ -610,4 +610,4 @@ lrem lstore monitorexit lmul monitorenter fadd interpreting ishl istore dcmpg
daload dstore saload anewarray tableswitch lushr ladd lshr lreturn acmpne
locals multianewarray icmpne fneg faload ifeq decompiler zeroes forgot
modern slight boost characteristics significantly gae vfs centrally ten
approach risky getters suxxess gmb delegate delegating delegates
\ No newline at end of file
approach risky getters suxxess gmb delegate delegating delegates collisions
\ No newline at end of file