Commit 796e386a (h2database project)

New experimental page store.

Authored Jun 19, 2009 by Thomas Mueller
Parent: 69f5efd2

Showing 14 changed files with 296 additions and 193 deletions:
h2/src/main/org/h2/index/PageBtree.java (+8 / -7)
h2/src/main/org/h2/index/PageBtreeCursor.java (+1 / -1)
h2/src/main/org/h2/index/PageBtreeIndex.java (+22 / -17)
h2/src/main/org/h2/index/PageBtreeLeaf.java (+21 / -28)
h2/src/main/org/h2/index/PageBtreeNode.java (+49 / -32)
h2/src/main/org/h2/index/PageData.java (+3 / -4)
h2/src/main/org/h2/index/PageDataLeaf.java (+4 / -5)
h2/src/main/org/h2/index/PageDataNode.java (+9 / -9)
h2/src/main/org/h2/index/PageScanIndex.java (+2 / -2)
h2/src/main/org/h2/store/PageStore.java (+5 / -3)
h2/src/main/org/h2/table/Column.java (+4 / -0)
h2/src/main/org/h2/tools/Recover.java (+159 / -78)
h2/src/test/org/h2/test/TestAll.java (+1 / -1)
h2/src/test/org/h2/test/db/TestPowerOff.java (+8 / -6)
h2/src/main/org/h2/index/PageBtree.java

@@ -56,6 +56,11 @@ abstract class PageBtree extends Record {
      */
     protected int start;
 
+    /**
+     * If only the position of the row is stored in the page
+     */
+    protected boolean onlyPosition;
+
     /**
      * If the page was already written to the buffer.
      */
@@ -123,13 +128,13 @@ abstract class PageBtree extends Record {
     abstract void read() throws SQLException;
 
     /**
-     * Add a row.
+     * Try to add a row.
      *
      * @param row the row
      * @return 0 if successful, or the split position if the page needs to be
      *         split
      */
-    abstract int addRow(SearchRow row) throws SQLException;
+    abstract int addRowTry(SearchRow row) throws SQLException;
 
     /**
      * Find the first row.
@@ -147,13 +152,9 @@ abstract class PageBtree extends Record {
      * @return the row
      */
     SearchRow getRow(int at) throws SQLException {
-        int test;
-        if (at < 0) {
-            System.out.println("stop");
-        }
         SearchRow row = rows[at];
         if (row == null) {
-            row = index.readRow(data, offsets[at]);
+            row = index.readRow(data, offsets[at], onlyPosition);
            rows[at] = row;
        }
        return row;
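The addRowTry contract above (return 0 on success, otherwise the suggested split point) is what the callers in the following files rely on. Below is a minimal illustrative sketch of that caller pattern, using simplified stand-in types rather than H2's classes:

    // Sketch only, not H2 code: demonstrates the addRowTry return-value contract.
    interface BtreePageSketch {
        int addRowTry(String row);              // 0 = stored, otherwise the split point
        BtreePageSketch split(int splitPoint);  // moves entries >= splitPoint to a new page
    }

    class AddRowLoopSketch {
        static void add(BtreePageSketch root, String row) {
            while (true) {
                int splitPoint = root.addRowTry(row);
                if (splitPoint == 0) {
                    return;                      // the row fit into the page
                }
                // page full: split off a sibling (which frees space in root) and retry;
                // a real index would also register the sibling and its pivot in the parent
                root.split(splitPoint);
            }
        }
    }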
h2/src/main/org/h2/index/PageBtreeCursor.java

@@ -77,7 +77,7 @@ public class PageBtreeCursor implements Cursor {
         return true;
     }
 
-    public boolean previous() throws SQLException {
+    public boolean previous() {
         i--;
         int todo;
         return true;
h2/src/main/org/h2/index/PageBtreeIndex.java

@@ -11,7 +11,6 @@ import org.h2.constant.ErrorCode;
 import org.h2.constant.SysProperties;
 import org.h2.engine.Session;
 import org.h2.message.Message;
-import org.h2.message.TraceSystem;
 import org.h2.result.Row;
 import org.h2.result.SearchRow;
 import org.h2.store.DataPage;
@@ -94,7 +93,7 @@ public class PageBtreeIndex extends BaseIndex {
         }
         while (true) {
             PageBtree root = getPage(headPos);
-            int splitPoint = root.addRow(row);
+            int splitPoint = root.addRowTry(row);
             if (splitPoint == 0) {
                 break;
             }
@@ -275,7 +274,7 @@ public class PageBtreeIndex extends BaseIndex {
         return rowCount;
     }
 
-    public void close(Session session) throws SQLException {
+    public void close(Session session) {
         if (trace.isDebugEnabled()) {
             trace.debug("close");
         }
@@ -291,10 +290,14 @@ public class PageBtreeIndex extends BaseIndex {
      * @param offset the offset
      * @return the row
      */
-    SearchRow readRow(DataPage data, int offset) throws SQLException {
+    SearchRow readRow(DataPage data, int offset, boolean onlyPosition) throws SQLException {
         data.setPos(offset);
+        int pos = data.readInt();
+        if (onlyPosition) {
+            return tableData.getRow(null, pos);
+        }
         SearchRow row = table.getTemplateSimpleRow(columns.length == 1);
-        row.setPos(data.readInt());
+        row.setPos(pos);
         for (Column col : columns) {
             int idx = col.getColumnId();
             row.setValue(idx, data.readValue());
@@ -307,34 +310,36 @@ public class PageBtreeIndex extends BaseIndex {
      *
      * @param data the data
      * @param offset the offset
+     * @param onlyPosition whether only the position of the row is stored
      * @param row the row to write
      */
-    void writeRow(DataPage data, int offset, SearchRow row) throws SQLException {
-        if (offset < 0) {
-            int test;
-            System.out.println("stop");
-        }
+    void writeRow(DataPage data, int offset, SearchRow row, boolean onlyPosition) throws SQLException {
         data.setPos(offset);
         data.writeInt(row.getPos());
+        if (!onlyPosition) {
             for (Column col : columns) {
                 int idx = col.getColumnId();
                 data.writeValue(row.getValue(idx));
             }
+        }
     }
 
     /**
      * Get the size of a row (only the part that is stored in the index).
      *
      * @param dummy a dummy data page to calculate the size
      * @param row the row
+     * @param onlyPosition whether only the position of the row is stored
      * @return the number of bytes
      */
-    int getRowSize(DataPage dummy, SearchRow row) throws SQLException {
+    int getRowSize(DataPage dummy, SearchRow row, boolean onlyPosition) throws SQLException {
         int rowsize = DataPage.LENGTH_INT;
+        if (!onlyPosition) {
             for (Column col : columns) {
                 Value v = row.getValue(col.getColumnId());
                 rowsize += dummy.getValueLen(v);
             }
+        }
         return rowsize;
     }
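readRow, writeRow and getRowSize now take an onlyPosition flag: when a row is too wide to keep its indexed values inside the index page, only the row position is stored, and the full row is fetched from the table on read. A self-contained illustrative sketch of that idea follows; ByteBuffer and int values are stand-ins for H2's DataPage and Value types:

    import java.nio.ByteBuffer;
    import java.util.Arrays;

    // Sketch only: an index entry is either position + indexed values, or just the position.
    class IndexEntrySketch {
        static void write(ByteBuffer page, int rowPos, int[] values, boolean onlyPosition) {
            page.putInt(rowPos);                 // the row position is always stored
            if (!onlyPosition) {
                for (int v : values) {
                    page.putInt(v);              // indexed values, omitted for "wide" rows
                }
            }
        }

        static int[] read(ByteBuffer page, int columnCount, boolean onlyPosition) {
            int rowPos = page.getInt();
            if (onlyPosition) {
                // only the position is stored; the caller must load the row from the
                // table (PageBtreeIndex does this via tableData.getRow in the diff above)
                return new int[] { rowPos };
            }
            int[] values = new int[columnCount];
            for (int i = 0; i < columnCount; i++) {
                values[i] = page.getInt();
            }
            return values;
        }

        public static void main(String[] args) {
            ByteBuffer page = ByteBuffer.allocate(64);
            write(page, 42, new int[] { 7, 9 }, false);
            page.flip();
            System.out.println(Arrays.toString(read(page, 2, false)));  // [7, 9]
        }
    }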
h2/src/main/org/h2/index/PageBtreeLeaf.java

@@ -14,30 +14,30 @@ import org.h2.store.DataPage;
 import org.h2.store.PageStore;
 
 /**
- * A leaf page that contains index data.
+ * A b-tree leaf page that contains index data.
  * Format:
  * <ul><li>0-3: parent page id (0 for root)
  * </li><li>4-4: page type
  * </li><li>5-8: table id
  * </li><li>9-10: entry count
- * </li><li>overflow: 11-14: the row key
  * </li><li>11-: list of key / offset pairs (4 bytes key, 2 bytes offset)
  * </li><li>data
  * </li></ul>
  */
 class PageBtreeLeaf extends PageBtree {
 
-    private static final int KEY_OFFSET_PAIR_LENGTH = 6;
-    private static final int KEY_OFFSET_PAIR_START = 11;
+    private static final int OFFSET_LENGTH = 2;
+    private static final int OFFSET_START = 11;
 
     PageBtreeLeaf(PageBtreeIndex index, int pageId, int parentPageId, DataPage data) {
         super(index, pageId, parentPageId, data);
-        start = KEY_OFFSET_PAIR_START;
+        start = OFFSET_START;
     }
 
     void read() throws SQLException {
         data.setPos(4);
-        data.readByte();
+        int type = data.readByte();
+        onlyPosition = (type & Page.FLAG_LAST) == 0;
         int tableId = data.readInt();
         if (tableId != index.getId()) {
             throw Message.getSQLException(ErrorCode.FILE_CORRUPTED_1,
@@ -60,14 +60,18 @@ class PageBtreeLeaf extends PageBtree {
      * @param row the now to add
      * @return the split point of this page, or 0 if no split is required
      */
-    int addRow(SearchRow row) throws SQLException {
-        int rowLength = index.getRowSize(data, row);
+    int addRowTry(SearchRow row) throws SQLException {
+        int rowLength = index.getRowSize(data, row, onlyPosition);
         int pageSize = index.getPageStore().getPageSize();
         int last = entryCount == 0 ? pageSize : offsets[entryCount - 1];
-        if (entryCount > 0 && last - rowLength < start + KEY_OFFSET_PAIR_LENGTH) {
-            int todoSplitAtLastInsertionPoint;
-            return (entryCount / 2) + 1;
+        if (last - rowLength < start + OFFSET_LENGTH) {
+            if (entryCount > 0) {
+                int todoSplitAtLastInsertionPoint;
+                return (entryCount / 2) + 1;
+            }
+            onlyPosition = true;
+            rowLength = index.getRowSize(data, row, onlyPosition);
         }
         written = false;
         int offset = last - rowLength;
         int[] newOffsets = new int[entryCount + 1];
@@ -89,23 +93,12 @@ class PageBtreeLeaf extends PageBtree {
             }
         }
         entryCount++;
-        start += KEY_OFFSET_PAIR_LENGTH;
+        start += OFFSET_LENGTH;
         newOffsets[x] = offset;
         newRows[x] = row;
         offsets = newOffsets;
         rows = newRows;
         index.getPageStore().updateRecord(this, true, data);
-        if (offset < start) {
-            if (entryCount > 1) {
-                Message.throwInternalError();
-            }
-            // need to write the overflow page id
-            start += 4;
-            int remaining = rowLength - (pageSize - start);
-            // fix offset
-            offset = start;
-            offsets[x] = offset;
-        }
         return 0;
     }
@@ -126,7 +119,7 @@ class PageBtreeLeaf extends PageBtree {
             newOffsets[j] = offsets[j + 1] + rowLength;
         }
         System.arraycopy(rows, i + 1, newRows, i, entryCount - i);
-        start -= KEY_OFFSET_PAIR_LENGTH;
+        start -= OFFSET_LENGTH;
         offsets = newOffsets;
         rows = newRows;
     }
@@ -139,7 +132,7 @@ class PageBtreeLeaf extends PageBtree {
         int newPageId = index.getPageStore().allocatePage();
         PageBtreeLeaf p2 = new PageBtreeLeaf(index, newPageId, parentPageId, index.getPageStore().createDataPage());
         for (int i = splitPoint; i < entryCount;) {
-            p2.addRow(getRow(splitPoint));
+            p2.addRowTry(getRow(splitPoint));
             removeRow(splitPoint);
         }
         return p2;
@@ -190,14 +183,14 @@ class PageBtreeLeaf extends PageBtree {
         readAllRows();
         data.reset();
         data.writeInt(parentPageId);
-        data.writeByte((byte) Page.TYPE_BTREE_LEAF);
+        data.writeByte((byte) (Page.TYPE_BTREE_LEAF | (onlyPosition ? 0 : Page.FLAG_LAST)));
         data.writeInt(index.getId());
         data.writeShortInt(entryCount);
         for (int i = 0; i < entryCount; i++) {
             data.writeShortInt(offsets[i]);
         }
         for (int i = 0; i < entryCount; i++) {
-            index.writeRow(data, offsets[i], rows[i]);
+            index.writeRow(data, offsets[i], rows[i], onlyPosition);
         }
         written = true;
     }
@@ -234,7 +227,7 @@ class PageBtreeLeaf extends PageBtree {
             return;
         }
         PageBtreeNode next = (PageBtreeNode) index.getPage(parentPageId);
-        next.nextPage(cursor, getRow(0));
+        next.nextPage(cursor, getPos());
     }
 
     public String toString() {
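The space check in addRowTry compares the end of the offset array (start) with the start of the lowest stored row (last): a row only fits if the gap can also hold one more 2-byte offset; otherwise the page either reports a split point or, for a single over-wide row, falls back to onlyPosition. A small sketch of that check, with illustrative names and an assumed OFFSET_LENGTH of 2 as in the diff:

    // Sketch only: rows grow downward from the end of the page, offsets grow upward.
    class LeafFitSketch {
        static final int OFFSET_LENGTH = 2;

        static boolean fits(int pageSize, int start, int lastRowOffset, int rowLength) {
            int last = lastRowOffset == 0 ? pageSize : lastRowOffset;
            // the new row end must stay above the offset array plus one new offset slot
            return last - rowLength >= start + OFFSET_LENGTH;
        }

        public static void main(String[] args) {
            // 4 KB page, offset array ends at byte 100, rows currently start at byte 2000
            System.out.println(fits(4096, 100, 2000, 500));  // true: plenty of room
            System.out.println(fits(4096, 100, 600, 500));   // false: split or onlyPosition
        }
    }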
h2/src/main/org/h2/index/PageBtreeNode.java

@@ -7,13 +7,13 @@
 package org.h2.index;
 
 import java.sql.SQLException;
 import org.h2.constant.ErrorCode;
 import org.h2.message.Message;
 import org.h2.result.SearchRow;
 import org.h2.store.DataPage;
 
 /**
- * A leaf page that contains index data.
+ * A b-tree node page that contains index data.
  * Data is organized as follows: [leaf 0] (largest value of leaf 0) [leaf 1]
  * Format:
  * <ul><li>0-3: parent page id
  * </li><li>4-4: page type
@@ -43,7 +43,9 @@ class PageBtreeNode extends PageBtree {
     }
 
     void read() {
-        data.setPos(5);
+        data.setPos(4);
+        int type = data.readByte();
+        onlyPosition = (type & Page.FLAG_LAST) == 0;
         entryCount = data.readShortInt();
         rowCount = rowCountStored = data.readInt();
         childPageIds = new int[entryCount + 1];
@@ -58,25 +60,39 @@ class PageBtreeNode extends PageBtree {
         start = data.length();
     }
 
+    private int addChildTry(SearchRow row) throws SQLException {
+        if (entryCount == 0) {
+            return 0;
+        }
+        int rowLength = index.getRowSize(data, row, onlyPosition);
+        int pageSize = index.getPageStore().getPageSize();
+        int last = entryCount == 0 ? pageSize : offsets[entryCount - 1];
+        if (last - rowLength < start + CHILD_OFFSET_PAIR_LENGTH) {
+            int todoSplitAtLastInsertionPoint;
+            return (entryCount / 2) + 1;
+        }
+        return 0;
+    }
+
     /**
-     * Add a row if possible. If it is possible this method returns 0, otherwise
+     * Add a row. If it is possible this method returns 0, otherwise
      * the split point. It is always possible to add one row.
      *
      * @param row the now to add
      * @return the split point of this page, or 0 if no split is required
      */
-    private int addChild(int x, int childPageId, SearchRow row) throws SQLException {
-        int rowLength = index.getRowSize(data, row);
+    private void addChild(int x, int childPageId, SearchRow row) throws SQLException {
+        int rowLength = index.getRowSize(data, row, onlyPosition);
         int pageSize = index.getPageStore().getPageSize();
         int last = entryCount == 0 ? pageSize : offsets[entryCount - 1];
-        if (entryCount > 0 && last - rowLength < start + CHILD_OFFSET_PAIR_LENGTH) {
-            int todoSplitAtLastInsertionPoint;
-            return (entryCount / 2) + 1;
-        }
-        int offset = last - rowLength;
-        if (offset < 0) {
-            throw Message.getSQLException(ErrorCode.FEATURE_NOT_SUPPORTED_1, "Wide indexes");
+        if (last - rowLength < start + CHILD_OFFSET_PAIR_LENGTH) {
+            if (entryCount > 0) {
+                throw Message.throwInternalError();
+            }
+            onlyPosition = true;
+            rowLength = index.getRowSize(data, row, onlyPosition);
         }
+        int offset = last - rowLength;
         int[] newOffsets = new int[entryCount + 1];
         SearchRow[] newRows = new SearchRow[entryCount + 1];
         int[] newChildPageIds = new int[entryCount + 2];
@@ -84,7 +100,6 @@ class PageBtreeNode extends PageBtree {
             System.arraycopy(childPageIds, 0, newChildPageIds, 0, x + 1);
         }
         if (entryCount > 0) {
-            readAllRows();
             System.arraycopy(offsets, 0, newOffsets, 0, x);
             System.arraycopy(rows, 0, newRows, 0, x);
             if (x < entryCount) {
@@ -104,26 +119,25 @@ class PageBtreeNode extends PageBtree {
         rows = newRows;
         childPageIds = newChildPageIds;
         entryCount++;
-        return 0;
     }
 
-    int addRow(SearchRow row) throws SQLException {
+    int addRowTry(SearchRow row) throws SQLException {
         while (true) {
             int x = find(row, false, false);
             PageBtree page = index.getPage(childPageIds[x]);
-            int splitPoint = page.addRow(row);
+            int splitPoint = page.addRowTry(row);
             if (splitPoint == 0) {
                 break;
             }
             SearchRow pivot = page.getRow(splitPoint - 1);
+            int splitPoint2 = addChildTry(pivot);
+            if (splitPoint2 != 0) {
+                return splitPoint;
+            }
             PageBtree page2 = page.split(splitPoint);
+            addChild(x, page2.getPageId(), pivot);
             index.getPageStore().updateRecord(page, true, page.data);
             index.getPageStore().updateRecord(page2, true, page2.data);
-            splitPoint = addChild(x, page2.getPageId(), pivot);
-            if (splitPoint != 0) {
-                int todoSplitAtLastInsertionPoint;
-                return splitPoint / 2;
-            }
             index.getPageStore().updateRecord(this, true, data);
         }
         updateRowCount(1);
@@ -202,7 +216,7 @@ class PageBtreeNode extends PageBtree {
     boolean remove(SearchRow row) throws SQLException {
         int at = find(row, false, false);
-        // merge is not implemented to allow concurrent usage of btrees
+        // merge is not implemented to allow concurrent usage
         // TODO maybe implement merge
         PageBtree page = index.getPage(childPageIds[at]);
         boolean empty = page.remove(row);
@@ -265,13 +279,10 @@ class PageBtreeNode extends PageBtree {
         if (written) {
             return;
         }
-        // make sure rows are read
-        for (int i = 0; i < entryCount; i++) {
-            getRow(i);
-        }
+        readAllRows();
         data.reset();
         data.writeInt(parentPageId);
-        data.writeByte((byte) Page.TYPE_BTREE_NODE);
+        data.writeByte((byte) (Page.TYPE_BTREE_NODE | (onlyPosition ? 0 : Page.FLAG_LAST)));
         data.writeShortInt(entryCount);
         data.writeInt(rowCountStored);
         data.writeInt(childPageIds[entryCount]);
@@ -280,7 +291,7 @@ class PageBtreeNode extends PageBtree {
             data.writeInt(offsets[i]);
         }
         for (int i = 0; i < entryCount; i++) {
-            index.writeRow(data, offsets[i], rows[i]);
+            index.writeRow(data, offsets[i], rows[i], onlyPosition);
         }
         written = true;
     }
@@ -319,16 +330,22 @@ class PageBtreeNode extends PageBtree {
      * @param cursor the cursor
      * @param row the current row
      */
-    void nextPage(PageBtreeCursor cursor, SearchRow row) throws SQLException {
-        int i = find(row, false, false) + 1;
+    void nextPage(PageBtreeCursor cursor, int pageId) throws SQLException {
+        int i;
+        // TODO maybe keep the index in the child page (transiently)
+        for (i = 0; i < childPageIds.length; i++) {
+            if (childPageIds[i] == pageId) {
+                i++;
+                break;
+            }
+        }
         if (i > entryCount) {
             if (parentPageId == Page.ROOT) {
                 cursor.setCurrent(null, 0);
                 return;
             }
             PageBtreeNode next = (PageBtreeNode) index.getPage(parentPageId);
-            SearchRow r = entryCount == 0 ? row : getRow(entryCount - 1);
-            next.nextPage(cursor, r);
+            next.nextPage(cursor, getPos());
             return;
         }
         PageBtree page = index.getPage(childPageIds[i]);
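nextPage now receives the page id the cursor just finished instead of a search row, and the parent locates the next child by scanning its child page ids. A stand-alone sketch of that lookup (plain arrays instead of H2's page objects):

    // Sketch only: find the child that follows the page the cursor just left.
    class NextPageSketch {
        static int nextChildIndex(int[] childPageIds, int finishedPageId) {
            for (int i = 0; i < childPageIds.length; i++) {
                if (childPageIds[i] == finishedPageId) {
                    return i + 1;   // the next sibling; callers check it is still in range
                }
            }
            return childPageIds.length;  // past the last child: ascend to the parent
        }

        public static void main(String[] args) {
            int[] children = { 10, 17, 23 };
            System.out.println(nextChildIndex(children, 17));  // 2 -> continue with page 23
            System.out.println(nextChildIndex(children, 23));  // 3 -> out of range, ascend
        }
    }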
h2/src/main/org/h2/index/PageData.java

@@ -8,7 +8,6 @@ package org.h2.index;
 import java.sql.SQLException;
-import org.h2.engine.Session;
 import org.h2.result.Row;
 import org.h2.store.DataPage;
 import org.h2.store.Record;
@@ -97,13 +96,13 @@ abstract class PageData extends Record {
     abstract void read() throws SQLException;
 
     /**
-     * Add a row.
+     * Try to add a row.
      *
      * @param row the row
     * @return 0 if successful, or the split position if the page needs to be
     *         split
     */
-    abstract int addRow(Row row) throws SQLException;
+    abstract int addRowTry(Row row) throws SQLException;
 
     /**
      * Get a cursor.
@@ -188,6 +187,6 @@ abstract class PageData extends Record {
      * @param key the key
      * @return the row
      */
-    abstract Row getRow(Session session, int key) throws SQLException;
+    abstract Row getRow(int key) throws SQLException;
 
 }
h2/src/main/org/h2/index/PageDataLeaf.java

@@ -5,10 +5,9 @@
  * Initial Developer: H2 Group
  */
 package org.h2.index;
 
 import java.sql.SQLException;
 import org.h2.constant.ErrorCode;
-import org.h2.engine.Session;
 import org.h2.message.Message;
 import org.h2.result.Row;
 import org.h2.store.DataPage;
@@ -89,7 +88,7 @@ class PageDataLeaf extends PageData {
      * @param row the now to add
      * @return the split point of this page, or 0 if no split is required
      */
-    int addRow(Row row) throws SQLException {
+    int addRowTry(Row row) throws SQLException {
         int rowLength = row.getByteCount(data);
         int pageSize = index.getPageStore().getPageSize();
         int last = entryCount == 0 ? pageSize : offsets[entryCount - 1];
@@ -245,7 +244,7 @@ class PageDataLeaf extends PageData {
         int newPageId = index.getPageStore().allocatePage();
         PageDataLeaf p2 = new PageDataLeaf(index, newPageId, parentPageId, index.getPageStore().createDataPage());
         for (int i = splitPoint; i < entryCount;) {
-            p2.addRow(getRowAt(splitPoint));
+            p2.addRowTry(getRowAt(splitPoint));
             removeRow(splitPoint);
         }
         return p2;
@@ -297,7 +296,7 @@ class PageDataLeaf extends PageData {
         return false;
     }
 
-    Row getRow(Session session, int key) throws SQLException {
+    Row getRow(int key) throws SQLException {
         int index = find(key);
         return getRowAt(index);
     }
h2/src/main/org/h2/index/PageDataNode.java

@@ -77,24 +77,24 @@ class PageDataNode extends PageData {
         entryCount++;
     }
 
-    int addRow(Row row) throws SQLException {
+    int addRowTry(Row row) throws SQLException {
         while (true) {
             int x = find(row.getPos());
             PageData page = index.getPage(childPageIds[x]);
-            int splitPoint = page.addRow(row);
+            int splitPoint = page.addRowTry(row);
             if (splitPoint == 0) {
                 break;
             }
-            int pivot = page.getKey(splitPoint - 1);
-            PageData page2 = page.split(splitPoint);
-            index.getPageStore().updateRecord(page, true, page.data);
-            index.getPageStore().updateRecord(page2, true, page2.data);
-            addChild(x, page2.getPageId(), pivot);
+            int maxEntries = (index.getPageStore().getPageSize() - ENTRY_START) / ENTRY_LENGTH;
+            if (entryCount >= maxEntries) {
+                int todoSplitAtLastInsertionPoint;
+                return entryCount / 2;
+            }
+            int pivot = page.getKey(splitPoint - 1);
+            PageData page2 = page.split(splitPoint);
+            index.getPageStore().updateRecord(page, true, page.data);
+            index.getPageStore().updateRecord(page2, true, page2.data);
+            addChild(x, page2.getPageId(), pivot);
             index.getPageStore().updateRecord(this, true, data);
         }
         updateRowCount(1);
@@ -205,10 +205,10 @@ class PageDataNode extends PageData {
         return false;
     }
 
-    Row getRow(Session session, int key) throws SQLException {
+    Row getRow(int key) throws SQLException {
         int at = find(key);
         PageData page = index.getPage(childPageIds[at]);
-        return page.getRow(session, key);
+        return page.getRow(key);
     }
 
     int getRowCount() throws SQLException {
h2/src/main/org/h2/index/PageScanIndex.java

@@ -108,7 +108,7 @@ public class PageScanIndex extends BaseIndex implements RowIndex {
         }
         while (true) {
             PageData root = getPage(headPos);
-            int splitPoint = root.addRow(row);
+            int splitPoint = root.addRowTry(row);
             if (splitPoint == 0) {
                 break;
             }
@@ -260,7 +260,7 @@ public class PageScanIndex extends BaseIndex implements RowIndex {
     public Row getRow(Session session, int key) throws SQLException {
         PageData root = getPage(headPos);
-        return root.getRow(session, key);
+        return root.getRow(key);
     }
 
     PageStore getPageStore() {
h2/src/main/org/h2/store/PageStore.java

@@ -66,13 +66,14 @@ import org.h2.value.ValueString;
  */
 public class PageStore implements CacheWriter {
 
-// TODO currently working on PageBtreeNode Wide indexes
+// TODO implement redo log in Recover tool
+// TODO TestPowerOff
 // TODO PageStore.openMetaIndex (desc and nulls first / last)
 // TODO PageBtreeIndex.canGetFirstOrLast
 // TODO btree index with fixed size values doesn't need offset and so on
 // TODO better checksums (for example, multiple fletcher)
 // TODO replace CRC32
 // TODO PageBtreeNode: 4 bytes offset - others use only 2
 // TODO PageBtreeLeaf: why table id
 // TODO log block allocation
 // TODO block compression: maybe http://en.wikipedia.org/wiki/LZJB
 //     with RLE, specially for 0s.
@@ -103,6 +104,7 @@ public class PageStore implements CacheWriter {
 //     and delay on each commit
 // TODO var int: see google protocol buffers
 // TODO SessionState.logId is no longer needed
+// TODO PageData and PageBtree addRowTry: try to simplify
 
     /**
      * The smallest possible page size.
h2/src/main/org/h2/table/Column.java

@@ -629,4 +629,8 @@ public class Column {
         return primaryKey;
     }
 
+    public String toString() {
+        return name;
+    }
+
 }
h2/src/main/org/h2/tools/Recover.java

@@ -21,6 +21,7 @@ import java.util.ArrayList;
 import java.util.HashMap;
 import java.util.HashSet;
 import java.util.Map;
+import java.util.zip.CRC32;
 import org.h2.command.Parser;
 import org.h2.constant.SysProperties;
 import org.h2.engine.Constants;
@@ -44,6 +45,7 @@ import org.h2.store.PageStore;
 import org.h2.util.ByteUtils;
 import org.h2.util.FileUtils;
 import org.h2.util.IOUtils;
+import org.h2.util.IntArray;
 import org.h2.util.MathUtils;
 import org.h2.util.New;
 import org.h2.util.ObjectArray;
@@ -329,8 +331,8 @@ public class Recover extends Tool implements DataHandler {
     }
 
     private void writeDataError(PrintWriter writer, String error, byte[] data, int dumpBlocks) {
-        writer.println("-- ERROR: " + error + " block: " + block + " blockCount:" + blockCount +
-                " storageId: " + storageId + " recordLength: " + recordLength + " valueId: " + valueId);
+        writer.println("-- ERROR: " + error + " block: " + block + " blockCount: " + blockCount +
+                " storageId: " + storageId + " recordLength: " + recordLength + " valueId: " + valueId);
         StringBuilder sb = new StringBuilder();
         for (int i = 0; i < dumpBlocks * DiskFile.BLOCK_SIZE; i++) {
             int x = data[i] & 0xff;
@@ -479,11 +481,11 @@ public class Recover extends Tool implements DataHandler {
         int id = s.readInt();
         int firstUncommittedPos = s.readInt();
         int firstUnwrittenPos = s.readInt();
-        writer.println("// id:" + id);
-        writer.println("// firstUncommittedPos:" + firstUncommittedPos);
-        writer.println("// firstUnwrittenPos:" + firstUnwrittenPos);
+        writer.println("// id: " + id);
+        writer.println("// firstUncommittedPos: " + firstUncommittedPos);
+        writer.println("// firstUnwrittenPos: " + firstUnwrittenPos);
         int max = (int) (length / blockSize);
-        writer.println("// max:" + max);
+        writer.println("// max: " + max);
         while (true) {
             int pos = (int) (store.getFilePointer() / blockSize);
             if ((long) pos * blockSize >= length) {
@@ -517,9 +519,9 @@ public class Recover extends Tool implements DataHandler {
             int sessionId = s.readInt();
             if (type == 'P') {
                 String transaction = s.readString();
                 writer.println("// prepared session: " + sessionId + " tx: " + transaction);
             } else if (type == 'C') {
-                writer.println("// commit session:" + sessionId);
+                writer.println("// commit session: " + sessionId);
             } else {
                 int storageId = s.readInt();
                 int recId = s.readInt();
@@ -535,27 +537,27 @@ public class Recover extends Tool implements DataHandler {
                 if (sumLength > 0) {
                     s.read(summary, 0, sumLength);
                 }
-                writer.println("// summary session: " + sessionId + " fileType:" + fileType + " sumLength: " + sumLength);
+                writer.println("// summary session: " + sessionId + " fileType: " + fileType + " sumLength: " + sumLength);
                 dumpSummary(writer, summary);
                 break;
             }
             case 'T':
-                writer.println("// truncate session: " + sessionId + " storage:" + storageId + " pos:" + recId + " blockCount: " + blockCount);
+                writer.println("// truncate session: " + sessionId + " storage: " + storageId + " pos: " + recId + " blockCount: " + blockCount);
                 break;
             case 'I':
-                writer.println("// insert session: " + sessionId + " storage:" + storageId + " pos:" + recId + " blockCount: " + blockCount);
+                writer.println("// insert session: " + sessionId + " storage: " + storageId + " pos: " + recId + " blockCount: " + blockCount);
                 if (storageId >= 0) {
                     writeLogRecord(writer, s);
                 }
                 break;
             case 'D':
-                writer.println("// delete session: " + sessionId + " storage:" + storageId + " pos:" + recId + " blockCount: " + blockCount);
+                writer.println("// delete session: " + sessionId + " storage: " + storageId + " pos: " + recId + " blockCount: " + blockCount);
                 if (storageId >= 0) {
                     writeLogRecord(writer, s);
                 }
                 break;
             default:
-                writer.println("// type?: " + type + " session:" + sessionId + " storage:" + storageId + " pos:" + recId + " blockCount: " + blockCount);
+                writer.println("// type?: " + type + " session: " + sessionId + " storage: " + storageId + " pos: " + recId + " blockCount: " + blockCount);
                 break;
             }
         }
@@ -582,7 +584,7 @@ public class Recover extends Tool implements DataHandler {
             if ((i % 8) == 0) {
                 writer.print("// ");
             }
-            writer.print(" " + Long.toString(i * 8) + ":");
+            writer.print(" " + Long.toString(i * 8) + ": ");
             for (int j = 0; j < 8; j++) {
                 writer.print(((x & 1) == 1) ? "1" : "0");
                 x >>>= 1;
@@ -596,7 +598,7 @@ public class Recover extends Tool implements DataHandler {
         for (int i = 0; i < len; i++) {
             int storageId = in.readInt();
             if (storageId != -1) {
                 writer.println("// pos: " + (i * DiskFile.BLOCKS_PER_PAGE) + " storage: " + storageId);
             }
         }
         while (true) {
@@ -605,7 +607,7 @@ public class Recover extends Tool implements DataHandler {
                 break;
             }
             int recordCount = in.readInt();
             writer.println("// storage: " + s + " recordCount: " + recordCount);
         }
     } catch (Throwable e) {
         writeError(writer, e);
@@ -700,7 +702,7 @@ public class Recover extends Tool implements DataHandler {
                 data = "root [" + rootPos + "]";
                 break;
             }
-            writer.println("// [" + block + "] page: " + page + " blocks:" + blockCount + " storage: " + storageId + " " + data);
+            writer.println("// [" + block + "] page: " + page + " blocks: " + blockCount + " storage: " + storageId + " " + data);
         }
         writer.close();
     } catch (Throwable e) {
@@ -735,26 +737,47 @@ public class Recover extends Tool implements DataHandler {
             int pageSize = s.readInt();
             int writeVersion = s.readByte();
             int readVersion = s.readByte();
-            int systemTableRoot = s.readInt();
-            int freeListHead = s.readInt();
-            int logHead = s.readInt();
-            writer.println("-- pageSize " + pageSize);
-            writer.println("-- writeVersion: " + writeVersion);
-            writer.println("-- readVersion: " + readVersion);
-            writer.println("-- systemTableRoot: " + systemTableRoot);
-            writer.println("-- freeListHead: " + freeListHead);
-            writer.println("-- logHead: " + logHead);
+            writer.println("-- pageSize: " + pageSize + " writeVersion: " + writeVersion + " readVersion: " + readVersion);
             if (pageSize < PageStore.PAGE_SIZE_MIN || pageSize > PageStore.PAGE_SIZE_MAX) {
                 pageSize = PageStore.PAGE_SIZE_DEFAULT;
-                // use default values for other settings as well
-                systemTableRoot = 1;
-                freeListHead = 2;
-                logHead = 3;
                 writer.println("-- ERROR: page size; using " + pageSize);
             }
             int pageCount = (int) (length / pageSize);
             blockCount = 1;
-            for (long page = 1; page < pageCount; page++) {
-                s = DataPage.create(this, pageSize);
+            int logFirstTrunkPage = 0, logFirstDataPage = 0;
+            for (int i = 1;; i++) {
+                if (i == 3) {
+                    break;
+                }
+                s.reset();
+                store.seek(i * pageSize);
+                store.readFully(s.getBytes(), 0, pageSize);
+                long writeCounter = s.readLong();
+                int firstTrunkPage = s.readInt();
+                int firstDataPage = s.readInt();
+                CRC32 crc = new CRC32();
+                crc.update(s.getBytes(), 0, s.length());
+                long expected = crc.getValue();
+                long got = s.readLong();
+                if (expected == got) {
+                    if (logFirstTrunkPage == 0) {
+                        logFirstTrunkPage = firstTrunkPage;
+                        logFirstDataPage = firstDataPage;
+                    }
+                }
+                writer.println("-- head " + i + ": writeCounter: " + writeCounter +
+                        " trunk: " + firstTrunkPage + "/" + firstDataPage +
+                        " crc expected " + expected + " got " + got + " (" + (expected == got ? "ok" : "different") + ")");
+            }
+            writer.println("-- firstTrunkPage: " + logFirstTrunkPage + " firstDataPage: " + logFirstDataPage);
+            s = DataPage.create(this, pageSize);
+            for (long page = 3; page < pageCount; page++) {
+                s = DataPage.create(this, pageSize);
                 store.seek(page * pageSize);
                 store.readFully(s.getBytes(), 0, pageSize);
@@ -762,9 +785,8 @@ public class Recover extends Tool implements DataHandler {
                 int type = s.readByte();
                 switch (type) {
                 case Page.TYPE_EMPTY:
-                    // writer.println("-- page " + page + ": empty");
                     if (parentPageId != 0) {
-                        writer.println("-- ERROR parent: " + parentPageId);
+                        writer.println("-- ERROR empty page with parent: " + parentPageId);
                     }
                     continue;
                 }
@@ -783,9 +805,15 @@ public class Recover extends Tool implements DataHandler {
                     break;
                 case Page.TYPE_BTREE_NODE:
                     writer.println("-- page " + page + ": btree node" + (last ? "(last)" : ""));
+                    if (trace) {
+                        dumpPageBtreeNode(store, pageSize, writer, s, last, page);
+                    }
                     break;
                 case Page.TYPE_BTREE_LEAF:
                     writer.println("-- page " + page + ": btree leaf " + (last ? "(last)" : ""));
+                    if (trace) {
+                        dumpPageBtreeLeaf(store, pageSize, writer, s, last, page);
+                    }
                     break;
                 case Page.TYPE_FREE_LIST:
                     writer.println("-- page " + page + ": free list " + (last ? "(last)" : ""));
@@ -802,9 +830,7 @@ public class Recover extends Tool implements DataHandler {
                 }
             }
             writeSchema(writer);
-            // for (int i = 0; i < PageStore.LOG_COUNT; i++) {
-            //     dumpPageLogStream(writer, store, logHead + i, pageSize);
-            // }
+            dumpPageLogStream(writer, store, logFirstTrunkPage, logFirstDataPage, pageSize);
             writer.close();
         } catch (Throwable e) {
             writeError(writer, e);
@@ -814,14 +840,11 @@ public class Recover extends Tool implements DataHandler {
         }
     }
 
-    private void dumpPageLogStream(PrintWriter writer, FileStore store, int logHead, int pageSize) throws IOException, SQLException {
+    private void dumpPageLogStream(PrintWriter writer, FileStore store, int logFirstTrunkPage, int logFirstDataPage, int pageSize) throws IOException, SQLException {
         DataPage s = DataPage.create(this, pageSize);
         DataInputStream in = new DataInputStream(
-                new PageInputStream(writer, this, store, logHead, pageSize, 0, Page.TYPE_STREAM_TRUNK));
-        int logId = in.readInt();
-        writer.println("-- log " + logId);
+                new PageInputStream(writer, this, store, logFirstTrunkPage, logFirstDataPage, pageSize));
         while (true) {
             int x = in.read();
             if (x < 0) {
@@ -864,24 +887,24 @@ public class Recover extends Tool implements DataHandler {
     static class PageInputStream extends InputStream {
 
         private final PrintWriter writer;
-        private final int type;
         private final FileStore store;
         private final DataPage page;
         private final int pageSize;
-        private int parentPage;
-        private int nextPage;
+        private int trunkPage;
+        private int dataPage;
+        private IntArray dataPages = new IntArray();
         private boolean endOfFile;
        private int remaining;
 
        public PageInputStream(PrintWriter writer, DataHandler handler,
-                FileStore store, int firstPage, int pageSize, int parent, int type) {
+                FileStore store, int firstTrunkPage, int firstDataPage, int pageSize) {
            this.writer = writer;
            this.store = store;
            this.pageSize = pageSize;
-            this.type = type;
-            this.parentPage = parent;
-            nextPage = firstPage;
+            this.trunkPage = firstTrunkPage;
+            this.dataPage = firstDataPage;
            page = DataPage.create(handler, pageSize);
        }
 
        public int read() throws IOException {
@@ -926,45 +949,103 @@ public class Recover extends Tool implements DataHandler {
             if (remaining > 0 || endOfFile) {
                 return;
             }
-            if (nextPage == 0) {
-                endOfFile = true;
-                return;
-            }
-            page.reset();
             try {
+                if (dataPages.size() == 0) {
+                    if (trunkPage == 0) {
+                        endOfFile = true;
+                        return;
+                    }
+                    store.seek((long) trunkPage * pageSize);
+                    store.readFully(page.getBytes(), 0, pageSize);
+                    page.reset();
+                    page.readInt();
+                    int t = page.readByte();
+                    if (t != Page.TYPE_STREAM_TRUNK) {
+                        writer.println("-- eof page: " + trunkPage + " type: " + t + " expected type: " + Page.TYPE_STREAM_TRUNK);
+                        endOfFile = true;
+                        return;
+                    }
+                    trunkPage = page.readInt();
+                    int pageCount = page.readInt();
+                    for (int i = 0; i < pageCount; i++) {
+                        int d = page.readInt();
+                        if (dataPage != 0) {
+                            if (d == dataPage) {
+                                dataPage = 0;
+                            } else {
+                                // ignore the pages before the starting data page
                                continue;
+                            }
+                        }
+                        dataPages.add(d);
+                    }
+                }
+                int nextPage = dataPages.get(0);
+                dataPages.remove(0);
                 store.seek((long) nextPage * pageSize);
                 store.readFully(page.getBytes(), 0, pageSize);
                 page.reset();
                 int p = page.readInt();
                 int t = page.readByte();
-                boolean last = (t & Page.FLAG_LAST) != 0;
-                t &= ~Page.FLAG_LAST;
-                if (type != t || p != parentPage) {
-                    writer.println("-- ERROR page:" + nextPage + " type:" + t + " parent:" + p +
-                            " expected type:" + type + " expected parent:" + parentPage);
-                }
-                parentPage = nextPage;
-                if (last) {
-                    nextPage = 0;
-                    remaining = page.readInt();
-                } else {
-                    nextPage = page.readInt();
-                    remaining = pageSize - page.length();
-                }
+                if (t != Page.TYPE_STREAM_DATA) {
+                    writer.println("-- eof page: " + nextPage + " type: " + t + " parent: " + p +
+                            " expected type: " + Page.TYPE_STREAM_DATA);
+                    endOfFile = true;
+                    return;
+                }
+                remaining = page.readInt();
             } catch (SQLException e) {
                 throw Message.convertToIOException(e);
             }
         }
     }
 
+    private void dumpPageBtreeNode(FileStore store, int pageSize, PrintWriter writer, DataPage s, boolean last, long pageId) {
+        int entryCount = s.readShortInt();
+        int rowCount = s.readInt();
+        int[] children = new int[entryCount + 1];
+        int[] offsets = new int[entryCount];
+        children[entryCount] = s.readInt();
+        for (int i = 0; i < entryCount; i++) {
+            children[i] = s.readInt();
+            offsets[i] = s.readInt();
+        }
+        for (int i = 0; i < entryCount; i++) {
+            int off = offsets[i];
+            s.setPos(off);
+            int pos = s.readInt();
+            Value data;
+            try {
+                data = s.readValue();
+            } catch (Throwable e) {
+                writeDataError(writer, "exception " + e, s.getBytes(), blockCount);
+                continue;
+            }
+            writer.println("-- [" + i + "] child: " + children[i] + " pos: " + pos + " data: " + data);
+        }
+        writer.println("-- [" + entryCount + "] child: " + children[entryCount] + " rowCount: " + rowCount);
+    }
+
-    private void dumpPageLog(PrintWriter writer, DataPage s, boolean last) {
-        if (last) {
-            int size = s.readInt();
-            writer.println("-- size:" + size);
-        } else {
-            int next = s.readInt();
-            writer.println("-- next:" + next);
+    private void dumpPageBtreeLeaf(FileStore store, int pageSize, PrintWriter writer, DataPage s, boolean last, long pageId) {
+        s.readInt();
+        int entryCount = s.readShortInt();
+        int[] offsets = new int[entryCount];
+        for (int i = 0; i < entryCount; i++) {
+            offsets[i] = s.readShortInt();
+        }
+        for (int i = 0; i < entryCount; i++) {
+            int off = offsets[i];
+            s.setPos(off);
+            int pos = s.readInt();
+            Value data;
+            try {
+                data = s.readValue();
+            } catch (Throwable e) {
+                writeDataError(writer, "exception " + e, s.getBytes(), blockCount);
+                continue;
+            }
+            writer.println("-- [" + i + "] pos: " + pos + " data: " + data);
+        }
+    }
@@ -991,7 +1072,7 @@ public class Recover extends Tool implements DataHandler {
                 int type = s2.readByte();
                 if (type == (Page.TYPE_DATA_OVERFLOW | Page.FLAG_LAST)) {
                     int size = s2.readShortInt();
-                    writer.println("-- chain: " + next + " type:" + type + " size: " + size);
+                    writer.println("-- chain: " + next + " type: " + type + " size: " + size);
                     s.write(s2.getBytes(), 7, size);
                     break;
                 } else if (type == Page.TYPE_DATA_OVERFLOW) {
@@ -1001,10 +1082,10 @@ public class Recover extends Tool implements DataHandler {
                         break;
                     }
                     int size = pageSize - 9;
-                    writer.println("-- chain: " + next + " type:" + type + " size:" + size + " next: " + next);
+                    writer.println("-- chain: " + next + " type: " + type + " size: " + size + " next: " + next);
                     s.write(s2.getBytes(), 9, size);
                 } else {
-                    writeDataError(writer, "type:" + type, s2.getBytes(), 1);
+                    writeDataError(writer, "type: " + type, s2.getBytes(), 1);
                     break;
                 }
             }
@@ -1012,7 +1093,7 @@ public class Recover extends Tool implements DataHandler {
         for (int i = 0; i < entryCount; i++) {
             int key = keys[i];
             int off = offsets[i];
-            writer.println("-- [" + i + "] storage: " + storageId + " key:" + key + " off: " + off);
+            writer.println("-- [" + i + "] storage: " + storageId + " key: " + key + " off: " + off);
             s.setPos(off);
             Value[] data = createRecord(writer, s);
             if (data != null) {
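The updated Recover tool walks the new log layout: a chain of trunk pages, each listing the next trunk page and the data pages that carry the log stream. A simplified illustrative sketch of that traversal, using an in-memory map as a stand-in for reading pages from the file:

    import java.util.ArrayDeque;
    import java.util.Deque;
    import java.util.Map;

    // Sketch only: the page map and entry layout below are stand-ins, not H2's on-disk format.
    class LogWalkSketch {
        /** Returns the data pages of the log, in order, starting from the first trunk page. */
        static Deque<Integer> collectDataPages(Map<Integer, int[]> trunkPages, int firstTrunkPage) {
            Deque<Integer> dataPages = new ArrayDeque<>();
            int trunk = firstTrunkPage;
            while (trunk != 0) {
                int[] entry = trunkPages.get(trunk);   // entry[0] = next trunk page, rest = data pages
                for (int i = 1; i < entry.length; i++) {
                    dataPages.add(entry[i]);
                }
                trunk = entry[0];
            }
            return dataPages;
        }

        public static void main(String[] args) {
            Map<Integer, int[]> trunks = Map.of(
                    4, new int[] { 9, 5, 6 },   // trunk 4 -> data pages 5, 6; next trunk 9
                    9, new int[] { 0, 10 });    // trunk 9 -> data page 10; end of chain
            System.out.println(collectDataPages(trunks, 4));   // [5, 6, 10]
        }
    }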
h2/src/test/org/h2/test/TestAll.java

@@ -289,7 +289,7 @@ java org.h2.test.TestAll timer
 // 2009-05-15: 25 tests fail with page store (first loop)
 // 2009-05-18: 18 tests fail with page store (first loop)
 // 2009-05-30: 15 tests fail with page store (first loop)
-// 2009-06-16: 13 tests fail with page store (first loop)
+// 2009-06-19: 10 tests fail with page store (first loop)
 // System.setProperty("h2.pageStore", "true");
 
 /*
h2/src/test/org/h2/test/db/TestPowerOff.java

@@ -178,6 +178,7 @@ public class TestPowerOff extends TestBase {
         } catch (SQLException e) {
             assertKnownException(e);
         }
+        if (!SysProperties.PAGE_STORE) {
         boolean deleted = false;
         for (String fileName : FileLister.getDatabaseFiles(dir, dbName, false)) {
             if (fileName.endsWith(Constants.SUFFIX_INDEX_FILE)) {
@@ -186,6 +187,7 @@ public class TestPowerOff extends TestBase {
             }
         }
         assertTrue(deleted);
+        }
         conn = getConnection(url);
         conn.close();
     }