Commit 06214924
Authored September 20, 2009 by Thomas Mueller
Page store bugfixes
Parent: 6dc3587d
Showing 5 changed files with 89 additions and 39 deletions (+89 -39).
roadmap.html       h2/src/docsrc/html/roadmap.html              +1   -0
PageDataLeaf.java  h2/src/main/org/h2/index/PageDataLeaf.java   +27  -3
PageDataNode.java  h2/src/main/org/h2/index/PageDataNode.java   +3   -3
PageStore.java     h2/src/main/org/h2/store/PageStore.java      +9   -5
Recover.java       h2/src/main/org/h2/tools/Recover.java        +49  -28
h2/src/docsrc/html/roadmap.html

@@ -452,6 +452,7 @@ See also <a href="build.html#providing_patches">Providing Patches</a>.
 </li><li>Support reading sequences using DatabaseMetaData.getTables(null, null, null, new String[]{"SEQUENCE"}).
     See PostgreSQL.
 </li><li>Add option to enable TCP_NODELAY using Socket.setTcpNoDelay(true).
+</li><li>Maybe disallow = within database names (jdbc:h2:mem:MODE=DB2 means database name MODE=DB2).
 </li></ul>
 <h2>Not Planned</h2>
h2/src/main/org/h2/index/PageDataLeaf.java

@@ -21,9 +21,9 @@ import org.h2.store.PageStore;
 /**
  * A leaf page that contains data of one or multiple rows. Format:
  * <ul>
- * <li>page type: byte</li>
- * <li>checksum: short</li>
- * <li>parent page id (0 for root): int</li>
+ * <li>page type: byte (0)</li>
+ * <li>checksum: short (1-2)</li>
+ * <li>parent page id (0 for root): int (3-6)</li>
  * <li>table id: varInt</li>
  * <li>column count: varInt</li>
  * <li>entry count: short</li>
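The offsets added above describe the fixed header that every data page starts with. As a minimal illustration only (this is not H2 code: it uses a plain java.nio.ByteBuffer instead of H2's internal Data class, and the class and method names are made up), the header documented here could be decoded like this:

    import java.nio.ByteBuffer;

    // Illustrative sketch: decode the fixed header described in the comment above,
    // i.e. page type at offset 0, checksum at offsets 1-2, parent page id at offsets 3-6.
    class PageHeaderSketch {
        static void dumpHeader(byte[] pageBytes) {
            ByteBuffer buff = ByteBuffer.wrap(pageBytes);
            int type = buff.get(0);            // page type: byte (0)
            int checksum = buff.getShort(1);   // checksum: short (1-2)
            int parentPageId = buff.getInt(3); // parent page id, 0 for root (3-6)
            System.out.println("type=" + type + " checksum=" + checksum
                    + " parent=" + parentPageId);
        }
    }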
@@ -33,6 +33,11 @@ import org.h2.store.PageStore;
  * </ul>
  */
 public class PageDataLeaf extends PageData {
 
+    /**
+     * The start of the data in the last overflow page.
+     */
+    static final int START_PARENT = 3;
+
     /**
      * The row offsets.
@@ -217,6 +222,11 @@ public class PageDataLeaf extends PageData {
         all.checkCapacity(data.length());
         all.write(data.getBytes(), 0, data.length());
         data.truncate(index.getPageStore().getPageSize());
+        // write the page now to disk, to avoid problems
+        // when the page needs to be written before the overflow
+        // is written to disk (the cache first removes elements,
+        // moves them in a write queue, and then writes them)
+        write(null);
         do {
             int type, size, next;
             if (remaining <= pageSize - PageDataOverflow.START_LAST) {
@@ -497,5 +507,19 @@ public class PageDataLeaf extends PageData {
     public int getMemorySize() {
         return index.getMemorySizePerPage();
     }
 
+    void setParentPageId(int id) {
+        // never reset the written flag not only for speed, but also
+        // because if would cause the page to be written again if
+        // it contains overflow, which would cause the data to be read,
+        // and that's not possible because the overflow page may be
+        // not in the cache but in the write queue already
+        if (written) {
+            data.setInt(START_PARENT, id);
+            this.parentPageId = id;
+        } else {
+            super.setParentPageId(id);
+        }
+    }
+
 }
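The new setParentPageId override avoids clearing the written flag: when the page has already been serialized, only the four bytes of the parent pointer (at START_PARENT, offset 3) are patched in place, so overflow pages that may already sit in the write queue never have to be read back. A standalone sketch of the same idea, again using a plain ByteBuffer as a stand-in for the page buffer (hypothetical names, not the actual H2 API):

    import java.nio.ByteBuffer;

    // Sketch: patch the parent page id of an already-serialized page in place
    // instead of re-serializing the whole page.
    class ParentPointerSketch {
        static final int START_PARENT = 3; // same offset as in PageDataLeaf

        static void setParent(ByteBuffer page, boolean alreadyWritten, int newParentId) {
            if (alreadyWritten) {
                // overwrite bytes 3-6 only; the rest of the page stays untouched
                page.putInt(START_PARENT, newParentId);
            }
            // otherwise the parent id is simply remembered and written together
            // with the full page later (what super.setParentPageId does in H2)
        }
    }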
h2/src/main/org/h2/index/PageDataNode.java

@@ -21,9 +21,9 @@ import org.h2.util.MemoryUtils;
 /**
  * A leaf page that contains data of one or multiple rows. Format:
  * <ul>
- * <li>page type: byte</li>
- * <li>checksum: short</li>
- * <li>parent page id (0 for root): int</li>
+ * <li>page type: byte (0)</li>
+ * <li>checksum: short (1-2)</li>
+ * <li>parent page id (0 for root): int (3-6)</li>
  * <li>table id: varInt</li>
  * <li>count of all children (-1 if not known): int</li>
  * <li>entry count: short</li>
h2/src/main/org/h2/store/PageStore.java

@@ -967,6 +967,9 @@ public class PageStore implements CacheWriter {
         log.recover(PageLog.RECOVERY_STAGE_UNDO);
         if (reservedPages != null) {
             for (int r : reservedPages.keySet()) {
+                if (trace.isDebugEnabled()) {
+                    trace.debug("reserve " + r);
+                }
                 allocatePage(r);
             }
         }

@@ -1164,8 +1167,8 @@ public class PageStore implements CacheWriter {
     private void removeMeta(int logPos, Row row) throws SQLException {
         int id = row.getValue(0).getInt();
-        Index index = metaObjects.get(id);
-        int headPos = index.getHeadPos();
+        PageIndex index = (PageIndex) metaObjects.get(id);
+        int rootPageId = index.getRootPageId();
         index.getTable().removeIndex(index);
         if (index instanceof PageBtreeIndex) {
             if (index.isTemporary()) {

@@ -1178,11 +1181,11 @@ public class PageStore implements CacheWriter {
         }
         index.remove(systemSession);
         metaObjects.remove(id);
-        if (reservedPages != null && reservedPages.containsKey(headPos)) {
+        if (reservedPages != null && reservedPages.containsKey(rootPageId)) {
             // re-allocate the page if it is used later on again
-            int latestPos = reservedPages.get(headPos);
+            int latestPos = reservedPages.get(rootPageId);
             if (latestPos > logPos) {
-                allocatePage(headPos);
+                allocatePage(rootPageId);
             }
         }
     }

@@ -1201,6 +1204,7 @@ public class PageStore implements CacheWriter {
             trace.debug("addMeta id=" + id + " type=" + type + " parent=" + parent + " columns=" + columnList);
         }
         if (redo && rootPageId != 0) {
+            // ensure the page is empty, but not used by regular data
             writePage(rootPageId, createData());
             allocatePage(rootPageId);
         }
h2/src/main/org/h2/tools/Recover.java

@@ -973,9 +973,16 @@ public class Recover extends Tool implements DataHandler {
             if (x < 0) {
                 break;
             }
-            if (x == PageLog.UNDO) {
+            if (x == PageLog.NOOP) {
+                // ignore
+            } else if (x == PageLog.UNDO) {
                 int pageId = in.readVarInt();
-                in.readFully(new byte[pageSize], 0, pageSize);
+                int size = in.readVarInt();
+                if (size == 0) {
+                    in.readFully(new byte[pageSize], 0, pageSize);
+                } else {
+                    in.readFully(new byte[size], 0, size);
+                }
                 writer.println("-- undo page " + pageId);
             } else if (x == PageLog.ADD) {
                 int sessionId = in.readVarInt();
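With this change an UNDO log record no longer always carries a full page image: a size of 0 still means a whole page follows, any other value means only that many bytes were logged. A hedged sketch of the reader side, using plain java.io streams (readVarInt below is a generic 7-bit varint decoder for illustration, not necessarily H2's exact encoding):

    import java.io.DataInputStream;
    import java.io.IOException;

    // Sketch only: mirrors the branch added to Recover above.
    class UndoRecordSketch {
        static byte[] readUndo(DataInputStream in, int pageSize) throws IOException {
            int pageId = readVarInt(in); // page the undo record applies to
            int size = readVarInt(in);   // 0 = full page image, otherwise number of bytes
            byte[] data = new byte[size == 0 ? pageSize : size];
            in.readFully(data);
            System.out.println("-- undo page " + pageId);
            return data;
        }

        // Generic base-128 varint (illustrative only).
        static int readVarInt(DataInputStream in) throws IOException {
            int result = 0;
            for (int shift = 0; ; shift += 7) {
                int b = in.readUnsignedByte();
                result |= (b & 0x7f) << shift;
                if ((b & 0x80) == 0) {
                    return result;
                }
            }
        }
    }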
@@ -1019,7 +1026,7 @@ public class Recover extends Tool implements DataHandler {
                 }
                 writer.println(buff);
             } else {
-                writer.println("-- end " + x);
+                writer.println("-- ERROR: unknown operation " + x);
                 break;
             }
         }
@@ -1101,7 +1108,7 @@ public class Recover extends Tool implements DataHandler {
                 return;
             }
             try {
-                if (dataPages.size() == 0) {
+                while (dataPages.size() == 0) {
                     if (trunkPage == 0) {
                         endOfFile = true;
                         return;
@@ -1109,19 +1116,25 @@ public class Recover extends Tool implements DataHandler {
                     store.seek((long) trunkPage * pageSize);
                     store.readFully(page.getBytes(), 0, pageSize);
                     page.reset();
+                    if (!PageStore.checksumTest(page.getBytes(), trunkPage, pageSize)) {
+                        writer.println("-- ERROR: checksum mismatch page: " + trunkPage);
+                        endOfFile = true;
+                        return;
+                    }
                     int t = page.readByte();
-                    page.readInt();
+                    page.readShortInt();
                     if (t != Page.TYPE_STREAM_TRUNK) {
                         writer.println("-- eof page: " + trunkPage + " type: " + t + " expected type: " + Page.TYPE_STREAM_TRUNK);
                         endOfFile = true;
                         return;
                     }
-                    trunkPage = page.readInt();
+                    page.readInt();
                     int key = page.readInt();
                     logKey++;
                     if (key != logKey) {
                         writer.println("-- eof page: " + trunkPage + " type: " + t + " expected key: " + logKey + " got: " + key);
                     }
+                    trunkPage = page.readInt();
                     int pageCount = page.readShortInt();
                     for (int i = 0; i < pageCount; i++) {
                         int d = page.readInt();
@@ -1136,27 +1149,35 @@ public class Recover extends Tool implements DataHandler {
                         dataPages.add(d);
                     }
                 }
-                page.reset();
-                int nextPage = dataPages.get(0);
-                dataPages.remove(0);
-                store.seek((long) nextPage * pageSize);
-                store.readFully(page.getBytes(), 0, pageSize);
-                page.reset();
-                int t = page.readByte();
-                int p = page.readInt();
-                int k = page.readInt();
-                if (t != Page.TYPE_STREAM_DATA) {
-                    writer.println("-- eof page: " + nextPage + " type: " + t + " parent: " + p +
-                            " expected type: " + Page.TYPE_STREAM_DATA);
-                    endOfFile = true;
-                    return;
-                } else if (k != logKey) {
-                    writer.println("-- eof page: " + nextPage + " type: " + t + " parent: " + p +
-                            " expected key: " + logKey + " got: " + k);
-                    endOfFile = true;
-                    return;
-                }
-                remaining = pageSize - page.length();
+                if (dataPages.size() > 0) {
+                    page.reset();
+                    int nextPage = dataPages.get(0);
+                    dataPages.remove(0);
+                    store.seek((long) nextPage * pageSize);
+                    store.readFully(page.getBytes(), 0, pageSize);
+                    page.reset();
+                    if (!PageStore.checksumTest(page.getBytes(), nextPage, pageSize)) {
+                        writer.println("-- ERROR: checksum mismatch page: " + nextPage);
+                        endOfFile = true;
+                        return;
+                    }
+                    int t = page.readByte();
+                    page.readShortInt();
+                    int p = page.readInt();
+                    int k = page.readInt();
+                    if (t != Page.TYPE_STREAM_DATA) {
+                        writer.println("-- eof page: " + nextPage + " type: " + t + " parent: " + p +
+                                " expected type: " + Page.TYPE_STREAM_DATA);
+                        endOfFile = true;
+                        return;
+                    } else if (k != logKey) {
+                        writer.println("-- eof page: " + nextPage + " type: " + t + " parent: " + p +
+                                " expected key: " + logKey + " got: " + k);
+                        endOfFile = true;
+                        return;
+                    }
+                }
+                remaining = page.readInt();
             } catch (SQLException e) {
                 throw Message.convertToIOException(e);
             }
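Both of the hunks above reject a page whose checksum does not match before interpreting its contents, using PageStore.checksumTest(bytes, pageId, pageSize). A short sketch of how the same check could be applied to every page of a database file during a recovery-style scan (the file handling below is plain java.io and purely illustrative; the first pages are assumed here to belong to the file header and are skipped):

    import java.io.IOException;
    import java.io.RandomAccessFile;

    import org.h2.store.PageStore;

    // Illustrative scan: report every page whose checksum does not verify,
    // in the same spirit as the checks added to the Recover tool above.
    class ChecksumScanSketch {
        static void scan(String fileName, int pageSize) throws IOException {
            RandomAccessFile f = new RandomAccessFile(fileName, "r");
            try {
                byte[] page = new byte[pageSize];
                long pageCount = f.length() / pageSize;
                for (int pageId = 3; pageId < pageCount; pageId++) {
                    f.seek((long) pageId * pageSize);
                    f.readFully(page);
                    if (!PageStore.checksumTest(page, pageId, pageSize)) {
                        System.out.println("-- ERROR: checksum mismatch page: " + pageId);
                    }
                }
            } finally {
                f.close();
            }
        }
    }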