h2database · Commits

Commit fbe949a4, authored Mar 29, 2011 by Thomas Mueller
Parent: 6086bebb

Improved performance.

Showing 1 changed file with 249 additions and 289 deletions:
h2/src/main/org/h2/store/PageStore.java (+249, −289)
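The recurring change in the hunks below: PageStore methods that previously wrapped their bodies in a synchronized (database) block are now declared synchronized, so they lock the PageStore instance rather than the shared Database object, presumably to reduce contention on the Database monitor during page I/O. A minimal sketch of the before/after shape, using invented names rather than the real PageStore API:

    // Illustrative only: "StoreLike", "database" and "doWrite" are made-up names.
    class StoreLike {
        private final Object database = new Object();

        // Before this commit: the body ran while holding the Database monitor.
        void writeOld(int pageId) {
            synchronized (database) {
                doWrite(pageId);
            }
        }

        // After this commit: the method holds the store's own monitor instead.
        synchronized void writeNew(int pageId) {
            doWrite(pageId);
        }

        private void doWrite(int pageId) {
            // placeholder for the real page write
        }
    }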
@@ -243,18 +243,16 @@ public class PageStore implements CacheWriter {
      * @param out the output stream
      * @return the new position, or -1 if there is no more data to copy
      */
-    public int copyDirect(int pageId, OutputStream out) throws IOException {
-        synchronized (database) {
+    public synchronized int copyDirect(int pageId, OutputStream out) throws IOException {
         byte[] buffer = new byte[pageSize];
         if (pageId >= pageCount) {
             return -1;
         }
         file.seek((long) pageId << pageSizeShift);
         file.readFullyDirect(buffer, 0, pageSize);
         readCount++;
         out.write(buffer, 0, pageSize);
         return pageId + 1;
-        }
     }

     /**
@@ -395,50 +393,48 @@ public class PageStore implements CacheWriter {
     /**
      * Flush all pending changes to disk, and switch the new transaction log.
      */
-    public void checkpoint() {
+    public synchronized void checkpoint() {
         trace.debug("checkpoint");
         if (log == null || database.isReadOnly()) {
             // the file was never fully opened
             return;
         }
-        synchronized (database) {
         database.checkPowerOff();
         writeIndexRowCounts();
         log.checkpoint();
         writeBack();
         int firstUncommittedSection = getFirstUncommittedSection();
         log.removeUntil(firstUncommittedSection);
         // write back the free list
         writeBack();
         // ensure the free list is backed up again
         log.checkpoint();
         if (trace.isDebugEnabled()) {
             trace.debug("writeFree");
         }
         byte[] test = new byte[16];
         byte[] empty = new byte[pageSize];
         for (int i = PAGE_ID_FREE_LIST_ROOT; i < pageCount; i++) {
             if (isUsed(i)) {
                 freed.clear(i);
             } else if (!freed.get(i)) {
                 if (trace.isDebugEnabled()) {
                     trace.debug("free {0}", i);
                 }
                 file.seek((long) i << pageSizeShift);
                 file.readFully(test, 0, 16);
                 if (test[0] != 0) {
                     file.seek((long) i << pageSizeShift);
                     file.write(empty, 0, pageSize);
                     writeCount++;
                 }
                 freed.set(i);
             }
         }
-        }
     }
@@ -495,7 +491,7 @@ public class PageStore implements CacheWriter {
         for (int x = lastUsed, j = 0; x > MIN_PAGE_COUNT && j < maxMove; x -= blockSize) {
             for (int full = x - blockSize + 1; full <= x; full++) {
                 if (full > MIN_PAGE_COUNT && isUsed(full)) {
-                    synchronized (database) {
+                    synchronized (this) {
                         firstFree = getFirstFree(firstFree);
                         if (firstFree == -1 || firstFree >= full) {
                             j = maxMove;
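The hunk above is the one place where the block form stays and only the monitor changes, from database to this. That is equivalent in effect to the synchronized methods introduced elsewhere in this commit, because a synchronized instance method and a synchronized (this) block acquire the same lock; a tiny illustrative class (not PageStore):

    class MonitorDemo {
        synchronized void viaMethod() {
            // guarded by the monitor of this instance
        }

        void viaBlock() {
            synchronized (this) {
                // guarded by the same monitor as viaMethod()
            }
        }
    }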
@@ -695,111 +691,109 @@ public class PageStore implements CacheWriter {
      * @param pageId the page id
      * @return the page
      */
-    public Page getPage(int pageId) {
-        synchronized (database) {
+    public synchronized Page getPage(int pageId) {
         Page p = (Page) cache.get(pageId);
         if (p != null) {
             return p;
         }
         Data data = createData();
         readPage(pageId, data);
         int type = data.readByte();
         if (type == Page.TYPE_EMPTY) {
             return null;
         }
         data.readShortInt();
         data.readInt();
         if (!checksumTest(data.getBytes(), pageId, pageSize)) {
             throw DbException.get(ErrorCode.FILE_CORRUPTED_1, "wrong checksum");
         }
         switch (type & ~Page.FLAG_LAST) {
         case Page.TYPE_FREE_LIST:
             p = PageFreeList.read(this, data, pageId);
             break;
         case Page.TYPE_DATA_LEAF: {
             int indexId = data.readVarInt();
             PageIndex idx = metaObjects.get(indexId);
             if (idx == null) {
                 throw DbException.get(ErrorCode.FILE_CORRUPTED_1, "index not found " + indexId);
             }
             if (!(idx instanceof PageDataIndex)) {
                 throw DbException.get(ErrorCode.FILE_CORRUPTED_1, "not a data index " + indexId + " " + idx);
             }
             PageDataIndex index = (PageDataIndex) idx;
             if (statistics != null) {
                 statisticsIncrement(index.getTable().getName() + "." + index.getName() + " read");
             }
             p = PageDataLeaf.read(index, data, pageId);
             break;
         }
         case Page.TYPE_DATA_NODE: {
             int indexId = data.readVarInt();
             PageIndex idx = metaObjects.get(indexId);
             if (idx == null) {
                 throw DbException.get(ErrorCode.FILE_CORRUPTED_1, "index not found " + indexId);
             }
             if (!(idx instanceof PageDataIndex)) {
                 throw DbException.get(ErrorCode.FILE_CORRUPTED_1, "not a data index " + indexId + " " + idx);
             }
             PageDataIndex index = (PageDataIndex) idx;
             if (statistics != null) {
                 statisticsIncrement(index.getTable().getName() + "." + index.getName() + " read");
             }
             p = PageDataNode.read(index, data, pageId);
             break;
         }
         case Page.TYPE_DATA_OVERFLOW: {
             p = PageDataOverflow.read(this, data, pageId);
             if (statistics != null) {
                 statisticsIncrement("overflow read");
             }
             break;
         }
         case Page.TYPE_BTREE_LEAF: {
             int indexId = data.readVarInt();
             PageIndex idx = metaObjects.get(indexId);
             if (idx == null) {
                 throw DbException.get(ErrorCode.FILE_CORRUPTED_1, "index not found " + indexId);
             }
             if (!(idx instanceof PageBtreeIndex)) {
                 throw DbException.get(ErrorCode.FILE_CORRUPTED_1, "not a btree index " + indexId + " " + idx);
             }
             PageBtreeIndex index = (PageBtreeIndex) idx;
             if (statistics != null) {
                 statisticsIncrement(index.getTable().getName() + "." + index.getName() + " read");
             }
             p = PageBtreeLeaf.read(index, data, pageId);
             break;
         }
         case Page.TYPE_BTREE_NODE: {
             int indexId = data.readVarInt();
             PageIndex idx = metaObjects.get(indexId);
             if (idx == null) {
                 throw DbException.get(ErrorCode.FILE_CORRUPTED_1, "index not found " + indexId);
             }
             if (!(idx instanceof PageBtreeIndex)) {
                 throw DbException.get(ErrorCode.FILE_CORRUPTED_1, "not a btree index " + indexId + " " + idx);
             }
             PageBtreeIndex index = (PageBtreeIndex) idx;
             if (statistics != null) {
                 statisticsIncrement(index.getTable().getName() + "." + index.getName() + " read");
             }
             p = PageBtreeNode.read(index, data, pageId);
             break;
         }
         case Page.TYPE_STREAM_TRUNK:
             p = PageStreamTrunk.read(this, data, pageId);
             break;
         case Page.TYPE_STREAM_DATA:
             p = PageStreamData.read(this, data, pageId);
             break;
         default:
             throw DbException.get(ErrorCode.FILE_CORRUPTED_1, "page=" + pageId + " type=" + type);
         }
         cache.put(p);
         return p;
-        }
     }

     private int getFirstUncommittedSection() {
@@ -937,41 +931,35 @@ public class PageStore implements CacheWriter {
     /**
      * Close the file without further writing.
      */
-    public void close() {
+    public synchronized void close() {
         trace.debug("close");
-        synchronized (database) {
         if (log != null) {
             log.close();
             log = null;
         }
         if (file != null) {
             try {
                 file.releaseLock();
                 file.close();
             } finally {
                 file = null;
             }
         }
-        }
     }

-    public void flushLog() {
+    public synchronized void flushLog() {
         if (file != null) {
-            synchronized (database) {
             log.flush();
-            }
         }
     }

     /**
      * Flush the transaction log and sync the file.
      */
-    public void sync() {
+    public synchronized void sync() {
         if (file != null) {
-            synchronized (database) {
             log.flush();
             file.sync();
-            }
         }
     }
@@ -979,15 +967,13 @@ public class PageStore implements CacheWriter {
         return trace;
     }

-    public void writeBack(CacheObject obj) {
+    public synchronized void writeBack(CacheObject obj) {
         Page record = (Page) obj;
         if (trace.isDebugEnabled()) {
             trace.debug("writeBack {0}", record);
         }
-        synchronized (database) {
         record.write();
         record.setChanged(false);
-        }
     }

     /**
@@ -996,21 +982,19 @@ public class PageStore implements CacheWriter {
      * @param page the page
      * @param old the old data (if known) or null
      */
-    public void logUndo(Page page, Data old) {
+    public synchronized void logUndo(Page page, Data old) {
         if (logMode == LOG_MODE_OFF) {
             return;
         }
-        synchronized (database) {
         checkOpen();
         database.checkWritingAllowed();
         if (!recoveryRunning) {
             int pos = page.getPos();
             if (!log.getUndo(pos)) {
                 if (old == null) {
                     old = readPage(pos);
                 }
                 log.addUndo(pos, old);
             }
         }
-        }
     }
@@ -1020,26 +1004,24 @@ public class PageStore implements CacheWriter {
      *
      * @param page the page
      */
-    public void update(Page page) {
-        synchronized (database) {
+    public synchronized void update(Page page) {
         if (trace.isDebugEnabled()) {
             if (!page.isChanged()) {
                 trace.debug("updateRecord {0}", page.toString());
             }
         }
         checkOpen();
         database.checkWritingAllowed();
         page.setChanged(true);
         int pos = page.getPos();
         if (SysProperties.CHECK && !recoveryRunning) {
             // ensure the undo entry is already written
             if (logMode != LOG_MODE_OFF) {
                 log.addUndo(pos, null);
             }
         }
         allocatePage(pos);
         cache.update(pos, page);
-        }
     }

     private int getFreeListId(int pageId) {
@@ -1050,7 +1032,7 @@ public class PageStore implements CacheWriter {
         return getFreeList(getFreeListId(pageId));
     }

-    private PageFreeList getFreeList(int i) {
+    private synchronized PageFreeList getFreeList(int i) {
         PageFreeList list = null;
         if (i < freeLists.size()) {
             list = freeLists.get(i);
@@ -1058,24 +1040,22 @@ public class PageStore implements CacheWriter {
                 return list;
             }
         }
-        synchronized (database) {
         int p = PAGE_ID_FREE_LIST_ROOT + i * freeListPagesPerList;
         while (p >= pageCount) {
             increaseFileSize();
         }
         if (p < pageCount) {
             list = (PageFreeList) getPage(p);
         }
         if (list == null) {
             list = PageFreeList.create(this, p);
             cache.put(list);
         }
         while (freeLists.size() <= i) {
             freeLists.add(null);
         }
         freeLists.set(i, list);
         return list;
-        }
     }

     private void freePage(int pageId) {
@@ -1129,25 +1109,23 @@ public class PageStore implements CacheWriter {
         return pos;
     }

-    private int allocatePage(BitField exclude, int first) {
+    private synchronized int allocatePage(BitField exclude, int first) {
         int page;
-        synchronized (database) {
         // TODO could remember the first possible free list page
         for (int i = 0;; i++) {
             PageFreeList list = getFreeList(i);
             page = list.allocate(exclude, first);
             if (page >= 0) {
                 break;
             }
         }
         while (page >= pageCount) {
             increaseFileSize();
         }
         if (trace.isDebugEnabled()) {
             // trace.debug("allocatePage " + pos);
         }
         return page;
-        }
     }

     private void increaseFileSize() {
@@ -1185,27 +1163,25 @@ public class PageStore implements CacheWriter {
      * @param pageId the page id
      * @param undo if the undo record must have been written
      */
-    void free(int pageId, boolean undo) {
+    synchronized void free(int pageId, boolean undo) {
         if (trace.isDebugEnabled()) {
             // trace.debug("free " + pageId + " " + undo);
         }
-        synchronized (database) {
         cache.remove(pageId);
         if (SysProperties.CHECK && !recoveryRunning && undo) {
             // ensure the undo entry is already written
             if (logMode != LOG_MODE_OFF) {
                 log.addUndo(pageId, null);
             }
         }
         freePage(pageId);
         if (recoveryRunning) {
             writePage(pageId, createData());
             if (reservedPages != null && reservedPages.containsKey(pageId)) {
                 // re-allocate the page if it is used later on again
                 int latestPos = reservedPages.get(pageId);
                 if (latestPos > log.getLogPos()) {
                     allocatePage(pageId);
                 }
             }
         }
-        }
@@ -1217,15 +1193,13 @@ public class PageStore implements CacheWriter {
      *
      * @param pageId the page id
      */
-    void freeUnused(int pageId) {
+    synchronized void freeUnused(int pageId) {
         if (trace.isDebugEnabled()) {
             trace.debug("freeUnused {0}", pageId);
         }
-        synchronized (database) {
         cache.remove(pageId);
         freePage(pageId);
         freed.set(pageId);
-        }
     }

     /**
@@ -1255,21 +1229,19 @@ public class PageStore implements CacheWriter {
      * @param pos the page id
      * @param page the page
      */
-    void readPage(int pos, Data page) {
+    synchronized void readPage(int pos, Data page) {
         if (recordPageReads) {
             if (pos >= MIN_PAGE_COUNT && recordedPagesIndex.get(pos) == IntIntHashMap.NOT_FOUND) {
                 recordedPagesIndex.put(pos, recordedPagesList.size());
                 recordedPagesList.add(pos);
             }
         }
-        synchronized (database) {
         if (pos < 0 || pos >= pageCount) {
             throw DbException.get(ErrorCode.FILE_CORRUPTED_1, pos + " of " + pageCount);
         }
         file.seek((long) pos << pageSizeShift);
         file.readFully(page.getBytes(), 0, pageSize);
         readCount++;
-        }
     }

     /**
@@ -1296,7 +1268,7 @@ public class PageStore implements CacheWriter {
      * @param pageId the page id
      * @param data the data
      */
-    public void writePage(int pageId, Data data) {
+    public synchronized void writePage(int pageId, Data data) {
         if (pageId <= 0) {
             DbException.throwInternalError("write to page " + pageId);
         }
@@ -1309,11 +1281,9 @@ public class PageStore implements CacheWriter {
             }
         }
         checksumSet(bytes, pageId);
-        synchronized (database) {
         file.seek((long) pageId << pageSizeShift);
         file.write(bytes, 0, pageSize);
         writeCount++;
-        }
     }

     /**
@@ -1321,10 +1291,8 @@ public class PageStore implements CacheWriter {
      *
      * @param pageId the page id
      */
-    public void removeRecord(int pageId) {
-        synchronized (database) {
+    public synchronized void removeRecord(int pageId) {
         cache.remove(pageId);
-        }
     }

     Database getDatabase() {
@@ -1401,12 +1369,10 @@ public class PageStore implements CacheWriter {
      * @param row the row to add
      * @param add true if the row is added, false if it is removed
      */
-    public void logAddOrRemoveRow(Session session, int tableId, Row row, boolean add) {
+    public synchronized void logAddOrRemoveRow(Session session, int tableId, Row row, boolean add) {
         if (logMode != LOG_MODE_OFF) {
             if (!recoveryRunning) {
-                synchronized (database) {
                 log.logAddOrRemoveRow(session, tableId, row, add);
-                }
             }
         }
     }
@@ -1416,14 +1382,12 @@ public class PageStore implements CacheWriter {
      *
      * @param session the session
      */
-    public void commit(Session session) {
-        synchronized (database) {
+    public synchronized void commit(Session session) {
         checkOpen();
         log.commit(session.getId());
         if (log.getSize() - logSizeBase > maxLogSize) {
             checkpoint();
             logSizeBase = log.getSize();
         }
-        }
     }
@@ -1433,10 +1397,8 @@ public class PageStore implements CacheWriter {
      * @param session the session
      * @param transaction the name of the transaction
      */
-    public void prepareCommit(Session session, String transaction) {
-        synchronized (database) {
+    public synchronized void prepareCommit(Session session, String transaction) {
         log.prepareCommit(session, transaction);
-        }
     }

     /**
@@ -1822,11 +1784,9 @@ public class PageStore implements CacheWriter {
      * @param session the session
      * @param tableId the table id
      */
-    public void logTruncate(Session session, int tableId) {
-        synchronized (database) {
+    public synchronized void logTruncate(Session session, int tableId) {
         if (!recoveryRunning) {
             log.logTruncate(session, tableId);
         }
-        }
     }