h2database commit adec62cb
Authored Sep 03, 2018 by Noel Grandin
limit the size of the thread-pool
Parent: 1a79c0ac

Showing 2 changed files with 96 additions and 91 deletions.
h2/src/main/org/h2/mvstore/MVStore.java: +47 -57
h2/src/main/org/h2/mvstore/Page.java: +49 -34
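In outline, the commit removes the store-wide ExecutorService and gives each garbage-collection pass its own ThreadPoolExecutor plus an AtomicInteger that counts in-flight worker tasks; once the counter reaches the pool's maximum size, further child reads run on the calling thread instead of being queued. A minimal, self-contained sketch of that throttling pattern, using illustrative names (BoundedRecursion, visit, inFlight) rather than the actual H2 identifiers:

    import java.util.ArrayList;
    import java.util.List;
    import java.util.concurrent.ExecutionException;
    import java.util.concurrent.Future;
    import java.util.concurrent.LinkedBlockingQueue;
    import java.util.concurrent.ThreadPoolExecutor;
    import java.util.concurrent.TimeUnit;
    import java.util.concurrent.atomic.AtomicInteger;

    // Illustrative sketch of the throttling pattern introduced by this commit:
    // recursive work is parallelized until a fixed number of worker tasks are
    // in flight, after which further work runs on the calling thread.
    public class BoundedRecursion {

        static void visit(int depth, ThreadPoolExecutor executor, AtomicInteger inFlight) {
            if (depth == 0) {
                return; // leaf: nothing more to traverse
            }
            List<Future<?>> futures = new ArrayList<>();
            for (int i = 0; i < 4; i++) {
                // Best-effort check; the pool size cannot be observed reliably,
                // so the counter tracks submitted-but-unfinished tasks instead.
                if (inFlight.get() >= executor.getMaximumPoolSize()) {
                    visit(depth - 1, executor, inFlight); // pool saturated: recurse inline
                } else {
                    inFlight.incrementAndGet();
                    futures.add(executor.submit(() -> {
                        try {
                            visit(depth - 1, executor, inFlight);
                        } finally {
                            inFlight.decrementAndGet();
                        }
                    }));
                }
            }
            // Wait for all children before returning up the call stack.
            for (Future<?> f : futures) {
                try {
                    f.get();
                } catch (InterruptedException | ExecutionException e) {
                    throw new RuntimeException(e);
                }
            }
        }

        public static void main(String[] args) {
            ThreadPoolExecutor executor = new ThreadPoolExecutor(10, 10, 10L, TimeUnit.SECONDS,
                    new LinkedBlockingQueue<Runnable>());
            AtomicInteger inFlight = new AtomicInteger(0);
            try {
                visit(3, executor, inFlight);
            } finally {
                executor.shutdownNow();
            }
        }
    }

Running work inline once the counter reaches getMaximumPoolSize() keeps the recursion from queuing unbounded tasks behind blocked parents; the check is only best-effort, which is why the commit's own comment calls the thread-pool size check unreliable.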
h2/src/main/org/h2/mvstore/MVStore.java
@@ -309,11 +309,6 @@ public class MVStore {
     private long lastFreeUnusedChunks;
 
-    /**
-     * Service for executing multiple reads in parallel when doing garbage collection.
-     */
-    final ExecutorService executorService;
-
     /**
      * Create and open the store.
      *
@@ -364,8 +359,6 @@ public class MVStore {
         keysPerPage = DataUtils.getConfigParam(config, "keysPerPage", 48);
         backgroundExceptionHandler = (UncaughtExceptionHandler)
                 config.get("backgroundExceptionHandler");
-        executorService = new ThreadPoolExecutor(0, 10, 10L, TimeUnit.SECONDS,
-                new ArrayBlockingQueue<Runnable>(keysPerPage + 1));
         meta = new MVMap<>(this);
         meta.init();
         if (this.fileStore != null) {
@@ -952,7 +945,6 @@ public class MVStore {
             return;
         }
         stopBackgroundThread();
-        executorService.shutdownNow();   // no need to wait for reads
         closed = true;
         storeLock.lock();
         try {
@@ -1352,41 +1344,48 @@
     }
 
     private Set<Integer> collectReferencedChunks() {
-        ChunkIdsCollector collector = new ChunkIdsCollector(meta.getId());
-        Set<Long> inspectedRoots = new HashSet<>();
-        long pos = lastChunk.metaRootPos;
-        inspectedRoots.add(pos);
-        collector.visit(pos);
-        long oldestVersionToKeep = getOldestVersionToKeep();
-        MVMap.RootReference rootReference = meta.getRoot();
-        do {
-            Page rootPage = rootReference.root;
-            pos = rootPage.getPos();
-            if (!rootPage.isSaved()) {
-                collector.setMapId(meta.getId());
-                collector.visit(rootPage);
-            } else if (inspectedRoots.add(pos)) {
-                collector.setMapId(meta.getId());
-                collector.visit(pos);
-            }
-            for (Cursor<String, String> c = new Cursor<>(rootPage, "root."); c.hasNext(); ) {
-                String key = c.next();
-                assert key != null;
-                if (!key.startsWith("root.")) {
-                    break;
-                }
-                pos = DataUtils.parseHexLong(c.getValue());
-                if (DataUtils.isPageSaved(pos) && inspectedRoots.add(pos)) {
-                    // to allow for something like "root.tmp.123" to be processed
-                    int mapId = DataUtils.parseHexInt(key.substring(key.lastIndexOf('.') + 1));
-                    collector.setMapId(mapId);
-                    collector.visit(pos);
-                }
-            }
-        } while (rootReference.version >= oldestVersionToKeep
-                && (rootReference = rootReference.previous) != null);
-        return collector.getReferenced();
+        final ThreadPoolExecutor executorService = new ThreadPoolExecutor(10, 10, 10L, TimeUnit.SECONDS,
+                new ArrayBlockingQueue<Runnable>(keysPerPage + 1));
+        final AtomicInteger executingThreadCounter = new AtomicInteger(0);
+        try {
+            ChunkIdsCollector collector = new ChunkIdsCollector(meta.getId());
+            Set<Long> inspectedRoots = new HashSet<>();
+            long pos = lastChunk.metaRootPos;
+            inspectedRoots.add(pos);
+            collector.visit(pos, executorService, executingThreadCounter);
+            long oldestVersionToKeep = getOldestVersionToKeep();
+            MVMap.RootReference rootReference = meta.getRoot();
+            do {
+                Page rootPage = rootReference.root;
+                pos = rootPage.getPos();
+                if (!rootPage.isSaved()) {
+                    collector.setMapId(meta.getId());
+                    collector.visit(rootPage, executorService, executingThreadCounter);
+                } else if (inspectedRoots.add(pos)) {
+                    collector.setMapId(meta.getId());
+                    collector.visit(pos, executorService, executingThreadCounter);
+                }
+                for (Cursor<String, String> c = new Cursor<>(rootPage, "root."); c.hasNext();) {
+                    String key = c.next();
+                    assert key != null;
+                    if (!key.startsWith("root.")) {
+                        break;
+                    }
+                    pos = DataUtils.parseHexLong(c.getValue());
+                    if (DataUtils.isPageSaved(pos) && inspectedRoots.add(pos)) {
+                        // to allow for something like "root.tmp.123" to be
+                        // processed
+                        int mapId = DataUtils.parseHexInt(key.substring(key.lastIndexOf('.') + 1));
+                        collector.setMapId(mapId);
+                        collector.visit(pos, executorService, executingThreadCounter);
+                    }
+                }
+            } while (rootReference.version >= oldestVersionToKeep
+                    && (rootReference = rootReference.previous) != null);
+            return collector.getReferenced();
+        } finally {
+            executorService.shutdownNow();
+        }
     }
 
     final class ChunkIdsCollector {
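The hunk above also changes the executor's lifecycle: instead of living for the whole MVStore, the pool exists only for one collectReferencedChunks() call and is torn down in a finally block. A small sketch of that per-call lifecycle, with hypothetical names (PerCallExecutorSketch, collect, runCollection); the sketch uses an unbounded LinkedBlockingQueue for simplicity, where the commit uses a bounded ArrayBlockingQueue sized from keysPerPage:

    import java.util.concurrent.LinkedBlockingQueue;
    import java.util.concurrent.ThreadPoolExecutor;
    import java.util.concurrent.TimeUnit;
    import java.util.concurrent.atomic.AtomicInteger;

    // Sketch of the per-call executor lifecycle used above: the pool exists
    // only for one collection pass and is torn down in a finally block.
    class PerCallExecutorSketch {

        static void collect() {
            ThreadPoolExecutor executor = new ThreadPoolExecutor(10, 10, 10L, TimeUnit.SECONDS,
                    new LinkedBlockingQueue<Runnable>());
            AtomicInteger inFlight = new AtomicInteger(0);
            try {
                runCollection(executor, inFlight);
            } finally {
                executor.shutdownNow(); // no need to wait for outstanding reads
            }
        }

        // Stand-in for the root/page traversal shown in the diff above.
        static void runCollection(ThreadPoolExecutor executor, AtomicInteger inFlight) {
        }
    }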
@@ -1417,7 +1416,7 @@ public class MVStore {
             return referencedChunks;
         }
 
-        public void visit(Page page) {
+        public void visit(Page page, ThreadPoolExecutor executorService, AtomicInteger executingThreadCounter) {
             long pos = page.getPos();
             if (DataUtils.isPageSaved(pos)) {
                 registerChunk(DataUtils.getPageChunkId(pos));
@@ -1430,9 +1429,9 @@ public class MVStore {
             for (int i = 0; i < count; i++) {
                 Page childPage = page.getChildPageIfLoaded(i);
                 if (childPage != null) {
-                    childCollector.visit(childPage);
+                    childCollector.visit(childPage, executorService, executingThreadCounter);
                 } else {
-                    childCollector.visit(page.getChildPagePos(i));
+                    childCollector.visit(page.getChildPagePos(i), executorService, executingThreadCounter);
                 }
             }
             // and cache resulting set of chunk ids
@@ -1442,7 +1441,7 @@ public class MVStore {
             }
         }
 
-        public void visit(long pos) {
+        public void visit(long pos, ThreadPoolExecutor executorService, AtomicInteger executingThreadCounter) {
             if (!DataUtils.isPageSaved(pos)) {
                 return;
             }
@@ -1461,7 +1460,7 @@ public class MVStore {
             Page page;
             if (cache != null && (page = cache.get(pos)) != null) {
                 // there is a full page in cache, use it
-                childCollector.visit(page);
+                childCollector.visit(page, executorService, executingThreadCounter);
             } else {
                 // page was not cached: read the data
                 Chunk chunk = getChunk(pos);
@@ -1472,17 +1471,8 @@ public class MVStore {
                         "Negative position {0}; p={1}, c={2}",
                         filePos, pos, chunk.toString());
                 }
                 long maxPos = (chunk.block + chunk.len) * BLOCK_SIZE;
-                final List<Future<?>> futures = Page.readChildrenPositions(fileStore, pos,
-                        filePos, maxPos, childCollector, executorService);
-                for (Future<?> f : futures) {
-                    try {
-                        f.get();
-                    } catch (InterruptedException ex) {
-                        throw new RuntimeException(ex);
-                    } catch (ExecutionException ex) {
-                        throw DbException.convert(ex);
-                    }
-                }
+                Page.readChildrenPositions(fileStore, pos, filePos, maxPos,
+                        childCollector, executorService, executingThreadCounter);
             }
             // and cache resulting set of chunk ids
             if (cacheChunkRef != null) {
h2/src/main/org/h2/mvstore/Page.java
@@ -13,10 +13,12 @@ import java.nio.ByteBuffer;
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.List;
-import java.util.concurrent.ExecutorService;
-import java.util.concurrent.ForkJoinPool;
+import java.util.concurrent.ExecutionException;
 import java.util.concurrent.Future;
+import java.util.concurrent.ThreadPoolExecutor;
+import java.util.concurrent.atomic.AtomicInteger;
 
 import org.h2.compress.Compressor;
+import org.h2.message.DbException;
 import org.h2.mvstore.type.DataType;
 import org.h2.util.Utils;
@@ -252,10 +254,9 @@ public abstract class Page implements Cloneable
      * @param maxPos the maximum position (the end of the chunk)
      * @param collector to report child pages positions to
      */
-    static List<Future<?>> readChildrenPositions(FileStore fileStore, long pos,
-            long filePos, long maxPos, final MVStore.ChunkIdsCollector collector,
-            ExecutorService executorService) {
+    static void readChildrenPositions(FileStore fileStore, long pos, long filePos, long maxPos,
+            final MVStore.ChunkIdsCollector collector, final ThreadPoolExecutor executorService,
+            final AtomicInteger executingThreadCounter) {
         ByteBuffer buff;
         int maxLength = DataUtils.getPageMaxLength(pos);
         if (maxLength == DataUtils.PAGE_LARGE) {
@@ -266,10 +267,8 @@ public abstract class Page implements Cloneable
         maxLength = (int) Math.min(maxPos - filePos, maxLength);
         int length = maxLength;
         if (length < 0) {
-            throw DataUtils.newIllegalStateException(
-                    DataUtils.ERROR_FILE_CORRUPT,
-                    "Illegal page length {0} reading at {1}; max pos {2} ",
-                    length, filePos, maxPos);
+            throw DataUtils.newIllegalStateException(DataUtils.ERROR_FILE_CORRUPT,
+                    "Illegal page length {0} reading at {1}; max pos {2} ", length, filePos, maxPos);
         }
         buff = fileStore.readFully(filePos, length);
         int chunkId = DataUtils.getPageChunkId(pos);
@@ -277,49 +276,65 @@ public abstract class Page implements Cloneable
         int start = buff.position();
         int pageLength = buff.getInt();
         if (pageLength > maxLength) {
-            throw DataUtils.newIllegalStateException(
-                    DataUtils.ERROR_FILE_CORRUPT,
-                    "File corrupted in chunk {0}, expected page length =< {1}, got {2}",
-                    chunkId, maxLength, pageLength);
+            throw DataUtils.newIllegalStateException(DataUtils.ERROR_FILE_CORRUPT,
+                    "File corrupted in chunk {0}, expected page length =< {1}, got {2}", chunkId,
+                    maxLength, pageLength);
         }
         buff.limit(start + pageLength);
         short check = buff.getShort();
         int m = DataUtils.readVarInt(buff);
         int mapId = collector.getMapId();
         if (m != mapId) {
-            throw DataUtils.newIllegalStateException(
-                    DataUtils.ERROR_FILE_CORRUPT,
-                    "File corrupted in chunk {0}, expected map id {1}, got {2}",
-                    chunkId, mapId, m);
+            throw DataUtils.newIllegalStateException(DataUtils.ERROR_FILE_CORRUPT,
+                    "File corrupted in chunk {0}, expected map id {1}, got {2}", chunkId, mapId, m);
         }
-        int checkTest = DataUtils.getCheckValue(chunkId)
-                ^ DataUtils.getCheckValue(offset)
+        int checkTest = DataUtils.getCheckValue(chunkId) ^ DataUtils.getCheckValue(offset)
                 ^ DataUtils.getCheckValue(pageLength);
         if (check != (short) checkTest) {
-            throw DataUtils.newIllegalStateException(
-                    DataUtils.ERROR_FILE_CORRUPT,
-                    "File corrupted in chunk {0}, expected check value {1}, got {2}",
-                    chunkId, checkTest, check);
+            throw DataUtils.newIllegalStateException(DataUtils.ERROR_FILE_CORRUPT,
+                    "File corrupted in chunk {0}, expected check value {1}, got {2}", chunkId,
+                    checkTest, check);
         }
         int len = DataUtils.readVarInt(buff);
         int type = buff.get();
         if ((type & 1) != DataUtils.PAGE_TYPE_NODE) {
-            throw DataUtils.newIllegalStateException(
-                    DataUtils.ERROR_FILE_CORRUPT,
+            throw DataUtils.newIllegalStateException(DataUtils.ERROR_FILE_CORRUPT,
                     "Position {0} expected to be a non-leaf", pos);
         }
+        /**
+         * The logic here is a little awkward. We want to (a) execute reads in parallel, but (b)
+         * limit the number of threads we create. This is complicated by (a) the algorithm is
+         * recursive and needs to wait for children before returning up the call-stack, (b) checking
+         * the size of the thread-pool is not reliable.
+         */
         final List<Future<?>> futures = new ArrayList<>(len);
         for (int i = 0; i <= len; i++) {
             final long childPagePos = buff.getLong();
-            Future<?> f = executorService.submit(new Runnable() {
-                @Override
-                public void run() {
-                    collector.visit(childPagePos);
-                }
-            });
-            futures.add(f);
+            if (executingThreadCounter.get() >= executorService.getMaximumPoolSize()) {
+                collector.visit(childPagePos, executorService, executingThreadCounter);
+            } else {
+                executingThreadCounter.incrementAndGet();
+                Future<?> f = executorService.submit(new Runnable() {
+                    @Override
+                    public void run() {
+                        try {
+                            collector.visit(childPagePos, executorService, executingThreadCounter);
+                        } finally {
+                            executingThreadCounter.decrementAndGet();
+                        }
+                    }
+                });
+                futures.add(f);
+            }
         }
-        return futures;
+        for (Future<?> f : futures) {
+            try {
+                f.get();
+            } catch (InterruptedException ex) {
+                throw new RuntimeException(ex);
+            } catch (ExecutionException ex) {
+                throw DbException.convert(ex);
+            }
+        }
     }
 
     /**
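Since readChildrenPositions() now joins its own futures, a failure inside a worker task reaches the caller through Future.get() as an ExecutionException and is rethrown (via DbException.convert in the real code). A small stand-alone sketch of that propagation path, using a simulated failure rather than an actual page read; the class name FuturePropagationSketch is illustrative only:

    import java.util.concurrent.ExecutionException;
    import java.util.concurrent.ExecutorService;
    import java.util.concurrent.Executors;
    import java.util.concurrent.Future;

    // Sketch: an exception thrown inside a submitted task surfaces to the
    // thread that joins the Future, wrapped in ExecutionException.
    public class FuturePropagationSketch {
        public static void main(String[] args) throws InterruptedException {
            ExecutorService pool = Executors.newFixedThreadPool(2);
            Future<?> f = pool.submit((Runnable) () -> {
                throw new IllegalStateException("simulated corrupt page");
            });
            try {
                f.get(); // rethrows the task's failure, wrapped in ExecutionException
            } catch (ExecutionException ex) {
                System.out.println("propagated: " + ex.getCause().getMessage());
            } finally {
                pool.shutdownNow();
            }
        }
    }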