h2database / Commits / c0a838a1

Commit c0a838a1 (Unverified), authored 6 years ago
Author: Noel Grandin
Committer: GitHub, 6 years ago
Merge pull request #1413 from grandinj/gc_gather_io2
improvements to MVStore garbage collection
Parents: 12ec6d9d, aeea8c9f
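The change below gives the MVStore garbage collector's chunk-reference scan a bounded degree of parallelism: a fixed-size ThreadPoolExecutor plus an AtomicInteger counting in-flight tasks, so a child page is handed to the pool only while spare capacity exists, is visited on the current thread otherwise, and the caller joins the returned futures before unwinding the recursion. A minimal, self-contained sketch of that pattern under assumed names (Node, BoundedParallelScan, and the queue capacity are illustrative, not the MVStore API):

    import java.util.ArrayList;
    import java.util.List;
    import java.util.concurrent.ArrayBlockingQueue;
    import java.util.concurrent.ExecutionException;
    import java.util.concurrent.Future;
    import java.util.concurrent.ThreadPoolExecutor;
    import java.util.concurrent.TimeUnit;
    import java.util.concurrent.atomic.AtomicInteger;

    // Hypothetical in-memory tree; in the real diff the "children" are page positions read from disk.
    class Node {
        final int id;
        final List<Node> children = new ArrayList<>();
        Node(int id) { this.id = id; }
    }

    public class BoundedParallelScan {
        // Same shape as the diff: fixed pool, bounded queue, counter of in-flight tasks.
        private final ThreadPoolExecutor executor = new ThreadPoolExecutor(
                10, 10, 10L, TimeUnit.SECONDS, new ArrayBlockingQueue<Runnable>(1024));
        private final AtomicInteger executingThreadCounter = new AtomicInteger();

        void visit(final Node node) {
            System.out.println("visited node " + node.id);
            List<Future<?>> futures = new ArrayList<>();
            for (final Node child : node.children) {
                for (;;) {
                    int counter = executingThreadCounter.get();
                    if (counter >= executor.getMaximumPoolSize()) {
                        visit(child);               // pool is saturated: recurse on this thread
                        break;
                    }
                    if (executingThreadCounter.compareAndSet(counter, counter + 1)) {
                        futures.add(executor.submit(new Runnable() {
                            @Override
                            public void run() {
                                try {
                                    visit(child);   // recurse on a pool thread
                                } finally {
                                    executingThreadCounter.decrementAndGet();
                                }
                            }
                        }));
                        break;
                    }
                    // lost the compareAndSet race: re-read the counter and try again
                }
            }
            for (Future<?> f : futures) {           // wait for children before unwinding
                try {
                    f.get();
                } catch (InterruptedException | ExecutionException e) {
                    throw new RuntimeException(e);
                }
            }
        }

        public static void main(String[] args) {
            Node root = new Node(0);
            for (int i = 1; i <= 3; i++) {
                Node branch = new Node(i);
                root.children.add(branch);
                for (int j = 0; j < 2; j++) {
                    branch.children.add(new Node(10 * i + j));
                }
            }
            BoundedParallelScan scan = new BoundedParallelScan();
            scan.visit(root);
            scan.executor.shutdownNow();
        }
    }

As in the diff, the counter is bumped with compareAndSet before submitting and decremented in a finally block, so the bound on outstanding tasks holds even if a task fails.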
Showing 2 changed files with 171 additions and 130 deletions (+171, -130)
h2/src/main/org/h2/mvstore/MVStore.java (+107, -102)
h2/src/main/org/h2/mvstore/Page.java (+64, -28)
h2/src/main/org/h2/mvstore/MVStore.java
@@ -5,6 +5,7 @@
  */
 package org.h2.mvstore;
 
+import static org.h2.mvstore.MVMap.INITIAL_VERSION;
 import java.lang.Thread.UncaughtExceptionHandler;
 import java.nio.ByteBuffer;
 import java.nio.charset.StandardCharsets;
@@ -17,11 +18,17 @@ import java.util.HashMap;
 import java.util.HashSet;
 import java.util.Iterator;
 import java.util.LinkedList;
 import java.util.List;
 import java.util.Map;
 import java.util.PriorityQueue;
 import java.util.Queue;
 import java.util.Set;
+import java.util.concurrent.ArrayBlockingQueue;
 import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.ExecutionException;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Future;
+import java.util.concurrent.ThreadPoolExecutor;
+import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicInteger;
 import java.util.concurrent.atomic.AtomicLong;
@@ -30,9 +37,9 @@ import org.h2.compress.CompressDeflate;
 import org.h2.compress.CompressLZF;
 import org.h2.compress.Compressor;
 import org.h2.engine.Constants;
+import org.h2.message.DbException;
 import org.h2.mvstore.cache.CacheLongKeyLIRS;
 import org.h2.util.MathUtils;
-import static org.h2.mvstore.MVMap.INITIAL_VERSION;
 import org.h2.util.Utils;
 
 /*
@@ -1337,49 +1344,55 @@ public class MVStore {
     }
 
     private Set<Integer> collectReferencedChunks() {
-        ChunkIdsCollector collector = new ChunkIdsCollector(meta.getId());
-        Set<Long> inspectedRoots = new HashSet<>();
-        long pos = lastChunk.metaRootPos;
-        inspectedRoots.add(pos);
-        collector.visit(pos);
-        long oldestVersionToKeep = getOldestVersionToKeep();
-        MVMap.RootReference rootReference = meta.getRoot();
-        do {
-            Page rootPage = rootReference.root;
-            pos = rootPage.getPos();
-            if (!rootPage.isSaved()) {
-                collector.setMapId(meta.getId());
-                collector.visit(rootPage);
-            } else if (inspectedRoots.add(pos)) {
-                collector.setMapId(meta.getId());
-                collector.visit(pos);
-            }
-            for (Cursor<String, String> c = new Cursor<>(rootPage, "root."); c.hasNext();) {
-                String key = c.next();
-                assert key != null;
-                if (!key.startsWith("root.")) {
-                    break;
-                }
-                pos = DataUtils.parseHexLong(c.getValue());
-                if (DataUtils.isPageSaved(pos) && inspectedRoots.add(pos)) {
-                    // to allow for something like "root.tmp.123" to be processed
-                    int mapId = DataUtils.parseHexInt(key.substring(key.lastIndexOf('.') + 1));
-                    collector.setMapId(mapId);
-                    collector.visit(pos);
-                }
-            }
-        } while (rootReference.version >= oldestVersionToKeep &&
-                (rootReference = rootReference.previous) != null);
-        return collector.getReferenced();
+        final ThreadPoolExecutor executorService = new ThreadPoolExecutor(10, 10, 10L, TimeUnit.SECONDS,
+                new ArrayBlockingQueue<Runnable>(keysPerPage + 1));
+        final AtomicInteger executingThreadCounter = new AtomicInteger(0);
+        try {
+            ChunkIdsCollector collector = new ChunkIdsCollector(meta.getId());
+            Set<Long> inspectedRoots = new HashSet<>();
+            long pos = lastChunk.metaRootPos;
+            inspectedRoots.add(pos);
+            collector.visit(pos, executorService, executingThreadCounter);
+            long oldestVersionToKeep = getOldestVersionToKeep();
+            MVMap.RootReference rootReference = meta.getRoot();
+            do {
+                Page rootPage = rootReference.root;
+                pos = rootPage.getPos();
+                if (!rootPage.isSaved()) {
+                    collector.setMapId(meta.getId());
+                    collector.visit(rootPage, executorService, executingThreadCounter);
+                } else if (inspectedRoots.add(pos)) {
+                    collector.setMapId(meta.getId());
+                    collector.visit(pos, executorService, executingThreadCounter);
+                }
+                for (Cursor<String, String> c = new Cursor<>(rootPage, "root."); c.hasNext();) {
+                    String key = c.next();
+                    assert key != null;
+                    if (!key.startsWith("root.")) {
+                        break;
+                    }
+                    pos = DataUtils.parseHexLong(c.getValue());
+                    if (DataUtils.isPageSaved(pos) && inspectedRoots.add(pos)) {
+                        // to allow for something like "root.tmp.123" to be
+                        // processed
+                        int mapId = DataUtils.parseHexInt(key.substring(key.lastIndexOf('.') + 1));
+                        collector.setMapId(mapId);
+                        collector.visit(pos, executorService, executingThreadCounter);
+                    }
+                }
+            } while (rootReference.version >= oldestVersionToKeep &&
+                    (rootReference = rootReference.previous) != null);
+            return collector.getReferenced();
+        } finally {
+            executorService.shutdownNow();
+        }
     }
 
     final class ChunkIdsCollector {
 
-        private final Set<Integer> referenced = new HashSet<>();
+        /** really a set */
+        private final ConcurrentHashMap<Integer, Integer> referencedChunks = new ConcurrentHashMap<>();
         private final ChunkIdsCollector parent;
-        private ChunkIdsCollector child;
         private int mapId;
 
         ChunkIdsCollector(int mapId) {
@@ -1398,98 +1411,90 @@ public class MVStore {
         public void setMapId(int mapId) {
             this.mapId = mapId;
-            if (child != null) {
-                child.setMapId(mapId);
-            }
         }
 
         public Set<Integer> getReferenced() {
-            return referenced;
+            Set<Integer> set = new HashSet<>();
+            set.addAll(referencedChunks.keySet());
+            return set;
         }
 
-        public void visit(Page page) {
+        public void visit(Page page, ThreadPoolExecutor executorService, AtomicInteger executingThreadCounter) {
             long pos = page.getPos();
             if (DataUtils.isPageSaved(pos)) {
-                register(DataUtils.getPageChunkId(pos));
+                registerChunk(DataUtils.getPageChunkId(pos));
             }
             int count = page.map.getChildPageCount(page);
-            if (count > 0) {
-                ChunkIdsCollector childCollector = getChild();
-                for (int i = 0; i < count; i++) {
-                    Page childPage = page.getChildPageIfLoaded(i);
-                    if (childPage != null) {
-                        childCollector.visit(childPage);
-                    } else {
-                        childCollector.visit(page.getChildPagePos(i));
-                    }
-                }
-                // and cache resulting set of chunk ids
-                if (DataUtils.isPageSaved(pos) && cacheChunkRef != null) {
-                    int[] chunkIds = childCollector.getChunkIds();
-                    cacheChunkRef.put(pos, chunkIds, Constants.MEMORY_ARRAY + 4 * chunkIds.length);
-                }
+            if (count == 0) {
+                return;
+            }
+            final ChunkIdsCollector childCollector = new ChunkIdsCollector(this);
+            for (int i = 0; i < count; i++) {
+                Page childPage = page.getChildPageIfLoaded(i);
+                if (childPage != null) {
+                    childCollector.visit(childPage, executorService, executingThreadCounter);
+                } else {
+                    childCollector.visit(page.getChildPagePos(i), executorService, executingThreadCounter);
+                }
+            }
+            // and cache resulting set of chunk ids
+            if (DataUtils.isPageSaved(pos) && cacheChunkRef != null) {
+                int[] chunkIds = childCollector.getChunkIds();
+                cacheChunkRef.put(pos, chunkIds, Constants.MEMORY_ARRAY + 4 * chunkIds.length);
             }
         }
 
-        public void visit(long pos) {
+        public void visit(long pos, ThreadPoolExecutor executorService, AtomicInteger executingThreadCounter) {
             if (!DataUtils.isPageSaved(pos)) {
                 return;
             }
-            register(DataUtils.getPageChunkId(pos));
-            if (DataUtils.getPageType(pos) != DataUtils.PAGE_TYPE_LEAF) {
-                int chunkIds[];
-                if (cacheChunkRef != null && (chunkIds = cacheChunkRef.get(pos)) != null) {
-                    // there is a cached set of chunk ids for this position
-                    for (int chunkId : chunkIds) {
-                        register(chunkId);
-                    }
-                } else {
-                    ChunkIdsCollector childCollector = getChild();
-                    Page page;
-                    if (cache != null && (page = cache.get(pos)) != null) {
-                        // there is a full page in cache, use it
-                        childCollector.visit(page);
-                    } else {
-                        // page was not cached: read the data
-                        Chunk chunk = getChunk(pos);
-                        long filePos = chunk.block * BLOCK_SIZE;
-                        filePos += DataUtils.getPageOffset(pos);
-                        if (filePos < 0) {
-                            throw DataUtils.newIllegalStateException(
-                                    DataUtils.ERROR_FILE_CORRUPT,
-                                    "Negative position {0}; p={1}, c={2}",
-                                    filePos, pos, chunk.toString());
-                        }
-                        long maxPos = (chunk.block + chunk.len) * BLOCK_SIZE;
-                        Page.readChildrenPositions(fileStore, pos, filePos, maxPos, childCollector);
-                    }
-                    // and cache resulting set of chunk ids
-                    if (cacheChunkRef != null) {
-                        chunkIds = childCollector.getChunkIds();
-                        cacheChunkRef.put(pos, chunkIds, Constants.MEMORY_ARRAY + 4 * chunkIds.length);
-                    }
-                }
-            }
-        }
-
-        private ChunkIdsCollector getChild() {
-            if (child == null) {
-                child = new ChunkIdsCollector(this);
-            } else {
-                child.referenced.clear();
-            }
-            return child;
-        }
-
-        private void register(int chunkId) {
-            if (referenced.add(chunkId) && parent != null) {
-                parent.register(chunkId);
+            registerChunk(DataUtils.getPageChunkId(pos));
+            if (DataUtils.getPageType(pos) == DataUtils.PAGE_TYPE_LEAF) {
+                return;
+            }
+            int chunkIds[];
+            if (cacheChunkRef != null && (chunkIds = cacheChunkRef.get(pos)) != null) {
+                // there is a cached set of chunk ids for this position
+                for (int chunkId : chunkIds) {
+                    registerChunk(chunkId);
+                }
+            } else {
+                final ChunkIdsCollector childCollector = new ChunkIdsCollector(this);
+                Page page;
+                if (cache != null && (page = cache.get(pos)) != null) {
+                    // there is a full page in cache, use it
+                    childCollector.visit(page, executorService, executingThreadCounter);
+                } else {
+                    // page was not cached: read the data
+                    Chunk chunk = getChunk(pos);
+                    long filePos = chunk.block * BLOCK_SIZE;
+                    filePos += DataUtils.getPageOffset(pos);
+                    if (filePos < 0) {
+                        throw DataUtils.newIllegalStateException(
+                                DataUtils.ERROR_FILE_CORRUPT,
+                                "Negative position {0}; p={1}, c={2}",
+                                filePos, pos, chunk.toString());
+                    }
+                    long maxPos = (chunk.block + chunk.len) * BLOCK_SIZE;
+                    Page.readChildrenPositions(fileStore, pos, filePos, maxPos,
+                            childCollector, executorService, executingThreadCounter);
+                }
+                // and cache resulting set of chunk ids
+                if (cacheChunkRef != null) {
+                    chunkIds = childCollector.getChunkIds();
+                    cacheChunkRef.put(pos, chunkIds, Constants.MEMORY_ARRAY + 4 * chunkIds.length);
+                }
             }
         }
 
+        private void registerChunk(int chunkId) {
+            if (referencedChunks.put(chunkId, 1) == null && parent != null) {
+                parent.registerChunk(chunkId);
+            }
+        }
+
         private int[] getChunkIds() {
-            int chunkIds[] = new int[referenced.size()];
+            int chunkIds[] = new int[referencedChunks.size()];
             int index = 0;
-            for (int chunkId : referenced) {
+            for (Integer chunkId : referencedChunks.keySet()) {
                 chunkIds[index++] = chunkId;
             }
             return chunkIds;
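One detail worth calling out in the ChunkIdsCollector hunk above: the per-collector HashSet named referenced becomes a ConcurrentHashMap named referencedChunks used as a set, because chunk ids can now be registered from several pool threads at once, and the reused child collector (getChild) gives way to a fresh ChunkIdsCollector per visit. A short sketch of the map-as-set idiom with hypothetical names (IdCollector is not an H2 class; the value 1 is just a dummy, as in the diff):

    import java.util.HashSet;
    import java.util.Set;
    import java.util.concurrent.ConcurrentHashMap;

    // Thread-safe "register each id once, bubble it up to the parent" collector.
    class IdCollector {
        private final ConcurrentHashMap<Integer, Integer> ids = new ConcurrentHashMap<>();
        private final IdCollector parent;

        IdCollector(IdCollector parent) {
            this.parent = parent;
        }

        void register(int id) {
            // put() returns null only for the thread that inserts the key first,
            // so the parent sees each id exactly once even under concurrent calls.
            if (ids.put(id, 1) == null && parent != null) {
                parent.register(id);
            }
        }

        Set<Integer> snapshot() {
            return new HashSet<>(ids.keySet());
        }

        public static void main(String[] args) {
            IdCollector root = new IdCollector(null);
            IdCollector child = new IdCollector(root);
            child.register(7);
            child.register(7);                    // duplicate: not propagated a second time
            System.out.println(root.snapshot());  // [7]
        }
    }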
h2/src/main/org/h2/mvstore/Page.java
@@ -5,15 +5,22 @@
  */
 package org.h2.mvstore;
 
+import static org.h2.engine.Constants.MEMORY_ARRAY;
+import static org.h2.engine.Constants.MEMORY_OBJECT;
+import static org.h2.engine.Constants.MEMORY_POINTER;
+import static org.h2.mvstore.DataUtils.PAGE_TYPE_LEAF;
+
 import java.nio.ByteBuffer;
+import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.List;
+import java.util.concurrent.ExecutionException;
+import java.util.concurrent.Future;
+import java.util.concurrent.ThreadPoolExecutor;
+import java.util.concurrent.atomic.AtomicInteger;
 import org.h2.compress.Compressor;
+import org.h2.message.DbException;
 import org.h2.mvstore.type.DataType;
 import org.h2.util.Utils;
-import static org.h2.engine.Constants.MEMORY_ARRAY;
-import static org.h2.engine.Constants.MEMORY_OBJECT;
-import static org.h2.engine.Constants.MEMORY_POINTER;
-import static org.h2.mvstore.DataUtils.PAGE_TYPE_LEAF;
 
 /**
  * A page (a node or a leaf).
@@ -247,9 +254,9 @@ public abstract class Page implements Cloneable
      * @param maxPos the maximum position (the end of the chunk)
      * @param collector to report child pages positions to
      */
-    static void readChildrenPositions(FileStore fileStore, long pos,
-                                      long filePos, long maxPos,
-                                      MVStore.ChunkIdsCollector collector) {
+    static void readChildrenPositions(FileStore fileStore, long pos, long filePos, long maxPos,
+            final MVStore.ChunkIdsCollector collector, final ThreadPoolExecutor executorService,
+            final AtomicInteger executingThreadCounter) {
         ByteBuffer buff;
         int maxLength = DataUtils.getPageMaxLength(pos);
         if (maxLength == DataUtils.PAGE_LARGE) {
@@ -260,10 +267,8 @@ public abstract class Page implements Cloneable
         maxLength = (int) Math.min(maxPos - filePos, maxLength);
         int length = maxLength;
         if (length < 0) {
-            throw DataUtils.newIllegalStateException(
-                    DataUtils.ERROR_FILE_CORRUPT,
-                    "Illegal page length {0} reading at {1}; max pos {2} ",
-                    length, filePos, maxPos);
+            throw DataUtils.newIllegalStateException(DataUtils.ERROR_FILE_CORRUPT,
+                    "Illegal page length {0} reading at {1}; max pos {2} ", length, filePos, maxPos);
         }
         buff = fileStore.readFully(filePos, length);
         int chunkId = DataUtils.getPageChunkId(pos);
@@ -271,39 +276,70 @@ public abstract class Page implements Cloneable
         int start = buff.position();
         int pageLength = buff.getInt();
         if (pageLength > maxLength) {
-            throw DataUtils.newIllegalStateException(
-                    DataUtils.ERROR_FILE_CORRUPT,
-                    "File corrupted in chunk {0}, expected page length =< {1}, got {2}",
-                    chunkId, maxLength, pageLength);
+            throw DataUtils.newIllegalStateException(DataUtils.ERROR_FILE_CORRUPT,
+                    "File corrupted in chunk {0}, expected page length =< {1}, got {2}", chunkId, maxLength,
+                    pageLength);
         }
         buff.limit(start + pageLength);
         short check = buff.getShort();
         int m = DataUtils.readVarInt(buff);
         int mapId = collector.getMapId();
         if (m != mapId) {
-            throw DataUtils.newIllegalStateException(
-                    DataUtils.ERROR_FILE_CORRUPT,
-                    "File corrupted in chunk {0}, expected map id {1}, got {2}",
-                    chunkId, mapId, m);
+            throw DataUtils.newIllegalStateException(DataUtils.ERROR_FILE_CORRUPT,
+                    "File corrupted in chunk {0}, expected map id {1}, got {2}", chunkId, mapId, m);
         }
-        int checkTest = DataUtils.getCheckValue(chunkId)
-                ^ DataUtils.getCheckValue(offset)
+        int checkTest = DataUtils.getCheckValue(chunkId) ^ DataUtils.getCheckValue(offset)
                 ^ DataUtils.getCheckValue(pageLength);
         if (check != (short) checkTest) {
-            throw DataUtils.newIllegalStateException(
-                    DataUtils.ERROR_FILE_CORRUPT,
-                    "File corrupted in chunk {0}, expected check value {1}, got {2}",
-                    chunkId, checkTest, check);
+            throw DataUtils.newIllegalStateException(DataUtils.ERROR_FILE_CORRUPT,
+                    "File corrupted in chunk {0}, expected check value {1}, got {2}", chunkId, checkTest, check);
         }
         int len = DataUtils.readVarInt(buff);
         int type = buff.get();
         if ((type & 1) != DataUtils.PAGE_TYPE_NODE) {
-            throw DataUtils.newIllegalStateException(
-                    DataUtils.ERROR_FILE_CORRUPT,
+            throw DataUtils.newIllegalStateException(DataUtils.ERROR_FILE_CORRUPT,
                     "Position {0} expected to be a non-leaf", pos);
         }
+        /**
+         * The logic here is a little awkward. We want to (a) execute reads in parallel, but (b)
+         * limit the number of threads we create. This is complicated by (a) the algorithm is
+         * recursive and needs to wait for children before returning up the call-stack, (b) checking
+         * the size of the thread-pool is not reliable.
+         */
+        final List<Future<?>> futures = new ArrayList<>(len);
         for (int i = 0; i <= len; i++) {
-            collector.visit(buff.getLong());
+            final long childPagePos = buff.getLong();
+            for (;;) {
+                int counter = executingThreadCounter.get();
+                if (counter >= executorService.getMaximumPoolSize()) {
+                    collector.visit(childPagePos, executorService, executingThreadCounter);
+                    break;
+                } else {
+                    if (executingThreadCounter.compareAndSet(counter, counter + 1)) {
+                        Future<?> f = executorService.submit(new Runnable() {
+                            @Override
+                            public void run() {
+                                try {
+                                    collector.visit(childPagePos, executorService, executingThreadCounter);
+                                } finally {
+                                    executingThreadCounter.decrementAndGet();
+                                }
+                            }
+                        });
+                        futures.add(f);
+                        break;
+                    }
+                }
+            }
+        }
+        for (Future<?> f : futures) {
+            try {
+                f.get();
+            } catch (InterruptedException ex) {
+                throw new RuntimeException(ex);
+            } catch (ExecutionException ex) {
+                throw DbException.convert(ex);
+            }
+        }
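The tail of readChildrenPositions above waits on every submitted child read before returning, so the recursive scan still behaves synchronously from the caller's point of view, and worker failures are rethrown on the calling thread (InterruptedException wrapped in a RuntimeException, ExecutionException routed through DbException.convert). A stand-alone sketch of that join step; it substitutes a plain RuntimeException for DbException so it compiles outside H2 and, unlike the diff, also restores the interrupt flag:

    import java.util.ArrayList;
    import java.util.List;
    import java.util.concurrent.ExecutionException;
    import java.util.concurrent.ExecutorService;
    import java.util.concurrent.Executors;
    import java.util.concurrent.Future;

    // Sketch of the join step: wait for every submitted child read, then surface failures.
    public class JoinFutures {
        static void joinAll(List<Future<?>> futures) {
            for (Future<?> f : futures) {
                try {
                    f.get();                               // block until this task is done
                } catch (InterruptedException ex) {
                    Thread.currentThread().interrupt();    // restore the interrupt flag
                    throw new RuntimeException(ex);
                } catch (ExecutionException ex) {
                    // the diff uses DbException.convert(ex); a plain unchecked wrapper is used here
                    throw new RuntimeException(ex.getCause());
                }
            }
        }

        public static void main(String[] args) {
            ExecutorService pool = Executors.newFixedThreadPool(2);
            List<Future<?>> futures = new ArrayList<>();
            for (int i = 0; i < 4; i++) {
                final int n = i;
                futures.add(pool.submit(new Runnable() {
                    @Override
                    public void run() {
                        System.out.println("read child " + n);
                    }
                }));
            }
            joinAll(futures);
            pool.shutdown();
        }
    }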