h2database · Commits · 36c4613d

Commit 36c4613d, authored Sep 06, 2018 by tledkov-gridgain

Merge branch 'master' into localresult-interface

Parents: c4f30b28, 7d989e4f

Showing 6 changed files with 179 additions and 181 deletions.
h2/src/docsrc/html/changelog.html             +10    -0
h2/src/main/org/h2/command/Parser.java         +2   -42
h2/src/main/org/h2/engine/GeneratedKeys.java   +1    -1
h2/src/main/org/h2/engine/SysProperties.java   +0    -8
h2/src/main/org/h2/mvstore/MVStore.java      +102  -102
h2/src/main/org/h2/mvstore/Page.java          +64   -28
h2/src/docsrc/html/changelog.html

```diff
@@ -21,6 +21,16 @@ Change Log
 <h2>Next Version (unreleased)</h2>
 <ul>
+<li>Issue #1421: Remove old-style outer join
+</li>
+<li>PR #1419: Assorted minor changes
+</li>
+<li>PR #1414: DEFRAG and COMPACT mixup
+</li>
+<li>PR #1413: improvements to MVStore garbage collection
+</li>
+<li>PR #1412: Added org.h2.store.fs package to exported osgi bundles
+</li>
 <li>PR #1409: Map all remaining error codes to custom exception classes
 </li>
 <li>Issue #1407: Add a MODE() aggregate function
```
h2/src/main/org/h2/command/Parser.java

```diff
@@ -141,7 +141,6 @@ import org.h2.engine.Mode.ModeEnum;
 import org.h2.engine.Procedure;
 import org.h2.engine.Right;
 import org.h2.engine.Session;
-import org.h2.engine.SysProperties;
 import org.h2.engine.User;
 import org.h2.engine.UserAggregate;
 import org.h2.engine.UserDataType;
@@ -2809,41 +2808,7 @@ public class Parser {
                 }
                 read(CLOSE_PAREN);
             } else {
-                Expression right = readConcat();
-                if (SysProperties.OLD_STYLE_OUTER_JOIN &&
-                        readIf(OPEN_PAREN) && readIf(PLUS_SIGN) && readIf(CLOSE_PAREN)) {
-                    // support for a subset of old-fashioned Oracle outer
-                    // join with (+)
-                    if (r instanceof ExpressionColumn &&
-                            right instanceof ExpressionColumn) {
-                        ExpressionColumn leftCol = (ExpressionColumn) r;
-                        ExpressionColumn rightCol = (ExpressionColumn) right;
-                        ArrayList<TableFilter> filters = currentSelect.getTopFilters();
-                        for (TableFilter f : filters) {
-                            while (f != null) {
-                                leftCol.mapColumns(f, 0);
-                                rightCol.mapColumns(f, 0);
-                                f = f.getJoin();
-                            }
-                        }
-                        TableFilter leftFilter = leftCol.getTableFilter();
-                        TableFilter rightFilter = rightCol.getTableFilter();
-                        r = new Comparison(session, compareType, r, right);
-                        if (leftFilter != null && rightFilter != null) {
-                            int idx = filters.indexOf(rightFilter);
-                            if (idx >= 0) {
-                                filters.remove(idx);
-                                leftFilter.addJoin(rightFilter, true, r);
-                            } else {
-                                rightFilter.mapAndAddFilter(r);
-                            }
-                            r = ValueExpression.get(ValueBoolean.TRUE);
-                        }
-                    }
-                } else {
-                    r = new Comparison(session, compareType, r, right);
-                }
+                r = new Comparison(session, compareType, r, readConcat());
             }
         }
         if (not) {
@@ -3313,12 +3278,7 @@ public class Parser {
         }
         String name = readColumnIdentifier();
         Schema s = database.findSchema(objectName);
-        if ((!SysProperties.OLD_STYLE_OUTER_JOIN || s != null) && readIf(OPEN_PAREN)) {
-            // only if the token before the dot is a valid schema name,
-            // otherwise the old style Oracle outer join doesn't work:
-            // t.x = t2.x(+)
-            // this additional check is not required
-            // if the old style outer joins are not supported
+        if (readIf(OPEN_PAREN)) {
             return readFunction(s, name);
         } else if (readIf(DOT)) {
             String schema = objectName;
```
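With these hunks the `(+)` parsing path is gone, so any query that relied on `h2.oldStyleOuterJoin` has to be rewritten in standard SQL before upgrading. A minimal, hypothetical JDBC sketch of the rewrite; the `t`/`t2` names come from the comment in the removed code, while the in-memory URL and data are purely illustrative:

```java
import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.ResultSet;
import java.sql.Statement;

public class OuterJoinMigration {
    public static void main(String[] args) throws Exception {
        try (Connection conn = DriverManager.getConnection("jdbc:h2:mem:test");
             Statement stat = conn.createStatement()) {
            stat.execute("CREATE TABLE t(x INT)");
            stat.execute("CREATE TABLE t2(x INT)");
            stat.execute("INSERT INTO t VALUES (1), (2)");
            stat.execute("INSERT INTO t2 VALUES (1)");
            // Before this change (and only with -Dh2.oldStyleOuterJoin=true):
            //   SELECT t.x, t2.x FROM t, t2 WHERE t.x = t2.x(+)
            // Standard-SQL replacement that keeps working:
            try (ResultSet rs = stat.executeQuery(
                    "SELECT t.x, t2.x FROM t LEFT OUTER JOIN t2 ON t.x = t2.x")) {
                while (rs.next()) {
                    System.out.println(rs.getInt(1) + " -> " + rs.getObject(2));
                }
            }
        }
    }
}
```

As the removed comments note, the `(+)` support only ever covered a subset of the Oracle semantics, which is why the migration is a plain SQL rewrite rather than a new compatibility flag.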
h2/src/main/org/h2/engine/GeneratedKeys.java

```diff
@@ -126,7 +126,7 @@ public final class GeneratedKeys {
      * @return local result with generated keys
      */
     public LocalResult getKeys(Session session) {
-        Database db = session == null ? null : session.getDatabase();
+        Database db = session.getDatabase();
         if (Boolean.FALSE.equals(generatedKeysRequest)) {
             clear(null);
             return LocalResultFactory.createRow(session);
```
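The `getKeys` method above is the engine side of JDBC generated-key retrieval (its result ultimately backs `Statement.getGeneratedKeys()`), and it now assumes a non-null session. For context, a client-side usage sketch; the table name and URL are illustrative, but the JDBC calls themselves are standard:

```java
import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.ResultSet;
import java.sql.Statement;

public class GeneratedKeysUsage {
    public static void main(String[] args) throws Exception {
        try (Connection conn = DriverManager.getConnection("jdbc:h2:mem:test");
             Statement stat = conn.createStatement()) {
            stat.execute("CREATE TABLE items(id IDENTITY, name VARCHAR)");
            // request generated keys for this insert
            stat.execute("INSERT INTO items(name) VALUES ('a')",
                    Statement.RETURN_GENERATED_KEYS);
            try (ResultSet keys = stat.getGeneratedKeys()) {
                while (keys.next()) {
                    System.out.println("generated id: " + keys.getLong(1));
                }
            }
        }
    }
}
```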
h2/src/main/org/h2/engine/SysProperties.java

```diff
@@ -326,14 +326,6 @@ public class SysProperties {
         }
     }
 
-    /**
-     * System property <code>h2.oldStyleOuterJoin</code>
-     * (default: false).<br />
-     * Limited support for the old-style Oracle outer join with "(+)".
-     */
-    public static final boolean OLD_STYLE_OUTER_JOIN =
-            Utils.getProperty("h2.oldStyleOuterJoin", false);
-
     /**
      * System property {@code h2.oldResultSetGetObject}, {@code true} by default
      * unless {@code h2.preview} is enabled.
```
h2/src/main/org/h2/mvstore/MVStore.java

```diff
@@ -5,6 +5,7 @@
  */
 package org.h2.mvstore;
 
+import static org.h2.mvstore.MVMap.INITIAL_VERSION;
 import java.lang.Thread.UncaughtExceptionHandler;
 import java.nio.ByteBuffer;
 import java.nio.charset.StandardCharsets;
@@ -21,7 +22,9 @@ import java.util.Map;
 import java.util.PriorityQueue;
 import java.util.Queue;
 import java.util.Set;
+import java.util.concurrent.ArrayBlockingQueue;
 import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.ThreadPoolExecutor;
 import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicInteger;
 import java.util.concurrent.atomic.AtomicLong;
@@ -32,7 +35,6 @@ import org.h2.compress.Compressor;
 import org.h2.engine.Constants;
 import org.h2.mvstore.cache.CacheLongKeyLIRS;
 import org.h2.util.MathUtils;
-import static org.h2.mvstore.MVMap.INITIAL_VERSION;
 import org.h2.util.Utils;
 
 /*
```
```diff
@@ -1337,49 +1339,55 @@ public class MVStore {
     }
 
     private Set<Integer> collectReferencedChunks() {
-        ChunkIdsCollector collector = new ChunkIdsCollector(meta.getId());
-        Set<Long> inspectedRoots = new HashSet<>();
-        long pos = lastChunk.metaRootPos;
-        inspectedRoots.add(pos);
-        collector.visit(pos);
-        long oldestVersionToKeep = getOldestVersionToKeep();
-        MVMap.RootReference rootReference = meta.getRoot();
-        do {
-            Page rootPage = rootReference.root;
-            pos = rootPage.getPos();
-            if (!rootPage.isSaved()) {
-                collector.setMapId(meta.getId());
-                collector.visit(rootPage);
-            } else if (inspectedRoots.add(pos)) {
-                collector.setMapId(meta.getId());
-                collector.visit(pos);
-            }
-            for (Cursor<String, String> c = new Cursor<>(rootPage, "root."); c.hasNext();) {
-                String key = c.next();
-                assert key != null;
-                if (!key.startsWith("root.")) {
-                    break;
-                }
-                pos = DataUtils.parseHexLong(c.getValue());
-                if (DataUtils.isPageSaved(pos) && inspectedRoots.add(pos)) {
-                    // to allow for something like "root.tmp.123" to be processed
-                    int mapId = DataUtils.parseHexInt(key.substring(key.lastIndexOf('.') + 1));
-                    collector.setMapId(mapId);
-                    collector.visit(pos);
-                }
-            }
-        } while (rootReference.version >= oldestVersionToKeep &&
-                (rootReference = rootReference.previous) != null);
-        return collector.getReferenced();
+        final ThreadPoolExecutor executorService = new ThreadPoolExecutor(10, 10, 10L, TimeUnit.SECONDS,
+                new ArrayBlockingQueue<Runnable>(keysPerPage + 1));
+        final AtomicInteger executingThreadCounter = new AtomicInteger(0);
+        try {
+            ChunkIdsCollector collector = new ChunkIdsCollector(meta.getId());
+            Set<Long> inspectedRoots = new HashSet<>();
+            long pos = lastChunk.metaRootPos;
+            inspectedRoots.add(pos);
+            collector.visit(pos, executorService, executingThreadCounter);
+            long oldestVersionToKeep = getOldestVersionToKeep();
+            MVMap.RootReference rootReference = meta.getRoot();
+            do {
+                Page rootPage = rootReference.root;
+                pos = rootPage.getPos();
+                if (!rootPage.isSaved()) {
+                    collector.setMapId(meta.getId());
+                    collector.visit(rootPage, executorService, executingThreadCounter);
+                } else if (inspectedRoots.add(pos)) {
+                    collector.setMapId(meta.getId());
+                    collector.visit(pos, executorService, executingThreadCounter);
+                }
+                for (Cursor<String, String> c = new Cursor<>(rootPage, "root."); c.hasNext();) {
+                    String key = c.next();
+                    assert key != null;
+                    if (!key.startsWith("root.")) {
+                        break;
+                    }
+                    pos = DataUtils.parseHexLong(c.getValue());
+                    if (DataUtils.isPageSaved(pos) && inspectedRoots.add(pos)) {
+                        // to allow for something like "root.tmp.123" to be
+                        // processed
+                        int mapId = DataUtils.parseHexInt(key.substring(key.lastIndexOf('.') + 1));
+                        collector.setMapId(mapId);
+                        collector.visit(pos, executorService, executingThreadCounter);
+                    }
+                }
+            } while (rootReference.version >= oldestVersionToKeep &&
+                    (rootReference = rootReference.previous) != null);
+            return collector.getReferenced();
+        } finally {
+            executorService.shutdownNow();
+        }
     }
 
     final class ChunkIdsCollector {
-        private final Set<Integer> referenced = new HashSet<>();
+
+        /** really a set */
+        private final ConcurrentHashMap<Integer, Integer> referencedChunks = new ConcurrentHashMap<>();
         private final ChunkIdsCollector parent;
         private ChunkIdsCollector child;
         private int mapId;
 
         ChunkIdsCollector(int mapId) {
```
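The new `collectReferencedChunks` brackets the traversal with a create/tear-down pair: a fixed pool of ten threads over a bounded `ArrayBlockingQueue` (sized `keysPerPage + 1` in the diff), shut down in `finally` so no threads leak if the traversal throws. A minimal standalone sketch of that lifecycle, with illustrative pool and queue sizes:

```java
import java.util.concurrent.ArrayBlockingQueue;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;

public class BoundedPoolLifecycle {
    static void traverse(Runnable... tasks) {
        // fixed-size pool: core == max == 10, idle threads time out after 10s
        ThreadPoolExecutor pool = new ThreadPoolExecutor(10, 10, 10L,
                TimeUnit.SECONDS, new ArrayBlockingQueue<Runnable>(100));
        try {
            for (Runnable task : tasks) {
                pool.submit(task);
            }
            // ... wait for the submitted work as needed ...
        } finally {
            // always tear the pool down, even on exceptions
            pool.shutdownNow();
        }
    }

    public static void main(String[] args) {
        traverse(() -> System.out.println("visiting a page"));
    }
}
```

With a bounded queue the executor's default policy is to reject submissions once the queue fills up; the collector avoids ever reaching that point by capping in-flight tasks with an `AtomicInteger`, as the next hunk shows.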
```diff
@@ -1398,98 +1406,90 @@ public class MVStore {
         public void setMapId(int mapId) {
             this.mapId = mapId;
             if (child != null) {
                 child.setMapId(mapId);
             }
         }
 
         public Set<Integer> getReferenced() {
-            return referenced;
+            Set<Integer> set = new HashSet<>();
+            set.addAll(referencedChunks.keySet());
+            return set;
         }
 
-        public void visit(Page page) {
+        public void visit(Page page, ThreadPoolExecutor executorService,
+                AtomicInteger executingThreadCounter) {
             long pos = page.getPos();
             if (DataUtils.isPageSaved(pos)) {
-                register(DataUtils.getPageChunkId(pos));
+                registerChunk(DataUtils.getPageChunkId(pos));
             }
             int count = page.map.getChildPageCount(page);
-            if (count > 0) {
-                ChunkIdsCollector childCollector = getChild();
-                for (int i = 0; i < count; i++) {
-                    Page childPage = page.getChildPageIfLoaded(i);
-                    if (childPage != null) {
-                        childCollector.visit(childPage);
-                    } else {
-                        childCollector.visit(page.getChildPagePos(i));
-                    }
-                }
-                // and cache resulting set of chunk ids
-                if (DataUtils.isPageSaved(pos) && cacheChunkRef != null) {
-                    int[] chunkIds = childCollector.getChunkIds();
-                    cacheChunkRef.put(pos, chunkIds,
-                            Constants.MEMORY_ARRAY + 4 * chunkIds.length);
+            if (count == 0) {
+                return;
+            }
+            final ChunkIdsCollector childCollector = new ChunkIdsCollector(this);
+            for (int i = 0; i < count; i++) {
+                Page childPage = page.getChildPageIfLoaded(i);
+                if (childPage != null) {
+                    childCollector.visit(childPage, executorService, executingThreadCounter);
+                } else {
+                    childCollector.visit(page.getChildPagePos(i), executorService, executingThreadCounter);
                 }
             }
+            // and cache resulting set of chunk ids
+            if (DataUtils.isPageSaved(pos) && cacheChunkRef != null) {
+                int[] chunkIds = childCollector.getChunkIds();
+                cacheChunkRef.put(pos, chunkIds,
+                        Constants.MEMORY_ARRAY + 4 * chunkIds.length);
+            }
         }
 
-        public void visit(long pos) {
+        public void visit(long pos, ThreadPoolExecutor executorService,
+                AtomicInteger executingThreadCounter) {
             if (!DataUtils.isPageSaved(pos)) {
                 return;
             }
-            register(DataUtils.getPageChunkId(pos));
-            if (DataUtils.getPageType(pos) != DataUtils.PAGE_TYPE_LEAF) {
-                int chunkIds[];
-                if (cacheChunkRef != null &&
-                        (chunkIds = cacheChunkRef.get(pos)) != null) {
-                    // there is a cached set of chunk ids for this position
-                    for (int chunkId : chunkIds) {
-                        register(chunkId);
-                    }
-                } else {
-                    ChunkIdsCollector childCollector = getChild();
-                    Page page;
-                    if (cache != null && (page = cache.get(pos)) != null) {
-                        // there is a full page in cache, use it
-                        childCollector.visit(page);
-                    } else {
-                        // page was not cached: read the data
-                        Chunk chunk = getChunk(pos);
-                        long filePos = chunk.block * BLOCK_SIZE;
-                        filePos += DataUtils.getPageOffset(pos);
-                        if (filePos < 0) {
-                            throw DataUtils.newIllegalStateException(
-                                    DataUtils.ERROR_FILE_CORRUPT,
-                                    "Negative position {0}; p={1}, c={2}",
-                                    filePos, pos, chunk.toString());
-                        }
-                        long maxPos = (chunk.block + chunk.len) * BLOCK_SIZE;
-                        Page.readChildrenPositions(fileStore, pos, filePos,
-                                maxPos, childCollector);
-                    }
-                    // and cache resulting set of chunk ids
-                    if (cacheChunkRef != null) {
-                        chunkIds = childCollector.getChunkIds();
-                        cacheChunkRef.put(pos, chunkIds,
-                                Constants.MEMORY_ARRAY + 4 * chunkIds.length);
-                    }
+            registerChunk(DataUtils.getPageChunkId(pos));
+            if (DataUtils.getPageType(pos) == DataUtils.PAGE_TYPE_LEAF) {
+                return;
+            }
+            int chunkIds[];
+            if (cacheChunkRef != null &&
+                    (chunkIds = cacheChunkRef.get(pos)) != null) {
+                // there is a cached set of chunk ids for this position
+                for (int chunkId : chunkIds) {
+                    registerChunk(chunkId);
+                }
+            } else {
+                final ChunkIdsCollector childCollector = new ChunkIdsCollector(this);
+                Page page;
+                if (cache != null && (page = cache.get(pos)) != null) {
+                    // there is a full page in cache, use it
+                    childCollector.visit(page, executorService, executingThreadCounter);
+                } else {
+                    // page was not cached: read the data
+                    Chunk chunk = getChunk(pos);
+                    long filePos = chunk.block * BLOCK_SIZE;
+                    filePos += DataUtils.getPageOffset(pos);
+                    if (filePos < 0) {
+                        throw DataUtils.newIllegalStateException(
+                                DataUtils.ERROR_FILE_CORRUPT,
+                                "Negative position {0}; p={1}, c={2}",
+                                filePos, pos, chunk.toString());
+                    }
+                    long maxPos = (chunk.block + chunk.len) * BLOCK_SIZE;
+                    Page.readChildrenPositions(fileStore, pos, filePos, maxPos,
+                            childCollector, executorService, executingThreadCounter);
+                }
+                // and cache resulting set of chunk ids
+                if (cacheChunkRef != null) {
+                    chunkIds = childCollector.getChunkIds();
+                    cacheChunkRef.put(pos, chunkIds,
+                            Constants.MEMORY_ARRAY + 4 * chunkIds.length);
                 }
             }
         }
 
-        private ChunkIdsCollector getChild() {
-            if (child == null) {
-                child = new ChunkIdsCollector(this);
-            } else {
-                child.referenced.clear();
-            }
-            return child;
-        }
-
-        private void register(int chunkId) {
-            if (referenced.add(chunkId) && parent != null) {
-                parent.register(chunkId);
+        private void registerChunk(int chunkId) {
+            if (referencedChunks.put(chunkId, 1) == null && parent != null) {
+                parent.registerChunk(chunkId);
             }
         }
 
         private int[] getChunkIds() {
-            int chunkIds[] = new int[referenced.size()];
+            int chunkIds[] = new int[referencedChunks.size()];
             int index = 0;
-            for (int chunkId : referenced) {
+            for (Integer chunkId : referencedChunks.keySet()) {
                 chunkIds[index++] = chunkId;
             }
             return chunkIds;
```
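`registerChunk` exploits the contract that `ConcurrentHashMap.put` returns `null` exactly when the key was absent, so the map (commented `/** really a set */`) acts as a lock-free add-if-absent set and each chunk id propagates to the parent collector only once. A self-contained sketch of the idiom:

```java
import java.util.concurrent.ConcurrentHashMap;

public class ConcurrentSetIdiom {
    // dummy Integer values; only the key set matters ("really a set")
    private final ConcurrentHashMap<Integer, Integer> seen = new ConcurrentHashMap<>();

    /** @return true only the first time this id is registered */
    boolean register(int id) {
        return seen.put(id, 1) == null;
    }

    public static void main(String[] args) {
        ConcurrentSetIdiom s = new ConcurrentSetIdiom();
        System.out.println(s.register(42)); // true: newly added
        System.out.println(s.register(42)); // false: already present
    }
}
```

On Java 8+ the same intent can be written as `ConcurrentHashMap.newKeySet()`, whose `add` gives the same first-time signal; the dummy-value form used in the diff presumably keeps the code compatible with older JVMs.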
h2/src/main/org/h2/mvstore/Page.java

```diff
@@ -5,15 +5,22 @@
  */
 package org.h2.mvstore;
 
+import static org.h2.engine.Constants.MEMORY_ARRAY;
+import static org.h2.engine.Constants.MEMORY_OBJECT;
+import static org.h2.engine.Constants.MEMORY_POINTER;
+import static org.h2.mvstore.DataUtils.PAGE_TYPE_LEAF;
 import java.nio.ByteBuffer;
+import java.util.ArrayList;
 import java.util.Arrays;
+import java.util.List;
+import java.util.concurrent.ExecutionException;
+import java.util.concurrent.Future;
+import java.util.concurrent.ThreadPoolExecutor;
+import java.util.concurrent.atomic.AtomicInteger;
 import org.h2.compress.Compressor;
+import org.h2.message.DbException;
 import org.h2.mvstore.type.DataType;
 import org.h2.util.Utils;
-import static org.h2.engine.Constants.MEMORY_ARRAY;
-import static org.h2.engine.Constants.MEMORY_OBJECT;
-import static org.h2.engine.Constants.MEMORY_POINTER;
-import static org.h2.mvstore.DataUtils.PAGE_TYPE_LEAF;
 
 /**
  * A page (a node or a leaf).
```
```diff
@@ -247,9 +254,9 @@ public abstract class Page implements Cloneable
      * @param maxPos the maximum position (the end of the chunk)
      * @param collector to report child pages positions to
      */
-    static void readChildrenPositions(FileStore fileStore, long pos,
-            long filePos, long maxPos,
-            MVStore.ChunkIdsCollector collector) {
+    static void readChildrenPositions(FileStore fileStore, long pos,
+            long filePos, long maxPos, final MVStore.ChunkIdsCollector collector,
+            final ThreadPoolExecutor executorService, final AtomicInteger executingThreadCounter) {
         ByteBuffer buff;
         int maxLength = DataUtils.getPageMaxLength(pos);
         if (maxLength == DataUtils.PAGE_LARGE) {
```
```diff
@@ -260,10 +267,8 @@ public abstract class Page implements Cloneable
         maxLength = (int) Math.min(maxPos - filePos, maxLength);
         int length = maxLength;
         if (length < 0) {
-            throw DataUtils.newIllegalStateException(
-                    DataUtils.ERROR_FILE_CORRUPT,
-                    "Illegal page length {0} reading at {1}; max pos {2} ",
-                    length, filePos, maxPos);
+            throw DataUtils.newIllegalStateException(DataUtils.ERROR_FILE_CORRUPT,
+                    "Illegal page length {0} reading at {1}; max pos {2} ", length, filePos, maxPos);
         }
         buff = fileStore.readFully(filePos, length);
         int chunkId = DataUtils.getPageChunkId(pos);
```
```diff
@@ -271,39 +276,70 @@ public abstract class Page implements Cloneable
         int start = buff.position();
         int pageLength = buff.getInt();
         if (pageLength > maxLength) {
-            throw DataUtils.newIllegalStateException(
-                    DataUtils.ERROR_FILE_CORRUPT,
-                    "File corrupted in chunk {0}, expected page length =< {1}, got {2}",
-                    chunkId, maxLength, pageLength);
+            throw DataUtils.newIllegalStateException(DataUtils.ERROR_FILE_CORRUPT,
+                    "File corrupted in chunk {0}, expected page length =< {1}, got {2}",
+                    chunkId, maxLength, pageLength);
         }
         buff.limit(start + pageLength);
         short check = buff.getShort();
         int m = DataUtils.readVarInt(buff);
         int mapId = collector.getMapId();
         if (m != mapId) {
-            throw DataUtils.newIllegalStateException(
-                    DataUtils.ERROR_FILE_CORRUPT,
-                    "File corrupted in chunk {0}, expected map id {1}, got {2}",
-                    chunkId, mapId, m);
+            throw DataUtils.newIllegalStateException(DataUtils.ERROR_FILE_CORRUPT,
+                    "File corrupted in chunk {0}, expected map id {1}, got {2}", chunkId, mapId, m);
         }
-        int checkTest = DataUtils.getCheckValue(chunkId)
-                ^ DataUtils.getCheckValue(offset)
+        int checkTest = DataUtils.getCheckValue(chunkId) ^ DataUtils.getCheckValue(offset)
                 ^ DataUtils.getCheckValue(pageLength);
         if (check != (short) checkTest) {
-            throw DataUtils.newIllegalStateException(
-                    DataUtils.ERROR_FILE_CORRUPT,
-                    "File corrupted in chunk {0}, expected check value {1}, got {2}",
-                    chunkId, checkTest, check);
+            throw DataUtils.newIllegalStateException(DataUtils.ERROR_FILE_CORRUPT,
+                    "File corrupted in chunk {0}, expected check value {1}, got {2}",
+                    chunkId, checkTest, check);
         }
         int len = DataUtils.readVarInt(buff);
         int type = buff.get();
         if ((type & 1) != DataUtils.PAGE_TYPE_NODE) {
-            throw DataUtils.newIllegalStateException(
-                    DataUtils.ERROR_FILE_CORRUPT,
+            throw DataUtils.newIllegalStateException(DataUtils.ERROR_FILE_CORRUPT,
                     "Position {0} expected to be a non-leaf", pos);
         }
+        /**
+         * The logic here is a little awkward. We want to (a) execute reads in parallel, but (b)
+         * limit the number of threads we create. This is complicated by (a) the algorithm is
+         * recursive and needs to wait for children before returning up the call-stack, (b) checking
+         * the size of the thread-pool is not reliable.
+         */
+        final List<Future<?>> futures = new ArrayList<>(len);
         for (int i = 0; i <= len; i++) {
-            collector.visit(buff.getLong());
+            final long childPagePos = buff.getLong();
+            for (;;) {
+                int counter = executingThreadCounter.get();
+                if (counter >= executorService.getMaximumPoolSize()) {
+                    collector.visit(childPagePos, executorService, executingThreadCounter);
+                    break;
+                } else {
+                    if (executingThreadCounter.compareAndSet(counter, counter + 1)) {
+                        Future<?> f = executorService.submit(new Runnable() {
+                            @Override
+                            public void run() {
+                                try {
+                                    collector.visit(childPagePos, executorService, executingThreadCounter);
+                                } finally {
+                                    executingThreadCounter.decrementAndGet();
+                                }
+                            }
+                        });
+                        futures.add(f);
+                        break;
+                    }
+                }
+            }
+        }
+        for (Future<?> f : futures) {
+            try {
+                f.get();
+            } catch (InterruptedException ex) {
+                throw new RuntimeException(ex);
+            } catch (ExecutionException ex) {
+                throw DbException.convert(ex);
+            }
         }
     }
```
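The rewritten child loop implements a small throttling protocol: reserve a worker slot by CAS on a shared counter, submit the child visit to the pool when the reservation succeeds, and fall back to visiting inline on the current thread when the pool is saturated; the collected futures are then joined so the recursive call only returns once all children are done. A condensed, hypothetical sketch of the pattern, with `process` standing in for `collector.visit`:

```java
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.ArrayBlockingQueue;
import java.util.concurrent.Future;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicInteger;

public class ThrottledRecursion {
    static void visitChild(final long pos, final ThreadPoolExecutor pool,
            final AtomicInteger running, final List<Future<?>> futures) {
        for (;;) {
            int n = running.get();
            if (n >= pool.getMaximumPoolSize()) {
                process(pos); // saturated: do the work inline, no new task
                return;
            }
            if (running.compareAndSet(n, n + 1)) { // reserved a slot
                futures.add(pool.submit(() -> {
                    try {
                        process(pos);
                    } finally {
                        running.decrementAndGet(); // release the slot
                    }
                }));
                return;
            }
            // CAS lost a race with another thread: re-read the counter and retry
        }
    }

    static void process(long pos) { /* read the page, visit its children */ }

    public static void main(String[] args) throws Exception {
        ThreadPoolExecutor pool = new ThreadPoolExecutor(4, 4, 10L,
                TimeUnit.SECONDS, new ArrayBlockingQueue<Runnable>(64));
        AtomicInteger running = new AtomicInteger();
        List<Future<?>> futures = new ArrayList<>();
        for (long pos = 0; pos < 8; pos++) {
            visitChild(pos, pool, running, futures);
        }
        for (Future<?> f : futures) {
            f.get(); // join children before returning up the call stack
        }
        pool.shutdownNow();
    }
}
```

The diff's own comment explains the choice of a dedicated counter over querying the pool: pool-size inspection is racy, while the counter only ever grows through a successful `compareAndSet`, so the bound actually holds.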