Commit f33de19b (Unverified)
Merge pull request #1560 from h2database/defrag_oom

Defrag OOM

Authored Nov 21, 2018 by Andrei Tokar; committed by GitHub on Nov 21, 2018
Parents: c7038923, 2da176f4

Showing 6 changed files with 208 additions and 66 deletions
h2/src/main/org/h2/mvstore/MVMap.java          +22 -11
h2/src/main/org/h2/mvstore/MVStore.java        +23 -15
h2/src/main/org/h2/mvstore/MVStoreTool.java    +14 -16
h2/src/main/org/h2/mvstore/Page.java           +76 -24
h2/src/test/org/h2/test/TestAll.java            +2  -0
h2/src/test/org/h2/test/store/TestDefrag.java  +71  -0
h2/src/main/org/h2/mvstore/MVMap.java

@@ -1053,7 +1053,10 @@ public class MVMap<K, V> extends AbstractMap<K, V>
      * @return version
      */
     public final long getVersion() {
-        RootReference rootReference = getRoot();
+        return getVersion(getRoot());
+    }
+
+    private long getVersion(RootReference rootReference) {
         RootReference previous = rootReference.previous;
         return previous == null || previous.root != rootReference.root ||
                 previous.appendCounter != rootReference.appendCounter ?
@@ -1061,7 +1064,10 @@ public class MVMap<K, V> extends AbstractMap<K, V>
     }
 
     final boolean hasChangesSince(long version) {
-        return getVersion() > version;
+        RootReference rootReference = getRoot();
+        Page root = rootReference.root;
+        return !root.isSaved() && root.getTotalCount() > 0 ||
+                getVersion(rootReference) > version;
     }
 
     public boolean isSingleWriter() {
@@ -1153,28 +1159,33 @@ public class MVMap<K, V> extends AbstractMap<K, V>
         MVStore.TxCounter txCounter = store.registerVersionUsage();
         try {
             beforeWrite();
-            setRoot(copy(sourceMap.getRootPage()));
+            copy(sourceMap.getRootPage(), null, 0);
         } finally {
             store.deregisterVersionUsage(txCounter);
         }
     }
 
-    private Page copy(Page source) {
+    private Page copy(Page source, Page parent, int index) {
         Page target = source.copy(this);
-        store.registerUnsavedPage(target.getMemory());
+        if (parent == null) {
+            setRoot(target);
+        } else {
+            parent.setChild(index, target);
+        }
         if (!source.isLeaf()) {
             for (int i = 0; i < getChildPageCount(target); i++) {
                 if (source.getChildPagePos(i) != 0) {
                     // position 0 means no child
                     // (for example the last entry of an r-tree node)
                     // (the MVMap is also used for r-trees for compacting)
-                    Page child = copy(source.getChildPage(i));
-                    target.setChild(i, child);
+                    copy(source.getChildPage(i), target, i);
                 }
             }
-
-            setRoot(target);
-            beforeWrite();
+            target.setComplete();
+        }
+        store.registerUnsavedPage(target.getMemory());
+        if (store.isSaveNeeded()) {
+            store.commit();
         }
         return target;
     }
@@ -1186,7 +1197,6 @@ public class MVMap<K, V> extends AbstractMap<K, V>
      * @return potentially updated RootReference
      */
     private RootReference flushAppendBuffer(RootReference rootReference) {
-        beforeWrite();
         int attempt = 0;
         int keyCount;
         while ((keyCount = rootReference.getAppendCounter()) > 0) {
@@ -1276,6 +1286,7 @@ public class MVMap<K, V> extends AbstractMap<K, V>
         RootReference rootReference = getRootInternal();
         int appendCounter = rootReference.getAppendCounter();
         if (appendCounter >= keysPerPage) {
+            beforeWrite();
             rootReference = flushAppendBuffer(rootReference);
             appendCounter = rootReference.getAppendCounter();
             assert appendCounter < keysPerPage;
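The thrust of the MVMap change is that a bulk copy no longer builds the entire target tree in memory before anything is written: each copied page is attached to its (possibly still incomplete) parent, registered as unsaved, and the target store is committed whenever it reports that a save is needed. The patch relies on package-private hooks (registerUnsavedPage, isSaveNeeded), but the same periodic-commit idea can be sketched against the public MVStore API; the file name, map name and batch size below are illustrative only, not values taken from the patch.

import org.h2.mvstore.MVMap;
import org.h2.mvstore.MVStore;

public class PeriodicCommitSketch {
    public static void main(String[] args) {
        // hypothetical store file used only for this illustration
        MVStore store = MVStore.open("copyTarget.mv");
        try {
            MVMap<Integer, String> map = store.openMap("data");
            for (int i = 0; i < 1_000_000; i++) {
                map.put(i, "row " + i);
                // flush unsaved pages in batches instead of accumulating
                // the whole data set in memory until a single final commit
                if (i % 10_000 == 0) {
                    store.commit();
                }
            }
            store.commit();
        } finally {
            store.close();
        }
    }
}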
h2/src/main/org/h2/mvstore/MVStore.java

@@ -130,7 +130,7 @@ MVStore:
 /**
  * A persistent storage for maps.
  */
-public class MVStore {
+public class MVStore implements AutoCloseable {
 
     /**
      * The block size (physical sector size) of the disk. The store header is
@@ -1326,10 +1326,8 @@ public class MVStore {
             shrinkFileIfPossible(1);
         }
         for (Page p : changed) {
-            if (p.getTotalCount() > 0) {
-                p.writeEnd();
-            }
+            p.writeEnd();
         }
         metaRoot.writeEnd();
 
         // some pages might have been changed in the meantime (in the newest
@@ -1346,7 +1344,7 @@ public class MVStore {
      */
     private void freeUnusedIfNeeded(long time) {
         int freeDelay = retentionTime / 5;
-        if (time >= lastFreeUnusedChunks + freeDelay) {
+        if (time - lastFreeUnusedChunks >= freeDelay) {
             // set early in case it fails (out of memory or so)
             lastFreeUnusedChunks = time;
             freeUnusedChunks(true);
@@ -1391,6 +1389,7 @@ public class MVStore {
      * @param fast if true, simplified version is used, which assumes that recent chunks
      *             are still in-use and do not scan recent versions of the store.
      *             Also is this case only oldest available version of the store is scanned.
+     * @return set of chunk ids in-use, or null if all chunks should be considered in-use
      */
     private Set<Integer> collectReferencedChunks(boolean fast) {
         assert lastChunk != null;
@@ -1428,6 +1427,15 @@ public class MVStore {
         }
     }
 
+    /**
+     * Scans all map of a particular store version and marks visited chunks as in-use.
+     * @param rootReference of the meta map of the version
+     * @param collector to report visited chunks to
+     * @param executorService to use for parallel processing
+     * @param executingThreadCounter counter for threads already in use
+     * @param inspectedRoots set of page positions for map's roots already inspected
+     *                       or null if not to be used
+     */
     private void inspectVersion(MVMap.RootReference rootReference, ChunkIdsCollector collector,
                                 ThreadPoolExecutor executorService,
                                 AtomicInteger executingThreadCounter,
@@ -1443,12 +1451,11 @@ public class MVStore {
         }
         for (Cursor<String, String> c = new Cursor<>(rootPage, "root."); c.hasNext();) {
             String key = c.next();
-            assert key != null;
             if (!key.startsWith("root.")) {
                 break;
             }
             pos = DataUtils.parseHexLong(c.getValue());
-            assert DataUtils.isPageSaved(pos);
+            if (DataUtils.isPageSaved(pos)) {
                 if (inspectedRoots == null || inspectedRoots.add(pos)) {
                     // to allow for something like "root.tmp.123" to be processed
                     int mapId = DataUtils.parseHexInt(key.substring(key.lastIndexOf('.') + 1));
@@ -1457,6 +1464,7 @@ public class MVStore {
                 }
             }
+            }
         }
     }
 
     final class ChunkIdsCollector {
@@ -1641,12 +1649,12 @@ public class MVStore {
             }
             freedPageSpace.clear();
         }
-        for (Chunk c : modified) {
-            meta.put(Chunk.getMetaKey(c.id), c.asString());
-        }
         if (modified.isEmpty()) {
             break;
         }
+        for (Chunk c : modified) {
+            meta.put(Chunk.getMetaKey(c.id), c.asString());
+        }
         markMetaChanged();
     }
 }
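Since MVStore now implements AutoCloseable, a store can be managed with try-with-resources, which is what the reworked MVStoreTool.compact below relies on. A minimal usage sketch, assuming an illustrative file and map name:

import org.h2.mvstore.MVMap;
import org.h2.mvstore.MVStore;

public class AutoCloseableStoreSketch {
    public static void main(String[] args) {
        // the store is closed automatically when the block exits,
        // even if an error (e.g. OutOfMemoryError during compaction) escapes
        try (MVStore store = new MVStore.Builder().fileName("example.mv").open()) {
            MVMap<String, String> map = store.openMap("settings");
            map.put("greeting", "hello");
            store.commit();
        }
    }
}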
h2/src/main/org/h2/mvstore/MVStoreTool.java

@@ -481,30 +481,22 @@ public class MVStoreTool {
      * @param compress whether to compress the data
      */
     public static void compact(String sourceFileName, String targetFileName, boolean compress) {
-        MVStore source = new MVStore.Builder().
+        try (MVStore source = new MVStore.Builder().
                 fileName(sourceFileName).
-                readOnly().open();
-        // Bugfix - Add double "try-finally" statements to close source and target stores for
-        //releasing lock and file resources in these stores even if OOM occurs.
-        // Fix issues such as "Cannot delete file "/h2/data/test.mv.db.tempFile" [90025-197]"
-        //when client connects to this server and reopens this store database in this process.
-        // @since 2018-09-13 little-pan
-        try {
+                readOnly().open()) {
+            // Bugfix - Add double "try-finally" statements to close source and target stores for
+            //releasing lock and file resources in these stores even if OOM occurs.
+            // Fix issues such as "Cannot delete file "/h2/data/test.mv.db.tempFile" [90025-197]"
+            //when client connects to this server and reopens this store database in this process.
+            // @since 2018-09-13 little-pan
             FileUtils.delete(targetFileName);
             MVStore.Builder b = new MVStore.Builder().
                     fileName(targetFileName);
             if (compress) {
                 b.compress();
             }
-            MVStore target = b.open();
-            try {
+            try (MVStore target = b.open()) {
                 compact(source, target);
-            } finally {
-                target.close();
             }
-        } finally {
-            source.close();
         }
     }
@@ -515,6 +507,10 @@ public class MVStoreTool {
      * @param target the target store
      */
     public static void compact(MVStore source, MVStore target) {
+        int autoCommitDelay = target.getAutoCommitDelay();
+        int retentionTime = target.getRetentionTime();
+        target.setAutoCommitDelay(0);
+        target.setRetentionTime(Integer.MAX_VALUE); // disable unused chunks collection
         MVMap<String, String> sourceMeta = source.getMetaMap();
         MVMap<String, String> targetMeta = target.getMetaMap();
         for (Entry<String, String> m : sourceMeta.entrySet()) {
@@ -540,6 +536,8 @@ public class MVStoreTool {
             MVMap<Object, Object> targetMap = target.openMap(mapName, mp);
             targetMap.copyFrom(sourceMap);
         }
+        target.setRetentionTime(retentionTime);
+        target.setAutoCommitDelay(autoCommitDelay);
     }
 
     /**
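MVStoreTool.compact(String, String, boolean) keeps its public signature, so existing callers are unaffected; only the resource handling inside changed. A minimal invocation sketch, with placeholder file names:

import org.h2.mvstore.MVStoreTool;

public class CompactFileSketch {
    public static void main(String[] args) {
        // rewrites the source store into a freshly compacted target file;
        // both paths are placeholders for this illustration
        MVStoreTool.compact("data/source.mv.db", "data/compacted.mv.db", true);
    }
}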
h2/src/main/org/h2/mvstore/Page.java

@@ -813,6 +813,12 @@ public abstract class Page implements Cloneable
         return mem;
     }
 
+    public boolean isComplete() {
+        return true;
+    }
+
+    public void setComplete() {}
+
     /**
      * Remove the page.
      */
@@ -883,16 +889,15 @@ public abstract class Page implements Cloneable
         void clearPageReference() {
             if (page != null) {
-                if (!page.isSaved()) {
-                    throw DataUtils.newIllegalStateException(DataUtils.ERROR_INTERNAL,
-                            "Page not written");
-                }
                 page.writeEnd();
-                assert pos == page.getPos();
-                assert count == page.getTotalCount();
-                page = null;
+                assert page.isSaved() || !page.isComplete();
+                if (page.isSaved()) {
+                    assert pos == page.getPos();
+                    assert count == page.getTotalCount() : count + " != " + page.getTotalCount();
+                    page = null;
+                }
             }
         }
 
         long getPos() {
             return pos;
@@ -900,7 +905,7 @@ public abstract class Page implements Cloneable
         void resetPos() {
             Page p = page;
-            if (p != null) {
+            if (p != null && p.isSaved()) {
                 pos = p.getPos();
                 assert count == p.getTotalCount();
             }
@@ -910,12 +915,12 @@ public abstract class Page implements Cloneable
         public String toString() {
             return "Cnt:" + count + ", pos:" + DataUtils.getPageChunkId(pos) +
                     "-" + DataUtils.getPageOffset(pos) + ":" + DataUtils.getPageMaxLength(pos) +
-                    (DataUtils.getPageType(pos) == 0 ? " leaf" : " node") + ", " + page;
+                    (page == null ? DataUtils.getPageType(pos) == 0 : page.isLeaf() ? " leaf" : " node") + ", " + page;
         }
     }
 
-    private static final class NonLeaf extends Page
+    private static class NonLeaf extends Page
     {
         /**
          * The child page references.
@@ -950,10 +955,7 @@ public abstract class Page implements Cloneable
         @Override
         public Page copy(MVMap<?, ?> map) {
-            // replace child pages with empty pages
-            PageReference[] children = new PageReference[this.children.length];
-            Arrays.fill(children, PageReference.EMPTY);
-            return new NonLeaf(map, this, children, 0);
+            return new IncompleteNonLeaf(map, this);
         }
 
         @Override
@@ -1012,7 +1014,7 @@ public abstract class Page implements Cloneable
         @Override
         public long getTotalCount() {
-            assert totalCount == calculateTotalCount() :
+            assert !isComplete() || totalCount == calculateTotalCount() :
                     "Total count: " + totalCount + " != " + calculateTotalCount();
             return totalCount;
         }
@@ -1026,6 +1028,10 @@ public abstract class Page implements Cloneable
             return check;
         }
 
+        protected void recalculateTotalCount() {
+            totalCount = calculateTotalCount();
+        }
+
         @Override
         long getCounts(int index) {
             return children[index].count;
@@ -1150,6 +1156,15 @@ public abstract class Page implements Cloneable
         void writeUnsavedRecursive(Chunk chunk, WriteBuffer buff) {
             if (!isSaved()) {
                 int patch = write(chunk, buff);
-                int len = getRawChildPageCount();
-                for (int i = 0; i < len; i++) {
-                    PageReference ref = children[i];
+                writeChildrenRecursive(chunk, buff);
+                int old = buff.position();
+                buff.position(patch);
+                writeChildren(buff, false);
+                buff.position(old);
+            }
+        }
+
+        void writeChildrenRecursive(Chunk chunk, WriteBuffer buff) {
+            int len = getRawChildPageCount();
+            for (int i = 0; i < len; i++) {
+                PageReference ref = children[i];
@@ -1159,11 +1174,6 @@ public abstract class Page implements Cloneable
                     ref.resetPos();
                 }
             }
-            int old = buff.position();
-            buff.position(patch);
-            writeChildren(buff, false);
-            buff.position(old);
-            }
         }
 
         @Override
@@ -1202,6 +1212,48 @@ public abstract class Page implements Cloneable
         }
     }
 
+    private static class IncompleteNonLeaf extends NonLeaf {
+
+        private boolean complete;
+
+        IncompleteNonLeaf(MVMap<?, ?> map, NonLeaf source) {
+            super(map, source, constructEmptyPageRefs(source.getRawChildPageCount()),
+                    source.getTotalCount());
+        }
+
+        private static PageReference[] constructEmptyPageRefs(int size) {
+            // replace child pages with empty pages
+            PageReference[] children = new PageReference[size];
+            Arrays.fill(children, PageReference.EMPTY);
+            return children;
+        }
+
+        @Override
+        void writeUnsavedRecursive(Chunk chunk, WriteBuffer buff) {
+            if (complete) {
+                super.writeUnsavedRecursive(chunk, buff);
+            } else if (!isSaved()) {
+                writeChildrenRecursive(chunk, buff);
+            }
+        }
+
+        public boolean isComplete() {
+            return complete;
+        }
+
+        public void setComplete() {
+            recalculateTotalCount();
+            complete = true;
+        }
+
+        @Override
+        public void dump(StringBuilder buff) {
+            super.dump(buff);
+            buff.append(", complete:").append(complete);
+        }
+    }
+
     private static class Leaf extends Page
     {
         /**
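The new IncompleteNonLeaf type exists so that a parent page can be handed to the store before its children have been copied: while the page is incomplete, writeUnsavedRecursive only descends into the children, and the parent itself is serialized once setComplete() has been called. A generic, self-contained illustration of that write order (this is not H2 code; the names and structure are invented for the sketch):

import java.util.ArrayList;
import java.util.List;

class IncompleteParentSketch {
    static class Node {
        final String name;
        final List<Node> children = new ArrayList<>();
        boolean complete;

        Node(String name) {
            this.name = name;
        }

        void write(StringBuilder out) {
            // children are always flushed first
            for (Node child : children) {
                child.write(out);
            }
            // the parent is emitted only once it has been marked complete
            if (complete) {
                out.append(name).append('\n');
            }
        }
    }

    public static void main(String[] args) {
        Node root = new Node("root");
        Node leaf = new Node("leaf");
        leaf.complete = true;
        root.children.add(leaf);
        StringBuilder out = new StringBuilder();
        root.write(out); // emits "leaf" only; "root" is still incomplete
        System.out.print(out);
    }
}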
h2/src/test/org/h2/test/TestAll.java

@@ -133,6 +133,7 @@ import org.h2.test.store.TestCacheLIRS;
 import org.h2.test.store.TestCacheLongKeyLIRS;
 import org.h2.test.store.TestConcurrent;
 import org.h2.test.store.TestDataUtils;
+import org.h2.test.store.TestDefrag;
 import org.h2.test.store.TestFreeSpace;
 import org.h2.test.store.TestKillProcessWhileWriting;
 import org.h2.test.store.TestMVRTree;
@@ -922,6 +923,7 @@ kill -9 `jps -l | grep "org.h2.test." | cut -d " " -f 1`
         addTest(new TestFileLockSerialized());
         addTest(new TestFileLockProcess());
         addTest(new TestFileSystem());
+        addTest(new TestDefrag());
         addTest(new TestTools());
         addTest(new TestSampleApps());
h2/src/test/org/h2/test/store/TestDefrag.java (new file, 0 → 100644)

/*
 * Copyright 2004-2018 H2 Group. Multiple-Licensed under the MPL 2.0,
 * and the EPL 1.0 (http://h2database.com/html/license.html).
 * Initial Developer: H2 Group
 */
package org.h2.test.store;

import static org.h2.engine.Constants.SUFFIX_MV_FILE;
import org.h2.test.TestBase;
import org.h2.test.TestDb;
import java.io.File;
import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.ResultSet;
import java.sql.Statement;

/**
 * Test off-line compaction procedure used by SHUTDOWN DEFRAG command
 *
 * @author <a href='mailto:andrei.tokar@gmail.com'>Andrei Tokar</a>
 */
public class TestDefrag extends TestDb {

    /**
     * Run just this test.
     *
     * @param a ignored
     */
    public static void main(String... a) throws Exception {
        TestBase.createCaller().init().test();
    }

    @Override
    public boolean isEnabled() {
        return config.mvStore && !config.memory && config.big && !config.travis;
    }

    @Override
    public void test() throws Exception {
        String dbName = getTestName();
        deleteDb(dbName);
        File dbFile = new File(getBaseDir(), dbName + SUFFIX_MV_FILE);
        try (Connection c = getConnection(dbName)) {
            try (Statement st = c.createStatement()) {
                st.execute("CREATE TABLE IF NOT EXISTS test (id INT PRIMARY KEY, txt varchar)" +
                        " AS SELECT x, x || SPACE(200) FROM SYSTEM_RANGE(1,10000000)");
            }
            long origSize = dbFile.length();
            assertTrue(origSize > 4_000_000_000L);
            try (Statement st = c.createStatement()) {
                st.execute("shutdown defrag");
            }
            long compactedSize = dbFile.length();
            assertTrue(compactedSize < 400_000_000);
        }
        try (Connection c = getConnection(dbName + ";LAZY_QUERY_EXECUTION=1")) {
            try (Statement st = c.createStatement()) {
                ResultSet rs = st.executeQuery("SELECT * FROM test");
                int count = 0;
                while (rs.next()) {
                    ++count;
                    assertEquals(count, rs.getInt(1));
                    assertTrue(rs.getString(2).startsWith(count + " "));
                }
                assertEquals(10_000_000, count);
            }
        }
        deleteDb(dbName);
    }
}
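TestDefrag exercises the change end to end through SQL: it builds a database larger than 4 GB, issues SHUTDOWN DEFRAG, and expects the file to shrink by roughly an order of magnitude. Outside the test harness the same compaction is triggered over plain JDBC; a minimal sketch, with a placeholder JDBC URL and credentials:

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.Statement;

public class ShutdownDefragSketch {
    public static void main(String[] args) throws Exception {
        // SHUTDOWN DEFRAG compacts the database file while closing it;
        // the URL below is a placeholder for this illustration
        try (Connection conn = DriverManager.getConnection("jdbc:h2:./test", "sa", "");
                Statement stat = conn.createStatement()) {
            stat.execute("SHUTDOWN DEFRAG");
        }
    }
}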