Commit e920b890
authored Aug 11, 2012 by Thomas Mueller

    A persistent tree map (work in progress).

Parent: 6c9eede0

Showing 7 changed files with 172 additions and 67 deletions
  TestBtreeMapStore.java  (h2/src/test/org/h2/test/store/TestBtreeMapStore.java)    +26  -0
  TestDataUtils.java      (h2/src/test/org/h2/test/store/TestDataUtils.java)        +20  -8
  BtreeMap.java           (h2/src/tools/org/h2/dev/store/btree/BtreeMap.java)       +7   -10
  BtreeMapStore.java      (h2/src/tools/org/h2/dev/store/btree/BtreeMapStore.java)  +35  -5
  DataUtils.java          (h2/src/tools/org/h2/dev/store/btree/DataUtils.java)      +40  -9
  Dump.java               (h2/src/tools/org/h2/dev/store/btree/Dump.java)           +21  -23
  Page.java               (h2/src/tools/org/h2/dev/store/btree/Page.java)           +23  -12
h2/src/test/org/h2/test/store/TestBtreeMapStore.java

@@ -29,6 +29,7 @@ public class TestBtreeMapStore extends TestBase {
     }
 
     public void test() {
+        testFastDelete();
         testRollbackInMemory();
         testRollbackStored();
         testMeta();
@@ -42,6 +43,31 @@ public class TestBtreeMapStore extends TestBase {
         testSimple();
     }
 
+    private void testFastDelete() {
+        String fileName = getBaseDir() + "/testMeta.h3";
+        FileUtils.delete(fileName);
+        BtreeMapStore s;
+        BtreeMap<Integer, String> m;
+        s = openStore(fileName);
+        s.setMaxPageSize(100);
+        m = s.openMap("data", Integer.class, String.class);
+        for (int i = 0; i < 1000; i++) {
+            m.put(i, "Hello World");
+        }
+        s.store();
+        assertEquals(3, s.getWriteCount());
+        s.close();
+        s = openStore(fileName);
+        m = s.openMap("data", Integer.class, String.class);
+        m.clear();
+        s.store();
+        // ensure only nodes are read, but not leaves
+        assertEquals(4, s.getReadCount());
+        assertEquals(2, s.getWriteCount());
+        s.close();
+    }
+
     private void testRollbackStored() {
         String fileName = getBaseDir() + "/testMeta.h3";
         FileUtils.delete(fileName);
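The fast delete this new test exercises relies on the page type now being encoded in the page position itself (see the DataUtils and Page changes below): when a map is cleared, a child that is a leaf can be freed from its position alone, with no disk read. Condensed from the Page.removeAllRecursive() change further down:

    // From the Page.java diff below: the low bit of a child position
    // already says leaf or node, so leaves are freed without being read.
    for (long c : children) {
        int type = DataUtils.getPageType(c);
        if (type == DataUtils.PAGE_TYPE_LEAF) {
            getStore().removePage(c);             // no read needed
        } else {
            map.readPage(c).removeAllRecursive(); // inner nodes must be read
        }
    }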
h2/src/test/org/h2/test/store/TestDataUtils.java

@@ -44,13 +44,25 @@ public class TestDataUtils extends TestBase {
     }
 
     private void testPagePos() {
-        for (int chunkId = 0; chunkId < 67000000; chunkId += 670000) {
-            for (long offset = 0; offset < Integer.MAX_VALUE; offset += Integer.MAX_VALUE / 100) {
-                for (int length = 0; length < 2000000; length += 200000) {
-                    long pos = DataUtils.getPos(chunkId, (int) offset, length);
-                    assertEquals(chunkId, DataUtils.getChunkId(pos));
-                    assertEquals(offset, DataUtils.getOffset(pos));
-                    assertTrue(DataUtils.getMaxLength(pos) >= length);
+        assertEquals(0, DataUtils.PAGE_TYPE_LEAF);
+        assertEquals(1, DataUtils.PAGE_TYPE_NODE);
+        for (int i = 0; i < 67000000; i++) {
+            long pos = DataUtils.getPagePos(i, 3, 128, 1);
+            assertEquals(i, DataUtils.getPageChunkId(pos));
+            assertEquals(3, DataUtils.getPageOffset(pos));
+            assertEquals(128, DataUtils.getPageMaxLength(pos));
+            assertEquals(1, DataUtils.getPageType(pos));
+        }
+        for (int type = 0; type <= 1; type++) {
+            for (int chunkId = 0; chunkId < 67000000; chunkId += 670000) {
+                for (long offset = 0; offset < Integer.MAX_VALUE; offset += Integer.MAX_VALUE / 100) {
+                    for (int length = 0; length < 2000000; length += 200000) {
+                        long pos = DataUtils.getPagePos(chunkId, (int) offset, length, type);
+                        assertEquals(chunkId, DataUtils.getPageChunkId(pos));
+                        assertEquals(offset, DataUtils.getPageOffset(pos));
+                        assertTrue(DataUtils.getPageMaxLength(pos) >= length);
+                        assertTrue(DataUtils.getPageType(pos) == type);
+                    }
                 }
             }
         }
@@ -75,7 +87,7 @@ public class TestDataUtils extends TestBase {
             if (code > lastCode) {
                 lastCode = code;
             }
-            int max = DataUtils.getMaxLength(code);
+            int max = DataUtils.getPageMaxLength(code << 1);
             assertTrue(max >= i && max >= 32);
         }
     }
h2/src/tools/org/h2/dev/store/btree/BtreeMap.java

@@ -95,7 +95,7 @@ public class BtreeMap<K, V> {
     }
 
     /**
-     * Remove all entries, and remove the map. The map becomes invalid.
+     * Remove all entries, and close the map.
      */
     public void remove() {
         checkWrite();
@@ -103,10 +103,14 @@ public class BtreeMap<K, V> {
             root.removeAllRecursive();
         }
         store.removeMap(id);
+        close();
+    }
+
+    public void close() {
+        readOnly = true;
+        store = null;
         oldRoots.clear();
         root = null;
-        store = null;
-        readOnly = true;
     }
 
     public boolean isClosed() {
@@ -278,13 +282,6 @@ public class BtreeMap<K, V> {
         return buff.toString();
     }
 
-    public void close() {
-        readOnly = true;
-        store = null;
-        oldRoots.clear();
-        root = null;
-    }
-
     public int hashCode() {
         return id;
     }
h2/src/tools/org/h2/dev/store/btree/BtreeMapStore.java

@@ -38,7 +38,6 @@ header:
 blockSize=4096
 
 TODO:
-- keep page type (leaf/node) in pos to speed up large deletes
 - support fast range deletes
 - support custom pager for r-tree, kd-tree
 - need an 'end of chunk' marker to verify all data is written
@@ -57,6 +56,7 @@ TODO:
 - file header could be a regular chunk, end of file the second
 - possibly split chunk data into immutable and mutable
 - reduce minimum chunk size, speed up very small transactions
+- defragment: use total max length instead of page count (liveCount)
 
 */
@@ -107,6 +107,8 @@ public class BtreeMapStore {
     private Compressor compressor = new CompressLZF();
 
     private long currentVersion;
+    private int readCount;
+    private int writeCount;
 
     private BtreeMapStore(String fileName, DataTypeFactory typeFactory) {
         this.fileName = fileName;
@@ -310,6 +312,7 @@ public class BtreeMapStore {
                 "rootChunk:" + rootChunkStart + "\n" +
                 "lastMapId:" + lastMapId + "\n" +
                 "version:" + currentVersion + "\n").getBytes("UTF-8"));
+        writeCount++;
         file.position(0);
         file.write(header);
         file.position(blockSize);
@@ -321,8 +324,9 @@ public class BtreeMapStore {
     private void readHeader() {
         try {
-            file.position(0);
             byte[] header = new byte[blockSize];
+            readCount++;
+            file.position(0);
             // TODO read fully; read both headers
             file.read(ByteBuffer.wrap(header));
             Properties prop = new Properties();
@@ -365,16 +369,16 @@ public class BtreeMapStore {
     }
 
     private Chunk getChunk(long pos) {
-        return chunks.get(DataUtils.getChunkId(pos));
+        return chunks.get(DataUtils.getPageChunkId(pos));
     }
 
     private long getFilePosition(long pos) {
         Chunk c = getChunk(pos);
         if (c == null) {
-            throw new RuntimeException("Chunk " + DataUtils.getChunkId(pos) + " not found");
+            throw new RuntimeException("Chunk " + DataUtils.getPageChunkId(pos) + " not found");
         }
         long filePos = c.start;
-        filePos += DataUtils.getOffset(pos);
+        filePos += DataUtils.getPageOffset(pos);
         return filePos;
     }
@@ -467,6 +471,7 @@ public class BtreeMapStore {
         buff.putLong(meta.getRoot().getPos());
         buff.rewind();
         try {
+            writeCount++;
             file.position(filePos);
             file.write(buff);
         } catch (IOException e) {
@@ -575,6 +580,7 @@ public class BtreeMapStore {
     private Chunk readChunkHeader(long start) {
         try {
+            readCount++;
             file.position(start);
             ByteBuffer buff = ByteBuffer.wrap(new byte[32]);
             DataUtils.readFully(file, buff);
@@ -724,6 +730,7 @@ public class BtreeMapStore {
         Page p = cache.get(pos);
         if (p == null) {
             long filePos = getFilePosition(pos);
+            readCount++;
             p = Page.read(file, map, filePos, pos);
             cache.put(pos, p);
         }
@@ -775,6 +782,11 @@ public class BtreeMapStore {
         this.maxPageSize = maxPageSize;
     }
 
+    /**
+     * The maximum number of key-value pairs in a page.
+     *
+     * @return the maximum number of entries
+     */
     int getMaxPageSize() {
         return maxPageSize;
     }
@@ -914,4 +926,22 @@ public class BtreeMapStore {
         return currentVersion;
     }
 
+    /**
+     * Get the number of write operations since this store was opened.
+     *
+     * @return the number of write operations
+     */
+    public int getWriteCount() {
+        return writeCount;
+    }
+
+    /**
+     * Get the number of read operations since this store was opened.
+     *
+     * @return the number of read operations
+     */
+    public int getReadCount() {
+        return readCount;
+    }
+
 }
h2/src/tools/org/h2/dev/store/btree/DataUtils.java

@@ -15,6 +15,21 @@ import java.nio.channels.FileChannel;
  */
 public class DataUtils {
 
+    /**
+     * The type for leaf page.
+     */
+    public static final int PAGE_TYPE_LEAF = 0;
+
+    /**
+     * The type for node page.
+     */
+    public static final int PAGE_TYPE_NODE = 1;
+
+    /**
+     * The bit mask for compressed pages.
+     */
+    public static final int PAGE_COMPRESSED = 2;
+
     /**
      * The maximum length of a variable size int.
      */
@@ -211,8 +226,8 @@ public class DataUtils {
      * @param pos the position
      * @return the chunk id
      */
-    public static int getChunkId(long pos) {
-        return (int) (pos >>> 37);
+    public static int getPageChunkId(long pos) {
+        return (int) (pos >>> 38);
     }
@@ -222,8 +237,8 @@ public class DataUtils {
      * @param pos the position
      * @return the maximum length
      */
-    public static int getMaxLength(long pos) {
-        int code = (int) (pos & 31);
+    public static int getPageMaxLength(long pos) {
+        int code = (int) ((pos >> 1) & 31);
         if (code == 31) {
             return Integer.MAX_VALUE;
         }
@@ -236,21 +251,37 @@ public class DataUtils {
      * @param pos the position
      * @return the offset
      */
-    public static int getOffset(long pos) {
-        return (int) (pos >> 5);
+    public static int getPageOffset(long pos) {
+        return (int) (pos >> 6);
+    }
+
+    /**
+     * Get the page type from the position.
+     *
+     * @param pos the position
+     * @return the page type (PAGE_TYPE_NODE or PAGE_TYPE_LEAF)
+     */
+    public static int getPageType(long pos) {
+        return ((int) pos) & 1;
     }
 
     /**
      * Get the position of this page. The following information is encoded in
-     * the position: the chunk id, the offset, and the maximum length.
+     * the position: the chunk id, the offset, the maximum length, and the type
+     * (node or leaf).
      *
      * @param chunkId the chunk id
     * @param offset the offset
      * @param length the length
+     * @param type the page type (1 for node, 0 for leaf)
      * @return the position
      */
-    public static long getPos(int chunkId, int offset, int length) {
-        return ((long) chunkId << 37) | ((long) offset << 5) | encodeLength(length);
+    public static long getPagePos(int chunkId, int offset, int length, int type) {
+        long pos = (long) chunkId << 38;
+        pos |= (long) offset << 6;
+        pos |= encodeLength(length) << 1;
+        pos |= type;
+        return pos;
+    }
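For clarity, the new position layout packs four fields into one long: chunk id in bits 38-63, offset in bits 6-37, the 5-bit encoded-length code in bits 1-5, and the page type in bit 0. Below is a small self-contained sketch of that layout; PagePosDemo is my own illustration, not part of the commit, and it takes the 5-bit length code directly instead of going through encodeLength.

    // Hypothetical demo class; mirrors the bit layout of DataUtils.getPagePos.
    public class PagePosDemo {

        // chunk id: bits 38-63, offset: bits 6-37,
        // length code: bits 1-5, page type: bit 0
        static long getPagePos(int chunkId, int offset, int lengthCode, int type) {
            return ((long) chunkId << 38)
                    | ((long) offset << 6)
                    | ((long) lengthCode << 1)
                    | type;
        }

        public static void main(String[] args) {
            long pos = getPagePos(3, 4096, 7, 1);
            // decode exactly as getPageChunkId / getPageOffset /
            // getPageMaxLength / getPageType do
            System.out.println((int) (pos >>> 38));      // 3 (chunk id)
            System.out.println((int) (pos >> 6));        // 4096 (offset)
            System.out.println((int) ((pos >> 1) & 31)); // 7 (length code)
            System.out.println(((int) pos) & 1);         // 1 (node)
        }
    }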
h2/src/tools/org/h2/dev/store/btree/Dump.java

@@ -12,7 +12,6 @@ import java.io.PrintWriter;
 import java.io.StringReader;
 import java.nio.ByteBuffer;
 import java.nio.channels.FileChannel;
-import java.util.Arrays;
 import java.util.Properties;
 import org.h2.store.fs.FilePath;
 import org.h2.store.fs.FileUtils;
@@ -68,7 +67,7 @@ public class Dump {
         writer.println("file " + fileName);
         writer.println("    length " + fileLength);
         writer.println("    " + prop);
-        ByteBuffer block = ByteBuffer.wrap(new byte[16]);
+        ByteBuffer block = ByteBuffer.wrap(new byte[32]);
         for (long pos = 0; pos < fileLength;) {
             file.position(pos);
             block.rewind();
@@ -78,34 +77,33 @@ public class Dump {
                 pos += blockSize;
                 continue;
             }
-            int length = block.getInt();
+            int chunkLength = block.getInt();
             int chunkId = block.getInt();
-            int metaRootOffset = block.getInt();
+            long metaRootPos = block.getLong();
             writer.println("    chunk " + chunkId + " at " + pos +
-                    " length " + length + " offset " + metaRootOffset);
-            ByteBuffer chunk = ByteBuffer.allocate(length);
+                    " length " + chunkLength + " root " + metaRootPos);
+            ByteBuffer chunk = ByteBuffer.allocate(chunkLength);
             file.position(pos);
             FileUtils.readFully(file, chunk);
             int p = block.position();
-            pos = (pos + length + blockSize) / blockSize * blockSize;
-            length -= p;
-            while (length > 0) {
+            pos = (pos + chunkLength + blockSize) / blockSize * blockSize;
+            chunkLength -= p;
+            while (chunkLength > 0) {
                 chunk.position(p);
-                int len = chunk.getInt();
-                long mapId = chunk.getLong();
+                int pageLength = chunk.getInt();
+                // check value (ignored)
+                chunk.getShort();
+                long mapId = DataUtils.readVarInt(chunk);
+                int len = DataUtils.readVarInt(chunk);
                 int type = chunk.get();
-                int count = DataUtils.readVarInt(chunk);
-                if (type == 1) {
-                    long[] children = new long[count];
-                    for (int i = 0; i < count; i++) {
-                        children[i] = chunk.getLong();
-                    }
-                    writer.println("        map " + mapId + " at " + p + " node, " + count + " children: " + Arrays.toString(children));
-                } else {
-                    writer.println("        map " + mapId + " at " + p + " leaf, " + count + " rows");
-                }
-                p += len;
-                length -= len;
+                boolean compressed = (type & 2) != 0;
+                boolean node = (type & 1) != 0;
+                writer.println("        map " + mapId + " at " + p + " " +
+                        (node ? "node" : "leaf") + " " +
+                        (compressed ? "compressed" : "") + " " +
+                        "len: " + pageLength + " entries: " + len);
+                p += pageLength;
+                chunkLength -= pageLength;
             }
         }
     } catch (IOException e) {
h2/src/tools/org/h2/dev/store/btree/Page.java

@@ -21,6 +21,7 @@ import org.h2.compress.Compressor;
  * File format:
  * page length (including length): int
  * check value: short
+ * map id: varInt
  * number of keys: varInt
  * type: byte (0: leaf, 1: node; +2: compressed)
  * compressed: bytes saved (varInt)
@@ -71,7 +72,7 @@ public class Page {
      * @return the page
      */
     static Page read(FileChannel file, BtreeMap<?, ?> map, long filePos, long pos) {
-        int maxLength = DataUtils.getMaxLength(pos), length = maxLength;
+        int maxLength = DataUtils.getPageMaxLength(pos), length = maxLength;
         ByteBuffer buff;
         try {
             file.position(filePos);
@@ -88,8 +89,8 @@ public class Page {
         }
         Page p = new Page(map, 0);
         p.pos = pos;
-        int chunkId = DataUtils.getChunkId(pos);
-        int offset = DataUtils.getOffset(pos);
+        int chunkId = DataUtils.getPageChunkId(pos);
+        int offset = DataUtils.getPageOffset(pos);
         p.read(buff, chunkId, offset, maxLength);
         return p;
     }
@@ -427,7 +428,12 @@ public class Page {
     void removeAllRecursive() {
         if (children != null) {
             for (long c : children) {
-                map.readPage(c).removeAllRecursive();
+                int type = DataUtils.getPageType(c);
+                if (type == DataUtils.PAGE_TYPE_LEAF) {
+                    getStore().removePage(c);
+                } else {
+                    map.readPage(c).removeAllRecursive();
+                }
             }
         }
         getStore().removePage(pos);
@@ -524,6 +530,10 @@ public class Page {
             throw new RuntimeException("Length too large, expected =< " + maxLength + " got " + pageLength);
         }
         short check = buff.getShort();
+        int mapId = DataUtils.readVarInt(buff);
+        if (mapId != map.getId()) {
+            throw new RuntimeException("Error reading page, expected map " + map.getId() + " got " + mapId);
+        }
         int len = DataUtils.readVarInt(buff);
         int checkTest = DataUtils.getCheckValue(chunkId) ^
                 DataUtils.getCheckValue(map.getId()) ^
@@ -535,8 +545,8 @@ public class Page {
         }
         keys = new Object[len];
         int type = buff.get();
-        boolean node = (type & 1) != 0;
-        boolean compressed = (type & 2) != 0;
+        boolean node = (type & 1) == DataUtils.PAGE_TYPE_NODE;
+        boolean compressed = (type & DataUtils.PAGE_COMPRESSED) != 0;
         if (compressed) {
             Compressor compressor = map.getStore().getCompressor();
             int lenAdd = DataUtils.readVarInt(buff);
@@ -573,16 +583,17 @@ public class Page {
         int start = buff.position();
         buff.putInt(0);
         buff.putShort((byte) 0);
+        DataUtils.writeVarInt(buff, map.getId());
         int len = keys.length;
         DataUtils.writeVarInt(buff, len);
         Compressor compressor = map.getStore().getCompressor();
-        int type = children != null ? 1 : 0;
+        int type = children != null ? DataUtils.PAGE_TYPE_NODE : DataUtils.PAGE_TYPE_LEAF;
         buff.put((byte) type);
         int compressStart = buff.position();
         for (int i = 0; i < len; i++) {
             map.getKeyType().write(buff, keys[i]);
         }
-        if (type == 1) {
+        if (type == DataUtils.PAGE_TYPE_NODE) {
             for (int i = 0; i < len + 1; i++) {
                 buff.putLong(children[i]);
             }
@@ -600,7 +611,7 @@ public class Page {
             int compLen = compressor.compress(exp, exp.length, comp, 0);
             if (compLen + DataUtils.getVarIntLen(compLen - expLen) < expLen) {
                 buff.position(compressStart - 1);
-                buff.put((byte) (type + 2));
+                buff.put((byte) (type + DataUtils.PAGE_COMPRESSED));
                 DataUtils.writeVarInt(buff, expLen - compLen);
                 buff.put(comp, 0, compLen);
             }
@@ -614,7 +625,7 @@ public class Page {
                 DataUtils.getCheckValue(pageLength) ^
                 DataUtils.getCheckValue(len);
         buff.putShort(start + 4, (short) check);
-        this.pos = DataUtils.getPos(chunkId, start, pageLength);
+        this.pos = DataUtils.getPagePos(chunkId, start, pageLength, type);
     }
@@ -623,8 +634,8 @@ public class Page {
      * @return the next page id
      */
     int getMaxLengthTempRecursive() {
-        // length, check, key length, type
-        int maxLength = 4 + 2 + DataUtils.MAX_VAR_INT_LEN + 1;
+        // length, check, map id, key length, type
+        int maxLength = 4 + 2 + DataUtils.MAX_VAR_INT_LEN + DataUtils.MAX_VAR_INT_LEN + 1;
         int len = keys.length;
         for (int i = 0; i < len; i++) {
             maxLength += map.getKeyType().getMaxLength(keys[i]);
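The map id field added to the page header above is what lets Page.read detect a page that belongs to a different map. Here is a rough, self-contained illustration of the documented header layout (length int, check short, map id varInt, number of keys varInt, type byte); PageHeaderDemo and its simple 7-bits-per-byte varInt coder are my assumptions for the sketch, not the project's DataUtils.

    import java.nio.ByteBuffer;

    // Hypothetical demo of the page header layout documented in Page.java.
    public class PageHeaderDemo {

        // Standard 7-bits-per-byte varInt; DataUtils may differ in detail.
        static void writeVarInt(ByteBuffer buff, int x) {
            while ((x & ~0x7f) != 0) {
                buff.put((byte) (0x80 | (x & 0x7f)));
                x >>>= 7;
            }
            buff.put((byte) x);
        }

        static int readVarInt(ByteBuffer buff) {
            int b, x = 0, shift = 0;
            do {
                b = buff.get();
                x |= (b & 0x7f) << shift;
                shift += 7;
            } while (b < 0);
            return x;
        }

        public static void main(String[] args) {
            ByteBuffer buff = ByteBuffer.allocate(32);
            buff.putInt(0);           // page length, patched in later
            buff.putShort((short) 0); // check value, patched in later
            writeVarInt(buff, 42);    // map id (the field this commit adds)
            writeVarInt(buff, 100);   // number of keys
            buff.put((byte) 1);       // type: node, not compressed
            buff.flip();

            buff.getInt();            // skip length
            buff.getShort();          // skip check value
            int mapId = readVarInt(buff);
            if (mapId != 42) {        // the new consistency check in Page.read
                throw new RuntimeException("Error reading page, expected map 42 got " + mapId);
            }
            System.out.println("keys=" + readVarInt(buff) + " type=" + buff.get());
        }
    }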