Skip to content
项目
群组
代码片段
帮助
正在加载...
帮助
为 GitLab 提交贡献
登录/注册
切换导航
H
h2database
项目
项目
详情
活动
周期分析
仓库
仓库
文件
提交
分支
标签
贡献者
分枝图
比较
统计图
议题
0
议题
0
列表
看板
标记
里程碑
合并请求
0
合并请求
0
CI / CD
CI / CD
流水线
作业
计划
统计图
Wiki
Wiki
代码片段
代码片段
成员
成员
折叠边栏
关闭边栏
活动
分枝图
统计图
创建新议题
作业
提交
议题看板
打开侧边栏
Administrator
h2database
Commits
4c34bc07
提交
4c34bc07
authored
11月 29, 2012
作者:
Thomas Mueller
浏览文件
操作
浏览文件
下载
电子邮件补丁
差异文件
A persistent multi-version map: move to the main source tree
上级
b222ae75
显示空白字符变更
内嵌
并排
正在显示
11 个修改的文件
包含
0 行增加
和
3635 行删除
+0
-3635
ChangeCursor.java
h2/src/tools/org/h2/dev/store/btree/ChangeCursor.java
+0
-221
Chunk.java
h2/src/tools/org/h2/dev/store/btree/Chunk.java
+0
-170
Cursor.java
h2/src/tools/org/h2/dev/store/btree/Cursor.java
+0
-122
CursorPos.java
h2/src/tools/org/h2/dev/store/btree/CursorPos.java
+0
-36
DataUtils.java
h2/src/tools/org/h2/dev/store/btree/DataUtils.java
+0
-535
MVMap.java
h2/src/tools/org/h2/dev/store/btree/MVMap.java
+0
-1000
MVStoreBuilder.java
h2/src/tools/org/h2/dev/store/btree/MVStoreBuilder.java
+0
-116
MVStoreTool.java
h2/src/tools/org/h2/dev/store/btree/MVStoreTool.java
+0
-129
Page.java
h2/src/tools/org/h2/dev/store/btree/Page.java
+0
-889
StreamStore.java
h2/src/tools/org/h2/dev/store/btree/StreamStore.java
+0
-402
package.html
h2/src/tools/org/h2/dev/store/btree/package.html
+0
-15
没有找到文件。
h2/src/tools/org/h2/dev/store/btree/ChangeCursor.java
deleted
100644 → 0
浏览文件 @
b222ae75
/*
* Copyright 2004-2011 H2 Group. Multiple-Licensed under the H2 License,
* Version 1.0, and under the Eclipse Public License, Version 1.0
* (http://h2database.com/html/license.html).
* Initial Developer: H2 Group
*/
package
org
.
h2
.
dev
.
store
.
btree
;
import
java.util.Iterator
;
/**
 * A cursor to iterate over all keys in new pages.
 * <p>
 * The cursor walks two versions of the same map: it first iterates the keys
 * reachable from root1 (state 1), reporting a key when it is missing from
 * root2 or when its entry may have changed (the leaf pages differ); it then
 * iterates root2 (state 2), reporting keys that have no counterpart in root1
 * (i.e. keys that were deleted). Keys whose values compare equal in both
 * roots are filtered out in fetchNext(). Shared (identical) subtrees are
 * skipped by comparing pages along the two cursor-position chains.
 *
 * @param <K> the key type
 * @param <V> the value type
 */
public class ChangeCursor<K, V> implements Iterator<K> {

    private final MVMap<K, V> map;

    // the two root pages being compared; root1 is the "new" version,
    // root2 the "old" one
    private final Page root1, root2;

    /**
     * The state of this cursor.
     * 0: not initialized
     * 1: reading from root1
     * 2: reading from root2
     * 3: closed
     */
    private int state;

    // the current traversal positions within root1 and root2
    private CursorPos pos1, pos2;

    // the key that will be returned by next(); null when exhausted
    private K current;

    ChangeCursor(MVMap<K, V> map, Page root1, Page root2) {
        this.map = map;
        this.root1 = root1;
        this.root2 = root2;
    }

    public K next() {
        // return the pre-fetched key and advance; unlike the usual iterator
        // pattern, this does not call hasNext() first, so callers must call
        // hasNext() before the first next()
        K c = current;
        fetchNext();
        return c;
    }

    public boolean hasNext() {
        if (state == 0) {
            // lazy initialization: descend to the smallest key of root1
            pos1 = new CursorPos(root1, 0, null);
            pos1 = min(pos1);
            state = 1;
            fetchNext();
        }
        return current != null;
    }

    public void remove() {
        // this is a read-only cursor
        throw new UnsupportedOperationException();
    }

    /**
     * Advance to the next key that actually represents a change, skipping
     * keys that exist in both roots with equal values.
     */
    private void fetchNext() {
        while (fetchNextKey()) {
            if (pos1 == null || pos2 == null) {
                // the key exists in only one of the roots: report it
                break;
            }
            // the key exists in both roots on different pages; only report
            // it when the stored values differ
            @SuppressWarnings("unchecked")
            V v1 = (V) map.binarySearch(root1, current);
            @SuppressWarnings("unchecked")
            V v2 = (V) map.binarySearch(root2, current);
            if (!v1.equals(v2)) {
                break;
            }
        }
    }

    /**
     * Advance to the next candidate key, setting the field "current".
     *
     * @return true if a candidate key was found, false when the cursor is
     *         closed (state 3)
     */
    private boolean fetchNextKey() {
        while (true) {
            if (state == 3) {
                return false;
            }
            if (state == 1) {
                // read from root1
                pos1 = fetchNext(pos1);
                if (pos1 == null) {
                    // reached the end of pos1
                    state = 2;
                    pos2 = null;
                    continue;
                }
                pos2 = find(root2, current);
                if (pos2 == null) {
                    // not found in root2
                    return true;
                }
                if (!pos1.page.equals(pos2.page)) {
                    // the page is different,
                    // so the entry has possibly changed
                    return true;
                }
                // the leaf pages are identical, so this whole subtree is
                // unchanged: climb up until the ancestor pages diverge and
                // continue with the next sibling subtree of root1
                while (true) {
                    pos1 = pos1.parent;
                    if (pos1 == null) {
                        // reached end of pos1
                        state = 2;
                        pos2 = null;
                        break;
                    }
                    pos2 = pos2.parent;
                    if (pos2 == null || !pos1.page.equals(pos2.page)) {
                        if (pos1.index + 1 < map.getChildPageCount(pos1.page)) {
                            // descend into the next child of this node
                            pos1 = new CursorPos(pos1.page.getChildPage(++pos1.index), 0, pos1);
                            pos1 = min(pos1);
                            break;
                        }
                    }
                }
            }
            if (state == 2) {
                if (pos2 == null) {
                    // init reading from root2
                    pos2 = new CursorPos(root2, 0, null);
                    pos2 = min(pos2);
                }
                // read from root2
                pos2 = fetchNext(pos2);
                if (pos2 == null) {
                    // reached the end of pos2
                    state = 3;
                    current = null;
                    continue;
                }
                pos1 = find(root1, current);
                if (pos1 != null) {
                    // found a corresponding record
                    // so it was not deleted
                    // but now we may need to skip pages
                    if (!pos1.page.equals(pos2.page)) {
                        // the page is different
                        pos1 = null;
                        continue;
                    }
                    // identical leaf pages: skip the shared subtree, as in
                    // the state 1 case above, but driven by pos2
                    while (true) {
                        pos2 = pos2.parent;
                        if (pos2 == null) {
                            // reached end of pos2
                            state = 3;
                            current = null;
                            pos1 = null;
                            break;
                        }
                        pos1 = pos1.parent;
                        if (pos1 == null || !pos2.page.equals(pos1.page)) {
                            if (pos2.index + 1 < map.getChildPageCount(pos2.page)) {
                                pos2 = new CursorPos(pos2.page.getChildPage(++pos2.index), 0, pos2);
                                pos2 = min(pos2);
                                break;
                            }
                        }
                    }
                    pos1 = null;
                    continue;
                }
                // found no corresponding record
                // so it was deleted
                return true;
            }
        }
    }

    /**
     * Search for the given key starting at the given root page, building the
     * chain of cursor positions down to the leaf.
     *
     * @param p the root page to search from
     * @param key the key to look for; null means the first key
     * @return the leaf position, or null if the key was not found
     */
    private CursorPos find(Page p, K key) {
        // TODO combine with RangeCursor.min
        // possibly move to MVMap
        CursorPos pos = null;
        while (true) {
            if (p.isLeaf()) {
                int x = key == null ? 0 : p.binarySearch(key);
                if (x < 0) {
                    // not an exact match in the leaf
                    return null;
                }
                return new CursorPos(p, x, pos);
            }
            // inner node: negative result encodes the insertion point
            int x = key == null ? -1 : p.binarySearch(key);
            if (x < 0) {
                x = -x - 1;
            } else {
                x++;
            }
            pos = new CursorPos(p, x, pos);
            p = p.getChildPage(x);
        }
    }

    /**
     * Fetch the next key in ascending order, starting at the given position,
     * and store it in "current".
     *
     * @param p the current position
     * @return the new position, or null if there are no more keys
     */
    @SuppressWarnings("unchecked")
    private CursorPos fetchNext(CursorPos p) {
        while (p != null) {
            if (p.index < p.page.getKeyCount()) {
                // more keys in the current leaf
                current = (K) p.page.getKey(p.index++);
                return p;
            }
            // leaf exhausted: go up one level
            p = p.parent;
            if (p == null) {
                break;
            }
            if (p.index + 1 < map.getChildPageCount(p.page)) {
                // descend into the leftmost leaf of the next child
                p = new CursorPos(p.page.getChildPage(++p.index), 0, p);
                p = min(p);
            }
        }
        current = null;
        return p;
    }

    /**
     * Descend to the leftmost leaf below the given position.
     *
     * @param p the starting position
     * @return the position of the leftmost leaf
     */
    private static CursorPos min(CursorPos p) {
        while (true) {
            if (p.page.isLeaf()) {
                return p;
            }
            Page c = p.page.getChildPage(0);
            p = new CursorPos(c, 0, p);
        }
    }

}
h2/src/tools/org/h2/dev/store/btree/Chunk.java
deleted
100644 → 0
浏览文件 @
b222ae75
/*
* Copyright 2004-2011 H2 Group. Multiple-Licensed under the H2 License,
* Version 1.0, and under the Eclipse Public License, Version 1.0
* (http://h2database.com/html/license.html).
* Initial Developer: H2 Group
*/
package
org
.
h2
.
dev
.
store
.
btree
;
import
java.nio.ByteBuffer
;
import
java.util.HashMap
;
/**
 * A chunk of data, containing one or multiple pages.
 * <p>
 * Chunks are page aligned (each page is usually 4096 bytes).
 * There are at most 67 million (2^26) chunks,
 * each chunk is at most 2 GB large.
 * File format:
 * 1 byte: 'c'
 * 4 bytes: length
 * 4 bytes: chunk id (an incrementing number)
 * 4 bytes: pageCount
 * 8 bytes: metaRootPos
 * 8 bytes: maxLengthLive
 * [ Page ] *
 */
public class Chunk {

    /**
     * The chunk id.
     */
    final int id;

    /**
     * The start position within the file.
     */
    long start;

    /**
     * The length in bytes.
     */
    int length;

    /**
     * The number of pages.
     */
    int pageCount;

    /**
     * The sum of the max length of all pages.
     */
    long maxLength;

    /**
     * The sum of the max length of all pages that are in use.
     */
    long maxLengthLive;

    /**
     * The garbage collection priority.
     */
    int collectPriority;

    /**
     * The position of the meta root.
     */
    long metaRootPos;

    /**
     * The version stored in this chunk.
     */
    long version;

    Chunk(int id) {
        this.id = id;
    }

    /**
     * Read the header from the byte buffer.
     *
     * @param buff the source buffer
     * @param start the start of the chunk in the file
     * @return the chunk
     */
    static Chunk fromHeader(ByteBuffer buff, long start) {
        // the header always begins with the marker byte 'c'
        if (buff.get() != 'c') {
            throw new RuntimeException("File corrupt");
        }
        int len = buff.getInt();
        int chunkId = buff.getInt();
        int pages = buff.getInt();
        long metaRoot = buff.getLong();
        long max = buff.getLong();
        long maxLive = buff.getLong();
        Chunk chunk = new Chunk(chunkId);
        chunk.start = start;
        chunk.length = len;
        chunk.pageCount = pages;
        chunk.metaRootPos = metaRoot;
        chunk.maxLength = max;
        chunk.maxLengthLive = maxLive;
        return chunk;
    }

    /**
     * Write the header.
     *
     * @param buff the target buffer
     */
    void writeHeader(ByteBuffer buff) {
        // mirror of fromHeader: marker byte followed by the fixed fields
        buff.put((byte) 'c');
        buff.putInt(length);
        buff.putInt(id);
        buff.putInt(pageCount);
        buff.putLong(metaRootPos);
        buff.putLong(maxLength);
        buff.putLong(maxLengthLive);
    }

    /**
     * Build a block from the given string.
     *
     * @param s the string
     * @return the block
     */
    public static Chunk fromString(String s) {
        HashMap<String, String> fields = DataUtils.parseMap(s);
        Chunk chunk = new Chunk(Integer.parseInt(fields.get("id")));
        chunk.start = Long.parseLong(fields.get("start"));
        chunk.length = Integer.parseInt(fields.get("length"));
        chunk.pageCount = Integer.parseInt(fields.get("pageCount"));
        chunk.maxLength = Long.parseLong(fields.get("maxLength"));
        chunk.maxLengthLive = Long.parseLong(fields.get("maxLengthLive"));
        chunk.metaRootPos = Long.parseLong(fields.get("metaRoot"));
        chunk.version = Long.parseLong(fields.get("version"));
        return chunk;
    }

    /**
     * Get the fill rate in percent (0..100); the ratio of live page length
     * to the total page length.
     *
     * @return the fill rate
     */
    public int getFillRate() {
        if (maxLength == 0) {
            return 0;
        }
        return (int) (100 * maxLengthLive / maxLength);
    }

    public int hashCode() {
        // chunks are identified by their id alone, consistent with equals
        return id;
    }

    public boolean equals(Object o) {
        if (!(o instanceof Chunk)) {
            return false;
        }
        return ((Chunk) o).id == id;
    }

    /**
     * Get the chunk data as a string.
     *
     * @return the string
     */
    public String asString() {
        StringBuilder buff = new StringBuilder();
        buff.append("id:").append(id).append(',');
        buff.append("start:").append(start).append(',');
        buff.append("length:").append(length).append(',');
        buff.append("pageCount:").append(pageCount).append(',');
        buff.append("maxLength:").append(maxLength).append(',');
        buff.append("maxLengthLive:").append(maxLengthLive).append(',');
        buff.append("metaRoot:").append(metaRootPos).append(',');
        buff.append("version:").append(version);
        return buff.toString();
    }

}
h2/src/tools/org/h2/dev/store/btree/Cursor.java
deleted
100644 → 0
浏览文件 @
b222ae75
/*
* Copyright 2004-2011 H2 Group. Multiple-Licensed under the H2 License,
* Version 1.0, and under the Eclipse Public License, Version 1.0
* (http://h2database.com/html/license.html).
* Initial Developer: H2 Group
*/
package
org
.
h2
.
dev
.
store
.
btree
;
import
java.util.Iterator
;
/**
 * A cursor to iterate over elements in ascending order.
 * <p>
 * The cursor keeps a chain of CursorPos objects from the current leaf up to
 * the root; for inner nodes the stored index is the index of the NEXT child
 * to visit (see min), so that fetchNext can resume after ascending.
 *
 * @param <K> the key type
 */
public class Cursor<K> implements Iterator<K> {

    protected final MVMap<K, ?> map;

    // the lower bound of the iteration, or null for "from the first key"
    protected final K from;

    // the current position chain (leaf at the head, root at the tail)
    protected CursorPos pos;

    // the key returned by the next call to next(); null when exhausted
    protected K current;

    private final Page root;

    // whether the initial descent to "from" has been performed
    private boolean initialized;

    protected Cursor(MVMap<K, ?> map, Page root, K from) {
        this.map = map;
        this.root = root;
        this.from = from;
    }

    public K next() {
        // hasNext() performs the lazy initialization if needed
        hasNext();
        K c = current;
        fetchNext();
        return c;
    }

    public boolean hasNext() {
        if (!initialized) {
            // position on the first key >= from, then pre-fetch it
            min(root, from);
            initialized = true;
            fetchNext();
        }
        return current != null;
    }

    /**
     * Skip over that many entries. This method is relatively fast (for this map
     * implementation) even if many entries need to be skipped.
     *
     * @param n the number of entries to skip
     */
    public void skip(long n) {
        if (!hasNext()) {
            return;
        }
        if (n < 10) {
            // for a few entries, just advance step by step
            while (n-- > 0) {
                fetchNext();
            }
            return;
        }
        // for larger jumps, compute the target key by index and re-descend
        // from the root (O(log(size)) instead of O(n))
        long index = map.getKeyIndex(current);
        K k = map.getKey(index + n);
        min(root, k);
        fetchNext();
    }

    public void remove() {
        // this is a read-only cursor
        throw new UnsupportedOperationException();
    }

    /**
     * Fetch the next entry that is equal or larger than the given key, starting
     * from the given page. This builds the position chain in the field "pos".
     *
     * @param p the page to start
     * @param from the key to search, or null for the first key
     */
    protected void min(Page p, K from) {
        while (true) {
            if (p.isLeaf()) {
                int x = from == null ? 0 : p.binarySearch(from);
                if (x < 0) {
                    // not an exact match: start at the insertion point
                    x = -x - 1;
                }
                pos = new CursorPos(p, x, pos);
                break;
            }
            int x = from == null ? -1 : p.binarySearch(from);
            if (x < 0) {
                x = -x - 1;
            } else {
                x++;
            }
            // store x + 1: the index of the NEXT child to visit once
            // fetchNext ascends back to this node (x itself is descended
            // into right now)
            pos = new CursorPos(p, x + 1, pos);
            p = p.getChildPage(x);
        }
    }

    /**
     * Fetch the next entry if there is one, storing it in "current"
     * (or null if the iteration is exhausted).
     */
    @SuppressWarnings("unchecked")
    protected void fetchNext() {
        while (pos != null) {
            if (pos.index < pos.page.getKeyCount()) {
                // more keys in the current leaf
                current = (K) pos.page.getKey(pos.index++);
                return;
            }
            // leaf exhausted: ascend one level
            pos = pos.parent;
            if (pos == null) {
                break;
            }
            if (pos.index < map.getChildPageCount(pos.page)) {
                // descend into the next child (pos.index was pre-advanced
                // by min); null means "from the first key of that subtree"
                min(pos.page.getChildPage(pos.index++), null);
            }
        }
        current = null;
    }

}
h2/src/tools/org/h2/dev/store/btree/CursorPos.java
deleted
100644 → 0
浏览文件 @
b222ae75
/*
* Copyright 2004-2011 H2 Group. Multiple-Licensed under the H2 License,
* Version 1.0, and under the Eclipse Public License, Version 1.0
* (http://h2database.com/html/license.html).
* Initial Developer: H2 Group
*/
package
org
.
h2
.
dev
.
store
.
btree
;
/**
 * A position in a cursor: one page plus the current index within that page.
 * Instances are chained through the parent reference, forming a path from a
 * leaf up to the root of the tree.
 */
public class CursorPos {

    /**
     * The current page.
     */
    public final Page page;

    /**
     * The current index within the page (mutable, advanced by the cursor).
     */
    public int index;

    /**
     * The position in the parent page, if any (null at the root).
     */
    public final CursorPos parent;

    public CursorPos(Page page, int index, CursorPos parent) {
        this.parent = parent;
        this.index = index;
        this.page = page;
    }

}
h2/src/tools/org/h2/dev/store/btree/DataUtils.java
deleted
100644 → 0
浏览文件 @
b222ae75
/*
* Copyright 2004-2011 H2 Group. Multiple-Licensed under the H2 License,
* Version 1.0, and under the Eclipse Public License, Version 1.0
* (http://h2database.com/html/license.html).
* Initial Developer: H2 Group
*/
package
org
.
h2
.
dev
.
store
.
btree
;
import
java.io.EOFException
;
import
java.io.IOException
;
import
java.io.OutputStream
;
import
java.nio.ByteBuffer
;
import
java.nio.channels.FileChannel
;
import
java.util.ArrayList
;
import
java.util.Collections
;
import
java.util.HashMap
;
import
org.h2.util.New
;
/**
 * Utility methods for the store: variable-size integer encoding, a compact
 * string encoding, page position encoding/decoding, array copying, file
 * channel I/O, a key-value map serialization format, and a Fletcher-32
 * checksum.
 */
public class DataUtils {

    /**
     * The type for leaf page.
     */
    public static final int PAGE_TYPE_LEAF = 0;

    /**
     * The type for node page.
     */
    public static final int PAGE_TYPE_NODE = 1;

    /**
     * The bit mask for compressed pages.
     */
    public static final int PAGE_COMPRESSED = 2;

    /**
     * The maximum length of a variable size int.
     */
    public static final int MAX_VAR_INT_LEN = 5;

    /**
     * The maximum length of a variable size long.
     */
    public static final int MAX_VAR_LONG_LEN = 10;

    /**
     * The maximum integer that needs less space when using variable size
     * encoding (only 3 bytes instead of 4).
     */
    public static final int COMPRESSED_VAR_INT_MAX = 0x1fffff;

    /**
     * The maximum long that needs less space when using variable size
     * encoding (only 7 bytes instead of 8).
     */
    public static final long COMPRESSED_VAR_LONG_MAX = 0x1ffffffffffffL;

    /**
     * The estimated number of bytes used per page object.
     */
    public static final int PAGE_MEMORY = 128;

    /**
     * The estimated number of bytes used per child entry.
     */
    public static final int PAGE_MEMORY_CHILD = 16;

    /**
     * Get the length of the variable size int.
     * The encoding uses 7 payload bits per byte, so the length is 1..5.
     *
     * @param x the value
     * @return the length in bytes
     */
    public static int getVarIntLen(int x) {
        // (-1 << n) masks out the low n bits; if nothing remains, the value
        // fits in n payload bits
        if ((x & (-1 << 7)) == 0) {
            return 1;
        } else if ((x & (-1 << 14)) == 0) {
            return 2;
        } else if ((x & (-1 << 21)) == 0) {
            return 3;
        } else if ((x & (-1 << 28)) == 0) {
            return 4;
        }
        return 5;
    }

    /**
     * Get the length of the variable size long (1..10 bytes).
     *
     * @param x the value
     * @return the length in bytes
     */
    public static int getVarLongLen(long x) {
        int i = 1;
        while (true) {
            // consume 7 payload bits at a time (unsigned shift, so negative
            // values terminate after 10 iterations)
            x >>>= 7;
            if (x == 0) {
                return i;
            }
            i++;
        }
    }

    /**
     * Read a variable size int. The high bit of each byte marks
     * "more bytes follow".
     *
     * @param buff the source buffer
     * @return the value
     */
    public static int readVarInt(ByteBuffer buff) {
        int b = buff.get();
        if (b >= 0) {
            // single-byte value (high bit not set)
            return b;
        }
        // a separate function so that this one can be inlined
        return readVarIntRest(buff, b);
    }

    private static int readVarIntRest(ByteBuffer buff, int b) {
        // accumulate 7 bits per byte, little-endian (lowest bits first)
        int x = b & 0x7f;
        b = buff.get();
        if (b >= 0) {
            return x | (b << 7);
        }
        x |= (b & 0x7f) << 7;
        b = buff.get();
        if (b >= 0) {
            return x | (b << 14);
        }
        x |= (b & 0x7f) << 14;
        b = buff.get();
        if (b >= 0) {
            return x | b << 21;
        }
        // 5th byte: the remaining 4 bits (shifted into the top of the int)
        x |= ((b & 0x7f) << 21) | (buff.get() << 28);
        return x;
    }

    /**
     * Read a variable size long. Same format as readVarInt, up to 10 bytes.
     *
     * @param buff the source buffer
     * @return the value
     */
    public static long readVarLong(ByteBuffer buff) {
        long x = buff.get();
        if (x >= 0) {
            return x;
        }
        x &= 0x7f;
        for (int s = 7; s < 64; s += 7) {
            long b = buff.get();
            x |= (b & 0x7f) << s;
            if (b >= 0) {
                // high bit clear: last byte of the encoding
                break;
            }
        }
        return x;
    }

    /**
     * Write a variable size int.
     *
     * @param out the output stream
     * @param x the value
     */
    public static void writeVarInt(OutputStream out, int x) throws IOException {
        // emit 7 bits at a time with the continuation bit set, lowest first
        while ((x & ~0x7f) != 0) {
            out.write((byte) (0x80 | (x & 0x7f)));
            x >>>= 7;
        }
        out.write((byte) x);
    }

    /**
     * Write a variable size int.
     *
     * @param buff the source buffer
     * @param x the value
     */
    public static void writeVarInt(ByteBuffer buff, int x) {
        while ((x & ~0x7f) != 0) {
            buff.put((byte) (0x80 | (x & 0x7f)));
            x >>>= 7;
        }
        buff.put((byte) x);
    }

    /**
     * Write characters from a string (without the length).
     * <p>
     * NOTE: this is a UTF-8-like encoding, but NOT standard UTF-8: the
     * continuation bytes are not OR'ed with 0x80. It only round-trips
     * through readString below, which masks with 0x3f either way; do not
     * decode this data with a standard UTF-8 decoder.
     *
     * @param buff the target buffer
     * @param s the string
     * @param len the number of characters
     */
    public static void writeStringData(ByteBuffer buff, String s, int len) {
        for (int i = 0; i < len; i++) {
            int c = s.charAt(i);
            if (c < 0x80) {
                // ASCII: one byte
                buff.put((byte) c);
            } else if (c >= 0x800) {
                // three bytes: 4 + 6 + 6 bits
                buff.put((byte) (0xe0 | (c >> 12)));
                buff.put((byte) (((c >> 6) & 0x3f)));
                buff.put((byte) (c & 0x3f));
            } else {
                // two bytes: 5 + 6 bits
                buff.put((byte) (0xc0 | (c >> 6)));
                buff.put((byte) (c & 0x3f));
            }
        }
    }

    /**
     * Read a string encoded by writeStringData.
     *
     * @param buff the source buffer
     * @param len the number of characters
     * @return the value
     */
    public static String readString(ByteBuffer buff, int len) {
        char[] chars = new char[len];
        for (int i = 0; i < len; i++) {
            int x = buff.get() & 0xff;
            if (x < 0x80) {
                chars[i] = (char) x;
            } else if (x >= 0xe0) {
                // three-byte sequence; continuation bytes masked with 0x3f
                chars[i] = (char) (((x & 0xf) << 12) + ((buff.get() & 0x3f) << 6) + (buff.get() & 0x3f));
            } else {
                // two-byte sequence
                chars[i] = (char) (((x & 0x1f) << 6) + (buff.get() & 0x3f));
            }
        }
        return new String(chars);
    }

    /**
     * Write a variable size long.
     *
     * @param buff the target buffer
     * @param x the value
     */
    public static void writeVarLong(ByteBuffer buff, long x) {
        while ((x & ~0x7f) != 0) {
            buff.put((byte) (0x80 | (x & 0x7f)));
            x >>>= 7;
        }
        buff.put((byte) x);
    }

    /**
     * Write a variable size long.
     *
     * @param out the output stream
     * @param x the value
     */
    public static void writeVarLong(OutputStream out, long x) throws IOException {
        while ((x & ~0x7f) != 0) {
            out.write((byte) (0x80 | (x & 0x7f)));
            x >>>= 7;
        }
        out.write((byte) x);
    }

    /**
     * Copy the elements of an array, with a gap: dst receives src[0..gapIndex)
     * followed by src[gapIndex..oldSize) shifted one slot to the right,
     * leaving dst[gapIndex] free for an insertion.
     *
     * @param src the source array
     * @param dst the target array
     * @param oldSize the size of the old array
     * @param gapIndex the index of the gap
     */
    public static void copyWithGap(Object src, Object dst, int oldSize, int gapIndex) {
        if (gapIndex > 0) {
            System.arraycopy(src, 0, dst, 0, gapIndex);
        }
        if (gapIndex < oldSize) {
            System.arraycopy(src, gapIndex, dst, gapIndex + 1, oldSize - gapIndex);
        }
    }

    /**
     * Copy the elements of an array, and remove one element.
     *
     * @param src the source array
     * @param dst the target array
     * @param oldSize the size of the old array
     * @param removeIndex the index of the entry to remove
     */
    public static void copyExcept(Object src, Object dst, int oldSize, int removeIndex) {
        if (removeIndex > 0 && oldSize > 0) {
            System.arraycopy(src, 0, dst, 0, removeIndex);
        }
        if (removeIndex < oldSize) {
            System.arraycopy(src, removeIndex + 1, dst, removeIndex, oldSize - removeIndex - 1);
        }
    }

    /**
     * Read from a file channel until the buffer is full.
     * Throws EOFException if end-of-file is reached before the buffer is
     * full. The buffer is rewind at the end.
     *
     * @param file the file channel
     * @param pos the absolute position within the file
     * @param dst the byte buffer
     */
    public static void readFully(FileChannel file, long pos, ByteBuffer dst) throws IOException {
        do {
            int len = file.read(dst, pos);
            if (len < 0) {
                throw new EOFException();
            }
            pos += len;
        } while (dst.remaining() > 0);
        dst.rewind();
    }

    /**
     * Write to a file channel, repeating until the whole buffer has been
     * written (a single write call may write fewer bytes than requested).
     *
     * @param file the file channel
     * @param pos the absolute position within the file
     * @param src the source buffer
     */
    public static void writeFully(FileChannel file, long pos, ByteBuffer src) throws IOException {
        int off = 0;
        do {
            int len = file.write(src, pos + off);
            off += len;
        } while (src.remaining() > 0);
    }

    /**
     * Convert the length to a length code 0..31. 31 means more than 1 MB.
     * The code stores a coarse upper bound of the length (see
     * getPageMaxLength for the decoding).
     *
     * @param len the length
     * @return the length code
     */
    public static int encodeLength(int len) {
        if (len <= 32) {
            return 0;
        }
        int x = len;
        int shift = 0;
        // repeatedly halve, rounding up, counting the shifts
        while (x > 3) {
            shift++;
            x = (x >> 1) + (x & 1);
        }
        shift = Math.max(0, shift - 4);
        // code = 5 bits: 4 bits of shift, 1 low bit of the mantissa
        int code = (shift << 1) + (x & 1);
        return Math.min(31, code);
    }

    /**
     * Get the chunk id from the position.
     * Position layout (64 bits): chunk id (26 bits) | offset (32 bits) |
     * length code (5 bits) | page type (1 bit) — see getPagePos.
     *
     * @param pos the position
     * @return the chunk id
     */
    public static int getPageChunkId(long pos) {
        return (int) (pos >>> 38);
    }

    /**
     * Get the maximum length for the given code.
     * For the code 31, Integer.MAX_VALUE is returned.
     *
     * @param pos the position
     * @return the maximum length
     */
    public static int getPageMaxLength(long pos) {
        int code = (int) ((pos >> 1) & 31);
        if (code == 31) {
            return Integer.MAX_VALUE;
        }
        // inverse of encodeLength: mantissa (2 or 3) << (shift + 4)
        return (2 + (code & 1)) << ((code >> 1) + 4);
    }

    /**
     * Get the offset from the position.
     *
     * @param pos the position
     * @return the offset
     */
    public static int getPageOffset(long pos) {
        return (int) (pos >> 6);
    }

    /**
     * Get the page type from the position.
     *
     * @param pos the position
     * @return the page type (PAGE_TYPE_NODE or PAGE_TYPE_LEAF)
     */
    public static int getPageType(long pos) {
        return ((int) pos) & 1;
    }

    /**
     * Get the position of this page. The following information is encoded in
     * the position: the chunk id, the offset, the maximum length, and the type
     * (node or leaf).
     *
     * @param chunkId the chunk id
     * @param offset the offset
     * @param length the length
     * @param type the page type (1 for node, 0 for leaf)
     * @return the position
     */
    public static long getPagePos(int chunkId, int offset, int length, int type) {
        long pos = (long) chunkId << 38;
        pos |= (long) offset << 6;
        pos |= encodeLength(length) << 1;
        pos |= type;
        return pos;
    }

    /**
     * Calculate a check value for the given integer. A check value is mean to
     * verify the data is consistent with a high probability, but not meant to
     * protect against media failure or deliberate changes.
     *
     * @param x the value
     * @return the check value
     */
    public static short getCheckValue(int x) {
        // fold the upper half into the lower half
        return (short) ((x >> 16) ^ x);
    }

    /**
     * Append a map to the string builder, sorted by key.
     *
     * @param buff the target buffer
     * @param map the map
     * @return the string builder
     */
    public static StringBuilder appendMap(StringBuilder buff, HashMap<String, ?> map) {
        ArrayList<String> list = New.arrayList(map.keySet());
        // sort for a deterministic output, so equal maps serialize equally
        Collections.sort(list);
        for (String k : list) {
            appendMap(buff, k, map.get(k));
        }
        return buff;
    }

    /**
     * Append a key-value pair to the string builder. Keys may not contain a
     * colon. Values that contain a comma or a double quote are enclosed in
     * double quotes, with special characters escaped using a backslash.
     *
     * @param buff the target buffer
     * @param key the key
     * @param value the value
     */
    public static void appendMap(StringBuilder buff, String key, Object value) {
        if (buff.length() > 0) {
            buff.append(',');
        }
        buff.append(key).append(':');
        String v = value.toString();
        if (v.indexOf(',') < 0 && v.indexOf('\"') < 0) {
            // plain value: no quoting needed
            buff.append(value);
        } else {
            // quote the value and escape embedded double quotes
            buff.append('\"');
            for (int i = 0, size = v.length(); i < size; i++) {
                char c = v.charAt(i);
                if (c == '\"') {
                    buff.append('\\');
                }
                buff.append(c);
            }
            buff.append('\"');
        }
    }

    /**
     * Parse a key-value pair list produced by appendMap.
     * Assumes well-formed input: on malformed input (a key without ':') the
     * indexOf below returns -1 and substring throws
     * StringIndexOutOfBoundsException.
     *
     * @param s the list
     * @return the map
     */
    public static HashMap<String, String> parseMap(String s) {
        HashMap<String, String> map = New.hashMap();
        for (int i = 0, size = s.length(); i < size;) {
            int startKey = i;
            i = s.indexOf(':', i);
            String key = s.substring(startKey, i++);
            StringBuilder buff = new StringBuilder();
            // read the value up to the next unquoted comma
            while (i < size) {
                char c = s.charAt(i++);
                if (c == ',') {
                    break;
                } else if (c == '\"') {
                    // quoted section: read until the closing quote,
                    // honoring backslash escapes
                    while (i < size) {
                        c = s.charAt(i++);
                        if (c == '\\') {
                            c = s.charAt(i++);
                        } else if (c == '\"') {
                            break;
                        }
                        buff.append(c);
                    }
                } else {
                    buff.append(c);
                }
            }
            map.put(key, buff.toString());
        }
        return map;
    }

    /**
     * Calculate the Fletcher32 checksum.
     *
     * @param bytes the bytes
     * @param length the message length (must be a multiple of 2)
     * @return the checksum
     */
    public static int getFletcher32(byte[] bytes, int length) {
        int s1 = 0xffff, s2 = 0xffff;
        for (int i = 0; i < length;) {
            // process in groups of up to 359 16-bit words before folding,
            // presumably sized so the running int sums cannot overflow
            // — TODO confirm the 718-byte bound
            for (int end = Math.min(i + 718, length); i < end;) {
                // big-endian 16-bit word
                int x = ((bytes[i++] & 0xff) << 8) | (bytes[i++] & 0xff);
                s2 += s1 += x;
            }
            // fold the carries back into the low 16 bits
            s1 = (s1 & 0xffff) + (s1 >>> 16);
            s2 = (s2 & 0xffff) + (s2 >>> 16);
        }
        s1 = (s1 & 0xffff) + (s1 >>> 16);
        s2 = (s2 & 0xffff) + (s2 >>> 16);
        return (s2 << 16) | s1;
    }

}
h2/src/tools/org/h2/dev/store/btree/MVMap.java
deleted
100644 → 0
浏览文件 @
b222ae75
/*
* Copyright 2004-2011 H2 Group. Multiple-Licensed under the H2 License,
* Version 1.0, and under the Eclipse Public License, Version 1.0
* (http://h2database.com/html/license.html).
* Initial Developer: H2 Group
*/
package
org
.
h2
.
dev
.
store
.
btree
;
import
java.util.AbstractList
;
import
java.util.AbstractMap
;
import
java.util.AbstractSet
;
import
java.util.ArrayList
;
import
java.util.HashMap
;
import
java.util.Iterator
;
import
java.util.List
;
import
java.util.Map
;
import
java.util.Set
;
import
java.util.concurrent.ConcurrentMap
;
import
org.h2.dev.store.type.DataType
;
import
org.h2.util.New
;
/**
* A stored map.
*
* @param <K> the key class
* @param <V> the value class
*/
public
class
MVMap
<
K
,
V
>
extends
AbstractMap
<
K
,
V
>
implements
ConcurrentMap
<
K
,
V
>
{
/**
* The store.
*/
protected
MVStore
store
;
/**
* The current root page (may not be null).
*/
protected
volatile
Page
root
;
private
int
id
;
private
String
name
;
private
long
createVersion
;
private
final
DataType
keyType
;
private
final
DataType
valueType
;
private
ArrayList
<
Page
>
oldRoots
=
new
ArrayList
<
Page
>();
private
boolean
closed
;
private
boolean
readOnly
;
public
MVMap
(
DataType
keyType
,
DataType
valueType
)
{
this
.
keyType
=
keyType
;
this
.
valueType
=
valueType
;
this
.
root
=
Page
.
createEmpty
(
this
,
-
1
);
}
/**
* Open this map with the given store and configuration.
*
* @param store the store
* @param config the configuration
*/
public
void
open
(
MVStore
store
,
HashMap
<
String
,
String
>
config
)
{
this
.
store
=
store
;
this
.
id
=
Integer
.
parseInt
(
config
.
get
(
"id"
));
this
.
name
=
config
.
get
(
"name"
);
this
.
createVersion
=
Long
.
parseLong
(
config
.
get
(
"createVersion"
));
}
/**
* Add or replace a key-value pair.
*
* @param key the key (may not be null)
* @param value the value (may not be null)
* @return the old value if the key existed, or null otherwise
*/
@SuppressWarnings
(
"unchecked"
)
public
V
put
(
K
key
,
V
value
)
{
checkWrite
();
long
writeVersion
=
store
.
getCurrentVersion
();
Page
p
=
root
.
copyOnWrite
(
writeVersion
);
if
(
p
.
getMemory
()
>
store
.
getPageSize
()
&&
p
.
getKeyCount
()
>
1
)
{
int
at
=
p
.
getKeyCount
()
/
2
;
long
totalCount
=
p
.
getTotalCount
();
Object
k
=
p
.
getKey
(
at
);
Page
split
=
p
.
split
(
at
);
Object
[]
keys
=
{
k
};
long
[]
children
=
{
p
.
getPos
(),
split
.
getPos
()
};
Page
[]
childrenPages
=
{
p
,
split
};
long
[]
counts
=
{
p
.
getTotalCount
(),
split
.
getTotalCount
()
};
p
=
Page
.
create
(
this
,
writeVersion
,
1
,
keys
,
null
,
children
,
childrenPages
,
counts
,
totalCount
,
0
,
0
);
store
.
registerUnsavedPage
();
// now p is a node; insert continues
}
Object
result
=
put
(
p
,
writeVersion
,
key
,
value
);
newRoot
(
p
);
return
(
V
)
result
;
}
/**
* Add or update a key-value pair.
*
* @param p the page
* @param writeVersion the write version
* @param key the key (may not be null)
* @param value the value (may not be null)
* @return the old value, or null
*/
protected
Object
put
(
Page
p
,
long
writeVersion
,
Object
key
,
Object
value
)
{
if
(
p
.
isLeaf
())
{
int
index
=
p
.
binarySearch
(
key
);
if
(
index
<
0
)
{
index
=
-
index
-
1
;
p
.
insertLeaf
(
index
,
key
,
value
);
return
null
;
}
return
p
.
setValue
(
index
,
value
);
}
// p is a node
int
index
=
p
.
binarySearch
(
key
);
if
(
index
<
0
)
{
index
=
-
index
-
1
;
}
else
{
index
++;
}
Page
c
=
p
.
getChildPage
(
index
).
copyOnWrite
(
writeVersion
);
if
(
c
.
getMemory
()
>
store
.
getPageSize
()
&&
c
.
getKeyCount
()
>
1
)
{
// split on the way down
int
at
=
c
.
getKeyCount
()
/
2
;
Object
k
=
c
.
getKey
(
at
);
Page
split
=
c
.
split
(
at
);
p
.
setChild
(
index
,
split
);
p
.
insertNode
(
index
,
k
,
c
);
// now we are not sure where to add
return
put
(
p
,
writeVersion
,
key
,
value
);
}
Object
result
=
put
(
c
,
writeVersion
,
key
,
value
);
p
.
setChild
(
index
,
c
);
return
result
;
}
/**
* Get the first key, or null if the map is empty.
*
* @return the first key, or null
*/
public
K
firstKey
()
{
return
getFirstLast
(
true
);
}
/**
* Get the last key, or null if the map is empty.
*
* @return the last key, or null
*/
public
K
lastKey
()
{
return
getFirstLast
(
false
);
}
/**
* Get the key at the given index.
* <p>
* This is a O(log(size)) operation.
*
* @param index the index
* @return the key
*/
@SuppressWarnings
(
"unchecked"
)
public
K
getKey
(
long
index
)
{
checkOpen
();
if
(
index
<
0
||
index
>=
size
())
{
return
null
;
}
Page
p
=
root
;
long
offset
=
0
;
while
(
true
)
{
if
(
p
.
isLeaf
())
{
if
(
index
>=
offset
+
p
.
getKeyCount
())
{
return
null
;
}
return
(
K
)
p
.
getKey
((
int
)
(
index
-
offset
));
}
int
i
=
0
,
size
=
p
.
getChildPageCount
();
for
(;
i
<
size
;
i
++)
{
long
c
=
p
.
getCounts
(
i
);
if
(
index
<
c
+
offset
)
{
break
;
}
offset
+=
c
;
}
if
(
i
==
size
)
{
return
null
;
}
p
=
p
.
getChildPage
(
i
);
}
}
/**
* Get the key list. The list is a read-only representation of all keys.
* <p>
* The get and indexOf methods are O(log(size)) operations. The result of
* indexOf is cast to an int.
*
* @return the key list
*/
public
List
<
K
>
keyList
()
{
return
new
AbstractList
<
K
>()
{
public
K
get
(
int
index
)
{
return
getKey
(
index
);
}
public
int
size
()
{
return
MVMap
.
this
.
size
();
}
@SuppressWarnings
(
"unchecked"
)
public
int
indexOf
(
Object
key
)
{
return
(
int
)
getKeyIndex
((
K
)
key
);
}
};
}
/**
* Get the index of the given key in the map.
* <p>
* This is a O(log(size)) operation.
* <p>
* If the key was found, the returned value is the index in the key array.
* If not found, the returned value is negative, where -1 means the provided
* key is smaller than any keys. See also Arrays.binarySearch.
*
* @param key the key
* @return the index
*/
public
long
getKeyIndex
(
K
key
)
{
checkOpen
();
if
(
size
()
==
0
)
{
return
-
1
;
}
Page
p
=
root
;
long
offset
=
0
;
while
(
true
)
{
if
(
p
.
isLeaf
())
{
int
x
=
p
.
binarySearch
(
key
);
if
(
x
<
0
)
{
return
-
offset
+
x
;
}
return
offset
+
x
;
}
int
x
=
key
==
null
?
-
1
:
p
.
binarySearch
(
key
);
if
(
x
<
0
)
{
x
=
-
x
-
1
;
}
else
{
x
++;
}
for
(
int
i
=
0
;
i
<
x
;
i
++)
{
offset
+=
p
.
getCounts
(
i
);
}
p
=
p
.
getChildPage
(
x
);
}
}
@SuppressWarnings
(
"unchecked"
)
private
K
getFirstLast
(
boolean
first
)
{
checkOpen
();
if
(
size
()
==
0
)
{
return
null
;
}
Page
p
=
root
;
while
(
true
)
{
if
(
p
.
isLeaf
())
{
return
(
K
)
p
.
getKey
(
first
?
0
:
p
.
getKeyCount
()
-
1
);
}
p
=
p
.
getChildPage
(
first
?
0
:
p
.
getChildPageCount
()
-
1
);
}
}
/**
* Get the smallest key that is larger than the given key, or null if no
* such key exists.
*
* @param key the key (may not be null)
* @return the result
*/
public
K
higherKey
(
K
key
)
{
return
getMinMax
(
key
,
false
,
true
);
}
/**
* Get the smallest key that is larger or equal to this key.
*
* @param key the key (may not be null)
* @return the result
*/
public
K
ceilingKey
(
K
key
)
{
return
getMinMax
(
key
,
false
,
false
);
}
/**
* Get the largest key that is smaller or equal to this key.
*
* @param key the key (may not be null)
* @return the result
*/
public
K
floorKey
(
K
key
)
{
return
getMinMax
(
key
,
true
,
false
);
}
/**
* Get the largest key that is smaller than the given key, or null if no
* such key exists.
*
* @param key the key (may not be null)
* @return the result
*/
public
K
lowerKey
(
K
key
)
{
return
getMinMax
(
key
,
true
,
true
);
}
private
K
getMinMax
(
K
key
,
boolean
min
,
boolean
excluding
)
{
checkOpen
();
if
(
size
()
==
0
)
{
return
null
;
}
return
getMinMax
(
root
,
key
,
min
,
excluding
);
}
/**
 * Recursive worker for higher/ceiling/floor/lowerKey.
 * <p>
 * In a leaf, the candidate index is computed from the binary-search
 * result; in an internal node, the search descends into the candidate
 * child and, if that subtree yields nothing, moves one child over in
 * the search direction and retries.
 *
 * @param p the page to search (leaf or node)
 * @param key the reference key; null means "take the edge entry"
 * @param min true to search towards smaller keys
 * @param excluding true to exclude an exact match
 * @return the found key, or null if this subtree has no match
 */
@SuppressWarnings("unchecked")
private K getMinMax(Page p, K key, boolean min, boolean excluding) {
    if (p.isLeaf()) {
        if (key == null) {
            // no reference key: return the smallest or largest entry
            return (K) p.getKey(min ? 0 : p.getKeyCount() - 1);
        }
        int x = p.binarySearch(key);
        if (x < 0) {
            // not found: -x - 1 is the insertion point; for "min" the
            // candidate is the entry just before it, hence -x - 2
            x = -x - (min ? 2 : 1);
        } else if (excluding) {
            // exact match must be skipped: step one entry over
            x += min ? -1 : 1;
        }
        if (x < 0 || x >= p.getKeyCount()) {
            return null;
        }
        return (K) p.getKey(x);
    }
    int x;
    if (key == null) {
        // NOTE(review): for min == false this starts at getKeyCount() - 1
        // rather than the last child index; public callers forbid null
        // keys, so this branch appears unreachable from the API — confirm
        x = min ? 0 : p.getKeyCount() - 1;
    } else {
        x = p.binarySearch(key);
        if (x < 0) {
            x = -x - 1;
        } else {
            x++;
        }
    }
    while (true) {
        if (x < 0 || x >= p.getChildPageCount()) {
            return null;
        }
        K k = getMinMax(p.getChildPage(x), key, min, excluding);
        if (k != null) {
            return k;
        }
        // subtree exhausted: try the neighboring child in the search direction
        x += min ? -1 : 1;
    }
}
/**
 * Get the value stored for the given key.
 *
 * @param key the key
 * @return the value, or null if the key is not present
 */
@SuppressWarnings("unchecked")
public V get(Object key) {
    checkOpen();
    return (V) binarySearch(root, key);
}
/**
 * Look up the value for the given key, starting at the given page.
 *
 * @param p the page to start from
 * @param key the key
 * @return the value, or null if not found
 */
protected Object binarySearch(Page p, Object key) {
    // descend iteratively instead of recursing: each internal node
    // maps the search result to the child index to follow
    while (!p.isLeaf()) {
        int idx = p.binarySearch(key);
        idx = idx < 0 ? -idx - 1 : idx + 1;
        p = p.getChildPage(idx);
    }
    int idx = p.binarySearch(key);
    return idx >= 0 ? p.getValue(idx) : null;
}
/**
 * Check whether the given key is present. Note that a stored null value
 * is indistinguishable from an absent key.
 *
 * @param key the key
 * @return true if the key maps to a (non-null) value
 */
public boolean containsKey(Object key) {
    return get(key) != null;
}
/**
 * Get the leaf page that contains the given key.
 *
 * @param key the key
 * @return the page, or null if the key is not present
 */
protected Page getPage(K key) {
    return binarySearchPage(root, key);
}
/**
 * Find the leaf page holding the given key, starting from the given page.
 *
 * @param p the page to start from
 * @param key the key
 * @return the leaf page containing the key, or null if not found
 */
protected Page binarySearchPage(Page p, Object key) {
    // iterative descent (equivalent to the recursive formulation)
    while (!p.isLeaf()) {
        int idx = p.binarySearch(key);
        idx = idx < 0 ? -idx - 1 : idx + 1;
        p = p.getChildPage(idx);
    }
    return p.binarySearch(key) >= 0 ? p : null;
}
/**
 * Remove all entries from the map. The freed pages are released and a
 * fresh empty root is installed at the current store version.
 */
public void clear() {
    checkWrite();
    root.removeAllRecursive();
    newRoot(Page.createEmpty(this, store.getCurrentVersion()));
}
/**
 * Remove all entries, unregister the map from the store, and close it.
 * The store's meta map itself is never removed.
 */
public void removeMap() {
    if (this == store.getMetaMap()) {
        return;
    }
    checkWrite();
    root.removeAllRecursive();
    store.removeMap(name);
    close();
}
/**
 * Close the map: it becomes read-only, all retained old versions are
 * dropped, and the root reference is released.
 */
public void close() {
    closed = true;
    readOnly = true;
    removeAllOldVersions();
    root = null;
}
/**
 * Whether this map has been closed.
 *
 * @return true if closed
 */
public boolean isClosed() {
    return closed;
}
/**
 * Remove a key-value pair, if the key exists.
 *
 * @param key the key (may not be null)
 * @return the old value if the key existed, or null otherwise
 */
public V remove(Object key) {
    checkWrite();
    long version = store.getCurrentVersion();
    // copy-on-write: detach the root for this write version first
    Page newRootPage = root.copyOnWrite(version);
    @SuppressWarnings("unchecked")
    V removed = (V) remove(newRootPage, version, key);
    newRoot(newRootPage);
    return removed;
}
/**
 * Add a key-value pair only if the key is not yet present.
 *
 * @param key the key (may not be null)
 * @param value the new value
 * @return the existing value if the key was present, or null otherwise
 */
public synchronized V putIfAbsent(K key, V value) {
    V existing = get(key);
    if (existing == null) {
        put(key, value);
    }
    return existing;
}
/**
 * Remove a key-value pair only if the stored value equals the given one.
 *
 * @param key the key (may not be null)
 * @param value the expected value
 * @return true if the entry was removed
 */
public synchronized boolean remove(Object key, Object value) {
    V old = get(key);
    // BUG FIX: the old code called old.equals(value) unconditionally and
    // threw a NullPointerException when the key was absent; the
    // conditional-remove contract is to return false in that case
    if (old != null && old.equals(value)) {
        remove(key);
        return true;
    }
    return false;
}
/**
 * Replace the value for an existing key, but only if the stored value
 * equals the expected one.
 *
 * @param key the key (may not be null)
 * @param oldValue the expected current value
 * @param newValue the new value
 * @return true if the value was replaced
 */
public synchronized boolean replace(K key, V oldValue, V newValue) {
    V old = get(key);
    // BUG FIX: the old code called old.equals(oldValue) unconditionally
    // and threw a NullPointerException when the key was absent; the
    // conditional-replace contract is to return false in that case
    if (old != null && old.equals(oldValue)) {
        put(key, newValue);
        return true;
    }
    return false;
}
/**
 * Replace the value for an existing key; a missing key is left untouched.
 *
 * @param key the key (may not be null)
 * @param value the new value
 * @return the previous value, or null if the key was absent
 */
public synchronized V replace(K key, V value) {
    V previous = get(key);
    if (previous == null) {
        return null;
    }
    put(key, value);
    return previous;
}
/**
 * Remove a key-value pair from the subtree rooted at the given
 * (already copy-on-write) page.
 * <p>
 * The page p must already be writable for writeVersion; children on the
 * search path are copied on demand. Empty pages are released back to
 * the store.
 *
 * @param p the page (may not be null; must be writable)
 * @param writeVersion the write version
 * @param key the key to remove
 * @return the old value, or null if the key did not exist
 */
protected Object remove(Page p, long writeVersion, Object key) {
    int index = p.binarySearch(key);
    Object result = null;
    if (p.isLeaf()) {
        if (index >= 0) {
            result = p.getValue(index);
            p.remove(index);
            if (p.getKeyCount() == 0) {
                // leaf became empty: free its storage
                removePage(p);
            }
        }
        return result;
    }
    // node: map the search result to the child index to descend into
    if (index < 0) {
        index = -index - 1;
    } else {
        index++;
    }
    Page cOld = p.getChildPage(index);
    // copy the child before modifying it (copy-on-write)
    Page c = cOld.copyOnWrite(writeVersion);
    long oldCount = c.getTotalCount();
    result = remove(c, writeVersion, key);
    if (oldCount == c.getTotalCount()) {
        // nothing was removed in the subtree
        return null;
    }
    if (c.getTotalCount() == 0) {
        // this child became empty and was deleted
        if (p.getKeyCount() == 0) {
            // the parent has no separator keys left either: it is
            // empty too, so release it as well
            p.setChild(index, c);
            removePage(p);
        } else {
            // drop the key and child reference from the parent
            p.remove(index);
        }
    } else {
        // re-attach the (copied, modified) child
        p.setChild(index, c);
    }
    return result;
}
/**
 * Use the new root page from now on. If the version changed, the
 * previous root is queued in oldRoots (at most one entry per version)
 * so that old versions can still be read, and the store is notified
 * that this map has unsaved changes.
 *
 * @param newRoot the new root page
 */
protected void newRoot(Page newRoot) {
    if (root != newRoot) {
        removeUnusedOldVersions();
        if (root.getVersion() != newRoot.getVersion()) {
            ArrayList<Page> list = oldRoots;
            if (list.size() > 0) {
                Page last = list.get(list.size() - 1);
                // keep only one old root per version
                if (last.getVersion() != root.getVersion()) {
                    list.add(root);
                }
            } else {
                list.add(root);
            }
            store.markChanged(this);
        }
        root = newRoot;
    }
}
/**
 * Check whether this map has any unsaved changes (i.e. old roots are
 * queued that have not yet been written out).
 *
 * @return true if there are unsaved changes
 */
public boolean hasUnsavedChanges() {
    return oldRoots.size() > 0;
}
/**
 * Compare two keys using this map's key type.
 *
 * @param a the first key
 * @param b the second key
 * @return -1 if the first key is smaller, 1 if bigger, 0 if equal
 */
int compare(Object a, Object b) {
    return keyType.compare(a, b);
}
/**
 * Get the key data type.
 *
 * @return the key type
 */
protected DataType getKeyType() {
    return keyType;
}
/**
 * Get the value data type.
 *
 * @return the value type
 */
protected DataType getValueType() {
    return valueType;
}
/**
 * Read a page of this map from the store.
 *
 * @param pos the position of the page
 * @return the page
 */
Page readPage(long pos) {
    return store.readPage(this, pos);
}
/**
 * Set the position of the root page.
 *
 * @param rootPos the position; 0 means an empty map
 */
void setRootPos(long rootPos) {
    if (rootPos == 0) {
        root = Page.createEmpty(this, -1);
    } else {
        root = readPage(rootPos);
    }
}
/**
 * Iterate over all keys, starting at the given key.
 *
 * @param from the first key to return (null for the first key)
 * @return the iterator
 */
public Cursor<K> keyIterator(K from) {
    checkOpen();
    return new Cursor<K>(this, root, from);
}
/**
 * Iterate over all keys that live in pages changed since the given
 * version.
 *
 * @param version the old version to compare against
 * @return the iterator
 */
public Iterator<K> changeIterator(long version) {
    checkOpen();
    MVMap<K, V> older = openVersion(version);
    return new ChangeCursor<K, V>(this, root, older.root);
}
/**
 * Get a snapshot of all entries. The returned set is a copy; changes
 * to it are not reflected in the map.
 *
 * @return the entry set
 */
public Set<Map.Entry<K, V>> entrySet() {
    HashMap<K, V> snapshot = new HashMap<K, V>();
    for (K key : keySet()) {
        snapshot.put(key, get(key));
    }
    return snapshot.entrySet();
}
/**
 * Get a read-only view over the keys. Iteration is pinned to the root
 * page that was current when this method was called; size and contains
 * always reflect the live map.
 *
 * @return the key set view
 */
public Set<K> keySet() {
    checkOpen();
    final MVMap<K, V> self = this;
    // pin the current root so the iterator sees a consistent snapshot
    final Page snapshot = this.root;
    return new AbstractSet<K>() {

        @Override
        public Iterator<K> iterator() {
            return new Cursor<K>(self, snapshot, null);
        }

        @Override
        public int size() {
            return MVMap.this.size();
        }

        @Override
        public boolean contains(Object o) {
            return MVMap.this.containsKey(o);
        }

    };
}
/**
 * Get the current root page.
 *
 * @return the root page
 */
public Page getRoot() {
    return root;
}
/**
 * Get the map name.
 *
 * @return the name
 */
String getName() {
    return name;
}
/**
 * Get the store this map belongs to.
 *
 * @return the store
 */
MVStore getStore() {
    return store;
}
/**
 * Get the map id (unique within the store).
 *
 * @return the id
 */
int getId() {
    return id;
}
/**
 * Roll this map back to the given version: if the map did not yet exist
 * at that version it is removed entirely; otherwise the newest queued
 * old root with a version below the target becomes the root again.
 *
 * @param version the version to roll back to
 */
void rollbackTo(long version) {
    checkWrite();
    removeUnusedOldVersions();
    if (version <= createVersion) {
        // the map did not exist at that version
        removeMap();
    } else if (root.getVersion() >= version) {
        // iterating in descending order -
        // this is not terribly efficient if there are many versions
        ArrayList<Page> list = oldRoots;
        while (list.size() > 0) {
            int i = list.size() - 1;
            Page p = list.get(i);
            root = p;
            list.remove(i);
            if (p.getVersion() < version) {
                // found the newest root older than the target version
                break;
            }
        }
    }
}
/**
 * Forget all queued old versions. A fresh list instance is installed
 * because another thread might still be iterating over the old one.
 */
void removeAllOldVersions() {
    oldRoots = new ArrayList<Page>();
}
/**
 * Drop queued old versions that are older than the store's retention
 * version. A new list instance is installed because another thread
 * might still be iterating over the old one.
 */
void removeUnusedOldVersions() {
    long oldest = store.getRetainVersion();
    if (oldest == -1) {
        return;
    }
    int idx = searchRoot(oldest);
    if (idx < 0) {
        return;
    }
    ArrayList<Page> kept = new ArrayList<Page>();
    kept.addAll(oldRoots.subList(idx, oldRoots.size()));
    oldRoots = kept;
}
/**
 * Change whether this map is read-only.
 *
 * @param readOnly the new read-only flag
 */
public void setReadOnly(boolean readOnly) {
    this.readOnly = readOnly;
}
/**
 * Whether this map is read-only.
 *
 * @return true if read-only
 */
public boolean isReadOnly() {
    return readOnly;
}
/**
 * Verify the map is still open.
 *
 * @throws IllegalStateException if the map is closed
 */
protected void checkOpen() {
    if (!closed) {
        return;
    }
    throw new IllegalStateException("This map is closed");
}
/**
 * Verify writing is allowed.
 *
 * @throws IllegalStateException if the map is closed or read-only
 */
protected void checkWrite() {
    if (readOnly) {
        // a closed map is also read-only; report "closed" first
        checkOpen();
        throw new IllegalStateException("This map is read-only");
    }
}
/**
 * The hash code is the map id, which is unique within the store.
 */
public int hashCode() {
    return id;
}
/**
 * Maps compare by identity: two MVMap instances are only equal if they
 * are the same object.
 */
public boolean equals(Object o) {
    return this == o;
}
/**
 * Get the number of entries, saturated at Integer.MAX_VALUE for very
 * large maps. Use getSize() for the exact count.
 *
 * @return the (possibly clamped) entry count
 */
public int size() {
    long count = getSize();
    return (int) Math.min(count, Integer.MAX_VALUE);
}
/**
 * Get the exact number of entries.
 *
 * @return the entry count
 */
public long getSize() {
    return root.getTotalCount();
}
/**
 * Get the store version at which this map was created.
 *
 * @return the creation version
 */
long getCreateVersion() {
    return createVersion;
}
/**
 * Release the given page's storage back to the store.
 *
 * @param p the page to free
 */
protected void removePage(Page p) {
    store.removePage(p.getPos());
}
/**
 * Open a read-only view of this map at the given version. The version
 * is resolved against the in-memory old roots first; if it is older
 * than all of them, the store is asked to load it.
 *
 * @param version the version to open
 * @return a read-only map pinned to that version
 * @throws IllegalArgumentException if called on a read-only map, or if
 *         the version predates the map's creation
 */
public MVMap<K, V> openVersion(long version) {
    if (readOnly) {
        throw new IllegalArgumentException("This map is read-only - need to call the method on the writable map");
    }
    if (version < createVersion) {
        throw new IllegalArgumentException("Unknown version");
    }
    Page newest = null;
    // need to copy because it can change
    Page r = root;
    if (r.getVersion() == version) {
        newest = r;
    } else {
        // find the newest page that has a getVersion() <= version
        int i = searchRoot(version);
        if (i < 0) {
            // not found
            if (i == -1) {
                // smaller than all in-memory versions:
                // delegate to the store (reads from disk)
                return store.openMapVersion(version, name, this);
            }
            // insertion point encoding: take the entry just before it
            i = -i - 2;
        }
        newest = oldRoots.get(i);
    }
    MVMap<K, V> m = openReadOnly();
    m.root = newest;
    return m;
}
/**
 * Open a read-only copy of this map, sharing the current root page.
 *
 * @return the read-only copy
 */
protected MVMap<K, V> openReadOnly() {
    MVMap<K, V> copy = new MVMap<K, V>(keyType, valueType);
    copy.readOnly = true;
    // rebuild the minimal configuration the map needs to open
    HashMap<String, String> cfg = New.hashMap();
    cfg.put("id", String.valueOf(id));
    cfg.put("name", name);
    cfg.put("createVersion", String.valueOf(createVersion));
    copy.open(store, cfg);
    copy.root = root;
    return copy;
}
/**
 * Binary-search the oldRoots list for the given version.
 *
 * @param version the version to find
 * @return the index, or the negative insertion-point encoding
 *         (-(insertionPoint + 1)) if not present
 */
private int searchRoot(long version) {
    int lo = 0;
    int hi = oldRoots.size() - 1;
    while (lo <= hi) {
        // unsigned shift avoids overflow for large indices
        int mid = (lo + hi) >>> 1;
        long v = oldRoots.get(mid).getVersion();
        if (v == version) {
            return mid;
        } else if (v < version) {
            lo = mid + 1;
        } else {
            hi = mid - 1;
        }
    }
    return -(lo + 1);
}
/**
 * Get the version of the current root page.
 *
 * @return the version
 */
public long getVersion() {
    return root.getVersion();
}
/**
 * Get the child page count for this page. Subclasses may override this
 * in case the last child is not to be used.
 *
 * @param p the page
 * @return the number of direct children
 */
protected int getChildPageCount(Page p) {
    return p.getChildPageCount();
}
/**
 * Get the map type. When opening an existing map, the stored type must
 * match this value.
 *
 * @return the map type
 */
public String getType() {
    return "btree";
}
/**
 * Serialize the map metadata (id, name, type, creation version and the
 * key/value types) to a string.
 *
 * @return the metadata string
 */
public String asString() {
    StringBuilder b = new StringBuilder();
    DataUtils.appendMap(b, "id", id);
    DataUtils.appendMap(b, "name", name);
    DataUtils.appendMap(b, "type", getType());
    DataUtils.appendMap(b, "createVersion", createVersion);
    if (keyType != null) {
        DataUtils.appendMap(b, "key", keyType.asString());
    }
    if (valueType != null) {
        DataUtils.appendMap(b, "value", valueType.asString());
    }
    return b.toString();
}
}
h2/src/tools/org/h2/dev/store/btree/MVStoreBuilder.java
deleted
100644 → 0
浏览文件 @
b222ae75
/*
* Copyright 2004-2011 H2 Group. Multiple-Licensed under the H2 License,
* Version 1.0, and under the Eclipse Public License, Version 1.0
* (http://h2database.com/html/license.html).
* Initial Developer: H2 Group
*/
package
org
.
h2
.
dev
.
store
.
btree
;
import
java.util.HashMap
;
import
org.h2.dev.store.type.DataTypeFactory
;
import
org.h2.util.New
;
/**
 * A builder for an MVStore. Each configuration key may be set at most
 * once; attempting to set it again throws IllegalArgumentException.
 */
public class MVStoreBuilder {

    /** The collected configuration; written once per key via set(). */
    private final HashMap<String, Object> config = New.hashMap();

    /**
     * Use the following file name. If the file does not exist, it is
     * automatically created.
     *
     * @param fileName the file name
     * @return this
     */
    public static MVStoreBuilder fileBased(String fileName) {
        return new MVStoreBuilder().fileName(fileName);
    }

    /**
     * Open the store in-memory. In this case, no data may be saved.
     *
     * @return the builder
     */
    public static MVStoreBuilder inMemory() {
        return new MVStoreBuilder();
    }

    /**
     * Set a configuration value, rejecting duplicates.
     *
     * @param key the configuration key
     * @param value the value
     * @return this
     */
    private MVStoreBuilder set(String key, Object value) {
        if (config.containsKey(key)) {
            // BUG FIX: the old message printed the previously stored VALUE
            // (config.get(key)) instead of the parameter NAME
            throw new IllegalArgumentException(
                    "Parameter " + key + " is already set");
        }
        config.put(key, value);
        return this;
    }

    private MVStoreBuilder fileName(String fileName) {
        return set("fileName", fileName);
    }

    /**
     * Open the file in read-only mode. In this case, a shared lock will be
     * acquired to ensure the file is not concurrently opened in write mode.
     * <p>
     * If this option is not used, the file is locked exclusively.
     * <p>
     * Please note a store may only be opened once in every JVM (no matter
     * whether it is opened in read-only or read-write mode), because each file
     * may be locked only once in a process.
     *
     * @return this
     */
    public MVStoreBuilder readOnly() {
        return set("openMode", "r");
    }

    /**
     * Set the read cache size in MB. The default is 16 MB.
     *
     * @param mb the cache size
     * @return this
     */
    public MVStoreBuilder cacheSizeMB(int mb) {
        return set("cacheSize", Integer.toString(mb));
    }

    /**
     * Use the given data type factory.
     *
     * @param factory the data type factory
     * @return this
     */
    public MVStoreBuilder with(DataTypeFactory factory) {
        return set("dataTypeFactory", factory);
    }

    /**
     * Open the store with the collected configuration.
     *
     * @return the opened store
     */
    public MVStore open() {
        MVStore s = new MVStore(config);
        s.open();
        return s;
    }

    public String toString() {
        return DataUtils.appendMap(new StringBuilder(), config).toString();
    }

    /**
     * Read the configuration from a string (the inverse of toString()).
     *
     * @param s the string representation
     * @return the builder
     */
    public static MVStoreBuilder fromString(String s) {
        HashMap<String, String> config = DataUtils.parseMap(s);
        MVStoreBuilder builder = new MVStoreBuilder();
        builder.config.putAll(config);
        return builder;
    }

}
h2/src/tools/org/h2/dev/store/btree/MVStoreTool.java
deleted
100644 → 0
浏览文件 @
b222ae75
/*
* Copyright 2004-2011 H2 Group. Multiple-Licensed under the H2 License,
* Version 1.0, and under the Eclipse Public License, Version 1.0
* (http://h2database.com/html/license.html).
* Initial Developer: H2 Group
*/
package
org
.
h2
.
dev
.
store
.
btree
;
import
java.io.ByteArrayInputStream
;
import
java.io.IOException
;
import
java.io.PrintWriter
;
import
java.io.StringReader
;
import
java.nio.ByteBuffer
;
import
java.nio.channels.FileChannel
;
import
java.util.Properties
;
import
org.h2.store.fs.FilePath
;
import
org.h2.store.fs.FileUtils
;
/**
 * Utility methods used in combination with the MVStore.
 */
public class MVStoreTool {

    /**
     * Runs this tool.
     * Options are case sensitive. Supported options are:
     * <table>
     * <tr><td>[-dump &lt;dir&gt;]</td>
     * <td>Dump the contends of the file</td></tr>
     * </table>
     *
     * @param args the command line arguments
     */
    public static void main(String... args) throws IOException {
        for (int i = 0; i < args.length; i++) {
            if ("-dump".equals(args[i])) {
                String fileName = args[++i];
                dump(fileName, new PrintWriter(System.out));
            }
        }
    }

    /**
     * Read the contents of the file and display them in a human-readable
     * format.
     *
     * @param fileName the name of the file
     * @param writer the print writer
     */
    public static void dump(String fileName, PrintWriter writer) throws IOException {
        if (!FileUtils.exists(fileName)) {
            writer.println("File not found: " + fileName);
            return;
        }
        FileChannel file = null;
        int blockSize = MVStore.BLOCK_SIZE;
        try {
            file = FilePath.get(fileName).open("r");
            long fileLength = file.size();
            byte[] header = new byte[blockSize];
            file.read(ByteBuffer.wrap(header), 0);
            Properties prop = new Properties();
            // BUG FIX: the header was loaded twice - first from the raw
            // bytes (ISO-8859-1 per Properties.load(InputStream)) and then
            // again from a UTF-8 decoded reader; only the UTF-8 load is
            // kept, which matches how the header is written
            prop.load(new StringReader(new String(header, "UTF-8")));
            writer.println("file " + fileName);
            writer.println("    length " + fileLength);
            writer.println("    " + prop);
            ByteBuffer block = ByteBuffer.allocate(40);
            for (long pos = 0; pos < fileLength;) {
                block.rewind();
                DataUtils.readFully(file, pos, block);
                block.rewind();
                if (block.get() != 'c') {
                    // not a chunk header: skip this block
                    pos += blockSize;
                    continue;
                }
                int chunkLength = block.getInt();
                int chunkId = block.getInt();
                int pageCount = block.getInt();
                long metaRootPos = block.getLong();
                long maxLength = block.getLong();
                long maxLengthLive = block.getLong();
                writer.println("  chunk " + chunkId +
                        " at " + pos +
                        " length " + chunkLength +
                        " pageCount " + pageCount +
                        " root " + metaRootPos +
                        " maxLength " + maxLength +
                        " maxLengthLive " + maxLengthLive);
                ByteBuffer chunk = ByteBuffer.allocate(chunkLength);
                DataUtils.readFully(file, pos, chunk);
                int p = block.position();
                // NOTE(review): this always advances a full extra block when
                // chunkLength is an exact multiple of blockSize - confirm
                // against the chunk layout before changing
                pos = (pos + chunkLength + blockSize) / blockSize * blockSize;
                chunkLength -= p;
                while (chunkLength > 0) {
                    chunk.position(p);
                    int pageLength = chunk.getInt();
                    // check value (ignored)
                    chunk.getShort();
                    long mapId = DataUtils.readVarInt(chunk);
                    int len = DataUtils.readVarInt(chunk);
                    int type = chunk.get();
                    boolean compressed = (type & 2) != 0;
                    boolean node = (type & 1) != 0;
                    writer.println("    map " + mapId +
                            " at " + p + " " +
                            (node ? "node" : "leaf") + " " +
                            (compressed ? "compressed " : "") +
                            "len: " + pageLength +
                            " entries: " + len);
                    p += pageLength;
                    chunkLength -= pageLength;
                }
            }
        } catch (IOException e) {
            writer.println("ERROR: " + e);
            throw e;
        } finally {
            if (file != null) {
                try {
                    file.close();
                } catch (IOException e) {
                    // ignore - best effort cleanup; the primary error
                    // (if any) was already reported above
                }
            }
        }
        writer.println();
        writer.flush();
    }

}
h2/src/tools/org/h2/dev/store/btree/Page.java
deleted
100644 → 0
浏览文件 @
b222ae75
/*
* Copyright 2004-2011 H2 Group. Multiple-Licensed under the H2 License,
* Version 1.0, and under the Eclipse Public License, Version 1.0
* (http://h2database.com/html/license.html).
* Initial Developer: H2 Group
*/
package
org
.
h2
.
dev
.
store
.
btree
;
import
java.io.IOException
;
import
java.nio.ByteBuffer
;
import
java.nio.channels.FileChannel
;
import
java.util.Arrays
;
import
org.h2.compress.Compressor
;
import
org.h2.dev.store.type.DataType
;
/**
* A page (a node or a leaf).
* <p>
* For b-tree nodes, the key at a given index is larger than the largest key of
* the child at the same index.
* <p>
* File format:
* page length (including length): int
* check value: short
* map id: varInt
* number of keys: varInt
* type: byte (0: leaf, 1: node; +2: compressed)
* compressed: bytes saved (varInt)
* keys
* leaf: values (one for each key)
* node: children (1 more than keys)
*/
public
class
Page
{
private
static
final
int
SHARED_KEYS
=
1
,
SHARED_VALUES
=
2
,
SHARED_CHILDREN
=
4
,
SHARED_COUNTS
=
8
;
private
static
final
Object
[]
EMPTY_OBJECT_ARRAY
=
new
Object
[
0
];
private
final
MVMap
<?,
?>
map
;
private
final
long
version
;
private
long
pos
;
private
long
totalCount
;
private
int
keyCount
;
/**
* The last result of a find operation is cached.
*/
private
int
cachedCompare
;
/**
* Which arrays are shared with another version of this page.
*/
private
int
sharedFlags
;
/**
* The estimated memory used.
*/
private
int
memory
;
private
Object
[]
keys
;
private
Object
[]
values
;
private
long
[]
children
;
private
Page
[]
childrenPages
;
private
long
[]
counts
;
/**
 * Create a page bound to the given map and version; all content arrays
 * are filled in later (see create / read).
 *
 * @param map the owning map
 * @param version the version this page belongs to
 */
Page(MVMap<?, ?> map, long version) {
    this.map = map;
    this.version = version;
}
/**
 * Create a new, empty leaf page.
 *
 * @param map the map
 * @param version the version
 * @return the new page
 */
public static Page createEmpty(MVMap<?, ?> map, long version) {
    return create(map, version, 0,
            EMPTY_OBJECT_ARRAY, EMPTY_OBJECT_ARRAY,
            null, null, null,
            0, 0, DataUtils.PAGE_MEMORY);
}
/**
 * Create a new page. The arrays are used as-is, not cloned.
 *
 * @param map the map
 * @param version the version
 * @param keyCount the number of keys
 * @param keys the keys
 * @param values the values (null for a node)
 * @param children the child positions (null for a leaf)
 * @param childrenPages the in-memory child pages
 * @param counts the per-child entry counts
 * @param totalCount the total number of entries in the subtree
 * @param sharedFlags which arrays are shared with another page version
 * @param memory the memory used in bytes, or 0 to compute it
 * @return the page
 */
public static Page create(MVMap<?, ?> map, long version, int keyCount,
        Object[] keys, Object[] values, long[] children,
        Page[] childrenPages, long[] counts, long totalCount,
        int sharedFlags, int memory) {
    Page p = new Page(map, version);
    // the position stays 0 until the page is written to disk
    p.keys = keys;
    p.keyCount = keyCount;
    p.values = values;
    p.children = children;
    p.childrenPages = childrenPages;
    p.counts = counts;
    p.totalCount = totalCount;
    p.sharedFlags = sharedFlags;
    if (memory == 0) {
        p.memory = p.calculateMemory();
    } else {
        p.memory = memory;
    }
    return p;
}
/**
 * Read a page from the given file.
 *
 * @param file the file
 * @param map the map
 * @param pos the page position
 * @param filePos the position in the file
 * @param fileSize the file size (to avoid reading past EOF)
 * @return the page
 */
static Page read(FileChannel file, MVMap<?, ?> map, long pos,
        long filePos, long fileSize) {
    ByteBuffer buff;
    int maxLength = DataUtils.getPageMaxLength(pos);
    try {
        maxLength = (int) Math.min(fileSize - filePos, maxLength);
        int length = maxLength;
        if (maxLength == Integer.MAX_VALUE) {
            // the page length is not encoded in the position:
            // read the header first to get the real length
            buff = ByteBuffer.allocate(128);
            DataUtils.readFully(file, filePos, buff);
            maxLength = buff.getInt();
            // BUG FIX: the old code left 'length' at Integer.MAX_VALUE here,
            // so the allocation below tried to create a ~2 GB buffer;
            // use the length just read from the header instead
            length = maxLength;
            // read the first bytes again
        }
        buff = ByteBuffer.allocate(length);
        DataUtils.readFully(file, filePos, buff);
    } catch (IOException e) {
        throw new RuntimeException(e);
    }
    Page p = new Page(map, 0);
    p.pos = pos;
    int chunkId = DataUtils.getPageChunkId(pos);
    int offset = DataUtils.getPageOffset(pos);
    p.read(buff, chunkId, offset, maxLength);
    return p;
}
/**
 * Get the key at the given index.
 *
 * @param index the index
 * @return the key
 */
public Object getKey(int index) {
    return keys[index];
}
/**
 * Get the child page at the given index, reading it from the store if
 * it is not already in memory.
 *
 * @param index the index
 * @return the child page
 */
public Page getChildPage(int index) {
    Page cached = childrenPages[index];
    if (cached != null) {
        return cached;
    }
    return map.readPage(children[index]);
}
/**
 * Get the stored position of the child page at the given index.
 *
 * @param index the index
 * @return the position
 */
long getChildPagePos(int index) {
    return children[index];
}
/**
 * Get the value at the given index.
 *
 * @param index the index
 * @return the value
 */
public Object getValue(int index) {
    return values[index];
}
/**
 * Get the number of keys in this page.
 *
 * @return the number of keys
 */
public int getKeyCount() {
    return keyCount;
}
/**
 * Check whether this is a leaf page (a page without children).
 *
 * @return true if it is a leaf
 */
public boolean isLeaf() {
    return children == null;
}
/**
 * Get the position of the page (0 if the page was never written).
 *
 * @return the position
 */
public long getPos() {
    return pos;
}
/**
 * A debug representation: the page position followed by each child
 * position (for nodes) and each key (with its value, for leaves).
 */
public String toString() {
    StringBuilder b = new StringBuilder();
    b.append("pos: ").append(pos).append("\n");
    // iterate one past the key count: a node has keyCount + 1 children
    for (int i = 0; i <= keyCount; i++) {
        if (i > 0) {
            b.append(" ");
        }
        if (children != null) {
            b.append("[" + children[i] + "] ");
        }
        if (i < keyCount) {
            b.append(keys[i]);
            if (values != null) {
                b.append(':');
                b.append(values[i]);
            }
        }
    }
    return b.toString();
}
/**
 * Create a copy of this page for the given write version, sharing all
 * content arrays with this page (the shared flags record that, so a
 * later modification clones the affected array first). The original
 * page's storage is released. If the page already has the requested
 * version, it is returned unchanged.
 *
 * @param writeVersion the write version
 * @return a page with the given write version
 */
public Page copyOnWrite(long writeVersion) {
    if (version == writeVersion) {
        return this;
    }
    // the old on-disk copy is superseded; free its space
    map.getStore().removePage(pos);
    Page newPage = create(map, writeVersion,
            keyCount, keys, values, children, childrenPages,
            counts, totalCount,
            SHARED_KEYS | SHARED_VALUES | SHARED_CHILDREN | SHARED_COUNTS,
            memory);
    map.getStore().registerUnsavedPage();
    // carry over the search hint
    newPage.cachedCompare = cachedCompare;
    return newPage;
}
/**
 * Search the key in this page using a binary search. Instead of always
 * starting the search in the middle, the last found index is cached and
 * used as the first probe, which speeds up sequential access patterns.
 * <p>
 * If the key was found, the returned value is the index in the key array.
 * If not found, the returned value is negative, where -1 means the provided
 * key is smaller than any keys in this page. See also Arrays.binarySearch.
 *
 * @param key the key
 * @return the index, or a negative insertion-point encoding
 */
public int binarySearch(Object key) {
    int low = 0, high = keyCount - 1;
    // first probe: one below the cached hint (falls back to the middle
    // when the hint is out of range)
    int x = cachedCompare - 1;
    if (x < 0 || x > high) {
        x = (low + high) >>> 1;
    }
    Object[] k = keys;
    while (low <= high) {
        int compare = map.compare(key, k[x]);
        if (compare > 0) {
            low = x + 1;
        } else if (compare < 0) {
            high = x - 1;
        } else {
            // remember the hit; the next search probes the slot after it
            cachedCompare = x + 1;
            return x;
        }
        x = (low + high) >>> 1;
    }
    cachedCompare = low;
    return -(low + 1);
}
/**
 * Split the page. This modifies the current page.
 *
 * @param at the split index
 * @return the page with the entries after the split index
 */
public Page split(int at) {
    // dispatch on the page kind; leaves and internal nodes split differently
    if (isLeaf()) {
        return splitLeaf(at);
    }
    return splitNode(at);
}
/**
 * Split this leaf page at the given index. Entries [0, at) stay in this
 * page, entries [at, keyCount) move to the returned new page.
 *
 * @param at the split index
 * @return the new page holding the upper half of the entries
 */
private Page splitLeaf(int at) {
    int a = at, b = keyCount - a;
    Object[] aKeys = new Object[a];
    Object[] bKeys = new Object[b];
    System.arraycopy(keys, 0, aKeys, 0, a);
    System.arraycopy(keys, a, bKeys, 0, b);
    keys = aKeys;
    keyCount = a;
    Object[] aValues = new Object[a];
    // fix: the original allocated bValues twice in a row
    // (Object[] bValues = new Object[b]; bValues = new Object[b];);
    // a single allocation is sufficient
    Object[] bValues = new Object[b];
    System.arraycopy(values, 0, aValues, 0, a);
    System.arraycopy(values, a, bValues, 0, b);
    values = aValues;
    // this page now owns its (truncated) key and value arrays
    sharedFlags &= ~(SHARED_KEYS | SHARED_VALUES);
    totalCount = a;
    // the new leaf owns its arrays (shared flags 0); memory is
    // recalculated below
    Page newPage = create(map, version, b,
            bKeys, bValues, null, null, null,
            bKeys.length, 0, 0);
    map.getStore().registerUnsavedPage();
    memory = calculateMemory();
    newPage.memory = newPage.calculateMemory();
    return newPage;
}
/**
 * Split this internal node at the given index. Keys [0, at) and children
 * [0, at] stay in this page; the key at the split index is removed (it is
 * expected to be pushed up into the parent by the caller), and the
 * remaining keys and children move to the returned new page.
 *
 * @param at the split index
 * @return the new page holding the upper half of the entries
 */
private Page splitNode(int at) {
    int a = at, b = keyCount - a;
    // split the keys; the key at index 'a' is dropped from both halves
    Object[] aKeys = new Object[a];
    Object[] bKeys = new Object[b - 1];
    System.arraycopy(keys, 0, aKeys, 0, a);
    System.arraycopy(keys, a + 1, bKeys, 0, b - 1);
    keys = aKeys;
    keyCount = a;
    // split the child positions: a + 1 children stay, b move
    long[] aChildren = new long[a + 1];
    long[] bChildren = new long[b];
    System.arraycopy(children, 0, aChildren, 0, a + 1);
    System.arraycopy(children, a + 1, bChildren, 0, b);
    children = aChildren;
    // split the in-memory child page references the same way
    Page[] aChildrenPages = new Page[a + 1];
    Page[] bChildrenPages = new Page[b];
    System.arraycopy(childrenPages, 0, aChildrenPages, 0, a + 1);
    System.arraycopy(childrenPages, a + 1, bChildrenPages, 0, b);
    childrenPages = aChildrenPages;
    // split the per-child descendant counts
    long[] aCounts = new long[a + 1];
    long[] bCounts = new long[b];
    System.arraycopy(counts, 0, aCounts, 0, a + 1);
    System.arraycopy(counts, a + 1, bCounts, 0, b);
    counts = aCounts;
    // this page now owns its (truncated) arrays
    sharedFlags &= ~(SHARED_KEYS | SHARED_CHILDREN | SHARED_COUNTS);
    // recompute the total count of each half from the child counts
    long t = 0;
    for (long x : aCounts) {
        t += x;
    }
    totalCount = t;
    t = 0;
    for (long x : bCounts) {
        t += x;
    }
    Page newPage = create(map, version, b - 1,
            bKeys, null, bChildren, bChildrenPages, bCounts,
            t, 0, 0);
    map.getStore().registerUnsavedPage();
    memory = calculateMemory();
    newPage.memory = newPage.calculateMemory();
    return newPage;
}
/**
 * Get the total number of key-value pairs, including child pages.
 *
 * @return the number of key-value pairs
 */
public long getTotalCount() {
    if (MVStore.ASSERT) {
        // verify the cached total against a recount
        long expected;
        if (isLeaf()) {
            expected = keyCount;
        } else {
            expected = 0;
            for (long c : counts) {
                expected += c;
            }
        }
        if (expected != totalCount) {
            throw new AssertionError("Expected: " + expected + " got: " + totalCount);
        }
    }
    return totalCount;
}
/**
 * Get the descendant counts for the given child.
 *
 * @param index the child index
 * @return the descendant count
 */
long getCounts(int index) {
    return counts[index];
}
/**
 * Replace the child page.
 *
 * @param index the index
 * @param c the new child page
 */
public void setChild(int index, Page c) {
    // update the child reference only if the page object or its stored
    // position actually changed
    if (c != childrenPages[index] || c.getPos() != children[index]) {
        if ((sharedFlags & SHARED_CHILDREN) != 0) {
            // copy-on-write: the arrays are still shared with an older
            // page version, so clone before mutating
            children = Arrays.copyOf(children, children.length);
            childrenPages = Arrays.copyOf(childrenPages, childrenPages.length);
            sharedFlags &= ~SHARED_CHILDREN;
        }
        children[index] = c.getPos();
        childrenPages[index] = c;
    }
    // update the cached descendant count if the child's size changed
    if (c.getTotalCount() != counts[index]) {
        if ((sharedFlags & SHARED_COUNTS) != 0) {
            // copy-on-write for the counts array as well
            counts = Arrays.copyOf(counts, counts.length);
            sharedFlags &= ~SHARED_COUNTS;
        }
        long oldCount = counts[index];
        counts[index] = c.getTotalCount();
        // keep the aggregate total in sync with the per-child delta
        totalCount += counts[index] - oldCount;
    }
}
/**
 * Replace the key.
 *
 * @param index the index
 * @param key the new key
 */
public void setKey(int index, Object key) {
    if ((sharedFlags & SHARED_KEYS) != 0) {
        // copy-on-write: the key array is still shared with an older
        // page version, so clone before mutating
        keys = Arrays.copyOf(keys, keys.length);
        sharedFlags &= ~SHARED_KEYS;
    }
    // adjust the memory estimate: subtract the old key, add the new one
    Object old = keys[index];
    if (old != null) {
        memory -= map.getKeyType().getMemory(old);
    }
    memory += map.getKeyType().getMemory(key);
    keys[index] = key;
}
/**
 * Replace the value.
 *
 * @param index the index
 * @param value the new value
 * @return the old value
 */
public Object setValue(int index, Object value) {
    Object old = values[index];
    if ((sharedFlags & SHARED_VALUES) != 0) {
        // copy-on-write: the value array is still shared with an older
        // page version, so clone before mutating
        values = Arrays.copyOf(values, values.length);
        sharedFlags &= ~SHARED_VALUES;
    }
    // adjust the memory estimate: subtract the old value, add the new one
    memory -= map.getValueType().getMemory(old);
    memory += map.getValueType().getMemory(value);
    values[index] = value;
    return old;
}
/**
 * Remove this page and all child pages.
 */
void removeAllRecursive() {
    if (children != null) {
        for (int i = 0, size = children.length; i < size; i++) {
            Page p = childrenPages[i];
            if (p != null) {
                // child is loaded in memory: recurse on the object
                p.removeAllRecursive();
            } else {
                long c = children[i];
                int type = DataUtils.getPageType(c);
                if (type == DataUtils.PAGE_TYPE_LEAF) {
                    // leaf pages have no descendants, so they can be
                    // removed without reading them from disk
                    map.getStore().removePage(c);
                } else {
                    // internal node: must be read to find its children
                    map.readPage(c).removeAllRecursive();
                }
            }
        }
    }
    map.getStore().removePage(pos);
}
/**
 * Insert a key-value pair into this leaf.
 *
 * @param index the index
 * @param key the key
 * @param value the value
 */
public void insertLeaf(int index, Object key, Object value) {
    if (((sharedFlags & SHARED_KEYS) == 0) && keys.length > keyCount + 1) {
        // fast path: arrays are exclusively owned and have spare
        // capacity, so shift entries in place to open a gap
        if (index < keyCount) {
            System.arraycopy(keys, index, keys, index + 1, keyCount - index);
            System.arraycopy(values, index, values, index + 1, keyCount - index);
        }
    } else {
        // slow path: allocate new arrays with some headroom
        // (+6 slots) to amortize future insertions
        int len = keyCount + 6;
        Object[] newKeys = new Object[len];
        DataUtils.copyWithGap(keys, newKeys, keyCount, index);
        keys = newKeys;
        Object[] newValues = new Object[len];
        DataUtils.copyWithGap(values, newValues, keyCount, index);
        values = newValues;
    }
    keys[index] = key;
    values[index] = value;
    keyCount++;
    // the arrays are exclusively owned from here on
    sharedFlags &= ~(SHARED_KEYS | SHARED_VALUES);
    totalCount++;
    // account for the memory of the new entry
    memory += map.getKeyType().getMemory(key);
    memory += map.getValueType().getMemory(value);
}
/**
 * Insert a child into this node.
 *
 * @param index the index
 * @param key the key
 * @param childPage the child page
 */
public void insertNode(int index, Object key, Page childPage) {
    // grow the key array by one and insert the key at the gap
    Object[] newKeys = new Object[keyCount + 1];
    DataUtils.copyWithGap(keys, newKeys, keyCount, index);
    newKeys[index] = key;
    keys = newKeys;
    keyCount++;
    // grow the child position array and record the child's position
    long[] newChildren = new long[children.length + 1];
    DataUtils.copyWithGap(children, newChildren, children.length, index);
    newChildren[index] = childPage.getPos();
    children = newChildren;
    // grow the in-memory child reference array in parallel
    Page[] newChildrenPages = new Page[childrenPages.length + 1];
    DataUtils.copyWithGap(childrenPages, newChildrenPages, childrenPages.length, index);
    newChildrenPages[index] = childPage;
    childrenPages = newChildrenPages;
    // grow the descendant count array and record the child's size
    long[] newCounts = new long[counts.length + 1];
    DataUtils.copyWithGap(counts, newCounts, counts.length, index);
    newCounts[index] = childPage.getTotalCount();
    counts = newCounts;
    // all arrays were reallocated, so they are exclusively owned now
    sharedFlags &= ~(SHARED_KEYS | SHARED_CHILDREN | SHARED_COUNTS);
    totalCount += childPage.getTotalCount();
    // account for the memory of the new key and child slot
    memory += map.getKeyType().getMemory(key);
    memory += DataUtils.PAGE_MEMORY_CHILD;
}
/**
 * Remove the key and value (or child) at the given index.
 *
 * @param index the index
 */
public void remove(int index) {
    // in a node, removing the last child removes the key before it;
    // otherwise the key at the same index is removed
    int keyIndex = index >= keyCount ? index - 1 : index;
    Object old = keys[keyIndex];
    memory -= map.getKeyType().getMemory(old);
    if ((sharedFlags & SHARED_KEYS) == 0 && keys.length > keyCount - 4) {
        // fast path: exclusively owned array that is not oversized;
        // close the gap in place and null out the vacated slot
        if (keyIndex < keyCount - 1) {
            System.arraycopy(keys, keyIndex + 1, keys, keyIndex, keyCount - keyIndex - 1);
        }
        keys[keyCount - 1] = null;
    } else {
        // slow path: allocate a tightly-sized copy without the entry
        Object[] newKeys = new Object[keyCount - 1];
        DataUtils.copyExcept(keys, newKeys, keyCount, keyIndex);
        keys = newKeys;
        sharedFlags &= ~SHARED_KEYS;
    }
    if (values != null) {
        // leaf page: remove the value the same way
        old = values[index];
        memory -= map.getValueType().getMemory(old);
        if ((sharedFlags & SHARED_VALUES) == 0 && values.length > keyCount - 4) {
            if (index < keyCount - 1) {
                System.arraycopy(values, index + 1, values, index, keyCount - index - 1);
            }
            values[keyCount - 1] = null;
        } else {
            Object[] newValues = new Object[keyCount - 1];
            DataUtils.copyExcept(values, newValues, keyCount, index);
            values = newValues;
            sharedFlags &= ~SHARED_VALUES;
        }
        totalCount--;
    }
    keyCount--;
    if (children != null) {
        // internal node: remove the child pointer, page reference and
        // descendant count at the index
        memory -= DataUtils.PAGE_MEMORY_CHILD;
        long countOffset = counts[index];
        long[] newChildren = new long[children.length - 1];
        DataUtils.copyExcept(children, newChildren, children.length, index);
        children = newChildren;
        Page[] newChildrenPages = new Page[childrenPages.length - 1];
        DataUtils.copyExcept(childrenPages, newChildrenPages, childrenPages.length, index);
        childrenPages = newChildrenPages;
        long[] newCounts = new long[counts.length - 1];
        DataUtils.copyExcept(counts, newCounts, counts.length, index);
        counts = newCounts;
        sharedFlags &= ~(SHARED_CHILDREN | SHARED_COUNTS);
        // the removed subtree's entries no longer count towards the total
        totalCount -= countOffset;
    }
}
/**
 * Read the page from the buffer.
 *
 * @param buff the buffer
 * @param chunkId the chunk id
 * @param offset the offset within the chunk
 * @param maxLength the maximum length
 */
void read(ByteBuffer buff, int chunkId, int offset, int maxLength) {
    int start = buff.position();
    // header: page length (int), check value (short), map id (var int)
    int pageLength = buff.getInt();
    if (pageLength > maxLength) {
        throw new RuntimeException("Length too large, expected =< " + maxLength + " got " + pageLength);
    }
    short check = buff.getShort();
    int mapId = DataUtils.readVarInt(buff);
    if (mapId != map.getId()) {
        throw new RuntimeException("Error reading page, expected map " + map.getId() + " got " + mapId);
    }
    // verify the check value derived from chunk id, offset and length
    // (must mirror the computation in write())
    int checkTest = DataUtils.getCheckValue(chunkId)
            ^ DataUtils.getCheckValue(offset)
            ^ DataUtils.getCheckValue(pageLength);
    if (check != (short) checkTest) {
        throw new RuntimeException("Error in check value, expected " + checkTest + " got " + check);
    }
    int len = DataUtils.readVarInt(buff);
    keys = new Object[len];
    keyCount = len;
    int type = buff.get();
    // bit 0 of the type byte distinguishes leaf from node; the
    // PAGE_COMPRESSED bit marks a compressed payload
    boolean node = (type & 1) == DataUtils.PAGE_TYPE_NODE;
    boolean compressed = (type & DataUtils.PAGE_COMPRESSED) != 0;
    if (compressed) {
        // decompress the rest of the page into a fresh buffer;
        // lenAdd is the size difference stored by write()
        Compressor compressor = map.getStore().getCompressor();
        int lenAdd = DataUtils.readVarInt(buff);
        int compLen = pageLength + start - buff.position();
        byte[] comp = new byte[compLen];
        buff.get(comp);
        int l = compLen + lenAdd;
        buff = ByteBuffer.allocate(l);
        compressor.expand(comp, 0, compLen, buff.array(), 0, l);
    }
    // read the keys
    DataType keyType = map.getKeyType();
    for (int i = 0; i < len; i++) {
        Object k = keyType.read(buff);
        keys[i] = k;
    }
    if (node) {
        // read len + 1 child positions, then len + 1 descendant counts
        children = new long[len + 1];
        for (int i = 0; i <= len; i++) {
            children[i] = buff.getLong();
        }
        childrenPages = new Page[len + 1];
        counts = new long[len + 1];
        long total = 0;
        for (int i = 0; i <= len; i++) {
            long s = DataUtils.readVarLong(buff);
            total += s;
            counts[i] = s;
        }
        totalCount = total;
    } else {
        // leaf: read one value per key
        values = new Object[len];
        DataType valueType = map.getValueType();
        for (int i = 0; i < len; i++) {
            Object v = valueType.read(buff);
            values[i] = v;
        }
        totalCount = len;
    }
    memory = calculateMemory();
}
/**
 * Store the page and update the position.
 *
 * @param chunk the chunk
 * @param buff the target buffer
 */
private void write(Chunk chunk, ByteBuffer buff) {
    int start = buff.position();
    // reserve space for the page length and check value; both are
    // patched in at the end once the final length is known
    buff.putInt(0);
    // NOTE: the (byte) cast is widened back to short by putShort;
    // this writes a zero short placeholder
    buff.putShort((byte) 0);
    DataUtils.writeVarInt(buff, map.getId());
    int len = keyCount;
    DataUtils.writeVarInt(buff, len);
    Compressor compressor = map.getStore().getCompressor();
    int type = children != null ? DataUtils.PAGE_TYPE_NODE
            : DataUtils.PAGE_TYPE_LEAF;
    buff.put((byte) type);
    // everything after this point may be replaced by a compressed form
    int compressStart = buff.position();
    for (int i = 0; i < len; i++) {
        map.getKeyType().write(buff, keys[i]);
    }
    if (type == DataUtils.PAGE_TYPE_NODE) {
        // len + 1 child positions, then len + 1 descendant counts
        for (int i = 0; i <= len; i++) {
            buff.putLong(children[i]);
        }
        for (int i = 0; i <= len; i++) {
            DataUtils.writeVarLong(buff, counts[i]);
        }
    } else {
        for (int i = 0; i < len; i++) {
            map.getValueType().write(buff, values[i]);
        }
    }
    if (compressor != null) {
        // try to compress the payload; keep the compressed form only if
        // it is smaller including the stored size difference
        int expLen = buff.position() - compressStart;
        byte[] exp = new byte[expLen];
        buff.position(compressStart);
        buff.get(exp);
        byte[] comp = new byte[exp.length * 2];
        int compLen = compressor.compress(exp, exp.length, comp, 0);
        if (compLen + DataUtils.getVarIntLen(compLen - expLen) < expLen) {
            // rewind to the type byte, mark the page compressed, and
            // store the expanded-minus-compressed size for read()
            buff.position(compressStart - 1);
            buff.put((byte) (type + DataUtils.PAGE_COMPRESSED));
            DataUtils.writeVarInt(buff, expLen - compLen);
            buff.put(comp, 0, compLen);
        }
    }
    // patch the length and check value reserved at the start
    int pageLength = buff.position() - start;
    buff.putInt(start, pageLength);
    int chunkId = chunk.id;
    int check = DataUtils.getCheckValue(chunkId)
            ^ DataUtils.getCheckValue(start)
            ^ DataUtils.getCheckValue(pageLength);
    buff.putShort(start + 4, (short) check);
    // the page now has a definitive position within the chunk
    this.pos = DataUtils.getPagePos(chunkId, start, pageLength, type);
    // update the chunk's space accounting
    long max = DataUtils.getPageMaxLength(pos);
    chunk.maxLength += max;
    chunk.maxLengthLive += max;
    chunk.pageCount++;
}
/**
 * Get the maximum length in bytes to store temporary records, recursively.
 *
 * @return the maximum length
 */
int getMaxLengthTempRecursive() {
    int total = getMaxLength();
    if (isLeaf()) {
        return total;
    }
    // add the estimates of all loaded (unsaved) child pages
    for (int i = 0; i <= keyCount; i++) {
        Page child = childrenPages[i];
        if (child != null) {
            total += child.getMaxLengthTempRecursive();
        }
    }
    return total;
}
/**
 * Get the maximum number of bytes this page may need when written,
 * mirroring the layout produced by write().
 *
 * @return the maximum length in bytes
 */
int getMaxLength() {
    // header: length (4), check (2), map id (var int),
    // key count (var int), type (1)
    int maxLength = 4 + 2 + DataUtils.MAX_VAR_INT_LEN + DataUtils.MAX_VAR_INT_LEN + 1;
    int len = keyCount;
    for (int i = 0; i < len; i++) {
        maxLength += map.getKeyType().getMaxLength(keys[i]);
    }
    if (isLeaf()) {
        for (int i = 0; i < len; i++) {
            maxLength += map.getValueType().getMaxLength(values[i]);
        }
    } else {
        // write() emits len + 1 child positions (8 bytes each) and
        // len + 1 descendant counts (var longs) - see its "i <= len"
        // loops; the previous code accounted for only len of each and
        // could under-estimate the required buffer size
        maxLength += 8 * (len + 1);
        maxLength += DataUtils.MAX_VAR_LONG_LEN * (len + 1);
    }
    return maxLength;
}
/**
 * Store this page and all children that are changed, in reverse order, and
 * update the position and the children.
 *
 * @param chunk the chunk
 * @param buff the target buffer
 * @return the page id
 */
long writeUnsavedRecursive(Chunk chunk, ByteBuffer buff) {
    if (!isLeaf()) {
        int len = children.length;
        for (int i = 0; i < len; i++) {
            Page p = childrenPages[i];
            if (p != null) {
                // write the child first so its final position is known,
                // record it, and release the in-memory reference
                children[i] = p.writeUnsavedRecursive(chunk, buff);
                childrenPages[i] = null;
            }
        }
    }
    // write this page last, after all child positions are final
    write(chunk, buff);
    return pos;
}
/**
 * Get the write version of this page (compared against the store's
 * write version in copyOnWrite).
 *
 * @return the version
 */
long getVersion() {
    return version;
}
/**
 * Get the number of child pages of this node.
 *
 * @return the child page count
 */
public int getChildPageCount() {
    return children.length;
}
/**
 * Two pages are equal if they are the same object, or if both have been
 * stored (pos != 0) at the same position.
 */
@Override
public boolean equals(Object other) {
    if (this == other) {
        return true;
    }
    if (!(other instanceof Page)) {
        return false;
    }
    // identity was already ruled out above, so only the stored
    // position can make the pages equal
    Page p = (Page) other;
    return pos != 0 && p.pos == pos;
}
/**
 * Compute a hash code consistent with equals: pages stored at the same
 * position (pos != 0) hash equally; unsaved pages use identity hashing.
 */
@Override
public int hashCode() {
    // fold the 64-bit position with XOR (as in Long.hashCode); the
    // previous code used '|', which biases bits towards 1 and degrades
    // hash distribution. Equal pages share the same pos, so the
    // equals/hashCode contract still holds.
    return pos != 0 ? (int) (pos ^ (pos >>> 32)) : super.hashCode();
}
/**
 * Get the estimated memory usage of this page in bytes.
 *
 * @return the memory estimate
 */
public int getMemory() {
    if (MVStore.ASSERT) {
        // verify the incrementally-maintained estimate against a
        // full recount
        if (memory != calculateMemory()) {
            throw new RuntimeException("Memory calculation error");
        }
    }
    return memory;
}
/**
 * Recalculate the memory estimate of this page from scratch: the fixed
 * page overhead, plus the memory of every key, plus either the values
 * (leaf) or a fixed amount per child slot (node).
 *
 * @return the memory estimate in bytes
 */
private int calculateMemory() {
    int mem = DataUtils.PAGE_MEMORY;
    for (int i = 0; i < keyCount; i++) {
        mem += map.getKeyType().getMemory(keys[i]);
    }
    if (this.isLeaf()) {
        for (int i = 0; i < keyCount; i++) {
            mem += map.getValueType().getMemory(values[i]);
        }
    } else {
        mem += this.getChildPageCount() * DataUtils.PAGE_MEMORY_CHILD;
    }
    return mem;
}
}
h2/src/tools/org/h2/dev/store/btree/StreamStore.java
deleted
100644 → 0
浏览文件 @
b222ae75
/*
* Copyright 2004-2011 H2 Group. Multiple-Licensed under the H2 License,
* Version 1.0, and under the Eclipse Public License, Version 1.0
* (http://h2database.com/html/license.html).
* Initial Developer: H2 Group
*/
package
org
.
h2
.
dev
.
store
.
btree
;
import
java.io.ByteArrayInputStream
;
import
java.io.ByteArrayOutputStream
;
import
java.io.IOException
;
import
java.io.InputStream
;
import
java.nio.ByteBuffer
;
import
java.util.Map
;
import
java.util.concurrent.atomic.AtomicLong
;
import
org.h2.util.IOUtils
;
/**
 * A facility to store streams in a map. Streams are split into blocks, which
 * are stored in a map. Very small streams are inlined in the stream id.
 * <p>
 * The key of the map is a long (incremented for each stored block). The default
 * initial value is 0. Before storing blocks into the map, the stream store
 * checks if there is already a block with the next key, and if necessary
 * searches the next free entry using a binary search (0 to Long.MAX_VALUE).
 * <p>
 * The format of the binary id is: An empty id represents 0 bytes of data.
 * In-place data is encoded as 0, the size (a variable size int), then the data.
 * A stored block is encoded as 1, the length of the block (a variable size
 * int), then the key (a variable size long). Multiple ids can be concatenated
 * to concatenate the data. If the id is large, it is stored itself, which is
 * encoded as 2, the total length (a variable size long), and the key of the
 * block that contains the id (a variable size long).
 */
public class StreamStore {

    // backing map: block key -> block data
    private final Map<Long, byte[]> map;
    // data shorter than this is inlined into the id
    private int minBlockSize = 256;
    // maximum number of bytes stored in a single block
    private int maxBlockSize = 256 * 1024;
    // the next candidate key for writeBlock
    private final AtomicLong nextKey = new AtomicLong();

    /**
     * Create a stream store instance.
     *
     * @param map the map to store blocks of data
     */
    public StreamStore(Map<Long, byte[]> map) {
        this.map = map;
    }

    public Map<Long, byte[]> getMap() {
        return map;
    }

    public void setNextKey(long nextKey) {
        this.nextKey.set(nextKey);
    }

    public long getNextKey() {
        return nextKey.get();
    }

    public void setMinBlockSize(int minBlockSize) {
        this.minBlockSize = minBlockSize;
    }

    public int getMinBlockSize() {
        return minBlockSize;
    }

    public void setMaxBlockSize(int maxBlockSize) {
        this.maxBlockSize = maxBlockSize;
    }

    public long getMaxBlockSize() {
        return maxBlockSize;
    }

    /**
     * Store the stream, and return the id.
     *
     * @param in the stream
     * @return the id (potentially an empty array)
     */
    public byte[] put(InputStream in) throws IOException {
        ByteArrayOutputStream id = new ByteArrayOutputStream();
        int level = 0;
        while (true) {
            if (put(id, in, level)) {
                break;
            }
            // if the id itself grows too large, store it indirectly and
            // go one level deeper
            if (id.size() > maxBlockSize / 2) {
                id = putIndirectId(id);
                level++;
            }
        }
        if (id.size() > minBlockSize * 2) {
            id = putIndirectId(id);
        }
        return id.toByteArray();
    }

    /**
     * Read from the stream and append id entries until the id grows large
     * or the end of the stream is reached.
     *
     * @param id the target id stream
     * @param in the source data
     * @param level the indirection level (0 reads raw data)
     * @return whether the end of the source stream was reached
     */
    private boolean put(ByteArrayOutputStream id, InputStream in, int level)
            throws IOException {
        if (level > 0) {
            // build a lower-level id first, and store it indirectly
            // once it becomes large
            ByteArrayOutputStream id2 = new ByteArrayOutputStream();
            while (true) {
                boolean eof = put(id2, in, level - 1);
                if (id2.size() > maxBlockSize / 2) {
                    id2 = putIndirectId(id2);
                    id2.writeTo(id);
                    return eof;
                } else if (eof) {
                    id2.writeTo(id);
                    return true;
                }
            }
        }
        ByteArrayOutputStream buffer = new ByteArrayOutputStream();
        int len = (int) IOUtils.copy(in, buffer, maxBlockSize);
        if (len == 0) {
            return true;
        }
        boolean eof = len < maxBlockSize;
        byte[] data = buffer.toByteArray();
        if (len < minBlockSize) {
            // entry type 0: data is inlined into the id
            id.write(0);
            DataUtils.writeVarInt(id, len);
            id.write(data);
        } else {
            // entry type 1: data is stored as a block, referenced by key
            id.write(1);
            DataUtils.writeVarInt(id, len);
            DataUtils.writeVarLong(id, writeBlock(data));
        }
        return eof;
    }

    /**
     * Store the given id as a block of its own and return the
     * (type 2) indirect id referencing it.
     *
     * @param id the id to store
     * @return the indirect id
     */
    private ByteArrayOutputStream putIndirectId(ByteArrayOutputStream id)
            throws IOException {
        byte[] data = id.toByteArray();
        id = new ByteArrayOutputStream();
        // entry type 2: total data length, then the key of the block
        // that contains the nested id
        id.write(2);
        DataUtils.writeVarLong(id, length(data));
        DataUtils.writeVarLong(id, writeBlock(data));
        return id;
    }

    /**
     * Store a block of data under the next free key.
     *
     * @param data the block data
     * @return the key
     */
    private long writeBlock(byte[] data) {
        long key = getAndIncrementNextKey();
        map.put(key, data);
        return key;
    }

    /**
     * Get the next free key; usually just the incremented counter, but if
     * that key is already occupied, find the next free one.
     *
     * @return a key that is not yet used in the map
     */
    private long getAndIncrementNextKey() {
        long key = nextKey.getAndIncrement();
        if (!map.containsKey(key)) {
            return key;
        }
        // search the next free id using binary search
        synchronized (this) {
            long low = key, high = Long.MAX_VALUE;
            while (low < high) {
                long x = (low + high) >>> 1;
                if (map.containsKey(x)) {
                    low = x + 1;
                } else {
                    high = x;
                }
            }
            key = low;
            nextKey.set(key + 1);
            return key;
        }
    }

    /**
     * Remove all stored blocks for the given id.
     *
     * @param id the id
     */
    public void remove(byte[] id) {
        ByteBuffer idBuffer = ByteBuffer.wrap(id);
        while (idBuffer.hasRemaining()) {
            switch (idBuffer.get()) {
            case 0:
                // inline data: nothing stored, just skip over it
                int len = DataUtils.readVarInt(idBuffer);
                idBuffer.position(idBuffer.position() + len);
                break;
            case 1:
                // stored block: remove it from the map
                DataUtils.readVarInt(idBuffer);
                long k = DataUtils.readVarLong(idBuffer);
                map.remove(k);
                break;
            case 2:
                // indirect id: remove the referenced blocks, then the
                // block holding the nested id itself
                DataUtils.readVarLong(idBuffer);
                long k2 = DataUtils.readVarLong(idBuffer);
                // recurse
                remove(map.get(k2));
                map.remove(k2);
                break;
            default:
                throw new IllegalArgumentException("Unsupported id");
            }
        }
    }

    /**
     * Calculate the number of data bytes for the given id. As the length is
     * encoded in the id, this operation does not cause any reads in the map.
     *
     * @param id the id
     * @return the length
     */
    public long length(byte[] id) {
        ByteBuffer idBuffer = ByteBuffer.wrap(id);
        long length = 0;
        while (idBuffer.hasRemaining()) {
            switch (idBuffer.get()) {
            case 0:
                int len = DataUtils.readVarInt(idBuffer);
                idBuffer.position(idBuffer.position() + len);
                length += len;
                break;
            case 1:
                length += DataUtils.readVarInt(idBuffer);
                DataUtils.readVarLong(idBuffer);
                break;
            case 2:
                length += DataUtils.readVarLong(idBuffer);
                DataUtils.readVarLong(idBuffer);
                break;
            default:
                throw new IllegalArgumentException("Unsupported id");
            }
        }
        return length;
    }

    /**
     * Check whether the id itself contains all the data. This operation does
     * not cause any reads in the map.
     *
     * @param id the id
     * @return if the id contains the data
     */
    public boolean isInPlace(byte[] id) {
        ByteBuffer idBuffer = ByteBuffer.wrap(id);
        while (idBuffer.hasRemaining()) {
            if (idBuffer.get() != 0) {
                return false;
            }
            int len = DataUtils.readVarInt(idBuffer);
            idBuffer.position(idBuffer.position() + len);
        }
        return true;
    }

    /**
     * Open an input stream to read data.
     *
     * @param id the id
     * @return the stream
     */
    public InputStream get(byte[] id) {
        return new Stream(this, id);
    }

    /**
     * Get the block.
     *
     * @param key the key
     * @return the block
     */
    byte[] getBlock(long key) {
        return map.get(key);
    }

    /**
     * A stream backed by a map.
     */
    static class Stream extends InputStream {

        private final StreamStore store;
        // lazily-allocated buffer for the single-byte read()
        private byte[] oneByteBuffer;
        // the remaining (possibly expanded) id entries
        private ByteBuffer idBuffer;
        // the current block being read, or null
        private ByteArrayInputStream buffer;
        // number of bytes still to skip before the next returned block
        private long skip;
        // total length of the stream
        private final long length;
        // current read position
        private long pos;

        Stream(StreamStore store, byte[] id) {
            this.store = store;
            this.length = store.length(id);
            this.idBuffer = ByteBuffer.wrap(id);
        }

        @Override
        public int read() {
            byte[] buffer = oneByteBuffer;
            if (buffer == null) {
                buffer = oneByteBuffer = new byte[1];
            }
            int len = read(buffer, 0, 1);
            return len == -1 ? -1 : (buffer[0] & 255);
        }

        @Override
        public long skip(long n) {
            n = Math.min(length - pos, n);
            if (n == 0) {
                return 0;
            }
            if (buffer != null) {
                // skip within the current block if possible
                long s = buffer.skip(n);
                if (s > 0) {
                    n = s;
                } else {
                    // current block exhausted; defer the skip to the
                    // next block lookup
                    buffer = null;
                    skip += n;
                }
            } else {
                skip += n;
            }
            pos += n;
            return n;
        }

        @Override
        public void close() {
            buffer = null;
            idBuffer.position(idBuffer.limit());
            pos = length;
        }

        @Override
        public int read(byte[] b, int off, int len) {
            while (true) {
                if (buffer == null) {
                    buffer = nextBuffer();
                    if (buffer == null) {
                        return -1;
                    }
                }
                int result = buffer.read(b, off, len);
                if (result > 0) {
                    pos += result;
                    return result;
                }
                buffer = null;
            }
        }

        /**
         * Resolve the next id entry (honoring any pending skip) into a
         * readable block, or null at the end of the stream.
         */
        private ByteArrayInputStream nextBuffer() {
            while (idBuffer.hasRemaining()) {
                switch (idBuffer.get()) {
                case 0: {
                    // inline data
                    int len = DataUtils.readVarInt(idBuffer);
                    if (skip >= len) {
                        skip -= len;
                        idBuffer.position(idBuffer.position() + len);
                        continue;
                    }
                    int p = (int) (idBuffer.position() + skip);
                    int l = (int) (len - skip);
                    // fix: the remaining skip is consumed by this block,
                    // so it must be reset (the original left it set and
                    // would skip again in later blocks)
                    skip = 0;
                    idBuffer.position(p + l);
                    return new ByteArrayInputStream(idBuffer.array(), p, l);
                }
                case 1: {
                    // stored block
                    int len = DataUtils.readVarInt(idBuffer);
                    long key = DataUtils.readVarLong(idBuffer);
                    if (skip >= len) {
                        skip -= len;
                        continue;
                    }
                    byte[] data = store.getBlock(key);
                    int s = (int) skip;
                    skip = 0;
                    return new ByteArrayInputStream(data, s, data.length - s);
                }
                case 2: {
                    // indirect id: the total length is written with
                    // writeVarLong (see putIndirectId and length()), so it
                    // must be read with readVarLong; the original used
                    // readVarInt and mis-read large lengths
                    long len = DataUtils.readVarLong(idBuffer);
                    long key = DataUtils.readVarLong(idBuffer);
                    if (skip >= len) {
                        skip -= len;
                        continue;
                    }
                    // inline the nested id in front of the remaining entries
                    byte[] k = store.getBlock(key);
                    ByteBuffer newBuffer = ByteBuffer.allocate(k.length
                            + idBuffer.limit() - idBuffer.position());
                    newBuffer.put(k);
                    newBuffer.put(idBuffer);
                    newBuffer.flip();
                    idBuffer = newBuffer;
                    return nextBuffer();
                }
                default:
                    throw new IllegalArgumentException("Unsupported id");
                }
            }
            return null;
        }

    }

}
h2/src/tools/org/h2/dev/store/btree/package.html
deleted
100644 → 0
浏览文件 @
b222ae75
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">
<!--
Copyright 2004-2011 H2 Group. Multiple-Licensed under the H2 License, Version 1.0,
and under the Eclipse Public License, Version 1.0
(http://h2database.com/html/license.html).
Initial Developer: H2 Group
-->
<html
xmlns=
"http://www.w3.org/1999/xhtml"
lang=
"en"
xml:lang=
"en"
>
<head><meta
http-equiv=
"Content-Type"
content=
"text/html;charset=utf-8"
/><title>
Javadoc package documentation
</title></head><body
style=
"font: 9pt/130% Tahoma, Arial, Helvetica, sans-serif; font-weight: normal;"
><p>
A persistent storage for tree maps.
</p></body></html>
\ No newline at end of file
编写
预览
Markdown
格式
0%
重试
或
添加新文件
添加附件
取消
您添加了
0
人
到此讨论。请谨慎行事。
请先完成此评论的编辑!
取消
请
注册
或者
登录
后发表评论