h2database · Commits · f8a5383e

Commit f8a5383e, authored July 13, 2009 by Thomas Mueller

New experimental page store.

Parent: 70ed289c

Showing 24 changed files with 950 additions and 228 deletions (+950 -228)
h2/src/main/org/h2/index/PageBtree.java                 +5    -4
h2/src/main/org/h2/index/PageBtreeIndex.java            +29   -11
h2/src/main/org/h2/index/PageBtreeLeaf.java             +4    -3
h2/src/main/org/h2/index/PageBtreeNode.java             +10   -7
h2/src/main/org/h2/index/PageData.java                  +5    -5
h2/src/main/org/h2/index/PageDataLeaf.java              +37   -14
h2/src/main/org/h2/index/PageDataLeafOverflow.java      +27   -32
h2/src/main/org/h2/index/PageDataNode.java              +6    -4
h2/src/main/org/h2/index/PageScanIndex.java             +7    -8
h2/src/main/org/h2/store/Data.java                      +640  -0
h2/src/main/org/h2/store/DataPage.java                  +9    -9
h2/src/main/org/h2/store/DiskFile.java                  +2    -2
h2/src/main/org/h2/store/FileStoreInputStream.java      +2    -2
h2/src/main/org/h2/store/PageFreeList.java              +3    -3
h2/src/main/org/h2/store/PageLog.java                   +23   -12
h2/src/main/org/h2/store/PageOutputStream.java          +1    -1
h2/src/main/org/h2/store/PageStore.java                 +52   -45
h2/src/main/org/h2/store/PageStreamData.java            +3    -3
h2/src/main/org/h2/store/PageStreamTrunk.java           +6    -5
h2/src/main/org/h2/tools/Recover.java                   +5    -3
h2/src/test/org/h2/test/TestAll.java                    +7    -5
h2/src/test/org/h2/test/TestBase.java                   +42   -0
h2/src/test/org/h2/test/db/TestMemoryUsage.java         +24   -11
h2/src/test/org/h2/test/db/TestOutOfMemory.java         +1    -39
h2/src/main/org/h2/index/PageBtree.java

@@ -8,7 +8,7 @@ package org.h2.index;
 import java.sql.SQLException;
 import org.h2.result.SearchRow;
-import org.h2.store.DataPage;
+import org.h2.store.Data;
 import org.h2.store.Record;

@@ -34,7 +34,7 @@ abstract class PageBtree extends Record {
     /**
      * The data page.
      */
-    protected final DataPage data;
+    protected final Data data;

     /**
      * The row offsets.

@@ -66,7 +66,7 @@ abstract class PageBtree extends Record {
     protected boolean written;

-    PageBtree(PageBtreeIndex index, int pageId, int parentPageId, DataPage data) {
+    PageBtree(PageBtreeIndex index, int pageId, int parentPageId, Data data) {
         this.index = index;
         this.parentPageId = parentPageId;
         this.data = data;

@@ -248,7 +248,8 @@ abstract class PageBtree extends Record {
      * @return number of double words (4 bytes)
      */
     public int getMemorySize() {
-        return index.getPageStore().getPageSize() >> 2;
+        // double the byte array size
+        return index.getPageStore().getPageSize() >> 1;
     }
 }
h2/src/main/org/h2/index/PageBtreeIndex.java

@@ -13,7 +13,7 @@ import org.h2.engine.Session;
 import org.h2.message.Message;
 import org.h2.result.Row;
 import org.h2.result.SearchRow;
-import org.h2.store.DataPage;
+import org.h2.store.Data;
 import org.h2.store.PageStore;
 import org.h2.store.Record;
 import org.h2.table.Column;

@@ -53,7 +53,7 @@ public class PageBtreeIndex extends BaseIndex {
             // it should not for new tables, otherwise redo of other operations
             // must ensure this page is not used for other things
             store.addMeta(this, session, headPos);
-            PageBtreeLeaf root = new PageBtreeLeaf(this, headPos, Page.ROOT, store.createDataPage());
+            PageBtreeLeaf root = new PageBtreeLeaf(this, headPos, Page.ROOT, store.createData());
             store.updateRecord(root, true, root.data);
         } else {
             this.headPos = headPos;

@@ -93,9 +93,11 @@ public class PageBtreeIndex extends BaseIndex {
                 }
             }
         }
+        // safe memory
+        SearchRow newRow = getSearchRow(row);
         while (true) {
             PageBtree root = getPage(headPos);
-            int splitPoint = root.addRowTry(row);
+            int splitPoint = root.addRowTry(newRow);
             if (splitPoint == 0) {
                 break;
             }

@@ -110,7 +112,7 @@ public class PageBtreeIndex extends BaseIndex {
             page1.setPageId(id);
             page1.setParentPageId(headPos);
             page2.setParentPageId(headPos);
-            PageBtreeNode newRoot = new PageBtreeNode(this, rootPageId, Page.ROOT, store.createDataPage());
+            PageBtreeNode newRoot = new PageBtreeNode(this, rootPageId, Page.ROOT, store.createData());
             newRoot.init(page1, pivot, page2);
             store.updateRecord(page1, true, page1.data);
             store.updateRecord(page2, true, page2.data);

@@ -120,6 +122,22 @@ public class PageBtreeIndex extends BaseIndex {
         rowCount++;
     }

+    /**
+     * Create a search row for this row.
+     *
+     * @param row the row
+     * @return the search row
+     */
+    private SearchRow getSearchRow(Row row) {
+        SearchRow r = table.getTemplateSimpleRow(columns.length == 1);
+        r.setPosAndVersion(row);
+        for (int j = 0; j < columns.length; j++) {
+            int idx = columns[j].getColumnId();
+            r.setValue(idx, row.getValue(idx));
+        }
+        return r;
+    }
+
     /**
      * Read the given page.
      *

@@ -137,7 +155,7 @@ public class PageBtreeIndex extends BaseIndex {
             }
             return (PageBtree) rec;
         }
-        DataPage data = store.readPage(id);
+        Data data = store.readPage(id);
         data.reset();
         int parentPageId = data.readInt();
         int type = data.readByte() & 255;

@@ -266,7 +284,7 @@ public class PageBtreeIndex extends BaseIndex {
     private void removeAllRows() throws SQLException {
         PageBtree root = getPage(headPos);
         root.freeChildren();
-        root = new PageBtreeLeaf(this, headPos, Page.ROOT, store.createDataPage());
+        root = new PageBtreeLeaf(this, headPos, Page.ROOT, store.createData());
         store.removeRecord(headPos);
         store.updateRecord(root, true, null);
         rowCount = 0;

@@ -297,7 +315,7 @@ public class PageBtreeIndex extends BaseIndex {
      * @param data the data page
      * @return the row
      */
-    Row readRow(DataPage data) throws SQLException {
+    Row readRow(Data data) throws SQLException {
         return tableData.readRow(data);
     }

@@ -324,7 +342,7 @@ public class PageBtreeIndex extends BaseIndex {
      * @param onlyPosition whether only the position of the row is stored
      * @return the row
      */
-    SearchRow readRow(DataPage data, int offset, boolean onlyPosition) throws SQLException {
+    SearchRow readRow(Data data, int offset, boolean onlyPosition) throws SQLException {
         data.setPos(offset);
         int pos = data.readInt();
         if (onlyPosition) {

@@ -347,7 +365,7 @@ public class PageBtreeIndex extends BaseIndex {
      * @param onlyPosition whether only the position of the row is stored
      * @param row the row to write
      */
-    void writeRow(DataPage data, int offset, SearchRow row, boolean onlyPosition) throws SQLException {
+    void writeRow(Data data, int offset, SearchRow row, boolean onlyPosition) throws SQLException {
         data.setPos(offset);
         data.writeInt(row.getPos());
         if (!onlyPosition) {

@@ -366,8 +384,8 @@ public class PageBtreeIndex extends BaseIndex {
      * @param onlyPosition whether only the position of the row is stored
      * @return the number of bytes
      */
-    int getRowSize(DataPage dummy, SearchRow row, boolean onlyPosition) throws SQLException {
-        int rowsize = DataPage.LENGTH_INT;
+    int getRowSize(Data dummy, SearchRow row, boolean onlyPosition) throws SQLException {
+        int rowsize = Data.LENGTH_INT;
         if (!onlyPosition) {
             for (Column col : columns) {
                 Value v = row.getValue(col.getColumnId());
h2/src/main/org/h2/index/PageBtreeLeaf.java

@@ -11,6 +11,7 @@ import org.h2.constant.ErrorCode;
 import org.h2.constant.SysProperties;
 import org.h2.message.Message;
 import org.h2.result.SearchRow;
+import org.h2.store.Data;
 import org.h2.store.DataPage;
 import org.h2.store.PageStore;

@@ -30,7 +31,7 @@ class PageBtreeLeaf extends PageBtree {
     private static final int OFFSET_LENGTH = 2;
     private static final int OFFSET_START = 11;

-    PageBtreeLeaf(PageBtreeIndex index, int pageId, int parentPageId, DataPage data) {
+    PageBtreeLeaf(PageBtreeIndex index, int pageId, int parentPageId, Data data) {
         super(index, pageId, parentPageId, data);
         start = OFFSET_START;
     }

@@ -140,7 +141,7 @@ class PageBtreeLeaf extends PageBtree {
     PageBtree split(int splitPoint) throws SQLException {
         int newPageId = index.getPageStore().allocatePage();
-        PageBtreeLeaf p2 = new PageBtreeLeaf(index, newPageId, parentPageId, index.getPageStore().createDataPage());
+        PageBtreeLeaf p2 = new PageBtreeLeaf(index, newPageId, parentPageId, index.getPageStore().createData());
         for (int i = splitPoint; i < entryCount;) {
             p2.addRowTry(getRow(splitPoint));
             removeRow(splitPoint);

@@ -213,7 +214,7 @@ class PageBtreeLeaf extends PageBtree {
         written = true;
     }

-    DataPage getDataPage() throws SQLException {
+    Data getData() throws SQLException {
         write();
         return data;
     }
h2/src/main/org/h2/index/PageBtreeNode.java

@@ -10,7 +10,10 @@ import java.sql.SQLException;
 import org.h2.constant.SysProperties;
 import org.h2.message.Message;
 import org.h2.result.SearchRow;
+import org.h2.store.Data;
 import org.h2.store.DataPage;
 import org.h2.store.PageStore;
+import org.h2.util.MemoryUtils;

 /**
  * A b-tree node page that contains index data.

@@ -38,7 +41,7 @@ class PageBtreeNode extends PageBtree {
     private int rowCount = UNKNOWN_ROWCOUNT;

-    PageBtreeNode(PageBtreeIndex index, int pageId, int parentPageId, DataPage data) {
+    PageBtreeNode(PageBtreeIndex index, int pageId, int parentPageId, Data data) {
         super(index, pageId, parentPageId, data);
         start = CHILD_OFFSET_PAIR_START;
     }

@@ -51,8 +54,8 @@ class PageBtreeNode extends PageBtree {
         rowCount = rowCountStored = data.readInt();
         childPageIds = new int[entryCount + 1];
         childPageIds[entryCount] = data.readInt();
-        rows = new SearchRow[entryCount];
-        offsets = new int[entryCount];
+        rows = PageStore.newSearchRows(entryCount);
+        offsets = MemoryUtils.newInts(entryCount);
         for (int i = 0; i < entryCount; i++) {
             childPageIds[i] = data.readInt();
             offsets[i] = data.readInt();

@@ -164,7 +167,7 @@ class PageBtreeNode extends PageBtree {
     PageBtree split(int splitPoint) throws SQLException {
         int newPageId = index.getPageStore().allocatePage();
-        PageBtreeNode p2 = new PageBtreeNode(index, newPageId, parentPageId, index.getPageStore().createDataPage());
+        PageBtreeNode p2 = new PageBtreeNode(index, newPageId, parentPageId, index.getPageStore().createData());
         if (onlyPosition) {
             // TODO optimize: maybe not required
             p2.onlyPosition = true;

@@ -204,7 +207,7 @@ class PageBtreeNode extends PageBtree {
         entryCount = 0;
         childPageIds = new int[] { page1.getPageId() };
         rows = new SearchRow[0];
-        offsets = new int[0];
+        offsets = MemoryUtils.EMPTY_INTS;
         addChild(0, page2.getPageId(), pivot);
         check();
     }

@@ -336,8 +339,8 @@ class PageBtreeNode extends PageBtree {
         if (entryCount < 0) {
             Message.throwInternalError();
         }
-        SearchRow[] newRows = new SearchRow[entryCount];
-        int[] newOffsets = new int[entryCount];
+        SearchRow[] newRows = PageStore.newSearchRows(entryCount);
+        int[] newOffsets = MemoryUtils.newInts(entryCount);
         int[] newChildPageIds = new int[entryCount + 1];
         System.arraycopy(offsets, 0, newOffsets, 0, Math.min(entryCount, i));
         System.arraycopy(rows, 0, newRows, 0, Math.min(entryCount, i));
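The array allocations in the hunks above are routed through helper methods (MemoryUtils.newInts, PageStore.newSearchRows) and a shared MemoryUtils.EMPTY_INTS constant instead of plain new int[...] / new SearchRow[...] expressions. The bodies of those helpers are not part of this diff; the following is only a plausible sketch, under the assumption that they exist mainly to hand out a shared zero-length array instead of allocating a fresh one:

    // Hypothetical sketch of the allocation helper referenced above; the real
    // org.h2.util.MemoryUtils implementation is not shown in this commit.
    public class MemoryUtilsSketch {
        public static final int[] EMPTY_INTS = new int[0];

        public static int[] newInts(int len) {
            // reuse one shared empty array instead of allocating many of them
            return len == 0 ? EMPTY_INTS : new int[len];
        }
    }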
h2/src/main/org/h2/index/PageData.java

@@ -7,10 +7,9 @@
 package org.h2.index;

 import java.sql.SQLException;
 import org.h2.engine.Session;
 import org.h2.result.Row;
-import org.h2.store.DataPage;
+import org.h2.store.Data;
 import org.h2.store.Record;

 /**

@@ -36,7 +35,7 @@ abstract class PageData extends Record {
     /**
      * The data page.
      */
-    protected final DataPage data;
+    protected final Data data;

     /**
      * The number of entries.

@@ -53,7 +52,7 @@ abstract class PageData extends Record {
     protected boolean written;

-    PageData(PageScanIndex index, int pageId, int parentPageId, DataPage data) {
+    PageData(PageScanIndex index, int pageId, int parentPageId, Data data) {
         this.index = index;
         this.parentPageId = parentPageId;
         this.data = data;

@@ -204,7 +203,8 @@ abstract class PageData extends Record {
      * @return number of double words (4 bytes)
      */
     public int getMemorySize() {
-        return index.getPageStore().getPageSize() >> 2;
+        // double the byte array size
+        return index.getPageStore().getPageSize() >> 1;
     }

     int getParentPageId() {
h2/src/main/org/h2/index/PageDataLeaf.java

@@ -6,11 +6,13 @@
  */
 package org.h2.index;

+import java.lang.ref.SoftReference;
 import java.sql.SQLException;
 import org.h2.constant.ErrorCode;
 import org.h2.engine.Session;
 import org.h2.message.Message;
 import org.h2.result.Row;
+import org.h2.store.Data;
 import org.h2.store.DataPage;
 import org.h2.store.PageStore;

@@ -41,6 +43,11 @@ class PageDataLeaf extends PageData {
      */
     Row[] rows;

+    /**
+     * For pages with overflow: the soft reference to the row
+     */
+    SoftReference<Row> rowRef;
+
     /**
      * The page id of the first overflow page (0 if no overflow).
      */

@@ -51,7 +58,12 @@ class PageDataLeaf extends PageData {
      */
     int start;

-    PageDataLeaf(PageScanIndex index, int pageId, int parentPageId, DataPage data) {
+    /**
+     * The size of the row in bytes for large rows.
+     */
+    private int overflowRowSize;
+
+    PageDataLeaf(PageScanIndex index, int pageId, int parentPageId, Data data) {
         super(index, pageId, parentPageId, data);
         start = KEY_OFFSET_PAIR_START;
     }

@@ -139,10 +151,13 @@ class PageDataLeaf extends PageData {
             int previous = getPos();
             int dataOffset = pageSize;
             int page = index.getPageStore().allocatePage();
-            do {
             if (firstOverflowPageId == 0) {
                 firstOverflowPageId = page;
             }
+            this.overflowRowSize = pageSize + rowLength;
+            write();
+            // free up the space used by the row
+            rowRef = new SoftReference<Row>(rows[0]);
+            rows[0] = null;
+            do {
                 int type, size, next;
                 if (remaining <= pageSize - PageDataLeafOverflow.START_LAST) {
                     type = Page.TYPE_DATA_OVERFLOW | Page.FLAG_LAST;

@@ -153,13 +168,14 @@ class PageDataLeaf extends PageData {
                     size = pageSize - PageDataLeafOverflow.START_MORE;
                     next = index.getPageStore().allocatePage();
                 }
-                PageDataLeafOverflow overflow = new PageDataLeafOverflow(this, page, type, previous, next, dataOffset, size);
+                PageDataLeafOverflow overflow = new PageDataLeafOverflow(this, page, type, previous, next, data, dataOffset, size);
                 index.getPageStore().updateRecord(overflow, true, null);
                 dataOffset += size;
                 remaining -= size;
                 previous = page;
                 page = next;
             } while (remaining > 0);
+            data.truncate(index.getPageStore().getPageSize());
         }
         return 0;
     }

@@ -204,22 +220,32 @@ class PageDataLeaf extends PageData {
         Row r = rows[at];
         if (r == null) {
             if (firstOverflowPageId != 0) {
+                if (rowRef != null) {
+                    r = rowRef.get();
+                    if (r != null) {
+                        return r;
+                    }
+                }
                 PageStore store = index.getPageStore();
                 int pageSize = store.getPageSize();
-                data.setPos(pageSize);
                 int next = firstOverflowPageId;
                 int offset = pageSize;
+                data.setPos(pageSize);
                 do {
                     PageDataLeafOverflow page = index.getPageOverflow(next, this, offset);
                     next = page.readInto(data);
                 } while (next != 0);
+                overflowRowSize = data.length();
             }
             data.setPos(offsets[at]);
             r = index.readRow(data);
             r.setPos(keys[at]);
-            rows[at] = r;
+            if (firstOverflowPageId != 0) {
+                rowRef = new SoftReference<Row>(r);
+            } else {
+                rows[at] = r;
+            }
         }
         return r;
     }

@@ -229,7 +255,7 @@ class PageDataLeaf extends PageData {
     PageData split(int splitPoint) throws SQLException {
         int newPageId = index.getPageStore().allocatePage();
-        PageDataLeaf p2 = new PageDataLeaf(index, newPageId, parentPageId, index.getPageStore().createDataPage());
+        PageDataLeaf p2 = new PageDataLeaf(index, newPageId, parentPageId, index.getPageStore().createData());
         for (int i = splitPoint; i < entryCount;) {
             p2.addRowTry(getRowAt(splitPoint));
             removeRow(splitPoint);

@@ -311,6 +337,7 @@ class PageDataLeaf extends PageData {
     public void write(DataPage buff) throws SQLException {
         write();
         index.getPageStore().writePage(getPos(), data);
+        data.truncate(index.getPageStore().getPageSize());
     }

     PageStore getPageStore() {

@@ -329,6 +356,7 @@ class PageDataLeaf extends PageData {
         }
         readAllRows();
         data.reset();
+        data.checkCapacity(overflowRowSize);
         data.writeInt(parentPageId);
         int type;
         if (firstOverflowPageId == 0) {

@@ -348,16 +376,11 @@ class PageDataLeaf extends PageData {
         }
         for (int i = 0; i < entryCount; i++) {
             data.setPos(offsets[i]);
-            rows[i].write(data);
+            getRowAt(i).write(data);
         }
         written = true;
     }

-    DataPage getDataPage() throws SQLException {
-        write();
-        return data;
-    }
-
     public String toString() {
         return "page[" + getPos() + "] data leaf table:" + index.getId() + " entries:" + entryCount;
     }
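The getRowAt change above caches a large overflow row behind a SoftReference instead of a hard reference, so the garbage collector may drop it under memory pressure, after which the row is re-read from its overflow pages. A minimal, standalone sketch of that caching pattern (illustrative only, not code from this commit):

    import java.lang.ref.SoftReference;

    // Cache a value behind a SoftReference: the GC may clear it when memory
    // is low, in which case it is rebuilt on demand via the abstract load().
    abstract class SoftCache<T> {
        private SoftReference<T> ref;

        /** Rebuild the value, e.g. by re-reading overflow pages from disk. */
        protected abstract T load();

        T get() {
            T value = ref == null ? null : ref.get();
            if (value == null) {
                value = load();                      // expensive rebuild
                ref = new SoftReference<T>(value);   // cache again, reclaimable
            }
            return value;
        }
    }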
h2/src/main/org/h2/index/PageDataLeafOverflow.java

@@ -9,8 +9,8 @@ package org.h2.index;
 import java.sql.SQLException;
 import org.h2.constant.ErrorCode;
 import org.h2.message.Message;
+import org.h2.store.Data;
 import org.h2.store.DataPage;
-import org.h2.store.PageStore;
 import org.h2.store.Record;

@@ -35,7 +35,10 @@ public class PageDataLeafOverflow extends Record {
     static final int START_MORE = 9;

-    private final PageDataLeaf leaf;
+    /**
+     * The index.
+     */
+    private final PageScanIndex index;

     /**
      * The page type.

@@ -57,22 +60,24 @@ public class PageDataLeafOverflow extends Record {
     private int size;

     /**
      * The first content byte starts at the given position
      * in the leaf page when the page size is unlimited.
      */
     private final int offset;

-    private DataPage data;
+    private Data data;

-    PageDataLeafOverflow(PageDataLeaf leaf, int pageId, int type, int previous, int next, int offset, int size) {
-        this.leaf = leaf;
+    PageDataLeafOverflow(PageDataLeaf leaf, int pageId, int type, int previous, int next, Data allData, int offset, int size) {
+        this.index = leaf.index;
         setPos(pageId);
         this.type = type;
         this.parentPage = previous;
         this.nextPage = next;
         this.offset = offset;
         this.size = size;
+        data = index.getPageStore().createData();
+        data.writeInt(parentPage);
+        data.writeByte((byte) type);
+        if (type == Page.TYPE_DATA_OVERFLOW) {
+            data.writeInt(nextPage);
+        } else {
+            data.writeShortInt(size);
+        }
+        data.write(allData.getBytes(), offset, size);
     }

@@ -80,14 +85,13 @@ public class PageDataLeafOverflow extends Record {
      *
      * @param leaf the leaf page
      * @param pageId the page id
-     * @param data the data page
+     * @param dataAll the data page with the complete value
      * @param offset the offset
      */
-    public PageDataLeafOverflow(PageDataLeaf leaf, int pageId, DataPage data, int offset) {
-        this.leaf = leaf;
+    public PageDataLeafOverflow(PageDataLeaf leaf, int pageId, Data data, int offset) {
+        this.index = leaf.index;
         setPos(pageId);
         this.data = data;
         this.offset = offset;
     }

@@ -100,7 +104,7 @@ public class PageDataLeafOverflow extends Record {
             size = data.readShortInt();
             nextPage = 0;
         } else if (type == Page.TYPE_DATA_OVERFLOW) {
-            size = leaf.getPageStore().getPageSize() - START_MORE;
+            size = index.getPageStore().getPageSize() - START_MORE;
             nextPage = data.readInt();
         } else {
             throw Message.getSQLException(ErrorCode.FILE_CORRUPTED_1, "page:" + getPos() + " type:" + type);

@@ -113,7 +117,8 @@ public class PageDataLeafOverflow extends Record {
      * @param target the target data page
      * @return the next page, or 0 if no next page
      */
-    int readInto(DataPage target) {
+    int readInto(Data target) {
+        target.checkCapacity(size);
         if (type == (Page.TYPE_DATA_OVERFLOW | Page.FLAG_LAST)) {
             target.write(data.getBytes(), START_LAST, size);
             return 0;

@@ -127,22 +132,11 @@ public class PageDataLeafOverflow extends Record {
     }

     public int getByteCount(DataPage dummy) {
-        return leaf.getByteCount(dummy);
+        return index.getPageStore().getPageSize();
     }

     public void write(DataPage buff) throws SQLException {
-        PageStore store = leaf.getPageStore();
-        DataPage overflow = store.createDataPage();
-        DataPage data = leaf.getDataPage();
-        overflow.writeInt(parentPage);
-        overflow.writeByte((byte) type);
-        if (type == Page.TYPE_DATA_OVERFLOW) {
-            overflow.writeInt(nextPage);
-        } else {
-            overflow.writeShortInt(size);
-        }
-        overflow.write(data.getBytes(), offset, size);
-        store.writePage(getPos(), overflow);
+        index.getPageStore().writePage(getPos(), data);
     }

     public String toString() {

@@ -155,7 +149,8 @@ public class PageDataLeafOverflow extends Record {
      * @return number of double words (4 bytes)
      */
     public int getMemorySize() {
-        return leaf.getMemorySize();
+        // double the byte array size
+        return index.getPageStore().getPageSize() >> 1;
     }

     int getParent() {
h2/src/main/org/h2/index/PageDataNode.java

@@ -11,7 +11,9 @@ import java.sql.SQLException;
 import org.h2.engine.Session;
 import org.h2.message.Message;
 import org.h2.result.Row;
+import org.h2.store.Data;
 import org.h2.store.DataPage;
+import org.h2.util.MemoryUtils;

 /**
  * A leaf page that contains data of one or multiple rows.

@@ -39,7 +41,7 @@ class PageDataNode extends PageData {
     private int rowCount = UNKNOWN_ROWCOUNT;

-    PageDataNode(PageScanIndex index, int pageId, int parentPageId, DataPage data) {
+    PageDataNode(PageScanIndex index, int pageId, int parentPageId, Data data) {
         super(index, pageId, parentPageId, data);
     }

@@ -49,7 +51,7 @@ class PageDataNode extends PageData {
         rowCount = rowCountStored = data.readInt();
         childPageIds = new int[entryCount + 1];
         childPageIds[entryCount] = data.readInt();
-        keys = new int[entryCount];
+        keys = MemoryUtils.newInts(entryCount);
         for (int i = 0; i < entryCount; i++) {
             childPageIds[i] = data.readInt();
             keys[i] = data.readInt();

@@ -117,7 +119,7 @@ class PageDataNode extends PageData {
     PageData split(int splitPoint) throws SQLException {
         int newPageId = index.getPageStore().allocatePage();
-        PageDataNode p2 = new PageDataNode(index, newPageId, parentPageId, index.getPageStore().createDataPage());
+        PageDataNode p2 = new PageDataNode(index, newPageId, parentPageId, index.getPageStore().createData());
         int firstChild = childPageIds[splitPoint];
         for (int i = splitPoint; i < entryCount;) {
             p2.addChild(p2.entryCount, childPageIds[splitPoint + 1], keys[splitPoint]);

@@ -273,7 +275,7 @@ class PageDataNode extends PageData {
         if (entryCount < 0) {
             Message.throwInternalError();
         }
-        int[] newKeys = new int[entryCount];
+        int[] newKeys = MemoryUtils.newInts(entryCount);
         int[] newChildPageIds = new int[entryCount + 1];
         System.arraycopy(keys, 0, newKeys, 0, Math.min(entryCount, i));
         System.arraycopy(childPageIds, 0, newChildPageIds, 0, i);
h2/src/main/org/h2/index/PageScanIndex.java

@@ -12,7 +12,6 @@ import java.util.HashMap;
 import java.util.HashSet;
 import java.util.Iterator;
-import java.util.List;
 import org.h2.constant.ErrorCode;
 import org.h2.constant.SysProperties;
 import org.h2.engine.Constants;

@@ -21,7 +20,7 @@ import org.h2.log.UndoLogRecord;
 import org.h2.message.Message;
 import org.h2.result.Row;
 import org.h2.result.SearchRow;
-import org.h2.store.DataPage;
+import org.h2.store.Data;
 import org.h2.store.PageStore;
 import org.h2.store.Record;
 import org.h2.table.Column;

@@ -67,7 +66,7 @@ public class PageScanIndex extends BaseIndex implements RowIndex {
             // it should not for new tables, otherwise redo of other operations
             // must ensure this page is not used for other things
             store.addMeta(this, session, headPos);
-            PageDataLeaf root = new PageDataLeaf(this, headPos, Page.ROOT, store.createDataPage());
+            PageDataLeaf root = new PageDataLeaf(this, headPos, Page.ROOT, store.createData());
             store.updateRecord(root, true, root.data);
         } else {
             this.headPos = headPos;

@@ -129,7 +128,7 @@ public class PageScanIndex extends BaseIndex implements RowIndex {
             page1.setPageId(id);
             page1.setParentPageId(headPos);
             page2.setParentPageId(headPos);
-            PageDataNode newRoot = new PageDataNode(this, rootPageId, Page.ROOT, store.createDataPage());
+            PageDataNode newRoot = new PageDataNode(this, rootPageId, Page.ROOT, store.createData());
             newRoot.init(page1, pivot, page2);
             store.updateRecord(page1, true, page1.data);
             store.updateRecord(page2, true, page2.data);

@@ -164,7 +163,7 @@ public class PageScanIndex extends BaseIndex implements RowIndex {
         if (rec != null) {
             return (PageDataLeafOverflow) rec;
         }
-        DataPage data = store.readPage(id);
+        Data data = store.readPage(id);
         data.reset();
         PageDataLeafOverflow result = new PageDataLeafOverflow(leaf, id, data, offset);
         result.read();

@@ -189,7 +188,7 @@ public class PageScanIndex extends BaseIndex implements RowIndex {
             }
             return (PageData) rec;
         }
-        DataPage data = store.readPage(id);
+        Data data = store.readPage(id);
         data.reset();
         int parentPageId = data.readInt();
         int type = data.readByte() & 255;

@@ -302,7 +301,7 @@ public class PageScanIndex extends BaseIndex implements RowIndex {
     private void removeAllRows() throws SQLException {
         PageData root = getPage(headPos, 0);
         root.freeChildren();
-        root = new PageDataLeaf(this, headPos, Page.ROOT, store.createDataPage());
+        root = new PageDataLeaf(this, headPos, Page.ROOT, store.createData());
         store.removeRecord(headPos);
         store.updateRecord(root, true, null);
         rowCount = 0;

@@ -328,7 +327,7 @@ public class PageScanIndex extends BaseIndex implements RowIndex {
      * @param data the data page
      * @return the row
      */
-    Row readRow(DataPage data) throws SQLException {
+    Row readRow(Data data) throws SQLException {
         return tableData.readRow(data);
     }
h2/src/main/org/h2/store/Data.java (new file, 0 → 100644)

/*
 * Copyright 2004-2009 H2 Group. Multiple-Licensed under the H2 License,
 * Version 1.0, and under the Eclipse Public License, Version 1.0
 * (http://h2database.com/html/license.html).
 * Initial Developer: H2 Group
 */
package org.h2.store;

import java.math.BigDecimal;
import java.sql.Date;
import java.sql.SQLException;
import java.sql.Time;
import java.sql.Timestamp;
import org.h2.constant.SysProperties;
import org.h2.message.Message;
import org.h2.util.MemoryUtils;
import org.h2.value.Value;
import org.h2.value.ValueArray;
import org.h2.value.ValueBoolean;
import org.h2.value.ValueByte;
import org.h2.value.ValueBytes;
import org.h2.value.ValueDate;
import org.h2.value.ValueDecimal;
import org.h2.value.ValueDouble;
import org.h2.value.ValueFloat;
import org.h2.value.ValueInt;
import org.h2.value.ValueJavaObject;
import org.h2.value.ValueLob;
import org.h2.value.ValueLong;
import org.h2.value.ValueNull;
import org.h2.value.ValueShort;
import org.h2.value.ValueString;
import org.h2.value.ValueStringFixed;
import org.h2.value.ValueStringIgnoreCase;
import org.h2.value.ValueTime;
import org.h2.value.ValueTimestamp;
import org.h2.value.ValueUuid;

/**
 * A data page is a byte buffer that contains persistent data of a page.
 */
public class Data extends DataPage {

    /**
     * The space required for the checksum and additional fillers.
     */
    public static final int LENGTH_FILLER = 2;

    /**
     * The length of an integer value.
     */
    public static final int LENGTH_INT = 4;

    /**
     * The length of a long value.
     */
    public static final int LENGTH_LONG = 8;

    private Data(DataHandler handler, byte[] data) {
        super(handler, data);
    }

    /**
     * Update an integer at the given position.
     * The current position is not change.
     *
     * @param pos the position
     * @param x the value
     */
    public void setInt(int pos, int x) {
        byte[] buff = data;
        buff[pos] = (byte) (x >> 24);
        buff[pos + 1] = (byte) (x >> 16);
        buff[pos + 2] = (byte) (x >> 8);
        buff[pos + 3] = (byte) x;
    }

    /**
     * Write an integer at the current position.
     * The current position is incremented.
     *
     * @param x the value
     */
    public void writeInt(int x) {
        byte[] buff = data;
        buff[pos++] = (byte) (x >> 24);
        buff[pos++] = (byte) (x >> 16);
        buff[pos++] = (byte) (x >> 8);
        buff[pos++] = (byte) x;
    }

    /**
     * Read an integer at the current position.
     * The current position is incremented.
     *
     * @return the value
     */
    public int readInt() {
        byte[] buff = data;
        return (buff[pos++] << 24) + ((buff[pos++] & 0xff) << 16) + ((buff[pos++] & 0xff) << 8) + (buff[pos++] & 0xff);
    }

    /**
     * Get the length of a String value.
     *
     * @param s the value
     * @return the length
     */
    public int getStringLen(String s) {
        return getStringLenUTF8(s);
    }

    /**
     * Read a String value.
     * The current position is incremented.
     *
     * @return the value
     */
    public String readString() {
        byte[] buff = data;
        int p = pos;
        int len = ((buff[p++] & 0xff) << 24) + ((buff[p++] & 0xff) << 16) + ((buff[p++] & 0xff) << 8) + (buff[p++] & 0xff);
        char[] chars = new char[len];
        for (int i = 0; i < len; i++) {
            int x = buff[p++] & 0xff;
            if (x < 0x80) {
                chars[i] = (char) x;
            } else if (x >= 0xe0) {
                chars[i] = (char) (((x & 0xf) << 12) + ((buff[p++] & 0x3f) << 6) + (buff[p++] & 0x3f));
            } else {
                chars[i] = (char) (((x & 0x1f) << 6) + (buff[p++] & 0x3f));
            }
        }
        pos = p;
        return new String(chars);
    }

    /**
     * Write a String value.
     * The current position is incremented.
     *
     * @param s the value
     */
    public void writeString(String s) {
        int len = s.length();
        int p = pos;
        byte[] buff = data;
        buff[p++] = (byte) (len >> 24);
        buff[p++] = (byte) (len >> 16);
        buff[p++] = (byte) (len >> 8);
        buff[p++] = (byte) len;
        for (int i = 0; i < len; i++) {
            int c = s.charAt(i);
            if (c > 0 && c < 0x80) {
                buff[p++] = (byte) c;
            } else if (c >= 0x800) {
                buff[p++] = (byte) (0xe0 | (c >> 12));
                buff[p++] = (byte) (0x80 | ((c >> 6) & 0x3f));
                buff[p++] = (byte) (0x80 | (c & 0x3f));
            } else {
                buff[p++] = (byte) (0xc0 | (c >> 6));
                buff[p++] = (byte) (0x80 | (c & 0x3f));
            }
        }
        pos = p;
    }

    /**
     * Increase the size to the given length.
     * The current position is set to the given value.
     *
     * @param len the new length
     */
    public void fill(int len) {
        if (pos > len) {
            pos = len;
        }
        pos = len;
    }

    /**
     * Create a new data page for the given handler. The
     * handler will decide what type of buffer is created.
     *
     * @param handler the data handler
     * @param capacity the initial capacity of the buffer
     * @return the data page
     */
    public static Data create(DataHandler handler, int capacity) {
        return new Data(handler, new byte[capacity]);
    }

    /**
     * Create a new data page using the given data for the given handler. The
     * handler will decide what type of buffer is created.
     *
     * @param handler the data handler
     * @param buff the data
     * @return the data page
     */
    public static Data create(DataHandler handler, byte[] buff) {
        return new Data(handler, buff);
    }

    /**
     * Get the current write position of this data page, which is the current
     * length.
     *
     * @return the length
     */
    public int length() {
        return pos;
    }

    /**
     * Get the byte array used for this page.
     *
     * @return the byte array
     */
    public byte[] getBytes() {
        return data;
    }

    /**
     * Set the position to 0.
     */
    public void reset() {
        pos = 0;
    }

    /**
     * Append the contents of the given data page to this page.
     * The filler is not appended.
     *
     * @param page the page that will be appended
     */
    public void writeDataPageNoSize(Data page) {
        // don't write filler
        int len = page.pos - LENGTH_FILLER;
        System.arraycopy(page.data, 0, data, pos, len);
        pos += len;
    }

    /**
     * Append a number of bytes to this data page.
     *
     * @param buff the data
     * @param off the offset in the data
     * @param len the length in bytes
     */
    public void write(byte[] buff, int off, int len) {
        System.arraycopy(buff, off, data, pos, len);
        pos += len;
    }

    /**
     * Copy a number of bytes to the given buffer from the current position. The
     * current position is incremented accordingly.
     *
     * @param buff the output buffer
     * @param off the offset in the output buffer
     * @param len the number of bytes to copy
     */
    public void read(byte[] buff, int off, int len) {
        System.arraycopy(data, pos, buff, off, len);
        pos += len;
    }

    /**
     * Append one single byte.
     *
     * @param x the value
     */
    public void writeByte(byte x) {
        data[pos++] = x;
    }

    /**
     * Read one single byte.
     *
     * @return the value
     */
    public int readByte() {
        return data[pos++];
    }

    /**
     * Read a long value. This method reads two int values and combines them.
     *
     * @return the long value
     */
    public long readLong() {
        return ((long) (readInt()) << 32) + (readInt() & 0xffffffffL);
    }

    /**
     * Append a long value. This method writes two int values.
     *
     * @param x the value
     */
    public void writeLong(long x) {
        writeInt((int) (x >>> 32));
        writeInt((int) x);
    }

    /**
     * Append a value.
     *
     * @param v the value
     */
    public void writeValue(Value v) throws SQLException {
        // TODO text output: could be in the Value... classes
        if (v == ValueNull.INSTANCE) {
            data[pos++] = '-';
            return;
        }
        int start = pos;
        data[pos++] = (byte) (v.getType() + 'a');
        switch (v.getType()) {
        case Value.BOOLEAN:
        case Value.BYTE:
        case Value.SHORT:
        case Value.INT:
            writeInt(v.getInt());
            break;
        case Value.LONG:
            writeLong(v.getLong());
            break;
        case Value.DECIMAL:
            String s = v.getString();
            writeString(s);
            break;
        case Value.TIME:
            writeLong(v.getTimeNoCopy().getTime());
            break;
        case Value.DATE:
            writeLong(v.getDateNoCopy().getTime());
            break;
        case Value.TIMESTAMP: {
            Timestamp ts = v.getTimestampNoCopy();
            writeLong(ts.getTime());
            writeInt(ts.getNanos());
            break;
        }
        case Value.JAVA_OBJECT:
        case Value.BYTES: {
            byte[] b = v.getBytesNoCopy();
            writeInt(b.length);
            write(b, 0, b.length);
            break;
        }
        case Value.UUID: {
            ValueUuid uuid = (ValueUuid) v;
            writeLong(uuid.getHigh());
            writeLong(uuid.getLow());
            break;
        }
        case Value.STRING:
        case Value.STRING_IGNORECASE:
        case Value.STRING_FIXED:
            writeString(v.getString());
            break;
        case Value.DOUBLE:
            writeLong(Double.doubleToLongBits(v.getDouble()));
            break;
        case Value.FLOAT:
            writeInt(Float.floatToIntBits(v.getFloat()));
            break;
        case Value.BLOB:
        case Value.CLOB: {
            ValueLob lob = (ValueLob) v;
            lob.convertToFileIfRequired(handler);
            byte[] small = lob.getSmall();
            if (small == null) {
                // -2 for historical reasons (-1 didn't store precision)
                int type = -2;
                if (!lob.isLinked()) {
                    type = -3;
                }
                writeInt(type);
                writeInt(lob.getTableId());
                writeInt(lob.getObjectId());
                writeLong(lob.getPrecision());
                writeByte((byte) (lob.useCompression() ? 1 : 0));
                if (type == -3) {
                    writeString(lob.getFileName());
                }
            } else {
                writeInt(small.length);
                write(small, 0, small.length);
            }
            break;
        }
        case Value.ARRAY: {
            Value[] list = ((ValueArray) v).getList();
            writeInt(list.length);
            for (Value x : list) {
                writeValue(x);
            }
            break;
        }
        default:
            Message.throwInternalError("type=" + v.getType());
        }
        if (SysProperties.CHECK2) {
            if (pos - start != getValueLen(v)) {
                throw Message.throwInternalError("value size error: got " + (pos - start) + " expected " + getValueLen(v));
            }
        }
    }

    /**
     * Calculate the number of bytes required to encode the given value.
     *
     * @param v the value
     * @return the number of bytes required to store this value
     */
    public int getValueLen(Value v) throws SQLException {
        if (v == ValueNull.INSTANCE) {
            return 1;
        }
        switch (v.getType()) {
        case Value.BOOLEAN:
        case Value.BYTE:
        case Value.SHORT:
        case Value.INT:
            return 1 + LENGTH_INT;
        case Value.LONG:
            return 1 + LENGTH_LONG;
        case Value.DOUBLE:
            return 1 + LENGTH_LONG;
        case Value.FLOAT:
            return 1 + LENGTH_INT;
        case Value.STRING:
        case Value.STRING_IGNORECASE:
        case Value.STRING_FIXED:
            return 1 + getStringLen(v.getString());
        case Value.DECIMAL:
            return 1 + getStringLen(v.getString());
        case Value.JAVA_OBJECT:
        case Value.BYTES: {
            int len = v.getBytesNoCopy().length;
            return 1 + LENGTH_INT + len;
        }
        case Value.UUID:
            return 1 + LENGTH_LONG + LENGTH_LONG;
        case Value.TIME:
            return 1 + LENGTH_LONG;
        case Value.DATE:
            return 1 + LENGTH_LONG;
        case Value.TIMESTAMP:
            return 1 + LENGTH_LONG + LENGTH_INT;
        case Value.BLOB:
        case Value.CLOB: {
            int len = 1;
            ValueLob lob = (ValueLob) v;
            lob.convertToFileIfRequired(handler);
            byte[] small = lob.getSmall();
            if (small != null) {
                len += LENGTH_INT + small.length;
            } else {
                len += LENGTH_INT + LENGTH_INT + LENGTH_INT + LENGTH_LONG + 1;
                if (!lob.isLinked()) {
                    len += getStringLen(lob.getFileName());
                }
            }
            return len;
        }
        case Value.ARRAY: {
            Value[] list = ((ValueArray) v).getList();
            int len = 1 + LENGTH_INT;
            for (Value x : list) {
                len += getValueLen(x);
            }
            return len;
        }
        default:
            throw Message.throwInternalError("type=" + v.getType());
        }
    }

    /**
     * Read a value.
     *
     * @return the value
     */
    public Value readValue() throws SQLException {
        int dataType = data[pos++];
        if (dataType == '-') {
            return ValueNull.INSTANCE;
        }
        dataType = dataType - 'a';
        switch (dataType) {
        case Value.BOOLEAN:
            return ValueBoolean.get(readInt() == 1);
        case Value.BYTE:
            return ValueByte.get((byte) readInt());
        case Value.SHORT:
            return ValueShort.get((short) readInt());
        case Value.INT:
            return ValueInt.get(readInt());
        case Value.LONG:
            return ValueLong.get(readLong());
        case Value.DECIMAL:
            return ValueDecimal.get(new BigDecimal(readString()));
        case Value.DATE:
            return ValueDate.getNoCopy(new Date(readLong()));
        case Value.TIME:
            // need to normalize the year, month and day
            return ValueTime.get(new Time(readLong()));
        case Value.TIMESTAMP: {
            Timestamp ts = new Timestamp(readLong());
            ts.setNanos(readInt());
            return ValueTimestamp.getNoCopy(ts);
        }
        case Value.JAVA_OBJECT: {
            int len = readInt();
            byte[] b = MemoryUtils.newBytes(len);
            read(b, 0, len);
            return ValueJavaObject.getNoCopy(b);
        }
        case Value.BYTES: {
            int len = readInt();
            byte[] b = MemoryUtils.newBytes(len);
            read(b, 0, len);
            return ValueBytes.getNoCopy(b);
        }
        case Value.UUID:
            return ValueUuid.get(readLong(), readLong());
        case Value.STRING:
            return ValueString.get(readString());
        case Value.STRING_IGNORECASE:
            return ValueStringIgnoreCase.get(readString());
        case Value.STRING_FIXED:
            return ValueStringFixed.get(readString());
        case Value.DOUBLE:
            return ValueDouble.get(Double.longBitsToDouble(readLong()));
        case Value.FLOAT:
            return ValueFloat.get(Float.intBitsToFloat(readInt()));
        case Value.BLOB:
        case Value.CLOB: {
            int smallLen = readInt();
            if (smallLen >= 0) {
                byte[] small = MemoryUtils.newBytes(smallLen);
                read(small, 0, smallLen);
                return ValueLob.createSmallLob(dataType, small);
            }
            int tableId = readInt();
            int objectId = readInt();
            long precision = 0;
            boolean compression = false;
            // -1: historical (didn't store precision)
            // -2: regular
            // -3: regular, but not linked (in this case: including file name)
            if (smallLen == -2 || smallLen == -3) {
                precision = readLong();
                compression = readByte() == 1;
            }
            ValueLob lob = ValueLob.open(dataType, handler, tableId, objectId, precision, compression);
            if (smallLen == -3) {
                lob.setFileName(readString(), false);
            }
            return lob;
        }
        case Value.ARRAY: {
            int len = readInt();
            Value[] list = new Value[len];
            for (int i = 0; i < len; i++) {
                list[i] = readValue();
            }
            return ValueArray.get(list);
        }
        default:
            throw Message.throwInternalError("type=" + dataType);
        }
    }

    /**
     * Set the current read / write position.
     *
     * @param pos the new position
     */
    public void setPos(int pos) {
        this.pos = pos;
    }

    /**
     * Write a short integer at the current position.
     * The current position is incremented.
     *
     * @param x the value
     */
    public void writeShortInt(int x) {
        byte[] buff = data;
        buff[pos++] = (byte) (x >> 8);
        buff[pos++] = (byte) x;
    }

    /**
     * Read an short integer at the current position.
     * The current position is incremented.
     *
     * @return the value
     */
    public int readShortInt() {
        byte[] buff = data;
        return ((buff[pos++] & 0xff) << 8) + (buff[pos++] & 0xff);
    }

    private static int getStringLenUTF8(String s) {
        int plus = 4, len = s.length();
        for (int i = 0; i < len; i++) {
            char c = s.charAt(i);
            if (c >= 0x800) {
                plus += 2;
            } else if (c == 0 || c >= 0x80) {
                plus++;
            }
        }
        return len + plus;
    }

    /**
     * Shrink the array to this size.
     *
     * @param size the new size
     */
    public void truncate(int size) {
        if (pos > size) {
            byte[] buff = new byte[size];
            System.arraycopy(data, 0, buff, 0, size);
            this.pos = size;
            data = buff;
        }
    }

}
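A short usage sketch of the new class, using only methods defined in the file above. It is a fragment, not part of the commit: handler is assumed to be an existing org.h2.store.DataHandler (for example the Database instance), and exception handling around the SQLException-throwing calls is omitted.

    // Sketch: serialize a few values into a page buffer and read them back.
    Data page = Data.create(handler, 1024);      // 1 KB buffer
    page.writeInt(42);                           // fixed-width int
    page.writeValue(ValueString.get("hello"));   // typed value with a type-tag byte
    int len = page.length();                     // current write position

    page.reset();                                // rewind to position 0
    int n = page.readInt();                      // 42
    Value v = page.readValue();                  // ValueString "hello"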
h2/src/main/org/h2/store/DataPage.java

@@ -15,8 +15,8 @@ import java.sql.Timestamp;
 import org.h2.constant.SysProperties;
 import org.h2.engine.Constants;
 import org.h2.message.Message;
-import org.h2.util.ByteUtils;
 import org.h2.util.MathUtils;
+import org.h2.util.MemoryUtils;
 import org.h2.value.Value;
 import org.h2.value.ValueArray;
 import org.h2.value.ValueBoolean;

@@ -68,19 +68,19 @@ public class DataPage {
     /**
      * The data handler responsible for lob objects.
      */
-    private DataHandler handler;
+    protected DataHandler handler;

     /**
      * The data itself.
      */
-    private byte[] data;
+    protected byte[] data;

     /**
      * The current write or read position.
      */
-    private int pos;
+    protected int pos;

-    private DataPage(DataHandler handler, byte[] data) {
+    protected DataPage(DataHandler handler, byte[] data) {
         this.handler = handler;
         this.data = data;
     }

@@ -266,7 +266,7 @@ public class DataPage {
     public void checkCapacity(int plus) {
         if (pos + plus >= data.length) {
-            byte[] d = ByteUtils.newBytes((data.length + plus) * 2);
+            byte[] d = MemoryUtils.newBytes((data.length + plus) * 2);
             // must copy everything, because pos could be 0 and data may be
             // still required
             System.arraycopy(data, 0, d, 0, data.length);

@@ -607,13 +607,13 @@ public class DataPage {
         }
         case Value.JAVA_OBJECT: {
             int len = readInt();
-            byte[] b = ByteUtils.newBytes(len);
+            byte[] b = MemoryUtils.newBytes(len);
             read(b, 0, len);
             return ValueJavaObject.getNoCopy(b);
         }
         case Value.BYTES: {
             int len = readInt();
-            byte[] b = ByteUtils.newBytes(len);
+            byte[] b = MemoryUtils.newBytes(len);
             read(b, 0, len);
             return ValueBytes.getNoCopy(b);
         }

@@ -633,7 +633,7 @@ public class DataPage {
         case Value.CLOB: {
             int smallLen = readInt();
             if (smallLen >= 0) {
-                byte[] small = ByteUtils.newBytes(smallLen);
+                byte[] small = MemoryUtils.newBytes(smallLen);
                 read(small, 0, smallLen);
                 return ValueLob.createSmallLob(dataType, small);
             }
h2/src/main/org/h2/store/DiskFile.java

@@ -27,7 +27,6 @@ import org.h2.log.RedoLogRecord;
 import org.h2.message.Message;
 import org.h2.message.Trace;
 import org.h2.util.BitField;
-import org.h2.util.ByteUtils;
 import org.h2.util.Cache;
 import org.h2.util.CacheLRU;
 import org.h2.util.CacheObject;

@@ -35,6 +34,7 @@ import org.h2.util.CacheWriter;
 import org.h2.util.FileUtils;
 import org.h2.util.IntArray;
 import org.h2.util.MathUtils;
+import org.h2.util.MemoryUtils;
 import org.h2.util.New;
 import org.h2.util.ObjectArray;

@@ -575,7 +575,7 @@ public class DiskFile implements CacheWriter {
             Message.throwInternalError("0 blocks to read pos=" + pos);
         }
         if (blockCount > 1) {
-            byte[] b2 = ByteUtils.newBytes(blockCount * BLOCK_SIZE);
+            byte[] b2 = MemoryUtils.newBytes(blockCount * BLOCK_SIZE);
             System.arraycopy(buff, 0, b2, 0, BLOCK_SIZE);
             buff = b2;
             file.readFully(buff, BLOCK_SIZE, blockCount * BLOCK_SIZE - BLOCK_SIZE);
h2/src/main/org/h2/store/FileStoreInputStream.java

@@ -14,7 +14,7 @@ import org.h2.constant.SysProperties;
 import org.h2.engine.Constants;
 import org.h2.message.Message;
 import org.h2.tools.CompressTool;
-import org.h2.util.ByteUtils;
+import org.h2.util.MemoryUtils;

 /**
  * An input stream that is backed by a file store.

@@ -120,7 +120,7 @@ public class FileStoreInputStream extends InputStream {
         readInt();
         if (compress != null) {
             int uncompressed = readInt();
-            byte[] buff = ByteUtils.newBytes(remainingInBuffer);
+            byte[] buff = MemoryUtils.newBytes(remainingInBuffer);
             page.read(buff, 0, remainingInBuffer);
             page.reset();
             page.checkCapacity(uncompressed);
h2/src/main/org/h2/store/PageFreeList.java

@@ -28,7 +28,7 @@ public class PageFreeList extends Record {
     private final BitField used = new BitField();
     private final int pageCount;
     private boolean full;
-    private DataPage data;
+    private Data data;

     PageFreeList(PageStore store, int pageId) {
         setPos(pageId);

@@ -91,7 +91,7 @@ public class PageFreeList extends Record {
      * Read the page from the disk.
      */
     void read() throws SQLException {
-        data = store.createDataPage();
+        data = store.createData();
         store.readPage(getPos(), data);
         int p = data.readInt();
         int t = data.readByte();

@@ -113,7 +113,7 @@ public class PageFreeList extends Record {
     }

     public void write(DataPage buff) throws SQLException {
-        data = store.createDataPage();
+        data = store.createData();
         data.writeInt(0);
         int type = Page.TYPE_FREE_LIST;
         data.writeByte((byte) type);
h2/src/main/org/h2/store/PageLog.java

@@ -111,7 +111,7 @@ public class PageLog {
     private DataInputStream in;
     private int firstTrunkPage;
    private int firstDataPage;
-    private DataPage data;
+    private Data data;
     private int logId, logPos;
     private int firstLogId;
     private BitField undo = new BitField();

@@ -120,7 +120,7 @@ public class PageLog {
     PageLog(PageStore store) {
         this.store = store;
-        data = store.createDataPage();
+        data = store.createData();
         trace = store.getTrace();
     }

@@ -196,7 +196,7 @@ public class PageLog {
         pageIn = new PageInputStream(store, firstTrunkPage, firstDataPage);
         in = new DataInputStream(pageIn);
         int logId = 0;
-        DataPage data = store.createDataPage();
+        Data data = store.createData();
         try {
             pos = 0;
             while (true) {

@@ -268,14 +268,14 @@ public class PageLog {
                     }
                 }
             }
+            if (stage == RECOVERY_STAGE_REDO) {
+                sessionStates = New.hashMap();
+            }
         } catch (EOFException e) {
             trace.debug("log recovery stopped: " + e.toString());
         } catch (IOException e) {
             throw Message.convertIOException(e, "recover");
         }
-        if (stage == RECOVERY_STAGE_REDO) {
-            sessionStates = New.hashMap();
-        }
     }

@@ -304,7 +304,7 @@ public class PageLog {
      * @param data a temporary buffer
      * @return the row
      */
-    public static Row readRow(DataInputStream in, DataPage data) throws IOException, SQLException {
+    public static Row readRow(DataInputStream in, Data data) throws IOException, SQLException {
         int pos = in.readInt();
         int len = in.readInt();
         data.reset();

@@ -328,7 +328,7 @@ public class PageLog {
      * @param pageId the page id
      * @param page the old page data
      */
-    void addUndo(int pageId, DataPage page) throws SQLException {
+    void addUndo(int pageId, Data page) throws SQLException {
         try {
             if (undo.get(pageId)) {
                 return;

@@ -398,17 +398,17 @@ public class PageLog {
             int pageSize = store.getPageSize();
             byte[] t = StringUtils.utf8Encode(transaction);
             int len = t.length;
-            if (1 + DataPage.LENGTH_INT * 2 + len >= PageStreamData.getCapacity(pageSize)) {
+            if (1 + Data.LENGTH_INT * 2 + len >= PageStreamData.getCapacity(pageSize)) {
                 throw Message.getInvalidValueException("transaction name too long", transaction);
             }
-            pageOut.fillDataPage();
+            pageOut.fillPage();
             out.write(PREPARE_COMMIT);
             out.writeInt(session.getId());
             out.writeInt(len);
             out.write(t);
             flushOut();
             // store it on a separate log page
-            pageOut.fillDataPage();
+            pageOut.fillPage();
             if (log.getFlushOnEachCommit()) {
                 flush();
             }

@@ -461,6 +461,7 @@ public class PageLog {
             row.setLastLog(logId, logPos);
             data.reset();
+            data.checkCapacity(row.getByteCount(data));
             row.write(data);
             out.write(add ? ADD : REMOVE);
             out.writeInt(session.getId());

@@ -497,7 +498,7 @@ public class PageLog {
             }
             undo = new BitField();
             logId++;
-            pageOut.fillDataPage();
+            pageOut.fillPage();
             int currentDataPage = pageOut.getCurrentDataPageId();
             logIdPageMap.put(logId, currentDataPage);
         }

@@ -636,4 +637,14 @@ public class PageLog {
         d.write(null);
     }

+    void truncate() throws SQLException {
+        do {
+            // TODO keep trunk page in the cache
+            PageStreamTrunk t = new PageStreamTrunk(store, firstTrunkPage);
+            t.read();
+            firstTrunkPage = t.getNextTrunk();
+            t.free();
+        } while (firstTrunkPage != 0);
+    }
+
 }
h2/src/main/org/h2/store/PageOutputStream.java

@@ -173,7 +173,7 @@ public class PageOutputStream extends OutputStream {
     /**
      * Fill the data page with zeros and write it.
      * This is required for a checkpoint.
      */
-    void fillDataPage() throws SQLException {
+    void fillPage() throws SQLException {
         if (trace.isDebugEnabled()) {
             trace.debug("pageOut.storePage fill " + data.getPos());
         }
h2/src/main/org/h2/store/PageStore.java
浏览文件 @
f8a5383e
...
...
@@ -25,6 +25,7 @@ import org.h2.log.LogSystem;
import
org.h2.message.Message
;
import
org.h2.message.Trace
;
import
org.h2.result.Row
;
import
org.h2.result.SearchRow
;
import
org.h2.schema.Schema
;
import
org.h2.table.Column
;
import
org.h2.table.IndexColumn
;
...
...
@@ -68,31 +69,20 @@ import org.h2.value.ValueString;
*/
public
class
PageStore
implements
CacheWriter
{
// TODO TestTwoPhaseCommit
// TODO TestIndex.wideIndex: btree nodes should be full
// TODO check memory usage
// TODO PageStore.openMetaIndex (desc and nulls first / last)
// TODO PageBtreeIndex.canGetFirstOrLast
// TODO btree index with fixed size values doesn't need offset and so on
// TODO better checksums (for example, multiple fletcher)
// TODO somehow remember rowcount
// TODO implement checksum - 0 for empty
// TODO remove parent, use tableId if required
// TODO replace CRC32
// TODO PageBtreeNode: 4 bytes offset - others use only 2
// TODO PageBtreeLeaf: why table id
// TODO log block allocation
// TODO block compression: maybe http://en.wikipedia.org/wiki/LZJB
// with RLE, specially for 0s.
// TODO test that setPageId updates parent, overflow parent
// TODO order pages so that searching for a key
// doesn't seek backwards in the file
// TODO use an undo log and maybe redo log (for performance)
// TODO checksum: 0 for empty; position hash + every 128th byte,
// specially important for log; misdirected reads or writes
// TODO type, sequence (start with random); checksum (start with block id)
// TODO for lists: write sequence byte
// TODO don't save parent (only root); remove setPageId
// TODO order pages so that searching for a key only seeks forward
// TODO completely re-use keys of deleted rows; maybe
// remember last page with deleted keys (in the root page?),
// and chain such pages
// TODO remove Database.objectIds
// TODO detect circles in linked lists
// (input stream, free list, extend pages...)
// at runtime and recovery
...
...
@@ -102,17 +92,13 @@ public class PageStore implements CacheWriter {
// TODO recover tool: don't re-do uncommitted operations
// TODO no need to log old page if it was always empty
// TODO don't store default values (store a special value)
// TODO
btree: maybe split at the
insertion point
// TODO
maybe split at the last
insertion point
// TODO split files (1 GB max size)
// TODO add a setting (that can be changed at runtime) to call fsync
// and delay on each commit
// TODO var int: see google protocol buffers
// TODO SessionState.logId is no longer needed
// TODO PageData and PageBtree addRowTry: try to simplify
// TODO performance: don't save direct parent in btree nodes (only root)
// TODO space re-use: run TestPerformance multiple times, size should stay
// TODO when inserting many rows, do not split at (entryCount / 2) + 1
// TODO maybe split at the last insertion point
// TODO test running out of disk space (using a special file system)
// TODO check for file size (exception if not exact size expected)
...
...
@@ -121,6 +107,9 @@ public class PageStore implements CacheWriter {
// remove Record.getMemorySize
// simplify InDoubtTransaction
// remove parameter in Record.write(DataPage buff)
// remove Record.getByteCount
// remove Database.objectIds
/**
* The smallest possible page size.
...
...
@@ -151,6 +140,8 @@ public class PageStore implements CacheWriter {
private
static
final
int
META_TYPE_BTREE_INDEX
=
1
;
private
static
final
int
META_TABLE_ID
=
-
1
;
private
static
final
SearchRow
[]
EMPTY_SEARCH_ROW
=
new
SearchRow
[
0
];
private
Database
database
;
private
final
Trace
trace
;
private
String
fileName
;
...
...
@@ -297,6 +288,14 @@ public class PageStore implements CacheWriter {
}
}
private
void
writeBack
()
throws
SQLException
{
ObjectArray
<
CacheObject
>
list
=
cache
.
getAllChanged
();
CacheObject
.
sort
(
list
);
for
(
CacheObject
rec
:
list
)
{
writeBack
(
rec
);
}
}
/**
* Flush all pending changes to disk, and re-open the log file.
*/
...
...
@@ -308,17 +307,11 @@ public class PageStore implements CacheWriter {
}
synchronized
(
database
)
{
database
.
checkPowerOff
();
ObjectArray
<
CacheObject
>
list
=
cache
.
getAllChanged
();
CacheObject
.
sort
(
list
);
for
(
CacheObject
rec
:
list
)
{
writeBack
(
rec
);
}
writeBack
();
log
.
checkpoint
();
switchLog
();
// write back the free list
for
(
CacheObject
rec
:
list
)
{
writeBack
(
rec
);
}
writeBack
();
byte
[]
empty
=
new
byte
[
pageSize
];
// TODO avoid to write empty pages
for
(
int
i
=
PAGE_ID_FREE_LIST_ROOT
;
i
<
pageCount
;
i
++)
{
...
...
@@ -357,7 +350,7 @@ public class PageStore implements CacheWriter {
long
length
=
file
.
length
();
database
.
notifyFileSize
(
length
);
file
.
seek
(
FileStore
.
HEADER_LENGTH
);
Data
Page
page
=
DataPage
.
create
(
database
,
new
byte
[
PAGE_SIZE_MIN
-
FileStore
.
HEADER_LENGTH
]);
Data
page
=
Data
.
create
(
database
,
new
byte
[
PAGE_SIZE_MIN
-
FileStore
.
HEADER_LENGTH
]);
file
.
readFully
(
page
.
getBytes
(),
0
,
PAGE_SIZE_MIN
-
FileStore
.
HEADER_LENGTH
);
setPageSize
(
page
.
readInt
());
int
writeVersion
=
page
.
readByte
();
...
...
@@ -374,7 +367,7 @@ public class PageStore implements CacheWriter {
}
private
void
readVariableHeader
()
throws
SQLException
{
Data
Page
page
=
DataPage
.
create
(
database
,
pageSize
);
Data
page
=
Data
.
create
(
database
,
pageSize
);
for
(
int
i
=
1
;;
i
++)
{
if
(
i
==
3
)
{
throw
Message
.
getSQLException
(
ErrorCode
.
FILE_CORRUPTED_1
,
fileName
);
...
...
@@ -422,7 +415,7 @@ public class PageStore implements CacheWriter {
}
private
void
writeStaticHeader
()
throws
SQLException
{
Data
Page
page
=
DataPage
.
create
(
database
,
new
byte
[
pageSize
-
FileStore
.
HEADER_LENGTH
]);
Data
page
=
Data
.
create
(
database
,
new
byte
[
pageSize
-
FileStore
.
HEADER_LENGTH
]);
page
.
writeInt
(
pageSize
);
page
.
writeByte
((
byte
)
WRITE_VERSION
);
page
.
writeByte
((
byte
)
READ_VERSION
);
...
...
@@ -443,7 +436,7 @@ public class PageStore implements CacheWriter {
    }

    private void writeVariableHeader() throws SQLException {
-       DataPage page = DataPage.create(database, pageSize);
+       Data page = Data.create(database, pageSize);
        page.writeLong(writeCounter);
        page.writeInt(logFirstTrunkPage);
        page.writeInt(logFirstDataPage);
...
...
@@ -513,7 +506,7 @@ public class PageStore implements CacheWriter {
* @param logUndo if an undo entry needs to be logged
* @param old the old data (if known)
*/
-   public void updateRecord(Record record, boolean logUndo, DataPage old) throws SQLException {
+   public void updateRecord(Record record, boolean logUndo, Data old) throws SQLException {
        synchronized (database) {
            if (trace.isDebugEnabled()) {
                if (!record.isChanged()) {
...
...
@@ -614,7 +607,7 @@ public class PageStore implements CacheWriter {
* @param logUndo if an undo entry needs to be logged
* @param old the old data (if known)
*/
-   public void freePage(int pageId, boolean logUndo, DataPage old) throws SQLException {
+   public void freePage(int pageId, boolean logUndo, Data old) throws SQLException {
        if (trace.isDebugEnabled()) {
            trace.debug("freePage " + pageId);
        }
...
...
@@ -622,7 +615,7 @@ public class PageStore implements CacheWriter {
        cache.remove(pageId);
        freePage(pageId);
        if (recoveryRunning) {
-           writePage(pageId, createDataPage());
+           writePage(pageId, createData());
        } else if (logUndo) {
            if (old == null) {
                old = readPage(pageId);
...
...
@@ -633,12 +626,12 @@ public class PageStore implements CacheWriter {
    }

    /**
-    * Create a data page.
+    * Create a data object.
     *
     * @return the data page.
     */
-   public DataPage createDataPage() {
-       return DataPage.create(database, new byte[pageSize]);
+   public Data createData() {
+       return Data.create(database, new byte[pageSize]);
    }
/**
...
...
@@ -660,8 +653,8 @@ public class PageStore implements CacheWriter {
* @param pos the page id
* @return the page
*/
-   public DataPage readPage(int pos) throws SQLException {
-       DataPage page = createDataPage();
+   public Data readPage(int pos) throws SQLException {
+       Data page = createData();
        readPage(pos, page);
        return page;
    }
...
...
@@ -672,7 +665,7 @@ public class PageStore implements CacheWriter {
* @param pos the page id
* @param page the page
*/
-   public void readPage(int pos, DataPage page) throws SQLException {
+   public void readPage(int pos, Data page) throws SQLException {
        synchronized (database) {
            if (pos >= pageCount) {
                throw Message.getSQLException(ErrorCode.FILE_CORRUPTED_1, pos + " of " + pageCount);
...
...
@@ -706,7 +699,7 @@ public class PageStore implements CacheWriter {
* @param pageId the page id
* @param data the data
*/
-   public void writePage(int pageId, DataPage data) throws SQLException {
+   public void writePage(int pageId, Data data) throws SQLException {
        synchronized (database) {
            file.seek((long) pageId << pageSizeShift);
            file.write(data.getBytes(), 0, pageSize);
...
...
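Taken together, createData, readPage and writePage are the low-level page I/O entry points after the rename from DataPage to Data. The fragment below is illustrative only: roundTrip, pageId and value are made-up names, the imports show where the types live, and real callers go through the cache and the log instead of writing pages directly.

    import java.sql.SQLException;
    import org.h2.store.Data;
    import org.h2.store.PageStore;

    // Illustrative sketch: write one page and read it back through the renamed API.
    void roundTrip(PageStore store, int pageId, int value) throws SQLException {
        Data page = store.createData();      // buffer of exactly one page
        page.writeInt(value);                // fill the start of the page
        store.writePage(pageId, page);       // disk offset = pageId << pageSizeShift
        Data copy = store.readPage(pageId);  // reads the same pageSize bytes back
        int read = copy.readInt();           // read == value
    }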
@@ -740,6 +733,7 @@ public class PageStore implements CacheWriter {
        readMetaData();
        log.recover(PageLog.RECOVERY_STAGE_REDO);
        if (log.getInDoubtTransactions().size() == 0) {
            log.truncate();
+           switchLog();
        } else {
            database.setReadOnly(true);
...
...
@@ -886,7 +880,7 @@ public class PageStore implements CacheWriter {
            trace.debug("addMeta id=" + id + " type=" + type + " parent=" + parent + " columns=" + columnList);
        }
        if (redo) {
-           writePage(headPos, createDataPage());
+           writePage(headPos, createData());
            allocatePage(headPos);
        }
        if (type == META_TYPE_SCAN_INDEX) {
...
...
@@ -1026,6 +1020,19 @@ public class PageStore implements CacheWriter {
        }
    }

+   /**
+    * Create an array of SearchRow with the given size.
+    *
+    * @param entryCount the number of entries
+    * @return the array
+    */
+   public static SearchRow[] newSearchRows(int entryCount) {
+       if (entryCount == 0) {
+           return EMPTY_SEARCH_ROW;
+       }
+       return new SearchRow[entryCount];
+   }

    // TODO implement checksum
    // private void updateChecksum(byte[] d, int pos) {
    //     int ps = pageSize;
...
...
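The combination of EMPTY_SEARCH_ROW and newSearchRows is the usual shared-empty-array idiom: a zero-length array carries no state, so one shared instance can be returned for every empty page instead of allocating a fresh array each time. A standalone sketch of the same idiom (the class and names are illustrative, not part of the patch):

    // Illustrative sketch of the shared empty-array idiom behind newSearchRows.
    public class EmptyArrays {
        private static final String[] EMPTY = new String[0];

        public static String[] newStrings(int count) {
            // safe to share: a zero-length array is effectively immutable
            return count == 0 ? EMPTY : new String[count];
        }
    }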
h2/src/main/org/h2/store/PageStreamData.java
...
...
@@ -27,7 +27,7 @@ public class PageStreamData extends Record {
    private final PageStore store;
    private int trunk;
-   private DataPage data;
+   private Data data;
    private int remaining;
    private int length;
...
...
@@ -41,7 +41,7 @@ public class PageStreamData extends Record {
* Read the page from the disk.
*/
    void read() throws SQLException {
-       data = store.createDataPage();
+       data = store.createData();
        store.readPage(getPos(), data);
        trunk = data.readInt();
        data.setPos(4);
...
...
@@ -61,7 +61,7 @@ public class PageStreamData extends Record {
* Write the header data.
*/
    void initWrite() {
-       data = store.createDataPage();
+       data = store.createData();
        data.writeInt(trunk);
        data.writeByte((byte) Page.TYPE_STREAM_DATA);
        data.writeInt(0);
...
...
h2/src/main/org/h2/store/PageStreamTrunk.java
...
...
@@ -10,6 +10,7 @@ import java.sql.SQLException;
import org.h2.constant.ErrorCode;
import org.h2.index.Page;
import org.h2.message.Message;
+import org.h2.util.MemoryUtils;
/**
* A trunk page of a stream. It contains the page numbers of the stream, and
...
...
@@ -31,7 +32,7 @@ public class PageStreamTrunk extends Record {
    private int nextTrunk;
    private int[] pageIds;
    private int pageCount;
-   private DataPage data;
+   private Data data;
    private int index;

    PageStreamTrunk(PageStore store, int parent, int pageId, int next, int[] pageIds) {
...
...
@@ -52,13 +53,13 @@ public class PageStreamTrunk extends Record {
* Read the page from the disk.
*/
    void read() throws SQLException {
-       data = store.createDataPage();
+       data = store.createData();
        store.readPage(getPos(), data);
        parent = data.readInt();
        int t = data.readByte();
        if (t == Page.TYPE_EMPTY) {
            // end of file
-           pageIds = new int[0];
+           pageIds = MemoryUtils.EMPTY_INTS;
            return;
        }
        if (t != Page.TYPE_STREAM_TRUNK) {
...
...
@@ -93,7 +94,7 @@ public class PageStreamTrunk extends Record {
    }

    public void write(DataPage buff) throws SQLException {
-       data = store.createDataPage();
+       data = store.createData();
        data.writeInt(parent);
        data.writeByte((byte) Page.TYPE_STREAM_TRUNK);
        data.writeInt(nextTrunk);
...
...
@@ -135,7 +136,7 @@ public class PageStreamTrunk extends Record {
* @return the number of pages freed
*/
    int free() throws SQLException {
-       DataPage empty = store.createDataPage();
+       Data empty = store.createData();
        store.freePage(getPos(), false, null);
        int freed = 1;
        for (int i = 0; i < pageCount; i++) {
...
...
h2/src/main/org/h2/tools/Recover.java
...
...
@@ -34,6 +34,7 @@ import org.h2.message.Trace;
import org.h2.result.Row;
import org.h2.result.SimpleRow;
import org.h2.security.SHA256;
+import org.h2.store.Data;
import org.h2.store.DataHandler;
import org.h2.store.DataPage;
import org.h2.store.DiskFile;
...
...
@@ -49,6 +50,7 @@ import org.h2.util.FileUtils;
import org.h2.util.IOUtils;
import org.h2.util.IntArray;
import org.h2.util.MathUtils;
+import org.h2.util.MemoryUtils;
import org.h2.util.New;
import org.h2.util.ObjectArray;
import org.h2.util.RandomUtils;
...
...
@@ -496,7 +498,7 @@ public class Recover extends Tool implements DataHandler {
// Math.abs(Integer.MIN_VALUE) == Integer.MIN_VALUE
        blocks = MathUtils.convertLongToInt(Math.abs(s.readInt()));
        if (blocks > 1) {
-           byte[] b2 = ByteUtils.newBytes(blocks * blockSize);
+           byte[] b2 = MemoryUtils.newBytes(blocks * blockSize);
            System.arraycopy(buff, 0, b2, 0, blockSize);
            buff = b2;
            try {
...
...
@@ -532,7 +534,7 @@ public class Recover extends Tool implements DataHandler {
        case 'S': {
            char fileType = (char) s.readByte();
            int sumLength = s.readInt();
-           byte[] summary = ByteUtils.newBytes(sumLength);
+           byte[] summary = MemoryUtils.newBytes(sumLength);
            if (sumLength > 0) {
                s.read(summary, 0, sumLength);
            }
...
...
@@ -858,7 +860,7 @@ public class Recover extends Tool implements DataHandler {
    }

    private void dumpPageLogStream(PrintWriter writer, FileStore store, int logFirstTrunkPage,
            int logFirstDataPage, int pageSize) throws IOException, SQLException {
-       DataPage s = DataPage.create(this, pageSize);
+       Data s = Data.create(this, pageSize);
        DataInputStream in = new DataInputStream(
                new PageInputStream(writer, this, store, logFirstTrunkPage, logFirstDataPage, pageSize));
...
...
h2/src/test/org/h2/test/TestAll.java
...
...
@@ -332,13 +332,15 @@ kill -9 `jps -l | grep "org.h2.test.TestAll" | cut -d " " -f 1`
                new TestTimer().runTest(test);
            }
        } else {
-           test.runTests();
-           int todo;
-           // System.setProperty(SysProperties.H2_PAGE_STORE, "true");
-           // test.pageStore = true;
-           // test.runTests();
+           System.setProperty(SysProperties.H2_PAGE_STORE, "true");
+           test.pageStore = true;
+           test.runTests();
+           TestPerformance.main(new String[]{ "-init", "-db", "1" });
+           System.setProperty(SysProperties.H2_PAGE_STORE, "false");
+           test.pageStore = false;
+           test.runTests();
            TestPerformance.main(new String[]{ "-init", "-db", "1" });
        }
        System.out.println(TestBase.formatTime(System.currentTimeMillis() - time) + " total");
...
...
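With this change TestAll runs the whole suite twice, first with the experimental page store enabled and then with it disabled again. The toggle is simply the h2.pageStore system property (the constant SysProperties.H2_PAGE_STORE), so it can also be set from the command line with -Dh2.pageStore=true, or programmatically as in this illustrative fragment:

    // Illustrative only: toggling the experimental page store around a test run.
    // "h2.pageStore" is the property name behind SysProperties.H2_PAGE_STORE.
    System.setProperty("h2.pageStore", "true");   // new databases use the page store
    // ... open connections and run tests here ...
    System.setProperty("h2.pageStore", "false");  // back to the classic storage engine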
h2/src/test/org/h2/test/TestBase.java
...
...
@@ -19,6 +19,7 @@ import java.sql.Statement;
import java.sql.Types;
import java.text.SimpleDateFormat;
import java.util.ArrayList;
+import java.util.LinkedList;
import org.h2.jdbc.JdbcConnection;
import org.h2.message.TraceSystem;
...
...
@@ -47,6 +48,8 @@ public abstract class TestBase {
*/
    protected long start;

+   private LinkedList<byte[]> memory = new LinkedList<byte[]>();
/**
* Get the test directory for this test.
*
...
...
@@ -1068,4 +1071,43 @@ public abstract class TestBase {
        return "-Dh2.pageStore=" + System.getProperty("h2.pageStore");
    }

+   protected void eatMemory(int remainingKB) {
+       byte[] reserve = new byte[remainingKB * 1024];
+       int max = 128 * 1024 * 1024;
+       int div = 2;
+       while (true) {
+           long free = Runtime.getRuntime().freeMemory();
+           long freeTry = free / div;
+           int eat = (int) Math.min(max, freeTry);
+           try {
+               byte[] block = new byte[eat];
+               memory.add(block);
+           } catch (OutOfMemoryError e) {
+               if (eat < 32) {
+                   break;
+               }
+               if (eat == max) {
+                   max /= 2;
+                   if (max < 128) {
+                       break;
+                   }
+               }
+               if (eat == freeTry) {
+                   div += 1;
+               } else {
+                   div = 2;
+               }
+           }
+       }
+       // silly code - makes sure there are no warnings
+       reserve[0] = reserve[1];
+       // actually it is anyway garbage collected
+       reserve = null;
+   }

+   protected void freeMemory() {
+       memory.clear();
+   }
}
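eatMemory and freeMemory are now shared helpers in TestBase: eatMemory keeps allocating throw-away byte arrays, backing off whenever an allocation fails, until only roughly remainingKB kilobytes of heap stay free, and freeMemory drops the filler blocks again. A minimal sketch of how a test might use the pair (the test name and body are illustrative):

    // Illustrative sketch: a TestBase subclass exercising behaviour under memory pressure.
    public void testUnderMemoryPressure() throws Exception {
        eatMemory(1024);      // leave roughly 1 MB of free heap
        try {
            // run the code under test here while memory is scarce
        } finally {
            freeMemory();     // release the filler blocks for the tests that follow
        }
    }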
h2/src/test/org/h2/test/db/TestMemoryUsage.java
...
...
@@ -86,19 +86,32 @@ public class TestMemoryUsage extends TestBase {
        stat.execute("SET MAX_LENGTH_INPLACE_LOB 32768");
        stat.execute("SET CACHE_SIZE 8000");
        stat.execute("CREATE TABLE TEST(ID IDENTITY, DATA CLOB)");
-       System.gc();
-       System.gc();
+       freeSoftReferences();
        try {
            int start = MemoryUtils.getMemoryUsed();
            for (int i = 0; i < 4; i++) {
                stat.execute("INSERT INTO TEST(DATA) SELECT SPACE(32000) FROM SYSTEM_RANGE(1, 200)");
-               System.gc();
-               System.gc();
+               freeSoftReferences();
                int used = MemoryUtils.getMemoryUsed();
                if ((used - start) > 16000) {
                    fail("Used: " + (used - start));
                }
            }
        } finally {
            conn.close();
+           freeMemory();
        }
    }

+   void freeSoftReferences() {
+       try {
+           eatMemory(1);
+       } catch (OutOfMemoryError e) {
+           // ignore
+       }
+       System.gc();
+       System.gc();
+       freeMemory();
+   }

    private void testCreateIndex() throws SQLException {
...
...
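freeSoftReferences works because the JVM guarantees that all soft references are cleared before an OutOfMemoryError is thrown: eatMemory(1) pushes the heap close to its limit, which empties soft-reference caches even when the error never escapes. A small standalone illustration of that guarantee (class and variable names are illustrative):

    import java.lang.ref.SoftReference;
    import java.util.ArrayList;
    import java.util.List;

    // Illustrative: soft references are cleared before OutOfMemoryError is thrown.
    public class SoftReferenceDemo {
        public static void main(String[] args) {
            SoftReference<byte[]> cached = new SoftReference<byte[]>(new byte[1024 * 1024]);
            try {
                List<byte[]> filler = new ArrayList<byte[]>();
                while (true) {
                    filler.add(new byte[1024 * 1024]);  // fill the heap until it overflows
                }
            } catch (OutOfMemoryError e) {
                // ignore: by now the soft reference has been cleared
            }
            System.out.println("cleared: " + (cached.get() == null));  // prints true
        }
    }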
h2/src/test/org/h2/test/db/TestOutOfMemory.java
...
...
@@ -11,8 +11,6 @@ import java.sql.PreparedStatement;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.sql.Statement;
-import java.util.LinkedList;
import org.h2.constant.ErrorCode;
import org.h2.test.TestBase;
...
...
@@ -22,8 +20,6 @@ import org.h2.test.TestBase;
*/
public class TestOutOfMemory extends TestBase {

-   private LinkedList<byte[]> list = new LinkedList<byte[]>();
/**
* Run just this test.
*
...
...
@@ -56,7 +52,7 @@ public class TestOutOfMemory extends TestBase {
        } catch (SQLException e) {
            assertEquals(ErrorCode.OUT_OF_MEMORY, e.getErrorCode());
        }
-       list = null;
+       freeMemory();
        ResultSet rs = stat.executeQuery("select count(*) from stuff");
        rs.next();
        assertEquals(2000, rs.getInt(1));
...
...
@@ -66,38 +62,4 @@ public class TestOutOfMemory extends TestBase {
        deleteDb("outOfMemory");
    }

-   private void eatMemory(int remainingKB) {
-       byte[] reserve = new byte[remainingKB * 1024];
-       int max = 128 * 1024 * 1024;
-       int div = 2;
-       while (true) {
-           long free = Runtime.getRuntime().freeMemory();
-           long freeTry = free / div;
-           int eat = (int) Math.min(max, freeTry);
-           try {
-               byte[] block = new byte[eat];
-               list.add(block);
-           } catch (OutOfMemoryError e) {
-               if (eat < 32) {
-                   break;
-               }
-               if (eat == max) {
-                   max /= 2;
-                   if (max < 128) {
-                       break;
-                   }
-               }
-               if (eat == freeTry) {
-                   div += 1;
-               } else {
-                   div = 2;
-               }
-           }
-       }
-       // silly code - makes sure there are no warnings
-       reserve[0] = reserve[1];
-       // actually it is anyway garbage collected
-       reserve = null;
-   }
}