Commit 59ba9829 authored Jan 23, 2009 by Thomas Mueller

New experimental page store.

Parent: 84595889

Showing 17 changed files with 258 additions and 67 deletions (+258 -67)
h2/src/docsrc/html/history.html  +3 -2
h2/src/docsrc/html/links.html  +5 -0
h2/src/main/org/h2/command/dml/TransactionCommand.java  +5 -0
h2/src/main/org/h2/engine/Database.java  +3 -1
h2/src/main/org/h2/index/Page.java  +0 -1
h2/src/main/org/h2/index/PageDataLeaf.java  +16 -14
h2/src/main/org/h2/log/LogSystem.java  +1 -1
h2/src/main/org/h2/res/help.csv  +4 -0
h2/src/main/org/h2/store/PageLog.java  +11 -3
h2/src/main/org/h2/store/PageOutputStream.java  +4 -2
h2/src/main/org/h2/store/PageStore.java  +29 -17
h2/src/main/org/h2/tools/Recover.java  +162 -19
h2/src/test/org/h2/test/TestAll.java  +0 -3
h2/src/test/org/h2/test/TestBase.java  +1 -0
h2/src/test/org/h2/test/db/TestOpenClose.java  +11 -1
h2/src/test/org/h2/test/unit/TestPageStore.java  +2 -2
h2/src/tools/org/h2/build/doc/dictionary.txt  +1 -1
h2/src/docsrc/html/history.html

@@ -100,7 +100,8 @@ spread the word and have translated this project. Also many thanks to the donors
 via PayPal:
 </p>
-<ul><li>Donald Bleyl, USA
+<ul><li><a href="http://skycash.com">SkyCash, Poland</a>
+</li><li>Donald Bleyl, USA
 </li><li>lumber-mill.co.jp, Japan
 </li><li>Frank Berger, Germany
 </li><li>Ashwin Jayaprakash, USA
@@ -112,7 +113,7 @@ via PayPal:
 </li><li>Elisabetta Berlini, Italy
 </li><li>William Gilbert, USA
 </li><li>Antonio Dieguez, Chile
-</li><li><a href="http://ontologyworks.com/">Ontology Works, USA</a>
+</li><li><a href="http://ontologyworks.com">Ontology Works, USA</a>
 </li><li>Pete Haidinyak, USA
 </li><li>William Osmond, USA
 </li><li>Joachim Ansorg, Germany
h2/src/docsrc/html/links.html

@@ -355,6 +355,11 @@ by the user; require a license to Oracle; lack advanced
 query/retrieval; and the ability to handle chemical structures.
 </p>
+<p><a href="http://www.nuxeo.org">Nuxeo</a><br />
+Standards-based, open source platform for building ECM applications.
+</p>
 <p><a href="http://www.nwiresoftware.com">nWire</a><br />
 Eclipse plug-in which expedites Java development.
h2/src/main/org/h2/command/dml/TransactionCommand.java

@@ -14,6 +14,7 @@ import org.h2.engine.Session;
 import org.h2.log.LogSystem;
 import org.h2.message.Message;
 import org.h2.result.LocalResult;
+import org.h2.store.PageStore;

 /**
  * Represents a transactional statement.
@@ -122,6 +123,10 @@ public class TransactionCommand extends Prepared {
             break;
         case CHECKPOINT:
             session.getUser().checkAdmin();
+            PageStore store = session.getDatabase().getPageStore();
+            if (store != null) {
+                store.checkpoint();
+            }
             session.getDatabase().getLog().checkpoint();
             session.getDatabase().getTempFileDeleter().deleteUnused();
             break;
h2/src/main/org/h2/engine/Database.java

@@ -1134,6 +1134,9 @@ public class Database implements DataHandler {
             }
             log = null;
         }
+        if (pageStore != null) {
+            pageStore.checkpoint();
+        }
         closeFiles();
         if (persistent && lock == null && fileLockMethod != FileLock.LOCK_NO) {
             // everything already closed (maybe in checkPowerOff)
@@ -1165,7 +1168,6 @@ public class Database implements DataHandler {
             fileIndex = null;
         }
         if (pageStore != null) {
-            pageStore.checkpoint();
             pageStore.close();
             pageStore = null;
         }
h2/src/main/org/h2/index/Page.java

@@ -6,7 +6,6 @@
  */
 package org.h2.index;

 /**
  * A page.
  */
h2/src/main/org/h2/index/PageDataLeaf.java

@@ -21,7 +21,8 @@ import org.h2.util.IntArray;
 * <ul><li>0-3: parent page id (0 for root)
 * </li><li>4-4: page type
 * </li><li>5-6: entry count
-* </li><li>only if there is overflow: 7-10: overflow page id
+* </li><li>7-10: table id
+* </li><li>only if there is overflow: 11-14: overflow page id
 * </li><li>list of key / offset pairs (4 bytes key, 2 bytes offset)
 * </li></ul>
 * The format of an overflow page is:
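For orientation, the leaf-page layout documented in the comment above can be decoded on its own. The following is a minimal sketch, not part of the patch: it assumes the raw page is passed in as a byte array, that integers are big-endian (as read by java.io.DataInputStream), and that the caller already knows from the page-type flags whether an overflow page id is present. The class and method names are made up for illustration.

import java.io.ByteArrayInputStream;
import java.io.DataInputStream;
import java.io.IOException;

// Hypothetical stand-alone decoder for the leaf-page header described above.
public class LeafPageHeaderSketch {

    public static void dump(byte[] page, boolean hasOverflow) throws IOException {
        DataInputStream in = new DataInputStream(new ByteArrayInputStream(page));
        int parentPageId = in.readInt();          // bytes 0-3: parent page id (0 for root)
        int pageType = in.readUnsignedByte();     // byte 4: page type
        int entryCount = in.readUnsignedShort();  // bytes 5-6: entry count
        int tableId = in.readInt();               // bytes 7-10: table id
        int overflowPageId = hasOverflow ? in.readInt() : 0; // 11-14, only if there is overflow
        System.out.println("parent=" + parentPageId + " type=" + pageType
                + " entries=" + entryCount + " table=" + tableId
                + " overflow=" + overflowPageId);
        for (int i = 0; i < entryCount; i++) {
            int key = in.readInt();               // 4 bytes key
            int offset = in.readUnsignedShort();  // 2 bytes offset
            System.out.println("  [" + i + "] key=" + key + " offset=" + offset);
        }
    }
}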
@@ -34,6 +35,11 @@ import org.h2.util.IntArray;
  */
 class PageDataLeaf extends PageData {

+    private static final int KEY_OFFSET_PAIR_LENGTH = 6;
+    private static final int KEY_OFFSET_PAIR_START = 11;
+    private static final int OVERFLOW_DATA_START_MORE = 9;
+    private static final int OVERFLOW_DATA_START_LAST = 7;
+
     /**
      * The row offsets.
      */
@@ -61,7 +67,7 @@ class PageDataLeaf extends PageData {
     PageDataLeaf(PageScanIndex index, int pageId, int parentPageId, DataPage data) {
         super(index, pageId, parentPageId, data);
-        start = 7;
+        start = KEY_OFFSET_PAIR_START;
     }

     void read() throws SQLException {
@@ -94,7 +100,7 @@ class PageDataLeaf extends PageData {
         // TODO currently the order is important
         // TODO and can only add at the end
         int last = entryCount == 0 ? pageSize : offsets[entryCount - 1];
-        if (entryCount > 0 && last - rowLength < start + 6) {
+        if (entryCount > 0 && last - rowLength < start + KEY_OFFSET_PAIR_LENGTH) {
             int todoSplitAtLastInsertionPoint;
             return (entryCount / 2) + 1;
         }
@@ -117,7 +123,7 @@ class PageDataLeaf extends PageData {
             }
         }
         entryCount++;
-        start += 6;
+        start += KEY_OFFSET_PAIR_LENGTH;
         newOffsets[x] = offset;
         newKeys[x] = row.getPos();
         newRows[x] = row;
@@ -138,7 +144,7 @@ class PageDataLeaf extends PageData {
             do {
                 int next = index.getPageStore().allocatePage();
                 array.add(next);
-                remaining -= pageSize - 7;
+                remaining -= pageSize - OVERFLOW_DATA_START_LAST;
                 if (remaining > 0) {
                     remaining += 2;
                 }
@@ -165,7 +171,7 @@ class PageDataLeaf extends PageData {
         System.arraycopy(offsets, i + 1, newOffsets, i, entryCount - i);
         System.arraycopy(keys, i + 1, newKeys, i, entryCount - i);
         System.arraycopy(rows, i + 1, newRows, i, entryCount - i);
-        start -= 6;
+        start -= KEY_OFFSET_PAIR_LENGTH;
         offsets = newOffsets;
         keys = newKeys;
         rows = newRows;
@@ -182,10 +188,6 @@ class PageDataLeaf extends PageData {
      * @return the row
      */
     Row getRowAt(int at) throws SQLException {
-        int test;
-        if (rows == null) {
-            System.out.println("stop");
-        }
         Row r = rows[at];
         if (r == null) {
             if (firstOverflowPageId != 0) {
@@ -199,12 +201,12 @@ if(rows == null) {
                 int type = page.readByte();
                 if (type == (Page.TYPE_DATA_OVERFLOW | Page.FLAG_LAST)) {
                     int size = page.readShortInt();
-                    data.write(page.getBytes(), 7, size);
+                    data.write(page.getBytes(), OVERFLOW_DATA_START_LAST, size);
                     break;
                 } else {
                     next = page.readInt();
-                    int size = pageSize - 9;
-                    data.write(page.getBytes(), 9, size);
+                    int size = pageSize - OVERFLOW_DATA_START_MORE;
+                    data.write(page.getBytes(), OVERFLOW_DATA_START_MORE, size);
                 }
             }
         }
@@ -335,7 +337,7 @@ if(rows == null) {
             overflow.reset();
             overflow.writeInt(parent);
             int size;
-            if (remaining > pageSize - 7) {
+            if (remaining > pageSize - OVERFLOW_DATA_START_LAST) {
                 overflow.writeByte((byte) Page.TYPE_DATA_OVERFLOW);
                 overflow.writeInt(overflowPageIds[i + 1]);
                 size = pageSize - overflow.length();
h2/src/main/org/h2/log/LogSystem.java

@@ -633,7 +633,7 @@ public class LogSystem {
      *
      * @return true if it is
      */
-    boolean getFlushOnEachCommit() {
+    public boolean getFlushOnEachCommit() {
         return flushOnEachCommit;
     }
h2/src/main/org/h2/res/help.csv

@@ -453,6 +453,7 @@ CREATE SEQUENCE [IF NOT EXISTS] newSequenceName
 [CACHE long]
 ","
 Creates a new sequence. The data type of a sequence is BIGINT.
+Used values are never re-used, even when the transaction is rolled back.
 The cache is the number of pre-allocated numbers. If the system crashes without closing the
 database, at most this many numbers are lost. The default cache size is 32.
 ","
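The behaviour documented by the added line (used sequence values are never re-used, even after a rollback) can be observed over JDBC. A minimal sketch, assuming an in-memory H2 database and the NEXTVAL function described further down in help.csv; the class name is made up for illustration.

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.ResultSet;
import java.sql.Statement;

// Sketch only: a sequence value consumed inside a rolled-back transaction
// is not handed out again.
public class SequenceRollbackSketch {
    public static void main(String[] args) throws Exception {
        Class.forName("org.h2.Driver");
        Connection conn = DriverManager.getConnection("jdbc:h2:mem:test", "sa", "");
        Statement stat = conn.createStatement();
        stat.execute("CREATE SEQUENCE SEQ CACHE 32");
        conn.setAutoCommit(false);
        ResultSet rs = stat.executeQuery("SELECT NEXTVAL('SEQ')");
        rs.next();
        long first = rs.getLong(1);   // typically 1
        conn.rollback();              // rolling back does not return the value to the sequence
        rs = stat.executeQuery("SELECT NEXTVAL('SEQ')");
        rs.next();
        long second = rs.getLong(1);  // continues after 'first'; the rolled-back value is never re-used
        System.out.println(first + " then " + second);
        conn.close();
    }
}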
@@ -475,6 +476,7 @@ Cached tables (the default) are persistent, and the number or rows is not limite
 Memory tables are persistent, but the index data is kept in the main memory, so memory tables should not get too large.
 Temporary tables are not persistent. Temporary tables can be global (accessible by all connections)
 or local (only accessible by the current connection). The default is for temporary tables is global.
+Identity and auto-increment columns are columns with a sequence as the default.
 ","
 CREATE TABLE TEST(ID INT PRIMARY KEY, NAME VARCHAR(255))
 "
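For reference, the table flavours this entry describes can be exercised directly. A minimal, hypothetical JDBC sketch (the class name and the "jdbc:h2:mem:test2" URL are placeholders; the CACHED/MEMORY distinction only matters for persistent databases, an in-memory URL is used just to keep the example self-contained):

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.Statement;

// Sketch only: creates one table of each flavour described above.
public class TableTypesSketch {
    public static void main(String[] args) throws Exception {
        Class.forName("org.h2.Driver");
        Connection conn = DriverManager.getConnection("jdbc:h2:mem:test2", "sa", "");
        Statement stat = conn.createStatement();
        stat.execute("CREATE CACHED TABLE T_CACHED(ID INT PRIMARY KEY, NAME VARCHAR(255))");
        stat.execute("CREATE MEMORY TABLE T_MEMORY(ID INT PRIMARY KEY, NAME VARCHAR(255))");
        stat.execute("CREATE GLOBAL TEMPORARY TABLE T_TEMP(ID INT)");
        // an identity column is backed by a sequence used as the column default
        stat.execute("CREATE TABLE T_IDENTITY(ID IDENTITY, NAME VARCHAR(255))");
        conn.close();
    }
}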
@@ -1667,6 +1669,7 @@ IDENTITY
 ","
 Auto-Increment value.
 Possible values: -9223372036854775808 to 9223372036854775807.
+Used values are never re-used, even when the transaction is rolled back.
 See also java.lang.Long.
 ","
 IDENTITY
@@ -2989,6 +2992,7 @@ MEMORY_USED()
 NEXTVAL([schemaName, ] sequenceString): long
 ","
 Returns the next value of the sequence.
+Used values are never re-used, even when the transaction is rolled back.
 If the schema name is not set, the current schema is used.
 If the schema name is not set, the sequence name is converted to uppercase (for compatibility).
 ","
h2/src/main/org/h2/store/PageLog.java

@@ -43,6 +43,7 @@ public class PageLog {
     private int firstPage;
     private DataPage data;
     private boolean recoveryRunning;
+    private long operation;

     PageLog(PageStore store, int firstPage) {
         this.store = store;
@@ -57,7 +58,7 @@ public class PageLog {
      */
     void openForWriting() {
         trace.debug("log openForWriting");
-        pageOut = new PageOutputStream(store, 0, firstPage, Page.TYPE_LOG);
+        pageOut = new PageOutputStream(store, 0, firstPage, Page.TYPE_LOG, true);
         out = new DataOutputStream(pageOut);
     }
@@ -150,7 +151,7 @@ public class PageLog {
             out.write(page.getBytes(), 0, store.getPageSize());
             undo.set(pageId);
         } catch (IOException e) {
-            throw Message.convertIOException(e, "recovering");
+            throw Message.convertIOException(e, null);
         }
     }
@@ -164,8 +165,11 @@ public class PageLog {
             trace.debug("log commit");
             out.write(COMMIT);
             out.writeInt(session.getId());
+            if (store.getDatabase().getLog().getFlushOnEachCommit()) {
+                flush();
+            }
         } catch (IOException e) {
-            throw Message.convertIOException(e, "recovering");
+            throw Message.convertIOException(e, null);
        }
     }
@@ -186,6 +190,9 @@ public class PageLog {
             trace.debug("log " + (add ? "+" : "-") + " table:" + tableId + " remaining:" + pageOut.getRemainingBytes() + " row:" + row);
         }
+        int todoLogPosShouldBeLong;
+        session.addLogPos(0, (int) operation);
+        row.setLastLog(0, (int) operation);
         out.write(add ? ADD : REMOVE);
         out.writeInt(session.getId());
         out.writeInt(tableId);
@@ -220,6 +227,7 @@ public class PageLog {
      */
     public void flush() throws SQLException {
         try {
+            int todoUseLessSpace;
             trace.debug("log flush");
             out.flush();
             int filler = pageOut.getRemainingBytes();
h2/src/main/org/h2/store/PageOutputStream.java

@@ -26,6 +26,7 @@ public class PageOutputStream extends OutputStream {
     private int nextPage;
     private DataPage page;
     private int remaining;
+    private final boolean allocateAtEnd;

     /**
      * Create a new page output stream.
@@ -35,12 +36,13 @@ public class PageOutputStream extends OutputStream {
      * @param headPage the first page
      * @param type the page type
      */
-    public PageOutputStream(PageStore store, int parentPage, int headPage, int type) {
+    public PageOutputStream(PageStore store, int parentPage, int headPage, int type, boolean allocateAtEnd) {
         this.trace = store.getTrace();
         this.store = store;
         this.parentPage = parentPage;
         this.nextPage = headPage;
         this.type = type;
+        this.allocateAtEnd = allocateAtEnd;
         page = store.createDataPage();
         initPage();
     }
@@ -73,7 +75,7 @@ public class PageOutputStream extends OutputStream {
         parentPage = nextPage;
         pageId = nextPage;
         try {
-            nextPage = store.allocatePage();
+            nextPage = store.allocatePage(allocateAtEnd);
         } catch (SQLException e) {
             throw Message.convertToIOException(e);
         }
h2/src/main/org/h2/store/PageStore.java

@@ -105,7 +105,7 @@ public class PageStore implements CacheWriter {
         this.database = database;
         trace = database.getTrace(Trace.PAGE_STORE);
         int test;
-        //trace.setLevel(TraceSystem.DEBUG);
+        // trace.setLevel(TraceSystem.DEBUG);
         this.fileName = fileName;
         this.accessMode = accessMode;
         this.cacheSize = cacheSizeDefault;
@@ -189,6 +189,10 @@
      */
     public void checkpoint() throws SQLException {
         trace.debug("checkpoint");
+        if (log == null) {
+            // the file was never fully opened
+            return;
+        }
         synchronized (database) {
             database.checkPowerOff();
             ObjectArray list = cache.getAllChanged();
@@ -345,14 +349,17 @@
      * @return the page id
      */
     public int allocatePage() throws SQLException {
-        if (freePageCount == 0) {
-            if (pageCount * pageSize >= fileLength) {
-                increaseFileSize(INCREMENT_PAGES);
-            }
-        }
-        if (lastUsedPage < pageCount) {
-            return ++lastUsedPage;
+        return allocatePage(false);
+    }
+
+    /**
+     * Allocate a page.
+     *
+     * @param atEnd if the allocated page must be at the end of the file
+     * @return the page id
+     */
+    public int allocatePage(boolean atEnd) throws SQLException {
+        if (freePageCount > 0 && !atEnd) {
+            if (freeListRootPageId == 0) {
+                Message.throwInternalError();
@@ -365,6 +372,11 @@
             freePageCount--;
             return id;
         }
+        if (lastUsedPage >= pageCount) {
+            increaseFileSize(INCREMENT_PAGES);
+        }
+        return ++lastUsedPage;
+    }

     private void increaseFileSize(int increment) throws SQLException {
         pageCount += increment;
h2/src/main/org/h2/tools/Recover.java

@@ -28,6 +28,7 @@ import org.h2.command.Parser;
 import org.h2.engine.Constants;
 import org.h2.engine.DbObject;
 import org.h2.engine.MetaRecord;
+import org.h2.index.Page;
 import org.h2.log.LogFile;
 import org.h2.message.Message;
 import org.h2.message.Trace;
@@ -70,6 +71,10 @@ public class Recover extends Tool implements DataHandler {
     private int valueId;
     private boolean trace;
     private boolean lobFilesInDirectories;
+    private ObjectArray schema;
+    private HashSet objectIdSet;
+    private HashMap tableMap;
+    private boolean remove;

     private void showUsage() {
         out.println("Helps recovering a corrupted database.");
@@ -169,6 +174,11 @@ public class Recover extends Tool implements DataHandler {
     private void removePassword(String fileName) throws SQLException {
+        if (fileName.endsWith(Constants.SUFFIX_PAGE_FILE)) {
+            remove = true;
+            dumpPageStore(fileName);
+            return;
+        }
         setDatabaseName(fileName.substring(fileName.length() - Constants.SUFFIX_DATA_FILE.length()));
         FileStore store = FileStore.open(null, fileName, "rw");
         long length = store.length();
@@ -300,6 +310,8 @@ public class Recover extends Tool implements DataHandler {
             String fileName = (String) list.get(i);
             if (fileName.endsWith(Constants.SUFFIX_DATA_FILE)) {
                 dumpData(fileName);
+            } else if (fileName.endsWith(Constants.SUFFIX_PAGE_FILE)) {
+                dumpPageStore(fileName);
             } else if (fileName.endsWith(Constants.SUFFIX_INDEX_FILE)) {
                 dumpIndex(fileName);
             } else if (fileName.endsWith(Constants.SUFFIX_LOG_FILE)) {
@@ -678,11 +690,140 @@ public class Recover extends Tool implements DataHandler {
         }
     }

+    private void dumpPageStore(String fileName) {
+        setDatabaseName(fileName.substring(0, fileName.length() - Constants.SUFFIX_PAGE_FILE.length()));
+        FileStore store = null;
+        PrintWriter writer = null;
+        try {
+            writer = getWriter(fileName, ".sql");
+            writer.println("CREATE ALIAS IF NOT EXISTS READ_CLOB FOR \"" + this.getClass().getName() + ".readClob\";");
+            writer.println("CREATE ALIAS IF NOT EXISTS READ_BLOB FOR \"" + this.getClass().getName() + ".readBlob\";");
+            resetSchema();
+            store = FileStore.open(null, fileName, "r");
+            long length = store.length();
+            byte[] buff = new byte[128];
+            DataPage s = DataPage.create(this, buff);
+            store.readFully(buff, 0, buff.length);
+            s.setPos(48);
+            int pageSize = s.readInt();
+            int writeVersion = (int) s.readByte();
+            int readVersion = (int) s.readByte();
+            int systemTableRoot = s.readInt();
+            int freeListHead = s.readInt();
+            int logHead = s.readInt();
+            writer.println("-- pageSize " + pageSize);
+            writer.println("-- writeVersion: " + writeVersion);
+            writer.println("-- readVersion: " + readVersion);
+            writer.println("-- systemTableRoot: " + systemTableRoot);
+            writer.println("-- freeListHead: " + freeListHead);
+            writer.println("-- logHead: " + logHead);
+            int pageCount = (int) (length / pageSize);
+            blockCount = 1;
+            buff = new byte[pageSize];
+            s = DataPage.create(this, buff);
+            for (int page = 1; page < pageCount; page++) {
+                store.seek((long) page * pageSize);
+                store.readFully(buff, 0, pageSize);
+                s.reset();
+                int parentPageId = s.readInt();
+                int type = s.readByte();
+                switch (type) {
+                case Page.TYPE_EMPTY:
+                    writer.println("-- page " + page + ": empty");
+                    if (parentPageId != 0) {
+                        writer.println("-- ERROR parent:" + parentPageId);
+                    }
+                    continue;
+                }
+                boolean last = (type & Page.FLAG_LAST) != 0;
+                type &= ~Page.FLAG_LAST;
+                switch (type) {
+                case Page.TYPE_DATA_OVERFLOW:
+                    writer.println("-- page " + page + ": data overflow " + (last ? "(last)" : ""));
+                    break;
+                case Page.TYPE_DATA_NODE:
+                    writer.println("-- page " + page + ": data node " + (last ? "(last)" : ""));
+                    break;
+                case Page.TYPE_DATA_LEAF:
+                    writer.println("-- page " + page + ": data leaf " + (last ? "(last)" : ""));
+                    dumpPageDataLeaf(store, pageSize, writer, s, last);
+                    break;
+                case Page.TYPE_FREE_LIST:
+                    writer.println("-- page " + page + ": free list " + (last ? "(last)" : ""));
+                    break;
+                case Page.TYPE_LOG:
+                    writer.println("-- page " + page + ": log " + (last ? "(last)" : ""));
+                    break;
+                default:
+                    writer.println("-- page " + page + ": ERROR unknown type " + type);
+                    break;
+                }
+            }
+            writer.close();
+        } catch (Throwable e) {
+            writeError(writer, e);
+        } finally {
+            IOUtils.closeSilently(writer);
+            closeSilently(store);
+        }
+    }
+
+    private void dumpPageDataLeaf(FileStore store, int pageSize, PrintWriter writer, DataPage s, boolean last) throws SQLException {
+        int entryCount = s.readShortInt();
+        int tableId = s.readInt();
+        int[] keys = new int[entryCount];
+        int[] offsets = new int[entryCount];
+        int next = 0;
+        if (!last) {
+            next = s.readInt();
+        }
+        for (int i = 0; i < entryCount; i++) {
+            keys[i] = s.readInt();
+            offsets[i] = s.readShortInt();
+        }
+        if (!last) {
+            byte[] buff = new byte[pageSize];
+            DataPage s2 = DataPage.create(this, buff);
+            s.setPos(pageSize);
+            while (true) {
+                store.seek(pageSize * next);
+                store.readFully(s2.getBytes(), 0, pageSize);
+                s2.setPos(4);
+                int type = s2.readByte();
+                if (type == (Page.TYPE_DATA_OVERFLOW | Page.FLAG_LAST)) {
+                    int size = s2.readShortInt();
+                    s.write(s2.getBytes(), 7, size);
+                    break;
+                } else {
+                    next = s2.readInt();
+                    int size = pageSize - 9;
+                    s.write(s2.getBytes(), 9, size);
+                }
+            }
+        }
+        for (int i = 0; i < entryCount; i++) {
+            int key = keys[i];
+            int off = offsets[i];
+            writer.println("-- [" + i + "] tableId: " + tableId + " key:" + key + " off: " + off);
+            s.setPos(off);
+            s.readInt();
+            if (remove && tableId == 0) {
+                writer.println("-- system table");
+            }
+        }
+    }
+
     private void dumpData(String fileName) {
         setDatabaseName(fileName.substring(0, fileName.length() - Constants.SUFFIX_DATA_FILE.length()));
         dumpData(fileName, fileName, FileStore.HEADER_LENGTH);
     }

+    private void resetSchema() {
+        schema = new ObjectArray();
+        objectIdSet = new HashSet();
+        tableMap = new HashMap();
+    }
+
     private void dumpData(String fileName, String outputName, int offset) {
         PrintWriter writer = null;
         FileStore store = null;
@@ -690,9 +831,7 @@ public class Recover extends Tool implements DataHandler {
             writer = getWriter(outputName, ".sql");
             writer.println("CREATE ALIAS IF NOT EXISTS READ_CLOB FOR \"" + this.getClass().getName() + ".readClob\";");
             writer.println("CREATE ALIAS IF NOT EXISTS READ_BLOB FOR \"" + this.getClass().getName() + ".readBlob\";");
-            ObjectArray schema = new ObjectArray();
-            HashSet objectIdSet = new HashSet();
-            HashMap tableMap = new HashMap();
+            resetSchema();
            store = FileStore.open(null, fileName, "r");
            long length = store.length();
            int blockSize = DiskFile.BLOCK_SIZE;
@@ -775,22 +914,7 @@ public class Recover extends Tool implements DataHandler {
                 writeDataError(writer, "out of memory", s.getBytes(), blockCount);
                 continue;
             }
-            if (!objectIdSet.contains(ObjectUtils.getInteger(storageId))) {
-                objectIdSet.add(ObjectUtils.getInteger(storageId));
-                StringBuffer sb = new StringBuffer();
-                sb.append("CREATE TABLE O_" + storageId + "(");
-                for (int i = 0; i < recordLength; i++) {
-                    if (i > 0) {
-                        sb.append(", ");
-                    }
-                    sb.append("C");
-                    sb.append(i);
-                    sb.append(" VARCHAR");
-                }
-                sb.append(");");
-                writer.println(sb.toString());
-                writer.flush();
-            }
+            createTemporaryTable(writer);
             StringBuffer sb = new StringBuffer();
             sb.append("INSERT INTO O_" + storageId + " VALUES(");
             for (valueId = 0; valueId < recordLength; valueId++) {
@@ -855,6 +979,25 @@ public class Recover extends Tool implements DataHandler {
         }
     }

+    private void createTemporaryTable(PrintWriter writer) {
+        if (!objectIdSet.contains(ObjectUtils.getInteger(storageId))) {
+            objectIdSet.add(ObjectUtils.getInteger(storageId));
+            StringBuffer sb = new StringBuffer();
+            sb.append("CREATE TABLE O_" + storageId + "(");
+            for (int i = 0; i < recordLength; i++) {
+                if (i > 0) {
+                    sb.append(", ");
+                }
+                sb.append("C");
+                sb.append(i);
+                sb.append(" VARCHAR");
+            }
+            sb.append(");");
+            writer.println(sb.toString());
+            writer.flush();
+        }
+    }
+
     private String extractTableOrViewName(String sql) {
         int indexTable = sql.indexOf(" TABLE ");
         int indexView = sql.indexOf(" VIEW ");
h2/src/test/org/h2/test/TestAll.java

@@ -282,9 +282,6 @@ java org.h2.test.TestAll timer
 /*

-document: sequences and auto-increment number
-that were used once are never re-used
-
 JCR: for each node type, create a table; one 'dynamic' table with parameter;
 option to cache the results

 <link rel="icon" type="image/png" href="/path/image.png">
h2/src/test/org/h2/test/TestBase.java

@@ -1037,6 +1037,7 @@ public abstract class TestBase {
      */
     public static TestBase createCaller() {
         String className = new Exception().getStackTrace()[1].getClassName();
+        org.h2.Driver.load();
         try {
             return (TestBase) Class.forName(className).newInstance();
         } catch (Exception e) {
h2/src/test/org/h2/test/db/TestOpenClose.java

@@ -23,7 +23,16 @@ import org.h2.util.FileUtils;
  */
 public class TestOpenClose extends TestBase implements DatabaseEventListener {

-    int nextId = 10;
+    private int nextId = 10;
+
+    /**
+     * Run just this test.
+     *
+     * @param a ignored
+     */
+    public static void main(String[] a) throws Exception {
+        TestBase.createCaller().init().test();
+    }

     public void test() throws Exception {
         testCloseDelay();
@@ -35,6 +44,7 @@ public class TestOpenClose extends TestBase implements DatabaseEventListener {
     }

     private void testCloseDelay() throws Exception {
+        deleteDb(baseDir, "openClose");
         String url = getURL("openClose;DB_CLOSE_DELAY=1", true);
         String user = getUser(), password = getPassword();
         Connection conn = DriverManager.getConnection(url, user, password);
h2/src/test/org/h2/test/unit/TestPageStore.java

@@ -99,7 +99,7 @@ public class TestPageStore extends TestBase {
         if (file) {
             out = new BufferedOutputStream(new FileOutputStream(f), 4 * 1024);
         } else {
-            out = new PageOutputStream(store, 0, head, Page.TYPE_LOG);
+            out = new PageOutputStream(store, 0, head, Page.TYPE_LOG, false);
         }
         for (int i = 0; i < count; i++) {
             out.write(buff);
@@ -139,7 +139,7 @@ public class TestPageStore extends TestBase {
         byte[] data = new byte[len];
         random.nextBytes(data);
         int head = store.allocatePage();
-        PageOutputStream out = new PageOutputStream(store, 0, head, Page.TYPE_LOG);
+        PageOutputStream out = new PageOutputStream(store, 0, head, Page.TYPE_LOG, false);
         for (int p = 0; p < len;) {
             int l = len == 0 ? 0 : Math.min(len - p, random.nextInt(len / 10));
             out.write(data, p, l);
h2/src/tools/org/h2/build/doc/dictionary.txt

@@ -578,4 +578,4 @@ connecturl problematic transformation lazy querydsl squill empire liq fle
 xive evolving mssqlserver eric respond faulhaber fixing northern lying
 federal santa america county clara courts california york venue away stages
 titles headers grew orchestration social razor finder ranging friend intervals
-bot jot delicious rife appenders circles spelling
+bot jot delicious rife appenders circles spelling cash sky ecm nuxeo poland