Administrator / h2database / Commits

Commit e920b890, authored 12 years ago by Thomas Mueller
Parent: 6c9eede0

    A persistent tree map (work in progress).

Showing 7 changed files with 172 additions and 67 deletions (+172 -67):
    TestBtreeMapStore.java   h2/src/test/org/h2/test/store/TestBtreeMapStore.java     +26   -0
    TestDataUtils.java       h2/src/test/org/h2/test/store/TestDataUtils.java         +20   -8
    BtreeMap.java            h2/src/tools/org/h2/dev/store/btree/BtreeMap.java         +7  -10
    BtreeMapStore.java       h2/src/tools/org/h2/dev/store/btree/BtreeMapStore.java   +35   -5
    DataUtils.java           h2/src/tools/org/h2/dev/store/btree/DataUtils.java       +40   -9
    Dump.java                h2/src/tools/org/h2/dev/store/btree/Dump.java            +21  -23
    Page.java                h2/src/tools/org/h2/dev/store/btree/Page.java            +23  -12
h2/src/test/org/h2/test/store/TestBtreeMapStore.java

```diff
@@ -29,6 +29,7 @@ public class TestBtreeMapStore extends TestBase {
     }

     public void test() {
+        testFastDelete();
         testRollbackInMemory();
         testRollbackStored();
         testMeta();
@@ -42,6 +43,31 @@ public class TestBtreeMapStore extends TestBase {
         testSimple();
     }

+    private void testFastDelete() {
+        String fileName = getBaseDir() + "/testMeta.h3";
+        FileUtils.delete(fileName);
+        BtreeMapStore s;
+        BtreeMap<Integer, String> m;
+        s = openStore(fileName);
+        s.setMaxPageSize(100);
+        m = s.openMap("data", Integer.class, String.class);
+        for (int i = 0; i < 1000; i++) {
+            m.put(i, "Hello World");
+        }
+        s.store();
+        assertEquals(3, s.getWriteCount());
+        s.close();
+
+        s = openStore(fileName);
+        m = s.openMap("data", Integer.class, String.class);
+        m.clear();
+        s.store();
+        // ensure only nodes are read, but not leaves
+        assertEquals(4, s.getReadCount());
+        assertEquals(2, s.getWriteCount());
+        s.close();
+    }
+
     private void testRollbackStored() {
         String fileName = getBaseDir() + "/testMeta.h3";
         FileUtils.delete(fileName);
```
h2/src/test/org/h2/test/store/TestDataUtils.java

```diff
@@ -44,13 +44,25 @@ public class TestDataUtils extends TestBase {
     }

     private void testPagePos() {
-        for (int chunkId = 0; chunkId < 67000000; chunkId += 670000) {
-            for (long offset = 0; offset < Integer.MAX_VALUE; offset += Integer.MAX_VALUE / 100) {
-                for (int length = 0; length < 2000000; length += 200000) {
-                    long pos = DataUtils.getPos(chunkId, (int) offset, length);
-                    assertEquals(chunkId, DataUtils.getChunkId(pos));
-                    assertEquals(offset, DataUtils.getOffset(pos));
-                    assertTrue(DataUtils.getMaxLength(pos) >= length);
+        assertEquals(0, DataUtils.PAGE_TYPE_LEAF);
+        assertEquals(1, DataUtils.PAGE_TYPE_NODE);
+        for (int i = 0; i < 67000000; i++) {
+            long pos = DataUtils.getPagePos(i, 3, 128, 1);
+            assertEquals(i, DataUtils.getPageChunkId(pos));
+            assertEquals(3, DataUtils.getPageOffset(pos));
+            assertEquals(128, DataUtils.getPageMaxLength(pos));
+            assertEquals(1, DataUtils.getPageType(pos));
+        }
+        for (int type = 0; type <= 1; type++) {
+            for (int chunkId = 0; chunkId < 67000000; chunkId += 670000) {
+                for (long offset = 0; offset < Integer.MAX_VALUE; offset += Integer.MAX_VALUE / 100) {
+                    for (int length = 0; length < 2000000; length += 200000) {
+                        long pos = DataUtils.getPagePos(chunkId, (int) offset, length, type);
+                        assertEquals(chunkId, DataUtils.getPageChunkId(pos));
+                        assertEquals(offset, DataUtils.getPageOffset(pos));
+                        assertTrue(DataUtils.getPageMaxLength(pos) >= length);
+                        assertTrue(DataUtils.getPageType(pos) == type);
+                    }
                 }
             }
         }
@@ -75,7 +87,7 @@ public class TestDataUtils extends TestBase {
             if (code > lastCode) {
                 lastCode = code;
             }
-            int max = DataUtils.getMaxLength(code);
+            int max = DataUtils.getPageMaxLength(code << 1);
             assertTrue(max >= i && max >= 32);
         }
     }
```
h2/src/tools/org/h2/dev/store/btree/BtreeMap.java

```diff
@@ -95,7 +95,7 @@ public class BtreeMap<K, V> {
     }

     /**
-     * Remove all entries, and remove the map. The map becomes invalid.
+     * Remove all entries, and close the map.
      */
     public void remove() {
         checkWrite();
@@ -103,10 +103,14 @@ public class BtreeMap<K, V> {
             root.removeAllRecursive();
         }
         store.removeMap(id);
+        close();
+    }
+
+    public void close() {
+        readOnly = true;
+        store = null;
         oldRoots.clear();
         root = null;
-        store = null;
-        readOnly = true;
     }

     public boolean isClosed() {
@@ -278,13 +282,6 @@ public class BtreeMap<K, V> {
         return buff.toString();
     }

-    public void close() {
-        readOnly = true;
-        store = null;
-        oldRoots.clear();
-        root = null;
-    }
-
     public int hashCode() {
         return id;
     }
```
h2/src/tools/org/h2/dev/store/btree/BtreeMapStore.java

```diff
@@ -38,7 +38,6 @@ header:
 blockSize=4096

 TODO:
-- keep page type (leaf/node) in pos to speed up large deletes
 - support fast range deletes
 - support custom pager for r-tree, kd-tree
 - need an 'end of chunk' marker to verify all data is written
@@ -57,6 +56,7 @@ TODO:
 - file header could be a regular chunk, end of file the second
 - possibly split chunk data into immutable and mutable
 - reduce minimum chunk size, speed up very small transactions
+- defragment: use total max length instead of page count (liveCount)

 */
@@ -107,6 +107,8 @@ public class BtreeMapStore {
     private Compressor compressor = new CompressLZF();

     private long currentVersion;
+    private int readCount;
+    private int writeCount;

     private BtreeMapStore(String fileName, DataTypeFactory typeFactory) {
         this.fileName = fileName;
@@ -310,6 +312,7 @@ public class BtreeMapStore {
                     "rootChunk:" + rootChunkStart + "\n" +
                     "lastMapId:" + lastMapId + "\n" +
                     "version:" + currentVersion + "\n").getBytes("UTF-8"));
+            writeCount++;
             file.position(0);
             file.write(header);
             file.position(blockSize);
@@ -321,8 +324,9 @@ public class BtreeMapStore {
     private void readHeader() {
         try {
-            file.position(0);
             byte[] header = new byte[blockSize];
+            readCount++;
+            file.position(0);
             // TODO read fully; read both headers
             file.read(ByteBuffer.wrap(header));
             Properties prop = new Properties();
@@ -365,16 +369,16 @@ public class BtreeMapStore {
     }

     private Chunk getChunk(long pos) {
-        return chunks.get(DataUtils.getChunkId(pos));
+        return chunks.get(DataUtils.getPageChunkId(pos));
     }

     private long getFilePosition(long pos) {
         Chunk c = getChunk(pos);
         if (c == null) {
-            throw new RuntimeException("Chunk " + DataUtils.getChunkId(pos) + " not found");
+            throw new RuntimeException("Chunk " + DataUtils.getPageChunkId(pos) + " not found");
         }
         long filePos = c.start;
-        filePos += DataUtils.getOffset(pos);
+        filePos += DataUtils.getPageOffset(pos);
         return filePos;
     }
@@ -467,6 +471,7 @@ public class BtreeMapStore {
         buff.putLong(meta.getRoot().getPos());
         buff.rewind();
         try {
+            writeCount++;
             file.position(filePos);
             file.write(buff);
         } catch (IOException e) {
@@ -575,6 +580,7 @@ public class BtreeMapStore {
     private Chunk readChunkHeader(long start) {
         try {
+            readCount++;
             file.position(start);
             ByteBuffer buff = ByteBuffer.wrap(new byte[32]);
             DataUtils.readFully(file, buff);
@@ -724,6 +730,7 @@ public class BtreeMapStore {
         Page p = cache.get(pos);
         if (p == null) {
             long filePos = getFilePosition(pos);
+            readCount++;
             p = Page.read(file, map, filePos, pos);
             cache.put(pos, p);
         }
@@ -775,6 +782,11 @@ public class BtreeMapStore {
         this.maxPageSize = maxPageSize;
     }

+    /**
+     * The maximum number of key-value pairs in a page.
+     *
+     * @return the maximum number of entries
+     */
     int getMaxPageSize() {
         return maxPageSize;
     }
@@ -914,4 +926,22 @@ public class BtreeMapStore {
         return currentVersion;
     }

+    /**
+     * Get the number of write operations since this store was opened.
+     *
+     * @return the number of write operations
+     */
+    public int getWriteCount() {
+        return writeCount;
+    }
+
+    /**
+     * Get the number of read operations since this store was opened.
+     *
+     * @return the number of read operations
+     */
+    public int getReadCount() {
+        return readCount;
+    }
+
 }
```
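The counter changes above all follow one pattern: each call site that positions the file and performs I/O bumps readCount or writeCount first, and the totals are exposed through getReadCount()/getWriteCount() so that tests such as testFastDelete can assert how many physical operations a logical operation cost. A minimal sketch of the same idea, centralized in a hypothetical wrapper class (not part of this commit) instead of repeated at every call site:

```java
import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.channels.FileChannel;

// Hypothetical sketch: counting positioned I/O operations the way
// BtreeMapStore does, but in one place rather than at each call site.
class CountingFile {
    private final FileChannel file;
    private int readCount, writeCount;

    CountingFile(FileChannel file) {
        this.file = file;
    }

    int read(long pos, ByteBuffer dst) throws IOException {
        readCount++;             // count before the physical read
        file.position(pos);
        return file.read(dst);
    }

    int write(long pos, ByteBuffer src) throws IOException {
        writeCount++;            // count before the physical write
        file.position(pos);
        return file.write(src);
    }

    int getReadCount() { return readCount; }
    int getWriteCount() { return writeCount; }
}
```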
h2/src/tools/org/h2/dev/store/btree/DataUtils.java

```diff
@@ -15,6 +15,21 @@ import java.nio.channels.FileChannel;
  */
 public class DataUtils {

+    /**
+     * The type for leaf page.
+     */
+    public static final int PAGE_TYPE_LEAF = 0;
+
+    /**
+     * The type for node page.
+     */
+    public static final int PAGE_TYPE_NODE = 1;
+
+    /**
+     * The bit mask for compressed pages.
+     */
+    public static final int PAGE_COMPRESSED = 2;
+
     /**
      * The maximum length of a variable size int.
      */
@@ -211,8 +226,8 @@ public class DataUtils {
      * @param pos the position
      * @return the chunk id
      */
-    public static int getChunkId(long pos) {
-        return (int) (pos >>> 37);
+    public static int getPageChunkId(long pos) {
+        return (int) (pos >>> 38);
     }

     /**
@@ -222,8 +237,8 @@ public class DataUtils {
      * @param pos the position
      * @return the maximum length
      */
-    public static int getMaxLength(long pos) {
-        int code = (int) (pos & 31);
+    public static int getPageMaxLength(long pos) {
+        int code = (int) ((pos >> 1) & 31);
         if (code == 31) {
             return Integer.MAX_VALUE;
         }
@@ -236,21 +251,37 @@ public class DataUtils {
      * @param pos the position
      * @return the offset
      */
-    public static int getOffset(long pos) {
-        return (int) (pos >> 5);
+    public static int getPageOffset(long pos) {
+        return (int) (pos >> 6);
     }

     /**
+     * Get the page type from the position.
+     *
+     * @param pos the position
+     * @return the page type (PAGE_TYPE_NODE or PAGE_TYPE_LEAF)
+     */
+    public static int getPageType(long pos) {
+        return ((int) pos) & 1;
+    }
+
+    /**
      * Get the position of this page. The following information is encoded in
-     * the position: the chunk id, the offset, and the maximum length.
+     * the position: the chunk id, the offset, the maximum length, and the type
+     * (node or leaf).
      *
      * @param chunkId the chunk id
      * @param offset the offset
      * @param length the length
+     * @param type the page type (1 for node, 0 for leaf)
      * @return the position
      */
-    public static long getPos(int chunkId, int offset, int length) {
-        return ((long) chunkId << 37) | ((long) offset << 5) | encodeLength(length);
+    public static long getPagePos(int chunkId, int offset, int length, int type) {
+        long pos = (long) chunkId << 38;
+        pos |= (long) offset << 6;
+        pos |= encodeLength(length) << 1;
+        pos |= type;
+        return pos;
     }
```
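To make room for the new type bit, every field in the packed position moves up one bit: the chunk id now starts at bit 38 (was 37), the offset at bit 6 (was 5), and the 5-bit max-length code occupies bits 1-5 instead of 0-4, with the page type in bit 0. A standalone sketch of the round trip, mirroring the new DataUtils methods above; since encodeLength() is not part of this diff, the sketch takes the 5-bit code directly instead of deriving it from a byte length:

```java
public class PagePosSketch {

    // Pack chunk id, offset, 5-bit length code, and type into one long,
    // using the same bit layout as the new DataUtils.getPagePos.
    static long getPagePos(int chunkId, int offset, int lengthCode, int type) {
        long pos = (long) chunkId << 38; // bits 38..63: chunk id
        pos |= (long) offset << 6;       // bits 6..37: offset within the chunk
        pos |= (long) lengthCode << 1;   // bits 1..5: coded maximum page length
        pos |= type;                     // bit 0: 0 = leaf, 1 = node
        return pos;
    }

    static int getPageChunkId(long pos) {
        return (int) (pos >>> 38);
    }

    static int getPageOffset(long pos) {
        return (int) (pos >> 6); // the int cast keeps only the low 32 bits
    }

    static int getPageLengthCode(long pos) {
        return (int) ((pos >> 1) & 31);
    }

    static int getPageType(long pos) {
        return ((int) pos) & 1;
    }

    public static void main(String[] args) {
        long pos = getPagePos(3, 4096, 7, 1);
        if (getPageChunkId(pos) != 3 || getPageOffset(pos) != 4096
                || getPageLengthCode(pos) != 7 || getPageType(pos) != 1) {
            throw new AssertionError("round trip failed");
        }
        System.out.println("pos = " + Long.toBinaryString(pos));
    }
}
```

Keeping the type in bit 0 is what enables the fast delete tested above: Page.removeAllRecursive can tell a leaf from a node by its position alone, without reading the page from disk.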
h2/src/tools/org/h2/dev/store/btree/Dump.java

```diff
@@ -12,7 +12,6 @@ import java.io.PrintWriter;
 import java.io.StringReader;
 import java.nio.ByteBuffer;
 import java.nio.channels.FileChannel;
-import java.util.Arrays;
 import java.util.Properties;

 import org.h2.store.fs.FilePath;
 import org.h2.store.fs.FileUtils;
@@ -68,7 +67,7 @@ public class Dump {
         writer.println("file " + fileName);
         writer.println("    length " + fileLength);
         writer.println("    " + prop);
-        ByteBuffer block = ByteBuffer.wrap(new byte[16]);
+        ByteBuffer block = ByteBuffer.wrap(new byte[32]);
         for (long pos = 0; pos < fileLength;) {
             file.position(pos);
             block.rewind();
@@ -78,34 +77,33 @@ public class Dump {
                 pos += blockSize;
                 continue;
             }
-            int length = block.getInt();
+            int chunkLength = block.getInt();
             int chunkId = block.getInt();
-            int metaRootOffset = block.getInt();
+            long metaRootPos = block.getLong();
             writer.println("    chunk " + chunkId + " at " + pos +
-                    " length " + length + " offset " + metaRootOffset);
-            ByteBuffer chunk = ByteBuffer.allocate(length);
+                    " length " + chunkLength + " root " + metaRootPos);
+            ByteBuffer chunk = ByteBuffer.allocate(chunkLength);
             file.position(pos);
             FileUtils.readFully(file, chunk);
             int p = block.position();
-            pos = (pos + length + blockSize) / blockSize * blockSize;
-            length -= p;
-            while (length > 0) {
+            pos = (pos + chunkLength + blockSize) / blockSize * blockSize;
+            chunkLength -= p;
+            while (chunkLength > 0) {
                 chunk.position(p);
-                int len = chunk.getInt();
-                long mapId = chunk.getLong();
+                int pageLength = chunk.getInt();
+                // check value (ignored)
+                chunk.getShort();
+                long mapId = DataUtils.readVarInt(chunk);
+                int len = DataUtils.readVarInt(chunk);
                 int type = chunk.get();
-                int count = DataUtils.readVarInt(chunk);
-                if (type == 1) {
-                    long[] children = new long[count];
-                    for (int i = 0; i < count; i++) {
-                        children[i] = chunk.getLong();
-                    }
-                    writer.println("        map " + mapId + " at " + p + " node, " + count + " children: " + Arrays.toString(children));
-                } else {
-                    writer.println("        map " + mapId + " at " + p + " leaf, " + count + " rows");
-                }
-                p += len;
-                length -= len;
+                boolean compressed = (type & 2) != 0;
+                boolean node = (type & 1) != 0;
+                writer.println("        map " + mapId + " at " + p + " " +
+                        (node ? "node" : "leaf") + " " +
+                        (compressed ? "compressed" : "") + " " +
+                        "len: " + pageLength + " entries: " + len);
+                p += pageLength;
+                chunkLength -= pageLength;
             }
         }
     } catch (IOException e) {
```
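One detail worth calling out in the dump loop: after reading a chunk, the scan position is advanced with (pos + chunkLength + blockSize) / blockSize * blockSize, integer arithmetic that rounds up past the end of the chunk to the next block boundary (and always advances by at least one full block). A small worked sketch, assuming the 4096-byte blockSize from the file header:

```java
public class BlockRounding {

    // Advance past a chunk to the next block boundary, as in Dump.
    static long nextChunkStart(long pos, int chunkLength, int blockSize) {
        return (pos + chunkLength + blockSize) / blockSize * blockSize;
    }

    public static void main(String[] args) {
        int blockSize = 4096;
        // A 5000-byte chunk starting at 4096 ends inside the third block
        // (at byte 9095), so the next scan position is the fourth block:
        // (4096 + 5000 + 4096) / 4096 * 4096 = 13192 / 4096 * 4096 = 12288.
        System.out.println(nextChunkStart(4096, 5000, blockSize)); // 12288
    }
}
```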
h2/src/tools/org/h2/dev/store/btree/Page.java

```diff
@@ -21,6 +21,7 @@ import org.h2.compress.Compressor;
  * File format:
  * page length (including length): int
  * check value: short
+ * map id: varInt
  * number of keys: varInt
  * type: byte (0: leaf, 1: node; +2: compressed)
  * compressed: bytes saved (varInt)
@@ -71,7 +72,7 @@ public class Page {
      * @return the page
      */
     static Page read(FileChannel file, BtreeMap<?, ?> map, long filePos, long pos) {
-        int maxLength = DataUtils.getMaxLength(pos), length = maxLength;
+        int maxLength = DataUtils.getPageMaxLength(pos), length = maxLength;
         ByteBuffer buff;
         try {
             file.position(filePos);
@@ -88,8 +89,8 @@ public class Page {
         }
         Page p = new Page(map, 0);
         p.pos = pos;
-        int chunkId = DataUtils.getChunkId(pos);
-        int offset = DataUtils.getOffset(pos);
+        int chunkId = DataUtils.getPageChunkId(pos);
+        int offset = DataUtils.getPageOffset(pos);
         p.read(buff, chunkId, offset, maxLength);
         return p;
     }
@@ -427,7 +428,12 @@ public class Page {
     void removeAllRecursive() {
         if (children != null) {
             for (long c : children) {
-                map.readPage(c).removeAllRecursive();
+                int type = DataUtils.getPageType(c);
+                if (type == DataUtils.PAGE_TYPE_LEAF) {
+                    getStore().removePage(c);
+                } else {
+                    map.readPage(c).removeAllRecursive();
+                }
             }
         }
         getStore().removePage(pos);
@@ -524,6 +530,10 @@ public class Page {
             throw new RuntimeException("Length too large, expected =< " + maxLength + " got " + pageLength);
         }
         short check = buff.getShort();
+        int mapId = DataUtils.readVarInt(buff);
+        if (mapId != map.getId()) {
+            throw new RuntimeException("Error reading page, expected map " + map.getId() + " got " + mapId);
+        }
         int len = DataUtils.readVarInt(buff);
         int checkTest = DataUtils.getCheckValue(chunkId)
                 ^ DataUtils.getCheckValue(map.getId()) ^
@@ -535,8 +545,8 @@ public class Page {
         }
         keys = new Object[len];
         int type = buff.get();
-        boolean node = (type & 1) != 0;
-        boolean compressed = (type & 2) != 0;
+        boolean node = (type & 1) == DataUtils.PAGE_TYPE_NODE;
+        boolean compressed = (type & DataUtils.PAGE_COMPRESSED) != 0;
         if (compressed) {
             Compressor compressor = map.getStore().getCompressor();
             int lenAdd = DataUtils.readVarInt(buff);
@@ -573,16 +583,17 @@ public class Page {
         int start = buff.position();
         buff.putInt(0);
         buff.putShort((byte) 0);
+        DataUtils.writeVarInt(buff, map.getId());
         int len = keys.length;
         DataUtils.writeVarInt(buff, len);
         Compressor compressor = map.getStore().getCompressor();
-        int type = children != null ? 1 : 0;
+        int type = children != null ? DataUtils.PAGE_TYPE_NODE
+                : DataUtils.PAGE_TYPE_LEAF;
         buff.put((byte) type);
         int compressStart = buff.position();
         for (int i = 0; i < len; i++) {
             map.getKeyType().write(buff, keys[i]);
         }
-        if (type == 1) {
+        if (type == DataUtils.PAGE_TYPE_NODE) {
             for (int i = 0; i < len + 1; i++) {
                 buff.putLong(children[i]);
             }
@@ -600,7 +611,7 @@ public class Page {
             int compLen = compressor.compress(exp, exp.length, comp, 0);
             if (compLen + DataUtils.getVarIntLen(compLen - expLen) < expLen) {
                 buff.position(compressStart - 1);
-                buff.put((byte) (type + 2));
+                buff.put((byte) (type + DataUtils.PAGE_COMPRESSED));
                 DataUtils.writeVarInt(buff, expLen - compLen);
                 buff.put(comp, 0, compLen);
             }
@@ -614,7 +625,7 @@ public class Page {
                 DataUtils.getCheckValue(pageLength)
                 ^ DataUtils.getCheckValue(len);
         buff.putShort(start + 4, (short) check);
-        this.pos = DataUtils.getPos(chunkId, start, pageLength);
+        this.pos = DataUtils.getPagePos(chunkId, start, pageLength, type);
     }

     /**
@@ -623,8 +634,8 @@ public class Page {
      * @return the next page id
      */
     int getMaxLengthTempRecursive() {
-        // length, check, key length, type
-        int maxLength = 4 + 2 + DataUtils.MAX_VAR_INT_LEN + 1;
+        // length, check, map id, key length, type
+        int maxLength = 4 + 2 + DataUtils.MAX_VAR_INT_LEN
+                + DataUtils.MAX_VAR_INT_LEN + 1;
         int len = keys.length;
         for (int i = 0; i < len; i++) {
             maxLength += map.getKeyType().getMaxLength(keys[i]);
```
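The format comment at the top of Page.java now fixes the header field order a reader must follow: page length (int), check value (short), map id (varInt), number of keys (varInt), then the type byte. A hedged sketch of walking that header from a ByteBuffer; the varint decoder here is an assumed stand-in for DataUtils.readVarInt (7 data bits per byte, high bit set on continuation bytes), whose exact encoding is not shown in this diff:

```java
import java.nio.ByteBuffer;

public class PageHeaderSketch {

    // Read the page header fields in the order given by the file-format
    // comment in Page.java.
    static void readHeader(ByteBuffer buff) {
        int pageLength = buff.getInt();  // page length, including itself
        short check = buff.getShort();   // check value
        int mapId = readVarInt(buff);    // map id (new in this commit)
        int keyCount = readVarInt(buff); // number of keys
        int type = buff.get();           // 0: leaf, 1: node; +2: compressed
        boolean node = (type & 1) == 1;
        boolean compressed = (type & 2) != 0;
        System.out.printf("len=%d check=%d map=%d keys=%d %s%s%n",
                pageLength, check, mapId, keyCount,
                node ? "node" : "leaf", compressed ? ", compressed" : "");
    }

    // Assumed varint decoding, standing in for DataUtils.readVarInt.
    static int readVarInt(ByteBuffer buff) {
        int b = buff.get();
        if (b >= 0) {
            return b;
        }
        int x = b & 0x7f;
        for (int shift = 7; shift < 32 && b < 0; shift += 7) {
            b = buff.get();
            x |= (b & 0x7f) << shift;
        }
        return x;
    }
}
```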