提交 2705a8c5 authored 作者: Thomas Mueller

Documentation / formatting

上级 1c51be2e
......@@ -43,7 +43,7 @@ Change Log
</li><li>Lucene 2 is no longer supported.
</li><li>Fix bug in calculating default MIN and MAX values for SEQUENCE.
</li><li>Fix bug in performing IN queries with multiple values when IGNORECASE=TRUE
</li><li>Add entrypoint to org.h2.tools.Shell so it can be called from inside an application.
</li><li>Add entry-point to org.h2.tools.Shell so it can be called from inside an application.
patch by Thomas Gillet.
</li><li>Fix bug that prevented the PgServer from being stopped and started multiple times.
</li></ul>
......
......@@ -478,11 +478,11 @@ it is recommended to use it together with the MVCC mode
<h2 id="fileFormat">File Format</h2>
<p>
The data is stored in one file.
The file contains two file headers (for safety), and a number of chunks.
The data is stored in one file.
The file contains two file headers (for safety), and a number of chunks.
The file headers are one block each; a block is 4096 bytes.
Each chunk is at least one block, but typically 200 blocks or more.
Data is stored in the chunks in the form of a
Data is stored in the chunks in the form of a
<a href="https://en.wikipedia.org/wiki/Log-structured_file_system">log structured storage</a>.
There is one chunk for every version.
</p>
......@@ -495,12 +495,12 @@ As an example, the following code:
</p>
<pre>
MVStore s = MVStore.open(fileName);
MVMap<Integer, String> map = s.openMap("data");
for (int i = 0; i < 400; i++) {
MVMap&lt;Integer, String&gt; map = s.openMap("data");
for (int i = 0; i &lt; 400; i++) {
map.put(i, "Hello");
}
s.commit();
for (int i = 0; i < 100; i++) {
for (int i = 0; i &lt; 100; i++) {
map.put(0, "Hi");
}
s.commit();
......@@ -522,7 +522,7 @@ will result in the following two chunks (excluding metadata):
</p>
<p>
That means each chunk contains the changes of one version:
the new version of the changed pages and the parent pages, recursively, up to the root page.
the new version of the changed pages and the parent pages, recursively, up to the root page.
Pages in subsequent chunks refer to pages in earlier chunks.
</p>
......
......@@ -24,12 +24,12 @@ public interface Aggregate {
void init(Connection conn) throws SQLException;
/**
* This method must return the H2 data type, {@link org.h2.value.Value},
* of the aggregate function, given the H2 data type of the input data.
* This method must return the H2 data type, {@link org.h2.value.Value},
* of the aggregate function, given the H2 data type of the input data.
* The method should check here if the number of parameters
* passed is correct, and if not it should throw an exception.
*
* @param inputTypes the H2 data type of the parameters,
* @param inputTypes the H2 data type of the parameters,
* @return the H2 data type of the result
* @throws SQLException if the number/type of parameters passed is incorrect
*/
......
......@@ -16,6 +16,7 @@ import java.sql.SQLException;
* Please note this interface only has limited support for data types.
* If you need data types that don't have a corresponding SQL type
* (for example GEOMETRY), then use the {@link Aggregate} interface.
* </p>
*/
public interface AggregateFunction {
......
......@@ -137,10 +137,10 @@ public class Bnf {
rule.setLinks(ruleMap);
rule.accept(visitor);
}
/**
* Check whether the statement starts with a whitespace.
*
*
* @param s the statement
* @return if the statement is not empty and starts with a whitespace
*/
......
......@@ -530,7 +530,7 @@ public class Session extends SessionWithState {
throw DbException.get(ErrorCode.COMMIT_ROLLBACK_NOT_ALLOWED);
}
}
private void endTransaction() {
if (unlinkLobMap != null && unlinkLobMap.size() > 0) {
// need to flush the transaction log, because we can't unlink lobs if the
......
......@@ -91,7 +91,8 @@ public class UserAggregate extends DbObjectBase {
}
/**
* Wrap {@link AggregateFunction} in order to behave as {@link org.h2.api.Aggregate}
* Wrap {@link AggregateFunction} in order to behave as
* {@link org.h2.api.Aggregate}
**/
private static class AggregateWrapper implements Aggregate {
private final AggregateFunction aggregateFunction;
......
......@@ -982,6 +982,7 @@ public class MVMap<K, V> extends AbstractMap<K, V>
* Remove the given page (make the space available).
*
* @param pos the position of the page to remove
* @param memory the number of bytes used for this page
*/
protected void removePage(long pos, int memory) {
store.removePage(this, pos, memory);
......
......@@ -49,8 +49,6 @@ TransactionStore:
MVStore:
- console auto-complete: only tab to complete; remove newlines before autocomplete?
- maybe change the length code to have lower gaps
- improve memory calculation for transient and cache
......@@ -1617,7 +1615,7 @@ public class MVStore {
// to support reading old versions and rollback
if (pos == 0) {
// the value could be smaller than 0 because
// in some cases a page is allocated,
// in some cases a page is allocated,
// but never stored
int count = 1 + memory / pageSplitSize;
unsavedPageCount = Math.max(0, unsavedPageCount - count);
......@@ -1806,7 +1804,7 @@ public class MVStore {
/**
* Increment the number of unsaved pages.
*
*
* @param memory the memory usage of the page
*/
void registerUnsavedPage(int memory) {
......
......@@ -74,7 +74,7 @@ public class MVStoreTool {
block.rewind();
int headerType = block.get();
if (headerType == 'H') {
pw.printf("%0" + len + "x fileHeader %s%n",
pw.printf("%0" + len + "x fileHeader %s%n",
pos,
new String(block.array(), DataUtils.LATIN).trim());
pos += blockSize;
......@@ -104,8 +104,8 @@ public class MVStoreTool {
boolean compressed = (type & 2) != 0;
boolean node = (type & 1) != 0;
pw.printf(
"+%0" + len + "x %s, map %x, %d entries, %d bytes%n",
p,
"+%0" + len + "x %s, map %x, %d entries, %d bytes%n",
p,
(node ? "node" : "leaf") +
(compressed ? " compressed" : ""),
mapId,
......@@ -129,7 +129,7 @@ public class MVStoreTool {
long s = DataUtils.readVarLong(chunk);
counts[i] = s;
}
}
}
if (mapId == 0) {
for (int i = 0; i < entries; i++) {
String k = StringDataType.INSTANCE.read(chunk);
......@@ -142,14 +142,14 @@ public class MVStoreTool {
pw.printf(" %d children < %s @ chunk %x +%0" + len + "x%n",
counts[i],
keys[i],
DataUtils.getPageChunkId(cp),
DataUtils.getPageChunkId(cp),
DataUtils.getPageOffset(cp));
}
long cp = children[entries];
pw.printf(" %d children >= %s @ chunk %x +%0" + len + "x%n",
counts[entries],
keys[entries],
DataUtils.getPageChunkId(cp),
DataUtils.getPageChunkId(cp),
DataUtils.getPageOffset(cp));
} else {
// meta map leaf
......@@ -168,18 +168,18 @@ public class MVStoreTool {
long cp = children[i];
pw.printf(" %d children @ chunk %x +%0" + len + "x%n",
counts[i],
DataUtils.getPageChunkId(cp),
DataUtils.getPageChunkId(cp),
DataUtils.getPageOffset(cp));
}
}
}
}
}
int footerPos = chunk.limit() - Chunk.FOOTER_LENGTH;
chunk.position(footerPos);
pw.printf(
"+%0" + len + "x chunkFooter %s%n",
footerPos,
new String(chunk.array(), chunk.position(),
"+%0" + len + "x chunkFooter %s%n",
footerPos,
new String(chunk.array(), chunk.position(),
Chunk.FOOTER_LENGTH, DataUtils.LATIN).trim());
}
pw.printf("%n%0" + len + "x eof%n", fileSize);
......
......@@ -96,12 +96,6 @@ public class LobStorageMap implements LobStorageInterface {
dataMap = mvStore.openMap("lobData",
new MVMapConcurrent.Builder<Long, byte[]>());
streamStore = new StreamStore(dataMap);
;; int todo; // test and then remove
// TODO currently needed to avoid out of memory,
// because memory usage is only measure in number of pages currently
// streamStore.setMaxBlockSize(32 * 1024);
}
@Override
......
......@@ -163,7 +163,7 @@ public class Shell extends Tool implements Runnable {
}
}
}
/**
* Run the shell tool with the given connection and command line settings.
* The connection will be closed when the shell exits.
......@@ -171,6 +171,7 @@ public class Shell extends Tool implements Runnable {
* <p>
* Note: using the "-url" option in {@code args} doesn't make much sense
* since it will override the {@code conn} parameter.
* </p>
*
* @param conn the connection
* @param args the command line settings
......
......@@ -111,7 +111,7 @@ public class TestLob extends TestBase {
deleteDb("lob");
FileUtils.deleteRecursive(TEMP_DIR, true);
}
private void testCleaningUpLobsOnRollback() throws Exception {
if (config.mvStore) {
return;
......@@ -132,7 +132,7 @@ public class TestLob extends TestBase {
assertEquals(0, rs.getInt(1));
conn.close();
}
private void testReadManyLobs() throws Exception {
deleteDb("lob");
Connection conn;
......
......@@ -645,8 +645,8 @@ public class TestSpatial extends TestBase {
rs.addRow(factory.createPoint(new Coordinate(x, y)));
return rs;
}
public void testAggregateWithGeometry() throws SQLException {
private void testAggregateWithGeometry() throws SQLException {
deleteDb("spatialIndex");
Connection conn = getConnection("spatialIndex");
try {
......@@ -681,7 +681,7 @@ public class TestSpatial extends TestBase {
}
return Value.GEOMETRY;
}
@Override
public void init(Connection conn) throws SQLException {
tableEnvelope = null;
......@@ -702,6 +702,6 @@ public class TestSpatial extends TestBase {
public Object getResult() throws SQLException {
return new GeometryFactory().toGeometry(tableEnvelope);
}
}
}
}
......@@ -102,7 +102,7 @@ public class TestMVStore extends TestBase {
// longer running tests
testLargerThan2G();
}
private void testFileFormatExample() {
String fileName = getBaseDir() + "/testFileFormatExample.h3";
MVStore s = MVStore.open(fileName);
......
......@@ -53,7 +53,7 @@ public class TestStreamStore extends TestBase {
testWithFullMap();
testLoop();
}
private void testSaveCount() throws IOException {
String fileName = getBaseDir() + "/testSaveCount.h3";
FileUtils.delete(fileName);
......
......@@ -299,7 +299,7 @@ class FileUnstable extends FileBase {
public synchronized FileLock tryLock(long position, long size, boolean shared) throws IOException {
return channel.tryLock(position, size, shared);
}
@Override
public String toString() {
return "unstable:" + file.toString();
......
......@@ -752,4 +752,4 @@ maginatics jdbclint lint lsm unmappable adams douglas definer invoker
fmrn fmxxx fmday fml syyyy tzd nov iyy iyyy fmc fmb fmxx tzr btc yyfxyy scc syear
overwrote though randomize readability datagram rsync mongodb divides crypto
predicted prediction wojtek hops jurczyk cbtree predict vast assumption upside
adjusted lastly sgtatham
adjusted lastly sgtatham cleaning gillet prevented
Markdown 格式
0%
您添加了 0 人到此讨论。请谨慎行事。
请先完成此评论的编辑!
注册 或者 登录 后发表评论