Commit c5d33cf2 authored by Thomas Mueller

Formatting / javadocs

Parent: a6c34373
@@ -2713,7 +2713,7 @@ public class Database implements DataHandler {
public byte[] getFileEncryptionKey() {
return fileEncryptionKey;
}
public int getPageSize() {
return pageSize;
}
...
@@ -125,7 +125,7 @@ public class MVMap<K, V> extends AbstractMap<K, V>
/**
* Add or replace a key-value pair in a branch.
*
* @param root the root page
* @param key the key (may not be null)
* @param value the value (may not be null)
...
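The method documented above is internal plumbing; for context, the same operation through the public MVStore API looks roughly like this. A minimal sketch (store file and map name are placeholder values):

    import org.h2.mvstore.MVMap;
    import org.h2.mvstore.MVStore;

    // Open a store, add or replace a key-value pair, read it back.
    MVStore s = MVStore.open("data.mv");            // placeholder file name
    MVMap<Integer, String> map = s.openMap("data"); // placeholder map name
    map.put(1, "Hello");                            // key and value may not be null
    String v = map.get(1);                          // "Hello"
    s.close();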
@@ -168,9 +168,9 @@ public class MVStore {
private CacheLongKeyLIRS<Page> cache;
/**
- * The page chunk references cache. The default size is 4 MB, and the average size is 2 KB.
- * It is split in 16 segments. The stack move distance is 2% of the expected
- * number of entries.
+ * The page chunk references cache. The default size is 4 MB, and the
+ * average size is 2 KB. It is split in 16 segments. The stack move distance
+ * is 2% of the expected number of entries.
*/
private CacheLongKeyLIRS<PageChildren> cacheChunkRef;
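The figures in that comment fit together as follows; a purely illustrative calculation (the variable names are ours, not H2's):

    long maxMemory = 4 * 1024 * 1024;             // default cache size: 4 MB
    int averageEntrySize = 2 * 1024;              // average entry size: 2 KB
    long expectedEntries = maxMemory / averageEntrySize;        // 2048 entries
    int segmentCount = 16;                        // split in 16 segments
    int stackMoveDistance = (int) (expectedEntries * 2 / 100);  // 2% of 2048 = 40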
@@ -1201,7 +1201,7 @@ public class MVStore {
}
}
}
private Set<Integer> collectReferencedChunks() {
long testVersion = lastChunk.version;
DataUtils.checkArgument(testVersion > 0, "Collect references on version 0");
@@ -1224,7 +1224,7 @@ public class MVStore {
readCount = fileStore.readCount - readCount;
return referenced;
}
private int collectReferencedChunks(Set<Integer> targetChunkSet, int mapId, long pos) {
targetChunkSet.add(DataUtils.getPageChunkId(pos));
if (DataUtils.getPageType(pos) == DataUtils.PAGE_TYPE_LEAF) {
@@ -1240,7 +1240,7 @@ public class MVStore {
return count;
}
- PageChildren readPageChunkReferences(int mapId, long pos, int parentChunk) {
+ private PageChildren readPageChunkReferences(int mapId, long pos, int parentChunk) {
if (DataUtils.getPageType(pos) == DataUtils.PAGE_TYPE_LEAF) {
return null;
}
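Both collectReferencedChunks and readPageChunkReferences branch on the encoded page position. For orientation: a position is a single long that packs the chunk id, the offset within the chunk, a length code, and the page type. A hedged sketch of the decoding, with the bit layout assumed from the 1.4.x DataUtils sources:

    // Assumed layout: [chunk id | offset | 5-bit length code | 1-bit type].
    static int getChunkId(long pos) { return (int) (pos >>> 38); }
    static int getOffset(long pos)  { return (int) (pos >> 6); }
    static int getType(long pos)    { return ((int) pos) & 1; } // 0 = leaf, 1 = node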
@@ -1323,7 +1323,7 @@ public class MVStore {
* Apply the freed space to the chunk metadata. The metadata is updated, but
* completely free chunks are not removed from the set of chunks, and the
* disk space is not yet marked as free.
*
* @param storeVersion apply up to the given version
*/
private Set<Chunk> applyFreedSpace(long storeVersion) {
@@ -1676,7 +1676,7 @@ public class MVStore {
// calculate the fill rate
long maxLengthSum = 0;
long maxLengthLiveSum = 0;
long time = getTime();
for (Chunk c : chunks.values()) {
...
@@ -86,6 +86,7 @@ public class MVStoreTool {
*
* @param fileName the name of the file
* @param writer the print writer
+ * @param details print the page details
*/
public static void dump(String fileName, Writer writer, boolean details) {
PrintWriter pw = new PrintWriter(writer, true);
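The new details flag is the third argument of dump. A minimal invocation (the file name is a placeholder):

    import java.io.PrintWriter;
    import org.h2.mvstore.MVStoreTool;

    // Dump the store layout, including per-page details.
    PrintWriter out = new PrintWriter(System.out, true);
    MVStoreTool.dump("test.mv.db", out, true);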
@@ -97,7 +98,7 @@ public class MVStoreTool {
pw.printf("File %s, %d bytes, %d MB\n", fileName, size, size / 1024 / 1024);
FileChannel file = null;
int blockSize = MVStore.BLOCK_SIZE;
TreeMap<Integer, Long> mapSizesTotal =
new TreeMap<Integer, Long>();
long pageSizeTotal = 0;
try {
@@ -139,7 +140,7 @@ public class MVStoreTool {
pos += length;
int remaining = c.pageCount;
pageCount += c.pageCount;
TreeMap<Integer, Integer> mapSizes =
new TreeMap<Integer, Integer>();
int pageSizeSum = 0;
while (remaining > 0) {
@@ -269,7 +270,7 @@ public class MVStoreTool {
}
pw.printf("%n%0" + len + "x eof%n", fileSize);
pw.printf("\n");
pw.printf("page size total: %d bytes, page count: %d, average page size: %d bytes\n",
pw.printf("page size total: %d bytes, page count: %d, average page size: %d bytes\n",
pageSizeTotal, pageCount, pageSizeTotal / pageCount);
for (Integer mapId : mapSizesTotal.keySet()) {
int percent = (int) (100 * mapSizesTotal.get(mapId) / pageSizeTotal);
...
@@ -984,7 +984,7 @@ public class Page {
this.pos = pos;
this.children = children;
}
PageChildren(Page p) {
this.pos = p.getPos();
int count = p.getRawChildPageCount();
@@ -997,16 +997,16 @@ public class Page {
int getMemory() {
return 64 + 8 * children.length;
}
/**
- * Read the page from the buffer.
+ * Read an inner node page from the buffer, but ignore the keys and
+ * values.
*
- * @param pos the position
- * @param buff the buffer
- * @param chunkId the chunk id
- * @param offset the offset within the chunk
- * @param maxLength the maximum length
+ * @param fileStore the file store
+ * @param filePos the position in the file
+ * @param mapId the map id
+ * @param pos the position
* @return the page children object
*/
static PageChildren read(FileStore fileStore, long filePos, int mapId, long pos) {
ByteBuffer buff;
@@ -1066,11 +1066,20 @@ public class Page {
}
return new PageChildren(pos, children);
}
/**
* Only keep one reference to the same chunk. Only leaf references are
* removed (references to inner nodes are not removed, as they could
* indirectly point to other chunks).
*/
void removeDuplicateChunkReferences() {
HashSet<Integer> chunks = New.hashSet();
// we don't need references to leaves in the same chunk
chunks.add(DataUtils.getPageChunkId(pos));
// possible space optimization:
// we could remove more children, for example
// we could remove all leaf references to the same chunk
// if there is also an inner node reference to that chunk
for (int i = 0; i < children.length; i++) {
long p = children[i];
if (DataUtils.getPageType(p) == DataUtils.PAGE_TYPE_NODE) {
...
@@ -50,9 +50,10 @@ public class MVTable extends TableBase {
private final ArrayList<Index> indexes = New.arrayList();
private long lastModificationId;
private volatile Session lockExclusiveSession;
// using a ConcurrentHashMap as a set
- private final ConcurrentHashMap<Session, Session> lockSharedSessions = new ConcurrentHashMap<Session, Session>();
+ private final ConcurrentHashMap<Session, Session> lockSharedSessions =
+         new ConcurrentHashMap<Session, Session>();
/**
* The queue of sessions waiting to lock the table. It is a FIFO queue to
...
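The "ConcurrentHashMap as a set" idiom noted above stores each session as both key and value. A generic sketch (String stands in for Session):

    import java.util.concurrent.ConcurrentHashMap;

    // Key and value are the same object, so the map behaves as a concurrent set.
    ConcurrentHashMap<String, String> set = new ConcurrentHashMap<String, String>();
    set.put("session-1", "session-1");             // add
    boolean held = set.containsKey("session-1");   // membership test
    set.remove("session-1");                       // remove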
@@ -254,7 +254,7 @@ public class LocalResult implements ResultInterface, ResultTarget {
public int getRowId() {
return rowId;
}
private void cloneLobs(Value[] values) {
for (int i = 0; i < values.length; i++) {
Value v = values[i];
...
@@ -327,7 +327,7 @@ public class TestCompatibility extends TestBase {
"comment='Comment Again' ENGINE=InnoDB");
stat.execute("CREATE TABLE TEST2(ID INT) ROW_FORMAT=DYNAMIC");
conn.close();
conn = getConnection("compatibility");
}
...
@@ -111,7 +111,7 @@ public class TestLob extends TestBase {
deleteDb("lob");
FileUtils.deleteRecursive(TEMP_DIR, true);
}
private void testConcurrentRemoveRead() throws Exception {
deleteDb("lob");
final String url = getURL("lob", true);
...
@@ -62,7 +62,7 @@ public class TestMultiThread extends TestBase implements Runnable {
testConcurrentInsertUpdateSelect();
testLockModeWithMultiThreaded();
}
private void testConcurrentSchemaChange() throws Exception {
String db = "testConcurrentSchemaChange";
deleteDb(db);
@@ -94,7 +94,7 @@ public class TestMultiThread extends TestBase implements Runnable {
for (Task t : tasks) {
t.get();
}
conn.close();
}
private void testConcurrentLobAdd() throws Exception {
...
@@ -277,7 +277,7 @@ public class TestConcurrent extends TestMVStore {
}
}
task.get();
// this will mark old chunks as unused,
// but not remove (and overwrite) them yet
s.commit();
// this will remove them, so we end up with
@@ -287,7 +287,7 @@ public class TestConcurrent extends TestMVStore {
s.commit();
m.put(2, 2);
s.commit();
MVMap<String, String> meta = s.getMetaMap();
int chunkCount = 0;
for (String k : meta.keyList()) {
...
@@ -1750,7 +1750,7 @@ public class TestMVStore extends TestBase {
}
}
assertTrue(chunkCount1 + ">" + chunkCount2 + ">" + chunkCount3,
chunkCount3 < chunkCount1);
for (int i = 0; i < 10 * factor; i++) {
...
@@ -57,7 +57,7 @@ public class TestIntPerfectHash extends TestBase {
test(i);
}
}
private void testBitArray() {
byte[] data = new byte[0];
BitSet set = new BitSet();
@@ -76,7 +76,7 @@ public class TestIntPerfectHash extends TestBase {
}
assertTrue(BitArray.countBits(data) == set.cardinality());
}
private int test(int size) {
Random r = new Random(size);
HashSet<Integer> set = new HashSet<Integer>();
...
@@ -765,7 +765,6 @@ compaction aggressive powerful traversing pietrzak michi karl rewriting conseque
linearly patching perfect hole sip enwiki flooding uniformly recursions happening
permanently nucleus forbidden student trusted poodle agentlib
jech ladislav cognitect sergey thompson evdokimov arykov mfulton
- dimitrijs fedotovs kingdom manley xso latvia ontwikkeling reeve
- extendable republic uniquely
- datasources accidentally recursing respecting
\ No newline at end of file
+ dimitrijs fedotovs kingdom manley xso latvia ontwikkeling reeve
+ extendable republic uniquely datasources accidentally recursing respecting
+ young sweep
\ No newline at end of file
@@ -65,10 +65,10 @@ public class IntPerfectHash {
public IntPerfectHash(byte[] data) {
this.data = data;
}
/**
* Get the hash function description.
*
* @return the data
*/
public byte[] getData() {
@@ -307,12 +307,12 @@ public class IntPerfectHash {
}
return len;
}
/**
* A stream of bytes.
*/
static class ByteStream {
private byte[] data;
private int pos;
@@ -323,19 +323,19 @@ public class IntPerfectHash {
ByteStream(byte[] data) {
this.data = data;
}
/**
* Read a byte.
*
* @return the byte, or -1.
*/
int read() {
return pos < data.length ? (data[pos++] & 255) : -1;
}
/**
* Write a byte.
*
* @param value the byte
*/
void write(byte value) {
@@ -344,26 +344,26 @@ public class IntPerfectHash {
}
data[pos++] = value;
}
/**
* Get the byte array.
*
* @return the byte array
*/
byte[] toByteArray() {
return Arrays.copyOf(data, pos);
}
}
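Reading data back through the byte[] constructor shown above works like this (a sketch; the class is package-private, so this assumes same-package access):

    // read() returns one unsigned byte per call, then -1 at the end.
    IntPerfectHash.ByteStream in =
            new IntPerfectHash.ByteStream(new byte[] { 1, 2 });
    int a = in.read();    // 1
    int b = in.read();    // 2
    int end = in.read();  // -1, end of data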
/**
* A helper class for bit arrays.
*/
public static class BitArray {
/**
* Set a bit in the array.
*
* @param data the array
* @param x the bit index
* @param value the new value
@@ -381,10 +381,10 @@ public class IntPerfectHash {
}
return data;
}
/**
* Get a bit in a bit array.
*
* @param data the array
* @param x the bit index
* @return the value
@@ -392,10 +392,10 @@ public class IntPerfectHash {
public static boolean getBit(byte[] data, int x) {
return (data[x / 8] & (1 << (x & 7))) != 0;
}
/**
* Count the number of set bits.
*
* @param data the array
* @return the number of set bits
*/
@@ -406,8 +406,8 @@ public class IntPerfectHash {
}
return count;
}
}
}
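The addressing used throughout BitArray is x / 8 for the byte and x & 7 for the bit within it (least significant bit first); a quick illustration:

    byte[] data = new byte[2];
    data[10 / 8] |= 1 << (10 & 7);     // set bit 10: byte 1, bit 2
    boolean bit = (data[10 / 8] & (1 << (10 & 7))) != 0;   // true
    // BitArray.countBits(data) would now return 1.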
@@ -105,7 +105,7 @@ public abstract class ImmutableArray3<K> implements Iterable<K> {
/**
* Get the level of "abstraction".
*
* @return the level
*/
abstract int level();
@@ -204,7 +204,7 @@ public abstract class ImmutableArray3<K> implements Iterable<K> {
/**
* Get a plain array with the given entry updated.
*
* @param <K> the type
* @param base the base type
* @param index the index
@@ -223,7 +223,7 @@ public abstract class ImmutableArray3<K> implements Iterable<K> {
/**
* Get a plain array with the given entry inserted.
*
* @param <K> the type
* @param base the base type
* @param index the index
@@ -242,7 +242,7 @@ public abstract class ImmutableArray3<K> implements Iterable<K> {
/**
* Get a plain array with the given entry removed.
*
* @param <K> the type
* @param base the base type
* @param index the index
...
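All three helpers return a copy of the base array with one slot changed. A self-contained sketch of the insert case (our own illustration of the semantics, not ImmutableArray3's code):

    import java.lang.reflect.Array;

    // Copy-on-write insert: base [a, b, c], index 1, value x -> [a, x, b, c].
    @SuppressWarnings("unchecked")
    static <K> K[] insertEntry(K[] base, int index, K value) {
        K[] result = (K[]) Array.newInstance(
                base.getClass().getComponentType(), base.length + 1);
        System.arraycopy(base, 0, result, 0, index);
        result[index] = value;
        System.arraycopy(base, index, result, index + 1, base.length - index);
        return result;
    }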