Commit c5d33cf2 authored by Thomas Mueller

Formatting / javadocs

Parent a6c34373
@@ -168,9 +168,9 @@ public class MVStore {
     private CacheLongKeyLIRS<Page> cache;

     /**
-     * The page chunk references cache. The default size is 4 MB, and the average size is 2 KB.
-     * It is split in 16 segments. The stack move distance is 2% of the expected
-     * number of entries.
+     * The page chunk references cache. The default size is 4 MB, and the
+     * average size is 2 KB. It is split in 16 segments. The stack move distance
+     * is 2% of the expected number of entries.
      */
     private CacheLongKeyLIRS<PageChildren> cacheChunkRef;
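
The rewrapped javadoc above only changes line breaks, but its numbers fit together arithmetically. A minimal sketch of how they relate, using plain Java arithmetic (an illustration based solely on the javadoc, not H2's actual cache initialization code):

    // Assumed illustration: how the javadoc's numbers relate to each other.
    long maxMemory = 4 * 1024 * 1024;          // default cache size: 4 MB
    int averageMemory = 2 * 1024;              // average entry size: 2 KB
    long expectedEntries = maxMemory / averageMemory;           // 2048 entries
    int segmentCount = 16;                     // the cache is split in 16 segments
    int stackMoveDistance = (int) (expectedEntries * 2 / 100);  // 2% of 2048 = ~40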
@@ -1240,7 +1240,7 @@ public class MVStore {
         return count;
     }

-    PageChildren readPageChunkReferences(int mapId, long pos, int parentChunk) {
+    private PageChildren readPageChunkReferences(int mapId, long pos, int parentChunk) {
         if (DataUtils.getPageType(pos) == DataUtils.PAGE_TYPE_LEAF) {
             return null;
         }
......
@@ -86,6 +86,7 @@ public class MVStoreTool {
      *
      * @param fileName the name of the file
      * @param writer the print writer
+     * @param details print the page details
      */
     public static void dump(String fileName, Writer writer, boolean details) {
         PrintWriter pw = new PrintWriter(writer, true);
......
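
The added @param line documents the existing third argument of dump. A minimal usage sketch based only on the signature shown in the hunk (the file name is a placeholder):

    import java.io.PrintWriter;

    import org.h2.mvstore.MVStoreTool;

    public class DumpExample {
        public static void main(String[] args) {
            // "test.mv.db" is a placeholder file name; passing true for the
            // 'details' parameter also prints the page details.
            MVStoreTool.dump("test.mv.db", new PrintWriter(System.out), true);
        }
    }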
@@ -999,14 +999,14 @@ public class Page {
     }

     /**
-     * Read the page from the buffer.
+     * Read an inner node page from the buffer, but ignore the keys and
+     * values.
      *
-     * @param pos the position
-     * @param buff the buffer
-     * @param chunkId the chunk id
+     * @param fileStore the file store
+     * @param filePos the position in the file
      * @param mapId the map id
-     * @param offset the offset within the chunk
-     * @param maxLength the maximum length
+     * @param pos the position
+     * @return the page children object
      */
     static PageChildren read(FileStore fileStore, long filePos, int mapId, long pos) {
         ByteBuffer buff;
@@ -1067,10 +1067,19 @@ public class Page {
         return new PageChildren(pos, children);
     }

+    /**
+     * Only keep one reference to the same chunk. Only leaf references are
+     * removed (references to inner nodes are not removed, as they could
+     * indirectly point to other chunks).
+     */
     void removeDuplicateChunkReferences() {
         HashSet<Integer> chunks = New.hashSet();
         // we don't need references to leaves in the same chunk
         chunks.add(DataUtils.getPageChunkId(pos));
+        // possible space optimization:
+        // we could remove more children, for example
+        // we could remove all leaf references to the same chunk
+        // if there is also an inner node reference to that chunk
         for (int i = 0; i < children.length; i++) {
             long p = children[i];
             if (DataUtils.getPageType(p) == DataUtils.PAGE_TYPE_NODE) {
......
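
The new javadoc and inline comments describe a keep-first deduplication keyed on chunk id: inner node references always survive, and only the first leaf reference per chunk is kept. A self-contained sketch of that idea, assuming a hypothetical bit encoding for page positions (getPageType, getChunkId, and PAGE_TYPE_NODE below are invented stand-ins for H2's DataUtils helpers):

    import java.util.ArrayList;
    import java.util.HashSet;

    public class ChunkRefDedup {

        static final int PAGE_TYPE_NODE = 1; // stand-in for DataUtils.PAGE_TYPE_NODE

        // Hypothetical encoding: lowest bit = page type, next 16 bits = chunk id.
        static int getPageType(long pos) {
            return (int) (pos & 1);
        }

        static int getChunkId(long pos) {
            return (int) ((pos >>> 1) & 0xffff);
        }

        // Keep inner node references unconditionally (they may indirectly
        // point to other chunks); keep only the first leaf reference per chunk.
        static long[] removeDuplicateChunkReferences(long[] children, int ownChunkId) {
            HashSet<Integer> chunks = new HashSet<Integer>();
            // references to leaves in the same chunk are not needed
            chunks.add(ownChunkId);
            ArrayList<Long> kept = new ArrayList<Long>();
            for (long p : children) {
                if (getPageType(p) == PAGE_TYPE_NODE || chunks.add(getChunkId(p))) {
                    kept.add(p);
                }
            }
            long[] result = new long[kept.size()];
            for (int i = 0; i < result.length; i++) {
                result[i] = kept.get(i);
            }
            return result;
        }
    }

Note that this mirrors the "possible space optimization" comment in the hunk: inner node references do not mark their chunk as seen, so a later leaf reference to that chunk is still kept.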
@@ -52,7 +52,8 @@ public class MVTable extends TableBase {
     private volatile Session lockExclusiveSession;

     // using a ConcurrentHashMap as a set
-    private final ConcurrentHashMap<Session, Session> lockSharedSessions = new ConcurrentHashMap<Session, Session>();
+    private final ConcurrentHashMap<Session, Session> lockSharedSessions =
+            new ConcurrentHashMap<Session, Session>();

     /**
      * The queue of sessions waiting to lock the table. It is a FIFO queue to
......
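
The reformatted field uses a ConcurrentHashMap as a set by mapping each session to itself. A small sketch of that idiom next to the JDK's Collections.newSetFromMap equivalent (String stands in for H2's Session type):

    import java.util.Collections;
    import java.util.Set;
    import java.util.concurrent.ConcurrentHashMap;

    public class ConcurrentSetIdiom {
        public static void main(String[] args) {
            // The idiom from the hunk: map each element to itself;
            // membership is tested with containsKey.
            ConcurrentHashMap<String, String> asMap =
                    new ConcurrentHashMap<String, String>();
            asMap.put("session-1", "session-1");
            System.out.println(asMap.containsKey("session-1")); // true

            // Equivalent JDK helper (available since Java 6).
            Set<String> asSet = Collections.newSetFromMap(
                    new ConcurrentHashMap<String, Boolean>());
            asSet.add("session-1");
            System.out.println(asSet.contains("session-1")); // true
        }
    }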
@@ -766,6 +766,5 @@ linearly patching perfect hole sip enwiki flooding uniformly recursions happening
 permanently nucleus forbidden student trusted poodle agentlib
 jech ladislav cognitect sergey thompson evdokimov arykov mfulton
 dimitrijs fedotovs kingdom manley xso latvia ontwikkeling reeve
-extendable republic uniquely young sweep
-datasources accidentally recursing respecting
\ No newline at end of file
+extendable republic uniquely datasources accidentally recursing respecting
\ No newline at end of file