Commit adec62cb authored by Noel Grandin

limit the size of the thread-pool

Parent 1a79c0ac
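
The diff below removes the store-wide ExecutorService field from MVStore and instead creates a ThreadPoolExecutor capped at 10 threads for each garbage-collection pass, threading it (together with an AtomicInteger that counts in-flight tasks) through ChunkIdsCollector.visit() and Page.readChildrenPositions(). When the counter reaches the pool's maximum size, a child page is visited synchronously on the current thread instead of being submitted, so the recursive traversal keeps making progress without growing the pool or its queue without bound. What follows is a minimal, self-contained sketch of that pattern; the names BoundedRecursiveWalk, Node, and walk are illustrative only and not part of the commit.

import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.ArrayBlockingQueue;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Future;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicInteger;

public class BoundedRecursiveWalk {

    static class Node {
        final List<Node> children = new ArrayList<>();
    }

    // Visit every node of the tree, fanning work out onto pool threads while
    // fewer than getMaximumPoolSize() tasks are in flight, and falling back
    // to plain recursion on the current thread once the cap is reached.
    static void walk(final Node node, final ThreadPoolExecutor pool,
            final AtomicInteger executingThreadCounter) {
        List<Future<?>> futures = new ArrayList<>();
        for (final Node child : node.children) {
            if (executingThreadCounter.get() >= pool.getMaximumPoolSize()) {
                // Pool saturated: recurse inline rather than queue more work,
                // since every worker may itself be blocked waiting on children.
                walk(child, pool, executingThreadCounter);
            } else {
                executingThreadCounter.incrementAndGet();
                futures.add(pool.submit(new Runnable() {
                    @Override
                    public void run() {
                        try {
                            walk(child, pool, executingThreadCounter);
                        } finally {
                            // Free a slot only once this subtree is done.
                            executingThreadCounter.decrementAndGet();
                        }
                    }
                }));
            }
        }
        // The algorithm is recursive: wait for the children dispatched above
        // before returning up the call stack.
        for (Future<?> f : futures) {
            try {
                f.get();
            } catch (InterruptedException e) {
                throw new RuntimeException(e);
            } catch (ExecutionException e) {
                throw new RuntimeException(e.getCause());
            }
        }
    }

    public static void main(String[] args) {
        // Same pool shape as the commit: 10 threads, short keep-alive,
        // bounded queue (the queue capacity here is arbitrary).
        ThreadPoolExecutor pool = new ThreadPoolExecutor(10, 10, 10L, TimeUnit.SECONDS,
                new ArrayBlockingQueue<Runnable>(64));
        AtomicInteger executingThreadCounter = new AtomicInteger(0);
        Node root = new Node();
        root.children.add(new Node());
        root.children.add(new Node());
        try {
            walk(root, pool, executingThreadCounter);
        } finally {
            pool.shutdownNow();
        }
    }
}

The AtomicInteger is the commit's own bookkeeping: as the in-diff comment notes, asking the pool itself is not reliable (ThreadPoolExecutor.getActiveCount(), for instance, is documented to return only an approximate value), and the check-then-submit sequence is still racy, so the cap is a heuristic rather than a strict bound.
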
org/h2/mvstore/MVStore.java

@@ -309,11 +309,6 @@ public class MVStore {
     private long lastFreeUnusedChunks;

-    /**
-     * Service for executing multiple reads in parallel when doing garbage collection.
-     */
-    final ExecutorService executorService;
-
     /**
      * Create and open the store.
      *
@@ -364,8 +359,6 @@ public class MVStore {
         keysPerPage = DataUtils.getConfigParam(config, "keysPerPage", 48);
         backgroundExceptionHandler =
                 (UncaughtExceptionHandler)config.get("backgroundExceptionHandler");
-        executorService = new ThreadPoolExecutor(0, 10, 10L, TimeUnit.SECONDS,
-                new ArrayBlockingQueue<Runnable>(keysPerPage + 1));
         meta = new MVMap<>(this);
         meta.init();
         if (this.fileStore != null) {
@@ -952,7 +945,6 @@ public class MVStore {
             return;
         }
         stopBackgroundThread();
-        executorService.shutdownNow(); // no need to wait for reads
         closed = true;
         storeLock.lock();
         try {
@@ -1352,11 +1344,15 @@ public class MVStore {
     }

     private Set<Integer> collectReferencedChunks() {
+        final ThreadPoolExecutor executorService = new ThreadPoolExecutor(10, 10, 10L, TimeUnit.SECONDS,
+                new ArrayBlockingQueue<Runnable>(keysPerPage + 1));
+        final AtomicInteger executingThreadCounter = new AtomicInteger(0);
+        try {
             ChunkIdsCollector collector = new ChunkIdsCollector(meta.getId());
             Set<Long> inspectedRoots = new HashSet<>();
             long pos = lastChunk.metaRootPos;
             inspectedRoots.add(pos);
-            collector.visit(pos);
+            collector.visit(pos, executorService, executingThreadCounter);
             long oldestVersionToKeep = getOldestVersionToKeep();
             MVMap.RootReference rootReference = meta.getRoot();
             do {
@@ -1364,13 +1360,13 @@ public class MVStore {
                 pos = rootPage.getPos();
                 if (!rootPage.isSaved()) {
                     collector.setMapId(meta.getId());
-                    collector.visit(rootPage);
-                } else if(inspectedRoots.add(pos)) {
+                    collector.visit(rootPage, executorService, executingThreadCounter);
+                } else if (inspectedRoots.add(pos)) {
                     collector.setMapId(meta.getId());
-                    collector.visit(pos);
+                    collector.visit(pos, executorService, executingThreadCounter);
                 }
-                for (Cursor<String, String> c = new Cursor<>(rootPage, "root."); c.hasNext(); ) {
+                for (Cursor<String, String> c = new Cursor<>(rootPage, "root."); c.hasNext();) {
                     String key = c.next();
                     assert key != null;
                     if (!key.startsWith("root.")) {
@@ -1378,15 +1374,18 @@ public class MVStore {
                     }
                     pos = DataUtils.parseHexLong(c.getValue());
                     if (DataUtils.isPageSaved(pos) && inspectedRoots.add(pos)) {
-                        // to allow for something like "root.tmp.123" to be processed
+                        // to allow for something like "root.tmp.123" to be
+                        // processed
                         int mapId = DataUtils.parseHexInt(key.substring(key.lastIndexOf('.') + 1));
                         collector.setMapId(mapId);
-                        collector.visit(pos);
+                        collector.visit(pos, executorService, executingThreadCounter);
                     }
                 }
-            } while(rootReference.version >= oldestVersionToKeep &&
-                    (rootReference = rootReference.previous) != null);
+            } while (rootReference.version >= oldestVersionToKeep && (rootReference = rootReference.previous) != null);
             return collector.getReferenced();
+        } finally {
+            executorService.shutdownNow();
+        }
     }
     final class ChunkIdsCollector {

@@ -1417,7 +1416,7 @@ public class MVStore {
             return referencedChunks;
         }

-        public void visit(Page page) {
+        public void visit(Page page, ThreadPoolExecutor executorService, AtomicInteger executingThreadCounter) {
             long pos = page.getPos();
             if (DataUtils.isPageSaved(pos)) {
                 registerChunk(DataUtils.getPageChunkId(pos));
@@ -1430,9 +1429,9 @@ public class MVStore {
             for (int i = 0; i < count; i++) {
                 Page childPage = page.getChildPageIfLoaded(i);
                 if (childPage != null) {
-                    childCollector.visit(childPage);
+                    childCollector.visit(childPage, executorService, executingThreadCounter);
                 } else {
-                    childCollector.visit(page.getChildPagePos(i));
+                    childCollector.visit(page.getChildPagePos(i), executorService, executingThreadCounter);
                 }
             }
             // and cache resulting set of chunk ids

@@ -1442,7 +1441,7 @@ public class MVStore {
             }
         }

-        public void visit(long pos) {
+        public void visit(long pos, ThreadPoolExecutor executorService, AtomicInteger executingThreadCounter) {
             if (!DataUtils.isPageSaved(pos)) {
                 return;
             }
@@ -1461,7 +1460,7 @@ public class MVStore {
             Page page;
             if (cache != null && (page = cache.get(pos)) != null) {
                 // there is a full page in cache, use it
-                childCollector.visit(page);
+                childCollector.visit(page, executorService, executingThreadCounter);
             } else {
                 // page was not cached: read the data
                 Chunk chunk = getChunk(pos);

@@ -1472,17 +1471,8 @@ public class MVStore {
                             "Negative position {0}; p={1}, c={2}", filePos, pos, chunk.toString());
                 }
                 long maxPos = (chunk.block + chunk.len) * BLOCK_SIZE;
-                final List<Future<?>> futures = Page.readChildrenPositions(fileStore, pos, filePos, maxPos,
-                        childCollector, executorService);
-                for (Future<?> f : futures) {
-                    try {
-                        f.get();
-                    } catch (InterruptedException ex) {
-                        throw new RuntimeException(ex);
-                    } catch (ExecutionException ex) {
-                        throw DbException.convert(ex);
-                    }
-                }
+                Page.readChildrenPositions(fileStore, pos, filePos, maxPos,
+                        childCollector, executorService, executingThreadCounter);
             }
             // and cache resulting set of chunk ids
             if (cacheChunkRef != null) {
org/h2/mvstore/Page.java

@@ -13,10 +13,12 @@ import java.nio.ByteBuffer;
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.List;
-import java.util.concurrent.ExecutorService;
-import java.util.concurrent.ForkJoinPool;
+import java.util.concurrent.ExecutionException;
 import java.util.concurrent.Future;
+import java.util.concurrent.ThreadPoolExecutor;
+import java.util.concurrent.atomic.AtomicInteger;
 import org.h2.compress.Compressor;
+import org.h2.message.DbException;
 import org.h2.mvstore.type.DataType;
 import org.h2.util.Utils;
@@ -252,10 +254,9 @@ public abstract class Page implements Cloneable
      * @param maxPos the maximum position (the end of the chunk)
      * @param collector to report child pages positions to
      */
-    static List<Future<?>> readChildrenPositions(FileStore fileStore, long pos,
-            long filePos, long maxPos,
-            final MVStore.ChunkIdsCollector collector,
-            ExecutorService executorService) {
+    static void readChildrenPositions(FileStore fileStore, long pos, long filePos, long maxPos,
+            final MVStore.ChunkIdsCollector collector, final ThreadPoolExecutor executorService,
+            final AtomicInteger executingThreadCounter) {
         ByteBuffer buff;
         int maxLength = DataUtils.getPageMaxLength(pos);
         if (maxLength == DataUtils.PAGE_LARGE) {
@@ -266,10 +267,8 @@ public abstract class Page implements Cloneable
         maxLength = (int) Math.min(maxPos - filePos, maxLength);
         int length = maxLength;
         if (length < 0) {
-            throw DataUtils.newIllegalStateException(
-                    DataUtils.ERROR_FILE_CORRUPT,
-                    "Illegal page length {0} reading at {1}; max pos {2} ",
-                    length, filePos, maxPos);
+            throw DataUtils.newIllegalStateException(DataUtils.ERROR_FILE_CORRUPT,
+                    "Illegal page length {0} reading at {1}; max pos {2} ", length, filePos, maxPos);
         }
         buff = fileStore.readFully(filePos, length);
         int chunkId = DataUtils.getPageChunkId(pos);
@@ -277,49 +276,65 @@ public abstract class Page implements Cloneable
         int start = buff.position();
         int pageLength = buff.getInt();
         if (pageLength > maxLength) {
-            throw DataUtils.newIllegalStateException(
-                    DataUtils.ERROR_FILE_CORRUPT,
-                    "File corrupted in chunk {0}, expected page length =< {1}, got {2}",
-                    chunkId, maxLength, pageLength);
+            throw DataUtils.newIllegalStateException(DataUtils.ERROR_FILE_CORRUPT,
+                    "File corrupted in chunk {0}, expected page length =< {1}, got {2}", chunkId, maxLength,
+                    pageLength);
         }
         buff.limit(start + pageLength);
         short check = buff.getShort();
         int m = DataUtils.readVarInt(buff);
         int mapId = collector.getMapId();
         if (m != mapId) {
-            throw DataUtils.newIllegalStateException(
-                    DataUtils.ERROR_FILE_CORRUPT,
-                    "File corrupted in chunk {0}, expected map id {1}, got {2}",
-                    chunkId, mapId, m);
+            throw DataUtils.newIllegalStateException(DataUtils.ERROR_FILE_CORRUPT,
+                    "File corrupted in chunk {0}, expected map id {1}, got {2}", chunkId, mapId, m);
         }
-        int checkTest = DataUtils.getCheckValue(chunkId)
-                ^ DataUtils.getCheckValue(offset)
+        int checkTest = DataUtils.getCheckValue(chunkId) ^ DataUtils.getCheckValue(offset)
                 ^ DataUtils.getCheckValue(pageLength);
         if (check != (short) checkTest) {
-            throw DataUtils.newIllegalStateException(
-                    DataUtils.ERROR_FILE_CORRUPT,
-                    "File corrupted in chunk {0}, expected check value {1}, got {2}",
-                    chunkId, checkTest, check);
+            throw DataUtils.newIllegalStateException(DataUtils.ERROR_FILE_CORRUPT,
+                    "File corrupted in chunk {0}, expected check value {1}, got {2}", chunkId, checkTest, check);
         }
         int len = DataUtils.readVarInt(buff);
         int type = buff.get();
         if ((type & 1) != DataUtils.PAGE_TYPE_NODE) {
-            throw DataUtils.newIllegalStateException(
-                    DataUtils.ERROR_FILE_CORRUPT,
+            throw DataUtils.newIllegalStateException(DataUtils.ERROR_FILE_CORRUPT,
                     "Position {0} expected to be a non-leaf", pos);
         }
+        /**
+         * The logic here is a little awkward. We want to (a) execute reads in parallel, but (b)
+         * limit the number of threads we create. This is complicated by (a) the algorithm is
+         * recursive and needs to wait for children before returning up the call-stack, (b) checking
+         * the size of the thread-pool is not reliable.
+         */
         final List<Future<?>> futures = new ArrayList<>(len);
         for (int i = 0; i <= len; i++) {
             final long childPagePos = buff.getLong();
-            futures.add(executorService.submit(new Runnable() {
+            if (executingThreadCounter.get() >= executorService.getMaximumPoolSize()) {
+                collector.visit(childPagePos, executorService, executingThreadCounter);
+            } else {
+                executingThreadCounter.incrementAndGet();
+                Future<?> f = executorService.submit(new Runnable() {
                     @Override
                     public void run() {
-                        collector.visit(childPagePos);
+                        try {
+                            collector.visit(childPagePos, executorService, executingThreadCounter);
+                        } finally {
+                            executingThreadCounter.decrementAndGet();
+                        }
                     }
-            }));
+                });
+                futures.add(f);
+            }
         }
-        return futures;
+        for (Future<?> f : futures) {
+            try {
+                f.get();
+            } catch (InterruptedException ex) {
+                throw new RuntimeException(ex);
+            } catch (ExecutionException ex) {
+                throw DbException.convert(ex);
+            }
+        }
     }

     /**