提交 8cef94bb authored 作者: Noel Grandin's avatar Noel Grandin

improve performance of nioMemLZF

上级 ae7a05e5
...@@ -17,6 +17,8 @@ import java.util.LinkedHashMap; ...@@ -17,6 +17,8 @@ import java.util.LinkedHashMap;
import java.util.List; import java.util.List;
import java.util.Map; import java.util.Map;
import java.util.TreeMap; import java.util.TreeMap;
import java.util.concurrent.atomic.AtomicReference;
import java.util.concurrent.locks.ReentrantReadWriteLock;
import org.h2.api.ErrorCode; import org.h2.api.ErrorCode;
import org.h2.compress.CompressLZF; import org.h2.compress.CompressLZF;
import org.h2.message.DbException; import org.h2.message.DbException;
...@@ -401,9 +403,19 @@ class FileNioMemData { ...@@ -401,9 +403,19 @@ class FileNioMemData {
private static final int BLOCK_SIZE_MASK = BLOCK_SIZE - 1; private static final int BLOCK_SIZE_MASK = BLOCK_SIZE - 1;
private static final ByteBuffer COMPRESSED_EMPTY_BLOCK; private static final ByteBuffer COMPRESSED_EMPTY_BLOCK;
private final CompressLZF LZF = new CompressLZF(); private static final ThreadLocal<CompressLZF> LZF_THREAD_LOCAL = new ThreadLocal<CompressLZF>() {
@Override
protected CompressLZF initialValue() {
return new CompressLZF();
}
};
/** the output buffer when compressing */ /** the output buffer when compressing */
private final byte[] compressOutputBuffer = new byte[BLOCK_SIZE * 2]; private static final ThreadLocal<byte[] > COMPRESS_OUT_BUF_THREAD_LOCAL = new ThreadLocal<byte[] >() {
@Override
protected byte[] initialValue() {
return new byte[BLOCK_SIZE * 2];
}
};
private final CompressLaterCache<CompressItem, CompressItem> compressLaterCache = private final CompressLaterCache<CompressItem, CompressItem> compressLaterCache =
new CompressLaterCache<CompressItem, CompressItem>(CACHE_SIZE); new CompressLaterCache<CompressItem, CompressItem>(CACHE_SIZE);
...@@ -412,11 +424,12 @@ class FileNioMemData { ...@@ -412,11 +424,12 @@ class FileNioMemData {
private final int nameHashCode; private final int nameHashCode;
private final boolean compress; private final boolean compress;
private long length; private long length;
private ByteBuffer[] data; private AtomicReference<ByteBuffer>[] buffers;
private long lastModified; private long lastModified;
private boolean isReadOnly; private boolean isReadOnly;
private boolean isLockedExclusive; private boolean isLockedExclusive;
private int sharedLockCount; private int sharedLockCount;
private final ReentrantReadWriteLock rwLock = new ReentrantReadWriteLock();
static { static {
final byte[] n = new byte[BLOCK_SIZE]; final byte[] n = new byte[BLOCK_SIZE];
...@@ -426,11 +439,12 @@ class FileNioMemData { ...@@ -426,11 +439,12 @@ class FileNioMemData {
COMPRESSED_EMPTY_BLOCK.put(output, 0, len); COMPRESSED_EMPTY_BLOCK.put(output, 0, len);
} }
/**
 * Create a new in-memory file data object.
 *
 * @param name the file name
 * @param compress whether the file content should be compressed
 */
@SuppressWarnings("unchecked")
FileNioMemData(String name, boolean compress) {
    this.name = name;
    // cache the hash code, it is used on every lookup
    this.nameHashCode = name.hashCode();
    this.compress = compress;
    // generic array creation is unchecked, hence the suppression above
    buffers = new AtomicReference[0];
    lastModified = System.currentTimeMillis();
}
...@@ -490,7 +504,7 @@ class FileNioMemData { ...@@ -490,7 +504,7 @@ class FileNioMemData {
return false; return false;
} }
CompressItem c = (CompressItem) eldest.getKey(); CompressItem c = (CompressItem) eldest.getKey();
c.data.compress(c.page); c.data.compressPage(c.page);
return true; return true;
} }
} }
...@@ -536,23 +550,25 @@ class FileNioMemData { ...@@ -536,23 +550,25 @@ class FileNioMemData {
compressLaterCache.put(c, c); compressLaterCache.put(c, c);
} }
private void expand(int page) { private ByteBuffer expandPage(int page) {
ByteBuffer[] list = data; final ByteBuffer d = buffers[page].get();
if (page >= list.length) {
// was truncated
return;
}
ByteBuffer d = list[page];
if (d.capacity() == BLOCK_SIZE) { if (d.capacity() == BLOCK_SIZE) {
// already expanded, or not compressed // already expanded, or not compressed
return; return d;
} }
ByteBuffer out = ByteBuffer.allocateDirect(BLOCK_SIZE); synchronized (d)
if (d != COMPRESSED_EMPTY_BLOCK) { {
d.position(0); if (d.capacity() == BLOCK_SIZE) {
CompressLZF.expand(d, out); return d;
}
ByteBuffer out = ByteBuffer.allocateDirect(BLOCK_SIZE);
if (d != COMPRESSED_EMPTY_BLOCK) {
d.position(0);
CompressLZF.expand(d, out);
}
buffers[page].compareAndSet(d, out);
return out;
} }
list[page] = out;
} }
/** /**
...@@ -560,17 +576,19 @@ class FileNioMemData { ...@@ -560,17 +576,19 @@ class FileNioMemData {
* *
* @param page which page to compress * @param page which page to compress
*/ */
void compress(int page) { void compressPage(int page) {
ByteBuffer[] list = data; final ByteBuffer d = buffers[page].get();
if (page >= list.length) { synchronized (d)
// was truncated {
return; if (d.capacity() != BLOCK_SIZE) {
return; // already compressed
}
final byte[] compressOutputBuffer = COMPRESS_OUT_BUF_THREAD_LOCAL.get();
int len = LZF_THREAD_LOCAL.get().compress(d, 0, compressOutputBuffer, 0);
ByteBuffer out = ByteBuffer.allocateDirect(len);
out.put(compressOutputBuffer, 0, len);
buffers[page].compareAndSet(d, out);
} }
ByteBuffer d = list[page];
int len = LZF.compress(d, 0, compressOutputBuffer, 0);
d = ByteBuffer.allocateDirect(len);
d.put(compressOutputBuffer, 0, len);
list[page] = d;
} }
/** /**
...@@ -599,33 +617,38 @@ class FileNioMemData { ...@@ -599,33 +617,38 @@ class FileNioMemData {
* *
* @param newLength the new length * @param newLength the new length
*/ */
synchronized void truncate(long newLength) { void truncate(long newLength) {
changeLength(newLength); rwLock.writeLock().lock();
long end = MathUtils.roundUpLong(newLength, BLOCK_SIZE); try {
if (end != newLength) { changeLength(newLength);
int lastPage = (int) (newLength >>> BLOCK_SIZE_SHIFT); long end = MathUtils.roundUpLong(newLength, BLOCK_SIZE);
expand(lastPage); if (end != newLength) {
ByteBuffer d = data[lastPage]; int lastPage = (int) (newLength >>> BLOCK_SIZE_SHIFT);
for (int i = (int) (newLength & BLOCK_SIZE_MASK); i < BLOCK_SIZE; i++) { ByteBuffer d = expandPage(lastPage);
d.put(i, (byte) 0); for (int i = (int) (newLength & BLOCK_SIZE_MASK); i < BLOCK_SIZE; i++) {
} d.put(i, (byte) 0);
if (compress) { }
addToCompressLaterCache(lastPage); if (compress) {
addToCompressLaterCache(lastPage);
}
} }
} finally {
rwLock.writeLock().unlock();
} }
} }
@SuppressWarnings("unchecked")
private void changeLength(long len) { private void changeLength(long len) {
length = len; length = len;
len = MathUtils.roundUpLong(len, BLOCK_SIZE); len = MathUtils.roundUpLong(len, BLOCK_SIZE);
int blocks = (int) (len >>> BLOCK_SIZE_SHIFT); int blocks = (int) (len >>> BLOCK_SIZE_SHIFT);
if (blocks != data.length) { if (blocks != buffers.length) {
ByteBuffer[] n = new ByteBuffer[blocks]; final AtomicReference<ByteBuffer>[] newBuffers = new AtomicReference[blocks];
System.arraycopy(data, 0, n, 0, Math.min(data.length, n.length)); System.arraycopy(buffers, 0, newBuffers, 0, Math.min(buffers.length, buffers.length));
for (int i = data.length; i < blocks; i++) { for (int i = buffers.length; i < blocks; i++) {
n[i] = COMPRESSED_EMPTY_BLOCK; newBuffers[i] = new AtomicReference<ByteBuffer>(COMPRESSED_EMPTY_BLOCK);
} }
data = n; buffers = newBuffers;
} }
} }
...@@ -639,46 +662,53 @@ class FileNioMemData { ...@@ -639,46 +662,53 @@ class FileNioMemData {
* @param write true for writing * @param write true for writing
* @return the new position * @return the new position
*/ */
synchronized long readWrite(long pos, ByteBuffer b, int off, int len, boolean write) { long readWrite(long pos, ByteBuffer b, int off, int len, boolean write) {
long end = pos + len; final java.util.concurrent.locks.Lock lock = write ? rwLock.writeLock() : rwLock.readLock();
if (end > length) { lock.lock();
if (write) { try {
changeLength(end);
} else { long end = pos + len;
len = (int) (length - pos); if (end > length) {
} if (write) {
} changeLength(end);
while (len > 0) { } else {
int l = (int) Math.min(len, BLOCK_SIZE - (pos & BLOCK_SIZE_MASK)); len = (int) (length - pos);
int page = (int) (pos >>> BLOCK_SIZE_SHIFT); }
expand(page);
ByteBuffer block = data[page];
int blockOffset = (int) (pos & BLOCK_SIZE_MASK);
if (write) {
ByteBuffer tmp = b.slice();
tmp.position(off);
tmp.limit(off + l);
block.position(blockOffset);
block.put(tmp);
} else {
// duplicate, so this can be done concurrently
ByteBuffer tmp = block.duplicate();
tmp.position(blockOffset);
tmp.limit(l + blockOffset);
int oldPosition = b.position();
b.position(off);
b.put(tmp);
// restore old position
b.position(oldPosition);
} }
if (compress) { while (len > 0) {
addToCompressLaterCache(page); final int l = (int) Math.min(len, BLOCK_SIZE - (pos & BLOCK_SIZE_MASK));
final int page = (int) (pos >>> BLOCK_SIZE_SHIFT);
final ByteBuffer block = expandPage(page);
int blockOffset = (int) (pos & BLOCK_SIZE_MASK);
if (write) {
final ByteBuffer srcTmp = b.slice();
final ByteBuffer dstTmp = block.duplicate();
srcTmp.position(off);
srcTmp.limit(off + l);
dstTmp.position(blockOffset);
dstTmp.put(srcTmp);
} else {
// duplicate, so this can be done concurrently
final ByteBuffer tmp = block.duplicate();
tmp.position(blockOffset);
tmp.limit(l + blockOffset);
int oldPosition = b.position();
b.position(off);
b.put(tmp);
// restore old position
b.position(oldPosition);
}
if (compress) {
addToCompressLaterCache(page);
}
off += l;
pos += l;
len -= l;
} }
off += l; return pos;
pos += l; } finally {
len -= l; lock.unlock();
} }
return pos;
} }
/** /**
......
Markdown 格式
0%
您将 0 人添加到此讨论。请谨慎行事。
请先完成此评论的编辑!
注册 或者 后发表评论