Commit 382e2b7e authored by Thomas Mueller

The scan-resistant cache type "TQ" (two queue) is again available. To use it, append ;CACHE_TYPE=TQ to the database URL.
Parent f1609201
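
As a quick illustration of the commit message, the cache type is selected purely through the database URL. A minimal sketch, assuming the H2 driver is on the classpath; the class name, database path "~/test" and the default "sa" credentials are illustrative and not part of this commit:

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.SQLException;

public class TQCacheUrlExample {
    public static void main(String[] args) throws SQLException {
        // Appending ;CACHE_TYPE=TQ selects the scan-resistant two-queue cache;
        // "~/test" is only an example database path.
        String url = "jdbc:h2:~/test;CACHE_TYPE=TQ";
        try (Connection conn = DriverManager.getConnection(url, "sa", "")) {
            // work with the connection as usual
        }
    }
}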
......@@ -18,7 +18,9 @@ Change Log
<h1>Change Log</h1>
<h2>Next Version (unreleased)</h2>
<ul><li>-
<ul><li>There was a memory leak in the trace system. Opening and closing many connections could cause an out-of-memory error.
</li><li>The scan-resistant cache type "TQ" (two queue) is again available.
To use it, append ;CACHE_TYPE=TQ to the database URL.
</li></ul>
<h2>Version 1.3.149 Beta (2011-01-07)</h2>
......
......@@ -1633,11 +1633,18 @@ overrides this value (even if larger than the physical memory).
To get the current used maximum cache size, use the query
<code>SELECT * FROM INFORMATION_SCHEMA.SETTINGS WHERE NAME = 'info.CACHE_MAX_SIZE'</code>
</p><p>
Also included is an experimental second level soft reference cache. Rows in this cache are only garbage collected
on low memory. By default the second level cache is disabled. To enable it,
use the prefix <code>SOFT_</code>. Example: <code>jdbc:h2:~/test;CACHE_TYPE=SOFT_LRU</code>.
The cache might not actually improve performance. If you plan to use it,
please run your own test cases first.
An experimental scan-resistant cache algorithm "Two Queue" (2Q) is available.
To enable it, append <code>;CACHE_TYPE=TQ</code> to the database URL.
The cache might not actually improve performance.
If you plan to use it, please run your own test cases first.
</p><p>
Also included is an experimental second level soft reference cache.
Rows in this cache are only garbage collected on low memory.
By default the second level cache is disabled.
To enable it, use the prefix <code>SOFT_</code>.
Example: <code>jdbc:h2:~/test;CACHE_TYPE=SOFT_LRU</code>.
The cache might not actually improve performance.
If you plan to use it, please run your own test cases first.
</p><p>
To get information about page reads and writes, and the current caching algorithm in use,
call <code>SELECT * FROM INFORMATION_SCHEMA.SETTINGS</code>. The number of pages read / written
......
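
To tie the two documentation paragraphs together, here is a small sketch that enables the soft reference second level cache on top of LRU and then reads the documented info.CACHE_MAX_SIZE setting back from the SETTINGS table quoted above. Class name and database path are illustrative only:

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.sql.Statement;

public class CacheSettingsExample {
    public static void main(String[] args) throws SQLException {
        // SOFT_ prefix: rows evicted from the main cache are kept in a
        // second level cache of soft references until memory gets low.
        String url = "jdbc:h2:~/test;CACHE_TYPE=SOFT_LRU";
        try (Connection conn = DriverManager.getConnection(url, "sa", "");
                Statement stat = conn.createStatement();
                ResultSet rs = stat.executeQuery(
                        "SELECT NAME, VALUE FROM INFORMATION_SCHEMA.SETTINGS"
                        + " WHERE NAME = 'info.CACHE_MAX_SIZE'")) {
            while (rs.next()) {
                System.out.println(rs.getString("NAME") + " = " + rs.getString("VALUE"));
            }
        }
    }
}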
......@@ -576,4 +576,8 @@ public class PageDataLeaf extends PageData {
index.memoryChange((Constants.MEMORY_PAGE_DATA + memoryData + index.getPageStore().getPageSize()) >> 2);
}
public boolean isStream() {
return firstOverflowPageId > 0;
}
}
......@@ -258,4 +258,8 @@ public class PageDataOverflow extends Page {
return true;
}
public boolean isStream() {
return true;
}
}
......@@ -56,8 +56,9 @@ public interface Cache {
* Remove an object from the cache.
*
* @param pos the unique key of the element
* @return true if the key was in the cache
*/
void remove(int pos);
boolean remove(int pos);
/**
* Get an element from the cache if it is available.
......
......@@ -21,6 +21,13 @@ public class CacheLRU implements Cache {
static final String TYPE_NAME = "LRU";
private final CacheWriter writer;
/**
* Use First-In-First-Out (don't move recently used items to the front of
* the queue).
*/
private final boolean fifo;
private final CacheObject head = new CacheHead();
private final int mask;
private CacheObject[] values;
......@@ -41,9 +48,10 @@ public class CacheLRU implements Cache {
*/
private int memory;
CacheLRU(CacheWriter writer, int maxMemoryKb) {
this.setMaxMemory(maxMemoryKb);
CacheLRU(CacheWriter writer, int maxMemoryKb, boolean fifo) {
this.writer = writer;
this.fifo = fifo;
this.setMaxMemory(maxMemoryKb);
this.len = MathUtils.nextPowerOf2(maxMemory / 64);
this.mask = len - 1;
MathUtils.checkPowerOf2(len);
......@@ -66,7 +74,9 @@ public class CacheLRU implements Cache {
}
Cache cache;
if (CacheLRU.TYPE_NAME.equals(cacheType)) {
cache = new CacheLRU(writer, cacheSize);
cache = new CacheLRU(writer, cacheSize, false);
} else if (CacheTQ.TYPE_NAME.equals(cacheType)) {
cache = new CacheTQ(writer, cacheSize);
} else {
throw DbException.getInvalidValueException("CACHE_TYPE", cacheType);
}
......@@ -112,8 +122,10 @@ public class CacheLRU implements Cache {
DbException.throwInternalError("old!=record pos:" + pos + " old:" + old + " new:" + rec);
}
}
removeFromLinkedList(rec);
addToFront(rec);
if (!fifo) {
removeFromLinkedList(rec);
addToFront(rec);
}
}
return old;
}
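
The new fifo flag lets the same CacheLRU class act either as a true LRU cache or as a plain FIFO cache: in FIFO mode a hit does not move the entry to the front of the linked list (see the if (!fifo) blocks above and in get() further down). The same distinction is exposed by java.util.LinkedHashMap, which the old reference code at the bottom of this file already uses with accessOrder = true; the following is only a self-contained analogy, not H2 code:

import java.util.LinkedHashMap;
import java.util.Map;

public class AccessOrderDemo {
    public static void main(String[] args) {
        // accessOrder = true: iteration goes from least to most recently
        // accessed (LRU behaviour, a get() reorders the entry).
        // accessOrder = false: pure insertion order (FIFO behaviour).
        Map<Integer, String> lru = new LinkedHashMap<>(16, 0.75f, true);
        Map<Integer, String> fifo = new LinkedHashMap<>(16, 0.75f, false);
        for (int i = 1; i <= 3; i++) {
            lru.put(i, "page " + i);
            fifo.put(i, "page " + i);
        }
        lru.get(1);
        fifo.get(1);
        System.out.println(lru.keySet());  // [2, 3, 1]
        System.out.println(fifo.keySet()); // [1, 2, 3]
    }
}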
......@@ -132,7 +144,19 @@ public class CacheLRU implements Cache {
int rc = recordCount;
boolean flushed = false;
CacheObject next = head.cacheNext;
while (mem * 4 > maxMemory * 3 && rc > Constants.CACHE_MIN_RECORDS) {
while (true) {
if (rc <= Constants.CACHE_MIN_RECORDS) {
break;
}
if (changed.size() == 0) {
if (mem <= maxMemory) {
break;
}
} else {
if (mem * 4 <= maxMemory * 3) {
break;
}
}
CacheObject check = next;
next = check.cacheNext;
i++;
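
The rewritten eviction loop above only changes the stop condition, roughly: never evict below Constants.CACHE_MIN_RECORDS entries; if no changed (dirty) pages have been collected so far, stop once memory is at or below the maximum; if dirty pages are pending, keep trimming down to 75% of the maximum. A condensed restatement of that condition as a sketch; the method name is made up for illustration:

// Sketch only: mirrors the break conditions of the eviction loop above.
static boolean canStopEvicting(int recordCount, int minRecords,
        int memory, int maxMemory, int changedCount) {
    if (recordCount <= minRecords) {
        return true;                      // keep a minimum number of records
    }
    if (changedCount == 0) {
        return memory <= maxMemory;       // clean cache: the maximum is enough
    }
    return memory * 4 <= maxMemory * 3;   // dirty pages pending: trim to 75%
}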
......@@ -219,11 +243,11 @@ public class CacheLRU implements Cache {
rec.cachePrevious = null;
}
public void remove(int pos) {
public boolean remove(int pos) {
int index = pos & mask;
CacheObject rec = values[index];
if (rec == null) {
return;
return false;
}
if (rec.getPos() == pos) {
values[index] = rec.cacheChained;
......@@ -233,7 +257,7 @@ public class CacheLRU implements Cache {
last = rec;
rec = rec.cacheChained;
if (rec == null) {
return;
return false;
}
} while (rec.getPos() != pos);
last.cacheChained = rec.cacheChained;
......@@ -248,6 +272,7 @@ public class CacheLRU implements Cache {
DbException.throwInternalError("not removed: " + o);
}
}
return true;
}
public CacheObject find(int pos) {
......@@ -261,8 +286,10 @@ public class CacheLRU implements Cache {
public CacheObject get(int pos) {
CacheObject rec = find(pos);
if (rec != null) {
removeFromLinkedList(rec);
addToFront(rec);
if (!fifo) {
removeFromLinkedList(rec);
addToFront(rec);
}
}
return rec;
}
......@@ -334,65 +361,3 @@ public class CacheLRU implements Cache {
}
}
// Unmaintained reference code (very old)
//import java.util.Iterator;
//import java.util.LinkedHashMap;
//import java.util.Map;
//
//public class Cache extends LinkedHashMap {
//
// final static int MAX_SIZE = 1 << 10;
// private CacheWriter writer;
//
// public Cache(CacheWriter writer) {
// super(16, (float) 0.75, true);
// this.writer = writer;
// }
//
// protected boolean removeEldestEntry(Map.Entry eldest) {
// if(size() <= MAX_SIZE) {
// return false;
// }
// Record entry = (Record) eldest.getValue();
// if(entry.getDeleted()) {
// return true;
// }
// if(entry.isChanged()) {
// try {
////System.out.println("cache write "+entry.getPos());
// writer.writeBack(entry);
// } catch(SQLException e) {
// // printStackTrace not needed
// // if we use our own hashtable
// e.printStackTrace();
// }
// }
// return true;
// }
//
// public void put(Record rec) {
// put(new Integer(rec.getPos()), rec);
// }
//
// public Record get(int pos) {
// return (Record)get(new Integer(pos));
// }
//
// public void remove(int pos) {
// remove(new Integer(pos));
// }
//
// public ObjectArray getAllChanged() {
// Iterator it = values().iterator();
// ObjectArray list = New.arrayList();
// while(it.hasNext()) {
// Record rec = (Record)it.next();
// if(rec.isChanged()) {
// list.add(rec);
// }
// }
// return list;
// }
//}
......@@ -80,4 +80,8 @@ public abstract class CacheObject implements Comparable<CacheObject> {
return MathUtils.compareInt(getPos(), other.getPos());
}
public boolean isStream() {
return false;
}
}
......@@ -61,9 +61,10 @@ class CacheSecondLevel implements Cache {
map.put(r.getPos(), r);
}
public void remove(int pos) {
baseCache.remove(pos);
map.remove(pos);
public boolean remove(int pos) {
boolean result = baseCache.remove(pos);
result |= map.remove(pos) != null;
return result;
}
public void setMaxMemory(int size) {
......
/*
* Copyright 2004-2010 H2 Group. Multiple-Licensed under the H2 License,
* Version 1.0, and under the Eclipse Public License, Version 1.0
* (http://h2database.com/html/license.html).
* Initial Developer: H2 Group
*/
package org.h2.util;
import java.util.ArrayList;
/**
* An alternative cache implementation. It uses two caches internally: an LRU
* cache and a FIFO cache. Entries are first kept in the FIFO cache; when an
* entry is referenced again it is marked in a small set of recently used keys,
* and on the next reference it is moved to the LRU cache. Stream pages are
* never added to the LRU cache. The algorithm is intended to be more or less
* scan resistant, and it does not cache large rows in the LRU cache.
*/
public class CacheTQ implements Cache {
static final String TYPE_NAME = "TQ";
private final Cache lru;
private final Cache fifo;
private final SmallLRUCache<Integer, Object> recentlyUsed = SmallLRUCache.newInstance(1024);
private int lastUsed = -1;
private int maxMemory;
CacheTQ(CacheWriter writer, int maxMemoryKb) {
this.maxMemory = maxMemoryKb;
lru = new CacheLRU(writer, (int) (maxMemoryKb * 0.8), false);
fifo = new CacheLRU(writer, (int) (maxMemoryKb * 0.2), true);
setMaxMemory(4 * maxMemoryKb);
}
public void clear() {
lru.clear();
fifo.clear();
recentlyUsed.clear();
lastUsed = -1;
}
public CacheObject find(int pos) {
CacheObject r = lru.find(pos);
if (r == null) {
r = fifo.find(pos);
}
return r;
}
public CacheObject get(int pos) {
CacheObject r = lru.find(pos);
if (r != null) {
return r;
}
r = fifo.find(pos);
if (r != null && !r.isStream()) {
if (recentlyUsed.get(pos) != null) {
if (lastUsed != pos) {
fifo.remove(pos);
lru.put(r);
}
} else {
recentlyUsed.put(pos, this);
}
lastUsed = pos;
}
return r;
}
public ArrayList<CacheObject> getAllChanged() {
ArrayList<CacheObject> changed = New.arrayList();
changed.addAll(lru.getAllChanged());
changed.addAll(fifo.getAllChanged());
return changed;
}
public int getMaxMemory() {
return maxMemory;
}
public int getMemory() {
return lru.getMemory() + fifo.getMemory();
}
public void put(CacheObject r) {
if (r.isStream()) {
fifo.put(r);
} else if (recentlyUsed.get(r.getPos()) != null) {
lru.put(r);
} else {
fifo.put(r);
lastUsed = r.getPos();
}
}
public boolean remove(int pos) {
boolean result = lru.remove(pos);
if (!result) {
result = fifo.remove(pos);
}
recentlyUsed.remove(pos);
return result;
}
public void setMaxMemory(int maxMemoryKb) {
this.maxMemory = maxMemoryKb;
lru.setMaxMemory((int) (maxMemoryKb * 0.8));
fifo.setMaxMemory((int) (maxMemoryKb * 0.2));
recentlyUsed.setMaxSize(4 * maxMemoryKb);
}
public CacheObject update(int pos, CacheObject record) {
if (lru.find(pos) != null) {
return lru.update(pos, record);
} else {
return fifo.update(pos, record);
}
}
}
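
CacheTQ above composes two CacheLRU instances (roughly 80% of the memory budget for the LRU part and 20% for the FIFO part) plus a small map of recently referenced positions. The toy sketch below restates the promotion policy from the class comment using only JDK types; it leaves out stream pages, the lastUsed shortcut and all eviction, so it illustrates the idea rather than the H2 implementation:

import java.util.HashSet;
import java.util.LinkedHashMap;
import java.util.Map;
import java.util.Set;

/** Toy two-queue cache: a FIFO probation area plus an LRU main area. */
public class TwoQueueSketch<K, V> {

    private final Map<K, V> fifo = new LinkedHashMap<>(16, 0.75f, false);
    private final Map<K, V> lru = new LinkedHashMap<>(16, 0.75f, true);
    private final Set<K> seenAgain = new HashSet<>();

    public V get(K key) {
        V v = lru.get(key);
        if (v != null) {
            return v;                  // already in the main (LRU) area
        }
        v = fifo.get(key);
        if (v != null) {
            if (seenAgain.contains(key)) {
                fifo.remove(key);      // second re-reference: promote to LRU
                lru.put(key, v);
            } else {
                seenAgain.add(key);    // first re-reference: just remember it
            }
        }
        return v;
    }

    public void put(K key, V value) {
        if (seenAgain.contains(key)) {
            lru.put(key, value);       // known-hot entries go straight to LRU
        } else {
            fifo.put(key, value);      // everything else starts on probation
        }
    }
}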
......@@ -49,6 +49,10 @@ extends HashMap
return new SmallLRUCache<K, V>(size);
}
public void setMaxSize(int size) {
this.size = size;
}
//## Java 1.4 begin ##
protected boolean removeEldestEntry(Map.Entry<K, V> eldest) {
return size() > size;
......
......@@ -6,6 +6,8 @@
*/
package org.h2.test.unit;
import java.io.ByteArrayInputStream;
import java.io.InputStream;
import java.sql.Connection;
import java.sql.PreparedStatement;
import java.sql.ResultSet;
......@@ -41,13 +43,88 @@ public class TestCache extends TestBase implements CacheWriter {
test.test();
}
public void test() throws SQLException {
public void test() throws Exception {
testTQ();
testMemoryUsage();
testCache();
testCacheDb(false);
testCacheDb(true);
}
private void testTQ() throws Exception {
if (config.memory) {
return;
}
deleteDb("cache");
Connection conn = getConnection("cache;LOG=0;UNDO_LOG=0");
Statement stat = conn.createStatement();
stat.execute("create table if not exists lob(id int primary key, data blob)");
PreparedStatement prep = conn.prepareStatement("insert into lob values(?, ?)");
Random r = new Random(1);
byte[] buff = new byte[2 * 1024 * 1024];
for (int i = 0; i < 10; i++) {
prep.setInt(1, i);
r.nextBytes(buff);
prep.setBinaryStream(2, new ByteArrayInputStream(buff));
prep.execute();
}
stat.execute("create table if not exists test(id int primary key, data varchar)");
prep = conn.prepareStatement("insert into test values(?, ?)");
for (int i = 0; i < 20000; i++) {
prep.setInt(1, i);
prep.setString(2, "Hello");
prep.execute();
}
conn.close();
testTQ("LRU", false);
testTQ("TQ", true);
}
private void testTQ(String cacheType, boolean scanResistant) throws Exception {
Connection conn = getConnection("cache;CACHE_TYPE=" + cacheType + ";CACHE_SIZE=4096");
Statement stat = conn.createStatement();
PreparedStatement prep;
for (int k = 0; k < 10; k++) {
int rc;
prep = conn.prepareStatement("select * from test where id = ?");
rc = getReadCount(stat);
int p = 0;
for (int x = 0; x < 2; x++) {
for (int i = 0; i < 15000; i++) {
prep.setInt(1, i);
prep.executeQuery();
p++;
}
}
int rcData = getReadCount(stat) - rc;
if (scanResistant && k > 0) {
// TQ is expected to keep the data rows in the cache
// even if the LOB is read once in a while
assertEquals(0, rcData);
} else {
assertTrue(rcData > 0);
}
rc = getReadCount(stat);
ResultSet rs = stat.executeQuery("select * from lob where id = " + k);
rs.next();
InputStream in = rs.getBinaryStream(2);
while (in.read() >= 0) {
// ignore
}
in.close();
int rcLob = getReadCount(stat) - rc;
assertTrue(rcLob > 0);
}
conn.close();
}
private int getReadCount(Statement stat) throws Exception {
ResultSet rs;
rs = stat.executeQuery("select value from information_schema.settings where name = 'info.FILE_READ'");
rs.next();
return rs.getInt(1);
}
private void testMemoryUsage() throws SQLException {
if (!config.traceTest) {
return;
......