Commit 9e1de76b authored by noelgrandin

Experimental off-heap memory storage engines "nioMemFS:" and "nioMemLZF:",

a suggestion from markaddleman@gmail.com.
Parent: c1897c62
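A minimal usage sketch of the new prefixes, assuming H2's usual file-system prefix convention in database URLs (the database name "test" and the "sa" credentials are illustrative, not part of this commit):

import java.sql.Connection;
import java.sql.DriverManager;

// off-heap in-memory database; file blocks are kept in direct ByteBuffers
Connection conn = DriverManager.getConnection("jdbc:h2:nioMemFS:test", "sa", "");
// same, but with LZF compression of the off-heap blocks
Connection connLzf = DriverManager.getConnection("jdbc:h2:nioMemLZF:test", "sa", "");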
@@ -60,6 +60,8 @@ Change Log
</li><li>Supporting dropping an index for Lucene full-text indexes.
</li><li>Optimized performance for SELECT ... ORDER BY X LIMIT Y OFFSET Z
queries for in-memory databases using partial sort (by Sergi Vladykin).
</li><li>Experimental off-heap memory storage engines "nioMemFS:" and "nioMemLZF:",
a suggestion from markaddleman@gmail.com.
</li></ul>
<h2>Version 1.3.170 (2012-11-30)</h2>
......
@@ -49,6 +49,8 @@
package org.h2.compress;
import java.nio.ByteBuffer;
/**
* <p>
* This class implements the LZF lossless data compression algorithm. LZF is a
@@ -122,6 +124,13 @@ public final class CompressLZF implements Compressor {
return (in[inPos] << 8) | (in[inPos + 1] & 255);
}
/**
* Return an int whose lower two bytes are the bytes at index and index + 1.
*/
private static int first(ByteBuffer in, int inPos) {
return (in.get(inPos) << 8) | (in.get(inPos + 1) & 255);
}
/**
* Shift v 1 byte left, add value at index inPos+2.
*/
@@ -129,6 +138,13 @@ public final class CompressLZF implements Compressor {
return (v << 8) | (in[inPos + 2] & 255);
}
/**
* Shift v 1 byte left, add value at index inPos+2.
*/
private static int next(int v, ByteBuffer in, int inPos) {
return (v << 8) | (in.get(inPos + 2) & 255);
}
/**
* Compute the address in the hash table.
*/
@@ -235,6 +251,105 @@ public final class CompressLZF implements Compressor {
return outPos;
}
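/**
* Compress a number of bytes from a ByteBuffer.
*
* @param in the input data
* @param inLen the number of bytes to compress
* @param out the output area
* @param outPos the offset in the output array
* @return the end position
*/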
public int compress(ByteBuffer in, int inLen, byte[] out, int outPos) {
int inPos = 0;
if (cachedHashTable == null) {
cachedHashTable = new int[HASH_SIZE];
}
int[] hashTab = cachedHashTable;
int literals = 0;
outPos++;
int future = first(in, 0);
while (inPos < inLen - 4) {
byte p2 = in.get(inPos + 2);
// next
future = (future << 8) + (p2 & 255);
int off = hash(future);
int ref = hashTab[off];
hashTab[off] = inPos;
// if (ref < inPos
// && ref > 0
// && (off = inPos - ref - 1) < MAX_OFF
// && in[ref + 2] == p2
// && (((in[ref] & 255) << 8) | (in[ref + 1] & 255)) ==
// ((future >> 8) & 0xffff)) {
if (ref < inPos
&& ref > 0
&& (off = inPos - ref - 1) < MAX_OFF
&& in.get(ref + 2) == p2
&& in.get(ref + 1) == (byte) (future >> 8)
&& in.get(ref) == (byte) (future >> 16)) {
// match
int maxLen = inLen - inPos - 2;
if (maxLen > MAX_REF) {
maxLen = MAX_REF;
}
if (literals == 0) {
// multiple back-references,
// so there is no literal run control byte
outPos--;
} else {
// set the control byte at the start of the literal run
// to store the number of literals
out[outPos - literals - 1] = (byte) (literals - 1);
literals = 0;
}
int len = 3;
while (len < maxLen && in.get(ref + len) == in.get(inPos + len)) {
len++;
}
len -= 2;
if (len < 7) {
out[outPos++] = (byte) ((off >> 8) + (len << 5));
} else {
out[outPos++] = (byte) ((off >> 8) + (7 << 5));
out[outPos++] = (byte) (len - 7);
}
out[outPos++] = (byte) off;
// move one byte forward to allow for a literal run control byte
outPos++;
inPos += len;
// rebuild the future, and store the last bytes to the hashtable.
// Storing hashes of the last bytes in back-reference improves
// the compression ratio and only reduces speed slightly.
future = first(in, inPos);
future = next(future, in, inPos);
hashTab[hash(future)] = inPos++;
future = next(future, in, inPos);
hashTab[hash(future)] = inPos++;
} else {
// copy one byte from input to output as part of literal
out[outPos++] = in.get(inPos++);
literals++;
// at the end of this literal chunk, write the length
// to the control byte and start a new chunk
if (literals == MAX_LITERAL) {
out[outPos - literals - 1] = (byte) (literals - 1);
literals = 0;
// move ahead one byte to allow for the
// literal run control byte
outPos++;
}
}
}
// write the remaining few bytes as literals
while (inPos < inLen) {
out[outPos++] = in.get(inPos++);
literals++;
if (literals == MAX_LITERAL) {
out[outPos - literals - 1] = (byte) (literals - 1);
literals = 0;
outPos++;
}
}
// writes the final literal run length to the control byte
out[outPos - literals - 1] = (byte) (literals - 1);
if (literals == 0) {
outPos--;
}
return outPos;
}
public void expand(byte[] in, int inPos, int inLen, byte[] out, int outPos, int outLen) {
// if ((inPos | outPos | outLen) < 0) {
if (inPos < 0 || outPos < 0 || outLen < 0) {
@@ -282,6 +397,53 @@ public final class CompressLZF implements Compressor {
} while (outPos < outLen);
}
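/**
* Expand a number of compressed bytes from a ByteBuffer into another ByteBuffer.
*
* @param in the compressed data
* @param inPos the offset in the input buffer
* @param inLen the number of compressed bytes
* @param out the output buffer
* @param outPos the offset in the output buffer
* @param outLen the size of the uncompressed data
*/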
public void expand(ByteBuffer in, int inPos, int inLen, ByteBuffer out, int outPos, int outLen) {
// if ((inPos | outPos | outLen) < 0) {
if (inPos < 0 || outPos < 0 || outLen < 0) {
throw new IllegalArgumentException();
}
do {
int ctrl = in.get(inPos++) & 255;
if (ctrl < MAX_LITERAL) {
// literal run of length = ctrl + 1,
ctrl++;
// copy to output and move forward this many bytes
// (System.arraycopy cannot be used here: in and out are ByteBuffers, not arrays)
for (int i = 0; i < ctrl; i++) {
out.put(outPos + i, in.get(inPos + i));
}
outPos += ctrl;
inPos += ctrl;
} else {
// back reference
// the highest 3 bits are the match length
int len = ctrl >> 5;
// if the length is maxed, add the next byte to the length
if (len == 7) {
len += in.get(inPos++) & 255;
}
// minimum back-reference is 3 bytes,
// so 2 was subtracted before storing size
len += 2;
// ctrl is now the offset for a back-reference...
// the logical AND operation removes the length bits
ctrl = -((ctrl & 0x1f) << 8) - 1;
// the next byte augments/increases the offset
ctrl -= in.get(inPos++) & 255;
// copy the back-reference bytes from the given
// location in output to current position
ctrl += outPos;
if (outPos + len >= out.capacity()) {
// reduce array bounds checking
throw new ArrayIndexOutOfBoundsException();
}
for (int i = 0; i < len; i++) {
out.put(outPos++, out.get(ctrl++));
}
}
} while (outPos < outLen);
}
public int getAlgorithm() { public int getAlgorithm() {
return Compressor.LZF; return Compressor.LZF;
} }
......
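A minimal round-trip sketch of the new ByteBuffer overloads above (buffer sizes and the sample data are illustrative, mirroring how FileNioMemData further down uses them):

import java.nio.ByteBuffer;
import org.h2.compress.CompressLZF;

CompressLZF lzf = new CompressLZF();
ByteBuffer page = ByteBuffer.allocateDirect(1024);
for (int i = 0; i < 1024; i++) {
    page.put(i, (byte) (i % 16)); // repetitive data compresses well
}
// compress the direct buffer into a temporary byte array
byte[] buffer = new byte[2 * 1024];
int len = lzf.compress(page, 1024, buffer, 0);
ByteBuffer compressed = ByteBuffer.allocateDirect(len);
compressed.put(buffer, 0, len);
// expand it back into a full-size direct buffer
ByteBuffer restored = ByteBuffer.allocateDirect(1024);
lzf.expand(compressed, 0, len, restored, 0, 1024);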
@@ -72,6 +72,8 @@ public abstract class FilePath {
"org.h2.store.fs.FilePathDisk",
"org.h2.store.fs.FilePathMem",
"org.h2.store.fs.FilePathMemLZF",
"org.h2.store.fs.FilePathNioMem",
"org.h2.store.fs.FilePathNioMemLZF",
"org.h2.store.fs.FilePathSplit", "org.h2.store.fs.FilePathSplit",
"org.h2.store.fs.FilePathNio", "org.h2.store.fs.FilePathNio",
"org.h2.store.fs.FilePathNioMapped", "org.h2.store.fs.FilePathNioMapped",
......
/*
* Copyright 2004-2013 H2 Group. Multiple-Licensed under the H2 License,
* Version 1.0, and under the Eclipse Public License, Version 1.0
* (http://h2database.com/html/license.html).
* Initial Developer: H2 Group
*/
package org.h2.store.fs;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.nio.ByteBuffer;
import java.nio.channels.FileChannel;
import java.nio.channels.FileLock;
import java.util.ArrayList;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;
import java.util.TreeMap;
import org.h2.compress.CompressLZF;
import org.h2.constant.ErrorCode;
import org.h2.message.DbException;
import org.h2.util.MathUtils;
import org.h2.util.New;
/**
* This file system keeps files fully in memory. There is an option to compress
* file blocks to save memory.
*/
public class FilePathNioMem extends FilePath {
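/** All in-memory files, keyed by canonical file name. */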
private static final TreeMap<String, FileNioMemData> MEMORY_FILES = new TreeMap<String, FileNioMemData>();
public FilePathNioMem getPath(String path) {
FilePathNioMem p = new FilePathNioMem();
p.name = getCanonicalPath(path);
return p;
}
public long size() {
return getMemoryFile().length();
}
public void moveTo(FilePath newName) {
synchronized (MEMORY_FILES) {
FileNioMemData f = getMemoryFile();
f.setName(newName.name);
MEMORY_FILES.remove(name);
MEMORY_FILES.put(newName.name, f);
}
}
public boolean createFile() {
synchronized (MEMORY_FILES) {
if (exists()) {
return false;
}
getMemoryFile();
}
return true;
}
public boolean exists() {
if (isRoot()) {
return true;
}
synchronized (MEMORY_FILES) {
return MEMORY_FILES.get(name) != null;
}
}
public void delete() {
if (isRoot()) {
return;
}
synchronized (MEMORY_FILES) {
MEMORY_FILES.remove(name);
}
}
public List<FilePath> newDirectoryStream() {
ArrayList<FilePath> list = New.arrayList();
synchronized (MEMORY_FILES) {
for (String n : MEMORY_FILES.tailMap(name).keySet()) {
if (n.startsWith(name)) {
list.add(getPath(n));
} else {
break;
}
}
return list;
}
}
public boolean setReadOnly() {
return getMemoryFile().setReadOnly();
}
public boolean canWrite() {
return getMemoryFile().canWrite();
}
public FilePathNioMem getParent() {
int idx = name.lastIndexOf('/');
return idx < 0 ? null : getPath(name.substring(0, idx));
}
public boolean isDirectory() {
if (isRoot()) {
return true;
}
// TODO in memory file system currently
// does not really support directories
synchronized (MEMORY_FILES) {
return MEMORY_FILES.get(name) == null;
}
}
public boolean isAbsolute() {
// TODO relative files are not supported
return true;
}
public FilePathNioMem toRealPath() {
return this;
}
public long lastModified() {
return getMemoryFile().getLastModified();
}
public void createDirectory() {
if (exists() && isDirectory()) {
throw DbException.get(ErrorCode.FILE_CREATION_FAILED_1, name + " (a file with this name already exists)");
}
// TODO directories are not really supported
}
public OutputStream newOutputStream(boolean append) throws IOException {
FileNioMemData obj = getMemoryFile();
FileNioMem m = new FileNioMem(obj, false);
return new FileChannelOutputStream(m, append);
}
public InputStream newInputStream() {
FileNioMemData obj = getMemoryFile();
FileNioMem m = new FileNioMem(obj, true);
return new FileChannelInputStream(m);
}
public FileChannel open(String mode) {
FileNioMemData obj = getMemoryFile();
return new FileNioMem(obj, "r".equals(mode));
}
private FileNioMemData getMemoryFile() {
synchronized (MEMORY_FILES) {
FileNioMemData m = MEMORY_FILES.get(name);
if (m == null) {
m = new FileNioMemData(name, compressed());
MEMORY_FILES.put(name, m);
}
return m;
}
}
private boolean isRoot() {
return name.equals(getScheme());
}
private static String getCanonicalPath(String fileName) {
fileName = fileName.replace('\\', '/');
int idx = fileName.indexOf(':') + 1;
if (fileName.length() > idx && fileName.charAt(idx) != '/') {
fileName = fileName.substring(0, idx) + "/" + fileName.substring(idx);
}
return fileName;
}
public String getScheme() {
return "nioMemFS";
}
/**
* Whether the file should be compressed.
*
* @return if it should be compressed.
*/
boolean compressed() {
return false;
}
}
/**
* A memory file system that compresses blocks to conserve memory.
*/
class FilePathNioMemLZF extends FilePathNioMem {
boolean compressed() {
return true;
}
public String getScheme() {
return "nioMemLZF";
}
}
/**
* This class represents an in-memory file.
*/
class FileNioMem extends FileBase {
/**
* The file data.
*/
final FileNioMemData data;
private final boolean readOnly;
private long pos;
FileNioMem(FileNioMemData data, boolean readOnly) {
this.data = data;
this.readOnly = readOnly;
}
public long size() {
return data.length();
}
public FileChannel truncate(long newLength) throws IOException {
if (newLength < size()) {
data.touch(readOnly);
pos = Math.min(pos, newLength);
data.truncate(newLength);
}
return this;
}
public FileChannel position(long newPos) {
this.pos = newPos;
return this;
}
public int write(ByteBuffer src) throws IOException {
int len = src.remaining();
if (len == 0) {
return 0;
}
data.touch(readOnly);
pos = data.readWrite(pos, src, 0 /* we start writing from src.position() */, len, true);
src.position(src.position() + len);
return len;
}
public int read(ByteBuffer dst) throws IOException {
int len = dst.remaining();
if (len == 0) {
return 0;
}
long newPos = data.readWrite(pos, dst, dst.position(), len, false);
len = (int) (newPos - pos);
if (len <= 0) {
return -1;
}
dst.position(dst.position() + len);
pos = newPos;
return len;
}
public long position() {
return pos;
}
public void implCloseChannel() throws IOException {
pos = 0;
}
public void force(boolean metaData) throws IOException {
// do nothing
}
public synchronized FileLock tryLock(long position, long size, boolean shared) throws IOException {
if (shared) {
if (!data.lockShared()) {
return null;
}
} else {
if (!data.lockExclusive()) {
return null;
}
}
// cast to FileChannel to avoid JDK 1.7 ambiguity
FileLock lock = new FileLock((FileChannel) null, position, size, shared) {
@Override
public boolean isValid() {
return true;
}
@Override
public void release() throws IOException {
data.unlock();
}
};
return lock;
}
public String toString() {
return data.getName();
}
}
/**
* This class contains the data of an in-memory random access file.
* Data compression using the LZF algorithm is supported as well.
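* The file blocks are stored in direct (off-heap) ByteBuffers.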
*/
class FileNioMemData {
private static final int CACHE_SIZE = 8;
private static final int BLOCK_SIZE_SHIFT = 10;
private static final int BLOCK_SIZE = 1 << BLOCK_SIZE_SHIFT;
private static final int BLOCK_SIZE_MASK = BLOCK_SIZE - 1;
private static final CompressLZF LZF = new CompressLZF();
private static final byte[] BUFFER = new byte[BLOCK_SIZE * 2];
private static final ByteBuffer COMPRESSED_EMPTY_BLOCK;
private static final Cache<CompressItem, CompressItem> COMPRESS_LATER =
new Cache<CompressItem, CompressItem>(CACHE_SIZE);
private String name;
private final boolean compress;
private long length;
private ByteBuffer[] data;
private long lastModified;
private boolean isReadOnly;
private boolean isLockedExclusive;
private int sharedLockCount;
static {
byte[] n = new byte[BLOCK_SIZE];
int len = LZF.compress(n, BLOCK_SIZE, BUFFER, 0);
COMPRESSED_EMPTY_BLOCK = ByteBuffer.allocateDirect(len);
COMPRESSED_EMPTY_BLOCK.put(BUFFER, 0, len);
}
FileNioMemData(String name, boolean compress) {
this.name = name;
this.compress = compress;
data = new ByteBuffer[0];
lastModified = System.currentTimeMillis();
}
/**
* Lock the file in exclusive mode if possible.
*
* @return if locking was successful
*/
synchronized boolean lockExclusive() {
if (sharedLockCount > 0 || isLockedExclusive) {
return false;
}
isLockedExclusive = true;
return true;
}
/**
* Lock the file in shared mode if possible.
*
* @return if locking was successful
*/
synchronized boolean lockShared() {
if (isLockedExclusive) {
return false;
}
sharedLockCount++;
return true;
}
/**
* Unlock the file.
*/
synchronized void unlock() {
if (isLockedExclusive) {
isLockedExclusive = false;
} else {
sharedLockCount = Math.max(0, sharedLockCount - 1);
}
}
/**
* This small cache compresses the data if an element leaves the cache.
*/
static class Cache<K, V> extends LinkedHashMap<K, V> {
private static final long serialVersionUID = 1L;
private final int size;
Cache(int size) {
super(size, (float) 0.75, true);
this.size = size;
}
protected boolean removeEldestEntry(Map.Entry<K, V> eldest) {
if (size() < size) {
return false;
}
CompressItem c = (CompressItem) eldest.getKey();
compress(c.data, c.page);
return true;
}
}
/**
* Represents a compressed item.
*/
static class CompressItem {
/**
* The file data.
*/
ByteBuffer[] data;
/**
* The page to compress.
*/
int page;
public int hashCode() {
return page;
}
public boolean equals(Object o) {
if (o instanceof CompressItem) {
CompressItem c = (CompressItem) o;
return c.data == data && c.page == page;
}
return false;
}
}
private static void compressLater(ByteBuffer[] data, int page) {
CompressItem c = new CompressItem();
c.data = data;
c.page = page;
synchronized (LZF) {
COMPRESS_LATER.put(c, c);
}
}
private static void expand(ByteBuffer[] data, int page) {
ByteBuffer d = data[page];
if (d.capacity() == BLOCK_SIZE) {
return;
}
ByteBuffer out = ByteBuffer.allocateDirect(BLOCK_SIZE);
if (d != COMPRESSED_EMPTY_BLOCK) {
synchronized (LZF) {
LZF.expand(d, 0, d.capacity(), out, 0, BLOCK_SIZE);
}
}
data[page] = out;
}
/**
* Compress the data of one page.
*
* @param data the page array
* @param page which page to compress
*/
static void compress(ByteBuffer[] data, int page) {
ByteBuffer d = data[page];
synchronized (LZF) {
int len = LZF.compress(d, BLOCK_SIZE, BUFFER, 0);
d = ByteBuffer.allocateDirect(len);
d.put(BUFFER, 0, len);
data[page] = d;
}
}
/**
* Update the last modified time.
*
* @param openReadOnly if the file was opened in read-only mode
*/
void touch(boolean openReadOnly) throws IOException {
if (isReadOnly || openReadOnly) {
throw new IOException("Read only");
}
lastModified = System.currentTimeMillis();
}
/**
* Get the file length.
*
* @return the length
*/
long length() {
return length;
}
/**
* Truncate the file.
*
* @param newLength the new length
*/
void truncate(long newLength) {
changeLength(newLength);
long end = MathUtils.roundUpLong(newLength, BLOCK_SIZE);
if (end != newLength) {
int lastPage = (int) (newLength >>> BLOCK_SIZE_SHIFT);
expand(data, lastPage);
ByteBuffer d = data[lastPage];
for (int i = (int) (newLength & BLOCK_SIZE_MASK); i < BLOCK_SIZE; i++) {
d.put(i, (byte)0);
}
if (compress) {
compressLater(data, lastPage);
}
}
}
private void changeLength(long len) {
length = len;
len = MathUtils.roundUpLong(len, BLOCK_SIZE);
int blocks = (int) (len >>> BLOCK_SIZE_SHIFT);
if (blocks != data.length) {
ByteBuffer[] n = new ByteBuffer[blocks];
System.arraycopy(data, 0, n, 0, Math.min(data.length, n.length));
for (int i = data.length; i < blocks; i++) {
n[i] = COMPRESSED_EMPTY_BLOCK;
}
data = n;
}
}
/**
* Read or write.
*
* @param pos the position
* @param b the byte buffer
* @param off the offset within the byte array
* @param len the number of bytes
* @param write true for writing
* @return the new position
*/
long readWrite(long pos, ByteBuffer b, int off, int len, boolean write) {
long end = pos + len;
if (end > length) {
if (write) {
changeLength(end);
} else {
len = (int) (length - pos);
}
}
while (len > 0) {
int l = (int) Math.min(len, BLOCK_SIZE - (pos & BLOCK_SIZE_MASK));
int page = (int) (pos >>> BLOCK_SIZE_SHIFT);
expand(data, page);
ByteBuffer block = data[page];
int blockOffset = (int) (pos & BLOCK_SIZE_MASK);
if (write) {
ByteBuffer tmp = b.slice();
tmp.position(off);
tmp.limit(off + l);
block.position(blockOffset);
block.put(tmp);
} else {
block.position(blockOffset);
ByteBuffer tmp = block.slice();
tmp.limit(l);
int oldPosition = b.position();
b.position(off);
b.put(tmp);
// restore old position
b.position(oldPosition);
}
if (compress) {
compressLater(data, page);
}
off += l;
pos += l;
len -= l;
}
return pos;
}
/**
* Set the file name.
*
* @param name the name
*/
void setName(String name) {
this.name = name;
}
/**
* Get the file name.
*
* @return the name
*/
String getName() {
return name;
}
/**
* Get the last modified time.
*
* @return the time
*/
long getLastModified() {
return lastModified;
}
/**
* Check whether writing is allowed.
*
* @return true if it is
*/
boolean canWrite() {
return !isReadOnly;
}
/**
* Set the read-only flag.
*
* @return true
*/
boolean setReadOnly() {
isReadOnly = true;
return true;
}
}
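A minimal sketch of exercising the new file system directly through the FilePath/FileChannel API (the path "nioMemFS:demo" is illustrative; FilePath.get is assumed to resolve registered schemes the same way as for the existing in-memory file systems):

import java.nio.ByteBuffer;
import java.nio.channels.FileChannel;
import org.h2.store.fs.FilePath;

FilePath p = FilePath.get("nioMemFS:demo");
FileChannel ch = p.open("rw");
ch.write(ByteBuffer.wrap(new byte[] { 1, 2, 3 })); // stored in a direct buffer block
ch.position(0);
ByteBuffer readBack = ByteBuffer.allocate(3);
ch.read(readBack);
ch.close();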
@@ -152,7 +152,9 @@ public abstract class TestBase {
} finally {
try {
FileUtils.deleteRecursive("memFS:", false);
FileUtils.deleteRecursive("nioMemFS:", false);
FileUtils.deleteRecursive("memLZF:", false);
FileUtils.deleteRecursive("nioMemLZF:", false);
} catch (RuntimeException e) {
e.printStackTrace();
}
......
@@ -31,17 +31,19 @@ public class TestFile extends TestBase implements DataHandler {
}
public void test() throws Exception {
- doTest(false);
- doTest(true);
+ doTest(false, false);
+ doTest(false, true);
+ doTest(true, false);
+ doTest(true, true);
}
- private void doTest(boolean compress) throws Exception {
+ private void doTest(boolean nioMem, boolean compress) throws Exception {
int len = getSize(1000, 10000);
Random random = new Random();
FileStore mem = null, file = null;
byte[] buffMem = null;
byte[] buffFile = null;
- String prefix = compress ? "memLZF:" : "memFS:";
+ String prefix = nioMem ? (compress ? "nioMemLZF:" : "nioMemFS:") : (compress ? "memLZF:" : "memFS:");
FileUtils.delete(prefix + "test");
FileUtils.delete("~/testFile");
......
@@ -70,7 +70,9 @@ public class TestFileSystem extends TestBase {
FileUtils.toRealPath(f);
testFileSystem(getBaseDir() + "/fs");
testFileSystem("memFS:");
testFileSystem("nioMemFS:");
testFileSystem("memLZF:");
testFileSystem("nioMemLZF:");
testUserHome();
try {
testFileSystem("nio:" + getBaseDir() + "/fs");
@@ -448,7 +450,8 @@ public class TestFileSystem extends TestBase {
assertTrue(FileUtils.tryDelete(fsBase + "/test2"));
FileUtils.delete(fsBase + "/test");
- if (fsBase.indexOf("memFS:") < 0 && fsBase.indexOf("memLZF:") < 0) {
+ if (fsBase.indexOf("memFS:") < 0 && fsBase.indexOf("memLZF:") < 0
+         && fsBase.indexOf("nioMemFS:") < 0 && fsBase.indexOf("nioMemLZF:") < 0) {
FileUtils.createDirectories(fsBase + "/testDir");
assertTrue(FileUtils.isDirectory(fsBase + "/testDir"));
if (!fsBase.startsWith("jdbc:")) {
......