Commit 9eb213a0 authored by Thomas Mueller

MVStore: encrypted stores are now supported - table engine

Parent: ecbc84d5
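For context, a minimal sketch of how an encrypted, MVTableEngine-backed database is opened over JDBC after this change, mirroring the new testEncryption case further down in this diff; the database name and passwords are illustrative:

    // Minimal sketch (names are illustrative): with CIPHER=AES the JDBC
    // password is "<file password> <user password>", and the table engine
    // opens the underlying MVStore encrypted, as exercised by
    // testEncryption below.
    String url = "jdbc:h2:~/mvstore" +
            ";DEFAULT_TABLE_ENGINE=org.h2.mvstore.db.MVTableEngine" +
            ";CIPHER=AES";
    Connection conn = DriverManager.getConnection(url, "sa", "filepwd userpwd");
    Statement stat = conn.createStatement();
    stat.execute("create table test(id int)");
    conn.close();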
......@@ -2402,4 +2402,8 @@ public class Database implements DataHandler {
throw DbException.throwInternalError();
}
public byte[] getFilePasswordHash() {
return filePasswordHash;
}
}
......@@ -512,7 +512,7 @@ public class DataUtils {
for (int i = 0, size = s.length(); i < size;) {
int startKey = i;
i = s.indexOf(':', i);
checkArgument(i > 0, "Not a map");
checkArgument(i >= 0, "Not a map");
String key = s.substring(startKey, i++);
StringBuilder buff = new StringBuilder();
while (i < size) {
......
......@@ -24,7 +24,7 @@ import org.h2.mvstore.cache.CacheLongKeyLIRS;
import org.h2.mvstore.cache.FilePathCache;
import org.h2.mvstore.type.StringDataType;
import org.h2.store.fs.FilePath;
import org.h2.store.fs.FilePathCrypt2;
import org.h2.store.fs.FilePathCrypt;
import org.h2.store.fs.FileUtils;
import org.h2.util.MathUtils;
import org.h2.util.New;
......@@ -43,27 +43,21 @@ H:3,...
TODO:
- file system encryption (
test and document speed,
support un-aligned operations,
test other algorithms)
- automated 'kill process' and 'power failure' test
- test stream store if data doesn't fit in memory
- mvcc with multiple transactions
- update checkstyle
- automated 'kill process' and 'power failure' test
- maybe split database into multiple files, to speed up compact
- auto-compact from time to time and on close
- test and possibly improve compact operation (for large dbs)
- performance test with encrypting file system
- possibly split chunk data into immutable and mutable
- compact: avoid processing pages using a counting bloom filter
- defragment (re-creating maps, specially those with small pages)
- remove DataType.getMaxLength (use ByteArrayOutputStream or getMemory)
- chunk header: store changed chunk data as row; maybe after the root
- chunk checksum (header, last page, 2 bytes per page?)
- file locking: solve problem that locks are shared for a VM
- store file "header" at the end of each chunk; at the end of the file
- is there a better name for the file header,
-- if it's no longer always at the beginning of a file?
-- if it's no longer always at the beginning of a file? store header?
- on insert, if the child page is already full, don't load and modify it
-- split directly (for leaves with 1 entry)
- maybe let a chunk point to possible next chunks
......@@ -80,7 +74,7 @@ TODO:
- chunk metadata: do not store default values
- support maps without values (just existence of the key)
- support maps without keys (counted b-tree features)
- use a small object cache (StringCache)
- use a small object cache (StringCache), test on Android
- dump values
- tool to import / manipulate CSV files (maybe concurrently)
- map split / merge (fast if no overlap)
......@@ -96,7 +90,10 @@ TODO:
-- to support concurrent updates and writes, and very large maps
- implement an off-heap file system
- remove change cursor, or add support for writing to branches
- file encryption / decryption using multiple threads
- file encryption: try using multiple threads
- file encryption: support un-aligned operations
- file encryption: separate algorithm/key for tweak
- file encryption: add a fast (insecure) algorithm
*/
......@@ -408,7 +405,7 @@ public class MVStore {
file = f.open(readOnly ? "r" : "rw");
if (filePassword != null) {
byte[] password = DataUtils.getPasswordBytes(filePassword);
file = new FilePathCrypt2.FileCrypt2(password, file);
file = new FilePathCrypt.FileCrypt2(password, file);
}
file = FilePathCache.wrap(file);
if (readOnly) {
......
......@@ -501,10 +501,11 @@ public class Page {
sharedFlags &= ~SHARED_KEYS;
}
Object old = keys[index];
DataType keyType = map.getKeyType();
if (old != null) {
memory -= map.getKeyType().getMemory(old);
memory -= keyType.getMemory(old);
}
memory += map.getKeyType().getMemory(key);
memory += keyType.getMemory(key);
keys[index] = key;
}
......@@ -521,8 +522,9 @@ public class Page {
values = Arrays.copyOf(values, values.length);
sharedFlags &= ~SHARED_VALUES;
}
memory -= map.getValueType().getMemory(old);
memory += map.getValueType().getMemory(value);
DataType valueType = map.getValueType();
memory -= valueType.getMemory(old);
memory += valueType.getMemory(value);
values[index] = value;
return old;
}
......@@ -772,8 +774,9 @@ public class Page {
: DataUtils.PAGE_TYPE_LEAF;
buff.put((byte) type);
int compressStart = buff.position();
DataType keyType = map.getKeyType();
for (int i = 0; i < len; i++) {
map.getKeyType().write(buff, keys[i]);
keyType.write(buff, keys[i]);
}
if (type == DataUtils.PAGE_TYPE_NODE) {
for (int i = 0; i <= len; i++) {
......@@ -783,8 +786,9 @@ public class Page {
DataUtils.writeVarLong(buff, counts[i]);
}
} else {
DataType valueType = map.getValueType();
for (int i = 0; i < len; i++) {
map.getValueType().write(buff, values[i]);
valueType.write(buff, values[i]);
}
}
if (map.getStore().getCompress()) {
......@@ -839,12 +843,14 @@ public class Page {
int maxLength = 4 + 2 + DataUtils.MAX_VAR_INT_LEN
+ DataUtils.MAX_VAR_INT_LEN + 1;
int len = keyCount;
DataType keyType = map.getKeyType();
for (int i = 0; i < len; i++) {
maxLength += map.getKeyType().getMaxLength(keys[i]);
maxLength += keyType.getMaxLength(keys[i]);
}
if (isLeaf()) {
DataType valueType = map.getValueType();
for (int i = 0; i < len; i++) {
maxLength += map.getValueType().getMaxLength(values[i]);
maxLength += valueType.getMaxLength(values[i]);
}
} else {
maxLength += 8 * len;
......@@ -912,12 +918,14 @@ public class Page {
private int calculateMemory() {
int mem = DataUtils.PAGE_MEMORY;
DataType keyType = map.getKeyType();
for (int i = 0; i < keyCount; i++) {
mem += map.getKeyType().getMemory(keys[i]);
mem += keyType.getMemory(keys[i]);
}
if (this.isLeaf()) {
DataType valueType = map.getValueType();
for (int i = 0; i < keyCount; i++) {
mem += map.getValueType().getMemory(values[i]);
mem += valueType.getMemory(values[i]);
}
} else {
mem += this.getChildPageCount() * DataUtils.PAGE_MEMORY_CHILD;
......
......@@ -49,6 +49,7 @@ public class MVTableEngine implements TableEngine {
@Override
public TableBase createTable(CreateTableData data) {
Database db = data.session.getDatabase();
byte[] key = db.getFilePasswordHash();
String storeName = db.getDatabasePath();
MVStore.Builder builder = new MVStore.Builder();
Store store;
......@@ -62,6 +63,13 @@ public class MVTableEngine implements TableEngine {
if (db.isReadOnly()) {
builder.readOnly();
}
if (key != null) {
char[] password = new char[key.length];
for (int i = 0; i < key.length; i++) {
password[i] = (char) key[i];
}
builder.encryptionKey(password);
}
store = new Store(db, builder.open());
STORES.put(storeName, store);
} else if (store.db != db) {
......
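The same encryption path can also be exercised directly against the MVStore API; a minimal sketch, assuming the Builder exposes a fileName(...) setter alongside the encryptionKey(...) call used by the table engine above (file name, key, and map name are illustrative):

    // Minimal sketch (assumptions noted above): open an encrypted store
    // directly, configured the same way the table engine does via
    // builder.encryptionKey(...).
    MVStore s = new MVStore.Builder().
            fileName("~/test.mv.db").
            encryptionKey("secret".toCharArray()).
            open();
    MVMap<Integer, String> map = s.openMap("data");
    map.put(1, "Hello");
    s.close();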
......@@ -24,7 +24,7 @@ import org.h2.util.StringUtils;
/**
* An encrypted file.
*/
public class FilePathCrypt2 extends FilePathWrapper {
public class FilePathCrypt extends FilePathWrapper {
private static final String SCHEME = "crypt2";
......@@ -32,7 +32,7 @@ public class FilePathCrypt2 extends FilePathWrapper {
* Register this file system.
*/
public static void register() {
FilePath.register(new FilePathCrypt2());
FilePath.register(new FilePathCrypt());
}
public FileChannel open(String mode) throws IOException {
......@@ -121,12 +121,13 @@ public class FilePathCrypt2 extends FilePathWrapper {
/**
* The length of the salt, in bytes.
*/
private static final int SALT_LENGTH = 32;
private static final int SALT_LENGTH = 8;
/**
* The number of iterations.
* The number of iterations. It is relatively low; a higher value would
* slow down opening files on Android too much.
*/
private static final int HASH_ITERATIONS = 10000;
private static final int HASH_ITERATIONS = 10;
private final FileChannel base;
......
......@@ -7,6 +7,7 @@ package org.h2.test.store;
import java.math.BigDecimal;
import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.PreparedStatement;
import java.sql.ResultSet;
import java.sql.SQLException;
......@@ -33,6 +34,7 @@ public class TestMVTableEngine extends TestBase {
}
public void test() throws Exception {
testEncryption();
testReadOnly();
testReuseDiskSpace();
testDataTypes();
......@@ -40,6 +42,26 @@ public class TestMVTableEngine extends TestBase {
testSimple();
}
private void testEncryption() throws Exception {
FileUtils.deleteRecursive(getBaseDir(), true);
String dbName = "mvstore" +
";DEFAULT_TABLE_ENGINE=org.h2.mvstore.db.MVTableEngine";
Connection conn;
Statement stat;
String url = getURL(dbName + ";CIPHER=AES", true);
String user = "sa";
String password = "123 123";
conn = DriverManager.getConnection(url, user, password);
stat = conn.createStatement();
stat.execute("create table test(id int)");
conn.close();
conn = DriverManager.getConnection(url, user, password);
stat = conn.createStatement();
stat.execute("select * from test");
conn.close();
FileUtils.deleteRecursive(getBaseDir(), true);
}
private void testReadOnly() throws Exception {
FileUtils.deleteRecursive(getBaseDir(), true);
String dbName = "mvstore" +
......
......@@ -14,8 +14,8 @@ import java.io.OutputStream;
import java.io.RandomAccessFile;
import java.nio.ByteBuffer;
import java.nio.channels.FileChannel;
import java.nio.channels.FileLock;
import java.nio.channels.FileChannel.MapMode;
import java.nio.channels.FileLock;
import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.ResultSet;
......@@ -23,7 +23,6 @@ import java.sql.SQLException;
import java.sql.Statement;
import java.util.List;
import java.util.Random;
import org.h2.dev.fs.FilePathCrypt;
import org.h2.message.DbException;
import org.h2.store.fs.FilePath;
import org.h2.store.fs.FileUtils;
......@@ -58,9 +57,8 @@ public class TestFileSystem extends TestBase {
testUnsupportedFeatures(getBaseDir());
testMemFsDir();
testClasspath();
FilePathCrypt.register();
FilePathDebug.register().setTrace(true);
testFileSystem("crypt:aes:x:" + getBaseDir() + "/fs");
// testFileSystem("crypt:007:" + getBaseDir() + "/fs");
testSimpleExpandTruncateSize();
testSplitDatabaseInZip();
......@@ -74,8 +72,7 @@ public class TestFileSystem extends TestBase {
testFileSystem("memLZF:");
testUserHome();
try {
FilePathCrypt.register();
testFileSystem("crypt:aes:x:" + getBaseDir() + "/fs");
// testFileSystem("crypt:007:" + getBaseDir() + "/fs");
testFileSystem("nio:" + getBaseDir() + "/fs");
testFileSystem("nioMapped:" + getBaseDir() + "/fs");
if (!config.splitFileSystem) {
......
/*
* Copyright 2004-2011 H2 Group. Multiple-Licensed under the H2 License,
* Version 1.0, and under the Eclipse Public License, Version 1.0
* (http://h2database.com/html/license.html).
* Initial Developer: H2 Group
*/
package org.h2.dev.fs;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.nio.ByteBuffer;
import java.nio.channels.FileChannel;
import java.nio.channels.FileLock;
import org.h2.engine.Constants;
import org.h2.message.DbException;
import org.h2.security.BlockCipher;
import org.h2.security.CipherFactory;
import org.h2.security.SHA256;
import org.h2.store.fs.FileBase;
import org.h2.store.fs.FileChannelInputStream;
import org.h2.store.fs.FileChannelOutputStream;
import org.h2.store.fs.FilePath;
import org.h2.store.fs.FilePathWrapper;
import org.h2.store.fs.FileUtils;
import org.h2.util.MathUtils;
import org.h2.util.StringUtils;
/**
* A file system that encrypts the contents of the files.
*/
public class FilePathCrypt extends FilePathWrapper {
/**
* Register this file system.
*/
public static void register() {
FilePath.register(new FilePathCrypt());
}
protected String getPrefix() {
String[] parsed = parse(name);
return getScheme() + ":" + parsed[0] + ":" + parsed[1] + ":";
}
public FilePath unwrap(String fileName) {
return FilePath.get(parse(fileName)[2]);
}
public long size() {
long len = getBase().size();
return Math.max(0, len - FileCrypt.HEADER_LENGTH - FileCrypt.BLOCK_SIZE);
}
public FileChannel open(String mode) throws IOException {
String[] parsed = parse(name);
FileChannel file = FileUtils.open(parsed[2], mode);
return new FileCrypt(name, parsed[0], parsed[1], file);
}
public OutputStream newOutputStream(boolean append) {
try {
return new FileChannelOutputStream(open("rw"), append);
} catch (IOException e) {
throw DbException.convertIOException(e, name);
}
}
public InputStream newInputStream() {
try {
return new FileChannelInputStream(open("r"));
} catch (IOException e) {
throw DbException.convertIOException(e, name);
}
}
/**
* Split the file name into algorithm, password, and base file name.
*
* @param fileName the file name
* @return an array with algorithm, password, and base file name
*/
private String[] parse(String fileName) {
if (!fileName.startsWith(getScheme())) {
DbException.throwInternalError(fileName + " doesn't start with " + getScheme());
}
fileName = fileName.substring(getScheme().length() + 1);
int idx = fileName.indexOf(':');
String algorithm, password;
if (idx < 0) {
DbException.throwInternalError(fileName + " doesn't contain encryption algorithm and password");
}
algorithm = fileName.substring(0, idx);
fileName = fileName.substring(idx + 1);
idx = fileName.indexOf(':');
if (idx < 0) {
DbException.throwInternalError(fileName + " doesn't contain encryption password");
}
password = fileName.substring(0, idx);
fileName = fileName.substring(idx + 1);
return new String[] { algorithm, password, fileName };
}
public String getScheme() {
return "crypt";
}
}
/**
* An encrypted file.
*/
class FileCrypt extends FileBase {
/**
* The length of the file header. Using a smaller header is possible, but
* might result in un-aligned reads and writes.
*/
static final int HEADER_LENGTH = 4096;
/**
* The block size.
*/
static final int BLOCK_SIZE = Constants.FILE_BLOCK_SIZE;
// TODO improve the header
private static final byte[] HEADER = "-- H2 crypt --\n\0".getBytes();
private static final int SALT_POS = HEADER.length;
private static final int SALT_LENGTH = 16;
private static final int HASH_ITERATIONS = Constants.ENCRYPTION_KEY_HASH_ITERATIONS;
private final String name;
private final FileChannel file;
private final BlockCipher cipher, cipherForInitVector;
private final byte[] bufferForInitVector;
public FileCrypt(String name, String algorithm, String password, FileChannel file) throws IOException {
this.name = name;
this.file = file;
boolean newFile = file.size() < HEADER_LENGTH + BLOCK_SIZE;
byte[] filePasswordHash;
if (algorithm.endsWith("-hash")) {
filePasswordHash = StringUtils.convertHexToBytes(password);
algorithm = algorithm.substring(0, algorithm.length() - "-hash".length());
} else {
filePasswordHash = SHA256.getKeyPasswordHash("file", password.toCharArray());
}
cipher = CipherFactory.getBlockCipher(algorithm);
cipherForInitVector = CipherFactory.getBlockCipher(algorithm);
int keyIterations = HASH_ITERATIONS;
byte[] salt;
if (newFile) {
salt = MathUtils.secureRandomBytes(SALT_LENGTH);
FileUtils.writeFully(file, ByteBuffer.wrap(HEADER));
file.position(SALT_POS);
FileUtils.writeFully(file, ByteBuffer.wrap(salt));
} else {
salt = new byte[SALT_LENGTH];
file.position(SALT_POS);
FileUtils.readFully(file, ByteBuffer.wrap(salt));
}
byte[] key = SHA256.getHashWithSalt(filePasswordHash, salt);
for (int i = 0; i < keyIterations; i++) {
key = SHA256.getHash(key, true);
}
cipher.setKey(key);
bufferForInitVector = new byte[BLOCK_SIZE];
position(0);
}
public long position() throws IOException {
return Math.max(0, file.position() - HEADER_LENGTH);
}
public long size() throws IOException {
return Math.max(0, file.size() - HEADER_LENGTH - BLOCK_SIZE);
}
public FileChannel position(long pos) throws IOException {
file.position(pos + HEADER_LENGTH);
return this;
}
public void force(boolean metaData) throws IOException {
file.force(metaData);
}
public synchronized FileLock tryLock(long position, long size, boolean shared) throws IOException {
return file.tryLock(position, size, shared);
}
public void implCloseChannel() throws IOException {
file.close();
}
public FileChannel truncate(long newLength) throws IOException {
if (newLength >= size()) {
return this;
}
int mod = (int) (newLength % BLOCK_SIZE);
if (mod == 0) {
file.truncate(HEADER_LENGTH + newLength);
} else {
file.truncate(HEADER_LENGTH + newLength + BLOCK_SIZE - mod);
byte[] buff = new byte[BLOCK_SIZE - mod];
long pos = position();
position(newLength);
write(buff, 0, buff.length);
position(pos);
}
file.truncate(HEADER_LENGTH + newLength + BLOCK_SIZE);
if (newLength < position()) {
position(newLength);
}
return this;
}
public int read(ByteBuffer dst) throws IOException {
int len = dst.remaining();
if (len == 0) {
return 0;
}
long pos = position();
len = (int) Math.min(len, size() - pos);
if (len <= 0) {
return -1;
}
int posMod = (int) (pos % BLOCK_SIZE);
if (posMod == 0 && len % BLOCK_SIZE == 0) {
readAligned(pos, dst.array(), dst.position(), len);
} else {
long p = pos - posMod;
int l = len;
if (posMod != 0) {
l += posMod;
}
l = MathUtils.roundUpInt(l, BLOCK_SIZE);
position(p);
byte[] temp = new byte[l];
try {
readAligned(p, temp, 0, l);
System.arraycopy(temp, posMod, dst.array(), dst.position(), len);
} finally {
position(pos + len);
}
}
dst.position(dst.position() + len);
return len;
}
public int write(ByteBuffer src) throws IOException {
int len = src.remaining();
if (len == 0) {
return 0;
}
write(src.array(), src.position(), len);
src.position(src.position() + len);
return len;
}
private void write(byte[] b, int off, int len) throws IOException {
long pos = position();
int posMod = (int) (pos % BLOCK_SIZE);
if (posMod == 0 && len % BLOCK_SIZE == 0) {
byte[] temp = new byte[len];
System.arraycopy(b, off, temp, 0, len);
writeAligned(pos, temp, 0, len);
} else {
long p = pos - posMod;
int l = len;
if (posMod != 0) {
l += posMod;
}
l = MathUtils.roundUpInt(l, BLOCK_SIZE);
position(p);
byte[] temp = new byte[l];
if (file.size() < HEADER_LENGTH + p + l) {
file.position(HEADER_LENGTH + p + l - 1);
FileUtils.writeFully(file, ByteBuffer.wrap(new byte[1]));
position(p);
}
readAligned(p, temp, 0, l);
System.arraycopy(b, off, temp, posMod, len);
position(p);
try {
writeAligned(p, temp, 0, l);
} finally {
position(pos + len);
}
}
pos = file.position();
if (file.size() < pos + BLOCK_SIZE) {
file.position(pos + BLOCK_SIZE - 1);
FileUtils.writeFully(file, ByteBuffer.wrap(new byte[1]));
file.position(pos);
}
}
private void readAligned(long pos, byte[] b, int off, int len) throws IOException {
FileUtils.readFully(file, ByteBuffer.wrap(b, off, len));
for (int p = 0; p < len; p += BLOCK_SIZE) {
for (int i = 0; i < BLOCK_SIZE; i++) {
// empty blocks are not decrypted
if (b[p + off + i] != 0) {
cipher.decrypt(b, p + off, BLOCK_SIZE);
xorInitVector(b, p + off, BLOCK_SIZE, p + pos);
break;
}
}
}
}
private void writeAligned(long pos, byte[] b, int off, int len) throws IOException {
for (int p = 0; p < len; p += BLOCK_SIZE) {
for (int i = 0; i < BLOCK_SIZE; i++) {
// empty blocks are not encrypted
if (b[p + off + i] != 0) {
xorInitVector(b, p + off, BLOCK_SIZE, p + pos);
cipher.encrypt(b, p + off, BLOCK_SIZE);
break;
}
}
}
FileUtils.writeFully(file, ByteBuffer.wrap(b, off, len));
}
private void xorInitVector(byte[] b, int off, int len, long p) {
byte[] iv = bufferForInitVector;
while (len > 0) {
for (int i = 0; i < BLOCK_SIZE; i += 8) {
long block = (p + i) >>> 3;
iv[i] = (byte) (block >> 56);
iv[i + 1] = (byte) (block >> 48);
iv[i + 2] = (byte) (block >> 40);
iv[i + 3] = (byte) (block >> 32);
iv[i + 4] = (byte) (block >> 24);
iv[i + 5] = (byte) (block >> 16);
iv[i + 6] = (byte) (block >> 8);
iv[i + 7] = (byte) block;
}
cipherForInitVector.encrypt(iv, 0, BLOCK_SIZE);
for (int i = 0; i < BLOCK_SIZE; i++) {
b[off + i] ^= iv[i];
}
p += BLOCK_SIZE;
off += BLOCK_SIZE;
len -= BLOCK_SIZE;
}
}
public String toString() {
return name;
}
}