Commit 03a6507d authored by Thomas Mueller

--no commit message

Parent 6b5d8b92
@@ -67,12 +67,12 @@ The other targets may be used as well.
<h3>Using a Central Repository</h3>
<p>
You can include the database in your Maven 2 project as a dependency.
-The version is currently 1.0.&lt;year&gt;&lt;month&gt;&lt;day&gt;. Example:
+Example:
<pre>
&lt;dependency&gt;
&lt;groupId&gt;com.h2database&lt;/groupId&gt;
&lt;artifactId&gt;h2&lt;/artifactId&gt;
-&lt;version&gt;1.0.20070617&lt;/version&gt;
+&lt;version&gt;1.0.58&lt;/version&gt;
&lt;/dependency&gt;
</pre>
</p>
......
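For readers following the Maven section above, here is a minimal usage sketch (not part of this commit) showing the dependency in action from plain JDBC; the file-based URL jdbc:h2:~/test and the sa user with an empty password are just the usual quick-start values:

// Minimal JDBC usage once the com.h2database:h2 artifact is on the classpath.
import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.ResultSet;
import java.sql.Statement;

public class H2QuickStart {
    public static void main(String[] args) throws Exception {
        Class.forName("org.h2.Driver"); // registers the H2 JDBC driver
        Connection conn = DriverManager.getConnection("jdbc:h2:~/test", "sa", "");
        Statement stat = conn.createStatement();
        ResultSet rs = stat.executeQuery("SELECT 1");
        rs.next();
        System.out.println("connected, result: " + rs.getInt(1));
        conn.close();
    }
}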
@@ -40,7 +40,16 @@ Hypersonic SQL or HSQLDB. H2 is built from scratch.
<h3>Version 1.0 (Current)</h3>
<h3>Version 1.0.x (2007-09-x)</h3><ul>
-<li>After deleting data, empty space in the database files was not efficiently reused
+<li>When using IFNULL, NULLIF, COALESCE, LEAST, or GREATEST,
+and the first parameter was ?, an exception was thrown.
+Now the highest data type of all parameters is used.
+</li><li>When comparing TINYINT or SMALLINT columns against constants, the index was not used. Fixed.
+</li><li>Maven 2: new versions are now automatically synced with the central repositories.
+</li><li>The default value for MAX_MEMORY_UNDO is now 100000.
+</li><li>The documentation indexer no longer indexes Japanese pages.
+If somebody knows how to split Japanese into words, please post it.
+</li><li>Oracle compatibility: SYSDATE now returns a timestamp. CHR(..) is now an alias for CHAR(..).
+</li><li>After deleting data, empty space in the database files was not efficiently reused
(but it was reused when opening the database). This has been fixed.
</li><li>About 230 bytes per database was leaked. This is a problem for applications
opening and closing many thousand databases. The main problem: a shutdown hook
@@ -744,6 +753,7 @@ Hypersonic SQL or HSQLDB. H2 is built from scratch.
</li><li>Document org.h2.samples.MixedMode
</li><li>Server: use one listener (detect if the request comes from a PG or TCP client)
</li><li>Store dates as 'local'. Existing files use GMT. Use escape syntax for backward compatibility.
+</li><li>Support data type INTERVAL
</li></ul>
<h3>Not Planned</h3>
......
This source diff could not be displayed because it is too large. You can view the blob instead.
@@ -2849,7 +2849,7 @@ public class Parser {
} else if (s.equals("SYSTIME")) {
    return CURRENT_TIME;
} else if (s.equals("SYSDATE")) {
-    return CURRENT_DATE;
+    return CURRENT_TIMESTAMP;
}
return getKeywordOrIdentifier(s, "SELECT", KEYWORD);
case 'T':
......
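The effect of this one-line change is that SYSDATE now keeps the time of day instead of being parsed as CURRENT_DATE. A quick check, sketched here under the assumption of an already open H2 connection conn:

// Sketch: SYSDATE is now parsed as CURRENT_TIMESTAMP, so it returns date and time.
static void showSysdate(java.sql.Connection conn) throws java.sql.SQLException {
    java.sql.Statement stat = conn.createStatement();
    java.sql.ResultSet rs = stat.executeQuery("SELECT SYSDATE, CURRENT_TIMESTAMP");
    rs.next();
    System.out.println("SYSDATE:           " + rs.getTimestamp(1)); // no longer truncated to midnight
    System.out.println("CURRENT_TIMESTAMP: " + rs.getTimestamp(2));
    rs.close();
    stat.close();
}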
@@ -48,7 +48,7 @@ public class SysProperties {
public static final int DATASOURCE_TRACE_LEVEL = getIntSetting("h2.dataSourceTraceLevel", TraceSystem.ERROR);
public static final int CACHE_SIZE_DEFAULT = getIntSetting("h2.cacheSizeDefault", 16 * 1024);
public static final int CACHE_SIZE_INDEX_SHIFT = getIntSetting("h2.cacheSizeIndexShift", 3);
-public static final int DEFAULT_MAX_MEMORY_UNDO = getIntSetting("h2.defaultMaxMemoryUndo", 50000);
+public static final int DEFAULT_MAX_MEMORY_UNDO = getIntSetting("h2.defaultMaxMemoryUndo", 100000);
public static final boolean OPTIMIZE_NOT = getBooleanSetting("h2.optimizeNot", true);
public static final boolean OPTIMIZE_TWO_EQUALS = getBooleanSetting("h2.optimizeTwoEquals", true);
public static final int DEFAULT_LOCK_MODE = getIntSetting("h2.defaultLockMode", Constants.LOCK_MODE_READ_COMMITTED);
......
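These defaults are read from JVM system properties, so they can be overridden without rebuilding. A sketch, assuming the property is set before any H2 class is loaded (the fields are static finals); the value 50000 is only an example:

// Sketch: override the built-in default for the undo buffer before H2 is initialized.
public class UndoDefaultExample {
    public static void main(String[] args) throws Exception {
        System.setProperty("h2.defaultMaxMemoryUndo", "50000"); // property name from SysProperties above
        Class.forName("org.h2.Driver");
        java.sql.Connection conn =
            java.sql.DriverManager.getConnection("jdbc:h2:mem:test", "sa", "");
        // Databases opened from here on should start with the overridden default;
        // SET MAX_MEMORY_UNDO can still change it per database.
        conn.close();
    }
}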
@@ -49,6 +49,7 @@ import org.h2.util.CacheLRU;
import org.h2.util.ClassUtils;
import org.h2.util.FileUtils;
import org.h2.util.IOUtils;
+import org.h2.util.IntHashMap;
import org.h2.util.MemoryFile;
import org.h2.util.ObjectArray;
import org.h2.util.StringUtils;
@@ -96,7 +97,7 @@ public class Database implements DataHandler {
private FileLock lock;
private LogSystem log;
private WriterThread writer;
-private ObjectArray storages = new ObjectArray();
+private IntHashMap storageMap = new IntHashMap();
private boolean starting;
private DiskFile fileData, fileIndex;
private TraceSystem traceSystem;
@@ -568,6 +569,7 @@ public class Database implements DataHandler {
private void removeUnusedStorages(Session session) throws SQLException {
    if (persistent) {
+        ObjectArray storages = getAllStorages();
        for (int i = 0; i < storages.size(); i++) {
            Storage storage = (Storage) storages.get(i);
            if (storage != null && storage.getRecordReader() == null) {
@@ -595,30 +597,23 @@ public class Database implements DataHandler {
public void removeStorage(int id, DiskFile file) {
    if (SysProperties.CHECK) {
-        Storage s = (Storage) storages.get(id);
+        Storage s = (Storage) storageMap.get(id);
        if (s == null || s.getDiskFile() != file) {
            throw Message.getInternalError();
        }
    }
-    storages.set(id, null);
+    storageMap.remove(id);
}

public Storage getStorage(int id, DiskFile file) {
-    Storage storage = null;
-    if (storages.size() > id) {
-        storage = (Storage) storages.get(id);
+    Storage storage = (Storage) storageMap.get(id);
    if (storage != null) {
        if (SysProperties.CHECK && storage.getDiskFile() != file) {
            throw Message.getInternalError();
        }
-        }
-    }
-    if (storage == null) {
+    } else {
        storage = new Storage(this, file, null, id);
-        while (storages.size() <= id) {
-            storages.add(null);
-        }
-        storages.set(id, storage);
+        storageMap.put(id, storage);
    }
    return storage;
}
@@ -910,7 +905,7 @@ public class Database implements DataHandler {
    } catch (SQLException e) {
        traceSystem.getTrace(Trace.DATABASE).error("close", e);
    }
-    storages = new ObjectArray();
+    storageMap.clear();
}

private void checkMetaFree(Session session, int id) throws SQLException {
@@ -931,7 +926,8 @@ public class Database implements DataHandler {
    if ((i & 1) != (dataFile ? 1 : 0)) {
        i++;
    }
-    while (i < storages.size() || objectIds.get(i)) {
+    while (storageMap.get(i) != null || objectIds.get(i)) {
        i++;
        if ((i & 1) != (dataFile ? 1 : 0)) {
            i++;
@@ -1414,7 +1410,7 @@ public class Database implements DataHandler {
}

public ObjectArray getAllStorages() {
-    return storages;
+    return new ObjectArray(storageMap.values());
}

public boolean getRecovery() {
......
@@ -95,6 +95,11 @@ public class Comparison extends Condition {
    dataType = left.getType();
} else {
    right = right.optimize(session);
+    if (left instanceof ExpressionColumn && right.isConstant()) {
+        right = getCast(right, left.getType(), left.getPrecision(), left.getScale(), session);
+    } else if (right instanceof ExpressionColumn && left.isConstant()) {
+        left = getCast(left, right.getType(), right.getPrecision(), right.getScale(), session);
+    }
    int lt = left.getType(), rt = right.getType();
    if (lt == rt) {
        if (lt == Value.UNKNOWN) {
......
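This cast of the constant to the column type is what allows an index to be used when a TINYINT or SMALLINT column is compared against an integer literal (see the changelog entry above and the new test cases further down). A small sketch, assuming an open H2 connection conn:

// Sketch: the primary key index should now appear in the plan for a SMALLINT comparison.
static void showIndexUse(java.sql.Connection conn) throws java.sql.SQLException {
    java.sql.Statement stat = conn.createStatement();
    stat.execute("CREATE TABLE TEST(ID SMALLINT PRIMARY KEY)");
    stat.execute("INSERT INTO TEST VALUES(1), (2), (3)");
    java.sql.ResultSet rs = stat.executeQuery("EXPLAIN SELECT * FROM TEST WHERE ID = 1");
    rs.next();
    System.out.println(rs.getString(1)); // expect a PRIMARY_KEY_... lookup, not a table scan
    stat.execute("DROP TABLE TEST");
    stat.close();
}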
@@ -185,6 +185,7 @@ public class Function extends Expression implements FunctionCall {
addFunction("ASCII", ASCII, 1, Value.INT);
addFunction("BIT_LENGTH", BIT_LENGTH, 1, Value.INT);
addFunction("CHAR", CHAR, 1, Value.STRING);
+addFunction("CHR", CHAR, 1, Value.STRING);
addFunction("CHAR_LENGTH", CHAR_LENGTH, 1, Value.INT);
addFunction("CHARACTER_LENGTH", CHAR_LENGTH, 1, Value.INT); // same as
// CHAR_LENGTH
@@ -1350,17 +1351,21 @@ public class Function extends Expression implements FunctionCall {
case COALESCE:
case LEAST:
case GREATEST: {
-    dataType = Value.STRING;
+    dataType = Value.UNKNOWN;
    scale = 0;
    precision = 0;
    for (int i = 0; i < args.length; i++) {
        Expression e = args[i];
-        if (e != ValueExpression.NULL) {
-            dataType = e.getType();
-            scale = e.getScale();
-            precision = e.getPrecision();
-            break;
+        if (e != ValueExpression.NULL && e.getType() != Value.UNKNOWN) {
+            dataType = Value.getHigherOrder(dataType, e.getType());
+            scale = Math.max(scale, e.getScale());
+            precision = Math.max(precision, e.getPrecision());
        }
    }
+    if (dataType == Value.UNKNOWN) {
+        dataType = Value.STRING;
+        scale = 0;
+        precision = 0;
+    }
    break;
}
......
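The visible effect is that the result type of IFNULL, NULLIF, COALESCE, LEAST, and GREATEST is now derived from all arguments instead of only the first non-NULL one, and a leading ? parameter no longer throws. A sketch, assuming an open connection conn; the expectation that mixing INT and DECIMAL yields a decimal-like column type follows from Value.getHigherOrder but is not verified here:

// Sketch: inspect the inferred result type of COALESCE via ResultSetMetaData.
static void showCoalesceType(java.sql.Connection conn) throws java.sql.SQLException {
    java.sql.Statement stat = conn.createStatement();
    java.sql.ResultSet rs = stat.executeQuery("SELECT COALESCE(1, 2.5)");
    java.sql.ResultSetMetaData meta = rs.getMetaData();
    // Previously the type of the first argument (INT) was used;
    // now the higher-order type of all arguments is expected.
    System.out.println(meta.getColumnTypeName(1));
    rs.close();
    stat.close();
}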
@@ -910,8 +910,7 @@ SET MAX_MEMORY_ROWS 1000
SET MAX_MEMORY_UNDO int
","
The maximum number of undo records per session that are kept in memory.
-If a transaction is larger, the records are buffered to disk.
-The default value is Integer.MAX_VALUE (that means the feature is disabled by default).
+If a transaction is larger, the records are buffered to disk. The default value is 100000.
Changes to tables without a primary key cannot be buffered to disk.
This setting is persistent.
Admin rights are required to execute this command.
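A sketch of changing the setting from JDBC, assuming an open connection with admin rights; the value shown is simply the new default:

// Sketch: adjust the per-session undo buffer threshold (persistent, admin rights required).
static void setMaxMemoryUndo(java.sql.Connection conn) throws java.sql.SQLException {
    java.sql.Statement stat = conn.createStatement();
    stat.execute("SET MAX_MEMORY_UNDO 100000");
    stat.close();
}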
@@ -2045,7 +2044,7 @@ Each character needs 2 bytes.
OCTET_LENGTH(NAME)
"
"Functions (String)","CHAR","
-CHAR(int): string
+{CHAR | CHR}(int): string
","
Returns the character that represents the ASCII value.
","
......
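Since CHR is now just an alias for CHAR, both spellings return the character for an ASCII code; a sketch, assuming an open connection conn:

// Sketch: CHR(..) behaves exactly like CHAR(..).
static void showChr(java.sql.Connection conn) throws java.sql.SQLException {
    java.sql.Statement stat = conn.createStatement();
    java.sql.ResultSet rs = stat.executeQuery("SELECT CHAR(65), CHR(65)");
    rs.next();
    System.out.println(rs.getString(1) + " " + rs.getString(2)); // prints: A A
    rs.close();
    stat.close();
}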
package org.h2.util;
import java.util.Collection;
import java.util.HashMap;
public class IntHashMap {
private final HashMap map = new HashMap();
public Object get(int key) {
return map.get(ObjectUtils.getInteger(key));
}
public void put(int key, Object value) {
map.put(ObjectUtils.getInteger(key), value);
}
public void remove(int key) {
map.remove(ObjectUtils.getInteger(key));
}
public int size() {
return map.size();
}
public void clear() {
map.clear();
}
public Collection values() {
return map.values();
}
}
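The new org.h2.util.IntHashMap above is a thin wrapper around java.util.HashMap for primitive int keys (Database uses it for the storage map). A usage sketch, assuming the H2 sources or jar are on the classpath:

// Sketch: basic usage of the new IntHashMap utility.
import org.h2.util.IntHashMap;

public class IntHashMapExample {
    public static void main(String[] args) {
        IntHashMap map = new IntHashMap();
        map.put(7, "data file storage");  // int keys are boxed internally
        map.put(8, "index file storage");
        System.out.println(map.get(7));   // prints: data file storage
        map.remove(8);
        System.out.println(map.size());   // prints: 1
        map.clear();
    }
}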
@@ -102,7 +102,6 @@ public class TestAll {
// java -cp .;%H2DRIVERS% org.h2.test.TestAll
// java -Xrunhprof:cpu=samples,depth=8 org.h2.test.TestAll
// java -Xrunhprof:heap=sites,depth=8 org.h2.test.TestAll
-// C:\Programme\Java\jdk1.6.beta\bin\java

/*
@@ -118,14 +117,9 @@ start cmd /k "java -cp . org.h2.test.TestAll random >testRandom.txt"
start cmd /k "java -cp . org.h2.test.TestAll btree >testBtree.txt"
start cmd /k "java -cp . org.h2.test.TestAll halt >testHalt.txt"
java org.h2.test.TestAll timer

-Test for hot spots:
-java -agentlib:yjpagent=sampling,noj2ee,dir=C:\temp\Snapshots org.h2.test.bench.TestPerformance -init -db 1
-java -Xmx512m -Xrunhprof:cpu=samples,depth=8 org.h2.tools.RunScript
-  -url jdbc:h2:test;TRACE_LEVEL_FILE=3;LOG=2;MAX_LOG_SIZE=1000;DATABASE_EVENT_LISTENER='org.h2.samples.ShowProgress' -user sa -script test.sql
*/
public boolean smallLog, big, networked, memory, ssl, textStorage, diskUndo, diskResult, deleteIndex, traceSystemOut;
public boolean codeCoverage;
@@ -148,7 +142,7 @@ java -Xmx512m -Xrunhprof:cpu=samples,depth=8 org.h2.tools.RunScript
/*
-database files grow when updating data
+add MVCC

slow:
select ta.attname, ia.attnum, ic.relname
@@ -164,35 +158,24 @@ AND (NOT ta.attisdropped)
AND (NOT ia.attisdropped)
order by ia.attnum;
-change default for in-memory undo
-japanese topics in the javascript search

DROP TABLE IF EXISTS TEST;
CREATE TABLE TEST(ID INT PRIMARY KEY, NAME VARCHAR);
@LOOP 1000000 INSERT INTO TEST VALUES(?, SPACE(100000));
<stop>
<reconnect>
-out of memory
+out of memory?

shrink newsletter list (migrate to google groups)
-see if maven repository is ok, document
-http://maven.apache.org/guides/mini/guide-central-repository-upload.html
-http://mirrors.ibiblio.org/pub/mirrors/maven2/com/h2database/h2/1.0.57/
-add MVCC

don't create @~ of not translated
test performance and document fulltext search
clustered tables: test, document
-search for japanese: works, but is it ok?
extend tests that simulate power off

HSQLDB compatibility:
Openfire server uses this script to setup a user permissions
on the fresh-installed server. The database is [current] HSQLDB :
CREATE SCHEMA PUBLIC AUTHORIZATION DBA
@@ -207,49 +190,32 @@ move Comparison to Other Database Engines > Comparison
move Products that Work with H2 > Comparison
move Performance Tuning > Advanced Topics

-storages should be an int hash map

testHalt
java org.h2.test.TestAll halt
->testHalt.txt

timer test

-Mail http://sf.net/projects/samooha

java.lang.Exception: query was too quick; result: 0 time:968
at org.h2.test.TestBase.logError(TestBase.java:220)
at org.h2.test.db.TestCases$1.run(TestCases.java:170)
at java.lang.Thread.run(Thread.java:595)

-h2\src\docsrc\html\images\SQLInjection.txt
ftp server: problem with multithreading?
-h2\src\docsrc\html\images\SQLInjection.txt

send http://thecodist.com/fiche/thecodist/article/sql-injections-how-not-to-get-stuck to JavaWorld, TheServerSide,
Send SQL Injection solution proposal to PostgreSQL, MySQL, Derby, HSQLDB,...
Convert SQL-injection-2.txt to html document, include SQLInjection.java sample
MySQL, PostgreSQL

-http://semmle.com/
-try out, find bugs

READ_TEXT(fileName String) returning a CLOB.
I am not sure if this will read the CLOB in memory however.
-I will add this to the todo list.

Improve LOB in directories performance

-Test Eclipse DTP 1.5 (HSQLDB / H2 connection bug fixed)

Automate real power off tests

-drop table test;
-CREATE TABLE TEST( ID BIGINT PRIMARY KEY, CREATED TIMESTAMP);
-INSERT INTO TEST VALUES(1, '2007-01-01 00:00:00');
-SELECT * FROM TEST;

http://fastutil.dsi.unimi.it/
http://javolution.org/
http://joda-time.sourceforge.net/
@@ -260,200 +226,37 @@ http://www.igniterealtime.org/projects/openfire/index.jsp
translation:
src/org.h2.res/help.csv (using ${.} as in .jsp?)
javadocs (using generated ${.} ?)
-html (using generated wiki pages ?)
-how do multi line properties files work? xml? [key]...?
-converter between properties and [key] ...?
-checksum marker
glossary
spell check / word list per language
translated .pdf
-docs: xml:lang="en" > correct language (and detect wrong language based on _ja)
-docs: xhtml: use UTF-8 encoding (<?xml version="1.0"?>)

write tests using the PostgreSQL JDBC driver

-SYSDATE should be CURRENT_TIMESTAMP
-support Trunc(Sysdate),...
+support Oracle functions:
+TRUNC, NVL2, TO_CHAR, TO_DATE, TO_NUMBER;

-public static String chr(int code) {
-    return String.valueOf((char) code);
-}
-public static Object nvl(Object value, Object ifNull) {
-    return (value != null) ? value : ifNull;
-}
-public static Object nvl2(Object value, Object notNull, Object ifNull) {
-    return (value != null) ? notNull : ifNull;
-}
-public static Timestamp sysdate() {
-    return new Timestamp(System.currentTimeMillis());
-}
-public static String to_char(Object what, String format) {
-    throw new Error("not yet"); // @todo check format
-}
-public static Date to_date(Object what, String format) {
-    throw new Error("not yet"); // @todo check format
-}
-public static Number to_number(String str) {
-    return new Double(str);
-}
-public static java.sql.Date trunc(Timestamp tsp) {
-    return new java.sql.Date(tsp.getTime());
-}
-public static Object trunc(Object what, String format) {
-    System.out.println("*** trunc ***");
-    if (what == null)
-        return null;
-    else if (what instanceof Date) {
-        System.out.println("*** date-format = " + format);
-        return Timestamp.valueOf("1963-03-27 12:34:56.0");
-    } else if (what instanceof Number) {
-        System.out.println("*** number-format = " + format);
-        return new Double(123.456D);
-    } else
-        throw new ClassCastException("number or date expected");
-}

*/
-/*
-complete recursive views:
-drop all objects;
-create table parent(id int primary key, parent int);
-insert into parent values(1, null), (2, 1), (3, 1);
-with test_view(id, parent) as
-select id, parent from parent where id = ?
-union all
-select parent.id, parent.parent from test_view, parent
-where parent.parent = test_view.id
-select * from test_view {1: 1};
-drop view test_view;
-with test_view(id, parent) as
-select id, parent from parent where id = 1
-union all
-select parent.id, parent.parent from test_view, parent
-where parent.parent = test_view.id
-select * from test_view;
-drop view test_view;
-drop table parent;
-*/
-/*
-DROP TABLE TEST;
-CREATE TABLE TEST(ID INT);
-INSERT INTO TEST VALUES(1);
-INSERT INTO TEST VALUES(2);
-SELECT ID AS A FROM TEST WHERE A>0;
--- Yes: HSQLDB
--- Fail: Oracle, MS SQL Server, PostgreSQL, MySQL, H2, Derby
-SELECT ID AS A FROM TEST ORDER BY A;
--- Yes: Oracle, MS SQL Server, PostgreSQL, MySQL, H2, Derby, HSQLDB
-SELECT ID AS A FROM TEST ORDER BY -A;
--- Yes: Oracle, MySQL, HSQLDB
--- Fail: MS SQL Server, PostgreSQL, H2, Derby
-SELECT ID AS A FROM TEST GROUP BY A;
--- Yes: PostgreSQL, MySQL, HSQLDB
--- Fail: Oracle, MS SQL Server, H2, Derby
-SELECT ID AS A FROM TEST GROUP BY -A;
--- Yes: MySQL, HSQLDB
--- Fail: Oracle, MS SQL Server, PostgreSQL, H2, Derby
-SELECT ID AS A FROM TEST GROUP BY ID HAVING A>0;
--- Yes: MySQL, HSQLDB
--- Fail: Oracle, MS SQL Server, PostgreSQL, H2, Derby
-SELECT COUNT(*) AS A FROM TEST GROUP BY ID HAVING A>0;
--- Yes: MySQL, HSQLDB
--- Fail: Oracle, MS SQL Server, PostgreSQL, H2, Derby
-*/
-// TODO: fix Hibernate dialect bug / Bordea Felix (lost email)
-// run TestHalt
-// WHERE FLAG does not use index, but WHERE FLAG=TRUE does
-// drop table test;
-// CREATE TABLE test (id int, flag BIT NOT NULL);
-// CREATE INDEX idx_flag ON test(flag);
-// CREATE INDEX idx_id ON test(id);
-// insert into test values(1, false), (2, true), (3, false), (4, true);
-// ALTER TABLE test ALTER COLUMN id SELECTIVITY 100;
-// ALTER TABLE test ALTER COLUMN flag SELECTIVITY 1;
-// EXPLAIN SELECT * FROM test WHERE id=2 AND flag=true;
-// EXPLAIN SELECT * FROM test WHERE id between 2 and 3 AND flag=true;
-// EXPLAIN SELECT * FROM test WHERE id=2 AND flag;
-//
-// ALTER TABLE test ALTER COLUMN id SELECTIVITY 1;
-// ALTER TABLE test ALTER COLUMN flag SELECTIVITY 100;
-// EXPLAIN SELECT * FROM test WHERE id=2 AND flag=true;
-// EXPLAIN SELECT * FROM test WHERE id between 2 and 3 AND flag=true;
-// EXPLAIN SELECT * FROM test WHERE id=2 AND flag;
-// h2
-// update FOO set a = dateadd('second', 4320000, a);
-// ms sql server
-// update FOO set a = dateadd(s, 4320000, a);
-// mysql
-// update FOO set a = date_add(a, interval 4320000 second);
-// postgresql
-// update FOO set a = a + interval '4320000 s';
-// oracle
-// update FOO set a = a + INTERVAL '4320000' SECOND;
-// GroovyServlet
-// Cluster: hot deploy (adding a node on runtime)
-// dataSource.setLogWriter() seems to have no effect?
-// CHAR data type
-// DROP TABLE TEST;
-// CREATE TABLE TEST(C CHAR(10));
-// INSERT INTO TEST VALUES('1');
-// SELECT COUNT(*) FROM TEST WHERE C='1 ';
-// -- PostgreSQL, HSQLDB, MySQL, Derby, MS SQL Server, Oracle: 1
-// -- H2: 0
-// SELECT LENGTH(C), LENGTH(C || 'x') FROM TEST;
-// -- MySQL: 1, 1 (??)
-// -- MS SQL Server: 1, 11 (SELECT LEN(C), LEN(C + 'x') FROM TEST)
-// -- Oracle, Derby: 10, 11
-// -- PostgreSQL, H2, HSQLDB: 1, 2
-// auto-upgrade application:
-// check if new version is available
-// (option: digital signature)
-// if yes download new version
-// (option: http, https, ftp, network)
-// backup database to SQL script
-// (option: list of databases, use recovery mechanism)
-// install new version
-// ftp client
-// task to download new version from another HTTP / HTTPS / FTP server
-// multi-task
-// test with PostgreSQL Version 8.2
-// http://dev.helma.org/Wiki/RhinoLoader
-// test with garbage at the end of the log file (must be consistently detected as such)
-// test LIKE: compare against other databases
-// TestRandomSQL is too random; most statements fails
-// extend the random join test that compared the result against PostgreSQL
-// long running test with the same database
-// repeatable test with a very big database (making backups of the database files)
-// data conversion should be done automatically when the new engine connects.
+// run TestHalt
+// GroovyServlet
+// Cluster: hot deploy (adding a node on runtime)
+// test with PostgreSQL Version 8.2
+// http://dev.helma.org/Wiki/RhinoLoader
+// test with garbage at the end of the log file (must be consistently detected as such)
+// test LIKE: compare against other databases
+// TestRandomSQL is too random; most statements fails
+// extend the random join test that compared the result against PostgreSQL
+// long running test with the same database
+// repeatable test with a very big database (making backups of the database files)

if (args.length > 0) {
    if ("crash".equals(args[0])) {
......
@@ -28,6 +28,7 @@ public class TestPreparedStatement extends TestBase {
deleteDb("preparedStatement");
Connection conn = getConnection("preparedStatement");
+testCoalesce(conn);
testPreparedStatementMetaData(conn);
testDate(conn);
testArray(conn);
@@ -48,6 +49,16 @@ public class TestPreparedStatement extends TestBase {
conn.close();
}
private void testCoalesce(Connection conn) throws Exception {
Statement stat = conn.createStatement();
stat.executeUpdate("create table test(tm timestamp)");
stat.executeUpdate("insert into test values(current_timestamp)");
PreparedStatement prep = conn.prepareStatement("update test set tm = coalesce(?,tm)");
prep.setTimestamp(1, new java.sql.Timestamp(System.currentTimeMillis()));
prep.executeUpdate();
stat.executeUpdate("drop table test");
}
private void testPreparedStatementMetaData(Connection conn) throws Exception {
PreparedStatement prep = conn.prepareStatement("select * from table(x int = ?, name varchar = ?)");
ResultSetMetaData meta = prep.getMetaData();
......
--- special grammar and test cases ---------------------------------------------------------------------------------------------
create table test(id smallint primary key);
> ok
insert into test values(1), (2), (3);
> update count: 3
explain select * from test where id = 1;
> PLAN
> -------------------------------------------------------------------------------
> SELECT TEST.ID FROM PUBLIC.TEST /* PUBLIC.PRIMARY_KEY_1: ID = 1 */ WHERE ID = 1
> rows: 1
drop table test;
> ok
create table test(id tinyint primary key);
> ok
insert into test values(1), (2), (3);
> update count: 3
explain select * from test where id = 3;
> PLAN
> -------------------------------------------------------------------------------
> SELECT TEST.ID FROM PUBLIC.TEST /* PUBLIC.PRIMARY_KEY_1: ID = 3 */ WHERE ID = 3
> rows: 1
explain select * from test where id = 255;
> exception
drop table test;
> ok
create table test(id int primary key);
> ok
@@ -313,8 +346,8 @@ select * from test where name = -1 and name = id;
explain select * from test where name = -1 and name = id;
> PLAN
-> ------------------------------------------------------------------------------------------------------------------------------------------------------------------
-> SELECT TEST.ID, TEST.NAME FROM PUBLIC.TEST /* PUBLIC.PRIMARY_KEY_1: ID = -1 */ WHERE ((CAST(NAME AS INTEGER) = -1) AND (CAST(NAME AS INTEGER) = ID)) AND (-1 = ID)
+> ----------------------------------------------------------------------------------------------------------------------------
+> SELECT TEST.ID, TEST.NAME FROM PUBLIC.TEST /* PUBLIC.TEST_TABLE_SCAN */ WHERE (NAME = '-1') AND (CAST(NAME AS INTEGER) = ID)
> rows: 1
DROP TABLE TEST;
......
file conversion should be done automatically when the new engine connects.
auto-upgrade application:
check if new version is available
(option: digital signature)
if yes download new version
(option: http, https, ftp)
backup database to SQL script
(option: list of databases, use recovery mechanism)
install new version
ftp client
task to download new version from another HTTP / HTTPS / FTP server
multi-task
DROP TABLE TEST;
CREATE TABLE TEST(ID INT);
INSERT INTO TEST VALUES(1);
INSERT INTO TEST VALUES(2);
SELECT ID AS A FROM TEST WHERE A>0;
-- Yes: HSQLDB
-- Fail: Oracle, MS SQL Server, PostgreSQL, MySQL, H2, Derby
SELECT ID AS A FROM TEST ORDER BY A;
-- Yes: Oracle, MS SQL Server, PostgreSQL, MySQL, H2, Derby, HSQLDB
SELECT ID AS A FROM TEST ORDER BY -A;
-- Yes: Oracle, MySQL, HSQLDB
-- Fail: MS SQL Server, PostgreSQL, H2, Derby
SELECT ID AS A FROM TEST GROUP BY A;
-- Yes: PostgreSQL, MySQL, HSQLDB
-- Fail: Oracle, MS SQL Server, H2, Derby
SELECT ID AS A FROM TEST GROUP BY -A;
-- Yes: MySQL, HSQLDB
-- Fail: Oracle, MS SQL Server, PostgreSQL, H2, Derby
SELECT ID AS A FROM TEST GROUP BY ID HAVING A>0;
-- Yes: MySQL, HSQLDB
-- Fail: Oracle, MS SQL Server, PostgreSQL, H2, Derby
SELECT COUNT(*) AS A FROM TEST GROUP BY ID HAVING A>0;
-- Yes: MySQL, HSQLDB
-- Fail: Oracle, MS SQL Server, PostgreSQL, H2, Derby
h2
update FOO set a = dateadd('second', 4320000, a);
ms sql server
update FOO set a = dateadd(s, 4320000, a);
mysql
update FOO set a = date_add(a, interval 4320000 second);
postgresql
update FOO set a = a + interval '4320000 s';
oracle
update FOO set a = a + INTERVAL '4320000' SECOND;
@@ -98,3 +98,33 @@ SELECT * FROM test WHERE family_name IN ('de Smith', 'Smith');
-- OOPS, the comparison's operands are sorted incorrectly for ignorecase!
EXPLAIN SELECT * FROM test WHERE family_name IN ('de Smith', 'Smith');
-------------------
complete recursive views:
drop all objects;
create table parent(id int primary key, parent int);
insert into parent values(1, null), (2, 1), (3, 1);
with test_view(id, parent) as
select id, parent from parent where id = ?
union all
select parent.id, parent.parent from test_view, parent
where parent.parent = test_view.id
select * from test_view {1: 1};
drop view test_view;
with test_view(id, parent) as
select id, parent from parent where id = 1
union all
select parent.id, parent.parent from test_view, parent
where parent.parent = test_view.id
select * from test_view;
drop view test_view;
drop table parent;
@@ -161,6 +161,9 @@ public class Indexer {
if (!lower.endsWith(".html") && !lower.endsWith(".htm")) {
    return;
}
+if (lower.indexOf("_ja.") >= 0) {
+    return;
+}
if (!noIndex.contains(fileName)) {
    page = new Page(pages.size(), fileName);
    pages.add(page);
......