From a14795c8521a4e18ad38022b5e301cd68d4e43c6 Mon Sep 17 00:00:00 2001 From: HiveGames-OSS Date: Sun, 11 Aug 2024 16:00:01 +0200 Subject: [PATCH] Initial commit --- .gitattributes | 2 + .github/CODEOWNERS | 1 + .github/workflows/build.yml | 35 + .github/workflows/publish.yml | 34 + .gitignore | 22 + CODE_OF_CONDUCT.md | 10 + CONTRIBUTING.md | 12 + README.md | 52 + SECURITY.md | 37 + SUPPORT.md | 10 + leveldb-api/pom.xml | 26 + .../org/iq80/leveldb/CompressionType.java | 57 + .../src/main/java/org/iq80/leveldb/DB.java | 204 ++ .../java/org/iq80/leveldb/DBComparator.java | 57 + .../java/org/iq80/leveldb/DBException.java | 44 + .../main/java/org/iq80/leveldb/DBFactory.java | 57 + .../java/org/iq80/leveldb/DBIterator.java | 68 + .../main/java/org/iq80/leveldb/Logger.java | 51 + .../main/java/org/iq80/leveldb/Options.java | 330 +++ .../src/main/java/org/iq80/leveldb/Range.java | 56 + .../java/org/iq80/leveldb/ReadOptions.java | 75 + .../main/java/org/iq80/leveldb/Snapshot.java | 32 + .../java/org/iq80/leveldb/WriteBatch.java | 50 + .../java/org/iq80/leveldb/WriteOptions.java | 71 + .../java/org/iq80/leveldb/XFilterPolicy.java | 36 + .../java/org/iq80/leveldb/OptionsTest.java | 100 + leveldb-benchmark/pom.xml | 78 + .../iq80/leveldb/benchmark/DbBenchmark.java | 1263 +++++++++ .../org/iq80/leveldb/benchmark/Histogram.java | 178 ++ leveldb/dependency-reduced-pom.xml | 148 ++ leveldb/pom.xml | 165 ++ .../java/org/iq80/leveldb/env/DbLock.java | 35 + .../main/java/org/iq80/leveldb/env/Env.java | 115 + .../main/java/org/iq80/leveldb/env/File.java | 73 + .../java/org/iq80/leveldb/env/NoOpLogger.java | 35 + .../org/iq80/leveldb/env/RandomInputFile.java | 44 + .../org/iq80/leveldb/env/SequentialFile.java | 50 + .../org/iq80/leveldb/env/WritableFile.java | 46 + .../leveldb/fileenv/ByteBufferSupport.java | 83 + .../org/iq80/leveldb/fileenv/EnvImpl.java | 222 ++ .../org/iq80/leveldb/fileenv/FileLock.java | 97 + .../org/iq80/leveldb/fileenv/FileLogger.java | 75 + 
.../org/iq80/leveldb/fileenv/FileUtils.java | 120 + .../org/iq80/leveldb/fileenv/JavaFile.java | 156 ++ .../leveldb/fileenv/MMRandomInputFile.java | 95 + .../iq80/leveldb/fileenv/MMWritableFile.java | 125 + .../org/iq80/leveldb/fileenv/MmapLimiter.java | 85 + .../leveldb/fileenv/SequentialFileImpl.java | 63 + .../fileenv/UnbufferedRandomInputFile.java | 136 + .../fileenv/UnbufferedWritableFile.java | 73 + .../org/iq80/leveldb/impl/Compaction.java | 213 ++ .../org/iq80/leveldb/impl/DbConstants.java | 67 + .../java/org/iq80/leveldb/impl/DbImpl.java | 2027 ++++++++++++++ .../org/iq80/leveldb/impl/FileMetaData.java | 103 + .../java/org/iq80/leveldb/impl/Filename.java | 313 +++ .../iq80/leveldb/impl/InsertIntoHandler.java | 49 + .../org/iq80/leveldb/impl/InternalEntry.java | 102 + .../leveldb/impl/InternalFilterPolicy.java | 79 + .../org/iq80/leveldb/impl/InternalKey.java | 160 ++ .../leveldb/impl/InternalKeyComparator.java | 54 + .../leveldb/impl/InternalUserComparator.java | 89 + .../org/iq80/leveldb/impl/Iq80DBFactory.java | 103 + .../iq80/leveldb/impl/KeyMatchingLookup.java | 58 + .../java/org/iq80/leveldb/impl/Level.java | 256 ++ .../org/iq80/leveldb/impl/LogChunkType.java | 60 + .../org/iq80/leveldb/impl/LogConstants.java | 36 + .../org/iq80/leveldb/impl/LogMonitor.java | 25 + .../org/iq80/leveldb/impl/LogMonitors.java | 63 + .../java/org/iq80/leveldb/impl/LogReader.java | 399 +++ .../java/org/iq80/leveldb/impl/LogWriter.java | 200 ++ .../main/java/org/iq80/leveldb/impl/Logs.java | 57 + .../java/org/iq80/leveldb/impl/LookupKey.java | 46 + .../org/iq80/leveldb/impl/LookupResult.java | 70 + .../java/org/iq80/leveldb/impl/MemTable.java | 88 + .../java/org/iq80/leveldb/impl/ReadStats.java | 60 + .../org/iq80/leveldb/impl/SequenceNumber.java | 50 + .../org/iq80/leveldb/impl/SnapshotList.java | 132 + .../org/iq80/leveldb/impl/TableCache.java | 198 ++ .../org/iq80/leveldb/impl/ValueHolder.java | 51 + .../java/org/iq80/leveldb/impl/ValueType.java | 48 + 
.../java/org/iq80/leveldb/impl/Version.java | 326 +++ .../org/iq80/leveldb/impl/VersionEdit.java | 183 ++ .../org/iq80/leveldb/impl/VersionEditTag.java | 264 ++ .../org/iq80/leveldb/impl/VersionSet.java | 1065 ++++++++ .../org/iq80/leveldb/impl/WriteBatchImpl.java | 125 + .../leveldb/iterator/ASeekingIterator.java | 179 ++ .../leveldb/iterator/DBIteratorAdapter.java | 232 ++ .../org/iq80/leveldb/iterator/DbIterator.java | 110 + .../org/iq80/leveldb/iterator/Direction.java | 39 + .../leveldb/iterator/InternalIterator.java | 31 + .../iterator/InternalTableIterator.java | 106 + .../leveldb/iterator/MemTableIterator.java | 107 + .../leveldb/iterator/MergingIterator.java | 136 + .../leveldb/iterator/SeekingIterator.java | 102 + .../leveldb/iterator/SeekingIterators.java | 73 + .../iq80/leveldb/iterator/SliceIterator.java | 24 + .../iterator/SnapshotSeekingIterator.java | 206 ++ .../iterator/SortedCollectionIterator.java | 137 + .../leveldb/iterator/TwoLevelIterator.java | 180 ++ .../org/iq80/leveldb/memenv/FileState.java | 181 ++ .../java/org/iq80/leveldb/memenv/MemEnv.java | 116 + .../java/org/iq80/leveldb/memenv/MemFile.java | 166 ++ .../java/org/iq80/leveldb/memenv/MemFs.java | 239 ++ .../leveldb/memenv/MemRandomInputFile.java | 65 + .../leveldb/memenv/MemSequentialFile.java | 70 + .../iq80/leveldb/memenv/MemWritableFile.java | 55 + .../java/org/iq80/leveldb/table/Block.java | 116 + .../org/iq80/leveldb/table/BlockBuilder.java | 163 ++ .../org/iq80/leveldb/table/BlockEntry.java | 148 ++ .../org/iq80/leveldb/table/BlockHandle.java | 120 + .../table/BlockHandleSliceWeigher.java | 35 + .../org/iq80/leveldb/table/BlockIterator.java | 236 ++ .../org/iq80/leveldb/table/BlockTrailer.java | 114 + .../iq80/leveldb/table/BloomFilterPolicy.java | 135 + .../leveldb/table/BytewiseComparator.java | 76 + .../java/org/iq80/leveldb/table/CacheKey.java | 53 + .../leveldb/table/CustomUserComparator.java | 63 + .../leveldb/table/FilterBlockBuilder.java | 125 + 
.../iq80/leveldb/table/FilterBlockReader.java | 74 + .../org/iq80/leveldb/table/FilterPolicy.java | 59 + .../java/org/iq80/leveldb/table/Footer.java | 98 + .../iq80/leveldb/table/KeyValueFunction.java | 36 + .../iq80/leveldb/table/RestartPositions.java | 55 + .../java/org/iq80/leveldb/table/Table.java | 301 +++ .../org/iq80/leveldb/table/TableBuilder.java | 330 +++ .../iq80/leveldb/table/UserComparator.java | 32 + .../iq80/leveldb/util/BasicSliceOutput.java | 197 ++ .../org/iq80/leveldb/util/Closeables.java | 93 + .../iq80/leveldb/util/DynamicSliceOutput.java | 207 ++ .../main/java/org/iq80/leveldb/util/Hash.java | 87 + .../java/org/iq80/leveldb/util/ILRUCache.java | 49 + .../java/org/iq80/leveldb/util/IntVector.java | 87 + .../java/org/iq80/leveldb/util/LRUCache.java | 78 + .../leveldb/util/LogMessageFormatter.java | 75 + .../org/iq80/leveldb/util/PureJavaCrc32C.java | 783 ++++++ .../iq80/leveldb/util/SafeListBuilder.java | 100 + .../java/org/iq80/leveldb/util/SizeOf.java | 30 + .../java/org/iq80/leveldb/util/Slice.java | 586 ++++ .../iq80/leveldb/util/SliceComparator.java | 32 + .../org/iq80/leveldb/util/SliceInput.java | 435 +++ .../org/iq80/leveldb/util/SliceOutput.java | 320 +++ .../java/org/iq80/leveldb/util/Slices.java | 247 ++ .../java/org/iq80/leveldb/util/Snappy.java | 273 ++ .../leveldb/util/VariableLengthQuantity.java | 138 + .../main/java/org/iq80/leveldb/util/ZLib.java | 88 + .../org/iq80/leveldb/impl/version.txt | 1 + .../iq80/leveldb/fileenv/FileLockTest.java | 77 + .../iq80/leveldb/fileenv/FileLoggerTest.java | 68 + .../fileenv/SequentialFileImplTest.java | 73 + .../UnbufferedRandomInputFileTest.java | 177 ++ .../leveldb/impl/AddBoundaryInputsTest.java | 192 ++ .../java/org/iq80/leveldb/impl/ApiTest.java | 108 + .../iq80/leveldb/impl/AutoCompactTest.java | 133 + .../iq80/leveldb/impl/CountingHandlesEnv.java | 227 ++ .../org/iq80/leveldb/impl/DbImplTest.java | 2365 +++++++++++++++++ .../org/iq80/leveldb/impl/FilenameTest.java | 103 + 
.../org/iq80/leveldb/impl/GIssue320Test.java | 238 ++ .../iq80/leveldb/impl/InternalKeyTest.java | 70 + .../impl/InternalUserComparatorTest.java | 101 + .../org/iq80/leveldb/impl/LogReaderTest.java | 736 +++++ .../java/org/iq80/leveldb/impl/LogTest.java | 189 ++ .../org/iq80/leveldb/impl/LogWriterTest.java | 130 + .../org/iq80/leveldb/impl/MemTableTest.java | 205 ++ .../iq80/leveldb/impl/NativeInteropTest.java | 151 ++ .../org/iq80/leveldb/impl/OutOfSpaceTest.java | 269 ++ .../org/iq80/leveldb/impl/RecoveryTest.java | 340 +++ .../iq80/leveldb/impl/VersionEditTest.java | 57 + .../org/iq80/leveldb/impl/VersionSetTest.java | 185 ++ .../iq80/leveldb/impl/WriteBatchImplTest.java | 50 + .../leveldb/iterator/IteratorTestUtils.java | 177 ++ .../iterator/MemTableIteratorTest.java | 257 ++ .../leveldb/iterator/MergingIteratorTest.java | 187 ++ .../iterator/SeekingDBIteratorAdapter.java | 102 + .../SortedCollectionIteratorTest.java | 240 ++ .../org/iq80/leveldb/memenv/MemEnvTest.java | 396 +++ .../org/iq80/leveldb/memenv/MemFileTest.java | 94 + .../org/iq80/leveldb/memenv/MemFsTest.java | 36 + .../org/iq80/leveldb/table/BlockHelper.java | 174 ++ .../org/iq80/leveldb/table/BlockTest.java | 205 ++ .../leveldb/table/BloomFilterPolicyTest.java | 172 ++ .../leveldb/table/FilterBlockReaderTest.java | 163 ++ .../iq80/leveldb/table/InMemoryTableTest.java | 31 + .../table/MMRandomInputFileTableTest.java | 33 + .../org/iq80/leveldb/table/TableTest.java | 1213 +++++++++ .../UnbufferedRandomInputFileTableTest.java | 33 + .../java/org/iq80/leveldb/util/HashTest.java | 58 + .../org/iq80/leveldb/util/LRUCacheTest.java | 116 + .../iq80/leveldb/util/PureJavaCrc32CTest.java | 204 ++ .../leveldb/util/SafeListBuilderTest.java | 84 + .../leveldb/util/SliceComparatorTest.java | 66 + .../java/org/iq80/leveldb/util/TestUtils.java | 90 + .../util/VariableLengthQuantityTest.java | 79 + license.txt | 203 ++ notice.md | 5 + pom.xml | 126 + src/checkstyle/checks.xml | 118 + src/license/LICENSE-HEADER.txt 
| 15 + src/site/site.xml | 41 + 198 files changed, 32099 insertions(+) create mode 100644 .gitattributes create mode 100644 .github/CODEOWNERS create mode 100644 .github/workflows/build.yml create mode 100644 .github/workflows/publish.yml create mode 100644 .gitignore create mode 100644 CODE_OF_CONDUCT.md create mode 100644 CONTRIBUTING.md create mode 100644 README.md create mode 100644 SECURITY.md create mode 100644 SUPPORT.md create mode 100644 leveldb-api/pom.xml create mode 100644 leveldb-api/src/main/java/org/iq80/leveldb/CompressionType.java create mode 100644 leveldb-api/src/main/java/org/iq80/leveldb/DB.java create mode 100644 leveldb-api/src/main/java/org/iq80/leveldb/DBComparator.java create mode 100644 leveldb-api/src/main/java/org/iq80/leveldb/DBException.java create mode 100644 leveldb-api/src/main/java/org/iq80/leveldb/DBFactory.java create mode 100644 leveldb-api/src/main/java/org/iq80/leveldb/DBIterator.java create mode 100644 leveldb-api/src/main/java/org/iq80/leveldb/Logger.java create mode 100644 leveldb-api/src/main/java/org/iq80/leveldb/Options.java create mode 100644 leveldb-api/src/main/java/org/iq80/leveldb/Range.java create mode 100644 leveldb-api/src/main/java/org/iq80/leveldb/ReadOptions.java create mode 100644 leveldb-api/src/main/java/org/iq80/leveldb/Snapshot.java create mode 100644 leveldb-api/src/main/java/org/iq80/leveldb/WriteBatch.java create mode 100644 leveldb-api/src/main/java/org/iq80/leveldb/WriteOptions.java create mode 100644 leveldb-api/src/main/java/org/iq80/leveldb/XFilterPolicy.java create mode 100644 leveldb-api/src/test/java/org/iq80/leveldb/OptionsTest.java create mode 100644 leveldb-benchmark/pom.xml create mode 100644 leveldb-benchmark/src/main/java/org/iq80/leveldb/benchmark/DbBenchmark.java create mode 100644 leveldb-benchmark/src/main/java/org/iq80/leveldb/benchmark/Histogram.java create mode 100644 leveldb/dependency-reduced-pom.xml create mode 100644 leveldb/pom.xml create mode 100644 
leveldb/src/main/java/org/iq80/leveldb/env/DbLock.java create mode 100644 leveldb/src/main/java/org/iq80/leveldb/env/Env.java create mode 100644 leveldb/src/main/java/org/iq80/leveldb/env/File.java create mode 100644 leveldb/src/main/java/org/iq80/leveldb/env/NoOpLogger.java create mode 100644 leveldb/src/main/java/org/iq80/leveldb/env/RandomInputFile.java create mode 100644 leveldb/src/main/java/org/iq80/leveldb/env/SequentialFile.java create mode 100644 leveldb/src/main/java/org/iq80/leveldb/env/WritableFile.java create mode 100644 leveldb/src/main/java/org/iq80/leveldb/fileenv/ByteBufferSupport.java create mode 100644 leveldb/src/main/java/org/iq80/leveldb/fileenv/EnvImpl.java create mode 100644 leveldb/src/main/java/org/iq80/leveldb/fileenv/FileLock.java create mode 100644 leveldb/src/main/java/org/iq80/leveldb/fileenv/FileLogger.java create mode 100644 leveldb/src/main/java/org/iq80/leveldb/fileenv/FileUtils.java create mode 100644 leveldb/src/main/java/org/iq80/leveldb/fileenv/JavaFile.java create mode 100644 leveldb/src/main/java/org/iq80/leveldb/fileenv/MMRandomInputFile.java create mode 100644 leveldb/src/main/java/org/iq80/leveldb/fileenv/MMWritableFile.java create mode 100644 leveldb/src/main/java/org/iq80/leveldb/fileenv/MmapLimiter.java create mode 100644 leveldb/src/main/java/org/iq80/leveldb/fileenv/SequentialFileImpl.java create mode 100644 leveldb/src/main/java/org/iq80/leveldb/fileenv/UnbufferedRandomInputFile.java create mode 100644 leveldb/src/main/java/org/iq80/leveldb/fileenv/UnbufferedWritableFile.java create mode 100644 leveldb/src/main/java/org/iq80/leveldb/impl/Compaction.java create mode 100644 leveldb/src/main/java/org/iq80/leveldb/impl/DbConstants.java create mode 100644 leveldb/src/main/java/org/iq80/leveldb/impl/DbImpl.java create mode 100644 leveldb/src/main/java/org/iq80/leveldb/impl/FileMetaData.java create mode 100644 leveldb/src/main/java/org/iq80/leveldb/impl/Filename.java create mode 100644 
leveldb/src/main/java/org/iq80/leveldb/impl/InsertIntoHandler.java create mode 100644 leveldb/src/main/java/org/iq80/leveldb/impl/InternalEntry.java create mode 100644 leveldb/src/main/java/org/iq80/leveldb/impl/InternalFilterPolicy.java create mode 100644 leveldb/src/main/java/org/iq80/leveldb/impl/InternalKey.java create mode 100644 leveldb/src/main/java/org/iq80/leveldb/impl/InternalKeyComparator.java create mode 100644 leveldb/src/main/java/org/iq80/leveldb/impl/InternalUserComparator.java create mode 100644 leveldb/src/main/java/org/iq80/leveldb/impl/Iq80DBFactory.java create mode 100644 leveldb/src/main/java/org/iq80/leveldb/impl/KeyMatchingLookup.java create mode 100644 leveldb/src/main/java/org/iq80/leveldb/impl/Level.java create mode 100644 leveldb/src/main/java/org/iq80/leveldb/impl/LogChunkType.java create mode 100644 leveldb/src/main/java/org/iq80/leveldb/impl/LogConstants.java create mode 100644 leveldb/src/main/java/org/iq80/leveldb/impl/LogMonitor.java create mode 100644 leveldb/src/main/java/org/iq80/leveldb/impl/LogMonitors.java create mode 100644 leveldb/src/main/java/org/iq80/leveldb/impl/LogReader.java create mode 100644 leveldb/src/main/java/org/iq80/leveldb/impl/LogWriter.java create mode 100644 leveldb/src/main/java/org/iq80/leveldb/impl/Logs.java create mode 100644 leveldb/src/main/java/org/iq80/leveldb/impl/LookupKey.java create mode 100644 leveldb/src/main/java/org/iq80/leveldb/impl/LookupResult.java create mode 100644 leveldb/src/main/java/org/iq80/leveldb/impl/MemTable.java create mode 100644 leveldb/src/main/java/org/iq80/leveldb/impl/ReadStats.java create mode 100644 leveldb/src/main/java/org/iq80/leveldb/impl/SequenceNumber.java create mode 100644 leveldb/src/main/java/org/iq80/leveldb/impl/SnapshotList.java create mode 100644 leveldb/src/main/java/org/iq80/leveldb/impl/TableCache.java create mode 100644 leveldb/src/main/java/org/iq80/leveldb/impl/ValueHolder.java create mode 100644 
leveldb/src/main/java/org/iq80/leveldb/impl/ValueType.java create mode 100644 leveldb/src/main/java/org/iq80/leveldb/impl/Version.java create mode 100644 leveldb/src/main/java/org/iq80/leveldb/impl/VersionEdit.java create mode 100644 leveldb/src/main/java/org/iq80/leveldb/impl/VersionEditTag.java create mode 100644 leveldb/src/main/java/org/iq80/leveldb/impl/VersionSet.java create mode 100644 leveldb/src/main/java/org/iq80/leveldb/impl/WriteBatchImpl.java create mode 100644 leveldb/src/main/java/org/iq80/leveldb/iterator/ASeekingIterator.java create mode 100644 leveldb/src/main/java/org/iq80/leveldb/iterator/DBIteratorAdapter.java create mode 100644 leveldb/src/main/java/org/iq80/leveldb/iterator/DbIterator.java create mode 100644 leveldb/src/main/java/org/iq80/leveldb/iterator/Direction.java create mode 100644 leveldb/src/main/java/org/iq80/leveldb/iterator/InternalIterator.java create mode 100644 leveldb/src/main/java/org/iq80/leveldb/iterator/InternalTableIterator.java create mode 100644 leveldb/src/main/java/org/iq80/leveldb/iterator/MemTableIterator.java create mode 100644 leveldb/src/main/java/org/iq80/leveldb/iterator/MergingIterator.java create mode 100644 leveldb/src/main/java/org/iq80/leveldb/iterator/SeekingIterator.java create mode 100644 leveldb/src/main/java/org/iq80/leveldb/iterator/SeekingIterators.java create mode 100644 leveldb/src/main/java/org/iq80/leveldb/iterator/SliceIterator.java create mode 100644 leveldb/src/main/java/org/iq80/leveldb/iterator/SnapshotSeekingIterator.java create mode 100644 leveldb/src/main/java/org/iq80/leveldb/iterator/SortedCollectionIterator.java create mode 100644 leveldb/src/main/java/org/iq80/leveldb/iterator/TwoLevelIterator.java create mode 100644 leveldb/src/main/java/org/iq80/leveldb/memenv/FileState.java create mode 100644 leveldb/src/main/java/org/iq80/leveldb/memenv/MemEnv.java create mode 100644 leveldb/src/main/java/org/iq80/leveldb/memenv/MemFile.java create mode 100644 
leveldb/src/main/java/org/iq80/leveldb/memenv/MemFs.java create mode 100644 leveldb/src/main/java/org/iq80/leveldb/memenv/MemRandomInputFile.java create mode 100644 leveldb/src/main/java/org/iq80/leveldb/memenv/MemSequentialFile.java create mode 100644 leveldb/src/main/java/org/iq80/leveldb/memenv/MemWritableFile.java create mode 100644 leveldb/src/main/java/org/iq80/leveldb/table/Block.java create mode 100644 leveldb/src/main/java/org/iq80/leveldb/table/BlockBuilder.java create mode 100644 leveldb/src/main/java/org/iq80/leveldb/table/BlockEntry.java create mode 100644 leveldb/src/main/java/org/iq80/leveldb/table/BlockHandle.java create mode 100644 leveldb/src/main/java/org/iq80/leveldb/table/BlockHandleSliceWeigher.java create mode 100644 leveldb/src/main/java/org/iq80/leveldb/table/BlockIterator.java create mode 100644 leveldb/src/main/java/org/iq80/leveldb/table/BlockTrailer.java create mode 100644 leveldb/src/main/java/org/iq80/leveldb/table/BloomFilterPolicy.java create mode 100644 leveldb/src/main/java/org/iq80/leveldb/table/BytewiseComparator.java create mode 100644 leveldb/src/main/java/org/iq80/leveldb/table/CacheKey.java create mode 100644 leveldb/src/main/java/org/iq80/leveldb/table/CustomUserComparator.java create mode 100644 leveldb/src/main/java/org/iq80/leveldb/table/FilterBlockBuilder.java create mode 100644 leveldb/src/main/java/org/iq80/leveldb/table/FilterBlockReader.java create mode 100644 leveldb/src/main/java/org/iq80/leveldb/table/FilterPolicy.java create mode 100644 leveldb/src/main/java/org/iq80/leveldb/table/Footer.java create mode 100644 leveldb/src/main/java/org/iq80/leveldb/table/KeyValueFunction.java create mode 100644 leveldb/src/main/java/org/iq80/leveldb/table/RestartPositions.java create mode 100644 leveldb/src/main/java/org/iq80/leveldb/table/Table.java create mode 100644 leveldb/src/main/java/org/iq80/leveldb/table/TableBuilder.java create mode 100644 leveldb/src/main/java/org/iq80/leveldb/table/UserComparator.java create mode 
100644 leveldb/src/main/java/org/iq80/leveldb/util/BasicSliceOutput.java create mode 100644 leveldb/src/main/java/org/iq80/leveldb/util/Closeables.java create mode 100644 leveldb/src/main/java/org/iq80/leveldb/util/DynamicSliceOutput.java create mode 100644 leveldb/src/main/java/org/iq80/leveldb/util/Hash.java create mode 100644 leveldb/src/main/java/org/iq80/leveldb/util/ILRUCache.java create mode 100644 leveldb/src/main/java/org/iq80/leveldb/util/IntVector.java create mode 100644 leveldb/src/main/java/org/iq80/leveldb/util/LRUCache.java create mode 100644 leveldb/src/main/java/org/iq80/leveldb/util/LogMessageFormatter.java create mode 100644 leveldb/src/main/java/org/iq80/leveldb/util/PureJavaCrc32C.java create mode 100644 leveldb/src/main/java/org/iq80/leveldb/util/SafeListBuilder.java create mode 100644 leveldb/src/main/java/org/iq80/leveldb/util/SizeOf.java create mode 100644 leveldb/src/main/java/org/iq80/leveldb/util/Slice.java create mode 100644 leveldb/src/main/java/org/iq80/leveldb/util/SliceComparator.java create mode 100644 leveldb/src/main/java/org/iq80/leveldb/util/SliceInput.java create mode 100644 leveldb/src/main/java/org/iq80/leveldb/util/SliceOutput.java create mode 100644 leveldb/src/main/java/org/iq80/leveldb/util/Slices.java create mode 100644 leveldb/src/main/java/org/iq80/leveldb/util/Snappy.java create mode 100644 leveldb/src/main/java/org/iq80/leveldb/util/VariableLengthQuantity.java create mode 100644 leveldb/src/main/java/org/iq80/leveldb/util/ZLib.java create mode 100644 leveldb/src/main/resources/org/iq80/leveldb/impl/version.txt create mode 100644 leveldb/src/test/java/org/iq80/leveldb/fileenv/FileLockTest.java create mode 100644 leveldb/src/test/java/org/iq80/leveldb/fileenv/FileLoggerTest.java create mode 100644 leveldb/src/test/java/org/iq80/leveldb/fileenv/SequentialFileImplTest.java create mode 100644 leveldb/src/test/java/org/iq80/leveldb/fileenv/UnbufferedRandomInputFileTest.java create mode 100644 
leveldb/src/test/java/org/iq80/leveldb/impl/AddBoundaryInputsTest.java create mode 100644 leveldb/src/test/java/org/iq80/leveldb/impl/ApiTest.java create mode 100644 leveldb/src/test/java/org/iq80/leveldb/impl/AutoCompactTest.java create mode 100644 leveldb/src/test/java/org/iq80/leveldb/impl/CountingHandlesEnv.java create mode 100644 leveldb/src/test/java/org/iq80/leveldb/impl/DbImplTest.java create mode 100644 leveldb/src/test/java/org/iq80/leveldb/impl/FilenameTest.java create mode 100644 leveldb/src/test/java/org/iq80/leveldb/impl/GIssue320Test.java create mode 100644 leveldb/src/test/java/org/iq80/leveldb/impl/InternalKeyTest.java create mode 100644 leveldb/src/test/java/org/iq80/leveldb/impl/InternalUserComparatorTest.java create mode 100644 leveldb/src/test/java/org/iq80/leveldb/impl/LogReaderTest.java create mode 100644 leveldb/src/test/java/org/iq80/leveldb/impl/LogTest.java create mode 100644 leveldb/src/test/java/org/iq80/leveldb/impl/LogWriterTest.java create mode 100644 leveldb/src/test/java/org/iq80/leveldb/impl/MemTableTest.java create mode 100644 leveldb/src/test/java/org/iq80/leveldb/impl/NativeInteropTest.java create mode 100644 leveldb/src/test/java/org/iq80/leveldb/impl/OutOfSpaceTest.java create mode 100644 leveldb/src/test/java/org/iq80/leveldb/impl/RecoveryTest.java create mode 100644 leveldb/src/test/java/org/iq80/leveldb/impl/VersionEditTest.java create mode 100644 leveldb/src/test/java/org/iq80/leveldb/impl/VersionSetTest.java create mode 100644 leveldb/src/test/java/org/iq80/leveldb/impl/WriteBatchImplTest.java create mode 100644 leveldb/src/test/java/org/iq80/leveldb/iterator/IteratorTestUtils.java create mode 100644 leveldb/src/test/java/org/iq80/leveldb/iterator/MemTableIteratorTest.java create mode 100644 leveldb/src/test/java/org/iq80/leveldb/iterator/MergingIteratorTest.java create mode 100644 leveldb/src/test/java/org/iq80/leveldb/iterator/SeekingDBIteratorAdapter.java create mode 100644 
leveldb/src/test/java/org/iq80/leveldb/iterator/SortedCollectionIteratorTest.java create mode 100644 leveldb/src/test/java/org/iq80/leveldb/memenv/MemEnvTest.java create mode 100644 leveldb/src/test/java/org/iq80/leveldb/memenv/MemFileTest.java create mode 100644 leveldb/src/test/java/org/iq80/leveldb/memenv/MemFsTest.java create mode 100644 leveldb/src/test/java/org/iq80/leveldb/table/BlockHelper.java create mode 100644 leveldb/src/test/java/org/iq80/leveldb/table/BlockTest.java create mode 100644 leveldb/src/test/java/org/iq80/leveldb/table/BloomFilterPolicyTest.java create mode 100644 leveldb/src/test/java/org/iq80/leveldb/table/FilterBlockReaderTest.java create mode 100644 leveldb/src/test/java/org/iq80/leveldb/table/InMemoryTableTest.java create mode 100644 leveldb/src/test/java/org/iq80/leveldb/table/MMRandomInputFileTableTest.java create mode 100644 leveldb/src/test/java/org/iq80/leveldb/table/TableTest.java create mode 100644 leveldb/src/test/java/org/iq80/leveldb/table/UnbufferedRandomInputFileTableTest.java create mode 100644 leveldb/src/test/java/org/iq80/leveldb/util/HashTest.java create mode 100644 leveldb/src/test/java/org/iq80/leveldb/util/LRUCacheTest.java create mode 100644 leveldb/src/test/java/org/iq80/leveldb/util/PureJavaCrc32CTest.java create mode 100644 leveldb/src/test/java/org/iq80/leveldb/util/SafeListBuilderTest.java create mode 100644 leveldb/src/test/java/org/iq80/leveldb/util/SliceComparatorTest.java create mode 100644 leveldb/src/test/java/org/iq80/leveldb/util/TestUtils.java create mode 100644 leveldb/src/test/java/org/iq80/leveldb/util/VariableLengthQuantityTest.java create mode 100644 license.txt create mode 100644 notice.md create mode 100644 pom.xml create mode 100644 src/checkstyle/checks.xml create mode 100644 src/license/LICENSE-HEADER.txt create mode 100644 src/site/site.xml diff --git a/.gitattributes b/.gitattributes new file mode 100644 index 0000000..e7aa96e --- /dev/null +++ b/.gitattributes @@ -0,0 +1,2 @@ +* text=auto 
+*.java text eol=lf diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS new file mode 100644 index 0000000..21c4c12 --- /dev/null +++ b/.github/CODEOWNERS @@ -0,0 +1 @@ +* @HiveGamesOSS/chunker \ No newline at end of file diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml new file mode 100644 index 0000000..ce21dc5 --- /dev/null +++ b/.github/workflows/build.yml @@ -0,0 +1,35 @@ +# This workflow will build a Java project with Maven, and cache/restore any dependencies to improve the workflow execution time +# For more information see: https://docs.github.com/en/actions/automating-builds-and-tests/building-and-testing-java-with-maven + +# This workflow uses actions that are not certified by GitHub. +# They are provided by a third-party and are governed by +# separate terms of service, privacy policy, and support +# documentation. + +name: Java CI with Maven + +on: + push: + branches: [ "master" ] + pull_request: + branches: [ "master" ] + +jobs: + build: + + runs-on: ubuntu-latest + + steps: + - uses: actions/checkout@v4 + - name: Set up JDK 17 + uses: actions/setup-java@v3 + with: + java-version: '17' + distribution: 'temurin' + cache: maven + - name: Build with Maven + run: mvn -B package --file pom.xml + + # Optional: Uploads the full dependency graph to GitHub to improve the quality of Dependabot alerts this repository can receive + - name: Update dependency graph + uses: advanced-security/maven-dependency-submission-action@f97a4078d80bca790cd68e93a88da11a056ac0a3 diff --git a/.github/workflows/publish.yml b/.github/workflows/publish.yml new file mode 100644 index 0000000..de9e6bd --- /dev/null +++ b/.github/workflows/publish.yml @@ -0,0 +1,34 @@ +# This workflow will build a package using Maven and then publish it to GitHub packages when a release is created +# For more information see: https://github.com/actions/setup-java/blob/main/docs/advanced-usage.md#apache-maven-with-a-settings-path + +name: Publish Maven Package + +on: + release: + 
types: [created] + +jobs: + build: + + runs-on: ubuntu-latest + permissions: + contents: read + packages: write + + steps: + - uses: actions/checkout@v4 + - name: Set up JDK 17 + uses: actions/setup-java@v3 + with: + java-version: '17' + distribution: 'temurin' + server-id: github # Value of the distributionManagement/repository/id field of the pom.xml + settings-path: ${{ github.workspace }} # location for the settings.xml file + + - name: Build with Maven + run: mvn -B package --file pom.xml + + - name: Publish to GitHub Packages Apache Maven + run: mvn deploy -s $GITHUB_WORKSPACE/settings.xml + env: + GITHUB_TOKEN: ${{ github.token }} diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..67590d8 --- /dev/null +++ b/.gitignore @@ -0,0 +1,22 @@ +target/ +/var +pom.xml.versionsBackup +test-output/ +/atlassian-ide-plugin.x +.idea +.*.swp +.*.swo +leveldb-c +*~ +*.swp +.idea +.idea/* +*.iml +*.ipr +*.iws +.DS_Store +.scala_dependencies +.project +.classpath +.settings +eclipse-classes diff --git a/CODE_OF_CONDUCT.md b/CODE_OF_CONDUCT.md new file mode 100644 index 0000000..686e5e7 --- /dev/null +++ b/CODE_OF_CONDUCT.md @@ -0,0 +1,10 @@ +# Microsoft Open Source Code of Conduct + +This project has adopted the [Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/). + +Resources: + +- [Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/) +- [Microsoft Code of Conduct FAQ](https://opensource.microsoft.com/codeofconduct/faq/) +- Contact [opencode@microsoft.com](mailto:opencode@microsoft.com) with questions or concerns +- Employees can reach out at [aka.ms/opensource/moderation-support](https://aka.ms/opensource/moderation-support) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md new file mode 100644 index 0000000..cdcd70e --- /dev/null +++ b/CONTRIBUTING.md @@ -0,0 +1,12 @@ +# Contributing to leveldb-mcpe-java + +Thanks for your interest in contributing to this repository. 
+ +If you find a bug in Chunker or wish to request adding a feature please make a GitHub issue on the Chunker repository, +searching first to ensure it doesn't exist already. + +We don't accept contributions currently for this repository though you are more than welcome to fork it and add your own +contributions, you are also welcome to create an issue on our main Chunker repository referencing your changes for +review. + +This project is maintained by Hive Games. diff --git a/README.md b/README.md new file mode 100644 index 0000000..7eb1e3b --- /dev/null +++ b/README.md @@ -0,0 +1,52 @@ +# LevelDB MCPE in Java + +This project is a fork of https://github.com/pcmind/leveldb aiming to implement the changes made +in https://github.com/Mojang/leveldb-mcpe/ where relevant to allow the library to read MCPE. + +For more information see the original repository on use cases / API usage. + +Building +-------- + +**Requirements** + +- Git +- Java 11 or higher +- Maven + +**Steps** + +1. Clone this repository via `git clone git://github.com/HiveGamesOSS/leveldb-mcpe-java.git`. +2. Build the project via `mvn clean install`. +3. Obtain the library from `target/` folder. + +Library Usage +-------- + +You can use the following in your maven pom.xml: + +```xml + + + com.hivemc.leveldb + leveldb + 1.0.0 + +``` + +```xml + + + com.hivemc.leveldb + leveldb-api + 1.0.0 + +``` + +This library is aimed as a drop in replacement to the original fork https://github.com/pcmind/leveldb. + +License +-------- + +Details of the LICENSE can be found in the license.txt, this fork maintains the original license for all code and +modifications. diff --git a/SECURITY.md b/SECURITY.md new file mode 100644 index 0000000..ff3648b --- /dev/null +++ b/SECURITY.md @@ -0,0 +1,37 @@ +## Security + +Hive Games takes the security of our software products and services seriously, which includes all source code +repositories managed through our GitHub organizations. 
+ +If you believe you have found a security vulnerability in any Hive Games repository which follows the MITRE.org +definition of “a weakness in the computational logic (e.g., code) found in software and hardware components that, when +exploited, results in a negative impact to confidentiality, integrity, OR availability. Mitigation of the +vulnerabilities in this context typically involves coding changes but could also include specification changes or even +specification deprecations (e.g., removal of affected protocols or functionality in their entirety).” MITRE.org CNA +Rules 7.1, please report it to us as described below. + +## Reporting Security Issues + +**Please do not report security vulnerabilities through public GitHub issues.** + +Instead, please send email to [secure@hivemc.com](mailto:secure@hivemc.com). + +You should receive a response within 24 hours. If for some reason you do not, please follow up via email to ensure we +received your original message. + +Please include the requested information listed below (as much as you can provide) to help us better understand the +nature and scope of the possible issue: + +* Type of issue (e.g. buffer overflow, SQL injection, cross-site scripting, etc.) +* Full paths of source file(s) related to the manifestation of the issue +* The location of the affected source code (tag/branch/commit or direct URL) +* Any special configuration required to reproduce the issue +* Step-by-step instructions to reproduce the issue +* Proof-of-concept or exploit code (if possible) +* Impact of the issue, including how an attacker might exploit the issue + +This information will help us triage your report more quickly. + +## Preferred Languages + +We prefer all communications to be in English. 
diff --git a/SUPPORT.md b/SUPPORT.md new file mode 100644 index 0000000..5f6c7a6 --- /dev/null +++ b/SUPPORT.md @@ -0,0 +1,10 @@ +# Support + +## How to file issues and get help + +This project does not allow issues / PRs, if you would like help with Chunker related issues please file an issue on our +main repository referencing this project if needed. + +## Hive Games Support Policy + +Support for this project is limited to the Chunker scope. diff --git a/leveldb-api/pom.xml b/leveldb-api/pom.xml new file mode 100644 index 0000000..4fb2e5f --- /dev/null +++ b/leveldb-api/pom.xml @@ -0,0 +1,26 @@ + + + 4.0.0 + + + com.hivemc.leveldb + leveldb-project + 1.0.0-SNAPSHOT + + + leveldb-api + ${project.artifactId} + High level Java API for LevelDB + + + ${project.parent.basedir} + + + + + org.testng + testng + test + + + diff --git a/leveldb-api/src/main/java/org/iq80/leveldb/CompressionType.java b/leveldb-api/src/main/java/org/iq80/leveldb/CompressionType.java new file mode 100644 index 0000000..99df424 --- /dev/null +++ b/leveldb-api/src/main/java/org/iq80/leveldb/CompressionType.java @@ -0,0 +1,57 @@ +/* + * Copyright (C) 2011 the original author or authors. + * See the notice.md file distributed with this work for additional + * information regarding copyright ownership. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.iq80.leveldb; + +/** + * DB contents are stored in a set of blocks, each of which holds a + * sequence of key,value pairs. 
Each block may be compressed before + * being stored in a file. The following enum describes which + * compression method (if any) is used to compress a block. + */ +public enum CompressionType +{ + // NOTE: do not change the values of existing entries, as these are + // part of the persistent format on disk. + NONE(0x00), + SNAPPY(0x01), + // MCPE Compression Types + ZLIB(0x02), + ZLIB_RAW(0x04); + + public static CompressionType getCompressionTypeByPersistentId(int persistentId) + { + for (CompressionType compressionType : CompressionType.values()) { + if (compressionType.persistentId == persistentId) { + return compressionType; + } + } + throw new IllegalArgumentException("Unknown persistentId " + persistentId); + } + + private final int persistentId; + + CompressionType(int persistentId) + { + this.persistentId = persistentId; + } + + public int persistentId() + { + return persistentId; + } +} diff --git a/leveldb-api/src/main/java/org/iq80/leveldb/DB.java b/leveldb-api/src/main/java/org/iq80/leveldb/DB.java new file mode 100644 index 0000000..0a709c7 --- /dev/null +++ b/leveldb-api/src/main/java/org/iq80/leveldb/DB.java @@ -0,0 +1,204 @@ +/* + * Copyright (C) 2011 the original author or authors. + * See the notice.md file distributed with this work for additional + * information regarding copyright ownership. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.iq80.leveldb; + +import java.io.Closeable; +import java.util.Map; + +/** + * A DB is a persistent ordered map from keys to values. + * A DB is safe for concurrent access from multiple threads without + * any external synchronization. + * + * @author Hiram Chirino + */ +public interface DB + extends Iterable>, Closeable +{ + /** + * Same as calling {@link DB#get(byte[], ReadOptions)} with default options + */ + byte[] get(byte[] key) + throws DBException; + + /** + * If the database contains an entry for "key" return its corresponding + * value. + *

+ * If there is no entry for "key" return null. + * + * @param key key to search for + * @param options read option + * @return key value or {@code null} + * @throws DBException if error occurred in accessing db state + */ + byte[] get(byte[] key, ReadOptions options) + throws DBException; + + /** + * Same as calling {@link DB#iterator(ReadOptions)} with default options + */ + @Override + DBIterator iterator(); + + /** + * Return an iterator over the contents of the database. + * The result of {@link DB#iterator(ReadOptions)} will automatically position + * itself to first entry if seek method is not called. + *

+ * It is preferable to call one of the Seek methods on the iterator before + * using it. + *

+ * Caller should call {@link DBIterator#close()} when it is no longer needed. + * The returned iterator should be closed before this db is deleted. + * + * @param options iterator read options + * @return new iterator content of the database + */ + DBIterator iterator(ReadOptions options); + + void put(byte[] key, byte[] value) + throws DBException; + + void delete(byte[] key) + throws DBException; + + void write(WriteBatch updates) + throws DBException; + + WriteBatch createWriteBatch(); + + /** + * Set the database entry for "key" to "value". + *

+ * Note: consider setting options.sync = true. + * + * @param key entry key + * @param value entry value + * @return null if options.isSnapshot()==false otherwise returns a snapshot + * of the DB after this operation. + * @throws DBException on any write failure + */ + Snapshot put(byte[] key, byte[] value, WriteOptions options) + throws DBException; + + /** + * Remove the database entry (if any) for "key". + * + * @return null if {@link WriteOptions#snapshot()}==false otherwise returns a snapshot + * of the DB after this operation. + * @throws DBException on any write failure. It is not an error if "key" + * did not exist in the database. + */ + Snapshot delete(byte[] key, WriteOptions options) + throws DBException; + + /** + * Apply the specified updates to the database. + *

+ * Note: consider setting options.sync = true. + * + * @return null if {@link WriteOptions#snapshot()}==false otherwise returns a snapshot + * of the DB after this operation. + * @throws DBException on any write failure + */ + Snapshot write(WriteBatch updates, WriteOptions options) + throws DBException; + + /** + * Return a handle to the current DB state. Iterators created with + * this handle will all observe a stable snapshot of the current DB + * state. The caller must call {@link Snapshot#close()} when the + * snapshot is no longer needed. + * + * @return current db state Snapshot handle + */ + Snapshot getSnapshot(); + + /** + * For each i in [0,n-1], store in "sizes[i]", the approximate + * file system space used by keys in "[range[i].start .. range[i].limit)". + *

+ * Note that the returned sizes measure file system space usage, so + * if the user data compresses by a factor of ten, the returned + * sizes will be one-tenth the size of the corresponding user data size. + *

+ * The results may not include the sizes of recently written data. + * + * @param ranges each range to test for + * @return array with size result of each range + */ + long[] getApproximateSizes(Range... ranges); + + /** + * DB implementations can export properties about their state + * via this method. If "name" is a valid property understood by this + * DB implementation, its current value is returned; otherwise + * {@code null} is returned. + *

+ *

+ * Valid property names include: + *

+ *

+ * + * @param name property name + * @return property value, {@code null} if property does not exist + */ + String getProperty(String name); + + /** + * Suspends any background compaction threads. This method + * returns once the background compactions are suspended. + */ + void suspendCompactions() + throws InterruptedException; + + /** + * Resumes the background compaction threads. + */ + void resumeCompactions(); + + /** + * Compact the underlying storage for the key range [begin, end]. + * In particular, deleted and overwritten versions are discarded, + * and the data is rearranged to reduce the cost of operations + * needed to access the data. This operation should typically only + * be invoked by users who understand the underlying implementation. + *

+ *

+ * {@code begin == null} is treated as before all keys in the database. + * {@code end == null} is treated as a key after all keys in the database. + *

+ * Therefore the call to {@code db.compactRange(null, null);} will compact + * the entire database. + * + * @param begin if null then compaction start from the first key + * @param end if null then compaction ends at the last key + */ + void compactRange(byte[] begin, byte[] end) + throws DBException; +} diff --git a/leveldb-api/src/main/java/org/iq80/leveldb/DBComparator.java b/leveldb-api/src/main/java/org/iq80/leveldb/DBComparator.java new file mode 100644 index 0000000..95b0b75 --- /dev/null +++ b/leveldb-api/src/main/java/org/iq80/leveldb/DBComparator.java @@ -0,0 +1,57 @@ +/* + * Copyright (C) 2011 the original author or authors. + * See the notice.md file distributed with this work for additional + * information regarding copyright ownership. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.iq80.leveldb; + +import java.util.Comparator; + +/** + * @author Hiram Chirino + */ +public interface DBComparator + extends Comparator +{ + /** + * The name of the comparator. Used to check for comparator + * mismatches (i.e., a DB created with one comparator is + * accessed using a different comparator. + *

+ * The client of this package should switch to a new name whenever + * the comparator implementation changes in a way that will cause + * the relative ordering of any two keys to change. + *

Hiram Chirino + */ +public class DBException + extends RuntimeException +{ + public DBException() + { + } + + public DBException(String s) + { + super(s); + } + + public DBException(String s, Throwable throwable) + { + super(s, throwable); + } + + public DBException(Throwable throwable) + { + super(throwable); + } +} diff --git a/leveldb-api/src/main/java/org/iq80/leveldb/DBFactory.java b/leveldb-api/src/main/java/org/iq80/leveldb/DBFactory.java new file mode 100644 index 0000000..bff7122 --- /dev/null +++ b/leveldb-api/src/main/java/org/iq80/leveldb/DBFactory.java @@ -0,0 +1,57 @@ +/* + * Copyright (C) 2011 the original author or authors. + * See the notice.md file distributed with this work for additional + * information regarding copyright ownership. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.iq80.leveldb; + +import java.io.File; +import java.io.IOException; + +/** + * @author Hiram Chirino + */ +public interface DBFactory +{ + /** + * Open/Create a new Database using provided "options" to configure DB + * behavior. 
+ * + * @param path DB root folder + * @param options DB options + * @return a new DB instance + * @throws IOException if unable to open/read DB or preconditions failed + */ + DB open(File path, Options options) + throws IOException; + + /** + * Destroy a database, delete DB files and root directory + * @param path DB root folder + * @param options options used to open DB + * @throws IOException if failed to destruct DB + */ + void destroy(File path, Options options) + throws IOException; + + /** + * Try to repair a corrupted DB or not closed properly. + * @param path DB root directory + * @param options DB options + * @throws IOException if failed to open/recover DB + */ + void repair(File path, Options options) + throws IOException; +} diff --git a/leveldb-api/src/main/java/org/iq80/leveldb/DBIterator.java b/leveldb-api/src/main/java/org/iq80/leveldb/DBIterator.java new file mode 100644 index 0000000..4a2d037 --- /dev/null +++ b/leveldb-api/src/main/java/org/iq80/leveldb/DBIterator.java @@ -0,0 +1,68 @@ +/* + * Copyright (C) 2011 the original author or authors. + * See the notice.md file distributed with this work for additional + * information regarding copyright ownership. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.iq80.leveldb; + +import java.io.Closeable; +import java.util.Iterator; +import java.util.Map; + +/** + * @author Hiram Chirino + */ +public interface DBIterator + extends Iterator>, Closeable +{ + /** + * Repositions the iterator so the key of the next BlockElement + * returned greater than or equal to the specified targetKey. + */ + void seek(byte[] key); + + /** + * Repositions the iterator so is is at the beginning of the Database. + */ + void seekToFirst(); + + /** + * Returns the next element in the iteration, without advancing the iteration. + */ + Map.Entry peekNext(); + + /** + * @return true if there is a previous entry in the iteration. + */ + boolean hasPrev(); + + /** + * @return the previous element in the iteration and rewinds the iteration. + */ + Map.Entry prev(); + + /** + * @return the previous element in the iteration, without rewinding the iteration. + */ + Map.Entry peekPrev(); + + /** + * Repositions the iterator so it is at the end of of the Database. + */ + void seekToLast(); + + @Override + void close(); +} diff --git a/leveldb-api/src/main/java/org/iq80/leveldb/Logger.java b/leveldb-api/src/main/java/org/iq80/leveldb/Logger.java new file mode 100644 index 0000000..f9b74bf --- /dev/null +++ b/leveldb-api/src/main/java/org/iq80/leveldb/Logger.java @@ -0,0 +1,51 @@ +/* + * Copyright (C) 2011 the original author or authors. + * See the notice.md file distributed with this work for additional + * information regarding copyright ownership. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.iq80.leveldb; + +import java.io.Closeable; +import java.io.IOException; + +/** + * An interface for writing log messages. + * + * @author Hiram Chirino + */ +public interface Logger extends Closeable +{ + void log(String message); + + /** + * Substitutes each {@code %s} in {@code template} with an argument. Arguments without a placeholder will + * be placed at the end of the template. + *

+ * This is a default method to avoid incompatibilities with older logger interface. + * + * @param template a non-null template string containing 0 or more {@code %s} placeholders. + * @param args the arguments to be substituted into the message template. + */ + default void log(String template, Object... args) + { + log(String.format(template, args)); + } + + @Override + default void close() throws IOException + { + //default to be compatible with older interface + } +} diff --git a/leveldb-api/src/main/java/org/iq80/leveldb/Options.java b/leveldb-api/src/main/java/org/iq80/leveldb/Options.java new file mode 100644 index 0000000..b12820e --- /dev/null +++ b/leveldb-api/src/main/java/org/iq80/leveldb/Options.java @@ -0,0 +1,330 @@ +/* + * Copyright (C) 2011 the original author or authors. + * See the notice.md file distributed with this work for additional + * information regarding copyright ownership. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.iq80.leveldb; + +/** + * Options to control the behavior of a database + */ +public class Options +{ + private boolean createIfMissing = true; + private boolean errorIfExists; + private int writeBufferSize = 4 << 20; + + private int maxOpenFiles = 1000; + + private int blockRestartInterval = 16; + private int maxFileSize = 2 << 20; + private int blockSize = 4 * 1024; + private CompressionType compressionType = CompressionType.ZLIB_RAW; + private boolean paranoidChecks; + private DBComparator comparator; + private Logger logger; + private long cacheSize = 8 << 20; + private XFilterPolicy filterPolicy; + private boolean reuseLogs = false; + + /** + * Clone, create a copy of the provided instance of {@link Options} + */ + public static Options fromOptions(Options options) + { + checkArgNotNull(options, "Options can't be null"); + final Options options1 = new Options(); + options1.createIfMissing = options.createIfMissing; + options1.errorIfExists = options.errorIfExists; + options1.writeBufferSize = options.writeBufferSize; + options1.maxOpenFiles = options.maxOpenFiles; + options1.blockRestartInterval = options.blockRestartInterval; + options1.maxFileSize = options.maxFileSize; + options1.blockSize = options.blockSize; + options1.compressionType = options.compressionType; + options1.paranoidChecks = options.paranoidChecks; + options1.comparator = options.comparator; + options1.logger = options.logger; + options1.cacheSize = options.cacheSize; + options1.filterPolicy = options.filterPolicy; + options1.reuseLogs = options.reuseLogs; + return options1; + } + + /** + * Create an Options object with default values for all fields. + */ + public static Options newDefaultOptions() + { + return new Options(); + } + + static void checkArgNotNull(Object value, String name) + { + if (value == null) { + throw new IllegalArgumentException("The " + name + " argument cannot be null"); + } + } + + /** + * If true, the database will be created if it is missing. 
+ */ + public boolean createIfMissing() + { + return createIfMissing; + } + + public Options createIfMissing(boolean createIfMissing) + { + this.createIfMissing = createIfMissing; + return this; + } + + public boolean errorIfExists() + { + return errorIfExists; + } + + /** + * If true, an error is raised if the database already exists. + */ + public Options errorIfExists(boolean errorIfExists) + { + this.errorIfExists = errorIfExists; + return this; + } + + public int writeBufferSize() + { + return writeBufferSize; + } + + /** + * Parameters that affect performance + *

+ * Amount of data to build up in memory (backed by an unsorted log + * on disk) before converting to a sorted on-disk file. + *

+ * Larger values increase performance, especially during bulk loads. + * Up to two write buffers may be held in memory at the same time, + * so you may wish to adjust this parameter to control memory usage. + * Also, a larger write buffer will result in a longer recovery time + * the next time the database is opened. + */ + public Options writeBufferSize(int writeBufferSize) + { + this.writeBufferSize = writeBufferSize; + return this; + } + + public int maxOpenFiles() + { + return maxOpenFiles; + } + + /** + * Number of open files that can be used by the DB. You may need to + * increase this if your database has a large working set (budget + * one open file per 2MB of working set). + */ + public Options maxOpenFiles(int maxOpenFiles) + { + this.maxOpenFiles = maxOpenFiles; + return this; + } + + public int blockRestartInterval() + { + return blockRestartInterval; + } + + /** + * Number of keys between restart points for delta encoding of keys. + * This parameter can be changed dynamically. Most clients should + * leave this parameter alone. + */ + public Options blockRestartInterval(int blockRestartInterval) + { + this.blockRestartInterval = blockRestartInterval; + return this; + } + + public int maxFileSize() + { + return maxFileSize; + } + + /** + * Leveldb will write up to this amount of bytes to a file before + * switching to a new one. + * Most clients should leave this parameter alone. However if your + * filesystem is more efficient with larger files, you could + * consider increasing the value. The downside will be longer + * compactions and hence longer latency/performance hiccups. + * Another reason to increase this parameter might be when you are + * initially populating a large database. + *

+ * Default: 2MB + * + * @param maxFileSize max file size in bytes + */ + public Options maxFileSize(int maxFileSize) + { + this.maxFileSize = maxFileSize; + return this; + } + + public int blockSize() + { + return blockSize; + } + + /** + * Approximate size of user data packed per block. Note that the + * block size specified here corresponds to uncompressed data. The + * actual size of the unit read from disk may be smaller if + * compression is enabled. This parameter can be changed dynamically. + */ + public Options blockSize(int blockSize) + { + this.blockSize = blockSize; + return this; + } + + public CompressionType compressionType() + { + return compressionType; + } + + /** + * Compress blocks using the specified compression algorithm. This + * parameter can be changed dynamically. + *

+ * Default: {@link CompressionType#ZLIB_RAW}, which is used for Mojang's LevelDB fork. + *

+ * Snappy is also possible; if Snappy compression is not available, {@link CompressionType#NONE} + * will be used. + */ + public Options compressionType(CompressionType compressionType) + { + checkArgNotNull(compressionType, "compressionType"); + this.compressionType = compressionType; + return this; + } + + public long cacheSize() + { + return cacheSize; + } + + /** + * If {@code cacheSize} is zero, no block cache will be used. + * If non-zero, use the specified cache size for blocks. + * By default leveldb will automatically create and use an 8MB internal cache. + */ + public Options cacheSize(long cacheSize) + { + this.cacheSize = cacheSize; + return this; + } + + public DBComparator comparator() + { + return comparator; + } + + /** + * Parameters that affect behavior + *

+ * Comparator used to define the order of keys in the table. + * Default: a comparator that uses lexicographic byte-wise ordering + *

+ * REQUIRES: The client must ensure that the comparator supplied + * here has the same name and orders keys *exactly* the same as the + * comparator provided to previous open calls on the same DB. + */ + public Options comparator(DBComparator comparator) + { + this.comparator = comparator; + return this; + } + + public Logger logger() + { + return logger; + } + + /** + * Any internal progress/error information generated by the db will + * be written to {@code logger} if it is non-null, or to a file stored + * in the same directory as the DB contents if info_log is null. + */ + public Options logger(Logger logger) + { + this.logger = logger; + return this; + } + + public boolean paranoidChecks() + { + return paranoidChecks; + } + + /** + * If true, the implementation will do aggressive checking of the + * data it is processing and will stop early if it detects any + * errors. This may have unforeseen ramifications: for example, a + * corruption of one DB entry may cause a large number of entries to + * become unreadable or for the entire DB to become unopenable. + */ + public Options paranoidChecks(boolean paranoidChecks) + { + this.paranoidChecks = paranoidChecks; + return this; + } + + /** + * If non-null, use the specified filter policy to reduce disk reads. + * Many applications will benefit from passing an instance of BloomFilter + * + * @param filterPolicy new filter policy + * @return self + */ + public Options filterPolicy(XFilterPolicy filterPolicy) + { + this.filterPolicy = filterPolicy; + return this; + } + + public XFilterPolicy filterPolicy() + { + return filterPolicy; + } + + /** + * If true, append to existing MANIFEST and log files + * when a database is opened. This can significantly speed up open. 
+ */ + public Options reuseLogs(boolean reuseLogs) + { + this.reuseLogs = reuseLogs; + return this; + } + + public boolean reuseLogs() + { + return this.reuseLogs; + } +} diff --git a/leveldb-api/src/main/java/org/iq80/leveldb/Range.java b/leveldb-api/src/main/java/org/iq80/leveldb/Range.java new file mode 100644 index 0000000..23297df --- /dev/null +++ b/leveldb-api/src/main/java/org/iq80/leveldb/Range.java @@ -0,0 +1,56 @@ +/* + * Copyright (C) 2011 the original author or authors. + * See the notice.md file distributed with this work for additional + * information regarding copyright ownership. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.iq80.leveldb; + +/** + * A range of keys as of {@code [start, limit) } + * @author Hiram Chirino + */ +public class Range +{ + private final byte[] start; + private final byte[] limit; + + /** + * Included in the range + */ + public byte[] limit() + { + return limit; + } + + /** + * Not included in the range + */ + public byte[] start() + { + return start; + } + + /** + * @param start key included in the range + * @param limit key not included in the range + */ + public Range(byte[] start, byte[] limit) + { + Options.checkArgNotNull(start, "start"); + Options.checkArgNotNull(limit, "limit"); + this.limit = limit; + this.start = start; + } +} diff --git a/leveldb-api/src/main/java/org/iq80/leveldb/ReadOptions.java b/leveldb-api/src/main/java/org/iq80/leveldb/ReadOptions.java new file mode 100644 index 0000000..defab59 --- /dev/null +++ b/leveldb-api/src/main/java/org/iq80/leveldb/ReadOptions.java @@ -0,0 +1,75 @@ +/* + * Copyright (C) 2011 the original author or authors. + * See the notice.md file distributed with this work for additional + * information regarding copyright ownership. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.iq80.leveldb; + +/** + * Options that control read operations + */ +public class ReadOptions +{ + private boolean verifyChecksums; + private boolean fillCache = true; + private Snapshot snapshot; + + public Snapshot snapshot() + { + return snapshot; + } + + /** + * If "snapshot" is non-null, read as of the supplied snapshot + * (which must belong to the DB that is being read and which must + * not have been closed). If "snapshot" is null, use an implicit + * snapshot of the state at the beginning of this read operation. + */ + public ReadOptions snapshot(Snapshot snapshot) + { + this.snapshot = snapshot; + return this; + } + + public boolean fillCache() + { + return fillCache; + } + + /** + * Should the data read for this iteration be cached in memory? + * Callers may wish to set this field to false for bulk scans. + */ + public ReadOptions fillCache(boolean fillCache) + { + this.fillCache = fillCache; + return this; + } + + public boolean verifyChecksums() + { + return verifyChecksums; + } + + /** + * If true, all data read from underlying storage will be + * verified against corresponding checksums. + */ + public ReadOptions verifyChecksums(boolean verifyChecksums) + { + this.verifyChecksums = verifyChecksums; + return this; + } +} diff --git a/leveldb-api/src/main/java/org/iq80/leveldb/Snapshot.java b/leveldb-api/src/main/java/org/iq80/leveldb/Snapshot.java new file mode 100644 index 0000000..6400e5a --- /dev/null +++ b/leveldb-api/src/main/java/org/iq80/leveldb/Snapshot.java @@ -0,0 +1,32 @@ +/* + * Copyright (C) 2011 the original author or authors. + * See the notice.md file distributed with this work for additional + * information regarding copyright ownership. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.iq80.leveldb; + +import java.io.Closeable; + +/** + * Abstract handle to particular state of a DB. + * A Snapshot can therefore be safely accessed from multiple threads + * without any external synchronization. + * + * Can't be used after close + */ +public interface Snapshot + extends Closeable +{ +} diff --git a/leveldb-api/src/main/java/org/iq80/leveldb/WriteBatch.java b/leveldb-api/src/main/java/org/iq80/leveldb/WriteBatch.java new file mode 100644 index 0000000..40624e5 --- /dev/null +++ b/leveldb-api/src/main/java/org/iq80/leveldb/WriteBatch.java @@ -0,0 +1,50 @@ +/* + * Copyright (C) 2011 the original author or authors. + * See the notice.md file distributed with this work for additional + * information regarding copyright ownership. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.iq80.leveldb; + +import java.io.Closeable; + +/** + * @author Hiram Chirino + */ +public interface WriteBatch + extends Closeable +{ + /** + * The size of the database changes caused by this batch. + *

+ * This number is tied to implementation details, and may change across + * releases. It is intended for LevelDB usage metrics. + */ + int getApproximateSize(); + + /** + * Number of entries in the batch + */ + int size(); + + /** + * Store the mapping key and value in the database. + */ + WriteBatch put(byte[] key, byte[] value); + + /** + * If the database contains a mapping for "key", erase it. Else do nothing. + */ + WriteBatch delete(byte[] key); +} diff --git a/leveldb-api/src/main/java/org/iq80/leveldb/WriteOptions.java b/leveldb-api/src/main/java/org/iq80/leveldb/WriteOptions.java new file mode 100644 index 0000000..0b027ec --- /dev/null +++ b/leveldb-api/src/main/java/org/iq80/leveldb/WriteOptions.java @@ -0,0 +1,71 @@ +/* + * Copyright (C) 2011 the original author or authors. + * See the notice.md file distributed with this work for additional + * information regarding copyright ownership. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.iq80.leveldb; + +/** + * Options that control write operations + */ +public class WriteOptions +{ + private boolean sync; + private boolean snapshot; + + /** + * If true, the write will be flushed from the operating system + * buffer cache (by calling WritableFile::Sync()) before the write + * is considered complete. If this flag is true, writes will be + * slower. + *

+ * If this flag is false, and the machine crashes, some recent + * writes may be lost. Note that if it is just the process that + * crashes (i.e., the machine does not reboot), no writes will be + * lost even if sync==false. + *

+ * In other words, a DB write with sync==false has similar + * crash semantics to the "write()" system call. A DB write + * with sync==true has similar crash semantics to a "write()" + * system call followed by "fsync()". + *

+ * In the Java implementation, if only the process crashes (and the machine does not reboot), no writes are lost even when sync==false. + * Default: false + **/ + public boolean sync() + { + return sync; + } + + public WriteOptions sync(boolean sync) + { + this.sync = sync; + return this; + } + + /** + * If "snapshot" is true, take a snapshot at the end of this write operation + */ + public boolean snapshot() + { + return snapshot; + } + + public WriteOptions snapshot(boolean snapshot) + { + this.snapshot = snapshot; + return this; + } +} diff --git a/leveldb-api/src/main/java/org/iq80/leveldb/XFilterPolicy.java b/leveldb-api/src/main/java/org/iq80/leveldb/XFilterPolicy.java new file mode 100644 index 0000000..67de8ec --- /dev/null +++ b/leveldb-api/src/main/java/org/iq80/leveldb/XFilterPolicy.java @@ -0,0 +1,36 @@ +/* + * Copyright (C) 2011 the original author or authors. + * See the notice.md file distributed with this work for additional + * information regarding copyright ownership. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.iq80.leveldb; + +/** + * A database can be configured with a custom FilterPolicy object. + * This object is responsible for creating a small filter from a set + * of keys. These filters are stored in leveldb and are consulted + * automatically by leveldb to decide whether or not to read some + * information from disk. In many cases, a filter can cut down the + * number of disk seeks from a handful to a single disk seek per + * DB::Get() call. + *

+ * Most people will want to use the builtin bloom filter support (see + * NewBloomFilterPolicy() below). + * + * @author Honore Vasconcelos + */ +public interface XFilterPolicy +{ +} diff --git a/leveldb-api/src/test/java/org/iq80/leveldb/OptionsTest.java b/leveldb-api/src/test/java/org/iq80/leveldb/OptionsTest.java new file mode 100644 index 0000000..325404e --- /dev/null +++ b/leveldb-api/src/test/java/org/iq80/leveldb/OptionsTest.java @@ -0,0 +1,100 @@ +/* + * Copyright (C) 2011 the original author or authors. + * See the notice.md file distributed with this work for additional + * information regarding copyright ownership. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.iq80.leveldb; + +import org.testng.annotations.Test; + +import static org.testng.Assert.assertEquals; + +public class OptionsTest +{ + @Test(expectedExceptions = IllegalArgumentException.class) + public void testDefaults() throws Exception + { + Options.fromOptions(null); + } + + @Test + public void testCopy() throws Exception + { + MyDBComparator comparator = new MyDBComparator(); + Logger logger = msg -> { + }; + XFilterPolicy filterPolicy = new XFilterPolicy() + { + }; + Options op = new Options(); + op.createIfMissing(false); + op.errorIfExists(true); + op.writeBufferSize(1234); + op.maxFileSize(56790); + op.maxOpenFiles(2); + op.blockRestartInterval(789); + op.blockSize(345); + op.compressionType(CompressionType.NONE); + op.paranoidChecks(true); + op.comparator(comparator); + op.logger(logger); + op.cacheSize(678); + op.filterPolicy(filterPolicy); + op.reuseLogs(true); + Options op2 = Options.fromOptions(op); + + assertEquals(op2.createIfMissing(), false); + assertEquals(op2.errorIfExists(), true); + assertEquals(op2.writeBufferSize(), 1234); + assertEquals(op2.maxFileSize(), 56790); + assertEquals(op2.maxOpenFiles(), 2); + assertEquals(op2.blockRestartInterval(), 789); + assertEquals(op2.blockSize(), 345); + assertEquals(op2.compressionType(), CompressionType.NONE); + assertEquals(op2.paranoidChecks(), true); + assertEquals(op2.comparator(), comparator); + assertEquals(op2.logger(), logger); + assertEquals(op2.cacheSize(), 678); + assertEquals(op2.filterPolicy(), filterPolicy); + assertEquals(op2.reuseLogs(), true); + } + + private static class MyDBComparator implements DBComparator + { + @Override + public String name() + { + return null; + } + + @Override + public byte[] findShortestSeparator(byte[] start, byte[] limit) + { + return new byte[0]; + } + + @Override + public byte[] findShortSuccessor(byte[] key) + { + return new byte[0]; + } + + @Override + public int compare(byte[] o1, byte[] o2) + { + return 0; + } + } +} diff --git 
a/leveldb-benchmark/pom.xml b/leveldb-benchmark/pom.xml new file mode 100644 index 0000000..a9e8fc9 --- /dev/null +++ b/leveldb-benchmark/pom.xml @@ -0,0 +1,78 @@ + + + + 4.0.0 + + + com.hivemc.leveldb + leveldb-project + 1.0.0-SNAPSHOT + + + leveldb-benchmark + leveldb-benchmark + Port of LevelDB Benchmarks to Java + + + ${project.parent.basedir} + false + + + + + com.hivemc.leveldb + leveldb-api + + + com.hivemc.leveldb + leveldb + + + com.google.guava + guava + + + junit + junit + 4.13.1 + test + + + + + + + + org.codehaus.mojo + exec-maven-plugin + 1.2.1 + + + + java + + + + + org.iq80.leveldb.benchmark.DbBenchmark + + + + + + diff --git a/leveldb-benchmark/src/main/java/org/iq80/leveldb/benchmark/DbBenchmark.java b/leveldb-benchmark/src/main/java/org/iq80/leveldb/benchmark/DbBenchmark.java new file mode 100644 index 0000000..3b4e956 --- /dev/null +++ b/leveldb-benchmark/src/main/java/org/iq80/leveldb/benchmark/DbBenchmark.java @@ -0,0 +1,1263 @@ +/* + * Copyright (C) 2011 the original author or authors. + * See the notice.md file distributed with this work for additional + * information regarding copyright ownership. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.iq80.leveldb.benchmark; + +import com.google.common.base.Splitter; +import com.google.common.base.Throwables; +import com.google.common.collect.ImmutableList; +import com.google.common.io.CharStreams; +import com.google.common.io.Files; +import org.iq80.leveldb.DB; +import org.iq80.leveldb.DBFactory; +import org.iq80.leveldb.DBIterator; +import org.iq80.leveldb.Options; +import org.iq80.leveldb.ReadOptions; +import org.iq80.leveldb.WriteBatch; +import org.iq80.leveldb.WriteOptions; +import org.iq80.leveldb.fileenv.FileUtils; +import org.iq80.leveldb.table.BloomFilterPolicy; +import org.iq80.leveldb.util.Closeables; +import org.iq80.leveldb.util.PureJavaCrc32C; +import org.iq80.leveldb.util.Slice; +import org.iq80.leveldb.util.SliceOutput; +import org.iq80.leveldb.util.Slices; +import org.iq80.leveldb.util.Snappy; + +import java.io.File; +import java.io.IOException; +import java.io.OutputStream; +import java.io.PrintStream; +import java.nio.ByteBuffer; +import java.util.Arrays; +import java.util.Date; +import java.util.EnumMap; +import java.util.List; +import java.util.Map; +import java.util.Random; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.locks.Condition; +import java.util.concurrent.locks.ReentrantLock; + +import static com.google.common.base.Preconditions.checkArgument; +import static java.nio.charset.StandardCharsets.UTF_8; + +public class DbBenchmark +{ + public static final String FACTORY_CLASS = System.getProperty("leveldb.factory", "org.iq80.leveldb.impl.Iq80DBFactory"); + private final boolean useExisting; + private final Integer writeBufferSize; + private final File databaseDir; + private final double compressionRatio; + private final Map flags; + + private final List benchmarks; + private final int blockCacheSize; + private final int bloomFilterBits; + private final int maxFileSize; + private final int blockSize; + private DB db; + private int num; + private int reads; + private int valueSize; + private 
WriteOptions writeOptions; + private int entriesPerBatch; + + private final DBFactory factory; + + public DbBenchmark(Map flags) + throws Exception + { + ClassLoader cl = DbBenchmark.class.getClassLoader(); + factory = (DBFactory) cl.loadClass(FACTORY_CLASS).newInstance(); + this.flags = flags; + benchmarks = (List) flags.get(Flag.benchmarks); + + writeBufferSize = (Integer) flags.get(Flag.write_buffer_size); + maxFileSize = (Integer) flags.get(Flag.max_file_size); + blockSize = (Integer) flags.get(Flag.block_size); + compressionRatio = (Double) flags.get(Flag.compression_ratio); + useExisting = (Boolean) flags.get(Flag.use_existing_db); + blockCacheSize = (Integer) flags.get(Flag.cache_size); + bloomFilterBits = (Integer) flags.get(Flag.bloom_bits); + num = (Integer) flags.get(Flag.num); + reads = (Integer) (flags.get(Flag.reads) == null ? flags.get(Flag.num) : flags.get(Flag.reads)); + valueSize = (Integer) flags.get(Flag.value_size); + entriesPerBatch = 1; + + databaseDir = new File((String) flags.get(Flag.db)); + + // delete heap files in db + for (File file : FileUtils.listFiles(databaseDir)) { + if (file.getName().startsWith("heap-")) { + file.delete(); + } + } + + if (!useExisting) { + destroyDb(); + } + } + + private interface BenchmarkMethod + { + void run(ThreadState state) throws Exception; + } + + private void run() + throws IOException + { + printHeader(); + open(); + + for (String benchmark : benchmarks) { + // Reset parameters that may be overridden below + num = (Integer) flags.get(Flag.num); + reads = (Integer) (flags.get(Flag.reads) == null ? 
flags.get(Flag.num) : flags.get(Flag.reads)); + valueSize = (Integer) flags.get(Flag.value_size); + entriesPerBatch = 1; + writeOptions = new WriteOptions(); + + boolean freshBb = false; + int numThreads = (Integer) flags.get(Flag.threads); + + BenchmarkMethod method = null; + + if (benchmark.equals("open")) { + freshBb = true; + method = this::openBench; + num /= 10000; + if (num < 1) { + num = 1; + } + } + else if (benchmark.equals("fillseq")) { + freshBb = true; + method = this::writeSeq; + } + else if (benchmark.equals("fillbatch")) { + freshBb = true; + entriesPerBatch = 1000; + method = this::writeSeq; + } + else if (benchmark.equals("fillrandom")) { + freshBb = true; + method = this::writeRandom; + } + else if (benchmark.equals("overwrite")) { + freshBb = false; + method = this::writeRandom; + } + else if (benchmark.equals("fillsync")) { + freshBb = true; + num /= 1000; + writeOptions.sync(true); + method = this::writeRandom; + } + else if (benchmark.equals("fill100K")) { + freshBb = true; + num /= 1000; + valueSize = 100 * 1000; + method = this::writeRandom; + } + else if (benchmark.equals("readseq")) { + method = this::readSequential; + } + else if (benchmark.equals("readreverse")) { + method = this::readReverse; + } + else if (benchmark.equals("readrandom")) { + method = this::readRandom; + } + else if (benchmark.equals("readmissing")) { + method = this::readMissing; + } + else if (benchmark.equals("seekrandom")) { + method = this::seekRandom; + } + else if (benchmark.equals("readhot")) { + method = this::readHot; + } + else if (benchmark.equals("readrandomsmall")) { + reads /= 1000; + method = this::readRandom; + } + else if (benchmark.equals("deleteseq")) { + method = this::deleteSeq; + } + else if (benchmark.equals("deleterandom")) { + method = this::deleteRandom; + } + else if (benchmark.equals("readwhilewriting")) { + numThreads++; // Add extra thread for writing + method = this::readWhileWriting; + } + else if (benchmark.equals("compact")) { + 
method = this::compact; + } + else if (benchmark.equals("crc32c")) { + method = this::crc32c; + } + else if (benchmark.equals("snappycomp")) { + if (Snappy.available()) { + method = this::snappyCompress; + } + } + else if (benchmark.equals("snappyuncomp")) { + if (Snappy.available()) { + method = this::snappyUncompressDirectBuffer; + } + } + else if (benchmark.equals("unsnap-array")) { + if (Snappy.available()) { + method = this::snappyUncompressArray; + } + } + else if (benchmark.equals("unsnap-direct")) { + if (Snappy.available()) { + method = this::snappyUncompressDirectBuffer; + } + } + else if (benchmark.equals("heapprofile")) { + heapProfile(); + } + else if (benchmark.equals("stats")) { + printStats("leveldb.stats"); + } + else if (benchmark.equals("sstables")) { + printStats("leveldb.sstables"); + } + else { + System.err.println("Unknown benchmark: " + benchmark); + } + if (freshBb) { + if (useExisting) { + System.out.println("skipping (--use_existing_db is true)"); + return; + } + db.close(); + db = null; + destroyDb(); + open(); + } + if (method != null) { + try { + runBenchmark(numThreads, benchmark, method); + } + catch (Exception e) { + System.out.println("Failed to rung " + method); + e.printStackTrace(); + return; + } + } + + } + db.close(); + } + + private void runBenchmark(int n, String name, BenchmarkMethod method) throws Exception + { + SharedState shared = new SharedState(); + + ThreadArg[] arg = new ThreadArg[n]; + for (int i = 0; i < arg.length; ++i) { + arg[i] = new ThreadArg(); + arg[i].bm = this; + arg[i].method = method; + arg[i].shared = shared; + arg[i].thread = new ThreadState(i); + arg[i].thread.shared = shared; + startThread(arg[i]); + } + + shared.mu.lock(); + while (shared.numInitialized < n) { + shared.cv.await(); + } + + shared.start = true; + shared.cv.signalAll(); + while (shared.numDone < n) { + shared.cv.await(); + } + shared.mu.unlock(); + + for (int i = 1; i < n; i++) { + arg[0].thread.stats.merge(arg[i].thread.stats); + } + 
arg[0].thread.stats.report(name); + } + + public void startThread(final ThreadArg arg) + { + new Thread(() -> { + SharedState shared = arg.shared; + ThreadState thread = arg.thread; + shared.mu.lock(); + try { + shared.numInitialized++; + if (shared.numInitialized >= shared.total) { + shared.cv.signalAll(); + } + while (!shared.start) { + shared.cv.awaitUninterruptibly(); + } + } + finally { + shared.mu.unlock(); + } + try { + thread.stats.init(); + arg.method.run(thread); + } + catch (Exception e) { + thread.stats.addMessage("ERROR " + e); + e.printStackTrace(); + } + finally { + thread.stats.stop(); + } + + shared.mu.lock(); + try { + shared.numDone++; + if (shared.numDone >= shared.total) { + shared.cv.signalAll(); + } + } + finally { + shared.mu.unlock(); + } + }).start(); + } + + private void printHeader() + throws IOException + { + int kKeySize = 16; + printEnvironment(); + System.out.printf("Keys: %d bytes each%n", kKeySize); + System.out.printf("Values: %d bytes each (%d bytes after compression)%n", + valueSize, + (int) (valueSize * compressionRatio + 0.5)); + System.out.printf("Entries: %d%n", num); + System.out.printf("RawSize: %.1f MB (estimated)%n", + ((kKeySize + valueSize) * num) / 1048576.0); + System.out.printf("FileSize: %.1f MB (estimated)%n", + (((kKeySize + valueSize * compressionRatio) * num) + / 1048576.0)); + printWarnings(); + System.out.printf("------------------------------------------------%n"); + } + + @SuppressWarnings({"InnerAssignment"}) + static void printWarnings() + { + boolean assertsEnabled = false; + // CHECKSTYLE IGNORE check FOR NEXT 1 LINES + assert assertsEnabled = true; // Intentional side effect!!! 
+ if (assertsEnabled) { + System.out.printf("WARNING: Assertions are enabled; benchmarks unnecessarily slow%n"); + } + + // See if snappy is working by attempting to compress a compressible string + String text = "yyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyy"; + byte[] compressedText = null; + try { + compressedText = Snappy.compress(text); + } + catch (Exception ignored) { + } + if (compressedText == null) { + System.out.printf("WARNING: Snappy compression is not enabled%n"); + } + else if (compressedText.length > text.length()) { + System.out.printf("WARNING: Snappy compression is not effective%n"); + } + } + + void printEnvironment() + throws IOException + { + System.out.printf("LevelDB: %s%n", factory); + + System.out.printf("Date: %tc%n", new Date()); + + File cpuInfo = new File("/proc/cpuinfo"); + if (cpuInfo.canRead()) { + int numberOfCpus = 0; + String cpuType = null; + String cacheSize = null; + for (String line : CharStreams.readLines(Files.newReader(cpuInfo, UTF_8))) { + ImmutableList parts = ImmutableList.copyOf(Splitter.on(':').omitEmptyStrings().trimResults().limit(2).split(line)); + if (parts.size() != 2) { + continue; + } + String key = parts.get(0); + String value = parts.get(1); + + if (key.equals("model name")) { + numberOfCpus++; + cpuType = value; + } + else if (key.equals("cache size")) { + cacheSize = value; + } + } + System.out.printf("CPU: %d * %s%n", numberOfCpus, cpuType); + System.out.printf("CPUCache: %s%n", cacheSize); + } + } + + private void open() + throws IOException + { + Options options = new Options(); + options.createIfMissing(!useExisting); + if (maxFileSize >= 0) { + options.maxFileSize(maxFileSize); + } + if (blockSize >= 0) { + options.blockSize(blockSize); + } + if (blockCacheSize >= 0) { + options.cacheSize(blockCacheSize); + } + if (bloomFilterBits >= 0) { + options.filterPolicy(new BloomFilterPolicy(bloomFilterBits)); + } + if (writeBufferSize != null) { + options.writeBufferSize(writeBufferSize); + } + db = 
factory.open(databaseDir, options); + } + + private void write(ThreadState thread, boolean seq) + throws IOException + { + if (!flags.get(Flag.num).equals(num)) { + thread.stats.addMessage(String.format("(%d ops)", num)); + } + + RandomGenerator gen = newGenerator(); + long bytes = 0; + for (int i = 0; i < num; i += entriesPerBatch) { + WriteBatch batch = db.createWriteBatch(); + for (int j = 0; j < entriesPerBatch; j++) { + int k = seq ? i + j : thread.rand.nextInt(num); + byte[] key = formatNumber(k); + batch.put(key, gen.generate(valueSize)); + bytes += valueSize + key.length; + thread.stats.finishedSingleOp(); + } + db.write(batch, writeOptions); + batch.close(); + } + thread.stats.addBytes(bytes); + } + + public static byte[] formatNumber(long n) + { + checkArgument(n >= 0, "number must be positive"); + + byte[] slice = new byte[16]; + + int i = 15; + while (n > 0) { + slice[i--] = (byte) ((long) '0' + (n % 10)); + n /= 10; + } + while (i >= 0) { + slice[i--] = '0'; + } + return slice; + } + + private void readSequential(ThreadState thread) + { + long bytes = 0; + for (int loops = 0; loops < 5; loops++) { + try (DBIterator iterator = db.iterator()) { + iterator.seekToFirst(); + for (int i = 0; i < reads && iterator.hasNext(); i++) { + Map.Entry entry = iterator.next(); + bytes += entry.getKey().length + entry.getValue().length; + thread.stats.finishedSingleOp(); + } + } + } + thread.stats.addBytes(bytes); + } + + private void readReverse(ThreadState thread) + { + //TODO implement readReverse + } + + private void readRandom(ThreadState thread) + { + int found = 0; + long bytes = 0; + for (int i = 0; i < reads; i++) { + byte[] key = formatNumber(thread.rand.nextInt(num)); + byte[] value = db.get(key); + if (value != null) { + found++; + bytes += key.length + value.length; + } + thread.stats.finishedSingleOp(); + } + thread.stats.addMessage(String.format("(%d of %d found)", found, num)); + thread.stats.addBytes(bytes); + } + + private void readMissing(ThreadState 
thread) + { + for (int i = 0; i < reads; i++) { + byte[] key = formatNumber(thread.rand.nextInt(num)); + db.get(key); + thread.stats.finishedSingleOp(); + } + } + + private void readHot(ThreadState thread) + { + long bytes = 0; + int range = (num + 99) / 100; + for (int i = 0; i < reads; i++) { + byte[] key = formatNumber(thread.rand.nextInt(range)); + byte[] value = db.get(key); + bytes += key.length + value.length; + thread.stats.finishedSingleOp(); + } + thread.stats.addBytes(bytes); + } + + private void seekRandom(ThreadState thread) throws IOException + { + ReadOptions options = new ReadOptions(); + int found = 0; + for (int i = 0; i < reads; i++) { + DBIterator iter = db.iterator(options); + byte[] key = formatNumber(thread.rand.nextInt(num)); + iter.seek(key); + if (iter.hasNext() == Arrays.equals(iter.next().getKey(), key)) { + found++; + } + iter.close(); + thread.stats.finishedSingleOp(); + } + thread.stats.addMessage(String.format("(%d of %d found)", found, num)); + } + + private void deleteSeq(ThreadState thread) + { + //TODO implement deleteSeq + } + + private void deleteRandom(ThreadState thread) + { + //TODO implement deleteRandom + } + + private void readWhileWriting(ThreadState thread) + { + if (thread.tid > 0) { + readRandom(thread); + } + else { + // Special thread that keeps writing until other threads are done. + RandomGenerator gen = newGenerator(); + while (true) { + thread.shared.mu.lock(); + try { + if (thread.shared.numDone + 1 >= thread.shared.numInitialized) { + // Other threads have finished + break; + } + } + finally { + thread.shared.mu.unlock(); + } + + byte[] key = formatNumber(thread.rand.nextInt((Integer) flags.get(Flag.num))); + db.put(key, gen.generate(valueSize), writeOptions); + } + + // Do not count any of the preceding work/delay in stats. 
+ thread.stats.init(); + } + } + + private void compact(ThreadState thread) + { + db.compactRange(null, null); + } + + private void crc32c(final ThreadState thread) + { + // Checksum about 500MB of data total + int blockSize = 4096; + String label = "(4K per op)"; + // Checksum about 500MB of data total + byte[] data = new byte[blockSize]; + Arrays.fill(data, (byte) 'x'); + + long bytes = 0; + int crc = 0; + while (bytes < 1000 * 1048576) { + PureJavaCrc32C checksum = new PureJavaCrc32C(); + checksum.update(data, 0, blockSize); + crc = checksum.getMaskedValue(); + thread.stats.finishedSingleOp(); + bytes += blockSize; + } + // Print so result is not dead + System.out.printf("... crc=0x%x\r", crc); + + thread.stats.addBytes(bytes); + thread.stats.addMessage(label); + } + + private void snappyCompress(ThreadState thread) + { + byte[] raw = newGenerator().generate(new Options().blockSize()); + byte[] compressedOutput = new byte[Snappy.maxCompressedLength(raw.length)]; + + long bytes = 0; + long produced = 0; + + // attempt to compress the block + while (bytes < 1024 * 1048576) { // Compress 1G + try { + int compressedSize = Snappy.compress(raw, 0, raw.length, compressedOutput, 0); + bytes += raw.length; + produced += compressedSize; + } + catch (IOException ignored) { + thread.stats.addMessage("(snappy failure)"); + Throwables.propagateIfPossible(ignored, AssertionError.class); + } + + thread.stats.finishedSingleOp(); + } + thread.stats.addMessage(String.format("(output: %.1f%%)", (produced * 100.0) / bytes)); + thread.stats.addBytes(bytes); + } + + private RandomGenerator newGenerator() + { + return new RandomGenerator(compressionRatio); + } + + private void snappyUncompressArray(ThreadState thread) + { + int inputSize = new Options().blockSize(); + byte[] compressedOutput = new byte[Snappy.maxCompressedLength(inputSize)]; + byte[] raw = newGenerator().generate(inputSize); + long bytes = 0; + int compressedLength; + try { + compressedLength = Snappy.compress(raw, 0, 
raw.length, compressedOutput, 0); + } + catch (IOException e) { + Throwables.propagateIfPossible(e, AssertionError.class); + return; + } + // attempt to uncompress the block + while (bytes < 5L * 1024 * 1048576) { // Compress 1G + try { + Snappy.uncompress(compressedOutput, 0, compressedLength, raw, 0); + bytes += inputSize; + } + catch (IOException ignored) { + thread.stats.addMessage("(snappy failure)"); + throw Throwables.propagate(ignored); + } + + thread.stats.finishedSingleOp(); + } + thread.stats.addBytes(bytes); + } + + private void snappyUncompressDirectBuffer(ThreadState thread) + { + int inputSize = new Options().blockSize(); + byte[] compressedOutput = new byte[Snappy.maxCompressedLength(inputSize)]; + byte[] raw = newGenerator().generate(inputSize); + int compressedLength; + try { + compressedLength = Snappy.compress(raw, 0, raw.length, compressedOutput, 0); + } + catch (IOException e) { + Throwables.propagateIfPossible(e, AssertionError.class); + return; + } + + ByteBuffer uncompressedBuffer = ByteBuffer.allocateDirect(inputSize); + ByteBuffer compressedBuffer = ByteBuffer.allocateDirect(compressedLength); + compressedBuffer.put(compressedOutput, 0, compressedLength); + + long bytes = 0; + // attempt to uncompress the block + while (bytes < 5L * 1024 * 1048576) { // Compress 1G + try { + uncompressedBuffer.clear(); + compressedBuffer.position(0); + compressedBuffer.limit(compressedLength); + Snappy.uncompress(compressedBuffer, uncompressedBuffer); + bytes += inputSize; + } + catch (IOException ignored) { + thread.stats.addMessage("(snappy failure)"); + Throwables.propagateIfPossible(ignored, AssertionError.class); + return; + } + + thread.stats.finishedSingleOp(); + thread.stats.addBytes(bytes); + } + } + + private void openBench(ThreadState thread) throws IOException + { + for (int i = 0; i < num; i++) { + db.close(); + db = null; + open(); + thread.stats.finishedSingleOp(); + } + } + + private void writeSeq(ThreadState thread) throws IOException + { 
+ write(thread, true); + } + + private void writeRandom(ThreadState thread) throws IOException + { + write(thread, false); + } + + private void heapProfile() + { + //TODO implement heapProfile + } + + private void destroyDb() + { + Closeables.closeQuietly(db); + db = null; + FileUtils.deleteRecursively(databaseDir); + } + + private void printStats(String name) + { + final String property = db.getProperty(name); + if (property != null) { + System.out.print(property); + } + } + + public static void main(String[] args) + throws Exception + { + Map flags = new EnumMap<>(Flag.class); + for (Flag flag : Flag.values()) { + flags.put(flag, flag.getDefaultValue()); + } + for (String arg : args) { + boolean valid = false; + if (arg.startsWith("--")) { + try { + ImmutableList parts = ImmutableList.copyOf(Splitter.on("=").limit(2).split(arg.substring(2))); + Flag key = Flag.valueOf(parts.get(0)); + Object value = key.parseValue(parts.get(1)); + flags.put(key, value); + valid = true; + } + catch (Exception e) { + } + } + + if (!valid) { + System.err.println("Invalid argument " + arg); + System.exit(1); + } + + } + System.out.println("Using factory: " + FACTORY_CLASS); + warmUpJVM(flags, (Integer) flags.get(Flag.jvm_warm_up_iterations)); + System.out.println("Main Benchmark Run"); + new DbBenchmark(flags).run(); + } + + private static void warmUpJVM(Map flags, int runs) throws Exception + { + PrintStream outBack = System.out; + PrintStream errBack = System.err; + PrintStream printStream = new PrintStream(new OutputStream() + { + @Override + public void write(int i) + { + } + }); + System.setOut(printStream); + System.setErr(printStream); + for (int i = 1; i <= runs; i++) { + outBack.println("Warm up run #" + i + " (no output will be presented)"); + new DbBenchmark(flags).run(); + } + System.setOut(outBack); + System.setErr(errBack); + outBack.println(); + } + + private enum Flag + { + // Comma-separated list of operations to run in the specified order + // Actual benchmarks: + 
// fillseq -- write N values in sequential key order in async mode + // fillrandom -- write N values in random key order in async mode + // overwrite -- overwrite N values in random key order in async mode + // fillsync -- write N/100 values in random key order in sync mode + // fill100K -- write N/1000 100K values in random order in async mode + // readseq -- read N times sequentially + // readreverse -- read N times in reverse order + // readrandom -- read N times in random order + // readhot -- read N times in random order from 1% section of DB + // crc32c -- repeated crc32c of 4K of data + // Meta operations: + // compact -- Compact the entire DB + // stats -- Print DB stats + // heapprofile -- Dump a heap profile (if supported by this port) + benchmarks(List.of( + "fillseq", + "fillsync", + "fillrandom", + "overwrite", + "readrandom", + "readrandom", // Extra run to allow previous compactions to quiesce + "readseq", + // "readreverse", + "compact", + "readrandom", + "readseq", + // "readreverse", + "fill100K", + // "crc32c", + "snappycomp", + "unsnap-array", + "unsnap-direct", + "stats" + )) { + @Override + public Object parseValue(String value) + { + return ImmutableList.copyOf(Splitter.on(",").trimResults().omitEmptyStrings().split(value)); + } + }, + + // Arrange to generate values that shrink to this fraction of + // their original size after compression + compression_ratio(0.5d) { + @Override + public Object parseValue(String value) + { + return Double.parseDouble(value); + } + }, + + // Print histogram of operation timings + histogram(false) { + @Override + public Object parseValue(String value) + { + return Boolean.parseBoolean(value); + } + }, + + // If true, do not destroy the existing database. If you set this + // flag and also specify a benchmark that wants a fresh database, that + // benchmark will fail. 
use_existing_db(false) {
            @Override
            public Object parseValue(String value)
            {
                return Boolean.parseBoolean(value);
            }
        },

        // Number of key/values to place in database
        num(1000000) {
            @Override
            public Object parseValue(String value)
            {
                return Integer.parseInt(value);
            }
        },

        // Number of read operations to do. If negative, do FLAGS_num reads.
        reads(null) {
            @Override
            public Object parseValue(String value)
            {
                return Integer.parseInt(value);
            }
        },

        // Number of concurrent threads to run.
        threads(1) {
            @Override
            public Object parseValue(String value)
            {
                return Integer.parseInt(value);
            }
        },

        // Size of each value
        value_size(100) {
            @Override
            public Object parseValue(String value)
            {
                return Integer.parseInt(value);
            }
        },

        // Number of bytes to buffer in memtable before compacting
        // (initialized to default value by "main")
        write_buffer_size(null) {
            @Override
            public Object parseValue(String value)
            {
                return Integer.parseInt(value);
            }
        },

        // Number of bytes written to each file.
        // (initialized to default value by "main")
        max_file_size(0) {
            // CONSISTENCY FIX: was 'protected'; every other constant declares
            // parseValue as 'public'. Widening is backward-compatible.
            @Override
            public Object parseValue(String value)
            {
                return Integer.parseInt(value);
            }
        },

        // Approximate size of user data packed per block (before compression.
        // (initialized to default value by "main")
        block_size(0) {
            // CONSISTENCY FIX: was 'protected'; every other constant declares
            // parseValue as 'public'. Widening is backward-compatible.
            @Override
            public Object parseValue(String value)
            {
                return Integer.parseInt(value);
            }
        },

        // Number of bytes to use as a cache of uncompressed data.
        // Negative means use default settings.
        cache_size(-1) {
            @Override
            public Object parseValue(String value)
            {
                return Integer.parseInt(value);
            }
        },

        // Bloom filter bits per key.
        // Negative means use default settings.
+ bloom_bits(-1) { + @Override + public Object parseValue(String value) + { + return Integer.parseInt(value); + } + }, + + // Maximum number of files to keep open at the same time (use default if == 0) + open_files(0) { + @Override + public Object parseValue(String value) + { + return Integer.parseInt(value); + } + }, + + // Use the db with the following name. + db("/tmp/dbbench") { + @Override + public Object parseValue(String value) + { + return value; + } + }, + + // Use to define number of warm up iteration + jvm_warm_up_iterations(1) { + @Override + public Object parseValue(String value) + { + return Integer.parseInt(value); + } + }; + + private final Object defaultValue; + + Flag(Object defaultValue) + { + this.defaultValue = defaultValue; + } + + protected abstract Object parseValue(String value); + + public Object getDefaultValue() + { + return defaultValue; + } + } + + private static class RandomGenerator + { + private final Slice data; + private int position; + + private RandomGenerator(double compressionRatio) + { + // We use a limited amount of data over and over again and ensure + // that it is larger than the compression window (32KB), and also + // large enough to serve all typical value sizes we want to write. + Random rnd = new Random(301); + data = Slices.allocate(1048576 + 100); + SliceOutput sliceOutput = data.output(); + while (sliceOutput.size() < 1048576) { + // Add a short fragment that is as compressible as specified + // by FLAGS_compression_ratio. 
+ sliceOutput.writeBytes(compressibleString(rnd, compressionRatio, 100)); + } + } + + private byte[] generate(int length) + { + if (position + length > data.length()) { + position = 0; + assert (length < data.length()); + } + Slice slice = data.slice(position, length); + position += length; + return slice.getBytes(); + } + } + + private static Slice compressibleString(Random rnd, double compressionRatio, int len) + { + int raw = (int) (len * compressionRatio); + if (raw < 1) { + raw = 1; + } + Slice rawData = generateRandomSlice(rnd, raw); + + // Duplicate the random data until we have filled "len" bytes + Slice dst = Slices.allocate(len); + SliceOutput sliceOutput = dst.output(); + while (sliceOutput.size() < len) { + sliceOutput.writeBytes(rawData, 0, Math.min(rawData.length(), sliceOutput.writableBytes())); + } + return dst; + } + + private static Slice generateRandomSlice(Random random, int length) + { + Slice rawData = Slices.allocate(length); + SliceOutput sliceOutput = rawData.output(); + while (sliceOutput.isWritable()) { + sliceOutput.writeByte((byte) ((int) ' ' + random.nextInt(95))); + } + return rawData; + } + + private static class SharedState + { + ReentrantLock mu; + Condition cv; + int total; + + // Each thread goes through the following states: + // (1) initializing + // (2) waiting for others to be initialized + // (3) running + // (4) done + int numInitialized; + int numDone; + boolean start; + + public SharedState() + { + this.mu = new ReentrantLock(); + this.cv = mu.newCondition(); + } + } + + private class ThreadState + { + int tid; // 0..n-1 when running in n threads + Random rand; // Has different seeds for different threads + DbBenchmark.Stats stats = new Stats(); + SharedState shared; + + public ThreadState(int index) + { + this.tid = index; + this.rand = new Random(1000 + index); + } + } + + private class ThreadArg + { + DbBenchmark bm; + SharedState shared; + ThreadState thread; + BenchmarkMethod method; + } + + private class Stats + { + 
long start; + long finish; + double seconds; + int done; + int nextReport; + long bytes; + double lastOpFinish; + Histogram hist = new Histogram(); + StringBuilder message = new StringBuilder(); + + public Stats() + { + init(); + } + + void init() + { + nextReport = 100; + lastOpFinish = start; + hist.clear(); + done = 0; + bytes = 0; + seconds = 0; + start = System.nanoTime(); + finish = start; + message.setLength(0); + } + + void merge(Stats other) + { + hist.merge(other.hist); + done += other.done; + bytes += other.bytes; + seconds += other.seconds; + if (other.start < start) { + start = other.start; + } + if (other.finish > finish) { + finish = other.finish; + } + + // Just keep the messages from one thread + if (message.length() == 0) { + message = other.message; + } + } + + void stop() + { + finish = System.nanoTime(); + seconds = 1.0d * (finish - start) / TimeUnit.SECONDS.toNanos(1); + } + + void addMessage(String msg) + { + if (message.length() != 0) { + message.append(" "); + } + message.append(msg); + } + + void finishedSingleOp() + { + if (flags.containsKey(Flag.histogram)) { + double now = System.nanoTime(); + double micros = (now - lastOpFinish) / 1000.0d; + hist.add(micros); + if (micros > 20000) { + System.out.printf("long op: %.1f micros%30s\r", micros, ""); + } + lastOpFinish = now; + } + + done++; + if (done >= nextReport) { + if (nextReport < 1000) { + nextReport += 100; + } + else if (nextReport < 5000) { + nextReport += 500; + } + else if (nextReport < 10000) { + nextReport += 1000; + } + else if (nextReport < 50000) { + nextReport += 5000; + } + else if (nextReport < 100000) { + nextReport += 10000; + } + else if (nextReport < 500000) { + nextReport += 50000; + } + else { + nextReport += 100000; + } + System.out.printf("... 
finished %d ops%30s\r", done, ""); + } + } + + void addBytes(long n) + { + bytes += n; + } + + void report(String name) + { + if (bytes > 0) { + double elapsed = TimeUnit.NANOSECONDS.toSeconds(finish - start); + String rate = String.format("%6.1f MB/s", (bytes / 1048576.0) / elapsed); + message.insert(0, " ").insert(0, rate); + } + + System.out.printf("%-12s : %11.5f micros/op; %11.0f op/sec;%s%s%n", + name, + done == 0 ? 0 : (seconds * 1.0e6 / done), + done / seconds, + (message == null ? "" : " "), + message); + if (flags.get(Flag.histogram).equals(true)) { + System.out.printf("Microseconds per op:%n%s%n", hist.toString()); + } + } + } +} diff --git a/leveldb-benchmark/src/main/java/org/iq80/leveldb/benchmark/Histogram.java b/leveldb-benchmark/src/main/java/org/iq80/leveldb/benchmark/Histogram.java new file mode 100644 index 0000000..d443524 --- /dev/null +++ b/leveldb-benchmark/src/main/java/org/iq80/leveldb/benchmark/Histogram.java @@ -0,0 +1,178 @@ +/* + * Copyright (C) 2011 the original author or authors. + * See the notice.md file distributed with this work for additional + * information regarding copyright ownership. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.iq80.leveldb.benchmark; + +import com.google.common.base.Strings; + +public class Histogram +{ + static final double[] K_BUCKET_LIMIT = { + 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 12, 14, 16, 18, 20, 25, 30, 35, 40, 45, + 50, 60, 70, 80, 90, 100, 120, 140, 160, 180, 200, 250, 300, 350, 400, 450, + 500, 600, 700, 800, 900, 1000, 1200, 1400, 1600, 1800, 2000, 2500, 3000, + 3500, 4000, 4500, 5000, 6000, 7000, 8000, 9000, 10000, 12000, 14000, + 16000, 18000, 20000, 25000, 30000, 35000, 40000, 45000, 50000, 60000, + 70000, 80000, 90000, 100000, 120000, 140000, 160000, 180000, 200000, + 250000, 300000, 350000, 400000, 450000, 500000, 600000, 700000, 800000, + 900000, 1000000, 1200000, 1400000, 1600000, 1800000, 2000000, 2500000, + 3000000, 3500000, 4000000, 4500000, 5000000, 6000000, 7000000, 8000000, + 9000000, 10000000, 12000000, 14000000, 16000000, 18000000, 20000000, + 25000000, 30000000, 35000000, 40000000, 45000000, 50000000, 60000000, + 70000000, 80000000, 90000000, 100000000, 120000000, 140000000, 160000000, + 180000000, 200000000, 250000000, 300000000, 350000000, 400000000, + 450000000, 500000000, 600000000, 700000000, 800000000, 900000000, + 1000000000, 1200000000, 1400000000, 1600000000, 1800000000, 2000000000, + 2500000000.0, 3000000000.0, 3500000000.0, 4000000000.0, 4500000000.0, + 5000000000.0, 6000000000.0, 7000000000.0, 8000000000.0, 9000000000.0, + 1e200, + }; + private final int kNumBuckets = 154; + private double min; + private double max; + private double num; + private double sum; + private double sumSquares; + + private double[] doubles = new double[kNumBuckets]; + + public void clear() + { + min = K_BUCKET_LIMIT[kNumBuckets - 1]; + max = 0; + num = 0; + sum = 0; + sumSquares = 0; + for (int i = 0; i < kNumBuckets; i++) { + doubles[i] = 0; + } + } + + public void add(double value) + { + // Linear search is fast enough for our usage in db_bench + int b = 0; + while (b < kNumBuckets - 1 && K_BUCKET_LIMIT[b] <= value) { + b++; + } + doubles[b] 
+= 1.0; + if (min > value) { + min = value; + } + if (max < value) { + max = value; + } + num++; + sum += value; + sumSquares += (value * value); + } + + public void merge(Histogram other) + { + if (other.min < min) { + min = other.min; + } + if (other.max > max) { + max = other.max; + } + num += other.num; + sum += other.sum; + sumSquares += other.sumSquares; + for (int b = 0; b < kNumBuckets; b++) { + doubles[b] += other.doubles[b]; + } + } + + public double median() + { + return percentile(50.0); + } + + public double percentile(double p) + { + double threshold = num * (p / 100.0); + double sum = 0; + for (int b = 0; b < kNumBuckets; b++) { + sum += doubles[b]; + if (sum >= threshold) { + // Scale linearly within this bucket + double leftPoint = (b == 0) ? 0 : K_BUCKET_LIMIT[b - 1]; + double rightPoint = K_BUCKET_LIMIT[b]; + double leftSum = sum - doubles[b]; + double rightSum = sum; + double pos = (threshold - leftSum) / (rightSum - leftSum); + double r = leftPoint + (rightPoint - leftPoint) * pos; + if (r < min) { + r = min; + } + if (r > max) { + r = max; + } + return r; + } + } + return max; + } + + public double average() + { + if (num == 0.0) { + return 0; + } + return sum / num; + } + + public double standardDeviation() + { + if (num == 0.0) { + return 0; + } + double variance = (sumSquares * num - sum * sum) / (num * num); + return Math.sqrt(variance); + } + + public String toString() + { + StringBuilder r = new StringBuilder(); + r.append(String.format("Count: %.0f Average: %.4f StdDev: %.2f\n", + num, average(), standardDeviation())); + r.append(String.format("Min: %.4f Median: %.4f Max: %.4f\n", + (num == 0.0 ? 
0.0 : min), median(), max)); + r.append("------------------------------------------------------\n"); + r.append("left right count % cum % \n"); + double mult = 100.0 / num; + double sum = 0; + for (int b = 0; b < kNumBuckets; b++) { + if (doubles[b] <= 0.0) { + continue; + } + sum += doubles[b]; + r.append(String.format("[ %7.0f, %7.0f ) %7.0f %7.3f%% %7.3f%% ", + ((b == 0) ? 0.0 : K_BUCKET_LIMIT[b - 1]), // left + K_BUCKET_LIMIT[b], // right + doubles[b], // count + mult * doubles[b], // percentage + mult * sum)); // cumulative percentage + + // Add hash marks based on percentage; 20 marks for 100%. + int marks = (int) (20 * (doubles[b] / num) + 0.5); + r.append(Strings.repeat("#", marks)); + r.append("\n"); + } + return r.toString(); + } +} diff --git a/leveldb/dependency-reduced-pom.xml b/leveldb/dependency-reduced-pom.xml new file mode 100644 index 0000000..969c085 --- /dev/null +++ b/leveldb/dependency-reduced-pom.xml @@ -0,0 +1,148 @@ + + + + leveldb-project + com.hivemc.leveldb + 1.0.0-SNAPSHOT + + 4.0.0 + leveldb + ${project.artifactId} + Port of LevelDB to Java + + + + true + ${project.basedir}/src/main/resources + + **/* + + + + + + + maven-surefire-plugin + + + + listener + org.testng.reporters.VerboseReporter + + + + ${project.build.sourceEncoding} + ${air.test.timezone} + true + %1$tY-%1$tm-%1$td %1$tH:%1$tM:%1$tS %4$s %5$s%6$s%n + + random + true + ${air.test.parallel} + ${air.test.thread-count} + ${argLine} + -Dfile.encoding=${project.build.sourceEncoding} + -Xmx${air.test.jvmsize} + -Xms${air.test.jvmsize} + -XX:OnOutOfMemoryError="kill -9 %p" + -XX:+HeapDumpOnOutOfMemoryError + + + + + + + maven-shade-plugin + 3.5.1 + + + package + + shade + + + + + true + uber + + + org.iq80.snappy:snappy:jar + + + + + com.google.common + ${shadeBase}.guava + + + com.google.thirdparty + ${shadeBase}.guava + + + + + + maven-compiler-plugin + + 11 + 11 + + + + + + + joda-time + joda-time + 2.10.5 + test + + + org.testng + testng + 6.10 + test + + + junit + junit + + 
+ guice + com.google.inject + + + jcommander + com.beust + + + + + org.fusesource.leveldbjni + leveldbjni + 1.1 + test + + + org.iq80.leveldb + leveldb-api + + + hawtjni-runtime + org.fusesource.hawtjni + + + leveldb-api + org.iq80.leveldb + + + + + + 2048m + org.iq80.leveldb.shaded + ${project.parent.basedir} + 1 + + diff --git a/leveldb/pom.xml b/leveldb/pom.xml new file mode 100644 index 0000000..445d00e --- /dev/null +++ b/leveldb/pom.xml @@ -0,0 +1,165 @@ + + + 4.0.0 + + + com.hivemc.leveldb + leveldb-project + 1.0.0-SNAPSHOT + + + leveldb + ${project.artifactId} + Port of LevelDB to Java + + + ${project.parent.basedir} + org.iq80.leveldb.shaded + 2048m + 1 + + + + + + com.hivemc.leveldb + leveldb-api + + + + org.xerial.snappy + snappy-java + 1.1.10.4 + true + + + + org.iq80.snappy + snappy + 0.5 + true + + + + com.google.guava + guava + + + + + joda-time + joda-time + test + + + + org.testng + testng + test + + + + + org.fusesource.leveldbjni + leveldbjni + 1.1 + test + + + org.iq80.leveldb + leveldb-api + + + + + + + + + org.apache.maven.plugins + maven-shade-plugin + 3.5.1 + + + package + + shade + + + + + true + uber + + + + org.iq80.snappy:snappy:jar + + + + + com.google.common + ${shadeBase}.guava + + + com.google.thirdparty + ${shadeBase}.guava + + + + + + org.apache.maven.plugins + maven-compiler-plugin + + 11 + 11 + + + + + + + ${project.basedir}/src/main/resources + true + + **/* + + + + + + + org.apache.maven.plugins + maven-surefire-plugin + + + + listener + org.testng.reporters.VerboseReporter + + + + ${project.build.sourceEncoding} + ${air.test.timezone} + true + %1$tY-%1$tm-%1$td %1$tH:%1$tM:%1$tS %4$s %5$s%6$s%n + + random + true + ${air.test.parallel} + ${air.test.thread-count} + + ${argLine} + -Dfile.encoding=${project.build.sourceEncoding} + -Xmx${air.test.jvmsize} + -Xms${air.test.jvmsize} + -XX:OnOutOfMemoryError="kill -9 %p" + -XX:+HeapDumpOnOutOfMemoryError + + + + + + + diff --git a/leveldb/src/main/java/org/iq80/leveldb/env/DbLock.java 
b/leveldb/src/main/java/org/iq80/leveldb/env/DbLock.java new file mode 100644 index 0000000..fdc880c --- /dev/null +++ b/leveldb/src/main/java/org/iq80/leveldb/env/DbLock.java @@ -0,0 +1,35 @@ +/* + * Copyright (C) 2011 the original author or authors. + * See the notice.md file distributed with this work for additional + * information regarding copyright ownership. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.iq80.leveldb.env; + +/** + * DB lock information + */ +public interface DbLock +{ + /** + * Is lock valid and not released + * @return true, if acquired + */ + boolean isValid(); + + /** + * Release DB lock + */ + void release(); +} diff --git a/leveldb/src/main/java/org/iq80/leveldb/env/Env.java b/leveldb/src/main/java/org/iq80/leveldb/env/Env.java new file mode 100644 index 0000000..103b1d6 --- /dev/null +++ b/leveldb/src/main/java/org/iq80/leveldb/env/Env.java @@ -0,0 +1,115 @@ +/* + * Copyright (C) 2011 the original author or authors. + * See the notice.md file distributed with this work for additional + * information regarding copyright ownership. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.iq80.leveldb.env; + +import org.iq80.leveldb.Logger; + +import java.io.IOException; + +public interface Env +{ + long nowMicros(); + + /** + * Transform a file name into a {@link File} instance scoped to this {@link Env} instance + * @param filename full file name + * @return File instance + */ + File toFile(String filename); + + /** + * Create a temporary directory in filesystem + * @param prefix prefix to use as name + * @return newly created directory + */ + File createTempDir(String prefix); + + /** + * Create a brand new sequentially-readable file with the specified file name. + * + * @return new file that can only be accessed by one thread at a time. + * @throws IOException If the file does not exist or inaccessible. + */ + SequentialFile newSequentialFile(File file) throws IOException; + + /** + * Create a brand new random access read-only file with the + * specified file name. + * + * @return new file that may be concurrently accessed by multiple threads. + * @throws IOException If the file does not exist or inaccessible. + */ + RandomInputFile newRandomAccessFile(File file) throws IOException; + + /** + * Create an object that writes to a new file with the specified + * name. Deletes any existing file with the same name and creates a + * new file. + *

+ * + * @return new file that can be accessed by one thread at a time. + * @throws IOException If the file not writable. + */ + WritableFile newWritableFile(File file) throws IOException; + + /** + * Create an WritableFile that either appends to an existing file, or + * writes to a new file (if the file does not exist to begin with). + *

+ * May return an IsNotSupportedError error if this Env does + * not allow appending to an existing file. Users of Env (including + * the leveldb implementation) must be prepared to deal with + * an Env that does not support appending. TODO + * + * @return new or existing writable file only accessible by one thread at a time. + * @throws IOException If the file is inaccessible. + */ + WritableFile newAppendableFile(File file) throws IOException; + + /** + * Write {@code content} to file. Replace existing content. + * @param file file location + * @param content new content + * @throws IOException If the file not writable. + */ + void writeStringToFileSync(File file, String content) throws IOException; + + /** + * Read full file content to string + * @param file file location + * @throws IOException If the file not readable. + */ + String readFileToString(File file) throws IOException; + + /** + * Create and return a log file for storing informational messages. + * + * @param loggerFile logger file + * @return logger instance if file is writable, {@code null} otherwise + */ + Logger newLogger(File loggerFile) throws IOException; + + /** + * Attempts to acquire an exclusive lock on lock file + * + * @param lockFile lock file + * @return releasable db lock + * @throws IOException If lock is already held or some other I/O error occurs + */ + DbLock tryLock(File lockFile) throws IOException; +} diff --git a/leveldb/src/main/java/org/iq80/leveldb/env/File.java b/leveldb/src/main/java/org/iq80/leveldb/env/File.java new file mode 100644 index 0000000..1e54f34 --- /dev/null +++ b/leveldb/src/main/java/org/iq80/leveldb/env/File.java @@ -0,0 +1,73 @@ +/* + * Copyright (C) 2011 the original author or authors. + * See the notice.md file distributed with this work for additional + * information regarding copyright ownership. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.iq80.leveldb.env; + +import java.util.List; + +public interface File +{ + /** + * Resolve the given path against this path. + * + * @param other the path to resolve against this path + * @return the resulting path + */ + File child(String other); + + /** + * Creates the directory named by this file, including any + * necessary but nonexistent parent directories. + * + * @return {@code true} if and only if the directory was created, + * along with all necessary parent directories; {@code false} + * otherwise + */ + boolean mkdirs(); + + String getName(); + + File getParentFile(); + + String getPath(); + + boolean canRead(); + + boolean exists(); + + boolean isDirectory(); + + boolean isFile(); + + /** + * @return File size or {@code 0L} if file does not exist + */ + long length(); + + boolean delete(); + + List listFiles(); + + boolean renameTo(File dest); + + /** + * Delete this file and all its contained files and directories. + * @return {@code true} if all content and this file where deleted, {@code false} + * otherwise + */ + boolean deleteRecursively(); +} diff --git a/leveldb/src/main/java/org/iq80/leveldb/env/NoOpLogger.java b/leveldb/src/main/java/org/iq80/leveldb/env/NoOpLogger.java new file mode 100644 index 0000000..4f29c0d --- /dev/null +++ b/leveldb/src/main/java/org/iq80/leveldb/env/NoOpLogger.java @@ -0,0 +1,35 @@ +/* + * Copyright (C) 2011 the original author or authors. + * See the notice.md file distributed with this work for additional + * information regarding copyright ownership. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.iq80.leveldb.env; + +import org.iq80.leveldb.Logger; + +public class NoOpLogger implements Logger +{ + @Override + public void log(String message) + { + /* no op */ + } + + @Override + public void log(String message, Object... args) + { + /* no op */ + } +} diff --git a/leveldb/src/main/java/org/iq80/leveldb/env/RandomInputFile.java b/leveldb/src/main/java/org/iq80/leveldb/env/RandomInputFile.java new file mode 100644 index 0000000..23fd2d6 --- /dev/null +++ b/leveldb/src/main/java/org/iq80/leveldb/env/RandomInputFile.java @@ -0,0 +1,44 @@ +/* + * Copyright (C) 2011 the original author or authors. + * See the notice.md file distributed with this work for additional + * information regarding copyright ownership. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.iq80.leveldb.env; + +import java.io.Closeable; +import java.io.IOException; +import java.nio.ByteBuffer; + +/** + * Read only data source for table data/blocks. + * + * @author Honore Vasconcelos + */ +public interface RandomInputFile extends Closeable +{ + /** + * Source size + */ + long size(); + + /** + * Read {@code length} bytes from source from {@code source} starting at {@code offset} position. + * @param offset position for read start + * @param length length of the bytes to read + * @return read only view of the data. + * @throws IOException on any exception will accessing source media + */ + ByteBuffer read(long offset, int length) throws IOException; +} diff --git a/leveldb/src/main/java/org/iq80/leveldb/env/SequentialFile.java b/leveldb/src/main/java/org/iq80/leveldb/env/SequentialFile.java new file mode 100644 index 0000000..8ed7940 --- /dev/null +++ b/leveldb/src/main/java/org/iq80/leveldb/env/SequentialFile.java @@ -0,0 +1,50 @@ +/* + * Copyright (C) 2011 the original author or authors. + * See the notice.md file distributed with this work for additional + * information regarding copyright ownership. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.iq80.leveldb.env; + +import org.iq80.leveldb.util.SliceOutput; + +import java.io.Closeable; +import java.io.IOException; + +public interface SequentialFile extends Closeable +{ + /** + * Skips over and discards n bytes of data from the + * input stream. 
+ * + * @param n the number of bytes to be skipped. + * @throws IOException if n is negative, if the stream does not + * support seek, or if an I/O error occurs. + */ + void skip(long n) throws IOException; + + /** + * Read up to "atMost" bytes from the file. + * + * @param atMost the maximum number of bytes to read. + * @param destination data destination + * @return the total number of bytes read into the destination, or + * -1 if there is no more data because the end of + * the stream has been reached. + * @throws IOException If the first byte cannot be read for any reason + * other than end of file, or if the input stream has been closed, or if + * some other I/O error occurs. + */ + int read(int atMost, SliceOutput destination) throws IOException; +} diff --git a/leveldb/src/main/java/org/iq80/leveldb/env/WritableFile.java b/leveldb/src/main/java/org/iq80/leveldb/env/WritableFile.java new file mode 100644 index 0000000..7d7d6c7 --- /dev/null +++ b/leveldb/src/main/java/org/iq80/leveldb/env/WritableFile.java @@ -0,0 +1,46 @@ +/* + * Copyright (C) 2011 the original author or authors. + * See the notice.md file distributed with this work for additional + * information regarding copyright ownership. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.iq80.leveldb.env; + +import org.iq80.leveldb.util.Slice; + +import java.io.Closeable; +import java.io.IOException; + +/** + * A file abstraction for sequential writing. 
The implementation + * must provide buffering since callers may append small fragments + * at a time to the file. + * + * @author Honore Vasconcelos + */ +public interface WritableFile extends Closeable +{ + /** + * Append {@code data} to current file position. + * @param data data to append + * @throws IOException on any error accessing file + */ + void append(Slice data) throws IOException; + + /** + * Force sync bytes to filesystem. + * @throws IOException on any error accessing file + */ + void force() throws IOException; +} diff --git a/leveldb/src/main/java/org/iq80/leveldb/fileenv/ByteBufferSupport.java b/leveldb/src/main/java/org/iq80/leveldb/fileenv/ByteBufferSupport.java new file mode 100644 index 0000000..7b1885a --- /dev/null +++ b/leveldb/src/main/java/org/iq80/leveldb/fileenv/ByteBufferSupport.java @@ -0,0 +1,83 @@ +/* + * Copyright (C) 2011 the original author or authors. + * See the notice.md file distributed with this work for additional + * information regarding copyright ownership. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
/**
 * Eagerly releases the native memory behind a {@link MappedByteBuffer}
 * instead of waiting for the garbage collector, which is required e.g. to
 * delete or truncate a mapped file on Windows.
 */
final class ByteBufferSupport
{
    // Handle taking a single ByteBuffer argument that releases its mapping.
    private static final MethodHandle INVOKE_CLEANER;

    static {
        MethodHandle invoker;
        try {
            // Java 9 added an invokeCleaner method to Unsafe to work around
            // module visibility issues for code that used to rely on
            // DirectByteBuffer's cleaner()
            Class<?> unsafeClass = Class.forName("sun.misc.Unsafe");
            Field theUnsafe = unsafeClass.getDeclaredField("theUnsafe");
            theUnsafe.setAccessible(true);
            invoker = MethodHandles.lookup()
                    .findVirtual(unsafeClass, "invokeCleaner", MethodType.methodType(void.class, ByteBuffer.class))
                    .bindTo(theUnsafe.get(null));
        }
        catch (Exception e) {
            // fall back to pre-java 9 compatible behavior:
            // ((DirectByteBuffer) buffer).cleaner().clean()
            try {
                Class<?> directByteBufferClass = Class.forName("java.nio.DirectByteBuffer");
                Class<?> cleanerClass = Class.forName("sun.misc.Cleaner");

                Method cleanerMethod = directByteBufferClass.getDeclaredMethod("cleaner");
                cleanerMethod.setAccessible(true);
                MethodHandle getCleaner = MethodHandles.lookup().unreflect(cleanerMethod);

                Method cleanMethod = cleanerClass.getDeclaredMethod("clean");
                // BUG FIX: this used to call cleanerMethod.setAccessible(true)
                // a second time, leaving cleanMethod inaccessible and making
                // unreflect() fail on the legacy path.
                cleanMethod.setAccessible(true);
                MethodHandle clean = MethodHandles.lookup().unreflect(cleanMethod);

                // Adapt clean() so it accepts the buffer and fetches the
                // cleaner from it: unmap(b) -> clean(cleaner(b), b)
                clean = MethodHandles.dropArguments(clean, 1, directByteBufferClass);
                invoker = MethodHandles.foldArguments(clean, getCleaner);
            }
            catch (Exception e1) {
                throw new AssertionError(e1);
            }
        }
        INVOKE_CLEANER = invoker;
    }

    private ByteBufferSupport()
    {
    }

    /**
     * Releases the mapping behind {@code buffer}. The buffer must not be
     * used afterwards.
     *
     * @param buffer mapped buffer to unmap
     */
    public static void unmap(MappedByteBuffer buffer)
    {
        try {
            INVOKE_CLEANER.invoke(buffer);
        }
        catch (Throwable t) {
            // Same contract as the previous guava Throwables.propagate:
            // unchecked throwables pass through, checked ones are wrapped.
            if (t instanceof RuntimeException) {
                throw (RuntimeException) t;
            }
            if (t instanceof Error) {
                throw (Error) t;
            }
            throw new RuntimeException(t);
        }
    }
}
a/leveldb/src/main/java/org/iq80/leveldb/fileenv/EnvImpl.java b/leveldb/src/main/java/org/iq80/leveldb/fileenv/EnvImpl.java new file mode 100644 index 0000000..de51fad --- /dev/null +++ b/leveldb/src/main/java/org/iq80/leveldb/fileenv/EnvImpl.java @@ -0,0 +1,222 @@ +/* + * Copyright (C) 2011 the original author or authors. + * See the notice.md file distributed with this work for additional + * information regarding copyright ownership. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.iq80.leveldb.fileenv; + +import com.google.common.io.Files; +import org.iq80.leveldb.env.DbLock; +import org.iq80.leveldb.env.Env; +import org.iq80.leveldb.env.File; +import org.iq80.leveldb.Logger; +import org.iq80.leveldb.env.NoOpLogger; +import org.iq80.leveldb.env.RandomInputFile; +import org.iq80.leveldb.env.SequentialFile; +import org.iq80.leveldb.env.WritableFile; +import org.iq80.leveldb.util.Slice; + +import java.io.FileOutputStream; +import java.io.IOException; +import java.nio.ByteBuffer; +import java.util.concurrent.TimeUnit; + +import static java.nio.charset.StandardCharsets.UTF_8; + +public class EnvImpl implements Env +{ + private static final int PAGE_SIZE = 1024 * 1024; + private final MmapLimiter mmapLimiter; + + private EnvImpl(MmapLimiter mmapLimiter) + { + this.mmapLimiter = mmapLimiter; + } + + public static Env createEnv() + { + return new EnvImpl(MmapLimiter.defaultLimiter()); + } + + public static Env createEnv(MmapLimiter mmapLimiter) + { + return new EnvImpl(mmapLimiter); + } + + @Override + public long nowMicros() + { + return TimeUnit.NANOSECONDS.toMicros(System.nanoTime()); + } + + @Override + public File toFile(String filename) + { + return JavaFile.fromFile(new java.io.File(filename)); + } + + @Override + public File createTempDir(String prefix) + { + return JavaFile.fromFile(FileUtils.createTempDir(prefix)); + } + + @Override + public SequentialFile newSequentialFile(File file) throws IOException + { + return SequentialFileImpl.open(JavaFile.toFile(file)); + } + + @Override + public RandomInputFile newRandomAccessFile(File file) throws IOException + { + if (mmapLimiter.acquire()) { + try { + return new DelegateRandomInputFile(mmapLimiter, MMRandomInputFile.open(JavaFile.toFile(file))); + } + catch (IOException e) { + mmapLimiter.release(); + throw e; + } + } + return UnbufferedRandomInputFile.open(JavaFile.toFile(file)); + } + + @Override + public WritableFile newWritableFile(File file) throws IOException + { + if 
(mmapLimiter.acquire()) { + try { + return new DelegateWritableFile(mmapLimiter, MMWritableFile.open(JavaFile.toFile(file), PAGE_SIZE)); + } + catch (IOException e) { + mmapLimiter.release(); + throw e; + } + } + return UnbufferedWritableFile.open(JavaFile.toFile(file), false); + } + + @Override + public WritableFile newAppendableFile(File file) throws IOException + { + return UnbufferedWritableFile.open(JavaFile.toFile(file), true); + } + + @Override + public void writeStringToFileSync(File file, String content) throws IOException + { + try (FileOutputStream stream = new FileOutputStream(JavaFile.toFile(file))) { + stream.write(content.getBytes(UTF_8)); + stream.flush(); + stream.getFD().sync(); + } + } + + @Override + public String readFileToString(File file) throws IOException + { + return Files.asCharSource(JavaFile.toFile(file), UTF_8).read(); + } + + @Override + public Logger newLogger(File loggerFile) throws IOException + { + return new NoOpLogger(); //different that native but avoid for ever growing log file + } + + /** + * Attempts to acquire an exclusive lock on this file + * + * @param file lock file + * @return releasable db lock + * @throws IOException If lock is already held or some other I/O error occurs + */ + @Override + public DbLock tryLock(File file) throws IOException + { + return FileLock.tryLock(JavaFile.toFile(file)); + } + + private static class DelegateRandomInputFile implements RandomInputFile + { + private final MmapLimiter mmapLimiter; + private final RandomInputFile open; + + DelegateRandomInputFile(MmapLimiter mmapLimiter, RandomInputFile open) + { + this.mmapLimiter = mmapLimiter; + this.open = open; + } + + @Override + public long size() + { + return open.size(); + } + + @Override + public ByteBuffer read(long offset, int length) throws IOException + { + return open.read(offset, length); + } + + @Override + public void close() throws IOException + { + try { + open.close(); + } + finally { + mmapLimiter.release(); + } + } + } + + 
private static class DelegateWritableFile implements WritableFile + { + private final MmapLimiter mmapLimiter; + private final WritableFile open; + + DelegateWritableFile(MmapLimiter mmapLimiter, WritableFile open) + { + this.mmapLimiter = mmapLimiter; + this.open = open; + } + + @Override + public void append(Slice data) throws IOException + { + open.append(data); + } + + @Override + public void force() throws IOException + { + open.force(); + } + + @Override + public void close() throws IOException + { + try { + open.close(); + } + finally { + mmapLimiter.release(); + } + } + } +} diff --git a/leveldb/src/main/java/org/iq80/leveldb/fileenv/FileLock.java b/leveldb/src/main/java/org/iq80/leveldb/fileenv/FileLock.java new file mode 100644 index 0000000..1cdb2f0 --- /dev/null +++ b/leveldb/src/main/java/org/iq80/leveldb/fileenv/FileLock.java @@ -0,0 +1,97 @@ +/* + * Copyright (C) 2011 the original author or authors. + * See the notice.md file distributed with this work for additional + * information regarding copyright ownership. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.iq80.leveldb.fileenv; + +import org.iq80.leveldb.DBException; +import org.iq80.leveldb.env.DbLock; +import org.iq80.leveldb.util.Closeables; + +import java.io.File; +import java.io.IOException; +import java.io.RandomAccessFile; +import java.nio.channels.FileChannel; + +import static java.lang.String.format; +import static java.util.Objects.requireNonNull; + +class FileLock implements DbLock +{ + private final File lockFile; + private final FileChannel channel; + private final java.nio.channels.FileLock lock; + + private FileLock(File lockFile, FileChannel channel, java.nio.channels.FileLock lock) + { + this.lockFile = lockFile; + this.channel = channel; + this.lock = lock; + } + + /** + * Attempts to acquire an exclusive lock on this file + * + * @param lockFile lock file + * @return releasable db lock + * @throws IOException If lock is already held or some other I/O error occurs + */ + public static FileLock tryLock(File lockFile) throws IOException + { + requireNonNull(lockFile, "lockFile is null"); + // open and lock the file + final FileChannel channel = new RandomAccessFile(lockFile, "rw").getChannel(); + try { + java.nio.channels.FileLock lock = channel.tryLock(); + if (lock == null) { + throw new IOException(format("Unable to acquire lock on '%s'", lockFile.getAbsolutePath())); + } + return new FileLock(lockFile, channel, lock); + } + catch (Exception e) { + Closeables.closeQuietly(channel); + throw new IOException(format("Unable to acquire lock on '%s'", lockFile.getAbsolutePath()), e); + } + } + + @Override + public boolean isValid() + { + return lock.isValid(); + } + + @Override + public void release() + { + try (FileChannel closeMe = channel) { + lock.release(); + } + catch (IOException e) { + throw new DBException(e); + } + } + + @Override + public String toString() + { + StringBuilder sb = new StringBuilder(); + sb.append("DbLock"); + sb.append("{lockFile=").append(lockFile); + sb.append(", lock=").append(lock); + sb.append('}'); + 
return sb.toString(); + } +} diff --git a/leveldb/src/main/java/org/iq80/leveldb/fileenv/FileLogger.java b/leveldb/src/main/java/org/iq80/leveldb/fileenv/FileLogger.java new file mode 100644 index 0000000..bfaa4d3 --- /dev/null +++ b/leveldb/src/main/java/org/iq80/leveldb/fileenv/FileLogger.java @@ -0,0 +1,75 @@ +/* + * Copyright (C) 2011 the original author or authors. + * See the notice.md file distributed with this work for additional + * information regarding copyright ownership. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.iq80.leveldb.fileenv; + +import org.iq80.leveldb.Logger; +import org.iq80.leveldb.util.LogMessageFormatter; + +import java.io.File; +import java.io.FileOutputStream; +import java.io.IOException; +import java.io.OutputStream; +import java.io.PrintStream; +import java.time.LocalDateTime; +import java.util.function.Supplier; + +class FileLogger + implements Logger +{ + private final PrintStream ps; + private final LogMessageFormatter formatter; + + private FileLogger(PrintStream ps, LogMessageFormatter formatter) + { + this.ps = ps; + this.formatter = formatter; + } + + public static Logger createLogger(OutputStream outputStream, Supplier clock) + { + return new FileLogger(new PrintStream(outputStream), new LogMessageFormatter(clock)); + } + + public static Logger createFileLogger(File loggerFile) throws IOException + { + return createLogger(new FileOutputStream(loggerFile), LocalDateTime::now); + } + + @Override + public void log(String template, Object... args) + { + log2(formatter.format(template, args)); + } + + @Override + public void log(String message) + { + log2(formatter.format(message)); + } + + private void log2(String message) + { + ps.println(message); + } + + @Override + public void close() throws IOException + { + ps.close(); + } +} diff --git a/leveldb/src/main/java/org/iq80/leveldb/fileenv/FileUtils.java b/leveldb/src/main/java/org/iq80/leveldb/fileenv/FileUtils.java new file mode 100644 index 0000000..bcdb596 --- /dev/null +++ b/leveldb/src/main/java/org/iq80/leveldb/fileenv/FileUtils.java @@ -0,0 +1,120 @@ +/* + * Copyright (C) 2011 the original author or authors. + * See the notice.md file distributed with this work for additional + * information regarding copyright ownership. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
/**
 * File-system helpers built on {@link java.io.File}: temp-directory creation
 * and recursive deletion that refuses to follow symbolic links.
 */
public final class FileUtils
{
    private static final int TEMP_DIR_ATTEMPTS = 10000;

    private FileUtils()
    {
    }

    /**
     * Heuristically detects whether {@code file} is (or sits behind) a
     * symbolic link by comparing canonical and absolute paths.
     * On any I/O error this errs on the side of caution and returns true.
     */
    public static boolean isSymbolicLink(File file)
    {
        try {
            File canonicalFile = file.getCanonicalFile();
            File absoluteFile = file.getAbsoluteFile();
            File parentFile = file.getParentFile();
            // a symbolic link has a different name between the canonical and absolute path
            return !canonicalFile.getName().equals(absoluteFile.getName()) ||
                    // or the canonical parent path is not the same as the file's parent path,
                    // provided the file has a parent path
                    parentFile != null && !parentFile.getCanonicalPath().equals(canonicalFile.getParent());
        }
        catch (IOException e) {
            // error on the side of caution
            return true;
        }
    }

    /**
     * Lists the children of {@code dir}; empty list (never null) when the
     * directory cannot be listed.
     */
    public static List<File> listFiles(File dir)
    {
        File[] files = dir.listFiles();
        if (files == null) {
            return List.of();
        }
        return List.of(files);
    }

    /** Lists the children of {@code dir} accepted by {@code filter}. */
    public static List<File> listFiles(File dir, FilenameFilter filter)
    {
        File[] files = dir.listFiles(filter);
        if (files == null) {
            return List.of();
        }
        return List.of(files);
    }

    /** Creates a fresh directory under the system temp dir. */
    public static File createTempDir(String prefix)
    {
        return createTempDir(new File(System.getProperty("java.io.tmpdir")), prefix);
    }

    /**
     * Creates a fresh uniquely-named directory under {@code parentDir},
     * retrying with a counter suffix against concurrent creators.
     *
     * @throws IllegalStateException when no unique name is found
     */
    public static File createTempDir(File parentDir, String prefix)
    {
        String baseName = "";
        if (prefix != null) {
            baseName += prefix + "-";
        }

        baseName += System.currentTimeMillis() + "-";
        for (int counter = 0; counter < TEMP_DIR_ATTEMPTS; counter++) {
            File tempDir = new File(parentDir, baseName + counter);
            if (tempDir.mkdir()) {
                return tempDir;
            }
        }
        throw new IllegalStateException("Failed to create directory within "
                + TEMP_DIR_ATTEMPTS + " attempts (tried "
                + baseName + "0 to " + baseName + (TEMP_DIR_ATTEMPTS - 1) + ')');
    }

    /**
     * Deletes everything inside {@code directory} (but not the directory
     * itself). Symbolic-link directories are not descended into.
     *
     * @return true when every entry was deleted
     */
    public static boolean deleteDirectoryContents(File directory)
    {
        // Same exception type and message as the previous guava checkArgument.
        if (!directory.isDirectory()) {
            throw new IllegalArgumentException("Not a directory: " + directory);
        }

        // Don't delete symbolic link directories
        if (isSymbolicLink(directory)) {
            return false;
        }

        boolean success = true;
        for (File file : listFiles(directory)) {
            success = deleteRecursively(file) && success;
        }
        return success;
    }

    /** Deletes {@code file}, recursing first when it is a directory. */
    public static boolean deleteRecursively(File file)
    {
        boolean success = true;
        if (file.isDirectory()) {
            success = deleteDirectoryContents(file);
        }

        return file.delete() && success;
    }
}
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.iq80.leveldb.fileenv; + +import org.iq80.leveldb.env.File; + +import java.util.Collections; +import java.util.List; +import java.util.Objects; +import java.util.stream.Collectors; +import java.util.stream.Stream; + +public class JavaFile implements File +{ + private final java.io.File file; + + private JavaFile(java.io.File file) + { + this.file = file; + } + + public static JavaFile fromFile(java.io.File path) + { + return new JavaFile(path); + } + + static java.io.File toFile(File file) + { + return ((JavaFile) file).file; + } + + @Override + public File child(String name) + { + return new JavaFile(new java.io.File(file, name)); + } + + @Override + public boolean mkdirs() + { + return file.mkdirs(); + } + + @Override + public String getName() + { + return file.getName(); + } + + @Override + public File getParentFile() + { + return new JavaFile(file.getParentFile()); + } + + @Override + public String getPath() + { + return file.getAbsolutePath(); + } + + @Override + public boolean canRead() + { + return file.canRead(); + } + + @Override + public boolean exists() + { + return file.exists(); + } + + @Override + public boolean isDirectory() + { + return file.isDirectory(); + } + + @Override + public boolean isFile() + { + return file.isFile(); + } + + @Override + public long length() + { + return file.length(); + } + + @Override + public boolean delete() + { + return file.delete(); + } + + @Override + public List listFiles() + { + java.io.File[] values = file.listFiles(); + return values == null ? 
Collections.emptyList() : Stream.of(values).map(JavaFile::new).collect(Collectors.toList()); + } + + @Override + public boolean renameTo(File dest) + { + return file.renameTo(((JavaFile) dest).file); + } + + @Override + public boolean deleteRecursively() + { + return FileUtils.deleteRecursively(file); + } + + @Override + public boolean equals(Object o) + { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + JavaFile javaFile = (JavaFile) o; + return Objects.equals(file, javaFile.file); + } + + @Override + public int hashCode() + { + return Objects.hash(file); + } + + @Override + public String toString() + { + return String.valueOf(file); + } +} diff --git a/leveldb/src/main/java/org/iq80/leveldb/fileenv/MMRandomInputFile.java b/leveldb/src/main/java/org/iq80/leveldb/fileenv/MMRandomInputFile.java new file mode 100644 index 0000000..e375e8d --- /dev/null +++ b/leveldb/src/main/java/org/iq80/leveldb/fileenv/MMRandomInputFile.java @@ -0,0 +1,95 @@ +/* + * Copyright (C) 2011 the original author or authors. + * See the notice.md file distributed with this work for additional + * information regarding copyright ownership. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.iq80.leveldb.fileenv; + +import com.google.common.io.Files; +import org.iq80.leveldb.env.RandomInputFile; + +import java.io.File; +import java.io.IOException; +import java.nio.ByteBuffer; +import java.nio.ByteOrder; +import java.nio.MappedByteBuffer; + +import static java.util.Objects.requireNonNull; + +/** + * Memory mapped filed table. + * + * @author Honore Vasconcelos + */ +class MMRandomInputFile implements RandomInputFile +{ + private final String file; + private final long size; + private final MappedByteBuffer data; + + private MMRandomInputFile(String file, MappedByteBuffer data, long size) + { + this.file = file; + this.size = size; + this.data = data; + } + + /** + * Open file using memory mapped file access. + * @param file file to open + * @return readable file + * @throws IOException If some other I/O error occurs + */ + public static RandomInputFile open(File file) throws IOException + { + requireNonNull(file, "file is null"); + MappedByteBuffer map = Files.map(file); + + return new MMRandomInputFile(file.getAbsolutePath(), map, map.capacity()); + } + + @Override + public long size() + { + return size; + } + + @Override + public ByteBuffer read(long offset, int length) + { + int newPosition = (int) (data.position() + offset); + return (ByteBuffer) data.duplicate().order(ByteOrder.LITTLE_ENDIAN).clear().limit(newPosition + length).position(newPosition); + } + + @Override + public void close() throws IOException + { + try { + ByteBufferSupport.unmap(data); + } + catch (Exception e) { + throw new IOException("Unable to unmap file", e); + } + } + + @Override + public String toString() + { + return "MMTableDataSource{" + + "file='" + file + '\'' + + ", size=" + size + + '}'; + } +} diff --git a/leveldb/src/main/java/org/iq80/leveldb/fileenv/MMWritableFile.java b/leveldb/src/main/java/org/iq80/leveldb/fileenv/MMWritableFile.java new file mode 100644 index 0000000..0d33298 --- /dev/null +++ 
b/leveldb/src/main/java/org/iq80/leveldb/fileenv/MMWritableFile.java @@ -0,0 +1,125 @@ +/* + * Copyright (C) 2011 the original author or authors. + * See the notice.md file distributed with this work for additional + * information regarding copyright ownership. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.iq80.leveldb.fileenv; + +import com.google.common.io.Files; +import org.iq80.leveldb.util.Slice; +import org.iq80.leveldb.env.WritableFile; + +import java.io.File; +import java.io.FileNotFoundException; +import java.io.IOException; +import java.nio.MappedByteBuffer; +import java.nio.channels.FileChannel; + +/** + * Memory mapped file implementation of {@link WritableFile}. 
+ */ +class MMWritableFile implements WritableFile +{ + private final File file; + private final int pageSize; + private MappedByteBuffer mappedByteBuffer; + private int fileOffset; + + private MMWritableFile(File file, int pageSize, MappedByteBuffer map) + { + this.file = file; + this.pageSize = pageSize; + this.fileOffset = 0; + this.mappedByteBuffer = map; + } + + public static WritableFile open(File file, int pageSize) throws IOException + { + return new MMWritableFile(file, pageSize, Files.map(file, FileChannel.MapMode.READ_WRITE, pageSize)); + } + + @Override + public void append(Slice data) throws IOException + { + ensureCapacity(data.length()); + data.getBytes(0, mappedByteBuffer); + } + + private void destroyMappedByteBuffer() + { + if (mappedByteBuffer != null) { + fileOffset += mappedByteBuffer.position(); + unmap(); + } + mappedByteBuffer = null; + } + + private void ensureCapacity(int bytes) + throws IOException + { + if (mappedByteBuffer == null) { + mappedByteBuffer = openNewMap(fileOffset, Math.max(bytes, pageSize)); + } + if (mappedByteBuffer.remaining() < bytes) { + // remap + fileOffset += mappedByteBuffer.position(); + unmap(); + int sizeToGrow = Math.max(bytes, pageSize); + mappedByteBuffer = openNewMap(fileOffset, sizeToGrow); + } + } + + private MappedByteBuffer openNewMap(int fileOffset, int sizeToGrow) throws IOException + { + try (FileChannel cha = openChannel()) { + return cha.map(FileChannel.MapMode.READ_WRITE, fileOffset, sizeToGrow); + } + } + + private FileChannel openChannel() throws FileNotFoundException + { + return new java.io.RandomAccessFile(file, "rw").getChannel(); + } + + private void unmap() + { + ByteBufferSupport.unmap(mappedByteBuffer); + } + + @Override + public void force() throws IOException + { + if (mappedByteBuffer != null) { + mappedByteBuffer.force(); + } + } + + @Override + public void close() throws IOException + { + destroyMappedByteBuffer(); + try (FileChannel cha = openChannel()) { + cha.truncate(fileOffset); 
+ } + } + + @Override + public String toString() + { + return "MMWritableFile{" + + "file=" + file + + '}'; + } +} diff --git a/leveldb/src/main/java/org/iq80/leveldb/fileenv/MmapLimiter.java b/leveldb/src/main/java/org/iq80/leveldb/fileenv/MmapLimiter.java new file mode 100644 index 0000000..0151249 --- /dev/null +++ b/leveldb/src/main/java/org/iq80/leveldb/fileenv/MmapLimiter.java @@ -0,0 +1,85 @@ +/* + * Copyright (C) 2011 the original author or authors. + * See the notice.md file distributed with this work for additional + * information regarding copyright ownership. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.iq80.leveldb.fileenv; + +import java.util.concurrent.atomic.AtomicInteger; + +/** + * Helper class to limit mmap file usage so that we do not end up + * running out virtual memory or running into kernel performance + * problems for very large databases. + */ +public final class MmapLimiter +{ + public static final int CPU_DATA_MODEL; + + static { + boolean is64bit; + if (System.getProperty("os.name").contains("Windows")) { + is64bit = System.getenv("ProgramFiles(x86)") != null; + } + else { + is64bit = System.getProperty("os.arch").contains("64"); + } + CPU_DATA_MODEL = is64bit ? 64 : 32; + } + + /** + * We only use MMAP on 64 bit systems since it's really easy to run out of + * virtual address space on a 32 bit system when all the data is getting mapped + * into memory. 
/**
 * Helper class to limit mmap file usage so that we do not end up
 * running out of virtual memory or running into kernel performance
 * problems for very large databases.
 */
public final class MmapLimiter
{
    public static final int CPU_DATA_MODEL;

    static {
        boolean is64bit;
        if (System.getProperty("os.name").contains("Windows")) {
            is64bit = System.getenv("ProgramFiles(x86)") != null;
        }
        else {
            is64bit = System.getProperty("os.arch").contains("64");
        }
        CPU_DATA_MODEL = is64bit ? 64 : 32;
    }

    /**
     * We only use MMAP on 64 bit systems since it's really easy to run out of
     * virtual address space on a 32 bit system when all the data is getting mapped
     * into memory. If you really want to use MMAP anyways, use -Dleveldb.mmap=true
     */
    public static final boolean USE_MMAP = Boolean.parseBoolean(System.getProperty("leveldb.mmap", String.valueOf(CPU_DATA_MODEL > 32)));

    private final AtomicInteger maxAllowedMmap;

    private MmapLimiter(int maxAllowedMmap)
    {
        this.maxAllowedMmap = new AtomicInteger(maxAllowedMmap);
    }

    /**
     * Up to 1000 mmaps for 64-bit JVM; none for 32bit.
     */
    public static MmapLimiter defaultLimiter()
    {
        return new MmapLimiter(USE_MMAP ? 1000 : 0);
    }

    public static MmapLimiter newLimiter(int maxAllowedMmap)
    {
        return new MmapLimiter(maxAllowedMmap);
    }

    /**
     * If another mmap slot is available, acquire it and return true.
     * Else return false.
     */
    public boolean acquire()
    {
        if (maxAllowedMmap.getAndDecrement() > 0) {
            return true;
        }
        // BUG FIX: a failed acquire used to leave the counter decremented,
        // permanently leaking one slot per failed attempt. Restore the count
        // on failure, as C++ leveldb's Limiter does.
        maxAllowedMmap.incrementAndGet();
        return false;
    }

    /**
     * Release a slot acquired by a previous call to acquire() that returned true.
     */
    public void release()
    {
        maxAllowedMmap.incrementAndGet();
    }
}
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.iq80.leveldb.fileenv; + +import org.iq80.leveldb.env.SequentialFile; +import org.iq80.leveldb.util.SliceOutput; + +import java.io.File; +import java.io.FileInputStream; +import java.io.IOException; + +import static com.google.common.base.Preconditions.checkState; + +class SequentialFileImpl implements SequentialFile +{ + private final FileInputStream inputStream; + + private SequentialFileImpl(FileInputStream inputStream) + { + this.inputStream = inputStream; + } + + public static SequentialFile open(File file) throws IOException + { + return new SequentialFileImpl(new FileInputStream(file)); + } + + @Override + public void skip(long n) throws IOException + { + checkState(n >= 0, "n must be positive"); + if (inputStream.skip(n) != n) { + throw new IOException(inputStream + " as not enough bytes to skip"); + } + } + + @Override + public int read(int atMost, SliceOutput destination) throws IOException + { + return destination.writeBytes(inputStream, atMost); + } + + @Override + public void close() throws IOException + { + inputStream.close(); + } +} diff --git a/leveldb/src/main/java/org/iq80/leveldb/fileenv/UnbufferedRandomInputFile.java b/leveldb/src/main/java/org/iq80/leveldb/fileenv/UnbufferedRandomInputFile.java new file mode 100644 index 0000000..f76962c --- /dev/null +++ b/leveldb/src/main/java/org/iq80/leveldb/fileenv/UnbufferedRandomInputFile.java @@ -0,0 +1,136 @@ +/* + * Copyright (C) 2011 the original author or authors. + * See the notice.md file distributed with this work for additional + * information regarding copyright ownership. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.iq80.leveldb.fileenv; + +import org.iq80.leveldb.env.RandomInputFile; + +import java.io.File; +import java.io.FileInputStream; +import java.io.FileNotFoundException; +import java.io.IOException; +import java.nio.ByteBuffer; +import java.nio.ByteOrder; +import java.nio.channels.ClosedByInterruptException; +import java.nio.channels.ClosedChannelException; +import java.nio.channels.FileChannel; + +import static java.util.Objects.requireNonNull; + +/** + * @author Honore Vasconcelos + */ +class UnbufferedRandomInputFile implements RandomInputFile +{ + private static final int MAX_RETRY = Integer.getInteger(" org.iq80.leveldb.FileChannel.RETRY", 1000); + private final Object lock = new Object(); + private final File file; + private volatile FileChannel fileChannel; + private final long size; + private boolean closed = false; + + private UnbufferedRandomInputFile(File file, FileChannel fileChannel, long size) + { + this.file = file; + this.fileChannel = fileChannel; + this.size = size; + } + + public static RandomInputFile open(File file) throws IOException + { + requireNonNull(file, "file is null"); + FileChannel channel = openChannel(file); + return new UnbufferedRandomInputFile(file, channel, channel.size()); + } + + private static FileChannel openChannel(File file) throws FileNotFoundException + { + return new FileInputStream(file).getChannel(); + } + + @Override + public long size() + { + return size; + } + + @Override + public ByteBuffer read(long offset, int length) throws IOException + { + if 
(Thread.currentThread().isInterrupted()) { + throw new ClosedByInterruptException(); //do no close! + } + ByteBuffer uncompressedBuffer = ByteBuffer.allocate(length).order(ByteOrder.LITTLE_ENDIAN); + int maxRetry = MAX_RETRY; + do { + final FileChannel fc = this.fileChannel; + try { + fc.read(uncompressedBuffer, offset); + if (uncompressedBuffer.hasRemaining()) { + throw new IOException("Could not read all the data"); + } + uncompressedBuffer.clear(); + return uncompressedBuffer; + } + catch (ClosedByInterruptException e) { + throw e; + } + catch (ClosedChannelException e) { + uncompressedBuffer.clear(); + if (!reOpenChannel(fc)) { + throw new IOException("Channel closed by an other thread concurrently"); + } + } + } while (--maxRetry > 0); + throw new IOException("Unable to reopen file after close exception"); + } + + private boolean reOpenChannel(FileChannel currentFc) throws FileNotFoundException + { + synchronized (lock) { + if (closed) { + //externally closed + return false; + } + if (this.fileChannel == currentFc) { + this.fileChannel = openChannel(file); + } + } + return true; + } + + @Override + public void close() throws IOException + { + synchronized (lock) { + if (closed) { + return; + } + closed = true; + } + fileChannel.close(); + } + + @Override + public String toString() + { + return "FileTableDataSource{" + + "file='" + file + '\'' + + ", size=" + size + + '}'; + } +} diff --git a/leveldb/src/main/java/org/iq80/leveldb/fileenv/UnbufferedWritableFile.java b/leveldb/src/main/java/org/iq80/leveldb/fileenv/UnbufferedWritableFile.java new file mode 100644 index 0000000..b2cf195 --- /dev/null +++ b/leveldb/src/main/java/org/iq80/leveldb/fileenv/UnbufferedWritableFile.java @@ -0,0 +1,73 @@ +/* + * Copyright (C) 2011 the original author or authors. + * See the notice.md file distributed with this work for additional + * information regarding copyright ownership. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.iq80.leveldb.fileenv; + +import org.iq80.leveldb.util.Slice; +import org.iq80.leveldb.env.WritableFile; + +import java.io.File; +import java.io.FileNotFoundException; +import java.io.FileOutputStream; +import java.io.IOException; +import java.nio.channels.FileChannel; + +/** + * @author Honore Vasconcelos + */ +class UnbufferedWritableFile implements WritableFile +{ + private final File file; + private final FileChannel channel; + + private UnbufferedWritableFile(File file, FileChannel channel) + { + this.file = file; + this.channel = channel; + } + + public static WritableFile open(File file, boolean append) throws FileNotFoundException + { + return new UnbufferedWritableFile(file, new FileOutputStream(file, append).getChannel()); + } + + @Override + public void append(Slice data) throws IOException + { + channel.write(data.toByteBuffer()); + } + + @Override + public void force() throws IOException + { + channel.force(false); + } + + @Override + public void close() throws IOException + { + channel.close(); + } + + @Override + public String toString() + { + return "UnbufferedWritableFile{" + + "file=" + file + + '}'; + } +} diff --git a/leveldb/src/main/java/org/iq80/leveldb/impl/Compaction.java b/leveldb/src/main/java/org/iq80/leveldb/impl/Compaction.java new file mode 100644 index 0000000..655d38b --- /dev/null +++ b/leveldb/src/main/java/org/iq80/leveldb/impl/Compaction.java @@ -0,0 +1,213 
@@ +/* + * Copyright (C) 2011 the original author or authors. + * See the notice.md file distributed with this work for additional + * information regarding copyright ownership. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.iq80.leveldb.impl; + +import org.iq80.leveldb.table.UserComparator; +import org.iq80.leveldb.util.Slice; + +import java.util.List; + +import static com.google.common.base.Preconditions.checkArgument; +import static java.util.Objects.requireNonNull; +import static org.iq80.leveldb.impl.DbConstants.NUM_LEVELS; + +// A Compaction encapsulates information about a compaction. 
+public class Compaction implements AutoCloseable +{ + private Version inputVersion; + private final int level; + + // Each compaction reads inputs from "level" and "level+1" + private final List levelInputs; + private final List levelUpInputs; + private final List grandparents; + private final List[] inputs; + + private final long maxOutputFileSize; + private final VersionEdit edit = new VersionEdit(); + + // State used to check for number of of overlapping grandparent files + // (parent == level_ + 1, grandparent == level_ + 2) + + // Index in grandparent_starts_ + private int grandparentIndex; + + // Some output key has been seen + private boolean seenKey; + + // Bytes of overlap between current output and grandparent files + private long overlappedBytes; + + // State for implementing IsBaseLevelForKey + + // levelPointers holds indices into inputVersion -> levels: our state + // is that we are positioned at one of the file ranges for each + // higher level than the ones involved in this compaction (i.e. for + // all L >= level_ + 2). + private final int[] levelPointers = new int[NUM_LEVELS]; + + public Compaction(Version inputVersion, int level, long maxOutputFileSize, List levelInputs, List levelUpInputs, List grandparents) + { + this.inputVersion = inputVersion; + this.level = level; + this.levelInputs = levelInputs; + this.levelUpInputs = levelUpInputs; + this.grandparents = List.copyOf(requireNonNull(grandparents, "grandparents is null")); + this.maxOutputFileSize = maxOutputFileSize; + this.inputs = new List[] {levelInputs, levelUpInputs}; + inputVersion.retain(); + } + + public int getLevel() + { + return level; + } + + public List getLevelInputs() + { + return levelInputs; + } + + public List getLevelUpInputs() + { + return levelUpInputs; + } + + public VersionEdit getEdit() + { + return edit; + } + + // Return the ith input file at "level()+which" ("which" must be 0 or 1). 
+ public FileMetaData input(int which, int i) + { + checkArgument(which == 0 || which == 1, "which must be either 0 or 1"); + if (which == 0) { + return levelInputs.get(i); + } + else { + return levelUpInputs.get(i); + } + } + + // Maximum size of files to build during this compaction. + public long getMaxOutputFileSize() + { + return maxOutputFileSize; + } + + // Is this a trivial compaction that can be implemented by just + // moving a single input file to the next level (no merging or splitting) + public boolean isTrivialMove() + { + // Avoid a move if there is lots of overlapping grandparent data. + // Otherwise, the move could create a parent file that will require + // a very expensive merge later on. + return (levelInputs.size() == 1 && + levelUpInputs.isEmpty() && + totalFileSize(grandparents) <= inputVersion.getVersionSet().maxGrandParentOverlapBytes()); + + } + + public static long totalFileSize(List files) + { + long sum = 0; + for (FileMetaData file : files) { + sum += file.getFileSize(); + } + return sum; + } + + // Add all inputs to this compaction as delete operations to *edit. + public void addInputDeletions(VersionEdit edit) + { + for (FileMetaData input : levelInputs) { + edit.deleteFile(level, input.getNumber()); + } + for (FileMetaData input : levelUpInputs) { + edit.deleteFile(level + 1, input.getNumber()); + } + } + + // Returns true if the information we have available guarantees that + // the compaction is producing data in "level+1" for which no data exists + // in levels greater than "level+1". + public boolean isBaseLevelForKey(Slice userKey) + { + // Maybe use binary search to find right entry instead of linear search? 
+ UserComparator userComparator = inputVersion.getInternalKeyComparator().getUserComparator(); + for (int level = this.level + 2; level < NUM_LEVELS; level++) { + List files = inputVersion.getFiles(level); + while (levelPointers[level] < files.size()) { + FileMetaData f = files.get(levelPointers[level]); + if (userComparator.compare(userKey, f.getLargest().getUserKey()) <= 0) { + // We've advanced far enough + if (userComparator.compare(userKey, f.getSmallest().getUserKey()) >= 0) { + // Key falls in this file's range, so definitely not base level + return false; + } + break; + } + levelPointers[level]++; + } + } + return true; + } + + // Returns true iff we should stop building the current output + // before processing "internal_key". + public boolean shouldStopBefore(InternalKey internalKey) + { + if (grandparents == null) { + return false; + } + + // Scan to find earliest grandparent file that contains key. + InternalKeyComparator internalKeyComparator = inputVersion.getInternalKeyComparator(); + while (grandparentIndex < grandparents.size() && internalKeyComparator.compare(internalKey, grandparents.get(grandparentIndex).getLargest()) > 0) { + if (seenKey) { + overlappedBytes += grandparents.get(grandparentIndex).getFileSize(); + } + grandparentIndex++; + } + seenKey = true; + + if (overlappedBytes > inputVersion.getVersionSet().maxGrandParentOverlapBytes()) { + // Too much overlap for current output; start new output + overlappedBytes = 0; + return true; + } + else { + return false; + } + } + + @Override + public void close() + { + if (inputVersion != null) { + inputVersion.release(); + inputVersion = null; + } + } + + public List input(int which) + { + return inputs[which]; + } +} diff --git a/leveldb/src/main/java/org/iq80/leveldb/impl/DbConstants.java b/leveldb/src/main/java/org/iq80/leveldb/impl/DbConstants.java new file mode 100644 index 0000000..57c1ec9 --- /dev/null +++ b/leveldb/src/main/java/org/iq80/leveldb/impl/DbConstants.java @@ -0,0 +1,67 @@ +/* + 
/**
 * Global database constants mirrored from the C++ LevelDB implementation.
 * Non-instantiable holder class.
 */
public final class DbConstants
{
    /** On-disk format major version. */
    public static final int MAJOR_VERSION = 0;
    /** On-disk format minor version. */
    public static final int MINOR_VERSION = 1;

    // todo this should be part of the configuration

    /** Max number of levels */
    public static final int NUM_LEVELS = 7;

    /** Number of level-0 files that triggers a level-0 compaction. */
    public static final int L0_COMPACTION_TRIGGER = 4;

    /** Soft level-0 file limit: writes are slowed once it is reached. */
    public static final int L0_SLOWDOWN_WRITES_TRIGGER = 8;

    /** Hard level-0 file limit: writes stop entirely once it is reached. */
    public static final int L0_STOP_WRITES_TRIGGER = 12;

    /**
     * Maximum level to which a new compacted memtable is pushed if it
     * does not create overlap. We try to push to level 2 to avoid the
     * relatively expensive level 0=&gt;1 compactions and to avoid some
     * expensive manifest file operations. We do not push all the way to
     * the largest level since that can generate a lot of wasted disk
     * space if the same key space is being repeatedly overwritten.
     */
    public static final int MAX_MEM_COMPACT_LEVEL = 2;

    /** Approximate byte gap between read samples taken during iteration (1 MiB). */
    public static final int READ_BYTES_PERIOD = 1048576;

    /** File handles reserved for uses other than the table cache. */
    public static final int NUM_NON_TABLE_CACHE_FILES = 10;

    private DbConstants()
    {
        // static-only holder
    }
}
+ */ +package org.iq80.leveldb.impl; + +import com.google.common.annotations.VisibleForTesting; +import com.google.common.base.Preconditions; +import com.google.common.base.Throwables; +import com.google.common.io.Closer; +import com.google.common.util.concurrent.ThreadFactoryBuilder; +import org.iq80.leveldb.CompressionType; +import org.iq80.leveldb.DB; +import org.iq80.leveldb.DBComparator; +import org.iq80.leveldb.DBException; +import org.iq80.leveldb.Options; +import org.iq80.leveldb.Range; +import org.iq80.leveldb.ReadOptions; +import org.iq80.leveldb.Snapshot; +import org.iq80.leveldb.WriteBatch; +import org.iq80.leveldb.WriteOptions; +import org.iq80.leveldb.env.DbLock; +import org.iq80.leveldb.env.Env; +import org.iq80.leveldb.env.File; +import org.iq80.leveldb.env.NoOpLogger; +import org.iq80.leveldb.env.SequentialFile; +import org.iq80.leveldb.env.WritableFile; +import org.iq80.leveldb.impl.Filename.FileInfo; +import org.iq80.leveldb.impl.Filename.FileType; +import org.iq80.leveldb.impl.WriteBatchImpl.Handler; +import org.iq80.leveldb.iterator.DBIteratorAdapter; +import org.iq80.leveldb.iterator.DbIterator; +import org.iq80.leveldb.iterator.InternalIterator; +import org.iq80.leveldb.iterator.MergingIterator; +import org.iq80.leveldb.iterator.SnapshotSeekingIterator; +import org.iq80.leveldb.table.BytewiseComparator; +import org.iq80.leveldb.table.CustomUserComparator; +import org.iq80.leveldb.table.FilterPolicy; +import org.iq80.leveldb.table.TableBuilder; +import org.iq80.leveldb.table.UserComparator; +import org.iq80.leveldb.util.Closeables; +import org.iq80.leveldb.util.SafeListBuilder; +import org.iq80.leveldb.util.Slice; +import org.iq80.leveldb.util.SliceInput; +import org.iq80.leveldb.util.SliceOutput; +import org.iq80.leveldb.util.Slices; +import org.iq80.leveldb.util.Snappy; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Collections; +import java.util.Deque; +import java.util.Iterator; +import java.util.LinkedList; 
+import java.util.List; +import java.util.Random; +import java.util.Set; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; +import java.util.concurrent.Future; +import java.util.concurrent.ThreadFactory; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.locks.Condition; +import java.util.concurrent.locks.ReentrantLock; +import java.util.regex.Matcher; +import java.util.regex.Pattern; +import java.util.stream.Collectors; + +import static com.google.common.base.Preconditions.checkArgument; +import static com.google.common.base.Preconditions.checkState; +import static java.util.Objects.requireNonNull; +import static org.iq80.leveldb.impl.DbConstants.L0_SLOWDOWN_WRITES_TRIGGER; +import static org.iq80.leveldb.impl.DbConstants.L0_STOP_WRITES_TRIGGER; +import static org.iq80.leveldb.impl.DbConstants.NUM_NON_TABLE_CACHE_FILES; +import static org.iq80.leveldb.impl.SequenceNumber.MAX_SEQUENCE_NUMBER; +import static org.iq80.leveldb.impl.ValueType.DELETION; +import static org.iq80.leveldb.impl.ValueType.VALUE; +import static org.iq80.leveldb.util.SizeOf.SIZE_OF_INT; +import static org.iq80.leveldb.util.SizeOf.SIZE_OF_LONG; +import static org.iq80.leveldb.util.Slices.readLengthPrefixedBytes; +import static org.iq80.leveldb.util.Slices.writeLengthPrefixedBytes; + +@SuppressWarnings("AccessingNonPublicFieldOfAnotherObject") +public class DbImpl + implements DB +{ + private final Options options; + private final boolean ownsLogger; + private final File databaseDir; + private final TableCache tableCache; + private final DbLock dbLock; + private final VersionSet versions; + + private final AtomicBoolean shuttingDown = new AtomicBoolean(); + private final ReentrantLock mutex = new ReentrantLock(); + private final Condition backgroundCondition = mutex.newCondition(); + + private final List pendingOutputs = new ArrayList<>(); // todo + private final Deque writers = new 
LinkedList<>(); + private final SnapshotList snapshots = new SnapshotList(mutex); + private final WriteBatchImpl tmpBatch = new WriteBatchImpl(); + private final Env env; + + private LogWriter log; + + private MemTable memTable; + private volatile MemTable immutableMemTable; + + private final InternalKeyComparator internalKeyComparator; + + private volatile Throwable backgroundException; + private final ExecutorService compactionExecutor; + private Future backgroundCompaction; + + private ManualCompaction manualCompaction; + + private CompactionStats[] stats = new CompactionStats[DbConstants.NUM_LEVELS]; + + public DbImpl(Options rawOptions, String dbname, Env env) + throws IOException + { + this.env = env; + requireNonNull(rawOptions, "options is null"); + requireNonNull(dbname, "databaseDir is null"); + final File databaseDir = env.toFile(dbname); + this.options = sanitizeOptions(databaseDir, rawOptions); + this.ownsLogger = this.options.logger() != rawOptions.logger(); + + if (this.options.compressionType() == CompressionType.SNAPPY && !Snappy.available()) { + // Disable snappy if it's not available. 
+ this.options.compressionType(CompressionType.NONE); + } + + this.databaseDir = databaseDir; + + if (this.options.filterPolicy() != null) { + checkArgument(this.options.filterPolicy() instanceof FilterPolicy, "Filter policy must implement Java interface FilterPolicy"); + this.options.filterPolicy(InternalFilterPolicy.convert(this.options.filterPolicy())); + } + + //use custom comparator if set + DBComparator comparator = options.comparator(); + UserComparator userComparator; + if (comparator != null) { + userComparator = new CustomUserComparator(comparator); + } + else { + userComparator = new BytewiseComparator(); + } + internalKeyComparator = new InternalKeyComparator(userComparator); + immutableMemTable = null; + + ThreadFactory compactionThreadFactory = new ThreadFactoryBuilder() + .setNameFormat("leveldb-" + databaseDir.getName() + "-%s") + .setUncaughtExceptionHandler((t, e) -> { + mutex.lock(); + try { + if (backgroundException == null) { + backgroundException = e; + } + options.logger().log("Unexpected exception occurred %s", e); + } + finally { + mutex.unlock(); + } + }) + .build(); + compactionExecutor = Executors.newSingleThreadExecutor(compactionThreadFactory); + + // Reserve ten files or so for other uses and give the rest to TableCache. 
+ int tableCacheSize = options.maxOpenFiles() - DbConstants.NUM_NON_TABLE_CACHE_FILES; + tableCache = new TableCache(databaseDir, tableCacheSize, new InternalUserComparator(internalKeyComparator), options, env); + + // create the version set + + // create the database dir if it does not already exist + databaseDir.mkdirs(); + checkArgument(databaseDir.exists(), "Database directory '%s' does not exist and could not be created", databaseDir); + checkArgument(databaseDir.isDirectory(), "Database directory '%s' is not a directory", databaseDir); + + for (int i = 0; i < DbConstants.NUM_LEVELS; i++) { + stats[i] = new CompactionStats(); + } + + mutex.lock(); + Closer c = Closer.create(); + boolean success = false; + try { + // lock the database dir + this.dbLock = env.tryLock(databaseDir.child(Filename.lockFileName())); + c.register(dbLock::release); + // + // verify the "current" file + File currentFile = databaseDir.child(Filename.currentFileName()); + if (!currentFile.canRead()) { + checkArgument(options.createIfMissing(), "Database '%s' does not exist and the create if missing option is disabled", databaseDir); + /** @see VersionSet#initializeIfNeeded() newDB() **/ + } + else { + checkArgument(!options.errorIfExists(), "Database '%s' exists and the error if exists option is enabled", databaseDir); + } + + this.versions = new VersionSet(options, databaseDir, tableCache, internalKeyComparator, env); + c.register(versions::release); + // load (and recover) current version + boolean saveManifest = versions.recover(); + + // Recover from all newer log files than the ones named in the + // descriptor (new log files may have been added by the previous + // incarnation without registering them in the descriptor). + // + // Note that PrevLogNumber() is no longer used, but we pay + // attention to it in case we are recovering a database + // produced by an older version of leveldb. 
+ long minLogNumber = versions.getLogNumber(); + long previousLogNumber = versions.getPrevLogNumber(); + final Set expected = versions.getLiveFiles().stream().map(FileMetaData::getNumber).collect(Collectors.toSet()); + List filenames = databaseDir.listFiles(); + + List logs = new ArrayList<>(); + for (File filename : filenames) { + FileInfo fileInfo = Filename.parseFileName(filename); + if (fileInfo != null) { + expected.remove(fileInfo.getFileNumber()); + if (fileInfo.getFileType() == FileType.LOG && + ((fileInfo.getFileNumber() >= minLogNumber) || (fileInfo.getFileNumber() == previousLogNumber))) { + logs.add(fileInfo.getFileNumber()); + } + } + } + + checkArgument(expected.isEmpty(), "%s missing files", expected.size()); + + // Recover in the order in which the logs were generated + VersionEdit edit = new VersionEdit(); + Collections.sort(logs); + for (Iterator iterator = logs.iterator(); iterator.hasNext(); ) { + Long fileNumber = iterator.next(); + RecoverResult result = recoverLogFile(fileNumber, !iterator.hasNext(), edit); + saveManifest |= result.saveManifest; + + // The previous incarnation may not have written any MANIFEST + // records after allocating this log number. So we manually + // update the file number allocation counter in VersionSet. 
+ this.versions.markFileNumberUsed(fileNumber); + + if (versions.getLastSequence() < result.maxSequence) { + versions.setLastSequence(result.maxSequence); + } + } + // + + // open transaction log + if (memTable == null) { + long logFileNumber = versions.getNextFileNumber(); + this.log = Logs.createLogWriter(databaseDir.child(Filename.logFileName(logFileNumber)), logFileNumber, env); + c.register(log); + edit.setLogNumber(log.getFileNumber()); + memTable = new MemTable(internalKeyComparator); + } + + if (saveManifest) { + edit.setPreviousLogNumber(0); + edit.setLogNumber(log.getFileNumber()); + // apply recovered edits + versions.logAndApply(edit, mutex); + } + + // cleanup unused files + deleteObsoleteFiles(); + + // schedule compactions + maybeScheduleCompaction(); + success = true; + } + catch (Throwable e) { + throw c.rethrow(e); + } + finally { + if (!success) { + if (ownsLogger) { //only close logger if created internally + c.register(this.options.logger()); + } + c.close(); + } + mutex.unlock(); + } + } + + // Fix user-supplied options to be reasonable + private static > T clipToRange(T in, T min, T max) + { + if (in.compareTo(min) < 0) { + return min; + } + if (in.compareTo(max) > 0) { + return max; + } + return in; + } + + /** + * Ensure we do not use external values as is. Ensure value are in correct ranges + * and a copy of external Options instance is used. 
+ */ + private Options sanitizeOptions(File databaseDir, Options src) throws IOException + { + final Options result = Options.fromOptions(src); + result.maxOpenFiles(clipToRange(src.maxOpenFiles(), 64 + NUM_NON_TABLE_CACHE_FILES, 50000)); + result.writeBufferSize(clipToRange(src.writeBufferSize(), 64 << 10, 1 << 30)); + result.maxFileSize(clipToRange(src.maxFileSize(), 1 << 20, 1 << 30)); + result.blockSize(clipToRange(src.blockSize(), 1 << 10, 4 << 20)); + if (result.logger() == null && databaseDir != null && (databaseDir.isDirectory() || databaseDir.mkdirs())) { + File file = databaseDir.child(Filename.infoLogFileName()); + file.renameTo(databaseDir.child(Filename.oldInfoLogFileName())); + result.logger(env.newLogger(file)); + } + if (result.logger() == null) { + result.logger(new NoOpLogger()); + } + return result; + } + + /** + * Wait for all background activity to finish and invalidate all cache. + * Only used to test that all file handles are closed correctly. + */ + @VisibleForTesting + void invalidateAllCaches() + { + mutex.lock(); + try { + while (backgroundCompaction != null && backgroundException == null) { + backgroundCondition.awaitUninterruptibly(); + } + tableCache.invalidateAll(); + } + finally { + mutex.unlock(); + } + } + + @Override + public void close() + { + if (shuttingDown.getAndSet(true)) { + return; + } + + mutex.lock(); + try { + while (backgroundCompaction != null) { + backgroundCondition.awaitUninterruptibly(); + } + } + finally { + mutex.unlock(); + } + + compactionExecutor.shutdown(); + try { + compactionExecutor.awaitTermination(1, TimeUnit.DAYS); + } + catch (InterruptedException e) { + Thread.currentThread().interrupt(); + } + try { + versions.release(); + } + catch (IOException ignored) { + } + try { + log.close(); + } + catch (IOException ignored) { + } + tableCache.close(); + if (ownsLogger) { + Closeables.closeQuietly(options.logger()); + } + dbLock.release(); + } + + @Override + public String getProperty(String name) + { + if 
(!name.startsWith("leveldb.")) { + return null; + } + String key = name.substring("leveldb.".length()); + mutex.lock(); + try { + Matcher matcher; + matcher = Pattern.compile("num-files-at-level(\\d+)") + .matcher(key); + if (matcher.matches()) { + final int level = Integer.parseInt(matcher.group(1)); + return String.valueOf(versions.numberOfFilesInLevel(level)); + } + matcher = Pattern.compile("stats") + .matcher(key); + if (matcher.matches()) { + final StringBuilder stringBuilder = new StringBuilder(); + stringBuilder.append(" Compactions\n"); + stringBuilder.append("Level Files Size(MB) Time(sec) Read(MB) Write(MB)\n"); + stringBuilder.append("--------------------------------------------------\n"); + for (int level = 0; level < DbConstants.NUM_LEVELS; level++) { + int files = versions.numberOfFilesInLevel(level); + if (stats[level].micros > 0 || files > 0) { + stringBuilder.append(String.format( + "%3d %8d %8.0f %9.0f %8.0f %9.0f%n", + level, + files, + versions.numberOfBytesInLevel(level) / 1048576.0, + stats[level].micros / 1e6, + stats[level].bytesRead / 1048576.0, + stats[level].bytesWritten / 1048576.0)); + } + } + return stringBuilder.toString(); + } + else if ("sstables".equals(key)) { + return versions.getCurrent().toString(); + } + else if ("approximate-memory-usage".equals(key)) { + long sizeTotal = tableCache.getApproximateMemoryUsage(); + if (memTable != null) { + sizeTotal += memTable.approximateMemoryUsage(); + } + if (immutableMemTable != null) { + sizeTotal += immutableMemTable.approximateMemoryUsage(); + } + return Long.toUnsignedString(sizeTotal); + } + } + finally { + mutex.unlock(); + } + return null; + } + + private void deleteObsoleteFiles() + { + checkState(mutex.isHeldByCurrentThread()); + if (backgroundException != null) { + return; + } + // Make a set of all of the live files + List live = new ArrayList<>(this.pendingOutputs); + for (FileMetaData fileMetaData : versions.getLiveFiles()) { + live.add(fileMetaData.getNumber()); + } + + 
final List filesToDelete = new ArrayList<>(); + for (File file : databaseDir.listFiles()) { + FileInfo fileInfo = Filename.parseFileName(file); + if (fileInfo == null) { + continue; + } + long number = fileInfo.getFileNumber(); + boolean keep = true; + switch (fileInfo.getFileType()) { + case LOG: + keep = ((number >= versions.getLogNumber()) || + (number == versions.getPrevLogNumber())); + break; + case DESCRIPTOR: + // Keep my manifest file, and any newer incarnations' + // (in case there is a race that allows other incarnations) + keep = (number >= versions.getManifestFileNumber()); + break; + case TABLE: + keep = live.contains(number); + break; + case TEMP: + // Any temp files that are currently being written to must + // be recorded in pending_outputs_, which is inserted into "live" + keep = live.contains(number); + break; + case CURRENT: + case DB_LOCK: + case INFO_LOG: + keep = true; + break; + } + + if (!keep) { + if (fileInfo.getFileType() == FileType.TABLE) { + tableCache.evict(number); + } + options.logger().log("Delete type=%s #%s", + fileInfo.getFileType(), + number); + filesToDelete.add(file); + } + } + // While deleting all files unblock other threads. All files being deleted + // have unique names which will not collide with newly created files and + // are therefore safe to delete while allowing other threads to proceed. 
+ mutex.unlock(); + try { + filesToDelete.forEach(File::delete); + } + finally { + mutex.lock(); + } + } + + private void maybeScheduleCompaction() + { + checkState(mutex.isHeldByCurrentThread()); + + if (backgroundCompaction != null) { + // Already scheduled + } + else if (shuttingDown.get()) { + // DB is being shutdown; no more background compactions + } + else if (backgroundException != null) { + // Already got an error; no more changes + } + else if (immutableMemTable == null && + manualCompaction == null && + !versions.needsCompaction()) { + // No work to be done + } + else { + backgroundCompaction = compactionExecutor.submit(this::backgroundCall); + } + } + + private void checkBackgroundException() + { + Throwable e = backgroundException; + if (e != null) { + throw new BackgroundProcessingException(e); + } + } + + private void backgroundCall() + { + mutex.lock(); + try { + checkState(backgroundCompaction != null, "Compaction was not correctly scheduled"); + + try { + if (!shuttingDown.get() && backgroundException == null) { + backgroundCompaction(); + } + } + finally { + backgroundCompaction = null; + } + // Previous compaction may have produced too many files in a level, + // so reschedule another compaction if needed. 
+ maybeScheduleCompaction();
+ }
+ catch (DatabaseShutdownException ignored) {
+ // Shutdown raced with this compaction round; nothing to report.
+ }
+ catch (Throwable throwable) {
+ recordBackgroundError(throwable);
+ }
+ finally {
+ try {
+ // Wake any thread waiting on compaction progress (writers, manual compaction).
+ backgroundCondition.signalAll();
+ }
+ finally {
+ mutex.unlock();
+ }
+ }
+ }
+
+ // Performs one unit of background work: flushing the immutable memtable
+ // takes priority; otherwise runs a manual or automatically-picked
+ // level compaction. REQUIRES: mutex is held by the current thread.
+ private void backgroundCompaction()
+ throws IOException
+ {
+ checkState(mutex.isHeldByCurrentThread());
+
+ if (immutableMemTable != null) {
+ compactMemTable();
+ return;
+ }
+
+ Compaction compaction;
+ InternalKey manualEnd = null;
+ boolean isManual = manualCompaction != null;
+ if (isManual) {
+ ManualCompaction m = this.manualCompaction;
+ compaction = versions.compactRange(m.level, m.begin, m.end);
+ m.done = compaction == null;
+ if (compaction != null) {
+ manualEnd = compaction.input(0, compaction.getLevelInputs().size() - 1).getLargest();
+ }
+ options.logger().log(
+ "Manual compaction at level-%s from %s .. %s; will stop at %s",
+ m.level,
+ (m.begin != null ? m.begin.toString() : "(begin)"),
+ (m.end != null ? m.end.toString() : "(end)"),
+ (m.done ? "(end)" : manualEnd)
+ );
+ }
+ else {
+ compaction = versions.pickCompaction();
+ }
+
+ if (compaction == null) {
+ // no compaction
+ }
+ else if (!isManual && compaction.isTrivialMove()) {
+ // Move file to next level without rewriting it
+ checkState(compaction.getLevelInputs().size() == 1);
+ FileMetaData fileMetaData = compaction.getLevelInputs().get(0);
+ compaction.getEdit().deleteFile(compaction.getLevel(), fileMetaData.getNumber());
+ compaction.getEdit().addFile(compaction.getLevel() + 1, fileMetaData);
+ versions.logAndApply(compaction.getEdit(), mutex);
+ options.logger().log("Moved #%s to level-%s %s bytes: %s",
+ fileMetaData.getNumber(),
+ compaction.getLevel() + 1,
+ fileMetaData.getFileSize(),
+ versions.levelSummary());
+ }
+ else {
+ CompactionState compactionState = new CompactionState(compaction);
+ try {
+ doCompactionWork(compactionState);
+ }
+ catch (Exception e) {
+ options.logger().log(
+ "Compaction error: %s", e.getMessage());
+ recordBackgroundError(e);
+ }
+ finally {
+ cleanupCompaction(compactionState);
+ compaction.close(); //release resources
+ deleteObsoleteFiles();
+ }
+ }
+ // NOTE(review): on the branch above, compaction.close() has already run in
+ // the finally block, so it is invoked a second time here. Confirm that
+ // Compaction.close() is idempotent (e.g. only releases version references
+ // once); otherwise this double close may under-count references.
+ if (compaction != null) {
+ compaction.close();
+ }
+
+ // manual compaction complete
+ if (isManual) {
+ ManualCompaction m = manualCompaction;
+ if (backgroundException != null) {
+ m.done = true;
+ }
+ if (!m.done) {
+ // Only part of the range was compacted; resume from where we stopped.
+ m.begin = manualEnd;
+ }
+ manualCompaction = null;
+ }
+ }
+
+ // Records the first background error and wakes waiters; later errors are
+ // ignored so the original cause is preserved. Errors (JVM-fatal) are
+ // rethrown immediately. REQUIRES: mutex is held by the current thread.
+ private void recordBackgroundError(Throwable e)
+ {
+ checkState(mutex.isHeldByCurrentThread());
+ Throwable backgroundException = this.backgroundException;
+ if (backgroundException == null) {
+ this.backgroundException = e;
+ backgroundCondition.signalAll();
+ }
+ Throwables.throwIfInstanceOf(e, Error.class);
+ }
+
+ // Releases builder/output-file resources left over from a (possibly failed)
+ // compaction and un-reserves its pending output file numbers.
+ // REQUIRES: mutex is held by the current thread.
+ private void cleanupCompaction(CompactionState compactionState) throws IOException
+ {
+ checkState(mutex.isHeldByCurrentThread());
+
+ if (compactionState.builder != null) {
+ compactionState.builder.abandon();
+ compactionState.builder = null;
+ }
+ if (compactionState.outfile != null) {
+ // an error has occurred, but we still need to release the resources!
+ compactionState.outfile.force();
+ compactionState.outfile.close();
+ compactionState.outfile = null;
+ }
+
+ for (FileMetaData output : compactionState.outputs) {
+ pendingOutputs.remove(output.getNumber());
+ }
+ }
+
+ // Result of replaying one write-ahead log file: the largest sequence number
+ // seen and whether the manifest must be saved (a level-0 flush happened).
+ private static class RecoverResult
+ {
+ long maxSequence;
+ boolean saveManifest;
+
+ public RecoverResult(long maxSequence, boolean saveManifest)
+ {
+ this.maxSequence = maxSequence;
+ this.saveManifest = saveManifest;
+ }
+ }
+
+ // Replays the write-ahead log identified by fileNumber into a fresh
+ // memtable, flushing to level-0 tables as the write buffer fills.
+ // REQUIRES: mutex is held by the current thread.
+ private RecoverResult recoverLogFile(long fileNumber, boolean lastLog, VersionEdit edit)
+ throws IOException
+ {
+ checkState(mutex.isHeldByCurrentThread());
+ File file = databaseDir.child(Filename.logFileName(fileNumber));
+ try (SequentialFile in = env.newSequentialFile(file)) {
+ LogMonitor logMonitor = LogMonitors.logMonitor(options.logger());
+
+ // We intentionally make LogReader do checksumming even if
+ // paranoidChecks==false so that corruptions cause entire commits
+ // to be skipped instead of propagating bad information (like overly
+ // large sequence numbers).
+ LogReader logReader = new LogReader(in, logMonitor, true, 0);
+
+ options.logger().log("Recovering log #%s",
+ fileNumber);
+
+ // Read all the records and add to a memtable
+ long maxSequence = 0;
+ int compactions = 0;
+ boolean saveManifest = false;
+ MemTable mem = null;
+ for (Slice record = logReader.readRecord(); record != null; record = logReader.readRecord()) {
+ SliceInput sliceInput = record.input();
+ // read header: 8-byte sequence number followed by 4-byte entry count
+ if (sliceInput.available() < 12) {
+ logMonitor.corruption(sliceInput.available(), "log record too small");
+ continue;
+ }
+ long sequenceBegin = sliceInput.readLong();
+ int updateSize = sliceInput.readInt();
+
+ // read entries
+ try (WriteBatchImpl writeBatch = readWriteBatch(sliceInput, updateSize)) {
+ // apply entries to memTable
+ if (mem == null) {
+ mem = new MemTable(internalKeyComparator);
+ }
+ writeBatch.forEach(new InsertIntoHandler(mem, sequenceBegin));
+ }
+ catch (Exception e) {
+ // NOTE(review): this logs "Ignoring error" when paranoidChecks is off,
+ // but then unconditionally rethrows either way — the error is never
+ // actually ignored. Confirm whether the non-paranoid path was meant to
+ // `continue` with the next record instead of propagating.
+ if (!options.paranoidChecks()) {
+ options.logger().log("Ignoring error %s", e);
+ }
+ Throwables.propagateIfPossible(e, IOException.class);
+ throw new IOException(e);
+ }
+
+ // update the maxSequence
+ long lastSequence = sequenceBegin + updateSize - 1;
+ if (lastSequence > maxSequence) {
+ maxSequence = lastSequence;
+ }
+
+ // flush mem table if necessary
+ if (mem.approximateMemoryUsage() > options.writeBufferSize()) {
+ compactions++;
+ saveManifest = true;
+ writeLevel0Table(mem, edit, null);
+ mem = null;
+ }
+ }
+
+ // See if we should keep reusing the last log file.
+ if (options.reuseLogs() && lastLog && compactions == 0) {
+ Preconditions.checkState(this.log == null);
+ Preconditions.checkState(this.memTable == null);
+ long originalSize = file.length();
+ final WritableFile writableFile = env.newAppendableFile(file);
+ options.logger().log("Reusing old log %s", file);
+ this.log = Logs.createLogWriter(fileNumber, writableFile, originalSize);
+ if (mem != null) {
+ this.memTable = mem;
+ mem = null;
+ }
+ else {
+ // mem can be NULL if lognum exists but was empty.
+ this.memTable = new MemTable(internalKeyComparator);
+ }
+ }
+
+ // flush whatever remains in the mem table
+ if (mem != null && !mem.isEmpty()) {
+ saveManifest = true;
+ writeLevel0Table(mem, edit, null);
+ }
+
+ return new RecoverResult(maxSequence, saveManifest);
+ }
+ }
+
+ // Point lookup using default read options.
+ @Override
+ public byte[] get(byte[] key)
+ throws DBException
+ {
+ return get(key, new ReadOptions());
+ }
+
+ // Point lookup at the snapshot in {@code options} (or the latest sequence).
+ // Checks memtable, then immutable memtable, then on-disk levels.
+ @Override
+ public byte[] get(byte[] key, ReadOptions options)
+ throws DBException
+ {
+ LookupKey lookupKey;
+ LookupResult lookupResult;
+ mutex.lock();
+ try {
+ long lastSequence = options.snapshot() != null ?
+ snapshots.getSequenceFrom(options.snapshot()) : versions.getLastSequence();
+ lookupKey = new LookupKey(Slices.wrappedBuffer(key), lastSequence);
+
+ // First look in the memtable, then in the immutable memtable (if any).
+ // Capture references under the lock, then search without holding it so
+ // reads do not block concurrent writers. The version is refcounted to
+ // keep its files alive while unlocked.
+ final MemTable memTable = this.memTable;
+ final MemTable immutableMemTable = this.immutableMemTable;
+ final Version current = versions.getCurrent();
+ current.retain();
+ ReadStats readStats = null;
+ mutex.unlock();
+ try {
+ lookupResult = memTable.get(lookupKey);
+ if (lookupResult == null && immutableMemTable != null) {
+ lookupResult = immutableMemTable.get(lookupKey);
+ }
+
+ if (lookupResult == null) {
+ // Not in memTables; try live files in level order
+ readStats = new ReadStats();
+ lookupResult = current.get(options, lookupKey, readStats);
+ }
+
+ // schedule compaction if necessary
+ }
+ finally {
+ mutex.lock();
+ // A file-based read may have made some file a seek-compaction candidate.
+ if (readStats != null && current.updateStats(readStats)) {
+ maybeScheduleCompaction();
+ }
+ current.release();
+ }
+ }
+ finally {
+ mutex.unlock();
+ }
+
+ if (lookupResult != null) {
+ Slice value = lookupResult.getValue();
+ if (value != null) {
+ return value.getBytes();
+ }
+ }
+ // Not found, or found a deletion tombstone.
+ return null;
+ }
+
+ @Override
+ public void put(byte[] key, byte[] value)
+ throws DBException
+ {
+ put(key, value, new WriteOptions());
+ }
+
+ // Single-key put, implemented as a one-entry batch write.
+ @Override
+ public Snapshot put(byte[] key, byte[] value, WriteOptions options)
+ throws DBException
+ {
+ try (WriteBatchImpl writeBatch = new WriteBatchImpl()) {
+ return writeInternal(writeBatch.put(key, value), options);
+ }
+ }
+
+ @Override
+ public void delete(byte[] key)
+ throws DBException
+ {
+ delete(key, new WriteOptions());
+ }
+
+ // Single-key delete, implemented as a one-entry batch write.
+ @Override
+ public Snapshot delete(byte[] key, WriteOptions options)
+ throws DBException
+ {
+ try (WriteBatchImpl writeBatch = new WriteBatchImpl()) {
+ return writeInternal(writeBatch.delete(key), options);
+ }
+ }
+
+ @Override
+ public void write(WriteBatch updates)
+ throws DBException
+ {
+ writeInternal((WriteBatchImpl) updates, new WriteOptions());
+ }
+
+ @Override
+ public Snapshot write(WriteBatch updates, WriteOptions options)
+ throws DBException
+ {
+ return writeInternal((WriteBatchImpl) updates, options);
+ }
+
+ // Group-commit write protocol: each writer queues itself; the writer at the
+ // head of the queue logs and applies a merged group of batches on behalf of
+ // the others, then notifies them. Returns a snapshot of the post-write
+ // sequence when options.snapshot() is set, otherwise null.
+ public Snapshot writeInternal(WriteBatchImpl myBatch, WriteOptions options)
+ throws DBException
+ {
+ checkBackgroundException();
+ final WriteBatchInternal w = new WriteBatchInternal(myBatch, options.sync(), mutex.newCondition());
+ mutex.lock();
+ try {
+ writers.offerLast(w);
+ // Wait until this writer is at the head of the queue, or some head
+ // writer has already committed this batch as part of its group.
+ while (!w.done && writers.peekFirst() != w) {
+ w.backgroundCondition.awaitUninterruptibly();
+ }
+ if (w.done) {
+ w.checkExceptions();
+ return options.snapshot() ? snapshots.newSnapshot(versions.getLastSequence()) : null;
+ }
+ ValueHolder lastWriterVh = new ValueHolder<>(w);
+ Throwable error = null;
+ try {
+ multipleWriteGroup(myBatch, options, lastWriterVh);
+ }
+ catch (Exception e) {
+ //all writers must be notified of this exception
+ error = e;
+ }
+
+ // Pop every writer whose batch we committed (or failed) and wake them.
+ WriteBatchInternal lastWrite = lastWriterVh.getValue();
+ while (true) {
+ WriteBatchInternal ready = writers.peekFirst();
+ writers.pollFirst();
+ if (ready != w) {
+ ready.error = error;
+ ready.done = true;
+ ready.signal();
+ }
+ if (ready == lastWrite) {
+ break;
+ }
+ }
+
+ // Notify new head of write queue
+ if (!writers.isEmpty()) {
+ writers.peekFirst().signal();
+ }
+ checkBackgroundException();
+ if (error != null) {
+ Throwables.propagateIfPossible(error, DBException.class);
+ throw new DBException(error);
+ }
+ return options.snapshot() ? snapshots.newSnapshot(versions.getLastSequence()) : null;
+ }
+ finally {
+ mutex.unlock();
+ }
+ }
+
+ // Commits a merged group of writer batches: reserves sequence numbers,
+ // appends to the WAL and applies to the memtable (unlocked), then advances
+ // the version set's last sequence. myBatch == null means "just make room".
+ private void multipleWriteGroup(WriteBatchImpl myBatch, WriteOptions options, ValueHolder lastWriter)
+ {
+ long sequenceEnd;
+ WriteBatchImpl updates = null;
+ // May temporarily unlock and wait.
+ makeRoomForWrite(myBatch == null);
+ if (myBatch != null) {
+ updates = buildBatchGroup(lastWriter);
+
+ // Get sequence numbers for this change set
+ long sequenceBegin = versions.getLastSequence() + 1;
+ sequenceEnd = sequenceBegin + updates.size() - 1;
+
+ // Add to log and apply to memtable. We can release the lock
+ // during this phase since "w" is currently responsible for logging
+ // and protects against concurrent loggers and concurrent writes
+ // into mem_.
+ // log and memtable are modified by makeRoomForWrite
+ mutex.unlock();
+ try {
+ // Log write
+ Slice record = writeWriteBatch(updates, sequenceBegin);
+ log.addRecord(record, options.sync());
+ // Update memtable
+ //this.memTable is modified by makeRoomForWrite
+ updates.forEach(new InsertIntoHandler(this.memTable, sequenceBegin));
+ }
+ catch (Exception e) {
+ // The state of the log file is indeterminate: the log record we
+ // just added may or may not show up when the DB is re-opened.
+ // So we force the DB into a mode where all future writes fail.
+ mutex.lock();
+ try {
+ //we need to be inside lock to record exception
+ recordBackgroundError(e);
+ }
+ finally {
+ mutex.unlock();
+ }
+ }
+ finally {
+ mutex.lock();
+ }
+ if (updates == tmpBatch) {
+ tmpBatch.clear();
+ }
+ // Reserve this sequence in the version set
+ versions.setLastSequence(sequenceEnd);
+ }
+ }
+
+ /**
+ * Merges the batches of queued writers into a single group batch, honoring
+ * size limits and sync/non-sync compatibility. Sets {@code lastWriter} to
+ * the last writer whose batch was included.
+ *
+ * REQUIRES: Writer list must be non-empty
+ * REQUIRES: First writer must have a non-NULL batch
+ */
+ private WriteBatchImpl buildBatchGroup(ValueHolder lastWriter)
+ {
+ checkArgument(!writers.isEmpty(), "A least one writer is required");
+ final WriteBatchInternal first = writers.peekFirst();
+ WriteBatchImpl result = first.batch;
+ checkArgument(result != null, "Batch must be non null");
+
+ int sizeInit;
+ sizeInit = first.batch.getApproximateSize();
+ /*
+ * Allow the group to grow up to a maximum size, but if the
+ * original write is small, limit the growth so we do not slow
+ * down the small write too much.
+ */
+ // Cap: 1 MiB, or (initial size + 128 KiB) for small initial writes.
+ int maxSize = 1 << 20;
+ if (sizeInit <= (128 << 10)) {
+ maxSize = sizeInit + (128 << 10);
+ }
+
+ int size = 0;
+ lastWriter.setValue(first);
+ for (WriteBatchInternal w : writers) {
+ if (w.sync && !lastWriter.getValue().sync) {
+ // Do not include a sync write into a batch handled by a non-sync write.
+ break;
+ }
+
+ if (w.batch != null) {
+ size += w.batch.getApproximateSize();
+ if (size > maxSize) {
+ // Do not make batch too big
+ break;
+ }
+
+ // Append to result
+ if (result == first.batch) {
+ // Switch to temporary batch instead of disturbing caller's batch
+ result = tmpBatch;
+ checkState(result.size() == 0, "Temp batch should be clean");
+ result.append(first.batch);
+ }
+ else if (first.batch != w.batch) {
+ result.append(w.batch);
+ }
+ }
+ lastWriter.setValue(w);
+ }
+ return result;
+ }
+
+ @Override
+ public WriteBatch createWriteBatch()
+ {
+ checkBackgroundException();
+ return new WriteBatchImpl();
+ }
+
+ @Override
+ public DBIteratorAdapter iterator()
+ {
+ return iterator(new ReadOptions());
+ }
+
+ // Returns an iterator over the DB at the snapshot in {@code options}
+ // (or the latest sequence if none).
+ @Override
+ public DBIteratorAdapter iterator(ReadOptions options)
+ {
+ mutex.lock();
+ try {
+ InternalIterator rawIterator = internalIterator(options);
+
+ // filter out any entries not visible in our snapshot
+ long snapshot = getSnapshot(options);
+ SnapshotSeekingIterator snapshotIterator = new SnapshotSeekingIterator(rawIterator, snapshot, internalKeyComparator.getUserComparator(), new RecordBytesListener());
+ return new DBIteratorAdapter(snapshotIterator);
+ }
+ finally {
+ mutex.unlock();
+ }
+ }
+
+ // Builds a merged iterator over the memtable, the immutable memtable (if
+ // any) and every on-disk level. The current version is retained until the
+ // returned iterator is closed.
+ InternalIterator internalIterator(ReadOptions options)
+ {
+ mutex.lock();
+ try (SafeListBuilder builder = SafeListBuilder.builder()) {
+ // merge together the memTable, immutableMemTable, and tables in version set
+ builder.add(memTable.iterator());
+ if (immutableMemTable != null) {
+ builder.add(immutableMemTable.iterator());
+ }
+ Version current = versions.getCurrent();
+ builder.addAll(current.getLevelIterators(options));
+ current.retain();
+ // The cleanup callback releases the version reference under the lock.
+ return new DbIterator(new MergingIterator(builder.build(), internalKeyComparator), () -> {
+ mutex.lock();
+ try {
+ current.release();
+ }
+ finally {
+ mutex.unlock();
+ }
+ });
+ }
+ catch (IOException e) {
+ throw new DBException(e);
+ }
+ finally {
+ mutex.unlock();
+ }
+ }
+
+ /**
+ * Record a sample of bytes read at the specified internal key.
+ * Samples are taken approximately once every config::READ_BYTES_PERIOD
+ * bytes.
+ */
+ void recordReadSample(InternalKey key)
+ {
+ mutex.lock();
+ try {
+ if (versions.getCurrent().recordReadSample(key)) {
+ maybeScheduleCompaction();
+ }
+ }
+ finally {
+ mutex.unlock();
+ }
+ }
+
+ @Override
+ public Snapshot getSnapshot()
+ {
+ checkBackgroundException();
+ mutex.lock();
+ try {
+ return snapshots.newSnapshot(versions.getLastSequence());
+ }
+ finally {
+ mutex.unlock();
+ }
+ }
+
+ // Resolves the sequence number to read at: the explicit snapshot from
+ // {@code options} if present, otherwise the current last sequence.
+ private long getSnapshot(ReadOptions options)
+ {
+ long snapshot;
+ if (options.snapshot() != null) {
+ snapshot = snapshots.getSequenceFrom(options.snapshot());
+ }
+ else {
+ snapshot = versions.getLastSequence();
+ }
+ return snapshot;
+ }
+
+ // Blocks (possibly releasing the mutex) until the memtable has room for a
+ // write: throttles on many L0 files, waits for in-flight flushes, and
+ // rotates log + memtable when the write buffer is full. force==true always
+ // rotates. REQUIRES: mutex is held; writer queue is non-empty.
+ private void makeRoomForWrite(boolean force)
+ {
+ checkState(mutex.isHeldByCurrentThread());
+ checkState(!writers.isEmpty());
+
+ boolean allowDelay = !force;
+
+ while (true) {
+ checkBackgroundException();
+ if (allowDelay && versions.numberOfFilesInLevel(0) > L0_SLOWDOWN_WRITES_TRIGGER) {
+ // We are getting close to hitting a hard limit on the number of
+ // L0 files. Rather than delaying a single write by several
+ // seconds when we hit the hard limit, start delaying each
+ // individual write by 1ms to reduce latency variance. Also,
+ // this delay hands over some CPU to the compaction thread in
+ // case it is sharing the same core as the writer.
+ try {
+ mutex.unlock();
+ Thread.sleep(1);
+ }
+ catch (InterruptedException e) {
+ // Restore the interrupt flag before surfacing the failure.
+ Thread.currentThread().interrupt();
+ throw new DBException(e);
+ }
+ finally {
+ mutex.lock();
+ }
+
+ // Do not delay a single write more than once
+ allowDelay = false;
+ }
+ else if (!force && memTable.approximateMemoryUsage() <= options.writeBufferSize()) {
+ // There is room in current memtable
+ break;
+ }
+ else if (immutableMemTable != null) {
+ // We have filled up the current memtable, but the previous
+ // one is still being compacted, so we wait.
+ options.logger().log("Current memtable full; waiting...");
+ backgroundCondition.awaitUninterruptibly();
+ }
+ else if (versions.numberOfFilesInLevel(0) >= L0_STOP_WRITES_TRIGGER) {
+ // There are too many level-0 files.
+ options.logger().log("Too many L0 files; waiting...");
+ backgroundCondition.awaitUninterruptibly();
+ }
+ else {
+ // Attempt to switch to a new memtable and trigger compaction of old
+ checkState(versions.getPrevLogNumber() == 0);
+
+ // close the existing log
+ try {
+ log.close();
+ }
+ catch (IOException e) {
+ throw new DBException("Unable to close log file " + log, e);
+ }
+
+ // open a new log
+ long logNumber = versions.getNextFileNumber();
+ try {
+ this.log = Logs.createLogWriter(databaseDir.child(Filename.logFileName(logNumber)), logNumber, env);
+ }
+ catch (IOException e) {
+ throw new DBException("Unable to open new log file " +
+ databaseDir.child(Filename.logFileName(logNumber)).getPath(), e);
+ }
+
+ // create a new mem table; the full one becomes immutable and is
+ // flushed by the background compaction scheduled below
+ immutableMemTable = memTable;
+ memTable = new MemTable(internalKeyComparator);
+
+ // Do not force another compaction if there is space available
+ force = false;
+
+ maybeScheduleCompaction();
+ }
+ }
+ }
+
+ // Flushes the immutable memtable to a level-0 table, applies the resulting
+ // version edit, and deletes files made obsolete by the flush.
+ // REQUIRES: mutex is held; immutableMemTable is non-null.
+ private void compactMemTable()
+ throws IOException
+ {
+ checkState(mutex.isHeldByCurrentThread());
+ checkState(immutableMemTable != null);
+
+ try {
+ // Save the contents of the memtable as a new Table
+ VersionEdit edit = new VersionEdit();
+ Version base = versions.getCurrent();
+ base.retain();
+ writeLevel0Table(immutableMemTable, edit, base);
+ base.release();
+
+ if (shuttingDown.get()) {
+ throw new DatabaseShutdownException("Database shutdown during memtable compaction");
+ }
+
+ // Replace immutable memtable with the generated Table
+ edit.setPreviousLogNumber(0);
+ edit.setLogNumber(log.getFileNumber()); // Earlier logs no longer needed
+ versions.logAndApply(edit, mutex);
+
+ immutableMemTable = null;
+ deleteObsoleteFiles();
+ }
+ finally {
+ // Wake writers waiting for the immutable memtable to drain.
+ backgroundCondition.signalAll();
+ }
+ }
+
+ // Writes {@code mem} to a new sstable and records it in {@code edit}.
+ // When {@code base} is non-null it is used to pick a level deeper than 0
+ // if the key range does not overlap. The mutex is released around the
+ // actual table build. REQUIRES: mutex is held by the current thread.
+ private void writeLevel0Table(MemTable mem, VersionEdit edit, Version base)
+ throws IOException
+ {
+ final long startMicros = env.nowMicros();
+ checkState(mutex.isHeldByCurrentThread());
+
+ // skip empty mem table
+ if (mem.isEmpty()) {
+ return;
+ }
+
+ // write the memtable to a new sstable
+ long fileNumber = versions.getNextFileNumber();
+ pendingOutputs.add(fileNumber);
+ options.logger().log("Level-0 table #%s: started",
+ fileNumber);
+
+ mutex.unlock();
+ FileMetaData meta;
+ try {
+ meta = buildTable(mem, fileNumber);
+ }
+ finally {
+ mutex.lock();
+ }
+ options.logger().log("Level-0 table #%s: %s bytes",
+ meta.getNumber(),
+ meta.getFileSize());
+ pendingOutputs.remove(fileNumber);
+
+ // Note that if file size is zero, the file has been deleted and
+ // should not be added to the manifest.
+ int level = 0;
+ if (meta.getFileSize() > 0) {
+ Slice minUserKey = meta.getSmallest().getUserKey();
+ Slice maxUserKey = meta.getLargest().getUserKey();
+ if (base != null) {
+ level = base.pickLevelForMemTableOutput(minUserKey, maxUserKey);
+ }
+ edit.addFile(level, meta);
+ }
+ this.stats[level].add(env.nowMicros() - startMicros, 0, meta.getFileSize());
+ }
+
+ // Writes the contents of a memtable to sstable file {@code fileNumber} and
+ // returns its metadata. Deletes the file (and returns zero-size metadata)
+ // when the memtable iterator yields nothing; deletes it on any IO error.
+ private FileMetaData buildTable(MemTable data, long fileNumber)
+ throws IOException
+ {
+ File file = databaseDir.child(Filename.tableFileName(fileNumber));
+ try {
+ InternalKey smallest = null;
+ InternalKey largest = null;
+ try (WritableFile writableFile = env.newWritableFile(file)) {
+ TableBuilder tableBuilder = new TableBuilder(options, writableFile, new InternalUserComparator(internalKeyComparator));
+
+ try (InternalIterator it = data.iterator()) {
+ for (boolean valid = it.seekToFirst(); valid; valid = it.next()) {
+ // update keys; iteration is in sorted order, so the first key
+ // is the smallest and the last one seen is the largest
+ InternalKey key = it.key();
+ if (smallest == null) {
+ smallest = key;
+ }
+ largest = key;
+
+ tableBuilder.add(key.encode(), it.value());
+ }
+ }
+
+ tableBuilder.finish();
+ writableFile.force();
+ }
+
+ if (smallest == null) {
+ //empty iterator
+ file.delete();
+ return new FileMetaData(fileNumber, 0, null, null);
+ }
+ FileMetaData fileMetaData = new FileMetaData(fileNumber, file.length(), smallest, largest);
+
+ // verify table can be opened
+ tableCache.newIterator(fileMetaData, new ReadOptions()).close();
+
+ return fileMetaData;
+ }
+ catch (IOException e) {
+ file.delete();
+ throw e;
+ }
+ }
+
+ // Core merge loop of a level compaction: streams the merged inputs,
+ // drops shadowed entries and obsolete deletion markers, and emits the
+ // survivors into size-capped output tables. Runs mostly without the mutex,
+ // re-acquiring it briefly to flush the immutable memtable when one appears.
+ private void doCompactionWork(CompactionState compactionState)
+ throws IOException
+ {
+ final long startMicros = env.nowMicros();
+ long immMicros = 0; // Micros spent doing imm_ compactions
+ options.logger().log("Compacting %s@%s + %s@%s files",
+ compactionState.compaction.input(0).size(),
+ compactionState.compaction.getLevel(),
+ compactionState.compaction.input(1).size(),
+ compactionState.compaction.getLevel() + 1);
+
+ checkState(mutex.isHeldByCurrentThread());
+ checkArgument(versions.numberOfBytesInLevel(compactionState.getCompaction().getLevel()) > 0);
+ checkArgument(compactionState.builder == null);
+ checkArgument(compactionState.outfile == null);
+
+ // Entries at or below this sequence that are shadowed may be dropped.
+ compactionState.smallestSnapshot = snapshots.isEmpty() ? versions.getLastSequence() : snapshots.getOldest();
+
+ // Release mutex while we're actually doing the compaction work
+ final MergingIterator mergingIterator = versions.makeInputIterator(compactionState.compaction);
+ mutex.unlock();
+ try (MergingIterator iterator = mergingIterator) {
+ Slice currentUserKey = null;
+ boolean hasCurrentUserKey = false;
+
+ long lastSequenceForKey = MAX_SEQUENCE_NUMBER;
+ for (boolean valid = iterator.seekToFirst(); valid && !shuttingDown.get(); valid = iterator.next()) {
+ // always give priority to compacting the current mem table
+ if (immutableMemTable != null) {
+ long immStart = env.nowMicros();
+ mutex.lock();
+ try {
+ compactMemTable();
+ }
+ finally {
+ mutex.unlock();
+ }
+ immMicros += (env.nowMicros() - immStart);
+ }
+ InternalKey key = iterator.key();
+ if (compactionState.compaction.shouldStopBefore(key) && compactionState.builder != null) {
+ finishCompactionOutputFile(compactionState);
+ }
+
+ // Handle key/value, add to state, etc.
+ boolean drop = false;
+ // todo if key doesn't parse (it is corrupted),
+ if (false /*!ParseInternalKey(key, &ikey)*/) {
+ // do not hide error keys
+ currentUserKey = null;
+ hasCurrentUserKey = false;
+ lastSequenceForKey = MAX_SEQUENCE_NUMBER;
+ }
+ else {
+ if (!hasCurrentUserKey || internalKeyComparator.getUserComparator().compare(key.getUserKey(), currentUserKey) != 0) {
+ // First occurrence of this user key
+ currentUserKey = key.getUserKey();
+ hasCurrentUserKey = true;
+ lastSequenceForKey = MAX_SEQUENCE_NUMBER;
+ }
+
+ if (lastSequenceForKey <= compactionState.smallestSnapshot) {
+ // Hidden by a newer entry for same user key
+ drop = true; // (A)
+ }
+ else if (key.getValueType() == DELETION &&
+ key.getSequenceNumber() <= compactionState.smallestSnapshot &&
+ compactionState.compaction.isBaseLevelForKey(key.getUserKey())) {
+ // For this user key:
+ // (1) there is no data in higher levels
+ // (2) data in lower levels will have larger sequence numbers
+ // (3) data in layers that are being compacted here and have
+ // smaller sequence numbers will be dropped in the next
+ // few iterations of this loop (by rule (A) above).
+ // Therefore this deletion marker is obsolete and can be dropped.
+ drop = true;
+ }
+
+ lastSequenceForKey = key.getSequenceNumber();
+ }
+
+ if (!drop) {
+ // Open output file if necessary
+ if (compactionState.builder == null) {
+ openCompactionOutputFile(compactionState);
+ }
+ if (compactionState.builder.getEntryCount() == 0) {
+ compactionState.currentSmallest = key;
+ }
+ compactionState.currentLargest = key;
+ compactionState.builder.add(key.encode(), iterator.value());
+
+ // Close output file if it is big enough
+ if (compactionState.builder.getFileSize() >=
+ compactionState.compaction.getMaxOutputFileSize()) {
+ finishCompactionOutputFile(compactionState);
+ }
+ }
+ }
+
+ if (shuttingDown.get()) {
+ throw new DatabaseShutdownException("DB shutdown during compaction");
+ }
+ if (compactionState.builder != null) {
+ finishCompactionOutputFile(compactionState);
+ }
+ }
+ finally {
+ // Account per-level compaction statistics; note this also re-acquires
+ // the mutex for the caller (installCompactionResults requires it).
+ long micros = env.nowMicros() - startMicros - immMicros;
+ long bytesRead = 0;
+ for (int which = 0; which < 2; which++) {
+ for (int i = 0; i < compactionState.compaction.input(which).size(); i++) {
+ bytesRead += compactionState.compaction.input(which, i).getFileSize();
+ }
+ }
+ long bytesWritten = 0;
+ for (int i = 0; i < compactionState.outputs.size(); i++) {
+ bytesWritten += compactionState.outputs.get(i).getFileSize();
+ }
+ mutex.lock();
+ this.stats[compactionState.compaction.getLevel() + 1].add(micros, bytesRead, bytesWritten);
+ }
+ installCompactionResults(compactionState);
+ options.logger().log(
+ "compacted to: %s", versions.levelSummary());
+ }
+
+ // Creates the next compaction output table file, reserving its file number
+ // in pendingOutputs so it is not deleted as obsolete while being written.
+ private void openCompactionOutputFile(CompactionState compactionState)
+ throws IOException
+ {
+ requireNonNull(compactionState, "compactionState is null");
+ checkArgument(compactionState.builder == null, "compactionState builder is not null");
+
+ long fileNumber;
+ mutex.lock();
+ try {
+ fileNumber = versions.getNextFileNumber();
+ pendingOutputs.add(fileNumber);
+ compactionState.currentFileNumber = fileNumber;
+ compactionState.currentFileSize = 0;
+ compactionState.currentSmallest = null;
+ compactionState.currentLargest = null;
+ }
+ finally {
+ mutex.unlock();
+ }
+ File file = databaseDir.child(Filename.tableFileName(fileNumber));
+ compactionState.outfile = env.newWritableFile(file);
+ compactionState.builder = new TableBuilder(options, compactionState.outfile, new InternalUserComparator(internalKeyComparator));
+ }
+
+ // Finalizes the current compaction output: finishes the table, records its
+ // metadata, syncs and closes the file, and sanity-checks that the new
+ // table can be opened via the table cache.
+ private void finishCompactionOutputFile(CompactionState compactionState)
+ throws IOException
+ {
+ requireNonNull(compactionState, "compactionState is null");
+ checkArgument(compactionState.outfile != null);
+ checkArgument(compactionState.builder != null);
+
+ long outputNumber = compactionState.currentFileNumber;
+ checkArgument(outputNumber != 0);
+
+ long currentEntries = compactionState.builder.getEntryCount();
+ long currentBytes = 0;
+ try {
+ compactionState.builder.finish();
+ currentBytes = compactionState.builder.getFileSize();
+ }
+ finally {
+ compactionState.builder = null;
+ }
+ compactionState.currentFileSize = currentBytes;
+ compactionState.totalBytes += currentBytes;
+
+ FileMetaData currentFileMetaData = new FileMetaData(compactionState.currentFileNumber,
+ compactionState.currentFileSize,
+ compactionState.currentSmallest,
+ compactionState.currentLargest);
+ compactionState.outputs.add(currentFileMetaData);
+
+ compactionState.outfile.force();
+ compactionState.outfile.close();
+ compactionState.outfile = null;
+
+ if (currentEntries > 0) {
+ // Verify that the table is usable
+ tableCache.newIterator(outputNumber, new ReadOptions()).close();
+ options.logger().log(
+ "Generated table #%s@%s: %s keys, %s bytes",
+ outputNumber,
+ compactionState.compaction.getLevel(),
+ currentEntries,
+ currentBytes);
+ }
+ }
+
+ // Applies the compaction's edit to the version set: deletes the inputs,
+ // adds the outputs at level+1, and releases the pending-output
+ // reservations. REQUIRES: mutex is held by the current thread.
+ private void installCompactionResults(CompactionState compact)
+ throws IOException
+ {
+ checkState(mutex.isHeldByCurrentThread());
+ options.logger().log("Compacted %s@%s + %s@%s files => %s bytes",
+ compact.compaction.input(0).size(),
+ compact.compaction.getLevel(),
+ compact.compaction.input(1).size(),
+ compact.compaction.getLevel() + 1,
+ compact.totalBytes);
+
+ // Add compaction outputs
+ compact.compaction.addInputDeletions(compact.compaction.getEdit());
+ int level = compact.compaction.getLevel();
+ for (FileMetaData output : compact.outputs) {
+ compact.compaction.getEdit().addFile(level + 1, output);
+ pendingOutputs.remove(output.getNumber());
+ }
+
+ versions.logAndApply(compact.compaction.getEdit(), mutex);
+ }
+
+ @VisibleForTesting
+ int numberOfFilesInLevel(int level)
+ {
+ mutex.lock();
+ Version v;
+ try {
+ v = versions.getCurrent();
+ }
+ finally {
+ mutex.unlock();
+ }
+ return v.numberOfFilesInLevel(level);
+ }
+
+ @Override
+ public long[] getApproximateSizes(Range... ranges)
+ {
+ requireNonNull(ranges, "ranges is null");
+ long[] sizes = new long[ranges.length];
+ for (int i = 0; i < ranges.length; i++) {
+ Range range = ranges[i];
+ sizes[i] = getApproximateSizes(range);
+ }
+ return sizes;
+ }
+
+ // Approximate on-disk size of one key range, computed from the difference
+ // of the file offsets of the range's endpoints in the current version.
+ public long getApproximateSizes(Range range)
+ {
+ mutex.lock();
+ Version v;
+ try {
+ v = versions.getCurrent();
+ v.retain();
+ try {
+ InternalKey startKey = new InternalKey(Slices.wrappedBuffer(range.start()), MAX_SEQUENCE_NUMBER, VALUE);
+ InternalKey limitKey = new InternalKey(Slices.wrappedBuffer(range.limit()), MAX_SEQUENCE_NUMBER, VALUE);
+ long startOffset = v.getApproximateOffsetOf(startKey);
+ long limitOffset = v.getApproximateOffsetOf(limitKey);
+ return (limitOffset >= startOffset ? limitOffset - startOffset : 0);
+ }
+ finally {
+ v.release();
+ }
+ }
+ finally {
+ mutex.unlock();
+ }
+ }
+
+ public long getMaxNextLevelOverlappingBytes()
+ {
+ mutex.lock();
+ try {
+ return versions.getMaxNextLevelOverlappingBytes();
+ }
+ finally {
+ mutex.unlock();
+ }
+ }
+
+ // Mutable bookkeeping for one in-progress compaction: the builder/output
+ // file currently being written plus the list of finished output tables.
+ private static class CompactionState
+ {
+ private final Compaction compaction;
+
+ // NOTE(review): raw List — elements are FileMetaData per usage in
+ // doCompactionWork/cleanupCompaction. The generic parameter looks like it
+ // was stripped during extraction; confirm against the original source.
+ private final List outputs = new ArrayList<>();
+
+ // Oldest sequence number that must remain visible (snapshot floor).
+ private long smallestSnapshot;
+
+ // State kept for output being generated
+ private WritableFile outfile;
+ private TableBuilder builder;
+
+ // Current file being generated
+ private long currentFileNumber;
+ private long currentFileSize;
+ private InternalKey currentSmallest;
+ private InternalKey currentLargest;
+
+ private long totalBytes;
+
+ private CompactionState(Compaction compaction)
+ {
+ this.compaction = compaction;
+ }
+
+ public Compaction getCompaction()
+ {
+ return compaction;
+ }
+ }
+
+ // A user-requested compaction of [begin, end] at a given level; begin is
+ // advanced as the range is compacted in pieces.
+ private static class ManualCompaction
+ {
+ private final int level;
+ private InternalKey begin;
+ private final InternalKey end;
+ private boolean done;
+
+ private ManualCompaction(int level, InternalKey begin, InternalKey end)
+ {
+ this.level = level;
+ this.begin = begin;
+ this.end = end;
+ }
+ }
+
+ // Per level compaction stats. stats[level] stores the stats for
+ // compactions that produced data for the specified "level".
+ // Accumulated time and IO volume of compactions that wrote into one level.
+ private static class CompactionStats
+ {
+ long micros;
+ long bytesRead;
+ long bytesWritten;
+
+ CompactionStats()
+ {
+ this.micros = 0;
+ this.bytesRead = 0;
+ this.bytesWritten = 0;
+ }
+
+ public void add(long micros, long bytesRead, long bytesWritten)
+ {
+ this.micros += micros;
+ this.bytesRead += bytesRead;
+ this.bytesWritten += bytesWritten;
+ }
+ }
+
+ // Decodes the entries of one WAL record (after its 12-byte header) into a
+ // WriteBatch, validating the entry count against the header's updateSize.
+ private WriteBatchImpl readWriteBatch(SliceInput record, int updateSize)
+ throws IOException
+ {
+ WriteBatchImpl writeBatch = new WriteBatchImpl();
+ int entries = 0;
+ while (record.isReadable()) {
+ entries++;
+ ValueType valueType = ValueType.getValueTypeByPersistentId(record.readByte());
+ if (valueType == VALUE) {
+ Slice key = readLengthPrefixedBytes(record);
+ Slice value = readLengthPrefixedBytes(record);
+ writeBatch.put(key, value);
+ }
+ else if (valueType == DELETION) {
+ Slice key = readLengthPrefixedBytes(record);
+ writeBatch.delete(key);
+ }
+ else {
+ throw new IllegalStateException("Unexpected value type " + valueType);
+ }
+ }
+
+ if (entries != updateSize) {
+ throw new IOException(String.format("Expected %d entries in log record but found %s entries", updateSize, entries));
+ }
+
+ return writeBatch;
+ }
+
+ // Encodes a WriteBatch as a WAL record: 8-byte starting sequence number,
+ // 4-byte entry count, then type-tagged length-prefixed entries. Inverse of
+ // readWriteBatch.
+ static Slice writeWriteBatch(WriteBatchImpl updates, long sequenceBegin)
+ {
+ Slice record = Slices.allocate(SIZE_OF_LONG + SIZE_OF_INT + updates.getApproximateSize());
+ final SliceOutput sliceOutput = record.output();
+ sliceOutput.writeLong(sequenceBegin);
+ sliceOutput.writeInt(updates.size());
+ updates.forEach(new Handler()
+ {
+ @Override
+ public void put(Slice key, Slice value)
+ {
+ sliceOutput.writeByte(VALUE.getPersistentId());
+ writeLengthPrefixedBytes(sliceOutput, key);
+ writeLengthPrefixedBytes(sliceOutput, value);
+ }
+
+ @Override
+ public void delete(Slice key)
+ {
+ sliceOutput.writeByte(DELETION.getPersistentId());
+ writeLengthPrefixedBytes(sliceOutput, key);
+ }
+ });
+ return record.slice(0, sliceOutput.size());
+ }
+
+ // Thrown when an operation is aborted because the database is closing.
+ public static class DatabaseShutdownException
+ extends DBException
+ {
+ public DatabaseShutdownException()
+ {
+ }
+
+ public DatabaseShutdownException(String message)
+ {
+ super(message);
+ }
+ }
+
+ // Wraps an error recorded by the background compaction thread; rethrown to
+ // callers of foreground operations once the DB is in an error state.
+ public static class BackgroundProcessingException
+ extends DBException
+ {
+ public BackgroundProcessingException(Throwable cause)
+ {
+ super(cause);
+ }
+ }
+
+ private final Object suspensionMutex = new Object();
+ private int suspensionCounter;
+
+ // Blocks the (single-threaded) compaction executor by parking a task in it
+ // until resumeCompactions() drops the counter back to zero. Returns only
+ // once the parked task is actually running.
+ @Override
+ public void suspendCompactions()
+ throws InterruptedException
+ {
+ compactionExecutor.execute(() -> {
+ try {
+ synchronized (suspensionMutex) {
+ suspensionCounter++;
+ suspensionMutex.notifyAll();
+ // Poll with a timeout so executor shutdown also releases the task.
+ while (suspensionCounter > 0 && !compactionExecutor.isShutdown()) {
+ suspensionMutex.wait(500);
+ }
+ }
+ }
+ catch (InterruptedException e) {
+ // NOTE(review): interrupt is swallowed without re-setting the thread's
+ // interrupt flag — confirm whether Thread.currentThread().interrupt()
+ // should be restored here before the task exits.
+ }
+ });
+ synchronized (suspensionMutex) {
+ while (suspensionCounter < 1) {
+ suspensionMutex.wait();
+ }
+ }
+ }
+
+ @Override
+ public void resumeCompactions()
+ {
+ synchronized (suspensionMutex) {
+ suspensionCounter--;
+ suspensionMutex.notifyAll();
+ }
+ }
+
+ // Manually compacts the key range [begin, end] (null means open-ended):
+ // flushes the memtable, then compacts every level that overlaps the range.
+ @Override
+ public void compactRange(byte[] begin, byte[] end)
+ throws DBException
+ {
+ final Slice smallestUserKey = begin == null ? null : new Slice(begin, 0, begin.length);
+ final Slice largestUserKey = end == null ? null : new Slice(end, 0, end.length);
+ int maxLevelWithFiles = 1;
+ mutex.lock();
+ try {
+ Version base = versions.getCurrent();
+ for (int level = 1; level < DbConstants.NUM_LEVELS; level++) {
+ if (base.overlapInLevel(level, smallestUserKey, largestUserKey)) {
+ maxLevelWithFiles = level;
+ }
+ }
+ }
+ finally {
+ mutex.unlock();
+ }
+ testCompactMemTable(); // TODO: Skip if memtable does not overlap
+ for (int level = 0; level < maxLevelWithFiles; level++) {
+ testCompactRange(level, smallestUserKey, largestUserKey);
+ }
+ }
+
+ // Requests a manual compaction of [begin, end] at the given level and
+ // blocks until the background thread completes (or aborts) it.
+ @VisibleForTesting
+ void testCompactRange(int level, Slice begin, Slice end) throws DBException
+ {
+ checkArgument(level >= 0);
+ checkArgument(level + 1 < DbConstants.NUM_LEVELS);
+
+ final InternalKey beginStorage = begin == null ? null : new InternalKey(begin, SequenceNumber.MAX_SEQUENCE_NUMBER, VALUE);
+ final InternalKey endStorage = end == null ? null : new InternalKey(end, 0, DELETION);
+ ManualCompaction manual = new ManualCompaction(level, beginStorage, endStorage);
+ mutex.lock();
+ try {
+ while (!manual.done && !shuttingDown.get() && backgroundException == null) {
+ if (manualCompaction == null) { // Idle
+ manualCompaction = manual;
+ maybeScheduleCompaction();
+ }
+ else { // Running either my compaction or another compaction.
+ backgroundCondition.awaitUninterruptibly();
+ }
+ }
+ if (manualCompaction == manual) {
+ // Cancel my manual compaction since we aborted early for some reason.
+ manualCompaction = null; + } + } + finally { + mutex.unlock(); + } + } + + @VisibleForTesting + void testCompactMemTable() throws DBException + { + // NULL batch means just wait for earlier writes to be done + writeInternal(null, new WriteOptions()); + // Wait until the compaction completes + mutex.lock(); + + try { + while (immutableMemTable != null && backgroundException == null) { + backgroundCondition.awaitUninterruptibly(); + } + if (immutableMemTable != null) { + if (backgroundException != null) { + throw new DBException(backgroundException); + } + } + } + finally { + mutex.unlock(); + } + } + + /** + * Wait for all background activity to finish; only usable in controlled environment. + */ + @VisibleForTesting + void waitForBackgroundCompactationToFinish() + { + mutex.lock(); + try { + while (backgroundCompaction != null && !shuttingDown.get() && backgroundException == null) { + backgroundCondition.awaitUninterruptibly(); + } + } + finally { + mutex.unlock(); + } + } + + public static boolean destroyDB(File dbname, Env env) throws IOException + { + // Ignore error in case directory does not exist + if (!dbname.exists()) { + return true; + } + List filenames = dbname.listFiles(); + + boolean res = true; + File lockFile = dbname.child(Filename.lockFileName()); + DbLock lock = env.tryLock(lockFile); + try { + for (File filename : filenames) { + FileInfo fileInfo = Filename.parseFileName(filename); + if (fileInfo != null && fileInfo.getFileType() != FileType.DB_LOCK) { // Lock file will be deleted at end + res &= filename.delete(); + } + } + } + finally { + try { + lock.release(); // Ignore error since state is already gone + } + catch (Exception ignore) { + } + } + lockFile.delete(); + dbname.delete(); // Ignore error in case dir contains other files + return res; + } + + public class RecordBytesListener + implements SnapshotSeekingIterator.IRecordBytesListener + { + private final Random r; + private int bytesReadUntilSampling; + + RecordBytesListener() + { + 
this.r = new Random(); + this.bytesReadUntilSampling = getRandomCompactionPeriod(r); + } + + @Override + public void record(InternalKey internalKey, int bytes) + { + bytesReadUntilSampling -= bytes; + while (bytesReadUntilSampling < 0) { + bytesReadUntilSampling += getRandomCompactionPeriod(r); + DbImpl.this.recordReadSample(internalKey); + } + } + + /** + * Picks the number of bytes that can be read until a compaction is scheduled. + * + * @param r + */ + private int getRandomCompactionPeriod(Random r) + { + return r.nextInt(2 * DbConstants.READ_BYTES_PERIOD); + } + } + + private class WriteBatchInternal + { + private final WriteBatchImpl batch; + private final boolean sync; + private final Condition backgroundCondition; + boolean done = false; + public Throwable error; + + WriteBatchInternal(WriteBatchImpl batch, boolean sync, Condition backgroundCondition) + { + this.batch = batch; + this.sync = sync; + this.backgroundCondition = backgroundCondition; + } + + void signal() + { + backgroundCondition.signal(); + } + + void checkExceptions() + { + checkBackgroundException(); + if (error instanceof Error) { + throw (Error) error; + } + if (error != null) { + throw new DBException(error); + } + } + } + + @Override + public String toString() + { + return this.getClass().getName() + "{" + databaseDir + "}"; + } +} diff --git a/leveldb/src/main/java/org/iq80/leveldb/impl/FileMetaData.java b/leveldb/src/main/java/org/iq80/leveldb/impl/FileMetaData.java new file mode 100644 index 0000000..71f2b22 --- /dev/null +++ b/leveldb/src/main/java/org/iq80/leveldb/impl/FileMetaData.java @@ -0,0 +1,103 @@ +/* + * Copyright (C) 2011 the original author or authors. + * See the notice.md file distributed with this work for additional + * information regarding copyright ownership. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.iq80.leveldb.impl; + +import java.util.concurrent.atomic.AtomicInteger; + +public class FileMetaData +{ + private final long number; + + /** + * File size in bytes + */ + private final long fileSize; + + /** + * Smallest internal key served by table + */ + private final InternalKey smallest; + + /** + * Largest internal key served by table + */ + private final InternalKey largest; + + /** + * Seeks allowed until compaction + */ + // todo this mutable state should be moved elsewhere + private final AtomicInteger allowedSeeks = new AtomicInteger(1 << 30); + + public FileMetaData(long number, long fileSize, InternalKey smallest, InternalKey largest) + { + this.number = number; + this.fileSize = fileSize; + this.smallest = smallest.compact(); + this.largest = largest.compact(); + } + + public long getFileSize() + { + return fileSize; + } + + public long getNumber() + { + return number; + } + + public InternalKey getSmallest() + { + return smallest; + } + + public InternalKey getLargest() + { + return largest; + } + + public int getAllowedSeeks() + { + return allowedSeeks.get(); + } + + public void setAllowedSeeks(int allowedSeeks) + { + this.allowedSeeks.set(allowedSeeks); + } + + public void decrementAllowedSeeks() + { + allowedSeeks.getAndDecrement(); + } + + @Override + public String toString() + { + StringBuilder sb = new StringBuilder(); + sb.append("FileMetaData"); + sb.append("{number=").append(number); + sb.append(", fileSize=").append(fileSize); + sb.append(", smallest=").append(smallest); + sb.append(", 
largest=").append(largest); + sb.append(", allowedSeeks=").append(allowedSeeks); + sb.append('}'); + return sb.toString(); + } +} diff --git a/leveldb/src/main/java/org/iq80/leveldb/impl/Filename.java b/leveldb/src/main/java/org/iq80/leveldb/impl/Filename.java new file mode 100644 index 0000000..3229de8 --- /dev/null +++ b/leveldb/src/main/java/org/iq80/leveldb/impl/Filename.java @@ -0,0 +1,313 @@ +/* + * Copyright (C) 2011 the original author or authors. + * See the notice.md file distributed with this work for additional + * information regarding copyright ownership. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.iq80.leveldb.impl; + +import com.google.common.base.Strings; +import org.iq80.leveldb.env.Env; +import org.iq80.leveldb.env.File; + +import java.io.IOException; + +import static com.google.common.base.Preconditions.checkArgument; +import static com.google.common.base.Preconditions.checkState; +import static java.lang.Long.parseUnsignedLong; +import static java.util.Objects.requireNonNull; + +public final class Filename +{ + private Filename() + { + } + + public enum FileType + { + LOG, + DB_LOCK, + TABLE, + DESCRIPTOR, + CURRENT, + TEMP, + INFO_LOG // Either the current one, or an old one + } + + /** + * Return the name of the log file with the specified number. + */ + public static String logFileName(long number) + { + return makeFileName(number, "log"); + } + + /** + * Return the name of the sstable with the specified number. 
+ */ + public static String tableFileName(long number) + { + return makeFileName(number, "ldb"); + } + + /** + * Return the deprecated name of the sstable with the specified number. + */ + public static String sstTableFileName(long number) + { + return makeFileName(number, "sst"); + } + + /** + * Return the name of the descriptor file with the specified incarnation number. + */ + public static String descriptorFileName(long number) + { + checkArgument(number >= 0, "number is negative"); + return String.format("MANIFEST-%06d", number); + } + + /** + * Return the name of the current file. + */ + public static String currentFileName() + { + return "CURRENT"; + } + + /** + * Return the name of the lock file. + */ + public static String lockFileName() + { + return "LOCK"; + } + + /** + * Return the name of a temporary file with the specified number. + */ + public static String tempFileName(long number) + { + return makeFileName(number, "dbtmp"); + } + + /** + * Return the name of the info log file. + */ + public static String infoLogFileName() + { + return "LOG"; + } + + /** + * Return the name of the old info log file. + */ + public static String oldInfoLogFileName() + { + return "LOG.old"; + } + + /** + * If filename is a leveldb file, store the type of the file in *type. + * The number encoded in the filename is stored in *number. If the + * filename was successfully parsed, returns true. Else return false. 
+ */ + public static FileInfo parseFileName(File file) + { + // Owned filenames have the form: + // dbname/CURRENT + // dbname/LOCK + // dbname/LOG + // dbname/LOG.old + // dbname/MANIFEST-[0-9]+ + // dbname/[0-9]+.(log|sst|dbtmp) + try { + String fileName = file.getName(); + if ("CURRENT".equals(fileName)) { + return new FileInfo(FileType.CURRENT); + } + else if ("LOCK".equals(fileName)) { + return new FileInfo(FileType.DB_LOCK); + } + else if ("LOG".equals(fileName) || "LOG.old".equals(fileName)) { + return new FileInfo(FileType.INFO_LOG); + } + else if (fileName.startsWith("MANIFEST-")) { + long fileNumber = parseLong(removePrefix(fileName, "MANIFEST-")); + return new FileInfo(FileType.DESCRIPTOR, fileNumber); + } + else if (fileName.endsWith(".log")) { + long fileNumber = parseLong(removeSuffix(fileName, ".log")); + return new FileInfo(FileType.LOG, fileNumber); + } + else if (fileName.endsWith(".sst") || fileName.endsWith(".ldb")) { + long fileNumber = parseLong(fileName.substring(0, fileName.lastIndexOf('.'))); + return new FileInfo(FileType.TABLE, fileNumber); + } + else if (fileName.endsWith(".dbtmp")) { + long fileNumber = parseLong(removeSuffix(fileName, ".dbtmp")); + return new FileInfo(FileType.TEMP, fileNumber); + } + } + catch (Exception ignore) { + //filename is incorrect/not supported + } + return null; + } + + /** + * Parse unsigned long string + */ + private static long parseLong(String str) + { + return parseUnsignedLong(str, 10); + } + + /** + * Make the CURRENT file point to the descriptor file with the + * specified number. 
+ * @throws IOException on any IO exception + * @throws IllegalArgumentException on invalid descriptorNumber + */ + public static void setCurrentFile(File databaseDir, long descriptorNumber, Env env) + throws IOException + { + String manifest = descriptorFileName(descriptorNumber); + String temp = tempFileName(descriptorNumber); + + File tempFile = databaseDir.child(temp); + env.writeStringToFileSync(tempFile, manifest + "\n"); + + File to = databaseDir.child(currentFileName()); + boolean ok = tempFile.renameTo(to); + if (!ok) { + tempFile.delete(); + env.writeStringToFileSync(to, manifest + "\n"); + } + } + + /** + * Read "CURRENT" file, which contains a pointer to the current manifest file + * + * @param databaseDir DB base directory + * @param env system environment + * @return current manifest file + * @throws IOException on any IO exception + * @throws IllegalStateException if file does not exist or invalid file content + */ + public static String getCurrentFile(File databaseDir, Env env) + throws IOException + { + // Read "CURRENT" file, which contains a pointer to the current manifest file + File currentFile = databaseDir.child(currentFileName()); + checkState(currentFile.exists(), "CURRENT file does not exist"); + + String descriptorName = env.readFileToString(currentFile); + if (descriptorName.isEmpty() || descriptorName.charAt(descriptorName.length() - 1) != '\n') { + throw new IllegalStateException("CURRENT file does not end with newline"); + } + return descriptorName.substring(0, descriptorName.length() - 1); + } + + /** + * Make a new file name + * @param number unsigned number + * @param suffix file name suffix + * @return new file name. 
+ */ + private static String makeFileName(long number, String suffix) + { + requireNonNull(suffix, "suffix is null"); + return String.format("%s.%s", Strings.padStart(Long.toUnsignedString(number), 6, '0'), suffix); + } + + private static String removePrefix(String value, String prefix) + { + return value.substring(prefix.length()); + } + + private static String removeSuffix(String value, String suffix) + { + return value.substring(0, value.length() - suffix.length()); + } + + public static class FileInfo + { + private final FileType fileType; + private final long fileNumber; + + public FileInfo(FileType fileType) + { + this(fileType, 0); + } + + public FileInfo(FileType fileType, long fileNumber) + { + requireNonNull(fileType, "fileType is null"); + this.fileType = fileType; + this.fileNumber = fileNumber; + } + + public FileType getFileType() + { + return fileType; + } + + public long getFileNumber() + { + return fileNumber; + } + + @Override + public boolean equals(Object o) + { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + + FileInfo fileInfo = (FileInfo) o; + + if (fileNumber != fileInfo.fileNumber) { + return false; + } + if (fileType != fileInfo.fileType) { + return false; + } + + return true; + } + + @Override + public int hashCode() + { + int result = fileType.hashCode(); + result = 31 * result + (int) (fileNumber ^ (fileNumber >>> 32)); + return result; + } + + @Override + public String toString() + { + StringBuilder sb = new StringBuilder(); + sb.append("FileInfo"); + sb.append("{fileType=").append(fileType); + sb.append(", fileNumber=").append(Long.toUnsignedString(fileNumber)); + sb.append('}'); + return sb.toString(); + } + } +} diff --git a/leveldb/src/main/java/org/iq80/leveldb/impl/InsertIntoHandler.java b/leveldb/src/main/java/org/iq80/leveldb/impl/InsertIntoHandler.java new file mode 100644 index 0000000..444a26c --- /dev/null +++ 
b/leveldb/src/main/java/org/iq80/leveldb/impl/InsertIntoHandler.java @@ -0,0 +1,49 @@ +/* + * Copyright (C) 2011 the original author or authors. + * See the notice.md file distributed with this work for additional + * information regarding copyright ownership. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.iq80.leveldb.impl; + +import org.iq80.leveldb.util.Slice; +import org.iq80.leveldb.util.Slices; + +import static org.iq80.leveldb.impl.ValueType.DELETION; +import static org.iq80.leveldb.impl.ValueType.VALUE; + +final class InsertIntoHandler + implements WriteBatchImpl.Handler +{ + private long sequence; + private final MemTable memTable; + + public InsertIntoHandler(MemTable memTable, long sequenceBegin) + { + this.memTable = memTable; + this.sequence = sequenceBegin; + } + + @Override + public void put(Slice key, Slice value) + { + memTable.add(sequence++, VALUE, key.copySlice(), value.copySlice()); + } + + @Override + public void delete(Slice key) + { + memTable.add(sequence++, DELETION, key.copySlice(), Slices.EMPTY_SLICE); + } +} diff --git a/leveldb/src/main/java/org/iq80/leveldb/impl/InternalEntry.java b/leveldb/src/main/java/org/iq80/leveldb/impl/InternalEntry.java new file mode 100644 index 0000000..ae88c17 --- /dev/null +++ b/leveldb/src/main/java/org/iq80/leveldb/impl/InternalEntry.java @@ -0,0 +1,102 @@ +/* + * Copyright (C) 2011 the original author or authors. 
+ * See the notice.md file distributed with this work for additional + * information regarding copyright ownership. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.iq80.leveldb.impl; + +import org.iq80.leveldb.util.Slice; + +import java.util.Map.Entry; + +import static java.nio.charset.StandardCharsets.UTF_8; +import static java.util.Objects.requireNonNull; + +public class InternalEntry + implements Entry +{ + private final InternalKey key; + private final Slice value; + + public InternalEntry(InternalKey key, Slice value) + { + requireNonNull(key, "key is null"); + requireNonNull(value, "value is null"); + this.key = key; + this.value = value; + } + + @Override + public InternalKey getKey() + { + return key; + } + + @Override + public Slice getValue() + { + return value; + } + + /** + * @throws UnsupportedOperationException always + */ + @Override + public final Slice setValue(Slice value) + { + throw new UnsupportedOperationException(); + } + + @Override + public boolean equals(Object o) + { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + + InternalEntry entry = (InternalEntry) o; + + if (!key.equals(entry.key)) { + return false; + } + if (!value.equals(entry.value)) { + return false; + } + + return true; + } + + @Override + public int hashCode() + { + int result = key.hashCode(); + result = 31 * result + value.hashCode(); + return result; + } + + @Override + public String 
toString() + { + StringBuilder sb = new StringBuilder(); + sb.append("InternalEntry"); + sb.append("{key=").append(key); // todo don't print the real value + sb.append(", value=").append(value.toString(UTF_8)); + sb.append('}'); + return sb.toString(); + } +} diff --git a/leveldb/src/main/java/org/iq80/leveldb/impl/InternalFilterPolicy.java b/leveldb/src/main/java/org/iq80/leveldb/impl/InternalFilterPolicy.java new file mode 100644 index 0000000..5b92b3c --- /dev/null +++ b/leveldb/src/main/java/org/iq80/leveldb/impl/InternalFilterPolicy.java @@ -0,0 +1,79 @@ +/* + * Copyright (C) 2011 the original author or authors. + * See the notice.md file distributed with this work for additional + * information regarding copyright ownership. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.iq80.leveldb.impl; + +import com.google.common.base.Function; +import com.google.common.collect.Lists; +import org.iq80.leveldb.XFilterPolicy; +import org.iq80.leveldb.util.Slice; + +import java.util.List; + +import static com.google.common.base.Preconditions.checkArgument; + +/** + * Filter policy wrapper that converts from internal keys to user keys + *

+ *

+ * + * @author Honore Vasconcelos + */ +final class InternalFilterPolicy implements org.iq80.leveldb.table.FilterPolicy +{ + private static final Function EXTRACT_USER_KEY = InternalFilterPolicy::extractUserKey; + private org.iq80.leveldb.table.FilterPolicy userPolicy; + + private InternalFilterPolicy(org.iq80.leveldb.table.FilterPolicy userPolicy) + { + this.userPolicy = userPolicy; + } + + static InternalFilterPolicy convert(XFilterPolicy policy) + { + checkArgument(policy == null || policy instanceof org.iq80.leveldb.table.FilterPolicy, "Filter policy must implement Java interface FilterPolicy"); + if (policy instanceof InternalFilterPolicy) { + return (InternalFilterPolicy) policy; + } + return policy == null ? null : new InternalFilterPolicy((org.iq80.leveldb.table.FilterPolicy) policy); + } + + @Override + public String name() + { + return userPolicy.name(); + } + + @Override + public byte[] createFilter(final List keys) + { + //instead of copying all the keys to a shorter form, make it lazy + return userPolicy.createFilter(Lists.transform(keys, EXTRACT_USER_KEY)); + } + + @Override + public boolean keyMayMatch(Slice key, Slice filter) + { + return userPolicy.keyMayMatch(extractUserKey(key), filter); + } + + private static Slice extractUserKey(Slice key) + { + checkArgument(key.length() >= 8); + return key.slice(0, key.length() - 8); + } +} diff --git a/leveldb/src/main/java/org/iq80/leveldb/impl/InternalKey.java b/leveldb/src/main/java/org/iq80/leveldb/impl/InternalKey.java new file mode 100644 index 0000000..b1e3df3 --- /dev/null +++ b/leveldb/src/main/java/org/iq80/leveldb/impl/InternalKey.java @@ -0,0 +1,160 @@ +/* + * Copyright (C) 2011 the original author or authors. + * See the notice.md file distributed with this work for additional + * information regarding copyright ownership. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.iq80.leveldb.impl; + +import org.iq80.leveldb.util.Slice; +import org.iq80.leveldb.util.SliceOutput; +import org.iq80.leveldb.util.Slices; + +import java.util.Objects; + +import static com.google.common.base.Preconditions.checkArgument; +import static java.nio.charset.StandardCharsets.UTF_8; +import static java.util.Objects.requireNonNull; +import static org.iq80.leveldb.util.SizeOf.SIZE_OF_LONG; + +public class InternalKey +{ + private final Slice userKey; + private final long sequenceNumber; + private final ValueType valueType; + + public InternalKey(Slice userKey, long sequenceNumber, ValueType valueType) + { + requireNonNull(userKey, "userKey is null"); + checkArgument(sequenceNumber >= 0, "sequenceNumber is negative"); + requireNonNull(valueType, "valueType is null"); + + this.userKey = userKey; + this.sequenceNumber = sequenceNumber; + this.valueType = valueType; + } + + public InternalKey(Slice data) + { + requireNonNull(data, "data is null"); + checkArgument(data.length() >= SIZE_OF_LONG, "data must be at least %s bytes", SIZE_OF_LONG); + this.userKey = getUserKey(data); + long packedSequenceAndType = data.getLong(data.length() - SIZE_OF_LONG); + this.sequenceNumber = SequenceNumber.unpackSequenceNumber(packedSequenceAndType); + this.valueType = SequenceNumber.unpackValueType(packedSequenceAndType); + } + + public Slice getUserKey() + { + return userKey; + } + + public long getSequenceNumber() + { + return sequenceNumber; + } + + public ValueType getValueType() + { + return valueType; + } + + public Slice encode() + { + 
Slice slice = Slices.allocate(userKey.length() + SIZE_OF_LONG); + SliceOutput sliceOutput = slice.output(); + sliceOutput.writeBytes(userKey); + sliceOutput.writeLong(SequenceNumber.packSequenceAndValueType(sequenceNumber, valueType)); + return slice; + } + + @Override + public boolean equals(Object o) + { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + + InternalKey that = (InternalKey) o; + + if (sequenceNumber != that.sequenceNumber) { + return false; + } + if (!Objects.equals(userKey, that.userKey)) { + return false; + } + if (valueType != that.valueType) { + return false; + } + + return true; + } + + private int hash; + + @Override + public int hashCode() + { + if (hash == 0) { + int result = userKey != null ? userKey.hashCode() : 0; + result = 31 * result + (int) (sequenceNumber ^ (sequenceNumber >>> 32)); + result = 31 * result + (valueType != null ? valueType.hashCode() : 0); + if (result == 0) { + result = 1; + } + hash = result; + } + return hash; + } + + @Override + public String toString() + { + StringBuilder sb = new StringBuilder(); + sb.append("InternalKey"); + sb.append("{key=").append(getUserKey().toString(UTF_8)); // todo don't print the real value + sb.append(", sequenceNumber=").append(getSequenceNumber()); + sb.append(", valueType=").append(getValueType()); + sb.append('}'); + return sb.toString(); + } + + private static Slice getUserKey(Slice data) + { + return data.slice(0, data.length() - SIZE_OF_LONG); + } + + /** + * Return the size in bytes used by current internal key + */ + public int size() + { + return userKey.length() + SIZE_OF_LONG; + } + + /** + * If user key refer to partial view of data, create a new InternalKey with only relevant bytes and enable + * the other to be garbage collected. 
+ */ + public InternalKey compact() + { + if (userKey.length() != userKey.getRawArray().length) { + return new InternalKey(userKey.copySlice(), sequenceNumber, valueType); + } + return this; + } +} diff --git a/leveldb/src/main/java/org/iq80/leveldb/impl/InternalKeyComparator.java b/leveldb/src/main/java/org/iq80/leveldb/impl/InternalKeyComparator.java new file mode 100644 index 0000000..a732fc2 --- /dev/null +++ b/leveldb/src/main/java/org/iq80/leveldb/impl/InternalKeyComparator.java @@ -0,0 +1,54 @@ +/* + * Copyright (C) 2011 the original author or authors. + * See the notice.md file distributed with this work for additional + * information regarding copyright ownership. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.iq80.leveldb.impl; + +import org.iq80.leveldb.table.UserComparator; + +import java.util.Comparator; + +public class InternalKeyComparator + implements Comparator +{ + private final UserComparator userComparator; + + public InternalKeyComparator(UserComparator userComparator) + { + this.userComparator = userComparator; + } + + public UserComparator getUserComparator() + { + return userComparator; + } + + public String name() + { + return this.userComparator.name(); + } + + @Override + public int compare(InternalKey left, InternalKey right) + { + int result = userComparator.compare(left.getUserKey(), right.getUserKey()); + if (result != 0) { + return result; + } + + return Long.compare(right.getSequenceNumber(), left.getSequenceNumber()); // reverse sorted version numbers + } +} diff --git a/leveldb/src/main/java/org/iq80/leveldb/impl/InternalUserComparator.java b/leveldb/src/main/java/org/iq80/leveldb/impl/InternalUserComparator.java new file mode 100644 index 0000000..768e6d2 --- /dev/null +++ b/leveldb/src/main/java/org/iq80/leveldb/impl/InternalUserComparator.java @@ -0,0 +1,89 @@ +/* + * Copyright (C) 2011 the original author or authors. + * See the notice.md file distributed with this work for additional + * information regarding copyright ownership. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
import org.iq80.leveldb.table.UserComparator;
import org.iq80.leveldb.util.Slice;

import static com.google.common.base.Preconditions.checkState;
import static org.iq80.leveldb.impl.SequenceNumber.MAX_SEQUENCE_NUMBER;

/**
 * Adapts an {@link InternalKeyComparator} to the {@link UserComparator}
 * interface so that table-building code, which works on raw encoded internal
 * keys ({@link Slice}s), can compare and shorten them.
 */
public class InternalUserComparator
        implements UserComparator
{
    private final InternalKeyComparator internalKeyComparator;

    public InternalUserComparator(InternalKeyComparator internalKeyComparator)
    {
        this.internalKeyComparator = internalKeyComparator;
    }

    @Override
    public int compare(Slice left, Slice right)
    {
        // Decode both slices as internal keys and delegate to the internal key ordering.
        return internalKeyComparator.compare(new InternalKey(left), new InternalKey(right));
    }

    @Override
    public String name()
    {
        return internalKeyComparator.name();
    }

    /**
     * Returns an encoded internal key that is >= {@code start} and < {@code limit},
     * preferably shorter than {@code start}, for use as an index-block separator.
     * Falls back to returning {@code start} unchanged when no shortening helps.
     */
    @Override
    public Slice findShortestSeparator(
            Slice start,
            Slice limit)
    {
        // Attempt to shorten the user portion of the key
        Slice startUserKey = new InternalKey(start).getUserKey();
        Slice limitUserKey = new InternalKey(limit).getUserKey();

        Slice shortestSeparator = internalKeyComparator.getUserComparator().findShortestSeparator(startUserKey, limitUserKey);

        if (internalKeyComparator.getUserComparator().compare(startUserKey, shortestSeparator) < 0) {
            // User key has become larger. Tack on the earliest possible
            // number to the shortened user key.
            // MAX_SEQUENCE_NUMBER sorts first among entries sharing a user key
            // (sequence numbers are ordered descending), so this is the smallest
            // internal key with that user key.
            InternalKey newInternalKey = new InternalKey(shortestSeparator, MAX_SEQUENCE_NUMBER, ValueType.VALUE);
            checkState(compare(start, newInternalKey.encode()) < 0); // todo
            checkState(compare(newInternalKey.encode(), limit) < 0); // todo

            return newInternalKey.encode();
        }

        // Shortening did not produce a strictly larger user key; keep the original.
        return start;
    }

    /**
     * Returns an encoded internal key >= {@code key}, preferably shorter,
     * for use as the last index entry of a table.
     */
    @Override
    public Slice findShortSuccessor(Slice key)
    {
        Slice userKey = new InternalKey(key).getUserKey();
        Slice shortSuccessor = internalKeyComparator.getUserComparator().findShortSuccessor(userKey);

        if (internalKeyComparator.getUserComparator().compare(userKey, shortSuccessor) < 0) {
            // User key has become larger. Tack on the earliest possible
            // number to the shortened user key.
            InternalKey newInternalKey = new InternalKey(shortSuccessor, MAX_SEQUENCE_NUMBER, ValueType.VALUE);
            checkState(compare(key, newInternalKey.encode()) < 0); // todo

            return newInternalKey.encode();
        }

        return key;
    }
}
+ */ +package org.iq80.leveldb.impl; + +import org.iq80.leveldb.DB; +import org.iq80.leveldb.DBFactory; +import org.iq80.leveldb.Options; +import org.iq80.leveldb.env.Env; +import org.iq80.leveldb.fileenv.EnvImpl; + +import java.io.BufferedReader; +import java.io.File; +import java.io.IOException; +import java.io.InputStream; +import java.io.InputStreamReader; + +import static java.nio.charset.StandardCharsets.UTF_8; +import static java.util.Objects.requireNonNull; + +/** + * @author Hiram Chirino + */ +public class Iq80DBFactory + implements DBFactory +{ + public static final String VERSION; + + static { + String v = "unknown"; + InputStream is = Iq80DBFactory.class.getResourceAsStream("version.txt"); + try { + v = new BufferedReader(new InputStreamReader(is, UTF_8)).readLine(); + } + catch (Throwable e) { + } + finally { + try { + is.close(); + } + catch (Throwable e) { + } + } + VERSION = v; + } + + public static final Iq80DBFactory factory = new Iq80DBFactory(); + + @Override + public DB open(File path, Options options) + throws IOException + { + requireNonNull(path, "path is null"); + return new DbImpl(options, path.getAbsolutePath(), EnvImpl.createEnv()); + } + + @Override + public void destroy(File path, Options options) + throws IOException + { + requireNonNull(path, "path is null"); + Env env = EnvImpl.createEnv(); + DbImpl.destroyDB(env.toFile(path.getAbsolutePath()), env); + } + + @Override + public void repair(File path, Options options) + throws IOException + { + // TODO: implement repair + throw new UnsupportedOperationException(); + } + + @Override + public String toString() + { + return String.format("iq80 leveldb version %s", VERSION); + } + + public static byte[] bytes(String value) + { + return (value == null) ? null : value.getBytes(UTF_8); + } + + public static String asString(byte[] value) + { + return (value == null) ? 
null : new String(value, UTF_8); + } +} diff --git a/leveldb/src/main/java/org/iq80/leveldb/impl/KeyMatchingLookup.java b/leveldb/src/main/java/org/iq80/leveldb/impl/KeyMatchingLookup.java new file mode 100644 index 0000000..8de1438 --- /dev/null +++ b/leveldb/src/main/java/org/iq80/leveldb/impl/KeyMatchingLookup.java @@ -0,0 +1,58 @@ +/* + * Copyright (C) 2011 the original author or authors. + * See the notice.md file distributed with this work for additional + * information regarding copyright ownership. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.iq80.leveldb.impl; + +import org.iq80.leveldb.table.KeyValueFunction; +import org.iq80.leveldb.util.Slice; + +import static com.google.common.base.Preconditions.checkState; +import static org.iq80.leveldb.impl.ValueType.VALUE; + +/** + * @author Honore Vasconcelos + */ +public class KeyMatchingLookup implements KeyValueFunction +{ + private LookupKey key; + + KeyMatchingLookup(LookupKey key) + { + this.key = key; + } + + @Override + public LookupResult apply(Slice internalKey1, Slice value) + { + // parse the key in the block + checkState(internalKey1 != null, "Corrupt key for %s", key); + + final InternalKey internalKey = new InternalKey(internalKey1); + + // if this is a value key (not a delete) and the keys match, return the value + if (key.getUserKey().equals(internalKey.getUserKey())) { + if (internalKey.getValueType() == ValueType.DELETION) { + return LookupResult.deleted(key); + } + else if (internalKey.getValueType() == VALUE) { + return LookupResult.ok(key, value); + } + } + return null; + } +} diff --git a/leveldb/src/main/java/org/iq80/leveldb/impl/Level.java b/leveldb/src/main/java/org/iq80/leveldb/impl/Level.java new file mode 100644 index 0000000..d4a299b --- /dev/null +++ b/leveldb/src/main/java/org/iq80/leveldb/impl/Level.java @@ -0,0 +1,256 @@ +/* + * Copyright (C) 2011 the original author or authors. + * See the notice.md file distributed with this work for additional + * information regarding copyright ownership. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.iq80.leveldb.impl; + +import com.google.common.annotations.VisibleForTesting; +import org.iq80.leveldb.DBException; +import org.iq80.leveldb.ReadOptions; +import org.iq80.leveldb.iterator.SeekingIterator; +import org.iq80.leveldb.iterator.SeekingIterators; +import org.iq80.leveldb.table.UserComparator; +import org.iq80.leveldb.iterator.InternalIterator; +import org.iq80.leveldb.iterator.MergingIterator; +import org.iq80.leveldb.util.SafeListBuilder; +import org.iq80.leveldb.util.Slice; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Collection; +import java.util.Collections; +import java.util.Comparator; +import java.util.List; + +import static com.google.common.base.Preconditions.checkArgument; +import static java.util.Objects.requireNonNull; +import static org.iq80.leveldb.impl.SequenceNumber.MAX_SEQUENCE_NUMBER; +import static org.iq80.leveldb.impl.ValueType.VALUE; + +// todo this class should be immutable +public class Level +{ + public static final Comparator NEWEST_FIRST = (fileMetaData, fileMetaData1) -> (int) (fileMetaData1.getNumber() - fileMetaData.getNumber()); + private final int levelNumber; + private final TableCache tableCache; + private final InternalKeyComparator internalKeyComparator; + private final List files; + + public Level(int levelNumber, Collection files, TableCache tableCache, InternalKeyComparator internalKeyComparator) + { + checkArgument(levelNumber >= 0, "levelNumber is negative"); + requireNonNull(files, "files is null"); + requireNonNull(tableCache, "tableCache is null"); + requireNonNull(internalKeyComparator, "internalKeyComparator is null"); + + this.files = new ArrayList<>(files); + this.tableCache = tableCache; + this.internalKeyComparator = internalKeyComparator; + this.levelNumber = levelNumber; + } + + public int getLevelNumber() + { + return levelNumber; + } + + public 
List getFiles() + { + return files; + } + + public InternalIterator iterator(ReadOptions options) throws IOException + { + if (levelNumber == 0) { + try (SafeListBuilder builder = SafeListBuilder.builder()) { + for (FileMetaData file : files) { + builder.add(tableCache.newIterator(file, options)); + } + return new MergingIterator(builder.build(), internalKeyComparator); + } + } + else { + return createLevelConcatIterator(tableCache, files, internalKeyComparator, options); + } + } + + public static InternalIterator createLevelConcatIterator(TableCache tableCache, List files, InternalKeyComparator internalKeyComparator, ReadOptions options) + { + SeekingIterator iterator = SeekingIterators.fromSortedList(files, FileMetaData::getLargest, f -> f, internalKeyComparator); + return SeekingIterators.twoLevelInternalIterator(iterator, fileMetaData -> { + try { + return tableCache.newIterator(fileMetaData, options); + } + catch (IOException e) { + throw new DBException(e); + } + }, () -> { + }); + } + + public LookupResult get(ReadOptions options, LookupKey key, ReadStats readStats, ReadStats lasReadFile) + { + if (files.isEmpty()) { + return null; + } + + List fileMetaDataList = getFilesForKey(key.getUserKey(), key.getInternalKey()); + if (fileMetaDataList.isEmpty()) { + return null; + } + + for (FileMetaData fileMetaData : fileMetaDataList) { + if (lasReadFile.getSeekFile() != null && readStats.getSeekFile() == null) { + // We have had more than one seek for this read. Charge the first file. 
+ readStats.setSeekFile(lasReadFile.getSeekFile()); + readStats.setSeekFileLevel(lasReadFile.getSeekFileLevel()); + } + + lasReadFile.setSeekFile(fileMetaData); + lasReadFile.setSeekFileLevel(levelNumber); + + final LookupResult lookupResult = tableCache.get(options, key.getInternalKey().encode(), fileMetaData, new KeyMatchingLookup(key)); + if (lookupResult != null) { + return lookupResult; + } + } + + return null; + } + + public List getFilesForKey(Slice userKey, InternalKey internalKey) + { + final UserComparator userComparator = internalKeyComparator.getUserComparator(); + if (levelNumber == 0) { + final List fileMetaDataList = new ArrayList<>(files.size()); + for (FileMetaData fileMetaData : files) { + if (userComparator.compare(userKey, fileMetaData.getSmallest().getUserKey()) >= 0 && + userComparator.compare(userKey, fileMetaData.getLargest().getUserKey()) <= 0) { + fileMetaDataList.add(fileMetaData); + } + } + if (fileMetaDataList.isEmpty()) { + return Collections.emptyList(); + } + fileMetaDataList.sort(NEWEST_FIRST); + return fileMetaDataList; + } + else { + // Binary search to find earliest index whose largest key >= ikey. + int index = findFile(internalKey); + + // did we find any files that could contain the key? 
+ if (index >= files.size()) { + return Collections.emptyList(); + } + + // check if the smallest user key in the file is less than the target user key + FileMetaData fileMetaData = files.get(index); + if (userComparator.compare(userKey, fileMetaData.getSmallest().getUserKey()) < 0) { + return Collections.emptyList(); + } + + // search this file + return Collections.singletonList(fileMetaData); + } + } + + public boolean someFileOverlapsRange(boolean disjointSortedFiles, Slice smallestUserKey, Slice largestUserKey) + { + UserComparator userComparator = internalKeyComparator.getUserComparator(); + if (!disjointSortedFiles) { + // Need to check against all files + for (FileMetaData file : files) { + if (afterFile(userComparator, smallestUserKey, file) || + beforeFile(userComparator, largestUserKey, file)) { + // No overlap + } + else { + return true; // Overlap + } + } + return false; + } + int index = 0; + if (smallestUserKey != null) { + InternalKey smallestInternalKey = new InternalKey(smallestUserKey, MAX_SEQUENCE_NUMBER, VALUE); + index = findFile(smallestInternalKey); + } + + if (index >= files.size()) { + // beginning of range is after all files, so no overlap. 
+ return false; + } + + return !beforeFile(userComparator, largestUserKey, files.get(index)); + } + + private boolean beforeFile(UserComparator userComparator, Slice userKey, FileMetaData file) + { + // null userKey occurs after all keys and is therefore never before *f + return (userKey != null && + userComparator.compare(userKey, file.getSmallest().getUserKey()) < 0); + } + + private boolean afterFile(UserComparator userComparator, Slice userKey, FileMetaData file) + { + // NULL user_key occurs before all keys and is therefore never after *f + return (userKey != null && + userComparator.compare(userKey, file.getLargest().getUserKey()) > 0); + } + + @VisibleForTesting + int findFile(InternalKey targetKey) + { + // todo replace with Collections.binarySearch + int left = 0; + int right = files.size(); + + // binary search restart positions to find the restart position immediately before the targetKey + while (left < right) { + int mid = (left + right) / 2; + + if (internalKeyComparator.compare(files.get(mid).getLargest(), targetKey) < 0) { + // Key at "mid.largest" is < "target". Therefore all + // files at or before "mid" are uninteresting. + left = mid + 1; + } + else { + // Key at "mid.largest" is >= "target". Therefore all files + // after "mid" are uninteresting. 
+ right = mid; + } + } + return right; + } + + public void addFile(FileMetaData fileMetaData) + { + // todo remove mutation + files.add(fileMetaData); + } + + @Override + public String toString() + { + StringBuilder sb = new StringBuilder(); + sb.append("Level"); + sb.append("{levelNumber=").append(levelNumber); + sb.append(", files=").append(files); + sb.append('}'); + return sb.toString(); + } +} diff --git a/leveldb/src/main/java/org/iq80/leveldb/impl/LogChunkType.java b/leveldb/src/main/java/org/iq80/leveldb/impl/LogChunkType.java new file mode 100644 index 0000000..d1cb879 --- /dev/null +++ b/leveldb/src/main/java/org/iq80/leveldb/impl/LogChunkType.java @@ -0,0 +1,60 @@ +/* + * Copyright (C) 2011 the original author or authors. + * See the notice.md file distributed with this work for additional + * information regarding copyright ownership. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
import java.util.HashMap;
import java.util.Map;

/**
 * Type tag of a physical chunk in a write-ahead log block. Persistent types
 * (ZERO_TYPE..LAST) are written to disk; EOF, BAD_CHUNK and UNKNOWN are
 * in-memory sentinels used by the reader and are never persisted.
 */
public enum LogChunkType
{
    ZERO_TYPE(0),
    FULL(1),
    FIRST(2),
    MIDDLE(3),
    LAST(4),
    EOF,
    BAD_CHUNK,
    UNKNOWN;

    // Lookup table built once; avoids allocating a values() array and doing a
    // linear scan on every chunk read.
    private static final Map<Integer, LogChunkType> BY_PERSISTENT_ID = new HashMap<>();

    static {
        for (LogChunkType type : values()) {
            if (type.persistentId != null) {
                BY_PERSISTENT_ID.put(type.persistentId, type);
            }
        }
    }

    /**
     * Maps an on-disk type id to its chunk type; unrecognized ids map to
     * {@link #UNKNOWN} so the reader can report (not crash on) corruption.
     */
    public static LogChunkType getLogChunkTypeByPersistentId(int persistentId)
    {
        return BY_PERSISTENT_ID.getOrDefault(persistentId, UNKNOWN);
    }

    // null for the in-memory sentinel constants
    private final Integer persistentId;

    LogChunkType()
    {
        this.persistentId = null;
    }

    LogChunkType(int persistentId)
    {
        this.persistentId = persistentId;
    }

    /**
     * On-disk id of this chunk type.
     *
     * @throws IllegalArgumentException if this is a non-persistent sentinel type
     */
    public int getPersistentId()
    {
        if (persistentId == null) {
            throw new IllegalArgumentException(String.format("%s is not a persistent chunk type", name()));
        }
        return persistentId;
    }
}
import static org.iq80.leveldb.util.SizeOf.SIZE_OF_BYTE;
import static org.iq80.leveldb.util.SizeOf.SIZE_OF_INT;
import static org.iq80.leveldb.util.SizeOf.SIZE_OF_SHORT;

/**
 * On-disk layout constants for the write-ahead log format, shared by
 * {@code LogReader} and {@code LogWriter}.
 */
public final class LogConstants
{
    // todo find new home for these

    // Log files are written and read in fixed-size physical blocks; records
    // larger than a block are fragmented into FIRST/MIDDLE/LAST chunks.
    public static final int BLOCK_SIZE = 32768;

    // Header is checksum (4 bytes), type (1 byte), length (2 bytes).
    public static final int HEADER_SIZE = SIZE_OF_INT + SIZE_OF_BYTE + SIZE_OF_SHORT;

    // Constants-only holder; never instantiated.
    private LogConstants()
    {
    }
}
/**
 * Callback through which log-reading code reports corrupted or dropped bytes.
 * Implementations decide the policy (throw, log, ignore); see
 * {@code LogMonitors} for standard implementations.
 */
public interface LogMonitor
{
    /**
     * Reports that {@code bytes} bytes were dropped for the given textual reason.
     */
    void corruption(long bytes, String reason);

    /**
     * Reports that {@code bytes} bytes were dropped because of an exception.
     */
    void corruption(long bytes, Throwable reason);
}
+ */ +package org.iq80.leveldb.impl; + +import org.iq80.leveldb.Logger; + +public final class LogMonitors +{ + public static LogMonitor throwExceptionMonitor() + { + return new LogMonitor() + { + @Override + public void corruption(long bytes, String reason) + { + throw new RuntimeException(String.format("corruption of %s bytes: %s", bytes, reason)); + } + + @Override + public void corruption(long bytes, Throwable reason) + { + throw new RuntimeException(String.format("corruption of %s bytes", bytes), reason); + } + }; + } + + public static LogMonitor logMonitor(Logger logger) + { + return new LogMonitor() + { + @Override + public void corruption(long bytes, String reason) + { + logger.log("corruption of %s bytes: %s", bytes, reason); + } + + @Override + public void corruption(long bytes, Throwable reason) + { + logger.log("corruption of %s bytes. %s", bytes, reason); + } + }; + } + + private LogMonitors() + { + } +} diff --git a/leveldb/src/main/java/org/iq80/leveldb/impl/LogReader.java b/leveldb/src/main/java/org/iq80/leveldb/impl/LogReader.java new file mode 100644 index 0000000..9cbd9e3 --- /dev/null +++ b/leveldb/src/main/java/org/iq80/leveldb/impl/LogReader.java @@ -0,0 +1,399 @@ +/* + * Copyright (C) 2011 the original author or authors. + * See the notice.md file distributed with this work for additional + * information regarding copyright ownership. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
import org.iq80.leveldb.env.SequentialFile;
import org.iq80.leveldb.util.DynamicSliceOutput;
import org.iq80.leveldb.util.Slice;
import org.iq80.leveldb.util.SliceInput;
import org.iq80.leveldb.util.SliceOutput;
import org.iq80.leveldb.util.Slices;

import java.io.IOException;

import static org.iq80.leveldb.impl.LogChunkType.BAD_CHUNK;
import static org.iq80.leveldb.impl.LogChunkType.EOF;
import static org.iq80.leveldb.impl.LogChunkType.LAST;
import static org.iq80.leveldb.impl.LogChunkType.MIDDLE;
import static org.iq80.leveldb.impl.LogChunkType.UNKNOWN;
import static org.iq80.leveldb.impl.LogChunkType.ZERO_TYPE;
import static org.iq80.leveldb.impl.LogChunkType.getLogChunkTypeByPersistentId;
import static org.iq80.leveldb.impl.LogConstants.BLOCK_SIZE;
import static org.iq80.leveldb.impl.LogConstants.HEADER_SIZE;
import static org.iq80.leveldb.impl.Logs.getChunkChecksum;

/**
 * Reads logical records back out of a write-ahead log file. The file is a
 * sequence of {@link LogConstants#BLOCK_SIZE} blocks, each holding physical
 * chunks (header: checksum, length, type). A logical record is either one
 * FULL chunk or a FIRST/MIDDLE*/LAST chunk sequence reassembled in
 * {@code recordScratch}. Corruption is reported to the {@link LogMonitor}
 * rather than thrown. Not thread-safe.
 */
public class LogReader
{
    private final SequentialFile sequentialFile;

    // Receives corruption/drop reports; may be null (reports are discarded).
    private final LogMonitor monitor;

    // When false, chunk checksums are not validated.
    private final boolean verifyChecksums;

    /**
     * Offset at which to start looking for the first record to return
     */
    private final long initialOffset;

    // True while skipping fragments of a record that began before initialOffset.
    private boolean resyncing;

    /**
     * Have we read to the end of the file?
     */
    private boolean eof;

    /**
     * Offset of the last record returned by readRecord.
     */
    private long lastRecordOffset;

    /**
     * Offset of the first location past the end of buffer.
     */
    private long endOfBufferOffset;

    /**
     * Scratch buffer in which the next record is assembled.
     */
    private final DynamicSliceOutput recordScratch = new DynamicSliceOutput(BLOCK_SIZE);

    /**
     * Scratch buffer for current block. The currentBlock is sliced off the underlying buffer.
     */
    private final SliceOutput blockScratch = Slices.allocate(BLOCK_SIZE).output();

    /**
     * The current block records are being read from.
     */
    private SliceInput currentBlock = Slices.EMPTY_SLICE.input();

    /**
     * Current chunk which is sliced from the current block.
     */
    private Slice currentChunk = Slices.EMPTY_SLICE;

    public LogReader(SequentialFile sequentialFile, LogMonitor monitor, boolean verifyChecksums, long initialOffset)
    {
        this.sequentialFile = sequentialFile;
        this.monitor = monitor;
        this.verifyChecksums = verifyChecksums;
        this.initialOffset = initialOffset;
        // A mid-file start may land inside a fragmented record; resync to the
        // next record boundary before returning anything.
        this.resyncing = initialOffset > 0;
    }

    public long getLastRecordOffset()
    {
        return lastRecordOffset;
    }

    /**
     * Skips all blocks that are completely before "initial_offset_".
     * <p>
     * Handles reporting corruption
     *
     * @return true on success.
     */
    private boolean skipToInitialBlock()
    {
        int offsetInBlock = (int) (initialOffset % BLOCK_SIZE);
        long blockStartLocation = initialOffset - offsetInBlock;

        // Don't search a block if we'd be in the trailer
        // (the last 6 bytes of a block are too small for a chunk header).
        if (offsetInBlock > BLOCK_SIZE - 6) {
            blockStartLocation += BLOCK_SIZE;
        }

        endOfBufferOffset = blockStartLocation;

        // Skip to start of first block that can contain the initial record
        if (blockStartLocation > 0) {
            try {
                sequentialFile.skip(blockStartLocation);
            }
            catch (IOException e) {
                reportDrop(blockStartLocation, e);
                return false;
            }
        }

        return true;
    }

    /**
     * Reads and returns the next logical record, or {@code null} at end of
     * file (or when the initial skip fails). Corrupt data is reported to the
     * monitor and skipped.
     */
    public Slice readRecord()
    {
        recordScratch.reset();

        // advance to the first record, if we haven't already
        if (lastRecordOffset < initialOffset) {
            if (!skipToInitialBlock()) {
                return null;
            }
        }

        // Record offset of the logical record that we're reading
        long prospectiveRecordOffset = 0;

        boolean inFragmentedRecord = false;
        while (true) {
            LogChunkType chunkType = readNextChunk();

            // ReadPhysicalRecord may have only had an empty trailer remaining in its
            // internal buffer. Calculate the offset of the next physical record now
            // that it has returned, properly accounting for its header size.
            long physicalRecordOffset = endOfBufferOffset - currentBlock.available() - HEADER_SIZE - currentChunk.length();

            // While resyncing after a mid-file start, drop continuation chunks
            // of the record we landed inside of.
            if (resyncing) {
                if (chunkType == MIDDLE) {
                    continue;
                }
                else if (chunkType == LAST) {
                    resyncing = false;
                    continue;
                }
                else {
                    resyncing = false;
                }
            }

            switch (chunkType) {
                case FULL:
                    if (inFragmentedRecord) {
                        reportCorruption(recordScratch.size(), "partial record without end(1)");
                        // simply return this full block
                    }
                    recordScratch.reset();
                    prospectiveRecordOffset = physicalRecordOffset;
                    lastRecordOffset = prospectiveRecordOffset;
                    return currentChunk.copySlice();

                case FIRST:
                    if (inFragmentedRecord) {
                        reportCorruption(recordScratch.size(), "partial record without end(2)");
                        // clear the scratch and start over from this chunk
                        recordScratch.reset();
                    }
                    prospectiveRecordOffset = physicalRecordOffset;
                    recordScratch.writeBytes(currentChunk);
                    inFragmentedRecord = true;
                    break;

                case MIDDLE:
                    if (!inFragmentedRecord) {
                        reportCorruption(currentChunk.length(), "missing start of fragmented record(1)");

                        // clear the scratch and skip this chunk
                        recordScratch.reset();
                    }
                    else {
                        recordScratch.writeBytes(currentChunk);
                    }
                    break;

                case LAST:
                    if (!inFragmentedRecord) {
                        reportCorruption(currentChunk.length(), "missing start of fragmented record(2)");

                        // clear the scratch and skip this chunk
                        recordScratch.reset();
                    }
                    else {
                        recordScratch.writeBytes(currentChunk);
                        lastRecordOffset = prospectiveRecordOffset;
                        return recordScratch.slice().copySlice();
                    }
                    break;

                case EOF:
                    if (inFragmentedRecord) {
                        // This can be caused by the writer dying immediately after
                        // writing a physical record but before completing the next; don't
                        // treat it as a corruption, just ignore the entire logical record.
                        recordScratch.reset();
                    }
                    return null;

                case BAD_CHUNK:
                    if (inFragmentedRecord) {
                        reportCorruption(recordScratch.size(), "error in middle of record");
                        inFragmentedRecord = false;
                        recordScratch.reset();
                    }
                    break;

                default:
                    // UNKNOWN or ZERO_TYPE leaking through: drop the chunk and
                    // any partially assembled record.
                    int dropSize = currentChunk.length();
                    if (inFragmentedRecord) {
                        dropSize += recordScratch.size();
                    }
                    reportCorruption(dropSize, String.format("Unexpected chunk type %s", chunkType));
                    inFragmentedRecord = false;
                    recordScratch.reset();
                    break;
            }
        }
    }

    /**
     * Return type, or one of the preceding special values
     */
    private LogChunkType readNextChunk()
    {
        // clear the current chunk
        currentChunk = Slices.EMPTY_SLICE;

        // read the next block if necessary
        if (currentBlock.available() < HEADER_SIZE) {
            if (!readNextBlock()) {
                if (eof) {
                    return EOF;
                }
                // NOTE(review): if readNextBlock() returns false without eof
                // being set, control falls through to the header parse below on
                // an empty block — confirm this path is unreachable.
            }
        }

        // parse header
        int expectedChecksum = currentBlock.readInt();
        // length is a 2-byte little-endian unsigned value
        int length = currentBlock.readUnsignedByte();
        length = length | currentBlock.readUnsignedByte() << 8;
        byte chunkTypeId = currentBlock.readByte();
        LogChunkType chunkType = getLogChunkTypeByPersistentId(chunkTypeId);

        // verify length
        if (length > currentBlock.available()) {
            if (!eof) {
                int dropSize = currentBlock.available() + HEADER_SIZE;
                reportCorruption(dropSize, "bad record length");
                currentBlock = Slices.EMPTY_SLICE.input();
                return BAD_CHUNK;
            }
            // If the end of the file has been reached without reading |length| bytes
            // of payload, assume the writer died in the middle of writing the record.
            // Don't report a corruption.
            return EOF;
        }

        // skip zero length records
        if (chunkType == ZERO_TYPE && length == 0) {
            // Skip zero length record without reporting any drops since
            // such records are produced by the writing code.
            currentBlock = Slices.EMPTY_SLICE.input();
            return BAD_CHUNK;
        }

        // Skip physical record that started before initialOffset
        if (endOfBufferOffset - HEADER_SIZE - length < initialOffset) {
            currentBlock.skipBytes(length);
            return BAD_CHUNK;
        }

        // read the chunk
        currentChunk = currentBlock.readBytes(length);

        if (verifyChecksums) {
            int actualChecksum = getChunkChecksum(chunkTypeId, currentChunk);
            if (actualChecksum != expectedChecksum) {
                // Drop the rest of the buffer since "length" itself may have
                // been corrupted and if we trust it, we could find some
                // fragment of a real log record that just happens to look
                // like a valid log record.
                int dropSize = currentBlock.available() + HEADER_SIZE + length;
                currentBlock = Slices.EMPTY_SLICE.input();
                reportCorruption(dropSize, "checksum mismatch");
                return BAD_CHUNK;
            }
        }

        // Skip physical record that started before initial_offset_
        if (endOfBufferOffset - currentBlock.available() - HEADER_SIZE - length <
                initialOffset) {
            currentChunk = Slices.EMPTY_SLICE;
            return BAD_CHUNK;
        }

        // Skip unknown chunk types
        // Since this comes last so we the, know it is a valid chunk, and is just a type we don't understand
        if (chunkType == UNKNOWN) {
            reportCorruption(length, "unknown record type");
            return BAD_CHUNK;
        }

        return chunkType;
    }

    /**
     * Reads the next physical block into {@code currentBlock}.
     *
     * @return true if the new block has readable bytes; false at end of file
     * or on a read error (in both cases {@code eof} is set)
     */
    private boolean readNextBlock()
    {
        if (eof) {
            return false;
        }

        // clear the block
        blockScratch.reset();

        int readSoFar = 0;
        // read the next full block
        while (blockScratch.writableBytes() > 0) {
            try {
                int bytesRead = sequentialFile.read(blockScratch.writableBytes(), blockScratch);
                if (bytesRead < 0) { //eof
                    // no more bytes to read
                    eof = true;
                    if (blockScratch.writableBytes() > 0 && readSoFar < HEADER_SIZE) {
                        // Note that if buffer_ is non-empty, we have a truncated header at the
                        // end of the file, which can be caused by the writer crashing in the
                        // middle of writing the header. Instead of considering this an error,
                        // just report EOF.
                        currentBlock = Slices.EMPTY_SLICE.input();
                        return false;
                    }
                    break;
                }
                readSoFar += bytesRead;
                endOfBufferOffset += bytesRead;
            }
            catch (IOException e) {
                currentBlock = Slices.EMPTY_SLICE.input();
                reportDrop(BLOCK_SIZE, e);
                eof = true;
                return false;
            }
        }
        currentBlock = blockScratch.slice().input();
        return currentBlock.isReadable();
    }

    /**
     * Reports corruption to the monitor.
     * The buffer must be updated to remove the dropped bytes prior to invocation.
     */
    private void reportCorruption(long bytes, String reason)
    {
        if (monitor != null) {
            monitor.corruption(bytes, reason);
        }
    }

    /**
     * Reports dropped bytes to the monitor.
     * The buffer must be updated to remove the dropped bytes prior to invocation.
     */
    private void reportDrop(long bytes, Throwable reason)
    {
        if (monitor != null) {
            monitor.corruption(bytes, reason);
        }
    }
}
/*
 * Copyright (C) 2011 the original author or authors.
 * Licensed under the Apache License, Version 2.0.
 */
package org.iq80.leveldb.impl;

import org.iq80.leveldb.env.WritableFile;
import org.iq80.leveldb.util.Slice;
import org.iq80.leveldb.util.SliceInput;
import org.iq80.leveldb.util.SliceOutput;
import org.iq80.leveldb.util.Slices;

import java.io.Closeable;
import java.io.IOException;
import java.util.concurrent.atomic.AtomicBoolean;

import static com.google.common.base.Preconditions.checkArgument;
import static com.google.common.base.Preconditions.checkState;
import static java.util.Objects.requireNonNull;
import static org.iq80.leveldb.impl.LogConstants.BLOCK_SIZE;
import static org.iq80.leveldb.impl.LogConstants.HEADER_SIZE;
import static org.iq80.leveldb.impl.Logs.getChunkChecksum;

/**
 * Appends records to a log file. The file is a sequence of fixed-size blocks,
 * each holding one or more length-prefixed, checksummed chunks. A record never
 * has a chunk split across a block boundary; a record larger than the space
 * left in the current block is fragmented into FIRST/MIDDLE/LAST chunks.
 * <p>
 * Not thread safe; callers must synchronize externally.
 */
public class LogWriter
        implements Closeable
{
    // Pre-zeroed scratch buffer used to pad out a block tail that is too
    // small to hold another chunk header.
    private static final byte[] ZERO_PADDING = new byte[HEADER_SIZE];

    private final WritableFile writableFile;
    private final long fileNumber;
    private final AtomicBoolean closed = new AtomicBoolean();

    /**
     * Current write offset within the current block.
     */
    private int blockOffset;

    private LogWriter(long fileNumber, WritableFile file)
    {
        requireNonNull(file, "file is null");
        checkArgument(fileNumber >= 0, "fileNumber is negative");
        this.fileNumber = fileNumber;
        this.writableFile = file;
    }

    private LogWriter(long fileNumber, WritableFile file, long destinationLength)
    {
        this(fileNumber, file);
        // Resume mid-block when appending to an existing log file.
        this.blockOffset = (int) (destinationLength % LogConstants.BLOCK_SIZE);
    }

    public static LogWriter createWriter(long fileNumber, WritableFile writableFile)
    {
        return new LogWriter(fileNumber, writableFile);
    }

    public static LogWriter createWriter(long fileNumber, WritableFile writableFile, long destinationLength)
    {
        return new LogWriter(fileNumber, writableFile, destinationLength);
    }

    /**
     * Closes the underlying file. Idempotent: only the first call closes
     * the file, as required by the {@link Closeable} contract.
     */
    @Override
    public void close()
            throws IOException
    {
        // Bug fix: guard with compareAndSet so a second close() is a no-op
        // instead of closing the underlying file twice.
        if (closed.compareAndSet(false, true)) {
            writableFile.close();
        }
    }

    public long getFileNumber()
    {
        return fileNumber;
    }

    /**
     * Writes {@code record} as a stream of chunks such that no chunk is split
     * across a block boundary.
     *
     * @param record payload to append; an empty record still produces a single
     *               zero-length chunk
     * @param force if true, the underlying file is forced/synced afterwards
     * @throws IOException on write failure
     * @throws IllegalStateException if this writer has been closed
     */
    public void addRecord(Slice record, boolean force)
            throws IOException
    {
        checkState(!closed.get(), "Log has been closed");

        SliceInput sliceInput = record.input();

        // used to track first, middle and last chunks
        boolean begin = true;

        // Fragment the record into chunks as necessary and write it. Note that
        // if the record is empty, we still want to iterate once to write a
        // single zero-length chunk.
        do {
            int bytesRemainingInBlock = BLOCK_SIZE - blockOffset;
            checkState(bytesRemainingInBlock >= 0);

            // Switch to a new block if the remainder cannot hold a header
            if (bytesRemainingInBlock < HEADER_SIZE) {
                if (bytesRemainingInBlock > 0) {
                    // Fill the rest of the block with zeros
                    writableFile.append(new Slice(ZERO_PADDING, 0, bytesRemainingInBlock));
                }
                blockOffset = 0;
                bytesRemainingInBlock = BLOCK_SIZE - blockOffset;
            }

            // Invariant: we never leave less than HEADER_SIZE bytes available in a block
            int bytesAvailableInBlock = bytesRemainingInBlock - HEADER_SIZE;
            checkState(bytesAvailableInBlock >= 0);

            // If there are more bytes in the record than available in the block,
            // fragment the record; otherwise write to the end of the record
            boolean end;
            int fragmentLength;
            if (sliceInput.available() > bytesAvailableInBlock) {
                end = false;
                fragmentLength = bytesAvailableInBlock;
            }
            else {
                end = true;
                fragmentLength = sliceInput.available();
            }

            // determine chunk type from the fragment's position in the record
            LogChunkType type;
            if (begin && end) {
                type = LogChunkType.FULL;
            }
            else if (begin) {
                type = LogChunkType.FIRST;
            }
            else if (end) {
                type = LogChunkType.LAST;
            }
            else {
                type = LogChunkType.MIDDLE;
            }

            // write the chunk
            writeChunk(type, sliceInput.readBytes(fragmentLength));

            // we are no longer on the first chunk
            begin = false;
        } while (sliceInput.isReadable());

        if (force) {
            writableFile.force();
        }
    }

    /**
     * Writes one header + payload pair; the payload must fit entirely within
     * the current block.
     */
    private void writeChunk(LogChunkType type, Slice slice)
            throws IOException
    {
        checkArgument(slice.length() <= 0xffff, "length %s is larger than two bytes", slice.length());
        checkArgument(blockOffset + HEADER_SIZE <= BLOCK_SIZE);

        // create header (checksum, little-endian length, type id)
        Slice header = newLogRecordHeader(type, slice, slice.length());

        // write the header and the payload
        writableFile.append(header);
        writableFile.append(slice);

        blockOffset += HEADER_SIZE + slice.length();
    }

    private static Slice newLogRecordHeader(LogChunkType type, Slice slice, int length)
    {
        // checksum covers the chunk type id followed by the payload
        int crc = getChunkChecksum(type.getPersistentId(), slice.getRawArray(), slice.getRawOffset(), length);

        // Format the header
        Slice header = Slices.allocate(HEADER_SIZE);
        SliceOutput sliceOutput = header.output();
        sliceOutput.writeInt(crc);
        sliceOutput.writeByte((byte) (length & 0xff)); // length, low byte
        sliceOutput.writeByte((byte) (length >>> 8)); // length, high byte
        sliceOutput.writeByte((byte) (type.getPersistentId()));

        return header;
    }

    @Override
    public String toString()
    {
        return "LogWriter{" +
                "writableFile=" + writableFile +
                ", fileNumber=" + fileNumber +
                '}';
    }
}
/*
 * Copyright (C) 2011 the original author or authors.
 * Licensed under the Apache License, Version 2.0.
 */
package org.iq80.leveldb.impl;

import org.iq80.leveldb.env.Env;
import org.iq80.leveldb.env.File;
import org.iq80.leveldb.env.WritableFile;
import org.iq80.leveldb.util.PureJavaCrc32C;
import org.iq80.leveldb.util.Slice;

import java.io.IOException;

/**
 * Static helpers for creating log writers and computing log chunk checksums.
 */
public final class Logs
{
    private Logs()
    {
        // static utility class, never instantiated
    }

    /**
     * Opens {@code file} through {@code env} and wraps it in a new log writer.
     */
    public static LogWriter createLogWriter(File file, long fileNumber, Env env)
            throws IOException
    {
        return LogWriter.createWriter(fileNumber, env.newWritableFile(file));
    }

    /**
     * Creates a log writer that resumes appending at {@code destinationLength}.
     */
    public static LogWriter createLogWriter(long fileNumber, WritableFile writableFile, long destinationLength)
            throws IOException
    {
        return LogWriter.createWriter(fileNumber, writableFile, destinationLength);
    }

    /**
     * Checksum of a whole slice; delegates to the array-based overload.
     */
    public static int getChunkChecksum(int chunkTypeId, Slice slice)
    {
        return getChunkChecksum(chunkTypeId, slice.getRawArray(), slice.getRawOffset(), slice.length());
    }

    /**
     * Computes the masked CRC32C of the chunk type id followed by the payload.
     */
    public static int getChunkChecksum(int chunkTypeId, byte[] buffer, int offset, int length)
    {
        PureJavaCrc32C checksum = new PureJavaCrc32C();
        checksum.update(chunkTypeId);
        checksum.update(buffer, offset, length);
        return checksum.getMaskedValue();
    }
}
+ * See the notice.md file distributed with this work for additional + * information regarding copyright ownership. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.iq80.leveldb.impl; + +import org.iq80.leveldb.util.Slice; + +public class LookupKey +{ + private final InternalKey key; + + public LookupKey(Slice userKey, long sequenceNumber) + { + key = new InternalKey(userKey, sequenceNumber, ValueType.VALUE); + } + + public InternalKey getInternalKey() + { + return key; + } + + public Slice getUserKey() + { + return key.getUserKey(); + } + + @Override + public String toString() + { + return key.toString(); + } +} diff --git a/leveldb/src/main/java/org/iq80/leveldb/impl/LookupResult.java b/leveldb/src/main/java/org/iq80/leveldb/impl/LookupResult.java new file mode 100644 index 0000000..db372dd --- /dev/null +++ b/leveldb/src/main/java/org/iq80/leveldb/impl/LookupResult.java @@ -0,0 +1,70 @@ +/* + * Copyright (C) 2011 the original author or authors. + * See the notice.md file distributed with this work for additional + * information regarding copyright ownership. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.iq80.leveldb.impl; + +import org.iq80.leveldb.util.Slice; + +import static java.util.Objects.requireNonNull; + +public class LookupResult +{ + public static LookupResult ok(LookupKey key, Slice value) + { + return new LookupResult(key, value, false); + } + + public static LookupResult deleted(LookupKey key) + { + return new LookupResult(key, null, true); + } + + private final LookupKey key; + private final Slice value; + private final boolean deleted; + + private LookupResult(LookupKey key, Slice value, boolean deleted) + { + requireNonNull(key, "key is null"); + this.key = key; + if (value != null) { + this.value = value.slice(); + } + else { + this.value = null; + } + this.deleted = deleted; + } + + public LookupKey getKey() + { + return key; + } + + public Slice getValue() + { + if (value == null) { + return null; + } + return value; + } + + public boolean isDeleted() + { + return deleted; + } +} diff --git a/leveldb/src/main/java/org/iq80/leveldb/impl/MemTable.java b/leveldb/src/main/java/org/iq80/leveldb/impl/MemTable.java new file mode 100644 index 0000000..095037f --- /dev/null +++ b/leveldb/src/main/java/org/iq80/leveldb/impl/MemTable.java @@ -0,0 +1,88 @@ +/* + * Copyright (C) 2011 the original author or authors. + * See the notice.md file distributed with this work for additional + * information regarding copyright ownership. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
/*
 * Copyright (C) 2011 the original author or authors.
 * Licensed under the Apache License, Version 2.0.
 */
package org.iq80.leveldb.impl;

import org.iq80.leveldb.iterator.MemTableIterator;
import org.iq80.leveldb.util.Slice;

import java.util.Map.Entry;
import java.util.concurrent.ConcurrentSkipListMap;
import java.util.concurrent.atomic.AtomicLong;

import static java.util.Objects.requireNonNull;
import static org.iq80.leveldb.util.SizeOf.SIZE_OF_LONG;

/**
 * In-memory write buffer: a skip list mapping {@link InternalKey} to value,
 * ordered by the supplied internal key comparator. Deletions are stored as
 * entries whose internal key carries {@link ValueType#DELETION}.
 */
public class MemTable
{
    private final ConcurrentSkipListMap<InternalKey, Slice> table;
    // rough byte accounting (key bytes + 8-byte sequence/type tag + value bytes)
    private final AtomicLong approximateMemoryUsage = new AtomicLong();

    public MemTable(InternalKeyComparator internalKeyComparator)
    {
        table = new ConcurrentSkipListMap<>(internalKeyComparator);
    }

    public boolean isEmpty()
    {
        return table.isEmpty();
    }

    public long approximateMemoryUsage()
    {
        return approximateMemoryUsage.get();
    }

    /**
     * Inserts an entry (or a deletion marker, when {@code valueType} is
     * DELETION) tagged with {@code sequenceNumber}.
     */
    public void add(long sequenceNumber, ValueType valueType, Slice key, Slice value)
    {
        requireNonNull(valueType, "valueType is null");
        requireNonNull(key, "key is null");
        // bug fix: the original checked valueType twice and never checked
        // value, which is dereferenced below
        requireNonNull(value, "value is null");

        InternalKey internalKey = new InternalKey(key, sequenceNumber, valueType);
        table.put(internalKey, value);

        approximateMemoryUsage.addAndGet(key.length() + SIZE_OF_LONG + value.length());
    }

    /**
     * Finds the newest entry for {@code key} visible at the key's sequence
     * number.
     *
     * @return a found/deleted result, or {@code null} when this memtable has
     *         no entry for the user key
     */
    public LookupResult get(LookupKey key)
    {
        requireNonNull(key, "key is null");

        InternalKey internalKey = key.getInternalKey();
        // ceilingEntry returns the first entry at or after the lookup key in
        // internal-key order; it only matches if the user keys are equal
        Entry<InternalKey, Slice> entry = table.ceilingEntry(internalKey);
        if (entry == null) {
            return null;
        }

        InternalKey entryKey = entry.getKey();
        if (entryKey.getUserKey().equals(key.getUserKey())) {
            if (entryKey.getValueType() == ValueType.DELETION) {
                return LookupResult.deleted(key);
            }
            return LookupResult.ok(key, entry.getValue());
        }
        return null;
    }

    public MemTableIterator iterator()
    {
        return new MemTableIterator(table);
    }
}
+ */ +package org.iq80.leveldb.impl; + +public class ReadStats +{ + private int seekFileLevel = -1; + private FileMetaData seekFile; + + ReadStats() + { + } + + ReadStats(int seekFileLevel, FileMetaData seekFile) + { + this.seekFileLevel = seekFileLevel; + this.seekFile = seekFile; + } + + public void clear() + { + seekFileLevel = -1; + seekFile = null; + } + + public int getSeekFileLevel() + { + return seekFileLevel; + } + + public void setSeekFileLevel(int seekFileLevel) + { + this.seekFileLevel = seekFileLevel; + } + + public FileMetaData getSeekFile() + { + return seekFile; + } + + public void setSeekFile(FileMetaData seekFile) + { + this.seekFile = seekFile; + } +} diff --git a/leveldb/src/main/java/org/iq80/leveldb/impl/SequenceNumber.java b/leveldb/src/main/java/org/iq80/leveldb/impl/SequenceNumber.java new file mode 100644 index 0000000..b46d742 --- /dev/null +++ b/leveldb/src/main/java/org/iq80/leveldb/impl/SequenceNumber.java @@ -0,0 +1,50 @@ +/* + * Copyright (C) 2011 the original author or authors. + * See the notice.md file distributed with this work for additional + * information regarding copyright ownership. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.iq80.leveldb.impl; + +import static com.google.common.base.Preconditions.checkArgument; +import static java.util.Objects.requireNonNull; + +public final class SequenceNumber +{ + // We leave eight bits empty at the bottom so a type and sequence# + // can be packed together into 64-bits. + public static final long MAX_SEQUENCE_NUMBER = ((0x1L << 56) - 1); + + private SequenceNumber() + { + } + + public static long packSequenceAndValueType(long sequence, ValueType valueType) + { + checkArgument(sequence <= MAX_SEQUENCE_NUMBER, "Sequence number is greater than MAX_SEQUENCE_NUMBER"); + requireNonNull(valueType, "valueType is null"); + + return (sequence << 8) | valueType.getPersistentId(); + } + + public static ValueType unpackValueType(long num) + { + return ValueType.getValueTypeByPersistentId((byte) num); + } + + public static long unpackSequenceNumber(long num) + { + return num >>> 8; + } +} diff --git a/leveldb/src/main/java/org/iq80/leveldb/impl/SnapshotList.java b/leveldb/src/main/java/org/iq80/leveldb/impl/SnapshotList.java new file mode 100644 index 0000000..8fd89ca --- /dev/null +++ b/leveldb/src/main/java/org/iq80/leveldb/impl/SnapshotList.java @@ -0,0 +1,132 @@ +/* + * Copyright (C) 2011 the original author or authors. + * See the notice.md file distributed with this work for additional + * information regarding copyright ownership. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
/*
 * Copyright (C) 2011 the original author or authors.
 * Licensed under the Apache License, Version 2.0.
 */
package org.iq80.leveldb.impl;

import org.iq80.leveldb.Snapshot;

import java.util.concurrent.locks.ReentrantLock;

import static com.google.common.base.Preconditions.checkArgument;
import static com.google.common.base.Preconditions.checkState;

/**
 * Snapshots are kept in a circular doubly-linked list in the DB.
 * Each Snapshot corresponds to a particular sequence number.
 */
final class SnapshotList
{
    private final ReentrantLock mutex;
    private final SnapshotNode list; // sentinel; the list is circular around it

    /**
     * Snapshot list where all operations are protected by {@code mutex}.
     * All {@code mutex} acquisitions must be done externally to ensure
     * sequence order.
     *
     * @param mutex protects concurrent read/write to this list
     */
    public SnapshotList(ReentrantLock mutex)
    {
        this.mutex = mutex;
        this.list = new SnapshotNode(0);
        this.list.next = this.list;
        this.list.prev = this.list;
    }

    /**
     * Track a new snapshot for {@code sequence}.
     *
     * @param sequence most recent version sequence available
     * @return a new tracked snapshot for {@code sequence}
     * @throws IllegalStateException if mutex is not held by current thread
     */
    public Snapshot newSnapshot(long sequence)
    {
        checkState(mutex.isHeldByCurrentThread());
        SnapshotNode node = new SnapshotNode(sequence);
        // insert at the tail, immediately before the sentinel
        node.next = this.list;
        node.prev = list.prev;
        node.prev.next = node;
        node.next.prev = node;
        return node;
    }

    /**
     * Return {@code true} if the list is empty.
     *
     * @throws IllegalStateException if mutex is not held by current thread
     */
    public boolean isEmpty()
    {
        checkState(mutex.isHeldByCurrentThread());
        return list.next == list;
    }

    /**
     * Return the oldest sequence number in this list.
     *
     * @throws IllegalStateException if mutex is not held by current thread
     *         or list is empty
     */
    public long getOldest()
    {
        checkState(mutex.isHeldByCurrentThread());
        checkState(!isEmpty());
        return list.next.number;
    }

    /**
     * Return the sequence corresponding to the given snapshot.
     *
     * @param snapshot snapshot to read from
     * @throws IllegalArgumentException if the snapshot's concrete type does
     *         not come from this list implementation
     * @throws IllegalStateException if mutex is not held by current thread
     */
    public long getSequenceFrom(Snapshot snapshot)
    {
        checkArgument(snapshot instanceof SnapshotNode);
        checkState(mutex.isHeldByCurrentThread());
        return ((SnapshotNode) snapshot).number;
    }

    private final class SnapshotNode
            implements Snapshot
    {
        private final long number;
        private SnapshotNode next;
        private SnapshotNode prev;

        private SnapshotNode(long number)
        {
            this.number = number;
        }

        @Override
        public void close()
        {
            mutex.lock();
            try {
                // bug fix: guard against double-close — re-unlinking through
                // stale neighbor pointers could corrupt the list once the
                // neighbors themselves have been removed
                if (next != null) {
                    prev.next = next;
                    next.prev = prev;
                    next = null;
                    prev = null;
                }
            }
            finally {
                mutex.unlock();
            }
        }
    }
}
/*
 * Copyright (C) 2011 the original author or authors.
 * Licensed under the Apache License, Version 2.0.
 */
package org.iq80.leveldb.impl;

import com.google.common.cache.CacheBuilder;
import com.google.common.cache.CacheLoader;
import com.google.common.cache.LoadingCache;
import com.google.common.cache.RemovalListener;
import org.iq80.leveldb.DBException;
import org.iq80.leveldb.Options;
import org.iq80.leveldb.ReadOptions;
import org.iq80.leveldb.env.Env;
import org.iq80.leveldb.env.File;
import org.iq80.leveldb.env.RandomInputFile;
import org.iq80.leveldb.iterator.InternalTableIterator;
import org.iq80.leveldb.table.BlockHandleSliceWeigher;
import org.iq80.leveldb.table.CacheKey;
import org.iq80.leveldb.table.FilterPolicy;
import org.iq80.leveldb.table.KeyValueFunction;
import org.iq80.leveldb.table.Table;
import org.iq80.leveldb.table.UserComparator;
import org.iq80.leveldb.util.Closeables;
import org.iq80.leveldb.util.ILRUCache;
import org.iq80.leveldb.util.LRUCache;
import org.iq80.leveldb.util.Slice;

import java.io.IOException;
import java.util.concurrent.ExecutionException;

import static java.util.Objects.requireNonNull;

/**
 * Cache of open {@link Table} handles keyed by file number, with an optional
 * shared block cache. Tables are opened lazily on first access and closed
 * when evicted from the cache.
 */
public class TableCache
{
    private final LoadingCache<Long, TableAndFile> cache;
    private final ILRUCache<CacheKey, Slice> blockCache; // null when block caching is disabled

    public TableCache(final File databaseDir,
                      int tableCacheSize,
                      final UserComparator userComparator,
                      final Options options, Env env)
    {
        // fix: message named the wrong parameter ("databaseName")
        requireNonNull(databaseDir, "databaseDir is null");
        // a cache size of 0 disables the shared block cache entirely
        blockCache = options.cacheSize() == 0 ? null : LRUCache.createCache((int) options.cacheSize(), new BlockHandleSliceWeigher());
        cache = CacheBuilder.newBuilder()
                .maximumSize(tableCacheSize)
                .removalListener((RemovalListener<Long, TableAndFile>) notification -> {
                    final TableAndFile value = notification.getValue();
                    if (value != null) {
                        final Table table = value.getTable();
                        try {
                            // end user is required to close resources/iterators;
                            // no need to rely on GC to collect files even for MM Files
                            table.close();
                        }
                        catch (IOException e) {
                            throw new DBException(e);
                        }
                    }
                })
                .build(new CacheLoader<Long, TableAndFile>()
                {
                    @Override
                    public TableAndFile load(Long fileNumber)
                            throws IOException
                    {
                        return new TableAndFile(databaseDir, fileNumber, userComparator, options, blockCache, env);
                    }
                });
    }

    public InternalTableIterator newIterator(FileMetaData file, ReadOptions options)
            throws IOException
    {
        return newIterator(file.getNumber(), options);
    }

    public InternalTableIterator newIterator(long number, ReadOptions options)
            throws IOException
    {
        try (Table table = getTable(number)) { // same as release
            return new InternalTableIterator(table.iterator(options)); // makes its own retain
        }
    }

    /**
     * Looks up {@code key} in the given table file and builds the result with
     * {@code resultBuilder}.
     */
    public <T> T get(ReadOptions options, Slice key, FileMetaData fileMetaData, KeyValueFunction<T> resultBuilder)
    {
        try (Table table = getTable(fileMetaData.getNumber())) { // same as release
            return table.internalGet(options, key, resultBuilder);
        }
        catch (Exception e) {
            throw new DBException(e);
        }
    }

    public long getApproximateOffsetOf(FileMetaData file, Slice key)
    {
        try (Table table = getTable(file.getNumber())) {
            return table.getApproximateOffsetOf(key);
        }
        catch (IOException e) {
            throw new DBException(e);
        }
    }

    /**
     * Fetches the table from the cache (loading it if absent) and retains it;
     * retries if the cached table was concurrently released to zero.
     */
    private Table getTable(long number)
    {
        Table table;
        try {
            do {
                table = cache.get(number).getTable();
            } while (!table.retain());
        }
        catch (ExecutionException e) {
            // unwrap the loader's failure when present
            Throwable cause = e.getCause() != null ? e.getCause() : e;
            throw new DBException("Could not open table " + number, cause);
        }
        return table;
    }

    public void close()
    {
        invalidateAll();
    }

    /**
     * Discards all entries in the table cache and block cache (if any).
     */
    public void invalidateAll()
    {
        if (blockCache != null) {
            blockCache.invalidateAll();
        }
        cache.invalidateAll();
    }

    public void evict(long number)
    {
        cache.invalidate(number);
    }

    /**
     * @return approximate memory used by the block cache, or 0 when block
     *         caching is disabled
     */
    public long getApproximateMemoryUsage()
    {
        // bug fix: blockCache is null when options.cacheSize() == 0; the
        // original dereferenced it unconditionally and threw NPE
        return blockCache == null ? 0 : blockCache.getApproximateMemoryUsage();
    }

    private static final class TableAndFile
    {
        private final Table table;

        private TableAndFile(File databaseDir, long fileNumber, UserComparator userComparator, Options options, ILRUCache<CacheKey, Slice> blockCache, Env env)
                throws IOException
        {
            final File tableFile = tableFileName(databaseDir, fileNumber);
            RandomInputFile source = env.newRandomAccessFile(tableFile);
            // wrapResource closes the source if Table construction fails
            table = Closeables.wrapResource(() -> {
                final FilterPolicy filterPolicy = (FilterPolicy) options.filterPolicy();
                return new Table(source, userComparator,
                        options.paranoidChecks(), blockCache, filterPolicy);
            }, source);
        }

        /**
         * Resolves the on-disk file: prefers the .ldb name, falling back to
         * the older .sst extension when the .ldb file is not readable.
         */
        private File tableFileName(File databaseDir, long fileNumber)
        {
            final String tableFileName = Filename.tableFileName(fileNumber);
            File tableFile = databaseDir.child(tableFileName);
            if (!tableFile.canRead()) {
                // attempt to open older .sst extension
                final String sstFileName = Filename.sstTableFileName(fileNumber);
                final File sstPath = databaseDir.child(sstFileName);
                if (sstPath.canRead()) {
                    tableFile = sstPath;
                }
            }
            return tableFile;
        }

        public Table getTable()
        {
            return table;
        }
    }
}
/*
 * Copyright (C) 2011 the original author or authors.
 * Licensed under the Apache License, Version 2.0.
 */

/**
 * Mutable single-value container, used to emulate C++-style reference
 * (out-parameter) modification.
 *
 * @param <V> type of the held value
 */
final class ValueHolder<V>
{
    private V value;

    /**
     * Creates a holder seeded with {@code value} (which may be {@code null}).
     */
    public ValueHolder(V value)
    {
        this.value = value;
    }

    /**
     * Replaces the held value.
     *
     * @param value new value to hold
     */
    public void setValue(V value)
    {
        this.value = value;
    }

    /**
     * Returns the currently held value.
     *
     * @return the held value, possibly {@code null}
     */
    public V getValue()
    {
        return value;
    }
}
/*
 * Copyright (C) 2011 the original author or authors.
 * Licensed under the Apache License, Version 2.0.
 */

/**
 * Persistent entry kind stored in an internal key: a live value or a
 * deletion marker. The persistent id is the byte written to disk.
 */
public enum ValueType
{
    DELETION(0x00),
    VALUE(0x01);

    private final int persistentId;

    ValueType(int persistentId)
    {
        this.persistentId = persistentId;
    }

    /**
     * Maps an on-disk id back to its enum constant.
     *
     * @throws IllegalArgumentException for ids with no matching constant
     */
    public static ValueType getValueTypeByPersistentId(int persistentId)
    {
        for (ValueType type : values()) {
            if (type.persistentId == persistentId) {
                return type;
            }
        }
        throw new IllegalArgumentException("Unknown persistentId " + persistentId);
    }

    public int getPersistentId()
    {
        return persistentId;
    }
}
+ */ +package org.iq80.leveldb.impl; + +import com.google.common.collect.ImmutableList; +import com.google.common.collect.ImmutableList.Builder; +import com.google.common.collect.ImmutableMultimap; +import com.google.common.collect.Multimap; +import org.iq80.leveldb.ReadOptions; +import org.iq80.leveldb.iterator.InternalIterator; +import org.iq80.leveldb.util.SafeListBuilder; +import org.iq80.leveldb.util.Slice; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Collection; +import java.util.List; +import java.util.concurrent.atomic.AtomicInteger; + +import static com.google.common.base.Preconditions.checkArgument; +import static com.google.common.base.Preconditions.checkPositionIndex; +import static com.google.common.collect.Ordering.natural; +import static org.iq80.leveldb.impl.DbConstants.MAX_MEM_COMPACT_LEVEL; +import static org.iq80.leveldb.impl.DbConstants.NUM_LEVELS; +import static org.iq80.leveldb.impl.SequenceNumber.MAX_SEQUENCE_NUMBER; + +// todo this class should be immutable +public class Version +{ + private final AtomicInteger retained = new AtomicInteger(1); + private final VersionSet versionSet; + private final List levels; + + // move these mutable fields somewhere else + private int compactionLevel; + private double compactionScore; + private FileMetaData fileToCompact; + private int fileToCompactLevel; + + public Version(VersionSet versionSet) + { + this.versionSet = versionSet; + checkArgument(NUM_LEVELS > 1, "levels must be at least 2"); + Builder builder = ImmutableList.builder(); + for (int i = 0; i < NUM_LEVELS; i++) { + List files = new ArrayList<>(); + builder.add(new Level(i, files, getTableCache(), getInternalKeyComparator())); + } + this.levels = builder.build(); + } + + public void assertNoOverlappingFiles(int level) + { + if (level > 0) { + Collection files = getFiles(level); + if (files != null) { + long previousFileNumber = 0; + InternalKey previousEnd = null; + for (FileMetaData fileMetaData : files) { + 
if (previousEnd != null) { + checkArgument(getInternalKeyComparator().compare( + previousEnd, + fileMetaData.getSmallest() + ) < 0, "Overlapping files %s and %s in level %s", previousFileNumber, fileMetaData.getNumber(), level); + } + + previousFileNumber = fileMetaData.getNumber(); + previousEnd = fileMetaData.getLargest(); + } + } + } + } + + public VersionSet getVersionSet() + { + return versionSet; + } + + private TableCache getTableCache() + { + return versionSet.getTableCache(); + } + + public final InternalKeyComparator getInternalKeyComparator() + { + return versionSet.getInternalKeyComparator(); + } + + public int getCompactionLevel() + { + return compactionLevel; + } + + public void setCompactionLevel(int compactionLevel) + { + this.compactionLevel = compactionLevel; + } + + public double getCompactionScore() + { + return compactionScore; + } + + public void setCompactionScore(double compactionScore) + { + this.compactionScore = compactionScore; + } + + List getLevelIterators(ReadOptions options) throws IOException + { + try (SafeListBuilder builder = SafeListBuilder.builder()) { + for (Level level : levels) { + if (!level.getFiles().isEmpty()) { + builder.add(level.iterator(options)); + } + } + return builder.build(); + } + } + + public LookupResult get(ReadOptions options, LookupKey key, ReadStats readStats) + { + // We can search level-by-level since entries never hop across + // levels. Therefore we are guaranteed that if we find data + // in a smaller level, later levels are irrelevant. 
+ LookupResult lookupResult = null; + ReadStats lastStats = new ReadStats(); + for (Level level : levels) { + lookupResult = level.get(options, key, readStats, lastStats); + if (lookupResult != null) { + break; + } + } + return lookupResult; + } + + int pickLevelForMemTableOutput(Slice smallestUserKey, Slice largestUserKey) + { + int level = 0; + if (!overlapInLevel(0, smallestUserKey, largestUserKey)) { + // Push to next level if there is no overlap in next level, + // and the #bytes overlapping in the level after that are limited. + InternalKey start = new InternalKey(smallestUserKey, MAX_SEQUENCE_NUMBER, ValueType.VALUE); + InternalKey limit = new InternalKey(largestUserKey, 0, ValueType.DELETION); + while (level < MAX_MEM_COMPACT_LEVEL) { + if (overlapInLevel(level + 1, smallestUserKey, largestUserKey)) { + break; + } + if (level + 2 < DbConstants.NUM_LEVELS) { + // Check that file does not overlap too many grandparent bytes. + long sum = Compaction.totalFileSize(versionSet.getOverlappingInputs(level + 2, start, limit)); + if (sum > versionSet.maxGrandParentOverlapBytes()) { + break; + } + } + level++; + } + } + return level; + } + + public boolean overlapInLevel(int level, Slice smallestUserKey, Slice largestUserKey) + { + checkPositionIndex(level, levels.size(), "Invalid level"); + return levels.get(level).someFileOverlapsRange(level > 0, smallestUserKey, largestUserKey); + } + + public int numberOfLevels() + { + return levels.size(); + } + + public int numberOfFilesInLevel(int level) + { + return getFiles(level).size(); + } + + public Multimap getFiles() + { + ImmutableMultimap.Builder builder = ImmutableMultimap.builder(); + builder = builder.orderKeysBy(natural()); + for (Level level : levels) { + builder.putAll(level.getLevelNumber(), level.getFiles()); + } + return builder.build(); + } + + public List getFiles(int level) + { + return levels.get(level).getFiles(); + } + + public void addFile(int level, FileMetaData fileMetaData) + { + 
levels.get(level).addFile(fileMetaData); + } + + public boolean updateStats(ReadStats readStats) + { + final int seekFileLevel = readStats.getSeekFileLevel(); + final FileMetaData seekFile = readStats.getSeekFile(); + + if (seekFile == null) { + return false; + } + + seekFile.decrementAllowedSeeks(); + if (seekFile.getAllowedSeeks() <= 0 && fileToCompact == null) { + fileToCompact = seekFile; + fileToCompactLevel = seekFileLevel; + return true; + } + return false; + } + + public FileMetaData getFileToCompact() + { + return fileToCompact; + } + + public int getFileToCompactLevel() + { + return fileToCompactLevel; + } + + public long getApproximateOffsetOf(InternalKey key) + { + long result = 0; + for (int level = 0; level < NUM_LEVELS; level++) { + for (FileMetaData fileMetaData : getFiles(level)) { + if (getInternalKeyComparator().compare(fileMetaData.getLargest(), key) <= 0) { + // Entire file is before "ikey", so just add the file size + result += fileMetaData.getFileSize(); + } + else if (getInternalKeyComparator().compare(fileMetaData.getSmallest(), key) > 0) { + // Entire file is after "ikey", so ignore + if (level > 0) { + // Files other than level 0 are sorted by meta.smallest, so + // no further files in this level will contain data for + // "ikey". + break; + } + } + else { + // "ikey" falls in the range for this table. Add the + // approximate offset of "ikey" within the table. + result += getTableCache().getApproximateOffsetOf(fileMetaData, key.encode()); + } + } + } + return result; + } + + public void retain() + { + int was = retained.getAndIncrement(); + assert was > 0 : "Version was retain after it was disposed."; + } + + public void release() + { + int now = retained.decrementAndGet(); + assert now >= 0 : "Version was released after it was disposed."; + if (now == 0) { + // The version is now disposed. 
+ versionSet.removeVersion(this); + } + } + + public boolean recordReadSample(InternalKey internalKey) + { + // Holds first matching file + ReadStats readStats = null; + for (int level = 0; level < NUM_LEVELS; ++level) { + for (FileMetaData file : levels.get(level).getFilesForKey(internalKey.getUserKey(), internalKey)) { + if (readStats != null) { + // Must have at least two matches since we want to merge across + // files. But what if we have a single file that contains many + // overwrites and deletions? Should we have another mechanism for + // finding such files? + return updateStats(readStats); + } + else { + // Remember first match + readStats = new ReadStats(level, file); + } + } + } + return false; + } + + @Override + public String toString() + { + final StringBuilder r = new StringBuilder(); + for (Level level : levels) { + r.append("--- level "); + r.append(level); + r.append(" ---").append(System.lineSeparator()); + for (FileMetaData file : level.getFiles()) { + r.append(" ").append(file.getNumber()).append(";"); + r.append(file.getFileSize()).append("["); + r.append(file.getSmallest()); + r.append(" .. "); + r.append(file.getLargest()); + r.append("]").append(System.lineSeparator()); + } + } + return r.toString(); + } +} diff --git a/leveldb/src/main/java/org/iq80/leveldb/impl/VersionEdit.java b/leveldb/src/main/java/org/iq80/leveldb/impl/VersionEdit.java new file mode 100644 index 0000000..f7a6fae --- /dev/null +++ b/leveldb/src/main/java/org/iq80/leveldb/impl/VersionEdit.java @@ -0,0 +1,183 @@ +/* + * Copyright (C) 2011 the original author or authors. + * See the notice.md file distributed with this work for additional + * information regarding copyright ownership. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.iq80.leveldb.impl; + +import com.google.common.collect.ArrayListMultimap; +import com.google.common.collect.ImmutableMultimap; +import com.google.common.collect.Multimap; +import org.iq80.leveldb.util.DynamicSliceOutput; +import org.iq80.leveldb.util.Slice; +import org.iq80.leveldb.util.SliceInput; +import org.iq80.leveldb.util.VariableLengthQuantity; + +import java.util.Map; +import java.util.TreeMap; + +public class VersionEdit +{ + private String comparatorName; + private Long logNumber; + private Long nextFileNumber; + private Long previousLogNumber; + private Long lastSequenceNumber; + private final Map compactPointers = new TreeMap<>(); + private final Multimap newFiles = ArrayListMultimap.create(); + private final Multimap deletedFiles = ArrayListMultimap.create(); + + public VersionEdit() + { + } + + public VersionEdit(Slice slice) + { + SliceInput sliceInput = slice.input(); + while (sliceInput.isReadable()) { + int i = VariableLengthQuantity.readVariableLengthInt(sliceInput); + VersionEditTag tag = VersionEditTag.getValueTypeByPersistentId(i); + tag.readValue(sliceInput, this); + } + } + + public String getComparatorName() + { + return comparatorName; + } + + public void setComparatorName(String comparatorName) + { + this.comparatorName = comparatorName; + } + + public Long getLogNumber() + { + return logNumber; + } + + public void setLogNumber(long logNumber) + { + this.logNumber = logNumber; + } + + public Long getNextFileNumber() + { + return nextFileNumber; + } + + public void setNextFileNumber(long nextFileNumber) + 
{ + this.nextFileNumber = nextFileNumber; + } + + public Long getPreviousLogNumber() + { + return previousLogNumber; + } + + public void setPreviousLogNumber(long previousLogNumber) + { + this.previousLogNumber = previousLogNumber; + } + + public Long getLastSequenceNumber() + { + return lastSequenceNumber; + } + + public void setLastSequenceNumber(long lastSequenceNumber) + { + this.lastSequenceNumber = lastSequenceNumber; + } + + public Map getCompactPointers() + { + return Map.copyOf(compactPointers); + } + + public void setCompactPointer(int level, InternalKey key) + { + compactPointers.put(level, key); + } + + public void setCompactPointers(Map compactPointers) + { + this.compactPointers.putAll(compactPointers); + } + + public Multimap getNewFiles() + { + return ImmutableMultimap.copyOf(newFiles); + } + + // Add the specified file at the specified level. + // REQUIRES: This version has not been saved (see VersionSet::SaveTo) + // REQUIRES: "smallest" and "largest" are smallest and largest keys in file + public void addFile(int level, long fileNumber, + long fileSize, + InternalKey smallest, + InternalKey largest) + { + FileMetaData fileMetaData = new FileMetaData(fileNumber, fileSize, smallest, largest); + addFile(level, fileMetaData); + } + + public void addFile(int level, FileMetaData fileMetaData) + { + newFiles.put(level, fileMetaData); + } + + public void addFiles(Multimap files) + { + newFiles.putAll(files); + } + + public Multimap getDeletedFiles() + { + return ImmutableMultimap.copyOf(deletedFiles); + } + + // Delete the specified "file" from the specified "level". 
+ public void deleteFile(int level, long fileNumber) + { + deletedFiles.put(level, fileNumber); + } + + public Slice encode() + { + DynamicSliceOutput dynamicSliceOutput = new DynamicSliceOutput(4096); + for (VersionEditTag versionEditTag : VersionEditTag.values()) { + versionEditTag.writeValue(dynamicSliceOutput, this); + } + return dynamicSliceOutput.slice(); + } + + @Override + public String toString() + { + StringBuilder sb = new StringBuilder(); + sb.append("VersionEdit"); + sb.append("{comparatorName='").append(comparatorName).append('\''); + sb.append(", logNumber=").append(logNumber); + sb.append(", previousLogNumber=").append(previousLogNumber); + sb.append(", lastSequenceNumber=").append(lastSequenceNumber); + sb.append(", compactPointers=").append(compactPointers); + sb.append(", newFiles=").append(newFiles); + sb.append(", deletedFiles=").append(deletedFiles); + sb.append('}'); + return sb.toString(); + } +} diff --git a/leveldb/src/main/java/org/iq80/leveldb/impl/VersionEditTag.java b/leveldb/src/main/java/org/iq80/leveldb/impl/VersionEditTag.java new file mode 100644 index 0000000..55984c7 --- /dev/null +++ b/leveldb/src/main/java/org/iq80/leveldb/impl/VersionEditTag.java @@ -0,0 +1,264 @@ +/* + * Copyright (C) 2011 the original author or authors. + * See the notice.md file distributed with this work for additional + * information regarding copyright ownership. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.iq80.leveldb.impl; + +import org.iq80.leveldb.util.SliceInput; +import org.iq80.leveldb.util.SliceOutput; +import org.iq80.leveldb.util.VariableLengthQuantity; + +import java.util.Map.Entry; + +import static java.nio.charset.StandardCharsets.UTF_8; +import static org.iq80.leveldb.util.Slices.readLengthPrefixedBytes; +import static org.iq80.leveldb.util.Slices.writeLengthPrefixedBytes; + +public enum VersionEditTag +{ + // 8 is no longer used. It was used for large value refs. + + COMPARATOR(1) + { + @Override + public void readValue(SliceInput sliceInput, VersionEdit versionEdit) + { + byte[] bytes = new byte[VariableLengthQuantity.readVariableLengthInt(sliceInput)]; + sliceInput.readBytes(bytes); + versionEdit.setComparatorName(new String(bytes, UTF_8)); + } + + @Override + public void writeValue(SliceOutput sliceOutput, VersionEdit versionEdit) + { + String comparatorName = versionEdit.getComparatorName(); + if (comparatorName != null) { + VariableLengthQuantity.writeVariableLengthInt(getPersistentId(), sliceOutput); + byte[] bytes = comparatorName.getBytes(UTF_8); + VariableLengthQuantity.writeVariableLengthInt(bytes.length, sliceOutput); + sliceOutput.writeBytes(bytes); + } + } + }, + LOG_NUMBER(2) + { + @Override + public void readValue(SliceInput sliceInput, VersionEdit versionEdit) + { + versionEdit.setLogNumber(VariableLengthQuantity.readVariableLengthLong(sliceInput)); + } + + @Override + public void writeValue(SliceOutput sliceOutput, VersionEdit versionEdit) + { + Long logNumber = versionEdit.getLogNumber(); + if (logNumber != null) { + VariableLengthQuantity.writeVariableLengthInt(getPersistentId(), sliceOutput); + VariableLengthQuantity.writeVariableLengthLong(logNumber, sliceOutput); + } + } + }, + + PREVIOUS_LOG_NUMBER(9) + { + @Override + public void readValue(SliceInput sliceInput, VersionEdit versionEdit) + { + long previousLogNumber = VariableLengthQuantity.readVariableLengthLong(sliceInput); + 
versionEdit.setPreviousLogNumber(previousLogNumber); + } + + @Override + public void writeValue(SliceOutput sliceOutput, VersionEdit versionEdit) + { + Long previousLogNumber = versionEdit.getPreviousLogNumber(); + if (previousLogNumber != null) { + VariableLengthQuantity.writeVariableLengthInt(getPersistentId(), sliceOutput); + VariableLengthQuantity.writeVariableLengthLong(previousLogNumber, sliceOutput); + } + } + }, + + NEXT_FILE_NUMBER(3) + { + @Override + public void readValue(SliceInput sliceInput, VersionEdit versionEdit) + { + versionEdit.setNextFileNumber(VariableLengthQuantity.readVariableLengthLong(sliceInput)); + } + + @Override + public void writeValue(SliceOutput sliceOutput, VersionEdit versionEdit) + { + Long nextFileNumber = versionEdit.getNextFileNumber(); + if (nextFileNumber != null) { + VariableLengthQuantity.writeVariableLengthInt(getPersistentId(), sliceOutput); + VariableLengthQuantity.writeVariableLengthLong(nextFileNumber, sliceOutput); + } + } + }, + + LAST_SEQUENCE(4) + { + @Override + public void readValue(SliceInput sliceInput, VersionEdit versionEdit) + { + versionEdit.setLastSequenceNumber(VariableLengthQuantity.readVariableLengthLong(sliceInput)); + } + + @Override + public void writeValue(SliceOutput sliceOutput, VersionEdit versionEdit) + { + Long lastSequenceNumber = versionEdit.getLastSequenceNumber(); + if (lastSequenceNumber != null) { + VariableLengthQuantity.writeVariableLengthInt(getPersistentId(), sliceOutput); + VariableLengthQuantity.writeVariableLengthLong(lastSequenceNumber, sliceOutput); + } + } + }, + + COMPACT_POINTER(5) + { + @Override + public void readValue(SliceInput sliceInput, VersionEdit versionEdit) + { + // level + int level = VariableLengthQuantity.readVariableLengthInt(sliceInput); + + // internal key + InternalKey internalKey = new InternalKey(readLengthPrefixedBytes(sliceInput)); + + versionEdit.setCompactPointer(level, internalKey); + } + + @Override + public void writeValue(SliceOutput sliceOutput, 
VersionEdit versionEdit) + { + for (Entry entry : versionEdit.getCompactPointers().entrySet()) { + VariableLengthQuantity.writeVariableLengthInt(getPersistentId(), sliceOutput); + + // level + VariableLengthQuantity.writeVariableLengthInt(entry.getKey(), sliceOutput); + + // internal key + writeLengthPrefixedBytes(sliceOutput, entry.getValue().encode()); + } + } + }, + + DELETED_FILE(6) + { + @Override + public void readValue(SliceInput sliceInput, VersionEdit versionEdit) + { + // level + int level = VariableLengthQuantity.readVariableLengthInt(sliceInput); + + // file number + long fileNumber = VariableLengthQuantity.readVariableLengthLong(sliceInput); + + versionEdit.deleteFile(level, fileNumber); + } + + @Override + public void writeValue(SliceOutput sliceOutput, VersionEdit versionEdit) + { + for (Entry entry : versionEdit.getDeletedFiles().entries()) { + VariableLengthQuantity.writeVariableLengthInt(getPersistentId(), sliceOutput); + + // level + VariableLengthQuantity.writeVariableLengthInt(entry.getKey(), sliceOutput); + + // file number + VariableLengthQuantity.writeVariableLengthLong(entry.getValue(), sliceOutput); + } + } + }, + + NEW_FILE(7) + { + @Override + public void readValue(SliceInput sliceInput, VersionEdit versionEdit) + { + // level + int level = VariableLengthQuantity.readVariableLengthInt(sliceInput); + + // file number + long fileNumber = VariableLengthQuantity.readVariableLengthLong(sliceInput); + + // file size + long fileSize = VariableLengthQuantity.readVariableLengthLong(sliceInput); + + // smallest key + InternalKey smallestKey = new InternalKey(readLengthPrefixedBytes(sliceInput)); + + // largest key + InternalKey largestKey = new InternalKey(readLengthPrefixedBytes(sliceInput)); + + versionEdit.addFile(level, fileNumber, fileSize, smallestKey, largestKey); + } + + @Override + public void writeValue(SliceOutput sliceOutput, VersionEdit versionEdit) + { + for (Entry entry : versionEdit.getNewFiles().entries()) { + 
VariableLengthQuantity.writeVariableLengthInt(getPersistentId(), sliceOutput); + + // level + VariableLengthQuantity.writeVariableLengthInt(entry.getKey(), sliceOutput); + + // file number + FileMetaData fileMetaData = entry.getValue(); + VariableLengthQuantity.writeVariableLengthLong(fileMetaData.getNumber(), sliceOutput); + + // file size + VariableLengthQuantity.writeVariableLengthLong(fileMetaData.getFileSize(), sliceOutput); + + // smallest key + writeLengthPrefixedBytes(sliceOutput, fileMetaData.getSmallest().encode()); + + // smallest key + writeLengthPrefixedBytes(sliceOutput, fileMetaData.getLargest().encode()); + } + } + }; + + public static VersionEditTag getValueTypeByPersistentId(int persistentId) + { + for (VersionEditTag compressionType : VersionEditTag.values()) { + if (compressionType.persistentId == persistentId) { + return compressionType; + } + } + throw new IllegalArgumentException(String.format("Unknown %s persistentId %d", VersionEditTag.class.getSimpleName(), persistentId)); + } + + private final int persistentId; + + VersionEditTag(int persistentId) + { + this.persistentId = persistentId; + } + + public int getPersistentId() + { + return persistentId; + } + + public abstract void readValue(SliceInput sliceInput, VersionEdit versionEdit); + + public abstract void writeValue(SliceOutput sliceOutput, VersionEdit versionEdit); +} diff --git a/leveldb/src/main/java/org/iq80/leveldb/impl/VersionSet.java b/leveldb/src/main/java/org/iq80/leveldb/impl/VersionSet.java new file mode 100644 index 0000000..4a26c5e --- /dev/null +++ b/leveldb/src/main/java/org/iq80/leveldb/impl/VersionSet.java @@ -0,0 +1,1065 @@ +/* + * Copyright (C) 2011 the original author or authors. + * See the notice.md file distributed with this work for additional + * information regarding copyright ownership. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.iq80.leveldb.impl; + +import com.google.common.base.Joiner; +import com.google.common.base.Preconditions; +import com.google.common.collect.ComparisonChain; +import com.google.common.collect.ImmutableList; +import com.google.common.collect.ImmutableSortedSet; +import com.google.common.collect.MapMaker; +import com.google.common.collect.Maps; +import org.iq80.leveldb.DBException; +import org.iq80.leveldb.Options; +import org.iq80.leveldb.ReadOptions; +import org.iq80.leveldb.env.Env; +import org.iq80.leveldb.env.File; +import org.iq80.leveldb.env.SequentialFile; +import org.iq80.leveldb.iterator.InternalIterator; +import org.iq80.leveldb.iterator.MergingIterator; +import org.iq80.leveldb.table.UserComparator; +import org.iq80.leveldb.util.SafeListBuilder; +import org.iq80.leveldb.util.Slice; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Collection; +import java.util.Collections; +import java.util.Comparator; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Map.Entry; +import java.util.Set; +import java.util.SortedSet; +import java.util.TreeMap; +import java.util.TreeSet; +import java.util.concurrent.atomic.AtomicLong; +import java.util.concurrent.locks.ReentrantLock; + +import static com.google.common.base.Preconditions.checkArgument; +import static com.google.common.base.Preconditions.checkState; +import static java.util.Objects.requireNonNull; +import static org.iq80.leveldb.impl.DbConstants.NUM_LEVELS; +import static 
org.iq80.leveldb.impl.LogMonitors.throwExceptionMonitor; + +public class VersionSet +{ + private static final int L0_COMPACTION_TRIGGER = 4; + + private final AtomicLong nextFileNumber = new AtomicLong(2); + private long manifestFileNumber = 1; + private Version current; + private long lastSequence; + private long logNumber; + private long prevLogNumber; + + private final Map activeVersions = new MapMaker().weakKeys().makeMap(); + private final Options options; + private final File databaseDir; + private final TableCache tableCache; + private final InternalKeyComparator internalKeyComparator; + private final Env env; + + private LogWriter descriptorLog; + private final Map compactPointers = new TreeMap<>(); + + public VersionSet(Options options, File databaseDir, TableCache tableCache, InternalKeyComparator internalKeyComparator, Env env) + throws IOException + { + this.options = options; + this.databaseDir = databaseDir; + this.tableCache = tableCache; + this.internalKeyComparator = internalKeyComparator; + this.env = env; + appendVersion(new Version(this)); + + initializeIfNeeded(); + } + + private void initializeIfNeeded() + throws IOException + { + File currentFile = databaseDir.child(Filename.currentFileName()); + + if (!currentFile.exists()) { + VersionEdit edit = new VersionEdit(); + edit.setComparatorName(internalKeyComparator.name()); + edit.setLogNumber(prevLogNumber); + edit.setNextFileNumber(nextFileNumber.get()); + edit.setLastSequenceNumber(lastSequence); + + LogWriter log = Logs.createLogWriter(databaseDir.child(Filename.descriptorFileName(manifestFileNumber)), manifestFileNumber, env); + try { + writeSnapshot(log); + log.addRecord(edit.encode(), false); + } + finally { + log.close(); + } + + Filename.setCurrentFile(databaseDir, log.getFileNumber(), env); + } + } + + public void release() + throws IOException + { + if (descriptorLog != null) { + descriptorLog.close(); + descriptorLog = null; + } + + Version t = current; + if (t != null) { + current = 
null; + t.release(); + } + + Set versions = activeVersions.keySet(); + if (versions.size() > 0) { + options.logger().log("DB closed with %s open snapshots. This could mean your application has a resource leak.", versions.size()); + } + } + + private void appendVersion(Version version) + { + requireNonNull(version, "version is null"); + checkArgument(version != current, "version is the current version"); + Version previous = current; + current = version; //version already retained, create with retained = 1 + activeVersions.put(version, new Object()); + if (previous != null) { + previous.release(); + } + } + + public void removeVersion(Version version) + { + requireNonNull(version, "version is null"); + checkArgument(version != current, "version is the current version"); + boolean removed = activeVersions.remove(version) != null; + assert removed : "Expected the version to still be in the active set"; + } + + public InternalKeyComparator getInternalKeyComparator() + { + return internalKeyComparator; + } + + public TableCache getTableCache() + { + return tableCache; + } + + public Version getCurrent() + { + return current; + } + + public long getManifestFileNumber() + { + return manifestFileNumber; + } + + public long getNextFileNumber() + { + return nextFileNumber.getAndIncrement(); + } + + public long getLogNumber() + { + return logNumber; + } + + public long getPrevLogNumber() + { + return prevLogNumber; + } + + public MergingIterator makeInputIterator(Compaction c) throws IOException + { + ReadOptions rOptions = new ReadOptions(); + rOptions.verifyChecksums(this.options.paranoidChecks()); + rOptions.fillCache(false); + + // Level-0 files have to be merged together. For other levels, + // we will make a concatenating iterator per level. 
+ // TODO(opt): use concatenating iterator for level-0 if there is no overlap + try (SafeListBuilder list = SafeListBuilder.builder()) { + for (int which = 0; which < 2; which++) { + List files = c.input(which); + if (!files.isEmpty()) { + if (c.getLevel() + which == 0) { + try (SafeListBuilder builder = SafeListBuilder.builder()) { + for (FileMetaData file : files) { + builder.add(tableCache.newIterator(file, rOptions)); + } + list.add(new MergingIterator(builder.build(), internalKeyComparator)); + } + } + else { + // Create concatenating iterator for the files from this level + list.add(Level.createLevelConcatIterator(tableCache, files, internalKeyComparator, rOptions)); + } + } + } + return new MergingIterator(list.build(), internalKeyComparator); + } + } + + public boolean overlapInLevel(int level, Slice smallestUserKey, Slice largestUserKey) + { + return current.overlapInLevel(level, smallestUserKey, largestUserKey); + } + + public int numberOfFilesInLevel(int level) + { + return current.numberOfFilesInLevel(level); + } + + public long numberOfBytesInLevel(int level) + { + return current.numberOfFilesInLevel(level); + } + + public long getLastSequence() + { + return lastSequence; + } + + public void setLastSequence(long newLastSequence) + { + checkArgument(newLastSequence >= lastSequence, "Expected newLastSequence to be greater than or equal to current lastSequence"); + this.lastSequence = newLastSequence; + } + + public void logAndApply(VersionEdit edit, ReentrantLock mutex) + throws IOException + { + if (edit.getLogNumber() != null) { + checkArgument(edit.getLogNumber() >= logNumber); + checkArgument(edit.getLogNumber() < nextFileNumber.get()); + } + else { + edit.setLogNumber(logNumber); + } + + if (edit.getPreviousLogNumber() == null) { + edit.setPreviousLogNumber(prevLogNumber); + } + + edit.setNextFileNumber(nextFileNumber.get()); + edit.setLastSequenceNumber(lastSequence); + + Version version = new Version(this); + try (Builder builder = new 
Builder(this, current)) { + builder.apply(edit); + builder.saveTo(version); + } + + finalizeVersion(version); + + boolean createdNewManifest = false; + final long mFileNumber = manifestFileNumber; + try { + // Initialize new descriptor log file if necessary by creating + // a temporary file that contains a snapshot of the current version. + if (descriptorLog == null) { + // No reason to unlock mutex here since we only hit this path in the + // first call to logAndApply (when opening the database). + edit.setNextFileNumber(nextFileNumber.get()); + descriptorLog = Logs.createLogWriter(databaseDir.child(Filename.descriptorFileName(mFileNumber)), mFileNumber, env); + writeSnapshot(descriptorLog); + createdNewManifest = true; + } + // Unlock during expensive MANIFEST log write + mutex.unlock(); + try { + // Write new record to MANIFEST log + Slice record = edit.encode(); + descriptorLog.addRecord(record, true); + + // If we just created a new descriptor file, install it by writing a + // new CURRENT file that points to it. 
+ if (createdNewManifest) { + Filename.setCurrentFile(databaseDir, mFileNumber, env); + } + } + finally { + mutex.lock(); + } + } + catch (IOException e) { + options.logger().log("MANIFEST write: %s", e); + // New manifest file was not installed, so clean up state and delete the file + if (createdNewManifest) { + descriptorLog.close(); + // todo add delete method to LogWriter + databaseDir.child(Filename.logFileName(mFileNumber)).delete(); + descriptorLog = null; + } + throw e; + } + + // Install the new version + appendVersion(version); + logNumber = edit.getLogNumber(); + prevLogNumber = edit.getPreviousLogNumber(); + } + + private void writeSnapshot(LogWriter log) + throws IOException + { + // Save metadata + VersionEdit edit = new VersionEdit(); + edit.setComparatorName(internalKeyComparator.name()); + + // Save compaction pointers + edit.setCompactPointers(compactPointers); + + // Save files + edit.addFiles(current.getFiles()); + + Slice record = edit.encode(); + log.addRecord(record, false); + } + + /** + * @return {@code true} if manifest should be saved, {@code false} otherwise + */ + public boolean recover() + throws IOException + { + // Read "CURRENT" file, which contains a pointer to the current manifest file + final String descriptorName = Filename.getCurrentFile(databaseDir, env); + + // open file channel + final File descriptorFile = databaseDir.child(descriptorName); + try (SequentialFile in = env.newSequentialFile(descriptorFile)) { + // read log edit log + Long nextFileNumber = null; + Long lastSequence = null; + Long logNumber = null; + Long prevLogNumber = null; + Builder builder = new Builder(this, current); + + LogReader reader = new LogReader(in, throwExceptionMonitor(), true, 0); + for (Slice record = reader.readRecord(); record != null; record = reader.readRecord()) { + // read version edit + VersionEdit edit = new VersionEdit(record); + + // verify comparator + String editComparator = edit.getComparatorName(); + String userComparator = 
internalKeyComparator.name(); + checkArgument(editComparator == null || editComparator.equals(userComparator), + "Expected user comparator %s to match existing database comparator ", userComparator, editComparator); + + // apply edit + builder.apply(edit); + + // save edit values for verification below + logNumber = coalesce(edit.getLogNumber(), logNumber); + prevLogNumber = coalesce(edit.getPreviousLogNumber(), prevLogNumber); + nextFileNumber = coalesce(edit.getNextFileNumber(), nextFileNumber); + lastSequence = coalesce(edit.getLastSequenceNumber(), lastSequence); + } + + List problems = new ArrayList<>(); + if (nextFileNumber == null) { + problems.add("Descriptor does not contain a meta-nextfile entry"); + } + if (logNumber == null) { + problems.add("Descriptor does not contain a meta-lognumber entry"); + } + if (lastSequence == null) { + problems.add("Descriptor does not contain a last-sequence-number entry"); + } + if (!problems.isEmpty()) { + throw new DBException("Corruption: \n\t" + Joiner.on("\n\t").join(problems)); + } + + if (prevLogNumber == null) { + prevLogNumber = 0L; + } + markFileNumberUsed(prevLogNumber); + markFileNumberUsed(logNumber); + + Version newVersion = new Version(this); + builder.saveTo(newVersion); + builder.close(); + + // Install recovered version + finalizeVersion(newVersion); + + appendVersion(newVersion); + manifestFileNumber = nextFileNumber; + this.nextFileNumber.set(nextFileNumber + 1); + this.lastSequence = lastSequence; + this.logNumber = logNumber; + this.prevLogNumber = prevLogNumber; + if (reuseManifest(descriptorFile)) { + // No need to save manifest + return false; + } + else { + return true; + } + } + } + + void markFileNumberUsed(long number) + { + long current; + while ((current = nextFileNumber.get()) <= number) { + if (nextFileNumber.compareAndSet(current, number + 1)) { + break; + } + } + } + + private boolean reuseManifest(File currentFile) + { + if (!options.reuseLogs()) { + return false; + } + Filename.FileInfo 
fileInfo = Filename.parseFileName(currentFile); + if (fileInfo == null || + fileInfo.getFileType() != Filename.FileType.DESCRIPTOR || + // Make new compacted MANIFEST if old one is too big + currentFile.length() >= targetFileSize()) { + return false; + } + Preconditions.checkState(descriptorLog == null, "descriptor log should be null"); + try { + descriptorLog = LogWriter.createWriter(fileInfo.getFileNumber(), env.newAppendableFile(currentFile)); + } + catch (Exception e) { + assert descriptorLog == null; + options.logger().log("Reuse MANIFEST: %s", e); + return false; + } + + options.logger().log("Reusing MANIFEST %s", currentFile); + this.manifestFileNumber = fileInfo.getFileNumber(); + return true; + } + + private void finalizeVersion(Version version) + { + // Precomputed best level for next compaction + int bestLevel = -1; + double bestScore = -1; + + for (int level = 0; level < version.numberOfLevels() - 1; level++) { + double score; + if (level == 0) { + // We treat level-0 specially by bounding the number of files + // instead of number of bytes for two reasons: + // + // (1) With larger write-buffer sizes, it is nice not to do too + // many level-0 compactions. + // + // (2) The files in level-0 are merged on every read and + // therefore we wish to avoid too many files when the individual + // file size is small (perhaps because of a small write-buffer + // setting, or very high compression ratios, or lots of + // overwrites/deletions). + score = 1.0 * version.numberOfFilesInLevel(level) / L0_COMPACTION_TRIGGER; + } + else { + // Compute the ratio of current size to size limit. 
+ long levelBytes = 0; + for (FileMetaData fileMetaData : version.getFiles(level)) { + levelBytes += fileMetaData.getFileSize(); + } + score = 1.0 * levelBytes / maxBytesForLevel(level); + } + + if (score > bestScore) { + bestLevel = level; + bestScore = score; + } + } + + version.setCompactionLevel(bestLevel); + version.setCompactionScore(bestScore); + } + + private static V coalesce(V... values) + { + for (V value : values) { + if (value != null) { + return value; + } + } + return null; + } + + public List getLiveFiles() + { + ImmutableList.Builder builder = ImmutableList.builder(); + for (Version activeVersion : activeVersions.keySet()) { + builder.addAll(activeVersion.getFiles().values()); + } + return builder.build(); + } + + public long targetFileSize() + { + return options.maxFileSize(); + } + + /** + * Maximum bytes of overlaps in grandparent (i.e., level+2) before we + * stop building a single file in a level->level+1 compaction. + */ + public long maxGrandParentOverlapBytes() + { + return 10L * targetFileSize(); + } + + /** + * Maximum number of bytes in all compacted files. We avoid expanding + * the lower level file set of a compaction if it would make the + * total compaction cover more than this many bytes. + */ + public long expandedCompactionByteSizeLimit() + { + return 25L * targetFileSize(); + } + + private double maxBytesForLevel(int level) + { + // Note: the result for level zero is not really used since we set + // the level-0 compaction threshold based on number of files. 
+ + // Result for both level-0 and level-1 + double result = 10 * 1048576.0; + while (level > 1) { + result *= 10; + level--; + } + return result; + } + + public long maxFileSizeForLevel() + { + return targetFileSize(); + } + + public long totalFileSize(List files) + { + long sum = 0; + for (FileMetaData file : files) { + sum += file.getFileSize(); + } + return sum; + } + + public boolean needsCompaction() + { + return current.getCompactionScore() >= 1 || current.getFileToCompact() != null; + } + + public Compaction compactRange(int level, InternalKey begin, InternalKey end) + { + List levelInputs = getOverlappingInputs(level, begin, end); + if (levelInputs.isEmpty()) { + return null; + } + + return setupOtherInputs(level, levelInputs); + } + + public Compaction pickCompaction() + { + // We prefer compactions triggered by too much data in a level over + // the compactions triggered by seeks. + boolean sizeCompaction = (current.getCompactionScore() >= 1); + boolean seekCompaction = (current.getFileToCompact() != null); + + int level; + List levelInputs; + if (sizeCompaction) { + level = current.getCompactionLevel(); + checkState(level >= 0); + checkState(level + 1 < NUM_LEVELS); + + // Pick the first file that comes after compact_pointer_[level] + levelInputs = new ArrayList<>(); + for (FileMetaData fileMetaData : current.getFiles(level)) { + if (!compactPointers.containsKey(level) || + internalKeyComparator.compare(fileMetaData.getLargest(), compactPointers.get(level)) > 0) { + levelInputs.add(fileMetaData); + break; + } + } + if (levelInputs.isEmpty()) { + // Wrap-around to the beginning of the key space + levelInputs.add(current.getFiles(level).get(0)); + } + } + else if (seekCompaction) { + level = current.getFileToCompactLevel(); + levelInputs = List.of(current.getFileToCompact()); + } + else { + return null; + } + + // Files in level 0 may overlap each other, so pick up all overlapping ones + if (level == 0) { + Entry range = getRange(levelInputs); + // Note 
that the next call will discard the file we placed in + // c->inputs_[0] earlier and replace it with an overlapping set + // which will include the picked file. + levelInputs = getOverlappingInputs(0, range.getKey(), range.getValue()); + + checkState(!levelInputs.isEmpty()); + } + + return setupOtherInputs(level, levelInputs); + } + + /** + * find the largest key in a vector of files. + * + * @return {@link InternalKey} if {@code files} is no empty, {@code null} otherwise + */ + private static InternalKey findLargestKey(InternalKeyComparator internalKeyComparator, List files) + { + if (files.isEmpty()) { + return null; + } + InternalKey largestKey = files.get(0).getLargest(); + for (FileMetaData file : files) { + if (internalKeyComparator.compare(file.getLargest(), largestKey) > 0) { + largestKey = file.getLargest(); + } + } + return largestKey; + } + + /** + * find minimum file b2=(l2, u2) in level file for which l2 > u1 and userKey(l2) = userKey(u1) + */ + private static FileMetaData findSmallestBoundaryFile(InternalKeyComparator internalKeyComparator, List levelFiles, + InternalKey largestKey) + { + UserComparator userComparator = internalKeyComparator.getUserComparator(); + FileMetaData smallestBoundaryFile = null; + for (FileMetaData f : levelFiles) { + if (internalKeyComparator.compare(f.getSmallest(), largestKey) > 0 && + userComparator.compare(f.getSmallest().getUserKey(), largestKey.getUserKey()) == 0) { + if (smallestBoundaryFile == null || + internalKeyComparator.compare(f.getSmallest(), smallestBoundaryFile.getSmallest()) < 0) { + smallestBoundaryFile = f; + } + } + } + return smallestBoundaryFile; + } + + /** + * Extracts the largest file b1 from {@code compactionFiles} and then searches for a + * b2 in {@code levelFiles} for which userKey(u1) = userKey(l2). If it finds such a + * file b2 (known as a boundary file), adds it to {@code compactionFiles} and then + * searches again using this new upper bound. + *

+ * If there are two blocks, b1=(l1, u1) and b2=(l2, u2) and + * userKey(u1) = userKey(l2), and if we compact b1 but not b2 then a + * subsequent get operation will yield an incorrect result because it will + * return the record from b2 in level i rather than from b1 because it searches + * level by level for records matching the supplied user key. + * + * @param internalKeyComparator internal key comparator + * @param levelFiles List of files to search for boundary files. + * @param compactionFiles in/out List of files to extend by adding boundary files. + */ + static void addBoundaryInputs(InternalKeyComparator internalKeyComparator, List levelFiles, + List compactionFiles) + { + InternalKey largestKey = findLargestKey(internalKeyComparator, compactionFiles); + if (largestKey == null) { + return; + } + + while (true) { + FileMetaData smallestBoundaryFile = + findSmallestBoundaryFile(internalKeyComparator, levelFiles, largestKey); + + // if a boundary file was found advance largestKey, otherwise we're done + if (smallestBoundaryFile != null) { + compactionFiles.add(smallestBoundaryFile); + largestKey = smallestBoundaryFile.getLargest(); + } + else { + break; + } + } + } + + private Compaction setupOtherInputs(int level, List levelInputs) + { + addBoundaryInputs(internalKeyComparator, current.getFiles(level), levelInputs); + Entry range = getRange(levelInputs); + InternalKey smallest = range.getKey(); + InternalKey largest = range.getValue(); + + List levelUpInputs = getOverlappingInputs(level + 1, smallest, largest); + addBoundaryInputs(internalKeyComparator, current.getFiles(level + 1), levelUpInputs); + + // Get entire range covered by compaction + range = getRange(levelInputs, levelUpInputs); + InternalKey allStart = range.getKey(); + InternalKey allLimit = range.getValue(); + + // See if we can grow the number of inputs in "level" without + // changing the number of "level+1" files we pick up. 
+ if (!levelUpInputs.isEmpty()) { + List expanded0 = getOverlappingInputs(level, allStart, allLimit); + addBoundaryInputs(internalKeyComparator, current.getFiles(level), expanded0); + long levelInputSize = totalFileSize(levelInputs); + long levelUpInputSize = totalFileSize(levelUpInputs); + long expanded0Size = totalFileSize(expanded0); + + if (expanded0.size() > levelInputs.size() + && levelUpInputSize + expanded0Size < expandedCompactionByteSizeLimit()) { + range = getRange(expanded0); + InternalKey newStart = range.getKey(); + InternalKey newLimit = range.getValue(); + + List expanded1 = getOverlappingInputs(level + 1, newStart, newLimit); + addBoundaryInputs(internalKeyComparator, current.getFiles(level + 1), expanded1); + if (expanded1.size() == levelUpInputs.size()) { + options.logger().log( + "Expanding@%s %s+%s (%s+%s bytes) to %s+%s (%s+%s bytes)", + level, + levelInputs.size(), + levelUpInputs.size(), + levelInputSize, levelUpInputSize, + expanded0.size(), + expanded1.size(), + expanded0Size, levelUpInputSize); + largest = newLimit; + levelInputs = expanded0; + levelUpInputs = expanded1; + + range = getRange(levelInputs, levelUpInputs); + allStart = range.getKey(); + allLimit = range.getValue(); + } + } + } + + // Compute the set of grandparent files that overlap this compaction + // (parent == level+1; grandparent == level+2) + final List grandparents; + if (level + 2 < NUM_LEVELS) { + grandparents = getOverlappingInputs(level + 2, allStart, allLimit); + } + else { + grandparents = Collections.emptyList(); + } + + Compaction compaction = new Compaction(current, level, maxFileSizeForLevel(), levelInputs, levelUpInputs, grandparents); + + // Update the place where we will do the next compaction for this level. + // We update this immediately instead of waiting for the VersionEdit + // to be applied so that if the compaction fails, we will try a different + // key range next time. 
+ compactPointers.put(level, largest); + compaction.getEdit().setCompactPointer(level, largest); + + return compaction; + } + + List getOverlappingInputs(int level, InternalKey begin, InternalKey end) + { + Preconditions.checkArgument(level >= 0 && level <= DbConstants.NUM_LEVELS, "Invalid level value %s", level); + List inputs = new ArrayList<>(); + Slice userBegin = begin == null ? null : begin.getUserKey(); + Slice userEnd = end == null ? null : end.getUserKey(); + UserComparator userComparator = internalKeyComparator.getUserComparator(); + List filesInLevel = current.getFiles(level); + for (int i = 0; i < filesInLevel.size(); i++) { + FileMetaData fileMetaData = filesInLevel.get(i); + Slice fileStart = fileMetaData.getSmallest().getUserKey(); + Slice fileLimit = fileMetaData.getLargest().getUserKey(); + if (begin != null && userComparator.compare(fileLimit, userBegin) < 0) { + // "files1" is completely before specified range; skip it + } + else if (end != null && userComparator.compare(fileStart, userEnd) > 0) { + // "files1" is completely after specified range; skip it + } + else { + inputs.add(fileMetaData); + if (level == 0) { + // Level-0 files may overlap each other. So check if the newly + // added file has expanded the range. If so, restart search. + if (begin != null && userComparator.compare(fileStart, userBegin) < 0) { + userBegin = fileStart; + inputs.clear(); + i = -1; + } + else if (end != null && userComparator.compare(fileLimit, userEnd) > 0) { + userEnd = fileLimit; + inputs.clear(); + i = -1; + } + } + } + } + return inputs; + } + + private Entry getRange(List... 
inputLists) + { + InternalKey smallest = null; + InternalKey largest = null; + for (List inputList : inputLists) { + for (FileMetaData fileMetaData : inputList) { + if (smallest == null) { + smallest = fileMetaData.getSmallest(); + largest = fileMetaData.getLargest(); + } + else { + if (internalKeyComparator.compare(fileMetaData.getSmallest(), smallest) < 0) { + smallest = fileMetaData.getSmallest(); + } + if (internalKeyComparator.compare(fileMetaData.getLargest(), largest) > 0) { + largest = fileMetaData.getLargest(); + } + } + } + } + return Maps.immutableEntry(smallest, largest); + } + + public long getMaxNextLevelOverlappingBytes() + { + long result = 0; + for (int level = 1; level < NUM_LEVELS - 1; level++) { + for (FileMetaData fileMetaData : current.getFiles(level)) { + List overlaps = getOverlappingInputs(level + 1, fileMetaData.getSmallest(), fileMetaData.getLargest()); + long totalSize = 0; + for (FileMetaData overlap : overlaps) { + totalSize += overlap.getFileSize(); + } + result = Math.max(result, totalSize); + } + } + return result; + } + + public CharSequence levelSummary() + { + StringBuilder sb = new StringBuilder(); + sb.append("files[ "); + for (int level = 0; level < NUM_LEVELS; level++) { + sb.append(" "); + sb.append(current.getFiles(level).size()); + } + sb.append(" ]"); + return sb; + } + + /** + * A helper class so we can efficiently apply a whole sequence + * of edits to a particular state without creating intermediate + * Versions that contain full copies of the intermediate state. 
+ */ + private static class Builder + implements AutoCloseable + { + private final VersionSet versionSet; + private final Version baseVersion; + private final List levels; + + private Builder(VersionSet versionSet, Version baseVersion) + { + this.versionSet = versionSet; + this.baseVersion = baseVersion; + baseVersion.retain(); + + levels = new ArrayList<>(baseVersion.numberOfLevels()); + for (int i = 0; i < baseVersion.numberOfLevels(); i++) { + levels.add(new LevelState(versionSet.internalKeyComparator)); + } + } + + /** + * Apply the specified edit to the current state. + */ + public void apply(VersionEdit edit) + { + // Update compaction pointers + for (Entry entry : edit.getCompactPointers().entrySet()) { + Integer level = entry.getKey(); + InternalKey internalKey = entry.getValue(); + versionSet.compactPointers.put(level, internalKey); + } + + // Delete files + for (Entry entry : edit.getDeletedFiles().entries()) { + Integer level = entry.getKey(); + Long fileNumber = entry.getValue(); + levels.get(level).deletedFiles.add(fileNumber); + } + + // Add new files + for (Entry entry : edit.getNewFiles().entries()) { + Integer level = entry.getKey(); + FileMetaData fileMetaData = entry.getValue(); + + // We arrange to automatically compact this file after + // a certain number of seeks. Let's assume: + // (1) One seek costs 10ms + // (2) Writing or reading 1MB costs 10ms (100MB/s) + // (3) A compaction of 1MB does 25MB of IO: + // 1MB read from this level + // 10-12MB read from next level (boundaries may be misaligned) + // 10-12MB written to next level + // This implies that 25 seeks cost the same as the compaction + // of 1MB of data. I.e., one seek costs approximately the + // same as the compaction of 40KB of data. We are a little + // conservative and allow approximately one seek for every 16KB + // of data before triggering a compaction. 
+ int allowedSeeks = (int) (fileMetaData.getFileSize() / 16384); + if (allowedSeeks < 100) { + allowedSeeks = 100; + } + fileMetaData.setAllowedSeeks(allowedSeeks); + + levels.get(level).deletedFiles.remove(fileMetaData.getNumber()); + levels.get(level).addedFiles.add(fileMetaData); + } + } + + /** + * Saves the current state in specified version. + */ + public void saveTo(Version version) + throws IOException + { + FileMetaDataBySmallestKey cmp = new FileMetaDataBySmallestKey(versionSet.internalKeyComparator); + for (int level = 0; level < baseVersion.numberOfLevels(); level++) { + // Merge the set of added files with the set of pre-existing files. + // Drop any deleted files. Store the result in *v. + + Collection baseFiles = baseVersion.getFiles(level); + if (baseFiles == null) { + baseFiles = List.of(); + } + SortedSet addedFiles = levels.get(level).addedFiles; + if (addedFiles == null) { + addedFiles = ImmutableSortedSet.of(); + } + + // files must be added in sorted order so assertion check in maybeAddFile works + ArrayList sortedFiles = new ArrayList<>(baseFiles.size() + addedFiles.size()); + sortedFiles.addAll(baseFiles); + sortedFiles.addAll(addedFiles); + Collections.sort(sortedFiles, cmp); + + for (FileMetaData fileMetaData : sortedFiles) { + maybeAddFile(version, level, fileMetaData); + } + + //#ifndef NDEBUG todo + // Make sure there is no overlap in levels > 0 + version.assertNoOverlappingFiles(level); + //#endif + } + } + + private void maybeAddFile(Version version, int level, FileMetaData fileMetaData) + throws IOException + { + if (levels.get(level).deletedFiles.contains(fileMetaData.getNumber())) { + // File is deleted: do nothing + } + else { + List files = version.getFiles(level); + if (level > 0 && !files.isEmpty()) { + // Must not overlap + boolean filesOverlap = versionSet.internalKeyComparator.compare(files.get(files.size() - 1).getLargest(), fileMetaData.getSmallest()) >= 0; + if (filesOverlap) { + // A memory compaction, while this 
compaction was running, resulted in a a database state that is + // incompatible with the compaction. This is rare and expensive to detect while the compaction is + // running, so we catch here simply discard the work. + throw new IOException(String.format("Compaction is obsolete: Overlapping files %s and %s in level %s", + files.get(files.size() - 1).getNumber(), + fileMetaData.getNumber(), level)); + } + } + version.addFile(level, fileMetaData); + } + } + + @Override + public void close() + { + baseVersion.release(); + } + + private static class FileMetaDataBySmallestKey + implements Comparator + { + private final InternalKeyComparator internalKeyComparator; + + private FileMetaDataBySmallestKey(InternalKeyComparator internalKeyComparator) + { + this.internalKeyComparator = internalKeyComparator; + } + + @Override + public int compare(FileMetaData f1, FileMetaData f2) + { + return ComparisonChain + .start() + .compare(f1.getSmallest(), f2.getSmallest(), internalKeyComparator) + .compare(f1.getNumber(), f2.getNumber()) + .result(); + } + } + + private static class LevelState + { + private final SortedSet addedFiles; + private final Set deletedFiles = new HashSet<>(); + + public LevelState(InternalKeyComparator internalKeyComparator) + { + addedFiles = new TreeSet<>(new FileMetaDataBySmallestKey(internalKeyComparator)); + } + + @Override + public String toString() + { + final StringBuilder sb = new StringBuilder(); + sb.append("LevelState"); + sb.append("{addedFiles=").append(addedFiles); + sb.append(", deletedFiles=").append(deletedFiles); + sb.append('}'); + return sb.toString(); + } + } + } +} diff --git a/leveldb/src/main/java/org/iq80/leveldb/impl/WriteBatchImpl.java b/leveldb/src/main/java/org/iq80/leveldb/impl/WriteBatchImpl.java new file mode 100644 index 0000000..b0849df --- /dev/null +++ b/leveldb/src/main/java/org/iq80/leveldb/impl/WriteBatchImpl.java @@ -0,0 +1,125 @@ +/* + * Copyright (C) 2011 the original author or authors. 
+ * See the notice.md file distributed with this work for additional + * information regarding copyright ownership. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.iq80.leveldb.impl; + +import com.google.common.collect.Maps; +import org.iq80.leveldb.WriteBatch; +import org.iq80.leveldb.util.Slice; +import org.iq80.leveldb.util.Slices; + +import java.util.ArrayList; +import java.util.List; +import java.util.Map.Entry; + +import static java.util.Objects.requireNonNull; + +public class WriteBatchImpl + implements WriteBatch +{ + // WriteBatch header has an 8-byte sequence number followed by a 4-byte count. 
+ private static final int HEADER_SIZE = 12; + + private final List> batch = new ArrayList<>(); + private int approximateSize; + + @Override + public int getApproximateSize() + { + return approximateSize; + } + + @Override + public int size() + { + return batch.size(); + } + + @Override + public WriteBatchImpl put(byte[] key, byte[] value) + { + requireNonNull(key, "key is null"); + requireNonNull(value, "value is null"); + batch.add(Maps.immutableEntry(Slices.wrappedBuffer(key), Slices.wrappedBuffer(value))); + approximateSize += HEADER_SIZE + key.length + value.length; + return this; + } + + public WriteBatchImpl put(Slice key, Slice value) + { + requireNonNull(key, "key is null"); + requireNonNull(value, "value is null"); + batch.add(Maps.immutableEntry(key, value)); + approximateSize += HEADER_SIZE + key.length() + value.length(); + return this; + } + + @Override + public WriteBatchImpl delete(byte[] key) + { + requireNonNull(key, "key is null"); + batch.add(Maps.immutableEntry(Slices.wrappedBuffer(key), (Slice) null)); + approximateSize += 6 + key.length; + return this; + } + + public WriteBatchImpl delete(Slice key) + { + requireNonNull(key, "key is null"); + batch.add(Maps.immutableEntry(key, (Slice) null)); + approximateSize += 6 + key.length(); + return this; + } + + @Override + public void close() + { + } + + public void forEach(Handler handler) + { + for (Entry entry : batch) { + Slice key = entry.getKey(); + Slice value = entry.getValue(); + if (value != null) { + handler.put(key, value); + } + else { + handler.delete(key); + } + } + } + + public void append(WriteBatchImpl batch) + { + this.batch.addAll(batch.batch); + this.approximateSize += batch.approximateSize; + } + + public void clear() + { + approximateSize = 0; + batch.clear(); + } + + public interface Handler + { + void put(Slice key, Slice value); + + void delete(Slice key); + } +} diff --git a/leveldb/src/main/java/org/iq80/leveldb/iterator/ASeekingIterator.java 
b/leveldb/src/main/java/org/iq80/leveldb/iterator/ASeekingIterator.java new file mode 100644 index 0000000..c4b20b4 --- /dev/null +++ b/leveldb/src/main/java/org/iq80/leveldb/iterator/ASeekingIterator.java @@ -0,0 +1,179 @@ +/* + * Copyright (C) 2011 the original author or authors. + * See the notice.md file distributed with this work for additional + * information regarding copyright ownership. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.iq80.leveldb.iterator; + +import org.iq80.leveldb.DBException; + +import java.io.IOException; +import java.util.NoSuchElementException; + +/** + * Seeking Iterator base implementation that ensure proper state validation before + * each call and implement shared direction management between iterator implementations. 
+ * + * @param type of the key + * @param type of the value + */ +public abstract class ASeekingIterator implements SeekingIterator +{ + private static final String RELEASED_EXCEPTION = "Illegal use of iterator after release"; + private Direction direction = Direction.START_OF_ITERATOR; + + @Override + public final boolean valid() + { + return direction.isValid(); + } + + @Override + public final boolean seekToFirst() + { + if (direction == Direction.RELEASED) { + throw new DBException(RELEASED_EXCEPTION); + } + if (internalSeekToFirst()) { + direction = Direction.FORWARD; + return true; + } + this.direction = Direction.END_OF_ITERATOR; + return false; + } + + @Override + public final boolean seekToLast() + { + if (direction == Direction.RELEASED) { + throw new DBException(RELEASED_EXCEPTION); + } + if (internalSeekToLast()) { + direction = Direction.REVERSE; + return true; + } + this.direction = Direction.START_OF_ITERATOR; + return false; + } + + @Override + public final boolean seek(K key) + { + if (direction == Direction.RELEASED) { + throw new DBException(RELEASED_EXCEPTION); + } + if (internalSeek(key)) { + direction = Direction.FORWARD; + return true; + } + direction = Direction.END_OF_ITERATOR; + return false; + } + + @Override + public final boolean next() + { + switch (direction) { + case START_OF_ITERATOR: + return seekToFirst(); + case RELEASED: + throw new DBException(RELEASED_EXCEPTION); + case END_OF_ITERATOR: + return false; + } + boolean switchDirection = direction == Direction.REVERSE; + if (internalNext(switchDirection)) { + if (switchDirection) { + direction = Direction.FORWARD; + } + return true; + } + direction = Direction.END_OF_ITERATOR; + return false; + } + + @Override + public final boolean prev() + { + switch (direction) { + case RELEASED: + throw new DBException(RELEASED_EXCEPTION); + case START_OF_ITERATOR: + return false; + case END_OF_ITERATOR: + return seekToLast(); + } + boolean switchDirection = direction == Direction.FORWARD; + 
if (internalPrev(switchDirection)) { + if (switchDirection) { + direction = Direction.REVERSE; + } + return true; + } + direction = Direction.START_OF_ITERATOR; + return false; + } + + @Override + public final K key() + { + if (!direction.isValid()) { + throw new NoSuchElementException(); + } + return internalKey(); + } + + @Override + public final V value() + { + if (!direction.isValid()) { + throw new NoSuchElementException(); + } + return internalValue(); + } + + @Override + public final void close() + { + if (direction != Direction.RELEASED) { + direction = Direction.RELEASED; + try { + internalClose(); + } + catch (IOException e) { + throw new DBException(e); + } + } + else { + throw new DBException("Releasing iterator more than once"); + } + } + + protected abstract void internalClose() throws IOException; + + protected abstract boolean internalSeek(K key); + + protected abstract boolean internalNext(boolean switchDirection); + + protected abstract boolean internalPrev(boolean switchDirection); + + protected abstract boolean internalSeekToFirst(); + + protected abstract boolean internalSeekToLast(); + + protected abstract V internalValue(); + + protected abstract K internalKey(); +} diff --git a/leveldb/src/main/java/org/iq80/leveldb/iterator/DBIteratorAdapter.java b/leveldb/src/main/java/org/iq80/leveldb/iterator/DBIteratorAdapter.java new file mode 100644 index 0000000..1280ce9 --- /dev/null +++ b/leveldb/src/main/java/org/iq80/leveldb/iterator/DBIteratorAdapter.java @@ -0,0 +1,232 @@ +/* + * Copyright (C) 2011 the original author or authors. + * See the notice.md file distributed with this work for additional + * information regarding copyright ownership. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.iq80.leveldb.iterator; + +import org.iq80.leveldb.DBException; +import org.iq80.leveldb.DBIterator; +import org.iq80.leveldb.util.Slice; +import org.iq80.leveldb.util.Slices; + +import java.util.Map.Entry; +import java.util.NoSuchElementException; +import java.util.concurrent.atomic.AtomicBoolean; + +import static java.util.Objects.requireNonNull; + +public class DBIteratorAdapter + implements DBIterator +{ + private static final String ILLEGAL_STATE = "Illegal use of iterator after release"; + private final SnapshotSeekingIterator seekingIterator; + private final AtomicBoolean closed = new AtomicBoolean(false); + private Direction direction = Direction.FORWARD; + private DbEntry elem; + + public DBIteratorAdapter(SnapshotSeekingIterator seekingIterator) + { + this.seekingIterator = seekingIterator; + } + + @Override + public void seekToFirst() + { + if (direction == Direction.RELEASED) { + throw new DBException(ILLEGAL_STATE); + } + direction = Direction.FORWARD; + elem = seekingIterator.seekToFirst() ? new DbEntry(seekingIterator.key(), seekingIterator.value()) : null; + } + + @Override + public void seek(byte[] targetKey) + { + if (direction == Direction.RELEASED) { + throw new DBException(ILLEGAL_STATE); + } + direction = Direction.FORWARD; + elem = seekingIterator.seek(Slices.wrappedBuffer(targetKey)) ? 
new DbEntry(seekingIterator.key(), seekingIterator.value()) : null; + } + + @Override + public boolean hasNext() + { + if (direction == Direction.RELEASED) { + throw new DBException(ILLEGAL_STATE); + } + if (direction != Direction.FORWARD) { + elem = null; + direction = Direction.FORWARD; + } + if (elem == null) { + elem = seekingIterator.next() ? new DbEntry(seekingIterator.key(), seekingIterator.value()) : null; + } + return elem != null; + } + + @Override + public DbEntry next() + { + if (!hasNext()) { + throw new NoSuchElementException(); + } + DbEntry elem = this.elem; + this.elem = null; + return elem; + } + + @Override + public DbEntry peekNext() + { + if (!hasNext()) { + throw new NoSuchElementException(); + } + return elem; + } + + @Override + public void close() + { + // This is an end user API.. he might screw up and close multiple times. + // but we don't want the close multiple times as reference counts go bad. + if (closed.compareAndSet(false, true)) { + direction = Direction.RELEASED; + seekingIterator.close(); + } + } + + @Override + public void remove() + { + throw new UnsupportedOperationException(); + } + + @Override + public void seekToLast() + { + if (direction == Direction.RELEASED) { + throw new DBException(ILLEGAL_STATE); + } + direction = Direction.REVERSE; + elem = seekingIterator.seekToLast() ? new DbEntry(seekingIterator.key(), seekingIterator.value()) : null; + } + + @Override + public boolean hasPrev() + { + if (direction == Direction.RELEASED) { + throw new DBException(ILLEGAL_STATE); + } + if (direction != Direction.REVERSE) { + elem = null; + direction = Direction.REVERSE; + } + if (elem == null) { + elem = seekingIterator.prev() ? 
new DbEntry(seekingIterator.key(), seekingIterator.value()) : null; + } + return elem != null; + } + + @Override + public DbEntry prev() + { + if (!hasPrev()) { + throw new NoSuchElementException(); + } + DbEntry elem = this.elem; + this.elem = null; + return elem; + } + + @Override + public DbEntry peekPrev() + { + if (!hasPrev()) { + throw new NoSuchElementException(); + } + return this.elem; + } + + public static class DbEntry + implements Entry + { + private final Slice key; + private final Slice value; + + public DbEntry(Slice key, Slice value) + { + requireNonNull(key, "key is null"); + requireNonNull(value, "value is null"); + this.key = key; + this.value = value; + } + + @Override + public byte[] getKey() + { + return key.getBytes(); + } + + public Slice getKeySlice() + { + return key; + } + + @Override + public byte[] getValue() + { + return value.getBytes(); + } + + public Slice getValueSlice() + { + return value; + } + + @Override + public byte[] setValue(byte[] value) + { + throw new UnsupportedOperationException(); + } + + @Override + public boolean equals(Object object) + { + if (object instanceof Entry) { + Entry that = (Entry) object; + return key.equals(that.getKey()) && + value.equals(that.getValue()); + } + return false; + } + + @Override + public int hashCode() + { + return key.hashCode() ^ value.hashCode(); + } + + /** + * Returns a string representation of the form {key}={value}. + */ + @Override + public String toString() + { + return key + "=" + value; + } + } +} diff --git a/leveldb/src/main/java/org/iq80/leveldb/iterator/DbIterator.java b/leveldb/src/main/java/org/iq80/leveldb/iterator/DbIterator.java new file mode 100644 index 0000000..582e971 --- /dev/null +++ b/leveldb/src/main/java/org/iq80/leveldb/iterator/DbIterator.java @@ -0,0 +1,110 @@ +/* + * Copyright (C) 2011 the original author or authors. + * See the notice.md file distributed with this work for additional + * information regarding copyright ownership. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.iq80.leveldb.iterator; + +import org.iq80.leveldb.impl.InternalKey; +import org.iq80.leveldb.util.Slice; + +import java.io.IOException; + +public final class DbIterator implements InternalIterator +{ + /* + * NOTE: This code has been specifically tuned for performance of the DB + * iterator methods. Before committing changes to this code, make sure + * that the performance of the DB benchmark with the following parameters + * has not regressed: + * + * --num=10000000 --benchmarks=fillseq,readrandom,readseq,readseq,readseq + * + * The code in this class purposely does not use the SeekingIterator + * interface, but instead used the concrete implementations. This is + * because we want the hot spot compiler to inline the code from the + * concrete iterators, and this can not happen with truly polymorphic + * call-sites. If a future version of hot spot supports inlining of truly + * polymorphic call-sites, this code can be made much simpler. 
+ */ + private final MergingIterator mergingIterator; + private final Runnable cleanup; + + public DbIterator(MergingIterator mergingIterator, Runnable cleanup) + { + this.mergingIterator = mergingIterator; + this.cleanup = cleanup; + } + + @Override + public void close() throws IOException + { + //end user api is protected against multiple close + try { + mergingIterator.close(); + } + finally { + cleanup.run(); + } + } + + @Override + public boolean valid() + { + return mergingIterator.valid(); + } + + @Override + public boolean seekToFirst() + { + return mergingIterator.seekToFirst(); + } + + @Override + public boolean seekToLast() + { + return mergingIterator.seekToLast(); + } + + @Override + public boolean seek(InternalKey targetKey) + { + return mergingIterator.seek(targetKey); + } + + @Override + public boolean next() + { + return mergingIterator.next(); + } + + @Override + public boolean prev() + { + return mergingIterator.prev(); + } + + @Override + public InternalKey key() + { + return mergingIterator.key(); + } + + @Override + public Slice value() + { + return mergingIterator.value(); + } +} diff --git a/leveldb/src/main/java/org/iq80/leveldb/iterator/Direction.java b/leveldb/src/main/java/org/iq80/leveldb/iterator/Direction.java new file mode 100644 index 0000000..ca2f0f7 --- /dev/null +++ b/leveldb/src/main/java/org/iq80/leveldb/iterator/Direction.java @@ -0,0 +1,39 @@ +/* + * Copyright (C) 2011 the original author or authors. + * See the notice.md file distributed with this work for additional + * information regarding copyright ownership. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.iq80.leveldb.iterator; + +enum Direction +{ + START_OF_ITERATOR(false), + RELEASED(false), + END_OF_ITERATOR(false), + REVERSE(true), + FORWARD(true); + + private boolean valid; + + Direction(boolean valid) + { + this.valid = valid; + } + + public boolean isValid() + { + return valid; + } +} diff --git a/leveldb/src/main/java/org/iq80/leveldb/iterator/InternalIterator.java b/leveldb/src/main/java/org/iq80/leveldb/iterator/InternalIterator.java new file mode 100644 index 0000000..57bd628 --- /dev/null +++ b/leveldb/src/main/java/org/iq80/leveldb/iterator/InternalIterator.java @@ -0,0 +1,31 @@ +/* + * Copyright (C) 2011 the original author or authors. + * See the notice.md file distributed with this work for additional + * information regarding copyright ownership. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.iq80.leveldb.iterator; + +import org.iq80.leveldb.impl.InternalKey; +import org.iq80.leveldb.util.Slice; + +/** + *

A common interface for internal iterators.

+ * + * @author Hiram Chirino + */ +public interface InternalIterator + extends SeekingIterator +{ +} diff --git a/leveldb/src/main/java/org/iq80/leveldb/iterator/InternalTableIterator.java b/leveldb/src/main/java/org/iq80/leveldb/iterator/InternalTableIterator.java new file mode 100644 index 0000000..ca4bfee --- /dev/null +++ b/leveldb/src/main/java/org/iq80/leveldb/iterator/InternalTableIterator.java @@ -0,0 +1,106 @@ +/* + * Copyright (C) 2011 the original author or authors. + * See the notice.md file distributed with this work for additional + * information regarding copyright ownership. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.iq80.leveldb.iterator; + +import org.iq80.leveldb.impl.InternalKey; +import org.iq80.leveldb.util.Slice; + +import java.io.IOException; + +public class InternalTableIterator + implements InternalIterator +{ + private final SliceIterator tableIterator; + private InternalKey key; + + public InternalTableIterator(SliceIterator tableIterator) + { + this.tableIterator = tableIterator; + } + + @Override + public boolean valid() + { + return tableIterator.valid(); + } + + @Override + public boolean seekToFirst() + { + this.key = null; + return tableIterator.seekToFirst(); + } + + public boolean seek(InternalKey targetKey) + { + this.key = null; + return tableIterator.seek(targetKey.encode()); + } + + @Override + public boolean seekToLast() + { + this.key = null; + return tableIterator.seekToLast(); + } + + @Override + public boolean next() + { + this.key = null; + return tableIterator.next(); + } + + @Override + public boolean prev() + { + this.key = null; + return tableIterator.prev(); + } + + @Override + public InternalKey key() + { + if (key == null) { + //cache key decomposition + this.key = new InternalKey(tableIterator.key()); + } + return this.key; + } + + @Override + public Slice value() + { + return tableIterator.value(); + } + + @Override + public String toString() + { + return "InternalTableIterator" + + "{fromIterator=" + tableIterator + + '}'; + } + + @Override + public void close() throws IOException + { + this.key = null; + tableIterator.close(); + } +} diff --git a/leveldb/src/main/java/org/iq80/leveldb/iterator/MemTableIterator.java b/leveldb/src/main/java/org/iq80/leveldb/iterator/MemTableIterator.java new file mode 100644 index 0000000..fb91544 --- /dev/null +++ b/leveldb/src/main/java/org/iq80/leveldb/iterator/MemTableIterator.java @@ -0,0 +1,107 @@ +/* + * Copyright (C) 2011 the original author or authors. + * See the notice.md file distributed with this work for additional + * information regarding copyright ownership. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.iq80.leveldb.iterator; + +import com.google.common.collect.Iterators; +import com.google.common.collect.PeekingIterator; +import org.iq80.leveldb.impl.InternalKey; +import org.iq80.leveldb.util.Slice; + +import java.util.Map; +import java.util.concurrent.ConcurrentNavigableMap; + +public final class MemTableIterator + extends ASeekingIterator implements InternalIterator +{ + private PeekingIterator> iterator; + private Map.Entry entry; + private final ConcurrentNavigableMap table; + + public MemTableIterator(ConcurrentNavigableMap table) + { + this.table = table; + } + + @Override + protected boolean internalSeekToFirst() + { + iterator = Iterators.peekingIterator(table.entrySet().iterator()); + entry = iterator.hasNext() ? iterator.next() : null; + return entry != null; + } + + @Override + protected boolean internalSeekToLast() + { + iterator = Iterators.peekingIterator(table.descendingMap().entrySet().iterator()); + entry = iterator.hasNext() ? iterator.next() : null; + return entry != null; + } + + @Override + protected boolean internalSeek(InternalKey targetKey) + { + iterator = Iterators.peekingIterator(table.tailMap(targetKey).entrySet().iterator()); + entry = iterator.hasNext() ? 
iterator.next() : null; + return entry != null; + } + + @Override + protected boolean internalNext(boolean switchDirection) + { + if (switchDirection) { + iterator = Iterators.peekingIterator(table.tailMap(entry.getKey()).entrySet().iterator()); + if (iterator.hasNext()) { + iterator.next(); //skip "entry" + } + } + entry = iterator.hasNext() ? iterator.next() : null; + return entry != null; + } + + @Override + protected boolean internalPrev(boolean switchDirection) + { + if (switchDirection) { + iterator = Iterators.peekingIterator(table.descendingMap().tailMap(entry.getKey()).entrySet().iterator()); + if (iterator.hasNext()) { + iterator.next(); //skip "entry" + } + } + entry = iterator.hasNext() ? iterator.next() : null; + return entry != null; + } + + @Override + protected InternalKey internalKey() + { + return entry.getKey(); + } + + @Override + protected Slice internalValue() + { + return entry.getValue(); + } + + @Override + public void internalClose() + { + iterator = null; + } +} diff --git a/leveldb/src/main/java/org/iq80/leveldb/iterator/MergingIterator.java b/leveldb/src/main/java/org/iq80/leveldb/iterator/MergingIterator.java new file mode 100644 index 0000000..438c4f1 --- /dev/null +++ b/leveldb/src/main/java/org/iq80/leveldb/iterator/MergingIterator.java @@ -0,0 +1,136 @@ +/* + * Copyright (C) 2011 the original author or authors. + * See the notice.md file distributed with this work for additional + * information regarding copyright ownership. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.iq80.leveldb.iterator; + +import org.iq80.leveldb.impl.InternalKey; +import org.iq80.leveldb.util.Closeables; +import org.iq80.leveldb.util.Slice; + +import java.io.IOException; +import java.util.Comparator; +import java.util.List; +import java.util.PriorityQueue; +import java.util.function.Function; + +public final class MergingIterator extends ASeekingIterator + implements InternalIterator +{ + private final List iterators; + private final Comparator keyComparator; + private final Comparator iteratorComparator; + private PriorityQueue queue; + private InternalIterator current; + + public MergingIterator(List iterators, Comparator comparator) + { + this.keyComparator = comparator; + this.iteratorComparator = (o1, o2) -> keyComparator.compare(o1.key(), o2.key()); + this.iterators = iterators; + } + + private void rebuildQueue(boolean reverse, Function func) + { + this.queue = new PriorityQueue<>(iterators.size(), reverse ? 
iteratorComparator.reversed() : iteratorComparator); + for (InternalIterator iterator : iterators) { + if (func.apply(iterator)) { + queue.add(iterator); + } + } + } + + @Override + protected boolean internalSeekToFirst() + { + rebuildQueue(false, SeekingIterator::seekToFirst); + current = queue.poll(); + return current != null; + } + + @Override + protected boolean internalSeekToLast() + { + rebuildQueue(true, SeekingIterator::seekToLast); + current = queue.poll(); + return current != null; + } + + @Override + protected boolean internalSeek(InternalKey targetKey) + { + rebuildQueue(false, itr -> itr.seek(targetKey)); + current = queue.poll(); + return current != null; + } + + @Override + protected boolean internalNext(boolean switchDirection) + { + if (switchDirection) { + InternalKey key = key(); + rebuildQueue(false, iter -> iter != current && iter.seek(key) && (keyComparator.compare(key, iter.key()) != 0 || iter.next())); + } + if (current.next()) { + queue.add(current); + } + current = queue.poll(); + return current != null; + } + + @Override + protected boolean internalPrev(boolean switchDirection) + { + if (switchDirection) { + InternalKey key = key(); + rebuildQueue(true, iter -> { + if (iter.seek(key)) { + // Child is at first entry >= key(). Step back one to be < key() + return iter.prev(); + } + else { + // Child has no entries >= key(). Position at last entry. 
+ return iter.seekToLast(); + } + }); + } + else { + if (current.prev()) { + queue.add(current); + } + } + current = queue.poll(); + return current != null; + } + + @Override + protected InternalKey internalKey() + { + return current.key(); + } + + @Override + protected Slice internalValue() + { + return current.value(); + } + + @Override + public void internalClose() throws IOException + { + Closeables.closeAll(iterators); + } +} diff --git a/leveldb/src/main/java/org/iq80/leveldb/iterator/SeekingIterator.java b/leveldb/src/main/java/org/iq80/leveldb/iterator/SeekingIterator.java new file mode 100644 index 0000000..6b8bdff --- /dev/null +++ b/leveldb/src/main/java/org/iq80/leveldb/iterator/SeekingIterator.java @@ -0,0 +1,102 @@ +/* + * Copyright (C) 2011 the original author or authors. + * See the notice.md file distributed with this work for additional + * information regarding copyright ownership. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.iq80.leveldb.iterator; + +import java.io.Closeable; + +/** + * Seeking iterator that is very similar to original implementation, + * with the distinction that all methods return the state (valid/invalid) + * of the iterator. + * + * @param type of the key + * @param type of the value + */ +public interface SeekingIterator extends Closeable +{ + /** + * An iterator is either positioned at a key/value pair, or + * not valid. + * + * @return true if the iterator is valid. 
+ */ + boolean valid(); + + /** + * Position at the first key in the source. The iterator is {@link #valid()} + * after this call if the source is not empty. + * + * @return {@code true} if iterator is valid, same value will be return by {@link #valid()} after this call + **/ + boolean seekToFirst(); + + /** + * Position at the last key in the source. The iterator is + * {@link #valid()} after this call if the source is not empty. + * + * @return {@code true} if iterator is valid, same value will be return by {@link #valid()} after this call + **/ + boolean seekToLast(); + + /** + * Position at the first key in the source that is at or past target. + * The iterator is {@link #valid()} after this call if the source contains + * an entry that comes at or past target. + * + * @return {@code true} if iterator is valid, same value will be return by {@link #valid()} after this call + **/ + boolean seek(K key); + + /** + * Moves to the next entry in the source. After this call, {@link #valid()} is + * true if the iterator was not positioned at the last entry in the source. + * In the case {@link #seek(Object)}, {@link #seekToLast()} or {@link #seekToLast()} where not called + * first call to this method should position iterator on the first entry. + * + * @return {@code true} if iterator is valid, same value will be return by {@link #valid()} after this call + **/ + boolean next(); + + /** + * Moves to the previous entry in the source. Return true if the iterator was + * not positioned at the first entry in source. + * + * @return {@code true} if iterator is valid, same value will be return by {@link #valid()} after this call + **/ + boolean prev(); + + /** + * Return the key for the current entry. The underlying storage for + * the returned slice is valid only until the next modification of + * the iterator. 
+ * + * @return current position key + * @throws java.util.NoSuchElementException if iterator is not in valid state + **/ + K key(); + + /** + * Return the value for the current entry. The underlying storage for + * the returned slice is valid only until the next modification of + * the iterator. + * + * @return current position value + * @throws java.util.NoSuchElementException if iterator is not in valid state + **/ + V value(); +} diff --git a/leveldb/src/main/java/org/iq80/leveldb/iterator/SeekingIterators.java b/leveldb/src/main/java/org/iq80/leveldb/iterator/SeekingIterators.java new file mode 100644 index 0000000..29f3be5 --- /dev/null +++ b/leveldb/src/main/java/org/iq80/leveldb/iterator/SeekingIterators.java @@ -0,0 +1,73 @@ +/* + * Copyright (C) 2011 the original author or authors. + * See the notice.md file distributed with this work for additional + * information regarding copyright ownership. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.iq80.leveldb.iterator; + +import org.iq80.leveldb.impl.InternalKey; +import org.iq80.leveldb.util.Slice; + +import java.io.Closeable; +import java.util.Comparator; +import java.util.List; +import java.util.function.Function; + +/** + * When ever possible a specific interface implementation is created for speed purpose. + * see {@link DbIterator} where the same approach is used. 
+ */ +public final class SeekingIterators +{ + private SeekingIterators() + { + //utility + } + + /** + * Seeking iterator based on provided sorted list. Unpredictable behavior + * will happen if {@code list} is not sorted according to {@code comparator} + */ + public static SeekingIterator fromSortedList(List list, Function keyExtractor, Function valueExtractor, Comparator comparator) + { + return new SortedCollectionIterator<>(list, keyExtractor, valueExtractor, comparator); + } + + public static SliceIterator twoLevelSliceIterator(SliceIterator indexIterator, Function> blockFunction, Closeable closeableResources) + { + return new SliceTwoLevelIterator(indexIterator, blockFunction, closeableResources); + } + + public static InternalIterator twoLevelInternalIterator(SeekingIterator indexIterator, Function> blockFunction, Closeable closeableResources) + { + return new InternalTwoLevelIterator<>(indexIterator, blockFunction, closeableResources); + } + + private static class InternalTwoLevelIterator extends TwoLevelIterator implements InternalIterator + { + InternalTwoLevelIterator(SeekingIterator indexIterator, Function> blockFunction, Closeable closeableResources) + { + super(indexIterator, blockFunction, closeableResources); + } + } + + private static class SliceTwoLevelIterator extends TwoLevelIterator implements SliceIterator + { + SliceTwoLevelIterator(SliceIterator indexIterator, Function> blockFunction, Closeable closeableResources) + { + super(indexIterator, blockFunction, closeableResources); + } + } +} diff --git a/leveldb/src/main/java/org/iq80/leveldb/iterator/SliceIterator.java b/leveldb/src/main/java/org/iq80/leveldb/iterator/SliceIterator.java new file mode 100644 index 0000000..a50f02b --- /dev/null +++ b/leveldb/src/main/java/org/iq80/leveldb/iterator/SliceIterator.java @@ -0,0 +1,24 @@ +/* + * Copyright (C) 2011 the original author or authors. 
+ * See the notice.md file distributed with this work for additional + * information regarding copyright ownership. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.iq80.leveldb.iterator; + +import org.iq80.leveldb.util.Slice; + +public interface SliceIterator extends SeekingIterator +{ +} diff --git a/leveldb/src/main/java/org/iq80/leveldb/iterator/SnapshotSeekingIterator.java b/leveldb/src/main/java/org/iq80/leveldb/iterator/SnapshotSeekingIterator.java new file mode 100644 index 0000000..c6061d8 --- /dev/null +++ b/leveldb/src/main/java/org/iq80/leveldb/iterator/SnapshotSeekingIterator.java @@ -0,0 +1,206 @@ +/* + * Copyright (C) 2011 the original author or authors. + * See the notice.md file distributed with this work for additional + * information regarding copyright ownership. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.iq80.leveldb.iterator; + +import com.google.common.base.Preconditions; +import org.iq80.leveldb.impl.InternalKey; +import org.iq80.leveldb.impl.ValueType; +import org.iq80.leveldb.util.Slice; + +import java.io.IOException; +import java.util.Comparator; + +//DbIter +public final class SnapshotSeekingIterator + extends ASeekingIterator +{ + private final InternalIterator iterator; + private final long sequence; + private final Comparator userComparator; + private final IRecordBytesListener listener; + private Slice key; + private Slice value; + + public SnapshotSeekingIterator(InternalIterator iterator, long sequence, Comparator userComparator, IRecordBytesListener listener) + { + this.iterator = iterator; + this.sequence = sequence; + this.userComparator = userComparator; + this.listener = listener; + } + + @Override + protected void internalClose() throws IOException + { + iterator.close(); + } + + @Override + protected boolean internalSeekToFirst() + { + return iterator.seekToFirst() && findNextUserEntry(false, null); + } + + @Override + protected boolean internalSeekToLast() + { + return iterator.seekToLast() && findPrevUserEntry(); + } + + @Override + protected boolean internalSeek(Slice targetKey) + { + return iterator.seek(new InternalKey(targetKey, sequence, ValueType.VALUE)) && findNextUserEntry(false, null); + } + + @Override + protected boolean internalNext(boolean switchDirection) + { + if (switchDirection) { + // iterator is pointing just before the entries for this.key(), + // so advance into the range of entries for this.key() and then + // use the normal skipping code below. + boolean itrValid = iterator.valid(); + if (!itrValid) { + itrValid = iterator.seekToFirst(); + } + else { + itrValid = iterator.next(); + } + boolean valid = itrValid ? 
iterator.next() : iterator.seekToFirst(); + if (!valid) { + this.key = null; + this.value = null; + return false; + } + } + // find the next user entry after the key we are about to return + return findNextUserEntry(true, this.key); + } + + @Override + protected boolean internalPrev(boolean switchDirection) + { + if (switchDirection) { + Preconditions.checkState(iterator.valid(), "Should be valid"); + do { + if (!iterator.prev()) { + return false; + } + } while (userComparator.compare(iterator.key().getUserKey(), this.key) >= 0); + } + return findPrevUserEntry(); + } + + @Override + protected Slice internalKey() + { + return key; + } + + private boolean findPrevUserEntry() + { + ValueType valueType = ValueType.DELETION; + if (!iterator.valid()) { + return false; + } + do { + InternalKey key = iterator.key(); + if (key.getSequenceNumber() <= sequence) { + if (valueType != ValueType.DELETION && userComparator.compare(key.getUserKey(), this.key) < 0) { + // We encountered a non-deleted value in entries for previous keys, + return true; + } + valueType = key.getValueType(); + if (valueType == ValueType.DELETION) { + this.key = null; + this.value = null; + } + else { + this.key = key.getUserKey(); + this.value = iterator.value(); + } + } + } while (iterator.prev()); + if (valueType == ValueType.DELETION) { + this.key = null; + this.value = null; + return false; + } + else { + return true; + } + } + + @Override + protected Slice internalValue() + { + return value; + } + + private boolean findNextUserEntry(boolean skipping, Slice savedKey) + { + // Loop until we hit an acceptable entry to yield + if (!iterator.valid()) { + return false; + } + do { + InternalKey ikey = iterator.key(); + Slice value = iterator.value(); + listener.record(ikey, ikey.size() + value.length()); + if (ikey.getSequenceNumber() <= sequence) { + switch (ikey.getValueType()) { + case DELETION: + // Arrange to skip all upcoming entries for this key since + // they are hidden by this deletion. 
+ savedKey = ikey.getUserKey(); + skipping = true; + break; + case VALUE: + if (skipping && + userComparator.compare(ikey.getUserKey(), savedKey) <= 0) { + // Entry hidden + } + else { + this.key = ikey.getUserKey(); + this.value = value; + return true; + } + break; + } + } + } while (iterator.next()); + this.key = null; + this.value = null; + return false; + } + + @Override + public String toString() + { + return "SnapshotSeekingIterator" + + "{sequence=" + sequence + + ", iterator=" + iterator + + '}'; + } + + public interface IRecordBytesListener + { + void record(InternalKey internalKey, int bytes); + } +} diff --git a/leveldb/src/main/java/org/iq80/leveldb/iterator/SortedCollectionIterator.java b/leveldb/src/main/java/org/iq80/leveldb/iterator/SortedCollectionIterator.java new file mode 100644 index 0000000..d1942ef --- /dev/null +++ b/leveldb/src/main/java/org/iq80/leveldb/iterator/SortedCollectionIterator.java @@ -0,0 +1,137 @@ +/* + * Copyright (C) 2011 the original author or authors. + * See the notice.md file distributed with this work for additional + * information regarding copyright ownership. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.iq80.leveldb.iterator; + +import java.util.Comparator; +import java.util.List; +import java.util.function.Function; + +class SortedCollectionIterator extends ASeekingIterator +{ + private final List entries; + private Function keyExtractor; + private Function valueExtractor; + private Comparator comparator; + private int index; + + SortedCollectionIterator(List entries, Function keyExtractor, Function valueExtractor, Comparator comparator) + { + this.entries = entries; + this.keyExtractor = keyExtractor; + this.valueExtractor = valueExtractor; + this.comparator = comparator; + this.index = entries.size(); + } + + @Override + public void internalClose() + { + //na + } + + @Override + protected boolean internalSeekToFirst() + { + if (entries.isEmpty()) { + return false; + } + index = 0; + return true; + } + + @Override + protected boolean internalSeekToLast() + { + if (entries.isEmpty()) { + return false; + } + else { + index = entries.size() - 1; + return true; + } + } + + @Override + protected boolean internalSeek(K targetKey) + { + // seek the index to the block containing the + if (entries.isEmpty()) { + return false; + } + + // todo replace with Collections.binarySearch + //Collections.binarySearch(entries, comparator) + int left = 0; + int right = entries.size() - 1; + + // binary search restart positions to find the restart position immediately before the targetKey + while (left < right) { + int mid = (left + right) / 2; + + if (comparator.compare(keyExtractor.apply(entries.get(mid)), targetKey) < 0) { + // Key at "mid.largest" is < "target". Therefore all + // files at or before "mid" are uninteresting. + left = mid + 1; + } + else { + // Key at "mid.largest" is >= "target". Therefore all files + // after "mid" are uninteresting. + right = mid; + } + } + index = right; + + // if the index is now pointing to the last block in the file, check if the largest key + // in the block is than the the target key. 
If so, we need to seek beyond the end of this file + if (index == entries.size() - 1 && comparator.compare(keyExtractor.apply(entries.get(index)), targetKey) < 0) { + index++; + } + return index < entries.size(); + } + + @Override + protected boolean internalNext(boolean switchDirection) + { + index++; + return index < entries.size(); + } + + @Override + protected boolean internalPrev(boolean switchDirection) + { + if (index == 0) { + return false; + } + else { + index--; + return true; + } + } + + @Override + protected K internalKey() + { + return keyExtractor.apply(entries.get(index)); + } + + @Override + protected V internalValue() + { + return valueExtractor.apply(entries.get(index)); + } +} diff --git a/leveldb/src/main/java/org/iq80/leveldb/iterator/TwoLevelIterator.java b/leveldb/src/main/java/org/iq80/leveldb/iterator/TwoLevelIterator.java new file mode 100644 index 0000000..25f4fe6 --- /dev/null +++ b/leveldb/src/main/java/org/iq80/leveldb/iterator/TwoLevelIterator.java @@ -0,0 +1,180 @@ +/* + * Copyright (C) 2011 the original author or authors. + * See the notice.md file distributed with this work for additional + * information regarding copyright ownership. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.iq80.leveldb.iterator; + +import org.iq80.leveldb.DBException; + +import java.io.Closeable; +import java.io.IOException; +import java.util.function.Function; + +/** + * Equivalent to TwoLevelIterator int google leveldb + */ +class TwoLevelIterator + extends ASeekingIterator +{ + private final Function> blockFunction; + private Closeable closeableResources; + private SeekingIterator indexIterator; + private SeekingIterator current; + + TwoLevelIterator(SeekingIterator indexIterator, Function> blockFunction, Closeable closeableResources) + { + this.indexIterator = indexIterator; + this.blockFunction = blockFunction; + this.closeableResources = closeableResources; + } + + @Override + protected boolean internalSeekToFirst() + { + if (initDataBlock(indexIterator.seekToFirst()) && current.seekToFirst()) { + return true; + } + return skipEmptyDataBlocksForward(); + } + + @Override + protected boolean internalSeek(K targetKey) + { + // seek the index to the block containing the key + // if indexIterator does not have a next, it mean the key does not exist in this iterator + if (initDataBlock(indexIterator.seek(targetKey)) && current.seek(targetKey)) { + return true; + } + return skipEmptyDataBlocksForward(); + } + + @Override + protected boolean internalSeekToLast() + { + if (!indexIterator.seekToLast()) { + closeAndResetCurrent(); + return false; + } + if (initDataBlock(true) && current.seekToLast()) { + return true; + } + return skipEmptyDataBlocksBackward(); + } + + @Override + protected boolean internalNext(boolean switchDirection) + { + return current.next() || skipEmptyDataBlocksForward(); + } + + @Override + protected boolean internalPrev(boolean switchDirection) + { + return current.prev() || skipEmptyDataBlocksBackward(); + } + + @Override + protected K internalKey() + { + return current.key(); + } + + @Override + protected V internalValue() + { + return current.value(); + } + + private boolean skipEmptyDataBlocksForward() + { + while (current == 
null || !current.valid()) { + if (!indexIterator.valid()) { + closeAndResetCurrent(); + return false; + } + if (initDataBlock(indexIterator.next()) && current.seekToFirst()) { + return true; + } + } + return true; + } + + private boolean skipEmptyDataBlocksBackward() + { + while (current == null || !current.valid()) { + if (!indexIterator.valid()) { + closeAndResetCurrent(); + return false; + } + if (initDataBlock(indexIterator.prev()) && current.seekToLast()) { + return true; + } + } + return true; + } + + private boolean initDataBlock(boolean valid) + { + closeAndResetCurrent(); + if (valid) { + // seek the current iterator to the key + T blockHandle = indexIterator.value(); + current = blockFunction.apply(blockHandle); + return true; + } + else { + return false; + } + } + + @Override + public String toString() + { + return "TwoLevelIterator{" + + "blockFunction=" + blockFunction + + ", indexIterator=" + indexIterator + + ", current=" + current + + '}'; + } + + private void closeAndResetCurrent() + { + if (current != null) { + try { + current.close(); + } + catch (IOException e) { + throw new DBException(e); + } + } + current = null; + } + + @Override + public void internalClose() throws IOException + { + assert closeableResources != null : "Unexpected multiple calls to close() method"; + try { + closeAndResetCurrent(); + this.indexIterator.close(); + this.indexIterator = null; + } + finally { + closeableResources.close(); + closeableResources = null; + } + } +} diff --git a/leveldb/src/main/java/org/iq80/leveldb/memenv/FileState.java b/leveldb/src/main/java/org/iq80/leveldb/memenv/FileState.java new file mode 100644 index 0000000..0c480ab --- /dev/null +++ b/leveldb/src/main/java/org/iq80/leveldb/memenv/FileState.java @@ -0,0 +1,181 @@ +/* + * Copyright (C) 2011 the original author or authors. + * See the notice.md file distributed with this work for additional + * information regarding copyright ownership. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.iq80.leveldb.memenv; + +import com.google.common.base.Preconditions; +import org.iq80.leveldb.util.Slice; + +import java.io.IOException; +import java.util.Arrays; + +/** + * File content. + */ +class FileState +{ + private static final int BLOCK_SIZE = 8 * 1024; + // TODO switch to RW lock. we need concurrent read only + private final Object lock = new Object(); + //file lock + private boolean locked = false; + private int size = 0; + private byte[][] content; + + /** + * Return current file size + * + * @return current file size + */ + public long length() + { + synchronized (lock) { + return size; + } + } + + /** + * Delete all content of file + * + * @return slef file + */ + public FileState truncate() + { + synchronized (lock) { + content = null; + size = 0; + } + return this; + } + + /** + * Read some content from file + * + * @param offset data offset + * @param n at most number of bytes to read + * @return read bytes or {@code null} if EOF is reached + * @throws IOException on any "IO" error + */ + public byte[] read(long offset, int n) throws IOException + { + synchronized (lock) { + if (offset > size) { + throw new IOException("Offset greater than file size."); + } + long available = size - offset; + if (n != 0 && available == 0) { + return null; //EOF + } + if (n > available) { + n = (int) available; + } + if (n == 0) { + return new byte[0]; + } + Preconditions.checkArgument(offset 
/ BLOCK_SIZE <= Integer.MAX_VALUE, "Invalid offset"); + int block = (int) (offset / BLOCK_SIZE); + int blockOffset = (int) (offset % BLOCK_SIZE); + int bytesToCopy = n; + int dst = 0; + byte[] output = new byte[n]; + while (bytesToCopy > 0) { + int avail = BLOCK_SIZE - blockOffset; + if (avail > bytesToCopy) { + avail = bytesToCopy; + } + System.arraycopy(content[block], blockOffset, output, dst, avail); + bytesToCopy -= avail; + dst += avail; + block++; + blockOffset = 0; + } + return output; + } + } + + /** + * Add content at the end of the file + * + * @param data content to append + */ + public void append(Slice data) + { + int srcLen = data.length(); + //avoid 2 copy + byte[] src = data.getRawArray(); + int srcPos = data.getRawOffset(); + synchronized (lock) { + while (srcLen > 0) { + int avail; + int offset = size % BLOCK_SIZE; + + if (offset != 0) { + // There is some room in the last block. + avail = BLOCK_SIZE - offset; + } + else { + // No room in the last block; push new one. 
+ addBlock(new byte[BLOCK_SIZE]); + avail = BLOCK_SIZE; + } + + if (avail > srcLen) { + avail = srcLen; + } + System.arraycopy(src, srcPos, content[content.length - 1], offset, avail); + srcLen -= avail; + srcPos += avail; + size += avail; + } + } + } + + private void addBlock(byte[] bytes) + { + if (content == null) { + content = new byte[1][]; + } + else { + content = Arrays.copyOf(content, content.length + 1); + } + content[content.length - 1] = bytes; + } + + /** + * {@code true} If file locked + * + * @return {@code true} If file locked + */ + public boolean isLocked() + { + synchronized (lock) { + return locked; + } + } + + /** + * Set new locked file state + * + * @param locked new lock state + */ + public void setLocked(boolean locked) + { + synchronized (lock) { + this.locked = locked; + } + } +} diff --git a/leveldb/src/main/java/org/iq80/leveldb/memenv/MemEnv.java b/leveldb/src/main/java/org/iq80/leveldb/memenv/MemEnv.java new file mode 100644 index 0000000..4d55923 --- /dev/null +++ b/leveldb/src/main/java/org/iq80/leveldb/memenv/MemEnv.java @@ -0,0 +1,116 @@ +/* + * Copyright (C) 2011 the original author or authors. + * See the notice.md file distributed with this work for additional + * information regarding copyright ownership. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.iq80.leveldb.memenv; + +import org.iq80.leveldb.Logger; +import org.iq80.leveldb.env.DbLock; +import org.iq80.leveldb.env.Env; +import org.iq80.leveldb.env.File; +import org.iq80.leveldb.env.NoOpLogger; +import org.iq80.leveldb.env.RandomInputFile; +import org.iq80.leveldb.env.SequentialFile; +import org.iq80.leveldb.env.WritableFile; +import org.iq80.leveldb.util.Slices; + +import java.io.IOException; +import java.util.concurrent.TimeUnit; + +import static java.nio.charset.StandardCharsets.UTF_8; + +/** + * Environment that stores its data in memory + */ +public class MemEnv implements Env +{ + private final MemFs fs = new MemFs(); + + public static Env createEnv() + { + return new MemEnv(); + } + + @Override + public long nowMicros() + { + return TimeUnit.NANOSECONDS.toMicros(System.nanoTime()); + } + + @Override + public File toFile(String filename) + { + return MemFile.createMemFile(fs, filename); + } + + @Override + public File createTempDir(String prefix) + { + return fs.createTempDir(prefix); + } + + @Override + public SequentialFile newSequentialFile(File file) throws IOException + { + return new MemSequentialFile(fs.requireFile((MemFile) file)); + } + + @Override + public RandomInputFile newRandomAccessFile(File file) throws IOException + { + return new MemRandomInputFile(file, fs.requireFile((MemFile) file)); + } + + @Override + public WritableFile newWritableFile(File file) throws IOException + { + return new MemWritableFile(fs.getOrCreateFile((MemFile) file)); + } + + @Override + public WritableFile newAppendableFile(File file) throws IOException + { + return new MemWritableFile(fs.getOrCreateFile((MemFile) file)); + } + + @Override + public void writeStringToFileSync(File file, String content) throws IOException + { + fs.getOrCreateFile((MemFile) file).truncate().append(Slices.wrappedBuffer(content.getBytes(UTF_8))); + } + + @Override + public String readFileToString(File file) throws IOException + { + byte[] read = 
fs.requireFile((MemFile) file).read(0, (int) file.length()); + if (read == null) { + throw new IOException("Could not read all the data"); + } + return new String(read, UTF_8); + } + + @Override + public Logger newLogger(File loggerFile) throws IOException + { + return new NoOpLogger(); + } + + @Override + public DbLock tryLock(File file) throws IOException + { + return fs.doLock(((MemFile) file)); + } +} diff --git a/leveldb/src/main/java/org/iq80/leveldb/memenv/MemFile.java b/leveldb/src/main/java/org/iq80/leveldb/memenv/MemFile.java new file mode 100644 index 0000000..83a5348 --- /dev/null +++ b/leveldb/src/main/java/org/iq80/leveldb/memenv/MemFile.java @@ -0,0 +1,166 @@ +/* + * Copyright (C) 2011 the original author or authors. + * See the notice.md file distributed with this work for additional + * information regarding copyright ownership. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.iq80.leveldb.memenv; + +import com.google.common.base.Preconditions; +import org.iq80.leveldb.env.File; + +import java.util.List; +import java.util.Objects; + +import static org.iq80.leveldb.memenv.MemFs.SEPARATOR; +import static org.iq80.leveldb.memenv.MemFs.SEPARATOR_CHAR; + +class MemFile implements File +{ + private final MemFs fs; + private final String filename; + + private MemFile(MemFs fs, String filename) + { + Preconditions.checkArgument(fs != null, "fs null"); + Preconditions.checkArgument(filename != null && !filename.isEmpty(), "empty file name"); + this.fs = fs; + this.filename = filename; + } + + static MemFile createMemFile(MemFs fs, String filename) + { + Objects.requireNonNull(filename, "filename"); + String path = filename; + if (!path.startsWith(MemFs.SEPARATOR)) { + path = MemFs.SEPARATOR + path; + } + while (path.length() > 1 && path.endsWith(MemFs.SEPARATOR)) { + path = path.substring(0, path.length() - 1); + } + return new MemFile(fs, path); + } + + @Override + public MemFile child(String other) + { + Preconditions.checkArgument(other == null || other.isEmpty() || !other.contains(SEPARATOR), "Invalid file/directory name %s", other); + return createMemFile(fs, filename + SEPARATOR_CHAR + other); + } + + @Override + public boolean mkdirs() + { + return fs.mkdirs(this); + } + + @Override + public String getName() + { + int i = filename.lastIndexOf(SEPARATOR_CHAR); + return i >= 0 ? filename.substring(i + 1) : filename; + } + + @Override + public MemFile getParentFile() + { + int i = filename.lastIndexOf(SEPARATOR); + return createMemFile(fs, i > 0 ? 
filename.substring(0, i) : SEPARATOR); + } + + @Override + public String getPath() + { + return filename; + } + + @Override + public boolean canRead() + { + return fs.canRead(this); + } + + @Override + public boolean exists() + { + return fs.exists(this); + } + + @Override + public boolean isDirectory() + { + return fs.isDirectory(this); + } + + @Override + public boolean isFile() + { + return fs.isFile(this); + } + + @Override + public long length() + { + return fs.getFileState(this).map(FileState::length).orElse(0L); + } + + @Override + public boolean delete() + { + return fs.delete(this); + } + + @Override + public List listFiles() + { + return fs.listFiles(this); + } + + @Override + public boolean renameTo(File dest) + { + return fs.renameTo(this, ((MemFile) dest)); + } + + @Override + public boolean deleteRecursively() + { + return fs.deleteRecursively(this); + } + + @Override + public boolean equals(Object o) + { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + MemFile memFile = (MemFile) o; + return Objects.equals(filename, memFile.filename); + } + + @Override + public int hashCode() + { + return Objects.hash(filename); + } + + @Override + public String toString() + { + return filename; + } +} diff --git a/leveldb/src/main/java/org/iq80/leveldb/memenv/MemFs.java b/leveldb/src/main/java/org/iq80/leveldb/memenv/MemFs.java new file mode 100644 index 0000000..aaaf91f --- /dev/null +++ b/leveldb/src/main/java/org/iq80/leveldb/memenv/MemFs.java @@ -0,0 +1,239 @@ +/* + * Copyright (C) 2011 the original author or authors. + * See the notice.md file distributed with this work for additional + * information regarding copyright ownership. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.iq80.leveldb.memenv; + +import org.iq80.leveldb.env.DbLock; +import org.iq80.leveldb.env.File; + +import java.io.FileNotFoundException; +import java.io.IOException; +import java.util.HashMap; +import java.util.HashSet; +import java.util.Iterator; +import java.util.List; +import java.util.Map; +import java.util.Optional; +import java.util.Set; +import java.util.stream.Collectors; +import java.util.stream.Stream; + +class MemFs +{ + public static final char SEPARATOR_CHAR = '/'; + public static final String SEPARATOR = "/"; + private final Object lock = new Object(); + private final Set dirs = new HashSet<>(); + private final Map maps = new HashMap<>(); + + public File createTempDir(String prefix) + { + synchronized (lock) { + MemFile e; + do { + String baseName = "/"; + if (prefix != null) { + baseName += prefix + "-"; + } + + baseName += System.currentTimeMillis() + "-"; + e = MemFile.createMemFile(this, baseName); + } while (maps.containsKey(e) || !dirs.add(e)); + e.mkdirs(); + return e; + } + } + + public FileState requireFile(MemFile file) throws FileNotFoundException + { + FileState fileState; + synchronized (lock) { + fileState = maps.get(file); + if (fileState == null) { + throw new FileNotFoundException(file.getPath()); + } + } + return fileState; + } + + public FileState getOrCreateFile(MemFile file) throws IOException + { + FileState fileState; + synchronized (lock) { + if (dirs.contains(file)) { + throw new IOException(file + " is a directory"); + } + if (!dirs.contains(file.getParentFile())) { + throw new 
IOException("Unable to create file " + file + ", parent directory does not exist"); + } + fileState = maps.computeIfAbsent(file, memFile -> new FileState()); + } + return fileState; + } + + public boolean mkdirs(MemFile memFile) + { + synchronized (lock) { + if (maps.containsKey(memFile)) { + return false; + } + dirs.add(memFile); + return true; + } + } + + public boolean canRead(MemFile memFile) + { + synchronized (lock) { + return maps.containsKey(memFile); + } + } + + public boolean isFile(MemFile memFile) + { + return canRead(memFile); + } + + public boolean isDirectory(MemFile memFile) + { + synchronized (lock) { + return dirs.contains(memFile); + } + } + + public Optional getFileState(MemFile file) + { + synchronized (lock) { + return Optional.ofNullable(maps.get(file)); + } + } + + public boolean delete(MemFile memFile) + { + synchronized (lock) { + return maps.remove(memFile) != null || dirs.remove(memFile); + } + } + + public List listFiles(MemFile memFile) + { + synchronized (lock) { + return children(memFile).collect(Collectors.toList()); + } + } + + private Stream children(MemFile memFile) + { + String s = memFile.getPath() + SEPARATOR; + return Stream.concat(maps.keySet().stream(), dirs.stream()) + .filter(e -> e.getPath().startsWith(s)) + .map(e -> { + int i = e.getPath().indexOf(SEPARATOR, s.length()); + return i >= 0 ? 
MemFile.createMemFile(this, e.getPath().substring(0, i)) : e; + }) + .distinct(); + } + + public boolean renameTo(MemFile from, MemFile dest) + { + synchronized (lock) { + if (isDirectory(from)) { + //not supported, not required by DB + return false; + } + if (maps.containsKey(from) && !maps.containsKey(dest)) { + maps.put(dest, maps.get(from)); + maps.remove(from); + return true; + } + } + return false; + } + + public boolean deleteRecursively(MemFile memFile) + { + String prefix = memFile.getPath() + SEPARATOR; + synchronized (lock) { + boolean r = false; + for (Iterator iterator = dirs.iterator(); iterator.hasNext(); ) { + MemFile dir = iterator.next(); + if (dir.equals(memFile) || dir.getPath().startsWith(prefix)) { + iterator.remove(); + r = true; + } + } + for (Iterator> iterator = maps.entrySet().iterator(); iterator.hasNext(); ) { + Map.Entry entry = iterator.next(); + if (entry.getKey().equals(memFile) || entry.getKey().getPath().startsWith(prefix)) { + iterator.remove(); + r = true; + } + } + return r; + } + } + + public boolean exists(MemFile memFile) + { + synchronized (lock) { + return dirs.contains(memFile) || maps.containsKey(memFile); + } + } + + public DbLock doLock(MemFile file) throws IOException + { + final FileState orCreateFile; + synchronized (lock) { + orCreateFile = getOrCreateFile(file); + if (orCreateFile.isLocked()) { + throw new IOException("lock on " + file + " already owned"); + } + orCreateFile.setLocked(true); + } + return new MemDbLock(orCreateFile); + } + + private class MemDbLock implements DbLock + { + private final FileState file; + private boolean released; + + public MemDbLock(FileState file) + { + this.file = file; + } + + @Override + public boolean isValid() + { + synchronized (lock) { + return !released; + } + } + + @Override + public void release() + { + synchronized (lock) { + if (!released) { + released = true; + file.setLocked(false); + } + } + } + } +} diff --git 
a/leveldb/src/main/java/org/iq80/leveldb/memenv/MemRandomInputFile.java b/leveldb/src/main/java/org/iq80/leveldb/memenv/MemRandomInputFile.java new file mode 100644 index 0000000..ea83a8a --- /dev/null +++ b/leveldb/src/main/java/org/iq80/leveldb/memenv/MemRandomInputFile.java @@ -0,0 +1,65 @@ +/* + * Copyright (C) 2011 the original author or authors. + * See the notice.md file distributed with this work for additional + * information regarding copyright ownership. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.iq80.leveldb.memenv; + +import org.iq80.leveldb.env.File; +import org.iq80.leveldb.env.RandomInputFile; + +import java.io.IOException; +import java.nio.ByteBuffer; +import java.nio.channels.ClosedChannelException; + +class MemRandomInputFile implements RandomInputFile +{ + private final File file; + private final FileState fileState; + private boolean closed; + + public MemRandomInputFile(File file, FileState fileState) + { + this.file = file; + this.fileState = fileState; + closed = false; + } + + @Override + public long size() + { + return file.length(); + } + + @Override + public ByteBuffer read(long offset, int length) throws IOException + { + if (closed) { + throw new ClosedChannelException(); + } + byte[] read = fileState.read(offset, length); + if (read == null) { + throw new IOException("Could not read all the data"); + } + // read is already a copy + return ByteBuffer.wrap(read); + } + + @Override + public void close() throws IOException + { + closed = true; + } +} diff --git a/leveldb/src/main/java/org/iq80/leveldb/memenv/MemSequentialFile.java b/leveldb/src/main/java/org/iq80/leveldb/memenv/MemSequentialFile.java new file mode 100644 index 0000000..7992d98 --- /dev/null +++ b/leveldb/src/main/java/org/iq80/leveldb/memenv/MemSequentialFile.java @@ -0,0 +1,70 @@ +/* + * Copyright (C) 2011 the original author or authors. + * See the notice.md file distributed with this work for additional + * information regarding copyright ownership. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.iq80.leveldb.memenv; + +import org.iq80.leveldb.env.SequentialFile; +import org.iq80.leveldb.util.SliceOutput; + +import java.io.IOException; +import java.nio.channels.ClosedChannelException; + +class MemSequentialFile implements SequentialFile +{ + private final FileState fileState; + private boolean closed; + private int index; + + public MemSequentialFile(FileState fileState) + { + this.fileState = fileState; + } + + @Override + public void skip(long n) throws IOException + { + if (index > fileState.length()) { + throw new IOException("File position " + index + " is greater than file size"); + } + long available = fileState.length() - index; + if (n > available) { + n = available; + } + index += n; + } + + @Override + public int read(int atMost, SliceOutput destination) throws IOException + { + if (closed) { + throw new ClosedChannelException(); + } + byte[] read = fileState.read(index, atMost); + if (read != null) { + index += read.length; + destination.writeBytes(read); + return read.length; + } + return -1; + } + + @Override + public void close() throws IOException + { + closed = true; + } +} diff --git a/leveldb/src/main/java/org/iq80/leveldb/memenv/MemWritableFile.java b/leveldb/src/main/java/org/iq80/leveldb/memenv/MemWritableFile.java new file mode 100644 index 0000000..1e27486 --- /dev/null +++ b/leveldb/src/main/java/org/iq80/leveldb/memenv/MemWritableFile.java @@ -0,0 +1,55 @@ +/* + * Copyright (C) 2011 the original author or authors. + * See the notice.md file distributed with this work for additional + * information regarding copyright ownership. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.iq80.leveldb.memenv; + +import org.iq80.leveldb.env.WritableFile; +import org.iq80.leveldb.util.Slice; + +import java.io.IOException; +import java.nio.channels.ClosedChannelException; + +class MemWritableFile implements WritableFile +{ + private final FileState fileState; + private boolean closed; + + public MemWritableFile(FileState fileState) + { + this.fileState = fileState; + } + + @Override + public void append(Slice data) throws IOException + { + if (closed) { + throw new ClosedChannelException(); + } + fileState.append(data); + } + + @Override + public void force() throws IOException + { + } + + @Override + public void close() throws IOException + { + closed = true; + } +} diff --git a/leveldb/src/main/java/org/iq80/leveldb/table/Block.java b/leveldb/src/main/java/org/iq80/leveldb/table/Block.java new file mode 100644 index 0000000..d3b6f70 --- /dev/null +++ b/leveldb/src/main/java/org/iq80/leveldb/table/Block.java @@ -0,0 +1,116 @@ +/* + * Copyright (C) 2011 the original author or authors. + * See the notice.md file distributed with this work for additional + * information regarding copyright ownership. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.iq80.leveldb.table; + +import org.iq80.leveldb.util.Slice; +import org.iq80.leveldb.util.Slices; + +import java.util.Comparator; + +import static com.google.common.base.Preconditions.checkArgument; +import static java.util.Objects.requireNonNull; +import static org.iq80.leveldb.util.SizeOf.SIZE_OF_INT; + +/** + * Binary Structure + * + * + * + * + * + * + * + * + * + * + *

+ *

+ * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + *
nameoffsetlengthdescription
entries4varyEntries in order by key
restart indexvary4 * restart countIndex of prefix compression restarts
restart count04Number of prefix compression restarts (used as index into entries)
+ */ +public class Block +{ + private final Slice block; + private final Comparator comparator; + + private final Slice data; + private final Slice restartPositions; + + public Block(Slice block, Comparator comparator) + { + requireNonNull(block, "block is null"); + checkArgument(block.length() >= SIZE_OF_INT, "Block is corrupt: size must be at least %s block", SIZE_OF_INT); + requireNonNull(comparator, "comparator is null"); + + block = block.slice(); + this.block = block; + this.comparator = comparator; + + // Keys are prefix compressed. Every once in a while the prefix compression is restarted and the full key is written. + // These "restart" locations are written at the end of the file, so you can seek to key without having to read the + // entire file sequentially. + + // key restart count is the last int of the block + int restartCount = block.getInt(block.length() - SIZE_OF_INT); + + if (restartCount > 0) { + // restarts are written at the end of the block + int restartOffset = block.length() - (1 + restartCount) * SIZE_OF_INT; + checkArgument(restartOffset < block.length() - SIZE_OF_INT, "Block is corrupt: restart offset count is greater than block size"); + restartPositions = block.slice(restartOffset, restartCount * SIZE_OF_INT); + + // data starts at 0 and extends to the restart index + data = block.slice(0, restartOffset); + } + else { + data = Slices.EMPTY_SLICE; + restartPositions = Slices.EMPTY_SLICE; + } + } + + public long size() + { + return block.length(); + } + + public BlockIterator iterator() + { + if (restartPositions.length() == 0) { + //initial java db implementation did not save restart position for empty blocks + return new BlockIterator(data, Slices.allocate(4), comparator); + } + return new BlockIterator(data, restartPositions, comparator); + } +} diff --git a/leveldb/src/main/java/org/iq80/leveldb/table/BlockBuilder.java b/leveldb/src/main/java/org/iq80/leveldb/table/BlockBuilder.java new file mode 100644 index 0000000..0ff4d09 --- 
/dev/null +++ b/leveldb/src/main/java/org/iq80/leveldb/table/BlockBuilder.java @@ -0,0 +1,163 @@ +/* + * Copyright (C) 2011 the original author or authors. + * See the notice.md file distributed with this work for additional + * information regarding copyright ownership. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.iq80.leveldb.table; + +import org.iq80.leveldb.util.DynamicSliceOutput; +import org.iq80.leveldb.util.IntVector; +import org.iq80.leveldb.util.Slice; +import org.iq80.leveldb.util.VariableLengthQuantity; + +import java.util.Comparator; + +import static com.google.common.base.Preconditions.checkArgument; +import static com.google.common.base.Preconditions.checkPositionIndex; +import static com.google.common.base.Preconditions.checkState; +import static java.util.Objects.requireNonNull; +import static org.iq80.leveldb.util.SizeOf.SIZE_OF_INT; + +public class BlockBuilder +{ + private final int blockRestartInterval; + private final IntVector restartPositions; + private final Comparator comparator; + + private int entryCount; + private int restartBlockEntryCount; + + private boolean finished; + private final DynamicSliceOutput block; + private Slice lastKey; + + public BlockBuilder(int estimatedSize, int blockRestartInterval, Comparator comparator) + { + checkArgument(estimatedSize >= 0, "estimatedSize is negative"); + checkArgument(blockRestartInterval >= 0, "blockRestartInterval is negative"); + requireNonNull(comparator, 
"comparator is null"); + + this.block = new DynamicSliceOutput(estimatedSize); + this.blockRestartInterval = blockRestartInterval; + this.comparator = comparator; + + restartPositions = new IntVector(32); + restartPositions.add(0); // first restart point must be 0 + } + + public void reset() + { + block.reset(); + entryCount = 0; + restartPositions.clear(); + restartPositions.add(0); // first restart point must be 0 + restartBlockEntryCount = 0; + lastKey = null; + finished = false; + } + + public int getEntryCount() + { + return entryCount; + } + + public boolean isEmpty() + { + return entryCount == 0; + } + + public int currentSizeEstimate() + { + // no need to estimate if closed + if (finished) { + return block.size(); + } + + return block.size() + // raw data buffer + restartPositions.size() * SIZE_OF_INT + // restart positions + SIZE_OF_INT; // restart position size + } + + public void add(BlockEntry blockEntry) + { + requireNonNull(blockEntry, "blockEntry is null"); + add(blockEntry.getKey(), blockEntry.getValue()); + } + + public void add(Slice key, Slice value) + { + requireNonNull(key, "key is null"); + requireNonNull(value, "value is null"); + checkState(!finished, "block is finished"); + checkPositionIndex(restartBlockEntryCount, blockRestartInterval); + + checkArgument(lastKey == null || comparator.compare(key, lastKey) > 0, "key must be greater than last key"); + + int sharedKeyBytes = 0; + if (restartBlockEntryCount < blockRestartInterval) { + sharedKeyBytes = calculateSharedBytes(key, lastKey); + } + else { + // restart prefix compression + restartPositions.add(block.size()); + restartBlockEntryCount = 0; + } + + int nonSharedKeyBytes = key.length() - sharedKeyBytes; + + // write "" + VariableLengthQuantity.writeVariableLengthInt(sharedKeyBytes, block); + VariableLengthQuantity.writeVariableLengthInt(nonSharedKeyBytes, block); + VariableLengthQuantity.writeVariableLengthInt(value.length(), block); + + // write non-shared key bytes + 
block.writeBytes(key, sharedKeyBytes, nonSharedKeyBytes); + + // write value bytes + block.writeBytes(value, 0, value.length()); + + // update last key + lastKey = key; + + // update state + entryCount++; + restartBlockEntryCount++; + } + + public static int calculateSharedBytes(Slice leftKey, Slice rightKey) + { + int sharedKeyBytes = 0; + + if (leftKey != null && rightKey != null) { + int minSharedKeyBytes = Math.min(leftKey.length(), rightKey.length()); + while (sharedKeyBytes < minSharedKeyBytes && leftKey.getByte(sharedKeyBytes) == rightKey.getByte(sharedKeyBytes)) { + sharedKeyBytes++; + } + } + + return sharedKeyBytes; + } + + public Slice finish() + { + if (!finished) { + finished = true; + //restart position at least one + restartPositions.write(block); + block.writeInt(restartPositions.size()); + } + return block.slice(); + } +} diff --git a/leveldb/src/main/java/org/iq80/leveldb/table/BlockEntry.java b/leveldb/src/main/java/org/iq80/leveldb/table/BlockEntry.java new file mode 100644 index 0000000..c253aff --- /dev/null +++ b/leveldb/src/main/java/org/iq80/leveldb/table/BlockEntry.java @@ -0,0 +1,148 @@ +/* + * Copyright (C) 2011 the original author or authors. + * See the notice.md file distributed with this work for additional + * information regarding copyright ownership. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.iq80.leveldb.table; + +import org.iq80.leveldb.util.Slice; + +import java.util.Map.Entry; + +import static java.nio.charset.StandardCharsets.UTF_8; +import static java.util.Objects.requireNonNull; + +/** + * Binary Structure + * + * + * + * + * + * + * + * + * + * + *

+ *

+ * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + *
nameoffsetlengthdescription
shared key length0varyvariable length encoded int: size of shared key prefix with the key from the previous entry
non-shared key lengthvaryvaryvariable length encoded int: size of non-shared key suffix in this entry
value lengthvaryvaryvariable length encoded int: size of value in this entry
non-shared keyvarynon-shared key lengthnon-shared key data
valuevaryvalue lengthvalue data
+ */ +public class BlockEntry + implements Entry +{ + private final Slice key; + private final Slice value; + + public BlockEntry(Slice key, Slice value) + { + requireNonNull(key, "key is null"); + requireNonNull(value, "value is null"); + this.key = key; + this.value = value; + } + + @Override + public Slice getKey() + { + return key; + } + + @Override + public Slice getValue() + { + return value; + } + + /** + * @throws UnsupportedOperationException always + */ + @Override + public final Slice setValue(Slice value) + { + throw new UnsupportedOperationException(); + } + + @Override + public boolean equals(Object o) + { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + + BlockEntry entry = (BlockEntry) o; + + if (!key.equals(entry.key)) { + return false; + } + if (!value.equals(entry.value)) { + return false; + } + + return true; + } + + @Override + public int hashCode() + { + int result = key.hashCode(); + result = 31 * result + value.hashCode(); + return result; + } + + @Override + public String toString() + { + StringBuilder sb = new StringBuilder(); + sb.append("BlockEntry"); + sb.append("{key=").append(key.toString(UTF_8)); // todo don't print the real value + sb.append(", value=").append(value.toString(UTF_8)); + sb.append('}'); + return sb.toString(); + } +} diff --git a/leveldb/src/main/java/org/iq80/leveldb/table/BlockHandle.java b/leveldb/src/main/java/org/iq80/leveldb/table/BlockHandle.java new file mode 100644 index 0000000..4cb20f3 --- /dev/null +++ b/leveldb/src/main/java/org/iq80/leveldb/table/BlockHandle.java @@ -0,0 +1,120 @@ +/* + * Copyright (C) 2011 the original author or authors. + * See the notice.md file distributed with this work for additional + * information regarding copyright ownership. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.iq80.leveldb.table; + +import org.iq80.leveldb.util.Slice; +import org.iq80.leveldb.util.SliceInput; +import org.iq80.leveldb.util.SliceOutput; +import org.iq80.leveldb.util.Slices; +import org.iq80.leveldb.util.VariableLengthQuantity; + +public class BlockHandle +{ + public static final int MAX_ENCODED_LENGTH = 10 + 10; + + private final long offset; + private final int dataSize; + + BlockHandle(long offset, int dataSize) + { + this.offset = offset; + this.dataSize = dataSize; + } + + public long getOffset() + { + return offset; + } + + public int getDataSize() + { + return dataSize; + } + + public int getFullBlockSize() + { + return dataSize + BlockTrailer.ENCODED_LENGTH; + } + + @Override + public boolean equals(Object o) + { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + + BlockHandle that = (BlockHandle) o; + + if (dataSize != that.dataSize) { + return false; + } + if (offset != that.offset) { + return false; + } + + return true; + } + + @Override + public int hashCode() + { + int result = (int) (offset ^ (offset >>> 32)); + result = 31 * result + dataSize; + return result; + } + + @Override + public String toString() + { + StringBuilder sb = new StringBuilder(); + sb.append("BlockHandle"); + sb.append("{offset=").append(offset); + sb.append(", dataSize=").append(dataSize); + sb.append('}'); + return sb.toString(); + } + + public static BlockHandle readBlockHandle(SliceInput sliceInput) + { + long offset = VariableLengthQuantity.readVariableLengthLong(sliceInput); 
+ long size = VariableLengthQuantity.readVariableLengthLong(sliceInput); + + if (size > Integer.MAX_VALUE) { + throw new IllegalArgumentException("Blocks can not be larger than Integer.MAX_VALUE"); + } + + return new BlockHandle(offset, (int) size); + } + + public static Slice writeBlockHandle(BlockHandle blockHandle) + { + Slice slice = Slices.allocate(MAX_ENCODED_LENGTH); + SliceOutput sliceOutput = slice.output(); + writeBlockHandleTo(blockHandle, sliceOutput); + return slice.slice(); + } + + public static void writeBlockHandleTo(BlockHandle blockHandle, SliceOutput sliceOutput) + { + VariableLengthQuantity.writeVariableLengthLong(blockHandle.offset, sliceOutput); + VariableLengthQuantity.writeVariableLengthLong(blockHandle.dataSize, sliceOutput); + } +} diff --git a/leveldb/src/main/java/org/iq80/leveldb/table/BlockHandleSliceWeigher.java b/leveldb/src/main/java/org/iq80/leveldb/table/BlockHandleSliceWeigher.java new file mode 100644 index 0000000..df18b7e --- /dev/null +++ b/leveldb/src/main/java/org/iq80/leveldb/table/BlockHandleSliceWeigher.java @@ -0,0 +1,35 @@ +/* + * Copyright (C) 2011 the original author or authors. + * See the notice.md file distributed with this work for additional + * information regarding copyright ownership. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.iq80.leveldb.table; + +import com.google.common.cache.Weigher; +import org.iq80.leveldb.util.Slice; + +/** + * @author Honore Vasconcelos + */ +public class BlockHandleSliceWeigher implements Weigher +{ + @Override + public int weigh(CacheKey key, Slice value) + { + //approximate weigher + return 64 + value.getRawArray().length; + } +} diff --git a/leveldb/src/main/java/org/iq80/leveldb/table/BlockIterator.java b/leveldb/src/main/java/org/iq80/leveldb/table/BlockIterator.java new file mode 100644 index 0000000..595e5fe --- /dev/null +++ b/leveldb/src/main/java/org/iq80/leveldb/table/BlockIterator.java @@ -0,0 +1,236 @@ +/* + * Copyright (C) 2011 the original author or authors. + * See the notice.md file distributed with this work for additional + * information regarding copyright ownership. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.iq80.leveldb.table; + +import org.iq80.leveldb.iterator.ASeekingIterator; +import org.iq80.leveldb.iterator.SliceIterator; +import org.iq80.leveldb.util.Slice; +import org.iq80.leveldb.util.SliceInput; +import org.iq80.leveldb.util.SliceOutput; +import org.iq80.leveldb.util.Slices; +import org.iq80.leveldb.util.VariableLengthQuantity; + +import java.util.Comparator; + +import static com.google.common.base.Preconditions.checkArgument; +import static com.google.common.base.Preconditions.checkState; +import static java.util.Objects.requireNonNull; + +public final class BlockIterator extends ASeekingIterator + implements SliceIterator +{ + private final SliceInput data; + private final RestartPositions restartPositions; + private final Comparator comparator; + + private int current; + private int restartIndex; + private Slice key; + private Slice value; + + public BlockIterator(Slice data, Slice restartPositions, Comparator comparator) + { + requireNonNull(data, "data is null"); + requireNonNull(restartPositions, "restartPositions is null"); + requireNonNull(comparator, "comparator is null"); + + this.data = requireNonNull(data.input(), "data input is null"); + + this.restartPositions = new RestartPositions(restartPositions); + checkArgument(this.restartPositions.size() > 0, + "At least one restart position is expected"); + this.comparator = comparator; + } + + @Override + protected Slice internalKey() + { + return key; + } + + @Override + protected Slice internalValue() + { + return value; + } + + @Override + protected boolean internalNext(boolean switchDirection) + { + return parseNextKey(); + } + + @Override + protected boolean internalPrev(boolean switchDirection) + { + // Scan backwards to a restart point before current + final int original = current; + while (restartPositions.get(restartIndex) >= original) { + if (restartIndex == 0) { + current = Integer.MAX_VALUE; + return false; + } + restartIndex--; + } + + seekToRestartPoint(restartIndex); + 
do { + // Loop until end of current entry hits the start of original entry + } while (parseNextKey() && data.position() < original); + return valid(); + } + + private void seekToRestartPoint(int index) + { + this.restartIndex = index; + this.data.setPosition(restartPositions.get(restartIndex)); + this.key = null; + this.value = null; + } + + /** + * Repositions the iterator so the beginning of this block. + */ + @Override + protected boolean internalSeekToFirst() + { + seekToRestartPosition(0); + return parseNextKey(); + } + + protected boolean internalSeekToLast() + { + seekToRestartPoint(restartPositions.size() - 1); // we have at lease one restart + boolean valid; + do { + valid = parseNextKey(); + } while (valid && data.isReadable()); + return valid; + } + + /** + * Repositions the iterator so the key of the next BlockElement returned greater than or equal to the specified targetKey. + */ + @Override + protected boolean internalSeek(Slice targetKey) + { + int left = 0; + int right = restartPositions.size() - 1; + + // binary search restart positions to find the restart position immediately before the targetKey + while (left < right) { + int mid = (left + right + 1) / 2; + + seekToRestartPosition(mid); + + Slice key = readFirstKeyAtRestartPoint(); + + if (comparator.compare(key, targetKey) < 0) { + // key at mid is smaller than targetKey. Therefore all restart + // blocks before mid are uninteresting. + left = mid; + } + else { + // key at mid is greater than or equal to targetKey. Therefore + // all restart blocks at or after mid are uninteresting. + right = mid - 1; + } + } + + // linear search (within restart block) for first key greater than or equal to targetKey + seekToRestartPosition(left); + while (parseNextKey()) { //load this.key + if (comparator.compare(key, targetKey) >= 0) { + return true; + } + } + current = data.position(); + return false; + } + + /** + * Seeks to and reads the entry at the specified restart position. + *

+ * After this method, nextEntry will contain the next entry to return, and the previousEntry will be null. + */ + private void seekToRestartPosition(int restartPosition) + { + // seek data readIndex to the beginning of the restart block + this.restartIndex = restartPosition; + this.key = null; + this.value = null; + int offset = restartPositions.get(restartPosition); + data.setPosition(offset); + current = offset; + } + + private Slice readFirstKeyAtRestartPoint() + { + checkState(VariableLengthQuantity.readVariableLengthInt(data) == 0, + "First restart position can't have a shared "); + current = data.position(); + int nonSharedKeyLength = VariableLengthQuantity.readVariableLengthInt(data); + //data size + VariableLengthQuantity.readVariableLengthInt(data); + return data.readSlice(nonSharedKeyLength); + } + + /** + * Reads the entry at the current data readIndex. + * After this method, data readIndex is positioned at the beginning of the next entry + * or at the end of data if there was not a next entry. 
+ * + * @return true if an entry was read + */ + private boolean parseNextKey() + { + current = data.position(); + if (!data.isReadable()) { + return false; + } + // read entry header + int sharedKeyLength = VariableLengthQuantity.readVariableLengthInt(data); + int nonSharedKeyLength = VariableLengthQuantity.readVariableLengthInt(data); + int valueLength = VariableLengthQuantity.readVariableLengthInt(data); + + // read key + Slice key; + if (sharedKeyLength > 0) { + key = Slices.allocate(sharedKeyLength + nonSharedKeyLength); + SliceOutput sliceOutput = key.output(); + checkState(this.key != null, "Entry has a shared key but no previous entry was provided"); + sliceOutput.writeBytes(this.key, 0, sharedKeyLength); + sliceOutput.writeBytes(data, nonSharedKeyLength); + } + else { + key = data.readSlice(nonSharedKeyLength); + } + // read value + Slice value = data.readSlice(valueLength); + + this.key = key; + this.value = value; + return true; + } + + @Override + protected void internalClose() + { + //na + } +} diff --git a/leveldb/src/main/java/org/iq80/leveldb/table/BlockTrailer.java b/leveldb/src/main/java/org/iq80/leveldb/table/BlockTrailer.java new file mode 100644 index 0000000..b32f54a --- /dev/null +++ b/leveldb/src/main/java/org/iq80/leveldb/table/BlockTrailer.java @@ -0,0 +1,114 @@ +/* + * Copyright (C) 2011 the original author or authors. + * See the notice.md file distributed with this work for additional + * information regarding copyright ownership. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.iq80.leveldb.table; + +import org.iq80.leveldb.CompressionType; +import org.iq80.leveldb.util.Slice; +import org.iq80.leveldb.util.SliceInput; +import org.iq80.leveldb.util.SliceOutput; +import org.iq80.leveldb.util.Slices; + +import static java.util.Objects.requireNonNull; + +public class BlockTrailer +{ + public static final int ENCODED_LENGTH = 5; + + private final CompressionType compressionType; + private final int crc32c; + + public BlockTrailer(CompressionType compressionType, int crc32c) + { + requireNonNull(compressionType, "compressionType is null"); + + this.compressionType = compressionType; + this.crc32c = crc32c; + } + + public CompressionType getCompressionType() + { + return compressionType; + } + + public int getCrc32c() + { + return crc32c; + } + + @Override + public boolean equals(Object o) + { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + + BlockTrailer that = (BlockTrailer) o; + + if (crc32c != that.crc32c) { + return false; + } + if (compressionType != that.compressionType) { + return false; + } + + return true; + } + + @Override + public int hashCode() + { + int result = compressionType.hashCode(); + result = 31 * result + crc32c; + return result; + } + + @Override + public String toString() + { + StringBuilder sb = new StringBuilder(); + sb.append("BlockTrailer"); + sb.append("{compressionType=").append(compressionType); + sb.append(", crc32c=0x").append(Integer.toHexString(crc32c)); + sb.append('}'); + return sb.toString(); + } + + public static BlockTrailer readBlockTrailer(Slice slice) + { + SliceInput sliceInput = slice.input(); + CompressionType compressionType = CompressionType.getCompressionTypeByPersistentId(sliceInput.readUnsignedByte()); + int crc32c = sliceInput.readInt(); + return new BlockTrailer(compressionType, crc32c); + } + + public static 
Slice writeBlockTrailer(BlockTrailer blockTrailer) + { + Slice slice = Slices.allocate(ENCODED_LENGTH); + writeBlockTrailer(blockTrailer, slice.output()); + return slice; + } + + public static void writeBlockTrailer(BlockTrailer blockTrailer, SliceOutput sliceOutput) + { + sliceOutput.writeByte(blockTrailer.getCompressionType().persistentId()); + sliceOutput.writeInt(blockTrailer.getCrc32c()); + } +} diff --git a/leveldb/src/main/java/org/iq80/leveldb/table/BloomFilterPolicy.java b/leveldb/src/main/java/org/iq80/leveldb/table/BloomFilterPolicy.java new file mode 100644 index 0000000..da71c87 --- /dev/null +++ b/leveldb/src/main/java/org/iq80/leveldb/table/BloomFilterPolicy.java @@ -0,0 +1,135 @@ +/* + * Copyright (C) 2011 the original author or authors. + * See the notice.md file distributed with this work for additional + * information regarding copyright ownership. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.iq80.leveldb.table; + +import org.iq80.leveldb.XFilterPolicy; +import org.iq80.leveldb.util.Hash; +import org.iq80.leveldb.util.Slice; + +import java.util.List; + +/** + * BloomFilter policy + * + * @author Honore Vasconcelos + * @link https://github.com/google/leveldb/commit/85584d497e7b354853b72f450683d59fcf6b9c5c + */ +public final class BloomFilterPolicy implements org.iq80.leveldb.table.FilterPolicy, XFilterPolicy +{ + private final int bitsPerKey; + private final int k; + + public BloomFilterPolicy(final int bitsPerKey) + { + this.bitsPerKey = bitsPerKey; + int k = (int) (bitsPerKey * 0.69); + if (k < 1) { + k = 1; + } + else if (k > 30) { + k = 30; + } + this.k = k; + } + + @Override + public String name() + { + return "leveldb.BuiltinBloomFilter2"; + } + + @Override + public byte[] createFilter(List keys) + { + // Compute bloom filter size (in both bits and bytes) + int bits = keys.size() * bitsPerKey; + + // For small n, we can see a very high false positive rate. Fix it + // by enforcing a minimum bloom filter length. + if (bits < 64) { + bits = 64; + } + + int bytes = (bits + 7) / 8; + bits = bytes * 8; + + final byte[] array = new byte[bytes + 1]; + array[array.length - 1] = (byte) k; // Remember # of probes in filter + + for (Slice key : keys) { + // Use double-hashing to generate a sequence of hash values. + // See analysis in [Kirsch,Mitzenmacher 2006]. 
+ int h = bloomHash(key); + int delta = (h >>> 17) | (h << 15); // Rotate right 17 bits + for (int j = 0; j < k; j++) { + int bitpos = (int) ((toLong(h)) % bits); + final int i = bitpos / 8; + array[i] |= (1 << (bitpos % 8)); + h += delta; + } + } + return array; + } + + private int bloomHash(Slice data) + { + return Hash.hash(data.getRawArray(), data.getRawOffset(), data.length(), 0xbc9f1d34); //avoid data copy + } + + @Override + public boolean keyMayMatch(Slice key, Slice bloomFilter1) + { + int len = bloomFilter1.length(); + byte[] data = bloomFilter1.getRawArray(); + int offset = bloomFilter1.getRawOffset(); + if (len < 2) { + return false; + } + + int bits = (len - 1) * 8; + + // Use the encoded k so that we can read filters generated by + // bloom filters created using different parameters. + int k = data[offset + len - 1]; + if (k > 30) { + // Reserved for potentially new encodings for short bloom filters. + // Consider it a match. + return true; + } + + int h = bloomHash(key); + int delta = (h >>> 17) | (h << 15); // Rotate right 17 bits + for (int j = 0; j < k; j++) { + int bitpos = (int) (toLong(h) % bits); + if ((data[offset + (bitpos / 8)] & (1 << (bitpos % 8))) == 0) { + return false; + } + h += delta; + } + return true; + } + + /** + * Convert an unsigned int into a long + */ + private long toLong(int h) + { + return h & 0xffffffffL; + } +} diff --git a/leveldb/src/main/java/org/iq80/leveldb/table/BytewiseComparator.java b/leveldb/src/main/java/org/iq80/leveldb/table/BytewiseComparator.java new file mode 100644 index 0000000..8625d3f --- /dev/null +++ b/leveldb/src/main/java/org/iq80/leveldb/table/BytewiseComparator.java @@ -0,0 +1,76 @@ +/* + * Copyright (C) 2011 the original author or authors. + * See the notice.md file distributed with this work for additional + * information regarding copyright ownership. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.iq80.leveldb.table; + +import org.iq80.leveldb.util.Slice; + +public class BytewiseComparator + implements UserComparator +{ + @Override + public String name() + { + return "leveldb.BytewiseComparator"; + } + + @Override + public int compare(Slice sliceA, Slice sliceB) + { + return sliceA.compareTo(sliceB); + } + + @Override + public Slice findShortestSeparator( + Slice start, + Slice limit) + { + // Find length of common prefix + int sharedBytes = BlockBuilder.calculateSharedBytes(start, limit); + + // Do not shorten if one string is a prefix of the other + if (sharedBytes < Math.min(start.length(), limit.length())) { + // if we can add one to the last shared byte without overflow and the two keys differ by more than + // one increment at this location. + int lastSharedByte = start.getUnsignedByte(sharedBytes); + if (lastSharedByte < 0xff && lastSharedByte + 1 < limit.getUnsignedByte(sharedBytes)) { + Slice result = start.copySlice(0, sharedBytes + 1); + result.setByte(sharedBytes, lastSharedByte + 1); + + assert (compare(result, limit) < 0) : "start must be less than last limit"; + return result; + } + } + return start; + } + + @Override + public Slice findShortSuccessor(Slice key) + { + // Find first character that can be incremented + for (int i = 0; i < key.length(); i++) { + int b = key.getUnsignedByte(i); + if (b != 0xff) { + Slice result = key.copySlice(0, i + 1); + result.setByte(i, b + 1); + return result; + } + } + // key is a run of 0xffs. Leave it alone. 
+ return key; + } +} diff --git a/leveldb/src/main/java/org/iq80/leveldb/table/CacheKey.java b/leveldb/src/main/java/org/iq80/leveldb/table/CacheKey.java new file mode 100644 index 0000000..16b5500 --- /dev/null +++ b/leveldb/src/main/java/org/iq80/leveldb/table/CacheKey.java @@ -0,0 +1,53 @@ +/* + * Copyright (C) 2011 the original author or authors. + * See the notice.md file distributed with this work for additional + * information regarding copyright ownership. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.iq80.leveldb.table; + +public final class CacheKey +{ + private final long id; + private final BlockHandle key; + + CacheKey(final long id, BlockHandle key) + { + this.id = id; + this.key = key; + } + + @Override + public boolean equals(Object o) + { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + + CacheKey cacheKey = (CacheKey) o; + + return id == cacheKey.id && key.equals(cacheKey.key); + } + + @Override + public int hashCode() + { + int result = (int) (id ^ (id >>> 32)); + result = 31 * result + key.hashCode(); + return result; + } +} diff --git a/leveldb/src/main/java/org/iq80/leveldb/table/CustomUserComparator.java b/leveldb/src/main/java/org/iq80/leveldb/table/CustomUserComparator.java new file mode 100644 index 0000000..255d09a --- /dev/null +++ b/leveldb/src/main/java/org/iq80/leveldb/table/CustomUserComparator.java @@ -0,0 +1,63 @@ +/* + * Copyright (C) 2011 the original author or authors. + * See the notice.md file distributed with this work for additional + * information regarding copyright ownership. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.iq80.leveldb.table; + +import org.iq80.leveldb.DBComparator; +import org.iq80.leveldb.util.Slice; + +import static java.util.Objects.requireNonNull; + +public class CustomUserComparator + implements UserComparator +{ + private final DBComparator comparator; + + public CustomUserComparator(DBComparator comparator) + { + requireNonNull(comparator.name(), "User Comparator name can't be null"); + this.comparator = comparator; + } + + @Override + public String name() + { + return comparator.name(); + } + + @Override + public Slice findShortestSeparator(Slice start, Slice limit) + { + byte[] shortestSeparator = comparator.findShortestSeparator(start.getBytes(), limit.getBytes()); + requireNonNull(shortestSeparator, "User comparator returned null from findShortestSeparator()"); + return new Slice(shortestSeparator); + } + + @Override + public Slice findShortSuccessor(Slice key) + { + byte[] shortSuccessor = comparator.findShortSuccessor(key.getBytes()); + requireNonNull(comparator, "User comparator returned null from findShortSuccessor()"); + return new Slice(shortSuccessor); + } + + @Override + public int compare(Slice o1, Slice o2) + { + return comparator.compare(o1.getBytes(), o2.getBytes()); + } +} diff --git a/leveldb/src/main/java/org/iq80/leveldb/table/FilterBlockBuilder.java b/leveldb/src/main/java/org/iq80/leveldb/table/FilterBlockBuilder.java new file mode 100644 index 0000000..00d90bc --- /dev/null +++ b/leveldb/src/main/java/org/iq80/leveldb/table/FilterBlockBuilder.java @@ -0,0 +1,125 @@ +/* + * Copyright (C) 2011 the original author or authors. + * See the notice.md file distributed with this work for additional + * information regarding copyright ownership. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.iq80.leveldb.table; + +import org.iq80.leveldb.util.DynamicSliceOutput; +import org.iq80.leveldb.util.IntVector; +import org.iq80.leveldb.util.Slice; + +import java.util.ArrayList; +import java.util.List; + +import static com.google.common.base.Preconditions.checkArgument; + +/** + * The filter block stores a sequence of filters, where filter i contains + * the output of FilterPolicy::CreateFilter() on all keys that are stored + * in a block whose file offset falls within the range + *

+ * [ i*base ... (i+1)*base-1 ] + *

+ * Currently, "base" is 2KB. So for example, if blocks X and Y start in + * the range [ 0KB .. 2KB-1 ], all of the keys in X and Y will be + * converted to a filter by calling FilterPolicy::CreateFilter(), and the + * resulting filter will be stored as the first filter in the filter + * block. + *

+ * The filter block is formatted as follows: + *

+ * [filter 0] + * [filter 1] + * [filter 2] + * ... + * [filter N-1] + *

+ * [offset of filter 0] : 4 bytes + * [offset of filter 1] : 4 bytes + * [offset of filter 2] : 4 bytes + * ... + * [offset of filter N-1] : 4 bytes + *

+ * [offset of beginning of offset array] : 4 bytes + * lg(base) : 1 byte + *

+ *

+ * + * @author Honore Vasconcelos + */ +public class FilterBlockBuilder +{ + // Generate new filter every 2KB of data + private static final byte FILTER_BASE_LG = 11; + private static final int FILTER_BASE = 1 << FILTER_BASE_LG; + + private final List keys = new ArrayList<>(); + private final DynamicSliceOutput result = new DynamicSliceOutput(32); + private final IntVector filterOffsets = new IntVector(32); + private final FilterPolicy policy; + + public FilterBlockBuilder(FilterPolicy policy) + { + this.policy = policy; + } + + public void addKey(Slice key) + { + keys.add(key); + } + + public void startBlock(long blockOffset) + { + long filterIndex = blockOffset / FILTER_BASE; + checkArgument(filterIndex >= filterOffsets.size()); + while (filterIndex > filterOffsets.size()) { + generateFilter(); + } + } + + private void generateFilter() + { + final int numberOfKeys = keys.size(); + if (numberOfKeys == 0) { + //Fast path if there are no keys for this filter + filterOffsets.add(result.size()); + return; + } + filterOffsets.add(result.size()); + final byte[] filter = policy.createFilter(keys); + result.writeBytes(filter); + keys.clear(); + } + + public Slice finish() + { + if (!keys.isEmpty()) { + generateFilter(); + } + final int arrayOffset = result.size(); + filterOffsets.write(result); + result.writeInt(arrayOffset); //4 bytes + result.write(FILTER_BASE_LG); //1 byte + final Slice slice = result.slice(); + return slice; + } + + public String name() + { + return policy.name(); + } +} diff --git a/leveldb/src/main/java/org/iq80/leveldb/table/FilterBlockReader.java b/leveldb/src/main/java/org/iq80/leveldb/table/FilterBlockReader.java new file mode 100644 index 0000000..b1f1520 --- /dev/null +++ b/leveldb/src/main/java/org/iq80/leveldb/table/FilterBlockReader.java @@ -0,0 +1,74 @@ +/* + * Copyright (C) 2011 the original author or authors. + * See the notice.md file distributed with this work for additional + * information regarding copyright ownership. 
/**
 * Reader counterpart of {@code FilterBlockBuilder}: parses a serialized
 * filter block and answers probabilistic membership queries for a given
 * data-block offset.
 *
 * @author Honore Vasconcelos
 */
final class FilterBlockReader
{
    // lg(base): shift that maps a block file-offset to its filter index
    private final byte baseLg;
    // number of filters in the block
    private final int num;
    // whole filter block, or null when the block is too small / corrupt
    private final Slice contents;
    // start of the offset array within contents
    private final int offset;
    private final FilterPolicy filterPolicy;

    FilterBlockReader(FilterPolicy filterPolicy, Slice contents)
    {
        this.filterPolicy = filterPolicy;
        final int n = contents.length();
        final int lgAndOffset = 5;
        if (n < lgAndOffset) { //1 byte for baseLg and 4 for start of offset array
            // trailer can't even fit: treat as empty (keyMayMatch will say "maybe")
            this.baseLg = 0;
            this.contents = null;
            this.num = 0;
            this.offset = 0;
            return;
        }
        baseLg = contents.getByte(n - 1);
        offset = contents.getInt(n - lgAndOffset);
        if (offset > n - lgAndOffset) {
            // offset array claims to start past the end of the block: corrupt
            this.num = 0;
            this.contents = null;
            return;
        }
        num = (n - lgAndOffset - offset) / 4;
        this.contents = contents;
    }

    /**
     * Returns false only when the filter proves the key is absent from the
     * data block starting at file offset {@code offset1}; any parse error or
     * out-of-range index conservatively yields true ("may match").
     */
    public boolean keyMayMatch(long offset1, Slice key)
    {
        final int index = (int) (offset1 >> baseLg);
        if (index < num) {
            // entries index*4 and (index+1)*4 bound filter i within contents
            final int start = contents.getInt(this.offset + index * 4);
            final int limit = contents.getInt(this.offset + index * 4 + 4);
            if (start <= limit && limit <= offset) {
                Slice filter = contents.slice(start, limit - start);
                return filterPolicy.keyMayMatch(key, filter);
            }
            else if (start == limit) {
                // Empty filters do not match any keys
                return false;
            }
        }
        return true; // Errors are treated as potential matches
    }
}
potential matches + } +} diff --git a/leveldb/src/main/java/org/iq80/leveldb/table/FilterPolicy.java b/leveldb/src/main/java/org/iq80/leveldb/table/FilterPolicy.java new file mode 100644 index 0000000..dd457f8 --- /dev/null +++ b/leveldb/src/main/java/org/iq80/leveldb/table/FilterPolicy.java @@ -0,0 +1,59 @@ +/* + * Copyright (C) 2011 the original author or authors. + * See the notice.md file distributed with this work for additional + * information regarding copyright ownership. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.iq80.leveldb.table; + +import org.iq80.leveldb.util.Slice; + +import java.util.List; + +/** + * A database can be configured with a custom FilterPolicy object. + * This object is responsible for creating a small filter from a set + * of keys. These filters are stored in leveldb and are consulted + * automatically by leveldb to decide whether or not to read some + * information from disk. In many cases, a filter can cut down the + * number of disk seeks form a handful to a single disk seek per + * DB::Get() call. + *

+ * Most people will want to use the builtin bloom filter support (see + * NewBloomFilterPolicy() below). + * + * @author Honore Vasconcelos + */ +public interface FilterPolicy extends org.iq80.leveldb.XFilterPolicy +{ + String name(); + + /** + * Append a filter that summarizes keys[0,n-1] to *dst. + * + * @param keys keys[0,n-1] contains a list of keys (potentially with duplicates) + * that are ordered according to the user supplied comparator. + */ + byte[] createFilter(List keys); + + /** + * "filter" contains the data appended by a preceding call to + * CreateFilter() on this class. This method must return true if + * the key was in the list of keys passed to CreateFilter(). + * This method may return true or false if the key was not on the + * list, but it should aim to return false with a high probability. + */ + boolean keyMayMatch(Slice key, Slice filter); +} diff --git a/leveldb/src/main/java/org/iq80/leveldb/table/Footer.java b/leveldb/src/main/java/org/iq80/leveldb/table/Footer.java new file mode 100644 index 0000000..0f7d835 --- /dev/null +++ b/leveldb/src/main/java/org/iq80/leveldb/table/Footer.java @@ -0,0 +1,98 @@ +/* + * Copyright (C) 2011 the original author or authors. + * See the notice.md file distributed with this work for additional + * information regarding copyright ownership. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
/**
 * Fixed-size footer stored at the very end of every table file. It holds the
 * handles of the metaindex and index blocks, zero padding, and an 8-byte
 * magic number written as two little-endian 32-bit halves.
 */
public class Footer
{
    // two max-length varint block handles plus the 8-byte magic number
    public static final int ENCODED_LENGTH = (BlockHandle.MAX_ENCODED_LENGTH * 2) + SIZE_OF_LONG;

    private final BlockHandle metaindexBlockHandle;
    private final BlockHandle indexBlockHandle;

    Footer(BlockHandle metaindexBlockHandle, BlockHandle indexBlockHandle)
    {
        this.metaindexBlockHandle = metaindexBlockHandle;
        this.indexBlockHandle = indexBlockHandle;
    }

    public BlockHandle getMetaindexBlockHandle()
    {
        return metaindexBlockHandle;
    }

    public BlockHandle getIndexBlockHandle()
    {
        return indexBlockHandle;
    }

    /**
     * Decodes a footer from exactly {@link #ENCODED_LENGTH} bytes, verifying
     * the table magic number.
     *
     * @throws IllegalArgumentException if the slice has the wrong length or
     *         the magic number does not match (file is not a table)
     */
    public static Footer readFooter(Slice slice)
    {
        requireNonNull(slice, "slice is null");
        checkArgument(slice.length() == ENCODED_LENGTH, "Expected slice.size to be %s but was %s", ENCODED_LENGTH, slice.length());

        SliceInput sliceInput = slice.input();

        // read metaindex and index handles
        BlockHandle metaindexBlockHandle = readBlockHandle(sliceInput);
        BlockHandle indexBlockHandle = readBlockHandle(sliceInput);

        // skip padding
        sliceInput.setPosition(ENCODED_LENGTH - SIZE_OF_LONG);

        // verify magic number (reads are left-to-right: low half first)
        long magicNumber = sliceInput.readUnsignedInt() | (sliceInput.readUnsignedInt() << 32);
        checkArgument(magicNumber == TableBuilder.TABLE_MAGIC_NUMBER, "File is not a table (bad magic number)");

        return new Footer(metaindexBlockHandle, indexBlockHandle);
    }

    /**
     * Convenience overload: encodes {@code footer} into a freshly allocated
     * slice of {@link #ENCODED_LENGTH} bytes.
     */
    public static Slice writeFooter(Footer footer)
    {
        Slice slice = Slices.allocate(ENCODED_LENGTH);
        writeFooter(footer, slice.output());
        return slice;
    }

    /**
     * Encodes {@code footer} (handles, padding, magic) into the given output.
     */
    public static void writeFooter(Footer footer, SliceOutput sliceOutput)
    {
        // remember the starting write index so we can calculate the padding
        int startingWriteIndex = sliceOutput.size();

        // write metaindex and index handles
        writeBlockHandleTo(footer.getMetaindexBlockHandle(), sliceOutput);
        writeBlockHandleTo(footer.getIndexBlockHandle(), sliceOutput);

        // write padding
        sliceOutput.writeZero(ENCODED_LENGTH - SIZE_OF_LONG - (sliceOutput.size() - startingWriteIndex));

        // write magic number as two (little endian) integers
        sliceOutput.writeInt((int) TableBuilder.TABLE_MAGIC_NUMBER);
        sliceOutput.writeInt((int) (TableBuilder.TABLE_MAGIC_NUMBER >>> 32));
    }
}
+ */ + +package org.iq80.leveldb.table; + +import org.iq80.leveldb.util.Slice; + +/** + * @author Honore Vasconcelos + */ +public interface KeyValueFunction +{ + /** + * Function to apply on first entry after seeking in a table. + * + * @param internalKey internal key + * @param value associated value + * @return transformed key/value + */ + T apply(Slice internalKey, Slice value); +} diff --git a/leveldb/src/main/java/org/iq80/leveldb/table/RestartPositions.java b/leveldb/src/main/java/org/iq80/leveldb/table/RestartPositions.java new file mode 100644 index 0000000..bab916c --- /dev/null +++ b/leveldb/src/main/java/org/iq80/leveldb/table/RestartPositions.java @@ -0,0 +1,55 @@ +/* + * Copyright (C) 2011 the original author or authors. + * See the notice.md file distributed with this work for additional + * information regarding copyright ownership. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.iq80.leveldb.table; + +import org.iq80.leveldb.util.Slice; + +import static com.google.common.base.Preconditions.checkArgument; +import static com.google.common.base.Preconditions.checkPositionIndex; +import static java.util.Objects.requireNonNull; +import static org.iq80.leveldb.util.SizeOf.SIZE_OF_INT; + +final class RestartPositions +{ + private final Slice restartPositions; + private final int size; + + RestartPositions(Slice restartPositions) + { + requireNonNull(restartPositions, "restartPositions is null"); + checkArgument(restartPositions.length() % SIZE_OF_INT == 0, "restartPositions.readableBytes() must be a multiple of %s", SIZE_OF_INT); + this.restartPositions = restartPositions; + this.size = restartPositions.length() / SIZE_OF_INT; + } + + public int get(int index) + { + checkPositionIndex(index, size, "index out of range"); + return restartPositions.getInt(index * SIZE_OF_INT); + } + + public boolean isEmpty() + { + return size == 0; + } + + public int size() + { + return size; + } +} diff --git a/leveldb/src/main/java/org/iq80/leveldb/table/Table.java b/leveldb/src/main/java/org/iq80/leveldb/table/Table.java new file mode 100644 index 0000000..b4effee --- /dev/null +++ b/leveldb/src/main/java/org/iq80/leveldb/table/Table.java @@ -0,0 +1,301 @@ +/* + * Copyright (C) 2011 the original author or authors. + * See the notice.md file distributed with this work for additional + * information regarding copyright ownership. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
/**
 * Read-only handle to an on-disk sorted table (SSTable) file.
 * <p>
 * Lifecycle is reference counted: a table starts with one reference;
 * {@link #close()}/{@link #release()} drop it, and iterators obtained from
 * {@link #iterator(ReadOptions)} retain the table and release it when closed,
 * so the underlying file is closed as soon as the last user is done.
 */
public final class Table
        implements Closeable
{
    private static final Charset CHARSET = StandardCharsets.UTF_8;
    private static final AtomicLong ID_GENERATOR = new AtomicLong();
    // process-unique id used to namespace this table's entries in the shared block cache
    private final long id = ID_GENERATOR.incrementAndGet();
    private final Comparator<Slice> comparator;
    private final Block indexBlock;
    private final BlockHandle metaindexBlockHandle;
    private final RandomInputFile source;
    private final ILRUCache<CacheKey, Slice> blockCache;
    private final FilterBlockReader filter;
    //use ref count to release resource early
    //external user iterator are required to be closed
    private final AtomicInteger refCount = new AtomicInteger(1);

    /**
     * Opens a table over {@code source}: reads the footer from the end of the
     * file, loads the index block, and (if a filter policy is given) the
     * filter block referenced by the metaindex.
     *
     * @param source open file positioned anywhere; must be at least footer-sized
     * @param comparator user-key comparator for index/data block lookups
     * @param paranoidChecks verify block checksums while loading metadata
     * @param blockCache shared data-block cache, or null to read uncached
     * @param filterPolicy policy for bloom-style filtering, or null for none
     * @throws IOException if the file cannot be read or is corrupt
     */
    public Table(RandomInputFile source, Comparator<Slice> comparator, boolean paranoidChecks, ILRUCache<CacheKey, Slice> blockCache, final FilterPolicy filterPolicy)
            throws IOException
    {
        this.source = source;
        this.blockCache = blockCache;
        requireNonNull(source, "source is null");
        long size = source.size();
        checkArgument(size >= Footer.ENCODED_LENGTH, "File is corrupt: size must be at least %s bytes", Footer.ENCODED_LENGTH);
        requireNonNull(comparator, "comparator is null");

        this.comparator = comparator;
        // footer occupies the last ENCODED_LENGTH bytes and locates the index blocks
        final ByteBuffer footerData = source.read(size - Footer.ENCODED_LENGTH, Footer.ENCODED_LENGTH);

        Footer footer = Footer.readFooter(Slices.avoidCopiedBuffer(footerData));
        indexBlock = new Block(readRawBlock(footer.getIndexBlockHandle(), paranoidChecks), comparator); //no need for cache
        metaindexBlockHandle = footer.getMetaindexBlockHandle();
        this.filter = readMeta(filterPolicy, paranoidChecks);
    }

    // Looks up "filter.<policy name>" in the metaindex block and loads the
    // corresponding filter block; returns null when no policy is configured
    // or no matching entry exists.
    private FilterBlockReader readMeta(FilterPolicy filterPolicy, boolean verifyChecksum) throws IOException
    {
        assert refCount.get() > 0;
        if (filterPolicy == null) {
            return null; // Do not need any metadata
        }

        final Block meta = new Block(readRawBlock(metaindexBlockHandle, verifyChecksum), new BytewiseComparator());
        try (BlockIterator iterator = meta.iterator()) {
            final Slice targetKey = new Slice(("filter." + filterPolicy.name()).getBytes(CHARSET));
            if (iterator.seek(targetKey) && iterator.key().equals(targetKey)) {
                return readFilter(filterPolicy, iterator.value(), verifyChecksum);
            }
            else {
                return null;
            }
        }
    }

    // Decodes the filter block handle and wraps its raw bytes in a reader.
    protected FilterBlockReader readFilter(FilterPolicy filterPolicy, Slice filterHandle, boolean verifyChecksum) throws IOException
    {
        assert refCount.get() > 0;
        final Slice filterBlock = readRawBlock(BlockHandle.readBlockHandle(filterHandle.input()), verifyChecksum);
        return new FilterBlockReader(filterPolicy, filterBlock);
    }

    /**
     * Returns a two-level iterator over all entries: the index block drives
     * lazy loading of data blocks. Retains this table; the reference is
     * released when the returned iterator is closed.
     */
    public SliceIterator iterator(ReadOptions options)
    {
        assert refCount.get() > 0;
        this.retain();
        return SeekingIterators.twoLevelSliceIterator(indexBlock.iterator(), blockHandle -> openBlock(options, blockHandle), this::release);
    }

    // Adapter used by the two-level iterator: open a data block and iterate it.
    private BlockIterator openBlock(ReadOptions options, Slice blockHandle)
    {
        Block dataBlock = openBlock(blockHandle, options);
        return dataBlock.iterator();
    }

    public FilterBlockReader getFilter()
    {
        assert refCount.get() > 0;
        return filter;
    }

    /**
     * Opens the data block referenced by the encoded handle {@code blockEntry}
     * (an index-block value), wrapping any I/O failure in {@link DBException}.
     */
    public Block openBlock(Slice blockEntry, ReadOptions options)
    {
        assert refCount.get() > 0;
        BlockHandle blockHandle = BlockHandle.readBlockHandle(blockEntry.input());
        Block dataBlock;
        try {
            dataBlock = readBlock(blockHandle, options);
        }
        catch (IOException e) {
            throw new DBException(e);
        }
        return dataBlock;
    }

    // Reads a data block, consulting/populating the block cache according to
    // the read options: no cache configured -> read directly; fillCache off
    // -> probe the cache but never insert; otherwise load-through the cache.
    private Block readBlock(BlockHandle blockHandle, ReadOptions options)
            throws IOException
    {
        assert refCount.get() > 0;
        try {
            final Slice rawBlock;
            if (blockCache == null) {
                rawBlock = readRawBlock(blockHandle, options.verifyChecksums());
            }
            else if (!options.fillCache()) {
                Slice ifPresent = blockCache.getIfPresent(new CacheKey(id, blockHandle));
                if (ifPresent == null) {
                    rawBlock = readRawBlock(blockHandle, options.verifyChecksums());
                }
                else {
                    rawBlock = ifPresent;
                }
            }
            else {
                rawBlock = blockCache.load(new CacheKey(id, blockHandle), () -> readRawBlock(blockHandle, options.verifyChecksums()));
            }
            return new Block(rawBlock, comparator);
        }
        catch (ExecutionException e) {
            // unwrap the cache loader's failure; prefer rethrowing the original IOException
            Throwables.propagateIfPossible(e.getCause(), IOException.class);
            throw new IOException(e.getCause());
        }
    }

    /**
     * Reads a block's bytes from the file, optionally verifying the CRC32C
     * checksum (which covers the data plus the 1-byte compression type), and
     * decompresses according to the trailer's compression type.
     */
    protected Slice readRawBlock(BlockHandle blockHandle, boolean verifyChecksum)
            throws IOException
    {
        assert refCount.get() > 0;
        // read block trailer
        final ByteBuffer content = source.read(blockHandle.getOffset(), blockHandle.getFullBlockSize());
        int limit = content.limit();
        int position = content.position();
        int trailerStart = position + blockHandle.getDataSize();
        content.position(trailerStart);
        final BlockTrailer blockTrailer = BlockTrailer.readBlockTrailer(Slices.avoidCopiedBuffer(content));

        // only verify check sums if explicitly asked by the user
        if (verifyChecksum) {
            // checksum data and the compression type in the trailer
            PureJavaCrc32C checksum = new PureJavaCrc32C();
            content.position(position).limit(trailerStart /*content*/ + 1/*type*/);
            checksum.update(content);
            int actualCrc32c = checksum.getMaskedValue();

            checkState(blockTrailer.getCrc32c() == actualCrc32c, "Block corrupted: checksum mismatch");
        }

        // decompress data
        Slice uncompressedData;
        content.position(position);
        content.limit(limit - BlockTrailer.ENCODED_LENGTH);
        if (blockTrailer.getCompressionType() == ZLIB || blockTrailer.getCompressionType() == ZLIB_RAW) {
            ByteBuffer uncompressed = ZLib.uncompress(content, blockTrailer.getCompressionType() == ZLIB_RAW);
            uncompressedData = Slices.avoidCopiedBuffer(uncompressed);
        }
        else if (blockTrailer.getCompressionType() == SNAPPY) {
            ByteBuffer uncompressed = Snappy.uncompress(content);
            uncompressedData = Slices.copiedBuffer(uncompressed);
        }
        else {
            uncompressedData = Slices.avoidCopiedBuffer(content);
        }

        return uncompressedData;
    }

    /**
     * Point lookup: seeks the index block, consults the filter (if any) to
     * skip blocks that provably lack the key, then seeks within the data
     * block and applies {@code keyValueFunction} to the first entry at or
     * after {@code key}. Returns null when nothing is found or the filter
     * rules the key out.
     */
    public <T> T internalGet(ReadOptions options, Slice key, KeyValueFunction<T> keyValueFunction)
    {
        assert refCount.get() > 0;
        try (final BlockIterator iterator = indexBlock.iterator()) {
            if (iterator.seek(key)) {
                final Slice handleValue = iterator.value();
                if (filter != null && !filter.keyMayMatch(BlockHandle.readBlockHandle(handleValue.input()).getOffset(), key)) {
                    return null;
                }
                else {
                    try (BlockIterator iterator1 = openBlock(handleValue, options).iterator()) {
                        if (iterator1.seek(key)) {
                            return keyValueFunction.apply(iterator1.key(), iterator1.value());
                        }
                    }
                }
            }
            return null;
        }
    }

    /**
     * Given a key, return an approximate byte offset in the file where
     * the data for that key begins (or would begin if the key were
     * present in the file). The returned value is in terms of file
     * bytes, and so includes effects like compression of the underlying data.
     * For example, the approximate offset of the last key in the table will
     * be close to the file length.
     */
    public long getApproximateOffsetOf(Slice key)
    {
        assert refCount.get() > 0;
        try (BlockIterator iterator = indexBlock.iterator()) {
            if (iterator.seek(key)) {
                BlockHandle blockHandle = BlockHandle.readBlockHandle(iterator.value().input());
                return blockHandle.getOffset();
            }
        }

        // key is past the last key in the file. Approximate the offset
        // by returning the offset of the metaindex block (which is
        // right near the end of the file).
        return metaindexBlockHandle.getOffset();
    }

    /**
     * Try to retain current instance.
     *
     * @return {@code true} if table was successfully retained, {@code false} otherwise
     */
    public boolean retain()
    {
        // CAS loop: never resurrect a table whose count already hit zero
        int refs;
        do {
            refs = refCount.get();
            if (refs == 0) {
                return false; //already released. do not use!
            }
        } while (!refCount.compareAndSet(refs, refs + 1));
        return true;
    }

    /**
     * Drops one reference; the underlying file is closed when the count
     * reaches zero.
     */
    public void release() throws IOException
    {
        assert refCount.get() > 0;
        final int refs = refCount.decrementAndGet();
        if (refs == 0) {
            source.close();
        }
    }

    @Override
    public String toString()
    {
        return "Table" +
                "{source='" + source + '\'' +
                ", comparator=" + comparator +
                '}';
    }

    @Override
    public void close() throws IOException
    {
        release();
    }
}
+ */ +package org.iq80.leveldb.table; + +import org.iq80.leveldb.CompressionType; +import org.iq80.leveldb.Options; +import org.iq80.leveldb.env.WritableFile; +import org.iq80.leveldb.util.PureJavaCrc32C; +import org.iq80.leveldb.util.Slice; +import org.iq80.leveldb.util.Slices; +import org.iq80.leveldb.util.Snappy; +import org.iq80.leveldb.util.ZLib; + +import java.io.IOException; +import java.nio.charset.Charset; + +import static com.google.common.base.Preconditions.checkState; +import static java.util.Objects.requireNonNull; + +public class TableBuilder +{ + /** + * TABLE_MAGIC_NUMBER was picked by running + * echo http://code.google.com/p/leveldb/ | sha1sum + * and taking the leading 64 bits. + */ + public static final long TABLE_MAGIC_NUMBER = 0xdb4775248b80fb57L; + private static final Charset CHARSET = Charset.forName("UTF-8"); + + private final int blockRestartInterval; + private final int blockSize; + private final CompressionType compressionType; + + private final WritableFile file; + private final BlockBuilder dataBlockBuilder; + private final BlockBuilder indexBlockBuilder; + private final FilterBlockBuilder filterPolicyBuilder; + private Slice lastKey; + private final UserComparator userComparator; + + private long entryCount; + + // Either Finish() or Abandon() has been called. + private boolean closed; + + // We do not emit the index entry for a block until we have seen the + // first key for the next data block. This allows us to use shorter + // keys in the index block. For example, consider a block boundary + // between the keys "the quick brown fox" and "the who". We can use + // "the r" as the key for the index block entry since it is >= all + // entries in the first block and < all entries in subsequent + // blocks. 
+ private boolean pendingIndexEntry; + private BlockHandle pendingHandle; // Handle to add to index block + + private Slice compressedOutput; + + private long position; + + public TableBuilder(Options options, WritableFile file, UserComparator userComparator) + { + requireNonNull(options, "options is null"); + requireNonNull(file, "file is null"); + + this.file = file; + this.userComparator = userComparator; + + blockRestartInterval = options.blockRestartInterval(); + blockSize = options.blockSize(); + compressionType = options.compressionType(); + + dataBlockBuilder = new BlockBuilder((int) Math.min(blockSize * 1.1, options.maxFileSize()), blockRestartInterval, userComparator); + + // with expected 50% compression + int expectedNumberOfBlocks = 1024; + indexBlockBuilder = new BlockBuilder(BlockHandle.MAX_ENCODED_LENGTH * expectedNumberOfBlocks, 1, userComparator); + + lastKey = Slices.EMPTY_SLICE; + + if (options.filterPolicy() != null) { + filterPolicyBuilder = new FilterBlockBuilder((FilterPolicy) options.filterPolicy()); + filterPolicyBuilder.startBlock(0); + } + else { + filterPolicyBuilder = null; + } + } + + public long getEntryCount() + { + return entryCount; + } + + public long getFileSize() + { + return position; + } + + public void add(BlockEntry blockEntry) + throws IOException + { + requireNonNull(blockEntry, "blockEntry is null"); + add(blockEntry.getKey(), blockEntry.getValue()); + } + + public void add(Slice key, Slice value) + throws IOException + { + requireNonNull(key, "key is null"); + requireNonNull(value, "value is null"); + + checkState(!closed, "table is finished"); + + if (entryCount > 0) { + assert (userComparator.compare(key, lastKey) > 0) : "key must be greater than last key"; + } + + // If we just wrote a block, we can now add the handle to index block + if (pendingIndexEntry) { + checkState(dataBlockBuilder.isEmpty(), "Internal error: Table has a pending index entry but data block builder is empty"); + + Slice shortestSeparator = 
userComparator.findShortestSeparator(lastKey, key); + + Slice handleEncoding = BlockHandle.writeBlockHandle(pendingHandle); + indexBlockBuilder.add(shortestSeparator, handleEncoding); + pendingIndexEntry = false; + } + + if (filterPolicyBuilder != null) { + filterPolicyBuilder.addKey(key); + } + + lastKey = key; + entryCount++; + dataBlockBuilder.add(key, value); + + int estimatedBlockSize = dataBlockBuilder.currentSizeEstimate(); + if (estimatedBlockSize >= blockSize) { + flush(); + } + } + + private void flush() + throws IOException + { + checkState(!closed, "table is finished"); + if (dataBlockBuilder.isEmpty()) { + return; + } + + checkState(!pendingIndexEntry, "Internal error: Table already has a pending index entry to flush"); + + pendingHandle = writeBlock(dataBlockBuilder); + + if (filterPolicyBuilder != null) { + filterPolicyBuilder.startBlock(position); + } + + pendingIndexEntry = true; + } + + private BlockHandle writeBlock(BlockBuilder blockBuilder) + throws IOException + { + // close the block + Slice raw = blockBuilder.finish(); + BlockHandle blockHandle = writeRawBlock(raw); + + // clean up state + blockBuilder.reset(); + + return blockHandle; + } + + private BlockHandle writeRawBlock(Slice raw) throws IOException + { + // attempt to compress the block + Slice blockContents = raw; + CompressionType blockCompressionType = CompressionType.NONE; + if (compressionType == CompressionType.ZLIB || compressionType == CompressionType.ZLIB_RAW) { + ensureCompressedOutputCapacity(maxCompressedLength(raw.length())); + try { + int compressedSize = ZLib.compress(raw.getRawArray(), raw.getRawOffset(), raw.length(), compressedOutput.getRawArray(), 0, compressionType == CompressionType.ZLIB_RAW); + + // Don't use the compressed data if compressed less than 12.5%, + if (compressedSize < raw.length() - (raw.length() / 8)) { + blockContents = compressedOutput.slice(0, compressedSize); + blockCompressionType = compressionType; + } + } + catch (IOException ignored) { + // 
compression failed, so just store uncompressed form + } + } + else if (compressionType == CompressionType.SNAPPY) { + ensureCompressedOutputCapacity(maxCompressedLength(raw.length())); + try { + int compressedSize = Snappy.compress(raw.getRawArray(), raw.getRawOffset(), raw.length(), compressedOutput.getRawArray(), 0); + + // Don't use the compressed data if compressed less than 12.5%, + if (compressedSize < raw.length() - (raw.length() / 8)) { + blockContents = compressedOutput.slice(0, compressedSize); + blockCompressionType = CompressionType.SNAPPY; + } + } + catch (IOException ignored) { + // compression failed, so just store uncompressed form + } + } + + // create block trailer + BlockTrailer blockTrailer = new BlockTrailer(blockCompressionType, crc32c(blockContents, blockCompressionType)); + Slice trailer = BlockTrailer.writeBlockTrailer(blockTrailer); + + // create a handle to this block + BlockHandle blockHandle = new BlockHandle(position, blockContents.length()); + + // write data and trailer + file.append(blockContents); + file.append(trailer); + position += blockContents.length() + trailer.length(); + return blockHandle; + } + + private static int maxCompressedLength(int length) + { + // Compressed data can be defined as: + // compressed := item* literal* + // item := literal* copy + // + // The trailing literal sequence has a space blowup of at most 62/60 + // since a literal of length 60 needs one tag byte + one extra byte + // for length information. + // + // Item blowup is trickier to measure. Suppose the "copy" op copies + // 4 bytes of data. Because of a special check in the encoding code, + // we produce a 4-byte copy only if the offset is < 65536. Therefore + // the copy op takes 3 bytes to encode, and this type of item leads + // to at most the 62/60 blowup for representing literals. + // + // Suppose the "copy" op copies 5 bytes of data. If the offset is big + // enough, it will take 5 bytes to encode the copy op. 
Therefore the + // worst case here is a one-byte literal followed by a five-byte copy. + // I.e., 6 bytes of input turn into 7 bytes of "compressed" data. + // + // This last factor dominates the blowup, so the final estimate is: + return 32 + length + (length / 6); + } + + public void finish() + throws IOException + { + checkState(!closed, "table is finished"); + + // flush current data block + flush(); + + // mark table as closed + closed = true; + + BlockHandle filterBlockHandle = null; + + if (filterPolicyBuilder != null) { + filterBlockHandle = writeRawBlock(filterPolicyBuilder.finish()); + } + + // write (empty) meta index block + BlockBuilder metaIndexBlockBuilder = new BlockBuilder(256, blockRestartInterval, new BytewiseComparator()); + + if (filterBlockHandle != null) { + metaIndexBlockBuilder.add(new Slice(("filter." + filterPolicyBuilder.name()).getBytes(CHARSET)), BlockHandle.writeBlockHandle(filterBlockHandle)); + } + + BlockHandle metaindexBlockHandle = writeBlock(metaIndexBlockBuilder); + + // add last handle to index block + if (pendingIndexEntry) { + Slice shortSuccessor = userComparator.findShortSuccessor(lastKey); + + Slice handleEncoding = BlockHandle.writeBlockHandle(pendingHandle); + indexBlockBuilder.add(shortSuccessor, handleEncoding); + pendingIndexEntry = false; + } + + // write index block + BlockHandle indexBlockHandle = writeBlock(indexBlockBuilder); + + // write footer + Footer footer = new Footer(metaindexBlockHandle, indexBlockHandle); + Slice footerEncoding = Footer.writeFooter(footer); + file.append(footerEncoding); + position += footerEncoding.length(); + } + + public void abandon() + { + closed = true; //mark it as unusable + } + + public static int crc32c(Slice data, CompressionType type) + { + PureJavaCrc32C crc32c = new PureJavaCrc32C(); + crc32c.update(data.getRawArray(), data.getRawOffset(), data.length()); + crc32c.update(type.persistentId() & 0xFF); + return crc32c.getMaskedValue(); + } + + public void 
ensureCompressedOutputCapacity(int capacity) + { + if (compressedOutput != null && compressedOutput.length() > capacity) { + return; + } + compressedOutput = Slices.allocate(capacity); + } +} diff --git a/leveldb/src/main/java/org/iq80/leveldb/table/UserComparator.java b/leveldb/src/main/java/org/iq80/leveldb/table/UserComparator.java new file mode 100644 index 0000000..49b9a27 --- /dev/null +++ b/leveldb/src/main/java/org/iq80/leveldb/table/UserComparator.java @@ -0,0 +1,32 @@ +/* + * Copyright (C) 2011 the original author or authors. + * See the notice.md file distributed with this work for additional + * information regarding copyright ownership. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.iq80.leveldb.table; + +import org.iq80.leveldb.util.Slice; + +import java.util.Comparator; + +public interface UserComparator + extends Comparator +{ + String name(); + + Slice findShortestSeparator(Slice start, Slice limit); + + Slice findShortSuccessor(Slice key); +} diff --git a/leveldb/src/main/java/org/iq80/leveldb/util/BasicSliceOutput.java b/leveldb/src/main/java/org/iq80/leveldb/util/BasicSliceOutput.java new file mode 100644 index 0000000..b006dd5 --- /dev/null +++ b/leveldb/src/main/java/org/iq80/leveldb/util/BasicSliceOutput.java @@ -0,0 +1,197 @@ +/* + * Copyright (C) 2011 the original author or authors. + * See the notice.md file distributed with this work for additional + * information regarding copyright ownership. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.iq80.leveldb.util; + +import java.io.IOException; +import java.io.InputStream; +import java.nio.ByteBuffer; +import java.nio.charset.Charset; + +public class BasicSliceOutput + extends SliceOutput +{ + private final Slice slice; + private int size; + + protected BasicSliceOutput(Slice slice) + { + this.slice = slice; + } + + @Override + public void reset() + { + size = 0; + } + + @Override + public int size() + { + return size; + } + + @Override + public boolean isWritable() + { + return writableBytes() > 0; + } + + @Override + public int writableBytes() + { + return slice.length() - size; + } + + @Override + public void writeByte(int value) + { + slice.setByte(size++, value); + } + + @Override + public void writeShort(int value) + { + slice.setShort(size, value); + size += 2; + } + + @Override + public void writeInt(int value) + { + slice.setInt(size, value); + size += 4; + } + + @Override + public void writeLong(long value) + { + slice.setLong(size, value); + size += 8; + } + + @Override + public void writeBytes(byte[] source, int sourceIndex, int length) + { + slice.setBytes(size, source, sourceIndex, length); + size += length; + } + + @Override + public void writeBytes(byte[] source) + { + writeBytes(source, 0, source.length); + } + + @Override + public void writeBytes(Slice source) + { + writeBytes(source, 0, source.length()); + } + + @Override + public void writeBytes(SliceInput source, 
int length) + { + if (length > source.available()) { + throw new IndexOutOfBoundsException(); + } + writeBytes(source.readBytes(length)); + } + + @Override + public void writeBytes(Slice source, int sourceIndex, int length) + { + slice.setBytes(size, source, sourceIndex, length); + size += length; + } + + @Override + public void writeBytes(ByteBuffer source) + { + int length = source.remaining(); + slice.setBytes(size, source); + size += length; + } + + @Override + public int writeBytes(InputStream in, int length) + throws IOException + { + int writtenBytes = slice.setBytes(size, in, length); + if (writtenBytes > 0) { + size += writtenBytes; + } + return writtenBytes; + } + + @Override + public void writeZero(int length) + { + if (length == 0) { + return; + } + if (length < 0) { + throw new IllegalArgumentException( + "length must be 0 or greater than 0."); + } + int nLong = length >>> 3; + int nBytes = length & 7; + for (int i = nLong; i > 0; i--) { + writeLong(0); + } + if (nBytes == 4) { + writeInt(0); + } + else if (nBytes < 4) { + for (int i = nBytes; i > 0; i--) { + writeByte((byte) 0); + } + } + else { + writeInt(0); + for (int i = nBytes - 4; i > 0; i--) { + writeByte((byte) 0); + } + } + } + + @Override + public Slice slice() + { + return slice.slice(0, size); + } + + @Override + public ByteBuffer toByteBuffer() + { + return slice.toByteBuffer(0, size); + } + + @Override + public String toString() + { + return getClass().getSimpleName() + '(' + + "size=" + size + ", " + + "capacity=" + slice.length() + + ')'; + } + + public String toString(Charset charset) + { + return slice.toString(0, size, charset); + } +} diff --git a/leveldb/src/main/java/org/iq80/leveldb/util/Closeables.java b/leveldb/src/main/java/org/iq80/leveldb/util/Closeables.java new file mode 100644 index 0000000..26318ad --- /dev/null +++ b/leveldb/src/main/java/org/iq80/leveldb/util/Closeables.java @@ -0,0 +1,93 @@ +/* + * Copyright (C) 2011 the original author or authors. 
+ * See the notice.md file distributed with this work for additional + * information regarding copyright ownership. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.iq80.leveldb.util; + +import com.google.common.base.Throwables; + +import java.io.Closeable; +import java.io.IOException; +import java.util.concurrent.Callable; + +public final class Closeables +{ + private Closeables() + { + } + + public static void closeQuietly(Closeable closeable) + { + if (closeable == null) { + return; + } + try { + closeable.close(); + } + catch (IOException ignored) { + } + } + + public static void closeAll(Iterable closeables) throws IOException + { + Throwable throwable = null; + for (Closeable closeable : closeables) { + try { + closeable.close(); + } + catch (Throwable e) { + if (throwable == null) { + throwable = e; + } + else { + throwable.addSuppressed(e); + } + } + } + + if (throwable != null) { + Throwables.propagateIfPossible(throwable, IOException.class); + throw new AssertionError(throwable); // not possible + } + } + + /** + * Create a wrapper for {@code resource}. If wrapper fail to be created, resource is properly closed. + * In the case if {@code wrapperFactory.call()} succeed, returned object is responsible to close {@code resource}. 
+ * + * @param wrapperFactory wrapper factory + * @param resource resource used by wrapper + * @param wrapper object type + * @return resource wrapper instance + * @throws IOException in the case of any exception. + */ + public static T wrapResource(Callable wrapperFactory, Closeable resource) throws IOException + { + try { + return wrapperFactory.call(); + } + catch (Throwable throwable) { + try { + resource.close(); + } + catch (Throwable e1) { + throwable.addSuppressed(e1); + } + Throwables.propagateIfPossible(throwable, IOException.class); + throw new AssertionError(throwable); // not possible + } + } +} diff --git a/leveldb/src/main/java/org/iq80/leveldb/util/DynamicSliceOutput.java b/leveldb/src/main/java/org/iq80/leveldb/util/DynamicSliceOutput.java new file mode 100644 index 0000000..428131d --- /dev/null +++ b/leveldb/src/main/java/org/iq80/leveldb/util/DynamicSliceOutput.java @@ -0,0 +1,207 @@ +/* + * Copyright (C) 2011 the original author or authors. + * See the notice.md file distributed with this work for additional + * information regarding copyright ownership. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.iq80.leveldb.util; + +import java.io.IOException; +import java.io.InputStream; +import java.nio.ByteBuffer; +import java.nio.charset.Charset; + +public class DynamicSliceOutput + extends SliceOutput +{ + private Slice slice; + private int size; + + public DynamicSliceOutput(int estimatedSize) + { + this.slice = new Slice(estimatedSize); + } + + @Override + public void reset() + { + size = 0; + } + + @Override + public int size() + { + return size; + } + + @Override + public boolean isWritable() + { + return writableBytes() > 0; + } + + @Override + public int writableBytes() + { + return slice.length() - size; + } + + @Override + public void writeByte(int value) + { + slice = Slices.ensureSize(slice, size + 1); + slice.setByte(size++, value); + } + + @Override + public void writeShort(int value) + { + slice = Slices.ensureSize(slice, size + 2); + slice.setShort(size, value); + size += 2; + } + + @Override + public void writeInt(int value) + { + slice = Slices.ensureSize(slice, size + 4); + slice.setInt(size, value); + size += 4; + } + + @Override + public void writeLong(long value) + { + slice = Slices.ensureSize(slice, size + 8); + slice.setLong(size, value); + size += 8; + } + + @Override + public void writeBytes(byte[] source) + { + writeBytes(source, 0, source.length); + } + + @Override + public void writeBytes(byte[] source, int sourceIndex, int length) + { + slice = Slices.ensureSize(slice, size + length); + slice.setBytes(size, source, sourceIndex, length); + size += length; + } + + @Override + public void writeBytes(Slice source) + { + writeBytes(source, 0, source.length()); + } + + @Override + public void writeBytes(SliceInput source, int length) + { + if (length > source.available()) { + throw new IndexOutOfBoundsException(); + } + writeBytes(source.slice()); + } + + @Override + public void writeBytes(Slice source, int sourceIndex, int length) + { + slice = Slices.ensureSize(slice, size + length); + slice.setBytes(size, source, 
sourceIndex, length); + size += length; + } + + @Override + public void writeBytes(ByteBuffer source) + { + int length = source.remaining(); + slice = Slices.ensureSize(slice, size + length); + slice.setBytes(size, source); + size += length; + } + + @Override + public int writeBytes(InputStream in, int length) + throws IOException + { + slice = Slices.ensureSize(slice, size + length); + int writtenBytes = slice.setBytes(size, in, length); + if (writtenBytes > 0) { + size += writtenBytes; + } + return writtenBytes; + } + + @Override + public void writeZero(int length) + { + if (length == 0) { + return; + } + if (length < 0) { + throw new IllegalArgumentException( + "length must be 0 or greater than 0."); + } + slice = Slices.ensureSize(slice, size + length); + int nLong = length >>> 3; + int nBytes = length & 7; + for (int i = nLong; i > 0; i--) { + writeLong(0); + } + if (nBytes == 4) { + writeInt(0); + } + else if (nBytes < 4) { + for (int i = nBytes; i > 0; i--) { + writeByte((byte) 0); + } + } + else { + writeInt(0); + for (int i = nBytes - 4; i > 0; i--) { + writeByte((byte) 0); + } + } + } + + @Override + public Slice slice() + { + return slice.slice(0, size); + } + + @Override + public ByteBuffer toByteBuffer() + { + return slice.toByteBuffer(0, size); + } + + @Override + public String toString() + { + return getClass().getSimpleName() + '(' + + "size=" + size + ", " + + "capacity=" + slice.length() + + ')'; + } + + @Override + public String toString(Charset charset) + { + return slice.toString(0, size, charset); + } +} diff --git a/leveldb/src/main/java/org/iq80/leveldb/util/Hash.java b/leveldb/src/main/java/org/iq80/leveldb/util/Hash.java new file mode 100644 index 0000000..8d11045 --- /dev/null +++ b/leveldb/src/main/java/org/iq80/leveldb/util/Hash.java @@ -0,0 +1,87 @@ +/* + * Copyright (C) 2011 the original author or authors. + * See the notice.md file distributed with this work for additional + * information regarding copyright ownership. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.iq80.leveldb.util; + +/** + * @author Honore Vasconcelos + */ +public final class Hash +{ + private Hash() + { + } + + public static int hash(byte[] data, int seed) + { + return hash(data, 0, data.length, seed); + } + + /** + * Partial array hash that start at offset and with len size. + * + * @param data full data + * @param offset data start offset + * @param len length of data + * @param seed hash seed + * @return hash (sign has no meaning) + */ + public static int hash(byte[] data, int offset, int len, int seed) + { + final int endIdx = len + offset; + // Similar to murmur hash + int m = 0xc6a4a793; + int r = 24; + + int h = seed ^ (len * m); + + int idx = offset; + // Pick up four bytes at a time + for (; idx + 4 <= endIdx; idx += 4) { + int w = byteToInt(data, idx); + h += w; + h *= m; + h ^= (h >>> 16); + } + + // Pick up remaining bytes + final int remaining = endIdx - idx; + switch (remaining) { + case 3: + h += (data[idx + 2] & 0xff) << 16; + //FALLTHROUGH INTENDED: DO NOT PUT BREAK + case 2: + h += (data[idx + 1] & 0xff) << 8; + //FALLTHROUGH INTENDED: DO NOT PUT BREAK + case 1: + h += data[idx] & 0xff; + h *= m; + h ^= (h >>> r); + break; + } + return h; + } + + private static int byteToInt(byte[] data, final int index) + { + return (data[index] & 0xff) | + (data[index + 1] & 0xff) << 8 | + (data[index + 2] & 0xff) << 16 | + (data[index + 3] & 0xff) << 24; + } +} diff --git 
a/leveldb/src/main/java/org/iq80/leveldb/util/ILRUCache.java b/leveldb/src/main/java/org/iq80/leveldb/util/ILRUCache.java new file mode 100644 index 0000000..73b78c1 --- /dev/null +++ b/leveldb/src/main/java/org/iq80/leveldb/util/ILRUCache.java @@ -0,0 +1,49 @@ +/* + * Copyright (C) 2011 the original author or authors. + * See the notice.md file distributed with this work for additional + * information regarding copyright ownership. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.iq80.leveldb.util; + +import java.util.concurrent.Callable; +import java.util.concurrent.ExecutionException; + +public interface ILRUCache +{ + /** + * Get cached value by key or load and cache loaded value. + * + * @param key cache key + * @param loader key value loader + * @return loaded/saved value + * @throws ExecutionException if load has any exception. + */ + V load(final K key, Callable loader) throws ExecutionException; + + long getApproximateMemoryUsage(); + + /** + * Get a value from cache if present (already loaded) + * + * @param key cache key + * @return value if present, {@code null} otherwise + */ + V getIfPresent(K key); + + /** + * Discards all entries in the cache. 
+ */ + void invalidateAll(); +} diff --git a/leveldb/src/main/java/org/iq80/leveldb/util/IntVector.java b/leveldb/src/main/java/org/iq80/leveldb/util/IntVector.java new file mode 100644 index 0000000..974837d --- /dev/null +++ b/leveldb/src/main/java/org/iq80/leveldb/util/IntVector.java @@ -0,0 +1,87 @@ +/* + * Copyright (C) 2011 the original author or authors. + * See the notice.md file distributed with this work for additional + * information regarding copyright ownership. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.iq80.leveldb.util; + +import java.util.Arrays; + +import static com.google.common.base.Preconditions.checkArgument; + +public class IntVector +{ + private int size; + private int[] values; + + public IntVector(int initialCapacity) + { + this.values = new int[initialCapacity]; + } + + public int size() + { + return size; + } + + public void clear() + { + size = 0; + } + + public void add(int value) + { + checkArgument(size + 1 >= 0, "Invalid minLength: %s", size + 1); + + ensureCapacity(size + 1); + + values[size++] = value; + } + + private void ensureCapacity(int minCapacity) + { + if (values.length >= minCapacity) { + return; + } + + int newLength = values.length; + if (newLength == 0) { + newLength = 1; + } + else { + newLength <<= 1; + + } + values = Arrays.copyOf(values, newLength); + } + + public void write(SliceOutput sliceOutput) + { + for (int index = 0; index < size; index++) { + sliceOutput.writeInt(values[index]); + } + } + + @Override + public String toString() + { + StringBuilder sb = new StringBuilder(); + sb.append("IntVector"); + sb.append("{size=").append(size); + sb.append(", values=").append(Arrays.toString(values)); + sb.append('}'); + return sb.toString(); + } +} diff --git a/leveldb/src/main/java/org/iq80/leveldb/util/LRUCache.java b/leveldb/src/main/java/org/iq80/leveldb/util/LRUCache.java new file mode 100644 index 0000000..a192626 --- /dev/null +++ b/leveldb/src/main/java/org/iq80/leveldb/util/LRUCache.java @@ -0,0 +1,78 @@ +/* + * Copyright (C) 2011 the original author or authors. + * See the notice.md file distributed with this work for additional + * information regarding copyright ownership. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.iq80.leveldb.util; + +import com.google.common.cache.Cache; +import com.google.common.cache.CacheBuilder; +import com.google.common.cache.Weigher; + +import java.util.concurrent.Callable; +import java.util.concurrent.ExecutionException; + +/** + * LRU cache with special weigher to count correctly Slice weight. + * + * @author Honore Vasconcelos + */ +public final class LRUCache + implements ILRUCache +{ + private final Cache cache; + private final Weigher weigher; + + private LRUCache(int capacity, final Weigher weigher) + { + this.cache = CacheBuilder.newBuilder() + .maximumWeight(capacity) + .weigher(weigher) + .concurrencyLevel(1 << 4) + .build(); + this.weigher = weigher; + } + + public static ILRUCache createCache(int capacity, final Weigher weigher) + { + return new LRUCache<>(capacity, weigher); + } + + public V load(final K key, Callable loader) throws ExecutionException + { + return cache.get(key, loader); + } + + @Override + public long getApproximateMemoryUsage() + { + return cache.asMap().entrySet().stream() + .mapToLong(e -> weigher.weigh(e.getKey(), e.getValue())) + .sum(); + } + + @Override + public V getIfPresent(K key) + { + return cache.getIfPresent(key); + } + + @Override + public void invalidateAll() + { + cache.invalidateAll(); + } +} diff --git a/leveldb/src/main/java/org/iq80/leveldb/util/LogMessageFormatter.java b/leveldb/src/main/java/org/iq80/leveldb/util/LogMessageFormatter.java new file mode 100644 index 0000000..80029e6 --- /dev/null +++ 
b/leveldb/src/main/java/org/iq80/leveldb/util/LogMessageFormatter.java @@ -0,0 +1,75 @@ +/* + * Copyright (C) 2011 the original author or authors. + * See the notice.md file distributed with this work for additional + * information regarding copyright ownership. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.iq80.leveldb.util; + +import java.time.LocalDateTime; +import java.util.function.Supplier; + +public final class LogMessageFormatter +{ + private static final int DATE_SIZE = 28; + private final Supplier clock; + + public LogMessageFormatter(Supplier clock) + { + this.clock = clock; + } + + public String format(String message) + { + final StringBuilder sb = new StringBuilder(message.length() + DATE_SIZE); + sb.append(clock.get()); + sb.append(' '); + sb.append(message); + return sb.toString(); + } + + public String format(String template, Object[] args) + { + template = String.valueOf(template); // null -> "null" + + // start substituting the arguments into the '%s' placeholders + StringBuilder builder = new StringBuilder(DATE_SIZE + template.length() + 16 * args.length); + builder.append(clock.get()); + builder.append(" "); + int templateStart = 0; + int i = 0; + while (i < args.length) { + int placeholderStart = template.indexOf("%s", templateStart); + if (placeholderStart == -1) { + break; + } + builder.append(template, templateStart, placeholderStart); + builder.append(args[i++]); + templateStart = placeholderStart + 2; + } + 
builder.append(template, templateStart, template.length()); + + // if we run out of placeholders, append the extra args in square braces + if (i < args.length) { + builder.append(" ["); + builder.append(args[i++]); + while (i < args.length) { + builder.append(", "); + builder.append(args[i++]); + } + builder.append(']'); + } + return builder.toString(); + } +} diff --git a/leveldb/src/main/java/org/iq80/leveldb/util/PureJavaCrc32C.java b/leveldb/src/main/java/org/iq80/leveldb/util/PureJavaCrc32C.java new file mode 100644 index 0000000..656e3d9 --- /dev/null +++ b/leveldb/src/main/java/org/iq80/leveldb/util/PureJavaCrc32C.java @@ -0,0 +1,783 @@ +/* + * Copyright (C) 2011 the original author or authors. + * See the notice.md file distributed with this work for additional + * information regarding copyright ownership. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.iq80.leveldb.util; + +import java.nio.ByteBuffer; +import java.nio.ByteOrder; +import java.util.zip.Checksum; + +/** + * A pure-java implementation of the CRC32 checksum that uses + * the CRC32-C polynomial, the same polynomial used by iSCSI + * and implemented on many Intel chipsets supporting SSE4.2. + */ +// this code was taken from Apache Hadoop +public class PureJavaCrc32C + implements Checksum +{ + private static final int MASK_DELTA = 0xa282ead8; + + /** + * Return a masked representation of crc. + *

+ * Motivation: it is problematic to compute the CRC of a string that + * contains embedded CRCs. Therefore we recommend that CRCs stored + * somewhere (e.g., in files) should be masked before being stored. + */ + public static int mask(int crc) + { + // Rotate right by 15 bits and add a constant. + return ((crc >>> 15) | (crc << 17)) + MASK_DELTA; + } + + /** + * Return the crc whose masked representation is masked_crc. + */ + public static int unmask(int maskedCrc) + { + int rot = maskedCrc - MASK_DELTA; + return ((rot >>> 17) | (rot << 15)); + } + + /** + * the current CRC value, bit-flipped + */ + private int crc; + + /** + * Create a new PureJavaCrc32 object. + */ + public PureJavaCrc32C() + { + reset(); + } + + public int getMaskedValue() + { + return mask(getIntValue()); + } + + public int getIntValue() + { + return ~crc; + } + + @Override + public long getValue() + { + long ret = crc; + return (~ret) & 0xffffffffL; + } + + @Override + public final void reset() + { + crc = 0xffffffff; + } + + @Override + public void update(byte[] b, int off, int len) + { + int localCrc = crc; + while (len > 7) { + int c0 = b[off++] ^ localCrc; + localCrc >>>= 8; + int c1 = b[off++] ^ (localCrc); + localCrc >>>= 8; + int c2 = b[off++] ^ (localCrc); + localCrc >>>= 8; + int c3 = b[off++] ^ (localCrc); + localCrc = (T8_7[c0 & 0xff] ^ T8_6[c1 & 0xff]) + ^ (T8_5[c2 & 0xff] ^ T8_4[c3 & 0xff]); + + localCrc ^= (T8_3[b[off++] & 0xff] ^ T8_2[b[off++] & 0xff]) + ^ (T8_1[b[off++] & 0xff] ^ T8_0[b[off++] & 0xff]); + + len -= 8; + } + while (len > 0) { + localCrc = (localCrc >>> 8) ^ T8_0[(localCrc ^ b[off++]) & 0xff]; + len--; + } + + // Publish crc out to object + crc = localCrc; + } + + public void update(ByteBuffer buffer) + { + int pos = buffer.position(); + int limit = buffer.limit(); + int rem = limit - pos; + + if (rem <= 0) { + return; + } + if (buffer.hasArray()) { + update(buffer.array(), pos + buffer.arrayOffset(), rem); + } + else { + //we could swap buffer type to LE (as 
most CPU) but no gain noticed + if (buffer.order() == ByteOrder.LITTLE_ENDIAN) { + updateDirectBufferL(buffer, pos, limit); + } + else { + updateDirectBufferB(buffer, pos, limit); + } + } + buffer.position(limit); + } + + /** + * Little endian + */ + private void updateDirectBufferL(ByteBuffer buffer, int off, int limit) + { + int len = limit - off; + int localCrc = crc; + while (len > 7) { + final long aLong = buffer.getLong(off); + final int aInt = (int) aLong; + final int c0 = (aInt ^ localCrc) & 0xff; + localCrc >>>= 8; + final int c1 = (aInt >>> 8 ^ (localCrc)) & 0xff; + localCrc >>>= 8; + final int c2 = (aInt >>> 16 ^ (localCrc)) & 0xff; + final int c3 = (aInt >>> 24 ^ (localCrc >>> 8)) & 0xff; + + localCrc = (T8_7[c0] ^ T8_6[c1]) ^ + (T8_5[c2] ^ T8_4[c3]); + + final int aInt1 = (int) (aLong >>> 32); + + final int c4 = aInt1 & 0xff; + final int c5 = aInt1 >>> 8 & 0xff; + final int c6 = aInt1 >>> 16 & 0xff; + final int c7 = aInt1 >>> 24 & 0xff; + + localCrc ^= (T8_3[c4] ^ T8_2[c5]) ^ + (T8_1[c6] ^ T8_0[c7]); + + off += 8; + len -= 8; + } + + /* loop unroll - duff's device style */ + switch (len) { + case 7: localCrc = (localCrc >>> 8) ^ T8_0[((localCrc ^ buffer.get(off++)) & 0xff)]; + case 6: localCrc = (localCrc >>> 8) ^ T8_0[((localCrc ^ buffer.get(off++)) & 0xff)]; + case 5: localCrc = (localCrc >>> 8) ^ T8_0[((localCrc ^ buffer.get(off++)) & 0xff)]; + case 4: localCrc = (localCrc >>> 8) ^ T8_0[((localCrc ^ buffer.get(off++)) & 0xff)]; + case 3: localCrc = (localCrc >>> 8) ^ T8_0[((localCrc ^ buffer.get(off++)) & 0xff)]; + case 2: localCrc = (localCrc >>> 8) ^ T8_0[((localCrc ^ buffer.get(off++)) & 0xff)]; + case 1: localCrc = (localCrc >>> 8) ^ T8_0[((localCrc ^ buffer.get(off)) & 0xff)]; + default: break; // satisfy Findbugs + } + // Publish crc out to object + crc = localCrc; + } + + /** + * big endian + */ + private void updateDirectBufferB(ByteBuffer buffer, int off, int limit) + { + int len = limit - off; + + int localCrc = crc; + while (len > 7) { + 
final long aLong = buffer.getLong(off); + final int aInt = (int) (aLong >>> 32); + final int c0 = (aInt >>> 24 ^ localCrc) & 0xff; + localCrc >>>= 8; + final int c1 = (aInt >>> 16 ^ (localCrc)) & 0xff; + localCrc >>>= 8; + final int c2 = (aInt >>> 8 ^ (localCrc)) & 0xff; + final int c3 = (aInt ^ (localCrc >>> 8)) & 0xff; + + final int aInt1 = (int) aLong; + + final int c4 = aInt1 >>> 24; + final int c5 = aInt1 >>> 16 & 0xff; + final int c6 = aInt1 >>> 8 & 0xff; + final int c7 = aInt1 & 0xff; + + localCrc = (T8_7[c0] ^ T8_6[c1]) ^ + (T8_5[c2] ^ T8_4[c3]); + + localCrc ^= (T8_3[c4] ^ T8_2[c5]) ^ + (T8_1[c6] ^ T8_0[c7]); + + off += 8; + len -= 8; + } + + /* loop unroll - duff's device style */ + switch (len) { + case 7: localCrc = (localCrc >>> 8) ^ T8_0[((localCrc ^ buffer.get(off++)) & 0xff)]; + case 6: localCrc = (localCrc >>> 8) ^ T8_0[((localCrc ^ buffer.get(off++)) & 0xff)]; + case 5: localCrc = (localCrc >>> 8) ^ T8_0[((localCrc ^ buffer.get(off++)) & 0xff)]; + case 4: localCrc = (localCrc >>> 8) ^ T8_0[((localCrc ^ buffer.get(off++)) & 0xff)]; + case 3: localCrc = (localCrc >>> 8) ^ T8_0[((localCrc ^ buffer.get(off++)) & 0xff)]; + case 2: localCrc = (localCrc >>> 8) ^ T8_0[((localCrc ^ buffer.get(off++)) & 0xff)]; + case 1: localCrc = (localCrc >>> 8) ^ T8_0[((localCrc ^ buffer.get(off)) & 0xff)]; + default: break; // satisfy Findbugs + } + // Publish crc out to object + crc = localCrc; + } + + @Override + public void update(int b) + { + crc = (crc >>> 8) ^ T8_0[(crc ^ b) & 0xff]; + } + + // CRC polynomial tables generated by: + // java -cp build/test/classes/:build/classes/ \ + // org.apache.hadoop.util.TestPureJavaCrc32\$Table 82F63B78 + + static final int[] T8_0 = new int[] { + 0x00000000, 0xF26B8303, 0xE13B70F7, 0x1350F3F4, + 0xC79A971F, 0x35F1141C, 0x26A1E7E8, 0xD4CA64EB, + 0x8AD958CF, 0x78B2DBCC, 0x6BE22838, 0x9989AB3B, + 0x4D43CFD0, 0xBF284CD3, 0xAC78BF27, 0x5E133C24, + 0x105EC76F, 0xE235446C, 0xF165B798, 0x030E349B, + 0xD7C45070, 0x25AFD373, 
0x36FF2087, 0xC494A384, + 0x9A879FA0, 0x68EC1CA3, 0x7BBCEF57, 0x89D76C54, + 0x5D1D08BF, 0xAF768BBC, 0xBC267848, 0x4E4DFB4B, + 0x20BD8EDE, 0xD2D60DDD, 0xC186FE29, 0x33ED7D2A, + 0xE72719C1, 0x154C9AC2, 0x061C6936, 0xF477EA35, + 0xAA64D611, 0x580F5512, 0x4B5FA6E6, 0xB93425E5, + 0x6DFE410E, 0x9F95C20D, 0x8CC531F9, 0x7EAEB2FA, + 0x30E349B1, 0xC288CAB2, 0xD1D83946, 0x23B3BA45, + 0xF779DEAE, 0x05125DAD, 0x1642AE59, 0xE4292D5A, + 0xBA3A117E, 0x4851927D, 0x5B016189, 0xA96AE28A, + 0x7DA08661, 0x8FCB0562, 0x9C9BF696, 0x6EF07595, + 0x417B1DBC, 0xB3109EBF, 0xA0406D4B, 0x522BEE48, + 0x86E18AA3, 0x748A09A0, 0x67DAFA54, 0x95B17957, + 0xCBA24573, 0x39C9C670, 0x2A993584, 0xD8F2B687, + 0x0C38D26C, 0xFE53516F, 0xED03A29B, 0x1F682198, + 0x5125DAD3, 0xA34E59D0, 0xB01EAA24, 0x42752927, + 0x96BF4DCC, 0x64D4CECF, 0x77843D3B, 0x85EFBE38, + 0xDBFC821C, 0x2997011F, 0x3AC7F2EB, 0xC8AC71E8, + 0x1C661503, 0xEE0D9600, 0xFD5D65F4, 0x0F36E6F7, + 0x61C69362, 0x93AD1061, 0x80FDE395, 0x72966096, + 0xA65C047D, 0x5437877E, 0x4767748A, 0xB50CF789, + 0xEB1FCBAD, 0x197448AE, 0x0A24BB5A, 0xF84F3859, + 0x2C855CB2, 0xDEEEDFB1, 0xCDBE2C45, 0x3FD5AF46, + 0x7198540D, 0x83F3D70E, 0x90A324FA, 0x62C8A7F9, + 0xB602C312, 0x44694011, 0x5739B3E5, 0xA55230E6, + 0xFB410CC2, 0x092A8FC1, 0x1A7A7C35, 0xE811FF36, + 0x3CDB9BDD, 0xCEB018DE, 0xDDE0EB2A, 0x2F8B6829, + 0x82F63B78, 0x709DB87B, 0x63CD4B8F, 0x91A6C88C, + 0x456CAC67, 0xB7072F64, 0xA457DC90, 0x563C5F93, + 0x082F63B7, 0xFA44E0B4, 0xE9141340, 0x1B7F9043, + 0xCFB5F4A8, 0x3DDE77AB, 0x2E8E845F, 0xDCE5075C, + 0x92A8FC17, 0x60C37F14, 0x73938CE0, 0x81F80FE3, + 0x55326B08, 0xA759E80B, 0xB4091BFF, 0x466298FC, + 0x1871A4D8, 0xEA1A27DB, 0xF94AD42F, 0x0B21572C, + 0xDFEB33C7, 0x2D80B0C4, 0x3ED04330, 0xCCBBC033, + 0xA24BB5A6, 0x502036A5, 0x4370C551, 0xB11B4652, + 0x65D122B9, 0x97BAA1BA, 0x84EA524E, 0x7681D14D, + 0x2892ED69, 0xDAF96E6A, 0xC9A99D9E, 0x3BC21E9D, + 0xEF087A76, 0x1D63F975, 0x0E330A81, 0xFC588982, + 0xB21572C9, 0x407EF1CA, 0x532E023E, 0xA145813D, + 0x758FE5D6, 0x87E466D5, 
0x94B49521, 0x66DF1622, + 0x38CC2A06, 0xCAA7A905, 0xD9F75AF1, 0x2B9CD9F2, + 0xFF56BD19, 0x0D3D3E1A, 0x1E6DCDEE, 0xEC064EED, + 0xC38D26C4, 0x31E6A5C7, 0x22B65633, 0xD0DDD530, + 0x0417B1DB, 0xF67C32D8, 0xE52CC12C, 0x1747422F, + 0x49547E0B, 0xBB3FFD08, 0xA86F0EFC, 0x5A048DFF, + 0x8ECEE914, 0x7CA56A17, 0x6FF599E3, 0x9D9E1AE0, + 0xD3D3E1AB, 0x21B862A8, 0x32E8915C, 0xC083125F, + 0x144976B4, 0xE622F5B7, 0xF5720643, 0x07198540, + 0x590AB964, 0xAB613A67, 0xB831C993, 0x4A5A4A90, + 0x9E902E7B, 0x6CFBAD78, 0x7FAB5E8C, 0x8DC0DD8F, + 0xE330A81A, 0x115B2B19, 0x020BD8ED, 0xF0605BEE, + 0x24AA3F05, 0xD6C1BC06, 0xC5914FF2, 0x37FACCF1, + 0x69E9F0D5, 0x9B8273D6, 0x88D28022, 0x7AB90321, + 0xAE7367CA, 0x5C18E4C9, 0x4F48173D, 0xBD23943E, + 0xF36E6F75, 0x0105EC76, 0x12551F82, 0xE03E9C81, + 0x34F4F86A, 0xC69F7B69, 0xD5CF889D, 0x27A40B9E, + 0x79B737BA, 0x8BDCB4B9, 0x988C474D, 0x6AE7C44E, + 0xBE2DA0A5, 0x4C4623A6, 0x5F16D052, 0xAD7D5351 + }; + static final int[] T8_1 = new int[] { + 0x00000000, 0x13A29877, 0x274530EE, 0x34E7A899, + 0x4E8A61DC, 0x5D28F9AB, 0x69CF5132, 0x7A6DC945, + 0x9D14C3B8, 0x8EB65BCF, 0xBA51F356, 0xA9F36B21, + 0xD39EA264, 0xC03C3A13, 0xF4DB928A, 0xE7790AFD, + 0x3FC5F181, 0x2C6769F6, 0x1880C16F, 0x0B225918, + 0x714F905D, 0x62ED082A, 0x560AA0B3, 0x45A838C4, + 0xA2D13239, 0xB173AA4E, 0x859402D7, 0x96369AA0, + 0xEC5B53E5, 0xFFF9CB92, 0xCB1E630B, 0xD8BCFB7C, + 0x7F8BE302, 0x6C297B75, 0x58CED3EC, 0x4B6C4B9B, + 0x310182DE, 0x22A31AA9, 0x1644B230, 0x05E62A47, + 0xE29F20BA, 0xF13DB8CD, 0xC5DA1054, 0xD6788823, + 0xAC154166, 0xBFB7D911, 0x8B507188, 0x98F2E9FF, + 0x404E1283, 0x53EC8AF4, 0x670B226D, 0x74A9BA1A, + 0x0EC4735F, 0x1D66EB28, 0x298143B1, 0x3A23DBC6, + 0xDD5AD13B, 0xCEF8494C, 0xFA1FE1D5, 0xE9BD79A2, + 0x93D0B0E7, 0x80722890, 0xB4958009, 0xA737187E, + 0xFF17C604, 0xECB55E73, 0xD852F6EA, 0xCBF06E9D, + 0xB19DA7D8, 0xA23F3FAF, 0x96D89736, 0x857A0F41, + 0x620305BC, 0x71A19DCB, 0x45463552, 0x56E4AD25, + 0x2C896460, 0x3F2BFC17, 0x0BCC548E, 0x186ECCF9, + 0xC0D23785, 0xD370AFF2, 
0xE797076B, 0xF4359F1C, + 0x8E585659, 0x9DFACE2E, 0xA91D66B7, 0xBABFFEC0, + 0x5DC6F43D, 0x4E646C4A, 0x7A83C4D3, 0x69215CA4, + 0x134C95E1, 0x00EE0D96, 0x3409A50F, 0x27AB3D78, + 0x809C2506, 0x933EBD71, 0xA7D915E8, 0xB47B8D9F, + 0xCE1644DA, 0xDDB4DCAD, 0xE9537434, 0xFAF1EC43, + 0x1D88E6BE, 0x0E2A7EC9, 0x3ACDD650, 0x296F4E27, + 0x53028762, 0x40A01F15, 0x7447B78C, 0x67E52FFB, + 0xBF59D487, 0xACFB4CF0, 0x981CE469, 0x8BBE7C1E, + 0xF1D3B55B, 0xE2712D2C, 0xD69685B5, 0xC5341DC2, + 0x224D173F, 0x31EF8F48, 0x050827D1, 0x16AABFA6, + 0x6CC776E3, 0x7F65EE94, 0x4B82460D, 0x5820DE7A, + 0xFBC3FAF9, 0xE861628E, 0xDC86CA17, 0xCF245260, + 0xB5499B25, 0xA6EB0352, 0x920CABCB, 0x81AE33BC, + 0x66D73941, 0x7575A136, 0x419209AF, 0x523091D8, + 0x285D589D, 0x3BFFC0EA, 0x0F186873, 0x1CBAF004, + 0xC4060B78, 0xD7A4930F, 0xE3433B96, 0xF0E1A3E1, + 0x8A8C6AA4, 0x992EF2D3, 0xADC95A4A, 0xBE6BC23D, + 0x5912C8C0, 0x4AB050B7, 0x7E57F82E, 0x6DF56059, + 0x1798A91C, 0x043A316B, 0x30DD99F2, 0x237F0185, + 0x844819FB, 0x97EA818C, 0xA30D2915, 0xB0AFB162, + 0xCAC27827, 0xD960E050, 0xED8748C9, 0xFE25D0BE, + 0x195CDA43, 0x0AFE4234, 0x3E19EAAD, 0x2DBB72DA, + 0x57D6BB9F, 0x447423E8, 0x70938B71, 0x63311306, + 0xBB8DE87A, 0xA82F700D, 0x9CC8D894, 0x8F6A40E3, + 0xF50789A6, 0xE6A511D1, 0xD242B948, 0xC1E0213F, + 0x26992BC2, 0x353BB3B5, 0x01DC1B2C, 0x127E835B, + 0x68134A1E, 0x7BB1D269, 0x4F567AF0, 0x5CF4E287, + 0x04D43CFD, 0x1776A48A, 0x23910C13, 0x30339464, + 0x4A5E5D21, 0x59FCC556, 0x6D1B6DCF, 0x7EB9F5B8, + 0x99C0FF45, 0x8A626732, 0xBE85CFAB, 0xAD2757DC, + 0xD74A9E99, 0xC4E806EE, 0xF00FAE77, 0xE3AD3600, + 0x3B11CD7C, 0x28B3550B, 0x1C54FD92, 0x0FF665E5, + 0x759BACA0, 0x663934D7, 0x52DE9C4E, 0x417C0439, + 0xA6050EC4, 0xB5A796B3, 0x81403E2A, 0x92E2A65D, + 0xE88F6F18, 0xFB2DF76F, 0xCFCA5FF6, 0xDC68C781, + 0x7B5FDFFF, 0x68FD4788, 0x5C1AEF11, 0x4FB87766, + 0x35D5BE23, 0x26772654, 0x12908ECD, 0x013216BA, + 0xE64B1C47, 0xF5E98430, 0xC10E2CA9, 0xD2ACB4DE, + 0xA8C17D9B, 0xBB63E5EC, 0x8F844D75, 0x9C26D502, + 0x449A2E7E, 0x5738B609, 
0x63DF1E90, 0x707D86E7, + 0x0A104FA2, 0x19B2D7D5, 0x2D557F4C, 0x3EF7E73B, + 0xD98EEDC6, 0xCA2C75B1, 0xFECBDD28, 0xED69455F, + 0x97048C1A, 0x84A6146D, 0xB041BCF4, 0xA3E32483 + }; + static final int[] T8_2 = new int[] { + 0x00000000, 0xA541927E, 0x4F6F520D, 0xEA2EC073, + 0x9EDEA41A, 0x3B9F3664, 0xD1B1F617, 0x74F06469, + 0x38513EC5, 0x9D10ACBB, 0x773E6CC8, 0xD27FFEB6, + 0xA68F9ADF, 0x03CE08A1, 0xE9E0C8D2, 0x4CA15AAC, + 0x70A27D8A, 0xD5E3EFF4, 0x3FCD2F87, 0x9A8CBDF9, + 0xEE7CD990, 0x4B3D4BEE, 0xA1138B9D, 0x045219E3, + 0x48F3434F, 0xEDB2D131, 0x079C1142, 0xA2DD833C, + 0xD62DE755, 0x736C752B, 0x9942B558, 0x3C032726, + 0xE144FB14, 0x4405696A, 0xAE2BA919, 0x0B6A3B67, + 0x7F9A5F0E, 0xDADBCD70, 0x30F50D03, 0x95B49F7D, + 0xD915C5D1, 0x7C5457AF, 0x967A97DC, 0x333B05A2, + 0x47CB61CB, 0xE28AF3B5, 0x08A433C6, 0xADE5A1B8, + 0x91E6869E, 0x34A714E0, 0xDE89D493, 0x7BC846ED, + 0x0F382284, 0xAA79B0FA, 0x40577089, 0xE516E2F7, + 0xA9B7B85B, 0x0CF62A25, 0xE6D8EA56, 0x43997828, + 0x37691C41, 0x92288E3F, 0x78064E4C, 0xDD47DC32, + 0xC76580D9, 0x622412A7, 0x880AD2D4, 0x2D4B40AA, + 0x59BB24C3, 0xFCFAB6BD, 0x16D476CE, 0xB395E4B0, + 0xFF34BE1C, 0x5A752C62, 0xB05BEC11, 0x151A7E6F, + 0x61EA1A06, 0xC4AB8878, 0x2E85480B, 0x8BC4DA75, + 0xB7C7FD53, 0x12866F2D, 0xF8A8AF5E, 0x5DE93D20, + 0x29195949, 0x8C58CB37, 0x66760B44, 0xC337993A, + 0x8F96C396, 0x2AD751E8, 0xC0F9919B, 0x65B803E5, + 0x1148678C, 0xB409F5F2, 0x5E273581, 0xFB66A7FF, + 0x26217BCD, 0x8360E9B3, 0x694E29C0, 0xCC0FBBBE, + 0xB8FFDFD7, 0x1DBE4DA9, 0xF7908DDA, 0x52D11FA4, + 0x1E704508, 0xBB31D776, 0x511F1705, 0xF45E857B, + 0x80AEE112, 0x25EF736C, 0xCFC1B31F, 0x6A802161, + 0x56830647, 0xF3C29439, 0x19EC544A, 0xBCADC634, + 0xC85DA25D, 0x6D1C3023, 0x8732F050, 0x2273622E, + 0x6ED23882, 0xCB93AAFC, 0x21BD6A8F, 0x84FCF8F1, + 0xF00C9C98, 0x554D0EE6, 0xBF63CE95, 0x1A225CEB, + 0x8B277743, 0x2E66E53D, 0xC448254E, 0x6109B730, + 0x15F9D359, 0xB0B84127, 0x5A968154, 0xFFD7132A, + 0xB3764986, 0x1637DBF8, 0xFC191B8B, 0x595889F5, + 0x2DA8ED9C, 0x88E97FE2, 
0x62C7BF91, 0xC7862DEF, + 0xFB850AC9, 0x5EC498B7, 0xB4EA58C4, 0x11ABCABA, + 0x655BAED3, 0xC01A3CAD, 0x2A34FCDE, 0x8F756EA0, + 0xC3D4340C, 0x6695A672, 0x8CBB6601, 0x29FAF47F, + 0x5D0A9016, 0xF84B0268, 0x1265C21B, 0xB7245065, + 0x6A638C57, 0xCF221E29, 0x250CDE5A, 0x804D4C24, + 0xF4BD284D, 0x51FCBA33, 0xBBD27A40, 0x1E93E83E, + 0x5232B292, 0xF77320EC, 0x1D5DE09F, 0xB81C72E1, + 0xCCEC1688, 0x69AD84F6, 0x83834485, 0x26C2D6FB, + 0x1AC1F1DD, 0xBF8063A3, 0x55AEA3D0, 0xF0EF31AE, + 0x841F55C7, 0x215EC7B9, 0xCB7007CA, 0x6E3195B4, + 0x2290CF18, 0x87D15D66, 0x6DFF9D15, 0xC8BE0F6B, + 0xBC4E6B02, 0x190FF97C, 0xF321390F, 0x5660AB71, + 0x4C42F79A, 0xE90365E4, 0x032DA597, 0xA66C37E9, + 0xD29C5380, 0x77DDC1FE, 0x9DF3018D, 0x38B293F3, + 0x7413C95F, 0xD1525B21, 0x3B7C9B52, 0x9E3D092C, + 0xEACD6D45, 0x4F8CFF3B, 0xA5A23F48, 0x00E3AD36, + 0x3CE08A10, 0x99A1186E, 0x738FD81D, 0xD6CE4A63, + 0xA23E2E0A, 0x077FBC74, 0xED517C07, 0x4810EE79, + 0x04B1B4D5, 0xA1F026AB, 0x4BDEE6D8, 0xEE9F74A6, + 0x9A6F10CF, 0x3F2E82B1, 0xD50042C2, 0x7041D0BC, + 0xAD060C8E, 0x08479EF0, 0xE2695E83, 0x4728CCFD, + 0x33D8A894, 0x96993AEA, 0x7CB7FA99, 0xD9F668E7, + 0x9557324B, 0x3016A035, 0xDA386046, 0x7F79F238, + 0x0B899651, 0xAEC8042F, 0x44E6C45C, 0xE1A75622, + 0xDDA47104, 0x78E5E37A, 0x92CB2309, 0x378AB177, + 0x437AD51E, 0xE63B4760, 0x0C158713, 0xA954156D, + 0xE5F54FC1, 0x40B4DDBF, 0xAA9A1DCC, 0x0FDB8FB2, + 0x7B2BEBDB, 0xDE6A79A5, 0x3444B9D6, 0x91052BA8 + }; + static final int[] T8_3 = new int[] { + 0x00000000, 0xDD45AAB8, 0xBF672381, 0x62228939, + 0x7B2231F3, 0xA6679B4B, 0xC4451272, 0x1900B8CA, + 0xF64463E6, 0x2B01C95E, 0x49234067, 0x9466EADF, + 0x8D665215, 0x5023F8AD, 0x32017194, 0xEF44DB2C, + 0xE964B13D, 0x34211B85, 0x560392BC, 0x8B463804, + 0x924680CE, 0x4F032A76, 0x2D21A34F, 0xF06409F7, + 0x1F20D2DB, 0xC2657863, 0xA047F15A, 0x7D025BE2, + 0x6402E328, 0xB9474990, 0xDB65C0A9, 0x06206A11, + 0xD725148B, 0x0A60BE33, 0x6842370A, 0xB5079DB2, + 0xAC072578, 0x71428FC0, 0x136006F9, 0xCE25AC41, + 0x2161776D, 0xFC24DDD5, 
0x9E0654EC, 0x4343FE54, + 0x5A43469E, 0x8706EC26, 0xE524651F, 0x3861CFA7, + 0x3E41A5B6, 0xE3040F0E, 0x81268637, 0x5C632C8F, + 0x45639445, 0x98263EFD, 0xFA04B7C4, 0x27411D7C, + 0xC805C650, 0x15406CE8, 0x7762E5D1, 0xAA274F69, + 0xB327F7A3, 0x6E625D1B, 0x0C40D422, 0xD1057E9A, + 0xABA65FE7, 0x76E3F55F, 0x14C17C66, 0xC984D6DE, + 0xD0846E14, 0x0DC1C4AC, 0x6FE34D95, 0xB2A6E72D, + 0x5DE23C01, 0x80A796B9, 0xE2851F80, 0x3FC0B538, + 0x26C00DF2, 0xFB85A74A, 0x99A72E73, 0x44E284CB, + 0x42C2EEDA, 0x9F874462, 0xFDA5CD5B, 0x20E067E3, + 0x39E0DF29, 0xE4A57591, 0x8687FCA8, 0x5BC25610, + 0xB4868D3C, 0x69C32784, 0x0BE1AEBD, 0xD6A40405, + 0xCFA4BCCF, 0x12E11677, 0x70C39F4E, 0xAD8635F6, + 0x7C834B6C, 0xA1C6E1D4, 0xC3E468ED, 0x1EA1C255, + 0x07A17A9F, 0xDAE4D027, 0xB8C6591E, 0x6583F3A6, + 0x8AC7288A, 0x57828232, 0x35A00B0B, 0xE8E5A1B3, + 0xF1E51979, 0x2CA0B3C1, 0x4E823AF8, 0x93C79040, + 0x95E7FA51, 0x48A250E9, 0x2A80D9D0, 0xF7C57368, + 0xEEC5CBA2, 0x3380611A, 0x51A2E823, 0x8CE7429B, + 0x63A399B7, 0xBEE6330F, 0xDCC4BA36, 0x0181108E, + 0x1881A844, 0xC5C402FC, 0xA7E68BC5, 0x7AA3217D, + 0x52A0C93F, 0x8FE56387, 0xEDC7EABE, 0x30824006, + 0x2982F8CC, 0xF4C75274, 0x96E5DB4D, 0x4BA071F5, + 0xA4E4AAD9, 0x79A10061, 0x1B838958, 0xC6C623E0, + 0xDFC69B2A, 0x02833192, 0x60A1B8AB, 0xBDE41213, + 0xBBC47802, 0x6681D2BA, 0x04A35B83, 0xD9E6F13B, + 0xC0E649F1, 0x1DA3E349, 0x7F816A70, 0xA2C4C0C8, + 0x4D801BE4, 0x90C5B15C, 0xF2E73865, 0x2FA292DD, + 0x36A22A17, 0xEBE780AF, 0x89C50996, 0x5480A32E, + 0x8585DDB4, 0x58C0770C, 0x3AE2FE35, 0xE7A7548D, + 0xFEA7EC47, 0x23E246FF, 0x41C0CFC6, 0x9C85657E, + 0x73C1BE52, 0xAE8414EA, 0xCCA69DD3, 0x11E3376B, + 0x08E38FA1, 0xD5A62519, 0xB784AC20, 0x6AC10698, + 0x6CE16C89, 0xB1A4C631, 0xD3864F08, 0x0EC3E5B0, + 0x17C35D7A, 0xCA86F7C2, 0xA8A47EFB, 0x75E1D443, + 0x9AA50F6F, 0x47E0A5D7, 0x25C22CEE, 0xF8878656, + 0xE1873E9C, 0x3CC29424, 0x5EE01D1D, 0x83A5B7A5, + 0xF90696D8, 0x24433C60, 0x4661B559, 0x9B241FE1, + 0x8224A72B, 0x5F610D93, 0x3D4384AA, 0xE0062E12, + 0x0F42F53E, 0xD2075F86, 
0xB025D6BF, 0x6D607C07, + 0x7460C4CD, 0xA9256E75, 0xCB07E74C, 0x16424DF4, + 0x106227E5, 0xCD278D5D, 0xAF050464, 0x7240AEDC, + 0x6B401616, 0xB605BCAE, 0xD4273597, 0x09629F2F, + 0xE6264403, 0x3B63EEBB, 0x59416782, 0x8404CD3A, + 0x9D0475F0, 0x4041DF48, 0x22635671, 0xFF26FCC9, + 0x2E238253, 0xF36628EB, 0x9144A1D2, 0x4C010B6A, + 0x5501B3A0, 0x88441918, 0xEA669021, 0x37233A99, + 0xD867E1B5, 0x05224B0D, 0x6700C234, 0xBA45688C, + 0xA345D046, 0x7E007AFE, 0x1C22F3C7, 0xC167597F, + 0xC747336E, 0x1A0299D6, 0x782010EF, 0xA565BA57, + 0xBC65029D, 0x6120A825, 0x0302211C, 0xDE478BA4, + 0x31035088, 0xEC46FA30, 0x8E647309, 0x5321D9B1, + 0x4A21617B, 0x9764CBC3, 0xF54642FA, 0x2803E842 + }; + static final int[] T8_4 = new int[] { + 0x00000000, 0x38116FAC, 0x7022DF58, 0x4833B0F4, + 0xE045BEB0, 0xD854D11C, 0x906761E8, 0xA8760E44, + 0xC5670B91, 0xFD76643D, 0xB545D4C9, 0x8D54BB65, + 0x2522B521, 0x1D33DA8D, 0x55006A79, 0x6D1105D5, + 0x8F2261D3, 0xB7330E7F, 0xFF00BE8B, 0xC711D127, + 0x6F67DF63, 0x5776B0CF, 0x1F45003B, 0x27546F97, + 0x4A456A42, 0x725405EE, 0x3A67B51A, 0x0276DAB6, + 0xAA00D4F2, 0x9211BB5E, 0xDA220BAA, 0xE2336406, + 0x1BA8B557, 0x23B9DAFB, 0x6B8A6A0F, 0x539B05A3, + 0xFBED0BE7, 0xC3FC644B, 0x8BCFD4BF, 0xB3DEBB13, + 0xDECFBEC6, 0xE6DED16A, 0xAEED619E, 0x96FC0E32, + 0x3E8A0076, 0x069B6FDA, 0x4EA8DF2E, 0x76B9B082, + 0x948AD484, 0xAC9BBB28, 0xE4A80BDC, 0xDCB96470, + 0x74CF6A34, 0x4CDE0598, 0x04EDB56C, 0x3CFCDAC0, + 0x51EDDF15, 0x69FCB0B9, 0x21CF004D, 0x19DE6FE1, + 0xB1A861A5, 0x89B90E09, 0xC18ABEFD, 0xF99BD151, + 0x37516AAE, 0x0F400502, 0x4773B5F6, 0x7F62DA5A, + 0xD714D41E, 0xEF05BBB2, 0xA7360B46, 0x9F2764EA, + 0xF236613F, 0xCA270E93, 0x8214BE67, 0xBA05D1CB, + 0x1273DF8F, 0x2A62B023, 0x625100D7, 0x5A406F7B, + 0xB8730B7D, 0x806264D1, 0xC851D425, 0xF040BB89, + 0x5836B5CD, 0x6027DA61, 0x28146A95, 0x10050539, + 0x7D1400EC, 0x45056F40, 0x0D36DFB4, 0x3527B018, + 0x9D51BE5C, 0xA540D1F0, 0xED736104, 0xD5620EA8, + 0x2CF9DFF9, 0x14E8B055, 0x5CDB00A1, 0x64CA6F0D, + 0xCCBC6149, 0xF4AD0EE5, 
0xBC9EBE11, 0x848FD1BD, + 0xE99ED468, 0xD18FBBC4, 0x99BC0B30, 0xA1AD649C, + 0x09DB6AD8, 0x31CA0574, 0x79F9B580, 0x41E8DA2C, + 0xA3DBBE2A, 0x9BCAD186, 0xD3F96172, 0xEBE80EDE, + 0x439E009A, 0x7B8F6F36, 0x33BCDFC2, 0x0BADB06E, + 0x66BCB5BB, 0x5EADDA17, 0x169E6AE3, 0x2E8F054F, + 0x86F90B0B, 0xBEE864A7, 0xF6DBD453, 0xCECABBFF, + 0x6EA2D55C, 0x56B3BAF0, 0x1E800A04, 0x269165A8, + 0x8EE76BEC, 0xB6F60440, 0xFEC5B4B4, 0xC6D4DB18, + 0xABC5DECD, 0x93D4B161, 0xDBE70195, 0xE3F66E39, + 0x4B80607D, 0x73910FD1, 0x3BA2BF25, 0x03B3D089, + 0xE180B48F, 0xD991DB23, 0x91A26BD7, 0xA9B3047B, + 0x01C50A3F, 0x39D46593, 0x71E7D567, 0x49F6BACB, + 0x24E7BF1E, 0x1CF6D0B2, 0x54C56046, 0x6CD40FEA, + 0xC4A201AE, 0xFCB36E02, 0xB480DEF6, 0x8C91B15A, + 0x750A600B, 0x4D1B0FA7, 0x0528BF53, 0x3D39D0FF, + 0x954FDEBB, 0xAD5EB117, 0xE56D01E3, 0xDD7C6E4F, + 0xB06D6B9A, 0x887C0436, 0xC04FB4C2, 0xF85EDB6E, + 0x5028D52A, 0x6839BA86, 0x200A0A72, 0x181B65DE, + 0xFA2801D8, 0xC2396E74, 0x8A0ADE80, 0xB21BB12C, + 0x1A6DBF68, 0x227CD0C4, 0x6A4F6030, 0x525E0F9C, + 0x3F4F0A49, 0x075E65E5, 0x4F6DD511, 0x777CBABD, + 0xDF0AB4F9, 0xE71BDB55, 0xAF286BA1, 0x9739040D, + 0x59F3BFF2, 0x61E2D05E, 0x29D160AA, 0x11C00F06, + 0xB9B60142, 0x81A76EEE, 0xC994DE1A, 0xF185B1B6, + 0x9C94B463, 0xA485DBCF, 0xECB66B3B, 0xD4A70497, + 0x7CD10AD3, 0x44C0657F, 0x0CF3D58B, 0x34E2BA27, + 0xD6D1DE21, 0xEEC0B18D, 0xA6F30179, 0x9EE26ED5, + 0x36946091, 0x0E850F3D, 0x46B6BFC9, 0x7EA7D065, + 0x13B6D5B0, 0x2BA7BA1C, 0x63940AE8, 0x5B856544, + 0xF3F36B00, 0xCBE204AC, 0x83D1B458, 0xBBC0DBF4, + 0x425B0AA5, 0x7A4A6509, 0x3279D5FD, 0x0A68BA51, + 0xA21EB415, 0x9A0FDBB9, 0xD23C6B4D, 0xEA2D04E1, + 0x873C0134, 0xBF2D6E98, 0xF71EDE6C, 0xCF0FB1C0, + 0x6779BF84, 0x5F68D028, 0x175B60DC, 0x2F4A0F70, + 0xCD796B76, 0xF56804DA, 0xBD5BB42E, 0x854ADB82, + 0x2D3CD5C6, 0x152DBA6A, 0x5D1E0A9E, 0x650F6532, + 0x081E60E7, 0x300F0F4B, 0x783CBFBF, 0x402DD013, + 0xE85BDE57, 0xD04AB1FB, 0x9879010F, 0xA0686EA3 + }; + static final int[] T8_5 = new int[] { + 0x00000000, 0xEF306B19, 
0xDB8CA0C3, 0x34BCCBDA, + 0xB2F53777, 0x5DC55C6E, 0x697997B4, 0x8649FCAD, + 0x6006181F, 0x8F367306, 0xBB8AB8DC, 0x54BAD3C5, + 0xD2F32F68, 0x3DC34471, 0x097F8FAB, 0xE64FE4B2, + 0xC00C303E, 0x2F3C5B27, 0x1B8090FD, 0xF4B0FBE4, + 0x72F90749, 0x9DC96C50, 0xA975A78A, 0x4645CC93, + 0xA00A2821, 0x4F3A4338, 0x7B8688E2, 0x94B6E3FB, + 0x12FF1F56, 0xFDCF744F, 0xC973BF95, 0x2643D48C, + 0x85F4168D, 0x6AC47D94, 0x5E78B64E, 0xB148DD57, + 0x370121FA, 0xD8314AE3, 0xEC8D8139, 0x03BDEA20, + 0xE5F20E92, 0x0AC2658B, 0x3E7EAE51, 0xD14EC548, + 0x570739E5, 0xB83752FC, 0x8C8B9926, 0x63BBF23F, + 0x45F826B3, 0xAAC84DAA, 0x9E748670, 0x7144ED69, + 0xF70D11C4, 0x183D7ADD, 0x2C81B107, 0xC3B1DA1E, + 0x25FE3EAC, 0xCACE55B5, 0xFE729E6F, 0x1142F576, + 0x970B09DB, 0x783B62C2, 0x4C87A918, 0xA3B7C201, + 0x0E045BEB, 0xE13430F2, 0xD588FB28, 0x3AB89031, + 0xBCF16C9C, 0x53C10785, 0x677DCC5F, 0x884DA746, + 0x6E0243F4, 0x813228ED, 0xB58EE337, 0x5ABE882E, + 0xDCF77483, 0x33C71F9A, 0x077BD440, 0xE84BBF59, + 0xCE086BD5, 0x213800CC, 0x1584CB16, 0xFAB4A00F, + 0x7CFD5CA2, 0x93CD37BB, 0xA771FC61, 0x48419778, + 0xAE0E73CA, 0x413E18D3, 0x7582D309, 0x9AB2B810, + 0x1CFB44BD, 0xF3CB2FA4, 0xC777E47E, 0x28478F67, + 0x8BF04D66, 0x64C0267F, 0x507CEDA5, 0xBF4C86BC, + 0x39057A11, 0xD6351108, 0xE289DAD2, 0x0DB9B1CB, + 0xEBF65579, 0x04C63E60, 0x307AF5BA, 0xDF4A9EA3, + 0x5903620E, 0xB6330917, 0x828FC2CD, 0x6DBFA9D4, + 0x4BFC7D58, 0xA4CC1641, 0x9070DD9B, 0x7F40B682, + 0xF9094A2F, 0x16392136, 0x2285EAEC, 0xCDB581F5, + 0x2BFA6547, 0xC4CA0E5E, 0xF076C584, 0x1F46AE9D, + 0x990F5230, 0x763F3929, 0x4283F2F3, 0xADB399EA, + 0x1C08B7D6, 0xF338DCCF, 0xC7841715, 0x28B47C0C, + 0xAEFD80A1, 0x41CDEBB8, 0x75712062, 0x9A414B7B, + 0x7C0EAFC9, 0x933EC4D0, 0xA7820F0A, 0x48B26413, + 0xCEFB98BE, 0x21CBF3A7, 0x1577387D, 0xFA475364, + 0xDC0487E8, 0x3334ECF1, 0x0788272B, 0xE8B84C32, + 0x6EF1B09F, 0x81C1DB86, 0xB57D105C, 0x5A4D7B45, + 0xBC029FF7, 0x5332F4EE, 0x678E3F34, 0x88BE542D, + 0x0EF7A880, 0xE1C7C399, 0xD57B0843, 0x3A4B635A, + 0x99FCA15B, 0x76CCCA42, 
0x42700198, 0xAD406A81, + 0x2B09962C, 0xC439FD35, 0xF08536EF, 0x1FB55DF6, + 0xF9FAB944, 0x16CAD25D, 0x22761987, 0xCD46729E, + 0x4B0F8E33, 0xA43FE52A, 0x90832EF0, 0x7FB345E9, + 0x59F09165, 0xB6C0FA7C, 0x827C31A6, 0x6D4C5ABF, + 0xEB05A612, 0x0435CD0B, 0x308906D1, 0xDFB96DC8, + 0x39F6897A, 0xD6C6E263, 0xE27A29B9, 0x0D4A42A0, + 0x8B03BE0D, 0x6433D514, 0x508F1ECE, 0xBFBF75D7, + 0x120CEC3D, 0xFD3C8724, 0xC9804CFE, 0x26B027E7, + 0xA0F9DB4A, 0x4FC9B053, 0x7B757B89, 0x94451090, + 0x720AF422, 0x9D3A9F3B, 0xA98654E1, 0x46B63FF8, + 0xC0FFC355, 0x2FCFA84C, 0x1B736396, 0xF443088F, + 0xD200DC03, 0x3D30B71A, 0x098C7CC0, 0xE6BC17D9, + 0x60F5EB74, 0x8FC5806D, 0xBB794BB7, 0x544920AE, + 0xB206C41C, 0x5D36AF05, 0x698A64DF, 0x86BA0FC6, + 0x00F3F36B, 0xEFC39872, 0xDB7F53A8, 0x344F38B1, + 0x97F8FAB0, 0x78C891A9, 0x4C745A73, 0xA344316A, + 0x250DCDC7, 0xCA3DA6DE, 0xFE816D04, 0x11B1061D, + 0xF7FEE2AF, 0x18CE89B6, 0x2C72426C, 0xC3422975, + 0x450BD5D8, 0xAA3BBEC1, 0x9E87751B, 0x71B71E02, + 0x57F4CA8E, 0xB8C4A197, 0x8C786A4D, 0x63480154, + 0xE501FDF9, 0x0A3196E0, 0x3E8D5D3A, 0xD1BD3623, + 0x37F2D291, 0xD8C2B988, 0xEC7E7252, 0x034E194B, + 0x8507E5E6, 0x6A378EFF, 0x5E8B4525, 0xB1BB2E3C + }; + static final int[] T8_6 = new int[] { + 0x00000000, 0x68032CC8, 0xD0065990, 0xB8057558, + 0xA5E0C5D1, 0xCDE3E919, 0x75E69C41, 0x1DE5B089, + 0x4E2DFD53, 0x262ED19B, 0x9E2BA4C3, 0xF628880B, + 0xEBCD3882, 0x83CE144A, 0x3BCB6112, 0x53C84DDA, + 0x9C5BFAA6, 0xF458D66E, 0x4C5DA336, 0x245E8FFE, + 0x39BB3F77, 0x51B813BF, 0xE9BD66E7, 0x81BE4A2F, + 0xD27607F5, 0xBA752B3D, 0x02705E65, 0x6A7372AD, + 0x7796C224, 0x1F95EEEC, 0xA7909BB4, 0xCF93B77C, + 0x3D5B83BD, 0x5558AF75, 0xED5DDA2D, 0x855EF6E5, + 0x98BB466C, 0xF0B86AA4, 0x48BD1FFC, 0x20BE3334, + 0x73767EEE, 0x1B755226, 0xA370277E, 0xCB730BB6, + 0xD696BB3F, 0xBE9597F7, 0x0690E2AF, 0x6E93CE67, + 0xA100791B, 0xC90355D3, 0x7106208B, 0x19050C43, + 0x04E0BCCA, 0x6CE39002, 0xD4E6E55A, 0xBCE5C992, + 0xEF2D8448, 0x872EA880, 0x3F2BDDD8, 0x5728F110, + 0x4ACD4199, 0x22CE6D51, 
0x9ACB1809, 0xF2C834C1, + 0x7AB7077A, 0x12B42BB2, 0xAAB15EEA, 0xC2B27222, + 0xDF57C2AB, 0xB754EE63, 0x0F519B3B, 0x6752B7F3, + 0x349AFA29, 0x5C99D6E1, 0xE49CA3B9, 0x8C9F8F71, + 0x917A3FF8, 0xF9791330, 0x417C6668, 0x297F4AA0, + 0xE6ECFDDC, 0x8EEFD114, 0x36EAA44C, 0x5EE98884, + 0x430C380D, 0x2B0F14C5, 0x930A619D, 0xFB094D55, + 0xA8C1008F, 0xC0C22C47, 0x78C7591F, 0x10C475D7, + 0x0D21C55E, 0x6522E996, 0xDD279CCE, 0xB524B006, + 0x47EC84C7, 0x2FEFA80F, 0x97EADD57, 0xFFE9F19F, + 0xE20C4116, 0x8A0F6DDE, 0x320A1886, 0x5A09344E, + 0x09C17994, 0x61C2555C, 0xD9C72004, 0xB1C40CCC, + 0xAC21BC45, 0xC422908D, 0x7C27E5D5, 0x1424C91D, + 0xDBB77E61, 0xB3B452A9, 0x0BB127F1, 0x63B20B39, + 0x7E57BBB0, 0x16549778, 0xAE51E220, 0xC652CEE8, + 0x959A8332, 0xFD99AFFA, 0x459CDAA2, 0x2D9FF66A, + 0x307A46E3, 0x58796A2B, 0xE07C1F73, 0x887F33BB, + 0xF56E0EF4, 0x9D6D223C, 0x25685764, 0x4D6B7BAC, + 0x508ECB25, 0x388DE7ED, 0x808892B5, 0xE88BBE7D, + 0xBB43F3A7, 0xD340DF6F, 0x6B45AA37, 0x034686FF, + 0x1EA33676, 0x76A01ABE, 0xCEA56FE6, 0xA6A6432E, + 0x6935F452, 0x0136D89A, 0xB933ADC2, 0xD130810A, + 0xCCD53183, 0xA4D61D4B, 0x1CD36813, 0x74D044DB, + 0x27180901, 0x4F1B25C9, 0xF71E5091, 0x9F1D7C59, + 0x82F8CCD0, 0xEAFBE018, 0x52FE9540, 0x3AFDB988, + 0xC8358D49, 0xA036A181, 0x1833D4D9, 0x7030F811, + 0x6DD54898, 0x05D66450, 0xBDD31108, 0xD5D03DC0, + 0x8618701A, 0xEE1B5CD2, 0x561E298A, 0x3E1D0542, + 0x23F8B5CB, 0x4BFB9903, 0xF3FEEC5B, 0x9BFDC093, + 0x546E77EF, 0x3C6D5B27, 0x84682E7F, 0xEC6B02B7, + 0xF18EB23E, 0x998D9EF6, 0x2188EBAE, 0x498BC766, + 0x1A438ABC, 0x7240A674, 0xCA45D32C, 0xA246FFE4, + 0xBFA34F6D, 0xD7A063A5, 0x6FA516FD, 0x07A63A35, + 0x8FD9098E, 0xE7DA2546, 0x5FDF501E, 0x37DC7CD6, + 0x2A39CC5F, 0x423AE097, 0xFA3F95CF, 0x923CB907, + 0xC1F4F4DD, 0xA9F7D815, 0x11F2AD4D, 0x79F18185, + 0x6414310C, 0x0C171DC4, 0xB412689C, 0xDC114454, + 0x1382F328, 0x7B81DFE0, 0xC384AAB8, 0xAB878670, + 0xB66236F9, 0xDE611A31, 0x66646F69, 0x0E6743A1, + 0x5DAF0E7B, 0x35AC22B3, 0x8DA957EB, 0xE5AA7B23, + 0xF84FCBAA, 0x904CE762, 
0x2849923A, 0x404ABEF2, + 0xB2828A33, 0xDA81A6FB, 0x6284D3A3, 0x0A87FF6B, + 0x17624FE2, 0x7F61632A, 0xC7641672, 0xAF673ABA, + 0xFCAF7760, 0x94AC5BA8, 0x2CA92EF0, 0x44AA0238, + 0x594FB2B1, 0x314C9E79, 0x8949EB21, 0xE14AC7E9, + 0x2ED97095, 0x46DA5C5D, 0xFEDF2905, 0x96DC05CD, + 0x8B39B544, 0xE33A998C, 0x5B3FECD4, 0x333CC01C, + 0x60F48DC6, 0x08F7A10E, 0xB0F2D456, 0xD8F1F89E, + 0xC5144817, 0xAD1764DF, 0x15121187, 0x7D113D4F + }; + static final int[] T8_7 = new int[] { + 0x00000000, 0x493C7D27, 0x9278FA4E, 0xDB448769, + 0x211D826D, 0x6821FF4A, 0xB3657823, 0xFA590504, + 0x423B04DA, 0x0B0779FD, 0xD043FE94, 0x997F83B3, + 0x632686B7, 0x2A1AFB90, 0xF15E7CF9, 0xB86201DE, + 0x847609B4, 0xCD4A7493, 0x160EF3FA, 0x5F328EDD, + 0xA56B8BD9, 0xEC57F6FE, 0x37137197, 0x7E2F0CB0, + 0xC64D0D6E, 0x8F717049, 0x5435F720, 0x1D098A07, + 0xE7508F03, 0xAE6CF224, 0x7528754D, 0x3C14086A, + 0x0D006599, 0x443C18BE, 0x9F789FD7, 0xD644E2F0, + 0x2C1DE7F4, 0x65219AD3, 0xBE651DBA, 0xF759609D, + 0x4F3B6143, 0x06071C64, 0xDD439B0D, 0x947FE62A, + 0x6E26E32E, 0x271A9E09, 0xFC5E1960, 0xB5626447, + 0x89766C2D, 0xC04A110A, 0x1B0E9663, 0x5232EB44, + 0xA86BEE40, 0xE1579367, 0x3A13140E, 0x732F6929, + 0xCB4D68F7, 0x827115D0, 0x593592B9, 0x1009EF9E, + 0xEA50EA9A, 0xA36C97BD, 0x782810D4, 0x31146DF3, + 0x1A00CB32, 0x533CB615, 0x8878317C, 0xC1444C5B, + 0x3B1D495F, 0x72213478, 0xA965B311, 0xE059CE36, + 0x583BCFE8, 0x1107B2CF, 0xCA4335A6, 0x837F4881, + 0x79264D85, 0x301A30A2, 0xEB5EB7CB, 0xA262CAEC, + 0x9E76C286, 0xD74ABFA1, 0x0C0E38C8, 0x453245EF, + 0xBF6B40EB, 0xF6573DCC, 0x2D13BAA5, 0x642FC782, + 0xDC4DC65C, 0x9571BB7B, 0x4E353C12, 0x07094135, + 0xFD504431, 0xB46C3916, 0x6F28BE7F, 0x2614C358, + 0x1700AEAB, 0x5E3CD38C, 0x857854E5, 0xCC4429C2, + 0x361D2CC6, 0x7F2151E1, 0xA465D688, 0xED59ABAF, + 0x553BAA71, 0x1C07D756, 0xC743503F, 0x8E7F2D18, + 0x7426281C, 0x3D1A553B, 0xE65ED252, 0xAF62AF75, + 0x9376A71F, 0xDA4ADA38, 0x010E5D51, 0x48322076, + 0xB26B2572, 0xFB575855, 0x2013DF3C, 0x692FA21B, + 0xD14DA3C5, 0x9871DEE2, 
0x4335598B, 0x0A0924AC, + 0xF05021A8, 0xB96C5C8F, 0x6228DBE6, 0x2B14A6C1, + 0x34019664, 0x7D3DEB43, 0xA6796C2A, 0xEF45110D, + 0x151C1409, 0x5C20692E, 0x8764EE47, 0xCE589360, + 0x763A92BE, 0x3F06EF99, 0xE44268F0, 0xAD7E15D7, + 0x572710D3, 0x1E1B6DF4, 0xC55FEA9D, 0x8C6397BA, + 0xB0779FD0, 0xF94BE2F7, 0x220F659E, 0x6B3318B9, + 0x916A1DBD, 0xD856609A, 0x0312E7F3, 0x4A2E9AD4, + 0xF24C9B0A, 0xBB70E62D, 0x60346144, 0x29081C63, + 0xD3511967, 0x9A6D6440, 0x4129E329, 0x08159E0E, + 0x3901F3FD, 0x703D8EDA, 0xAB7909B3, 0xE2457494, + 0x181C7190, 0x51200CB7, 0x8A648BDE, 0xC358F6F9, + 0x7B3AF727, 0x32068A00, 0xE9420D69, 0xA07E704E, + 0x5A27754A, 0x131B086D, 0xC85F8F04, 0x8163F223, + 0xBD77FA49, 0xF44B876E, 0x2F0F0007, 0x66337D20, + 0x9C6A7824, 0xD5560503, 0x0E12826A, 0x472EFF4D, + 0xFF4CFE93, 0xB67083B4, 0x6D3404DD, 0x240879FA, + 0xDE517CFE, 0x976D01D9, 0x4C2986B0, 0x0515FB97, + 0x2E015D56, 0x673D2071, 0xBC79A718, 0xF545DA3F, + 0x0F1CDF3B, 0x4620A21C, 0x9D642575, 0xD4585852, + 0x6C3A598C, 0x250624AB, 0xFE42A3C2, 0xB77EDEE5, + 0x4D27DBE1, 0x041BA6C6, 0xDF5F21AF, 0x96635C88, + 0xAA7754E2, 0xE34B29C5, 0x380FAEAC, 0x7133D38B, + 0x8B6AD68F, 0xC256ABA8, 0x19122CC1, 0x502E51E6, + 0xE84C5038, 0xA1702D1F, 0x7A34AA76, 0x3308D751, + 0xC951D255, 0x806DAF72, 0x5B29281B, 0x1215553C, + 0x230138CF, 0x6A3D45E8, 0xB179C281, 0xF845BFA6, + 0x021CBAA2, 0x4B20C785, 0x906440EC, 0xD9583DCB, + 0x613A3C15, 0x28064132, 0xF342C65B, 0xBA7EBB7C, + 0x4027BE78, 0x091BC35F, 0xD25F4436, 0x9B633911, + 0xA777317B, 0xEE4B4C5C, 0x350FCB35, 0x7C33B612, + 0x866AB316, 0xCF56CE31, 0x14124958, 0x5D2E347F, + 0xE54C35A1, 0xAC704886, 0x7734CFEF, 0x3E08B2C8, + 0xC451B7CC, 0x8D6DCAEB, 0x56294D82, 0x1F1530A5 + }; +} diff --git a/leveldb/src/main/java/org/iq80/leveldb/util/SafeListBuilder.java b/leveldb/src/main/java/org/iq80/leveldb/util/SafeListBuilder.java new file mode 100644 index 0000000..311a802 --- /dev/null +++ b/leveldb/src/main/java/org/iq80/leveldb/util/SafeListBuilder.java @@ -0,0 +1,100 @@ +/* + * Copyright (C) 2011 
the original author or authors. + * See the notice.md file distributed with this work for additional + * information regarding copyright ownership. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.iq80.leveldb.util; + +import com.google.common.collect.ImmutableList; + +import java.io.Closeable; +import java.io.IOException; +import java.util.List; + +/** + * If for some reason {@link #build()} is not called (eg: due to some exception) when {@link SafeListBuilder#close()} + * is called, then {@link Closeable#close()} method will be called on each inserted items. + * + * @param entry type that should be closed if nop properly read. + */ +public final class SafeListBuilder implements Closeable +{ + private ImmutableList.Builder builder; + + private SafeListBuilder(ImmutableList.Builder builder) + { + this.builder = builder; + } + + /** + * Returns a new builder. + */ + public static final SafeListBuilder builder() + { + return new SafeListBuilder<>(ImmutableList.builder()); + } + + /** + * Adds {@code element} to the {@code ImmutableList}. + * + * @param element the element to add + * @return this {@code Builder} object + * @throws NullPointerException if {@code element} is null + */ + public ImmutableList.Builder add(T element) + { + return builder.add(element); + } + + /** + * Adds each element of {@code elements} to the {@code ImmutableList}. 
+ * + * @param elements the {@code Iterable} to add to the {@code ImmutableList} + * @return this {@code Builder} object + * @throws NullPointerException if {@code elements} is null or contains a + * null element + */ + public ImmutableList.Builder addAll(Iterable elements) + { + return builder.addAll(elements); + } + + /** + * Returns a newly-created {@code ImmutableList} based on the contents of + * the {@code SafeListBuilder}. + *

+ * After this call, {@link #close()} wont have any effect on item of the list. + */ + public List build() + { + final ImmutableList.Builder b1 = this.builder; + this.builder = null; + return b1.build(); + } + + /** + * If {@link #build()} was not called yet, {@link Closeable#close()} method will be called on all items added + * trough {@link #add(Closeable)} and {@link #addAll(Iterable)}. + * If {@link #build()} was already used, nothing will happen on the data of this builder. + */ + @Override + public void close() throws IOException + { + final ImmutableList.Builder b = this.builder; + if (b != null) { + Closeables.closeAll(b.build()); + } + } +} diff --git a/leveldb/src/main/java/org/iq80/leveldb/util/SizeOf.java b/leveldb/src/main/java/org/iq80/leveldb/util/SizeOf.java new file mode 100644 index 0000000..cd454a5 --- /dev/null +++ b/leveldb/src/main/java/org/iq80/leveldb/util/SizeOf.java @@ -0,0 +1,30 @@ +/* + * Copyright (C) 2011 the original author or authors. + * See the notice.md file distributed with this work for additional + * information regarding copyright ownership. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.iq80.leveldb.util; + +public final class SizeOf +{ + public static final byte SIZE_OF_BYTE = 1; + public static final byte SIZE_OF_SHORT = 2; + public static final byte SIZE_OF_INT = 4; + public static final byte SIZE_OF_LONG = 8; + + private SizeOf() + { + } +} diff --git a/leveldb/src/main/java/org/iq80/leveldb/util/Slice.java b/leveldb/src/main/java/org/iq80/leveldb/util/Slice.java new file mode 100644 index 0000000..862dd42 --- /dev/null +++ b/leveldb/src/main/java/org/iq80/leveldb/util/Slice.java @@ -0,0 +1,586 @@ +/* + * Copyright (C) 2011 the original author or authors. + * See the notice.md file distributed with this work for additional + * information regarding copyright ownership. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.iq80.leveldb.util; + +import java.io.IOException; +import java.io.InputStream; +import java.nio.ByteBuffer; +import java.nio.charset.Charset; + +import static com.google.common.base.Preconditions.checkPositionIndex; +import static com.google.common.base.Preconditions.checkPositionIndexes; +import static java.nio.ByteOrder.LITTLE_ENDIAN; +import static java.util.Objects.requireNonNull; +import static org.iq80.leveldb.util.SizeOf.SIZE_OF_BYTE; +import static org.iq80.leveldb.util.SizeOf.SIZE_OF_INT; +import static org.iq80.leveldb.util.SizeOf.SIZE_OF_LONG; +import static org.iq80.leveldb.util.SizeOf.SIZE_OF_SHORT; + +/** + * Little Endian slice of a byte array. 
+ */ +public final class Slice + implements Comparable +{ + private final byte[] data; + private final int offset; + private final int length; + + private int hash; + + public Slice(int length) + { + data = new byte[length]; + this.offset = 0; + this.length = length; + } + + public Slice(byte[] data) + { + requireNonNull(data, "array is null"); + this.data = data; + this.offset = 0; + this.length = data.length; + } + + public Slice(byte[] data, int offset, int length) + { + requireNonNull(data, "array is null"); + this.data = data; + this.offset = offset; + this.length = length; + } + + /** + * Length of this slice. + */ + public int length() + { + return length; + } + + /** + * Gets the array underlying this slice. + */ + public byte[] getRawArray() + { + return data; + } + + /** + * Gets the offset of this slice in the underlying array. + */ + public int getRawOffset() + { + return offset; + } + + /** + * Gets a byte at the specified absolute {@code index} in this buffer. + * + * @throws IndexOutOfBoundsException if the specified {@code index} is less than {@code 0} or + * {@code index + 1} is greater than {@code this.capacity} + */ + public byte getByte(int index) + { + checkPositionIndexes(index, index + SIZE_OF_BYTE, this.length); + index += offset; + return data[index]; + } + + /** + * Gets an unsigned byte at the specified absolute {@code index} in this + * buffer. + * + * @throws IndexOutOfBoundsException if the specified {@code index} is less than {@code 0} or + * {@code index + 1} is greater than {@code this.capacity} + */ + public short getUnsignedByte(int index) + { + return (short) (getByte(index) & 0xFF); + } + + /** + * Gets a 16-bit short integer at the specified absolute {@code index} in + * this slice. 
+ * + * @throws IndexOutOfBoundsException if the specified {@code index} is less than {@code 0} or + * {@code index + 2} is greater than {@code this.capacity} + */ + public short getShort(int index) + { + checkPositionIndexes(index, index + SIZE_OF_SHORT, this.length); + index += offset; + return (short) (data[index] & 0xFF | data[index + 1] << 8); + } + + /** + * Gets a 32-bit integer at the specified absolute {@code index} in + * this buffer. + * + * @throws IndexOutOfBoundsException if the specified {@code index} is less than {@code 0} or + * {@code index + 4} is greater than {@code this.capacity} + */ + public int getInt(int index) + { + checkPositionIndexes(index, index + SIZE_OF_INT, this.length); + index += offset; + return (data[index] & 0xff) | + (data[index + 1] & 0xff) << 8 | + (data[index + 2] & 0xff) << 16 | + (data[index + 3] & 0xff) << 24; + } + + /** + * Gets a 64-bit long integer at the specified absolute {@code index} in + * this buffer. + * + * @throws IndexOutOfBoundsException if the specified {@code index} is less than {@code 0} or + * {@code index + 8} is greater than {@code this.capacity} + */ + public long getLong(int index) + { + checkPositionIndexes(index, index + SIZE_OF_LONG, this.length); + index += offset; + return ((long) data[index] & 0xff) | + ((long) data[index + 1] & 0xff) << 8 | + ((long) data[index + 2] & 0xff) << 16 | + ((long) data[index + 3] & 0xff) << 24 | + ((long) data[index + 4] & 0xff) << 32 | + ((long) data[index + 5] & 0xff) << 40 | + ((long) data[index + 6] & 0xff) << 48 | + ((long) data[index + 7] & 0xff) << 56; + } + + /** + * Transfers this buffer's data to the specified destination starting at + * the specified absolute {@code index}. 
+ * + * @param dstIndex the first index of the destination + * @param length the number of bytes to transfer + * @throws IndexOutOfBoundsException if the specified {@code index} is less than {@code 0}, + * if the specified {@code dstIndex} is less than {@code 0}, + * if {@code index + length} is greater than + * {@code this.capacity}, or + * if {@code dstIndex + length} is greater than + * {@code dst.capacity} + */ + public void getBytes(int index, Slice dst, int dstIndex, int length) + { + getBytes(index, dst.data, dstIndex, length); + } + + /** + * Transfers this buffer's data to the specified destination starting at + * the specified absolute {@code index}. + * + * @param destinationIndex the first index of the destination + * @param length the number of bytes to transfer + * @throws IndexOutOfBoundsException if the specified {@code index} is less than {@code 0}, + * if the specified {@code dstIndex} is less than {@code 0}, + * if {@code index + length} is greater than + * {@code this.capacity}, or + * if {@code dstIndex + length} is greater than + * {@code dst.length} + */ + public void getBytes(int index, byte[] destination, int destinationIndex, int length) + { + checkPositionIndexes(index, index + length, this.length); + checkPositionIndexes(destinationIndex, destinationIndex + length, destination.length); + index += offset; + System.arraycopy(data, index, destination, destinationIndex, length); + } + + public byte[] getBytes() + { + return getBytes(0, length); + } + + public byte[] getBytes(int index, int length) + { + checkPositionIndexes(index, index + length, this.length); + if (length == 0 && this.data.length == 0) { + return data; //zero size array is immutable + } + index += offset; + byte[] value = new byte[length]; + System.arraycopy(data, index, value, 0, length); + return value; + } + + /** + * Transfers this buffer's data to the specified destination starting at + * the specified absolute {@code index} until the destination's position + * reaches 
its limit. + * + * @throws IndexOutOfBoundsException if the specified {@code index} is less than {@code 0} or + * if {@code index + dst.remaining()} is greater than + * {@code this.capacity} + */ + public void getBytes(int index, ByteBuffer destination) + { + checkPositionIndex(index, this.length); + index += offset; + destination.put(data, index, Math.min(length, destination.remaining())); + } + + /** + * Sets the specified 16-bit short integer at the specified absolute + * {@code index} in this buffer. The 16 high-order bits of the specified + * value are ignored. + * + * @throws IndexOutOfBoundsException if the specified {@code index} is less than {@code 0} or + * {@code index + 2} is greater than {@code this.capacity} + */ + public void setShort(int index, int value) + { + checkPositionIndexes(index, index + SIZE_OF_SHORT, this.length); + index += offset; + data[index] = (byte) (value); + data[index + 1] = (byte) (value >>> 8); + } + + /** + * Sets the specified 32-bit integer at the specified absolute + * {@code index} in this buffer. + * + * @throws IndexOutOfBoundsException if the specified {@code index} is less than {@code 0} or + * {@code index + 4} is greater than {@code this.capacity} + */ + public void setInt(int index, int value) + { + checkPositionIndexes(index, index + SIZE_OF_INT, this.length); + index += offset; + data[index] = (byte) (value); + data[index + 1] = (byte) (value >>> 8); + data[index + 2] = (byte) (value >>> 16); + data[index + 3] = (byte) (value >>> 24); + } + + /** + * Sets the specified 64-bit long integer at the specified absolute + * {@code index} in this buffer. 
+ * + * @throws IndexOutOfBoundsException if the specified {@code index} is less than {@code 0} or + * {@code index + 8} is greater than {@code this.capacity} + */ + public void setLong(int index, long value) + { + checkPositionIndexes(index, index + SIZE_OF_LONG, this.length); + index += offset; + data[index] = (byte) (value); + data[index + 1] = (byte) (value >>> 8); + data[index + 2] = (byte) (value >>> 16); + data[index + 3] = (byte) (value >>> 24); + data[index + 4] = (byte) (value >>> 32); + data[index + 5] = (byte) (value >>> 40); + data[index + 6] = (byte) (value >>> 48); + data[index + 7] = (byte) (value >>> 56); + } + + /** + * Sets the specified byte at the specified absolute {@code index} in this + * buffer. The 24 high-order bits of the specified value are ignored. + * + * @throws IndexOutOfBoundsException if the specified {@code index} is less than {@code 0} or + * {@code index + 1} is greater than {@code this.capacity} + */ + public void setByte(int index, int value) + { + checkPositionIndexes(index, index + SIZE_OF_BYTE, this.length); + index += offset; + data[index] = (byte) value; + } + + /** + * Transfers the specified source buffer's data to this buffer starting at + * the specified absolute {@code index}. + * + * @param srcIndex the first index of the source + * @param length the number of bytes to transfer + * @throws IndexOutOfBoundsException if the specified {@code index} is less than {@code 0}, + * if the specified {@code srcIndex} is less than {@code 0}, + * if {@code index + length} is greater than + * {@code this.capacity}, or + * if {@code srcIndex + length} is greater than + * {@code src.capacity} + */ + public void setBytes(int index, Slice src, int srcIndex, int length) + { + setBytes(index, src.data, src.offset + srcIndex, length); + } + + /** + * Transfers the specified source array's data to this buffer starting at + * the specified absolute {@code index}. 
+ * + * @throws IndexOutOfBoundsException if the specified {@code index} is less than {@code 0}, + * if the specified {@code srcIndex} is less than {@code 0}, + * if {@code index + length} is greater than + * {@code this.capacity}, or + * if {@code srcIndex + length} is greater than {@code src.length} + */ + public void setBytes(int index, byte[] source, int sourceIndex, int length) + { + checkPositionIndexes(index, index + length, this.length); + checkPositionIndexes(sourceIndex, sourceIndex + length, source.length); + index += offset; + System.arraycopy(source, sourceIndex, data, index, length); + } + + /** + * Transfers the specified source buffer's data to this buffer starting at + * the specified absolute {@code index} until the source buffer's position + * reaches its limit. + * + * @throws IndexOutOfBoundsException if the specified {@code index} is less than {@code 0} or + * if {@code index + src.remaining()} is greater than + * {@code this.capacity} + */ + public void setBytes(int index, ByteBuffer source) + { + checkPositionIndexes(index, index + source.remaining(), this.length); + index += offset; + source.get(data, index, source.remaining()); + } + + /** + * Transfers the content of the specified source stream to this buffer + * starting at the specified absolute {@code index}. + * + * @param length the number of bytes to transfer + * @return the actual number of bytes read in from the specified channel. + * {@code -1} if the specified channel is closed. 
+ * @throws IndexOutOfBoundsException if the specified {@code index} is less than {@code 0} or + * if {@code index + length} is greater than {@code this.capacity} + * @throws java.io.IOException if the specified stream threw an exception during I/O + */ + public int setBytes(int index, InputStream in, int length) + throws IOException + { + checkPositionIndexes(index, index + length, this.length); + index += offset; + int readBytes = 0; + do { + int localReadBytes = in.read(data, index, length); + if (localReadBytes < 0) { + if (readBytes == 0) { + return -1; + } + else { + break; + } + } + readBytes += localReadBytes; + index += localReadBytes; + length -= localReadBytes; + } while (length > 0); + + return readBytes; + } + + public Slice copySlice() + { + return copySlice(0, length); + } + + /** + * Returns a copy of this buffer's sub-region. Modifying the content of + * the returned buffer or this buffer does not affect each other at all. + */ + public Slice copySlice(int index, int length) + { + return new Slice(copyBytes(index, length)); + } + + public byte[] copyBytes() + { + return copyBytes(0, length); + } + + public byte[] copyBytes(int index, int length) + { + checkPositionIndexes(index, index + length, this.length); + return getBytes(index, length); + } + + /** + * Returns a slice of this buffer's readable bytes. Modifying the content + * of the returned buffer or this buffer affects each other's content + * while they maintain separate indexes and marks. + */ + public Slice slice() + { + return slice(0, length); + } + + /** + * Returns a slice of this buffer's sub-region. Modifying the content of + * the returned buffer or this buffer affects each other's content while + * they maintain separate indexes and marks. 
+ */ + public Slice slice(int index, int length) + { + if (index == 0 && length == this.length) { + return this; + } + + checkPositionIndexes(index, index + length, this.length); + if (index >= 0 && length == 0) { + return Slices.EMPTY_SLICE; + } + return new Slice(data, offset + index, length); + } + + /** + * Creates an input stream over this slice. + */ + public SliceInput input() + { + return new SliceInput(this); + } + + /** + * Creates an output stream over this slice. + */ + public SliceOutput output() + { + return new BasicSliceOutput(this); + } + + /** + * Converts this buffer's readable bytes into a NIO buffer. The returned + * buffer shares the content with this buffer. + */ + public ByteBuffer toByteBuffer() + { + return toByteBuffer(0, length); + } + + /** + * Converts this buffer's sub-region into a NIO buffer. The returned + * buffer shares the content with this buffer. + */ + public ByteBuffer toByteBuffer(int index, int length) + { + checkPositionIndexes(index, index + length, this.length); + index += offset; + return ByteBuffer.wrap(data, index, length).order(LITTLE_ENDIAN); + } + + @Override + public boolean equals(Object o) + { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + + Slice slice = (Slice) o; + + // do lengths match + if (length != slice.length) { + return false; + } + + // if arrays have same base offset, some optimizations can be taken... 
+ if (offset == slice.offset && data == slice.data) { + return true; + } + for (int i = 0; i < length; i++) { + if (data[offset + i] != slice.data[slice.offset + i]) { + return false; + } + } + return true; + } + + @Override + public int hashCode() + { + if (hash != 0) { + return hash; + } + + int result = length; + for (int i = offset; i < offset + length; i++) { + result = 31 * result + data[i]; + } + if (result == 0) { + result = 1; + } + hash = result; + return hash; + } + + /** + * Compares the content of the specified buffer to the content of this + * buffer. This comparison is performed byte by byte using an unsigned + * comparison. + */ + public int compareTo(Slice that) + { + if (this == that) { + return 0; + } + if (this.data == that.data && length == that.length && offset == that.offset) { + return 0; + } + + int minLength = Math.min(this.length, that.length); + for (int i = 0; i < minLength; i++) { + int thisByte = 0xFF & this.data[this.offset + i]; + int thatByte = 0xFF & that.data[that.offset + i]; + if (thisByte != thatByte) { + return (thisByte) - (thatByte); + } + } + return this.length - that.length; + } + + /** + * Decodes this buffer's readable bytes into a string with the specified + * character set name. + */ + public String toString(Charset charset) + { + return toString(0, length, charset); + } + + /** + * Decodes this buffer's sub-region into a string with the specified + * character set. 
+ */ + public String toString(int index, int length, Charset charset) + { + if (length == 0) { + return ""; + } + + return Slices.decodeString(toByteBuffer(index, length), charset); + } + + public String toString() + { + return getClass().getSimpleName() + '(' + + "length=" + length() + + ')'; + } +} diff --git a/leveldb/src/main/java/org/iq80/leveldb/util/SliceComparator.java b/leveldb/src/main/java/org/iq80/leveldb/util/SliceComparator.java new file mode 100644 index 0000000..fa244a4 --- /dev/null +++ b/leveldb/src/main/java/org/iq80/leveldb/util/SliceComparator.java @@ -0,0 +1,32 @@ +/* + * Copyright (C) 2011 the original author or authors. + * See the notice.md file distributed with this work for additional + * information regarding copyright ownership. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.iq80.leveldb.util; + +import java.util.Comparator; + +public final class SliceComparator + implements Comparator +{ + public static final SliceComparator SLICE_COMPARATOR = new SliceComparator(); + + @Override + public int compare(Slice sliceA, Slice sliceB) + { + return sliceA.compareTo(sliceB); + } +} diff --git a/leveldb/src/main/java/org/iq80/leveldb/util/SliceInput.java b/leveldb/src/main/java/org/iq80/leveldb/util/SliceInput.java new file mode 100644 index 0000000..140cc69 --- /dev/null +++ b/leveldb/src/main/java/org/iq80/leveldb/util/SliceInput.java @@ -0,0 +1,435 @@ +/* + * Copyright (C) 2011 the original author or authors. 
+ * See the notice.md file distributed with this work for additional + * information regarding copyright ownership. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.iq80.leveldb.util; + +import java.io.DataInput; +import java.io.IOException; +import java.io.InputStream; +import java.nio.ByteBuffer; +import java.nio.charset.Charset; + +public final class SliceInput + extends InputStream + implements DataInput +{ + private final Slice slice; + private int position; + + public SliceInput(Slice slice) + { + this.slice = slice; + } + + /** + * Returns the {@code position} of this buffer. + */ + public int position() + { + return position; + } + + /** + * Sets the {@code position} of this buffer. + * + * @throws IndexOutOfBoundsException if the specified {@code position} is + * less than {@code 0} or + * greater than {@code this.writerIndex} + */ + public void setPosition(int position) + { + if (position < 0 || position > slice.length()) { + throw new IndexOutOfBoundsException(); + } + this.position = position; + } + + /** + * Returns {@code true} + * if and only if {@code available()} is greater + * than {@code 0}. + */ + public boolean isReadable() + { + return available() > 0; + } + + /** + * Returns the number of readable bytes which is equal to + * {@code (this.slice.length() - this.position)}. 
+ */ + @Override + public int available() + { + return slice.length() - position; + } + + @Override + public boolean readBoolean() + throws IOException + { + return readByte() != 0; + } + + @Override + public int read() + { + return readByte(); + } + + /** + * Gets a byte at the current {@code position} and increases + * the {@code position} by {@code 1} in this buffer. + * + * @throws IndexOutOfBoundsException if {@code this.available()} is less than {@code 1} + */ + @Override + public byte readByte() + { + if (position == slice.length()) { + throw new IndexOutOfBoundsException(); + } + return slice.getByte(position++); + } + + /** + * Gets an unsigned byte at the current {@code position} and increases + * the {@code position} by {@code 1} in this buffer. + * + * @throws IndexOutOfBoundsException if {@code this.available()} is less than {@code 1} + */ + @Override + public int readUnsignedByte() + { + return (short) (readByte() & 0xFF); + } + + /** + * Gets a 16-bit short integer at the current {@code position} + * and increases the {@code position} by {@code 2} in this buffer. + * + * @throws IndexOutOfBoundsException if {@code this.available()} is less than {@code 2} + */ + @Override + public short readShort() + { + short v = slice.getShort(position); + position += 2; + return v; + } + + @Override + public int readUnsignedShort() + throws IOException + { + return readShort() & 0xff; + } + + /** + * Gets a 32-bit integer at the current {@code position} + * and increases the {@code position} by {@code 4} in this buffer. + * + * @throws IndexOutOfBoundsException if {@code this.available()} is less than {@code 4} + */ + @Override + public int readInt() + { + int v = slice.getInt(position); + position += 4; + return v; + } + + /** + * Gets an unsigned 32-bit integer at the current {@code position} + * and increases the {@code position} by {@code 4} in this buffer. 
+ * + * @throws IndexOutOfBoundsException if {@code this.available()} is less than {@code 4} + */ + public long readUnsignedInt() + { + return readInt() & 0xFFFFFFFFL; + } + + /** + * Gets a 64-bit integer at the current {@code position} + * and increases the {@code position} by {@code 8} in this buffer. + * + * @throws IndexOutOfBoundsException if {@code this.available()} is less than {@code 8} + */ + @Override + public long readLong() + { + long v = slice.getLong(position); + position += 8; + return v; + } + + public byte[] readByteArray(int length) + { + byte[] value = slice.copyBytes(position, length); + position += length; + return value; + } + + /** + * Transfers this buffer's data to a newly created buffer starting at + * the current {@code position} and increases the {@code position} + * by the number of the transferred bytes (= {@code length}). + * The returned buffer's {@code position} and {@code writerIndex} are + * {@code 0} and {@code length} respectively. + * + * @param length the number of bytes to transfer + * @return the newly created buffer which contains the transferred bytes + * @throws IndexOutOfBoundsException if {@code length} is greater than {@code this.available()} + */ + public Slice readBytes(int length) + { + if (length == 0) { + return Slices.EMPTY_SLICE; + } + Slice value = slice.slice(position, length); + position += length; + return value; + } + + /** + * Returns a new slice of this buffer's sub-region starting at the current + * {@code position} and increases the {@code position} by the size + * of the new slice (= {@code length}). 
+ * + * @param length the size of the new slice + * @return the newly created slice + * @throws IndexOutOfBoundsException if {@code length} is greater than {@code this.available()} + */ + public Slice readSlice(int length) + { + Slice newSlice = slice.slice(position, length); + position += length; + return newSlice; + } + + @Override + public void readFully(byte[] destination) + { + readBytes(destination); + } + + /** + * Transfers this buffer's data to the specified destination starting at + * the current {@code position} and increases the {@code position} + * by the number of the transferred bytes (= {@code dst.length}). + * + * @throws IndexOutOfBoundsException if {@code dst.length} is greater than {@code this.available()} + */ + public void readBytes(byte[] destination) + { + readBytes(destination, 0, destination.length); + } + + @Override + public void readFully(byte[] destination, int offset, int length) + { + readBytes(destination, offset, length); + } + + /** + * Transfers this buffer's data to the specified destination starting at + * the current {@code position} and increases the {@code position} + * by the number of the transferred bytes (= {@code length}). + * + * @param destinationIndex the first index of the destination + * @param length the number of bytes to transfer + * @throws IndexOutOfBoundsException if the specified {@code destinationIndex} is less than {@code 0}, + * if {@code length} is greater than {@code this.available()}, or + * if {@code destinationIndex + length} is greater than {@code destination.length} + */ + public void readBytes(byte[] destination, int destinationIndex, int length) + { + slice.getBytes(position, destination, destinationIndex, length); + position += length; + } + + /** + * Transfers this buffer's data to the specified destination starting at + * the current {@code position} until the destination becomes + * non-writable, and increases the {@code position} by the number of the + * transferred bytes. 
This method is basically same with + * {@link #readBytes(Slice, int, int)}, except that this method + * increases the {@code writerIndex} of the destination by the number of + * the transferred bytes while {@link #readBytes(Slice, int, int)} + * does not. + * + * @throws IndexOutOfBoundsException if {@code destination.writableBytes} is greater than + * {@code this.available()} + */ + public void readBytes(Slice destination) + { + readBytes(destination, destination.length()); + } + + /** + * Transfers this buffer's data to the specified destination starting at + * the current {@code position} and increases the {@code position} + * by the number of the transferred bytes (= {@code length}). This method + * is basically same with {@link #readBytes(Slice, int, int)}, + * except that this method increases the {@code writerIndex} of the + * destination by the number of the transferred bytes (= {@code length}) + * while {@link #readBytes(Slice, int, int)} does not. + * + * @throws IndexOutOfBoundsException if {@code length} is greater than {@code this.available()} or + * if {@code length} is greater than {@code destination.writableBytes} + */ + public void readBytes(Slice destination, int length) + { + if (length > destination.length()) { + throw new IndexOutOfBoundsException(); + } + readBytes(destination, destination.length(), length); + } + + /** + * Transfers this buffer's data to the specified destination starting at + * the current {@code position} and increases the {@code position} + * by the number of the transferred bytes (= {@code length}). 
+ * + * @param destinationIndex the first index of the destination + * @param length the number of bytes to transfer + * @throws IndexOutOfBoundsException if the specified {@code destinationIndex} is less than {@code 0}, + * if {@code length} is greater than {@code this.available()}, or + * if {@code destinationIndex + length} is greater than + * {@code destination.capacity} + */ + public void readBytes(Slice destination, int destinationIndex, int length) + { + slice.getBytes(position, destination, destinationIndex, length); + position += length; + } + + /** + * Transfers this buffer's data to the specified destination starting at + * the current {@code position} until the destination's position + * reaches its limit, and increases the {@code position} by the + * number of the transferred bytes. + * + * @throws IndexOutOfBoundsException if {@code destination.remaining()} is greater than + * {@code this.available()} + */ + public void readBytes(ByteBuffer destination) + { + int length = destination.remaining(); + slice.getBytes(position, destination); + position += length; + } + + public int skipBytes(int length) + { + length = Math.min(length, available()); + position += length; + return length; + } + + /** + * Returns a slice of this buffer's readable bytes. Modifying the content + * of the returned buffer or this buffer affects each other's content + * while they maintain separate indexes and marks. This method is + * identical to {@code buf.slice(buf.position(), buf.available())}. + * This method does not modify {@code position} or {@code writerIndex} of + * this buffer. + */ + public Slice slice() + { + return slice.slice(position, available()); + } + + /** + * Decodes this buffer's readable bytes into a string with the specified + * character set name. This method is identical to + * {@code buf.toString(buf.position(), buf.available(), charsetName)}. + * This method does not modify {@code position} or {@code writerIndex} of + * this buffer. 
+ * + * @throws java.nio.charset.UnsupportedCharsetException if the specified character set name is not supported by the + * current VM + */ + public String toString(Charset charset) + { + return slice.toString(position, available(), charset); + } + + @Override + public String toString() + { + return getClass().getSimpleName() + '(' + + "ridx=" + position + ", " + + "cap=" + slice.length() + + ')'; + } + + // + // Unsupported operations + // + + /** + * Unsupported operation + * + * @throws UnsupportedOperationException always + */ + @Override + public char readChar() + { + throw new UnsupportedOperationException(); + } + + /** + * Unsupported operation + * + * @throws UnsupportedOperationException always + */ + @Override + public float readFloat() + { + throw new UnsupportedOperationException(); + } + + @Override + public double readDouble() + { + throw new UnsupportedOperationException(); + } + + /** + * Unsupported operation + * + * @throws UnsupportedOperationException always + */ + @Override + public String readLine() + { + throw new UnsupportedOperationException(); + } + + /** + * Unsupported operation + * + * @throws UnsupportedOperationException always + */ + @Override + public String readUTF() + { + throw new UnsupportedOperationException(); + } +} diff --git a/leveldb/src/main/java/org/iq80/leveldb/util/SliceOutput.java b/leveldb/src/main/java/org/iq80/leveldb/util/SliceOutput.java new file mode 100644 index 0000000..77c2a4a --- /dev/null +++ b/leveldb/src/main/java/org/iq80/leveldb/util/SliceOutput.java @@ -0,0 +1,320 @@ +/* + * Copyright (C) 2011 the original author or authors. + * See the notice.md file distributed with this work for additional + * information regarding copyright ownership. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.iq80.leveldb.util; + +import java.io.DataOutput; +import java.io.IOException; +import java.io.InputStream; +import java.io.OutputStream; +import java.nio.ByteBuffer; +import java.nio.charset.Charset; + +public abstract class SliceOutput + extends OutputStream + implements DataOutput +{ + /** + * Resets this stream to the initial position. + */ + public abstract void reset(); + + /** + * Returns the {@code writerIndex} of this buffer. + */ + public abstract int size(); + + /** + * Returns the number of writable bytes which is equal to + * {@code (this.capacity - this.writerIndex)}. + */ + public abstract int writableBytes(); + + /** + * Returns {@code true} + * if and only if {@code (this.capacity - this.writerIndex)} is greater + * than {@code 0}. + */ + public abstract boolean isWritable(); + + @Override + public final void writeBoolean(boolean value) + { + writeByte(value ? 1 : 0); + } + + @Override + public final void write(int value) + { + writeByte(value); + } + + /** + * Sets the specified byte at the current {@code writerIndex} + * and increases the {@code writerIndex} by {@code 1} in this buffer. + * The 24 high-order bits of the specified value are ignored. + * + * @throws IndexOutOfBoundsException if {@code this.writableBytes} is less than {@code 1} + */ + @Override + public abstract void writeByte(int value); + + /** + * Sets the specified 16-bit short integer at the current + * {@code writerIndex} and increases the {@code writerIndex} by {@code 2} + * in this buffer. 
The 16 high-order bits of the specified value are ignored. + * + * @throws IndexOutOfBoundsException if {@code this.writableBytes} is less than {@code 2} + */ + @Override + public abstract void writeShort(int value); + + /** + * Sets the specified 32-bit integer at the current {@code writerIndex} + * and increases the {@code writerIndex} by {@code 4} in this buffer. + * + * @throws IndexOutOfBoundsException if {@code this.writableBytes} is less than {@code 4} + */ + @Override + public abstract void writeInt(int value); + + /** + * Sets the specified 64-bit long integer at the current + * {@code writerIndex} and increases the {@code writerIndex} by {@code 8} + * in this buffer. + * + * @throws IndexOutOfBoundsException if {@code this.writableBytes} is less than {@code 8} + */ + @Override + public abstract void writeLong(long value); + + /** + * Transfers the specified source buffer's data to this buffer starting at + * the current {@code writerIndex} until the source buffer becomes + * unreadable, and increases the {@code writerIndex} by the number of + * the transferred bytes. This method is basically same with + * {@link #writeBytes(Slice, int, int)}, except that this method + * increases the {@code readerIndex} of the source buffer by the number of + * the transferred bytes while {@link #writeBytes(Slice, int, int)} + * does not. + * + * @throws IndexOutOfBoundsException if {@code source.readableBytes} is greater than + * {@code this.writableBytes} + */ + public abstract void writeBytes(Slice source); + + /** + * Transfers the specified source buffer's data to this buffer starting at + * the current {@code writerIndex} and increases the {@code writerIndex} + * by the number of the transferred bytes (= {@code length}). 
This method + * is basically same with {@link #writeBytes(Slice, int, int)}, + * except that this method increases the {@code readerIndex} of the source + * buffer by the number of the transferred bytes (= {@code length}) while + * {@link #writeBytes(Slice, int, int)} does not. + * + * @param length the number of bytes to transfer + * @throws IndexOutOfBoundsException if {@code length} is greater than {@code this.writableBytes} or + * if {@code length} is greater then {@code source.readableBytes} + */ + public abstract void writeBytes(SliceInput source, int length); + + /** + * Transfers the specified source buffer's data to this buffer starting at + * the current {@code writerIndex} and increases the {@code writerIndex} + * by the number of the transferred bytes (= {@code length}). + * + * @param sourceIndex the first index of the source + * @param length the number of bytes to transfer + * @throws IndexOutOfBoundsException if the specified {@code sourceIndex} is less than {@code 0}, + * if {@code sourceIndex + length} is greater than + * {@code source.capacity}, or + * if {@code length} is greater than {@code this.writableBytes} + */ + public abstract void writeBytes(Slice source, int sourceIndex, int length); + + @Override + public final void write(byte[] source) + throws IOException + { + writeBytes(source); + } + + /** + * Transfers the specified source array's data to this buffer starting at + * the current {@code writerIndex} and increases the {@code writerIndex} + * by the number of the transferred bytes (= {@code source.length}). 
+ * + * @throws IndexOutOfBoundsException if {@code source.length} is greater than {@code this.writableBytes} + */ + public abstract void writeBytes(byte[] source); + + @Override + public final void write(byte[] source, int sourceIndex, int length) + { + writeBytes(source, sourceIndex, length); + } + + /** + * Transfers the specified source array's data to this buffer starting at + * the current {@code writerIndex} and increases the {@code writerIndex} + * by the number of the transferred bytes (= {@code length}). + * + * @param sourceIndex the first index of the source + * @param length the number of bytes to transfer + * @throws IndexOutOfBoundsException if the specified {@code sourceIndex} is less than {@code 0}, + * if {@code sourceIndex + length} is greater than + * {@code source.length}, or + * if {@code length} is greater than {@code this.writableBytes} + */ + public abstract void writeBytes(byte[] source, int sourceIndex, int length); + + /** + * Transfers the specified source buffer's data to this buffer starting at + * the current {@code writerIndex} until the source buffer's position + * reaches its limit, and increases the {@code writerIndex} by the + * number of the transferred bytes. + * + * @throws IndexOutOfBoundsException if {@code source.remaining()} is greater than + * {@code this.writableBytes} + */ + public abstract void writeBytes(ByteBuffer source); + + /** + * Transfers the content of the specified stream to this buffer + * starting at the current {@code writerIndex} and increases the + * {@code writerIndex} by the number of the transferred bytes. 
+ * + * @param length the number of bytes to transfer + * @return the actual number of bytes read in from the specified stream + * @throws IndexOutOfBoundsException if {@code length} is greater than {@code this.writableBytes} + * @throws java.io.IOException if the specified stream threw an exception during I/O + */ + public abstract int writeBytes(InputStream in, int length) + throws IOException; + + /** + * Fills this buffer with NUL (0x00) starting at the current + * {@code writerIndex} and increases the {@code writerIndex} by the + * specified {@code length}. + * + * @param length the number of NULs to write to the buffer + * @throws IndexOutOfBoundsException if {@code length} is greater than {@code this.writableBytes} + */ + public abstract void writeZero(int length); + + /** + * Returns a slice of this buffer's readable bytes. Modifying the content + * of the returned buffer or this buffer affects each other's content + * while they maintain separate indexes and marks. This method is + * identical to {@code buf.slice(buf.readerIndex(), buf.readableBytes())}. + * This method does not modify {@code readerIndex} or {@code writerIndex} of + * this buffer. + */ + public abstract Slice slice(); + + /** + * Converts this buffer's readable bytes into a NIO buffer. The returned + * buffer might or might not share the content with this buffer, while + * they have separate indexes and marks. This method is identical to + * {@code buf.toByteBuffer(buf.readerIndex(), buf.readableBytes())}. + * This method does not modify {@code readerIndex} or {@code writerIndex} of + * this buffer. + */ + public abstract ByteBuffer toByteBuffer(); + + /** + * Decodes this buffer's readable bytes into a string with the specified + * character set name. This method is identical to + * {@code buf.toString(buf.readerIndex(), buf.readableBytes(), charsetName)}. + * This method does not modify {@code readerIndex} or {@code writerIndex} of + * this buffer. 
+ * + * @throws java.nio.charset.UnsupportedCharsetException if the specified character set name is not supported by the + * current VM + */ + public abstract String toString(Charset charset); + + // + // Unsupported operations + // + + /** + * Unsupported operation + * + * @throws UnsupportedOperationException always + */ + @Override + public void writeChar(int value) + { + throw new UnsupportedOperationException(); + } + + /** + * Unsupported operation + * + * @throws UnsupportedOperationException always + */ + @Override + public void writeFloat(float v) + { + throw new UnsupportedOperationException(); + } + + /** + * Unsupported operation + * + * @throws UnsupportedOperationException always + */ + @Override + public void writeDouble(double v) + { + throw new UnsupportedOperationException(); + } + + /** + * Unsupported operation + * + * @throws UnsupportedOperationException always + */ + @Override + public void writeChars(String s) + { + throw new UnsupportedOperationException(); + } + + /** + * Unsupported operation + * + * @throws UnsupportedOperationException always + */ + @Override + public void writeUTF(String s) + { + throw new UnsupportedOperationException(); + } + + /** + * Unsupported operation + * + * @throws UnsupportedOperationException always + */ + @Override + public void writeBytes(String s) + { + throw new UnsupportedOperationException(); + } +} diff --git a/leveldb/src/main/java/org/iq80/leveldb/util/Slices.java b/leveldb/src/main/java/org/iq80/leveldb/util/Slices.java new file mode 100644 index 0000000..81b2eb4 --- /dev/null +++ b/leveldb/src/main/java/org/iq80/leveldb/util/Slices.java @@ -0,0 +1,247 @@ +/* + * Copyright (C) 2011 the original author or authors. + * See the notice.md file distributed with this work for additional + * information regarding copyright ownership. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.iq80.leveldb.util; + +import java.nio.ByteBuffer; +import java.nio.ByteOrder; +import java.nio.CharBuffer; +import java.nio.charset.CharacterCodingException; +import java.nio.charset.Charset; +import java.nio.charset.CharsetDecoder; +import java.nio.charset.CharsetEncoder; +import java.nio.charset.CoderResult; +import java.nio.charset.CodingErrorAction; +import java.util.IdentityHashMap; +import java.util.Map; + +import static java.util.Objects.requireNonNull; + +public final class Slices +{ + public static Slice readLengthPrefixedBytes(SliceInput sliceInput) + { + int length = VariableLengthQuantity.readVariableLengthInt(sliceInput); + return sliceInput.readBytes(length); + } + + public static void writeLengthPrefixedBytes(SliceOutput sliceOutput, Slice value) + { + VariableLengthQuantity.writeVariableLengthInt(value.length(), sliceOutput); + sliceOutput.writeBytes(value); + } + + /** + * A buffer whose capacity is {@code 0}. 
+ */ + public static final Slice EMPTY_SLICE = new Slice(0); + + private Slices() + { + } + + public static Slice ensureSize(Slice existingSlice, int minWritableBytes) + { + if (existingSlice == null) { + existingSlice = EMPTY_SLICE; + } + + if (minWritableBytes <= existingSlice.length()) { + return existingSlice; + } + + int newCapacity; + if (existingSlice.length() == 0) { + newCapacity = 1; + } + else { + newCapacity = existingSlice.length(); + } + int minNewCapacity = existingSlice.length() + minWritableBytes; + while (newCapacity < minNewCapacity) { + newCapacity <<= 1; + } + + Slice newSlice = allocate(newCapacity); + newSlice.setBytes(0, existingSlice, 0, existingSlice.length()); + return newSlice; + } + + public static Slice allocate(int capacity) + { + if (capacity == 0) { + return EMPTY_SLICE; + } + return new Slice(capacity); + } + + public static Slice wrappedBuffer(byte[] array) + { + if (array.length == 0) { + return EMPTY_SLICE; + } + return new Slice(array); + } + + public static Slice copiedBuffer(ByteBuffer source, int sourceOffset, int length) + { + requireNonNull(source, "source is null"); + int newPosition = source.position() + sourceOffset; + return copiedBuffer((ByteBuffer) source.duplicate().order(ByteOrder.LITTLE_ENDIAN).clear().limit(newPosition + length).position(newPosition)); + } + + public static Slice copiedBuffer(ByteBuffer source) + { + requireNonNull(source, "source is null"); + Slice copy = allocate(source.limit() - source.position()); + copy.setBytes(0, source.duplicate().order(ByteOrder.LITTLE_ENDIAN)); + return copy; + } + + public static Slice avoidCopiedBuffer(ByteBuffer source) + { + if (source.hasArray()) { + return new Slice(source.array(), source.arrayOffset() + source.position(), source.remaining()); + } + else { + return copiedBuffer(source); + } + } + + public static Slice copiedBuffer(String string, Charset charset) + { + requireNonNull(string, "string is null"); + requireNonNull(charset, "charset is null"); + + 
return wrappedBuffer(string.getBytes(charset)); + } + + public static ByteBuffer encodeString(CharBuffer src, Charset charset) + { + CharsetEncoder encoder = getEncoder(charset); + ByteBuffer dst = ByteBuffer.allocate( + (int) ((double) src.remaining() * encoder.maxBytesPerChar())); + try { + CoderResult cr = encoder.encode(src, dst, true); + if (!cr.isUnderflow()) { + cr.throwException(); + } + cr = encoder.flush(dst); + if (!cr.isUnderflow()) { + cr.throwException(); + } + } + catch (CharacterCodingException x) { + throw new IllegalStateException(x); + } + dst.flip(); + return dst; + } + + public static String decodeString(ByteBuffer src, Charset charset) + { + CharsetDecoder decoder = getDecoder(charset); + CharBuffer dst = CharBuffer.allocate( + (int) ((double) src.remaining() * decoder.maxCharsPerByte())); + try { + CoderResult cr = decoder.decode(src, dst, true); + if (!cr.isUnderflow()) { + cr.throwException(); + } + cr = decoder.flush(dst); + if (!cr.isUnderflow()) { + cr.throwException(); + } + } + catch (CharacterCodingException x) { + throw new IllegalStateException(x); + } + return dst.flip().toString(); + } + + private static final ThreadLocal> encoders = + new ThreadLocal>() + { + @Override + protected Map initialValue() + { + return new IdentityHashMap<>(); + } + }; + + private static final ThreadLocal> decoders = + new ThreadLocal>() + { + @Override + protected Map initialValue() + { + return new IdentityHashMap<>(); + } + }; + + /** + * Returns a cached thread-local {@link CharsetEncoder} for the specified + * charset. 
+ */ + private static CharsetEncoder getEncoder(Charset charset) + { + if (charset == null) { + throw new NullPointerException("charset"); + } + + Map map = encoders.get(); + CharsetEncoder e = map.get(charset); + if (e != null) { + e.reset(); + e.onMalformedInput(CodingErrorAction.REPLACE); + e.onUnmappableCharacter(CodingErrorAction.REPLACE); + return e; + } + + e = charset.newEncoder(); + e.onMalformedInput(CodingErrorAction.REPLACE); + e.onUnmappableCharacter(CodingErrorAction.REPLACE); + map.put(charset, e); + return e; + } + + /** + * Returns a cached thread-local {@link CharsetDecoder} for the specified + * charset. + */ + private static CharsetDecoder getDecoder(Charset charset) + { + if (charset == null) { + throw new NullPointerException("charset"); + } + + Map map = decoders.get(); + CharsetDecoder d = map.get(charset); + if (d != null) { + d.reset(); + d.onMalformedInput(CodingErrorAction.REPLACE); + d.onUnmappableCharacter(CodingErrorAction.REPLACE); + return d; + } + + d = charset.newDecoder(); + d.onMalformedInput(CodingErrorAction.REPLACE); + d.onUnmappableCharacter(CodingErrorAction.REPLACE); + map.put(charset, d); + return d; + } +} diff --git a/leveldb/src/main/java/org/iq80/leveldb/util/Snappy.java b/leveldb/src/main/java/org/iq80/leveldb/util/Snappy.java new file mode 100644 index 0000000..98122fe --- /dev/null +++ b/leveldb/src/main/java/org/iq80/leveldb/util/Snappy.java @@ -0,0 +1,273 @@ +/* + * Copyright (C) 2011 the original author or authors. + * See the notice.md file distributed with this work for additional + * information regarding copyright ownership. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.iq80.leveldb.util; + +import java.io.IOException; +import java.nio.ByteBuffer; + +import static java.nio.charset.StandardCharsets.UTF_8; + +/** + *

+ * A Snappy abstraction which attempts to use the iq80 implementation and falls back + * to the xerial Snappy implementation if it cannot be loaded. You can change the + * load order by setting the 'leveldb.snappy' system property. Example: + *

+ * + * -Dleveldb.snappy=xerial,iq80 + * + *

+ * The system property can also be configured with the name of a class which + * implements the Snappy.SPI interface. + *

+ * + * @author Hiram Chirino + */ +public final class Snappy +{ + private Snappy() + { + } + + public interface SPI + { + int uncompress(ByteBuffer compressed, ByteBuffer uncompressed) + throws IOException; + + int uncompress(byte[] input, int inputOffset, int length, byte[] output, int outputOffset) + throws IOException; + + int compress(byte[] input, int inputOffset, int length, byte[] output, int outputOffset) + throws IOException; + + byte[] compress(String text) + throws IOException; + + int maxCompressedLength(int length); + } + + public static class XerialSnappy + implements SPI + { + static { + // Make sure that the JNI libs are fully loaded. + try { + org.xerial.snappy.Snappy.compress("test"); + } + catch (IOException e) { + throw new RuntimeException(e); + } + } + + @Override + public int uncompress(ByteBuffer compressed, ByteBuffer uncompressed) + throws IOException + { + return org.xerial.snappy.Snappy.uncompress(compressed, uncompressed); + } + + @Override + public int uncompress(byte[] input, int inputOffset, int length, byte[] output, int outputOffset) + throws IOException + { + return org.xerial.snappy.Snappy.uncompress(input, inputOffset, length, output, outputOffset); + } + + @Override + public int compress(byte[] input, int inputOffset, int length, byte[] output, int outputOffset) + throws IOException + { + return org.xerial.snappy.Snappy.compress(input, inputOffset, length, output, outputOffset); + } + + @Override + public byte[] compress(String text) + throws IOException + { + return org.xerial.snappy.Snappy.compress(text); + } + + @Override + public int maxCompressedLength(int length) + { + return org.xerial.snappy.Snappy.maxCompressedLength(length); + } + } + + public static class IQ80Snappy + implements SPI + { + static { + // Make sure that the library can fully load. 
+ try { + new IQ80Snappy().compress("test"); + } + catch (IOException e) { + throw new RuntimeException(e); + } + } + + @Override + public int uncompress(ByteBuffer compressed, ByteBuffer uncompressed) + throws IOException + { + byte[] input; + int inputOffset; + int length; + byte[] output; + int outputOffset; + if (compressed.hasArray()) { + input = compressed.array(); + inputOffset = compressed.arrayOffset() + compressed.position(); + length = compressed.remaining(); + } + else { + input = new byte[compressed.remaining()]; + inputOffset = 0; + length = input.length; + compressed.mark(); + compressed.get(input); + compressed.reset(); + } + if (uncompressed.hasArray()) { + output = uncompressed.array(); + outputOffset = uncompressed.arrayOffset() + uncompressed.position(); + } + else { + int t = org.iq80.snappy.Snappy.getUncompressedLength(input, inputOffset); + output = new byte[t]; + outputOffset = 0; + } + + int count = org.iq80.snappy.Snappy.uncompress(input, inputOffset, length, output, outputOffset); + if (uncompressed.hasArray()) { + uncompressed.limit(uncompressed.position() + count); + } + else { + int p = uncompressed.position(); + uncompressed.limit(uncompressed.capacity()); + uncompressed.put(output, 0, count); + uncompressed.flip().position(p); + } + return count; + } + + @Override + public int uncompress(byte[] input, int inputOffset, int length, byte[] output, int outputOffset) + throws IOException + { + return org.iq80.snappy.Snappy.uncompress(input, inputOffset, length, output, outputOffset); + } + + @Override + public int compress(byte[] input, int inputOffset, int length, byte[] output, int outputOffset) + throws IOException + { + return org.iq80.snappy.Snappy.compress(input, inputOffset, length, output, outputOffset); + } + + @Override + public byte[] compress(String text) + throws IOException + { + byte[] uncomressed = text.getBytes(UTF_8); + byte[] compressedOut = new byte[maxCompressedLength(uncomressed.length)]; + int compressedSize = 
compress(uncomressed, 0, uncomressed.length, compressedOut, 0); + byte[] trimmedBuffer = new byte[compressedSize]; + System.arraycopy(compressedOut, 0, trimmedBuffer, 0, compressedSize); + return trimmedBuffer; + } + + @Override + public int maxCompressedLength(int length) + { + return org.iq80.snappy.Snappy.maxCompressedLength(length); + } + } + + private static final SPI SNAPPY; + + static { + SPI attempt = null; + String[] factories = System.getProperty("leveldb.snappy", "iq80,xerial").split(","); + for (int i = 0; i < factories.length && attempt == null; i++) { + String name = factories[i]; + try { + name = name.trim(); + if ("xerial".equals(name.toLowerCase())) { + name = "org.iq80.leveldb.util.Snappy$XerialSnappy"; + } + else if ("iq80".equals(name.toLowerCase())) { + name = "org.iq80.leveldb.util.Snappy$IQ80Snappy"; + } + attempt = (SPI) Thread.currentThread().getContextClassLoader().loadClass(name).newInstance(); + } + catch (Throwable e) { + } + } + SNAPPY = attempt; + } + + public static boolean available() + { + return SNAPPY != null; + } + + public static ByteBuffer uncompress(ByteBuffer compressed) + throws IOException + { + int uncompressedLength = uncompressedLength(compressed); + final ByteBuffer uncompressed = ByteBuffer.allocateDirect(uncompressedLength); + return uncompress(compressed, uncompressed); + } + + public static ByteBuffer uncompress(ByteBuffer compressed, ByteBuffer uncompressed) + throws IOException + { + SNAPPY.uncompress(compressed, uncompressed); + return uncompressed; + } + + public static void uncompress(byte[] input, int inputOffset, int length, byte[] output, int outputOffset) + throws IOException + { + SNAPPY.uncompress(input, inputOffset, length, output, outputOffset); + } + + public static int compress(byte[] input, int inputOffset, int length, byte[] output, int outputOffset) + throws IOException + { + return SNAPPY.compress(input, inputOffset, length, output, outputOffset); + } + + public static byte[] compress(String 
text) + throws IOException + { + return SNAPPY.compress(text); + } + + public static int maxCompressedLength(int length) + { + return SNAPPY.maxCompressedLength(length); + } + + private static int uncompressedLength(ByteBuffer data) + { + return VariableLengthQuantity.readVariableLengthInt(data.duplicate()); + } +} diff --git a/leveldb/src/main/java/org/iq80/leveldb/util/VariableLengthQuantity.java b/leveldb/src/main/java/org/iq80/leveldb/util/VariableLengthQuantity.java new file mode 100644 index 0000000..7913e13 --- /dev/null +++ b/leveldb/src/main/java/org/iq80/leveldb/util/VariableLengthQuantity.java @@ -0,0 +1,138 @@ +/* + * Copyright (C) 2011 the original author or authors. + * See the notice.md file distributed with this work for additional + * information regarding copyright ownership. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.iq80.leveldb.util; + +import java.nio.ByteBuffer; + +public final class VariableLengthQuantity +{ + private VariableLengthQuantity() + { + } + + public static int variableLengthSize(int value) + { + int size = 1; + while ((value & (~0x7f)) != 0) { + value >>>= 7; + size++; + } + return size; + } + + public static int variableLengthSize(long value) + { + int size = 1; + while ((value & (~0x7f)) != 0) { + value >>>= 7; + size++; + } + return size; + } + + public static void writeVariableLengthInt(int value, SliceOutput sliceOutput) + { + int highBitMask = 0x80; + if (value < (1 << 7) && value >= 0) { + sliceOutput.writeByte(value); + } + else if (value < (1 << 14) && value > 0) { + sliceOutput.writeByte(value | highBitMask); + sliceOutput.writeByte(value >>> 7); + } + else if (value < (1 << 21) && value > 0) { + sliceOutput.writeByte(value | highBitMask); + sliceOutput.writeByte((value >>> 7) | highBitMask); + sliceOutput.writeByte(value >>> 14); + } + else if (value < (1 << 28) && value > 0) { + sliceOutput.writeByte(value | highBitMask); + sliceOutput.writeByte((value >>> 7) | highBitMask); + sliceOutput.writeByte((value >>> 14) | highBitMask); + sliceOutput.writeByte(value >>> 21); + } + else { + sliceOutput.writeByte(value | highBitMask); + sliceOutput.writeByte((value >>> 7) | highBitMask); + sliceOutput.writeByte((value >>> 14) | highBitMask); + sliceOutput.writeByte((value >>> 21) | highBitMask); + sliceOutput.writeByte(value >>> 28); + } + } + + public static void writeVariableLengthLong(long value, SliceOutput sliceOutput) + { + // while value more than the first 7 bits set + while ((value & (~0x7f)) != 0) { + sliceOutput.writeByte((int) ((value & 0x7f) | 0x80)); + value >>>= 7; + } + sliceOutput.writeByte((int) value); + } + + public static int readVariableLengthInt(SliceInput sliceInput) + { + int result = 0; + for (int shift = 0; shift <= 28; shift += 7) { + int b = sliceInput.readUnsignedByte(); + + // add the lower 7 bits to the result 
+ result |= ((b & 0x7f) << shift); + + // if high bit is not set, this is the last byte in the number + if ((b & 0x80) == 0) { + return result; + } + } + throw new NumberFormatException("last byte of variable length int has high bit set"); + } + + public static int readVariableLengthInt(ByteBuffer sliceInput) + { + int result = 0; + for (int shift = 0; shift <= 28; shift += 7) { + int b = sliceInput.get(); + + // add the lower 7 bits to the result + result |= ((b & 0x7f) << shift); + + // if high bit is not set, this is the last byte in the number + if ((b & 0x80) == 0) { + return result; + } + } + throw new NumberFormatException("last byte of variable length int has high bit set"); + } + + public static long readVariableLengthLong(SliceInput sliceInput) + { + long result = 0; + for (int shift = 0; shift <= 63; shift += 7) { + long b = sliceInput.readUnsignedByte(); + + // add the lower 7 bits to the result + result |= ((b & 0x7f) << shift); + + // if high bit is not set, this is the last byte in the number + if ((b & 0x80) == 0) { + return result; + } + } + throw new NumberFormatException("last byte of variable length int has high bit set"); + } +} diff --git a/leveldb/src/main/java/org/iq80/leveldb/util/ZLib.java b/leveldb/src/main/java/org/iq80/leveldb/util/ZLib.java new file mode 100644 index 0000000..85f265a --- /dev/null +++ b/leveldb/src/main/java/org/iq80/leveldb/util/ZLib.java @@ -0,0 +1,88 @@ +/* + * Copyright (C) 2011 the original author or authors. + * See the notice.md file distributed with this work for additional + * information regarding copyright ownership. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.iq80.leveldb.util; + +import java.io.IOException; +import java.nio.ByteBuffer; +import java.util.zip.DataFormatException; +import java.util.zip.Deflater; +import java.util.zip.Inflater; + +/** + * A wrapper for java based ZLib + */ +public final class ZLib +{ + private static final ThreadLocal INFLATER = ThreadLocal.withInitial(Inflater::new); + private static final ThreadLocal INFLATER_RAW = ThreadLocal.withInitial(() -> new Inflater(true)); + private static final ThreadLocal DEFLATER = ThreadLocal.withInitial(Deflater::new); + private static final ThreadLocal DEFLATER_RAW = ThreadLocal.withInitial(() -> new Deflater(Deflater.DEFAULT_COMPRESSION, true)); + + private ZLib() + { + } + + public static ByteBuffer uncompress(ByteBuffer compressed, boolean raw) throws IOException + { + Inflater inflater = (raw ? 
INFLATER_RAW : INFLATER).get(); + try { + ByteBuffer buffer = ByteBuffer.allocate(1024); + inflater.setInput(compressed); + while (!inflater.finished()) { + if (inflater.inflate(buffer) == 0) { + // Grow buffer + ByteBuffer newBuffer = ByteBuffer.allocate(buffer.capacity() + 1024); + int position = buffer.position(); + + // Reset reader index + buffer.flip(); + newBuffer.put(buffer); + + // Set position to the original + newBuffer.position(position); + buffer = newBuffer; + } + } + + // Flip buffer + buffer.flip(); + return buffer; + } + catch (DataFormatException e) { + throw new IOException(e); + } + finally { + inflater.reset(); + } + } + + public static int compress(byte[] input, int inputOffset, int length, byte[] output, int outputOffset, boolean raw) + throws IOException + { + Deflater deflater = (raw ? DEFLATER_RAW : DEFLATER).get(); + try { + deflater.setInput(input, inputOffset, length); + deflater.finish(); + + return deflater.deflate(output, outputOffset, output.length - outputOffset); + } + finally { + deflater.reset(); + } + } +} diff --git a/leveldb/src/main/resources/org/iq80/leveldb/impl/version.txt b/leveldb/src/main/resources/org/iq80/leveldb/impl/version.txt new file mode 100644 index 0000000..f2ab45c --- /dev/null +++ b/leveldb/src/main/resources/org/iq80/leveldb/impl/version.txt @@ -0,0 +1 @@ +${project.version} \ No newline at end of file diff --git a/leveldb/src/test/java/org/iq80/leveldb/fileenv/FileLockTest.java b/leveldb/src/test/java/org/iq80/leveldb/fileenv/FileLockTest.java new file mode 100644 index 0000000..5be173b --- /dev/null +++ b/leveldb/src/test/java/org/iq80/leveldb/fileenv/FileLockTest.java @@ -0,0 +1,77 @@ +/* + * Copyright (C) 2011 the original author or authors. + * See the notice.md file distributed with this work for additional + * information regarding copyright ownership. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.iq80.leveldb.fileenv; + +import org.testng.Assert; +import org.testng.annotations.Test; + +import java.io.File; +import java.io.IOException; + +import static org.testng.Assert.assertFalse; +import static org.testng.Assert.assertTrue; + +public class FileLockTest +{ + @Test + public void testCanDeleteFileAfterUnlock() throws IOException + { + File databaseDir = FileUtils.createTempDir("leveldb"); + File lock1 = new File(databaseDir, "LOCK"); + FileLock lock = FileLock.tryLock(lock1); + lock.release(); + assertTrue(lock1.delete()); + assertTrue(databaseDir.delete()); + assertFalse(databaseDir.exists()); + } + + @Test + public void testCantDoubleLock() throws IOException + { + File databaseDir = FileUtils.createTempDir("leveldb"); + File lock1 = new File(databaseDir, "LOCK"); + FileLock lock = FileLock.tryLock(lock1); + try { + FileLock.tryLock(new File(databaseDir, "LOCK")); + Assert.fail("No expected to aquire more than once the lock"); + } + catch (Exception e) { + //expected + } + lock.release(); + } + + @Test + public void testCanDeleteFileAfterLockFailure() throws IOException + { + File databaseDir = FileUtils.createTempDir("leveldb"); + File lock1 = new File(databaseDir, "LOCK"); + FileLock lock = FileLock.tryLock(lock1); + try { + FileLock.tryLock(new File(databaseDir, "LOCK")); + Assert.fail("Can lock a already locked DB"); + } + catch (Exception e) { + //expected + } + lock.release(); + assertTrue(lock1.delete()); + assertTrue(databaseDir.delete()); + assertFalse(databaseDir.exists()); + } +} diff --git 
a/leveldb/src/test/java/org/iq80/leveldb/fileenv/FileLoggerTest.java b/leveldb/src/test/java/org/iq80/leveldb/fileenv/FileLoggerTest.java new file mode 100644 index 0000000..3b53edb --- /dev/null +++ b/leveldb/src/test/java/org/iq80/leveldb/fileenv/FileLoggerTest.java @@ -0,0 +1,68 @@ +/* + * Copyright (C) 2011 the original author or authors. + * See the notice.md file distributed with this work for additional + * information regarding copyright ownership. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.iq80.leveldb.fileenv; + +import org.iq80.leveldb.Logger; +import org.testng.annotations.Test; + +import java.io.ByteArrayOutputStream; +import java.io.IOException; +import java.time.LocalDateTime; +import java.time.temporal.ChronoUnit; +import java.util.function.Supplier; + +import static org.testng.Assert.assertEquals; + +public class FileLoggerTest +{ + @Test + public void testFormatting() throws IOException + { + ByteArrayOutputStream outputStream = new ByteArrayOutputStream(); + LocalDateTime start = LocalDateTime.now(); + Logger fileLogger = FileLogger.createLogger(outputStream, new LocalDateTimeSupplier(start)); + fileLogger.log("a bc "); + fileLogger.log("without place", "arg1", "arg2"); + fileLogger.log("- %s -", "abc"); + fileLogger.close(); + LocalDateTimeSupplier d = new LocalDateTimeSupplier(start); + StringBuilder s = new StringBuilder(); + s.append(d.get()).append(' ').append("a bc ").append(System.lineSeparator()); + s.append(d.get()).append(' ').append("without place [arg1, arg2]").append(System.lineSeparator()); + s.append(d.get()).append(' ').append("- abc -").append(System.lineSeparator()); + + assertEquals(new String(outputStream.toByteArray()), s.toString()); + } + + private static class LocalDateTimeSupplier implements Supplier + { + LocalDateTime now; + + public LocalDateTimeSupplier(LocalDateTime start) + { + now = start; + } + + @Override + public LocalDateTime get() + { + now = now.plus(1, ChronoUnit.SECONDS); + return now; + } + } +} diff --git a/leveldb/src/test/java/org/iq80/leveldb/fileenv/SequentialFileImplTest.java b/leveldb/src/test/java/org/iq80/leveldb/fileenv/SequentialFileImplTest.java new file mode 100644 index 0000000..71add81 --- /dev/null +++ b/leveldb/src/test/java/org/iq80/leveldb/fileenv/SequentialFileImplTest.java @@ -0,0 +1,73 @@ +/* + * Copyright (C) 2011 the original author or authors. + * See the notice.md file distributed with this work for additional + * information regarding copyright ownership. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.iq80.leveldb.fileenv; + +import org.iq80.leveldb.util.DynamicSliceOutput; +import org.iq80.leveldb.env.SequentialFile; +import org.iq80.leveldb.util.Slice; +import org.testng.annotations.AfterMethod; +import org.testng.annotations.BeforeMethod; +import org.testng.annotations.Test; + +import java.io.File; +import java.io.FileOutputStream; + +import static org.testng.AssertJUnit.assertEquals; + +public class SequentialFileImplTest +{ + File file; + + @BeforeMethod + public void setUp() throws Exception + { + file = File.createTempFile("test", ".log"); + } + + @Test + public void testCheckReadBounds() throws Exception + { + try (FileOutputStream f = new FileOutputStream(file)) { + for (int i = 0; i < 200; ++i) { + f.write(i); + } + } + try (SequentialFile open = SequentialFileImpl.open(file)) { + DynamicSliceOutput destination = new DynamicSliceOutput(10); + assertEquals(10, open.read(10, destination)); + Slice slice = destination.slice(); + assertEquals(new Slice(new byte[] {0, 1, 2, 3, 4, 5, 6, 7, 8, 9}), slice); + byte[] bytes = new byte[190]; + for (int i = 10, k = 0; i < 200; ++i, k++) { + bytes[k] = (byte) i; + } + DynamicSliceOutput destination1 = new DynamicSliceOutput(10); + assertEquals(190, open.read(200, destination1)); + Slice slice1 = destination1.slice(); + assertEquals(new Slice(bytes), slice1); + assertEquals(-1, open.read(10, new DynamicSliceOutput(10))); //EOF + 
assertEquals(0, open.read(0, new DynamicSliceOutput(10))); //EOF + } + } + + @AfterMethod + public void tearDown() + { + file.delete(); + } +} diff --git a/leveldb/src/test/java/org/iq80/leveldb/fileenv/UnbufferedRandomInputFileTest.java b/leveldb/src/test/java/org/iq80/leveldb/fileenv/UnbufferedRandomInputFileTest.java new file mode 100644 index 0000000..fc28f55 --- /dev/null +++ b/leveldb/src/test/java/org/iq80/leveldb/fileenv/UnbufferedRandomInputFileTest.java @@ -0,0 +1,177 @@ +/* + * Copyright (C) 2011 the original author or authors. + * See the notice.md file distributed with this work for additional + * information regarding copyright ownership. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.iq80.leveldb.fileenv; + +import org.iq80.leveldb.env.RandomInputFile; +import org.testng.Assert; +import org.testng.annotations.Test; + +import java.io.File; +import java.io.FileOutputStream; +import java.io.IOException; +import java.nio.ByteBuffer; +import java.nio.channels.ClosedByInterruptException; +import java.util.Random; +import java.util.concurrent.TimeUnit; + +import static org.testng.Assert.assertEquals; +import static org.testng.Assert.assertFalse; + +public class UnbufferedRandomInputFileTest +{ + @Test + public void testResilientToThreadInterruptOnReaderThread() throws IOException + { + File file = File.createTempFile("table", ".db"); + try (final FileOutputStream fileOutputStream = new FileOutputStream(file)) { + fileOutputStream.write(new byte[1024]); + } + try (final RandomInputFile open = UnbufferedRandomInputFile.open(file)) { + //mark current thread as interrupted + Thread.currentThread().interrupt(); + try { + open.read(200, 200); + Assert.fail("Should have failed with ClosedByInterruptException"); + } + catch (ClosedByInterruptException e) { + //reader that was interrupted is expected to fail at this point + //no other threads + } + //clear current thread interrupt + Thread.interrupted(); + + //verify file is still accessible after previous failure + final ByteBuffer read = open.read(200, 200); + assertEquals(read.remaining(), 200); + } + finally { + file.delete(); + } + } + + @Test + public void testResilientToThreadInterruptOnReaderThreadMultiThread() throws Exception + { + File file = File.createTempFile("table", ".db"); + final byte[] bytes = new byte[1024]; + new Random().nextBytes(bytes); + try (final FileOutputStream fileOutputStream = new FileOutputStream(file)) { + fileOutputStream.write(bytes); + } + try (final RandomInputFile open = UnbufferedRandomInputFile.open(file)) { + final Reader[] readers = new Reader[Runtime.getRuntime().availableProcessors()]; + for (int i = 0; i < readers.length; i++) { + readers[i] = new
Reader(open, bytes); + readers[i].start(); + } + int interrups = 0; + long timeout = System.nanoTime() + TimeUnit.SECONDS.toNanos(20); + while (interrups < 300 && System.nanoTime() < timeout) { + interrups = 0; + for (Reader reader : readers) { + reader.fireInterrupt(); + Thread.sleep(5); + interrups += reader.getCount(); + } + } + for (Reader reader : readers) { + reader.stop = true; + } + for (Reader reader : readers) { + reader.join(); + assertFalse(reader.failed); + } + } + finally { + file.delete(); + } + } + + private static class Reader extends Thread + { + private final RandomInputFile reader; + private final byte[] content; + private final byte[] result; + public volatile boolean stop = false; + private boolean wasInterrupted = false; + private boolean failed = false; + private int interruptCount = 0; + + private Object lock = false; + + public Reader(RandomInputFile reader, byte[] content) + { + this.reader = reader; + this.content = content; + this.result = new byte[content.length]; + } + + public void fireInterrupt() + { + synchronized (lock) { + if (!wasInterrupted) { + wasInterrupted = true; + this.interrupt(); + } + } + } + + public boolean exceptionNoticed() + { + synchronized (lock) { + if (!wasInterrupted || !Thread.interrupted()) { + failed = true; + } + else { + interruptCount++; + wasInterrupted = false; + } + } + return failed; + } + + public int getCount() + { + synchronized (lock) { + return interruptCount; + } + } + + @Override + public void run() + { + final Random random = new Random(this.getId()); + while (!stop && !failed) { + if (random.nextInt(100) > 90) { + Thread.yield(); + } + try { + final ByteBuffer read = reader.read(0, content.length); + read.get(this.result); + assertEquals(this.result, this.content); + assertEquals(read.remaining(), 0); + } + catch (Exception e) { + if (exceptionNoticed()) { + return; + } + } + } + } + } +} diff --git a/leveldb/src/test/java/org/iq80/leveldb/impl/AddBoundaryInputsTest.java 
b/leveldb/src/test/java/org/iq80/leveldb/impl/AddBoundaryInputsTest.java new file mode 100644 index 0000000..74e12c4 --- /dev/null +++ b/leveldb/src/test/java/org/iq80/leveldb/impl/AddBoundaryInputsTest.java @@ -0,0 +1,192 @@ +/* + * Copyright (C) 2011 the original author or authors. + * See the notice.md file distributed with this work for additional + * information regarding copyright ownership. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.iq80.leveldb.impl; + +import org.iq80.leveldb.table.BytewiseComparator; +import org.iq80.leveldb.util.Slice; +import org.testng.annotations.BeforeMethod; +import org.testng.annotations.Test; + +import java.util.ArrayList; +import java.util.List; + +import static org.testng.Assert.assertEquals; +import static org.testng.Assert.assertTrue; + +public class AddBoundaryInputsTest +{ + private List levelFiles; + private List compactionFiles; + private InternalKeyComparator icmp; + + @BeforeMethod + public void setUp() + { + levelFiles = new ArrayList<>(); + compactionFiles = new ArrayList<>(); + icmp = new InternalKeyComparator(new BytewiseComparator()); + } + + @Test + public void testEmptyFileSets() throws Exception + { + VersionSet.addBoundaryInputs(icmp, levelFiles, compactionFiles); + assertTrue(compactionFiles.isEmpty()); + assertTrue(levelFiles.isEmpty()); + } + + @Test + public void testEmptyLevelFiles() throws Exception + { + FileMetaData f1 = + createFileMetaData(1, internalKey("100", 2), + 
internalKey("100", 1)); + compactionFiles.add(f1); + + VersionSet.addBoundaryInputs(icmp, levelFiles, compactionFiles); + assertEquals(compactionFiles.size(), 1); + assertEquals(compactionFiles.get(0), f1); + assertTrue(levelFiles.isEmpty()); + } + + @Test + public void testEmptyCompactionFiles() throws Exception + { + FileMetaData f1 = + createFileMetaData(1, internalKey("100", 2), + internalKey("100", 1)); + levelFiles.add(f1); + + VersionSet.addBoundaryInputs(icmp, levelFiles, compactionFiles); + assertTrue(compactionFiles.isEmpty()); + assertEquals(levelFiles.size(), 1); + assertEquals(levelFiles.get(0), f1); + } + + @Test + public void testNoBoundaryFiles() throws Exception + { + FileMetaData f1 = + createFileMetaData(1, internalKey("100", 2), + internalKey("100", 1)); + FileMetaData f2 = + createFileMetaData(1, internalKey("200", 2), + internalKey("200", 1)); + FileMetaData f3 = + createFileMetaData(1, internalKey("300", 2), + internalKey("300", 1)); + + levelFiles.add(f3); + levelFiles.add(f2); + levelFiles.add(f1); + compactionFiles.add(f2); + compactionFiles.add(f3); + + VersionSet.addBoundaryInputs(icmp, levelFiles, compactionFiles); + assertEquals(compactionFiles.size(), 2); + } + + @Test + public void testOneBoundaryFiles() throws Exception + { + FileMetaData f1 = + createFileMetaData(1, internalKey("100", 3), + internalKey("100", 2)); + FileMetaData f2 = + createFileMetaData(1, internalKey("100", 1), + internalKey("200", 3)); + FileMetaData f3 = + createFileMetaData(1, internalKey("300", 2), + internalKey("300", 1)); + + levelFiles.add(f3); + levelFiles.add(f2); + levelFiles.add(f1); + compactionFiles.add(f1); + + VersionSet.addBoundaryInputs(icmp, levelFiles, compactionFiles); + assertEquals(compactionFiles.size(), 2); + assertEquals(compactionFiles.get(0), f1); + assertEquals(compactionFiles.get(1), f2); + } + + @Test + public void testTwoBoundaryFiles() throws Exception + { + FileMetaData f1 = + createFileMetaData(1, internalKey("100", 6), + 
internalKey("100", 5)); + FileMetaData f2 = + createFileMetaData(1, internalKey("100", 2), + internalKey("300", 1)); + FileMetaData f3 = + createFileMetaData(1, internalKey("100", 4), + internalKey("100", 3)); + + levelFiles.add(f2); + levelFiles.add(f3); + levelFiles.add(f1); + compactionFiles.add(f1); + + VersionSet.addBoundaryInputs(icmp, levelFiles, compactionFiles); + assertEquals(compactionFiles.size(), 3); + assertEquals(compactionFiles.get(0), f1); + assertEquals(compactionFiles.get(1), f3); + assertEquals(compactionFiles.get(2), f2); + } + + @Test + public void testDisjoinFilePointers() throws Exception + { + FileMetaData f1 = + createFileMetaData(1, internalKey("100", 6), + internalKey("100", 5)); + FileMetaData f2 = + createFileMetaData(1, internalKey("100", 6), + internalKey("100", 5)); + FileMetaData f3 = + createFileMetaData(1, internalKey("100", 2), + internalKey("300", 1)); + FileMetaData f4 = + createFileMetaData(1, internalKey("100", 4), + internalKey("100", 3)); + + levelFiles.add(f2); + levelFiles.add(f3); + levelFiles.add(f4); + + compactionFiles.add(f1); + + VersionSet.addBoundaryInputs(icmp, levelFiles, compactionFiles); + assertEquals(compactionFiles.size(), 3); + assertEquals(compactionFiles.get(0), f1); + assertEquals(compactionFiles.get(1), f4); + assertEquals(compactionFiles.get(2), f3); + } + + private FileMetaData createFileMetaData(long number, InternalKey smallest, + InternalKey largest) + { + return new FileMetaData(number, 0, smallest, largest); + } + + private InternalKey internalKey(String s, int sequenceNumber) + { + return new InternalKey(new Slice(s.getBytes()), sequenceNumber, ValueType.VALUE); + } +} diff --git a/leveldb/src/test/java/org/iq80/leveldb/impl/ApiTest.java b/leveldb/src/test/java/org/iq80/leveldb/impl/ApiTest.java new file mode 100644 index 0000000..1d5e645 --- /dev/null +++ b/leveldb/src/test/java/org/iq80/leveldb/impl/ApiTest.java @@ -0,0 +1,108 @@ +/* + * Copyright (C) 2011 the original author or authors. 
+ * See the notice.md file distributed with this work for additional + * information regarding copyright ownership. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.iq80.leveldb.impl; + +import org.iq80.leveldb.CompressionType; +import org.iq80.leveldb.DB; +import org.iq80.leveldb.DBException; +import org.iq80.leveldb.DBFactory; +import org.iq80.leveldb.Options; +import org.iq80.leveldb.fileenv.FileUtils; +import org.testng.annotations.AfterMethod; +import org.testng.annotations.Test; + +import java.io.File; +import java.io.IOException; +import java.util.Arrays; + +import static org.iq80.leveldb.impl.Iq80DBFactory.asString; +import static org.iq80.leveldb.impl.Iq80DBFactory.bytes; +import static org.testng.Assert.assertTrue; + +/** + * Test the implementation via the org.iq80.leveldb API.
+ * + * @author Hiram Chirino + */ +public class ApiTest +{ + private final File databaseDir = FileUtils.createTempDir("leveldb"); + + public void assertEquals(byte[] arg1, byte[] arg2) + { + assertTrue(Arrays.equals(arg1, arg2), asString(arg1) + " != " + asString(arg2)); + } + + private final DBFactory factory = Iq80DBFactory.factory; + + File getTestDirectory(String name) + throws IOException + { + File rc = new File(databaseDir, name); + factory.destroy(rc, new Options().createIfMissing(true)); + rc.mkdirs(); + return rc; + } + + @Test + public void testCompaction() + throws IOException, DBException + { + Options options = new Options().createIfMissing(true).compressionType(CompressionType.NONE); + File path = getTestDirectory("testCompaction"); + DB db = factory.open(path, options); + + System.out.println("Adding"); + for (int i = 0; i < 1000 * 1000; i++) { + if (i % 100000 == 0) { + System.out.println(" at: " + i); + } + db.put(bytes("key" + i), bytes("value" + i)); + } + + db.close(); + db = factory.open(path, options); + + System.out.println("Deleting"); + for (int i = 0; i < 1000 * 1000; i++) { + if (i % 100000 == 0) { + System.out.println(" at: " + i); + } + db.delete(bytes("key" + i)); + } + + db.close(); + db = factory.open(path, options); + + System.out.println("Adding"); + for (int i = 0; i < 1000 * 1000; i++) { + if (i % 100000 == 0) { + System.out.println(" at: " + i); + } + db.put(bytes("key" + i), bytes("value" + i)); + } + + db.close(); + } + + @AfterMethod + public void tearDown() + { + FileUtils.deleteRecursively(databaseDir); + } +} diff --git a/leveldb/src/test/java/org/iq80/leveldb/impl/AutoCompactTest.java b/leveldb/src/test/java/org/iq80/leveldb/impl/AutoCompactTest.java new file mode 100644 index 0000000..5eb5857 --- /dev/null +++ b/leveldb/src/test/java/org/iq80/leveldb/impl/AutoCompactTest.java @@ -0,0 +1,133 @@ +/* + * Copyright (C) 2011 the original author or authors. 
+ * See the notice.md file distributed with this work for additional + * information regarding copyright ownership. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.iq80.leveldb.impl; + +import com.google.common.base.Strings; +import org.iq80.leveldb.CompressionType; +import org.iq80.leveldb.Options; +import org.iq80.leveldb.Range; +import org.iq80.leveldb.fileenv.EnvImpl; +import org.iq80.leveldb.fileenv.FileUtils; +import org.iq80.leveldb.iterator.DBIteratorAdapter; +import org.testng.annotations.AfterMethod; +import org.testng.annotations.BeforeMethod; +import org.testng.annotations.Test; + +import java.io.File; +import java.io.IOException; + +import static org.testng.Assert.assertFalse; +import static org.testng.Assert.assertTrue; + +public class AutoCompactTest +{ + private static final int K_VALUE_SIZE = 200 * 1024; + private static final int K_TOTAL_SIZE = 100 * 1024 * 1024; + private static final int K_COUNT = K_TOTAL_SIZE / K_VALUE_SIZE; + private File databaseDir; + private DbImpl db; + + private byte[] key(int i) + { + return String.format("key%06d", i).getBytes(); + } + + @Test + public void testReadAll() throws Exception + { + doReads(K_COUNT); + } + + @Test + public void testReadHalf() throws Exception + { + doReads(K_COUNT / 2); + } + + public void doReads(int n) throws Exception + { + final byte[] value = Strings.repeat("x", K_VALUE_SIZE).getBytes(); + // Fill database + for (int i = 0; i < K_COUNT; i++) { + db.put(key(i), 
value); + } + db.testCompactMemTable(); + // Delete everything + for (int i = 0; i < K_COUNT; i++) { + db.delete(key(i)); + } + db.testCompactMemTable(); + // Get initial measurement of the space we will be reading. + final long initialSize = size(key(0), key(n)); + final long initialOtherSize = size(key(n), key(K_COUNT)); + // Read until size drops significantly. + byte[] limitKey = key(n); + for (int read = 0; true; read++) { + assertTrue(read < 100, "Taking too long to compact"); + try (DBIteratorAdapter iterator = db.iterator()) { + iterator.seekToFirst(); + while (iterator.hasNext()) { + final DBIteratorAdapter.DbEntry next = iterator.next(); + if (new String(next.getKey()).compareTo(new String(limitKey)) >= 0) { + break; + } + } + } + Thread.sleep(1000L); + final long size = size(key(0), key(n)); + System.out.printf("iter %3d => %7.3f MB [other %7.3f MB]\n", + read + 1, size / 1048576.0, size(key(0), key(K_COUNT)) / 1048576.0); + if (size <= initialSize / 10) { + break; + } + } + // Verify that the size of the key space not touched by the reads + // is pretty much unchanged. 
+ long finalOtherSize = size(key(n), key(K_COUNT)); + assertTrue(finalOtherSize <= initialOtherSize + 1048576, finalOtherSize + "<=" + (initialOtherSize + 1048576)); + assertTrue(finalOtherSize >= initialOtherSize / 5 - 1048576, finalOtherSize + "<=" + (initialOtherSize / 5 - 1048576)); + } + + private long size(byte[] key, byte[] key1) + { + final Range range = new Range(key, key1); + return db.getApproximateSizes(range); + } + + //https://github.com/google/leveldb/commit/748539c183453bdeaff1eb0da8ccf5adacb796e7#diff-0465a3d0601c0cd6f05a6d0e9bfabd36 + @BeforeMethod + public void setUp() throws IOException + { + databaseDir = FileUtils.createTempDir("leveldb_autocompact_test"); + final Options options = new Options() + .paranoidChecks(true) + .createIfMissing(true) + .errorIfExists(true) + .compressionType(CompressionType.NONE) + .cacheSize(100); //tiny cache + db = new DbImpl(options, databaseDir.getAbsolutePath(), EnvImpl.createEnv()); + } + + @AfterMethod + public void tearDown() + { + db.close(); + boolean b = FileUtils.deleteRecursively(databaseDir); + assertFalse(!b && databaseDir.exists(), "Dir should be possible to delete! All files should have been released."); + } +} diff --git a/leveldb/src/test/java/org/iq80/leveldb/impl/CountingHandlesEnv.java b/leveldb/src/test/java/org/iq80/leveldb/impl/CountingHandlesEnv.java new file mode 100644 index 0000000..ba91e65 --- /dev/null +++ b/leveldb/src/test/java/org/iq80/leveldb/impl/CountingHandlesEnv.java @@ -0,0 +1,227 @@ +/* + * Copyright (C) 2011 the original author or authors. + * See the notice.md file distributed with this work for additional + * information regarding copyright ownership. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.iq80.leveldb.impl; + +import org.iq80.leveldb.Logger; +import org.iq80.leveldb.env.Env; +import org.iq80.leveldb.env.DbLock; +import org.iq80.leveldb.env.RandomInputFile; +import org.iq80.leveldb.env.SequentialFile; +import org.iq80.leveldb.util.Slice; +import org.iq80.leveldb.util.SliceOutput; +import org.iq80.leveldb.env.WritableFile; + +import org.iq80.leveldb.env.File; +import java.io.IOException; +import java.nio.ByteBuffer; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.ConcurrentMap; +import java.util.concurrent.atomic.AtomicInteger; + +/** + * Environment that counts how many handles are currently opened.
+ */ +public class CountingHandlesEnv implements Env +{ + private final Env env; + private final AtomicInteger counter = new AtomicInteger(); + private final ConcurrentMap ob = new ConcurrentHashMap<>(); + + public CountingHandlesEnv(Env env) + { + this.env = env; + } + + public int getOpenHandles() + { + return counter.get(); + } + + @Override + public long nowMicros() + { + return env.nowMicros(); + } + + @Override + public File toFile(String filename) + { + return env.toFile(filename); + } + + @Override + public File createTempDir(String prefix) + { + return env.createTempDir(prefix); + } + + @Override + public SequentialFile newSequentialFile(File file) throws IOException + { + final SequentialFile sequentialFile = env.newSequentialFile(file); + counter.incrementAndGet(); + return new SequentialFile() + { + boolean closed; + + { + ob.put(this, this); + } + + public void skip(long n) throws IOException + { + sequentialFile.skip(n); + } + + public int read(int atMost, SliceOutput destination) throws IOException + { + return sequentialFile.read(atMost, destination); + } + + public void close() throws IOException + { + if (!closed) { + counter.decrementAndGet(); + closed = true; + ob.remove(this); + } + sequentialFile.close(); + } + }; + } + + @Override + public RandomInputFile newRandomAccessFile(File file) throws IOException + { + final RandomInputFile randomInputFile = env.newRandomAccessFile(file); + counter.incrementAndGet(); + return new RandomInputFile() + { + boolean closed; + + { + ob.put(this, this); + } + + public long size() + { + return randomInputFile.size(); + } + + public ByteBuffer read(long offset, int length) throws IOException + { + return randomInputFile.read(offset, length); + } + + public void close() throws IOException + { + if (!closed) { + counter.decrementAndGet(); + closed = true; + ob.remove(this); + } + randomInputFile.close(); + } + }; + } + + @Override + public WritableFile newWritableFile(File file) throws IOException + { + return 
getWritableFile(env.newWritableFile(file)); + } + + @Override + public WritableFile newAppendableFile(File file) throws IOException + { + return getWritableFile(env.newAppendableFile(file)); + } + + @Override + public Logger newLogger(File loggerFile) throws IOException + { + counter.incrementAndGet(); + Logger logger = env.newLogger(loggerFile); + return new Logger() + { + @Override + public void log(String message) + { + logger.log(message); + } + + @Override + public void close() throws IOException + { + counter.decrementAndGet(); + logger.close(); + } + }; + } + + @Override + public DbLock tryLock(File file) throws IOException + { + return env.tryLock(file); + } + + private WritableFile getWritableFile(WritableFile writableFile) throws IOException + { + counter.incrementAndGet(); + return new WritableFile() + { + boolean closed; + + { + ob.put(this, this); + } + + public void append(Slice data) throws IOException + { + writableFile.append(data); + } + + public void force() throws IOException + { + writableFile.force(); + } + + public void close() throws IOException + { + if (!closed) { + counter.decrementAndGet(); + closed = true; + ob.remove(this); + } + writableFile.close(); + } + }; + } + + @Override + public void writeStringToFileSync(File file, String content) throws IOException + { + env.writeStringToFileSync(file, content); + } + + @Override + public String readFileToString(File file) throws IOException + { + return env.readFileToString(file); + } +} diff --git a/leveldb/src/test/java/org/iq80/leveldb/impl/DbImplTest.java b/leveldb/src/test/java/org/iq80/leveldb/impl/DbImplTest.java new file mode 100644 index 0000000..b68efa1 --- /dev/null +++ b/leveldb/src/test/java/org/iq80/leveldb/impl/DbImplTest.java @@ -0,0 +1,2365 @@ +/* + * Copyright (C) 2011 the original author or authors. + * See the notice.md file distributed with this work for additional + * information regarding copyright ownership. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.iq80.leveldb.impl; + +import com.google.common.base.Strings; +import com.google.common.collect.ImmutableList; +import com.google.common.primitives.UnsignedBytes; +import org.iq80.leveldb.CompressionType; +import org.iq80.leveldb.DB; +import org.iq80.leveldb.DBComparator; +import org.iq80.leveldb.Logger; +import org.iq80.leveldb.Options; +import org.iq80.leveldb.Range; +import org.iq80.leveldb.ReadOptions; +import org.iq80.leveldb.Snapshot; +import org.iq80.leveldb.WriteBatch; +import org.iq80.leveldb.WriteOptions; +import org.iq80.leveldb.env.DbLock; +import org.iq80.leveldb.env.Env; +import org.iq80.leveldb.env.File; +import org.iq80.leveldb.env.RandomInputFile; +import org.iq80.leveldb.env.SequentialFile; +import org.iq80.leveldb.env.WritableFile; +import org.iq80.leveldb.fileenv.EnvImpl; +import org.iq80.leveldb.fileenv.FileUtils; +import org.iq80.leveldb.iterator.InternalIterator; +import org.iq80.leveldb.iterator.IteratorTestUtils; +import org.iq80.leveldb.iterator.SeekingDBIteratorAdapter; +import org.iq80.leveldb.iterator.SeekingIterator; +import org.iq80.leveldb.table.BloomFilterPolicy; +import org.iq80.leveldb.util.Slice; +import org.iq80.leveldb.util.Slices; +import org.testng.Assert; +import org.testng.annotations.AfterMethod; +import org.testng.annotations.BeforeMethod; +import org.testng.annotations.DataProvider; +import org.testng.annotations.Test; + +import java.io.IOException; 
+import java.math.BigInteger; +import java.nio.ByteBuffer; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; +import java.util.List; +import java.util.Map.Entry; +import java.util.NoSuchElementException; +import java.util.Random; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.atomic.AtomicInteger; + +import static com.google.common.collect.Maps.immutableEntry; +import static java.nio.charset.StandardCharsets.UTF_8; +import static java.util.Arrays.asList; +import static org.iq80.leveldb.CompressionType.NONE; +import static org.iq80.leveldb.impl.DbConstants.NUM_LEVELS; +import static org.iq80.leveldb.iterator.IteratorTestUtils.assertInvalid; +import static org.iq80.leveldb.iterator.IteratorTestUtils.assertValidKV; +import static org.iq80.leveldb.iterator.IteratorTestUtils.entry; +import static org.iq80.leveldb.table.BlockHelper.afterString; +import static org.iq80.leveldb.table.BlockHelper.assertReverseSequence; +import static org.iq80.leveldb.table.BlockHelper.assertSequence; +import static org.iq80.leveldb.table.BlockHelper.beforeString; +import static org.testng.Assert.assertEquals; +import static org.testng.Assert.assertFalse; +import static org.testng.Assert.assertNotNull; +import static org.testng.Assert.assertNull; +import static org.testng.Assert.assertThrows; +import static org.testng.Assert.assertTrue; +import static org.testng.Assert.fail; + +public class DbImplTest +{ + // You can set the STRESS_FACTOR system property to make the tests run more iterations. 
+ private static final double STRESS_FACTOR = Double.parseDouble(System.getProperty("STRESS_FACTOR", "1")); + + private static final String DOES_NOT_EXIST_FILENAME = "/foo/bar/doowop/idontexist"; + private static final String DOES_NOT_EXIST_FILENAME_PATTERN = ".foo.bar.doowop.idontexist"; + private Env defaultEnv; + + private File databaseDir; + + @DataProvider(name = "options") + public Object[][] optionsProvider() + { + return new Object[][] { + {new OptionsDesc("Default")}, + //new OptionsDesc("Reuse").reuseLog(true)}, + {new OptionsDesc("Bloom Filter").filterPolicy(new BloomFilterPolicy(10))}, + {new OptionsDesc("No Compression").compressionType(CompressionType.NONE)}, + {new OptionsDesc("Snappy").compressionType(CompressionType.SNAPPY)}, + {new OptionsDesc("ZLib").compressionType(CompressionType.ZLIB)}, + {new OptionsDesc("ZLib Raw").compressionType(CompressionType.ZLIB_RAW)} + }; + } + + @Test(dataProvider = "options") + public void testBackgroundCompaction(final Options options) + throws Exception + { + options.maxOpenFiles(100); + options.createIfMissing(true); + DbStringWrapper db = new DbStringWrapper(options, this.databaseDir, defaultEnv); + Random random = new Random(301); + for (int i = 0; i < 200000 * STRESS_FACTOR; i++) { + db.put(randomString(random, 64), new String(new byte[] {0x01}, UTF_8), new WriteOptions().sync(false)); + db.get(randomString(random, 64)); + if ((i % 50000) == 0 && i != 0) { + System.out.println(i + " rows written"); + } + } + } + + @Test + public void testConcurrentWrite() throws Exception + { + Options options = new Options(); + options.maxOpenFiles(50); + options.createIfMissing(true); + ExecutorService ex = Executors.newFixedThreadPool(Runtime.getRuntime().availableProcessors() * 4); + try { + DbStringWrapper db = new DbStringWrapper(options, this.databaseDir, defaultEnv); + final int numEntries = 1000000; + final int growValueBy = 10; + final CountDownLatch segmentsToPutEnd = new CountDownLatch(numEntries / 100); + final 
Random random = new Random(Thread.currentThread().getId()); + final int segmentSize = 100; + //dispatch writes + for (int i = 0; i < numEntries; i += segmentSize) { + final int finalI = i; + ex.submit(() -> { + final int i2 = finalI + segmentSize; + for (int j = finalI; j < i2; j++) { + final BigInteger bigInteger = BigInteger.valueOf(j); + final byte[] value = bigInteger.toByteArray(); + final byte[] bytes = new byte[growValueBy + value.length]; + for (int k = 0; k < growValueBy; k += value.length) { + System.arraycopy(value, 0, bytes, k, value.length); + } + db.db.put(value, bytes); + if (random.nextInt(100) < 2) { + Thread.yield(); + } + } + segmentsToPutEnd.countDown(); + }); + } + segmentsToPutEnd.await(); + //check all writes have + for (int i = 0; i < numEntries; i++) { + final BigInteger bigInteger = BigInteger.valueOf(i); + final byte[] value = bigInteger.toByteArray(); + final byte[] bytes = new byte[growValueBy + value.length]; + for (int k = 0; k < growValueBy; k += value.length) { + System.arraycopy(value, 0, bytes, k, value.length); + } + assertEquals(db.db.get(value), bytes); + } + } + finally { + ex.shutdownNow(); + } + } + + @Test(dataProvider = "options") + public void testCompactionsOnBigDataSet(final Options options) + throws Exception + { + options.createIfMissing(true); + DbStringWrapper db = new DbStringWrapper(options, databaseDir, defaultEnv); + for (int index = 0; index < 5000000; index++) { + String key = "Key LOOOOOOOOOOOOOOOOOONG KEY " + index; + String value = "This is element " + index + "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABZASDFASDKLFJASDFKJSDFLKSDJFLKJSDHFLKJHSDJFSDFHJASDFLKJSDF"; + db.put(key, value); + } + } + + @Test(dataProvider = "options") + public void testEmpty(final Options options) + throws Exception + { + File databaseDir = this.databaseDir; + DbStringWrapper db = new DbStringWrapper(options, databaseDir); + assertNull(db.get("foo")); + } + + @Test(dataProvider = "options") + public void testEmptyKey(final 
Options options) + throws Exception + { + DbStringWrapper db = new DbStringWrapper(options, databaseDir); + db.put("", "v1"); + assertEquals(db.get(""), "v1"); + db.put("", "v2"); + assertEquals(db.get(""), "v2"); + } + + @Test(dataProvider = "options") + public void testEmptyValue(final Options options) + throws Exception + { + DbStringWrapper db = new DbStringWrapper(options, databaseDir); + db.put("key", "v1"); + assertEquals(db.get("key"), "v1"); + db.put("key", ""); + assertEquals(db.get("key"), ""); + db.put("key", "v2"); + assertEquals(db.get("key"), "v2"); + } + + @Test(dataProvider = "options") + public void testEmptyBatch(final Options options) + throws Exception + { + // open new db + options.createIfMissing(true); + + DB db = new DbImpl(options, databaseDir.getPath(), defaultEnv); + + // write an empty batch + WriteBatch batch = db.createWriteBatch(); + batch.close(); + db.write(batch); + + // close the db + db.close(); + + // reopen db + new DbImpl(options, databaseDir.getPath(), defaultEnv).close(); + } + + @Test(dataProvider = "options") + public void testReadWrite(final Options options) + throws Exception + { + DbStringWrapper db = new DbStringWrapper(options, databaseDir); + db.put("foo", "v1"); + assertEquals(db.get("foo"), "v1"); + db.put("bar", "v2"); + db.put("foo", "v3"); + assertEquals(db.get("foo"), "v3"); + assertEquals(db.get("bar"), "v2"); + } + + @Test(dataProvider = "options") + public void testPutDeleteGet(final Options options) + throws Exception + { + DbStringWrapper db = new DbStringWrapper(options, databaseDir); + db.put("foo", "v1"); + assertEquals(db.get("foo"), "v1"); + db.put("foo", "v2"); + assertEquals(db.get("foo"), "v2"); + db.delete("foo"); + assertNull(db.get("foo")); + } + + @Test(dataProvider = "options") + public void testGetFromImmutableLayer(final Options options) + throws Exception + { + // create db with small write buffer + SpecialEnv env = new SpecialEnv(defaultEnv); + DbStringWrapper db = new 
DbStringWrapper(options.writeBufferSize(100000), databaseDir, env); + db.put("foo", "v1"); + assertEquals(db.get("foo"), "v1"); + + env.delayDataSync.set(true); + + // Fill memtable + db.put("k1", longString(100000, 'x')); + // Trigger compaction + db.put("k2", longString(100000, 'y')); + assertEquals(db.get("foo"), "v1"); + + env.delayDataSync.set(false); + } + + @Test(dataProvider = "options") + public void testGetFromVersions(final Options options) + throws Exception + { + DbStringWrapper db = new DbStringWrapper(options, databaseDir); + db.put("foo", "v1"); + db.testCompactMemTable(); + assertEquals(db.get("foo"), "v1"); + } + + @Test(dataProvider = "options") + public void testGetSnapshot(final Options options) + throws Exception + { + DbStringWrapper db = new DbStringWrapper(options, databaseDir); + + // Try with both a short key and a long key + for (int i = 0; i < 2; i++) { + String key = (i == 0) ? "foo" : longString(200, 'x'); + db.put(key, "v1"); + Snapshot s1 = db.getSnapshot(); + db.put(key, "v2"); + assertEquals(db.get(key), "v2"); + assertEquals(db.get(key, s1), "v1"); + + db.testCompactMemTable(); + assertEquals(db.get(key), "v2"); + assertEquals(db.get(key, s1), "v1"); + s1.close(); + } + } + + @Test(dataProvider = "options") + public void testGetIdenticalSnapshots(final Options options) + throws Exception + { + DbStringWrapper db = new DbStringWrapper(options, databaseDir); + // Try with both a short key and a long key + for (int i = 0; i < 2; i++) { + String key = (i == 0) ? 
"foo" : "X" + Strings.repeat(" ", 199); + db.put(key, "v1"); + Snapshot s1 = db.getSnapshot(); + Snapshot s2 = db.getSnapshot(); + Snapshot s3 = db.getSnapshot(); + db.put(key, "v2"); + assertEquals(db.get(key), "v2"); + assertEquals(db.get(key, s1), "v1"); + assertEquals(db.get(key, s2), "v1"); + assertEquals(db.get(key, s3), "v1"); + s1.close(); + db.testCompactMemTable(); + assertEquals(db.get(key), "v2"); + assertEquals(db.get(key, s2), "v1"); + s2.close(); + assertEquals(db.get(key, s3), "v1"); + s3.close(); + } + } + + @Test(dataProvider = "options") + public void testIterateOverEmptySnapshot(final Options options) + throws Exception + { + DbStringWrapper db = new DbStringWrapper(options, databaseDir); + Snapshot snapshot = db.getSnapshot(); + ReadOptions readOptions = new ReadOptions(); + readOptions.snapshot(snapshot); + db.put("foo", "v1"); + db.put("foo", "v2"); + + SeekingIterator iterator = db.iterator(readOptions); + iterator.seekToFirst(); + assertFalse(iterator.valid()); + iterator.close(); + + db.testCompactMemTable(); + + SeekingIterator iterator2 = db.iterator(readOptions); + iterator2.seekToFirst(); + assertFalse(iterator2.valid()); + iterator2.close(); + + snapshot.close(); + } + + @Test(dataProvider = "options") + public void testGetLevel0Ordering(final Options options) + throws Exception + { + DbStringWrapper db = new DbStringWrapper(options, databaseDir); + + // Check that we process level-0 files in correct order. The code + // below generates two level-0 files where the earlier one comes + // before the later one in the level-0 file list since the earlier + // one has a smaller "smallest" key. 
+ db.put("bar", "b"); + db.put("foo", "v1"); + db.testCompactMemTable(); + db.put("foo", "v2"); + db.testCompactMemTable(); + assertEquals(db.get("foo"), "v2"); + } + + @Test(dataProvider = "options") + public void testGetOrderedByLevels(final Options options) + throws Exception + { + DbStringWrapper db = new DbStringWrapper(options, databaseDir); + db.put("foo", "v1"); + db.compactRange("a", "z"); + assertEquals(db.get("foo"), "v1"); + db.put("foo", "v2"); + assertEquals(db.get("foo"), "v2"); + db.testCompactMemTable(); + assertEquals(db.get("foo"), "v2"); + } + + @Test(dataProvider = "options") + public void testGetPicksCorrectFile(final Options options) + throws Exception + { + DbStringWrapper db = new DbStringWrapper(options, databaseDir); + db.put("a", "va"); + db.compactRange("a", "b"); + db.put("x", "vx"); + db.compactRange("x", "y"); + db.put("f", "vf"); + db.compactRange("f", "g"); + + assertEquals(db.get("a"), "va"); + assertEquals(db.get("f"), "vf"); + assertEquals(db.get("x"), "vx"); + } + + @Test(dataProvider = "options") + public void testGetEncountersEmptyLevel(final Options options) + throws Exception + { + DbStringWrapper db = new DbStringWrapper(options, databaseDir); + // Arrange for the following to happen: + // * sstable A in level 0 + // * nothing in level 1 + // * sstable B in level 2 + // Then do enough Get() calls to arrange for an automatic compaction + // of sstable A. A bug would cause the compaction to be marked as + // occurring at level 1 (instead of the correct level 0). + + // Step 1: First place sstables in levels 0 and 2 + int compactionCount = 0; + while (db.numberOfFilesInLevel(0) == 0 || db.numberOfFilesInLevel(2) == 0) { + assertTrue(compactionCount <= 100, "could not fill levels 0 and 2"); + compactionCount++; + db.put("a", "begin"); + db.put("z", "end"); + db.testCompactMemTable(); + } + + // Step 2: clear level 1 if necessary. 
+ db.testCompactRange(1, null, null); + assertEquals(db.numberOfFilesInLevel(0), 1); + assertEquals(db.numberOfFilesInLevel(1), 0); + assertEquals(db.numberOfFilesInLevel(2), 1); + + // Step 3: read a bunch of times + for (int i = 0; i < 1000; i++) { + assertNull(db.get("missing")); + } + + // Step 4: Wait for compaction to finish + db.waitForBackgroundCompactationToFinish(); + + assertEquals(db.numberOfFilesInLevel(0), 0); + } + + @Test(dataProvider = "options") + public void testEmptyIterator(final Options options) + throws Exception + { + DbStringWrapper db = new DbStringWrapper(options, databaseDir); + SeekingIterator iterator = db.iterator(); + + iterator.seekToFirst(); + assertNoNextElement(iterator); + + iterator.seek("foo"); + assertNoNextElement(iterator); + iterator.close(); + } + + @Test(dataProvider = "options") + public void testIteratorSingle(final Options options) + throws Exception + { + DbStringWrapper db = new DbStringWrapper(options, databaseDir); + db.put("a", "va"); + + try (SeekingIterator iterator = db.iterator()) { + iterator.seekToFirst(); + assertValidKV(iterator, "a", "va"); + assertInvalid(iterator.next(), iterator); + assertTrue(iterator.seekToFirst()); + assertValidKV(iterator, "a", "va"); + assertInvalid(iterator.prev(), iterator); + + assertTrue(iterator.seekToLast()); + assertValidKV(iterator, "a", "va"); + assertInvalid(iterator.next(), iterator); + assertTrue(iterator.seekToLast()); + assertValidKV(iterator, "a", "va"); + + assertInvalid(iterator.prev(), iterator); + + assertTrue(iterator.seek("")); + assertValidKV(iterator, "a", "va"); + assertInvalid(iterator.next(), iterator); + + assertTrue(iterator.seek("a")); + assertValidKV(iterator, "a", "va"); + assertInvalid(iterator.next(), iterator); + + assertInvalid(iterator.seek("b"), iterator); + } + } + + @Test(dataProvider = "options") + public void testIteratorMultiple(final Options options) + throws Exception + { + DbStringWrapper db = new DbStringWrapper(options, databaseDir); 
+ db.put("a", "va"); + db.put("b", "vb"); + db.put("c", "vc"); + + try (SeekingIterator iterator = db.iterator()) { + assertTrue(iterator.seekToFirst()); + assertValidKV(iterator, "a", "va"); + assertTrue(iterator.next()); + assertValidKV(iterator, "b", "vb"); + assertTrue(iterator.next()); + assertValidKV(iterator, "c", "vc"); + assertInvalid(iterator.next(), iterator); + assertTrue(iterator.seekToFirst()); + assertValidKV(iterator, "a", "va"); + assertInvalid(iterator.prev(), iterator); + + assertTrue(iterator.seekToLast()); + assertValidKV(iterator, "c", "vc"); + assertTrue(iterator.prev()); + assertValidKV(iterator, "b", "vb"); + assertTrue(iterator.prev()); + assertValidKV(iterator, "a", "va"); + assertInvalid(iterator.prev(), iterator); + assertTrue(iterator.seekToLast()); + assertValidKV(iterator, "c", "vc"); + assertInvalid(iterator.next(), iterator); + + assertTrue(iterator.seek("")); + assertValidKV(iterator, "a", "va"); + assertTrue(iterator.seek("a")); + assertValidKV(iterator, "a", "va"); + assertTrue(iterator.seek("ax")); + assertValidKV(iterator, "b", "vb"); + assertTrue(iterator.seek("b")); + assertValidKV(iterator, "b", "vb"); + assertInvalid(iterator.seek("z"), iterator); + + // Switch from reverse to forward + assertTrue(iterator.seekToLast()); + assertTrue(iterator.prev()); + assertTrue(iterator.prev()); + assertTrue(iterator.next()); + assertValidKV(iterator, "b", "vb"); + + // Switch from forward to reverse + assertTrue(iterator.seekToFirst()); + assertTrue(iterator.next()); + assertTrue(iterator.next()); + assertTrue(iterator.prev()); + assertValidKV(iterator, "b", "vb"); + + // Make sure iter stays at snapshot + db.put("a", "va2"); + db.put("a2", "va3"); + db.put("b", "vb2"); + db.put("c", "vc2"); + db.delete("b"); + assertTrue(iterator.seekToFirst()); + assertValidKV(iterator, "a", "va"); + assertTrue(iterator.next()); + assertValidKV(iterator, "b", "vb"); + assertTrue(iterator.next()); + assertValidKV(iterator, "c", "vc"); + 
assertInvalid(iterator.next(), iterator); + assertTrue(iterator.seekToLast()); + assertValidKV(iterator, "c", "vc"); + assertTrue(iterator.prev()); + assertValidKV(iterator, "b", "vb"); + assertTrue(iterator.prev()); + assertValidKV(iterator, "a", "va"); + assertInvalid(iterator.prev(), iterator); + } + } + + @Test(dataProvider = "options") + public void testIterSmallAndLargeMix(final Options options) + throws IOException + { + DbStringWrapper db = new DbStringWrapper(options, databaseDir); + db.put("a", "va"); + db.put("b", Strings.repeat("b", 100000)); + db.put("c", "vc"); + db.put("d", Strings.repeat("d", 100000)); + db.put("e", Strings.repeat("e", 100000)); + try (SeekingIterator iterator = db.iterator()) { + assertTrue(iterator.seekToFirst()); + assertSequence(iterator, + immutableEntry("a", "va"), + immutableEntry("b", Strings.repeat("b", 100000)), + immutableEntry("c", "vc"), + immutableEntry("d", Strings.repeat("d", 100000)), + immutableEntry("e", Strings.repeat("e", 100000))); + + iterator.seekToLast(); + assertReverseSequence(iterator, + immutableEntry("e", Strings.repeat("e", 100000)), + immutableEntry("d", Strings.repeat("d", 100000)), + immutableEntry("c", "vc"), + immutableEntry("b", Strings.repeat("b", 100000)), + immutableEntry("a", "va") + ); + assertFalse(iterator.valid()); + } + } + + @Test(dataProvider = "options") + public void testIterMultiWithDelete(final Options options) + throws IOException + { + DbStringWrapper db = new DbStringWrapper(options, databaseDir); + db.put("b", "vb"); + db.put("c", "vc"); + db.put("a", "va"); + db.delete("b"); + assertNull(db.get("b")); + SeekingIterator iterator = db.iterator(); + iterator.seek("c"); + assertValidKV(iterator, "c", "vc"); + assertTrue(iterator.prev()); + assertValidKV(iterator, "a", "va"); + iterator.close(); + } + + @Test(dataProvider = "options") + public void testRecover(final Options options) + throws Exception + { + DbStringWrapper db = new DbStringWrapper(options, databaseDir); + 
db.put("foo", "v1"); + db.put("baz", "v5"); + + db.reopen(); + + assertEquals(db.get("foo"), "v1"); + assertEquals(db.get("baz"), "v5"); + db.put("bar", "v2"); + db.put("foo", "v3"); + + db.reopen(); + + assertEquals(db.get("foo"), "v3"); + db.put("foo", "v4"); + assertEquals(db.get("foo"), "v4"); + assertEquals(db.get("bar"), "v2"); + assertEquals(db.get("baz"), "v5"); + } + + @Test(dataProvider = "options") + public void testRecoveryWithEmptyLog(final Options options) + throws Exception + { + DbStringWrapper db = new DbStringWrapper(options, databaseDir); + db.put("foo", "v1"); + db.put("foo", "v2"); + db.reopen(); + db.reopen(); + db.put("foo", "v3"); + db.reopen(); + assertEquals(db.get("foo"), "v3"); + } + + @Test(dataProvider = "options") + public void testSliceMutationAfterWriteShouldNotAffectInternalState(final Options options) + throws Exception + { + DbStringWrapper db = new DbStringWrapper(options, databaseDir); + final WriteBatchImpl updates = new WriteBatchImpl(); + final Slice key = new Slice("foo".getBytes()); + final Slice value = new Slice("v1".getBytes()); + updates.put(key, value); + db.db.write(updates); + + //precondition + assertEquals(db.get("foo"), "v1"); + + //change value should have no effect + value.setByte(1, '1'); + assertEquals(db.get("foo"), "v1"); + + //change key should have no effect + key.setByte(0, 'x'); + assertEquals(db.get("foo"), "v1"); + + //change in delete key should have no effect + final WriteBatchImpl updates1 = new WriteBatchImpl(); + final Slice key1 = new Slice("foo".getBytes()); + updates1.delete(key1); + db.db.write(updates1); + assertNull(db.get("foo")); + key1.setByte(0, 'x'); + assertNull(db.get("foo")); + } + + @Test(dataProvider = "options") + public void testArrayMutationAfterWriteShouldNotAffectInternalState(final Options options) + throws Exception + { + DbStringWrapper db = new DbStringWrapper(options, databaseDir); + + final byte[] key = {'f', 'o', 'o'}; + final byte[] value = {'v', '1'}; + 
db.db.put(key, value); + + //precondition + assertEquals(db.get("foo"), "v1"); + + //change value should have no effect + value[1] = '2'; + assertEquals(db.get("foo"), "v1"); + + //change key should have no effect + key[1] = '1'; + assertEquals(db.get("foo"), "v1"); + + //change in delete key should have no effect + final byte[] key1 = {'f', 'o', 'o'}; + db.db.delete(key1); + assertNull(db.get("foo")); + key1[0] = 'x'; + assertNull(db.get("foo")); + } + + @Test(dataProvider = "options") + public void testRecoverDuringMemtableCompaction(final Options options) + throws Exception + { + DbStringWrapper db = new DbStringWrapper(options.writeBufferSize(1000000), databaseDir); + + // Trigger a long memtable compaction and reopen the database during it + db.put("foo", "v1"); // Goes to 1st log file + db.put("big1", longString(10000000, 'x')); // Fills memtable + db.put("big2", longString(1000, 'y')); // Triggers compaction + db.put("bar", "v2"); // Goes to new log file + + db.reopen(); + assertEquals(db.get("foo"), "v1"); + assertEquals(db.get("bar"), "v2"); + assertEquals(db.get("big1"), longString(10000000, 'x')); + assertEquals(db.get("big2"), longString(1000, 'y')); + } + + @Test(dataProvider = "options") + public void testMinorCompactionsHappen(final Options options) + throws Exception + { + DbStringWrapper db = new DbStringWrapper(options.writeBufferSize(10000), databaseDir); + + int n = 500; + int startingNumTables = db.totalTableFiles(); + for (int i = 0; i < n; i++) { + db.put(key(i), key(i) + longString(1000, 'v')); + } + int endingNumTables = db.totalTableFiles(); + assertTrue(endingNumTables > startingNumTables); + + for (int i = 0; i < n; i++) { + assertEquals(db.get(key(i)), key(i) + longString(1000, 'v')); + } + db.testCompactMemTable(); + + for (int i = 0; i < n; i++) { + assertEquals(db.get(key(i)), key(i) + longString(1000, 'v')); + } + + db.reopen(); + for (int i = 0; i < n; i++) { + assertEquals(db.get(key(i)), key(i) + longString(1000, 'v')); + } + } + 
+ @Test(dataProvider = "options") + public void testRecoverWithLargeLog(final Options options) + throws Exception + { + DbStringWrapper db = new DbStringWrapper(options, databaseDir); + db.put("big1", longString(200000, '1')); + db.put("big2", longString(200000, '2')); + db.put("small3", longString(10, '3')); + db.put("small4", longString(10, '4')); + assertEquals(db.numberOfFilesInLevel(0), 0); + + db.reopen(options.writeBufferSize(100000)); + assertEquals(db.numberOfFilesInLevel(0), 3); + assertEquals(db.get("big1"), longString(200000, '1')); + assertEquals(db.get("big2"), longString(200000, '2')); + assertEquals(db.get("small3"), longString(10, '3')); + assertEquals(db.get("small4"), longString(10, '4')); + assertTrue(db.numberOfFilesInLevel(0) > 1); + } + + @Test(dataProvider = "options") + public void testCompactionsGenerateMultipleFiles(final Options options) + throws Exception + { + DbStringWrapper db = new DbStringWrapper(options.writeBufferSize(100000000), databaseDir); + + // Write 8MB (80 values, each 100K) + assertEquals(db.numberOfFilesInLevel(0), 0); + assertEquals(db.numberOfFilesInLevel(1), 0); + Random random = new Random(301); + List values = new ArrayList<>(); + for (int i = 0; i < 80; i++) { + String value = randomString(random, 100 * 1024); + db.put(key(i), value); + values.add(value); + } + + // Reopening moves updates to level-0 + db.reopen(); + assertTrue(db.numberOfFilesInLevel(0) > 0); + assertEquals(db.numberOfFilesInLevel(1), 0); + db.testCompactRange(0, null, null); + + assertEquals(db.numberOfFilesInLevel(0), 0); + assertTrue(db.numberOfFilesInLevel(1) > 0); + for (int i = 0; i < 80; i++) { + assertEquals(db.get(key(i)), values.get(i)); + } + } + + @Test(dataProvider = "options") + public void testRepeatedWritesToSameKey(final Options options) + throws Exception + { + options.writeBufferSize(100000); + DbStringWrapper db = new DbStringWrapper(options, databaseDir); + + // We must have at most one file per level except for level-0, + // 
which may have up to kL0_StopWritesTrigger files. + int maxFiles = NUM_LEVELS + DbConstants.L0_STOP_WRITES_TRIGGER; + + Random random = new Random(301); + String value = randomString(random, 2 * options.writeBufferSize()); + for (int i = 0; i < 5 * maxFiles; i++) { + db.put("key", value); + assertTrue(db.totalTableFiles() < maxFiles); + } + + db.close(); + } + + @Test(dataProvider = "options") + public void testSparseMerge(final Options options) + throws Exception + { + DbStringWrapper db = new DbStringWrapper(new Options().compressionType(NONE), databaseDir); + + fillLevels(db, "A", "Z"); + + // Suppose there is: + // small amount of data with prefix A + // large amount of data with prefix B + // small amount of data with prefix C + // and that recent updates have made small changes to all three prefixes. + // Check that we do not do a compaction that merges all of B in one shot. + String value = longString(1000, 'x'); + db.put("A", "va"); + + // Write approximately 100MB of "B" values + for (int i = 0; i < 100000; i++) { + String key = String.format("B%010d", i); + db.put(key, value); + } + db.put("C", "vc"); + db.testCompactMemTable(); + db.testCompactRange(0, null, null); + + // Make sparse update + db.put("A", "va2"); + db.put("B100", "bvalue2"); + db.put("C", "vc2"); + db.testCompactMemTable(); + + // Compactions should not cause us to create a situation where + // a file overlaps too much data at the next level. 
+ assertTrue(db.getMaxNextLevelOverlappingBytes() <= 20 * 1048576); + db.testCompactRange(0, null, null); + assertTrue(db.getMaxNextLevelOverlappingBytes() <= 20 * 1048576); + db.testCompactRange(1, null, null); + assertTrue(db.getMaxNextLevelOverlappingBytes() <= 20 * 1048576); + } + + @Test + public void testApproximateSizes() + throws Exception + { + DbStringWrapper db = new DbStringWrapper(new Options().writeBufferSize(100000000).compressionType(NONE), databaseDir); + + assertBetween(db.size("", "xyz"), 0, 0); + db.reopen(); + assertBetween(db.size("", "xyz"), 0, 0); + + // Write 8MB (80 values, each 100K) + assertEquals(db.numberOfFilesInLevel(0), 0); + int n = 80; + Random random = new Random(301); + for (int i = 0; i < n; i++) { + db.put(key(i), randomString(random, 100000)); + } + + // 0 because GetApproximateSizes() does not account for memtable space + assertBetween(db.size("", key(50)), 0, 0); + + // Check sizes across recovery by reopening a few times + for (int run = 0; run < 3; run++) { + db.reopen(); + + for (int compactStart = 0; compactStart < n; compactStart += 10) { + for (int i = 0; i < n; i += 10) { + assertBetween(db.size("", key(i)), 100000 * i, 100000 * i + 10000); + assertBetween(db.size("", key(i) + ".suffix"), 100000 * (i + 1), 100000 * (i + 1) + 10000); + assertBetween(db.size(key(i), key(i + 10)), 100000 * 10, 100000 * 10 + 10000); + } + assertBetween(db.size("", key(50)), 5000000, 5010000); + assertBetween(db.size("", key(50) + ".suffix"), 5100000, 5110000); + + db.testCompactRange(0, key(compactStart), key(compactStart + 9)); + } + + assertEquals(db.numberOfFilesInLevel(0), 0); + assertTrue(db.numberOfFilesInLevel(1) > 0); + } + } + + @Test + public void testApproximateSizesMixOfSmallAndLarge() + throws Exception + { + DbStringWrapper db = new DbStringWrapper(new Options().compressionType(NONE), databaseDir); + Random random = new Random(301); + String big1 = randomString(random, 100000); + db.put(key(0), randomString(random, 10000)); 
+ db.put(key(1), randomString(random, 10000)); + db.put(key(2), big1); + db.put(key(3), randomString(random, 10000)); + db.put(key(4), big1); + db.put(key(5), randomString(random, 10000)); + db.put(key(6), randomString(random, 300000)); + db.put(key(7), randomString(random, 10000)); + + // Check sizes across recovery by reopening a few times + for (int run = 0; run < 3; run++) { + db.reopen(); + + assertBetween(db.size("", key(0)), 0, 0); + assertBetween(db.size("", key(1)), 10000, 11000); + assertBetween(db.size("", key(2)), 20000, 21000); + assertBetween(db.size("", key(3)), 120000, 121000); + assertBetween(db.size("", key(4)), 130000, 131000); + assertBetween(db.size("", key(5)), 230000, 231000); + assertBetween(db.size("", key(6)), 240000, 241000); + assertBetween(db.size("", key(7)), 540000, 541000); + assertBetween(db.size("", key(8)), 550000, 551000); + + assertBetween(db.size(key(3), key(5)), 110000, 111000); + + db.testCompactRange(0, null, null); + } + } + + @Test(dataProvider = "options") + public void testIteratorPinsRef(final Options options) + throws Exception + { + DbStringWrapper db = new DbStringWrapper(options, databaseDir); + db.put("foo", "hello"); + + try (SeekingIterator iterator = db.iterator()) { + iterator.seekToFirst(); + + db.put("foo", "newvalue1"); + for (int i = 0; i < 100; i++) { + db.put(key(i), key(i) + longString(100000, 'v')); + } + db.put("foo", "newvalue1"); + + assertSequence(iterator, immutableEntry("foo", "hello")); + } + } + + @Test(dataProvider = "options") + public void testSnapshot(final Options options) + throws Exception + { + DbStringWrapper db = new DbStringWrapper(options, databaseDir); + db.put("foo", "v1"); + Snapshot s1 = db.getSnapshot(); + db.put("foo", "v2"); + Snapshot s2 = db.getSnapshot(); + db.put("foo", "v3"); + Snapshot s3 = db.getSnapshot(); + + db.put("foo", "v4"); + + assertEquals("v1", db.get("foo", s1)); + assertEquals("v2", db.get("foo", s2)); + assertEquals("v3", db.get("foo", s3)); + 
assertEquals("v4", db.get("foo")); + + s3.close(); + assertEquals("v1", db.get("foo", s1)); + assertEquals("v2", db.get("foo", s2)); + assertEquals("v4", db.get("foo")); + + s1.close(); + assertEquals("v2", db.get("foo", s2)); + assertEquals("v4", db.get("foo")); + + s2.close(); + assertEquals("v4", db.get("foo")); + } + + @Test(dataProvider = "options") + public void testHiddenValuesAreRemoved(final Options options) + throws Exception + { + DbStringWrapper db = new DbStringWrapper(options, databaseDir); + Random random = new Random(301); + fillLevels(db, "a", "z"); + + String big = randomString(random, 50000); + db.put("foo", big); + db.put("pastFoo", "v"); + + Snapshot snapshot = db.getSnapshot(); + + db.put("foo", "tiny"); + db.put("pastFoo2", "v2"); // Advance sequence number one more + + db.testCompactMemTable(); + assertTrue(db.numberOfFilesInLevel(0) > 0); + + assertEquals(big, db.get("foo", snapshot)); + assertBetween(db.size("", "pastFoo"), 40000, 60000); + snapshot.close(); + assertEquals(db.allEntriesFor("foo"), asList("tiny", big)); + db.testCompactRange(0, null, "x"); + assertEquals(db.allEntriesFor("foo"), asList("tiny")); + assertEquals(db.numberOfFilesInLevel(0), 0); + assertTrue(db.numberOfFilesInLevel(1) >= 1); + db.testCompactRange(1, null, "x"); + assertEquals(db.allEntriesFor("foo"), asList("tiny")); + + assertBetween(db.size("", "pastFoo"), 0, 1000); + } + + @Test + public void testDeleteEntriesShouldNotAbeamOnIteration() throws Exception + { + DbStringWrapper db = new DbStringWrapper(new Options().createIfMissing(true), databaseDir); + db.put("b", "v"); + db.delete("b"); + db.delete("a"); + assertEquals("[]", toString(db)); + } + + @Test + public void testL0CompactionGoogleBugIssue44a() throws Exception + { + DbStringWrapper db = new DbStringWrapper(new Options().createIfMissing(true), databaseDir); + db.reopen(); + db.put("b", "v"); + db.reopen(); + db.delete("b"); + db.delete("a"); + db.reopen(); + db.delete("a"); + db.reopen(); + 
db.put("a", "v"); + db.reopen(); + db.reopen(); + assertEquals("[a=v]", toString(db)); + Thread.sleep(1000); // Wait for compaction to finish + assertEquals("[a=v]", toString(db)); + } + + private String toString(DbStringWrapper db) + { + String s; + try (SeekingIterator iterator = db.iterator()) { + iterator.seekToFirst(); + s = IteratorTestUtils.toString(iterator); + return s; + } + catch (IOException e) { + Assert.fail(e.getMessage()); + return ""; + } + } + + @Test(invocationCount = 10) + public void testL0CompactionGoogleBugIssue44b() throws Exception + { + DbStringWrapper db = new DbStringWrapper(new Options().createIfMissing(true), databaseDir); + db.reopen(); + db.put("", ""); + db.reopen(); + db.delete("e"); + db.put("", ""); + db.reopen(); + db.put("c", "cv"); + db.reopen(); + assertEquals("[=, c=cv]", toString(db)); + db.put("", ""); + db.reopen(); + db.put("", ""); + Thread.sleep(1000); // Wait for compaction to finish + db.reopen(); + db.put("d", "dv"); + db.reopen(); + db.put("", ""); + db.reopen(); + db.delete("d"); + db.delete("b"); + db.reopen(); + assertEquals("[=, c=cv]", toString(db)); + Thread.sleep(1000); // Wait for compaction to finish + assertEquals("[=, c=cv]", toString(db)); + } + + @Test(dataProvider = "options") + public void testDeletionMarkers1(final Options options) + throws Exception + { + DbStringWrapper db = new DbStringWrapper(options, databaseDir); + + db.put("foo", "v1"); + db.testCompactMemTable(); + + int last = DbConstants.MAX_MEM_COMPACT_LEVEL; + assertEquals(db.numberOfFilesInLevel(last), 1); // foo => v1 is now in last level + + // Place a table at level last-1 to prevent merging with preceding mutation + db.put("a", "begin"); + db.put("z", "end"); + db.testCompactMemTable(); + assertEquals(db.numberOfFilesInLevel(last), 1); + assertEquals(db.numberOfFilesInLevel(last - 1), 1); + assertEquals(db.get("a"), "begin"); + assertEquals(db.get("foo"), "v1"); + assertEquals(db.get("z"), "end"); + + db.delete("foo"); + 
db.put("foo", "v2"); + final List foo = db.allEntriesFor("foo"); + assertEquals(foo, asList("v2", "DEL", "v1")); + db.testCompactMemTable(); // Moves to level last-2 + assertEquals(db.get("a"), "begin"); + assertEquals(db.get("foo"), "v2"); + assertEquals(db.get("z"), "end"); + + assertEquals(db.allEntriesFor("foo"), asList("v2", "DEL", "v1")); + db.testCompactRange(last - 2, null, "z"); + + // DEL eliminated, but v1 remains because we aren't compacting that level + // (DEL can be eliminated because v2 hides v1). + assertEquals(db.allEntriesFor("foo"), asList("v2", "v1")); + db.testCompactRange(last - 1, null, null); + + // Merging last-1 w/ last, so we are the base level for "foo", so + // DEL is removed. (as is v1). + assertEquals(db.allEntriesFor("foo"), asList("v2")); + } + + @Test(dataProvider = "options") + public void testDeletionMarkers2(final Options options) + throws Exception + { + DbStringWrapper db = new DbStringWrapper(options, databaseDir); + + db.put("foo", "v1"); + db.testCompactMemTable(); + + int last = DbConstants.MAX_MEM_COMPACT_LEVEL; + assertEquals(db.numberOfFilesInLevel(last), 1); // foo => v1 is now in last level + + // Place a table at level last-1 to prevent merging with preceding mutation + db.put("a", "begin"); + db.put("z", "end"); + db.testCompactMemTable(); + assertEquals(db.numberOfFilesInLevel(last), 1); + assertEquals(db.numberOfFilesInLevel(last - 1), 1); + + db.delete("foo"); + + assertEquals(db.allEntriesFor("foo"), asList("DEL", "v1")); + db.testCompactMemTable(); // Moves to level last-2 + assertEquals(db.allEntriesFor("foo"), asList("DEL", "v1")); + db.testCompactRange(last - 2, null, null); + + // DEL kept: "last" file overlaps + assertEquals(db.allEntriesFor("foo"), asList("DEL", "v1")); + db.testCompactRange(last - 1, null, null); + + // Merging last-1 w/ last, so we are the base level for "foo", so + // DEL is removed. (as is v1). 
+ assertEquals(db.allEntriesFor("foo"), asList()); + } + + @Test(dataProvider = "options") + public void testOverlapInLevel0(final Options options) + throws Exception + { + DbStringWrapper db = new DbStringWrapper(options, databaseDir); + assertEquals(DbConstants.MAX_MEM_COMPACT_LEVEL, 2, "Fix test to match config"); + + // Fill levels 1 and 2 to disable the pushing of new memtables to levels > + // 0. + db.put("100", "v100"); + db.put("999", "v999"); + db.testCompactMemTable(); + db.delete("100"); + db.delete("999"); + db.testCompactMemTable(); + assertEquals(db.filesPerLevel(), "0,1,1"); + + // Make files spanning the following ranges in level-0: + // files[0] 200 .. 900 + // files[1] 300 .. 500 + // Note that files are sorted by smallest key. + db.put("300", "v300"); + db.put("500", "v500"); + db.testCompactMemTable(); + db.put("200", "v200"); + db.put("600", "v600"); + db.put("900", "v900"); + db.testCompactMemTable(); + assertEquals(db.filesPerLevel(), "2,1,1"); + + // Compact away the placeholder files we created initially + db.testCompactRange(1, null, null); + db.testCompactRange(2, null, null); + assertEquals(db.filesPerLevel(), "2"); + + // Do a memtable compaction. Before bug-fix, the compaction would + // not detect the overlap with level-0 files and would incorrectly place + // the deletion in a deeper level. 
+ db.delete("600"); + db.testCompactMemTable(); + assertEquals(db.filesPerLevel(), "3"); + assertNull(db.get("600")); + } + + @Test(dataProvider = "options") + public void testEmptyDb(final Options options) + throws Exception + { + DbStringWrapper db = new DbStringWrapper(options, databaseDir); + testDb(db); + } + + @Test(dataProvider = "options") + public void testSingleEntrySingle(final Options options) + throws Exception + { + DbStringWrapper db = new DbStringWrapper(options, databaseDir); + testDb(db, immutableEntry("name", "dain sundstrom")); + } + + @Test(dataProvider = "options") + public void testMultipleEntries(final Options options) + throws Exception + { + DbStringWrapper db = new DbStringWrapper(options, databaseDir); + + List> entries = asList( + immutableEntry("beer/ale", "Lagunitas Little Sumpin’ Sumpin’"), + immutableEntry("beer/ipa", "Lagunitas IPA"), + immutableEntry("beer/stout", "Lagunitas Imperial Stout"), + immutableEntry("scotch/light", "Oban 14"), + immutableEntry("scotch/medium", "Highland Park"), + immutableEntry("scotch/strong", "Lagavulin")); + + testDb(db, entries); + } + + @Test(dataProvider = "options") + public void testMultiPassMultipleEntries(final Options options) + throws Exception + { + DbStringWrapper db = new DbStringWrapper(options, databaseDir); + + List> entries = asList( + immutableEntry("beer/ale", "Lagunitas Little Sumpin’ Sumpin’"), + immutableEntry("beer/ipa", "Lagunitas IPA"), + immutableEntry("beer/stout", "Lagunitas Imperial Stout"), + immutableEntry("scotch/light", "Oban 14"), + immutableEntry("scotch/medium", "Highland Park"), + immutableEntry("scotch/strong", "Lagavulin")); + + for (int i = 1; i < entries.size(); i++) { + testDb(db, entries); + } + } + + //TODO this test may fail in windows. 
a path that also fails in windows must be found + @Test(enabled = false, expectedExceptions = IllegalArgumentException.class, expectedExceptionsMessageRegExp = "Database directory '" + DOES_NOT_EXIST_FILENAME_PATTERN + "'.*") + public void testCantCreateDirectoryReturnMessage() + throws Exception + { + new DbStringWrapper(new Options(), defaultEnv.toFile(DOES_NOT_EXIST_FILENAME)); + } + + @Test(expectedExceptions = IllegalArgumentException.class, expectedExceptionsMessageRegExp = "Database directory.*is not a directory") + public void testDBDirectoryIsFileRetrunMessage() + throws Exception + { + File databaseFile = databaseDir.child("imafile"); + defaultEnv.writeStringToFileSync(databaseFile, ""); + new DbStringWrapper(new Options(), databaseFile); + } + + @Test + public void testSymbolicLinkForFileWithoutParent() + { + assertFalse(FileUtils.isSymbolicLink(new java.io.File("db"))); + } + + @Test + public void testSymbolicLinkForFileWithParent() + { + assertFalse(FileUtils.isSymbolicLink(new java.io.File(DOES_NOT_EXIST_FILENAME, "db"))); + } + + @Test(dataProvider = "options") + public void testCustomComparator(final Options options) + throws Exception + { + DbStringWrapper db = new DbStringWrapper(options.comparator(new LexicographicalReverseDBComparator()), databaseDir); + + List> entries = asList( + immutableEntry("scotch/strong", "Lagavulin"), + immutableEntry("scotch/medium", "Highland Park"), + immutableEntry("scotch/light", "Oban 14"), + immutableEntry("beer/stout", "Lagunitas Imperial Stout"), + immutableEntry("beer/ipa", "Lagunitas IPA"), + immutableEntry("beer/ale", "Lagunitas Little Sumpin’ Sumpin’") + ); + + for (Entry entry : entries) { + db.put(entry.getKey(), entry.getValue()); + } + + SeekingIterator seekingIterator = db.iterator(); + seekingIterator.seekToFirst(); + for (Entry entry : entries) { + assertTrue(seekingIterator.valid()); + assertEquals(entry(seekingIterator), entry); + seekingIterator.next(); + } + + 
assertFalse(seekingIterator.valid()); + seekingIterator.close(); + } + + @Test(dataProvider = "options") + public void testManualCompaction(final Options options) throws Exception + { + assertEquals(DbConstants.MAX_MEM_COMPACT_LEVEL, 2); + DbStringWrapper db = new DbStringWrapper(options, databaseDir); + makeTables(db, 3, "p", "q"); + assertEquals("1,1,1", db.filesPerLevel()); + + // Compaction range falls before files + db.compactRange("", "c"); + assertEquals("1,1,1", db.filesPerLevel()); + + // Compaction range falls after files + db.compactRange("r", "z"); + assertEquals("1,1,1", db.filesPerLevel()); + + // Compaction range overlaps files + db.compactRange("p1", "p9"); + assertEquals("0,0,1", db.filesPerLevel()); + + // Populate a different range + makeTables(db, 3, "c", "e"); + assertEquals("1,1,2", db.filesPerLevel()); + + // Compact just the new range + db.compactRange("b", "f"); + assertEquals("0,0,2", db.filesPerLevel()); + + // Compact all + makeTables(db, 1, "a", "z"); + assertEquals("0,1,2", db.filesPerLevel()); + db.compactRange(null, null); + assertEquals("0,0,1", db.filesPerLevel()); + } + + @Test(expectedExceptions = IllegalArgumentException.class, expectedExceptionsMessageRegExp = ".*' does not exist.*") + public void testOpenOptionsCreateIfMissingFalse() throws Exception + { + Options options = new Options(); + options.createIfMissing(false); + new DbImpl(options, databaseDir.child("missing").getPath(), defaultEnv); + } + + @Test(expectedExceptions = IllegalArgumentException.class, expectedExceptionsMessageRegExp = ".*' exists .*") + public void testOpenOptionsErrorIfExistTrue() throws Exception + { + Options options = new Options(); + try { + for (int i = 0; i < 2; ++i) { + options.createIfMissing(true); + options.errorIfExists(false); + try (DbImpl db = new DbImpl(options, databaseDir.getPath(), defaultEnv)) { + //make db and close + } + } + } + catch (Exception e) { + Assert.fail("Should not fail exceptions"); + } + 
options.createIfMissing(false); + options.errorIfExists(true); + new DbImpl(options, databaseDir.getPath(), defaultEnv); //reopen and should fail + } + + @Test + public void testDestroyEmptyDir() throws Exception + { + DbImpl.destroyDB(databaseDir, defaultEnv); + assertFalse(databaseDir.exists()); + } + + @Test + public void testDestroyOpenDB() throws Exception + { + databaseDir.delete(); + assertFalse(databaseDir.exists()); + Options options = new Options(); + options.createIfMissing(true); + DbStringWrapper db = new DbStringWrapper(options, databaseDir); + assertTrue(databaseDir.exists()); + + try { + //must fail + DbImpl.destroyDB(databaseDir, defaultEnv); + Assert.fail("Destroy DB should not complete successfully"); + } + catch (Exception e) { + //expected + } + db.close(); + + // Should succeed destroying a closed db. + DbImpl.destroyDB(databaseDir, defaultEnv); + assertFalse(databaseDir.exists()); + } + + //Check that number of files does not grow when we are out of space + @Test + public void testNoSpace() throws Exception + { + Options options = new Options(); + SpecialEnv env = new SpecialEnv(defaultEnv); + DbStringWrapper db = new DbStringWrapper(options, databaseDir, env); + + db.put("foo", "v1"); + assertEquals(db.get("foo"), "v1"); + db.compactRange("a", "z"); + int numFiles = databaseDir.listFiles().size(); + env.noSpace.set(true); // Force out-of-space errors + for (int i = 0; i < 10; i++) { + for (int level = 0; level < DbConstants.NUM_LEVELS - 1; level++) { + db.testCompactRange(level, null, null); + } + } + env.noSpace.set(false); + assertTrue(databaseDir.listFiles().size() < numFiles + 3); + } + + @Test + public void testNonWritableFileSystem() throws Exception + { + Options options = new Options(); + options.writeBufferSize(1000); + SpecialEnv env = new SpecialEnv(defaultEnv); + DbStringWrapper db = new DbStringWrapper(options, databaseDir, env); + db.put("foo", "v1"); + env.nonWritable.set(true); // Force errors for new files + String big = 
longString(100000, 'x'); + int errors = 0; + for (int i = 0; i < 20; i++) { + try { + db.put("foo", big); + } + catch (Exception e) { + errors++; + Thread.sleep(100); + } + } + assertTrue(errors > 0); + env.nonWritable.set(false); + } + + @Test + public void testWriteSyncError() throws Exception + { + // Check that log sync errors cause the DB to disallow future writes. + + // (a) Cause log sync calls to fail + SpecialEnv env = new SpecialEnv(defaultEnv); + DbStringWrapper db = new DbStringWrapper(new Options(), databaseDir, env); + env.dataSyncError.set(true); + + WriteOptions w = new WriteOptions(); + // (b) Normal write should succeed + db.put("k1", "v1", w); + assertEquals(db.get("k1"), "v1"); + + // (c) Do a sync write; should fail + w.sync(true); + try { + db.put("k2", "v2", w); + Assert.fail("Should not reach this"); + } + catch (Exception ignore) { + } + assertEquals(db.get("k1"), "v1"); + assertEquals(db.get("k2"), null); + + // (d) make sync behave normally + env.dataSyncError.set(false); + + // (e) Do a non-sync write; should fail + w.sync(false); + try { + db.put("k3", "v3", w); + Assert.fail("Should not reach this"); + } + catch (Exception e) { + } + assertEquals(db.get("k1"), "v1"); + assertEquals(db.get("k2"), null); + assertEquals(db.get("k3"), null); + } + + @Test + public void testManifestWriteError() throws Exception + { + // Test for the following problem: + // (a) Compaction produces file F + // (b) Log record containing F is written to MANIFEST file, but Sync() fails + // (c) GC deletes F + // (d) After reopening DB, reads fail since deleted F is named in log record + + // We iterate twice. In the second iteration, everything is the + // same except the log record never makes it to the MANIFEST file. + SpecialEnv specialEnv = new SpecialEnv(defaultEnv); + for (int iter = 0; iter < 2; iter++) { + AtomicBoolean errorType = (iter == 0) + ? 
specialEnv.manifestSyncError + : specialEnv.manifestWriteError; + + // Insert foo=>bar mapping + Options options = new Options(); + options.createIfMissing(true); + options.errorIfExists(false); + DbStringWrapper db = new DbStringWrapper(options, databaseDir, specialEnv); + db.put("foo", "bar"); + assertEquals(db.get("foo"), "bar"); + + // Memtable compaction (will succeed) + db.testCompactMemTable(); + assertEquals(db.get("foo"), "bar"); + int last = DbConstants.MAX_MEM_COMPACT_LEVEL; + assertEquals(db.numberOfFilesInLevel(last), 1); // foo=>bar is now in last level + + // Merging compaction (will fail) + errorType.set(true); + db.testCompactRange(last, null, null); // Should fail + assertEquals(db.get("foo"), "bar"); + + // Recovery: should not lose data + errorType.set(false); + db.reopen(); + assertEquals(db.get("foo"), "bar"); + db.close(); + databaseDir.deleteRecursively(); + } + } + + @Test(expectedExceptions = IllegalArgumentException.class, expectedExceptionsMessageRegExp = ".* missing files") + public void testMissingSSTFile() throws Exception + { + DbStringWrapper db = new DbStringWrapper(new Options(), databaseDir, defaultEnv); + db.put("foo", "bar"); + assertEquals(db.get("foo"), "bar"); + + // Dump the memtable to disk. + db.testCompactMemTable(); + assertEquals(db.get("foo"), "bar"); + + db.close(); + assertTrue(deleteAnSSTFile()); + db.options.paranoidChecks(true); + db.reopen(); + } + + private boolean deleteAnSSTFile() + { + for (org.iq80.leveldb.env.File f : databaseDir.listFiles()) { + Filename.FileInfo fileInfo = Filename.parseFileName(f); + if (fileInfo != null && fileInfo.getFileType() == Filename.FileType.TABLE) { + assertTrue(f.delete()); + return true; + } + } + return false; + } + + @Test + public void testStillReadSST() throws Exception + { + DbStringWrapper db = new DbStringWrapper(new Options(), databaseDir, defaultEnv); + db.put("foo", "bar"); + assertEquals("bar", db.get("foo")); + + // Dump the memtable to disk. 
+ db.testCompactMemTable(); + assertEquals("bar", db.get("foo")); + db.close(); + assertTrue(renameLDBToSST() > 0); + Options options = new Options(); + options.paranoidChecks(true); + options.errorIfExists(false); + db.reopen(); + assertEquals("bar", db.get("foo")); + } + + // Returns number of files renamed. + private int renameLDBToSST() + { + int filesRenamed = 0; + for (File f : databaseDir.listFiles()) { + Filename.FileInfo fileInfo = Filename.parseFileName(f); + if (fileInfo != null && fileInfo.getFileType() == Filename.FileType.TABLE) { + assertTrue(f.renameTo(f.getParentFile().child(Filename.sstTableFileName(fileInfo.getFileNumber())))); + filesRenamed++; + } + } + return filesRenamed; + } + + @Test + public void testFilesDeletedAfterCompaction() throws Exception + { + File counting = databaseDir.child("counting"); + DbStringWrapper db = new DbStringWrapper(new Options(), counting, defaultEnv); + db.put("foo", "v2"); + db.compactRange("a", "z"); + int files = databaseDir.listFiles().size(); + for (int i = 0; i < 10; i++) { + db.put("foo", "v2"); + db.compactRange("a", "z"); + } + assertEquals(databaseDir.listFiles().size(), files); + } + + @Test + public void testBloomFilter() throws Exception + { + SpecialEnv env = new SpecialEnv(defaultEnv); + env.countRandomReads = true; + Options options = new Options() + .filterPolicy(new BloomFilterPolicy(10)) + .cacheSize(0); + DbStringWrapper db = new DbStringWrapper(options, databaseDir, env); + // Populate multiple layers + int n = 10000; + for (int i = 0; i < n; i++) { + db.put(key(i), key(i)); + } + db.compactRange("a", "z"); + for (int i = 0; i < n; i += 100) { + db.put(key(i), key(i)); + } + db.testCompactMemTable(); + + // Prevent auto compactions triggered by seeks + env.delayDataSync.set(true); + + // Lookup present keys. Should rarely read from small sstable. 
+ env.randomReadCounter.set(0); + for (int i = 0; i < n; i++) { + assertEquals(key(i), db.get(key(i))); + } + int reads = env.randomReadCounter.get(); + assertTrue(reads >= n, "no true that (reads>=n) " + reads + ">=" + n); + assertTrue(reads <= n + 2 * n / 100, "no true that (reads <= n + 2 * n / 100): " + reads + "<= " + n + " + 2 * " + n + " / 100"); + + // Lookup missing keys. Should rarely read from either sstable. + env.randomReadCounter.set(0); + for (int i = 0; i < n; i++) { + assertNull(db.get(key(i) + ".missing")); + } + reads = env.randomReadCounter.get(); + assertTrue(reads <= 3 * n / 100); + + env.delayDataSync.set(false); + db.close(); + } + + /** + * Besides the current test, at the end of every {@link DbImplTest} test case, close is asserted for opened file handles. + */ + @Test(dataProvider = "options") + public void testFileHandlesClosed(final Options options) throws Exception + { + assertTrue(options.maxOpenFiles() > 2); //for this test to work + DbStringWrapper db = new DbStringWrapper(options, databaseDir, defaultEnv); + fillLevels(db, "A", "C"); + assertNotNull(db.get("A")); + assertNull(db.get("A.missing")); + db.db.invalidateAllCaches(); + assertEquals(db.getOpenHandles(), 3, "All files but log and manifest should be closed"); + try (SeekingIterator iterator = db.iterator()) { + iterator.seek("B"); + assertNotNull(iterator.key()); + assertNotNull(iterator.value()); + assertTrue(db.getOpenHandles() > 3); + } + db.db.invalidateAllCaches(); + //with no compaction running and no cache, all db files should be closed but log and manifest + assertEquals(db.getOpenHandles(), 3, "All files but log and manifest should be closed"); + db.close(); + assertEquals(db.getOpenHandles(), 0, "All files should be closed"); + } + + // Do n memtable compactions, each of which produces an sstable + // covering the range [small,large]. 
+ private void makeTables(DbStringWrapper db, int n, String small, String large) + { + for (int i = 0; i < n; i++) { + db.put(small, "begin"); + db.put(large, "end"); + db.testCompactMemTable(); + } + } + + @SafeVarargs + private final void testDb(DbStringWrapper db, Entry... entries) + throws IOException + { + testDb(db, asList(entries)); + } + + private void testDb(DbStringWrapper db, List> entries) throws IOException + { + for (Entry entry : entries) { + db.put(entry.getKey(), entry.getValue()); + } + + for (Entry entry : entries) { + String actual = db.get(entry.getKey()); + assertEquals(actual, entry.getValue(), "Key: " + entry.getKey()); + } + + SeekingIterator seekingIterator = db.iterator(); + seekingIterator.seekToFirst(); + assertSequence(seekingIterator, entries); + + seekingIterator.seekToFirst(); + assertSequence(seekingIterator, entries); + + for (Entry entry : entries) { + List> nextEntries = entries.subList(entries.indexOf(entry), entries.size()); + seekingIterator.seek(entry.getKey()); + assertSequence(seekingIterator, nextEntries); + + seekingIterator.seek(beforeString(entry)); + assertSequence(seekingIterator, nextEntries); + + seekingIterator.seek(afterString(entry)); + assertSequence(seekingIterator, nextEntries.subList(1, nextEntries.size())); + } + + Slice endKey = Slices.wrappedBuffer(new byte[] {(byte) 0xFF, (byte) 0xFF, (byte) 0xFF, (byte) 0xFF}); + seekingIterator.seek(endKey.toString(UTF_8)); + assertSequence(seekingIterator, Collections.emptyList()); + seekingIterator.close(); + } + + @BeforeMethod + public void setUp() + { + defaultEnv = EnvImpl.createEnv(); + databaseDir = defaultEnv.createTempDir("leveldb"); + } + + @AfterMethod + public void tearDown() + { + for (DbStringWrapper db : opened) { + db.close(); + } + opened.clear(); + boolean b = databaseDir.deleteRecursively(); + //assertion is specially useful in windows + assertFalse(!b && databaseDir.exists(), "Dir should be possible to delete! All files should have been released. 
Existing files: " + databaseDir.listFiles()); + } + + private void assertBetween(long actual, int smallest, int greatest) + { + if (!between(actual, smallest, greatest)) { + fail(String.format("Expected: %s to be between %s and %s", actual, smallest, greatest)); + } + } + + private void assertNoNextElement(SeekingIterator iterator) + { + assertFalse(iterator.valid()); + assertFalse(iterator.next()); + assertThrows(NoSuchElementException.class, iterator::key); + assertThrows(NoSuchElementException.class, iterator::value); + } + + static byte[] toByteArray(String value) + { + return value.getBytes(UTF_8); + } + + private static String randomString(Random random, int length) + { + char[] chars = new char[length]; + for (int i = 0; i < chars.length; i++) { + chars[i] = (char) ((int) ' ' + random.nextInt(95)); + } + return new String(chars); + } + + private static String longString(int length, char character) + { + char[] chars = new char[length]; + Arrays.fill(chars, character); + return new String(chars); + } + + public static String key(int i) + { + return String.format("key%06d", i); + } + + private boolean between(long size, long left, long right) + { + return left <= size && size <= right; + } + + private void fillLevels(DbStringWrapper db, String smallest, String largest) + { + makeTables(db, NUM_LEVELS, smallest, largest); + } + + private final ArrayList opened = new ArrayList<>(); + + private static class LexicographicalReverseDBComparator + implements DBComparator + { + @Override + public String name() + { + return "test"; + } + + @Override + public int compare(byte[] sliceA, byte[] sliceB) + { + // reverse order + return -(UnsignedBytes.lexicographicalComparator().compare(sliceA, sliceB)); + } + + @Override + public byte[] findShortestSeparator(byte[] start, byte[] limit) + { + // Find length of common prefix + int sharedBytes = calculateSharedBytes(start, limit); + + // Do not shorten if one string is a prefix of the other + if (sharedBytes < 
Math.min(start.length, limit.length)) { + // if we can add one to the last shared byte without overflow and the two keys differ by more than + // one increment at this location. + int lastSharedByte = start[sharedBytes] & 0xff; + if (lastSharedByte < 0xff && lastSharedByte + 1 < limit[sharedBytes]) { + byte[] result = Arrays.copyOf(start, sharedBytes + 1); + result[sharedBytes] = (byte) (lastSharedByte + 1); + + assert (compare(result, limit) < 0) : "start must be less than last limit"; + return result; + } + } + return start; + } + + @Override + public byte[] findShortSuccessor(byte[] key) + { + // Find first character that can be incremented + for (int i = 0; i < key.length; i++) { + int b = key[i]; + if (b != 0xff) { + byte[] result = Arrays.copyOf(key, i + 1); + result[i] = (byte) (b + 1); + return result; + } + } + // key is a run of 0xffs. Leave it alone. + return key; + } + + private int calculateSharedBytes(byte[] leftKey, byte[] rightKey) + { + int sharedKeyBytes = 0; + + if (leftKey != null && rightKey != null) { + int minSharedKeyBytes = Math.min(leftKey.length, rightKey.length); + while (sharedKeyBytes < minSharedKeyBytes && leftKey[sharedKeyBytes] == rightKey[sharedKeyBytes]) { + sharedKeyBytes++; + } + } + + return sharedKeyBytes; + } + } + + private class DbStringWrapper + { + private final Options options; + private final File databaseDir; + private final CountingHandlesEnv env1; + private DbImpl db; + + private DbStringWrapper(Options options, File databaseDir) + throws IOException + { + this(options, databaseDir, defaultEnv); + } + + private DbStringWrapper(Options options, File databaseDir, Env env) + throws IOException + { + this.options = options.paranoidChecks(true).createIfMissing(true).errorIfExists(true); + this.databaseDir = databaseDir; + env1 = new CountingHandlesEnv(env); + this.db = new DbImpl(options, databaseDir.getPath(), env1); + opened.add(this); + } + + //get non closed file handles + public int getOpenHandles() + { + return 
env1.getOpenHandles(); + } + + public String get(String key) + { + byte[] slice = db.get(toByteArray(key)); + if (slice == null) { + return null; + } + return new String(slice, UTF_8); + } + + public String get(String key, Snapshot snapshot) + { + byte[] slice = db.get(toByteArray(key), new ReadOptions().snapshot(snapshot)); + if (slice == null) { + return null; + } + return new String(slice, UTF_8); + } + + public void put(String key, String value) + { + db.put(toByteArray(key), toByteArray(value)); + } + + public void put(String key, String value, WriteOptions wo) + { + db.put(toByteArray(key), toByteArray(value), wo); + } + + public void delete(String key) + { + db.delete(toByteArray(key)); + } + + public SeekingIterator iterator() + { + return SeekingDBIteratorAdapter.toSeekingIterator(db.iterator(), k -> k.getBytes(UTF_8), k -> new String(k, UTF_8), v -> new String(v, UTF_8)); + } + + public SeekingIterator iterator(ReadOptions readOption) + { + return SeekingDBIteratorAdapter.toSeekingIterator(db.iterator(readOption), k -> k.getBytes(UTF_8), k -> new String(k, UTF_8), v -> new String(v, UTF_8)); + } + + public Snapshot getSnapshot() + { + return db.getSnapshot(); + } + + public void close() + { + db.close(); + assertEquals(env1.getOpenHandles(), 0, "All files should be closed"); + } + + public void testCompactMemTable() + { + db.testCompactMemTable(); + db.waitForBackgroundCompactationToFinish(); + } + + public void compactRange(String start, String limit) + { + db.compactRange(start == null ? null : Slices.copiedBuffer(start, UTF_8).getBytes(), limit == null ? null : Slices.copiedBuffer(limit, UTF_8).getBytes()); + db.waitForBackgroundCompactationToFinish(); + } + + public void testCompactRange(int level, String start, String limit) + { + db.testCompactRange(level, start == null ? null : Slices.copiedBuffer(start, UTF_8), limit == null ? 
null : Slices.copiedBuffer(limit, UTF_8)); + db.waitForBackgroundCompactationToFinish(); + } + + public void waitForBackgroundCompactationToFinish() + { + db.waitForBackgroundCompactationToFinish(); + } + + public int numberOfFilesInLevel(int level) + { + return db.numberOfFilesInLevel(level); + } + + public int totalTableFiles() + { + int result = 0; + for (int level = 0; level < NUM_LEVELS; level++) { + result += db.numberOfFilesInLevel(level); + } + return result; + } + + // Return spread of files per level + public String filesPerLevel() + { + StringBuilder result = new StringBuilder(); + int lastNonZeroOffset = 0; + for (int level = 0; level < DbConstants.NUM_LEVELS; level++) { + int f = db.numberOfFilesInLevel(level); + if (result.length() > 0) { + result.append(","); + } + result.append(f); + if (f > 0) { + lastNonZeroOffset = result.length(); + } + } + result.setLength(lastNonZeroOffset); + return result.toString(); + } + + public long size(String start, String limit) + { + return db.getApproximateSizes(new Range(toByteArray(start), toByteArray(limit))); + } + + public long getMaxNextLevelOverlappingBytes() + { + return db.getMaxNextLevelOverlappingBytes(); + } + + public void reopen() + throws IOException + { + reopen(options); + } + + public void reopen(Options options) + throws IOException + { + db.close(); + db = new DbImpl(options.paranoidChecks(true).createIfMissing(false).errorIfExists(false), databaseDir.getPath(), defaultEnv); + } + + private List allEntriesFor(String userKey) throws IOException + { + ImmutableList.Builder result = ImmutableList.builder(); + try (InternalIterator iterator = db.internalIterator(new ReadOptions())) { + for (boolean valid = iterator.seekToFirst(); valid; valid = iterator.next()) { + Entry entry = entry(iterator); + String entryKey = entry.getKey().getUserKey().toString(UTF_8); + if (entryKey.equals(userKey)) { + if (entry.getKey().getValueType() == ValueType.VALUE) { + result.add(entry.getValue().toString(UTF_8)); + } 
+ else { + result.add("DEL"); + } + } + } + } + return result.build(); + } + } + + private static class SpecialEnv implements Env + { + private Env env; + // sstable/log Sync() calls are blocked while this pointer is non-NULL. + private AtomicBoolean delayDataSync = new AtomicBoolean(); + + // sstable/log Sync() calls return an error. + private AtomicBoolean dataSyncError = new AtomicBoolean(); + + // Simulate no-space errors while this pointer is non-NULL. + private AtomicBoolean noSpace = new AtomicBoolean(); + + // Simulate non-writable file system while this pointer is non-NULL + protected AtomicBoolean nonWritable = new AtomicBoolean(); + + // Force sync of manifest files to fail while this pointer is non-NULL + private AtomicBoolean manifestSyncError = new AtomicBoolean(); + + // Force write to manifest files to fail while this pointer is non-NULL + private AtomicBoolean manifestWriteError = new AtomicBoolean(); + boolean countRandomReads; + + AtomicInteger randomReadCounter = new AtomicInteger(); + + public SpecialEnv(Env env) + { + this.env = env; + } + + @Override + public long nowMicros() + { + return env.nowMicros(); + } + + @Override + public org.iq80.leveldb.env.File toFile(String filename) + { + return env.toFile(filename); + } + + @Override + public org.iq80.leveldb.env.File createTempDir(String prefix) + { + return env.createTempDir(prefix); + } + + @Override + public SequentialFile newSequentialFile(org.iq80.leveldb.env.File file) throws IOException + { + return env.newSequentialFile(file); + } + + @Override + public RandomInputFile newRandomAccessFile(org.iq80.leveldb.env.File file) throws IOException + { + RandomInputFile randomInputFile = env.newRandomAccessFile(file); + if (countRandomReads) { + return new CountingFile(randomInputFile); + } + return randomInputFile; + } + + @Override + public WritableFile newWritableFile(org.iq80.leveldb.env.File file) throws IOException + { + if (nonWritable.get()) { + throw new IOException("simulated write 
error"); + } + if (file.getName().endsWith(".ldb") || file.getName().endsWith(".log")) { + return new DataFile(env.newWritableFile(file)); + } + else { + return new ManifestFile(env.newWritableFile(file)); + } + } + + @Override + public WritableFile newAppendableFile(org.iq80.leveldb.env.File file) throws IOException + { + return env.newAppendableFile(file); + } + + @Override + public Logger newLogger(org.iq80.leveldb.env.File loggerFile) throws IOException + { + return env.newLogger(loggerFile); + } + + @Override + public DbLock tryLock(org.iq80.leveldb.env.File file) throws IOException + { + return env.tryLock(file); + } + + @Override + public void writeStringToFileSync(File file, String content) throws IOException + { + env.writeStringToFileSync(file, content); + } + + @Override + public String readFileToString(File file) throws IOException + { + return env.readFileToString(file); + } + + private class CountingFile implements RandomInputFile + { + private RandomInputFile randomInputFile; + + public CountingFile(RandomInputFile randomInputFile) + { + this.randomInputFile = randomInputFile; + } + + @Override + public long size() + { + return randomInputFile.size(); + } + + @Override + public ByteBuffer read(long offset, int length) throws IOException + { + randomReadCounter.incrementAndGet(); + return randomInputFile.read(offset, length); + } + + @Override + public void close() throws IOException + { + randomInputFile.close(); + } + } + + private class DataFile implements WritableFile + { + private final WritableFile writableFile; + + public DataFile(WritableFile writableFile) + { + this.writableFile = writableFile; + } + + @Override + public void append(Slice data) throws IOException + { + if (noSpace.get()) { + // Drop writes on the floor + } + else { + writableFile.append(data); + } + } + + @Override + public void force() throws IOException + { + if (dataSyncError.get()) { + throw new IOException("simulated data sync error"); + } + while (delayDataSync.get()) { 
+ try { + Thread.sleep(100); + } + catch (InterruptedException e) { + throw new IOException(e); + } + } + writableFile.force(); + } + + @Override + public void close() throws IOException + { + writableFile.close(); + } + } + + private class ManifestFile implements WritableFile + { + private WritableFile writableFile; + + public ManifestFile(WritableFile writableFile) + { + this.writableFile = writableFile; + } + + @Override + public void append(Slice data) throws IOException + { + if (manifestWriteError.get()) { + throw new IOException("simulated writer error"); + } + writableFile.append(data); + } + + @Override + public void force() throws IOException + { + if (manifestSyncError.get()) { + throw new IOException("simulated sync error"); + } + writableFile.force(); + } + + @Override + public void close() throws IOException + { + writableFile.close(); + } + } + } + + static class OptionsDesc extends Options + { + private String desc; + + OptionsDesc(String desc) + { + this.desc = desc; + } + + @Override + public String toString() + { + return "Options{" + desc + '}'; + } + } +} diff --git a/leveldb/src/test/java/org/iq80/leveldb/impl/FilenameTest.java b/leveldb/src/test/java/org/iq80/leveldb/impl/FilenameTest.java new file mode 100644 index 0000000..2f5dc1e --- /dev/null +++ b/leveldb/src/test/java/org/iq80/leveldb/impl/FilenameTest.java @@ -0,0 +1,103 @@ +/* + * Copyright (C) 2011 the original author or authors. + * See the notice.md file distributed with this work for additional + * information regarding copyright ownership. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.iq80.leveldb.impl; + +import com.google.common.collect.Lists; +import org.iq80.leveldb.fileenv.EnvImpl; +import org.testng.annotations.Test; + +import java.util.List; + +import static org.testng.Assert.assertEquals; +import static org.testng.Assert.assertNotNull; +import static org.testng.Assert.assertNull; + +public class FilenameTest +{ + @Test + public void testFileNameTest() + { + assertFileInfo("100.log", 100L, Filename.FileType.LOG); + assertFileInfo("0.log", 0L, Filename.FileType.LOG); + assertFileInfo("0.sst", 0L, Filename.FileType.TABLE); + assertFileInfo("0.ldb", 0L, Filename.FileType.TABLE); + assertFileInfo("CURRENT", 0L, Filename.FileType.CURRENT); + assertFileInfo("LOCK", 0L, Filename.FileType.DB_LOCK); + assertFileInfo("MANIFEST-2", 2L, Filename.FileType.DESCRIPTOR); + assertFileInfo("MANIFEST-7", 7L, Filename.FileType.DESCRIPTOR); + assertFileInfo("LOG", 0L, Filename.FileType.INFO_LOG); + assertFileInfo("LOG.old", 0L, Filename.FileType.INFO_LOG); + assertFileInfo("18446744073709551615.log", -1L, Filename.FileType.LOG); + assertFileInfo("099876.ldb", 99876L, Filename.FileType.TABLE); + } + + @Test + public void testShouldNotParse() + { + List errors = Lists.newArrayList("", + "foo", + "foo-dx-100.log", + ".log", + "", + "manifest", + "CURREN", + "CURRENTX", + "MANIFES", + "MANIFEST", + "MANIFEST-", + "XMANIFEST-3", + "MANIFEST-3x", + "LOC", + "LOCKx", + "LO", + "LOGx", + "18446744073709551616.log", + "184467440737095516150.log", + "100", + "100.", + "100.lop"); + for (String error : errors) { + 
assertNull(Filename.parseFileName(EnvImpl.createEnv().toFile(error))); + } + } + + @Test + public void testGeneratedFileNameAreAsExpected() + { + assertEquals(Filename.tableFileName(-1L), "18446744073709551615.ldb"); + + assertFileInfo(Filename.currentFileName(), 0, Filename.FileType.CURRENT); + assertFileInfo(Filename.lockFileName(), 0, Filename.FileType.DB_LOCK); + assertFileInfo(Filename.logFileName(192), 192, Filename.FileType.LOG); + assertFileInfo(Filename.tableFileName(200), 200, Filename.FileType.TABLE); + assertFileInfo(Filename.tableFileName(-1L), -1L, Filename.FileType.TABLE); + assertFileInfo(Filename.descriptorFileName(100), 100, Filename.FileType.DESCRIPTOR); + assertFileInfo(Filename.tempFileName(999), 999, Filename.FileType.TEMP); + assertFileInfo(Filename.infoLogFileName(), 0, Filename.FileType.INFO_LOG); + assertFileInfo(Filename.oldInfoLogFileName(), 0, Filename.FileType.INFO_LOG); + assertFileInfo(Filename.sstTableFileName(344), 344, Filename.FileType.TABLE); + } + + private void assertFileInfo(String file, long expectedNumber, Filename.FileType expectedType) + { + Filename.FileInfo fileInfo = Filename.parseFileName(EnvImpl.createEnv().toFile(file)); + assertNotNull(fileInfo); + assertEquals(fileInfo.getFileNumber(), expectedNumber); + assertEquals(fileInfo.getFileType(), expectedType); + } +} diff --git a/leveldb/src/test/java/org/iq80/leveldb/impl/GIssue320Test.java b/leveldb/src/test/java/org/iq80/leveldb/impl/GIssue320Test.java new file mode 100644 index 0000000..b74a0de --- /dev/null +++ b/leveldb/src/test/java/org/iq80/leveldb/impl/GIssue320Test.java @@ -0,0 +1,238 @@ +/* + * Copyright (C) 2011 the original author or authors. + * See the notice.md file distributed with this work for additional + * information regarding copyright ownership. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.iq80.leveldb.impl; + +import com.google.common.collect.Maps; +import org.iq80.leveldb.DB; +import org.iq80.leveldb.DBIterator; +import org.iq80.leveldb.Options; +import org.iq80.leveldb.ReadOptions; +import org.iq80.leveldb.Snapshot; +import org.iq80.leveldb.WriteBatch; +import org.iq80.leveldb.WriteOptions; +import org.iq80.leveldb.fileenv.EnvImpl; +import org.iq80.leveldb.fileenv.FileUtils; +import org.iq80.leveldb.table.BytewiseComparator; +import org.iq80.leveldb.util.Closeables; +import org.iq80.leveldb.util.Slices; +import org.testng.Assert; +import org.testng.annotations.AfterMethod; +import org.testng.annotations.BeforeMethod; +import org.testng.annotations.Test; + +import java.io.File; +import java.io.IOException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Iterator; +import java.util.List; +import java.util.Map; +import java.util.Random; +import java.util.function.Consumer; + +import static org.testng.Assert.assertEquals; +import static org.testng.Assert.assertFalse; +import static org.testng.Assert.assertNotEquals; +import static org.testng.Assert.assertNotNull; + +public class GIssue320Test +{ + private Random rand; + private DB db; + private File databaseDir; + + @BeforeMethod + public void setUp() + { + rand = new Random(0); + databaseDir = FileUtils.createTempDir("leveldbIssues"); + } + + @AfterMethod + public void tearDown() + { + if (db != null) { + Closeables.closeQuietly(db); + } + boolean b = FileUtils.deleteRecursively(databaseDir); + //assertion is specially useful in windows + 
assertFalse(!b && databaseDir.exists(), "Dir should be possible to delete! All files should have been released. Existing files: " + FileUtils.listFiles(databaseDir)); + } + + private byte[] newString(int index) + { + int len = 1024; + byte[] bytes = new byte[len]; + int i = 0; + while (i < 8) { + bytes[i] = (byte) ('a' + ((index >> (4 * i)) & 0xf)); + ++i; + } + while (i < bytes.length) { + bytes[i] = (byte) ('a' + rand.nextInt(26)); + ++i; + } + return bytes; + } + + @Test + public void testRaiseFromTheDead() throws IOException + { + //data that after inserted will not be deleted + final List alwaysPresentData = new ArrayList<>(); + //data that will be entirely deleted + final List dataToDelete = new ArrayList<>(); + final WriteBatchImpl batch = new WriteBatchImpl(); + + final Options rawOptions = new Options() + .errorIfExists(true) + .createIfMissing(true) + .writeBufferSize(1024 * 1024); + db = new DbImpl(rawOptions, databaseDir.getAbsolutePath(), EnvImpl.createEnv()); + Consumer writeAndClear = b -> { + db.write(b); + b.clear(); + }; + //to increase the chance to reproduce the issue, increase this number + for (int i = 0; i < 2000000; i++) { + if (rand.nextInt(50000) == 0) { + writeAndClear.accept(batch); + sortUnique(dataToDelete); + try (final Snapshot snapshot = db.getSnapshot(); final DBIterator it = db.iterator(new ReadOptions().snapshot(snapshot))) { + it.seek(new byte[] {'A'}); + for (byte[] iterableDatum : dataToDelete) { + assertEquals(iterableDatum, it.next().getKey(), + "Entry raised from the dead. 
Key should have been deleted forever"); + assertEquals(iterableDatum[0], 'A'); + batch.delete(iterableDatum); + if (rand.nextInt(500) == 0) { + batch.put(randomPrefixKey(alwaysPresentData, 'M'), newString('m')); + writeAndClear.accept(batch); + } + if (!alwaysPresentData.isEmpty() && rand.nextInt(10) == 0) { + final byte[] key = alwaysPresentData.get(rand.nextInt(alwaysPresentData.size())); + assertNotNull(db.get(key)); + } + } + i += dataToDelete.size(); + if (it.hasNext()) { + //all "A" prefixed keys where deleted so only non A prefixed keys should exist + assertNotEquals(it.next().getKey()[0], 'A'); + } + dataToDelete.clear(); + writeAndClear.accept(batch); + } + } + else { + if (rand.nextInt(100) == 0) { + batch.put(randomPrefixKey(alwaysPresentData, 'Z'), newString('x')); + writeAndClear.accept(batch); + } + batch.put(randomPrefixKey(dataToDelete, 'A'), new byte[0]); + } + } + } + + private byte[] randomPrefixKey(List cache, char prefix) + { + final byte[] key = new byte[197]; + key[0] = (byte) prefix; + for (int i = 1; i < key.length; i++) { + key[i] = (byte) (rand.nextInt(26) + 'a'); + } + cache.add(key); + return key; + } + + private void sortUnique(List iterableData) + { + final BytewiseComparator bytewiseComparator = new BytewiseComparator(); + iterableData.sort((o1, o2) -> bytewiseComparator.compare(Slices.wrappedBuffer(o1), Slices.wrappedBuffer(o2))); + byte[] p = null; + for (Iterator iterator = iterableData.iterator(); iterator.hasNext(); ) { + byte[] s = iterator.next(); + if (Arrays.equals(s, p)) { + iterator.remove(); + } + p = s; + } + } + + @Test + public void testIssue320() throws IOException + { + Map.Entry[] testMap = new Map.Entry[10000]; + Snapshot[] snapshots = new Snapshot[100]; + + db = new DbImpl(new Options().createIfMissing(true), databaseDir.getAbsolutePath(), EnvImpl.createEnv()); + + int targetSize = 10000; + int numItems = 0; + long count = 0; + + WriteOptions writeOptions = new WriteOptions(); + while (count++ < 200000) { + int 
index = rand.nextInt(testMap.length); + WriteBatch batch = new WriteBatchImpl(); + + if (testMap[index] == null) { + numItems++; + testMap[index] = + Maps.immutableEntry(newString(index), newString(index)); + batch.put(testMap[index].getKey(), testMap[index].getValue()); + } + else { + byte[] oldValue = db.get(testMap[index].getKey()); + if (!Arrays.equals(oldValue, testMap[index].getValue())) { + Assert.fail("ERROR incorrect value returned by get" + + " \ncount=" + count + + " \nold value=" + new String(oldValue) + + " \ntestMap[index].getValue()=" + new String(testMap[index].getValue()) + + " \ntestMap[index].getKey()=" + new String(testMap[index].getKey()) + + " \nindex=" + index); + } + + if (numItems >= targetSize && rand.nextInt(100) > 30) { + batch.delete(testMap[index].getKey()); + testMap[index] = null; + --numItems; + } + else { + testMap[index] = Maps.immutableEntry(testMap[index].getKey(), newString(index)); + batch.put(testMap[index].getKey(), testMap[index].getValue()); + } + } + + db.write(batch, writeOptions); + + if (rand.nextInt(10) == 0) { + final int i = rand.nextInt(snapshots.length); + if (snapshots[i] != null) { + snapshots[i].close(); + } + snapshots[i] = db.getSnapshot(); + } + } + for (Snapshot snapshot : snapshots) { + if (snapshot != null) { + snapshot.close(); + } + } + db.close(); + } +} diff --git a/leveldb/src/test/java/org/iq80/leveldb/impl/InternalKeyTest.java b/leveldb/src/test/java/org/iq80/leveldb/impl/InternalKeyTest.java new file mode 100644 index 0000000..7dcf7db --- /dev/null +++ b/leveldb/src/test/java/org/iq80/leveldb/impl/InternalKeyTest.java @@ -0,0 +1,70 @@ +/* + * Copyright (C) 2011 the original author or authors. + * See the notice.md file distributed with this work for additional + * information regarding copyright ownership. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.iq80.leveldb.impl; + +import org.iq80.leveldb.util.Slice; +import org.iq80.leveldb.util.Slices; +import org.testng.Assert; +import org.testng.annotations.Test; + +import static java.nio.charset.StandardCharsets.UTF_8; +import static org.testng.Assert.assertEquals; + +public class InternalKeyTest +{ + @Test + public void testEncodeDecode() throws Exception + { + String[] keys = {"", "k", "hello", "longggggggggggggggggggggg"}; + long[] seq = { + 1, 2, 3, + (1L << 8) - 1, 1L << 8, (1L << 8) + 1, + (1L << 16) - 1, 1L << 16, (1L << 16) + 1, + (1L << 32) - 1, 1L << 32, (1L << 32) + 1 + }; + for (String key : keys) { + for (long s : seq) { + testKey(key, s, ValueType.VALUE); + testKey("hello", 1, ValueType.DELETION); + } + } + try { + InternalKey internalKey = new InternalKey(new Slice("bar".getBytes(UTF_8))); + Assert.fail("value " + internalKey + " ot expected"); + } + catch (Exception e) { + //expected + } + } + + @Test(expectedExceptions = IllegalArgumentException.class) + public void testDecodeEmpty() + { + new InternalKey(Slices.wrappedBuffer(new byte[0])); + } + + private void testKey(String key, long seq, ValueType valueType) + { + InternalKey k = new InternalKey(Slices.wrappedBuffer(key.getBytes(UTF_8)), seq, valueType); + InternalKey decoded = new InternalKey(k.encode()); + + assertEquals(key, decoded.getUserKey().toString(UTF_8)); + assertEquals(seq, decoded.getSequenceNumber()); + assertEquals(valueType, decoded.getValueType()); + } +} diff --git a/leveldb/src/test/java/org/iq80/leveldb/impl/InternalUserComparatorTest.java 
b/leveldb/src/test/java/org/iq80/leveldb/impl/InternalUserComparatorTest.java new file mode 100644 index 0000000..5e742ef --- /dev/null +++ b/leveldb/src/test/java/org/iq80/leveldb/impl/InternalUserComparatorTest.java @@ -0,0 +1,101 @@ +/* + * Copyright (C) 2011 the original author or authors. + * See the notice.md file distributed with this work for additional + * information regarding copyright ownership. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.iq80.leveldb.impl; + +import org.iq80.leveldb.table.BytewiseComparator; +import org.iq80.leveldb.util.Slice; +import org.iq80.leveldb.util.Slices; +import org.testng.annotations.Test; + +import static java.nio.charset.StandardCharsets.UTF_8; +import static org.testng.Assert.assertEquals; + +public class InternalUserComparatorTest +{ + final ValueType valueTypeForSeek = ValueType.VALUE; + + @Test + public void testInternalKeyShortSeparator() + { + assertEquals(key("foo", 100, ValueType.VALUE), + shorten(key("foo", 100, ValueType.VALUE), + key("foo", 99, ValueType.VALUE))); + assertEquals(key("foo", 100, ValueType.VALUE), + shorten(key("foo", 100, ValueType.VALUE), + key("foo", 101, ValueType.VALUE))); + assertEquals(key("foo", 100, ValueType.VALUE), + shorten(key("foo", 100, ValueType.VALUE), + key("foo", 100, ValueType.VALUE))); + assertEquals(key("foo", 100, ValueType.VALUE), + shorten(key("foo", 100, ValueType.VALUE), + key("foo", 100, ValueType.DELETION))); + + // When user keys are 
misordered + assertEquals(key("foo", 100, ValueType.VALUE), + shorten(key("foo", 100, ValueType.VALUE), + key("bar", 99, ValueType.VALUE))); + + // When user keys are different, but correctly ordered + assertEquals(key("g", SequenceNumber.MAX_SEQUENCE_NUMBER, valueTypeForSeek), + shorten(key("foo", 100, ValueType.VALUE), + key("hello", 200, ValueType.VALUE))); + + // When start user key is prefix of limit user key + assertEquals(key("foo", 100, ValueType.VALUE), + shorten(key("foo", 100, ValueType.VALUE), + key("foobar", 200, ValueType.VALUE))); + + // When limit user key is prefix of start user key + assertEquals(key("foobar", 100, ValueType.VALUE), + shorten(key("foobar", 100, ValueType.VALUE), + key("foo", 200, ValueType.VALUE))); + } + + @Test + public void testInternalKeyShortestSuccessor() + { + assertEquals(key("g", SequenceNumber.MAX_SEQUENCE_NUMBER, valueTypeForSeek), + shortSuccessor(key("foo", 100, ValueType.VALUE))); + assertEquals(key(new byte[] {(byte) 0xff, (byte) 0xff}, 100, ValueType.VALUE), + shortSuccessor(key(new byte[] {(byte) 0xff, (byte) 0xff}, 100, ValueType.VALUE))); + } + + private InternalKey key(String foo, long sequenceNumber, ValueType value) + { + return new InternalKey(Slices.wrappedBuffer(foo.getBytes(UTF_8)), sequenceNumber, value); + } + + private InternalKey key(byte[] key, long sequenceNumber, ValueType value) + { + return new InternalKey(Slices.wrappedBuffer(key), sequenceNumber, value); + } + + private InternalKey shorten(InternalKey s, InternalKey l) + { + Slice shortestSeparator = new InternalUserComparator(new InternalKeyComparator(new BytewiseComparator())) + .findShortestSeparator(s.encode(), l.encode()); + return new InternalKey(shortestSeparator); + } + + private InternalKey shortSuccessor(InternalKey s) + { + Slice shortestSeparator = new InternalUserComparator(new InternalKeyComparator(new BytewiseComparator())) + .findShortSuccessor(s.encode()); + return new InternalKey(shortestSeparator); + } +} diff --git 
a/leveldb/src/test/java/org/iq80/leveldb/impl/LogReaderTest.java b/leveldb/src/test/java/org/iq80/leveldb/impl/LogReaderTest.java new file mode 100644 index 0000000..b21b5be --- /dev/null +++ b/leveldb/src/test/java/org/iq80/leveldb/impl/LogReaderTest.java @@ -0,0 +1,736 @@ +/* + * Copyright (C) 2011 the original author or authors. + * See the notice.md file distributed with this work for additional + * information regarding copyright ownership. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.iq80.leveldb.impl; + +import com.google.common.base.Preconditions; +import com.google.common.base.Strings; +import org.iq80.leveldb.util.PureJavaCrc32C; +import org.iq80.leveldb.env.SequentialFile; +import org.iq80.leveldb.util.Slice; +import org.iq80.leveldb.util.SliceOutput; +import org.iq80.leveldb.env.WritableFile; +import org.testng.annotations.BeforeMethod; +import org.testng.annotations.Test; + +import java.io.ByteArrayOutputStream; +import java.io.IOException; +import java.util.Random; + +import static org.iq80.leveldb.impl.LogConstants.BLOCK_SIZE; +import static org.iq80.leveldb.impl.LogConstants.HEADER_SIZE; +import static org.testng.Assert.assertEquals; +import static org.testng.Assert.assertNull; +import static org.testng.Assert.assertTrue; + +public class LogReaderTest +{ + private StringDest dest; + private StringSource source; + private ReportCollector report; + + private boolean reading; + private LogWriter writer; + private LogReader reader; + private int[] initialOffsetRecordSizes; + private long[] initialOffsetLastRecordOffsets; + private int numInitialOffsetRecords; + + @BeforeMethod + public void setUp() + { + dest = new StringDest(); + source = new StringSource(); + report = new ReportCollector(); + + reading = false; + writer = LogWriter.createWriter(0, dest); + reader = new LogReader(source, report, true/*checksum*/, 0/*initialOffset*/); + initialOffsetRecordSizes = new int[] { + 10000, // Two sizable records in first block + 10000, + 2 * BLOCK_SIZE - 1000, // Span three blocks + 1, + 13716, // Consume all but two bytes of block 3. + BLOCK_SIZE - HEADER_SIZE, // Consume the entirety of block 4. 
+ }; + initialOffsetLastRecordOffsets = new long[] { + 0, + HEADER_SIZE + 10000, + 2 * (HEADER_SIZE + 10000), + 2 * (HEADER_SIZE + 10000) + + (2 * BLOCK_SIZE - 1000) + 3 * HEADER_SIZE, + 2 * (HEADER_SIZE + 10000) + + (2 * BLOCK_SIZE - 1000) + 3 * HEADER_SIZE + + HEADER_SIZE + 1, + 3 * BLOCK_SIZE, + }; + numInitialOffsetRecords = initialOffsetLastRecordOffsets.length; + } + + @Test + public void testEmpty() throws Exception + { + assertEquals(read(), "EOF"); + } + + @Test + public void testReadWrite() throws Exception + { + write("foo"); + write("bar"); + write(""); + write("xxxx"); + assertEquals(read(), "foo"); + assertEquals(read(), "bar"); + assertEquals(read(), ""); + assertEquals(read(), "xxxx"); + assertEquals(read(), "EOF"); + assertEquals(read(), "EOF"); // Make sure reads at eof work + } + + @Test + public void testManyBlocks() throws Exception + { + for (int i = 0; i < 100000; i++) { + write(numberString(i)); + } + for (int i = 0; i < 100000; i++) { + assertEquals(read(), numberString(i)); + } + assertEquals(read(), "EOF"); + } + + @Test + public void testFragmentation() throws Exception + { + write("small"); + write(bigString("medium", 50000)); + write(bigString("large", 100000)); + assertEquals(read(), "small"); + assertEquals(read(), bigString("medium", 50000)); + assertEquals(read(), bigString("large", 100000)); + assertEquals(read(), "EOF"); + } + + @Test + public void testMarginalTrailer() throws Exception + { + // Make a trailer that is exactly the same length as an empty record. + int n = BLOCK_SIZE - 2 * HEADER_SIZE; + write(bigString("foo", n)); + assertEquals(writtenBytes(), BLOCK_SIZE - HEADER_SIZE); + write(""); + write("bar"); + assertEquals(read(), bigString("foo", n)); + assertEquals(read(), ""); + assertEquals(read(), "bar"); + assertEquals(read(), "EOF"); + } + + @Test + public void testMarginalTrailer2() throws Exception + { + // Make a trailer that is exactly the same length as an empty record. 
+ int n = BLOCK_SIZE - 2 * HEADER_SIZE; + write(bigString("foo", n)); + assertEquals(writtenBytes(), BLOCK_SIZE - HEADER_SIZE); + write("bar"); + assertEquals(read(), bigString("foo", n)); + assertEquals(read(), "bar"); + assertEquals(read(), "EOF"); + assertEquals(droppedBytes(), 0); + assertEquals(reportMessage(), ""); + } + + @Test + public void testShortTrailer() throws Exception + { + int n = BLOCK_SIZE - 2 * HEADER_SIZE + 4; + write(bigString("foo", n)); + assertEquals(writtenBytes(), BLOCK_SIZE - HEADER_SIZE + 4); + write(""); + write("bar"); + assertEquals(read(), bigString("foo", n)); + assertEquals(read(), ""); + assertEquals(read(), "bar"); + assertEquals(read(), "EOF"); + } + + @Test + public void testAlignedEof() throws Exception + { + int n = BLOCK_SIZE - 2 * HEADER_SIZE + 4; + write(bigString("foo", n)); + assertEquals(writtenBytes(), BLOCK_SIZE - HEADER_SIZE + 4); + assertEquals(read(), bigString("foo", n)); + assertEquals(read(), "EOF"); + } + + @Test + public void testOpenForAppend() throws Exception + { + write("hello"); + reopenForAppend(); + write("world"); + assertEquals(read(), "hello"); + assertEquals(read(), "world"); + assertEquals(read(), "EOF"); + } + + @Test + public void testRandomRead() throws Exception + { + int n = 500; + Random writeRnd = new Random(301); + for (int i = 0; i < n; i++) { + write(randomSkewedString(i, writeRnd)); + } + Random readRnd = new Random(301); + for (int i = 0; i < n; i++) { + assertEquals(read(), randomSkewedString(i, readRnd)); + } + assertEquals(read(), "EOF"); + } + + // Tests of all the error paths in LogLeader follow: + @Test + public void testReadError() throws Exception + { + write("foo"); + forceError(); + assertEquals(read(), "EOF"); + assertEquals(droppedBytes(), BLOCK_SIZE); + assertEquals(matchError("read error"), "OK"); + } + + @Test + public void testBadRecordType() throws Exception + { + write("foo"); + // Type is stored in header[6] + incrementByte(6, 100); + fixChecksum(0, 3); + 
assertEquals(read(), "EOF"); + assertEquals(droppedBytes(), 3); + assertEquals(matchError("unknown record type"), "OK"); + } + + @Test + public void testTruncatedTrailingRecordIsIgnored() throws Exception + { + write("foo"); + shrinkSize(4); // Drop all payload as well as a header byte + assertEquals(read(), "EOF"); + // Truncated last record is ignored, not treated as an error. + assertEquals(droppedBytes(), 0); + assertEquals(reportMessage(), ""); + } + + @Test + public void testBadLength() throws Exception + { + int kPayloadSize = BLOCK_SIZE - HEADER_SIZE; + write(bigString("bar", kPayloadSize)); + write("foo"); + // Least significant size byte is stored in header[4]. + incrementByte(4, 1); + assertEquals(read(), "foo"); + assertEquals(droppedBytes(), BLOCK_SIZE); + assertEquals(matchError("bad record length"), "OK"); + } + + @Test + public void testBadLengthAtEndIsIgnored() throws Exception + { + write("foo"); + shrinkSize(1); + assertEquals(read(), "EOF"); + assertEquals(droppedBytes(), 0); + assertEquals(reportMessage(), ""); + } + + @Test + public void testChecksumMismatch() throws Exception + { + write("foo"); + incrementByte(0, 10); + assertEquals(read(), "EOF"); + assertEquals(droppedBytes(), 10); + assertEquals(matchError("checksum mismatch"), "OK"); + } + + @Test + public void testUnexpectedMiddleType() throws Exception + { + write("foo"); + setByte(6, LogChunkType.MIDDLE.getPersistentId()); + fixChecksum(0, 3); + assertEquals(read(), "EOF"); + assertEquals(droppedBytes(), 3); + assertEquals(matchError("missing start"), "OK"); + } + + @Test + public void testUnexpectedLastType() throws Exception + { + write("foo"); + setByte(6, LogChunkType.LAST.getPersistentId()); + fixChecksum(0, 3); + assertEquals(read(), "EOF"); + assertEquals(droppedBytes(), 3); + assertEquals(matchError("missing start"), "OK"); + } + + @Test + public void testUnexpectedFullType() throws Exception + { + write("foo"); + write("bar"); + setByte(6, 
LogChunkType.FIRST.getPersistentId()); + fixChecksum(0, 3); + assertEquals(read(), "bar"); + assertEquals(read(), "EOF"); + assertEquals(droppedBytes(), 3); + assertEquals(matchError("partial record without end"), "OK"); + } + + @Test + public void testUnexpectedFirstType() throws Exception + { + write("foo"); + write(bigString("bar", 100000)); + setByte(6, LogChunkType.FIRST.getPersistentId()); + fixChecksum(0, 3); + assertEquals(read(), bigString("bar", 100000)); + assertEquals(read(), "EOF"); + assertEquals(droppedBytes(), 3); + assertEquals(matchError("partial record without end"), "OK"); + } + + @Test + public void testMissingLastIsIgnored() throws Exception + { + write(bigString("bar", BLOCK_SIZE)); + // Remove the LAST block, including header. + shrinkSize(14); + assertEquals(read(), "EOF"); + assertEquals(reportMessage(), ""); + assertEquals(droppedBytes(), 0); + } + + @Test + public void testPartialLastIsIgnored() throws Exception + { + write(bigString("bar", BLOCK_SIZE)); + // Cause a bad record length in the LAST block. + shrinkSize(1); + assertEquals(read(), "EOF"); + assertEquals(reportMessage(), ""); + assertEquals(droppedBytes(), 0); + } + + @Test + public void testSkipIntoMultiRecord() throws Exception + { + // Consider a fragmented record: + // first(R1), middle(R1), last(R1), first(R2) + // If initialOffset points to a record after first(R1) but before first(R2) + // incomplete fragment errors are not actual errors, and must be suppressed + // until a new first or full record is encountered. + write(bigString("foo", 3 * BLOCK_SIZE)); + write("correct"); + startReadingAt(BLOCK_SIZE); + + assertEquals(read(), "correct"); + assertEquals(reportMessage(), ""); + assertEquals(droppedBytes(), 0); + assertEquals(read(), "EOF"); + } + + @Test + public void testErrorJoinsRecords() throws Exception + { + // Consider two fragmented records: + // first(R1) last(R1) first(R2) last(R2) + // where the middle two fragments disappear. 
We do not want + // first(R1),last(R2) to get joined and returned as a valid record. + + // write records that span two blocks + write(bigString("foo", BLOCK_SIZE)); + write(bigString("bar", BLOCK_SIZE)); + write("correct"); + + // Wipe the middle block + for (int offset = BLOCK_SIZE; offset < 2 * BLOCK_SIZE; offset++) { + setByte(offset, 'x'); + } + + assertEquals(read(), "correct"); + assertEquals(read(), "EOF"); + int dropped = droppedBytes(); + assertTrue((long) dropped <= (long) (2 * BLOCK_SIZE + 100)); + assertTrue((long) dropped >= (long) (2 * BLOCK_SIZE)); + } + + @Test + public void testReadStart() throws Exception + { + checkInitialOffsetRecord(0, 0); + } + + @Test + public void testReadSecondOneOff() throws Exception + { + checkInitialOffsetRecord(1, 1); + } + + @Test + public void testReadSecondTenThousand() throws Exception + { + checkInitialOffsetRecord(10000, 1); + } + + @Test + public void testReadSecondStart() throws Exception + { + checkInitialOffsetRecord(10007, 1); + } + + @Test + public void testReadThirdOneOff() throws Exception + { + checkInitialOffsetRecord(10008, 2); + } + + @Test + public void testReadThirdStart() throws Exception + { + checkInitialOffsetRecord(20014, 2); + } + + @Test + public void testReadFourthOneOff() throws Exception + { + checkInitialOffsetRecord(20015, 3); + } + + @Test + public void testReadFourthFirstBlockTrailer() throws Exception + { + checkInitialOffsetRecord(BLOCK_SIZE - 4, 3); + } + + @Test + public void testReadFourthMiddleBlock() throws Exception + { + checkInitialOffsetRecord(BLOCK_SIZE + 1, 3); + } + + @Test + public void testReadFourthLastBlock() throws Exception + { + checkInitialOffsetRecord(2 * BLOCK_SIZE + 1, 3); + } + + @Test + public void testReadFourthStart() throws Exception + { + checkInitialOffsetRecord( + 2 * (HEADER_SIZE + 1000) + (2 * BLOCK_SIZE - 1000) + 3 * HEADER_SIZE, + 3); + } + + @Test + public void testReadInitialOffsetIntoBlockPadding() throws Exception + { + 
checkInitialOffsetRecord(3 * BLOCK_SIZE - 3, 5); + } + + @Test + public void testReadEnd() throws Exception + { + checkOffsetPastEndReturnsNoRecords(0); + } + + @Test + public void testReadPastEnd() throws Exception + { + checkOffsetPastEndReturnsNoRecords(5); + } + + private String read() + { + if (!reading) { + reading = true; + source.contents = new Slice(dest.contents.toByteArray()); + } + + Slice slice = reader.readRecord(); + if (slice != null) { + return new String(slice.getBytes()); + } + else { + return "EOF"; + } + } + + private static String bigString(String partialString, int n) + { + return Strings.repeat(partialString, (n / partialString.length()) + 1).substring(0, n); + } + + // Construct a string from a number + private static String numberString(int n) + { + return n + "."; + } + + // Return a skewed potentially long string + static String randomSkewedString(int i, Random rnd) + { + return bigString(numberString(i), skewed(rnd, 17)); + } + + private static int skewed(Random rnd, int i1) + { + return rnd.nextInt(Integer.MAX_VALUE) % (1 << rnd.nextInt(Integer.MAX_VALUE) % (i1 + 1)); + } + + void reopenForAppend() throws IOException + { + writer.close(); + + writer = LogWriter.createWriter(0, dest, dest.contents.size()); + } + + void incrementByte(int offset, int delta) + { + dest.contents.getBuf()[offset] += (byte) delta; + } + + void setByte(int offset, int newByte) + { + dest.contents.getBuf()[offset] = (byte) newByte; + } + + void shrinkSize(int bytes) + { + dest.contents.shrink(dest.contents.size() - bytes); + } + + void fixChecksum(int headerOffset, int len) + { + // Compute crc of type/len/data + PureJavaCrc32C jCrc = new PureJavaCrc32C(); + jCrc.update(dest.contents.getBuf(), headerOffset + 6, 1 + len); + int crc = jCrc.getMaskedValue(); + new Slice(dest.contents.getBuf()).setInt(headerOffset, crc); + } + + void forceError() + { + source.forceError = true; + } + + int droppedBytes() + { + return report.doppedBytes; + } + + String 
reportMessage() + { + return report.message; + } + + // Returns OK iff recorded error message contains "msg" + String matchError(String msg) + { + if (!report.message.contains(msg)) { + return report.message; + } + else { + return "OK"; + } + } + + void write(String msg) throws IOException + { + assertTrue(!reading, "write() after starting to read"); + writer.addRecord(new Slice(msg.getBytes()), false); + } + + long writtenBytes() + { + return dest.contents.size(); + } + + void writeInitialOffsetLog() throws IOException + { + for (int i = 0; i < numInitialOffsetRecords; i++) { + write(Strings.repeat(String.valueOf((char) ((byte) 'a' + i)), initialOffsetRecordSizes[i])); + } + } + + void startReadingAt(long initialOffset) + { + reader = new LogReader(source, report, true/*checksum*/, initialOffset); + } + + void checkOffsetPastEndReturnsNoRecords(long offsetPastEnd) throws IOException + { + writeInitialOffsetLog(); + reading = true; + source.contents = new Slice(dest.contents.toByteArray()); + LogReader offsetReader = new LogReader(source, report, true/*checksum*/, + writtenBytes() + offsetPastEnd); + + assertNull(offsetReader.readRecord()); + } + + void checkInitialOffsetRecord(long initialOffset, + int expectedRecordOffset) throws IOException + { + writeInitialOffsetLog(); + reading = true; + source.contents = new Slice(dest.contents.toByteArray()); + LogReader offsetReader = new LogReader(source, report, true/*checksum*/, + initialOffset); + + // read all records from expectedRecordOffset through the last one. 
+ assertLt(expectedRecordOffset, numInitialOffsetRecords); + for (; expectedRecordOffset < numInitialOffsetRecords; + ++expectedRecordOffset) { + Slice record = offsetReader.readRecord(); + assertEquals(record.length(), initialOffsetRecordSizes[expectedRecordOffset]); + assertEquals(offsetReader.getLastRecordOffset(), initialOffsetLastRecordOffsets[expectedRecordOffset]); + assertEquals(record.getByte(0), (char) ('a' + expectedRecordOffset)); + } + } + + private void assertLt(long a, long b) + { + assertTrue(a < b, "Expect that " + a + "<" + b + " is false"); + } + + private static class StringSource + implements SequentialFile + { + Slice contents = new Slice(0); + boolean forceError = false; + boolean returnedPartial = false; + + @Override + public void skip(long n) throws IOException + { + if (n > contents.length()) { + contents = new Slice(0); + throw new IOException("in-memory file skipped past end"); + } + + contents = contents.slice((int) n, contents.length() - (int) n); + } + + @Override + public int read(int atMost, SliceOutput destination) throws IOException + { + assertTrue(!returnedPartial, "must not read() after eof/error"); + if (forceError) { + forceError = false; + returnedPartial = true; + throw new IOException("read error"); + } + int read = atMost; + int available = contents.length(); + if (available == 0) { + returnedPartial = true; + return -1; //eof + } + else if (available < read) { + read = contents.length(); + } + destination.writeBytes(contents, 0, read); + contents = contents.slice(read, contents.length() - read); + return read; + } + + @Override + public void close() throws IOException + { + } + } + + private static class StringDest + implements WritableFile + { + SettableByteArrayOutputStream contents = new SettableByteArrayOutputStream(); + + @Override + public void append(Slice data) throws IOException + { + contents.write(data.getBytes()); + } + + @Override + public void force() throws IOException + { + } + + @Override + public void 
close() throws IOException + { + } + } + + private static class SettableByteArrayOutputStream + extends ByteArrayOutputStream + { + //expose buffer + public byte[] getBuf() + { + return buf; + } + + //expose buffer + public void shrink(int size) + { + Preconditions.checkElementIndex(size, count); + count = size; + } + } + + private static class ReportCollector + implements LogMonitor + { + int doppedBytes; + String message = ""; + + @Override + public void corruption(long bytes, String reason) + { + doppedBytes += bytes; + message += reason; + } + + @Override + public void corruption(long bytes, Throwable reason) + { + doppedBytes += bytes; + message += reason.getMessage(); + } + } +} diff --git a/leveldb/src/test/java/org/iq80/leveldb/impl/LogTest.java b/leveldb/src/test/java/org/iq80/leveldb/impl/LogTest.java new file mode 100644 index 0000000..eaeb288 --- /dev/null +++ b/leveldb/src/test/java/org/iq80/leveldb/impl/LogTest.java @@ -0,0 +1,189 @@ +/* + * Copyright (C) 2011 the original author or authors. + * See the notice.md file distributed with this work for additional + * information regarding copyright ownership. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.iq80.leveldb.impl; + +import org.iq80.leveldb.env.Env; +import org.iq80.leveldb.env.SequentialFile; +import org.iq80.leveldb.fileenv.EnvImpl; +import org.iq80.leveldb.util.Closeables; +import org.iq80.leveldb.util.Slice; +import org.iq80.leveldb.util.SliceOutput; +import org.iq80.leveldb.util.Slices; +import org.testng.annotations.AfterMethod; +import org.testng.annotations.BeforeMethod; +import org.testng.annotations.Test; + +import java.io.File; +import java.io.IOException; +import java.util.List; + +import static java.nio.charset.StandardCharsets.UTF_8; +import static java.util.Arrays.asList; +import static org.testng.Assert.assertEquals; +import static org.testng.Assert.assertNull; +import static org.testng.FileAssert.fail; + +public class LogTest +{ + private static final LogMonitor NO_CORRUPTION_MONITOR = new LogMonitor() + { + @Override + public void corruption(long bytes, String reason) + { + fail(String.format("corruption of %s bytes: %s", bytes, reason)); + } + + @Override + public void corruption(long bytes, Throwable reason) + { + throw new RuntimeException(String.format("corruption of %s bytes: %s", bytes, reason), reason); + } + }; + + private LogWriter writer; + private File tempFile; + + @Test + public void testEmptyBlock() + throws Exception + { + testLog(); + } + + @Test + public void testSmallRecord() + throws Exception + { + testLog(toSlice("dain sundstrom")); + } + + @Test + public void testMultipleSmallRecords() + throws Exception + { + List records = asList( + toSlice("Lagunitas Little Sumpin’ Sumpin’"), + toSlice("Lagunitas IPA"), + toSlice("Lagunitas Imperial Stout"), + toSlice("Oban 14"), + toSlice("Highland Park"), + toSlice("Lagavulin")); + + testLog(records); + } + + @Test + public void testLargeRecord() + throws Exception + { + testLog(toSlice("dain sundstrom", 4000)); + } + + @Test + public void testMultipleLargeRecords() + throws Exception + { + List records = asList( + toSlice("Lagunitas Little Sumpin’ Sumpin’", 
4000), + toSlice("Lagunitas IPA", 4000), + toSlice("Lagunitas Imperial Stout", 4000), + toSlice("Oban 14", 4000), + toSlice("Highland Park", 4000), + toSlice("Lagavulin", 4000)); + + testLog(records); + } + + @Test + public void testReadWithoutProperClose() + throws Exception + { + testLog(List.of(toSlice("something"), toSlice("something else")), false); + } + + private void testLog(Slice... entries) + throws IOException + { + testLog(asList(entries)); + } + + private void testLog(List records) + throws IOException + { + testLog(records, true); + } + + private void testLog(List records, boolean closeWriter) + throws IOException + { + for (Slice entry : records) { + writer.addRecord(entry, false); + } + + if (closeWriter) { + writer.close(); + } + + // test readRecord + + Env env = EnvImpl.createEnv(); + try (SequentialFile in = env.newSequentialFile(env.toFile(tempFile.getAbsolutePath()))) { + LogReader reader = new LogReader(in, NO_CORRUPTION_MONITOR, true, 0); + for (Slice expected : records) { + Slice actual = reader.readRecord(); + assertEquals(actual, expected); + } + assertNull(reader.readRecord()); + } + } + + @BeforeMethod + public void setUp() + throws Exception + { + tempFile = File.createTempFile("table", ".log"); + writer = Logs.createLogWriter(EnvImpl.createEnv().toFile(tempFile.getAbsolutePath()), 42, EnvImpl.createEnv()); + } + + @AfterMethod + public void tearDown() + { + if (writer != null) { + Closeables.closeQuietly(writer); + } + if (tempFile != null) { + tempFile.delete(); + } + } + + static Slice toSlice(String value) + { + return toSlice(value, 1); + } + + static Slice toSlice(String value, int times) + { + byte[] bytes = value.getBytes(UTF_8); + Slice slice = Slices.allocate(bytes.length * times); + SliceOutput sliceOutput = slice.output(); + for (int i = 0; i < times; i++) { + sliceOutput.writeBytes(bytes); + } + return slice; + } +} diff --git a/leveldb/src/test/java/org/iq80/leveldb/impl/LogWriterTest.java 
b/leveldb/src/test/java/org/iq80/leveldb/impl/LogWriterTest.java new file mode 100644 index 0000000..06923e4 --- /dev/null +++ b/leveldb/src/test/java/org/iq80/leveldb/impl/LogWriterTest.java @@ -0,0 +1,130 @@ +/* + * Copyright (C) 2011 the original author or authors. + * See the notice.md file distributed with this work for additional + * information regarding copyright ownership. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.iq80.leveldb.impl; + +import org.iq80.leveldb.env.SequentialFile; +import org.iq80.leveldb.util.Slice; +import org.iq80.leveldb.util.SliceOutput; +import org.iq80.leveldb.env.WritableFile; +import org.testng.annotations.Test; + +import java.io.ByteArrayInputStream; +import java.io.ByteArrayOutputStream; +import java.io.IOException; + +import static org.testng.Assert.assertEquals; +import static org.testng.Assert.fail; + +public class LogWriterTest +{ + @Test + public void testLogRecordBounds() + throws Exception + { + StringSink open = new StringSink(); + + int recordSize = LogConstants.BLOCK_SIZE - LogConstants.HEADER_SIZE; + Slice record = new Slice(recordSize); + + LogWriter writer = LogWriter.createWriter(10, open); + writer.addRecord(record, false); + writer.close(); + + LogMonitor logMonitor = new AssertNoCorruptionLogMonitor(); + + try (SequentialFile in = new SequentialBytes(new ByteArrayInputStream(open.sb.toByteArray()))) { + LogReader logReader = new LogReader(in, logMonitor, true, 0); + int count = 0; + 
for (Slice slice = logReader.readRecord(); slice != null; slice = logReader.readRecord()) { + assertEquals(slice.length(), recordSize); + count++; + } + assertEquals(count, 1); + } + } + + private static class StringSink implements WritableFile + { + private ByteArrayOutputStream sb = new ByteArrayOutputStream(); + + byte[] content; + + @Override + public void append(Slice data) throws IOException + { + sb.write(data.getBytes()); + } + + @Override + public void force() + { + content = sb.toByteArray(); + } + + @Override + public void close() throws IOException + { + content = sb.toByteArray(); + sb.close(); + } + } + + private static class SequentialBytes implements SequentialFile + { + private ByteArrayInputStream in; + + public SequentialBytes(ByteArrayInputStream in) + { + this.in = in; + } + + @Override + public void skip(long n) + { + assertEquals(in.skip(n), n); + } + + @Override + public int read(int atMost, SliceOutput destination) throws IOException + { + return destination.writeBytes(in, atMost); + } + + @Override + public void close() throws IOException + { + in.close(); + } + } + + private static class AssertNoCorruptionLogMonitor + implements LogMonitor + { + @Override + public void corruption(long bytes, String reason) + { + fail("corruption at " + bytes + " reason: " + reason); + } + + @Override + public void corruption(long bytes, Throwable reason) + { + fail("corruption at " + bytes + " reason: " + reason); + } + } +} diff --git a/leveldb/src/test/java/org/iq80/leveldb/impl/MemTableTest.java b/leveldb/src/test/java/org/iq80/leveldb/impl/MemTableTest.java new file mode 100644 index 0000000..7d87afb --- /dev/null +++ b/leveldb/src/test/java/org/iq80/leveldb/impl/MemTableTest.java @@ -0,0 +1,205 @@ +/* + * Copyright (C) 2011 the original author or authors. + * See the notice.md file distributed with this work for additional + * information regarding copyright ownership. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.iq80.leveldb.impl; + +import com.google.common.collect.Lists; +import org.iq80.leveldb.iterator.DBIteratorAdapter; +import org.iq80.leveldb.iterator.DbIterator; +import org.iq80.leveldb.iterator.MemTableIterator; +import org.iq80.leveldb.iterator.MergingIterator; +import org.iq80.leveldb.iterator.SeekingDBIteratorAdapter; +import org.iq80.leveldb.iterator.SeekingIterator; +import org.iq80.leveldb.iterator.SeekingIterators; +import org.iq80.leveldb.iterator.SnapshotSeekingIterator; +import org.iq80.leveldb.table.BytewiseComparator; +import org.iq80.leveldb.util.Slice; +import org.testng.annotations.Test; + +import java.util.ArrayList; +import java.util.List; +import java.util.Map; + +import static org.iq80.leveldb.util.TestUtils.asciiToBytes; +import static org.iq80.leveldb.util.TestUtils.asciiToSlice; +import static org.testng.Assert.assertEquals; +import static org.testng.Assert.assertFalse; +import static org.testng.Assert.assertTrue; + +public class MemTableTest +{ + /** + * skipFirst + iter to last ok + */ + @Test + public void testTestSimple() throws Exception + { + final MemTableIterator iter = getMemTableIterator(new InternalKeyComparator(new BytewiseComparator())); + assertTrue(iter.seekToFirst()); + assertEntry(iter, "k1", "v1", 101); + assertTrue(iter.next()); + assertEntry(iter, "k1", "v1p", 100); + assertTrue(iter.next()); + assertEntry(iter, "k2", "v2", 102); + 
assertTrue(iter.next()); + assertEntry(iter, "k3", "v3", 103); + assertTrue(iter.next()); + assertEntry(iter, "largekey", "vlarge", 104); + assertFalse(iter.next()); + } + + @Test + public void testMemIterator() + { + BytewiseComparator userComparator = new BytewiseComparator(); + InternalKeyComparator cmp = new InternalKeyComparator(userComparator); + final MemTableIterator iter = getMemTableIterator(cmp); + test(iter); + } + + @Test + public void testDbIterator() + { + BytewiseComparator userComparator = new BytewiseComparator(); + InternalKeyComparator cmp = new InternalKeyComparator(userComparator); + final MemTableIterator iter = getMemTableIterator(cmp); + MergingIterator mIter = new MergingIterator(Lists.newArrayList(iter), cmp); + testUserKey(new SnapshotSeekingIterator(new DbIterator(mIter, () -> { + }), Integer.MAX_VALUE, userComparator, (internalKey, bytes) -> { + })); + } + + @Test + public void testMergingIterator() + { + BytewiseComparator userComparator = new BytewiseComparator(); + InternalKeyComparator cmp = new InternalKeyComparator(userComparator); + final MemTableIterator iter = getMemTableIterator(cmp); + MergingIterator mIter = new MergingIterator(Lists.newArrayList(iter), cmp); + test(mIter); + } + + @Test + public void testSeekingIteratorAdapter() + { + BytewiseComparator userComparator = new BytewiseComparator(); + InternalKeyComparator cmp = new InternalKeyComparator(userComparator); + final MemTableIterator iter = getMemTableIterator(cmp); + MergingIterator mIter = new MergingIterator(Lists.newArrayList(iter), cmp); + DBIteratorAdapter adapter = new DBIteratorAdapter(new SnapshotSeekingIterator(new DbIterator(mIter, () -> { + }), Integer.MAX_VALUE, userComparator, (internalKey, bytes) -> { + })); + SeekingIterator sliceSliceSeekingIterator = SeekingDBIteratorAdapter.toSeekingIterator(adapter, Slice::getBytes, Slice::new, Slice::new); + testUserKey(sliceSliceSeekingIterator); + } + + @Test + public void testCollectionIterator() + { + 
BytewiseComparator userComparator = new BytewiseComparator(); + InternalKeyComparator cmp = new InternalKeyComparator(userComparator); + final MemTableIterator iter = getMemTableIterator(cmp); + final List> objects = new ArrayList<>(); + while (iter.next()) { + objects.add(new InternalEntry(iter.key(), iter.value())); + } + SeekingIterator internalKeySliceSeekingIterator = SeekingIterators.fromSortedList(objects, Map.Entry::getKey, Map.Entry::getValue, cmp); + test(internalKeySliceSeekingIterator); + } + + private void test(SeekingIterator iter) + { + assertTrue(iter.next()); + assertEntry(iter, "k1", "v1", 101); + assertTrue(iter.seekToFirst()); + assertEntry(iter, "k1", "v1", 101); + iter.seekToLast(); + assertEntry(iter, "largekey", "vlarge", 104); + assertTrue(iter.prev()); + assertTrue(iter.valid()); + assertEntry(iter, "k3", "v3", 103); + assertTrue(iter.seek(new InternalKey(asciiToSlice("k2"), 102, ValueType.VALUE))); + assertEntry(iter, "k2", "v2", 102); + iter.seekToFirst(); + assertTrue(iter.seek(new InternalKey(asciiToSlice("k2"), 102, ValueType.VALUE))); + assertEntry(iter, "k2", "v2", 102); + assertTrue(iter.prev()); + assertEntry(iter, "k1", "v1p", 100); + assertTrue(iter.next()); + assertEntry(iter, "k2", "v2", 102); + assertTrue(iter.seek(new InternalKey(asciiToSlice("k1"), 100, ValueType.VALUE))); + assertEntry(iter, "k1", "v1p", 100); + assertTrue(iter.seek(new InternalKey(asciiToSlice("largekey"), 190, ValueType.VALUE))); + assertEntry(iter, "largekey", "vlarge", 104); + assertFalse(iter.seek(new InternalKey(asciiToSlice("largekey"), 100, ValueType.VALUE))); + assertFalse(iter.valid()); + + } + + private void testUserKey(SeekingIterator iter) + { + assertTrue(iter.next()); + assertEntry(iter, "k1", "v1"); + assertTrue(iter.seekToFirst()); + assertEntry(iter, "k1", "v1"); + iter.seekToLast(); + assertEntry(iter, "largekey", "vlarge"); + assertTrue(iter.prev()); + assertTrue(iter.valid()); + assertEntry(iter, "k3", "v3"); + 
assertTrue(iter.seek(asciiToSlice("k2"))); + assertEntry(iter, "k2", "v2"); + iter.seekToFirst(); + assertTrue(iter.seek(asciiToSlice("k2"))); + assertEntry(iter, "k2", "v2"); + assertTrue(iter.prev()); + assertEntry(iter, "k1", "v1"); + assertTrue(iter.next()); + assertEntry(iter, "k2", "v2"); + assertTrue(iter.seek(asciiToSlice("k1"))); + assertEntry(iter, "k1", "v1"); + assertTrue(iter.seek(asciiToSlice("largekey"))); + assertEntry(iter, "largekey", "vlarge"); + assertFalse(iter.seek(asciiToSlice("largekez"))); + assertFalse(iter.valid()); + } + + private MemTableIterator getMemTableIterator(InternalKeyComparator cmp) + { + final MemTable memtable = new MemTable(cmp); + WriteBatchImpl batch = new WriteBatchImpl(); + batch.put(asciiToBytes("k1"), asciiToBytes("v1p")); + batch.put(asciiToBytes("k1"), asciiToBytes("v1")); + batch.put(asciiToBytes("k2"), asciiToBytes("v2")); + batch.put(asciiToBytes("k3"), asciiToBytes("v3")); + batch.put(asciiToBytes("largekey"), asciiToBytes("vlarge")); + batch.forEach(new InsertIntoHandler(memtable, 100)); + return memtable.iterator(); + } + + private void assertEntry(SeekingIterator iter, String key, String value, int sequenceNumber) + { + assertEquals(new InternalKey(asciiToSlice(key), sequenceNumber, ValueType.VALUE), iter.key()); + assertEquals(asciiToSlice(value), iter.value()); + } + + private void assertEntry(SeekingIterator iter, String key, String value) + { + assertEquals(asciiToSlice(key), iter.key()); + assertEquals(asciiToSlice(value), iter.value()); + } +} diff --git a/leveldb/src/test/java/org/iq80/leveldb/impl/NativeInteropTest.java b/leveldb/src/test/java/org/iq80/leveldb/impl/NativeInteropTest.java new file mode 100644 index 0000000..84ce149 --- /dev/null +++ b/leveldb/src/test/java/org/iq80/leveldb/impl/NativeInteropTest.java @@ -0,0 +1,151 @@ +/* + * Copyright (C) 2011 the original author or authors. 
+ * See the notice.md file distributed with this work for additional + * information regarding copyright ownership. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.iq80.leveldb.impl; + +import org.iq80.leveldb.CompressionType; +import org.iq80.leveldb.DB; +import org.iq80.leveldb.DBException; +import org.iq80.leveldb.DBFactory; +import org.iq80.leveldb.Options; +import org.iq80.leveldb.ReadOptions; +import org.iq80.leveldb.WriteOptions; +import org.iq80.leveldb.fileenv.FileUtils; +import org.testng.annotations.AfterMethod; +import org.testng.annotations.Test; + +import java.io.File; +import java.io.IOException; +import java.util.Arrays; +import java.util.concurrent.atomic.AtomicInteger; + +import static org.iq80.leveldb.impl.Iq80DBFactory.asString; +import static org.iq80.leveldb.impl.Iq80DBFactory.bytes; +import static org.testng.Assert.assertNull; +import static org.testng.Assert.assertTrue; + +/** + * @author Hiram Chirino + */ +public class NativeInteropTest +{ + private static final AtomicInteger NEXT_ID = new AtomicInteger(); + + private final File databaseDir = FileUtils.createTempDir("leveldb"); + + public static void assertEquals(byte[] arg1, byte[] arg2) + { + assertTrue(Arrays.equals(arg1, arg2), asString(arg1) + " != " + asString(arg2)); + } + + private final DBFactory iq80factory = Iq80DBFactory.factory; + private final DBFactory jnifactory; + + public NativeInteropTest() + { + DBFactory jnifactory = Iq80DBFactory.factory; + 
try { + ClassLoader cl = NativeInteropTest.class.getClassLoader(); + jnifactory = (DBFactory) cl.loadClass("org.fusesource.leveldbjni.JniDBFactory").newInstance(); + } + catch (Throwable e) { + // We cannot create a JniDBFactory on windows :( so just use a Iq80DBFactory for both + // to avoid test failures. + } + this.jnifactory = jnifactory; + } + + File getTestDirectory(String name) + throws IOException + { + File rc = new File(databaseDir, name); + iq80factory.destroy(rc, new Options().compressionType(CompressionType.SNAPPY).createIfMissing(true)); + rc.mkdirs(); + return rc; + } + + @Test + public void testCRUDviaIQ80() + throws IOException, DBException + { + crud(iq80factory, iq80factory); + } + + @Test + public void testCRUDviaJNI() + throws IOException, DBException + { + crud(jnifactory, jnifactory); + } + + @Test + public void testCRUDviaIQ80thenJNI() + throws IOException, DBException + { + crud(iq80factory, jnifactory); + } + + @Test + public void testCRUDviaJNIthenIQ80() + throws IOException, DBException + { + crud(jnifactory, iq80factory); + } + + public void crud(DBFactory firstFactory, DBFactory secondFactory) + throws IOException, DBException + { + Options options = new Options().compressionType(CompressionType.SNAPPY).createIfMissing(true); + + File path = getTestDirectory(getClass().getName() + "_" + NEXT_ID.incrementAndGet()); + DB db = firstFactory.open(path, options); + + WriteOptions wo = new WriteOptions().sync(false); + ReadOptions ro = new ReadOptions().fillCache(true).verifyChecksums(true); + db.put(bytes("Tampa"), bytes("green")); + db.put(bytes("London"), bytes("red")); + db.put(bytes("New York"), bytes("blue")); + + db.close(); + db = secondFactory.open(path, options); + + assertEquals(db.get(bytes("Tampa"), ro), bytes("green")); + assertEquals(db.get(bytes("London"), ro), bytes("red")); + assertEquals(db.get(bytes("New York"), ro), bytes("blue")); + + db.delete(bytes("New York"), wo); + + assertEquals(db.get(bytes("Tampa"), ro), 
bytes("green")); + assertEquals(db.get(bytes("London"), ro), bytes("red")); + assertNull(db.get(bytes("New York"), ro)); + + db.close(); + db = firstFactory.open(path, options); + + assertEquals(db.get(bytes("Tampa"), ro), bytes("green")); + assertEquals(db.get(bytes("London"), ro), bytes("red")); + assertNull(db.get(bytes("New York"), ro)); + + db.close(); + } + + @AfterMethod + public void tearDown() throws Exception + { + FileUtils.deleteRecursively(databaseDir); + } +} diff --git a/leveldb/src/test/java/org/iq80/leveldb/impl/OutOfSpaceTest.java b/leveldb/src/test/java/org/iq80/leveldb/impl/OutOfSpaceTest.java new file mode 100644 index 0000000..22f13cf --- /dev/null +++ b/leveldb/src/test/java/org/iq80/leveldb/impl/OutOfSpaceTest.java @@ -0,0 +1,269 @@ +/* + * Copyright (C) 2011 the original author or authors. + * See the notice.md file distributed with this work for additional + * information regarding copyright ownership. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.iq80.leveldb.impl; + +import com.google.common.base.Throwables; +import org.iq80.leveldb.Logger; +import org.iq80.leveldb.Options; +import org.iq80.leveldb.env.DbLock; +import org.iq80.leveldb.env.Env; +import org.iq80.leveldb.env.File; +import org.iq80.leveldb.env.RandomInputFile; +import org.iq80.leveldb.env.SequentialFile; +import org.iq80.leveldb.env.WritableFile; +import org.iq80.leveldb.memenv.MemEnv; +import org.iq80.leveldb.util.Slice; +import org.testng.Assert; +import org.testng.annotations.DataProvider; +import org.testng.annotations.Test; + +import java.io.IOException; +import java.util.Random; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; +import java.util.concurrent.Future; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicInteger; + +import static org.testng.Assert.assertNotNull; +import static org.testng.Assert.assertSame; +import static org.testng.Assert.assertTrue; + +public class OutOfSpaceTest +{ + public static final String OUT_OF_SPACE = "Out of space"; + + private interface Disk + { + void write(int contentSize) throws IOException; + } + + @DataProvider(name = "diskSize") + private Object[][] diskSizeProvider() + { + return new Object[][] { + new Object[] {2 << 8}, + new Object[] {2 << 10}, + new Object[] {2 << 12}, + new Object[] {2 << 16}, + new Object[] {2 << 24}, + }; + } + + /** + * Write concurrently to db and fail at some point like an out of memory. + * Verify that all theads fail not for invalid state but due to Out of memory exception. + * It is expected that DB should not be usable after a write failure (in write request or compaction). 
+ */ + @Test(invocationCount = 20, dataProvider = "diskSize") + public void testAllWritesShouldFailAfterFirstWriteFailure(int diskSpace) throws Exception + { + AtomicInteger disk = new AtomicInteger(0); + Disk check = contentSize -> { + if (disk.addAndGet(contentSize) > diskSpace) { + throw new IOException(OUT_OF_SPACE); + } + }; + final Options rawOptions = new Options(); + + final DbImpl db = new DbImpl(rawOptions, "leveldb", new MyEnv(check)); + //simulate concurrent work with random batch side, this will stimulate multi write grouping into one batch + //inside the DB + final int threads = 4; + final ExecutorService executorService = Executors.newFixedThreadPool(threads); + try { + Future[] fut = new Future[threads]; + for (int i = 0; i < threads; i++) { + //at some point all thread should fail due to out of space exception + fut[i] = executorService.submit(() -> { + final Random rand = new Random(Thread.currentThread().getId()); + try { + while (true) { + writeRandomBatch(db, rand); + } + } + catch (Exception e) { + return e; + } + }); + } + //wait for all thread + //all threads should fail because of continuous write. 
+ for (Future exceptionFuture : fut) { + final Exception exception = exceptionFuture.get(1, TimeUnit.MINUTES); + final Throwable rootCause = Throwables.getRootCause(exception); + assertNotNull(rootCause, "Route cause is expected in thrown exception" + exception); + exception.printStackTrace(); + assertTrue(rootCause.getMessage().equals("Out of space"), "Out of space exception is expected as route cause of failure in " + exception); + } + + //DB should be failed with background failure, so any new write should fail with background exception cause + //last check to verify that if we try to write additional records to DB we get same route cause twice + final Assert.ThrowingRunnable shouldFail = () -> { + try (WriteBatchImpl updates = new WriteBatchImpl()) { + updates.put(new byte[] {1, 2, 3, 5}, new byte[] {45, 5, 6, 7}); + db.write(updates); + Assert.fail("expected to fail"); + } + }; + Throwable t1 = Throwables.getRootCause(Assert.expectThrows(Exception.class, shouldFail)); + Throwable t2 = Throwables.getRootCause(Assert.expectThrows(Exception.class, shouldFail)); + assertSame(t1, t2); + assertNotNull(t1, "Route cause is expected in thrown exception" + t1); + assertTrue(t1.getMessage().equals(OUT_OF_SPACE), "Out of space exception is expected as route cause of failure in " + t1); + } + finally { + executorService.shutdown(); + } + } + + private void writeRandomBatch(DbImpl db, Random rand) + { + try (WriteBatchImpl updates = new WriteBatchImpl()) { + final int batchSize = rand.nextInt(10) + 1; + for (int j = 0; j < batchSize; j++) { + final int keySize = rand.nextInt(300) + 15; + final int valueSize = rand.nextInt(1000) + 10; + final byte[] kByte = new byte[keySize]; + final byte[] vByte = new byte[valueSize]; + rand.nextBytes(kByte); + rand.nextBytes(vByte); + if (rand.nextInt(20) % 20 == 0) { + updates.delete(kByte); + } + else { + updates.put(kByte, vByte); + } + } + db.write(updates); + } + } + + private static class WrapperWritableFile implements WritableFile + 
{ + private final WritableFile writableFile; + private final Disk check; + + public WrapperWritableFile(WritableFile writableFile, Disk check) + { + this.writableFile = writableFile; + this.check = check; + } + + @Override + public void append(Slice data) throws IOException + { + check.write(data.length()); + writableFile.append(data); + } + + @Override + public void force() throws IOException + { + check.write(100); //simulate some write + writableFile.force(); + } + + @Override + public void close() throws IOException + { + check.write(100); //simulate some write + writableFile.close(); + } + } + + private static class MyEnv implements Env + { + final Env env; + private final Disk check; + + public MyEnv(Disk check) + { + this.check = check; + this.env = MemEnv.createEnv(); + } + + @Override + public long nowMicros() + { + return env.nowMicros(); + } + + @Override + public File toFile(String filename) + { + return env.toFile(filename); + } + + @Override + public File createTempDir(String prefix) + { + return env.createTempDir(prefix); + } + + @Override + public SequentialFile newSequentialFile(File file) throws IOException + { + return env.newSequentialFile(file); + } + + @Override + public RandomInputFile newRandomAccessFile(File file) throws IOException + { + return env.newRandomAccessFile(file); + } + + @Override + public WritableFile newWritableFile(File file) throws IOException + { + final WritableFile writableFile = env.newWritableFile(file); + return new WrapperWritableFile(writableFile, check); + } + + @Override + public WritableFile newAppendableFile(File file) throws IOException + { + return new WrapperWritableFile(env.newAppendableFile(file), check); + } + + @Override + public void writeStringToFileSync(File file, String content) throws IOException + { + check.write(content.length()); + env.writeStringToFileSync(file, content); + } + + @Override + public String readFileToString(File file) throws IOException + { + return env.readFileToString(file); + } 
+ + @Override + public Logger newLogger(File loggerFile) throws IOException + { + return env.newLogger(loggerFile); + } + + @Override + public DbLock tryLock(File lockFile) throws IOException + { + return env.tryLock(lockFile); + } + } +} diff --git a/leveldb/src/test/java/org/iq80/leveldb/impl/RecoveryTest.java b/leveldb/src/test/java/org/iq80/leveldb/impl/RecoveryTest.java new file mode 100644 index 0000000..d54bd99 --- /dev/null +++ b/leveldb/src/test/java/org/iq80/leveldb/impl/RecoveryTest.java @@ -0,0 +1,340 @@ +/* + * Copyright (C) 2011 the original author or authors. + * See the notice.md file distributed with this work for additional + * information regarding copyright ownership. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.iq80.leveldb.impl; + +import com.google.common.io.CharStreams; +import com.google.common.io.Files; +import org.iq80.leveldb.Options; +import org.iq80.leveldb.Snapshot; +import org.iq80.leveldb.env.Env; +import org.iq80.leveldb.env.File; +import org.iq80.leveldb.fileenv.EnvImpl; +import org.iq80.leveldb.fileenv.MmapLimiter; +import org.iq80.leveldb.util.Slice; +import org.iq80.leveldb.util.Slices; +import org.iq80.leveldb.env.WritableFile; +import org.testng.annotations.AfterMethod; +import org.testng.annotations.BeforeMethod; +import org.testng.annotations.Test; + +import java.io.BufferedReader; +import java.io.IOException; +import java.nio.charset.StandardCharsets; +import java.util.ArrayList; +import java.util.List; + +import static org.testng.Assert.assertEquals; +import static org.testng.Assert.assertNotEquals; +import static org.testng.Assert.assertTrue; + +public class RecoveryTest +{ + private Env env; + private File dbname; + private DbImpl db; + + @BeforeMethod + public void setUp() throws Exception + { + env = EnvImpl.createEnv(MmapLimiter.newLimiter(0)); + dbname = env.createTempDir("leveldb").child("recovery_test"); + assertTrue(DbImpl.destroyDB(dbname, env), "unable to close/delete previous db correctly"); + open(null); + } + + @AfterMethod + public void tearDown() throws Exception + { + close(); + DbImpl.destroyDB(dbname, env); + dbname.getParentFile().deleteRecursively(); + } + + void close() + { + if (db != null) { + db.close(); + } + db = null; + } + + void open(Options options) throws IOException + { + close(); + Options opts = new Options(); + if (options != null) { + opts = options; + } + else { + opts.reuseLogs(true); + opts.createIfMissing(true); + } + db = new DbImpl(opts, dbname.getPath(), env); + assertEquals(numLogs(), 1); + } + + void put(String k, String v) + { + db.put(k.getBytes(StandardCharsets.UTF_8), v.getBytes(StandardCharsets.UTF_8)); + } + + String get(String k) + { + return get(k, null); + } + + String 
get(String k, Snapshot snapshot) + { + byte[] bytes = db.get(k.getBytes(StandardCharsets.UTF_8)); + return bytes == null ? "NOT_FOUND" : new String(bytes); + } + + File manifestFileName() throws IOException + { + try (BufferedReader in = Files.newReader(new java.io.File(dbname.child(Filename.currentFileName()).getPath()), StandardCharsets.UTF_8)) { + String current = CharStreams.toString(in); + int len = current.length(); + if (len > 0 && current.charAt(len - 1) == '\n') { + current = current.substring(0, len - 1); + } + return dbname.child(current); + } + } + + File logName(long number) + { + return dbname.child(Filename.logFileName(number)); + } + + long deleteLogFiles() + { + List logs = getFiles(Filename.FileType.LOG); + for (Long log : logs) { + boolean delete = dbname.child(Filename.logFileName(log)).delete(); + assertTrue(delete); + } + return logs.size(); + } + + long firstLogFile() + { + return getFiles(Filename.FileType.LOG).get(0); + } + + List getFiles(Filename.FileType t) + { + ArrayList result = new ArrayList<>(); + for (File file : dbname.listFiles()) { + Filename.FileInfo fileInfo = Filename.parseFileName(file); + if (fileInfo != null && t == fileInfo.getFileType()) { + result.add(fileInfo.getFileNumber()); + } + } + return result; + } + + int numLogs() + { + return getFiles(Filename.FileType.LOG).size(); + } + + int numTables() + { + return getFiles(Filename.FileType.TABLE).size(); + } + + long fileSize(File fname) + { + return fname.length(); + } + + void compactMemTable() + { + db.testCompactMemTable(); + } + + // Directly construct a log file that sets key to val. 
+ void makeLogFile(long lognum, long seq, Slice key, Slice val) throws IOException + { + org.iq80.leveldb.env.File fname = dbname.child(Filename.logFileName(lognum)); + try (LogWriter writer = Logs.createLogWriter(fname, lognum, env)) { + WriteBatchImpl batch = new WriteBatchImpl(); + batch.put(key, val); + writer.addRecord(DbImpl.writeWriteBatch(batch, seq), true); + } + } + + @Test + public void testManifestReused() throws Exception + { + put("foo", "bar"); + close(); + File oldManifest = manifestFileName(); + open(null); + assertEquals(manifestFileName(), oldManifest); + assertEquals(get("foo"), "bar"); + open(null); + assertEquals(manifestFileName(), oldManifest); + assertEquals(get("foo"), "bar"); + } + + @Test + public void testLargeManifestCompacted() throws Exception + { + put("foo", "bar"); + close(); + File oldManifest = manifestFileName(); + + // Pad with zeroes to make manifest file very big. + + long len = fileSize(oldManifest); + try (WritableFile file = env.newAppendableFile(oldManifest)) { + file.append(Slices.allocate(3 * 1048576 - ((int) len))); + file.force(); //flush + } + + open(null); + File newManifest = manifestFileName(); + assertNotEquals(newManifest, oldManifest); + assertTrue(10000L > fileSize(newManifest)); + assertEquals(get("foo"), "bar"); + + open(null); + assertEquals(manifestFileName(), newManifest); + assertEquals(get("foo"), "bar"); + } + + @Test + public void testNoLogFiles() throws Exception + { + put("foo", "bar"); + close(); //file are locked in windows, can't delete them fi do not close db. 
+ assertEquals(deleteLogFiles(), 1); + open(null); + assertEquals(get("foo"), "NOT_FOUND"); + open(null); + assertEquals(get("foo"), "NOT_FOUND"); + } + + @Test + public void testLogFileReuse() throws Exception + { + for (int i = 0; i < 2; i++) { + put("foo", "bar"); + if (i == 0) { + // Compact to ensure current log is empty + compactMemTable(); + } + close(); + assertEquals(numLogs(), 1); + long number = firstLogFile(); + if (i == 0) { + assertEquals(fileSize(logName(number)), 0); + } + else { + assertTrue((long) 0 < fileSize(logName(number))); + } + open(null); + assertEquals(numLogs(), 1); + assertEquals(firstLogFile(), number, "did not reuse log file"); + assertEquals(get("foo"), "bar"); + open(null); + assertEquals(numLogs(), 1); + assertEquals(firstLogFile(), number, "did not reuse log file"); + assertEquals(get("foo"), "bar"); + } + } + + @Test + public void testMultipleMemTables() throws Exception + { + // Make a large log. + int kNum = 1000; + for (int i = 0; i < kNum; i++) { + String format = String.format("%050d", i); + put(format, format); + } + assertEquals(numTables(), 0); + close(); + assertEquals(numTables(), 0); + assertEquals(numLogs(), 1); + long oldLogFile = firstLogFile(); + + // Force creation of multiple memtables by reducing the write buffer size. + Options opt = new Options(); + opt.reuseLogs(true); + opt.writeBufferSize((kNum * 100) / 2); + open(opt); + assertTrue((long) 2 <= (long) numTables()); + assertEquals(numLogs(), 1); + assertNotEquals(firstLogFile(), oldLogFile, "must not reuse log"); + for (int i = 0; i < kNum; i++) { + String format = String.format("%050d", i); + assertEquals(get(format), format); + } + } + + @Test + public void testMultipleLogFiles() throws Exception + { + put("foo", "bar"); + close(); + assertEquals(numLogs(), 1); + + // Make a bunch of uncompacted log files. 
+ long oldLog = firstLogFile(); + makeLogFile(oldLog + 1, 1000, toSlice("hello"), toSlice("world")); + makeLogFile(oldLog + 2, 1001, toSlice("hi"), toSlice("there")); + makeLogFile(oldLog + 3, 1002, toSlice("foo"), toSlice("bar2")); + + // Recover and check that all log files were processed. + open(null); + assertTrue((long) 1 <= (long) numTables()); + assertEquals(numLogs(), 1); + long newLog = firstLogFile(); + assertTrue(oldLog + 3 <= newLog); + assertEquals(get("foo"), "bar2"); + assertEquals(get("hello"), "world"); + assertEquals(get("hi"), "there"); + + // Test that previous recovery produced recoverable state. + open(null); + assertTrue((long) 1 <= (long) numTables()); + assertEquals(numLogs(), 1); + assertEquals(firstLogFile(), newLog); + assertEquals(get("foo"), "bar2"); + assertEquals(get("hello"), "world"); + assertEquals(get("hi"), "there"); + + // Check that introducing an older log file does not cause it to be re-read. + close(); + makeLogFile(oldLog + 1, 2000, toSlice("hello"), toSlice("stale write")); + open(null); + assertTrue((long) 1 <= (long) numTables()); + assertEquals(numLogs(), 1); + assertEquals(firstLogFile(), newLog); + assertEquals(get("foo"), "bar2"); + assertEquals(get("hello"), "world"); + assertEquals(get("hi"), "there"); + } + + private Slice toSlice(String hello) + { + return Slices.copiedBuffer(hello, StandardCharsets.UTF_8); + } +} diff --git a/leveldb/src/test/java/org/iq80/leveldb/impl/VersionEditTest.java b/leveldb/src/test/java/org/iq80/leveldb/impl/VersionEditTest.java new file mode 100644 index 0000000..8cb579c --- /dev/null +++ b/leveldb/src/test/java/org/iq80/leveldb/impl/VersionEditTest.java @@ -0,0 +1,57 @@ +/* + * Copyright (C) 2011 the original author or authors. + * See the notice.md file distributed with this work for additional + * information regarding copyright ownership. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.iq80.leveldb.impl; + +import org.iq80.leveldb.util.Slice; +import org.testng.annotations.Test; + +import static org.iq80.leveldb.util.TestUtils.asciiToSlice; +import static org.testng.Assert.assertEquals; + +public class VersionEditTest +{ + @Test + public void testEncodeDecode() throws Exception + { + long kBig = 1L << 50; + + VersionEdit edit = new VersionEdit(); + for (int i = 0; i < 4; i++) { + testEncodeDecode(edit); + edit.addFile(3, kBig + 300 + i, kBig + 400 + i, + new InternalKey(asciiToSlice("foo"), kBig + 500 + i, ValueType.VALUE), + new InternalKey(asciiToSlice("zoo"), kBig + 600 + i, ValueType.DELETION)); + edit.deleteFile(4, kBig + 700 + i); + edit.setCompactPointer(i, new InternalKey(asciiToSlice("x"), kBig + 900 + i, ValueType.VALUE)); + } + + edit.setComparatorName("foo"); + edit.setLogNumber(kBig + 100); + edit.setNextFileNumber(kBig + 200); + edit.setLastSequenceNumber(kBig + 1000); + testEncodeDecode(edit); + } + + void testEncodeDecode(VersionEdit edit) + { + Slice encoded = edit.encode(); + VersionEdit parsed = new VersionEdit(encoded); + Slice encoded2 = parsed.encode(); + assertEquals(encoded, encoded2); + } +} diff --git a/leveldb/src/test/java/org/iq80/leveldb/impl/VersionSetTest.java b/leveldb/src/test/java/org/iq80/leveldb/impl/VersionSetTest.java new file mode 100644 index 0000000..1e4b071 --- /dev/null +++ b/leveldb/src/test/java/org/iq80/leveldb/impl/VersionSetTest.java @@ -0,0 +1,185 @@ +/* + * Copyright (C) 2011 the original author or authors. 
+ * See the notice.md file distributed with this work for additional + * information regarding copyright ownership. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.iq80.leveldb.impl; + +import org.iq80.leveldb.Options; +import org.iq80.leveldb.fileenv.EnvImpl; +import org.iq80.leveldb.table.BytewiseComparator; +import org.iq80.leveldb.util.Slice; +import org.iq80.leveldb.util.TestUtils; +import org.testng.annotations.BeforeMethod; +import org.testng.annotations.Test; + +import java.util.ArrayList; +import java.util.List; + +import static org.testng.Assert.assertEquals; +import static org.testng.Assert.assertFalse; +import static org.testng.Assert.assertTrue; + +public class VersionSetTest +{ + private List files = new ArrayList<>(); + + @BeforeMethod + public void setUp() throws Exception + { + files.clear(); + } + + void add(String smallest, String largest, + long smallestSeq, + long largestSeq) + { + files.add(new FileMetaData(files.size() + 1, 0, new InternalKey(TestUtils.asciiToSlice(smallest), smallestSeq, ValueType.VALUE), new InternalKey(TestUtils.asciiToSlice(largest), largestSeq, ValueType.VALUE))); + } + + int find(String key) + { + InternalKey target = new InternalKey(TestUtils.asciiToSlice(key), 100, ValueType.VALUE); + return newLevel().findFile(target); + } + + private Level newLevel() + { + InternalKeyComparator internalKeyComparator = new InternalKeyComparator(new BytewiseComparator()); + return new Level(1, files, new 
TableCache(EnvImpl.createEnv().toFile("xxxxxxxxxxx"), 0, new BytewiseComparator(), new Options(), EnvImpl.createEnv()), internalKeyComparator); + } + + boolean overlaps(String smallest, String largest) + { + Slice s = smallest != null ? TestUtils.asciiToSlice(smallest) : null; + Slice l = largest != null ? TestUtils.asciiToSlice(largest) : null; + return newLevel().someFileOverlapsRange(true, s, l); + } + + @Test + public void testEmpty() throws Exception + { + assertEquals(find("foo"), 0); + assertFalse(overlaps("z", "a")); + assertFalse(overlaps("z", null)); + assertFalse(overlaps("a", null)); + assertFalse(overlaps(null, null)); + } + + @Test + public void testSingle() throws Exception + { + add("p", "q", 100, 100); + assertEquals(find("a"), 0); + assertEquals(find("p"), 0); + assertEquals(find("pl"), 0); + assertEquals(find("q"), 0); + assertEquals(find("ql"), 1); + assertEquals(find("z"), 1); + + assertTrue(!overlaps("a", "b")); + assertTrue(!overlaps("z1", "z2")); + assertTrue(overlaps("a", "p")); + assertTrue(overlaps("a", "q")); + assertTrue(overlaps("a", "z")); + assertTrue(overlaps("p", "p1")); + assertTrue(overlaps("p", "q")); + assertTrue(overlaps("p", "z")); + assertTrue(overlaps("p1", "p2")); + assertTrue(overlaps("p1", "z")); + assertTrue(overlaps("q", "q")); + assertTrue(overlaps("q", "q1")); + + assertTrue(!overlaps(null, "j")); + assertTrue(!overlaps("r", null)); + assertTrue(overlaps(null, "p")); + assertTrue(overlaps(null, "p1")); + assertTrue(overlaps("q", null)); + assertTrue(overlaps(null, null)); + } + + @Test + public void testMultiple() throws Exception + { + add("150", "200", 100, 100); + add("200", "250", 100, 100); + add("300", "350", 100, 100); + add("400", "450", 100, 100); + assertEquals(0, find("100")); + assertEquals(0, find("150")); + assertEquals(0, find("151")); + assertEquals(0, find("199")); + assertEquals(0, find("200")); + assertEquals(1, find("201")); + assertEquals(1, find("249")); + assertEquals(1, find("250")); + 
assertEquals(2, find("251")); + assertEquals(2, find("299")); + assertEquals(2, find("300")); + assertEquals(2, find("349")); + assertEquals(2, find("350")); + assertEquals(3, find("351")); + assertEquals(3, find("400")); + assertEquals(3, find("450")); + assertEquals(4, find("451")); + + assertTrue(!overlaps("100", "149")); + assertTrue(!overlaps("251", "299")); + assertTrue(!overlaps("451", "500")); + assertTrue(!overlaps("351", "399")); + + assertTrue(overlaps("100", "150")); + assertTrue(overlaps("100", "200")); + assertTrue(overlaps("100", "300")); + assertTrue(overlaps("100", "400")); + assertTrue(overlaps("100", "500")); + assertTrue(overlaps("375", "400")); + assertTrue(overlaps("450", "450")); + assertTrue(overlaps("450", "500")); + } + + @Test + public void testMultipleNullBoundaries() throws Exception + { + add("150", "200", 100, 100); + add("200", "250", 100, 100); + add("300", "350", 100, 100); + add("400", "450", 100, 100); + assertTrue(!overlaps(null, "149")); + assertTrue(!overlaps("451", null)); + assertTrue(overlaps(null, null)); + assertTrue(overlaps(null, "150")); + assertTrue(overlaps(null, "199")); + assertTrue(overlaps(null, "200")); + assertTrue(overlaps(null, "201")); + assertTrue(overlaps(null, "400")); + assertTrue(overlaps(null, "800")); + assertTrue(overlaps("100", null)); + assertTrue(overlaps("200", null)); + assertTrue(overlaps("449", null)); + assertTrue(overlaps("450", null)); + } + + @Test + public void testOverlapSequenceChecks() throws Exception + { + add("200", "200", 5000, 3000); + assertTrue(!overlaps("199", "199")); + assertTrue(!overlaps("201", "300")); + assertTrue(overlaps("200", "200")); + assertTrue(overlaps("190", "200")); + assertTrue(overlaps("200", "210")); + } +} diff --git a/leveldb/src/test/java/org/iq80/leveldb/impl/WriteBatchImplTest.java b/leveldb/src/test/java/org/iq80/leveldb/impl/WriteBatchImplTest.java new file mode 100644 index 0000000..9d88c9d --- /dev/null +++ 
b/leveldb/src/test/java/org/iq80/leveldb/impl/WriteBatchImplTest.java @@ -0,0 +1,50 @@ +/* + * Copyright (C) 2011 the original author or authors. + * See the notice.md file distributed with this work for additional + * information regarding copyright ownership. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.iq80.leveldb.impl; + +import org.iq80.leveldb.util.Slice; +import org.testng.annotations.Test; + +import static org.testng.Assert.assertTrue; + +public class WriteBatchImplTest +{ + @Test + public void testApproximateSize() + { + WriteBatchImpl batch = new WriteBatchImpl(); + int emptySize = batch.getApproximateSize(); + + batch.put(slice("foo"), slice("bar")); + int oneKeySize = batch.getApproximateSize(); + assertTrue(emptySize < oneKeySize); + + batch.put(slice("baz"), slice("boo")); + int twoKeysSize = batch.getApproximateSize(); + assertTrue(oneKeySize < twoKeysSize); + + batch.delete(slice("box")); + int postDeleteSize = batch.getApproximateSize(); + assertTrue(twoKeysSize < postDeleteSize); + } + + private static Slice slice(String txt) + { + return new Slice(txt.getBytes()); + } +} diff --git a/leveldb/src/test/java/org/iq80/leveldb/iterator/IteratorTestUtils.java b/leveldb/src/test/java/org/iq80/leveldb/iterator/IteratorTestUtils.java new file mode 100644 index 0000000..8d2d021 --- /dev/null +++ b/leveldb/src/test/java/org/iq80/leveldb/iterator/IteratorTestUtils.java @@ -0,0 +1,177 @@ +/* + * Copyright (C) 2011 the original 
author or authors. + * See the notice.md file distributed with this work for additional + * information regarding copyright ownership. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.iq80.leveldb.iterator; + +import org.iq80.leveldb.impl.InternalKey; +import org.iq80.leveldb.impl.ValueType; +import org.iq80.leveldb.util.Slice; +import org.testng.Assert; + +import java.io.IOException; +import java.util.AbstractMap; +import java.util.Map; + +public final class IteratorTestUtils +{ + private IteratorTestUtils() + { + //util + } + + public static InternalIterator asInternalIterator(SeekingIterator iterator) + { + return new InternalForwardingIterator(iterator); + } + + public static SliceIterator asSliceIterator(SeekingIterator iterator) + { + return new SliceForwardingIterator(iterator); + } + + public static InternalKey key(String userK, int sequence, ValueType valueType) + { + return new InternalKey(new Slice(userK.getBytes()), sequence, valueType); + } + + public static void assertValidKV(SeekingIterator mergingIterator, K internalKey, V value) + { + Assert.assertTrue(mergingIterator.valid()); + Assert.assertEquals(mergingIterator.key(), internalKey); + Assert.assertEquals(mergingIterator.value(), value); + } + + public static void assertInvalid(boolean op, SeekingIterator it) + { + Assert.assertFalse(op); + Assert.assertFalse(it.valid()); + Assert.assertThrows(it::key); + Assert.assertThrows(it::value); + } + + public static String 
toString(SeekingIterator it) + { + StringBuilder stringBuilder = new StringBuilder(); + stringBuilder.append("["); + while (it.valid()) { + if (stringBuilder.length() > 1) { + stringBuilder.append(", "); + } + stringBuilder.append(it.key()).append("=").append(it.value()); + it.next(); + } + return stringBuilder.append("]").toString(); + } + + public static Map.Entry entry(SeekingIterator it) + { + return new AbstractMap.SimpleEntry<>(it.key(), it.value()); + } + + private static class InternalForwardingIterator extends ForwardingIterator + implements InternalIterator + { + private final SeekingIterator iterator; + + InternalForwardingIterator(SeekingIterator iterator) + { + this.iterator = iterator; + } + + @Override + protected SeekingIterator delegate() + { + return iterator; + } + } + + private static class SliceForwardingIterator extends ForwardingIterator + implements SliceIterator + { + private final SeekingIterator iterator; + + SliceForwardingIterator(SeekingIterator iterator) + { + this.iterator = iterator; + } + + @Override + protected SeekingIterator delegate() + { + return iterator; + } + } + + public abstract static class ForwardingIterator implements SeekingIterator + { + protected abstract SeekingIterator delegate(); + + @Override + public boolean valid() + { + return delegate().valid(); + } + + @Override + public boolean seekToFirst() + { + return delegate().seekToFirst(); + } + + @Override + public boolean seekToLast() + { + return delegate().seekToLast(); + } + + @Override + public boolean seek(K key) + { + return delegate().seek(key); + } + + @Override + public boolean next() + { + return delegate().next(); + } + + @Override + public boolean prev() + { + return delegate().prev(); + } + + @Override + public K key() + { + return delegate().key(); + } + + @Override + public V value() + { + return delegate().value(); + } + + @Override + public void close() throws IOException + { + delegate().close(); + } + } +} diff --git 
a/leveldb/src/test/java/org/iq80/leveldb/iterator/MemTableIteratorTest.java b/leveldb/src/test/java/org/iq80/leveldb/iterator/MemTableIteratorTest.java new file mode 100644 index 0000000..dab524f --- /dev/null +++ b/leveldb/src/test/java/org/iq80/leveldb/iterator/MemTableIteratorTest.java @@ -0,0 +1,257 @@ +/* + * Copyright (C) 2011 the original author or authors. + * See the notice.md file distributed with this work for additional + * information regarding copyright ownership. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.iq80.leveldb.iterator; + +import com.google.common.collect.Iterators; +import com.google.common.collect.Lists; +import org.iq80.leveldb.impl.InternalKey; +import org.iq80.leveldb.impl.InternalKeyComparator; +import org.iq80.leveldb.impl.ValueType; +import org.iq80.leveldb.table.BytewiseComparator; +import org.iq80.leveldb.util.Slice; +import org.testng.Assert; +import org.testng.annotations.Test; + +import java.io.IOException; +import java.util.AbstractMap; +import java.util.ArrayList; +import java.util.Collections; +import java.util.Iterator; +import java.util.List; +import java.util.Map; +import java.util.NoSuchElementException; +import java.util.concurrent.ConcurrentSkipListMap; + +import static org.testng.Assert.assertEquals; +import static org.testng.Assert.assertFalse; +import static org.testng.Assert.assertTrue; + +public class MemTableIteratorTest +{ + List> sampleEntries1 = Lists.newArrayList(entry("k1", 102, ValueType.VALUE, "v2"), entry("k1", 100, ValueType.VALUE, "v1"), entry("k2", 100, ValueType.VALUE, "v1"), entry("k3", 300, ValueType.VALUE, "v1"), entry("k4", 100, ValueType.VALUE, "v1") + ); + + protected InternalIterator iterFactory(List> data) + { + return new MemTableIterator(getMapOf(data)); + } + + @Test + public void testSeekAfterLast() throws Exception + { + InternalIterator iter = iterFactory(sampleEntries1); + assertFalse(iter.seek(entry("k4", 90, ValueType.VALUE, "").getKey())); + assertTrue(iter.seek(entry("k4", 200, ValueType.VALUE, "").getKey())); + assertEntryEquals(iter, sampleEntries1.get(4)); + assertTrue(iter.seek(entry("k1", 101, ValueType.VALUE, "").getKey())); + assertEntryEquals(iter, sampleEntries1.get(1)); + assertTrue(iter.seek(entry("k1", 100, ValueType.VALUE, "").getKey())); + assertEntryEquals(iter, sampleEntries1.get(1)); + assertTrue(iter.seek(entry("k1", 102, ValueType.VALUE, "").getKey())); + assertEntryEquals(iter, sampleEntries1.get(0)); + } + + @Test + public void testForwardScanAfterSeek() throws 
Exception + { + int count = 0; + InternalIterator iter = iterFactory(sampleEntries1); + assertFalse(iter.valid()); + Iterator> iterator = sampleEntries1.iterator(); + Iterators.advance(iterator, 2); + for (boolean valid = iter.seek(new InternalKey(sampleEntries1.get(2).getKey().encode())); valid; valid = iter.next()) { + assertEntryEquals(iter, iterator.next()); + count++; + } + assertFalse(iter.next()); + assertEndAndClose(iter); + assertEquals(count, sampleEntries1.size() - 2); + } + + @Test + public void testForwardScanAfterSeekFirst() throws Exception + { + InternalIterator iter = iterFactory(sampleEntries1); + assertFalse(iter.valid()); + validateAll(iter, iter.seekToFirst(), sampleEntries1); + validateAll(iter, iter.seekToFirst(), sampleEntries1); + assertFalse(iter.next()); + assertEndAndClose(iter); + } + + @Test + public void testForwardScan() throws Exception + { + InternalIterator iter = iterFactory(sampleEntries1); + assertFalse(iter.valid()); + validateAll(iter, iter.next(), sampleEntries1); + validateAll(iter, iter.seekToFirst(), sampleEntries1); + assertFalse(iter.next()); + assertEndAndClose(iter); + } + + @Test + public void testReverseScan() throws Exception + { + InternalIterator iter = iterFactory(sampleEntries1); + assertFalse(iter.valid()); + assertFalse(iter.prev()); + assertEndAndClose(iter); + } + + @Test + public void testReverseScanAfterSeekLast() throws Exception + { + InternalIterator iter = iterFactory(sampleEntries1); + assertFalse(iter.valid()); + validateAllReverse(iter, iter.seekToLast(), sampleEntries1); + validateAllReverse(iter, iter.seekToLast(), sampleEntries1); + assertFalse(iter.prev()); + assertTrue(iter.next()); + assertFalse(iter.prev()); + assertEndAndClose(iter); + } + + @Test + public void testReverseScanAfterSeekFirst() throws Exception + { + InternalIterator iter = iterFactory(sampleEntries1); + assertFalse(iter.valid()); + assertTrue(iter.seekToFirst()); + assertFalse(iter.prev()); + assertEndAndClose(iter); + } + + 
@Test + public void testForwardScanAfterSeekLast() throws Exception + { + InternalIterator iter = iterFactory(sampleEntries1); + assertFalse(iter.valid()); + assertTrue(iter.seekToLast()); + assertFalse(iter.next()); + assertEndAndClose(iter); + } + + @Test + public void testEmpty() throws Exception + { + ConcurrentSkipListMap table = getMapOf(Collections.emptyList()); + { + MemTableIterator iter = new MemTableIterator(table); + assertFalse(iter.valid()); + assertFalse(iter.seekToLast()); + assertFalse(iter.next()); + assertFalse(iter.prev()); + assertEndAndClose(iter); + } + { + MemTableIterator iter = new MemTableIterator(table); + assertFalse(iter.valid()); + assertFalse(iter.seekToLast()); + assertFalse(iter.prev()); + assertFalse(iter.next()); + assertEndAndClose(iter); + } + { + MemTableIterator iter = new MemTableIterator(table); + assertFalse(iter.next()); + assertFalse(iter.valid()); + assertFalse(iter.prev()); + assertFalse(iter.seekToLast()); + assertFalse(iter.seekToFirst()); + assertEndAndClose(iter); + } + } + + @Test + public void testReverseAfterLastNext() throws Exception + { + InternalIterator iter = iterFactory(sampleEntries1); + validateAll(iter, iter.next(), sampleEntries1); + validateAllReverse(iter, iter.prev(), sampleEntries1); + assertEndAndClose(iter); + } + + @Test + public void testNextAfterLastPrev() throws Exception + { + InternalIterator iter = iterFactory(sampleEntries1); + validateAllReverse(iter, iter.seekToLast(), sampleEntries1); + validateAll(iter, iter.next(), sampleEntries1); + assertEndAndClose(iter); + } + + private void validateAll(InternalIterator iter, boolean valid1, List> data) + { + int count = 0; + Iterator> iterator = data.iterator(); + for (boolean valid = valid1; valid; valid = iter.next()) { + assertEntryEquals(iter, iterator.next()); + count++; + } + assertEquals(count, data.size()); + } + + private void validateAllReverse(InternalIterator iter, boolean valid1, List> data) + { + int count = 0; + Iterator> 
iterator = Lists.reverse(data).iterator(); + for (boolean valid = valid1; valid; valid = iter.prev()) { + assertEntryEquals(iter, iterator.next()); + count++; + } + assertEquals(count, data.size()); + } + + private void assertEndAndClose(InternalIterator iter) throws IOException + { + assertFalse(iter.valid()); + Assert.assertThrows(NoSuchElementException.class, iter::key); + Assert.assertThrows(NoSuchElementException.class, iter::value); + iter.close(); + //after close, call should not succeed + Assert.assertThrows(iter::next); + Assert.assertThrows(iter::prev); + Assert.assertThrows(iter::seekToFirst); + Assert.assertThrows(iter::seekToLast); + Assert.assertThrows(() -> iter.seek(new InternalKey(new Slice("k1".getBytes()), 100, ValueType.DELETION))); + } + + protected final ConcurrentSkipListMap getMapOf(List> entries) + { + ConcurrentSkipListMap table = new ConcurrentSkipListMap<>(new InternalKeyComparator(new BytewiseComparator())); + ArrayList> entries1 = Lists.newArrayList(entries); + Collections.shuffle(entries1); + entries1.forEach(e -> table.put(e.getKey(), e.getValue())); + return table; + } + + private void assertEntryEquals(SeekingIterator iter, Map.Entry next) + { + assertTrue(iter.valid()); + assertEquals(iter.key(), next.getKey()); + assertEquals(iter.key(), next.getKey()); + assertEquals(iter.value(), next.getValue()); + assertEquals(iter.value(), next.getValue()); + } + + private Map.Entry entry(String k, int sec, ValueType vt, String v) + { + return new AbstractMap.SimpleEntry<>(new InternalKey(new Slice(k.getBytes()), sec, vt), new Slice(v.getBytes())); + } +} diff --git a/leveldb/src/test/java/org/iq80/leveldb/iterator/MergingIteratorTest.java b/leveldb/src/test/java/org/iq80/leveldb/iterator/MergingIteratorTest.java new file mode 100644 index 0000000..6646883 --- /dev/null +++ b/leveldb/src/test/java/org/iq80/leveldb/iterator/MergingIteratorTest.java @@ -0,0 +1,187 @@ +/* + * Copyright (C) 2011 the original author or authors. 
+ * See the notice.md file distributed with this work for additional + * information regarding copyright ownership. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.iq80.leveldb.iterator; + +import com.google.common.collect.Lists; +import com.google.common.collect.Maps; +import org.iq80.leveldb.impl.InternalKey; +import org.iq80.leveldb.impl.InternalKeyComparator; +import org.iq80.leveldb.impl.ValueType; +import org.iq80.leveldb.table.BytewiseComparator; +import org.iq80.leveldb.util.Slice; +import org.iq80.leveldb.util.Slices; +import org.testng.Assert; +import org.testng.annotations.Test; + +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; +import java.util.Comparator; +import java.util.Iterator; +import java.util.List; +import java.util.Map; +import java.util.Random; +import java.util.function.Supplier; +import java.util.stream.Collectors; +import java.util.stream.IntStream; + +import static org.iq80.leveldb.iterator.IteratorTestUtils.assertValidKV; +import static org.iq80.leveldb.iterator.IteratorTestUtils.key; +import static org.testng.Assert.assertEquals; +import static org.testng.Assert.assertFalse; +import static org.testng.Assert.assertTrue; + +public class MergingIteratorTest +{ + @Test + public void testCantReuseAfterClose() + { + InternalKeyComparator comparator = new InternalKeyComparator(new BytewiseComparator()); + InternalIterator b = toIter(comparator, 
Arrays.asList(Maps.immutableEntry(key("B", 4, ValueType.VALUE), Slices.EMPTY_SLICE))); + MergingIterator mergingIterator1 = new MergingIterator(Arrays.asList(b), comparator); + mergingIterator1.close(); + Assert.assertThrows(mergingIterator1::close); + Assert.assertThrows(b::close); + } + + @Test + public void testRandomizeGroups() + { + InternalKeyComparator comparator = new InternalKeyComparator(new BytewiseComparator()); + List internalKeys = Arrays.asList( + key("A", 1, ValueType.DELETION), + key("B", 4, ValueType.VALUE), + key("B", 3, ValueType.VALUE), + key("B", 2, ValueType.VALUE), + key("C", 5, ValueType.VALUE), + key("E", 6, ValueType.VALUE), + key("H", 7, ValueType.DELETION), + key("I", 8, ValueType.VALUE), + key("K", 9, ValueType.VALUE), + key("L", 11, ValueType.DELETION), + key("L", 10, ValueType.DELETION), + key("M", 12, ValueType.DELETION), + key("O", 13, ValueType.VALUE) + ); + List values = IntStream.range(0, internalKeys.size()).mapToObj(i -> new Slice(String.valueOf(i).getBytes())).collect(Collectors.toList()); + + for (int i = 0; i < 100; i++) { + for (Supplier> randomGroup : randomGroups(internalKeys, values, comparator)) { + try (MergingIterator mergingIterator = new MergingIterator(randomGroup.get(), comparator)) { + //can iterate without calling seek first + int idx = 0; + while (mergingIterator.next()) { + assertValidKV(mergingIterator, internalKeys.get(idx), values.get(idx)); + idx++; + } + assertEquals(idx, internalKeys.size()); + + //ensure we can restart safely + assertTrue(mergingIterator.seekToFirst()); + assertValidKV(mergingIterator, internalKeys.get(0), values.get(0)); + + //seeking unknown key after last + assertFalse(mergingIterator.seek(key("Z", 2, ValueType.VALUE))); + assertFalse(mergingIterator.valid()); + + //seeking + + //can use iterator in revers order after reaching the end + idx = internalKeys.size(); + for (boolean ok = mergingIterator.seekToLast(); ok; ok = mergingIterator.prev()) { + idx--; + 
assertValidKV(mergingIterator, internalKeys.get(idx), values.get(idx)); + } + assertEquals(idx, 0); + } + //can iterate with seekFirst + try (MergingIterator mergingIterator = new MergingIterator(randomGroup.get(), comparator)) { + //can iterate without calling seek first + int idx = 0; + for (boolean ok = mergingIterator.seekToFirst(); ok; ok = mergingIterator.next()) { + assertValidKV(mergingIterator, internalKeys.get(idx), values.get(idx)); + idx++; + } + assertFalse(mergingIterator.valid()); + } + try (MergingIterator mergingIterator = new MergingIterator(randomGroup.get(), comparator)) { + //start invalid + assertFalse(mergingIterator.valid()); + + //find key with distinct sequence + assertTrue(mergingIterator.seek(key("B", 200, ValueType.VALUE))); + assertValidKV(mergingIterator, internalKeys.get(1), values.get(1)); + + assertTrue(mergingIterator.prev()); + assertValidKV(mergingIterator, internalKeys.get(0), values.get(0)); + assertTrue(mergingIterator.next()); + assertValidKV(mergingIterator, internalKeys.get(1), values.get(1)); + mergingIterator.prev(); + mergingIterator.prev(); + mergingIterator.next(); + assertValidKV(mergingIterator, internalKeys.get(0), values.get(0)); + + assertTrue(mergingIterator.seek(key("B", 2, ValueType.VALUE))); + assertValidKV(mergingIterator, internalKeys.get(3), values.get(3)); + assertTrue(mergingIterator.seek(key("B", 3, ValueType.VALUE))); + assertValidKV(mergingIterator, internalKeys.get(2), values.get(2)); + assertTrue(mergingIterator.seek(key("B", 4, ValueType.VALUE))); + assertValidKV(mergingIterator, internalKeys.get(1), values.get(1)); + + //find key that don't exist due to previous sequence + assertTrue(mergingIterator.seek(key("B", 1, ValueType.VALUE))); + assertValidKV(mergingIterator, internalKeys.get(4), values.get(4)); + + } + try (MergingIterator mergingIterator = new MergingIterator(randomGroup.get(), comparator)) { + assertFalse(mergingIterator.seek(key("Z", 2, ValueType.VALUE))); + 
assertFalse(mergingIterator.valid()); + } + } + } + } + + /* + Create some random groups of keys to ensure merge is ordering correctly the output + */ + private List>> randomGroups(List internalKeys, List values, Comparator comparator) + { + List>> multipleSets = new ArrayList<>(); + Random random = new Random(); + for (int groups = 1; groups < internalKeys.size(); groups++) { + Iterator iterator = new ArrayList<>(internalKeys).iterator(); + Iterator valIter = values.iterator(); + List>> splitGroups = new ArrayList<>(); + for (int i = 0; i < groups; i++) { + splitGroups.add(Lists.newArrayList(Maps.immutableEntry(iterator.next(), valIter.next()))); + } + while (iterator.hasNext()) { + int i = random.nextInt(splitGroups.size()); + splitGroups.get(i).add(Maps.immutableEntry(iterator.next(), valIter.next())); + } + multipleSets.add(() -> splitGroups.stream().map(e -> toIter(comparator, e)).collect(Collectors.toList())); + } + Collections.shuffle(multipleSets); + return multipleSets; + } + + private InternalIterator toIter(Comparator comparator, List> of1) + { + return IteratorTestUtils.asInternalIterator(SeekingIterators.fromSortedList(of1, Map.Entry::getKey, Map.Entry::getValue, comparator)); + } +} diff --git a/leveldb/src/test/java/org/iq80/leveldb/iterator/SeekingDBIteratorAdapter.java b/leveldb/src/test/java/org/iq80/leveldb/iterator/SeekingDBIteratorAdapter.java new file mode 100644 index 0000000..22a9207 --- /dev/null +++ b/leveldb/src/test/java/org/iq80/leveldb/iterator/SeekingDBIteratorAdapter.java @@ -0,0 +1,102 @@ +/* + * Copyright (C) 2011 the original author or authors. + * See the notice.md file distributed with this work for additional + * information regarding copyright ownership. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.iq80.leveldb.iterator; + +import org.iq80.leveldb.DBIterator; + +import java.util.Map; +import java.util.function.Function; + +public class SeekingDBIteratorAdapter extends ASeekingIterator +{ + private final DBIterator iterator; + private final Function toKey; + private final Function key; + private final Function value; + private Map.Entry entry; + + private SeekingDBIteratorAdapter(DBIterator iterator, Function toKey, Function key, Function value) + { + this.iterator = iterator; + this.toKey = toKey; + this.key = key; + this.value = value; + } + + public static SeekingIterator toSeekingIterator(DBIterator iterator, Function toKey, Function key, Function value) + { + return new SeekingDBIteratorAdapter<>(iterator, toKey, key, value); + } + + @Override + protected boolean internalSeekToFirst() + { + iterator.seekToFirst(); + entry = iterator.hasNext() ? iterator.next() : null; + return entry != null; + } + + @Override + protected boolean internalSeekToLast() + { + iterator.seekToLast(); + entry = iterator.hasPrev() ? iterator.prev() : null; + return entry != null; + } + + @Override + protected boolean internalSeek(K key) + { + iterator.seek(this.toKey.apply(key)); + entry = iterator.hasNext() ? iterator.next() : null; + return entry != null; + } + + @Override + protected boolean internalNext(boolean switchDirection) + { + entry = iterator.hasNext() ? iterator.next() : null; + return entry != null; + } + + @Override + protected boolean internalPrev(boolean switchDirection) + { + entry = iterator.hasPrev() ? 
iterator.prev() : null; + return entry != null; + } + + @Override + protected K internalKey() + { + return key.apply(entry.getKey()); + } + + @Override + protected V internalValue() + { + return value.apply(entry.getValue()); + } + + @Override + protected void internalClose() + { + entry = null; + iterator.close(); + } +} diff --git a/leveldb/src/test/java/org/iq80/leveldb/iterator/SortedCollectionIteratorTest.java b/leveldb/src/test/java/org/iq80/leveldb/iterator/SortedCollectionIteratorTest.java new file mode 100644 index 0000000..ef95574 --- /dev/null +++ b/leveldb/src/test/java/org/iq80/leveldb/iterator/SortedCollectionIteratorTest.java @@ -0,0 +1,240 @@ +/* + * Copyright (C) 2011 the original author or authors. + * See the notice.md file distributed with this work for additional + * information regarding copyright ownership. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.iq80.leveldb.iterator; + +import com.google.common.collect.Iterators; +import com.google.common.collect.Lists; +import org.testng.Assert; +import org.testng.annotations.Test; + +import java.io.IOException; +import java.util.AbstractMap; +import java.util.Collections; +import java.util.Iterator; +import java.util.List; +import java.util.Map; +import java.util.NoSuchElementException; + +import static org.testng.Assert.assertEquals; +import static org.testng.Assert.assertFalse; +import static org.testng.Assert.assertTrue; + +public class SortedCollectionIteratorTest +{ + List> sampleEntries1 = Lists.newArrayList( + entry("k1", "v1"), entry("k2", "v2"), entry("k3", "v3"), entry("k3", "v3"), entry("k4", "v4"), entry("k5", "v5"), entry("k7", "v5") + ); + + protected SeekingIterator iterFactory(List> data) + { + return new SortedCollectionIterator<>(data, Map.Entry::getKey, Map.Entry::getValue, String::compareTo); + } + + @Test + public void testSeekAfterLast() throws Exception + { + SeekingIterator iter = iterFactory(sampleEntries1); + assertFalse(iter.seek("k8")); + assertTrue(iter.seek("k5")); + assertEntryEquals(iter, sampleEntries1.get(5)); + assertTrue(iter.seek("k1")); + assertEntryEquals(iter, sampleEntries1.get(0)); + assertTrue(iter.seek("k6")); + assertEntryEquals(iter, sampleEntries1.get(6)); + assertFalse(iter.next()); + } + + @Test + public void testForwardScanAfterSeek() throws Exception + { + int count = 0; + SeekingIterator iter = iterFactory(sampleEntries1); + assertFalse(iter.valid()); + Iterator> iterator = sampleEntries1.iterator(); + Iterators.advance(iterator, 2); + for (boolean valid = iter.seek(sampleEntries1.get(2).getKey()); valid; valid = iter.next()) { + assertEntryEquals(iter, iterator.next()); + count++; + } + assertFalse(iter.next()); + assertEndAndClose(iter); + assertEquals(count, sampleEntries1.size() - 2); + } + + @Test + public void testForwardScanAfterSeekFirst() throws Exception + { + SeekingIterator iter = 
iterFactory(sampleEntries1); + assertFalse(iter.valid()); + validateAll(iter, iter.seekToFirst(), sampleEntries1); + validateAll(iter, iter.seekToFirst(), sampleEntries1); + assertFalse(iter.next()); + assertEndAndClose(iter); + } + + @Test + public void testForwardScan() throws Exception + { + SeekingIterator iter = iterFactory(sampleEntries1); + assertFalse(iter.valid()); + validateAll(iter, iter.next(), sampleEntries1); + validateAll(iter, iter.seekToFirst(), sampleEntries1); + assertFalse(iter.next()); + assertEndAndClose(iter); + } + + @Test + public void testReverseScan() throws Exception + { + SeekingIterator iter = iterFactory(sampleEntries1); + assertFalse(iter.valid()); + assertFalse(iter.prev()); + assertEndAndClose(iter); + } + + @Test + public void testReverseScanAfterSeekLast() throws Exception + { + SeekingIterator iter = iterFactory(sampleEntries1); + assertFalse(iter.valid()); + validateAllReverse(iter, iter.seekToLast(), sampleEntries1); + validateAllReverse(iter, iter.seekToLast(), sampleEntries1); + assertFalse(iter.prev()); + assertTrue(iter.next()); + assertFalse(iter.prev()); + assertEndAndClose(iter); + } + + @Test + public void testReverseScanAfterSeekFirst() throws Exception + { + SeekingIterator iter = iterFactory(sampleEntries1); + assertFalse(iter.valid()); + assertTrue(iter.seekToFirst()); + assertFalse(iter.prev()); + assertEndAndClose(iter); + } + + @Test + public void testForwardScanAfterSeekLast() throws Exception + { + SeekingIterator iter = iterFactory(sampleEntries1); + assertFalse(iter.valid()); + assertTrue(iter.seekToLast()); + assertFalse(iter.next()); + assertEndAndClose(iter); + } + + @Test + public void testEmpty() throws Exception + { + { + SeekingIterator iter = iterFactory(Collections.emptyList()); + assertFalse(iter.valid()); + assertFalse(iter.seekToLast()); + assertFalse(iter.next()); + assertFalse(iter.prev()); + assertEndAndClose(iter); + } + { + SeekingIterator iter = iterFactory(Collections.emptyList()); + 
assertFalse(iter.valid()); + assertFalse(iter.seekToLast()); + assertFalse(iter.prev()); + assertFalse(iter.next()); + assertEndAndClose(iter); + } + { + SeekingIterator iter = iterFactory(Collections.emptyList()); + assertFalse(iter.next()); + assertFalse(iter.valid()); + assertFalse(iter.prev()); + assertFalse(iter.seekToLast()); + assertFalse(iter.seekToFirst()); + assertEndAndClose(iter); + } + } + + @Test + public void testReverseAfterLastNext() throws Exception + { + SeekingIterator iter = iterFactory(sampleEntries1); + validateAll(iter, iter.next(), sampleEntries1); + validateAllReverse(iter, iter.prev(), sampleEntries1); + assertEndAndClose(iter); + } + + @Test + public void testNextAfterLastPrev() throws Exception + { + SeekingIterator iter = iterFactory(sampleEntries1); + validateAllReverse(iter, iter.seekToLast(), sampleEntries1); + validateAll(iter, iter.next(), sampleEntries1); + assertEndAndClose(iter); + } + + private void validateAll(SeekingIterator iter, boolean valid1, List> data) + { + int count = 0; + Iterator> iterator = data.iterator(); + for (boolean valid = valid1; valid; valid = iter.next()) { + assertEntryEquals(iter, iterator.next()); + count++; + } + assertEquals(count, data.size()); + } + + private void validateAllReverse(SeekingIterator iter, boolean valid1, List> data) + { + int count = 0; + Iterator> iterator = Lists.reverse(data).iterator(); + for (boolean valid = valid1; valid; valid = iter.prev()) { + assertEntryEquals(iter, iterator.next()); + count++; + } + assertEquals(count, data.size()); + } + + private void assertEndAndClose(SeekingIterator iter) throws IOException + { + assertFalse(iter.valid()); + Assert.assertThrows(NoSuchElementException.class, iter::key); + Assert.assertThrows(NoSuchElementException.class, iter::value); + iter.close(); + //after close, call should not succeed + Assert.assertThrows(iter::next); + Assert.assertThrows(iter::prev); + Assert.assertThrows(iter::seekToFirst); + 
Assert.assertThrows(iter::seekToLast); + Assert.assertThrows(() -> iter.seek("k2")); + } + + private void assertEntryEquals(SeekingIterator iter, Map.Entry next) + { + assertTrue(iter.valid()); + assertEquals(iter.key(), next.getKey()); + assertEquals(iter.key(), next.getKey()); + assertEquals(iter.value(), next.getValue()); + assertEquals(iter.value(), next.getValue()); + } + + private Map.Entry entry(String k, String v) + { + return new AbstractMap.SimpleEntry<>(k, v); + } +} diff --git a/leveldb/src/test/java/org/iq80/leveldb/memenv/MemEnvTest.java b/leveldb/src/test/java/org/iq80/leveldb/memenv/MemEnvTest.java new file mode 100644 index 0000000..f0b2896 --- /dev/null +++ b/leveldb/src/test/java/org/iq80/leveldb/memenv/MemEnvTest.java @@ -0,0 +1,396 @@ +/* + * Copyright (C) 2011 the original author or authors. + * See the notice.md file distributed with this work for additional + * information regarding copyright ownership. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.iq80.leveldb.memenv; + +import com.google.common.collect.Lists; +import org.iq80.leveldb.Options; +import org.iq80.leveldb.env.DbLock; +import org.iq80.leveldb.env.Env; +import org.iq80.leveldb.env.File; +import org.iq80.leveldb.env.RandomInputFile; +import org.iq80.leveldb.env.SequentialFile; +import org.iq80.leveldb.env.WritableFile; +import org.iq80.leveldb.impl.DbImpl; +import org.iq80.leveldb.iterator.DBIteratorAdapter; +import org.iq80.leveldb.util.DynamicSliceOutput; +import org.iq80.leveldb.util.Slice; +import org.iq80.leveldb.util.Slices; +import org.testng.Assert; +import org.testng.annotations.Test; + +import java.io.ByteArrayOutputStream; +import java.io.IOException; +import java.nio.ByteBuffer; +import java.util.List; + +import static java.nio.charset.StandardCharsets.UTF_8; +import static org.testng.Assert.assertEquals; +import static org.testng.Assert.assertFalse; +import static org.testng.Assert.assertNotNull; +import static org.testng.Assert.assertTrue; +import static org.testng.internal.junit.ArrayAsserts.assertArrayEquals; + +public class MemEnvTest +{ + @Test + public void testNewFineName() + { + Env env = MemEnv.createEnv(); + testUnexisting(env); + } + + private void testUnexisting(Env env) + { + File file = env.toFile("/roo/doNotExist"); + assertEquals(file.getName(), "doNotExist"); + assertEquals(file.getPath(), "/roo/doNotExist"); + assertFalse(file.exists()); + assertFalse(file.isDirectory()); + assertFalse(file.isFile()); + assertEquals(file.length(), 0); + } + + @Test + public void testCantBeFileAndDir() + { + File file = MemEnv.createEnv().toFile("/a/b"); + assertFalse(file.isFile()); + assertFalse(file.isDirectory()); + assertFalse(file.exists()); + assertEquals(file.getName(), "b"); + assertEquals(file.getPath(), "/a/b"); + assertTrue(file.mkdirs()); + assertTrue(file.isDirectory()); + assertFalse(file.isFile()); + assertTrue(file.exists()); + } + + @Test + public void testCantCreateFileIfDirDoesNotExist() + { + Env 
env = MemEnv.createEnv(); + File abc = env.createTempDir("abc"); + File child = abc.child("a").child("b"); + Assert.assertThrows(() -> env.writeStringToFileSync(child, "newContent")); + } + + @Test + public void testTreeIsolated() throws IOException + { + Env env = MemEnv.createEnv(); + File abc = env.toFile("/dir"); + assertTrue(abc.mkdirs()); + File child = abc.child("a").child("b"); + File child1 = abc.child("c").child("b"); + child.mkdirs(); + child1.mkdirs(); + //write content on both trees + // /dir/a/b/c + // /dir/c/c/c + env.writeStringToFileSync(child.child("c"), "content2"); + env.writeStringToFileSync(child1.child("c"), "content1"); + assertEquals(abc.listFiles().size(), 2); + assertEquals(abc.child("a").listFiles().size(), 1); + assertEquals(abc.child("c").listFiles().size(), 1); + assertEquals(abc.child("y").listFiles().size(), 0); + assertEquals(abc.child("a").listFiles().get(0), env.toFile("/dir/a/b")); + // write content on two distinct sub tree + assertEquals(env.readFileToString(env.toFile("/dir/a/b/c")), "content2"); + assertEquals(env.readFileToString(env.toFile("/dir/c/b/c")), "content1"); + + //delete one of the trees + env.toFile("/dir/a").deleteRecursively(); + assertEquals(abc.listFiles().size(), 1); + + // unable to read delete file content + Assert.assertThrows(IOException.class, () -> env.readFileToString(env.toFile("/dir/a/b/c"))); + + //can still access non deleted file content + assertEquals(env.readFileToString(env.toFile("/dir/c/b/c")), "content1"); + assertEquals(abc.child("a").child("b").child("c").getParentFile(), abc.child("a").child("b")); + } + + @Test + public void testTempDir() throws IOException + { + Env env = MemEnv.createEnv(); + File file = env.createTempDir("prefixme"); + assertTrue(file.isDirectory()); + assertFalse(file.isFile()); + assertTrue(file.exists()); + assertEquals(file.listFiles().size(), 0); + + File current = file.child("current"); + assertEquals(current.getName(), "current"); + 
assertFalse(current.exists()); + env.writeStringToFileSync(current, "newContent"); + assertTrue(current.exists()); + assertTrue(current.isFile()); + + List files = file.listFiles(); + assertEquals(files.size(), 1); + assertEquals(files.get(0), current); + + testUnexisting(env); + } + + @Test + public void testBasic() throws IOException + { + Env env = MemEnv.createEnv(); + File file = env.toFile("/dir"); + assertTrue(file.mkdirs()); + + // Check that the directory is empty. + File nonExistingFile = env.toFile("/dir/non_existent"); + assertFalse(nonExistingFile.exists()); + assertEquals(nonExistingFile.length(), 0L); + assertEquals(file.listFiles().size(), 0); + + // Create a file. + assertFalse(env.toFile("/dir/f").exists()); + WritableFile f = env.newWritableFile(file.child("f")); + assertNotNull(f); + assertEquals(env.toFile("/dir/f").length(), 0); + f.close(); + + // Check that the file exists. + File file1 = env.toFile("/dir/f"); + assertTrue(file1.exists()); + assertEquals(file1.length(), 0L); + List files = env.toFile("/dir").listFiles(); + assertEquals(files.size(), 1); + assertEquals(files.get(0), file1); + + // Write to the file. + WritableFile writableFile = env.newWritableFile(file1); + writableFile.append(slice("abc")); + writableFile.close(); + + // Check that append works. + WritableFile writableFile1 = env.newAppendableFile(env.toFile("/dir/f")); + assertEquals(env.toFile("/dir/f").length(), 3); + writableFile1.append(slice("hello")); + writableFile1.close(); + + // Check for expected size. + assertEquals(env.toFile("/dir/f").length(), 8); + + // Check that renaming works. 
+ assertFalse(nonExistingFile.renameTo(env.toFile("/dir/g"))); + assertTrue(env.toFile("/dir/f").renameTo(env.toFile("/dir/g"))); + assertFalse(env.toFile("/dir/f").exists()); + assertTrue(env.toFile("/dir/g").exists()); + assertEquals(env.toFile("/dir/g").length(), 8); + assertEquals(env.readFileToString(env.toFile("/dir/g")), "abchello"); + + // Check that opening non-existent file fails. + Assert.assertThrows(() -> env.newSequentialFile(nonExistingFile)); + Assert.assertThrows(() -> env.newRandomAccessFile(nonExistingFile)); + Assert.assertThrows(() -> env.readFileToString(nonExistingFile)); + + // Check that deleting works. + assertFalse(nonExistingFile.delete()); + assertFalse(nonExistingFile.deleteRecursively()); + assertTrue(env.toFile("/dir/g").delete()); + assertFalse(env.toFile("/dir/g").exists()); + assertEquals(env.toFile("/dir").listFiles().size(), 0); + } + + @Test + public void testMkdirs() + { + Env env = MemEnv.createEnv(); + File dir = env.toFile("/dir"); + assertFalse(dir.isDirectory()); + assertTrue(dir.mkdirs()); + assertTrue(dir.isDirectory()); + } + + @Test + public void testReadWrite() throws IOException + { + Env env = MemEnv.createEnv(); + File dir = env.toFile("/dir"); + assertTrue(dir.mkdirs()); + File f = dir.child("f"); + WritableFile writableFile = env.newWritableFile(f); + writableFile.append(slice("hello ")); + writableFile.append(slice("world")); + writableFile.close(); + + // Read sequentially. + SequentialFile sequentialFile = env.newSequentialFile(f); + assertEquals(readSeq(sequentialFile, 5), slice("hello")); + sequentialFile.skip(1); + assertEquals(readSeq(sequentialFile, 1000), slice("world")); + assertEquals(readSeq(sequentialFile, 1000), slice("")); + sequentialFile.skip(100); + assertEquals(readSeq(sequentialFile, 1000), slice("")); // Try to skip past end of file. + sequentialFile.close(); + + // Random reads. 
+ RandomInputFile randomInputFile = env.newRandomAccessFile(env.toFile("/dir/f")); + assertEquals(slice(randomInputFile.read(6, 5)), slice("world")); + assertEquals(slice(randomInputFile.read(0, 5)), slice("hello")); + assertEquals(slice(randomInputFile.read(10, 100)), slice("d")); + + // Too high offset. + Assert.assertThrows(() -> randomInputFile.read(1000, 5)); + randomInputFile.close(); + } + + private Slice readSeq(SequentialFile sequentialFile, int atMost) throws IOException + { + DynamicSliceOutput dynamicSliceOutput = new DynamicSliceOutput(atMost * 2); + int read = sequentialFile.read(atMost, dynamicSliceOutput); + Slice slice = dynamicSliceOutput.slice(); + if (read != -1) { + assertEquals(read, slice.length()); + } + else { + assertEquals(slice.length(), 0); + } + return slice; + } + + @Test(expectedExceptions = IOException.class) + public void testDbLockOnInvalidPath() throws IOException + { + Env env = MemEnv.createEnv(); + env.tryLock(env.toFile("/dir/a")); + } + + @Test + public void testDbLock() throws IOException + { + Env env = MemEnv.createEnv(); + File lockFile = env.toFile("/dir/a"); + assertTrue(lockFile.getParentFile().mkdirs()); + DbLock dbLock = env.tryLock(lockFile); + assertTrue(dbLock.isValid()); + dbLock.release(); + assertFalse(dbLock.isValid()); + } + + @Test + public void testMisc() throws IOException + { + Env env = MemEnv.createEnv(); + File test = env.createTempDir("test"); + assertFalse(test.getName().isEmpty()); + WritableFile writableFile = env.newWritableFile(test.child("b")); + // These are no-ops, but we test they return success. 
+ // TODO writableFile.sync(); + writableFile.force(); + writableFile.close(); + } + + @Test + public void testLargeFiles() throws IOException + { + int writeSize = 300 * 1024; + DynamicSliceOutput dynamicSliceOutput = new DynamicSliceOutput(2 * writeSize); + for (int i = 0; i < writeSize; i++) { + dynamicSliceOutput.writeByte((byte) i); + } + Slice data = dynamicSliceOutput.slice(); + Env env = MemEnv.createEnv(); + env.toFile("/dir").mkdirs(); + WritableFile writableFile = env.newWritableFile(env.toFile("/dir/f")); + writableFile.append(slice("foo")); + writableFile.append(dynamicSliceOutput.slice()); + writableFile.close(); + + SequentialFile sequentialFile = env.newSequentialFile(env.toFile("/dir/f")); + assertEquals(readSeq(sequentialFile, 3), slice("foo")); + int read = 0; + ByteArrayOutputStream byteArrayOutputStream = new ByteArrayOutputStream(); + while (read < writeSize) { + Slice slice = readSeq(sequentialFile, 3); + read += slice.length(); + byteArrayOutputStream.write(slice.getBytes()); + } + byte[] bytes = byteArrayOutputStream.toByteArray(); + assertEquals(Slices.wrappedBuffer(bytes), data); + sequentialFile.close(); + } + + @Test + public void testOverwriteOpenFile() throws IOException + { + Env env = MemEnv.createEnv(); + String writeData = "Write #1 data"; + File prefix = env.createTempDir("prefix"); + File child = prefix.child("leveldb-TestFile.dat"); + env.writeStringToFileSync(child, writeData); + RandomInputFile randomInputFile = env.newRandomAccessFile(child); + String write2Data = "Write #2 data"; + env.writeStringToFileSync(child, write2Data); + + // Verify that overwriting an open file will result in the new file data + // being read from files opened before the write. 
+ assertEquals(slice(randomInputFile.read(0, writeData.length())), slice(write2Data)); + randomInputFile.close(); + } + + @Test + public void testDbTest() throws IOException + { + Options options1 = new Options(); + options1.createIfMissing(true); + Env env = MemEnv.createEnv(); + + List keys = Lists.newArrayList(slice("aaa"), slice("bbb"), slice("ccc")); + List values = Lists.newArrayList(slice("foo"), slice("bar"), slice("baz")); + + try (DbImpl db = new DbImpl(options1, "/dir/db", env)) { + for (int i = 0; i < keys.size(); i++) { + db.put(keys.get(i).getBytes(), values.get(i).getBytes()); + } + for (int i = 0; i < keys.size(); i++) { + assertEquals(Slices.wrappedBuffer(db.get(keys.get(i).getBytes())), values.get(i)); + } + try (DBIteratorAdapter iterator = db.iterator()) { + for (int i = 0; i < keys.size(); i++) { + assertTrue(iterator.hasNext()); + DBIteratorAdapter.DbEntry next = iterator.next(); + assertArrayEquals(next.getKey(), keys.get(i).getBytes()); + assertArrayEquals(next.getValue(), values.get(i).getBytes()); + } + assertFalse(iterator.hasNext()); + } + db.compactRange(null, null); + for (int i = 0; i < keys.size(); i++) { + assertEquals(Slices.wrappedBuffer(db.get(keys.get(i).getBytes())), values.get(i)); + } + } + } + + private Slice slice(ByteBuffer read) + { + byte[] dst = new byte[read.remaining()]; + read.get(dst); + return Slices.wrappedBuffer(dst); + } + + private static Slice slice(String value) + { + return Slices.copiedBuffer(value, UTF_8); + } +} diff --git a/leveldb/src/test/java/org/iq80/leveldb/memenv/MemFileTest.java b/leveldb/src/test/java/org/iq80/leveldb/memenv/MemFileTest.java new file mode 100644 index 0000000..2986b44 --- /dev/null +++ b/leveldb/src/test/java/org/iq80/leveldb/memenv/MemFileTest.java @@ -0,0 +1,94 @@ +/* + * Copyright (C) 2011 the original author or authors. + * See the notice.md file distributed with this work for additional + * information regarding copyright ownership. 
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.iq80.leveldb.memenv;
+
+import org.testng.Assert;
+import org.testng.annotations.Test;
+
+import java.util.Collections;
+
+import static org.testng.Assert.assertEquals;
+import static org.testng.Assert.assertFalse;
+import static org.testng.Assert.assertNotEquals;
+
+public class MemFileTest
+{
+    @Test
+    public void testEquals()
+    {
+        MemFs fs = new MemFs();
+        MemFile ab = MemFile.createMemFile(fs, "/a/b");
+        MemFile abParent = ab.getParentFile();
+        MemFile a = MemFile.createMemFile(new MemFs(), "/a");
+        MemFile ab1 = MemFile.createMemFile(new MemFs(), "/a").child("b");
+        MemFile ab1Parent = ab1.getParentFile();
+
+        assertEqualsFile(ab, ab1);
+        assertEqualsFile(abParent, ab1Parent);
+        assertEqualsFile(abParent, a);
+        assertEqualsFile(ab, MemFile.createMemFile(new MemFs(), "/a/b/"));
+        assertEqualsFile(ab, MemFile.createMemFile(new MemFs(), "a/b/"));
+
+        assertNotEquals(ab, a);
+        assertNotEquals(ab, MemFile.createMemFile(fs, "/a/e"));
+        assertNotEquals(ab, MemFile.createMemFile(fs, "/"));
+        assertNotEquals(ab.hashCode(), a.hashCode());
+        assertNotEquals(ab1.hashCode(), a.hashCode());
+        assertNotEquals(ab1, a); // was an exact duplicate of the first assertNotEquals(ab, a); cover ab1 vs a instead
+    }
+
+    @Test
+    public void testParent()
+    {
+        MemFs fs = new MemFs();
+        MemFile ab = MemFile.createMemFile(fs, "/a/b/c");
+        MemFile p = ab.getParentFile();
+        assertEqualsFile(p, MemFile.createMemFile(fs, "/a/b"));
+        p = p.getParentFile();
+        assertEqualsFile(p, MemFile.createMemFile(fs, "/a"));
+        p = p.getParentFile();
+        assertEqualsFile(p, MemFile.createMemFile(fs, "/"));
+        p = p.getParentFile();
+        assertEqualsFile(p, MemFile.createMemFile(fs, "/")); // parent of root stays root
+    }
+
+    @Test
+    public void testDefault()
+    {
+        MemFile ab = MemFile.createMemFile(new MemFs(), "/a/b");
+        assertEquals(ab.getName(), "b");
+        assertEquals(ab.getPath(), "/a/b");
+        assertFalse(ab.isDirectory());
+        assertFalse(ab.isFile());
+        assertFalse(ab.exists());
+        assertEquals(ab.listFiles(), Collections.emptyList());
+        Assert.assertThrows(() -> ab.child("/invalid")); // child() rejects names containing a separator
+    }
+
+    private static void assertEqualsFile(MemFile f1, MemFile f2)
+    {
+        assertEquals(f1, f2);
+        assertEquals(f1.getName(), f2.getName());
+        assertEquals(f1.getPath(), f2.getPath());
+        assertEquals(f1.isFile(), f2.isFile());
+        assertEquals(f1.isDirectory(), f2.isDirectory());
+        assertEquals(f1.exists(), f2.exists());
+        assertEquals(f1.getParentFile(), f2.getParentFile());
+        assertEquals(f1.hashCode(), f2.hashCode());
+    }
+}
diff --git a/leveldb/src/test/java/org/iq80/leveldb/memenv/MemFsTest.java b/leveldb/src/test/java/org/iq80/leveldb/memenv/MemFsTest.java
new file mode 100644
index 0000000..dc4efa3
--- /dev/null
+++ b/leveldb/src/test/java/org/iq80/leveldb/memenv/MemFsTest.java
@@ -0,0 +1,36 @@
+/*
+ * Copyright (C) 2011 the original author or authors.
+ * See the notice.md file distributed with this work for additional
+ * information regarding copyright ownership.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.iq80.leveldb.memenv;
+
+import org.iq80.leveldb.env.File;
+import org.testng.annotations.Test;
+
+import static org.testng.Assert.assertFalse;
+import static org.testng.Assert.assertTrue;
+
+public class MemFsTest
+{
+    @Test
+    public void testListFiles() // NOTE(review): body never calls listFiles(), it only checks temp-dir attributes — confirm the name matches the intent
+    {
+        File abc = MemEnv.createEnv().createTempDir("prefix"); // a freshly created temp dir must report as an existing directory
+        assertTrue(abc.isDirectory());
+        assertTrue(abc.exists());
+        assertFalse(abc.isFile());
+    }
+}
diff --git a/leveldb/src/test/java/org/iq80/leveldb/table/BlockHelper.java b/leveldb/src/test/java/org/iq80/leveldb/table/BlockHelper.java
new file mode 100644
index 0000000..f607db2
--- /dev/null
+++ b/leveldb/src/test/java/org/iq80/leveldb/table/BlockHelper.java
@@ -0,0 +1,174 @@
+/*
+ * Copyright (C) 2011 the original author or authors.
+ * See the notice.md file distributed with this work for additional
+ * information regarding copyright ownership.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */ +package org.iq80.leveldb.table; + +import com.google.common.collect.Iterables; +import org.iq80.leveldb.iterator.SeekingIterator; +import org.iq80.leveldb.util.Slice; +import org.iq80.leveldb.util.Slices; +import org.testng.Assert; + +import java.util.Arrays; +import java.util.List; +import java.util.Map.Entry; +import java.util.NoSuchElementException; +import java.util.function.Function; + +import static java.nio.charset.StandardCharsets.UTF_8; +import static org.iq80.leveldb.iterator.IteratorTestUtils.entry; +import static org.iq80.leveldb.util.SizeOf.SIZE_OF_BYTE; +import static org.iq80.leveldb.util.SizeOf.SIZE_OF_INT; +import static org.testng.Assert.assertEquals; +import static org.testng.Assert.assertFalse; +import static org.testng.Assert.assertThrows; +import static org.testng.Assert.assertTrue; + +public final class BlockHelper +{ + private BlockHelper() + { + } + + public static int estimateBlockSize(int blockRestartInterval, List entries) + { + if (entries.isEmpty()) { + return SIZE_OF_INT * 2; //restart[0] + restart size + } + int restartCount = (int) Math.ceil(1.0 * entries.size() / blockRestartInterval); + return estimateEntriesSize(blockRestartInterval, entries) + + (restartCount * SIZE_OF_INT) + + SIZE_OF_INT; + } + + @SafeVarargs + public static void assertSequence(SeekingIterator seekingIterator, Entry... entries) + { + assertSequence(seekingIterator, Arrays.asList(entries)); + } + + @SafeVarargs + public static void assertReverseSequence(SeekingIterator seekingIterator, Entry... 
entries) + { + assertReverseSequence(seekingIterator, Arrays.asList(entries)); + } + + private static void assertSequence(SeekingIterator seekingIterator, Iterable> entries, Function, Boolean> next) + { + Assert.assertNotNull(seekingIterator, "blockIterator is not null"); + boolean valid = true; + for (Entry entry : entries) { + assertTrue(valid, "Last method should have return true"); + assertTrue(seekingIterator.valid(), "Expecting next element to be " + entry); + assertEntryEquals(entry(seekingIterator), entry); + valid = next.apply(seekingIterator); + } + assertFalse(valid && !Iterables.isEmpty(entries), "Last method should have return false"); + assertFalse(seekingIterator.valid()); + + assertFalse(next.apply(seekingIterator), "expected no more elements"); + assertThrows(NoSuchElementException.class, seekingIterator::key); + assertThrows(NoSuchElementException.class, seekingIterator::value); + } + + public static void assertReverseSequence(SeekingIterator seekingIterator, Iterable> entries) + { + assertSequence(seekingIterator, entries, SeekingIterator::prev); + } + + public static void assertSequence(SeekingIterator seekingIterator, Iterable> entries) + { + assertSequence(seekingIterator, entries, SeekingIterator::next); + } + + public static void assertEntryEquals(Entry actual, Entry expected) + { + if (actual.getKey() instanceof Slice) { + assertSliceEquals((Slice) actual.getKey(), (Slice) expected.getKey()); + assertSliceEquals((Slice) actual.getValue(), (Slice) expected.getValue()); + } + else { + assertEquals(actual.getKey(), expected.getKey()); + assertEquals(actual.getValue(), expected.getValue()); + } + } + + public static void assertSliceEquals(Slice actual, Slice expected) + { + assertEquals(actual.toString(UTF_8), expected.toString(UTF_8)); + } + + public static String beforeString(Entry expectedEntry) + { + String key = expectedEntry.getKey(); + int lastByte = key.charAt(key.length() - 1); + return key.substring(0, key.length() - 1) + ((char) 
(lastByte - 1)); + } + + public static String afterString(Entry expectedEntry) + { + String key = expectedEntry.getKey(); + int lastByte = key.charAt(key.length() - 1); + return key.substring(0, key.length() - 1) + ((char) (lastByte + 1)); + } + + public static Slice before(Entry expectedEntry) + { + Slice slice = expectedEntry.getKey().copySlice(0, expectedEntry.getKey().length()); + int lastByte = slice.length() - 1; + slice.setByte(lastByte, slice.getUnsignedByte(lastByte) - 1); + return slice; + } + + public static Slice after(Entry expectedEntry) + { + Slice slice = expectedEntry.getKey().copySlice(0, expectedEntry.getKey().length()); + int lastByte = slice.length() - 1; + slice.setByte(lastByte, slice.getUnsignedByte(lastByte) + 1); + return slice; + } + + public static int estimateEntriesSize(int blockRestartInterval, List entries) + { + int size = 0; + Slice previousKey = null; + int restartBlockCount = 0; + for (BlockEntry entry : entries) { + int nonSharedBytes; + if (restartBlockCount < blockRestartInterval) { + nonSharedBytes = entry.getKey().length() - BlockBuilder.calculateSharedBytes(entry.getKey(), previousKey); + } + else { + nonSharedBytes = entry.getKey().length(); + restartBlockCount = 0; + } + size += nonSharedBytes + + entry.getValue().length() + + (SIZE_OF_BYTE * 3); // 3 bytes for sizes + + previousKey = entry.getKey(); + restartBlockCount++; + + } + return size; + } + + static BlockEntry createBlockEntry(String key, String value) + { + return new BlockEntry(Slices.copiedBuffer(key, UTF_8), Slices.copiedBuffer(value, UTF_8)); + } +} diff --git a/leveldb/src/test/java/org/iq80/leveldb/table/BlockTest.java b/leveldb/src/test/java/org/iq80/leveldb/table/BlockTest.java new file mode 100644 index 0000000..3e452b3 --- /dev/null +++ b/leveldb/src/test/java/org/iq80/leveldb/table/BlockTest.java @@ -0,0 +1,205 @@ +/* + * Copyright (C) 2011 the original author or authors. 
+ * See the notice.md file distributed with this work for additional + * information regarding copyright ownership. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.iq80.leveldb.table; + +import org.iq80.leveldb.util.Slice; +import org.iq80.leveldb.util.Slices; +import org.testng.Assert; +import org.testng.annotations.Test; + +import java.util.Collections; +import java.util.List; + +import static java.util.Arrays.asList; +import static org.testng.Assert.assertEquals; +import static org.testng.Assert.assertFalse; +import static org.testng.Assert.assertTrue; + +public class BlockTest +{ + @Test(expectedExceptions = IllegalArgumentException.class) + public void testEmptyBuffer() + throws Exception + { + new Block(Slices.EMPTY_SLICE, new BytewiseComparator()); + } + + @Test + public void testEmptyBlock() + throws Exception + { + blockTest(Integer.MAX_VALUE); + } + + @Test + public void testSingleEntry() + throws Exception + { + blockTest(Integer.MAX_VALUE, + BlockHelper.createBlockEntry("name", "dain sundstrom")); + } + + @Test + public void testMultipleEntriesWithNonSharedKey() + throws Exception + { + blockTest(Integer.MAX_VALUE, + BlockHelper.createBlockEntry("beer", "Lagunitas IPA"), + BlockHelper.createBlockEntry("scotch", "Highland Park")); + } + + @Test + public void testMultipleEntriesWithSharedKey() + throws Exception + { + blockTest(Integer.MAX_VALUE, + BlockHelper.createBlockEntry("beer/ale", "Lagunitas Little Sumpin’ Sumpin’"), + 
BlockHelper.createBlockEntry("beer/ipa", "Lagunitas IPA"), + BlockHelper.createBlockEntry("scotch", "Highland Park")); + } + + @Test + public void testMultipleEntriesWithNonSharedKeyAndRestartPositions() + throws Exception + { + List entries = asList( + BlockHelper.createBlockEntry("ale", "Lagunitas Little Sumpin’ Sumpin’"), + BlockHelper.createBlockEntry("ipa", "Lagunitas IPA"), + BlockHelper.createBlockEntry("stout", "Lagunitas Imperial Stout"), + BlockHelper.createBlockEntry("strong", "Lagavulin")); + + for (int i = 1; i < entries.size(); i++) { + blockTest(i, entries); + } + } + + @Test + public void testMultipleEntriesWithSharedKeyAndRestartPositions() + throws Exception + { + List entries = asList( + BlockHelper.createBlockEntry("beer/ale", "Lagunitas Little Sumpin’ Sumpin’"), + BlockHelper.createBlockEntry("beer/ipa", "Lagunitas IPA"), + BlockHelper.createBlockEntry("beer/stout", "Lagunitas Imperial Stout"), + BlockHelper.createBlockEntry("scotch/light", "Oban 14"), + BlockHelper.createBlockEntry("scotch/medium", "Highland Park"), + BlockHelper.createBlockEntry("scotch/strong", "Lagavulin")); + + for (int i = 1; i < entries.size(); i++) { + blockTest(i, entries); + } + } + + @Test + public void testNextPrev() + { + List entries = asList( + BlockHelper.createBlockEntry("beer/ale", "Lagunitas Little Sumpin’ Sumpin’"), + BlockHelper.createBlockEntry("beer/ipa", "Lagunitas IPA"), + BlockHelper.createBlockEntry("beer/stout", "Lagunitas Imperial Stout"), + BlockHelper.createBlockEntry("scotch/light", "Oban 14"), + BlockHelper.createBlockEntry("scotch/medium", "Highland Park"), + BlockHelper.createBlockEntry("scotch/strong", "Lagavulin")); + Block block = buildBLock(2, entries); + try (BlockIterator it = block.iterator()) { + assertTrue(it.next()); + assertEquals(entry(it), entries.get(0)); + assertFalse(it.prev()); + assertTrue(it.next()); + assertEquals(entry(it), entries.get(0)); + for (int i = 1; i < entries.size(); i++) { + assertTrue(it.next()); + 
assertEquals(entry(it), entries.get(i), "Entry #" + i + " should match its pair"); + assertTrue(it.prev()); + assertEquals(entry(it), entries.get(i - 1)); + assertTrue(it.next()); + assertEquals(entry(it), entries.get(i)); + } + assertFalse(it.next()); + assertTrue(it.prev()); + assertEquals(entry(it), entries.get(entries.size() - 1)); + } + } + + private static BlockEntry entry(BlockIterator it) + { + return new BlockEntry(it.key(), it.value()); + } + + private static void blockTest(int blockRestartInterval, BlockEntry... entries) + { + blockTest(blockRestartInterval, asList(entries)); + } + + private static Block buildBLock(int blockRestartInterval, List entries) + { + BlockBuilder builder = new BlockBuilder(256, blockRestartInterval, new BytewiseComparator()); + + for (BlockEntry entry : entries) { + builder.add(entry); + } + + assertEquals(builder.currentSizeEstimate(), BlockHelper.estimateBlockSize(blockRestartInterval, entries)); + Slice blockSlice = builder.finish(); + assertEquals(builder.currentSizeEstimate(), BlockHelper.estimateBlockSize(blockRestartInterval, entries)); + + Block block = new Block(blockSlice, new BytewiseComparator()); + assertEquals(block.size(), BlockHelper.estimateBlockSize(blockRestartInterval, entries)); + + return block; + } + + private static void blockTest(int blockRestartInterval, List entries) + { + Block block = buildBLock(blockRestartInterval, entries); + try (BlockIterator it = block.iterator()) { + assertTrue(it.next() || entries.isEmpty(), "Next should return validity of iterator"); + BlockHelper.assertSequence(it, entries); + + assertTrue(it.seekToFirst() || entries.isEmpty()); + BlockHelper.assertSequence(it, entries); + + for (BlockEntry entry : entries) { + List nextEntries = entries.subList(entries.indexOf(entry), entries.size()); + assertEquals(it.seek(entry.getKey()), !nextEntries.isEmpty()); + BlockHelper.assertSequence(it, nextEntries); + + assertEquals(it.seek(BlockHelper.before(entry)), !nextEntries.isEmpty()); 
+ BlockHelper.assertSequence(it, nextEntries); + + List entries1 = nextEntries.subList(1, nextEntries.size()); + assertEquals(it.seek(BlockHelper.after(entry)), !entries1.isEmpty()); + BlockHelper.assertSequence(it, entries1); + } + + it.seek(Slices.wrappedBuffer(new byte[] {(byte) 0xFF, (byte) 0xFF, (byte) 0xFF, (byte) 0xFF})); + BlockHelper.assertSequence(it, Collections.emptyList()); + } + BlockIterator iterator = block.iterator(); + iterator.seekToFirst(); + iterator.close(); + assertFalse(iterator.valid()); + //Should not be possible to use iterator after close + Assert.assertThrows(iterator::next); + Assert.assertThrows(iterator::key); + Assert.assertThrows(iterator::value); + Assert.assertThrows(iterator::seekToFirst); + Assert.assertThrows(iterator::seekToLast); + Assert.assertThrows(iterator::close); + } +} diff --git a/leveldb/src/test/java/org/iq80/leveldb/table/BloomFilterPolicyTest.java b/leveldb/src/test/java/org/iq80/leveldb/table/BloomFilterPolicyTest.java new file mode 100644 index 0000000..b14b3c5 --- /dev/null +++ b/leveldb/src/test/java/org/iq80/leveldb/table/BloomFilterPolicyTest.java @@ -0,0 +1,172 @@ +/* + * Copyright (C) 2011 the original author or authors. + * See the notice.md file distributed with this work for additional + * information regarding copyright ownership. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+package org.iq80.leveldb.table;
+
+import org.iq80.leveldb.util.Slice;
+import org.testng.Assert;
+import org.testng.annotations.Test;
+
+import java.nio.charset.StandardCharsets;
+import java.util.ArrayList;
+import java.util.List;
+
+/**
+ * @author Honore Vasconcelos
+ */
+public class BloomFilterPolicyTest
+{
+    public static final int BLOOM_BITS = 10; // bits of filter space per key
+    private byte[] filter = new byte[0]; // filter produced from the pending keys by build()
+    private final List<byte[]> keys = new ArrayList<>(); // keys pending inclusion in the next filter
+    private final BloomFilterPolicy policy = new BloomFilterPolicy(BLOOM_BITS);
+
+    @Test
+    public void emptyBloom() throws Exception
+    {
+        Assert.assertFalse(matches("hello"));
+        Assert.assertFalse(matches("world"));
+    }
+
+    @Test
+    public void smallBloom() throws Exception
+    {
+        add("hello");
+        add("world");
+        Assert.assertTrue(matches("hello"), "Key should be found");
+        Assert.assertTrue(matches("world"), "Key should be found");
+        Assert.assertFalse(matches("x"));
+        Assert.assertFalse(matches("foo"));
+    }
+
+    @Test
+    public void testVariableLength() throws Exception
+    {
+        // Count number of filters that significantly exceed the false positive rate
+        int mediocreFilters = 0;
+        int goodFilters = 0;
+
+        for (int length = 1; length <= 10000; length = nextLength(length)) {
+            reset();
+            for (int i = 0; i < length; i++) {
+                keys.add(intToBytes(i));
+            }
+            build();
+
+            Assert.assertTrue(filter.length <= (length * BLOOM_BITS / 8) + 40);
+
+            // All added keys must match
+            for (int i = 0; i < length; i++) {
+                Assert.assertTrue(matches(intToBytes(i)));
+            }
+
+            // Check false positive rate
+            double rate = falsePositiveRate();
+            System.err.printf("False positives: %5.2f%% @ length = %6d ; bytes = %6d\n",
+                    rate * 100.0, length, filter.length);
+
+            Assert.assertTrue(rate <= 0.02); // Must not be over 2%
+            if (rate > 0.0125) {
+                mediocreFilters++; // Allowed, but not too often
+            }
+            else {
+                goodFilters++;
+            }
+        }
+        System.err.printf("Filters: %d good, %d mediocre\n",
+                goodFilters, mediocreFilters);
+
+        Assert.assertTrue(mediocreFilters <= goodFilters / 5);
+
+    }
+
+    private double falsePositiveRate()
+    {
+        int result = 0;
+        for (int i = 0; i < 10000; i++) {
+            if (matches(intToBytes(i + 1000000000))) { // keys far outside the inserted range
+                result++;
+            }
+        }
+        return result / 10000.0;
+    }
+
+    private byte[] intToBytes(int value)
+    {
+        byte[] buffer = new byte[4]; // little-endian fixed32 encoding
+        buffer[0] = (byte) (value);
+        buffer[1] = (byte) (value >>> 8);
+        buffer[2] = (byte) (value >>> 16);
+        buffer[3] = (byte) (value >>> 24);
+        return buffer;
+    }
+
+    private void reset()
+    {
+        keys.clear();
+        filter = new byte[0];
+    }
+
+    private static int nextLength(int length)
+    {
+        if (length < 10) {
+            length += 1;
+        }
+        else if (length < 100) {
+            length += 10;
+        }
+        else if (length < 1000) {
+            length += 100;
+        }
+        else {
+            length += 1000;
+        }
+        return length;
+    }
+
+    private void add(String hello)
+    {
+        keys.add(getBytes(hello));
+    }
+
+    private boolean matches(String s)
+    {
+        return matches(getBytes(s));
+    }
+
+    private boolean matches(byte[] s)
+    {
+        if (!keys.isEmpty()) { // lazily rebuild the filter when keys are pending
+            build();
+        }
+        return policy.keyMayMatch(new Slice(s), new Slice(filter));
+    }
+
+    private byte[] getBytes(String s)
+    {
+        return s.getBytes(StandardCharsets.ISO_8859_1);
+    }
+
+    private void build()
+    {
+        List<Slice> keySlices = new ArrayList<>();
+        for (int i = 0; i < keys.size(); i++) {
+            keySlices.add(new Slice(keys.get(i)));
+        }
+        filter = policy.createFilter(keySlices);
+        keys.clear();
+    }
+}
diff --git a/leveldb/src/test/java/org/iq80/leveldb/table/FilterBlockReaderTest.java b/leveldb/src/test/java/org/iq80/leveldb/table/FilterBlockReaderTest.java
new file mode 100644
index 0000000..d56ae59
--- /dev/null
+++ b/leveldb/src/test/java/org/iq80/leveldb/table/FilterBlockReaderTest.java
@@ -0,0 +1,163 @@
+/*
+ * Copyright (C) 2011 the original author or authors.
+ * See the notice.md file distributed with this work for additional
+ * information regarding copyright ownership.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.iq80.leveldb.table;
+
+import org.iq80.leveldb.util.DynamicSliceOutput;
+import org.iq80.leveldb.util.Hash;
+import org.iq80.leveldb.util.Slice;
+import org.testng.annotations.Test;
+
+import java.io.IOException;
+import java.util.Arrays;
+import java.util.List;
+
+import static org.testng.Assert.*; // assertTrue and assertFalse
+
+/**
+ * @author Honore Vasconcelos
+ */
+public class FilterBlockReaderTest
+{
+    @Test public void testManyKeysSingleOffset() { // was an instance initializer: it ran on every construction and a failure surfaced as an instantiation error, not a test failure
+        final FilterBlockBuilder filterBlockBuilder = new FilterBlockBuilder(new BloomFilterPolicy(10));
+        filterBlockBuilder.startBlock(189);
+        for (int i = 0; i < 2000; ++i) {
+            filterBlockBuilder.addKey(new Slice(String.format("key%06d", i).getBytes()));
+        }
+        final Slice finish = filterBlockBuilder.finish();
+        final FilterBlockReader reader = new FilterBlockReader(new BloomFilterPolicy(10), finish);
+        for (int i = 0; i < 2000; ++i) {
+            assertTrue(reader.keyMayMatch(189, new Slice(String.format("key%06d", i).getBytes())));
+        }
+    }
+
+    private static class TestHashFilter implements FilterPolicy
+    {
+        @Override
+        public String name()
+        {
+            return "TestHashFilter";
+        }
+
+        @Override
+        public byte[] createFilter(List<Slice> keys)
+        {
+            final DynamicSliceOutput out = new DynamicSliceOutput(100); // filter = concatenation of each key's 4-byte hash
+            for (Slice key : keys) {
+                out.writeInt(Hash.hash(key.getRawArray(), key.getRawOffset(), key.length(), 1));
+            }
+            return out.slice().copyBytes();
+        }
+
+        @Override
+        public boolean keyMayMatch(Slice key, Slice filter)
+        {
+            final int hash = Hash.hash(key.getRawArray(), key.getRawOffset(), key.length(), 1);
+            for (int i = 0; i + 4 <= filter.length(); i += 4) {
+                if (hash == filter.getInt(i)) {
+                    return true;
+                }
+            }
+            return false;
+        }
+    }
+
+    @Test
+    public void testEmptyBuilder() throws Exception
+    {
+        FilterBlockBuilder builder = new FilterBlockBuilder(new TestHashFilter());
+        final Slice finish = builder.finish();
+        assertTrue(Arrays.equals(finish.copyBytes(), new byte[]{0, 0, 0, 0, 11}));
+        final FilterBlockReader reader = new FilterBlockReader(new TestHashFilter(), finish);
+        assertTrue(reader.keyMayMatch(0, new Slice("foo".getBytes())));
+        assertTrue(reader.keyMayMatch(100000, new Slice("foo".getBytes())));
+    }
+
+    @Test
+    public void testSingleChunk() throws IOException
+    {
+        FilterBlockBuilder builder = new FilterBlockBuilder(new TestHashFilter());
+        builder.startBlock(100);
+        builder.addKey(new Slice("foo".getBytes()));
+        builder.addKey(new Slice("bar".getBytes()));
+        builder.addKey(new Slice("box".getBytes()));
+        builder.startBlock(200);
+        builder.addKey(new Slice("box".getBytes()));
+        builder.startBlock(300);
+        builder.addKey(new Slice("hello".getBytes()));
+        Slice block = builder.finish();
+        final FilterBlockReader reader = new FilterBlockReader(new TestHashFilter(), block);
+        assertTrue(reader.keyMayMatch(100, new Slice("foo".getBytes())));
+        assertTrue(reader.keyMayMatch(100, new Slice("bar".getBytes())));
+        assertTrue(reader.keyMayMatch(100, new Slice("box".getBytes())));
+        assertTrue(reader.keyMayMatch(100, new Slice("hello".getBytes())));
+        assertTrue(reader.keyMayMatch(100, new Slice("foo".getBytes())));
+        assertFalse(reader.keyMayMatch(100, new Slice("missing".getBytes())));
+        assertFalse(reader.keyMayMatch(100, new Slice("other".getBytes())));
+    }
+
+    @Test
+    public void testMultiChunk()
+    {
+        FilterBlockBuilder builder = new FilterBlockBuilder(new TestHashFilter());
+
+        // First filter
+        builder.startBlock(0);
+        builder.addKey(new Slice("foo".getBytes()));
+        builder.startBlock(2000);
+        builder.addKey(new Slice("bar".getBytes()));
+
+        // Second filter
+        builder.startBlock(3100);
+        builder.addKey(new Slice("box".getBytes()));
+
+        // Third filter is empty
+
+        // Last filter
+        builder.startBlock(9000);
+        builder.addKey(new Slice("box".getBytes()));
+        builder.addKey(new Slice("hello".getBytes()));
+
+        Slice block = builder.finish();
+        final FilterBlockReader reader = new FilterBlockReader(new TestHashFilter(), block);
+
+        // Check first filter
+        assertTrue(reader.keyMayMatch(0, new Slice("foo".getBytes())));
+        assertTrue(reader.keyMayMatch(2000, new Slice("bar".getBytes())));
+        assertFalse(reader.keyMayMatch(0, new Slice("box".getBytes())));
+        assertFalse(reader.keyMayMatch(0, new Slice("hello".getBytes())));
+
+        // Check second filter
+        assertTrue(reader.keyMayMatch(3100, new Slice("box".getBytes())));
+        assertFalse(reader.keyMayMatch(3100, new Slice("foo".getBytes())));
+        assertFalse(reader.keyMayMatch(3100, new Slice("bar".getBytes())));
+        assertFalse(reader.keyMayMatch(3100, new Slice("hello".getBytes())));
+
+        // Check third filter (empty)
+        assertFalse(reader.keyMayMatch(4100, new Slice("foo".getBytes())));
+        assertFalse(reader.keyMayMatch(4100, new Slice("bar".getBytes())));
+        assertFalse(reader.keyMayMatch(4100, new Slice("box".getBytes())));
+        assertFalse(reader.keyMayMatch(4100, new Slice("hello".getBytes())));
+
+        // Check last filter
+        assertTrue(reader.keyMayMatch(9000, new Slice("box".getBytes())));
+        assertTrue(reader.keyMayMatch(9000, new Slice("hello".getBytes())));
+        assertFalse(reader.keyMayMatch(9000, new Slice("foo".getBytes())));
+        assertFalse(reader.keyMayMatch(9000, new Slice("bar".getBytes())));
+    }
+}
diff --git a/leveldb/src/test/java/org/iq80/leveldb/table/InMemoryTableTest.java b/leveldb/src/test/java/org/iq80/leveldb/table/InMemoryTableTest.java
new file mode 100644
index 0000000..9447643
--- /dev/null
+++ b/leveldb/src/test/java/org/iq80/leveldb/table/InMemoryTableTest.java
@@ -0,0 +1,31 @@
+/*
+ * Copyright (C) 2011 the original author or authors.
+ * See the notice.md file distributed with this work for additional
+ * information regarding copyright ownership.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.iq80.leveldb.table;
+
+import org.iq80.leveldb.env.Env;
+import org.iq80.leveldb.memenv.MemEnv;
+
+public class InMemoryTableTest
+    extends TableTest
+{
+    @Override
+    protected Env getEnv()
+    {
+        return MemEnv.createEnv(); // re-runs the inherited TableTest cases against the in-memory Env
+    }
+}
diff --git a/leveldb/src/test/java/org/iq80/leveldb/table/MMRandomInputFileTableTest.java b/leveldb/src/test/java/org/iq80/leveldb/table/MMRandomInputFileTableTest.java
new file mode 100644
index 0000000..71d80a8
--- /dev/null
+++ b/leveldb/src/test/java/org/iq80/leveldb/table/MMRandomInputFileTableTest.java
@@ -0,0 +1,33 @@
+/*
+ * Copyright (C) 2011 the original author or authors.
+ * See the notice.md file distributed with this work for additional
+ * information regarding copyright ownership.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.iq80.leveldb.table; + +import org.iq80.leveldb.env.Env; +import org.iq80.leveldb.fileenv.EnvImpl; +import org.iq80.leveldb.fileenv.MmapLimiter; + +public class MMRandomInputFileTableTest + extends TableTest +{ + @Override + protected Env getEnv() + { + //force MMap files + return EnvImpl.createEnv(MmapLimiter.newLimiter(1000)); + } +} diff --git a/leveldb/src/test/java/org/iq80/leveldb/table/TableTest.java b/leveldb/src/test/java/org/iq80/leveldb/table/TableTest.java new file mode 100644 index 0000000..5ddbb9f --- /dev/null +++ b/leveldb/src/test/java/org/iq80/leveldb/table/TableTest.java @@ -0,0 +1,1213 @@ +/* + * Copyright (C) 2011 the original author or authors. + * See the notice.md file distributed with this work for additional + * information regarding copyright ownership. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.iq80.leveldb.table; + +import com.google.common.collect.Lists; +import org.iq80.leveldb.CompressionType; +import org.iq80.leveldb.DBComparator; +import org.iq80.leveldb.Options; +import org.iq80.leveldb.ReadOptions; +import org.iq80.leveldb.env.Env; +import org.iq80.leveldb.env.File; +import org.iq80.leveldb.env.RandomInputFile; +import org.iq80.leveldb.env.WritableFile; +import org.iq80.leveldb.impl.CountingHandlesEnv; +import org.iq80.leveldb.impl.DbConstants; +import org.iq80.leveldb.impl.DbImpl; +import org.iq80.leveldb.impl.InternalKey; +import org.iq80.leveldb.impl.InternalKeyComparator; +import org.iq80.leveldb.impl.MemTable; +import org.iq80.leveldb.impl.ValueType; +import org.iq80.leveldb.iterator.SeekingDBIteratorAdapter; +import org.iq80.leveldb.iterator.SeekingIterator; +import org.iq80.leveldb.util.Closeables; +import org.iq80.leveldb.util.ILRUCache; +import org.iq80.leveldb.util.LRUCache; +import org.iq80.leveldb.util.Slice; +import org.iq80.leveldb.util.Slices; +import org.iq80.leveldb.util.Snappy; +import org.iq80.leveldb.util.TestUtils; +import org.testng.annotations.AfterMethod; +import org.testng.annotations.BeforeMethod; +import org.testng.annotations.DataProvider; +import org.testng.annotations.Test; + +import java.io.ByteArrayOutputStream; +import java.io.IOException; +import java.nio.ByteBuffer; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; +import java.util.Comparator; +import java.util.Iterator; +import java.util.List; +import java.util.Map; +import java.util.Optional; +import java.util.Random; +import java.util.concurrent.ConcurrentSkipListMap; +import java.util.function.Function; + +import static java.util.Arrays.asList; +import static org.iq80.leveldb.impl.SequenceNumber.MAX_SEQUENCE_NUMBER; +import static org.iq80.leveldb.iterator.IteratorTestUtils.entry; +import static org.iq80.leveldb.util.SizeOf.SIZE_OF_INT; +import static org.iq80.leveldb.util.TestUtils.asciiToSlice; +import 
static org.testng.Assert.assertEquals; +import static org.testng.Assert.assertFalse; +import static org.testng.Assert.assertTrue; + +public abstract class TableTest +{ + private Env defaultEnv; + private File file; + + private Table createTable(File file, Comparator comparator, boolean verifyChecksums, FilterPolicy filterPolicy) + throws Exception + { + RandomInputFile open = defaultEnv.newRandomAccessFile(file); + try { + return new Table(open, comparator, verifyChecksums, LRUCache.createCache(8 << 5, new BlockHandleSliceWeigher()), filterPolicy); + } + catch (Exception e) { + Closeables.closeQuietly(open); + throw e; + } + } + + @Test(expectedExceptions = IllegalArgumentException.class) + public void testEmptyFile() + throws Exception + { + createTable(file, new BytewiseComparator(), true, null).close(); + } + + @Test + public void testEmptyBlock() + throws Exception + { + tableTest(Integer.MAX_VALUE, Integer.MAX_VALUE); + } + + @Test + public void testSingleEntrySingleBlock() + throws Exception + { + tableTest(Integer.MAX_VALUE, Integer.MAX_VALUE, + BlockHelper.createBlockEntry("name", "dain sundstrom")); + } + + @Test + public void testMultipleEntriesWithSingleBlock() + throws Exception + { + List entries = asList( + BlockHelper.createBlockEntry("beer/ale", "Lagunitas Little Sumpin’ Sumpin’"), + BlockHelper.createBlockEntry("beer/ipa", "Lagunitas IPA"), + BlockHelper.createBlockEntry("beer/stout", "Lagunitas Imperial Stout"), + BlockHelper.createBlockEntry("scotch/light", "Oban 14"), + BlockHelper.createBlockEntry("scotch/medium", "Highland Park"), + BlockHelper.createBlockEntry("scotch/strong", "Lagavulin")); + + for (int i = 1; i < entries.size(); i++) { + tableTest(Integer.MAX_VALUE, i, entries); + } + } + + @Test + public void testMultipleEntriesWithMultipleBlock() + throws Exception + { + List entries = asList( + BlockHelper.createBlockEntry("beer/ale", "Lagunitas Little Sumpin’ Sumpin’"), + BlockHelper.createBlockEntry("beer/ipa", "Lagunitas IPA"), + 
BlockHelper.createBlockEntry("beer/stout", "Lagunitas Imperial Stout"), + BlockHelper.createBlockEntry("scotch/light", "Oban 14"), + BlockHelper.createBlockEntry("scotch/medium", "Highland Park"), + BlockHelper.createBlockEntry("scotch/strong", "Lagavulin")); + + // one entry per block + tableTest(1, Integer.MAX_VALUE, entries); + + // about 3 blocks + tableTest(BlockHelper.estimateBlockSize(Integer.MAX_VALUE, entries) / 3, Integer.MAX_VALUE, entries); + } + + @Test + public void testZeroRestartPointsInBlock() + { + Block entries = new Block(Slices.allocate(SIZE_OF_INT), new BytewiseComparator()); + + try (BlockIterator iterator = entries.iterator()) { + assertFalse(iterator.next()); + assertFalse(iterator.valid()); + assertFalse(iterator.seekToFirst()); + assertFalse(iterator.valid()); + assertFalse(iterator.seekToLast()); + assertFalse(iterator.valid()); + assertFalse(iterator.seek(asciiToSlice("foo"))); + assertFalse(iterator.valid()); + } + } + + private static final class KVMap + extends ConcurrentSkipListMap + { + KVMap(UserComparator useComparator) + { + super(new STLLessThan(useComparator)); + } + + void add(String key, Slice value) + { + put(asciiToSlice(key), value); + } + + KVIter iter() + { + ArrayList> entries = Lists.newArrayList(entrySet()); + return new KVIter(entries); + } + + //avoid using an implementation we are testing + class KVIter + { + private final List> entries; + private int index = -1; + + KVIter(List> entries) + { + this.entries = entries; + } + + Optional> next() + { + if (index < entries.size()) { + index++; + } + return index < entries.size() ? Optional.of(entries.get(index)) : Optional.empty(); + } + + Optional> prev() + { + if (index >= 0) { + index--; + } + return index >= 0 ? Optional.of(entries.get(index)) : Optional.empty(); + } + + Optional> entry() + { + return index >= 0 && index < entries.size() ? 
Optional.of(entries.get(index)) : Optional.empty(); + } + + KVIter seek(Slice key) + { + index = -1; + Optional> next = next(); + while (next.isPresent() && comparator().compare(next.get().getKey(), key) < 0) { + next = next(); + } + return this; + } + + public Optional> seekToLast() + { + if (entries.isEmpty()) { + return Optional.empty(); + } + else { + index = entries.size() - 1; + return Optional.of(entries.get(index)); + } + } + } + } + + private static class STLLessThan + implements Comparator + { + private UserComparator useComparator; + + public STLLessThan(UserComparator useComparator) + { + this.useComparator = useComparator; + } + + @Override + public int compare(Slice o1, Slice o2) + { + return useComparator.compare(o1, o2); + } + } + + @Test + public void testTableApproximateOffsetOfPlain() throws Exception + { + TableConstructor c = new TableConstructor(new BytewiseComparator()); + c.add("k01", "hello"); + c.add("k02", "hello2"); + c.add("k03", TestUtils.longString(10000, 'x')); + c.add("k04", TestUtils.longString(200000, 'x')); + c.add("k05", TestUtils.longString(300000, 'x')); + c.add("k06", "hello3"); + c.add("k07", TestUtils.longString(100000, 'x')); + + final Options options = new Options(); + options.blockSize(1024); + options.compressionType(CompressionType.NONE); + c.finish(options, defaultEnv); + + assertBetween(c.approximateOffsetOf("abc"), 0, 0); + assertBetween(c.approximateOffsetOf("k01"), 0, 0); + assertBetween(c.approximateOffsetOf("k01a"), 0, 0); + assertBetween(c.approximateOffsetOf("k02"), 0, 0); + assertBetween(c.approximateOffsetOf("k03"), 0, 0); + assertBetween(c.approximateOffsetOf("k04"), 10000, 11000); + assertBetween(c.approximateOffsetOf("k04a"), 210000, 211000); + assertBetween(c.approximateOffsetOf("k05"), 210000, 211000); + assertBetween(c.approximateOffsetOf("k06"), 510000, 511000); + assertBetween(c.approximateOffsetOf("k07"), 510000, 511000); + assertBetween(c.approximateOffsetOf("xyz"), 610000, 612000); + } + + @Test + 
public void testTableTestApproximateOffsetOfCompressedSnappy() throws Exception + { + if (!Snappy.available()) { + System.out.println("skipping compression tests"); + return; + } + + Random rnd = new Random(301); + TableConstructor c = new TableConstructor(new BytewiseComparator()); + c.add("k01", "hello"); + c.add("k02", TestUtils.compressibleString(rnd, 0.25, 10000)); + c.add("k03", "hello3"); + c.add("k04", TestUtils.compressibleString(rnd, 0.25, 10000)); + + Options options = new Options(); + options.blockSize(1024); + options.compressionType(CompressionType.SNAPPY); + c.finish(options, defaultEnv); + + // Expected upper and lower bounds of space used by compressible strings. + int kSlop = 1000; // Compressor effectiveness varies. + int expected = 2500; // 10000 * compression ratio (0.25) + int minZ = expected - kSlop; + int maxZ = expected + kSlop; + + assertBetween(c.approximateOffsetOf("abc"), 0, kSlop); + assertBetween(c.approximateOffsetOf("k01"), 0, kSlop); + assertBetween(c.approximateOffsetOf("k02"), 0, kSlop); + // Have now emitted a large compressible string, so adjust expected offset. + assertBetween(c.approximateOffsetOf("k03"), minZ, maxZ); + assertBetween(c.approximateOffsetOf("k04"), minZ, maxZ); + // Have now emitted two large compressible strings, so adjust expected offset. + assertBetween(c.approximateOffsetOf("xyz"), 2 * minZ, 2 * maxZ); + } + + @Test + public void testTableTestApproximateOffsetOfCompressedZLib() throws Exception + { + Random rnd = new Random(301); + TableConstructor c = new TableConstructor(new BytewiseComparator()); + c.add("k01", "hello"); + c.add("k02", TestUtils.compressibleString(rnd, 0.25, 10000)); + c.add("k03", "hello3"); + c.add("k04", TestUtils.compressibleString(rnd, 0.25, 10000)); + + Options options = new Options(); + options.blockSize(1024); + options.compressionType(CompressionType.ZLIB); + c.finish(options, defaultEnv); + + // Expected upper and lower bounds of space used by compressible strings. 
+ int kSlop = 5000; // Compressor effectiveness varies. + int expected = 5000; // 10000 * compression ratio (0.5) + int minZ = expected - kSlop; + int maxZ = expected + kSlop; + + assertBetween(c.approximateOffsetOf("abc"), 0, kSlop); + assertBetween(c.approximateOffsetOf("k01"), 0, kSlop); + assertBetween(c.approximateOffsetOf("k02"), 0, kSlop); + // Have now emitted a large compressible string, so adjust expected offset. + assertBetween(c.approximateOffsetOf("k03"), minZ, maxZ); + assertBetween(c.approximateOffsetOf("k04"), minZ, maxZ); + // Have now emitted two large compressible strings, so adjust expected offset. + assertBetween(c.approximateOffsetOf("xyz"), 2 * minZ, 2 * maxZ); + } + + @Test + public void testTableTestApproximateOffsetOfCompressedZLibRaw() throws Exception + { + Random rnd = new Random(301); + TableConstructor c = new TableConstructor(new BytewiseComparator()); + c.add("k01", "hello"); + c.add("k02", TestUtils.compressibleString(rnd, 0.25, 10000)); + c.add("k03", "hello3"); + c.add("k04", TestUtils.compressibleString(rnd, 0.25, 10000)); + + Options options = new Options(); + options.blockSize(1024); + options.compressionType(CompressionType.ZLIB_RAW); + c.finish(options, defaultEnv); + + // Expected upper and lower bounds of space used by compressible strings. + int kSlop = 5000; // Compressor effectiveness varies. + int expected = 5000; // 10000 * compression ratio (0.5) + int minZ = expected - kSlop; + int maxZ = expected + kSlop; + + assertBetween(c.approximateOffsetOf("abc"), 0, kSlop); + assertBetween(c.approximateOffsetOf("k01"), 0, kSlop); + assertBetween(c.approximateOffsetOf("k02"), 0, kSlop); + // Have now emitted a large compressible string, so adjust expected offset. + assertBetween(c.approximateOffsetOf("k03"), minZ, maxZ); + assertBetween(c.approximateOffsetOf("k04"), minZ, maxZ); + // Have now emitted two large compressible strings, so adjust expected offset. 
+ assertBetween(c.approximateOffsetOf("xyz"), 2 * minZ, 2 * maxZ); + } + + static void assertBetween(long val, long low, long high) + { + assertTrue((val >= low) && (val <= high), + String.format("Value %s is not in range [%s, %s]", val, low, high)); + } + + private abstract static class Constructor + implements AutoCloseable + { + private final KVMap kvMap; + private final UserComparator comparator; + + public Constructor(final UserComparator comparator) + { + this.comparator = comparator; + this.kvMap = new KVMap(this.comparator); + } + + void add(Slice key, Slice value) + { + kvMap.put(key, value); + } + + void add(String key, Slice value) + { + kvMap.put(asciiToSlice(key), value); + } + + void add(String key, String value) + { + add(key, asciiToSlice(value)); + } + + public final KVMap finish(Options options, Env env) throws IOException + { + finish(options, env, comparator, kvMap); + return kvMap; + } + + @Override + public void close() throws Exception + { + } + + protected abstract void finish(Options options, Env env, UserComparator comparator, KVMap kvMap) throws IOException; + + public abstract SeekingIterator iterator(); + } + + public static class TableConstructor + extends Constructor + { + private Table table; + + public TableConstructor(UserComparator comparator) + { + super(comparator); + } + + @Override + protected void finish(Options options, Env env, UserComparator comp, KVMap data) throws IOException + { + StringSink sink = new StringSink(); + TableBuilder builder = new TableBuilder(options, sink, comp); + + for (Map.Entry e : data.entrySet()) { + builder.add(e.getKey(), e.getValue()); + } + builder.finish(); + sink.close(); + + assertEquals(sink.content.length, builder.getFileSize()); + + // Open the table + StringSource source = new StringSource(sink.content); + ILRUCache blockCache = LRUCache.createCache(options.cacheSize() > 0 ? 
(int) options.cacheSize() : 8 << 20, new BlockHandleSliceWeigher()); + table = new Table(source, comp, options.paranoidChecks(), blockCache, (FilterPolicy) options.filterPolicy()); + } + + public long approximateOffsetOf(String key) + { + return table.getApproximateOffsetOf(asciiToSlice(key)); + } + + @Override + public SeekingIterator iterator() + { + return table.iterator(new ReadOptions()); + } + } + + @DataProvider(name = "testArgs") + public Object[][] testArgsProvider() + { + try { + final ReverseDBComparator reverse = new ReverseDBComparator(); + return new Object[][] { + {newHarness(TableConstructor.class, null, 16)}, + {newHarness(TableConstructor.class, null, 1)}, + {newHarness(TableConstructor.class, null, 1024)}, + {newHarness(TableConstructor.class, reverse, 16)}, + {newHarness(TableConstructor.class, reverse, 1)}, + {newHarness(TableConstructor.class, reverse, 1024)}, + + {newHarness(BlockConstructor.class, null, 16)}, + {newHarness(BlockConstructor.class, null, 1)}, + {newHarness(BlockConstructor.class, null, 1014)}, + {newHarness(BlockConstructor.class, reverse, 16)}, + {newHarness(BlockConstructor.class, reverse, 1)}, + {newHarness(BlockConstructor.class, reverse, 1024)}, + + //TODO ported from original but need to be moved away. they don't exactly belong in current package! 
+ {newHarness(MemTableConstructor.class, null, 16)}, + {newHarness(MemTableConstructor.class, reverse, 16)}, + + {newHarness(DbConstructor.class, null, 16)}, + {newHarness(DbConstructor.class, reverse, 16)}, + }; + } + catch (Exception e) { + throw new RuntimeException(e); + } + } + + private static Harness newHarness(Class cls, DBComparator dbComparator, int restartInterval) throws Exception + { + Random rnd = new Random(301 + System.nanoTime()); + return new Harness(rnd, dbComparator, cls, restartInterval); + } + + @Test(dataProvider = "testArgs") + public void testEmpty(Harness harness) throws Exception + { + try { + harness.test(defaultEnv); + } + finally { + harness.close(); + } + } + + @Test(dataProvider = "testArgs") + public void testSimpleEmptyKey(Harness harness) throws Exception + { + try { + harness.add(Slices.EMPTY_SLICE, asciiToSlice("v")); + harness.test(defaultEnv); + } + finally { + harness.close(); + } + } + + @Test(dataProvider = "testArgs") + public void testSimpleSingle(Harness harness) throws Exception + { + try { + harness.add(asciiToSlice("abc"), asciiToSlice("v")); + harness.test(defaultEnv); + } + finally { + harness.close(); + } + } + + @Test(dataProvider = "testArgs") + public void testSimpleMulti(Harness harness) throws Exception + { + try { + harness.add(asciiToSlice("abc"), asciiToSlice("v")); + harness.add(asciiToSlice("abcd"), asciiToSlice("v")); + harness.add(asciiToSlice("ac"), asciiToSlice("v2")); + harness.test(defaultEnv); + } + finally { + harness.close(); + } + } + + @Test(dataProvider = "testArgs") + public void testSimpleSpecialKey(Harness harness) throws Exception + { + try { + harness.add(Slices.wrappedBuffer(new byte[] {-1, -1}), asciiToSlice("v3")); + harness.test(defaultEnv); + } + finally { + harness.close(); + } + } + + @Test(dataProvider = "testArgs") + public void testRandomized(Harness harness) throws Exception + { + try { + Random rnd = harness.getRnd(); + for (int numEntries = 0; numEntries < 2000; + numEntries 
+= (numEntries < 50 ? 1 : 200)) { + if ((numEntries % 10) == 0) { + //System.err.println(String.format("case %s: numEntries = %d", harness, numEntries)); + } + for (int e = 0; e < numEntries; e++) { + harness.add(new Slice(TestUtils.randomKey(rnd, harness.getRandomSkewed(4))), + TestUtils.randomString(rnd, harness.getRandomSkewed(5))); + } + } + harness.test(defaultEnv); + } + finally { + harness.close(); + } + } + + @Test + public void testRandomizedLongDB() throws Exception + { + Random rnd = new Random(301); + try (Harness harness = new Harness<>(rnd, null, DbConstructor.class, 16)) { + int numEntries = 100000; + for (int e = 0; e < numEntries; e++) { + harness.add(new Slice(TestUtils.randomKey(rnd, harness.getRandomSkewed(4))), + TestUtils.randomString(rnd, harness.getRandomSkewed(5))); + } + harness.test(defaultEnv); + // We must have created enough data to force merging + int files = 0; + for (int level = 0; level < DbConstants.NUM_LEVELS; level++) { + files += Integer.valueOf(harness.constructor.db.getProperty("leveldb.num-files-at-level" + level)); + } + assertTrue(files > 0); + } + } + + private static class Harness + implements AutoCloseable + { + private final UserComparator comparator; + private String desc; + private final Random rnd; + private T constructor; + private Options options; + + public Harness(Random random, DBComparator comparator, Class cls, int restartInterval) throws Exception + { + this.rnd = random; + this.options = new Options(); + options.blockRestartInterval(restartInterval); + options.blockSize(256); + if (comparator != null) { + this.comparator = new CustomUserComparator(comparator); + options.comparator(comparator); + } + else { + this.comparator = new BytewiseComparator(); + } + constructor = cls.getConstructor(UserComparator.class).newInstance(this.comparator); + desc = cls.getSimpleName() + ", comparator= " + (comparator == null ? 
null : comparator.getClass().getSimpleName()) + ", restartInterval=" + restartInterval; + } + + public Random getRnd() + { + return rnd; + } + + public T getConstructor() + { + return constructor; + } + + /** + * Skewed: pick "base" uniformly from range [0,maxLog] and then + * return "base" random bits. The effect is to pick a number in the + * range [0,2^maxLog-1] with exponential bias towards smaller numbers. + **/ + private int getRandomSkewed(int maxLog) + { + return rnd.nextInt(Integer.MAX_VALUE) % (1 << rnd.nextInt(Integer.MAX_VALUE) % (maxLog + 1)); + } + + void add(Slice key, Slice value) + { + constructor.add(key, value); + } + + private void testForwardScan(KVMap data) throws IOException + { + Iterator> iterator; + try (SeekingIterator iter = constructor.iterator()) { + assertFalse(iter.valid()); + iter.seekToFirst(); + + iterator = data.entrySet().iterator(); + iterator.forEachRemaining(e -> { + assertTrue(iter.valid()); + assertEqualsEntries(entry(iter), e); + iter.next(); + }); + assertFalse(iter.valid()); + } + } + + private static void assertEqualsEntries(SeekingIterator it, Optional> expected) + { + assertEquals(expected.isPresent(), it.valid()); + expected.ifPresent(sliceSliceEntry -> assertEqualsEntries(entry(it), sliceSliceEntry)); + } + + private static void assertEqualsEntries(Map.Entry actual, Map.Entry expected) + { + assertEquals(actual.getKey(), expected.getKey()); + assertEquals(actual.getValue(), expected.getValue()); + } + + private void testBackwardScan(KVMap data) throws IOException + { + try (SeekingIterator iter = constructor.iterator()) { + assertFalse(iter.valid()); + KVMap.KVIter iter1 = data.iter(); + Optional> entry = iter1.seekToLast(); + assertEquals(iter.seekToLast(), !data.isEmpty()); + boolean prev = true; + while (entry.isPresent()) { + assertTrue(prev); + assertEqualsEntries(iter, entry); + prev = iter.prev(); + entry = iter1.prev(); + } + } + } + + private void testRandomAccess(KVMap data) throws IOException + { + try 
(SeekingIterator iter = constructor.iterator()) { + assertFalse(iter.valid()); + List keys = Lists.newArrayList(data.keySet()); + + KVMap.KVIter modelIter = data.iter(); + for (int i = 0; i < 200; i++) { + int toss = rnd.nextInt(5); + switch (toss) { + case 0: { + if (iter.valid()) { + iter.next(); + assertEqualsEntries(iter, modelIter.next()); + } + break; + } + + case 1: { + iter.seekToFirst(); + modelIter = data.iter(); + assertEqualsEntries(iter, modelIter.next()); + } + + case 2: { + Slice key = pickRandomKey(rnd, keys); + modelIter = data.iter().seek(key); + iter.seek(key); + assertEqualsEntries(iter, modelIter.entry()); + break; + } + + case 3: { + if (iter.valid()) { + iter.prev(); + assertEqualsEntries(iter, modelIter.prev()); + } + break; + } + case 4: { + iter.seekToLast(); + assertEqualsEntries(iter, modelIter.seekToLast()); + break; + } + } + } + } + } + + Slice pickRandomKey(Random rnd, List keys) + { + if (keys.isEmpty()) { + return asciiToSlice("foo"); + } + else { + int index = rnd.nextInt(keys.size()); + Slice result = keys.get(index).copySlice(); + switch (rnd.nextInt(3)) { + case 0: + // Return an existing key + break; + case 1: { + // Attempt to return something smaller than an existing key + int idx1 = result.length() - 1; + if (result.length() > 0 && result.getByte(idx1) > '\0') { + result.setByte(idx1, result.getByte(idx1) - 1); + } + break; + } + case 2: { + // Return something larger than an existing key + result = increment(comparator, result); + break; + } + } + return result; + } + } + + Slice increment(Comparator cmp, Slice key) + { + Slice k; + if (cmp instanceof BytewiseComparator) { + k = key; + } + else { + k = reverse(key); + } + byte[] bytes = Arrays.copyOf(k.getBytes(), k.length() + 1); + bytes[k.length()] = 0; + return new Slice(bytes); + } + + private Slice reverse(Slice key) + { + byte[] bytes = new byte[key.length()]; + for (int i = 0, k = key.length() - 1; k >= 0; i++, k--) { + bytes[i] = key.getByte(k); + } + return new 
Slice(bytes); + } + + void test(Env env) throws IOException + { + KVMap data = constructor.finish(options, env); + + testForwardScan(data); + testBackwardScan(data); + testRandomAccess(data); + } + + @Override + public void close() throws Exception + { + constructor.close(); + } + + @Override + public String toString() + { + return desc; + } + } + + private static class BlockConstructor + extends Constructor + { + private Block entries; + + public BlockConstructor(UserComparator comparator) + { + super(comparator); + } + + @Override + public SeekingIterator iterator() + { + return entries.iterator(); + } + + @Override + protected void finish(Options options, Env env, UserComparator cmp, KVMap map) throws IOException + { + BlockBuilder builder = new BlockBuilder(256, options.blockRestartInterval(), cmp); + + for (Map.Entry entry : map.entrySet()) { + builder.add(entry.getKey(), entry.getValue()); + } + + // Open the block + Slice data = builder.finish(); + entries = new Block(data, cmp); + } + } + + private static class MemTableConstructor + extends Constructor + { + private MemTable table; + + public MemTableConstructor(UserComparator comparator) + { + super(comparator); + } + + @Override + protected void finish(Options options, Env env, UserComparator comparator, KVMap kvMap) throws IOException + { + table = new MemTable(new InternalKeyComparator(comparator)); + int seq = 1; + for (Map.Entry e : kvMap.entrySet()) { + table.add(seq++, ValueType.VALUE, e.getKey(), e.getValue()); + } + } + + @Override + public SeekingIterator iterator() + { + return new KeyConverterIterator<>(table.iterator(), InternalKey::getUserKey, k -> new InternalKey(k, MAX_SEQUENCE_NUMBER, ValueType.VALUE)); + } + } + + private static class DbConstructor + extends Constructor + { + private DbImpl db; + private File tmpDir; + private CountingHandlesEnv env; + + public DbConstructor(UserComparator comparator) + { + super(comparator); + } + + @Override + protected void finish(Options options, Env 
env, UserComparator comparator, KVMap kvMap) throws IOException + { + options + .createIfMissing(true) + .errorIfExists(true) + .writeBufferSize(10000); // Something small to force merging + tmpDir = env.createTempDir("leveldb"); + this.env = new CountingHandlesEnv(env); + this.db = new DbImpl(options, tmpDir.getPath(), this.env); + for (Map.Entry entry : kvMap.entrySet()) { + db.put(entry.getKey().getBytes(), entry.getValue().getBytes()); + } + } + + @Override + public SeekingIterator iterator() + { + return SeekingDBIteratorAdapter.toSeekingIterator(db.iterator(), Slice::getBytes, Slice::new, Slice::new); + } + + @Override + public void close() throws Exception + { + super.close(); + db.close(); + assertEquals(env.getOpenHandles(), 0, "All files should have been closed (validate all iterables should be closed)"); + assertTrue(!tmpDir.exists() || tmpDir.deleteRecursively()); + } + } + + public class ReverseDBComparator + implements DBComparator + { + private final BytewiseComparator com = new BytewiseComparator(); + + @Override + public String name() + { + return "leveldb.ReverseBytewiseComparator"; + } + + @Override + public byte[] findShortestSeparator(byte[] start, byte[] limit) + { + Slice s = reverseToSlice(start); + Slice l = reverseToSlice(limit); + return reverseB(com.findShortestSeparator(s, l).getBytes()); + } + + private Slice reverseToSlice(byte[] key) + { + return new Slice(reverseB(key)); + } + + private byte[] reverseB(byte[] key) + { + byte[] bytes = new byte[key.length]; + for (int i = 0, k = key.length - 1; k >= 0; i++, k--) { + bytes[i] = key[k]; + } + return bytes; + } + + @Override + public byte[] findShortSuccessor(byte[] key) + { + Slice s = reverseToSlice(key); + return reverseB(com.findShortSuccessor(s).getBytes()); + } + + @Override + public int compare(byte[] a, byte[] b) + { + return com.compare(reverseToSlice(a), reverseToSlice(b)); + } + } + + private static class StringSource + implements RandomInputFile + { + byte[] data; + + public 
StringSource(byte[] data) + { + this.data = data; + } + + @Override + public long size() + { + return data.length; + } + + @Override + public ByteBuffer read(long offset, int length) + { + return Slices.wrappedBuffer(data).copySlice((int) offset, length).toByteBuffer(); + } + + @Override + public void close() + { + } + } + + private static class StringSink + implements WritableFile + { + private ByteArrayOutputStream sb = new ByteArrayOutputStream(); + + byte[] content; + + @Override + public void append(Slice data) throws IOException + { + sb.write(data.getBytes()); + } + + @Override + public void force() throws IOException + { + content = sb.toByteArray(); + } + + @Override + public void close() throws IOException + { + content = sb.toByteArray(); + sb.close(); + sb = null; + } + } + + private void tableTest(int blockSize, int blockRestartInterval, BlockEntry... entries) + throws Exception + { + tableTest(blockSize, blockRestartInterval, asList(entries)); + } + + private void tableTest(int blockSize, int blockRestartInterval, List entries) + throws Exception + { + reopenFile(); + Options options = new Options().blockSize(blockSize).blockRestartInterval(blockRestartInterval); + try (WritableFile writableFile = defaultEnv.newWritableFile(defaultEnv.toFile(file.getPath()))) { + TableBuilder builder = new TableBuilder(options, writableFile, new BytewiseComparator()); + + for (BlockEntry entry : entries) { + builder.add(entry); + } + builder.finish(); + } + List reverseEntries = Lists.reverse(entries); + Table table = null; + try { + table = createTable(file, new BytewiseComparator(), true, null); + + try (SeekingIterator seekingIterator = table.iterator(new ReadOptions())) { + seekingIterator.seekToFirst(); + BlockHelper.assertSequence(seekingIterator, entries); + + seekingIterator.seekToFirst(); + BlockHelper.assertSequence(seekingIterator, entries); + seekingIterator.prev(); + BlockHelper.assertReverseSequence(seekingIterator, reverseEntries); + 
seekingIterator.seekToLast(); + BlockHelper.assertReverseSequence(seekingIterator, reverseEntries); + + long lastApproximateOffset = 0; + for (BlockEntry entry : entries) { + List nextEntries = entries.subList(entries.indexOf(entry), entries.size()); + seekingIterator.seek(entry.getKey()); + BlockHelper.assertSequence(seekingIterator, nextEntries); + + seekingIterator.seek(BlockHelper.before(entry)); + BlockHelper.assertSequence(seekingIterator, nextEntries); + + seekingIterator.seek(BlockHelper.after(entry)); + BlockHelper.assertSequence(seekingIterator, nextEntries.subList(1, nextEntries.size())); + + long approximateOffset = table.getApproximateOffsetOf(entry.getKey()); + assertTrue(approximateOffset >= lastApproximateOffset); + lastApproximateOffset = approximateOffset; + + seekingIterator.seek(entry.getKey()); + List prevEntries = reverseEntries.subList(reverseEntries.indexOf(entry), reverseEntries.size()); + BlockHelper.assertReverseSequence(seekingIterator, prevEntries); + assertTrue(seekingIterator.next()); + BlockHelper.assertSequence(seekingIterator, entries); + } + + Slice endKey = Slices.wrappedBuffer(new byte[] {(byte) 0xFF, (byte) 0xFF, (byte) 0xFF, (byte) 0xFF}); + seekingIterator.seek(endKey); + BlockHelper.assertSequence(seekingIterator, Collections.emptyList()); + + long approximateOffset = table.getApproximateOffsetOf(endKey); + assertTrue(approximateOffset >= lastApproximateOffset); + } + } + finally { + if (table != null) { + table.close(); + } + } + } + + @BeforeMethod + public void setUp() + throws Exception + { + reopenFile(); + } + + private void reopenFile() + throws IOException + { + if (file != null) { + file.delete(); + } + defaultEnv = getEnv(); + file = defaultEnv.createTempDir("leveldb_file").child("table.db"); + file.delete(); + defaultEnv.writeStringToFileSync(file, ""); + } + + protected abstract Env getEnv(); + + @AfterMethod + public void tearDown() + throws Exception + { + file.getParentFile().deleteRecursively(); + } + + 
public static class KeyConverterIterator + implements SeekingIterator + { + private final SeekingIterator it; + private final Function from; + private final Function to; + + public KeyConverterIterator(SeekingIterator it, Function from, Function to) + { + this.it = it; + this.from = from; + this.to = to; + } + + @Override + public boolean valid() + { + return it.valid(); + } + + @Override + public boolean seekToFirst() + { + return it.seekToFirst(); + } + + @Override + public boolean seekToLast() + { + return it.seekToLast(); + } + + @Override + public boolean seek(K2 key) + { + return it.seek(to.apply(key)); + } + + @Override + public boolean next() + { + return it.next(); + } + + @Override + public boolean prev() + { + return it.prev(); + } + + @Override + public K2 key() + { + return from.apply(it.key()); + } + + @Override + public V value() + { + return it.value(); + } + + @Override + public void close() throws IOException + { + it.close(); + } + } +} diff --git a/leveldb/src/test/java/org/iq80/leveldb/table/UnbufferedRandomInputFileTableTest.java b/leveldb/src/test/java/org/iq80/leveldb/table/UnbufferedRandomInputFileTableTest.java new file mode 100644 index 0000000..b804372 --- /dev/null +++ b/leveldb/src/test/java/org/iq80/leveldb/table/UnbufferedRandomInputFileTableTest.java @@ -0,0 +1,33 @@ +/* + * Copyright (C) 2011 the original author or authors. + * See the notice.md file distributed with this work for additional + * information regarding copyright ownership. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.iq80.leveldb.table; + +import org.iq80.leveldb.env.Env; +import org.iq80.leveldb.fileenv.EnvImpl; +import org.iq80.leveldb.fileenv.MmapLimiter; + +public class UnbufferedRandomInputFileTableTest + extends TableTest +{ + @Override + protected Env getEnv() + { + //disable memory mapped files + return EnvImpl.createEnv(MmapLimiter.newLimiter(0)); + } +} diff --git a/leveldb/src/test/java/org/iq80/leveldb/util/HashTest.java b/leveldb/src/test/java/org/iq80/leveldb/util/HashTest.java new file mode 100644 index 0000000..b6bc977 --- /dev/null +++ b/leveldb/src/test/java/org/iq80/leveldb/util/HashTest.java @@ -0,0 +1,58 @@ +/* + * Copyright (C) 2011 the original author or authors. + * See the notice.md file distributed with this work for additional + * information regarding copyright ownership. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.iq80.leveldb.util; + +import org.testng.annotations.Test; + +import static org.testng.Assert.assertEquals; + +/** + * @author Honore Vasconcelos + */ +public class HashTest +{ + @Test + public void testSignedUnsignedTrue() throws Exception + { + byte[] data1 = {0x62}; + byte[] data2 = {(byte) 0xc3, (byte) 0x97}; + byte[] data3 = {(byte) 0xe2, (byte) 0x99, (byte) 0xa5}; + byte[] data4 = {(byte) 0xe1, (byte) 0x80, (byte) 0xb9, 0x32}; + byte[] data5 = { + 0x01, (byte) 0xc0, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x14, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x04, 0x00, + 0x00, 0x00, 0x00, 0x14, + 0x00, 0x00, 0x00, 0x18, + 0x28, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x02, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + }; + assertEquals(Hash.hash(new byte[0], 0xbc9f1d34), 0xbc9f1d34); + assertEquals(Hash.hash(data1, 0xbc9f1d34), 0xef1345c4); + assertEquals(Hash.hash(data2, 0xbc9f1d34), 0x5b663814); + assertEquals(Hash.hash(data3, 0xbc9f1d34), 0x323c078f); + assertEquals(Hash.hash(data4, 0xbc9f1d34), 0xed21633a); + assertEquals(Hash.hash(data5, 0x12345678), 0xf333dabb); + + } +} diff --git a/leveldb/src/test/java/org/iq80/leveldb/util/LRUCacheTest.java b/leveldb/src/test/java/org/iq80/leveldb/util/LRUCacheTest.java new file mode 100644 index 0000000..f757efe --- /dev/null +++ b/leveldb/src/test/java/org/iq80/leveldb/util/LRUCacheTest.java @@ -0,0 +1,116 @@ +/* + * Copyright (C) 2011 the original author or authors. + * See the notice.md file distributed with this work for additional + * information regarding copyright ownership. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
/*
 * Copyright (C) 2011 the original author or authors.
 * See the notice.md file distributed with this work for additional
 * information regarding copyright ownership.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.iq80.leveldb.util;

import com.google.common.cache.Weigher;
import org.testng.annotations.Test;

import java.util.concurrent.ExecutionException;

import static org.testng.Assert.assertEquals;

/**
 * Tests for the shared LRU cache: several logical clients multiplexed onto one
 * cache, keyed by (client id, key) packed into a single {@code long}.
 * NOTE(review): generic parameters were garbled in transit; restored as
 * {@code ILRUCache<Long, Integer>} from the {@code long} composite key and the
 * {@code Weigher<Long, Integer>} below — confirm against the original file.
 *
 * @author Honore Vasconcelos
 */
public class LRUCacheTest
{
    @Test
    public void testMultipleClientWithSameKey() throws Exception
    {
        final ILRUCache<Long, Integer> cache = LRUCache.createCache(2 * 5, new CountWeigher());
        final CacheWithStatistics[] caches = CacheWithStatistics.withStatistics(cache, 2);

        for (int x = 0; x < 3; ++x) {
            for (int i = 0; i < caches.length; ++i) {
                for (int j = 0; j < 5; ++j) {
                    assertEquals(((int) caches[i].load(j)), j * (i + 1) * 3);
                }
            }
            //only first run should load data into cache, as such, only 5 load should be executed instead of 30
            for (CacheWithStatistics cache1 : caches) {
                assertEquals(cache1.count, 5);
            }
        }
    }

    @Test
    public void testLimitIsRespected() throws Exception
    {
        // size is respected by guava but we could have some type of bug :)
        final ILRUCache<Long, Integer> cache = LRUCache.createCache(2, new CountWeigher());
        final CacheWithStatistics[] caches = CacheWithStatistics.withStatistics(cache, 2);
        caches[0].load(0);
        caches[0].load(1);
        caches[0].load(2);
        caches[0].load(1);
        caches[0].load(0);

        assertEquals(caches[0].count, 4);
        assertEquals(caches[1].count, 0);

        caches[1].load(0);
        caches[0].load(0);
        assertEquals(caches[0].count, 4);
        assertEquals(caches[1].count, 1);

        caches[0].load(2);
        caches[1].load(1);
        assertEquals(caches[0].count, 5);
        assertEquals(caches[1].count, 2);
    }

    /**
     * Wraps the shared cache for one logical client and counts how many times
     * the loader actually ran (i.e. cache misses for this client).
     */
    private static class CacheWithStatistics
    {
        private final ILRUCache<Long, Integer> cache;
        private final int i;
        private int count;

        private CacheWithStatistics(ILRUCache<Long, Integer> cache, final int i)
        {
            this.cache = cache;
            this.i = i;
        }

        static CacheWithStatistics[] withStatistics(ILRUCache<Long, Integer> cache, int clients)
        {
            final CacheWithStatistics[] caches = new CacheWithStatistics[clients];
            for (int i = 0; i < clients; ++i) {
                caches[i] = new CacheWithStatistics(cache, i);
            }
            return caches;
        }

        public Integer load(final Integer key) throws ExecutionException
        {
            // client id in the high 32 bits keeps different clients' keys disjoint
            return cache.load(((long) i) << 32 | key, () -> {
                count++;
                return key * (i + 1) * 3;
            });
        }
    }

    /** Every entry weighs 1, so the cache capacity is an entry count. */
    private static class CountWeigher implements Weigher<Long, Integer>
    {
        @Override
        public int weigh(Long key, Integer value)
        {
            return 1;
        }
    }
}
/*
 * Copyright (C) 2011 the original author or authors.
 * See the notice.md file distributed with this work for additional
 * information regarding copyright ownership.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.iq80.leveldb.util;

import org.testng.Assert;
import org.testng.annotations.DataProvider;
import org.testng.annotations.Test;

import java.io.UnsupportedEncodingException;
import java.nio.ByteBuffer;
import java.nio.ByteOrder;
import java.util.Arrays;
import java.util.function.Function;
import java.util.function.IntFunction;

import static java.nio.charset.StandardCharsets.US_ASCII;
import static org.iq80.leveldb.util.PureJavaCrc32C.mask;
import static org.iq80.leveldb.util.PureJavaCrc32C.unmask;
import static org.testng.Assert.assertFalse;

/**
 * CRC32C tests against the standard vectors from RFC 3720 section B.4, covering
 * byte-array input as well as direct (both byte orders) and heap ByteBuffers
 * with non-zero positions, short limits and array offsets.
 *
 * FIX(review): {@code testBufferStandardResults} and {@code assertCrcWithBuffers}
 * previously invoked the local helper as {@code assertEquals(expected, actual)}
 * although it is declared {@code assertEquals(int actual, int required)} — the
 * equality check was unaffected but failure messages reported the two values
 * swapped. Calls now pass the computed CRC first, like the rest of the class.
 */
public class PureJavaCrc32CTest
{
    // Buffer factories: direct little-endian, direct big-endian, and heap.
    private static final IntFunction<ByteBuffer> DIRECT_LE = cap -> ByteBuffer.allocateDirect(cap).order(ByteOrder.LITTLE_ENDIAN);
    private static final IntFunction<ByteBuffer> DIRECT_BE = cap -> ByteBuffer.allocateDirect(cap).order(ByteOrder.BIG_ENDIAN);
    private static final IntFunction<ByteBuffer> HEAP = cap -> ByteBuffer.allocate(cap);

    @Test(dataProvider = "crcs")
    public void testStandardResults(int expectedCrc, byte[] data)
    {
        assertEquals(computeCrc(data), expectedCrc);
    }

    @Test(dataProvider = "crcs")
    public void testBufferStandardResults(int expectedCrc, byte[] b)
    {
        //ensure correct handling of offset/positions/limits in DIRECT_LE/DIRECT_BE/array buffers
        assertCrcWithBuffers(expectedCrc, b, DIRECT_LE);
        assertCrcWithBuffers(expectedCrc, b, DIRECT_BE);
        assertCrcWithBuffers(expectedCrc, b, HEAP);
        //with array offset
        final byte[] dest = new byte[b.length + 2];
        System.arraycopy(b, 0, dest, 2, b.length);
        final ByteBuffer byteBuffer = ByteBuffer.wrap(dest, 2, b.length);
        assertEquals(computeCrc(byteBuffer), expectedCrc);
    }

    private void assertCrcWithBuffers(int expectedCrc, byte[] b, IntFunction<ByteBuffer> factory)
    {
        assertEquals(computeCrc(fillBuffer(b, factory.apply(b.length), 0)), expectedCrc); //position = 0 & limit = b.length
        assertEquals(computeCrc(fillBuffer(b, factory.apply(b.length + 5), 0)), expectedCrc); //limit < than accessible size
        assertEquals(computeCrc(fillBuffer(b, factory.apply(b.length + 5), 2)), expectedCrc); //position > 0 & limit < than accessible size
        assertEquals(computeCrc(fillBuffer(b, factory.apply(b.length + 7), 2)), expectedCrc); //position > 0 & limit < than accessible size
    }

    @DataProvider(name = "crcs")
    public Object[][] data()
    {
        return new Object[][] {
                // Standard results from rfc3720 section B.4.
                new Object[] {0x8a9136aa, arrayOf(32, (byte) 0)},
                new Object[] {0x62a8ab43, arrayOf(32, (byte) 0xff)},
                new Object[] {0x46dd794e, arrayOf(32, position -> (byte) position.intValue())},
                new Object[] {0x113fdb5c, arrayOf(32, position -> (byte) (31 - position))},
                new Object[] {0xd9963a56, arrayOf(new int[] {
                        0x01, 0xc0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
                        0x00, 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, 0x00,
                        0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x18, 0x28, 0x00, 0x00, 0x00,
                        0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00})}
        };
    }

    @Test
    public void testProducesDifferentCrcs()
            throws UnsupportedEncodingException
    {
        assertFalse(computeCrc("a".getBytes(US_ASCII)) == computeCrc("foo".getBytes(US_ASCII)));
    }

    @Test
    public void testProducesDifferentCrcs2()
            throws UnsupportedEncodingException
    {
        assertFalse(computeCrc(fillBuffer("a".getBytes(US_ASCII), ByteBuffer.allocateDirect(10), 0)) == computeCrc(fillBuffer("foo".getBytes(US_ASCII), ByteBuffer.allocateDirect(10), 0)));
    }

    @Test
    public void testLoopUnroll() throws Exception
    {
        // lengths 10 and 7 hit the unrolled and the tail code paths
        assertCrcWithBuffers(0xb219db69, new byte[] {1, 2, 3, 4, 5, 6, 7, 8, 9, 10}, DIRECT_BE);
        assertCrcWithBuffers(0xb219db69, new byte[] {1, 2, 3, 4, 5, 6, 7, 8, 9, 10}, DIRECT_LE);
        assertCrcWithBuffers(0xb219db69, new byte[] {1, 2, 3, 4, 5, 6, 7, 8, 9, 10}, HEAP);

        assertCrcWithBuffers(0xbd3a64dc, new byte[] {1, 2, 3, 4, 5, 6, 7}, DIRECT_BE);
        assertCrcWithBuffers(0xbd3a64dc, new byte[] {1, 2, 3, 4, 5, 6, 7}, DIRECT_LE);
        assertCrcWithBuffers(0xbd3a64dc, new byte[] {1, 2, 3, 4, 5, 6, 7}, HEAP);
    }

    @Test
    public void testComposes()
            throws UnsupportedEncodingException
    {
        PureJavaCrc32C crc = new PureJavaCrc32C();
        crc.update("hello ".getBytes(US_ASCII), 0, 6);
        crc.update("world".getBytes(US_ASCII), 0, 5);

        assertEquals(crc.getIntValue(), computeCrc("hello world".getBytes(US_ASCII)));
    }

    @Test
    public void testComposesDirectBuffers()
            throws UnsupportedEncodingException
    {
        PureJavaCrc32C crc = new PureJavaCrc32C();
        crc.update(fillBuffer("hello ".getBytes(US_ASCII), ByteBuffer.allocateDirect(6), 0));
        crc.update(fillBuffer("world".getBytes(US_ASCII), ByteBuffer.allocateDirect(5), 0));

        assertEquals(crc.getIntValue(), computeCrc("hello world".getBytes(US_ASCII)));
    }

    @Test
    public void testMask()
            throws UnsupportedEncodingException
    {
        PureJavaCrc32C crc = new PureJavaCrc32C();
        crc.update("foo".getBytes(US_ASCII), 0, 3);

        assertEquals(crc.getMaskedValue(), mask(crc.getIntValue()));
        assertFalse(crc.getIntValue() == crc.getMaskedValue(), "crc should not match masked crc");
        assertFalse(crc.getIntValue() == mask(crc.getMaskedValue()), "crc should not match double masked crc");
        assertEquals(crc.getIntValue(), unmask(crc.getMaskedValue()));
        assertEquals(crc.getIntValue(), unmask(unmask(mask(crc.getMaskedValue()))));
    }

    private static int computeCrc(byte[] data)
    {
        PureJavaCrc32C crc = new PureJavaCrc32C();
        crc.update(data, 0, data.length);
        return crc.getIntValue();
    }

    private static int computeCrc(ByteBuffer buffer)
    {
        PureJavaCrc32C crc = new PureJavaCrc32C();
        crc.update(buffer);
        return crc.getIntValue();
    }

    /** Copies {@code data} into the buffer at {@code initialPos} and windows position/limit over it. */
    private static ByteBuffer fillBuffer(byte[] data, ByteBuffer byteBuffer, int initialPos)
    {
        byteBuffer.position(initialPos);
        byteBuffer.put(data);
        byteBuffer.position(initialPos);
        byteBuffer.limit(initialPos + data.length);
        return byteBuffer;
    }

    private static byte[] arrayOf(int size, byte value)
    {
        byte[] result = new byte[size];
        Arrays.fill(result, value);
        return result;
    }

    @SuppressWarnings("ConstantConditions")
    private static byte[] arrayOf(int size, Function<Integer, Byte> generator)
    {
        byte[] result = new byte[size];
        for (int i = 0; i < result.length; ++i) {
            result[i] = generator.apply(i);
        }

        return result;
    }

    private static byte[] arrayOf(int[] bytes)
    {
        byte[] result = new byte[bytes.length];
        for (int i = 0; i < result.length; ++i) {
            result[i] = (byte) bytes[i];
        }

        return result;
    }

    /** Hex-formatting wrapper around TestNG's assertEquals; argument order is (actual, required). */
    private void assertEquals(int actual, int required)
    {
        Assert.assertEquals(actual, required, String.format("Required 0x%s but actual is 0x%s", Integer.toHexString(required), Integer.toHexString(actual)));
    }
}
/*
 * Copyright (C) 2011 the original author or authors.
 * See the notice.md file distributed with this work for additional
 * information regarding copyright ownership.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.iq80.leveldb.util;

import org.testng.Assert;
import org.testng.annotations.Test;

import java.io.Closeable;
import java.io.IOException;
import java.util.List;
import java.util.concurrent.atomic.AtomicInteger;

import static org.testng.Assert.assertEquals;
import static org.testng.Assert.assertTrue;

/**
 * Tests for SafeListBuilder's close semantics: closing the builder closes every
 * element even when some throw, and ownership transfers on {@code build()}.
 * NOTE(review): generic parameters were garbled in transit; restored as
 * {@code SafeListBuilder<Closeable>} since the elements are closed — confirm.
 * FIX(review): the size assertion in {@code testNothingHappenIfBuildWasCalled}
 * passed arguments as (expected, actual); flipped to TestNG's (actual, expected)
 * order used everywhere else in this class.
 */
public class SafeListBuilderTest
{
    @Test
    public void testAllElementAreClosedEvenOnError() throws Exception
    {
        AtomicInteger counter = new AtomicInteger();
        SafeListBuilder<Closeable> builder = SafeListBuilder.builder();
        builder.add(counter::incrementAndGet);
        builder.add(() -> {
            counter.incrementAndGet();
            throw new IOException();
        });
        builder.add(counter::incrementAndGet);
        builder.add(() -> {
            counter.incrementAndGet();
            throw new IOException();
        });
        builder.add(counter::incrementAndGet);
        assertEquals(counter.get(), 0);
        try {
            builder.close();
            Assert.fail("should fail because not all close succeed");
        }
        catch (Exception e) {
            assertTrue(e instanceof IOException);
        }
        // all five elements were closed despite two of them throwing
        assertEquals(counter.get(), 5);
    }

    @Test
    public void testCloseWithoutExceptions() throws Exception
    {
        AtomicInteger counter = new AtomicInteger();
        SafeListBuilder<Closeable> builder = SafeListBuilder.builder();
        builder.add(counter::incrementAndGet);
        builder.add(counter::incrementAndGet);
        builder.close();
        assertEquals(counter.get(), 2);
    }

    @Test
    public void testNothingHappenIfBuildWasCalled() throws Exception
    {
        AtomicInteger counter = new AtomicInteger();
        try (SafeListBuilder<Closeable> builder = SafeListBuilder.builder()) {
            builder.add(counter::incrementAndGet);
            builder.add(counter::incrementAndGet);
            builder.add(counter::incrementAndGet);
            final List<Closeable> build = builder.build();
            assertEquals(build.size(), 3);
        }
        // build() transferred ownership, so closing the builder closed nothing
        assertEquals(counter.get(), 0);
    }
}
0000000..2a5e41e --- /dev/null +++ b/leveldb/src/test/java/org/iq80/leveldb/util/SliceComparatorTest.java @@ -0,0 +1,66 @@ +/* + * Copyright (C) 2011 the original author or authors. + * See the notice.md file distributed with this work for additional + * information regarding copyright ownership. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.iq80.leveldb.util; + +import org.testng.annotations.Test; + +import static java.nio.charset.StandardCharsets.UTF_8; +import static org.iq80.leveldb.util.SliceComparator.SLICE_COMPARATOR; +import static org.testng.Assert.assertEquals; +import static org.testng.Assert.assertTrue; + +public class SliceComparatorTest +{ + @Test + public void testSliceComparison() + { + assertTrue(SLICE_COMPARATOR.compare( + Slices.copiedBuffer("beer/ipa", UTF_8), + Slices.copiedBuffer("beer/ale", UTF_8)) + > 0); + + assertTrue(SLICE_COMPARATOR.compare( + Slices.wrappedBuffer(new byte[] {(byte) 0xFF, (byte) 0xFF, (byte) 0xFF, (byte) 0xFF}), + Slices.wrappedBuffer(new byte[] {(byte) 0x00, (byte) 0x00, (byte) 0x00, (byte) 0x00})) + > 0); + + assertTrue(SLICE_COMPARATOR.compare( + Slices.wrappedBuffer(new byte[] {(byte) 0xFF}), + Slices.wrappedBuffer(new byte[] {(byte) 0x00})) + > 0); + + assertAllEqual(Slices.copiedBuffer("abcdefghijklmnopqrstuvwxyz", UTF_8), + Slices.copiedBuffer("abcdefghijklmnopqrstuvwxyz", UTF_8)); + } + + public static void assertAllEqual(Slice left, Slice right) + { + for (int i = 0; i < 
left.length(); i++) { + assertEquals(SLICE_COMPARATOR.compare(left.slice(0, i), right.slice(0, i)), 0); + assertEquals(SLICE_COMPARATOR.compare(right.slice(0, i), left.slice(0, i)), 0); + } + // differ in last byte only + for (int i = 1; i < left.length(); i++) { + Slice slice = right.slice(0, i); + int lastReadableByte = slice.length() - 1; + slice.setByte(lastReadableByte, slice.getByte(lastReadableByte) + 1); + assertTrue(SLICE_COMPARATOR.compare(left.slice(0, i), slice) < 0); + assertTrue(SLICE_COMPARATOR.compare(slice, left.slice(0, i)) > 0); + } + } +} diff --git a/leveldb/src/test/java/org/iq80/leveldb/util/TestUtils.java b/leveldb/src/test/java/org/iq80/leveldb/util/TestUtils.java new file mode 100644 index 0000000..2c33b80 --- /dev/null +++ b/leveldb/src/test/java/org/iq80/leveldb/util/TestUtils.java @@ -0,0 +1,90 @@ +/* + * Copyright (C) 2011 the original author or authors. + * See the notice.md file distributed with this work for additional + * information regarding copyright ownership. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
/*
 * Copyright (C) 2011 the original author or authors.
 * See the notice.md file distributed with this work for additional
 * information regarding copyright ownership.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.iq80.leveldb.util;

import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.util.Arrays;
import java.util.Random;

import static java.nio.charset.StandardCharsets.US_ASCII;

/**
 * Shared fixture helpers for generating random keys/values and converting
 * between ASCII strings, byte arrays and {@link Slice}s in tests.
 */
public final class TestUtils
{
    private TestUtils()
    {
        //utility
    }

    /** Random printable-ASCII slice of the given length. */
    public static Slice randomString(Random rnd, int len)
    {
        final byte[] buffer = new byte[len];
        for (int index = 0; index < len; index++) {
            buffer[index] = (byte) (' ' + rnd.nextInt(95)); // ' ' .. '~'
        }
        return new Slice(buffer);
    }

    /** Random key drawn from a small alphabet of boundary bytes. */
    public static byte[] randomKey(Random rnd, int len)
    {
        // Make sure to generate a wide variety of characters so we
        // test the boundary conditions for short-key optimizations.
        byte[] kTestChars = {
                0, 1, 'a', 'b', 'c', 'd', 'e', (byte) 0xfd, (byte) 0xfe, (byte) 0xff
        };
        byte[] result = new byte[len];
        for (int index = 0; index < len; index++) {
            result[index] = kTestChars[rnd.nextInt(kTestChars.length)];
        }
        return result;
    }

    /**
     * Slice of length &gt;= {@code len} whose content is a short random run
     * repeated, so it compresses to roughly {@code compressedFraction} of its size.
     */
    public static Slice compressibleString(Random rnd, double compressedFraction, int len) throws IOException
    {
        // at least one raw byte, otherwise the repeat loop below never terminates
        final int rawLength = Math.max(1, (int) (len * compressedFraction));
        final byte[] rawBytes = randomString(rnd, rawLength).getBytes();

        final ByteArrayOutputStream accumulator = new ByteArrayOutputStream(len);
        while (accumulator.size() < len) {
            accumulator.write(rawBytes);
        }
        final Slice result = new Slice(accumulator.toByteArray());
        accumulator.close();
        return result;
    }

    /** String of {@code length} copies of {@code character}. */
    public static String longString(int length, char character)
    {
        char[] filled = new char[length];
        Arrays.fill(filled, character);
        return new String(filled);
    }

    public static Slice asciiToSlice(String value)
    {
        return Slices.copiedBuffer(value, US_ASCII);
    }

    public static byte[] asciiToBytes(String value)
    {
        return asciiToSlice(value).getBytes();
    }
}
b/leveldb/src/test/java/org/iq80/leveldb/util/VariableLengthQuantityTest.java new file mode 100644 index 0000000..8408b44 --- /dev/null +++ b/leveldb/src/test/java/org/iq80/leveldb/util/VariableLengthQuantityTest.java @@ -0,0 +1,79 @@ +/* + * Copyright (C) 2011 the original author or authors. + * See the notice.md file distributed with this work for additional + * information regarding copyright ownership. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.iq80.leveldb.util; + +import org.testng.annotations.Test; + +import static org.testng.Assert.assertEquals; + +public class VariableLengthQuantityTest +{ + @Test + public void testWriteVariableLengthInt() + { + testVariableLengthInt(0x0); + testVariableLengthInt(0xf); + testVariableLengthInt(0xff); + testVariableLengthInt(0xfff); + testVariableLengthInt(0xffff); + testVariableLengthInt(0xfffff); + testVariableLengthInt(0xffffff); + testVariableLengthInt(0xfffffff); + testVariableLengthInt(0xffffffff); + } + + private static void testVariableLengthInt(int value) + { + SliceOutput output = Slices.allocate(5).output(); + VariableLengthQuantity.writeVariableLengthInt(value, output); + assertEquals(output.size(), VariableLengthQuantity.variableLengthSize(value)); + int actual = VariableLengthQuantity.readVariableLengthInt(output.slice().input()); + assertEquals(actual, value); + } + + @Test + public void testWriteVariableLengthLong() + { + testVariableLengthLong(0x0L); + 
testVariableLengthLong(0xfL); + testVariableLengthLong(0xffL); + testVariableLengthLong(0xfffL); + testVariableLengthLong(0xffffL); + testVariableLengthLong(0xfffffL); + testVariableLengthLong(0xffffffL); + testVariableLengthLong(0xfffffffL); + testVariableLengthLong(0xffffffffL); + testVariableLengthLong(0xfffffffffL); + testVariableLengthLong(0xffffffffffL); + testVariableLengthLong(0xfffffffffffL); + testVariableLengthLong(0xffffffffffffL); + testVariableLengthLong(0xfffffffffffffL); + testVariableLengthLong(0xffffffffffffffL); + testVariableLengthLong(0xfffffffffffffffL); + testVariableLengthLong(0xffffffffffffffffL); + } + + private static void testVariableLengthLong(long value) + { + SliceOutput output = Slices.allocate(12).output(); + VariableLengthQuantity.writeVariableLengthLong(value, output); + assertEquals(output.size(), VariableLengthQuantity.variableLengthSize(value)); + long actual = VariableLengthQuantity.readVariableLengthLong(output.slice().input()); + assertEquals(actual, value); + } +} diff --git a/license.txt b/license.txt new file mode 100644 index 0000000..6b0b127 --- /dev/null +++ b/license.txt @@ -0,0 +1,203 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. 
You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. 
Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + diff --git a/notice.md b/notice.md new file mode 100644 index 0000000..f21e158 --- /dev/null +++ b/notice.md @@ -0,0 +1,5 @@ +LevelDB Copyright Notices +========================= + +* Copyright 2011 Dain Sundstrom +* Copyright 2011 FuseSource Corp. http://fusesource.com diff --git a/pom.xml b/pom.xml new file mode 100644 index 0000000..6724367 --- /dev/null +++ b/pom.xml @@ -0,0 +1,126 @@ + + + 4.0.0 + + + io.airlift + airbase + 100 + + + com.hivemc.leveldb + leveldb-project + 1.0.0-SNAPSHOT + pom + + ${project.groupId}:${project.artifactId} + Port of LevelDB to Java + https://github.com/HiveGamesOSS/leveldb-mcpe-java + + + leveldb-api + leveldb + leveldb-benchmark + + + 2011 + + + + Apache License 2.0 + http://www.apache.org/licenses/LICENSE-2.0.html + repo + + + + + + dain + Dain Sundstrom + dain@iq80.com + + + chirino + Hiram Chirino + hiram@hiramchirino.com + http://hiramchirino.com + -5 + + + pcmind + Honore Vasconcelos + honore.vasconcelos@gmail.com + + + hivegames + Hive Games + https://github.com/HiveGamesOSS + + + + + scm:git:git://github.com/HiveGamesOSS/leveldb-mcpe-java.git + scm:git:git@github.com:HiveGamesOSS/leveldb-mcpe-java.git + http://github.com/HiveGamesOSS/leveldb-mcpe-java/tree/master + HEAD + + + + src/checkstyle/checks.xml + src/license/LICENSE-HEADER.txt + true + true + true + + ${air.check.skip-basic} + ${air.check.fail-basic} + + true + 11 + -missing + + + + 
+ + com.hivemc.leveldb + leveldb-api + ${project.version} + + + + com.hivemc.leveldb + leveldb + ${project.version} + + + + + + chunker-dev + https://pkgs.dev.azure.com/hivetooling/Chunker/_packaging/chunker/maven/v1 + + + chunker-dev + https://pkgs.dev.azure.com/hivetooling/Chunker/_packaging/chunker/maven/v1 + + + + + + + org.apache.maven.plugins + maven-checkstyle-plugin + + + org.eluder.coveralls + coveralls-maven-plugin + 4.3.0 + + ${converallstoken} + + + + + diff --git a/src/checkstyle/checks.xml b/src/checkstyle/checks.xml new file mode 100644 index 0000000..fd9eb62 --- /dev/null +++ b/src/checkstyle/checks.xml @@ -0,0 +1,118 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/src/license/LICENSE-HEADER.txt b/src/license/LICENSE-HEADER.txt new file mode 100644 index 0000000..b72ec76 --- /dev/null +++ b/src/license/LICENSE-HEADER.txt @@ -0,0 +1,15 @@ +Copyright (C) 2011 the original author or authors. +See the notice.md file distributed with this work for additional +information regarding copyright ownership. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. diff --git a/src/site/site.xml b/src/site/site.xml new file mode 100644 index 0000000..e32fcc5 --- /dev/null +++ b/src/site/site.xml @@ -0,0 +1,41 @@ + + + + + + com.googlecode.fluido-skin + fluido-skin + 1.3 + + + + + + + + + + +