Skip to content

Commit

Permalink
tune RocksDB parameters (#3623)
Browse files Browse the repository at this point in the history
  • Loading branch information
bxq2011hust authored May 4, 2023
1 parent 59e8c30 commit 4501483
Show file tree
Hide file tree
Showing 15 changed files with 90 additions and 36 deletions.
4 changes: 2 additions & 2 deletions .github/workflows/workflow-self-hosted-arm-static-bulid.yml
Original file line number Diff line number Diff line change
Expand Up @@ -48,8 +48,8 @@ jobs:

- name: update vcpkg
run: |
cd ${{ env.VCPKG_ROOT }} && git checkout master
git pull && cd -
cd ${{ env.VCPKG_ROOT }} && git checkout master #&& git pull
cd -
- name: Build for linux
run: |
Expand Down
4 changes: 2 additions & 2 deletions .github/workflows/workflow-self-hosted-arm.yml
Original file line number Diff line number Diff line change
Expand Up @@ -54,8 +54,8 @@ jobs:

- name: update vcpkg
run: |
cd ${{ env.VCPKG_ROOT }} && git checkout master
git pull && cd -
cd ${{ env.VCPKG_ROOT }} && git checkout master #&& git pull
cd -
- name: Build for linux
run: |
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -50,8 +50,8 @@ jobs:

- name: update vcpkg
run: |
cd ${{ env.VCPKG_ROOT }} && git checkout master
git pull && cd -
cd ${{ env.VCPKG_ROOT }} && git checkout master #&& git pull
cd -
- name: Build for linux
run: |
Expand Down
4 changes: 2 additions & 2 deletions .github/workflows/workflow-self-hosted-centos.yml
Original file line number Diff line number Diff line change
Expand Up @@ -63,8 +63,8 @@ jobs:

- name: update vcpkg
run: |
cd ${{ env.VCPKG_ROOT }} && git checkout master
git pull && cd -
cd ${{ env.VCPKG_ROOT }} && git checkout master #&& git pull
cd -
- name: Build for linux
run: |
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -59,8 +59,8 @@ jobs:

- name: update vcpkg
run: |
cd ${{ env.VCPKG_ROOT }} && git checkout master
git pull && cd -
cd ${{ env.VCPKG_ROOT }} && git checkout master #&& git pull
cd -
- name: Build for linux
run: |
Expand Down
4 changes: 2 additions & 2 deletions .github/workflows/workflow-self-hosted-ubuntu.yml
Original file line number Diff line number Diff line change
Expand Up @@ -63,8 +63,8 @@ jobs:

- name: update vcpkg
run: |
cd ${{ env.VCPKG_ROOT }} && git checkout master
git pull && cd -
cd ${{ env.VCPKG_ROOT }} && git checkout master #&& git pull
cd -
- name: Build for linux
run: |
Expand Down
2 changes: 1 addition & 1 deletion .github/workflows/workflow.yml
Original file line number Diff line number Diff line change
Expand Up @@ -36,7 +36,7 @@ jobs:
- name: Prepare vcpkg
if: runner.os != 'Windows'
uses: friendlyanon/setup-vcpkg@v1
with: { committish: 82e03905f54fc49d11346e5148e3860747604d86 }
with: { committish: 4116148a7f5c09d39da34ffb75f33283796d687b }
- uses: actions/cache@v2
id: deps_cache
with:
Expand Down
2 changes: 1 addition & 1 deletion bcos-crypto/demo/perf_demo.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -110,7 +110,7 @@ void stTest(std::string_view inputData, size_t _count)
// auto hashImpl = std::make_shared<Keccak256>();
// auto keccak256Old = hashPerf(hashImpl, "Keccak256", inputData, _count);

// openssl::OPENSSL_Keccak256_Hasher hasherKeccak256;
// hasher::openssl::OpenSSL_Keccak256_Hasher hasherKeccak256;
// auto keccak256New = hashingPerf(hasherKeccak256, inputData, _count);
// if (keccak256Old[0] != keccak256New[0])
// {
Expand Down
5 changes: 4 additions & 1 deletion bcos-tool/bcos-tool/NodeConfig.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -623,8 +623,11 @@ void NodeConfig::loadStorageConfig(boost::property_tree::ptree const& _pt)
m_storagePath = _pt.get<std::string>("storage.data_path", "data/" + m_groupId);
m_storageType = _pt.get<std::string>("storage.type", "RocksDB");
m_keyPageSize = _pt.get<int32_t>("storage.key_page_size", 10240);
m_maxWriteBufferNumber = _pt.get<int32_t>("storage.max_write_buffer_number", 3);
m_maxWriteBufferNumber = _pt.get<int32_t>("storage.max_write_buffer_number", 4);
m_maxBackgroundJobs = _pt.get<int32_t>("storage.max_background_jobs", 3);
m_writeBufferSize = _pt.get<size_t>("storage.write_buffer_size", 128 << 20);
m_minWriteBufferNumberToMerge = _pt.get<int32_t>("storage.min_write_buffer_number_to_merge", 2);
m_blockCacheSize = _pt.get<size_t>("storage.block_cache_size", 128 << 20);
m_enableDBStatistics = _pt.get<bool>("storage.enable_statistics", false);
m_pdCaPath = _pt.get<std::string>("storage.pd_ssl_ca_path", "");
m_pdCertPath = _pt.get<std::string>("storage.pd_ssl_cert_path", "");
Expand Down
9 changes: 8 additions & 1 deletion bcos-tool/bcos-tool/NodeConfig.h
Original file line number Diff line number Diff line change
Expand Up @@ -126,6 +126,9 @@ class NodeConfig
int maxWriteBufferNumber() const { return m_maxWriteBufferNumber; }
bool enableStatistics() const { return m_enableDBStatistics; }
int maxBackgroundJobs() const { return m_maxBackgroundJobs; }
size_t writeBufferSize() const { return m_writeBufferSize; }
int minWriteBufferNumberToMerge() const { return m_minWriteBufferNumberToMerge; }
size_t blockCacheSize() const { return m_blockCacheSize; }
std::vector<std::string> const& pdAddrs() const { return m_pd_addrs; }
std::string const& pdCaPath() const { return m_pdCaPath; }
std::string const& pdCertPath() const { return m_pdCertPath; }
Expand Down Expand Up @@ -331,9 +334,13 @@ class NodeConfig
std::string m_pdCaPath;
std::string m_pdCertPath;
std::string m_pdKeyPath;
int m_maxWriteBufferNumber = 3;
bool m_enableDBStatistics = false;
int m_maxWriteBufferNumber = 3;
int m_maxBackgroundJobs = 3;
size_t m_writeBufferSize = 64 << 21;
int m_minWriteBufferNumberToMerge = 2;
size_t m_blockCacheSize = 128 << 20;

bool m_enableArchive = false;
std::string m_archiveListenIP;
uint16_t m_archiveListenPort = 0;
Expand Down
6 changes: 5 additions & 1 deletion bcos-txpool/bcos-txpool/txpool/storage/MemoryStorage.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -896,6 +896,7 @@ void MemoryStorage::batchMarkTxsWithoutLock(
continue;
}
// the tx has already been re-sealed, can not enforce unseal
// TODO: if tx->batchHash() is empty, should the tx be re-sealed?
if ((tx->batchId() != _batchId || tx->batchHash() != _batchHash) && tx->sealed() &&
!_sealFlag)
{
Expand Down Expand Up @@ -1110,6 +1111,7 @@ void MemoryStorage::cleanUpExpiredTransactions()
}
size_t traversedTxsNum = 0;
size_t erasedTxs = 0;
size_t sealedTxs = 0;
uint64_t currentTime = utcTime();

m_txsTable.forEach<TxsMap::ReadAccessor>(
Expand Down Expand Up @@ -1170,7 +1172,9 @@ void MemoryStorage::cleanUpExpiredTransactions()
});

TXPOOL_LOG(INFO) << LOG_DESC("cleanUpExpiredTransactions")
<< LOG_KV("pendingTxs", m_txsTable.size()) << LOG_KV("erasedTxs", erasedTxs);
<< LOG_KV("pendingTxs", m_txsTable.size()) << LOG_KV("erasedTxs", erasedTxs)
<< LOG_KV("sealedTxs", sealedTxs)
<< LOG_KV("traversedTxsNum", traversedTxsNum);

removeInvalidTxs(true);
}
Expand Down
15 changes: 11 additions & 4 deletions libinitializer/Initializer.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -161,13 +161,20 @@ void Initializer::init(bcos::protocol::NodeArchitectureType _nodeArchType,

if (boost::iequals(m_nodeConfig->storageType(), "RocksDB"))
{
RocksDBOption option;
option.maxWriteBufferNumber = m_nodeConfig->maxWriteBufferNumber();
option.maxBackgroundJobs = m_nodeConfig->maxBackgroundJobs();
option.writeBufferSize = m_nodeConfig->writeBufferSize();
option.minWriteBufferNumberToMerge = m_nodeConfig->minWriteBufferNumberToMerge();
option.blockCacheSize = m_nodeConfig->blockCacheSize();

// m_protocolInitializer->dataEncryption() will return nullptr when storage_security = false
storage = StorageInitializer::build(storagePath, m_protocolInitializer->dataEncryption(),
m_nodeConfig->keyPageSize(), m_nodeConfig->maxWriteBufferNumber(),
m_nodeConfig->enableStatistics(), m_nodeConfig->maxBackgroundJobs());
storage =
StorageInitializer::build(storagePath, option, m_protocolInitializer->dataEncryption(),
m_nodeConfig->keyPageSize(), m_nodeConfig->enableStatistics());
schedulerStorage = storage;
consensusStorage = StorageInitializer::build(
consensusStoragePath, m_protocolInitializer->dataEncryption());
consensusStoragePath, option, m_protocolInitializer->dataEncryption(), 0);
airExecutorStorage = storage;
}
#ifdef WITH_TIKV
Expand Down
37 changes: 25 additions & 12 deletions libinitializer/StorageInitializer.h
Original file line number Diff line number Diff line change
Expand Up @@ -34,14 +34,24 @@

namespace bcos::initializer
{

struct RocksDBOption
{
int maxWriteBufferNumber = 3;
int maxBackgroundJobs = 3;
size_t writeBufferSize = 128 << 20; // 128MB
int minWriteBufferNumberToMerge = 2;
size_t blockCacheSize = 128 << 20; // 128MB
};

class StorageInitializer
{
public:
static auto createRocksDB(const std::string& _path, int _max_write_buffer_number = 3,
bool _enableDBStatistics = false, int _max_background_jobs = 3)
static auto createRocksDB(
const std::string& _path, RocksDBOption& rocksDBOption, bool _enableDBStatistics = false)
{
boost::filesystem::create_directories(_path);
rocksdb::DB* db;
rocksdb::DB* db = nullptr;
rocksdb::Options options;
// Note: This option will increase much memory
// options.IncreaseParallelism();
Expand All @@ -50,22 +60,27 @@ class StorageInitializer
// create the DB if it's not already present
options.create_if_missing = true;
// to mitigate write stalls
options.max_background_jobs = _max_background_jobs;
options.max_write_buffer_number = _max_write_buffer_number;
options.max_background_jobs = rocksDBOption.maxBackgroundJobs;
options.max_write_buffer_number = rocksDBOption.maxWriteBufferNumber;
// FIXME: enable blob support when space amplification is acceptable
// options.enable_blob_files = keyPageSize > 1 ? true : false;
options.compression = rocksdb::kZSTD;
options.bottommost_compression = rocksdb::kZSTD; // last level compression
options.max_open_files = 256;
options.write_buffer_size = 64 << 20; // default is 64MB
options.write_buffer_size =
rocksDBOption.writeBufferSize; // RocksDB default is 64MB; overridden by configured value
options.min_write_buffer_number_to_merge =
rocksDBOption.minWriteBufferNumberToMerge; // default is 1
options.enable_pipelined_write = true;
// options.min_blob_size = 1024;

if (_enableDBStatistics)
{
options.statistics = rocksdb::CreateDBStatistics();
}
// block cache 128MB
std::shared_ptr<rocksdb::Cache> cache = rocksdb::NewLRUCache(128 << 20);
std::shared_ptr<rocksdb::Cache> cache =
rocksdb::NewLRUCache(rocksDBOption.blockCacheSize);
rocksdb::BlockBasedTableOptions table_options;
table_options.block_cache = cache;
// use bloom filter to optimize point lookup, i.e. get
Expand Down Expand Up @@ -94,12 +109,10 @@ class StorageInitializer
});
}
static bcos::storage::TransactionalStorageInterface::Ptr build(const std::string& _storagePath,
const bcos::security::DataEncryptInterface::Ptr& _dataEncrypt,
[[maybe_unused]] size_t keyPageSize = 0, int _max_write_buffer_number = 3,
bool _enableDBStatistics = false, int _max_background_jobs = 3)
RocksDBOption& rocksDBOption, const bcos::security::DataEncryptInterface::Ptr& _dataEncrypt,
[[maybe_unused]] size_t keyPageSize = 0, bool _enableDBStatistics = false)
{
auto unique_db = createRocksDB(
_storagePath, _max_write_buffer_number, _enableDBStatistics, _max_background_jobs);
auto unique_db = createRocksDB(_storagePath, rocksDBOption, _enableDBStatistics);
return std::make_shared<bcos::storage::RocksDBStorage>(std::move(unique_db), _dataEncrypt);
}

Expand Down
17 changes: 15 additions & 2 deletions tools/archive-tool/archiveTool.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -157,8 +157,14 @@ TransactionalStorageInterface::Ptr createBackendStorage(
}
if (write)
{
RocksDBOption option;
option.maxWriteBufferNumber = nodeConfig->maxWriteBufferNumber();
option.maxBackgroundJobs = nodeConfig->maxBackgroundJobs();
option.writeBufferSize = nodeConfig->writeBufferSize();
option.minWriteBufferNumberToMerge = nodeConfig->minWriteBufferNumberToMerge();
option.blockCacheSize = nodeConfig->blockCacheSize();
storage = StorageInitializer::build(
nodeConfig->storagePath(), dataEncryption, nodeConfig->keyPageSize());
nodeConfig->storagePath(), option, dataEncryption, nodeConfig->keyPageSize());
}
else
{
Expand Down Expand Up @@ -673,7 +679,14 @@ int main(int argc, const char* argv[])
StorageInterface::Ptr archiveStorage = nullptr;
if (boost::iequals(archiveType, "RocksDB"))
{ // create archive rocksDB storage
archiveStorage = StorageInitializer::build(archivePath, nullptr, nodeConfig->keyPageSize());
RocksDBOption option;
option.maxWriteBufferNumber = nodeConfig->maxWriteBufferNumber();
option.maxBackgroundJobs = nodeConfig->maxBackgroundJobs();
option.writeBufferSize = nodeConfig->writeBufferSize();
option.minWriteBufferNumberToMerge = nodeConfig->minWriteBufferNumberToMerge();
option.blockCacheSize = nodeConfig->blockCacheSize();
archiveStorage =
StorageInitializer::build(archivePath, option, nullptr, nodeConfig->keyPageSize());
}
else if (boost::iequals(archiveType, "TiKV"))
{ // create archive TiKV storage
Expand Down
9 changes: 8 additions & 1 deletion tools/storage-tool/storageTool.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -254,8 +254,15 @@ TransactionalStorageInterface::Ptr createBackendStorage(
}
if (write)
{
RocksDBOption option;
option.maxWriteBufferNumber = nodeConfig->maxWriteBufferNumber();
option.maxBackgroundJobs = nodeConfig->maxBackgroundJobs();
option.writeBufferSize = nodeConfig->writeBufferSize();
option.minWriteBufferNumberToMerge = nodeConfig->minWriteBufferNumberToMerge();
option.blockCacheSize = nodeConfig->blockCacheSize();

storage = StorageInitializer::build(
nodeConfig->storagePath(), dataEncryption, nodeConfig->keyPageSize());
nodeConfig->storagePath(), option, dataEncryption, nodeConfig->keyPageSize());
}
else
{
Expand Down

0 comments on commit 4501483

Please sign in to comment.