From 79a34f86ea24662704587bcefdaef2f0225481be Mon Sep 17 00:00:00 2001
From: Mixficsol <838844609@qq.com>
Date: Wed, 27 Mar 2024 16:05:44 +0800
Subject: [PATCH] Complete the multi-key judgment of a pair of simple commands

---
 src/client.cc | 3 +
 src/client.h | 1 +
 src/cmd_hash.cc | 4 +-
 src/storage/include/storage/storage_define.h | 15 +--
 src/storage/src/base_filter.h | 2 +
 src/storage/src/base_meta_value_format.h | 19 ++-
 src/storage/src/base_value_format.h | 1 +
 src/storage/src/coding.h | 9 ++
 src/storage/src/redis.cc | 55 +++-----
 src/storage/src/redis.h | 10 +-
 src/storage/src/redis_hashes.cc | 101 ++++++++-------
 src/storage/src/redis_lists.cc | 94 +++++++-------
 src/storage/src/redis_sets.cc | 124 ++++++++++---------
 src/storage/src/redis_zsets.cc | 104 +++++++--------
 14 files changed, 282 insertions(+), 260 deletions(-)

diff --git a/src/client.cc b/src/client.cc
index 4296a7b96..3c3adfa25 100644
--- a/src/client.cc
+++ b/src/client.cc
@@ -130,6 +130,9 @@ void CmdRes::SetRes(CmdRes::CmdRet _ret, const std::string& content) {
     case kInvalidCursor:
       AppendStringRaw("-ERR invalid cursor");
       break;
+    case kmultikey:
+      AppendStringRaw("-WRONGTYPE Operation against a key holding the wrong kind of value");
+      break;
     default:
       break;
   }
diff --git a/src/client.h b/src/client.h
index e4aefe02e..2d1649504 100644
--- a/src/client.h
+++ b/src/client.h
@@ -48,6 +48,7 @@ class CmdRes {
     kErrOther,
     KIncrByOverFlow,
     kInvalidCursor,
+    kmultikey,
   };

   CmdRes() = default;
diff --git a/src/cmd_hash.cc b/src/cmd_hash.cc
index 7b3358e6f..27ae36b48 100644
--- a/src/cmd_hash.cc
+++ b/src/cmd_hash.cc
@@ -40,13 +40,15 @@ void HSetCmd::DoCmd(PClient* client) {
     s = PSTORE.GetBackend(client->GetCurrentDB())->GetStorage()->HSet(client->Key(), field, value, &temp);
     if (s.ok()) {
       ret += temp;
+    } else if (s.IsInvalidArgument()) {
+      client->SetRes(CmdRes::kmultikey);
+      return;
     } else {
       // FIXME(century): need txn, if bw crashes, it should rollback
       client->SetRes(CmdRes::kErrOther);
       return;
     }
   }
-
   client->AppendInteger(ret);
 }

diff --git a/src/storage/include/storage/storage_define.h b/src/storage/include/storage/storage_define.h
index 053177c17..ac062af70 100644
--- a/src/storage/include/storage/storage_define.h
+++ b/src/storage/include/storage/storage_define.h
@@ -33,15 +33,12 @@ const int kTimestampLength = 8;

 enum ColumnFamilyIndex {
   kStringsCF = 0,
-  kHashesMetaCF = 1,
-  kHashesDataCF = 2,
-  kSetsMetaCF = 3,
-  kSetsDataCF = 4,
-  kListsMetaCF = 5,
-  kListsDataCF = 6,
-  kZsetsMetaCF = 7,
-  kZsetsDataCF = 8,
-  kZsetsScoreCF = 9,
+  kHashesDataCF = 1,
+  kSetsDataCF = 2,
+  kListsDataCF = 3,
+  kZsetsDataCF = 4,
+  kZsetsScoreCF = 5,
+  kMetaCF = 6,
 };

 const static char kNeedTransformCharacter = '\u0000';
diff --git a/src/storage/src/base_filter.h b/src/storage/src/base_filter.h
index d2c7a629f..acfa55313 100644
--- a/src/storage/src/base_filter.h
+++ b/src/storage/src/base_filter.h
@@ -166,5 +166,7 @@ using ZSetsMetaFilterFactory = BaseMetaFilterFactory;
 using ZSetsDataFilter = BaseDataFilter;
 using ZSetsDataFilterFactory = BaseDataFilterFactory;

+using MetaFilter = BaseMetaFilter;
+using MetaFilterFactory = BaseMetaFilterFactory;
 }  // namespace storage
 #endif  // SRC_BASE_FILTER_H_
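The cmd_hash.cc hunk above is the pattern the other type-checked commands are expected to follow: the storage layer reports a key/type mismatch as Status::InvalidArgument, and the command handler maps that onto the new kmultikey reply instead of kErrOther. A minimal sketch of that mapping, assuming only the types visible in this patch (PClient, CmdRes, storage::Status); the helper name CheckStorageStatus is illustrative and is not added by the patch:

    // Sketch: translate a storage::Status into a client reply the same way HSetCmd::DoCmd now does.
    // CheckStorageStatus is a hypothetical helper, not part of this patch.
    static bool CheckStorageStatus(const storage::Status& s, PClient* client) {
      if (s.ok()) {
        return true;  // caller goes on to append its normal reply
      }
      if (s.IsInvalidArgument()) {
        client->SetRes(CmdRes::kmultikey);  // rendered as "-WRONGTYPE Operation against a key holding the wrong kind of value"
      } else {
        client->SetRes(CmdRes::kErrOther);
      }
      return false;  // error reply already set; caller should return without appending anything else
    }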
diff --git a/src/storage/src/base_meta_value_format.h b/src/storage/src/base_meta_value_format.h
index 12a5e12a7..a88e4aecf 100644
--- a/src/storage/src/base_meta_value_format.h
+++ b/src/storage/src/base_meta_value_format.h
@@ -59,17 +59,26 @@ class ParsedBaseMetaValue : public ParsedInternalValue {
   explicit ParsedBaseMetaValue(std::string* internal_value_str) : ParsedInternalValue(internal_value_str) {
     if (internal_value_str->size() >= kBaseMetaValueSuffixLength) {
       int offset = 0;
-      user_value_ = Slice(internal_value_str->data(), internal_value_str->size() - kBaseMetaValueSuffixLength);
+      type_ = Slice(internal_value_str->data(), 1);
+      offset += 1;
+      // std::cout << "type: " << type_.ToStringView() << std::endl;
+      user_value_ = Slice(internal_value_str->data() + 1, internal_value_str->size() - kBaseMetaValueSuffixLength - 1);
+      // std::cout << "user_value: " << user_value_.ToStringView() << std::endl;
+      // std::cout << "user_value_size: " << user_value_.size() << std::endl;
       offset += user_value_.size();
       version_ = DecodeFixed64(internal_value_str->data() + offset);
+      // std::cout << "version:" << version_ << std::endl;
       offset += sizeof(version_);
       memcpy(reserve_, internal_value_str->data() + offset, sizeof(reserve_));
       offset += sizeof(reserve_);
       ctime_ = DecodeFixed64(internal_value_str->data() + offset);
+      // std::cout << "ctime: " << ctime_ << std::endl;
       offset += sizeof(ctime_);
       etime_ = DecodeFixed64(internal_value_str->data() + offset);
+      // std::cout << "etime: " << etime_ << std::endl;
     }
-    count_ = DecodeFixed32(internal_value_str->data());
+    count_ = DecodeFixed32(internal_value_str->data() + 1);
+    // std::cout << "count: " << count_ << std::endl;
   }

   // Use this constructor in rocksdb::CompactionFilter::Filter();
@@ -134,11 +143,13 @@ class ParsedBaseMetaValue : public ParsedInternalValue {
   int32_t Count() { return count_; }

+  bool IsType(Slice c) { return type_.ToStringView() == c.ToStringView(); }
+
   void SetCount(int32_t count) {
     count_ = count;
     if (value_) {
       char* dst = const_cast<char*>(value_->data());
-      EncodeFixed32(dst, count_);
+      EncodeFixed32(dst + 1, count_);
     }
   }

@@ -155,7 +166,7 @@ class ParsedBaseMetaValue : public ParsedInternalValue {
     count_ += delta;
     if (value_) {
       char* dst = const_cast<char*>(value_->data());
-      EncodeFixed32(dst, count_);
+      EncodeFixed32(dst + 1, count_);
     }
   }

diff --git a/src/storage/src/base_value_format.h b/src/storage/src/base_value_format.h
index 7654c11e4..d94030018 100644
--- a/src/storage/src/base_value_format.h
+++ b/src/storage/src/base_value_format.h
@@ -130,6 +130,7 @@ class ParsedInternalValue {
   virtual void SetEtimeToValue() = 0;
   virtual void SetCtimeToValue() = 0;
   std::string* value_ = nullptr;
+  Slice type_;
   Slice user_value_;
   uint64_t version_ = 0;
   uint64_t ctime_ = 0;
diff --git a/src/storage/src/coding.h b/src/storage/src/coding.h
index e26c29373..87d7996f4 100644
--- a/src/storage/src/coding.h
+++ b/src/storage/src/coding.h
@@ -26,6 +26,15 @@ namespace storage {
 static const bool kLittleEndian = STORAGE_PLATFORM_IS_LITTLE_ENDIAN;
 #undef STORAGE_PLATFORM_IS_LITTLE_ENDIAN

+inline void EncodeFixed8(char* buf, char type) {
+  if (kLittleEndian) {
+    memcpy(buf, &type, sizeof(type));
+  } else {
+    uint8_t converted_value = static_cast<uint8_t>(type);
+    buf[0] = converted_value;
+  }
+}
+
 inline void EncodeFixed32(char* buf, uint32_t value) {
   if (kLittleEndian) {
     memcpy(buf, &value, sizeof(value));
diff --git a/src/storage/src/redis.cc b/src/storage/src/redis.cc
index 5203e3bb3..778efa954 100644
--- a/src/storage/src/redis.cc
+++ b/src/storage/src/redis.cc
@@ -61,6 +61,14 @@ Status Redis::Open(const StorageOptions& storage_options, const std::string& db_
   rocksdb::DBOptions db_ops(storage_options.options);
   db_ops.create_missing_column_families = true;
+  // Meta column-family options
+  rocksdb::ColumnFamilyOptions meta_cf_ops(storage_options.options);
+  rocksdb::BlockBasedTableOptions
meta_cf_table_ops(table_ops); + meta_cf_ops.compaction_filter_factory = std::make_shared(); + if (!storage_options.share_block_cache && (storage_options.block_cache_size > 0)) { + meta_cf_table_ops.block_cache = rocksdb::NewLRUCache(storage_options.block_cache_size); + } + meta_cf_ops.table_factory.reset(rocksdb::NewBlockBasedTableFactory(meta_cf_table_ops)); // string column-family options rocksdb::ColumnFamilyOptions string_cf_ops(storage_options.options); string_cf_ops.compaction_filter_factory = std::make_shared(); @@ -71,86 +79,61 @@ Status Redis::Open(const StorageOptions& storage_options, const std::string& db_ string_cf_ops.table_factory.reset(rocksdb::NewBlockBasedTableFactory(string_table_ops)); // hash column-family options - rocksdb::ColumnFamilyOptions hash_meta_cf_ops(storage_options.options); rocksdb::ColumnFamilyOptions hash_data_cf_ops(storage_options.options); - hash_meta_cf_ops.compaction_filter_factory = std::make_shared(); - hash_data_cf_ops.compaction_filter_factory = - std::make_shared(&db_, &handles_, kHashesMetaCF); - rocksdb::BlockBasedTableOptions hash_meta_cf_table_ops(table_ops); + hash_data_cf_ops.compaction_filter_factory = std::make_shared(&db_, &handles_, kMetaCF); rocksdb::BlockBasedTableOptions hash_data_cf_table_ops(table_ops); if (!storage_options.share_block_cache && (storage_options.block_cache_size > 0)) { - hash_meta_cf_table_ops.block_cache = rocksdb::NewLRUCache(storage_options.block_cache_size); hash_data_cf_table_ops.block_cache = rocksdb::NewLRUCache(storage_options.block_cache_size); } - hash_meta_cf_ops.table_factory.reset(rocksdb::NewBlockBasedTableFactory(hash_meta_cf_table_ops)); hash_data_cf_ops.table_factory.reset(rocksdb::NewBlockBasedTableFactory(hash_data_cf_table_ops)); // list column-family options - rocksdb::ColumnFamilyOptions list_meta_cf_ops(storage_options.options); rocksdb::ColumnFamilyOptions list_data_cf_ops(storage_options.options); - list_meta_cf_ops.compaction_filter_factory = std::make_shared(); - list_data_cf_ops.compaction_filter_factory = std::make_shared(&db_, &handles_, kListsMetaCF); + list_data_cf_ops.compaction_filter_factory = std::make_shared(&db_, &handles_, kMetaCF); list_data_cf_ops.comparator = ListsDataKeyComparator(); - rocksdb::BlockBasedTableOptions list_meta_cf_table_ops(table_ops); rocksdb::BlockBasedTableOptions list_data_cf_table_ops(table_ops); if (!storage_options.share_block_cache && (storage_options.block_cache_size > 0)) { - list_meta_cf_table_ops.block_cache = rocksdb::NewLRUCache(storage_options.block_cache_size); list_data_cf_table_ops.block_cache = rocksdb::NewLRUCache(storage_options.block_cache_size); } - list_meta_cf_ops.table_factory.reset(rocksdb::NewBlockBasedTableFactory(list_meta_cf_table_ops)); list_data_cf_ops.table_factory.reset(rocksdb::NewBlockBasedTableFactory(list_data_cf_table_ops)); // set column-family options - rocksdb::ColumnFamilyOptions set_meta_cf_ops(storage_options.options); rocksdb::ColumnFamilyOptions set_data_cf_ops(storage_options.options); - set_meta_cf_ops.compaction_filter_factory = std::make_shared(); - set_data_cf_ops.compaction_filter_factory = std::make_shared(&db_, &handles_, kSetsMetaCF); - rocksdb::BlockBasedTableOptions set_meta_cf_table_ops(table_ops); + set_data_cf_ops.compaction_filter_factory = std::make_shared(&db_, &handles_, kMetaCF); rocksdb::BlockBasedTableOptions set_data_cf_table_ops(table_ops); if (!storage_options.share_block_cache && (storage_options.block_cache_size > 0)) { - set_meta_cf_table_ops.block_cache = 
rocksdb::NewLRUCache(storage_options.block_cache_size); set_data_cf_table_ops.block_cache = rocksdb::NewLRUCache(storage_options.block_cache_size); } - set_meta_cf_ops.table_factory.reset(rocksdb::NewBlockBasedTableFactory(set_meta_cf_table_ops)); set_data_cf_ops.table_factory.reset(rocksdb::NewBlockBasedTableFactory(set_data_cf_table_ops)); // zset column-family options - rocksdb::ColumnFamilyOptions zset_meta_cf_ops(storage_options.options); rocksdb::ColumnFamilyOptions zset_data_cf_ops(storage_options.options); rocksdb::ColumnFamilyOptions zset_score_cf_ops(storage_options.options); - zset_meta_cf_ops.compaction_filter_factory = std::make_shared(); - zset_data_cf_ops.compaction_filter_factory = std::make_shared(&db_, &handles_, kZsetsMetaCF); - zset_score_cf_ops.compaction_filter_factory = - std::make_shared(&db_, &handles_, kZsetsMetaCF); + zset_data_cf_ops.compaction_filter_factory = std::make_shared(&db_, &handles_, kMetaCF); + zset_score_cf_ops.compaction_filter_factory = std::make_shared(&db_, &handles_, kMetaCF); zset_score_cf_ops.comparator = ZSetsScoreKeyComparator(); - rocksdb::BlockBasedTableOptions zset_meta_cf_table_ops(table_ops); rocksdb::BlockBasedTableOptions zset_data_cf_table_ops(table_ops); rocksdb::BlockBasedTableOptions zset_score_cf_table_ops(table_ops); if (!storage_options.share_block_cache && (storage_options.block_cache_size > 0)) { - zset_meta_cf_table_ops.block_cache = rocksdb::NewLRUCache(storage_options.block_cache_size); zset_data_cf_table_ops.block_cache = rocksdb::NewLRUCache(storage_options.block_cache_size); - zset_meta_cf_table_ops.block_cache = rocksdb::NewLRUCache(storage_options.block_cache_size); } - zset_meta_cf_ops.table_factory.reset(rocksdb::NewBlockBasedTableFactory(zset_meta_cf_table_ops)); zset_data_cf_ops.table_factory.reset(rocksdb::NewBlockBasedTableFactory(zset_data_cf_table_ops)); zset_score_cf_ops.table_factory.reset(rocksdb::NewBlockBasedTableFactory(zset_score_cf_table_ops)); std::vector column_families; column_families.emplace_back(rocksdb::kDefaultColumnFamilyName, string_cf_ops); // hash CF - column_families.emplace_back("hash_meta_cf", hash_meta_cf_ops); column_families.emplace_back("hash_data_cf", hash_data_cf_ops); // set CF - column_families.emplace_back("set_meta_cf", set_meta_cf_ops); column_families.emplace_back("set_data_cf", set_data_cf_ops); // list CF - column_families.emplace_back("list_meta_cf", list_meta_cf_ops); column_families.emplace_back("list_data_cf", list_data_cf_ops); // zset CF - column_families.emplace_back("zset_meta_cf", zset_meta_cf_ops); column_families.emplace_back("zset_data_cf", zset_data_cf_ops); column_families.emplace_back("zset_score_cf", zset_score_cf_ops); + // meta CF + column_families.emplace_back("meta_cf", meta_cf_ops); return rocksdb::DB::Open(db_ops, db_path, column_families, &handles_, &db_); } @@ -194,7 +177,7 @@ Status Redis::CompactRange(const DataType& dtype, const rocksdb::Slice* begin, c break; case DataType::kHashes: if (type == kMeta || type == kMetaAndData) { - s = db_->CompactRange(default_compact_range_options_, handles_[kHashesMetaCF], begin, end); + s = db_->CompactRange(default_compact_range_options_, handles_[kMetaCF], begin, end); } if (s.ok() && (type == kData || type == kMetaAndData)) { s = db_->CompactRange(default_compact_range_options_, handles_[kHashesDataCF], begin, end); @@ -202,7 +185,7 @@ Status Redis::CompactRange(const DataType& dtype, const rocksdb::Slice* begin, c break; case DataType::kSets: if (type == kMeta || type == kMetaAndData) { - 
db_->CompactRange(default_compact_range_options_, handles_[kSetsMetaCF], begin, end); + db_->CompactRange(default_compact_range_options_, handles_[kMetaCF], begin, end); } if (s.ok() && (type == kData || type == kMetaAndData)) { db_->CompactRange(default_compact_range_options_, handles_[kSetsDataCF], begin, end); @@ -210,7 +193,7 @@ Status Redis::CompactRange(const DataType& dtype, const rocksdb::Slice* begin, c break; case DataType::kLists: if (type == kMeta || type == kMetaAndData) { - s = db_->CompactRange(default_compact_range_options_, handles_[kListsMetaCF], begin, end); + s = db_->CompactRange(default_compact_range_options_, handles_[kMetaCF], begin, end); } if (s.ok() && (type == kData || type == kMetaAndData)) { s = db_->CompactRange(default_compact_range_options_, handles_[kListsDataCF], begin, end); @@ -218,7 +201,7 @@ Status Redis::CompactRange(const DataType& dtype, const rocksdb::Slice* begin, c break; case DataType::kZSets: if (type == kMeta || type == kMetaAndData) { - db_->CompactRange(default_compact_range_options_, handles_[kZsetsMetaCF], begin, end); + db_->CompactRange(default_compact_range_options_, handles_[kMetaCF], begin, end); } if (s.ok() && (type == kData || type == kMetaAndData)) { db_->CompactRange(default_compact_range_options_, handles_[kZsetsDataCF], begin, end); diff --git a/src/storage/src/redis.h b/src/storage/src/redis.h index e5439042a..a797f3348 100644 --- a/src/storage/src/redis.h +++ b/src/storage/src/redis.h @@ -308,19 +308,19 @@ class Redis { options.iterate_upper_bound = upper_bound; switch (type) { case 'k': - return new StringsIterator(options, db_, handles_[kStringsCF], pattern); + return new StringsIterator(options, db_, handles_[kMetaCF], pattern); break; case 'h': - return new HashesIterator(options, db_, handles_[kHashesMetaCF], pattern); + return new HashesIterator(options, db_, handles_[kMetaCF], pattern); break; case 's': - return new SetsIterator(options, db_, handles_[kSetsMetaCF], pattern); + return new SetsIterator(options, db_, handles_[kMetaCF], pattern); break; case 'l': - return new ListsIterator(options, db_, handles_[kListsMetaCF], pattern); + return new ListsIterator(options, db_, handles_[kMetaCF], pattern); break; case 'z': - return new ZsetsIterator(options, db_, handles_[kZsetsMetaCF], pattern); + return new ZsetsIterator(options, db_, handles_[kMetaCF], pattern); break; default: WARN("Invalid datatype to create iterator"); diff --git a/src/storage/src/redis_hashes.cc b/src/storage/src/redis_hashes.cc index 9abf51e87..2904c7f07 100644 --- a/src/storage/src/redis_hashes.cc +++ b/src/storage/src/redis_hashes.cc @@ -21,6 +21,8 @@ #include "storage/storage_define.h" #include "storage/util.h" +rocksdb::Slice HashType("h"); + namespace storage { Status Redis::ScanHashesKeyNum(KeyInfo* key_info) { uint64_t keys = 0; @@ -37,7 +39,7 @@ Status Redis::ScanHashesKeyNum(KeyInfo* key_info) { int64_t curtime; rocksdb::Env::Default()->GetCurrentTime(&curtime); - rocksdb::Iterator* iter = db_->NewIterator(iterator_options, handles_[kHashesMetaCF]); + rocksdb::Iterator* iter = db_->NewIterator(iterator_options, handles_[kMetaCF]); for (iter->SeekToFirst(); iter->Valid(); iter->Next()) { ParsedHashesMetaValue parsed_hashes_meta_value(iter->value()); if (parsed_hashes_meta_value.IsStale() || parsed_hashes_meta_value.Count() == 0) { @@ -71,7 +73,7 @@ Status Redis::HashesPKPatternMatchDel(const std::string& pattern, int32_t* ret) int32_t total_delete = 0; Status s; rocksdb::WriteBatch batch; - rocksdb::Iterator* iter = 
db_->NewIterator(iterator_options, handles_[kHashesMetaCF]); + rocksdb::Iterator* iter = db_->NewIterator(iterator_options, handles_[kMetaCF]); iter->SeekToFirst(); while (iter->Valid()) { key = iter->key().ToString(); @@ -80,7 +82,7 @@ Status Redis::HashesPKPatternMatchDel(const std::string& pattern, int32_t* ret) if (!parsed_hashes_meta_value.IsStale() && (parsed_hashes_meta_value.Count() != 0) && (StringMatch(pattern.data(), pattern.size(), key.data(), key.size(), 0) != 0)) { parsed_hashes_meta_value.InitialMetaValue(); - batch.Put(handles_[kHashesMetaCF], key, meta_value); + batch.Put(handles_[kMetaCF], key, meta_value); } if (static_cast(batch.Count()) >= BATCH_DELETE_LIMIT) { s = db_->Write(default_write_options_, &batch); @@ -130,7 +132,7 @@ Status Redis::HDel(const Slice& key, const std::vector& fields, int read_options.snapshot = snapshot; BaseMetaKey base_meta_key(key); - Status s = db_->Get(read_options, handles_[kHashesMetaCF], base_meta_key.Encode(), &meta_value); + Status s = db_->Get(read_options, handles_[kMetaCF], base_meta_key.Encode(), &meta_value); if (s.ok()) { ParsedHashesMetaValue parsed_hashes_meta_value(&meta_value); if (parsed_hashes_meta_value.IsStale() || parsed_hashes_meta_value.Count() == 0) { @@ -157,7 +159,7 @@ Status Redis::HDel(const Slice& key, const std::vector& fields, int return Status::InvalidArgument("hash size overflow"); } parsed_hashes_meta_value.ModifyCount(-del_cnt); - batch.Put(handles_[kHashesMetaCF], base_meta_key.Encode(), meta_value); + batch.Put(handles_[kMetaCF], base_meta_key.Encode(), meta_value); } } else if (s.IsNotFound()) { *ret = 0; @@ -184,7 +186,7 @@ Status Redis::HGet(const Slice& key, const Slice& field, std::string* value) { read_options.snapshot = snapshot; BaseMetaKey base_meta_key(key); - Status s = db_->Get(read_options, handles_[kHashesMetaCF], base_meta_key.Encode(), &meta_value); + Status s = db_->Get(read_options, handles_[kMetaCF], base_meta_key.Encode(), &meta_value); if (s.ok()) { ParsedHashesMetaValue parsed_hashes_meta_value(&meta_value); if (parsed_hashes_meta_value.IsStale()) { @@ -214,7 +216,7 @@ Status Redis::HGetall(const Slice& key, std::vector* fvs) { read_options.snapshot = snapshot; BaseMetaKey base_meta_key(key); - Status s = db_->Get(read_options, handles_[kHashesMetaCF], base_meta_key.Encode(), &meta_value); + Status s = db_->Get(read_options, handles_[kMetaCF], base_meta_key.Encode(), &meta_value); if (s.ok()) { ParsedHashesMetaValue parsed_hashes_meta_value(&meta_value); if (parsed_hashes_meta_value.IsStale()) { @@ -247,7 +249,7 @@ Status Redis::HGetallWithTTL(const Slice& key, std::vector* fvs, uin ScopeSnapshot ss(db_, &snapshot); read_options.snapshot = snapshot; BaseMetaKey base_meta_key(key); - Status s = db_->Get(read_options, handles_[kHashesMetaCF], base_meta_key.Encode(), &meta_value); + Status s = db_->Get(read_options, handles_[kMetaCF], base_meta_key.Encode(), &meta_value); if (s.ok()) { ParsedHashesMetaValue parsed_hashes_meta_value(&meta_value); if (parsed_hashes_meta_value.Count() == 0) { @@ -292,7 +294,7 @@ Status Redis::HIncrby(const Slice& key, const Slice& field, int64_t value, int64 std::string meta_value; BaseMetaKey base_meta_key(key); - Status s = db_->Get(default_read_options_, handles_[kHashesMetaCF], base_meta_key.Encode(), &meta_value); + Status s = db_->Get(default_read_options_, handles_[kMetaCF], base_meta_key.Encode(), &meta_value); char value_buf[32] = {0}; char meta_value_buf[4] = {0}; if (s.ok()) { @@ -301,7 +303,7 @@ Status Redis::HIncrby(const Slice& key, const 
Slice& field, int64_t value, int64 version = parsed_hashes_meta_value.UpdateVersion(); parsed_hashes_meta_value.SetCount(1); parsed_hashes_meta_value.SetEtime(0); - batch.Put(handles_[kHashesMetaCF], base_meta_key.Encode(), meta_value); + batch.Put(handles_[kMetaCF], base_meta_key.Encode(), meta_value); HashesDataKey hashes_data_key(key, version, field); Int64ToStr(value_buf, 32, value); batch.Put(handles_[kHashesDataCF], hashes_data_key.Encode(), value_buf); @@ -332,7 +334,7 @@ Status Redis::HIncrby(const Slice& key, const Slice& field, int64_t value, int64 } BaseDataValue internal_value(value_buf); parsed_hashes_meta_value.ModifyCount(1); - batch.Put(handles_[kHashesMetaCF], base_meta_key.Encode(), meta_value); + batch.Put(handles_[kMetaCF], base_meta_key.Encode(), meta_value); batch.Put(handles_[kHashesDataCF], hashes_data_key.Encode(), internal_value.Encode()); *ret = value; } else { @@ -343,7 +345,7 @@ Status Redis::HIncrby(const Slice& key, const Slice& field, int64_t value, int64 EncodeFixed32(meta_value_buf, 1); HashesMetaValue hashes_meta_value(Slice(meta_value_buf, sizeof(int32_t))); version = hashes_meta_value.UpdateVersion(); - batch.Put(handles_[kHashesMetaCF], base_meta_key.Encode(), hashes_meta_value.Encode()); + batch.Put(handles_[kMetaCF], base_meta_key.Encode(), hashes_meta_value.Encode()); HashesDataKey hashes_data_key(key, version, field); Int64ToStr(value_buf, 32, value); @@ -374,7 +376,7 @@ Status Redis::HIncrbyfloat(const Slice& key, const Slice& field, const Slice& by } BaseMetaKey base_meta_key(key); - Status s = db_->Get(default_read_options_, handles_[kHashesMetaCF], base_meta_key.Encode(), &meta_value); + Status s = db_->Get(default_read_options_, handles_[kMetaCF], base_meta_key.Encode(), &meta_value); char meta_value_buf[4] = {0}; if (s.ok()) { ParsedHashesMetaValue parsed_hashes_meta_value(&meta_value); @@ -382,7 +384,7 @@ Status Redis::HIncrbyfloat(const Slice& key, const Slice& field, const Slice& by version = parsed_hashes_meta_value.UpdateVersion(); parsed_hashes_meta_value.SetCount(1); parsed_hashes_meta_value.SetEtime(0); - batch.Put(handles_[kHashesMetaCF], base_meta_key.Encode(), meta_value); + batch.Put(handles_[kMetaCF], base_meta_key.Encode(), meta_value); HashesDataKey hashes_data_key(key, version, field); LongDoubleToStr(long_double_by, new_value); @@ -415,7 +417,7 @@ Status Redis::HIncrbyfloat(const Slice& key, const Slice& field, const Slice& by } parsed_hashes_meta_value.ModifyCount(1); BaseDataValue internal_value(*new_value); - batch.Put(handles_[kHashesMetaCF], base_meta_key.Encode(), meta_value); + batch.Put(handles_[kMetaCF], base_meta_key.Encode(), meta_value); batch.Put(handles_[kHashesDataCF], hashes_data_key.Encode(), internal_value.Encode()); } else { return s; @@ -425,7 +427,7 @@ Status Redis::HIncrbyfloat(const Slice& key, const Slice& field, const Slice& by EncodeFixed32(meta_value_buf, 1); HashesMetaValue hashes_meta_value(Slice(meta_value_buf, sizeof(int32_t))); version = hashes_meta_value.UpdateVersion(); - batch.Put(handles_[kHashesMetaCF], base_meta_key.Encode(), hashes_meta_value.Encode()); + batch.Put(handles_[kMetaCF], base_meta_key.Encode(), hashes_meta_value.Encode()); HashesDataKey hashes_data_key(key, version, field); LongDoubleToStr(long_double_by, new_value); @@ -449,7 +451,7 @@ Status Redis::HKeys(const Slice& key, std::vector* fields) { read_options.snapshot = snapshot; BaseMetaKey base_meta_key(key); - Status s = db_->Get(read_options, handles_[kHashesMetaCF], base_meta_key.Encode(), &meta_value); + Status s = 
db_->Get(read_options, handles_[kMetaCF], base_meta_key.Encode(), &meta_value); if (s.ok()) { ParsedHashesMetaValue parsed_hashes_meta_value(&meta_value); if (parsed_hashes_meta_value.IsStale()) { @@ -477,7 +479,7 @@ Status Redis::HLen(const Slice& key, int32_t* ret) { std::string meta_value; BaseMetaKey base_meta_key(key); - Status s = db_->Get(default_read_options_, handles_[kHashesMetaCF], base_meta_key.Encode(), &meta_value); + Status s = db_->Get(default_read_options_, handles_[kMetaCF], base_meta_key.Encode(), &meta_value); if (s.ok()) { ParsedHashesMetaValue parsed_hashes_meta_value(&meta_value); if (parsed_hashes_meta_value.IsStale()) { @@ -506,7 +508,7 @@ Status Redis::HMGet(const Slice& key, const std::vector& fields, st ScopeSnapshot ss(db_, &snapshot); read_options.snapshot = snapshot; BaseMetaKey base_meta_key(key); - Status s = db_->Get(read_options, handles_[kHashesMetaCF], base_meta_key.Encode(), &meta_value); + Status s = db_->Get(read_options, handles_[kMetaCF], base_meta_key.Encode(), &meta_value); if (s.ok()) { ParsedHashesMetaValue parsed_hashes_meta_value(&meta_value); if ((is_stale = parsed_hashes_meta_value.IsStale()) || parsed_hashes_meta_value.Count() == 0) { @@ -559,7 +561,7 @@ Status Redis::HMSet(const Slice& key, const std::vector& fvs) { std::string meta_value; BaseMetaKey base_meta_key(key); - Status s = db_->Get(default_read_options_, handles_[kHashesMetaCF], base_meta_key.Encode(), &meta_value); + Status s = db_->Get(default_read_options_, handles_[kMetaCF], base_meta_key.Encode(), &meta_value); char meta_value_buf[4] = {0}; if (s.ok()) { ParsedHashesMetaValue parsed_hashes_meta_value(&meta_value); @@ -569,7 +571,7 @@ Status Redis::HMSet(const Slice& key, const std::vector& fvs) { return Status::InvalidArgument("hash size overflow"); } parsed_hashes_meta_value.SetCount(static_cast(filtered_fvs.size())); - batch.Put(handles_[kHashesMetaCF], base_meta_key.Encode(), meta_value); + batch.Put(handles_[kMetaCF], base_meta_key.Encode(), meta_value); for (const auto& fv : filtered_fvs) { HashesDataKey hashes_data_key(key, version, fv.field); BaseDataValue inter_value(fv.value); @@ -597,13 +599,13 @@ Status Redis::HMSet(const Slice& key, const std::vector& fvs) { return Status::InvalidArgument("hash size overflow"); } parsed_hashes_meta_value.ModifyCount(count); - batch.Put(handles_[kHashesMetaCF], base_meta_key.Encode(), meta_value); + batch.Put(handles_[kMetaCF], base_meta_key.Encode(), meta_value); } } else if (s.IsNotFound()) { EncodeFixed32(meta_value_buf, filtered_fvs.size()); HashesMetaValue hashes_meta_value(Slice(meta_value_buf, sizeof(int32_t))); version = hashes_meta_value.UpdateVersion(); - batch.Put(handles_[kHashesMetaCF], base_meta_key.Encode(), hashes_meta_value.Encode()); + batch.Put(handles_[kMetaCF], base_meta_key.Encode(), hashes_meta_value.Encode()); for (const auto& fv : filtered_fvs) { HashesDataKey hashes_data_key(key, version, fv.field); BaseDataValue inter_value(fv.value); @@ -624,14 +626,17 @@ Status Redis::HSet(const Slice& key, const Slice& field, const Slice& value, int std::string meta_value; BaseMetaKey base_meta_key(key); - Status s = db_->Get(default_read_options_, handles_[kHashesMetaCF], base_meta_key.Encode(), &meta_value); + Status s = db_->Get(default_read_options_, handles_[kMetaCF], base_meta_key.Encode(), &meta_value); char meta_value_buf[4] = {0}; if (s.ok()) { ParsedHashesMetaValue parsed_hashes_meta_value(&meta_value); + if (!parsed_hashes_meta_value.IsType(HashType)) { + return Status::InvalidArgument("WRONGTYPE 
Operation against a key holding the wrong kind of value"); + } if (parsed_hashes_meta_value.IsStale() || parsed_hashes_meta_value.Count() == 0) { version = parsed_hashes_meta_value.InitialMetaValue(); parsed_hashes_meta_value.SetCount(1); - batch.Put(handles_[kHashesMetaCF], base_meta_key.Encode(), meta_value); + batch.Put(handles_[kMetaCF], base_meta_key.Encode(), meta_value); HashesDataKey data_key(key, version, field); BaseDataValue internal_value(value); batch.Put(handles_[kHashesDataCF], data_key.Encode(), internal_value.Encode()); @@ -656,7 +661,7 @@ Status Redis::HSet(const Slice& key, const Slice& field, const Slice& value, int } parsed_hashes_meta_value.ModifyCount(1); BaseDataValue internal_value(value); - batch.Put(handles_[kHashesMetaCF], base_meta_key.Encode(), meta_value); + batch.Put(handles_[kMetaCF], base_meta_key.Encode(), meta_value); batch.Put(handles_[kHashesDataCF], hashes_data_key.Encode(), internal_value.Encode()); *res = 1; } else { @@ -667,7 +672,7 @@ Status Redis::HSet(const Slice& key, const Slice& field, const Slice& value, int EncodeFixed32(meta_value_buf, 1); HashesMetaValue meta_value(Slice(meta_value_buf, sizeof(int32_t))); version = meta_value.UpdateVersion(); - batch.Put(handles_[kHashesMetaCF], base_meta_key.Encode(), meta_value.Encode()); + batch.Put(handles_[kMetaCF], base_meta_key.Encode(), meta_value.Encode()); HashesDataKey data_key(key, version, field); BaseDataValue internal_value(value); batch.Put(handles_[kHashesDataCF], data_key.Encode(), internal_value.Encode()); @@ -689,14 +694,14 @@ Status Redis::HSetnx(const Slice& key, const Slice& field, const Slice& value, i BaseMetaKey base_meta_key(key); BaseDataValue internal_value(value); - Status s = db_->Get(default_read_options_, handles_[kHashesMetaCF], base_meta_key.Encode(), &meta_value); + Status s = db_->Get(default_read_options_, handles_[kMetaCF], base_meta_key.Encode(), &meta_value); char meta_value_buf[4] = {0}; if (s.ok()) { ParsedHashesMetaValue parsed_hashes_meta_value(&meta_value); if (parsed_hashes_meta_value.IsStale() || parsed_hashes_meta_value.Count() == 0) { version = parsed_hashes_meta_value.InitialMetaValue(); parsed_hashes_meta_value.SetCount(1); - batch.Put(handles_[kHashesMetaCF], base_meta_key.Encode(), meta_value); + batch.Put(handles_[kMetaCF], base_meta_key.Encode(), meta_value); HashesDataKey hashes_data_key(key, version, field); batch.Put(handles_[kHashesDataCF], hashes_data_key.Encode(), internal_value.Encode()); *ret = 1; @@ -712,7 +717,7 @@ Status Redis::HSetnx(const Slice& key, const Slice& field, const Slice& value, i return Status::InvalidArgument("hash size overflow"); } parsed_hashes_meta_value.ModifyCount(1); - batch.Put(handles_[kHashesMetaCF], base_meta_key.Encode(), meta_value); + batch.Put(handles_[kMetaCF], base_meta_key.Encode(), meta_value); batch.Put(handles_[kHashesDataCF], hashes_data_key.Encode(), internal_value.Encode()); *ret = 1; } else { @@ -723,7 +728,7 @@ Status Redis::HSetnx(const Slice& key, const Slice& field, const Slice& value, i EncodeFixed32(meta_value_buf, 1); HashesMetaValue hashes_meta_value(Slice(meta_value_buf, sizeof(int32_t))); version = hashes_meta_value.UpdateVersion(); - batch.Put(handles_[kHashesMetaCF], base_meta_key.Encode(), hashes_meta_value.Encode()); + batch.Put(handles_[kMetaCF], base_meta_key.Encode(), hashes_meta_value.Encode()); HashesDataKey hashes_data_key(key, version, field); batch.Put(handles_[kHashesDataCF], hashes_data_key.Encode(), internal_value.Encode()); *ret = 1; @@ -743,7 +748,7 @@ Status 
Redis::HVals(const Slice& key, std::vector* values) { read_options.snapshot = snapshot; BaseMetaKey base_meta_key(key); - Status s = db_->Get(read_options, handles_[kHashesMetaCF], base_meta_key.Encode(), &meta_value); + Status s = db_->Get(read_options, handles_[kMetaCF], base_meta_key.Encode(), &meta_value); if (s.ok()) { ParsedHashesMetaValue parsed_hashes_meta_value(&meta_value); if (parsed_hashes_meta_value.IsStale()) { @@ -796,7 +801,7 @@ Status Redis::HScan(const Slice& key, int64_t cursor, const std::string& pattern read_options.snapshot = snapshot; BaseMetaKey base_meta_key(key); - Status s = db_->Get(read_options, handles_[kHashesMetaCF], base_meta_key.Encode(), &meta_value); + Status s = db_->Get(read_options, handles_[kMetaCF], base_meta_key.Encode(), &meta_value); if (s.ok()) { ParsedHashesMetaValue parsed_hashes_meta_value(&meta_value); if (parsed_hashes_meta_value.IsStale() || parsed_hashes_meta_value.Count() == 0) { @@ -863,7 +868,7 @@ Status Redis::HScanx(const Slice& key, const std::string& start_field, const std read_options.snapshot = snapshot; BaseMetaKey base_meta_key(key); - Status s = db_->Get(read_options, handles_[kHashesMetaCF], base_meta_key.Encode(), &meta_value); + Status s = db_->Get(read_options, handles_[kMetaCF], base_meta_key.Encode(), &meta_value); if (s.ok()) { ParsedHashesMetaValue parsed_hashes_meta_value(&meta_value); if (parsed_hashes_meta_value.IsStale() || parsed_hashes_meta_value.Count() == 0) { @@ -905,7 +910,7 @@ Status Redis::HScanx(const Slice& key, const std::string& start_field, const std Status Redis::HRandField(const Slice& key, int64_t count, bool with_values, std::vector* res) { BaseMetaKey base_meta_key(key); std::string meta_value; - Status s = db_->Get(default_read_options_, handles_[kHashesMetaCF], base_meta_key.Encode(), &meta_value); + Status s = db_->Get(default_read_options_, handles_[kMetaCF], base_meta_key.Encode(), &meta_value); if (!s.ok()) { return s; } @@ -996,7 +1001,7 @@ Status Redis::PKHScanRange(const Slice& key, const Slice& field_start, const std } BaseMetaKey base_meta_key(key); - Status s = db_->Get(read_options, handles_[kHashesMetaCF], base_meta_key.Encode(), &meta_value); + Status s = db_->Get(read_options, handles_[kMetaCF], base_meta_key.Encode(), &meta_value); if (s.ok()) { ParsedHashesMetaValue parsed_hashes_meta_value(&meta_value); if (parsed_hashes_meta_value.IsStale() || parsed_hashes_meta_value.Count() == 0) { @@ -1057,7 +1062,7 @@ Status Redis::PKHRScanRange(const Slice& key, const Slice& field_start, const st } BaseMetaKey base_meta_key(key); - Status s = db_->Get(read_options, handles_[kHashesMetaCF], base_meta_key.Encode(), &meta_value); + Status s = db_->Get(read_options, handles_[kMetaCF], base_meta_key.Encode(), &meta_value); if (s.ok()) { ParsedHashesMetaValue parsed_hashes_meta_value(&meta_value); if (parsed_hashes_meta_value.IsStale() || parsed_hashes_meta_value.Count() == 0) { @@ -1104,7 +1109,7 @@ Status Redis::HashesExpire(const Slice& key, uint64_t ttl) { ScopeRecordLock l(lock_mgr_, key); BaseMetaKey base_meta_key(key); - Status s = db_->Get(default_read_options_, handles_[kHashesMetaCF], base_meta_key.Encode(), &meta_value); + Status s = db_->Get(default_read_options_, handles_[kMetaCF], base_meta_key.Encode(), &meta_value); if (s.ok()) { ParsedHashesMetaValue parsed_hashes_meta_value(&meta_value); if (parsed_hashes_meta_value.IsStale()) { @@ -1115,10 +1120,10 @@ Status Redis::HashesExpire(const Slice& key, uint64_t ttl) { if (ttl > 0) { parsed_hashes_meta_value.SetRelativeTimestamp(ttl); 
- s = db_->Put(default_write_options_, handles_[kHashesMetaCF], base_meta_key.Encode(), meta_value); + s = db_->Put(default_write_options_, handles_[kMetaCF], base_meta_key.Encode(), meta_value); } else { parsed_hashes_meta_value.InitialMetaValue(); - s = db_->Put(default_write_options_, handles_[kHashesMetaCF], base_meta_key.Encode(), meta_value); + s = db_->Put(default_write_options_, handles_[kMetaCF], base_meta_key.Encode(), meta_value); } } return s; @@ -1129,7 +1134,7 @@ Status Redis::HashesDel(const Slice& key) { ScopeRecordLock l(lock_mgr_, key); BaseMetaKey base_meta_key(key); - Status s = db_->Get(default_read_options_, handles_[kHashesMetaCF], base_meta_key.Encode(), &meta_value); + Status s = db_->Get(default_read_options_, handles_[kMetaCF], base_meta_key.Encode(), &meta_value); if (s.ok()) { ParsedHashesMetaValue parsed_hashes_meta_value(&meta_value); if (parsed_hashes_meta_value.IsStale()) { @@ -1139,7 +1144,7 @@ Status Redis::HashesDel(const Slice& key) { } else { uint32_t statistic = parsed_hashes_meta_value.Count(); parsed_hashes_meta_value.InitialMetaValue(); - s = db_->Put(default_write_options_, handles_[kHashesMetaCF], base_meta_key.Encode(), meta_value); + s = db_->Put(default_write_options_, handles_[kMetaCF], base_meta_key.Encode(), meta_value); UpdateSpecificKeyStatistics(DataType::kHashes, key.ToString(), statistic); } } @@ -1151,7 +1156,7 @@ Status Redis::HashesExpireat(const Slice& key, uint64_t timestamp) { ScopeRecordLock l(lock_mgr_, key); BaseMetaKey base_meta_key(key); - Status s = db_->Get(default_read_options_, handles_[kHashesMetaCF], base_meta_key.Encode(), &meta_value); + Status s = db_->Get(default_read_options_, handles_[kMetaCF], base_meta_key.Encode(), &meta_value); if (s.ok()) { ParsedHashesMetaValue parsed_hashes_meta_value(&meta_value); if (parsed_hashes_meta_value.IsStale()) { @@ -1164,7 +1169,7 @@ Status Redis::HashesExpireat(const Slice& key, uint64_t timestamp) { } else { parsed_hashes_meta_value.InitialMetaValue(); } - s = db_->Put(default_write_options_, handles_[kHashesMetaCF], base_meta_key.Encode(), meta_value); + s = db_->Put(default_write_options_, handles_[kMetaCF], base_meta_key.Encode(), meta_value); } } return s; @@ -1175,7 +1180,7 @@ Status Redis::HashesPersist(const Slice& key) { ScopeRecordLock l(lock_mgr_, key); BaseMetaKey base_meta_key(key); - Status s = db_->Get(default_read_options_, handles_[kHashesMetaCF], base_meta_key.Encode(), &meta_value); + Status s = db_->Get(default_read_options_, handles_[kMetaCF], base_meta_key.Encode(), &meta_value); if (s.ok()) { ParsedHashesMetaValue parsed_hashes_meta_value(&meta_value); if (parsed_hashes_meta_value.IsStale()) { @@ -1188,7 +1193,7 @@ Status Redis::HashesPersist(const Slice& key) { return Status::NotFound("Not have an associated timeout"); } else { parsed_hashes_meta_value.SetEtime(0); - s = db_->Put(default_write_options_, handles_[kHashesMetaCF], base_meta_key.Encode(), meta_value); + s = db_->Put(default_write_options_, handles_[kMetaCF], base_meta_key.Encode(), meta_value); } } } @@ -1199,7 +1204,7 @@ Status Redis::HashesTTL(const Slice& key, uint64_t* timestamp) { std::string meta_value; BaseMetaKey base_meta_key(key); - Status s = db_->Get(default_read_options_, handles_[kHashesMetaCF], base_meta_key.Encode(), &meta_value); + Status s = db_->Get(default_read_options_, handles_[kMetaCF], base_meta_key.Encode(), &meta_value); if (s.ok()) { ParsedHashesMetaValue parsed_hashes_meta_value(&meta_value); if (parsed_hashes_meta_value.IsStale()) { @@ -1233,7 +1238,7 @@ void 
Redis::ScanHashes() { auto current_time = static_cast(time(nullptr)); INFO("***************rocksdb instance: {} Hashes Meta Data***************", index_); - auto meta_iter = db_->NewIterator(iterator_options, handles_[kHashesMetaCF]); + auto meta_iter = db_->NewIterator(iterator_options, handles_[kMetaCF]); for (meta_iter->SeekToFirst(); meta_iter->Valid(); meta_iter->Next()) { ParsedHashesMetaValue parsed_hashes_meta_value(meta_iter->value()); uint64_t survival_time = 0; diff --git a/src/storage/src/redis_lists.cc b/src/storage/src/redis_lists.cc index 2ec1da18e..55c410b83 100644 --- a/src/storage/src/redis_lists.cc +++ b/src/storage/src/redis_lists.cc @@ -30,7 +30,7 @@ Status Redis::ScanListsKeyNum(KeyInfo* key_info) { int64_t curtime; rocksdb::Env::Default()->GetCurrentTime(&curtime); - rocksdb::Iterator* iter = db_->NewIterator(iterator_options, handles_[kListsMetaCF]); + rocksdb::Iterator* iter = db_->NewIterator(iterator_options, handles_[kMetaCF]); for (iter->SeekToFirst(); iter->Valid(); iter->Next()) { ParsedListsMetaValue parsed_lists_meta_value(iter->value()); if (parsed_lists_meta_value.IsStale() || parsed_lists_meta_value.Count() == 0) { @@ -64,7 +64,7 @@ Status Redis::ListsPKPatternMatchDel(const std::string& pattern, int32_t* ret) { int32_t total_delete = 0; Status s; rocksdb::WriteBatch batch; - rocksdb::Iterator* iter = db_->NewIterator(iterator_options, handles_[kListsMetaCF]); + rocksdb::Iterator* iter = db_->NewIterator(iterator_options, handles_[kMetaCF]); iter->SeekToFirst(); while (iter->Valid()) { ParsedBaseMetaKey parsed_meta_key(iter->key().ToString()); @@ -74,7 +74,7 @@ Status Redis::ListsPKPatternMatchDel(const std::string& pattern, int32_t* ret) { (StringMatch(pattern.data(), pattern.size(), parsed_meta_key.Key().data(), parsed_meta_key.Key().size(), 0) != 0)) { parsed_lists_meta_value.InitialMetaValue(); - batch.Put(handles_[kListsMetaCF], iter->key(), meta_value); + batch.Put(handles_[kMetaCF], iter->key(), meta_value); } if (static_cast(batch.Count()) >= BATCH_DELETE_LIMIT) { s = db_->Write(default_write_options_, &batch); @@ -109,7 +109,7 @@ Status Redis::LIndex(const Slice& key, int64_t index, std::string* element) { std::string meta_value; BaseMetaKey base_meta_key(key); - Status s = db_->Get(read_options, handles_[kListsMetaCF], base_meta_key.Encode(), &meta_value); + Status s = db_->Get(read_options, handles_[kMetaCF], base_meta_key.Encode(), &meta_value); if (s.ok()) { ParsedListsMetaValue parsed_lists_meta_value(&meta_value); uint64_t version = parsed_lists_meta_value.Version(); @@ -143,7 +143,7 @@ Status Redis::LInsert(const Slice& key, const BeforeOrAfter& before_or_after, co std::string meta_value; BaseMetaKey base_meta_key(key); - Status s = db_->Get(default_read_options_, handles_[kListsMetaCF], base_meta_key.Encode(), &meta_value); + Status s = db_->Get(default_read_options_, handles_[kMetaCF], base_meta_key.Encode(), &meta_value); if (s.ok()) { ParsedListsMetaValue parsed_lists_meta_value(&meta_value); if (parsed_lists_meta_value.IsStale()) { @@ -225,7 +225,7 @@ Status Redis::LInsert(const Slice& key, const BeforeOrAfter& before_or_after, co parsed_lists_meta_value.ModifyRightIndex(1); } parsed_lists_meta_value.ModifyCount(1); - batch.Put(handles_[kListsMetaCF], base_meta_key.Encode(), meta_value); + batch.Put(handles_[kMetaCF], base_meta_key.Encode(), meta_value); ListsDataKey lists_target_key(key, version, target_index); BaseDataValue i_val(value); batch.Put(handles_[kListsDataCF], lists_target_key.Encode(), i_val.Encode()); @@ -244,7 +244,7 
@@ Status Redis::LLen(const Slice& key, uint64_t* len) { std::string meta_value; BaseMetaKey base_meta_key(key); - Status s = db_->Get(default_read_options_, handles_[kListsMetaCF], base_meta_key.Encode(), &meta_value); + Status s = db_->Get(default_read_options_, handles_[kMetaCF], base_meta_key.Encode(), &meta_value); if (s.ok()) { ParsedListsMetaValue parsed_lists_meta_value(&meta_value); if (parsed_lists_meta_value.IsStale()) { @@ -269,7 +269,7 @@ Status Redis::LPop(const Slice& key, int64_t count, std::vector* el std::string meta_value; BaseMetaKey base_meta_key(key); - Status s = db_->Get(default_read_options_, handles_[kListsMetaCF], base_meta_key.Encode(), &meta_value); + Status s = db_->Get(default_read_options_, handles_[kMetaCF], base_meta_key.Encode(), &meta_value); if (s.ok()) { ParsedListsMetaValue parsed_lists_meta_value(&meta_value); if (parsed_lists_meta_value.IsStale()) { @@ -293,7 +293,7 @@ Status Redis::LPop(const Slice& key, int64_t count, std::vector* el parsed_lists_meta_value.ModifyCount(-1); parsed_lists_meta_value.ModifyLeftIndex(-1); } - batch.Put(handles_[kListsMetaCF], base_meta_key.Encode(), meta_value); + batch.Put(handles_[kMetaCF], base_meta_key.Encode(), meta_value); delete iter; } } @@ -317,7 +317,7 @@ Status Redis::LPush(const Slice& key, const std::vector& values, ui std::string meta_value; BaseMetaKey base_meta_key(key); - Status s = db_->Get(default_read_options_, handles_[kListsMetaCF], base_meta_key.Encode(), &meta_value); + Status s = db_->Get(default_read_options_, handles_[kMetaCF], base_meta_key.Encode(), &meta_value); if (s.ok()) { ParsedListsMetaValue parsed_lists_meta_value(&meta_value); if (parsed_lists_meta_value.IsStale() || parsed_lists_meta_value.Count() == 0) { @@ -333,7 +333,7 @@ Status Redis::LPush(const Slice& key, const std::vector& values, ui BaseDataValue i_val(value); batch.Put(handles_[kListsDataCF], lists_data_key.Encode(), i_val.Encode()); } - batch.Put(handles_[kListsMetaCF], base_meta_key.Encode(), meta_value); + batch.Put(handles_[kMetaCF], base_meta_key.Encode(), meta_value); *ret = parsed_lists_meta_value.Count(); } else if (s.IsNotFound()) { char str[8]; @@ -347,7 +347,7 @@ Status Redis::LPush(const Slice& key, const std::vector& values, ui BaseDataValue i_val(value); batch.Put(handles_[kListsDataCF], lists_data_key.Encode(), i_val.Encode()); } - batch.Put(handles_[kListsMetaCF], base_meta_key.Encode(), lists_meta_value.Encode()); + batch.Put(handles_[kMetaCF], base_meta_key.Encode(), lists_meta_value.Encode()); *ret = lists_meta_value.RightIndex() - lists_meta_value.LeftIndex() - 1; } else { return s; @@ -363,7 +363,7 @@ Status Redis::LPushx(const Slice& key, const std::vector& values, u std::string meta_value; BaseMetaKey base_meta_key(key); - Status s = db_->Get(default_read_options_, handles_[kListsMetaCF], base_meta_key.Encode(), &meta_value); + Status s = db_->Get(default_read_options_, handles_[kMetaCF], base_meta_key.Encode(), &meta_value); if (s.ok()) { ParsedListsMetaValue parsed_lists_meta_value(&meta_value); if (parsed_lists_meta_value.IsStale()) { @@ -380,7 +380,7 @@ Status Redis::LPushx(const Slice& key, const std::vector& values, u BaseDataValue i_val(value); batch.Put(handles_[kListsDataCF], lists_data_key.Encode(), i_val.Encode()); } - batch.Put(handles_[kListsMetaCF], base_meta_key.Encode(), meta_value); + batch.Put(handles_[kMetaCF], base_meta_key.Encode(), meta_value); *len = parsed_lists_meta_value.Count(); return db_->Write(default_write_options_, &batch); } @@ -397,7 +397,7 @@ Status 
Redis::LRange(const Slice& key, int64_t start, int64_t stop, std::vector< std::string meta_value; BaseMetaKey base_meta_key(key); - Status s = db_->Get(read_options, handles_[kListsMetaCF], base_meta_key.Encode(), &meta_value); + Status s = db_->Get(read_options, handles_[kMetaCF], base_meta_key.Encode(), &meta_value); if (s.ok()) { ParsedListsMetaValue parsed_lists_meta_value(&meta_value); if (parsed_lists_meta_value.IsStale()) { @@ -448,7 +448,7 @@ Status Redis::LRangeWithTTL(const Slice& key, int64_t start, int64_t stop, std:: std::string meta_value; BaseMetaKey base_meta_key(key); - Status s = db_->Get(read_options, handles_[kListsMetaCF], base_meta_key.Encode(), &meta_value); + Status s = db_->Get(read_options, handles_[kMetaCF], base_meta_key.Encode(), &meta_value); if (s.ok()) { ParsedListsMetaValue parsed_lists_meta_value(&meta_value); if (parsed_lists_meta_value.Count() == 0) { @@ -506,7 +506,7 @@ Status Redis::LRem(const Slice& key, int64_t count, const Slice& value, uint64_t std::string meta_value; BaseMetaKey base_meta_key(key); - Status s = db_->Get(default_read_options_, handles_[kListsMetaCF], base_meta_key.Encode(), &meta_value); + Status s = db_->Get(default_read_options_, handles_[kMetaCF], base_meta_key.Encode(), &meta_value); if (s.ok()) { ParsedListsMetaValue parsed_lists_meta_value(&meta_value); if (parsed_lists_meta_value.IsStale()) { @@ -607,7 +607,7 @@ Status Redis::LRem(const Slice& key, int64_t count, const Slice& value, uint64_t parsed_lists_meta_value.ModifyRightIndex(-target_index.size()); } parsed_lists_meta_value.ModifyCount(-target_index.size()); - batch.Put(handles_[kListsMetaCF], base_meta_key.Encode(), meta_value); + batch.Put(handles_[kMetaCF], base_meta_key.Encode(), meta_value); for (const auto& idx : delete_index) { ListsDataKey lists_data_key(key, version, idx); batch.Delete(handles_[kListsDataCF], lists_data_key.Encode()); @@ -628,7 +628,7 @@ Status Redis::LSet(const Slice& key, int64_t index, const Slice& value) { std::string meta_value; BaseMetaKey base_meta_key(key); - Status s = db_->Get(default_read_options_, handles_[kListsMetaCF], base_meta_key.Encode(), &meta_value); + Status s = db_->Get(default_read_options_, handles_[kMetaCF], base_meta_key.Encode(), &meta_value); if (s.ok()) { ParsedListsMetaValue parsed_lists_meta_value(&meta_value); if (parsed_lists_meta_value.IsStale()) { @@ -661,7 +661,7 @@ Status Redis::LTrim(const Slice& key, int64_t start, int64_t stop) { std::string meta_value; BaseMetaKey base_meta_key(key); - Status s = db_->Get(default_read_options_, handles_[kListsMetaCF], base_meta_key.Encode(), &meta_value); + Status s = db_->Get(default_read_options_, handles_[kMetaCF], base_meta_key.Encode(), &meta_value); if (s.ok()) { ParsedListsMetaValue parsed_lists_meta_value(&meta_value); uint64_t version = parsed_lists_meta_value.Version(); @@ -678,7 +678,7 @@ Status Redis::LTrim(const Slice& key, int64_t start, int64_t stop) { if (sublist_left_index > sublist_right_index || sublist_left_index > origin_right_index || sublist_right_index < origin_left_index) { parsed_lists_meta_value.InitialMetaValue(); - batch.Put(handles_[kListsMetaCF], base_meta_key.Encode(), meta_value); + batch.Put(handles_[kMetaCF], base_meta_key.Encode(), meta_value); } else { if (sublist_left_index < origin_left_index) { sublist_left_index = origin_left_index; @@ -693,7 +693,7 @@ Status Redis::LTrim(const Slice& key, int64_t start, int64_t stop) { parsed_lists_meta_value.ModifyLeftIndex(-(sublist_left_index - origin_left_index)); 
parsed_lists_meta_value.ModifyRightIndex(-(origin_right_index - sublist_right_index)); parsed_lists_meta_value.ModifyCount(-delete_node_num); - batch.Put(handles_[kListsMetaCF], base_meta_key.Encode(), meta_value); + batch.Put(handles_[kMetaCF], base_meta_key.Encode(), meta_value); for (uint64_t idx = origin_left_index; idx < sublist_left_index; ++idx) { statistic++; ListsDataKey lists_data_key(key, version, idx); @@ -724,7 +724,7 @@ Status Redis::RPop(const Slice& key, int64_t count, std::vector* el std::string meta_value; BaseMetaKey base_meta_key(key); - Status s = db_->Get(default_read_options_, handles_[kListsMetaCF], base_meta_key.Encode(), &meta_value); + Status s = db_->Get(default_read_options_, handles_[kMetaCF], base_meta_key.Encode(), &meta_value); if (s.ok()) { ParsedListsMetaValue parsed_lists_meta_value(&meta_value); if (parsed_lists_meta_value.IsStale()) { @@ -749,7 +749,7 @@ Status Redis::RPop(const Slice& key, int64_t count, std::vector* el parsed_lists_meta_value.ModifyCount(-1); parsed_lists_meta_value.ModifyRightIndex(-1); } - batch.Put(handles_[kListsMetaCF], base_meta_key.Encode(), meta_value); + batch.Put(handles_[kMetaCF], base_meta_key.Encode(), meta_value); delete iter; } } @@ -772,7 +772,7 @@ Status Redis::RPoplpush(const Slice& source, const Slice& destination, std::stri if (source.compare(destination) == 0) { std::string meta_value; BaseMetaKey base_source(source); - s = db_->Get(default_read_options_, handles_[kListsMetaCF], base_source.Encode(), &meta_value); + s = db_->Get(default_read_options_, handles_[kMetaCF], base_source.Encode(), &meta_value); if (s.ok()) { ParsedListsMetaValue parsed_lists_meta_value(&meta_value); if (parsed_lists_meta_value.IsStale()) { @@ -799,7 +799,7 @@ Status Redis::RPoplpush(const Slice& source, const Slice& destination, std::stri statistic++; parsed_lists_meta_value.ModifyRightIndex(-1); parsed_lists_meta_value.ModifyLeftIndex(1); - batch.Put(handles_[kListsMetaCF], base_source.Encode(), meta_value); + batch.Put(handles_[kMetaCF], base_source.Encode(), meta_value); s = db_->Write(default_write_options_, &batch); UpdateSpecificKeyStatistics(DataType::kLists, source.ToString(), statistic); return s; @@ -817,7 +817,7 @@ Status Redis::RPoplpush(const Slice& source, const Slice& destination, std::stri std::string target; std::string source_meta_value; BaseMetaKey base_source(source); - s = db_->Get(default_read_options_, handles_[kListsMetaCF], base_source.Encode(), &source_meta_value); + s = db_->Get(default_read_options_, handles_[kMetaCF], base_source.Encode(), &source_meta_value); if (s.ok()) { ParsedListsMetaValue parsed_lists_meta_value(&source_meta_value); if (parsed_lists_meta_value.IsStale()) { @@ -834,7 +834,7 @@ Status Redis::RPoplpush(const Slice& source, const Slice& destination, std::stri statistic++; parsed_lists_meta_value.ModifyCount(-1); parsed_lists_meta_value.ModifyRightIndex(-1); - batch.Put(handles_[kListsMetaCF], base_source.Encode(), source_meta_value); + batch.Put(handles_[kMetaCF], base_source.Encode(), source_meta_value); } else { return s; } @@ -845,7 +845,7 @@ Status Redis::RPoplpush(const Slice& source, const Slice& destination, std::stri std::string destination_meta_value; BaseMetaKey base_destination(destination); - s = db_->Get(default_read_options_, handles_[kListsMetaCF], base_destination.Encode(), &destination_meta_value); + s = db_->Get(default_read_options_, handles_[kMetaCF], base_destination.Encode(), &destination_meta_value); if (s.ok()) { ParsedListsMetaValue 
parsed_lists_meta_value(&destination_meta_value); if (parsed_lists_meta_value.IsStale() || parsed_lists_meta_value.Count() == 0) { @@ -858,7 +858,7 @@ Status Redis::RPoplpush(const Slice& source, const Slice& destination, std::stri batch.Put(handles_[kListsDataCF], lists_data_key.Encode(), target); parsed_lists_meta_value.ModifyCount(1); parsed_lists_meta_value.ModifyLeftIndex(1); - batch.Put(handles_[kListsMetaCF], base_destination.Encode(), destination_meta_value); + batch.Put(handles_[kMetaCF], base_destination.Encode(), destination_meta_value); } else if (s.IsNotFound()) { char str[8]; EncodeFixed64(str, 1); @@ -868,7 +868,7 @@ Status Redis::RPoplpush(const Slice& source, const Slice& destination, std::stri ListsDataKey lists_data_key(destination, version, target_index); batch.Put(handles_[kListsDataCF], lists_data_key.Encode(), target); lists_meta_value.ModifyLeftIndex(1); - batch.Put(handles_[kListsMetaCF], base_destination.Encode(), lists_meta_value.Encode()); + batch.Put(handles_[kMetaCF], base_destination.Encode(), lists_meta_value.Encode()); } else { return s; } @@ -892,7 +892,7 @@ Status Redis::RPush(const Slice& key, const std::vector& values, ui std::string meta_value; BaseMetaKey base_meta_key(key); - Status s = db_->Get(default_read_options_, handles_[kListsMetaCF], base_meta_key.Encode(), &meta_value); + Status s = db_->Get(default_read_options_, handles_[kMetaCF], base_meta_key.Encode(), &meta_value); if (s.ok()) { ParsedListsMetaValue parsed_lists_meta_value(&meta_value); if (parsed_lists_meta_value.IsStale() || parsed_lists_meta_value.Count() == 0) { @@ -908,7 +908,7 @@ Status Redis::RPush(const Slice& key, const std::vector& values, ui BaseDataValue i_val(value); batch.Put(handles_[kListsDataCF], lists_data_key.Encode(), i_val.Encode()); } - batch.Put(handles_[kListsMetaCF], base_meta_key.Encode(), meta_value); + batch.Put(handles_[kMetaCF], base_meta_key.Encode(), meta_value); *ret = parsed_lists_meta_value.Count(); } else if (s.IsNotFound()) { char str[8]; @@ -922,7 +922,7 @@ Status Redis::RPush(const Slice& key, const std::vector& values, ui BaseDataValue i_val(value); batch.Put(handles_[kListsDataCF], lists_data_key.Encode(), i_val.Encode()); } - batch.Put(handles_[kListsMetaCF], base_meta_key.Encode(), lists_meta_value.Encode()); + batch.Put(handles_[kMetaCF], base_meta_key.Encode(), lists_meta_value.Encode()); *ret = lists_meta_value.RightIndex() - lists_meta_value.LeftIndex() - 1; } else { return s; @@ -938,7 +938,7 @@ Status Redis::RPushx(const Slice& key, const std::vector& values, u std::string meta_value; BaseMetaKey base_meta_key(key); - Status s = db_->Get(default_read_options_, handles_[kListsMetaCF], base_meta_key.Encode(), &meta_value); + Status s = db_->Get(default_read_options_, handles_[kMetaCF], base_meta_key.Encode(), &meta_value); if (s.ok()) { ParsedListsMetaValue parsed_lists_meta_value(&meta_value); if (parsed_lists_meta_value.IsStale()) { @@ -955,7 +955,7 @@ Status Redis::RPushx(const Slice& key, const std::vector& values, u BaseDataValue i_val(value); batch.Put(handles_[kListsDataCF], lists_data_key.Encode(), i_val.Encode()); } - batch.Put(handles_[kListsMetaCF], base_meta_key.Encode(), meta_value); + batch.Put(handles_[kMetaCF], base_meta_key.Encode(), meta_value); *len = parsed_lists_meta_value.Count(); return db_->Write(default_write_options_, &batch); } @@ -968,7 +968,7 @@ Status Redis::ListsExpire(const Slice& key, uint64_t ttl) { ScopeRecordLock l(lock_mgr_, key); BaseMetaKey base_meta_key(key); - Status s = 
db_->Get(default_read_options_, handles_[kListsMetaCF], base_meta_key.Encode(), &meta_value); + Status s = db_->Get(default_read_options_, handles_[kMetaCF], base_meta_key.Encode(), &meta_value); if (s.ok()) { ParsedListsMetaValue parsed_lists_meta_value(&meta_value); if (parsed_lists_meta_value.IsStale()) { @@ -979,10 +979,10 @@ Status Redis::ListsExpire(const Slice& key, uint64_t ttl) { if (ttl > 0) { parsed_lists_meta_value.SetRelativeTimestamp(ttl); - s = db_->Put(default_write_options_, handles_[kListsMetaCF], base_meta_key.Encode(), meta_value); + s = db_->Put(default_write_options_, handles_[kMetaCF], base_meta_key.Encode(), meta_value); } else { parsed_lists_meta_value.InitialMetaValue(); - s = db_->Put(default_write_options_, handles_[kListsMetaCF], base_meta_key.Encode(), meta_value); + s = db_->Put(default_write_options_, handles_[kMetaCF], base_meta_key.Encode(), meta_value); } } return s; @@ -993,7 +993,7 @@ Status Redis::ListsDel(const Slice& key) { ScopeRecordLock l(lock_mgr_, key); BaseMetaKey base_meta_key(key); - Status s = db_->Get(default_read_options_, handles_[kListsMetaCF], base_meta_key.Encode(), &meta_value); + Status s = db_->Get(default_read_options_, handles_[kMetaCF], base_meta_key.Encode(), &meta_value); if (s.ok()) { ParsedListsMetaValue parsed_lists_meta_value(&meta_value); if (parsed_lists_meta_value.IsStale()) { @@ -1003,7 +1003,7 @@ Status Redis::ListsDel(const Slice& key) { } else { uint32_t statistic = parsed_lists_meta_value.Count(); parsed_lists_meta_value.InitialMetaValue(); - s = db_->Put(default_write_options_, handles_[kListsMetaCF], base_meta_key.Encode(), meta_value); + s = db_->Put(default_write_options_, handles_[kMetaCF], base_meta_key.Encode(), meta_value); UpdateSpecificKeyStatistics(DataType::kLists, key.ToString(), statistic); } } @@ -1015,7 +1015,7 @@ Status Redis::ListsExpireat(const Slice& key, uint64_t timestamp) { ScopeRecordLock l(lock_mgr_, key); BaseMetaKey base_meta_key(key); - Status s = db_->Get(default_read_options_, handles_[kListsMetaCF], base_meta_key.Encode(), &meta_value); + Status s = db_->Get(default_read_options_, handles_[kMetaCF], base_meta_key.Encode(), &meta_value); if (s.ok()) { ParsedListsMetaValue parsed_lists_meta_value(&meta_value); if (parsed_lists_meta_value.IsStale()) { @@ -1028,7 +1028,7 @@ Status Redis::ListsExpireat(const Slice& key, uint64_t timestamp) { } else { parsed_lists_meta_value.InitialMetaValue(); } - return db_->Put(default_write_options_, handles_[kListsMetaCF], base_meta_key.Encode(), meta_value); + return db_->Put(default_write_options_, handles_[kMetaCF], base_meta_key.Encode(), meta_value); } } return s; @@ -1038,7 +1038,7 @@ Status Redis::ListsPersist(const Slice& key) { std::string meta_value; ScopeRecordLock l(lock_mgr_, key); BaseMetaKey base_meta_key(key); - Status s = db_->Get(default_read_options_, handles_[kListsMetaCF], base_meta_key.Encode(), &meta_value); + Status s = db_->Get(default_read_options_, handles_[kMetaCF], base_meta_key.Encode(), &meta_value); if (s.ok()) { ParsedListsMetaValue parsed_lists_meta_value(&meta_value); if (parsed_lists_meta_value.IsStale()) { @@ -1051,7 +1051,7 @@ Status Redis::ListsPersist(const Slice& key) { return Status::NotFound("Not have an associated timeout"); } else { parsed_lists_meta_value.SetEtime(0); - return db_->Put(default_write_options_, handles_[kListsMetaCF], base_meta_key.Encode(), meta_value); + return db_->Put(default_write_options_, handles_[kMetaCF], base_meta_key.Encode(), meta_value); } } } @@ -1062,7 +1062,7 @@ Status 
Redis::ListsTTL(const Slice& key, uint64_t* timestamp) { std::string meta_value; BaseMetaKey base_meta_key(key); - Status s = db_->Get(default_read_options_, handles_[kListsMetaCF], base_meta_key.Encode(), &meta_value); + Status s = db_->Get(default_read_options_, handles_[kMetaCF], base_meta_key.Encode(), &meta_value); if (s.ok()) { ParsedListsMetaValue parsed_lists_meta_value(&meta_value); if (parsed_lists_meta_value.IsStale()) { @@ -1096,7 +1096,7 @@ void Redis::ScanLists() { auto current_time = static_cast(time(nullptr)); INFO("***************rocksdb instance: {} List Meta Data***************", index_); - auto meta_iter = db_->NewIterator(iterator_options, handles_[kListsMetaCF]); + auto meta_iter = db_->NewIterator(iterator_options, handles_[kMetaCF]); for (meta_iter->SeekToFirst(); meta_iter->Valid(); meta_iter->Next()) { ParsedListsMetaValue parsed_lists_meta_value(meta_iter->value()); ParsedBaseMetaKey parsed_meta_key(meta_iter->value()); diff --git a/src/storage/src/redis_sets.cc b/src/storage/src/redis_sets.cc index a3bccd1d2..85d83f4c9 100644 --- a/src/storage/src/redis_sets.cc +++ b/src/storage/src/redis_sets.cc @@ -20,6 +20,7 @@ #include "src/scope_snapshot.h" #include "storage/util.h" +rocksdb::Slice SetType("s"); namespace storage { rocksdb::Status Redis::ScanSetsKeyNum(KeyInfo* key_info) { uint64_t keys = 0; @@ -36,7 +37,7 @@ rocksdb::Status Redis::ScanSetsKeyNum(KeyInfo* key_info) { int64_t curtime; rocksdb::Env::Default()->GetCurrentTime(&curtime); - rocksdb::Iterator* iter = db_->NewIterator(iterator_options, handles_[kSetsMetaCF]); + rocksdb::Iterator* iter = db_->NewIterator(iterator_options, handles_[kMetaCF]); for (iter->SeekToFirst(); iter->Valid(); iter->Next()) { ParsedSetsMetaValue parsed_sets_meta_value(iter->value()); if (parsed_sets_meta_value.IsStale() || parsed_sets_meta_value.Count() == 0) { @@ -70,7 +71,7 @@ rocksdb::Status Redis::SetsPKPatternMatchDel(const std::string& pattern, int32_t int32_t total_delete = 0; rocksdb::Status s; rocksdb::WriteBatch batch; - rocksdb::Iterator* iter = db_->NewIterator(iterator_options, handles_[kSetsMetaCF]); + rocksdb::Iterator* iter = db_->NewIterator(iterator_options, handles_[kMetaCF]); iter->SeekToFirst(); while (iter->Valid()) { ParsedBaseMetaKey parsed_meta_key(iter->key()); @@ -80,7 +81,7 @@ rocksdb::Status Redis::SetsPKPatternMatchDel(const std::string& pattern, int32_t (StringMatch(pattern.data(), pattern.size(), parsed_meta_key.Key().data(), parsed_meta_key.Key().size(), 0) != 0)) { parsed_sets_meta_value.InitialMetaValue(); - batch.Put(handles_[kSetsMetaCF], iter->key(), meta_value); + batch.Put(handles_[kMetaCF], iter->key(), meta_value); } if (static_cast(batch.Count()) >= BATCH_DELETE_LIMIT) { s = db_->Write(default_write_options_, &batch); @@ -122,16 +123,20 @@ rocksdb::Status Redis::SAdd(const Slice& key, const std::vector& me std::string meta_value; BaseMetaKey base_meta_key(key); - rocksdb::Status s = db_->Get(default_read_options_, handles_[kSetsMetaCF], base_meta_key.Encode(), &meta_value); + rocksdb::Status s = db_->Get(default_read_options_, handles_[kMetaCF], base_meta_key.Encode(), &meta_value); if (s.ok()) { ParsedSetsMetaValue parsed_sets_meta_value(&meta_value); + if (!parsed_sets_meta_value.IsType(SetType)) { + return Status::InvalidArgument("WRONGTYPE Operation against a key holding the wrong kind of value"); + ; + } if (parsed_sets_meta_value.IsStale() || parsed_sets_meta_value.Count() == 0) { version = parsed_sets_meta_value.InitialMetaValue(); if 
(!parsed_sets_meta_value.check_set_count(static_cast(filtered_members.size()))) { return Status::InvalidArgument("set size overflow"); } parsed_sets_meta_value.SetCount(static_cast(filtered_members.size())); - batch.Put(handles_[kSetsMetaCF], base_meta_key.Encode(), meta_value); + batch.Put(handles_[kMetaCF], base_meta_key.Encode(), meta_value); for (const auto& member : filtered_members) { SetsMemberKey sets_member_key(key, version, member); BaseDataValue iter_value(Slice{}); @@ -162,15 +167,16 @@ rocksdb::Status Redis::SAdd(const Slice& key, const std::vector& me return Status::InvalidArgument("set size overflow"); } parsed_sets_meta_value.ModifyCount(cnt); - batch.Put(handles_[kSetsMetaCF], base_meta_key.Encode(), meta_value); + batch.Put(handles_[kMetaCF], base_meta_key.Encode(), meta_value); } } } else if (s.IsNotFound()) { - char str[4]; - EncodeFixed32(str, filtered_members.size()); - SetsMetaValue sets_meta_value(Slice(str, sizeof(int32_t))); + char str[5]; + EncodeFixed8(str, 's'); + EncodeFixed32(str + 1, filtered_members.size()); + SetsMetaValue sets_meta_value(Slice(str, 5)); version = sets_meta_value.UpdateVersion(); - batch.Put(handles_[kSetsMetaCF], base_meta_key.Encode(), sets_meta_value.Encode()); + batch.Put(handles_[kMetaCF], base_meta_key.Encode(), sets_meta_value.Encode()); for (const auto& member : filtered_members) { SetsMemberKey sets_member_key(key, version, member); BaseDataValue i_val(Slice{}); @@ -188,9 +194,13 @@ rocksdb::Status Redis::SCard(const Slice& key, int32_t* ret) { std::string meta_value; BaseMetaKey base_meta_key(key); - rocksdb::Status s = db_->Get(default_read_options_, handles_[kSetsMetaCF], base_meta_key.Encode(), &meta_value); + rocksdb::Status s = db_->Get(default_read_options_, handles_[kMetaCF], base_meta_key.Encode(), &meta_value); if (s.ok()) { ParsedSetsMetaValue parsed_sets_meta_value(&meta_value); + if (!parsed_sets_meta_value.IsType(SetType)) { + return Status::InvalidArgument("WRONGTYPE Operation against a key holding the wrong kind of value"); + ; + } if (parsed_sets_meta_value.IsStale()) { return rocksdb::Status::NotFound("Stale"); } else { @@ -220,7 +230,7 @@ rocksdb::Status Redis::SDiff(const std::vector& keys, std::vectorGet(read_options, handles_[kSetsMetaCF], base_meta_key.Encode(), &meta_value); + s = db_->Get(read_options, handles_[kMetaCF], base_meta_key.Encode(), &meta_value); if (s.ok()) { ParsedSetsMetaValue parsed_sets_meta_value(&meta_value); if (!parsed_sets_meta_value.IsStale() && parsed_sets_meta_value.Count() != 0) { @@ -232,7 +242,7 @@ rocksdb::Status Redis::SDiff(const std::vector& keys, std::vectorGet(read_options, handles_[kSetsMetaCF], base_meta_key0.Encode(), &meta_value); + s = db_->Get(read_options, handles_[kMetaCF], base_meta_key0.Encode(), &meta_value); if (s.ok()) { ParsedSetsMetaValue parsed_sets_meta_value(&meta_value); if (!parsed_sets_meta_value.IsStale() && parsed_sets_meta_value.Count() != 0) { @@ -292,7 +302,7 @@ rocksdb::Status Redis::SDiffstore(const Slice& destination, const std::vectorGet(read_options, handles_[kSetsMetaCF], base_meta_key.Encode(), &meta_value); + s = db_->Get(read_options, handles_[kMetaCF], base_meta_key.Encode(), &meta_value); if (s.ok()) { ParsedSetsMetaValue parsed_sets_meta_value(&meta_value); if (!parsed_sets_meta_value.IsStale() && parsed_sets_meta_value.Count() != 0) { @@ -305,7 +315,7 @@ rocksdb::Status Redis::SDiffstore(const Slice& destination, const std::vector members; BaseMetaKey base_meta_key0(keys[0]); - s = db_->Get(read_options, handles_[kSetsMetaCF], 
base_meta_key0.Encode(), &meta_value); + s = db_->Get(read_options, handles_[kMetaCF], base_meta_key0.Encode(), &meta_value); if (s.ok()) { ParsedSetsMetaValue parsed_sets_meta_value(&meta_value); if (!parsed_sets_meta_value.IsStale() && parsed_sets_meta_value.Count() != 0) { @@ -344,7 +354,7 @@ rocksdb::Status Redis::SDiffstore(const Slice& destination, const std::vectorGet(read_options, handles_[kSetsMetaCF], base_destination.Encode(), &meta_value); + s = db_->Get(read_options, handles_[kMetaCF], base_destination.Encode(), &meta_value); if (s.ok()) { ParsedSetsMetaValue parsed_sets_meta_value(&meta_value); statistic = parsed_sets_meta_value.Count(); @@ -353,13 +363,13 @@ rocksdb::Status Redis::SDiffstore(const Slice& destination, const std::vector(members.size())); - batch.Put(handles_[kSetsMetaCF], base_destination.Encode(), meta_value); + batch.Put(handles_[kMetaCF], base_destination.Encode(), meta_value); } else if (s.IsNotFound()) { char str[4]; EncodeFixed32(str, members.size()); SetsMetaValue sets_meta_value(Slice(str, sizeof(int32_t))); version = sets_meta_value.UpdateVersion(); - batch.Put(handles_[kSetsMetaCF], base_destination.Encode(), sets_meta_value.Encode()); + batch.Put(handles_[kMetaCF], base_destination.Encode(), sets_meta_value.Encode()); } else { return s; } @@ -392,7 +402,7 @@ rocksdb::Status Redis::SInter(const std::vector& keys, std::vector< for (uint32_t idx = 1; idx < keys.size(); ++idx) { BaseMetaKey base_meta_key(keys[idx]); - s = db_->Get(read_options, handles_[kSetsMetaCF], base_meta_key.Encode(), &meta_value); + s = db_->Get(read_options, handles_[kMetaCF], base_meta_key.Encode(), &meta_value); if (s.ok()) { ParsedSetsMetaValue parsed_sets_meta_value(&meta_value); if (parsed_sets_meta_value.IsStale() || parsed_sets_meta_value.Count() == 0) { @@ -408,7 +418,7 @@ rocksdb::Status Redis::SInter(const std::vector& keys, std::vector< } BaseMetaKey base_meta_key0(keys[0]); - s = db_->Get(read_options, handles_[kSetsMetaCF], base_meta_key0.Encode(), &meta_value); + s = db_->Get(read_options, handles_[kMetaCF], base_meta_key0.Encode(), &meta_value); if (s.ok()) { ParsedSetsMetaValue parsed_sets_meta_value(&meta_value); if (parsed_sets_meta_value.IsStale() || parsed_sets_meta_value.Count() == 0) { @@ -474,7 +484,7 @@ rocksdb::Status Redis::SInterstore(const Slice& destination, const std::vectorGet(read_options, handles_[kSetsMetaCF], base_meta_key.Encode(), &meta_value); + s = db_->Get(read_options, handles_[kMetaCF], base_meta_key.Encode(), &meta_value); if (s.ok()) { ParsedSetsMetaValue parsed_sets_meta_value(&meta_value); if (parsed_sets_meta_value.IsStale() || parsed_sets_meta_value.Count() == 0) { @@ -494,7 +504,7 @@ rocksdb::Status Redis::SInterstore(const Slice& destination, const std::vector members; if (!have_invalid_sets) { BaseMetaKey base_meta_key0(keys[0]); - s = db_->Get(read_options, handles_[kSetsMetaCF], base_meta_key0.Encode(), &meta_value); + s = db_->Get(read_options, handles_[kMetaCF], base_meta_key0.Encode(), &meta_value); if (s.ok()) { ParsedSetsMetaValue parsed_sets_meta_value(&meta_value); if (parsed_sets_meta_value.IsStale() || parsed_sets_meta_value.Count() == 0) { @@ -539,7 +549,7 @@ rocksdb::Status Redis::SInterstore(const Slice& destination, const std::vectorGet(read_options, handles_[kSetsMetaCF], base_destination.Encode(), &meta_value); + s = db_->Get(read_options, handles_[kMetaCF], base_destination.Encode(), &meta_value); if (s.ok()) { ParsedSetsMetaValue parsed_sets_meta_value(&meta_value); statistic = parsed_sets_meta_value.Count(); 
@@ -548,13 +558,13 @@ rocksdb::Status Redis::SInterstore(const Slice& destination, const std::vector(members.size())); - batch.Put(handles_[kSetsMetaCF], base_destination.Encode(), meta_value); + batch.Put(handles_[kMetaCF], base_destination.Encode(), meta_value); } else if (s.IsNotFound()) { char str[4]; EncodeFixed32(str, members.size()); SetsMetaValue sets_meta_value(Slice(str, sizeof(int32_t))); version = sets_meta_value.UpdateVersion(); - batch.Put(handles_[kSetsMetaCF], base_destination.Encode(), sets_meta_value.Encode()); + batch.Put(handles_[kMetaCF], base_destination.Encode(), sets_meta_value.Encode()); } else { return s; } @@ -581,7 +591,7 @@ rocksdb::Status Redis::SIsmember(const Slice& key, const Slice& member, int32_t* read_options.snapshot = snapshot; BaseMetaKey base_meta_key(key); - rocksdb::Status s = db_->Get(read_options, handles_[kSetsMetaCF], base_meta_key.Encode(), &meta_value); + rocksdb::Status s = db_->Get(read_options, handles_[kMetaCF], base_meta_key.Encode(), &meta_value); if (s.ok()) { ParsedSetsMetaValue parsed_sets_meta_value(&meta_value); if (parsed_sets_meta_value.IsStale()) { @@ -611,7 +621,7 @@ rocksdb::Status Redis::SMembers(const Slice& key, std::vector* memb read_options.snapshot = snapshot; BaseMetaKey base_meta_key(key); - rocksdb::Status s = db_->Get(read_options, handles_[kSetsMetaCF], base_meta_key.Encode(), &meta_value); + rocksdb::Status s = db_->Get(read_options, handles_[kMetaCF], base_meta_key.Encode(), &meta_value); if (s.ok()) { ParsedSetsMetaValue parsed_sets_meta_value(&meta_value); if (parsed_sets_meta_value.IsStale()) { @@ -643,7 +653,7 @@ Status Redis::SMembersWithTTL(const Slice& key, std::vector* member ScopeSnapshot ss(db_, &snapshot); read_options.snapshot = snapshot; BaseMetaKey base_meta_key(key); - rocksdb::Status s = db_->Get(read_options, handles_[kSetsMetaCF], base_meta_key.Encode(), &meta_value); + rocksdb::Status s = db_->Get(read_options, handles_[kMetaCF], base_meta_key.Encode(), &meta_value); if (s.ok()) { ParsedSetsMetaValue parsed_sets_meta_value(&meta_value); if (parsed_sets_meta_value.Count() == 0) { @@ -693,7 +703,7 @@ rocksdb::Status Redis::SMove(const Slice& source, const Slice& destination, cons } BaseMetaKey base_source(source); - rocksdb::Status s = db_->Get(default_read_options_, handles_[kSetsMetaCF], base_source.Encode(), &meta_value); + rocksdb::Status s = db_->Get(default_read_options_, handles_[kMetaCF], base_source.Encode(), &meta_value); if (s.ok()) { ParsedSetsMetaValue parsed_sets_meta_value(&meta_value); if (parsed_sets_meta_value.IsStale()) { @@ -711,7 +721,7 @@ rocksdb::Status Redis::SMove(const Slice& source, const Slice& destination, cons return Status::InvalidArgument("set size overflow"); } parsed_sets_meta_value.ModifyCount(-1); - batch.Put(handles_[kSetsMetaCF], base_source.Encode(), meta_value); + batch.Put(handles_[kMetaCF], base_source.Encode(), meta_value); batch.Delete(handles_[kSetsDataCF], sets_member_key.Encode()); statistic++; } else if (s.IsNotFound()) { @@ -729,13 +739,13 @@ rocksdb::Status Redis::SMove(const Slice& source, const Slice& destination, cons } BaseMetaKey base_destination(destination); - s = db_->Get(default_read_options_, handles_[kSetsMetaCF], base_destination.Encode(), &meta_value); + s = db_->Get(default_read_options_, handles_[kMetaCF], base_destination.Encode(), &meta_value); if (s.ok()) { ParsedSetsMetaValue parsed_sets_meta_value(&meta_value); if (parsed_sets_meta_value.IsStale() || parsed_sets_meta_value.Count() == 0) { version = 
parsed_sets_meta_value.InitialMetaValue(); parsed_sets_meta_value.SetCount(1); - batch.Put(handles_[kSetsMetaCF], base_destination.Encode(), meta_value); + batch.Put(handles_[kMetaCF], base_destination.Encode(), meta_value); SetsMemberKey sets_member_key(destination, version, member); BaseDataValue i_val(Slice{}); batch.Put(handles_[kSetsDataCF], sets_member_key.Encode(), i_val.Encode()); @@ -750,7 +760,7 @@ rocksdb::Status Redis::SMove(const Slice& source, const Slice& destination, cons } parsed_sets_meta_value.ModifyCount(1); BaseDataValue iter_value(Slice{}); - batch.Put(handles_[kSetsMetaCF], base_destination.Encode(), meta_value); + batch.Put(handles_[kMetaCF], base_destination.Encode(), meta_value); batch.Put(handles_[kSetsDataCF], sets_member_key.Encode(), iter_value.Encode()); } else if (!s.ok()) { return s; @@ -761,7 +771,7 @@ rocksdb::Status Redis::SMove(const Slice& source, const Slice& destination, cons EncodeFixed32(str, 1); SetsMetaValue sets_meta_value(Slice(str, sizeof(int32_t))); version = sets_meta_value.UpdateVersion(); - batch.Put(handles_[kSetsMetaCF], base_destination.Encode(), sets_meta_value.Encode()); + batch.Put(handles_[kMetaCF], base_destination.Encode(), sets_meta_value.Encode()); SetsMemberKey sets_member_key(destination, version, member); BaseDataValue iter_value(Slice{}); batch.Put(handles_[kSetsDataCF], sets_member_key.Encode(), iter_value.Encode()); @@ -783,7 +793,7 @@ rocksdb::Status Redis::SPop(const Slice& key, std::vector* members, uint64_t start_us = pstd::NowMicros(); BaseMetaKey base_meta_key(key); - Status s = db_->Get(default_read_options_, handles_[kSetsMetaCF], base_meta_key.Encode(), &meta_value); + Status s = db_->Get(default_read_options_, handles_[kMetaCF], base_meta_key.Encode(), &meta_value); if (s.ok()) { ParsedSetsMetaValue parsed_sets_meta_value(&meta_value); if (parsed_sets_meta_value.IsStale()) { @@ -806,8 +816,8 @@ rocksdb::Status Redis::SPop(const Slice& key, std::vector* members, } // parsed_sets_meta_value.ModifyCount(-cnt); - // batch.Put(handles_[kSetsMetaCF], key, meta_value); - batch.Delete(handles_[kSetsMetaCF], base_meta_key.Encode()); + // batch.Put(handles_[kMetaCF], key, meta_value); + batch.Delete(handles_[kMetaCF], base_meta_key.Encode()); delete iter; } else { @@ -847,7 +857,7 @@ rocksdb::Status Redis::SPop(const Slice& key, std::vector* members, return Status::InvalidArgument("set size overflow"); } parsed_sets_meta_value.ModifyCount(static_cast(-cnt)); - batch.Put(handles_[kSetsMetaCF], base_meta_key.Encode(), meta_value); + batch.Put(handles_[kMetaCF], base_meta_key.Encode(), meta_value); delete iter; } } @@ -883,7 +893,7 @@ rocksdb::Status Redis::SRandmember(const Slice& key, int32_t count, std::vector< std::unordered_set unique; BaseMetaKey base_meta_key(key); - rocksdb::Status s = db_->Get(default_read_options_, handles_[kSetsMetaCF], base_meta_key.Encode(), &meta_value); + rocksdb::Status s = db_->Get(default_read_options_, handles_[kMetaCF], base_meta_key.Encode(), &meta_value); if (s.ok()) { ParsedSetsMetaValue parsed_sets_meta_value(&meta_value); if (parsed_sets_meta_value.IsStale()) { @@ -947,7 +957,7 @@ rocksdb::Status Redis::SRem(const Slice& key, const std::vector& me std::string meta_value; BaseMetaKey base_meta_key(key); - rocksdb::Status s = db_->Get(default_read_options_, handles_[kSetsMetaCF], base_meta_key.Encode(), &meta_value); + rocksdb::Status s = db_->Get(default_read_options_, handles_[kMetaCF], base_meta_key.Encode(), &meta_value); if (s.ok()) { ParsedSetsMetaValue 
parsed_sets_meta_value(&meta_value); if (parsed_sets_meta_value.IsStale()) { @@ -975,7 +985,7 @@ rocksdb::Status Redis::SRem(const Slice& key, const std::vector& me return Status::InvalidArgument("set size overflow"); } parsed_sets_meta_value.ModifyCount(-cnt); - batch.Put(handles_[kSetsMetaCF], base_meta_key.Encode(), meta_value); + batch.Put(handles_[kMetaCF], base_meta_key.Encode(), meta_value); } } else if (s.IsNotFound()) { *ret = 0; @@ -1004,7 +1014,7 @@ rocksdb::Status Redis::SUnion(const std::vector& keys, std::vector< for (const auto& key : keys) { BaseMetaKey base_meta_key(key); - s = db_->Get(read_options, handles_[kSetsMetaCF], base_meta_key.Encode(), &meta_value); + s = db_->Get(read_options, handles_[kMetaCF], base_meta_key.Encode(), &meta_value); if (s.ok()) { ParsedSetsMetaValue parsed_sets_meta_value(&meta_value); if (!parsed_sets_meta_value.IsStale() && parsed_sets_meta_value.Count() != 0) { @@ -1055,7 +1065,7 @@ rocksdb::Status Redis::SUnionstore(const Slice& destination, const std::vectorGet(read_options, handles_[kSetsMetaCF], base_meta_key.Encode(), &meta_value); + s = db_->Get(read_options, handles_[kMetaCF], base_meta_key.Encode(), &meta_value); if (s.ok()) { ParsedSetsMetaValue parsed_sets_meta_value(&meta_value); if (!parsed_sets_meta_value.IsStale() && parsed_sets_meta_value.Count() != 0) { @@ -1087,7 +1097,7 @@ rocksdb::Status Redis::SUnionstore(const Slice& destination, const std::vectorGet(read_options, handles_[kSetsMetaCF], base_destination.Encode(), &meta_value); + s = db_->Get(read_options, handles_[kMetaCF], base_destination.Encode(), &meta_value); if (s.ok()) { ParsedSetsMetaValue parsed_sets_meta_value(&meta_value); statistic = parsed_sets_meta_value.Count(); @@ -1096,13 +1106,13 @@ rocksdb::Status Redis::SUnionstore(const Slice& destination, const std::vector(members.size())); - batch.Put(handles_[kSetsMetaCF], destination, meta_value); + batch.Put(handles_[kMetaCF], destination, meta_value); } else if (s.IsNotFound()) { char str[4]; EncodeFixed32(str, members.size()); SetsMetaValue sets_meta_value(Slice(str, sizeof(int32_t))); version = sets_meta_value.UpdateVersion(); - batch.Put(handles_[kSetsMetaCF], base_destination.Encode(), sets_meta_value.Encode()); + batch.Put(handles_[kMetaCF], base_destination.Encode(), sets_meta_value.Encode()); } else { return s; } @@ -1137,7 +1147,7 @@ rocksdb::Status Redis::SScan(const Slice& key, int64_t cursor, const std::string read_options.snapshot = snapshot; BaseMetaKey base_meta_key(key); - rocksdb::Status s = db_->Get(read_options, handles_[kSetsMetaCF], base_meta_key.Encode(), &meta_value); + rocksdb::Status s = db_->Get(read_options, handles_[kMetaCF], base_meta_key.Encode(), &meta_value); if (s.ok()) { ParsedSetsMetaValue parsed_sets_meta_value(&meta_value); if (parsed_sets_meta_value.IsStale() || parsed_sets_meta_value.Count() == 0) { @@ -1195,7 +1205,7 @@ rocksdb::Status Redis::SetsExpire(const Slice& key, uint64_t ttl) { ScopeRecordLock l(lock_mgr_, key); BaseMetaKey base_meta_key(key); - rocksdb::Status s = db_->Get(default_read_options_, handles_[kSetsMetaCF], base_meta_key.Encode(), &meta_value); + rocksdb::Status s = db_->Get(default_read_options_, handles_[kMetaCF], base_meta_key.Encode(), &meta_value); if (s.ok()) { ParsedSetsMetaValue parsed_sets_meta_value(&meta_value); if (parsed_sets_meta_value.IsStale()) { @@ -1206,10 +1216,10 @@ rocksdb::Status Redis::SetsExpire(const Slice& key, uint64_t ttl) { if (ttl > 0) { parsed_sets_meta_value.SetRelativeTimestamp(ttl); - s = 
db_->Put(default_write_options_, handles_[kSetsMetaCF], base_meta_key.Encode(), meta_value); + s = db_->Put(default_write_options_, handles_[kMetaCF], base_meta_key.Encode(), meta_value); } else { parsed_sets_meta_value.InitialMetaValue(); - s = db_->Put(default_write_options_, handles_[kSetsMetaCF], base_meta_key.Encode(), meta_value); + s = db_->Put(default_write_options_, handles_[kMetaCF], base_meta_key.Encode(), meta_value); } } return s; @@ -1220,7 +1230,7 @@ rocksdb::Status Redis::SetsDel(const Slice& key) { ScopeRecordLock l(lock_mgr_, key); BaseMetaKey base_meta_key(key); - rocksdb::Status s = db_->Get(default_read_options_, handles_[kSetsMetaCF], base_meta_key.Encode(), &meta_value); + rocksdb::Status s = db_->Get(default_read_options_, handles_[kMetaCF], base_meta_key.Encode(), &meta_value); if (s.ok()) { ParsedSetsMetaValue parsed_sets_meta_value(&meta_value); if (parsed_sets_meta_value.IsStale()) { @@ -1230,7 +1240,7 @@ rocksdb::Status Redis::SetsDel(const Slice& key) { } else { uint32_t statistic = parsed_sets_meta_value.Count(); parsed_sets_meta_value.InitialMetaValue(); - s = db_->Put(default_write_options_, handles_[kSetsMetaCF], base_meta_key.Encode(), meta_value); + s = db_->Put(default_write_options_, handles_[kMetaCF], base_meta_key.Encode(), meta_value); UpdateSpecificKeyStatistics(DataType::kSets, key.ToString(), statistic); } } @@ -1242,7 +1252,7 @@ rocksdb::Status Redis::SetsExpireat(const Slice& key, uint64_t timestamp) { ScopeRecordLock l(lock_mgr_, key); BaseMetaKey base_meta_key(key); - rocksdb::Status s = db_->Get(default_read_options_, handles_[kSetsMetaCF], base_meta_key.Encode(), &meta_value); + rocksdb::Status s = db_->Get(default_read_options_, handles_[kMetaCF], base_meta_key.Encode(), &meta_value); if (s.ok()) { ParsedSetsMetaValue parsed_sets_meta_value(&meta_value); if (parsed_sets_meta_value.IsStale()) { @@ -1255,7 +1265,7 @@ rocksdb::Status Redis::SetsExpireat(const Slice& key, uint64_t timestamp) { } else { parsed_sets_meta_value.InitialMetaValue(); } - return db_->Put(default_write_options_, handles_[kSetsMetaCF], base_meta_key.Encode(), meta_value); + return db_->Put(default_write_options_, handles_[kMetaCF], base_meta_key.Encode(), meta_value); } } return s; @@ -1266,7 +1276,7 @@ rocksdb::Status Redis::SetsPersist(const Slice& key) { ScopeRecordLock l(lock_mgr_, key); BaseMetaKey base_meta_key(key); - rocksdb::Status s = db_->Get(default_read_options_, handles_[kSetsMetaCF], base_meta_key.Encode(), &meta_value); + rocksdb::Status s = db_->Get(default_read_options_, handles_[kMetaCF], base_meta_key.Encode(), &meta_value); if (s.ok()) { ParsedSetsMetaValue parsed_sets_meta_value(&meta_value); if (parsed_sets_meta_value.IsStale()) { @@ -1279,7 +1289,7 @@ rocksdb::Status Redis::SetsPersist(const Slice& key) { return rocksdb::Status::NotFound("Not have an associated timeout"); } else { parsed_sets_meta_value.SetEtime(0); - return db_->Put(default_write_options_, handles_[kSetsMetaCF], base_meta_key.Encode(), meta_value); + return db_->Put(default_write_options_, handles_[kMetaCF], base_meta_key.Encode(), meta_value); } } } @@ -1290,7 +1300,7 @@ rocksdb::Status Redis::SetsTTL(const Slice& key, uint64_t* timestamp) { std::string meta_value; BaseMetaKey base_meta_key(key); - rocksdb::Status s = db_->Get(default_read_options_, handles_[kSetsMetaCF], base_meta_key.Encode(), &meta_value); + rocksdb::Status s = db_->Get(default_read_options_, handles_[kMetaCF], base_meta_key.Encode(), &meta_value); if (s.ok()) { ParsedSetsMetaValue 
parsed_setes_meta_value(&meta_value); if (parsed_setes_meta_value.IsStale()) { @@ -1324,7 +1334,7 @@ void Redis::ScanSets() { auto current_time = static_cast(time(nullptr)); INFO("***************Sets Meta Data***************"); - auto meta_iter = db_->NewIterator(iterator_options, handles_[kSetsMetaCF]); + auto meta_iter = db_->NewIterator(iterator_options, handles_[kMetaCF]); for (meta_iter->SeekToFirst(); meta_iter->Valid(); meta_iter->Next()) { ParsedSetsMetaValue parsed_sets_meta_value(meta_iter->value()); ParsedBaseMetaKey parsed_meta_key(meta_iter->key()); diff --git a/src/storage/src/redis_zsets.cc b/src/storage/src/redis_zsets.cc index ea4864ae1..e8ce0cec9 100644 --- a/src/storage/src/redis_zsets.cc +++ b/src/storage/src/redis_zsets.cc @@ -36,7 +36,7 @@ Status Redis::ScanZsetsKeyNum(KeyInfo* key_info) { int64_t curtime; rocksdb::Env::Default()->GetCurrentTime(&curtime); - rocksdb::Iterator* iter = db_->NewIterator(iterator_options, handles_[kZsetsMetaCF]); + rocksdb::Iterator* iter = db_->NewIterator(iterator_options, handles_[kMetaCF]); for (iter->SeekToFirst(); iter->Valid(); iter->Next()) { ParsedZSetsMetaValue parsed_zsets_meta_value(iter->value()); if (parsed_zsets_meta_value.IsStale() || parsed_zsets_meta_value.Count() == 0) { @@ -70,7 +70,7 @@ Status Redis::ZsetsPKPatternMatchDel(const std::string& pattern, int32_t* ret) { int32_t total_delete = 0; Status s; rocksdb::WriteBatch batch; - rocksdb::Iterator* iter = db_->NewIterator(iterator_options, handles_[kZsetsMetaCF]); + rocksdb::Iterator* iter = db_->NewIterator(iterator_options, handles_[kMetaCF]); iter->SeekToFirst(); while (iter->Valid()) { ParsedBaseMetaKey meta_key(iter->key().ToString()); @@ -79,7 +79,7 @@ Status Redis::ZsetsPKPatternMatchDel(const std::string& pattern, int32_t* ret) { if (!parsed_zsets_meta_value.IsStale() && (parsed_zsets_meta_value.Count() != 0) && (StringMatch(pattern.data(), pattern.size(), meta_key.Key().data(), meta_key.Key().size(), 0) != 0)) { parsed_zsets_meta_value.InitialMetaValue(); - batch.Put(handles_[kZsetsMetaCF], key, meta_value); + batch.Put(handles_[kMetaCF], key, meta_value); } if (static_cast(batch.Count()) >= BATCH_DELETE_LIMIT) { s = db_->Write(default_write_options_, &batch); @@ -113,7 +113,7 @@ Status Redis::ZPopMax(const Slice& key, const int64_t count, std::vectorGet(default_read_options_, handles_[kZsetsMetaCF], base_meta_key.Encode(), &meta_value); + Status s = db_->Get(default_read_options_, handles_[kMetaCF], base_meta_key.Encode(), &meta_value); if (s.ok()) { ParsedZSetsMetaValue parsed_zsets_meta_value(&meta_value); if (parsed_zsets_meta_value.IsStale()) { @@ -143,7 +143,7 @@ Status Redis::ZPopMax(const Slice& key, const int64_t count, std::vectorWrite(default_write_options_, &batch); UpdateSpecificKeyStatistics(DataType::kZSets, key.ToString(), statistic); return s; @@ -161,7 +161,7 @@ Status Redis::ZPopMin(const Slice& key, const int64_t count, std::vectorGet(default_read_options_, handles_[kZsetsMetaCF], base_meta_key.Encode(), &meta_value); + Status s = db_->Get(default_read_options_, handles_[kMetaCF], base_meta_key.Encode(), &meta_value); if (s.ok()) { ParsedZSetsMetaValue parsed_zsets_meta_value(&meta_value); if (parsed_zsets_meta_value.IsStale()) { @@ -191,7 +191,7 @@ Status Redis::ZPopMin(const Slice& key, const int64_t count, std::vectorWrite(default_write_options_, &batch); UpdateSpecificKeyStatistics(DataType::kZSets, key.ToString(), statistic); return s; @@ -220,7 +220,7 @@ Status Redis::ZAdd(const Slice& key, const std::vector& score_membe 
ScopeRecordLock l(lock_mgr_, key); BaseMetaKey base_meta_key(key); - Status s = db_->Get(default_read_options_, handles_[kZsetsMetaCF], base_meta_key.Encode(), &meta_value); + Status s = db_->Get(default_read_options_, handles_[kMetaCF], base_meta_key.Encode(), &meta_value); if (s.ok()) { bool vaild = true; ParsedZSetsMetaValue parsed_zsets_meta_value(&meta_value); @@ -276,14 +276,14 @@ Status Redis::ZAdd(const Slice& key, const std::vector& score_membe return Status::InvalidArgument("zset size overflow"); } parsed_zsets_meta_value.ModifyCount(cnt); - batch.Put(handles_[kZsetsMetaCF], base_meta_key.Encode(), meta_value); + batch.Put(handles_[kMetaCF], base_meta_key.Encode(), meta_value); *ret = cnt; } else if (s.IsNotFound()) { char buf[4]; EncodeFixed32(buf, filtered_score_members.size()); ZSetsMetaValue zsets_meta_value(Slice(buf, sizeof(int32_t))); version = zsets_meta_value.UpdateVersion(); - batch.Put(handles_[kZsetsMetaCF], base_meta_key.Encode(), zsets_meta_value.Encode()); + batch.Put(handles_[kMetaCF], base_meta_key.Encode(), zsets_meta_value.Encode()); for (const auto& sm : filtered_score_members) { ZSetsMemberKey zsets_member_key(key, version, sm.member); const void* ptr_score = reinterpret_cast(&sm.score); @@ -309,7 +309,7 @@ Status Redis::ZCard(const Slice& key, int32_t* card) { std::string meta_value; BaseMetaKey base_meta_key(key); - Status s = db_->Get(default_read_options_, handles_[kZsetsMetaCF], base_meta_key.Encode(), &meta_value); + Status s = db_->Get(default_read_options_, handles_[kMetaCF], base_meta_key.Encode(), &meta_value); if (s.ok()) { ParsedZSetsMetaValue parsed_zsets_meta_value(&meta_value); if (parsed_zsets_meta_value.IsStale()) { @@ -335,7 +335,7 @@ Status Redis::ZCount(const Slice& key, double min, double max, bool left_close, read_options.snapshot = snapshot; BaseMetaKey base_meta_key(key); - Status s = db_->Get(read_options, handles_[kZsetsMetaCF], base_meta_key.Encode(), &meta_value); + Status s = db_->Get(read_options, handles_[kMetaCF], base_meta_key.Encode(), &meta_value); if (s.ok()) { ParsedZSetsMetaValue parsed_zsets_meta_value(&meta_value); if (parsed_zsets_meta_value.IsStale()) { @@ -393,7 +393,7 @@ Status Redis::ZIncrby(const Slice& key, const Slice& member, double increment, d ScopeRecordLock l(lock_mgr_, key); BaseMetaKey base_meta_key(key); - Status s = db_->Get(default_read_options_, handles_[kZsetsMetaCF], base_meta_key.Encode(), &meta_value); + Status s = db_->Get(default_read_options_, handles_[kMetaCF], base_meta_key.Encode(), &meta_value); if (s.ok()) { ParsedZSetsMetaValue parsed_zsets_meta_value(&meta_value); if (parsed_zsets_meta_value.IsStale() || parsed_zsets_meta_value.Count() == 0) { @@ -422,7 +422,7 @@ Status Redis::ZIncrby(const Slice& key, const Slice& member, double increment, d return Status::InvalidArgument("zset size overflow"); } parsed_zsets_meta_value.ModifyCount(1); - batch.Put(handles_[kZsetsMetaCF], base_meta_key.Encode(), meta_value); + batch.Put(handles_[kMetaCF], base_meta_key.Encode(), meta_value); } else { return s; } @@ -431,7 +431,7 @@ Status Redis::ZIncrby(const Slice& key, const Slice& member, double increment, d EncodeFixed32(buf, 1); ZSetsMetaValue zsets_meta_value(Slice(buf, sizeof(int32_t))); version = zsets_meta_value.UpdateVersion(); - batch.Put(handles_[kZsetsMetaCF], base_meta_key.Encode(), zsets_meta_value.Encode()); + batch.Put(handles_[kMetaCF], base_meta_key.Encode(), zsets_meta_value.Encode()); score = increment; } else { return s; @@ -461,7 +461,7 @@ Status Redis::ZRange(const Slice& key, 
int32_t start, int32_t stop, std::vector< read_options.snapshot = snapshot; BaseMetaKey base_meta_key(key); - Status s = db_->Get(read_options, handles_[kZsetsMetaCF], base_meta_key.Encode(), &meta_value); + Status s = db_->Get(read_options, handles_[kMetaCF], base_meta_key.Encode(), &meta_value); if (s.ok()) { ParsedZSetsMetaValue parsed_zsets_meta_value(&meta_value); if (parsed_zsets_meta_value.IsStale()) { @@ -509,7 +509,7 @@ Status Redis::ZRangeWithTTL(const Slice& key, int32_t start, int32_t stop, std:: read_options.snapshot = snapshot; BaseMetaKey base_meta_key(key); - Status s = db_->Get(read_options, handles_[kZsetsMetaCF], base_meta_key.Encode(), &meta_value); + Status s = db_->Get(read_options, handles_[kMetaCF], base_meta_key.Encode(), &meta_value); if (s.ok()) { ParsedZSetsMetaValue parsed_zsets_meta_value(&meta_value); if (parsed_zsets_meta_value.Count() == 0) { @@ -566,7 +566,7 @@ Status Redis::ZRangebyscore(const Slice& key, double min, double max, bool left_ read_options.snapshot = snapshot; BaseMetaKey base_meta_key(key); - Status s = db_->Get(read_options, handles_[kZsetsMetaCF], base_meta_key.Encode(), &meta_value); + Status s = db_->Get(read_options, handles_[kMetaCF], base_meta_key.Encode(), &meta_value); if (s.ok()) { ParsedZSetsMetaValue parsed_zsets_meta_value(&meta_value); if (parsed_zsets_meta_value.IsStale()) { @@ -633,7 +633,7 @@ Status Redis::ZRank(const Slice& key, const Slice& member, int32_t* rank) { read_options.snapshot = snapshot; BaseMetaKey base_meta_key(key); - Status s = db_->Get(read_options, handles_[kZsetsMetaCF], base_meta_key.Encode(), &meta_value); + Status s = db_->Get(read_options, handles_[kMetaCF], base_meta_key.Encode(), &meta_value); if (s.ok()) { ParsedZSetsMetaValue parsed_zsets_meta_value(&meta_value); if (parsed_zsets_meta_value.IsStale()) { @@ -685,7 +685,7 @@ Status Redis::ZRem(const Slice& key, const std::vector& members, in ScopeRecordLock l(lock_mgr_, key); BaseMetaKey base_meta_key(key); - Status s = db_->Get(default_read_options_, handles_[kZsetsMetaCF], base_meta_key.Encode(), &meta_value); + Status s = db_->Get(default_read_options_, handles_[kMetaCF], base_meta_key.Encode(), &meta_value); if (s.ok()) { ParsedZSetsMetaValue parsed_zsets_meta_value(&meta_value); if (parsed_zsets_meta_value.IsStale()) { @@ -720,7 +720,7 @@ Status Redis::ZRem(const Slice& key, const std::vector& members, in return Status::InvalidArgument("zset size overflow"); } parsed_zsets_meta_value.ModifyCount(-del_cnt); - batch.Put(handles_[kZsetsMetaCF], base_meta_key.Encode(), meta_value); + batch.Put(handles_[kMetaCF], base_meta_key.Encode(), meta_value); } } else { return s; @@ -738,7 +738,7 @@ Status Redis::ZRemrangebyrank(const Slice& key, int32_t start, int32_t stop, int ScopeRecordLock l(lock_mgr_, key); BaseMetaKey base_meta_key(key); - Status s = db_->Get(default_read_options_, handles_[kZsetsMetaCF], base_meta_key.Encode(), &meta_value); + Status s = db_->Get(default_read_options_, handles_[kMetaCF], base_meta_key.Encode(), &meta_value); if (s.ok()) { ParsedZSetsMetaValue parsed_zsets_meta_value(&meta_value); if (parsed_zsets_meta_value.IsStale()) { @@ -777,7 +777,7 @@ Status Redis::ZRemrangebyrank(const Slice& key, int32_t start, int32_t stop, int return Status::InvalidArgument("zset size overflow"); } parsed_zsets_meta_value.ModifyCount(-del_cnt); - batch.Put(handles_[kZsetsMetaCF], base_meta_key.Encode(), meta_value); + batch.Put(handles_[kMetaCF], base_meta_key.Encode(), meta_value); } } else { return s; @@ -796,7 +796,7 @@ Status 
Redis::ZRemrangebyscore(const Slice& key, double min, double max, bool le ScopeRecordLock l(lock_mgr_, key); BaseMetaKey base_meta_key(key); - Status s = db_->Get(default_read_options_, handles_[kZsetsMetaCF], base_meta_key.Encode(), &meta_value); + Status s = db_->Get(default_read_options_, handles_[kMetaCF], base_meta_key.Encode(), &meta_value); if (s.ok()) { ParsedZSetsMetaValue parsed_zsets_meta_value(&meta_value); if (parsed_zsets_meta_value.IsStale()) { @@ -847,7 +847,7 @@ Status Redis::ZRemrangebyscore(const Slice& key, double min, double max, bool le return Status::InvalidArgument("zset size overflow"); } parsed_zsets_meta_value.ModifyCount(-del_cnt); - batch.Put(handles_[kZsetsMetaCF], base_meta_key.Encode(), meta_value); + batch.Put(handles_[kMetaCF], base_meta_key.Encode(), meta_value); } } else { return s; @@ -867,7 +867,7 @@ Status Redis::ZRevrange(const Slice& key, int32_t start, int32_t stop, std::vect read_options.snapshot = snapshot; BaseMetaKey base_meta_key(key); - Status s = db_->Get(read_options, handles_[kZsetsMetaCF], base_meta_key.Encode(), &meta_value); + Status s = db_->Get(read_options, handles_[kMetaCF], base_meta_key.Encode(), &meta_value); if (s.ok()) { ParsedZSetsMetaValue parsed_zsets_meta_value(&meta_value); if (parsed_zsets_meta_value.IsStale()) { @@ -915,7 +915,7 @@ Status Redis::ZRevrangebyscore(const Slice& key, double min, double max, bool le read_options.snapshot = snapshot; BaseMetaKey base_meta_key(key); - Status s = db_->Get(read_options, handles_[kZsetsMetaCF], base_meta_key.Encode(), &meta_value); + Status s = db_->Get(read_options, handles_[kMetaCF], base_meta_key.Encode(), &meta_value); if (s.ok()) { ParsedZSetsMetaValue parsed_zsets_meta_value(&meta_value); if (parsed_zsets_meta_value.IsStale()) { @@ -981,7 +981,7 @@ Status Redis::ZRevrank(const Slice& key, const Slice& member, int32_t* rank) { read_options.snapshot = snapshot; BaseMetaKey base_meta_key(key); - Status s = db_->Get(read_options, handles_[kZsetsMetaCF], base_meta_key.Encode(), &meta_value); + Status s = db_->Get(read_options, handles_[kMetaCF], base_meta_key.Encode(), &meta_value); if (s.ok()) { ParsedZSetsMetaValue parsed_zsets_meta_value(&meta_value); if (parsed_zsets_meta_value.IsStale()) { @@ -1024,7 +1024,7 @@ Status Redis::ZScore(const Slice& key, const Slice& member, double* score) { read_options.snapshot = snapshot; BaseMetaKey base_meta_key(key); - Status s = db_->Get(read_options, handles_[kZsetsMetaCF], base_meta_key.Encode(), &meta_value); + Status s = db_->Get(read_options, handles_[kMetaCF], base_meta_key.Encode(), &meta_value); if (s.ok()) { ParsedZSetsMetaValue parsed_zsets_meta_value(&meta_value); uint64_t version = parsed_zsets_meta_value.Version(); @@ -1061,7 +1061,7 @@ Status Redis::ZGetAll(const Slice& key, double weight, std::mapGet(read_options, handles_[kZsetsMetaCF], base_meta_key.Encode(), &meta_value); + s = db_->Get(read_options, handles_[kMetaCF], base_meta_key.Encode(), &meta_value); if (s.ok()) { ParsedZSetsMetaValue parsed_zsets_meta_value(&meta_value); if (!parsed_zsets_meta_value.IsStale() && parsed_zsets_meta_value.Count() != 0) { @@ -1104,7 +1104,7 @@ Status Redis::ZUnionstore(const Slice& destination, const std::vectorGet(read_options, handles_[kZsetsMetaCF], base_meta_key.Encode(), &meta_value); + s = db_->Get(read_options, handles_[kMetaCF], base_meta_key.Encode(), &meta_value); if (s.ok()) { ParsedZSetsMetaValue parsed_zsets_meta_value(&meta_value); if (!parsed_zsets_meta_value.IsStale() && parsed_zsets_meta_value.Count() != 0) { @@ 
-1148,7 +1148,7 @@ Status Redis::ZUnionstore(const Slice& destination, const std::vectorGet(read_options, handles_[kZsetsMetaCF], base_destination.Encode(), &meta_value); + s = db_->Get(read_options, handles_[kMetaCF], base_destination.Encode(), &meta_value); if (s.ok()) { ParsedZSetsMetaValue parsed_zsets_meta_value(&meta_value); statistic = parsed_zsets_meta_value.Count(); @@ -1157,13 +1157,13 @@ Status Redis::ZUnionstore(const Slice& destination, const std::vector(member_score_map.size())); - batch.Put(handles_[kZsetsMetaCF], base_destination.Encode(), meta_value); + batch.Put(handles_[kMetaCF], base_destination.Encode(), meta_value); } else { char buf[4]; EncodeFixed32(buf, member_score_map.size()); ZSetsMetaValue zsets_meta_value(Slice(buf, sizeof(int32_t))); version = zsets_meta_value.UpdateVersion(); - batch.Put(handles_[kZsetsMetaCF], base_destination.Encode(), zsets_meta_value.Encode()); + batch.Put(handles_[kMetaCF], base_destination.Encode(), zsets_meta_value.Encode()); } char score_buf[8]; @@ -1215,7 +1215,7 @@ Status Redis::ZInterstore(const Slice& destination, const std::vectorGet(read_options, handles_[kZsetsMetaCF], base_meta_key.Encode(), &meta_value); + s = db_->Get(read_options, handles_[kMetaCF], base_meta_key.Encode(), &meta_value); if (s.ok()) { ParsedZSetsMetaValue parsed_zsets_meta_value(&meta_value); if (parsed_zsets_meta_value.IsStale() || parsed_zsets_meta_value.Count() == 0) { @@ -1286,7 +1286,7 @@ Status Redis::ZInterstore(const Slice& destination, const std::vectorGet(read_options, handles_[kZsetsMetaCF], base_destination.Encode(), &meta_value); + s = db_->Get(read_options, handles_[kMetaCF], base_destination.Encode(), &meta_value); if (s.ok()) { ParsedZSetsMetaValue parsed_zsets_meta_value(&meta_value); statistic = parsed_zsets_meta_value.Count(); @@ -1295,13 +1295,13 @@ Status Redis::ZInterstore(const Slice& destination, const std::vector(final_score_members.size())); - batch.Put(handles_[kZsetsMetaCF], base_destination.Encode(), meta_value); + batch.Put(handles_[kMetaCF], base_destination.Encode(), meta_value); } else { char buf[4]; EncodeFixed32(buf, final_score_members.size()); ZSetsMetaValue zsets_meta_value(Slice(buf, sizeof(int32_t))); version = zsets_meta_value.UpdateVersion(); - batch.Put(handles_[kZsetsMetaCF], base_destination.Encode(), zsets_meta_value.Encode()); + batch.Put(handles_[kMetaCF], base_destination.Encode(), zsets_meta_value.Encode()); } char score_buf[8]; for (const auto& sm : final_score_members) { @@ -1337,7 +1337,7 @@ Status Redis::ZRangebylex(const Slice& key, const Slice& min, const Slice& max, bool right_not_limit = max.compare("+") == 0; BaseMetaKey base_meta_key(key); - Status s = db_->Get(read_options, handles_[kZsetsMetaCF], base_meta_key.Encode(), &meta_value); + Status s = db_->Get(read_options, handles_[kMetaCF], base_meta_key.Encode(), &meta_value); if (s.ok()) { ParsedZSetsMetaValue parsed_zsets_meta_value(&meta_value); if (parsed_zsets_meta_value.IsStale() || parsed_zsets_meta_value.Count() == 0) { @@ -1400,7 +1400,7 @@ Status Redis::ZRemrangebylex(const Slice& key, const Slice& min, const Slice& ma std::string meta_value; BaseMetaKey base_meta_key(key); - Status s = db_->Get(read_options, handles_[kZsetsMetaCF], base_meta_key.Encode(), &meta_value); + Status s = db_->Get(read_options, handles_[kMetaCF], base_meta_key.Encode(), &meta_value); if (s.ok()) { ParsedZSetsMetaValue parsed_zsets_meta_value(&meta_value); if (parsed_zsets_meta_value.IsStale() || parsed_zsets_meta_value.Count() == 0) { @@ -1446,7 +1446,7 @@ 
Status Redis::ZRemrangebylex(const Slice& key, const Slice& min, const Slice& ma return Status::InvalidArgument("zset size overflow"); } parsed_zsets_meta_value.ModifyCount(-del_cnt); - batch.Put(handles_[kZsetsMetaCF], base_meta_key.Encode(), meta_value); + batch.Put(handles_[kMetaCF], base_meta_key.Encode(), meta_value); *ret = del_cnt; } } else { @@ -1462,7 +1462,7 @@ Status Redis::ZsetsExpire(const Slice& key, uint64_t ttl) { ScopeRecordLock l(lock_mgr_, key); BaseMetaKey base_meta_key(key); - Status s = db_->Get(default_read_options_, handles_[kZsetsMetaCF], base_meta_key.Encode(), &meta_value); + Status s = db_->Get(default_read_options_, handles_[kMetaCF], base_meta_key.Encode(), &meta_value); if (s.ok()) { ParsedZSetsMetaValue parsed_zsets_meta_value(&meta_value); if (parsed_zsets_meta_value.IsStale()) { @@ -1476,7 +1476,7 @@ Status Redis::ZsetsExpire(const Slice& key, uint64_t ttl) { } else { parsed_zsets_meta_value.InitialMetaValue(); } - s = db_->Put(default_write_options_, handles_[kZsetsMetaCF], base_meta_key.Encode(), meta_value); + s = db_->Put(default_write_options_, handles_[kMetaCF], base_meta_key.Encode(), meta_value); } return s; } @@ -1486,7 +1486,7 @@ Status Redis::ZsetsDel(const Slice& key) { ScopeRecordLock l(lock_mgr_, key); BaseMetaKey base_meta_key(key); - Status s = db_->Get(default_read_options_, handles_[kZsetsMetaCF], base_meta_key.Encode(), &meta_value); + Status s = db_->Get(default_read_options_, handles_[kMetaCF], base_meta_key.Encode(), &meta_value); if (s.ok()) { ParsedZSetsMetaValue parsed_zsets_meta_value(&meta_value); if (parsed_zsets_meta_value.IsStale()) { @@ -1496,7 +1496,7 @@ Status Redis::ZsetsDel(const Slice& key) { } else { uint32_t statistic = parsed_zsets_meta_value.Count(); parsed_zsets_meta_value.InitialMetaValue(); - s = db_->Put(default_write_options_, handles_[kZsetsMetaCF], base_meta_key.Encode(), meta_value); + s = db_->Put(default_write_options_, handles_[kMetaCF], base_meta_key.Encode(), meta_value); UpdateSpecificKeyStatistics(DataType::kZSets, key.ToString(), statistic); } } @@ -1508,7 +1508,7 @@ Status Redis::ZsetsExpireat(const Slice& key, uint64_t timestamp) { ScopeRecordLock l(lock_mgr_, key); BaseMetaKey base_meta_key(key); - Status s = db_->Get(default_read_options_, handles_[kZsetsMetaCF], base_meta_key.Encode(), &meta_value); + Status s = db_->Get(default_read_options_, handles_[kMetaCF], base_meta_key.Encode(), &meta_value); if (s.ok()) { ParsedZSetsMetaValue parsed_zsets_meta_value(&meta_value); if (parsed_zsets_meta_value.IsStale()) { @@ -1521,7 +1521,7 @@ Status Redis::ZsetsExpireat(const Slice& key, uint64_t timestamp) { } else { parsed_zsets_meta_value.InitialMetaValue(); } - return db_->Put(default_write_options_, handles_[kZsetsMetaCF], base_meta_key.Encode(), meta_value); + return db_->Put(default_write_options_, handles_[kMetaCF], base_meta_key.Encode(), meta_value); } } return s; @@ -1546,7 +1546,7 @@ Status Redis::ZScan(const Slice& key, int64_t cursor, const std::string& pattern read_options.snapshot = snapshot; BaseMetaKey base_meta_key(key); - Status s = db_->Get(read_options, handles_[kZsetsMetaCF], base_meta_key.Encode(), &meta_value); + Status s = db_->Get(read_options, handles_[kMetaCF], base_meta_key.Encode(), &meta_value); if (s.ok()) { ParsedZSetsMetaValue parsed_zsets_meta_value(&meta_value); if (parsed_zsets_meta_value.IsStale() || parsed_zsets_meta_value.Count() == 0) { @@ -1608,7 +1608,7 @@ Status Redis::ZsetsPersist(const Slice& key) { ScopeRecordLock l(lock_mgr_, key); BaseMetaKey 
base_meta_key(key); - Status s = db_->Get(default_read_options_, handles_[kZsetsMetaCF], base_meta_key.Encode(), &meta_value); + Status s = db_->Get(default_read_options_, handles_[kMetaCF], base_meta_key.Encode(), &meta_value); if (s.ok()) { ParsedZSetsMetaValue parsed_zsets_meta_value(&meta_value); if (parsed_zsets_meta_value.IsStale()) { @@ -1621,7 +1621,7 @@ Status Redis::ZsetsPersist(const Slice& key) { return Status::NotFound("Not have an associated timeout"); } else { parsed_zsets_meta_value.SetEtime(0); - return db_->Put(default_write_options_, handles_[kZsetsMetaCF], base_meta_key.Encode(), meta_value); + return db_->Put(default_write_options_, handles_[kMetaCF], base_meta_key.Encode(), meta_value); } } } @@ -1632,7 +1632,7 @@ Status Redis::ZsetsTTL(const Slice& key, uint64_t* timestamp) { std::string meta_value; BaseMetaKey base_meta_key(key); - Status s = db_->Get(default_read_options_, handles_[kZsetsMetaCF], base_meta_key.Encode(), &meta_value); + Status s = db_->Get(default_read_options_, handles_[kMetaCF], base_meta_key.Encode(), &meta_value); if (s.ok()) { ParsedZSetsMetaValue parsed_zsets_meta_value(&meta_value); if (parsed_zsets_meta_value.IsStale()) { @@ -1666,7 +1666,7 @@ void Redis::ScanZsets() { auto current_time = static_cast(time(nullptr)); INFO("***************rocksdb instance: {} ZSets Meta Data***************", index_); - auto meta_iter = db_->NewIterator(iterator_options, handles_[kZsetsMetaCF]); + auto meta_iter = db_->NewIterator(iterator_options, handles_[kMetaCF]); for (meta_iter->SeekToFirst(); meta_iter->Valid(); meta_iter->Next()) { ParsedBaseMetaKey parsed_meta_key(meta_iter->key()); ParsedZSetsMetaValue parsed_zsets_meta_value(meta_iter->value());
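
For reviewers, the sets hunks above move every meta value into the single kMetaCF column family and prefix it with a one-byte type tag ('s', written with EncodeFixed8 ahead of the 32-bit count), so SAdd and SCard can reject a key that was created by another data type with Status::InvalidArgument("WRONGTYPE ..."). Below is a minimal, self-contained sketch of that prefix layout and the IsType-style check. It is illustrative only and not part of the patch: EncodeTypedSetMeta, ParsedTypedMeta and the sketch namespace are invented names, std::string_view stands in for rocksdb::Slice, and a little-endian host with C++17 is assumed. The real meta value also carries version/reserve/timestamp fields after this prefix, which the sketch deliberately omits.

// Illustrative sketch only (not code from the patch): byte 0 of the meta
// value is a type tag ('s' for a set), bytes 1..4 are the little-endian
// element count, and a WRONGTYPE-style check compares the stored tag.
#include <cstdint>
#include <cstring>
#include <iostream>
#include <string>
#include <string_view>

namespace sketch {

// Mirrors the one-byte encoder the patch relies on: write a single raw byte.
inline void EncodeFixed8(char* buf, char type) { buf[0] = type; }

// Fixed-width 32-bit encode/decode; a little-endian host is assumed here.
inline void EncodeFixed32(char* buf, uint32_t value) { std::memcpy(buf, &value, sizeof(value)); }
inline uint32_t DecodeFixed32(const char* buf) {
  uint32_t value = 0;
  std::memcpy(&value, buf, sizeof(value));
  return value;
}

// Hypothetical helper: build the 5-byte prefix that SAdd's IsNotFound branch
// now assembles inline (char str[5]; EncodeFixed8(str, 's');
// EncodeFixed32(str + 1, count); Slice(str, 5)).
std::string EncodeTypedSetMeta(uint32_t count) {
  std::string buf(5, '\0');
  EncodeFixed8(&buf[0], 's');        // type tag checked by IsType()
  EncodeFixed32(&buf[0] + 1, count); // element count now lives at offset 1
  return buf;
}

// Hypothetical stand-in for the parsed meta value: read the tag and count
// back out so callers can reject values written by a different data type.
// The views point into the caller's buffer, which must outlive this object.
struct ParsedTypedMeta {
  explicit ParsedTypedMeta(std::string_view v)
      : type_(v.substr(0, 1)), count_(DecodeFixed32(v.data() + 1)) {}
  bool IsType(std::string_view tag) const { return type_ == tag; }
  uint32_t Count() const { return count_; }
  std::string_view type_;
  uint32_t count_;
};

}  // namespace sketch

int main() {
  const std::string meta = sketch::EncodeTypedSetMeta(3);
  sketch::ParsedTypedMeta parsed(meta);
  if (!parsed.IsType("s")) {
    // The storage layer surfaces this case as Status::InvalidArgument with
    // the WRONGTYPE message, as the SAdd/SCard hunks above do.
    std::cout << "WRONGTYPE Operation against a key holding the wrong kind of value\n";
    return 1;
  }
  std::cout << "set meta ok, count = " << parsed.Count() << "\n";  // prints 3
  return 0;
}

Compiling the sketch with something like g++ -std=c++17 sketch.cc and running it prints the decoded count for a well-formed set meta and the WRONGTYPE message when the tag does not match. Putting the tag in the first byte is what lets every data type share kMetaCF while commands such as SAdd and SCard can refuse a key of the wrong type before touching the data column families.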