Overview: levelDB Source Code Analysis (6): Cache
leveldb implements an LRU cache internally on top of a circular doubly linked list. The LRUCache also maintains a HandleTable (a hash table) so that the list node for a given key can be located quickly.
The abstract class Cache defines the cache interface through virtual functions. The concrete implementation handed to callers is ShardedLRUCache, which inherits from Cache and is composed of LRUCache shards; externally, entries are referred to through opaque Cache::Handle pointers, while internally they are LRUHandle nodes.
LRUCache is thread-safe. To keep multi-threaded access fast and reduce lock overhead, ShardedLRUCache holds 16 LRUCache shards internally. When looking up a key, the top 4 bits of the key's hash select which LRUCache the key lives in, and the lookup then proceeds inside that single shard, which greatly reduces lock contention between threads.
To use the cache you first declare a Cache* pointer; Cache is an abstract class, so it cannot be instantiated directly. Instead you call NewLRUCache(), which returns a ShardedLRUCache object. ShardedLRUCache contains an array of LRUCache objects, and these are the real caches.
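As a quick illustration, here is a minimal, hypothetical usage sketch (caller-side code, not part of leveldb itself; the interface it uses is listed in section 1 below). The key points are that every entry carries a deleter, and that every handle obtained from Insert() or a successful Lookup() must be paired with a Release():

#include <cassert>
#include <string>
#include "leveldb/cache.h"

// Deleter invoked by the cache when an entry is evicted, erased, or replaced.
static void DeleteValue(const leveldb::Slice& key, void* value) {
  delete reinterpret_cast<std::string*>(value);
}

int main() {
  leveldb::Cache* cache = leveldb::NewLRUCache(100 * 1024);  // capacity: 100 KB of "charge"

  // Insert a key/value pair, charging 6 bytes against the capacity.
  leveldb::Cache::Handle* h =
      cache->Insert("key1", new std::string("value1"), 6, DeleteValue);
  cache->Release(h);  // done with the handle returned by Insert

  // Lookup returns NULL when the key is absent.
  h = cache->Lookup("key1");
  if (h != NULL) {
    std::string* v = reinterpret_cast<std::string*>(cache->Value(h));
    assert(*v == "value1");
    cache->Release(h);  // every successful Lookup needs a matching Release
  }

  cache->Erase("key1");  // the entry's deleter runs once no handle references it
  delete cache;
  return 0;
}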
Each LRUCache maintains a circular doubly linked list whose elements are LRUHandle nodes, as shown below.
1. The Cache interface
The cache interface is defined in include/leveldb/cache.h; by implementing this interface, different caching policies can be plugged in.
// Create a fixed-size cache that uses a least-recently-used eviction policy
extern Cache* NewLRUCache(size_t capacity);
class Cache {
public:
Cache() { }
// Destructor
virtual ~Cache();
// Opaque handle to an entry stored in the cache
struct Handle { };
// Insert a mapping from key->value into the cache and assign it
// the specified charge against the total cache capacity.
//
// Returns a handle that corresponds to the mapping. The caller
// must call this->Release(handle) when the returned mapping is no longer needed.
//
// When the inserted entry is no longer needed, the key and value will be passed to "deleter".
virtual Handle* Insert(const Slice& key, void* value, size_t charge,
    void (*deleter)(const Slice& key, void* value)) = 0;
// If the cache has no mapping for "key", returns NULL.
//
// Else return a handle that corresponds to the mapping.
// The caller must call this->Release(handle) when the returned mapping is no longer needed.
virtual Handle* Lookup(const Slice& key) = 0;
// Release a mapping returned by a previous Lookup().
// REQUIRES: handle must not have been released yet.
// REQUIRES: handle must have been returned by a method on *this.
virtual void Release(Handle* handle) = 0;
// Return the value encapsulated in a handle returned by a successful Lookup().
// REQUIRES: handle must not have been released yet.
// REQUIRES: handle must have been returned by a method on *this.
virtual void* Value(Handle* handle) = 0;
// If the cache contains entry for key, erase it.
// Note that the underlying entry will be kept around until all existing handles
// to it have been released.
virtual void Erase(const Slice& key) = 0;
// Return a new numeric id. May be used by multiple clients who are
// sharing the same cache to partition the key space.
// Typically the client will allocate a new id at startup and prepend the id to its cache keys.
virtual uint64_t NewId() = 0; // Returns a new unique id
// Remove all cache entries that are not actively in use.
// Memory-constrained applications may wish to call this method to reduce memory usage.
// Default implementation of Prune() does nothing.
// Subclasses are strongly encouraged to override the default implementation.
// A future release of leveldb may change Prune() to a pure abstract method.
virtual void Prune() {}
// Return an estimate of the combined charges of all elements stored in the cache.
virtual size_t TotalCharge() const = 0;
private:
void LRU_Remove(Handle* e); // Remove node e from the LRU list
void LRU_Append(Handle* e); // Append node e to the LRU list
void Unref(Handle* e); // Decrement the reference count of node e
struct Rep;
Rep* rep_;
// No copying allowed: copy constructor and assignment are private and unimplemented
Cache(const Cache&);
void operator=(const Cache&);
};
2. LRUCache
An LRU cache is implemented in util/cache.cc on top of a circular doubly linked list.
The linked-list node structure
// Each entry is a variable-length heap-allocated structure.
// Entries are kept in a circular doubly linked list ordered by access time.
struct LRUHandle {
void* value;
void (*deleter)(const Slice&, void* value);
LRUHandle* next_hash;
LRUHandle* next;
LRUHandle* prev;
size_t charge; // TODO(opt): Only allow uint32_t?
size_t key_length;
uint32_t refs; // Reference count for memory management; Insert() initializes it to 2
uint32_t hash; // Hash of key(); used for fast sharding and comparisons
char key_data[1]; // Beginning of key
Slice key() const {
// For cheaper lookups, we allow a temporary Handle object
// to store a pointer to a key in "value".
if (next == this) {
return *(reinterpret_cast<Slice*>(value));
} else {
return Slice(key_data, key_length);
}
}
};
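Note the char key_data[1] member: only the first byte of the key lives inside the struct, and the remaining key bytes are allocated immediately after it, so key() can rebuild a Slice without any separate allocation. A minimal sketch of this inline-key allocation, assuming the LRUHandle definition above (AllocateHandle is a hypothetical helper shown only for illustration; LRUCache::Insert further below does exactly this):

// Hypothetical helper illustrating the inline-key trick used by LRUCache::Insert.
static LRUHandle* AllocateHandle(const Slice& key, void* value) {
  // sizeof(LRUHandle) already counts key_data[1], hence the "- 1".
  LRUHandle* e = reinterpret_cast<LRUHandle*>(
      malloc(sizeof(LRUHandle) - 1 + key.size()));
  e->value = value;
  e->key_length = key.size();
  memcpy(e->key_data, key.data(), key.size());  // copy the key bytes in place
  return e;                                     // released later with free(e), not delete
}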
The hash table implementation
class HandleTable {
public:
HandleTable() : length_(0), elems_(0), list_(NULL) { Resize(); }
~HandleTable() { delete[] list_; }
LRUHandle* Lookup(const Slice& key, uint32_t hash) {
return *FindPointer(key, hash);
}
// Insert a node; if an entry with the same key already exists, replace it and return the old one
LRUHandle* Insert(LRUHandle* h) {
LRUHandle** ptr = FindPointer(h->key(), h->hash);
LRUHandle* old = *ptr; // old == NULL means no entry with this key was found
h->next_hash = (old == NULL ? NULL : old->next_hash); // take the old node's place in the chain, or append at the end
*ptr = h;
if (old == NULL) { // no existing entry, so a new node was inserted
++elems_;
if (elems_ > length_) {
// Since each cache entry is fairly large, we aim for a small average linked list length (<= 1).
Resize(); // rehash once the element count exceeds the bucket count, keeping average lookup cost O(1)
}
}
return old;
}
// If a node with this key exists, remove it from the hash table and return it
LRUHandle* Remove(const Slice& key, uint32_t hash) {
LRUHandle** ptr = FindPointer(key, hash);
LRUHandle* result = *ptr;
if (result != NULL) {
*ptr = result->next_hash;
--elems_;
}
return result;
}
private:
// The table consists of an array of buckets; each bucket is a linked
// list of the cache entries that hash into it, i.e. hash collisions
// are resolved by chaining.
uint32_t length_; // number of buckets
uint32_t elems_; // number of elements stored in the table
LRUHandle** list_; // the bucket array
// Return a pointer to the slot that points to the entry matching key/hash.
// If there is no such entry, return a pointer to the trailing slot of the corresponding bucket's chain.
LRUHandle** FindPointer(const Slice& key, uint32_t hash) {
LRUHandle** ptr = &list_[hash & (length_ - 1)];
// Keep walking while the slot is non-empty and either the hash or the key differs
while (*ptr != NULL && ((*ptr)->hash != hash || key != (*ptr)->key())) {
ptr = &(*ptr)->next_hash;
}
return ptr;
}
void Resize() {
uint32_t new_length = 4;
while (new_length < elems_) { // keep doubling until the table can hold all elements (load factor <= 1)
new_length *= 2;
}
LRUHandle** new_list = new LRUHandle*[new_length]; // allocate the new bucket array
memset(new_list, 0, sizeof(new_list[0]) * new_length);
uint32_t count = 0;
for (uint32_t i = 0; i < length_; i++) { // rehash every existing entry in one pass
LRUHandle* h = list_[i]; // head of one old bucket's chain
while (h != NULL) {
LRUHandle* next = h->next_hash;
uint32_t hash = h->hash; // reuse the cached hash instead of recomputing it
LRUHandle** ptr = &new_list[hash & (new_length - 1)]; // the bucket this entry belongs to in the new table
h->next_hash = *ptr; // prepend to the new bucket; *ptr is the current head of that chain
*ptr = h;
h = next;
count++;
}
}
assert(elems_ == count);
delete[] list_; // free the old bucket array
list_ = new_list; // switch to the new bucket array
length_ = new_length;
}
};
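Because length_ starts at 4 and only ever doubles, it is always a power of two, so the expression hash & (length_ - 1) used in FindPointer() and Resize() is just a cheap way of computing hash % length_. A tiny standalone illustration:

#include <cassert>
#include <stdint.h>

int main() {
  // With a table of 4 buckets, length - 1 == 3 == binary 011, so the
  // bitwise AND keeps only the low bits of the hash.
  uint32_t length = 4;
  uint32_t hash = 0x2F;                   // binary ...101111
  uint32_t bucket = hash & (length - 1);  // binary ...000011 -> bucket 3
  assert(bucket == hash % length);        // same result, without a division
  return 0;
}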
The implementation of a single LRU cache shard
// A single shard of sharded cache.
class LRUCache {
public:
LRUCache();
~LRUCache();
// Separate from constructor so caller can easily make an array of LRUCache
void SetCapacity(size_t capacity) { capacity_ = capacity; } // set the capacity
// Like Cache methods, but with an extra "hash" parameter.
// Insert a node
Cache::Handle* Insert(const Slice& key, uint32_t hash,
void* value, size_t charge,
void (*deleter)(const Slice& key, void* value));
Cache::Handle* Lookup(const Slice& key, uint32_t hash); // look up a node
void Release(Cache::Handle* handle); // release a reference to a node
void Erase(const Slice& key, uint32_t hash); // erase a node
void Prune();
size_t TotalCharge() const {
MutexLock l(&mutex_);
return usage_;
}
private:
void LRU_Remove(LRUHandle* e);
void LRU_Append(LRUHandle* e);
void Unref(LRUHandle* e);
// Capacity of the cache; must be initialized (via SetCapacity) before use
size_t capacity_;
// mutex_ protects the following state.
mutable port::Mutex mutex_; // guards concurrent access
size_t usage_; // total charge of the entries currently in the cache
// Dummy head of LRU list.
// lru_.prev is the newest entry, lru_.next is the oldest entry
LRUHandle lru_;
HandleTable table_; // hash table for O(1) lookup of the nodes in the LRU list
};
LRUCache::LRUCache() : usage_(0) {
// Make the circular linked list empty: the dummy head points to itself
lru_.next = &lru_;
lru_.prev = &lru_;
}
LRUCache::~LRUCache(){
for (LRUHandle* e = lru_.next; e != &lru_; ) {
LRUHandle* next = e->next;
assert(e->refs == 1); // Error if caller has an unreleased handle
Unref(e);
e = next;
}
}
// Decrement the reference count; free the entry once it drops to zero
void LRUCache::Unref(LRUHandle* e) {
assert(e->refs > 0);
e->refs--;
if (e->refs <= 0) {
usage_ -= e->charge;
(*e->deleter)(e->key(), e->value);
free(e);
}
}
// Unlink a node from the LRU list
void LRUCache::LRU_Remove(LRUHandle* e) {
e->next->prev = e->prev;
e->prev->next = e->next;
}
// Append a node to the LRU list
void LRUCache::LRU_Append(LRUHandle* e) {
// Make "e" newest entry by inserting just before lru_
e->next = &lru_;
e->prev = lru_.prev;
e->prev->next = e;
e->next->prev = e;
}
Cache::Handle* LRUCache::Lookup(const Slice& key, uint32_t hash) {
MutexLock l(&mutex_);
LRUHandle* e = table_.Lookup(key, hash); // hash-table lookup
if (e != NULL) {
e->refs++;
LRU_Remove(e); // unlink the entry ...
LRU_Append(e); // ... and re-append it so that it becomes the newest
}
return reinterpret_cast<Cache::Handle*>(e);
}
// Release a handle: decrement the entry's reference count
void LRUCache::Release(Cache::Handle* handle) {
MutexLock l(&mutex_);
Unref(reinterpret_cast<LRUHandle*>(handle));
}
// Insert a node; if an entry with the same key already exists, it is replaced and unreferenced
Cache::Handle* LRUCache::Insert(
const Slice& key, uint32_t hash, void* value, size_t charge,
void (*deleter)(const Slice& key, void* value)){
MutexLock l(&mutex_);
LRUHandle* e = reinterpret_cast<LRUHandle*>(malloc(sizeof(LRUHandle)-1 + key.size()));
e->value = value;
e->deleter = deleter;
e->charge = charge;
e->key_length = key.size();
e->hash = hash;
e->refs = 2; // One from LRUCache, one for the returned handle
memcpy(e->key_data, key.data(), key.size());
LRU_Append(e); // add to the LRU list as the newest entry
usage_ += charge;
LRUHandle* old = table_.Insert(e);
if (old != NULL) { // an entry with the same key existed; it has already been replaced in the hash table, so drop it here
LRU_Remove(old);
Unref(old);
}
// Evict while over capacity; lru_.next is the least recently used entry
while (usage_ > capacity_ && lru_.next != &lru_) {
LRUHandle* old = lru_.next;
LRU_Remove(old);
table_.Remove(old->key(), old->hash);
Unref(old);
}
return reinterpret_cast<Cache::Handle*>(e);
}
// Erase the entry for key, if present
void LRUCache::Erase(const Slice& key, uint32_t hash) {
MutexLock l(&mutex_);
LRUHandle* e = table_.Remove(key, hash);
if (e != NULL) {
LRU_Remove(e);
Unref(e);
}
}
void LRUCache::Prune() {
MutexLock l(&mutex_);
for (LRUHandle* e = lru_.next; e != &lru_; ) {
LRUHandle* next = e->next;
if (e->refs == 1) {
table_.Remove(e->key(), e->hash);
LRU_Remove(e);
Unref(e);
}
e = next;
}
}
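To see Insert's replace-and-unref path and Erase from the caller's point of view, here is a small hypothetical test against the public Cache interface (caller-side sketch, not leveldb code; the deleter counts how often the cache hands entries back for cleanup):

#include <cassert>
#include "leveldb/cache.h"

static int g_deleted = 0;

// Counts how many entries the cache has handed back for cleanup.
static void CountingDeleter(const leveldb::Slice& key, void* value) {
  g_deleted++;
}

int main() {
  leveldb::Cache* cache = leveldb::NewLRUCache(1024);

  // First insert of "k": nothing to replace yet.
  cache->Release(cache->Insert("k", NULL, 1, CountingDeleter));
  assert(g_deleted == 0);

  // Second insert of the same key: HandleTable::Insert returns the old
  // entry, LRUCache::Insert unlinks it and Unref drops its last reference,
  // so its deleter runs.
  cache->Release(cache->Insert("k", NULL, 1, CountingDeleter));
  assert(g_deleted == 1);

  // Erase removes the remaining entry, which triggers its deleter as well.
  cache->Erase("k");
  assert(g_deleted == 2);

  delete cache;
  return 0;
}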
The ShardedLRUCache implementation
static const int kNumShardBits = 4;
static const int kNumShards = 1 << kNumShardBits;
class ShardedLRUCache : public Cache {
private:
LRUCache shard_[kNumShards]; // 16 (= 1 << 4) LRUCache shards
port::Mutex id_mutex_;
uint64_t last_id_;
// Compute the hash of a key
static inline uint32_t HashSlice(const Slice& s) {
return Hash(s.data(), s.size(), 0);
}
// The top 4 bits of the 32-bit hash value decide
// which LRUCache shard a key maps to
static uint32_t Shard(uint32_t hash) {
return hash >> (32 - kNumShardBits);
}
public:
// Constructor: distribute the capacity evenly across the shards
explicit ShardedLRUCache(size_t capacity) : last_id_(0) {
const size_t per_shard = (capacity + (kNumShards - 1)) / kNumShards; // per-shard capacity, rounded up
for (int s = 0; s < kNumShards; s++) {
shard_[s].SetCapacity(per_shard);
}
}
virtual ~ShardedLRUCache() { }
// Insert a key/value pair into the selected shard's LRU list and hash table
virtual Handle* Insert(const Slice& key, void* value, size_t charge,
void (*deleter)(const Slice& key, void* value)){
const uint32_t hash = HashSlice(key);
return shard_[Shard(hash)].Insert(key, hash, value, charge, deleter);
}
// Look up a key
virtual Handle* Lookup(const Slice& key) {
const uint32_t hash = HashSlice(key); // compute the hash
return shard_[Shard(hash)].Lookup(key, hash); // look up in the selected shard
}
// Release a handle (decrement its reference count)
virtual void Release(Handle* handle) {
LRUHandle* h = reinterpret_cast<LRUHandle*>(handle);
shard_[Shard(h->hash)].Release(handle);
}
// Erase the entry for key
virtual void Erase(const Slice& key) {
const uint32_t hash = HashSlice(key);
shard_[Shard(hash)].Erase(key, hash);
}
// Return the value stored in a handle
virtual void* Value(Handle* handle) {
return reinterpret_cast<LRUHandle*>(handle)->value;
}
// Return a new unique id
virtual uint64_t NewId() {
MutexLock l(&id_mutex_);
return ++(last_id_);
}
virtual void Prune() {
for (int s = 0; s < kNumShards; s++) {
shard_[s].Prune();
}
}
virtual size_t TotalCharge() const {
size_t total = 0;
for (int s = 0; s < kNumShards; s++) {
total += shard_[s].TotalCharge();
}
return total;
}
};
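Finally, NewLRUCache(), declared in cache.h at the top of this post, is just a thin factory that ties everything together: in util/cache.cc it simply returns a new ShardedLRUCache.

Cache* NewLRUCache(size_t capacity) {
  return new ShardedLRUCache(capacity);
}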