22 #ifndef PAGESPEED_KERNEL_SHAREDMEM_SHARED_MEM_CACHE_DATA_H_
23 #define PAGESPEED_KERNEL_SHAREDMEM_SHARED_MEM_CACHE_DATA_H_
28 #include "base/logging.h"
32 #include "pagespeed/kernel/base/thread_annotations.h"
34 namespace net_instaweb {
37 class AbstractSharedMem;
38 class AbstractSharedMemSegment;
41 namespace SharedMemCacheData {
43 typedef int32 EntryNum;
44 typedef int32 BlockNum;
45 typedef std::vector<BlockNum> BlockVector;
47 const BlockNum kInvalidBlock = -1;
48 const EntryNum kInvalidEntry = -1;
49 const size_t kHashSize = 16;
64 int64 num_put_concurrent_create;
65 int64 num_put_concurrent_full_set;
83 BlockNum free_list_front;
84 EntryNum lru_list_front;
85 EntryNum lru_list_rear;
94 char hash_bytes[kHashSize];
95 int64 last_use_timestamp_ms;
104 BlockNum first_block;
120 template<
size_t kBlockSize>
133 size_t cache_entries,
size_t data_blocks);
152 size_t cache_entries,
size_t data_blocks);
163 DCHECK_LT(block, static_cast<BlockNum>(data_blocks_));
164 return block_successors_[block];
167 void SetBlockSuccessor(BlockNum block, BlockNum next)
168 EXCLUSIVE_LOCKS_REQUIRED(
mutex()) {
170 DCHECK_LT(block, static_cast<BlockNum>(data_blocks_));
172 DCHECK_GE(next, kInvalidBlock);
173 DCHECK_LT(next, static_cast<BlockNum>(data_blocks_));
175 block_successors_[block] = next;
181 EXCLUSIVE_LOCKS_REQUIRED(
mutex()) {
182 for (
size_t pos = 0; pos < blocks.size(); ++pos) {
183 if (pos == (blocks.size() - 1)) {
184 SetBlockSuccessor(blocks[pos], kInvalidBlock);
186 SetBlockSuccessor(blocks[pos], blocks[pos + 1]);
201 EXCLUSIVE_LOCKS_REQUIRED(
mutex());
207 EXCLUSIVE_LOCKS_REQUIRED(
mutex());
214 return reinterpret_cast<CacheEntry*
>(directory_base_) + slot;
224 EntryNum OldestEntryNum() {
225 return sector_header_->lru_list_rear;
232 return blocks_base_ + kBlockSize * block_num;
241 return NeededPieces(size, kBlockSize);
247 static size_t BytesInPortion(
size_t total_bytes,
size_t b,
size_t total);
252 EXCLUSIVE_LOCKS_REQUIRED(
mutex());
268 static size_t NeededPieces(
size_t total,
size_t piece_size) {
269 return (total + piece_size - 1) / piece_size;
273 size_t cache_entries_;
279 SectorHeader* sector_header_;
280 BlockNum* block_successors_ PT_GUARDED_BY(
mutex());
281 char* directory_base_;
283 size_t sector_offset_;
void DumpStats(MessageHandler *handler)
static size_t RequiredSize(AbstractSharedMem *shmem_runtime, size_t cache_entries, size_t data_blocks)
Definition: shared_mem_cache_data.h:121
int64 used_entries
Current state stats — updated by SharedMemCacheData.
Definition: shared_mem_cache_data.h:72
int64 num_put
Definition: shared_mem_cache_data.h:61
Sector(AbstractSharedMemSegment *segment, size_t sector_offset, size_t cache_entries, size_t data_blocks)
void InsertEntryIntoLRU(EntryNum entry_num)
static size_t BytesInPortion(size_t total_bytes, size_t b, size_t total)
void Add(const SectorStats &other)
Adds the other object's counters to this object's. No concurrency control is done.
Abstract interface for implementing a mutex.
Definition: abstract_mutex.h:28
CacheEntry * EntryAt(EntryNum slot)
Returns the cache entry at the given slot number.
Definition: shared_mem_cache_data.h:213
int64 num_put_replace
Number of puts that replaced an entry holding a different key.
Definition: shared_mem_cache_data.h:63
uint32 open_count
Number of readers currently accessing the data.
Definition: shared_mem_cache_data.h:110
bool Attach(MessageHandler *handler)
bool Initialize(MessageHandler *handler)
Definition: shared_mem_cache_data.h:93
int64 num_put_update
update of the same key
Definition: shared_mem_cache_data.h:62
Definition: scoped_ptr.h:30
static size_t DataBlocksForSize(size_t size)
Number of data blocks needed to store size bytes.
Definition: shared_mem_cache_data.h:240
std::string GoogleString
PAGESPEED_KERNEL_BASE_STRING_H_.
Definition: string.h:24
void ReturnBlocksToFreeList(const BlockVector &blocks) EXCLUSIVE_LOCKS_REQUIRED(mutex())
int64 last_checkpoint_ms
When this sector was last checkpointed to disk.
Definition: shared_mem_cache_data.h:69
int64 num_put_spins
Number of times writers had to sleep behind readers.
Definition: shared_mem_cache_data.h:66
Definition: abstract_shared_mem.h:86
int AllocBlocksFromFreeList(int goal, BlockVector *blocks) EXCLUSIVE_LOCKS_REQUIRED(mutex())
uint32 padding
ensures we're 8-aligned.
Definition: shared_mem_cache_data.h:112
void UnlinkEntryFromLRU(EntryNum entry_num)
Removes from the LRU. Safe to call if not in the LRU already.
void LinkBlockSuccessors(const BlockVector &blocks) EXCLUSIVE_LOCKS_REQUIRED(mutex())
Definition: shared_mem_cache_data.h:180
EntryNum lru_prev
Definition: shared_mem_cache_data.h:101
Definition: message_handler.h:39
int BlockListForEntry(CacheEntry *entry, BlockVector *out_blocks) EXCLUSIVE_LOCKS_REQUIRED(mutex())
BlockNum GetBlockSuccessor(BlockNum block) EXCLUSIVE_LOCKS_REQUIRED(mutex())
Definition: shared_mem_cache_data.h:161
Definition: abstract_shared_mem.h:31
Definition: shared_mem_cache_data.h:51
GoogleString Dump(size_t total_entries, size_t total_blocks) const
Text dump of the statistics. No concurrency control is done.
AbstractMutex * mutex() const LOCK_RETURNED(mutex_)
Mutex ops.
Definition: shared_mem_cache_data.h:157
char * BlockBytes(BlockNum block_num)
Definition: shared_mem_cache_data.h:231
bool creating
When this is true, someone is trying to overwrite this entry.
Definition: shared_mem_cache_data.h:107
int64 num_get
Number of calls to get.
Definition: shared_mem_cache_data.h:67
SectorStats * sector_stats()
Definition: shared_mem_cache_data.h:257