2019-06-07 20:02:01 +02:00
|
|
|
#include <Kernel/Arch/i386/CPU.h>
|
2019-09-30 10:31:06 +02:00
|
|
|
#include <Kernel/FileSystem/DiskBackedFileSystem.h>
|
|
|
|
|
#include <Kernel/KBuffer.h>
|
2019-03-23 22:03:17 +01:00
|
|
|
#include <Kernel/Process.h>
|
2018-10-25 12:35:49 +02:00
|
|
|
|
2018-10-10 11:53:07 +02:00
|
|
|
//#define DBFS_DEBUG
|
|
|
|
|
|
2019-09-30 10:31:06 +02:00
|
|
|
// One slot in the DiskCache. The payload buffer (`data`) is not owned by the
// entry; it points into DiskCache's single shared KBuffer.
struct CacheEntry {
    // Last-access time in seconds (from kgettimeofday); DiskCache::get()
    // evicts the clean entry with the smallest timestamp.
    u32 timestamp { 0 };
    // Index of the on-disk block this entry caches.
    u32 block_index { 0 };
    // Points at one block_size() worth of bytes inside DiskCache's KBuffer.
    u8* data { nullptr };
    // True once `data` actually holds the block's contents (either read from
    // the device or written by a client).
    bool has_data { false };
    // True when `data` is newer than the on-disk block and must be flushed.
    bool is_dirty { false };
};
|
|
|
|
|
|
2019-09-30 10:31:06 +02:00
|
|
|
class DiskCache {
|
|
|
|
|
public:
|
2019-09-30 11:45:22 +02:00
|
|
|
explicit DiskCache(DiskBackedFS& fs)
|
|
|
|
|
: m_fs(fs)
|
|
|
|
|
, m_cached_block_data(KBuffer::create_with_size(m_entry_count * m_fs.block_size()))
|
2019-09-30 10:31:06 +02:00
|
|
|
{
|
|
|
|
|
m_entries = (CacheEntry*)kmalloc_eternal(m_entry_count * sizeof(CacheEntry));
|
|
|
|
|
for (size_t i = 0; i < m_entry_count; ++i) {
|
2019-09-30 11:45:22 +02:00
|
|
|
m_entries[i].data = m_cached_block_data.data() + i * m_fs.block_size();
|
2019-09-30 10:31:06 +02:00
|
|
|
}
|
|
|
|
|
}
|
2019-02-10 20:07:14 +01:00
|
|
|
|
2019-09-30 10:31:06 +02:00
|
|
|
~DiskCache() {}
|
2019-02-10 20:07:14 +01:00
|
|
|
|
2019-09-30 10:31:06 +02:00
|
|
|
bool is_dirty() const { return m_dirty; }
|
|
|
|
|
void set_dirty(bool b) { m_dirty = b; }
|
2019-02-10 20:07:14 +01:00
|
|
|
|
2019-09-30 10:31:06 +02:00
|
|
|
CacheEntry& get(u32 block_index) const
|
2019-02-10 20:07:14 +01:00
|
|
|
{
|
2019-09-30 10:31:06 +02:00
|
|
|
auto now = kgettimeofday().tv_sec;
|
|
|
|
|
|
|
|
|
|
CacheEntry* oldest_clean_entry = nullptr;
|
|
|
|
|
for (size_t i = 0; i < m_entry_count; ++i) {
|
|
|
|
|
auto& entry = m_entries[i];
|
|
|
|
|
if (entry.block_index == block_index) {
|
|
|
|
|
entry.timestamp = now;
|
|
|
|
|
return entry;
|
|
|
|
|
}
|
|
|
|
|
if (!entry.is_dirty) {
|
|
|
|
|
if (!oldest_clean_entry)
|
|
|
|
|
oldest_clean_entry = &entry;
|
|
|
|
|
else if (entry.timestamp < oldest_clean_entry->timestamp)
|
|
|
|
|
oldest_clean_entry = &entry;
|
|
|
|
|
}
|
|
|
|
|
}
|
2019-09-30 11:45:22 +02:00
|
|
|
if (!oldest_clean_entry) {
|
|
|
|
|
// Not a single clean entry! Flush writes and try again.
|
|
|
|
|
m_fs.flush_writes();
|
|
|
|
|
return get(block_index);
|
|
|
|
|
}
|
2019-09-30 10:31:06 +02:00
|
|
|
|
|
|
|
|
// Replace the oldest clean entry.
|
|
|
|
|
auto& new_entry = *oldest_clean_entry;
|
|
|
|
|
new_entry.timestamp = now;
|
|
|
|
|
new_entry.block_index = block_index;
|
|
|
|
|
new_entry.has_data = false;
|
|
|
|
|
new_entry.is_dirty = false;
|
|
|
|
|
return new_entry;
|
2019-02-10 20:07:14 +01:00
|
|
|
}
|
|
|
|
|
|
2019-09-30 10:31:06 +02:00
|
|
|
template<typename Callback>
|
|
|
|
|
void for_each_entry(Callback callback)
|
|
|
|
|
{
|
|
|
|
|
for (size_t i = 0; i < m_entry_count; ++i)
|
|
|
|
|
callback(m_entries[i]);
|
|
|
|
|
}
|
2019-02-10 20:07:14 +01:00
|
|
|
|
2019-09-30 11:45:22 +02:00
|
|
|
private:
|
|
|
|
|
DiskBackedFS& m_fs;
|
2019-09-30 10:31:06 +02:00
|
|
|
size_t m_entry_count { 10000 };
|
|
|
|
|
KBuffer m_cached_block_data;
|
|
|
|
|
CacheEntry* m_entries { nullptr };
|
|
|
|
|
bool m_dirty { false };
|
2019-02-10 20:07:14 +01:00
|
|
|
};
|
|
|
|
|
|
2019-06-21 18:37:47 +02:00
|
|
|
// Takes ownership of the underlying block device; the cache itself is
// created lazily on first access (see DiskBackedFS::cache()).
DiskBackedFS::DiskBackedFS(NonnullRefPtr<DiskDevice>&& device)
    : m_device(move(device))
{
}
|
|
|
|
|
|
2018-11-15 17:13:10 +01:00
|
|
|
// NOTE(review): does not flush dirty cache entries; presumably callers flush
// before teardown — confirm against the shutdown/unmount path.
DiskBackedFS::~DiskBackedFS()
{
}
|
|
|
|
|
|
2019-09-30 11:20:51 +02:00
|
|
|
bool DiskBackedFS::write_block(unsigned index, const u8* data)
|
2018-10-10 11:53:07 +02:00
|
|
|
{
|
|
|
|
|
#ifdef DBFS_DEBUG
|
2019-01-31 17:31:23 +01:00
|
|
|
kprintf("DiskBackedFileSystem::write_block %u, size=%u\n", index, data.size());
|
2018-10-10 11:53:07 +02:00
|
|
|
#endif
|
2019-09-30 10:31:06 +02:00
|
|
|
auto& entry = cache().get(index);
|
2019-09-30 11:20:51 +02:00
|
|
|
memcpy(entry.data, data, block_size());
|
2019-09-30 10:31:06 +02:00
|
|
|
entry.is_dirty = true;
|
|
|
|
|
entry.has_data = true;
|
2019-04-27 17:30:32 +02:00
|
|
|
|
2019-09-30 10:31:06 +02:00
|
|
|
cache().set_dirty(true);
|
2019-04-25 22:05:53 +02:00
|
|
|
return true;
|
2018-10-10 11:53:07 +02:00
|
|
|
}
|
|
|
|
|
|
2019-09-30 11:20:51 +02:00
|
|
|
bool DiskBackedFS::write_blocks(unsigned index, unsigned count, const u8* data)
|
2018-10-10 11:53:07 +02:00
|
|
|
{
|
|
|
|
|
#ifdef DBFS_DEBUG
|
2019-01-31 17:31:23 +01:00
|
|
|
kprintf("DiskBackedFileSystem::write_blocks %u x%u\n", index, count);
|
2018-10-10 11:53:07 +02:00
|
|
|
#endif
|
2019-04-25 22:05:53 +02:00
|
|
|
for (unsigned i = 0; i < count; ++i)
|
2019-09-30 11:20:51 +02:00
|
|
|
write_block(index + i, data + i * block_size());
|
2019-04-25 22:05:53 +02:00
|
|
|
return true;
|
2018-10-10 11:53:07 +02:00
|
|
|
}
|
|
|
|
|
|
2019-09-30 11:04:30 +02:00
|
|
|
bool DiskBackedFS::read_block(unsigned index, u8* buffer) const
|
2018-10-10 11:53:07 +02:00
|
|
|
{
|
|
|
|
|
#ifdef DBFS_DEBUG
|
2019-01-31 17:31:23 +01:00
|
|
|
kprintf("DiskBackedFileSystem::read_block %u\n", index);
|
2018-10-10 11:53:07 +02:00
|
|
|
#endif
|
2019-04-25 22:05:53 +02:00
|
|
|
|
2019-09-30 10:31:06 +02:00
|
|
|
auto& entry = cache().get(index);
|
|
|
|
|
if (!entry.has_data) {
|
|
|
|
|
DiskOffset base_offset = static_cast<DiskOffset>(index) * static_cast<DiskOffset>(block_size());
|
|
|
|
|
bool success = device().read(base_offset, block_size(), entry.data);
|
|
|
|
|
entry.has_data = true;
|
|
|
|
|
ASSERT(success);
|
2019-02-10 20:07:14 +01:00
|
|
|
}
|
2019-09-30 11:04:30 +02:00
|
|
|
memcpy(buffer, entry.data, block_size());
|
|
|
|
|
return true;
|
2018-10-10 11:53:07 +02:00
|
|
|
}
|
|
|
|
|
|
2019-09-30 11:04:30 +02:00
|
|
|
bool DiskBackedFS::read_blocks(unsigned index, unsigned count, u8* buffer) const
|
2018-10-10 11:53:07 +02:00
|
|
|
{
|
|
|
|
|
if (!count)
|
2019-09-30 11:04:30 +02:00
|
|
|
return false;
|
2018-10-10 11:53:07 +02:00
|
|
|
if (count == 1)
|
2019-09-30 11:04:30 +02:00
|
|
|
return read_block(index, buffer);
|
|
|
|
|
u8* out = buffer;
|
2018-10-10 11:53:07 +02:00
|
|
|
|
|
|
|
|
for (unsigned i = 0; i < count; ++i) {
|
2019-09-30 11:04:30 +02:00
|
|
|
if (!read_block(index + i, out))
|
|
|
|
|
return false;
|
2019-01-31 17:31:23 +01:00
|
|
|
out += block_size();
|
2018-10-10 11:53:07 +02:00
|
|
|
}
|
|
|
|
|
|
2019-09-30 11:04:30 +02:00
|
|
|
return true;
|
2018-10-10 11:53:07 +02:00
|
|
|
}
|
|
|
|
|
|
2019-04-25 22:05:53 +02:00
|
|
|
void DiskBackedFS::flush_writes()
|
|
|
|
|
{
|
|
|
|
|
LOCKER(m_lock);
|
2019-09-30 10:31:06 +02:00
|
|
|
if (!cache().is_dirty())
|
|
|
|
|
return;
|
|
|
|
|
u32 count = 0;
|
|
|
|
|
cache().for_each_entry([&](CacheEntry& entry) {
|
|
|
|
|
if (!entry.is_dirty)
|
|
|
|
|
return;
|
|
|
|
|
DiskOffset base_offset = static_cast<DiskOffset>(entry.block_index) * static_cast<DiskOffset>(block_size());
|
|
|
|
|
device().write(base_offset, block_size(), entry.data);
|
|
|
|
|
++count;
|
|
|
|
|
entry.is_dirty = false;
|
|
|
|
|
});
|
|
|
|
|
cache().set_dirty(false);
|
|
|
|
|
dbg() << class_name() << ": " << "Flushed " << count << " blocks to disk";
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// Lazily constructs the DiskCache on first use. The accessor is const so the
// read path can call it, hence the const_cast to hand the cache a mutable
// reference back to this filesystem (it needs one to call flush_writes()).
DiskCache& DiskBackedFS::cache() const
{
    if (!m_cache)
        m_cache = make<DiskCache>(const_cast<DiskBackedFS&>(*this));
    return *m_cache;
}
|