diff options
author | 2023-10-10 11:40:56 +0000 | |
---|---|---|
committer | 2023-10-10 11:40:56 +0000 | |
commit | e02cda008591317b1625707ff8e115a4841aa889 (patch) | |
tree | aee302e3cf8b59ec2d32ec481be3d1afddfc8968 /migration/page_cache.c | |
parent | cc668e6b7e0ffd8c9d130513d12053cf5eda1d3b (diff) |
Introduce Virtio-loopback epsilon release:
The Epsilon release introduces a new compatibility layer which makes the
virtio-loopback design work with QEMU and the rust-vmm vhost-user backend
without requiring any changes.
Signed-off-by: Timos Ampelikiotis <t.ampelikiotis@virtualopensystems.com>
Change-Id: I52e57563e08a7d0bdc002f8e928ee61ba0c53dd9
Diffstat (limited to 'migration/page_cache.c')
-rw-r--r-- | migration/page_cache.c | 175 |
1 file changed, 175 insertions, 0 deletions
diff --git a/migration/page_cache.c b/migration/page_cache.c new file mode 100644 index 000000000..6d4f7a9bb --- /dev/null +++ b/migration/page_cache.c @@ -0,0 +1,175 @@ +/* + * Page cache for QEMU + * The cache is base on a hash of the page address + * + * Copyright 2012 Red Hat, Inc. and/or its affiliates + * + * Authors: + * Orit Wasserman <owasserm@redhat.com> + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. + * + */ + +#include "qemu/osdep.h" + +#include "qapi/qmp/qerror.h" +#include "qapi/error.h" +#include "qemu/host-utils.h" +#include "page_cache.h" +#include "trace.h" + +/* the page in cache will not be replaced in two cycles */ +#define CACHED_PAGE_LIFETIME 2 + +typedef struct CacheItem CacheItem; + +struct CacheItem { + uint64_t it_addr; + uint64_t it_age; + uint8_t *it_data; +}; + +struct PageCache { + CacheItem *page_cache; + size_t page_size; + size_t max_num_items; + size_t num_items; +}; + +PageCache *cache_init(uint64_t new_size, size_t page_size, Error **errp) +{ + int64_t i; + size_t num_pages = new_size / page_size; + PageCache *cache; + + if (new_size < page_size) { + error_setg(errp, QERR_INVALID_PARAMETER_VALUE, "cache size", + "is smaller than one target page size"); + return NULL; + } + + /* round down to the nearest power of 2 */ + if (!is_power_of_2(num_pages)) { + error_setg(errp, QERR_INVALID_PARAMETER_VALUE, "cache size", + "is not a power of two number of pages"); + return NULL; + } + + /* We prefer not to abort if there is no memory */ + cache = g_try_malloc(sizeof(*cache)); + if (!cache) { + error_setg(errp, "Failed to allocate cache"); + return NULL; + } + cache->page_size = page_size; + cache->num_items = 0; + cache->max_num_items = num_pages; + + trace_migration_pagecache_init(cache->max_num_items); + + /* We prefer not to abort if there is no memory */ + cache->page_cache = g_try_malloc((cache->max_num_items) * + sizeof(*cache->page_cache)); + 
if (!cache->page_cache) { + error_setg(errp, "Failed to allocate page cache"); + g_free(cache); + return NULL; + } + + for (i = 0; i < cache->max_num_items; i++) { + cache->page_cache[i].it_data = NULL; + cache->page_cache[i].it_age = 0; + cache->page_cache[i].it_addr = -1; + } + + return cache; +} + +void cache_fini(PageCache *cache) +{ + int64_t i; + + g_assert(cache); + g_assert(cache->page_cache); + + for (i = 0; i < cache->max_num_items; i++) { + g_free(cache->page_cache[i].it_data); + } + + g_free(cache->page_cache); + cache->page_cache = NULL; + g_free(cache); +} + +static size_t cache_get_cache_pos(const PageCache *cache, + uint64_t address) +{ + g_assert(cache->max_num_items); + return (address / cache->page_size) & (cache->max_num_items - 1); +} + +static CacheItem *cache_get_by_addr(const PageCache *cache, uint64_t addr) +{ + size_t pos; + + g_assert(cache); + g_assert(cache->page_cache); + + pos = cache_get_cache_pos(cache, addr); + + return &cache->page_cache[pos]; +} + +uint8_t *get_cached_data(const PageCache *cache, uint64_t addr) +{ + return cache_get_by_addr(cache, addr)->it_data; +} + +bool cache_is_cached(const PageCache *cache, uint64_t addr, + uint64_t current_age) +{ + CacheItem *it; + + it = cache_get_by_addr(cache, addr); + + if (it->it_addr == addr) { + /* update the it_age when the cache hit */ + it->it_age = current_age; + return true; + } + return false; +} + +int cache_insert(PageCache *cache, uint64_t addr, const uint8_t *pdata, + uint64_t current_age) +{ + + CacheItem *it; + + /* actual update of entry */ + it = cache_get_by_addr(cache, addr); + + if (it->it_data && it->it_addr != addr && + it->it_age + CACHED_PAGE_LIFETIME > current_age) { + /* the cache page is fresh, don't replace it */ + return -1; + } + /* allocate page */ + if (!it->it_data) { + it->it_data = g_try_malloc(cache->page_size); + if (!it->it_data) { + trace_migration_pagecache_insert(); + return -1; + } + cache->num_items++; + } + + memcpy(it->it_data, pdata, 
cache->page_size); + + it->it_age = current_age; + it->it_addr = addr; + + return 0; +} |