author     Timos Ampelikiotis <t.ampelikiotis@virtualopensystems.com>  2023-10-10 11:40:56 +0000
committer  Timos Ampelikiotis <t.ampelikiotis@virtualopensystems.com>  2023-10-10 11:40:56 +0000
commit     e02cda008591317b1625707ff8e115a4841aa889 (patch)
tree       aee302e3cf8b59ec2d32ec481be3d1afddfc8968 /util/atomic64.c
parent     cc668e6b7e0ffd8c9d130513d12053cf5eda1d3b (diff)
Introduce Virtio-loopback epsilon release:
The epsilon release introduces a new compatibility layer which makes the virtio-loopback design work with QEMU and the rust-vmm vhost-user backend without requiring any changes.

Signed-off-by: Timos Ampelikiotis <t.ampelikiotis@virtualopensystems.com>
Change-Id: I52e57563e08a7d0bdc002f8e928ee61ba0c53dd9
Diffstat (limited to 'util/atomic64.c')
-rw-r--r--  util/atomic64.c  83
1 file changed, 83 insertions(+), 0 deletions(-)
diff --git a/util/atomic64.c b/util/atomic64.c
new file mode 100644
index 000000000..93037d5b1
--- /dev/null
+++ b/util/atomic64.c
@@ -0,0 +1,83 @@
+/*
+ * Copyright (C) 2018, Emilio G. Cota <cota@braap.org>
+ *
+ * License: GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ */
+#include "qemu/osdep.h"
+#include "qemu/atomic.h"
+#include "qemu/thread.h"
+
+#ifdef CONFIG_ATOMIC64
+#error This file must only be compiled if !CONFIG_ATOMIC64
+#endif
+
+/*
+ * When !CONFIG_ATOMIC64, we serialize both reads and writes with spinlocks.
+ * We use an array of spinlocks, with padding computed at run-time based on
+ * the host's dcache line size.
+ * We point to the array with a void * to simplify the padding's computation.
+ * Consecutive spinlocks are thus spaced lock_size bytes apart.
+ */
+static void *lock_array;
+static size_t lock_size;
+
+/*
+ * Systems without CONFIG_ATOMIC64 are unlikely to have many cores, so we use a
+ * small array of locks.
+ */
+#define NR_LOCKS 16
+
+static QemuSpin *addr_to_lock(const void *addr)
+{
+ uintptr_t a = (uintptr_t)addr;
+ uintptr_t idx;
+
+ idx = a >> qemu_dcache_linesize_log;
+ idx ^= (idx >> 8) ^ (idx >> 16);
+ idx &= NR_LOCKS - 1;
+ return lock_array + idx * lock_size;
+}
+
+#define GEN_READ(name, type) \
+ type name(const type *ptr) \
+ { \
+ QemuSpin *lock = addr_to_lock(ptr); \
+ type ret; \
+ \
+ qemu_spin_lock(lock); \
+ ret = *ptr; \
+ qemu_spin_unlock(lock); \
+ return ret; \
+ }
+
+GEN_READ(qatomic_read_i64, int64_t)
+GEN_READ(qatomic_read_u64, uint64_t)
+#undef GEN_READ
+
+#define GEN_SET(name, type) \
+ void name(type *ptr, type val) \
+ { \
+ QemuSpin *lock = addr_to_lock(ptr); \
+ \
+ qemu_spin_lock(lock); \
+ *ptr = val; \
+ qemu_spin_unlock(lock); \
+ }
+
+GEN_SET(qatomic_set_i64, int64_t)
+GEN_SET(qatomic_set_u64, uint64_t)
+#undef GEN_SET
+
+void qatomic64_init(void)
+{
+ int i;
+
+ lock_size = ROUND_UP(sizeof(QemuSpin), qemu_dcache_linesize);
+ lock_array = qemu_memalign(qemu_dcache_linesize, lock_size * NR_LOCKS);
+ for (i = 0; i < NR_LOCKS; i++) {
+ QemuSpin *lock = lock_array + i * lock_size;
+
+ qemu_spin_init(lock);
+ }
+}
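
The functions introduced by this file are the spinlock-backed fallbacks used when the host cannot perform native 64-bit atomic accesses (the #error above enforces that the file is only built in that configuration). The one caller-visible requirement is that qatomic64_init() runs once before the first 64-bit access; after that, reads and writes go through the generated accessors. A hypothetical call sequence (the counter variable is illustrative, not part of the patch):

    int64_t counter;

    qatomic64_init();                          /* once, at startup */
    qatomic_set_i64(&counter, 42);
    int64_t val = qatomic_read_i64(&counter);  /* returns 42 */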
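
The layout is classic lock striping: an address is hashed to one of NR_LOCKS spinlocks, and each lock is padded out to its own cache line so two CPUs spinning on different locks never contend on the same line. With, say, a 4-byte spinlock and 64-byte lines, ROUND_UP gives lock_size = 64 and the whole array occupies 16 * 64 = 1 KiB. Below is a minimal standalone sketch of the same technique in C11, runnable outside QEMU; atomic_flag stands in for QemuSpin, and the DCACHE_* constants replace QEMU's run-time dcache probing (all names here are illustrative stand-ins, not QEMU API):

    #include <stdatomic.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    /* Stand-ins for QEMU's run-time dcache probing: assume 64-byte lines. */
    #define DCACHE_LINESIZE     64
    #define DCACHE_LINESIZE_LOG 6
    #define NR_LOCKS            16  /* must stay a power of two for the mask */

    static void *lock_array;        /* one padded spinlock per slot */
    static size_t lock_size;        /* slot stride, a multiple of the line size */

    static atomic_flag *addr_to_lock(const void *addr)
    {
        uintptr_t idx = (uintptr_t)addr >> DCACHE_LINESIZE_LOG;

        /* Fold higher-order bits into the low bits, then mask down to the
         * power-of-two lock count, exactly as in the patch above. */
        idx ^= (idx >> 8) ^ (idx >> 16);
        idx &= NR_LOCKS - 1;
        return (atomic_flag *)((char *)lock_array + idx * lock_size);
    }

    static int64_t locked_read_i64(const int64_t *ptr)
    {
        atomic_flag *lock = addr_to_lock(ptr);
        int64_t ret;

        while (atomic_flag_test_and_set_explicit(lock, memory_order_acquire)) {
            /* spin until the flag was clear */
        }
        ret = *ptr;
        atomic_flag_clear_explicit(lock, memory_order_release);
        return ret;
    }

    static void locked_set_i64(int64_t *ptr, int64_t val)
    {
        atomic_flag *lock = addr_to_lock(ptr);

        while (atomic_flag_test_and_set_explicit(lock, memory_order_acquire)) {
            /* spin */
        }
        *ptr = val;
        atomic_flag_clear_explicit(lock, memory_order_release);
    }

    static void locks_init(void)
    {
        /* Round the lock size up to the line size so no two locks share a line. */
        lock_size = (sizeof(atomic_flag) + DCACHE_LINESIZE - 1)
                    & ~(size_t)(DCACHE_LINESIZE - 1);
        lock_array = aligned_alloc(DCACHE_LINESIZE, lock_size * NR_LOCKS);
        if (!lock_array) {
            abort();
        }
        for (int i = 0; i < NR_LOCKS; i++) {
            /* Put each flag into the clear (unlocked) state. */
            atomic_flag_clear((atomic_flag *)((char *)lock_array + i * lock_size));
        }
    }

    int main(void)
    {
        int64_t counter;

        locks_init();
        locked_set_i64(&counter, INT64_C(1) << 40);
        printf("%lld\n", (long long)locked_read_i64(&counter));
        return 0;
    }

Because the read side also takes the lock, torn reads are impossible even though the hardware cannot load 64 bits atomically; the cost is that unrelated addresses hashing to the same stripe serialize against each other, which the 16-way striping keeps unlikely.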